/*
 * This file is part of the Emulex Linux Device Driver for Fibre Channel
 * Host Bus Adapters (lpfc). It handles HBA initialization, model
 * identification, heartbeat/timer handling, and error-attention recovery.
 */
#include <linux/blkdev.h>
#include <linux/delay.h>
#include <linux/dma-mapping.h>
#include <linux/idr.h>
#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/kthread.h>
#include <linux/pci.h>
#include <linux/spinlock.h>
#include <linux/ctype.h>
#include <linux/aer.h>
#include <linux/slab.h>
#include <linux/firmware.h>
#include <linux/miscdevice.h>
#include <linux/percpu.h>
#include <linux/msi.h>
#include <linux/irq.h>
#include <linux/bitops.h>
#include <linux/crash_dump.h>
#include <linux/cpu.h>
#include <linux/cpuhotplug.h>

#include <scsi/scsi.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_transport_fc.h>
#include <scsi/scsi_tcq.h>
#include <scsi/fc/fc_fs.h>

#include "lpfc_hw4.h"
#include "lpfc_hw.h"
#include "lpfc_sli.h"
#include "lpfc_sli4.h"
#include "lpfc_nl.h"
#include "lpfc_disc.h"
#include "lpfc.h"
#include "lpfc_scsi.h"
#include "lpfc_nvme.h"
#include "lpfc_logmsg.h"
#include "lpfc_crtn.h"
#include "lpfc_vport.h"
#include "lpfc_version.h"
#include "lpfc_ids.h"

static enum cpuhp_state lpfc_cpuhp_state;

static uint32_t lpfc_present_cpu;
static bool lpfc_pldv_detect;

static void __lpfc_cpuhp_remove(struct lpfc_hba *phba);
static void lpfc_cpuhp_remove(struct lpfc_hba *phba);
static void lpfc_cpuhp_add(struct lpfc_hba *phba);
static void lpfc_get_hba_model_desc(struct lpfc_hba *, uint8_t *, uint8_t *);
static int lpfc_post_rcv_buf(struct lpfc_hba *);
static int lpfc_sli4_queue_verify(struct lpfc_hba *);
static int lpfc_create_bootstrap_mbox(struct lpfc_hba *);
static int lpfc_setup_endian_order(struct lpfc_hba *);
static void lpfc_destroy_bootstrap_mbox(struct lpfc_hba *);
static void lpfc_free_els_sgl_list(struct lpfc_hba *);
static void lpfc_free_nvmet_sgl_list(struct lpfc_hba *);
static void lpfc_init_sgl_list(struct lpfc_hba *);
static int lpfc_init_active_sgl_array(struct lpfc_hba *);
static void lpfc_free_active_sgl(struct lpfc_hba *);
static int lpfc_hba_down_post_s3(struct lpfc_hba *phba);
static int lpfc_hba_down_post_s4(struct lpfc_hba *phba);
static int lpfc_sli4_cq_event_pool_create(struct lpfc_hba *);
static void lpfc_sli4_cq_event_pool_destroy(struct lpfc_hba *);
static void lpfc_sli4_cq_event_release_all(struct lpfc_hba *);
static void lpfc_sli4_disable_intr(struct lpfc_hba *);
static uint32_t lpfc_sli4_enable_intr(struct lpfc_hba *, uint32_t);
static void lpfc_sli4_oas_verify(struct lpfc_hba *phba);
static uint16_t lpfc_find_cpu_handle(struct lpfc_hba *, uint16_t, int);
static void lpfc_setup_bg(struct lpfc_hba *, struct Scsi_Host *);
static int lpfc_sli4_cgn_parm_chg_evt(struct lpfc_hba *);
static void lpfc_sli4_prep_dev_for_reset(struct lpfc_hba *phba);

static struct scsi_transport_template *lpfc_transport_template = NULL;
static struct scsi_transport_template *lpfc_vport_transport_template = NULL;
static DEFINE_IDR(lpfc_hba_index);
#define LPFC_NVMET_BUF_POST 254
static int lpfc_vmid_res_alloc(struct lpfc_hba *phba, struct lpfc_vport *vport);

/**
 * lpfc_config_port_prep - Perform lpfc initialization prior to config port
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine will do LPFC initialization prior to issuing the CONFIG_PORT
 * mailbox command. It retrieves the revision information from the HBA and
 * collects the Vital Product Data (VPD) about the HBA for preparing the
 * configuration of the HBA.
 *
 * Return codes
 *   0 - success.
 *   -ERESTART - requests the SLI layer to reset the HBA and try again.
 *   Any other value - indicates an error.
 **/
int
lpfc_config_port_prep(struct lpfc_hba *phba)
{
	lpfc_vpd_t *vp = &phba->vpd;
	int i = 0, rc;
	LPFC_MBOXQ_t *pmb;
	MAILBOX_t *mb;
	char *lpfc_vpd_data = NULL;
	uint16_t offset = 0;
	static char licensed[56] =
		    "key unlock for use with gnu public licensed code only\0";
	static int init_key = 1;

	pmb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!pmb) {
		phba->link_state = LPFC_HBA_ERROR;
		return -ENOMEM;
	}

	mb = &pmb->u.mb;
	phba->link_state = LPFC_INIT_MBX_CMDS;

	if (lpfc_is_LC_HBA(phba->pcidev->device)) {
		if (init_key) {
			uint32_t *ptext = (uint32_t *) licensed;

			for (i = 0; i < 56; i += sizeof (uint32_t), ptext++)
				*ptext = cpu_to_be32(*ptext);
			init_key = 0;
		}
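		/*
		 * Illustration (not from the original source): the loop above
		 * converts each 32-bit word of the license key to big-endian,
		 * i.e. on a little-endian host the bytes of every word are
		 * reversed in memory, matching the word-oriented byte order
		 * the mailbox transfer expects; on a big-endian host
		 * cpu_to_be32() is a no-op. The static init_key guard ensures
		 * the shared static buffer is only swapped once even when
		 * multiple adapters are initialized.
		 */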

		lpfc_read_nv(phba, pmb);
		memset((char*)mb->un.varRDnvp.rsvd3, 0,
			sizeof (mb->un.varRDnvp.rsvd3));
		memcpy((char*)mb->un.varRDnvp.rsvd3, licensed,
			 sizeof (licensed));

		rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL);

		if (rc != MBX_SUCCESS) {
			lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
					"0324 Config Port initialization "
					"error, mbxCmd x%x READ_NVPARM, "
					"mbxStatus x%x\n",
					mb->mbxCommand, mb->mbxStatus);
			mempool_free(pmb, phba->mbox_mem_pool);
			return -ERESTART;
		}
		memcpy(phba->wwnn, (char *)mb->un.varRDnvp.nodename,
		       sizeof(phba->wwnn));
		memcpy(phba->wwpn, (char *)mb->un.varRDnvp.portname,
		       sizeof(phba->wwpn));
	}

	/*
	 * Clear all option bits except LPFC_SLI3_BG_ENABLED,
	 * which was already set.
	 */
	phba->sli3_options &= (uint32_t)LPFC_SLI3_BG_ENABLED;

	/* Setup and issue mailbox READ REV command */
	lpfc_read_rev(phba, pmb);
	rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL);
	if (rc != MBX_SUCCESS) {
		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
				"0439 Adapter failed to init, mbxCmd x%x "
				"READ_REV, mbxStatus x%x\n",
				mb->mbxCommand, mb->mbxStatus);
		mempool_free( pmb, phba->mbox_mem_pool);
		return -ERESTART;
	}

	/*
	 * The value of rr must be 1 since the driver set the cv field to 1.
	 * This setting requires the FW to set all revision fields.
	 */
	if (mb->un.varRdRev.rr == 0) {
		vp->rev.rBit = 0;
		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
				"0440 Adapter failed to init, READ_REV has "
				"missing revision information.\n");
		mempool_free(pmb, phba->mbox_mem_pool);
		return -ERESTART;
	}

	if (phba->sli_rev == 3 && !mb->un.varRdRev.v3rsp) {
		mempool_free(pmb, phba->mbox_mem_pool);
		return -EINVAL;
	}

	/* Save information as VPD data */
	vp->rev.rBit = 1;
	memcpy(&vp->sli3Feat, &mb->un.varRdRev.sli3Feat, sizeof(uint32_t));
	vp->rev.sli1FwRev = mb->un.varRdRev.sli1FwRev;
	memcpy(vp->rev.sli1FwName, (char*) mb->un.varRdRev.sli1FwName, 16);
	vp->rev.sli2FwRev = mb->un.varRdRev.sli2FwRev;
	memcpy(vp->rev.sli2FwName, (char *) mb->un.varRdRev.sli2FwName, 16);
	vp->rev.biuRev = mb->un.varRdRev.biuRev;
	vp->rev.smRev = mb->un.varRdRev.smRev;
	vp->rev.smFwRev = mb->un.varRdRev.un.smFwRev;
	vp->rev.endecRev = mb->un.varRdRev.endecRev;
	vp->rev.fcphHigh = mb->un.varRdRev.fcphHigh;
	vp->rev.fcphLow = mb->un.varRdRev.fcphLow;
	vp->rev.feaLevelHigh = mb->un.varRdRev.feaLevelHigh;
	vp->rev.feaLevelLow = mb->un.varRdRev.feaLevelLow;
	vp->rev.postKernRev = mb->un.varRdRev.postKernRev;
	vp->rev.opFwRev = mb->un.varRdRev.opFwRev;

	/* If the sli feature level is less then 9, we must
	 * tear down all RPIs and VPIs on link down if NPIV
	 * is enabled.
	 */
	if (vp->rev.feaLevelHigh < 9)
		phba->sli3_options |= LPFC_SLI3_VPORT_TEARDOWN;

	if (lpfc_is_LC_HBA(phba->pcidev->device))
		memcpy(phba->RandomData, (char *)&mb->un.varWords[24],
			sizeof (phba->RandomData));

	/* Get adapter VPD information */
	lpfc_vpd_data = kmalloc(DMP_VPD_SIZE, GFP_KERNEL);
	if (!lpfc_vpd_data)
		goto out_free_mbox;
	do {
		lpfc_dump_mem(phba, pmb, offset, DMP_REGION_VPD);
		rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL);

		if (rc != MBX_SUCCESS) {
			lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
					"0441 VPD not present on adapter, "
					"mbxCmd x%x DUMP VPD, mbxStatus x%x\n",
					mb->mbxCommand, mb->mbxStatus);
			mb->un.varDmp.word_cnt = 0;
		}
		/* dump mem may return a zero when finished or we got a
		 * mailbox error, either way we are done.
		 */
		if (mb->un.varDmp.word_cnt == 0)
			break;

		if (mb->un.varDmp.word_cnt > DMP_VPD_SIZE - offset)
			mb->un.varDmp.word_cnt = DMP_VPD_SIZE - offset;
		lpfc_sli_pcimem_bcopy(((uint8_t *)mb) + DMP_RSP_OFFSET,
				      lpfc_vpd_data + offset,
				      mb->un.varDmp.word_cnt);
		offset += mb->un.varDmp.word_cnt;
	} while (mb->un.varDmp.word_cnt && offset < DMP_VPD_SIZE);

	lpfc_parse_vpd(phba, lpfc_vpd_data, offset);

	kfree(lpfc_vpd_data);
out_free_mbox:
	mempool_free(pmb, phba->mbox_mem_pool);
	return 0;
}

/**
 * lpfc_config_async_cmpl - Completion handler for config async event mbox cmd
 * @phba: pointer to lpfc hba data structure.
 * @pmboxq: pointer to the driver internal queue element for mailbox command.
 *
 * This is the completion handler for the driver's configure-asynchronous-
 * event mailbox command to the device. If the command completes
 * successfully, the temperature sensor support flag is set; otherwise it
 * is cleared.
 **/
static void
lpfc_config_async_cmpl(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmboxq)
{
	if (pmboxq->u.mb.mbxStatus == MBX_SUCCESS)
		phba->temp_sensor_support = 1;
	else
		phba->temp_sensor_support = 0;
	mempool_free(pmboxq, phba->mbox_mem_pool);
	return;
}

/**
 * lpfc_dump_wakeup_param_cmpl - dump memory mailbox command completion handler
 * @phba: pointer to lpfc hba data structure.
 * @pmboxq: pointer to the driver internal queue element for mailbox command.
 *
 * This is the completion handler for the dump mailbox command used to read
 * the wakeup parameters. It decodes the Option ROM version word into a
 * readable string and stores it in phba->OptionROMVersion.
 **/
static void
lpfc_dump_wakeup_param_cmpl(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmboxq)
{
	struct prog_id *prg;
	uint32_t prog_id_word;
	char dist = ' ';
	/* character array used for decoding dist type. */
	char dist_char[] = "nabx";

	if (pmboxq->u.mb.mbxStatus != MBX_SUCCESS) {
		mempool_free(pmboxq, phba->mbox_mem_pool);
		return;
	}

	prg = (struct prog_id *) &prog_id_word;

	/* word 7 contain option rom version */
	prog_id_word = pmboxq->u.mb.un.varWords[7];

	/* Decode the Option rom version word to a readable string */
	if (prg->dist < 4)
		dist = dist_char[prg->dist];

	if ((prg->dist == 3) && (prg->num == 0))
		snprintf(phba->OptionROMVersion, 32, "%d.%d%d",
			 prg->ver, prg->rev, prg->lev);
	else
		snprintf(phba->OptionROMVersion, 32, "%d.%d%d%c%d",
			 prg->ver, prg->rev, prg->lev,
			 dist, prg->num);
	mempool_free(pmboxq, phba->mbox_mem_pool);
	return;
}

/**
 * lpfc_update_vport_wwn - Updates the fc_nodename and fc_portname of @vport
 * @vport: pointer to lpfc vport data structure.
 *
 * This routine reconciles the vport's soft (user-settable) WWNN/WWPN with
 * the names read from the adapter's service parameters: an unset soft name
 * is taken from the service parameters, while a set soft name overrides
 * them. A fabric-assigned WWPN (FA-PWWN) change is detected and flagged.
 **/
void
lpfc_update_vport_wwn(struct lpfc_vport *vport)
{
	struct lpfc_hba *phba = vport->phba;

	/*
	 * If the soft WWNN has not been set, take it from the service
	 * parameters; otherwise the soft name overrides the service
	 * parameters.
	 */
	if (vport->fc_nodename.u.wwn[0] == 0)
		memcpy(&vport->fc_nodename, &vport->fc_sparam.nodeName,
		       sizeof(struct lpfc_name));
	else
		memcpy(&vport->fc_sparam.nodeName, &vport->fc_nodename,
		       sizeof(struct lpfc_name));

	/*
	 * If the port name has changed, set the parameter-change flag so
	 * the login can be re-registered with the new FA-PWWN.
	 */
	if (vport->fc_portname.u.wwn[0] != 0 &&
	    memcmp(&vport->fc_portname, &vport->fc_sparam.portName,
	    sizeof(struct lpfc_name))) {
		vport->vport_flag |= FAWWPN_PARAM_CHG;

		if (phba->sli_rev == LPFC_SLI_REV4 &&
		    vport->port_type == LPFC_PHYSICAL_PORT &&
		    phba->sli4_hba.fawwpn_flag & LPFC_FAWWPN_FABRIC) {
			lpfc_printf_log(phba, KERN_INFO,
					LOG_SLI | LOG_DISCOVERY | LOG_ELS,
					"2701 FA-PWWN change WWPN from %llx to "
					"%llx: vflag x%x fawwpn_flag x%x\n",
					wwn_to_u64(vport->fc_portname.u.wwn),
					wwn_to_u64
					   (vport->fc_sparam.portName.u.wwn),
					vport->vport_flag,
					phba->sli4_hba.fawwpn_flag);
			memcpy(&vport->fc_portname, &vport->fc_sparam.portName,
			       sizeof(struct lpfc_name));
		}
	}

	if (vport->fc_portname.u.wwn[0] == 0)
		memcpy(&vport->fc_portname, &vport->fc_sparam.portName,
		       sizeof(struct lpfc_name));
	else
		memcpy(&vport->fc_sparam.portName, &vport->fc_portname,
		       sizeof(struct lpfc_name));
}

/**
 * lpfc_config_port_post - Perform lpfc initialization after config port
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine will do LPFC initialization after the CONFIG_PORT mailbox
 * command call. It performs all internal resource and state setups on the
 * port: post IOCB buffers, enable appropriate host interrupt attentions,
 * ELS ring timers, etc.
 *
 * Return codes
 *   0 - success.
 *   Any other value - error.
 **/
int
lpfc_config_port_post(struct lpfc_hba *phba)
{
	struct lpfc_vport *vport = phba->pport;
	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
	LPFC_MBOXQ_t *pmb;
	MAILBOX_t *mb;
	struct lpfc_dmabuf *mp;
	struct lpfc_sli *psli = &phba->sli;
	uint32_t status, timeout;
	int i, j;
	int rc;

	spin_lock_irq(&phba->hbalock);
	/*
	 * If the Config port completed correctly the HBA is not
	 * over heated any more.
	 */
	if (phba->over_temp_state == HBA_OVER_TEMP)
		phba->over_temp_state = HBA_NORMAL_TEMP;
	spin_unlock_irq(&phba->hbalock);

	pmb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!pmb) {
		phba->link_state = LPFC_HBA_ERROR;
		return -ENOMEM;
	}
	mb = &pmb->u.mb;

	/* Get login parameters for NID. */
	rc = lpfc_read_sparam(phba, pmb, 0);
	if (rc) {
		mempool_free(pmb, phba->mbox_mem_pool);
		return -ENOMEM;
	}

	pmb->vport = vport;
	if (lpfc_sli_issue_mbox(phba, pmb, MBX_POLL) != MBX_SUCCESS) {
		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
				"0448 Adapter failed init, mbxCmd x%x "
				"READ_SPARM mbxStatus x%x\n",
				mb->mbxCommand, mb->mbxStatus);
		phba->link_state = LPFC_HBA_ERROR;
		lpfc_mbox_rsrc_cleanup(phba, pmb, MBOX_THD_UNLOCKED);
		return -EIO;
	}

	mp = (struct lpfc_dmabuf *)pmb->ctx_buf;

	/* This dmabuf was allocated by lpfc_read_sparam. The dmabuf is no
	 * longer needed.  Prevent unintended ctx_buf access as the mbox is
	 * reused.
	 */
	memcpy(&vport->fc_sparam, mp->virt, sizeof (struct serv_parm));
	lpfc_mbuf_free(phba, mp->virt, mp->phys);
	kfree(mp);
	pmb->ctx_buf = NULL;
	lpfc_update_vport_wwn(vport);

	/* Update the fc_host data structures with new wwn. */
	fc_host_node_name(shost) = wwn_to_u64(vport->fc_nodename.u.wwn);
	fc_host_port_name(shost) = wwn_to_u64(vport->fc_portname.u.wwn);
	fc_host_max_npiv_vports(shost) = phba->max_vpi;

	/* If no serial number in VPD data, use low 6 bytes of WWNN */
	if (phba->SerialNumber[0] == 0) {
		uint8_t *outptr;

		outptr = &vport->fc_nodename.u.s.IEEE[0];
		for (i = 0; i < 12; i++) {
			status = *outptr++;
			j = ((status & 0xf0) >> 4);
			if (j <= 9)
				phba->SerialNumber[i] =
				    (char)((uint8_t) 0x30 + (uint8_t) j);
			else
				phba->SerialNumber[i] =
				    (char)((uint8_t) 0x61 + (uint8_t) (j - 10));
			i++;
			j = (status & 0xf);
			if (j <= 9)
				phba->SerialNumber[i] =
				    (char)((uint8_t) 0x30 + (uint8_t) j);
			else
				phba->SerialNumber[i] =
				    (char)((uint8_t) 0x61 + (uint8_t) (j - 10));
		}
	}
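	/*
	 * Worked example (illustrative, not from the original source): the
	 * loop above renders each IEEE address byte as two lowercase hex
	 * digits, mapping nibbles 0-9 to '0'-'9' (0x30 + j) and 10-15 to
	 * 'a'-'f' (0x61 + j - 10). A byte of 0x3A therefore contributes the
	 * characters '3' and 'a' to phba->SerialNumber.
	 */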

	lpfc_read_config(phba, pmb);
	pmb->vport = vport;
	if (lpfc_sli_issue_mbox(phba, pmb, MBX_POLL) != MBX_SUCCESS) {
		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
				"0453 Adapter failed to init, mbxCmd x%x "
				"READ_CONFIG, mbxStatus x%x\n",
				mb->mbxCommand, mb->mbxStatus);
		phba->link_state = LPFC_HBA_ERROR;
		mempool_free( pmb, phba->mbox_mem_pool);
		return -EIO;
	}

	/* Check if the port is disabled */
	lpfc_sli_read_link_ste(phba);

	/* Reset the DFT_HBA_Q_DEPTH to the max xri */
	if (phba->cfg_hba_queue_depth > mb->un.varRdConfig.max_xri) {
		lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
				"3359 HBA queue depth changed from %d to %d\n",
				phba->cfg_hba_queue_depth,
				mb->un.varRdConfig.max_xri);
		phba->cfg_hba_queue_depth = mb->un.varRdConfig.max_xri;
	}

	phba->lmt = mb->un.varRdConfig.lmt;

	/* Get the default values for Model Name and Description */
	lpfc_get_hba_model_desc(phba, phba->ModelName, phba->ModelDesc);

	phba->link_state = LPFC_LINK_DOWN;

	/* Only process IOCBs on ELS ring till hba_state is READY */
	if (psli->sli3_ring[LPFC_EXTRA_RING].sli.sli3.cmdringaddr)
		psli->sli3_ring[LPFC_EXTRA_RING].flag |= LPFC_STOP_IOCB_EVENT;
	if (psli->sli3_ring[LPFC_FCP_RING].sli.sli3.cmdringaddr)
		psli->sli3_ring[LPFC_FCP_RING].flag |= LPFC_STOP_IOCB_EVENT;

	/* Post receive buffers for desired rings */
	if (phba->sli_rev != 3)
		lpfc_post_rcv_buf(phba);

	/*
	 * Configure HBA MSI-X attention conditions to messages if MSI-X mode
	 */
	if (phba->intr_type == MSIX) {
		rc = lpfc_config_msi(phba, pmb);
		if (rc) {
			mempool_free(pmb, phba->mbox_mem_pool);
			return -EIO;
		}
		rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL);
		if (rc != MBX_SUCCESS) {
			lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
					"0352 Config MSI mailbox command "
					"failed, mbxCmd x%x, mbxStatus x%x\n",
					pmb->u.mb.mbxCommand,
					pmb->u.mb.mbxStatus);
			mempool_free(pmb, phba->mbox_mem_pool);
			return -EIO;
		}
	}

	spin_lock_irq(&phba->hbalock);
	/* Initialize ERATT handling flag */
	phba->hba_flag &= ~HBA_ERATT_HANDLED;

	/* Enable appropriate host interrupts */
	if (lpfc_readl(phba->HCregaddr, &status)) {
		spin_unlock_irq(&phba->hbalock);
		return -EIO;
	}
	status |= HC_MBINT_ENA | HC_ERINT_ENA | HC_LAINT_ENA;
	if (psli->num_rings > 0)
		status |= HC_R0INT_ENA;
	if (psli->num_rings > 1)
		status |= HC_R1INT_ENA;
	if (psli->num_rings > 2)
		status |= HC_R2INT_ENA;
	if (psli->num_rings > 3)
		status |= HC_R3INT_ENA;

	if ((phba->cfg_poll & ENABLE_FCP_RING_POLLING) &&
	    (phba->cfg_poll & DISABLE_FCP_RING_INT))
		status &= ~(HC_R0INT_ENA);

	writel(status, phba->HCregaddr);
	readl(phba->HCregaddr); /* flush */
	spin_unlock_irq(&phba->hbalock);

	/* Set up ring-0 (ELS) timer */
	timeout = phba->fc_ratov * 2;
	mod_timer(&vport->els_tmofunc,
		  jiffies + msecs_to_jiffies(1000 * timeout));
	/* Set up heart beat (HB) timer */
	mod_timer(&phba->hb_tmofunc,
		  jiffies + msecs_to_jiffies(1000 * LPFC_HB_MBOX_INTERVAL));
	phba->hba_flag &= ~(HBA_HBEAT_INP | HBA_HBEAT_TMO);
	phba->last_completion_time = jiffies;
	/* Set up error attention (ERATT) polling timer */
	mod_timer(&phba->eratt_poll,
		  jiffies + msecs_to_jiffies(1000 * phba->eratt_poll_interval));

	if (phba->hba_flag & LINK_DISABLED) {
		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
				"2598 Adapter Link is disabled.\n");
		lpfc_down_link(phba, pmb);
		pmb->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
		rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT);
		if ((rc != MBX_SUCCESS) && (rc != MBX_BUSY)) {
			lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
					"2599 Adapter failed to issue DOWN_LINK"
					" mbox command rc 0x%x\n", rc);

			mempool_free(pmb, phba->mbox_mem_pool);
			return -EIO;
		}
	} else if (phba->cfg_suppress_link_up == LPFC_INITIALIZE_LINK) {
		mempool_free(pmb, phba->mbox_mem_pool);
		rc = phba->lpfc_hba_init_link(phba, MBX_NOWAIT);
		if (rc)
			return rc;
	}
	/* MBOX buffer will be freed in mbox compl */
	pmb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!pmb) {
		phba->link_state = LPFC_HBA_ERROR;
		return -ENOMEM;
	}

	lpfc_config_async(phba, pmb, LPFC_ELS_RING);
	pmb->mbox_cmpl = lpfc_config_async_cmpl;
	pmb->vport = phba->pport;
	rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT);

	if ((rc != MBX_BUSY) && (rc != MBX_SUCCESS)) {
		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
				"0456 Adapter failed to issue "
				"ASYNCEVT_ENABLE mbox status x%x\n",
				rc);
		mempool_free(pmb, phba->mbox_mem_pool);
	}

	/* Get Option rom version */
	pmb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!pmb) {
		phba->link_state = LPFC_HBA_ERROR;
		return -ENOMEM;
	}

	lpfc_dump_wakeup_param(phba, pmb);
	pmb->mbox_cmpl = lpfc_dump_wakeup_param_cmpl;
	pmb->vport = phba->pport;
	rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT);

	if ((rc != MBX_BUSY) && (rc != MBX_SUCCESS)) {
		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
				"0435 Adapter failed "
				"to get Option ROM version status x%x\n", rc);
		mempool_free(pmb, phba->mbox_mem_pool);
	}

	return 0;
}

/**
 * lpfc_sli4_refresh_params - update driver copy of params.
 * @phba: Pointer to HBA context object.
 *
 * This is called to refresh the driver copy of dynamic fields from the
 * common_get_sli4_parameters descriptor.
 **/
int
lpfc_sli4_refresh_params(struct lpfc_hba *phba)
{
	LPFC_MBOXQ_t *mboxq;
	struct lpfc_mqe *mqe;
	struct lpfc_sli4_parameters *mbx_sli4_parameters;
	int length, rc;

	mboxq = (LPFC_MBOXQ_t *)mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!mboxq)
		return -ENOMEM;

	mqe = &mboxq->u.mqe;
	/* Read the port's SLI4 Config Parameters */
	length = (sizeof(struct lpfc_mbx_get_sli4_parameters) -
		  sizeof(struct lpfc_sli4_cfg_mhdr));
	lpfc_sli4_config(phba, mboxq, LPFC_MBOX_SUBSYSTEM_COMMON,
			 LPFC_MBOX_OPCODE_GET_SLI4_PARAMETERS,
			 length, LPFC_SLI4_MBX_EMBED);

	rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
	if (unlikely(rc)) {
		mempool_free(mboxq, phba->mbox_mem_pool);
		return rc;
	}
	mbx_sli4_parameters = &mqe->un.get_sli4_parameters.sli4_parameters;

	/* Are the sli4 parameters indicating MI support? */
	if (phba->cfg_enable_mi)
		phba->sli4_hba.pc_sli4_params.mi_ver =
			bf_get(cfg_mi_ver, mbx_sli4_parameters);
	else
		phba->sli4_hba.pc_sli4_params.mi_ver = 0;

	phba->sli4_hba.pc_sli4_params.cmf =
			bf_get(cfg_cmf, mbx_sli4_parameters);
	phba->sli4_hba.pc_sli4_params.pls =
			bf_get(cfg_pvl, mbx_sli4_parameters);

	mempool_free(mboxq, phba->mbox_mem_pool);
	return rc;
}

/**
 * lpfc_hba_init_link - Initialize the FC link
 * @phba: pointer to lpfc hba data structure.
 * @flag: mailbox command issue mode - either MBX_POLL or MBX_NOWAIT
 *
 * This routine will issue the INIT_LINK mailbox command call.
 * It is available to other drivers through the lpfc_hba data
 * structure for use as a delayed link up mechanism with the
 * module parameter lpfc_suppress_link_up.
 *
 * Return code
 *   0 - success
 *   Any other value - error
 **/
static int
lpfc_hba_init_link(struct lpfc_hba *phba, uint32_t flag)
{
	return lpfc_hba_init_link_fc_topology(phba, phba->cfg_topology, flag);
}

/**
 * lpfc_hba_init_link_fc_topology - Initialize FC link with desired topology
 * @phba: pointer to lpfc hba data structure.
 * @fc_topology: desired fc topology.
 * @flag: mailbox command issue mode - either MBX_POLL or MBX_NOWAIT
 *
 * This routine will issue the INIT_LINK mailbox command call.
 * It is available to other drivers through the lpfc_hba data
 * structure for use as a delayed link up mechanism with the
 * module parameter lpfc_suppress_link_up.
 *
 * Return code
 *   0 - success
 *   Any other value - error
 **/
int
lpfc_hba_init_link_fc_topology(struct lpfc_hba *phba, uint32_t fc_topology,
			       uint32_t flag)
{
	struct lpfc_vport *vport = phba->pport;
	LPFC_MBOXQ_t *pmb;
	MAILBOX_t *mb;
	int rc;

	pmb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!pmb) {
		phba->link_state = LPFC_HBA_ERROR;
		return -ENOMEM;
	}
	mb = &pmb->u.mb;
	pmb->vport = vport;

	if ((phba->cfg_link_speed > LPFC_USER_LINK_SPEED_MAX) ||
	    ((phba->cfg_link_speed == LPFC_USER_LINK_SPEED_1G) &&
	     !(phba->lmt & LMT_1Gb)) ||
	    ((phba->cfg_link_speed == LPFC_USER_LINK_SPEED_2G) &&
	     !(phba->lmt & LMT_2Gb)) ||
	    ((phba->cfg_link_speed == LPFC_USER_LINK_SPEED_4G) &&
	     !(phba->lmt & LMT_4Gb)) ||
	    ((phba->cfg_link_speed == LPFC_USER_LINK_SPEED_8G) &&
	     !(phba->lmt & LMT_8Gb)) ||
	    ((phba->cfg_link_speed == LPFC_USER_LINK_SPEED_10G) &&
	     !(phba->lmt & LMT_10Gb)) ||
	    ((phba->cfg_link_speed == LPFC_USER_LINK_SPEED_16G) &&
	     !(phba->lmt & LMT_16Gb)) ||
	    ((phba->cfg_link_speed == LPFC_USER_LINK_SPEED_32G) &&
	     !(phba->lmt & LMT_32Gb)) ||
	    ((phba->cfg_link_speed == LPFC_USER_LINK_SPEED_64G) &&
	     !(phba->lmt & LMT_64Gb))) {
		/* Reset link speed to auto */
		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
				"1302 Invalid speed for this board:%d "
				"Reset link speed to auto.\n",
				phba->cfg_link_speed);
		phba->cfg_link_speed = LPFC_USER_LINK_SPEED_AUTO;
	}
	lpfc_init_link(phba, pmb, fc_topology, phba->cfg_link_speed);
	pmb->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
	if (phba->sli_rev < LPFC_SLI_REV4)
		lpfc_set_loopback_flag(phba);
	rc = lpfc_sli_issue_mbox(phba, pmb, flag);
	if ((rc != MBX_BUSY) && (rc != MBX_SUCCESS)) {
		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
				"0498 Adapter failed to init, mbxCmd x%x "
				"INIT_LINK, mbxStatus x%x\n",
				mb->mbxCommand, mb->mbxStatus);
		if (phba->sli_rev <= LPFC_SLI_REV3) {
			/* Clear all interrupt enable conditions */
			writel(0, phba->HCregaddr);
			readl(phba->HCregaddr); /* flush */
			/* Clear all pending interrupts */
			writel(0xffffffff, phba->HAregaddr);
			readl(phba->HAregaddr); /* flush */
		}
		phba->link_state = LPFC_HBA_ERROR;
		if (rc != MBX_BUSY || flag == MBX_POLL)
			mempool_free(pmb, phba->mbox_mem_pool);
		return -EIO;
	}
	phba->cfg_suppress_link_up = LPFC_INITIALIZE_LINK;
	if (flag == MBX_POLL)
		mempool_free(pmb, phba->mbox_mem_pool);

	return 0;
}

/**
 * lpfc_hba_down_link - this routine downs the FC link
 * @phba: pointer to lpfc hba data structure.
 * @flag: mailbox command issue mode - either MBX_POLL or MBX_NOWAIT
 *
 * This routine will issue the DOWN_LINK mailbox command call.
 * It is available to other drivers through the lpfc_hba data
 * structure for use to stop the link.
 *
 * Return code
 *   0 - success
 *   Any other value - error
 **/
static int
lpfc_hba_down_link(struct lpfc_hba *phba, uint32_t flag)
{
	LPFC_MBOXQ_t *pmb;
	int rc;

	pmb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!pmb) {
		phba->link_state = LPFC_HBA_ERROR;
		return -ENOMEM;
	}

	lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
			"0491 Adapter Link is disabled.\n");
	lpfc_down_link(phba, pmb);
	pmb->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
	rc = lpfc_sli_issue_mbox(phba, pmb, flag);
	if ((rc != MBX_SUCCESS) && (rc != MBX_BUSY)) {
		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
				"2522 Adapter failed to issue DOWN_LINK"
				" mbox command rc 0x%x\n", rc);

		mempool_free(pmb, phba->mbox_mem_pool);
		return -EIO;
	}
	if (flag == MBX_POLL)
		mempool_free(pmb, phba->mbox_mem_pool);

	return 0;
}

/**
 * lpfc_hba_down_prep - Perform lpfc uninitialization prior to HBA reset
 * @phba: pointer to lpfc HBA data structure.
 *
 * This routine will do LPFC uninitialization before the HBA is reset when
 * bringing down the SLI Layer.
 *
 * Return codes
 *   0 - success.
 *   Any other value - error.
 **/
int
lpfc_hba_down_prep(struct lpfc_hba *phba)
{
	struct lpfc_vport **vports;
	int i;

	if (phba->sli_rev <= LPFC_SLI_REV3) {
		/* Disable interrupts */
		writel(0, phba->HCregaddr);
		readl(phba->HCregaddr); /* flush */
	}

	if (phba->pport->load_flag & FC_UNLOADING)
		lpfc_cleanup_discovery_resources(phba->pport);
	else {
		vports = lpfc_create_vport_work_array(phba);
		if (vports != NULL)
			for (i = 0; i <= phba->max_vports &&
				vports[i] != NULL; i++)
				lpfc_cleanup_discovery_resources(vports[i]);
		lpfc_destroy_vport_work_array(phba, vports);
	}
	return 0;
}

/**
 * lpfc_sli4_free_sp_events - Cleanup sp_queue_events to free
 * rspiocb which got deferred
 * @phba: pointer to lpfc HBA data structure.
 *
 * This routine will cleanup completed slow path events after HBA is reset
 * when bringing down the SLI Layer.
 *
 * Return codes
 *   void.
 **/
static void
lpfc_sli4_free_sp_events(struct lpfc_hba *phba)
{
	struct lpfc_iocbq *rspiocbq;
	struct hbq_dmabuf *dmabuf;
	struct lpfc_cq_event *cq_event;

	spin_lock_irq(&phba->hbalock);
	phba->hba_flag &= ~HBA_SP_QUEUE_EVT;
	spin_unlock_irq(&phba->hbalock);

	while (!list_empty(&phba->sli4_hba.sp_queue_event)) {
		/* Get the response iocb from the head of work queue */
		spin_lock_irq(&phba->hbalock);
		list_remove_head(&phba->sli4_hba.sp_queue_event,
				 cq_event, struct lpfc_cq_event, list);
		spin_unlock_irq(&phba->hbalock);

		switch (bf_get(lpfc_wcqe_c_code, &cq_event->cqe.wcqe_cmpl)) {
		case CQE_CODE_COMPL_WQE:
			rspiocbq = container_of(cq_event, struct lpfc_iocbq,
						cq_event);
			lpfc_sli_release_iocbq(phba, rspiocbq);
			break;
		case CQE_CODE_RECEIVE:
		case CQE_CODE_RECEIVE_V1:
			dmabuf = container_of(cq_event, struct hbq_dmabuf,
					      cq_event);
			lpfc_in_buf_free(phba, &dmabuf->dbuf);
		}
	}
}

/**
 * lpfc_hba_free_post_buf - Perform lpfc uninitialization after HBA reset
 * @phba: pointer to lpfc HBA data structure.
 *
 * This routine will cleanup posted ELS buffers after the HBA is reset
 * when bringing down the SLI Layer.
 *
 * Return codes
 *   void.
 **/
static void
lpfc_hba_free_post_buf(struct lpfc_hba *phba)
{
	struct lpfc_sli *psli = &phba->sli;
	struct lpfc_sli_ring *pring;
	struct lpfc_dmabuf *mp, *next_mp;
	LIST_HEAD(buflist);
	int count;

	if (phba->sli3_options & LPFC_SLI3_HBQ_ENABLED)
		lpfc_sli_hbqbuf_free_all(phba);
	else {
		/* Cleanup preposted buffers on the ELS ring */
		pring = &psli->sli3_ring[LPFC_ELS_RING];
		spin_lock_irq(&phba->hbalock);
		list_splice_init(&pring->postbufq, &buflist);
		spin_unlock_irq(&phba->hbalock);

		count = 0;
		list_for_each_entry_safe(mp, next_mp, &buflist, list) {
			list_del(&mp->list);
			count++;
			lpfc_mbuf_free(phba, mp->virt, mp->phys);
			kfree(mp);
		}

		spin_lock_irq(&phba->hbalock);
		pring->postbufq_cnt -= count;
		spin_unlock_irq(&phba->hbalock);
	}
}

/**
 * lpfc_hba_clean_txcmplq - Perform lpfc uninitialization after HBA reset
 * @phba: pointer to lpfc HBA data structure.
 *
 * This routine will cleanup the txcmplq after the HBA is reset when bringing
 * down the SLI Layer.
 *
 * Return codes
 *   void
 **/
static void
lpfc_hba_clean_txcmplq(struct lpfc_hba *phba)
{
	struct lpfc_sli *psli = &phba->sli;
	struct lpfc_queue *qp = NULL;
	struct lpfc_sli_ring *pring;
	LIST_HEAD(completions);
	int i;
	struct lpfc_iocbq *piocb, *next_iocb;

	if (phba->sli_rev != LPFC_SLI_REV4) {
		for (i = 0; i < psli->num_rings; i++) {
			pring = &psli->sli3_ring[i];
			spin_lock_irq(&phba->hbalock);
			/* At this point in time the HBA is either reset or DOA
			 * Nothing should be on txcmplq as it will
			 * NEVER complete.
			 */
			list_splice_init(&pring->txcmplq, &completions);
			pring->txcmplq_cnt = 0;
			spin_unlock_irq(&phba->hbalock);

			lpfc_sli_abort_iocb_ring(phba, pring);
		}
		/* Cancel all the IOCBs from the completions list */
		lpfc_sli_cancel_iocbs(phba, &completions,
				      IOSTAT_LOCAL_REJECT, IOERR_SLI_ABORTED);
		return;
	}
	list_for_each_entry(qp, &phba->sli4_hba.lpfc_wq_list, wq_list) {
		pring = qp->pring;
		if (!pring)
			continue;
		spin_lock_irq(&pring->ring_lock);
		list_for_each_entry_safe(piocb, next_iocb,
					 &pring->txcmplq, list)
			piocb->cmd_flag &= ~LPFC_IO_ON_TXCMPLQ;
		list_splice_init(&pring->txcmplq, &completions);
		pring->txcmplq_cnt = 0;
		spin_unlock_irq(&pring->ring_lock);
		lpfc_sli_abort_iocb_ring(phba, pring);
	}
	/* Cancel all the IOCBs from the completions list */
	lpfc_sli_cancel_iocbs(phba, &completions,
			      IOSTAT_LOCAL_REJECT, IOERR_SLI_ABORTED);
}

/**
 * lpfc_hba_down_post_s3 - Perform lpfc uninitialization after HBA reset
 * @phba: pointer to lpfc HBA data structure.
 *
 * This routine will do uninitialization after the HBA is reset when bringing
 * down the SLI Layer.
 *
 * Return codes
 *   0 - success.
 *   Any other value - error.
 **/
static int
lpfc_hba_down_post_s3(struct lpfc_hba *phba)
{
	lpfc_hba_free_post_buf(phba);
	lpfc_hba_clean_txcmplq(phba);
	return 0;
}

/**
 * lpfc_hba_down_post_s4 - Perform lpfc uninitialization after HBA reset
 * @phba: pointer to lpfc HBA data structure.
 *
 * This routine will do uninitialization after the HBA is reset when bringing
 * down the SLI Layer.
 *
 * Return codes
 *   0 - success.
 *   Any other value - error.
 **/
static int
lpfc_hba_down_post_s4(struct lpfc_hba *phba)
{
	struct lpfc_io_buf *psb, *psb_next;
	struct lpfc_async_xchg_ctx *ctxp, *ctxp_next;
	struct lpfc_sli4_hdw_queue *qp;
	LIST_HEAD(aborts);
	LIST_HEAD(nvme_aborts);
	LIST_HEAD(nvmet_aborts);
	struct lpfc_sglq *sglq_entry = NULL;
	int cnt, idx;

	lpfc_sli_hbqbuf_free_all(phba);
	lpfc_hba_clean_txcmplq(phba);

	/* At this point in time the HBA is either reset or DOA. Either
	 * way, nothing should be on lpfc_abts_els_sgl_list, it needs to be
	 * on the lpfc_els_sgl_list so that it can either be freed if the
	 * driver is unloading or reposted if the driver is restarting
	 * the port.
	 */

	/* sgl_list_lock required because worker thread uses this
	 * list.
	 */
	spin_lock_irq(&phba->sli4_hba.sgl_list_lock);
	list_for_each_entry(sglq_entry,
		&phba->sli4_hba.lpfc_abts_els_sgl_list, list)
		sglq_entry->state = SGL_FREED;

	list_splice_init(&phba->sli4_hba.lpfc_abts_els_sgl_list,
			&phba->sli4_hba.lpfc_els_sgl_list);

	spin_unlock_irq(&phba->sli4_hba.sgl_list_lock);

	/* abts_xxxx_buf_list_lock required because worker thread uses this
	 * list.
	 */
	spin_lock_irq(&phba->hbalock);
	cnt = 0;
	for (idx = 0; idx < phba->cfg_hdw_queue; idx++) {
		qp = &phba->sli4_hba.hdwq[idx];

		spin_lock(&qp->abts_io_buf_list_lock);
		list_splice_init(&qp->lpfc_abts_io_buf_list,
				 &aborts);

		list_for_each_entry_safe(psb, psb_next, &aborts, list) {
			psb->pCmd = NULL;
			psb->status = IOSTAT_SUCCESS;
			cnt++;
		}
		spin_lock(&qp->io_buf_list_put_lock);
		list_splice_init(&aborts, &qp->lpfc_io_buf_list_put);
		qp->put_io_bufs += qp->abts_scsi_io_bufs;
		qp->put_io_bufs += qp->abts_nvme_io_bufs;
		qp->abts_scsi_io_bufs = 0;
		qp->abts_nvme_io_bufs = 0;
		spin_unlock(&qp->io_buf_list_put_lock);
		spin_unlock(&qp->abts_io_buf_list_lock);
	}
	spin_unlock_irq(&phba->hbalock);

	if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) {
		spin_lock_irq(&phba->sli4_hba.abts_nvmet_buf_list_lock);
		list_splice_init(&phba->sli4_hba.lpfc_abts_nvmet_ctx_list,
				 &nvmet_aborts);
		spin_unlock_irq(&phba->sli4_hba.abts_nvmet_buf_list_lock);
		list_for_each_entry_safe(ctxp, ctxp_next, &nvmet_aborts, list) {
			ctxp->flag &= ~(LPFC_NVME_XBUSY | LPFC_NVME_ABORT_OP);
			lpfc_nvmet_ctxbuf_post(phba, ctxp->ctxbuf);
		}
	}

	lpfc_sli4_free_sp_events(phba);
	return cnt;
}

/**
 * lpfc_hba_down_post - Wrapper func for hba down post routine
 * @phba: pointer to lpfc HBA data structure.
 *
 * This routine wraps the actual SLI3 or SLI4 routine for performing
 * uninitialization after the HBA is reset when bringing down the SLI Layer.
 *
 * Return codes
 *   0 - success.
 *   Any other value - error.
 **/
int
lpfc_hba_down_post(struct lpfc_hba *phba)
{
	return (*phba->lpfc_hba_down_post)(phba);
}

/**
 * lpfc_hb_timeout - The HBA-timer timeout handler
 * @t: timer context used to obtain the pointer to lpfc hba data structure.
 *
 * This is the HBA-timer timeout handler registered to the lpfc driver. When
 * this timer fires, a HBA timeout event shall be posted to the lpfc driver
 * work-port-events bitmap and the worker thread is notified. This timeout
 * event will be used by the worker thread to invoke the actual timeout
 * handler routine, lpfc_hb_timeout_handler. Any periodical operations will
 * be performed in the timeout handler and the HBA timeout event bit shall
 * be cleared by the worker thread after it releases the worker-thread
 * associated with the HBA timer.
 **/
static void
lpfc_hb_timeout(struct timer_list *t)
{
	struct lpfc_hba *phba;
	uint32_t tmo_posted;
	unsigned long iflag;

	phba = from_timer(phba, t, hb_tmofunc);

	/* Check for heart beat timeout conditions */
	spin_lock_irqsave(&phba->pport->work_port_lock, iflag);
	tmo_posted = phba->pport->work_port_events & WORKER_HB_TMO;
	if (!tmo_posted)
		phba->pport->work_port_events |= WORKER_HB_TMO;
	spin_unlock_irqrestore(&phba->pport->work_port_lock, iflag);

	/* Tell the worker thread there is work to do */
	if (!tmo_posted)
		lpfc_worker_wake_up(phba);
	return;
}

/**
 * lpfc_rrq_timeout - The RRQ-timer timeout handler
 * @t: timer context used to obtain the pointer to lpfc hba data structure.
 *
 * This is the RRQ-timer timeout handler registered to the lpfc driver. When
 * this timer fires, the RRQ-active flag is updated and the worker thread is
 * notified (unless the driver is unloading), so the worker thread can
 * process outstanding RRQs.
 **/
static void
lpfc_rrq_timeout(struct timer_list *t)
{
	struct lpfc_hba *phba;
	unsigned long iflag;

	phba = from_timer(phba, t, rrq_tmr);
	spin_lock_irqsave(&phba->pport->work_port_lock, iflag);
	if (!(phba->pport->load_flag & FC_UNLOADING))
		phba->hba_flag |= HBA_RRQ_ACTIVE;
	else
		phba->hba_flag &= ~HBA_RRQ_ACTIVE;
	spin_unlock_irqrestore(&phba->pport->work_port_lock, iflag);

	if (!(phba->pport->load_flag & FC_UNLOADING))
		lpfc_worker_wake_up(phba);
}

/**
 * lpfc_hb_mbox_cmpl - The lpfc heart-beat mailbox command callback function
 * @phba: pointer to lpfc hba data structure.
 * @pmboxq: pointer to the driver internal queue element for mailbox command.
 *
 * This is the callback function to the lpfc heart-beat mailbox command.
 * If configured, the lpfc driver issues the heart-beat mailbox command to
 * the HBA every LPFC_HB_MBOX_INTERVAL seconds. At the time the heart-beat
 * mailbox command is issued, the driver sets up the heart-beat timeout
 * timer to LPFC_HB_MBOX_TIMEOUT seconds and marks the heart-beat
 * outstanding state. Once the mailbox command comes back and no error
 * conditions are detected, the heart-beat mailbox command timer is reset to
 * LPFC_HB_MBOX_INTERVAL seconds and the heart-beat outstanding state is
 * cleared for the next heart-beat.
 **/
static void
lpfc_hb_mbox_cmpl(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmboxq)
{
	unsigned long drvr_flag;

	spin_lock_irqsave(&phba->hbalock, drvr_flag);
	phba->hba_flag &= ~(HBA_HBEAT_INP | HBA_HBEAT_TMO);
	spin_unlock_irqrestore(&phba->hbalock, drvr_flag);

	/* Check and reset heart-beat timer if necessary */
	mempool_free(pmboxq, phba->mbox_mem_pool);
	if (!(phba->pport->fc_flag & FC_OFFLINE_MODE) &&
		!(phba->link_state == LPFC_HBA_ERROR) &&
		!(phba->pport->load_flag & FC_UNLOADING))
		mod_timer(&phba->hb_tmofunc,
			  jiffies +
			  msecs_to_jiffies(1000 * LPFC_HB_MBOX_INTERVAL));
	return;
}

/**
 * lpfc_idle_stat_delay_work - idle_stat tracking
 * @work: pointer to the delayed work structure.
 *
 * This routine tracks per-eq idle_stat to determine the polling decision
 * for each completion queue.
 **/
static void
lpfc_idle_stat_delay_work(struct work_struct *work)
{
	struct lpfc_hba *phba = container_of(to_delayed_work(work),
					     struct lpfc_hba,
					     idle_stat_delay_work);
	struct lpfc_queue *cq;
	struct lpfc_sli4_hdw_queue *hdwq;
	struct lpfc_idle_stat *idle_stat;
	u32 i, idle_percent;
	u64 wall, wall_idle, diff_wall, diff_idle, busy_time;

	if (phba->pport->load_flag & FC_UNLOADING)
		return;

	if (phba->link_state == LPFC_HBA_ERROR ||
	    phba->pport->fc_flag & FC_OFFLINE_MODE ||
	    phba->cmf_active_mode != LPFC_CFG_OFF)
		goto requeue;

	for_each_present_cpu(i) {
		hdwq = &phba->sli4_hba.hdwq[phba->sli4_hba.cpu_map[i].hdwq];
		cq = hdwq->io_cq;

		/* Skip if we've already handled this cq's primary CPU */
		if (cq->chann != i)
			continue;

		idle_stat = &phba->sli4_hba.idle_stat[i];

		/* get_cpu_idle_time returns values as running counters. Thus,
		 * to know the amount for this period, the prior counter values
		 * need to be subtracted from the current counter values.
		 * From there, the idle time stat can be calculated.
		 */
		wall_idle = get_cpu_idle_time(i, &wall, 1);
		diff_idle = wall_idle - idle_stat->prev_idle;
		diff_wall = wall - idle_stat->prev_wall;

		if (diff_wall <= diff_idle)
			busy_time = 0;
		else
			busy_time = diff_wall - diff_idle;

		idle_percent = div64_u64(100 * busy_time, diff_wall);
		idle_percent = 100 - idle_percent;
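		/*
		 * Worked example (illustrative, not from the original
		 * source): with diff_wall = 1000 and diff_idle = 900, the
		 * busy_time is 100, so the first assignment yields a busy
		 * percentage of 10 and the second flips it into an idle
		 * percentage of 90, which selects LPFC_IRQ_POLL below.
		 */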
		if (idle_percent < 15)
			cq->poll_mode = LPFC_QUEUE_WORK;
		else
			cq->poll_mode = LPFC_IRQ_POLL;

		idle_stat->prev_idle = wall_idle;
		idle_stat->prev_wall = wall;
	}

requeue:
	schedule_delayed_work(&phba->idle_stat_delay_work,
			      msecs_to_jiffies(LPFC_IDLE_STAT_DELAY));
}

static void
lpfc_hb_eq_delay_work(struct work_struct *work)
{
	struct lpfc_hba *phba = container_of(to_delayed_work(work),
					     struct lpfc_hba, eq_delay_work);
	struct lpfc_eq_intr_info *eqi, *eqi_new;
	struct lpfc_queue *eq, *eq_next;
	unsigned char *ena_delay = NULL;
	uint32_t usdelay;
	int i;

	if (!phba->cfg_auto_imax || phba->pport->load_flag & FC_UNLOADING)
		return;

	if (phba->link_state == LPFC_HBA_ERROR ||
	    phba->pport->fc_flag & FC_OFFLINE_MODE)
		goto requeue;

	ena_delay = kcalloc(phba->sli4_hba.num_possible_cpu, sizeof(*ena_delay),
			    GFP_KERNEL);
	if (!ena_delay)
		goto requeue;

	for (i = 0; i < phba->cfg_irq_chann; i++) {
		/* Get the EQ corresponding to the IRQ vector */
		eq = phba->sli4_hba.hba_eq_hdl[i].eq;
		if (!eq)
			continue;
		if (eq->q_mode || eq->q_flag & HBA_EQ_DELAY_CHK) {
			eq->q_flag &= ~HBA_EQ_DELAY_CHK;
			ena_delay[eq->last_cpu] = 1;
		}
	}

	for_each_present_cpu(i) {
		eqi = per_cpu_ptr(phba->sli4_hba.eq_info, i);
		if (ena_delay[i]) {
			usdelay = (eqi->icnt >> 10) * LPFC_EQ_DELAY_STEP;
			if (usdelay > LPFC_MAX_AUTO_EQ_DELAY)
				usdelay = LPFC_MAX_AUTO_EQ_DELAY;
		} else {
			usdelay = 0;
		}
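		/*
		 * Worked example (illustrative, not from the original
		 * source): an eqi->icnt of 4096 interrupts in the last
		 * sample period gives usdelay = (4096 >> 10) *
		 * LPFC_EQ_DELAY_STEP = 4 * LPFC_EQ_DELAY_STEP microseconds
		 * of interrupt coalescing delay, capped at
		 * LPFC_MAX_AUTO_EQ_DELAY; CPUs whose EQs were not flagged
		 * above fall back to no delay.
		 */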

		eqi->icnt = 0;

		list_for_each_entry_safe(eq, eq_next, &eqi->list, cpu_list) {
			if (unlikely(eq->last_cpu != i)) {
				eqi_new = per_cpu_ptr(phba->sli4_hba.eq_info,
						      eq->last_cpu);
				list_move_tail(&eq->cpu_list, &eqi_new->list);
				continue;
			}
			if (usdelay != eq->q_mode)
				lpfc_modify_hba_eq_delay(phba, eq->hdwq, 1,
							 usdelay);
		}
	}

	kfree(ena_delay);

requeue:
	queue_delayed_work(phba->wq, &phba->eq_delay_work,
			   msecs_to_jiffies(LPFC_EQ_DELAY_MSECS));
}

/**
 * lpfc_hb_mxp_handler - Multi-XRI pools handler to adjust XRI distribution
 * @phba: pointer to lpfc hba data structure.
 *
 * For each heartbeat, this routine does some heuristic methods to adjust
 * XRI distribution. The goal is to fully utilize free XRIs.
 **/
static void lpfc_hb_mxp_handler(struct lpfc_hba *phba)
{
	u32 i;
	u32 hwq_count;

	hwq_count = phba->cfg_hdw_queue;
	for (i = 0; i < hwq_count; i++) {
		/* Adjust XRIs in private pool */
		lpfc_adjust_pvt_pool_count(phba, i);

		/* Adjust high watermark */
		lpfc_adjust_high_watermark(phba, i);

#ifdef LPFC_MXP_STAT
		/* Snapshot pbl, pvt and busy count */
		lpfc_snapshot_mxp(phba, i);
#endif
	}
}

/**
 * lpfc_issue_hb_mbox - Issues heart-beat mailbox command
 * @phba: pointer to lpfc hba data structure.
 *
 * If no heart-beat mailbox command is outstanding, this routine allocates
 * and issues one; the HBA_HBEAT_INP flag marks the command in progress.
 *
 * Return codes
 *   0 - success.
 *   Any other value - error.
 **/
int
lpfc_issue_hb_mbox(struct lpfc_hba *phba)
{
	LPFC_MBOXQ_t *pmboxq;
	int retval;

	/* Is a Heartbeat mbox already in progress */
	if (phba->hba_flag & HBA_HBEAT_INP)
		return 0;

	pmboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!pmboxq)
		return -ENOMEM;

	lpfc_heart_beat(phba, pmboxq);
	pmboxq->mbox_cmpl = lpfc_hb_mbox_cmpl;
	pmboxq->vport = phba->pport;
	retval = lpfc_sli_issue_mbox(phba, pmboxq, MBX_NOWAIT);

	if (retval != MBX_BUSY && retval != MBX_SUCCESS) {
		mempool_free(pmboxq, phba->mbox_mem_pool);
		return -ENXIO;
	}
	phba->hba_flag |= HBA_HBEAT_INP;

	return 0;
}

/**
 * lpfc_issue_hb_tmo - Signals heartbeat timer to issue mbox command
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine sets the HBA_HBEAT_TMO flag so the heartbeat timer handler
 * issues a MBX_HEARTBEAT mailbox command on its next pass. It is a no-op
 * when the normal heartbeat mechanism is enabled, since mailbox completions
 * already refresh the heartbeat state in that mode.
 **/
void
lpfc_issue_hb_tmo(struct lpfc_hba *phba)
{
	if (phba->cfg_enable_hba_heartbeat)
		return;
	phba->hba_flag |= HBA_HBEAT_TMO;
}

/**
 * lpfc_hb_timeout_handler - The HBA-timer timeout handler
 * @phba: pointer to lpfc hba data structure.
 *
 * This is the actual HBA-timer timeout handler to be invoked by the worker
 * thread whenever the HBA timer fired and HBA-timeout event posted. This
 * handler performs any periodic operations needed for the device. If such
 * periodic event has already been attended to either in the interrupt
 * handler or by processing slow-ring or fast-ring events within the HBA-timer
 * timeout window (LPFC_HB_MBOX_INTERVAL), this handler just simply resets
 * the timer for the next timeout period. If lpfc heart-beat mailbox command
 * is configured and there is no heart-beat mailbox command outstanding, a
 * heart-beat mailbox is issued and timer set properly. Otherwise, if there
 * has been a heart-beat mailbox command outstanding, the HBA shall be put
 * to offline.
 **/
void
lpfc_hb_timeout_handler(struct lpfc_hba *phba)
{
	struct lpfc_vport **vports;
	struct lpfc_dmabuf *buf_ptr;
	int retval = 0;
	int i, tmo;
	struct lpfc_sli *psli = &phba->sli;
	LIST_HEAD(completions);

	if (phba->cfg_xri_rebalancing) {
		/* Multi-XRI pools handler */
		lpfc_hb_mxp_handler(phba);
	}

	vports = lpfc_create_vport_work_array(phba);
	if (vports != NULL)
		for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) {
			lpfc_rcv_seq_check_edtov(vports[i]);
			lpfc_fdmi_change_check(vports[i]);
		}
	lpfc_destroy_vport_work_array(phba, vports);

	if ((phba->link_state == LPFC_HBA_ERROR) ||
		(phba->pport->load_flag & FC_UNLOADING) ||
		(phba->pport->fc_flag & FC_OFFLINE_MODE))
		return;

	if (phba->elsbuf_cnt &&
		(phba->elsbuf_cnt == phba->elsbuf_prev_cnt)) {
		spin_lock_irq(&phba->hbalock);
		list_splice_init(&phba->elsbuf, &completions);
		phba->elsbuf_cnt = 0;
		phba->elsbuf_prev_cnt = 0;
		spin_unlock_irq(&phba->hbalock);

		while (!list_empty(&completions)) {
			list_remove_head(&completions, buf_ptr,
				struct lpfc_dmabuf, list);
			lpfc_mbuf_free(phba, buf_ptr->virt, buf_ptr->phys);
			kfree(buf_ptr);
		}
	}
	phba->elsbuf_prev_cnt = phba->elsbuf_cnt;

	/* If there is no heart beat outstanding, issue a heartbeat command */
	if (phba->cfg_enable_hba_heartbeat) {
		/* If IOs are completing, no need to issue a MBX_HEARTBEAT */
		spin_lock_irq(&phba->pport->work_port_lock);
		if (time_after(phba->last_completion_time +
				msecs_to_jiffies(1000 * LPFC_HB_MBOX_INTERVAL),
				jiffies)) {
			spin_unlock_irq(&phba->pport->work_port_lock);
			if (phba->hba_flag & HBA_HBEAT_INP)
				tmo = (1000 * LPFC_HB_MBOX_TIMEOUT);
			else
				tmo = (1000 * LPFC_HB_MBOX_INTERVAL);
			goto out;
		}
		spin_unlock_irq(&phba->pport->work_port_lock);

		/* Check if a MBX_HEARTBEAT is already in progress */
		if (phba->hba_flag & HBA_HBEAT_INP) {
			/*
			 * If heart beat timeout called with HBA_HBEAT_INP set
			 * we need to give the hb mailbox cmd a chance to
			 * complete or TMO.
			 */
			lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
				"0459 Adapter heartbeat still outstanding: "
				"last compl time was %d ms.\n",
				jiffies_to_msecs(jiffies
					 - phba->last_completion_time));
			tmo = (1000 * LPFC_HB_MBOX_TIMEOUT);
		} else {
			if ((!(psli->sli_flag & LPFC_SLI_MBOX_ACTIVE)) &&
				(list_empty(&psli->mboxq))) {

				retval = lpfc_issue_hb_mbox(phba);
				if (retval) {
					tmo = (1000 * LPFC_HB_MBOX_INTERVAL);
					goto out;
				}
				phba->skipped_hb = 0;
			} else if (time_before_eq(phba->last_completion_time,
					phba->skipped_hb)) {
				lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
					"2857 Last completion time not "
					" updated in %d ms\n",
					jiffies_to_msecs(jiffies
						 - phba->last_completion_time));
			} else
				phba->skipped_hb = jiffies;

			tmo = (1000 * LPFC_HB_MBOX_TIMEOUT);
			goto out;
		}
	} else {
		/* Check to see if we want to force a MBX_HEARTBEAT */
		if (phba->hba_flag & HBA_HBEAT_TMO) {
			retval = lpfc_issue_hb_mbox(phba);
			if (retval)
				tmo = (1000 * LPFC_HB_MBOX_INTERVAL);
			else
				tmo = (1000 * LPFC_HB_MBOX_TIMEOUT);
			goto out;
		}
		tmo = (1000 * LPFC_HB_MBOX_INTERVAL);
	}
out:
	mod_timer(&phba->hb_tmofunc, jiffies + msecs_to_jiffies(tmo));
}

/**
 * lpfc_offline_eratt - Bring lpfc offline on hardware error attention
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is called to bring the HBA offline when an HBA hardware error
 * attention is detected.
 **/
static void
lpfc_offline_eratt(struct lpfc_hba *phba)
{
	struct lpfc_sli *psli = &phba->sli;

	spin_lock_irq(&phba->hbalock);
	psli->sli_flag &= ~LPFC_SLI_ACTIVE;
	spin_unlock_irq(&phba->hbalock);
	lpfc_offline_prep(phba, LPFC_MBX_NO_WAIT);

	lpfc_offline(phba);
	lpfc_reset_barrier(phba);
	spin_lock_irq(&phba->hbalock);
	lpfc_sli_brdreset(phba);
	spin_unlock_irq(&phba->hbalock);
	lpfc_hba_down_post(phba);
	lpfc_sli_brdready(phba, HS_MBRDY);
	lpfc_unblock_mgmt_io(phba);
	phba->link_state = LPFC_HBA_ERROR;
	return;
}

/**
 * lpfc_sli4_offline_eratt - Bring lpfc offline on SLI4 hardware error attention
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is called to bring a SLI4 HBA offline when HBA hardware error
 * attention events are detected.
 **/
void
lpfc_sli4_offline_eratt(struct lpfc_hba *phba)
{
	spin_lock_irq(&phba->hbalock);
	if (phba->link_state == LPFC_HBA_ERROR &&
	    test_bit(HBA_PCI_ERR, &phba->bit_flags)) {
		spin_unlock_irq(&phba->hbalock);
		return;
	}
	phba->link_state = LPFC_HBA_ERROR;
	spin_unlock_irq(&phba->hbalock);

	lpfc_offline_prep(phba, LPFC_MBX_NO_WAIT);
	lpfc_sli_flush_io_rings(phba);
	lpfc_offline(phba);
	lpfc_hba_down_post(phba);
	lpfc_unblock_mgmt_io(phba);
}

/**
 * lpfc_handle_deferred_eratt - The SLI3 HBA deferred error handler
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked to handle the deferred HBA hardware error
 * conditions. This type of error is indicated by the HBA setting ER1
 * and another ER bit in the host status register. The driver will
 * wait until the ER1 bit clears before handling the error condition.
 **/
static void
lpfc_handle_deferred_eratt(struct lpfc_hba *phba)
{
	uint32_t old_host_status = phba->work_hs;
	struct lpfc_sli *psli = &phba->sli;

	/* If the pci channel is offline, ignore possible errors,
	 * since we cannot communicate with the pci card anyway.
	 */
	if (pci_channel_offline(phba->pcidev)) {
		spin_lock_irq(&phba->hbalock);
		phba->hba_flag &= ~DEFER_ERATT;
		spin_unlock_irq(&phba->hbalock);
		return;
	}

	lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
			"0479 Deferred Adapter Hardware Error "
			"Data: x%x x%x x%x\n",
			phba->work_hs, phba->work_status[0],
			phba->work_status[1]);

	spin_lock_irq(&phba->hbalock);
	psli->sli_flag &= ~LPFC_SLI_ACTIVE;
	spin_unlock_irq(&phba->hbalock);

	/*
	 * Firmware stops when it triggered erratt. That could cause the I/Os
	 * dropped by the firmware. Error iocb (I/O) on txcmplq and let the
	 * SCSI layer retry it after re-establishing link.
	 */
	lpfc_sli_abort_fcp_rings(phba);

	/*
	 * There was a firmware error. Take the hba offline and then
	 * attempt to restart it.
	 */
	lpfc_offline_prep(phba, LPFC_MBX_WAIT);
	lpfc_offline(phba);

	/* Wait for the ER1 bit to clear. */
	while (phba->work_hs & HS_FFER1) {
		msleep(100);
		if (lpfc_readl(phba->HSregaddr, &phba->work_hs)) {
			phba->work_hs = UNPLUG_ERR ;
			break;
		}
		/* If driver is unloading let the worker thread continue */
		if (phba->pport->load_flag & FC_UNLOADING) {
			phba->work_hs = 0;
			break;
		}
	}

	/*
	 * If the deferred error cleared and the driver is not unloading,
	 * restore the saved status bits (minus ER1) so the error attention
	 * can still be handled.
	 */
	if ((!phba->work_hs) && (!(phba->pport->load_flag & FC_UNLOADING)))
		phba->work_hs = old_host_status & ~HS_FFER1;

	spin_lock_irq(&phba->hbalock);
	phba->hba_flag &= ~DEFER_ERATT;
	spin_unlock_irq(&phba->hbalock);
	phba->work_status[0] = readl(phba->MBslimaddr + 0xa8);
	phba->work_status[1] = readl(phba->MBslimaddr + 0xac);
}

/**
 * lpfc_board_errevt_to_mgmt - Send board error event to management application
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine posts a FC_REG_BOARD_EVENT vendor event to the management
 * application to indicate a board (port) error.
 **/
static void
lpfc_board_errevt_to_mgmt(struct lpfc_hba *phba)
{
	struct lpfc_board_event_header board_event;
	struct Scsi_Host *shost;

	board_event.event_type = FC_REG_BOARD_EVENT;
	board_event.subcategory = LPFC_EVENT_PORTINTERR;
	shost = lpfc_shost_from_vport(phba->pport);
	fc_host_post_vendor_event(shost, fc_get_event_number(),
				  sizeof(board_event),
				  (char *) &board_event,
				  LPFC_NL_VENDOR_ID);
}

/**
 * lpfc_handle_eratt_s3 - The SLI3 HBA hardware error handler
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked to handle the following HBA hardware error
 * conditions:
 * 1 - HBA error attention interrupt
 * 2 - DMA ring index out of range
 * 3 - Mailbox command came back as unknown
 **/
static void
lpfc_handle_eratt_s3(struct lpfc_hba *phba)
{
	struct lpfc_vport *vport = phba->pport;
	struct lpfc_sli *psli = &phba->sli;
	uint32_t event_data;
	unsigned long temperature;
	struct temp_event temp_event_data;
	struct Scsi_Host *shost;

	/* If the pci channel is offline, ignore possible errors,
	 * since we cannot communicate with the pci card anyway.
	 */
	if (pci_channel_offline(phba->pcidev)) {
		spin_lock_irq(&phba->hbalock);
		phba->hba_flag &= ~DEFER_ERATT;
		spin_unlock_irq(&phba->hbalock);
		return;
	}

	/* If resets are disabled then leave the HBA alone and return */
	if (!phba->cfg_enable_hba_reset)
		return;

	/* Send an internal error event to mgmt application */
	lpfc_board_errevt_to_mgmt(phba);

	if (phba->hba_flag & DEFER_ERATT)
		lpfc_handle_deferred_eratt(phba);

	if ((phba->work_hs & HS_FFER6) || (phba->work_hs & HS_FFER8)) {
		if (phba->work_hs & HS_FFER6)
			/* Re-establishing Link */
			lpfc_printf_log(phba, KERN_INFO, LOG_LINK_EVENT,
					"1301 Re-establishing Link "
					"Data: x%x x%x x%x\n",
					phba->work_hs, phba->work_status[0],
					phba->work_status[1]);
		if (phba->work_hs & HS_FFER8)
			/* Device Zeroization */
			lpfc_printf_log(phba, KERN_INFO, LOG_LINK_EVENT,
					"2861 Host Authentication device "
					"zeroization Data:x%x x%x x%x\n",
					phba->work_hs, phba->work_status[0],
					phba->work_status[1]);

		spin_lock_irq(&phba->hbalock);
		psli->sli_flag &= ~LPFC_SLI_ACTIVE;
		spin_unlock_irq(&phba->hbalock);

		/*
		 * Firmware stops when it triggered erratt with HS_FFER6.
		 * That could cause the I/Os dropped by the firmware.
		 * Error iocb (I/O) on txcmplq and let the SCSI layer
		 * retry it after re-establishing link.
		 */
		lpfc_sli_abort_fcp_rings(phba);

		/*
		 * There was a firmware error.  Take the hba offline and then
		 * attempt to restart it.
		 */
		lpfc_offline_prep(phba, LPFC_MBX_NO_WAIT);
		lpfc_offline(phba);
		lpfc_sli_brdrestart(phba);
		if (lpfc_online(phba) == 0) {	/* Initialize the HBA */
			lpfc_unblock_mgmt_io(phba);
			return;
		}
		lpfc_unblock_mgmt_io(phba);
	} else if (phba->work_hs & HS_CRIT_TEMP) {
		temperature = readl(phba->MBslimaddr + TEMPERATURE_OFFSET);
		temp_event_data.event_type = FC_REG_TEMPERATURE_EVENT;
		temp_event_data.event_code = LPFC_CRIT_TEMP;
		temp_event_data.data = (uint32_t)temperature;

		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
				"0406 Adapter maximum temperature exceeded "
				"(%ld), taking this port offline "
				"Data: x%x x%x x%x\n",
				temperature, phba->work_hs,
				phba->work_status[0], phba->work_status[1]);

		shost = lpfc_shost_from_vport(phba->pport);
		fc_host_post_vendor_event(shost, fc_get_event_number(),
					  sizeof(temp_event_data),
					  (char *) &temp_event_data,
					  SCSI_NL_VID_TYPE_PCI
					  | PCI_VENDOR_ID_EMULEX);

		spin_lock_irq(&phba->hbalock);
		phba->over_temp_state = HBA_OVER_TEMP;
		spin_unlock_irq(&phba->hbalock);
		lpfc_offline_eratt(phba);

	} else {
		/* The if clause above forces this code path when the status
		 * failure is a value other than FFER6. Do not call the offline
		 * twice. This is the adapter hardware error path.
		 */
		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
				"0457 Adapter Hardware Error "
				"Data: x%x x%x x%x\n",
				phba->work_hs,
				phba->work_status[0], phba->work_status[1]);

		event_data = FC_REG_DUMP_EVENT;
		shost = lpfc_shost_from_vport(vport);
		fc_host_post_vendor_event(shost, fc_get_event_number(),
				sizeof(event_data), (char *) &event_data,
				SCSI_NL_VID_TYPE_PCI | PCI_VENDOR_ID_EMULEX);

		lpfc_offline_eratt(phba);
	}
	return;
}

/**
 * lpfc_sli4_port_sta_fn_reset - The SLI4 function reset due to port status reg
 * @phba: pointer to lpfc hba data structure.
 * @mbx_action: flag for mailbox shutdown action.
 * @en_rn_msg: send reset/port recovery message.
 *
 * This routine is invoked to perform an SLI4 port PCI function reset in
 * response to port status register polling attention. After the port is
 * taken offline and the device interrupt released, the board is restarted,
 * interrupts are re-enabled, and the port is brought back online.
 **/
static int
lpfc_sli4_port_sta_fn_reset(struct lpfc_hba *phba, int mbx_action,
			    bool en_rn_msg)
{
	int rc;
	uint32_t intr_mode;
	LPFC_MBOXQ_t *mboxq;

	if (bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) >=
	    LPFC_SLI_INTF_IF_TYPE_2) {
		/*
		 * On error status condition, driver need to wait for port
		 * ready before performing reset.
		 */
		rc = lpfc_sli4_pdev_status_reg_wait(phba);
		if (rc)
			return rc;
	}

	/* need reset: attempt for port recovery */
	if (en_rn_msg)
		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
				"2887 Reset Needed: Attempting Port "
				"Recovery...\n");

	/* If we are no wait, the HBA has been reset and is not
	 * functional, so terminate any active mailbox command and
	 * clear the SLI active flag before taking the port offline.
	 */
	if (mbx_action == LPFC_MBX_NO_WAIT) {
		spin_lock_irq(&phba->hbalock);
		phba->sli.sli_flag &= ~LPFC_SLI_ACTIVE;
		if (phba->sli.mbox_active) {
			mboxq = phba->sli.mbox_active;
			mboxq->u.mb.mbxStatus = MBX_NOT_FINISHED;
			__lpfc_mbox_cmpl_put(phba, mboxq);
			phba->sli.sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
			phba->sli.mbox_active = NULL;
		}
		spin_unlock_irq(&phba->hbalock);
	}

	lpfc_offline_prep(phba, mbx_action);
	lpfc_sli_flush_io_rings(phba);
	lpfc_offline(phba);
	/* release interrupt for possible resource change */
	lpfc_sli4_disable_intr(phba);
	rc = lpfc_sli_brdrestart(phba);
	if (rc) {
		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
				"6309 Failed to restart board\n");
		return rc;
	}
	/* request and enable interrupt */
	intr_mode = lpfc_sli4_enable_intr(phba, phba->intr_mode);
	if (intr_mode == LPFC_INTR_ERROR) {
		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
				"3175 Failed to enable interrupt\n");
		return -EIO;
	}
	phba->intr_mode = intr_mode;
	rc = lpfc_online(phba);
	if (rc == 0)
		lpfc_unblock_mgmt_io(phba);

	return rc;
}

/**
 * lpfc_handle_eratt_s4 - The SLI4 HBA hardware error handler
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked to handle the SLI4 HBA hardware error attention
 * conditions.
 **/
static void
lpfc_handle_eratt_s4(struct lpfc_hba *phba)
{
	struct lpfc_vport *vport = phba->pport;
	uint32_t event_data;
	struct Scsi_Host *shost;
	uint32_t if_type;
	struct lpfc_register portstat_reg = {0};
	uint32_t reg_err1, reg_err2;
	uint32_t uerrlo_reg, uemasklo_reg;
	uint32_t smphr_port_status = 0, pci_rd_rc1, pci_rd_rc2;
	bool en_rn_msg = true;
	struct temp_event temp_event_data;
	struct lpfc_register portsmphr_reg;
	int rc, i;

	/* If the pci channel is offline, ignore possible errors, since
	 * we cannot communicate with the pci card anyway.
	 */
	if (pci_channel_offline(phba->pcidev)) {
		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
				"3166 pci channel is offline\n");
		lpfc_sli_flush_io_rings(phba);
		return;
	}

	memset(&portsmphr_reg, 0, sizeof(portsmphr_reg));
	if_type = bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf);
	switch (if_type) {
	case LPFC_SLI_INTF_IF_TYPE_0:
		pci_rd_rc1 = lpfc_readl(
				phba->sli4_hba.u.if_type0.UERRLOregaddr,
				&uerrlo_reg);
		pci_rd_rc2 = lpfc_readl(
				phba->sli4_hba.u.if_type0.UEMASKLOregaddr,
				&uemasklo_reg);
		/* consider PCI bus read error as pci device unplugged */
		if (pci_rd_rc1 == -EIO && pci_rd_rc2 == -EIO)
			return;
		if (!(phba->hba_flag & HBA_RECOVERABLE_UE)) {
			lpfc_sli4_offline_eratt(phba);
			return;
		}
		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
				"7623 Checking UE recoverable");

		for (i = 0; i < phba->sli4_hba.ue_to_sr / 1000; i++) {
			if (lpfc_readl(phba->sli4_hba.PSMPHRregaddr,
				       &portsmphr_reg.word0))
				continue;

			smphr_port_status = bf_get(lpfc_port_smphr_port_status,
						   &portsmphr_reg);
			if ((smphr_port_status & LPFC_PORT_SEM_MASK) ==
			    LPFC_PORT_SEM_UE_RECOVERABLE)
				break;
			/* Sleep for 1sec before checking SEMAPHORE */
			msleep(1000);
		}

		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
				"4827 smphr_port_status x%x : Waited %dSec",
				smphr_port_status, i);

		/* Recoverable UE, reset the HBA device */
		if ((smphr_port_status & LPFC_PORT_SEM_MASK) ==
		    LPFC_PORT_SEM_UE_RECOVERABLE) {
			for (i = 0; i < 20; i++) {
				msleep(1000);
				if (!lpfc_readl(phba->sli4_hba.PSMPHRregaddr,
				    &portsmphr_reg.word0) &&
				    (LPFC_POST_STAGE_PORT_READY ==
				     bf_get(lpfc_port_smphr_port_status,
				     &portsmphr_reg))) {
					rc = lpfc_sli4_port_sta_fn_reset(phba,
						LPFC_MBX_NO_WAIT, en_rn_msg);
					if (rc == 0)
						return;
					lpfc_printf_log(phba, KERN_ERR,
						LOG_TRACE_EVENT,
						"4215 Failed to recover UE");
					break;
				}
			}
		}
		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
				"7624 Firmware not ready: Failing UE recovery,"
				" waited %dSec", i);
		phba->link_state = LPFC_HBA_ERROR;
		break;

	case LPFC_SLI_INTF_IF_TYPE_2:
	case LPFC_SLI_INTF_IF_TYPE_6:
		pci_rd_rc1 = lpfc_readl(
				phba->sli4_hba.u.if_type2.STATUSregaddr,
				&portstat_reg.word0);
		/* consider PCI bus read error as pci device unplugged */
		if (pci_rd_rc1 == -EIO) {
			lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
				"3151 PCI bus read access failure: x%x\n",
				readl(phba->sli4_hba.u.if_type2.STATUSregaddr));
			lpfc_sli4_offline_eratt(phba);
			return;
		}
		reg_err1 = readl(phba->sli4_hba.u.if_type2.ERR1regaddr);
		reg_err2 = readl(phba->sli4_hba.u.if_type2.ERR2regaddr);
		if (bf_get(lpfc_sliport_status_oti, &portstat_reg)) {
			lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
					"2889 Port Overtemperature event, "
					"taking port offline Data: x%x x%x\n",
					reg_err1, reg_err2);

			phba->sfp_alarm |= LPFC_TRANSGRESSION_HIGH_TEMPERATURE;
			temp_event_data.event_type = FC_REG_TEMPERATURE_EVENT;
			temp_event_data.event_code = LPFC_CRIT_TEMP;
			temp_event_data.data = 0xFFFFFFFF;

			shost = lpfc_shost_from_vport(phba->pport);
			fc_host_post_vendor_event(shost, fc_get_event_number(),
						  sizeof(temp_event_data),
						  (char *)&temp_event_data,
						  SCSI_NL_VID_TYPE_PCI
						  | PCI_VENDOR_ID_EMULEX);

			spin_lock_irq(&phba->hbalock);
			phba->over_temp_state = HBA_OVER_TEMP;
			spin_unlock_irq(&phba->hbalock);
			lpfc_sli4_offline_eratt(phba);
			return;
		}
		if (reg_err1 == SLIPORT_ERR1_REG_ERR_CODE_2 &&
		    reg_err2 == SLIPORT_ERR2_REG_FW_RESTART) {
			lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
					"3143 Port Down: Firmware Update "
					"Detected\n");
			en_rn_msg = false;
		} else if (reg_err1 == SLIPORT_ERR1_REG_ERR_CODE_2 &&
			 reg_err2 == SLIPORT_ERR2_REG_FORCED_DUMP)
			lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
					"3144 Port Down: Debug Dump\n");
		else if (reg_err1 == SLIPORT_ERR1_REG_ERR_CODE_2 &&
			 reg_err2 == SLIPORT_ERR2_REG_FUNC_PROVISON)
			lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
					"3145 Port Down: Provisioning\n");

		/* If resets are disabled then leave the HBA alone and return */
		if (!phba->cfg_enable_hba_reset)
			return;

		/* Check port status register for function reset */
		rc = lpfc_sli4_port_sta_fn_reset(phba, LPFC_MBX_NO_WAIT,
						 en_rn_msg);
		if (rc == 0) {
			/* don't report event on forced debug dump */
			if (reg_err1 == SLIPORT_ERR1_REG_ERR_CODE_2 &&
			    reg_err2 == SLIPORT_ERR2_REG_FORCED_DUMP)
				return;
			else
				break;
		}
		/* fall through for not able to recover */
		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
				"3152 Unrecoverable error\n");
		phba->link_state = LPFC_HBA_ERROR;
		break;
	case LPFC_SLI_INTF_IF_TYPE_1:
	default:
		break;
	}
	lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
			"3123 Report dump event to upper layer\n");
	/* Send an internal error event to mgmt application */
	lpfc_board_errevt_to_mgmt(phba);

	event_data = FC_REG_DUMP_EVENT;
	shost = lpfc_shost_from_vport(vport);
	fc_host_post_vendor_event(shost, fc_get_event_number(),
				  sizeof(event_data), (char *) &event_data,
				  SCSI_NL_VID_TYPE_PCI | PCI_VENDOR_ID_EMULEX);
}

/**
 * lpfc_handle_eratt - Wrapper func for handling hba error attention
 * @phba: pointer to lpfc HBA data structure.
 *
 * This routine wraps the actual SLI3 or SLI4 hba error attention handling
 * routine from the API jump table function pointer in the lpfc_hba struct.
 **/
void
lpfc_handle_eratt(struct lpfc_hba *phba)
{
	(*phba->lpfc_handle_eratt)(phba);
}

/**
 * lpfc_handle_latt - The HBA link event handler
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked from the worker thread to handle a HBA host
 * attention link event. SLI3 only.
 **/
void
lpfc_handle_latt(struct lpfc_hba *phba)
{
	struct lpfc_vport *vport = phba->pport;
	struct lpfc_sli *psli = &phba->sli;
	LPFC_MBOXQ_t *pmb;
	volatile uint32_t control;
	int rc = 0;

	pmb = (LPFC_MBOXQ_t *)mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!pmb) {
		rc = 1;
		goto lpfc_handle_latt_err_exit;
	}

	rc = lpfc_mbox_rsrc_prep(phba, pmb);
	if (rc) {
		rc = 2;
		mempool_free(pmb, phba->mbox_mem_pool);
		goto lpfc_handle_latt_err_exit;
	}

	/* Cleanup any outstanding ELS commands */
	lpfc_els_flush_all_cmd(phba);
	psli->slistat.link_event++;
	lpfc_read_topology(phba, pmb, (struct lpfc_dmabuf *)pmb->ctx_buf);
	pmb->mbox_cmpl = lpfc_mbx_cmpl_read_topology;
	pmb->vport = vport;
	/* Block ELS IOCBs until we have processed this mbox command */
	phba->sli.sli3_ring[LPFC_ELS_RING].flag |= LPFC_STOP_IOCB_EVENT;
	rc = lpfc_sli_issue_mbox (phba, pmb, MBX_NOWAIT);
	if (rc == MBX_NOT_FINISHED) {
		rc = 4;
		goto lpfc_handle_latt_free_mbuf;
	}

	/* Clear Link Attention in HA REG */
	spin_lock_irq(&phba->hbalock);
	writel(HA_LATT, phba->HAregaddr);
	readl(phba->HAregaddr); /* flush */
	spin_unlock_irq(&phba->hbalock);

	return;

lpfc_handle_latt_free_mbuf:
	phba->sli.sli3_ring[LPFC_ELS_RING].flag &= ~LPFC_STOP_IOCB_EVENT;
	lpfc_mbox_rsrc_cleanup(phba, pmb, MBOX_THD_UNLOCKED);
lpfc_handle_latt_err_exit:
	/* Enable Link attention interrupts */
	spin_lock_irq(&phba->hbalock);
	psli->sli_flag |= LPFC_PROCESS_LA;
	control = readl(phba->HCregaddr);
	control |= HC_LAINT_ENA;
	writel(control, phba->HCregaddr);
	readl(phba->HCregaddr); /* flush */

	/* Clear Link Attention in HA REG */
	writel(HA_LATT, phba->HAregaddr);
	readl(phba->HAregaddr); /* flush */
	spin_unlock_irq(&phba->hbalock);
	lpfc_linkdown(phba);
	phba->link_state = LPFC_HBA_ERROR;

	lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
			"0300 LATT: Cannot issue READ_LA: Data:%d\n", rc);

	return;
}

/**
 * lpfc_parse_vpd - Parse VPD (Vital Product Data)
 * @phba: pointer to lpfc hba data structure.
 * @vpd: pointer to the vital product data.
 * @len: length of the vital product data in bytes.
 *
 * This routine parses the Vital Product Data (VPD). The VPD is treated as
 * an array of characters. In this routine, the ModelName, ProgramType, and
 * ModelDesc, etc. fields of the phba data structure will be populated.
 *
 * Return codes
 *   0 - pointer to the VPD passed in is NULL
 *   1 - success
 **/
int
lpfc_parse_vpd(struct lpfc_hba *phba, uint8_t *vpd, int len)
{
	uint8_t lenlo, lenhi;
	int Length;
	int i, j;
	int finished = 0;
	int index = 0;

	if (!vpd)
		return 0;

	/* Vital Product */
	lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
			"0455 Vital Product Data: x%x x%x x%x x%x\n",
			(uint32_t) vpd[0], (uint32_t) vpd[1], (uint32_t) vpd[2],
			(uint32_t) vpd[3]);
	while (!finished && (index < (len - 4))) {
		switch (vpd[index]) {
		case 0x82:
		case 0x91:
			index += 1;
			lenlo = vpd[index];
			index += 1;
			lenhi = vpd[index];
			index += 1;
			/* 16-bit little-endian length: lenhi<<8 | lenlo */
			i = ((((unsigned short)lenhi) << 8) + lenlo);
			index += i;
			break;
		case 0x90:
			index += 1;
			lenlo = vpd[index];
			index += 1;
			lenhi = vpd[index];
			index += 1;
			Length = ((((unsigned short)lenhi) << 8) + lenlo);
			if (Length > len - index)
				Length = len - index;
			while (Length > 0) {
			/* Look for Serial Number */
			if ((vpd[index] == 'S') && (vpd[index+1] == 'N')) {
				index += 2;
				i = vpd[index];
				index += 1;
				j = 0;
				Length -= (3+i);
				while(i--) {
					phba->SerialNumber[j++] = vpd[index++];
					if (j == 31)
						break;
				}
				phba->SerialNumber[j] = 0;
				continue;
			}
			else if ((vpd[index] == 'V') && (vpd[index+1] == '1')) {
				phba->vpd_flag |= VPD_MODEL_DESC;
				index += 2;
				i = vpd[index];
				index += 1;
				j = 0;
				Length -= (3+i);
				while(i--) {
					phba->ModelDesc[j++] = vpd[index++];
					if (j == 255)
						break;
				}
				phba->ModelDesc[j] = 0;
				continue;
			}
			else if ((vpd[index] == 'V') && (vpd[index+1] == '2')) {
				phba->vpd_flag |= VPD_MODEL_NAME;
				index += 2;
				i = vpd[index];
				index += 1;
				j = 0;
				Length -= (3+i);
				while(i--) {
					phba->ModelName[j++] = vpd[index++];
					if (j == 79)
						break;
				}
				phba->ModelName[j] = 0;
				continue;
			}
			else if ((vpd[index] == 'V') && (vpd[index+1] == '3')) {
				phba->vpd_flag |= VPD_PROGRAM_TYPE;
				index += 2;
				i = vpd[index];
				index += 1;
				j = 0;
				Length -= (3+i);
				while(i--) {
					phba->ProgramType[j++] = vpd[index++];
					if (j == 255)
						break;
				}
				phba->ProgramType[j] = 0;
				continue;
			}
			else if ((vpd[index] == 'V') && (vpd[index+1] == '4')) {
				phba->vpd_flag |= VPD_PORT;
				index += 2;
				i = vpd[index];
				index += 1;
				j = 0;
				Length -= (3+i);
				while(i--) {
					if ((phba->sli_rev == LPFC_SLI_REV4) &&
					    (phba->sli4_hba.pport_name_sta ==
					     LPFC_SLI4_PPNAME_GET)) {
						j++;
						index++;
					} else
						phba->Port[j++] = vpd[index++];
					if (j == 19)
						break;
				}
				if ((phba->sli_rev != LPFC_SLI_REV4) ||
				    (phba->sli4_hba.pport_name_sta ==
				     LPFC_SLI4_PPNAME_NON))
					phba->Port[j] = 0;
				continue;
			}
			else {
				index += 2;
				i = vpd[index];
				index += 1;
				index += i;
				Length -= (3 + i);
			}
			}
			finished = 0;
			break;
		case 0x78:
			finished = 1;
			break;
		default:
			index ++;
			break;
		}
	}

	return(1);
}
2416
2417
2418
2419
2420
2421
2422
2423
2424
2425
2426
2427
2428
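/**
 * lpfc_get_atto_model_desc - Retrieve ATTO HBA device model name and description
 * @phba: pointer to lpfc hba data structure.
 * @mdp: pointer to the data structure to hold the derived model name.
 * @descp: pointer to the data structure to hold the derived description.
 *
 * This routine retrieves an ATTO adapter's model name and description based
 * on its registered PCI subsystem device ID. @mdp and @descp are only
 * filled in when they arrive empty.
 **/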
2429static void
2430lpfc_get_atto_model_desc(struct lpfc_hba *phba, uint8_t *mdp, uint8_t *descp)
2431{
2432 uint16_t sub_dev_id = phba->pcidev->subsystem_device;
2433 char *model = "<Unknown>";
2434 int tbolt = 0;
2435
2436 switch (sub_dev_id) {
2437 case PCI_DEVICE_ID_CLRY_161E:
2438 model = "161E";
2439 break;
2440 case PCI_DEVICE_ID_CLRY_162E:
2441 model = "162E";
2442 break;
2443 case PCI_DEVICE_ID_CLRY_164E:
2444 model = "164E";
2445 break;
2446 case PCI_DEVICE_ID_CLRY_161P:
2447 model = "161P";
2448 break;
2449 case PCI_DEVICE_ID_CLRY_162P:
2450 model = "162P";
2451 break;
2452 case PCI_DEVICE_ID_CLRY_164P:
2453 model = "164P";
2454 break;
2455 case PCI_DEVICE_ID_CLRY_321E:
2456 model = "321E";
2457 break;
2458 case PCI_DEVICE_ID_CLRY_322E:
2459 model = "322E";
2460 break;
2461 case PCI_DEVICE_ID_CLRY_324E:
2462 model = "324E";
2463 break;
2464 case PCI_DEVICE_ID_CLRY_321P:
2465 model = "321P";
2466 break;
2467 case PCI_DEVICE_ID_CLRY_322P:
2468 model = "322P";
2469 break;
2470 case PCI_DEVICE_ID_CLRY_324P:
2471 model = "324P";
2472 break;
2473 case PCI_DEVICE_ID_TLFC_2XX2:
2474 model = "2XX2";
2475 tbolt = 1;
2476 break;
2477 case PCI_DEVICE_ID_TLFC_3162:
2478 model = "3162";
2479 tbolt = 1;
2480 break;
2481 case PCI_DEVICE_ID_TLFC_3322:
2482 model = "3322";
2483 tbolt = 1;
2484 break;
2485 default:
2486 model = "Unknown";
2487 break;
2488 }
2489
2490 if (mdp && mdp[0] == '\0')
2491 snprintf(mdp, 79, "%s", model);
2492
2493 if (descp && descp[0] == '\0')
2494 snprintf(descp, 255,
2495 "ATTO %s%s, Fibre Channel Adapter Initiator, Port %s",
2496 (tbolt) ? "ThunderLink FC " : "Celerity FC-",
2497 model,
2498 phba->Port);
2499}
2500
2501
2502
2503
2504
2505
2506
2507
2508
2509
2510
2511
2512
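/**
 * lpfc_get_hba_model_desc - Retrieve HBA device model name and description
 * @phba: pointer to lpfc hba data structure.
 * @mdp: pointer to the data structure to hold the derived model name.
 * @descp: pointer to the data structure to hold the derived description.
 *
 * This routine retrieves HBA's description based on its registered PCI
 * device ID. The @descp passed into this function points to an array of
 * 256 chars. It shall be returned with the model name, maximum speed, and
 * the host bus type.
 **/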
2513static void
2514lpfc_get_hba_model_desc(struct lpfc_hba *phba, uint8_t *mdp, uint8_t *descp)
2515{
2516 lpfc_vpd_t *vp;
2517 uint16_t dev_id = phba->pcidev->device;
2518 int max_speed;
2519 int GE = 0;
2520 int oneConnect = 0;
2521 struct {
2522 char *name;
2523 char *bus;
2524 char *function;
2525 } m = {"<Unknown>", "", ""};
2526
2527 if (mdp && mdp[0] != '\0'
2528 && descp && descp[0] != '\0')
2529 return;
2530
2531 if (phba->pcidev->vendor == PCI_VENDOR_ID_ATTO) {
2532 lpfc_get_atto_model_desc(phba, mdp, descp);
2533 return;
2534 }
2535
2536 if (phba->lmt & LMT_64Gb)
2537 max_speed = 64;
2538 else if (phba->lmt & LMT_32Gb)
2539 max_speed = 32;
2540 else if (phba->lmt & LMT_16Gb)
2541 max_speed = 16;
2542 else if (phba->lmt & LMT_10Gb)
2543 max_speed = 10;
2544 else if (phba->lmt & LMT_8Gb)
2545 max_speed = 8;
2546 else if (phba->lmt & LMT_4Gb)
2547 max_speed = 4;
2548 else if (phba->lmt & LMT_2Gb)
2549 max_speed = 2;
2550 else if (phba->lmt & LMT_1Gb)
2551 max_speed = 1;
2552 else
2553 max_speed = 0;
2554
2555 vp = &phba->vpd;
2556
2557 switch (dev_id) {
2558 case PCI_DEVICE_ID_FIREFLY:
2559 m = (typeof(m)){"LP6000", "PCI",
2560 "Obsolete, Unsupported Fibre Channel Adapter"};
2561 break;
2562 case PCI_DEVICE_ID_SUPERFLY:
2563 if (vp->rev.biuRev >= 1 && vp->rev.biuRev <= 3)
2564 m = (typeof(m)){"LP7000", "PCI", ""};
2565 else
2566 m = (typeof(m)){"LP7000E", "PCI", ""};
2567 m.function = "Obsolete, Unsupported Fibre Channel Adapter";
2568 break;
2569 case PCI_DEVICE_ID_DRAGONFLY:
2570 m = (typeof(m)){"LP8000", "PCI",
2571 "Obsolete, Unsupported Fibre Channel Adapter"};
2572 break;
2573 case PCI_DEVICE_ID_CENTAUR:
2574 if (FC_JEDEC_ID(vp->rev.biuRev) == CENTAUR_2G_JEDEC_ID)
2575 m = (typeof(m)){"LP9002", "PCI", ""};
2576 else
2577 m = (typeof(m)){"LP9000", "PCI", ""};
2578 m.function = "Obsolete, Unsupported Fibre Channel Adapter";
2579 break;
2580 case PCI_DEVICE_ID_RFLY:
2581 m = (typeof(m)){"LP952", "PCI",
2582 "Obsolete, Unsupported Fibre Channel Adapter"};
2583 break;
2584 case PCI_DEVICE_ID_PEGASUS:
2585 m = (typeof(m)){"LP9802", "PCI-X",
2586 "Obsolete, Unsupported Fibre Channel Adapter"};
2587 break;
2588 case PCI_DEVICE_ID_THOR:
2589 m = (typeof(m)){"LP10000", "PCI-X",
2590 "Obsolete, Unsupported Fibre Channel Adapter"};
2591 break;
2592 case PCI_DEVICE_ID_VIPER:
2593 m = (typeof(m)){"LPX1000", "PCI-X",
2594 "Obsolete, Unsupported Fibre Channel Adapter"};
2595 break;
2596 case PCI_DEVICE_ID_PFLY:
2597 m = (typeof(m)){"LP982", "PCI-X",
2598 "Obsolete, Unsupported Fibre Channel Adapter"};
2599 break;
2600 case PCI_DEVICE_ID_TFLY:
2601 m = (typeof(m)){"LP1050", "PCI-X",
2602 "Obsolete, Unsupported Fibre Channel Adapter"};
2603 break;
2604 case PCI_DEVICE_ID_HELIOS:
2605 m = (typeof(m)){"LP11000", "PCI-X2",
2606 "Obsolete, Unsupported Fibre Channel Adapter"};
2607 break;
2608 case PCI_DEVICE_ID_HELIOS_SCSP:
2609 m = (typeof(m)){"LP11000-SP", "PCI-X2",
2610 "Obsolete, Unsupported Fibre Channel Adapter"};
2611 break;
2612 case PCI_DEVICE_ID_HELIOS_DCSP:
2613 m = (typeof(m)){"LP11002-SP", "PCI-X2",
2614 "Obsolete, Unsupported Fibre Channel Adapter"};
2615 break;
2616 case PCI_DEVICE_ID_NEPTUNE:
2617 m = (typeof(m)){"LPe1000", "PCIe",
2618 "Obsolete, Unsupported Fibre Channel Adapter"};
2619 break;
2620 case PCI_DEVICE_ID_NEPTUNE_SCSP:
2621 m = (typeof(m)){"LPe1000-SP", "PCIe",
2622 "Obsolete, Unsupported Fibre Channel Adapter"};
2623 break;
2624 case PCI_DEVICE_ID_NEPTUNE_DCSP:
2625 m = (typeof(m)){"LPe1002-SP", "PCIe",
2626 "Obsolete, Unsupported Fibre Channel Adapter"};
2627 break;
2628 case PCI_DEVICE_ID_BMID:
2629 m = (typeof(m)){"LP1150", "PCI-X2", "Fibre Channel Adapter"};
2630 break;
2631 case PCI_DEVICE_ID_BSMB:
2632 m = (typeof(m)){"LP111", "PCI-X2",
2633 "Obsolete, Unsupported Fibre Channel Adapter"};
2634 break;
2635 case PCI_DEVICE_ID_ZEPHYR:
2636 m = (typeof(m)){"LPe11000", "PCIe", "Fibre Channel Adapter"};
2637 break;
2638 case PCI_DEVICE_ID_ZEPHYR_SCSP:
2639 m = (typeof(m)){"LPe11000", "PCIe", "Fibre Channel Adapter"};
2640 break;
2641 case PCI_DEVICE_ID_ZEPHYR_DCSP:
2642 m = (typeof(m)){"LP2105", "PCIe", "FCoE Adapter"};
2643 GE = 1;
2644 break;
2645 case PCI_DEVICE_ID_ZMID:
2646 m = (typeof(m)){"LPe1150", "PCIe", "Fibre Channel Adapter"};
2647 break;
2648 case PCI_DEVICE_ID_ZSMB:
2649 m = (typeof(m)){"LPe111", "PCIe", "Fibre Channel Adapter"};
2650 break;
2651 case PCI_DEVICE_ID_LP101:
2652 m = (typeof(m)){"LP101", "PCI-X",
2653 "Obsolete, Unsupported Fibre Channel Adapter"};
2654 break;
2655 case PCI_DEVICE_ID_LP10000S:
2656 m = (typeof(m)){"LP10000-S", "PCI",
2657 "Obsolete, Unsupported Fibre Channel Adapter"};
2658 break;
2659 case PCI_DEVICE_ID_LP11000S:
2660 m = (typeof(m)){"LP11000-S", "PCI-X2",
2661 "Obsolete, Unsupported Fibre Channel Adapter"};
2662 break;
2663 case PCI_DEVICE_ID_LPE11000S:
2664 m = (typeof(m)){"LPe11000-S", "PCIe",
2665 "Obsolete, Unsupported Fibre Channel Adapter"};
2666 break;
2667 case PCI_DEVICE_ID_SAT:
2668 m = (typeof(m)){"LPe12000", "PCIe", "Fibre Channel Adapter"};
2669 break;
2670 case PCI_DEVICE_ID_SAT_MID:
2671 m = (typeof(m)){"LPe1250", "PCIe", "Fibre Channel Adapter"};
2672 break;
2673 case PCI_DEVICE_ID_SAT_SMB:
2674 m = (typeof(m)){"LPe121", "PCIe", "Fibre Channel Adapter"};
2675 break;
2676 case PCI_DEVICE_ID_SAT_DCSP:
2677 m = (typeof(m)){"LPe12002-SP", "PCIe", "Fibre Channel Adapter"};
2678 break;
2679 case PCI_DEVICE_ID_SAT_SCSP:
2680 m = (typeof(m)){"LPe12000-SP", "PCIe", "Fibre Channel Adapter"};
2681 break;
2682 case PCI_DEVICE_ID_SAT_S:
2683 m = (typeof(m)){"LPe12000-S", "PCIe", "Fibre Channel Adapter"};
2684 break;
2685 case PCI_DEVICE_ID_HORNET:
2686 m = (typeof(m)){"LP21000", "PCIe",
2687 "Obsolete, Unsupported FCoE Adapter"};
2688 GE = 1;
2689 break;
2690 case PCI_DEVICE_ID_PROTEUS_VF:
2691 m = (typeof(m)){"LPev12000", "PCIe IOV",
2692 "Obsolete, Unsupported Fibre Channel Adapter"};
2693 break;
2694 case PCI_DEVICE_ID_PROTEUS_PF:
2695 m = (typeof(m)){"LPev12000", "PCIe IOV",
2696 "Obsolete, Unsupported Fibre Channel Adapter"};
2697 break;
2698 case PCI_DEVICE_ID_PROTEUS_S:
2699 m = (typeof(m)){"LPemv12002-S", "PCIe IOV",
2700 "Obsolete, Unsupported Fibre Channel Adapter"};
2701 break;
2702 case PCI_DEVICE_ID_TIGERSHARK:
2703 oneConnect = 1;
2704 m = (typeof(m)){"OCe10100", "PCIe", "FCoE"};
2705 break;
2706 case PCI_DEVICE_ID_TOMCAT:
2707 oneConnect = 1;
2708 m = (typeof(m)){"OCe11100", "PCIe", "FCoE"};
2709 break;
2710 case PCI_DEVICE_ID_FALCON:
2711 m = (typeof(m)){"LPSe12002-ML1-E", "PCIe",
2712 "EmulexSecure Fibre"};
2713 break;
2714 case PCI_DEVICE_ID_BALIUS:
2715 m = (typeof(m)){"LPVe12002", "PCIe Shared I/O",
2716 "Obsolete, Unsupported Fibre Channel Adapter"};
2717 break;
2718 case PCI_DEVICE_ID_LANCER_FC:
2719 m = (typeof(m)){"LPe16000", "PCIe", "Fibre Channel Adapter"};
2720 break;
2721 case PCI_DEVICE_ID_LANCER_FC_VF:
2722 m = (typeof(m)){"LPe16000", "PCIe",
2723 "Obsolete, Unsupported Fibre Channel Adapter"};
2724 break;
2725 case PCI_DEVICE_ID_LANCER_FCOE:
2726 oneConnect = 1;
2727 m = (typeof(m)){"OCe15100", "PCIe", "FCoE"};
2728 break;
2729 case PCI_DEVICE_ID_LANCER_FCOE_VF:
2730 oneConnect = 1;
2731 m = (typeof(m)){"OCe15100", "PCIe",
2732 "Obsolete, Unsupported FCoE"};
2733 break;
2734 case PCI_DEVICE_ID_LANCER_G6_FC:
2735 m = (typeof(m)){"LPe32000", "PCIe", "Fibre Channel Adapter"};
2736 break;
2737 case PCI_DEVICE_ID_LANCER_G7_FC:
2738 m = (typeof(m)){"LPe36000", "PCIe", "Fibre Channel Adapter"};
2739 break;
2740 case PCI_DEVICE_ID_LANCER_G7P_FC:
2741 m = (typeof(m)){"LPe38000", "PCIe", "Fibre Channel Adapter"};
2742 break;
2743 case PCI_DEVICE_ID_SKYHAWK:
2744 case PCI_DEVICE_ID_SKYHAWK_VF:
2745 oneConnect = 1;
2746 m = (typeof(m)){"OCe14000", "PCIe", "FCoE"};
2747 break;
2748 default:
2749 m = (typeof(m)){"Unknown", "", ""};
2750 break;
2751 }
2752
	if (mdp && mdp[0] == '\0')
		snprintf(mdp, 79, "%s", m.name);

	/*
	 * oneConnect hba requires special processing, they are all initiators
	 * and we put the port number on the end
	 */
2759 if (descp && descp[0] == '\0') {
2760 if (oneConnect)
2761 snprintf(descp, 255,
2762 "Emulex OneConnect %s, %s Initiator %s",
2763 m.name, m.function,
2764 phba->Port);
2765 else if (max_speed == 0)
2766 snprintf(descp, 255,
2767 "Emulex %s %s %s",
2768 m.name, m.bus, m.function);
2769 else
2770 snprintf(descp, 255,
2771 "Emulex %s %d%s %s %s",
2772 m.name, max_speed, (GE) ? "GE" : "Gb",
2773 m.bus, m.function);
2774 }
2775}
2776
2777
2778
2779
2780
2781
2782
2783
2784
2785
2786
2787
2788
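/**
 * lpfc_sli3_post_buffer - Post IOCB(s) with DMA buffer descriptor(s) to a IOCB ring
 * @phba: pointer to lpfc hba data structure.
 * @pring: pointer to a IOCB ring.
 * @cnt: the number of IOCBs to be posted to the IOCB ring.
 *
 * This routine posts a given number of IOCBs with the associated DMA buffer
 * descriptors specified by the cnt argument to the given IOCB ring.
 *
 * Return codes
 *   The number of IOCBs NOT able to be posted to the IOCB ring.
 **/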
int
lpfc_sli3_post_buffer(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
		      int cnt)
{
2792 IOCB_t *icmd;
2793 struct lpfc_iocbq *iocb;
2794 struct lpfc_dmabuf *mp1, *mp2;
2795
2796 cnt += pring->missbufcnt;
2797
2798
2799 while (cnt > 0) {
2800
2801 iocb = lpfc_sli_get_iocbq(phba);
2802 if (iocb == NULL) {
2803 pring->missbufcnt = cnt;
2804 return cnt;
2805 }
2806 icmd = &iocb->iocb;
2807
2808
2809
2810 mp1 = kmalloc(sizeof (struct lpfc_dmabuf), GFP_KERNEL);
2811 if (mp1)
2812 mp1->virt = lpfc_mbuf_alloc(phba, MEM_PRI, &mp1->phys);
2813 if (!mp1 || !mp1->virt) {
2814 kfree(mp1);
2815 lpfc_sli_release_iocbq(phba, iocb);
2816 pring->missbufcnt = cnt;
2817 return cnt;
2818 }
2819
2820 INIT_LIST_HEAD(&mp1->list);
2821
2822 if (cnt > 1) {
2823 mp2 = kmalloc(sizeof (struct lpfc_dmabuf), GFP_KERNEL);
2824 if (mp2)
2825 mp2->virt = lpfc_mbuf_alloc(phba, MEM_PRI,
2826 &mp2->phys);
2827 if (!mp2 || !mp2->virt) {
2828 kfree(mp2);
2829 lpfc_mbuf_free(phba, mp1->virt, mp1->phys);
2830 kfree(mp1);
2831 lpfc_sli_release_iocbq(phba, iocb);
2832 pring->missbufcnt = cnt;
2833 return cnt;
2834 }
2835
2836 INIT_LIST_HEAD(&mp2->list);
2837 } else {
2838 mp2 = NULL;
2839 }
2840
2841 icmd->un.cont64[0].addrHigh = putPaddrHigh(mp1->phys);
2842 icmd->un.cont64[0].addrLow = putPaddrLow(mp1->phys);
2843 icmd->un.cont64[0].tus.f.bdeSize = FCELSSIZE;
2844 icmd->ulpBdeCount = 1;
2845 cnt--;
2846 if (mp2) {
2847 icmd->un.cont64[1].addrHigh = putPaddrHigh(mp2->phys);
2848 icmd->un.cont64[1].addrLow = putPaddrLow(mp2->phys);
2849 icmd->un.cont64[1].tus.f.bdeSize = FCELSSIZE;
2850 cnt--;
2851 icmd->ulpBdeCount = 2;
2852 }
2853
2854 icmd->ulpCommand = CMD_QUE_RING_BUF64_CN;
2855 icmd->ulpLe = 1;
2856
2857 if (lpfc_sli_issue_iocb(phba, pring->ringno, iocb, 0) ==
2858 IOCB_ERROR) {
2859 lpfc_mbuf_free(phba, mp1->virt, mp1->phys);
2860 kfree(mp1);
2861 cnt++;
2862 if (mp2) {
2863 lpfc_mbuf_free(phba, mp2->virt, mp2->phys);
2864 kfree(mp2);
2865 cnt++;
2866 }
2867 lpfc_sli_release_iocbq(phba, iocb);
2868 pring->missbufcnt = cnt;
2869 return cnt;
2870 }
2871 lpfc_sli_ringpostbuf_put(phba, pring, mp1);
2872 if (mp2)
2873 lpfc_sli_ringpostbuf_put(phba, pring, mp2);
2874 }
2875 pring->missbufcnt = 0;
2876 return 0;
2877}
2878
2879
2880
2881
2882
2883
2884
2885
2886
2887
2888
2889
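/**
 * lpfc_post_rcv_buf - Post the initial receive IOCB buffers to ELS ring
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine posts initial receive IOCB buffers to the ELS ring. The
 * number of initial buffers is specified by LPFC_BUF_RING0. SLI3 only.
 *
 * Return codes
 *   0 - success (currently always success)
 **/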
2890static int
2891lpfc_post_rcv_buf(struct lpfc_hba *phba)
2892{
2893 struct lpfc_sli *psli = &phba->sli;
2894

	/* Ring 0, ELS / CT buffers */
	lpfc_sli3_post_buffer(phba, &psli->sli3_ring[LPFC_ELS_RING],
			      LPFC_BUF_RING0);
2897
2898
2899 return 0;
2900}
2901
/* Rotate a 32-bit value left by N bits (used by the SHA-1 rounds below) */
#define S(N,V) (((V)<<(N))|((V)>>(32-(N))))
2903
2904
2905
2906
2907
2908
2909
2910
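/**
 * lpfc_sha_init - Set up initial array of hash table entries
 * @HashResultPointer: pointer to an array as hash table.
 *
 * This routine sets up the initial values to the array of hash table entries
 * for the LC HBAs.
 **/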
2911static void
2912lpfc_sha_init(uint32_t * HashResultPointer)
2913{
2914 HashResultPointer[0] = 0x67452301;
2915 HashResultPointer[1] = 0xEFCDAB89;
2916 HashResultPointer[2] = 0x98BADCFE;
2917 HashResultPointer[3] = 0x10325476;
2918 HashResultPointer[4] = 0xC3D2E1F0;
2919}
2920
2921
2922
2923
2924
2925
2926
2927
2928
2929
2930
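/**
 * lpfc_sha_iterate - Iterate initial hash table with the working hash table
 * @HashResultPointer: pointer to an initial/result hash table.
 * @HashWorkingPointer: pointer to an working hash table.
 *
 * This routine iterates an initial hash table pointed by @HashResultPointer
 * with the values from the working hash table pointed by @HashWorkingPointer.
 * The results are put back into the initial hash table, returned through
 * the @HashResultPointer as the result hash table.
 **/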
2931static void
2932lpfc_sha_iterate(uint32_t * HashResultPointer, uint32_t * HashWorkingPointer)
2933{
2934 int t;
2935 uint32_t TEMP;
2936 uint32_t A, B, C, D, E;
2937 t = 16;
2938 do {
2939 HashWorkingPointer[t] =
2940 S(1,
2941 HashWorkingPointer[t - 3] ^ HashWorkingPointer[t -
2942 8] ^
2943 HashWorkingPointer[t - 14] ^ HashWorkingPointer[t - 16]);
2944 } while (++t <= 79);
2945 t = 0;
2946 A = HashResultPointer[0];
2947 B = HashResultPointer[1];
2948 C = HashResultPointer[2];
2949 D = HashResultPointer[3];
2950 E = HashResultPointer[4];
2951
2952 do {
2953 if (t < 20) {
2954 TEMP = ((B & C) | ((~B) & D)) + 0x5A827999;
2955 } else if (t < 40) {
2956 TEMP = (B ^ C ^ D) + 0x6ED9EBA1;
2957 } else if (t < 60) {
2958 TEMP = ((B & C) | (B & D) | (C & D)) + 0x8F1BBCDC;
2959 } else {
2960 TEMP = (B ^ C ^ D) + 0xCA62C1D6;
2961 }
2962 TEMP += S(5, A) + E + HashWorkingPointer[t];
2963 E = D;
2964 D = C;
2965 C = S(30, B);
2966 B = A;
2967 A = TEMP;
2968 } while (++t <= 79);
2969
2970 HashResultPointer[0] += A;
2971 HashResultPointer[1] += B;
2972 HashResultPointer[2] += C;
2973 HashResultPointer[3] += D;
2974 HashResultPointer[4] += E;
2975
2976}
2977
2978
2979
2980
2981
2982
2983
2984
2985
2986
2987
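/**
 * lpfc_challenge_key - Create challenge key based on WWPN of the HBA
 * @RandomChallenge: pointer to the entry of host challenge random number array.
 * @HashWorking: pointer to the entry of the working hash array.
 *
 * This routine calculates the working hash array entry referred by
 * @HashWorking from the challenge random number associated with the host,
 * referred by @RandomChallenge, and returns it by reference.
 **/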
2988static void
2989lpfc_challenge_key(uint32_t * RandomChallenge, uint32_t * HashWorking)
2990{
2991 *HashWorking = (*RandomChallenge ^ *HashWorking);
2992}
2993
2994
2995
2996
2997
2998
2999
3000
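/**
 * lpfc_hba_init - Perform special handling for LC HBA initialization
 * @phba: pointer to lpfc hba data structure.
 * @hbainit: pointer to an array of unsigned 32-bit integers.
 *
 * This routine performs the special handling for LC HBA initialization. It
 * hashes the HBA's WWNN, challenged with the host random data, and returns
 * the result through @hbainit.
 **/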
3001void
3002lpfc_hba_init(struct lpfc_hba *phba, uint32_t *hbainit)
3003{
3004 int t;
3005 uint32_t *HashWorking;
3006 uint32_t *pwwnn = (uint32_t *) phba->wwnn;
3007
3008 HashWorking = kcalloc(80, sizeof(uint32_t), GFP_KERNEL);
3009 if (!HashWorking)
3010 return;
3011
3012 HashWorking[0] = HashWorking[78] = *pwwnn++;
3013 HashWorking[1] = HashWorking[79] = *pwwnn;
3014
3015 for (t = 0; t < 7; t++)
3016 lpfc_challenge_key(phba->RandomData + t, HashWorking + t);
3017
3018 lpfc_sha_init(hbainit);
3019 lpfc_sha_iterate(hbainit, HashWorking);
3020 kfree(HashWorking);
3021}
3022
3023
3024
3025
3026
3027
3028
3029
3030
3031
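/**
 * lpfc_cleanup - Performs vport cleanups before deleting a vport
 * @vport: pointer to a host virtual N_Port data structure.
 *
 * This routine performs the necessary cleanups before deleting the @vport.
 * It invokes the discovery state machine to perform necessary state
 * transitions and to release the ndlps associated with the @vport. Note,
 * any ndlps associated with the @vport that are still registered with the
 * transport have to be left with the transport unregistration in progress.
 * The routine then waits (up to roughly 30 seconds) for the node list to
 * drain.
 **/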
3032void
3033lpfc_cleanup(struct lpfc_vport *vport)
3034{
3035 struct lpfc_hba *phba = vport->phba;
3036 struct lpfc_nodelist *ndlp, *next_ndlp;
3037 int i = 0;
3038
3039 if (phba->link_state > LPFC_LINK_DOWN)
3040 lpfc_port_link_failure(vport);
3041
3042
3043 if (lpfc_is_vmid_enabled(phba))
3044 lpfc_vmid_vport_cleanup(vport);
3045
3046 list_for_each_entry_safe(ndlp, next_ndlp, &vport->fc_nodes, nlp_listp) {
3047 if (vport->port_type != LPFC_PHYSICAL_PORT &&
3048 ndlp->nlp_DID == Fabric_DID) {
3049
3050 lpfc_nlp_put(ndlp);
3051 continue;
3052 }
3053
3054 if (ndlp->nlp_DID == Fabric_Cntl_DID &&
3055 ndlp->nlp_state == NLP_STE_UNUSED_NODE) {
3056 lpfc_nlp_put(ndlp);
3057 continue;
3058 }
3059
3060
3061
3062
3063 if (ndlp->nlp_type & NLP_FABRIC &&
3064 ndlp->nlp_state == NLP_STE_UNMAPPED_NODE)
3065 lpfc_disc_state_machine(vport, ndlp, NULL,
3066 NLP_EVT_DEVICE_RECOVERY);
3067
3068 if (!(ndlp->fc4_xpt_flags & (NVME_XPT_REGD|SCSI_XPT_REGD)))
3069 lpfc_disc_state_machine(vport, ndlp, NULL,
3070 NLP_EVT_DEVICE_RM);
3071 }
3072
3073
3074
3075
3076
3077
3078
3079
3080
3081
3082
3083
	/* When the pci channel is offline while the driver is unloading,
	 * outstanding I/O can never complete; flush the IO rings so the
	 * remaining node references are released and the nodelist can
	 * drain below.
	 */
	if (vport->load_flag & FC_UNLOADING &&
	    pci_channel_offline(phba->pcidev))
		lpfc_sli_flush_io_rings(vport->phba);
3088
3089
3090
3091
3092
	/* Wait for any activity on ndlps to settle */
	while (!list_empty(&vport->fc_nodes)) {
3094 if (i++ > 3000) {
3095 lpfc_printf_vlog(vport, KERN_ERR,
3096 LOG_TRACE_EVENT,
3097 "0233 Nodelist not empty\n");
3098 list_for_each_entry_safe(ndlp, next_ndlp,
3099 &vport->fc_nodes, nlp_listp) {
3100 lpfc_printf_vlog(ndlp->vport, KERN_ERR,
3101 LOG_DISCOVERY,
3102 "0282 did:x%x ndlp:x%px "
3103 "refcnt:%d xflags x%x nflag x%x\n",
3104 ndlp->nlp_DID, (void *)ndlp,
3105 kref_read(&ndlp->kref),
3106 ndlp->fc4_xpt_flags,
3107 ndlp->nlp_flag);
3108 }
3109 break;
3110 }
3111
3112
3113 msleep(10);
3114 }
3115 lpfc_cleanup_vports_rrqs(vport, NULL);
3116}
3117
3118
3119
3120
3121
3122
3123
3124
3125
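/**
 * lpfc_stop_vport_timers - Stop all the timers associated with a vport
 * @vport: pointer to a virtual N_Port data structure.
 *
 * This routine stops all the timers associated with a @vport. This function
 * is invoked before disabling or deleting a @vport. Note that the physical
 * port is treated as @vport 0.
 **/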
3126void
3127lpfc_stop_vport_timers(struct lpfc_vport *vport)
3128{
3129 del_timer_sync(&vport->els_tmofunc);
3130 del_timer_sync(&vport->delayed_disc_tmo);
3131 lpfc_can_disctmo(vport);
3132 return;
3133}
3134
3135
3136
3137
3138
3139
3140
3141
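/**
 * __lpfc_sli4_stop_fcf_redisc_wait_timer - Stop FCF rediscovery wait timer
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine stops the SLI4 FCF rediscover wait timer if it's on. The
 * caller of this routine should already hold the host lock.
 **/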
3142void
3143__lpfc_sli4_stop_fcf_redisc_wait_timer(struct lpfc_hba *phba)
3144{
	/* Clear pending FCF rediscovery wait flag */
	phba->fcf.fcf_flag &= ~FCF_REDISC_PEND;

	/* Now, try to stop the timer */
	del_timer(&phba->fcf.redisc_wait);
3150}
3151
3152
3153
3154
3155
3156
3157
3158
3159
3160
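/**
 * lpfc_sli4_stop_fcf_redisc_wait_timer - Stop FCF rediscovery wait timer
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine stops the SLI4 FCF rediscover wait timer if it's on. It
 * checks, with the host lock held, whether the FCF rediscovery wait timer
 * is pending and, if so, stops it and clears the failover-in-progress
 * flags.
 **/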
3161void
3162lpfc_sli4_stop_fcf_redisc_wait_timer(struct lpfc_hba *phba)
3163{
	spin_lock_irq(&phba->hbalock);
	if (!(phba->fcf.fcf_flag & FCF_REDISC_PEND)) {
		/* FCF rediscovery timer already fired or stopped */
		spin_unlock_irq(&phba->hbalock);
		return;
	}
	__lpfc_sli4_stop_fcf_redisc_wait_timer(phba);
	/* Clear failover in progress flags */
	phba->fcf.fcf_flag &= ~(FCF_DEAD_DISC | FCF_ACVL_DISC);
	spin_unlock_irq(&phba->hbalock);
3174}
3175
3176
3177
3178
3179
3180
3181
3182
3183
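/**
 * lpfc_cmf_stop - Stop CMF processing
 * @phba: pointer to lpfc hba data structure.
 *
 * This is called when the link goes down or if CMF mode is turned OFF.
 * It is also called when going offline or unloaded, just before the
 * congestion info buffer is unregistered.
 **/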
3184void
3185lpfc_cmf_stop(struct lpfc_hba *phba)
3186{
3187 int cpu;
3188 struct lpfc_cgn_stat *cgs;
3189
	/* We only do something if CMF is enabled */
	if (!phba->sli4_hba.pc_sli4_params.cmf)
		return;

	lpfc_printf_log(phba, KERN_INFO, LOG_CGN_MGMT,
			"6221 Stop CMF / Cancel Timer\n");

	/* Cancel the CMF timer */
	hrtimer_cancel(&phba->cmf_timer);

	/* Zero CMF counters */
	atomic_set(&phba->cmf_busy, 0);
	for_each_present_cpu(cpu) {
		cgs = per_cpu_ptr(phba->cmf_stat, cpu);
		atomic64_set(&cgs->total_bytes, 0);
		atomic64_set(&cgs->rcv_bytes, 0);
		atomic_set(&cgs->rx_io_cnt, 0);
		atomic64_set(&cgs->rx_latency, 0);
	}
	atomic_set(&phba->cmf_bw_wait, 0);

	/* Resume any blocked IO - Queue unblock on the first IO channel */
	queue_work(phba->wq, &phba->unblock_request_work);
3213}
3214
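/*
 * Derive the maximum line rate in bytes per second from the reported link
 * speed. The divide by 10 appears to approximate the ~10 line bits carried
 * per data byte on the wire (an assumption based on 8b/10b-style encoding
 * overhead).
 */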
3215static inline uint64_t
3216lpfc_get_max_line_rate(struct lpfc_hba *phba)
3217{
3218 uint64_t rate = lpfc_sli_port_speed_get(phba);
3219
3220 return ((((unsigned long)rate) * 1024 * 1024) / 10);
3221}
3222
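/**
 * lpfc_cmf_signal_init - Initialize signal data for CMF
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine seeds the CMF interval and bandwidth values from the
 * current line rate and issues a CMF_SYNC_WQE to start the congestion
 * signal exchange.
 **/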
3223void
3224lpfc_cmf_signal_init(struct lpfc_hba *phba)
3225{
3226 lpfc_printf_log(phba, KERN_INFO, LOG_CGN_MGMT,
3227 "6223 Signal CMF init\n");
3228
3229
3230 phba->cmf_interval_rate = LPFC_CMF_INTERVAL;
3231 phba->cmf_max_line_rate = lpfc_get_max_line_rate(phba);
3232 phba->cmf_link_byte_count = div_u64(phba->cmf_max_line_rate *
3233 phba->cmf_interval_rate, 1000);
3234 phba->cmf_max_bytes_per_interval = phba->cmf_link_byte_count;
3235
3236
3237 lpfc_issue_cmf_sync_wqe(phba, 0, 0);
3238}
3239
3240
3241
3242
3243
3244
3245
3246
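/**
 * lpfc_cmf_start - Start CMF processing
 * @phba: pointer to lpfc hba data structure.
 *
 * This is called when the link comes up or if CMF mode is turned ON. All
 * per-cpu congestion statistic counters are zeroed and the CMF timer is
 * started.
 **/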
3247void
3248lpfc_cmf_start(struct lpfc_hba *phba)
3249{
3250 struct lpfc_cgn_stat *cgs;
3251 int cpu;
3252
3253
3254 if (!phba->sli4_hba.pc_sli4_params.cmf ||
3255 phba->cmf_active_mode == LPFC_CFG_OFF)
3256 return;
3257
3258
3259 lpfc_init_congestion_buf(phba);
3260
3261 atomic_set(&phba->cgn_fabric_warn_cnt, 0);
3262 atomic_set(&phba->cgn_fabric_alarm_cnt, 0);
3263 atomic_set(&phba->cgn_sync_alarm_cnt, 0);
3264 atomic_set(&phba->cgn_sync_warn_cnt, 0);
3265
3266 atomic_set(&phba->cmf_busy, 0);
3267 for_each_present_cpu(cpu) {
3268 cgs = per_cpu_ptr(phba->cmf_stat, cpu);
3269 atomic64_set(&cgs->total_bytes, 0);
3270 atomic64_set(&cgs->rcv_bytes, 0);
3271 atomic_set(&cgs->rx_io_cnt, 0);
3272 atomic64_set(&cgs->rx_latency, 0);
3273 }
3274 phba->cmf_latency.tv_sec = 0;
3275 phba->cmf_latency.tv_nsec = 0;
3276
3277 lpfc_cmf_signal_init(phba);
3278
3279 lpfc_printf_log(phba, KERN_INFO, LOG_CGN_MGMT,
3280 "6222 Start CMF / Timer\n");
3281
3282 phba->cmf_timer_cnt = 0;
3283 hrtimer_start(&phba->cmf_timer,
3284 ktime_set(0, LPFC_CMF_INTERVAL * 1000000),
3285 HRTIMER_MODE_REL);
3286
3287 ktime_get_real_ts64(&phba->cmf_latency);
3288
3289 atomic_set(&phba->cmf_bw_wait, 0);
3290 atomic_set(&phba->cmf_stop_io, 0);
3291}
3292
3293
3294
3295
3296
3297
3298
3299
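/**
 * lpfc_stop_hba_timers - Stop all the timers associated with an HBA
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine stops all the timers associated with a HBA. This function is
 * invoked before either putting a HBA offline or unloading the driver.
 **/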
3300void
3301lpfc_stop_hba_timers(struct lpfc_hba *phba)
3302{
3303 if (phba->pport)
3304 lpfc_stop_vport_timers(phba->pport);
3305 cancel_delayed_work_sync(&phba->eq_delay_work);
3306 cancel_delayed_work_sync(&phba->idle_stat_delay_work);
3307 del_timer_sync(&phba->sli.mbox_tmo);
3308 del_timer_sync(&phba->fabric_block_timer);
3309 del_timer_sync(&phba->eratt_poll);
3310 del_timer_sync(&phba->hb_tmofunc);
3311 if (phba->sli_rev == LPFC_SLI_REV4) {
3312 del_timer_sync(&phba->rrq_tmr);
3313 phba->hba_flag &= ~HBA_RRQ_ACTIVE;
3314 }
3315 phba->hba_flag &= ~(HBA_HBEAT_INP | HBA_HBEAT_TMO);
3316
3317 switch (phba->pci_dev_grp) {
3318 case LPFC_PCI_DEV_LP:
3319
3320 del_timer_sync(&phba->fcp_poll_timer);
3321 break;
3322 case LPFC_PCI_DEV_OC:
3323
3324 lpfc_sli4_stop_fcf_redisc_wait_timer(phba);
3325 break;
3326 default:
3327 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
3328 "0297 Invalid device group (x%x)\n",
3329 phba->pci_dev_grp);
3330 break;
3331 }
3332 return;
3333}
3334
3335
3336
3337
3338
3339
3340
3341
3342
3343
3344
3345
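/**
 * lpfc_block_mgmt_io - Mark a HBA's management interface as blocked
 * @phba: pointer to lpfc hba data structure.
 * @mbx_action: flag for mailbox no wait action.
 *
 * This routine marks a HBA's management interface as blocked. Once the
 * HBA's management interface is marked as blocked, all the user space
 * access to the HBA, whether they are from sysfs interface or libdfc
 * interface will all be blocked. The HBA is set to block the management
 * interface until the outstanding mailbox command completes or the
 * mailbox command times out.
 **/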
3346static void
3347lpfc_block_mgmt_io(struct lpfc_hba *phba, int mbx_action)
3348{
3349 unsigned long iflag;
3350 uint8_t actcmd = MBX_HEARTBEAT;
3351 unsigned long timeout;
3352
3353 spin_lock_irqsave(&phba->hbalock, iflag);
3354 phba->sli.sli_flag |= LPFC_BLOCK_MGMT_IO;
3355 spin_unlock_irqrestore(&phba->hbalock, iflag);
3356 if (mbx_action == LPFC_MBX_NO_WAIT)
3357 return;
3358 timeout = msecs_to_jiffies(LPFC_MBOX_TMO * 1000) + jiffies;
3359 spin_lock_irqsave(&phba->hbalock, iflag);
3360 if (phba->sli.mbox_active) {
3361 actcmd = phba->sli.mbox_active->u.mb.mbxCommand;
3362
3363
3364
3365 timeout = msecs_to_jiffies(lpfc_mbox_tmo_val(phba,
3366 phba->sli.mbox_active) * 1000) + jiffies;
3367 }
3368 spin_unlock_irqrestore(&phba->hbalock, iflag);
3369
3370
3371 while (phba->sli.mbox_active) {
3372
3373 msleep(2);
3374 if (time_after(jiffies, timeout)) {
3375 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
3376 "2813 Mgmt IO is Blocked %x "
3377 "- mbox cmd %x still active\n",
3378 phba->sli.sli_flag, actcmd);
3379 break;
3380 }
3381 }
3382}
3383
3384
3385
3386
3387
3388
3389
3390
3391
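/**
 * lpfc_sli4_node_prep - Assign RPIs for active nodes
 * @phba: pointer to lpfc hba data structure.
 *
 * Allocate RPIs for all active remote nodes. This is needed whenever
 * an SLI4 adapter is reset and the driver is not unloading. Its purpose
 * is to fix up the temporary rpi assignments.
 **/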
3392void
3393lpfc_sli4_node_prep(struct lpfc_hba *phba)
3394{
3395 struct lpfc_nodelist *ndlp, *next_ndlp;
3396 struct lpfc_vport **vports;
3397 int i, rpi;
3398
3399 if (phba->sli_rev != LPFC_SLI_REV4)
3400 return;
3401
3402 vports = lpfc_create_vport_work_array(phba);
3403 if (vports == NULL)
3404 return;
3405
3406 for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) {
3407 if (vports[i]->load_flag & FC_UNLOADING)
3408 continue;
3409
3410 list_for_each_entry_safe(ndlp, next_ndlp,
3411 &vports[i]->fc_nodes,
3412 nlp_listp) {
3413 rpi = lpfc_sli4_alloc_rpi(phba);
3414 if (rpi == LPFC_RPI_ALLOC_ERROR) {
3415
3416 continue;
3417 }
3418 ndlp->nlp_rpi = rpi;
3419 lpfc_printf_vlog(ndlp->vport, KERN_INFO,
3420 LOG_NODE | LOG_DISCOVERY,
3421 "0009 Assign RPI x%x to ndlp x%px "
3422 "DID:x%06x flg:x%x\n",
3423 ndlp->nlp_rpi, ndlp, ndlp->nlp_DID,
3424 ndlp->nlp_flag);
3425 }
3426 }
3427 lpfc_destroy_vport_work_array(phba, vports);
3428}
3429
3430
3431
3432
3433
3434
3435
3436
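/**
 * lpfc_create_expedite_pool - create expedite pool
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine moves a batch of XRIs from the lpfc_io_buf_list_put of HWQ 0
 * to the expedite pool and marks them as expedite.
 **/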
3437static void lpfc_create_expedite_pool(struct lpfc_hba *phba)
3438{
3439 struct lpfc_sli4_hdw_queue *qp;
3440 struct lpfc_io_buf *lpfc_ncmd;
3441 struct lpfc_io_buf *lpfc_ncmd_next;
3442 struct lpfc_epd_pool *epd_pool;
3443 unsigned long iflag;
3444
3445 epd_pool = &phba->epd_pool;
3446 qp = &phba->sli4_hba.hdwq[0];
3447
3448 spin_lock_init(&epd_pool->lock);
3449 spin_lock_irqsave(&qp->io_buf_list_put_lock, iflag);
3450 spin_lock(&epd_pool->lock);
3451 INIT_LIST_HEAD(&epd_pool->list);
3452 list_for_each_entry_safe(lpfc_ncmd, lpfc_ncmd_next,
3453 &qp->lpfc_io_buf_list_put, list) {
3454 list_move_tail(&lpfc_ncmd->list, &epd_pool->list);
3455 lpfc_ncmd->expedite = true;
3456 qp->put_io_bufs--;
3457 epd_pool->count++;
3458 if (epd_pool->count >= XRI_BATCH)
3459 break;
3460 }
3461 spin_unlock(&epd_pool->lock);
3462 spin_unlock_irqrestore(&qp->io_buf_list_put_lock, iflag);
3463}
3464
3465
3466
3467
3468
3469
3470
3471
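/**
 * lpfc_destroy_expedite_pool - destroy expedite pool
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine returns XRIs from the expedite pool to the
 * lpfc_io_buf_list_put of HWQ 0 and clears the expedite mark.
 **/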
3472static void lpfc_destroy_expedite_pool(struct lpfc_hba *phba)
3473{
3474 struct lpfc_sli4_hdw_queue *qp;
3475 struct lpfc_io_buf *lpfc_ncmd;
3476 struct lpfc_io_buf *lpfc_ncmd_next;
3477 struct lpfc_epd_pool *epd_pool;
3478 unsigned long iflag;
3479
3480 epd_pool = &phba->epd_pool;
3481 qp = &phba->sli4_hba.hdwq[0];
3482
3483 spin_lock_irqsave(&qp->io_buf_list_put_lock, iflag);
3484 spin_lock(&epd_pool->lock);
3485 list_for_each_entry_safe(lpfc_ncmd, lpfc_ncmd_next,
3486 &epd_pool->list, list) {
3487 list_move_tail(&lpfc_ncmd->list,
3488 &qp->lpfc_io_buf_list_put);
		/* Clear the mark set by lpfc_create_expedite_pool */
		lpfc_ncmd->expedite = false;
3490 qp->put_io_bufs++;
3491 epd_pool->count--;
3492 }
3493 spin_unlock(&epd_pool->lock);
3494 spin_unlock_irqrestore(&qp->io_buf_list_put_lock, iflag);
3495}
3496
3497
3498
3499
3500
3501
3502
3503
3504
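/**
 * lpfc_create_multixri_pools - create multi-XRI pools
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine initializes the public (pbl) and private (pvt) XRI pools of
 * every hardware queue and then moves the XRIs from each queue's
 * lpfc_io_buf_list_put over to its public pool.
 **/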
3505void lpfc_create_multixri_pools(struct lpfc_hba *phba)
3506{
3507 u32 i, j;
3508 u32 hwq_count;
3509 u32 count_per_hwq;
3510 struct lpfc_io_buf *lpfc_ncmd;
3511 struct lpfc_io_buf *lpfc_ncmd_next;
3512 unsigned long iflag;
3513 struct lpfc_sli4_hdw_queue *qp;
3514 struct lpfc_multixri_pool *multixri_pool;
3515 struct lpfc_pbl_pool *pbl_pool;
3516 struct lpfc_pvt_pool *pvt_pool;
3517
3518 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
3519 "1234 num_hdw_queue=%d num_present_cpu=%d common_xri_cnt=%d\n",
3520 phba->cfg_hdw_queue, phba->sli4_hba.num_present_cpu,
3521 phba->sli4_hba.io_xri_cnt);
3522
3523 if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME)
3524 lpfc_create_expedite_pool(phba);
3525
3526 hwq_count = phba->cfg_hdw_queue;
3527 count_per_hwq = phba->sli4_hba.io_xri_cnt / hwq_count;
3528
3529 for (i = 0; i < hwq_count; i++) {
3530 multixri_pool = kzalloc(sizeof(*multixri_pool), GFP_KERNEL);
3531
3532 if (!multixri_pool) {
3533 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
3534 "1238 Failed to allocate memory for "
3535 "multixri_pool\n");
3536
3537 if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME)
3538 lpfc_destroy_expedite_pool(phba);
3539
3540 j = 0;
3541 while (j < i) {
3542 qp = &phba->sli4_hba.hdwq[j];
3543 kfree(qp->p_multixri_pool);
3544 j++;
3545 }
3546 phba->cfg_xri_rebalancing = 0;
3547 return;
3548 }
3549
3550 qp = &phba->sli4_hba.hdwq[i];
3551 qp->p_multixri_pool = multixri_pool;
3552
3553 multixri_pool->xri_limit = count_per_hwq;
3554 multixri_pool->rrb_next_hwqid = i;
3555
3556
3557 pbl_pool = &multixri_pool->pbl_pool;
3558 spin_lock_init(&pbl_pool->lock);
3559 spin_lock_irqsave(&qp->io_buf_list_put_lock, iflag);
3560 spin_lock(&pbl_pool->lock);
3561 INIT_LIST_HEAD(&pbl_pool->list);
3562 list_for_each_entry_safe(lpfc_ncmd, lpfc_ncmd_next,
3563 &qp->lpfc_io_buf_list_put, list) {
3564 list_move_tail(&lpfc_ncmd->list, &pbl_pool->list);
3565 qp->put_io_bufs--;
3566 pbl_pool->count++;
3567 }
3568 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
3569 "1235 Moved %d buffers from PUT list over to pbl_pool[%d]\n",
3570 pbl_pool->count, i);
3571 spin_unlock(&pbl_pool->lock);
3572 spin_unlock_irqrestore(&qp->io_buf_list_put_lock, iflag);
3573
3574
3575 pvt_pool = &multixri_pool->pvt_pool;
3576 pvt_pool->high_watermark = multixri_pool->xri_limit / 2;
3577 pvt_pool->low_watermark = XRI_BATCH;
3578 spin_lock_init(&pvt_pool->lock);
3579 spin_lock_irqsave(&pvt_pool->lock, iflag);
3580 INIT_LIST_HEAD(&pvt_pool->list);
3581 pvt_pool->count = 0;
3582 spin_unlock_irqrestore(&pvt_pool->lock, iflag);
3583 }
3584}
3585
3586
3587
3588
3589
3590
3591
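/**
 * lpfc_destroy_multixri_pools - destroy multi-XRI pools
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine returns the XRIs held in the public and private pools back
 * to the lpfc_io_buf_list_put list of each hardware queue and frees the
 * per-HWQ pool structures.
 **/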
3592static void lpfc_destroy_multixri_pools(struct lpfc_hba *phba)
3593{
3594 u32 i;
3595 u32 hwq_count;
3596 struct lpfc_io_buf *lpfc_ncmd;
3597 struct lpfc_io_buf *lpfc_ncmd_next;
3598 unsigned long iflag;
3599 struct lpfc_sli4_hdw_queue *qp;
3600 struct lpfc_multixri_pool *multixri_pool;
3601 struct lpfc_pbl_pool *pbl_pool;
3602 struct lpfc_pvt_pool *pvt_pool;
3603
3604 if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME)
3605 lpfc_destroy_expedite_pool(phba);
3606
3607 if (!(phba->pport->load_flag & FC_UNLOADING))
3608 lpfc_sli_flush_io_rings(phba);
3609
3610 hwq_count = phba->cfg_hdw_queue;
3611
3612 for (i = 0; i < hwq_count; i++) {
3613 qp = &phba->sli4_hba.hdwq[i];
3614 multixri_pool = qp->p_multixri_pool;
3615 if (!multixri_pool)
3616 continue;
3617
3618 qp->p_multixri_pool = NULL;
3619
3620 spin_lock_irqsave(&qp->io_buf_list_put_lock, iflag);
3621
3622
3623 pbl_pool = &multixri_pool->pbl_pool;
3624 spin_lock(&pbl_pool->lock);
3625
3626 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
3627 "1236 Moving %d buffers from pbl_pool[%d] TO PUT list\n",
3628 pbl_pool->count, i);
3629
3630 list_for_each_entry_safe(lpfc_ncmd, lpfc_ncmd_next,
3631 &pbl_pool->list, list) {
3632 list_move_tail(&lpfc_ncmd->list,
3633 &qp->lpfc_io_buf_list_put);
3634 qp->put_io_bufs++;
3635 pbl_pool->count--;
3636 }
3637
3638 INIT_LIST_HEAD(&pbl_pool->list);
3639 pbl_pool->count = 0;
3640
3641 spin_unlock(&pbl_pool->lock);
3642
3643
3644 pvt_pool = &multixri_pool->pvt_pool;
3645 spin_lock(&pvt_pool->lock);
3646
3647 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
3648 "1237 Moving %d buffers from pvt_pool[%d] TO PUT list\n",
3649 pvt_pool->count, i);
3650
3651 list_for_each_entry_safe(lpfc_ncmd, lpfc_ncmd_next,
3652 &pvt_pool->list, list) {
3653 list_move_tail(&lpfc_ncmd->list,
3654 &qp->lpfc_io_buf_list_put);
3655 qp->put_io_bufs++;
3656 pvt_pool->count--;
3657 }
3658
3659 INIT_LIST_HEAD(&pvt_pool->list);
3660 pvt_pool->count = 0;
3661
3662 spin_unlock(&pvt_pool->lock);
3663 spin_unlock_irqrestore(&qp->io_buf_list_put_lock, iflag);
3664
3665 kfree(multixri_pool);
3666 }
3667}
3668
3669
3670
3671
3672
3673
3674
3675
3676
3677
3678
3679
3680
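/**
 * lpfc_online - Initialize and bring a HBA online
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine initializes the HBA and brings a HBA online. During this
 * process, the management interface is blocked to prevent user space access
 * to the HBA interfering with the HBA initialization.
 *
 * Return codes
 *   0 - successful
 *   1 - failed
 **/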
3681int
3682lpfc_online(struct lpfc_hba *phba)
3683{
3684 struct lpfc_vport *vport;
3685 struct lpfc_vport **vports;
3686 int i, error = 0;
3687 bool vpis_cleared = false;
3688
3689 if (!phba)
3690 return 0;
3691 vport = phba->pport;
3692
3693 if (!(vport->fc_flag & FC_OFFLINE_MODE))
3694 return 0;
3695
3696 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
3697 "0458 Bring Adapter online\n");
3698
3699 lpfc_block_mgmt_io(phba, LPFC_MBX_WAIT);
3700
3701 if (phba->sli_rev == LPFC_SLI_REV4) {
3702 if (lpfc_sli4_hba_setup(phba)) {
3703 lpfc_unblock_mgmt_io(phba);
3704 return 1;
3705 }
3706 spin_lock_irq(&phba->hbalock);
3707 if (!phba->sli4_hba.max_cfg_param.vpi_used)
3708 vpis_cleared = true;
3709 spin_unlock_irq(&phba->hbalock);
3710
3711
3712
3713
3714 if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME &&
3715 !phba->nvmet_support) {
3716 error = lpfc_nvme_create_localport(phba->pport);
3717 if (error)
3718 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
3719 "6132 NVME restore reg failed "
3720 "on nvmei error x%x\n", error);
3721 }
3722 } else {
3723 lpfc_sli_queue_init(phba);
3724 if (lpfc_sli_hba_setup(phba)) {
3725 lpfc_unblock_mgmt_io(phba);
3726 return 1;
3727 }
3728 }
3729
3730 vports = lpfc_create_vport_work_array(phba);
3731 if (vports != NULL) {
3732 for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) {
3733 struct Scsi_Host *shost;
3734 shost = lpfc_shost_from_vport(vports[i]);
3735 spin_lock_irq(shost->host_lock);
3736 vports[i]->fc_flag &= ~FC_OFFLINE_MODE;
3737 if (phba->sli3_options & LPFC_SLI3_NPIV_ENABLED)
3738 vports[i]->fc_flag |= FC_VPORT_NEEDS_REG_VPI;
3739 if (phba->sli_rev == LPFC_SLI_REV4) {
3740 vports[i]->fc_flag |= FC_VPORT_NEEDS_INIT_VPI;
3741 if ((vpis_cleared) &&
3742 (vports[i]->port_type !=
3743 LPFC_PHYSICAL_PORT))
3744 vports[i]->vpi = 0;
3745 }
3746 spin_unlock_irq(shost->host_lock);
3747 }
3748 }
3749 lpfc_destroy_vport_work_array(phba, vports);
3750
3751 if (phba->cfg_xri_rebalancing)
3752 lpfc_create_multixri_pools(phba);
3753
3754 lpfc_cpuhp_add(phba);
3755
3756 lpfc_unblock_mgmt_io(phba);
3757 return 0;
3758}
3759
3760
3761
3762
3763
3764
3765
3766
3767
3768
3769
3770
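/**
 * lpfc_unblock_mgmt_io - Mark a HBA's management interface to be not blocked
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine marks a HBA's management interface as not blocked. Once the
 * HBA's management interface is marked as not blocked, all the user space
 * access to the HBA, whether they are from sysfs interface or libdfc
 * interface will be allowed.
 **/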
3771void
3772lpfc_unblock_mgmt_io(struct lpfc_hba * phba)
3773{
3774 unsigned long iflag;
3775
3776 spin_lock_irqsave(&phba->hbalock, iflag);
3777 phba->sli.sli_flag &= ~LPFC_BLOCK_MGMT_IO;
3778 spin_unlock_irqrestore(&phba->hbalock, iflag);
3779}
3780
3781
3782
3783
3784
3785
3786
3787
3788
3789
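/**
 * lpfc_offline_prep - Prepare a HBA to be brought offline
 * @phba: pointer to lpfc hba data structure.
 * @mbx_action: flag for mailbox shutdown action.
 *
 * This routine is invoked to prepare a HBA to be brought offline. It performs
 * unregistration login to all the nodes on all vports and flushes the mailbox
 * queue to make it ready to be brought offline.
 **/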
3790void
3791lpfc_offline_prep(struct lpfc_hba *phba, int mbx_action)
3792{
3793 struct lpfc_vport *vport = phba->pport;
3794 struct lpfc_nodelist *ndlp, *next_ndlp;
3795 struct lpfc_vport **vports;
3796 struct Scsi_Host *shost;
3797 int i;
3798 int offline;
3799 bool hba_pci_err;
3800
3801 if (vport->fc_flag & FC_OFFLINE_MODE)
3802 return;
3803
3804 lpfc_block_mgmt_io(phba, mbx_action);
3805
3806 lpfc_linkdown(phba);
3807
3808 offline = pci_channel_offline(phba->pcidev);
3809 hba_pci_err = test_bit(HBA_PCI_ERR, &phba->bit_flags);
3810
3811
3812 vports = lpfc_create_vport_work_array(phba);
3813 if (vports != NULL) {
3814 for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) {
3815 if (vports[i]->load_flag & FC_UNLOADING)
3816 continue;
3817 shost = lpfc_shost_from_vport(vports[i]);
3818 spin_lock_irq(shost->host_lock);
3819 vports[i]->vpi_state &= ~LPFC_VPI_REGISTERED;
3820 vports[i]->fc_flag |= FC_VPORT_NEEDS_REG_VPI;
3821 vports[i]->fc_flag &= ~FC_VFI_REGISTERED;
3822 spin_unlock_irq(shost->host_lock);
3823
3824 shost = lpfc_shost_from_vport(vports[i]);
3825 list_for_each_entry_safe(ndlp, next_ndlp,
3826 &vports[i]->fc_nodes,
3827 nlp_listp) {
3828
3829 spin_lock_irq(&ndlp->lock);
3830 ndlp->nlp_flag &= ~NLP_NPR_ADISC;
3831 spin_unlock_irq(&ndlp->lock);
3832
3833 if (offline || hba_pci_err) {
3834 spin_lock_irq(&ndlp->lock);
3835 ndlp->nlp_flag &= ~(NLP_UNREG_INP |
3836 NLP_RPI_REGISTERED);
3837 spin_unlock_irq(&ndlp->lock);
3838 if (phba->sli_rev == LPFC_SLI_REV4)
3839 lpfc_sli_rpi_release(vports[i],
3840 ndlp);
3841 } else {
3842 lpfc_unreg_rpi(vports[i], ndlp);
3843 }
3844
3845
3846
3847
3848
3849 if (phba->sli_rev == LPFC_SLI_REV4) {
3850 lpfc_printf_vlog(vports[i], KERN_INFO,
3851 LOG_NODE | LOG_DISCOVERY,
3852 "0011 Free RPI x%x on "
3853 "ndlp: x%px did x%x\n",
3854 ndlp->nlp_rpi, ndlp,
3855 ndlp->nlp_DID);
3856 lpfc_sli4_free_rpi(phba, ndlp->nlp_rpi);
3857 ndlp->nlp_rpi = LPFC_RPI_ALLOC_ERROR;
3858 }
3859
3860 if (ndlp->nlp_type & NLP_FABRIC) {
3861 lpfc_disc_state_machine(vports[i], ndlp,
3862 NULL, NLP_EVT_DEVICE_RECOVERY);
3863
3864
3865
3866
3867
3868
3869
3870
3871 if (!(ndlp->save_flags &
3872 NLP_IN_RECOV_POST_DEV_LOSS) &&
3873 !(ndlp->fc4_xpt_flags &
3874 (NVME_XPT_REGD | SCSI_XPT_REGD)))
3875 lpfc_disc_state_machine
3876 (vports[i], ndlp,
3877 NULL,
3878 NLP_EVT_DEVICE_RM);
3879 }
3880 }
3881 }
3882 }
3883 lpfc_destroy_vport_work_array(phba, vports);
3884
3885 lpfc_sli_mbox_sys_shutdown(phba, mbx_action);
3886
3887 if (phba->wq)
3888 flush_workqueue(phba->wq);
3889}
3890
3891
3892
3893
3894
3895
3896
3897
3898
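/**
 * lpfc_offline - Bring a HBA offline
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine actually brings a HBA offline. It stops all the timers
 * associated with the HBA, brings down the SLI layer, and eventually
 * marks the HBA as in offline state for the upper layer protocol.
 **/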
3899void
3900lpfc_offline(struct lpfc_hba *phba)
3901{
3902 struct Scsi_Host *shost;
3903 struct lpfc_vport **vports;
3904 int i;
3905
3906 if (phba->pport->fc_flag & FC_OFFLINE_MODE)
3907 return;
3908
3909
3910 lpfc_stop_port(phba);
3911
3912
3913
3914
3915 lpfc_nvmet_destroy_targetport(phba);
3916 lpfc_nvme_destroy_localport(phba->pport);
3917
3918 vports = lpfc_create_vport_work_array(phba);
3919 if (vports != NULL)
3920 for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++)
3921 lpfc_stop_vport_timers(vports[i]);
3922 lpfc_destroy_vport_work_array(phba, vports);
3923 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
3924 "0460 Bring Adapter offline\n");
3925
3926
3927 lpfc_sli_hba_down(phba);
3928 spin_lock_irq(&phba->hbalock);
3929 phba->work_ha = 0;
3930 spin_unlock_irq(&phba->hbalock);
3931 vports = lpfc_create_vport_work_array(phba);
3932 if (vports != NULL)
3933 for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) {
3934 shost = lpfc_shost_from_vport(vports[i]);
3935 spin_lock_irq(shost->host_lock);
3936 vports[i]->work_port_events = 0;
3937 vports[i]->fc_flag |= FC_OFFLINE_MODE;
3938 spin_unlock_irq(shost->host_lock);
3939 }
3940 lpfc_destroy_vport_work_array(phba, vports);
3941
3942
3943
3944 if (phba->pport->fc_flag & FC_OFFLINE_MODE)
3945 __lpfc_cpuhp_remove(phba);
3946
3947 if (phba->cfg_xri_rebalancing)
3948 lpfc_destroy_multixri_pools(phba);
3949}
3950
3951
3952
3953
3954
3955
3956
3957
3958
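/**
 * lpfc_scsi_free - Free all the SCSI buffers and IOCBs from driver lists
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is to free all the SCSI buffers and IOCBs from the driver
 * lists back to the kernel. It is called to free the internal resources
 * before the device is removed from the system.
 **/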
3959static void
3960lpfc_scsi_free(struct lpfc_hba *phba)
3961{
3962 struct lpfc_io_buf *sb, *sb_next;
3963
3964 if (!(phba->cfg_enable_fc4_type & LPFC_ENABLE_FCP))
3965 return;
3966
3967 spin_lock_irq(&phba->hbalock);
3968
3969
3970
3971 spin_lock(&phba->scsi_buf_list_put_lock);
3972 list_for_each_entry_safe(sb, sb_next, &phba->lpfc_scsi_buf_list_put,
3973 list) {
3974 list_del(&sb->list);
3975 dma_pool_free(phba->lpfc_sg_dma_buf_pool, sb->data,
3976 sb->dma_handle);
3977 kfree(sb);
3978 phba->total_scsi_bufs--;
3979 }
3980 spin_unlock(&phba->scsi_buf_list_put_lock);
3981
3982 spin_lock(&phba->scsi_buf_list_get_lock);
3983 list_for_each_entry_safe(sb, sb_next, &phba->lpfc_scsi_buf_list_get,
3984 list) {
3985 list_del(&sb->list);
3986 dma_pool_free(phba->lpfc_sg_dma_buf_pool, sb->data,
3987 sb->dma_handle);
3988 kfree(sb);
3989 phba->total_scsi_bufs--;
3990 }
3991 spin_unlock(&phba->scsi_buf_list_get_lock);
3992 spin_unlock_irq(&phba->hbalock);
3993}
3994
3995
3996
3997
3998
3999
4000
4001
4002
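/**
 * lpfc_io_free - Free all the IO buffers and IOCBs from driver lists
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is to free all the IO buffers and IOCBs from the per-HWQ
 * get and put lists back to the kernel. It is called to free the internal
 * resources before the device is removed from the system.
 **/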
4003void
4004lpfc_io_free(struct lpfc_hba *phba)
4005{
4006 struct lpfc_io_buf *lpfc_ncmd, *lpfc_ncmd_next;
4007 struct lpfc_sli4_hdw_queue *qp;
4008 int idx;
4009
4010 for (idx = 0; idx < phba->cfg_hdw_queue; idx++) {
4011 qp = &phba->sli4_hba.hdwq[idx];
4012
4013 spin_lock(&qp->io_buf_list_put_lock);
4014 list_for_each_entry_safe(lpfc_ncmd, lpfc_ncmd_next,
4015 &qp->lpfc_io_buf_list_put,
4016 list) {
4017 list_del(&lpfc_ncmd->list);
4018 qp->put_io_bufs--;
4019 dma_pool_free(phba->lpfc_sg_dma_buf_pool,
4020 lpfc_ncmd->data, lpfc_ncmd->dma_handle);
4021 if (phba->cfg_xpsgl && !phba->nvmet_support)
4022 lpfc_put_sgl_per_hdwq(phba, lpfc_ncmd);
4023 lpfc_put_cmd_rsp_buf_per_hdwq(phba, lpfc_ncmd);
4024 kfree(lpfc_ncmd);
4025 qp->total_io_bufs--;
4026 }
4027 spin_unlock(&qp->io_buf_list_put_lock);
4028
4029 spin_lock(&qp->io_buf_list_get_lock);
4030 list_for_each_entry_safe(lpfc_ncmd, lpfc_ncmd_next,
4031 &qp->lpfc_io_buf_list_get,
4032 list) {
4033 list_del(&lpfc_ncmd->list);
4034 qp->get_io_bufs--;
4035 dma_pool_free(phba->lpfc_sg_dma_buf_pool,
4036 lpfc_ncmd->data, lpfc_ncmd->dma_handle);
4037 if (phba->cfg_xpsgl && !phba->nvmet_support)
4038 lpfc_put_sgl_per_hdwq(phba, lpfc_ncmd);
4039 lpfc_put_cmd_rsp_buf_per_hdwq(phba, lpfc_ncmd);
4040 kfree(lpfc_ncmd);
4041 qp->total_io_bufs--;
4042 }
4043 spin_unlock(&qp->io_buf_list_get_lock);
4044 }
4045}
4046
4047
4048
4049
4050
4051
4052
4053
4054
4055
4056
4057
4058
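/**
 * lpfc_sli4_els_sgl_update - update ELS xri-sgl sizing and mapping
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine grows or shrinks the ELS sgl list to the currently required
 * number of entries, then walks the list to update the physical XRIs
 * assigned after a port function reset. During port initialization the
 * list starts out empty.
 *
 * Return codes
 *   0 - successful
 *   -ENOMEM - allocation failure
 **/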
4059int
4060lpfc_sli4_els_sgl_update(struct lpfc_hba *phba)
4061{
4062 struct lpfc_sglq *sglq_entry = NULL, *sglq_entry_next = NULL;
4063 uint16_t i, lxri, xri_cnt, els_xri_cnt;
4064 LIST_HEAD(els_sgl_list);
4065 int rc;
4066
4067
4068
4069
4070 els_xri_cnt = lpfc_sli4_get_els_iocb_cnt(phba);
4071
4072 if (els_xri_cnt > phba->sli4_hba.els_xri_cnt) {
4073
4074 xri_cnt = els_xri_cnt - phba->sli4_hba.els_xri_cnt;
4075 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
4076 "3157 ELS xri-sgl count increased from "
4077 "%d to %d\n", phba->sli4_hba.els_xri_cnt,
4078 els_xri_cnt);
4079
4080 for (i = 0; i < xri_cnt; i++) {
4081 sglq_entry = kzalloc(sizeof(struct lpfc_sglq),
4082 GFP_KERNEL);
4083 if (sglq_entry == NULL) {
4084 lpfc_printf_log(phba, KERN_ERR,
4085 LOG_TRACE_EVENT,
4086 "2562 Failure to allocate an "
4087 "ELS sgl entry:%d\n", i);
4088 rc = -ENOMEM;
4089 goto out_free_mem;
4090 }
4091 sglq_entry->buff_type = GEN_BUFF_TYPE;
4092 sglq_entry->virt = lpfc_mbuf_alloc(phba, 0,
4093 &sglq_entry->phys);
4094 if (sglq_entry->virt == NULL) {
4095 kfree(sglq_entry);
4096 lpfc_printf_log(phba, KERN_ERR,
4097 LOG_TRACE_EVENT,
4098 "2563 Failure to allocate an "
4099 "ELS mbuf:%d\n", i);
4100 rc = -ENOMEM;
4101 goto out_free_mem;
4102 }
4103 sglq_entry->sgl = sglq_entry->virt;
4104 memset(sglq_entry->sgl, 0, LPFC_BPL_SIZE);
4105 sglq_entry->state = SGL_FREED;
4106 list_add_tail(&sglq_entry->list, &els_sgl_list);
4107 }
4108 spin_lock_irq(&phba->sli4_hba.sgl_list_lock);
4109 list_splice_init(&els_sgl_list,
4110 &phba->sli4_hba.lpfc_els_sgl_list);
4111 spin_unlock_irq(&phba->sli4_hba.sgl_list_lock);
4112 } else if (els_xri_cnt < phba->sli4_hba.els_xri_cnt) {
4113
4114 xri_cnt = phba->sli4_hba.els_xri_cnt - els_xri_cnt;
4115 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
4116 "3158 ELS xri-sgl count decreased from "
4117 "%d to %d\n", phba->sli4_hba.els_xri_cnt,
4118 els_xri_cnt);
4119 spin_lock_irq(&phba->sli4_hba.sgl_list_lock);
4120 list_splice_init(&phba->sli4_hba.lpfc_els_sgl_list,
4121 &els_sgl_list);
4122
4123 for (i = 0; i < xri_cnt; i++) {
4124 list_remove_head(&els_sgl_list,
4125 sglq_entry, struct lpfc_sglq, list);
4126 if (sglq_entry) {
4127 __lpfc_mbuf_free(phba, sglq_entry->virt,
4128 sglq_entry->phys);
4129 kfree(sglq_entry);
4130 }
4131 }
4132 list_splice_init(&els_sgl_list,
4133 &phba->sli4_hba.lpfc_els_sgl_list);
4134 spin_unlock_irq(&phba->sli4_hba.sgl_list_lock);
4135 } else
4136 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
4137 "3163 ELS xri-sgl count unchanged: %d\n",
4138 els_xri_cnt);
4139 phba->sli4_hba.els_xri_cnt = els_xri_cnt;
4140
4141
4142 sglq_entry = NULL;
4143 sglq_entry_next = NULL;
4144 list_for_each_entry_safe(sglq_entry, sglq_entry_next,
4145 &phba->sli4_hba.lpfc_els_sgl_list, list) {
4146 lxri = lpfc_sli4_next_xritag(phba);
4147 if (lxri == NO_XRI) {
4148 lpfc_printf_log(phba, KERN_ERR,
4149 LOG_TRACE_EVENT,
4150 "2400 Failed to allocate xri for "
4151 "ELS sgl\n");
4152 rc = -ENOMEM;
4153 goto out_free_mem;
4154 }
4155 sglq_entry->sli4_lxritag = lxri;
4156 sglq_entry->sli4_xritag = phba->sli4_hba.xri_ids[lxri];
4157 }
4158 return 0;
4159
4160out_free_mem:
4161 lpfc_free_els_sgl_list(phba);
4162 return rc;
4163}
4164
4165
4166
4167
4168
4169
4170
4171
4172
4173
4174
4175
4176
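/**
 * lpfc_sli4_nvmet_sgl_update - update xri-sgl sizing and mapping
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine grows or shrinks the NVMET sgl list to the new target count
 * (max_xri less the ELS count), then walks the list to update the physical
 * XRIs assigned after a port function reset.
 *
 * Return codes
 *   0 - successful
 *   -ENOMEM - allocation failure
 **/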
4177int
4178lpfc_sli4_nvmet_sgl_update(struct lpfc_hba *phba)
4179{
4180 struct lpfc_sglq *sglq_entry = NULL, *sglq_entry_next = NULL;
4181 uint16_t i, lxri, xri_cnt, els_xri_cnt;
4182 uint16_t nvmet_xri_cnt;
4183 LIST_HEAD(nvmet_sgl_list);
4184 int rc;
4185
4186
4187
4188
4189 els_xri_cnt = lpfc_sli4_get_els_iocb_cnt(phba);
4190
4191
4192 nvmet_xri_cnt = phba->sli4_hba.max_cfg_param.max_xri - els_xri_cnt;
4193 if (nvmet_xri_cnt > phba->sli4_hba.nvmet_xri_cnt) {
4194
4195 xri_cnt = nvmet_xri_cnt - phba->sli4_hba.nvmet_xri_cnt;
4196 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
4197 "6302 NVMET xri-sgl cnt grew from %d to %d\n",
4198 phba->sli4_hba.nvmet_xri_cnt, nvmet_xri_cnt);
4199
4200 for (i = 0; i < xri_cnt; i++) {
4201 sglq_entry = kzalloc(sizeof(struct lpfc_sglq),
4202 GFP_KERNEL);
4203 if (sglq_entry == NULL) {
4204 lpfc_printf_log(phba, KERN_ERR,
4205 LOG_TRACE_EVENT,
4206 "6303 Failure to allocate an "
4207 "NVMET sgl entry:%d\n", i);
4208 rc = -ENOMEM;
4209 goto out_free_mem;
4210 }
4211 sglq_entry->buff_type = NVMET_BUFF_TYPE;
4212 sglq_entry->virt = lpfc_nvmet_buf_alloc(phba, 0,
4213 &sglq_entry->phys);
4214 if (sglq_entry->virt == NULL) {
4215 kfree(sglq_entry);
4216 lpfc_printf_log(phba, KERN_ERR,
4217 LOG_TRACE_EVENT,
4218 "6304 Failure to allocate an "
4219 "NVMET buf:%d\n", i);
4220 rc = -ENOMEM;
4221 goto out_free_mem;
4222 }
4223 sglq_entry->sgl = sglq_entry->virt;
4224 memset(sglq_entry->sgl, 0,
4225 phba->cfg_sg_dma_buf_size);
4226 sglq_entry->state = SGL_FREED;
4227 list_add_tail(&sglq_entry->list, &nvmet_sgl_list);
4228 }
4229 spin_lock_irq(&phba->hbalock);
4230 spin_lock(&phba->sli4_hba.sgl_list_lock);
4231 list_splice_init(&nvmet_sgl_list,
4232 &phba->sli4_hba.lpfc_nvmet_sgl_list);
4233 spin_unlock(&phba->sli4_hba.sgl_list_lock);
4234 spin_unlock_irq(&phba->hbalock);
4235 } else if (nvmet_xri_cnt < phba->sli4_hba.nvmet_xri_cnt) {
4236
4237 xri_cnt = phba->sli4_hba.nvmet_xri_cnt - nvmet_xri_cnt;
4238 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
4239 "6305 NVMET xri-sgl count decreased from "
4240 "%d to %d\n", phba->sli4_hba.nvmet_xri_cnt,
4241 nvmet_xri_cnt);
4242 spin_lock_irq(&phba->hbalock);
4243 spin_lock(&phba->sli4_hba.sgl_list_lock);
4244 list_splice_init(&phba->sli4_hba.lpfc_nvmet_sgl_list,
4245 &nvmet_sgl_list);
4246
4247 for (i = 0; i < xri_cnt; i++) {
4248 list_remove_head(&nvmet_sgl_list,
4249 sglq_entry, struct lpfc_sglq, list);
4250 if (sglq_entry) {
4251 lpfc_nvmet_buf_free(phba, sglq_entry->virt,
4252 sglq_entry->phys);
4253 kfree(sglq_entry);
4254 }
4255 }
4256 list_splice_init(&nvmet_sgl_list,
4257 &phba->sli4_hba.lpfc_nvmet_sgl_list);
4258 spin_unlock(&phba->sli4_hba.sgl_list_lock);
4259 spin_unlock_irq(&phba->hbalock);
4260 } else
4261 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
4262 "6306 NVMET xri-sgl count unchanged: %d\n",
4263 nvmet_xri_cnt);
4264 phba->sli4_hba.nvmet_xri_cnt = nvmet_xri_cnt;
4265
4266
4267 sglq_entry = NULL;
4268 sglq_entry_next = NULL;
4269 list_for_each_entry_safe(sglq_entry, sglq_entry_next,
4270 &phba->sli4_hba.lpfc_nvmet_sgl_list, list) {
4271 lxri = lpfc_sli4_next_xritag(phba);
4272 if (lxri == NO_XRI) {
4273 lpfc_printf_log(phba, KERN_ERR,
4274 LOG_TRACE_EVENT,
4275 "6307 Failed to allocate xri for "
4276 "NVMET sgl\n");
4277 rc = -ENOMEM;
4278 goto out_free_mem;
4279 }
4280 sglq_entry->sli4_lxritag = lxri;
4281 sglq_entry->sli4_xritag = phba->sli4_hba.xri_ids[lxri];
4282 }
4283 return 0;
4284
4285out_free_mem:
4286 lpfc_free_nvmet_sgl_list(phba);
4287 return rc;
4288}
4289
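/**
 * lpfc_io_buf_flush - Flush all the IO buffers from the hardware queues
 * @phba: pointer to lpfc hba data structure.
 * @cbuf: list head on which to collect the flushed IO buffers.
 *
 * This routine empties the get and put IO buffer lists of every hardware
 * queue and inserts the buffers onto @cbuf sorted by XRI so they can be
 * redistributed evenly afterwards.
 *
 * Return codes
 *   The number of IO buffers collected.
 **/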
4290int
4291lpfc_io_buf_flush(struct lpfc_hba *phba, struct list_head *cbuf)
4292{
4293 LIST_HEAD(blist);
4294 struct lpfc_sli4_hdw_queue *qp;
4295 struct lpfc_io_buf *lpfc_cmd;
4296 struct lpfc_io_buf *iobufp, *prev_iobufp;
4297 int idx, cnt, xri, inserted;
4298
4299 cnt = 0;
4300 for (idx = 0; idx < phba->cfg_hdw_queue; idx++) {
4301 qp = &phba->sli4_hba.hdwq[idx];
4302 spin_lock_irq(&qp->io_buf_list_get_lock);
4303 spin_lock(&qp->io_buf_list_put_lock);
4304
4305
4306 list_splice_init(&qp->lpfc_io_buf_list_get, &blist);
4307 list_splice(&qp->lpfc_io_buf_list_put, &blist);
4308 INIT_LIST_HEAD(&qp->lpfc_io_buf_list_get);
4309 INIT_LIST_HEAD(&qp->lpfc_io_buf_list_put);
4310 cnt += qp->get_io_bufs + qp->put_io_bufs;
4311 qp->get_io_bufs = 0;
4312 qp->put_io_bufs = 0;
4313 qp->total_io_bufs = 0;
4314 spin_unlock(&qp->io_buf_list_put_lock);
4315 spin_unlock_irq(&qp->io_buf_list_get_lock);
4316 }
4317
4318
4319
4320
4321
4322
4323 for (idx = 0; idx < cnt; idx++) {
4324 list_remove_head(&blist, lpfc_cmd, struct lpfc_io_buf, list);
4325 if (!lpfc_cmd)
4326 return cnt;
4327 if (idx == 0) {
4328 list_add_tail(&lpfc_cmd->list, cbuf);
4329 continue;
4330 }
4331 xri = lpfc_cmd->cur_iocbq.sli4_xritag;
4332 inserted = 0;
4333 prev_iobufp = NULL;
4334 list_for_each_entry(iobufp, cbuf, list) {
4335 if (xri < iobufp->cur_iocbq.sli4_xritag) {
4336 if (prev_iobufp)
4337 list_add(&lpfc_cmd->list,
4338 &prev_iobufp->list);
4339 else
4340 list_add(&lpfc_cmd->list, cbuf);
4341 inserted = 1;
4342 break;
4343 }
4344 prev_iobufp = iobufp;
4345 }
4346 if (!inserted)
4347 list_add_tail(&lpfc_cmd->list, cbuf);
4348 }
4349 return cnt;
4350}
4351
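/**
 * lpfc_io_buf_replenish - Redistribute flushed IO buffers to the HWQs
 * @phba: pointer to lpfc hba data structure.
 * @cbuf: list of IO buffers to redistribute.
 *
 * This routine hands the IO buffers on @cbuf back to the hardware queues
 * in round-robin order, placing them on each queue's put list.
 *
 * Return codes
 *   The number of IO buffers redistributed.
 **/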
4352int
4353lpfc_io_buf_replenish(struct lpfc_hba *phba, struct list_head *cbuf)
4354{
4355 struct lpfc_sli4_hdw_queue *qp;
4356 struct lpfc_io_buf *lpfc_cmd;
4357 int idx, cnt;
4358
4359 qp = phba->sli4_hba.hdwq;
4360 cnt = 0;
4361 while (!list_empty(cbuf)) {
4362 for (idx = 0; idx < phba->cfg_hdw_queue; idx++) {
4363 list_remove_head(cbuf, lpfc_cmd,
4364 struct lpfc_io_buf, list);
4365 if (!lpfc_cmd)
4366 return cnt;
4367 cnt++;
4368 qp = &phba->sli4_hba.hdwq[idx];
4369 lpfc_cmd->hdwq_no = idx;
4370 lpfc_cmd->hdwq = qp;
4371 lpfc_cmd->cur_iocbq.cmd_cmpl = NULL;
4372 spin_lock(&qp->io_buf_list_put_lock);
4373 list_add_tail(&lpfc_cmd->list,
4374 &qp->lpfc_io_buf_list_put);
4375 qp->put_io_bufs++;
4376 qp->total_io_bufs++;
4377 spin_unlock(&qp->io_buf_list_put_lock);
4378 }
4379 }
4380 return cnt;
4381}
4382
4383
4384
4385
4386
4387
4388
4389
4390
4391
4392
4393
4394
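/**
 * lpfc_sli4_io_sgl_update - update xri-sgl sizing and mapping
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine recalculates the maximum number of IO XRIs (max_xri less
 * the ELS count), releases any buffers over that limit, and then walks the
 * remaining IO buffers to update the physical XRIs assigned after a port
 * function reset.
 *
 * Return codes
 *   0 - successful
 *   -ENOMEM - allocation failure
 **/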
4395int
4396lpfc_sli4_io_sgl_update(struct lpfc_hba *phba)
4397{
4398 struct lpfc_io_buf *lpfc_ncmd = NULL, *lpfc_ncmd_next = NULL;
4399 uint16_t i, lxri, els_xri_cnt;
4400 uint16_t io_xri_cnt, io_xri_max;
4401 LIST_HEAD(io_sgl_list);
4402 int rc, cnt;
4403
4404
4405
4406
4407
4408
4409 els_xri_cnt = lpfc_sli4_get_els_iocb_cnt(phba);
4410 io_xri_max = phba->sli4_hba.max_cfg_param.max_xri - els_xri_cnt;
4411 phba->sli4_hba.io_xri_max = io_xri_max;
4412
	lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
			"6074 Current allocated XRI sgl count:%d, "
			"maximum XRI count:%d els_xri_cnt:%d\n",
			phba->sli4_hba.io_xri_cnt,
			phba->sli4_hba.io_xri_max,
			els_xri_cnt);
4419
4420 cnt = lpfc_io_buf_flush(phba, &io_sgl_list);
4421
4422 if (phba->sli4_hba.io_xri_cnt > phba->sli4_hba.io_xri_max) {
4423
4424 io_xri_cnt = phba->sli4_hba.io_xri_cnt -
4425 phba->sli4_hba.io_xri_max;
4426
4427 for (i = 0; i < io_xri_cnt; i++) {
4428 list_remove_head(&io_sgl_list, lpfc_ncmd,
4429 struct lpfc_io_buf, list);
4430 if (lpfc_ncmd) {
4431 dma_pool_free(phba->lpfc_sg_dma_buf_pool,
4432 lpfc_ncmd->data,
4433 lpfc_ncmd->dma_handle);
4434 kfree(lpfc_ncmd);
4435 }
4436 }
4437 phba->sli4_hba.io_xri_cnt -= io_xri_cnt;
4438 }
4439
4440
4441 lpfc_ncmd = NULL;
4442 lpfc_ncmd_next = NULL;
4443 phba->sli4_hba.io_xri_cnt = cnt;
4444 list_for_each_entry_safe(lpfc_ncmd, lpfc_ncmd_next,
4445 &io_sgl_list, list) {
4446 lxri = lpfc_sli4_next_xritag(phba);
4447 if (lxri == NO_XRI) {
4448 lpfc_printf_log(phba, KERN_ERR,
4449 LOG_TRACE_EVENT,
4450 "6075 Failed to allocate xri for "
4451 "nvme buffer\n");
4452 rc = -ENOMEM;
4453 goto out_free_mem;
4454 }
4455 lpfc_ncmd->cur_iocbq.sli4_lxritag = lxri;
4456 lpfc_ncmd->cur_iocbq.sli4_xritag = phba->sli4_hba.xri_ids[lxri];
4457 }
4458 cnt = lpfc_io_buf_replenish(phba, &io_sgl_list);
4459 return 0;
4460
4461out_free_mem:
4462 lpfc_io_free(phba);
4463 return rc;
4464}
4465
4466
4467
4468
4469
4470
4471
4472
4473
4474
4475
4476
4477
4478
4479
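/**
 * lpfc_new_io_buf - IO buffer allocator for HBA with SLI4 IF spec
 * @phba: pointer to lpfc hba data structure.
 * @num_to_alloc: the requested number of buffers to allocate.
 *
 * This routine allocates IO buffers for a device with the SLI-4 interface
 * spec. If fewer buffers than requested can be allocated, it posts as many
 * as it could.
 *
 * Return codes
 *   The number of buffers allocated and posted to the port.
 **/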
4480int
4481lpfc_new_io_buf(struct lpfc_hba *phba, int num_to_alloc)
4482{
4483 struct lpfc_io_buf *lpfc_ncmd;
4484 struct lpfc_iocbq *pwqeq;
4485 uint16_t iotag, lxri = 0;
4486 int bcnt, num_posted;
4487 LIST_HEAD(prep_nblist);
4488 LIST_HEAD(post_nblist);
4489 LIST_HEAD(nvme_nblist);
4490
4491 phba->sli4_hba.io_xri_cnt = 0;
4492 for (bcnt = 0; bcnt < num_to_alloc; bcnt++) {
4493 lpfc_ncmd = kzalloc(sizeof(*lpfc_ncmd), GFP_KERNEL);
4494 if (!lpfc_ncmd)
4495 break;
4496
		/*
		 * Get memory from the pci pool to map the virt space to
		 * pci bus space for an I/O. The DMA buffer includes the
		 * number of SGE's necessary to support the sg_tablesize.
		 */
		lpfc_ncmd->data = dma_pool_zalloc(phba->lpfc_sg_dma_buf_pool,
						  GFP_KERNEL,
						  &lpfc_ncmd->dma_handle);
4504 if (!lpfc_ncmd->data) {
4505 kfree(lpfc_ncmd);
4506 break;
4507 }
4508
4509 if (phba->cfg_xpsgl && !phba->nvmet_support) {
4510 INIT_LIST_HEAD(&lpfc_ncmd->dma_sgl_xtra_list);
4511 } else {
4512
4513
4514
4515
4516 if ((phba->sli3_options & LPFC_SLI3_BG_ENABLED) &&
4517 (((unsigned long)(lpfc_ncmd->data) &
4518 (unsigned long)(SLI4_PAGE_SIZE - 1)) != 0)) {
4519 lpfc_printf_log(phba, KERN_ERR,
4520 LOG_TRACE_EVENT,
4521 "3369 Memory alignment err: "
4522 "addr=%lx\n",
4523 (unsigned long)lpfc_ncmd->data);
4524 dma_pool_free(phba->lpfc_sg_dma_buf_pool,
4525 lpfc_ncmd->data,
4526 lpfc_ncmd->dma_handle);
4527 kfree(lpfc_ncmd);
4528 break;
4529 }
4530 }
4531
4532 INIT_LIST_HEAD(&lpfc_ncmd->dma_cmd_rsp_list);
4533
4534 lxri = lpfc_sli4_next_xritag(phba);
4535 if (lxri == NO_XRI) {
4536 dma_pool_free(phba->lpfc_sg_dma_buf_pool,
4537 lpfc_ncmd->data, lpfc_ncmd->dma_handle);
4538 kfree(lpfc_ncmd);
4539 break;
4540 }
4541 pwqeq = &lpfc_ncmd->cur_iocbq;
4542
4543
4544 iotag = lpfc_sli_next_iotag(phba, pwqeq);
4545 if (iotag == 0) {
4546 dma_pool_free(phba->lpfc_sg_dma_buf_pool,
4547 lpfc_ncmd->data, lpfc_ncmd->dma_handle);
4548 kfree(lpfc_ncmd);
4549 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
4550 "6121 Failed to allocate IOTAG for"
4551 " XRI:0x%x\n", lxri);
4552 lpfc_sli4_free_xri(phba, lxri);
4553 break;
4554 }
4555 pwqeq->sli4_lxritag = lxri;
4556 pwqeq->sli4_xritag = phba->sli4_hba.xri_ids[lxri];
4557
4558
4559 lpfc_ncmd->dma_sgl = lpfc_ncmd->data;
4560 lpfc_ncmd->dma_phys_sgl = lpfc_ncmd->dma_handle;
4561 lpfc_ncmd->cur_iocbq.io_buf = lpfc_ncmd;
4562 spin_lock_init(&lpfc_ncmd->buf_lock);
4563
4564
4565 list_add_tail(&lpfc_ncmd->list, &post_nblist);
4566 phba->sli4_hba.io_xri_cnt++;
4567 }
4568 lpfc_printf_log(phba, KERN_INFO, LOG_NVME,
4569 "6114 Allocate %d out of %d requested new NVME "
4570 "buffers of size x%zu bytes\n", bcnt, num_to_alloc,
4571 sizeof(*lpfc_ncmd));
4572
4573
4574
4575 if (!list_empty(&post_nblist))
4576 num_posted = lpfc_sli4_post_io_sgl_list(
4577 phba, &post_nblist, bcnt);
4578 else
4579 num_posted = 0;
4580
4581 return num_posted;
4582}
4583
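/**
 * lpfc_get_wwpn - Get the WWPN of the adapter
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine issues a READ_NV mailbox command by polling and returns
 * the port name in host byte order, or (uint64_t)-1 on failure.
 **/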
4584static uint64_t
4585lpfc_get_wwpn(struct lpfc_hba *phba)
4586{
4587 uint64_t wwn;
4588 int rc;
4589 LPFC_MBOXQ_t *mboxq;
4590 MAILBOX_t *mb;
4591
4592 mboxq = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool,
4593 GFP_KERNEL);
4594 if (!mboxq)
4595 return (uint64_t)-1;
4596
4597
4598 lpfc_read_nv(phba, mboxq);
4599 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
4600 if (rc != MBX_SUCCESS) {
		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
				"6019 Mailbox failed, mbxCmd x%x "
				"READ_NV, mbxStatus x%x\n",
				bf_get(lpfc_mqe_command, &mboxq->u.mqe),
				bf_get(lpfc_mqe_status, &mboxq->u.mqe));
		mempool_free(mboxq, phba->mbox_mem_pool);
		return (uint64_t)-1;
4608 }
4609 mb = &mboxq->u.mb;
4610 memcpy(&wwn, (char *)mb->un.varRDnvp.portname, sizeof(uint64_t));
4611
4612 mempool_free(mboxq, phba->mbox_mem_pool);
4613 if (phba->sli_rev == LPFC_SLI_REV4)
4614 return be64_to_cpu(wwn);
4615 else
4616 return rol64(wwn, 32);
4617}
4618
4619
4620
4621
4622
4623
4624
4625
4626
4627
4628
4629
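/**
 * lpfc_vmid_res_alloc - Allocates resources for VMID
 * @phba: pointer to lpfc hba data structure.
 * @vport: pointer to vport data structure
 *
 * This routine allocates the resources needed for VMID support.
 *
 * Return codes
 *	0 on Success
 *	Non-0 on Failure
 **/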
4630static int
4631lpfc_vmid_res_alloc(struct lpfc_hba *phba, struct lpfc_vport *vport)
4632{
4633
4634 if (phba->sli_rev == LPFC_SLI_REV3) {
4635 phba->cfg_vmid_app_header = 0;
4636 phba->cfg_vmid_priority_tagging = 0;
4637 }
4638
4639 if (lpfc_is_vmid_enabled(phba)) {
4640 vport->vmid =
4641 kcalloc(phba->cfg_max_vmid, sizeof(struct lpfc_vmid),
4642 GFP_KERNEL);
4643 if (!vport->vmid)
4644 return -ENOMEM;
4645
4646 rwlock_init(&vport->vmid_lock);
4647
4648
4649 vport->vmid_priority_tagging = phba->cfg_vmid_priority_tagging;
4650 vport->vmid_inactivity_timeout =
4651 phba->cfg_vmid_inactivity_timeout;
4652 vport->max_vmid = phba->cfg_max_vmid;
4653 vport->cur_vmid_cnt = 0;
4654
4655 vport->vmid_priority_range = bitmap_zalloc
4656 (LPFC_VMID_MAX_PRIORITY_RANGE, GFP_KERNEL);
4657
4658 if (!vport->vmid_priority_range) {
4659 kfree(vport->vmid);
4660 return -ENOMEM;
4661 }
4662
4663 hash_init(vport->hash_table);
4664 }
4665 return 0;
4666}
4667
4668
4669
4670
4671
4672
4673
4674
4675
4676
4677
4678
4679
4680
4681
4682
4683
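/**
 * lpfc_create_port - Create an FC port
 * @phba: pointer to lpfc hba data structure.
 * @instance: a unique integer ID to this FC port.
 * @dev: pointer to the device data structure.
 *
 * This routine creates a FC port for the upper layer protocol. The FC port
 * can be created on top of either a physical port or a virtual port provided
 * by the HBA. This routine also allocates a SCSI host data structure (shost)
 * and associates it with the FC port before adding the shost into the SCSI
 * layer.
 *
 * Return codes
 *   @vport - pointer to the virtual N_Port data structure.
 *   NULL - port create failed.
 **/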
4684struct lpfc_vport *
4685lpfc_create_port(struct lpfc_hba *phba, int instance, struct device *dev)
4686{
4687 struct lpfc_vport *vport;
4688 struct Scsi_Host *shost = NULL;
4689 struct scsi_host_template *template;
4690 int error = 0;
4691 int i;
4692 uint64_t wwn;
4693 bool use_no_reset_hba = false;
4694 int rc;
4695
4696 if (lpfc_no_hba_reset_cnt) {
4697 if (phba->sli_rev < LPFC_SLI_REV4 &&
4698 dev == &phba->pcidev->dev) {
4699
4700 lpfc_sli_brdrestart(phba);
4701 rc = lpfc_sli_chipset_init(phba);
4702 if (rc)
4703 return NULL;
4704 }
4705 wwn = lpfc_get_wwpn(phba);
4706 }
4707
4708 for (i = 0; i < lpfc_no_hba_reset_cnt; i++) {
4709 if (wwn == lpfc_no_hba_reset[i]) {
4710 lpfc_printf_log(phba, KERN_ERR,
4711 LOG_TRACE_EVENT,
4712 "6020 Setting use_no_reset port=%llx\n",
4713 wwn);
4714 use_no_reset_hba = true;
4715 break;
4716 }
4717 }
4718
4719
4720 if (dev == &phba->pcidev->dev) {
4721 template = &phba->port_template;
4722
4723 if (phba->cfg_enable_fc4_type & LPFC_ENABLE_FCP) {
4724
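			/* Seed physical port template */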
4725 memcpy(template, &lpfc_template, sizeof(*template));
4726
4727 if (use_no_reset_hba)
4728
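				/* template is for a no reset SCSI Host */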
4729 template->eh_host_reset_handler = NULL;
4730
4731
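			/* Template for all vports this physical port creates */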
4732 memcpy(&phba->vport_template, &lpfc_template,
4733 sizeof(*template));
4734 phba->vport_template.shost_groups = lpfc_vport_groups;
4735 phba->vport_template.eh_bus_reset_handler = NULL;
4736 phba->vport_template.eh_host_reset_handler = NULL;
4737 phba->vport_template.vendor_id = 0;
4738
4739
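			/* Initialize the host templates with updated values */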
4740 if (phba->sli_rev == LPFC_SLI_REV4) {
4741 template->sg_tablesize = phba->cfg_scsi_seg_cnt;
4742 phba->vport_template.sg_tablesize =
4743 phba->cfg_scsi_seg_cnt;
4744 } else {
4745 template->sg_tablesize = phba->cfg_sg_seg_cnt;
4746 phba->vport_template.sg_tablesize =
4747 phba->cfg_sg_seg_cnt;
4748 }
4749
4750 } else {
4751
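			/* NVMET is for physical port only */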
4752 memcpy(template, &lpfc_template_nvme,
4753 sizeof(*template));
4754 }
4755 } else {
4756 template = &phba->vport_template;
4757 }
4758
4759 shost = scsi_host_alloc(template, sizeof(struct lpfc_vport));
4760 if (!shost)
4761 goto out;
4762
4763 vport = (struct lpfc_vport *) shost->hostdata;
4764 vport->phba = phba;
4765 vport->load_flag |= FC_LOADING;
4766 vport->fc_flag |= FC_VPORT_NEEDS_REG_VPI;
4767 vport->fc_rscn_flush = 0;
4768 lpfc_get_vport_cfgparam(vport);
4769
4770
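	/* Carry the HBA's enabled FC4 types over to the vport */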
4771 vport->cfg_enable_fc4_type = phba->cfg_enable_fc4_type;
4772
4773 shost->unique_id = instance;
4774 shost->max_id = LPFC_MAX_TARGET;
4775 shost->max_lun = vport->cfg_max_luns;
4776 shost->this_id = -1;
4777 shost->max_cmd_len = 16;
4778
4779 if (phba->sli_rev == LPFC_SLI_REV4) {
4780 if (!phba->cfg_fcp_mq_threshold ||
4781 phba->cfg_fcp_mq_threshold > phba->cfg_hdw_queue)
4782 phba->cfg_fcp_mq_threshold = phba->cfg_hdw_queue;
4783
4784 shost->nr_hw_queues = min_t(int, 2 * num_possible_nodes(),
4785 phba->cfg_fcp_mq_threshold);
4786
4787 shost->dma_boundary =
			phba->sli4_hba.pc_sli4_params.sge_supp_len - 1;
4789
4790 if (phba->cfg_xpsgl && !phba->nvmet_support)
4791 shost->sg_tablesize = LPFC_MAX_SG_TABLESIZE;
4792 else
4793 shost->sg_tablesize = phba->cfg_scsi_seg_cnt;
4794 } else
		/* SLI-3 has a limited number of hardware queues (3),
		 * thus there is only one for FCP processing.
		 */
4798 shost->nr_hw_queues = 1;
4799
	/*
	 * Set initial can_queue value since 0 is no longer supported and
	 * scsi_add_host will fail. This will be adjusted later based on the
	 * max xri value determined in hba setup.
	 */
4805 shost->can_queue = phba->cfg_hba_queue_depth - 10;
4806 if (dev != &phba->pcidev->dev) {
4807 shost->transportt = lpfc_vport_transport_template;
4808 vport->port_type = LPFC_NPIV_PORT;
4809 } else {
4810 shost->transportt = lpfc_transport_template;
4811 vport->port_type = LPFC_PHYSICAL_PORT;
4812 }
4813
4814 lpfc_printf_log(phba, KERN_INFO, LOG_INIT | LOG_FCP,
4815 "9081 CreatePort TMPLATE type %x TBLsize %d "
4816 "SEGcnt %d/%d\n",
4817 vport->port_type, shost->sg_tablesize,
4818 phba->cfg_scsi_seg_cnt, phba->cfg_sg_seg_cnt);
4819
4820
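	/* Allocate VMID resources if the feature is enabled */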
4821 rc = lpfc_vmid_res_alloc(phba, vport);
4822
4823 if (rc)
4824 goto out;
4825
4826
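	/* Initialize all internally managed lists */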
4827 INIT_LIST_HEAD(&vport->fc_nodes);
4828 INIT_LIST_HEAD(&vport->rcv_buffer_list);
4829 spin_lock_init(&vport->work_port_lock);
4830
4831 timer_setup(&vport->fc_disctmo, lpfc_disc_timeout, 0);
4832
4833 timer_setup(&vport->els_tmofunc, lpfc_els_timeout, 0);
4834
4835 timer_setup(&vport->delayed_disc_tmo, lpfc_delayed_disc_tmo, 0);
4836
4837 if (phba->sli3_options & LPFC_SLI3_BG_ENABLED)
4838 lpfc_setup_bg(phba, shost);
4839
4840 error = scsi_add_host_with_dma(shost, dev, &phba->pcidev->dev);
4841 if (error)
4842 goto out_put_shost;
4843
4844 spin_lock_irq(&phba->port_list_lock);
4845 list_add_tail(&vport->listentry, &phba->port_list);
4846 spin_unlock_irq(&phba->port_list_lock);
4847 return vport;
4848
4849out_put_shost:
4850 kfree(vport->vmid);
4851 bitmap_free(vport->vmid_priority_range);
4852 scsi_host_put(shost);
4853out:
4854 return NULL;
4855}
4856
/**
 * destroy_port -  destroy an FC port
 * @vport: pointer to an lpfc virtual N_Port data structure.
 *
 * This routine destroys a FC port from the upper layer protocol. All the
 * resources associated with the port are released.
 **/
4864void
4865destroy_port(struct lpfc_vport *vport)
4866{
4867 struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
4868 struct lpfc_hba *phba = vport->phba;
4869
4870 lpfc_debugfs_terminate(vport);
4871 fc_remove_host(shost);
4872 scsi_remove_host(shost);
4873
4874 spin_lock_irq(&phba->port_list_lock);
4875 list_del_init(&vport->listentry);
4876 spin_unlock_irq(&phba->port_list_lock);
4877
4878 lpfc_cleanup(vport);
4879 return;
4880}
4881
/**
 * lpfc_get_instance - Get a unique integer ID
 *
 * This routine allocates a unique integer ID from lpfc_hba_index pool. It
 * uses the kernel idr facility to perform the task.
 *
 * Return codes:
 *   instance - a unique integer ID allocated as the new instance.
 *   -1 - lpfc get instance failed.
 **/
4892int
4893lpfc_get_instance(void)
4894{
4895 int ret;
4896
4897 ret = idr_alloc(&lpfc_hba_index, NULL, 0, 0, GFP_KERNEL);
4898 return ret < 0 ? -1 : ret;
4899}
4900
/**
 * lpfc_scan_finished - method for SCSI layer to detect whether scan is done
 * @shost: pointer to SCSI host data structure.
 * @time: elapsed time of the scan in jiffies.
 *
 * This routine is called by the SCSI layer with a SCSI host to determine
 * whether the scan is done.
 *
 * Return codes:
 *   0 - SCSI host scan is not over yet.
 *   1 - SCSI host scan is over.
 **/
4916int lpfc_scan_finished(struct Scsi_Host *shost, unsigned long time)
4917{
4918 struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
4919 struct lpfc_hba *phba = vport->phba;
4920 int stat = 0;
4921
4922 spin_lock_irq(shost->host_lock);
4923
4924 if (vport->load_flag & FC_UNLOADING) {
4925 stat = 1;
4926 goto finished;
4927 }
4928 if (time >= msecs_to_jiffies(30 * 1000)) {
4929 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
4930 "0461 Scanning longer than 30 "
4931 "seconds. Continuing initialization\n");
4932 stat = 1;
4933 goto finished;
4934 }
4935 if (time >= msecs_to_jiffies(15 * 1000) &&
4936 phba->link_state <= LPFC_LINK_DOWN) {
4937 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
4938 "0465 Link down longer than 15 "
4939 "seconds. Continuing initialization\n");
4940 stat = 1;
4941 goto finished;
4942 }
4943
4944 if (vport->port_state != LPFC_VPORT_READY)
4945 goto finished;
4946 if (vport->num_disc_nodes || vport->fc_prli_sent)
4947 goto finished;
4948 if (vport->fc_map_cnt == 0 && time < msecs_to_jiffies(2 * 1000))
4949 goto finished;
4950 if ((phba->sli.sli_flag & LPFC_SLI_MBOX_ACTIVE) != 0)
4951 goto finished;
4952
4953 stat = 1;
4954
4955finished:
4956 spin_unlock_irq(shost->host_lock);
4957 return stat;
4958}
4959
4960static void lpfc_host_supported_speeds_set(struct Scsi_Host *shost)
4961{
4962 struct lpfc_vport *vport = (struct lpfc_vport *)shost->hostdata;
4963 struct lpfc_hba *phba = vport->phba;
4964
4965 fc_host_supported_speeds(shost) = 0;

	/*
	 * Avoid reporting supported link speed for FCoE as it can't be
	 * controlled via FCoE.
	 */
4970 if (phba->hba_flag & HBA_FCOE_MODE)
4971 return;
4972
4973 if (phba->lmt & LMT_256Gb)
4974 fc_host_supported_speeds(shost) |= FC_PORTSPEED_256GBIT;
4975 if (phba->lmt & LMT_128Gb)
4976 fc_host_supported_speeds(shost) |= FC_PORTSPEED_128GBIT;
4977 if (phba->lmt & LMT_64Gb)
4978 fc_host_supported_speeds(shost) |= FC_PORTSPEED_64GBIT;
4979 if (phba->lmt & LMT_32Gb)
4980 fc_host_supported_speeds(shost) |= FC_PORTSPEED_32GBIT;
4981 if (phba->lmt & LMT_16Gb)
4982 fc_host_supported_speeds(shost) |= FC_PORTSPEED_16GBIT;
4983 if (phba->lmt & LMT_10Gb)
4984 fc_host_supported_speeds(shost) |= FC_PORTSPEED_10GBIT;
4985 if (phba->lmt & LMT_8Gb)
4986 fc_host_supported_speeds(shost) |= FC_PORTSPEED_8GBIT;
4987 if (phba->lmt & LMT_4Gb)
4988 fc_host_supported_speeds(shost) |= FC_PORTSPEED_4GBIT;
4989 if (phba->lmt & LMT_2Gb)
4990 fc_host_supported_speeds(shost) |= FC_PORTSPEED_2GBIT;
4991 if (phba->lmt & LMT_1Gb)
4992 fc_host_supported_speeds(shost) |= FC_PORTSPEED_1GBIT;
4993}
4994
/**
 * lpfc_host_attrib_init - Initialize SCSI host attributes on a FC port
 * @shost: pointer to SCSI host data structure.
 *
 * This routine initializes a given SCSI host attributes on a FC port. The
 * SCSI host can be either on top of a physical port or a virtual port.
 **/
5002void lpfc_host_attrib_init(struct Scsi_Host *shost)
5003{
5004 struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
5005 struct lpfc_hba *phba = vport->phba;
5006

	/*
	 * Set fixed host attributes. Must be done after lpfc_sli_hba_setup().
	 */
5010 fc_host_node_name(shost) = wwn_to_u64(vport->fc_nodename.u.wwn);
5011 fc_host_port_name(shost) = wwn_to_u64(vport->fc_portname.u.wwn);
5012 fc_host_supported_classes(shost) = FC_COS_CLASS3;
5013
5014 memset(fc_host_supported_fc4s(shost), 0,
5015 sizeof(fc_host_supported_fc4s(shost)));
5016 fc_host_supported_fc4s(shost)[2] = 1;
5017 fc_host_supported_fc4s(shost)[7] = 1;
5018
5019 lpfc_vport_symbolic_node_name(vport, fc_host_symbolic_name(shost),
5020 sizeof fc_host_symbolic_name(shost));
5021
5022 lpfc_host_supported_speeds_set(shost);
5023
5024 fc_host_maxframe_size(shost) =
5025 (((uint32_t) vport->fc_sparam.cmn.bbRcvSizeMsb & 0x0F) << 8) |
5026 (uint32_t) vport->fc_sparam.cmn.bbRcvSizeLsb;
5027
5028 fc_host_dev_loss_tmo(shost) = vport->cfg_devloss_tmo;
5029
5030
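	/* This value is also unchanging */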
5031 memset(fc_host_active_fc4s(shost), 0,
5032 sizeof(fc_host_active_fc4s(shost)));
5033 fc_host_active_fc4s(shost)[2] = 1;
5034 fc_host_active_fc4s(shost)[7] = 1;
5035
5036 fc_host_max_npiv_vports(shost) = phba->max_vpi;
5037 spin_lock_irq(shost->host_lock);
5038 vport->load_flag &= ~FC_LOADING;
5039 spin_unlock_irq(shost->host_lock);
5040}
5041
/**
 * lpfc_stop_port_s3 - Stop SLI3 device port
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked to stop an SLI3 device port, it stops the device
 * from generating interrupts and stops the device driver's timers for the
 * device.
 **/
5050static void
5051lpfc_stop_port_s3(struct lpfc_hba *phba)
5052{
5053
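	/* Clear all interrupt enable conditions */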
5054 writel(0, phba->HCregaddr);
5055 readl(phba->HCregaddr);
5056
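	/* Clear all pending interrupts */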
5057 writel(0xffffffff, phba->HAregaddr);
5058 readl(phba->HAregaddr);
5059
5060
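	/* Reset some HBA SLI setup states */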
5061 lpfc_stop_hba_timers(phba);
5062 phba->pport->work_port_events = 0;
5063}
5064
/**
 * lpfc_stop_port_s4 - Stop SLI4 device port
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked to stop an SLI4 device port, it stops the device
 * from generating interrupts and stops the device driver's timers for the
 * device.
 **/
5073static void
5074lpfc_stop_port_s4(struct lpfc_hba *phba)
5075{
5076
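	/* Reset some HBA SLI4 setup states */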
5077 lpfc_stop_hba_timers(phba);
5078 if (phba->pport)
5079 phba->pport->work_port_events = 0;
5080 phba->sli4_hba.intr_enable = 0;
5081}
5082
/**
 * lpfc_stop_port - Wrapper function for stopping hba port
 * @phba: Pointer to HBA context object.
 *
 * This routine wraps the actual SLI3 or SLI4 hba stop port routine from
 * the API jump table function pointer from the lpfc_hba struct.
 **/
5090void
5091lpfc_stop_port(struct lpfc_hba *phba)
5092{
5093 phba->lpfc_stop_port(phba);
5094
5095 if (phba->wq)
5096 flush_workqueue(phba->wq);
5097}
5098
/**
 * lpfc_fcf_redisc_wait_start_timer - Start fcf rediscover wait timer
 * @phba: Pointer to hba for which this call is being executed.
 *
 * This routine starts the timer waiting for the FCF rediscovery to complete.
 **/
5105void
5106lpfc_fcf_redisc_wait_start_timer(struct lpfc_hba *phba)
5107{
5108 unsigned long fcf_redisc_wait_tmo =
5109 (jiffies + msecs_to_jiffies(LPFC_FCF_REDISCOVER_WAIT_TMO));
5110
5111 mod_timer(&phba->fcf.redisc_wait, fcf_redisc_wait_tmo);
5112 spin_lock_irq(&phba->hbalock);
5113
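	/* Allow action to new fcf asynchronous event */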
5114 phba->fcf.fcf_flag &= ~(FCF_AVAILABLE | FCF_SCAN_DONE);
5115
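	/* Mark the FCF rediscovery pending state */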
5116 phba->fcf.fcf_flag |= FCF_REDISC_PEND;
5117 spin_unlock_irq(&phba->hbalock);
5118}
5119
/**
 * lpfc_sli4_fcf_redisc_wait_tmo - FCF table rediscover wait timeout
 * @t: Timer context used to obtain the pointer to lpfc hba data structure.
 *
 * This routine is invoked when waiting for FCF table rediscover has been
 * timed out. If new FCF record(s) has (have) been discovered during the
 * wait period, a new FCF event shall be added to the FCOE async event
 * list, and then worker thread shall be waked up for processing from the
 * worker thread context.
 **/
5130static void
5131lpfc_sli4_fcf_redisc_wait_tmo(struct timer_list *t)
5132{
5133 struct lpfc_hba *phba = from_timer(phba, t, fcf.redisc_wait);
5134
5135
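	/* Don't send FCF rediscovery event if timer cancelled */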
5136 spin_lock_irq(&phba->hbalock);
5137 if (!(phba->fcf.fcf_flag & FCF_REDISC_PEND)) {
5138 spin_unlock_irq(&phba->hbalock);
5139 return;
5140 }
5141
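	/* Clear FCF rediscovery timer pending flag */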
5142 phba->fcf.fcf_flag &= ~FCF_REDISC_PEND;
5143
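	/* FCF rediscovery event to worker thread */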
5144 phba->fcf.fcf_flag |= FCF_REDISC_EVT;
5145 spin_unlock_irq(&phba->hbalock);
5146 lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
5147 "2776 FCF rediscover quiescent timer expired\n");
5148
5149 lpfc_worker_wake_up(phba);
5150}
5151
/**
 * lpfc_vmid_poll - VMID timeout detection
 * @t: Timer context used to obtain the pointer to lpfc hba data structure.
 *
 * This routine is invoked when there is no I/O by a VM for the specified
 * amount of time. When this situation is detected, the VMID has to be
 * deregistered from the switch and all the local resources freed. The VMID
 * will be reassigned to the VM once the I/O begins.
 **/
5161static void
5162lpfc_vmid_poll(struct timer_list *t)
5163{
5164 struct lpfc_hba *phba = from_timer(phba, t, inactive_vmid_poll);
5165 u32 wake_up = 0;
5166
5167
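	/* check if there is a need to issue QFPA */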
5168 if (phba->pport->vmid_priority_tagging) {
5169 wake_up = 1;
5170 phba->pport->work_port_events |= WORKER_CHECK_VMID_ISSUE_QFPA;
5171 }
5172
5173
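	/* Is the vmid inactivity timer enabled */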
5174 if (phba->pport->vmid_inactivity_timeout ||
5175 phba->pport->load_flag & FC_DEREGISTER_ALL_APP_ID) {
5176 wake_up = 1;
5177 phba->pport->work_port_events |= WORKER_CHECK_INACTIVE_VMID;
5178 }
5179
5180 if (wake_up)
5181 lpfc_worker_wake_up(phba);
5182
5183
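	/* restart the timer for the next iteration */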
5184 mod_timer(&phba->inactive_vmid_poll, jiffies + msecs_to_jiffies(1000 *
5185 LPFC_VMID_TIMER));
5186}
5187
/**
 * lpfc_sli4_parse_latt_fault - Parse sli4 link-attention link fault code
 * @phba: pointer to lpfc hba data structure.
 * @acqe_link: pointer to the async link completion queue entry.
 *
 * This routine is to parse the SLI4 link-attention link fault code.
 **/
5195static void
5196lpfc_sli4_parse_latt_fault(struct lpfc_hba *phba,
5197 struct lpfc_acqe_link *acqe_link)
5198{
5199 switch (bf_get(lpfc_acqe_link_fault, acqe_link)) {
5200 case LPFC_ASYNC_LINK_FAULT_NONE:
5201 case LPFC_ASYNC_LINK_FAULT_LOCAL:
5202 case LPFC_ASYNC_LINK_FAULT_REMOTE:
5203 case LPFC_ASYNC_LINK_FAULT_LR_LRR:
5204 break;
5205 default:
5206 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
5207 "0398 Unknown link fault code: x%x\n",
5208 bf_get(lpfc_acqe_link_fault, acqe_link));
5209 break;
5210 }
5211}
5212
/**
 * lpfc_sli4_parse_latt_type - Parse sli4 link attention type
 * @phba: pointer to lpfc hba data structure.
 * @acqe_link: pointer to the async link completion queue entry.
 *
 * This routine is to parse the SLI4 link attention type and translate it
 * into the base driver's link attention type coding.
 *
 * Return: Link attention type in terms of base driver's coding.
 **/
5223static uint8_t
5224lpfc_sli4_parse_latt_type(struct lpfc_hba *phba,
5225 struct lpfc_acqe_link *acqe_link)
5226{
5227 uint8_t att_type;
5228
5229 switch (bf_get(lpfc_acqe_link_status, acqe_link)) {
5230 case LPFC_ASYNC_LINK_STATUS_DOWN:
5231 case LPFC_ASYNC_LINK_STATUS_LOGICAL_DOWN:
5232 att_type = LPFC_ATT_LINK_DOWN;
5233 break;
5234 case LPFC_ASYNC_LINK_STATUS_UP:
5235
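		/* Ignore physical link up; wait for the logical link up */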
5236 att_type = LPFC_ATT_RESERVED;
5237 break;
5238 case LPFC_ASYNC_LINK_STATUS_LOGICAL_UP:
5239 att_type = LPFC_ATT_LINK_UP;
5240 break;
5241 default:
5242 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
5243 "0399 Invalid link attention type: x%x\n",
5244 bf_get(lpfc_acqe_link_status, acqe_link));
5245 att_type = LPFC_ATT_RESERVED;
5246 break;
5247 }
5248 return att_type;
5249}
5250
/**
 * lpfc_sli_port_speed_get - Get sli3 link speed code to link speed
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is to get an SLI3 FC port's link speed in Mbps.
 *
 * Return: link speed in terms of Mbps.
 **/
5259uint32_t
5260lpfc_sli_port_speed_get(struct lpfc_hba *phba)
5261{
5262 uint32_t link_speed;
5263
5264 if (!lpfc_is_link_up(phba))
5265 return 0;
5266
5267 if (phba->sli_rev <= LPFC_SLI_REV3) {
5268 switch (phba->fc_linkspeed) {
5269 case LPFC_LINK_SPEED_1GHZ:
5270 link_speed = 1000;
5271 break;
5272 case LPFC_LINK_SPEED_2GHZ:
5273 link_speed = 2000;
5274 break;
5275 case LPFC_LINK_SPEED_4GHZ:
5276 link_speed = 4000;
5277 break;
5278 case LPFC_LINK_SPEED_8GHZ:
5279 link_speed = 8000;
5280 break;
5281 case LPFC_LINK_SPEED_10GHZ:
5282 link_speed = 10000;
5283 break;
5284 case LPFC_LINK_SPEED_16GHZ:
5285 link_speed = 16000;
5286 break;
5287 default:
5288 link_speed = 0;
5289 }
5290 } else {
5291 if (phba->sli4_hba.link_state.logical_speed)
5292 link_speed =
5293 phba->sli4_hba.link_state.logical_speed;
5294 else
5295 link_speed = phba->sli4_hba.link_state.speed;
5296 }
5297 return link_speed;
5298}
5299
/**
 * lpfc_sli4_port_speed_parse - Parse async evt link speed code to link speed
 * @phba: pointer to lpfc hba data structure.
 * @evt_code: asynchronous event code.
 * @speed_code: asynchronous event link speed code.
 *
 * This routine is to parse the given SLI4 async event link speed code into
 * value of Mbps for the link speed.
 *
 * Return: link speed in terms of Mbps.
 **/
5311static uint32_t
5312lpfc_sli4_port_speed_parse(struct lpfc_hba *phba, uint32_t evt_code,
5313 uint8_t speed_code)
5314{
5315 uint32_t port_speed;
5316
5317 switch (evt_code) {
5318 case LPFC_TRAILER_CODE_LINK:
5319 switch (speed_code) {
5320 case LPFC_ASYNC_LINK_SPEED_ZERO:
5321 port_speed = 0;
5322 break;
5323 case LPFC_ASYNC_LINK_SPEED_10MBPS:
5324 port_speed = 10;
5325 break;
5326 case LPFC_ASYNC_LINK_SPEED_100MBPS:
5327 port_speed = 100;
5328 break;
5329 case LPFC_ASYNC_LINK_SPEED_1GBPS:
5330 port_speed = 1000;
5331 break;
5332 case LPFC_ASYNC_LINK_SPEED_10GBPS:
5333 port_speed = 10000;
5334 break;
5335 case LPFC_ASYNC_LINK_SPEED_20GBPS:
5336 port_speed = 20000;
5337 break;
5338 case LPFC_ASYNC_LINK_SPEED_25GBPS:
5339 port_speed = 25000;
5340 break;
5341 case LPFC_ASYNC_LINK_SPEED_40GBPS:
5342 port_speed = 40000;
5343 break;
5344 case LPFC_ASYNC_LINK_SPEED_100GBPS:
5345 port_speed = 100000;
5346 break;
5347 default:
5348 port_speed = 0;
5349 }
5350 break;
5351 case LPFC_TRAILER_CODE_FC:
5352 switch (speed_code) {
5353 case LPFC_FC_LA_SPEED_UNKNOWN:
5354 port_speed = 0;
5355 break;
5356 case LPFC_FC_LA_SPEED_1G:
5357 port_speed = 1000;
5358 break;
5359 case LPFC_FC_LA_SPEED_2G:
5360 port_speed = 2000;
5361 break;
5362 case LPFC_FC_LA_SPEED_4G:
5363 port_speed = 4000;
5364 break;
5365 case LPFC_FC_LA_SPEED_8G:
5366 port_speed = 8000;
5367 break;
5368 case LPFC_FC_LA_SPEED_10G:
5369 port_speed = 10000;
5370 break;
5371 case LPFC_FC_LA_SPEED_16G:
5372 port_speed = 16000;
5373 break;
5374 case LPFC_FC_LA_SPEED_32G:
5375 port_speed = 32000;
5376 break;
5377 case LPFC_FC_LA_SPEED_64G:
5378 port_speed = 64000;
5379 break;
5380 case LPFC_FC_LA_SPEED_128G:
5381 port_speed = 128000;
5382 break;
5383 case LPFC_FC_LA_SPEED_256G:
5384 port_speed = 256000;
5385 break;
5386 default:
5387 port_speed = 0;
5388 }
5389 break;
5390 default:
5391 port_speed = 0;
5392 }
5393 return port_speed;
5394}
5395
/**
 * lpfc_sli4_async_link_evt - Process the asynchronous FCoE link event
 * @phba: pointer to lpfc hba data structure.
 * @acqe_link: pointer to the async link completion queue entry.
 *
 * This routine is to handle the SLI4 asynchronous FCoE link event.
 **/
5403static void
5404lpfc_sli4_async_link_evt(struct lpfc_hba *phba,
5405 struct lpfc_acqe_link *acqe_link)
5406{
5407 LPFC_MBOXQ_t *pmb;
5408 MAILBOX_t *mb;
5409 struct lpfc_mbx_read_top *la;
5410 uint8_t att_type;
5411 int rc;
5412
5413 att_type = lpfc_sli4_parse_latt_type(phba, acqe_link);
5414 if (att_type != LPFC_ATT_LINK_DOWN && att_type != LPFC_ATT_LINK_UP)
5415 return;
5416 phba->fcoe_eventtag = acqe_link->event_tag;
5417 pmb = (LPFC_MBOXQ_t *)mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
5418 if (!pmb) {
5419 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
5420 "0395 The mboxq allocation failed\n");
5421 return;
5422 }
5423
5424 rc = lpfc_mbox_rsrc_prep(phba, pmb);
5425 if (rc) {
5426 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
5427 "0396 mailbox allocation failed\n");
5428 goto out_free_pmb;
5429 }
5430
5431
5432 lpfc_els_flush_all_cmd(phba);
5433
5434
5435 phba->sli4_hba.els_wq->pring->flag |= LPFC_STOP_IOCB_EVENT;
5436
5437
5438 phba->sli.slistat.link_event++;
5439
5440
5441 lpfc_read_topology(phba, pmb, (struct lpfc_dmabuf *)pmb->ctx_buf);
5442 pmb->mbox_cmpl = lpfc_mbx_cmpl_read_topology;
5443 pmb->vport = phba->pport;
5444
5445
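	/* Keep the link status for extra SLI4 state machine reference */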
5446 phba->sli4_hba.link_state.speed =
5447 lpfc_sli4_port_speed_parse(phba, LPFC_TRAILER_CODE_LINK,
5448 bf_get(lpfc_acqe_link_speed, acqe_link));
5449 phba->sli4_hba.link_state.duplex =
5450 bf_get(lpfc_acqe_link_duplex, acqe_link);
5451 phba->sli4_hba.link_state.status =
5452 bf_get(lpfc_acqe_link_status, acqe_link);
5453 phba->sli4_hba.link_state.type =
5454 bf_get(lpfc_acqe_link_type, acqe_link);
5455 phba->sli4_hba.link_state.number =
5456 bf_get(lpfc_acqe_link_number, acqe_link);
5457 phba->sli4_hba.link_state.fault =
5458 bf_get(lpfc_acqe_link_fault, acqe_link);
5459 phba->sli4_hba.link_state.logical_speed =
5460 bf_get(lpfc_acqe_logical_link_speed, acqe_link) * 10;
5461
5462 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
5463 "2900 Async FC/FCoE Link event - Speed:%dGBit "
5464 "duplex:x%x LA Type:x%x Port Type:%d Port Number:%d "
5465 "Logical speed:%dMbps Fault:%d\n",
5466 phba->sli4_hba.link_state.speed,
			phba->sli4_hba.link_state.duplex,
5468 phba->sli4_hba.link_state.status,
5469 phba->sli4_hba.link_state.type,
5470 phba->sli4_hba.link_state.number,
5471 phba->sli4_hba.link_state.logical_speed,
5472 phba->sli4_hba.link_state.fault);
5473
	/*
	 * For FC Mode: issue the READ_TOPOLOGY mailbox command to fetch
	 * the full link attention information.
	 */
5477 if (!(phba->hba_flag & HBA_FCOE_MODE)) {
5478 rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT);
5479 if (rc == MBX_NOT_FINISHED)
5480 goto out_free_pmb;
5481 return;
5482 }
5483
	/*
	 * For FCoE Mode: fill in all the topology information we need and call
	 * the READ_TOPOLOGY completion routine to continue without actually
	 * sending the READ_TOPOLOGY mailbox command to the port.
	 */
	/* Initialize completion status */
5489 mb = &pmb->u.mb;
5490 mb->mbxStatus = MBX_SUCCESS;
5491
5492
5493 lpfc_sli4_parse_latt_fault(phba, acqe_link);
5494
5495
5496 la = (struct lpfc_mbx_read_top *) &pmb->u.mb.un.varReadTop;
5497 la->eventTag = acqe_link->event_tag;
5498 bf_set(lpfc_mbx_read_top_att_type, la, att_type);
5499 bf_set(lpfc_mbx_read_top_link_spd, la,
5500 (bf_get(lpfc_acqe_link_speed, acqe_link)));
5501
5502
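	/* Fake the following fields normally returned by READ_TOPOLOGY */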
5503 bf_set(lpfc_mbx_read_top_topology, la, LPFC_TOPOLOGY_PT_PT);
5504 bf_set(lpfc_mbx_read_top_alpa_granted, la, 0);
5505 bf_set(lpfc_mbx_read_top_il, la, 0);
5506 bf_set(lpfc_mbx_read_top_pb, la, 0);
5507 bf_set(lpfc_mbx_read_top_fa, la, 0);
5508 bf_set(lpfc_mbx_read_top_mm, la, 0);
5509
5510
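	/* Invoke the lpfc_handle_latt mailbox command callback function */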
5511 lpfc_mbx_cmpl_read_topology(phba, pmb);
5512
5513 return;
5514
5515out_free_pmb:
5516 lpfc_mbox_rsrc_cleanup(phba, pmb, MBOX_THD_UNLOCKED);
5517}
5518
/**
 * lpfc_async_link_speed_to_read_top - Parse async evt link speed code to read
 * topology.
 * @phba: pointer to lpfc hba data structure.
 * @speed_code: asynchronous event link speed code.
 *
 * This routine is to parse the given SLI4 async event link speed code into
 * value of Read topology link speed.
 *
 * Return: link speed in terms of Read topology.
 **/
5530static uint8_t
5531lpfc_async_link_speed_to_read_top(struct lpfc_hba *phba, uint8_t speed_code)
5532{
5533 uint8_t port_speed;
5534
5535 switch (speed_code) {
5536 case LPFC_FC_LA_SPEED_1G:
5537 port_speed = LPFC_LINK_SPEED_1GHZ;
5538 break;
5539 case LPFC_FC_LA_SPEED_2G:
5540 port_speed = LPFC_LINK_SPEED_2GHZ;
5541 break;
5542 case LPFC_FC_LA_SPEED_4G:
5543 port_speed = LPFC_LINK_SPEED_4GHZ;
5544 break;
5545 case LPFC_FC_LA_SPEED_8G:
5546 port_speed = LPFC_LINK_SPEED_8GHZ;
5547 break;
5548 case LPFC_FC_LA_SPEED_16G:
5549 port_speed = LPFC_LINK_SPEED_16GHZ;
5550 break;
5551 case LPFC_FC_LA_SPEED_32G:
5552 port_speed = LPFC_LINK_SPEED_32GHZ;
5553 break;
5554 case LPFC_FC_LA_SPEED_64G:
5555 port_speed = LPFC_LINK_SPEED_64GHZ;
5556 break;
5557 case LPFC_FC_LA_SPEED_128G:
5558 port_speed = LPFC_LINK_SPEED_128GHZ;
5559 break;
5560 case LPFC_FC_LA_SPEED_256G:
5561 port_speed = LPFC_LINK_SPEED_256GHZ;
5562 break;
5563 default:
5564 port_speed = 0;
5565 break;
5566 }
5567
5568 return port_speed;
5569}
5570
5571void
5572lpfc_cgn_dump_rxmonitor(struct lpfc_hba *phba)
5573{
5574 struct rxtable_entry *entry;
5575 int cnt = 0, head, tail, last, start;
5576
5577 head = atomic_read(&phba->rxtable_idx_head);
5578 tail = atomic_read(&phba->rxtable_idx_tail);
5579 if (!phba->rxtable || head == tail) {
5580 lpfc_printf_log(phba, KERN_ERR, LOG_CGN_MGMT,
5581 "4411 Rxtable is empty\n");
5582 return;
5583 }
5584 last = tail;
5585 start = head;
5586
5587
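	/* Dump the newest entries first, up to LPFC_MAX_RXMONITOR_DUMP */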
5588 while (start != last) {
5589 if (start)
5590 start--;
5591 else
5592 start = LPFC_MAX_RXMONITOR_ENTRY - 1;
5593 entry = &phba->rxtable[start];
5594 lpfc_printf_log(phba, KERN_INFO, LOG_CGN_MGMT,
5595 "4410 %02d: MBPI %lld Xmit %lld Cmpl %lld "
5596 "Lat %lld ASz %lld Info %02d BWUtil %d "
5597 "Int %d slot %d\n",
5598 cnt, entry->max_bytes_per_interval,
5599 entry->total_bytes, entry->rcv_bytes,
5600 entry->avg_io_latency, entry->avg_io_size,
5601 entry->cmf_info, entry->timer_utilization,
5602 entry->timer_interval, start);
5603 cnt++;
5604 if (cnt >= LPFC_MAX_RXMONITOR_DUMP)
5605 return;
5606 }
5607}
5608
/**
 * lpfc_cgn_update_stat - Save data into congestion stats buffer
 * @phba: pointer to lpfc hba data structure.
 * @dtag: FPIN descriptor received
 *
 * Increment the FPIN received counter/time when it happens.
 **/
5616void
5617lpfc_cgn_update_stat(struct lpfc_hba *phba, uint32_t dtag)
5618{
5619 struct lpfc_cgn_info *cp;
5620 struct tm broken;
5621 struct timespec64 cur_time;
5622 u32 cnt;
5623 u32 value;
5624
5625
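	/* Make sure we have a congestion info buffer */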
5626 if (!phba->cgn_i)
5627 return;
5628 cp = (struct lpfc_cgn_info *)phba->cgn_i->virt;
5629 ktime_get_real_ts64(&cur_time);
5630 time64_to_tm(cur_time.tv_sec, 0, &broken);
5631
5632
5633 switch (dtag) {
5634 case ELS_DTAG_LNK_INTEGRITY:
5635 cnt = le32_to_cpu(cp->link_integ_notification);
5636 cnt++;
5637 cp->link_integ_notification = cpu_to_le32(cnt);
5638
5639 cp->cgn_stat_lnk_month = broken.tm_mon + 1;
5640 cp->cgn_stat_lnk_day = broken.tm_mday;
5641 cp->cgn_stat_lnk_year = broken.tm_year - 100;
5642 cp->cgn_stat_lnk_hour = broken.tm_hour;
5643 cp->cgn_stat_lnk_min = broken.tm_min;
5644 cp->cgn_stat_lnk_sec = broken.tm_sec;
5645 break;
5646 case ELS_DTAG_DELIVERY:
5647 cnt = le32_to_cpu(cp->delivery_notification);
5648 cnt++;
5649 cp->delivery_notification = cpu_to_le32(cnt);
5650
5651 cp->cgn_stat_del_month = broken.tm_mon + 1;
5652 cp->cgn_stat_del_day = broken.tm_mday;
5653 cp->cgn_stat_del_year = broken.tm_year - 100;
5654 cp->cgn_stat_del_hour = broken.tm_hour;
5655 cp->cgn_stat_del_min = broken.tm_min;
5656 cp->cgn_stat_del_sec = broken.tm_sec;
5657 break;
5658 case ELS_DTAG_PEER_CONGEST:
5659 cnt = le32_to_cpu(cp->cgn_peer_notification);
5660 cnt++;
5661 cp->cgn_peer_notification = cpu_to_le32(cnt);
5662
5663 cp->cgn_stat_peer_month = broken.tm_mon + 1;
5664 cp->cgn_stat_peer_day = broken.tm_mday;
5665 cp->cgn_stat_peer_year = broken.tm_year - 100;
5666 cp->cgn_stat_peer_hour = broken.tm_hour;
5667 cp->cgn_stat_peer_min = broken.tm_min;
5668 cp->cgn_stat_peer_sec = broken.tm_sec;
5669 break;
5670 case ELS_DTAG_CONGESTION:
5671 cnt = le32_to_cpu(cp->cgn_notification);
5672 cnt++;
5673 cp->cgn_notification = cpu_to_le32(cnt);
5674
5675 cp->cgn_stat_cgn_month = broken.tm_mon + 1;
5676 cp->cgn_stat_cgn_day = broken.tm_mday;
5677 cp->cgn_stat_cgn_year = broken.tm_year - 100;
5678 cp->cgn_stat_cgn_hour = broken.tm_hour;
5679 cp->cgn_stat_cgn_min = broken.tm_min;
5680 cp->cgn_stat_cgn_sec = broken.tm_sec;
5681 }
5682 if (phba->cgn_fpin_frequency &&
5683 phba->cgn_fpin_frequency != LPFC_FPIN_INIT_FREQ) {
5684 value = LPFC_CGN_TIMER_TO_MIN / phba->cgn_fpin_frequency;
5685 cp->cgn_stat_npm = value;
5686 }
5687 value = lpfc_cgn_calc_crc32(cp, LPFC_CGN_INFO_SZ,
5688 LPFC_CGN_CRC32_SEED);
5689 cp->cgn_info_crc = cpu_to_le32(value);
5690}
5691
/**
 * lpfc_cgn_save_evt_cnt - Save data into registered congestion buffer
 * @phba: pointer to lpfc hba data structure.
 *
 * Save the congestion event data every minute.
 * On the hour collapse all the minute data into hour data. Every day
 * collapse all the hour data into daily data. Separate driver
 * and fabric congestion event counters that will be saved out
 * in the registered congestion buffer every minute.
 **/
5702static void
5703lpfc_cgn_save_evt_cnt(struct lpfc_hba *phba)
5704{
5705 struct lpfc_cgn_info *cp;
5706 struct tm broken;
5707 struct timespec64 cur_time;
5708 uint32_t i, index;
5709 uint16_t value, mvalue;
5710 uint64_t bps;
5711 uint32_t mbps;
5712 uint32_t dvalue, wvalue, lvalue, avalue;
5713 uint64_t latsum;
5714 __le16 *ptr;
5715 __le32 *lptr;
5716 __le16 *mptr;
5717
5718
5719 if (!phba->cgn_i)
5720 return;
5721 cp = (struct lpfc_cgn_info *)phba->cgn_i->virt;
5722
5723 if (time_before(jiffies, phba->cgn_evt_timestamp))
5724 return;
5725 phba->cgn_evt_timestamp = jiffies +
5726 msecs_to_jiffies(LPFC_CGN_TIMER_TO_MIN);
5727 phba->cgn_evt_minute++;
5728
	/* We should get to this point in the routine on 1 minute intervals */
5731 ktime_get_real_ts64(&cur_time);
5732 time64_to_tm(cur_time.tv_sec, 0, &broken);
5733
5734 if (phba->cgn_fpin_frequency &&
5735 phba->cgn_fpin_frequency != LPFC_FPIN_INIT_FREQ) {
5736 value = LPFC_CGN_TIMER_TO_MIN / phba->cgn_fpin_frequency;
5737 cp->cgn_stat_npm = value;
5738 }
5739
5740
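	/* Read and clear the latency counters for this minute */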
5741 lvalue = atomic_read(&phba->cgn_latency_evt_cnt);
5742 latsum = atomic64_read(&phba->cgn_latency_evt);
5743 atomic_set(&phba->cgn_latency_evt_cnt, 0);
5744 atomic64_set(&phba->cgn_latency_evt, 0);
5745
	/* Calculate the bandwidth received this past minute: convert
	 * the accumulated 512-byte blocks into a MB/sec rate.
	 */
5750 bps = div_u64(phba->rx_block_cnt, LPFC_SEC_MIN) * 512;
5751 phba->rx_block_cnt = 0;
5752 mvalue = bps / (1024 * 1024);
5753
5754
5755
5756 cp->cgn_info_mode = phba->cgn_p.cgn_param_mode;
5757 cp->cgn_info_level0 = phba->cgn_p.cgn_param_level0;
5758 cp->cgn_info_level1 = phba->cgn_p.cgn_param_level1;
5759 cp->cgn_info_level2 = phba->cgn_p.cgn_param_level2;
5760
5761
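	/* Record the configured LUN queue depth */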
5762 value = (uint16_t)(phba->pport->cfg_lun_queue_depth);
5763 cp->cgn_lunq = cpu_to_le16(value);
5764
	/* Record congestion buffer info - every minute
	 * cgn_driver_evt_cnt (Driver events)
	 * cgn_fabric_warn_cnt (Congestion Warnings)
	 * cgn_latency_evt_cnt / cgn_latency_evt (IO latency)
	 * cgn_fabric_alarm_cnt (Congestion Alarms)
	 */
5771 index = ++cp->cgn_index_minute;
5772 if (cp->cgn_index_minute == LPFC_MIN_HOUR) {
5773 cp->cgn_index_minute = 0;
5774 index = 0;
5775 }
5776
5777
5778 dvalue = atomic_read(&phba->cgn_driver_evt_cnt);
5779 atomic_set(&phba->cgn_driver_evt_cnt, 0);
5780
5781
5782 wvalue = 0;
5783 if ((phba->cgn_reg_fpin & LPFC_CGN_FPIN_WARN) ||
5784 phba->cgn_reg_signal == EDC_CG_SIG_WARN_ONLY ||
5785 phba->cgn_reg_signal == EDC_CG_SIG_WARN_ALARM)
5786 wvalue = atomic_read(&phba->cgn_fabric_warn_cnt);
5787 atomic_set(&phba->cgn_fabric_warn_cnt, 0);
5788
5789
5790 avalue = 0;
5791 if ((phba->cgn_reg_fpin & LPFC_CGN_FPIN_ALARM) ||
5792 phba->cgn_reg_signal == EDC_CG_SIG_WARN_ALARM)
5793 avalue = atomic_read(&phba->cgn_fabric_alarm_cnt);
5794 atomic_set(&phba->cgn_fabric_alarm_cnt, 0);
5795
	/* Collect the driver, warning, alarm and latency counts for this
	 * minute into the driver congestion buffer.
	 */
5799 ptr = &cp->cgn_drvr_min[index];
5800 value = (uint16_t)dvalue;
5801 *ptr = cpu_to_le16(value);
5802
5803 ptr = &cp->cgn_warn_min[index];
5804 value = (uint16_t)wvalue;
5805 *ptr = cpu_to_le16(value);
5806
5807 ptr = &cp->cgn_alarm_min[index];
5808 value = (uint16_t)avalue;
5809 *ptr = cpu_to_le16(value);
5810
5811 lptr = &cp->cgn_latency_min[index];
5812 if (lvalue) {
5813 lvalue = (uint32_t)div_u64(latsum, lvalue);
5814 *lptr = cpu_to_le32(lvalue);
5815 } else {
5816 *lptr = 0;
5817 }
5818
5819
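	/* Save the receive bandwidth (MB/sec) for this minute */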
5820 mptr = &cp->cgn_bw_min[index];
5821 *mptr = cpu_to_le16(mvalue);
5822
5823 lpfc_printf_log(phba, KERN_INFO, LOG_CGN_MGMT,
5824 "2418 Congestion Info - minute (%d): %d %d %d %d %d\n",
5825 index, dvalue, wvalue, *lptr, mvalue, avalue);
5826
5827
5828 if ((phba->cgn_evt_minute % LPFC_MIN_HOUR) == 0) {
		/* Record congestion buffer info - every hour
		 * Collapse all minutes into an hour
		 */
5832 index = ++cp->cgn_index_hour;
5833 if (cp->cgn_index_hour == LPFC_HOUR_DAY) {
5834 cp->cgn_index_hour = 0;
5835 index = 0;
5836 }
5837
5838 dvalue = 0;
5839 wvalue = 0;
5840 lvalue = 0;
5841 avalue = 0;
5842 mvalue = 0;
5843 mbps = 0;
5844 for (i = 0; i < LPFC_MIN_HOUR; i++) {
5845 dvalue += le16_to_cpu(cp->cgn_drvr_min[i]);
5846 wvalue += le16_to_cpu(cp->cgn_warn_min[i]);
5847 lvalue += le32_to_cpu(cp->cgn_latency_min[i]);
5848 mbps += le16_to_cpu(cp->cgn_bw_min[i]);
5849 avalue += le16_to_cpu(cp->cgn_alarm_min[i]);
5850 }
5851 if (lvalue)
5852 lvalue /= LPFC_MIN_HOUR;
5853 if (mbps)
5854 mvalue = mbps / LPFC_MIN_HOUR;
5855
5856 lptr = &cp->cgn_drvr_hr[index];
5857 *lptr = cpu_to_le32(dvalue);
5858 lptr = &cp->cgn_warn_hr[index];
5859 *lptr = cpu_to_le32(wvalue);
5860 lptr = &cp->cgn_latency_hr[index];
5861 *lptr = cpu_to_le32(lvalue);
5862 mptr = &cp->cgn_bw_hr[index];
5863 *mptr = cpu_to_le16(mvalue);
5864 lptr = &cp->cgn_alarm_hr[index];
5865 *lptr = cpu_to_le32(avalue);
5866
5867 lpfc_printf_log(phba, KERN_INFO, LOG_CGN_MGMT,
5868 "2419 Congestion Info - hour "
5869 "(%d): %d %d %d %d %d\n",
5870 index, dvalue, wvalue, lvalue, mvalue, avalue);
5871 }
5872
5873
5874 if ((phba->cgn_evt_minute % LPFC_MIN_DAY) == 0) {
		/* Record congestion buffer info - every day
		 * Collapse all hours into a day
		 */
5879 index = ++cp->cgn_index_day;
5880 if (cp->cgn_index_day == LPFC_MAX_CGN_DAYS) {
5881 cp->cgn_index_day = 0;
5882 index = 0;
5883 }
5884
		/* Anytime we overwrite daily index 0, after we wrap,
		 * we will be overwriting the oldest day, so we must
		 * update the congestion data start time for that day.
		 * That start time should have previously been saved after
		 * we wrapped the last time through the day array.
		 */
5891 if ((phba->hba_flag & HBA_CGN_DAY_WRAP) && index == 0) {
5892 time64_to_tm(phba->cgn_daily_ts.tv_sec, 0, &broken);
5893
5894 cp->cgn_info_month = broken.tm_mon + 1;
5895 cp->cgn_info_day = broken.tm_mday;
5896 cp->cgn_info_year = broken.tm_year - 100;
5897 cp->cgn_info_hour = broken.tm_hour;
5898 cp->cgn_info_minute = broken.tm_min;
5899 cp->cgn_info_second = broken.tm_sec;
5900
5901 lpfc_printf_log
5902 (phba, KERN_INFO, LOG_CGN_MGMT,
5903 "2646 CGNInfo idx0 Start Time: "
5904 "%d/%d/%d %d:%d:%d\n",
5905 cp->cgn_info_day, cp->cgn_info_month,
5906 cp->cgn_info_year, cp->cgn_info_hour,
5907 cp->cgn_info_minute, cp->cgn_info_second);
5908 }
5909
5910 dvalue = 0;
5911 wvalue = 0;
5912 lvalue = 0;
5913 mvalue = 0;
5914 mbps = 0;
5915 avalue = 0;
5916 for (i = 0; i < LPFC_HOUR_DAY; i++) {
5917 dvalue += le32_to_cpu(cp->cgn_drvr_hr[i]);
5918 wvalue += le32_to_cpu(cp->cgn_warn_hr[i]);
5919 lvalue += le32_to_cpu(cp->cgn_latency_hr[i]);
5920 mbps += le16_to_cpu(cp->cgn_bw_hr[i]);
5921 avalue += le32_to_cpu(cp->cgn_alarm_hr[i]);
5922 }
5923 if (lvalue)
5924 lvalue /= LPFC_HOUR_DAY;
5925 if (mbps)
5926 mvalue = mbps / LPFC_HOUR_DAY;
5927
5928 lptr = &cp->cgn_drvr_day[index];
5929 *lptr = cpu_to_le32(dvalue);
5930 lptr = &cp->cgn_warn_day[index];
5931 *lptr = cpu_to_le32(wvalue);
5932 lptr = &cp->cgn_latency_day[index];
5933 *lptr = cpu_to_le32(lvalue);
5934 mptr = &cp->cgn_bw_day[index];
5935 *mptr = cpu_to_le16(mvalue);
5936 lptr = &cp->cgn_alarm_day[index];
5937 *lptr = cpu_to_le32(avalue);
5938
5939 lpfc_printf_log(phba, KERN_INFO, LOG_CGN_MGMT,
5940 "2420 Congestion Info - daily (%d): "
5941 "%d %d %d %d %d\n",
5942 index, dvalue, wvalue, lvalue, mvalue, avalue);
5943
		/* Every time we wrap the day index, save the timestamp; it
		 * becomes the new start time for index 0 when that entry is
		 * overwritten after the wrap.
		 */
5948 if (index == (LPFC_MAX_CGN_DAYS - 1)) {
5949 phba->hba_flag |= HBA_CGN_DAY_WRAP;
5950 ktime_get_real_ts64(&phba->cgn_daily_ts);
5951 }
5952 }
5953
5954
5955 value = phba->cgn_fpin_frequency;
5956 cp->cgn_warn_freq = cpu_to_le16(value);
5957 cp->cgn_alarm_freq = cpu_to_le16(value);
5958
5959 lvalue = lpfc_cgn_calc_crc32(cp, LPFC_CGN_INFO_SZ,
5960 LPFC_CGN_CRC32_SEED);
5961 cp->cgn_info_crc = cpu_to_le32(lvalue);
5962}
5963
/**
 * lpfc_calc_cmf_latency - latency from start of rxate timer interval
 * @phba: The Hba for which this call is being executed.
 *
 * The routine calculates the latency from the beginning of the CMF timer
 * interval to the current point in time. It is called from IO completion
 * when we exceed our Bandwidth limitation for the time interval.
 **/
5972uint32_t
5973lpfc_calc_cmf_latency(struct lpfc_hba *phba)
5974{
5975 struct timespec64 cmpl_time;
5976 uint32_t msec = 0;
5977
5978 ktime_get_real_ts64(&cmpl_time);
5979
	/* This routine works on a ms granularity so sec and usec are
	 * converted accordingly.
	 */
5983 if (cmpl_time.tv_sec == phba->cmf_latency.tv_sec) {
5984 msec = (cmpl_time.tv_nsec - phba->cmf_latency.tv_nsec) /
5985 NSEC_PER_MSEC;
5986 } else {
5987 if (cmpl_time.tv_nsec >= phba->cmf_latency.tv_nsec) {
5988 msec = (cmpl_time.tv_sec -
5989 phba->cmf_latency.tv_sec) * MSEC_PER_SEC;
5990 msec += ((cmpl_time.tv_nsec -
5991 phba->cmf_latency.tv_nsec) / NSEC_PER_MSEC);
5992 } else {
5993 msec = (cmpl_time.tv_sec - phba->cmf_latency.tv_sec -
5994 1) * MSEC_PER_SEC;
5995 msec += (((NSEC_PER_SEC - phba->cmf_latency.tv_nsec) +
5996 cmpl_time.tv_nsec) / NSEC_PER_MSEC);
5997 }
5998 }
5999 return msec;
6000}
6001
/**
 * lpfc_cmf_timer - This is the timer function for one congestion
 * rate interval.
 * @timer: Pointer to the high resolution timer that expired
 **/
6007static enum hrtimer_restart
6008lpfc_cmf_timer(struct hrtimer *timer)
6009{
6010 struct lpfc_hba *phba = container_of(timer, struct lpfc_hba,
6011 cmf_timer);
6012 struct rxtable_entry *entry;
6013 uint32_t io_cnt;
6014 uint32_t head, tail;
6015 uint32_t busy, max_read;
6016 uint64_t total, rcv, lat, mbpi, extra, cnt;
6017 int timer_interval = LPFC_CMF_INTERVAL;
6018 uint32_t ms;
6019 struct lpfc_cgn_stat *cgs;
6020 int cpu;
6021
6022
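	/* Do not restart the timer if CMF is off or not yet initialized */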
6023 if (phba->cmf_active_mode == LPFC_CFG_OFF ||
6024 !phba->cmf_latency.tv_sec) {
6025 lpfc_printf_log(phba, KERN_INFO, LOG_CGN_MGMT,
6026 "6224 CMF timer exit: %d %lld\n",
6027 phba->cmf_active_mode,
6028 (uint64_t)phba->cmf_latency.tv_sec);
6029 return HRTIMER_NORESTART;
6030 }
6031
	/* If pport is not ready yet, just exit and wait for
	 * the next timer cycle to hit.
	 */
6035 if (!phba->pport)
6036 goto skip;
6037
	/* Do not block SCSI IO while in the timer routine since
	 * total_bytes will be cleared
	 */
6041 atomic_set(&phba->cmf_stop_io, 1);
6042
	/* First we need to calculate the actual ms between
	 * the last timer interrupt and this one. Most of
	 * the time we should be close to LPFC_CMF_INTERVAL.
	 */
6048 ms = lpfc_calc_cmf_latency(phba);
6049
	/* Immediately after we calculate the time since the last
	 * timer interrupt, set the start time for the next
	 * interrupt
	 */
6055 ktime_get_real_ts64(&phba->cmf_latency);
6056
6057 phba->cmf_link_byte_count =
6058 div_u64(phba->cmf_max_line_rate * LPFC_CMF_INTERVAL, 1000);
6059
6060
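	/* Collect all the stats from the prior timer interval */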
6061 total = 0;
6062 io_cnt = 0;
6063 lat = 0;
6064 rcv = 0;
6065 for_each_present_cpu(cpu) {
6066 cgs = per_cpu_ptr(phba->cmf_stat, cpu);
6067 total += atomic64_xchg(&cgs->total_bytes, 0);
6068 io_cnt += atomic_xchg(&cgs->rx_io_cnt, 0);
6069 lat += atomic64_xchg(&cgs->rx_latency, 0);
6070 rcv += atomic64_xchg(&cgs->rcv_bytes, 0);
6071 }
6072
	/* Before we issue another CMF_SYNC_WQE, retrieve the BW
	 * returned by the last CMF_SYNC_WQE, saved in cmf_last_sync_bw.
	 * That becomes the target BW for this next timer interval.
	 */
6078 if (phba->cmf_active_mode == LPFC_CFG_MANAGED &&
6079 phba->link_state != LPFC_LINK_DOWN &&
6080 phba->hba_flag & HBA_SETUP) {
6081 mbpi = phba->cmf_last_sync_bw;
6082 phba->cmf_last_sync_bw = 0;
6083 extra = 0;
6084
		/* Calculate any extra bytes needed to account for the
		 * timer accuracy. If we are less than LPFC_CMF_INTERVAL
		 * calculate the adjustment needed for total to reflect
		 * a full LPFC_CMF_INTERVAL.
		 */
6090 if (ms && ms < LPFC_CMF_INTERVAL) {
6091 cnt = div_u64(total, ms);
6092 cnt *= LPFC_CMF_INTERVAL;
6093
			/* If the timeout is scheduled to be shorter,
			 * this value may skew the data, so cap it at mbpi.
			 */
6097 if ((phba->hba_flag & HBA_SHORT_CMF) && cnt > mbpi)
6098 cnt = mbpi;
6099
6100 extra = cnt - total;
6101 }
6102 lpfc_issue_cmf_sync_wqe(phba, LPFC_CMF_INTERVAL, total + extra);
6103 } else {
		/* For Monitor mode or link down we want mbpi
		 * to be the full link speed
		 */
6107 mbpi = phba->cmf_link_byte_count;
6108 extra = 0;
6109 }
6110 phba->cmf_timer_cnt++;
6111
6112 if (io_cnt) {
6113
6114 atomic_add(io_cnt, &phba->cgn_latency_evt_cnt);
6115 atomic64_add(lat, &phba->cgn_latency_evt);
6116 }
6117 busy = atomic_xchg(&phba->cmf_busy, 0);
6118 max_read = atomic_xchg(&phba->rx_max_read_cnt, 0);
6119
6120
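	/* Calculate MBPI for the next timer interval */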
6121 if (mbpi) {
6122 if (mbpi > phba->cmf_link_byte_count ||
6123 phba->cmf_active_mode == LPFC_CFG_MONITOR)
6124 mbpi = phba->cmf_link_byte_count;
6125
		/* Change max_bytes_per_interval to what the prior
		 * CMF_SYNC_WQE cmpl indicated.
		 */
6129 if (mbpi != phba->cmf_max_bytes_per_interval)
6130 phba->cmf_max_bytes_per_interval = mbpi;
6131 }
6132
6133
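	/* Save rxmonitor information for debug */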
6134 if (phba->rxtable) {
6135 head = atomic_xchg(&phba->rxtable_idx_head,
6136 LPFC_RXMONITOR_TABLE_IN_USE);
6137 entry = &phba->rxtable[head];
6138 entry->total_bytes = total;
6139 entry->cmf_bytes = total + extra;
6140 entry->rcv_bytes = rcv;
6141 entry->cmf_busy = busy;
6142 entry->cmf_info = phba->cmf_active_info;
6143 if (io_cnt) {
6144 entry->avg_io_latency = div_u64(lat, io_cnt);
6145 entry->avg_io_size = div_u64(rcv, io_cnt);
6146 } else {
6147 entry->avg_io_latency = 0;
6148 entry->avg_io_size = 0;
6149 }
6150 entry->max_read_cnt = max_read;
6151 entry->io_cnt = io_cnt;
6152 entry->max_bytes_per_interval = mbpi;
6153 if (phba->cmf_active_mode == LPFC_CFG_MANAGED)
6154 entry->timer_utilization = phba->cmf_last_ts;
6155 else
6156 entry->timer_utilization = ms;
6157 entry->timer_interval = ms;
6158 phba->cmf_last_ts = 0;
6159
6160
6161 head = (head + 1) % LPFC_MAX_RXMONITOR_ENTRY;
6162 tail = atomic_read(&phba->rxtable_idx_tail);
6163 if (head == tail) {
6164 tail = (tail + 1) % LPFC_MAX_RXMONITOR_ENTRY;
6165 atomic_set(&phba->rxtable_idx_tail, tail);
6166 }
6167 atomic_set(&phba->rxtable_idx_head, head);
6168 }
6169
6170 if (phba->cmf_active_mode == LPFC_CFG_MONITOR) {
		/* If Monitor mode, check if we are oversubscribed
		 * against the full line rate.
		 */
6174 if (mbpi && total > mbpi)
6175 atomic_inc(&phba->cgn_driver_evt_cnt);
6176 }
6177 phba->rx_block_cnt += div_u64(rcv, 512);
6178
6179
6180 lpfc_cgn_save_evt_cnt(phba);
6181
6182 phba->hba_flag &= ~HBA_SHORT_CMF;
6183
	/* The congestion buffer is updated on 1 minute boundaries, so
	 * keep this timer aligned with the minute: shorten the next
	 * interval if it would run past cgn_evt_timestamp.
	 */
6188 if (time_after(jiffies + msecs_to_jiffies(LPFC_CMF_INTERVAL),
6189 phba->cgn_evt_timestamp)) {
6190 timer_interval = jiffies_to_msecs(phba->cgn_evt_timestamp -
6191 jiffies);
6192 if (timer_interval <= 0)
6193 timer_interval = LPFC_CMF_INTERVAL;
6194 else
6195 phba->hba_flag |= HBA_SHORT_CMF;
6196
		/* Scale the link byte count to match the shortened
		 * timer interval.
		 */
6200 phba->cmf_link_byte_count = div_u64(phba->cmf_max_line_rate *
6201 timer_interval, 1000);
6202 if (phba->cmf_active_mode == LPFC_CFG_MONITOR)
6203 phba->cmf_max_bytes_per_interval =
6204 phba->cmf_link_byte_count;
6205 }
6206
	/* Since total_bytes has already been zero'ed, it is okay to unblock
	 * after max_bytes_per_interval is setup.
	 */
6210 if (atomic_xchg(&phba->cmf_bw_wait, 0))
6211 queue_work(phba->wq, &phba->unblock_request_work);
6212
6213
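	/* SCSI IO is now unblocked */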
6214 atomic_set(&phba->cmf_stop_io, 0);
6215
6216skip:
6217 hrtimer_forward_now(timer,
6218 ktime_set(0, timer_interval * NSEC_PER_MSEC));
6219 return HRTIMER_RESTART;
6220}
6221
6222#define trunk_link_status(__idx)\
6223 bf_get(lpfc_acqe_fc_la_trunk_config_port##__idx, acqe_fc) ?\
6224 ((phba->trunk_link.link##__idx.state == LPFC_LINK_UP) ?\
6225 "Link up" : "Link down") : "NA"
6226
6227#define trunk_port_fault(__idx)\
6228 bf_get(lpfc_acqe_fc_la_trunk_config_port##__idx, acqe_fc) ?\
6229 (port_fault & (1 << __idx) ? "YES" : "NO") : "NA"
6230
6231static void
6232lpfc_update_trunk_link_status(struct lpfc_hba *phba,
6233 struct lpfc_acqe_fc_la *acqe_fc)
6234{
6235 uint8_t port_fault = bf_get(lpfc_acqe_fc_la_trunk_linkmask, acqe_fc);
6236 uint8_t err = bf_get(lpfc_acqe_fc_la_trunk_fault, acqe_fc);
6237
6238 phba->sli4_hba.link_state.speed =
6239 lpfc_sli4_port_speed_parse(phba, LPFC_TRAILER_CODE_FC,
6240 bf_get(lpfc_acqe_fc_la_speed, acqe_fc));
6241
6242 phba->sli4_hba.link_state.logical_speed =
6243 bf_get(lpfc_acqe_fc_la_llink_spd, acqe_fc) * 10;
6244
6245 phba->fc_linkspeed =
6246 lpfc_async_link_speed_to_read_top(
6247 phba,
6248 bf_get(lpfc_acqe_fc_la_speed, acqe_fc));
6249
6250 if (bf_get(lpfc_acqe_fc_la_trunk_config_port0, acqe_fc)) {
6251 phba->trunk_link.link0.state =
6252 bf_get(lpfc_acqe_fc_la_trunk_link_status_port0, acqe_fc)
6253 ? LPFC_LINK_UP : LPFC_LINK_DOWN;
6254 phba->trunk_link.link0.fault = port_fault & 0x1 ? err : 0;
6255 }
6256 if (bf_get(lpfc_acqe_fc_la_trunk_config_port1, acqe_fc)) {
6257 phba->trunk_link.link1.state =
6258 bf_get(lpfc_acqe_fc_la_trunk_link_status_port1, acqe_fc)
6259 ? LPFC_LINK_UP : LPFC_LINK_DOWN;
6260 phba->trunk_link.link1.fault = port_fault & 0x2 ? err : 0;
6261 }
6262 if (bf_get(lpfc_acqe_fc_la_trunk_config_port2, acqe_fc)) {
6263 phba->trunk_link.link2.state =
6264 bf_get(lpfc_acqe_fc_la_trunk_link_status_port2, acqe_fc)
6265 ? LPFC_LINK_UP : LPFC_LINK_DOWN;
6266 phba->trunk_link.link2.fault = port_fault & 0x4 ? err : 0;
6267 }
6268 if (bf_get(lpfc_acqe_fc_la_trunk_config_port3, acqe_fc)) {
6269 phba->trunk_link.link3.state =
6270 bf_get(lpfc_acqe_fc_la_trunk_link_status_port3, acqe_fc)
6271 ? LPFC_LINK_UP : LPFC_LINK_DOWN;
6272 phba->trunk_link.link3.fault = port_fault & 0x8 ? err : 0;
6273 }
6274
6275 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
6276 "2910 Async FC Trunking Event - Speed:%d\n"
6277 "\tLogical speed:%d "
6278 "port0: %s port1: %s port2: %s port3: %s\n",
6279 phba->sli4_hba.link_state.speed,
6280 phba->sli4_hba.link_state.logical_speed,
6281 trunk_link_status(0), trunk_link_status(1),
6282 trunk_link_status(2), trunk_link_status(3));
6283
6284 if (phba->cmf_active_mode != LPFC_CFG_OFF)
6285 lpfc_cmf_signal_init(phba);
6286
6287 if (port_fault)
6288 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
6289 "3202 trunk error:0x%x (%s) seen on port0:%s "
				/*
				 * SLI4 trunk error codes above 0xA are
				 * undefined as of now; print a catch-all
				 * message in case the driver needs updating.
				 */
6295 "port1:%s port2:%s port3:%s\n", err, err > 0xA ?
6296 "UNDEFINED. update driver." : trunk_errmsg[err],
6297 trunk_port_fault(0), trunk_port_fault(1),
6298 trunk_port_fault(2), trunk_port_fault(3));
6299}
6300
/**
 * lpfc_sli4_async_fc_evt - Process the asynchronous FC link event
 * @phba: pointer to lpfc hba data structure.
 * @acqe_fc: pointer to the async fc completion queue entry.
 *
 * This routine is to handle the SLI4 asynchronous FC event. It will simply log
 * that the event was received and then issue a read_topology mailbox command so
 * that the rest of the driver will treat it the same as SLI3.
 **/
6311static void
6312lpfc_sli4_async_fc_evt(struct lpfc_hba *phba, struct lpfc_acqe_fc_la *acqe_fc)
6313{
6314 LPFC_MBOXQ_t *pmb;
6315 MAILBOX_t *mb;
6316 struct lpfc_mbx_read_top *la;
6317 int rc;
6318
6319 if (bf_get(lpfc_trailer_type, acqe_fc) !=
6320 LPFC_FC_LA_EVENT_TYPE_FC_LINK) {
6321 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
6322 "2895 Non FC link Event detected.(%d)\n",
6323 bf_get(lpfc_trailer_type, acqe_fc));
6324 return;
6325 }
6326
6327 if (bf_get(lpfc_acqe_fc_la_att_type, acqe_fc) ==
6328 LPFC_FC_LA_TYPE_TRUNKING_EVENT) {
6329 lpfc_update_trunk_link_status(phba, acqe_fc);
6330 return;
6331 }
6332
6333
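	/* Keep the link status for extra SLI4 state machine reference */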
6334 phba->sli4_hba.link_state.speed =
6335 lpfc_sli4_port_speed_parse(phba, LPFC_TRAILER_CODE_FC,
6336 bf_get(lpfc_acqe_fc_la_speed, acqe_fc));
6337 phba->sli4_hba.link_state.duplex = LPFC_ASYNC_LINK_DUPLEX_FULL;
6338 phba->sli4_hba.link_state.topology =
6339 bf_get(lpfc_acqe_fc_la_topology, acqe_fc);
6340 phba->sli4_hba.link_state.status =
6341 bf_get(lpfc_acqe_fc_la_att_type, acqe_fc);
6342 phba->sli4_hba.link_state.type =
6343 bf_get(lpfc_acqe_fc_la_port_type, acqe_fc);
6344 phba->sli4_hba.link_state.number =
6345 bf_get(lpfc_acqe_fc_la_port_number, acqe_fc);
6346 phba->sli4_hba.link_state.fault =
6347 bf_get(lpfc_acqe_link_fault, acqe_fc);
6348
6349 if (bf_get(lpfc_acqe_fc_la_att_type, acqe_fc) ==
6350 LPFC_FC_LA_TYPE_LINK_DOWN)
6351 phba->sli4_hba.link_state.logical_speed = 0;
6352 else if (!phba->sli4_hba.conf_trunk)
6353 phba->sli4_hba.link_state.logical_speed =
6354 bf_get(lpfc_acqe_fc_la_llink_spd, acqe_fc) * 10;
6355
6356 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
6357 "2896 Async FC event - Speed:%dGBaud Topology:x%x "
6358 "LA Type:x%x Port Type:%d Port Number:%d Logical speed:"
6359 "%dMbps Fault:%d\n",
6360 phba->sli4_hba.link_state.speed,
6361 phba->sli4_hba.link_state.topology,
6362 phba->sli4_hba.link_state.status,
6363 phba->sli4_hba.link_state.type,
6364 phba->sli4_hba.link_state.number,
6365 phba->sli4_hba.link_state.logical_speed,
6366 phba->sli4_hba.link_state.fault);
6367 pmb = (LPFC_MBOXQ_t *)mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
6368 if (!pmb) {
6369 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
6370 "2897 The mboxq allocation failed\n");
6371 return;
6372 }
6373 rc = lpfc_mbox_rsrc_prep(phba, pmb);
6374 if (rc) {
6375 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
6376 "2898 The mboxq prep failed\n");
6377 goto out_free_pmb;
6378 }
6379
6380
6381 lpfc_els_flush_all_cmd(phba);
6382
6383
6384 phba->sli4_hba.els_wq->pring->flag |= LPFC_STOP_IOCB_EVENT;
6385
6386
6387 phba->sli.slistat.link_event++;
6388
6389
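	/* Create lpfc_handle_latt mailbox command from link ACQE */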
6390 lpfc_read_topology(phba, pmb, (struct lpfc_dmabuf *)pmb->ctx_buf);
6391 pmb->mbox_cmpl = lpfc_mbx_cmpl_read_topology;
6392 pmb->vport = phba->pport;
6393
6394 if (phba->sli4_hba.link_state.status != LPFC_FC_LA_TYPE_LINK_UP) {
6395 phba->link_flag &= ~(LS_MDS_LINK_DOWN | LS_MDS_LOOPBACK);
6396
6397 switch (phba->sli4_hba.link_state.status) {
6398 case LPFC_FC_LA_TYPE_MDS_LINK_DOWN:
6399 phba->link_flag |= LS_MDS_LINK_DOWN;
6400 break;
6401 case LPFC_FC_LA_TYPE_MDS_LOOPBACK:
6402 phba->link_flag |= LS_MDS_LOOPBACK;
6403 break;
6404 default:
6405 break;
6406 }
6407
6408
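		/* Initialize completion status */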
6409 mb = &pmb->u.mb;
6410 mb->mbxStatus = MBX_SUCCESS;
6411
6412
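		/* Parse port fault information field */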
6413 lpfc_sli4_parse_latt_fault(phba, (void *)acqe_fc);
6414
6415
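		/* Parse and translate link attention fields */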
6416 la = (struct lpfc_mbx_read_top *)&pmb->u.mb.un.varReadTop;
6417 la->eventTag = acqe_fc->event_tag;
6418
6419 if (phba->sli4_hba.link_state.status ==
6420 LPFC_FC_LA_TYPE_UNEXP_WWPN) {
6421 bf_set(lpfc_mbx_read_top_att_type, la,
6422 LPFC_FC_LA_TYPE_UNEXP_WWPN);
6423 } else {
6424 bf_set(lpfc_mbx_read_top_att_type, la,
6425 LPFC_FC_LA_TYPE_LINK_DOWN);
6426 }
6427
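		/* Invoke the mailbox command callback function */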
6428 lpfc_mbx_cmpl_read_topology(phba, pmb);
6429
6430 return;
6431 }
6432
6433 rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT);
6434 if (rc == MBX_NOT_FINISHED)
6435 goto out_free_pmb;
6436 return;
6437
6438out_free_pmb:
6439 lpfc_mbox_rsrc_cleanup(phba, pmb, MBOX_THD_UNLOCKED);
6440}
6441
/**
 * lpfc_sli4_async_sli_evt - Process the asynchronous SLI link event
 * @phba: pointer to lpfc hba data structure.
 * @acqe_sli: pointer to the async SLI completion queue entry.
 *
 * This routine is to handle the SLI4 asynchronous SLI events.
 **/
6449static void
6450lpfc_sli4_async_sli_evt(struct lpfc_hba *phba, struct lpfc_acqe_sli *acqe_sli)
6451{
6452 char port_name;
6453 char message[128];
6454 uint8_t status;
6455 uint8_t evt_type;
6456 uint8_t operational = 0;
6457 struct temp_event temp_event_data;
6458 struct lpfc_acqe_misconfigured_event *misconfigured;
6459 struct lpfc_acqe_cgn_signal *cgn_signal;
6460 struct Scsi_Host *shost;
6461 struct lpfc_vport **vports;
6462 int rc, i, cnt;
6463
6464 evt_type = bf_get(lpfc_trailer_type, acqe_sli);
6465
6466 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
6467 "2901 Async SLI event - Type:%d, Event Data: x%08x "
6468 "x%08x x%08x x%08x\n", evt_type,
6469 acqe_sli->event_data1, acqe_sli->event_data2,
6470 acqe_sli->reserved, acqe_sli->trailer);
6471
6472 port_name = phba->Port[0];
6473 if (port_name == 0x00)
6474 port_name = '?';
6475
6476 switch (evt_type) {
6477 case LPFC_SLI_EVENT_TYPE_OVER_TEMP:
6478 temp_event_data.event_type = FC_REG_TEMPERATURE_EVENT;
6479 temp_event_data.event_code = LPFC_THRESHOLD_TEMP;
6480 temp_event_data.data = (uint32_t)acqe_sli->event_data1;
6481
6482 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
6483 "3190 Over Temperature:%d Celsius- Port Name %c\n",
6484 acqe_sli->event_data1, port_name);
6485
6486 phba->sfp_warning |= LPFC_TRANSGRESSION_HIGH_TEMPERATURE;
6487 shost = lpfc_shost_from_vport(phba->pport);
6488 fc_host_post_vendor_event(shost, fc_get_event_number(),
6489 sizeof(temp_event_data),
6490 (char *)&temp_event_data,
6491 SCSI_NL_VID_TYPE_PCI
6492 | PCI_VENDOR_ID_EMULEX);
6493 break;
6494 case LPFC_SLI_EVENT_TYPE_NORM_TEMP:
6495 temp_event_data.event_type = FC_REG_TEMPERATURE_EVENT;
6496 temp_event_data.event_code = LPFC_NORMAL_TEMP;
6497 temp_event_data.data = (uint32_t)acqe_sli->event_data1;
6498
6499 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
6500 "3191 Normal Temperature:%d Celsius - Port Name %c\n",
6501 acqe_sli->event_data1, port_name);
6502
6503 shost = lpfc_shost_from_vport(phba->pport);
6504 fc_host_post_vendor_event(shost, fc_get_event_number(),
6505 sizeof(temp_event_data),
6506 (char *)&temp_event_data,
6507 SCSI_NL_VID_TYPE_PCI
6508 | PCI_VENDOR_ID_EMULEX);
6509 break;
6510 case LPFC_SLI_EVENT_TYPE_MISCONFIGURED:
6511 misconfigured = (struct lpfc_acqe_misconfigured_event *)
6512 &acqe_sli->event_data1;
6513
6514
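		/* fetch the status for this port */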
6515 switch (phba->sli4_hba.lnk_info.lnk_no) {
6516 case LPFC_LINK_NUMBER_0:
6517 status = bf_get(lpfc_sli_misconfigured_port0_state,
6518 &misconfigured->theEvent);
6519 operational = bf_get(lpfc_sli_misconfigured_port0_op,
6520 &misconfigured->theEvent);
6521 break;
6522 case LPFC_LINK_NUMBER_1:
6523 status = bf_get(lpfc_sli_misconfigured_port1_state,
6524 &misconfigured->theEvent);
6525 operational = bf_get(lpfc_sli_misconfigured_port1_op,
6526 &misconfigured->theEvent);
6527 break;
6528 case LPFC_LINK_NUMBER_2:
6529 status = bf_get(lpfc_sli_misconfigured_port2_state,
6530 &misconfigured->theEvent);
6531 operational = bf_get(lpfc_sli_misconfigured_port2_op,
6532 &misconfigured->theEvent);
6533 break;
6534 case LPFC_LINK_NUMBER_3:
6535 status = bf_get(lpfc_sli_misconfigured_port3_state,
6536 &misconfigured->theEvent);
6537 operational = bf_get(lpfc_sli_misconfigured_port3_op,
6538 &misconfigured->theEvent);
6539 break;
6540 default:
6541 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
6542 "3296 "
6543 "LPFC_SLI_EVENT_TYPE_MISCONFIGURED "
6544 "event: Invalid link %d",
6545 phba->sli4_hba.lnk_info.lnk_no);
6546 return;
6547 }
6548
6549
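		/* Skip the event if the optic state is unchanged */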
6550 if (phba->sli4_hba.lnk_info.optic_state == status)
6551 return;
6552
6553 switch (status) {
6554 case LPFC_SLI_EVENT_STATUS_VALID:
6555 sprintf(message, "Physical Link is functional");
6556 break;
6557 case LPFC_SLI_EVENT_STATUS_NOT_PRESENT:
6558 sprintf(message, "Optics faulted/incorrectly "
6559 "installed/not installed - Reseat optics, "
6560 "if issue not resolved, replace.");
6561 break;
6562 case LPFC_SLI_EVENT_STATUS_WRONG_TYPE:
6563 sprintf(message,
6564 "Optics of two types installed - Remove one "
6565 "optic or install matching pair of optics.");
6566 break;
6567 case LPFC_SLI_EVENT_STATUS_UNSUPPORTED:
6568 sprintf(message, "Incompatible optics - Replace with "
6569 "compatible optics for card to function.");
6570 break;
6571 case LPFC_SLI_EVENT_STATUS_UNQUALIFIED:
6572 sprintf(message, "Unqualified optics - Replace with "
6573 "Avago optics for Warranty and Technical "
6574 "Support - Link is%s operational",
6575 (operational) ? " not" : "");
6576 break;
6577 case LPFC_SLI_EVENT_STATUS_UNCERTIFIED:
6578 sprintf(message, "Uncertified optics - Replace with "
6579 "Avago-certified optics to enable link "
6580 "operation - Link is%s operational",
6581 (operational) ? " not" : "");
6582 break;
6583 default:
6584
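			/* firmware is reporting a status we don't know about */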
6585 sprintf(message, "Unknown event status x%02x", status);
6586 break;
6587 }
6588
6589
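		/* Refresh the supported speeds and port parameters */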
6590 rc = lpfc_sli4_read_config(phba);
6591 if (rc) {
6592 phba->lmt = 0;
6593 lpfc_printf_log(phba, KERN_ERR,
6594 LOG_TRACE_EVENT,
6595 "3194 Unable to retrieve supported "
6596 "speeds, rc = 0x%x\n", rc);
6597 }
6598 rc = lpfc_sli4_refresh_params(phba);
6599 if (rc) {
6600 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
6601 "3174 Unable to update pls support, "
6602 "rc x%x\n", rc);
6603 }
6604 vports = lpfc_create_vport_work_array(phba);
6605 if (vports != NULL) {
6606 for (i = 0; i <= phba->max_vports && vports[i] != NULL;
6607 i++) {
6608 shost = lpfc_shost_from_vport(vports[i]);
6609 lpfc_host_supported_speeds_set(shost);
6610 }
6611 }
6612 lpfc_destroy_vport_work_array(phba, vports);
6613
6614 phba->sli4_hba.lnk_info.optic_state = status;
6615 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
6616 "3176 Port Name %c %s\n", port_name, message);
6617 break;
6618 case LPFC_SLI_EVENT_TYPE_REMOTE_DPORT:
6619 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
6620 "3192 Remote DPort Test Initiated - "
6621 "Event Data1:x%08x Event Data2: x%08x\n",
6622 acqe_sli->event_data1, acqe_sli->event_data2);
6623 break;
6624 case LPFC_SLI_EVENT_TYPE_PORT_PARAMS_CHG:
6625
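		/* Call FW to obtain the active congestion parameters */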
6626 lpfc_sli4_cgn_parm_chg_evt(phba);
6627 break;
6628 case LPFC_SLI_EVENT_TYPE_MISCONF_FAWWN:
		/* Misconfigured WWN. Reports that the SLI Port is configured
		 * to use FA-PWWN, but the attached device doesn't support it.
		 * Event Data1 - N.A, Event Data2 - N.A
		 * This event only happens on the physical port.
		 */
6634 lpfc_log_msg(phba, KERN_WARNING, LOG_SLI | LOG_DISCOVERY,
6635 "2699 Misconfigured FA-PWWN - Attached device "
6636 "does not support FA-PWWN\n");
6637 phba->sli4_hba.fawwpn_flag &= ~LPFC_FAWWPN_FABRIC;
6638 memset(phba->pport->fc_portname.u.wwn, 0,
6639 sizeof(struct lpfc_name));
6640 break;
6641 case LPFC_SLI_EVENT_TYPE_EEPROM_FAILURE:
6642
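		/* EEPROM failure. No driver action is required */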
6643 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
6644 "2518 EEPROM failure - "
6645 "Event Data1: x%08x Event Data2: x%08x\n",
6646 acqe_sli->event_data1, acqe_sli->event_data2);
6647 break;
6648 case LPFC_SLI_EVENT_TYPE_CGN_SIGNAL:
6649 if (phba->cmf_active_mode == LPFC_CFG_OFF)
6650 break;
6651 cgn_signal = (struct lpfc_acqe_cgn_signal *)
6652 &acqe_sli->event_data1;
6653 phba->cgn_acqe_cnt++;
6654
6655 cnt = bf_get(lpfc_warn_acqe, cgn_signal);
6656 atomic64_add(cnt, &phba->cgn_acqe_stat.warn);
6657 atomic64_add(cgn_signal->alarm_cnt, &phba->cgn_acqe_stat.alarm);
6658
		/* Keep track of alarm cnt for CMF_SYNC_WQE */
6662 if (cgn_signal->alarm_cnt) {
6663 if (phba->cgn_reg_signal == EDC_CG_SIG_WARN_ALARM) {
6664
6665 atomic_add(cgn_signal->alarm_cnt,
6666 &phba->cgn_sync_alarm_cnt);
6667 }
6668 } else if (cnt) {
6669
6670 if (phba->cgn_reg_signal == EDC_CG_SIG_WARN_ONLY ||
6671 phba->cgn_reg_signal == EDC_CG_SIG_WARN_ALARM) {
6672
6673 atomic_add(cnt, &phba->cgn_sync_warn_cnt);
6674 }
6675 }
6676 break;
6677 default:
6678 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
6679 "3193 Unrecognized SLI event, type: 0x%x",
6680 evt_type);
6681 break;
6682 }
6683}
6684
/**
 * lpfc_sli4_perform_vport_cvl - Perform clear virtual link on a vport
 * @vport: pointer to vport data structure.
 *
 * This routine is to perform Clear Virtual Link (CVL) on a vport in
 * response to a CVL event.
 *
 * Return the pointer to the ndlp with the vport if successful, otherwise
 * return NULL.
 **/
6695static struct lpfc_nodelist *
6696lpfc_sli4_perform_vport_cvl(struct lpfc_vport *vport)
6697{
6698 struct lpfc_nodelist *ndlp;
6699 struct Scsi_Host *shost;
6700 struct lpfc_hba *phba;
6701
6702 if (!vport)
6703 return NULL;
6704 phba = vport->phba;
6705 if (!phba)
6706 return NULL;
6707 ndlp = lpfc_findnode_did(vport, Fabric_DID);
6708 if (!ndlp) {
6709
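		/* Cannot find existing Fabric ndlp, so allocate a new one */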
6710 ndlp = lpfc_nlp_init(vport, Fabric_DID);
6711 if (!ndlp)
6712 return NULL;
6713
6714 ndlp->nlp_type |= NLP_FABRIC;
6715
6716 lpfc_enqueue_node(vport, ndlp);
6717 }
6718 if ((phba->pport->port_state < LPFC_FLOGI) &&
6719 (phba->pport->port_state != LPFC_VPORT_FAILED))
6720 return NULL;
6721
6722 if ((vport != phba->pport) && (vport->port_state < LPFC_FDISC)
6723 && (vport->port_state != LPFC_VPORT_FAILED))
6724 return NULL;
6725 shost = lpfc_shost_from_vport(vport);
6726 if (!shost)
6727 return NULL;
6728 lpfc_linkdown_port(vport);
6729 lpfc_cleanup_pending_mbox(vport);
6730 spin_lock_irq(shost->host_lock);
6731 vport->fc_flag |= FC_VPORT_CVL_RCVD;
6732 spin_unlock_irq(shost->host_lock);
6733
6734 return ndlp;
6735}
6736
/**
 * lpfc_sli4_perform_all_vport_cvl - Perform clear virtual link on all vports
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is to perform Clear Virtual Link (CVL) on all vports in
 * response to a FCF dead event.
 **/
6744static void
6745lpfc_sli4_perform_all_vport_cvl(struct lpfc_hba *phba)
6746{
6747 struct lpfc_vport **vports;
6748 int i;
6749
6750 vports = lpfc_create_vport_work_array(phba);
6751 if (vports)
6752 for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++)
6753 lpfc_sli4_perform_vport_cvl(vports[i]);
6754 lpfc_destroy_vport_work_array(phba, vports);
6755}
6756
/**
 * lpfc_sli4_async_fip_evt - Process the asynchronous FCoE FIP event
 * @phba: pointer to lpfc hba data structure.
 * @acqe_fip: pointer to the async fcoe completion queue entry.
 *
 * This routine is to handle the SLI4 asynchronous fcoe event.
 **/
6764static void
6765lpfc_sli4_async_fip_evt(struct lpfc_hba *phba,
6766 struct lpfc_acqe_fip *acqe_fip)
6767{
6768 uint8_t event_type = bf_get(lpfc_trailer_type, acqe_fip);
6769 int rc;
6770 struct lpfc_vport *vport;
6771 struct lpfc_nodelist *ndlp;
6772 int active_vlink_present;
6773 struct lpfc_vport **vports;
6774 int i;
6775
6776 phba->fc_eventTag = acqe_fip->event_tag;
6777 phba->fcoe_eventtag = acqe_fip->event_tag;
6778 switch (event_type) {
6779 case LPFC_FIP_EVENT_TYPE_NEW_FCF:
6780 case LPFC_FIP_EVENT_TYPE_FCF_PARAM_MOD:
6781 if (event_type == LPFC_FIP_EVENT_TYPE_NEW_FCF)
6782 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
6783 "2546 New FCF event, evt_tag:x%x, "
6784 "index:x%x\n",
6785 acqe_fip->event_tag,
6786 acqe_fip->index);
6787 else
6788 lpfc_printf_log(phba, KERN_WARNING, LOG_FIP |
6789 LOG_DISCOVERY,
6790 "2788 FCF param modified event, "
6791 "evt_tag:x%x, index:x%x\n",
6792 acqe_fip->event_tag,
6793 acqe_fip->index);
6794 if (phba->fcf.fcf_flag & FCF_DISCOVERY) {
			/* During period of FCF discovery, read the FCF
			 * table record indexed by the event to update
			 * FCF roundrobin failover eligible FCF bmask.
			 */
6800 lpfc_printf_log(phba, KERN_INFO, LOG_FIP |
6801 LOG_DISCOVERY,
6802 "2779 Read FCF (x%x) for updating "
6803 "roundrobin FCF failover bmask\n",
6804 acqe_fip->index);
6805 rc = lpfc_sli4_read_fcf_rec(phba, acqe_fip->index);
6806 }
6807
6808
6809 spin_lock_irq(&phba->hbalock);
6810 if (phba->hba_flag & FCF_TS_INPROG) {
6811 spin_unlock_irq(&phba->hbalock);
6812 break;
6813 }
6814
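		/* If fast FCF failover rescan event is pending, do nothing */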
6815 if (phba->fcf.fcf_flag & (FCF_REDISC_EVT | FCF_REDISC_PEND)) {
6816 spin_unlock_irq(&phba->hbalock);
6817 break;
6818 }
6819
6820
6821 if (phba->fcf.fcf_flag & FCF_SCAN_DONE) {
6822 spin_unlock_irq(&phba->hbalock);
6823 break;
6824 }
6825 spin_unlock_irq(&phba->hbalock);
6826
6827
6828 lpfc_printf_log(phba, KERN_INFO, LOG_FIP | LOG_DISCOVERY,
6829 "2770 Start FCF table scan per async FCF "
6830 "event, evt_tag:x%x, index:x%x\n",
6831 acqe_fip->event_tag, acqe_fip->index);
6832 rc = lpfc_sli4_fcf_scan_read_fcf_rec(phba,
6833 LPFC_FCOE_FCF_GET_FIRST);
6834 if (rc)
6835 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
6836 "2547 Issue FCF scan read FCF mailbox "
6837 "command failed (x%x)\n", rc);
6838 break;
6839
6840 case LPFC_FIP_EVENT_TYPE_FCF_TABLE_FULL:
6841 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
6842 "2548 FCF Table full count 0x%x tag 0x%x\n",
6843 bf_get(lpfc_acqe_fip_fcf_count, acqe_fip),
6844 acqe_fip->event_tag);
6845 break;
6846
6847 case LPFC_FIP_EVENT_TYPE_FCF_DEAD:
6848 phba->fcoe_cvl_eventtag = acqe_fip->event_tag;
6849 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
6850 "2549 FCF (x%x) disconnected from network, "
6851 "tag:x%x\n", acqe_fip->index,
6852 acqe_fip->event_tag);
6853
		/* If we are in the middle of FCF failover process, clear
		 * the corresponding FCF bit in the roundrobin bitmap.
		 */
6857 spin_lock_irq(&phba->hbalock);
6858 if ((phba->fcf.fcf_flag & FCF_DISCOVERY) &&
6859 (phba->fcf.current_rec.fcf_indx != acqe_fip->index)) {
6860 spin_unlock_irq(&phba->hbalock);
6861
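			/* Update FLOGI FCF failover eligible FCF bmask */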
6862 lpfc_sli4_fcf_rr_index_clear(phba, acqe_fip->index);
6863 break;
6864 }
6865 spin_unlock_irq(&phba->hbalock);
6866
6867
6868 if (phba->fcf.current_rec.fcf_indx != acqe_fip->index)
6869 break;
6870
		/* Otherwise, request the port to rediscover the entire FCF
		 * table for a fast recovery from case that the current FCF
		 * is no longer valid as we are not in the middle of FCF
		 * failover process already.
		 */
6877 spin_lock_irq(&phba->hbalock);
6878
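		/* Mark the fast FCF failover process in progress */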
6879 phba->fcf.fcf_flag |= FCF_DEAD_DISC;
6880 spin_unlock_irq(&phba->hbalock);
6881
6882 lpfc_printf_log(phba, KERN_INFO, LOG_FIP | LOG_DISCOVERY,
6883 "2771 Start FCF fast failover process due to "
6884 "FCF DEAD event: evt_tag:x%x, fcf_index:x%x "
6885 "\n", acqe_fip->event_tag, acqe_fip->index);
6886 rc = lpfc_sli4_redisc_fcf_table(phba);
6887 if (rc) {
6888 lpfc_printf_log(phba, KERN_ERR, LOG_FIP |
6889 LOG_TRACE_EVENT,
6890 "2772 Issue FCF rediscover mailbox "
6891 "command failed, fail through to FCF "
6892 "dead event\n");
6893 spin_lock_irq(&phba->hbalock);
6894 phba->fcf.fcf_flag &= ~FCF_DEAD_DISC;
6895 spin_unlock_irq(&phba->hbalock);
6896
			/* Last resort will fail over by treating this
			 * as a link down to FCF registration.
			 */
6900 lpfc_sli4_fcf_dead_failthrough(phba);
6901 } else {
6902
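			/* Reset FCF roundrobin bmask for new discovery */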
6903 lpfc_sli4_clear_fcf_rr_bmask(phba);
6904
			/* Handling fast FCF failover to a DEAD FCF event is
			 * considered equivalent to receiving CVL on all vports.
			 */
6908 lpfc_sli4_perform_all_vport_cvl(phba);
6909 }
6910 break;
6911 case LPFC_FIP_EVENT_TYPE_CVL:
6912 phba->fcoe_cvl_eventtag = acqe_fip->event_tag;
6913 lpfc_printf_log(phba, KERN_ERR,
6914 LOG_TRACE_EVENT,
6915 "2718 Clear Virtual Link Received for VPI 0x%x"
6916 " tag 0x%x\n", acqe_fip->index, acqe_fip->event_tag);
6917
6918 vport = lpfc_find_vport_by_vpid(phba,
6919 acqe_fip->index);
6920 ndlp = lpfc_sli4_perform_vport_cvl(vport);
6921 if (!ndlp)
6922 break;
6923 active_vlink_present = 0;
6924
6925 vports = lpfc_create_vport_work_array(phba);
6926 if (vports) {
6927 for (i = 0; i <= phba->max_vports && vports[i] != NULL;
6928 i++) {
6929 if ((!(vports[i]->fc_flag &
6930 FC_VPORT_CVL_RCVD)) &&
6931 (vports[i]->port_state > LPFC_FDISC)) {
6932 active_vlink_present = 1;
6933 break;
6934 }
6935 }
6936 lpfc_destroy_vport_work_array(phba, vports);
6937 }
		/*
		 * Don't re-instantiate if vport is marked for deletion.
		 * If we are here first then vport_delete is going to wait
		 * for discovery to complete.
		 */
		if (!(vport->load_flag & FC_UNLOADING) &&
		    active_vlink_present) {
			/*
			 * If there are other active VLinks present,
			 * re-instantiate the Vlink using FDISC.
			 */
			mod_timer(&ndlp->nlp_delayfunc,
				  jiffies + msecs_to_jiffies(1000));
			spin_lock_irq(&ndlp->lock);
			ndlp->nlp_flag |= NLP_DELAY_TMO;
			spin_unlock_irq(&ndlp->lock);
			ndlp->nlp_last_elscmd = ELS_CMD_FDISC;
			vport->port_state = LPFC_FDISC;
		} else {
			/*
			 * Otherwise, we request port to rediscover
			 * the entire FCF table for a fast recovery
			 * from possible case that the current FCF
			 * is no longer valid if we are not already
			 * in the FCF failover process.
			 */
			spin_lock_irq(&phba->hbalock);
			if (phba->fcf.fcf_flag & FCF_DISCOVERY) {
				spin_unlock_irq(&phba->hbalock);
				break;
			}
			/* Mark the fast failover process in progress */
6971 phba->fcf.fcf_flag |= FCF_ACVL_DISC;
6972 spin_unlock_irq(&phba->hbalock);
6973 lpfc_printf_log(phba, KERN_INFO, LOG_FIP |
6974 LOG_DISCOVERY,
6975 "2773 Start FCF failover per CVL, "
6976 "evt_tag:x%x\n", acqe_fip->event_tag);
6977 rc = lpfc_sli4_redisc_fcf_table(phba);
			if (rc) {
				lpfc_printf_log(phba, KERN_ERR, LOG_FIP |
						LOG_TRACE_EVENT,
						"2774 Issue FCF rediscover "
						"mailbox command failed, "
						"fail through to CVL event\n");
				spin_lock_irq(&phba->hbalock);
				phba->fcf.fcf_flag &= ~FCF_ACVL_DISC;
				spin_unlock_irq(&phba->hbalock);
				/*
				 * Last resort will be re-try on the
				 * current registered FCF entry.
				 */
				lpfc_retry_pport_discovery(phba);
			} else {
				/*
				 * Reset FCF roundrobin bmask for new
				 * discovery.
				 */
				lpfc_sli4_clear_fcf_rr_bmask(phba);
			}
		}
6999 break;
7000 default:
7001 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
7002 "0288 Unknown FCoE event type 0x%x event tag "
7003 "0x%x\n", event_type, acqe_fip->event_tag);
7004 break;
7005 }
7006}
7007
/**
 * lpfc_sli4_async_dcbx_evt - Process the asynchronous dcbx event
 * @phba: pointer to lpfc hba data structure.
 * @acqe_dcbx: pointer to the async dcbx completion queue entry.
 *
 * This routine is to handle the SLI4 asynchronous dcbx event.
 **/
7015static void
7016lpfc_sli4_async_dcbx_evt(struct lpfc_hba *phba,
7017 struct lpfc_acqe_dcbx *acqe_dcbx)
7018{
7019 phba->fc_eventTag = acqe_dcbx->event_tag;
7020 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
7021 "0290 The SLI4 DCBX asynchronous event is not "
7022 "handled yet\n");
7023}
7024
/**
 * lpfc_sli4_async_grp5_evt - Process the asynchronous group5 event
 * @phba: pointer to lpfc hba data structure.
 * @acqe_grp5: pointer to the async grp5 completion queue entry.
 *
 * This routine is to handle the SLI4 asynchronous grp5 event. A grp5 event
 * is an asynchronous notification of a logical link speed change.
 **/
7034static void
7035lpfc_sli4_async_grp5_evt(struct lpfc_hba *phba,
7036 struct lpfc_acqe_grp5 *acqe_grp5)
7037{
7038 uint16_t prev_ll_spd;
7039
7040 phba->fc_eventTag = acqe_grp5->event_tag;
7041 phba->fcoe_eventtag = acqe_grp5->event_tag;
7042 prev_ll_spd = phba->sli4_hba.link_state.logical_speed;
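	/* The ACQE reports logical link speed in units of 10 Mbps */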
7043 phba->sli4_hba.link_state.logical_speed =
7044 (bf_get(lpfc_acqe_grp5_llink_spd, acqe_grp5)) * 10;
7045 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
7046 "2789 GRP5 Async Event: Updating logical link speed "
7047 "from %dMbps to %dMbps\n", prev_ll_spd,
7048 phba->sli4_hba.link_state.logical_speed);
7049}
7050
/**
 * lpfc_sli4_async_cmstat_evt - Process the asynchronous cmstat event
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is to handle the SLI4 asynchronous cmstat event. A cmstat
 * event is an asynchronous notification that the firmware congestion
 * statistics have changed.
 **/
7058static void
7059lpfc_sli4_async_cmstat_evt(struct lpfc_hba *phba)
7060{
7061 if (!phba->cgn_i)
7062 return;
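	/* FW signaled a congestion statistics change; re-init driver stats */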
7063 lpfc_init_congestion_stat(phba);
7064}
7065
/**
 * lpfc_cgn_params_val - Validate FW congestion parameters.
 * @phba: pointer to lpfc hba data structure.
 * @p_cfg_param: pointer to FW provided congestion parameters.
 *
 * This routine validates the congestion parameters passed
 * by the FW to the driver via an ACQE event.
 **/
7074static void
7075lpfc_cgn_params_val(struct lpfc_hba *phba, struct lpfc_cgn_param *p_cfg_param)
7076{
7077 spin_lock_irq(&phba->hbalock);
7078
7079 if (!lpfc_rangecheck(p_cfg_param->cgn_param_mode, LPFC_CFG_OFF,
7080 LPFC_CFG_MONITOR)) {
7081 lpfc_printf_log(phba, KERN_ERR, LOG_CGN_MGMT,
7082 "6225 CMF mode param out of range: %d\n",
7083 p_cfg_param->cgn_param_mode);
7084 p_cfg_param->cgn_param_mode = LPFC_CFG_OFF;
7085 }
7086
7087 spin_unlock_irq(&phba->hbalock);
7088}
7089
/**
 * lpfc_cgn_params_parse - Process a FW cong parm change event
 * @phba: pointer to lpfc hba data structure.
 * @p_cgn_param: pointer to a data buffer with the FW cong params.
 * @len: the size of pdata in bytes.
 *
 * This routine validates the congestion management buffer signature
 * from the FW, validates the contents and makes corrections for
 * valid, in-range values.  If the signature magic is correct and
 * after parameter validation, the contents are copied to the driver's
 * @phba structure. If the magic is incorrect, an error message is
 * logged.
 **/
7103static void
7104lpfc_cgn_params_parse(struct lpfc_hba *phba,
7105 struct lpfc_cgn_param *p_cgn_param, uint32_t len)
7106{
7107 struct lpfc_cgn_info *cp;
7108 uint32_t crc, oldmode;
7109
	/* Make sure the FW has encoded the correct magic number to
	 * validate the congestion parameter in FW memory.
	 */
7113 if (p_cgn_param->cgn_param_magic == LPFC_CFG_PARAM_MAGIC_NUM) {
7114 lpfc_printf_log(phba, KERN_INFO, LOG_CGN_MGMT | LOG_INIT,
7115 "4668 FW cgn parm buffer data: "
7116 "magic 0x%x version %d mode %d "
7117 "level0 %d level1 %d "
7118 "level2 %d byte13 %d "
7119 "byte14 %d byte15 %d "
7120 "byte11 %d byte12 %d activeMode %d\n",
7121 p_cgn_param->cgn_param_magic,
7122 p_cgn_param->cgn_param_version,
7123 p_cgn_param->cgn_param_mode,
7124 p_cgn_param->cgn_param_level0,
7125 p_cgn_param->cgn_param_level1,
7126 p_cgn_param->cgn_param_level2,
7127 p_cgn_param->byte13,
7128 p_cgn_param->byte14,
7129 p_cgn_param->byte15,
7130 p_cgn_param->byte11,
7131 p_cgn_param->byte12,
7132 phba->cmf_active_mode);

		oldmode = phba->cmf_active_mode;

		/* Any parameters out of range are corrected to defaults
		 * by this routine. No need to fail.
		 */
		lpfc_cgn_params_val(phba, p_cgn_param);

		/* Parameters are valid, save them in the driver instance */
		spin_lock_irq(&phba->hbalock);
		memcpy(&phba->cgn_p, p_cgn_param,
		       sizeof(struct lpfc_cgn_param));

		/* Update parameters in congestion info buffer now */
7147 if (phba->cgn_i) {
7148 cp = (struct lpfc_cgn_info *)phba->cgn_i->virt;
7149 cp->cgn_info_mode = phba->cgn_p.cgn_param_mode;
7150 cp->cgn_info_level0 = phba->cgn_p.cgn_param_level0;
7151 cp->cgn_info_level1 = phba->cgn_p.cgn_param_level1;
7152 cp->cgn_info_level2 = phba->cgn_p.cgn_param_level2;
7153 crc = lpfc_cgn_calc_crc32(cp, LPFC_CGN_INFO_SZ,
7154 LPFC_CGN_CRC32_SEED);
7155 cp->cgn_info_crc = cpu_to_le32(crc);
7156 }
7157 spin_unlock_irq(&phba->hbalock);
7158
7159 phba->cmf_active_mode = phba->cgn_p.cgn_param_mode;
7160
7161 switch (oldmode) {
7162 case LPFC_CFG_OFF:
7163 if (phba->cgn_p.cgn_param_mode != LPFC_CFG_OFF) {
7164
7165 lpfc_cmf_start(phba);
7166
7167 if (phba->link_state >= LPFC_LINK_UP) {
7168 phba->cgn_reg_fpin =
7169 phba->cgn_init_reg_fpin;
7170 phba->cgn_reg_signal =
7171 phba->cgn_init_reg_signal;
7172 lpfc_issue_els_edc(phba->pport, 0);
7173 }
7174 }
7175 break;
7176 case LPFC_CFG_MANAGED:
7177 switch (phba->cgn_p.cgn_param_mode) {
7178 case LPFC_CFG_OFF:
7179
7180 lpfc_cmf_stop(phba);
7181 if (phba->link_state >= LPFC_LINK_UP)
7182 lpfc_issue_els_edc(phba->pport, 0);
7183 break;
7184 case LPFC_CFG_MONITOR:
				lpfc_printf_log(phba, KERN_INFO, LOG_CGN_MGMT,
						"4661 Switch from MANAGED to "
						"MONITOR mode\n");
7188 phba->cmf_max_bytes_per_interval =
7189 phba->cmf_link_byte_count;

				/* Resume any blocked IO - unblock on workqueue */
7192 queue_work(phba->wq,
7193 &phba->unblock_request_work);
7194 break;
7195 }
7196 break;
7197 case LPFC_CFG_MONITOR:
7198 switch (phba->cgn_p.cgn_param_mode) {
7199 case LPFC_CFG_OFF:
7200
7201 lpfc_cmf_stop(phba);
7202 if (phba->link_state >= LPFC_LINK_UP)
7203 lpfc_issue_els_edc(phba->pport, 0);
7204 break;
7205 case LPFC_CFG_MANAGED:
7206 lpfc_printf_log(phba, KERN_INFO, LOG_CGN_MGMT,
7207 "4662 Switch from MONITOR to "
7208 "MANAGED mode\n");
7209 lpfc_cmf_signal_init(phba);
7210 break;
7211 }
7212 break;
7213 }
7214 } else {
7215 lpfc_printf_log(phba, KERN_ERR, LOG_CGN_MGMT | LOG_INIT,
7216 "4669 FW cgn parm buf wrong magic 0x%x "
7217 "version %d\n", p_cgn_param->cgn_param_magic,
7218 p_cgn_param->cgn_param_version);
7219 }
7220}
7221
/**
 * lpfc_sli4_cgn_params_read - Read and Validate FW congestion parameters.
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine issues a read_object mailbox command to
 * get the congestion management parameters from the FW,
 * parses it and updates the driver maintained values.
 *
 * Returns
 *  0     if the object was empty
 *  -Eval if an error was encountered
 *  Count if bytes were read from object
 **/
7235int
7236lpfc_sli4_cgn_params_read(struct lpfc_hba *phba)
7237{
7238 int ret = 0;
7239 struct lpfc_cgn_param *p_cgn_param = NULL;
7240 u32 *pdata = NULL;
7241 u32 len = 0;

	/* Find out if the FW has a new set of congestion parameters. */
	len = sizeof(struct lpfc_cgn_param);
	pdata = kzalloc(len, GFP_KERNEL);
	if (!pdata)
		return -ENOMEM;
	ret = lpfc_read_object(phba, (char *)LPFC_PORT_CFG_NAME,
			       pdata, len);

	/* 0 means no data.  A negative means error.  A positive means
	 * bytes were copied.
	 */
	if (!ret) {
		lpfc_printf_log(phba, KERN_ERR, LOG_CGN_MGMT | LOG_INIT,
				"4670 CGN RD OBJ returns no data\n");
		goto rd_obj_err;
	} else if (ret < 0) {
		/* Some error.  Just exit and return it */
		goto rd_obj_err;
	}

	lpfc_printf_log(phba, KERN_INFO, LOG_CGN_MGMT | LOG_INIT,
			"6234 READ CGN PARAMS Successful %d\n", len);

	/* Parse data pointer over len bytes */
7268 p_cgn_param = (struct lpfc_cgn_param *)pdata;
7269 lpfc_cgn_params_parse(phba, p_cgn_param, len);
7270
7271 rd_obj_err:
7272 kfree(pdata);
7273 return ret;
7274}
7275
/**
 * lpfc_sli4_cgn_parm_chg_evt - Process a FW congestion param change event
 * @phba: pointer to lpfc hba data structure.
 *
 * The FW generated Async ACQE SLI event calls this routine when
 * the event type is an SLI Internal Port Event and the Event Code
 * indicates a change to the FW maintained congestion parameters.
 *
 * This routine executes a Read_Object mailbox call to obtain the
 * current congestion parameters maintained in FW and corrects
 * the driver's active congestion parameters.
 *
 * The acqe event is not passed because there is no further data
 * required.
 *
 * Returns nonzero error if event processing encountered an error.
 * Zero otherwise for success.
 **/
7294static int
7295lpfc_sli4_cgn_parm_chg_evt(struct lpfc_hba *phba)
7296{
7297 int ret = 0;
7298
7299 if (!phba->sli4_hba.pc_sli4_params.cmf) {
7300 lpfc_printf_log(phba, KERN_ERR, LOG_CGN_MGMT | LOG_INIT,
7301 "4664 Cgn Evt when E2E off. Drop event\n");
7302 return -EACCES;
7303 }
7304
	/* A congestion parameter change event requires the driver to re-read
	 * the FW maintained congestion parameter object.
	 */
7309 ret = lpfc_sli4_cgn_params_read(phba);
7310 if (ret < 0) {
7311 lpfc_printf_log(phba, KERN_ERR, LOG_CGN_MGMT | LOG_INIT,
7312 "4667 Error reading Cgn Params (%d)\n",
7313 ret);
7314 } else if (!ret) {
7315 lpfc_printf_log(phba, KERN_ERR, LOG_CGN_MGMT | LOG_INIT,
7316 "4673 CGN Event empty object.\n");
7317 }
7318 return ret;
7319}
7320
/**
 * lpfc_sli4_async_event_proc - Process all the pending asynchronous event
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked by the worker thread to process all the pending
 * SLI4 asynchronous events.
 **/
7328void lpfc_sli4_async_event_proc(struct lpfc_hba *phba)
7329{
7330 struct lpfc_cq_event *cq_event;
7331 unsigned long iflags;

	/* First, declare the async event has been handled */
	spin_lock_irqsave(&phba->hbalock, iflags);
	phba->hba_flag &= ~ASYNC_EVENT;
	spin_unlock_irqrestore(&phba->hbalock, iflags);

	/* Now, handle all the async events */
	spin_lock_irqsave(&phba->sli4_hba.asynce_list_lock, iflags);
	while (!list_empty(&phba->sli4_hba.sp_asynce_work_queue)) {
		list_remove_head(&phba->sli4_hba.sp_asynce_work_queue,
				 cq_event, struct lpfc_cq_event, list);
		spin_unlock_irqrestore(&phba->sli4_hba.asynce_list_lock,
				       iflags);

		/* Process the asynchronous event */
7347 switch (bf_get(lpfc_trailer_code, &cq_event->cqe.mcqe_cmpl)) {
7348 case LPFC_TRAILER_CODE_LINK:
7349 lpfc_sli4_async_link_evt(phba,
7350 &cq_event->cqe.acqe_link);
7351 break;
7352 case LPFC_TRAILER_CODE_FCOE:
7353 lpfc_sli4_async_fip_evt(phba, &cq_event->cqe.acqe_fip);
7354 break;
7355 case LPFC_TRAILER_CODE_DCBX:
7356 lpfc_sli4_async_dcbx_evt(phba,
7357 &cq_event->cqe.acqe_dcbx);
7358 break;
7359 case LPFC_TRAILER_CODE_GRP5:
7360 lpfc_sli4_async_grp5_evt(phba,
7361 &cq_event->cqe.acqe_grp5);
7362 break;
7363 case LPFC_TRAILER_CODE_FC:
7364 lpfc_sli4_async_fc_evt(phba, &cq_event->cqe.acqe_fc);
7365 break;
7366 case LPFC_TRAILER_CODE_SLI:
7367 lpfc_sli4_async_sli_evt(phba, &cq_event->cqe.acqe_sli);
7368 break;
7369 case LPFC_TRAILER_CODE_CMSTAT:
7370 lpfc_sli4_async_cmstat_evt(phba);
7371 break;
7372 default:
7373 lpfc_printf_log(phba, KERN_ERR,
7374 LOG_TRACE_EVENT,
7375 "1804 Invalid asynchronous event code: "
7376 "x%x\n", bf_get(lpfc_trailer_code,
7377 &cq_event->cqe.mcqe_cmpl));
7378 break;
7379 }

		/* Free the completion event processed to the free pool */
7382 lpfc_sli4_cq_event_release(phba, cq_event);
7383 spin_lock_irqsave(&phba->sli4_hba.asynce_list_lock, iflags);
7384 }
7385 spin_unlock_irqrestore(&phba->sli4_hba.asynce_list_lock, iflags);
7386}
7387
/**
 * lpfc_sli4_fcf_redisc_event_proc - Process fcf table rediscovery event
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked by the worker thread to process FCF table
 * rediscovery pending completion event.
 **/
7395void lpfc_sli4_fcf_redisc_event_proc(struct lpfc_hba *phba)
7396{
7397 int rc;
7398
	spin_lock_irq(&phba->hbalock);
	/* Clear FCF rediscovery timeout event */
	phba->fcf.fcf_flag &= ~FCF_REDISC_EVT;
	/* Clear driver fast failover FCF record flag */
	phba->fcf.failover_rec.flag = 0;
	/* Set state for FCF fast failover */
	phba->fcf.fcf_flag |= FCF_REDISC_FOV;
	spin_unlock_irq(&phba->hbalock);

	/* Scan FCF table from the first entry to re-discover SAN */
7409 lpfc_printf_log(phba, KERN_INFO, LOG_FIP | LOG_DISCOVERY,
7410 "2777 Start post-quiescent FCF table scan\n");
7411 rc = lpfc_sli4_fcf_scan_read_fcf_rec(phba, LPFC_FCOE_FCF_GET_FIRST);
7412 if (rc)
7413 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
7414 "2747 Issue FCF scan read FCF mailbox "
7415 "command failed 0x%x\n", rc);
7416}
7417
/**
 * lpfc_api_table_setup - Set up per hba pci-device group func api jump table
 * @phba: pointer to lpfc hba data structure.
 * @dev_grp: The HBA PCI-Device group number.
 *
 * This routine is invoked to set up the per HBA PCI-Device group function
 * API jump table entries.
 *
 * Return: 0 if success, otherwise -ENODEV
 **/
7428int
7429lpfc_api_table_setup(struct lpfc_hba *phba, uint8_t dev_grp)
7430{
7431 int rc;

	/* Set up lpfc PCI-device group */
	phba->pci_dev_grp = dev_grp;

	/* The LPFC_PCI_DEV_OC uses SLI4 */
	if (dev_grp == LPFC_PCI_DEV_OC)
		phba->sli_rev = LPFC_SLI_REV4;

	/* Set up device INIT API function jump table */
	rc = lpfc_init_api_table_setup(phba, dev_grp);
7442 if (rc)
7443 return -ENODEV;
7444
7445 rc = lpfc_scsi_api_table_setup(phba, dev_grp);
7446 if (rc)
7447 return -ENODEV;
7448
7449 rc = lpfc_sli_api_table_setup(phba, dev_grp);
7450 if (rc)
7451 return -ENODEV;
7452
7453 rc = lpfc_mbox_api_table_setup(phba, dev_grp);
7454 if (rc)
7455 return -ENODEV;
7456
7457 return 0;
7458}
7459
/**
 * lpfc_log_intr_mode - Log the active interrupt mode
 * @phba: pointer to lpfc hba data structure.
 * @intr_mode: active interrupt mode adopted.
 *
 * This routine is invoked to log the currently used active interrupt mode
 * to the device.
 **/
7468static void lpfc_log_intr_mode(struct lpfc_hba *phba, uint32_t intr_mode)
7469{
7470 switch (intr_mode) {
7471 case 0:
7472 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
7473 "0470 Enable INTx interrupt mode.\n");
7474 break;
7475 case 1:
7476 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
7477 "0481 Enabled MSI interrupt mode.\n");
7478 break;
7479 case 2:
7480 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
7481 "0480 Enabled MSI-X interrupt mode.\n");
7482 break;
7483 default:
7484 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
7485 "0482 Illegal interrupt mode.\n");
7486 break;
7487 }
7488 return;
7489}
7490
/**
 * lpfc_enable_pci_dev - Enable a generic PCI device.
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked to enable the PCI device that is common to all
 * PCI devices.
 *
 * Return codes
 *	0 - successful
 *	other values - error
 **/
7502static int
7503lpfc_enable_pci_dev(struct lpfc_hba *phba)
7504{
7505 struct pci_dev *pdev;

	/* Obtain PCI device reference */
	if (!phba->pcidev)
		goto out_error;
	pdev = phba->pcidev;
	/* Enable PCI device */
	if (pci_enable_device_mem(pdev))
		goto out_error;
	/* Request PCI resource for the device */
	if (pci_request_mem_regions(pdev, LPFC_DRIVER_NAME))
		goto out_disable_device;
	/* Set up device as PCI master and save state for EEH */
	pci_set_master(pdev);
	pci_try_set_mwi(pdev);
	pci_save_state(pdev);

	/* PCIe EEH recovery on powerpc platforms needs fundamental reset */
	if (pci_is_pcie(pdev))
		pdev->needs_freset = 1;
7526
7527 return 0;
7528
7529out_disable_device:
7530 pci_disable_device(pdev);
7531out_error:
7532 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
7533 "1401 Failed to enable pci device\n");
7534 return -ENODEV;
7535}
7536
/**
 * lpfc_disable_pci_dev - Disable a generic PCI device.
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked to disable the PCI device that is common to all
 * PCI devices.
 **/
7544static void
7545lpfc_disable_pci_dev(struct lpfc_hba *phba)
7546{
7547 struct pci_dev *pdev;

	/* Obtain PCI device reference */
	if (!phba->pcidev)
		return;
	pdev = phba->pcidev;
7554
7555 pci_release_mem_regions(pdev);
7556 pci_disable_device(pdev);
7557
7558 return;
7559}
7560
/**
 * lpfc_reset_hba - Reset a hba
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked to reset a hba device. It brings the hba
 * offline, performs a board restart, and then brings the board back
 * online. The lpfc_offline calls lpfc_sli_hba_down which will clean up
 * on outstanding mailbox commands.
 **/
7570void
7571lpfc_reset_hba(struct lpfc_hba *phba)
7572{
	/* If resets are disabled then set error state and return. */
	if (!phba->cfg_enable_hba_reset) {
		phba->link_state = LPFC_HBA_ERROR;
		return;
	}

	/* If not LPFC_SLI_ACTIVE, force all IO to be flushed */
	if (phba->sli.sli_flag & LPFC_SLI_ACTIVE) {
7581 lpfc_offline_prep(phba, LPFC_MBX_WAIT);
7582 } else {
7583 lpfc_offline_prep(phba, LPFC_MBX_NO_WAIT);
7584 lpfc_sli_flush_io_rings(phba);
7585 }
7586 lpfc_offline(phba);
7587 lpfc_sli_brdrestart(phba);
7588 lpfc_online(phba);
7589 lpfc_unblock_mgmt_io(phba);
7590}
7591
/**
 * lpfc_sli_sriov_nr_virtfn_get - Get the number of sr-iov virtual functions
 * @phba: pointer to lpfc hba data structure.
 *
 * This function reads the PCI SR-IOV extended capability of the physical
 * function to determine the total number of virtual functions the device
 * supports.
 *
 * Return: the number of supported virtual functions, or 0 if the device
 * has no SR-IOV capability.
 **/
7602uint16_t
7603lpfc_sli_sriov_nr_virtfn_get(struct lpfc_hba *phba)
7604{
7605 struct pci_dev *pdev = phba->pcidev;
7606 uint16_t nr_virtfn;
7607 int pos;
7608
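	/* Locate the PCIe SR-IOV extended capability, if any */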
7609 pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_SRIOV);
7610 if (pos == 0)
7611 return 0;
7612
7613 pci_read_config_word(pdev, pos + PCI_SRIOV_TOTAL_VF, &nr_virtfn);
7614 return nr_virtfn;
7615}
7616
/**
 * lpfc_sli_probe_sriov_nr_virtfn - Enable a number of sr-iov virtual functions
 * @phba: pointer to lpfc hba data structure.
 * @nr_vfn: number of virtual functions to be enabled.
 *
 * This function enables the PCI SR-IOV virtual functions to a physical
 * function. It invokes the PCI SR-IOV api with the @nr_vfn provided to
 * enable the number of virtual functions to the physical function. As
 * not all devices support SR-IOV, the return code from the call could
 * be used to determine if the device supports SR-IOV or not.
 **/
7628int
7629lpfc_sli_probe_sriov_nr_virtfn(struct lpfc_hba *phba, int nr_vfn)
7630{
7631 struct pci_dev *pdev = phba->pcidev;
7632 uint16_t max_nr_vfn;
7633 int rc;
7634
7635 max_nr_vfn = lpfc_sli_sriov_nr_virtfn_get(phba);
7636 if (nr_vfn > max_nr_vfn) {
		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
				"3057 Requested vfs (%d) greater than "
				"supported vfs (%d)\n", nr_vfn, max_nr_vfn);
7640 return -EINVAL;
7641 }
7642
7643 rc = pci_enable_sriov(pdev, nr_vfn);
	if (rc) {
		lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
				"2806 Failed to enable sriov on this device "
				"with vfn number nr_vf:%d, rc:%d\n",
				nr_vfn, rc);
	} else {
		lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
				"2807 Successful enable sriov on this device "
				"with vfn number nr_vf:%d\n", nr_vfn);
	}
	return rc;
7654}
7655
7656static void
7657lpfc_unblock_requests_work(struct work_struct *work)
7658{
7659 struct lpfc_hba *phba = container_of(work, struct lpfc_hba,
7660 unblock_request_work);
7661
7662 lpfc_unblock_requests(phba);
7663}
7664
/**
 * lpfc_setup_driver_resource_phase1 - Phase1 setup driver internal resources.
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked to set up the driver internal resources before the
 * device specific resource setup to support the HBA device it attached to.
 *
 * Return codes
 *	0 - successful
 *	other values - error
 **/
7676static int
7677lpfc_setup_driver_resource_phase1(struct lpfc_hba *phba)
7678{
7679 struct lpfc_sli *psli = &phba->sli;
7680
	/*
	 * Driver resources common to all SLI revisions
	 */
7684 atomic_set(&phba->fast_event_count, 0);
7685 atomic_set(&phba->dbg_log_idx, 0);
7686 atomic_set(&phba->dbg_log_cnt, 0);
7687 atomic_set(&phba->dbg_log_dmping, 0);
7688 spin_lock_init(&phba->hbalock);

	/* Initialize port_list spinlock */
	spin_lock_init(&phba->port_list_lock);
	INIT_LIST_HEAD(&phba->port_list);

	INIT_LIST_HEAD(&phba->work_list);
	init_waitqueue_head(&phba->wait_4_mlo_m_q);

	/* Initialize the wait queue head for the kernel thread */
	init_waitqueue_head(&phba->work_waitq);
7699
7700 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
7701 "1403 Protocols supported %s %s %s\n",
7702 ((phba->cfg_enable_fc4_type & LPFC_ENABLE_FCP) ?
7703 "SCSI" : " "),
7704 ((phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) ?
7705 "NVME" : " "),
7706 (phba->nvmet_support ? "NVMET" : " "));

	/* Initialize the scsi buffer list used by driver for scsi IO */
	spin_lock_init(&phba->scsi_buf_list_get_lock);
	INIT_LIST_HEAD(&phba->lpfc_scsi_buf_list_get);
	spin_lock_init(&phba->scsi_buf_list_put_lock);
	INIT_LIST_HEAD(&phba->lpfc_scsi_buf_list_put);

	/* Initialize the fabric iocb list */
	INIT_LIST_HEAD(&phba->fabric_iocb_list);

	/* Initialize list to save ELS buffers */
	INIT_LIST_HEAD(&phba->elsbuf);

	/* Initialize FCF connection rec list */
	INIT_LIST_HEAD(&phba->fcf_conn_rec_list);

	/* Initialize OAS configuration list */
	spin_lock_init(&phba->devicelock);
	INIT_LIST_HEAD(&phba->luns);

	/* MBOX heartbeat timer */
	timer_setup(&psli->mbox_tmo, lpfc_mbox_timeout, 0);
	/* Fabric block timer */
	timer_setup(&phba->fabric_block_timer, lpfc_fabric_block_timeout, 0);
	/* EA polling mode timer */
	timer_setup(&phba->eratt_poll, lpfc_poll_eratt, 0);
	/* Heartbeat timer */
	timer_setup(&phba->hb_tmofunc, lpfc_hb_timeout, 0);
7735
7736 INIT_DELAYED_WORK(&phba->eq_delay_work, lpfc_hb_eq_delay_work);
7737
7738 INIT_DELAYED_WORK(&phba->idle_stat_delay_work,
7739 lpfc_idle_stat_delay_work);
7740 INIT_WORK(&phba->unblock_request_work, lpfc_unblock_requests_work);
7741 return 0;
7742}
7743
/**
 * lpfc_sli_driver_resource_setup - Setup driver internal resources for SLI3 dev
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked to set up the driver internal resources specific to
 * support the SLI-3 HBA device it attached to.
 *
 * Return codes
 *	0 - successful
 *	other values - error
 **/
7755static int
7756lpfc_sli_driver_resource_setup(struct lpfc_hba *phba)
7757{
7758 int rc, entry_sz;

	/*
	 * Initialize timers used by driver
	 */

	/* FCP polling mode timer */
	timer_setup(&phba->fcp_poll_timer, lpfc_poll_timeout, 0);

	/* Host attention work mask setup */
	phba->work_ha_mask = (HA_ERATT | HA_MBATT | HA_LATT);
	phba->work_ha_mask |= (HA_RXMASK << (LPFC_ELS_RING * 4));

	/* Get all the module params for configuring this host */
	lpfc_get_cfgparam(phba);

	/* Set up phase-1 common device driver resources */
7775 rc = lpfc_setup_driver_resource_phase1(phba);
7776 if (rc)
7777 return -ENODEV;
7778
7779 if (phba->pcidev->device == PCI_DEVICE_ID_HORNET) {
7780 phba->menlo_flag |= HBA_MENLO_SUPPORT;
		/* check for menlo minimum sg count */
7782 if (phba->cfg_sg_seg_cnt < LPFC_DEFAULT_MENLO_SG_SEG_CNT)
7783 phba->cfg_sg_seg_cnt = LPFC_DEFAULT_MENLO_SG_SEG_CNT;
7784 }
7785
7786 if (!phba->sli.sli3_ring)
7787 phba->sli.sli3_ring = kcalloc(LPFC_SLI3_MAX_RING,
7788 sizeof(struct lpfc_sli_ring),
7789 GFP_KERNEL);
7790 if (!phba->sli.sli3_ring)
7791 return -ENOMEM;
7792
	/*
	 * Since lpfc_sg_seg_cnt is module parameter, the sg_dma_buf_size
	 * used to create the sg_dma_buf_pool must be dynamically calculated.
	 */
7798 if (phba->sli_rev == LPFC_SLI_REV4)
7799 entry_sz = sizeof(struct sli4_sge);
7800 else
7801 entry_sz = sizeof(struct ulp_bde64);
7802
7803
7804 if (phba->cfg_enable_bg) {
		/*
		 * The scsi_buf for a T10-DIF I/O will hold the FCP cmnd,
		 * the FCP rsp, and a BDE for each. Since we have no control
		 * over how many protection data segments the SCSI Layer
		 * will hand us (ie: there could be one for every block
		 * in the IO), we just allocate enough BDEs to accommodate
		 * our max amount and we need to limit lpfc_sg_seg_cnt to
		 * minimize the risk of running out.
		 */
7814 phba->cfg_sg_dma_buf_size = sizeof(struct fcp_cmnd) +
7815 sizeof(struct fcp_rsp) +
7816 (LPFC_MAX_SG_SEG_CNT * entry_sz);
7817
7818 if (phba->cfg_sg_seg_cnt > LPFC_MAX_SG_SEG_CNT_DIF)
7819 phba->cfg_sg_seg_cnt = LPFC_MAX_SG_SEG_CNT_DIF;

		/* Total BDEs in BPL for scsi_sg_list */
7822 phba->cfg_total_seg_cnt = LPFC_MAX_SG_SEG_CNT;
7823 } else {
		/*
		 * The scsi_buf for a regular I/O will hold the FCP cmnd,
		 * the FCP rsp, a BDE for each, and a BDE for up to
		 * cfg_sg_seg_cnt data segments.
		 */
7829 phba->cfg_sg_dma_buf_size = sizeof(struct fcp_cmnd) +
7830 sizeof(struct fcp_rsp) +
7831 ((phba->cfg_sg_seg_cnt + 2) * entry_sz);

		/* Total BDEs in BPL for scsi_sg_list */
7834 phba->cfg_total_seg_cnt = phba->cfg_sg_seg_cnt + 2;
7835 }
7836
7837 lpfc_printf_log(phba, KERN_INFO, LOG_INIT | LOG_FCP,
7838 "9088 INIT sg_tablesize:%d dmabuf_size:%d total_bde:%d\n",
7839 phba->cfg_sg_seg_cnt, phba->cfg_sg_dma_buf_size,
7840 phba->cfg_total_seg_cnt);
7841
7842 phba->max_vpi = LPFC_MAX_VPI;

	/* This will be set to correct value after config_port mbox */
	phba->max_vports = 0;

	/*
	 * Initialize the SLI Layer to run with lpfc HBAs.
	 */
7849 lpfc_sli_setup(phba);
7850 lpfc_sli_queue_init(phba);

	/* Allocate device driver memory */
7853 if (lpfc_mem_alloc(phba, BPL_ALIGN_SZ))
7854 return -ENOMEM;
7855
7856 phba->lpfc_sg_dma_buf_pool =
7857 dma_pool_create("lpfc_sg_dma_buf_pool",
7858 &phba->pcidev->dev, phba->cfg_sg_dma_buf_size,
7859 BPL_ALIGN_SZ, 0);
7860
7861 if (!phba->lpfc_sg_dma_buf_pool)
7862 goto fail_free_mem;
7863
7864 phba->lpfc_cmd_rsp_buf_pool =
7865 dma_pool_create("lpfc_cmd_rsp_buf_pool",
7866 &phba->pcidev->dev,
7867 sizeof(struct fcp_cmnd) +
7868 sizeof(struct fcp_rsp),
7869 BPL_ALIGN_SZ, 0);
7870
7871 if (!phba->lpfc_cmd_rsp_buf_pool)
7872 goto fail_free_dma_buf_pool;
7873
	/*
	 * Enable sr-iov virtual functions if supported and configured
	 * through the module parameter.
	 */
7878 if (phba->cfg_sriov_nr_virtfn > 0) {
7879 rc = lpfc_sli_probe_sriov_nr_virtfn(phba,
7880 phba->cfg_sriov_nr_virtfn);
7881 if (rc) {
7882 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
7883 "2808 Requested number of SR-IOV "
7884 "virtual functions (%d) is not "
7885 "supported\n",
7886 phba->cfg_sriov_nr_virtfn);
7887 phba->cfg_sriov_nr_virtfn = 0;
7888 }
7889 }
7890
7891 return 0;
7892
7893fail_free_dma_buf_pool:
7894 dma_pool_destroy(phba->lpfc_sg_dma_buf_pool);
7895 phba->lpfc_sg_dma_buf_pool = NULL;
7896fail_free_mem:
7897 lpfc_mem_free(phba);
7898 return -ENOMEM;
7899}
7900
/**
 * lpfc_sli_driver_resource_unset - Unset drvr internal resources for SLI3 dev
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked to unset the driver internal resources set up
 * specific for supporting the SLI-3 HBA device it attached to.
 **/
7908static void
7909lpfc_sli_driver_resource_unset(struct lpfc_hba *phba)
7910{
	/* Free device driver memory allocated with this module */
7912 lpfc_mem_free_all(phba);
7913
7914 return;
7915}
7916
/**
 * lpfc_sli4_driver_resource_setup - Setup drvr internal resources for SLI4 dev
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked to set up the driver internal resources specific to
 * support the SLI-4 HBA device it attached to.
 *
 * Return codes
 *	0 - successful
 *	other values - error
 **/
7928static int
7929lpfc_sli4_driver_resource_setup(struct lpfc_hba *phba)
7930{
7931 LPFC_MBOXQ_t *mboxq;
7932 MAILBOX_t *mb;
7933 int rc, i, max_buf_size;
7934 int longs;
7935 int extra;
7936 uint64_t wwn;
7937 u32 if_type;
7938 u32 if_fam;
7939
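	/* Record present/possible CPU counts for IRQ vector mapping */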
7940 phba->sli4_hba.num_present_cpu = lpfc_present_cpu;
7941 phba->sli4_hba.num_possible_cpu = cpumask_last(cpu_possible_mask) + 1;
7942 phba->sli4_hba.curr_disp_cpu = 0;

	/* Get all the module params for configuring this host */
	lpfc_get_cfgparam(phba);

	/* Set up phase-1 common device driver resources */
7948 rc = lpfc_setup_driver_resource_phase1(phba);
7949 if (rc)
7950 return -ENODEV;

	/* Before proceed, wait for POST done and device ready */
7953 rc = lpfc_sli4_post_status_check(phba);
7954 if (rc)
7955 return -ENODEV;

	/* The lpfc_wq workqueue for deferred irq use */
	phba->wq = alloc_workqueue("lpfc_wq", WQ_MEM_RECLAIM, 0);
	if (!phba->wq)
		return -ENOMEM;
7961
	/*
	 * Initialize timers used by driver
	 */
	timer_setup(&phba->rrq_tmr, lpfc_rrq_timeout, 0);

	/* FCF rediscover timer */
	timer_setup(&phba->fcf.redisc_wait, lpfc_sli4_fcf_redisc_wait_tmo, 0);

	/* CMF congestion timer */
	hrtimer_init(&phba->cmf_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
	phba->cmf_timer.function = lpfc_cmf_timer;
7974
	/*
	 * Control structure for handling external multi-buffer mailbox
	 * command pass-through.
	 */
7979 memset((uint8_t *)&phba->mbox_ext_buf_ctx, 0,
7980 sizeof(struct lpfc_mbox_ext_buf_ctx));
7981 INIT_LIST_HEAD(&phba->mbox_ext_buf_ctx.ext_dmabuf_list);
7982
7983 phba->max_vpi = LPFC_MAX_VPI;

	/* This will be set to correct value after the read_config mbox */
	phba->max_vports = 0;

	/* Program the default value of vlan_id and fc_map */
	phba->valid_vlan = 0;
	phba->fc_map[0] = LPFC_FCOE_FCF_MAP0;
	phba->fc_map[1] = LPFC_FCOE_FCF_MAP1;
	phba->fc_map[2] = LPFC_FCOE_FCF_MAP2;
7993
	/*
	 * For SLI4, instead of using ring 0 (LPFC_FCP_RING) for FCP commands
	 * we will associate a new ring, for each EQ/CQ/WQ tuple.
	 * The WQ create will allocate the ring.
	 */
8001 INIT_LIST_HEAD(&phba->hbqs[LPFC_ELS_HBQ].hbq_buffer_list);
8002 phba->hbqs[LPFC_ELS_HBQ].hbq_alloc_buffer = lpfc_sli4_rb_alloc;
8003 phba->hbqs[LPFC_ELS_HBQ].hbq_free_buffer = lpfc_sli4_rb_free;

	/* for VMID idle timeout if VMID is enabled */
8006 if (lpfc_is_vmid_enabled(phba))
8007 timer_setup(&phba->inactive_vmid_poll, lpfc_vmid_poll, 0);
8008
	/* Initialize the Abort buffer list used by driver */
8013 spin_lock_init(&phba->sli4_hba.abts_io_buf_list_lock);
8014 INIT_LIST_HEAD(&phba->sli4_hba.lpfc_abts_io_buf_list);
8015
8016 if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) {
		/* Initialize the Abort nvme buffer list used by driver */
8018 spin_lock_init(&phba->sli4_hba.abts_nvmet_buf_list_lock);
8019 INIT_LIST_HEAD(&phba->sli4_hba.lpfc_abts_nvmet_ctx_list);
8020 INIT_LIST_HEAD(&phba->sli4_hba.lpfc_nvmet_io_wait_list);
8021 spin_lock_init(&phba->sli4_hba.t_active_list_lock);
8022 INIT_LIST_HEAD(&phba->sli4_hba.t_active_ctx_list);
8023 }

	/* This abort list used by worker thread */
8026 spin_lock_init(&phba->sli4_hba.sgl_list_lock);
8027 spin_lock_init(&phba->sli4_hba.nvmet_io_wait_lock);
8028 spin_lock_init(&phba->sli4_hba.asynce_list_lock);
8029 spin_lock_init(&phba->sli4_hba.els_xri_abrt_list_lock);

	/*
	 * Initialize driver internal slow-path work queues
	 */

	/* Driver internal slow-path CQ Event pool */
	INIT_LIST_HEAD(&phba->sli4_hba.sp_cqe_event_pool);
	/* Response IOCB work queue list */
	INIT_LIST_HEAD(&phba->sli4_hba.sp_queue_event);
	/* Asynchronous event CQ Event work queue list */
	INIT_LIST_HEAD(&phba->sli4_hba.sp_asynce_work_queue);
	/* Slow-path XRI aborted CQ Event work queue list */
	INIT_LIST_HEAD(&phba->sli4_hba.sp_els_xri_aborted_work_queue);
	/* Receive queue CQ Event work queue list */
	INIT_LIST_HEAD(&phba->sli4_hba.sp_unsol_work_queue);

	/* Initialize extent block lists. */
8047 INIT_LIST_HEAD(&phba->sli4_hba.lpfc_rpi_blk_list);
8048 INIT_LIST_HEAD(&phba->sli4_hba.lpfc_xri_blk_list);
8049 INIT_LIST_HEAD(&phba->sli4_hba.lpfc_vfi_blk_list);
8050 INIT_LIST_HEAD(&phba->lpfc_vpi_blk_list);

	/* Initialize mboxq lists. If the early init routines fail
	 * these lists need to be correctly initialized.
	 */
8055 INIT_LIST_HEAD(&phba->sli.mboxq);
8056 INIT_LIST_HEAD(&phba->sli.mboxq_cmpl);

	/* initialize optic_state to 0xFF */
8059 phba->sli4_hba.lnk_info.optic_state = 0xff;

	/* Allocate device driver memory */
8062 rc = lpfc_mem_alloc(phba, SGL_ALIGN_SZ);
8063 if (rc)
8064 return -ENOMEM;

	/* IF Type 2 ports get initialized now. */
8067 if (bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) >=
8068 LPFC_SLI_INTF_IF_TYPE_2) {
8069 rc = lpfc_pci_function_reset(phba);
8070 if (unlikely(rc)) {
8071 rc = -ENODEV;
8072 goto out_free_mem;
8073 }
8074 phba->temp_sensor_support = 1;
8075 }

	/* Create the bootstrap mailbox command */
8078 rc = lpfc_create_bootstrap_mbox(phba);
8079 if (unlikely(rc))
8080 goto out_free_mem;

	/* Set up the host's endian order with the device. */
8083 rc = lpfc_setup_endian_order(phba);
8084 if (unlikely(rc))
8085 goto out_free_bsmbx;

	/* Set up the hba's configuration parameters. */
8088 rc = lpfc_sli4_read_config(phba);
8089 if (unlikely(rc))
8090 goto out_free_bsmbx;
8091
8092 if (phba->sli4_hba.fawwpn_flag & LPFC_FAWWPN_CONFIG) {
		/* Right now the link is down, if FA-PWWN is configured the
		 * firmware will try FLOGI before the driver gets a link up.
		 * If it fails, the driver should get a MISCONFIGURED async
		 * event which will clear this flag. The only notification
		 * the driver gets is if it fails, if it succeeds there is no
		 * notification given. Assume success.
		 */
8100 phba->sli4_hba.fawwpn_flag |= LPFC_FAWWPN_FABRIC;
8101 }
8102
8103 rc = lpfc_mem_alloc_active_rrq_pool_s4(phba);
8104 if (unlikely(rc))
8105 goto out_free_bsmbx;

	/* IF Type 0 ports get initialized now. */
8108 if (bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) ==
8109 LPFC_SLI_INTF_IF_TYPE_0) {
8110 rc = lpfc_pci_function_reset(phba);
8111 if (unlikely(rc))
8112 goto out_free_bsmbx;
8113 }
8114
8115 mboxq = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool,
8116 GFP_KERNEL);
8117 if (!mboxq) {
8118 rc = -ENOMEM;
8119 goto out_free_bsmbx;
8120 }

	/* Check for NVMET being configured */
8123 phba->nvmet_support = 0;
8124 if (lpfc_enable_nvmet_cnt) {

		/* First get WWN of HBA instance */
8127 lpfc_read_nv(phba, mboxq);
8128 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
8129 if (rc != MBX_SUCCESS) {
			lpfc_printf_log(phba, KERN_ERR,
					LOG_TRACE_EVENT,
					"6016 Mailbox failed, mbxCmd x%x "
					"READ_NV, mbxStatus x%x\n",
8134 bf_get(lpfc_mqe_command, &mboxq->u.mqe),
8135 bf_get(lpfc_mqe_status, &mboxq->u.mqe));
8136 mempool_free(mboxq, phba->mbox_mem_pool);
8137 rc = -EIO;
8138 goto out_free_bsmbx;
8139 }
8140 mb = &mboxq->u.mb;
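		/* wwn is WWNN of HBA instance */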
8141 memcpy(&wwn, (char *)mb->un.varRDnvp.nodename,
8142 sizeof(uint64_t));
8143 wwn = cpu_to_be64(wwn);
8144 phba->sli4_hba.wwnn.u.name = wwn;
8145 memcpy(&wwn, (char *)mb->un.varRDnvp.portname,
8146 sizeof(uint64_t));
		/* wwn is WWPN of HBA instance */
8148 wwn = cpu_to_be64(wwn);
8149 phba->sli4_hba.wwpn.u.name = wwn;

		/* Check to see if it matches any module parameter */
8152 for (i = 0; i < lpfc_enable_nvmet_cnt; i++) {
8153 if (wwn == lpfc_enable_nvmet[i]) {
8154#if (IS_ENABLED(CONFIG_NVME_TARGET_FC))
8155 if (lpfc_nvmet_mem_alloc(phba))
8156 break;
8157
8158 phba->nvmet_support = 1;
8159
8160 lpfc_printf_log(phba, KERN_ERR,
8161 LOG_TRACE_EVENT,
8162 "6017 NVME Target %016llx\n",
8163 wwn);
8164#else
8165 lpfc_printf_log(phba, KERN_ERR,
8166 LOG_TRACE_EVENT,
8167 "6021 Can't enable NVME Target."
8168 " NVME_TARGET_FC infrastructure"
8169 " is not in kernel\n");
8170#endif
				/* Not supported for NVMET */
8172 phba->cfg_xri_rebalancing = 0;
8173 if (phba->irq_chann_mode == NHT_MODE) {
8174 phba->cfg_irq_chann =
8175 phba->sli4_hba.num_present_cpu;
8176 phba->cfg_hdw_queue =
8177 phba->sli4_hba.num_present_cpu;
8178 phba->irq_chann_mode = NORMAL_MODE;
8179 }
8180 break;
8181 }
8182 }
8183 }
8184
8185 lpfc_nvme_mod_param_dep(phba);
8186
	/*
	 * Get sli4 parameters that override parameters from Port capabilities.
	 * If this call fails, it isn't critical unless the SLI4 parameters come
	 * back in conflict.
	 */
8192 rc = lpfc_get_sli4_parameters(phba, mboxq);
8193 if (rc) {
8194 if_type = bf_get(lpfc_sli_intf_if_type,
8195 &phba->sli4_hba.sli_intf);
8196 if_fam = bf_get(lpfc_sli_intf_sli_family,
8197 &phba->sli4_hba.sli_intf);
8198 if (phba->sli4_hba.extents_in_use &&
8199 phba->sli4_hba.rpi_hdrs_in_use) {
8200 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
8201 "2999 Unsupported SLI4 Parameters "
8202 "Extents and RPI headers enabled.\n");
8203 if (if_type == LPFC_SLI_INTF_IF_TYPE_0 &&
8204 if_fam == LPFC_SLI_INTF_FAMILY_BE2) {
8205 mempool_free(mboxq, phba->mbox_mem_pool);
8206 rc = -EIO;
8207 goto out_free_bsmbx;
8208 }
8209 }
8210 if (!(if_type == LPFC_SLI_INTF_IF_TYPE_0 &&
8211 if_fam == LPFC_SLI_INTF_FAMILY_BE2)) {
8212 mempool_free(mboxq, phba->mbox_mem_pool);
8213 rc = -EIO;
8214 goto out_free_bsmbx;
8215 }
8216 }
8217
	/*
	 * 1 for cmd, 1 for rsp, NVME adds an extra one
	 * for boundary conditions in its max_sgl_segment template.
	 */
8222 extra = 2;
8223 if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME)
8224 extra++;
8225
	/*
	 * It doesn't matter what family our adapter is in, we are
	 * limited to 2 Pages, 512 SGEs, for our SGL.
	 * There are going to be 2 reserved SGEs: 1 FCP cmnd + 1 FCP rsp
	 */
8231 max_buf_size = (2 * SLI4_PAGE_SIZE);
8232
	/*
	 * Since lpfc_sg_seg_cnt is module param, the sg_dma_buf_size
	 * used to create the sg_dma_buf_pool must be calculated.
	 */
8237 if (phba->sli3_options & LPFC_SLI3_BG_ENABLED) {
		/* Both cfg_enable_bg and cfg_external_dif code paths */

		/*
		 * The scsi_buf for a T10-DIF I/O holds the FCP cmnd,
		 * the FCP rsp, and a SGE. Since we have no control
		 * over how many protection segments the SCSI Layer
		 * will hand us (ie: there could be one for every block
		 * in the IO), just allocate enough SGEs to accommodate
		 * our max amount and we need to limit lpfc_sg_seg_cnt
		 * to minimize the risk of running out.
		 */
8249 phba->cfg_sg_dma_buf_size = sizeof(struct fcp_cmnd) +
8250 sizeof(struct fcp_rsp) + max_buf_size;

		/* Total SGEs for scsi_sg_list */
8253 phba->cfg_total_seg_cnt = LPFC_MAX_SGL_SEG_CNT;
8254
		/*
		 * If supporting DIF, reduce the seg count for scsi to
		 * allow room for the DIF sges.
		 */
8259 if (phba->cfg_enable_bg &&
8260 phba->cfg_sg_seg_cnt > LPFC_MAX_BG_SLI4_SEG_CNT_DIF)
8261 phba->cfg_scsi_seg_cnt = LPFC_MAX_BG_SLI4_SEG_CNT_DIF;
8262 else
8263 phba->cfg_scsi_seg_cnt = phba->cfg_sg_seg_cnt;
8264
8265 } else {
		/*
		 * The scsi_buf for a regular I/O will hold the FCP cmnd,
		 * the FCP rsp, a SGE for each, and a SGE for up to
		 * cfg_sg_seg_cnt data segments.
		 */
8271 phba->cfg_sg_dma_buf_size = sizeof(struct fcp_cmnd) +
8272 sizeof(struct fcp_rsp) +
8273 ((phba->cfg_sg_seg_cnt + extra) *
8274 sizeof(struct sli4_sge));

		/* Total SGEs for scsi_sg_list and nvme_sgl_list */
8277 phba->cfg_total_seg_cnt = phba->cfg_sg_seg_cnt + extra;
8278 phba->cfg_scsi_seg_cnt = phba->cfg_sg_seg_cnt;
8279
		/*
		 * NOTE: if (phba->cfg_sg_seg_cnt + extra) <= 256 we only
		 * need to post 1 page for the SGL.
		 */
8284 }
8285
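	/* Final sg_dma_buf_size: a fixed size when extended-page SGLs are in
	 * use, otherwise at least the SLI4 minimum, rounded up to a full
	 * SLI4 page.
	 */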
8286 if (phba->cfg_xpsgl && !phba->nvmet_support)
8287 phba->cfg_sg_dma_buf_size = LPFC_DEFAULT_XPSGL_SIZE;
8288 else if (phba->cfg_sg_dma_buf_size <= LPFC_MIN_SG_SLI4_BUF_SZ)
8289 phba->cfg_sg_dma_buf_size = LPFC_MIN_SG_SLI4_BUF_SZ;
8290 else
8291 phba->cfg_sg_dma_buf_size =
8292 SLI4_PAGE_ALIGN(phba->cfg_sg_dma_buf_size);
8293
8294 phba->border_sge_num = phba->cfg_sg_dma_buf_size /
8295 sizeof(struct sli4_sge);

	/* Limit to LPFC_MAX_NVME_SEG_CNT for NVME. */
8298 if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) {
8299 if (phba->cfg_sg_seg_cnt > LPFC_MAX_NVME_SEG_CNT) {
8300 lpfc_printf_log(phba, KERN_INFO, LOG_NVME | LOG_INIT,
8301 "6300 Reducing NVME sg segment "
8302 "cnt to %d\n",
8303 LPFC_MAX_NVME_SEG_CNT);
8304 phba->cfg_nvme_seg_cnt = LPFC_MAX_NVME_SEG_CNT;
8305 } else
8306 phba->cfg_nvme_seg_cnt = phba->cfg_sg_seg_cnt;
8307 }
8308
8309 lpfc_printf_log(phba, KERN_INFO, LOG_INIT | LOG_FCP,
8310 "9087 sg_seg_cnt:%d dmabuf_size:%d "
8311 "total:%d scsi:%d nvme:%d\n",
8312 phba->cfg_sg_seg_cnt, phba->cfg_sg_dma_buf_size,
8313 phba->cfg_total_seg_cnt, phba->cfg_scsi_seg_cnt,
8314 phba->cfg_nvme_seg_cnt);
8315
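	/* Choose the dma_pool alignment: the buffer size itself when below
	 * one SLI4 page, otherwise the SLI4 page size.
	 */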
8316 if (phba->cfg_sg_dma_buf_size < SLI4_PAGE_SIZE)
8317 i = phba->cfg_sg_dma_buf_size;
8318 else
8319 i = SLI4_PAGE_SIZE;
8320
8321 phba->lpfc_sg_dma_buf_pool =
8322 dma_pool_create("lpfc_sg_dma_buf_pool",
8323 &phba->pcidev->dev,
8324 phba->cfg_sg_dma_buf_size,
8325 i, 0);
8326 if (!phba->lpfc_sg_dma_buf_pool)
8327 goto out_free_bsmbx;
8328
8329 phba->lpfc_cmd_rsp_buf_pool =
8330 dma_pool_create("lpfc_cmd_rsp_buf_pool",
8331 &phba->pcidev->dev,
8332 sizeof(struct fcp_cmnd) +
8333 sizeof(struct fcp_rsp),
8334 i, 0);
8335 if (!phba->lpfc_cmd_rsp_buf_pool)
8336 goto out_free_sg_dma_buf;
8337
8338 mempool_free(mboxq, phba->mbox_mem_pool);

	/* Verify OAS is supported */
	lpfc_sli4_oas_verify(phba);

	/* Verify RAS support on adapter */
	lpfc_sli4_ras_init(phba);

	/* Verify all the SLI4 queues */
8347 rc = lpfc_sli4_queue_verify(phba);
8348 if (rc)
8349 goto out_free_cmd_rsp_buf;

	/* Create driver internal CQE event pool */
8352 rc = lpfc_sli4_cq_event_pool_create(phba);
8353 if (rc)
8354 goto out_free_cmd_rsp_buf;

	/* Initialize sgl lists per host */
	lpfc_init_sgl_list(phba);

	/* Allocate and initialize active sgl array */
8360 rc = lpfc_init_active_sgl_array(phba);
8361 if (rc) {
8362 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
8363 "1430 Failed to initialize sgl list.\n");
8364 goto out_destroy_cq_event_pool;
8365 }
8366 rc = lpfc_sli4_init_rpi_hdrs(phba);
8367 if (rc) {
8368 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
8369 "1432 Failed to initialize rpi headers.\n");
8370 goto out_free_active_sgl;
8371 }

	/* Allocate eligible FCF bmask memory for FCF roundrobin failover */
8374 longs = (LPFC_SLI4_FCF_TBL_INDX_MAX + BITS_PER_LONG - 1)/BITS_PER_LONG;
8375 phba->fcf.fcf_rr_bmask = kcalloc(longs, sizeof(unsigned long),
8376 GFP_KERNEL);
8377 if (!phba->fcf.fcf_rr_bmask) {
8378 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
8379 "2759 Failed allocate memory for FCF round "
8380 "robin failover bmask\n");
8381 rc = -ENOMEM;
8382 goto out_remove_rpi_hdrs;
8383 }
8384
8385 phba->sli4_hba.hba_eq_hdl = kcalloc(phba->cfg_irq_chann,
8386 sizeof(struct lpfc_hba_eq_hdl),
8387 GFP_KERNEL);
8388 if (!phba->sli4_hba.hba_eq_hdl) {
8389 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
8390 "2572 Failed allocate memory for "
8391 "fast-path per-EQ handle array\n");
8392 rc = -ENOMEM;
8393 goto out_free_fcf_rr_bmask;
8394 }
8395
8396 phba->sli4_hba.cpu_map = kcalloc(phba->sli4_hba.num_possible_cpu,
8397 sizeof(struct lpfc_vector_map_info),
8398 GFP_KERNEL);
8399 if (!phba->sli4_hba.cpu_map) {
8400 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
8401 "3327 Failed allocate memory for msi-x "
8402 "interrupt vector mapping\n");
8403 rc = -ENOMEM;
8404 goto out_free_hba_eq_hdl;
8405 }
8406
8407 phba->sli4_hba.eq_info = alloc_percpu(struct lpfc_eq_intr_info);
8408 if (!phba->sli4_hba.eq_info) {
8409 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
8410 "3321 Failed allocation for per_cpu stats\n");
8411 rc = -ENOMEM;
8412 goto out_free_hba_cpu_map;
8413 }
8414
8415 phba->sli4_hba.idle_stat = kcalloc(phba->sli4_hba.num_possible_cpu,
8416 sizeof(*phba->sli4_hba.idle_stat),
8417 GFP_KERNEL);
8418 if (!phba->sli4_hba.idle_stat) {
8419 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
8420 "3390 Failed allocation for idle_stat\n");
8421 rc = -ENOMEM;
8422 goto out_free_hba_eq_info;
8423 }
8424
8425#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
8426 phba->sli4_hba.c_stat = alloc_percpu(struct lpfc_hdwq_stat);
8427 if (!phba->sli4_hba.c_stat) {
8428 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
8429 "3332 Failed allocating per cpu hdwq stats\n");
8430 rc = -ENOMEM;
8431 goto out_free_hba_idle_stat;
8432 }
8433#endif
8434
8435 phba->cmf_stat = alloc_percpu(struct lpfc_cgn_stat);
8436 if (!phba->cmf_stat) {
8437 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
8438 "3331 Failed allocating per cpu cgn stats\n");
8439 rc = -ENOMEM;
8440 goto out_free_hba_hdwq_info;
8441 }
8442
	/*
	 * Enable sr-iov virtual functions if supported and configured
	 * through the module parameter.
	 */
8447 if (phba->cfg_sriov_nr_virtfn > 0) {
8448 rc = lpfc_sli_probe_sriov_nr_virtfn(phba,
8449 phba->cfg_sriov_nr_virtfn);
8450 if (rc) {
8451 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
8452 "3020 Requested number of SR-IOV "
8453 "virtual functions (%d) is not "
8454 "supported\n",
8455 phba->cfg_sriov_nr_virtfn);
8456 phba->cfg_sriov_nr_virtfn = 0;
8457 }
8458 }
8459
8460 return 0;
8461
8462out_free_hba_hdwq_info:
8463#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
8464 free_percpu(phba->sli4_hba.c_stat);
8465out_free_hba_idle_stat:
8466#endif
8467 kfree(phba->sli4_hba.idle_stat);
8468out_free_hba_eq_info:
8469 free_percpu(phba->sli4_hba.eq_info);
8470out_free_hba_cpu_map:
8471 kfree(phba->sli4_hba.cpu_map);
8472out_free_hba_eq_hdl:
8473 kfree(phba->sli4_hba.hba_eq_hdl);
8474out_free_fcf_rr_bmask:
8475 kfree(phba->fcf.fcf_rr_bmask);
8476out_remove_rpi_hdrs:
8477 lpfc_sli4_remove_rpi_hdrs(phba);
8478out_free_active_sgl:
8479 lpfc_free_active_sgl(phba);
8480out_destroy_cq_event_pool:
8481 lpfc_sli4_cq_event_pool_destroy(phba);
8482out_free_cmd_rsp_buf:
8483 dma_pool_destroy(phba->lpfc_cmd_rsp_buf_pool);
8484 phba->lpfc_cmd_rsp_buf_pool = NULL;
8485out_free_sg_dma_buf:
8486 dma_pool_destroy(phba->lpfc_sg_dma_buf_pool);
8487 phba->lpfc_sg_dma_buf_pool = NULL;
8488out_free_bsmbx:
8489 lpfc_destroy_bootstrap_mbox(phba);
8490out_free_mem:
8491 lpfc_mem_free(phba);
8492 return rc;
8493}
8494
/**
 * lpfc_sli4_driver_resource_unset - Unset drvr internal resources for SLI4 dev
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked to unset the driver internal resources set up
 * specific for supporting the SLI-4 HBA device it attached to.
 **/
8502static void
8503lpfc_sli4_driver_resource_unset(struct lpfc_hba *phba)
8504{
8505 struct lpfc_fcf_conn_entry *conn_entry, *next_conn_entry;
8506
8507 free_percpu(phba->sli4_hba.eq_info);
8508#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
8509 free_percpu(phba->sli4_hba.c_stat);
8510#endif
8511 free_percpu(phba->cmf_stat);
8512 kfree(phba->sli4_hba.idle_stat);

	/* Free memory allocated for msi-x interrupt vector to CPU mapping */
	kfree(phba->sli4_hba.cpu_map);
	phba->sli4_hba.num_possible_cpu = 0;
	phba->sli4_hba.num_present_cpu = 0;
	phba->sli4_hba.curr_disp_cpu = 0;
	cpumask_clear(&phba->sli4_hba.irq_aff_mask);

	/* Free memory allocated for fast-path work queue handles */
	kfree(phba->sli4_hba.hba_eq_hdl);

	/* Free the allocated rpi headers. */
	lpfc_sli4_remove_rpi_hdrs(phba);
	lpfc_sli4_remove_rpis(phba);

	/* Free eligible FCF index bmask */
	kfree(phba->fcf.fcf_rr_bmask);

	/* Free the ELS sgl list */
	lpfc_free_active_sgl(phba);
	lpfc_free_els_sgl_list(phba);
	lpfc_free_nvmet_sgl_list(phba);

	/* Free the completion queue EQ event pool */
	lpfc_sli4_cq_event_release_all(phba);
	lpfc_sli4_cq_event_pool_destroy(phba);

	/* Release resource identifiers. */
	lpfc_sli4_dealloc_resource_identifiers(phba);

	/* Free the bsmbx region. */
	lpfc_destroy_bootstrap_mbox(phba);

	/* Free the SLI Layer memory with SLI4 HBAs */
	lpfc_mem_free_all(phba);

	/* Free the current connect table */
	list_for_each_entry_safe(conn_entry, next_conn_entry,
				 &phba->fcf_conn_rec_list, list) {
		list_del_init(&conn_entry->list);
		kfree(conn_entry);
	}
8555
8556 return;
8557}
8558
/**
 * lpfc_init_api_table_setup - Set up init api function jump table
 * @phba: pointer to lpfc hba data structure.
 * @dev_grp: The HBA PCI-Device group number.
 *
 * This routine sets up the device INIT interface API function jump table
 * in @phba struct.
 *
 * Returns: 0 - success, -ENODEV - failure.
 **/
8569int
8570lpfc_init_api_table_setup(struct lpfc_hba *phba, uint8_t dev_grp)
8571{
8572 phba->lpfc_hba_init_link = lpfc_hba_init_link;
8573 phba->lpfc_hba_down_link = lpfc_hba_down_link;
8574 phba->lpfc_selective_reset = lpfc_selective_reset;
8575 switch (dev_grp) {
8576 case LPFC_PCI_DEV_LP:
8577 phba->lpfc_hba_down_post = lpfc_hba_down_post_s3;
8578 phba->lpfc_handle_eratt = lpfc_handle_eratt_s3;
8579 phba->lpfc_stop_port = lpfc_stop_port_s3;
8580 break;
8581 case LPFC_PCI_DEV_OC:
8582 phba->lpfc_hba_down_post = lpfc_hba_down_post_s4;
8583 phba->lpfc_handle_eratt = lpfc_handle_eratt_s4;
8584 phba->lpfc_stop_port = lpfc_stop_port_s4;
8585 break;
8586 default:
8587 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
8588 "1431 Invalid HBA PCI-device group: 0x%x\n",
8589 dev_grp);
8590 return -ENODEV;
8591 }
8592 return 0;
8593}
8594
/**
 * lpfc_setup_driver_resource_phase2 - Phase2 setup driver internal resources.
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked to set up the driver internal resources after the
 * device specific resource setup to support the HBA device it attached to.
 *
 * Return codes
 *	0 - successful
 *	other values - error
 **/
8606static int
8607lpfc_setup_driver_resource_phase2(struct lpfc_hba *phba)
8608{
8609 int error;

	/* Startup the kernel thread for this host adapter. */
8612 phba->worker_thread = kthread_run(lpfc_do_work, phba,
8613 "lpfc_worker_%d", phba->brd_no);
8614 if (IS_ERR(phba->worker_thread)) {
8615 error = PTR_ERR(phba->worker_thread);
8616 return error;
8617 }
8618
8619 return 0;
8620}
8621
/**
 * lpfc_unset_driver_resource_phase2 - Phase2 unset driver internal resources
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked to unset the driver internal resources set up after
 * the device specific resource setup for supporting the HBA device it
 * attached to.
 **/
8630static void
8631lpfc_unset_driver_resource_phase2(struct lpfc_hba *phba)
8632{
8633 if (phba->wq) {
8634 destroy_workqueue(phba->wq);
8635 phba->wq = NULL;
8636 }

	/* Stop kernel worker thread */
8639 if (phba->worker_thread)
8640 kthread_stop(phba->worker_thread);
8641}
8642
/**
 * lpfc_free_iocb_list - Free iocb list.
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked to free the driver's IOCB list and memory.
 **/
8649void
8650lpfc_free_iocb_list(struct lpfc_hba *phba)
8651{
8652 struct lpfc_iocbq *iocbq_entry = NULL, *iocbq_next = NULL;
8653
8654 spin_lock_irq(&phba->hbalock);
8655 list_for_each_entry_safe(iocbq_entry, iocbq_next,
8656 &phba->lpfc_iocb_list, list) {
8657 list_del(&iocbq_entry->list);
8658 kfree(iocbq_entry);
8659 phba->total_iocbq_bufs--;
8660 }
8661 spin_unlock_irq(&phba->hbalock);
8662
8663 return;
8664}
8665
/**
 * lpfc_init_iocb_list - Allocate and initialize iocb list.
 * @phba: pointer to lpfc hba data structure.
 * @iocb_count: number of requested iocbs
 *
 * This routine is invoked to allocate and initialize the driver's IOCB
 * list and set up the IOCB tag array accordingly.
 *
 * Return codes
 *	0 - successful
 *	-ENOMEM - allocation failure
 **/
8678int
8679lpfc_init_iocb_list(struct lpfc_hba *phba, int iocb_count)
8680{
8681 struct lpfc_iocbq *iocbq_entry = NULL;
8682 uint16_t iotag;
8683 int i;

	/* Initialize and populate the iocb list per host. */
8686 INIT_LIST_HEAD(&phba->lpfc_iocb_list);
8687 for (i = 0; i < iocb_count; i++) {
8688 iocbq_entry = kzalloc(sizeof(struct lpfc_iocbq), GFP_KERNEL);
8689 if (iocbq_entry == NULL) {
8690 printk(KERN_ERR "%s: only allocated %d iocbs of "
8691 "expected %d count. Unloading driver.\n",
8692 __func__, i, iocb_count);
8693 goto out_free_iocbq;
8694 }
8695
8696 iotag = lpfc_sli_next_iotag(phba, iocbq_entry);
8697 if (iotag == 0) {
8698 kfree(iocbq_entry);
8699 printk(KERN_ERR "%s: failed to allocate IOTAG. "
8700 "Unloading driver.\n", __func__);
8701 goto out_free_iocbq;
8702 }
8703 iocbq_entry->sli4_lxritag = NO_XRI;
8704 iocbq_entry->sli4_xritag = NO_XRI;
8705
8706 spin_lock_irq(&phba->hbalock);
8707 list_add(&iocbq_entry->list, &phba->lpfc_iocb_list);
8708 phba->total_iocbq_bufs++;
8709 spin_unlock_irq(&phba->hbalock);
8710 }
8711
8712 return 0;
8713
8714out_free_iocbq:
8715 lpfc_free_iocb_list(phba);
8716
8717 return -ENOMEM;
8718}
8719
/**
 * lpfc_free_sgl_list - Free a given sgl list.
 * @phba: pointer to lpfc hba data structure.
 * @sglq_list: pointer to the head of sgl list.
 *
 * This routine is invoked to free a given sgl list and memory.
 **/
8727void
8728lpfc_free_sgl_list(struct lpfc_hba *phba, struct list_head *sglq_list)
8729{
8730 struct lpfc_sglq *sglq_entry = NULL, *sglq_next = NULL;
8731
8732 list_for_each_entry_safe(sglq_entry, sglq_next, sglq_list, list) {
8733 list_del(&sglq_entry->list);
8734 lpfc_mbuf_free(phba, sglq_entry->virt, sglq_entry->phys);
8735 kfree(sglq_entry);
8736 }
8737}
8738
/**
 * lpfc_free_els_sgl_list - Free els sgl list.
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked to free the driver's els sgl list and memory.
 **/
8745static void
8746lpfc_free_els_sgl_list(struct lpfc_hba *phba)
8747{
8748 LIST_HEAD(sglq_list);

	/* Retrieve all els sgls from driver list */
	spin_lock_irq(&phba->sli4_hba.sgl_list_lock);
	list_splice_init(&phba->sli4_hba.lpfc_els_sgl_list, &sglq_list);
	spin_unlock_irq(&phba->sli4_hba.sgl_list_lock);

	/* Now free the sgl list */
8756 lpfc_free_sgl_list(phba, &sglq_list);
8757}
8758
/**
 * lpfc_free_nvmet_sgl_list - Free nvmet sgl list.
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked to free the driver's nvmet sgl list and memory.
 **/
8765static void
8766lpfc_free_nvmet_sgl_list(struct lpfc_hba *phba)
8767{
8768 struct lpfc_sglq *sglq_entry = NULL, *sglq_next = NULL;
8769 LIST_HEAD(sglq_list);

	/* Retrieve all nvmet sgls from driver list */
	spin_lock_irq(&phba->hbalock);
	spin_lock(&phba->sli4_hba.sgl_list_lock);
	list_splice_init(&phba->sli4_hba.lpfc_nvmet_sgl_list, &sglq_list);
	spin_unlock(&phba->sli4_hba.sgl_list_lock);
	spin_unlock_irq(&phba->hbalock);

	/* Now free the sgl list */
	list_for_each_entry_safe(sglq_entry, sglq_next, &sglq_list, list) {
		list_del(&sglq_entry->list);
		lpfc_nvmet_buf_free(phba, sglq_entry->virt, sglq_entry->phys);
		kfree(sglq_entry);
	}

	/* Update the nvmet_xri_cnt to reflect no current sgls.
	 * The next initialization cycle sets the count and allocates
	 * the sgls over again.
	 */
	phba->sli4_hba.nvmet_xri_cnt = 0;
8790}
8791
/**
 * lpfc_init_active_sgl_array - Allocate the buf to track active ELS XRIs.
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked to allocate the driver's active sgl memory.
 * This array will hold the sglq_entry's for active IOs.
 **/
8799static int
8800lpfc_init_active_sgl_array(struct lpfc_hba *phba)
8801{
8802 int size;
8803 size = sizeof(struct lpfc_sglq *);
8804 size *= phba->sli4_hba.max_cfg_param.max_xri;
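	/* One active-sglq slot for every XRI the port supports */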
8805
8806 phba->sli4_hba.lpfc_sglq_active_list =
8807 kzalloc(size, GFP_KERNEL);
8808 if (!phba->sli4_hba.lpfc_sglq_active_list)
8809 return -ENOMEM;
8810 return 0;
8811}
8812
/**
 * lpfc_free_active_sgl - Free the buf that tracks active ELS XRIs.
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked to walk through the array of active sglq entries
 * and free all of the resources.
 * This is just a place holder for now.
 **/
8821static void
8822lpfc_free_active_sgl(struct lpfc_hba *phba)
8823{
8824 kfree(phba->sli4_hba.lpfc_sglq_active_list);
8825}
8826
/**
 * lpfc_init_sgl_list - Allocate and initialize sgl list.
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked to allocate and initialize the driver's sgl
 * list and set up the sgl xritag tag array accordingly.
 **/
8835static void
8836lpfc_init_sgl_list(struct lpfc_hba *phba)

	/* Initialize and populate the sglq list per host/VF. */
	INIT_LIST_HEAD(&phba->sli4_hba.lpfc_els_sgl_list);
	INIT_LIST_HEAD(&phba->sli4_hba.lpfc_abts_els_sgl_list);
	INIT_LIST_HEAD(&phba->sli4_hba.lpfc_nvmet_sgl_list);
	INIT_LIST_HEAD(&phba->sli4_hba.lpfc_abts_nvmet_ctx_list);

	/* els xri-sgl book keeping */
	phba->sli4_hba.els_xri_cnt = 0;

	/* nvme xri-buffer book keeping */
	phba->sli4_hba.io_xri_cnt = 0;
8849}
8850
/**
 * lpfc_sli4_init_rpi_hdrs - Post the rpi header memory region to the port
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked to post rpi header templates to the
 * port for those SLI4 ports that do not support extents.  This routine
 * posts a PAGE_SIZE memory region to the port to hold up to
 * PAGE_SIZE modulo 64 rpi context headers.  This is an initialization
 * routine and should be called only when interrupts are disabled.
 *
 * Return codes
 *	0 - successful
 *	-ERROR - otherwise.
 **/
8865int
8866lpfc_sli4_init_rpi_hdrs(struct lpfc_hba *phba)
8867{
8868 int rc = 0;
8869 struct lpfc_rpi_hdr *rpi_hdr;
8870
8871 INIT_LIST_HEAD(&phba->sli4_hba.lpfc_rpi_hdr_list);
8872 if (!phba->sli4_hba.rpi_hdrs_in_use)
8873 return rc;
8874 if (phba->sli4_hba.extents_in_use)
8875 return -EIO;
8876
8877 rpi_hdr = lpfc_sli4_create_rpi_hdr(phba);
8878 if (!rpi_hdr) {
8879 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
8880 "0391 Error during rpi post operation\n");
8881 lpfc_sli4_remove_rpis(phba);
8882 rc = -ENODEV;
8883 }
8884
8885 return rc;
8886}
8887
/**
 * lpfc_sli4_create_rpi_hdr - Allocate an rpi header memory region
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked to allocate a single 4KB memory region to
 * support rpis and stores them in the phba.  This single region
 * provides support for up to 64 rpis.  The region is used globally
 * by the device.
 *
 * Returns:
 *   A valid rpi hdr on success.
 *   A NULL pointer on any failure.
 **/
8901struct lpfc_rpi_hdr *
8902lpfc_sli4_create_rpi_hdr(struct lpfc_hba *phba)
8903{
8904 uint16_t rpi_limit, curr_rpi_range;
8905 struct lpfc_dmabuf *dmabuf;
8906 struct lpfc_rpi_hdr *rpi_hdr;
8907
	/*
	 * If the SLI4 port supports extents, posting the rpi header isn't
	 * required.  Set the expected maximum count and let the actual value
	 * get set when extents are fully allocated.
	 */
8913 if (!phba->sli4_hba.rpi_hdrs_in_use)
8914 return NULL;
8915 if (phba->sli4_hba.extents_in_use)
8916 return NULL;

	/* The limit on the logical index is just the max_rpi count. */
8919 rpi_limit = phba->sli4_hba.max_cfg_param.max_rpi;
8920
8921 spin_lock_irq(&phba->hbalock);
	/*
	 * Establish the starting RPI in this header block.  The starting
	 * rpi is normalized to a zero base because the physical rpi is
	 * port based.
	 */
8927 curr_rpi_range = phba->sli4_hba.next_rpi;
8928 spin_unlock_irq(&phba->hbalock);

	/* Reached full RPI range */
8931 if (curr_rpi_range == rpi_limit)
8932 return NULL;
8933
	/*
	 * First allocate the protocol header region for the port.  The
	 * port expects a 4KB DMA-mapped memory region that is 4K aligned.
	 */
8938 dmabuf = kzalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
8939 if (!dmabuf)
8940 return NULL;
8941
8942 dmabuf->virt = dma_alloc_coherent(&phba->pcidev->dev,
8943 LPFC_HDR_TEMPLATE_SIZE,
8944 &dmabuf->phys, GFP_KERNEL);
8945 if (!dmabuf->virt) {
8946 rpi_hdr = NULL;
8947 goto err_free_dmabuf;
8948 }
8949
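	/* The port requires the rpi header region to be 4KB aligned */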
8950 if (!IS_ALIGNED(dmabuf->phys, LPFC_HDR_TEMPLATE_SIZE)) {
8951 rpi_hdr = NULL;
8952 goto err_free_coherent;
8953 }

	/* Save the rpi header data for cleanup later. */
8956 rpi_hdr = kzalloc(sizeof(struct lpfc_rpi_hdr), GFP_KERNEL);
8957 if (!rpi_hdr)
8958 goto err_free_coherent;
8959
8960 rpi_hdr->dmabuf = dmabuf;
8961 rpi_hdr->len = LPFC_HDR_TEMPLATE_SIZE;
8962 rpi_hdr->page_count = 1;
8963 spin_lock_irq(&phba->hbalock);

	/* The rpi_hdr stores the logical index only. */
8966 rpi_hdr->start_rpi = curr_rpi_range;
8967 rpi_hdr->next_rpi = phba->sli4_hba.next_rpi + LPFC_RPI_HDR_COUNT;
8968 list_add_tail(&rpi_hdr->list, &phba->sli4_hba.lpfc_rpi_hdr_list);
8969
8970 spin_unlock_irq(&phba->hbalock);
8971 return rpi_hdr;
8972
8973 err_free_coherent:
8974 dma_free_coherent(&phba->pcidev->dev, LPFC_HDR_TEMPLATE_SIZE,
8975 dmabuf->virt, dmabuf->phys);
8976 err_free_dmabuf:
8977 kfree(dmabuf);
8978 return NULL;
8979}
8980
/**
 * lpfc_sli4_remove_rpi_hdrs - Remove all rpi header memory regions
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked to remove all memory resources allocated
 * to support rpis for SLI4 ports not supporting extents. This routine
 * presumes the caller has released all rpis consumed by fabric or port
 * logins and is prepared to have the header pages removed.
 **/
8990void
8991lpfc_sli4_remove_rpi_hdrs(struct lpfc_hba *phba)
8992{
8993 struct lpfc_rpi_hdr *rpi_hdr, *next_rpi_hdr;
8994
8995 if (!phba->sli4_hba.rpi_hdrs_in_use)
8996 goto exit;
8997
8998 list_for_each_entry_safe(rpi_hdr, next_rpi_hdr,
8999 &phba->sli4_hba.lpfc_rpi_hdr_list, list) {
9000 list_del(&rpi_hdr->list);
9001 dma_free_coherent(&phba->pcidev->dev, rpi_hdr->len,
9002 rpi_hdr->dmabuf->virt, rpi_hdr->dmabuf->phys);
9003 kfree(rpi_hdr->dmabuf);
9004 kfree(rpi_hdr);
9005 }
9006 exit:
	/* There are no rpis available to the port now. */
9008 phba->sli4_hba.next_rpi = 0;
9009}
9010
/**
 * lpfc_hba_alloc - Allocate driver hba data structure for a device.
 * @pdev: pointer to pci device data structure.
 *
 * This routine is invoked to allocate the driver hba data structure for an
 * HBA device. If the allocation is successful, the phba reference to the
 * PCI device data structure is set.
 *
 * Return codes
 *	pointer to @phba - successful
 *	NULL - error
 **/
9023static struct lpfc_hba *
9024lpfc_hba_alloc(struct pci_dev *pdev)
9025{
9026 struct lpfc_hba *phba;

	/* Allocate memory for HBA structure */
	phba = kzalloc(sizeof(struct lpfc_hba), GFP_KERNEL);
	if (!phba) {
		dev_err(&pdev->dev, "failed to allocate hba struct\n");
		return NULL;
	}

	/* Keep a reference to the PCI device */
	phba->pcidev = pdev;

	/* Assign an unused board number */
9039 phba->brd_no = lpfc_get_instance();
9040 if (phba->brd_no < 0) {
9041 kfree(phba);
9042 return NULL;
9043 }
9044 phba->eratt_poll_interval = LPFC_ERATT_POLL_INTERVAL;
9045
9046 spin_lock_init(&phba->ct_ev_lock);
9047 INIT_LIST_HEAD(&phba->ct_ev_waiters);
9048
9049 return phba;
9050}
9051
/**
 * lpfc_hba_free - Free driver hba data structure with a device.
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked to free the driver hba data structure with an
 * HBA device.
 **/
9059static void
9060lpfc_hba_free(struct lpfc_hba *phba)
9061{
9062 if (phba->sli_rev == LPFC_SLI_REV4)
9063 kfree(phba->sli4_hba.hdwq);

	/* Release the driver assigned board number */
	idr_remove(&lpfc_hba_index, phba->brd_no);

	/* Free memory allocated with sli3 rings */
9069 kfree(phba->sli.sli3_ring);
9070 phba->sli.sli3_ring = NULL;
9071
9072 kfree(phba);
9073 return;
9074}
9075
/**
 * lpfc_setup_fdmi_mask - Setup initial FDMI mask for HBA and Port attributes
 * @vport: pointer to lpfc vport data structure.
 *
 * This routine sets up the initial FDMI attribute masks, selecting the
 * SmartSAN or FDMI2 port attributes when either is enabled by module
 * parameter.
 **/
9085void
9086lpfc_setup_fdmi_mask(struct lpfc_vport *vport)
9087{
9088 struct lpfc_hba *phba = vport->phba;
9089
9090 vport->load_flag |= FC_ALLOW_FDMI;
9091 if (phba->cfg_enable_SmartSAN ||
9092 phba->cfg_fdmi_on == LPFC_FDMI_SUPPORT) {
		/* Setup appropriate attribute masks */
9094 vport->fdmi_hba_mask = LPFC_FDMI2_HBA_ATTR;
9095 if (phba->cfg_enable_SmartSAN)
9096 vport->fdmi_port_mask = LPFC_FDMI2_SMART_ATTR;
9097 else
9098 vport->fdmi_port_mask = LPFC_FDMI2_PORT_ATTR;
9099 }
9100
9101 lpfc_printf_log(phba, KERN_INFO, LOG_DISCOVERY,
9102 "6077 Setup FDMI mask: hba x%x port x%x\n",
9103 vport->fdmi_hba_mask, vport->fdmi_port_mask);
9104}
9105
/**
 * lpfc_create_shost - Create hba physical port with associated scsi host.
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked to create HBA physical port and associate a SCSI
 * host with it.
 *
 * Return codes
 *	0 - successful
 *	other values - error
 **/
9117static int
9118lpfc_create_shost(struct lpfc_hba *phba)
9119{
9120 struct lpfc_vport *vport;
9121 struct Scsi_Host *shost;

	/* Initialize HBA FC structure */
9124 phba->fc_edtov = FF_DEF_EDTOV;
9125 phba->fc_ratov = FF_DEF_RATOV;
9126 phba->fc_altov = FF_DEF_ALTOV;
9127 phba->fc_arbtov = FF_DEF_ARBTOV;
9128
9129 atomic_set(&phba->sdev_cnt, 0);
9130 vport = lpfc_create_port(phba, phba->brd_no, &phba->pcidev->dev);
9131 if (!vport)
9132 return -ENODEV;
9133
9134 shost = lpfc_shost_from_vport(vport);
9135 phba->pport = vport;
9136
9137 if (phba->nvmet_support) {
		/* Only 1 vport (pport) will support NVME target */
9139 phba->targetport = NULL;
9140 phba->cfg_enable_fc4_type = LPFC_ENABLE_NVME;
9141 lpfc_printf_log(phba, KERN_INFO, LOG_INIT | LOG_NVME_DISC,
9142 "6076 NVME Target Found\n");
9143 }
9144
9145 lpfc_debugfs_initialize(vport);
9146
9147 pci_set_drvdata(phba->pcidev, shost);
9148
9149 lpfc_setup_fdmi_mask(vport);
9150
9151
9152
9153
9154
9155 return 0;
9156}
9157
/**
 * lpfc_destroy_shost - Destroy hba physical port with associated scsi host.
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked to destroy HBA physical port and the associated
 * SCSI host.
 **/
9165static void
9166lpfc_destroy_shost(struct lpfc_hba *phba)
9167{
9168 struct lpfc_vport *vport = phba->pport;

	/* Destroy physical port that associated with the SCSI host */
9171 destroy_port(vport);
9172
9173 return;
9174}
9175
/**
 * lpfc_setup_bg - Setup Block guard structures and debug areas.
 * @phba: pointer to lpfc hba data structure.
 * @shost: pointer to SCSI host data structure.
 *
 * This routine sets up the BlockGuard (T10 DIF) protection mask and guard
 * types on the SCSI host, filtering out any values the adapter cannot
 * support.
 **/
9184static void
9185lpfc_setup_bg(struct lpfc_hba *phba, struct Scsi_Host *shost)
9186{
9187 uint32_t old_mask;
9188 uint32_t old_guard;
9189
9190 if (phba->cfg_prot_mask && phba->cfg_prot_guard) {
9191 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
9192 "1478 Registering BlockGuard with the "
9193 "SCSI layer\n");
9194
9195 old_mask = phba->cfg_prot_mask;
9196 old_guard = phba->cfg_prot_guard;

		/* Only allow supported values */
9199 phba->cfg_prot_mask &= (SHOST_DIF_TYPE1_PROTECTION |
9200 SHOST_DIX_TYPE0_PROTECTION |
9201 SHOST_DIX_TYPE1_PROTECTION);
9202 phba->cfg_prot_guard &= (SHOST_DIX_GUARD_IP |
9203 SHOST_DIX_GUARD_CRC);

		/* DIF Type 1 protection for profiles AST2/C32 is end to end */
9206 if (phba->cfg_prot_mask == SHOST_DIX_TYPE1_PROTECTION)
9207 phba->cfg_prot_mask |= SHOST_DIF_TYPE1_PROTECTION;
9208
9209 if (phba->cfg_prot_mask && phba->cfg_prot_guard) {
9210 if ((old_mask != phba->cfg_prot_mask) ||
9211 (old_guard != phba->cfg_prot_guard))
9212 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
9213 "1475 Registering BlockGuard with the "
9214 "SCSI layer: mask %d guard %d\n",
9215 phba->cfg_prot_mask,
9216 phba->cfg_prot_guard);
9217
9218 scsi_host_set_prot(shost, phba->cfg_prot_mask);
9219 scsi_host_set_guard(shost, phba->cfg_prot_guard);
9220 } else
9221 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
9222 "1479 Not Registering BlockGuard with the SCSI "
9223 "layer, Bad protection parameters: %d %d\n",
9224 old_mask, old_guard);
9225 }
9226}
9227
/**
 * lpfc_post_init_setup - Perform necessary device post initialization setup.
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked to perform all the necessary post initialization
 * setup for the device.
 **/
9235static void
9236lpfc_post_init_setup(struct lpfc_hba *phba)
9237{
9238 struct Scsi_Host *shost;
9239 struct lpfc_adapter_event_header adapter_event;

	/* Get the default values for Model Name and Description */
	lpfc_get_hba_model_desc(phba, phba->ModelName, phba->ModelDesc);

	/*
	 * hba setup may have changed the hba_queue_depth so we need to
	 * adjust the value of can_queue.
	 */
9248 shost = pci_get_drvdata(phba->pcidev);
9249 shost->can_queue = phba->cfg_hba_queue_depth - 10;
9250
9251 lpfc_host_attrib_init(shost);
9252
9253 if (phba->cfg_poll & DISABLE_FCP_RING_INT) {
9254 spin_lock_irq(shost->host_lock);
9255 lpfc_poll_start_timer(phba);
9256 spin_unlock_irq(shost->host_lock);
9257 }
9258
9259 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
9260 "0428 Perform SCSI scan\n");

	/* Send board arrival event to upper layer */
9262 adapter_event.event_type = FC_REG_ADAPTER_EVENT;
9263 adapter_event.subcategory = LPFC_EVENT_ARRIVAL;
9264 fc_host_post_vendor_event(shost, fc_get_event_number(),
9265 sizeof(adapter_event),
9266 (char *) &adapter_event,
9267 LPFC_NL_VENDOR_ID);
9268 return;
9269}
9270
/**
 * lpfc_sli_pci_mem_setup - Setup SLI3 HBA PCI memory space.
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked to set up the PCI device memory space for device
 * with SLI-3 interface spec.
 *
 * Return codes
 *	0 - successful
 *	other values - error
 **/
9282static int
9283lpfc_sli_pci_mem_setup(struct lpfc_hba *phba)
9284{
9285 struct pci_dev *pdev = phba->pcidev;
9286 unsigned long bar0map_len, bar2map_len;
9287 int i, hbq_count;
9288 void *ptr;
9289 int error;
9290
9291 if (!pdev)
9292 return -ENODEV;

	/* Set the device DMA mask size */
9295 error = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
9296 if (error)
9297 error = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
9298 if (error)
9299 return error;
9300 error = -ENODEV;

	/* Get the bus address of Bar0 and Bar2 and the number of bytes
	 * required by each mapping.
	 */
9305 phba->pci_bar0_map = pci_resource_start(pdev, 0);
9306 bar0map_len = pci_resource_len(pdev, 0);
9307
9308 phba->pci_bar2_map = pci_resource_start(pdev, 2);
9309 bar2map_len = pci_resource_len(pdev, 2);

	/* Map HBA SLIM to a kernel virtual address. */
9312 phba->slim_memmap_p = ioremap(phba->pci_bar0_map, bar0map_len);
9313 if (!phba->slim_memmap_p) {
9314 dev_printk(KERN_ERR, &pdev->dev,
9315 "ioremap failed for SLIM memory.\n");
9316 goto out;
9317 }

	/* Map HBA Control Registers to a kernel virtual address. */
9320 phba->ctrl_regs_memmap_p = ioremap(phba->pci_bar2_map, bar2map_len);
9321 if (!phba->ctrl_regs_memmap_p) {
9322 dev_printk(KERN_ERR, &pdev->dev,
9323 "ioremap failed for HBA control registers.\n");
9324 goto out_iounmap_slim;
9325 }

	/* Allocate memory for SLI-2 structures */
9328 phba->slim2p.virt = dma_alloc_coherent(&pdev->dev, SLI2_SLIM_SIZE,
9329 &phba->slim2p.phys, GFP_KERNEL);
9330 if (!phba->slim2p.virt)
9331 goto out_iounmap;
9332
9333 phba->mbox = phba->slim2p.virt + offsetof(struct lpfc_sli2_slim, mbx);
9334 phba->mbox_ext = (phba->slim2p.virt +
9335 offsetof(struct lpfc_sli2_slim, mbx_ext_words));
9336 phba->pcb = (phba->slim2p.virt + offsetof(struct lpfc_sli2_slim, pcb));
9337 phba->IOCBs = (phba->slim2p.virt +
9338 offsetof(struct lpfc_sli2_slim, IOCBs));
9339
9340 phba->hbqslimp.virt = dma_alloc_coherent(&pdev->dev,
9341 lpfc_sli_hbq_size(),
9342 &phba->hbqslimp.phys,
9343 GFP_KERNEL);
9344 if (!phba->hbqslimp.virt)
9345 goto out_free_slim;
9346
9347 hbq_count = lpfc_sli_hbq_count();
9348 ptr = phba->hbqslimp.virt;
9349 for (i = 0; i < hbq_count; ++i) {
9350 phba->hbqs[i].hbq_virt = ptr;
9351 INIT_LIST_HEAD(&phba->hbqs[i].hbq_buffer_list);
9352 ptr += (lpfc_hbq_defs[i]->entry_count *
9353 sizeof(struct lpfc_hbq_entry));
9354 }
9355 phba->hbqs[LPFC_ELS_HBQ].hbq_alloc_buffer = lpfc_els_hbq_alloc;
9356 phba->hbqs[LPFC_ELS_HBQ].hbq_free_buffer = lpfc_els_hbq_free;
9357
9358 memset(phba->hbqslimp.virt, 0, lpfc_sli_hbq_size());
9359
9360 phba->MBslimaddr = phba->slim_memmap_p;
9361 phba->HAregaddr = phba->ctrl_regs_memmap_p + HA_REG_OFFSET;
9362 phba->CAregaddr = phba->ctrl_regs_memmap_p + CA_REG_OFFSET;
9363 phba->HSregaddr = phba->ctrl_regs_memmap_p + HS_REG_OFFSET;
9364 phba->HCregaddr = phba->ctrl_regs_memmap_p + HC_REG_OFFSET;
9365
9366 return 0;
9367
9368out_free_slim:
9369 dma_free_coherent(&pdev->dev, SLI2_SLIM_SIZE,
9370 phba->slim2p.virt, phba->slim2p.phys);
9371out_iounmap:
9372 iounmap(phba->ctrl_regs_memmap_p);
9373out_iounmap_slim:
9374 iounmap(phba->slim_memmap_p);
9375out:
9376 return error;
9377}
9378
/**
 * lpfc_sli_pci_mem_unset - Unset SLI3 HBA PCI memory space.
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked to unset the PCI device memory space for device
 * with SLI-3 interface spec.
 **/
9386static void
9387lpfc_sli_pci_mem_unset(struct lpfc_hba *phba)
9388{
9389 struct pci_dev *pdev;

	/* Obtain PCI device reference */
	if (!phba->pcidev)
		return;
	pdev = phba->pcidev;

	/* Free coherent DMA memory allocated */
9398 dma_free_coherent(&pdev->dev, lpfc_sli_hbq_size(),
9399 phba->hbqslimp.virt, phba->hbqslimp.phys);
9400 dma_free_coherent(&pdev->dev, SLI2_SLIM_SIZE,
9401 phba->slim2p.virt, phba->slim2p.phys);

	/* Unmap I/O memory space */
9404 iounmap(phba->ctrl_regs_memmap_p);
9405 iounmap(phba->slim_memmap_p);
9406
9407 return;
9408}
9409
/**
 * lpfc_sli4_post_status_check - Wait for SLI4 POST done and check status
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked to wait for SLI4 device Power On Self Test (POST)
 * done and check status.
 *
 * Return 0 if successful, otherwise -ENODEV.
 **/
9419int
9420lpfc_sli4_post_status_check(struct lpfc_hba *phba)
9421{
9422 struct lpfc_register portsmphr_reg, uerrlo_reg, uerrhi_reg;
9423 struct lpfc_register reg_data;
9424 int i, port_error = 0;
9425 uint32_t if_type;
9426
	memset(&portsmphr_reg, 0, sizeof(portsmphr_reg));
	memset(&reg_data, 0, sizeof(reg_data));
	if (!phba->sli4_hba.PSMPHRregaddr)
		return -ENODEV;

	/* Wait up to 30 seconds for the SLI Port POST done and ready */
9433 for (i = 0; i < 3000; i++) {
9434 if (lpfc_readl(phba->sli4_hba.PSMPHRregaddr,
9435 &portsmphr_reg.word0) ||
9436 (bf_get(lpfc_port_smphr_perr, &portsmphr_reg))) {
			/* Port has a fatal POST error, break out */
9438 port_error = -ENODEV;
9439 break;
9440 }
9441 if (LPFC_POST_STAGE_PORT_READY ==
9442 bf_get(lpfc_port_smphr_port_status, &portsmphr_reg))
9443 break;
9444 msleep(10);
9445 }
9446
	/*
	 * If there was a port error during POST, then don't proceed with
	 * other register reads as the data may not be valid.  Just exit.
	 */
9451 if (port_error) {
9452 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
9453 "1408 Port Failed POST - portsmphr=0x%x, "
9454 "perr=x%x, sfi=x%x, nip=x%x, ipc=x%x, scr1=x%x, "
9455 "scr2=x%x, hscratch=x%x, pstatus=x%x\n",
9456 portsmphr_reg.word0,
9457 bf_get(lpfc_port_smphr_perr, &portsmphr_reg),
9458 bf_get(lpfc_port_smphr_sfi, &portsmphr_reg),
9459 bf_get(lpfc_port_smphr_nip, &portsmphr_reg),
9460 bf_get(lpfc_port_smphr_ipc, &portsmphr_reg),
9461 bf_get(lpfc_port_smphr_scr1, &portsmphr_reg),
9462 bf_get(lpfc_port_smphr_scr2, &portsmphr_reg),
9463 bf_get(lpfc_port_smphr_host_scratch, &portsmphr_reg),
9464 bf_get(lpfc_port_smphr_port_status, &portsmphr_reg));
9465 } else {
9466 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
9467 "2534 Device Info: SLIFamily=0x%x, "
9468 "SLIRev=0x%x, IFType=0x%x, SLIHint_1=0x%x, "
9469 "SLIHint_2=0x%x, FT=0x%x\n",
9470 bf_get(lpfc_sli_intf_sli_family,
9471 &phba->sli4_hba.sli_intf),
9472 bf_get(lpfc_sli_intf_slirev,
9473 &phba->sli4_hba.sli_intf),
9474 bf_get(lpfc_sli_intf_if_type,
9475 &phba->sli4_hba.sli_intf),
9476 bf_get(lpfc_sli_intf_sli_hint1,
9477 &phba->sli4_hba.sli_intf),
9478 bf_get(lpfc_sli_intf_sli_hint2,
9479 &phba->sli4_hba.sli_intf),
9480 bf_get(lpfc_sli_intf_func_type,
9481 &phba->sli4_hba.sli_intf));
9482
		/*
		 * Check for other Port errors during the initialization
		 * process.  Fail the load if the port did not come up
		 * correctly.
		 */
9487 if_type = bf_get(lpfc_sli_intf_if_type,
9488 &phba->sli4_hba.sli_intf);
9489 switch (if_type) {
9490 case LPFC_SLI_INTF_IF_TYPE_0:
9491 phba->sli4_hba.ue_mask_lo =
9492 readl(phba->sli4_hba.u.if_type0.UEMASKLOregaddr);
9493 phba->sli4_hba.ue_mask_hi =
9494 readl(phba->sli4_hba.u.if_type0.UEMASKHIregaddr);
9495 uerrlo_reg.word0 =
9496 readl(phba->sli4_hba.u.if_type0.UERRLOregaddr);
9497 uerrhi_reg.word0 =
9498 readl(phba->sli4_hba.u.if_type0.UERRHIregaddr);
9499 if ((~phba->sli4_hba.ue_mask_lo & uerrlo_reg.word0) ||
9500 (~phba->sli4_hba.ue_mask_hi & uerrhi_reg.word0)) {
9501 lpfc_printf_log(phba, KERN_ERR,
9502 LOG_TRACE_EVENT,
9503 "1422 Unrecoverable Error "
9504 "Detected during POST "
9505 "uerr_lo_reg=0x%x, "
9506 "uerr_hi_reg=0x%x, "
9507 "ue_mask_lo_reg=0x%x, "
9508 "ue_mask_hi_reg=0x%x\n",
9509 uerrlo_reg.word0,
9510 uerrhi_reg.word0,
9511 phba->sli4_hba.ue_mask_lo,
9512 phba->sli4_hba.ue_mask_hi);
9513 port_error = -ENODEV;
9514 }
9515 break;
9516 case LPFC_SLI_INTF_IF_TYPE_2:
9517 case LPFC_SLI_INTF_IF_TYPE_6:
9518
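		/* Final checks: the port status register should be clean */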
		if (lpfc_readl(phba->sli4_hba.u.if_type2.STATUSregaddr,
			       &reg_data.word0) ||
		    (bf_get(lpfc_sliport_status_err, &reg_data) &&
		     !bf_get(lpfc_sliport_status_rn, &reg_data))) {
9523 phba->work_status[0] =
9524 readl(phba->sli4_hba.u.if_type2.
9525 ERR1regaddr);
9526 phba->work_status[1] =
9527 readl(phba->sli4_hba.u.if_type2.
9528 ERR2regaddr);
9529 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
9530 "2888 Unrecoverable port error "
9531 "following POST: port status reg "
9532 "0x%x, port_smphr reg 0x%x, "
9533 "error 1=0x%x, error 2=0x%x\n",
9534 reg_data.word0,
9535 portsmphr_reg.word0,
9536 phba->work_status[0],
9537 phba->work_status[1]);
9538 port_error = -ENODEV;
9539 break;
9540 }
9541
9542 if (lpfc_pldv_detect &&
9543 bf_get(lpfc_sli_intf_sli_family,
9544 &phba->sli4_hba.sli_intf) ==
9545 LPFC_SLI_INTF_FAMILY_G6)
9546 pci_write_config_byte(phba->pcidev,
9547 LPFC_SLI_INTF, CFG_PLD);
9548 break;
9549 case LPFC_SLI_INTF_IF_TYPE_1:
9550 default:
9551 break;
9552 }
9553 }
9554 return port_error;
9555}
9556
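/**
 * lpfc_sli4_bar0_register_memmap - Set up SLI4 BAR0 register memory map
 * @phba: pointer to lpfc hba data structure.
 * @if_type: The SLI4 interface type getting configured.
 *
 * This routine is invoked to set up the register memory map for the BAR0
 * PCI config space mapped registers, based on the given interface type.
 **/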
9565static void
9566lpfc_sli4_bar0_register_memmap(struct lpfc_hba *phba, uint32_t if_type)
9567{
9568 switch (if_type) {
9569 case LPFC_SLI_INTF_IF_TYPE_0:
9570 phba->sli4_hba.u.if_type0.UERRLOregaddr =
9571 phba->sli4_hba.conf_regs_memmap_p + LPFC_UERR_STATUS_LO;
9572 phba->sli4_hba.u.if_type0.UERRHIregaddr =
9573 phba->sli4_hba.conf_regs_memmap_p + LPFC_UERR_STATUS_HI;
9574 phba->sli4_hba.u.if_type0.UEMASKLOregaddr =
9575 phba->sli4_hba.conf_regs_memmap_p + LPFC_UE_MASK_LO;
9576 phba->sli4_hba.u.if_type0.UEMASKHIregaddr =
9577 phba->sli4_hba.conf_regs_memmap_p + LPFC_UE_MASK_HI;
9578 phba->sli4_hba.SLIINTFregaddr =
9579 phba->sli4_hba.conf_regs_memmap_p + LPFC_SLI_INTF;
9580 break;
9581 case LPFC_SLI_INTF_IF_TYPE_2:
9582 phba->sli4_hba.u.if_type2.EQDregaddr =
9583 phba->sli4_hba.conf_regs_memmap_p +
9584 LPFC_CTL_PORT_EQ_DELAY_OFFSET;
9585 phba->sli4_hba.u.if_type2.ERR1regaddr =
9586 phba->sli4_hba.conf_regs_memmap_p +
9587 LPFC_CTL_PORT_ER1_OFFSET;
9588 phba->sli4_hba.u.if_type2.ERR2regaddr =
9589 phba->sli4_hba.conf_regs_memmap_p +
9590 LPFC_CTL_PORT_ER2_OFFSET;
9591 phba->sli4_hba.u.if_type2.CTRLregaddr =
9592 phba->sli4_hba.conf_regs_memmap_p +
9593 LPFC_CTL_PORT_CTL_OFFSET;
9594 phba->sli4_hba.u.if_type2.STATUSregaddr =
9595 phba->sli4_hba.conf_regs_memmap_p +
9596 LPFC_CTL_PORT_STA_OFFSET;
9597 phba->sli4_hba.SLIINTFregaddr =
9598 phba->sli4_hba.conf_regs_memmap_p + LPFC_SLI_INTF;
9599 phba->sli4_hba.PSMPHRregaddr =
9600 phba->sli4_hba.conf_regs_memmap_p +
9601 LPFC_CTL_PORT_SEM_OFFSET;
9602 phba->sli4_hba.RQDBregaddr =
9603 phba->sli4_hba.conf_regs_memmap_p +
9604 LPFC_ULP0_RQ_DOORBELL;
9605 phba->sli4_hba.WQDBregaddr =
9606 phba->sli4_hba.conf_regs_memmap_p +
9607 LPFC_ULP0_WQ_DOORBELL;
9608 phba->sli4_hba.CQDBregaddr =
9609 phba->sli4_hba.conf_regs_memmap_p + LPFC_EQCQ_DOORBELL;
9610 phba->sli4_hba.EQDBregaddr = phba->sli4_hba.CQDBregaddr;
9611 phba->sli4_hba.MQDBregaddr =
9612 phba->sli4_hba.conf_regs_memmap_p + LPFC_MQ_DOORBELL;
9613 phba->sli4_hba.BMBXregaddr =
9614 phba->sli4_hba.conf_regs_memmap_p + LPFC_BMBX;
9615 break;
9616 case LPFC_SLI_INTF_IF_TYPE_6:
9617 phba->sli4_hba.u.if_type2.EQDregaddr =
9618 phba->sli4_hba.conf_regs_memmap_p +
9619 LPFC_CTL_PORT_EQ_DELAY_OFFSET;
9620 phba->sli4_hba.u.if_type2.ERR1regaddr =
9621 phba->sli4_hba.conf_regs_memmap_p +
9622 LPFC_CTL_PORT_ER1_OFFSET;
9623 phba->sli4_hba.u.if_type2.ERR2regaddr =
9624 phba->sli4_hba.conf_regs_memmap_p +
9625 LPFC_CTL_PORT_ER2_OFFSET;
9626 phba->sli4_hba.u.if_type2.CTRLregaddr =
9627 phba->sli4_hba.conf_regs_memmap_p +
9628 LPFC_CTL_PORT_CTL_OFFSET;
9629 phba->sli4_hba.u.if_type2.STATUSregaddr =
9630 phba->sli4_hba.conf_regs_memmap_p +
9631 LPFC_CTL_PORT_STA_OFFSET;
9632 phba->sli4_hba.PSMPHRregaddr =
9633 phba->sli4_hba.conf_regs_memmap_p +
9634 LPFC_CTL_PORT_SEM_OFFSET;
9635 phba->sli4_hba.BMBXregaddr =
9636 phba->sli4_hba.conf_regs_memmap_p + LPFC_BMBX;
9637 break;
9638 case LPFC_SLI_INTF_IF_TYPE_1:
9639 default:
9640 dev_printk(KERN_ERR, &phba->pcidev->dev,
9641 "FATAL - unsupported SLI4 interface type - %d\n",
9642 if_type);
9643 break;
9644 }
9645}
9646
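/**
 * lpfc_sli4_bar1_register_memmap - Set up SLI4 BAR1 register memory map
 * @phba: pointer to lpfc hba data structure.
 * @if_type: sli interface type to operate on.
 *
 * This routine is invoked to set up the register memory map for the BAR1
 * mapped control registers, based on the given interface type.
 **/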
9654static void
9655lpfc_sli4_bar1_register_memmap(struct lpfc_hba *phba, uint32_t if_type)
9656{
9657 switch (if_type) {
9658 case LPFC_SLI_INTF_IF_TYPE_0:
9659 phba->sli4_hba.PSMPHRregaddr =
9660 phba->sli4_hba.ctrl_regs_memmap_p +
9661 LPFC_SLIPORT_IF0_SMPHR;
9662 phba->sli4_hba.ISRregaddr = phba->sli4_hba.ctrl_regs_memmap_p +
9663 LPFC_HST_ISR0;
9664 phba->sli4_hba.IMRregaddr = phba->sli4_hba.ctrl_regs_memmap_p +
9665 LPFC_HST_IMR0;
9666 phba->sli4_hba.ISCRregaddr = phba->sli4_hba.ctrl_regs_memmap_p +
9667 LPFC_HST_ISCR0;
9668 break;
9669 case LPFC_SLI_INTF_IF_TYPE_6:
9670 phba->sli4_hba.RQDBregaddr = phba->sli4_hba.drbl_regs_memmap_p +
9671 LPFC_IF6_RQ_DOORBELL;
9672 phba->sli4_hba.WQDBregaddr = phba->sli4_hba.drbl_regs_memmap_p +
9673 LPFC_IF6_WQ_DOORBELL;
9674 phba->sli4_hba.CQDBregaddr = phba->sli4_hba.drbl_regs_memmap_p +
9675 LPFC_IF6_CQ_DOORBELL;
9676 phba->sli4_hba.EQDBregaddr = phba->sli4_hba.drbl_regs_memmap_p +
9677 LPFC_IF6_EQ_DOORBELL;
9678 phba->sli4_hba.MQDBregaddr = phba->sli4_hba.drbl_regs_memmap_p +
9679 LPFC_IF6_MQ_DOORBELL;
9680 break;
9681 case LPFC_SLI_INTF_IF_TYPE_2:
9682 case LPFC_SLI_INTF_IF_TYPE_1:
9683 default:
9684 dev_err(&phba->pcidev->dev,
9685 "FATAL - unsupported SLI4 interface type - %d\n",
9686 if_type);
9687 break;
9688 }
9689}
9690
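/**
 * lpfc_sli4_bar2_register_memmap - Set up SLI4 BAR2 register memory map
 * @phba: pointer to lpfc hba data structure.
 * @vf: virtual function number.
 *
 * This routine is invoked to set up the register memory map for the BAR2
 * doorbell registers based on the given virtual function number, @vf.
 *
 * Return 0 if successful, otherwise -ENODEV.
 **/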
9701static int
9702lpfc_sli4_bar2_register_memmap(struct lpfc_hba *phba, uint32_t vf)
9703{
9704 if (vf > LPFC_VIR_FUNC_MAX)
9705 return -ENODEV;
9706
9707 phba->sli4_hba.RQDBregaddr = (phba->sli4_hba.drbl_regs_memmap_p +
9708 vf * LPFC_VFR_PAGE_SIZE +
9709 LPFC_ULP0_RQ_DOORBELL);
9710 phba->sli4_hba.WQDBregaddr = (phba->sli4_hba.drbl_regs_memmap_p +
9711 vf * LPFC_VFR_PAGE_SIZE +
9712 LPFC_ULP0_WQ_DOORBELL);
9713 phba->sli4_hba.CQDBregaddr = (phba->sli4_hba.drbl_regs_memmap_p +
9714 vf * LPFC_VFR_PAGE_SIZE +
9715 LPFC_EQCQ_DOORBELL);
9716 phba->sli4_hba.EQDBregaddr = phba->sli4_hba.CQDBregaddr;
9717 phba->sli4_hba.MQDBregaddr = (phba->sli4_hba.drbl_regs_memmap_p +
9718 vf * LPFC_VFR_PAGE_SIZE + LPFC_MQ_DOORBELL);
9719 phba->sli4_hba.BMBXregaddr = (phba->sli4_hba.drbl_regs_memmap_p +
9720 vf * LPFC_VFR_PAGE_SIZE + LPFC_BMBX);
9721 return 0;
9722}
9723
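/**
 * lpfc_create_bootstrap_mbox - Create the bootstrap mailbox
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked to create the bootstrap mailbox region consistent
 * with the SLI-4 interface spec.  It allocates all memory necessary to
 * communicate mailbox commands to the port and sets up all alignment needs.
 * No locks are expected to be held when calling this routine.
 *
 * Return codes
 *	0 - successful
 *	-ENOMEM - could not allocate memory
 **/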
9739static int
9740lpfc_create_bootstrap_mbox(struct lpfc_hba *phba)
9741{
9742 uint32_t bmbx_size;
9743 struct lpfc_dmabuf *dmabuf;
9744 struct dma_address *dma_address;
9745 uint32_t pa_addr;
9746 uint64_t phys_addr;
9747
9748 dmabuf = kzalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
9749 if (!dmabuf)
9750 return -ENOMEM;
9751
9756 bmbx_size = sizeof(struct lpfc_bmbx_create) + (LPFC_ALIGN_16_BYTE - 1);
9757 dmabuf->virt = dma_alloc_coherent(&phba->pcidev->dev, bmbx_size,
9758 &dmabuf->phys, GFP_KERNEL);
9759 if (!dmabuf->virt) {
9760 kfree(dmabuf);
9761 return -ENOMEM;
9762 }
9763
9771 phba->sli4_hba.bmbx.dmabuf = dmabuf;
9772 phba->sli4_hba.bmbx.bmbx_size = bmbx_size;
9773
9774 phba->sli4_hba.bmbx.avirt = PTR_ALIGN(dmabuf->virt,
9775 LPFC_ALIGN_16_BYTE);
9776 phba->sli4_hba.bmbx.aphys = ALIGN(dmabuf->phys,
9777 LPFC_ALIGN_16_BYTE);
9778
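	/*
	 * Set the high and low physical addresses now.  The SLI4 alignment
	 * requirement is 16 bytes and the mailbox is posted to the port
	 * as two 30-bit addresses.  The other bits mark whether the 30-bit
	 * address is the high or low address.  Upcast bmbx aphys to 64 bits
	 * so the shift compiles properly since aphys may be a 32-bit type.
	 */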
9787 dma_address = &phba->sli4_hba.bmbx.dma_address;
9788 phys_addr = (uint64_t)phba->sli4_hba.bmbx.aphys;
9789 pa_addr = (uint32_t) ((phys_addr >> 34) & 0x3fffffff);
9790 dma_address->addr_hi = (uint32_t) ((pa_addr << 2) |
9791 LPFC_BMBX_BIT1_ADDR_HI);
9792
9793 pa_addr = (uint32_t) ((phba->sli4_hba.bmbx.aphys >> 4) & 0x3fffffff);
9794 dma_address->addr_lo = (uint32_t) ((pa_addr << 2) |
9795 LPFC_BMBX_BIT1_ADDR_LO);
9796 return 0;
9797}
9798
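/**
 * lpfc_destroy_bootstrap_mbox - Destroy all bootstrap mailbox resources
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked to tear down the bootstrap mailbox region and
 * release all host resources.  The caller must ensure all outstanding
 * mailbox commands have completed and that no new ones are issued while
 * this routine runs.  This routine has no locking requirements.
 **/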
9810static void
9811lpfc_destroy_bootstrap_mbox(struct lpfc_hba *phba)
9812{
9813 dma_free_coherent(&phba->pcidev->dev,
9814 phba->sli4_hba.bmbx.bmbx_size,
9815 phba->sli4_hba.bmbx.dmabuf->virt,
9816 phba->sli4_hba.bmbx.dmabuf->phys);
9817
9818 kfree(phba->sli4_hba.bmbx.dmabuf);
9819 memset(&phba->sli4_hba.bmbx, 0, sizeof(struct lpfc_bmbx));
9820}
9821
9822static const char * const lpfc_topo_to_str[] = {
9823 "Loop then P2P",
9824 "Loopback",
9825 "P2P Only",
9826 "Unsupported",
9827 "Loop Only",
9828 "Unsupported",
9829 "P2P then Loop",
9830};
9831
9832#define LINK_FLAGS_DEF 0x0
9833#define LINK_FLAGS_P2P 0x1
9834#define LINK_FLAGS_LOOP 0x2
9835
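/**
 * lpfc_map_topology - Map the topology read from READ_CONFIG
 * @phba: pointer to lpfc hba data structure.
 * @rd_config: pointer to read config data.
 *
 * This routine is invoked to map the topology values as read from the
 * READ_CONFIG mailbox command.  If the persistent topology feature is
 * supported, the firmware-provided value overrides the driver parameter.
 **/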
9845static void
9846lpfc_map_topology(struct lpfc_hba *phba, struct lpfc_mbx_read_config *rd_config)
9847{
9848 u8 ptv, tf, pt;
9849
9850 ptv = bf_get(lpfc_mbx_rd_conf_ptv, rd_config);
9851 tf = bf_get(lpfc_mbx_rd_conf_tf, rd_config);
9852 pt = bf_get(lpfc_mbx_rd_conf_pt, rd_config);
9853
9854 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
			"2027 Read Config Data: ptv:0x%x, tf:0x%x, pt:0x%x\n",
9856 ptv, tf, pt);
9857 if (!ptv) {
9858 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
				"2019 FW does not support persistent topology. "
				"Using driver parameter defined value [%s]\n",
9861 lpfc_topo_to_str[phba->cfg_topology]);
9862 return;
9863 }
9864
9865 phba->hba_flag |= HBA_PERSISTENT_TOPO;
9866
9867
9868 if ((bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) ==
9869 LPFC_SLI_INTF_IF_TYPE_6) ||
9870 (bf_get(lpfc_sli_intf_sli_family, &phba->sli4_hba.sli_intf) ==
9871 LPFC_SLI_INTF_FAMILY_G6)) {
9872 if (!tf) {
9873 phba->cfg_topology = ((pt == LINK_FLAGS_LOOP)
9874 ? FLAGS_TOPOLOGY_MODE_LOOP
9875 : FLAGS_TOPOLOGY_MODE_PT_PT);
9876 } else {
9877 phba->hba_flag &= ~HBA_PERSISTENT_TOPO;
9878 }
9879 } else {
9880 if (tf) {
9881
9882 phba->cfg_topology = (pt ? FLAGS_TOPOLOGY_MODE_PT_LOOP :
9883 FLAGS_TOPOLOGY_MODE_LOOP_PT);
9884 } else {
9885 phba->cfg_topology = ((pt == LINK_FLAGS_P2P)
9886 ? FLAGS_TOPOLOGY_MODE_PT_PT
9887 : FLAGS_TOPOLOGY_MODE_LOOP);
9888 }
9889 }
9890 if (phba->hba_flag & HBA_PERSISTENT_TOPO) {
9891 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
				"2020 Using persistent topology value [%s]\n",
9893 lpfc_topo_to_str[phba->cfg_topology]);
9894 } else {
9895 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
				"2021 Invalid topology values from FW. "
				"Using driver parameter defined value [%s]\n",
9898 lpfc_topo_to_str[phba->cfg_topology]);
9899 }
9900}
9901
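/**
 * lpfc_sli4_read_config - Get the config parameters
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked to read the configuration parameters from the HBA.
 * The configuration parameters are used to set the base and maximum values
 * for RPIs, XRIs, VPIs, VFIs and FCFIs.  These values also affect the
 * resource allocation for the port.
 *
 * Return codes
 *	0 - successful
 *	-ENOMEM - No available memory
 *	-EIO - The mailbox failed to complete successfully
 **/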
9916int
9917lpfc_sli4_read_config(struct lpfc_hba *phba)
9918{
9919 LPFC_MBOXQ_t *pmb;
9920 struct lpfc_mbx_read_config *rd_config;
9921 union lpfc_sli4_cfg_shdr *shdr;
9922 uint32_t shdr_status, shdr_add_status;
9923 struct lpfc_mbx_get_func_cfg *get_func_cfg;
9924 struct lpfc_rsrc_desc_fcfcoe *desc;
9925 char *pdesc_0;
9926 uint16_t forced_link_speed;
9927 uint32_t if_type, qmin, fawwpn;
9928 int length, i, rc = 0, rc2;
9929
9930 pmb = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
9931 if (!pmb) {
9932 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
9933 "2011 Unable to allocate memory for issuing "
9934 "SLI_CONFIG_SPECIAL mailbox command\n");
9935 return -ENOMEM;
9936 }
9937
9938 lpfc_read_config(phba, pmb);
9939
9940 rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL);
9941 if (rc != MBX_SUCCESS) {
9942 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
				"2012 Mailbox failed, mbxCmd x%x "
9944 "READ_CONFIG, mbxStatus x%x\n",
9945 bf_get(lpfc_mqe_command, &pmb->u.mqe),
9946 bf_get(lpfc_mqe_status, &pmb->u.mqe));
9947 rc = -EIO;
9948 } else {
9949 rd_config = &pmb->u.mqe.un.rd_config;
9950 if (bf_get(lpfc_mbx_rd_conf_lnk_ldv, rd_config)) {
9951 phba->sli4_hba.lnk_info.lnk_dv = LPFC_LNK_DAT_VAL;
9952 phba->sli4_hba.lnk_info.lnk_tp =
9953 bf_get(lpfc_mbx_rd_conf_lnk_type, rd_config);
9954 phba->sli4_hba.lnk_info.lnk_no =
9955 bf_get(lpfc_mbx_rd_conf_lnk_numb, rd_config);
9956 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
9957 "3081 lnk_type:%d, lnk_numb:%d\n",
9958 phba->sli4_hba.lnk_info.lnk_tp,
9959 phba->sli4_hba.lnk_info.lnk_no);
9960 } else
9961 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
9962 "3082 Mailbox (x%x) returned ldv:x0\n",
9963 bf_get(lpfc_mqe_command, &pmb->u.mqe));
9964 if (bf_get(lpfc_mbx_rd_conf_bbscn_def, rd_config)) {
9965 phba->bbcredit_support = 1;
9966 phba->sli4_hba.bbscn_params.word0 = rd_config->word8;
9967 }
9968
9969 fawwpn = bf_get(lpfc_mbx_rd_conf_fawwpn, rd_config);
9970
9971 if (fawwpn) {
9972 lpfc_printf_log(phba, KERN_INFO,
9973 LOG_INIT | LOG_DISCOVERY,
9974 "2702 READ_CONFIG: FA-PWWN is "
9975 "configured on\n");
9976 phba->sli4_hba.fawwpn_flag |= LPFC_FAWWPN_CONFIG;
9977 } else {
9978 phba->sli4_hba.fawwpn_flag = 0;
9979 }
9980
9981 phba->sli4_hba.conf_trunk =
9982 bf_get(lpfc_mbx_rd_conf_trunk, rd_config);
9983 phba->sli4_hba.extents_in_use =
9984 bf_get(lpfc_mbx_rd_conf_extnts_inuse, rd_config);
9985
9986 phba->sli4_hba.max_cfg_param.max_xri =
9987 bf_get(lpfc_mbx_rd_conf_xri_count, rd_config);
9988
9989 if (is_kdump_kernel() &&
9990 phba->sli4_hba.max_cfg_param.max_xri > 512)
9991 phba->sli4_hba.max_cfg_param.max_xri = 512;
9992 phba->sli4_hba.max_cfg_param.xri_base =
9993 bf_get(lpfc_mbx_rd_conf_xri_base, rd_config);
9994 phba->sli4_hba.max_cfg_param.max_vpi =
9995 bf_get(lpfc_mbx_rd_conf_vpi_count, rd_config);
9996
9997 if (phba->sli4_hba.max_cfg_param.max_vpi > LPFC_MAX_VPORTS)
9998 phba->sli4_hba.max_cfg_param.max_vpi = LPFC_MAX_VPORTS;
9999 phba->sli4_hba.max_cfg_param.vpi_base =
10000 bf_get(lpfc_mbx_rd_conf_vpi_base, rd_config);
10001 phba->sli4_hba.max_cfg_param.max_rpi =
10002 bf_get(lpfc_mbx_rd_conf_rpi_count, rd_config);
10003 phba->sli4_hba.max_cfg_param.rpi_base =
10004 bf_get(lpfc_mbx_rd_conf_rpi_base, rd_config);
10005 phba->sli4_hba.max_cfg_param.max_vfi =
10006 bf_get(lpfc_mbx_rd_conf_vfi_count, rd_config);
10007 phba->sli4_hba.max_cfg_param.vfi_base =
10008 bf_get(lpfc_mbx_rd_conf_vfi_base, rd_config);
10009 phba->sli4_hba.max_cfg_param.max_fcfi =
10010 bf_get(lpfc_mbx_rd_conf_fcfi_count, rd_config);
10011 phba->sli4_hba.max_cfg_param.max_eq =
10012 bf_get(lpfc_mbx_rd_conf_eq_count, rd_config);
10013 phba->sli4_hba.max_cfg_param.max_rq =
10014 bf_get(lpfc_mbx_rd_conf_rq_count, rd_config);
10015 phba->sli4_hba.max_cfg_param.max_wq =
10016 bf_get(lpfc_mbx_rd_conf_wq_count, rd_config);
10017 phba->sli4_hba.max_cfg_param.max_cq =
10018 bf_get(lpfc_mbx_rd_conf_cq_count, rd_config);
10019 phba->lmt = bf_get(lpfc_mbx_rd_conf_lmt, rd_config);
10020 phba->sli4_hba.next_xri = phba->sli4_hba.max_cfg_param.xri_base;
10021 phba->vpi_base = phba->sli4_hba.max_cfg_param.vpi_base;
10022 phba->vfi_base = phba->sli4_hba.max_cfg_param.vfi_base;
10023 phba->max_vpi = (phba->sli4_hba.max_cfg_param.max_vpi > 0) ?
10024 (phba->sli4_hba.max_cfg_param.max_vpi - 1) : 0;
10025 phba->max_vports = phba->max_vpi;
10026
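		/*
		 * Default to registering for both warning and alarm
		 * congestion FPINs with no congestion signaling, then
		 * refine below from the warning/alarm signal capability
		 * bits returned by READ_CONFIG.
		 */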
10036 phba->cgn_reg_fpin = LPFC_CGN_FPIN_BOTH;
10037 phba->cgn_reg_signal = EDC_CG_SIG_NOTSUPPORTED;
10038 phba->cgn_sig_freq = lpfc_fabric_cgn_frequency;
10039
10040 if (lpfc_use_cgn_signal) {
10041 if (bf_get(lpfc_mbx_rd_conf_wcs, rd_config)) {
10042 phba->cgn_reg_signal = EDC_CG_SIG_WARN_ONLY;
10043 phba->cgn_reg_fpin &= ~LPFC_CGN_FPIN_WARN;
10044 }
10045 if (bf_get(lpfc_mbx_rd_conf_acs, rd_config)) {
10049 if (phba->cgn_reg_signal !=
10050 EDC_CG_SIG_WARN_ONLY) {
10051
10052 phba->cgn_reg_fpin = LPFC_CGN_FPIN_BOTH;
10053 phba->cgn_reg_signal =
10054 EDC_CG_SIG_NOTSUPPORTED;
10055 } else {
10056 phba->cgn_reg_signal =
10057 EDC_CG_SIG_WARN_ALARM;
10058 phba->cgn_reg_fpin =
10059 LPFC_CGN_FPIN_NONE;
10060 }
10061 }
10062 }
10063
10064
10065 phba->cgn_init_reg_fpin = phba->cgn_reg_fpin;
10066 phba->cgn_init_reg_signal = phba->cgn_reg_signal;
10067
10068 lpfc_printf_log(phba, KERN_INFO, LOG_CGN_MGMT,
10069 "6446 READ_CONFIG reg_sig x%x reg_fpin:x%x\n",
10070 phba->cgn_reg_signal, phba->cgn_reg_fpin);
10071
10072 lpfc_map_topology(phba, rd_config);
10073 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
10074 "2003 cfg params Extents? %d "
10075 "XRI(B:%d M:%d), "
10076 "VPI(B:%d M:%d) "
10077 "VFI(B:%d M:%d) "
10078 "RPI(B:%d M:%d) "
10079 "FCFI:%d EQ:%d CQ:%d WQ:%d RQ:%d lmt:x%x\n",
10080 phba->sli4_hba.extents_in_use,
10081 phba->sli4_hba.max_cfg_param.xri_base,
10082 phba->sli4_hba.max_cfg_param.max_xri,
10083 phba->sli4_hba.max_cfg_param.vpi_base,
10084 phba->sli4_hba.max_cfg_param.max_vpi,
10085 phba->sli4_hba.max_cfg_param.vfi_base,
10086 phba->sli4_hba.max_cfg_param.max_vfi,
10087 phba->sli4_hba.max_cfg_param.rpi_base,
10088 phba->sli4_hba.max_cfg_param.max_rpi,
10089 phba->sli4_hba.max_cfg_param.max_fcfi,
10090 phba->sli4_hba.max_cfg_param.max_eq,
10091 phba->sli4_hba.max_cfg_param.max_cq,
10092 phba->sli4_hba.max_cfg_param.max_wq,
10093 phba->sli4_hba.max_cfg_param.max_rq,
10094 phba->lmt);
10095
10100 qmin = phba->sli4_hba.max_cfg_param.max_wq;
10101 if (phba->sli4_hba.max_cfg_param.max_cq < qmin)
10102 qmin = phba->sli4_hba.max_cfg_param.max_cq;
10103 if (phba->sli4_hba.max_cfg_param.max_eq < qmin)
10104 qmin = phba->sli4_hba.max_cfg_param.max_eq;
10105
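		/*
		 * Reserve 4 (ELS, NVME LS, MBOX, plus one extra) out of
		 * the common EQ/CQ/WQ minimum; whatever is left can be
		 * used for the fast-path IO channels.
		 */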
10111 qmin -= 4;
10112
10113
10114 if ((phba->cfg_irq_chann > qmin) ||
10115 (phba->cfg_hdw_queue > qmin)) {
10116 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
10117 "2005 Reducing Queues - "
10118 "FW resource limitation: "
10119 "WQ %d CQ %d EQ %d: min %d: "
10120 "IRQ %d HDWQ %d\n",
10121 phba->sli4_hba.max_cfg_param.max_wq,
10122 phba->sli4_hba.max_cfg_param.max_cq,
10123 phba->sli4_hba.max_cfg_param.max_eq,
10124 qmin, phba->cfg_irq_chann,
10125 phba->cfg_hdw_queue);
10126
10127 if (phba->cfg_irq_chann > qmin)
10128 phba->cfg_irq_chann = qmin;
10129 if (phba->cfg_hdw_queue > qmin)
10130 phba->cfg_hdw_queue = qmin;
10131 }
10132 }
10133
10134 if (rc)
10135 goto read_cfg_out;
10136
10137
10138 if_type = bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf);
10139 if (if_type >= LPFC_SLI_INTF_IF_TYPE_2) {
10140 forced_link_speed =
10141 bf_get(lpfc_mbx_rd_conf_link_speed, rd_config);
10142 if (forced_link_speed) {
10143 phba->hba_flag |= HBA_FORCED_LINK_SPEED;
10144
10145 switch (forced_link_speed) {
10146 case LINK_SPEED_1G:
10147 phba->cfg_link_speed =
10148 LPFC_USER_LINK_SPEED_1G;
10149 break;
10150 case LINK_SPEED_2G:
10151 phba->cfg_link_speed =
10152 LPFC_USER_LINK_SPEED_2G;
10153 break;
10154 case LINK_SPEED_4G:
10155 phba->cfg_link_speed =
10156 LPFC_USER_LINK_SPEED_4G;
10157 break;
10158 case LINK_SPEED_8G:
10159 phba->cfg_link_speed =
10160 LPFC_USER_LINK_SPEED_8G;
10161 break;
10162 case LINK_SPEED_10G:
10163 phba->cfg_link_speed =
10164 LPFC_USER_LINK_SPEED_10G;
10165 break;
10166 case LINK_SPEED_16G:
10167 phba->cfg_link_speed =
10168 LPFC_USER_LINK_SPEED_16G;
10169 break;
10170 case LINK_SPEED_32G:
10171 phba->cfg_link_speed =
10172 LPFC_USER_LINK_SPEED_32G;
10173 break;
10174 case LINK_SPEED_64G:
10175 phba->cfg_link_speed =
10176 LPFC_USER_LINK_SPEED_64G;
10177 break;
10178 case 0xffff:
10179 phba->cfg_link_speed =
10180 LPFC_USER_LINK_SPEED_AUTO;
10181 break;
10182 default:
10183 lpfc_printf_log(phba, KERN_ERR,
10184 LOG_TRACE_EVENT,
10185 "0047 Unrecognized link "
10186 "speed : %d\n",
10187 forced_link_speed);
10188 phba->cfg_link_speed =
10189 LPFC_USER_LINK_SPEED_AUTO;
10190 }
10191 }
10192 }
10193
10194
10195 length = phba->sli4_hba.max_cfg_param.max_xri -
10196 lpfc_sli4_get_els_iocb_cnt(phba);
10197 if (phba->cfg_hba_queue_depth > length) {
10198 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
10199 "3361 HBA queue depth changed from %d to %d\n",
10200 phba->cfg_hba_queue_depth, length);
10201 phba->cfg_hba_queue_depth = length;
10202 }
10203
10204 if (bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) <
10205 LPFC_SLI_INTF_IF_TYPE_2)
10206 goto read_cfg_out;
10207
10208
10209 length = (sizeof(struct lpfc_mbx_get_func_cfg) -
10210 sizeof(struct lpfc_sli4_cfg_mhdr));
10211 lpfc_sli4_config(phba, pmb, LPFC_MBOX_SUBSYSTEM_COMMON,
10212 LPFC_MBOX_OPCODE_GET_FUNCTION_CONFIG,
10213 length, LPFC_SLI4_MBX_EMBED);
10214
10215 rc2 = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL);
10216 shdr = (union lpfc_sli4_cfg_shdr *)
10217 &pmb->u.mqe.un.sli4_config.header.cfg_shdr;
10218 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
10219 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
10220 if (rc2 || shdr_status || shdr_add_status) {
10221 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
				"3026 Mailbox failed, mbxCmd x%x "
10223 "GET_FUNCTION_CONFIG, mbxStatus x%x\n",
10224 bf_get(lpfc_mqe_command, &pmb->u.mqe),
10225 bf_get(lpfc_mqe_status, &pmb->u.mqe));
10226 goto read_cfg_out;
10227 }
10228
10229
10230 get_func_cfg = &pmb->u.mqe.un.get_func_cfg;
10231
10232 pdesc_0 = (char *)&get_func_cfg->func_cfg.desc[0];
10233 desc = (struct lpfc_rsrc_desc_fcfcoe *)pdesc_0;
10234 length = bf_get(lpfc_rsrc_desc_fcfcoe_length, desc);
10235 if (length == LPFC_RSRC_DESC_TYPE_FCFCOE_V0_RSVD)
10236 length = LPFC_RSRC_DESC_TYPE_FCFCOE_V0_LENGTH;
10237 else if (length != LPFC_RSRC_DESC_TYPE_FCFCOE_V1_LENGTH)
10238 goto read_cfg_out;
10239
10240 for (i = 0; i < LPFC_RSRC_DESC_MAX_NUM; i++) {
10241 desc = (struct lpfc_rsrc_desc_fcfcoe *)(pdesc_0 + length * i);
10242 if (LPFC_RSRC_DESC_TYPE_FCFCOE ==
10243 bf_get(lpfc_rsrc_desc_fcfcoe_type, desc)) {
10244 phba->sli4_hba.iov.pf_number =
10245 bf_get(lpfc_rsrc_desc_fcfcoe_pfnum, desc);
10246 phba->sli4_hba.iov.vf_number =
10247 bf_get(lpfc_rsrc_desc_fcfcoe_vfnum, desc);
10248 break;
10249 }
10250 }
10251
10252 if (i < LPFC_RSRC_DESC_MAX_NUM)
10253 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
10254 "3027 GET_FUNCTION_CONFIG: pf_number:%d, "
10255 "vf_number:%d\n", phba->sli4_hba.iov.pf_number,
10256 phba->sli4_hba.iov.vf_number);
10257 else
10258 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
10259 "3028 GET_FUNCTION_CONFIG: failed to find "
10260 "Resource Descriptor:x%x\n",
10261 LPFC_RSRC_DESC_TYPE_FCFCOE);
10262
10263read_cfg_out:
10264 mempool_free(pmb, phba->mbox_mem_pool);
10265 return rc;
10266}
10267
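/**
 * lpfc_setup_endian_order - Write endian order to an SLI4 if_type 0 port
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked to set up the host-side endian order with the
 * port when the interface type is if_type 0.  It is a no-op for all other
 * interface types.
 *
 * Return codes
 *	0 - successful
 *	-ENOMEM - No available memory
 *	-EIO - The mailbox failed to complete successfully
 **/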
10281static int
10282lpfc_setup_endian_order(struct lpfc_hba *phba)
10283{
10284 LPFC_MBOXQ_t *mboxq;
10285 uint32_t if_type, rc = 0;
10286 uint32_t endian_mb_data[2] = {HOST_ENDIAN_LOW_WORD0,
10287 HOST_ENDIAN_HIGH_WORD1};
10288
10289 if_type = bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf);
10290 switch (if_type) {
10291 case LPFC_SLI_INTF_IF_TYPE_0:
10292 mboxq = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool,
10293 GFP_KERNEL);
10294 if (!mboxq) {
10295 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
10296 "0492 Unable to allocate memory for "
10297 "issuing SLI_CONFIG_SPECIAL mailbox "
10298 "command\n");
10299 return -ENOMEM;
10300 }
10301
10306 memset(mboxq, 0, sizeof(LPFC_MBOXQ_t));
10307 memcpy(&mboxq->u.mqe, &endian_mb_data, sizeof(endian_mb_data));
10308 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
10309 if (rc != MBX_SUCCESS) {
10310 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
10311 "0493 SLI_CONFIG_SPECIAL mailbox "
10312 "failed with status x%x\n",
10313 rc);
10314 rc = -EIO;
10315 }
10316 mempool_free(mboxq, phba->mbox_mem_pool);
10317 break;
10318 case LPFC_SLI_INTF_IF_TYPE_6:
10319 case LPFC_SLI_INTF_IF_TYPE_2:
10320 case LPFC_SLI_INTF_IF_TYPE_1:
10321 default:
10322 break;
10323 }
10324 return rc;
10325}
10326
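/**
 * lpfc_sli4_queue_verify - Verify and update queue counts
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked to sanity-check the user-settable queue counts.
 * After it runs, the counts adhere to the constraints of the system's
 * interrupt vectors and the port's queue resources, and the default EQ/CQ
 * entry sizes and counts are set.
 *
 * Return 0 (the checks currently cannot fail).
 **/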
10340static int
10341lpfc_sli4_queue_verify(struct lpfc_hba *phba)
10342{
10348 if (phba->nvmet_support) {
10349 if (phba->cfg_hdw_queue < phba->cfg_nvmet_mrq)
10350 phba->cfg_nvmet_mrq = phba->cfg_hdw_queue;
10351 if (phba->cfg_nvmet_mrq > LPFC_NVMET_MRQ_MAX)
10352 phba->cfg_nvmet_mrq = LPFC_NVMET_MRQ_MAX;
10353 }
10354
10355 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
10356 "2574 IO channels: hdwQ %d IRQ %d MRQ: %d\n",
10357 phba->cfg_hdw_queue, phba->cfg_irq_chann,
10358 phba->cfg_nvmet_mrq);
10359
10360
10361 phba->sli4_hba.eq_esize = LPFC_EQE_SIZE_4B;
10362 phba->sli4_hba.eq_ecount = LPFC_EQE_DEF_COUNT;
10363
10364
10365 phba->sli4_hba.cq_esize = LPFC_CQE_SIZE;
10366 phba->sli4_hba.cq_ecount = LPFC_CQE_DEF_COUNT;
10367 return 0;
10368}
10369
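/*
 * lpfc_alloc_io_wq_cq - Allocate one fast-path IO CQ/WQ pair for hardware
 * queue @idx on its associated CPU, using expanded page queues when the
 * port supports them.  Returns 0 on success, 1 on allocation failure.
 */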
10370static int
10371lpfc_alloc_io_wq_cq(struct lpfc_hba *phba, int idx)
10372{
10373 struct lpfc_queue *qdesc;
10374 u32 wqesize;
10375 int cpu;
10376
10377 cpu = lpfc_find_cpu_handle(phba, idx, LPFC_FIND_BY_HDWQ);
10378
10379 if (phba->enab_exp_wqcq_pages)
10380
10381 qdesc = lpfc_sli4_queue_alloc(phba, LPFC_EXPANDED_PAGE_SIZE,
10382 phba->sli4_hba.cq_esize,
10383 LPFC_CQE_EXP_COUNT, cpu);
10384
10385 else
10386 qdesc = lpfc_sli4_queue_alloc(phba, LPFC_DEFAULT_PAGE_SIZE,
10387 phba->sli4_hba.cq_esize,
10388 phba->sli4_hba.cq_ecount, cpu);
10389 if (!qdesc) {
10390 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
10391 "0499 Failed allocate fast-path IO CQ (%d)\n",
10392 idx);
10393 return 1;
10394 }
10395 qdesc->qe_valid = 1;
10396 qdesc->hdwq = idx;
10397 qdesc->chann = cpu;
10398 phba->sli4_hba.hdwq[idx].io_cq = qdesc;
10399
10400
10401 if (phba->enab_exp_wqcq_pages) {
10402
10403 wqesize = (phba->fcp_embed_io) ?
10404 LPFC_WQE128_SIZE : phba->sli4_hba.wq_esize;
10405 qdesc = lpfc_sli4_queue_alloc(phba, LPFC_EXPANDED_PAGE_SIZE,
10406 wqesize,
10407 LPFC_WQE_EXP_COUNT, cpu);
10408 } else
10409 qdesc = lpfc_sli4_queue_alloc(phba, LPFC_DEFAULT_PAGE_SIZE,
10410 phba->sli4_hba.wq_esize,
10411 phba->sli4_hba.wq_ecount, cpu);
10412
10413 if (!qdesc) {
10414 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
10415 "0503 Failed allocate fast-path IO WQ (%d)\n",
10416 idx);
10417 return 1;
10418 }
10419 qdesc->hdwq = idx;
10420 qdesc->chann = cpu;
10421 phba->sli4_hba.hdwq[idx].io_wq = qdesc;
10422 list_add_tail(&qdesc->wq_list, &phba->sli4_hba.lpfc_wq_list);
10423 return 0;
10424}
10425
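/**
 * lpfc_sli4_queue_create - Create all the SLI4 queues
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked to allocate all the SLI4 queues for the HBA
 * operation.  For each SLI4 queue type, the parameters such as queue entry
 * count (queue depth) and queue entry size are taken into consideration.
 *
 * Return codes
 *	0 - successful
 *	-ENOMEM - No available memory
 **/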
10440int
10441lpfc_sli4_queue_create(struct lpfc_hba *phba)
10442{
10443 struct lpfc_queue *qdesc;
10444 int idx, cpu, eqcpu;
10445 struct lpfc_sli4_hdw_queue *qp;
10446 struct lpfc_vector_map_info *cpup;
10447 struct lpfc_vector_map_info *eqcpup;
10448 struct lpfc_eq_intr_info *eqi;
10449
10454 phba->sli4_hba.mq_esize = LPFC_MQE_SIZE;
10455 phba->sli4_hba.mq_ecount = LPFC_MQE_DEF_COUNT;
10456 phba->sli4_hba.wq_esize = LPFC_WQE_SIZE;
10457 phba->sli4_hba.wq_ecount = LPFC_WQE_DEF_COUNT;
10458 phba->sli4_hba.rq_esize = LPFC_RQE_SIZE;
10459 phba->sli4_hba.rq_ecount = LPFC_RQE_DEF_COUNT;
10460 phba->sli4_hba.eq_esize = LPFC_EQE_SIZE_4B;
10461 phba->sli4_hba.eq_ecount = LPFC_EQE_DEF_COUNT;
10462 phba->sli4_hba.cq_esize = LPFC_CQE_SIZE;
10463 phba->sli4_hba.cq_ecount = LPFC_CQE_DEF_COUNT;
10464
10465 if (!phba->sli4_hba.hdwq) {
10466 phba->sli4_hba.hdwq = kcalloc(
10467 phba->cfg_hdw_queue, sizeof(struct lpfc_sli4_hdw_queue),
10468 GFP_KERNEL);
10469 if (!phba->sli4_hba.hdwq) {
10470 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
10471 "6427 Failed allocate memory for "
10472 "fast-path Hardware Queue array\n");
10473 goto out_error;
10474 }
10475
10476 for (idx = 0; idx < phba->cfg_hdw_queue; idx++) {
10477 qp = &phba->sli4_hba.hdwq[idx];
10478 spin_lock_init(&qp->io_buf_list_get_lock);
10479 spin_lock_init(&qp->io_buf_list_put_lock);
10480 INIT_LIST_HEAD(&qp->lpfc_io_buf_list_get);
10481 INIT_LIST_HEAD(&qp->lpfc_io_buf_list_put);
10482 qp->get_io_bufs = 0;
10483 qp->put_io_bufs = 0;
10484 qp->total_io_bufs = 0;
10485 spin_lock_init(&qp->abts_io_buf_list_lock);
10486 INIT_LIST_HEAD(&qp->lpfc_abts_io_buf_list);
10487 qp->abts_scsi_io_bufs = 0;
10488 qp->abts_nvme_io_bufs = 0;
10489 INIT_LIST_HEAD(&qp->sgl_list);
10490 INIT_LIST_HEAD(&qp->cmd_rsp_buf_list);
10491 spin_lock_init(&qp->hdwq_lock);
10492 }
10493 }
10494
10495 if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) {
10496 if (phba->nvmet_support) {
10497 phba->sli4_hba.nvmet_cqset = kcalloc(
10498 phba->cfg_nvmet_mrq,
10499 sizeof(struct lpfc_queue *),
10500 GFP_KERNEL);
10501 if (!phba->sli4_hba.nvmet_cqset) {
10502 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
10503 "3121 Fail allocate memory for "
10504 "fast-path CQ set array\n");
10505 goto out_error;
10506 }
10507 phba->sli4_hba.nvmet_mrq_hdr = kcalloc(
10508 phba->cfg_nvmet_mrq,
10509 sizeof(struct lpfc_queue *),
10510 GFP_KERNEL);
10511 if (!phba->sli4_hba.nvmet_mrq_hdr) {
10512 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
10513 "3122 Fail allocate memory for "
10514 "fast-path RQ set hdr array\n");
10515 goto out_error;
10516 }
10517 phba->sli4_hba.nvmet_mrq_data = kcalloc(
10518 phba->cfg_nvmet_mrq,
10519 sizeof(struct lpfc_queue *),
10520 GFP_KERNEL);
10521 if (!phba->sli4_hba.nvmet_mrq_data) {
10522 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
10523 "3124 Fail allocate memory for "
10524 "fast-path RQ set data array\n");
10525 goto out_error;
10526 }
10527 }
10528 }
10529
10530 INIT_LIST_HEAD(&phba->sli4_hba.lpfc_wq_list);
10531
10532
10533 for_each_present_cpu(cpu) {
10538 cpup = &phba->sli4_hba.cpu_map[cpu];
10539 if (!(cpup->flag & LPFC_CPU_FIRST_IRQ))
10540 continue;
10541
10542
10543 qp = &phba->sli4_hba.hdwq[cpup->hdwq];
10544
10545
10546 qdesc = lpfc_sli4_queue_alloc(phba, LPFC_DEFAULT_PAGE_SIZE,
10547 phba->sli4_hba.eq_esize,
10548 phba->sli4_hba.eq_ecount, cpu);
10549 if (!qdesc) {
10550 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
10551 "0497 Failed allocate EQ (%d)\n",
10552 cpup->hdwq);
10553 goto out_error;
10554 }
10555 qdesc->qe_valid = 1;
10556 qdesc->hdwq = cpup->hdwq;
10557 qdesc->chann = cpu;
10558 qdesc->last_cpu = qdesc->chann;
10559
10560
10561 qp->hba_eq = qdesc;
10562
10563 eqi = per_cpu_ptr(phba->sli4_hba.eq_info, qdesc->last_cpu);
10564 list_add(&qdesc->cpu_list, &eqi->list);
10565 }
10566
10570 for_each_present_cpu(cpu) {
10571 cpup = &phba->sli4_hba.cpu_map[cpu];
10572
10573
10574 if (cpup->flag & LPFC_CPU_FIRST_IRQ)
10575 continue;
10576
10577
10578 qp = &phba->sli4_hba.hdwq[cpup->hdwq];
10579 if (qp->hba_eq)
10580 continue;
10581
10582
10583 eqcpu = lpfc_find_cpu_handle(phba, cpup->eq, LPFC_FIND_BY_EQ);
10584 eqcpup = &phba->sli4_hba.cpu_map[eqcpu];
10585 qp->hba_eq = phba->sli4_hba.hdwq[eqcpup->hdwq].hba_eq;
10586 }
10587
10588
10589 for (idx = 0; idx < phba->cfg_hdw_queue; idx++) {
10590 if (lpfc_alloc_io_wq_cq(phba, idx))
10591 goto out_error;
10592 }
10593
10594 if (phba->nvmet_support) {
10595 for (idx = 0; idx < phba->cfg_nvmet_mrq; idx++) {
10596 cpu = lpfc_find_cpu_handle(phba, idx,
10597 LPFC_FIND_BY_HDWQ);
10598 qdesc = lpfc_sli4_queue_alloc(phba,
10599 LPFC_DEFAULT_PAGE_SIZE,
10600 phba->sli4_hba.cq_esize,
10601 phba->sli4_hba.cq_ecount,
10602 cpu);
10603 if (!qdesc) {
10604 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
10605 "3142 Failed allocate NVME "
10606 "CQ Set (%d)\n", idx);
10607 goto out_error;
10608 }
10609 qdesc->qe_valid = 1;
10610 qdesc->hdwq = idx;
10611 qdesc->chann = cpu;
10612 phba->sli4_hba.nvmet_cqset[idx] = qdesc;
10613 }
10614 }
10615
10620 cpu = lpfc_find_cpu_handle(phba, 0, LPFC_FIND_BY_EQ);
10621
10622 qdesc = lpfc_sli4_queue_alloc(phba, LPFC_DEFAULT_PAGE_SIZE,
10623 phba->sli4_hba.cq_esize,
10624 phba->sli4_hba.cq_ecount, cpu);
10625 if (!qdesc) {
10626 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
10627 "0500 Failed allocate slow-path mailbox CQ\n");
10628 goto out_error;
10629 }
10630 qdesc->qe_valid = 1;
10631 phba->sli4_hba.mbx_cq = qdesc;
10632
10633
10634 qdesc = lpfc_sli4_queue_alloc(phba, LPFC_DEFAULT_PAGE_SIZE,
10635 phba->sli4_hba.cq_esize,
10636 phba->sli4_hba.cq_ecount, cpu);
10637 if (!qdesc) {
10638 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
10639 "0501 Failed allocate slow-path ELS CQ\n");
10640 goto out_error;
10641 }
10642 qdesc->qe_valid = 1;
10643 qdesc->chann = cpu;
10644 phba->sli4_hba.els_cq = qdesc;
10645
10653 qdesc = lpfc_sli4_queue_alloc(phba, LPFC_DEFAULT_PAGE_SIZE,
10654 phba->sli4_hba.mq_esize,
10655 phba->sli4_hba.mq_ecount, cpu);
10656 if (!qdesc) {
10657 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
10658 "0505 Failed allocate slow-path MQ\n");
10659 goto out_error;
10660 }
10661 qdesc->chann = cpu;
10662 phba->sli4_hba.mbx_wq = qdesc;
10663
10669 qdesc = lpfc_sli4_queue_alloc(phba, LPFC_DEFAULT_PAGE_SIZE,
10670 phba->sli4_hba.wq_esize,
10671 phba->sli4_hba.wq_ecount, cpu);
10672 if (!qdesc) {
10673 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
10674 "0504 Failed allocate slow-path ELS WQ\n");
10675 goto out_error;
10676 }
10677 qdesc->chann = cpu;
10678 phba->sli4_hba.els_wq = qdesc;
10679 list_add_tail(&qdesc->wq_list, &phba->sli4_hba.lpfc_wq_list);
10680
10681 if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) {
10682
10683 qdesc = lpfc_sli4_queue_alloc(phba, LPFC_DEFAULT_PAGE_SIZE,
10684 phba->sli4_hba.cq_esize,
10685 phba->sli4_hba.cq_ecount, cpu);
10686 if (!qdesc) {
10687 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
10688 "6079 Failed allocate NVME LS CQ\n");
10689 goto out_error;
10690 }
10691 qdesc->chann = cpu;
10692 qdesc->qe_valid = 1;
10693 phba->sli4_hba.nvmels_cq = qdesc;
10694
10695
10696 qdesc = lpfc_sli4_queue_alloc(phba, LPFC_DEFAULT_PAGE_SIZE,
10697 phba->sli4_hba.wq_esize,
10698 phba->sli4_hba.wq_ecount, cpu);
10699 if (!qdesc) {
10700 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
10701 "6080 Failed allocate NVME LS WQ\n");
10702 goto out_error;
10703 }
10704 qdesc->chann = cpu;
10705 phba->sli4_hba.nvmels_wq = qdesc;
10706 list_add_tail(&qdesc->wq_list, &phba->sli4_hba.lpfc_wq_list);
10707 }
10708
10714 qdesc = lpfc_sli4_queue_alloc(phba, LPFC_DEFAULT_PAGE_SIZE,
10715 phba->sli4_hba.rq_esize,
10716 phba->sli4_hba.rq_ecount, cpu);
10717 if (!qdesc) {
10718 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
10719 "0506 Failed allocate receive HRQ\n");
10720 goto out_error;
10721 }
10722 phba->sli4_hba.hdr_rq = qdesc;
10723
10724
10725 qdesc = lpfc_sli4_queue_alloc(phba, LPFC_DEFAULT_PAGE_SIZE,
10726 phba->sli4_hba.rq_esize,
10727 phba->sli4_hba.rq_ecount, cpu);
10728 if (!qdesc) {
10729 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
10730 "0507 Failed allocate receive DRQ\n");
10731 goto out_error;
10732 }
10733 phba->sli4_hba.dat_rq = qdesc;
10734
10735 if ((phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) &&
10736 phba->nvmet_support) {
10737 for (idx = 0; idx < phba->cfg_nvmet_mrq; idx++) {
10738 cpu = lpfc_find_cpu_handle(phba, idx,
10739 LPFC_FIND_BY_HDWQ);
10740
10741 qdesc = lpfc_sli4_queue_alloc(phba,
10742 LPFC_DEFAULT_PAGE_SIZE,
10743 phba->sli4_hba.rq_esize,
10744 LPFC_NVMET_RQE_DEF_COUNT,
10745 cpu);
10746 if (!qdesc) {
10747 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
10748 "3146 Failed allocate "
10749 "receive HRQ\n");
10750 goto out_error;
10751 }
10752 qdesc->hdwq = idx;
10753 phba->sli4_hba.nvmet_mrq_hdr[idx] = qdesc;
10754
10755
10756 qdesc->rqbp = kzalloc_node(sizeof(*qdesc->rqbp),
10757 GFP_KERNEL,
10758 cpu_to_node(cpu));
10759 if (qdesc->rqbp == NULL) {
10760 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
10761 "6131 Failed allocate "
10762 "Header RQBP\n");
10763 goto out_error;
10764 }
10765
10766
10767 INIT_LIST_HEAD(&qdesc->rqbp->rqb_buffer_list);
10768
10769
10770 qdesc = lpfc_sli4_queue_alloc(phba,
10771 LPFC_DEFAULT_PAGE_SIZE,
10772 phba->sli4_hba.rq_esize,
10773 LPFC_NVMET_RQE_DEF_COUNT,
10774 cpu);
10775 if (!qdesc) {
10776 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
10777 "3156 Failed allocate "
10778 "receive DRQ\n");
10779 goto out_error;
10780 }
10781 qdesc->hdwq = idx;
10782 phba->sli4_hba.nvmet_mrq_data[idx] = qdesc;
10783 }
10784 }
10785
10786
10787 if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) {
10788 for (idx = 0; idx < phba->cfg_hdw_queue; idx++) {
10789 memset(&phba->sli4_hba.hdwq[idx].nvme_cstat, 0,
10790 sizeof(phba->sli4_hba.hdwq[idx].nvme_cstat));
10791 }
10792 }
10793
10794
10795 if (phba->cfg_enable_fc4_type & LPFC_ENABLE_FCP) {
10796 for (idx = 0; idx < phba->cfg_hdw_queue; idx++) {
10797 memset(&phba->sli4_hba.hdwq[idx].scsi_cstat, 0,
10798 sizeof(phba->sli4_hba.hdwq[idx].scsi_cstat));
10799 }
10800 }
10801
10802 return 0;
10803
10804out_error:
10805 lpfc_sli4_queue_destroy(phba);
10806 return -ENOMEM;
10807}
10808
10809static inline void
10810__lpfc_sli4_release_queue(struct lpfc_queue **qp)
10811{
10812 if (*qp != NULL) {
10813 lpfc_sli4_queue_free(*qp);
10814 *qp = NULL;
10815 }
10816}
10817
10818static inline void
10819lpfc_sli4_release_queues(struct lpfc_queue ***qs, int max)
10820{
10821 int idx;
10822
10823 if (*qs == NULL)
10824 return;
10825
10826 for (idx = 0; idx < max; idx++)
10827 __lpfc_sli4_release_queue(&(*qs)[idx]);
10828
10829 kfree(*qs);
10830 *qs = NULL;
10831}
10832
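/*
 * lpfc_sli4_release_hdwq - Free the IO CQ/WQ pair of every hardware queue
 * along with its per-queue SGL and cmd/rsp buffer pools, then free the EQs
 * tracked in the interrupt handle array.
 */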
10833static inline void
10834lpfc_sli4_release_hdwq(struct lpfc_hba *phba)
10835{
10836 struct lpfc_sli4_hdw_queue *hdwq;
10837 struct lpfc_queue *eq;
10838 uint32_t idx;
10839
10840 hdwq = phba->sli4_hba.hdwq;
10841
10842
10843 for (idx = 0; idx < phba->cfg_hdw_queue; idx++) {
10844
10845 lpfc_sli4_queue_free(hdwq[idx].io_cq);
10846 lpfc_sli4_queue_free(hdwq[idx].io_wq);
10847 hdwq[idx].hba_eq = NULL;
10848 hdwq[idx].io_cq = NULL;
10849 hdwq[idx].io_wq = NULL;
10850 if (phba->cfg_xpsgl && !phba->nvmet_support)
10851 lpfc_free_sgl_per_hdwq(phba, &hdwq[idx]);
10852 lpfc_free_cmd_rsp_buf_per_hdwq(phba, &hdwq[idx]);
10853 }
10854
10855 for (idx = 0; idx < phba->cfg_irq_chann; idx++) {
10856
10857 eq = phba->sli4_hba.hba_eq_hdl[idx].eq;
10858 lpfc_sli4_queue_free(eq);
10859 phba->sli4_hba.hba_eq_hdl[idx].eq = NULL;
10860 }
10861}
10862
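/**
 * lpfc_sli4_queue_destroy - Destroy all the SLI4 queues
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked to release all the SLI4 queues with the HBA
 * operation.
 **/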
10875void
10876lpfc_sli4_queue_destroy(struct lpfc_hba *phba)
10877{
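	/*
	 * Set QUEUE_FREE_INIT before beginning to free the queues, then wait
	 * for any in-flight users (QUEUE_FREE_WAIT) to finish before the
	 * teardown proceeds.
	 */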
10883 spin_lock_irq(&phba->hbalock);
10884 phba->sli.sli_flag |= LPFC_QUEUE_FREE_INIT;
10885 while (phba->sli.sli_flag & LPFC_QUEUE_FREE_WAIT) {
10886 spin_unlock_irq(&phba->hbalock);
10887 msleep(20);
10888 spin_lock_irq(&phba->hbalock);
10889 }
10890 spin_unlock_irq(&phba->hbalock);
10891
10892 lpfc_sli4_cleanup_poll_list(phba);
10893
10894
10895 if (phba->sli4_hba.hdwq)
10896 lpfc_sli4_release_hdwq(phba);
10897
10898 if (phba->nvmet_support) {
10899 lpfc_sli4_release_queues(&phba->sli4_hba.nvmet_cqset,
10900 phba->cfg_nvmet_mrq);
10901
10902 lpfc_sli4_release_queues(&phba->sli4_hba.nvmet_mrq_hdr,
10903 phba->cfg_nvmet_mrq);
10904 lpfc_sli4_release_queues(&phba->sli4_hba.nvmet_mrq_data,
10905 phba->cfg_nvmet_mrq);
10906 }
10907
10908
10909 __lpfc_sli4_release_queue(&phba->sli4_hba.mbx_wq);
10910
10911
10912 __lpfc_sli4_release_queue(&phba->sli4_hba.els_wq);
10913
10914
10915 __lpfc_sli4_release_queue(&phba->sli4_hba.nvmels_wq);
10916
10917
10918 __lpfc_sli4_release_queue(&phba->sli4_hba.hdr_rq);
10919 __lpfc_sli4_release_queue(&phba->sli4_hba.dat_rq);
10920
10921
10922 __lpfc_sli4_release_queue(&phba->sli4_hba.els_cq);
10923
10924
10925 __lpfc_sli4_release_queue(&phba->sli4_hba.nvmels_cq);
10926
10927
10928 __lpfc_sli4_release_queue(&phba->sli4_hba.mbx_cq);
10929
10930
10931 INIT_LIST_HEAD(&phba->sli4_hba.lpfc_wq_list);
10932
10933
10934 spin_lock_irq(&phba->hbalock);
10935 phba->sli.sli_flag &= ~LPFC_QUEUE_FREE_INIT;
10936 spin_unlock_irq(&phba->hbalock);
10937}
10938
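/*
 * lpfc_free_rq_buffer - Drain the RQ's posted buffer list, handing each
 * buffer back through the RQ's free-buffer callback and decrementing the
 * posted-buffer count.
 */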
10939int
10940lpfc_free_rq_buffer(struct lpfc_hba *phba, struct lpfc_queue *rq)
10941{
10942 struct lpfc_rqb *rqbp;
10943 struct lpfc_dmabuf *h_buf;
10944 struct rqb_dmabuf *rqb_buffer;
10945
10946 rqbp = rq->rqbp;
10947 while (!list_empty(&rqbp->rqb_buffer_list)) {
10948 list_remove_head(&rqbp->rqb_buffer_list, h_buf,
10949 struct lpfc_dmabuf, list);
10950
10951 rqb_buffer = container_of(h_buf, struct rqb_dmabuf, hbuf);
10952 (rqbp->rqb_free_buffer)(phba, rqb_buffer);
10953 rqbp->buffer_count--;
10954 }
10955 return 1;
10956}
10957
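/*
 * lpfc_create_wq_cq - Create a CQ on its parent EQ, then the WQ (or, for
 * LPFC_MBOX, the MQ) that feeds it.  For non-mailbox queue types the new
 * CQ id is returned through @cq_map when one is supplied.
 */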
10958static int
10959lpfc_create_wq_cq(struct lpfc_hba *phba, struct lpfc_queue *eq,
10960 struct lpfc_queue *cq, struct lpfc_queue *wq, uint16_t *cq_map,
10961 int qidx, uint32_t qtype)
10962{
10963 struct lpfc_sli_ring *pring;
10964 int rc;
10965
10966 if (!eq || !cq || !wq) {
10967 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
10968 "6085 Fast-path %s (%d) not allocated\n",
10969 ((eq) ? ((cq) ? "WQ" : "CQ") : "EQ"), qidx);
10970 return -ENOMEM;
10971 }
10972
10973
10974 rc = lpfc_cq_create(phba, cq, eq,
10975 (qtype == LPFC_MBOX) ? LPFC_MCQ : LPFC_WCQ, qtype);
10976 if (rc) {
10977 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
10978 "6086 Failed setup of CQ (%d), rc = 0x%x\n",
10979 qidx, (uint32_t)rc);
10980 return rc;
10981 }
10982
10983 if (qtype != LPFC_MBOX) {
10984
10985 if (cq_map)
10986 *cq_map = cq->queue_id;
10987
10988 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
10989 "6087 CQ setup: cq[%d]-id=%d, parent eq[%d]-id=%d\n",
10990 qidx, cq->queue_id, qidx, eq->queue_id);
10991
10992
10993 rc = lpfc_wq_create(phba, wq, cq, qtype);
10994 if (rc) {
10995 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
10996 "4618 Fail setup fastpath WQ (%d), rc = 0x%x\n",
10997 qidx, (uint32_t)rc);
10998
10999 return rc;
11000 }
11001
11002
11003 pring = wq->pring;
11004 pring->sli.sli4.wqp = (void *)wq;
11005 cq->pring = pring;
11006
11007 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
11008 "2593 WQ setup: wq[%d]-id=%d assoc=%d, cq[%d]-id=%d\n",
11009 qidx, wq->queue_id, wq->assoc_qid, qidx, cq->queue_id);
11010 } else {
11011 rc = lpfc_mq_create(phba, wq, cq, LPFC_MBOX);
11012 if (rc) {
11013 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
11014 "0539 Failed setup of slow-path MQ: "
11015 "rc = 0x%x\n", rc);
11016
11017 return rc;
11018 }
11019
11020 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
11021 "2589 MBX MQ setup: wq-id=%d, parent cq-id=%d\n",
11022 phba->sli4_hba.mbx_wq->queue_id,
11023 phba->sli4_hba.mbx_cq->queue_id);
11024 }
11025
11026 return 0;
11027}
11028
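/**
 * lpfc_setup_cq_lookup - Setup the CQ lookup table
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine populates the CQ lookup table, indexed by queue id, with
 * every fast-path IO CQ found on the EQ child lists so that a CQ can be
 * resolved from its id without a list walk.
 **/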
11036static void
11037lpfc_setup_cq_lookup(struct lpfc_hba *phba)
11038{
11039 struct lpfc_queue *eq, *childq;
11040 int qidx;
11041
11042 memset(phba->sli4_hba.cq_lookup, 0,
11043 (sizeof(struct lpfc_queue *) * (phba->sli4_hba.cq_max + 1)));
11044
11045 for (qidx = 0; qidx < phba->cfg_irq_chann; qidx++) {
11046
11047 eq = phba->sli4_hba.hba_eq_hdl[qidx].eq;
11048 if (!eq)
11049 continue;
11050
11051 list_for_each_entry(childq, &eq->child_list, list) {
11052 if (childq->queue_id > phba->sli4_hba.cq_max)
11053 continue;
11054 if (childq->subtype == LPFC_IO)
11055 phba->sli4_hba.cq_lookup[childq->queue_id] =
11056 childq;
11057 }
11058 }
11059}
11060
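/**
 * lpfc_sli4_queue_setup - Set up all the SLI4 queues
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked to set up all the SLI4 queues for the HBA
 * operation.
 *
 * Return codes
 *	0 - successful
 *	-ENOMEM - No available memory
 *	-EIO - The mailbox failed to complete successfully
 **/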
11073int
11074lpfc_sli4_queue_setup(struct lpfc_hba *phba)
11075{
11076 uint32_t shdr_status, shdr_add_status;
11077 union lpfc_sli4_cfg_shdr *shdr;
11078 struct lpfc_vector_map_info *cpup;
11079 struct lpfc_sli4_hdw_queue *qp;
11080 LPFC_MBOXQ_t *mboxq;
11081 int qidx, cpu;
11082 uint32_t length, usdelay;
11083 int rc = -ENOMEM;
11084
11085
11086 mboxq = (LPFC_MBOXQ_t *)mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
11087 if (!mboxq) {
11088 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
11089 "3249 Unable to allocate memory for "
11090 "QUERY_FW_CFG mailbox command\n");
11091 return -ENOMEM;
11092 }
11093 length = (sizeof(struct lpfc_mbx_query_fw_config) -
11094 sizeof(struct lpfc_sli4_cfg_mhdr));
11095 lpfc_sli4_config(phba, mboxq, LPFC_MBOX_SUBSYSTEM_COMMON,
11096 LPFC_MBOX_OPCODE_QUERY_FW_CFG,
11097 length, LPFC_SLI4_MBX_EMBED);
11098
11099 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
11100
11101 shdr = (union lpfc_sli4_cfg_shdr *)
11102 &mboxq->u.mqe.un.sli4_config.header.cfg_shdr;
11103 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
11104 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
11105 if (shdr_status || shdr_add_status || rc) {
11106 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
11107 "3250 QUERY_FW_CFG mailbox failed with status "
11108 "x%x add_status x%x, mbx status x%x\n",
11109 shdr_status, shdr_add_status, rc);
11110 mempool_free(mboxq, phba->mbox_mem_pool);
11111 rc = -ENXIO;
11112 goto out_error;
11113 }
11114
11115 phba->sli4_hba.fw_func_mode =
11116 mboxq->u.mqe.un.query_fw_cfg.rsp.function_mode;
11117 phba->sli4_hba.ulp0_mode = mboxq->u.mqe.un.query_fw_cfg.rsp.ulp0_mode;
11118 phba->sli4_hba.ulp1_mode = mboxq->u.mqe.un.query_fw_cfg.rsp.ulp1_mode;
11119 phba->sli4_hba.physical_port =
11120 mboxq->u.mqe.un.query_fw_cfg.rsp.physical_port;
11121 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
11122 "3251 QUERY_FW_CFG: func_mode:x%x, ulp0_mode:x%x, "
11123 "ulp1_mode:x%x\n", phba->sli4_hba.fw_func_mode,
11124 phba->sli4_hba.ulp0_mode, phba->sli4_hba.ulp1_mode);
11125
11126 mempool_free(mboxq, phba->mbox_mem_pool);
11127
11131 qp = phba->sli4_hba.hdwq;
11132
11133
11134 if (!qp) {
11135 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
11136 "3147 Fast-path EQs not allocated\n");
11137 rc = -ENOMEM;
11138 goto out_error;
11139 }
11140
11141
11142 for (qidx = 0; qidx < phba->cfg_irq_chann; qidx++) {
11143
11144 for_each_present_cpu(cpu) {
11145 cpup = &phba->sli4_hba.cpu_map[cpu];
11146
11150 if (!(cpup->flag & LPFC_CPU_FIRST_IRQ))
11151 continue;
11152 if (qidx != cpup->eq)
11153 continue;
11154
11155
11156 rc = lpfc_eq_create(phba, qp[cpup->hdwq].hba_eq,
11157 phba->cfg_fcp_imax);
11158 if (rc) {
11159 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
11160 "0523 Failed setup of fast-path"
11161 " EQ (%d), rc = 0x%x\n",
11162 cpup->eq, (uint32_t)rc);
11163 goto out_destroy;
11164 }
11165
11166
11167 phba->sli4_hba.hba_eq_hdl[cpup->eq].eq =
11168 qp[cpup->hdwq].hba_eq;
11169
11170 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
11171 "2584 HBA EQ setup: queue[%d]-id=%d\n",
11172 cpup->eq,
11173 qp[cpup->hdwq].hba_eq->queue_id);
11174 }
11175 }
11176
11177
11178 for (qidx = 0; qidx < phba->cfg_hdw_queue; qidx++) {
11179 cpu = lpfc_find_cpu_handle(phba, qidx, LPFC_FIND_BY_HDWQ);
11180 cpup = &phba->sli4_hba.cpu_map[cpu];
11181
11182
11183 rc = lpfc_create_wq_cq(phba,
11184 phba->sli4_hba.hdwq[cpup->hdwq].hba_eq,
11185 qp[qidx].io_cq,
11186 qp[qidx].io_wq,
11187 &phba->sli4_hba.hdwq[qidx].io_cq_map,
11188 qidx,
11189 LPFC_IO);
11190 if (rc) {
11191 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
11192 "0535 Failed to setup fastpath "
11193 "IO WQ/CQ (%d), rc = 0x%x\n",
11194 qidx, (uint32_t)rc);
11195 goto out_destroy;
11196 }
11197 }
11198
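	/* Set up slow-path MBOX CQ/MQ */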
11205 if (!phba->sli4_hba.mbx_cq || !phba->sli4_hba.mbx_wq) {
11206 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
11207 "0528 %s not allocated\n",
11208 phba->sli4_hba.mbx_cq ?
11209 "Mailbox WQ" : "Mailbox CQ");
11210 rc = -ENOMEM;
11211 goto out_destroy;
11212 }
11213
11214 rc = lpfc_create_wq_cq(phba, qp[0].hba_eq,
11215 phba->sli4_hba.mbx_cq,
11216 phba->sli4_hba.mbx_wq,
11217 NULL, 0, LPFC_MBOX);
11218 if (rc) {
11219 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
11220 "0529 Failed setup of mailbox WQ/CQ: rc = 0x%x\n",
11221 (uint32_t)rc);
11222 goto out_destroy;
11223 }
11224 if (phba->nvmet_support) {
11225 if (!phba->sli4_hba.nvmet_cqset) {
11226 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
11227 "3165 Fast-path NVME CQ Set "
11228 "array not allocated\n");
11229 rc = -ENOMEM;
11230 goto out_destroy;
11231 }
11232 if (phba->cfg_nvmet_mrq > 1) {
11233 rc = lpfc_cq_create_set(phba,
11234 phba->sli4_hba.nvmet_cqset,
11235 qp,
11236 LPFC_WCQ, LPFC_NVMET);
11237 if (rc) {
11238 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
11239 "3164 Failed setup of NVME CQ "
11240 "Set, rc = 0x%x\n",
11241 (uint32_t)rc);
11242 goto out_destroy;
11243 }
11244 } else {
11245
11246 rc = lpfc_cq_create(phba, phba->sli4_hba.nvmet_cqset[0],
11247 qp[0].hba_eq,
11248 LPFC_WCQ, LPFC_NVMET);
11249 if (rc) {
11250 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
11251 "6089 Failed setup NVMET CQ: "
11252 "rc = 0x%x\n", (uint32_t)rc);
11253 goto out_destroy;
11254 }
11255 phba->sli4_hba.nvmet_cqset[0]->chann = 0;
11256
11257 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
11258 "6090 NVMET CQ setup: cq-id=%d, "
11259 "parent eq-id=%d\n",
11260 phba->sli4_hba.nvmet_cqset[0]->queue_id,
11261 qp[0].hba_eq->queue_id);
11262 }
11263 }
11264
11265
11266 if (!phba->sli4_hba.els_cq || !phba->sli4_hba.els_wq) {
11267 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
11268 "0530 ELS %s not allocated\n",
11269 phba->sli4_hba.els_cq ? "WQ" : "CQ");
11270 rc = -ENOMEM;
11271 goto out_destroy;
11272 }
11273 rc = lpfc_create_wq_cq(phba, qp[0].hba_eq,
11274 phba->sli4_hba.els_cq,
11275 phba->sli4_hba.els_wq,
11276 NULL, 0, LPFC_ELS);
11277 if (rc) {
11278 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
11279 "0525 Failed setup of ELS WQ/CQ: rc = 0x%x\n",
11280 (uint32_t)rc);
11281 goto out_destroy;
11282 }
11283 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
11284 "2590 ELS WQ setup: wq-id=%d, parent cq-id=%d\n",
11285 phba->sli4_hba.els_wq->queue_id,
11286 phba->sli4_hba.els_cq->queue_id);
11287
11288 if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) {
11289
11290 if (!phba->sli4_hba.nvmels_cq || !phba->sli4_hba.nvmels_wq) {
11291 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
11292 "6091 LS %s not allocated\n",
11293 phba->sli4_hba.nvmels_cq ? "WQ" : "CQ");
11294 rc = -ENOMEM;
11295 goto out_destroy;
11296 }
11297 rc = lpfc_create_wq_cq(phba, qp[0].hba_eq,
11298 phba->sli4_hba.nvmels_cq,
11299 phba->sli4_hba.nvmels_wq,
11300 NULL, 0, LPFC_NVME_LS);
11301 if (rc) {
11302 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
					"0526 Failed setup of NVME LS WQ/CQ: "
11304 "rc = 0x%x\n", (uint32_t)rc);
11305 goto out_destroy;
11306 }
11307
11308 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
				"6096 NVME LS WQ setup: wq-id=%d, "
11310 "parent cq-id=%d\n",
11311 phba->sli4_hba.nvmels_wq->queue_id,
11312 phba->sli4_hba.nvmels_cq->queue_id);
11313 }
11314
11318 if (phba->nvmet_support) {
11319 if ((!phba->sli4_hba.nvmet_cqset) ||
11320 (!phba->sli4_hba.nvmet_mrq_hdr) ||
11321 (!phba->sli4_hba.nvmet_mrq_data)) {
11322 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
11323 "6130 MRQ CQ Queues not "
11324 "allocated\n");
11325 rc = -ENOMEM;
11326 goto out_destroy;
11327 }
11328 if (phba->cfg_nvmet_mrq > 1) {
11329 rc = lpfc_mrq_create(phba,
11330 phba->sli4_hba.nvmet_mrq_hdr,
11331 phba->sli4_hba.nvmet_mrq_data,
11332 phba->sli4_hba.nvmet_cqset,
11333 LPFC_NVMET);
11334 if (rc) {
11335 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
11336 "6098 Failed setup of NVMET "
11337 "MRQ: rc = 0x%x\n",
11338 (uint32_t)rc);
11339 goto out_destroy;
11340 }
11341
11342 } else {
11343 rc = lpfc_rq_create(phba,
11344 phba->sli4_hba.nvmet_mrq_hdr[0],
11345 phba->sli4_hba.nvmet_mrq_data[0],
11346 phba->sli4_hba.nvmet_cqset[0],
11347 LPFC_NVMET);
11348 if (rc) {
11349 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
11350 "6057 Failed setup of NVMET "
11351 "Receive Queue: rc = 0x%x\n",
11352 (uint32_t)rc);
11353 goto out_destroy;
11354 }
11355
11356 lpfc_printf_log(
11357 phba, KERN_INFO, LOG_INIT,
11358 "6099 NVMET RQ setup: hdr-rq-id=%d, "
11359 "dat-rq-id=%d parent cq-id=%d\n",
11360 phba->sli4_hba.nvmet_mrq_hdr[0]->queue_id,
11361 phba->sli4_hba.nvmet_mrq_data[0]->queue_id,
11362 phba->sli4_hba.nvmet_cqset[0]->queue_id);
11363
11364 }
11365 }
11366
11367 if (!phba->sli4_hba.hdr_rq || !phba->sli4_hba.dat_rq) {
11368 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
11369 "0540 Receive Queue not allocated\n");
11370 rc = -ENOMEM;
11371 goto out_destroy;
11372 }
11373
11374 rc = lpfc_rq_create(phba, phba->sli4_hba.hdr_rq, phba->sli4_hba.dat_rq,
11375 phba->sli4_hba.els_cq, LPFC_USOL);
11376 if (rc) {
11377 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
11378 "0541 Failed setup of Receive Queue: "
11379 "rc = 0x%x\n", (uint32_t)rc);
11380 goto out_destroy;
11381 }
11382
11383 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
11384 "2592 USL RQ setup: hdr-rq-id=%d, dat-rq-id=%d "
11385 "parent cq-id=%d\n",
11386 phba->sli4_hba.hdr_rq->queue_id,
11387 phba->sli4_hba.dat_rq->queue_id,
11388 phba->sli4_hba.els_cq->queue_id);
11389
11390 if (phba->cfg_fcp_imax)
11391 usdelay = LPFC_SEC_TO_USEC / phba->cfg_fcp_imax;
11392 else
11393 usdelay = 0;
11394
11395 for (qidx = 0; qidx < phba->cfg_irq_chann;
11396 qidx += LPFC_MAX_EQ_DELAY_EQID_CNT)
11397 lpfc_modify_hba_eq_delay(phba, qidx, LPFC_MAX_EQ_DELAY_EQID_CNT,
11398 usdelay);
11399
11400 if (phba->sli4_hba.cq_max) {
11401 kfree(phba->sli4_hba.cq_lookup);
11402 phba->sli4_hba.cq_lookup = kcalloc((phba->sli4_hba.cq_max + 1),
11403 sizeof(struct lpfc_queue *), GFP_KERNEL);
11404 if (!phba->sli4_hba.cq_lookup) {
11405 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
11406 "0549 Failed setup of CQ Lookup table: "
11407 "size 0x%x\n", phba->sli4_hba.cq_max);
11408 rc = -ENOMEM;
11409 goto out_destroy;
11410 }
11411 lpfc_setup_cq_lookup(phba);
11412 }
11413 return 0;
11414
11415out_destroy:
11416 lpfc_sli4_queue_unset(phba);
11417out_error:
11418 return rc;
11419}
11420
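/**
 * lpfc_sli4_queue_unset - Unset all the SLI4 queues
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked to unset all the SLI4 queues with the HBA
 * operation.
 **/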
11433void
11434lpfc_sli4_queue_unset(struct lpfc_hba *phba)
11435{
11436 struct lpfc_sli4_hdw_queue *qp;
11437 struct lpfc_queue *eq;
11438 int qidx;
11439
11440
11441 if (phba->sli4_hba.mbx_wq)
11442 lpfc_mq_destroy(phba, phba->sli4_hba.mbx_wq);
11443
11444
11445 if (phba->sli4_hba.nvmels_wq)
11446 lpfc_wq_destroy(phba, phba->sli4_hba.nvmels_wq);
11447
11448
11449 if (phba->sli4_hba.els_wq)
11450 lpfc_wq_destroy(phba, phba->sli4_hba.els_wq);
11451
11452
11453 if (phba->sli4_hba.hdr_rq)
11454 lpfc_rq_destroy(phba, phba->sli4_hba.hdr_rq,
11455 phba->sli4_hba.dat_rq);
11456
11457
11458 if (phba->sli4_hba.mbx_cq)
11459 lpfc_cq_destroy(phba, phba->sli4_hba.mbx_cq);
11460
11461
11462 if (phba->sli4_hba.els_cq)
11463 lpfc_cq_destroy(phba, phba->sli4_hba.els_cq);
11464
11465
11466 if (phba->sli4_hba.nvmels_cq)
11467 lpfc_cq_destroy(phba, phba->sli4_hba.nvmels_cq);
11468
11469 if (phba->nvmet_support) {
11470
11471 if (phba->sli4_hba.nvmet_mrq_hdr) {
11472 for (qidx = 0; qidx < phba->cfg_nvmet_mrq; qidx++)
11473 lpfc_rq_destroy(
11474 phba,
11475 phba->sli4_hba.nvmet_mrq_hdr[qidx],
11476 phba->sli4_hba.nvmet_mrq_data[qidx]);
11477 }
11478
11479
11480 if (phba->sli4_hba.nvmet_cqset) {
11481 for (qidx = 0; qidx < phba->cfg_nvmet_mrq; qidx++)
11482 lpfc_cq_destroy(
11483 phba, phba->sli4_hba.nvmet_cqset[qidx]);
11484 }
11485 }
11486
11487
11488 if (phba->sli4_hba.hdwq) {
11489
11490 for (qidx = 0; qidx < phba->cfg_hdw_queue; qidx++) {
11491
11492 qp = &phba->sli4_hba.hdwq[qidx];
11493 lpfc_wq_destroy(phba, qp->io_wq);
11494 lpfc_cq_destroy(phba, qp->io_cq);
11495 }
11496
11497 for (qidx = 0; qidx < phba->cfg_irq_chann; qidx++) {
11498
11499 eq = phba->sli4_hba.hba_eq_hdl[qidx].eq;
11500 lpfc_eq_destroy(phba, eq);
11501 }
11502 }
11503
11504 kfree(phba->sli4_hba.cq_lookup);
11505 phba->sli4_hba.cq_lookup = NULL;
11506 phba->sli4_hba.cq_max = 0;
11507}
11508
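/**
 * lpfc_sli4_cq_event_pool_create - Create completion-queue event free pool
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked to allocate and set up a pool of completion queue
 * events.  The pool is sized at four times the CQ entry count and is used
 * by the interrupt service routine to queue slow-path completion events
 * (such as asynchronous events and unsolicited receives) to the worker
 * thread.
 *
 * Return codes
 *	0 - successful
 *	-ENOMEM - No available memory
 **/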
11525static int
11526lpfc_sli4_cq_event_pool_create(struct lpfc_hba *phba)
11527{
11528 struct lpfc_cq_event *cq_event;
11529 int i;
11530
11531 for (i = 0; i < (4 * phba->sli4_hba.cq_ecount); i++) {
11532 cq_event = kmalloc(sizeof(struct lpfc_cq_event), GFP_KERNEL);
11533 if (!cq_event)
11534 goto out_pool_create_fail;
11535 list_add_tail(&cq_event->list,
11536 &phba->sli4_hba.sp_cqe_event_pool);
11537 }
11538 return 0;
11539
11540out_pool_create_fail:
11541 lpfc_sli4_cq_event_pool_destroy(phba);
11542 return -ENOMEM;
11543}
11544
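/**
 * lpfc_sli4_cq_event_pool_destroy - Free completion-queue event free pool
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked to free the pool of completion queue events at
 * driver unload time.  The caller is responsible for returning all
 * outstanding completion-queue events to the pool before invoking this
 * routine.
 **/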
11555static void
11556lpfc_sli4_cq_event_pool_destroy(struct lpfc_hba *phba)
11557{
11558 struct lpfc_cq_event *cq_event, *next_cq_event;
11559
11560 list_for_each_entry_safe(cq_event, next_cq_event,
11561 &phba->sli4_hba.sp_cqe_event_pool, list) {
11562 list_del(&cq_event->list);
11563 kfree(cq_event);
11564 }
11565}
11566
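/**
 * __lpfc_sli4_cq_event_alloc - Allocate a cq event from the free pool
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is the lock-free version of the API invoked to allocate a
 * completion-queue event from the free pool.
 *
 * Return: pointer to the newly allocated completion-queue event if
 * successful, NULL otherwise.
 **/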
11577struct lpfc_cq_event *
11578__lpfc_sli4_cq_event_alloc(struct lpfc_hba *phba)
11579{
11580 struct lpfc_cq_event *cq_event = NULL;
11581
11582 list_remove_head(&phba->sli4_hba.sp_cqe_event_pool, cq_event,
11583 struct lpfc_cq_event, list);
11584 return cq_event;
11585}
11586
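/**
 * lpfc_sli4_cq_event_alloc - Allocate a completion-queue event from free pool
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine allocates a completion-queue event from the free pool while
 * holding hbalock.
 *
 * Return: pointer to the newly allocated completion-queue event if successful,
 * NULL otherwise.
 **/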
11597struct lpfc_cq_event *
11598lpfc_sli4_cq_event_alloc(struct lpfc_hba *phba)
11599{
11600 struct lpfc_cq_event *cq_event;
11601 unsigned long iflags;
11602
11603 spin_lock_irqsave(&phba->hbalock, iflags);
11604 cq_event = __lpfc_sli4_cq_event_alloc(phba);
11605 spin_unlock_irqrestore(&phba->hbalock, iflags);
11606 return cq_event;
11607}
11608
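/**
 * __lpfc_sli4_cq_event_release - Release a completion-queue event to free pool
 * @phba: pointer to lpfc hba data structure.
 * @cq_event: pointer to the completion-queue event to be released.
 *
 * This routine is the lock-free version of the API invoked to release a
 * completion-queue event back into the free pool; the caller must hold
 * hbalock.
 **/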
11617void
11618__lpfc_sli4_cq_event_release(struct lpfc_hba *phba,
11619 struct lpfc_cq_event *cq_event)
11620{
11621 list_add_tail(&cq_event->list, &phba->sli4_hba.sp_cqe_event_pool);
11622}
11623
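/**
 * lpfc_sli4_cq_event_release - Release a completion-queue event to free pool
 * @phba: pointer to lpfc hba data structure.
 * @cq_event: pointer to the completion-queue event to be released.
 *
 * This routine releases a completion-queue event back into the free pool
 * while holding hbalock.
 **/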
11632void
11633lpfc_sli4_cq_event_release(struct lpfc_hba *phba,
11634 struct lpfc_cq_event *cq_event)
11635{
11636 unsigned long iflags;
11637 spin_lock_irqsave(&phba->hbalock, iflags);
11638 __lpfc_sli4_cq_event_release(phba, cq_event);
11639 spin_unlock_irqrestore(&phba->hbalock, iflags);
11640}
11641
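/**
 * lpfc_sli4_cq_event_release_all - Release all cq events to the free pool
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine moves all pending completion-queue events from the ELS XRI
 * abort and asynchronous event work queues back into the free pool, as is
 * done for a device reset.
 **/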
11649static void
11650lpfc_sli4_cq_event_release_all(struct lpfc_hba *phba)
11651{
11652 LIST_HEAD(cq_event_list);
11653 struct lpfc_cq_event *cq_event;
11654 unsigned long iflags;
11655
11656
11657
11658
11659 spin_lock_irqsave(&phba->sli4_hba.els_xri_abrt_list_lock, iflags);
11660 list_splice_init(&phba->sli4_hba.sp_els_xri_aborted_work_queue,
11661 &cq_event_list);
11662 spin_unlock_irqrestore(&phba->sli4_hba.els_xri_abrt_list_lock, iflags);
11663
11664
11665 spin_lock_irqsave(&phba->sli4_hba.asynce_list_lock, iflags);
11666 list_splice_init(&phba->sli4_hba.sp_asynce_work_queue,
11667 &cq_event_list);
11668 spin_unlock_irqrestore(&phba->sli4_hba.asynce_list_lock, iflags);
11669
11670 while (!list_empty(&cq_event_list)) {
11671 list_remove_head(&cq_event_list, cq_event,
11672 struct lpfc_cq_event, list);
11673 lpfc_sli4_cq_event_release(phba, cq_event);
11674 }
11675}
11676
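/**
 * lpfc_pci_function_reset - Reset pci function.
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked to request a PCI function reset. For an if_type 0
 * port it issues the SLI_FUNCTION_RESET mailbox command; for if_type 2/6
 * ports it waits for the port to report ready and then drives the reset
 * through the SLIPORT control register.
 *
 * Return codes
 *      0 - successful
 *      -ENOMEM - No available memory
 *      -ENXIO, -ENODEV - error
 **/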
11689int
11690lpfc_pci_function_reset(struct lpfc_hba *phba)
11691{
11692 LPFC_MBOXQ_t *mboxq;
11693 uint32_t rc = 0, if_type;
11694 uint32_t shdr_status, shdr_add_status;
11695 uint32_t rdy_chk;
11696 uint32_t port_reset = 0;
11697 union lpfc_sli4_cfg_shdr *shdr;
11698 struct lpfc_register reg_data;
11699 uint16_t devid;
11700
11701 if_type = bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf);
11702 switch (if_type) {
11703 case LPFC_SLI_INTF_IF_TYPE_0:
11704 mboxq = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool,
11705 GFP_KERNEL);
11706 if (!mboxq) {
11707 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
11708 "0494 Unable to allocate memory for "
11709 "issuing SLI_FUNCTION_RESET mailbox "
11710 "command\n");
11711 return -ENOMEM;
11712 }
11713
11714
11715 lpfc_sli4_config(phba, mboxq, LPFC_MBOX_SUBSYSTEM_COMMON,
11716 LPFC_MBOX_OPCODE_FUNCTION_RESET, 0,
11717 LPFC_SLI4_MBX_EMBED);
11718 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
11719 shdr = (union lpfc_sli4_cfg_shdr *)
11720 &mboxq->u.mqe.un.sli4_config.header.cfg_shdr;
11721 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
11722 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status,
11723 &shdr->response);
11724 mempool_free(mboxq, phba->mbox_mem_pool);
11725 if (shdr_status || shdr_add_status || rc) {
11726 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
11727 "0495 SLI_FUNCTION_RESET mailbox "
11728 "failed with status x%x add_status x%x,"
11729 " mbx status x%x\n",
11730 shdr_status, shdr_add_status, rc);
11731 rc = -ENXIO;
11732 }
11733 break;
11734 case LPFC_SLI_INTF_IF_TYPE_2:
11735 case LPFC_SLI_INTF_IF_TYPE_6:
11736wait:
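		/*
		 * Poll the Port Status Register for up to 30 seconds
		 * (1500 iterations x 20 msec) waiting for the port to
		 * report ready before driving the reset.
		 */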
		for (rdy_chk = 0; rdy_chk < 1500; rdy_chk++) {
			if (lpfc_readl(phba->sli4_hba.u.if_type2.
				       STATUSregaddr, &reg_data.word0)) {
				rc = -ENODEV;
				goto out;
			}
			if (bf_get(lpfc_sliport_status_rdy, &reg_data))
				break;
			msleep(20);
		}

		if (!bf_get(lpfc_sliport_status_rdy, &reg_data)) {
			phba->work_status[0] = readl(
				phba->sli4_hba.u.if_type2.ERR1regaddr);
			phba->work_status[1] = readl(
				phba->sli4_hba.u.if_type2.ERR2regaddr);
			lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
					"2890 Port not ready, port status reg "
					"0x%x error 1=0x%x, error 2=0x%x\n",
					reg_data.word0,
					phba->work_status[0],
					phba->work_status[1]);
			rc = -ENODEV;
			goto out;
		}

		if (bf_get(lpfc_sliport_status_pldv, &reg_data))
			lpfc_pldv_detect = true;

		if (!port_reset) {
			/* Issue the first port reset: ask the port to
			 * initialize itself, then re-run the ready check.
			 */
			reg_data.word0 = 0;
			bf_set(lpfc_sliport_ctrl_end, &reg_data,
			       LPFC_SLIPORT_LITTLE_ENDIAN);
			bf_set(lpfc_sliport_ctrl_ip, &reg_data,
			       LPFC_SLIPORT_INIT_PORT);
			writel(reg_data.word0, phba->sli4_hba.u.if_type2.
			       CTRLregaddr);
			/* flush */
			pci_read_config_word(phba->pcidev,
					     PCI_DEVICE_ID, &devid);

			port_reset = 1;
			msleep(20);
			goto wait;
		} else if (bf_get(lpfc_sliport_status_rn, &reg_data)) {
			rc = -ENODEV;
			goto out;
		}
11793 break;
11794
11795 case LPFC_SLI_INTF_IF_TYPE_1:
11796 default:
11797 break;
11798 }
11799
11800out:
11801
11802 if (rc) {
11803 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
11804 "3317 HBA not functional: IP Reset Failed "
11805 "try: echo fw_reset > board_mode\n");
11806 rc = -ENODEV;
11807 }
11808
11809 return rc;
11810}
11811
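/**
 * lpfc_sli4_pci_mem_setup - Setup SLI4 HBA PCI memory space.
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked to set up the PCI device memory space for device
 * with SLI-4 interface spec: it maps the config, control and doorbell BARs
 * according to the if_type and selects the EQ/CQ doorbell routines.
 *
 * Return codes
 *      0 - successful
 *      other values - error
 **/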
11823static int
11824lpfc_sli4_pci_mem_setup(struct lpfc_hba *phba)
11825{
11826 struct pci_dev *pdev = phba->pcidev;
11827 unsigned long bar0map_len, bar1map_len, bar2map_len;
11828 int error;
11829 uint32_t if_type;
11830
11831 if (!pdev)
11832 return -ENODEV;
11833
11834
11835 error = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
11836 if (error)
11837 error = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
11838 if (error)
11839 return error;
11840
11841
11842
11843
11844
11845 if (pci_read_config_dword(pdev, LPFC_SLI_INTF,
11846 &phba->sli4_hba.sli_intf.word0)) {
11847 return -ENODEV;
11848 }
11849
11850
11851 if (bf_get(lpfc_sli_intf_valid, &phba->sli4_hba.sli_intf) !=
11852 LPFC_SLI_INTF_VALID) {
11853 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
11854 "2894 SLI_INTF reg contents invalid "
11855 "sli_intf reg 0x%x\n",
11856 phba->sli4_hba.sli_intf.word0);
11857 return -ENODEV;
11858 }
11859
11860 if_type = bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf);
11861
11862
11863
11864
11865
11866
11867 if (pci_resource_start(pdev, PCI_64BIT_BAR0)) {
11868 phba->pci_bar0_map = pci_resource_start(pdev, PCI_64BIT_BAR0);
11869 bar0map_len = pci_resource_len(pdev, PCI_64BIT_BAR0);
11870
11871
11872
11873
11874
11875 phba->sli4_hba.conf_regs_memmap_p =
11876 ioremap(phba->pci_bar0_map, bar0map_len);
11877 if (!phba->sli4_hba.conf_regs_memmap_p) {
11878 dev_printk(KERN_ERR, &pdev->dev,
11879 "ioremap failed for SLI4 PCI config "
11880 "registers.\n");
11881 return -ENODEV;
11882 }
11883 phba->pci_bar0_memmap_p = phba->sli4_hba.conf_regs_memmap_p;
11884
11885 lpfc_sli4_bar0_register_memmap(phba, if_type);
11886 } else {
11887 phba->pci_bar0_map = pci_resource_start(pdev, 1);
11888 bar0map_len = pci_resource_len(pdev, 1);
		if (if_type >= LPFC_SLI_INTF_IF_TYPE_2) {
			dev_printk(KERN_ERR, &pdev->dev,
				   "FATAL - No BAR0 mapping for SLI4, if_type >= 2\n");
			return -ENODEV;
		}
11894 phba->sli4_hba.conf_regs_memmap_p =
11895 ioremap(phba->pci_bar0_map, bar0map_len);
11896 if (!phba->sli4_hba.conf_regs_memmap_p) {
11897 dev_printk(KERN_ERR, &pdev->dev,
11898 "ioremap failed for SLI4 PCI config "
11899 "registers.\n");
11900 return -ENODEV;
11901 }
11902 lpfc_sli4_bar0_register_memmap(phba, if_type);
11903 }
11904
11905 if (if_type == LPFC_SLI_INTF_IF_TYPE_0) {
11906 if (pci_resource_start(pdev, PCI_64BIT_BAR2)) {
11907
11908
11909
11910
11911 phba->pci_bar1_map = pci_resource_start(pdev,
11912 PCI_64BIT_BAR2);
11913 bar1map_len = pci_resource_len(pdev, PCI_64BIT_BAR2);
11914 phba->sli4_hba.ctrl_regs_memmap_p =
11915 ioremap(phba->pci_bar1_map,
11916 bar1map_len);
11917 if (!phba->sli4_hba.ctrl_regs_memmap_p) {
11918 dev_err(&pdev->dev,
11919 "ioremap failed for SLI4 HBA "
11920 "control registers.\n");
11921 error = -ENOMEM;
11922 goto out_iounmap_conf;
11923 }
11924 phba->pci_bar2_memmap_p =
11925 phba->sli4_hba.ctrl_regs_memmap_p;
11926 lpfc_sli4_bar1_register_memmap(phba, if_type);
11927 } else {
11928 error = -ENOMEM;
11929 goto out_iounmap_conf;
11930 }
11931 }
11932
11933 if ((if_type == LPFC_SLI_INTF_IF_TYPE_6) &&
11934 (pci_resource_start(pdev, PCI_64BIT_BAR2))) {
11935
11936
11937
11938
11939 phba->pci_bar1_map = pci_resource_start(pdev, PCI_64BIT_BAR2);
11940 bar1map_len = pci_resource_len(pdev, PCI_64BIT_BAR2);
11941 phba->sli4_hba.drbl_regs_memmap_p =
11942 ioremap(phba->pci_bar1_map, bar1map_len);
11943 if (!phba->sli4_hba.drbl_regs_memmap_p) {
11944 dev_err(&pdev->dev,
11945 "ioremap failed for SLI4 HBA doorbell registers.\n");
11946 error = -ENOMEM;
11947 goto out_iounmap_conf;
11948 }
11949 phba->pci_bar2_memmap_p = phba->sli4_hba.drbl_regs_memmap_p;
11950 lpfc_sli4_bar1_register_memmap(phba, if_type);
11951 }
11952
11953 if (if_type == LPFC_SLI_INTF_IF_TYPE_0) {
11954 if (pci_resource_start(pdev, PCI_64BIT_BAR4)) {
11955
11956
11957
11958
11959 phba->pci_bar2_map = pci_resource_start(pdev,
11960 PCI_64BIT_BAR4);
11961 bar2map_len = pci_resource_len(pdev, PCI_64BIT_BAR4);
11962 phba->sli4_hba.drbl_regs_memmap_p =
11963 ioremap(phba->pci_bar2_map,
11964 bar2map_len);
11965 if (!phba->sli4_hba.drbl_regs_memmap_p) {
11966 dev_err(&pdev->dev,
11967 "ioremap failed for SLI4 HBA"
11968 " doorbell registers.\n");
11969 error = -ENOMEM;
11970 goto out_iounmap_ctrl;
11971 }
11972 phba->pci_bar4_memmap_p =
11973 phba->sli4_hba.drbl_regs_memmap_p;
11974 error = lpfc_sli4_bar2_register_memmap(phba, LPFC_VF0);
11975 if (error)
11976 goto out_iounmap_all;
11977 } else {
11978 error = -ENOMEM;
11979 goto out_iounmap_all;
11980 }
11981 }
11982
11983 if (if_type == LPFC_SLI_INTF_IF_TYPE_6 &&
11984 pci_resource_start(pdev, PCI_64BIT_BAR4)) {
11985
11986
11987
11988
11989 phba->pci_bar2_map = pci_resource_start(pdev, PCI_64BIT_BAR4);
11990 bar2map_len = pci_resource_len(pdev, PCI_64BIT_BAR4);
11991 phba->sli4_hba.dpp_regs_memmap_p =
11992 ioremap(phba->pci_bar2_map, bar2map_len);
11993 if (!phba->sli4_hba.dpp_regs_memmap_p) {
11994 dev_err(&pdev->dev,
11995 "ioremap failed for SLI4 HBA dpp registers.\n");
11996 error = -ENOMEM;
11997 goto out_iounmap_ctrl;
11998 }
11999 phba->pci_bar4_memmap_p = phba->sli4_hba.dpp_regs_memmap_p;
12000 }
12001
12002
12003 switch (if_type) {
12004 case LPFC_SLI_INTF_IF_TYPE_0:
12005 case LPFC_SLI_INTF_IF_TYPE_2:
12006 phba->sli4_hba.sli4_eq_clr_intr = lpfc_sli4_eq_clr_intr;
12007 phba->sli4_hba.sli4_write_eq_db = lpfc_sli4_write_eq_db;
12008 phba->sli4_hba.sli4_write_cq_db = lpfc_sli4_write_cq_db;
12009 break;
12010 case LPFC_SLI_INTF_IF_TYPE_6:
12011 phba->sli4_hba.sli4_eq_clr_intr = lpfc_sli4_if6_eq_clr_intr;
12012 phba->sli4_hba.sli4_write_eq_db = lpfc_sli4_if6_write_eq_db;
12013 phba->sli4_hba.sli4_write_cq_db = lpfc_sli4_if6_write_cq_db;
12014 break;
12015 default:
12016 break;
12017 }
12018
12019 return 0;
12020
12021out_iounmap_all:
12022 iounmap(phba->sli4_hba.drbl_regs_memmap_p);
12023out_iounmap_ctrl:
12024 iounmap(phba->sli4_hba.ctrl_regs_memmap_p);
12025out_iounmap_conf:
12026 iounmap(phba->sli4_hba.conf_regs_memmap_p);
12027
12028 return error;
12029}
12030
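/**
 * lpfc_sli4_pci_mem_unset - Unset SLI4 HBA PCI memory space.
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked to unset the PCI device memory space for device
 * with SLI-4 interface spec.
 **/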
12038static void
12039lpfc_sli4_pci_mem_unset(struct lpfc_hba *phba)
12040{
12041 uint32_t if_type;
12042 if_type = bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf);
12043
12044 switch (if_type) {
12045 case LPFC_SLI_INTF_IF_TYPE_0:
12046 iounmap(phba->sli4_hba.drbl_regs_memmap_p);
12047 iounmap(phba->sli4_hba.ctrl_regs_memmap_p);
12048 iounmap(phba->sli4_hba.conf_regs_memmap_p);
12049 break;
12050 case LPFC_SLI_INTF_IF_TYPE_2:
12051 iounmap(phba->sli4_hba.conf_regs_memmap_p);
12052 break;
12053 case LPFC_SLI_INTF_IF_TYPE_6:
12054 iounmap(phba->sli4_hba.drbl_regs_memmap_p);
12055 iounmap(phba->sli4_hba.conf_regs_memmap_p);
12056 if (phba->sli4_hba.dpp_regs_memmap_p)
12057 iounmap(phba->sli4_hba.dpp_regs_memmap_p);
12058 break;
12059 case LPFC_SLI_INTF_IF_TYPE_1:
12060 default:
12061 dev_printk(KERN_ERR, &phba->pcidev->dev,
12062 "FATAL - unsupported SLI4 interface type - %d\n",
12063 if_type);
12064 break;
12065 }
12066}
12067
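/**
 * lpfc_sli_enable_msix - Enable MSI-X interrupt mode on SLI-3 device
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked to enable the MSI-X interrupt vectors to device
 * with SLI-3 interface specs.
 *
 * Return codes
 *   0 - successful
 *   other values - error
 **/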
12079static int
12080lpfc_sli_enable_msix(struct lpfc_hba *phba)
12081{
12082 int rc;
12083 LPFC_MBOXQ_t *pmb;
12084
12085
12086 rc = pci_alloc_irq_vectors(phba->pcidev,
12087 LPFC_MSIX_VECTORS, LPFC_MSIX_VECTORS, PCI_IRQ_MSIX);
12088 if (rc < 0) {
12089 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
12090 "0420 PCI enable MSI-X failed (%d)\n", rc);
12091 goto vec_fail_out;
12092 }
12093
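	/*
	 * Assign MSI-X vector 0 to the slow-path handler and vector 1
	 * to the fast-path handler, then tell the firmware which
	 * vectors to use via the CONFIG_MSI mailbox command.
	 */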
12099 rc = request_irq(pci_irq_vector(phba->pcidev, 0),
12100 &lpfc_sli_sp_intr_handler, 0,
12101 LPFC_SP_DRIVER_HANDLER_NAME, phba);
12102 if (rc) {
12103 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
12104 "0421 MSI-X slow-path request_irq failed "
12105 "(%d)\n", rc);
12106 goto msi_fail_out;
12107 }
12108
12109
12110 rc = request_irq(pci_irq_vector(phba->pcidev, 1),
12111 &lpfc_sli_fp_intr_handler, 0,
12112 LPFC_FP_DRIVER_HANDLER_NAME, phba);
12113
12114 if (rc) {
12115 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
12116 "0429 MSI-X fast-path request_irq failed "
12117 "(%d)\n", rc);
12118 goto irq_fail_out;
12119 }
12120
12121
12122
12123
12124 pmb = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
12125
12126 if (!pmb) {
12127 rc = -ENOMEM;
12128 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
12129 "0474 Unable to allocate memory for issuing "
12130 "MBOX_CONFIG_MSI command\n");
12131 goto mem_fail_out;
12132 }
12133 rc = lpfc_config_msi(phba, pmb);
12134 if (rc)
12135 goto mbx_fail_out;
12136 rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL);
12137 if (rc != MBX_SUCCESS) {
12138 lpfc_printf_log(phba, KERN_WARNING, LOG_MBOX,
12139 "0351 Config MSI mailbox command failed, "
12140 "mbxCmd x%x, mbxStatus x%x\n",
12141 pmb->u.mb.mbxCommand, pmb->u.mb.mbxStatus);
12142 goto mbx_fail_out;
12143 }
12144
12145
12146 mempool_free(pmb, phba->mbox_mem_pool);
12147 return rc;
12148
12149mbx_fail_out:
12150
12151 mempool_free(pmb, phba->mbox_mem_pool);
12152
12153mem_fail_out:
12154
12155 free_irq(pci_irq_vector(phba->pcidev, 1), phba);
12156
12157irq_fail_out:
12158
12159 free_irq(pci_irq_vector(phba->pcidev, 0), phba);
12160
12161msi_fail_out:
12162
12163 pci_free_irq_vectors(phba->pcidev);
12164
12165vec_fail_out:
12166 return rc;
12167}
12168
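/**
 * lpfc_sli_enable_msi - Enable MSI interrupt mode on SLI-3 device.
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked to enable the MSI interrupt mode to device with
 * SLI-3 interface spec. The kernel function pci_enable_msi() is called to
 * enable the MSI vector, then request_irq() registers the interrupt
 * handler for it.
 *
 * Return codes
 *      0 - successful
 *      other values - error
 **/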
12183static int
12184lpfc_sli_enable_msi(struct lpfc_hba *phba)
12185{
12186 int rc;
12187
12188 rc = pci_enable_msi(phba->pcidev);
12189 if (!rc)
12190 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
12191 "0012 PCI enable MSI mode success.\n");
12192 else {
12193 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
12194 "0471 PCI enable MSI mode failed (%d)\n", rc);
12195 return rc;
12196 }
12197
12198 rc = request_irq(phba->pcidev->irq, lpfc_sli_intr_handler,
12199 0, LPFC_DRIVER_NAME, phba);
12200 if (rc) {
12201 pci_disable_msi(phba->pcidev);
12202 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
12203 "0478 MSI request_irq failed (%d)\n", rc);
12204 }
12205 return rc;
12206}
12207
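/**
 * lpfc_sli_enable_intr - Enable device interrupt to SLI-3 device.
 * @phba: pointer to lpfc hba data structure.
 * @cfg_mode: Interrupt configuration mode (0, 1, or 2).
 *
 * This routine is invoked to enable device interrupt and associate the
 * driver's interrupt handler(s) with interrupt vector(s). Depending on the
 * configured mode, the driver falls back from MSI-X to MSI to INTx until a
 * mode supported by the platform, kernel and device is found.
 *
 * Return: the interrupt mode actually configured (0 = INTx, 1 = MSI,
 * 2 = MSI-X), or LPFC_INTR_ERROR on failure.
 **/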
12225static uint32_t
12226lpfc_sli_enable_intr(struct lpfc_hba *phba, uint32_t cfg_mode)
12227{
12228 uint32_t intr_mode = LPFC_INTR_ERROR;
12229 int retval;
12230
12231
12232 retval = lpfc_sli_config_port(phba, LPFC_SLI_REV3);
12233 if (retval)
12234 return intr_mode;
12235 phba->hba_flag &= ~HBA_NEEDS_CFG_PORT;
12236
12237 if (cfg_mode == 2) {
12238
12239 retval = lpfc_sli_enable_msix(phba);
12240 if (!retval) {
12241
12242 phba->intr_type = MSIX;
12243 intr_mode = 2;
12244 }
12245 }
12246
12247
12248 if (cfg_mode >= 1 && phba->intr_type == NONE) {
12249 retval = lpfc_sli_enable_msi(phba);
12250 if (!retval) {
12251
12252 phba->intr_type = MSI;
12253 intr_mode = 1;
12254 }
12255 }
12256
12257
12258 if (phba->intr_type == NONE) {
12259 retval = request_irq(phba->pcidev->irq, lpfc_sli_intr_handler,
12260 IRQF_SHARED, LPFC_DRIVER_NAME, phba);
12261 if (!retval) {
12262
12263 phba->intr_type = INTx;
12264 intr_mode = 0;
12265 }
12266 }
12267 return intr_mode;
12268}
12269
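/**
 * lpfc_sli_disable_intr - Disable device interrupt to SLI-3 device.
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked to disable device interrupt and disassociate the
 * driver's interrupt handler(s) from the interrupt vector(s), releasing the
 * vector(s) back to the kernel.
 **/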
12279static void
12280lpfc_sli_disable_intr(struct lpfc_hba *phba)
12281{
12282 int nr_irqs, i;
12283
12284 if (phba->intr_type == MSIX)
12285 nr_irqs = LPFC_MSIX_VECTORS;
12286 else
12287 nr_irqs = 1;
12288
12289 for (i = 0; i < nr_irqs; i++)
12290 free_irq(pci_irq_vector(phba->pcidev, i), phba);
12291 pci_free_irq_vectors(phba->pcidev);
12292
12293
12294 phba->intr_type = NONE;
12295 phba->sli.slistat.sli_intr = 0;
12296}
12297
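/**
 * lpfc_find_cpu_handle - Find the CPU that corresponds to the specified Queue
 * @phba: pointer to lpfc hba data structure.
 * @id: EQ vector index or Hardware Queue index
 * @match: LPFC_FIND_BY_EQ = match by EQ
 *         LPFC_FIND_BY_HDWQ = match by Hardware Queue
 *
 * Return: the first present CPU that matches the selection criteria, or 0
 * if none matches.
 **/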
12306static uint16_t
12307lpfc_find_cpu_handle(struct lpfc_hba *phba, uint16_t id, int match)
12308{
12309 struct lpfc_vector_map_info *cpup;
12310 int cpu;
12311
12312
12313 for_each_present_cpu(cpu) {
12314 cpup = &phba->sli4_hba.cpu_map[cpu];
12315
12316
12317
12318
12319
12320 if ((match == LPFC_FIND_BY_EQ) &&
12321 (cpup->flag & LPFC_CPU_FIRST_IRQ) &&
12322 (cpup->eq == id))
12323 return cpu;
12324
12325
12326 if ((match == LPFC_FIND_BY_HDWQ) && (cpup->hdwq == id))
12327 return cpu;
12328 }
12329 return 0;
12330}
12331
12332#ifdef CONFIG_X86
12333
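/**
 * lpfc_find_hyper - Determine if the CPU map entry is hyper-threaded
 * @phba: pointer to lpfc hba data structure.
 * @cpu: CPU map index
 * @phys_id: CPU package physical id
 * @core_id: CPU core id
 *
 * Return: 1 if another present CPU shares the same physical and core id,
 * 0 otherwise.
 **/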
12340static int
12341lpfc_find_hyper(struct lpfc_hba *phba, int cpu,
12342 uint16_t phys_id, uint16_t core_id)
12343{
12344 struct lpfc_vector_map_info *cpup;
12345 int idx;
12346
12347 for_each_present_cpu(idx) {
12348 cpup = &phba->sli4_hba.cpu_map[idx];
12349
12350 if ((cpup->phys_id == phys_id) &&
12351 (cpup->core_id == core_id) &&
12352 (cpu != idx))
12353 return 1;
12354 }
12355 return 0;
12356}
12357#endif
12358
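/**
 * lpfc_assign_eq_map_info - Assigns eq for vector_map structure
 * @phba: pointer to lpfc hba data structure.
 * @eqidx: index for eq and irq vector
 * @flag: flags to set for vector_map structure
 * @cpu: cpu used to index vector_map structure
 *
 * The routine assigns eq info into the cpu's vector_map entry and logs
 * the resulting affinity.
 **/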
12368static inline void
12369lpfc_assign_eq_map_info(struct lpfc_hba *phba, uint16_t eqidx, uint16_t flag,
12370 unsigned int cpu)
12371{
12372 struct lpfc_vector_map_info *cpup = &phba->sli4_hba.cpu_map[cpu];
12373 struct lpfc_hba_eq_hdl *eqhdl = lpfc_get_eq_hdl(eqidx);
12374
12375 cpup->eq = eqidx;
12376 cpup->flag |= flag;
12377
12378 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
12379 "3336 Set Affinity: CPU %d irq %d eq %d flag x%x\n",
12380 cpu, eqhdl->irq, cpup->eq, cpup->flag);
12381}
12382
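/**
 * lpfc_cpu_map_array_init - Initialize cpu_map structure
 * @phba: pointer to lpfc hba data structure.
 *
 * The routine initializes the cpu maps for cpu, cpu affinity, and eq data.
 **/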
12389static void
12390lpfc_cpu_map_array_init(struct lpfc_hba *phba)
12391{
12392 struct lpfc_vector_map_info *cpup;
12393 struct lpfc_eq_intr_info *eqi;
12394 int cpu;
12395
12396 for_each_possible_cpu(cpu) {
12397 cpup = &phba->sli4_hba.cpu_map[cpu];
12398 cpup->phys_id = LPFC_VECTOR_MAP_EMPTY;
12399 cpup->core_id = LPFC_VECTOR_MAP_EMPTY;
12400 cpup->hdwq = LPFC_VECTOR_MAP_EMPTY;
12401 cpup->eq = LPFC_VECTOR_MAP_EMPTY;
12402 cpup->flag = 0;
12403 eqi = per_cpu_ptr(phba->sli4_hba.eq_info, cpu);
12404 INIT_LIST_HEAD(&eqi->list);
12405 eqi->icnt = 0;
12406 }
12407}
12408
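/**
 * lpfc_hba_eq_hdl_array_init - Initialize hba_eq_hdl structure
 * @phba: pointer to lpfc hba data structure.
 *
 * The routine initializes the hba_eq_hdl array entries for each
 * configured irq channel.
 **/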
12415static void
12416lpfc_hba_eq_hdl_array_init(struct lpfc_hba *phba)
12417{
12418 struct lpfc_hba_eq_hdl *eqhdl;
12419 int i;
12420
12421 for (i = 0; i < phba->cfg_irq_chann; i++) {
12422 eqhdl = lpfc_get_eq_hdl(i);
12423 eqhdl->irq = LPFC_VECTOR_MAP_EMPTY;
12424 eqhdl->phba = phba;
12425 }
12426}
12427
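/**
 * lpfc_cpu_affinity_check - Check vector CPU affinity mappings
 * @phba: pointer to lpfc hba data structure.
 * @vectors: number of msix vectors allocated.
 *
 * The routine figures out the CPU affinity assignment for every MSI-X
 * vector allocated for the HBA. In addition, the CPU to hardware queue
 * mapping is calculated and the phba->sli4_hba.cpu_map array reflects
 * the result.
 **/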
12438static void
12439lpfc_cpu_affinity_check(struct lpfc_hba *phba, int vectors)
12440{
12441 int i, cpu, idx, next_idx, new_cpu, start_cpu, first_cpu;
12442 int max_phys_id, min_phys_id;
12443 int max_core_id, min_core_id;
12444 struct lpfc_vector_map_info *cpup;
12445 struct lpfc_vector_map_info *new_cpup;
12446#ifdef CONFIG_X86
12447 struct cpuinfo_x86 *cpuinfo;
12448#endif
12449#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
12450 struct lpfc_hdwq_stat *c_stat;
12451#endif
12452
12453 max_phys_id = 0;
12454 min_phys_id = LPFC_VECTOR_MAP_EMPTY;
12455 max_core_id = 0;
12456 min_core_id = LPFC_VECTOR_MAP_EMPTY;
12457
12458
12459 for_each_present_cpu(cpu) {
12460 cpup = &phba->sli4_hba.cpu_map[cpu];
12461#ifdef CONFIG_X86
12462 cpuinfo = &cpu_data(cpu);
12463 cpup->phys_id = cpuinfo->phys_proc_id;
12464 cpup->core_id = cpuinfo->cpu_core_id;
12465 if (lpfc_find_hyper(phba, cpu, cpup->phys_id, cpup->core_id))
12466 cpup->flag |= LPFC_CPU_MAP_HYPER;
12467#else
12468
12469 cpup->phys_id = 0;
12470 cpup->core_id = cpu;
12471#endif
12472
12473 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
12474 "3328 CPU %d physid %d coreid %d flag x%x\n",
12475 cpu, cpup->phys_id, cpup->core_id, cpup->flag);
12476
12477 if (cpup->phys_id > max_phys_id)
12478 max_phys_id = cpup->phys_id;
12479 if (cpup->phys_id < min_phys_id)
12480 min_phys_id = cpup->phys_id;
12481
12482 if (cpup->core_id > max_core_id)
12483 max_core_id = cpup->core_id;
12484 if (cpup->core_id < min_core_id)
12485 min_core_id = cpup->core_id;
12486 }
12487
12488
12489
12490
12491
12492
12493 first_cpu = cpumask_first(cpu_present_mask);
12494 start_cpu = first_cpu;
12495
12496 for_each_present_cpu(cpu) {
12497 cpup = &phba->sli4_hba.cpu_map[cpu];
12498
12499
12500 if (cpup->eq == LPFC_VECTOR_MAP_EMPTY) {
12501
12502 cpup->flag |= LPFC_CPU_MAP_UNASSIGN;
12503
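			/*
			 * First pass: borrow an eq from another CPU in
			 * the same phys_id that already has one,
			 * resuming the search at start_cpu so the
			 * borrowed eqs stay spread out.
			 */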
12509 new_cpu = start_cpu;
12510 for (i = 0; i < phba->sli4_hba.num_present_cpu; i++) {
12511 new_cpup = &phba->sli4_hba.cpu_map[new_cpu];
12512 if (!(new_cpup->flag & LPFC_CPU_MAP_UNASSIGN) &&
12513 (new_cpup->eq != LPFC_VECTOR_MAP_EMPTY) &&
12514 (new_cpup->phys_id == cpup->phys_id))
12515 goto found_same;
12516 new_cpu = cpumask_next(
12517 new_cpu, cpu_present_mask);
12518 if (new_cpu == nr_cpumask_bits)
12519 new_cpu = first_cpu;
12520 }
12521
12522 continue;
12523found_same:
12524
12525 cpup->eq = new_cpup->eq;
12526
12527
12528
12529
12530
12531 start_cpu = cpumask_next(new_cpu, cpu_present_mask);
12532 if (start_cpu == nr_cpumask_bits)
12533 start_cpu = first_cpu;
12534
12535 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
12536 "3337 Set Affinity: CPU %d "
12537 "eq %d from peer cpu %d same "
12538 "phys_id (%d)\n",
12539 cpu, cpup->eq, new_cpu,
12540 cpup->phys_id);
12541 }
12542 }
12543
12544
12545 start_cpu = first_cpu;
12546
12547 for_each_present_cpu(cpu) {
12548 cpup = &phba->sli4_hba.cpu_map[cpu];
12549
12550
12551 if (cpup->eq == LPFC_VECTOR_MAP_EMPTY) {
12552
12553 cpup->flag |= LPFC_CPU_MAP_UNASSIGN;
12554
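			/*
			 * Second pass: borrow any eq that has already
			 * been assigned, regardless of phys_id, again
			 * resuming from start_cpu.
			 */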
12560 new_cpu = start_cpu;
12561 for (i = 0; i < phba->sli4_hba.num_present_cpu; i++) {
12562 new_cpup = &phba->sli4_hba.cpu_map[new_cpu];
12563 if (!(new_cpup->flag & LPFC_CPU_MAP_UNASSIGN) &&
12564 (new_cpup->eq != LPFC_VECTOR_MAP_EMPTY))
12565 goto found_any;
12566 new_cpu = cpumask_next(
12567 new_cpu, cpu_present_mask);
12568 if (new_cpu == nr_cpumask_bits)
12569 new_cpu = first_cpu;
12570 }
12571
			lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
					"3339 Set Affinity: CPU %d "
					"eq %d UNASSIGNED\n",
					cpu, cpup->eq);
12576 continue;
12577found_any:
12578
12579 cpup->eq = new_cpup->eq;
12580
12581
12582
12583
12584
12585 start_cpu = cpumask_next(new_cpu, cpu_present_mask);
12586 if (start_cpu == nr_cpumask_bits)
12587 start_cpu = first_cpu;
12588
12589 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
12590 "3338 Set Affinity: CPU %d "
12591 "eq %d from peer cpu %d (%d/%d)\n",
12592 cpu, cpup->eq, new_cpu,
12593 new_cpup->phys_id, new_cpup->core_id);
12594 }
12595 }
12596
12597
12598
12599
12600 idx = 0;
12601 for_each_present_cpu(cpu) {
12602 cpup = &phba->sli4_hba.cpu_map[cpu];
12603
12604
12605 if (!(cpup->flag & LPFC_CPU_FIRST_IRQ))
12606 continue;
12607
12608
12609 cpup->hdwq = idx;
12610 idx++;
12611 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
12612 "3333 Set Affinity: CPU %d (phys %d core %d): "
12613 "hdwq %d eq %d flg x%x\n",
12614 cpu, cpup->phys_id, cpup->core_id,
12615 cpup->hdwq, cpup->eq, cpup->flag);
12616 }
12617
12618
12619
12620
12621
12622
12623
12624
12625 next_idx = idx;
12626 start_cpu = 0;
12627 idx = 0;
12628 for_each_present_cpu(cpu) {
12629 cpup = &phba->sli4_hba.cpu_map[cpu];
12630
12631
12632 if (cpup->flag & LPFC_CPU_FIRST_IRQ)
12633 continue;
12634
12635
12636
12637
12638
12639 if (next_idx < phba->cfg_hdw_queue) {
12640 cpup->hdwq = next_idx;
12641 next_idx++;
12642 continue;
12643 }
12644
12645
12646
12647
12648
12649
12650 new_cpu = start_cpu;
12651 for (i = 0; i < phba->sli4_hba.num_present_cpu; i++) {
12652 new_cpup = &phba->sli4_hba.cpu_map[new_cpu];
12653 if (new_cpup->hdwq != LPFC_VECTOR_MAP_EMPTY &&
12654 new_cpup->phys_id == cpup->phys_id &&
12655 new_cpup->core_id == cpup->core_id) {
12656 goto found_hdwq;
12657 }
12658 new_cpu = cpumask_next(new_cpu, cpu_present_mask);
12659 if (new_cpu == nr_cpumask_bits)
12660 new_cpu = first_cpu;
12661 }
12662
12663
12664
12665
12666 new_cpu = start_cpu;
12667 for (i = 0; i < phba->sli4_hba.num_present_cpu; i++) {
12668 new_cpup = &phba->sli4_hba.cpu_map[new_cpu];
12669 if (new_cpup->hdwq != LPFC_VECTOR_MAP_EMPTY &&
12670 new_cpup->phys_id == cpup->phys_id)
12671 goto found_hdwq;
12672
12673 new_cpu = cpumask_next(new_cpu, cpu_present_mask);
12674 if (new_cpu == nr_cpumask_bits)
12675 new_cpu = first_cpu;
12676 }
12677
12678
12679 cpup->hdwq = idx % phba->cfg_hdw_queue;
12680 idx++;
12681 goto logit;
12682 found_hdwq:
12683
12684 start_cpu = cpumask_next(new_cpu, cpu_present_mask);
12685 if (start_cpu == nr_cpumask_bits)
12686 start_cpu = first_cpu;
12687 cpup->hdwq = new_cpup->hdwq;
12688 logit:
12689 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
12690 "3335 Set Affinity: CPU %d (phys %d core %d): "
12691 "hdwq %d eq %d flg x%x\n",
12692 cpu, cpup->phys_id, cpup->core_id,
12693 cpup->hdwq, cpup->eq, cpup->flag);
12694 }
12695
12696
12697
12698
12699
12700 idx = 0;
12701 for_each_possible_cpu(cpu) {
12702 cpup = &phba->sli4_hba.cpu_map[cpu];
12703#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
12704 c_stat = per_cpu_ptr(phba->sli4_hba.c_stat, cpu);
12705 c_stat->hdwq_no = cpup->hdwq;
12706#endif
12707 if (cpup->hdwq != LPFC_VECTOR_MAP_EMPTY)
12708 continue;
12709
12710 cpup->hdwq = idx++ % phba->cfg_hdw_queue;
12711#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
12712 c_stat->hdwq_no = cpup->hdwq;
12713#endif
12714 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
12715 "3340 Set Affinity: not present "
12716 "CPU %d hdwq %d\n",
12717 cpu, cpup->hdwq);
12718 }
12719
12720
12721
12722
12723 return;
12724}
12725
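/**
 * lpfc_cpuhp_get_eq - Gather the EQs that lose their last online CPU
 * @phba: pointer to lpfc hba data structure.
 * @cpu: cpu going offline
 * @eqlist: list to collect the affected EQs
 *
 * For each IRQ vector whose affinity mask contains @cpu and has no other
 * online CPU left, add the vector's EQ to @eqlist so it can be switched
 * to polling mode.
 *
 * Return codes
 *   0 - successful
 *   -ENOMEM - could not allocate a temporary cpumask
 **/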
12733static int
12734lpfc_cpuhp_get_eq(struct lpfc_hba *phba, unsigned int cpu,
12735 struct list_head *eqlist)
12736{
12737 const struct cpumask *maskp;
12738 struct lpfc_queue *eq;
12739 struct cpumask *tmp;
12740 u16 idx;
12741
12742 tmp = kzalloc(cpumask_size(), GFP_KERNEL);
12743 if (!tmp)
12744 return -ENOMEM;
12745
12746 for (idx = 0; idx < phba->cfg_irq_chann; idx++) {
12747 maskp = pci_irq_get_affinity(phba->pcidev, idx);
12748 if (!maskp)
12749 continue;
12750
12751
12752
12753
12754
12755 if (!cpumask_and(tmp, maskp, cpumask_of(cpu)))
12756 continue;
12757
12758
12759
12760
12761
12762
12763 cpumask_and(tmp, maskp, cpu_online_mask);
12764 if (cpumask_weight(tmp) > 1)
12765 continue;
12766
12767
12768
12769
12770
12771
12772 eq = phba->sli4_hba.hba_eq_hdl[idx].eq;
12773 list_add(&eq->_poll_list, eqlist);
12774 }
12775 kfree(tmp);
12776 return 0;
12777}
12778
12779static void __lpfc_cpuhp_remove(struct lpfc_hba *phba)
12780{
12781 if (phba->sli_rev != LPFC_SLI_REV4)
12782 return;
12783
12784 cpuhp_state_remove_instance_nocalls(lpfc_cpuhp_state,
12785 &phba->cpuhp);
12786
12787
12788
12789
12790 synchronize_rcu();
12791 del_timer_sync(&phba->cpuhp_poll_timer);
12792}
12793
12794static void lpfc_cpuhp_remove(struct lpfc_hba *phba)
12795{
12796 if (phba->pport->fc_flag & FC_OFFLINE_MODE)
12797 return;
12798
12799 __lpfc_cpuhp_remove(phba);
12800}
12801
12802static void lpfc_cpuhp_add(struct lpfc_hba *phba)
12803{
12804 if (phba->sli_rev != LPFC_SLI_REV4)
12805 return;
12806
12807 rcu_read_lock();
12808
12809 if (!list_empty(&phba->poll_list))
12810 mod_timer(&phba->cpuhp_poll_timer,
12811 jiffies + msecs_to_jiffies(LPFC_POLL_HB));
12812
12813 rcu_read_unlock();
12814
12815 cpuhp_state_add_instance_nocalls(lpfc_cpuhp_state,
12816 &phba->cpuhp);
12817}
12818
12819static int __lpfc_cpuhp_checks(struct lpfc_hba *phba, int *retval)
12820{
12821 if (phba->pport->load_flag & FC_UNLOADING) {
12822 *retval = -EAGAIN;
12823 return true;
12824 }
12825
12826 if (phba->sli_rev != LPFC_SLI_REV4) {
12827 *retval = 0;
12828 return true;
12829 }
12830
12831
12832 return false;
12833}
12834
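/**
 * lpfc_irq_set_aff - set IRQ affinity
 * @eqhdl: EQ handle
 * @cpu: cpu to set affinity
 **/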
12841static inline void
12842lpfc_irq_set_aff(struct lpfc_hba_eq_hdl *eqhdl, unsigned int cpu)
12843{
12844 cpumask_clear(&eqhdl->aff_mask);
12845 cpumask_set_cpu(cpu, &eqhdl->aff_mask);
12846 irq_set_status_flags(eqhdl->irq, IRQ_NO_BALANCING);
12847 irq_set_affinity(eqhdl->irq, &eqhdl->aff_mask);
12848}
12849
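/**
 * lpfc_irq_clear_aff - clear IRQ affinity
 * @eqhdl: EQ handle
 **/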
12855static inline void
12856lpfc_irq_clear_aff(struct lpfc_hba_eq_hdl *eqhdl)
12857{
12858 cpumask_clear(&eqhdl->aff_mask);
12859 irq_clear_status_flags(eqhdl->irq, IRQ_NO_BALANCING);
12860}
12861
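/**
 * lpfc_irq_rebalance - rebalance IRQ affinity according to cpuhp event
 * @phba: pointer to HBA context object.
 * @cpu: cpu going offline/online
 * @offline: true, cpu is going offline. false, cpu is coming online.
 *
 * If cpu is going offline, the routine makes a best effort to find the
 * next online cpu in the original affinity mask and migrates the
 * offlining IRQ affinities to it; if none is found, the affinity hints
 * are cleared. If cpu is coming online, the IRQ is reaffinitized back
 * to the onlining cpu.
 *
 * Note: only effective when NUMA or NHT mode is enabled; otherwise
 * PCI_IRQ_AFFINITY auto-manages IRQ affinity and this routine returns
 * early.
 **/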
12878static void
12879lpfc_irq_rebalance(struct lpfc_hba *phba, unsigned int cpu, bool offline)
12880{
12881 struct lpfc_vector_map_info *cpup;
12882 struct cpumask *aff_mask;
12883 unsigned int cpu_select, cpu_next, idx;
12884 const struct cpumask *orig_mask;
12885
12886 if (phba->irq_chann_mode == NORMAL_MODE)
12887 return;
12888
12889 orig_mask = &phba->sli4_hba.irq_aff_mask;
12890
12891 if (!cpumask_test_cpu(cpu, orig_mask))
12892 return;
12893
12894 cpup = &phba->sli4_hba.cpu_map[cpu];
12895
12896 if (!(cpup->flag & LPFC_CPU_FIRST_IRQ))
12897 return;
12898
12899 if (offline) {
12900
12901 cpu_next = cpumask_next_wrap(cpu, orig_mask, cpu, true);
12902 cpu_select = lpfc_next_online_cpu(orig_mask, cpu_next);
12903
12904
12905 if ((cpu_select < nr_cpu_ids) && (cpu_select != cpu)) {
12906
12907
12908
12909 for (idx = 0; idx < phba->cfg_irq_chann; idx++) {
12910 aff_mask = lpfc_get_aff_mask(idx);
12911
12912
12913 if (cpumask_test_cpu(cpu, aff_mask))
12914 lpfc_irq_set_aff(lpfc_get_eq_hdl(idx),
12915 cpu_select);
12916 }
12917 } else {
12918
12919 for (idx = 0; idx < phba->cfg_irq_chann; idx++)
12920 lpfc_irq_clear_aff(lpfc_get_eq_hdl(idx));
12921 }
12922 } else {
12923
12924 lpfc_irq_set_aff(lpfc_get_eq_hdl(cpup->eq), cpu);
12925 }
12926}
12927
12928static int lpfc_cpu_offline(unsigned int cpu, struct hlist_node *node)
12929{
12930 struct lpfc_hba *phba = hlist_entry_safe(node, struct lpfc_hba, cpuhp);
12931 struct lpfc_queue *eq, *next;
12932 LIST_HEAD(eqlist);
12933 int retval;
12934
12935 if (!phba) {
12936 WARN_ONCE(!phba, "cpu: %u. phba:NULL", raw_smp_processor_id());
12937 return 0;
12938 }
12939
12940 if (__lpfc_cpuhp_checks(phba, &retval))
12941 return retval;
12942
12943 lpfc_irq_rebalance(phba, cpu, true);
12944
12945 retval = lpfc_cpuhp_get_eq(phba, cpu, &eqlist);
12946 if (retval)
12947 return retval;
12948
12949
12950 list_for_each_entry_safe(eq, next, &eqlist, _poll_list) {
12951 list_del_init(&eq->_poll_list);
12952 lpfc_sli4_start_polling(eq);
12953 }
12954
12955 return 0;
12956}
12957
12958static int lpfc_cpu_online(unsigned int cpu, struct hlist_node *node)
12959{
12960 struct lpfc_hba *phba = hlist_entry_safe(node, struct lpfc_hba, cpuhp);
12961 struct lpfc_queue *eq, *next;
12962 unsigned int n;
12963 int retval;
12964
12965 if (!phba) {
12966 WARN_ONCE(!phba, "cpu: %u. phba:NULL", raw_smp_processor_id());
12967 return 0;
12968 }
12969
12970 if (__lpfc_cpuhp_checks(phba, &retval))
12971 return retval;
12972
12973 lpfc_irq_rebalance(phba, cpu, false);
12974
12975 list_for_each_entry_safe(eq, next, &phba->poll_list, _poll_list) {
12976 n = lpfc_find_cpu_handle(phba, eq->hdwq, LPFC_FIND_BY_HDWQ);
12977 if (n == cpu)
12978 lpfc_sli4_stop_polling(eq);
12979 }
12980
12981 return 0;
12982}
12983
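/**
 * lpfc_sli4_enable_msix - Enable MSI-X interrupt mode to SLI-4 device
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked to enable the MSI-X interrupt vectors to device
 * with SLI-4 interface spec. It allocates up to cfg_irq_chann vectors and
 * maps them to CPUs. When an affinity mask is in effect (NUMA or NHT
 * mode), the vectors are pinned to the CPUs in that mask; otherwise
 * PCI_IRQ_AFFINITY lets the kernel spread them across present CPUs.
 *
 * Return codes
 *   0 - successful
 *   other values - error
 **/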
13012static int
13013lpfc_sli4_enable_msix(struct lpfc_hba *phba)
13014{
13015 int vectors, rc, index;
13016 char *name;
13017 const struct cpumask *aff_mask = NULL;
13018 unsigned int cpu = 0, cpu_cnt = 0, cpu_select = nr_cpu_ids;
13019 struct lpfc_vector_map_info *cpup;
13020 struct lpfc_hba_eq_hdl *eqhdl;
13021 const struct cpumask *maskp;
13022 unsigned int flags = PCI_IRQ_MSIX;
13023
13024
13025 vectors = phba->cfg_irq_chann;
13026
13027 if (phba->irq_chann_mode != NORMAL_MODE)
13028 aff_mask = &phba->sli4_hba.irq_aff_mask;
13029
13030 if (aff_mask) {
13031 cpu_cnt = cpumask_weight(aff_mask);
13032 vectors = min(phba->cfg_irq_chann, cpu_cnt);
13033
13034
13035
13036
13037 cpu = cpumask_first(aff_mask);
13038 cpu_select = lpfc_next_online_cpu(aff_mask, cpu);
13039 } else {
13040 flags |= PCI_IRQ_AFFINITY;
13041 }
13042
13043 rc = pci_alloc_irq_vectors(phba->pcidev, 1, vectors, flags);
13044 if (rc < 0) {
13045 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
13046 "0484 PCI enable MSI-X failed (%d)\n", rc);
13047 goto vec_fail_out;
13048 }
13049 vectors = rc;
13050
13051
13052 for (index = 0; index < vectors; index++) {
13053 eqhdl = lpfc_get_eq_hdl(index);
13054 name = eqhdl->handler_name;
13055 memset(name, 0, LPFC_SLI4_HANDLER_NAME_SZ);
13056 snprintf(name, LPFC_SLI4_HANDLER_NAME_SZ,
13057 LPFC_DRIVER_HANDLER_NAME"%d", index);
13058
13059 eqhdl->idx = index;
13060 rc = request_irq(pci_irq_vector(phba->pcidev, index),
13061 &lpfc_sli4_hba_intr_handler, 0,
13062 name, eqhdl);
13063 if (rc) {
13064 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
13065 "0486 MSI-X fast-path (%d) "
13066 "request_irq failed (%d)\n", index, rc);
13067 goto cfg_fail_out;
13068 }
13069
13070 eqhdl->irq = pci_irq_vector(phba->pcidev, index);
13071
13072 if (aff_mask) {
13073
13074 if (cpu_select < nr_cpu_ids)
13075 lpfc_irq_set_aff(eqhdl, cpu_select);
13076
13077
13078 lpfc_assign_eq_map_info(phba, index,
13079 LPFC_CPU_FIRST_IRQ,
13080 cpu);
13081
13082
13083 cpu = cpumask_next(cpu, aff_mask);
13084
13085
13086 cpu_select = lpfc_next_online_cpu(aff_mask, cpu);
13087 } else if (vectors == 1) {
13088 cpu = cpumask_first(cpu_present_mask);
13089 lpfc_assign_eq_map_info(phba, index, LPFC_CPU_FIRST_IRQ,
13090 cpu);
13091 } else {
13092 maskp = pci_irq_get_affinity(phba->pcidev, index);
13093
13094
13095 for_each_cpu_and(cpu, maskp, cpu_present_mask) {
13096 cpup = &phba->sli4_hba.cpu_map[cpu];
13097
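				/*
				 * Record only the first present CPU in
				 * this vector's mask that does not yet
				 * have an eq: it becomes the FIRST_IRQ
				 * CPU for the vector.
				 */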
13110 if (cpup->eq != LPFC_VECTOR_MAP_EMPTY)
13111 continue;
13112 lpfc_assign_eq_map_info(phba, index,
13113 LPFC_CPU_FIRST_IRQ,
13114 cpu);
13115 break;
13116 }
13117 }
13118 }
13119
13120 if (vectors != phba->cfg_irq_chann) {
13121 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
13122 "3238 Reducing IO channels to match number of "
13123 "MSI-X vectors, requested %d got %d\n",
13124 phba->cfg_irq_chann, vectors);
13125 if (phba->cfg_irq_chann > vectors)
13126 phba->cfg_irq_chann = vectors;
13127 }
13128
13129 return rc;
13130
13131cfg_fail_out:
13132
13133 for (--index; index >= 0; index--) {
13134 eqhdl = lpfc_get_eq_hdl(index);
13135 lpfc_irq_clear_aff(eqhdl);
13136 free_irq(eqhdl->irq, eqhdl);
13137 }
13138
13139
13140 pci_free_irq_vectors(phba->pcidev);
13141
13142vec_fail_out:
13143 return rc;
13144}
13145
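/**
 * lpfc_sli4_enable_msi - Enable MSI interrupt mode to SLI-4 device
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked to enable the MSI interrupt mode to device with
 * SLI-4 interface spec. pci_alloc_irq_vectors() is called to enable the
 * single MSI vector, then request_irq() registers the interrupt handler
 * for it; every EQ handle is pointed at that one vector.
 *
 * Return codes
 *      0 - successful
 *      other values - error
 **/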
13160static int
13161lpfc_sli4_enable_msi(struct lpfc_hba *phba)
13162{
13163 int rc, index;
13164 unsigned int cpu;
13165 struct lpfc_hba_eq_hdl *eqhdl;
13166
13167 rc = pci_alloc_irq_vectors(phba->pcidev, 1, 1,
13168 PCI_IRQ_MSI | PCI_IRQ_AFFINITY);
13169 if (rc > 0)
13170 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
13171 "0487 PCI enable MSI mode success.\n");
13172 else {
13173 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
13174 "0488 PCI enable MSI mode failed (%d)\n", rc);
13175 return rc ? rc : -1;
13176 }
13177
13178 rc = request_irq(phba->pcidev->irq, lpfc_sli4_intr_handler,
13179 0, LPFC_DRIVER_NAME, phba);
13180 if (rc) {
13181 pci_free_irq_vectors(phba->pcidev);
13182 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
13183 "0490 MSI request_irq failed (%d)\n", rc);
13184 return rc;
13185 }
13186
13187 eqhdl = lpfc_get_eq_hdl(0);
13188 eqhdl->irq = pci_irq_vector(phba->pcidev, 0);
13189
13190 cpu = cpumask_first(cpu_present_mask);
13191 lpfc_assign_eq_map_info(phba, 0, LPFC_CPU_FIRST_IRQ, cpu);
13192
13193 for (index = 0; index < phba->cfg_irq_chann; index++) {
13194 eqhdl = lpfc_get_eq_hdl(index);
13195 eqhdl->idx = index;
13196 }
13197
13198 return 0;
13199}
13200
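/**
 * lpfc_sli4_enable_intr - Enable device interrupt to SLI-4 device
 * @phba: pointer to lpfc hba data structure.
 * @cfg_mode: Interrupt configuration mode (0, 1, or 2).
 *
 * This routine is invoked to enable device interrupt and associate the
 * driver's interrupt handler(s) with interrupt vector(s). Depending on the
 * configured mode, the driver falls back from MSI-X to MSI to INTx until a
 * mode supported by the platform, kernel and device is found.
 *
 * Return: the interrupt mode actually configured (0 = INTx, 1 = MSI,
 * 2 = MSI-X), or LPFC_INTR_ERROR on failure.
 **/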
13218static uint32_t
13219lpfc_sli4_enable_intr(struct lpfc_hba *phba, uint32_t cfg_mode)
13220{
13221 uint32_t intr_mode = LPFC_INTR_ERROR;
13222 int retval, idx;
13223
	if (cfg_mode == 2) {
		/* Now, try to enable MSI-X interrupt mode */
		retval = lpfc_sli4_enable_msix(phba);
		if (!retval) {
			/* Indicate initialization to MSI-X mode */
			phba->intr_type = MSIX;
			intr_mode = 2;
		}
	}
13237
13238
13239 if (cfg_mode >= 1 && phba->intr_type == NONE) {
13240 retval = lpfc_sli4_enable_msi(phba);
13241 if (!retval) {
13242
13243 phba->intr_type = MSI;
13244 intr_mode = 1;
13245 }
13246 }
13247
13248
13249 if (phba->intr_type == NONE) {
13250 retval = request_irq(phba->pcidev->irq, lpfc_sli4_intr_handler,
13251 IRQF_SHARED, LPFC_DRIVER_NAME, phba);
13252 if (!retval) {
13253 struct lpfc_hba_eq_hdl *eqhdl;
13254 unsigned int cpu;
13255
13256
13257 phba->intr_type = INTx;
13258 intr_mode = 0;
13259
13260 eqhdl = lpfc_get_eq_hdl(0);
13261 eqhdl->irq = pci_irq_vector(phba->pcidev, 0);
13262
13263 cpu = cpumask_first(cpu_present_mask);
13264 lpfc_assign_eq_map_info(phba, 0, LPFC_CPU_FIRST_IRQ,
13265 cpu);
13266 for (idx = 0; idx < phba->cfg_irq_chann; idx++) {
13267 eqhdl = lpfc_get_eq_hdl(idx);
13268 eqhdl->idx = idx;
13269 }
13270 }
13271 }
13272 return intr_mode;
13273}
13274
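/**
 * lpfc_sli4_disable_intr - Disable device interrupt to SLI-4 device
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked to disable device interrupt and disassociate the
 * driver's interrupt handler(s) from the interrupt vector(s), releasing the
 * vector(s) back to the kernel.
 **/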
13284static void
13285lpfc_sli4_disable_intr(struct lpfc_hba *phba)
13286{
13287
13288 if (phba->intr_type == MSIX) {
13289 int index;
13290 struct lpfc_hba_eq_hdl *eqhdl;
13291
13292
13293 for (index = 0; index < phba->cfg_irq_chann; index++) {
13294 eqhdl = lpfc_get_eq_hdl(index);
13295 lpfc_irq_clear_aff(eqhdl);
13296 free_irq(eqhdl->irq, eqhdl);
13297 }
13298 } else {
13299 free_irq(phba->pcidev->irq, phba);
13300 }
13301
13302 pci_free_irq_vectors(phba->pcidev);
13303
13304
13305 phba->intr_type = NONE;
13306 phba->sli.slistat.sli_intr = 0;
13307}
13308
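/**
 * lpfc_unset_hba - Unset SLI3 hba device initialization
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked to unset the HBA device initialization steps to
 * a device with SLI-3 interface spec.
 **/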
13316static void
13317lpfc_unset_hba(struct lpfc_hba *phba)
13318{
13319 struct lpfc_vport *vport = phba->pport;
13320 struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
13321
13322 spin_lock_irq(shost->host_lock);
13323 vport->load_flag |= FC_UNLOADING;
13324 spin_unlock_irq(shost->host_lock);
13325
13326 kfree(phba->vpi_bmask);
13327 kfree(phba->vpi_ids);
13328
13329 lpfc_stop_hba_timers(phba);
13330
13331 phba->pport->work_port_events = 0;
13332
13333 lpfc_sli_hba_down(phba);
13334
13335 lpfc_sli_brdrestart(phba);
13336
13337 lpfc_sli_disable_intr(phba);
13338
13339 return;
13340}
13341
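/**
 * lpfc_sli4_xri_exchange_busy_wait - Wait for device XRI exchange busy
 * @phba: Pointer to HBA context object.
 *
 * This function is called in the SLI4 code path to wait for completion
 * of the device's XRI exchange busy. It polls the outstanding FCP, ELS
 * and NVMET abort lists, switching to the slower T2 interval and logging
 * the lists that are still busy once the overall timeout has passed.
 **/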
13355static void
13356lpfc_sli4_xri_exchange_busy_wait(struct lpfc_hba *phba)
13357{
13358 struct lpfc_sli4_hdw_queue *qp;
13359 int idx, ccnt;
13360 int wait_time = 0;
13361 int io_xri_cmpl = 1;
13362 int nvmet_xri_cmpl = 1;
13363 int els_xri_cmpl = list_empty(&phba->sli4_hba.lpfc_abts_els_sgl_list);
13364
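	/*
	 * The driver just aborted IOs during the hba_unset process.
	 * Pause here to give the HBA time to complete the IO and get
	 * entries into the abort lists.
	 */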
13369 msleep(LPFC_XRI_EXCH_BUSY_WAIT_T1 * 5);
13370
13371
13372 if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME)
13373 lpfc_nvme_wait_for_io_drain(phba);
13374
13375 ccnt = 0;
13376 for (idx = 0; idx < phba->cfg_hdw_queue; idx++) {
13377 qp = &phba->sli4_hba.hdwq[idx];
13378 io_xri_cmpl = list_empty(&qp->lpfc_abts_io_buf_list);
13379 if (!io_xri_cmpl)
13380 ccnt++;
13381 }
13382 if (ccnt)
13383 io_xri_cmpl = 0;
13384
13385 if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) {
13386 nvmet_xri_cmpl =
13387 list_empty(&phba->sli4_hba.lpfc_abts_nvmet_ctx_list);
13388 }
13389
13390 while (!els_xri_cmpl || !io_xri_cmpl || !nvmet_xri_cmpl) {
13391 if (wait_time > LPFC_XRI_EXCH_BUSY_WAIT_TMO) {
13392 if (!nvmet_xri_cmpl)
13393 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
13394 "6424 NVMET XRI exchange busy "
13395 "wait time: %d seconds.\n",
13396 wait_time/1000);
13397 if (!io_xri_cmpl)
13398 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
13399 "6100 IO XRI exchange busy "
13400 "wait time: %d seconds.\n",
13401 wait_time/1000);
13402 if (!els_xri_cmpl)
13403 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
13404 "2878 ELS XRI exchange busy "
13405 "wait time: %d seconds.\n",
13406 wait_time/1000);
13407 msleep(LPFC_XRI_EXCH_BUSY_WAIT_T2);
13408 wait_time += LPFC_XRI_EXCH_BUSY_WAIT_T2;
13409 } else {
13410 msleep(LPFC_XRI_EXCH_BUSY_WAIT_T1);
13411 wait_time += LPFC_XRI_EXCH_BUSY_WAIT_T1;
13412 }
13413
13414 ccnt = 0;
13415 for (idx = 0; idx < phba->cfg_hdw_queue; idx++) {
13416 qp = &phba->sli4_hba.hdwq[idx];
13417 io_xri_cmpl = list_empty(
13418 &qp->lpfc_abts_io_buf_list);
13419 if (!io_xri_cmpl)
13420 ccnt++;
13421 }
13422 if (ccnt)
13423 io_xri_cmpl = 0;
13424
13425 if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) {
13426 nvmet_xri_cmpl = list_empty(
13427 &phba->sli4_hba.lpfc_abts_nvmet_ctx_list);
13428 }
13429 els_xri_cmpl =
13430 list_empty(&phba->sli4_hba.lpfc_abts_els_sgl_list);
13431
13432 }
13433}
13434
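/**
 * lpfc_sli4_hba_unset - Unset the fcoe hba
 * @phba: Pointer to HBA context object.
 *
 * This function is called in the SLI4 code path to reset the HBA's FCoE
 * function: it stops the timers, blocks and flushes the mailbox subsystem,
 * aborts outstanding iocbs, waits for XRI exchange busy to clear, releases
 * the interrupts, resets the PCI function and destroys the queues.
 **/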
13445static void
13446lpfc_sli4_hba_unset(struct lpfc_hba *phba)
13447{
13448 int wait_cnt = 0;
13449 LPFC_MBOXQ_t *mboxq;
13450 struct pci_dev *pdev = phba->pcidev;
13451
13452 lpfc_stop_hba_timers(phba);
13453 hrtimer_cancel(&phba->cmf_timer);
13454
13455 if (phba->pport)
13456 phba->sli4_hba.intr_enable = 0;
13457
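	/*
	 * Mark async mailbox posting as blocked, then give any active
	 * mailbox command a bounded window to complete before failing
	 * it and tearing the port down.
	 */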
13464 spin_lock_irq(&phba->hbalock);
13465 phba->sli.sli_flag |= LPFC_SLI_ASYNC_MBX_BLK;
13466 spin_unlock_irq(&phba->hbalock);
13467
13468 while (phba->sli.sli_flag & LPFC_SLI_MBOX_ACTIVE) {
13469 msleep(10);
13470 if (++wait_cnt > LPFC_ACTIVE_MBOX_WAIT_CNT)
13471 break;
13472 }
13473
13474 if (phba->sli.sli_flag & LPFC_SLI_MBOX_ACTIVE) {
13475 spin_lock_irq(&phba->hbalock);
13476 mboxq = phba->sli.mbox_active;
13477 mboxq->u.mb.mbxStatus = MBX_NOT_FINISHED;
13478 __lpfc_mbox_cmpl_put(phba, mboxq);
13479 phba->sli.sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
13480 phba->sli.mbox_active = NULL;
13481 spin_unlock_irq(&phba->hbalock);
13482 }
13483
13484
13485 lpfc_sli_hba_iocb_abort(phba);
13486
13487 if (!pci_channel_offline(phba->pcidev))
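		/* Wait for completion of device XRI exchange busy */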
13489 lpfc_sli4_xri_exchange_busy_wait(phba);
13490
13491
13492 if (phba->pport)
13493 lpfc_cpuhp_remove(phba);
13494
13495
13496 lpfc_sli4_disable_intr(phba);
13497
13498
13499 if (phba->cfg_sriov_nr_virtfn)
13500 pci_disable_sriov(pdev);
13501
13502
13503 kthread_stop(phba->worker_thread);
13504
13505
13506 lpfc_ras_stop_fwlog(phba);
13507
13508
13509 lpfc_pci_function_reset(phba);
13510
13511
13512 lpfc_sli4_queue_destroy(phba);
13513
13514
13515 if (phba->ras_fwlog.ras_enabled)
13516 lpfc_sli4_ras_dma_free(phba);
13517
13518
13519 if (phba->pport)
13520 phba->pport->work_port_events = 0;
13521}
13522
13523static uint32_t
13524lpfc_cgn_crc32(uint32_t crc, u8 byte)
13525{
13526 uint32_t msb = 0;
13527 uint32_t bit;
13528
13529 for (bit = 0; bit < 8; bit++) {
13530 msb = (crc >> 31) & 1;
13531 crc <<= 1;
13532
13533 if (msb ^ (byte & 1)) {
13534 crc ^= LPFC_CGN_CRC32_MAGIC_NUMBER;
13535 crc |= 1;
13536 }
13537 byte >>= 1;
13538 }
13539 return crc;
13540}
13541
13542static uint32_t
13543lpfc_cgn_reverse_bits(uint32_t wd)
13544{
13545 uint32_t result = 0;
13546 uint32_t i;
13547
13548 for (i = 0; i < 32; i++) {
13549 result <<= 1;
13550 result |= (1 & (wd >> i));
13551 }
13552 return result;
13553}
13554
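/**
 * lpfc_cgn_calc_crc32 - Calculate a CRC32 over a congestion buffer
 * @ptr: buffer to checksum
 * @byteLen: number of bytes to include
 * @crc: seed value
 *
 * Return: the bit-reversed, inverted CRC32 of the buffer.
 **/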
13559uint32_t
13560lpfc_cgn_calc_crc32(void *ptr, uint32_t byteLen, uint32_t crc)
13561{
13562 uint32_t i;
13563 uint32_t result;
13564 uint8_t *data = (uint8_t *)ptr;
13565
13566 for (i = 0; i < byteLen; ++i)
13567 crc = lpfc_cgn_crc32(crc, data[i]);
13568
13569 result = ~lpfc_cgn_reverse_bits(crc);
13570 return result;
13571}
13572
13573void
13574lpfc_init_congestion_buf(struct lpfc_hba *phba)
13575{
13576 struct lpfc_cgn_info *cp;
13577 struct timespec64 cmpl_time;
13578 struct tm broken;
13579 uint16_t size;
13580 uint32_t crc;
13581
13582 lpfc_printf_log(phba, KERN_INFO, LOG_CGN_MGMT,
13583 "6235 INIT Congestion Buffer %p\n", phba->cgn_i);
13584
13585 if (!phba->cgn_i)
13586 return;
13587 cp = (struct lpfc_cgn_info *)phba->cgn_i->virt;
13588
13589 atomic_set(&phba->cgn_fabric_warn_cnt, 0);
13590 atomic_set(&phba->cgn_fabric_alarm_cnt, 0);
13591 atomic_set(&phba->cgn_sync_alarm_cnt, 0);
13592 atomic_set(&phba->cgn_sync_warn_cnt, 0);
13593
13594 atomic_set(&phba->cgn_driver_evt_cnt, 0);
13595 atomic_set(&phba->cgn_latency_evt_cnt, 0);
13596 atomic64_set(&phba->cgn_latency_evt, 0);
13597 phba->cgn_evt_minute = 0;
13598 phba->hba_flag &= ~HBA_CGN_DAY_WRAP;
13599
13600 memset(cp, 0xff, offsetof(struct lpfc_cgn_info, cgn_stat));
13601 cp->cgn_info_size = cpu_to_le16(LPFC_CGN_INFO_SZ);
13602 cp->cgn_info_version = LPFC_CGN_INFO_V3;
13603
13604
13605 cp->cgn_info_mode = phba->cgn_p.cgn_param_mode;
13606 cp->cgn_info_level0 = phba->cgn_p.cgn_param_level0;
13607 cp->cgn_info_level1 = phba->cgn_p.cgn_param_level1;
13608 cp->cgn_info_level2 = phba->cgn_p.cgn_param_level2;
13609
13610 ktime_get_real_ts64(&cmpl_time);
13611 time64_to_tm(cmpl_time.tv_sec, 0, &broken);
13612
13613 cp->cgn_info_month = broken.tm_mon + 1;
13614 cp->cgn_info_day = broken.tm_mday;
13615 cp->cgn_info_year = broken.tm_year - 100;
13616 cp->cgn_info_hour = broken.tm_hour;
13617 cp->cgn_info_minute = broken.tm_min;
13618 cp->cgn_info_second = broken.tm_sec;
13619
13620 lpfc_printf_log(phba, KERN_INFO, LOG_CGN_MGMT | LOG_INIT,
13621 "2643 CGNInfo Init: Start Time "
13622 "%d/%d/%d %d:%d:%d\n",
13623 cp->cgn_info_day, cp->cgn_info_month,
13624 cp->cgn_info_year, cp->cgn_info_hour,
13625 cp->cgn_info_minute, cp->cgn_info_second);
13626
13627
13628 if (phba->pport) {
13629 size = (uint16_t)(phba->pport->cfg_lun_queue_depth);
13630 cp->cgn_lunq = cpu_to_le16(size);
13631 }
13632
13633
13634
13635 cp->cgn_warn_freq = cpu_to_le16(LPFC_FPIN_INIT_FREQ);
13636 cp->cgn_alarm_freq = cpu_to_le16(LPFC_FPIN_INIT_FREQ);
13637 crc = lpfc_cgn_calc_crc32(cp, LPFC_CGN_INFO_SZ, LPFC_CGN_CRC32_SEED);
13638 cp->cgn_info_crc = cpu_to_le32(crc);
13639
13640 phba->cgn_evt_timestamp = jiffies +
13641 msecs_to_jiffies(LPFC_CGN_TIMER_TO_MIN);
13642}
13643
13644void
13645lpfc_init_congestion_stat(struct lpfc_hba *phba)
13646{
13647 struct lpfc_cgn_info *cp;
13648 struct timespec64 cmpl_time;
13649 struct tm broken;
13650 uint32_t crc;
13651
13652 lpfc_printf_log(phba, KERN_INFO, LOG_CGN_MGMT,
13653 "6236 INIT Congestion Stat %p\n", phba->cgn_i);
13654
13655 if (!phba->cgn_i)
13656 return;
13657
13658 cp = (struct lpfc_cgn_info *)phba->cgn_i->virt;
13659 memset(&cp->cgn_stat, 0, sizeof(cp->cgn_stat));
13660
13661 ktime_get_real_ts64(&cmpl_time);
13662 time64_to_tm(cmpl_time.tv_sec, 0, &broken);
13663
13664 cp->cgn_stat_month = broken.tm_mon + 1;
13665 cp->cgn_stat_day = broken.tm_mday;
13666 cp->cgn_stat_year = broken.tm_year - 100;
13667 cp->cgn_stat_hour = broken.tm_hour;
13668 cp->cgn_stat_minute = broken.tm_min;
13669
13670 lpfc_printf_log(phba, KERN_INFO, LOG_CGN_MGMT | LOG_INIT,
13671 "2647 CGNstat Init: Start Time "
13672 "%d/%d/%d %d:%d\n",
13673 cp->cgn_stat_day, cp->cgn_stat_month,
13674 cp->cgn_stat_year, cp->cgn_stat_hour,
13675 cp->cgn_stat_minute);
13676
13677 crc = lpfc_cgn_calc_crc32(cp, LPFC_CGN_INFO_SZ, LPFC_CGN_CRC32_SEED);
13678 cp->cgn_info_crc = cpu_to_le32(crc);
13679}
13680
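/**
 * __lpfc_reg_congestion_buf - register/deregister the congestion buffer
 * @phba: pointer to lpfc hba data structure.
 * @reg: nonzero to register the congestion buffer with the port, zero to
 *       deregister it.
 *
 * Issues the REG_CONGESTION_BUF mailbox command in polling mode.
 *
 * Return codes
 *      0 - successful
 *      -ENOMEM, -ENXIO - error
 **/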
13686static int
13687__lpfc_reg_congestion_buf(struct lpfc_hba *phba, int reg)
13688{
13689 struct lpfc_mbx_reg_congestion_buf *reg_congestion_buf;
13690 union lpfc_sli4_cfg_shdr *shdr;
13691 uint32_t shdr_status, shdr_add_status;
13692 LPFC_MBOXQ_t *mboxq;
13693 int length, rc;
13694
13695 if (!phba->cgn_i)
13696 return -ENXIO;
13697
13698 mboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
13699 if (!mboxq) {
13700 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX,
13701 "2641 REG_CONGESTION_BUF mbox allocation fail: "
13702 "HBA state x%x reg %d\n",
13703 phba->pport->port_state, reg);
13704 return -ENOMEM;
13705 }
13706
13707 length = (sizeof(struct lpfc_mbx_reg_congestion_buf) -
13708 sizeof(struct lpfc_sli4_cfg_mhdr));
13709 lpfc_sli4_config(phba, mboxq, LPFC_MBOX_SUBSYSTEM_COMMON,
13710 LPFC_MBOX_OPCODE_REG_CONGESTION_BUF, length,
13711 LPFC_SLI4_MBX_EMBED);
13712 reg_congestion_buf = &mboxq->u.mqe.un.reg_congestion_buf;
13713 bf_set(lpfc_mbx_reg_cgn_buf_type, reg_congestion_buf, 1);
13714 if (reg > 0)
13715 bf_set(lpfc_mbx_reg_cgn_buf_cnt, reg_congestion_buf, 1);
13716 else
13717 bf_set(lpfc_mbx_reg_cgn_buf_cnt, reg_congestion_buf, 0);
13718 reg_congestion_buf->length = sizeof(struct lpfc_cgn_info);
13719 reg_congestion_buf->addr_lo =
13720 putPaddrLow(phba->cgn_i->phys);
13721 reg_congestion_buf->addr_hi =
13722 putPaddrHigh(phba->cgn_i->phys);
13723
13724 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
13725 shdr = (union lpfc_sli4_cfg_shdr *)
13726 &mboxq->u.mqe.un.sli4_config.header.cfg_shdr;
13727 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
13728 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status,
13729 &shdr->response);
13730 mempool_free(mboxq, phba->mbox_mem_pool);
13731 if (shdr_status || shdr_add_status || rc) {
13732 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
13733 "2642 REG_CONGESTION_BUF mailbox "
13734 "failed with status x%x add_status x%x,"
13735 " mbx status x%x reg %d\n",
13736 shdr_status, shdr_add_status, rc, reg);
13737 return -ENXIO;
13738 }
13739 return 0;
13740}
13741
13742int
13743lpfc_unreg_congestion_buf(struct lpfc_hba *phba)
13744{
13745 lpfc_cmf_stop(phba);
13746 return __lpfc_reg_congestion_buf(phba, 0);
13747}
13748
13749int
13750lpfc_reg_congestion_buf(struct lpfc_hba *phba)
13751{
13752 return __lpfc_reg_congestion_buf(phba, 1);
13753}
13754
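/**
 * lpfc_get_sli4_parameters - Get the SLI4 Config PARAMETERS.
 * @phba: Pointer to HBA context object.
 * @mboxq: Pointer to the mailboxq memory for the mailbox command response.
 *
 * This function is called in the SLI4 code path to read the port's
 * sli4 capabilities and to derive the driver feature settings (NVME
 * support, embedded I/O, suppress response, page sizes) from them.
 *
 * Return codes
 *      0 - successful
 *      other values - error
 **/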
13767int
13768lpfc_get_sli4_parameters(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
13769{
13770 int rc;
13771 struct lpfc_mqe *mqe = &mboxq->u.mqe;
13772 struct lpfc_pc_sli4_params *sli4_params;
13773 uint32_t mbox_tmo;
13774 int length;
13775 bool exp_wqcq_pages = true;
13776 struct lpfc_sli4_parameters *mbx_sli4_parameters;
13777
13778
13779
13780
13781
13782
13783 phba->sli4_hba.rpi_hdrs_in_use = 1;
13784
13785
13786 length = (sizeof(struct lpfc_mbx_get_sli4_parameters) -
13787 sizeof(struct lpfc_sli4_cfg_mhdr));
13788 lpfc_sli4_config(phba, mboxq, LPFC_MBOX_SUBSYSTEM_COMMON,
13789 LPFC_MBOX_OPCODE_GET_SLI4_PARAMETERS,
13790 length, LPFC_SLI4_MBX_EMBED);
13791 if (!phba->sli4_hba.intr_enable)
13792 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
13793 else {
13794 mbox_tmo = lpfc_mbox_tmo_val(phba, mboxq);
13795 rc = lpfc_sli_issue_mbox_wait(phba, mboxq, mbox_tmo);
13796 }
13797 if (unlikely(rc))
13798 return rc;
13799 sli4_params = &phba->sli4_hba.pc_sli4_params;
13800 mbx_sli4_parameters = &mqe->un.get_sli4_parameters.sli4_parameters;
13801 sli4_params->if_type = bf_get(cfg_if_type, mbx_sli4_parameters);
13802 sli4_params->sli_rev = bf_get(cfg_sli_rev, mbx_sli4_parameters);
13803 sli4_params->sli_family = bf_get(cfg_sli_family, mbx_sli4_parameters);
13804 sli4_params->featurelevel_1 = bf_get(cfg_sli_hint_1,
13805 mbx_sli4_parameters);
13806 sli4_params->featurelevel_2 = bf_get(cfg_sli_hint_2,
13807 mbx_sli4_parameters);
13808 if (bf_get(cfg_phwq, mbx_sli4_parameters))
13809 phba->sli3_options |= LPFC_SLI4_PHWQ_ENABLED;
13810 else
13811 phba->sli3_options &= ~LPFC_SLI4_PHWQ_ENABLED;
13812 sli4_params->sge_supp_len = mbx_sli4_parameters->sge_supp_len;
13813 sli4_params->loopbk_scope = bf_get(cfg_loopbk_scope,
13814 mbx_sli4_parameters);
13815 sli4_params->oas_supported = bf_get(cfg_oas, mbx_sli4_parameters);
13816 sli4_params->cqv = bf_get(cfg_cqv, mbx_sli4_parameters);
13817 sli4_params->mqv = bf_get(cfg_mqv, mbx_sli4_parameters);
13818 sli4_params->wqv = bf_get(cfg_wqv, mbx_sli4_parameters);
13819 sli4_params->rqv = bf_get(cfg_rqv, mbx_sli4_parameters);
13820 sli4_params->eqav = bf_get(cfg_eqav, mbx_sli4_parameters);
13821 sli4_params->cqav = bf_get(cfg_cqav, mbx_sli4_parameters);
13822 sli4_params->wqsize = bf_get(cfg_wqsize, mbx_sli4_parameters);
13823 sli4_params->bv1s = bf_get(cfg_bv1s, mbx_sli4_parameters);
13824 sli4_params->pls = bf_get(cfg_pvl, mbx_sli4_parameters);
13825 sli4_params->sgl_pages_max = bf_get(cfg_sgl_page_cnt,
13826 mbx_sli4_parameters);
        sli4_params->wqpcnt = bf_get(cfg_wqpcnt, mbx_sli4_parameters);
        sli4_params->sgl_pp_align = bf_get(cfg_sgl_pp_align,
                                           mbx_sli4_parameters);
        phba->sli4_hba.extents_in_use = bf_get(cfg_ext, mbx_sli4_parameters);
        phba->sli4_hba.rpi_hdrs_in_use = bf_get(cfg_hdrr, mbx_sli4_parameters);

        /* Check for Extended Pre-Registered SGL support */
        phba->cfg_xpsgl = bf_get(cfg_xpsgl, mbx_sli4_parameters);

        /* Check for firmware nvme support */
        rc = (bf_get(cfg_nvme, mbx_sli4_parameters) &&
              bf_get(cfg_xib, mbx_sli4_parameters));

        if (rc) {
                /* Save this to indicate the Firmware supports NVME */
                sli4_params->nvme = 1;

                /* Firmware NVME support, check driver FC4 NVME support */
                if (phba->cfg_enable_fc4_type == LPFC_ENABLE_FCP) {
                        lpfc_printf_log(phba, KERN_INFO, LOG_INIT | LOG_NVME,
                                        "6133 Disabling NVME support: "
                                        "FC4 type not supported: x%x\n",
                                        phba->cfg_enable_fc4_type);
                        goto fcponly;
                }
        } else {
                /* No firmware NVME support, check driver FC4 NVME support */
                sli4_params->nvme = 0;
                if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) {
                        lpfc_printf_log(phba, KERN_ERR, LOG_INIT | LOG_NVME,
                                        "6101 Disabling NVME support: Not "
                                        "supported by firmware (%d %d) x%x\n",
                                        bf_get(cfg_nvme, mbx_sli4_parameters),
                                        bf_get(cfg_xib, mbx_sli4_parameters),
                                        phba->cfg_enable_fc4_type);
fcponly:
                        phba->nvmet_support = 0;
                        phba->cfg_nvmet_mrq = 0;
                        phba->cfg_nvme_seg_cnt = 0;

                        /* If no FC4 type support, move to just SCSI support */
                        if (!(phba->cfg_enable_fc4_type & LPFC_ENABLE_FCP))
                                return -ENODEV;
                        phba->cfg_enable_fc4_type = LPFC_ENABLE_FCP;
                }
        }

        /* If the NVME FC4 type is enabled, scale the sg_seg_cnt to
         * accommodate 512K and 1M IOs in a single nvme buf.
         */
        if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME)
                phba->cfg_sg_seg_cnt = LPFC_MAX_NVME_SEG_CNT;

        /* Enable embedded Payload BDE if support is indicated */
        if (bf_get(cfg_pbde, mbx_sli4_parameters))
                phba->cfg_enable_pbde = 1;
        else
                phba->cfg_enable_pbde = 0;

        /*
         * To support Suppress Response feature we must satisfy 3 conditions.
         * lpfc_suppress_rsp module parameter must be set (default).
         * In SLI4-Parameters Descriptor:
         * Extended Inline Buffers (XIB) must be supported.
         * Suppress Response IU Not Supported (SRIUNS) must NOT be supported
         * (double negative).
         */
        if (phba->cfg_suppress_rsp && bf_get(cfg_xib, mbx_sli4_parameters) &&
            !(bf_get(cfg_nosr, mbx_sli4_parameters)))
                phba->sli.sli_flag |= LPFC_SLI_SUPPRESS_RSP;
        else
                phba->cfg_suppress_rsp = 0;

        if (bf_get(cfg_eqdr, mbx_sli4_parameters))
                phba->sli.sli_flag |= LPFC_SLI_USE_EQDR;

        /* Make sure that sge_supp_len can be handled by the driver */
        if (sli4_params->sge_supp_len > LPFC_MAX_SGE_SIZE)
                sli4_params->sge_supp_len = LPFC_MAX_SGE_SIZE;

        /*
         * Check whether the adapter supports an embedded copy of the
         * FCP CMD IU within the WQE for FCP_Ixxx commands. In order
         * to use this option, 128-byte WQEs must be used.
         */
        if (bf_get(cfg_ext_embed_cb, mbx_sli4_parameters))
                phba->fcp_embed_io = 1;
        else
                phba->fcp_embed_io = 0;

        lpfc_printf_log(phba, KERN_INFO, LOG_INIT | LOG_NVME,
                        "6422 XIB %d PBDE %d: FCP %d NVME %d %d %d\n",
                        bf_get(cfg_xib, mbx_sli4_parameters),
                        phba->cfg_enable_pbde,
                        phba->fcp_embed_io, sli4_params->nvme,
                        phba->cfg_nvme_embed_cmd, phba->cfg_suppress_rsp);

        if ((bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) ==
            LPFC_SLI_INTF_IF_TYPE_2) &&
            (bf_get(lpfc_sli_intf_sli_family, &phba->sli4_hba.sli_intf) ==
             LPFC_SLI_INTF_FAMILY_LNCR_A0))
                exp_wqcq_pages = false;

        if ((bf_get(cfg_cqpsize, mbx_sli4_parameters) & LPFC_CQ_16K_PAGE_SZ) &&
            (bf_get(cfg_wqpsize, mbx_sli4_parameters) & LPFC_WQ_16K_PAGE_SZ) &&
            exp_wqcq_pages &&
            (sli4_params->wqsize & LPFC_WQ_SZ128_SUPPORT))
                phba->enab_exp_wqcq_pages = 1;
        else
                phba->enab_exp_wqcq_pages = 0;

        /* Check if firmware supports MDS Diagnostics */
        if (bf_get(cfg_mds_diags, mbx_sli4_parameters))
                phba->mds_diags_support = 1;
        else
                phba->mds_diags_support = 0;

        /*
         * Check if the SLI port supports NVME-over-FC
         * Sequence Level Error Recovery (SLER).
         */
        if (bf_get(cfg_nsler, mbx_sli4_parameters))
                phba->nsler = 1;
        else
                phba->nsler = 0;

        return 0;
}

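/**
 * lpfc_pci_probe_one_s3 - PCI probe func to reg dev to SCSI layer (SLI-3)
 * @pdev: pointer to PCI device
 * @pid: pointer to PCI device identifier
 *
 * This routine is to be called to attach a device with SLI-3 interface spec
 * to the PCI subsystem. When an Emulex HBA with SLI-3 interface spec is
 * presented on the PCI bus, the kernel PCI subsystem looks at PCI device
 * specific information of the device and driver to see if the driver can
 * support this kind of device. If the match is successful, the driver core
 * invokes this routine. If this routine determines it can claim the HBA, it
 * does all the initialization that it needs to do to handle the HBA properly.
 *
 * Return code
 *	0 - driver can claim the device
 *	negative value - driver can not claim the device
 **/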
static int
lpfc_pci_probe_one_s3(struct pci_dev *pdev, const struct pci_device_id *pid)
{
        struct lpfc_hba *phba;
        struct lpfc_vport *vport = NULL;
        struct Scsi_Host *shost = NULL;
        int error;
        uint32_t cfg_mode, intr_mode;

        /* Allocate memory for HBA structure */
        phba = lpfc_hba_alloc(pdev);
        if (!phba)
                return -ENOMEM;

        /* Perform generic PCI device enabling operation */
        error = lpfc_enable_pci_dev(phba);
        if (error)
                goto out_free_phba;

        /* Set up SLI API function jump table for PCI-device group-0 HBAs */
        error = lpfc_api_table_setup(phba, LPFC_PCI_DEV_LP);
        if (error)
                goto out_disable_pci_dev;

        /* Set up SLI-3 specific device PCI memory space */
        error = lpfc_sli_pci_mem_setup(phba);
        if (error) {
                lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
                                "1402 Failed to set up pci memory space.\n");
                goto out_disable_pci_dev;
        }

        /* Set up SLI-3 specific device driver resources */
        error = lpfc_sli_driver_resource_setup(phba);
        if (error) {
                lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
                                "1404 Failed to set up driver resource.\n");
                goto out_unset_pci_mem_s3;
        }

        /* Initialize and populate the iocb list per host */
        error = lpfc_init_iocb_list(phba, LPFC_IOCB_LIST_CNT);
        if (error) {
                lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
                                "1405 Failed to initialize iocb list.\n");
                goto out_unset_driver_resource_s3;
        }

        /* Set up common device driver resources */
        error = lpfc_setup_driver_resource_phase2(phba);
        if (error) {
                lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
                                "1406 Failed to set up driver resource.\n");
                goto out_free_iocb_list;
        }

        /* Get the default values for Model Name and Description */
        lpfc_get_hba_model_desc(phba, phba->ModelName, phba->ModelDesc);

        /* Create SCSI host to the physical port */
        error = lpfc_create_shost(phba);
        if (error) {
                lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
                                "1407 Failed to create scsi host.\n");
                goto out_unset_driver_resource;
        }

        /* Configure sysfs attributes */
        vport = phba->pport;
        error = lpfc_alloc_sysfs_attr(vport);
        if (error) {
                lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
                                "1476 Failed to allocate sysfs attr\n");
                goto out_destroy_shost;
        }

        shost = lpfc_shost_from_vport(vport);
        /* Now, trying to enable interrupt and bring up the device */
        cfg_mode = phba->cfg_use_msi;
        while (true) {
                /* Put device to a known state before enabling interrupt */
                lpfc_stop_port(phba);
                /* Configure and enable interrupt */
                intr_mode = lpfc_sli_enable_intr(phba, cfg_mode);
                if (intr_mode == LPFC_INTR_ERROR) {
                        lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
                                        "0431 Failed to enable interrupt.\n");
                        error = -ENODEV;
                        goto out_free_sysfs_attr;
                }
                /* SLI-3 HBA setup */
                if (lpfc_sli_hba_setup(phba)) {
                        lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
                                        "1477 Failed to set up hba\n");
                        error = -ENODEV;
                        goto out_remove_device;
                }

                /* Wait 50ms for the interrupts of previous mailbox commands */
                msleep(50);
                /* Check active interrupts received only for MSI/MSI-X */
                if (intr_mode == 0 ||
                    phba->sli.slistat.sli_intr > LPFC_MSIX_VECTORS) {
                        /* Log the current active interrupt mode */
                        phba->intr_mode = intr_mode;
                        lpfc_log_intr_mode(phba, intr_mode);
                        break;
                } else {
                        lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
                                        "0447 Configure interrupt mode (%d) "
                                        "failed active interrupt test.\n",
                                        intr_mode);
                        /* Disable the current interrupt mode */
                        lpfc_sli_disable_intr(phba);
                        /* Try the next level of interrupt mode */
                        cfg_mode = --intr_mode;
                }
        }

        /* Perform post initialization setup */
        lpfc_post_init_setup(phba);

        /* Check if there are static vports to be created. */
        lpfc_create_static_vport(phba);

        return 0;

out_remove_device:
        lpfc_unset_hba(phba);
out_free_sysfs_attr:
        lpfc_free_sysfs_attr(vport);
out_destroy_shost:
        lpfc_destroy_shost(phba);
out_unset_driver_resource:
        lpfc_unset_driver_resource_phase2(phba);
out_free_iocb_list:
        lpfc_free_iocb_list(phba);
out_unset_driver_resource_s3:
        lpfc_sli_driver_resource_unset(phba);
out_unset_pci_mem_s3:
        lpfc_sli_pci_mem_unset(phba);
out_disable_pci_dev:
        lpfc_disable_pci_dev(phba);
        if (shost)
                scsi_host_put(shost);
out_free_phba:
        lpfc_hba_free(phba);
        return error;
}

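/**
 * lpfc_pci_remove_one_s3 - PCI func to unreg SLI-3 device from PCI subsystem
 * @pdev: pointer to PCI device
 *
 * This routine is to be called to disattach a device with SLI-3 interface
 * spec from the PCI subsystem. When an Emulex HBA with SLI-3 interface spec
 * is removed from the PCI bus, it performs all the necessary cleanup for the
 * HBA device to be removed from the PCI subsystem properly.
 **/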
static void
lpfc_pci_remove_one_s3(struct pci_dev *pdev)
{
        struct Scsi_Host *shost = pci_get_drvdata(pdev);
        struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
        struct lpfc_vport **vports;
        struct lpfc_hba *phba = vport->phba;
        int i;

        /* Mark the device unloading flag */
        spin_lock_irq(&phba->hbalock);
        vport->load_flag |= FC_UNLOADING;
        spin_unlock_irq(&phba->hbalock);

        lpfc_free_sysfs_attr(vport);

        /* Release all the vports against this physical port */
        vports = lpfc_create_vport_work_array(phba);
        if (vports != NULL)
                for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) {
                        if (vports[i]->port_type == LPFC_PHYSICAL_PORT)
                                continue;
                        fc_vport_terminate(vports[i]->fc_vport);
                }
        lpfc_destroy_vport_work_array(phba, vports);

        /* Remove FC host with the physical port */
        fc_remove_host(shost);
        scsi_remove_host(shost);

        /* Clean up all nodes, mailboxes and IOs. */
        lpfc_cleanup(vport);

        /*
         * Bring down the SLI Layer. This step disables all interrupts,
         * clears the rings, discards all mailbox commands, and resets
         * the HBA.
         */

        /* HBA interrupt will be disabled after this call */
        lpfc_sli_hba_down(phba);
        /* Stop kthread signal shall trigger work_done one more time */
        kthread_stop(phba->worker_thread);
        /* Final cleanup of txcmplq and reset the HBA */
        lpfc_sli_brdrestart(phba);

        kfree(phba->vpi_bmask);
        kfree(phba->vpi_ids);

        lpfc_stop_hba_timers(phba);
        spin_lock_irq(&phba->port_list_lock);
        list_del_init(&vport->listentry);
        spin_unlock_irq(&phba->port_list_lock);

        lpfc_debugfs_terminate(vport);

        /* Disable SR-IOV if enabled */
        if (phba->cfg_sriov_nr_virtfn)
                pci_disable_sriov(pdev);

        /* Disable interrupt */
        lpfc_sli_disable_intr(phba);

        scsi_host_put(shost);

        /*
         * Call scsi_free before mem_free since scsi bufs are released to
         * their corresponding pools here.
         */
        lpfc_scsi_free(phba);
        lpfc_free_iocb_list(phba);

        lpfc_mem_free_all(phba);

        dma_free_coherent(&pdev->dev, lpfc_sli_hbq_size(),
                          phba->hbqslimp.virt, phba->hbqslimp.phys);

        /* Free resources associated with SLI2 interface */
        dma_free_coherent(&pdev->dev, SLI2_SLIM_SIZE,
                          phba->slim2p.virt, phba->slim2p.phys);

        /* unmap adapter SLIM and Control Registers */
        iounmap(phba->ctrl_regs_memmap_p);
        iounmap(phba->slim_memmap_p);

        lpfc_hba_free(phba);

        pci_release_mem_regions(pdev);
        pci_disable_device(pdev);
}

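/**
 * lpfc_pci_suspend_one_s3 - PCI func to suspend SLI-3 device for power mgmnt
 * @dev_d: pointer to device
 *
 * This routine is invoked from the kernel's PCI subsystem to support system
 * Power Management (PM) on a device with SLI-3 interface spec. When PM
 * invokes this method, it quiesces the device by stopping the driver's
 * worker thread for the device, turning off the device's interrupt and DMA,
 * and bringing the device offline.
 *
 * Return code
 *	0 - driver suspended the device
 *	Error otherwise
 **/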
static int __maybe_unused
lpfc_pci_suspend_one_s3(struct device *dev_d)
{
        struct Scsi_Host *shost = dev_get_drvdata(dev_d);
        struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;

        lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
                        "0473 PCI device Power Management suspend.\n");

        /* Bring down the device */
        lpfc_offline_prep(phba, LPFC_MBX_WAIT);
        lpfc_offline(phba);
        kthread_stop(phba->worker_thread);

        /* Disable interrupt from device */
        lpfc_sli_disable_intr(phba);

        return 0;
}

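/**
 * lpfc_pci_resume_one_s3 - PCI func to resume SLI-3 device for power mgmnt
 * @dev_d: pointer to device
 *
 * This routine is invoked from the kernel's PCI subsystem to support system
 * Power Management (PM) on a device with SLI-3 interface spec. When PM
 * invokes this method, it restarts the device's worker thread, re-enables
 * the device's interrupt, restarts the HBA, and brings the device online.
 *
 * Return code
 *	0 - driver resumed the device
 *	Error otherwise
 **/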
static int __maybe_unused
lpfc_pci_resume_one_s3(struct device *dev_d)
{
        struct Scsi_Host *shost = dev_get_drvdata(dev_d);
        struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
        uint32_t intr_mode;
        int error;

        lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
                        "0452 PCI device Power Management resume.\n");

        /* Startup the kernel thread for this host adapter. */
        phba->worker_thread = kthread_run(lpfc_do_work, phba,
                                          "lpfc_worker_%d", phba->brd_no);
        if (IS_ERR(phba->worker_thread)) {
                error = PTR_ERR(phba->worker_thread);
                lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
                                "0434 PM resume failed to start worker "
                                "thread: error=x%x.\n", error);
                return error;
        }

        /* Init cpu_map array */
        lpfc_cpu_map_array_init(phba);
        /* Init hba_eq_hdl array */
        lpfc_hba_eq_hdl_array_init(phba);
        /* Configure and enable interrupt */
        intr_mode = lpfc_sli_enable_intr(phba, phba->intr_mode);
        if (intr_mode == LPFC_INTR_ERROR) {
                lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
                                "0430 PM resume Failed to enable interrupt\n");
                return -EIO;
        } else
                phba->intr_mode = intr_mode;

        /* Restart HBA and bring it online */
        lpfc_sli_brdrestart(phba);
        lpfc_online(phba);

        /* Log the current active interrupt mode */
        lpfc_log_intr_mode(phba, phba->intr_mode);

        return 0;
}

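/**
 * lpfc_sli_prep_dev_for_recover - Prepare SLI-3 device for pci slot recover
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is called to prepare the SLI3 device for PCI slot recover. It
 * aborts all the outstanding SCSI I/Os to the pci device.
 **/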
static void
lpfc_sli_prep_dev_for_recover(struct lpfc_hba *phba)
{
        lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
                        "2723 PCI channel I/O abort preparing for recovery\n");

        /*
         * There may be errored I/Os through HBA, abort all I/Os on txcmplq
         * and let the SCSI mid-layer retry them to recover.
         */
        lpfc_sli_abort_fcp_rings(phba);
}

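/**
 * lpfc_sli_prep_dev_for_reset - Prepare SLI-3 device for pci slot reset
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is called to prepare the SLI3 device for PCI slot reset. It
 * disables the device interrupt and pci device, and aborts the internal FCP
 * pending I/Os.
 **/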
static void
lpfc_sli_prep_dev_for_reset(struct lpfc_hba *phba)
{
        lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
                        "2710 PCI channel disable preparing for reset\n");

        /* Block any management I/Os to the device */
        lpfc_block_mgmt_io(phba, LPFC_MBX_WAIT);

        /* Block all SCSI devices' I/Os on the host */
        lpfc_scsi_dev_block(phba);

        /* Flush all driver's outstanding I/Os as we are to reset */
        lpfc_sli_flush_io_rings(phba);

        /* stop all timers */
        lpfc_stop_hba_timers(phba);

        /* Disable interrupt and pci device */
        lpfc_sli_disable_intr(phba);
        pci_disable_device(phba->pcidev);
}

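/**
 * lpfc_sli_prep_dev_for_perm_failure - Prepare SLI-3 dev for pci slot disable
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is called to prepare the SLI3 device for PCI slot permanently
 * disabling. It blocks the SCSI transport layer traffic and flushes the FCP
 * pending I/Os.
 **/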
static void
lpfc_sli_prep_dev_for_perm_failure(struct lpfc_hba *phba)
{
        lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
                        "2711 PCI channel permanent disable for failure\n");
        /* Block all SCSI devices' I/Os on the host */
        lpfc_scsi_dev_block(phba);
        lpfc_sli4_prep_dev_for_reset(phba);

        /* stop all timers */
        lpfc_stop_hba_timers(phba);

        /* Clean up all driver's outstanding SCSI I/Os */
        lpfc_sli_flush_io_rings(phba);
}

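/**
 * lpfc_io_error_detected_s3 - Method for handling SLI-3 device PCI I/O error
 * @pdev: pointer to PCI device.
 * @state: the current PCI connection state.
 *
 * This routine is called from the PCI subsystem for I/O error handling to
 * a device with SLI-3 interface spec. It is called by the PCI subsystem
 * after a PCI bus error affecting this device has been detected. When this
 * function is invoked, it will need to stop all the I/Os and interrupt(s)
 * to the device.
 *
 * Return codes
 *	PCI_ERS_RESULT_CAN_RECOVER - can be recovered with reset_link
 *	PCI_ERS_RESULT_NEED_RESET - need to reset before recovery
 *	PCI_ERS_RESULT_DISCONNECT - device could not be recovered
 **/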
static pci_ers_result_t
lpfc_io_error_detected_s3(struct pci_dev *pdev, pci_channel_state_t state)
{
        struct Scsi_Host *shost = pci_get_drvdata(pdev);
        struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;

        switch (state) {
        case pci_channel_io_normal:
                /* Non-fatal error, prepare for recovery */
                lpfc_sli_prep_dev_for_recover(phba);
                return PCI_ERS_RESULT_CAN_RECOVER;
        case pci_channel_io_frozen:
                /* Fatal error, prepare for slot reset */
                lpfc_sli_prep_dev_for_reset(phba);
                return PCI_ERS_RESULT_NEED_RESET;
        case pci_channel_io_perm_failure:
                /* Permanent failure, prepare for device down */
                lpfc_sli_prep_dev_for_perm_failure(phba);
                return PCI_ERS_RESULT_DISCONNECT;
        default:
                /* Unknown state, prepare and request slot reset */
                lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
                                "0472 Unknown PCI error state: x%x\n", state);
                lpfc_sli_prep_dev_for_reset(phba);
                return PCI_ERS_RESULT_NEED_RESET;
        }
}

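/**
 * lpfc_io_slot_reset_s3 - Method for restarting PCI SLI-3 device from scratch.
 * @pdev: pointer to PCI device.
 *
 * This routine is called from the PCI subsystem for error handling to a
 * device with SLI-3 interface spec. This is called after the PCI bus has
 * been reset to restart the PCI card from scratch, as if from a cold-boot.
 * After the driver returns PCI_ERS_RESULT_NEED_RESET, the PCI subsystem
 * performs proper error recovery and then calls this routine before calling
 * the .resume method to recover the device. This function re-initializes the
 * HBA device and enables the interrupt, but it will just put the HBA into an
 * offline state without passing any I/O traffic.
 *
 * Return codes
 *	PCI_ERS_RESULT_RECOVERED - the device has been recovered
 *	PCI_ERS_RESULT_DISCONNECT - device could not be recovered
 **/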
static pci_ers_result_t
lpfc_io_slot_reset_s3(struct pci_dev *pdev)
{
        struct Scsi_Host *shost = pci_get_drvdata(pdev);
        struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
        struct lpfc_sli *psli = &phba->sli;
        uint32_t intr_mode;

        dev_printk(KERN_INFO, &pdev->dev, "recovering from a slot reset.\n");
        if (pci_enable_device_mem(pdev)) {
                printk(KERN_ERR "lpfc: Cannot re-enable "
                        "PCI device after reset.\n");
                return PCI_ERS_RESULT_DISCONNECT;
        }

        pci_restore_state(pdev);

        /*
         * As the new kernel behavior of pci_restore_state() API call clears
         * the device saved_state flag, need to save the restored state again.
         */
        pci_save_state(pdev);

        if (pdev->is_busmaster)
                pci_set_master(pdev);

        spin_lock_irq(&phba->hbalock);
        psli->sli_flag &= ~LPFC_SLI_ACTIVE;
        spin_unlock_irq(&phba->hbalock);

        /* Configure and enable interrupt */
        intr_mode = lpfc_sli_enable_intr(phba, phba->intr_mode);
        if (intr_mode == LPFC_INTR_ERROR) {
                lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
                                "0427 Cannot re-enable interrupt after "
                                "slot reset.\n");
                return PCI_ERS_RESULT_DISCONNECT;
        } else
                phba->intr_mode = intr_mode;

        /* Take device offline, it will perform cleanup */
        lpfc_offline_prep(phba, LPFC_MBX_WAIT);
        lpfc_offline(phba);
        lpfc_sli_brdrestart(phba);

        /* Log the current active interrupt mode */
        lpfc_log_intr_mode(phba, phba->intr_mode);

        return PCI_ERS_RESULT_RECOVERED;
}

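/**
 * lpfc_io_resume_s3 - Method for resuming PCI I/O operation on SLI-3 device.
 * @pdev: pointer to PCI device
 *
 * This routine is called from the PCI subsystem for error handling to a
 * device with SLI-3 interface spec. It is called when kernel error recovery
 * tells the lpfc driver that it is ok to resume normal PCI operation after
 * PCI bus error recovery. After this call, traffic can start to flow from
 * this device again.
 **/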
static void
lpfc_io_resume_s3(struct pci_dev *pdev)
{
        struct Scsi_Host *shost = pci_get_drvdata(pdev);
        struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;

        /* Bring device online, it will be no-op for non-fatal error resume */
        lpfc_online(phba);
}

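/**
 * lpfc_sli4_get_els_iocb_cnt - Calculate the # of ELS IOCBs to reserve
 * @phba: pointer to lpfc hba data structure.
 *
 * returns the number of ELS/CT IOCBs to reserve
 **/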
int
lpfc_sli4_get_els_iocb_cnt(struct lpfc_hba *phba)
{
        int max_xri = phba->sli4_hba.max_cfg_param.max_xri;

        if (phba->sli_rev == LPFC_SLI_REV4) {
                if (max_xri <= 100)
                        return 10;
                else if (max_xri <= 256)
                        return 25;
                else if (max_xri <= 512)
                        return 50;
                else if (max_xri <= 1024)
                        return 100;
                else if (max_xri <= 1536)
                        return 150;
                else if (max_xri <= 2048)
                        return 200;
                else
                        return 250;
        } else
                return 0;
}

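/**
 * lpfc_sli4_get_iocb_cnt - Calculate the # of total IOCBs to reserve
 * @phba: pointer to lpfc hba data structure.
 *
 * returns the number of ELS/CT + NVMET IOCBs to reserve
 **/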
int
lpfc_sli4_get_iocb_cnt(struct lpfc_hba *phba)
{
        int max_xri = lpfc_sli4_get_els_iocb_cnt(phba);

        if (phba->nvmet_support)
                max_xri += LPFC_NVMET_BUF_POST;
        return max_xri;
}


static int
lpfc_log_write_firmware_error(struct lpfc_hba *phba, uint32_t offset,
        uint32_t magic_number, uint32_t ftype, uint32_t fid, uint32_t fsize,
        const struct firmware *fw)
{
        int rc;
        u8 sli_family;

        sli_family = bf_get(lpfc_sli_intf_sli_family, &phba->sli4_hba.sli_intf);
        /* Three cases:  (1) FW was not supported on the detected adapter.
         * (2) FW update has been locked out administratively.
         * (3) Some other error during FW update.
         * In each case, an unmaskable message is written to the console
         * for admin diagnosis.
         */
        if (offset == ADD_STATUS_FW_NOT_SUPPORTED ||
            (sli_family == LPFC_SLI_INTF_FAMILY_G6 &&
             magic_number != MAGIC_NUMBER_G6) ||
            (sli_family == LPFC_SLI_INTF_FAMILY_G7 &&
             magic_number != MAGIC_NUMBER_G7) ||
            (sli_family == LPFC_SLI_INTF_FAMILY_G7P &&
             magic_number != MAGIC_NUMBER_G7P)) {
                lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
                                "3030 This firmware version is not supported on"
                                " this HBA model. Device:%x Magic:%x Type:%x "
                                "ID:%x Size %d %zd\n",
                                phba->pcidev->device, magic_number, ftype, fid,
                                fsize, fw->size);
                rc = -EINVAL;
        } else if (offset == ADD_STATUS_FW_DOWNLOAD_HW_DISABLED) {
                lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
                                "3021 Firmware downloads have been prohibited "
                                "by a system configuration setting on "
                                "Device:%x Magic:%x Type:%x ID:%x Size %d "
                                "%zd\n",
                                phba->pcidev->device, magic_number, ftype, fid,
                                fsize, fw->size);
                rc = -EACCES;
        } else {
                lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
                                "3022 FW Download failed. Add Status x%x "
                                "Device:%x Magic:%x Type:%x ID:%x Size %d "
                                "%zd\n",
                                offset, phba->pcidev->device, magic_number,
                                ftype, fid, fsize, fw->size);
                rc = -EIO;
        }
        return rc;
}

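/**
 * lpfc_write_firmware - attempt to write a firmware image to the port
 * @fw: pointer to firmware image returned from request_firmware.
 * @context: pointer to firmware image returned from request_firmware.
 **/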
static void
lpfc_write_firmware(const struct firmware *fw, void *context)
{
        struct lpfc_hba *phba = (struct lpfc_hba *)context;
        char fwrev[FW_REV_STR_SIZE];
        struct lpfc_grp_hdr *image;
        struct list_head dma_buffer_list;
        int i, rc = 0;
        struct lpfc_dmabuf *dmabuf, *next;
        uint32_t offset = 0, temp_offset = 0;
        uint32_t magic_number, ftype, fid, fsize;

        /* It can be null in no-wait mode, sanity check */
        if (!fw) {
                rc = -ENXIO;
                goto out;
        }
        image = (struct lpfc_grp_hdr *)fw->data;

        magic_number = be32_to_cpu(image->magic_number);
        ftype = bf_get_be32(lpfc_grp_hdr_file_type, image);
        fid = bf_get_be32(lpfc_grp_hdr_id, image);
        fsize = be32_to_cpu(image->size);

        INIT_LIST_HEAD(&dma_buffer_list);
        lpfc_decode_firmware_rev(phba, fwrev, 1);
        if (strncmp(fwrev, image->revision, strnlen(image->revision, 16))) {
                lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
                                "3023 Updating Firmware, Current Version:%s "
                                "New Version:%s\n",
                                fwrev, image->revision);
                for (i = 0; i < LPFC_MBX_WR_CONFIG_MAX_BDE; i++) {
                        dmabuf = kzalloc(sizeof(struct lpfc_dmabuf),
                                         GFP_KERNEL);
                        if (!dmabuf) {
                                rc = -ENOMEM;
                                goto release_out;
                        }
                        dmabuf->virt = dma_alloc_coherent(&phba->pcidev->dev,
                                                          SLI4_PAGE_SIZE,
                                                          &dmabuf->phys,
                                                          GFP_KERNEL);
                        if (!dmabuf->virt) {
                                kfree(dmabuf);
                                rc = -ENOMEM;
                                goto release_out;
                        }
                        list_add_tail(&dmabuf->list, &dma_buffer_list);
                }
                while (offset < fw->size) {
                        temp_offset = offset;
                        list_for_each_entry(dmabuf, &dma_buffer_list, list) {
                                if (temp_offset + SLI4_PAGE_SIZE > fw->size) {
                                        memcpy(dmabuf->virt,
                                               fw->data + temp_offset,
                                               fw->size - temp_offset);
                                        temp_offset = fw->size;
                                        break;
                                }
                                memcpy(dmabuf->virt, fw->data + temp_offset,
                                       SLI4_PAGE_SIZE);
                                temp_offset += SLI4_PAGE_SIZE;
                        }
                        rc = lpfc_wr_object(phba, &dma_buffer_list,
                                            (fw->size - offset), &offset);
                        if (rc) {
                                rc = lpfc_log_write_firmware_error(phba, offset,
                                                                   magic_number,
                                                                   ftype,
                                                                   fid,
                                                                   fsize,
                                                                   fw);
                                goto release_out;
                        }
                }
                rc = offset;
        } else
                lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
                                "3029 Skipped Firmware update, Current "
                                "Version:%s New Version:%s\n",
                                fwrev, image->revision);

release_out:
        list_for_each_entry_safe(dmabuf, next, &dma_buffer_list, list) {
                list_del(&dmabuf->list);
                dma_free_coherent(&phba->pcidev->dev, SLI4_PAGE_SIZE,
                                  dmabuf->virt, dmabuf->phys);
                kfree(dmabuf);
        }
        release_firmware(fw);
out:
        if (rc < 0)
                lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
                                "3062 Firmware update error, status %d.\n", rc);
        else
                lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
                                "3024 Firmware update success: size %d.\n", rc);
}

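/**
 * lpfc_sli4_request_firmware_update - Request linux generic firmware upgrade
 * @phba: pointer to lpfc hba data structure.
 * @fw_upgrade: which firmware to update.
 *
 * returns the number of bytes written if write is successful.
 *         returns a negative error value if there were errors.
 *         returns 0 if firmware matches currently active firmware on port.
 **/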
int
lpfc_sli4_request_firmware_update(struct lpfc_hba *phba, uint8_t fw_upgrade)
{
        /* char, not uint8_t: snprintf() expects a char buffer */
        char file_name[ELX_MODEL_NAME_SIZE];
        int ret;
        const struct firmware *fw;

        /* Only supported on SLI4 interface type 2 for now */
        if (bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) <
            LPFC_SLI_INTF_IF_TYPE_2)
                return -EPERM;

        snprintf(file_name, ELX_MODEL_NAME_SIZE, "%s.grp", phba->ModelName);

        if (fw_upgrade == INT_FW_UPGRADE) {
                ret = request_firmware_nowait(THIS_MODULE, FW_ACTION_UEVENT,
                                              file_name, &phba->pcidev->dev,
                                              GFP_KERNEL, (void *)phba,
                                              lpfc_write_firmware);
        } else if (fw_upgrade == RUN_FW_UPGRADE) {
                ret = request_firmware(&fw, file_name, &phba->pcidev->dev);
                if (!ret)
                        lpfc_write_firmware(fw, (void *)phba);
        } else {
                ret = -EINVAL;
        }

        return ret;
}

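/**
 * lpfc_pci_probe_one_s4 - PCI probe func to reg dev to SCSI layer (SLI-4)
 * @pdev: pointer to PCI device
 * @pid: pointer to PCI device identifier
 *
 * This routine is called from the kernel's PCI subsystem to attach a device
 * with SLI-4 interface spec. When an Emulex HBA with SLI-4 interface spec is
 * presented on the PCI bus, the kernel PCI subsystem looks at PCI device
 * specific information of the device and driver to see if the driver can
 * support this kind of device. If the match is successful, the driver core
 * invokes this routine. If this routine determines it can claim the HBA, it
 * does all the initialization that it needs to do to handle the HBA properly.
 *
 * Return code
 *	0 - driver can claim the device
 *	negative value - driver can not claim the device
 **/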
static int
lpfc_pci_probe_one_s4(struct pci_dev *pdev, const struct pci_device_id *pid)
{
        struct lpfc_hba *phba;
        struct lpfc_vport *vport = NULL;
        struct Scsi_Host *shost = NULL;
        int error;
        uint32_t cfg_mode, intr_mode;

        /* Allocate memory for HBA structure */
        phba = lpfc_hba_alloc(pdev);
        if (!phba)
                return -ENOMEM;

        INIT_LIST_HEAD(&phba->poll_list);

        /* Perform generic PCI device enabling operation */
        error = lpfc_enable_pci_dev(phba);
        if (error)
                goto out_free_phba;

        /* Set up SLI API function jump table for PCI-device group-1 HBAs */
        error = lpfc_api_table_setup(phba, LPFC_PCI_DEV_OC);
        if (error)
                goto out_disable_pci_dev;

        /* Set up SLI-4 specific device PCI memory space */
        error = lpfc_sli4_pci_mem_setup(phba);
        if (error) {
                lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
                                "1410 Failed to set up pci memory space.\n");
                goto out_disable_pci_dev;
        }

        /* Set up SLI-4 specific device driver resources */
        error = lpfc_sli4_driver_resource_setup(phba);
        if (error) {
                lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
                                "1412 Failed to set up driver resource.\n");
                goto out_unset_pci_mem_s4;
        }

        INIT_LIST_HEAD(&phba->active_rrq_list);
        INIT_LIST_HEAD(&phba->fcf.fcf_pri_list);

        /* Set up common device driver resources */
        error = lpfc_setup_driver_resource_phase2(phba);
        if (error) {
                lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
                                "1414 Failed to set up driver resource.\n");
                goto out_unset_driver_resource_s4;
        }

        /* Get the default values for Model Name and Description */
        lpfc_get_hba_model_desc(phba, phba->ModelName, phba->ModelDesc);

        /* Now, trying to enable interrupt and bring up the device */
        cfg_mode = phba->cfg_use_msi;

        /* Put device to a known state before enabling interrupt */
        phba->pport = NULL;
        lpfc_stop_port(phba);

        /* Init cpu_map array */
        lpfc_cpu_map_array_init(phba);

        /* Init hba_eq_hdl array */
        lpfc_hba_eq_hdl_array_init(phba);

        /* Configure and enable interrupt */
        intr_mode = lpfc_sli4_enable_intr(phba, cfg_mode);
        if (intr_mode == LPFC_INTR_ERROR) {
                lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
                                "0426 Failed to enable interrupt.\n");
                error = -ENODEV;
                goto out_unset_driver_resource;
        }
        /* Default to single EQ for non-MSI-X */
        if (phba->intr_type != MSIX) {
                phba->cfg_irq_chann = 1;
                if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) {
                        if (phba->nvmet_support)
                                phba->cfg_nvmet_mrq = 1;
                }
        }
        lpfc_cpu_affinity_check(phba, phba->cfg_irq_chann);

        /* Create SCSI host to the physical port */
        error = lpfc_create_shost(phba);
        if (error) {
                lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
                                "1415 Failed to create scsi host.\n");
                goto out_disable_intr;
        }
        vport = phba->pport;
        shost = lpfc_shost_from_vport(vport);

        /* Configure sysfs attributes */
        error = lpfc_alloc_sysfs_attr(vport);
        if (error) {
                lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
                                "1416 Failed to allocate sysfs attr\n");
                goto out_destroy_shost;
        }

        /* Set up SLI-4 HBA */
        if (lpfc_sli4_hba_setup(phba)) {
                lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
                                "1421 Failed to set up hba\n");
                error = -ENODEV;
                goto out_free_sysfs_attr;
        }

        /* Log the current active interrupt mode */
        phba->intr_mode = intr_mode;
        lpfc_log_intr_mode(phba, intr_mode);

        /* Perform post initialization setup */
        lpfc_post_init_setup(phba);

        /* NVME support in FW earlier in the driver load corrects the
         * FC4 type making a check for nvme_support unnecessary.
         */
        if (phba->nvmet_support == 0) {
                if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) {
                        /* Create NVME binding with nvme_fc_transport. This
                         * ensures the vport is initialized.  If the localport
                         * create fails, it should not unload the driver to
                         * support field issues.
                         */
                        error = lpfc_nvme_create_localport(vport);
                        if (error) {
                                lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
                                                "6004 NVME registration "
                                                "failed, error x%x\n",
                                                error);
                        }
                }
        }

        /* check for firmware upgrade or downgrade */
        if (phba->cfg_request_firmware_upgrade)
                lpfc_sli4_request_firmware_update(phba, INT_FW_UPGRADE);

        /* Check if there are static vports to be created. */
        lpfc_create_static_vport(phba);

        timer_setup(&phba->cpuhp_poll_timer, lpfc_sli4_poll_hbtimer, 0);
        cpuhp_state_add_instance_nocalls(lpfc_cpuhp_state, &phba->cpuhp);

        return 0;

out_free_sysfs_attr:
        lpfc_free_sysfs_attr(vport);
out_destroy_shost:
        lpfc_destroy_shost(phba);
out_disable_intr:
        lpfc_sli4_disable_intr(phba);
out_unset_driver_resource:
        lpfc_unset_driver_resource_phase2(phba);
out_unset_driver_resource_s4:
        lpfc_sli4_driver_resource_unset(phba);
out_unset_pci_mem_s4:
        lpfc_sli4_pci_mem_unset(phba);
out_disable_pci_dev:
        lpfc_disable_pci_dev(phba);
        if (shost)
                scsi_host_put(shost);
out_free_phba:
        lpfc_hba_free(phba);
        return error;
}

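/**
 * lpfc_pci_remove_one_s4 - PCI func to unreg SLI-4 device from PCI subsystem
 * @pdev: pointer to PCI device
 *
 * This routine is called from the kernel's PCI subsystem for a device with
 * SLI-4 interface spec. When an Emulex HBA with SLI-4 interface spec is
 * removed from the PCI bus, it performs all the necessary cleanup for the
 * HBA device to be removed from the PCI subsystem properly.
 **/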
static void
lpfc_pci_remove_one_s4(struct pci_dev *pdev)
{
        struct Scsi_Host *shost = pci_get_drvdata(pdev);
        struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
        struct lpfc_vport **vports;
        struct lpfc_hba *phba = vport->phba;
        int i;

        /* Mark the device unloading flag */
        spin_lock_irq(&phba->hbalock);
        vport->load_flag |= FC_UNLOADING;
        spin_unlock_irq(&phba->hbalock);
        if (phba->cgn_i)
                lpfc_unreg_congestion_buf(phba);

        lpfc_free_sysfs_attr(vport);

        /* Release all the vports against this physical port */
        vports = lpfc_create_vport_work_array(phba);
        if (vports != NULL)
                for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) {
                        if (vports[i]->port_type == LPFC_PHYSICAL_PORT)
                                continue;
                        fc_vport_terminate(vports[i]->fc_vport);
                }
        lpfc_destroy_vport_work_array(phba, vports);

        /* Remove FC host with the physical port */
        fc_remove_host(shost);
        scsi_remove_host(shost);

        /* Perform ndlp cleanup on the physical port.  The nvme and nvmet
         * localports are destroyed after to cleanup all transport memory.
         */
        lpfc_cleanup(vport);
        lpfc_nvmet_destroy_targetport(phba);
        lpfc_nvme_destroy_localport(vport);

        /* De-allocate multi-XRI pools */
        if (phba->cfg_xri_rebalancing)
                lpfc_destroy_multixri_pools(phba);

        /*
         * Bring down the SLI Layer. This step disables all interrupts,
         * clears the rings, discards all mailbox commands, and resets
         * the HBA FCoE function.
         */
        lpfc_debugfs_terminate(vport);

        lpfc_stop_hba_timers(phba);
        spin_lock_irq(&phba->port_list_lock);
        list_del_init(&vport->listentry);
        spin_unlock_irq(&phba->port_list_lock);

        /* Perform scsi free before driver resource_unset since scsi
         * buffers are released to their corresponding pools here.
         */
        lpfc_io_free(phba);
        lpfc_free_iocb_list(phba);
        lpfc_sli4_hba_unset(phba);

        lpfc_unset_driver_resource_phase2(phba);
        lpfc_sli4_driver_resource_unset(phba);

        /* Unmap adapter Control and Doorbell registers */
        lpfc_sli4_pci_mem_unset(phba);

        /* Release PCI resources and disable device's PCI function */
        scsi_host_put(shost);
        lpfc_disable_pci_dev(phba);

        /* Finally, free the driver's device data structure */
        lpfc_hba_free(phba);
}

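/**
 * lpfc_pci_suspend_one_s4 - PCI func to suspend SLI-4 device for power mgmnt
 * @dev_d: pointer to device
 *
 * This routine is called from the kernel's PCI subsystem to support system
 * Power Management (PM) on a device with SLI-4 interface spec. When PM
 * invokes this method, it quiesces the device by stopping the driver's
 * worker thread for the device, turning off the device's interrupt and DMA,
 * and bringing the device offline.
 *
 * Return code
 *	0 - driver suspended the device
 *	Error otherwise
 **/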
static int __maybe_unused
lpfc_pci_suspend_one_s4(struct device *dev_d)
{
        struct Scsi_Host *shost = dev_get_drvdata(dev_d);
        struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;

        lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
                        "2843 PCI device Power Management suspend.\n");

        /* Bring down the device */
        lpfc_offline_prep(phba, LPFC_MBX_WAIT);
        lpfc_offline(phba);
        kthread_stop(phba->worker_thread);

        /* Disable interrupt from device */
        lpfc_sli4_disable_intr(phba);
        lpfc_sli4_queue_destroy(phba);

        return 0;
}

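/**
 * lpfc_pci_resume_one_s4 - PCI func to resume SLI-4 device for power mgmnt
 * @dev_d: pointer to device
 *
 * This routine is called from the kernel's PCI subsystem to support system
 * Power Management (PM) on a device with SLI-4 interface spec. When PM
 * invokes this method, it restarts the device's worker thread, re-enables
 * the device's interrupt, restarts the HBA, and brings the device online.
 *
 * Return code
 *	0 - driver resumed the device
 *	Error otherwise
 **/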
static int __maybe_unused
lpfc_pci_resume_one_s4(struct device *dev_d)
{
        struct Scsi_Host *shost = dev_get_drvdata(dev_d);
        struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
        uint32_t intr_mode;
        int error;

        lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
                        "0292 PCI device Power Management resume.\n");

        /* Startup the kernel thread for this host adapter. */
        phba->worker_thread = kthread_run(lpfc_do_work, phba,
                                          "lpfc_worker_%d", phba->brd_no);
        if (IS_ERR(phba->worker_thread)) {
                error = PTR_ERR(phba->worker_thread);
                lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
                                "0293 PM resume failed to start worker "
                                "thread: error=x%x.\n", error);
                return error;
        }

        /* Configure and enable interrupt */
        intr_mode = lpfc_sli4_enable_intr(phba, phba->intr_mode);
        if (intr_mode == LPFC_INTR_ERROR) {
                lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
                                "0294 PM resume Failed to enable interrupt\n");
                return -EIO;
        } else
                phba->intr_mode = intr_mode;

        /* Restart HBA and bring it online */
        lpfc_sli_brdrestart(phba);
        lpfc_online(phba);

        /* Log the current active interrupt mode */
        lpfc_log_intr_mode(phba, phba->intr_mode);

        return 0;
}

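/**
 * lpfc_sli4_prep_dev_for_recover - Prepare SLI4 device for pci slot recover
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is called to prepare the SLI4 device for PCI slot recover. It
 * aborts all the outstanding SCSI I/Os to the pci device.
 **/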
static void
lpfc_sli4_prep_dev_for_recover(struct lpfc_hba *phba)
{
        lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
                        "2828 PCI channel I/O abort preparing for recovery\n");
        /*
         * There may be errored I/Os through HBA, abort all I/Os on txcmplq
         * and let the SCSI mid-layer retry them to recover.
         */
        lpfc_sli_abort_fcp_rings(phba);
}

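/**
 * lpfc_sli4_prep_dev_for_reset - Prepare SLI4 device for pci slot reset
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is called to prepare the SLI4 device for PCI slot reset. It
 * disables the device interrupt and pci device, and aborts the internal FCP
 * pending I/Os.
 **/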
static void
lpfc_sli4_prep_dev_for_reset(struct lpfc_hba *phba)
{
        int offline = pci_channel_offline(phba->pcidev);

        lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
                        "2826 PCI channel disable preparing for reset offline"
                        " %d\n", offline);

        /* Block any management I/Os to the device */
        lpfc_block_mgmt_io(phba, LPFC_MBX_NO_WAIT);

        /* HBA_PCI_ERR was set in io_error_detected */
        lpfc_offline_prep(phba, LPFC_MBX_NO_WAIT);
        /* Flush all driver's outstanding I/Os as we are to reset */
        lpfc_sli_flush_io_rings(phba);
        lpfc_offline(phba);

        /* stop all timers */
        lpfc_stop_hba_timers(phba);

        lpfc_sli4_queue_destroy(phba);
        /* Disable interrupt and pci device */
        lpfc_sli4_disable_intr(phba);
        pci_disable_device(phba->pcidev);
}

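/**
 * lpfc_sli4_prep_dev_for_perm_failure - Prepare SLI4 dev for pci slot disable
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is called to prepare the SLI4 device for PCI slot permanently
 * disabling. It blocks the SCSI transport layer traffic and flushes the FCP
 * pending I/Os.
 **/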
static void
lpfc_sli4_prep_dev_for_perm_failure(struct lpfc_hba *phba)
{
        lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
                        "2827 PCI channel permanent disable for failure\n");

        /* Block all SCSI devices' I/Os on the host */
        lpfc_scsi_dev_block(phba);

        /* stop all timers */
        lpfc_stop_hba_timers(phba);

        /* Clean up all driver's outstanding I/Os */
        lpfc_sli_flush_io_rings(phba);
}

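/**
 * lpfc_io_error_detected_s4 - Method for handling PCI I/O error to SLI-4 device
 * @pdev: pointer to PCI device.
 * @state: the current PCI connection state.
 *
 * This routine is called from the PCI subsystem for error handling to a
 * device with SLI-4 interface spec. It is called by the PCI subsystem after
 * a PCI bus error affecting this device has been detected. When this routine
 * is invoked, it will need to stop all the I/Os and interrupt(s) to the
 * device.
 *
 * Return codes
 *	PCI_ERS_RESULT_CAN_RECOVER - can be recovered with reset_link
 *	PCI_ERS_RESULT_NEED_RESET - need to reset before recovery
 *	PCI_ERS_RESULT_DISCONNECT - device could not be recovered
 **/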
static pci_ers_result_t
lpfc_io_error_detected_s4(struct pci_dev *pdev, pci_channel_state_t state)
{
        struct Scsi_Host *shost = pci_get_drvdata(pdev);
        struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
        bool hba_pci_err;

        switch (state) {
        case pci_channel_io_normal:
                /* Non-fatal error, prepare for recovery */
                lpfc_sli4_prep_dev_for_recover(phba);
                return PCI_ERS_RESULT_CAN_RECOVER;
        case pci_channel_io_frozen:
                hba_pci_err = test_and_set_bit(HBA_PCI_ERR, &phba->bit_flags);
                /* Fatal error, prepare for slot reset */
                if (!hba_pci_err)
                        lpfc_sli4_prep_dev_for_reset(phba);
                else
                        lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
                                        "2832 Already handling PCI error "
                                        "state: x%x\n", state);
                return PCI_ERS_RESULT_NEED_RESET;
        case pci_channel_io_perm_failure:
                set_bit(HBA_PCI_ERR, &phba->bit_flags);
                /* Permanent failure, prepare for device down */
                lpfc_sli4_prep_dev_for_perm_failure(phba);
                return PCI_ERS_RESULT_DISCONNECT;
        default:
                /* Unknown state, prepare and request slot reset */
                lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
                                "2825 Unknown PCI error state: x%x\n", state);
                hba_pci_err = test_and_set_bit(HBA_PCI_ERR, &phba->bit_flags);
                if (!hba_pci_err)
                        lpfc_sli4_prep_dev_for_reset(phba);
                return PCI_ERS_RESULT_NEED_RESET;
        }
}

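/**
 * lpfc_io_slot_reset_s4 - Method for restart PCI SLI-4 device from scratch
 * @pdev: pointer to PCI device.
 *
 * This routine is called from the PCI subsystem for error handling to a
 * device with SLI-4 interface spec. It is called after the PCI bus has been
 * reset to restart the PCI card from scratch, as if from a cold-boot. After
 * the driver returns PCI_ERS_RESULT_NEED_RESET, the PCI subsystem performs
 * proper error recovery and then calls this routine before calling the
 * .resume method to recover the device. This function re-initializes the HBA
 * device and enables the interrupt, but it will just put the HBA into an
 * offline state without passing any I/O traffic.
 *
 * Return codes
 *	PCI_ERS_RESULT_RECOVERED - the device has been recovered
 *	PCI_ERS_RESULT_DISCONNECT - device could not be recovered
 **/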
static pci_ers_result_t
lpfc_io_slot_reset_s4(struct pci_dev *pdev)
{
        struct Scsi_Host *shost = pci_get_drvdata(pdev);
        struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
        struct lpfc_sli *psli = &phba->sli;
        uint32_t intr_mode;
        bool hba_pci_err;

        dev_printk(KERN_INFO, &pdev->dev, "recovering from a slot reset.\n");
        if (pci_enable_device_mem(pdev)) {
                printk(KERN_ERR "lpfc: Cannot re-enable "
                        "PCI device after reset.\n");
                return PCI_ERS_RESULT_DISCONNECT;
        }

        pci_restore_state(pdev);

        hba_pci_err = test_and_clear_bit(HBA_PCI_ERR, &phba->bit_flags);
        if (!hba_pci_err)
                dev_info(&pdev->dev,
                         "hba_pci_err was not set, recovering slot reset.\n");
        /*
         * As the new kernel behavior of pci_restore_state() API call clears
         * the device saved_state flag, need to save the restored state again.
         */
        pci_save_state(pdev);

        if (pdev->is_busmaster)
                pci_set_master(pdev);

        spin_lock_irq(&phba->hbalock);
        psli->sli_flag &= ~LPFC_SLI_ACTIVE;
        spin_unlock_irq(&phba->hbalock);

        /* Init cpu_map array */
        lpfc_cpu_map_array_init(phba);
        /* Configure and enable interrupt */
        intr_mode = lpfc_sli4_enable_intr(phba, phba->intr_mode);
        if (intr_mode == LPFC_INTR_ERROR) {
                lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
                                "2824 Cannot re-enable interrupt after "
                                "slot reset.\n");
                return PCI_ERS_RESULT_DISCONNECT;
        } else
                phba->intr_mode = intr_mode;
        lpfc_cpu_affinity_check(phba, phba->cfg_irq_chann);

        /* Log the current active interrupt mode */
        lpfc_log_intr_mode(phba, phba->intr_mode);

        return PCI_ERS_RESULT_RECOVERED;
}

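/**
 * lpfc_io_resume_s4 - Method for resuming PCI I/O operation to SLI-4 device
 * @pdev: pointer to PCI device
 *
 * This routine is called from the PCI subsystem for error handling to a
 * device with SLI-4 interface spec. It is called when kernel error recovery
 * tells the lpfc driver that it is ok to resume normal PCI operation after
 * PCI bus error recovery. After this call, traffic can start to flow from
 * this device again.
 **/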
static void
lpfc_io_resume_s4(struct pci_dev *pdev)
{
        struct Scsi_Host *shost = pci_get_drvdata(pdev);
        struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;

        /*
         * In case of slot reset, as function reset is performed through
         * mailbox command which needs DMA to be enabled, this operation
         * has to be moved to the io resume phase. Taking device offline and
         * performing driver online requires operation here.
         */
        if (!(phba->sli.sli_flag & LPFC_SLI_ACTIVE)) {
                /* Perform device reset */
                lpfc_sli_brdrestart(phba);
                /* Bring the device back online */
                lpfc_online(phba);
        }
}

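/**
 * lpfc_pci_probe_one - lpfc PCI probe func to reg dev to PCI subsystem
 * @pdev: pointer to PCI device
 * @pid: pointer to PCI device identifier
 *
 * This routine is to be registered to the kernel's PCI subsystem. When an
 * Emulex HBA device is presented on the PCI bus, the driver core invokes
 * this routine. This routine dispatches the action to the proper SLI-3 or
 * SLI-4 device probing routine, which will do all the initialization that
 * it needs to handle the HBA properly.
 *
 * Return code
 *	0 - driver can claim the device
 *	negative value - driver can not claim the device
 **/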
static int
lpfc_pci_probe_one(struct pci_dev *pdev, const struct pci_device_id *pid)
{
        int rc;
        struct lpfc_sli_intf intf;

        if (pci_read_config_dword(pdev, LPFC_SLI_INTF, &intf.word0))
                return -ENODEV;

        if ((bf_get(lpfc_sli_intf_valid, &intf) == LPFC_SLI_INTF_VALID) &&
            (bf_get(lpfc_sli_intf_slirev, &intf) == LPFC_SLI_INTF_REV_SLI4))
                rc = lpfc_pci_probe_one_s4(pdev, pid);
        else
                rc = lpfc_pci_probe_one_s3(pdev, pid);

        return rc;
}

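/**
 * lpfc_pci_remove_one - lpfc PCI func to unreg dev from PCI subsystem
 * @pdev: pointer to PCI device
 *
 * This routine is to be registered to the kernel's PCI subsystem. When an
 * Emulex HBA is removed from the PCI bus, the driver core invokes this
 * routine. This routine dispatches the action to the proper SLI-3 or SLI-4
 * device remove routine, which will perform all the necessary cleanup for
 * the device to be removed from the PCI subsystem properly.
 **/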
static void
lpfc_pci_remove_one(struct pci_dev *pdev)
{
        struct Scsi_Host *shost = pci_get_drvdata(pdev);
        struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;

        switch (phba->pci_dev_grp) {
        case LPFC_PCI_DEV_LP:
                lpfc_pci_remove_one_s3(pdev);
                break;
        case LPFC_PCI_DEV_OC:
                lpfc_pci_remove_one_s4(pdev);
                break;
        default:
                lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
                                "1424 Invalid PCI device group: 0x%x\n",
                                phba->pci_dev_grp);
                break;
        }
}

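/**
 * lpfc_pci_suspend_one - lpfc PCI func to suspend dev for power management
 * @dev: pointer to device
 *
 * This routine is to be registered to the kernel's PCI subsystem to support
 * system Power Management (PM). When PM invokes this method, it dispatches
 * the action to the proper SLI-3 or SLI-4 device suspend routine, which will
 * suspend the device.
 *
 * Return code
 *	0 - driver suspended the device
 *	Error otherwise
 **/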
static int __maybe_unused
lpfc_pci_suspend_one(struct device *dev)
{
        struct Scsi_Host *shost = dev_get_drvdata(dev);
        struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
        int rc = -ENODEV;

        switch (phba->pci_dev_grp) {
        case LPFC_PCI_DEV_LP:
                rc = lpfc_pci_suspend_one_s3(dev);
                break;
        case LPFC_PCI_DEV_OC:
                rc = lpfc_pci_suspend_one_s4(dev);
                break;
        default:
                lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
                                "1425 Invalid PCI device group: 0x%x\n",
                                phba->pci_dev_grp);
                break;
        }
        return rc;
}

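/**
 * lpfc_pci_resume_one - lpfc PCI func to resume dev for power management
 * @dev: pointer to device
 *
 * This routine is to be registered to the kernel's PCI subsystem to support
 * system Power Management (PM). When PM invokes this method, it dispatches
 * the action to the proper SLI-3 or SLI-4 device resume routine, which will
 * resume the device.
 *
 * Return code
 *	0 - driver resumed the device
 *	Error otherwise
 **/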
static int __maybe_unused
lpfc_pci_resume_one(struct device *dev)
{
        struct Scsi_Host *shost = dev_get_drvdata(dev);
        struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
        int rc = -ENODEV;

        switch (phba->pci_dev_grp) {
        case LPFC_PCI_DEV_LP:
                rc = lpfc_pci_resume_one_s3(dev);
                break;
        case LPFC_PCI_DEV_OC:
                rc = lpfc_pci_resume_one_s4(dev);
                break;
        default:
                lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
                                "1426 Invalid PCI device group: 0x%x\n",
                                phba->pci_dev_grp);
                break;
        }
        return rc;
}

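/**
 * lpfc_io_error_detected - lpfc method for handling PCI I/O error
 * @pdev: pointer to PCI device
 * @state: the current PCI connection state
 *
 * This routine is registered to the PCI subsystem for error handling. It is
 * called by the PCI subsystem after a PCI bus error affecting this device
 * has been detected. When this routine is invoked, it dispatches the action
 * to the proper SLI-3 or SLI-4 device error detected handling routine, which
 * will perform the proper error detected operation.
 *
 * Return codes
 *	PCI_ERS_RESULT_NEED_RESET - need to reset before recovery
 *	PCI_ERS_RESULT_DISCONNECT - device could not be recovered
 **/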
static pci_ers_result_t
lpfc_io_error_detected(struct pci_dev *pdev, pci_channel_state_t state)
{
        struct Scsi_Host *shost = pci_get_drvdata(pdev);
        struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
        pci_ers_result_t rc = PCI_ERS_RESULT_DISCONNECT;

        if (phba->link_state == LPFC_HBA_ERROR &&
            phba->hba_flag & HBA_IOQ_FLUSH)
                return PCI_ERS_RESULT_NEED_RESET;

        switch (phba->pci_dev_grp) {
        case LPFC_PCI_DEV_LP:
                rc = lpfc_io_error_detected_s3(pdev, state);
                break;
        case LPFC_PCI_DEV_OC:
                rc = lpfc_io_error_detected_s4(pdev, state);
                break;
        default:
                lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
                                "1427 Invalid PCI device group: 0x%x\n",
                                phba->pci_dev_grp);
                break;
        }
        return rc;
}

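/**
 * lpfc_io_slot_reset - lpfc method for restart PCI dev from scratch
 * @pdev: pointer to PCI device.
 *
 * This routine is registered to the PCI subsystem for error handling. This
 * is called after the PCI bus has been reset to restart the PCI card from
 * scratch, as if from a cold-boot. When this routine is invoked, it
 * dispatches the action to the proper SLI-3 or SLI-4 device reset handling
 * routine, which will perform the proper device reset.
 *
 * Return codes
 *	PCI_ERS_RESULT_RECOVERED - the device has been recovered
 *	PCI_ERS_RESULT_DISCONNECT - device could not be recovered
 **/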
static pci_ers_result_t
lpfc_io_slot_reset(struct pci_dev *pdev)
{
        struct Scsi_Host *shost = pci_get_drvdata(pdev);
        struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
        pci_ers_result_t rc = PCI_ERS_RESULT_DISCONNECT;

        switch (phba->pci_dev_grp) {
        case LPFC_PCI_DEV_LP:
                rc = lpfc_io_slot_reset_s3(pdev);
                break;
        case LPFC_PCI_DEV_OC:
                rc = lpfc_io_slot_reset_s4(pdev);
                break;
        default:
                lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
                                "1428 Invalid PCI device group: 0x%x\n",
                                phba->pci_dev_grp);
                break;
        }
        return rc;
}

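/**
 * lpfc_io_resume - lpfc method for resuming PCI I/O operation
 * @pdev: pointer to PCI device
 *
 * This routine is registered to the PCI subsystem for error handling. It
 * is called when kernel error recovery tells the lpfc driver that it is
 * ok to resume normal PCI operation after PCI bus error recovery. When
 * this routine is invoked, it dispatches the action to the proper SLI-3
 * or SLI-4 device io_resume routine, which will resume the device operation.
 **/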
static void
lpfc_io_resume(struct pci_dev *pdev)
{
        struct Scsi_Host *shost = pci_get_drvdata(pdev);
        struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;

        switch (phba->pci_dev_grp) {
        case LPFC_PCI_DEV_LP:
                lpfc_io_resume_s3(pdev);
                break;
        case LPFC_PCI_DEV_OC:
                lpfc_io_resume_s4(pdev);
                break;
        default:
                lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
                                "1429 Invalid PCI device group: 0x%x\n",
                                phba->pci_dev_grp);
                break;
        }
}

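/**
 * lpfc_sli4_oas_verify - Verify OAS is supported by this adapter
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine checks to see if OAS is supported for this adapter. If
 * supported, the configure Flash Optimized Fabric flag is set.  Otherwise,
 * the enable oas flag is cleared and the pool created for OAS device data
 * is destroyed.
 **/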
static void
lpfc_sli4_oas_verify(struct lpfc_hba *phba)
{
        if (!phba->cfg_EnableXLane)
                return;

        if (phba->sli4_hba.pc_sli4_params.oas_supported) {
                phba->cfg_fof = 1;
        } else {
                phba->cfg_fof = 0;
                mempool_destroy(phba->device_data_mem_pool);
                phba->device_data_mem_pool = NULL;
        }
}

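/**
 * lpfc_sli4_ras_init - Verify RAS-FW log is supported by this adapter
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine checks to see if RAS firmware logging is supported by the
 * adapter, and if so, whether it is enabled for this PCI function via the
 * configured logging function and buffer size.
 **/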
void
lpfc_sli4_ras_init(struct lpfc_hba *phba)
{
        /* if ASIC supports FW logging, turn on FW logging */
        if ((bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) ==
             LPFC_SLI_INTF_IF_TYPE_6) ||
            (bf_get(lpfc_sli_intf_sli_family, &phba->sli4_hba.sli_intf) ==
             LPFC_SLI_INTF_FAMILY_G6)) {
                phba->ras_fwlog.ras_hwsupport = true;
                if (phba->cfg_ras_fwlog_func == PCI_FUNC(phba->pcidev->devfn) &&
                    phba->cfg_ras_fwlog_buffsize)
                        phba->ras_fwlog.ras_enabled = true;
                else
                        phba->ras_fwlog.ras_enabled = false;
        } else {
                phba->ras_fwlog.ras_hwsupport = false;
        }
}


MODULE_DEVICE_TABLE(pci, lpfc_id_table);

static const struct pci_error_handlers lpfc_err_handler = {
        .error_detected = lpfc_io_error_detected,
        .slot_reset = lpfc_io_slot_reset,
        .resume = lpfc_io_resume,
};

static SIMPLE_DEV_PM_OPS(lpfc_pci_pm_ops_one,
                         lpfc_pci_suspend_one,
                         lpfc_pci_resume_one);

static struct pci_driver lpfc_driver = {
        .name = LPFC_DRIVER_NAME,
        .id_table = lpfc_id_table,
        .probe = lpfc_pci_probe_one,
        .remove = lpfc_pci_remove_one,
        .shutdown = lpfc_pci_remove_one,
        .driver.pm = &lpfc_pci_pm_ops_one,
        .err_handler = &lpfc_err_handler,
};

static const struct file_operations lpfc_mgmt_fop = {
        .owner = THIS_MODULE,
};

static struct miscdevice lpfc_mgmt_dev = {
        .minor = MISC_DYNAMIC_MINOR,
        .name = "lpfcmgmt",
        .fops = &lpfc_mgmt_fop,
};

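/**
 * lpfc_init - lpfc module initialization routine
 *
 * This routine is to be invoked when the lpfc module is loaded into the
 * kernel. The special kernel macro module_init() is used to indicate the
 * role of this routine to the kernel as lpfc module entry point.
 *
 * Return codes
 *   0 - successful
 *   -ENOMEM - FC attach transport failed
 *   all others - failed
 */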
static int __init
lpfc_init(void)
{
        int error = 0;

        pr_info(LPFC_MODULE_DESC "\n");
        pr_info(LPFC_COPYRIGHT "\n");

        error = misc_register(&lpfc_mgmt_dev);
        if (error)
                printk(KERN_ERR "Could not register lpfcmgmt device, "
                        "misc_register returned with status %d", error);

        error = -ENOMEM;
        lpfc_transport_functions.vport_create = lpfc_vport_create;
        lpfc_transport_functions.vport_delete = lpfc_vport_delete;
        lpfc_transport_template =
                                fc_attach_transport(&lpfc_transport_functions);
        if (lpfc_transport_template == NULL)
                goto unregister;
        lpfc_vport_transport_template =
                fc_attach_transport(&lpfc_vport_transport_functions);
        if (lpfc_vport_transport_template == NULL) {
                fc_release_transport(lpfc_transport_template);
                goto unregister;
        }
        lpfc_wqe_cmd_template();
        lpfc_nvmet_cmd_template();

        /* Initialize in case vector mapping is needed */
        lpfc_present_cpu = num_present_cpus();

        lpfc_pldv_detect = false;

        error = cpuhp_setup_state_multi(CPUHP_AP_ONLINE_DYN,
                                        "lpfc/sli4:online",
                                        lpfc_cpu_online, lpfc_cpu_offline);
        if (error < 0)
                goto cpuhp_failure;
        lpfc_cpuhp_state = error;

        error = pci_register_driver(&lpfc_driver);
        if (error)
                goto unwind;

        return error;

unwind:
        cpuhp_remove_multi_state(lpfc_cpuhp_state);
cpuhp_failure:
        fc_release_transport(lpfc_transport_template);
        fc_release_transport(lpfc_vport_transport_template);
unregister:
        misc_deregister(&lpfc_mgmt_dev);

        return error;
}

void lpfc_dmp_dbg(struct lpfc_hba *phba)
{
        unsigned int start_idx;
        unsigned int dbg_cnt;
        unsigned int temp_idx;
        int i;
        int j = 0;
        unsigned long rem_nsec;

        /* Only allow one dump of the debug log at a time */
        if (atomic_cmpxchg(&phba->dbg_log_dmping, 0, 1) != 0)
                return;

        start_idx = (unsigned int)atomic_read(&phba->dbg_log_idx) % DBG_LOG_SZ;
        dbg_cnt = (unsigned int)atomic_read(&phba->dbg_log_cnt);
        if (!dbg_cnt)
                goto out;
        temp_idx = start_idx;
        if (dbg_cnt >= DBG_LOG_SZ) {
                dbg_cnt = DBG_LOG_SZ;
                temp_idx -= 1;
        } else {
                if ((start_idx + dbg_cnt) > (DBG_LOG_SZ - 1)) {
                        temp_idx = (start_idx + dbg_cnt) % DBG_LOG_SZ;
                } else {
                        if (start_idx < dbg_cnt)
                                start_idx = DBG_LOG_SZ - (dbg_cnt - start_idx);
                        else
                                start_idx -= dbg_cnt;
                }
        }
        dev_info(&phba->pcidev->dev, "start %d end %d cnt %d\n",
                 start_idx, temp_idx, dbg_cnt);

        for (i = 0; i < dbg_cnt; i++) {
                if ((start_idx + i) < DBG_LOG_SZ)
                        temp_idx = (start_idx + i) % DBG_LOG_SZ;
                else
                        temp_idx = j++;
                rem_nsec = do_div(phba->dbg_log[temp_idx].t_ns, NSEC_PER_SEC);
                dev_info(&phba->pcidev->dev, "%d: [%5lu.%06lu] %s",
                         temp_idx,
                         (unsigned long)phba->dbg_log[temp_idx].t_ns,
                         rem_nsec / 1000,
                         phba->dbg_log[temp_idx].log);
        }
out:
        atomic_set(&phba->dbg_log_cnt, 0);
        atomic_set(&phba->dbg_log_dmping, 0);
}

__printf(2, 3)
void lpfc_dbg_print(struct lpfc_hba *phba, const char *fmt, ...)
{
        unsigned int idx;
        va_list args;
        int dbg_dmping = atomic_read(&phba->dbg_log_dmping);
        struct va_format vaf;

        va_start(args, fmt);
        if (unlikely(dbg_dmping)) {
                vaf.fmt = fmt;
                vaf.va = &args;
                dev_info(&phba->pcidev->dev, "%pV", &vaf);
                va_end(args);
                return;
        }
        idx = (unsigned int)atomic_fetch_add(1, &phba->dbg_log_idx) %
                DBG_LOG_SZ;

        atomic_inc(&phba->dbg_log_cnt);

        vscnprintf(phba->dbg_log[idx].log,
                   sizeof(phba->dbg_log[idx].log), fmt, args);
        va_end(args);

        phba->dbg_log[idx].t_ns = local_clock();
}

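/**
 * lpfc_exit - lpfc module removal routine
 *
 * This routine is invoked when the lpfc module is removed from the kernel.
 * The special kernel macro module_exit() is used to indicate the role of
 * this routine to the kernel as lpfc module exit point.
 */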
static void __exit
lpfc_exit(void)
{
        misc_deregister(&lpfc_mgmt_dev);
        pci_unregister_driver(&lpfc_driver);
        cpuhp_remove_multi_state(lpfc_cpuhp_state);
        fc_release_transport(lpfc_transport_template);
        fc_release_transport(lpfc_vport_transport_template);
        idr_destroy(&lpfc_hba_index);
}

module_init(lpfc_init);
module_exit(lpfc_exit);
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION(LPFC_MODULE_DESC);
MODULE_AUTHOR("Broadcom");
MODULE_VERSION("0:" LPFC_DRIVER_VERSION);