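/*
 * Initialization and teardown routines for the Emulex LightPulse
 * Fibre Channel (lpfc) host bus adapter driver: HBA bring-up and
 * configuration, error attention handling, heartbeat management,
 * and Vital Product Data parsing.
 */
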
#include <linux/blkdev.h>
#include <linux/delay.h>
#include <linux/dma-mapping.h>
#include <linux/idr.h>
#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/kthread.h>
#include <linux/pci.h>
#include <linux/spinlock.h>
#include <linux/ctype.h>
#include <linux/aer.h>
#include <linux/slab.h>
#include <linux/firmware.h>
#include <linux/miscdevice.h>
#include <linux/percpu.h>

#include <scsi/scsi.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_transport_fc.h>

#include "lpfc_hw4.h"
#include "lpfc_hw.h"
#include "lpfc_sli.h"
#include "lpfc_sli4.h"
#include "lpfc_nl.h"
#include "lpfc_disc.h"
#include "lpfc_scsi.h"
#include "lpfc.h"
#include "lpfc_logmsg.h"
#include "lpfc_crtn.h"
#include "lpfc_vport.h"
#include "lpfc_version.h"

char *_dump_buf_data;
unsigned long _dump_buf_data_order;
char *_dump_buf_dif;
unsigned long _dump_buf_dif_order;
spinlock_t _dump_buf_lock;

uint16_t *lpfc_used_cpu;
uint32_t lpfc_present_cpu;

static void lpfc_get_hba_model_desc(struct lpfc_hba *, uint8_t *, uint8_t *);
static int lpfc_post_rcv_buf(struct lpfc_hba *);
static int lpfc_sli4_queue_verify(struct lpfc_hba *);
static int lpfc_create_bootstrap_mbox(struct lpfc_hba *);
static int lpfc_setup_endian_order(struct lpfc_hba *);
static void lpfc_destroy_bootstrap_mbox(struct lpfc_hba *);
static void lpfc_free_els_sgl_list(struct lpfc_hba *);
static void lpfc_init_sgl_list(struct lpfc_hba *);
static int lpfc_init_active_sgl_array(struct lpfc_hba *);
static void lpfc_free_active_sgl(struct lpfc_hba *);
static int lpfc_hba_down_post_s3(struct lpfc_hba *phba);
static int lpfc_hba_down_post_s4(struct lpfc_hba *phba);
static int lpfc_sli4_cq_event_pool_create(struct lpfc_hba *);
static void lpfc_sli4_cq_event_pool_destroy(struct lpfc_hba *);
static void lpfc_sli4_cq_event_release_all(struct lpfc_hba *);
static void lpfc_sli4_disable_intr(struct lpfc_hba *);
static uint32_t lpfc_sli4_enable_intr(struct lpfc_hba *, uint32_t);

static struct scsi_transport_template *lpfc_transport_template = NULL;
static struct scsi_transport_template *lpfc_vport_transport_template = NULL;
static DEFINE_IDR(lpfc_hba_index);

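/**
 * lpfc_config_port_prep - Perform lpfc initialization prior to config port
 * @phba: pointer to lpfc hba data structure.
 *
 * Issues the READ_NVPARM and READ_REV mailbox commands, saves the returned
 * revision information as VPD data, and dumps and parses the adapter's
 * Vital Product Data region.
 *
 * Return codes:
 *   0 - success
 *   -ENOMEM - could not allocate a mailbox command
 *   -ERESTART - a mailbox command failed; caller should reset and retry
 *   -EINVAL - SLI-3 was requested but firmware gave no SLI-3 response
 */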
int
lpfc_config_port_prep(struct lpfc_hba *phba)
{
	lpfc_vpd_t *vp = &phba->vpd;
	int i = 0, rc;
	LPFC_MBOXQ_t *pmb;
	MAILBOX_t *mb;
	char *lpfc_vpd_data = NULL;
	uint16_t offset = 0;
	static char licensed[56] =
		    "key unlock for use with gnu public licensed code only\0";
	static int init_key = 1;

	pmb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!pmb) {
		phba->link_state = LPFC_HBA_ERROR;
		return -ENOMEM;
	}

	mb = &pmb->u.mb;
	phba->link_state = LPFC_INIT_MBX_CMDS;

	if (lpfc_is_LC_HBA(phba->pcidev->device)) {
		if (init_key) {
			uint32_t *ptext = (uint32_t *) licensed;

			for (i = 0; i < 56; i += sizeof (uint32_t), ptext++)
				*ptext = cpu_to_be32(*ptext);
			init_key = 0;
		}

		lpfc_read_nv(phba, pmb);
		memset((char *)mb->un.varRDnvp.rsvd3, 0,
			sizeof (mb->un.varRDnvp.rsvd3));
		memcpy((char *)mb->un.varRDnvp.rsvd3, licensed,
			sizeof (licensed));

		rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL);

		if (rc != MBX_SUCCESS) {
			lpfc_printf_log(phba, KERN_ERR, LOG_MBOX,
					"0324 Config Port initialization "
					"error, mbxCmd x%x READ_NVPARM, "
					"mbxStatus x%x\n",
					mb->mbxCommand, mb->mbxStatus);
			mempool_free(pmb, phba->mbox_mem_pool);
			return -ERESTART;
		}
		memcpy(phba->wwnn, (char *)mb->un.varRDnvp.nodename,
		       sizeof(phba->wwnn));
		memcpy(phba->wwpn, (char *)mb->un.varRDnvp.portname,
		       sizeof(phba->wwpn));
	}

	phba->sli3_options = 0x0;

	/* Setup and issue mailbox READ REV command */
	lpfc_read_rev(phba, pmb);
	rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL);
	if (rc != MBX_SUCCESS) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"0439 Adapter failed to init, mbxCmd x%x "
				"READ_REV, mbxStatus x%x\n",
				mb->mbxCommand, mb->mbxStatus);
		mempool_free(pmb, phba->mbox_mem_pool);
		return -ERESTART;
	}

	/* The value of rr must be 1 since the driver set the cv field to 1.
	 * This setting requires the FW to set all revision fields.
	 */
	if (mb->un.varRdRev.rr == 0) {
		vp->rev.rBit = 0;
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"0440 Adapter failed to init, READ_REV has "
				"missing revision information.\n");
		mempool_free(pmb, phba->mbox_mem_pool);
		return -ERESTART;
	}

	if (phba->sli_rev == 3 && !mb->un.varRdRev.v3rsp) {
		mempool_free(pmb, phba->mbox_mem_pool);
		return -EINVAL;
	}

	/* Save information as VPD data */
	vp->rev.rBit = 1;
	memcpy(&vp->sli3Feat, &mb->un.varRdRev.sli3Feat, sizeof(uint32_t));
	vp->rev.sli1FwRev = mb->un.varRdRev.sli1FwRev;
	memcpy(vp->rev.sli1FwName, (char *) mb->un.varRdRev.sli1FwName, 16);
	vp->rev.sli2FwRev = mb->un.varRdRev.sli2FwRev;
	memcpy(vp->rev.sli2FwName, (char *) mb->un.varRdRev.sli2FwName, 16);
	vp->rev.biuRev = mb->un.varRdRev.biuRev;
	vp->rev.smRev = mb->un.varRdRev.smRev;
	vp->rev.smFwRev = mb->un.varRdRev.un.smFwRev;
	vp->rev.endecRev = mb->un.varRdRev.endecRev;
	vp->rev.fcphHigh = mb->un.varRdRev.fcphHigh;
	vp->rev.fcphLow = mb->un.varRdRev.fcphLow;
	vp->rev.feaLevelHigh = mb->un.varRdRev.feaLevelHigh;
	vp->rev.feaLevelLow = mb->un.varRdRev.feaLevelLow;
	vp->rev.postKernRev = mb->un.varRdRev.postKernRev;
	vp->rev.opFwRev = mb->un.varRdRev.opFwRev;

	/* If the sli feature level is less then 9, we must
	 * tear down all RPIs and VPIs on link down if NPIV
	 * is enabled.
	 */
	if (vp->rev.feaLevelHigh < 9)
		phba->sli3_options |= LPFC_SLI3_VPORT_TEARDOWN;

	if (lpfc_is_LC_HBA(phba->pcidev->device))
		memcpy(phba->RandomData, (char *)&mb->un.varWords[24],
						sizeof (phba->RandomData));

	/* Get adapter VPD information */
	lpfc_vpd_data = kmalloc(DMP_VPD_SIZE, GFP_KERNEL);
	if (!lpfc_vpd_data)
		goto out_free_mbox;
	do {
		lpfc_dump_mem(phba, pmb, offset, DMP_REGION_VPD);
		rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL);

		if (rc != MBX_SUCCESS) {
			lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
					"0441 VPD not present on adapter, "
					"mbxCmd x%x DUMP VPD, mbxStatus x%x\n",
					mb->mbxCommand, mb->mbxStatus);
			mb->un.varDmp.word_cnt = 0;
		}

		if (mb->un.varDmp.word_cnt == 0)
			break;
		if (mb->un.varDmp.word_cnt > DMP_VPD_SIZE - offset)
			mb->un.varDmp.word_cnt = DMP_VPD_SIZE - offset;
		lpfc_sli_pcimem_bcopy(((uint8_t *)mb) + DMP_RSP_OFFSET,
				      lpfc_vpd_data + offset,
				      mb->un.varDmp.word_cnt);
		offset += mb->un.varDmp.word_cnt;
	} while (mb->un.varDmp.word_cnt && offset < DMP_VPD_SIZE);
	lpfc_parse_vpd(phba, lpfc_vpd_data, offset);

	kfree(lpfc_vpd_data);
out_free_mbox:
	mempool_free(pmb, phba->mbox_mem_pool);
	return 0;
}

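/**
 * lpfc_config_async_cmpl - Completion handler for config async event mbox cmd
 * @phba: pointer to lpfc hba data structure.
 * @pmboxq: pointer to the driver internal queue element for mailbox command.
 *
 * Records whether the firmware supports temperature sensor events and
 * frees the mailbox command.
 */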
static void
lpfc_config_async_cmpl(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmboxq)
{
	if (pmboxq->u.mb.mbxStatus == MBX_SUCCESS)
		phba->temp_sensor_support = 1;
	else
		phba->temp_sensor_support = 0;
	mempool_free(pmboxq, phba->mbox_mem_pool);
	return;
}

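/**
 * lpfc_dump_wakeup_param_cmpl - dump memory mailbox command completion handler
 * @phba: pointer to lpfc hba data structure.
 * @pmboxq: pointer to the driver internal queue element for mailbox command.
 *
 * Decodes the Option ROM version from mailbox word 7 into
 * phba->OptionROMVersion and frees the mailbox command.
 */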
static void
lpfc_dump_wakeup_param_cmpl(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmboxq)
{
	struct prog_id *prg;
	uint32_t prog_id_word;
	char dist = ' ';
	/* character array used for decoding dist type. */
	char dist_char[] = "nabx";

	if (pmboxq->u.mb.mbxStatus != MBX_SUCCESS) {
		mempool_free(pmboxq, phba->mbox_mem_pool);
		return;
	}

	prg = (struct prog_id *) &prog_id_word;

	/* word 7 contains the option rom version */
	prog_id_word = pmboxq->u.mb.un.varWords[7];

	/* Decode the Option rom version word to a readable string */
	if (prg->dist < 4)
		dist = dist_char[prg->dist];

	if ((prg->dist == 3) && (prg->num == 0))
		sprintf(phba->OptionROMVersion, "%d.%d%d",
			prg->ver, prg->rev, prg->lev);
	else
		sprintf(phba->OptionROMVersion, "%d.%d%d%c%d",
			prg->ver, prg->rev, prg->lev,
			dist, prg->num);
	mempool_free(pmboxq, phba->mbox_mem_pool);
	return;
}

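/**
 * lpfc_update_vport_wwn - Updates the fc_nodename and fc_portname
 * @vport: pointer to lpfc vport data structure.
 *
 * Applies any user-configured soft WWNN/WWPN and keeps the cached vport
 * names and the service parameters consistent with each other.
 */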
void
lpfc_update_vport_wwn(struct lpfc_vport *vport)
{
	/* If the soft name exists then update it using the service params */
	if (vport->phba->cfg_soft_wwnn)
		u64_to_wwn(vport->phba->cfg_soft_wwnn,
			   vport->fc_sparam.nodeName.u.wwn);
	if (vport->phba->cfg_soft_wwpn)
		u64_to_wwn(vport->phba->cfg_soft_wwpn,
			   vport->fc_sparam.portName.u.wwn);

	/* If the nodename is blank then copy the service params,
	 * otherwise keep the driver's cached name authoritative.
	 */
	if (vport->fc_nodename.u.wwn[0] == 0 || vport->phba->cfg_soft_wwnn)
		memcpy(&vport->fc_nodename, &vport->fc_sparam.nodeName,
			sizeof(struct lpfc_name));
	else
		memcpy(&vport->fc_sparam.nodeName, &vport->fc_nodename,
			sizeof(struct lpfc_name));

	if (vport->fc_portname.u.wwn[0] == 0 || vport->phba->cfg_soft_wwpn)
		memcpy(&vport->fc_portname, &vport->fc_sparam.portName,
			sizeof(struct lpfc_name));
	else
		memcpy(&vport->fc_sparam.portName, &vport->fc_portname,
			sizeof(struct lpfc_name));
}

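/**
 * lpfc_config_port_post - Perform lpfc initialization after config port
 * @phba: pointer to lpfc hba data structure.
 *
 * Reads the service parameters and adapter configuration, sets up the
 * host interrupt conditions, starts the ELS, heartbeat and error
 * attention timers, and optionally brings up the link.
 *
 * Return codes: 0 on success, -ENOMEM or -EIO on failure.
 */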
int
lpfc_config_port_post(struct lpfc_hba *phba)
{
	struct lpfc_vport *vport = phba->pport;
	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
	LPFC_MBOXQ_t *pmb;
	MAILBOX_t *mb;
	struct lpfc_dmabuf *mp;
	struct lpfc_sli *psli = &phba->sli;
	uint32_t status, timeout;
	int i, j;
	int rc;

	spin_lock_irq(&phba->hbalock);
	/*
	 * If the Config port completed correctly the HBA is not
	 * over heated any more.
	 */
	if (phba->over_temp_state == HBA_OVER_TEMP)
		phba->over_temp_state = HBA_NORMAL_TEMP;
	spin_unlock_irq(&phba->hbalock);

	pmb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!pmb) {
		phba->link_state = LPFC_HBA_ERROR;
		return -ENOMEM;
	}
	mb = &pmb->u.mb;

	/* Get login parameters for NID. */
	rc = lpfc_read_sparam(phba, pmb, 0);
	if (rc) {
		mempool_free(pmb, phba->mbox_mem_pool);
		return -ENOMEM;
	}

	pmb->vport = vport;
	if (lpfc_sli_issue_mbox(phba, pmb, MBX_POLL) != MBX_SUCCESS) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"0448 Adapter failed init, mbxCmd x%x "
				"READ_SPARM mbxStatus x%x\n",
				mb->mbxCommand, mb->mbxStatus);
		phba->link_state = LPFC_HBA_ERROR;
		mp = (struct lpfc_dmabuf *) pmb->context1;
		mempool_free(pmb, phba->mbox_mem_pool);
		lpfc_mbuf_free(phba, mp->virt, mp->phys);
		kfree(mp);
		return -EIO;
	}

	mp = (struct lpfc_dmabuf *) pmb->context1;

	memcpy(&vport->fc_sparam, mp->virt, sizeof (struct serv_parm));
	lpfc_mbuf_free(phba, mp->virt, mp->phys);
	kfree(mp);
	pmb->context1 = NULL;
	lpfc_update_vport_wwn(vport);

	/* Update the fc_host data structures with new wwn. */
	fc_host_node_name(shost) = wwn_to_u64(vport->fc_nodename.u.wwn);
	fc_host_port_name(shost) = wwn_to_u64(vport->fc_portname.u.wwn);
	fc_host_max_npiv_vports(shost) = phba->max_vpi;

	/* If no serial number in VPD data, use low 6 bytes of WWNN */
	if (phba->SerialNumber[0] == 0) {
		uint8_t *outptr;

		outptr = &vport->fc_nodename.u.s.IEEE[0];
		for (i = 0; i < 12; i++) {
			status = *outptr++;
			j = ((status & 0xf0) >> 4);
			if (j <= 9)
				phba->SerialNumber[i] =
				    (char)((uint8_t) 0x30 + (uint8_t) j);
			else
				phba->SerialNumber[i] =
				    (char)((uint8_t) 0x61 + (uint8_t) (j - 10));
			i++;
			j = (status & 0xf);
			if (j <= 9)
				phba->SerialNumber[i] =
				    (char)((uint8_t) 0x30 + (uint8_t) j);
			else
				phba->SerialNumber[i] =
				    (char)((uint8_t) 0x61 + (uint8_t) (j - 10));
		}
	}

	lpfc_read_config(phba, pmb);
	pmb->vport = vport;
	if (lpfc_sli_issue_mbox(phba, pmb, MBX_POLL) != MBX_SUCCESS) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"0453 Adapter failed to init, mbxCmd x%x "
				"READ_CONFIG, mbxStatus x%x\n",
				mb->mbxCommand, mb->mbxStatus);
		phba->link_state = LPFC_HBA_ERROR;
		mempool_free(pmb, phba->mbox_mem_pool);
		return -EIO;
	}

	/* Check if the port is disabled */
	lpfc_sli_read_link_ste(phba);

	/* Reset the DFT_HBA_Q_DEPTH to the max xri */
	if (phba->cfg_hba_queue_depth > (mb->un.varRdConfig.max_xri+1))
		phba->cfg_hba_queue_depth =
			(mb->un.varRdConfig.max_xri + 1) -
			lpfc_sli4_get_els_iocb_cnt(phba);

	phba->lmt = mb->un.varRdConfig.lmt;

	/* Get the default values for Model Name and Description */
	lpfc_get_hba_model_desc(phba, phba->ModelName, phba->ModelDesc);

	phba->link_state = LPFC_LINK_DOWN;

	/* Only process IOCBs on ELS ring till hba_state is READY */
	if (psli->ring[psli->extra_ring].sli.sli3.cmdringaddr)
		psli->ring[psli->extra_ring].flag |= LPFC_STOP_IOCB_EVENT;
	if (psli->ring[psli->fcp_ring].sli.sli3.cmdringaddr)
		psli->ring[psli->fcp_ring].flag |= LPFC_STOP_IOCB_EVENT;
	if (psli->ring[psli->next_ring].sli.sli3.cmdringaddr)
		psli->ring[psli->next_ring].flag |= LPFC_STOP_IOCB_EVENT;

	/* Post receive buffers for desired rings */
	if (phba->sli_rev != 3)
		lpfc_post_rcv_buf(phba);

	/*
	 * Configure HBA MSI-X attention conditions to messages if MSI-X mode
	 */
	if (phba->intr_type == MSIX) {
		rc = lpfc_config_msi(phba, pmb);
		if (rc) {
			mempool_free(pmb, phba->mbox_mem_pool);
			return -EIO;
		}
		rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL);
		if (rc != MBX_SUCCESS) {
			lpfc_printf_log(phba, KERN_ERR, LOG_MBOX,
					"0352 Config MSI mailbox command "
					"failed, mbxCmd x%x, mbxStatus x%x\n",
					pmb->u.mb.mbxCommand,
					pmb->u.mb.mbxStatus);
			mempool_free(pmb, phba->mbox_mem_pool);
			return -EIO;
		}
	}

	spin_lock_irq(&phba->hbalock);
	/* Initialize ERATT handling flag */
	phba->hba_flag &= ~HBA_ERATT_HANDLED;

	/* Enable appropriate host interrupts */
	if (lpfc_readl(phba->HCregaddr, &status)) {
		spin_unlock_irq(&phba->hbalock);
		return -EIO;
	}
	status |= HC_MBINT_ENA | HC_ERINT_ENA | HC_LAINT_ENA;
	if (psli->num_rings > 0)
		status |= HC_R0INT_ENA;
	if (psli->num_rings > 1)
		status |= HC_R1INT_ENA;
	if (psli->num_rings > 2)
		status |= HC_R2INT_ENA;
	if (psli->num_rings > 3)
		status |= HC_R3INT_ENA;

	if ((phba->cfg_poll & ENABLE_FCP_RING_POLLING) &&
	    (phba->cfg_poll & DISABLE_FCP_RING_INT))
		status &= ~(HC_R0INT_ENA);

	writel(status, phba->HCregaddr);
	readl(phba->HCregaddr); /* flush */
	spin_unlock_irq(&phba->hbalock);

	/* Set up ring-0 (ELS) timer */
	timeout = phba->fc_ratov * 2;
	mod_timer(&vport->els_tmofunc,
		  jiffies + msecs_to_jiffies(1000 * timeout));
	/* Set up heart beat (HB) timer */
	mod_timer(&phba->hb_tmofunc,
		  jiffies + msecs_to_jiffies(1000 * LPFC_HB_MBOX_INTERVAL));
	phba->hb_outstanding = 0;
	phba->last_completion_time = jiffies;
	/* Set up error attention (ERATT) polling timer */
	mod_timer(&phba->eratt_poll,
		  jiffies + msecs_to_jiffies(1000 * LPFC_ERATT_POLL_INTERVAL));

	if (phba->hba_flag & LINK_DISABLED) {
		lpfc_printf_log(phba,
			KERN_ERR, LOG_INIT,
			"2598 Adapter Link is disabled.\n");
		lpfc_down_link(phba, pmb);
		pmb->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
		rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT);
		if ((rc != MBX_SUCCESS) && (rc != MBX_BUSY)) {
			lpfc_printf_log(phba,
			KERN_ERR, LOG_INIT,
			"2599 Adapter failed to issue DOWN_LINK"
			" mbox command rc 0x%x\n", rc);

			mempool_free(pmb, phba->mbox_mem_pool);
			return -EIO;
		}
	} else if (phba->cfg_suppress_link_up == LPFC_INITIALIZE_LINK) {
		mempool_free(pmb, phba->mbox_mem_pool);
		rc = phba->lpfc_hba_init_link(phba, MBX_NOWAIT);
		if (rc)
			return rc;
	}
	/* MBOX buffer will be freed in mbox compl */
	pmb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!pmb) {
		phba->link_state = LPFC_HBA_ERROR;
		return -ENOMEM;
	}

	lpfc_config_async(phba, pmb, LPFC_ELS_RING);
	pmb->mbox_cmpl = lpfc_config_async_cmpl;
	pmb->vport = phba->pport;
	rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT);

	if ((rc != MBX_BUSY) && (rc != MBX_SUCCESS)) {
		lpfc_printf_log(phba,
				KERN_ERR,
				LOG_INIT,
				"0456 Adapter failed to issue "
				"ASYNCEVT_ENABLE mbox status x%x\n",
				rc);
		mempool_free(pmb, phba->mbox_mem_pool);
	}

	/* Get Option rom version */
	pmb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!pmb) {
		phba->link_state = LPFC_HBA_ERROR;
		return -ENOMEM;
	}

	lpfc_dump_wakeup_param(phba, pmb);
	pmb->mbox_cmpl = lpfc_dump_wakeup_param_cmpl;
	pmb->vport = phba->pport;
	rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT);

	if ((rc != MBX_BUSY) && (rc != MBX_SUCCESS)) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT, "0435 Adapter failed "
				"to get Option ROM version status x%x\n", rc);
		mempool_free(pmb, phba->mbox_mem_pool);
	}

	return 0;
}

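/**
 * lpfc_hba_init_link - Initialize the FC link
 * @phba: pointer to lpfc hba data structure.
 * @flag: mailbox command issue mode, either MBX_NOWAIT or MBX_POLL
 *
 * Brings up the HBA link using the configured topology and link speed.
 */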
int
lpfc_hba_init_link(struct lpfc_hba *phba, uint32_t flag)
{
	return lpfc_hba_init_link_fc_topology(phba, phba->cfg_topology, flag);
}

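/**
 * lpfc_hba_init_link_fc_topology - Initialize FC link with desired topology
 * @phba: pointer to lpfc hba data structure.
 * @fc_topology: desired fc topology.
 * @flag: mailbox command issue mode, either MBX_NOWAIT or MBX_POLL
 *
 * Validates the configured link speed against the adapter's link speed
 * capabilities and issues the INIT_LINK mailbox command.
 *
 * Return codes: 0 on success, -ENOMEM or -EIO on failure.
 */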
int
lpfc_hba_init_link_fc_topology(struct lpfc_hba *phba, uint32_t fc_topology,
			       uint32_t flag)
{
	struct lpfc_vport *vport = phba->pport;
	LPFC_MBOXQ_t *pmb;
	MAILBOX_t *mb;
	int rc;

	pmb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!pmb) {
		phba->link_state = LPFC_HBA_ERROR;
		return -ENOMEM;
	}
	mb = &pmb->u.mb;
	pmb->vport = vport;

	if ((phba->cfg_link_speed > LPFC_USER_LINK_SPEED_MAX) ||
	    ((phba->cfg_link_speed == LPFC_USER_LINK_SPEED_1G) &&
	     !(phba->lmt & LMT_1Gb)) ||
	    ((phba->cfg_link_speed == LPFC_USER_LINK_SPEED_2G) &&
	     !(phba->lmt & LMT_2Gb)) ||
	    ((phba->cfg_link_speed == LPFC_USER_LINK_SPEED_4G) &&
	     !(phba->lmt & LMT_4Gb)) ||
	    ((phba->cfg_link_speed == LPFC_USER_LINK_SPEED_8G) &&
	     !(phba->lmt & LMT_8Gb)) ||
	    ((phba->cfg_link_speed == LPFC_USER_LINK_SPEED_10G) &&
	     !(phba->lmt & LMT_10Gb)) ||
	    ((phba->cfg_link_speed == LPFC_USER_LINK_SPEED_16G) &&
	     !(phba->lmt & LMT_16Gb))) {
		/* Reset link speed to auto */
		lpfc_printf_log(phba, KERN_ERR, LOG_LINK_EVENT,
			"1302 Invalid speed for this board:%d "
			"Reset link speed to auto.\n",
			phba->cfg_link_speed);
			phba->cfg_link_speed = LPFC_USER_LINK_SPEED_AUTO;
	}
	lpfc_init_link(phba, pmb, fc_topology, phba->cfg_link_speed);
	pmb->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
	if (phba->sli_rev < LPFC_SLI_REV4)
		lpfc_set_loopback_flag(phba);
	rc = lpfc_sli_issue_mbox(phba, pmb, flag);
	if ((rc != MBX_BUSY) && (rc != MBX_SUCCESS)) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
			"0498 Adapter failed to init, mbxCmd x%x "
			"INIT_LINK, mbxStatus x%x\n",
			mb->mbxCommand, mb->mbxStatus);
		if (phba->sli_rev <= LPFC_SLI_REV3) {
			/* Clear all interrupt enable conditions */
			writel(0, phba->HCregaddr);
			readl(phba->HCregaddr); /* flush */
			/* Clear all pending interrupts */
			writel(0xffffffff, phba->HAregaddr);
			readl(phba->HAregaddr); /* flush */
		}
		phba->link_state = LPFC_HBA_ERROR;
		if (rc != MBX_BUSY || flag == MBX_POLL)
			mempool_free(pmb, phba->mbox_mem_pool);
		return -EIO;
	}
	phba->cfg_suppress_link_up = LPFC_INITIALIZE_LINK;
	if (flag == MBX_POLL)
		mempool_free(pmb, phba->mbox_mem_pool);

	return 0;
}

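/**
 * lpfc_hba_down_link - Bring down the HBA link
 * @phba: pointer to lpfc hba data structure.
 * @flag: mailbox command issue mode, either MBX_NOWAIT or MBX_POLL
 *
 * Issues the DOWN_LINK mailbox command to bring the FC link down.
 *
 * Return codes: 0 on success, -ENOMEM or -EIO on failure.
 */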
int
lpfc_hba_down_link(struct lpfc_hba *phba, uint32_t flag)
{
	LPFC_MBOXQ_t *pmb;
	int rc;

	pmb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!pmb) {
		phba->link_state = LPFC_HBA_ERROR;
		return -ENOMEM;
	}

	lpfc_printf_log(phba,
		KERN_ERR, LOG_INIT,
		"0491 Adapter Link is disabled.\n");
	lpfc_down_link(phba, pmb);
	pmb->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
	rc = lpfc_sli_issue_mbox(phba, pmb, flag);
	if ((rc != MBX_SUCCESS) && (rc != MBX_BUSY)) {
		lpfc_printf_log(phba,
		KERN_ERR, LOG_INIT,
		"2522 Adapter failed to issue DOWN_LINK"
		" mbox command rc 0x%x\n", rc);

		mempool_free(pmb, phba->mbox_mem_pool);
		return -EIO;
	}
	if (flag == MBX_POLL)
		mempool_free(pmb, phba->mbox_mem_pool);

	return 0;
}

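/**
 * lpfc_hba_down_prep - Perform lpfc uninitialization prior to HBA reset
 * @phba: pointer to lpfc HBA data structure.
 *
 * Disables host interrupts (SLI-3 only) and cleans up the discovery
 * resources of every vport before the HBA is reset.
 */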
int
lpfc_hba_down_prep(struct lpfc_hba *phba)
{
	struct lpfc_vport **vports;
	int i;

	if (phba->sli_rev <= LPFC_SLI_REV3) {
		/* Disable interrupts */
		writel(0, phba->HCregaddr);
		readl(phba->HCregaddr); /* flush */
	}

	if (phba->pport->load_flag & FC_UNLOADING)
		lpfc_cleanup_discovery_resources(phba->pport);
	else {
		vports = lpfc_create_vport_work_array(phba);
		if (vports != NULL)
			for (i = 0; i <= phba->max_vports &&
				vports[i] != NULL; i++)
				lpfc_cleanup_discovery_resources(vports[i]);
		lpfc_destroy_vport_work_array(phba, vports);
	}
	return 0;
}

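/**
 * lpfc_hba_down_post_s3 - Perform lpfc uninitialization after HBA reset
 * @phba: pointer to lpfc HBA data structure.
 *
 * SLI-3 routine: frees posted receive buffers and cancels all
 * outstanding IOCBs on every ring.
 */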
static int
lpfc_hba_down_post_s3(struct lpfc_hba *phba)
{
	struct lpfc_sli *psli = &phba->sli;
	struct lpfc_sli_ring *pring;
	struct lpfc_dmabuf *mp, *next_mp;
	LIST_HEAD(completions);
	int i;

	if (phba->sli3_options & LPFC_SLI3_HBQ_ENABLED)
		lpfc_sli_hbqbuf_free_all(phba);
	else {
		/* Cleanup preposted buffers on the ELS ring */
		pring = &psli->ring[LPFC_ELS_RING];
		list_for_each_entry_safe(mp, next_mp, &pring->postbufq, list) {
			list_del(&mp->list);
			pring->postbufq_cnt--;
			lpfc_mbuf_free(phba, mp->virt, mp->phys);
			kfree(mp);
		}
	}

	spin_lock_irq(&phba->hbalock);
	for (i = 0; i < psli->num_rings; i++) {
		pring = &psli->ring[i];

		/* At this point in time the HBA is either reset or DOA. Either
		 * way, nothing should be on txcmplq as it will NEVER complete.
		 */
		list_splice_init(&pring->txcmplq, &completions);
		spin_unlock_irq(&phba->hbalock);

		/* Cancel all the IOCBs from the completions list */
		lpfc_sli_cancel_iocbs(phba, &completions, IOSTAT_LOCAL_REJECT,
				      IOERR_SLI_ABORTED);

		lpfc_sli_abort_iocb_ring(phba, pring);
		spin_lock_irq(&phba->hbalock);
	}
	spin_unlock_irq(&phba->hbalock);

	return 0;
}

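/**
 * lpfc_hba_down_post_s4 - Perform lpfc uninitialization after HBA reset
 * @phba: pointer to lpfc HBA data structure.
 *
 * SLI-4 routine: performs the SLI-3 cleanup, then returns aborted ELS
 * sgl entries and aborted SCSI buffers to their free lists.
 */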
static int
lpfc_hba_down_post_s4(struct lpfc_hba *phba)
{
	struct lpfc_scsi_buf *psb, *psb_next;
	LIST_HEAD(aborts);
	int ret;
	unsigned long iflag = 0;
	struct lpfc_sglq *sglq_entry = NULL;

	ret = lpfc_hba_down_post_s3(phba);
	if (ret)
		return ret;

	/* At this point in time the HBA is either reset or DOA. Either
	 * way, nothing should be on lpfc_abts_els_sgl_list, it needs to be
	 * on the lpfc_sgl_list so that it can either be freed if the
	 * driver is unloading or reposted if the driver is restarting
	 * the port.
	 */
	spin_lock_irq(&phba->hbalock);

	/* abts_sgl_list_lock required because worker thread uses this
	 * list.
	 */
	spin_lock(&phba->sli4_hba.abts_sgl_list_lock);
	list_for_each_entry(sglq_entry,
		&phba->sli4_hba.lpfc_abts_els_sgl_list, list)
		sglq_entry->state = SGL_FREED;

	list_splice_init(&phba->sli4_hba.lpfc_abts_els_sgl_list,
			&phba->sli4_hba.lpfc_sgl_list);
	spin_unlock(&phba->sli4_hba.abts_sgl_list_lock);

	/* abts_scsi_buf_list_lock required because worker thread uses this
	 * list.
	 */
	spin_lock(&phba->sli4_hba.abts_scsi_buf_list_lock);
	list_splice_init(&phba->sli4_hba.lpfc_abts_scsi_buf_list,
			&aborts);
	spin_unlock(&phba->sli4_hba.abts_scsi_buf_list_lock);
	spin_unlock_irq(&phba->hbalock);

	list_for_each_entry_safe(psb, psb_next, &aborts, list) {
		psb->pCmd = NULL;
		psb->status = IOSTAT_SUCCESS;
	}
	spin_lock_irqsave(&phba->scsi_buf_list_put_lock, iflag);
	list_splice(&aborts, &phba->lpfc_scsi_buf_list_put);
	spin_unlock_irqrestore(&phba->scsi_buf_list_put_lock, iflag);
	return 0;
}

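/**
 * lpfc_hba_down_post - Wrapper func for hba down post routine
 * @phba: pointer to lpfc HBA data structure.
 *
 * Invokes the SLI-3 or SLI-4 specific routine via the jump table.
 */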
int
lpfc_hba_down_post(struct lpfc_hba *phba)
{
	return (*phba->lpfc_hba_down_post)(phba);
}

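/**
 * lpfc_hb_timeout - The HBA-timer timeout handler
 * @ptr: unsigned long holds the pointer to lpfc hba data structure.
 *
 * Posts a WORKER_HB_TMO event to the worker thread if one is not
 * already pending, then wakes the worker thread to process it.
 */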
static void
lpfc_hb_timeout(unsigned long ptr)
{
	struct lpfc_hba *phba;
	uint32_t tmo_posted;
	unsigned long iflag;

	phba = (struct lpfc_hba *)ptr;

	/* Check for heart beat timeout conditions */
	spin_lock_irqsave(&phba->pport->work_port_lock, iflag);
	tmo_posted = phba->pport->work_port_events & WORKER_HB_TMO;
	if (!tmo_posted)
		phba->pport->work_port_events |= WORKER_HB_TMO;
	spin_unlock_irqrestore(&phba->pport->work_port_lock, iflag);

	/* Tell the worker thread there is work to do */
	if (!tmo_posted)
		lpfc_worker_wake_up(phba);
	return;
}

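/**
 * lpfc_rrq_timeout - The RRQ-timer timeout handler
 * @ptr: unsigned long holds the pointer to lpfc hba data structure.
 *
 * Marks the RRQ active and wakes the worker thread to process it.
 */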
static void
lpfc_rrq_timeout(unsigned long ptr)
{
	struct lpfc_hba *phba;
	unsigned long iflag;

	phba = (struct lpfc_hba *)ptr;
	spin_lock_irqsave(&phba->pport->work_port_lock, iflag);
	phba->hba_flag |= HBA_RRQ_ACTIVE;
	spin_unlock_irqrestore(&phba->pport->work_port_lock, iflag);
	lpfc_worker_wake_up(phba);
}

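/**
 * lpfc_hb_mbox_cmpl - The lpfc heart-beat mailbox command callback function
 * @phba: pointer to lpfc hba data structure.
 * @pmboxq: pointer to the driver internal queue element for mailbox command.
 *
 * Clears the outstanding heartbeat flag and restarts the heartbeat timer
 * unless the HBA is offline, in error, or unloading.
 */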
static void
lpfc_hb_mbox_cmpl(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmboxq)
{
	unsigned long drvr_flag;

	spin_lock_irqsave(&phba->hbalock, drvr_flag);
	phba->hb_outstanding = 0;
	spin_unlock_irqrestore(&phba->hbalock, drvr_flag);

	/* Check and reset heart-beat timer if necessary */
	mempool_free(pmboxq, phba->mbox_mem_pool);
	if (!(phba->pport->fc_flag & FC_OFFLINE_MODE) &&
		!(phba->link_state == LPFC_HBA_ERROR) &&
		!(phba->pport->load_flag & FC_UNLOADING))
		mod_timer(&phba->hb_tmofunc,
			  jiffies +
			  msecs_to_jiffies(1000 * LPFC_HB_MBOX_INTERVAL));
	return;
}

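/**
 * lpfc_hb_timeout_handler - The HBA-timer timeout handler
 * @phba: pointer to lpfc hba data structure.
 *
 * Checks for recent mailbox completions; if the HBA has been idle for a
 * full heartbeat interval, issues a heartbeat mailbox command so that
 * firmware failures are detected even when the port is otherwise idle.
 */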
void
lpfc_hb_timeout_handler(struct lpfc_hba *phba)
{
	struct lpfc_vport **vports;
	LPFC_MBOXQ_t *pmboxq;
	struct lpfc_dmabuf *buf_ptr;
	int retval, i;
	struct lpfc_sli *psli = &phba->sli;
	LIST_HEAD(completions);

	vports = lpfc_create_vport_work_array(phba);
	if (vports != NULL)
		for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++)
			lpfc_rcv_seq_check_edtov(vports[i]);
	lpfc_destroy_vport_work_array(phba, vports);

	if ((phba->link_state == LPFC_HBA_ERROR) ||
		(phba->pport->load_flag & FC_UNLOADING) ||
		(phba->pport->fc_flag & FC_OFFLINE_MODE))
		return;

	spin_lock_irq(&phba->pport->work_port_lock);

	if (time_after(phba->last_completion_time +
			msecs_to_jiffies(1000 * LPFC_HB_MBOX_INTERVAL),
			jiffies)) {
		spin_unlock_irq(&phba->pport->work_port_lock);
		if (!phba->hb_outstanding)
			mod_timer(&phba->hb_tmofunc,
				jiffies +
				msecs_to_jiffies(1000 * LPFC_HB_MBOX_INTERVAL));
		else
			mod_timer(&phba->hb_tmofunc,
				jiffies +
				msecs_to_jiffies(1000 * LPFC_HB_MBOX_TIMEOUT));
		return;
	}
	spin_unlock_irq(&phba->pport->work_port_lock);

	if (phba->elsbuf_cnt &&
		(phba->elsbuf_cnt == phba->elsbuf_prev_cnt)) {
		spin_lock_irq(&phba->hbalock);
		list_splice_init(&phba->elsbuf, &completions);
		phba->elsbuf_cnt = 0;
		phba->elsbuf_prev_cnt = 0;
		spin_unlock_irq(&phba->hbalock);

		while (!list_empty(&completions)) {
			list_remove_head(&completions, buf_ptr,
				struct lpfc_dmabuf, list);
			lpfc_mbuf_free(phba, buf_ptr->virt, buf_ptr->phys);
			kfree(buf_ptr);
		}
	}
	phba->elsbuf_prev_cnt = phba->elsbuf_cnt;

	/* If there is no heart beat outstanding, issue a heartbeat command */
	if (phba->cfg_enable_hba_heartbeat) {
		if (!phba->hb_outstanding) {
			if ((!(psli->sli_flag & LPFC_SLI_MBOX_ACTIVE)) &&
				(list_empty(&psli->mboxq))) {
				pmboxq = mempool_alloc(phba->mbox_mem_pool,
							GFP_KERNEL);
				if (!pmboxq) {
					mod_timer(&phba->hb_tmofunc,
						 jiffies +
						 msecs_to_jiffies(1000 *
						 LPFC_HB_MBOX_INTERVAL));
					return;
				}

				lpfc_heart_beat(phba, pmboxq);
				pmboxq->mbox_cmpl = lpfc_hb_mbox_cmpl;
				pmboxq->vport = phba->pport;
				retval = lpfc_sli_issue_mbox(phba, pmboxq,
						MBX_NOWAIT);

				if (retval != MBX_BUSY &&
					retval != MBX_SUCCESS) {
					mempool_free(pmboxq,
							phba->mbox_mem_pool);
					mod_timer(&phba->hb_tmofunc,
						jiffies +
						msecs_to_jiffies(1000 *
						LPFC_HB_MBOX_INTERVAL));
					return;
				}
				phba->skipped_hb = 0;
				phba->hb_outstanding = 1;
			} else if (time_before_eq(phba->last_completion_time,
					phba->skipped_hb)) {
				lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
					"2857 Last completion time not "
					"updated in %d ms\n",
					jiffies_to_msecs(jiffies
						 - phba->last_completion_time));
			} else
				phba->skipped_hb = jiffies;

			mod_timer(&phba->hb_tmofunc,
				 jiffies +
				 msecs_to_jiffies(1000 * LPFC_HB_MBOX_TIMEOUT));
			return;
		} else {
			/* If heart beat timeout called with hb_outstanding set
			 * we need to give the hb mailbox cmd a chance to
			 * complete or TMO.
			 */
			lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
					"0459 Adapter heartbeat still "
					"outstanding: last compl time was "
					"%d ms.\n",
					jiffies_to_msecs(jiffies
						 - phba->last_completion_time));
			mod_timer(&phba->hb_tmofunc,
				jiffies +
				msecs_to_jiffies(1000 * LPFC_HB_MBOX_TIMEOUT));
		}
	}
}

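/**
 * lpfc_offline_eratt - Bring lpfc offline on hardware error attention
 * @phba: pointer to lpfc hba data structure.
 *
 * Takes the SLI-3 HBA offline and resets it after an error attention.
 */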
static void
lpfc_offline_eratt(struct lpfc_hba *phba)
{
	struct lpfc_sli *psli = &phba->sli;

	spin_lock_irq(&phba->hbalock);
	psli->sli_flag &= ~LPFC_SLI_ACTIVE;
	spin_unlock_irq(&phba->hbalock);
	lpfc_offline_prep(phba, LPFC_MBX_NO_WAIT);

	lpfc_offline(phba);
	lpfc_reset_barrier(phba);
	spin_lock_irq(&phba->hbalock);
	lpfc_sli_brdreset(phba);
	spin_unlock_irq(&phba->hbalock);
	lpfc_hba_down_post(phba);
	lpfc_sli_brdready(phba, HS_MBRDY);
	lpfc_unblock_mgmt_io(phba);
	phba->link_state = LPFC_HBA_ERROR;
	return;
}

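/**
 * lpfc_sli4_offline_eratt - Bring lpfc offline on SLI4 hw error attention
 * @phba: pointer to lpfc hba data structure.
 *
 * Takes the SLI-4 HBA offline and resets it after an error attention.
 */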
void
lpfc_sli4_offline_eratt(struct lpfc_hba *phba)
{
	lpfc_offline_prep(phba, LPFC_MBX_NO_WAIT);
	lpfc_offline(phba);
	lpfc_sli4_brdreset(phba);
	lpfc_hba_down_post(phba);
	lpfc_sli4_post_status_check(phba);
	lpfc_unblock_mgmt_io(phba);
	phba->link_state = LPFC_HBA_ERROR;
}

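/**
 * lpfc_handle_deferred_eratt - The HBA hardware deferred error handler
 * @phba: pointer to lpfc hba data structure.
 *
 * Handles a deferred hardware error attention: aborts outstanding FCP
 * I/O, takes the port offline, and waits for the error state (HS_FFER1)
 * to clear before restoring the host status.
 */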
static void
lpfc_handle_deferred_eratt(struct lpfc_hba *phba)
{
	uint32_t old_host_status = phba->work_hs;
	struct lpfc_sli_ring *pring;
	struct lpfc_sli *psli = &phba->sli;

	/* If the pci channel is offline, ignore possible errors,
	 * since we cannot communicate with the pci card anyway.
	 */
	if (pci_channel_offline(phba->pcidev)) {
		spin_lock_irq(&phba->hbalock);
		phba->hba_flag &= ~DEFER_ERATT;
		spin_unlock_irq(&phba->hbalock);
		return;
	}

	lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
		"0479 Deferred Adapter Hardware Error "
		"Data: x%x x%x x%x\n",
		phba->work_hs,
		phba->work_status[0], phba->work_status[1]);

	spin_lock_irq(&phba->hbalock);
	psli->sli_flag &= ~LPFC_SLI_ACTIVE;
	spin_unlock_irq(&phba->hbalock);

	/*
	 * Firmware stops when it triggered erratt. That could cause the I/Os
	 * dropped by the firmware. Error iocb (I/O) on txcmplq and let the
	 * SCSI layer retry it after re-establishing link.
	 */
	pring = &psli->ring[psli->fcp_ring];
	lpfc_sli_abort_iocb_ring(phba, pring);

	/*
	 * There was a firmware error. Take the hba offline and then
	 * attempt to restart it.
	 */
	lpfc_offline_prep(phba, LPFC_MBX_WAIT);
	lpfc_offline(phba);

	/* Wait for the ER1 bit to clear. */
	while (phba->work_hs & HS_FFER1) {
		msleep(100);
		if (lpfc_readl(phba->HSregaddr, &phba->work_hs)) {
			phba->work_hs = UNPLUG_ERR;
			break;
		}
		/* If driver is unloading let the worker thread continue */
		if (phba->pport->load_flag & FC_UNLOADING) {
			phba->work_hs = 0;
			break;
		}
	}

	/*
	 * This is to protect against a race condition in which the first
	 * write to the host attention register clears the host status
	 * register.
	 */
	if ((!phba->work_hs) && (!(phba->pport->load_flag & FC_UNLOADING)))
		phba->work_hs = old_host_status & ~HS_FFER1;

	spin_lock_irq(&phba->hbalock);
	phba->hba_flag &= ~DEFER_ERATT;
	spin_unlock_irq(&phba->hbalock);
	phba->work_status[0] = readl(phba->MBslimaddr + 0xa8);
	phba->work_status[1] = readl(phba->MBslimaddr + 0xac);
}

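/**
 * lpfc_board_errevt_to_mgmt - Post a board error event to management app
 * @phba: pointer to lpfc hba data structure.
 *
 * Posts an FC_REG_BOARD_EVENT vendor event so management applications
 * are told about the port error.
 */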
static void
lpfc_board_errevt_to_mgmt(struct lpfc_hba *phba)
{
	struct lpfc_board_event_header board_event;
	struct Scsi_Host *shost;

	board_event.event_type = FC_REG_BOARD_EVENT;
	board_event.subcategory = LPFC_EVENT_PORTINTERR;
	shost = lpfc_shost_from_vport(phba->pport);
	fc_host_post_vendor_event(shost, fc_get_event_number(),
				  sizeof(board_event),
				  (char *) &board_event,
				  LPFC_NL_VENDOR_ID);
}

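/**
 * lpfc_handle_eratt_s3 - The SLI3 HBA hardware error handler
 * @phba: pointer to lpfc hba data structure.
 *
 * Handles error attention for SLI-3: restarts the HBA on recoverable
 * errors (HS_FFER6/HS_FFER8), takes the port offline on critical
 * temperature, and posts a dump vendor event otherwise.
 */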
static void
lpfc_handle_eratt_s3(struct lpfc_hba *phba)
{
	struct lpfc_vport *vport = phba->pport;
	struct lpfc_sli *psli = &phba->sli;
	struct lpfc_sli_ring *pring;
	uint32_t event_data;
	unsigned long temperature;
	struct temp_event temp_event_data;
	struct Scsi_Host *shost;

	/* If the pci channel is offline, ignore possible errors,
	 * since we cannot communicate with the pci card anyway.
	 */
	if (pci_channel_offline(phba->pcidev)) {
		spin_lock_irq(&phba->hbalock);
		phba->hba_flag &= ~DEFER_ERATT;
		spin_unlock_irq(&phba->hbalock);
		return;
	}

	/* If resets are disabled then leave the HBA alone and return */
	if (!phba->cfg_enable_hba_reset)
		return;

	/* Send an internal error event to mgmt application */
	lpfc_board_errevt_to_mgmt(phba);

	if (phba->hba_flag & DEFER_ERATT)
		lpfc_handle_deferred_eratt(phba);

	if ((phba->work_hs & HS_FFER6) || (phba->work_hs & HS_FFER8)) {
		if (phba->work_hs & HS_FFER6)
			/* Re-establishing Link */
			lpfc_printf_log(phba, KERN_INFO, LOG_LINK_EVENT,
					"1301 Re-establishing Link "
					"Data: x%x x%x x%x\n",
					phba->work_hs, phba->work_status[0],
					phba->work_status[1]);
		if (phba->work_hs & HS_FFER8)
			/* Device Zeroization */
			lpfc_printf_log(phba, KERN_INFO, LOG_LINK_EVENT,
					"2861 Host Authentication device "
					"zeroization Data:x%x x%x x%x\n",
					phba->work_hs, phba->work_status[0],
					phba->work_status[1]);

		spin_lock_irq(&phba->hbalock);
		psli->sli_flag &= ~LPFC_SLI_ACTIVE;
		spin_unlock_irq(&phba->hbalock);

		/*
		 * Firmware stops when it triggered erratt with HS_FFER6.
		 * That could cause the I/Os dropped by the firmware.
		 * Error iocb (I/O) on txcmplq and let the SCSI layer
		 * retry it after re-establishing link.
		 */
		pring = &psli->ring[psli->fcp_ring];
		lpfc_sli_abort_iocb_ring(phba, pring);

		/*
		 * There was a firmware error. Take the hba offline and then
		 * attempt to restart it.
		 */
		lpfc_offline_prep(phba, LPFC_MBX_NO_WAIT);
		lpfc_offline(phba);
		lpfc_sli_brdrestart(phba);
		if (lpfc_online(phba) == 0) {	/* Initialize the HBA */
			lpfc_unblock_mgmt_io(phba);
			return;
		}
		lpfc_unblock_mgmt_io(phba);
	} else if (phba->work_hs & HS_CRIT_TEMP) {
		temperature = readl(phba->MBslimaddr + TEMPERATURE_OFFSET);
		temp_event_data.event_type = FC_REG_TEMPERATURE_EVENT;
		temp_event_data.event_code = LPFC_CRIT_TEMP;
		temp_event_data.data = (uint32_t)temperature;

		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"0406 Adapter maximum temperature exceeded "
				"(%ld), taking this port offline "
				"Data: x%x x%x x%x\n",
				temperature, phba->work_hs,
				phba->work_status[0], phba->work_status[1]);

		shost = lpfc_shost_from_vport(phba->pport);
		fc_host_post_vendor_event(shost, fc_get_event_number(),
					  sizeof(temp_event_data),
					  (char *) &temp_event_data,
					  SCSI_NL_VID_TYPE_PCI
					  | PCI_VENDOR_ID_EMULEX);

		spin_lock_irq(&phba->hbalock);
		phba->over_temp_state = HBA_OVER_TEMP;
		spin_unlock_irq(&phba->hbalock);
		lpfc_offline_eratt(phba);

	} else {
		/* The if clause above forces this code path when the status
		 * failure is a value other than FFER6. Do not call the offline
		 * twice. This is the adapter hardware error path.
		 */
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"0457 Adapter Hardware Error "
				"Data: x%x x%x x%x\n",
				phba->work_hs,
				phba->work_status[0], phba->work_status[1]);

		event_data = FC_REG_DUMP_EVENT;
		shost = lpfc_shost_from_vport(vport);
		fc_host_post_vendor_event(shost, fc_get_event_number(),
				sizeof(event_data), (char *) &event_data,
				SCSI_NL_VID_TYPE_PCI | PCI_VENDOR_ID_EMULEX);

		lpfc_offline_eratt(phba);
	}
	return;
}

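/**
 * lpfc_sli4_port_sta_fn_reset - The SLI4 function reset due to port status
 * @phba: pointer to lpfc hba data structure.
 * @mbx_action: flag for mailbox shutdown action.
 *
 * Waits for the port status register to report that a reset is needed,
 * then performs a function reset and brings the port back online.
 */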
static int
lpfc_sli4_port_sta_fn_reset(struct lpfc_hba *phba, int mbx_action)
{
	int rc;
	uint32_t intr_mode;

	/* Wait for the device to report that a reset is needed */
	rc = lpfc_sli4_pdev_status_reg_wait(phba);
	if (!rc) {
		/* need reset: attempt the port recovery */
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"2887 Reset Needed: Attempting Port "
				"Recovery...\n");
		lpfc_offline_prep(phba, mbx_action);
		lpfc_offline(phba);
		/* release interrupt for possible resource change */
		lpfc_sli4_disable_intr(phba);
		lpfc_sli_brdrestart(phba);
		/* request and enable interrupt */
		intr_mode = lpfc_sli4_enable_intr(phba, phba->intr_mode);
		if (intr_mode == LPFC_INTR_ERROR) {
			lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
					"3175 Failed to enable interrupt\n");
			return -EIO;
		} else {
			phba->intr_mode = intr_mode;
		}
		rc = lpfc_online(phba);
		if (rc == 0)
			lpfc_unblock_mgmt_io(phba);
	}
	return rc;
}

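/**
 * lpfc_handle_eratt_s4 - The SLI4 HBA hardware error handler
 * @phba: pointer to lpfc hba data structure.
 *
 * Handles error attention for SLI-4 according to the interface type:
 * decodes the port status and error registers, attempts port recovery,
 * and posts a dump vendor event when the error is unrecoverable.
 */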
static void
lpfc_handle_eratt_s4(struct lpfc_hba *phba)
{
	struct lpfc_vport *vport = phba->pport;
	uint32_t event_data;
	struct Scsi_Host *shost;
	uint32_t if_type;
	struct lpfc_register portstat_reg = {0};
	uint32_t reg_err1, reg_err2;
	uint32_t uerrlo_reg, uemasklo_reg;
	uint32_t pci_rd_rc1, pci_rd_rc2;
	int rc;

	/* If the pci channel is offline, ignore possible errors, since
	 * we cannot communicate with the pci card anyway.
	 */
	if (pci_channel_offline(phba->pcidev))
		return;
	/* If resets are disabled then leave the HBA alone and return */
	if (!phba->cfg_enable_hba_reset)
		return;

	if_type = bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf);
	switch (if_type) {
	case LPFC_SLI_INTF_IF_TYPE_0:
		pci_rd_rc1 = lpfc_readl(
				phba->sli4_hba.u.if_type0.UERRLOregaddr,
				&uerrlo_reg);
		pci_rd_rc2 = lpfc_readl(
				phba->sli4_hba.u.if_type0.UEMASKLOregaddr,
				&uemasklo_reg);
		/* consider PCI bus read error as pci device been removed */
		if (pci_rd_rc1 == -EIO && pci_rd_rc2 == -EIO)
			return;
		lpfc_sli4_offline_eratt(phba);
		break;
	case LPFC_SLI_INTF_IF_TYPE_2:
		pci_rd_rc1 = lpfc_readl(
				phba->sli4_hba.u.if_type2.STATUSregaddr,
				&portstat_reg.word0);
		/* consider PCI bus read error as pci device been removed */
		if (pci_rd_rc1 == -EIO) {
			lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"3151 PCI bus read access failure: x%x\n",
				readl(phba->sli4_hba.u.if_type2.STATUSregaddr));
			return;
		}
		reg_err1 = readl(phba->sli4_hba.u.if_type2.ERR1regaddr);
		reg_err2 = readl(phba->sli4_hba.u.if_type2.ERR2regaddr);
		if (bf_get(lpfc_sliport_status_oti, &portstat_reg)) {
			/* Port is over temperature; take it offline */
			lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"2889 Port Overtemperature event, "
				"taking port offline\n");
			spin_lock_irq(&phba->hbalock);
			phba->over_temp_state = HBA_OVER_TEMP;
			spin_unlock_irq(&phba->hbalock);
			lpfc_sli4_offline_eratt(phba);
			break;
		}
		if (reg_err1 == SLIPORT_ERR1_REG_ERR_CODE_2 &&
		    reg_err2 == SLIPORT_ERR2_REG_FW_RESTART)
			lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
					"3143 Port Down: Firmware Restarted\n");
		else if (reg_err1 == SLIPORT_ERR1_REG_ERR_CODE_2 &&
			 reg_err2 == SLIPORT_ERR2_REG_FORCED_DUMP)
			lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
					"3144 Port Down: Debug Dump\n");
		else if (reg_err1 == SLIPORT_ERR1_REG_ERR_CODE_2 &&
			 reg_err2 == SLIPORT_ERR2_REG_FUNC_PROVISON)
			lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
					"3145 Port Down: Provisioning\n");

		/* Check port status register for function reset */
		rc = lpfc_sli4_port_sta_fn_reset(phba, LPFC_MBX_NO_WAIT);
		if (rc == 0) {
			/* don't report event on forced debug dump */
			if (reg_err1 == SLIPORT_ERR1_REG_ERR_CODE_2 &&
			    reg_err2 == SLIPORT_ERR2_REG_FORCED_DUMP)
				return;
			else
				break;
		}
		/* fall through for not able to recover */
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"3152 Unrecoverable error, bring the port "
				"offline\n");
		lpfc_sli4_offline_eratt(phba);
		break;
	case LPFC_SLI_INTF_IF_TYPE_1:
	default:
		break;
	}
	lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
			"3123 Report dump event to upper layer\n");
	/* Send an internal error event to mgmt application */
	lpfc_board_errevt_to_mgmt(phba);

	event_data = FC_REG_DUMP_EVENT;
	shost = lpfc_shost_from_vport(vport);
	fc_host_post_vendor_event(shost, fc_get_event_number(),
				  sizeof(event_data), (char *) &event_data,
				  SCSI_NL_VID_TYPE_PCI | PCI_VENDOR_ID_EMULEX);
}

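/**
 * lpfc_handle_eratt - Wrapper func for handling hba error attention
 * @phba: pointer to lpfc HBA data structure.
 *
 * Invokes the SLI-3 or SLI-4 specific error attention handler via the
 * jump table.
 */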
void
lpfc_handle_eratt(struct lpfc_hba *phba)
{
	(*phba->lpfc_handle_eratt)(phba);
}

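/**
 * lpfc_handle_latt - The HBA link event handler
 * @phba: pointer to lpfc hba data structure.
 *
 * Issues a READ_TOPOLOGY mailbox command in response to a link attention
 * event; on failure, forces the link down and marks the HBA in error.
 */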
void
lpfc_handle_latt(struct lpfc_hba *phba)
{
	struct lpfc_vport *vport = phba->pport;
	struct lpfc_sli *psli = &phba->sli;
	LPFC_MBOXQ_t *pmb;
	volatile uint32_t control;
	struct lpfc_dmabuf *mp;
	int rc = 0;

	pmb = (LPFC_MBOXQ_t *)mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!pmb) {
		rc = 1;
		goto lpfc_handle_latt_err_exit;
	}

	mp = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
	if (!mp) {
		rc = 2;
		goto lpfc_handle_latt_free_pmb;
	}

	mp->virt = lpfc_mbuf_alloc(phba, 0, &mp->phys);
	if (!mp->virt) {
		rc = 3;
		goto lpfc_handle_latt_free_mp;
	}

	/* Cleanup any outstanding ELS commands */
	lpfc_els_flush_all_cmd(phba);

	psli->slistat.link_event++;
	lpfc_read_topology(phba, pmb, mp);
	pmb->mbox_cmpl = lpfc_mbx_cmpl_read_topology;
	pmb->vport = vport;
	/* Block ELS IOCBs until we have processed this mbox command */
	phba->sli.ring[LPFC_ELS_RING].flag |= LPFC_STOP_IOCB_EVENT;
	rc = lpfc_sli_issue_mbox (phba, pmb, MBX_NOWAIT);
	if (rc == MBX_NOT_FINISHED) {
		rc = 4;
		goto lpfc_handle_latt_free_mbuf;
	}

	/* Clear Link Attention in HA REG */
	spin_lock_irq(&phba->hbalock);
	writel(HA_LATT, phba->HAregaddr);
	readl(phba->HAregaddr); /* flush */
	spin_unlock_irq(&phba->hbalock);

	return;

lpfc_handle_latt_free_mbuf:
	phba->sli.ring[LPFC_ELS_RING].flag &= ~LPFC_STOP_IOCB_EVENT;
	lpfc_mbuf_free(phba, mp->virt, mp->phys);
lpfc_handle_latt_free_mp:
	kfree(mp);
lpfc_handle_latt_free_pmb:
	mempool_free(pmb, phba->mbox_mem_pool);
lpfc_handle_latt_err_exit:
	/* Enable Link attention interrupts */
	spin_lock_irq(&phba->hbalock);
	psli->sli_flag |= LPFC_PROCESS_LA;
	control = readl(phba->HCregaddr);
	control |= HC_LAINT_ENA;
	writel(control, phba->HCregaddr);
	readl(phba->HCregaddr); /* flush */

	/* Clear Link Attention in HA REG */
	writel(HA_LATT, phba->HAregaddr);
	readl(phba->HAregaddr); /* flush */
	spin_unlock_irq(&phba->hbalock);
	lpfc_linkdown(phba);
	phba->link_state = LPFC_HBA_ERROR;

	lpfc_printf_log(phba, KERN_ERR, LOG_MBOX,
		     "0300 LATT: Cannot issue READ_LA: Data:%d\n", rc);

	return;
}

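/**
 * lpfc_parse_vpd - Parse VPD (Vital Product Data)
 * @phba: pointer to lpfc hba data structure.
 * @vpd: pointer to the vital product data.
 * @len: length of the vital product data in bytes.
 *
 * Walks the VPD descriptors and extracts the serial number, model name
 * and description, program type, and port identifier.
 */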
int
lpfc_parse_vpd(struct lpfc_hba *phba, uint8_t *vpd, int len)
{
	uint8_t lenlo, lenhi;
	int Length;
	int i, j;
	int finished = 0;
	int index = 0;

	if (!vpd)
		return 0;

	/* Vital Product */
	lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
			"0455 Vital Product Data: x%x x%x x%x x%x\n",
			(uint32_t) vpd[0], (uint32_t) vpd[1], (uint32_t) vpd[2],
			(uint32_t) vpd[3]);
	while (!finished && (index < (len - 4))) {
		switch (vpd[index]) {
		case 0x82:
		case 0x91:
			index += 1;
			lenlo = vpd[index];
			index += 1;
			lenhi = vpd[index];
			index += 1;
			i = ((((unsigned short)lenhi) << 8) + lenlo);
			index += i;
			break;
		case 0x90:
			index += 1;
			lenlo = vpd[index];
			index += 1;
			lenhi = vpd[index];
			index += 1;
			Length = ((((unsigned short)lenhi) << 8) + lenlo);
			if (Length > len - index)
				Length = len - index;
			while (Length > 0) {
			/* Look for Serial Number */
			if ((vpd[index] == 'S') && (vpd[index+1] == 'N')) {
				index += 2;
				i = vpd[index];
				index += 1;
				j = 0;
				Length -= (3+i);
				while (i--) {
					phba->SerialNumber[j++] = vpd[index++];
					if (j == 31)
						break;
				}
				phba->SerialNumber[j] = 0;
				continue;
			}
			else if ((vpd[index] == 'V') && (vpd[index+1] == '1')) {
				phba->vpd_flag |= VPD_MODEL_DESC;
				index += 2;
				i = vpd[index];
				index += 1;
				j = 0;
				Length -= (3+i);
				while (i--) {
					phba->ModelDesc[j++] = vpd[index++];
					if (j == 255)
						break;
				}
				phba->ModelDesc[j] = 0;
				continue;
			}
			else if ((vpd[index] == 'V') && (vpd[index+1] == '2')) {
				phba->vpd_flag |= VPD_MODEL_NAME;
				index += 2;
				i = vpd[index];
				index += 1;
				j = 0;
				Length -= (3+i);
				while (i--) {
					phba->ModelName[j++] = vpd[index++];
					if (j == 79)
						break;
				}
				phba->ModelName[j] = 0;
				continue;
			}
			else if ((vpd[index] == 'V') && (vpd[index+1] == '3')) {
				phba->vpd_flag |= VPD_PROGRAM_TYPE;
				index += 2;
				i = vpd[index];
				index += 1;
				j = 0;
				Length -= (3+i);
				while (i--) {
					phba->ProgramType[j++] = vpd[index++];
					if (j == 255)
						break;
				}
				phba->ProgramType[j] = 0;
				continue;
			}
			else if ((vpd[index] == 'V') && (vpd[index+1] == '4')) {
				phba->vpd_flag |= VPD_PORT;
				index += 2;
				i = vpd[index];
				index += 1;
				j = 0;
				Length -= (3+i);
				while (i--) {
					if ((phba->sli_rev == LPFC_SLI_REV4) &&
					    (phba->sli4_hba.pport_name_sta ==
					     LPFC_SLI4_PPNAME_GET)) {
						j++;
						index++;
					} else
						phba->Port[j++] = vpd[index++];
					if (j == 19)
						break;
				}
				if ((phba->sli_rev != LPFC_SLI_REV4) ||
				    (phba->sli4_hba.pport_name_sta ==
				     LPFC_SLI4_PPNAME_NON))
					phba->Port[j] = 0;
				continue;
			}
			else {
				index += 2;
				i = vpd[index];
				index += 1;
				index += i;
				Length -= (3 + i);
			}
		}
		finished = 0;
		break;
		case 0x78:
			finished = 1;
			break;
		default:
			index++;
			break;
		}
	}

	return 1;
}

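/**
 * lpfc_get_hba_model_desc - Retrieve HBA device model name and description
 * @phba: pointer to lpfc hba data structure.
 * @mdp: pointer to the data structure to hold the derived model name.
 * @descp: pointer to the data structure to hold the derived description.
 *
 * Derives the model name and description from the PCI device ID when the
 * VPD did not already supply them.
 */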
static void
lpfc_get_hba_model_desc(struct lpfc_hba *phba, uint8_t *mdp, uint8_t *descp)
{
	lpfc_vpd_t *vp;
	uint16_t dev_id = phba->pcidev->device;
	int max_speed;
	int GE = 0;
	int oneConnect = 0; /* default is not a oneConnect */
	struct {
		char *name;
		char *bus;
		char *function;
	} m = {"<Unknown>", "", ""};

	if (mdp && mdp[0] != '\0'
		&& descp && descp[0] != '\0')
		return;

	if (phba->lmt & LMT_16Gb)
		max_speed = 16;
	else if (phba->lmt & LMT_10Gb)
		max_speed = 10;
	else if (phba->lmt & LMT_8Gb)
		max_speed = 8;
	else if (phba->lmt & LMT_4Gb)
		max_speed = 4;
	else if (phba->lmt & LMT_2Gb)
		max_speed = 2;
	else if (phba->lmt & LMT_1Gb)
		max_speed = 1;
	else
		max_speed = 0;

	vp = &phba->vpd;

	switch (dev_id) {
	case PCI_DEVICE_ID_FIREFLY:
		m = (typeof(m)){"LP6000", "PCI", "Fibre Channel Adapter"};
		break;
	case PCI_DEVICE_ID_SUPERFLY:
		if (vp->rev.biuRev >= 1 && vp->rev.biuRev <= 3)
			m = (typeof(m)){"LP7000", "PCI",
					"Fibre Channel Adapter"};
		else
			m = (typeof(m)){"LP7000E", "PCI",
					"Fibre Channel Adapter"};
		break;
	case PCI_DEVICE_ID_DRAGONFLY:
		m = (typeof(m)){"LP8000", "PCI",
				"Fibre Channel Adapter"};
		break;
	case PCI_DEVICE_ID_CENTAUR:
		if (FC_JEDEC_ID(vp->rev.biuRev) == CENTAUR_2G_JEDEC_ID)
			m = (typeof(m)){"LP9002", "PCI",
					"Fibre Channel Adapter"};
		else
			m = (typeof(m)){"LP9000", "PCI",
					"Fibre Channel Adapter"};
		break;
	case PCI_DEVICE_ID_RFLY:
		m = (typeof(m)){"LP952", "PCI",
				"Fibre Channel Adapter"};
		break;
	case PCI_DEVICE_ID_PEGASUS:
		m = (typeof(m)){"LP9802", "PCI-X",
				"Fibre Channel Adapter"};
		break;
	case PCI_DEVICE_ID_THOR:
		m = (typeof(m)){"LP10000", "PCI-X",
				"Fibre Channel Adapter"};
		break;
	case PCI_DEVICE_ID_VIPER:
		m = (typeof(m)){"LPX1000",  "PCI-X",
				"Fibre Channel Adapter"};
		break;
	case PCI_DEVICE_ID_PFLY:
		m = (typeof(m)){"LP982", "PCI-X",
				"Fibre Channel Adapter"};
		break;
	case PCI_DEVICE_ID_TFLY:
		m = (typeof(m)){"LP1050", "PCI-X",
				"Fibre Channel Adapter"};
		break;
	case PCI_DEVICE_ID_HELIOS:
		m = (typeof(m)){"LP11000", "PCI-X2",
				"Fibre Channel Adapter"};
		break;
	case PCI_DEVICE_ID_HELIOS_SCSP:
		m = (typeof(m)){"LP11000-SP", "PCI-X2",
				"Fibre Channel Adapter"};
		break;
	case PCI_DEVICE_ID_HELIOS_DCSP:
		m = (typeof(m)){"LP11002-SP",  "PCI-X2",
				"Fibre Channel Adapter"};
		break;
	case PCI_DEVICE_ID_NEPTUNE:
		m = (typeof(m)){"LPe1000", "PCIe", "Fibre Channel Adapter"};
		break;
	case PCI_DEVICE_ID_NEPTUNE_SCSP:
		m = (typeof(m)){"LPe1000-SP", "PCIe", "Fibre Channel Adapter"};
		break;
	case PCI_DEVICE_ID_NEPTUNE_DCSP:
		m = (typeof(m)){"LPe1002-SP", "PCIe", "Fibre Channel Adapter"};
		break;
	case PCI_DEVICE_ID_BMID:
		m = (typeof(m)){"LP1150", "PCI-X2", "Fibre Channel Adapter"};
		break;
	case PCI_DEVICE_ID_BSMB:
		m = (typeof(m)){"LP111", "PCI-X2", "Fibre Channel Adapter"};
		break;
	case PCI_DEVICE_ID_ZEPHYR:
		m = (typeof(m)){"LPe11000", "PCIe", "Fibre Channel Adapter"};
		break;
	case PCI_DEVICE_ID_ZEPHYR_SCSP:
		m = (typeof(m)){"LPe11000", "PCIe", "Fibre Channel Adapter"};
		break;
	case PCI_DEVICE_ID_ZEPHYR_DCSP:
		m = (typeof(m)){"LP2105", "PCIe", "FCoE Adapter"};
		GE = 1;
		break;
	case PCI_DEVICE_ID_ZMID:
		m = (typeof(m)){"LPe1150", "PCIe", "Fibre Channel Adapter"};
		break;
	case PCI_DEVICE_ID_ZSMB:
		m = (typeof(m)){"LPe111", "PCIe", "Fibre Channel Adapter"};
		break;
	case PCI_DEVICE_ID_LP101:
		m = (typeof(m)){"LP101", "PCI-X", "Fibre Channel Adapter"};
		break;
	case PCI_DEVICE_ID_LP10000S:
		m = (typeof(m)){"LP10000-S", "PCI", "Fibre Channel Adapter"};
		break;
	case PCI_DEVICE_ID_LP11000S:
		m = (typeof(m)){"LP11000-S", "PCI-X2", "Fibre Channel Adapter"};
		break;
	case PCI_DEVICE_ID_LPE11000S:
		m = (typeof(m)){"LPe11000-S", "PCIe", "Fibre Channel Adapter"};
		break;
	case PCI_DEVICE_ID_SAT:
		m = (typeof(m)){"LPe12000", "PCIe", "Fibre Channel Adapter"};
		break;
	case PCI_DEVICE_ID_SAT_MID:
		m = (typeof(m)){"LPe1250", "PCIe", "Fibre Channel Adapter"};
		break;
	case PCI_DEVICE_ID_SAT_SMB:
		m = (typeof(m)){"LPe121", "PCIe", "Fibre Channel Adapter"};
		break;
	case PCI_DEVICE_ID_SAT_DCSP:
		m = (typeof(m)){"LPe12002-SP", "PCIe", "Fibre Channel Adapter"};
		break;
	case PCI_DEVICE_ID_SAT_SCSP:
		m = (typeof(m)){"LPe12000-SP", "PCIe", "Fibre Channel Adapter"};
		break;
	case PCI_DEVICE_ID_SAT_S:
		m = (typeof(m)){"LPe12000-S", "PCIe", "Fibre Channel Adapter"};
		break;
	case PCI_DEVICE_ID_HORNET:
		m = (typeof(m)){"LP21000", "PCIe", "FCoE Adapter"};
		GE = 1;
		break;
	case PCI_DEVICE_ID_PROTEUS_VF:
		m = (typeof(m)){"LPev12000", "PCIe IOV",
				"Fibre Channel Adapter"};
		break;
	case PCI_DEVICE_ID_PROTEUS_PF:
		m = (typeof(m)){"LPev12000", "PCIe IOV",
				"Fibre Channel Adapter"};
		break;
	case PCI_DEVICE_ID_PROTEUS_S:
		m = (typeof(m)){"LPemv12002-S", "PCIe IOV",
				"Fibre Channel Adapter"};
		break;
	case PCI_DEVICE_ID_TIGERSHARK:
		oneConnect = 1;
		m = (typeof(m)){"OCe10100", "PCIe", "FCoE"};
		break;
	case PCI_DEVICE_ID_TOMCAT:
		oneConnect = 1;
		m = (typeof(m)){"OCe11100", "PCIe", "FCoE"};
		break;
	case PCI_DEVICE_ID_FALCON:
		m = (typeof(m)){"LPSe12002-ML1-E", "PCIe",
				"EmulexSecure Fibre"};
		break;
	case PCI_DEVICE_ID_BALIUS:
		m = (typeof(m)){"LPVe12002", "PCIe Shared I/O",
				"Fibre Channel Adapter"};
		break;
	case PCI_DEVICE_ID_LANCER_FC:
	case PCI_DEVICE_ID_LANCER_FC_VF:
		m = (typeof(m)){"LPe16000", "PCIe", "Fibre Channel Adapter"};
		break;
	case PCI_DEVICE_ID_LANCER_FCOE:
	case PCI_DEVICE_ID_LANCER_FCOE_VF:
		oneConnect = 1;
		m = (typeof(m)){"OCe15100", "PCIe", "FCoE"};
		break;
	case PCI_DEVICE_ID_SKYHAWK:
	case PCI_DEVICE_ID_SKYHAWK_VF:
		oneConnect = 1;
		m = (typeof(m)){"OCe14000", "PCIe", "FCoE"};
		break;
	default:
		m = (typeof(m)){"Unknown", "", ""};
		break;
	}

	if (mdp && mdp[0] == '\0')
		snprintf(mdp, 79, "%s", m.name);
	/*
	 * oneConnect hba requires special processing, they are all initiators
	 * and we put the port number on the end
	 */
	if (descp && descp[0] == '\0') {
		if (oneConnect)
			snprintf(descp, 255,
				"Emulex OneConnect %s, %s Initiator %s",
				m.name, m.function,
				phba->Port);
		else if (max_speed == 0)
			snprintf(descp, 255,
				"Emulex %s %s %s ",
				m.name, m.bus, m.function);
		else
			snprintf(descp, 255,
				"Emulex %s %d%s %s %s",
				m.name, max_speed, (GE) ? "GE" : "Gb",
				m.bus, m.function);
	}
}

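/**
 * lpfc_post_buffer - Post IOCB(s) with DMA buffer descriptor(s) to a IOCB ring
 * @phba: pointer to lpfc hba data structure.
 * @pring: pointer to a IOCB ring.
 * @cnt: the number of IOCBs to be posted to the IOCB ring.
 *
 * Posts QUE_RING_BUF64 IOCBs, each carrying one or two ELS-sized DMA
 * buffers; any buffers that could not be posted are remembered in
 * pring->missbufcnt and retried later.
 */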
int
lpfc_post_buffer(struct lpfc_hba *phba, struct lpfc_sli_ring *pring, int cnt)
{
	IOCB_t *icmd;
	struct lpfc_iocbq *iocb;
	struct lpfc_dmabuf *mp1, *mp2;

	cnt += pring->missbufcnt;

	/* While there are buffers to post */
	while (cnt > 0) {
		/* Allocate buffer for command iocb */
		iocb = lpfc_sli_get_iocbq(phba);
		if (iocb == NULL) {
			pring->missbufcnt = cnt;
			return cnt;
		}
		icmd = &iocb->iocb;

		/* 2 buffers can be posted per command */
		/* Allocate buffer to post */
		mp1 = kmalloc(sizeof (struct lpfc_dmabuf), GFP_KERNEL);
		if (mp1)
			mp1->virt = lpfc_mbuf_alloc(phba, MEM_PRI, &mp1->phys);
		if (!mp1 || !mp1->virt) {
			kfree(mp1);
			lpfc_sli_release_iocbq(phba, iocb);
			pring->missbufcnt = cnt;
			return cnt;
		}

		INIT_LIST_HEAD(&mp1->list);
		/* Allocate buffer to post */
		if (cnt > 1) {
			mp2 = kmalloc(sizeof (struct lpfc_dmabuf), GFP_KERNEL);
			if (mp2)
				mp2->virt = lpfc_mbuf_alloc(phba, MEM_PRI,
							    &mp2->phys);
			if (!mp2 || !mp2->virt) {
				kfree(mp2);
				lpfc_mbuf_free(phba, mp1->virt, mp1->phys);
				kfree(mp1);
				lpfc_sli_release_iocbq(phba, iocb);
				pring->missbufcnt = cnt;
				return cnt;
			}

			INIT_LIST_HEAD(&mp2->list);
		} else {
			mp2 = NULL;
		}

		icmd->un.cont64[0].addrHigh = putPaddrHigh(mp1->phys);
		icmd->un.cont64[0].addrLow = putPaddrLow(mp1->phys);
		icmd->un.cont64[0].tus.f.bdeSize = FCELSSIZE;
		icmd->ulpBdeCount = 1;
		cnt--;
		if (mp2) {
			icmd->un.cont64[1].addrHigh = putPaddrHigh(mp2->phys);
			icmd->un.cont64[1].addrLow = putPaddrLow(mp2->phys);
			icmd->un.cont64[1].tus.f.bdeSize = FCELSSIZE;
			cnt--;
			icmd->ulpBdeCount = 2;
		}

		icmd->ulpCommand = CMD_QUE_RING_BUF64_CN;
		icmd->ulpLe = 1;

		if (lpfc_sli_issue_iocb(phba, pring->ringno, iocb, 0) ==
		    IOCB_ERROR) {
			lpfc_mbuf_free(phba, mp1->virt, mp1->phys);
			kfree(mp1);
			cnt++;
			if (mp2) {
				lpfc_mbuf_free(phba, mp2->virt, mp2->phys);
				kfree(mp2);
				cnt++;
			}
			lpfc_sli_release_iocbq(phba, iocb);
			pring->missbufcnt = cnt;
			return cnt;
		}
		lpfc_sli_ringpostbuf_put(phba, pring, mp1);
		if (mp2)
			lpfc_sli_ringpostbuf_put(phba, pring, mp2);
	}
	pring->missbufcnt = 0;
	return 0;
}

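/**
 * lpfc_post_rcv_buf - Post the initial receive IOCB buffers to ELS ring
 * @phba: pointer to lpfc hba data structure.
 */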
static int
lpfc_post_rcv_buf(struct lpfc_hba *phba)
{
	struct lpfc_sli *psli = &phba->sli;

	/* Ring 0, ELS / CT buffers */
	lpfc_post_buffer(phba, &psli->ring[LPFC_ELS_RING], LPFC_BUF_RING0);
	/* Ring 2 - FCP no buffers needed */

	return 0;
}

#define S(N,V) (((V)<<(N))|((V)>>(32-(N))))

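/**
 * lpfc_sha_init - Set up initial array of hash table entries
 * @HashResultPointer: pointer to an array as hash table.
 *
 * Loads the standard SHA-1 initialization constants.
 */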
static void
lpfc_sha_init(uint32_t * HashResultPointer)
{
	HashResultPointer[0] = 0x67452301;
	HashResultPointer[1] = 0xEFCDAB89;
	HashResultPointer[2] = 0x98BADCFE;
	HashResultPointer[3] = 0x10325476;
	HashResultPointer[4] = 0xC3D2E1F0;
}

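/**
 * lpfc_sha_iterate - Iterate initial hash table with the working hash table
 * @HashResultPointer: pointer to an initial/result hash table.
 * @HashWorkingPointer: pointer to an working hash table.
 *
 * Expands the 80-word message schedule and runs the 80 SHA-1 rounds,
 * accumulating the result into the hash table.
 */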
static void
lpfc_sha_iterate(uint32_t * HashResultPointer, uint32_t * HashWorkingPointer)
{
	int t;
	uint32_t TEMP;
	uint32_t A, B, C, D, E;
	t = 16;
	do {
		HashWorkingPointer[t] =
		    S(1,
		      HashWorkingPointer[t - 3] ^ HashWorkingPointer[t -
								     8] ^
		      HashWorkingPointer[t - 14] ^ HashWorkingPointer[t - 16]);
	} while (++t <= 79);
	t = 0;
	A = HashResultPointer[0];
	B = HashResultPointer[1];
	C = HashResultPointer[2];
	D = HashResultPointer[3];
	E = HashResultPointer[4];

	do {
		if (t < 20) {
			TEMP = ((B & C) | ((~B) & D)) + 0x5A827999;
		} else if (t < 40) {
			TEMP = (B ^ C ^ D) + 0x6ED9EBA1;
		} else if (t < 60) {
			TEMP = ((B & C) | (B & D) | (C & D)) + 0x8F1BBCDC;
		} else {
			TEMP = (B ^ C ^ D) + 0xCA62C1D6;
		}
		TEMP += S(5, A) + E + HashWorkingPointer[t];
		E = D;
		D = C;
		C = S(30, B);
		B = A;
		A = TEMP;
	} while (++t <= 79);

	HashResultPointer[0] += A;
	HashResultPointer[1] += B;
	HashResultPointer[2] += C;
	HashResultPointer[3] += D;
	HashResultPointer[4] += E;
}

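/**
 * lpfc_challenge_key - Create challenge key based on the random challenge
 * @RandomChallenge: pointer to the entry of host challenge random number array.
 * @HashWorking: pointer to the entry of the working hash array.
 *
 * XORs the random challenge word into the working hash entry.
 */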
static void
lpfc_challenge_key(uint32_t * RandomChallenge, uint32_t * HashWorking)
{
	*HashWorking = (*RandomChallenge ^ *HashWorking);
}

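/**
 * lpfc_hba_init - Perform special handling for LC HBA initialization
 * @phba: pointer to lpfc hba data structure.
 * @hbainit: pointer to an array of unsigned 32-bit integers.
 *
 * Computes an SHA-1 based response to the adapter's random challenge
 * using the HBA's WWNN.
 */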
void
lpfc_hba_init(struct lpfc_hba *phba, uint32_t *hbainit)
{
	int t;
	uint32_t *HashWorking;
	uint32_t *pwwnn = (uint32_t *) phba->wwnn;

	HashWorking = kcalloc(80, sizeof(uint32_t), GFP_KERNEL);
	if (!HashWorking)
		return;

	HashWorking[0] = HashWorking[78] = *pwwnn++;
	HashWorking[1] = HashWorking[79] = *pwwnn;

	for (t = 0; t < 7; t++)
		lpfc_challenge_key(phba->RandomData + t, HashWorking + t);

	lpfc_sha_init(hbainit);
	lpfc_sha_iterate(hbainit, HashWorking);
	kfree(HashWorking);
}

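/**
 * lpfc_cleanup - Performs vport cleanups before deleting a vport
 * @vport: pointer to a host virtual N_Port data structure.
 *
 * Cleans up the node list of a vport and drives every remaining ndlp
 * through the discovery state machine until the list drains.
 */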
void
lpfc_cleanup(struct lpfc_vport *vport)
{
	struct lpfc_hba *phba = vport->phba;
	struct lpfc_nodelist *ndlp, *next_ndlp;
	int i = 0;

	if (phba->link_state > LPFC_LINK_DOWN)
		lpfc_port_link_failure(vport);

	list_for_each_entry_safe(ndlp, next_ndlp, &vport->fc_nodes, nlp_listp) {
		if (!NLP_CHK_NODE_ACT(ndlp)) {
			ndlp = lpfc_enable_node(vport, ndlp,
						NLP_STE_UNUSED_NODE);
			if (!ndlp)
				continue;
			spin_lock_irq(&phba->ndlp_lock);
			NLP_SET_FREE_REQ(ndlp);
			spin_unlock_irq(&phba->ndlp_lock);
			/* Trigger the release of the ndlp memory */
			lpfc_nlp_put(ndlp);
			continue;
		}
		spin_lock_irq(&phba->ndlp_lock);
		if (NLP_CHK_FREE_REQ(ndlp)) {
			/* The ndlp is already in memory free mode */
			spin_unlock_irq(&phba->ndlp_lock);
			continue;
		} else
			/* Indicate request for freeing ndlp memory */
			NLP_SET_FREE_REQ(ndlp);
		spin_unlock_irq(&phba->ndlp_lock);

		if (vport->port_type != LPFC_PHYSICAL_PORT &&
		    ndlp->nlp_DID == Fabric_DID) {
			/* Just free up ndlp with Fabric_DID for vports */
			lpfc_nlp_put(ndlp);
			continue;
		}

		/* take care of nodes in unused state before the state
		 * machine taking action.
		 */
		if (ndlp->nlp_state == NLP_STE_UNUSED_NODE) {
			lpfc_nlp_put(ndlp);
			continue;
		}

		if (ndlp->nlp_type & NLP_FABRIC)
			lpfc_disc_state_machine(vport, ndlp, NULL,
					NLP_EVT_DEVICE_RECOVERY);

		lpfc_disc_state_machine(vport, ndlp, NULL,
					NLP_EVT_DEVICE_RM);
	}

	/* At this point, ALL ndlp's should be gone
	 * because of the previous NLP_EVT_DEVICE_RM.
	 * Lets wait for this to happen, if needed.
	 */
	while (!list_empty(&vport->fc_nodes)) {
		if (i++ > 3000) {
			lpfc_printf_vlog(vport, KERN_ERR, LOG_DISCOVERY,
				"0233 Nodelist not empty\n");
			list_for_each_entry_safe(ndlp, next_ndlp,
						&vport->fc_nodes, nlp_listp) {
				lpfc_printf_vlog(ndlp->vport, KERN_ERR,
						LOG_NODE,
						"0282 did:x%x ndlp:x%p "
						"usgmap:x%x refcnt:%d\n",
						ndlp->nlp_DID, (void *)ndlp,
						ndlp->nlp_usg_map,
						atomic_read(
							&ndlp->kref.refcount));
			}
			break;
		}

		/* Wait for any activity on ndlps to settle */
		msleep(10);
	}
	lpfc_cleanup_vports_rrqs(vport, NULL);
}

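/**
 * lpfc_stop_vport_timers - Stop all the timers associated with a vport
 * @vport: pointer to a virtual N_Port data structure.
 *
 * This routine stops all the timers associated with a @vport. This function
 * is invoked before disabling or deleting a @vport. Note that the physical
 * port is treated as @vport with vpi 0.
 **/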
void
lpfc_stop_vport_timers(struct lpfc_vport *vport)
{
	del_timer_sync(&vport->els_tmofunc);
	del_timer_sync(&vport->fc_fdmitmo);
	del_timer_sync(&vport->delayed_disc_tmo);
	lpfc_can_disctmo(vport);
	return;
}

void
__lpfc_sli4_stop_fcf_redisc_wait_timer(struct lpfc_hba *phba)
{
	/* Clear pending FCF rediscovery wait flag */
	phba->fcf.fcf_flag &= ~FCF_REDISC_PEND;

	/* Now, try to stop the timer */
	del_timer(&phba->fcf.redisc_wait);
}

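/**
 * lpfc_sli4_stop_fcf_redisc_wait_timer - Stop FCF rediscovery wait timer
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine stops the SLI4 FCF rediscover wait timer if it's on. It
 * checks, with the host lock held, whether the FCF rediscovery wait timer
 * is pending, stops it if so, and clears the failover-in-progress flags.
 **/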
void
lpfc_sli4_stop_fcf_redisc_wait_timer(struct lpfc_hba *phba)
{
	spin_lock_irq(&phba->hbalock);
	if (!(phba->fcf.fcf_flag & FCF_REDISC_PEND)) {
		/* FCF rediscovery timer already fired or stopped */
		spin_unlock_irq(&phba->hbalock);
		return;
	}
	__lpfc_sli4_stop_fcf_redisc_wait_timer(phba);
	/* Clear failover in progress flags */
	phba->fcf.fcf_flag &= ~(FCF_DEAD_DISC | FCF_ACVL_DISC);
	spin_unlock_irq(&phba->hbalock);
}

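/**
 * lpfc_stop_hba_timers - Stop all the timers associated with an HBA
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine stops all the timers associated with a HBA. This function
 * is invoked before either putting a HBA offline or unloading the driver.
 **/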
void
lpfc_stop_hba_timers(struct lpfc_hba *phba)
{
	lpfc_stop_vport_timers(phba->pport);
	del_timer_sync(&phba->sli.mbox_tmo);
	del_timer_sync(&phba->fabric_block_timer);
	del_timer_sync(&phba->eratt_poll);
	del_timer_sync(&phba->hb_tmofunc);
	if (phba->sli_rev == LPFC_SLI_REV4) {
		del_timer_sync(&phba->rrq_tmr);
		phba->hba_flag &= ~HBA_RRQ_ACTIVE;
	}
	phba->hb_outstanding = 0;

	switch (phba->pci_dev_grp) {
	case LPFC_PCI_DEV_LP:
		/* Stop any LightPulse device specific driver timers */
		del_timer_sync(&phba->fcp_poll_timer);
		break;
	case LPFC_PCI_DEV_OC:
		/* Stop any OneConnect device specific driver timers */
		lpfc_sli4_stop_fcf_redisc_wait_timer(phba);
		break;
	default:
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"0297 Invalid device group (x%x)\n",
				phba->pci_dev_grp);
		break;
	}
	return;
}

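/**
 * lpfc_block_mgmt_io - Mark a HBA's management interface as blocked
 * @phba: pointer to lpfc hba data structure.
 * @mbx_action: flag for mailbox shutdown action.
 *
 * This routine marks a HBA's management interface as blocked. Once marked,
 * all user space access to the HBA is blocked. Unless @mbx_action is
 * LPFC_MBX_NO_WAIT, it also waits (up to the mailbox command timeout) for
 * any outstanding mailbox command to complete.
 **/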
static void
lpfc_block_mgmt_io(struct lpfc_hba *phba, int mbx_action)
{
	unsigned long iflag;
	uint8_t actcmd = MBX_HEARTBEAT;
	unsigned long timeout;

	spin_lock_irqsave(&phba->hbalock, iflag);
	phba->sli.sli_flag |= LPFC_BLOCK_MGMT_IO;
	spin_unlock_irqrestore(&phba->hbalock, iflag);
	if (mbx_action == LPFC_MBX_NO_WAIT)
		return;
	timeout = msecs_to_jiffies(LPFC_MBOX_TMO * 1000) + jiffies;
	spin_lock_irqsave(&phba->hbalock, iflag);
	if (phba->sli.mbox_active) {
		actcmd = phba->sli.mbox_active->u.mb.mbxCommand;
		/* Determine how long we might wait for the active mailbox
		 * command to be gracefully completed by firmware.
		 */
		timeout = msecs_to_jiffies(lpfc_mbox_tmo_val(phba,
				phba->sli.mbox_active) * 1000) + jiffies;
	}
	spin_unlock_irqrestore(&phba->hbalock, iflag);

	/* Wait for the outstanding mailbox command to complete */
	while (phba->sli.mbox_active) {
		/* Check active mailbox complete status every 2ms */
		msleep(2);
		if (time_after(jiffies, timeout)) {
			lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
				"2813 Mgmt IO is Blocked %x "
				"- mbox cmd %x still active\n",
				phba->sli.sli_flag, actcmd);
			break;
		}
	}
}

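/**
 * lpfc_sli4_node_prep - Assign RPIs for active nodes
 * @phba: pointer to lpfc hba data structure.
 *
 * Allocate RPIs for all active remote nodes. This is needed whenever
 * an SLI4 adapter is reset and the driver is not unloading, to fix up
 * the RPIs held by remote nodes on the adapter.
 **/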
void
lpfc_sli4_node_prep(struct lpfc_hba *phba)
{
	struct lpfc_nodelist *ndlp, *next_ndlp;
	struct lpfc_vport **vports;
	int i;

	if (phba->sli_rev != LPFC_SLI_REV4)
		return;

	vports = lpfc_create_vport_work_array(phba);
	if (vports != NULL) {
		for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) {
			if (vports[i]->load_flag & FC_UNLOADING)
				continue;

			list_for_each_entry_safe(ndlp, next_ndlp,
						 &vports[i]->fc_nodes,
						 nlp_listp) {
				if (NLP_CHK_NODE_ACT(ndlp))
					ndlp->nlp_rpi =
						lpfc_sli4_alloc_rpi(phba);
			}
		}
	}
	lpfc_destroy_vport_work_array(phba, vports);
}

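/**
 * lpfc_online - Initialize and bring a HBA online
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine initializes the HBA and brings it online. During this
 * process, the management interface is blocked to prevent user space
 * access to the HBA interfering with the initialization process.
 *
 * Return codes
 *   0 - successful
 *   1 - failed
 **/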
int
lpfc_online(struct lpfc_hba *phba)
{
	struct lpfc_vport *vport;
	struct lpfc_vport **vports;
	int i;
	bool vpis_cleared = false;

	if (!phba)
		return 0;
	vport = phba->pport;

	if (!(vport->fc_flag & FC_OFFLINE_MODE))
		return 0;

	lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
			"0458 Bring Adapter online\n");

	lpfc_block_mgmt_io(phba, LPFC_MBX_WAIT);

	if (!lpfc_sli_queue_setup(phba)) {
		lpfc_unblock_mgmt_io(phba);
		return 1;
	}

	if (phba->sli_rev == LPFC_SLI_REV4) {
		if (lpfc_sli4_hba_setup(phba)) { /* Initialize SLI4 HBA */
			lpfc_unblock_mgmt_io(phba);
			return 1;
		}
		spin_lock_irq(&phba->hbalock);
		if (!phba->sli4_hba.max_cfg_param.vpi_used)
			vpis_cleared = true;
		spin_unlock_irq(&phba->hbalock);
	} else {
		if (lpfc_sli_hba_setup(phba)) {	/* Initialize SLI2/SLI3 HBA */
			lpfc_unblock_mgmt_io(phba);
			return 1;
		}
	}

	vports = lpfc_create_vport_work_array(phba);
	if (vports != NULL)
		for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) {
			struct Scsi_Host *shost;
			shost = lpfc_shost_from_vport(vports[i]);
			spin_lock_irq(shost->host_lock);
			vports[i]->fc_flag &= ~FC_OFFLINE_MODE;
			if (phba->sli3_options & LPFC_SLI3_NPIV_ENABLED)
				vports[i]->fc_flag |= FC_VPORT_NEEDS_REG_VPI;
			if (phba->sli_rev == LPFC_SLI_REV4) {
				vports[i]->fc_flag |= FC_VPORT_NEEDS_INIT_VPI;
				if ((vpis_cleared) &&
				    (vports[i]->port_type !=
					LPFC_PHYSICAL_PORT))
					vports[i]->vpi = 0;
			}
			spin_unlock_irq(shost->host_lock);
		}
	lpfc_destroy_vport_work_array(phba, vports);

	lpfc_unblock_mgmt_io(phba);
	return 0;
}

void
lpfc_unblock_mgmt_io(struct lpfc_hba *phba)
{
	unsigned long iflag;

	spin_lock_irqsave(&phba->hbalock, iflag);
	phba->sli.sli_flag &= ~LPFC_BLOCK_MGMT_IO;
	spin_unlock_irqrestore(&phba->hbalock, iflag);
}

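/**
 * lpfc_offline_prep - Prepare a HBA to be brought offline
 * @phba: pointer to lpfc hba data structure.
 * @mbx_action: flag for mailbox shutdown action.
 *
 * This routine is invoked to prepare a HBA to be brought offline. It
 * performs unregistration login to all the nodes on all vports and flushes
 * the mailbox queue to make it ready to be brought offline.
 **/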
void
lpfc_offline_prep(struct lpfc_hba *phba, int mbx_action)
{
	struct lpfc_vport *vport = phba->pport;
	struct lpfc_nodelist *ndlp, *next_ndlp;
	struct lpfc_vport **vports;
	struct Scsi_Host *shost;
	int i;

	if (vport->fc_flag & FC_OFFLINE_MODE)
		return;

	lpfc_block_mgmt_io(phba, mbx_action);

	lpfc_linkdown(phba);

	/* Issue an unreg_login to all nodes on all vports */
	vports = lpfc_create_vport_work_array(phba);
	if (vports != NULL) {
		for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) {
			if (vports[i]->load_flag & FC_UNLOADING)
				continue;
			shost = lpfc_shost_from_vport(vports[i]);
			spin_lock_irq(shost->host_lock);
			vports[i]->vpi_state &= ~LPFC_VPI_REGISTERED;
			vports[i]->fc_flag |= FC_VPORT_NEEDS_REG_VPI;
			vports[i]->fc_flag &= ~FC_VFI_REGISTERED;
			spin_unlock_irq(shost->host_lock);

			list_for_each_entry_safe(ndlp, next_ndlp,
						 &vports[i]->fc_nodes,
						 nlp_listp) {
				if (!NLP_CHK_NODE_ACT(ndlp))
					continue;
				if (ndlp->nlp_state == NLP_STE_UNUSED_NODE)
					continue;
				if (ndlp->nlp_type & NLP_FABRIC) {
					lpfc_disc_state_machine(vports[i], ndlp,
						NULL, NLP_EVT_DEVICE_RECOVERY);
					lpfc_disc_state_machine(vports[i], ndlp,
						NULL, NLP_EVT_DEVICE_RM);
				}
				spin_lock_irq(shost->host_lock);
				ndlp->nlp_flag &= ~NLP_NPR_ADISC;
				spin_unlock_irq(shost->host_lock);
				/*
				 * Whenever an SLI4 port goes offline, free the
				 * RPI. Get a new RPI when the adapter port
				 * comes back online.
				 */
				if (phba->sli_rev == LPFC_SLI_REV4)
					lpfc_sli4_free_rpi(phba, ndlp->nlp_rpi);
				lpfc_unreg_rpi(vports[i], ndlp);
			}
		}
	}
	lpfc_destroy_vport_work_array(phba, vports);

	lpfc_sli_mbox_sys_shutdown(phba, mbx_action);
}

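/**
 * lpfc_offline - Bring a HBA offline
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine actually brings a HBA offline. It stops all the timers
 * associated with the HBA, brings down the SLI layer, and eventually
 * marks the HBA as in offline state for the upper layer protocol.
 **/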
void
lpfc_offline(struct lpfc_hba *phba)
{
	struct Scsi_Host *shost;
	struct lpfc_vport **vports;
	int i;

	if (phba->pport->fc_flag & FC_OFFLINE_MODE)
		return;

	/* stop port and all timers associated with this hba */
	lpfc_stop_port(phba);
	vports = lpfc_create_vport_work_array(phba);
	if (vports != NULL)
		for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++)
			lpfc_stop_vport_timers(vports[i]);
	lpfc_destroy_vport_work_array(phba, vports);
	lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
			"0460 Bring Adapter offline\n");
	/* Bring down the SLI Layer and cleanup. The HBA is offline now. */
	lpfc_sli_hba_down(phba);
	spin_lock_irq(&phba->hbalock);
	phba->work_ha = 0;
	spin_unlock_irq(&phba->hbalock);
	vports = lpfc_create_vport_work_array(phba);
	if (vports != NULL)
		for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) {
			shost = lpfc_shost_from_vport(vports[i]);
			spin_lock_irq(shost->host_lock);
			vports[i]->work_port_events = 0;
			vports[i]->fc_flag |= FC_OFFLINE_MODE;
			spin_unlock_irq(shost->host_lock);
		}
	lpfc_destroy_vport_work_array(phba, vports);
}

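/**
 * lpfc_scsi_free - Free all the SCSI buffers and IOCBs from driver lists
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine frees all the SCSI buffers and IOCBs from the driver lists
 * back to the kernel. It is called to free internal resources before the
 * device is removed from the system.
 **/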
static void
lpfc_scsi_free(struct lpfc_hba *phba)
{
	struct lpfc_scsi_buf *sb, *sb_next;
	struct lpfc_iocbq *io, *io_next;

	spin_lock_irq(&phba->hbalock);

	/* Release all the lpfc_scsi_bufs maintained by this host. */

	spin_lock(&phba->scsi_buf_list_put_lock);
	list_for_each_entry_safe(sb, sb_next, &phba->lpfc_scsi_buf_list_put,
				 list) {
		list_del(&sb->list);
		pci_pool_free(phba->lpfc_scsi_dma_buf_pool, sb->data,
			      sb->dma_handle);
		kfree(sb);
		phba->total_scsi_bufs--;
	}
	spin_unlock(&phba->scsi_buf_list_put_lock);

	spin_lock(&phba->scsi_buf_list_get_lock);
	list_for_each_entry_safe(sb, sb_next, &phba->lpfc_scsi_buf_list_get,
				 list) {
		list_del(&sb->list);
		pci_pool_free(phba->lpfc_scsi_dma_buf_pool, sb->data,
			      sb->dma_handle);
		kfree(sb);
		phba->total_scsi_bufs--;
	}
	spin_unlock(&phba->scsi_buf_list_get_lock);

	/* Release all the lpfc_iocbq entries maintained by this host. */
	list_for_each_entry_safe(io, io_next, &phba->lpfc_iocb_list, list) {
		list_del(&io->list);
		kfree(io);
		phba->total_iocbq_bufs--;
	}

	spin_unlock_irq(&phba->hbalock);
}

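/**
 * lpfc_sli4_xri_sgl_update - update xri-sgl sizing and mapping
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine first calculates the sizes of the current ELS and allocated
 * SCSI sgl lists, and then goes through all sgls to update the physical
 * xris assigned due to port function reset. During port initialization,
 * the current ELS and allocated SCSI sgl lists are 0s.
 *
 * Return codes
 *   0 - successful
 *   -ENOMEM - failed to allocate memory or xris
 **/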
int
lpfc_sli4_xri_sgl_update(struct lpfc_hba *phba)
{
	struct lpfc_sglq *sglq_entry = NULL, *sglq_entry_next = NULL;
	struct lpfc_scsi_buf *psb = NULL, *psb_next = NULL;
	uint16_t i, lxri, xri_cnt, els_xri_cnt, scsi_xri_cnt;
	LIST_HEAD(els_sgl_list);
	LIST_HEAD(scsi_sgl_list);
	int rc;

	/*
	 * update on pci function's els xri-sgl list
	 */
	els_xri_cnt = lpfc_sli4_get_els_iocb_cnt(phba);
	if (els_xri_cnt > phba->sli4_hba.els_xri_cnt) {
		/* els xri-sgl expanded */
		xri_cnt = els_xri_cnt - phba->sli4_hba.els_xri_cnt;
		lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
				"3157 ELS xri-sgl count increased from "
				"%d to %d\n", phba->sli4_hba.els_xri_cnt,
				els_xri_cnt);
		/* allocate the additional els sgls */
		for (i = 0; i < xri_cnt; i++) {
			sglq_entry = kzalloc(sizeof(struct lpfc_sglq),
					     GFP_KERNEL);
			if (sglq_entry == NULL) {
				lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
						"2562 Failure to allocate an "
						"ELS sgl entry:%d\n", i);
				rc = -ENOMEM;
				goto out_free_mem;
			}
			sglq_entry->buff_type = GEN_BUFF_TYPE;
			sglq_entry->virt = lpfc_mbuf_alloc(phba, 0,
							   &sglq_entry->phys);
			if (sglq_entry->virt == NULL) {
				kfree(sglq_entry);
				lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
						"2563 Failure to allocate an "
						"ELS mbuf:%d\n", i);
				rc = -ENOMEM;
				goto out_free_mem;
			}
			sglq_entry->sgl = sglq_entry->virt;
			memset(sglq_entry->sgl, 0, LPFC_BPL_SIZE);
			sglq_entry->state = SGL_FREED;
			list_add_tail(&sglq_entry->list, &els_sgl_list);
		}
		spin_lock_irq(&phba->hbalock);
		list_splice_init(&els_sgl_list, &phba->sli4_hba.lpfc_sgl_list);
		spin_unlock_irq(&phba->hbalock);
	} else if (els_xri_cnt < phba->sli4_hba.els_xri_cnt) {
		/* els xri-sgl shrunk */
		xri_cnt = phba->sli4_hba.els_xri_cnt - els_xri_cnt;
		lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
				"3158 ELS xri-sgl count decreased from "
				"%d to %d\n", phba->sli4_hba.els_xri_cnt,
				els_xri_cnt);
		spin_lock_irq(&phba->hbalock);
		list_splice_init(&phba->sli4_hba.lpfc_sgl_list, &els_sgl_list);
		spin_unlock_irq(&phba->hbalock);
		/* release extra els sgls from list */
		for (i = 0; i < xri_cnt; i++) {
			list_remove_head(&els_sgl_list,
					 sglq_entry, struct lpfc_sglq, list);
			if (sglq_entry) {
				lpfc_mbuf_free(phba, sglq_entry->virt,
					       sglq_entry->phys);
				kfree(sglq_entry);
			}
		}
		spin_lock_irq(&phba->hbalock);
		list_splice_init(&els_sgl_list, &phba->sli4_hba.lpfc_sgl_list);
		spin_unlock_irq(&phba->hbalock);
	} else
		lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
				"3163 ELS xri-sgl count unchanged: %d\n",
				els_xri_cnt);
	phba->sli4_hba.els_xri_cnt = els_xri_cnt;

	/* update xris to els sgls on the list */
	sglq_entry = NULL;
	sglq_entry_next = NULL;
	list_for_each_entry_safe(sglq_entry, sglq_entry_next,
				 &phba->sli4_hba.lpfc_sgl_list, list) {
		lxri = lpfc_sli4_next_xritag(phba);
		if (lxri == NO_XRI) {
			lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
					"2400 Failed to allocate xri for "
					"ELS sgl\n");
			rc = -ENOMEM;
			goto out_free_mem;
		}
		sglq_entry->sli4_lxritag = lxri;
		sglq_entry->sli4_xritag = phba->sli4_hba.xri_ids[lxri];
	}

	/*
	 * update on pci function's allocated scsi xri-sgl list
	 */
	phba->total_scsi_bufs = 0;

	/* maximum number of xris available for scsi buffers */
	phba->sli4_hba.scsi_xri_max = phba->sli4_hba.max_cfg_param.max_xri -
				      els_xri_cnt;

	lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
			"2401 Current allocated SCSI xri-sgl count:%d, "
			"maximum SCSI xri count:%d\n",
			phba->sli4_hba.scsi_xri_cnt,
			phba->sli4_hba.scsi_xri_max);

	/* Take the put-list lock without re-disabling interrupts; the
	 * get-list lock above already disabled them.
	 */
	spin_lock_irq(&phba->scsi_buf_list_get_lock);
	spin_lock(&phba->scsi_buf_list_put_lock);
	list_splice_init(&phba->lpfc_scsi_buf_list_get, &scsi_sgl_list);
	list_splice(&phba->lpfc_scsi_buf_list_put, &scsi_sgl_list);
	spin_unlock(&phba->scsi_buf_list_put_lock);
	spin_unlock_irq(&phba->scsi_buf_list_get_lock);

	if (phba->sli4_hba.scsi_xri_cnt > phba->sli4_hba.scsi_xri_max) {
		/* max scsi xri shrunk below the allocated scsi buffers */
		scsi_xri_cnt = phba->sli4_hba.scsi_xri_cnt -
					phba->sli4_hba.scsi_xri_max;
		/* release the extra allocated scsi buffers */
		for (i = 0; i < scsi_xri_cnt; i++) {
			list_remove_head(&scsi_sgl_list, psb,
					 struct lpfc_scsi_buf, list);
			pci_pool_free(phba->lpfc_scsi_dma_buf_pool, psb->data,
				      psb->dma_handle);
			kfree(psb);
		}
		spin_lock_irq(&phba->scsi_buf_list_get_lock);
		phba->sli4_hba.scsi_xri_cnt -= scsi_xri_cnt;
		spin_unlock_irq(&phba->scsi_buf_list_get_lock);
	}

	/* update xris associated to remaining allocated scsi buffers */
	psb = NULL;
	psb_next = NULL;
	list_for_each_entry_safe(psb, psb_next, &scsi_sgl_list, list) {
		lxri = lpfc_sli4_next_xritag(phba);
		if (lxri == NO_XRI) {
			lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
					"2560 Failed to allocate xri for "
					"scsi buffer\n");
			rc = -ENOMEM;
			goto out_free_mem;
		}
		psb->cur_iocbq.sli4_lxritag = lxri;
		psb->cur_iocbq.sli4_xritag = phba->sli4_hba.xri_ids[lxri];
	}
	spin_lock_irq(&phba->scsi_buf_list_get_lock);
	spin_lock(&phba->scsi_buf_list_put_lock);
	list_splice_init(&scsi_sgl_list, &phba->lpfc_scsi_buf_list_get);
	INIT_LIST_HEAD(&phba->lpfc_scsi_buf_list_put);
	spin_unlock(&phba->scsi_buf_list_put_lock);
	spin_unlock_irq(&phba->scsi_buf_list_get_lock);

	return 0;

out_free_mem:
	lpfc_free_els_sgl_list(phba);
	lpfc_scsi_free(phba);
	return rc;
}

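/**
 * lpfc_create_port - Create an FC port
 * @phba: pointer to lpfc hba data structure.
 * @instance: a unique integer ID to this FC port.
 * @dev: pointer to the device data structure.
 *
 * This routine creates a FC port for the upper layer protocol. The FC port
 * can be created on top of either a physical port or a virtual port provided
 * by the HBA. It allocates a SCSI host data structure (shost), associates
 * the FC port with it, and then adds the shost into the SCSI layer.
 *
 * Return codes
 *   @vport - pointer to the virtual N_Port data structure.
 *   NULL - port create failed.
 **/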
struct lpfc_vport *
lpfc_create_port(struct lpfc_hba *phba, int instance, struct device *dev)
{
	struct lpfc_vport *vport;
	struct Scsi_Host *shost;
	int error = 0;

	if (dev != &phba->pcidev->dev)
		shost = scsi_host_alloc(&lpfc_vport_template,
					sizeof(struct lpfc_vport));
	else
		shost = scsi_host_alloc(&lpfc_template,
					sizeof(struct lpfc_vport));
	if (!shost)
		goto out;

	vport = (struct lpfc_vport *) shost->hostdata;
	vport->phba = phba;
	vport->load_flag |= FC_LOADING;
	vport->fc_flag |= FC_VPORT_NEEDS_REG_VPI;
	vport->fc_rscn_flush = 0;

	lpfc_get_vport_cfgparam(vport);
	shost->unique_id = instance;
	shost->max_id = LPFC_MAX_TARGET;
	shost->max_lun = vport->cfg_max_luns;
	shost->this_id = -1;
	shost->max_cmd_len = 16;
	if (phba->sli_rev == LPFC_SLI_REV4) {
		shost->dma_boundary =
			phba->sli4_hba.pc_sli4_params.sge_supp_len-1;
		shost->sg_tablesize = phba->cfg_sg_seg_cnt;
	}

	/*
	 * Set initial can_queue value since 0 is no longer supported and
	 * scsi_add_host will fail. This will be adjusted later based on the
	 * max xri value determined in hba setup.
	 */
	shost->can_queue = phba->cfg_hba_queue_depth - 10;
	if (dev != &phba->pcidev->dev) {
		shost->transportt = lpfc_vport_transport_template;
		vport->port_type = LPFC_NPIV_PORT;
	} else {
		shost->transportt = lpfc_transport_template;
		vport->port_type = LPFC_PHYSICAL_PORT;
	}

	/* Initialize all internally managed lists. */
	INIT_LIST_HEAD(&vport->fc_nodes);
	INIT_LIST_HEAD(&vport->rcv_buffer_list);
	spin_lock_init(&vport->work_port_lock);

	init_timer(&vport->fc_disctmo);
	vport->fc_disctmo.function = lpfc_disc_timeout;
	vport->fc_disctmo.data = (unsigned long)vport;

	init_timer(&vport->fc_fdmitmo);
	vport->fc_fdmitmo.function = lpfc_fdmi_tmo;
	vport->fc_fdmitmo.data = (unsigned long)vport;

	init_timer(&vport->els_tmofunc);
	vport->els_tmofunc.function = lpfc_els_timeout;
	vport->els_tmofunc.data = (unsigned long)vport;

	init_timer(&vport->delayed_disc_tmo);
	vport->delayed_disc_tmo.function = lpfc_delayed_disc_tmo;
	vport->delayed_disc_tmo.data = (unsigned long)vport;

	error = scsi_add_host_with_dma(shost, dev, &phba->pcidev->dev);
	if (error)
		goto out_put_shost;

	spin_lock_irq(&phba->hbalock);
	list_add_tail(&vport->listentry, &phba->port_list);
	spin_unlock_irq(&phba->hbalock);
	return vport;

out_put_shost:
	scsi_host_put(shost);
out:
	return NULL;
}

void
destroy_port(struct lpfc_vport *vport)
{
	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
	struct lpfc_hba  *phba = vport->phba;

	lpfc_debugfs_terminate(vport);
	fc_remove_host(shost);
	scsi_remove_host(shost);

	spin_lock_irq(&phba->hbalock);
	list_del_init(&vport->listentry);
	spin_unlock_irq(&phba->hbalock);

	lpfc_cleanup(vport);
	return;
}

int
lpfc_get_instance(void)
{
	int ret;

	ret = idr_alloc(&lpfc_hba_index, NULL, 0, 0, GFP_KERNEL);
	return ret < 0 ? -1 : ret;
}

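/**
 * lpfc_scan_finished - method for SCSI layer to detect whether scan is done
 * @shost: pointer to SCSI host data structure.
 * @time: elapsed time of the scan in jiffies.
 *
 * This routine is called by the SCSI layer with a SCSI host to determine
 * whether the scan of the host is finished.
 *
 * Note: there is no scan_start function as adapter initialization will
 * have asynchronously kicked off the link initialization.
 *
 * Return codes
 *   0 - SCSI host scan is not over yet.
 *   1 - SCSI host scan is over.
 **/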
int lpfc_scan_finished(struct Scsi_Host *shost, unsigned long time)
{
	struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
	struct lpfc_hba   *phba = vport->phba;
	int stat = 0;

	spin_lock_irq(shost->host_lock);

	if (vport->load_flag & FC_UNLOADING) {
		stat = 1;
		goto finished;
	}
	if (time >= msecs_to_jiffies(30 * 1000)) {
		lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
				"0461 Scanning longer than 30 "
				"seconds. Continuing initialization\n");
		stat = 1;
		goto finished;
	}
	if (time >= msecs_to_jiffies(15 * 1000) &&
	    phba->link_state <= LPFC_LINK_DOWN) {
		lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
				"0465 Link down longer than 15 "
				"seconds. Continuing initialization\n");
		stat = 1;
		goto finished;
	}

	if (vport->port_state != LPFC_VPORT_READY)
		goto finished;
	if (vport->num_disc_nodes || vport->fc_prli_sent)
		goto finished;
	if (vport->fc_map_cnt == 0 && time < msecs_to_jiffies(2 * 1000))
		goto finished;
	if ((phba->sli.sli_flag & LPFC_SLI_MBOX_ACTIVE) != 0)
		goto finished;

	stat = 1;

finished:
	spin_unlock_irq(shost->host_lock);
	return stat;
}

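/**
 * lpfc_host_attrib_init - Initialize SCSI host attributes on a FC port
 * @shost: pointer to SCSI host data structure.
 *
 * This routine initializes a given SCSI host attributes on a FC port. The
 * SCSI host can be either on top of a physical port or a virtual port.
 **/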
void lpfc_host_attrib_init(struct Scsi_Host *shost)
{
	struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
	struct lpfc_hba   *phba = vport->phba;
	/*
	 * Set fixed host attributes.  Must be done after lpfc_sli_hba_setup().
	 */

	fc_host_node_name(shost) = wwn_to_u64(vport->fc_nodename.u.wwn);
	fc_host_port_name(shost) = wwn_to_u64(vport->fc_portname.u.wwn);
	fc_host_supported_classes(shost) = FC_COS_CLASS3;

	memset(fc_host_supported_fc4s(shost), 0,
	       sizeof(fc_host_supported_fc4s(shost)));
	fc_host_supported_fc4s(shost)[2] = 1;
	fc_host_supported_fc4s(shost)[7] = 1;

	lpfc_vport_symbolic_node_name(vport, fc_host_symbolic_name(shost),
				 sizeof fc_host_symbolic_name(shost));

	fc_host_supported_speeds(shost) = 0;
	if (phba->lmt & LMT_16Gb)
		fc_host_supported_speeds(shost) |= FC_PORTSPEED_16GBIT;
	if (phba->lmt & LMT_10Gb)
		fc_host_supported_speeds(shost) |= FC_PORTSPEED_10GBIT;
	if (phba->lmt & LMT_8Gb)
		fc_host_supported_speeds(shost) |= FC_PORTSPEED_8GBIT;
	if (phba->lmt & LMT_4Gb)
		fc_host_supported_speeds(shost) |= FC_PORTSPEED_4GBIT;
	if (phba->lmt & LMT_2Gb)
		fc_host_supported_speeds(shost) |= FC_PORTSPEED_2GBIT;
	if (phba->lmt & LMT_1Gb)
		fc_host_supported_speeds(shost) |= FC_PORTSPEED_1GBIT;

	fc_host_maxframe_size(shost) =
		(((uint32_t) vport->fc_sparam.cmn.bbRcvSizeMsb & 0x0F) << 8) |
		(uint32_t) vport->fc_sparam.cmn.bbRcvSizeLsb;

	fc_host_dev_loss_tmo(shost) = vport->cfg_devloss_tmo;

	/* This value is also unchanging */
	memset(fc_host_active_fc4s(shost), 0,
	       sizeof(fc_host_active_fc4s(shost)));
	fc_host_active_fc4s(shost)[2] = 1;
	fc_host_active_fc4s(shost)[7] = 1;

	fc_host_max_npiv_vports(shost) = phba->max_vpi;
	spin_lock_irq(shost->host_lock);
	vport->load_flag &= ~FC_LOADING;
	spin_unlock_irq(shost->host_lock);
}

static void
lpfc_stop_port_s3(struct lpfc_hba *phba)
{
	/* Clear all interrupt enable conditions */
	writel(0, phba->HCregaddr);
	readl(phba->HCregaddr); /* flush */
	/* Clear all pending interrupts */
	writel(0xffffffff, phba->HAregaddr);
	readl(phba->HAregaddr); /* flush */

	/* Reset some HBA SLI setup states */
	lpfc_stop_hba_timers(phba);
	phba->pport->work_port_events = 0;
}

static void
lpfc_stop_port_s4(struct lpfc_hba *phba)
{
	/* Reset some HBA SLI4 setup states */
	lpfc_stop_hba_timers(phba);
	phba->pport->work_port_events = 0;
	phba->sli4_hba.intr_enable = 0;
}

void
lpfc_stop_port(struct lpfc_hba *phba)
{
	phba->lpfc_stop_port(phba);
}

void
lpfc_fcf_redisc_wait_start_timer(struct lpfc_hba *phba)
{
	unsigned long fcf_redisc_wait_tmo =
		(jiffies + msecs_to_jiffies(LPFC_FCF_REDISCOVER_WAIT_TMO));
	/* Start fcf rediscovery wait period timer */
	mod_timer(&phba->fcf.redisc_wait, fcf_redisc_wait_tmo);
	spin_lock_irq(&phba->hbalock);
	/* Allow action to new fcf asynchronous event */
	phba->fcf.fcf_flag &= ~(FCF_AVAILABLE | FCF_SCAN_DONE);
	/* Mark the FCF rediscovery pending state */
	phba->fcf.fcf_flag |= FCF_REDISC_PEND;
	spin_unlock_irq(&phba->hbalock);
}

void
lpfc_sli4_fcf_redisc_wait_tmo(unsigned long ptr)
{
	struct lpfc_hba *phba = (struct lpfc_hba *)ptr;

	/* Don't send FCF rediscovery event if timer cancelled */
	spin_lock_irq(&phba->hbalock);
	if (!(phba->fcf.fcf_flag & FCF_REDISC_PEND)) {
		spin_unlock_irq(&phba->hbalock);
		return;
	}
	/* Clear FCF rediscovery timer pending flag */
	phba->fcf.fcf_flag &= ~FCF_REDISC_PEND;
	/* FCF rediscovery event to worker thread */
	phba->fcf.fcf_flag |= FCF_REDISC_EVT;
	spin_unlock_irq(&phba->hbalock);
	lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
			"2776 FCF rediscover quiescent timer expired\n");
	/* wake up worker thread */
	lpfc_worker_wake_up(phba);
}

static uint16_t
lpfc_sli4_parse_latt_fault(struct lpfc_hba *phba,
			   struct lpfc_acqe_link *acqe_link)
{
	uint16_t latt_fault;

	switch (bf_get(lpfc_acqe_link_fault, acqe_link)) {
	case LPFC_ASYNC_LINK_FAULT_NONE:
	case LPFC_ASYNC_LINK_FAULT_LOCAL:
	case LPFC_ASYNC_LINK_FAULT_REMOTE:
		latt_fault = 0;
		break;
	default:
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"0398 Invalid link fault code: x%x\n",
				bf_get(lpfc_acqe_link_fault, acqe_link));
		latt_fault = MBXERR_ERROR;
		break;
	}
	return latt_fault;
}

static uint8_t
lpfc_sli4_parse_latt_type(struct lpfc_hba *phba,
			  struct lpfc_acqe_link *acqe_link)
{
	uint8_t att_type;

	switch (bf_get(lpfc_acqe_link_status, acqe_link)) {
	case LPFC_ASYNC_LINK_STATUS_DOWN:
	case LPFC_ASYNC_LINK_STATUS_LOGICAL_DOWN:
		att_type = LPFC_ATT_LINK_DOWN;
		break;
	case LPFC_ASYNC_LINK_STATUS_UP:
		/* Ignore physical link up events - wait for logical link up */
		att_type = LPFC_ATT_RESERVED;
		break;
	case LPFC_ASYNC_LINK_STATUS_LOGICAL_UP:
		att_type = LPFC_ATT_LINK_UP;
		break;
	default:
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"0399 Invalid link attention type: x%x\n",
				bf_get(lpfc_acqe_link_status, acqe_link));
		att_type = LPFC_ATT_RESERVED;
		break;
	}
	return att_type;
}

static uint8_t
lpfc_sli4_parse_latt_link_speed(struct lpfc_hba *phba,
				struct lpfc_acqe_link *acqe_link)
{
	uint8_t link_speed;

	switch (bf_get(lpfc_acqe_link_speed, acqe_link)) {
	case LPFC_ASYNC_LINK_SPEED_ZERO:
	case LPFC_ASYNC_LINK_SPEED_10MBPS:
	case LPFC_ASYNC_LINK_SPEED_100MBPS:
		link_speed = LPFC_LINK_SPEED_UNKNOWN;
		break;
	case LPFC_ASYNC_LINK_SPEED_1GBPS:
		link_speed = LPFC_LINK_SPEED_1GHZ;
		break;
	case LPFC_ASYNC_LINK_SPEED_10GBPS:
		link_speed = LPFC_LINK_SPEED_10GHZ;
		break;
	default:
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"0483 Invalid link-attention link speed: x%x\n",
				bf_get(lpfc_acqe_link_speed, acqe_link));
		link_speed = LPFC_LINK_SPEED_UNKNOWN;
		break;
	}
	return link_speed;
}

uint32_t
lpfc_sli_port_speed_get(struct lpfc_hba *phba)
{
	uint32_t link_speed;

	if (!lpfc_is_link_up(phba))
		return 0;

	switch (phba->fc_linkspeed) {
	case LPFC_LINK_SPEED_1GHZ:
		link_speed = 1000;
		break;
	case LPFC_LINK_SPEED_2GHZ:
		link_speed = 2000;
		break;
	case LPFC_LINK_SPEED_4GHZ:
		link_speed = 4000;
		break;
	case LPFC_LINK_SPEED_8GHZ:
		link_speed = 8000;
		break;
	case LPFC_LINK_SPEED_10GHZ:
		link_speed = 10000;
		break;
	case LPFC_LINK_SPEED_16GHZ:
		link_speed = 16000;
		break;
	default:
		link_speed = 0;
	}
	return link_speed;
}

static uint32_t
lpfc_sli4_port_speed_parse(struct lpfc_hba *phba, uint32_t evt_code,
			   uint8_t speed_code)
{
	uint32_t port_speed;

	switch (evt_code) {
	case LPFC_TRAILER_CODE_LINK:
		switch (speed_code) {
		case LPFC_EVT_CODE_LINK_NO_LINK:
			port_speed = 0;
			break;
		case LPFC_EVT_CODE_LINK_10_MBIT:
			port_speed = 10;
			break;
		case LPFC_EVT_CODE_LINK_100_MBIT:
			port_speed = 100;
			break;
		case LPFC_EVT_CODE_LINK_1_GBIT:
			port_speed = 1000;
			break;
		case LPFC_EVT_CODE_LINK_10_GBIT:
			port_speed = 10000;
			break;
		default:
			port_speed = 0;
		}
		break;
	case LPFC_TRAILER_CODE_FC:
		switch (speed_code) {
		case LPFC_EVT_CODE_FC_NO_LINK:
			port_speed = 0;
			break;
		case LPFC_EVT_CODE_FC_1_GBAUD:
			port_speed = 1000;
			break;
		case LPFC_EVT_CODE_FC_2_GBAUD:
			port_speed = 2000;
			break;
		case LPFC_EVT_CODE_FC_4_GBAUD:
			port_speed = 4000;
			break;
		case LPFC_EVT_CODE_FC_8_GBAUD:
			port_speed = 8000;
			break;
		case LPFC_EVT_CODE_FC_10_GBAUD:
			port_speed = 10000;
			break;
		case LPFC_EVT_CODE_FC_16_GBAUD:
			port_speed = 16000;
			break;
		default:
			port_speed = 0;
		}
		break;
	default:
		port_speed = 0;
	}
	return port_speed;
}

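/**
 * lpfc_sli4_async_link_evt - Process the asynchronous FCoE link event
 * @phba: pointer to lpfc hba data structure.
 * @acqe_link: pointer to the async link completion queue entry.
 *
 * This routine is to handle the SLI4 asynchronous FCoE link event.
 **/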
static void
lpfc_sli4_async_link_evt(struct lpfc_hba *phba,
			 struct lpfc_acqe_link *acqe_link)
{
	struct lpfc_dmabuf *mp;
	LPFC_MBOXQ_t *pmb;
	MAILBOX_t *mb;
	struct lpfc_mbx_read_top *la;
	uint8_t att_type;
	int rc;

	att_type = lpfc_sli4_parse_latt_type(phba, acqe_link);
	if (att_type != LPFC_ATT_LINK_DOWN && att_type != LPFC_ATT_LINK_UP)
		return;
	phba->fcoe_eventtag = acqe_link->event_tag;
	pmb = (LPFC_MBOXQ_t *)mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!pmb) {
		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
				"0395 The mboxq allocation failed\n");
		return;
	}
	mp = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
	if (!mp) {
		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
				"0396 The lpfc_dmabuf allocation failed\n");
		goto out_free_pmb;
	}
	mp->virt = lpfc_mbuf_alloc(phba, 0, &mp->phys);
	if (!mp->virt) {
		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
				"0397 The mbuf allocation failed\n");
		goto out_free_dmabuf;
	}

	/* Cleanup any outstanding ELS commands */
	lpfc_els_flush_all_cmd(phba);

	/* Block ELS IOCBs until we have done process link event */
	phba->sli.ring[LPFC_ELS_RING].flag |= LPFC_STOP_IOCB_EVENT;

	/* Update link event statistics */
	phba->sli.slistat.link_event++;

	/* Create lpfc_handle_latt mailbox command from link ACQE */
	lpfc_read_topology(phba, pmb, mp);
	pmb->mbox_cmpl = lpfc_mbx_cmpl_read_topology;
	pmb->vport = phba->pport;

	/* Keep the link status for extra SLI4 state machine reference */
	phba->sli4_hba.link_state.speed =
		lpfc_sli4_port_speed_parse(phba, LPFC_TRAILER_CODE_LINK,
				bf_get(lpfc_acqe_link_speed, acqe_link));
	phba->sli4_hba.link_state.duplex =
				bf_get(lpfc_acqe_link_duplex, acqe_link);
	phba->sli4_hba.link_state.status =
				bf_get(lpfc_acqe_link_status, acqe_link);
	phba->sli4_hba.link_state.type =
				bf_get(lpfc_acqe_link_type, acqe_link);
	phba->sli4_hba.link_state.number =
				bf_get(lpfc_acqe_link_number, acqe_link);
	phba->sli4_hba.link_state.fault =
				bf_get(lpfc_acqe_link_fault, acqe_link);
	phba->sli4_hba.link_state.logical_speed =
			bf_get(lpfc_acqe_logical_link_speed, acqe_link) * 10;

	lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
			"2900 Async FC/FCoE Link event - Speed:%dGBit "
			"duplex:x%x LA Type:x%x Port Type:%d Port Number:%d "
			"Logical speed:%dMbps Fault:%d\n",
			phba->sli4_hba.link_state.speed,
			phba->sli4_hba.link_state.duplex,
			phba->sli4_hba.link_state.status,
			phba->sli4_hba.link_state.type,
			phba->sli4_hba.link_state.number,
			phba->sli4_hba.link_state.logical_speed,
			phba->sli4_hba.link_state.fault);
	/*
	 * For FC Mode: issue the READ_TOPOLOGY mailbox command to fetch the
	 * topology info. Note: Optional for non FC-AL ports.
	 */
	if (!(phba->hba_flag & HBA_FCOE_MODE)) {
		rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT);
		if (rc == MBX_NOT_FINISHED)
			goto out_free_dmabuf;
		return;
	}
	/*
	 * For FCoE Mode: fill in all the topology information we need and call
	 * the READ_TOPOLOGY completion routine to continue without actually
	 * sending the READ_TOPOLOGY mailbox command to the port.
	 */
	/* Parse and translate status field */
	mb = &pmb->u.mb;
	mb->mbxStatus = lpfc_sli4_parse_latt_fault(phba, acqe_link);

	/* Parse and translate link attention fields */
	la = (struct lpfc_mbx_read_top *) &pmb->u.mb.un.varReadTop;
	la->eventTag = acqe_link->event_tag;
	bf_set(lpfc_mbx_read_top_att_type, la, att_type);
	bf_set(lpfc_mbx_read_top_link_spd, la,
	       lpfc_sli4_parse_latt_link_speed(phba, acqe_link));

	/* Fake the following irrelevant fields */
	bf_set(lpfc_mbx_read_top_topology, la, LPFC_TOPOLOGY_PT_PT);
	bf_set(lpfc_mbx_read_top_alpa_granted, la, 0);
	bf_set(lpfc_mbx_read_top_il, la, 0);
	bf_set(lpfc_mbx_read_top_pb, la, 0);
	bf_set(lpfc_mbx_read_top_fa, la, 0);
	bf_set(lpfc_mbx_read_top_mm, la, 0);

	/* Invoke the lpfc_handle_latt mailbox command callback function */
	lpfc_mbx_cmpl_read_topology(phba, pmb);

	return;

out_free_dmabuf:
	kfree(mp);
out_free_pmb:
	mempool_free(pmb, phba->mbox_mem_pool);
}

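/**
 * lpfc_sli4_async_fc_evt - Process the asynchronous FC link event
 * @phba: pointer to lpfc hba data structure.
 * @acqe_fc: pointer to the async fc completion queue entry.
 *
 * This routine is to handle the SLI4 asynchronous FC event. It logs the
 * event and then issues a READ_TOPOLOGY mailbox command so that the rest
 * of the driver treats the link event the same way as SLI3.
 **/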
static void
lpfc_sli4_async_fc_evt(struct lpfc_hba *phba, struct lpfc_acqe_fc_la *acqe_fc)
{
	struct lpfc_dmabuf *mp;
	LPFC_MBOXQ_t *pmb;
	int rc;

	if (bf_get(lpfc_trailer_type, acqe_fc) !=
	    LPFC_FC_LA_EVENT_TYPE_FC_LINK) {
		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
				"2895 Non FC link Event detected.(%d)\n",
				bf_get(lpfc_trailer_type, acqe_fc));
		return;
	}
	/* Keep the link status for extra SLI4 state machine reference */
	phba->sli4_hba.link_state.speed =
		lpfc_sli4_port_speed_parse(phba, LPFC_TRAILER_CODE_FC,
				bf_get(lpfc_acqe_fc_la_speed, acqe_fc));
	phba->sli4_hba.link_state.duplex = LPFC_ASYNC_LINK_DUPLEX_FULL;
	phba->sli4_hba.link_state.topology =
				bf_get(lpfc_acqe_fc_la_topology, acqe_fc);
	phba->sli4_hba.link_state.status =
				bf_get(lpfc_acqe_fc_la_att_type, acqe_fc);
	phba->sli4_hba.link_state.type =
				bf_get(lpfc_acqe_fc_la_port_type, acqe_fc);
	phba->sli4_hba.link_state.number =
				bf_get(lpfc_acqe_fc_la_port_number, acqe_fc);
	phba->sli4_hba.link_state.fault =
				bf_get(lpfc_acqe_link_fault, acqe_fc);
	phba->sli4_hba.link_state.logical_speed =
				bf_get(lpfc_acqe_fc_la_llink_spd, acqe_fc) * 10;
	lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
			"2896 Async FC event - Speed:%dGBaud Topology:x%x "
			"LA Type:x%x Port Type:%d Port Number:%d Logical speed:"
			"%dMbps Fault:%d\n",
			phba->sli4_hba.link_state.speed,
			phba->sli4_hba.link_state.topology,
			phba->sli4_hba.link_state.status,
			phba->sli4_hba.link_state.type,
			phba->sli4_hba.link_state.number,
			phba->sli4_hba.link_state.logical_speed,
			phba->sli4_hba.link_state.fault);
	pmb = (LPFC_MBOXQ_t *)mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!pmb) {
		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
				"2897 The mboxq allocation failed\n");
		return;
	}
	mp = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
	if (!mp) {
		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
				"2898 The lpfc_dmabuf allocation failed\n");
		goto out_free_pmb;
	}
	mp->virt = lpfc_mbuf_alloc(phba, 0, &mp->phys);
	if (!mp->virt) {
		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
				"2899 The mbuf allocation failed\n");
		goto out_free_dmabuf;
	}

	/* Cleanup any outstanding ELS commands */
	lpfc_els_flush_all_cmd(phba);

	/* Block ELS IOCBs until we have done process link event */
	phba->sli.ring[LPFC_ELS_RING].flag |= LPFC_STOP_IOCB_EVENT;

	/* Update link event statistics */
	phba->sli.slistat.link_event++;

	/* Create lpfc_handle_latt mailbox command from link ACQE */
	lpfc_read_topology(phba, pmb, mp);
	pmb->mbox_cmpl = lpfc_mbx_cmpl_read_topology;
	pmb->vport = phba->pport;

	rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT);
	if (rc == MBX_NOT_FINISHED)
		goto out_free_dmabuf;
	return;

out_free_dmabuf:
	kfree(mp);
out_free_pmb:
	mempool_free(pmb, phba->mbox_mem_pool);
}

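/**
 * lpfc_sli4_async_sli_evt - Process the asynchronous SLI link event
 * @phba: pointer to lpfc hba data structure.
 * @acqe_sli: pointer to the async SLI completion queue entry.
 *
 * This routine is to handle the SLI4 asynchronous SLI events.
 **/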
static void
lpfc_sli4_async_sli_evt(struct lpfc_hba *phba, struct lpfc_acqe_sli *acqe_sli)
{
	char port_name;
	char message[128];
	uint8_t status;
	struct lpfc_acqe_misconfigured_event *misconfigured;

	/* Only handle SLI port misconfigured events on if_type 2 ports */
	if ((bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) !=
		LPFC_SLI_INTF_IF_TYPE_2) ||
	    (bf_get(lpfc_trailer_type, acqe_sli) !=
		LPFC_SLI_EVENT_TYPE_MISCONFIGURED)) {
		lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
				"2901 Async SLI event - Event Data1:x%08x Event Data2:"
				"x%08x SLI Event Type:%d\n",
				acqe_sli->event_data1, acqe_sli->event_data2,
				bf_get(lpfc_trailer_type, acqe_sli));
		return;
	}

	port_name = phba->Port[0];
	if (port_name == 0x00)
		port_name = '?'; /* get port name is empty */

	misconfigured = (struct lpfc_acqe_misconfigured_event *)
					&acqe_sli->event_data1;

	/* fetch the status for this port */
	switch (phba->sli4_hba.lnk_info.lnk_no) {
	case LPFC_LINK_NUMBER_0:
		status = bf_get(lpfc_sli_misconfigured_port0,
					&misconfigured->theEvent);
		break;
	case LPFC_LINK_NUMBER_1:
		status = bf_get(lpfc_sli_misconfigured_port1,
					&misconfigured->theEvent);
		break;
	case LPFC_LINK_NUMBER_2:
		status = bf_get(lpfc_sli_misconfigured_port2,
					&misconfigured->theEvent);
		break;
	case LPFC_LINK_NUMBER_3:
		status = bf_get(lpfc_sli_misconfigured_port3,
					&misconfigured->theEvent);
		break;
	default:
		status = ~LPFC_SLI_EVENT_STATUS_VALID;
		break;
	}

	switch (status) {
	case LPFC_SLI_EVENT_STATUS_VALID:
		return; /* no message if the sfp is okay */
	case LPFC_SLI_EVENT_STATUS_NOT_PRESENT:
		sprintf(message, "Optics faulted/incorrectly installed/not "
			"installed - Reseat optics, if issue not "
			"resolved, replace.");
		break;
	case LPFC_SLI_EVENT_STATUS_WRONG_TYPE:
		sprintf(message,
			"Optics of two types installed - Remove one optic or "
			"install matching pair of optics.");
		break;
	case LPFC_SLI_EVENT_STATUS_UNSUPPORTED:
		sprintf(message, "Incompatible optics - Replace with "
			"compatible optics for card to function.");
		break;
	default:
		/* firmware is reporting a status we don't know about */
		sprintf(message, "Unknown event status x%02x", status);
		break;
	}

	lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
			"3176 Misconfigured Physical Port - "
			"Port Name %c %s\n", port_name, message);
}

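/**
 * lpfc_sli4_perform_vport_cvl - Perform clear virtual link on a vport
 * @vport: pointer to vport data structure.
 *
 * This routine is to perform Clear Virtual Link (CVL) on a vport in
 * response to a CVL event.
 *
 * Return the pointer to the ndlp with the vport if successful, otherwise
 * return NULL.
 **/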
static struct lpfc_nodelist *
lpfc_sli4_perform_vport_cvl(struct lpfc_vport *vport)
{
	struct lpfc_nodelist *ndlp;
	struct Scsi_Host *shost;
	struct lpfc_hba *phba;

	if (!vport)
		return NULL;
	phba = vport->phba;
	if (!phba)
		return NULL;
	ndlp = lpfc_findnode_did(vport, Fabric_DID);
	if (!ndlp) {
		/* Cannot find existing Fabric ndlp, so allocate a new one */
		ndlp = mempool_alloc(phba->nlp_mem_pool, GFP_KERNEL);
		if (!ndlp)
			return NULL;
		lpfc_nlp_init(vport, ndlp, Fabric_DID);
		/* Set the node type */
		ndlp->nlp_type |= NLP_FABRIC;
		/* Put ndlp onto node list */
		lpfc_enqueue_node(vport, ndlp);
	} else if (!NLP_CHK_NODE_ACT(ndlp)) {
		/* re-setup ndlp without removing from node list */
		ndlp = lpfc_enable_node(vport, ndlp, NLP_STE_UNUSED_NODE);
		if (!ndlp)
			return NULL;
	}
	if ((phba->pport->port_state < LPFC_FLOGI) &&
	    (phba->pport->port_state != LPFC_VPORT_FAILED))
		return NULL;
	/* If virtual link is not yet instantiated ignore CVL */
	if ((vport != phba->pport) && (vport->port_state < LPFC_FDISC)
	    && (vport->port_state != LPFC_VPORT_FAILED))
		return NULL;
	shost = lpfc_shost_from_vport(vport);
	if (!shost)
		return NULL;
	lpfc_linkdown_port(vport);
	lpfc_cleanup_pending_mbox(vport);
	spin_lock_irq(shost->host_lock);
	vport->fc_flag |= FC_VPORT_CVL_RCVD;
	spin_unlock_irq(shost->host_lock);

	return ndlp;
}

static void
lpfc_sli4_perform_all_vport_cvl(struct lpfc_hba *phba)
{
	struct lpfc_vport **vports;
	int i;

	vports = lpfc_create_vport_work_array(phba);
	if (vports)
		for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++)
			lpfc_sli4_perform_vport_cvl(vports[i]);
	lpfc_destroy_vport_work_array(phba, vports);
}

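/**
 * lpfc_sli4_async_fip_evt - Process the asynchronous FCoE FIP event
 * @phba: pointer to lpfc hba data structure.
 * @acqe_fip: pointer to the async fcoe completion queue entry.
 *
 * This routine is to handle the SLI4 asynchronous fcoe (FIP) event.
 **/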
static void
lpfc_sli4_async_fip_evt(struct lpfc_hba *phba,
			struct lpfc_acqe_fip *acqe_fip)
{
	uint8_t event_type = bf_get(lpfc_trailer_type, acqe_fip);
	int rc;
	struct lpfc_vport *vport;
	struct lpfc_nodelist *ndlp;
	struct Scsi_Host *shost;
	int active_vlink_present;
	struct lpfc_vport **vports;
	int i;

	phba->fc_eventTag = acqe_fip->event_tag;
	phba->fcoe_eventtag = acqe_fip->event_tag;
	switch (event_type) {
	case LPFC_FIP_EVENT_TYPE_NEW_FCF:
	case LPFC_FIP_EVENT_TYPE_FCF_PARAM_MOD:
		if (event_type == LPFC_FIP_EVENT_TYPE_NEW_FCF)
			lpfc_printf_log(phba, KERN_ERR, LOG_FIP |
					LOG_DISCOVERY,
					"2546 New FCF event, evt_tag:x%x, "
					"index:x%x\n",
					acqe_fip->event_tag,
					acqe_fip->index);
		else
			lpfc_printf_log(phba, KERN_WARNING, LOG_FIP |
					LOG_DISCOVERY,
					"2788 FCF param modified event, "
					"evt_tag:x%x, index:x%x\n",
					acqe_fip->event_tag,
					acqe_fip->index);
		if (phba->fcf.fcf_flag & FCF_DISCOVERY) {
			/*
			 * During period of FCF discovery, read the FCF
			 * table record indexed by the event to update
			 * FCF roundrobin failover eligible FCF bmask.
			 */
			lpfc_printf_log(phba, KERN_INFO, LOG_FIP |
					LOG_DISCOVERY,
					"2779 Read FCF (x%x) for updating "
					"roundrobin FCF failover bmask\n",
					acqe_fip->index);
			rc = lpfc_sli4_read_fcf_rec(phba, acqe_fip->index);
		}

		/* If the FCF discovery is in progress, do nothing. */
		spin_lock_irq(&phba->hbalock);
		if (phba->hba_flag & FCF_TS_INPROG) {
			spin_unlock_irq(&phba->hbalock);
			break;
		}
		/* If fast FCF failover rescan event is pending, do nothing */
		if (phba->fcf.fcf_flag & FCF_REDISC_EVT) {
			spin_unlock_irq(&phba->hbalock);
			break;
		}

		/* If the FCF has been in discovered state, do nothing. */
		if (phba->fcf.fcf_flag & FCF_SCAN_DONE) {
			spin_unlock_irq(&phba->hbalock);
			break;
		}
		spin_unlock_irq(&phba->hbalock);

		/* Otherwise, scan the entire FCF table and re-discover SAN */
		lpfc_printf_log(phba, KERN_INFO, LOG_FIP | LOG_DISCOVERY,
				"2770 Start FCF table scan per async FCF "
				"event, evt_tag:x%x, index:x%x\n",
				acqe_fip->event_tag, acqe_fip->index);
		rc = lpfc_sli4_fcf_scan_read_fcf_rec(phba,
						     LPFC_FCOE_FCF_GET_FIRST);
		if (rc)
			lpfc_printf_log(phba, KERN_ERR, LOG_FIP | LOG_DISCOVERY,
					"2547 Issue FCF scan read FCF mailbox "
					"command failed (x%x)\n", rc);
		break;

	case LPFC_FIP_EVENT_TYPE_FCF_TABLE_FULL:
		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
			"2548 FCF Table full count 0x%x tag 0x%x\n",
			bf_get(lpfc_acqe_fip_fcf_count, acqe_fip),
			acqe_fip->event_tag);
		break;

	case LPFC_FIP_EVENT_TYPE_FCF_DEAD:
		phba->fcoe_cvl_eventtag = acqe_fip->event_tag;
		lpfc_printf_log(phba, KERN_ERR, LOG_FIP | LOG_DISCOVERY,
			"2549 FCF (x%x) disconnected from network, "
			"tag:x%x\n", acqe_fip->index, acqe_fip->event_tag);
		/*
		 * If we are in the middle of FCF failover process, clear
		 * the corresponding FCF bit in the roundrobin bitmap.
		 */
		spin_lock_irq(&phba->hbalock);
		if (phba->fcf.fcf_flag & FCF_DISCOVERY) {
			spin_unlock_irq(&phba->hbalock);
			/* Update FLOGI FCF failover eligible FCF bmask */
			lpfc_sli4_fcf_rr_index_clear(phba, acqe_fip->index);
			break;
		}
		spin_unlock_irq(&phba->hbalock);

		/* If the event is not for currently used fcf do nothing */
		if (phba->fcf.current_rec.fcf_indx != acqe_fip->index)
			break;

		/*
		 * Otherwise, request the port to rediscover the entire FCF
		 * table for a fast recovery from case that the current FCF
		 * is no longer valid as we are not in the middle of FCF
		 * failover process already.
		 */
		spin_lock_irq(&phba->hbalock);
		/* Mark the fast failover process in progress */
		phba->fcf.fcf_flag |= FCF_DEAD_DISC;
		spin_unlock_irq(&phba->hbalock);

		lpfc_printf_log(phba, KERN_INFO, LOG_FIP | LOG_DISCOVERY,
				"2771 Start FCF fast failover process due to "
				"FCF DEAD event: evt_tag:x%x, fcf_index:x%x "
				"\n", acqe_fip->event_tag, acqe_fip->index);
		rc = lpfc_sli4_redisc_fcf_table(phba);
		if (rc) {
			lpfc_printf_log(phba, KERN_ERR, LOG_FIP |
					LOG_DISCOVERY,
					"2772 Issue FCF rediscover mailbox "
					"command failed, fail through to FCF "
					"dead event\n");
			spin_lock_irq(&phba->hbalock);
			phba->fcf.fcf_flag &= ~FCF_DEAD_DISC;
			spin_unlock_irq(&phba->hbalock);
			/*
			 * Last resort will fail over by treating this
			 * as a link down to FCF registration.
			 */
			lpfc_sli4_fcf_dead_failthrough(phba);
		} else {
			/* Reset FCF roundrobin bmask for new discovery */
			lpfc_sli4_clear_fcf_rr_bmask(phba);
			/*
			 * Handling fast FCF failover to a DEAD FCF event is
			 * considered equivalent to receiving CVL to all vports.
			 */
			lpfc_sli4_perform_all_vport_cvl(phba);
		}
		break;
	case LPFC_FIP_EVENT_TYPE_CVL:
		phba->fcoe_cvl_eventtag = acqe_fip->event_tag;
		lpfc_printf_log(phba, KERN_ERR, LOG_FIP | LOG_DISCOVERY,
			"2718 Clear Virtual Link Received for VPI 0x%x"
			" tag 0x%x\n", acqe_fip->index, acqe_fip->event_tag);

		vport = lpfc_find_vport_by_vpid(phba,
						acqe_fip->index);
		ndlp = lpfc_sli4_perform_vport_cvl(vport);
		if (!ndlp)
			break;
		active_vlink_present = 0;

		vports = lpfc_create_vport_work_array(phba);
		if (vports) {
			for (i = 0; i <= phba->max_vports && vports[i] != NULL;
					i++) {
				if ((!(vports[i]->fc_flag &
					FC_VPORT_CVL_RCVD)) &&
					(vports[i]->port_state > LPFC_FDISC)) {
					active_vlink_present = 1;
					break;
				}
			}
			lpfc_destroy_vport_work_array(phba, vports);
		}

		if (active_vlink_present) {
			/*
			 * If there are other active VLinks present,
			 * re-instantiate the Vlink using FDISC.
			 */
			mod_timer(&ndlp->nlp_delayfunc,
				  jiffies + msecs_to_jiffies(1000));
			shost = lpfc_shost_from_vport(vport);
			spin_lock_irq(shost->host_lock);
			ndlp->nlp_flag |= NLP_DELAY_TMO;
			spin_unlock_irq(shost->host_lock);
			ndlp->nlp_last_elscmd = ELS_CMD_FDISC;
			vport->port_state = LPFC_FDISC;
		} else {
			/*
			 * Otherwise, we request port to rediscover
			 * the entire FCF table for a fast recovery
			 * from possible case that the current FCF
			 * is no longer valid if we are not already
			 * in the FCF failover process.
			 */
			spin_lock_irq(&phba->hbalock);
			if (phba->fcf.fcf_flag & FCF_DISCOVERY) {
				spin_unlock_irq(&phba->hbalock);
				break;
			}
			/* Mark the fast failover process in progress */
			phba->fcf.fcf_flag |= FCF_ACVL_DISC;
			spin_unlock_irq(&phba->hbalock);
			lpfc_printf_log(phba, KERN_INFO, LOG_FIP |
					LOG_DISCOVERY,
					"2773 Start FCF failover per CVL, "
					"evt_tag:x%x\n", acqe_fip->event_tag);
			rc = lpfc_sli4_redisc_fcf_table(phba);
			if (rc) {
				lpfc_printf_log(phba, KERN_ERR, LOG_FIP |
						LOG_DISCOVERY,
						"2774 Issue FCF rediscover "
						"mailbox command failed, "
						"fail through to CVL event\n");
				spin_lock_irq(&phba->hbalock);
				phba->fcf.fcf_flag &= ~FCF_ACVL_DISC;
				spin_unlock_irq(&phba->hbalock);
				/*
				 * Last resort will be re-try on the
				 * current registered FCF entry.
				 */
				lpfc_retry_pport_discovery(phba);
			} else
				/*
				 * Reset FCF roundrobin bmask for new
				 * discovery.
				 */
				lpfc_sli4_clear_fcf_rr_bmask(phba);
		}
		break;
	default:
		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
			"0288 Unknown FCoE event type 0x%x event tag "
			"0x%x\n", event_type, acqe_fip->event_tag);
		break;
	}
}

static void
lpfc_sli4_async_dcbx_evt(struct lpfc_hba *phba,
			 struct lpfc_acqe_dcbx *acqe_dcbx)
{
	phba->fc_eventTag = acqe_dcbx->event_tag;
	lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
			"0290 The SLI4 DCBX asynchronous event is not "
			"handled yet\n");
}

static void
lpfc_sli4_async_grp5_evt(struct lpfc_hba *phba,
			 struct lpfc_acqe_grp5 *acqe_grp5)
{
	uint16_t prev_ll_spd;

	phba->fc_eventTag = acqe_grp5->event_tag;
	phba->fcoe_eventtag = acqe_grp5->event_tag;
	prev_ll_spd = phba->sli4_hba.link_state.logical_speed;
	phba->sli4_hba.link_state.logical_speed =
		(bf_get(lpfc_acqe_grp5_llink_spd, acqe_grp5)) * 10;
	lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
			"2789 GRP5 Async Event: Updating logical link speed "
			"from %dMbps to %dMbps\n", prev_ll_spd,
			phba->sli4_hba.link_state.logical_speed);
}

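/**
 * lpfc_sli4_async_event_proc - Process all the pending asynchronous events
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked by the worker thread to process all the pending
 * SLI4 asynchronous events.
 **/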
void lpfc_sli4_async_event_proc(struct lpfc_hba *phba)
{
	struct lpfc_cq_event *cq_event;

	/* First, declare the async event has been handled */
	spin_lock_irq(&phba->hbalock);
	phba->hba_flag &= ~ASYNC_EVENT;
	spin_unlock_irq(&phba->hbalock);
	/* Now, handle all the async events */
	while (!list_empty(&phba->sli4_hba.sp_asynce_work_queue)) {
		/* Get the first event from the head of the event queue */
		spin_lock_irq(&phba->hbalock);
		list_remove_head(&phba->sli4_hba.sp_asynce_work_queue,
				 cq_event, struct lpfc_cq_event, list);
		spin_unlock_irq(&phba->hbalock);
		/* Process the asynchronous event */
		switch (bf_get(lpfc_trailer_code, &cq_event->cqe.mcqe_cmpl)) {
		case LPFC_TRAILER_CODE_LINK:
			lpfc_sli4_async_link_evt(phba,
						 &cq_event->cqe.acqe_link);
			break;
		case LPFC_TRAILER_CODE_FCOE:
			lpfc_sli4_async_fip_evt(phba, &cq_event->cqe.acqe_fip);
			break;
		case LPFC_TRAILER_CODE_DCBX:
			lpfc_sli4_async_dcbx_evt(phba,
						 &cq_event->cqe.acqe_dcbx);
			break;
		case LPFC_TRAILER_CODE_GRP5:
			lpfc_sli4_async_grp5_evt(phba,
						 &cq_event->cqe.acqe_grp5);
			break;
		case LPFC_TRAILER_CODE_FC:
			lpfc_sli4_async_fc_evt(phba, &cq_event->cqe.acqe_fc);
			break;
		case LPFC_TRAILER_CODE_SLI:
			lpfc_sli4_async_sli_evt(phba, &cq_event->cqe.acqe_sli);
			break;
		default:
			lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
					"1804 Invalid asynchronous event code: "
					"x%x\n", bf_get(lpfc_trailer_code,
					&cq_event->cqe.mcqe_cmpl));
			break;
		}
		/* Free the completion event processed to the free pool */
		lpfc_sli4_cq_event_release(phba, cq_event);
	}
}

void lpfc_sli4_fcf_redisc_event_proc(struct lpfc_hba *phba)
{
	int rc;

	spin_lock_irq(&phba->hbalock);
	/* Clear FCF rediscovery timeout event */
	phba->fcf.fcf_flag &= ~FCF_REDISC_EVT;
	/* Clear driver fast failover FCF record flag */
	phba->fcf.failover_rec.flag = 0;
	/* Set state for FCF fast failover */
	phba->fcf.fcf_flag |= FCF_REDISC_FOV;
	spin_unlock_irq(&phba->hbalock);

	/* Scan FCF table from the first entry to re-discover SAN */
	lpfc_printf_log(phba, KERN_INFO, LOG_FIP | LOG_DISCOVERY,
			"2777 Start post-quiescent FCF table scan\n");
	rc = lpfc_sli4_fcf_scan_read_fcf_rec(phba, LPFC_FCOE_FCF_GET_FIRST);
	if (rc)
		lpfc_printf_log(phba, KERN_ERR, LOG_FIP | LOG_DISCOVERY,
				"2747 Issue FCF scan read FCF mailbox "
				"command failed 0x%x\n", rc);
}

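/**
 * lpfc_api_table_setup - Set up per hba pci-device group func api jump table
 * @phba: pointer to lpfc hba data structure.
 * @dev_grp: The HBA PCI-Device group number.
 *
 * This routine is invoked to set up the per HBA PCI-Device group function
 * API jump table entries.
 *
 * Return: 0 if success, otherwise -ENODEV
 **/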
int
lpfc_api_table_setup(struct lpfc_hba *phba, uint8_t dev_grp)
{
	int rc;

	/* Set up lpfc PCI-device group */
	phba->pci_dev_grp = dev_grp;

	/* The LPFC_PCI_DEV_OC uses SLI4 */
	if (dev_grp == LPFC_PCI_DEV_OC)
		phba->sli_rev = LPFC_SLI_REV4;

	/* Set up device INIT API function jump table */
	rc = lpfc_init_api_table_setup(phba, dev_grp);
	if (rc)
		return -ENODEV;
	/* Set up SCSI API function jump table */
	rc = lpfc_scsi_api_table_setup(phba, dev_grp);
	if (rc)
		return -ENODEV;
	/* Set up SLI API function jump table */
	rc = lpfc_sli_api_table_setup(phba, dev_grp);
	if (rc)
		return -ENODEV;
	/* Set up MBOX API function jump table */
	rc = lpfc_mbox_api_table_setup(phba, dev_grp);
	if (rc)
		return -ENODEV;

	return 0;
}

static void lpfc_log_intr_mode(struct lpfc_hba *phba, uint32_t intr_mode)
{
	switch (intr_mode) {
	case 0:
		lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
				"0470 Enable INTx interrupt mode.\n");
		break;
	case 1:
		lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
				"0481 Enabled MSI interrupt mode.\n");
		break;
	case 2:
		lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
				"0480 Enabled MSI-X interrupt mode.\n");
		break;
	default:
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"0482 Illegal interrupt mode.\n");
		break;
	}
	return;
}

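/**
 * lpfc_enable_pci_dev - Enable a generic PCI device
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked to enable the PCI device that is common to all
 * PCI devices.
 *
 * Return codes
 *   0 - successful
 *   other values - error
 **/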
static int
lpfc_enable_pci_dev(struct lpfc_hba *phba)
{
	struct pci_dev *pdev;
	int bars = 0;

	/* Obtain PCI device reference */
	if (!phba->pcidev)
		goto out_error;
	else
		pdev = phba->pcidev;
	/* Select PCI BARs */
	bars = pci_select_bars(pdev, IORESOURCE_MEM);
	/* Enable PCI device */
	if (pci_enable_device_mem(pdev))
		goto out_error;
	/* Request PCI resource for the device */
	if (pci_request_selected_regions(pdev, bars, LPFC_DRIVER_NAME))
		goto out_disable_device;
	/* Set up device as PCI master and save state for EEH */
	pci_set_master(pdev);
	pci_try_set_mwi(pdev);
	pci_save_state(pdev);

	/* PCIe EEH recovery on powerpc platforms needs fundamental reset */
	if (pci_find_capability(pdev, PCI_CAP_ID_EXP))
		pdev->needs_freset = 1;

	return 0;

out_disable_device:
	pci_disable_device(pdev);
out_error:
	lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
			"1401 Failed to enable pci device, bars:x%x\n", bars);
	return -ENODEV;
}

static void
lpfc_disable_pci_dev(struct lpfc_hba *phba)
{
	struct pci_dev *pdev;
	int bars;

	/* Obtain PCI device reference */
	if (!phba->pcidev)
		return;
	else
		pdev = phba->pcidev;
	/* Select PCI BARs */
	bars = pci_select_bars(pdev, IORESOURCE_MEM);
	/* Release PCI resource and disable PCI device */
	pci_release_selected_regions(pdev, bars);
	pci_disable_device(pdev);
	/* Null out PCI private reference to driver */
	pci_set_drvdata(pdev, NULL);

	return;
}

void
lpfc_reset_hba(struct lpfc_hba *phba)
{
	/* If resets are disabled then set error state and return. */
	if (!phba->cfg_enable_hba_reset) {
		phba->link_state = LPFC_HBA_ERROR;
		return;
	}
	lpfc_offline_prep(phba, LPFC_MBX_WAIT);
	lpfc_offline(phba);
	lpfc_sli_brdrestart(phba);
	lpfc_online(phba);
	lpfc_unblock_mgmt_io(phba);
}

uint16_t
lpfc_sli_sriov_nr_virtfn_get(struct lpfc_hba *phba)
{
	struct pci_dev *pdev = phba->pcidev;
	uint16_t nr_virtfn;
	int pos;

	pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_SRIOV);
	if (pos == 0)
		return 0;

	pci_read_config_word(pdev, pos + PCI_SRIOV_TOTAL_VF, &nr_virtfn);
	return nr_virtfn;
}

int
lpfc_sli_probe_sriov_nr_virtfn(struct lpfc_hba *phba, int nr_vfn)
{
	struct pci_dev *pdev = phba->pcidev;
	uint16_t max_nr_vfn;
	int rc;

	max_nr_vfn = lpfc_sli_sriov_nr_virtfn_get(phba);
	if (nr_vfn > max_nr_vfn) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"3057 Requested vfs (%d) greater than "
				"supported vfs (%d)\n", nr_vfn, max_nr_vfn);
		return -EINVAL;
	}

	rc = pci_enable_sriov(pdev, nr_vfn);
	if (rc) {
		lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
				"2806 Failed to enable sriov on this device "
				"with vfn number nr_vf:%d, rc:%d\n",
				nr_vfn, rc);
	} else
		lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
				"2807 Successfully enabled sriov on this "
				"device with vfn number nr_vf:%d\n", nr_vfn);
	return rc;
}

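/**
 * lpfc_sli_driver_resource_setup - Setup driver internal resources for SLI3 dev
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked to set up the driver internal resources specific to
 * support the SLI-3 HBA device it is attached to.
 *
 * Return codes
 *   0 - successful
 *   other values - error
 **/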
static int
lpfc_sli_driver_resource_setup(struct lpfc_hba *phba)
{
	struct lpfc_sli *psli;
	int rc;

	/*
	 * Initialize timers used by driver
	 */

	/* Heartbeat timer */
	init_timer(&phba->hb_tmofunc);
	phba->hb_tmofunc.function = lpfc_hb_timeout;
	phba->hb_tmofunc.data = (unsigned long)phba;

	psli = &phba->sli;
	/* MBOX heartbeat timer */
	init_timer(&psli->mbox_tmo);
	psli->mbox_tmo.function = lpfc_mbox_timeout;
	psli->mbox_tmo.data = (unsigned long) phba;
	/* FCP polling mode timer */
	init_timer(&phba->fcp_poll_timer);
	phba->fcp_poll_timer.function = lpfc_poll_timeout;
	phba->fcp_poll_timer.data = (unsigned long) phba;
	/* Fabric block timer */
	init_timer(&phba->fabric_block_timer);
	phba->fabric_block_timer.function = lpfc_fabric_block_timeout;
	phba->fabric_block_timer.data = (unsigned long) phba;
	/* EA polling mode timer */
	init_timer(&phba->eratt_poll);
	phba->eratt_poll.function = lpfc_poll_eratt;
	phba->eratt_poll.data = (unsigned long) phba;

	/* Host attention work mask setup */
	phba->work_ha_mask = (HA_ERATT | HA_MBATT | HA_LATT);
	phba->work_ha_mask |= (HA_RXMASK << (LPFC_ELS_RING * 4));

	/* Get all the module params for configuring this host */
	lpfc_get_cfgparam(phba);
	if (phba->pcidev->device == PCI_DEVICE_ID_HORNET) {
		phba->menlo_flag |= HBA_MENLO_SUPPORT;
		/* check for menlo minimum sg count */
		if (phba->cfg_sg_seg_cnt < LPFC_DEFAULT_MENLO_SG_SEG_CNT)
			phba->cfg_sg_seg_cnt = LPFC_DEFAULT_MENLO_SG_SEG_CNT;
	}

	if (!phba->sli.ring)
		phba->sli.ring = (struct lpfc_sli_ring *)
			kzalloc(LPFC_SLI3_MAX_RING *
			sizeof(struct lpfc_sli_ring), GFP_KERNEL);
	if (!phba->sli.ring)
		return -ENOMEM;

	/*
	 * Since lpfc_sg_seg_cnt is module parameter, the sg_dma_buf_size
	 * used to create the sg_dma_buf_pool must be dynamically calculated.
	 */

	/* Initialize the host templates with the configured values. */
	lpfc_vport_template.sg_tablesize = phba->cfg_sg_seg_cnt;
	lpfc_template.sg_tablesize = phba->cfg_sg_seg_cnt;

	/* There are going to be 2 reserved BDEs: 1 FCP cmnd + 1 FCP rsp */
	if (phba->cfg_enable_bg) {
		/*
		 * The scsi_buf for a T10-DIF I/O will hold the FCP cmnd,
		 * the FCP rsp, and a BDE for each. Since we have no control
		 * over how many protection data segments the SCSI Layer
		 * will hand us (ie: there could be one for every block
		 * in the IO), we just allocate enough BDEs to accommodate
		 * our max amount and we need to limit lpfc_sg_seg_cnt to
		 * minimize the risk of running out.
		 */
		phba->cfg_sg_dma_buf_size = sizeof(struct fcp_cmnd) +
			sizeof(struct fcp_rsp) +
			(LPFC_MAX_SG_SEG_CNT * sizeof(struct ulp_bde64));

		if (phba->cfg_sg_seg_cnt > LPFC_MAX_SG_SEG_CNT_DIF)
			phba->cfg_sg_seg_cnt = LPFC_MAX_SG_SEG_CNT_DIF;

		/* Total BDEs in BPL for scsi_sg_list and scsi_sg_prot_list */
		phba->cfg_total_seg_cnt = LPFC_MAX_SG_SEG_CNT;
	} else {
		/*
		 * The scsi_buf for a regular I/O will hold the FCP cmnd,
		 * the FCP rsp, and a BDE for each sg entry.
		 */
		phba->cfg_sg_dma_buf_size = sizeof(struct fcp_cmnd) +
			sizeof(struct fcp_rsp) +
			((phba->cfg_sg_seg_cnt + 2) * sizeof(struct ulp_bde64));

		/* Total BDEs in BPL for scsi_sg_list */
		phba->cfg_total_seg_cnt = phba->cfg_sg_seg_cnt + 2;
	}

	lpfc_printf_log(phba, KERN_INFO, LOG_INIT | LOG_FCP,
			"9088 sg_tablesize:%d dmabuf_size:%d total_bde:%d\n",
			phba->cfg_sg_seg_cnt, phba->cfg_sg_dma_buf_size,
			phba->cfg_total_seg_cnt);

	phba->max_vpi = LPFC_MAX_VPI;
	/* This will be set to correct value after config_port mbox */
	phba->max_vports = 0;

	/*
	 * Initialize the SLI Layer to run with lpfc HBAs.
	 */
	lpfc_sli_setup(phba);
	lpfc_sli_queue_setup(phba);

	/* Allocate device driver memory */
	if (lpfc_mem_alloc(phba, BPL_ALIGN_SZ))
		return -ENOMEM;

	/*
	 * Enable sr-iov virtual functions if supported and configured
	 * through the module parameter.
	 */
	if (phba->cfg_sriov_nr_virtfn > 0) {
		rc = lpfc_sli_probe_sriov_nr_virtfn(phba,
						 phba->cfg_sriov_nr_virtfn);
		if (rc) {
			lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
					"2808 Requested number of SR-IOV "
					"virtual functions (%d) is not "
					"supported\n",
					phba->cfg_sriov_nr_virtfn);
			phba->cfg_sriov_nr_virtfn = 0;
		}
	}

	return 0;
}

static void
lpfc_sli_driver_resource_unset(struct lpfc_hba *phba)
{
	/* Free device driver memory allocated */
	lpfc_mem_free_all(phba);

	return;
}

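/**
 * lpfc_sli4_driver_resource_setup - Setup drvr internal resources for SLI4 dev
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked to set up the driver internal resources specific to
 * support the SLI-4 HBA device it is attached to.
 *
 * Return codes
 *   0 - successful
 *   other values - error
 **/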
4839static int
4840lpfc_sli4_driver_resource_setup(struct lpfc_hba *phba)
4841{
4842 struct lpfc_vector_map_info *cpup;
4843 struct lpfc_sli *psli;
4844 LPFC_MBOXQ_t *mboxq;
4845 int rc, i, hbq_count, max_buf_size;
4846 uint8_t pn_page[LPFC_MAX_SUPPORTED_PAGES] = {0};
4847 struct lpfc_mqe *mqe;
4848 int longs;
4849
4850
4851 rc = lpfc_sli4_post_status_check(phba);
4852 if (rc)
4853 return -ENODEV;
4854
4855
4856
4857
4858
4859
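	/* Initialize timers used by driver: heartbeat and RRQ timers */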
4860 init_timer(&phba->hb_tmofunc);
4861 phba->hb_tmofunc.function = lpfc_hb_timeout;
4862 phba->hb_tmofunc.data = (unsigned long)phba;
4863 init_timer(&phba->rrq_tmr);
4864 phba->rrq_tmr.function = lpfc_rrq_timeout;
4865 phba->rrq_tmr.data = (unsigned long)phba;
4866
4867 psli = &phba->sli;
4868
4869 init_timer(&psli->mbox_tmo);
4870 psli->mbox_tmo.function = lpfc_mbox_timeout;
4871 psli->mbox_tmo.data = (unsigned long) phba;
4872
4873 init_timer(&phba->fabric_block_timer);
4874 phba->fabric_block_timer.function = lpfc_fabric_block_timeout;
4875 phba->fabric_block_timer.data = (unsigned long) phba;
4876
4877 init_timer(&phba->eratt_poll);
4878 phba->eratt_poll.function = lpfc_poll_eratt;
4879 phba->eratt_poll.data = (unsigned long) phba;
4880
4881 init_timer(&phba->fcf.redisc_wait);
4882 phba->fcf.redisc_wait.function = lpfc_sli4_fcf_redisc_wait_tmo;
4883 phba->fcf.redisc_wait.data = (unsigned long)phba;
4884
4885
4886
4887
4888
4889 memset((uint8_t *)&phba->mbox_ext_buf_ctx, 0,
4890 sizeof(struct lpfc_mbox_ext_buf_ctx));
4891 INIT_LIST_HEAD(&phba->mbox_ext_buf_ctx.ext_dmabuf_list);
4892
4893
4894
4895
4896
4897
4898
4899
4900
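	/* Get all the module params for configuring this host */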
4901 lpfc_get_cfgparam(phba);
4902 phba->max_vpi = LPFC_MAX_VPI;
4903
4904
4905 phba->cfg_fcp_io_channel = phba->cfg_fcp_eq_count;
4906
4907
4908 phba->max_vports = 0;
4909
4910
4911 phba->valid_vlan = 0;
4912 phba->fc_map[0] = LPFC_FCOE_FCF_MAP0;
4913 phba->fc_map[1] = LPFC_FCOE_FCF_MAP1;
4914 phba->fc_map[2] = LPFC_FCOE_FCF_MAP2;
4915
4916
4917
4918
4919
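	/*
	 * For SLI4, instead of using ring 0 (LPFC_FCP_RING) for FCP commands,
	 * a new ring is associated with each FCP fast-path EQ/CQ/WQ tuple.
	 */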
4920 if (!phba->sli.ring)
4921 phba->sli.ring = kzalloc(
4922 (LPFC_SLI3_MAX_RING + phba->cfg_fcp_io_channel) *
4923 sizeof(struct lpfc_sli_ring), GFP_KERNEL);
4924 if (!phba->sli.ring)
4925 return -ENOMEM;
4926
4927
4928
4929
4930
4931
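	/*
	 * The SGL for an I/O is limited to 2 pages of SGEs, with 2 SGEs
	 * reserved: 1 for the FCP cmnd and 1 for the FCP rsp.
	 */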
4932 max_buf_size = (2 * SLI4_PAGE_SIZE);
4933 if (phba->cfg_sg_seg_cnt > LPFC_MAX_SGL_SEG_CNT - 2)
4934 phba->cfg_sg_seg_cnt = LPFC_MAX_SGL_SEG_CNT - 2;
4935
4936
4937
4938
4939
4940
4941 if (phba->cfg_enable_bg) {
4942
4943
4944
4945
4946
4947
4948
4949
4950
4951 phba->cfg_sg_dma_buf_size = sizeof(struct fcp_cmnd) +
4952 sizeof(struct fcp_rsp) + max_buf_size;
4953
4954
4955 phba->cfg_total_seg_cnt = LPFC_MAX_SGL_SEG_CNT;
4956
4957 if (phba->cfg_sg_seg_cnt > LPFC_MAX_SG_SLI4_SEG_CNT_DIF)
4958 phba->cfg_sg_seg_cnt = LPFC_MAX_SG_SLI4_SEG_CNT_DIF;
4959 } else {
4960
4961
4962
4963
4964
4965 phba->cfg_sg_dma_buf_size = sizeof(struct fcp_cmnd) +
4966 sizeof(struct fcp_rsp) +
4967 ((phba->cfg_sg_seg_cnt + 2) * sizeof(struct sli4_sge));
4968
4969
4970 phba->cfg_total_seg_cnt = phba->cfg_sg_seg_cnt + 2;
4971
4972
4973
4974
4975 }
4976
4977
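	/* Initialize the host templates with the updated values. */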
4978 lpfc_vport_template.sg_tablesize = phba->cfg_sg_seg_cnt;
4979 lpfc_template.sg_tablesize = phba->cfg_sg_seg_cnt;
4980
4981 if (phba->cfg_sg_dma_buf_size <= LPFC_MIN_SG_SLI4_BUF_SZ)
4982 phba->cfg_sg_dma_buf_size = LPFC_MIN_SG_SLI4_BUF_SZ;
4983 else
4984 phba->cfg_sg_dma_buf_size =
4985 SLI4_PAGE_ALIGN(phba->cfg_sg_dma_buf_size);
4986
4987 lpfc_printf_log(phba, KERN_INFO, LOG_INIT | LOG_FCP,
4988 "9087 sg_tablesize:%d dmabuf_size:%d total_sge:%d\n",
4989 phba->cfg_sg_seg_cnt, phba->cfg_sg_dma_buf_size,
4990 phba->cfg_total_seg_cnt);
4991
4992
4993 hbq_count = lpfc_sli_hbq_count();
4994 for (i = 0; i < hbq_count; ++i)
4995 INIT_LIST_HEAD(&phba->hbqs[i].hbq_buffer_list);
4996 INIT_LIST_HEAD(&phba->rb_pend_list);
4997 phba->hbqs[LPFC_ELS_HBQ].hbq_alloc_buffer = lpfc_sli4_rb_alloc;
4998 phba->hbqs[LPFC_ELS_HBQ].hbq_free_buffer = lpfc_sli4_rb_free;
4999
5000
5001
5002
5003
5004 spin_lock_init(&phba->sli4_hba.abts_scsi_buf_list_lock);
5005 INIT_LIST_HEAD(&phba->sli4_hba.lpfc_abts_scsi_buf_list);
5006
5007 spin_lock_init(&phba->sli4_hba.abts_sgl_list_lock);
5008
5009
5010
5011
5012
5013
5014 INIT_LIST_HEAD(&phba->sli4_hba.sp_cqe_event_pool);
5015
5016 INIT_LIST_HEAD(&phba->sli4_hba.sp_queue_event);
5017
5018 INIT_LIST_HEAD(&phba->sli4_hba.sp_asynce_work_queue);
5019
5020 INIT_LIST_HEAD(&phba->sli4_hba.sp_fcp_xri_aborted_work_queue);
5021
5022 INIT_LIST_HEAD(&phba->sli4_hba.sp_els_xri_aborted_work_queue);
5023
5024 INIT_LIST_HEAD(&phba->sli4_hba.sp_unsol_work_queue);
5025
5026
5027 INIT_LIST_HEAD(&phba->sli4_hba.lpfc_rpi_blk_list);
5028 INIT_LIST_HEAD(&phba->sli4_hba.lpfc_xri_blk_list);
5029 INIT_LIST_HEAD(&phba->sli4_hba.lpfc_vfi_blk_list);
5030 INIT_LIST_HEAD(&phba->lpfc_vpi_blk_list);
5031
5032
5033 lpfc_sli_setup(phba);
5034 lpfc_sli_queue_setup(phba);
5035
5036
5037 rc = lpfc_mem_alloc(phba, SGL_ALIGN_SZ);
5038 if (rc)
5039 return -ENOMEM;
5040
5041
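	/* IF Type 2 ports get initialized now. */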
5042 if (bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) ==
5043 LPFC_SLI_INTF_IF_TYPE_2) {
5044 rc = lpfc_pci_function_reset(phba);
5045 if (unlikely(rc))
5046 return -ENODEV;
5047 }
5048
5049
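	/* Create the bootstrap mailbox command */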
5050 rc = lpfc_create_bootstrap_mbox(phba);
5051 if (unlikely(rc))
5052 goto out_free_mem;
5053
5054
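	/* Set up the host's endian order with the device. */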
5055 rc = lpfc_setup_endian_order(phba);
5056 if (unlikely(rc))
5057 goto out_free_bsmbx;
5058
5059
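	/* Set up the hba's configuration parameters. */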
5060 rc = lpfc_sli4_read_config(phba);
5061 if (unlikely(rc))
5062 goto out_free_bsmbx;
5063
5064
5065 if (bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) ==
5066 LPFC_SLI_INTF_IF_TYPE_0) {
5067 rc = lpfc_pci_function_reset(phba);
5068 if (unlikely(rc))
5069 goto out_free_bsmbx;
5070 }
5071
5072 mboxq = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool,
5073 GFP_KERNEL);
5074 if (!mboxq) {
5075 rc = -ENOMEM;
5076 goto out_free_bsmbx;
5077 }
5078
5079
5080 lpfc_supported_pages(mboxq);
5081 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
5082 if (!rc) {
5083 mqe = &mboxq->u.mqe;
5084 memcpy(&pn_page[0], ((uint8_t *)&mqe->un.supp_pages.word3),
5085 LPFC_MAX_SUPPORTED_PAGES);
5086 for (i = 0; i < LPFC_MAX_SUPPORTED_PAGES; i++) {
5087 switch (pn_page[i]) {
5088 case LPFC_SLI4_PARAMETERS:
5089 phba->sli4_hba.pc_sli4_params.supported = 1;
5090 break;
5091 default:
5092 break;
5093 }
5094 }
5095
5096 if (phba->sli4_hba.pc_sli4_params.supported)
5097 rc = lpfc_pc_sli4_params_get(phba, mboxq);
5098 if (rc) {
5099 mempool_free(mboxq, phba->mbox_mem_pool);
5100 rc = -EIO;
5101 goto out_free_bsmbx;
5102 }
5103 }
5104
5105
5106
5107
5108
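	/*
	 * Get SLI4 parameters that override parameters from Port
	 * capabilities. If this call fails, it isn't critical unless
	 * the SLI4 parameters come back in conflict.
	 */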
5109 rc = lpfc_get_sli4_parameters(phba, mboxq);
5110 if (rc) {
5111 if (phba->sli4_hba.extents_in_use &&
5112 phba->sli4_hba.rpi_hdrs_in_use) {
5113 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
5114 "2999 Unsupported SLI4 Parameters "
5115 "Extents and RPI headers enabled.\n");
5116 goto out_free_bsmbx;
5117 }
5118 }
5119 mempool_free(mboxq, phba->mbox_mem_pool);
5120
5121 rc = lpfc_sli4_queue_verify(phba);
5122 if (rc)
5123 goto out_free_bsmbx;
5124
5125
5126 rc = lpfc_sli4_cq_event_pool_create(phba);
5127 if (rc)
5128 goto out_free_bsmbx;
5129
5130
5131 lpfc_init_sgl_list(phba);
5132
5133
5134 rc = lpfc_init_active_sgl_array(phba);
5135 if (rc) {
5136 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
5137 "1430 Failed to initialize sgl list.\n");
5138 goto out_destroy_cq_event_pool;
5139 }
5140 rc = lpfc_sli4_init_rpi_hdrs(phba);
5141 if (rc) {
5142 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
5143 "1432 Failed to initialize rpi headers.\n");
5144 goto out_free_active_sgl;
5145 }
5146
5147
5148 longs = (LPFC_SLI4_FCF_TBL_INDX_MAX + BITS_PER_LONG - 1)/BITS_PER_LONG;
5149 phba->fcf.fcf_rr_bmask = kzalloc(longs * sizeof(unsigned long),
5150 GFP_KERNEL);
5151 if (!phba->fcf.fcf_rr_bmask) {
5152 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
5153 "2759 Failed allocate memory for FCF round "
5154 "robin failover bmask\n");
5155 rc = -ENOMEM;
5156 goto out_remove_rpi_hdrs;
5157 }
5158
5159 phba->sli4_hba.fcp_eq_hdl =
5160 kzalloc((sizeof(struct lpfc_fcp_eq_hdl) *
5161 phba->cfg_fcp_io_channel), GFP_KERNEL);
5162 if (!phba->sli4_hba.fcp_eq_hdl) {
5163 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
5164 "2572 Failed allocate memory for "
5165 "fast-path per-EQ handle array\n");
5166 rc = -ENOMEM;
5167 goto out_free_fcf_rr_bmask;
5168 }
5169
5170 phba->sli4_hba.msix_entries = kzalloc((sizeof(struct msix_entry) *
5171 phba->cfg_fcp_io_channel), GFP_KERNEL);
5172 if (!phba->sli4_hba.msix_entries) {
5173 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
5174 "2573 Failed allocate memory for msi-x "
5175 "interrupt vector entries\n");
5176 rc = -ENOMEM;
5177 goto out_free_fcp_eq_hdl;
5178 }
5179
5180 phba->sli4_hba.cpu_map = kzalloc((sizeof(struct lpfc_vector_map_info) *
5181 phba->sli4_hba.num_present_cpu),
5182 GFP_KERNEL);
5183 if (!phba->sli4_hba.cpu_map) {
5184 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
5185 "3327 Failed allocate memory for msi-x "
5186 "interrupt vector mapping\n");
5187 rc = -ENOMEM;
5188 goto out_free_msix;
5189 }
5190 if (lpfc_used_cpu == NULL) {
5191 lpfc_used_cpu = kzalloc((sizeof(uint16_t) * lpfc_present_cpu),
5192 GFP_KERNEL);
5193 if (!lpfc_used_cpu) {
5194 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
5195 "3335 Failed allocate memory for msi-x "
5196 "interrupt vector mapping\n");
5197 kfree(phba->sli4_hba.cpu_map);
5198 rc = -ENOMEM;
5199 goto out_free_msix;
5200 }
5201 for (i = 0; i < lpfc_present_cpu; i++)
5202 lpfc_used_cpu[i] = LPFC_VECTOR_MAP_EMPTY;
5203 }
5204
5205
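	/* Assign a default io channel to each present CPU, round robin. */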
5206 cpup = phba->sli4_hba.cpu_map;
5207 rc = 0;
	for (i = 0; i < phba->sli4_hba.num_present_cpu; i++) {
		cpup->channel_id = rc;
		cpup++;
		rc++;
		if (rc >= phba->cfg_fcp_io_channel)
			rc = 0;
	}
5214
5215
5216
5217
5218
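	/*
	 * Enable SR-IOV virtual functions if supported and configured
	 * through the module parameter lpfc_sriov_nr_virtfn.
	 */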
5219 if (phba->cfg_sriov_nr_virtfn > 0) {
5220 rc = lpfc_sli_probe_sriov_nr_virtfn(phba,
5221 phba->cfg_sriov_nr_virtfn);
5222 if (rc) {
5223 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
5224 "3020 Requested number of SR-IOV "
5225 "virtual functions (%d) is not "
5226 "supported\n",
5227 phba->cfg_sriov_nr_virtfn);
5228 phba->cfg_sriov_nr_virtfn = 0;
5229 }
5230 }
5231
5232 return 0;
5233
5234out_free_msix:
5235 kfree(phba->sli4_hba.msix_entries);
5236out_free_fcp_eq_hdl:
5237 kfree(phba->sli4_hba.fcp_eq_hdl);
5238out_free_fcf_rr_bmask:
5239 kfree(phba->fcf.fcf_rr_bmask);
5240out_remove_rpi_hdrs:
5241 lpfc_sli4_remove_rpi_hdrs(phba);
5242out_free_active_sgl:
5243 lpfc_free_active_sgl(phba);
5244out_destroy_cq_event_pool:
5245 lpfc_sli4_cq_event_pool_destroy(phba);
5246out_free_bsmbx:
5247 lpfc_destroy_bootstrap_mbox(phba);
5248out_free_mem:
5249 lpfc_mem_free(phba);
5250 return rc;
5251}
5252
5253
5254
5255
5256
5257
5258
5259
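/**
 * lpfc_sli4_driver_resource_unset - Unset drvr internal resources for SLI4 dev
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked to unset the driver internal resources set up
 * specific for supporting the SLI-4 HBA device it attached to.
 **/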
5260static void
5261lpfc_sli4_driver_resource_unset(struct lpfc_hba *phba)
5262{
5263 struct lpfc_fcf_conn_entry *conn_entry, *next_conn_entry;
5264
5265
5266 kfree(phba->sli4_hba.cpu_map);
5267 phba->sli4_hba.num_present_cpu = 0;
5268 phba->sli4_hba.num_online_cpu = 0;
5269
5270
5271 kfree(phba->sli4_hba.msix_entries);
5272
5273
5274 kfree(phba->sli4_hba.fcp_eq_hdl);
5275
5276
5277 lpfc_sli4_remove_rpi_hdrs(phba);
5278 lpfc_sli4_remove_rpis(phba);
5279
5280
5281 kfree(phba->fcf.fcf_rr_bmask);
5282
5283
5284 lpfc_free_active_sgl(phba);
5285 lpfc_free_els_sgl_list(phba);
5286
5287
5288 lpfc_sli4_cq_event_release_all(phba);
5289 lpfc_sli4_cq_event_pool_destroy(phba);
5290
5291
5292 lpfc_sli4_dealloc_resource_identifiers(phba);
5293
5294
5295 lpfc_destroy_bootstrap_mbox(phba);
5296
5297
5298 lpfc_mem_free_all(phba);
5299
5300
5301 list_for_each_entry_safe(conn_entry, next_conn_entry,
5302 &phba->fcf_conn_rec_list, list) {
5303 list_del_init(&conn_entry->list);
5304 kfree(conn_entry);
5305 }
5306
5307 return;
5308}
5309
5310
5311
5312
5313
5314
5315
5316
5317
5318
5319
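/**
 * lpfc_init_api_table_setup - Set up init api function jump table
 * @phba: pointer to lpfc hba data structure.
 * @dev_grp: The HBA PCI-Device group number.
 *
 * This routine sets up the device INIT interface API function jump table
 * in @phba struct.
 *
 * Returns: 0 - success, -ENODEV - failure.
 **/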
5320int
5321lpfc_init_api_table_setup(struct lpfc_hba *phba, uint8_t dev_grp)
5322{
5323 phba->lpfc_hba_init_link = lpfc_hba_init_link;
5324 phba->lpfc_hba_down_link = lpfc_hba_down_link;
5325 phba->lpfc_selective_reset = lpfc_selective_reset;
5326 switch (dev_grp) {
5327 case LPFC_PCI_DEV_LP:
5328 phba->lpfc_hba_down_post = lpfc_hba_down_post_s3;
5329 phba->lpfc_handle_eratt = lpfc_handle_eratt_s3;
5330 phba->lpfc_stop_port = lpfc_stop_port_s3;
5331 break;
5332 case LPFC_PCI_DEV_OC:
5333 phba->lpfc_hba_down_post = lpfc_hba_down_post_s4;
5334 phba->lpfc_handle_eratt = lpfc_handle_eratt_s4;
5335 phba->lpfc_stop_port = lpfc_stop_port_s4;
5336 break;
5337 default:
5338 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
5339 "1431 Invalid HBA PCI-device group: 0x%x\n",
5340 dev_grp);
5341 return -ENODEV;
5342 break;
5343 }
5344 return 0;
5345}
5346
5347
5348
5349
5350
5351
5352
5353
5354
5355
5356
5357
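/**
 * lpfc_setup_driver_resource_phase1 - Phase1 setup driver internal resources.
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine sets up the driver internal resources before the device
 * specific resource setup to support the HBA device it attached to.
 *
 * Return codes
 *	0 - successful
 *	other values - error
 **/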
5358static int
5359lpfc_setup_driver_resource_phase1(struct lpfc_hba *phba)
5360{
5361
5362
5363
5364 atomic_set(&phba->fast_event_count, 0);
5365 spin_lock_init(&phba->hbalock);
5366
5367
5368 spin_lock_init(&phba->ndlp_lock);
5369
5370 INIT_LIST_HEAD(&phba->port_list);
5371 INIT_LIST_HEAD(&phba->work_list);
5372 init_waitqueue_head(&phba->wait_4_mlo_m_q);
5373
5374
5375 init_waitqueue_head(&phba->work_waitq);
5376
5377
5378 spin_lock_init(&phba->scsi_buf_list_get_lock);
5379 INIT_LIST_HEAD(&phba->lpfc_scsi_buf_list_get);
5380 spin_lock_init(&phba->scsi_buf_list_put_lock);
5381 INIT_LIST_HEAD(&phba->lpfc_scsi_buf_list_put);
5382
5383
5384 INIT_LIST_HEAD(&phba->fabric_iocb_list);
5385
5386
5387 INIT_LIST_HEAD(&phba->elsbuf);
5388
5389
5390 INIT_LIST_HEAD(&phba->fcf_conn_rec_list);
5391
5392 return 0;
5393}
5394
5395
5396
5397
5398
5399
5400
5401
5402
5403
5404
5405
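/**
 * lpfc_setup_driver_resource_phase2 - Phase2 setup driver internal resources.
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine sets up the driver internal resources after the device
 * specific resource setup to support the HBA device it attached to.
 *
 * Return codes
 *	0 - successful
 *	other values - error
 **/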
5406static int
5407lpfc_setup_driver_resource_phase2(struct lpfc_hba *phba)
5408{
5409 int error;
5410
5411
5412 phba->worker_thread = kthread_run(lpfc_do_work, phba,
5413 "lpfc_worker_%d", phba->brd_no);
5414 if (IS_ERR(phba->worker_thread)) {
5415 error = PTR_ERR(phba->worker_thread);
5416 return error;
5417 }
5418
5419 return 0;
5420}
5421
5422
5423
5424
5425
5426
5427
5428
5429
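/**
 * lpfc_unset_driver_resource_phase2 - Phase2 unset driver internal resources.
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked to unset the driver internal resources set up after
 * the device specific resource setup for supporting the HBA device it
 * attached to.
 **/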
5430static void
5431lpfc_unset_driver_resource_phase2(struct lpfc_hba *phba)
5432{
5433
5434 kthread_stop(phba->worker_thread);
5435}
5436
5437
5438
5439
5440
5441
5442
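/**
 * lpfc_free_iocb_list - Free iocb list.
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked to free the driver's IOCB list and memory.
 **/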
5443static void
5444lpfc_free_iocb_list(struct lpfc_hba *phba)
5445{
5446 struct lpfc_iocbq *iocbq_entry = NULL, *iocbq_next = NULL;
5447
5448 spin_lock_irq(&phba->hbalock);
5449 list_for_each_entry_safe(iocbq_entry, iocbq_next,
5450 &phba->lpfc_iocb_list, list) {
5451 list_del(&iocbq_entry->list);
5452 kfree(iocbq_entry);
5453 phba->total_iocbq_bufs--;
5454 }
5455 spin_unlock_irq(&phba->hbalock);
5456
5457 return;
5458}
5459
5460
5461
5462
5463
5464
5465
5466
5467
5468
5469
5470
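/**
 * lpfc_init_iocb_list - Allocate and initialize iocb list.
 * @phba: pointer to lpfc hba data structure.
 * @iocb_count: number of requested iocbs
 *
 * This routine is invoked to allocate and initialize the driver's IOCB
 * list and set up the IOCB tag array accordingly.
 *
 * Return codes
 *	0 - successful
 *	other values - error
 **/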
5471static int
5472lpfc_init_iocb_list(struct lpfc_hba *phba, int iocb_count)
5473{
5474 struct lpfc_iocbq *iocbq_entry = NULL;
5475 uint16_t iotag;
5476 int i;
5477
5478
5479 INIT_LIST_HEAD(&phba->lpfc_iocb_list);
5480 for (i = 0; i < iocb_count; i++) {
5481 iocbq_entry = kzalloc(sizeof(struct lpfc_iocbq), GFP_KERNEL);
5482 if (iocbq_entry == NULL) {
			printk(KERN_ERR "%s: only allocated %d iocbs of "
				"expected %d count. Unloading driver.\n",
				__func__, i, iocb_count);
5486 goto out_free_iocbq;
5487 }
5488
5489 iotag = lpfc_sli_next_iotag(phba, iocbq_entry);
5490 if (iotag == 0) {
5491 kfree(iocbq_entry);
5492 printk(KERN_ERR "%s: failed to allocate IOTAG. "
5493 "Unloading driver.\n", __func__);
5494 goto out_free_iocbq;
5495 }
5496 iocbq_entry->sli4_lxritag = NO_XRI;
5497 iocbq_entry->sli4_xritag = NO_XRI;
5498
5499 spin_lock_irq(&phba->hbalock);
5500 list_add(&iocbq_entry->list, &phba->lpfc_iocb_list);
5501 phba->total_iocbq_bufs++;
5502 spin_unlock_irq(&phba->hbalock);
5503 }
5504
5505 return 0;
5506
5507out_free_iocbq:
5508 lpfc_free_iocb_list(phba);
5509
5510 return -ENOMEM;
5511}
5512
5513
5514
5515
5516
5517
5518
5519
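/**
 * lpfc_free_sgl_list - Free a given sgl list.
 * @phba: pointer to lpfc hba data structure.
 * @sglq_list: pointer to the head of sgl list.
 *
 * This routine is invoked to free a given sgl list and memory.
 **/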
5520void
5521lpfc_free_sgl_list(struct lpfc_hba *phba, struct list_head *sglq_list)
5522{
5523 struct lpfc_sglq *sglq_entry = NULL, *sglq_next = NULL;
5524
5525 list_for_each_entry_safe(sglq_entry, sglq_next, sglq_list, list) {
5526 list_del(&sglq_entry->list);
5527 lpfc_mbuf_free(phba, sglq_entry->virt, sglq_entry->phys);
5528 kfree(sglq_entry);
5529 }
5530}
5531
5532
5533
5534
5535
5536
5537
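/**
 * lpfc_free_els_sgl_list - Free els sgl list.
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked to free the driver's els sgl list and memory.
 **/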
5538static void
5539lpfc_free_els_sgl_list(struct lpfc_hba *phba)
5540{
5541 LIST_HEAD(sglq_list);
5542
5543
5544 spin_lock_irq(&phba->hbalock);
5545 list_splice_init(&phba->sli4_hba.lpfc_sgl_list, &sglq_list);
5546 spin_unlock_irq(&phba->hbalock);
5547
5548
5549 lpfc_free_sgl_list(phba, &sglq_list);
5550}
5551
5552
5553
5554
5555
5556
5557
5558
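/**
 * lpfc_init_active_sgl_array - Allocate the buf to track active ELS XRIs.
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked to allocate the driver's active sgl array, sized
 * by max_xri, which holds the sglq entries for active IOs.
 *
 * Return codes
 *	0 - successful
 *	-ENOMEM - No available memory
 **/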
5559static int
5560lpfc_init_active_sgl_array(struct lpfc_hba *phba)
5561{
5562 int size;
5563 size = sizeof(struct lpfc_sglq *);
5564 size *= phba->sli4_hba.max_cfg_param.max_xri;
5565
5566 phba->sli4_hba.lpfc_sglq_active_list =
5567 kzalloc(size, GFP_KERNEL);
5568 if (!phba->sli4_hba.lpfc_sglq_active_list)
5569 return -ENOMEM;
5570 return 0;
5571}
5572
5573
5574
5575
5576
5577
5578
5579
5580
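/**
 * lpfc_free_active_sgl - Free the buf that tracks active ELS XRIs.
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked to free the driver's active sgl tracking array.
 **/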
5581static void
5582lpfc_free_active_sgl(struct lpfc_hba *phba)
5583{
5584 kfree(phba->sli4_hba.lpfc_sglq_active_list);
5585}
5586
5587
5588
5589
5590
5591
5592
5593
5594
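/**
 * lpfc_init_sgl_list - Allocate and initialize sgl list.
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked to initialize the driver's sgl lists and reset
 * the els and scsi xri counters.
 **/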
5595static void
5596lpfc_init_sgl_list(struct lpfc_hba *phba)
5597{
5598
5599 INIT_LIST_HEAD(&phba->sli4_hba.lpfc_sgl_list);
5600 INIT_LIST_HEAD(&phba->sli4_hba.lpfc_abts_els_sgl_list);
5601
5602
5603 phba->sli4_hba.els_xri_cnt = 0;
5604
5605
5606 phba->sli4_hba.scsi_xri_cnt = 0;
5607}
5608
5609
5610
5611
5612
5613
5614
5615
5616
5617
5618
5619
5620
5621
5622
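/**
 * lpfc_sli4_init_rpi_hdrs - Post the rpi header memory region to the port
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked to post rpi header templates to the
 * port for those SLI4 ports that do not support extents.
 *
 * Return codes
 *	0 - successful
 *	-EIO - extents are in use, rpi headers not supported
 *	-ENODEV - rpi header post failed
 **/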
5623int
5624lpfc_sli4_init_rpi_hdrs(struct lpfc_hba *phba)
5625{
5626 int rc = 0;
5627 struct lpfc_rpi_hdr *rpi_hdr;
5628
5629 INIT_LIST_HEAD(&phba->sli4_hba.lpfc_rpi_hdr_list);
5630 if (!phba->sli4_hba.rpi_hdrs_in_use)
5631 return rc;
5632 if (phba->sli4_hba.extents_in_use)
5633 return -EIO;
5634
5635 rpi_hdr = lpfc_sli4_create_rpi_hdr(phba);
5636 if (!rpi_hdr) {
5637 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
5638 "0391 Error during rpi post operation\n");
5639 lpfc_sli4_remove_rpis(phba);
5640 rc = -ENODEV;
5641 }
5642
5643 return rc;
5644}
5645
5646
5647
5648
5649
5650
5651
5652
5653
5654
5655
5656
5657
5658
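/**
 * lpfc_sli4_create_rpi_hdr - Allocate an rpi header memory region
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked to allocate a single aligned memory region to
 * hold rpi context headers and to track the range of rpis it covers.
 *
 * Returns:
 *	A valid rpi hdr on success.
 *	A NULL pointer on any failure.
 **/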
5659struct lpfc_rpi_hdr *
5660lpfc_sli4_create_rpi_hdr(struct lpfc_hba *phba)
5661{
5662 uint16_t rpi_limit, curr_rpi_range;
5663 struct lpfc_dmabuf *dmabuf;
5664 struct lpfc_rpi_hdr *rpi_hdr;
5665 uint32_t rpi_count;
5666
5667
5668
5669
5670
5671
5672 if (!phba->sli4_hba.rpi_hdrs_in_use)
5673 return NULL;
5674 if (phba->sli4_hba.extents_in_use)
5675 return NULL;
5676
5677
5678 rpi_limit = phba->sli4_hba.max_cfg_param.rpi_base +
5679 phba->sli4_hba.max_cfg_param.max_rpi - 1;
5680
5681 spin_lock_irq(&phba->hbalock);
5682
5683
5684
5685
5686
5687 curr_rpi_range = phba->sli4_hba.next_rpi;
5688 spin_unlock_irq(&phba->hbalock);
5689
5690
5691
5692
5693
5694
5695 if ((curr_rpi_range + (LPFC_RPI_HDR_COUNT - 1)) > rpi_limit)
5696 rpi_count = rpi_limit - curr_rpi_range;
5697 else
5698 rpi_count = LPFC_RPI_HDR_COUNT;
5699
5700 if (!rpi_count)
5701 return NULL;
5702
5703
5704
5705
5706 dmabuf = kzalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
5707 if (!dmabuf)
5708 return NULL;
5709
5710 dmabuf->virt = dma_alloc_coherent(&phba->pcidev->dev,
5711 LPFC_HDR_TEMPLATE_SIZE,
5712 &dmabuf->phys,
5713 GFP_KERNEL);
5714 if (!dmabuf->virt) {
5715 rpi_hdr = NULL;
5716 goto err_free_dmabuf;
5717 }
5718
5719 memset(dmabuf->virt, 0, LPFC_HDR_TEMPLATE_SIZE);
5720 if (!IS_ALIGNED(dmabuf->phys, LPFC_HDR_TEMPLATE_SIZE)) {
5721 rpi_hdr = NULL;
5722 goto err_free_coherent;
5723 }
5724
5725
5726 rpi_hdr = kzalloc(sizeof(struct lpfc_rpi_hdr), GFP_KERNEL);
5727 if (!rpi_hdr)
5728 goto err_free_coherent;
5729
5730 rpi_hdr->dmabuf = dmabuf;
5731 rpi_hdr->len = LPFC_HDR_TEMPLATE_SIZE;
5732 rpi_hdr->page_count = 1;
5733 spin_lock_irq(&phba->hbalock);
5734
5735
5736 rpi_hdr->start_rpi = curr_rpi_range;
5737 list_add_tail(&rpi_hdr->list, &phba->sli4_hba.lpfc_rpi_hdr_list);
5738
5739
5740
5741
5742
5743 phba->sli4_hba.next_rpi += rpi_count;
5744 spin_unlock_irq(&phba->hbalock);
5745 return rpi_hdr;
5746
5747 err_free_coherent:
5748 dma_free_coherent(&phba->pcidev->dev, LPFC_HDR_TEMPLATE_SIZE,
5749 dmabuf->virt, dmabuf->phys);
5750 err_free_dmabuf:
5751 kfree(dmabuf);
5752 return NULL;
5753}
5754
5755
5756
5757
5758
5759
5760
5761
5762
5763
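/**
 * lpfc_sli4_remove_rpi_hdrs - Remove all rpi header memory regions
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked to remove all memory resources allocated
 * to support rpis for SLI4 ports not supporting extents. This routine
 * presumes the caller has released all rpis consumed by fabric or port
 * logins and is prepared to have the header pages removed.
 **/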
5764void
5765lpfc_sli4_remove_rpi_hdrs(struct lpfc_hba *phba)
5766{
5767 struct lpfc_rpi_hdr *rpi_hdr, *next_rpi_hdr;
5768
5769 if (!phba->sli4_hba.rpi_hdrs_in_use)
5770 goto exit;
5771
5772 list_for_each_entry_safe(rpi_hdr, next_rpi_hdr,
5773 &phba->sli4_hba.lpfc_rpi_hdr_list, list) {
5774 list_del(&rpi_hdr->list);
5775 dma_free_coherent(&phba->pcidev->dev, rpi_hdr->len,
5776 rpi_hdr->dmabuf->virt, rpi_hdr->dmabuf->phys);
5777 kfree(rpi_hdr->dmabuf);
5778 kfree(rpi_hdr);
5779 }
5780 exit:
5781
5782 phba->sli4_hba.next_rpi = 0;
5783}
5784
5785
5786
5787
5788
5789
5790
5791
5792
5793
5794
5795
5796
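/**
 * lpfc_hba_alloc - Allocate driver hba data structure for a device.
 * @pdev: pointer to pci device data structure.
 *
 * This routine is invoked to allocate the driver hba data structure for an
 * HBA device. If the allocation is successful, the phba reference to the
 * PCI device data structure is set.
 *
 * Return codes
 *	pointer to @phba - successful
 *	NULL - error
 **/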
5797static struct lpfc_hba *
5798lpfc_hba_alloc(struct pci_dev *pdev)
5799{
5800 struct lpfc_hba *phba;
5801
5802
5803 phba = kzalloc(sizeof(struct lpfc_hba), GFP_KERNEL);
5804 if (!phba) {
5805 dev_err(&pdev->dev, "failed to allocate hba struct\n");
5806 return NULL;
5807 }
5808
5809
5810 phba->pcidev = pdev;
5811
5812
5813 phba->brd_no = lpfc_get_instance();
5814 if (phba->brd_no < 0) {
5815 kfree(phba);
5816 return NULL;
5817 }
5818
5819 spin_lock_init(&phba->ct_ev_lock);
5820 INIT_LIST_HEAD(&phba->ct_ev_waiters);
5821
5822 return phba;
5823}
5824
5825
5826
5827
5828
5829
5830
5831
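/**
 * lpfc_hba_free - Free driver hba data structure with a device.
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked to free the driver hba data structure with an
 * HBA device.
 **/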
5832static void
5833lpfc_hba_free(struct lpfc_hba *phba)
5834{
5835
5836 idr_remove(&lpfc_hba_index, phba->brd_no);
5837
5838
5839 kfree(phba->sli.ring);
5840 phba->sli.ring = NULL;
5841
5842 kfree(phba);
5843 return;
5844}
5845
5846
5847
5848
5849
5850
5851
5852
5853
5854
5855
5856
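/**
 * lpfc_create_shost - Create hba physical port with associated scsi host.
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked to create HBA physical port and associate a SCSI
 * host with it.
 *
 * Return codes
 *	0 - successful
 *	other values - error
 **/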
5857static int
5858lpfc_create_shost(struct lpfc_hba *phba)
5859{
5860 struct lpfc_vport *vport;
5861 struct Scsi_Host *shost;
5862
5863
5864 phba->fc_edtov = FF_DEF_EDTOV;
5865 phba->fc_ratov = FF_DEF_RATOV;
5866 phba->fc_altov = FF_DEF_ALTOV;
5867 phba->fc_arbtov = FF_DEF_ARBTOV;
5868
5869 atomic_set(&phba->sdev_cnt, 0);
5870 vport = lpfc_create_port(phba, phba->brd_no, &phba->pcidev->dev);
5871 if (!vport)
5872 return -ENODEV;
5873
5874 shost = lpfc_shost_from_vport(vport);
5875 phba->pport = vport;
5876 lpfc_debugfs_initialize(vport);
5877
5878 pci_set_drvdata(phba->pcidev, shost);
5879
5880 return 0;
5881}
5882
5883
5884
5885
5886
5887
5888
5889
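/**
 * lpfc_destroy_shost - Destroy hba physical port with associated scsi host.
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked to destroy HBA physical port and the associated
 * SCSI host.
 **/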
5890static void
5891lpfc_destroy_shost(struct lpfc_hba *phba)
5892{
5893 struct lpfc_vport *vport = phba->pport;
5894
5895
5896 destroy_port(vport);
5897
5898 return;
5899}
5900
5901
5902
5903
5904
5905
5906
5907
5908
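/**
 * lpfc_setup_bg - Setup Block guard structures and debug areas.
 * @phba: pointer to lpfc hba data structure.
 * @shost: pointer to SCSI host data structure.
 *
 * This routine registers the BlockGuard protection and guard masks with the
 * SCSI midlayer and allocates the debug buffers used for BlockGuard hex
 * dumps.
 **/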
5909static void
5910lpfc_setup_bg(struct lpfc_hba *phba, struct Scsi_Host *shost)
5911{
	uint32_t old_mask;
	uint32_t old_guard;
	int pagecnt = 10;

5916 if (lpfc_prot_mask && lpfc_prot_guard) {
5917 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
5918 "1478 Registering BlockGuard with the "
5919 "SCSI layer\n");
5920
5921 old_mask = lpfc_prot_mask;
5922 old_guard = lpfc_prot_guard;
5923
5924
5925 lpfc_prot_mask &= (SHOST_DIF_TYPE1_PROTECTION |
5926 SHOST_DIX_TYPE0_PROTECTION |
5927 SHOST_DIX_TYPE1_PROTECTION);
5928 lpfc_prot_guard &= (SHOST_DIX_GUARD_IP | SHOST_DIX_GUARD_CRC);
5929
5930
5931 if (lpfc_prot_mask == SHOST_DIX_TYPE1_PROTECTION)
5932 lpfc_prot_mask |= SHOST_DIF_TYPE1_PROTECTION;
5933
5934 if (lpfc_prot_mask && lpfc_prot_guard) {
5935 if ((old_mask != lpfc_prot_mask) ||
5936 (old_guard != lpfc_prot_guard))
5937 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
5938 "1475 Registering BlockGuard with the "
5939 "SCSI layer: mask %d guard %d\n",
5940 lpfc_prot_mask, lpfc_prot_guard);
5941
5942 scsi_host_set_prot(shost, lpfc_prot_mask);
5943 scsi_host_set_guard(shost, lpfc_prot_guard);
5944 } else
5945 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
5946 "1479 Not Registering BlockGuard with the SCSI "
5947 "layer, Bad protection parameters: %d %d\n",
5948 old_mask, old_guard);
5949 }
5950
5951 if (!_dump_buf_data) {
5952 while (pagecnt) {
5953 spin_lock_init(&_dump_buf_lock);
5954 _dump_buf_data =
5955 (char *) __get_free_pages(GFP_KERNEL, pagecnt);
5956 if (_dump_buf_data) {
5957 lpfc_printf_log(phba, KERN_ERR, LOG_BG,
5958 "9043 BLKGRD: allocated %d pages for "
5959 "_dump_buf_data at 0x%p\n",
5960 (1 << pagecnt), _dump_buf_data);
5961 _dump_buf_data_order = pagecnt;
5962 memset(_dump_buf_data, 0,
5963 ((1 << PAGE_SHIFT) << pagecnt));
5964 break;
5965 } else
5966 --pagecnt;
5967 }
5968 if (!_dump_buf_data_order)
5969 lpfc_printf_log(phba, KERN_ERR, LOG_BG,
5970 "9044 BLKGRD: ERROR unable to allocate "
5971 "memory for hexdump\n");
5972 } else
5973 lpfc_printf_log(phba, KERN_ERR, LOG_BG,
5974 "9045 BLKGRD: already allocated _dump_buf_data=0x%p"
5975 "\n", _dump_buf_data);
5976 if (!_dump_buf_dif) {
5977 while (pagecnt) {
5978 _dump_buf_dif =
5979 (char *) __get_free_pages(GFP_KERNEL, pagecnt);
5980 if (_dump_buf_dif) {
5981 lpfc_printf_log(phba, KERN_ERR, LOG_BG,
5982 "9046 BLKGRD: allocated %d pages for "
5983 "_dump_buf_dif at 0x%p\n",
5984 (1 << pagecnt), _dump_buf_dif);
5985 _dump_buf_dif_order = pagecnt;
5986 memset(_dump_buf_dif, 0,
5987 ((1 << PAGE_SHIFT) << pagecnt));
5988 break;
5989 } else
5990 --pagecnt;
5991 }
5992 if (!_dump_buf_dif_order)
5993 lpfc_printf_log(phba, KERN_ERR, LOG_BG,
5994 "9047 BLKGRD: ERROR unable to allocate "
5995 "memory for hexdump\n");
5996 } else
5997 lpfc_printf_log(phba, KERN_ERR, LOG_BG,
5998 "9048 BLKGRD: already allocated _dump_buf_dif=0x%p\n",
5999 _dump_buf_dif);
6000}
6001
6002
6003
6004
6005
6006
6007
6008
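/**
 * lpfc_post_init_setup - Perform necessary device post initialization setup.
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked to perform all the necessary post initialization
 * setup for the device.
 **/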
6009static void
6010lpfc_post_init_setup(struct lpfc_hba *phba)
6011{
6012 struct Scsi_Host *shost;
6013 struct lpfc_adapter_event_header adapter_event;
6014
6015
6016 lpfc_get_hba_model_desc(phba, phba->ModelName, phba->ModelDesc);
6017
6018
6019
6020
6021
6022 shost = pci_get_drvdata(phba->pcidev);
6023 shost->can_queue = phba->cfg_hba_queue_depth - 10;
6024 if (phba->sli3_options & LPFC_SLI3_BG_ENABLED)
6025 lpfc_setup_bg(phba, shost);
6026
6027 lpfc_host_attrib_init(shost);
6028
6029 if (phba->cfg_poll & DISABLE_FCP_RING_INT) {
6030 spin_lock_irq(shost->host_lock);
6031 lpfc_poll_start_timer(phba);
6032 spin_unlock_irq(shost->host_lock);
6033 }
6034
6035 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
6036 "0428 Perform SCSI scan\n");
6037
6038 adapter_event.event_type = FC_REG_ADAPTER_EVENT;
6039 adapter_event.subcategory = LPFC_EVENT_ARRIVAL;
6040 fc_host_post_vendor_event(shost, fc_get_event_number(),
6041 sizeof(adapter_event),
6042 (char *) &adapter_event,
6043 LPFC_NL_VENDOR_ID);
6044 return;
6045}
6046
6047
6048
6049
6050
6051
6052
6053
6054
6055
6056
6057
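/**
 * lpfc_sli_pci_mem_setup - Setup SLI3 HBA PCI memory space.
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked to set up the PCI device memory space for device
 * with SLI-3 interface spec.
 *
 * Return codes
 *	0 - successful
 *	other values - error
 **/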
6058static int
6059lpfc_sli_pci_mem_setup(struct lpfc_hba *phba)
6060{
6061 struct pci_dev *pdev;
6062 unsigned long bar0map_len, bar2map_len;
6063 int i, hbq_count;
6064 void *ptr;
6065 int error = -ENODEV;
6066
6067
6068 if (!phba->pcidev)
6069 return error;
6070 else
6071 pdev = phba->pcidev;
6072
6073
	if (pci_set_dma_mask(pdev, DMA_BIT_MASK(64)) != 0
	    || pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64)) != 0) {
		if (pci_set_dma_mask(pdev, DMA_BIT_MASK(32)) != 0
		    || pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32)) != 0) {
6078 return error;
6079 }
6080 }
6081
6082
6083
6084
6085 phba->pci_bar0_map = pci_resource_start(pdev, 0);
6086 bar0map_len = pci_resource_len(pdev, 0);
6087
6088 phba->pci_bar2_map = pci_resource_start(pdev, 2);
6089 bar2map_len = pci_resource_len(pdev, 2);
6090
6091
6092 phba->slim_memmap_p = ioremap(phba->pci_bar0_map, bar0map_len);
6093 if (!phba->slim_memmap_p) {
6094 dev_printk(KERN_ERR, &pdev->dev,
6095 "ioremap failed for SLIM memory.\n");
6096 goto out;
6097 }
6098
6099
6100 phba->ctrl_regs_memmap_p = ioremap(phba->pci_bar2_map, bar2map_len);
6101 if (!phba->ctrl_regs_memmap_p) {
6102 dev_printk(KERN_ERR, &pdev->dev,
6103 "ioremap failed for HBA control registers.\n");
6104 goto out_iounmap_slim;
6105 }
6106
6107
6108 phba->slim2p.virt = dma_alloc_coherent(&pdev->dev,
6109 SLI2_SLIM_SIZE,
6110 &phba->slim2p.phys,
6111 GFP_KERNEL);
6112 if (!phba->slim2p.virt)
6113 goto out_iounmap;
6114
6115 memset(phba->slim2p.virt, 0, SLI2_SLIM_SIZE);
6116 phba->mbox = phba->slim2p.virt + offsetof(struct lpfc_sli2_slim, mbx);
6117 phba->mbox_ext = (phba->slim2p.virt +
6118 offsetof(struct lpfc_sli2_slim, mbx_ext_words));
6119 phba->pcb = (phba->slim2p.virt + offsetof(struct lpfc_sli2_slim, pcb));
6120 phba->IOCBs = (phba->slim2p.virt +
6121 offsetof(struct lpfc_sli2_slim, IOCBs));
6122
6123 phba->hbqslimp.virt = dma_alloc_coherent(&pdev->dev,
6124 lpfc_sli_hbq_size(),
6125 &phba->hbqslimp.phys,
6126 GFP_KERNEL);
6127 if (!phba->hbqslimp.virt)
6128 goto out_free_slim;
6129
6130 hbq_count = lpfc_sli_hbq_count();
6131 ptr = phba->hbqslimp.virt;
6132 for (i = 0; i < hbq_count; ++i) {
6133 phba->hbqs[i].hbq_virt = ptr;
6134 INIT_LIST_HEAD(&phba->hbqs[i].hbq_buffer_list);
6135 ptr += (lpfc_hbq_defs[i]->entry_count *
6136 sizeof(struct lpfc_hbq_entry));
6137 }
6138 phba->hbqs[LPFC_ELS_HBQ].hbq_alloc_buffer = lpfc_els_hbq_alloc;
6139 phba->hbqs[LPFC_ELS_HBQ].hbq_free_buffer = lpfc_els_hbq_free;
6140
6141 memset(phba->hbqslimp.virt, 0, lpfc_sli_hbq_size());
6142
6143 INIT_LIST_HEAD(&phba->rb_pend_list);
6144
6145 phba->MBslimaddr = phba->slim_memmap_p;
6146 phba->HAregaddr = phba->ctrl_regs_memmap_p + HA_REG_OFFSET;
6147 phba->CAregaddr = phba->ctrl_regs_memmap_p + CA_REG_OFFSET;
6148 phba->HSregaddr = phba->ctrl_regs_memmap_p + HS_REG_OFFSET;
6149 phba->HCregaddr = phba->ctrl_regs_memmap_p + HC_REG_OFFSET;
6150
6151 return 0;
6152
6153out_free_slim:
6154 dma_free_coherent(&pdev->dev, SLI2_SLIM_SIZE,
6155 phba->slim2p.virt, phba->slim2p.phys);
6156out_iounmap:
6157 iounmap(phba->ctrl_regs_memmap_p);
6158out_iounmap_slim:
6159 iounmap(phba->slim_memmap_p);
6160out:
6161 return error;
6162}
6163
6164
6165
6166
6167
6168
6169
6170
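/**
 * lpfc_sli_pci_mem_unset - Unset SLI3 HBA PCI memory space.
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked to unset the PCI device memory space for device
 * with SLI-3 interface spec.
 **/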
6171static void
6172lpfc_sli_pci_mem_unset(struct lpfc_hba *phba)
6173{
6174 struct pci_dev *pdev;
6175
6176
6177 if (!phba->pcidev)
6178 return;
6179 else
6180 pdev = phba->pcidev;
6181
6182
6183 dma_free_coherent(&pdev->dev, lpfc_sli_hbq_size(),
6184 phba->hbqslimp.virt, phba->hbqslimp.phys);
6185 dma_free_coherent(&pdev->dev, SLI2_SLIM_SIZE,
6186 phba->slim2p.virt, phba->slim2p.phys);
6187
6188
6189 iounmap(phba->ctrl_regs_memmap_p);
6190 iounmap(phba->slim_memmap_p);
6191
6192 return;
6193}
6194
6195
6196
6197
6198
6199
6200
6201
6202
6203
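/**
 * lpfc_sli4_post_status_check - Wait for SLI4 POST done and check status
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked to wait for SLI4 device Power On Self Test (POST)
 * done and check status.
 *
 * Return 0 if successful, otherwise -ENODEV.
 **/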
6204int
6205lpfc_sli4_post_status_check(struct lpfc_hba *phba)
6206{
6207 struct lpfc_register portsmphr_reg, uerrlo_reg, uerrhi_reg;
6208 struct lpfc_register reg_data;
6209 int i, port_error = 0;
6210 uint32_t if_type;
6211
6212 memset(&portsmphr_reg, 0, sizeof(portsmphr_reg));
	memset(&reg_data, 0, sizeof(reg_data));
6214 if (!phba->sli4_hba.PSMPHRregaddr)
6215 return -ENODEV;
6216
6217
6218 for (i = 0; i < 3000; i++) {
6219 if (lpfc_readl(phba->sli4_hba.PSMPHRregaddr,
6220 &portsmphr_reg.word0) ||
6221 (bf_get(lpfc_port_smphr_perr, &portsmphr_reg))) {
6222
6223 port_error = -ENODEV;
6224 break;
6225 }
6226 if (LPFC_POST_STAGE_PORT_READY ==
6227 bf_get(lpfc_port_smphr_port_status, &portsmphr_reg))
6228 break;
6229 msleep(10);
6230 }
6231
6232
6233
6234
6235
6236 if (port_error) {
6237 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
6238 "1408 Port Failed POST - portsmphr=0x%x, "
6239 "perr=x%x, sfi=x%x, nip=x%x, ipc=x%x, scr1=x%x, "
6240 "scr2=x%x, hscratch=x%x, pstatus=x%x\n",
6241 portsmphr_reg.word0,
6242 bf_get(lpfc_port_smphr_perr, &portsmphr_reg),
6243 bf_get(lpfc_port_smphr_sfi, &portsmphr_reg),
6244 bf_get(lpfc_port_smphr_nip, &portsmphr_reg),
6245 bf_get(lpfc_port_smphr_ipc, &portsmphr_reg),
6246 bf_get(lpfc_port_smphr_scr1, &portsmphr_reg),
6247 bf_get(lpfc_port_smphr_scr2, &portsmphr_reg),
6248 bf_get(lpfc_port_smphr_host_scratch, &portsmphr_reg),
6249 bf_get(lpfc_port_smphr_port_status, &portsmphr_reg));
6250 } else {
6251 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
6252 "2534 Device Info: SLIFamily=0x%x, "
6253 "SLIRev=0x%x, IFType=0x%x, SLIHint_1=0x%x, "
6254 "SLIHint_2=0x%x, FT=0x%x\n",
6255 bf_get(lpfc_sli_intf_sli_family,
6256 &phba->sli4_hba.sli_intf),
6257 bf_get(lpfc_sli_intf_slirev,
6258 &phba->sli4_hba.sli_intf),
6259 bf_get(lpfc_sli_intf_if_type,
6260 &phba->sli4_hba.sli_intf),
6261 bf_get(lpfc_sli_intf_sli_hint1,
6262 &phba->sli4_hba.sli_intf),
6263 bf_get(lpfc_sli_intf_sli_hint2,
6264 &phba->sli4_hba.sli_intf),
6265 bf_get(lpfc_sli_intf_func_type,
6266 &phba->sli4_hba.sli_intf));
6267
6268
6269
6270
6271
6272 if_type = bf_get(lpfc_sli_intf_if_type,
6273 &phba->sli4_hba.sli_intf);
6274 switch (if_type) {
6275 case LPFC_SLI_INTF_IF_TYPE_0:
6276 phba->sli4_hba.ue_mask_lo =
6277 readl(phba->sli4_hba.u.if_type0.UEMASKLOregaddr);
6278 phba->sli4_hba.ue_mask_hi =
6279 readl(phba->sli4_hba.u.if_type0.UEMASKHIregaddr);
6280 uerrlo_reg.word0 =
6281 readl(phba->sli4_hba.u.if_type0.UERRLOregaddr);
6282 uerrhi_reg.word0 =
6283 readl(phba->sli4_hba.u.if_type0.UERRHIregaddr);
6284 if ((~phba->sli4_hba.ue_mask_lo & uerrlo_reg.word0) ||
6285 (~phba->sli4_hba.ue_mask_hi & uerrhi_reg.word0)) {
6286 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
6287 "1422 Unrecoverable Error "
6288 "Detected during POST "
6289 "uerr_lo_reg=0x%x, "
6290 "uerr_hi_reg=0x%x, "
6291 "ue_mask_lo_reg=0x%x, "
6292 "ue_mask_hi_reg=0x%x\n",
6293 uerrlo_reg.word0,
6294 uerrhi_reg.word0,
6295 phba->sli4_hba.ue_mask_lo,
6296 phba->sli4_hba.ue_mask_hi);
6297 port_error = -ENODEV;
6298 }
6299 break;
6300 case LPFC_SLI_INTF_IF_TYPE_2:
6301
			if (lpfc_readl(phba->sli4_hba.u.if_type2.STATUSregaddr,
				&reg_data.word0) ||
				(bf_get(lpfc_sliport_status_err, &reg_data) &&
				 !bf_get(lpfc_sliport_status_rn, &reg_data))) {
6306 phba->work_status[0] =
6307 readl(phba->sli4_hba.u.if_type2.
6308 ERR1regaddr);
6309 phba->work_status[1] =
6310 readl(phba->sli4_hba.u.if_type2.
6311 ERR2regaddr);
6312 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
6313 "2888 Unrecoverable port error "
6314 "following POST: port status reg "
6315 "0x%x, port_smphr reg 0x%x, "
6316 "error 1=0x%x, error 2=0x%x\n",
6317 reg_data.word0,
6318 portsmphr_reg.word0,
6319 phba->work_status[0],
6320 phba->work_status[1]);
6321 port_error = -ENODEV;
6322 }
6323 break;
6324 case LPFC_SLI_INTF_IF_TYPE_1:
6325 default:
6326 break;
6327 }
6328 }
6329 return port_error;
6330}
6331
6332
6333
6334
6335
6336
6337
6338
6339
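/**
 * lpfc_sli4_bar0_register_memmap - Set up SLI4 BAR0 register memory map.
 * @phba: pointer to lpfc hba data structure.
 * @if_type: The SLI4 interface type getting configured.
 *
 * This routine is invoked to set up SLI4 BAR0 PCI config space register
 * memory map.
 **/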
6340static void
6341lpfc_sli4_bar0_register_memmap(struct lpfc_hba *phba, uint32_t if_type)
6342{
6343 switch (if_type) {
6344 case LPFC_SLI_INTF_IF_TYPE_0:
6345 phba->sli4_hba.u.if_type0.UERRLOregaddr =
6346 phba->sli4_hba.conf_regs_memmap_p + LPFC_UERR_STATUS_LO;
6347 phba->sli4_hba.u.if_type0.UERRHIregaddr =
6348 phba->sli4_hba.conf_regs_memmap_p + LPFC_UERR_STATUS_HI;
6349 phba->sli4_hba.u.if_type0.UEMASKLOregaddr =
6350 phba->sli4_hba.conf_regs_memmap_p + LPFC_UE_MASK_LO;
6351 phba->sli4_hba.u.if_type0.UEMASKHIregaddr =
6352 phba->sli4_hba.conf_regs_memmap_p + LPFC_UE_MASK_HI;
6353 phba->sli4_hba.SLIINTFregaddr =
6354 phba->sli4_hba.conf_regs_memmap_p + LPFC_SLI_INTF;
6355 break;
6356 case LPFC_SLI_INTF_IF_TYPE_2:
6357 phba->sli4_hba.u.if_type2.ERR1regaddr =
6358 phba->sli4_hba.conf_regs_memmap_p +
6359 LPFC_CTL_PORT_ER1_OFFSET;
6360 phba->sli4_hba.u.if_type2.ERR2regaddr =
6361 phba->sli4_hba.conf_regs_memmap_p +
6362 LPFC_CTL_PORT_ER2_OFFSET;
6363 phba->sli4_hba.u.if_type2.CTRLregaddr =
6364 phba->sli4_hba.conf_regs_memmap_p +
6365 LPFC_CTL_PORT_CTL_OFFSET;
6366 phba->sli4_hba.u.if_type2.STATUSregaddr =
6367 phba->sli4_hba.conf_regs_memmap_p +
6368 LPFC_CTL_PORT_STA_OFFSET;
6369 phba->sli4_hba.SLIINTFregaddr =
6370 phba->sli4_hba.conf_regs_memmap_p + LPFC_SLI_INTF;
6371 phba->sli4_hba.PSMPHRregaddr =
6372 phba->sli4_hba.conf_regs_memmap_p +
6373 LPFC_CTL_PORT_SEM_OFFSET;
6374 phba->sli4_hba.RQDBregaddr =
6375 phba->sli4_hba.conf_regs_memmap_p +
6376 LPFC_ULP0_RQ_DOORBELL;
6377 phba->sli4_hba.WQDBregaddr =
6378 phba->sli4_hba.conf_regs_memmap_p +
6379 LPFC_ULP0_WQ_DOORBELL;
6380 phba->sli4_hba.EQCQDBregaddr =
6381 phba->sli4_hba.conf_regs_memmap_p + LPFC_EQCQ_DOORBELL;
6382 phba->sli4_hba.MQDBregaddr =
6383 phba->sli4_hba.conf_regs_memmap_p + LPFC_MQ_DOORBELL;
6384 phba->sli4_hba.BMBXregaddr =
6385 phba->sli4_hba.conf_regs_memmap_p + LPFC_BMBX;
6386 break;
6387 case LPFC_SLI_INTF_IF_TYPE_1:
6388 default:
6389 dev_printk(KERN_ERR, &phba->pcidev->dev,
6390 "FATAL - unsupported SLI4 interface type - %d\n",
6391 if_type);
6392 break;
6393 }
6394}
6395
6396
6397
6398
6399
6400
6401
6402
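/**
 * lpfc_sli4_bar1_register_memmap - Set up SLI4 BAR1 register memory map.
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked to set up SLI4 BAR1 control status register (CSR)
 * memory map.
 **/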
6403static void
6404lpfc_sli4_bar1_register_memmap(struct lpfc_hba *phba)
6405{
6406 phba->sli4_hba.PSMPHRregaddr = phba->sli4_hba.ctrl_regs_memmap_p +
6407 LPFC_SLIPORT_IF0_SMPHR;
6408 phba->sli4_hba.ISRregaddr = phba->sli4_hba.ctrl_regs_memmap_p +
6409 LPFC_HST_ISR0;
6410 phba->sli4_hba.IMRregaddr = phba->sli4_hba.ctrl_regs_memmap_p +
6411 LPFC_HST_IMR0;
6412 phba->sli4_hba.ISCRregaddr = phba->sli4_hba.ctrl_regs_memmap_p +
6413 LPFC_HST_ISCR0;
6414}
6415
6416
6417
6418
6419
6420
6421
6422
6423
6424
6425
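/**
 * lpfc_sli4_bar2_register_memmap - Set up SLI4 BAR2 register memory map.
 * @phba: pointer to lpfc hba data structure.
 * @vf: virtual function number
 *
 * This routine is invoked to set up SLI4 BAR2 doorbell register memory map
 * based on the given virtual function number, @vf.
 *
 * Return 0 if successful, otherwise -ENODEV.
 **/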
6426static int
6427lpfc_sli4_bar2_register_memmap(struct lpfc_hba *phba, uint32_t vf)
6428{
6429 if (vf > LPFC_VIR_FUNC_MAX)
6430 return -ENODEV;
6431
6432 phba->sli4_hba.RQDBregaddr = (phba->sli4_hba.drbl_regs_memmap_p +
6433 vf * LPFC_VFR_PAGE_SIZE +
6434 LPFC_ULP0_RQ_DOORBELL);
6435 phba->sli4_hba.WQDBregaddr = (phba->sli4_hba.drbl_regs_memmap_p +
6436 vf * LPFC_VFR_PAGE_SIZE +
6437 LPFC_ULP0_WQ_DOORBELL);
6438 phba->sli4_hba.EQCQDBregaddr = (phba->sli4_hba.drbl_regs_memmap_p +
6439 vf * LPFC_VFR_PAGE_SIZE + LPFC_EQCQ_DOORBELL);
6440 phba->sli4_hba.MQDBregaddr = (phba->sli4_hba.drbl_regs_memmap_p +
6441 vf * LPFC_VFR_PAGE_SIZE + LPFC_MQ_DOORBELL);
6442 phba->sli4_hba.BMBXregaddr = (phba->sli4_hba.drbl_regs_memmap_p +
6443 vf * LPFC_VFR_PAGE_SIZE + LPFC_BMBX);
6444 return 0;
6445}
6446
6447
6448
6449
6450
6451
6452
6453
6454
6455
6456
6457
6458
6459
6460
6461
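/**
 * lpfc_create_bootstrap_mbox - Create the bootstrap mailbox
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked to create the bootstrap mailbox
 * region consistent with the SLI-4 interface spec. This
 * routine allocates all memory necessary to communicate
 * mailbox commands to the port and sets up all alignment
 * needs. No locks are expected to be held when calling
 * this routine.
 *
 * Return codes
 *	0 - successful
 *	-ENOMEM - could not allocate memory
 **/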
6462static int
6463lpfc_create_bootstrap_mbox(struct lpfc_hba *phba)
6464{
6465 uint32_t bmbx_size;
6466 struct lpfc_dmabuf *dmabuf;
6467 struct dma_address *dma_address;
6468 uint32_t pa_addr;
6469 uint64_t phys_addr;
6470
6471 dmabuf = kzalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
6472 if (!dmabuf)
6473 return -ENOMEM;
6474
6475
6476
6477
6478
6479 bmbx_size = sizeof(struct lpfc_bmbx_create) + (LPFC_ALIGN_16_BYTE - 1);
6480 dmabuf->virt = dma_alloc_coherent(&phba->pcidev->dev,
6481 bmbx_size,
6482 &dmabuf->phys,
6483 GFP_KERNEL);
6484 if (!dmabuf->virt) {
6485 kfree(dmabuf);
6486 return -ENOMEM;
6487 }
6488 memset(dmabuf->virt, 0, bmbx_size);
6489
6490
6491
6492
6493
6494
6495
6496
6497 phba->sli4_hba.bmbx.dmabuf = dmabuf;
6498 phba->sli4_hba.bmbx.bmbx_size = bmbx_size;
6499
6500 phba->sli4_hba.bmbx.avirt = PTR_ALIGN(dmabuf->virt,
6501 LPFC_ALIGN_16_BYTE);
6502 phba->sli4_hba.bmbx.aphys = ALIGN(dmabuf->phys,
6503 LPFC_ALIGN_16_BYTE);
6504
6505
6506
6507
6508
6509
6510
6511
6512
6513 dma_address = &phba->sli4_hba.bmbx.dma_address;
6514 phys_addr = (uint64_t)phba->sli4_hba.bmbx.aphys;
6515 pa_addr = (uint32_t) ((phys_addr >> 34) & 0x3fffffff);
6516 dma_address->addr_hi = (uint32_t) ((pa_addr << 2) |
6517 LPFC_BMBX_BIT1_ADDR_HI);
6518
6519 pa_addr = (uint32_t) ((phba->sli4_hba.bmbx.aphys >> 4) & 0x3fffffff);
6520 dma_address->addr_lo = (uint32_t) ((pa_addr << 2) |
6521 LPFC_BMBX_BIT1_ADDR_LO);
6522 return 0;
6523}
6524
6525
6526
6527
6528
6529
6530
6531
6532
6533
6534
6535
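/**
 * lpfc_destroy_bootstrap_mbox - Destroy all bootstrap mailbox resources
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked to teardown the bootstrap mailbox
 * region and release all host resources. This routine requires
 * the caller to ensure all mailbox commands recovered, no
 * additional mailbox commands are sent, and interrupts are disabled
 * before calling this routine.
 **/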
6536static void
6537lpfc_destroy_bootstrap_mbox(struct lpfc_hba *phba)
6538{
6539 dma_free_coherent(&phba->pcidev->dev,
6540 phba->sli4_hba.bmbx.bmbx_size,
6541 phba->sli4_hba.bmbx.dmabuf->virt,
6542 phba->sli4_hba.bmbx.dmabuf->phys);
6543
6544 kfree(phba->sli4_hba.bmbx.dmabuf);
6545 memset(&phba->sli4_hba.bmbx, 0, sizeof(struct lpfc_bmbx));
6546}
6547
6548
6549
6550
6551
6552
6553
6554
6555
6556
6557
6558
6559
6560
6561
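/**
 * lpfc_sli4_read_config - Get the config parameters.
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked to read the configuration parameters from the HBA.
 * The configuration parameters are used to set the base and maximum values
 * for RPI's XRI's VPI's VFI's and FCFIs. These values also affect the resource
 * allocation for the port.
 *
 * Return codes
 *	0 - successful
 *	-ENOMEM - No available memory
 *	-EIO - The mailbox failed to complete successfully.
 **/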
6562int
6563lpfc_sli4_read_config(struct lpfc_hba *phba)
6564{
6565 LPFC_MBOXQ_t *pmb;
6566 struct lpfc_mbx_read_config *rd_config;
6567 union lpfc_sli4_cfg_shdr *shdr;
6568 uint32_t shdr_status, shdr_add_status;
6569 struct lpfc_mbx_get_func_cfg *get_func_cfg;
6570 struct lpfc_rsrc_desc_fcfcoe *desc;
6571 char *pdesc_0;
6572 uint32_t desc_count;
6573 int length, i, rc = 0, rc2;
6574
6575 pmb = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
6576 if (!pmb) {
6577 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
6578 "2011 Unable to allocate memory for issuing "
6579 "SLI_CONFIG_SPECIAL mailbox command\n");
6580 return -ENOMEM;
6581 }
6582
6583 lpfc_read_config(phba, pmb);
6584
6585 rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL);
6586 if (rc != MBX_SUCCESS) {
6587 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
				"2012 Mailbox failed, mbxCmd x%x "
6589 "READ_CONFIG, mbxStatus x%x\n",
6590 bf_get(lpfc_mqe_command, &pmb->u.mqe),
6591 bf_get(lpfc_mqe_status, &pmb->u.mqe));
6592 rc = -EIO;
6593 } else {
6594 rd_config = &pmb->u.mqe.un.rd_config;
6595 if (bf_get(lpfc_mbx_rd_conf_lnk_ldv, rd_config)) {
6596 phba->sli4_hba.lnk_info.lnk_dv = LPFC_LNK_DAT_VAL;
6597 phba->sli4_hba.lnk_info.lnk_tp =
6598 bf_get(lpfc_mbx_rd_conf_lnk_type, rd_config);
6599 phba->sli4_hba.lnk_info.lnk_no =
6600 bf_get(lpfc_mbx_rd_conf_lnk_numb, rd_config);
6601 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
6602 "3081 lnk_type:%d, lnk_numb:%d\n",
6603 phba->sli4_hba.lnk_info.lnk_tp,
6604 phba->sli4_hba.lnk_info.lnk_no);
6605 } else
6606 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
6607 "3082 Mailbox (x%x) returned ldv:x0\n",
6608 bf_get(lpfc_mqe_command, &pmb->u.mqe));
6609 phba->sli4_hba.extents_in_use =
6610 bf_get(lpfc_mbx_rd_conf_extnts_inuse, rd_config);
6611 phba->sli4_hba.max_cfg_param.max_xri =
6612 bf_get(lpfc_mbx_rd_conf_xri_count, rd_config);
6613 phba->sli4_hba.max_cfg_param.xri_base =
6614 bf_get(lpfc_mbx_rd_conf_xri_base, rd_config);
6615 phba->sli4_hba.max_cfg_param.max_vpi =
6616 bf_get(lpfc_mbx_rd_conf_vpi_count, rd_config);
6617 phba->sli4_hba.max_cfg_param.vpi_base =
6618 bf_get(lpfc_mbx_rd_conf_vpi_base, rd_config);
6619 phba->sli4_hba.max_cfg_param.max_rpi =
6620 bf_get(lpfc_mbx_rd_conf_rpi_count, rd_config);
6621 phba->sli4_hba.max_cfg_param.rpi_base =
6622 bf_get(lpfc_mbx_rd_conf_rpi_base, rd_config);
6623 phba->sli4_hba.max_cfg_param.max_vfi =
6624 bf_get(lpfc_mbx_rd_conf_vfi_count, rd_config);
6625 phba->sli4_hba.max_cfg_param.vfi_base =
6626 bf_get(lpfc_mbx_rd_conf_vfi_base, rd_config);
6627 phba->sli4_hba.max_cfg_param.max_fcfi =
6628 bf_get(lpfc_mbx_rd_conf_fcfi_count, rd_config);
6629 phba->sli4_hba.max_cfg_param.max_eq =
6630 bf_get(lpfc_mbx_rd_conf_eq_count, rd_config);
6631 phba->sli4_hba.max_cfg_param.max_rq =
6632 bf_get(lpfc_mbx_rd_conf_rq_count, rd_config);
6633 phba->sli4_hba.max_cfg_param.max_wq =
6634 bf_get(lpfc_mbx_rd_conf_wq_count, rd_config);
6635 phba->sli4_hba.max_cfg_param.max_cq =
6636 bf_get(lpfc_mbx_rd_conf_cq_count, rd_config);
6637 phba->lmt = bf_get(lpfc_mbx_rd_conf_lmt, rd_config);
6638 phba->sli4_hba.next_xri = phba->sli4_hba.max_cfg_param.xri_base;
6639 phba->vpi_base = phba->sli4_hba.max_cfg_param.vpi_base;
6640 phba->vfi_base = phba->sli4_hba.max_cfg_param.vfi_base;
6641 phba->max_vpi = (phba->sli4_hba.max_cfg_param.max_vpi > 0) ?
6642 (phba->sli4_hba.max_cfg_param.max_vpi - 1) : 0;
6643 phba->max_vports = phba->max_vpi;
6644 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
6645 "2003 cfg params Extents? %d "
6646 "XRI(B:%d M:%d), "
6647 "VPI(B:%d M:%d) "
6648 "VFI(B:%d M:%d) "
6649 "RPI(B:%d M:%d) "
6650 "FCFI(Count:%d)\n",
6651 phba->sli4_hba.extents_in_use,
6652 phba->sli4_hba.max_cfg_param.xri_base,
6653 phba->sli4_hba.max_cfg_param.max_xri,
6654 phba->sli4_hba.max_cfg_param.vpi_base,
6655 phba->sli4_hba.max_cfg_param.max_vpi,
6656 phba->sli4_hba.max_cfg_param.vfi_base,
6657 phba->sli4_hba.max_cfg_param.max_vfi,
6658 phba->sli4_hba.max_cfg_param.rpi_base,
6659 phba->sli4_hba.max_cfg_param.max_rpi,
6660 phba->sli4_hba.max_cfg_param.max_fcfi);
6661 }
6662
6663 if (rc)
6664 goto read_cfg_out;
6665
6666
6667 if (phba->cfg_hba_queue_depth >
6668 (phba->sli4_hba.max_cfg_param.max_xri -
6669 lpfc_sli4_get_els_iocb_cnt(phba)))
6670 phba->cfg_hba_queue_depth =
6671 phba->sli4_hba.max_cfg_param.max_xri -
6672 lpfc_sli4_get_els_iocb_cnt(phba);
6673
6674 if (bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) !=
6675 LPFC_SLI_INTF_IF_TYPE_2)
6676 goto read_cfg_out;
6677
6678
6679 length = (sizeof(struct lpfc_mbx_get_func_cfg) -
6680 sizeof(struct lpfc_sli4_cfg_mhdr));
6681 lpfc_sli4_config(phba, pmb, LPFC_MBOX_SUBSYSTEM_COMMON,
6682 LPFC_MBOX_OPCODE_GET_FUNCTION_CONFIG,
6683 length, LPFC_SLI4_MBX_EMBED);
6684
6685 rc2 = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL);
6686 shdr = (union lpfc_sli4_cfg_shdr *)
6687 &pmb->u.mqe.un.sli4_config.header.cfg_shdr;
6688 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
6689 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
6690 if (rc2 || shdr_status || shdr_add_status) {
6691 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
				"3026 Mailbox failed, mbxCmd x%x "
6693 "GET_FUNCTION_CONFIG, mbxStatus x%x\n",
6694 bf_get(lpfc_mqe_command, &pmb->u.mqe),
6695 bf_get(lpfc_mqe_status, &pmb->u.mqe));
6696 goto read_cfg_out;
6697 }
6698
6699
6700 get_func_cfg = &pmb->u.mqe.un.get_func_cfg;
6701 desc_count = get_func_cfg->func_cfg.rsrc_desc_count;
6702
6703 pdesc_0 = (char *)&get_func_cfg->func_cfg.desc[0];
6704 desc = (struct lpfc_rsrc_desc_fcfcoe *)pdesc_0;
6705 length = bf_get(lpfc_rsrc_desc_fcfcoe_length, desc);
6706 if (length == LPFC_RSRC_DESC_TYPE_FCFCOE_V0_RSVD)
6707 length = LPFC_RSRC_DESC_TYPE_FCFCOE_V0_LENGTH;
6708 else if (length != LPFC_RSRC_DESC_TYPE_FCFCOE_V1_LENGTH)
6709 goto read_cfg_out;
6710
6711 for (i = 0; i < LPFC_RSRC_DESC_MAX_NUM; i++) {
6712 desc = (struct lpfc_rsrc_desc_fcfcoe *)(pdesc_0 + length * i);
6713 if (LPFC_RSRC_DESC_TYPE_FCFCOE ==
6714 bf_get(lpfc_rsrc_desc_fcfcoe_type, desc)) {
6715 phba->sli4_hba.iov.pf_number =
6716 bf_get(lpfc_rsrc_desc_fcfcoe_pfnum, desc);
6717 phba->sli4_hba.iov.vf_number =
6718 bf_get(lpfc_rsrc_desc_fcfcoe_vfnum, desc);
6719 break;
6720 }
6721 }
6722
6723 if (i < LPFC_RSRC_DESC_MAX_NUM)
6724 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
6725 "3027 GET_FUNCTION_CONFIG: pf_number:%d, "
6726 "vf_number:%d\n", phba->sli4_hba.iov.pf_number,
6727 phba->sli4_hba.iov.vf_number);
6728 else
6729 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
6730 "3028 GET_FUNCTION_CONFIG: failed to find "
				"Resource Descriptor:x%x\n",
6732 LPFC_RSRC_DESC_TYPE_FCFCOE);
6733
6734read_cfg_out:
6735 mempool_free(pmb, phba->mbox_mem_pool);
6736 return rc;
6737}
6738
6739
6740
6741
6742
6743
6744
6745
6746
6747
6748
6749
6750
6751
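/**
 * lpfc_setup_endian_order - Write endian order to an SLI4 if_type 0 port.
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked to setup the port-side endian order when
 * the port if_type is 0. This routine has no function for other
 * if_types.
 *
 * Return codes
 *	0 - successful
 *	-ENOMEM - No available memory
 *	-EIO - The mailbox failed to complete successfully.
 **/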
6752static int
6753lpfc_setup_endian_order(struct lpfc_hba *phba)
6754{
6755 LPFC_MBOXQ_t *mboxq;
6756 uint32_t if_type, rc = 0;
6757 uint32_t endian_mb_data[2] = {HOST_ENDIAN_LOW_WORD0,
6758 HOST_ENDIAN_HIGH_WORD1};
6759
6760 if_type = bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf);
6761 switch (if_type) {
6762 case LPFC_SLI_INTF_IF_TYPE_0:
6763 mboxq = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool,
6764 GFP_KERNEL);
6765 if (!mboxq) {
6766 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
6767 "0492 Unable to allocate memory for "
6768 "issuing SLI_CONFIG_SPECIAL mailbox "
6769 "command\n");
6770 return -ENOMEM;
6771 }
6772
6773
6774
6775
6776
6777 memset(mboxq, 0, sizeof(LPFC_MBOXQ_t));
6778 memcpy(&mboxq->u.mqe, &endian_mb_data, sizeof(endian_mb_data));
6779 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
6780 if (rc != MBX_SUCCESS) {
6781 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
6782 "0493 SLI_CONFIG_SPECIAL mailbox "
6783 "failed with status x%x\n",
6784 rc);
6785 rc = -EIO;
6786 }
6787 mempool_free(mboxq, phba->mbox_mem_pool);
6788 break;
6789 case LPFC_SLI_INTF_IF_TYPE_2:
6790 case LPFC_SLI_INTF_IF_TYPE_1:
6791 default:
6792 break;
6793 }
6794 return rc;
6795}
6796
6797
6798
6799
6800
6801
6802
6803
6804
6805
6806
6807
6808
6809
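/**
 * lpfc_sli4_queue_verify - Verify and update EQ and CQ counts
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked to check the user settable queue counts for EQs and
 * CQs. After this routine is called the counts will be set to valid values
 * that adhere to the constraints of the system's online CPUs and the port's
 * queue resources.
 *
 * Return codes
 *	0 - successful
 *	-ENOMEM - not enough EQs available from the pci function
 **/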
6810static int
6811lpfc_sli4_queue_verify(struct lpfc_hba *phba)
6812{
6813 int cfg_fcp_io_channel;
6814 uint32_t cpu;
6815 uint32_t i = 0;
6816
6817
6818
6819
6820
6821
6822
6823 cfg_fcp_io_channel = phba->cfg_fcp_io_channel;
6824
6825
6826 for_each_present_cpu(cpu) {
6827 if (cpu_online(cpu))
6828 i++;
6829 }
6830 phba->sli4_hba.num_online_cpu = i;
6831 phba->sli4_hba.num_present_cpu = lpfc_present_cpu;
6832
6833 if (i < cfg_fcp_io_channel) {
6834 lpfc_printf_log(phba,
6835 KERN_ERR, LOG_INIT,
6836 "3188 Reducing IO channels to match number of "
6837 "online CPUs: from %d to %d\n",
6838 cfg_fcp_io_channel, i);
6839 cfg_fcp_io_channel = i;
6840 }
6841
6842 if (cfg_fcp_io_channel >
6843 phba->sli4_hba.max_cfg_param.max_eq) {
6844 if (phba->sli4_hba.max_cfg_param.max_eq <
6845 LPFC_FCP_IO_CHAN_MIN) {
6846 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
6847 "2574 Not enough EQs (%d) from the "
6848 "pci function for supporting FCP "
6849 "EQs (%d)\n",
6850 phba->sli4_hba.max_cfg_param.max_eq,
6851 phba->cfg_fcp_io_channel);
6852 goto out_error;
6853 }
6854 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
6855 "2575 Reducing IO channels to match number of "
6856 "available EQs: from %d to %d\n",
6857 cfg_fcp_io_channel,
6858 phba->sli4_hba.max_cfg_param.max_eq);
6859 cfg_fcp_io_channel = phba->sli4_hba.max_cfg_param.max_eq;
6860 }
6861
6862
6863
6864
6865 phba->cfg_fcp_eq_count = cfg_fcp_io_channel;
6866 phba->cfg_fcp_wq_count = cfg_fcp_io_channel;
6867 phba->cfg_fcp_io_channel = cfg_fcp_io_channel;
6868
6869
6870 phba->sli4_hba.eq_esize = LPFC_EQE_SIZE_4B;
6871 phba->sli4_hba.eq_ecount = LPFC_EQE_DEF_COUNT;
6872
6873
6874 phba->sli4_hba.cq_esize = LPFC_CQE_SIZE;
6875 phba->sli4_hba.cq_ecount = LPFC_CQE_DEF_COUNT;
6876
6877 return 0;
6878out_error:
6879 return -ENOMEM;
6880}
6881
6882
6883
6884
6885
6886
6887
6888
6889
6890
6891
6892
6893
6894
6895
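/**
 * lpfc_sli4_queue_create - Create all the SLI4 queues
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked to allocate all the SLI4 queues for the HBA
 * operation. For each SLI4 queue type, the parameters such as queue entry
 * count (queue depth) are taken from the driver's default settings.
 *
 * Return codes
 *	0 - successful
 *	-ENOMEM - No available memory
 *	-ERANGE - No FCP IO channels configured
 **/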
6896int
6897lpfc_sli4_queue_create(struct lpfc_hba *phba)
6898{
6899 struct lpfc_queue *qdesc;
6900 int idx;
6901
6902
6903
6904
6905 if (!phba->cfg_fcp_io_channel)
6906 return -ERANGE;
6907
6908 phba->sli4_hba.mq_esize = LPFC_MQE_SIZE;
6909 phba->sli4_hba.mq_ecount = LPFC_MQE_DEF_COUNT;
6910 phba->sli4_hba.wq_esize = LPFC_WQE_SIZE;
6911 phba->sli4_hba.wq_ecount = LPFC_WQE_DEF_COUNT;
6912 phba->sli4_hba.rq_esize = LPFC_RQE_SIZE;
6913 phba->sli4_hba.rq_ecount = LPFC_RQE_DEF_COUNT;
6914
6915 phba->sli4_hba.hba_eq = kzalloc((sizeof(struct lpfc_queue *) *
6916 phba->cfg_fcp_io_channel), GFP_KERNEL);
6917 if (!phba->sli4_hba.hba_eq) {
6918 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
6919 "2576 Failed allocate memory for "
6920 "fast-path EQ record array\n");
6921 goto out_error;
6922 }
6923
6924 phba->sli4_hba.fcp_cq = kzalloc((sizeof(struct lpfc_queue *) *
6925 phba->cfg_fcp_io_channel), GFP_KERNEL);
6926 if (!phba->sli4_hba.fcp_cq) {
6927 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
6928 "2577 Failed allocate memory for fast-path "
6929 "CQ record array\n");
6930 goto out_error;
6931 }
6932
6933 phba->sli4_hba.fcp_wq = kzalloc((sizeof(struct lpfc_queue *) *
6934 phba->cfg_fcp_io_channel), GFP_KERNEL);
6935 if (!phba->sli4_hba.fcp_wq) {
6936 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
6937 "2578 Failed allocate memory for fast-path "
6938 "WQ record array\n");
6939 goto out_error;
6940 }
6941
6942
6943
6944
6945
6946
6947 phba->sli4_hba.fcp_cq_map = kzalloc((sizeof(uint16_t) *
6948 phba->cfg_fcp_io_channel), GFP_KERNEL);
6949 if (!phba->sli4_hba.fcp_cq_map) {
6950 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
6951 "2545 Failed allocate memory for fast-path "
6952 "CQ map\n");
6953 goto out_error;
6954 }
6955
6956
6957
6958
6959
6960 for (idx = 0; idx < phba->cfg_fcp_io_channel; idx++) {
6961
6962
6963 qdesc = lpfc_sli4_queue_alloc(phba, phba->sli4_hba.eq_esize,
6964 phba->sli4_hba.eq_ecount);
6965 if (!qdesc) {
6966 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
6967 "0497 Failed allocate EQ (%d)\n", idx);
6968 goto out_error;
6969 }
6970 phba->sli4_hba.hba_eq[idx] = qdesc;
6971
6972
6973 qdesc = lpfc_sli4_queue_alloc(phba, phba->sli4_hba.cq_esize,
6974 phba->sli4_hba.cq_ecount);
6975 if (!qdesc) {
6976 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
6977 "0499 Failed allocate fast-path FCP "
6978 "CQ (%d)\n", idx);
6979 goto out_error;
6980 }
6981 phba->sli4_hba.fcp_cq[idx] = qdesc;
6982
6983
6984 qdesc = lpfc_sli4_queue_alloc(phba, phba->sli4_hba.wq_esize,
6985 phba->sli4_hba.wq_ecount);
6986 if (!qdesc) {
6987 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
6988 "0503 Failed allocate fast-path FCP "
6989 "WQ (%d)\n", idx);
6990 goto out_error;
6991 }
6992 phba->sli4_hba.fcp_wq[idx] = qdesc;
6993 }
6994
6995
6996
6997
6998
6999
7000
7001 qdesc = lpfc_sli4_queue_alloc(phba, phba->sli4_hba.cq_esize,
7002 phba->sli4_hba.cq_ecount);
7003 if (!qdesc) {
7004 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
7005 "0500 Failed allocate slow-path mailbox CQ\n");
7006 goto out_error;
7007 }
7008 phba->sli4_hba.mbx_cq = qdesc;
7009
7010
7011 qdesc = lpfc_sli4_queue_alloc(phba, phba->sli4_hba.cq_esize,
7012 phba->sli4_hba.cq_ecount);
7013 if (!qdesc) {
7014 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
7015 "0501 Failed allocate slow-path ELS CQ\n");
7016 goto out_error;
7017 }
7018 phba->sli4_hba.els_cq = qdesc;
7019
7020
7021
7022
7023
7024
7025
7026
7027 qdesc = lpfc_sli4_queue_alloc(phba, phba->sli4_hba.mq_esize,
7028 phba->sli4_hba.mq_ecount);
7029 if (!qdesc) {
7030 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
7031 "0505 Failed allocate slow-path MQ\n");
7032 goto out_error;
7033 }
7034 phba->sli4_hba.mbx_wq = qdesc;
7035
7036
7037
7038
7039
7040
7041 qdesc = lpfc_sli4_queue_alloc(phba, phba->sli4_hba.wq_esize,
7042 phba->sli4_hba.wq_ecount);
7043 if (!qdesc) {
7044 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
7045 "0504 Failed allocate slow-path ELS WQ\n");
7046 goto out_error;
7047 }
7048 phba->sli4_hba.els_wq = qdesc;
7049
7050
7051
7052
7053
7054
7055 qdesc = lpfc_sli4_queue_alloc(phba, phba->sli4_hba.rq_esize,
7056 phba->sli4_hba.rq_ecount);
7057 if (!qdesc) {
7058 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
7059 "0506 Failed allocate receive HRQ\n");
7060 goto out_error;
7061 }
7062 phba->sli4_hba.hdr_rq = qdesc;
7063
7064
7065 qdesc = lpfc_sli4_queue_alloc(phba, phba->sli4_hba.rq_esize,
7066 phba->sli4_hba.rq_ecount);
7067 if (!qdesc) {
7068 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
7069 "0507 Failed allocate receive DRQ\n");
7070 goto out_error;
7071 }
7072 phba->sli4_hba.dat_rq = qdesc;
7073
7074 return 0;
7075
7076out_error:
7077 lpfc_sli4_queue_destroy(phba);
7078 return -ENOMEM;
7079}
7080
7081
7082
7083
7084
7085
7086
7087
7088
7089
7090
7091
7092
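/**
 * lpfc_sli4_queue_destroy - Destroy all the SLI4 queues
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked to release all the SLI4 queues allocated for the
 * HBA operation, along with the associated queue record arrays and any
 * ioremapped BAR register memory.
 **/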
7093void
7094lpfc_sli4_queue_destroy(struct lpfc_hba *phba)
7095{
7096 int idx;
7097
7098 if (phba->sli4_hba.hba_eq != NULL) {
7099
7100 for (idx = 0; idx < phba->cfg_fcp_io_channel; idx++) {
7101 if (phba->sli4_hba.hba_eq[idx] != NULL) {
7102 lpfc_sli4_queue_free(
7103 phba->sli4_hba.hba_eq[idx]);
7104 phba->sli4_hba.hba_eq[idx] = NULL;
7105 }
7106 }
7107 kfree(phba->sli4_hba.hba_eq);
7108 phba->sli4_hba.hba_eq = NULL;
7109 }
7110
7111 if (phba->sli4_hba.fcp_cq != NULL) {
7112
7113 for (idx = 0; idx < phba->cfg_fcp_io_channel; idx++) {
7114 if (phba->sli4_hba.fcp_cq[idx] != NULL) {
7115 lpfc_sli4_queue_free(
7116 phba->sli4_hba.fcp_cq[idx]);
7117 phba->sli4_hba.fcp_cq[idx] = NULL;
7118 }
7119 }
7120 kfree(phba->sli4_hba.fcp_cq);
7121 phba->sli4_hba.fcp_cq = NULL;
7122 }
7123
7124 if (phba->sli4_hba.fcp_wq != NULL) {
7125
7126 for (idx = 0; idx < phba->cfg_fcp_io_channel; idx++) {
7127 if (phba->sli4_hba.fcp_wq[idx] != NULL) {
7128 lpfc_sli4_queue_free(
7129 phba->sli4_hba.fcp_wq[idx]);
7130 phba->sli4_hba.fcp_wq[idx] = NULL;
7131 }
7132 }
7133 kfree(phba->sli4_hba.fcp_wq);
7134 phba->sli4_hba.fcp_wq = NULL;
7135 }
7136
7137 if (phba->pci_bar0_memmap_p) {
7138 iounmap(phba->pci_bar0_memmap_p);
7139 phba->pci_bar0_memmap_p = NULL;
7140 }
7141 if (phba->pci_bar2_memmap_p) {
7142 iounmap(phba->pci_bar2_memmap_p);
7143 phba->pci_bar2_memmap_p = NULL;
7144 }
7145 if (phba->pci_bar4_memmap_p) {
7146 iounmap(phba->pci_bar4_memmap_p);
7147 phba->pci_bar4_memmap_p = NULL;
7148 }
7149
7150
7151 if (phba->sli4_hba.fcp_cq_map != NULL) {
7152 kfree(phba->sli4_hba.fcp_cq_map);
7153 phba->sli4_hba.fcp_cq_map = NULL;
7154 }
7155
7156
7157 if (phba->sli4_hba.mbx_wq != NULL) {
7158 lpfc_sli4_queue_free(phba->sli4_hba.mbx_wq);
7159 phba->sli4_hba.mbx_wq = NULL;
7160 }
7161
7162
7163 if (phba->sli4_hba.els_wq != NULL) {
7164 lpfc_sli4_queue_free(phba->sli4_hba.els_wq);
7165 phba->sli4_hba.els_wq = NULL;
7166 }
7167
7168
7169 if (phba->sli4_hba.hdr_rq != NULL) {
7170 lpfc_sli4_queue_free(phba->sli4_hba.hdr_rq);
7171 phba->sli4_hba.hdr_rq = NULL;
7172 }
7173 if (phba->sli4_hba.dat_rq != NULL) {
7174 lpfc_sli4_queue_free(phba->sli4_hba.dat_rq);
7175 phba->sli4_hba.dat_rq = NULL;
7176 }
7177
7178
7179 if (phba->sli4_hba.els_cq != NULL) {
7180 lpfc_sli4_queue_free(phba->sli4_hba.els_cq);
7181 phba->sli4_hba.els_cq = NULL;
7182 }
7183
7184
7185 if (phba->sli4_hba.mbx_cq != NULL) {
7186 lpfc_sli4_queue_free(phba->sli4_hba.mbx_cq);
7187 phba->sli4_hba.mbx_cq = NULL;
7188 }
7189
7190 return;
7191}
7192
7193
7194
7195
7196
7197
7198
7199
7200
7201
7202
7203
7204
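/**
 * lpfc_sli4_queue_setup - Set up all the SLI4 queues
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked to set up all the SLI4 queues for the HBA
 * operation.
 *
 * Return codes
 *	0 - successful
 *	other values - error
 **/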
7205int
7206lpfc_sli4_queue_setup(struct lpfc_hba *phba)
7207{
7208 struct lpfc_sli *psli = &phba->sli;
7209 struct lpfc_sli_ring *pring;
7210 int rc = -ENOMEM;
7211 int fcp_eqidx, fcp_cqidx, fcp_wqidx;
7212 int fcp_cq_index = 0;
7213 uint32_t shdr_status, shdr_add_status;
7214 union lpfc_sli4_cfg_shdr *shdr;
7215 LPFC_MBOXQ_t *mboxq;
7216 uint32_t length;
7217
7218
7219 mboxq = (LPFC_MBOXQ_t *)mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
7220 if (!mboxq) {
7221 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
7222 "3249 Unable to allocate memory for "
7223 "QUERY_FW_CFG mailbox command\n");
7224 return -ENOMEM;
7225 }
7226 length = (sizeof(struct lpfc_mbx_query_fw_config) -
7227 sizeof(struct lpfc_sli4_cfg_mhdr));
7228 lpfc_sli4_config(phba, mboxq, LPFC_MBOX_SUBSYSTEM_COMMON,
7229 LPFC_MBOX_OPCODE_QUERY_FW_CFG,
7230 length, LPFC_SLI4_MBX_EMBED);
7231
7232 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
7233
7234 shdr = (union lpfc_sli4_cfg_shdr *)
7235 &mboxq->u.mqe.un.sli4_config.header.cfg_shdr;
7236 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
7237 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
7238 if (shdr_status || shdr_add_status || rc) {
7239 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
7240 "3250 QUERY_FW_CFG mailbox failed with status "
7241 "x%x add_status x%x, mbx status x%x\n",
7242 shdr_status, shdr_add_status, rc);
7243 if (rc != MBX_TIMEOUT)
7244 mempool_free(mboxq, phba->mbox_mem_pool);
7245 rc = -ENXIO;
7246 goto out_error;
7247 }
7248
7249 phba->sli4_hba.fw_func_mode =
7250 mboxq->u.mqe.un.query_fw_cfg.rsp.function_mode;
7251 phba->sli4_hba.ulp0_mode = mboxq->u.mqe.un.query_fw_cfg.rsp.ulp0_mode;
7252 phba->sli4_hba.ulp1_mode = mboxq->u.mqe.un.query_fw_cfg.rsp.ulp1_mode;
7253 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
7254 "3251 QUERY_FW_CFG: func_mode:x%x, ulp0_mode:x%x, "
7255 "ulp1_mode:x%x\n", phba->sli4_hba.fw_func_mode,
7256 phba->sli4_hba.ulp0_mode, phba->sli4_hba.ulp1_mode);
7257
7258 if (rc != MBX_TIMEOUT)
7259 mempool_free(mboxq, phba->mbox_mem_pool);
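
	/* Set up the fast-path HBA event queues (EQs) */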
7266 if (phba->cfg_fcp_io_channel && !phba->sli4_hba.hba_eq) {
7267 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
7268 "3147 Fast-path EQs not allocated\n");
7269 rc = -ENOMEM;
7270 goto out_error;
7271 }
7272 for (fcp_eqidx = 0; fcp_eqidx < phba->cfg_fcp_io_channel; fcp_eqidx++) {
7273 if (!phba->sli4_hba.hba_eq[fcp_eqidx]) {
7274 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
7275 "0522 Fast-path EQ (%d) not "
7276 "allocated\n", fcp_eqidx);
7277 rc = -ENOMEM;
7278 goto out_destroy_hba_eq;
7279 }
7280 rc = lpfc_eq_create(phba, phba->sli4_hba.hba_eq[fcp_eqidx],
7281 (phba->cfg_fcp_imax / phba->cfg_fcp_io_channel));
7282 if (rc) {
7283 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
7284 "0523 Failed setup of fast-path EQ "
7285 "(%d), rc = 0x%x\n", fcp_eqidx, rc);
7286 goto out_destroy_hba_eq;
7287 }
7288 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
7289 "2584 HBA EQ setup: "
7290 "queue[%d]-id=%d\n", fcp_eqidx,
7291 phba->sli4_hba.hba_eq[fcp_eqidx]->queue_id);
7292 }
7293
7294
7295 if (!phba->sli4_hba.fcp_cq) {
7296 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
7297 "3148 Fast-path FCP CQ array not "
7298 "allocated\n");
7299 rc = -ENOMEM;
7300 goto out_destroy_hba_eq;
7301 }
7302
7303 for (fcp_cqidx = 0; fcp_cqidx < phba->cfg_fcp_io_channel; fcp_cqidx++) {
7304 if (!phba->sli4_hba.fcp_cq[fcp_cqidx]) {
7305 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
7306 "0526 Fast-path FCP CQ (%d) not "
7307 "allocated\n", fcp_cqidx);
7308 rc = -ENOMEM;
7309 goto out_destroy_fcp_cq;
7310 }
7311 rc = lpfc_cq_create(phba, phba->sli4_hba.fcp_cq[fcp_cqidx],
7312 phba->sli4_hba.hba_eq[fcp_cqidx], LPFC_WCQ, LPFC_FCP);
7313 if (rc) {
7314 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
7315 "0527 Failed setup of fast-path FCP "
7316 "CQ (%d), rc = 0x%x\n", fcp_cqidx, rc);
7317 goto out_destroy_fcp_cq;
7318 }
7319
7320
7321 phba->sli4_hba.fcp_cq_map[fcp_cqidx] =
7322 phba->sli4_hba.fcp_cq[fcp_cqidx]->queue_id;
7323
7324 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
7325 "2588 FCP CQ setup: cq[%d]-id=%d, "
7326 "parent seq[%d]-id=%d\n",
7327 fcp_cqidx,
7328 phba->sli4_hba.fcp_cq[fcp_cqidx]->queue_id,
7329 fcp_cqidx,
7330 phba->sli4_hba.hba_eq[fcp_cqidx]->queue_id);
7331 }
7332
7333
7334 if (!phba->sli4_hba.fcp_wq) {
7335 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
7336 "3149 Fast-path FCP WQ array not "
7337 "allocated\n");
7338 rc = -ENOMEM;
7339 goto out_destroy_fcp_cq;
7340 }
7341
7342 for (fcp_wqidx = 0; fcp_wqidx < phba->cfg_fcp_io_channel; fcp_wqidx++) {
7343 if (!phba->sli4_hba.fcp_wq[fcp_wqidx]) {
7344 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
7345 "0534 Fast-path FCP WQ (%d) not "
7346 "allocated\n", fcp_wqidx);
7347 rc = -ENOMEM;
7348 goto out_destroy_fcp_wq;
7349 }
7350 rc = lpfc_wq_create(phba, phba->sli4_hba.fcp_wq[fcp_wqidx],
7351 phba->sli4_hba.fcp_cq[fcp_wqidx],
7352 LPFC_FCP);
7353 if (rc) {
7354 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
7355 "0535 Failed setup of fast-path FCP "
7356 "WQ (%d), rc = 0x%x\n", fcp_wqidx, rc);
7357 goto out_destroy_fcp_wq;
7358 }
7359
7360
7361 pring = &psli->ring[MAX_SLI3_CONFIGURED_RINGS + fcp_wqidx];
7362 pring->sli.sli4.wqp = (void *)phba->sli4_hba.fcp_wq[fcp_wqidx];
7363 phba->sli4_hba.fcp_cq[fcp_wqidx]->pring = pring;
7364
7365 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
7366 "2591 FCP WQ setup: wq[%d]-id=%d, "
7367 "parent cq[%d]-id=%d\n",
7368 fcp_wqidx,
7369 phba->sli4_hba.fcp_wq[fcp_wqidx]->queue_id,
				fcp_wqidx,
7371 phba->sli4_hba.fcp_cq[fcp_wqidx]->queue_id);
7372 }
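
	/* Set up the slow-path mailbox completion queue (MBX CQ) */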
7378 if (!phba->sli4_hba.mbx_cq) {
7379 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
7380 "0528 Mailbox CQ not allocated\n");
7381 rc = -ENOMEM;
7382 goto out_destroy_fcp_wq;
7383 }
7384 rc = lpfc_cq_create(phba, phba->sli4_hba.mbx_cq,
7385 phba->sli4_hba.hba_eq[0], LPFC_MCQ, LPFC_MBOX);
7386 if (rc) {
7387 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
7388 "0529 Failed setup of slow-path mailbox CQ: "
7389 "rc = 0x%x\n", rc);
7390 goto out_destroy_fcp_wq;
7391 }
7392 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
7393 "2585 MBX CQ setup: cq-id=%d, parent eq-id=%d\n",
7394 phba->sli4_hba.mbx_cq->queue_id,
7395 phba->sli4_hba.hba_eq[0]->queue_id);
7396
7397
7398 if (!phba->sli4_hba.els_cq) {
7399 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
7400 "0530 ELS CQ not allocated\n");
7401 rc = -ENOMEM;
7402 goto out_destroy_mbx_cq;
7403 }
7404 rc = lpfc_cq_create(phba, phba->sli4_hba.els_cq,
7405 phba->sli4_hba.hba_eq[0], LPFC_WCQ, LPFC_ELS);
7406 if (rc) {
7407 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
7408 "0531 Failed setup of slow-path ELS CQ: "
7409 "rc = 0x%x\n", rc);
7410 goto out_destroy_mbx_cq;
7411 }
7412 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
7413 "2586 ELS CQ setup: cq-id=%d, parent eq-id=%d\n",
7414 phba->sli4_hba.els_cq->queue_id,
7415 phba->sli4_hba.hba_eq[0]->queue_id);
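
	/* Set up the slow-path mailbox work queue (MQ) */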
7422 if (!phba->sli4_hba.mbx_wq) {
7423 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
7424 "0538 Slow-path MQ not allocated\n");
7425 rc = -ENOMEM;
7426 goto out_destroy_els_cq;
7427 }
7428 rc = lpfc_mq_create(phba, phba->sli4_hba.mbx_wq,
7429 phba->sli4_hba.mbx_cq, LPFC_MBOX);
7430 if (rc) {
7431 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
7432 "0539 Failed setup of slow-path MQ: "
7433 "rc = 0x%x\n", rc);
7434 goto out_destroy_els_cq;
7435 }
7436 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
7437 "2589 MBX MQ setup: wq-id=%d, parent cq-id=%d\n",
7438 phba->sli4_hba.mbx_wq->queue_id,
7439 phba->sli4_hba.mbx_cq->queue_id);
7440
7441
7442 if (!phba->sli4_hba.els_wq) {
7443 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
7444 "0536 Slow-path ELS WQ not allocated\n");
7445 rc = -ENOMEM;
7446 goto out_destroy_mbx_wq;
7447 }
7448 rc = lpfc_wq_create(phba, phba->sli4_hba.els_wq,
7449 phba->sli4_hba.els_cq, LPFC_ELS);
7450 if (rc) {
7451 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
7452 "0537 Failed setup of slow-path ELS WQ: "
7453 "rc = 0x%x\n", rc);
7454 goto out_destroy_mbx_wq;
7455 }
7456
7457
7458 pring = &psli->ring[LPFC_ELS_RING];
7459 pring->sli.sli4.wqp = (void *)phba->sli4_hba.els_wq;
7460 phba->sli4_hba.els_cq->pring = pring;
7461
7462 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
7463 "2590 ELS WQ setup: wq-id=%d, parent cq-id=%d\n",
7464 phba->sli4_hba.els_wq->queue_id,
7465 phba->sli4_hba.els_cq->queue_id);
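
	/* Set up the unsolicited receive queue (RQ) pair */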
7470 if (!phba->sli4_hba.hdr_rq || !phba->sli4_hba.dat_rq) {
7471 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
7472 "0540 Receive Queue not allocated\n");
7473 rc = -ENOMEM;
7474 goto out_destroy_els_wq;
7475 }
7476
7477 lpfc_rq_adjust_repost(phba, phba->sli4_hba.hdr_rq, LPFC_ELS_HBQ);
7478 lpfc_rq_adjust_repost(phba, phba->sli4_hba.dat_rq, LPFC_ELS_HBQ);
7479
7480 rc = lpfc_rq_create(phba, phba->sli4_hba.hdr_rq, phba->sli4_hba.dat_rq,
7481 phba->sli4_hba.els_cq, LPFC_USOL);
7482 if (rc) {
7483 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
7484 "0541 Failed setup of Receive Queue: "
7485 "rc = 0x%x\n", rc);
		goto out_destroy_els_wq;
7487 }
7488
7489 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
7490 "2592 USL RQ setup: hdr-rq-id=%d, dat-rq-id=%d "
7491 "parent cq-id=%d\n",
7492 phba->sli4_hba.hdr_rq->queue_id,
7493 phba->sli4_hba.dat_rq->queue_id,
7494 phba->sli4_hba.els_cq->queue_id);
7495 return 0;
7496
7497out_destroy_els_wq:
7498 lpfc_wq_destroy(phba, phba->sli4_hba.els_wq);
7499out_destroy_mbx_wq:
7500 lpfc_mq_destroy(phba, phba->sli4_hba.mbx_wq);
7501out_destroy_els_cq:
7502 lpfc_cq_destroy(phba, phba->sli4_hba.els_cq);
7503out_destroy_mbx_cq:
7504 lpfc_cq_destroy(phba, phba->sli4_hba.mbx_cq);
7505out_destroy_fcp_wq:
7506 for (--fcp_wqidx; fcp_wqidx >= 0; fcp_wqidx--)
7507 lpfc_wq_destroy(phba, phba->sli4_hba.fcp_wq[fcp_wqidx]);
7508out_destroy_fcp_cq:
7509 for (--fcp_cqidx; fcp_cqidx >= 0; fcp_cqidx--)
7510 lpfc_cq_destroy(phba, phba->sli4_hba.fcp_cq[fcp_cqidx]);
7511out_destroy_hba_eq:
7512 for (--fcp_eqidx; fcp_eqidx >= 0; fcp_eqidx--)
7513 lpfc_eq_destroy(phba, phba->sli4_hba.hba_eq[fcp_eqidx]);
7514out_error:
7515 return rc;
7516}
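
/**
 * lpfc_sli4_queue_unset - Unset all the SLI4 queues
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine destroys on the port all the queues created by
 * lpfc_sli4_queue_setup().
 **/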
7530void
7531lpfc_sli4_queue_unset(struct lpfc_hba *phba)
7532{
7533 int fcp_qidx;
7534
7535
7536 lpfc_mq_destroy(phba, phba->sli4_hba.mbx_wq);
7537
7538 lpfc_wq_destroy(phba, phba->sli4_hba.els_wq);
7539
7540 lpfc_rq_destroy(phba, phba->sli4_hba.hdr_rq, phba->sli4_hba.dat_rq);
7541
7542 if (phba->sli4_hba.fcp_wq) {
7543 for (fcp_qidx = 0; fcp_qidx < phba->cfg_fcp_io_channel;
7544 fcp_qidx++)
7545 lpfc_wq_destroy(phba, phba->sli4_hba.fcp_wq[fcp_qidx]);
7546 }
7547
7548 lpfc_cq_destroy(phba, phba->sli4_hba.mbx_cq);
7549
7550 lpfc_cq_destroy(phba, phba->sli4_hba.els_cq);
7551
7552 if (phba->sli4_hba.fcp_cq) {
7553 for (fcp_qidx = 0; fcp_qidx < phba->cfg_fcp_io_channel;
7554 fcp_qidx++)
7555 lpfc_cq_destroy(phba, phba->sli4_hba.fcp_cq[fcp_qidx]);
7556 }
7557
7558 if (phba->sli4_hba.hba_eq) {
7559 for (fcp_qidx = 0; fcp_qidx < phba->cfg_fcp_io_channel;
7560 fcp_qidx++)
7561 lpfc_eq_destroy(phba, phba->sli4_hba.hba_eq[fcp_qidx]);
7562 }
7563}
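
/**
 * lpfc_sli4_cq_event_pool_create - Create the CQ event free pool
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine pre-populates the slow-path CQ event free pool with four
 * entries per completion-queue entry count.
 *
 * Return codes
 *      0 - successful
 *      -ENOMEM - no available memory
 **/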
7581static int
7582lpfc_sli4_cq_event_pool_create(struct lpfc_hba *phba)
7583{
7584 struct lpfc_cq_event *cq_event;
7585 int i;
7586
7587 for (i = 0; i < (4 * phba->sli4_hba.cq_ecount); i++) {
7588 cq_event = kmalloc(sizeof(struct lpfc_cq_event), GFP_KERNEL);
7589 if (!cq_event)
7590 goto out_pool_create_fail;
7591 list_add_tail(&cq_event->list,
7592 &phba->sli4_hba.sp_cqe_event_pool);
7593 }
7594 return 0;
7595
7596out_pool_create_fail:
7597 lpfc_sli4_cq_event_pool_destroy(phba);
7598 return -ENOMEM;
7599}
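
/**
 * lpfc_sli4_cq_event_pool_destroy - Free the CQ event free pool
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine frees every entry left in the slow-path CQ event pool.
 **/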
7611static void
7612lpfc_sli4_cq_event_pool_destroy(struct lpfc_hba *phba)
7613{
7614 struct lpfc_cq_event *cq_event, *next_cq_event;
7615
7616 list_for_each_entry_safe(cq_event, next_cq_event,
7617 &phba->sli4_hba.sp_cqe_event_pool, list) {
7618 list_del(&cq_event->list);
7619 kfree(cq_event);
7620 }
7621}
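
/**
 * __lpfc_sli4_cq_event_alloc - Get a CQ event from the free pool (locked)
 * @phba: pointer to lpfc hba data structure.
 *
 * Removes and returns the first entry from the CQ event free pool, or
 * NULL if the pool is empty. Caller must hold the hbalock.
 **/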
7633struct lpfc_cq_event *
7634__lpfc_sli4_cq_event_alloc(struct lpfc_hba *phba)
7635{
7636 struct lpfc_cq_event *cq_event = NULL;
7637
7638 list_remove_head(&phba->sli4_hba.sp_cqe_event_pool, cq_event,
7639 struct lpfc_cq_event, list);
7640 return cq_event;
7641}
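
/**
 * lpfc_sli4_cq_event_alloc - Get a CQ event from the free pool
 * @phba: pointer to lpfc hba data structure.
 *
 * Lock-taking wrapper around __lpfc_sli4_cq_event_alloc().
 **/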
7653struct lpfc_cq_event *
7654lpfc_sli4_cq_event_alloc(struct lpfc_hba *phba)
7655{
7656 struct lpfc_cq_event *cq_event;
7657 unsigned long iflags;
7658
7659 spin_lock_irqsave(&phba->hbalock, iflags);
7660 cq_event = __lpfc_sli4_cq_event_alloc(phba);
7661 spin_unlock_irqrestore(&phba->hbalock, iflags);
7662 return cq_event;
7663}
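
/**
 * __lpfc_sli4_cq_event_release - Return a CQ event to the pool (locked)
 * @phba: pointer to lpfc hba data structure.
 * @cq_event: pointer to the CQ event to be returned.
 *
 * Caller must hold the hbalock.
 **/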
7673void
7674__lpfc_sli4_cq_event_release(struct lpfc_hba *phba,
7675 struct lpfc_cq_event *cq_event)
7676{
7677 list_add_tail(&cq_event->list, &phba->sli4_hba.sp_cqe_event_pool);
7678}
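
/**
 * lpfc_sli4_cq_event_release - Return a CQ event to the free pool
 * @phba: pointer to lpfc hba data structure.
 * @cq_event: pointer to the CQ event to be returned.
 *
 * Lock-taking wrapper around __lpfc_sli4_cq_event_release().
 **/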
7688void
7689lpfc_sli4_cq_event_release(struct lpfc_hba *phba,
7690 struct lpfc_cq_event *cq_event)
7691{
7692 unsigned long iflags;
7693 spin_lock_irqsave(&phba->hbalock, iflags);
7694 __lpfc_sli4_cq_event_release(phba, cq_event);
7695 spin_unlock_irqrestore(&phba->hbalock, iflags);
7696}
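
/**
 * lpfc_sli4_cq_event_release_all - Release all pending CQ events
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine drains the pending FCP and ELS XRI-aborted and async
 * event work queues and returns their entries to the free pool.
 **/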
7705static void
7706lpfc_sli4_cq_event_release_all(struct lpfc_hba *phba)
7707{
7708 LIST_HEAD(cqelist);
7709 struct lpfc_cq_event *cqe;
7710 unsigned long iflags;
7711
7712
7713 spin_lock_irqsave(&phba->hbalock, iflags);
7714
7715 list_splice_init(&phba->sli4_hba.sp_fcp_xri_aborted_work_queue,
7716 &cqelist);
7717
7718 list_splice_init(&phba->sli4_hba.sp_els_xri_aborted_work_queue,
7719 &cqelist);
7720
7721 list_splice_init(&phba->sli4_hba.sp_asynce_work_queue,
7722 &cqelist);
7723 spin_unlock_irqrestore(&phba->hbalock, iflags);
7724
7725 while (!list_empty(&cqelist)) {
7726 list_remove_head(&cqelist, cqe, struct lpfc_cq_event, list);
7727 lpfc_sli4_cq_event_release(phba, cqe);
7728 }
7729}
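
/**
 * lpfc_pci_function_reset - Reset the PCI function on the SLI4 port
 * @phba: pointer to lpfc hba data structure.
 *
 * For interface type 0 this issues the FUNCTION_RESET mailbox command;
 * for interface type 2 it writes the INIT_PORT control bit and polls the
 * port status register for ready, retrying up to MAX_IF_TYPE_2_RESETS
 * times.
 *
 * Return codes
 *      0 - successful
 *      -ENOMEM, -ENXIO, -ENODEV - reset failed
 **/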
7743int
7744lpfc_pci_function_reset(struct lpfc_hba *phba)
7745{
7746 LPFC_MBOXQ_t *mboxq;
7747 uint32_t rc = 0, if_type;
7748 uint32_t shdr_status, shdr_add_status;
7749 uint32_t rdy_chk, num_resets = 0, reset_again = 0;
7750 union lpfc_sli4_cfg_shdr *shdr;
7751 struct lpfc_register reg_data;
7752 uint16_t devid;
7753
7754 if_type = bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf);
7755 switch (if_type) {
7756 case LPFC_SLI_INTF_IF_TYPE_0:
7757 mboxq = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool,
7758 GFP_KERNEL);
7759 if (!mboxq) {
7760 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
7761 "0494 Unable to allocate memory for "
7762 "issuing SLI_FUNCTION_RESET mailbox "
7763 "command\n");
7764 return -ENOMEM;
7765 }
7766
7767
7768 lpfc_sli4_config(phba, mboxq, LPFC_MBOX_SUBSYSTEM_COMMON,
7769 LPFC_MBOX_OPCODE_FUNCTION_RESET, 0,
7770 LPFC_SLI4_MBX_EMBED);
7771 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
7772 shdr = (union lpfc_sli4_cfg_shdr *)
7773 &mboxq->u.mqe.un.sli4_config.header.cfg_shdr;
7774 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
7775 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status,
7776 &shdr->response);
7777 if (rc != MBX_TIMEOUT)
7778 mempool_free(mboxq, phba->mbox_mem_pool);
7779 if (shdr_status || shdr_add_status || rc) {
7780 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
7781 "0495 SLI_FUNCTION_RESET mailbox "
7782 "failed with status x%x add_status x%x,"
7783 " mbx status x%x\n",
7784 shdr_status, shdr_add_status, rc);
7785 rc = -ENXIO;
7786 }
7787 break;
7788 case LPFC_SLI_INTF_IF_TYPE_2:
7789 for (num_resets = 0;
7790 num_resets < MAX_IF_TYPE_2_RESETS;
7791 num_resets++) {
7792 reg_data.word0 = 0;
			bf_set(lpfc_sliport_ctrl_end, &reg_data,
			       LPFC_SLIPORT_LITTLE_ENDIAN);
			bf_set(lpfc_sliport_ctrl_ip, &reg_data,
			       LPFC_SLIPORT_INIT_PORT);
7797 writel(reg_data.word0, phba->sli4_hba.u.if_type2.
7798 CTRLregaddr);
7799
7800 pci_read_config_word(phba->pcidev,
7801 PCI_DEVICE_ID, &devid);
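
			/*
			 * Poll the port status register, giving the
			 * device up to 10 seconds to report ready.
			 */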
7808 for (rdy_chk = 0; rdy_chk < 1000; rdy_chk++) {
7809 msleep(10);
				if (lpfc_readl(phba->sli4_hba.u.if_type2.
					STATUSregaddr, &reg_data.word0)) {
					rc = -ENODEV;
					goto out;
				}
				if (bf_get(lpfc_sliport_status_rn, &reg_data))
					reset_again++;
				if (bf_get(lpfc_sliport_status_rdy, &reg_data))
7818 break;
7819 }
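
			/*
			 * If the port requested another reset, pause
			 * briefly and run the reset sequence again.
			 */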
7825 if (reset_again && (rdy_chk < 1000)) {
7826 msleep(10);
7827 reset_again = 0;
7828 continue;
7829 }
7830
7831
			if ((bf_get(lpfc_sliport_status_err, &reg_data)) ||
7833 (rdy_chk >= 1000)) {
7834 phba->work_status[0] = readl(
7835 phba->sli4_hba.u.if_type2.ERR1regaddr);
7836 phba->work_status[1] = readl(
7837 phba->sli4_hba.u.if_type2.ERR2regaddr);
7838 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
7839 "2890 Port error detected during port "
7840 "reset(%d): wait_tmo:%d ms, "
7841 "port status reg 0x%x, "
7842 "error 1=0x%x, error 2=0x%x\n",
7843 num_resets, rdy_chk*10,
7844 reg_data.word0,
7845 phba->work_status[0],
7846 phba->work_status[1]);
7847 rc = -ENODEV;
7848 }
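
			/* Exit the retry loop once the port reports ready */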
7854 if (rdy_chk < 1000)
7855 break;
7856 }
7857
7858 msleep(100);
7859 break;
7860 case LPFC_SLI_INTF_IF_TYPE_1:
7861 default:
7862 break;
7863 }
7864
7865out:
7866
7867 if (num_resets >= MAX_IF_TYPE_2_RESETS) {
7868 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
7869 "3317 HBA not functional: IP Reset Failed "
7870 "after (%d) retries, try: "
7871 "echo fw_reset > board_mode\n", num_resets);
7872 rc = -ENODEV;
7873 }
7874
7875 return rc;
7876}
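
/**
 * lpfc_sli4_pci_mem_setup - Set up SLI4 PCI memory-mapped regions
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine sets the DMA mask, validates the SLI_INTF register, and
 * maps the configuration, control, and doorbell BARs required by the
 * SLI interface type.
 *
 * Return codes
 *      0 - successful
 *      -ENODEV - setup or mapping failed
 **/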
7889static int
7890lpfc_sli4_pci_mem_setup(struct lpfc_hba *phba)
7891{
7892 struct pci_dev *pdev;
7893 unsigned long bar0map_len, bar1map_len, bar2map_len;
7894 int error = -ENODEV;
7895 uint32_t if_type;
7896
7897
7898 if (!phba->pcidev)
7899 return error;
7900 else
7901 pdev = phba->pcidev;
7902
7903
	if (pci_set_dma_mask(pdev, DMA_BIT_MASK(64)) != 0
	    || pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64)) != 0) {
		if (pci_set_dma_mask(pdev, DMA_BIT_MASK(32)) != 0
		    || pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32)) != 0) {
7908 return error;
7909 }
7910 }
7911
7912
7913
7914
7915
7916 if (pci_read_config_dword(pdev, LPFC_SLI_INTF,
7917 &phba->sli4_hba.sli_intf.word0)) {
7918 return error;
7919 }
7920
7921
7922 if (bf_get(lpfc_sli_intf_valid, &phba->sli4_hba.sli_intf) !=
7923 LPFC_SLI_INTF_VALID) {
7924 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
7925 "2894 SLI_INTF reg contents invalid "
7926 "sli_intf reg 0x%x\n",
7927 phba->sli4_hba.sli_intf.word0);
7928 return error;
7929 }
7930
7931 if_type = bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf);
7932
7933
7934
7935
7936
7937
7938 if (pci_resource_start(pdev, 0)) {
7939 phba->pci_bar0_map = pci_resource_start(pdev, 0);
7940 bar0map_len = pci_resource_len(pdev, 0);
7941
7942
7943
7944
7945
7946 phba->sli4_hba.conf_regs_memmap_p =
7947 ioremap(phba->pci_bar0_map, bar0map_len);
7948 if (!phba->sli4_hba.conf_regs_memmap_p) {
7949 dev_printk(KERN_ERR, &pdev->dev,
7950 "ioremap failed for SLI4 PCI config "
7951 "registers.\n");
7952 goto out;
7953 }
7954
7955 lpfc_sli4_bar0_register_memmap(phba, if_type);
7956 } else {
7957 phba->pci_bar0_map = pci_resource_start(pdev, 1);
7958 bar0map_len = pci_resource_len(pdev, 1);
7959 if (if_type == LPFC_SLI_INTF_IF_TYPE_2) {
7960 dev_printk(KERN_ERR, &pdev->dev,
7961 "FATAL - No BAR0 mapping for SLI4, if_type 2\n");
7962 goto out;
7963 }
7964 phba->sli4_hba.conf_regs_memmap_p =
7965 ioremap(phba->pci_bar0_map, bar0map_len);
7966 if (!phba->sli4_hba.conf_regs_memmap_p) {
7967 dev_printk(KERN_ERR, &pdev->dev,
7968 "ioremap failed for SLI4 PCI config "
7969 "registers.\n");
7970 goto out;
7971 }
7972 lpfc_sli4_bar0_register_memmap(phba, if_type);
7973 }
7974
7975 if ((if_type == LPFC_SLI_INTF_IF_TYPE_0) &&
7976 (pci_resource_start(pdev, 2))) {
7977
7978
7979
7980
7981 phba->pci_bar1_map = pci_resource_start(pdev, 2);
7982 bar1map_len = pci_resource_len(pdev, 2);
7983 phba->sli4_hba.ctrl_regs_memmap_p =
7984 ioremap(phba->pci_bar1_map, bar1map_len);
7985 if (!phba->sli4_hba.ctrl_regs_memmap_p) {
7986 dev_printk(KERN_ERR, &pdev->dev,
7987 "ioremap failed for SLI4 HBA control registers.\n");
7988 goto out_iounmap_conf;
7989 }
7990 lpfc_sli4_bar1_register_memmap(phba);
7991 }
7992
7993 if ((if_type == LPFC_SLI_INTF_IF_TYPE_0) &&
7994 (pci_resource_start(pdev, 4))) {
7995
7996
7997
7998
7999 phba->pci_bar2_map = pci_resource_start(pdev, 4);
8000 bar2map_len = pci_resource_len(pdev, 4);
8001 phba->sli4_hba.drbl_regs_memmap_p =
8002 ioremap(phba->pci_bar2_map, bar2map_len);
8003 if (!phba->sli4_hba.drbl_regs_memmap_p) {
8004 dev_printk(KERN_ERR, &pdev->dev,
8005 "ioremap failed for SLI4 HBA doorbell registers.\n");
8006 goto out_iounmap_ctrl;
8007 }
8008 error = lpfc_sli4_bar2_register_memmap(phba, LPFC_VF0);
8009 if (error)
8010 goto out_iounmap_all;
8011 }
8012
8013 return 0;
8014
8015out_iounmap_all:
8016 iounmap(phba->sli4_hba.drbl_regs_memmap_p);
8017out_iounmap_ctrl:
8018 iounmap(phba->sli4_hba.ctrl_regs_memmap_p);
8019out_iounmap_conf:
8020 iounmap(phba->sli4_hba.conf_regs_memmap_p);
8021out:
8022 return error;
8023}
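
/**
 * lpfc_sli4_pci_mem_unset - Unset SLI4 PCI memory-mapped regions
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine unmaps the BAR regions mapped by
 * lpfc_sli4_pci_mem_setup() according to the SLI interface type.
 **/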
8032static void
8033lpfc_sli4_pci_mem_unset(struct lpfc_hba *phba)
8034{
8035 uint32_t if_type;
8036 if_type = bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf);
8037
8038 switch (if_type) {
8039 case LPFC_SLI_INTF_IF_TYPE_0:
8040 iounmap(phba->sli4_hba.drbl_regs_memmap_p);
8041 iounmap(phba->sli4_hba.ctrl_regs_memmap_p);
8042 iounmap(phba->sli4_hba.conf_regs_memmap_p);
8043 break;
8044 case LPFC_SLI_INTF_IF_TYPE_2:
8045 iounmap(phba->sli4_hba.conf_regs_memmap_p);
8046 break;
8047 case LPFC_SLI_INTF_IF_TYPE_1:
8048 default:
8049 dev_printk(KERN_ERR, &phba->pcidev->dev,
8050 "FATAL - unsupported SLI4 interface type - %d\n",
8051 if_type);
8052 break;
8053 }
8054}
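
/**
 * lpfc_sli_enable_msix - Enable MSI-X interrupt mode on SLI3 devices
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine enables the two SLI3 MSI-X vectors, requests the
 * slow-path and fast-path interrupt handlers, and issues the CONFIG_MSI
 * mailbox command. Any failure unwinds what has been set up.
 *
 * Return codes
 *      0 - successful
 *      other values - error
 **/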
8076static int
8077lpfc_sli_enable_msix(struct lpfc_hba *phba)
8078{
8079 int rc, i;
8080 LPFC_MBOXQ_t *pmb;
8081
8082
8083 for (i = 0; i < LPFC_MSIX_VECTORS; i++)
8084 phba->msix_entries[i].entry = i;
8085
8086
8087 rc = pci_enable_msix(phba->pcidev, phba->msix_entries,
8088 ARRAY_SIZE(phba->msix_entries));
8089 if (rc) {
8090 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
8091 "0420 PCI enable MSI-X failed (%d)\n", rc);
8092 goto msi_fail_out;
8093 }
8094 for (i = 0; i < LPFC_MSIX_VECTORS; i++)
8095 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
8096 "0477 MSI-X entry[%d]: vector=x%x "
8097 "message=%d\n", i,
8098 phba->msix_entries[i].vector,
8099 phba->msix_entries[i].entry);
8100
8101
8102
8103
8104
8105 rc = request_irq(phba->msix_entries[0].vector,
8106 &lpfc_sli_sp_intr_handler, IRQF_SHARED,
8107 LPFC_SP_DRIVER_HANDLER_NAME, phba);
8108 if (rc) {
8109 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
8110 "0421 MSI-X slow-path request_irq failed "
8111 "(%d)\n", rc);
8112 goto msi_fail_out;
8113 }
8114
8115
8116 rc = request_irq(phba->msix_entries[1].vector,
8117 &lpfc_sli_fp_intr_handler, IRQF_SHARED,
8118 LPFC_FP_DRIVER_HANDLER_NAME, phba);
8119
8120 if (rc) {
8121 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
8122 "0429 MSI-X fast-path request_irq failed "
8123 "(%d)\n", rc);
8124 goto irq_fail_out;
8125 }
8126
8127
8128
8129
8130 pmb = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
8131
8132 if (!pmb) {
8133 rc = -ENOMEM;
8134 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
8135 "0474 Unable to allocate memory for issuing "
8136 "MBOX_CONFIG_MSI command\n");
8137 goto mem_fail_out;
8138 }
8139 rc = lpfc_config_msi(phba, pmb);
8140 if (rc)
8141 goto mbx_fail_out;
8142 rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL);
8143 if (rc != MBX_SUCCESS) {
8144 lpfc_printf_log(phba, KERN_WARNING, LOG_MBOX,
8145 "0351 Config MSI mailbox command failed, "
8146 "mbxCmd x%x, mbxStatus x%x\n",
8147 pmb->u.mb.mbxCommand, pmb->u.mb.mbxStatus);
8148 goto mbx_fail_out;
8149 }
8150
8151
8152 mempool_free(pmb, phba->mbox_mem_pool);
8153 return rc;
8154
8155mbx_fail_out:
8156
8157 mempool_free(pmb, phba->mbox_mem_pool);
8158
8159mem_fail_out:
8160
8161 free_irq(phba->msix_entries[1].vector, phba);
8162
8163irq_fail_out:
8164
8165 free_irq(phba->msix_entries[0].vector, phba);
8166
8167msi_fail_out:
8168
8169 pci_disable_msix(phba->pcidev);
8170 return rc;
8171}
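
/**
 * lpfc_sli_disable_msix - Disable MSI-X interrupt mode on SLI3 devices
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine releases both MSI-X vector interrupt handlers and then
 * disables MSI-X on the device.
 **/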
8180static void
8181lpfc_sli_disable_msix(struct lpfc_hba *phba)
8182{
8183 int i;
8184
8185
8186 for (i = 0; i < LPFC_MSIX_VECTORS; i++)
8187 free_irq(phba->msix_entries[i].vector, phba);
8188
8189 pci_disable_msix(phba->pcidev);
8190
8191 return;
8192}
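
/**
 * lpfc_sli_enable_msi - Enable MSI interrupt mode on SLI3 devices
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine enables MSI and requests the single SLI3 interrupt
 * handler; on request_irq() failure MSI is disabled again.
 *
 * Return codes
 *      0 - successful
 *      other values - error
 **/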
8208static int
8209lpfc_sli_enable_msi(struct lpfc_hba *phba)
8210{
8211 int rc;
8212
8213 rc = pci_enable_msi(phba->pcidev);
8214 if (!rc)
8215 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
8216 "0462 PCI enable MSI mode success.\n");
8217 else {
8218 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
8219 "0471 PCI enable MSI mode failed (%d)\n", rc);
8220 return rc;
8221 }
8222
8223 rc = request_irq(phba->pcidev->irq, lpfc_sli_intr_handler,
8224 IRQF_SHARED, LPFC_DRIVER_NAME, phba);
8225 if (rc) {
8226 pci_disable_msi(phba->pcidev);
8227 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
8228 "0478 MSI request_irq failed (%d)\n", rc);
8229 }
8230 return rc;
8231}
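
/**
 * lpfc_sli_disable_msi - Disable MSI interrupt mode on SLI3 devices
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine frees the interrupt handler and disables MSI.
 **/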
8243static void
8244lpfc_sli_disable_msi(struct lpfc_hba *phba)
8245{
8246 free_irq(phba->pcidev->irq, phba);
8247 pci_disable_msi(phba->pcidev);
8248 return;
8249}
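
/**
 * lpfc_sli_enable_intr - Enable device interrupts for SLI3 devices
 * @phba: pointer to lpfc hba data structure.
 * @cfg_mode: configured interrupt mode (0 INTx, 1 MSI, 2 MSI-X).
 *
 * Tries MSI-X, then MSI, then INTx, as permitted by @cfg_mode, and
 * returns the mode actually enabled, or LPFC_INTR_ERROR if no mode
 * could be set up.
 **/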
8267static uint32_t
8268lpfc_sli_enable_intr(struct lpfc_hba *phba, uint32_t cfg_mode)
8269{
8270 uint32_t intr_mode = LPFC_INTR_ERROR;
8271 int retval;
8272
8273 if (cfg_mode == 2) {
8274
8275 retval = lpfc_sli_config_port(phba, LPFC_SLI_REV3);
8276 if (!retval) {
8277
8278 retval = lpfc_sli_enable_msix(phba);
8279 if (!retval) {
8280
8281 phba->intr_type = MSIX;
8282 intr_mode = 2;
8283 }
8284 }
8285 }
8286
8287
8288 if (cfg_mode >= 1 && phba->intr_type == NONE) {
8289 retval = lpfc_sli_enable_msi(phba);
8290 if (!retval) {
8291
8292 phba->intr_type = MSI;
8293 intr_mode = 1;
8294 }
8295 }
8296
8297
8298 if (phba->intr_type == NONE) {
8299 retval = request_irq(phba->pcidev->irq, lpfc_sli_intr_handler,
8300 IRQF_SHARED, LPFC_DRIVER_NAME, phba);
8301 if (!retval) {
8302
8303 phba->intr_type = INTx;
8304 intr_mode = 0;
8305 }
8306 }
8307 return intr_mode;
8308}
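
/**
 * lpfc_sli_disable_intr - Disable device interrupts for SLI3 devices
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine releases whichever interrupt mode is active and resets
 * the driver interrupt state.
 **/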
8319static void
8320lpfc_sli_disable_intr(struct lpfc_hba *phba)
8321{
8322
8323 if (phba->intr_type == MSIX)
8324 lpfc_sli_disable_msix(phba);
8325 else if (phba->intr_type == MSI)
8326 lpfc_sli_disable_msi(phba);
8327 else if (phba->intr_type == INTx)
8328 free_irq(phba->pcidev->irq, phba);
8329
8330
8331 phba->intr_type = NONE;
8332 phba->sli.slistat.sli_intr = 0;
8333
8334 return;
8335}
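
/**
 * lpfc_find_next_cpu - Find the next available CPU on a physical socket
 * @phba: pointer to lpfc hba data structure.
 * @phys_id: physical processor id to search.
 *
 * Returns the first online CPU on @phys_id with no IRQ affinitized to
 * it, preferring CPUs not yet claimed by any lpfc instance, or
 * LPFC_VECTOR_MAP_EMPTY if none is available.
 **/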
8343static int
8344lpfc_find_next_cpu(struct lpfc_hba *phba, uint32_t phys_id)
8345{
8346 struct lpfc_vector_map_info *cpup;
8347 int cpu;
8348
8349 cpup = phba->sli4_hba.cpu_map;
8350 for (cpu = 0; cpu < phba->sli4_hba.num_present_cpu; cpu++) {
8351
8352 if (cpu_online(cpu)) {
8353 if ((cpup->irq == LPFC_VECTOR_MAP_EMPTY) &&
8354 (lpfc_used_cpu[cpu] == LPFC_VECTOR_MAP_EMPTY) &&
8355 (cpup->phys_id == phys_id)) {
8356 return cpu;
8357 }
8358 }
8359 cpup++;
8360 }
8361
8362
8363
8364
8365
8366
8367
8368 for (cpu = 0; cpu < phba->sli4_hba.num_present_cpu; cpu++) {
8369 if (lpfc_used_cpu[cpu] == phys_id)
8370 lpfc_used_cpu[cpu] = LPFC_VECTOR_MAP_EMPTY;
8371 }
8372
8373 cpup = phba->sli4_hba.cpu_map;
8374 for (cpu = 0; cpu < phba->sli4_hba.num_present_cpu; cpu++) {
8375
8376 if (cpu_online(cpu)) {
8377 if ((cpup->irq == LPFC_VECTOR_MAP_EMPTY) &&
8378 (cpup->phys_id == phys_id)) {
8379 return cpu;
8380 }
8381 }
8382 cpup++;
8383 }
8384 return LPFC_VECTOR_MAP_EMPTY;
8385}
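
/**
 * lpfc_sli4_set_affinity - Set MSI-X vector affinity and IO channel map
 * @phba: pointer to lpfc hba data structure.
 * @vectors: number of MSI-X vectors allocated.
 *
 * This routine spreads the MSI-X vectors across the physical sockets,
 * sets the IRQ affinity hints, and assigns an IO channel to every
 * present CPU. Returns 1 when the driver performed the mapping, 0 on
 * mapping failure.
 **/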
8397static int
8398lpfc_sli4_set_affinity(struct lpfc_hba *phba, int vectors)
8399{
8400 int i, idx, saved_chann, used_chann, cpu, phys_id;
8401 int max_phys_id, num_io_channel, first_cpu;
8402 struct lpfc_vector_map_info *cpup;
8403#ifdef CONFIG_X86
8404 struct cpuinfo_x86 *cpuinfo;
8405#endif
8406 struct cpumask *mask;
8407 uint8_t chann[LPFC_FCP_IO_CHAN_MAX+1];
8408
8409
8410 if (!phba->cfg_fcp_cpu_map)
8411 return 1;
8412
8413
8414 memset(phba->sli4_hba.cpu_map, 0xff,
8415 (sizeof(struct lpfc_vector_map_info) *
8416 phba->sli4_hba.num_present_cpu));
8417
8418 max_phys_id = 0;
8419 phys_id = 0;
8420 num_io_channel = 0;
8421 first_cpu = LPFC_VECTOR_MAP_EMPTY;
8422
8423
8424 cpup = phba->sli4_hba.cpu_map;
8425 for (cpu = 0; cpu < phba->sli4_hba.num_present_cpu; cpu++) {
8426#ifdef CONFIG_X86
8427 cpuinfo = &cpu_data(cpu);
8428 cpup->phys_id = cpuinfo->phys_proc_id;
8429 cpup->core_id = cpuinfo->cpu_core_id;
8430#else
8431
8432 cpup->phys_id = 0;
8433 cpup->core_id = 0;
8434#endif
8435
8436 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
8437 "3328 CPU physid %d coreid %d\n",
8438 cpup->phys_id, cpup->core_id);
8439
8440 if (cpup->phys_id > max_phys_id)
8441 max_phys_id = cpup->phys_id;
8442 cpup++;
8443 }
8444
8445
8446 for (idx = 0; idx < vectors; idx++) {
8447 cpup = phba->sli4_hba.cpu_map;
8448 cpu = lpfc_find_next_cpu(phba, phys_id);
8449 if (cpu == LPFC_VECTOR_MAP_EMPTY) {
8450
8451
8452 for (i = 1; i < max_phys_id; i++) {
8453 phys_id++;
8454 if (phys_id > max_phys_id)
8455 phys_id = 0;
8456 cpu = lpfc_find_next_cpu(phba, phys_id);
8457 if (cpu == LPFC_VECTOR_MAP_EMPTY)
8458 continue;
8459 goto found;
8460 }
8461
8462 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
					"3329 Cannot set affinity: "
					"Error mapping vector %d (%d)\n",
8465 idx, vectors);
8466 return 0;
8467 }
8468found:
8469 cpup += cpu;
8470 if (phba->cfg_fcp_cpu_map == LPFC_DRIVER_CPU_MAP)
8471 lpfc_used_cpu[cpu] = phys_id;
8472
8473
8474 cpup->irq = phba->sli4_hba.msix_entries[idx].vector;
8475
8476
8477 cpup->channel_id = idx;
8478 num_io_channel++;
8479
8480 if (first_cpu == LPFC_VECTOR_MAP_EMPTY)
8481 first_cpu = cpu;
8482
8483
8484 mask = &cpup->maskbits;
8485 cpumask_clear(mask);
8486 cpumask_set_cpu(cpu, mask);
8487 i = irq_set_affinity_hint(phba->sli4_hba.msix_entries[idx].
8488 vector, mask);
8489
8490 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
8491 "3330 Set Affinity: CPU %d channel %d "
8492 "irq %d (%x)\n",
8493 cpu, cpup->channel_id,
8494 phba->sli4_hba.msix_entries[idx].vector, i);
8495
8496
8497 phys_id++;
8498 if (phys_id > max_phys_id)
8499 phys_id = 0;
8500 }
8501
8502
8503
8504
8505
8506
8507
8508
8509 for (i = 0; i <= max_phys_id; i++) {
8510
8511
8512
8513
8514
8515 for (idx = 0; idx < phba->cfg_fcp_io_channel; idx++)
8516 chann[idx] = idx;
8517
8518 saved_chann = 0;
8519 used_chann = 0;
8520
8521
8522
8523
8524
8525
8526 cpup = phba->sli4_hba.cpu_map;
8527 cpu = first_cpu;
8528 cpup += cpu;
8529 for (idx = 0; idx < phba->sli4_hba.num_present_cpu;
8530 idx++) {
8531 if (cpup->phys_id == i) {
8532
8533
8534
8535
8536 if (cpup->irq != LPFC_VECTOR_MAP_EMPTY) {
8537 chann[saved_chann] =
8538 cpup->channel_id;
8539 saved_chann++;
8540 goto out;
8541 }
8542
8543
8544 if (saved_chann == 0)
8545 saved_chann =
8546 phba->cfg_fcp_io_channel;
8547
8548
8549 cpup->channel_id = chann[used_chann];
8550 num_io_channel++;
8551 used_chann++;
8552 if (used_chann == saved_chann)
8553 used_chann = 0;
8554
8555 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
8556 "3331 Set IO_CHANN "
8557 "CPU %d channel %d\n",
8558 idx, cpup->channel_id);
8559 }
8560out:
8561 cpu++;
8562 if (cpu >= phba->sli4_hba.num_present_cpu) {
8563 cpup = phba->sli4_hba.cpu_map;
8564 cpu = 0;
8565 } else {
8566 cpup++;
8567 }
8568 }
8569 }
8570
8571 if (phba->sli4_hba.num_online_cpu != phba->sli4_hba.num_present_cpu) {
8572 cpup = phba->sli4_hba.cpu_map;
8573 for (idx = 0; idx < phba->sli4_hba.num_present_cpu; idx++) {
8574 if (cpup->channel_id == LPFC_VECTOR_MAP_EMPTY) {
8575 cpup->channel_id = 0;
8576 num_io_channel++;
8577
8578 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
8579 "3332 Assign IO_CHANN "
8580 "CPU %d channel %d\n",
8581 idx, cpup->channel_id);
8582 }
8583 cpup++;
8584 }
8585 }
8586
8587
8588 if (num_io_channel != phba->sli4_hba.num_present_cpu)
8589 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"3333 Set affinity mismatch: "
				"%d chann != %d cpus: %d vectors\n",
8592 num_io_channel, phba->sli4_hba.num_present_cpu,
8593 vectors);
8594
8595 phba->cfg_fcp_io_sched = LPFC_FCP_SCHED_BY_CPU;
8596 return 1;
8597}
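
/**
 * lpfc_sli4_enable_msix - Enable MSI-X interrupt mode on SLI4 devices
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine enables up to cfg_fcp_io_channel MSI-X vectors, retrying
 * with whatever count the PCI layer reports available, requests one
 * fast-path interrupt handler per vector, and reduces the IO channel
 * count to match the vectors obtained.
 **/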
8620static int
8621lpfc_sli4_enable_msix(struct lpfc_hba *phba)
8622{
8623 int vectors, rc, index;
8624
8625
8626 for (index = 0; index < phba->cfg_fcp_io_channel; index++)
8627 phba->sli4_hba.msix_entries[index].entry = index;
8628
8629
8630 vectors = phba->cfg_fcp_io_channel;
8631enable_msix_vectors:
8632 rc = pci_enable_msix(phba->pcidev, phba->sli4_hba.msix_entries,
8633 vectors);
8634 if (rc > 1) {
8635 vectors = rc;
8636 goto enable_msix_vectors;
8637 } else if (rc) {
8638 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
8639 "0484 PCI enable MSI-X failed (%d)\n", rc);
8640 goto msi_fail_out;
8641 }
8642
8643
8644 for (index = 0; index < vectors; index++)
8645 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
8646 "0489 MSI-X entry[%d]: vector=x%x "
8647 "message=%d\n", index,
8648 phba->sli4_hba.msix_entries[index].vector,
8649 phba->sli4_hba.msix_entries[index].entry);
8650
8651
8652 for (index = 0; index < vectors; index++) {
8653 memset(&phba->sli4_hba.handler_name[index], 0, 16);
8654 sprintf((char *)&phba->sli4_hba.handler_name[index],
8655 LPFC_DRIVER_HANDLER_NAME"%d", index);
8656
8657 phba->sli4_hba.fcp_eq_hdl[index].idx = index;
8658 phba->sli4_hba.fcp_eq_hdl[index].phba = phba;
8659 atomic_set(&phba->sli4_hba.fcp_eq_hdl[index].fcp_eq_in_use, 1);
8660 rc = request_irq(phba->sli4_hba.msix_entries[index].vector,
8661 &lpfc_sli4_hba_intr_handler, IRQF_SHARED,
8662 (char *)&phba->sli4_hba.handler_name[index],
8663 &phba->sli4_hba.fcp_eq_hdl[index]);
8664 if (rc) {
8665 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
8666 "0486 MSI-X fast-path (%d) "
8667 "request_irq failed (%d)\n", index, rc);
8668 goto cfg_fail_out;
8669 }
8670 }
8671
8672 if (vectors != phba->cfg_fcp_io_channel) {
8673 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
8674 "3238 Reducing IO channels to match number of "
8675 "MSI-X vectors, requested %d got %d\n",
8676 phba->cfg_fcp_io_channel, vectors);
8677 phba->cfg_fcp_io_channel = vectors;
8678 }
8679
8680 lpfc_sli4_set_affinity(phba, vectors);
8681 return rc;
8682
8683cfg_fail_out:
8684
8685 for (--index; index >= 0; index--)
8686 free_irq(phba->sli4_hba.msix_entries[index].vector,
8687 &phba->sli4_hba.fcp_eq_hdl[index]);
8688
8689msi_fail_out:
8690
8691 pci_disable_msix(phba->pcidev);
8692 return rc;
8693}
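
/**
 * lpfc_sli4_disable_msix - Disable MSI-X interrupt mode on SLI4 devices
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine releases the fast-path interrupt handlers and disables
 * MSI-X on the device.
 **/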
8702static void
8703lpfc_sli4_disable_msix(struct lpfc_hba *phba)
8704{
8705 int index;
8706
8707
8708 for (index = 0; index < phba->cfg_fcp_io_channel; index++)
8709 free_irq(phba->sli4_hba.msix_entries[index].vector,
8710 &phba->sli4_hba.fcp_eq_hdl[index]);
8711
8712
8713 pci_disable_msix(phba->pcidev);
8714
8715 return;
8716}
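
/**
 * lpfc_sli4_enable_msi - Enable MSI interrupt mode on SLI4 devices
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine enables MSI, requests the single SLI4 interrupt handler,
 * and initializes the fast-path event queue handles.
 *
 * Return codes
 *      0 - successful
 *      other values - error
 **/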
8732static int
8733lpfc_sli4_enable_msi(struct lpfc_hba *phba)
8734{
8735 int rc, index;
8736
8737 rc = pci_enable_msi(phba->pcidev);
8738 if (!rc)
8739 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
8740 "0487 PCI enable MSI mode success.\n");
8741 else {
8742 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
8743 "0488 PCI enable MSI mode failed (%d)\n", rc);
8744 return rc;
8745 }
8746
8747 rc = request_irq(phba->pcidev->irq, lpfc_sli4_intr_handler,
8748 IRQF_SHARED, LPFC_DRIVER_NAME, phba);
8749 if (rc) {
8750 pci_disable_msi(phba->pcidev);
8751 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
8752 "0490 MSI request_irq failed (%d)\n", rc);
8753 return rc;
8754 }
8755
8756 for (index = 0; index < phba->cfg_fcp_io_channel; index++) {
8757 phba->sli4_hba.fcp_eq_hdl[index].idx = index;
8758 phba->sli4_hba.fcp_eq_hdl[index].phba = phba;
8759 }
8760
8761 return 0;
8762}
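
/**
 * lpfc_sli4_disable_msi - Disable MSI interrupt mode on SLI4 devices
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine frees the interrupt handler and disables MSI.
 **/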
8774static void
8775lpfc_sli4_disable_msi(struct lpfc_hba *phba)
8776{
8777 free_irq(phba->pcidev->irq, phba);
8778 pci_disable_msi(phba->pcidev);
8779 return;
8780}
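
/**
 * lpfc_sli4_enable_intr - Enable device interrupts for SLI4 devices
 * @phba: pointer to lpfc hba data structure.
 * @cfg_mode: configured interrupt mode (0 INTx, 1 MSI, 2 MSI-X).
 *
 * Tries MSI-X, then MSI, then INTx, as permitted by @cfg_mode, and
 * returns the mode actually enabled, or LPFC_INTR_ERROR if no mode
 * could be set up.
 **/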
8798static uint32_t
8799lpfc_sli4_enable_intr(struct lpfc_hba *phba, uint32_t cfg_mode)
8800{
8801 uint32_t intr_mode = LPFC_INTR_ERROR;
8802 int retval, index;
8803
	if (cfg_mode == 2) {
		retval = lpfc_sli4_enable_msix(phba);
		if (!retval) {
			phba->intr_type = MSIX;
			intr_mode = 2;
		}
	}
8817
8818
8819 if (cfg_mode >= 1 && phba->intr_type == NONE) {
8820 retval = lpfc_sli4_enable_msi(phba);
8821 if (!retval) {
8822
8823 phba->intr_type = MSI;
8824 intr_mode = 1;
8825 }
8826 }
8827
8828
8829 if (phba->intr_type == NONE) {
8830 retval = request_irq(phba->pcidev->irq, lpfc_sli4_intr_handler,
8831 IRQF_SHARED, LPFC_DRIVER_NAME, phba);
8832 if (!retval) {
8833
8834 phba->intr_type = INTx;
8835 intr_mode = 0;
8836 for (index = 0; index < phba->cfg_fcp_io_channel;
8837 index++) {
8838 phba->sli4_hba.fcp_eq_hdl[index].idx = index;
8839 phba->sli4_hba.fcp_eq_hdl[index].phba = phba;
8840 atomic_set(&phba->sli4_hba.fcp_eq_hdl[index].
8841 fcp_eq_in_use, 1);
8842 }
8843 }
8844 }
8845 return intr_mode;
8846}
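
/**
 * lpfc_sli4_disable_intr - Disable device interrupts for SLI4 devices
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine releases whichever interrupt mode is active and resets
 * the driver interrupt state.
 **/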
8857static void
8858lpfc_sli4_disable_intr(struct lpfc_hba *phba)
8859{
8860
8861 if (phba->intr_type == MSIX)
8862 lpfc_sli4_disable_msix(phba);
8863 else if (phba->intr_type == MSI)
8864 lpfc_sli4_disable_msi(phba);
8865 else if (phba->intr_type == INTx)
8866 free_irq(phba->pcidev->irq, phba);
8867
8868
8869 phba->intr_type = NONE;
8870 phba->sli.slistat.sli_intr = 0;
8871
8872 return;
8873}
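
/**
 * lpfc_unset_hba - Unset SLI3 HBA device initialization
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine marks the port as unloading, stops the HBA timers,
 * brings the SLI layer down, restarts the board, and disables device
 * interrupts.
 **/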
8882static void
8883lpfc_unset_hba(struct lpfc_hba *phba)
8884{
8885 struct lpfc_vport *vport = phba->pport;
8886 struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
8887
8888 spin_lock_irq(shost->host_lock);
8889 vport->load_flag |= FC_UNLOADING;
8890 spin_unlock_irq(shost->host_lock);
8891
8892 kfree(phba->vpi_bmask);
8893 kfree(phba->vpi_ids);
8894
8895 lpfc_stop_hba_timers(phba);
8896
8897 phba->pport->work_port_events = 0;
8898
8899 lpfc_sli_hba_down(phba);
8900
8901 lpfc_sli_brdrestart(phba);
8902
8903 lpfc_sli_disable_intr(phba);
8904
8905 return;
8906}
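
/**
 * lpfc_sli4_xri_exchange_busy_wait - Wait for XRI exchange-busy to clear
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine polls until the aborted FCP and ELS XRI lists drain,
 * logging a warning whenever the wait exceeds the configured timeout.
 **/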
8921static void
8922lpfc_sli4_xri_exchange_busy_wait(struct lpfc_hba *phba)
8923{
8924 int wait_time = 0;
8925 int fcp_xri_cmpl = list_empty(&phba->sli4_hba.lpfc_abts_scsi_buf_list);
8926 int els_xri_cmpl = list_empty(&phba->sli4_hba.lpfc_abts_els_sgl_list);
8927
8928 while (!fcp_xri_cmpl || !els_xri_cmpl) {
8929 if (wait_time > LPFC_XRI_EXCH_BUSY_WAIT_TMO) {
8930 if (!fcp_xri_cmpl)
8931 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
8932 "2877 FCP XRI exchange busy "
8933 "wait time: %d seconds.\n",
8934 wait_time/1000);
8935 if (!els_xri_cmpl)
8936 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
8937 "2878 ELS XRI exchange busy "
8938 "wait time: %d seconds.\n",
8939 wait_time/1000);
8940 msleep(LPFC_XRI_EXCH_BUSY_WAIT_T2);
8941 wait_time += LPFC_XRI_EXCH_BUSY_WAIT_T2;
8942 } else {
8943 msleep(LPFC_XRI_EXCH_BUSY_WAIT_T1);
8944 wait_time += LPFC_XRI_EXCH_BUSY_WAIT_T1;
8945 }
8946 fcp_xri_cmpl =
8947 list_empty(&phba->sli4_hba.lpfc_abts_scsi_buf_list);
8948 els_xri_cmpl =
8949 list_empty(&phba->sli4_hba.lpfc_abts_els_sgl_list);
8950 }
8951}
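
/**
 * lpfc_sli4_hba_unset - Unset SLI4 HBA device initialization
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine blocks asynchronous mailbox commands, completes any
 * active mailbox with MBX_NOT_FINISHED, aborts outstanding IOCBs, waits
 * for busy XRI exchanges, disables interrupts and SR-IOV, stops the
 * worker thread, and resets the PCI function before destroying the
 * queues.
 **/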
8963static void
8964lpfc_sli4_hba_unset(struct lpfc_hba *phba)
8965{
8966 int wait_cnt = 0;
8967 LPFC_MBOXQ_t *mboxq;
8968 struct pci_dev *pdev = phba->pcidev;
8969
8970 lpfc_stop_hba_timers(phba);
8971 phba->sli4_hba.intr_enable = 0;
8972
8973
8974
8975
8976
8977
8978
8979 spin_lock_irq(&phba->hbalock);
8980 phba->sli.sli_flag |= LPFC_SLI_ASYNC_MBX_BLK;
8981 spin_unlock_irq(&phba->hbalock);
8982
8983 while (phba->sli.sli_flag & LPFC_SLI_MBOX_ACTIVE) {
8984 msleep(10);
8985 if (++wait_cnt > LPFC_ACTIVE_MBOX_WAIT_CNT)
8986 break;
8987 }
8988
8989 if (phba->sli.sli_flag & LPFC_SLI_MBOX_ACTIVE) {
8990 spin_lock_irq(&phba->hbalock);
8991 mboxq = phba->sli.mbox_active;
8992 mboxq->u.mb.mbxStatus = MBX_NOT_FINISHED;
8993 __lpfc_mbox_cmpl_put(phba, mboxq);
8994 phba->sli.sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
8995 phba->sli.mbox_active = NULL;
8996 spin_unlock_irq(&phba->hbalock);
8997 }
8998
8999
9000 lpfc_sli_hba_iocb_abort(phba);
9001
9002
9003 lpfc_sli4_xri_exchange_busy_wait(phba);
9004
9005
9006 lpfc_sli4_disable_intr(phba);
9007
9008
9009 if (phba->cfg_sriov_nr_virtfn)
9010 pci_disable_sriov(pdev);
9011
9012
9013 kthread_stop(phba->worker_thread);
9014
9015
9016 lpfc_pci_function_reset(phba);
9017 lpfc_sli4_queue_destroy(phba);
9018
9019
9020 phba->pport->work_port_events = 0;
9021}
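
/**
 * lpfc_pc_sli4_params_get - Get the SLI4 port parameters
 * @phba: pointer to lpfc hba data structure.
 * @mboxq: pointer to the driver internal mailbox queue entry.
 *
 * This routine issues the mailbox command built by lpfc_pc_sli4_params()
 * and caches the returned queue sizing and capability fields in
 * pc_sli4_params. Returns 0 on success, 1 on mailbox failure.
 **/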
9035int
9036lpfc_pc_sli4_params_get(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
9037{
9038 int rc;
9039 struct lpfc_mqe *mqe;
9040 struct lpfc_pc_sli4_params *sli4_params;
9041 uint32_t mbox_tmo;
9042
9043 rc = 0;
9044 mqe = &mboxq->u.mqe;
9045
9046
9047 lpfc_pc_sli4_params(mboxq);
9048 if (!phba->sli4_hba.intr_enable)
9049 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
9050 else {
9051 mbox_tmo = lpfc_mbox_tmo_val(phba, mboxq);
9052 rc = lpfc_sli_issue_mbox_wait(phba, mboxq, mbox_tmo);
9053 }
9054
9055 if (unlikely(rc))
9056 return 1;
9057
9058 sli4_params = &phba->sli4_hba.pc_sli4_params;
9059 sli4_params->if_type = bf_get(if_type, &mqe->un.sli4_params);
9060 sli4_params->sli_rev = bf_get(sli_rev, &mqe->un.sli4_params);
9061 sli4_params->sli_family = bf_get(sli_family, &mqe->un.sli4_params);
9062 sli4_params->featurelevel_1 = bf_get(featurelevel_1,
9063 &mqe->un.sli4_params);
9064 sli4_params->featurelevel_2 = bf_get(featurelevel_2,
9065 &mqe->un.sli4_params);
9066 sli4_params->proto_types = mqe->un.sli4_params.word3;
9067 sli4_params->sge_supp_len = mqe->un.sli4_params.sge_supp_len;
9068 sli4_params->if_page_sz = bf_get(if_page_sz, &mqe->un.sli4_params);
9069 sli4_params->rq_db_window = bf_get(rq_db_window, &mqe->un.sli4_params);
9070 sli4_params->loopbk_scope = bf_get(loopbk_scope, &mqe->un.sli4_params);
9071 sli4_params->eq_pages_max = bf_get(eq_pages, &mqe->un.sli4_params);
9072 sli4_params->eqe_size = bf_get(eqe_size, &mqe->un.sli4_params);
9073 sli4_params->cq_pages_max = bf_get(cq_pages, &mqe->un.sli4_params);
9074 sli4_params->cqe_size = bf_get(cqe_size, &mqe->un.sli4_params);
9075 sli4_params->mq_pages_max = bf_get(mq_pages, &mqe->un.sli4_params);
9076 sli4_params->mqe_size = bf_get(mqe_size, &mqe->un.sli4_params);
9077 sli4_params->mq_elem_cnt = bf_get(mq_elem_cnt, &mqe->un.sli4_params);
9078 sli4_params->wq_pages_max = bf_get(wq_pages, &mqe->un.sli4_params);
9079 sli4_params->wqe_size = bf_get(wqe_size, &mqe->un.sli4_params);
9080 sli4_params->rq_pages_max = bf_get(rq_pages, &mqe->un.sli4_params);
9081 sli4_params->rqe_size = bf_get(rqe_size, &mqe->un.sli4_params);
9082 sli4_params->hdr_pages_max = bf_get(hdr_pages, &mqe->un.sli4_params);
9083 sli4_params->hdr_size = bf_get(hdr_size, &mqe->un.sli4_params);
9084 sli4_params->hdr_pp_align = bf_get(hdr_pp_align, &mqe->un.sli4_params);
9085 sli4_params->sgl_pages_max = bf_get(sgl_pages, &mqe->un.sli4_params);
9086 sli4_params->sgl_pp_align = bf_get(sgl_pp_align, &mqe->un.sli4_params);
9087
9088
9089 if (sli4_params->sge_supp_len > LPFC_MAX_SGE_SIZE)
9090 sli4_params->sge_supp_len = LPFC_MAX_SGE_SIZE;
9091
9092 return rc;
9093}
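
/**
 * lpfc_get_sli4_parameters - Get the SLI4 configuration parameters
 * @phba: pointer to lpfc hba data structure.
 * @mboxq: pointer to the driver internal mailbox queue entry.
 *
 * This routine issues the GET_SLI4_PARAMETERS mailbox command and caches
 * the returned capability fields, including extents and RPI header
 * usage. Returns 0 on success, nonzero on mailbox failure.
 **/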
9107int
9108lpfc_get_sli4_parameters(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
9109{
9110 int rc;
9111 struct lpfc_mqe *mqe = &mboxq->u.mqe;
9112 struct lpfc_pc_sli4_params *sli4_params;
9113 uint32_t mbox_tmo;
9114 int length;
9115 struct lpfc_sli4_parameters *mbx_sli4_parameters;
9116
9117
9118
9119
9120
9121
9122 phba->sli4_hba.rpi_hdrs_in_use = 1;
9123
9124
9125 length = (sizeof(struct lpfc_mbx_get_sli4_parameters) -
9126 sizeof(struct lpfc_sli4_cfg_mhdr));
9127 lpfc_sli4_config(phba, mboxq, LPFC_MBOX_SUBSYSTEM_COMMON,
9128 LPFC_MBOX_OPCODE_GET_SLI4_PARAMETERS,
9129 length, LPFC_SLI4_MBX_EMBED);
9130 if (!phba->sli4_hba.intr_enable)
9131 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
9132 else {
9133 mbox_tmo = lpfc_mbox_tmo_val(phba, mboxq);
9134 rc = lpfc_sli_issue_mbox_wait(phba, mboxq, mbox_tmo);
9135 }
9136 if (unlikely(rc))
9137 return rc;
9138 sli4_params = &phba->sli4_hba.pc_sli4_params;
9139 mbx_sli4_parameters = &mqe->un.get_sli4_parameters.sli4_parameters;
9140 sli4_params->if_type = bf_get(cfg_if_type, mbx_sli4_parameters);
9141 sli4_params->sli_rev = bf_get(cfg_sli_rev, mbx_sli4_parameters);
9142 sli4_params->sli_family = bf_get(cfg_sli_family, mbx_sli4_parameters);
9143 sli4_params->featurelevel_1 = bf_get(cfg_sli_hint_1,
9144 mbx_sli4_parameters);
9145 sli4_params->featurelevel_2 = bf_get(cfg_sli_hint_2,
9146 mbx_sli4_parameters);
9147 if (bf_get(cfg_phwq, mbx_sli4_parameters))
9148 phba->sli3_options |= LPFC_SLI4_PHWQ_ENABLED;
9149 else
9150 phba->sli3_options &= ~LPFC_SLI4_PHWQ_ENABLED;
9151 sli4_params->sge_supp_len = mbx_sli4_parameters->sge_supp_len;
9152 sli4_params->loopbk_scope = bf_get(loopbk_scope, mbx_sli4_parameters);
9153 sli4_params->cqv = bf_get(cfg_cqv, mbx_sli4_parameters);
9154 sli4_params->mqv = bf_get(cfg_mqv, mbx_sli4_parameters);
9155 sli4_params->wqv = bf_get(cfg_wqv, mbx_sli4_parameters);
9156 sli4_params->rqv = bf_get(cfg_rqv, mbx_sli4_parameters);
9157 sli4_params->sgl_pages_max = bf_get(cfg_sgl_page_cnt,
9158 mbx_sli4_parameters);
9159 sli4_params->sgl_pp_align = bf_get(cfg_sgl_pp_align,
9160 mbx_sli4_parameters);
9161 phba->sli4_hba.extents_in_use = bf_get(cfg_ext, mbx_sli4_parameters);
9162 phba->sli4_hba.rpi_hdrs_in_use = bf_get(cfg_hdrr, mbx_sli4_parameters);
9163
9164
9165 if (sli4_params->sge_supp_len > LPFC_MAX_SGE_SIZE)
9166 sli4_params->sge_supp_len = LPFC_MAX_SGE_SIZE;
9167
9168 return 0;
9169}
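
/**
 * lpfc_pci_probe_one_s3 - PCI probe routine for SLI3 device ids
 * @pdev: pointer to PCI device.
 * @pid: pointer to PCI device identifier.
 *
 * This routine is invoked when an HBA with an SLI3 interface appears on
 * the PCI bus. It allocates the HBA structure, sets up PCI memory and
 * driver resources, creates the SCSI host, and brings the port up,
 * falling back to lower interrupt modes when the configured one fails
 * its active-interrupt test.
 *
 * Return codes
 *      0 - successful
 *      negative value - initialization failed
 **/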
9188static int
9189lpfc_pci_probe_one_s3(struct pci_dev *pdev, const struct pci_device_id *pid)
9190{
9191 struct lpfc_hba *phba;
9192 struct lpfc_vport *vport = NULL;
9193 struct Scsi_Host *shost = NULL;
9194 int error;
9195 uint32_t cfg_mode, intr_mode;
9196
9197
9198 phba = lpfc_hba_alloc(pdev);
9199 if (!phba)
9200 return -ENOMEM;
9201
9202
9203 error = lpfc_enable_pci_dev(phba);
9204 if (error)
9205 goto out_free_phba;
9206
9207
9208 error = lpfc_api_table_setup(phba, LPFC_PCI_DEV_LP);
9209 if (error)
9210 goto out_disable_pci_dev;
9211
9212
9213 error = lpfc_sli_pci_mem_setup(phba);
9214 if (error) {
9215 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
9216 "1402 Failed to set up pci memory space.\n");
9217 goto out_disable_pci_dev;
9218 }
9219
9220
9221 error = lpfc_setup_driver_resource_phase1(phba);
9222 if (error) {
9223 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
9224 "1403 Failed to set up driver resource.\n");
9225 goto out_unset_pci_mem_s3;
9226 }
9227
9228
9229 error = lpfc_sli_driver_resource_setup(phba);
9230 if (error) {
9231 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
9232 "1404 Failed to set up driver resource.\n");
9233 goto out_unset_pci_mem_s3;
9234 }
9235
9236
9237 error = lpfc_init_iocb_list(phba, LPFC_IOCB_LIST_CNT);
9238 if (error) {
9239 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
9240 "1405 Failed to initialize iocb list.\n");
9241 goto out_unset_driver_resource_s3;
9242 }
9243
9244
9245 error = lpfc_setup_driver_resource_phase2(phba);
9246 if (error) {
9247 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
9248 "1406 Failed to set up driver resource.\n");
9249 goto out_free_iocb_list;
9250 }
9251
9252
9253 lpfc_get_hba_model_desc(phba, phba->ModelName, phba->ModelDesc);
9254
9255
9256 error = lpfc_create_shost(phba);
9257 if (error) {
9258 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
9259 "1407 Failed to create scsi host.\n");
9260 goto out_unset_driver_resource;
9261 }
9262
9263
9264 vport = phba->pport;
9265 error = lpfc_alloc_sysfs_attr(vport);
9266 if (error) {
9267 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
9268 "1476 Failed to allocate sysfs attr\n");
9269 goto out_destroy_shost;
9270 }
9271
9272 shost = lpfc_shost_from_vport(vport);
9273
9274 cfg_mode = phba->cfg_use_msi;
9275 while (true) {
9276
9277 lpfc_stop_port(phba);
9278
9279 intr_mode = lpfc_sli_enable_intr(phba, cfg_mode);
9280 if (intr_mode == LPFC_INTR_ERROR) {
9281 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
9282 "0431 Failed to enable interrupt.\n");
9283 error = -ENODEV;
9284 goto out_free_sysfs_attr;
9285 }
9286
9287 if (lpfc_sli_hba_setup(phba)) {
9288 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
9289 "1477 Failed to set up hba\n");
9290 error = -ENODEV;
9291 goto out_remove_device;
9292 }
9293
9294
9295 msleep(50);
9296
9297 if (intr_mode == 0 ||
9298 phba->sli.slistat.sli_intr > LPFC_MSIX_VECTORS) {
9299
9300 phba->intr_mode = intr_mode;
9301 lpfc_log_intr_mode(phba, intr_mode);
9302 break;
9303 } else {
9304 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
9305 "0447 Configure interrupt mode (%d) "
9306 "failed active interrupt test.\n",
9307 intr_mode);
9308
9309 lpfc_sli_disable_intr(phba);
9310
9311 cfg_mode = --intr_mode;
9312 }
9313 }
9314
9315
9316 lpfc_post_init_setup(phba);
9317
9318
9319 lpfc_create_static_vport(phba);
9320
9321 return 0;
9322
9323out_remove_device:
9324 lpfc_unset_hba(phba);
9325out_free_sysfs_attr:
9326 lpfc_free_sysfs_attr(vport);
9327out_destroy_shost:
9328 lpfc_destroy_shost(phba);
9329out_unset_driver_resource:
9330 lpfc_unset_driver_resource_phase2(phba);
9331out_free_iocb_list:
9332 lpfc_free_iocb_list(phba);
9333out_unset_driver_resource_s3:
9334 lpfc_sli_driver_resource_unset(phba);
9335out_unset_pci_mem_s3:
9336 lpfc_sli_pci_mem_unset(phba);
9337out_disable_pci_dev:
9338 lpfc_disable_pci_dev(phba);
9339 if (shost)
9340 scsi_host_put(shost);
9341out_free_phba:
9342 lpfc_hba_free(phba);
9343 return error;
9344}
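
/**
 * lpfc_pci_remove_one_s3 - PCI remove routine for SLI3 device ids
 * @pdev: pointer to PCI device.
 *
 * This routine terminates the vports, removes the SCSI host, brings the
 * SLI layer down, and releases the interrupt, DMA, and memory-mapped
 * resources when an SLI3 HBA is removed from the system.
 **/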
9355static void
9356lpfc_pci_remove_one_s3(struct pci_dev *pdev)
9357{
9358 struct Scsi_Host *shost = pci_get_drvdata(pdev);
9359 struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
9360 struct lpfc_vport **vports;
9361 struct lpfc_hba *phba = vport->phba;
9362 int i;
9363 int bars = pci_select_bars(pdev, IORESOURCE_MEM);
9364
9365 spin_lock_irq(&phba->hbalock);
9366 vport->load_flag |= FC_UNLOADING;
9367 spin_unlock_irq(&phba->hbalock);
9368
9369 lpfc_free_sysfs_attr(vport);
9370
9371
9372 vports = lpfc_create_vport_work_array(phba);
9373 if (vports != NULL)
9374 for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) {
9375 if (vports[i]->port_type == LPFC_PHYSICAL_PORT)
9376 continue;
9377 fc_vport_terminate(vports[i]->fc_vport);
9378 }
9379 lpfc_destroy_vport_work_array(phba, vports);
9380
9381
9382 fc_remove_host(shost);
9383 scsi_remove_host(shost);
9384 lpfc_cleanup(vport);
9385
9386
9387
9388
9389
9390
9391
9392
9393 lpfc_sli_hba_down(phba);
9394
9395 kthread_stop(phba->worker_thread);
9396
9397 lpfc_sli_brdrestart(phba);
9398
9399 kfree(phba->vpi_bmask);
9400 kfree(phba->vpi_ids);
9401
9402 lpfc_stop_hba_timers(phba);
9403 spin_lock_irq(&phba->hbalock);
9404 list_del_init(&vport->listentry);
9405 spin_unlock_irq(&phba->hbalock);
9406
9407 lpfc_debugfs_terminate(vport);
9408
9409
9410 if (phba->cfg_sriov_nr_virtfn)
9411 pci_disable_sriov(pdev);
9412
9413
9414 lpfc_sli_disable_intr(phba);
9415
9416 pci_set_drvdata(pdev, NULL);
9417 scsi_host_put(shost);
9418
9419
9420
9421
9422
9423 lpfc_scsi_free(phba);
9424 lpfc_mem_free_all(phba);
9425
9426 dma_free_coherent(&pdev->dev, lpfc_sli_hbq_size(),
9427 phba->hbqslimp.virt, phba->hbqslimp.phys);
9428
9429
9430 dma_free_coherent(&pdev->dev, SLI2_SLIM_SIZE,
9431 phba->slim2p.virt, phba->slim2p.phys);
9432
9433
9434 iounmap(phba->ctrl_regs_memmap_p);
9435 iounmap(phba->slim_memmap_p);
9436
9437 lpfc_hba_free(phba);
9438
9439 pci_release_selected_regions(pdev, bars);
9440 pci_disable_device(pdev);
9441}
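
/**
 * lpfc_pci_suspend_one_s3 - PCI suspend routine for SLI3 device ids
 * @pdev: pointer to PCI device.
 * @msg: power management message.
 *
 * This routine brings the device offline, stops the worker thread,
 * disables interrupts, and places the device into PCI D3hot state.
 * Always returns 0.
 **/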
9464static int
9465lpfc_pci_suspend_one_s3(struct pci_dev *pdev, pm_message_t msg)
9466{
9467 struct Scsi_Host *shost = pci_get_drvdata(pdev);
9468 struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
9469
9470 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
9471 "0473 PCI device Power Management suspend.\n");
9472
9473
9474 lpfc_offline_prep(phba, LPFC_MBX_WAIT);
9475 lpfc_offline(phba);
9476 kthread_stop(phba->worker_thread);
9477
9478
9479 lpfc_sli_disable_intr(phba);
9480
9481
9482 pci_save_state(pdev);
9483 pci_set_power_state(pdev, PCI_D3hot);
9484
9485 return 0;
9486}
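
/**
 * lpfc_pci_resume_one_s3 - PCI resume routine for SLI3 device ids
 * @pdev: pointer to PCI device.
 *
 * This routine restores the device power state, restarts the worker
 * thread, re-enables interrupts, and brings the device back online.
 *
 * Return codes
 *      0 - successful
 *      error value - resume failed
 **/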
9507static int
9508lpfc_pci_resume_one_s3(struct pci_dev *pdev)
9509{
9510 struct Scsi_Host *shost = pci_get_drvdata(pdev);
9511 struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
9512 uint32_t intr_mode;
9513 int error;
9514
9515 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
9516 "0452 PCI device Power Management resume.\n");
9517
9518
9519 pci_set_power_state(pdev, PCI_D0);
9520 pci_restore_state(pdev);
9521
9522
9523
9524
9525
9526 pci_save_state(pdev);
9527
9528 if (pdev->is_busmaster)
9529 pci_set_master(pdev);
9530
9531
9532 phba->worker_thread = kthread_run(lpfc_do_work, phba,
9533 "lpfc_worker_%d", phba->brd_no);
9534 if (IS_ERR(phba->worker_thread)) {
9535 error = PTR_ERR(phba->worker_thread);
9536 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
9537 "0434 PM resume failed to start worker "
9538 "thread: error=x%x.\n", error);
9539 return error;
9540 }
9541
9542
9543 intr_mode = lpfc_sli_enable_intr(phba, phba->intr_mode);
9544 if (intr_mode == LPFC_INTR_ERROR) {
9545 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
9546 "0430 PM resume Failed to enable interrupt\n");
9547 return -EIO;
9548 } else
9549 phba->intr_mode = intr_mode;
9550
9551
9552 lpfc_sli_brdrestart(phba);
9553 lpfc_online(phba);
9554
9555
9556 lpfc_log_intr_mode(phba, phba->intr_mode);
9557
9558 return 0;
9559}
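
/**
 * lpfc_sli_prep_dev_for_recover - Prepare SLI3 device for PCI recovery
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine aborts the outstanding IOCBs on the FCP ring so the PCI
 * channel I/O can be recovered.
 **/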
9568static void
9569lpfc_sli_prep_dev_for_recover(struct lpfc_hba *phba)
9570{
9571 struct lpfc_sli *psli = &phba->sli;
9572 struct lpfc_sli_ring *pring;
9573
9574 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
9575 "2723 PCI channel I/O abort preparing for recovery\n");
9576
9577
9578
9579
9580
9581 pring = &psli->ring[psli->fcp_ring];
9582 lpfc_sli_abort_iocb_ring(phba, pring);
9583}
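
/**
 * lpfc_sli_prep_dev_for_reset - Prepare SLI3 device for PCI slot reset
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine blocks management and SCSI I/O, flushes the FCP rings,
 * stops the HBA timers, and disables the interrupt and PCI device.
 **/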
9593static void
9594lpfc_sli_prep_dev_for_reset(struct lpfc_hba *phba)
9595{
9596 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
9597 "2710 PCI channel disable preparing for reset\n");
9598
9599
9600 lpfc_block_mgmt_io(phba, LPFC_MBX_WAIT);
9601
9602
9603 lpfc_scsi_dev_block(phba);
9604
9605
9606 lpfc_sli_flush_fcp_rings(phba);
9607
9608
9609 lpfc_stop_hba_timers(phba);
9610
9611
9612 lpfc_sli_disable_intr(phba);
9613 pci_disable_device(phba->pcidev);
9614}
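
/**
 * lpfc_sli_prep_dev_for_perm_failure - Prepare SLI3 dev for perm failure
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine blocks SCSI I/O, stops the HBA timers, and flushes the
 * FCP rings when the PCI channel is permanently disabled.
 **/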
9624static void
9625lpfc_sli_prep_dev_for_perm_failure(struct lpfc_hba *phba)
9626{
9627 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
9628 "2711 PCI channel permanent disable for failure\n");
9629
9630 lpfc_scsi_dev_block(phba);
9631
9632
9633 lpfc_stop_hba_timers(phba);
9634
9635
9636 lpfc_sli_flush_fcp_rings(phba);
9637}
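
/**
 * lpfc_io_error_detected_s3 - PCI error-detected callback, SLI3 devices
 * @pdev: pointer to PCI device.
 * @state: the current PCI connection state.
 *
 * Maps the reported channel state onto the recover, reset, or permanent
 * failure preparation routine and returns the matching pci_ers_result_t
 * disposition.
 **/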
9657static pci_ers_result_t
9658lpfc_io_error_detected_s3(struct pci_dev *pdev, pci_channel_state_t state)
9659{
9660 struct Scsi_Host *shost = pci_get_drvdata(pdev);
9661 struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
9662
9663 switch (state) {
9664 case pci_channel_io_normal:
9665
9666 lpfc_sli_prep_dev_for_recover(phba);
9667 return PCI_ERS_RESULT_CAN_RECOVER;
9668 case pci_channel_io_frozen:
9669
9670 lpfc_sli_prep_dev_for_reset(phba);
9671 return PCI_ERS_RESULT_NEED_RESET;
9672 case pci_channel_io_perm_failure:
9673
9674 lpfc_sli_prep_dev_for_perm_failure(phba);
9675 return PCI_ERS_RESULT_DISCONNECT;
9676 default:
9677
9678 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
9679 "0472 Unknown PCI error state: x%x\n", state);
9680 lpfc_sli_prep_dev_for_reset(phba);
9681 return PCI_ERS_RESULT_NEED_RESET;
9682 }
9683}
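
/**
 * lpfc_io_slot_reset_s3 - PCI slot-reset callback for SLI3 devices
 * @pdev: pointer to PCI device.
 *
 * This routine re-enables the PCI device, restores its saved state,
 * re-enables the interrupt, and restarts the board.
 *
 * Return codes
 *      PCI_ERS_RESULT_RECOVERED - recovery may resume
 *      PCI_ERS_RESULT_DISCONNECT - device could not be recovered
 **/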
static pci_ers_result_t
lpfc_io_slot_reset_s3(struct pci_dev *pdev)
{
	struct Scsi_Host *shost = pci_get_drvdata(pdev);
	struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
	struct lpfc_sli *psli = &phba->sli;
	uint32_t intr_mode;

	dev_printk(KERN_INFO, &pdev->dev, "recovering from a slot reset.\n");
	if (pci_enable_device_mem(pdev)) {
		printk(KERN_ERR "lpfc: Cannot re-enable "
			"PCI device after reset.\n");
		return PCI_ERS_RESULT_DISCONNECT;
	}

	pci_restore_state(pdev);

	/*
	 * As the new kernel behavior of pci_restore_state() API call clears
	 * device saved_state flag, need to save the restored state again.
	 */
	pci_save_state(pdev);

	if (pdev->is_busmaster)
		pci_set_master(pdev);

	spin_lock_irq(&phba->hbalock);
	psli->sli_flag &= ~LPFC_SLI_ACTIVE;
	spin_unlock_irq(&phba->hbalock);

	/* Configure and enable interrupt */
	intr_mode = lpfc_sli_enable_intr(phba, phba->intr_mode);
	if (intr_mode == LPFC_INTR_ERROR) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
			"0427 Cannot re-enable interrupt after "
			"slot reset.\n");
		return PCI_ERS_RESULT_DISCONNECT;
	} else
		phba->intr_mode = intr_mode;

	/* Take device offline, it will perform cleanup */
	lpfc_offline_prep(phba, LPFC_MBX_WAIT);
	lpfc_offline(phba);
	lpfc_sli_brdrestart(phba);

	/* Log the current active interrupt mode */
	lpfc_log_intr_mode(phba, phba->intr_mode);

	return PCI_ERS_RESULT_RECOVERED;
}
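/**
 * lpfc_io_resume_s3 - Method for resuming PCI I/O operation on SLI-3 device.
 * @pdev: pointer to PCI device
 *
 * This routine is called from the PCI subsystem for error handling to device
 * with SLI-3 interface spec. It is called when kernel error recovery tells
 * the lpfc driver that it is ok to resume normal PCI operation after PCI bus
 * error recovery. After this call, traffic can start to flow from this device
 * again.
 **/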
static void
lpfc_io_resume_s3(struct pci_dev *pdev)
{
	struct Scsi_Host *shost = pci_get_drvdata(pdev);
	struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;

	/* Bring device online, it will be no-op for non-fatal error resume */
	lpfc_online(phba);

	/* Clean up Advanced Error Reporting (AER) if needed */
	if (phba->hba_flag & HBA_AER_ENABLED)
		pci_cleanup_aer_uncorrect_error_status(pdev);
}
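/**
 * lpfc_sli4_get_els_iocb_cnt - Calculate the # of ELS IOCBs to reserve
 * @phba: pointer to lpfc hba data structure.
 *
 * returns the number of ELS/CT IOCBs to reserve
 **/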
int
lpfc_sli4_get_els_iocb_cnt(struct lpfc_hba *phba)
{
	int max_xri = phba->sli4_hba.max_cfg_param.max_xri;

	if (phba->sli_rev == LPFC_SLI_REV4) {
		if (max_xri <= 100)
			return 10;
		else if (max_xri <= 256)
			return 25;
		else if (max_xri <= 512)
			return 50;
		else if (max_xri <= 1024)
			return 100;
		else if (max_xri <= 1536)
			return 150;
		else if (max_xri <= 2048)
			return 200;
		else
			return 250;
	} else
		return 0;
}
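/**
 * lpfc_write_firmware - attempt to write a firmware image to the port
 * @fw: pointer to firmware image returned from request_firmware.
 * @context: pointer to the lpfc hba data structure the request was issued for.
 *
 **/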
static void
lpfc_write_firmware(const struct firmware *fw, void *context)
{
	struct lpfc_hba *phba = (struct lpfc_hba *)context;
	char fwrev[FW_REV_STR_SIZE];
	struct lpfc_grp_hdr *image;
	struct list_head dma_buffer_list;
	int i, rc = 0;
	struct lpfc_dmabuf *dmabuf, *next;
	uint32_t offset = 0, temp_offset = 0;

	/* It can be null in no-wait mode, sanity check */
	if (!fw) {
		rc = -ENXIO;
		goto out;
	}
	image = (struct lpfc_grp_hdr *)fw->data;

	INIT_LIST_HEAD(&dma_buffer_list);
	if ((be32_to_cpu(image->magic_number) != LPFC_GROUP_OJECT_MAGIC_NUM) ||
	    (bf_get_be32(lpfc_grp_hdr_file_type, image) !=
	     LPFC_FILE_TYPE_GROUP) ||
	    (bf_get_be32(lpfc_grp_hdr_id, image) != LPFC_FILE_ID_GROUP) ||
	    (be32_to_cpu(image->size) != fw->size)) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"3022 Invalid FW image found. "
				"Magic:%x Type:%x ID:%x\n",
				be32_to_cpu(image->magic_number),
				bf_get_be32(lpfc_grp_hdr_file_type, image),
				bf_get_be32(lpfc_grp_hdr_id, image));
		rc = -EINVAL;
		goto release_out;
	}
	lpfc_decode_firmware_rev(phba, fwrev, 1);
	if (strncmp(fwrev, image->revision, strnlen(image->revision, 16))) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"3023 Updating Firmware, Current Version:%s "
				"New Version:%s\n",
				fwrev, image->revision);
		for (i = 0; i < LPFC_MBX_WR_CONFIG_MAX_BDE; i++) {
			dmabuf = kzalloc(sizeof(struct lpfc_dmabuf),
					 GFP_KERNEL);
			if (!dmabuf) {
				rc = -ENOMEM;
				goto release_out;
			}
			dmabuf->virt = dma_alloc_coherent(&phba->pcidev->dev,
							  SLI4_PAGE_SIZE,
							  &dmabuf->phys,
							  GFP_KERNEL);
			if (!dmabuf->virt) {
				kfree(dmabuf);
				rc = -ENOMEM;
				goto release_out;
			}
			list_add_tail(&dmabuf->list, &dma_buffer_list);
		}
		while (offset < fw->size) {
			temp_offset = offset;
			list_for_each_entry(dmabuf, &dma_buffer_list, list) {
				if (temp_offset + SLI4_PAGE_SIZE > fw->size) {
					memcpy(dmabuf->virt,
					       fw->data + temp_offset,
					       fw->size - temp_offset);
					temp_offset = fw->size;
					break;
				}
				memcpy(dmabuf->virt, fw->data + temp_offset,
				       SLI4_PAGE_SIZE);
				temp_offset += SLI4_PAGE_SIZE;
			}
			rc = lpfc_wr_object(phba, &dma_buffer_list,
				    (fw->size - offset), &offset);
			if (rc)
				goto release_out;
		}
		rc = offset;
	}

release_out:
	list_for_each_entry_safe(dmabuf, next, &dma_buffer_list, list) {
		list_del(&dmabuf->list);
		dma_free_coherent(&phba->pcidev->dev, SLI4_PAGE_SIZE,
				  dmabuf->virt, dmabuf->phys);
		kfree(dmabuf);
	}
	release_firmware(fw);
out:
	lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
			"3024 Firmware update done: %d.\n", rc);
	return;
}
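/**
 * lpfc_sli4_request_firmware_update - Request linux generic firmware upgrade
 * @phba: pointer to lpfc hba data structure.
 * @fw_upgrade: INT_FW_UPGRADE to upgrade through hotplug event (no-wait),
 *              RUN_FW_UPGRADE to upgrade synchronously at run time.
 *
 * Return code
 * 	0 - the firmware update request was issued successfully
 * 	negative value - failed to issue the request
 **/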
int
lpfc_sli4_request_firmware_update(struct lpfc_hba *phba, uint8_t fw_upgrade)
{
	uint8_t file_name[ELX_MODEL_NAME_SIZE];
	int ret;
	const struct firmware *fw;

	/* Only supported on SLI4 interface type 2 for now */
	if (bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) !=
	    LPFC_SLI_INTF_IF_TYPE_2)
		return -EPERM;

	snprintf(file_name, ELX_MODEL_NAME_SIZE, "%s.grp", phba->ModelName);

	if (fw_upgrade == INT_FW_UPGRADE) {
		ret = request_firmware_nowait(THIS_MODULE, FW_ACTION_HOTPLUG,
					file_name, &phba->pcidev->dev,
					GFP_KERNEL, (void *)phba,
					lpfc_write_firmware);
	} else if (fw_upgrade == RUN_FW_UPGRADE) {
		ret = request_firmware(&fw, file_name, &phba->pcidev->dev);
		if (!ret)
			lpfc_write_firmware(fw, (void *)phba);
	} else {
		ret = -EINVAL;
	}

	return ret;
}
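/**
 * lpfc_pci_probe_one_s4 - PCI probe func to reg SLI-4 device to PCI subsys
 * @pdev: pointer to PCI device
 * @pid: pointer to PCI device identifier
 *
 * This routine is called from the kernel's PCI subsystem to device with
 * SLI-4 interface spec. When an Emulex HBA with SLI-4 interface spec is
 * presented on PCI bus, the kernel PCI subsystem looks at PCI device-specific
 * information of the device and driver to see if the driver state that it
 * can support this kind of device. If the match is successful, the driver
 * core invokes this routine. If this routine determines it can claim the HBA,
 * it does all the initialization that it needs to do to handle the HBA
 * properly.
 *
 * Return code
 * 	0 - driver can claim the device
 * 	negative value - driver can not claim the device
 **/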
static int
lpfc_pci_probe_one_s4(struct pci_dev *pdev, const struct pci_device_id *pid)
{
	struct lpfc_hba *phba;
	struct lpfc_vport *vport = NULL;
	struct Scsi_Host *shost = NULL;
	int error, ret;
	uint32_t cfg_mode, intr_mode;
	int adjusted_fcp_io_channel;

	/* Allocate memory for HBA structure */
	phba = lpfc_hba_alloc(pdev);
	if (!phba)
		return -ENOMEM;

	/* Perform generic PCI device enabling operation */
	error = lpfc_enable_pci_dev(phba);
	if (error)
		goto out_free_phba;

	/* Set up SLI API function jump table for PCI-device group-1 HBAs */
	error = lpfc_api_table_setup(phba, LPFC_PCI_DEV_OC);
	if (error)
		goto out_disable_pci_dev;

	/* Set up SLI-4 specific device PCI memory space */
	error = lpfc_sli4_pci_mem_setup(phba);
	if (error) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"1410 Failed to set up pci memory space.\n");
		goto out_disable_pci_dev;
	}

	/* Set up phase-1 common device driver resources */
	error = lpfc_setup_driver_resource_phase1(phba);
	if (error) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"1411 Failed to set up driver resource.\n");
		goto out_unset_pci_mem_s4;
	}

	/* Set up SLI-4 specific device driver resources */
	error = lpfc_sli4_driver_resource_setup(phba);
	if (error) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"1412 Failed to set up driver resource.\n");
		goto out_unset_pci_mem_s4;
	}

	/* Initialize and populate the iocb list per host */

	lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
			"2821 initialize iocb list %d.\n",
			phba->cfg_iocb_cnt*1024);
	error = lpfc_init_iocb_list(phba, phba->cfg_iocb_cnt*1024);

	if (error) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"1413 Failed to initialize iocb list.\n");
		goto out_unset_driver_resource_s4;
	}

	INIT_LIST_HEAD(&phba->active_rrq_list);
	INIT_LIST_HEAD(&phba->fcf.fcf_pri_list);

	/* Set up common device driver resources */
	error = lpfc_setup_driver_resource_phase2(phba);
	if (error) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"1414 Failed to set up driver resource.\n");
		goto out_free_iocb_list;
	}

	/* Get the default values for Model Name and Description */
	lpfc_get_hba_model_desc(phba, phba->ModelName, phba->ModelDesc);

	/* Create SCSI host to the physical port */
	error = lpfc_create_shost(phba);
	if (error) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"1415 Failed to create scsi host.\n");
		goto out_unset_driver_resource;
	}

	/* Configure sysfs attributes */
	vport = phba->pport;
	error = lpfc_alloc_sysfs_attr(vport);
	if (error) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"1416 Failed to allocate sysfs attr\n");
		goto out_destroy_shost;
	}

	shost = lpfc_shost_from_vport(vport); /* save shost for error cleanup */
	/* Now, trying to enable interrupt and bring up the device */
	cfg_mode = phba->cfg_use_msi;

	/* Put device to a known state before enabling interrupt */
	lpfc_stop_port(phba);
	/* Configure and enable interrupt */
	intr_mode = lpfc_sli4_enable_intr(phba, cfg_mode);
	if (intr_mode == LPFC_INTR_ERROR) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"0426 Failed to enable interrupt.\n");
		error = -ENODEV;
		goto out_free_sysfs_attr;
	}
	/* Default to single EQ for non-MSI-X */
	if (phba->intr_type != MSIX)
		adjusted_fcp_io_channel = 1;
	else
		adjusted_fcp_io_channel = phba->cfg_fcp_io_channel;
	phba->cfg_fcp_io_channel = adjusted_fcp_io_channel;
	/* Set up SLI-4 HBA */
	if (lpfc_sli4_hba_setup(phba)) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"1421 Failed to set up hba\n");
		error = -ENODEV;
		goto out_disable_intr;
	}

	/* Log the current active interrupt mode */
	phba->intr_mode = intr_mode;
	lpfc_log_intr_mode(phba, intr_mode);

	/* Perform post initialization setup */
	lpfc_post_init_setup(phba);

	/* check for firmware upgrade or downgrade */
	if (phba->cfg_request_firmware_upgrade)
		ret = lpfc_sli4_request_firmware_update(phba, INT_FW_UPGRADE);

	/* Check if there are static vports to be created. */
	lpfc_create_static_vport(phba);
	return 0;

out_disable_intr:
	lpfc_sli4_disable_intr(phba);
out_free_sysfs_attr:
	lpfc_free_sysfs_attr(vport);
out_destroy_shost:
	lpfc_destroy_shost(phba);
out_unset_driver_resource:
	lpfc_unset_driver_resource_phase2(phba);
out_free_iocb_list:
	lpfc_free_iocb_list(phba);
out_unset_driver_resource_s4:
	lpfc_sli4_driver_resource_unset(phba);
out_unset_pci_mem_s4:
	lpfc_sli4_pci_mem_unset(phba);
out_disable_pci_dev:
	lpfc_disable_pci_dev(phba);
	if (shost)
		scsi_host_put(shost);
out_free_phba:
	lpfc_hba_free(phba);
	return error;
}
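/**
 * lpfc_pci_remove_one_s4 - PCI func to unreg SLI-4 device from PCI subsystem
 * @pdev: pointer to PCI device
 *
 * This routine is called from the kernel's PCI subsystem to device with
 * SLI-4 interface spec. When an Emulex HBA with SLI-4 interface spec is
 * removed from PCI bus, it performs all the necessary cleanup for the HBA
 * device to be removed from the PCI subsystem properly.
 **/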
static void
lpfc_pci_remove_one_s4(struct pci_dev *pdev)
{
	struct Scsi_Host *shost = pci_get_drvdata(pdev);
	struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
	struct lpfc_vport **vports;
	struct lpfc_hba *phba = vport->phba;
	int i;

	/* Mark the device unloading flag */
	spin_lock_irq(&phba->hbalock);
	vport->load_flag |= FC_UNLOADING;
	spin_unlock_irq(&phba->hbalock);

	/* Free the HBA sysfs attributes */
	lpfc_free_sysfs_attr(vport);

	/* Release all the vports against this physical port */
	vports = lpfc_create_vport_work_array(phba);
	if (vports != NULL)
		for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) {
			if (vports[i]->port_type == LPFC_PHYSICAL_PORT)
				continue;
			fc_vport_terminate(vports[i]->fc_vport);
		}
	lpfc_destroy_vport_work_array(phba, vports);

	/* Remove FC host and then SCSI host with the physical port */
	fc_remove_host(shost);
	scsi_remove_host(shost);

	/* Perform cleanup on the physical port */
	lpfc_cleanup(vport);

	/*
	 * Bring down the SLI Layer. This step disables all interrupts,
	 * clears the rings, discards all mailbox commands, and resets
	 * the HBA FCoE function.
	 */
	lpfc_debugfs_terminate(vport);
	lpfc_sli4_hba_unset(phba);

	spin_lock_irq(&phba->hbalock);
	list_del_init(&vport->listentry);
	spin_unlock_irq(&phba->hbalock);

	/* Perform scsi free before driver resource_unset since scsi
	 * buffers are released to their corresponding pools here.
	 */
	lpfc_scsi_free(phba);

	lpfc_sli4_driver_resource_unset(phba);

	/* Unmap adapter Control and Doorbell registers */
	lpfc_sli4_pci_mem_unset(phba);

	/* Release PCI resources and disable device's PCI function */
	scsi_host_put(shost);
	lpfc_disable_pci_dev(phba);

	/* Finally, free the driver's device data structure */
	lpfc_hba_free(phba);

	return;
}
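/**
 * lpfc_pci_suspend_one_s4 - PCI func to suspend SLI-4 device for power mgmnt
 * @pdev: pointer to PCI device
 * @msg: power management message
 *
 * This routine is called from the kernel's PCI subsystem to support system
 * Power Management (PM) to device with SLI-4 interface spec. When PM invokes
 * this method, it quiesces the device by stopping the driver's worker
 * thread for the device, turning off device's interrupt and DMA, and bring
 * the device offline. Note that as the driver implements the minimum PM
 * requirements to a power-aware driver's PM support for suspend/resume -- all
 * the possible PM messages (SUSPEND, HIBERNATE, FREEZE) to the suspend()
 * method call will be treated as SUSPEND and the driver will fully
 * reinitialize its device during resume() method call, the driver will set
 * device to PCI_D3hot state in PCI config space instead of setting it
 * according to the @msg provided by the PM.
 *
 * Return code
 * 	0 - driver suspended the device
 * 	Error otherwise
 **/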
static int
lpfc_pci_suspend_one_s4(struct pci_dev *pdev, pm_message_t msg)
{
	struct Scsi_Host *shost = pci_get_drvdata(pdev);
	struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;

	lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
			"2843 PCI device Power Management suspend.\n");

	/* Bring down the device */
	lpfc_offline_prep(phba, LPFC_MBX_WAIT);
	lpfc_offline(phba);
	kthread_stop(phba->worker_thread);

	/* Disable interrupt from device */
	lpfc_sli4_disable_intr(phba);
	lpfc_sli4_queue_destroy(phba);

	/* Save device state to PCI config space */
	pci_save_state(pdev);
	pci_set_power_state(pdev, PCI_D3hot);

	return 0;
}
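/**
 * lpfc_pci_resume_one_s4 - PCI func to resume SLI-4 device for power mgmnt
 * @pdev: pointer to PCI device
 *
 * This routine is called from the kernel's PCI subsystem to support system
 * Power Management (PM) to device with SLI-4 interface spec. When PM invokes
 * this method, it restores the device's PCI config space state and fully
 * reinitializes the device and brings it online. Note that as the driver
 * implements the minimum PM requirements to a power-aware driver's PM for
 * suspend/resume -- all the possible PM messages (SUSPEND, HIBERNATE, FREEZE)
 * to the suspend() method call will be treated as SUSPEND and the driver
 * will fully reinitialize its device during resume() method call, the device
 * will be set to PCI_D0 directly in PCI config space before restoring the
 * state.
 *
 * Return code
 * 	0 - driver resumed the device
 * 	Error otherwise
 **/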
static int
lpfc_pci_resume_one_s4(struct pci_dev *pdev)
{
	struct Scsi_Host *shost = pci_get_drvdata(pdev);
	struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
	uint32_t intr_mode;
	int error;

	lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
			"0292 PCI device Power Management resume.\n");

	/* Restore device state from PCI config space */
	pci_set_power_state(pdev, PCI_D0);
	pci_restore_state(pdev);

	/*
	 * As the new kernel behavior of pci_restore_state() API call clears
	 * device saved_state flag, need to save the restored state again.
	 */
	pci_save_state(pdev);

	if (pdev->is_busmaster)
		pci_set_master(pdev);

	/* Startup the kernel thread for this host adapter. */
	phba->worker_thread = kthread_run(lpfc_do_work, phba,
					"lpfc_worker_%d", phba->brd_no);
	if (IS_ERR(phba->worker_thread)) {
		error = PTR_ERR(phba->worker_thread);
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"0293 PM resume failed to start worker "
				"thread: error=x%x.\n", error);
		return error;
	}

	/* Configure and enable interrupt */
	intr_mode = lpfc_sli4_enable_intr(phba, phba->intr_mode);
	if (intr_mode == LPFC_INTR_ERROR) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"0294 PM resume Failed to enable interrupt\n");
		return -EIO;
	} else
		phba->intr_mode = intr_mode;

	/* Restart HBA and bring it online */
	lpfc_sli_brdrestart(phba);
	lpfc_online(phba);

	/* Log the current active interrupt mode */
	lpfc_log_intr_mode(phba, phba->intr_mode);

	return 0;
}
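/**
 * lpfc_sli4_prep_dev_for_recover - Prepare SLI4 device for pci slot recover
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is called to prepare the SLI4 device for PCI slot recover. It
 * aborts all the outstanding SCSI I/Os to the pci device.
 **/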
static void
lpfc_sli4_prep_dev_for_recover(struct lpfc_hba *phba)
{
	struct lpfc_sli *psli = &phba->sli;
	struct lpfc_sli_ring *pring;

	lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
			"2828 PCI channel I/O abort preparing for recovery\n");
	/*
	 * There may be errored I/Os through HBA, abort all I/Os on txcmplq
	 * and let the SCSI mid-layer to retry them to recover.
	 */
	pring = &psli->ring[psli->fcp_ring];
	lpfc_sli_abort_iocb_ring(phba, pring);
}
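/**
 * lpfc_sli4_prep_dev_for_reset - Prepare SLI4 device for pci slot reset
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is called to prepare the SLI4 device for PCI slot reset. It
 * disables the device interrupt and pci device, and aborts the internal FCP
 * pending I/Os.
 **/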
static void
lpfc_sli4_prep_dev_for_reset(struct lpfc_hba *phba)
{
	lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
			"2826 PCI channel disable preparing for reset\n");

	/* Block any management I/Os to the device */
	lpfc_block_mgmt_io(phba, LPFC_MBX_NO_WAIT);

	/* Block all SCSI devices' I/Os on the host */
	lpfc_scsi_dev_block(phba);

	/* Flush all driver's outstanding SCSI I/Os as we are to reset */
	lpfc_sli_flush_fcp_rings(phba);

	/* stop all timers */
	lpfc_stop_hba_timers(phba);

	/* Disable interrupt and pci device */
	lpfc_sli4_disable_intr(phba);
	lpfc_sli4_queue_destroy(phba);
	pci_disable_device(phba->pcidev);
}
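/**
 * lpfc_sli4_prep_dev_for_perm_failure - Prepare SLI4 dev for pci slot disable
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is called to prepare the SLI4 device for PCI slot permanently
 * disabling. It blocks the SCSI transport layer traffic and flushes the FCP
 * pending I/Os.
 **/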
static void
lpfc_sli4_prep_dev_for_perm_failure(struct lpfc_hba *phba)
{
	lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
			"2827 PCI channel permanent disable for failure\n");

	/* Block all SCSI devices' I/Os on the host */
	lpfc_scsi_dev_block(phba);

	/* stop all timers */
	lpfc_stop_hba_timers(phba);

	/* Clean up all driver's outstanding SCSI I/Os */
	lpfc_sli_flush_fcp_rings(phba);
}
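/**
 * lpfc_io_error_detected_s4 - Method for handling PCI I/O error to SLI-4 device
 * @pdev: pointer to PCI device.
 * @state: the current PCI connection state.
 *
 * This routine is called from the PCI subsystem for error handling to device
 * with SLI-4 interface spec. This function is called by the PCI subsystem
 * after a PCI bus error affecting this device has been detected. When this
 * function is invoked, it will need to stop all the I/Os and interrupt(s)
 * to the device. Once that is done, it will return PCI_ERS_RESULT_NEED_RESET
 * for the PCI subsystem to perform proper recovery as desired.
 *
 * Return codes
 * 	PCI_ERS_RESULT_CAN_RECOVER - can be recovered with reset_link
 * 	PCI_ERS_RESULT_NEED_RESET - need to reset before recovery
 * 	PCI_ERS_RESULT_DISCONNECT - device could not be recovered
 **/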
static pci_ers_result_t
lpfc_io_error_detected_s4(struct pci_dev *pdev, pci_channel_state_t state)
{
	struct Scsi_Host *shost = pci_get_drvdata(pdev);
	struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;

	switch (state) {
	case pci_channel_io_normal:
		/* Non-fatal error, prepare for recovery */
		lpfc_sli4_prep_dev_for_recover(phba);
		return PCI_ERS_RESULT_CAN_RECOVER;
	case pci_channel_io_frozen:
		/* Fatal error, prepare for slot reset */
		lpfc_sli4_prep_dev_for_reset(phba);
		return PCI_ERS_RESULT_NEED_RESET;
	case pci_channel_io_perm_failure:
		/* Permanent failure, prepare for device down */
		lpfc_sli4_prep_dev_for_perm_failure(phba);
		return PCI_ERS_RESULT_DISCONNECT;
	default:
		/* Unknown state, prepare and request slot reset */
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"2825 Unknown PCI error state: x%x\n", state);
		lpfc_sli4_prep_dev_for_reset(phba);
		return PCI_ERS_RESULT_NEED_RESET;
	}
}
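/**
 * lpfc_io_slot_reset_s4 - Method for restart PCI SLI-4 device from scratch
 * @pdev: pointer to PCI device.
 *
 * This routine is called from the PCI subsystem for error handling to device
 * with SLI-4 interface spec. It is called after PCI bus has been reset to
 * restart the PCI card from scratch, as if from a cold-boot. During the PCI
 * subsystem error recovery, after the driver returns
 * PCI_ERS_RESULT_NEED_RESET, the PCI subsystem will perform proper error
 * recovery and then call this routine before calling the .resume method to
 * recover the device. This function will initialize the HBA device, enable
 * the interrupt, but it will just put the HBA to offline state without
 * passing any I/O traffic.
 *
 * Return codes
 * 	PCI_ERS_RESULT_RECOVERED - the device has been recovered
 * 	PCI_ERS_RESULT_DISCONNECT - device could not be recovered
 **/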
static pci_ers_result_t
lpfc_io_slot_reset_s4(struct pci_dev *pdev)
{
	struct Scsi_Host *shost = pci_get_drvdata(pdev);
	struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
	struct lpfc_sli *psli = &phba->sli;
	uint32_t intr_mode;

	dev_printk(KERN_INFO, &pdev->dev, "recovering from a slot reset.\n");
	if (pci_enable_device_mem(pdev)) {
		printk(KERN_ERR "lpfc: Cannot re-enable "
			"PCI device after reset.\n");
		return PCI_ERS_RESULT_DISCONNECT;
	}

	pci_restore_state(pdev);

	/*
	 * As the new kernel behavior of pci_restore_state() API call clears
	 * device saved_state flag, need to save the restored state again.
	 */
	pci_save_state(pdev);

	if (pdev->is_busmaster)
		pci_set_master(pdev);

	spin_lock_irq(&phba->hbalock);
	psli->sli_flag &= ~LPFC_SLI_ACTIVE;
	spin_unlock_irq(&phba->hbalock);

	/* Configure and enable interrupt */
	intr_mode = lpfc_sli4_enable_intr(phba, phba->intr_mode);
	if (intr_mode == LPFC_INTR_ERROR) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"2824 Cannot re-enable interrupt after "
				"slot reset.\n");
		return PCI_ERS_RESULT_DISCONNECT;
	} else
		phba->intr_mode = intr_mode;

	/* Log the current active interrupt mode */
	lpfc_log_intr_mode(phba, phba->intr_mode);

	return PCI_ERS_RESULT_RECOVERED;
}
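/**
 * lpfc_io_resume_s4 - Method for resuming PCI I/O operation to SLI-4 device
 * @pdev: pointer to PCI device
 *
 * This routine is called from the PCI subsystem for error handling to device
 * with SLI-4 interface spec. It is called when kernel error recovery tells
 * the lpfc driver that it is ok to resume normal PCI operation after PCI bus
 * error recovery. After this call, traffic can start to flow from this device
 * again.
 **/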
static void
lpfc_io_resume_s4(struct pci_dev *pdev)
{
	struct Scsi_Host *shost = pci_get_drvdata(pdev);
	struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;

	/*
	 * In case of slot reset, as function reset is performed through
	 * mailbox command which needs DMA to be enabled, this operation
	 * has to be moved to the io resume phase. Taking device offline
	 * will perform the necessary cleanup.
	 */
	if (!(phba->sli.sli_flag & LPFC_SLI_ACTIVE)) {
		/* Perform device reset */
		lpfc_offline_prep(phba, LPFC_MBX_WAIT);
		lpfc_offline(phba);
		lpfc_sli_brdrestart(phba);
		/* Bring the device back online */
		lpfc_online(phba);
	}

	/* Clean up Advanced Error Reporting (AER) if needed */
	if (phba->hba_flag & HBA_AER_ENABLED)
		pci_cleanup_aer_uncorrect_error_status(pdev);
}
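/**
 * lpfc_pci_probe_one - lpfc PCI probe func to reg dev to PCI subsystem
 * @pdev: pointer to PCI device
 * @pid: pointer to PCI device identifier
 *
 * This routine is to be registered to the kernel's PCI subsystem. When an
 * Emulex HBA device is presented on PCI bus, the kernel PCI subsystem looks
 * at PCI device-specific information of the device and driver to see if the
 * driver state that it can support this kind of device. If the match is
 * successful, the driver core invokes this routine. This routine dispatches
 * the action to the proper SLI-3 or SLI-4 device probing routine, which will
 * do all the initialization that it needs to do to handle the HBA properly.
 *
 * Return code
 * 	0 - driver can claim the device
 * 	negative value - driver can not claim the device
 **/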
static int
lpfc_pci_probe_one(struct pci_dev *pdev, const struct pci_device_id *pid)
{
	int rc;
	struct lpfc_sli_intf intf;

	if (pci_read_config_dword(pdev, LPFC_SLI_INTF, &intf.word0))
		return -ENODEV;

	if ((bf_get(lpfc_sli_intf_valid, &intf) == LPFC_SLI_INTF_VALID) &&
	    (bf_get(lpfc_sli_intf_slirev, &intf) == LPFC_SLI_INTF_REV_SLI4))
		rc = lpfc_pci_probe_one_s4(pdev, pid);
	else
		rc = lpfc_pci_probe_one_s3(pdev, pid);

	return rc;
}
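/**
 * lpfc_pci_remove_one - lpfc PCI func to unreg dev from PCI subsystem
 * @pdev: pointer to PCI device
 *
 * This routine is to be registered to the kernel's PCI subsystem. When an
 * Emulex HBA is removed from PCI bus, the driver core invokes this routine.
 * This routine dispatches the action to the proper SLI-3 or SLI-4 device
 * removal routine, which will perform all the necessary cleanup for the
 * device to be removed from the PCI subsystem properly.
 **/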
static void
lpfc_pci_remove_one(struct pci_dev *pdev)
{
	struct Scsi_Host *shost = pci_get_drvdata(pdev);
	struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;

	switch (phba->pci_dev_grp) {
	case LPFC_PCI_DEV_LP:
		lpfc_pci_remove_one_s3(pdev);
		break;
	case LPFC_PCI_DEV_OC:
		lpfc_pci_remove_one_s4(pdev);
		break;
	default:
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"1424 Invalid PCI device group: 0x%x\n",
				phba->pci_dev_grp);
		break;
	}
	return;
}
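/**
 * lpfc_pci_suspend_one - lpfc PCI func to suspend dev for power management
 * @pdev: pointer to PCI device
 * @msg: power management message
 *
 * This routine is to be registered to the kernel's PCI subsystem to support
 * system Power Management (PM). When PM invokes this method, it dispatches
 * the action to the proper SLI-3 or SLI-4 device suspend routine, which will
 * suspend the device.
 *
 * Return code
 * 	0 - driver suspended the device
 * 	Error otherwise
 **/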
static int
lpfc_pci_suspend_one(struct pci_dev *pdev, pm_message_t msg)
{
	struct Scsi_Host *shost = pci_get_drvdata(pdev);
	struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
	int rc = -ENODEV;

	switch (phba->pci_dev_grp) {
	case LPFC_PCI_DEV_LP:
		rc = lpfc_pci_suspend_one_s3(pdev, msg);
		break;
	case LPFC_PCI_DEV_OC:
		rc = lpfc_pci_suspend_one_s4(pdev, msg);
		break;
	default:
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"1425 Invalid PCI device group: 0x%x\n",
				phba->pci_dev_grp);
		break;
	}
	return rc;
}
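/**
 * lpfc_pci_resume_one - lpfc PCI func to resume dev for power management
 * @pdev: pointer to PCI device
 *
 * This routine is to be registered to the kernel's PCI subsystem to support
 * system Power Management (PM). When PM invokes this method, it dispatches
 * the action to the proper SLI-3 or SLI-4 device resume routine, which will
 * resume the device.
 *
 * Return code
 * 	0 - driver resumed the device
 * 	Error otherwise
 **/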
static int
lpfc_pci_resume_one(struct pci_dev *pdev)
{
	struct Scsi_Host *shost = pci_get_drvdata(pdev);
	struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
	int rc = -ENODEV;

	switch (phba->pci_dev_grp) {
	case LPFC_PCI_DEV_LP:
		rc = lpfc_pci_resume_one_s3(pdev);
		break;
	case LPFC_PCI_DEV_OC:
		rc = lpfc_pci_resume_one_s4(pdev);
		break;
	default:
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"1426 Invalid PCI device group: 0x%x\n",
				phba->pci_dev_grp);
		break;
	}
	return rc;
}
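/**
 * lpfc_io_error_detected - lpfc method for handling PCI I/O error
 * @pdev: pointer to PCI device.
 * @state: the current PCI connection state.
 *
 * This routine is registered to the PCI subsystem for error handling. This
 * function is called by the PCI subsystem after a PCI bus error affecting
 * this device has been detected. When this routine is invoked, it dispatches
 * the action to the proper SLI-3 or SLI-4 device error detected handling
 * routine, which will perform the proper error detected operation.
 *
 * Return codes
 * 	PCI_ERS_RESULT_NEED_RESET - need to reset before recovery
 * 	PCI_ERS_RESULT_DISCONNECT - device could not be recovered
 **/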
static pci_ers_result_t
lpfc_io_error_detected(struct pci_dev *pdev, pci_channel_state_t state)
{
	struct Scsi_Host *shost = pci_get_drvdata(pdev);
	struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
	pci_ers_result_t rc = PCI_ERS_RESULT_DISCONNECT;

	switch (phba->pci_dev_grp) {
	case LPFC_PCI_DEV_LP:
		rc = lpfc_io_error_detected_s3(pdev, state);
		break;
	case LPFC_PCI_DEV_OC:
		rc = lpfc_io_error_detected_s4(pdev, state);
		break;
	default:
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"1427 Invalid PCI device group: 0x%x\n",
				phba->pci_dev_grp);
		break;
	}
	return rc;
}
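/**
 * lpfc_io_slot_reset - lpfc method for restart PCI dev from scratch
 * @pdev: pointer to PCI device.
 *
 * This routine is registered to the PCI subsystem for error handling. This
 * function is called after PCI bus has been reset to restart the PCI card
 * from scratch, as if from a cold-boot. When this routine is invoked, it
 * dispatches the action to the proper SLI-3 or SLI-4 device reset handling
 * routine, which will perform the proper device reset.
 *
 * Return codes
 * 	PCI_ERS_RESULT_RECOVERED - the device has been recovered
 * 	PCI_ERS_RESULT_DISCONNECT - device could not be recovered
 **/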
static pci_ers_result_t
lpfc_io_slot_reset(struct pci_dev *pdev)
{
	struct Scsi_Host *shost = pci_get_drvdata(pdev);
	struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
	pci_ers_result_t rc = PCI_ERS_RESULT_DISCONNECT;

	switch (phba->pci_dev_grp) {
	case LPFC_PCI_DEV_LP:
		rc = lpfc_io_slot_reset_s3(pdev);
		break;
	case LPFC_PCI_DEV_OC:
		rc = lpfc_io_slot_reset_s4(pdev);
		break;
	default:
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"1428 Invalid PCI device group: 0x%x\n",
				phba->pci_dev_grp);
		break;
	}
	return rc;
}
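/**
 * lpfc_io_resume - lpfc method for resuming PCI I/O operation
 * @pdev: pointer to PCI device
 *
 * This routine is registered to the PCI subsystem for error handling. It
 * is called when kernel error recovery tells the lpfc driver that it is
 * ok to resume normal PCI operation after PCI bus error recovery. When
 * this routine is invoked, it dispatches the action to the proper SLI-3
 * or SLI-4 device io_resume routine, which will resume the device operation.
 **/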
static void
lpfc_io_resume(struct pci_dev *pdev)
{
	struct Scsi_Host *shost = pci_get_drvdata(pdev);
	struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;

	switch (phba->pci_dev_grp) {
	case LPFC_PCI_DEV_LP:
		lpfc_io_resume_s3(pdev);
		break;
	case LPFC_PCI_DEV_OC:
		lpfc_io_resume_s4(pdev);
		break;
	default:
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"1429 Invalid PCI device group: 0x%x\n",
				phba->pci_dev_grp);
		break;
	}
	return;
}

static struct pci_device_id lpfc_id_table[] = {
	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_VIPER,
		PCI_ANY_ID, PCI_ANY_ID, },
	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_FIREFLY,
		PCI_ANY_ID, PCI_ANY_ID, },
	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_THOR,
		PCI_ANY_ID, PCI_ANY_ID, },
	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_PEGASUS,
		PCI_ANY_ID, PCI_ANY_ID, },
	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_CENTAUR,
		PCI_ANY_ID, PCI_ANY_ID, },
	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_DRAGONFLY,
		PCI_ANY_ID, PCI_ANY_ID, },
	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_SUPERFLY,
		PCI_ANY_ID, PCI_ANY_ID, },
	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_RFLY,
		PCI_ANY_ID, PCI_ANY_ID, },
	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_PFLY,
		PCI_ANY_ID, PCI_ANY_ID, },
	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_NEPTUNE,
		PCI_ANY_ID, PCI_ANY_ID, },
	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_NEPTUNE_SCSP,
		PCI_ANY_ID, PCI_ANY_ID, },
	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_NEPTUNE_DCSP,
		PCI_ANY_ID, PCI_ANY_ID, },
	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_HELIOS,
		PCI_ANY_ID, PCI_ANY_ID, },
	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_HELIOS_SCSP,
		PCI_ANY_ID, PCI_ANY_ID, },
	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_HELIOS_DCSP,
		PCI_ANY_ID, PCI_ANY_ID, },
	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_BMID,
		PCI_ANY_ID, PCI_ANY_ID, },
	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_BSMB,
		PCI_ANY_ID, PCI_ANY_ID, },
	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_ZEPHYR,
		PCI_ANY_ID, PCI_ANY_ID, },
	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_HORNET,
		PCI_ANY_ID, PCI_ANY_ID, },
	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_ZEPHYR_SCSP,
		PCI_ANY_ID, PCI_ANY_ID, },
	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_ZEPHYR_DCSP,
		PCI_ANY_ID, PCI_ANY_ID, },
	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_ZMID,
		PCI_ANY_ID, PCI_ANY_ID, },
	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_ZSMB,
		PCI_ANY_ID, PCI_ANY_ID, },
	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_TFLY,
		PCI_ANY_ID, PCI_ANY_ID, },
	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_LP101,
		PCI_ANY_ID, PCI_ANY_ID, },
	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_LP10000S,
		PCI_ANY_ID, PCI_ANY_ID, },
	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_LP11000S,
		PCI_ANY_ID, PCI_ANY_ID, },
	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_LPE11000S,
		PCI_ANY_ID, PCI_ANY_ID, },
	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_SAT,
		PCI_ANY_ID, PCI_ANY_ID, },
	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_SAT_MID,
		PCI_ANY_ID, PCI_ANY_ID, },
	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_SAT_SMB,
		PCI_ANY_ID, PCI_ANY_ID, },
	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_SAT_DCSP,
		PCI_ANY_ID, PCI_ANY_ID, },
	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_SAT_SCSP,
		PCI_ANY_ID, PCI_ANY_ID, },
	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_SAT_S,
		PCI_ANY_ID, PCI_ANY_ID, },
	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_PROTEUS_VF,
		PCI_ANY_ID, PCI_ANY_ID, },
	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_PROTEUS_PF,
		PCI_ANY_ID, PCI_ANY_ID, },
	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_PROTEUS_S,
		PCI_ANY_ID, PCI_ANY_ID, },
	{PCI_VENDOR_ID_SERVERENGINE, PCI_DEVICE_ID_TIGERSHARK,
		PCI_ANY_ID, PCI_ANY_ID, },
	{PCI_VENDOR_ID_SERVERENGINE, PCI_DEVICE_ID_TOMCAT,
		PCI_ANY_ID, PCI_ANY_ID, },
	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_FALCON,
		PCI_ANY_ID, PCI_ANY_ID, },
	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_BALIUS,
		PCI_ANY_ID, PCI_ANY_ID, },
	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_LANCER_FC,
		PCI_ANY_ID, PCI_ANY_ID, },
	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_LANCER_FCOE,
		PCI_ANY_ID, PCI_ANY_ID, },
	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_LANCER_FC_VF,
		PCI_ANY_ID, PCI_ANY_ID, },
	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_LANCER_FCOE_VF,
		PCI_ANY_ID, PCI_ANY_ID, },
	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_SKYHAWK,
		PCI_ANY_ID, PCI_ANY_ID, },
	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_SKYHAWK_VF,
		PCI_ANY_ID, PCI_ANY_ID, },
	{ 0 }
};

MODULE_DEVICE_TABLE(pci, lpfc_id_table);

static const struct pci_error_handlers lpfc_err_handler = {
	.error_detected = lpfc_io_error_detected,
	.slot_reset = lpfc_io_slot_reset,
	.resume = lpfc_io_resume,
};

static struct pci_driver lpfc_driver = {
	.name		= LPFC_DRIVER_NAME,
	.id_table	= lpfc_id_table,
	.probe		= lpfc_pci_probe_one,
	.remove		= lpfc_pci_remove_one,
	.suspend	= lpfc_pci_suspend_one,
	.resume		= lpfc_pci_resume_one,
	.err_handler	= &lpfc_err_handler,
};

static const struct file_operations lpfc_mgmt_fop = {
	.owner = THIS_MODULE,
};

static struct miscdevice lpfc_mgmt_dev = {
	.minor = MISC_DYNAMIC_MINOR,
	.name = "lpfcmgmt",
	.fops = &lpfc_mgmt_fop,
};
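/**
 * lpfc_init - lpfc module initialization routine
 *
 * This routine is to be invoked when the lpfc module is loaded into the
 * kernel. The special kernel macro module_init() is used to indicate the
 * role of this routine to the kernel as lpfc module entry point.
 *
 * Return codes
 *   0 - successful
 *   -ENOMEM - FC attach transport failed
 *   all others - failed
 */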
static int __init
lpfc_init(void)
{
	int cpu;
	int error = 0;

	printk(LPFC_MODULE_DESC "\n");
	printk(LPFC_COPYRIGHT "\n");

	error = misc_register(&lpfc_mgmt_dev);
	if (error)
		printk(KERN_ERR "Could not register lpfcmgmt device, "
			"misc_register returned with status %d", error);

	if (lpfc_enable_npiv) {
		lpfc_transport_functions.vport_create = lpfc_vport_create;
		lpfc_transport_functions.vport_delete = lpfc_vport_delete;
	}
	lpfc_transport_template =
				fc_attach_transport(&lpfc_transport_functions);
	if (lpfc_transport_template == NULL)
		return -ENOMEM;
	if (lpfc_enable_npiv) {
		lpfc_vport_transport_template =
			fc_attach_transport(&lpfc_vport_transport_functions);
		if (lpfc_vport_transport_template == NULL) {
			fc_release_transport(lpfc_transport_template);
			return -ENOMEM;
		}
	}

	/* Initialize in case vector mapping is needed */
	lpfc_used_cpu = NULL;
	lpfc_present_cpu = 0;
	for_each_present_cpu(cpu)
		lpfc_present_cpu++;

	error = pci_register_driver(&lpfc_driver);
	if (error) {
		fc_release_transport(lpfc_transport_template);
		if (lpfc_enable_npiv)
			fc_release_transport(lpfc_vport_transport_template);
	}

	return error;
}
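/**
 * lpfc_exit - lpfc module removal routine
 *
 * This routine is invoked when the lpfc module is removed from the kernel.
 * The special kernel macro module_exit() is used to indicate the role of
 * this routine to the kernel as lpfc module exit point. It releases the
 * resources allocated in lpfc_init(), including the BlockGuard debug dump
 * buffers if they were allocated.
 */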
static void __exit
lpfc_exit(void)
{
	misc_deregister(&lpfc_mgmt_dev);
	pci_unregister_driver(&lpfc_driver);
	fc_release_transport(lpfc_transport_template);
	if (lpfc_enable_npiv)
		fc_release_transport(lpfc_vport_transport_template);
	if (_dump_buf_data) {
		printk(KERN_ERR	"9062 BLKGRD: freeing %lu pages for "
				"_dump_buf_data at 0x%p\n",
				(1L << _dump_buf_data_order), _dump_buf_data);
		free_pages((unsigned long)_dump_buf_data, _dump_buf_data_order);
	}

	if (_dump_buf_dif) {
		printk(KERN_ERR	"9049 BLKGRD: freeing %lu pages for "
				"_dump_buf_dif at 0x%p\n",
				(1L << _dump_buf_dif_order), _dump_buf_dif);
		free_pages((unsigned long)_dump_buf_dif, _dump_buf_dif_order);
	}
	kfree(lpfc_used_cpu);
}

module_init(lpfc_init);
module_exit(lpfc_exit);
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION(LPFC_MODULE_DESC);
MODULE_AUTHOR("Emulex Corporation - tech.support@emulex.com");
MODULE_VERSION("0:" LPFC_DRIVER_VERSION);