1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22#include <linux/blkdev.h>
23#include <linux/pci.h>
24#include <linux/interrupt.h>
25#include <linux/delay.h>
26
27#include <scsi/scsi.h>
28#include <scsi/scsi_cmnd.h>
29#include <scsi/scsi_device.h>
30#include <scsi/scsi_host.h>
31#include <scsi/scsi_transport_fc.h>
32#include <scsi/fc/fc_fs.h>
33
34#include "lpfc_hw4.h"
35#include "lpfc_hw.h"
36#include "lpfc_sli.h"
37#include "lpfc_sli4.h"
38#include "lpfc_nl.h"
39#include "lpfc_disc.h"
40#include "lpfc_scsi.h"
41#include "lpfc.h"
42#include "lpfc_crtn.h"
43#include "lpfc_logmsg.h"
44#include "lpfc_compat.h"
45#include "lpfc_debugfs.h"
46#include "lpfc_vport.h"
47
48
/*
 * lpfc_iocb_type - Classification of an IOCB command.
 * Produced by lpfc_sli_iocb_cmd_type() from the IOCB command code and used
 * to route completions.
 */
typedef enum _lpfc_iocb_type {
	LPFC_UNKNOWN_IOCB,	/* command code not recognized or not handled */
	LPFC_UNSOL_IOCB,	/* unsolicited (receive-side) IOCB */
	LPFC_SOL_IOCB,		/* solicited (driver-issued) IOCB */
	LPFC_ABORT_IOCB		/* abort/close class IOCB */
} lpfc_iocb_type;
55
56
57
58static int lpfc_sli_issue_mbox_s4(struct lpfc_hba *, LPFC_MBOXQ_t *,
59 uint32_t);
60static int lpfc_sli4_read_rev(struct lpfc_hba *, LPFC_MBOXQ_t *,
61 uint8_t *, uint32_t *);
62
/**
 * lpfc_get_iocb_from_iocbq - Get the iocb from the iocbq
 * @iocbq: driver iocb object.
 *
 * Returns a pointer to the SLI-3 IOCB embedded in the driver iocb queue
 * entry.  Caller must ensure @iocbq is non-NULL.
 **/
static IOCB_t *
lpfc_get_iocb_from_iocbq(struct lpfc_iocbq *iocbq)
{
	return &iocbq->iocb;
}
68
69
70
71
72
73
74
75
76
77
78
79
80
81static uint32_t
82lpfc_sli4_wq_put(struct lpfc_queue *q, union lpfc_wqe *wqe)
83{
84 union lpfc_wqe *temp_wqe = q->qe[q->host_index].wqe;
85 struct lpfc_register doorbell;
86 uint32_t host_index;
87
88
89 if (((q->host_index + 1) % q->entry_count) == q->hba_index)
90 return -ENOMEM;
91
92 if (!((q->host_index + 1) % LPFC_RELEASE_NOTIFICATION_INTERVAL))
93 bf_set(lpfc_wqe_gen_wqec, &wqe->generic, 1);
94
95 lpfc_sli_pcimem_bcopy(wqe, temp_wqe, q->entry_size);
96
97
98 host_index = q->host_index;
99 q->host_index = ((q->host_index + 1) % q->entry_count);
100
101
102 doorbell.word0 = 0;
103 bf_set(lpfc_wq_doorbell_num_posted, &doorbell, 1);
104 bf_set(lpfc_wq_doorbell_index, &doorbell, host_index);
105 bf_set(lpfc_wq_doorbell_id, &doorbell, q->queue_id);
106 writel(doorbell.word0, q->phba->sli4_hba.WQDBregaddr);
107 readl(q->phba->sli4_hba.WQDBregaddr);
108
109 return 0;
110}
111
112
113
114
115
116
117
118
119
120
121
122
123static uint32_t
124lpfc_sli4_wq_release(struct lpfc_queue *q, uint32_t index)
125{
126 uint32_t released = 0;
127
128 if (q->hba_index == index)
129 return 0;
130 do {
131 q->hba_index = ((q->hba_index + 1) % q->entry_count);
132 released++;
133 } while (q->hba_index != index);
134 return released;
135}
136
137
138
139
140
141
142
143
144
145
146
147
148
149static uint32_t
150lpfc_sli4_mq_put(struct lpfc_queue *q, struct lpfc_mqe *mqe)
151{
152 struct lpfc_mqe *temp_mqe = q->qe[q->host_index].mqe;
153 struct lpfc_register doorbell;
154 uint32_t host_index;
155
156
157 if (((q->host_index + 1) % q->entry_count) == q->hba_index)
158 return -ENOMEM;
159 lpfc_sli_pcimem_bcopy(mqe, temp_mqe, q->entry_size);
160
161 q->phba->mbox = (MAILBOX_t *)temp_mqe;
162
163
164 host_index = q->host_index;
165 q->host_index = ((q->host_index + 1) % q->entry_count);
166
167
168 doorbell.word0 = 0;
169 bf_set(lpfc_mq_doorbell_num_posted, &doorbell, 1);
170 bf_set(lpfc_mq_doorbell_id, &doorbell, q->queue_id);
171 writel(doorbell.word0, q->phba->sli4_hba.MQDBregaddr);
172 readl(q->phba->sli4_hba.MQDBregaddr);
173 return 0;
174}
175
176
177
178
179
180
181
182
183
184
185
/**
 * lpfc_sli4_mq_release - Release one entry of a Mailbox Queue
 * @q: The Mailbox Queue to operate on.
 *
 * Clears the in-flight mailbox pointer and advances hba_index by one,
 * returning the single entry to the host.
 *
 * Returns the number of released entries (always 1).
 **/
static uint32_t
lpfc_sli4_mq_release(struct lpfc_queue *q)
{
	/* Clear the mailbox pointer for completion */
	q->phba->mbox = NULL;
	q->hba_index = ((q->hba_index + 1) % q->entry_count);
	return 1;
}
194
195
196
197
198
199
200
201
202
203
/**
 * lpfc_sli4_eq_get - Get the next valid Event Queue Entry from an EQ
 * @q: The Event Queue to get the first valid EQE from.
 *
 * Returns a pointer to the next valid EQE on @q, advancing hba_index past
 * it, or NULL when either the EQE's valid bit is not set or consuming it
 * would collide with host_index (queue considered full/unprocessed).
 * The returned EQE is still marked valid; lpfc_sli4_eq_release() clears
 * the valid bits and rings the doorbell.
 **/
static struct lpfc_eqe *
lpfc_sli4_eq_get(struct lpfc_queue *q)
{
	struct lpfc_eqe *eqe = q->qe[q->hba_index].eqe;

	/* If the next EQE is not valid then we are done */
	if (!bf_get(lpfc_eqe_valid, eqe))
		return NULL;
	/* If the host has not yet processed the next entry then we are done */
	if (((q->hba_index + 1) % q->entry_count) == q->host_index)
		return NULL;

	q->hba_index = ((q->hba_index + 1) % q->entry_count);
	return eqe;
}
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
/**
 * lpfc_sli4_eq_release - Release processed EQEs and optionally rearm the EQ
 * @q: The Event Queue to operate on.
 * @arm: true to rearm the EQ for further interrupts, false otherwise.
 *
 * Clears the valid bit of every EQE between host_index and hba_index
 * (entries the host has consumed via lpfc_sli4_eq_get), then rings the
 * EQCQ doorbell to return them to the HBA, optionally setting the arm and
 * clear-interrupt bits.
 *
 * Returns the number of EQEs released.  When nothing was released and no
 * rearm was requested the doorbell is deliberately not written.
 **/
uint32_t
lpfc_sli4_eq_release(struct lpfc_queue *q, bool arm)
{
	uint32_t released = 0;
	struct lpfc_eqe *temp_eqe;
	struct lpfc_register doorbell;

	/* Invalidate every consumed entry so it can be reused by the HBA */
	while (q->hba_index != q->host_index) {
		temp_eqe = q->qe[q->host_index].eqe;
		bf_set(lpfc_eqe_valid, temp_eqe, 0);
		released++;
		q->host_index = ((q->host_index + 1) % q->entry_count);
	}
	if (unlikely(released == 0 && !arm))
		return 0;

	/* Ring doorbell: report released count, optionally rearm */
	doorbell.word0 = 0;
	if (arm) {
		bf_set(lpfc_eqcq_doorbell_arm, &doorbell, 1);
		bf_set(lpfc_eqcq_doorbell_eqci, &doorbell, 1);
	}
	bf_set(lpfc_eqcq_doorbell_num_released, &doorbell, released);
	bf_set(lpfc_eqcq_doorbell_qt, &doorbell, LPFC_QUEUE_TYPE_EVENT);
	bf_set(lpfc_eqcq_doorbell_eqid, &doorbell, q->queue_id);
	writel(doorbell.word0, q->phba->sli4_hba.EQCQDBregaddr);
	return released;
}
264
265
266
267
268
269
270
271
272
273
/**
 * lpfc_sli4_cq_get - Get the next valid Completion Queue Entry from a CQ
 * @q: The Completion Queue to get the first valid CQE from.
 *
 * Returns a pointer to the next valid CQE on @q, advancing hba_index past
 * it, or NULL when either the CQE's valid bit is not set or consuming it
 * would collide with host_index.  The returned CQE is still marked valid;
 * lpfc_sli4_cq_release() clears the valid bits and rings the doorbell.
 **/
static struct lpfc_cqe *
lpfc_sli4_cq_get(struct lpfc_queue *q)
{
	struct lpfc_cqe *cqe;

	/* If the next CQE is not valid then we are done */
	if (!bf_get(lpfc_cqe_valid, q->qe[q->hba_index].cqe))
		return NULL;
	/* If the host has not yet processed the next entry then we are done */
	if (((q->hba_index + 1) % q->entry_count) == q->host_index)
		return NULL;

	cqe = q->qe[q->hba_index].cqe;
	q->hba_index = ((q->hba_index + 1) % q->entry_count);
	return cqe;
}
290
291
292
293
294
295
296
297
298
299
300
301
302
303
304
305
/**
 * lpfc_sli4_cq_release - Release processed CQEs and optionally rearm the CQ
 * @q: The Completion Queue to operate on.
 * @arm: true to rearm the CQ for further completions, false otherwise.
 *
 * Clears the valid bit of every CQE between host_index and hba_index
 * (entries the host has consumed via lpfc_sli4_cq_get), then rings the
 * EQCQ doorbell to return them to the HBA, optionally setting the arm bit.
 *
 * Returns the number of CQEs released.  When nothing was released and no
 * rearm was requested the doorbell is deliberately not written.
 **/
uint32_t
lpfc_sli4_cq_release(struct lpfc_queue *q, bool arm)
{
	uint32_t released = 0;
	struct lpfc_cqe *temp_qe;
	struct lpfc_register doorbell;

	/* Invalidate every consumed entry so it can be reused by the HBA */
	while (q->hba_index != q->host_index) {
		temp_qe = q->qe[q->host_index].cqe;
		bf_set(lpfc_cqe_valid, temp_qe, 0);
		released++;
		q->host_index = ((q->host_index + 1) % q->entry_count);
	}
	if (unlikely(released == 0 && !arm))
		return 0;

	/* Ring doorbell: report released count, optionally rearm */
	doorbell.word0 = 0;
	if (arm)
		bf_set(lpfc_eqcq_doorbell_arm, &doorbell, 1);
	bf_set(lpfc_eqcq_doorbell_num_released, &doorbell, released);
	bf_set(lpfc_eqcq_doorbell_qt, &doorbell, LPFC_QUEUE_TYPE_COMPLETION);
	bf_set(lpfc_eqcq_doorbell_cqid, &doorbell, q->queue_id);
	writel(doorbell.word0, q->phba->sli4_hba.EQCQDBregaddr);
	return released;
}
333
334
335
336
337
338
339
340
341
342
343
344
345
/**
 * lpfc_sli4_rq_put - Put a Receive Buffer Queue Entry on a Receive Queue pair
 * @hq: The Header Receive Queue to operate on.
 * @dq: The Data Receive Queue to operate on.
 * @hrqe: The header receive queue entry to put on @hq.
 * @drqe: The data receive queue entry to put on @dq.
 *
 * Posts one header/data RQE pair.  The two queues advance in lockstep, so
 * their types and host indices must agree.  The doorbell is only rung once
 * every LPFC_RQ_POST_BATCH postings (hardware is told about buffers in
 * batches).
 *
 * Returns the index the pair was posted at on success, -EINVAL if the
 * queues are mismatched, or -EBUSY if the header queue is full.
 **/
static int
lpfc_sli4_rq_put(struct lpfc_queue *hq, struct lpfc_queue *dq,
		 struct lpfc_rqe *hrqe, struct lpfc_rqe *drqe)
{
	struct lpfc_rqe *temp_hrqe = hq->qe[hq->host_index].rqe;
	struct lpfc_rqe *temp_drqe = dq->qe[dq->host_index].rqe;
	struct lpfc_register doorbell;
	int put_index = hq->host_index;

	/* Header and data queues must be a matched, synchronized pair */
	if (hq->type != LPFC_HRQ || dq->type != LPFC_DRQ)
		return -EINVAL;
	if (hq->host_index != dq->host_index)
		return -EINVAL;
	/* If the HBA has not yet processed the next entry then we are done */
	if (((hq->host_index + 1) % hq->entry_count) == hq->hba_index)
		return -EBUSY;
	lpfc_sli_pcimem_bcopy(hrqe, temp_hrqe, hq->entry_size);
	lpfc_sli_pcimem_bcopy(drqe, temp_drqe, dq->entry_size);

	/* Update the host indices */
	hq->host_index = ((hq->host_index + 1) % hq->entry_count);
	dq->host_index = ((dq->host_index + 1) % dq->entry_count);

	/* Ring the header queue doorbell once per full batch */
	if (!(hq->host_index % LPFC_RQ_POST_BATCH)) {
		doorbell.word0 = 0;
		bf_set(lpfc_rq_doorbell_num_posted, &doorbell,
		       LPFC_RQ_POST_BATCH);
		bf_set(lpfc_rq_doorbell_id, &doorbell, hq->queue_id);
		writel(doorbell.word0, hq->phba->sli4_hba.RQDBregaddr);
	}
	return put_index;
}
379
380
381
382
383
384
385
386
387
388
389
/**
 * lpfc_sli4_rq_release - Release one entry of a Receive Queue pair
 * @hq: The Header Receive Queue to operate on.
 * @dq: The Data Receive Queue to operate on.
 *
 * Advances the hba_index of both queues by one, keeping them in lockstep.
 *
 * Returns the number of released entries (1), or 0 if the queue types
 * are mismatched.
 **/
static uint32_t
lpfc_sli4_rq_release(struct lpfc_queue *hq, struct lpfc_queue *dq)
{
	if ((hq->type != LPFC_HRQ) || (dq->type != LPFC_DRQ))
		return 0;
	hq->hba_index = ((hq->hba_index + 1) % hq->entry_count);
	dq->hba_index = ((dq->hba_index + 1) % dq->entry_count);
	return 1;
}
399
400
401
402
403
404
405
406
407
408
409
/**
 * lpfc_cmd_iocb - Get next command iocb entry in the ring
 * @phba: Pointer to HBA context object.
 * @pring: Pointer to driver SLI ring object.
 *
 * Returns a pointer to the command-ring slot at the ring's current cmdidx.
 * Caller holds the hbalock (SLI-3 ring indices are lock-protected).
 **/
static inline IOCB_t *
lpfc_cmd_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring)
{
	return (IOCB_t *) (((char *) pring->cmdringaddr) +
			   pring->cmdidx * phba->iocb_cmd_size);
}
416
417
418
419
420
421
422
423
424
425
426
/**
 * lpfc_resp_iocb - Get next response iocb entry in the ring
 * @phba: Pointer to HBA context object.
 * @pring: Pointer to driver SLI ring object.
 *
 * Returns a pointer to the response-ring slot at the ring's current rspidx.
 * Caller holds the hbalock (SLI-3 ring indices are lock-protected).
 **/
static inline IOCB_t *
lpfc_resp_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring)
{
	return (IOCB_t *) (((char *) pring->rspringaddr) +
			   pring->rspidx * phba->iocb_rsp_size);
}
433
434
435
436
437
438
439
440
441
442
/**
 * __lpfc_sli_get_iocbq - Allocate an iocb object from the iocb pool
 * @phba: Pointer to HBA context object.
 *
 * Takes the first iocb off the driver's free iocb list.  Caller must hold
 * the hbalock.  Returns NULL when the pool is empty (list_remove_head
 * leaves @iocbq NULL in that case).
 **/
static struct lpfc_iocbq *
__lpfc_sli_get_iocbq(struct lpfc_hba *phba)
{
	struct list_head *lpfc_iocb_list = &phba->lpfc_iocb_list;
	struct lpfc_iocbq * iocbq = NULL;

	list_remove_head(lpfc_iocb_list, iocbq, struct lpfc_iocbq, list);
	return iocbq;
}
452
453
454
455
456
457
458
459
460
461
462
463
464
465static struct lpfc_sglq *
466__lpfc_clear_active_sglq(struct lpfc_hba *phba, uint16_t xritag)
467{
468 uint16_t adj_xri;
469 struct lpfc_sglq *sglq;
470 adj_xri = xritag - phba->sli4_hba.max_cfg_param.xri_base;
471 if (adj_xri > phba->sli4_hba.max_cfg_param.max_xri)
472 return NULL;
473 sglq = phba->sli4_hba.lpfc_sglq_active_list[adj_xri];
474 phba->sli4_hba.lpfc_sglq_active_list[adj_xri] = NULL;
475 return sglq;
476}
477
478
479
480
481
482
483
484
485
486
487
488
489
490static struct lpfc_sglq *
491__lpfc_get_active_sglq(struct lpfc_hba *phba, uint16_t xritag)
492{
493 uint16_t adj_xri;
494 struct lpfc_sglq *sglq;
495 adj_xri = xritag - phba->sli4_hba.max_cfg_param.xri_base;
496 if (adj_xri > phba->sli4_hba.max_cfg_param.max_xri)
497 return NULL;
498 sglq = phba->sli4_hba.lpfc_sglq_active_list[adj_xri];
499 return sglq;
500}
501
502
503
504
505
506
507
508
509
510
511static struct lpfc_sglq *
512__lpfc_sli_get_sglq(struct lpfc_hba *phba)
513{
514 struct list_head *lpfc_sgl_list = &phba->sli4_hba.lpfc_sgl_list;
515 struct lpfc_sglq *sglq = NULL;
516 uint16_t adj_xri;
517 list_remove_head(lpfc_sgl_list, sglq, struct lpfc_sglq, list);
518 adj_xri = sglq->sli4_xritag - phba->sli4_hba.max_cfg_param.xri_base;
519 phba->sli4_hba.lpfc_sglq_active_list[adj_xri] = sglq;
520 return sglq;
521}
522
523
524
525
526
527
528
529
530
531
/**
 * lpfc_sli_get_iocbq - Allocate an iocb object from the iocb pool (locked)
 * @phba: Pointer to HBA context object.
 *
 * Lock-taking wrapper around __lpfc_sli_get_iocbq: acquires the hbalock
 * with interrupts disabled for the duration of the pool removal.
 *
 * Returns the allocated iocbq, or NULL when the pool is empty.
 **/
struct lpfc_iocbq *
lpfc_sli_get_iocbq(struct lpfc_hba *phba)
{
	struct lpfc_iocbq * iocbq = NULL;
	unsigned long iflags;

	spin_lock_irqsave(&phba->hbalock, iflags);
	iocbq = __lpfc_sli_get_iocbq(phba);
	spin_unlock_irqrestore(&phba->hbalock, iflags);
	return iocbq;
}
543
544
545
546
547
548
549
550
551
552
553
554
555
556
557
558
559
560
561
/**
 * __lpfc_sli_release_iocbq_s4 - Release an iocbq to the pool (SLI-4 path)
 * @phba: Pointer to HBA context object.
 * @iocbq: Pointer to driver iocb object.
 *
 * Releases @iocbq and its associated sglq (if any).  If the iocb was
 * aborted, its sglq goes onto the aborted-ELS sgl list (the XRI is still
 * owned by the HBA until the abort completes); otherwise the sglq returns
 * straight to the free sgl list.  Finally the iocbq itself is scrubbed
 * (everything from the iocb member onward) and returned to the free pool.
 *
 * Caller must hold the hbalock; the abts_sgl_list_lock is nested inside it.
 **/
static void
__lpfc_sli_release_iocbq_s4(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq)
{
	struct lpfc_sglq *sglq;
	size_t start_clean = offsetof(struct lpfc_iocbq, iocb);
	unsigned long iflag;

	if (iocbq->sli4_xritag == NO_XRI)
		sglq = NULL;
	else
		sglq = __lpfc_clear_active_sglq(phba, iocbq->sli4_xritag);
	if (sglq) {
		/* Driver-aborted or HBA-rejected-as-aborted exchanges keep
		 * their sgl on the aborted list until the XRI is freed.
		 */
		if (iocbq->iocb_flag & LPFC_DRIVER_ABORTED
			|| ((iocbq->iocb.ulpStatus == IOSTAT_LOCAL_REJECT)
			&& (iocbq->iocb.un.ulpWord[4]
				== IOERR_SLI_ABORTED))) {
			spin_lock_irqsave(&phba->sli4_hba.abts_sgl_list_lock,
					iflag);
			list_add(&sglq->list,
				&phba->sli4_hba.lpfc_abts_els_sgl_list);
			spin_unlock_irqrestore(
				&phba->sli4_hba.abts_sgl_list_lock, iflag);
		} else
			list_add(&sglq->list, &phba->sli4_hba.lpfc_sgl_list);
	}

	/*
	 * Clean all volatile data fields, preserve iotag and node struct.
	 */
	memset((char *)iocbq + start_clean, 0, sizeof(*iocbq) - start_clean);
	iocbq->sli4_xritag = NO_XRI;
	list_add_tail(&iocbq->list, &phba->lpfc_iocb_list);
}
596
597
598
599
600
601
602
603
604
605
606
607static void
608__lpfc_sli_release_iocbq_s3(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq)
609{
610 size_t start_clean = offsetof(struct lpfc_iocbq, iocb);
611
612
613
614
615 memset((char*)iocbq + start_clean, 0, sizeof(*iocbq) - start_clean);
616 iocbq->sli4_xritag = NO_XRI;
617 list_add_tail(&iocbq->list, &phba->lpfc_iocb_list);
618}
619
620
621
622
623
624
625
626
627
628
629
/**
 * __lpfc_sli_release_iocbq - Release an iocbq via the per-interface hook
 * @phba: Pointer to HBA context object.
 * @iocbq: Pointer to driver iocb object.
 *
 * Dispatches to the SLI-3 or SLI-4 release routine installed on @phba.
 * Caller must hold the hbalock.
 **/
static void
__lpfc_sli_release_iocbq(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq)
{
	phba->__lpfc_sli_release_iocbq(phba, iocbq);
}
635
636
637
638
639
640
641
642
643
/**
 * lpfc_sli_release_iocbq - Release an iocbq to the pool (locked)
 * @phba: Pointer to HBA context object.
 * @iocbq: Pointer to driver iocb object.
 *
 * Lock-taking wrapper around __lpfc_sli_release_iocbq: acquires the
 * hbalock with interrupts disabled for the duration of the release.
 **/
void
lpfc_sli_release_iocbq(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq)
{
	unsigned long iflags;

	/*
	 * Clean all volatile data fields, preserve iotag and node struct.
	 */
	spin_lock_irqsave(&phba->hbalock, iflags);
	__lpfc_sli_release_iocbq(phba, iocbq);
	spin_unlock_irqrestore(&phba->hbalock, iflags);
}
656
657
658
659
660
661
662
663
664
665
666
667
668
669void
670lpfc_sli_cancel_iocbs(struct lpfc_hba *phba, struct list_head *iocblist,
671 uint32_t ulpstatus, uint32_t ulpWord4)
672{
673 struct lpfc_iocbq *piocb;
674
675 while (!list_empty(iocblist)) {
676 list_remove_head(iocblist, piocb, struct lpfc_iocbq, list);
677
678 if (!piocb->iocb_cmpl)
679 lpfc_sli_release_iocbq(phba, piocb);
680 else {
681 piocb->iocb.ulpStatus = ulpstatus;
682 piocb->iocb.un.ulpWord[4] = ulpWord4;
683 (piocb->iocb_cmpl) (phba, piocb, piocb);
684 }
685 }
686 return;
687}
688
689
690
691
692
693
694
695
696
697
698
699
700
701
702
703
704static lpfc_iocb_type
705lpfc_sli_iocb_cmd_type(uint8_t iocb_cmnd)
706{
707 lpfc_iocb_type type = LPFC_UNKNOWN_IOCB;
708
709 if (iocb_cmnd > CMD_MAX_IOCB_CMD)
710 return 0;
711
712 switch (iocb_cmnd) {
713 case CMD_XMIT_SEQUENCE_CR:
714 case CMD_XMIT_SEQUENCE_CX:
715 case CMD_XMIT_BCAST_CN:
716 case CMD_XMIT_BCAST_CX:
717 case CMD_ELS_REQUEST_CR:
718 case CMD_ELS_REQUEST_CX:
719 case CMD_CREATE_XRI_CR:
720 case CMD_CREATE_XRI_CX:
721 case CMD_GET_RPI_CN:
722 case CMD_XMIT_ELS_RSP_CX:
723 case CMD_GET_RPI_CR:
724 case CMD_FCP_IWRITE_CR:
725 case CMD_FCP_IWRITE_CX:
726 case CMD_FCP_IREAD_CR:
727 case CMD_FCP_IREAD_CX:
728 case CMD_FCP_ICMND_CR:
729 case CMD_FCP_ICMND_CX:
730 case CMD_FCP_TSEND_CX:
731 case CMD_FCP_TRSP_CX:
732 case CMD_FCP_TRECEIVE_CX:
733 case CMD_FCP_AUTO_TRSP_CX:
734 case CMD_ADAPTER_MSG:
735 case CMD_ADAPTER_DUMP:
736 case CMD_XMIT_SEQUENCE64_CR:
737 case CMD_XMIT_SEQUENCE64_CX:
738 case CMD_XMIT_BCAST64_CN:
739 case CMD_XMIT_BCAST64_CX:
740 case CMD_ELS_REQUEST64_CR:
741 case CMD_ELS_REQUEST64_CX:
742 case CMD_FCP_IWRITE64_CR:
743 case CMD_FCP_IWRITE64_CX:
744 case CMD_FCP_IREAD64_CR:
745 case CMD_FCP_IREAD64_CX:
746 case CMD_FCP_ICMND64_CR:
747 case CMD_FCP_ICMND64_CX:
748 case CMD_FCP_TSEND64_CX:
749 case CMD_FCP_TRSP64_CX:
750 case CMD_FCP_TRECEIVE64_CX:
751 case CMD_GEN_REQUEST64_CR:
752 case CMD_GEN_REQUEST64_CX:
753 case CMD_XMIT_ELS_RSP64_CX:
754 case DSSCMD_IWRITE64_CR:
755 case DSSCMD_IWRITE64_CX:
756 case DSSCMD_IREAD64_CR:
757 case DSSCMD_IREAD64_CX:
758 case DSSCMD_INVALIDATE_DEK:
759 case DSSCMD_SET_KEK:
760 case DSSCMD_GET_KEK_ID:
761 case DSSCMD_GEN_XFER:
762 type = LPFC_SOL_IOCB;
763 break;
764 case CMD_ABORT_XRI_CN:
765 case CMD_ABORT_XRI_CX:
766 case CMD_CLOSE_XRI_CN:
767 case CMD_CLOSE_XRI_CX:
768 case CMD_XRI_ABORTED_CX:
769 case CMD_ABORT_MXRI64_CN:
770 type = LPFC_ABORT_IOCB;
771 break;
772 case CMD_RCV_SEQUENCE_CX:
773 case CMD_RCV_ELS_REQ_CX:
774 case CMD_RCV_SEQUENCE64_CX:
775 case CMD_RCV_ELS_REQ64_CX:
776 case CMD_ASYNC_STATUS:
777 case CMD_IOCB_RCV_SEQ64_CX:
778 case CMD_IOCB_RCV_ELS64_CX:
779 case CMD_IOCB_RCV_CONT64_CX:
780 case CMD_IOCB_RET_XRI64_CX:
781 type = LPFC_UNSOL_IOCB;
782 break;
783 case CMD_IOCB_XMIT_MSEQ64_CR:
784 case CMD_IOCB_XMIT_MSEQ64_CX:
785 case CMD_IOCB_RCV_SEQ_LIST64_CX:
786 case CMD_IOCB_RCV_ELS_LIST64_CX:
787 case CMD_IOCB_CLOSE_EXTENDED_CN:
788 case CMD_IOCB_ABORT_EXTENDED_CN:
789 case CMD_IOCB_RET_HBQE64_CN:
790 case CMD_IOCB_FCP_IBIDIR64_CR:
791 case CMD_IOCB_FCP_IBIDIR64_CX:
792 case CMD_IOCB_FCP_ITASKMGT64_CX:
793 case CMD_IOCB_LOGENTRY_CN:
794 case CMD_IOCB_LOGENTRY_ASYNC_CN:
795 printk("%s - Unhandled SLI-3 Command x%x\n",
796 __func__, iocb_cmnd);
797 type = LPFC_UNKNOWN_IOCB;
798 break;
799 default:
800 type = LPFC_UNKNOWN_IOCB;
801 break;
802 }
803
804 return type;
805}
806
807
808
809
810
811
812
813
814
815
816
817
/**
 * lpfc_sli_ring_map - Issue CONFIG_RING mailbox for all rings
 * @phba: Pointer to HBA context object.
 *
 * Issues a CONFIG_RING mailbox command (polled) for every SLI ring.  On
 * any failure the HBA is marked in error state and -ENXIO is returned;
 * remaining rings are not configured.
 *
 * Returns 0 on success, -ENOMEM if the mailbox cannot be allocated,
 * -ENXIO if a CONFIG_RING command fails.
 **/
static int
lpfc_sli_ring_map(struct lpfc_hba *phba)
{
	struct lpfc_sli *psli = &phba->sli;
	LPFC_MBOXQ_t *pmb;
	MAILBOX_t *pmbox;
	int i, rc, ret = 0;

	pmb = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!pmb)
		return -ENOMEM;
	pmbox = &pmb->u.mb;
	phba->link_state = LPFC_INIT_MBX_CMDS;
	for (i = 0; i < psli->num_rings; i++) {
		lpfc_config_ring(phba, i, pmb);
		rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL);
		if (rc != MBX_SUCCESS) {
			lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
					"0446 Adapter failed to init (%d), "
					"mbxCmd x%x CFG_RING, mbxStatus x%x, "
					"ring %d\n",
					rc, pmbox->mbxCommand,
					pmbox->mbxStatus, i);
			phba->link_state = LPFC_HBA_ERROR;
			ret = -ENXIO;
			break;
		}
	}
	mempool_free(pmb, phba->mbox_mem_pool);
	return ret;
}
849
850
851
852
853
854
855
856
857
858
859
860
861
862static int
863lpfc_sli_ringtxcmpl_put(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
864 struct lpfc_iocbq *piocb)
865{
866 list_add_tail(&piocb->list, &pring->txcmplq);
867 pring->txcmplq_cnt++;
868 if ((unlikely(pring->ringno == LPFC_ELS_RING)) &&
869 (piocb->iocb.ulpCommand != CMD_ABORT_XRI_CN) &&
870 (piocb->iocb.ulpCommand != CMD_CLOSE_XRI_CN)) {
871 if (!piocb->vport)
872 BUG();
873 else
874 mod_timer(&piocb->vport->els_tmofunc,
875 jiffies + HZ * (phba->fc_ratov << 1));
876 }
877
878
879 return 0;
880}
881
882
883
884
885
886
887
888
889
890
891
892static struct lpfc_iocbq *
893lpfc_sli_ringtx_get(struct lpfc_hba *phba, struct lpfc_sli_ring *pring)
894{
895 struct lpfc_iocbq *cmd_iocb;
896
897 list_remove_head((&pring->txq), cmd_iocb, struct lpfc_iocbq, list);
898 if (cmd_iocb != NULL)
899 pring->txq_cnt--;
900 return cmd_iocb;
901}
902
903
904
905
906
907
908
909
910
911
912
913
914
915
916
/**
 * lpfc_sli_next_iocb_slot - Get the next free command-ring slot
 * @phba: Pointer to HBA context object.
 * @pring: Pointer to driver SLI ring object.
 *
 * Advances next_cmdidx (with wrap) and returns a pointer to the slot it
 * designates, or NULL when the ring is full.  When the cached get index
 * appears full, the port's cmdGetInx is re-read from the shared memory
 * area; a get index beyond the ring size means port memory corruption,
 * so the HBA is put in error state and the worker thread is woken to
 * handle the error attention.
 *
 * Caller must hold the hbalock.
 **/
static IOCB_t *
lpfc_sli_next_iocb_slot (struct lpfc_hba *phba, struct lpfc_sli_ring *pring)
{
	struct lpfc_pgp *pgp = &phba->port_gp[pring->ringno];
	uint32_t  max_cmd_idx = pring->numCiocb;
	if ((pring->next_cmdidx == pring->cmdidx) &&
	   (++pring->next_cmdidx >= max_cmd_idx))
		pring->next_cmdidx = 0;

	if (unlikely(pring->local_getidx == pring->next_cmdidx)) {
		/* Re-read the port's command get index */
		pring->local_getidx = le32_to_cpu(pgp->cmdGetInx);

		if (unlikely(pring->local_getidx >= max_cmd_idx)) {
			lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
					"0315 Ring %d issue: portCmdGet %d "
					"is bigger than cmd ring %d\n",
					pring->ringno,
					pring->local_getidx, max_cmd_idx);

			phba->link_state = LPFC_HBA_ERROR;
			/*
			 * All error attention handlers are posted to
			 * worker thread
			 */
			phba->work_ha |= HA_ERATT;
			phba->work_hs = HS_FFER3;

			lpfc_worker_wake_up(phba);

			return NULL;
		}

		if (pring->local_getidx == pring->next_cmdidx)
			return NULL;
	}

	return lpfc_cmd_iocb(phba, pring);
}
956
957
958
959
960
961
962
963
964
965
966
967
968
969uint16_t
970lpfc_sli_next_iotag(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq)
971{
972 struct lpfc_iocbq **new_arr;
973 struct lpfc_iocbq **old_arr;
974 size_t new_len;
975 struct lpfc_sli *psli = &phba->sli;
976 uint16_t iotag;
977
978 spin_lock_irq(&phba->hbalock);
979 iotag = psli->last_iotag;
980 if(++iotag < psli->iocbq_lookup_len) {
981 psli->last_iotag = iotag;
982 psli->iocbq_lookup[iotag] = iocbq;
983 spin_unlock_irq(&phba->hbalock);
984 iocbq->iotag = iotag;
985 return iotag;
986 } else if (psli->iocbq_lookup_len < (0xffff
987 - LPFC_IOCBQ_LOOKUP_INCREMENT)) {
988 new_len = psli->iocbq_lookup_len + LPFC_IOCBQ_LOOKUP_INCREMENT;
989 spin_unlock_irq(&phba->hbalock);
990 new_arr = kzalloc(new_len * sizeof (struct lpfc_iocbq *),
991 GFP_KERNEL);
992 if (new_arr) {
993 spin_lock_irq(&phba->hbalock);
994 old_arr = psli->iocbq_lookup;
995 if (new_len <= psli->iocbq_lookup_len) {
996
997 kfree(new_arr);
998 iotag = psli->last_iotag;
999 if(++iotag < psli->iocbq_lookup_len) {
1000 psli->last_iotag = iotag;
1001 psli->iocbq_lookup[iotag] = iocbq;
1002 spin_unlock_irq(&phba->hbalock);
1003 iocbq->iotag = iotag;
1004 return iotag;
1005 }
1006 spin_unlock_irq(&phba->hbalock);
1007 return 0;
1008 }
1009 if (psli->iocbq_lookup)
1010 memcpy(new_arr, old_arr,
1011 ((psli->last_iotag + 1) *
1012 sizeof (struct lpfc_iocbq *)));
1013 psli->iocbq_lookup = new_arr;
1014 psli->iocbq_lookup_len = new_len;
1015 psli->last_iotag = iotag;
1016 psli->iocbq_lookup[iotag] = iocbq;
1017 spin_unlock_irq(&phba->hbalock);
1018 iocbq->iotag = iotag;
1019 kfree(old_arr);
1020 return iotag;
1021 }
1022 } else
1023 spin_unlock_irq(&phba->hbalock);
1024
1025 lpfc_printf_log(phba, KERN_ERR,LOG_SLI,
1026 "0318 Failed to allocate IOTAG.last IOTAG is %d\n",
1027 psli->last_iotag);
1028
1029 return 0;
1030}
1031
1032
1033
1034
1035
1036
1037
1038
1039
1040
1041
1042
1043
1044
1045
/**
 * lpfc_sli_submit_iocb - Submit an iocb to the firmware
 * @phba: Pointer to HBA context object.
 * @pring: Pointer to driver SLI ring object.
 * @iocb: Pointer to the command-ring slot to fill.
 * @nextiocb: Pointer to the driver iocb object being submitted.
 *
 * Copies @nextiocb into the ring slot, queues it on txcmplq if a
 * completion handler is set (otherwise releases it immediately), and
 * writes the updated put index so the HBA sees the new entry.  The wmb()
 * between the ring-slot copy and the bookkeeping/put-index update is
 * required ordering.  Caller must hold the hbalock.
 **/
static void
lpfc_sli_submit_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
		IOCB_t *iocb, struct lpfc_iocbq *nextiocb)
{
	/*
	 * Set up an iotag: only iocbs with a completion handler get one.
	 */
	nextiocb->iocb.ulpIoTag = (nextiocb->iocb_cmpl) ? nextiocb->iotag : 0;

	/* Trace ELS-ring submissions in the slow-ring trace buffer */
	if (pring->ringno == LPFC_ELS_RING) {
		lpfc_debugfs_slow_ring_trc(phba,
			"IOCB cmd ring:   wd4:x%08x wd6:x%08x wd7:x%08x",
			*(((uint32_t *) &nextiocb->iocb) + 4),
			*(((uint32_t *) &nextiocb->iocb) + 6),
			*(((uint32_t *) &nextiocb->iocb) + 7));
	}

	/*
	 * Issue iocb command to adapter
	 */
	lpfc_sli_pcimem_bcopy(&nextiocb->iocb, iocb, phba->iocb_cmd_size);
	wmb();
	pring->stats.iocb_cmd++;

	/*
	 * If there is no completion routine to call, we can release the
	 * IOCB buffer back right now. For IOCBs, like QUE_RING_BUF,
	 * that have no rsp ring completion, iocb_cmpl MUST be NULL.
	 */
	if (nextiocb->iocb_cmpl)
		lpfc_sli_ringtxcmpl_put(phba, pring, nextiocb);
	else
		__lpfc_sli_release_iocbq(phba, nextiocb);

	/*
	 * Let the HBA know what IOCB slot will be the next one the
	 * driver will put a command into.
	 */
	pring->cmdidx = pring->next_cmdidx;
	writel(pring->cmdidx, &phba->host_gp[pring->ringno].cmdPutInx);
}
1088
1089
1090
1091
1092
1093
1094
1095
1096
1097
1098
1099
1100
/**
 * lpfc_sli_update_full_ring - Tell the HBA the ring is full
 * @phba: Pointer to HBA context object.
 * @pring: Pointer to driver SLI ring object.
 *
 * Marks the ring so the HBA raises a ring-available interrupt when slots
 * free up, then rings the chip attention register (R0CE_REQ requests the
 * ring-available notification).  Caller must hold the hbalock.
 **/
static void
lpfc_sli_update_full_ring(struct lpfc_hba *phba, struct lpfc_sli_ring *pring)
{
	int ringno = pring->ringno;

	pring->flag |= LPFC_CALL_RING_AVAILABLE;

	wmb();

	/*
	 * Set ring 'ringno' to SET R0CE_REQ in Chip Att register.
	 * The HBA will tell us when an IOCB entry is available.
	 */
	writel((CA_R0ATT|CA_R0CE_REQ) << (ringno*4), phba->CAregaddr);
	readl(phba->CAregaddr); /* flush */

	pring->stats.iocb_cmd_full++;
}
1119
1120
1121
1122
1123
1124
1125
1126
1127
1128
/**
 * lpfc_sli_update_ring - Ring chip attention for new ring entries
 * @phba: Pointer to HBA context object.
 * @pring: Pointer to driver SLI ring object.
 *
 * Notifies the HBA that new command entries were posted to the ring.
 * With the Command Ring Polling feature enabled the HBA polls the put
 * index itself and no attention write is needed.  Caller must hold the
 * hbalock.
 **/
static void
lpfc_sli_update_ring(struct lpfc_hba *phba, struct lpfc_sli_ring *pring)
{
	int ringno = pring->ringno;

	/*
	 * Tell the HBA that there is work to do in this ring.
	 */
	if (!(phba->sli3_options & LPFC_SLI3_CRP_ENABLED)) {
		wmb();
		writel(CA_R0ATT << (ringno * 4), phba->CAregaddr);
		readl(phba->CAregaddr); /* flush */
	}
}
1143
1144
1145
1146
1147
1148
1149
1150
1151
1152
/**
 * lpfc_sli_resume_iocb - Drain a ring's txq into the command ring
 * @phba: Pointer to HBA context object.
 * @pring: Pointer to driver SLI ring object.
 *
 * Submits queued iocbs from the ring's txq to the firmware while both a
 * free ring slot and a queued iocb exist.  Submission requires the link
 * to be up, and for the FCP ring additionally that outstanding link
 * events have been processed (LPFC_PROCESS_LA).  If the loop stopped
 * because the ring was full, the HBA is asked to interrupt when space
 * becomes available.  Caller must hold the hbalock.
 **/
static void
lpfc_sli_resume_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring)
{
	IOCB_t *iocb;
	struct lpfc_iocbq *nextiocb;

	/*
	 * Check to see if:
	 *  (a) there is anything on the txq to send
	 *  (b) link is up
	 *  (c) link attention events can be processed (fcp ring only)
	 *  (d) IOCB processing is not blocked by the outstanding mbox command.
	 */
	if (pring->txq_cnt &&
	    lpfc_is_link_up(phba) &&
	    (pring->ringno != phba->sli.fcp_ring ||
	     phba->sli.sli_flag & LPFC_PROCESS_LA)) {

		while ((iocb = lpfc_sli_next_iocb_slot(phba, pring)) &&
		       (nextiocb = lpfc_sli_ringtx_get(phba, pring)))
			lpfc_sli_submit_iocb(phba, pring, iocb, nextiocb);

		if (iocb)
			lpfc_sli_update_ring(phba, pring);
		else
			lpfc_sli_update_full_ring(phba, pring);
	}

	return;
}
1183
1184
1185
1186
1187
1188
1189
1190
1191
1192
1193
/**
 * lpfc_sli_next_hbq_slot - Get the next free HBQ slot
 * @phba: Pointer to HBA context object.
 * @hbqno: HBQ number.
 *
 * Advances next_hbqPutIdx (with wrap) and returns a pointer to the HBQ
 * entry at the current put index, or NULL when the HBQ is full.  When the
 * cached get index appears full it is re-read from the host memory area
 * the HBA updates; a get index beyond the entry count indicates port
 * memory corruption and puts the HBA in error state.
 *
 * Caller must hold the hbalock.
 **/
static struct lpfc_hbq_entry *
lpfc_sli_next_hbq_slot(struct lpfc_hba *phba, uint32_t hbqno)
{
	struct hbq_s *hbqp = &phba->hbqs[hbqno];

	if (hbqp->next_hbqPutIdx == hbqp->hbqPutIdx &&
	    ++hbqp->next_hbqPutIdx >= hbqp->entry_count)
		hbqp->next_hbqPutIdx = 0;

	if (unlikely(hbqp->local_hbqGetIdx == hbqp->next_hbqPutIdx)) {
		uint32_t raw_index = phba->hbq_get[hbqno];
		uint32_t getidx = le32_to_cpu(raw_index);

		hbqp->local_hbqGetIdx = getidx;

		if (unlikely(hbqp->local_hbqGetIdx >= hbqp->entry_count)) {
			lpfc_printf_log(phba, KERN_ERR,
					LOG_SLI | LOG_VPORT,
					"1802 HBQ %d: local_hbqGetIdx "
					"%u is > than hbqp->entry_count %u\n",
					hbqno, hbqp->local_hbqGetIdx,
					hbqp->entry_count);

			phba->link_state = LPFC_HBA_ERROR;
			return NULL;
		}

		if (hbqp->local_hbqGetIdx == hbqp->next_hbqPutIdx)
			return NULL;
	}

	return (struct lpfc_hbq_entry *) phba->hbqs[hbqno].hbq_virt +
			hbqp->hbqPutIdx;
}
1228
1229
1230
1231
1232
1233
1234
1235
1236
1237
/**
 * lpfc_sli_hbqbuf_free_all - Free all HBQ buffers
 * @phba: Pointer to HBA context object.
 *
 * Frees every buffer on every HBQ's buffer list and every buffer on the
 * pending receive-buffer list, then marks the HBQ machinery unusable
 * (hbq_in_use = 0).  Pending buffers with an untagged (-1) or
 * out-of-range HBQ number are freed through the ELS HBQ's free routine.
 * Called with no outstanding DMA to the HBQ buffers (e.g. HBA reset).
 **/
void
lpfc_sli_hbqbuf_free_all(struct lpfc_hba *phba)
{
	struct lpfc_dmabuf *dmabuf, *next_dmabuf;
	struct hbq_dmabuf *hbq_buf;
	unsigned long flags;
	int i, hbq_count;
	uint32_t hbqno;

	hbq_count = lpfc_sli_hbq_count();
	/* Return all memory used by all HBQs */
	spin_lock_irqsave(&phba->hbalock, flags);
	for (i = 0; i < hbq_count; ++i) {
		list_for_each_entry_safe(dmabuf, next_dmabuf,
				&phba->hbqs[i].hbq_buffer_list, list) {
			hbq_buf = container_of(dmabuf, struct hbq_dmabuf, dbuf);
			list_del(&hbq_buf->dbuf.list);
			(phba->hbqs[i].hbq_free_buffer)(phba, hbq_buf);
		}
		phba->hbqs[i].buffer_count = 0;
	}
	/* Return all HBQ buffer that are in-fly */
	list_for_each_entry_safe(dmabuf, next_dmabuf, &phba->rb_pend_list,
				 list) {
		hbq_buf = container_of(dmabuf, struct hbq_dmabuf, dbuf);
		list_del(&hbq_buf->dbuf.list);
		if (hbq_buf->tag == -1) {
			(phba->hbqs[LPFC_ELS_HBQ].hbq_free_buffer)
				(phba, hbq_buf);
		} else {
			hbqno = hbq_buf->tag >> 16;
			if (hbqno >= LPFC_MAX_HBQS)
				(phba->hbqs[LPFC_ELS_HBQ].hbq_free_buffer)
					(phba, hbq_buf);
			else
				(phba->hbqs[hbqno].hbq_free_buffer)(phba,
					hbq_buf);
		}
	}

	/* Mark the HBQs not in use */
	phba->hbq_in_use = 0;
	spin_unlock_irqrestore(&phba->hbalock, flags);
}
1282
1283
1284
1285
1286
1287
1288
1289
1290
1291
1292
1293
1294
/**
 * lpfc_sli_hbq_to_firmware - Post an HBQ buffer via the per-interface hook
 * @phba: Pointer to HBA context object.
 * @hbqno: HBQ number.
 * @hbq_buf: Pointer to the HBQ buffer to post.
 *
 * Dispatches to the SLI-3 or SLI-4 posting routine installed on @phba.
 * Returns 0 on success, nonzero/negative on failure.  Caller must hold
 * the hbalock.
 **/
static int
lpfc_sli_hbq_to_firmware(struct lpfc_hba *phba, uint32_t hbqno,
			 struct hbq_dmabuf *hbq_buf)
{
	return phba->lpfc_sli_hbq_to_firmware(phba, hbqno, hbq_buf);
}
1301
1302
1303
1304
1305
1306
1307
1308
1309
1310
1311
1312
/**
 * lpfc_sli_hbq_to_firmware_s3 - Post an HBQ buffer to the firmware (SLI-3)
 * @phba: Pointer to HBA context object.
 * @hbqno: HBQ number.
 * @hbq_buf: Pointer to the HBQ buffer to post.
 *
 * Fills the next free HBQ entry with the buffer's DMA address, size and
 * tag, writes the new put index to the HBQ doorbell, and moves the buffer
 * onto the HBQ's in-use list.  Caller must hold the hbalock.
 *
 * Returns 0 on success, -ENOMEM when the HBQ is full.
 **/
static int
lpfc_sli_hbq_to_firmware_s3(struct lpfc_hba *phba, uint32_t hbqno,
			    struct hbq_dmabuf *hbq_buf)
{
	struct lpfc_hbq_entry *hbqe;
	dma_addr_t physaddr = hbq_buf->dbuf.phys;

	/* Get next HBQ entry slot to use */
	hbqe = lpfc_sli_next_hbq_slot(phba, hbqno);
	if (hbqe) {
		struct hbq_s *hbqp = &phba->hbqs[hbqno];

		hbqe->bde.addrHigh = le32_to_cpu(putPaddrHigh(physaddr));
		hbqe->bde.addrLow  = le32_to_cpu(putPaddrLow(physaddr));
		hbqe->bde.tus.f.bdeSize = hbq_buf->size;
		hbqe->bde.tus.f.bdeFlags = 0;
		hbqe->bde.tus.w = le32_to_cpu(hbqe->bde.tus.w);
		hbqe->buffer_tag = le32_to_cpu(hbq_buf->tag);
		/* Sync SLIM: put index must be visible after the entry */
		hbqp->hbqPutIdx = hbqp->next_hbqPutIdx;
		writel(hbqp->hbqPutIdx, phba->hbq_put + hbqno);
		/* flush */
		readl(phba->hbq_put + hbqno);
		list_add_tail(&hbq_buf->dbuf.list, &hbqp->hbq_buffer_list);
		return 0;
	} else
		return -ENOMEM;
}
1341
1342
1343
1344
1345
1346
1347
1348
1349
1350
1351
/**
 * lpfc_sli_hbq_to_firmware_s4 - Post an HBQ buffer to the firmware (SLI-4)
 * @phba: Pointer to HBA context object.
 * @hbqno: HBQ number.
 * @hbq_buf: Pointer to the HBQ buffer to post.
 *
 * Posts the buffer's header and data DMA addresses as an RQE pair on the
 * header/data receive queues.  On success the RQ put index becomes the
 * buffer's tag and the buffer moves to the HBQ's in-use list.  Caller
 * must hold the hbalock.
 *
 * Returns 0 on success, a negative errno from lpfc_sli4_rq_put on failure.
 **/
static int
lpfc_sli_hbq_to_firmware_s4(struct lpfc_hba *phba, uint32_t hbqno,
			    struct hbq_dmabuf *hbq_buf)
{
	int rc;
	struct lpfc_rqe hrqe;
	struct lpfc_rqe drqe;

	hrqe.address_lo = putPaddrLow(hbq_buf->hbuf.phys);
	hrqe.address_hi = putPaddrHigh(hbq_buf->hbuf.phys);
	drqe.address_lo = putPaddrLow(hbq_buf->dbuf.phys);
	drqe.address_hi = putPaddrHigh(hbq_buf->dbuf.phys);
	rc = lpfc_sli4_rq_put(phba->sli4_hba.hdr_rq, phba->sli4_hba.dat_rq,
			      &hrqe, &drqe);
	if (rc < 0)
		return rc;
	hbq_buf->tag = rc;
	list_add_tail(&hbq_buf->dbuf.list, &phba->hbqs[hbqno].hbq_buffer_list);
	return 0;
}
1372
1373
/* HBQ for ELS and CT traffic. */
static struct lpfc_hbq_init lpfc_els_hbq = {
	.rn = 1,		/* receive-buffer notification multiplier */
	.entry_count = 200,	/* total HBQ entries */
	.mask_count = 0,
	.profile = 0,
	.ring_mask = (1 << LPFC_ELS_RING),
	.buffer_count = 0,	/* buffers currently posted (runtime) */
	.init_count = 40,	/* buffers posted at init */
	.add_count = 40,	/* buffers added per replenish */
};
1384
1385
/* HBQ for the extra ring if needed. */
static struct lpfc_hbq_init lpfc_extra_hbq = {
	.rn = 1,		/* receive-buffer notification multiplier */
	.entry_count = 200,	/* total HBQ entries */
	.mask_count = 0,
	.profile = 0,
	.ring_mask = (1 << LPFC_EXTRA_RING),
	.buffer_count = 0,	/* buffers currently posted (runtime) */
	.init_count = 0,	/* no buffers posted at init */
	.add_count = 5,		/* buffers added per replenish */
};
1396
1397
/* Array of HBQ definitions, indexed by HBQ number. */
struct lpfc_hbq_init *lpfc_hbq_defs[] = {
	&lpfc_els_hbq,
	&lpfc_extra_hbq,
};
1402
1403
1404
1405
1406
1407
1408
1409
1410
1411
1412
/**
 * lpfc_sli_hbqbuf_fill_hbqs - Allocate and post buffers to an HBQ
 * @phba: Pointer to HBA context object.
 * @hbqno: HBQ number.
 * @count: Number of HBQ buffers to add.
 *
 * Allocates up to @count buffers (capped so the HBQ never exceeds its
 * configured entry_count) outside the lock, then posts them to the
 * firmware under the hbalock.  Buffers that cannot be posted, or all
 * buffers if the HBQs are no longer in use, are freed again.
 *
 * Returns the number of buffers successfully posted.
 **/
static int
lpfc_sli_hbqbuf_fill_hbqs(struct lpfc_hba *phba, uint32_t hbqno, uint32_t count)
{
	uint32_t i, posted = 0;
	unsigned long flags;
	struct hbq_dmabuf *hbq_buffer;
	LIST_HEAD(hbq_buf_list);
	if (!phba->hbqs[hbqno].hbq_alloc_buffer)
		return 0;

	if ((phba->hbqs[hbqno].buffer_count + count) >
	    lpfc_hbq_defs[hbqno]->entry_count)
		count = lpfc_hbq_defs[hbqno]->entry_count -
					phba->hbqs[hbqno].buffer_count;
	if (!count)
		return 0;
	/* Allocate HBQ entries (no lock held during allocation) */
	for (i = 0; i < count; i++) {
		hbq_buffer = (phba->hbqs[hbqno].hbq_alloc_buffer)(phba);
		if (!hbq_buffer)
			break;
		list_add_tail(&hbq_buffer->dbuf.list, &hbq_buf_list);
	}
	/* Check whether HBQ is still in use */
	spin_lock_irqsave(&phba->hbalock, flags);
	if (!phba->hbq_in_use)
		goto err;
	while (!list_empty(&hbq_buf_list)) {
		list_remove_head(&hbq_buf_list, hbq_buffer, struct hbq_dmabuf,
				 dbuf.list);
		hbq_buffer->tag = (phba->hbqs[hbqno].buffer_count |
				      (hbqno << 16));
		if (!lpfc_sli_hbq_to_firmware(phba, hbqno, hbq_buffer)) {
			phba->hbqs[hbqno].buffer_count++;
			posted++;
		} else
			(phba->hbqs[hbqno].hbq_free_buffer)(phba, hbq_buffer);
	}
	spin_unlock_irqrestore(&phba->hbalock, flags);
	return posted;
err:
	spin_unlock_irqrestore(&phba->hbalock, flags);
	while (!list_empty(&hbq_buf_list)) {
		list_remove_head(&hbq_buf_list, hbq_buffer, struct hbq_dmabuf,
				 dbuf.list);
		(phba->hbqs[hbqno].hbq_free_buffer)(phba, hbq_buffer);
	}
	return 0;
}
1462
1463
1464
1465
1466
1467
1468
1469
1470
1471
1472int
1473lpfc_sli_hbqbuf_add_hbqs(struct lpfc_hba *phba, uint32_t qno)
1474{
1475 return(lpfc_sli_hbqbuf_fill_hbqs(phba, qno,
1476 lpfc_hbq_defs[qno]->add_count));
1477}
1478
1479
1480
1481
1482
1483
1484
1485
1486
1487
1488static int
1489lpfc_sli_hbqbuf_init_hbqs(struct lpfc_hba *phba, uint32_t qno)
1490{
1491 return(lpfc_sli_hbqbuf_fill_hbqs(phba, qno,
1492 lpfc_hbq_defs[qno]->init_count));
1493}
1494
1495
1496
1497
1498
1499
1500
1501
1502
1503static struct hbq_dmabuf *
1504lpfc_sli_hbqbuf_get(struct list_head *rb_list)
1505{
1506 struct lpfc_dmabuf *d_buf;
1507
1508 list_remove_head(rb_list, d_buf, struct lpfc_dmabuf, list);
1509 if (!d_buf)
1510 return NULL;
1511 return container_of(d_buf, struct hbq_dmabuf, dbuf);
1512}
1513
1514
1515
1516
1517
1518
1519
1520
1521
1522
1523
/**
 * lpfc_sli_hbqbuf_find - Find the HBQ buffer associated with a tag
 * @phba: Pointer to HBA context object.
 * @tag: Tag of the HBQ buffer (HBQ number in the upper 16 bits).
 *
 * Searches the tagged HBQ's in-use buffer list for a buffer with a
 * matching tag, taking the hbalock around the walk.  A bad tag is logged.
 *
 * Returns the matching buffer, or NULL if none is found.
 **/
static struct hbq_dmabuf *
lpfc_sli_hbqbuf_find(struct lpfc_hba *phba, uint32_t tag)
{
	struct lpfc_dmabuf *d_buf;
	struct hbq_dmabuf *hbq_buf;
	uint32_t hbqno;

	hbqno = tag >> 16;
	if (hbqno >= LPFC_MAX_HBQS)
		return NULL;

	spin_lock_irq(&phba->hbalock);
	list_for_each_entry(d_buf, &phba->hbqs[hbqno].hbq_buffer_list, list) {
		hbq_buf = container_of(d_buf, struct hbq_dmabuf, dbuf);
		if (hbq_buf->tag == tag) {
			spin_unlock_irq(&phba->hbalock);
			return hbq_buf;
		}
	}
	spin_unlock_irq(&phba->hbalock);
	lpfc_printf_log(phba, KERN_ERR, LOG_SLI | LOG_VPORT,
			"1803 Bad hbq tag. Data: x%x x%x\n",
			tag, phba->hbqs[tag >> 16].buffer_count);
	return NULL;
}
1549
1550
1551
1552
1553
1554
1555
1556
1557
1558
1559void
1560lpfc_sli_free_hbq(struct lpfc_hba *phba, struct hbq_dmabuf *hbq_buffer)
1561{
1562 uint32_t hbqno;
1563
1564 if (hbq_buffer) {
1565 hbqno = hbq_buffer->tag >> 16;
1566 if (lpfc_sli_hbq_to_firmware(phba, hbqno, hbq_buffer))
1567 (phba->hbqs[hbqno].hbq_free_buffer)(phba, hbq_buffer);
1568 }
1569}
1570
1571
1572
1573
1574
1575
1576
1577
1578
1579
1580static int
1581lpfc_sli_chk_mbx_command(uint8_t mbxCommand)
1582{
1583 uint8_t ret;
1584
1585 switch (mbxCommand) {
1586 case MBX_LOAD_SM:
1587 case MBX_READ_NV:
1588 case MBX_WRITE_NV:
1589 case MBX_WRITE_VPARMS:
1590 case MBX_RUN_BIU_DIAG:
1591 case MBX_INIT_LINK:
1592 case MBX_DOWN_LINK:
1593 case MBX_CONFIG_LINK:
1594 case MBX_CONFIG_RING:
1595 case MBX_RESET_RING:
1596 case MBX_READ_CONFIG:
1597 case MBX_READ_RCONFIG:
1598 case MBX_READ_SPARM:
1599 case MBX_READ_STATUS:
1600 case MBX_READ_RPI:
1601 case MBX_READ_XRI:
1602 case MBX_READ_REV:
1603 case MBX_READ_LNK_STAT:
1604 case MBX_REG_LOGIN:
1605 case MBX_UNREG_LOGIN:
1606 case MBX_READ_LA:
1607 case MBX_CLEAR_LA:
1608 case MBX_DUMP_MEMORY:
1609 case MBX_DUMP_CONTEXT:
1610 case MBX_RUN_DIAGS:
1611 case MBX_RESTART:
1612 case MBX_UPDATE_CFG:
1613 case MBX_DOWN_LOAD:
1614 case MBX_DEL_LD_ENTRY:
1615 case MBX_RUN_PROGRAM:
1616 case MBX_SET_MASK:
1617 case MBX_SET_VARIABLE:
1618 case MBX_UNREG_D_ID:
1619 case MBX_KILL_BOARD:
1620 case MBX_CONFIG_FARP:
1621 case MBX_BEACON:
1622 case MBX_LOAD_AREA:
1623 case MBX_RUN_BIU_DIAG64:
1624 case MBX_CONFIG_PORT:
1625 case MBX_READ_SPARM64:
1626 case MBX_READ_RPI64:
1627 case MBX_REG_LOGIN64:
1628 case MBX_READ_LA64:
1629 case MBX_WRITE_WWN:
1630 case MBX_SET_DEBUG:
1631 case MBX_LOAD_EXP_ROM:
1632 case MBX_ASYNCEVT_ENABLE:
1633 case MBX_REG_VPI:
1634 case MBX_UNREG_VPI:
1635 case MBX_HEARTBEAT:
1636 case MBX_PORT_CAPABILITIES:
1637 case MBX_PORT_IOV_CONTROL:
1638 case MBX_SLI4_CONFIG:
1639 case MBX_SLI4_REQ_FTRS:
1640 case MBX_REG_FCFI:
1641 case MBX_UNREG_FCFI:
1642 case MBX_REG_VFI:
1643 case MBX_UNREG_VFI:
1644 case MBX_INIT_VPI:
1645 case MBX_INIT_VFI:
1646 case MBX_RESUME_RPI:
1647 ret = mbxCommand;
1648 break;
1649 default:
1650 ret = MBX_SHUTDOWN;
1651 break;
1652 }
1653 return ret;
1654}
1655
1656
1657
1658
1659
1660
1661
1662
1663
1664
1665
1666
1667void
1668lpfc_sli_wake_mbox_wait(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmboxq)
1669{
1670 wait_queue_head_t *pdone_q;
1671 unsigned long drvr_flag;
1672
1673
1674
1675
1676
1677 pmboxq->mbox_flag |= LPFC_MBX_WAKE;
1678 spin_lock_irqsave(&phba->hbalock, drvr_flag);
1679 pdone_q = (wait_queue_head_t *) pmboxq->context1;
1680 if (pdone_q)
1681 wake_up_interruptible(pdone_q);
1682 spin_unlock_irqrestore(&phba->hbalock, drvr_flag);
1683 return;
1684}
1685
1686
1687
1688
1689
1690
1691
1692
1693
1694
1695
1696
1697void
1698lpfc_sli_def_mbox_cmpl(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
1699{
1700 struct lpfc_dmabuf *mp;
1701 uint16_t rpi, vpi;
1702 int rc;
1703
1704 mp = (struct lpfc_dmabuf *) (pmb->context1);
1705
1706 if (mp) {
1707 lpfc_mbuf_free(phba, mp->virt, mp->phys);
1708 kfree(mp);
1709 }
1710
1711 if ((pmb->u.mb.mbxCommand == MBX_UNREG_LOGIN) &&
1712 (phba->sli_rev == LPFC_SLI_REV4))
1713 lpfc_sli4_free_rpi(phba, pmb->u.mb.un.varUnregLogin.rpi);
1714
1715
1716
1717
1718
1719 if (!(phba->pport->load_flag & FC_UNLOADING) &&
1720 pmb->u.mb.mbxCommand == MBX_REG_LOGIN64 &&
1721 !pmb->u.mb.mbxStatus) {
1722 rpi = pmb->u.mb.un.varWords[0];
1723 vpi = pmb->u.mb.un.varRegLogin.vpi - phba->vpi_base;
1724 lpfc_unreg_login(phba, vpi, rpi, pmb);
1725 pmb->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
1726 rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT);
1727 if (rc != MBX_NOT_FINISHED)
1728 return;
1729 }
1730
1731 if (bf_get(lpfc_mqe_command, &pmb->u.mqe) == MBX_SLI4_CONFIG)
1732 lpfc_sli4_mbox_cmd_free(phba, pmb);
1733 else
1734 mempool_free(pmb, phba->mbox_mem_pool);
1735}
1736
1737
1738
1739
1740
1741
1742
1743
1744
1745
1746
1747
1748
1749
1750int
1751lpfc_sli_handle_mb_event(struct lpfc_hba *phba)
1752{
1753 MAILBOX_t *pmbox;
1754 LPFC_MBOXQ_t *pmb;
1755 int rc;
1756 LIST_HEAD(cmplq);
1757
1758 phba->sli.slistat.mbox_event++;
1759
1760
1761 spin_lock_irq(&phba->hbalock);
1762 list_splice_init(&phba->sli.mboxq_cmpl, &cmplq);
1763 spin_unlock_irq(&phba->hbalock);
1764
1765
1766 do {
1767 list_remove_head(&cmplq, pmb, LPFC_MBOXQ_t, list);
1768 if (pmb == NULL)
1769 break;
1770
1771 pmbox = &pmb->u.mb;
1772
1773 if (pmbox->mbxCommand != MBX_HEARTBEAT) {
1774 if (pmb->vport) {
1775 lpfc_debugfs_disc_trc(pmb->vport,
1776 LPFC_DISC_TRC_MBOX_VPORT,
1777 "MBOX cmpl vport: cmd:x%x mb:x%x x%x",
1778 (uint32_t)pmbox->mbxCommand,
1779 pmbox->un.varWords[0],
1780 pmbox->un.varWords[1]);
1781 }
1782 else {
1783 lpfc_debugfs_disc_trc(phba->pport,
1784 LPFC_DISC_TRC_MBOX,
1785 "MBOX cmpl: cmd:x%x mb:x%x x%x",
1786 (uint32_t)pmbox->mbxCommand,
1787 pmbox->un.varWords[0],
1788 pmbox->un.varWords[1]);
1789 }
1790 }
1791
1792
1793
1794
1795 if (lpfc_sli_chk_mbx_command(pmbox->mbxCommand) ==
1796 MBX_SHUTDOWN) {
1797
1798 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
1799 "(%d):0323 Unknown Mailbox command "
1800 "x%x (x%x) Cmpl\n",
1801 pmb->vport ? pmb->vport->vpi : 0,
1802 pmbox->mbxCommand,
1803 lpfc_sli4_mbox_opcode_get(phba, pmb));
1804 phba->link_state = LPFC_HBA_ERROR;
1805 phba->work_hs = HS_FFER3;
1806 lpfc_handle_eratt(phba);
1807 continue;
1808 }
1809
1810 if (pmbox->mbxStatus) {
1811 phba->sli.slistat.mbox_stat_err++;
1812 if (pmbox->mbxStatus == MBXERR_NO_RESOURCES) {
1813
1814 lpfc_printf_log(phba, KERN_INFO,
1815 LOG_MBOX | LOG_SLI,
1816 "(%d):0305 Mbox cmd cmpl "
1817 "error - RETRYing Data: x%x "
1818 "(x%x) x%x x%x x%x\n",
1819 pmb->vport ? pmb->vport->vpi :0,
1820 pmbox->mbxCommand,
1821 lpfc_sli4_mbox_opcode_get(phba,
1822 pmb),
1823 pmbox->mbxStatus,
1824 pmbox->un.varWords[0],
1825 pmb->vport->port_state);
1826 pmbox->mbxStatus = 0;
1827 pmbox->mbxOwner = OWN_HOST;
1828 rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT);
1829 if (rc != MBX_NOT_FINISHED)
1830 continue;
1831 }
1832 }
1833
1834
1835 lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI,
1836 "(%d):0307 Mailbox cmd x%x (x%x) Cmpl x%p "
1837 "Data: x%x x%x x%x x%x x%x x%x x%x x%x x%x\n",
1838 pmb->vport ? pmb->vport->vpi : 0,
1839 pmbox->mbxCommand,
1840 lpfc_sli4_mbox_opcode_get(phba, pmb),
1841 pmb->mbox_cmpl,
1842 *((uint32_t *) pmbox),
1843 pmbox->un.varWords[0],
1844 pmbox->un.varWords[1],
1845 pmbox->un.varWords[2],
1846 pmbox->un.varWords[3],
1847 pmbox->un.varWords[4],
1848 pmbox->un.varWords[5],
1849 pmbox->un.varWords[6],
1850 pmbox->un.varWords[7]);
1851
1852 if (pmb->mbox_cmpl)
1853 pmb->mbox_cmpl(phba,pmb);
1854 } while (1);
1855 return 0;
1856}
1857
1858
1859
1860
1861
1862
1863
1864
1865
1866
1867
1868
1869
1870static struct lpfc_dmabuf *
1871lpfc_sli_get_buff(struct lpfc_hba *phba,
1872 struct lpfc_sli_ring *pring,
1873 uint32_t tag)
1874{
1875 struct hbq_dmabuf *hbq_entry;
1876
1877 if (tag & QUE_BUFTAG_BIT)
1878 return lpfc_sli_ring_taggedbuf_get(phba, pring, tag);
1879 hbq_entry = lpfc_sli_hbqbuf_find(phba, tag);
1880 if (!hbq_entry)
1881 return NULL;
1882 return &hbq_entry->dbuf;
1883}
1884
1885
1886
1887
1888
1889
1890
1891
1892
1893
1894
1895
1896
1897static int
1898lpfc_complete_unsol_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
1899 struct lpfc_iocbq *saveq, uint32_t fch_r_ctl,
1900 uint32_t fch_type)
1901{
1902 int i;
1903
1904
1905 if (pring->prt[0].profile) {
1906 if (pring->prt[0].lpfc_sli_rcv_unsol_event)
1907 (pring->prt[0].lpfc_sli_rcv_unsol_event) (phba, pring,
1908 saveq);
1909 return 1;
1910 }
1911
1912
1913 for (i = 0; i < pring->num_mask; i++) {
1914 if ((pring->prt[i].rctl == fch_r_ctl) &&
1915 (pring->prt[i].type == fch_type)) {
1916 if (pring->prt[i].lpfc_sli_rcv_unsol_event)
1917 (pring->prt[i].lpfc_sli_rcv_unsol_event)
1918 (phba, pring, saveq);
1919 return 1;
1920 }
1921 }
1922 return 0;
1923}
1924
1925
1926
1927
1928
1929
1930
1931
1932
1933
1934
1935
1936
1937
1938
/**
 * lpfc_sli_process_unsol_iocb - Unsolicited iocb handler
 * @phba: Pointer to HBA context object.
 * @pring: Pointer to driver SLI ring object.
 * @saveq: Pointer to the unsolicited iocb (head of a possible chain).
 *
 * Called by the ring event handlers (with no lock held — callers drop
 * hbalock first) when an unsolicited iocb arrives.  Attaches the DMA
 * buffers referenced by the iocb(s), assembles multi-iocb sequences on
 * pring->iocb_continue_saveq, derives the FC R_CTL/TYPE, and hands the
 * completed sequence to lpfc_complete_unsol_iocb().
 *
 * Returns 1 when the caller may free saveq, 0 when the iocb was queued
 * as an intermediate part of a sequence and ownership was retained.
 **/
static int
lpfc_sli_process_unsol_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
			    struct lpfc_iocbq *saveq)
{
	IOCB_t * irsp;
	WORD5 * w5p;
	uint32_t Rctl, Type;
	uint32_t match;
	struct lpfc_iocbq *iocbq;
	struct lpfc_dmabuf *dmzbuf;

	match = 0;
	irsp = &(saveq->iocb);

	/* Async status events have their own per-ring handler */
	if (irsp->ulpCommand == CMD_ASYNC_STATUS) {
		if (pring->lpfc_sli_rcv_async_status)
			pring->lpfc_sli_rcv_async_status(phba, pring, saveq);
		else
			lpfc_printf_log(phba,
					KERN_WARNING,
					LOG_SLI,
					"0316 Ring %d handler: unexpected "
					"ASYNC_STATUS iocb received evt_code "
					"0x%x\n",
					pring->ringno,
					irsp->un.asyncstat.evt_code);
		return 1;
	}

	/*
	 * RET_XRI with HBQs enabled: the firmware is returning up to
	 * three HBQ buffers; free each one that is present.
	 */
	if ((irsp->ulpCommand == CMD_IOCB_RET_XRI64_CX) &&
	    (phba->sli3_options & LPFC_SLI3_HBQ_ENABLED)) {
		if (irsp->ulpBdeCount > 0) {
			dmzbuf = lpfc_sli_get_buff(phba, pring,
					irsp->un.ulpWord[3]);
			lpfc_in_buf_free(phba, dmzbuf);
		}

		if (irsp->ulpBdeCount > 1) {
			dmzbuf = lpfc_sli_get_buff(phba, pring,
					irsp->unsli3.sli3Words[3]);
			lpfc_in_buf_free(phba, dmzbuf);
		}

		if (irsp->ulpBdeCount > 2) {
			dmzbuf = lpfc_sli_get_buff(phba, pring,
				irsp->unsli3.sli3Words[7]);
			lpfc_in_buf_free(phba, dmzbuf);
		}

		return 1;
	}

	/*
	 * HBQ mode: resolve the buffer tags carried in the iocb (and in
	 * every chained iocb on saveq->list) into context2/context3
	 * buffer pointers for the upper-layer handler.
	 */
	if (phba->sli3_options & LPFC_SLI3_HBQ_ENABLED) {
		if (irsp->ulpBdeCount != 0) {
			saveq->context2 = lpfc_sli_get_buff(phba, pring,
						irsp->un.ulpWord[3]);
			if (!saveq->context2)
				lpfc_printf_log(phba,
					KERN_ERR,
					LOG_SLI,
					"0341 Ring %d Cannot find buffer for "
					"an unsolicited iocb. tag 0x%x\n",
					pring->ringno,
					irsp->un.ulpWord[3]);
		}
		if (irsp->ulpBdeCount == 2) {
			saveq->context3 = lpfc_sli_get_buff(phba, pring,
						irsp->unsli3.sli3Words[7]);
			if (!saveq->context3)
				lpfc_printf_log(phba,
					KERN_ERR,
					LOG_SLI,
					"0342 Ring %d Cannot find buffer for an"
					" unsolicited iocb. tag 0x%x\n",
					pring->ringno,
					irsp->unsli3.sli3Words[7]);
		}
		list_for_each_entry(iocbq, &saveq->list, list) {
			irsp = &(iocbq->iocb);
			if (irsp->ulpBdeCount != 0) {
				iocbq->context2 = lpfc_sli_get_buff(phba, pring,
							irsp->un.ulpWord[3]);
				if (!iocbq->context2)
					lpfc_printf_log(phba,
						KERN_ERR,
						LOG_SLI,
						"0343 Ring %d Cannot find "
						"buffer for an unsolicited iocb"
						". tag 0x%x\n", pring->ringno,
						irsp->un.ulpWord[3]);
			}
			if (irsp->ulpBdeCount == 2) {
				iocbq->context3 = lpfc_sli_get_buff(phba, pring,
						irsp->unsli3.sli3Words[7]);
				if (!iocbq->context3)
					lpfc_printf_log(phba,
						KERN_ERR,
						LOG_SLI,
						"0344 Ring %d Cannot find "
						"buffer for an unsolicited "
						"iocb. tag 0x%x\n",
						pring->ringno,
						irsp->unsli3.sli3Words[7]);
			}
		}
	}
	/*
	 * Sequence assembly: a continuation or intermediate-response iocb
	 * is chained onto any queued sequence with the same XRI
	 * (ulpContext); otherwise it starts a new entry on
	 * iocb_continue_saveq.  When the final piece arrives the whole
	 * chain is dequeued and processed as one sequence.
	 *
	 * NOTE(review): when !found and the status is not INTERMED_RSP,
	 * iocbq below is the loop cursor left past the end of the search
	 * — this appears to rely on saveq having just been queued and on
	 * firmware ordering guarantees; confirm before restructuring.
	 */
	if (irsp->ulpBdeCount != 0 &&
	    (irsp->ulpCommand == CMD_IOCB_RCV_CONT64_CX ||
	     irsp->ulpStatus == IOSTAT_INTERMED_RSP)) {
		int found = 0;

		/* search continue save q for same XRI */
		list_for_each_entry(iocbq, &pring->iocb_continue_saveq, clist) {
			if (iocbq->iocb.ulpContext == saveq->iocb.ulpContext) {
				list_add_tail(&saveq->list, &iocbq->list);
				found = 1;
				break;
			}
		}
		if (!found)
			list_add_tail(&saveq->clist,
				      &pring->iocb_continue_saveq);
		if (saveq->iocb.ulpStatus != IOSTAT_INTERMED_RSP) {
			list_del_init(&iocbq->clist);
			saveq = iocbq;
			irsp = &(saveq->iocb);
		} else
			/* Intermediate piece queued; keep ownership */
			return 0;
	}
	/* Derive R_CTL/TYPE: ELS receive commands imply ELS; otherwise
	 * read them from iocb word 5, defaulting bare sequences on the
	 * ELS ring to ELS as well. */
	if ((irsp->ulpCommand == CMD_RCV_ELS_REQ64_CX) ||
	    (irsp->ulpCommand == CMD_RCV_ELS_REQ_CX) ||
	    (irsp->ulpCommand == CMD_IOCB_RCV_ELS64_CX)) {
		Rctl = FC_ELS_REQ;
		Type = FC_ELS_DATA;
	} else {
		w5p = (WORD5 *)&(saveq->iocb.un.ulpWord[5]);
		Rctl = w5p->hcsw.Rctl;
		Type = w5p->hcsw.Type;

		/* Firmware Workaround */
		if ((Rctl == 0) && (pring->ringno == LPFC_ELS_RING) &&
			(irsp->ulpCommand == CMD_RCV_SEQUENCE64_CX ||
			 irsp->ulpCommand == CMD_IOCB_RCV_SEQ64_CX)) {
			Rctl = FC_ELS_REQ;
			Type = FC_ELS_DATA;
			w5p->hcsw.Rctl = Rctl;
			w5p->hcsw.Type = Type;
		}
	}

	if (!lpfc_complete_unsol_iocb(phba, pring, saveq, Rctl, Type))
		lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
				"0313 Ring %d handler: unexpected Rctl x%x "
				"Type x%x received\n",
				pring->ringno, Rctl, Type);

	return 1;
}
2097
2098
2099
2100
2101
2102
2103
2104
2105
2106
2107
2108
2109
2110static struct lpfc_iocbq *
2111lpfc_sli_iocbq_lookup(struct lpfc_hba *phba,
2112 struct lpfc_sli_ring *pring,
2113 struct lpfc_iocbq *prspiocb)
2114{
2115 struct lpfc_iocbq *cmd_iocb = NULL;
2116 uint16_t iotag;
2117
2118 iotag = prspiocb->iocb.ulpIoTag;
2119
2120 if (iotag != 0 && iotag <= phba->sli.last_iotag) {
2121 cmd_iocb = phba->sli.iocbq_lookup[iotag];
2122 list_del_init(&cmd_iocb->list);
2123 pring->txcmplq_cnt--;
2124 return cmd_iocb;
2125 }
2126
2127 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
2128 "0317 iotag x%x is out off "
2129 "range: max iotag x%x wd0 x%x\n",
2130 iotag, phba->sli.last_iotag,
2131 *(((uint32_t *) &prspiocb->iocb) + 7));
2132 return NULL;
2133}
2134
2135
2136
2137
2138
2139
2140
2141
2142
2143
2144
2145
2146
2147static struct lpfc_iocbq *
2148lpfc_sli_iocbq_lookup_by_tag(struct lpfc_hba *phba,
2149 struct lpfc_sli_ring *pring, uint16_t iotag)
2150{
2151 struct lpfc_iocbq *cmd_iocb;
2152
2153 if (iotag != 0 && iotag <= phba->sli.last_iotag) {
2154 cmd_iocb = phba->sli.iocbq_lookup[iotag];
2155 list_del_init(&cmd_iocb->list);
2156 pring->txcmplq_cnt--;
2157 return cmd_iocb;
2158 }
2159
2160 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
2161 "0372 iotag x%x is out off range: max iotag (x%x)\n",
2162 iotag, phba->sli.last_iotag);
2163 return NULL;
2164}
2165
2166
2167
2168
2169
2170
2171
2172
2173
2174
2175
2176
2177
2178
2179
2180
2181
2182
/**
 * lpfc_sli_process_sol_iocb - Process a solicited iocb completion
 * @phba: Pointer to HBA context object.
 * @pring: Pointer to driver SLI ring object.
 * @saveq: Pointer to the response iocb.
 *
 * Looks up the originating command iocb by iotag and runs its
 * completion callback.  On the ELS ring, a driver-initiated abort is
 * translated into LOCAL_REJECT/SLI_ABORTED status before the callback
 * so the upper layer sees a consistent abort completion, and the
 * response is flagged for deferred memory free.
 *
 * Returns 1 (the caller owns and may free saveq in all paths here).
 **/
static int
lpfc_sli_process_sol_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
			  struct lpfc_iocbq *saveq)
{
	struct lpfc_iocbq *cmdiocbp;
	int rc = 1;
	unsigned long iflag;

	/* Based on the iotag field, get the cmd IOCB from the txcmplq */
	spin_lock_irqsave(&phba->hbalock, iflag);
	cmdiocbp = lpfc_sli_iocbq_lookup(phba, pring, saveq);
	spin_unlock_irqrestore(&phba->hbalock, iflag);

	if (cmdiocbp) {
		if (cmdiocbp->iocb_cmpl) {
			/*
			 * A failed ELS request completion raises an FC
			 * transport event before the normal callback.
			 */
			if (saveq->iocb.ulpStatus &&
			     (pring->ringno == LPFC_ELS_RING) &&
			     (cmdiocbp->iocb.ulpCommand ==
				CMD_ELS_REQUEST64_CR))
				lpfc_send_els_failure_event(phba,
					cmdiocbp, saveq);

			/*
			 * If the command was aborted by the driver,
			 * rewrite the response status so the completion
			 * handler sees a canonical abort, and defer
			 * freeing the response memory.
			 */
			if (pring->ringno == LPFC_ELS_RING) {
				if (cmdiocbp->iocb_flag & LPFC_DRIVER_ABORTED) {
					cmdiocbp->iocb_flag &=
						~LPFC_DRIVER_ABORTED;
					saveq->iocb.ulpStatus =
						IOSTAT_LOCAL_REJECT;
					saveq->iocb.un.ulpWord[4] =
						IOERR_SLI_ABORTED;

					/* Firmware could still be in progress
					 * of DMAing payload, so don't free
					 * data buffer till after a hbeat.
					 */
					saveq->iocb_flag |= LPFC_DELAY_MEM_FREE;
				}
			}
			(cmdiocbp->iocb_cmpl) (phba, cmdiocbp, saveq);
		} else
			/* No callback registered; just recycle the iocb */
			lpfc_sli_release_iocbq(phba, cmdiocbp);
	} else {
		/*
		 * No matching command: expected on the ELS ring (aborts
		 * can complete after the command did), anomalous on
		 * other rings — log it there.
		 */
		if (pring->ringno != LPFC_ELS_RING) {
			/*
			 * Ring <ringno> handler: unexpected completion IoTag
			 * <IoTag>
			 */
			lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
					 "0322 Ring %d handler: "
					 "unexpected completion IoTag x%x "
					 "Data: x%x x%x x%x x%x\n",
					 pring->ringno,
					 saveq->iocb.ulpIoTag,
					 saveq->iocb.ulpStatus,
					 saveq->iocb.un.ulpWord[4],
					 saveq->iocb.ulpCommand,
					 saveq->iocb.ulpContext);
		}
	}

	return rc;
}
2258
2259
2260
2261
2262
2263
2264
2265
2266
2267
2268
/**
 * lpfc_sli_rsp_pointers_error - Response ring pointer corruption handler
 * @phba: Pointer to HBA context object.
 * @pring: Pointer to driver SLI ring object.
 *
 * Called when the port's rspPutInx is outside the response ring: the
 * shared ring pointers are corrupt, so log the condition, mark the HBA
 * in error state, and kick the worker thread to run error-attention
 * recovery (HS_FFER3).
 **/
static void
lpfc_sli_rsp_pointers_error(struct lpfc_hba *phba, struct lpfc_sli_ring *pring)
{
	struct lpfc_pgp *pgp = &phba->port_gp[pring->ringno];
	/*
	 * Ring <ringno> handler: portRspPut <portRspPut> is bigger than
	 * rsp ring <portRspMax>
	 */
	lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
			"0312 Ring %d handler: portRspPut %d "
			"is bigger than rsp ring %d\n",
			pring->ringno, le32_to_cpu(pgp->rspPutInx),
			pring->numRiocb);

	phba->link_state = LPFC_HBA_ERROR;

	/*
	 * All error attention handlers are posted to
	 * worker thread
	 */
	phba->work_ha |= HA_ERATT;
	phba->work_hs = HS_FFER3;

	lpfc_worker_wake_up(phba);

	return;
}
2296
2297
2298
2299
2300
2301
2302
2303
2304
2305
2306
2307void lpfc_poll_eratt(unsigned long ptr)
2308{
2309 struct lpfc_hba *phba;
2310 uint32_t eratt = 0;
2311
2312 phba = (struct lpfc_hba *)ptr;
2313
2314
2315 eratt = lpfc_sli_check_eratt(phba);
2316
2317 if (eratt)
2318
2319 lpfc_worker_wake_up(phba);
2320 else
2321
2322 mod_timer(&phba->eratt_poll, jiffies +
2323 HZ * LPFC_ERATT_POLL_INTERVAL);
2324 return;
2325}
2326
2327
2328
2329
2330
2331
2332
2333
2334
2335
2336
2337
2338
2339
2340
2341
/**
 * lpfc_sli_poll_fcp_ring - Poll the FCP ring for response completions
 * @phba: Pointer to HBA context object.
 *
 * Polling-mode handler for the FCP ring: copies each response iocb
 * out of the ring, validates the put pointer, runs the matching
 * command's completion callback for solicited/abort iocbs, and
 * acknowledges progress to the port via the get-index and CA
 * register.  Runs without hbalock except around the iotag lookup and
 * the ring-attention bookkeeping at the end.
 **/
void lpfc_sli_poll_fcp_ring(struct lpfc_hba *phba)
{
	struct lpfc_sli *psli = &phba->sli;
	struct lpfc_sli_ring *pring = &psli->ring[LPFC_FCP_RING];
	IOCB_t *irsp = NULL;
	IOCB_t *entry = NULL;
	struct lpfc_iocbq *cmdiocbq = NULL;
	struct lpfc_iocbq rspiocbq;
	struct lpfc_pgp *pgp = &phba->port_gp[pring->ringno];
	uint32_t status;
	uint32_t portRspPut, portRspMax;
	int type;
	uint32_t rsp_cmpl = 0;
	uint32_t ha_copy;
	unsigned long iflags;

	pring->stats.iocb_event++;

	/*
	 * The next available response entry should never exceed the
	 * maximum entries: a bad put index means the shared ring
	 * pointers are corrupt.
	 */
	portRspMax = pring->numRiocb;
	portRspPut = le32_to_cpu(pgp->rspPutInx);
	if (unlikely(portRspPut >= portRspMax)) {
		lpfc_sli_rsp_pointers_error(phba, pring);
		return;
	}

	/* Order the index read before reading the ring entries */
	rmb();
	while (pring->rspidx != portRspPut) {
		entry = lpfc_resp_iocb(phba, pring);
		if (++pring->rspidx >= portRspMax)
			pring->rspidx = 0;

		/* Snapshot the entry; the port may reuse the slot */
		lpfc_sli_pcimem_bcopy((uint32_t *) entry,
				      (uint32_t *) &rspiocbq.iocb,
				      phba->iocb_rsp_size);
		irsp = &rspiocbq.iocb;
		type = lpfc_sli_iocb_cmd_type(irsp->ulpCommand & CMD_IOCB_MASK);
		pring->stats.iocb_rsp++;
		rsp_cmpl++;

		if (unlikely(irsp->ulpStatus)) {
			/* Rsp ring <ringno> error: IOCB */
			lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
					"0326 Rsp Ring %d error: IOCB Data: "
					"x%x x%x x%x x%x x%x x%x x%x x%x\n",
					pring->ringno,
					irsp->un.ulpWord[0],
					irsp->un.ulpWord[1],
					irsp->un.ulpWord[2],
					irsp->un.ulpWord[3],
					irsp->un.ulpWord[4],
					irsp->un.ulpWord[5],
					*(uint32_t *)&irsp->un1,
					*((uint32_t *)&irsp->un1 + 1));
		}

		switch (type) {
		case LPFC_ABORT_IOCB:
		case LPFC_SOL_IOCB:
			/*
			 * Idle exchange closed via ABTS from port.  No iocb
			 * resources need to be recovered.
			 */
			if (unlikely(irsp->ulpCommand == CMD_XRI_ABORTED_CX)) {
				lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
						"0314 IOCB cmd 0x%x "
						"processed. Skipping "
						"completion",
						irsp->ulpCommand);
				break;
			}

			/* Lookup (under lock) and complete the command */
			spin_lock_irqsave(&phba->hbalock, iflags);
			cmdiocbq = lpfc_sli_iocbq_lookup(phba, pring,
							 &rspiocbq);
			spin_unlock_irqrestore(&phba->hbalock, iflags);
			if ((cmdiocbq) && (cmdiocbq->iocb_cmpl)) {
				(cmdiocbq->iocb_cmpl)(phba, cmdiocbq,
						      &rspiocbq);
			}
			break;
		default:
			if (irsp->ulpCommand == CMD_ADAPTER_MSG) {
				char adaptermsg[LPFC_MAX_ADPTMSG];
				memset(adaptermsg, 0, LPFC_MAX_ADPTMSG);
				memcpy(&adaptermsg[0], (uint8_t *) irsp,
				       MAX_MSG_DATA);
				dev_warn(&((phba->pcidev)->dev),
					 "lpfc%d: %s\n",
					 phba->brd_no, adaptermsg);
			} else {
				/* Unknown IOCB command */
				lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
						"0321 Unknown IOCB command "
						"Data: x%x, x%x x%x x%x x%x\n",
						type, irsp->ulpCommand,
						irsp->ulpStatus,
						irsp->ulpIoTag,
						irsp->ulpContext);
			}
			break;
		}

		/*
		 * The response IOCB has been processed.  Update the ring
		 * pointer in SLIM.  If the port response put pointer has not
		 * been updated, sync the pgp->rspPutInx and fetch the new port
		 * response put pointer.
		 */
		writel(pring->rspidx, &phba->host_gp[pring->ringno].rspGetInx);

		if (pring->rspidx == portRspPut)
			portRspPut = le32_to_cpu(pgp->rspPutInx);
	}

	ha_copy = readl(phba->HAregaddr);
	ha_copy >>= (LPFC_FCP_RING * 4);

	/* Ring-response-full attention: acknowledge to the port */
	if ((rsp_cmpl > 0) && (ha_copy & HA_R0RE_REQ)) {
		spin_lock_irqsave(&phba->hbalock, iflags);
		pring->stats.iocb_rsp_full++;
		status = ((CA_R0ATT | CA_R0RE_RSP) << (LPFC_FCP_RING * 4));
		writel(status, phba->CAregaddr);
		readl(phba->CAregaddr);
		spin_unlock_irqrestore(&phba->hbalock, iflags);
	}
	/* Command-ring-empty attention: resume queued commands */
	if ((ha_copy & HA_R0CE_RSP) &&
	    (pring->flag & LPFC_CALL_RING_AVAILABLE)) {
		spin_lock_irqsave(&phba->hbalock, iflags);
		pring->flag &= ~LPFC_CALL_RING_AVAILABLE;
		pring->stats.iocb_cmd_empty++;

		/* Force update of the local copy of cmdGetInx */
		pring->local_getidx = le32_to_cpu(pgp->cmdGetInx);
		lpfc_sli_resume_iocb(phba, pring);

		if ((pring->lpfc_sli_cmd_available))
			(pring->lpfc_sli_cmd_available) (phba, pring);

		spin_unlock_irqrestore(&phba->hbalock, iflags);
	}

	return;
}
2489
2490
2491
2492
2493
2494
2495
2496
2497
2498
2499
2500
2501
2502
2503
2504
2505
2506
/**
 * lpfc_sli_handle_fast_ring_event - Interrupt handler for fast-path rings
 * @phba: Pointer to HBA context object.
 * @pring: Pointer to driver SLI ring object.
 * @mask: host attention bits for this ring (already shifted).
 *
 * Processes all pending response iocbs on a fast-path (FCP) ring
 * under hbalock, dropping the lock around completion callbacks and
 * unsolicited-iocb processing.  Handles queue-depth ramp-down on
 * NO_RESOURCES errors and acknowledges ring attentions at the end.
 *
 * Returns 1 always.
 **/
static int
lpfc_sli_handle_fast_ring_event(struct lpfc_hba *phba,
				struct lpfc_sli_ring *pring, uint32_t mask)
{
	struct lpfc_pgp *pgp = &phba->port_gp[pring->ringno];
	IOCB_t *irsp = NULL;
	IOCB_t *entry = NULL;
	struct lpfc_iocbq *cmdiocbq = NULL;
	struct lpfc_iocbq rspiocbq;
	uint32_t status;
	uint32_t portRspPut, portRspMax;
	int rc = 1;
	lpfc_iocb_type type;
	unsigned long iflag;
	uint32_t rsp_cmpl = 0;

	spin_lock_irqsave(&phba->hbalock, iflag);
	pring->stats.iocb_event++;

	/*
	 * The next available response entry should never exceed the
	 * maximum entries: a bad put index means the shared ring
	 * pointers are corrupt.
	 */
	portRspMax = pring->numRiocb;
	portRspPut = le32_to_cpu(pgp->rspPutInx);
	if (unlikely(portRspPut >= portRspMax)) {
		lpfc_sli_rsp_pointers_error(phba, pring);
		spin_unlock_irqrestore(&phba->hbalock, iflag);
		return 1;
	}

	/* Order the index read before reading the ring entries */
	rmb();
	while (pring->rspidx != portRspPut) {
		/*
		 * Fetch an entry off the ring and copy it into a local data
		 * structure.  The copy involves a byte-swap since the
		 * network byte order and pci byte orders are different.
		 */
		entry = lpfc_resp_iocb(phba, pring);
		phba->last_completion_time = jiffies;

		if (++pring->rspidx >= portRspMax)
			pring->rspidx = 0;

		lpfc_sli_pcimem_bcopy((uint32_t *) entry,
				      (uint32_t *) &rspiocbq.iocb,
				      phba->iocb_rsp_size);
		INIT_LIST_HEAD(&(rspiocbq.list));
		irsp = &rspiocbq.iocb;

		type = lpfc_sli_iocb_cmd_type(irsp->ulpCommand & CMD_IOCB_MASK);
		pring->stats.iocb_rsp++;
		rsp_cmpl++;

		if (unlikely(irsp->ulpStatus)) {
			/*
			 * If resource errors reported from HBA, reduce
			 * queuedepths of the SCSI devices.  Callback may
			 * sleep/lock, so drop hbalock around it.
			 */
			if ((irsp->ulpStatus == IOSTAT_LOCAL_REJECT) &&
				(irsp->un.ulpWord[4] == IOERR_NO_RESOURCES)) {
				spin_unlock_irqrestore(&phba->hbalock, iflag);
				phba->lpfc_rampdown_queue_depth(phba);
				spin_lock_irqsave(&phba->hbalock, iflag);
			}

			/* Rsp ring <ringno> error: IOCB */
			lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
					"0336 Rsp Ring %d error: IOCB Data: "
					"x%x x%x x%x x%x x%x x%x x%x x%x\n",
					pring->ringno,
					irsp->un.ulpWord[0],
					irsp->un.ulpWord[1],
					irsp->un.ulpWord[2],
					irsp->un.ulpWord[3],
					irsp->un.ulpWord[4],
					irsp->un.ulpWord[5],
					*(uint32_t *)&irsp->un1,
					*((uint32_t *)&irsp->un1 + 1));
		}

		switch (type) {
		case LPFC_ABORT_IOCB:
		case LPFC_SOL_IOCB:
			/*
			 * Idle exchange closed via ABTS from port.  No iocb
			 * resources need to be recovered.
			 */
			if (unlikely(irsp->ulpCommand == CMD_XRI_ABORTED_CX)) {
				lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
						"0333 IOCB cmd 0x%x"
						" processed. Skipping"
						" completion\n",
						irsp->ulpCommand);
				break;
			}

			cmdiocbq = lpfc_sli_iocbq_lookup(phba, pring,
							 &rspiocbq);
			if ((cmdiocbq) && (cmdiocbq->iocb_cmpl)) {
				/*
				 * In polling mode the callback is invoked
				 * with the lock held; otherwise it is
				 * released around the callback.
				 */
				if (phba->cfg_poll & ENABLE_FCP_RING_POLLING) {
					(cmdiocbq->iocb_cmpl)(phba, cmdiocbq,
							      &rspiocbq);
				} else {
					spin_unlock_irqrestore(&phba->hbalock,
							       iflag);
					(cmdiocbq->iocb_cmpl)(phba, cmdiocbq,
							      &rspiocbq);
					spin_lock_irqsave(&phba->hbalock,
							  iflag);
				}
			}
			break;
		case LPFC_UNSOL_IOCB:
			/* Unsolicited processing runs without the lock */
			spin_unlock_irqrestore(&phba->hbalock, iflag);
			lpfc_sli_process_unsol_iocb(phba, pring, &rspiocbq);
			spin_lock_irqsave(&phba->hbalock, iflag);
			break;
		default:
			if (irsp->ulpCommand == CMD_ADAPTER_MSG) {
				char adaptermsg[LPFC_MAX_ADPTMSG];
				memset(adaptermsg, 0, LPFC_MAX_ADPTMSG);
				memcpy(&adaptermsg[0], (uint8_t *) irsp,
				       MAX_MSG_DATA);
				dev_warn(&((phba->pcidev)->dev),
					 "lpfc%d: %s\n",
					 phba->brd_no, adaptermsg);
			} else {
				/* Unknown IOCB command */
				lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
						"0334 Unknown IOCB command "
						"Data: x%x, x%x x%x x%x x%x\n",
						type, irsp->ulpCommand,
						irsp->ulpStatus,
						irsp->ulpIoTag,
						irsp->ulpContext);
			}
			break;
		}

		/*
		 * The response IOCB has been processed.  Update the ring
		 * pointer in SLIM.  If the port response put pointer has not
		 * been updated, sync the pgp->rspPutInx and fetch the new port
		 * response put pointer.
		 */
		writel(pring->rspidx, &phba->host_gp[pring->ringno].rspGetInx);

		if (pring->rspidx == portRspPut)
			portRspPut = le32_to_cpu(pgp->rspPutInx);
	}

	/* Acknowledge a ring-response-full attention */
	if ((rsp_cmpl > 0) && (mask & HA_R0RE_REQ)) {
		pring->stats.iocb_rsp_full++;
		status = ((CA_R0ATT | CA_R0RE_RSP) << (pring->ringno * 4));
		writel(status, phba->CAregaddr);
		readl(phba->CAregaddr);
	}
	/* Command ring drained: resume any queued iocbs */
	if ((mask & HA_R0CE_RSP) && (pring->flag & LPFC_CALL_RING_AVAILABLE)) {
		pring->flag &= ~LPFC_CALL_RING_AVAILABLE;
		pring->stats.iocb_cmd_empty++;

		/* Force update of the local copy of cmdGetInx */
		pring->local_getidx = le32_to_cpu(pgp->cmdGetInx);
		lpfc_sli_resume_iocb(phba, pring);

		if ((pring->lpfc_sli_cmd_available))
			(pring->lpfc_sli_cmd_available) (phba, pring);

	}

	spin_unlock_irqrestore(&phba->hbalock, iflag);
	return rc;
}
2681
2682
2683
2684
2685
2686
2687
2688
2689
2690
2691
2692
2693
2694
2695
2696
2697
2698
2699
/**
 * lpfc_sli_sp_handle_rspiocb - Process a slow-path response iocb
 * @phba: Pointer to HBA context object.
 * @pring: Pointer to driver SLI ring object.
 * @rspiocbp: Pointer to the newly arrived response iocb.
 *
 * Accumulates response iocbs on pring->iocb_continueq until an entry
 * with ulpLe (last entry) arrives, then processes the whole chain as
 * one logical response: queue-depth ramp-down on NO_RESOURCES, error
 * logging, and dispatch by iocb command type (solicited, unsolicited,
 * abort, unknown).  Frees the chain afterwards unless the unsolicited
 * handler retained ownership.
 *
 * Returns NULL when a complete response was consumed, or the queued
 * rspiocbp when it was only an intermediate piece of a chain.
 **/
static struct lpfc_iocbq *
lpfc_sli_sp_handle_rspiocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
			   struct lpfc_iocbq *rspiocbp)
{
	struct lpfc_iocbq *saveq;
	struct lpfc_iocbq *cmdiocbp;
	struct lpfc_iocbq *next_iocb;
	IOCB_t *irsp = NULL;
	uint32_t free_saveq;
	uint8_t iocb_cmd_type;
	lpfc_iocb_type type;
	unsigned long iflag;
	int rc;

	spin_lock_irqsave(&phba->hbalock, iflag);
	/* First add the response iocb to the countinueq list */
	list_add_tail(&rspiocbp->list, &(pring->iocb_continueq));
	pring->iocb_continueq_cnt++;

	/* Now, determine whetehr the list is completed for processing */
	irsp = &rspiocbp->iocb;
	if (irsp->ulpLe) {
		/*
		 * By default, the driver expects to free all resources
		 * associated with this iocb completion.
		 */
		free_saveq = 1;
		saveq = list_get_first(&pring->iocb_continueq,
				       struct lpfc_iocbq, list);
		irsp = &(saveq->iocb);
		list_del_init(&pring->iocb_continueq);
		pring->iocb_continueq_cnt = 0;

		pring->stats.iocb_rsp++;

		/*
		 * If resource errors reported from HBA, reduce
		 * queuedepths of the SCSI device.  The callback may
		 * sleep/lock; drop hbalock around it.
		 */
		if ((irsp->ulpStatus == IOSTAT_LOCAL_REJECT) &&
		    (irsp->un.ulpWord[4] == IOERR_NO_RESOURCES)) {
			spin_unlock_irqrestore(&phba->hbalock, iflag);
			phba->lpfc_rampdown_queue_depth(phba);
			spin_lock_irqsave(&phba->hbalock, iflag);
		}

		if (irsp->ulpStatus) {
			/* Rsp ring <ringno> error: IOCB */
			lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
					"0328 Rsp Ring %d error: "
					"IOCB Data: "
					"x%x x%x x%x x%x "
					"x%x x%x x%x x%x "
					"x%x x%x x%x x%x "
					"x%x x%x x%x x%x\n",
					pring->ringno,
					irsp->un.ulpWord[0],
					irsp->un.ulpWord[1],
					irsp->un.ulpWord[2],
					irsp->un.ulpWord[3],
					irsp->un.ulpWord[4],
					irsp->un.ulpWord[5],
					*(((uint32_t *) irsp) + 6),
					*(((uint32_t *) irsp) + 7),
					*(((uint32_t *) irsp) + 8),
					*(((uint32_t *) irsp) + 9),
					*(((uint32_t *) irsp) + 10),
					*(((uint32_t *) irsp) + 11),
					*(((uint32_t *) irsp) + 12),
					*(((uint32_t *) irsp) + 13),
					*(((uint32_t *) irsp) + 14),
					*(((uint32_t *) irsp) + 15));
		}

		/*
		 * Fetch the IOCB command type and call the correct completion
		 * routine.  Solicited and Unsolicited IOCBs on the ELS ring
		 * get freed back to the lpfc_iocb_list by the discovery
		 * kernel thread.
		 */
		iocb_cmd_type = irsp->ulpCommand & CMD_IOCB_MASK;
		type = lpfc_sli_iocb_cmd_type(iocb_cmd_type);
		switch (type) {
		case LPFC_SOL_IOCB:
			spin_unlock_irqrestore(&phba->hbalock, iflag);
			rc = lpfc_sli_process_sol_iocb(phba, pring, saveq);
			spin_lock_irqsave(&phba->hbalock, iflag);
			break;

		case LPFC_UNSOL_IOCB:
			spin_unlock_irqrestore(&phba->hbalock, iflag);
			rc = lpfc_sli_process_unsol_iocb(phba, pring, saveq);
			spin_lock_irqsave(&phba->hbalock, iflag);
			/* rc == 0 means the handler kept the iocbs */
			if (!rc)
				free_saveq = 0;
			break;

		case LPFC_ABORT_IOCB:
			cmdiocbp = NULL;
			if (irsp->ulpCommand != CMD_XRI_ABORTED_CX)
				cmdiocbp = lpfc_sli_iocbq_lookup(phba, pring,
								 saveq);
			if (cmdiocbp) {
				/* Call the specified completion routine */
				if (cmdiocbp->iocb_cmpl) {
					spin_unlock_irqrestore(&phba->hbalock,
							       iflag);
					(cmdiocbp->iocb_cmpl)(phba, cmdiocbp,
							      saveq);
					spin_lock_irqsave(&phba->hbalock,
							  iflag);
				} else
					__lpfc_sli_release_iocbq(phba,
								 cmdiocbp);
			}
			break;

		case LPFC_UNKNOWN_IOCB:
			if (irsp->ulpCommand == CMD_ADAPTER_MSG) {
				char adaptermsg[LPFC_MAX_ADPTMSG];
				memset(adaptermsg, 0, LPFC_MAX_ADPTMSG);
				memcpy(&adaptermsg[0], (uint8_t *)irsp,
				       MAX_MSG_DATA);
				dev_warn(&((phba->pcidev)->dev),
					 "lpfc%d: %s\n",
					 phba->brd_no, adaptermsg);
			} else {
				/* Unknown IOCB command */
				lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
						"0335 Unknown IOCB "
						"command Data: x%x "
						"x%x x%x x%x\n",
						irsp->ulpCommand,
						irsp->ulpStatus,
						irsp->ulpIoTag,
						irsp->ulpContext);
			}
			break;
		}

		/* Release the whole chained response unless retained above */
		if (free_saveq) {
			list_for_each_entry_safe(rspiocbp, next_iocb,
						 &saveq->list, list) {
				list_del(&rspiocbp->list);
				__lpfc_sli_release_iocbq(phba, rspiocbp);
			}
			__lpfc_sli_release_iocbq(phba, saveq);
		}
		rspiocbp = NULL;
	}
	spin_unlock_irqrestore(&phba->hbalock, iflag);
	return rspiocbp;
}
2853
2854
2855
2856
2857
2858
2859
2860
2861
2862
/**
 * lpfc_sli_handle_slow_ring_event - Wrapper for slow-path ring events
 * @phba: Pointer to HBA context object.
 * @pring: Pointer to driver SLI ring object.
 * @mask: host attention register mask for this ring.
 *
 * Dispatches to the SLI-revision specific slow-ring handler installed
 * in the HBA context (s3 or s4 variant below).
 **/
void
lpfc_sli_handle_slow_ring_event(struct lpfc_hba *phba,
				struct lpfc_sli_ring *pring, uint32_t mask)
{
	phba->lpfc_sli_handle_slow_ring_event(phba, pring, mask);
}
2869
2870
2871
2872
2873
2874
2875
2876
2877
2878
2879
2880
/**
 * lpfc_sli_handle_slow_ring_event_s3 - SLI-3 slow-path ring handler
 * @phba: Pointer to HBA context object.
 * @pring: Pointer to driver SLI ring object.
 * @mask: host attention register mask for this ring.
 *
 * Drains the response ring: each entry is copied into a freshly
 * allocated iocbq and handed to lpfc_sli_sp_handle_rspiocb() (which
 * assembles and completes chained responses).  Validates the port's
 * put index first — corruption drives the HBA into error-attention
 * handling.  Ring attentions are acknowledged at the end.
 **/
static void
lpfc_sli_handle_slow_ring_event_s3(struct lpfc_hba *phba,
				   struct lpfc_sli_ring *pring, uint32_t mask)
{
	struct lpfc_pgp *pgp;
	IOCB_t *entry;
	IOCB_t *irsp = NULL;
	struct lpfc_iocbq *rspiocbp = NULL;
	uint32_t portRspPut, portRspMax;
	unsigned long iflag;
	uint32_t status;

	pgp = &phba->port_gp[pring->ringno];
	spin_lock_irqsave(&phba->hbalock, iflag);
	pring->stats.iocb_event++;

	/*
	 * The next available response entry should never exceed the
	 * maximum entries: a bad put index means the shared ring
	 * pointers are corrupt.
	 */
	portRspMax = pring->numRiocb;
	portRspPut = le32_to_cpu(pgp->rspPutInx);
	if (portRspPut >= portRspMax) {
		/*
		 * Ring <ringno> handler: portRspPut <portRspPut> is bigger
		 * than rsp ring <portRspMax>
		 */
		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
				"0303 Ring %d handler: portRspPut %d "
				"is bigger than rsp ring %d\n",
				pring->ringno, portRspPut, portRspMax);

		phba->link_state = LPFC_HBA_ERROR;
		spin_unlock_irqrestore(&phba->hbalock, iflag);

		phba->work_hs = HS_FFER3;
		lpfc_handle_eratt(phba);

		return;
	}

	/* Order the index read before reading the ring entries */
	rmb();
	while (pring->rspidx != portRspPut) {
		/*
		 * Build a completion list and call the appropriate handler.
		 * The process is to get the next available response iocb, get
		 * a free iocb from the list, copy the response data into the
		 * free iocb, insert to the continuation list, and update the
		 * next response index to slim.  This process makes response
		 * iocb's in the ring available to DMA as fast as possible but
		 * pays a penalty for a copy operation.  Since the iocb is
		 * only 32 bytes, this penalty is considered small relative to
		 * the PCI reads for register values and a slim write.  When
		 * the ulpLe field is set, the entire Command has been
		 * received.
		 */
		entry = lpfc_resp_iocb(phba, pring);

		phba->last_completion_time = jiffies;
		rspiocbp = __lpfc_sli_get_iocbq(phba);
		if (rspiocbp == NULL) {
			/* Out of iocbqs: leave the entry for a later pass */
			printk(KERN_ERR "%s: out of buffers! Failing "
			       "completion.\n", __func__);
			break;
		}

		lpfc_sli_pcimem_bcopy(entry, &rspiocbp->iocb,
				      phba->iocb_rsp_size);
		irsp = &rspiocbp->iocb;

		if (++pring->rspidx >= portRspMax)
			pring->rspidx = 0;

		if (pring->ringno == LPFC_ELS_RING) {
			lpfc_debugfs_slow_ring_trc(phba,
			"IOCB rsp ring:   wd4:x%08x wd6:x%08x wd7:x%08x",
				*(((uint32_t *) irsp) + 4),
				*(((uint32_t *) irsp) + 6),
				*(((uint32_t *) irsp) + 7));
		}

		/* Tell the port the entry was consumed */
		writel(pring->rspidx, &phba->host_gp[pring->ringno].rspGetInx);

		spin_unlock_irqrestore(&phba->hbalock, iflag);
		/* Handle the response IOCB */
		rspiocbp = lpfc_sli_sp_handle_rspiocb(phba, pring, rspiocbp);
		spin_lock_irqsave(&phba->hbalock, iflag);

		/*
		 * If the port response put pointer has not been updated, sync
		 * the pgp->rspPutInx in the MAILBOX_tand fetch the new port
		 * response put pointer.
		 */
		if (pring->rspidx == portRspPut) {
			portRspPut = le32_to_cpu(pgp->rspPutInx);
		}
	} /* while (pring->rspidx != portRspPut) */

	/* Acknowledge a ring-response-full attention */
	if ((rspiocbp != NULL) && (mask & HA_R0RE_REQ)) {
		/* At least one response entry has been freed */
		pring->stats.iocb_rsp_full++;
		/* SET RxRE_RSP in Chip Att register */
		status = ((CA_R0ATT | CA_R0RE_RSP) << (pring->ringno * 4));
		writel(status, phba->CAregaddr);
		readl(phba->CAregaddr); /* flush */
	}
	/* Command ring drained: resume any queued iocbs */
	if ((mask & HA_R0CE_RSP) && (pring->flag & LPFC_CALL_RING_AVAILABLE)) {
		pring->flag &= ~LPFC_CALL_RING_AVAILABLE;
		pring->stats.iocb_cmd_empty++;

		/* Force update of the local copy of cmdGetInx */
		pring->local_getidx = le32_to_cpu(pgp->cmdGetInx);
		lpfc_sli_resume_iocb(phba, pring);

		if ((pring->lpfc_sli_cmd_available))
			(pring->lpfc_sli_cmd_available) (phba, pring);

	}

	spin_unlock_irqrestore(&phba->hbalock, iflag);
	return;
}
3003
3004
3005
3006
3007
3008
3009
3010
3011
3012
3013
3014
3015
3016static void
3017lpfc_sli_handle_slow_ring_event_s4(struct lpfc_hba *phba,
3018 struct lpfc_sli_ring *pring, uint32_t mask)
3019{
3020 struct lpfc_iocbq *irspiocbq;
3021 unsigned long iflag;
3022
3023 while (!list_empty(&phba->sli4_hba.sp_rspiocb_work_queue)) {
3024
3025 spin_lock_irqsave(&phba->hbalock, iflag);
3026 list_remove_head(&phba->sli4_hba.sp_rspiocb_work_queue,
3027 irspiocbq, struct lpfc_iocbq, list);
3028 spin_unlock_irqrestore(&phba->hbalock, iflag);
3029
3030 lpfc_sli_sp_handle_rspiocb(phba, pring, irspiocbq);
3031 }
3032}
3033
3034
3035
3036
3037
3038
3039
3040
3041
3042
3043
3044void
3045lpfc_sli_abort_iocb_ring(struct lpfc_hba *phba, struct lpfc_sli_ring *pring)
3046{
3047 LIST_HEAD(completions);
3048 struct lpfc_iocbq *iocb, *next_iocb;
3049
3050 if (pring->ringno == LPFC_ELS_RING) {
3051 lpfc_fabric_abort_hba(phba);
3052 }
3053
3054
3055
3056
3057 spin_lock_irq(&phba->hbalock);
3058 list_splice_init(&pring->txq, &completions);
3059 pring->txq_cnt = 0;
3060
3061
3062 list_for_each_entry_safe(iocb, next_iocb, &pring->txcmplq, list)
3063 lpfc_sli_issue_abort_iotag(phba, pring, iocb);
3064
3065 spin_unlock_irq(&phba->hbalock);
3066
3067
3068 lpfc_sli_cancel_iocbs(phba, &completions, IOSTAT_LOCAL_REJECT,
3069 IOERR_SLI_ABORTED);
3070}
3071
3072
3073
3074
3075
3076
3077
3078
3079
3080
3081
3082void
3083lpfc_sli_flush_fcp_rings(struct lpfc_hba *phba)
3084{
3085 LIST_HEAD(txq);
3086 LIST_HEAD(txcmplq);
3087 struct lpfc_sli *psli = &phba->sli;
3088 struct lpfc_sli_ring *pring;
3089
3090
3091 pring = &psli->ring[psli->fcp_ring];
3092
3093 spin_lock_irq(&phba->hbalock);
3094
3095 list_splice_init(&pring->txq, &txq);
3096 pring->txq_cnt = 0;
3097
3098
3099 list_splice_init(&pring->txcmplq, &txcmplq);
3100 pring->txcmplq_cnt = 0;
3101 spin_unlock_irq(&phba->hbalock);
3102
3103
3104 lpfc_sli_cancel_iocbs(phba, &txq, IOSTAT_LOCAL_REJECT,
3105 IOERR_SLI_DOWN);
3106
3107
3108 lpfc_sli_cancel_iocbs(phba, &txcmplq, IOSTAT_LOCAL_REJECT,
3109 IOERR_SLI_DOWN);
3110}
3111
3112
3113
3114
3115
3116
3117
3118
3119
3120
3121
3122
3123
3124
3125static int
3126lpfc_sli_brdready_s3(struct lpfc_hba *phba, uint32_t mask)
3127{
3128 uint32_t status;
3129 int i = 0;
3130 int retval = 0;
3131
3132
3133 status = readl(phba->HSregaddr);
3134
3135
3136
3137
3138
3139
3140
3141 while (((status & mask) != mask) &&
3142 !(status & HS_FFERM) &&
3143 i++ < 20) {
3144
3145 if (i <= 5)
3146 msleep(10);
3147 else if (i <= 10)
3148 msleep(500);
3149 else
3150 msleep(2500);
3151
3152 if (i == 15) {
3153
3154 phba->pport->port_state = LPFC_VPORT_UNKNOWN;
3155 lpfc_sli_brdrestart(phba);
3156 }
3157
3158 status = readl(phba->HSregaddr);
3159 }
3160
3161
3162 if ((status & HS_FFERM) || (i >= 20)) {
3163 phba->link_state = LPFC_HBA_ERROR;
3164 retval = 1;
3165 }
3166
3167 return retval;
3168}
3169
3170
3171
3172
3173
3174
3175
3176
3177
3178
3179
3180
3181static int
3182lpfc_sli_brdready_s4(struct lpfc_hba *phba, uint32_t mask)
3183{
3184 uint32_t status;
3185 int retval = 0;
3186
3187
3188 status = lpfc_sli4_post_status_check(phba);
3189
3190 if (status) {
3191 phba->pport->port_state = LPFC_VPORT_UNKNOWN;
3192 lpfc_sli_brdrestart(phba);
3193 status = lpfc_sli4_post_status_check(phba);
3194 }
3195
3196
3197 if (status) {
3198 phba->link_state = LPFC_HBA_ERROR;
3199 retval = 1;
3200 } else
3201 phba->sli4_hba.intr_enable = 0;
3202
3203 return retval;
3204}
3205
3206
3207
3208
3209
3210
3211
3212
3213
/**
 * lpfc_sli_brdready - Wrapper func for checking the hba readyness
 * @phba: Pointer to HBA context object.
 * @mask: Bit mask to be checked.
 *
 * Dispatches to the SLI-rev specific routine installed in
 * phba->lpfc_sli_brdready (the _s3 or _s4 variant) and returns its
 * result: zero when the board is ready, non-zero otherwise.
 */
int
lpfc_sli_brdready(struct lpfc_hba *phba, uint32_t mask)
{
	return phba->lpfc_sli_brdready(phba, mask);
}
3219
3220#define BARRIER_TEST_PATTERN (0xdeadbeef)
3221
3222
3223
3224
3225
3226
3227
3228
/**
 * lpfc_reset_barrier - Make HBA ready for HBA reset
 * @phba: Pointer to HBA context object.
 *
 * Runs a barrier handshake with the adapter before a board reset.
 * It only applies to adapters with PCI header type 0x80 and a Helios
 * or Thor ASIC (per the BIU-rev JEDEC id); all others return at once.
 * Error-attention interrupts are masked for the duration, a KILL_BOARD
 * mailbox word is written directly into SLIM, and the routine busy
 * waits for the chip to echo an acknowledgement pattern or raise an
 * error attention.  The saved HC register state is restored on exit.
 *
 * NOTE(review): called from lpfc_sli_brdrestart_s3 with hbalock held,
 * which is presumably why mdelay (not msleep) is used throughout --
 * confirm against any other callers.
 */
void lpfc_reset_barrier(struct lpfc_hba *phba)
{
	uint32_t __iomem *resp_buf;
	uint32_t __iomem *mbox_buf;
	volatile uint32_t mbox;
	uint32_t hc_copy;
	int i;
	uint8_t hdrtype;

	/* Barrier protocol only applies to Helios/Thor class adapters */
	pci_read_config_byte(phba->pcidev, PCI_HEADER_TYPE, &hdrtype);
	if (hdrtype != 0x80 ||
	    (FC_JEDEC_ID(phba->vpd.rev.biuRev) != HELIOS_JEDEC_ID &&
	     FC_JEDEC_ID(phba->vpd.rev.biuRev) != THOR_JEDEC_ID))
		return;

	/* Responses from the chip are read back through SLIM */
	resp_buf = phba->MBslimaddr;

	/* Mask error-attention interrupts while the barrier runs */
	hc_copy = readl(phba->HCregaddr);
	writel((hc_copy & ~HC_ERINT_ENA), phba->HCregaddr);
	readl(phba->HCregaddr); /* flush */
	phba->link_flag |= LS_IGNORE_ERATT;

	if (readl(phba->HAregaddr) & HA_ERATT) {
		/* An error attention is already pending: acknowledge it */
		writel(HA_ERATT, phba->HAregaddr);
		phba->pport->stopped = 1;
	}

	/* Build a KILL_BOARD mailbox word owned by the chip */
	mbox = 0;
	((MAILBOX_t *)&mbox)->mbxCommand = MBX_KILL_BOARD;
	((MAILBOX_t *)&mbox)->mbxOwner = OWN_CHIP;

	/* Seed the response slot with a known pattern, then post the cmd */
	writel(BARRIER_TEST_PATTERN, (resp_buf + 1));
	mbox_buf = phba->MBslimaddr;
	writel(mbox, mbox_buf);

	/* Wait up to 50ms for the chip to invert the test pattern */
	for (i = 0;
	     readl(resp_buf + 1) != ~(BARRIER_TEST_PATTERN) && i < 50; i++)
		mdelay(1);

	if (readl(resp_buf + 1) != ~(BARRIER_TEST_PATTERN)) {
		/* No echo: decide how much cleanup is still needed */
		if (phba->sli.sli_flag & LPFC_SLI_ACTIVE ||
		    phba->pport->stopped)
			goto restore_hc;
		else
			goto clear_errat;
	}

	/* Hand the mailbox back to the host and wait for the echo */
	((MAILBOX_t *)&mbox)->mbxOwner = OWN_HOST;
	for (i = 0; readl(resp_buf) != mbox && i < 500; i++)
		mdelay(1);

clear_errat:

	/* Wait for the kill to surface as an error attention */
	while (!(readl(phba->HAregaddr) & HA_ERATT) && ++i < 500)
		mdelay(1);

	if (readl(phba->HAregaddr) & HA_ERATT) {
		writel(HA_ERATT, phba->HAregaddr);
		phba->pport->stopped = 1;
	}

restore_hc:
	/* Re-enable error attention and restore the saved HC state */
	phba->link_flag &= ~LS_IGNORE_ERATT;
	writel(hc_copy, phba->HCregaddr);
	readl(phba->HCregaddr); /* flush */
}
3301
3302
3303
3304
3305
3306
3307
3308
3309
3310
3311
3312
/**
 * lpfc_sli_brdkill - Issue a kill_board mailbox command
 * @phba: Pointer to HBA context object.
 *
 * Issues a KILL_BOARD mailbox command (no-wait) with error attentions
 * masked, then polls the host-attention register for up to ~3 seconds
 * waiting for the resulting error attention that signals the kill
 * completed.  On success the link state is left as LPFC_HBA_ERROR and
 * outstanding I/O is flushed via lpfc_hba_down_post.
 *
 * Return: 0 when the error attention was seen (board killed),
 * 1 on any failure (allocation, mailbox issue, or timeout).
 */
int
lpfc_sli_brdkill(struct lpfc_hba *phba)
{
	struct lpfc_sli *psli;
	LPFC_MBOXQ_t *pmb;
	uint32_t status;
	uint32_t ha_copy;
	int retval;
	int i = 0;

	psli = &phba->sli;

	/* Kill HBA */
	lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
			"0329 Kill HBA Data: x%x x%x\n",
			phba->pport->port_state, psli->sli_flag);

	pmb = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!pmb)
		return 1;

	/* Disable the error attention so the kill does not trigger the
	 * normal error-attention handling path.
	 */
	spin_lock_irq(&phba->hbalock);
	status = readl(phba->HCregaddr);
	status &= ~HC_ERINT_ENA;
	writel(status, phba->HCregaddr);
	readl(phba->HCregaddr); /* flush */
	phba->link_flag |= LS_IGNORE_ERATT;
	spin_unlock_irq(&phba->hbalock);

	lpfc_kill_board(phba, pmb);
	pmb->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
	retval = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT);

	if (retval != MBX_SUCCESS) {
		/* MBX_BUSY means the mailbox layer still owns pmb */
		if (retval != MBX_BUSY)
			mempool_free(pmb, phba->mbox_mem_pool);
		spin_lock_irq(&phba->hbalock);
		phba->link_flag &= ~LS_IGNORE_ERATT;
		spin_unlock_irq(&phba->hbalock);
		return 1;
	}

	spin_lock_irq(&phba->hbalock);
	psli->sli_flag &= ~LPFC_SLI_ACTIVE;
	spin_unlock_irq(&phba->hbalock);

	mempool_free(pmb, phba->mbox_mem_pool);

	/* There is no completion for a KILL_BOARD mbox cmd. Check for an
	 * error attention every 100ms for 3 seconds. If we don't get an
	 * error attention, the HBA is not killed.
	 */
	ha_copy = readl(phba->HAregaddr);

	while ((i++ < 30) && !(ha_copy & HA_ERATT)) {
		mdelay(100);
		ha_copy = readl(phba->HAregaddr);
	}

	del_timer_sync(&psli->mbox_tmo);
	if (ha_copy & HA_ERATT) {
		/* Acknowledge the attention; the board is stopped */
		writel(HA_ERATT, phba->HAregaddr);
		phba->pport->stopped = 1;
	}
	spin_lock_irq(&phba->hbalock);
	psli->sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
	psli->mbox_active = NULL;
	phba->link_flag &= ~LS_IGNORE_ERATT;
	spin_unlock_irq(&phba->hbalock);

	lpfc_hba_down_post(phba);
	phba->link_state = LPFC_HBA_ERROR;

	return ha_copy & HA_ERATT ? 0 : 1;
}
3390
3391
3392
3393
3394
3395
3396
3397
3398
3399
3400
3401
/**
 * lpfc_sli_brdreset - Reset a sli-2 or sli-3 HBA
 * @phba: Pointer to HBA context object.
 *
 * Resets the chip by toggling HC_INITFF in the host control register.
 * PCI parity/SERR reporting is disabled across the reset and restored
 * afterwards.  Fabric state (event tag, my/prev DID) and all ring
 * index bookkeeping are cleared, and the link state is left at
 * LPFC_WARM_START.
 *
 * Return: always 0.
 */
int
lpfc_sli_brdreset(struct lpfc_hba *phba)
{
	struct lpfc_sli *psli;
	struct lpfc_sli_ring *pring;
	uint16_t cfg_value;
	int i;

	psli = &phba->sli;

	/* Reset HBA */
	lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
			"0325 Reset HBA Data: x%x x%x\n",
			phba->pport->port_state, psli->sli_flag);

	/* perform board reset */
	phba->fc_eventTag = 0;
	phba->pport->fc_myDID = 0;
	phba->pport->fc_prevDID = 0;

	/* Turn off parity checking and serr during the physical reset */
	pci_read_config_word(phba->pcidev, PCI_COMMAND, &cfg_value);
	pci_write_config_word(phba->pcidev, PCI_COMMAND,
			      (cfg_value &
			       ~(PCI_COMMAND_PARITY | PCI_COMMAND_SERR)));

	psli->sli_flag &= ~(LPFC_SLI_ACTIVE | LPFC_PROCESS_LA);

	/* Now toggle INITFF bit in the Host Control Register */
	writel(HC_INITFF, phba->HCregaddr);
	mdelay(1);
	readl(phba->HCregaddr); /* flush */
	writel(0, phba->HCregaddr);
	readl(phba->HCregaddr); /* flush */

	/* Restore PCI cmd register */
	pci_write_config_word(phba->pcidev, PCI_COMMAND, cfg_value);

	/* Initialize relevant SLI info for every ring */
	for (i = 0; i < psli->num_rings; i++) {
		pring = &psli->ring[i];
		pring->flag = 0;
		pring->rspidx = 0;
		pring->next_cmdidx = 0;
		pring->local_getidx = 0;
		pring->cmdidx = 0;
		pring->missbufcnt = 0;
	}

	phba->link_state = LPFC_WARM_START;
	return 0;
}
3454
3455
3456
3457
3458
3459
3460
3461
3462
3463
3464
/**
 * lpfc_sli4_brdreset - Reset a sli-4 HBA
 * @phba: Pointer to HBA context object.
 *
 * Resets a SLI4 HBA via a PCI function reset.  Before the reset,
 * fabric state is cleared, PCI parity/SERR reporting is disabled, and
 * every SLI4 queue (mailbox/ELS/receive WQs and RQs plus all CQs) is
 * unlinked from its parent list under hbalock.
 *
 * NOTE(review): unlike the SLI3 variant, the saved PCI_COMMAND value
 * is not written back here -- presumably restored elsewhere after the
 * function reset; confirm.
 *
 * Return: always 0.
 */
int
lpfc_sli4_brdreset(struct lpfc_hba *phba)
{
	struct lpfc_sli *psli = &phba->sli;
	uint16_t cfg_value;
	uint8_t qindx;

	/* Reset HBA */
	lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
			"0295 Reset HBA Data: x%x x%x\n",
			phba->pport->port_state, psli->sli_flag);

	/* perform board reset */
	phba->fc_eventTag = 0;
	phba->pport->fc_myDID = 0;
	phba->pport->fc_prevDID = 0;

	/* Turn off parity checking and serr during the physical reset */
	pci_read_config_word(phba->pcidev, PCI_COMMAND, &cfg_value);
	pci_write_config_word(phba->pcidev, PCI_COMMAND,
			      (cfg_value &
			       ~(PCI_COMMAND_PARITY | PCI_COMMAND_SERR)));

	spin_lock_irq(&phba->hbalock);
	psli->sli_flag &= ~(LPFC_PROCESS_LA);
	phba->fcf.fcf_flag = 0;
	/* Clean all the child queue lists before the reset */
	list_del_init(&phba->sli4_hba.mbx_wq->list);
	list_del_init(&phba->sli4_hba.els_wq->list);
	list_del_init(&phba->sli4_hba.hdr_rq->list);
	list_del_init(&phba->sli4_hba.dat_rq->list);
	list_del_init(&phba->sli4_hba.mbx_cq->list);
	list_del_init(&phba->sli4_hba.els_cq->list);
	list_del_init(&phba->sli4_hba.rxq_cq->list);
	for (qindx = 0; qindx < phba->cfg_fcp_wq_count; qindx++)
		list_del_init(&phba->sli4_hba.fcp_wq[qindx]->list);
	for (qindx = 0; qindx < phba->cfg_fcp_eq_count; qindx++)
		list_del_init(&phba->sli4_hba.fcp_cq[qindx]->list);
	spin_unlock_irq(&phba->hbalock);

	/* Now physically reset the device */
	lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
			"0389 Performing PCI function reset!\n");
	/* Perform FCoE PCI function reset */
	lpfc_pci_function_reset(phba);

	return 0;
}
3513
3514
3515
3516
3517
3518
3519
3520
3521
3522
3523
3524
3525
3526
/**
 * lpfc_sli_brdrestart_s3 - Restart a sli-3 HBA
 * @phba: Pointer to HBA context object.
 *
 * Restarts a SLI2/SLI3 HBA: an MBX_RESTART mailbox word is written
 * directly into SLIM (with the barrier protocol run first via
 * lpfc_reset_barrier), a second SLIM word tells the firmware whether a
 * prior configuration existed (non-zero port_state), and then the
 * board is physically reset with lpfc_sli_brdreset.  The whole SLIM
 * sequence runs under hbalock.  Link statistics are reset and pending
 * I/O is flushed afterwards.
 *
 * Return: always 0.
 */
static int
lpfc_sli_brdrestart_s3(struct lpfc_hba *phba)
{
	MAILBOX_t *mb;
	struct lpfc_sli *psli;
	volatile uint32_t word0;
	void __iomem *to_slim;

	spin_lock_irq(&phba->hbalock);

	psli = &phba->sli;

	/* Restart HBA */
	lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
			"0337 Restart HBA Data: x%x x%x\n",
			phba->pport->port_state, psli->sli_flag);

	/* Build an MBX_RESTART command word in host memory */
	word0 = 0;
	mb = (MAILBOX_t *) &word0;
	mb->mbxCommand = MBX_RESTART;
	mb->mbxHc = 1;

	lpfc_reset_barrier(phba);

	/* Post the restart command to SLIM word 0 */
	to_slim = phba->MBslimaddr;
	writel(*(uint32_t *) mb, to_slim);
	readl(to_slim); /* flush */

	/* Only skip post after fc_ffinit is completed: word0 reused here
	 * to carry the "previously configured" flag into SLIM word 1.
	 */
	if (phba->pport->port_state)
		word0 = 1;	/* This is really setting up word1 */
	else
		word0 = 0;	/* This is really setting up word1 */
	to_slim = phba->MBslimaddr + sizeof (uint32_t);
	writel(*(uint32_t *) mb, to_slim);
	readl(to_slim); /* flush */

	lpfc_sli_brdreset(phba);
	phba->pport->stopped = 0;
	phba->link_state = LPFC_INIT_START;
	phba->hba_flag = 0;
	spin_unlock_irq(&phba->hbalock);

	memset(&psli->lnk_stat_offsets, 0, sizeof(psli->lnk_stat_offsets));
	psli->stats_start = get_seconds();

	/* Give the INITFF and Post time to settle. */
	mdelay(100);

	lpfc_hba_down_post(phba);

	return 0;
}
3580
3581
3582
3583
3584
3585
3586
3587
3588
3589
/**
 * lpfc_sli_brdrestart_s4 - Restart a sli-4 hba
 * @phba: Pointer to HBA context object.
 *
 * Restarts a SLI4 HBA by calling lpfc_sli4_brdreset, then resets the
 * driver-side port/link state under hbalock, zeroes the link
 * statistics bookkeeping, and flushes pending I/O via
 * lpfc_hba_down_post.
 *
 * Return: always 0.
 */
static int
lpfc_sli_brdrestart_s4(struct lpfc_hba *phba)
{
	struct lpfc_sli *psli = &phba->sli;


	/* Restart HBA */
	lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
			"0296 Restart HBA Data: x%x x%x\n",
			phba->pport->port_state, psli->sli_flag);

	lpfc_sli4_brdreset(phba);

	spin_lock_irq(&phba->hbalock);
	phba->pport->stopped = 0;
	phba->link_state = LPFC_INIT_START;
	phba->hba_flag = 0;
	spin_unlock_irq(&phba->hbalock);

	memset(&psli->lnk_stat_offsets, 0, sizeof(psli->lnk_stat_offsets));
	psli->stats_start = get_seconds();

	lpfc_hba_down_post(phba);

	return 0;
}
3616
3617
3618
3619
3620
3621
3622
3623
/**
 * lpfc_sli_brdrestart - Wrapper func for restarting hba
 * @phba: Pointer to HBA context object.
 *
 * Dispatches to the SLI-rev specific restart routine installed in
 * phba->lpfc_sli_brdrestart (s3 or s4 variant) and returns its result.
 */
int
lpfc_sli_brdrestart(struct lpfc_hba *phba)
{
	return phba->lpfc_sli_brdrestart(phba);
}
3629
3630
3631
3632
3633
3634
3635
3636
3637
3638
3639
/**
 * lpfc_sli_chipset_init - Initialize the chipset
 * @phba: Pointer to HBA context object.
 *
 * Polls the host status register until both HS_FFRDY and HS_MBRDY are
 * set, with escalating sleeps (10ms, 500ms, 2.5s) and a forced board
 * restart on the 15th attempt.  Once ready, host interrupts are
 * disabled and all pending host attention bits are cleared.
 *
 * Return: 0 on success, -ETIMEDOUT after 20 failed polls, -EIO on a
 * fatal firmware error (HS_FFERM); link_state is set to
 * LPFC_HBA_ERROR on either failure.
 */
static int
lpfc_sli_chipset_init(struct lpfc_hba *phba)
{
	uint32_t status, i = 0;

	/* Read the HBA Host Status Register */
	status = readl(phba->HSregaddr);

	/* Check status register to see what current state is */
	i = 0;
	while ((status & (HS_FFRDY | HS_MBRDY)) != (HS_FFRDY | HS_MBRDY)) {

		/* Check every 100ms for 5 retries, then every 500ms for 5,
		 * then every 2.5 sec for 5, then reset board and repeat.
		 */
		if (i++ >= 20) {
			/* Adapter failed to init, timeout, status reg
			   <status> */
			lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
					"0436 Adapter failed to init, "
					"timeout, status reg x%x, "
					"FW Data: A8 x%x AC x%x\n", status,
					readl(phba->MBslimaddr + 0xa8),
					readl(phba->MBslimaddr + 0xac));
			phba->link_state = LPFC_HBA_ERROR;
			return -ETIMEDOUT;
		}

		/* Check to see if any errors occurred during init */
		if (status & HS_FFERM) {
			/* ERROR: During chipset initialization */
			/* Adapter failed to init, chipset, status reg
			   <status> */
			lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
					"0437 Adapter failed to init, "
					"chipset, status reg x%x, "
					"FW Data: A8 x%x AC x%x\n", status,
					readl(phba->MBslimaddr + 0xa8),
					readl(phba->MBslimaddr + 0xac));
			phba->link_state = LPFC_HBA_ERROR;
			return -EIO;
		}

		/* Escalating sleep between polls */
		if (i <= 5) {
			msleep(10);
		} else if (i <= 10) {
			msleep(500);
		} else {
			msleep(2500);
		}

		if (i == 15) {
			/* Do post: restart the board halfway through */
			phba->pport->port_state = LPFC_VPORT_UNKNOWN;
			lpfc_sli_brdrestart(phba);
		}
		/* Read the HBA Host Status Register */
		status = readl(phba->HSregaddr);
	}

	/* Check to see if any errors occurred during init */
	if (status & HS_FFERM) {
		/* ERROR: During chipset initialization */
		/* Adapter failed to init, chipset, status reg <status> */
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"0438 Adapter failed to init, chipset, "
				"status reg x%x, "
				"FW Data: A8 x%x AC x%x\n", status,
				readl(phba->MBslimaddr + 0xa8),
				readl(phba->MBslimaddr + 0xac));
		phba->link_state = LPFC_HBA_ERROR;
		return -EIO;
	}

	/* Clear all interrupt enable conditions */
	writel(0, phba->HCregaddr);
	readl(phba->HCregaddr); /* flush */

	/* setup host attn register */
	writel(0xffffffff, phba->HAregaddr);
	readl(phba->HAregaddr); /* flush */
	return 0;
}
3724
3725
3726
3727
3728
3729
3730
/**
 * lpfc_sli_hbq_count - Get the number of HBQs to be configured
 *
 * Returns the number of entries in the static lpfc_hbq_defs table.
 */
int
lpfc_sli_hbq_count(void)
{
	return ARRAY_SIZE(lpfc_hbq_defs);
}
3736
3737
3738
3739
3740
3741
3742
3743
3744static int
3745lpfc_sli_hbq_entry_count(void)
3746{
3747 int hbq_count = lpfc_sli_hbq_count();
3748 int count = 0;
3749 int i;
3750
3751 for (i = 0; i < hbq_count; ++i)
3752 count += lpfc_hbq_defs[i]->entry_count;
3753 return count;
3754}
3755
3756
3757
3758
3759
3760
3761
/**
 * lpfc_sli_hbq_size - Calculate memory required for all hbq entries
 *
 * Returns the total number of HBQ entries across all defined HBQs
 * multiplied by the size of one HBQ entry.
 */
int
lpfc_sli_hbq_size(void)
{
	return lpfc_sli_hbq_entry_count() * sizeof(struct lpfc_hbq_entry);
}
3767
3768
3769
3770
3771
3772
3773
3774
3775
3776
3777static int
3778lpfc_sli_hbq_setup(struct lpfc_hba *phba)
3779{
3780 int hbq_count = lpfc_sli_hbq_count();
3781 LPFC_MBOXQ_t *pmb;
3782 MAILBOX_t *pmbox;
3783 uint32_t hbqno;
3784 uint32_t hbq_entry_index;
3785
3786
3787
3788
3789 pmb = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
3790
3791 if (!pmb)
3792 return -ENOMEM;
3793
3794 pmbox = &pmb->u.mb;
3795
3796
3797 phba->link_state = LPFC_INIT_MBX_CMDS;
3798 phba->hbq_in_use = 1;
3799
3800 hbq_entry_index = 0;
3801 for (hbqno = 0; hbqno < hbq_count; ++hbqno) {
3802 phba->hbqs[hbqno].next_hbqPutIdx = 0;
3803 phba->hbqs[hbqno].hbqPutIdx = 0;
3804 phba->hbqs[hbqno].local_hbqGetIdx = 0;
3805 phba->hbqs[hbqno].entry_count =
3806 lpfc_hbq_defs[hbqno]->entry_count;
3807 lpfc_config_hbq(phba, hbqno, lpfc_hbq_defs[hbqno],
3808 hbq_entry_index, pmb);
3809 hbq_entry_index += phba->hbqs[hbqno].entry_count;
3810
3811 if (lpfc_sli_issue_mbox(phba, pmb, MBX_POLL) != MBX_SUCCESS) {
3812
3813
3814
3815 lpfc_printf_log(phba, KERN_ERR,
3816 LOG_SLI | LOG_VPORT,
3817 "1805 Adapter failed to init. "
3818 "Data: x%x x%x x%x\n",
3819 pmbox->mbxCommand,
3820 pmbox->mbxStatus, hbqno);
3821
3822 phba->link_state = LPFC_HBA_ERROR;
3823 mempool_free(pmb, phba->mbox_mem_pool);
3824 return ENXIO;
3825 }
3826 }
3827 phba->hbq_count = hbq_count;
3828
3829 mempool_free(pmb, phba->mbox_mem_pool);
3830
3831
3832 for (hbqno = 0; hbqno < hbq_count; ++hbqno)
3833 lpfc_sli_hbqbuf_init_hbqs(phba, hbqno);
3834 return 0;
3835}
3836
3837
3838
3839
3840
3841
3842
3843
3844
3845
3846static int
3847lpfc_sli4_rb_setup(struct lpfc_hba *phba)
3848{
3849 phba->hbq_in_use = 1;
3850 phba->hbqs[0].entry_count = lpfc_hbq_defs[0]->entry_count;
3851 phba->hbq_count = 1;
3852
3853 lpfc_sli_hbqbuf_init_hbqs(phba, 0);
3854 return 0;
3855}
3856
3857
3858
3859
3860
3861
3862
3863
3864
3865
3866
3867
3868
3869
/**
 * lpfc_sli_config_port - Issue config port mailbox command
 * @phba: Pointer to HBA context object.
 * @sli_mode: requested SLI mode (2 or 3).
 *
 * Restarts and re-initializes the chipset (up to two attempts), then
 * issues a polled CONFIG_PORT mailbox command.  On success the
 * adapter's granted capabilities (NPIV, HBQ, CRP, INB, BlockGuard,
 * max_vpi) are read back from the mailbox and recorded in
 * phba->sli3_options and the host-group pointer fields
 * (hbq_get/port_gp/inb_*).
 *
 * Return: 0 on success; -ENOMEM on allocation failure; -ENXIO when
 * CONFIG_PORT fails or SLI3 mode-3 was granted without cMA; -EINVAL
 * when no attempt succeeded; or the error from
 * lpfc_sli_chipset_init/lpfc_config_port_prep.
 */
int
lpfc_sli_config_port(struct lpfc_hba *phba, int sli_mode)
{
	LPFC_MBOXQ_t *pmb;
	uint32_t resetcount = 0, rc = 0, done = 0;

	pmb = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!pmb) {
		phba->link_state = LPFC_HBA_ERROR;
		return -ENOMEM;
	}

	phba->sli_rev = sli_mode;
	while (resetcount < 2 && !done) {
		/* Hold the mailbox channel across the restart */
		spin_lock_irq(&phba->hbalock);
		phba->sli.sli_flag |= LPFC_SLI_MBOX_ACTIVE;
		spin_unlock_irq(&phba->hbalock);
		phba->pport->port_state = LPFC_VPORT_UNKNOWN;
		lpfc_sli_brdrestart(phba);
		rc = lpfc_sli_chipset_init(phba);
		if (rc)
			break;

		spin_lock_irq(&phba->hbalock);
		phba->sli.sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
		spin_unlock_irq(&phba->hbalock);
		resetcount++;

		/* Call pre CONFIG_PORT mailbox command initialization. A
		 * value of 0 means the call was successful.  Any other
		 * nonzero value is a failure, but if ERESTART is returned,
		 * the driver may reset the HBA and try again.
		 */
		rc = lpfc_config_port_prep(phba);
		if (rc == -ERESTART) {
			phba->link_state = LPFC_LINK_UNKNOWN;
			continue;
		} else if (rc)
			break;
		phba->link_state = LPFC_INIT_MBX_CMDS;
		lpfc_config_port(phba, pmb);
		rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL);
		/* Clear all granted-capability flags before re-reading them */
		phba->sli3_options &= ~(LPFC_SLI3_NPIV_ENABLED |
					LPFC_SLI3_HBQ_ENABLED |
					LPFC_SLI3_CRP_ENABLED |
					LPFC_SLI3_INB_ENABLED |
					LPFC_SLI3_BG_ENABLED);
		if (rc != MBX_SUCCESS) {
			lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"0442 Adapter failed to init, mbxCmd x%x "
				"CONFIG_PORT, mbxStatus x%x Data: x%x\n",
				pmb->u.mb.mbxCommand, pmb->u.mb.mbxStatus, 0);
			spin_lock_irq(&phba->hbalock);
			phba->sli.sli_flag &= ~LPFC_SLI_ACTIVE;
			spin_unlock_irq(&phba->hbalock);
			rc = -ENXIO;
		} else {
			/* Allow asynchronous mailbox command to go through */
			spin_lock_irq(&phba->hbalock);
			phba->sli.sli_flag &= ~LPFC_SLI_ASYNC_MBX_BLK;
			spin_unlock_irq(&phba->hbalock);
			done = 1;
		}
	}
	if (!done) {
		rc = -EINVAL;
		goto do_prep_failed;
	}
	if (pmb->u.mb.un.varCfgPort.sli_mode == 3) {
		/* Mode 3 without mailbox attention is unusable */
		if (!pmb->u.mb.un.varCfgPort.cMA) {
			rc = -ENXIO;
			goto do_prep_failed;
		}
		if (phba->max_vpi && pmb->u.mb.un.varCfgPort.gmv) {
			phba->sli3_options |= LPFC_SLI3_NPIV_ENABLED;
			phba->max_vpi = pmb->u.mb.un.varCfgPort.max_vpi;
			phba->max_vports = (phba->max_vpi > phba->max_vports) ?
				phba->max_vpi : phba->max_vports;

		} else
			phba->max_vpi = 0;
		if (pmb->u.mb.un.varCfgPort.gdss)
			phba->sli3_options |= LPFC_SLI3_DSS_ENABLED;
		if (pmb->u.mb.un.varCfgPort.gerbm)
			phba->sli3_options |= LPFC_SLI3_HBQ_ENABLED;
		if (pmb->u.mb.un.varCfgPort.gcrp)
			phba->sli3_options |= LPFC_SLI3_CRP_ENABLED;
		if (pmb->u.mb.un.varCfgPort.ginb) {
			/* Inband mode: host-group pointers come from the
			 * s3_inb_pgp layout of the shared mailbox area.
			 */
			phba->sli3_options |= LPFC_SLI3_INB_ENABLED;
			phba->hbq_get = phba->mbox->us.s3_inb_pgp.hbq_get;
			phba->port_gp = phba->mbox->us.s3_inb_pgp.port;
			phba->inb_ha_copy = &phba->mbox->us.s3_inb_pgp.ha_copy;
			phba->inb_counter = &phba->mbox->us.s3_inb_pgp.counter;
			phba->inb_last_counter =
				phba->mbox->us.s3_inb_pgp.counter;
		} else {
			phba->hbq_get = phba->mbox->us.s3_pgp.hbq_get;
			phba->port_gp = phba->mbox->us.s3_pgp.port;
			phba->inb_ha_copy = NULL;
			phba->inb_counter = NULL;
		}

		if (phba->cfg_enable_bg) {
			if (pmb->u.mb.un.varCfgPort.gbg)
				phba->sli3_options |= LPFC_SLI3_BG_ENABLED;
			else
				lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
						"0443 Adapter did not grant "
						"BlockGuard\n");
		}
	} else {
		/* SLI2: only the legacy port group pointers apply */
		phba->hbq_get = NULL;
		phba->port_gp = phba->mbox->us.s2.port;
		phba->inb_ha_copy = NULL;
		phba->inb_counter = NULL;
		phba->max_vpi = 0;
	}
do_prep_failed:
	mempool_free(pmb, phba->mbox_mem_pool);
	return rc;
}
3991
3992
3993
3994
3995
3996
3997
3998
3999
4000
4001
4002
4003
4004
4005
/**
 * lpfc_sli_hba_setup - SLI initialization function
 * @phba: Pointer to HBA context object.
 *
 * Main SLI2/SLI3 bring-up entry point.  Chooses the SLI mode from the
 * lpfc_sli_mode module parameter (mode 2 is refused when NPIV is
 * enabled), configures the port (falling back from SLI3 to SLI2 on
 * failure), sets the IOCB sizes for the granted mode, maps the rings,
 * sets up HBQs when granted, and runs post-configuration.
 *
 * Return: 0 on success; a negative error from the failing step
 * otherwise (link_state is then set to LPFC_HBA_ERROR).
 */
int
lpfc_sli_hba_setup(struct lpfc_hba *phba)
{
	uint32_t rc;
	int mode = 3;

	switch (lpfc_sli_mode) {
	case 2:
		if (phba->cfg_enable_npiv) {
			/* NPIV requires SLI3; ignore the forced mode 2 */
			lpfc_printf_log(phba, KERN_ERR, LOG_INIT | LOG_VPORT,
				"1824 NPIV enabled: Override lpfc_sli_mode "
				"parameter (%d) to auto (0).\n",
				lpfc_sli_mode);
			break;
		}
		mode = 2;
		break;
	case 0:
	case 3:
		break;
	default:
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT | LOG_VPORT,
				"1819 Unrecognized lpfc_sli_mode "
				"parameter: %d.\n", lpfc_sli_mode);

		break;
	}

	rc = lpfc_sli_config_port(phba, mode);

	if (rc && lpfc_sli_mode == 3)
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT | LOG_VPORT,
				"1820 Unable to select SLI-3.  "
				"Not supported by adapter.\n");
	/* Fall back to SLI2 unless mode 2 already failed */
	if (rc && mode != 2)
		rc = lpfc_sli_config_port(phba, 2);
	if (rc)
		goto lpfc_sli_hba_setup_error;

	/* Size the IOCBs for the granted SLI revision */
	if (phba->sli_rev == 3) {
		phba->iocb_cmd_size = SLI3_IOCB_CMD_SIZE;
		phba->iocb_rsp_size = SLI3_IOCB_RSP_SIZE;
	} else {
		phba->iocb_cmd_size = SLI2_IOCB_CMD_SIZE;
		phba->iocb_rsp_size = SLI2_IOCB_RSP_SIZE;
		phba->sli3_options = 0;
	}

	lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
			"0444 Firmware in SLI %x mode. Max_vpi %d\n",
			phba->sli_rev, phba->max_vpi);
	rc = lpfc_sli_ring_map(phba);

	if (rc)
		goto lpfc_sli_hba_setup_error;

	/* Init HBQs */
	if (phba->sli3_options & LPFC_SLI3_HBQ_ENABLED) {
		rc = lpfc_sli_hbq_setup(phba);
		if (rc)
			goto lpfc_sli_hba_setup_error;
	}
	spin_lock_irq(&phba->hbalock);
	phba->sli.sli_flag |= LPFC_PROCESS_LA;
	spin_unlock_irq(&phba->hbalock);

	rc = lpfc_config_port_post(phba);
	if (rc)
		goto lpfc_sli_hba_setup_error;

	return rc;

lpfc_sli_hba_setup_error:
	phba->link_state = LPFC_HBA_ERROR;
	lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
			"0445 Firmware initialization failed\n");
	return rc;
}
4084
4085
4086
4087
4088
4089
4090
4091
4092
/**
 * lpfc_sli4_read_fcoe_params - Read fcoe params from conf region
 * @phba: Pointer to HBA context object.
 * @mboxq: Pointer to the driver internal mailbox object.
 *
 * Seeds default FCoE parameters (no VLAN, default FC map), then dumps
 * config region 23 via a polled mailbox command and parses the FCoE
 * parameters out of it with lpfc_parse_fcoe_conf.  The DMA buffer
 * attached to the mailbox (context1) is freed on every path.
 *
 * Return: 0 on success; -ENOMEM when the dump command cannot be set
 * up; -EIO when the mailbox fails or the returned length exceeds
 * DMP_RGN23_SIZE.  A failure leaves the defaults in place.
 */
static int
lpfc_sli4_read_fcoe_params(struct lpfc_hba *phba,
			   LPFC_MBOXQ_t *mboxq)
{
	struct lpfc_dmabuf *mp;
	struct lpfc_mqe *mqe;
	uint32_t data_length;
	int rc;

	/* Program the default values */
	phba->valid_vlan = 0;
	phba->fc_map[0] = LPFC_FCOE_FCF_MAP0;
	phba->fc_map[1] = LPFC_FCOE_FCF_MAP1;
	phba->fc_map[2] = LPFC_FCOE_FCF_MAP2;

	mqe = &mboxq->u.mqe;
	if (lpfc_dump_fcoe_param(phba, mboxq))
		return -ENOMEM;

	mp = (struct lpfc_dmabuf *) mboxq->context1;
	rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);

	lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI,
			"(%d):2571 Mailbox cmd x%x Status x%x "
			"Data: x%x x%x x%x x%x x%x x%x x%x x%x x%x "
			"x%x x%x x%x x%x x%x x%x x%x x%x x%x "
			"CQ: x%x x%x x%x x%x\n",
			mboxq->vport ? mboxq->vport->vpi : 0,
			bf_get(lpfc_mqe_command, mqe),
			bf_get(lpfc_mqe_status, mqe),
			mqe->un.mb_words[0], mqe->un.mb_words[1],
			mqe->un.mb_words[2], mqe->un.mb_words[3],
			mqe->un.mb_words[4], mqe->un.mb_words[5],
			mqe->un.mb_words[6], mqe->un.mb_words[7],
			mqe->un.mb_words[8], mqe->un.mb_words[9],
			mqe->un.mb_words[10], mqe->un.mb_words[11],
			mqe->un.mb_words[12], mqe->un.mb_words[13],
			mqe->un.mb_words[14], mqe->un.mb_words[15],
			mqe->un.mb_words[16], mqe->un.mb_words[50],
			mboxq->mcqe.word0,
			mboxq->mcqe.mcqe_tag0, 	mboxq->mcqe.mcqe_tag1,
			mboxq->mcqe.trailer);

	if (rc) {
		lpfc_mbuf_free(phba, mp->virt, mp->phys);
		kfree(mp);
		return -EIO;
	}
	data_length = mqe->un.mb_words[5];
	/* Sanity-check the returned length against the region size */
	if (data_length > DMP_RGN23_SIZE) {
		lpfc_mbuf_free(phba, mp->virt, mp->phys);
		kfree(mp);
		return -EIO;
	}

	lpfc_parse_fcoe_conf(phba, mp->virt, data_length);
	lpfc_mbuf_free(phba, mp->virt, mp->phys);
	kfree(mp);
	return 0;
}
4153
4154
4155
4156
4157
4158
4159
4160
4161
4162
4163
4164
4165
4166
4167
4168
4169static int
4170lpfc_sli4_read_rev(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq,
4171 uint8_t *vpd, uint32_t *vpd_size)
4172{
4173 int rc = 0;
4174 uint32_t dma_size;
4175 struct lpfc_dmabuf *dmabuf;
4176 struct lpfc_mqe *mqe;
4177
4178 dmabuf = kzalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
4179 if (!dmabuf)
4180 return -ENOMEM;
4181
4182
4183
4184
4185
4186 dma_size = *vpd_size;
4187 dmabuf->virt = dma_alloc_coherent(&phba->pcidev->dev,
4188 dma_size,
4189 &dmabuf->phys,
4190 GFP_KERNEL);
4191 if (!dmabuf->virt) {
4192 kfree(dmabuf);
4193 return -ENOMEM;
4194 }
4195 memset(dmabuf->virt, 0, dma_size);
4196
4197
4198
4199
4200
4201
4202 lpfc_read_rev(phba, mboxq);
4203 mqe = &mboxq->u.mqe;
4204 mqe->un.read_rev.vpd_paddr_high = putPaddrHigh(dmabuf->phys);
4205 mqe->un.read_rev.vpd_paddr_low = putPaddrLow(dmabuf->phys);
4206 mqe->un.read_rev.word1 &= 0x0000FFFF;
4207 bf_set(lpfc_mbx_rd_rev_vpd, &mqe->un.read_rev, 1);
4208 bf_set(lpfc_mbx_rd_rev_avail_len, &mqe->un.read_rev, dma_size);
4209
4210 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
4211 if (rc) {
4212 dma_free_coherent(&phba->pcidev->dev, dma_size,
4213 dmabuf->virt, dmabuf->phys);
4214 return -EIO;
4215 }
4216
4217
4218
4219
4220
4221
4222 if (mqe->un.read_rev.avail_vpd_len < *vpd_size)
4223 *vpd_size = mqe->un.read_rev.avail_vpd_len;
4224
4225 lpfc_sli_pcimem_bcopy(dmabuf->virt, vpd, *vpd_size);
4226 dma_free_coherent(&phba->pcidev->dev, dma_size,
4227 dmabuf->virt, dmabuf->phys);
4228 kfree(dmabuf);
4229 return 0;
4230}
4231
4232
4233
4234
4235
4236
4237
4238
4239static void
4240lpfc_sli4_arm_cqeq_intr(struct lpfc_hba *phba)
4241{
4242 uint8_t fcp_eqidx;
4243
4244 lpfc_sli4_cq_release(phba->sli4_hba.mbx_cq, LPFC_QUEUE_REARM);
4245 lpfc_sli4_cq_release(phba->sli4_hba.els_cq, LPFC_QUEUE_REARM);
4246 lpfc_sli4_cq_release(phba->sli4_hba.rxq_cq, LPFC_QUEUE_REARM);
4247 for (fcp_eqidx = 0; fcp_eqidx < phba->cfg_fcp_eq_count; fcp_eqidx++)
4248 lpfc_sli4_cq_release(phba->sli4_hba.fcp_cq[fcp_eqidx],
4249 LPFC_QUEUE_REARM);
4250 lpfc_sli4_eq_release(phba->sli4_hba.sp_eq, LPFC_QUEUE_REARM);
4251 for (fcp_eqidx = 0; fcp_eqidx < phba->cfg_fcp_eq_count; fcp_eqidx++)
4252 lpfc_sli4_eq_release(phba->sli4_hba.fp_eq[fcp_eqidx],
4253 LPFC_QUEUE_REARM);
4254}
4255
4256
4257
4258
4259
4260
4261
4262
4263
4264
4265int
4266lpfc_sli4_hba_setup(struct lpfc_hba *phba)
4267{
4268 int rc;
4269 LPFC_MBOXQ_t *mboxq;
4270 struct lpfc_mqe *mqe;
4271 uint8_t *vpd;
4272 uint32_t vpd_size;
4273 uint32_t ftr_rsp = 0;
4274 struct Scsi_Host *shost = lpfc_shost_from_vport(phba->pport);
4275 struct lpfc_vport *vport = phba->pport;
4276 struct lpfc_dmabuf *mp;
4277
4278
4279 rc = lpfc_pci_function_reset(phba);
4280 if (unlikely(rc))
4281 return -ENODEV;
4282
4283
4284 rc = lpfc_sli4_post_status_check(phba);
4285 if (unlikely(rc))
4286 return -ENODEV;
4287 else {
4288 spin_lock_irq(&phba->hbalock);
4289 phba->sli.sli_flag |= LPFC_SLI_ACTIVE;
4290 spin_unlock_irq(&phba->hbalock);
4291 }
4292
4293
4294
4295
4296
4297 mboxq = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
4298 if (!mboxq)
4299 return -ENOMEM;
4300
4301
4302
4303
4304
4305 if (lpfc_sli4_read_fcoe_params(phba, mboxq))
4306 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_INIT,
4307 "2570 Failed to read FCoE parameters\n");
4308
4309
4310 vpd_size = PAGE_SIZE;
4311 vpd = kzalloc(vpd_size, GFP_KERNEL);
4312 if (!vpd) {
4313 rc = -ENOMEM;
4314 goto out_free_mbox;
4315 }
4316
4317 rc = lpfc_sli4_read_rev(phba, mboxq, vpd, &vpd_size);
4318 if (unlikely(rc))
4319 goto out_free_vpd;
4320
4321 mqe = &mboxq->u.mqe;
4322 phba->sli_rev = bf_get(lpfc_mbx_rd_rev_sli_lvl, &mqe->un.read_rev);
4323 if (bf_get(lpfc_mbx_rd_rev_fcoe, &mqe->un.read_rev))
4324 phba->hba_flag |= HBA_FCOE_SUPPORT;
4325 if (phba->sli_rev != LPFC_SLI_REV4 ||
4326 !(phba->hba_flag & HBA_FCOE_SUPPORT)) {
4327 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
4328 "0376 READ_REV Error. SLI Level %d "
4329 "FCoE enabled %d\n",
4330 phba->sli_rev, phba->hba_flag & HBA_FCOE_SUPPORT);
4331 rc = -EIO;
4332 goto out_free_vpd;
4333 }
4334
4335
4336
4337
4338
4339 rc = lpfc_parse_vpd(phba, vpd, vpd_size);
4340 if (unlikely(!rc)) {
4341 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
4342 "0377 Error %d parsing vpd. "
4343 "Using defaults.\n", rc);
4344 rc = 0;
4345 }
4346
4347
4348 phba->vpd.rev.biuRev = mqe->un.read_rev.first_hw_rev;
4349 phba->vpd.rev.smRev = mqe->un.read_rev.second_hw_rev;
4350 phba->vpd.rev.endecRev = mqe->un.read_rev.third_hw_rev;
4351 phba->vpd.rev.fcphHigh = bf_get(lpfc_mbx_rd_rev_fcph_high,
4352 &mqe->un.read_rev);
4353 phba->vpd.rev.fcphLow = bf_get(lpfc_mbx_rd_rev_fcph_low,
4354 &mqe->un.read_rev);
4355 phba->vpd.rev.feaLevelHigh = bf_get(lpfc_mbx_rd_rev_ftr_lvl_high,
4356 &mqe->un.read_rev);
4357 phba->vpd.rev.feaLevelLow = bf_get(lpfc_mbx_rd_rev_ftr_lvl_low,
4358 &mqe->un.read_rev);
4359 phba->vpd.rev.sli1FwRev = mqe->un.read_rev.fw_id_rev;
4360 memcpy(phba->vpd.rev.sli1FwName, mqe->un.read_rev.fw_name, 16);
4361 phba->vpd.rev.sli2FwRev = mqe->un.read_rev.ulp_fw_id_rev;
4362 memcpy(phba->vpd.rev.sli2FwName, mqe->un.read_rev.ulp_fw_name, 16);
4363 phba->vpd.rev.opFwRev = mqe->un.read_rev.fw_id_rev;
4364 memcpy(phba->vpd.rev.opFwName, mqe->un.read_rev.fw_name, 16);
4365 lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI,
4366 "(%d):0380 READ_REV Status x%x "
4367 "fw_rev:%s fcphHi:%x fcphLo:%x flHi:%x flLo:%x\n",
4368 mboxq->vport ? mboxq->vport->vpi : 0,
4369 bf_get(lpfc_mqe_status, mqe),
4370 phba->vpd.rev.opFwName,
4371 phba->vpd.rev.fcphHigh, phba->vpd.rev.fcphLow,
4372 phba->vpd.rev.feaLevelHigh, phba->vpd.rev.feaLevelLow);
4373
4374
4375
4376
4377
4378 lpfc_request_features(phba, mboxq);
4379 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
4380 if (unlikely(rc)) {
4381 rc = -EIO;
4382 goto out_free_vpd;
4383 }
4384
4385
4386
4387
4388
4389 if (!(bf_get(lpfc_mbx_rq_ftr_rsp_fcpi, &mqe->un.req_ftrs))) {
4390 lpfc_printf_log(phba, KERN_WARNING, LOG_MBOX | LOG_SLI,
4391 "0378 No support for fcpi mode.\n");
4392 ftr_rsp++;
4393 }
4394
4395
4396
4397
4398
4399
4400 if ((phba->cfg_enable_bg) &&
4401 !(bf_get(lpfc_mbx_rq_ftr_rsp_dif, &mqe->un.req_ftrs)))
4402 ftr_rsp++;
4403
4404 if (phba->max_vpi && phba->cfg_enable_npiv &&
4405 !(bf_get(lpfc_mbx_rq_ftr_rsp_npiv, &mqe->un.req_ftrs)))
4406 ftr_rsp++;
4407
4408 if (ftr_rsp) {
4409 lpfc_printf_log(phba, KERN_WARNING, LOG_MBOX | LOG_SLI,
4410 "0379 Feature Mismatch Data: x%08x %08x "
4411 "x%x x%x x%x\n", mqe->un.req_ftrs.word2,
4412 mqe->un.req_ftrs.word3, phba->cfg_enable_bg,
4413 phba->cfg_enable_npiv, phba->max_vpi);
4414 if (!(bf_get(lpfc_mbx_rq_ftr_rsp_dif, &mqe->un.req_ftrs)))
4415 phba->cfg_enable_bg = 0;
4416 if (!(bf_get(lpfc_mbx_rq_ftr_rsp_npiv, &mqe->un.req_ftrs)))
4417 phba->cfg_enable_npiv = 0;
4418 }
4419
4420
4421 spin_lock_irq(&phba->hbalock);
4422 phba->sli3_options |= (LPFC_SLI3_NPIV_ENABLED | LPFC_SLI3_HBQ_ENABLED);
4423 spin_unlock_irq(&phba->hbalock);
4424
4425
4426 lpfc_read_sparam(phba, mboxq, vport->vpi);
4427 mboxq->vport = vport;
4428 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
4429 mp = (struct lpfc_dmabuf *) mboxq->context1;
4430 if (rc == MBX_SUCCESS) {
4431 memcpy(&vport->fc_sparam, mp->virt, sizeof(struct serv_parm));
4432 rc = 0;
4433 }
4434
4435
4436
4437
4438
4439 lpfc_mbuf_free(phba, mp->virt, mp->phys);
4440 kfree(mp);
4441 mboxq->context1 = NULL;
4442 if (unlikely(rc)) {
4443 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
4444 "0382 READ_SPARAM command failed "
4445 "status %d, mbxStatus x%x\n",
4446 rc, bf_get(lpfc_mqe_status, mqe));
4447 phba->link_state = LPFC_HBA_ERROR;
4448 rc = -EIO;
4449 goto out_free_vpd;
4450 }
4451
4452 if (phba->cfg_soft_wwnn)
4453 u64_to_wwn(phba->cfg_soft_wwnn,
4454 vport->fc_sparam.nodeName.u.wwn);
4455 if (phba->cfg_soft_wwpn)
4456 u64_to_wwn(phba->cfg_soft_wwpn,
4457 vport->fc_sparam.portName.u.wwn);
4458 memcpy(&vport->fc_nodename, &vport->fc_sparam.nodeName,
4459 sizeof(struct lpfc_name));
4460 memcpy(&vport->fc_portname, &vport->fc_sparam.portName,
4461 sizeof(struct lpfc_name));
4462
4463
4464 fc_host_node_name(shost) = wwn_to_u64(vport->fc_nodename.u.wwn);
4465 fc_host_port_name(shost) = wwn_to_u64(vport->fc_portname.u.wwn);
4466
4467
4468 rc = lpfc_sli4_post_sgl_list(phba);
4469 if (unlikely(rc)) {
4470 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
4471 "0582 Error %d during sgl post operation", rc);
4472 rc = -ENODEV;
4473 goto out_free_vpd;
4474 }
4475
4476
4477 rc = lpfc_sli4_repost_scsi_sgl_list(phba);
4478 if (unlikely(rc)) {
4479 lpfc_printf_log(phba, KERN_WARNING, LOG_MBOX | LOG_SLI,
4480 "0383 Error %d during scsi sgl post opeation",
4481 rc);
4482
4483
4484 rc = -ENODEV;
4485 goto out_free_vpd;
4486 }
4487
4488
4489 rc = lpfc_sli4_post_all_rpi_hdrs(phba);
4490 if (unlikely(rc)) {
4491 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
4492 "0393 Error %d during rpi post operation\n",
4493 rc);
4494 rc = -ENODEV;
4495 goto out_free_vpd;
4496 }
4497 if (phba->cfg_enable_fip)
4498 bf_set(lpfc_fip_flag, &phba->sli4_hba.sli4_flags, 1);
4499 else
4500 bf_set(lpfc_fip_flag, &phba->sli4_hba.sli4_flags, 0);
4501
4502
4503 rc = lpfc_sli4_queue_setup(phba);
4504 if (unlikely(rc)) {
4505 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
4506 "0381 Error %d during queue setup.\n ", rc);
4507 goto out_stop_timers;
4508 }
4509
4510
4511 lpfc_sli4_arm_cqeq_intr(phba);
4512
4513
4514 phba->sli4_hba.intr_enable = 1;
4515
4516
4517 spin_lock_irq(&phba->hbalock);
4518 phba->sli.sli_flag &= ~LPFC_SLI_ASYNC_MBX_BLK;
4519 spin_unlock_irq(&phba->hbalock);
4520
4521
4522 lpfc_sli4_rb_setup(phba);
4523
4524
4525 mod_timer(&vport->els_tmofunc,
4526 jiffies + HZ * (phba->fc_ratov * 2));
4527
4528
4529 mod_timer(&phba->hb_tmofunc,
4530 jiffies + HZ * LPFC_HB_MBOX_INTERVAL);
4531 phba->hb_outstanding = 0;
4532 phba->last_completion_time = jiffies;
4533
4534
4535 mod_timer(&phba->eratt_poll, jiffies + HZ * LPFC_ERATT_POLL_INTERVAL);
4536
4537
4538
4539
4540
4541 lpfc_init_link(phba, mboxq, phba->cfg_topology, phba->cfg_link_speed);
4542 mboxq->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
4543 lpfc_set_loopback_flag(phba);
4544
4545 spin_lock_irq(&phba->hbalock);
4546 phba->link_state = LPFC_LINK_DOWN;
4547 spin_unlock_irq(&phba->hbalock);
4548 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_NOWAIT);
4549 if (unlikely(rc != MBX_NOT_FINISHED)) {
4550 kfree(vpd);
4551 return 0;
4552 } else
4553 rc = -EIO;
4554
4555
4556 if (rc)
4557 lpfc_sli4_queue_unset(phba);
4558
4559out_stop_timers:
4560 if (rc)
4561 lpfc_stop_hba_timers(phba);
4562out_free_vpd:
4563 kfree(vpd);
4564out_free_mbox:
4565 mempool_free(mboxq, phba->mbox_mem_pool);
4566 return rc;
4567}
4568
4569
4570
4571
4572
4573
4574
4575
4576
4577
4578
4579
4580
4581void
4582lpfc_mbox_timeout(unsigned long ptr)
4583{
4584 struct lpfc_hba *phba = (struct lpfc_hba *) ptr;
4585 unsigned long iflag;
4586 uint32_t tmo_posted;
4587
4588 spin_lock_irqsave(&phba->pport->work_port_lock, iflag);
4589 tmo_posted = phba->pport->work_port_events & WORKER_MBOX_TMO;
4590 if (!tmo_posted)
4591 phba->pport->work_port_events |= WORKER_MBOX_TMO;
4592 spin_unlock_irqrestore(&phba->pport->work_port_lock, iflag);
4593
4594 if (!tmo_posted)
4595 lpfc_worker_wake_up(phba);
4596 return;
4597}
4598
4599
4600
4601
4602
4603
4604
4605
4606
4607
4608void
4609lpfc_mbox_timeout_handler(struct lpfc_hba *phba)
4610{
4611 LPFC_MBOXQ_t *pmbox = phba->sli.mbox_active;
4612 MAILBOX_t *mb = &pmbox->u.mb;
4613 struct lpfc_sli *psli = &phba->sli;
4614 struct lpfc_sli_ring *pring;
4615
4616
4617
4618
4619
4620
4621 spin_lock_irq(&phba->hbalock);
4622 if (pmbox == NULL) {
4623 lpfc_printf_log(phba, KERN_WARNING,
4624 LOG_MBOX | LOG_SLI,
4625 "0353 Active Mailbox cleared - mailbox timeout "
4626 "exiting\n");
4627 spin_unlock_irq(&phba->hbalock);
4628 return;
4629 }
4630
4631
4632 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
4633 "0310 Mailbox command x%x timeout Data: x%x x%x x%p\n",
4634 mb->mbxCommand,
4635 phba->pport->port_state,
4636 phba->sli.sli_flag,
4637 phba->sli.mbox_active);
4638 spin_unlock_irq(&phba->hbalock);
4639
4640
4641
4642
4643
4644 spin_lock_irq(&phba->pport->work_port_lock);
4645 phba->pport->work_port_events &= ~WORKER_MBOX_TMO;
4646 spin_unlock_irq(&phba->pport->work_port_lock);
4647 spin_lock_irq(&phba->hbalock);
4648 phba->link_state = LPFC_LINK_UNKNOWN;
4649 psli->sli_flag &= ~LPFC_SLI_ACTIVE;
4650 spin_unlock_irq(&phba->hbalock);
4651
4652 pring = &psli->ring[psli->fcp_ring];
4653 lpfc_sli_abort_iocb_ring(phba, pring);
4654
4655 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
4656 "0345 Resetting board due to mailbox timeout\n");
4657
4658
4659 lpfc_reset_hba(phba);
4660}
4661
4662
4663
4664
4665
4666
4667
4668
4669
4670
4671
4672
4673
4674
4675
4676
4677
4678
4679
4680
4681
4682
4683
4684
4685
4686
4687
/**
 * lpfc_sli_issue_mbox_s3 - Issue an SLI-3 mailbox command to firmware
 * @phba: Pointer to HBA context object.
 * @pmbox: Pointer to mailbox object, or NULL to pick up the next queued
 *         mailbox command (completion-path usage).
 * @flag: MBX_POLL to busy-wait for completion here, MBX_NOWAIT to post
 *        the command and complete it asynchronously via interrupt.
 *
 * Posts the command either through the host-memory mailbox (when
 * LPFC_SLI_ACTIVE, i.e. SLI2/3 mode is up) or directly through SLIM.
 * In MBX_POLL mode the function spins on the ownership bit and host
 * attention register, periodically dropping hbalock to sleep, until
 * the chip completes the command or the per-command timeout expires.
 *
 * Return: MBX_SUCCESS, MBX_BUSY (queued behind an active command),
 * MBX_NOT_FINISHED on failure, or the chip's mailbox status when
 * polling.
 **/
static int
lpfc_sli_issue_mbox_s3(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmbox,
		       uint32_t flag)
{
	MAILBOX_t *mb;
	struct lpfc_sli *psli = &phba->sli;
	uint32_t status, evtctr;
	uint32_t ha_copy;
	int i;
	unsigned long timeout;
	unsigned long drvr_flag = 0;
	uint32_t word0, ldata;
	void __iomem *to_slim;
	int processing_queue = 0;

	spin_lock_irqsave(&phba->hbalock, drvr_flag);
	if (!pmbox) {
		phba->sli.sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
		/* processing mbox queue from intr_handler: bail out when
		 * async mailbox posting is blocked.
		 */
		if (unlikely(psli->sli_flag & LPFC_SLI_ASYNC_MBX_BLK)) {
			spin_unlock_irqrestore(&phba->hbalock, drvr_flag);
			return MBX_SUCCESS;
		}
		processing_queue = 1;
		pmbox = lpfc_mbox_get(phba);
		if (!pmbox) {
			/* nothing queued - nothing to do */
			spin_unlock_irqrestore(&phba->hbalock, drvr_flag);
			return MBX_SUCCESS;
		}
	}

	/* Commands with a non-default completion handler need a vport so
	 * the handler has proper context; fail loudly otherwise.
	 */
	if (pmbox->mbox_cmpl && pmbox->mbox_cmpl != lpfc_sli_def_mbox_cmpl &&
	    pmbox->mbox_cmpl != lpfc_sli_wake_mbox_wait) {
		if(!pmbox->vport) {
			spin_unlock_irqrestore(&phba->hbalock, drvr_flag);
			lpfc_printf_log(phba, KERN_ERR,
					LOG_MBOX | LOG_VPORT,
					"1806 Mbox x%x failed. No vport\n",
					pmbox->u.mb.mbxCommand);
			dump_stack();
			goto out_not_finished;
		}
	}

	/* If the PCI channel is in offline state, do not post mbox. */
	if (unlikely(pci_channel_offline(phba->pcidev))) {
		spin_unlock_irqrestore(&phba->hbalock, drvr_flag);
		goto out_not_finished;
	}

	/* If HBA has a deferred error attention, fail the command. */
	if (unlikely(phba->hba_flag & DEFER_ERATT)) {
		spin_unlock_irqrestore(&phba->hbalock, drvr_flag);
		goto out_not_finished;
	}

	psli = &phba->sli;

	mb = &pmbox->u.mb;
	status = MBX_SUCCESS;

	if (phba->link_state == LPFC_HBA_ERROR) {
		spin_unlock_irqrestore(&phba->hbalock, drvr_flag);

		/* Mbox command <mbxCommand> cannot issue */
		lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
				"(%d):0311 Mailbox command x%x cannot "
				"issue Data: x%x x%x\n",
				pmbox->vport ? pmbox->vport->vpi : 0,
				pmbox->u.mb.mbxCommand, psli->sli_flag, flag);
		goto out_not_finished;
	}

	/* NOWAIT commands need the mailbox interrupt enabled; KILL_BOARD
	 * is exempt because it is issued during shutdown/error recovery.
	 */
	if (mb->mbxCommand != MBX_KILL_BOARD && flag & MBX_NOWAIT &&
	    !(readl(phba->HCregaddr) & HC_MBINT_ENA)) {
		spin_unlock_irqrestore(&phba->hbalock, drvr_flag);
		lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
				"(%d):2528 Mailbox command x%x cannot "
				"issue Data: x%x x%x\n",
				pmbox->vport ? pmbox->vport->vpi : 0,
				pmbox->u.mb.mbxCommand, psli->sli_flag, flag);
		goto out_not_finished;
	}

	if (psli->sli_flag & LPFC_SLI_MBOX_ACTIVE) {
		/* Polling for a mbox command when another one is already
		 * active is not allowed in SLI.  Also, the driver must
		 * have established SLI2 mode to queue and process another
		 * mailbox command request.
		 */
		if (flag & MBX_POLL) {
			spin_unlock_irqrestore(&phba->hbalock, drvr_flag);

			/* Mbox command <mbxCommand> cannot issue */
			lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
					"(%d):2529 Mailbox command x%x "
					"cannot issue Data: x%x x%x\n",
					pmbox->vport ? pmbox->vport->vpi : 0,
					pmbox->u.mb.mbxCommand,
					psli->sli_flag, flag);
			goto out_not_finished;
		}

		if (!(psli->sli_flag & LPFC_SLI_ACTIVE)) {
			spin_unlock_irqrestore(&phba->hbalock, drvr_flag);
			/* Mbox command <mbxCommand> cannot issue */
			lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
					"(%d):2530 Mailbox command x%x "
					"cannot issue Data: x%x x%x\n",
					pmbox->vport ? pmbox->vport->vpi : 0,
					pmbox->u.mb.mbxCommand,
					psli->sli_flag, flag);
			goto out_not_finished;
		}

		/* Another mailbox command is still being processed; queue
		 * this one to be issued later from the completion path.
		 */
		lpfc_mbox_put(phba, pmbox);

		/* Mbox cmd issue - BUSY */
		lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI,
				"(%d):0308 Mbox cmd issue - BUSY Data: "
				"x%x x%x x%x x%x\n",
				pmbox->vport ? pmbox->vport->vpi : 0xffffff,
				mb->mbxCommand, phba->pport->port_state,
				psli->sli_flag, flag);

		psli->slistat.mbox_busy++;
		spin_unlock_irqrestore(&phba->hbalock, drvr_flag);

		if (pmbox->vport) {
			lpfc_debugfs_disc_trc(pmbox->vport,
				LPFC_DISC_TRC_MBOX_VPORT,
				"MBOX Bsy vport: cmd:x%x mb:x%x x%x",
				(uint32_t)mb->mbxCommand,
				mb->un.varWords[0], mb->un.varWords[1]);
		}
		else {
			lpfc_debugfs_disc_trc(phba->pport,
				LPFC_DISC_TRC_MBOX,
				"MBOX Bsy: cmd:x%x mb:x%x x%x",
				(uint32_t)mb->mbxCommand,
				mb->un.varWords[0], mb->un.varWords[1]);
		}

		return MBX_BUSY;
	}

	psli->sli_flag |= LPFC_SLI_MBOX_ACTIVE;

	/* If we are not polling, we MUST be in SLI2 mode (except for
	 * KILL_BOARD), and an async command gets a completion timer.
	 */
	if (flag != MBX_POLL) {
		if (!(psli->sli_flag & LPFC_SLI_ACTIVE) &&
		    (mb->mbxCommand != MBX_KILL_BOARD)) {
			psli->sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
			spin_unlock_irqrestore(&phba->hbalock, drvr_flag);
			/* Mbox command <mbxCommand> cannot issue */
			lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
					"(%d):2531 Mailbox command x%x "
					"cannot issue Data: x%x x%x\n",
					pmbox->vport ? pmbox->vport->vpi : 0,
					pmbox->u.mb.mbxCommand,
					psli->sli_flag, flag);
			goto out_not_finished;
		}
		/* timeout active mbox command */
		mod_timer(&psli->mbox_tmo, (jiffies +
			       (HZ * lpfc_mbox_tmo_val(phba, mb->mbxCommand))));
	}

	/* Mailbox cmd <cmd> issue */
	lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI,
			"(%d):0309 Mailbox cmd x%x issue Data: x%x x%x "
			"x%x\n",
			pmbox->vport ? pmbox->vport->vpi : 0,
			mb->mbxCommand, phba->pport->port_state,
			psli->sli_flag, flag);

	if (mb->mbxCommand != MBX_HEARTBEAT) {
		if (pmbox->vport) {
			lpfc_debugfs_disc_trc(pmbox->vport,
				LPFC_DISC_TRC_MBOX_VPORT,
				"MBOX Send vport: cmd:x%x mb:x%x x%x",
				(uint32_t)mb->mbxCommand,
				mb->un.varWords[0], mb->un.varWords[1]);
		}
		else {
			lpfc_debugfs_disc_trc(phba->pport,
				LPFC_DISC_TRC_MBOX,
				"MBOX Send: cmd:x%x mb:x%x x%x",
				(uint32_t)mb->mbxCommand,
				mb->un.varWords[0], mb->un.varWords[1]);
		}
	}

	psli->slistat.mbox_cmd++;
	evtctr = psli->slistat.mbox_event;

	/* next set own bit for the adapter and copy over command word */
	mb->mbxOwner = OWN_CHIP;

	if (psli->sli_flag & LPFC_SLI_ACTIVE) {
		/* Host mailbox mode: copy command data to host SLIM area */
		lpfc_sli_pcimem_bcopy(mb, phba->mbox, MAILBOX_CMD_SIZE);
	} else {
		if (mb->mbxCommand == MBX_CONFIG_PORT) {
			/* copy command data into host mbox for cmpl */
			lpfc_sli_pcimem_bcopy(mb, phba->mbox, MAILBOX_CMD_SIZE);
		}

		/* Copy command data to HBA SLIM, skipping the first word:
		 * writing word0 last (with mbxOwner set) hands ownership
		 * to the chip only after the payload is in place.
		 */
		to_slim = phba->MBslimaddr + sizeof (uint32_t);
		lpfc_memcpy_to_slim(to_slim, &mb->un.varWords[0],
			    MAILBOX_CMD_SIZE - sizeof (uint32_t));

		/* Next copy over first word, with mbxOwner set */
		ldata = *((uint32_t *)mb);
		to_slim = phba->MBslimaddr;
		writel(ldata, to_slim);
		readl(to_slim); /* flush the posted write */

		if (mb->mbxCommand == MBX_CONFIG_PORT) {
			/* switch over to host mailbox */
			psli->sli_flag |= LPFC_SLI_ACTIVE;
		}
	}

	wmb();

	switch (flag) {
	case MBX_NOWAIT:
		/* Set up reference to mailbox command */
		psli->mbox_active = pmbox;
		/* Interrupt board to do it */
		writel(CA_MBATT, phba->CAregaddr);
		readl(phba->CAregaddr); /* flush */
		/* Don't wait for it to finish, just return */
		break;

	case MBX_POLL:
		/* No active reference while polling */
		psli->mbox_active = NULL;
		/* Interrupt board to do it */
		writel(CA_MBATT, phba->CAregaddr);
		readl(phba->CAregaddr); /* flush */

		if (psli->sli_flag & LPFC_SLI_ACTIVE) {
			/* First read mbox status word from host memory */
			word0 = *((uint32_t *)phba->mbox);
			word0 = le32_to_cpu(word0);
		} else {
			/* First read mbox status word from SLIM */
			word0 = readl(phba->MBslimaddr);
		}

		/* Read the HBA Host Attention Register */
		ha_copy = readl(phba->HAregaddr);
		timeout = msecs_to_jiffies(lpfc_mbox_tmo_val(phba,
							     mb->mbxCommand) *
					   1000) + jiffies;
		i = 0;
		/* Spin until the chip drops ownership and raises the
		 * mailbox attention bit, or the timeout expires.
		 */
		while (((word0 & OWN_CHIP) == OWN_CHIP) ||
		       (!(ha_copy & HA_MBATT) &&
			(phba->link_state > LPFC_WARM_START))) {
			if (time_after(jiffies, timeout)) {
				psli->sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
				spin_unlock_irqrestore(&phba->hbalock,
						       drvr_flag);
				goto out_not_finished;
			}

			/* Check if we took a mbox interrupt while we were
			 * polling (completion handler bumped mbox_event).
			 */
			if (((word0 & OWN_CHIP) != OWN_CHIP)
			    && (evtctr != psli->slistat.mbox_event))
				break;

			/* After ~10 tight iterations, drop the lock and
			 * sleep briefly so we don't hog the CPU.
			 */
			if (i++ > 10) {
				spin_unlock_irqrestore(&phba->hbalock,
						       drvr_flag);
				msleep(1);
				spin_lock_irqsave(&phba->hbalock, drvr_flag);
			}

			if (psli->sli_flag & LPFC_SLI_ACTIVE) {
				/* Re-read status from host mailbox */
				word0 = *((uint32_t *)phba->mbox);
				word0 = le32_to_cpu(word0);
				if (mb->mbxCommand == MBX_CONFIG_PORT) {
					MAILBOX_t *slimmb;
					uint32_t slimword0;
					/* Check real SLIM for any errors;
					 * on error fall back out of host
					 * mailbox mode.
					 */
					slimword0 = readl(phba->MBslimaddr);
					slimmb = (MAILBOX_t *) & slimword0;
					if (((slimword0 & OWN_CHIP) != OWN_CHIP)
					    && slimmb->mbxStatus) {
						psli->sli_flag &=
						    ~LPFC_SLI_ACTIVE;
						word0 = slimword0;
					}
				}
			} else {
				/* Re-read status from SLIM */
				word0 = readl(phba->MBslimaddr);
			}
			/* Re-read the HBA Host Attention Register */
			ha_copy = readl(phba->HAregaddr);
		}

		if (psli->sli_flag & LPFC_SLI_ACTIVE) {
			/* copy results back to the caller's mailbox */
			lpfc_sli_pcimem_bcopy(phba->mbox, mb, MAILBOX_CMD_SIZE);
		} else {
			/* copy results back from SLIM */
			lpfc_memcpy_from_slim(mb, phba->MBslimaddr,
							MAILBOX_CMD_SIZE);
			if ((mb->mbxCommand == MBX_DUMP_MEMORY) &&
				pmbox->context2) {
				lpfc_memcpy_from_slim((void *)pmbox->context2,
				      phba->MBslimaddr + DMP_RSP_OFFSET,
						      mb->un.varDmp.word_cnt);
			}
		}

		/* Acknowledge the mailbox attention */
		writel(HA_MBATT, phba->HAregaddr);
		readl(phba->HAregaddr); /* flush */

		psli->sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
		status = mb->mbxStatus;
	}

	spin_unlock_irqrestore(&phba->hbalock, drvr_flag);
	return status;

out_not_finished:
	if (processing_queue) {
		/* Fail the command back through its completion handler */
		pmbox->u.mb.mbxStatus = MBX_NOT_FINISHED;
		lpfc_mbox_cmpl_put(phba, pmbox);
	}
	return MBX_NOT_FINISHED;
}
5032
5033
5034
5035
5036
5037
5038
5039
5040
5041
5042
5043
5044
5045static int
5046lpfc_sli4_async_mbox_block(struct lpfc_hba *phba)
5047{
5048 struct lpfc_sli *psli = &phba->sli;
5049 uint8_t actcmd = MBX_HEARTBEAT;
5050 int rc = 0;
5051 unsigned long timeout;
5052
5053
5054 spin_lock_irq(&phba->hbalock);
5055 psli->sli_flag |= LPFC_SLI_ASYNC_MBX_BLK;
5056 if (phba->sli.mbox_active)
5057 actcmd = phba->sli.mbox_active->u.mb.mbxCommand;
5058 spin_unlock_irq(&phba->hbalock);
5059
5060
5061
5062 timeout = msecs_to_jiffies(lpfc_mbox_tmo_val(phba, actcmd) * 1000) +
5063 jiffies;
5064
5065 while (phba->sli.mbox_active) {
5066
5067 msleep(2);
5068 if (time_after(jiffies, timeout)) {
5069
5070 rc = 1;
5071 break;
5072 }
5073 }
5074
5075
5076 if (rc) {
5077 spin_lock_irq(&phba->hbalock);
5078 psli->sli_flag &= ~LPFC_SLI_ASYNC_MBX_BLK;
5079 spin_unlock_irq(&phba->hbalock);
5080 }
5081 return rc;
5082}
5083
5084
5085
5086
5087
5088
5089
5090
5091
5092
5093
5094
5095static void
5096lpfc_sli4_async_mbox_unblock(struct lpfc_hba *phba)
5097{
5098 struct lpfc_sli *psli = &phba->sli;
5099
5100 spin_lock_irq(&phba->hbalock);
5101 if (!(psli->sli_flag & LPFC_SLI_ASYNC_MBX_BLK)) {
5102
5103 spin_unlock_irq(&phba->hbalock);
5104 return;
5105 }
5106
5107
5108
5109
5110
5111
5112 psli->sli_flag &= ~LPFC_SLI_ASYNC_MBX_BLK;
5113 spin_unlock_irq(&phba->hbalock);
5114
5115
5116 lpfc_worker_wake_up(phba);
5117}
5118
5119
5120
5121
5122
5123
5124
5125
5126
5127
5128
5129
5130
5131
5132
5133
5134
5135static int
5136lpfc_sli4_post_sync_mbox(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
5137{
5138 int rc = MBX_SUCCESS;
5139 unsigned long iflag;
5140 uint32_t db_ready;
5141 uint32_t mcqe_status;
5142 uint32_t mbx_cmnd;
5143 unsigned long timeout;
5144 struct lpfc_sli *psli = &phba->sli;
5145 struct lpfc_mqe *mb = &mboxq->u.mqe;
5146 struct lpfc_bmbx_create *mbox_rgn;
5147 struct dma_address *dma_address;
5148 struct lpfc_register bmbx_reg;
5149
5150
5151
5152
5153
5154 spin_lock_irqsave(&phba->hbalock, iflag);
5155 if (psli->sli_flag & LPFC_SLI_MBOX_ACTIVE) {
5156 spin_unlock_irqrestore(&phba->hbalock, iflag);
5157 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
5158 "(%d):2532 Mailbox command x%x (x%x) "
5159 "cannot issue Data: x%x x%x\n",
5160 mboxq->vport ? mboxq->vport->vpi : 0,
5161 mboxq->u.mb.mbxCommand,
5162 lpfc_sli4_mbox_opcode_get(phba, mboxq),
5163 psli->sli_flag, MBX_POLL);
5164 return MBXERR_ERROR;
5165 }
5166
5167 psli->sli_flag |= LPFC_SLI_MBOX_ACTIVE;
5168 phba->sli.mbox_active = mboxq;
5169 spin_unlock_irqrestore(&phba->hbalock, iflag);
5170
5171
5172
5173
5174
5175
5176 mbx_cmnd = bf_get(lpfc_mqe_command, mb);
5177 memset(phba->sli4_hba.bmbx.avirt, 0, sizeof(struct lpfc_bmbx_create));
5178 lpfc_sli_pcimem_bcopy(mb, phba->sli4_hba.bmbx.avirt,
5179 sizeof(struct lpfc_mqe));
5180
5181
5182 dma_address = &phba->sli4_hba.bmbx.dma_address;
5183 writel(dma_address->addr_hi, phba->sli4_hba.BMBXregaddr);
5184
5185 timeout = msecs_to_jiffies(lpfc_mbox_tmo_val(phba, mbx_cmnd)
5186 * 1000) + jiffies;
5187 do {
5188 bmbx_reg.word0 = readl(phba->sli4_hba.BMBXregaddr);
5189 db_ready = bf_get(lpfc_bmbx_rdy, &bmbx_reg);
5190 if (!db_ready)
5191 msleep(2);
5192
5193 if (time_after(jiffies, timeout)) {
5194 rc = MBXERR_ERROR;
5195 goto exit;
5196 }
5197 } while (!db_ready);
5198
5199
5200 writel(dma_address->addr_lo, phba->sli4_hba.BMBXregaddr);
5201 timeout = msecs_to_jiffies(lpfc_mbox_tmo_val(phba, mbx_cmnd)
5202 * 1000) + jiffies;
5203 do {
5204 bmbx_reg.word0 = readl(phba->sli4_hba.BMBXregaddr);
5205 db_ready = bf_get(lpfc_bmbx_rdy, &bmbx_reg);
5206 if (!db_ready)
5207 msleep(2);
5208
5209 if (time_after(jiffies, timeout)) {
5210 rc = MBXERR_ERROR;
5211 goto exit;
5212 }
5213 } while (!db_ready);
5214
5215
5216
5217
5218
5219
5220 lpfc_sli_pcimem_bcopy(phba->sli4_hba.bmbx.avirt, mb,
5221 sizeof(struct lpfc_mqe));
5222 mbox_rgn = (struct lpfc_bmbx_create *) phba->sli4_hba.bmbx.avirt;
5223 lpfc_sli_pcimem_bcopy(&mbox_rgn->mcqe, &mboxq->mcqe,
5224 sizeof(struct lpfc_mcqe));
5225 mcqe_status = bf_get(lpfc_mcqe_status, &mbox_rgn->mcqe);
5226
5227
5228 if (mcqe_status != MB_CQE_STATUS_SUCCESS) {
5229 bf_set(lpfc_mqe_status, mb, LPFC_MBX_ERROR_RANGE | mcqe_status);
5230 rc = MBXERR_ERROR;
5231 }
5232
5233 lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI,
5234 "(%d):0356 Mailbox cmd x%x (x%x) Status x%x "
5235 "Data: x%x x%x x%x x%x x%x x%x x%x x%x x%x x%x x%x"
5236 " x%x x%x CQ: x%x x%x x%x x%x\n",
5237 mboxq->vport ? mboxq->vport->vpi : 0,
5238 mbx_cmnd, lpfc_sli4_mbox_opcode_get(phba, mboxq),
5239 bf_get(lpfc_mqe_status, mb),
5240 mb->un.mb_words[0], mb->un.mb_words[1],
5241 mb->un.mb_words[2], mb->un.mb_words[3],
5242 mb->un.mb_words[4], mb->un.mb_words[5],
5243 mb->un.mb_words[6], mb->un.mb_words[7],
5244 mb->un.mb_words[8], mb->un.mb_words[9],
5245 mb->un.mb_words[10], mb->un.mb_words[11],
5246 mb->un.mb_words[12], mboxq->mcqe.word0,
5247 mboxq->mcqe.mcqe_tag0, mboxq->mcqe.mcqe_tag1,
5248 mboxq->mcqe.trailer);
5249exit:
5250
5251 spin_lock_irqsave(&phba->hbalock, iflag);
5252 psli->sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
5253 phba->sli.mbox_active = NULL;
5254 spin_unlock_irqrestore(&phba->hbalock, iflag);
5255 return rc;
5256}
5257
5258
5259
5260
5261
5262
5263
5264
5265
5266
5267
5268
5269
/**
 * lpfc_sli_issue_mbox_s4 - Issue an SLI-4 mailbox command to firmware
 * @phba: Pointer to HBA context object.
 * @mboxq: Pointer to mailbox object.
 * @flag: MBX_POLL for synchronous issue, MBX_NOWAIT to enqueue for the
 *        asynchronous mailbox path.
 *
 * With interrupts disabled (early init), MBX_POLL commands go straight
 * through the bootstrap mailbox.  With interrupts enabled, MBX_POLL
 * first blocks the async mailbox path, posts synchronously, then
 * unblocks; MBX_NOWAIT commands are validated, queued, and the worker
 * thread is woken to post them.
 *
 * Return: MBX_SUCCESS/mailbox status, MBX_BUSY when queued, or
 * MBX_NOT_FINISHED / negative errno on failure.
 **/
static int
lpfc_sli_issue_mbox_s4(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq,
		       uint32_t flag)
{
	struct lpfc_sli *psli = &phba->sli;
	unsigned long iflags;
	int rc;

	rc = lpfc_mbox_dev_check(phba);
	if (unlikely(rc)) {
		/* Device not ready (offline/error) - refuse the command */
		lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
				"(%d):2544 Mailbox command x%x (x%x) "
				"cannot issue Data: x%x x%x\n",
				mboxq->vport ? mboxq->vport->vpi : 0,
				mboxq->u.mb.mbxCommand,
				lpfc_sli4_mbox_opcode_get(phba, mboxq),
				psli->sli_flag, flag);
		goto out_not_finished;
	}

	/* Detect polling mode */
	if (!phba->sli4_hba.intr_enable) {
		/* Interrupts not yet enabled: only synchronous posting
		 * through the bootstrap mailbox is possible.
		 */
		if (flag == MBX_POLL)
			rc = lpfc_sli4_post_sync_mbox(phba, mboxq);
		else
			rc = -EIO;
		if (rc != MBX_SUCCESS)
			lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
					"(%d):2541 Mailbox command x%x "
					"(x%x) cannot issue Data: x%x x%x\n",
					mboxq->vport ? mboxq->vport->vpi : 0,
					mboxq->u.mb.mbxCommand,
					lpfc_sli4_mbox_opcode_get(phba, mboxq),
					psli->sli_flag, flag);
		return rc;
	} else if (flag == MBX_POLL) {
		/* Synchronous issue while the async path is live: block
		 * async posting first, post, then unblock.
		 */
		lpfc_printf_log(phba, KERN_WARNING, LOG_MBOX | LOG_SLI,
				"(%d):2542 Try to issue mailbox command "
				"x%x (x%x) synchronously ahead of async"
				"mailbox command queue: x%x x%x\n",
				mboxq->vport ? mboxq->vport->vpi : 0,
				mboxq->u.mb.mbxCommand,
				lpfc_sli4_mbox_opcode_get(phba, mboxq),
				psli->sli_flag, flag);
		/* Try to block the asynchronous mailbox posting */
		rc = lpfc_sli4_async_mbox_block(phba);
		if (!rc) {
			/* Successfully blocked, now issue sync mbox cmd */
			rc = lpfc_sli4_post_sync_mbox(phba, mboxq);
			if (rc != MBX_SUCCESS)
				lpfc_printf_log(phba, KERN_ERR,
						LOG_MBOX | LOG_SLI,
						"(%d):2597 Mailbox command "
						"x%x (x%x) cannot issue "
						"Data: x%x x%x\n",
						mboxq->vport ?
						mboxq->vport->vpi : 0,
						mboxq->u.mb.mbxCommand,
						lpfc_sli4_mbox_opcode_get(phba,
								mboxq),
						psli->sli_flag, flag);
			/* Unblock the async mailbox posting afterward */
			lpfc_sli4_async_mbox_unblock(phba);
		}
		return rc;
	}

	/* Asynchronous path: validate the command before queueing it. */
	rc = lpfc_mbox_cmd_check(phba, mboxq);
	if (rc) {
		lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
				"(%d):2543 Mailbox command x%x (x%x) "
				"cannot issue Data: x%x x%x\n",
				mboxq->vport ? mboxq->vport->vpi : 0,
				mboxq->u.mb.mbxCommand,
				lpfc_sli4_mbox_opcode_get(phba, mboxq),
				psli->sli_flag, flag);
		goto out_not_finished;
	}

	/* Put the mailbox command to the driver internal FIFO */
	psli->slistat.mbox_busy++;
	spin_lock_irqsave(&phba->hbalock, iflags);
	lpfc_mbox_put(phba, mboxq);
	spin_unlock_irqrestore(&phba->hbalock, iflags);
	lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI,
			"(%d):0354 Mbox cmd issue - Enqueue Data: "
			"x%x (x%x) x%x x%x x%x\n",
			mboxq->vport ? mboxq->vport->vpi : 0xffffff,
			bf_get(lpfc_mqe_command, &mboxq->u.mqe),
			lpfc_sli4_mbox_opcode_get(phba, mboxq),
			phba->pport->port_state,
			psli->sli_flag, MBX_NOWAIT);
	/* Wake up worker thread to transport mailbox command from head */
	lpfc_worker_wake_up(phba);

	return MBX_BUSY;

out_not_finished:
	return MBX_NOT_FINISHED;
}
5371
5372
5373
5374
5375
5376
5377
5378
5379
/**
 * lpfc_sli4_post_async_mbox - Post the next queued mailbox asynchronously
 * @phba: Pointer to HBA context object.
 *
 * Called from the worker thread to take the next mailbox command off
 * the driver's internal queue and post it to the SLI-4 mailbox work
 * queue.  Refuses to run when interrupts are disabled, when async
 * posting is blocked, or when a command is already active.
 *
 * Return: MBX_SUCCESS when a command was posted (or the queue was
 * empty), MBX_NOT_FINISHED otherwise.
 **/
int
lpfc_sli4_post_async_mbox(struct lpfc_hba *phba)
{
	struct lpfc_sli *psli = &phba->sli;
	LPFC_MBOXQ_t *mboxq;
	int rc = MBX_SUCCESS;
	unsigned long iflags;
	struct lpfc_mqe *mqe;
	uint32_t mbx_cmnd;

	/* Async posting needs the interrupt path to deliver completion. */
	if (unlikely(!phba->sli4_hba.intr_enable))
		return MBX_NOT_FINISHED;

	/* Check whether asynchronous mailbox posting is blocked. */
	spin_lock_irqsave(&phba->hbalock, iflags);
	if (unlikely(psli->sli_flag & LPFC_SLI_ASYNC_MBX_BLK)) {
		spin_unlock_irqrestore(&phba->hbalock, iflags);
		return MBX_NOT_FINISHED;
	}
	if (psli->sli_flag & LPFC_SLI_MBOX_ACTIVE) {
		spin_unlock_irqrestore(&phba->hbalock, iflags);
		return MBX_NOT_FINISHED;
	}
	if (unlikely(phba->sli.mbox_active)) {
		spin_unlock_irqrestore(&phba->hbalock, iflags);
		lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
				"0384 There is pending active mailbox cmd\n");
		return MBX_NOT_FINISHED;
	}
	/* Take the mailbox command token */
	psli->sli_flag |= LPFC_SLI_MBOX_ACTIVE;

	/* Get the next mailbox command from head of queue */
	mboxq = lpfc_mbox_get(phba);

	/* If no more mailbox command waiting for post, we're done */
	if (!mboxq) {
		psli->sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
		spin_unlock_irqrestore(&phba->hbalock, iflags);
		return MBX_SUCCESS;
	}
	phba->sli.mbox_active = mboxq;
	spin_unlock_irqrestore(&phba->hbalock, iflags);

	/* Check device readiness for posting mailbox command */
	rc = lpfc_mbox_dev_check(phba);
	if (unlikely(rc))
		/* Driver clean routine will clean up pending mailbox */
		goto out_not_finished;

	/* Prepare the mbox command for posting */
	mqe = &mboxq->u.mqe;
	mbx_cmnd = bf_get(lpfc_mqe_command, mqe);

	/* Start timer for the mbox_tmo and log some mailbox post messages */
	mod_timer(&psli->mbox_tmo, (jiffies +
		  (HZ * lpfc_mbox_tmo_val(phba, mbx_cmnd))));

	lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI,
			"(%d):0355 Mailbox cmd x%x (x%x) issue Data: "
			"x%x x%x\n",
			mboxq->vport ? mboxq->vport->vpi : 0, mbx_cmnd,
			lpfc_sli4_mbox_opcode_get(phba, mboxq),
			phba->pport->port_state, psli->sli_flag);

	if (mbx_cmnd != MBX_HEARTBEAT) {
		if (mboxq->vport) {
			lpfc_debugfs_disc_trc(mboxq->vport,
				LPFC_DISC_TRC_MBOX_VPORT,
				"MBOX Send vport: cmd:x%x mb:x%x x%x",
				mbx_cmnd, mqe->un.mb_words[0],
				mqe->un.mb_words[1]);
		} else {
			lpfc_debugfs_disc_trc(phba->pport,
				LPFC_DISC_TRC_MBOX,
				"MBOX Send: cmd:x%x mb:x%x x%x",
				mbx_cmnd, mqe->un.mb_words[0],
				mqe->un.mb_words[1]);
		}
	}
	psli->slistat.mbox_cmd++;

	/* Post the mailbox command to the port */
	rc = lpfc_sli4_mq_put(phba->sli4_hba.mbx_wq, mqe);
	if (rc != MBX_SUCCESS) {
		lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
				"(%d):2533 Mailbox command x%x (x%x) "
				"cannot issue Data: x%x x%x\n",
				mboxq->vport ? mboxq->vport->vpi : 0,
				mboxq->u.mb.mbxCommand,
				lpfc_sli4_mbox_opcode_get(phba, mboxq),
				psli->sli_flag, MBX_NOWAIT);
		goto out_not_finished;
	}

	return rc;

out_not_finished:
	/* Fail the command back and release the mailbox token. */
	spin_lock_irqsave(&phba->hbalock, iflags);
	mboxq->u.mb.mbxStatus = MBX_NOT_FINISHED;
	__lpfc_mbox_cmpl_put(phba, mboxq);
	/* Release the token */
	psli->sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
	phba->sli.mbox_active = NULL;
	spin_unlock_irqrestore(&phba->hbalock, iflags);

	return MBX_NOT_FINISHED;
}
5489
5490
5491
5492
5493
5494
5495
5496
5497
5498
5499
5500
5501
/**
 * lpfc_sli_issue_mbox - Wrapper for issuing a mailbox command
 * @phba: Pointer to HBA context object.
 * @pmbox: Pointer to mailbox object.
 * @flag: MBX_POLL or MBX_NOWAIT issue mode, passed through.
 *
 * Dispatches to the SLI-revision-specific implementation (the _s3 or
 * _s4 variant) installed by lpfc_mbox_api_table_setup().
 **/
int
lpfc_sli_issue_mbox(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmbox, uint32_t flag)
{
	return phba->lpfc_sli_issue_mbox(phba, pmbox, flag);
}
5507
5508
5509
5510
5511
5512
5513
5514
5515
5516
5517int
5518lpfc_mbox_api_table_setup(struct lpfc_hba *phba, uint8_t dev_grp)
5519{
5520
5521 switch (dev_grp) {
5522 case LPFC_PCI_DEV_LP:
5523 phba->lpfc_sli_issue_mbox = lpfc_sli_issue_mbox_s3;
5524 phba->lpfc_sli_handle_slow_ring_event =
5525 lpfc_sli_handle_slow_ring_event_s3;
5526 phba->lpfc_sli_hbq_to_firmware = lpfc_sli_hbq_to_firmware_s3;
5527 phba->lpfc_sli_brdrestart = lpfc_sli_brdrestart_s3;
5528 phba->lpfc_sli_brdready = lpfc_sli_brdready_s3;
5529 break;
5530 case LPFC_PCI_DEV_OC:
5531 phba->lpfc_sli_issue_mbox = lpfc_sli_issue_mbox_s4;
5532 phba->lpfc_sli_handle_slow_ring_event =
5533 lpfc_sli_handle_slow_ring_event_s4;
5534 phba->lpfc_sli_hbq_to_firmware = lpfc_sli_hbq_to_firmware_s4;
5535 phba->lpfc_sli_brdrestart = lpfc_sli_brdrestart_s4;
5536 phba->lpfc_sli_brdready = lpfc_sli_brdready_s4;
5537 break;
5538 default:
5539 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
5540 "1420 Invalid HBA PCI-device group: 0x%x\n",
5541 dev_grp);
5542 return -ENODEV;
5543 break;
5544 }
5545 return 0;
5546}
5547
5548
5549
5550
5551
5552
5553
5554
5555
5556
5557
5558static void
5559__lpfc_sli_ringtx_put(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
5560 struct lpfc_iocbq *piocb)
5561{
5562
5563 list_add_tail(&piocb->list, &pring->txq);
5564 pring->txq_cnt++;
5565}
5566
5567
5568
5569
5570
5571
5572
5573
5574
5575
5576
5577
5578
5579
5580
5581
5582
5583
5584static struct lpfc_iocbq *
5585lpfc_sli_next_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
5586 struct lpfc_iocbq **piocb)
5587{
5588 struct lpfc_iocbq * nextiocb;
5589
5590 nextiocb = lpfc_sli_ringtx_get(phba, pring);
5591 if (!nextiocb) {
5592 nextiocb = *piocb;
5593 *piocb = NULL;
5594 }
5595
5596 return nextiocb;
5597}
5598
5599
5600
5601
5602
5603
5604
5605
5606
5607
5608
5609
5610
5611
5612
5613
5614
5615
5616
5617
5618
5619
5620
/**
 * __lpfc_sli_issue_iocb_s3 - Issue an iocb to the SLI-3 ring (lockless)
 * @phba: Pointer to HBA context object.
 * @ring_number: SLI ring number to issue on.
 * @piocb: Pointer to the iocb to issue.
 * @flag: SLI_IOCB_RET_IOCB to return IOCB_BUSY instead of queueing
 *        when the ring is full.
 *
 * Caller is expected to hold hbalock.  Validates the adapter and link
 * state, then submits as many iocbs as the ring has room for (draining
 * the software txq first).  When the ring is full, the iocb is either
 * put on the txq (default) or IOCB_BUSY is returned (when
 * SLI_IOCB_RET_IOCB is set).
 *
 * Return: IOCB_SUCCESS, IOCB_BUSY, or IOCB_ERROR.
 **/
static int
__lpfc_sli_issue_iocb_s3(struct lpfc_hba *phba, uint32_t ring_number,
		    struct lpfc_iocbq *piocb, uint32_t flag)
{
	struct lpfc_iocbq *nextiocb;
	IOCB_t *iocb;
	struct lpfc_sli_ring *pring = &phba->sli.ring[ring_number];

	/* An iocb with a completion handler needs a vport, except the
	 * abort/close commands which carry no discovery context.
	 */
	if (piocb->iocb_cmpl && (!piocb->vport) &&
	   (piocb->iocb.ulpCommand != CMD_ABORT_XRI_CN) &&
	   (piocb->iocb.ulpCommand != CMD_CLOSE_XRI_CN)) {
		lpfc_printf_log(phba, KERN_ERR,
				LOG_SLI | LOG_VPORT,
				"1807 IOCB x%x failed. No vport\n",
				piocb->iocb.ulpCommand);
		dump_stack();
		return IOCB_ERROR;
	}


	/* If the PCI channel is in offline state, do not post iocbs. */
	if (unlikely(pci_channel_offline(phba->pcidev)))
		return IOCB_ERROR;

	/* If HBA has a deferred error attention, fail the iocb. */
	if (unlikely(phba->hba_flag & DEFER_ERATT))
		return IOCB_ERROR;

	/*
	 * We should never get an IOCB if we are in a < LINK_DOWN state
	 */
	if (unlikely(phba->link_state < LPFC_LINK_DOWN))
		return IOCB_ERROR;

	/*
	 * Check to see if we are blocking IOCB processing because of a
	 * outstanding event.
	 */
	if (unlikely(pring->flag & LPFC_STOP_IOCB_EVENT))
		goto iocb_busy;

	if (unlikely(phba->link_state == LPFC_LINK_DOWN)) {
		/*
		 * Only CREATE_XRI, CLOSE_XRI, and QUE_RING_BUF can be
		 * issued while the link is down; plus the Menlo
		 * maintenance-mode GEN_REQUEST special case.
		 */
		switch (piocb->iocb.ulpCommand) {
		case CMD_GEN_REQUEST64_CR:
		case CMD_GEN_REQUEST64_CX:
			if (!(phba->sli.sli_flag & LPFC_MENLO_MAINT) ||
				(piocb->iocb.un.genreq64.w5.hcsw.Rctl !=
					FC_FCP_CMND) ||
				(piocb->iocb.un.genreq64.w5.hcsw.Type !=
					MENLO_TRANSPORT_TYPE))

				goto iocb_busy;
			break;
		case CMD_QUE_RING_BUF_CN:
		case CMD_QUE_RING_BUF64_CN:
			/*
			 * For IOCBs, like QUE_RING_BUF, that have no rsp ring
			 * completion, iocb_cmpl MUST be 0.
			 */
			if (piocb->iocb_cmpl)
				piocb->iocb_cmpl = NULL;
			/* fallthrough */
		case CMD_CREATE_XRI_CR:
		case CMD_CLOSE_XRI_CN:
		case CMD_CLOSE_XRI_CX:
			break;
		default:
			goto iocb_busy;
		}

	/*
	 * For FCP commands, we must be in a state where we can process link
	 * attention events.
	 */
	} else if (unlikely(pring->ringno == phba->sli.fcp_ring &&
			    !(phba->sli.sli_flag & LPFC_PROCESS_LA))) {
		goto iocb_busy;
	}

	/* Submit while the ring has free slots, draining the txq first. */
	while ((iocb = lpfc_sli_next_iocb_slot(phba, pring)) &&
	       (nextiocb = lpfc_sli_next_iocb(phba, pring, &piocb)))
		lpfc_sli_submit_iocb(phba, pring, iocb, nextiocb);

	if (iocb)
		lpfc_sli_update_ring(phba, pring);
	else
		lpfc_sli_update_full_ring(phba, pring);

	if (!piocb)
		return IOCB_SUCCESS;

	goto out_busy;

 iocb_busy:
	pring->stats.iocb_cmd_delay++;

 out_busy:
	/* Unless the caller asked for the iocb back, queue it for later. */
	if (!(flag & SLI_IOCB_RET_IOCB)) {
		__lpfc_sli_ringtx_put(phba, pring, piocb);
		return IOCB_SUCCESS;
	}

	return IOCB_BUSY;
}
5730
5731
5732
5733
5734
5735
5736
5737
5738
5739
5740
5741
5742
5743
5744
5745
5746
5747
/**
 * lpfc_sli4_bpl2sgl - Convert a BPL or BDE to an SLI-4 SGL
 * @phba: Pointer to HBA context object.
 * @piocbq: Pointer to the command iocb carrying the BPL/BDE.
 * @sglq: Pointer to the SGL queue entry whose sgl will be filled.
 *
 * Translates the iocb's buffer pointer list (BUFF_TYPE_BLP_64) or its
 * single embedded BDE (BUFF_TYPE_BDE_64) into the SLI-4 scatter/gather
 * list inside @sglq, marking the final SGE with the "last" bit.
 *
 * Return: the sglq's XRI tag on success, NO_XRI when either argument
 * is NULL or the BPL virtual address is missing.
 **/
static uint16_t
lpfc_sli4_bpl2sgl(struct lpfc_hba *phba, struct lpfc_iocbq *piocbq,
		struct lpfc_sglq *sglq)
{
	uint16_t xritag = NO_XRI;
	struct ulp_bde64 *bpl = NULL;
	struct ulp_bde64 bde;
	struct sli4_sge *sgl  = NULL;
	IOCB_t *icmd;
	int numBdes = 0;
	int i = 0;

	if (!piocbq || !sglq)
		return xritag;

	sgl  = (struct sli4_sge *)sglq->sgl;
	icmd = &piocbq->iocb;
	if (icmd->un.genreq64.bdl.bdeFlags == BUFF_TYPE_BLP_64) {
		numBdes = icmd->un.genreq64.bdl.bdeSize /
				sizeof(struct ulp_bde64);
		/* The addrHigh and addrLow fields within the IOCB
		 * have not been byteswapped yet so there is no
		 * need to swap them back.
		 */
		bpl  = (struct ulp_bde64 *)
			((struct lpfc_dmabuf *)piocbq->context3)->virt;

		if (!bpl)
			return xritag;

		for (i = 0; i < numBdes; i++) {
			/* Copy BDE address into the SGE (no swap needed,
			 * see note above).
			 */
			sgl->addr_hi = bpl->addrHigh;
			sgl->addr_lo = bpl->addrLow;
			/* The tus field needs swapping before the length
			 * bits can be extracted; set the "last" flag on
			 * the final SGE only.
			 */
			bde.tus.w  = le32_to_cpu(bpl->tus.w);
			bf_set(lpfc_sli4_sge_len, sgl, bde.tus.f.bdeSize);
			if ((i+1) == numBdes)
				bf_set(lpfc_sli4_sge_last, sgl, 1);
			else
				bf_set(lpfc_sli4_sge_last, sgl, 0);
			sgl->word2 = cpu_to_le32(sgl->word2);
			sgl->word3 = cpu_to_le32(sgl->word3);
			bpl++;
			sgl++;
		}
	} else if (icmd->un.genreq64.bdl.bdeFlags == BUFF_TYPE_BDE_64) {
			/* The addrHigh and addrLow fields of the BDE have not
			 * been byteswapped yet so they need to be swapped
			 * before putting them in the sgl.
			 */
			sgl->addr_hi =
				cpu_to_le32(icmd->un.genreq64.bdl.addrHigh);
			sgl->addr_lo =
				cpu_to_le32(icmd->un.genreq64.bdl.addrLow);
			bf_set(lpfc_sli4_sge_len, sgl,
				icmd->un.genreq64.bdl.bdeSize);
			bf_set(lpfc_sli4_sge_last, sgl, 1);
			sgl->word2 = cpu_to_le32(sgl->word2);
			sgl->word3 = cpu_to_le32(sgl->word3);
	}
	return sglq->sli4_xritag;
}
5813
5814
5815
5816
5817
5818
5819
5820
5821
5822
5823
5824static uint32_t
5825lpfc_sli4_scmd_to_wqidx_distr(struct lpfc_hba *phba)
5826{
5827 ++phba->fcp_qidx;
5828 if (phba->fcp_qidx >= phba->cfg_fcp_wq_count)
5829 phba->fcp_qidx = 0;
5830
5831 return phba->fcp_qidx;
5832}
5833
5834
5835
5836
5837
5838
5839
5840
5841
5842
5843
5844
5845
5846
5847
5848static int
5849lpfc_sli4_iocb2wqe(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq,
5850 union lpfc_wqe *wqe)
5851{
5852 uint32_t payload_len = 0;
5853 uint8_t ct = 0;
5854 uint32_t fip;
5855 uint32_t abort_tag;
5856 uint8_t command_type = ELS_COMMAND_NON_FIP;
5857 uint8_t cmnd;
5858 uint16_t xritag;
5859 struct ulp_bde64 *bpl = NULL;
5860
5861 fip = bf_get(lpfc_fip_flag, &phba->sli4_hba.sli4_flags);
5862
5863 if (iocbq->iocb_flag & LPFC_IO_FCP)
5864 command_type = FCP_COMMAND;
5865 else if (fip && (iocbq->iocb_flag & LPFC_FIP_ELS))
5866 command_type = ELS_COMMAND_FIP;
5867 else
5868 command_type = ELS_COMMAND_NON_FIP;
5869
5870
5871 memcpy(wqe, &iocbq->iocb, sizeof(union lpfc_wqe));
5872 abort_tag = (uint32_t) iocbq->iotag;
5873 xritag = iocbq->sli4_xritag;
5874 wqe->words[7] = 0;
5875
5876 if (iocbq->iocb.un.genreq64.bdl.bdeFlags == BUFF_TYPE_BLP_64) {
5877 bpl = (struct ulp_bde64 *)
5878 ((struct lpfc_dmabuf *)iocbq->context3)->virt;
5879 if (!bpl)
5880 return IOCB_ERROR;
5881
5882
5883 wqe->generic.bde.addrHigh = le32_to_cpu(bpl->addrHigh);
5884 wqe->generic.bde.addrLow = le32_to_cpu(bpl->addrLow);
5885
5886
5887
5888 wqe->generic.bde.tus.w = le32_to_cpu(bpl->tus.w);
5889 payload_len = wqe->generic.bde.tus.f.bdeSize;
5890 } else
5891 payload_len = iocbq->iocb.un.fcpi64.bdl.bdeSize;
5892
5893 iocbq->iocb.ulpIoTag = iocbq->iotag;
5894 cmnd = iocbq->iocb.ulpCommand;
5895
5896 switch (iocbq->iocb.ulpCommand) {
5897 case CMD_ELS_REQUEST64_CR:
5898 if (!iocbq->iocb.ulpLe) {
5899 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
5900 "2007 Only Limited Edition cmd Format"
5901 " supported 0x%x\n",
5902 iocbq->iocb.ulpCommand);
5903 return IOCB_ERROR;
5904 }
5905 wqe->els_req.payload_len = payload_len;
5906
5907 bf_set(wqe_tmo, &wqe->els_req.wqe_com,
5908 iocbq->iocb.ulpTimeout);
5909
5910 bf_set(els_req64_vf, &wqe->els_req, 0);
5911
5912 bf_set(els_req64_vfid, &wqe->els_req, 0);
5913
5914
5915
5916
5917
5918
5919 ct = ((iocbq->iocb.ulpCt_h << 1) | iocbq->iocb.ulpCt_l);
5920 bf_set(lpfc_wqe_gen_context, &wqe->generic,
5921 iocbq->iocb.ulpContext);
5922
5923 bf_set(lpfc_wqe_gen_ct, &wqe->generic, ct);
5924 bf_set(lpfc_wqe_gen_pu, &wqe->generic, 0);
5925
5926 break;
5927 case CMD_XMIT_SEQUENCE64_CR:
5928
5929
5930
5931
5932 wqe->words[3] = 0;
5933
5934
5935 bf_set(lpfc_wqe_gen_pu, &wqe->generic, 0);
5936 wqe->xmit_sequence.xmit_len = payload_len;
5937 break;
5938 case CMD_XMIT_BCAST64_CN:
5939
5940 wqe->words[3] = 0;
5941
5942
5943
5944 bf_set(lpfc_wqe_gen_ct, &wqe->generic,
5945 ((iocbq->iocb.ulpCt_h << 1) | iocbq->iocb.ulpCt_l));
5946 break;
5947 case CMD_FCP_IWRITE64_CR:
5948 command_type = FCP_COMMAND_DATA_OUT;
5949
5950
5951
5952
5953
5954
5955
5956
5957
5958 wqe->fcp_iwrite.initial_xfer_len = 0;
5959
5960
5961
5962 case CMD_FCP_IREAD64_CR:
5963
5964 wqe->fcp_iread.payload_len =
5965 payload_len + sizeof(struct fcp_rsp);
5966
5967
5968
5969 bf_set(lpfc_wqe_gen_erp, &wqe->generic,
5970 iocbq->iocb.ulpFCP2Rcvy);
5971 bf_set(lpfc_wqe_gen_lnk, &wqe->generic, iocbq->iocb.ulpXS);
5972
5973
5974
5975
5976
5977
5978
5979
5980
5981
5982 bf_set(wqe_xc, &wqe->fcp_iread.wqe_com, 0);
5983
5984 wqe->words[10] &= 0xffff0000;
5985 bf_set(lpfc_wqe_gen_pu, &wqe->generic, iocbq->iocb.ulpPU);
5986 break;
5987 case CMD_FCP_ICMND64_CR:
5988
5989 bf_set(wqe_xc, &wqe->fcp_iread.wqe_com, 0);
5990
5991 wqe->words[4] = 0;
5992 wqe->words[10] &= 0xffff0000;
5993 bf_set(lpfc_wqe_gen_pu, &wqe->generic, 0);
5994 break;
5995 case CMD_GEN_REQUEST64_CR:
5996
5997
5998
5999
6000
6001
6002 wqe->gen_req.command_len = payload_len;
6003
6004
6005
6006 if (iocbq->iocb.ulpCt_h || iocbq->iocb.ulpCt_l) {
6007 ct = ((iocbq->iocb.ulpCt_h << 1) | iocbq->iocb.ulpCt_l);
6008 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
6009 "2015 Invalid CT %x command 0x%x\n",
6010 ct, iocbq->iocb.ulpCommand);
6011 return IOCB_ERROR;
6012 }
6013 bf_set(lpfc_wqe_gen_ct, &wqe->generic, 0);
6014 bf_set(wqe_tmo, &wqe->gen_req.wqe_com,
6015 iocbq->iocb.ulpTimeout);
6016
6017 bf_set(lpfc_wqe_gen_pu, &wqe->generic, iocbq->iocb.ulpPU);
6018 command_type = OTHER_COMMAND;
6019 break;
6020 case CMD_XMIT_ELS_RSP64_CX:
6021
6022
6023 wqe->words[3] = 0;
6024
6025 wqe->words[4] = 0;
6026
6027 bf_set(wqe_els_did, &wqe->xmit_els_rsp.wqe_dest,
6028 iocbq->iocb.un.elsreq64.remoteID);
6029
6030 bf_set(lpfc_wqe_gen_ct, &wqe->generic,
6031 ((iocbq->iocb.ulpCt_h << 1) | iocbq->iocb.ulpCt_l));
6032
6033 bf_set(lpfc_wqe_gen_pu, &wqe->generic, iocbq->iocb.ulpPU);
6034 bf_set(wqe_rcvoxid, &wqe->generic, iocbq->iocb.ulpContext);
6035 if (!iocbq->iocb.ulpCt_h && iocbq->iocb.ulpCt_l)
6036 bf_set(lpfc_wqe_gen_context, &wqe->generic,
6037 iocbq->vport->vpi + phba->vpi_base);
6038 command_type = OTHER_COMMAND;
6039 break;
6040 case CMD_CLOSE_XRI_CN:
6041 case CMD_ABORT_XRI_CN:
6042 case CMD_ABORT_XRI_CX:
6043
6044
6045 if (iocbq->iocb.ulpCommand == CMD_CLOSE_XRI_CN)
6046
6047
6048
6049
6050 bf_set(abort_cmd_ia, &wqe->abort_cmd, 1);
6051 else
6052 bf_set(abort_cmd_ia, &wqe->abort_cmd, 0);
6053 bf_set(abort_cmd_criteria, &wqe->abort_cmd, T_XRI_TAG);
6054 abort_tag = iocbq->iocb.un.acxri.abortIoTag;
6055 wqe->words[5] = 0;
6056 bf_set(lpfc_wqe_gen_ct, &wqe->generic,
6057 ((iocbq->iocb.ulpCt_h << 1) | iocbq->iocb.ulpCt_l));
6058 abort_tag = iocbq->iocb.un.acxri.abortIoTag;
6059 wqe->generic.abort_tag = abort_tag;
6060
6061
6062
6063
6064 bf_set(lpfc_wqe_gen_command, &wqe->generic, CMD_ABORT_XRI_CX);
6065 cmnd = CMD_ABORT_XRI_CX;
6066 command_type = OTHER_COMMAND;
6067 xritag = 0;
6068 break;
6069 case CMD_XRI_ABORTED_CX:
6070 case CMD_CREATE_XRI_CR:
6071
6072
6073 wqe->words[3] = 0;
6074 wqe->words[4] = 0;
6075
6076
6077
6078 case CMD_IOCB_FCP_IBIDIR64_CR:
6079 case CMD_FCP_TSEND64_CX:
6080 case CMD_FCP_TRSP64_CX:
6081 case CMD_FCP_AUTO_TRSP_CX:
6082 default:
6083 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
6084 "2014 Invalid command 0x%x\n",
6085 iocbq->iocb.ulpCommand);
6086 return IOCB_ERROR;
6087 break;
6088
6089 }
6090 bf_set(lpfc_wqe_gen_xri, &wqe->generic, xritag);
6091 bf_set(lpfc_wqe_gen_request_tag, &wqe->generic, iocbq->iotag);
6092 wqe->generic.abort_tag = abort_tag;
6093 bf_set(lpfc_wqe_gen_cmd_type, &wqe->generic, command_type);
6094 bf_set(lpfc_wqe_gen_command, &wqe->generic, cmnd);
6095 bf_set(lpfc_wqe_gen_class, &wqe->generic, iocbq->iocb.ulpClass);
6096 bf_set(lpfc_wqe_gen_cq_id, &wqe->generic, LPFC_WQE_CQ_ID_DEFAULT);
6097
6098 return 0;
6099}
6100
6101
6102
6103
6104
6105
6106
6107
6108
6109
6110
6111
6112
6113
6114
/**
 * __lpfc_sli_issue_iocb_s4 - SLI-4 device variant for issuing an iocb
 * @phba: Pointer to HBA context object.
 * @ring_number: SLI ring number the iocb is associated with.
 * @piocb: Pointer to the command iocb.
 * @flag: Caller flags (not examined on this path).
 *
 * Acquires an sglq/XRI if the iocb needs one, converts the iocb into a
 * work queue entry and posts it to the FCP or ELS work queue.  This is
 * the lockless variant installed as phba->__lpfc_sli_issue_iocb for
 * SLI-4 HBAs; the caller is expected to hold hbalock — TODO confirm
 * against the lpfc_sli_issue_iocb wrapper.
 *
 * Returns 0 on success, IOCB_ERROR on any failure.
 */
static int
__lpfc_sli_issue_iocb_s4(struct lpfc_hba *phba, uint32_t ring_number,
			 struct lpfc_iocbq *piocb, uint32_t flag)
{
	struct lpfc_sglq *sglq;
	uint16_t xritag;
	union lpfc_wqe wqe;
	struct lpfc_sli_ring *pring = &phba->sli.ring[ring_number];
	uint32_t fcp_wqidx;

	if (piocb->sli4_xritag == NO_XRI) {
		/* Abort/close commands never need their own XRI. */
		if (piocb->iocb.ulpCommand == CMD_ABORT_XRI_CN ||
		    piocb->iocb.ulpCommand == CMD_CLOSE_XRI_CN)
			sglq = NULL;
		else {
			sglq = __lpfc_sli_get_sglq(phba);
			if (!sglq)
				return IOCB_ERROR;
			piocb->sli4_xritag = sglq->sli4_xritag;
		}
	} else if (piocb->iocb_flag & LPFC_IO_FCP) {
		sglq = NULL;
		/* FCP I/O already carries an XRI and a mapped SGL of its
		 * own; no ELS sglq is needed.
		 */
	} else {
		/* Continuation of an existing exchange: its sglq must
		 * still be on the active list.
		 */
		sglq = __lpfc_get_active_sglq(phba, piocb->sli4_xritag);
		if (!sglq)
			return IOCB_ERROR;
	}

	if (sglq) {
		/* Copy the iocb's BPL into the sglq's SGL; the converter
		 * must hand back the same xritag we selected above.
		 */
		xritag = lpfc_sli4_bpl2sgl(phba, piocb, sglq);
		if (xritag != sglq->sli4_xritag)
			return IOCB_ERROR;
	}

	/* Build the WQE image from the iocb. */
	if (lpfc_sli4_iocb2wqe(phba, piocb, &wqe))
		return IOCB_ERROR;

	if (piocb->iocb_flag & LPFC_IO_FCP) {
		/* FCP commands are distributed round-robin over the FCP
		 * work queues.
		 */
		fcp_wqidx = lpfc_sli4_scmd_to_wqidx_distr(phba);
		if (lpfc_sli4_wq_put(phba->sli4_hba.fcp_wq[fcp_wqidx], &wqe))
			return IOCB_ERROR;
	} else {
		if (lpfc_sli4_wq_put(phba->sli4_hba.els_wq, &wqe))
			return IOCB_ERROR;
	}
	/* Track the command until its completion arrives. */
	lpfc_sli_ringtxcmpl_put(phba, pring, piocb);

	return 0;
}
6169
6170
6171
6172
6173
6174
6175
6176
6177
6178
6179
6180
6181static inline int
6182__lpfc_sli_issue_iocb(struct lpfc_hba *phba, uint32_t ring_number,
6183 struct lpfc_iocbq *piocb, uint32_t flag)
6184{
6185 return phba->__lpfc_sli_issue_iocb(phba, ring_number, piocb, flag);
6186}
6187
6188
6189
6190
6191
6192
6193
6194
6195
6196
6197int
6198lpfc_sli_api_table_setup(struct lpfc_hba *phba, uint8_t dev_grp)
6199{
6200
6201 switch (dev_grp) {
6202 case LPFC_PCI_DEV_LP:
6203 phba->__lpfc_sli_issue_iocb = __lpfc_sli_issue_iocb_s3;
6204 phba->__lpfc_sli_release_iocbq = __lpfc_sli_release_iocbq_s3;
6205 break;
6206 case LPFC_PCI_DEV_OC:
6207 phba->__lpfc_sli_issue_iocb = __lpfc_sli_issue_iocb_s4;
6208 phba->__lpfc_sli_release_iocbq = __lpfc_sli_release_iocbq_s4;
6209 break;
6210 default:
6211 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
6212 "1419 Invalid HBA PCI-device group: 0x%x\n",
6213 dev_grp);
6214 return -ENODEV;
6215 break;
6216 }
6217 phba->lpfc_get_iocb_from_iocbq = lpfc_get_iocb_from_iocbq;
6218 return 0;
6219}
6220
6221
6222
6223
6224
6225
6226
6227
6228
6229
6230
6231
6232
6233
6234int
6235lpfc_sli_issue_iocb(struct lpfc_hba *phba, uint32_t ring_number,
6236 struct lpfc_iocbq *piocb, uint32_t flag)
6237{
6238 unsigned long iflags;
6239 int rc;
6240
6241 spin_lock_irqsave(&phba->hbalock, iflags);
6242 rc = __lpfc_sli_issue_iocb(phba, ring_number, piocb, flag);
6243 spin_unlock_irqrestore(&phba->hbalock, iflags);
6244
6245 return rc;
6246}
6247
6248
6249
6250
6251
6252
6253
6254
6255
6256
6257
6258
6259static int
6260lpfc_extra_ring_setup( struct lpfc_hba *phba)
6261{
6262 struct lpfc_sli *psli;
6263 struct lpfc_sli_ring *pring;
6264
6265 psli = &phba->sli;
6266
6267
6268
6269
6270 pring = &psli->ring[psli->fcp_ring];
6271 pring->numCiocb -= SLI2_IOCB_CMD_R1XTRA_ENTRIES;
6272 pring->numRiocb -= SLI2_IOCB_RSP_R1XTRA_ENTRIES;
6273 pring->numCiocb -= SLI2_IOCB_CMD_R3XTRA_ENTRIES;
6274 pring->numRiocb -= SLI2_IOCB_RSP_R3XTRA_ENTRIES;
6275
6276
6277 pring = &psli->ring[psli->extra_ring];
6278
6279 pring->numCiocb += SLI2_IOCB_CMD_R1XTRA_ENTRIES;
6280 pring->numRiocb += SLI2_IOCB_RSP_R1XTRA_ENTRIES;
6281 pring->numCiocb += SLI2_IOCB_CMD_R3XTRA_ENTRIES;
6282 pring->numRiocb += SLI2_IOCB_RSP_R3XTRA_ENTRIES;
6283
6284
6285 pring->iotag_max = 4096;
6286 pring->num_mask = 1;
6287 pring->prt[0].profile = 0;
6288 pring->prt[0].rctl = phba->cfg_multi_ring_rctl;
6289 pring->prt[0].type = phba->cfg_multi_ring_type;
6290 pring->prt[0].lpfc_sli_rcv_unsol_event = NULL;
6291 return 0;
6292}
6293
6294
6295
6296
6297
6298
6299
6300
6301
6302
6303
6304
6305
6306
/**
 * lpfc_sli_async_event_handler - Handle an ASYNC_STATUS iocb from the HBA
 * @phba: Pointer to HBA context object.
 * @pring: Pointer to the ring the event arrived on.
 * @iocbq: Pointer to the async status iocb.
 *
 * Only temperature events (ASYNC_TEMP_WARN / ASYNC_TEMP_SAFE) are
 * handled; any other event code is dumped word-by-word to the log and
 * dropped.  Temperature events are logged and forwarded to user space
 * as an FC transport vendor event (FC_REG_TEMPERATURE_EVENT).
 */
static void
lpfc_sli_async_event_handler(struct lpfc_hba * phba,
	struct lpfc_sli_ring * pring, struct lpfc_iocbq * iocbq)
{
	IOCB_t *icmd;
	uint16_t evt_code;
	uint16_t temp;
	struct temp_event temp_event_data;
	struct Scsi_Host *shost;
	uint32_t *iocb_w;

	icmd = &iocbq->iocb;
	evt_code = icmd->un.asyncstat.evt_code;
	/* ulpContext carries the temperature reading for temp events —
	 * presumably in Celsius, matching the log text below.
	 */
	temp = icmd->ulpContext;

	if ((evt_code != ASYNC_TEMP_WARN) &&
		(evt_code != ASYNC_TEMP_SAFE)) {
		/* Unknown event: dump all 16 iocb words for diagnosis. */
		iocb_w = (uint32_t *) icmd;
		lpfc_printf_log(phba,
			KERN_ERR,
			LOG_SLI,
			"0346 Ring %d handler: unexpected ASYNC_STATUS"
			" evt_code 0x%x\n"
			"W0  0x%08x W1  0x%08x W2  0x%08x W3  0x%08x\n"
			"W4  0x%08x W5  0x%08x W6  0x%08x W7  0x%08x\n"
			"W8  0x%08x W9  0x%08x W10 0x%08x W11 0x%08x\n"
			"W12 0x%08x W13 0x%08x W14 0x%08x W15 0x%08x\n",
			pring->ringno,
			icmd->un.asyncstat.evt_code,
			iocb_w[0], iocb_w[1], iocb_w[2], iocb_w[3],
			iocb_w[4], iocb_w[5], iocb_w[6], iocb_w[7],
			iocb_w[8], iocb_w[9], iocb_w[10], iocb_w[11],
			iocb_w[12], iocb_w[13], iocb_w[14], iocb_w[15]);

		return;
	}
	temp_event_data.data = (uint32_t)temp;
	temp_event_data.event_type = FC_REG_TEMPERATURE_EVENT;
	if (evt_code == ASYNC_TEMP_WARN) {
		temp_event_data.event_code = LPFC_THRESHOLD_TEMP;
		lpfc_printf_log(phba,
				KERN_ERR,
				LOG_TEMP,
				"0347 Adapter is very hot, please take "
				"corrective action. temperature : %d Celsius\n",
				temp);
	}
	if (evt_code == ASYNC_TEMP_SAFE) {
		temp_event_data.event_code = LPFC_NORMAL_TEMP;
		lpfc_printf_log(phba,
				KERN_ERR,
				LOG_TEMP,
				"0340 Adapter temperature is OK now. "
				"temperature : %d Celsius\n",
				temp);
	}

	/* Send temperature message to user space as a vendor event. */
	shost = lpfc_shost_from_vport(phba->pport);
	fc_host_post_vendor_event(shost, fc_get_event_number(),
		sizeof(temp_event_data), (char *) &temp_event_data,
		LPFC_NL_VENDOR_ID);

}
6371
6372
6373
6374
6375
6376
6377
6378
6379
6380
6381
6382
6383
/**
 * lpfc_sli_setup - Configure the SLI ring layout for this HBA
 * @phba: Pointer to HBA context object.
 *
 * Initializes the sli structure and every ring's command/response iocb
 * counts, iocb sizes (SLI-2 vs SLI-3 layouts), iotag limits and, for
 * the ELS ring, the four unsolicited-event masks (ELS request, ELS
 * response, unsolicited CT, solicited CT).  Checks the total iocb
 * footprint against the SLIM size and optionally carves out the extra
 * ring when cfg_multi_ring_support == 2.
 *
 * Always returns 0 (the SLIM overflow case only logs an error —
 * NOTE(review): presumably intentional, but verify callers do not rely
 * on a failure return here).
 */
int
lpfc_sli_setup(struct lpfc_hba *phba)
{
	int i, totiocbsize = 0;
	struct lpfc_sli *psli = &phba->sli;
	struct lpfc_sli_ring *pring;

	psli->num_rings = MAX_CONFIGURED_RINGS;
	psli->sli_flag = 0;
	psli->fcp_ring = LPFC_FCP_RING;
	psli->next_ring = LPFC_FCP_NEXT_RING;
	psli->extra_ring = LPFC_EXTRA_RING;

	psli->iocbq_lookup = NULL;
	psli->iocbq_lookup_len = 0;
	psli->last_iotag = 0;

	for (i = 0; i < psli->num_rings; i++) {
		pring = &psli->ring[i];
		switch (i) {
		case LPFC_FCP_RING:
			/* numCiocb and numRiocb are used in config_port */
			pring->numCiocb = SLI2_IOCB_CMD_R0_ENTRIES;
			pring->numRiocb = SLI2_IOCB_RSP_R0_ENTRIES;
			pring->numCiocb += SLI2_IOCB_CMD_R1XTRA_ENTRIES;
			pring->numRiocb += SLI2_IOCB_RSP_R1XTRA_ENTRIES;
			pring->numCiocb += SLI2_IOCB_CMD_R3XTRA_ENTRIES;
			pring->numRiocb += SLI2_IOCB_RSP_R3XTRA_ENTRIES;
			/* SLI-3 revisions use the larger iocb layout. */
			pring->sizeCiocb = (phba->sli_rev == 3) ?
							SLI3_IOCB_CMD_SIZE :
							SLI2_IOCB_CMD_SIZE;
			pring->sizeRiocb = (phba->sli_rev == 3) ?
							SLI3_IOCB_RSP_SIZE :
							SLI2_IOCB_RSP_SIZE;
			pring->iotag_ctr = 0;
			pring->iotag_max =
			    (phba->cfg_hba_queue_depth * 2);
			pring->fast_iotag = pring->iotag_max;
			pring->num_mask = 0;
			break;
		case LPFC_EXTRA_RING:
			/* numCiocb and numRiocb are used in config_port */
			pring->numCiocb = SLI2_IOCB_CMD_R1_ENTRIES;
			pring->numRiocb = SLI2_IOCB_RSP_R1_ENTRIES;
			pring->sizeCiocb = (phba->sli_rev == 3) ?
							SLI3_IOCB_CMD_SIZE :
							SLI2_IOCB_CMD_SIZE;
			pring->sizeRiocb = (phba->sli_rev == 3) ?
							SLI3_IOCB_RSP_SIZE :
							SLI2_IOCB_RSP_SIZE;
			pring->iotag_max = phba->cfg_hba_queue_depth;
			pring->num_mask = 0;
			break;
		case LPFC_ELS_RING:
			/* numCiocb and numRiocb are used in config_port */
			pring->numCiocb = SLI2_IOCB_CMD_R2_ENTRIES;
			pring->numRiocb = SLI2_IOCB_RSP_R2_ENTRIES;
			pring->sizeCiocb = (phba->sli_rev == 3) ?
							SLI3_IOCB_CMD_SIZE :
							SLI2_IOCB_CMD_SIZE;
			pring->sizeRiocb = (phba->sli_rev == 3) ?
							SLI3_IOCB_RSP_SIZE :
							SLI2_IOCB_RSP_SIZE;
			pring->fast_iotag = 0;
			pring->iotag_ctr = 0;
			pring->iotag_max = 4096;
			pring->lpfc_sli_rcv_async_status =
				lpfc_sli_async_event_handler;
			/* Four unsolicited masks: ELS req, ELS rsp,
			 * unsolicited CT, solicited CT.
			 */
			pring->num_mask = 4;
			pring->prt[0].profile = 0;	/* Mask 0 */
			pring->prt[0].rctl = FC_ELS_REQ;
			pring->prt[0].type = FC_ELS_DATA;
			pring->prt[0].lpfc_sli_rcv_unsol_event =
			    lpfc_els_unsol_event;
			pring->prt[1].profile = 0;	/* Mask 1 */
			pring->prt[1].rctl = FC_ELS_RSP;
			pring->prt[1].type = FC_ELS_DATA;
			pring->prt[1].lpfc_sli_rcv_unsol_event =
			    lpfc_els_unsol_event;
			pring->prt[2].profile = 0;	/* Mask 2 */
			/* NameServer Inquiry */
			pring->prt[2].rctl = FC_UNSOL_CTL;
			/* NameServer */
			pring->prt[2].type = FC_COMMON_TRANSPORT_ULP;
			pring->prt[2].lpfc_sli_rcv_unsol_event =
			    lpfc_ct_unsol_event;
			pring->prt[3].profile = 0;	/* Mask 3 */
			/* NameServer response */
			pring->prt[3].rctl = FC_SOL_CTL;
			/* NameServer */
			pring->prt[3].type = FC_COMMON_TRANSPORT_ULP;
			pring->prt[3].lpfc_sli_rcv_unsol_event =
			    lpfc_ct_unsol_event;
			break;
		}
		totiocbsize += (pring->numCiocb * pring->sizeCiocb) +
				(pring->numRiocb * pring->sizeRiocb);
	}
	if (totiocbsize > MAX_SLIM_IOCB_SIZE) {
		/* Too many cmd / rsp ring entries in SLI2 SLIM */
		printk(KERN_ERR "%d:0462 Too many cmd / rsp ring entries in "
		       "SLI2 SLIM Data: x%x x%lx\n",
		       phba->brd_no, totiocbsize,
		       (unsigned long) MAX_SLIM_IOCB_SIZE);
	}
	if (phba->cfg_multi_ring_support == 2)
		lpfc_extra_ring_setup(phba);

	return 0;
}
6494
6495
6496
6497
6498
6499
6500
6501
6502
6503
6504
6505
6506int
6507lpfc_sli_queue_setup(struct lpfc_hba *phba)
6508{
6509 struct lpfc_sli *psli;
6510 struct lpfc_sli_ring *pring;
6511 int i;
6512
6513 psli = &phba->sli;
6514 spin_lock_irq(&phba->hbalock);
6515 INIT_LIST_HEAD(&psli->mboxq);
6516 INIT_LIST_HEAD(&psli->mboxq_cmpl);
6517
6518 for (i = 0; i < psli->num_rings; i++) {
6519 pring = &psli->ring[i];
6520 pring->ringno = i;
6521 pring->next_cmdidx = 0;
6522 pring->local_getidx = 0;
6523 pring->cmdidx = 0;
6524 INIT_LIST_HEAD(&pring->txq);
6525 INIT_LIST_HEAD(&pring->txcmplq);
6526 INIT_LIST_HEAD(&pring->iocb_continueq);
6527 INIT_LIST_HEAD(&pring->iocb_continue_saveq);
6528 INIT_LIST_HEAD(&pring->postbufq);
6529 }
6530 spin_unlock_irq(&phba->hbalock);
6531 return 1;
6532}
6533
6534
6535
6536
6537
6538
6539
6540
6541
6542
6543
6544
6545
6546
6547
6548
/**
 * lpfc_sli_mbox_sys_flush - Flush all pending and active mailbox commands
 * @phba: Pointer to HBA context object.
 *
 * Under hbalock, moves every queued mailbox command, the currently
 * active one (clearing the MBOX_ACTIVE state), and every command
 * awaiting completion processing onto a local list.  Then, with the
 * lock dropped, marks each one MBX_NOT_FINISHED and invokes its
 * completion handler so callers waiting on these commands unblock.
 */
static void
lpfc_sli_mbox_sys_flush(struct lpfc_hba *phba)
{
	LIST_HEAD(completions);
	struct lpfc_sli *psli = &phba->sli;
	LPFC_MBOXQ_t *pmb;
	unsigned long iflag;

	/* Collect everything under the lock; complete outside it. */
	spin_lock_irqsave(&phba->hbalock, iflag);
	/* Commands still waiting to be issued. */
	list_splice_init(&phba->sli.mboxq, &completions);
	/* The command currently outstanding on the HBA, if any. */
	if (psli->mbox_active) {
		list_add_tail(&psli->mbox_active->list, &completions);
		psli->mbox_active = NULL;
		psli->sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
	}
	/* Commands completed by the HBA but not yet processed. */
	list_splice_init(&phba->sli.mboxq_cmpl, &completions);
	spin_unlock_irqrestore(&phba->hbalock, iflag);

	/* Fail each collected command back to its originator. */
	while (!list_empty(&completions)) {
		list_remove_head(&completions, pmb, LPFC_MBOXQ_t, list);
		pmb->u.mb.mbxStatus = MBX_NOT_FINISHED;
		if (pmb->mbox_cmpl)
			pmb->mbox_cmpl(phba, pmb);
	}
}
6579
6580
6581
6582
6583
6584
6585
6586
6587
6588
6589
6590
6591
6592
6593
6594
6595
6596
/**
 * lpfc_sli_host_down - Clean up a vport's I/O on all SLI rings
 * @vport: Pointer to the virtual port being torn down.
 *
 * Cleans up the vport's discovery resources, then under hbalock walks
 * every ring: iocbs belonging to @vport still on the txq are moved to
 * a local list, and those already issued (on the txcmplq) are aborted
 * via lpfc_sli_issue_abort_iotag.  The collected txq iocbs are then
 * failed with IOSTAT_LOCAL_REJECT / IOERR_SLI_DOWN outside the lock.
 *
 * Always returns 1.
 */
int
lpfc_sli_host_down(struct lpfc_vport *vport)
{
	LIST_HEAD(completions);
	struct lpfc_hba *phba = vport->phba;
	struct lpfc_sli *psli = &phba->sli;
	struct lpfc_sli_ring *pring;
	struct lpfc_iocbq *iocb, *next_iocb;
	int i;
	unsigned long flags = 0;
	uint16_t prev_pring_flag;

	lpfc_cleanup_discovery_resources(vport);

	spin_lock_irqsave(&phba->hbalock, flags);
	for (i = 0; i < psli->num_rings; i++) {
		pring = &psli->ring[i];
		prev_pring_flag = pring->flag;
		/* Only slow rings */
		if (pring->ringno == LPFC_ELS_RING) {
			pring->flag |= LPFC_DEFERRED_RING_EVENT;
			/* Set the lpfc data pending flag */
			set_bit(LPFC_DATA_READY, &phba->data_flags);
		}
		/*
		 * Error everything on the txq since these iocbs have not
		 * been given to the FW yet.
		 */
		list_for_each_entry_safe(iocb, next_iocb, &pring->txq, list) {
			if (iocb->vport != vport)
				continue;
			list_move_tail(&iocb->list, &completions);
			pring->txq_cnt--;
		}

		/* Next issue ABTS for everything on the txcmplq */
		list_for_each_entry_safe(iocb, next_iocb, &pring->txcmplq,
									list) {
			if (iocb->vport != vport)
				continue;
			lpfc_sli_issue_abort_iotag(phba, pring, iocb);
		}

		pring->flag = prev_pring_flag;
	}

	spin_unlock_irqrestore(&phba->hbalock, flags);

	/* Cancel all the IOCBs from the completions list */
	lpfc_sli_cancel_iocbs(phba, &completions, IOSTAT_LOCAL_REJECT,
			      IOERR_SLI_DOWN);
	return 1;
}
6650
6651
6652
6653
6654
6655
6656
6657
6658
6659
6660
6661
6662
6663
6664
6665
/**
 * lpfc_sli_hba_down - Clean up all SLI resources when bringing the HBA down
 * @phba: Pointer to HBA context object.
 *
 * Shuts down the mailbox subsystem, runs the HBA-down prep hook,
 * aborts fabric iocbs, then flushes the txq of every ring (failing
 * those iocbs with IOSTAT_LOCAL_REJECT / IOERR_SLI_DOWN), frees all
 * buffers held on the phba elsbuf list, and finally cancels the
 * mailbox timeout timer and its worker event.
 *
 * Always returns 1.
 */
int
lpfc_sli_hba_down(struct lpfc_hba *phba)
{
	LIST_HEAD(completions);
	struct lpfc_sli *psli = &phba->sli;
	struct lpfc_sli_ring *pring;
	struct lpfc_dmabuf *buf_ptr;
	unsigned long flags = 0;
	int i;

	/* Shutdown the mailbox command sub-system */
	lpfc_sli_mbox_sys_shutdown(phba);

	lpfc_hba_down_prep(phba);

	lpfc_fabric_abort_hba(phba);

	spin_lock_irqsave(&phba->hbalock, flags);
	for (i = 0; i < psli->num_rings; i++) {
		pring = &psli->ring[i];
		/* Only slow rings */
		if (pring->ringno == LPFC_ELS_RING) {
			pring->flag |= LPFC_DEFERRED_RING_EVENT;
			/* Set the lpfc data pending flag */
			set_bit(LPFC_DATA_READY, &phba->data_flags);
		}

		/*
		 * Error everything on the txq since these iocbs have not
		 * been given to the FW yet.  (Nothing is done for iocbs
		 * already on the txcmplq here.)
		 */
		list_splice_init(&pring->txq, &completions);
		pring->txq_cnt = 0;

	}
	spin_unlock_irqrestore(&phba->hbalock, flags);

	/* Cancel all the IOCBs from the completions list */
	lpfc_sli_cancel_iocbs(phba, &completions, IOSTAT_LOCAL_REJECT,
			      IOERR_SLI_DOWN);

	/* Reclaim the deferred-free ELS buffers. */
	spin_lock_irqsave(&phba->hbalock, flags);
	list_splice_init(&phba->elsbuf, &completions);
	phba->elsbuf_cnt = 0;
	phba->elsbuf_prev_cnt = 0;
	spin_unlock_irqrestore(&phba->hbalock, flags);

	while (!list_empty(&completions)) {
		list_remove_head(&completions, buf_ptr,
			struct lpfc_dmabuf, list);
		lpfc_mbuf_free(phba, buf_ptr->virt, buf_ptr->phys);
		kfree(buf_ptr);
	}

	/* Return any active mbox cmds */
	del_timer_sync(&psli->mbox_tmo);

	spin_lock_irqsave(&phba->pport->work_port_lock, flags);
	phba->pport->work_port_events &= ~WORKER_MBOX_TMO;
	spin_unlock_irqrestore(&phba->pport->work_port_lock, flags);

	return 1;
}
6729
6730
6731
6732
6733
6734
6735
6736
6737
6738
6739
6740
6741
6742
6743
6744
6745
/**
 * lpfc_sli4_hba_down - Tear down SLI-4 HBA resources
 * @phba: Pointer to HBA context object.
 *
 * Stops the port, unsets all SLI-4 queues, and unregisters the FCF
 * index currently in use.  The three steps are order-dependent:
 * the port is quiesced before its queues are destroyed, and the FCFI
 * is released last.
 *
 * Always returns 1.
 */
int
lpfc_sli4_hba_down(struct lpfc_hba *phba)
{
	/* Stop the SLI4 device port */
	lpfc_stop_port(phba);

	/* Tear down the queues in the HBA */
	lpfc_sli4_queue_unset(phba);

	/* unregister default FCFI from the HBA */
	lpfc_sli4_fcfi_unreg(phba, phba->fcf.fcfi);

	return 1;
}
6760
6761
6762
6763
6764
6765
6766
6767
6768
6769
6770
6771
6772
6773void
6774lpfc_sli_pcimem_bcopy(void *srcp, void *destp, uint32_t cnt)
6775{
6776 uint32_t *src = srcp;
6777 uint32_t *dest = destp;
6778 uint32_t ldata;
6779 int i;
6780
6781 for (i = 0; i < (int)cnt; i += sizeof (uint32_t)) {
6782 ldata = *src;
6783 ldata = le32_to_cpu(ldata);
6784 *dest = ldata;
6785 src++;
6786 dest++;
6787 }
6788}
6789
6790
6791
6792
6793
6794
6795
6796
6797
6798
6799
6800
6801void
6802lpfc_sli_bemem_bcopy(void *srcp, void *destp, uint32_t cnt)
6803{
6804 uint32_t *src = srcp;
6805 uint32_t *dest = destp;
6806 uint32_t ldata;
6807 int i;
6808
6809 for (i = 0; i < (int)cnt; i += sizeof(uint32_t)) {
6810 ldata = *src;
6811 ldata = be32_to_cpu(ldata);
6812 *dest = ldata;
6813 src++;
6814 dest++;
6815 }
6816}
6817
6818
6819
6820
6821
6822
6823
6824
6825
6826
6827
6828int
6829lpfc_sli_ringpostbuf_put(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
6830 struct lpfc_dmabuf *mp)
6831{
6832
6833
6834 spin_lock_irq(&phba->hbalock);
6835 list_add_tail(&mp->list, &pring->postbufq);
6836 pring->postbufq_cnt++;
6837 spin_unlock_irq(&phba->hbalock);
6838 return 0;
6839}
6840
6841
6842
6843
6844
6845
6846
6847
6848
6849
6850
6851
/**
 * lpfc_sli_get_buffer_tag - Allocate a buffer tag for an unsolicited buffer
 * @phba: Pointer to HBA context object.
 *
 * Increments the HBA-wide buffer tag counter under hbalock and ORs in
 * QUE_BUFTAG_BIT so these tags occupy a distinct value space.
 *
 * NOTE(review): the return statement re-reads buffer_tag_count after
 * the lock is dropped, so two concurrent callers could observe the same
 * value — verify callers are serialized, or return a snapshot taken
 * while still holding the lock.
 */
uint32_t
lpfc_sli_get_buffer_tag(struct lpfc_hba *phba)
{
	spin_lock_irq(&phba->hbalock);
	phba->buffer_tag_count++;
	/*
	 * Always set the QUE_BUFTAG_BIT to distinguish these tags from
	 * other tag values.
	 */
	phba->buffer_tag_count |= QUE_BUFTAG_BIT;
	spin_unlock_irq(&phba->hbalock);
	return phba->buffer_tag_count;
}
6865
6866
6867
6868
6869
6870
6871
6872
6873
6874
6875
6876
6877
6878
6879
6880
6881struct lpfc_dmabuf *
6882lpfc_sli_ring_taggedbuf_get(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
6883 uint32_t tag)
6884{
6885 struct lpfc_dmabuf *mp, *next_mp;
6886 struct list_head *slp = &pring->postbufq;
6887
6888
6889 spin_lock_irq(&phba->hbalock);
6890 list_for_each_entry_safe(mp, next_mp, &pring->postbufq, list) {
6891 if (mp->buffer_tag == tag) {
6892 list_del_init(&mp->list);
6893 pring->postbufq_cnt--;
6894 spin_unlock_irq(&phba->hbalock);
6895 return mp;
6896 }
6897 }
6898
6899 spin_unlock_irq(&phba->hbalock);
6900 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
6901 "0402 Cannot find virtual addr for buffer tag on "
6902 "ring %d Data x%lx x%p x%p x%x\n",
6903 pring->ringno, (unsigned long) tag,
6904 slp->next, slp->prev, pring->postbufq_cnt);
6905
6906 return NULL;
6907}
6908
6909
6910
6911
6912
6913
6914
6915
6916
6917
6918
6919
6920
6921
6922
6923
6924
6925struct lpfc_dmabuf *
6926lpfc_sli_ringpostbuf_get(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
6927 dma_addr_t phys)
6928{
6929 struct lpfc_dmabuf *mp, *next_mp;
6930 struct list_head *slp = &pring->postbufq;
6931
6932
6933 spin_lock_irq(&phba->hbalock);
6934 list_for_each_entry_safe(mp, next_mp, &pring->postbufq, list) {
6935 if (mp->phys == phys) {
6936 list_del_init(&mp->list);
6937 pring->postbufq_cnt--;
6938 spin_unlock_irq(&phba->hbalock);
6939 return mp;
6940 }
6941 }
6942
6943 spin_unlock_irq(&phba->hbalock);
6944 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
6945 "0410 Cannot find virtual addr for mapped buf on "
6946 "ring %d Data x%llx x%p x%p x%x\n",
6947 pring->ringno, (unsigned long long)phys,
6948 slp->next, slp->prev, pring->postbufq_cnt);
6949 return NULL;
6950}
6951
6952
6953
6954
6955
6956
6957
6958
6959
6960
6961
6962
/**
 * lpfc_sli_abort_els_cmpl - Completion handler for an ELS abort iocb
 * @phba: Pointer to HBA context object.
 * @cmdiocb: Pointer to the abort command iocb that completed.
 * @rspiocb: Pointer to the response iocb.
 *
 * If the abort itself failed (non-zero ulpStatus), looks up the iocb
 * that was being aborted and — unless the failure is a local reject or
 * the target iocb no longer matches — completes it by hand with
 * IOSTAT_LOCAL_REJECT / IOERR_SLI_ABORTED.  The abort command iocb is
 * released in all cases.
 */
static void
lpfc_sli_abort_els_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
			struct lpfc_iocbq *rspiocb)
{
	IOCB_t *irsp = &rspiocb->iocb;
	uint16_t abort_iotag, abort_context;
	struct lpfc_iocbq *abort_iocb;
	struct lpfc_sli_ring *pring = &phba->sli.ring[LPFC_ELS_RING];

	abort_iocb = NULL;

	if (irsp->ulpStatus) {
		abort_context = cmdiocb->iocb.un.acxri.abortContextTag;
		abort_iotag = cmdiocb->iocb.un.acxri.abortIoTag;

		spin_lock_irq(&phba->hbalock);
		/* Translate the aborted iotag back to the target iocb. */
		if (abort_iotag != 0 && abort_iotag <= phba->sli.last_iotag)
			abort_iocb = phba->sli.iocbq_lookup[abort_iotag];

		lpfc_printf_log(phba, KERN_INFO, LOG_ELS | LOG_SLI,
				"0327 Cannot abort els iocb %p "
				"with tag %x context %x, abort status %x, "
				"abort code %x\n",
				abort_iocb, abort_iotag, abort_context,
				irsp->ulpStatus, irsp->un.ulpWord[4]);

		/*
		 * A local reject means the firmware refused the abort;
		 * the target iocb will complete (or has completed) on
		 * its own, so just release the abort iocb.
		 */
		if (irsp->ulpStatus == IOSTAT_LOCAL_REJECT) {
			spin_unlock_irq(&phba->hbalock);
			lpfc_sli_release_iocbq(phba, cmdiocb);
			return;
		}

		/*
		 * Only hand-complete the target iocb if it is still the
		 * one we aborted: same context and still marked as
		 * driver-aborted.  Otherwise it already completed.
		 */
		if (!abort_iocb ||
		    abort_iocb->iocb.ulpContext != abort_context ||
		    (abort_iocb->iocb_flag & LPFC_DRIVER_ABORTED) == 0)
			spin_unlock_irq(&phba->hbalock);
		else {
			list_del_init(&abort_iocb->list);
			pring->txcmplq_cnt--;
			spin_unlock_irq(&phba->hbalock);

			/* Firmware could still be in progress of DMAing
			 * payload, so don't free data buffer till after
			 * a hbeat.
			 */
			abort_iocb->iocb_flag |= LPFC_DELAY_MEM_FREE;

			abort_iocb->iocb_flag &= ~LPFC_DRIVER_ABORTED;
			abort_iocb->iocb.ulpStatus = IOSTAT_LOCAL_REJECT;
			abort_iocb->iocb.un.ulpWord[4] = IOERR_SLI_ABORTED;
			(abort_iocb->iocb_cmpl)(phba, abort_iocb, abort_iocb);
		}
	}

	lpfc_sli_release_iocbq(phba, cmdiocb);
	return;
}
7027
7028
7029
7030
7031
7032
7033
7034
7035
7036
7037
7038
7039static void
7040lpfc_ignore_els_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
7041 struct lpfc_iocbq *rspiocb)
7042{
7043 IOCB_t *irsp = &rspiocb->iocb;
7044
7045
7046 lpfc_printf_log(phba, KERN_INFO, LOG_ELS,
7047 "0139 Ignoring ELS cmd tag x%x completion Data: "
7048 "x%x x%x x%x\n",
7049 irsp->ulpIoTag, irsp->ulpStatus,
7050 irsp->un.ulpWord[4], irsp->ulpTimeout);
7051 if (cmdiocb->iocb.ulpCommand == CMD_GEN_REQUEST64_CR)
7052 lpfc_ct_free_iocb(phba, cmdiocb);
7053 else
7054 lpfc_els_free_iocb(phba, cmdiocb);
7055 return;
7056}
7057
7058
7059
7060
7061
7062
7063
7064
7065
7066
7067
7068
/**
 * lpfc_sli_issue_abort_iotag - Issue an abort for an outstanding iocb
 * @phba: Pointer to HBA context object.
 * @pring: Pointer to the ring the iocb was issued on.
 * @cmdiocb: Pointer to the iocb to abort.
 *
 * Builds and issues an ABORT_XRI_CN (link up) or CLOSE_XRI_CN (link
 * down) iocb against @cmdiocb.  Abort/close iocbs and iocbs already
 * marked driver-aborted are skipped.  During unload of the ELS ring
 * the completion handler is replaced with lpfc_ignore_els_cmpl instead
 * of issuing an abort.  Caller context: uses __lpfc_sli_get_iocbq and
 * __lpfc_sli_issue_iocb, so hbalock is presumably held by the caller —
 * TODO confirm against callers such as lpfc_sli_host_down.
 *
 * Returns the issue status (IOCB_ERROR on failure to issue, 0 when the
 * abort is skipped or deferred).
 */
int
lpfc_sli_issue_abort_iotag(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
			   struct lpfc_iocbq *cmdiocb)
{
	struct lpfc_vport *vport = cmdiocb->vport;
	struct lpfc_iocbq *abtsiocbp;
	IOCB_t *icmd = NULL;
	IOCB_t *iabt = NULL;
	int retval = IOCB_ERROR;

	/*
	 * There are certain command types we don't want to abort.  And we
	 * don't want to abort commands that are already in the process of
	 * being aborted.
	 */
	icmd = &cmdiocb->iocb;
	if (icmd->ulpCommand == CMD_ABORT_XRI_CN ||
	    icmd->ulpCommand == CMD_CLOSE_XRI_CN ||
	    (cmdiocb->iocb_flag & LPFC_DRIVER_ABORTED) != 0)
		return 0;

	/* If we're unloading, don't abort iocb on the ELS ring, but change
	 * the callback so that nothing happens when it finishes.
	 */
	if ((vport->load_flag & FC_UNLOADING) &&
	    (pring->ringno == LPFC_ELS_RING)) {
		if (cmdiocb->iocb_flag & LPFC_IO_FABRIC)
			cmdiocb->fabric_iocb_cmpl = lpfc_ignore_els_cmpl;
		else
			cmdiocb->iocb_cmpl = lpfc_ignore_els_cmpl;
		goto abort_iotag_exit;
	}

	/* issue ABTS for this IOCB based on iotag */
	abtsiocbp = __lpfc_sli_get_iocbq(phba);
	if (abtsiocbp == NULL)
		return 0;

	/* This signals the response to set the correct status
	 * before calling the completion handler.
	 */
	cmdiocb->iocb_flag |= LPFC_DRIVER_ABORTED;

	iabt = &abtsiocbp->iocb;
	iabt->un.acxri.abortType = ABORT_TYPE_ABTS;
	iabt->un.acxri.abortContextTag = icmd->ulpContext;
	/* SLI-4 aborts by XRI; SLI-3 by iotag. */
	if (phba->sli_rev == LPFC_SLI_REV4)
		iabt->un.acxri.abortIoTag = cmdiocb->sli4_xritag;
	else
		iabt->un.acxri.abortIoTag = icmd->ulpIoTag;
	iabt->ulpLe = 1;
	iabt->ulpClass = icmd->ulpClass;

	/* ABTS on the wire only makes sense with a live link. */
	if (phba->link_state >= LPFC_LINK_UP)
		iabt->ulpCommand = CMD_ABORT_XRI_CN;
	else
		iabt->ulpCommand = CMD_CLOSE_XRI_CN;

	abtsiocbp->iocb_cmpl = lpfc_sli_abort_els_cmpl;

	lpfc_printf_vlog(vport, KERN_INFO, LOG_SLI,
			 "0339 Abort xri x%x, original iotag x%x, "
			 "abort cmd iotag x%x\n",
			 iabt->un.acxri.abortContextTag,
			 iabt->un.acxri.abortIoTag, abtsiocbp->iotag);
	retval = __lpfc_sli_issue_iocb(phba, pring->ringno, abtsiocbp, 0);

	if (retval)
		__lpfc_sli_release_iocbq(phba, abtsiocbp);
abort_iotag_exit:
	/*
	 * Caller to this routine needs to check the return status of
	 * this function and decide what to do next; the target iocb's
	 * own completion handler still runs later.
	 */
	return retval;
}
7146
7147
7148
7149
7150
7151
7152
7153
7154
7155
7156
7157
7158
7159
7160
7161
7162
7163
7164
7165
7166
7167
7168
/**
 * lpfc_sli_validate_fcp_iocb - Check if an iocb matches an FCP search scope
 * @iocbq: Pointer to the iocb to examine.
 * @vport: Pointer to the vport the search is scoped to.
 * @tgt_id: SCSI target id (used for LUN and TGT contexts).
 * @lun_id: SCSI LUN (used for LUN context only).
 * @ctx_cmd: Search scope: LPFC_CTX_LUN, LPFC_CTX_TGT or LPFC_CTX_HOST.
 *
 * Returns 0 when @iocbq is an active FCP command on @vport that falls
 * within the requested scope, 1 otherwise (including non-FCP iocbs,
 * other vports, and commands with no SCSI command attached).
 */
static int
lpfc_sli_validate_fcp_iocb(struct lpfc_iocbq *iocbq, struct lpfc_vport *vport,
			   uint16_t tgt_id, uint64_t lun_id,
			   lpfc_ctx_cmd ctx_cmd)
{
	struct lpfc_scsi_buf *lpfc_cmd;
	int rc = 1;

	if (!(iocbq->iocb_flag &  LPFC_IO_FCP))
		return rc;

	if (iocbq->vport != vport)
		return rc;

	/* The FCP iocb is embedded in the scsi buffer. */
	lpfc_cmd = container_of(iocbq, struct lpfc_scsi_buf, cur_iocbq);

	/* No SCSI command attached means the buffer is not in flight. */
	if (lpfc_cmd->pCmd == NULL)
		return rc;

	switch (ctx_cmd) {
	case LPFC_CTX_LUN:
		/* Match both the target id and the LUN. */
		if ((lpfc_cmd->rdata->pnode) &&
		    (lpfc_cmd->rdata->pnode->nlp_sid == tgt_id) &&
		    (scsilun_to_int(&lpfc_cmd->fcp_cmnd->fcp_lun) == lun_id))
			rc = 0;
		break;
	case LPFC_CTX_TGT:
		/* Match the target id only. */
		if ((lpfc_cmd->rdata->pnode) &&
		    (lpfc_cmd->rdata->pnode->nlp_sid == tgt_id))
			rc = 0;
		break;
	case LPFC_CTX_HOST:
		/* Host scope: every FCP command on this vport matches. */
		rc = 0;
		break;
	default:
		printk(KERN_ERR "%s: Unknown context cmd type, value %d\n",
			__func__, ctx_cmd);
		break;
	}

	return rc;
}
7211
7212
7213
7214
7215
7216
7217
7218
7219
7220
7221
7222
7223
7224
7225
7226
7227
7228
7229
7230
7231int
7232lpfc_sli_sum_iocb(struct lpfc_vport *vport, uint16_t tgt_id, uint64_t lun_id,
7233 lpfc_ctx_cmd ctx_cmd)
7234{
7235 struct lpfc_hba *phba = vport->phba;
7236 struct lpfc_iocbq *iocbq;
7237 int sum, i;
7238
7239 for (i = 1, sum = 0; i <= phba->sli.last_iotag; i++) {
7240 iocbq = phba->sli.iocbq_lookup[i];
7241
7242 if (lpfc_sli_validate_fcp_iocb (iocbq, vport, tgt_id, lun_id,
7243 ctx_cmd) == 0)
7244 sum++;
7245 }
7246
7247 return sum;
7248}
7249
7250
7251
7252
7253
7254
7255
7256
7257
7258
7259
/**
 * lpfc_sli_abort_fcp_cmpl - Completion handler for an FCP abort iocb
 * @phba: Pointer to HBA context object.
 * @cmdiocb: Pointer to the completed abort command iocb.
 * @rspiocb: Pointer to the response iocb (ignored).
 *
 * The abort outcome is not examined here; the aborted command's own
 * completion path handles its status.  This handler only returns the
 * abort iocb to the pool.
 */
void
lpfc_sli_abort_fcp_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
			struct lpfc_iocbq *rspiocb)
{
	lpfc_sli_release_iocbq(phba, cmdiocb);
}
7267
7268
7269
7270
7271
7272
7273
7274
7275
7276
7277
7278
7279
7280
7281
7282
7283
7284
7285
7286
7287
7288
/**
 * lpfc_sli_abort_iocb - Abort all FCP iocbs matching a search scope
 * @vport: Pointer to the virtual port.
 * @pring: Pointer to the ring to issue aborts on.
 * @tgt_id: SCSI target id (for LUN/TGT scopes).
 * @lun_id: SCSI LUN (for LUN scope).
 * @abort_cmd: Search scope: LPFC_CTX_LUN, LPFC_CTX_TGT or LPFC_CTX_HOST.
 *
 * Walks the HBA's iotag lookup table and, for every iocb that
 * lpfc_sli_validate_fcp_iocb accepts, builds and issues an
 * ABORT_XRI_CN (link up) or CLOSE_XRI_CN (link down) iocb with
 * lpfc_sli_abort_fcp_cmpl as its completion handler.
 *
 * Returns the number of iocbs that could not be aborted (0 on full
 * success).
 */
int
lpfc_sli_abort_iocb(struct lpfc_vport *vport, struct lpfc_sli_ring *pring,
		    uint16_t tgt_id, uint64_t lun_id, lpfc_ctx_cmd abort_cmd)
{
	struct lpfc_hba *phba = vport->phba;
	struct lpfc_iocbq *iocbq;
	struct lpfc_iocbq *abtsiocb;
	IOCB_t *cmd = NULL;
	int errcnt = 0, ret_val = 0;
	int i;

	for (i = 1; i <= phba->sli.last_iotag; i++) {
		iocbq = phba->sli.iocbq_lookup[i];

		if (lpfc_sli_validate_fcp_iocb(iocbq, vport, tgt_id, lun_id,
					       abort_cmd) != 0)
			continue;

		/* issue ABTS for this IOCB based on iotag */
		abtsiocb = lpfc_sli_get_iocbq(phba);
		if (abtsiocb == NULL) {
			errcnt++;
			continue;
		}

		cmd = &iocbq->iocb;
		abtsiocb->iocb.un.acxri.abortType = ABORT_TYPE_ABTS;
		abtsiocb->iocb.un.acxri.abortContextTag = cmd->ulpContext;
		/* SLI-4 aborts by XRI; SLI-3 by iotag. */
		if (phba->sli_rev == LPFC_SLI_REV4)
			abtsiocb->iocb.un.acxri.abortIoTag = iocbq->sli4_xritag;
		else
			abtsiocb->iocb.un.acxri.abortIoTag = cmd->ulpIoTag;
		abtsiocb->iocb.ulpLe = 1;
		abtsiocb->iocb.ulpClass = cmd->ulpClass;
		abtsiocb->vport = phba->pport;

		/* ABTS on the wire only makes sense with a live link. */
		if (lpfc_is_link_up(phba))
			abtsiocb->iocb.ulpCommand = CMD_ABORT_XRI_CN;
		else
			abtsiocb->iocb.ulpCommand = CMD_CLOSE_XRI_CN;

		/* Setup callback routine and issue the command. */
		abtsiocb->iocb_cmpl = lpfc_sli_abort_fcp_cmpl;
		ret_val = lpfc_sli_issue_iocb(phba, pring->ringno,
					      abtsiocb, 0);
		if (ret_val == IOCB_ERROR) {
			lpfc_sli_release_iocbq(phba, abtsiocb);
			errcnt++;
			continue;
		}
	}

	return errcnt;
}
7343
7344
7345
7346
7347
7348
7349
7350
7351
7352
7353
7354
7355
7356
7357
7358
7359
7360
7361static void
7362lpfc_sli_wake_iocb_wait(struct lpfc_hba *phba,
7363 struct lpfc_iocbq *cmdiocbq,
7364 struct lpfc_iocbq *rspiocbq)
7365{
7366 wait_queue_head_t *pdone_q;
7367 unsigned long iflags;
7368
7369 spin_lock_irqsave(&phba->hbalock, iflags);
7370 cmdiocbq->iocb_flag |= LPFC_IO_WAKE;
7371 if (cmdiocbq->context2 && rspiocbq)
7372 memcpy(&((struct lpfc_iocbq *)cmdiocbq->context2)->iocb,
7373 &rspiocbq->iocb, sizeof(IOCB_t));
7374
7375 pdone_q = cmdiocbq->context_un.wait_queue;
7376 if (pdone_q)
7377 wake_up(pdone_q);
7378 spin_unlock_irqrestore(&phba->hbalock, iflags);
7379 return;
7380}
7381
7382
7383
7384
7385
7386
7387
7388
7389
7390
7391
7392
7393
7394static int
7395lpfc_chk_iocb_flg(struct lpfc_hba *phba,
7396 struct lpfc_iocbq *piocbq, uint32_t flag)
7397{
7398 unsigned long iflags;
7399 int ret;
7400
7401 spin_lock_irqsave(&phba->hbalock, iflags);
7402 ret = piocbq->iocb_flag & flag;
7403 spin_unlock_irqrestore(&phba->hbalock, iflags);
7404 return ret;
7405
7406}
7407
7408
7409
7410
7411
7412
7413
7414
7415
7416
7417
7418
7419
7420
7421
7422
7423
7424
7425
7426
7427
7428
7429
7430
7431
7432
7433
7434
7435
7436
7437
7438int
7439lpfc_sli_issue_iocb_wait(struct lpfc_hba *phba,
7440 uint32_t ring_number,
7441 struct lpfc_iocbq *piocb,
7442 struct lpfc_iocbq *prspiocbq,
7443 uint32_t timeout)
7444{
7445 DECLARE_WAIT_QUEUE_HEAD_ONSTACK(done_q);
7446 long timeleft, timeout_req = 0;
7447 int retval = IOCB_SUCCESS;
7448 uint32_t creg_val;
7449
7450
7451
7452
7453
7454 if (prspiocbq) {
7455 if (piocb->context2)
7456 return IOCB_ERROR;
7457 piocb->context2 = prspiocbq;
7458 }
7459
7460 piocb->iocb_cmpl = lpfc_sli_wake_iocb_wait;
7461 piocb->context_un.wait_queue = &done_q;
7462 piocb->iocb_flag &= ~LPFC_IO_WAKE;
7463
7464 if (phba->cfg_poll & DISABLE_FCP_RING_INT) {
7465 creg_val = readl(phba->HCregaddr);
7466 creg_val |= (HC_R0INT_ENA << LPFC_FCP_RING);
7467 writel(creg_val, phba->HCregaddr);
7468 readl(phba->HCregaddr);
7469 }
7470
7471 retval = lpfc_sli_issue_iocb(phba, ring_number, piocb, 0);
7472 if (retval == IOCB_SUCCESS) {
7473 timeout_req = timeout * HZ;
7474 timeleft = wait_event_timeout(done_q,
7475 lpfc_chk_iocb_flg(phba, piocb, LPFC_IO_WAKE),
7476 timeout_req);
7477
7478 if (piocb->iocb_flag & LPFC_IO_WAKE) {
7479 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
7480 "0331 IOCB wake signaled\n");
7481 } else if (timeleft == 0) {
7482 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
7483 "0338 IOCB wait timeout error - no "
7484 "wake response Data x%x\n", timeout);
7485 retval = IOCB_TIMEDOUT;
7486 } else {
7487 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
7488 "0330 IOCB wake NOT set, "
7489 "Data x%x x%lx\n",
7490 timeout, (timeleft / jiffies));
7491 retval = IOCB_TIMEDOUT;
7492 }
7493 } else {
7494 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
7495 "0332 IOCB wait issue failed, Data x%x\n",
7496 retval);
7497 retval = IOCB_ERROR;
7498 }
7499
7500 if (phba->cfg_poll & DISABLE_FCP_RING_INT) {
7501 creg_val = readl(phba->HCregaddr);
7502 creg_val &= ~(HC_R0INT_ENA << LPFC_FCP_RING);
7503 writel(creg_val, phba->HCregaddr);
7504 readl(phba->HCregaddr);
7505 }
7506
7507 if (prspiocbq)
7508 piocb->context2 = NULL;
7509
7510 piocb->context_un.wait_queue = NULL;
7511 piocb->iocb_cmpl = NULL;
7512 return retval;
7513}
7514
7515
7516
7517
7518
7519
7520
7521
7522
7523
7524
7525
7526
7527
7528
7529
7530
7531
7532
7533
7534
7535
7536
7537
7538
7539
7540
/**
 * lpfc_sli_issue_mbox_wait - Issue a mailbox command and wait for completion
 * @phba: pointer to the HBA context object.
 * @pmboxq: mailbox command to issue; context1 must be free on entry.
 * @timeout: maximum wait time, in seconds.
 *
 * Posts @pmboxq with MBX_NOWAIT and then sleeps (interruptibly, with
 * timeout) until lpfc_sli_wake_mbox_wait sets LPFC_MBX_WAKE.  context1
 * is borrowed to carry the on-stack wait queue to the completion
 * handler and is cleared under hbalock before returning.  On timeout
 * (or signal before completion), the completion routine is redirected
 * to lpfc_sli_def_mbox_cmpl so a late completion cleans up after itself.
 *
 * Return: MBX_SUCCESS, MBX_TIMEOUT, or the failure code from
 * lpfc_sli_issue_mbox (including MBX_NOT_FINISHED when context1 is busy).
 */
int
lpfc_sli_issue_mbox_wait(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmboxq,
			 uint32_t timeout)
{
	DECLARE_WAIT_QUEUE_HEAD_ONSTACK(done_q);
	int retval;
	unsigned long flag;

	/* context1 doubles as the wait-queue pointer; refuse if in use. */
	if (pmboxq->context1)
		return MBX_NOT_FINISHED;

	pmboxq->mbox_flag &= ~LPFC_MBX_WAKE;
	/* setup wake call as IOCB callback */
	pmboxq->mbox_cmpl = lpfc_sli_wake_mbox_wait;
	/* setup context field to pass wait_queue pointer to wake function */
	pmboxq->context1 = &done_q;

	/* now issue the command */
	retval = lpfc_sli_issue_mbox(phba, pmboxq, MBX_NOWAIT);

	if (retval == MBX_BUSY || retval == MBX_SUCCESS) {
		wait_event_interruptible_timeout(done_q,
				pmboxq->mbox_flag & LPFC_MBX_WAKE,
				timeout * HZ);

		spin_lock_irqsave(&phba->hbalock, flag);
		pmboxq->context1 = NULL;
		/*
		 * if LPFC_MBX_WAKE flag is set the mailbox is completed
		 * else do not free the resources.
		 */
		if (pmboxq->mbox_flag & LPFC_MBX_WAKE)
			retval = MBX_SUCCESS;
		else {
			retval = MBX_TIMEOUT;
			pmboxq->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
		}
		spin_unlock_irqrestore(&phba->hbalock, flag);
	}

	return retval;
}
7584
7585
7586
7587
7588
7589
7590
7591
7592
7593
7594
7595
7596
7597
7598
7599
/**
 * lpfc_sli_mbox_sys_shutdown - Shut down the mailbox subsystem
 * @phba: pointer to the HBA context object.
 *
 * Blocks any further asynchronous mailbox posting, then (if SLI is
 * active) polls for the currently outstanding mailbox command to
 * finish, bounded by that command's own timeout value.  Finally flushes
 * whatever remains on the mailbox queues via lpfc_sli_mbox_sys_flush().
 * Sleeps (msleep), so must be called from process context.
 */
void
lpfc_sli_mbox_sys_shutdown(struct lpfc_hba *phba)
{
	struct lpfc_sli *psli = &phba->sli;
	uint8_t actcmd = MBX_HEARTBEAT;
	unsigned long timeout;

	spin_lock_irq(&phba->hbalock);
	psli->sli_flag |= LPFC_SLI_ASYNC_MBX_BLK;
	spin_unlock_irq(&phba->hbalock);

	if (psli->sli_flag & LPFC_SLI_ACTIVE) {
		spin_lock_irq(&phba->hbalock);
		/* Snapshot the active command to derive its timeout. */
		if (phba->sli.mbox_active)
			actcmd = phba->sli.mbox_active->u.mb.mbxCommand;
		spin_unlock_irq(&phba->hbalock);
		/* Determine how long we might wait for the active mailbox
		 * command to be gracefully completed by firmware.
		 */
		timeout = msecs_to_jiffies(lpfc_mbox_tmo_val(phba, actcmd) *
					   1000) + jiffies;
		while (phba->sli.mbox_active) {
			/* Check active mailbox complete status every 2ms */
			msleep(2);
			if (time_after(jiffies, timeout))
				/* Timeout, let the mailbox flush routine to
				 * forcefully release active mailbox command
				 */
				break;
		}
	}
	lpfc_sli_mbox_sys_flush(phba);
}
7633
7634
7635
7636
7637
7638
7639
7640
7641
7642
7643
7644
/**
 * lpfc_sli_eratt_read - Check for SLI3 host attention error event
 * @phba: pointer to the HBA context object.
 *
 * Reads the host attention register; on HA_ERATT it latches the host
 * status into phba->work_hs (via lpfc_sli_read_hs), flags deferred
 * error attention for the recoverable FFER1+FFERx combination (masking
 * all interrupt enables while deferred), and marks the error as
 * handled.  Caller holds hbalock (this only sets flags/work bits).
 *
 * Return: 1 if an error attention event was found, otherwise 0.
 */
static int
lpfc_sli_eratt_read(struct lpfc_hba *phba)
{
	uint32_t ha_copy;

	/* Read chip Host Attention (HA) register */
	ha_copy = readl(phba->HAregaddr);
	if (ha_copy & HA_ERATT) {
		/* Read host status register to retrieve error event */
		lpfc_sli_read_hs(phba);

		/* Check if there is a deferred error condition is active */
		if ((HS_FFER1 & phba->work_hs) &&
		    ((HS_FFER2 | HS_FFER3 | HS_FFER4 | HS_FFER5 |
		      HS_FFER6 | HS_FFER7) & phba->work_hs)) {
			phba->hba_flag |= DEFER_ERATT;
			/* Clear all interrupt enable conditions */
			writel(0, phba->HCregaddr);
			readl(phba->HCregaddr);
		}

		/* Set the driver HA work bitmap */
		phba->work_ha |= HA_ERATT;
		/* Indicate polling handles this ERATT */
		phba->hba_flag |= HBA_ERATT_HANDLED;
		return 1;
	}
	return 0;
}
7674
7675
7676
7677
7678
7679
7680
7681
7682
7683
7684
7685
/**
 * lpfc_sli4_eratt_read - Check for SLI4 unrecoverable error event
 * @phba: pointer to the HBA context object.
 *
 * Reads the ONLINE0/ONLINE1 registers; if either deviates from
 * LPFC_ONLINE_NERR, the UERR status registers are read and, when
 * non-zero, the error is logged, recorded in phba->work_status[], and
 * flagged (work_ha |= HA_ERATT, HBA_ERATT_HANDLED).
 *
 * Return: 1 if an unrecoverable error was found, otherwise 0.
 */
static int
lpfc_sli4_eratt_read(struct lpfc_hba *phba)
{
	uint32_t uerr_sta_hi, uerr_sta_lo;
	uint32_t onlnreg0, onlnreg1;

	/* For now, use the SLI4 device internal unrecoverable error
	 * registers for error attention. This can be changed later.
	 */
	onlnreg0 = readl(phba->sli4_hba.ONLINE0regaddr);
	onlnreg1 = readl(phba->sli4_hba.ONLINE1regaddr);
	if ((onlnreg0 != LPFC_ONLINE_NERR) || (onlnreg1 != LPFC_ONLINE_NERR)) {
		uerr_sta_lo = readl(phba->sli4_hba.UERRLOregaddr);
		uerr_sta_hi = readl(phba->sli4_hba.UERRHIregaddr);
		if (uerr_sta_lo || uerr_sta_hi) {
			lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
					"1423 HBA Unrecoverable error: "
					"uerr_lo_reg=0x%x, uerr_hi_reg=0x%x, "
					"online0_reg=0x%x, online1_reg=0x%x\n",
					uerr_sta_lo, uerr_sta_hi,
					onlnreg0, onlnreg1);
			phba->work_status[0] = uerr_sta_lo;
			phba->work_status[1] = uerr_sta_hi;
			/* Set the driver HA work bitmap */
			phba->work_ha |= HA_ERATT;
			/* Indicate polling handles this ERATT */
			phba->hba_flag |= HBA_ERATT_HANDLED;
			return 1;
		}
	}
	return 0;
}
7718
7719
7720
7721
7722
7723
7724
7725
7726
7727
7728
/**
 * lpfc_sli_check_eratt - Poll for an HBA error attention event
 * @phba: pointer to the HBA context object.
 *
 * Checks, under hbalock, whether an error attention should be acted on
 * now: skips when ERATT is being ignored, already handled by the
 * interrupt handler, already deferred, or the PCI channel is offline.
 * Otherwise dispatches to the SLI-revision-specific reader.
 *
 * Return: 1 if an error attention event was found, otherwise 0.
 */
int
lpfc_sli_check_eratt(struct lpfc_hba *phba)
{
	uint32_t ha_copy;

	/* If somebody is waiting to handle an eratt, don't process it
	 * here. The brdkill function will do this.
	 */
	if (phba->link_flag & LS_IGNORE_ERATT)
		return 0;

	/* Check if interrupt handler handles this ERATT */
	spin_lock_irq(&phba->hbalock);
	if (phba->hba_flag & HBA_ERATT_HANDLED) {
		/* Interrupt handler has handled ERATT */
		spin_unlock_irq(&phba->hbalock);
		return 0;
	}

	/*
	 * If there is deferred error attention, do not check for error
	 * attention
	 */
	if (unlikely(phba->hba_flag & DEFER_ERATT)) {
		spin_unlock_irq(&phba->hbalock);
		return 0;
	}

	/* If PCI channel is offline, don't process it */
	if (unlikely(pci_channel_offline(phba->pcidev))) {
		spin_unlock_irq(&phba->hbalock);
		return 0;
	}

	switch (phba->sli_rev) {
	case LPFC_SLI_REV2:
	case LPFC_SLI_REV3:
		/* Read chip Host Attention (HA) register */
		ha_copy = lpfc_sli_eratt_read(phba);
		break;
	case LPFC_SLI_REV4:
		/* Read devcie Uncoverable Error (UERR) registers */
		ha_copy = lpfc_sli4_eratt_read(phba);
		break;
	default:
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"0299 Invalid SLI revision (%d)\n",
				phba->sli_rev);
		ha_copy = 0;
		break;
	}
	spin_unlock_irq(&phba->hbalock);

	return ha_copy;
}
7784
7785
7786
7787
7788
7789
7790
7791
7792
7793
7794
7795static inline int
7796lpfc_intr_state_check(struct lpfc_hba *phba)
7797{
7798
7799 if (unlikely(pci_channel_offline(phba->pcidev)))
7800 return -EIO;
7801
7802
7803 phba->sli.slistat.sli_intr++;
7804
7805
7806 if (unlikely(phba->link_state < LPFC_LINK_DOWN))
7807 return -EIO;
7808
7809 return 0;
7810}
7811
7812
7813
7814
7815
7816
7817
7818
7819
7820
7821
7822
7823
7824
7825
7826
7827
7828
7829
7830
7831
7832
7833irqreturn_t
7834lpfc_sli_sp_intr_handler(int irq, void *dev_id)
7835{
7836 struct lpfc_hba *phba;
7837 uint32_t ha_copy;
7838 uint32_t work_ha_copy;
7839 unsigned long status;
7840 unsigned long iflag;
7841 uint32_t control;
7842
7843 MAILBOX_t *mbox, *pmbox;
7844 struct lpfc_vport *vport;
7845 struct lpfc_nodelist *ndlp;
7846 struct lpfc_dmabuf *mp;
7847 LPFC_MBOXQ_t *pmb;
7848 int rc;
7849
7850
7851
7852
7853
7854 phba = (struct lpfc_hba *)dev_id;
7855
7856 if (unlikely(!phba))
7857 return IRQ_NONE;
7858
7859
7860
7861
7862
7863 if (phba->intr_type == MSIX) {
7864
7865 if (lpfc_intr_state_check(phba))
7866 return IRQ_NONE;
7867
7868 spin_lock_irqsave(&phba->hbalock, iflag);
7869 ha_copy = readl(phba->HAregaddr);
7870
7871
7872
7873 if (phba->link_flag & LS_IGNORE_ERATT)
7874 ha_copy &= ~HA_ERATT;
7875
7876 if (ha_copy & HA_ERATT) {
7877 if (phba->hba_flag & HBA_ERATT_HANDLED)
7878
7879 ha_copy &= ~HA_ERATT;
7880 else
7881
7882 phba->hba_flag |= HBA_ERATT_HANDLED;
7883 }
7884
7885
7886
7887
7888
7889 if (unlikely(phba->hba_flag & DEFER_ERATT)) {
7890 spin_unlock_irqrestore(&phba->hbalock, iflag);
7891 return IRQ_NONE;
7892 }
7893
7894
7895 writel((ha_copy & (HA_MBATT | HA_R2_CLR_MSK)),
7896 phba->HAregaddr);
7897 readl(phba->HAregaddr);
7898 spin_unlock_irqrestore(&phba->hbalock, iflag);
7899 } else
7900 ha_copy = phba->ha_copy;
7901
7902 work_ha_copy = ha_copy & phba->work_ha_mask;
7903
7904 if (work_ha_copy) {
7905 if (work_ha_copy & HA_LATT) {
7906 if (phba->sli.sli_flag & LPFC_PROCESS_LA) {
7907
7908
7909
7910
7911 spin_lock_irqsave(&phba->hbalock, iflag);
7912 phba->sli.sli_flag &= ~LPFC_PROCESS_LA;
7913 control = readl(phba->HCregaddr);
7914 control &= ~HC_LAINT_ENA;
7915 writel(control, phba->HCregaddr);
7916 readl(phba->HCregaddr);
7917 spin_unlock_irqrestore(&phba->hbalock, iflag);
7918 }
7919 else
7920 work_ha_copy &= ~HA_LATT;
7921 }
7922
7923 if (work_ha_copy & ~(HA_ERATT | HA_MBATT | HA_LATT)) {
7924
7925
7926
7927
7928 status = (work_ha_copy &
7929 (HA_RXMASK << (4*LPFC_ELS_RING)));
7930 status >>= (4*LPFC_ELS_RING);
7931 if (status & HA_RXMASK) {
7932 spin_lock_irqsave(&phba->hbalock, iflag);
7933 control = readl(phba->HCregaddr);
7934
7935 lpfc_debugfs_slow_ring_trc(phba,
7936 "ISR slow ring: ctl:x%x stat:x%x isrcnt:x%x",
7937 control, status,
7938 (uint32_t)phba->sli.slistat.sli_intr);
7939
7940 if (control & (HC_R0INT_ENA << LPFC_ELS_RING)) {
7941 lpfc_debugfs_slow_ring_trc(phba,
7942 "ISR Disable ring:"
7943 "pwork:x%x hawork:x%x wait:x%x",
7944 phba->work_ha, work_ha_copy,
7945 (uint32_t)((unsigned long)
7946 &phba->work_waitq));
7947
7948 control &=
7949 ~(HC_R0INT_ENA << LPFC_ELS_RING);
7950 writel(control, phba->HCregaddr);
7951 readl(phba->HCregaddr);
7952 }
7953 else {
7954 lpfc_debugfs_slow_ring_trc(phba,
7955 "ISR slow ring: pwork:"
7956 "x%x hawork:x%x wait:x%x",
7957 phba->work_ha, work_ha_copy,
7958 (uint32_t)((unsigned long)
7959 &phba->work_waitq));
7960 }
7961 spin_unlock_irqrestore(&phba->hbalock, iflag);
7962 }
7963 }
7964 spin_lock_irqsave(&phba->hbalock, iflag);
7965 if (work_ha_copy & HA_ERATT) {
7966 lpfc_sli_read_hs(phba);
7967
7968
7969
7970
7971 if ((HS_FFER1 & phba->work_hs) &&
7972 ((HS_FFER2 | HS_FFER3 | HS_FFER4 | HS_FFER5 |
7973 HS_FFER6 | HS_FFER7) & phba->work_hs)) {
7974 phba->hba_flag |= DEFER_ERATT;
7975
7976 writel(0, phba->HCregaddr);
7977 readl(phba->HCregaddr);
7978 }
7979 }
7980
7981 if ((work_ha_copy & HA_MBATT) && (phba->sli.mbox_active)) {
7982 pmb = phba->sli.mbox_active;
7983 pmbox = &pmb->u.mb;
7984 mbox = phba->mbox;
7985 vport = pmb->vport;
7986
7987
7988 lpfc_sli_pcimem_bcopy(mbox, pmbox, sizeof(uint32_t));
7989 if (pmbox->mbxOwner != OWN_HOST) {
7990 spin_unlock_irqrestore(&phba->hbalock, iflag);
7991
7992
7993
7994
7995 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX |
7996 LOG_SLI,
7997 "(%d):0304 Stray Mailbox "
7998 "Interrupt mbxCommand x%x "
7999 "mbxStatus x%x\n",
8000 (vport ? vport->vpi : 0),
8001 pmbox->mbxCommand,
8002 pmbox->mbxStatus);
8003
8004 work_ha_copy &= ~HA_MBATT;
8005 } else {
8006 phba->sli.mbox_active = NULL;
8007 spin_unlock_irqrestore(&phba->hbalock, iflag);
8008 phba->last_completion_time = jiffies;
8009 del_timer(&phba->sli.mbox_tmo);
8010 if (pmb->mbox_cmpl) {
8011 lpfc_sli_pcimem_bcopy(mbox, pmbox,
8012 MAILBOX_CMD_SIZE);
8013 }
8014 if (pmb->mbox_flag & LPFC_MBX_IMED_UNREG) {
8015 pmb->mbox_flag &= ~LPFC_MBX_IMED_UNREG;
8016
8017 lpfc_debugfs_disc_trc(vport,
8018 LPFC_DISC_TRC_MBOX_VPORT,
8019 "MBOX dflt rpi: : "
8020 "status:x%x rpi:x%x",
8021 (uint32_t)pmbox->mbxStatus,
8022 pmbox->un.varWords[0], 0);
8023
8024 if (!pmbox->mbxStatus) {
8025 mp = (struct lpfc_dmabuf *)
8026 (pmb->context1);
8027 ndlp = (struct lpfc_nodelist *)
8028 pmb->context2;
8029
8030
8031
8032
8033
8034
8035 lpfc_unreg_login(phba,
8036 vport->vpi,
8037 pmbox->un.varWords[0],
8038 pmb);
8039 pmb->mbox_cmpl =
8040 lpfc_mbx_cmpl_dflt_rpi;
8041 pmb->context1 = mp;
8042 pmb->context2 = ndlp;
8043 pmb->vport = vport;
8044 rc = lpfc_sli_issue_mbox(phba,
8045 pmb,
8046 MBX_NOWAIT);
8047 if (rc != MBX_BUSY)
8048 lpfc_printf_log(phba,
8049 KERN_ERR,
8050 LOG_MBOX | LOG_SLI,
8051 "0350 rc should have"
8052 "been MBX_BUSY");
8053 if (rc != MBX_NOT_FINISHED)
8054 goto send_current_mbox;
8055 }
8056 }
8057 spin_lock_irqsave(
8058 &phba->pport->work_port_lock,
8059 iflag);
8060 phba->pport->work_port_events &=
8061 ~WORKER_MBOX_TMO;
8062 spin_unlock_irqrestore(
8063 &phba->pport->work_port_lock,
8064 iflag);
8065 lpfc_mbox_cmpl_put(phba, pmb);
8066 }
8067 } else
8068 spin_unlock_irqrestore(&phba->hbalock, iflag);
8069
8070 if ((work_ha_copy & HA_MBATT) &&
8071 (phba->sli.mbox_active == NULL)) {
8072send_current_mbox:
8073
8074 do {
8075 rc = lpfc_sli_issue_mbox(phba, NULL,
8076 MBX_NOWAIT);
8077 } while (rc == MBX_NOT_FINISHED);
8078 if (rc != MBX_SUCCESS)
8079 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX |
8080 LOG_SLI, "0349 rc should be "
8081 "MBX_SUCCESS");
8082 }
8083
8084 spin_lock_irqsave(&phba->hbalock, iflag);
8085 phba->work_ha |= work_ha_copy;
8086 spin_unlock_irqrestore(&phba->hbalock, iflag);
8087 lpfc_worker_wake_up(phba);
8088 }
8089 return IRQ_HANDLED;
8090
8091}
8092
8093
8094
8095
8096
8097
8098
8099
8100
8101
8102
8103
8104
8105
8106
8107
8108
8109
8110
8111
/**
 * lpfc_sli_fp_intr_handler - Fast-path (FCP ring) interrupt handler
 * @irq: interrupt number (unused except for the prototype).
 * @dev_id: pointer to the driver's struct lpfc_hba.
 *
 * Services fast-path ring events.  For MSI-X the HA register is read
 * here (NOTE(review): unlike the slow-path handler, the readl happens
 * before hbalock is taken - confirm this ordering is intentional); for
 * INTx the combined handler has already latched phba->ha_copy.  Bits
 * belonging to the slow-path mask are filtered out before dispatching
 * to lpfc_sli_handle_fast_ring_event() for the FCP ring and, when
 * multi-ring support is configured, the extra ring.
 *
 * Return: IRQ_HANDLED, or IRQ_NONE when the interrupt is not ours.
 */
irqreturn_t
lpfc_sli_fp_intr_handler(int irq, void *dev_id)
{
	struct lpfc_hba *phba;
	uint32_t ha_copy;
	unsigned long status;
	unsigned long iflag;

	/* Get the driver's phba structure from the dev_id and
	 * assume the HBA is not interrupting.
	 */
	phba = (struct lpfc_hba *) dev_id;

	if (unlikely(!phba))
		return IRQ_NONE;

	/*
	 * Stuff needs to be attented to when this function is invoked as an
	 * individual interrupt handler in MSI-X multi-message interrupt mode
	 */
	if (phba->intr_type == MSIX) {
		/* Check device state for handling interrupt */
		if (lpfc_intr_state_check(phba))
			return IRQ_NONE;
		/* Need to read HA REG for FCP ring and other ring events */
		ha_copy = readl(phba->HAregaddr);
		/* Clear up only attention source related to fast-path */
		spin_lock_irqsave(&phba->hbalock, iflag);
		/*
		 * If there is deferred error attention, do not check for
		 * any interrupt.
		 */
		if (unlikely(phba->hba_flag & DEFER_ERATT)) {
			spin_unlock_irqrestore(&phba->hbalock, iflag);
			return IRQ_NONE;
		}
		writel((ha_copy & (HA_R0_CLR_MSK | HA_R1_CLR_MSK)),
			phba->HAregaddr);
		readl(phba->HAregaddr); /* flush */
		spin_unlock_irqrestore(&phba->hbalock, iflag);
	} else
		ha_copy = phba->ha_copy;

	/*
	 * Process all events on FCP ring. Take the optimized path for FCP IO.
	 */
	ha_copy &= ~(phba->work_ha_mask);

	status = (ha_copy & (HA_RXMASK << (4*LPFC_FCP_RING)));
	status >>= (4*LPFC_FCP_RING);
	if (status & HA_RXMASK)
		lpfc_sli_handle_fast_ring_event(phba,
						&phba->sli.ring[LPFC_FCP_RING],
						status);

	if (phba->cfg_multi_ring_support == 2) {
		/*
		 * Process all events on extra ring. Take the optimized path
		 * for extra ring IO.
		 */
		status = (ha_copy & (HA_RXMASK << (4*LPFC_EXTRA_RING)));
		status >>= (4*LPFC_EXTRA_RING);
		if (status & HA_RXMASK) {
			lpfc_sli_handle_fast_ring_event(phba,
					&phba->sli.ring[LPFC_EXTRA_RING],
					status);
		}
	}
	return IRQ_HANDLED;
}
8182
8183
8184
8185
8186
8187
8188
8189
8190
8191
8192
8193
8194
8195
8196
8197
8198
8199
8200irqreturn_t
8201lpfc_sli_intr_handler(int irq, void *dev_id)
8202{
8203 struct lpfc_hba *phba;
8204 irqreturn_t sp_irq_rc, fp_irq_rc;
8205 unsigned long status1, status2;
8206
8207
8208
8209
8210
8211 phba = (struct lpfc_hba *) dev_id;
8212
8213 if (unlikely(!phba))
8214 return IRQ_NONE;
8215
8216
8217 if (lpfc_intr_state_check(phba))
8218 return IRQ_NONE;
8219
8220 spin_lock(&phba->hbalock);
8221 phba->ha_copy = readl(phba->HAregaddr);
8222 if (unlikely(!phba->ha_copy)) {
8223 spin_unlock(&phba->hbalock);
8224 return IRQ_NONE;
8225 } else if (phba->ha_copy & HA_ERATT) {
8226 if (phba->hba_flag & HBA_ERATT_HANDLED)
8227
8228 phba->ha_copy &= ~HA_ERATT;
8229 else
8230
8231 phba->hba_flag |= HBA_ERATT_HANDLED;
8232 }
8233
8234
8235
8236
8237 if (unlikely(phba->hba_flag & DEFER_ERATT)) {
8238 spin_unlock_irq(&phba->hbalock);
8239 return IRQ_NONE;
8240 }
8241
8242
8243 writel((phba->ha_copy & ~(HA_LATT | HA_ERATT)), phba->HAregaddr);
8244 readl(phba->HAregaddr);
8245 spin_unlock(&phba->hbalock);
8246
8247
8248
8249
8250
8251
8252 status1 = phba->ha_copy & (HA_MBATT | HA_LATT | HA_ERATT);
8253
8254
8255 status2 = (phba->ha_copy & (HA_RXMASK << (4*LPFC_ELS_RING)));
8256 status2 >>= (4*LPFC_ELS_RING);
8257
8258 if (status1 || (status2 & HA_RXMASK))
8259 sp_irq_rc = lpfc_sli_sp_intr_handler(irq, dev_id);
8260 else
8261 sp_irq_rc = IRQ_NONE;
8262
8263
8264
8265
8266
8267
8268 status1 = (phba->ha_copy & (HA_RXMASK << (4*LPFC_FCP_RING)));
8269 status1 >>= (4*LPFC_FCP_RING);
8270
8271
8272 if (phba->cfg_multi_ring_support == 2) {
8273 status2 = (phba->ha_copy & (HA_RXMASK << (4*LPFC_EXTRA_RING)));
8274 status2 >>= (4*LPFC_EXTRA_RING);
8275 } else
8276 status2 = 0;
8277
8278 if ((status1 & HA_RXMASK) || (status2 & HA_RXMASK))
8279 fp_irq_rc = lpfc_sli_fp_intr_handler(irq, dev_id);
8280 else
8281 fp_irq_rc = IRQ_NONE;
8282
8283
8284 return (sp_irq_rc == IRQ_HANDLED) ? sp_irq_rc : fp_irq_rc;
8285}
8286
8287
8288
8289
8290
8291
8292
8293
/**
 * lpfc_sli4_fcp_xri_abort_event_proc - Process queued FCP XRI-abort events
 * @phba: pointer to the HBA context object.
 *
 * Worker-thread context.  Clears the FCP_XRI_ABORT_EVENT flag, then
 * drains the slow-path FCP XRI-abort work queue, notifying the FCP
 * layer for each aborted XRI and releasing the cq_event afterwards.
 * The lock is dropped around the notification so the callee may sleep
 * or take other locks.
 */
void lpfc_sli4_fcp_xri_abort_event_proc(struct lpfc_hba *phba)
{
	struct lpfc_cq_event *cq_event;

	/* First, declare the fcp xri abort event has been handled */
	spin_lock_irq(&phba->hbalock);
	phba->hba_flag &= ~FCP_XRI_ABORT_EVENT;
	spin_unlock_irq(&phba->hbalock);
	/* Now, handle all the fcp xri abort events */
	while (!list_empty(&phba->sli4_hba.sp_fcp_xri_aborted_work_queue)) {
		/* Get the first event from the head of the event queue */
		spin_lock_irq(&phba->hbalock);
		list_remove_head(&phba->sli4_hba.sp_fcp_xri_aborted_work_queue,
				 cq_event, struct lpfc_cq_event, list);
		spin_unlock_irq(&phba->hbalock);
		/* Notify aborted XRI for FCP work queue */
		lpfc_sli4_fcp_xri_aborted(phba, &cq_event->cqe.wcqe_axri);
		/* Free the event processed back to the free pool */
		lpfc_sli4_cq_event_release(phba, cq_event);
	}
}
8315
8316
8317
8318
8319
8320
8321
8322
/**
 * lpfc_sli4_els_xri_abort_event_proc - Process queued ELS XRI-abort events
 * @phba: pointer to the HBA context object.
 *
 * Worker-thread context.  Clears the ELS_XRI_ABORT_EVENT flag, then
 * drains the slow-path ELS XRI-abort work queue, notifying the ELS
 * layer for each aborted XRI and releasing the cq_event afterwards.
 * Mirrors lpfc_sli4_fcp_xri_abort_event_proc() for the ELS queue.
 */
void lpfc_sli4_els_xri_abort_event_proc(struct lpfc_hba *phba)
{
	struct lpfc_cq_event *cq_event;

	/* First, declare the els xri abort event has been handled */
	spin_lock_irq(&phba->hbalock);
	phba->hba_flag &= ~ELS_XRI_ABORT_EVENT;
	spin_unlock_irq(&phba->hbalock);
	/* Now, handle all the els xri abort events */
	while (!list_empty(&phba->sli4_hba.sp_els_xri_aborted_work_queue)) {
		/* Get the first event from the head of the event queue */
		spin_lock_irq(&phba->hbalock);
		list_remove_head(&phba->sli4_hba.sp_els_xri_aborted_work_queue,
				 cq_event, struct lpfc_cq_event, list);
		spin_unlock_irq(&phba->hbalock);
		/* Notify aborted XRI for ELS work queue */
		lpfc_sli4_els_xri_aborted(phba, &cq_event->cqe.wcqe_axri);
		/* Free the event processed back to the free pool */
		lpfc_sli4_cq_event_release(phba, cq_event);
	}
}
8344
/**
 * lpfc_sli4_iocb_param_transfer - Build a response iocb from a WCQE
 * @pIocbIn: response iocb being populated.
 * @pIocbOut: the original command iocb.
 * @wcqe: the work-queue completion entry reported by the hardware.
 *
 * Copies everything after the IOCB_t portion of the command iocb into
 * the response iocb, zeroes the SLI4-specific info block, then maps the
 * WCQE status/parameter fields into the IOCB_t so SLI3-style completion
 * code can consume it.
 */
static void
lpfc_sli4_iocb_param_transfer(struct lpfc_iocbq *pIocbIn,
			      struct lpfc_iocbq *pIocbOut,
			      struct lpfc_wcqe_complete *wcqe)
{
	size_t offset = offsetof(struct lpfc_iocbq, iocb);

	memcpy((char *)pIocbIn + offset, (char *)pIocbOut + offset,
	       sizeof(struct lpfc_iocbq) - offset);
	memset(&pIocbIn->sli4_info, 0,
	       sizeof(struct lpfc_sli4_rspiocb_info));
	/* Map WCQE parameters into the iocb status/parameter words.
	 * NOTE: the brace-less else below binds to the *inner* if, so
	 * FCP iocbs without an FCP_RSP error also take the ulpWord[4]
	 * assignment - that dangling-else binding is load-bearing.
	 */
	pIocbIn->iocb.ulpStatus = bf_get(lpfc_wcqe_c_status, wcqe);
	if (pIocbOut->iocb_flag & LPFC_IO_FCP)
		if (pIocbIn->iocb.ulpStatus == IOSTAT_FCP_RSP_ERROR)
			pIocbIn->iocb.un.fcpi.fcpi_parm =
					pIocbOut->iocb.un.fcpi.fcpi_parm -
					wcqe->total_data_placed;
		else
			pIocbIn->iocb.un.ulpWord[4] = wcqe->parameter;
	else
		pIocbIn->iocb.un.ulpWord[4] = wcqe->parameter;

	/* Load in additional WCQE parameters */
	pIocbIn->sli4_info.hw_status = bf_get(lpfc_wcqe_c_hw_status, wcqe);
	pIocbIn->sli4_info.bfield = 0;
	if (bf_get(lpfc_wcqe_c_xb, wcqe))
		pIocbIn->sli4_info.bfield |= LPFC_XB;
	if (bf_get(lpfc_wcqe_c_pv, wcqe)) {
		pIocbIn->sli4_info.bfield |= LPFC_PV;
		pIocbIn->sli4_info.priority =
			bf_get(lpfc_wcqe_c_priority, wcqe);
	}
}
8378
8379
8380
8381
8382
8383
8384
8385
8386
8387
8388
/**
 * lpfc_sli4_sp_handle_async_event - Queue an async MCQE for the worker
 * @phba: pointer to the HBA context object.
 * @mcqe: the asynchronous mailbox completion queue entry.
 *
 * Allocates a cq_event, copies the MCQE into it, queues it on the
 * slow-path async work queue, and sets ASYNC_EVENT so the worker
 * thread picks it up.
 *
 * Return: true if work was posted for the worker thread, else false
 * (cq_event allocation failure).
 */
static bool
lpfc_sli4_sp_handle_async_event(struct lpfc_hba *phba, struct lpfc_mcqe *mcqe)
{
	struct lpfc_cq_event *cq_event;
	unsigned long iflags;

	lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
			"0392 Async Event: word0:x%x, word1:x%x, "
			"word2:x%x, word3:x%x\n", mcqe->word0,
			mcqe->mcqe_tag0, mcqe->mcqe_tag1, mcqe->trailer);

	/* Allocate a new internal CQ_EVENT entry */
	cq_event = lpfc_sli4_cq_event_alloc(phba);
	if (!cq_event) {
		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
				"0394 Failed to allocate CQ_EVENT entry\n");
		return false;
	}

	/* Move the CQE into an asynchronous event entry */
	memcpy(&cq_event->cqe, mcqe, sizeof(struct lpfc_mcqe));
	spin_lock_irqsave(&phba->hbalock, iflags);
	list_add_tail(&cq_event->list, &phba->sli4_hba.sp_asynce_work_queue);
	/* Set the async event flag */
	phba->hba_flag |= ASYNC_EVENT;
	spin_unlock_irqrestore(&phba->hbalock, iflags);

	return true;
}
8418
8419
8420
8421
8422
8423
8424
8425
8426
8427
8428
/**
 * lpfc_sli4_sp_handle_mbox_event - Handle a mailbox completion MCQE
 * @phba: pointer to the HBA context object.
 * @mcqe: the mailbox completion queue entry.
 *
 * Completes the active mailbox command: stops the mailbox timer,
 * copies the completed MQE back for the completion handler, folds the
 * MCQE status into the MQE status word on failure, performs the
 * deferred default-RPI unreg dance when LPFC_MBX_IMED_UNREG is set
 * (reusing the same mailbox buffer), queues the command for completion
 * processing, clears the mailbox-active state, and wakes the worker.
 *
 * Return: true if work was posted for the worker thread, else false.
 */
static bool
lpfc_sli4_sp_handle_mbox_event(struct lpfc_hba *phba, struct lpfc_mcqe *mcqe)
{
	uint32_t mcqe_status;
	MAILBOX_t *mbox, *pmbox;
	struct lpfc_mqe *mqe;
	struct lpfc_vport *vport;
	struct lpfc_nodelist *ndlp;
	struct lpfc_dmabuf *mp;
	unsigned long iflags;
	LPFC_MBOXQ_t *pmb;
	bool workposted = false;
	int rc;

	/* If not a mailbox complete MCQE, out by checking mailbox consume */
	if (!bf_get(lpfc_trailer_completed, mcqe))
		goto out_no_mqe_complete;

	/* Get the reference to the active mbox command */
	spin_lock_irqsave(&phba->hbalock, iflags);
	pmb = phba->sli.mbox_active;
	if (unlikely(!pmb)) {
		lpfc_printf_log(phba, KERN_ERR, LOG_MBOX,
				"1832 No pending MBOX command to handle\n");
		spin_unlock_irqrestore(&phba->hbalock, iflags);
		goto out_no_mqe_complete;
	}
	spin_unlock_irqrestore(&phba->hbalock, iflags);
	mqe = &pmb->u.mqe;
	pmbox = (MAILBOX_t *)&pmb->u.mqe;
	mbox = phba->mbox;
	vport = pmb->vport;

	/* Reset heartbeat timer */
	phba->last_completion_time = jiffies;
	del_timer(&phba->sli.mbox_tmo);

	/* Move mbox data to caller's mailbox region, do endian swapping */
	if (pmb->mbox_cmpl && mbox)
		lpfc_sli_pcimem_bcopy(mbox, mqe, sizeof(struct lpfc_mqe));
	/* Set the mailbox status with SLI4 range 0x4000 */
	mcqe_status = bf_get(lpfc_mcqe_status, mcqe);
	if (mcqe_status != MB_CQE_STATUS_SUCCESS)
		bf_set(lpfc_mqe_status, mqe,
		       (LPFC_MBX_ERROR_RANGE | mcqe_status));

	if (pmb->mbox_flag & LPFC_MBX_IMED_UNREG) {
		pmb->mbox_flag &= ~LPFC_MBX_IMED_UNREG;
		lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_MBOX_VPORT,
				      "MBOX dflt rpi: status:x%x rpi:x%x",
				      mcqe_status,
				      pmbox->un.varWords[0], 0);
		if (mcqe_status == MB_CQE_STATUS_SUCCESS) {
			mp = (struct lpfc_dmabuf *)(pmb->context1);
			ndlp = (struct lpfc_nodelist *)pmb->context2;
			/* Reg_LOGIN of dflt RPI was successful. Now lets get
			 * rid of the RPI using the same mbox buffer.
			 */
			lpfc_unreg_login(phba, vport->vpi,
					 pmbox->un.varWords[0], pmb);
			pmb->mbox_cmpl = lpfc_mbx_cmpl_dflt_rpi;
			pmb->context1 = mp;
			pmb->context2 = ndlp;
			pmb->vport = vport;
			rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT);
			if (rc != MBX_BUSY)
				lpfc_printf_log(phba, KERN_ERR, LOG_MBOX |
						LOG_SLI, "0385 rc should "
						"have been MBX_BUSY\n");
			if (rc != MBX_NOT_FINISHED)
				goto send_current_mbox;
		}
	}
	spin_lock_irqsave(&phba->pport->work_port_lock, iflags);
	phba->pport->work_port_events &= ~WORKER_MBOX_TMO;
	spin_unlock_irqrestore(&phba->pport->work_port_lock, iflags);

	/* There is mailbox completion work to do */
	spin_lock_irqsave(&phba->hbalock, iflags);
	__lpfc_mbox_cmpl_put(phba, pmb);
	phba->work_ha |= HA_MBATT;
	spin_unlock_irqrestore(&phba->hbalock, iflags);
	workposted = true;

send_current_mbox:
	spin_lock_irqsave(&phba->hbalock, iflags);
	/* Release the mailbox command posting token */
	phba->sli.sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
	/* Setting active mailbox pointer need to be in sync to flag clear */
	phba->sli.mbox_active = NULL;
	spin_unlock_irqrestore(&phba->hbalock, iflags);
	/* Wake up worker thread to post the next pending mailbox command */
	lpfc_worker_wake_up(phba);
out_no_mqe_complete:
	if (bf_get(lpfc_trailer_consumed, mcqe))
		lpfc_sli4_mq_release(phba->sli4_hba.mbx_wq);
	return workposted;
}
8527
8528
8529
8530
8531
8532
8533
8534
8535
8536
8537
8538
8539static bool
8540lpfc_sli4_sp_handle_mcqe(struct lpfc_hba *phba, struct lpfc_cqe *cqe)
8541{
8542 struct lpfc_mcqe mcqe;
8543 bool workposted;
8544
8545
8546 lpfc_sli_pcimem_bcopy(cqe, &mcqe, sizeof(struct lpfc_mcqe));
8547
8548
8549 if (!bf_get(lpfc_trailer_async, &mcqe))
8550 workposted = lpfc_sli4_sp_handle_mbox_event(phba, &mcqe);
8551 else
8552 workposted = lpfc_sli4_sp_handle_async_event(phba, &mcqe);
8553 return workposted;
8554}
8555
8556
8557
8558
8559
8560
8561
8562
8563
8564
/**
 * lpfc_sli4_sp_handle_els_wcqe - Handle an ELS work-queue completion
 * @phba: pointer to the HBA context object.
 * @wcqe: the work-queue completion entry.
 *
 * Looks up the command iocb by the WCQE's request tag, allocates a
 * response iocbq, transfers the completion parameters into it, and
 * queues it on the slow-path response-iocb work queue for the worker
 * thread (signalled through the ELS ring's work_ha bit).
 *
 * Return: true if work was posted for the worker thread, else false
 * (unknown tag or iocbq allocation failure).
 */
static bool
lpfc_sli4_sp_handle_els_wcqe(struct lpfc_hba *phba,
			     struct lpfc_wcqe_complete *wcqe)
{
	struct lpfc_sli_ring *pring = &phba->sli.ring[LPFC_ELS_RING];
	struct lpfc_iocbq *cmdiocbq;
	struct lpfc_iocbq *irspiocbq;
	unsigned long iflags;
	bool workposted = false;

	spin_lock_irqsave(&phba->hbalock, iflags);
	pring->stats.iocb_event++;
	/* Look up the ELS command IOCB and create pseudo response IOCB */
	cmdiocbq = lpfc_sli_iocbq_lookup_by_tag(phba, pring,
				bf_get(lpfc_wcqe_c_request_tag, wcqe));
	spin_unlock_irqrestore(&phba->hbalock, iflags);

	if (unlikely(!cmdiocbq)) {
		lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
				"0386 ELS complete with no corresponding "
				"cmdiocb: iotag (%d)\n",
				bf_get(lpfc_wcqe_c_request_tag, wcqe));
		return workposted;
	}

	/* Fake the irspiocbq and copy necessary response information */
	irspiocbq = lpfc_sli_get_iocbq(phba);
	if (!irspiocbq) {
		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
				"0387 Failed to allocate an iocbq\n");
		return workposted;
	}
	lpfc_sli4_iocb_param_transfer(irspiocbq, cmdiocbq, wcqe);

	/* Add the irspiocb to the response IOCB work list */
	spin_lock_irqsave(&phba->hbalock, iflags);
	list_add_tail(&irspiocbq->list, &phba->sli4_hba.sp_rspiocb_work_queue);
	/* Indicate ELS ring attention */
	phba->work_ha |= (HA_R0ATT << (4*LPFC_ELS_RING));
	spin_unlock_irqrestore(&phba->hbalock, iflags);
	workposted = true;

	return workposted;
}
8609
8610
8611
8612
8613
8614
8615
8616
8617
8618static void
8619lpfc_sli4_sp_handle_rel_wcqe(struct lpfc_hba *phba,
8620 struct lpfc_wcqe_release *wcqe)
8621{
8622
8623 if (bf_get(lpfc_wcqe_r_wq_id, wcqe) == phba->sli4_hba.els_wq->queue_id)
8624 lpfc_sli4_wq_release(phba->sli4_hba.els_wq,
8625 bf_get(lpfc_wcqe_r_wqe_index, wcqe));
8626 else
8627 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
8628 "2579 Slow-path wqe consume event carries "
8629 "miss-matched qid: wcqe-qid=x%x, sp-qid=x%x\n",
8630 bf_get(lpfc_wcqe_r_wqe_index, wcqe),
8631 phba->sli4_hba.els_wq->queue_id);
8632}
8633
8634
8635
8636
8637
8638
8639
8640
8641
8642
8643
/**
 * lpfc_sli4_sp_handle_abort_xri_wcqe - Queue an XRI-aborted CQE for the worker
 * @phba: pointer to the HBA context object.
 * @cq: the completion queue the entry arrived on (its subtype selects
 *      the FCP or ELS abort work queue).
 * @wcqe: the XRI-aborted completion entry.
 *
 * Copies the entry into a freshly allocated cq_event, appends it to
 * the matching slow-path XRI-abort work queue, and sets the
 * corresponding hba_flag bit so the worker thread processes it.
 *
 * Return: true if work was posted for the worker thread, else false
 * (allocation failure or unknown CQ subtype).
 */
static bool
lpfc_sli4_sp_handle_abort_xri_wcqe(struct lpfc_hba *phba,
				   struct lpfc_queue *cq,
				   struct sli4_wcqe_xri_aborted *wcqe)
{
	bool workposted = false;
	struct lpfc_cq_event *cq_event;
	unsigned long iflags;

	/* Allocate a new internal CQ_EVENT entry */
	cq_event = lpfc_sli4_cq_event_alloc(phba);
	if (!cq_event) {
		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
				"0602 Failed to allocate CQ_EVENT entry\n");
		return false;
	}

	/* Move the CQE into the proper xri abort event list */
	memcpy(&cq_event->cqe, wcqe, sizeof(struct sli4_wcqe_xri_aborted));
	switch (cq->subtype) {
	case LPFC_FCP:
		spin_lock_irqsave(&phba->hbalock, iflags);
		list_add_tail(&cq_event->list,
			      &phba->sli4_hba.sp_fcp_xri_aborted_work_queue);
		/* Set the fcp xri abort event flag */
		phba->hba_flag |= FCP_XRI_ABORT_EVENT;
		spin_unlock_irqrestore(&phba->hbalock, iflags);
		workposted = true;
		break;
	case LPFC_ELS:
		spin_lock_irqsave(&phba->hbalock, iflags);
		list_add_tail(&cq_event->list,
			      &phba->sli4_hba.sp_els_xri_aborted_work_queue);
		/* Set the els xri abort event flag */
		phba->hba_flag |= ELS_XRI_ABORT_EVENT;
		spin_unlock_irqrestore(&phba->hbalock, iflags);
		workposted = true;
		break;
	default:
		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
				"0603 Invalid work queue CQE subtype (x%x)\n",
				cq->subtype);
		workposted = false;
		break;
	}
	return workposted;
}
8691
8692
8693
8694
8695
8696
8697
8698
8699
8700
8701
8702static bool
8703lpfc_sli4_sp_handle_wcqe(struct lpfc_hba *phba, struct lpfc_queue *cq,
8704 struct lpfc_cqe *cqe)
8705{
8706 struct lpfc_wcqe_complete wcqe;
8707 bool workposted = false;
8708
8709
8710 lpfc_sli_pcimem_bcopy(cqe, &wcqe, sizeof(struct lpfc_cqe));
8711
8712
8713 switch (bf_get(lpfc_wcqe_c_code, &wcqe)) {
8714 case CQE_CODE_COMPL_WQE:
8715
8716 workposted = lpfc_sli4_sp_handle_els_wcqe(phba,
8717 (struct lpfc_wcqe_complete *)&wcqe);
8718 break;
8719 case CQE_CODE_RELEASE_WQE:
8720
8721 lpfc_sli4_sp_handle_rel_wcqe(phba,
8722 (struct lpfc_wcqe_release *)&wcqe);
8723 break;
8724 case CQE_CODE_XRI_ABORTED:
8725
8726 workposted = lpfc_sli4_sp_handle_abort_xri_wcqe(phba, cq,
8727 (struct sli4_wcqe_xri_aborted *)&wcqe);
8728 break;
8729 default:
8730 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
8731 "0388 Not a valid WCQE code: x%x\n",
8732 bf_get(lpfc_wcqe_c_code, &wcqe));
8733 break;
8734 }
8735 return workposted;
8736}
8737
8738
8739
8740
8741
8742
8743
8744
8745
8746
/**
 * lpfc_sli4_sp_handle_rcqe - Handle a receive-queue completion entry
 * @phba: pointer to the HBA context object.
 * @cqe: the raw completion queue entry.
 *
 * Releases the consumed header/data RQ pair, validates the entry, and
 * on a successful (or truncated) receive moves the posted buffer onto
 * the pending-receive list for the worker thread.  On insufficient-
 * buffer statuses, flags the worker to post fresh receive buffers.
 *
 * Return: true if work was posted for the worker thread, else false.
 */
static bool
lpfc_sli4_sp_handle_rcqe(struct lpfc_hba *phba, struct lpfc_cqe *cqe)
{
	struct lpfc_rcqe rcqe;
	bool workposted = false;
	struct lpfc_queue *hrq = phba->sli4_hba.hdr_rq;
	struct lpfc_queue *drq = phba->sli4_hba.dat_rq;
	struct hbq_dmabuf *dma_buf;
	uint32_t status;
	unsigned long iflags;

	/* Copy the receive queue CQE and release the consumed RQ entries */
	lpfc_sli_pcimem_bcopy(cqe, &rcqe, sizeof(struct lpfc_rcqe));
	lpfc_sli4_rq_release(hrq, drq);
	if (bf_get(lpfc_rcqe_code, &rcqe) != CQE_CODE_RECEIVE)
		goto out;
	if (bf_get(lpfc_rcqe_rq_id, &rcqe) != hrq->queue_id)
		goto out;

	status = bf_get(lpfc_rcqe_status, &rcqe);
	switch (status) {
	case FC_STATUS_RQ_BUF_LEN_EXCEEDED:
		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
				"2537 Receive Frame Truncated!!\n");
		/* fall through - a truncated frame is still queued */
	case FC_STATUS_RQ_SUCCESS:
		spin_lock_irqsave(&phba->hbalock, iflags);
		dma_buf = lpfc_sli_hbqbuf_get(&phba->hbqs[0].hbq_buffer_list);
		if (!dma_buf) {
			spin_unlock_irqrestore(&phba->hbalock, iflags);
			goto out;
		}
		memcpy(&dma_buf->rcqe, &rcqe, sizeof(rcqe));
		/* save off the frame for the word thread to process */
		list_add_tail(&dma_buf->dbuf.list, &phba->rb_pend_list);
		/* Frame received */
		phba->hba_flag |= HBA_RECEIVE_BUFFER;
		spin_unlock_irqrestore(&phba->hbalock, iflags);
		workposted = true;
		break;
	case FC_STATUS_INSUFF_BUF_NEED_BUF:
	case FC_STATUS_INSUFF_BUF_FRM_DISC:
		/* Post more buffers if possible */
		spin_lock_irqsave(&phba->hbalock, iflags);
		phba->hba_flag |= HBA_POST_RECEIVE_BUFFER;
		spin_unlock_irqrestore(&phba->hbalock, iflags);
		workposted = true;
		break;
	}
out:
	return workposted;

}
8799
8800
8801
8802
8803
8804
8805
8806
8807
8808
8809
8810
8811
8812
/**
 * lpfc_sli4_sp_handle_eqe - Process a slow-path event queue entry
 * @phba: pointer to the HBA context object.
 * @eqe: the event queue entry.
 *
 * Validates the EQE, locates the child completion queue it refers to
 * on the slow-path EQ, drains that CQ (dispatching each CQE by CQ
 * type: mailbox, work queue, or receive queue), periodically releases
 * consumed CQEs without re-arming, re-arms the CQ when done, and wakes
 * the worker thread if any handler posted work.
 */
static void
lpfc_sli4_sp_handle_eqe(struct lpfc_hba *phba, struct lpfc_eqe *eqe)
{
	struct lpfc_queue *cq = NULL, *childq, *speq;
	struct lpfc_cqe *cqe;
	bool workposted = false;
	int ecount = 0;
	uint16_t cqid;

	if (bf_get(lpfc_eqe_major_code, eqe) != 0 ||
	    bf_get(lpfc_eqe_minor_code, eqe) != 0) {
		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
				"0359 Not a valid slow-path completion "
				"event: majorcode=x%x, minorcode=x%x\n",
				bf_get(lpfc_eqe_major_code, eqe),
				bf_get(lpfc_eqe_minor_code, eqe));
		return;
	}

	/* Get the reference to the corresponding CQ */
	cqid = bf_get(lpfc_eqe_resource_id, eqe);

	/* Search for completion queue pointer matching this cqid */
	speq = phba->sli4_hba.sp_eq;
	list_for_each_entry(childq, &speq->child_list, list) {
		if (childq->queue_id == cqid) {
			cq = childq;
			break;
		}
	}
	if (unlikely(!cq)) {
		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
				"0365 Slow-path CQ identifier (%d) does "
				"not exist\n", cqid);
		return;
	}

	/* Process all the entries to the CQ */
	switch (cq->type) {
	case LPFC_MCQ:
		while ((cqe = lpfc_sli4_cq_get(cq))) {
			workposted |= lpfc_sli4_sp_handle_mcqe(phba, cqe);
			if (!(++ecount % LPFC_GET_QE_REL_INT))
				lpfc_sli4_cq_release(cq, LPFC_QUEUE_NOARM);
		}
		break;
	case LPFC_WCQ:
		while ((cqe = lpfc_sli4_cq_get(cq))) {
			workposted |= lpfc_sli4_sp_handle_wcqe(phba, cq, cqe);
			if (!(++ecount % LPFC_GET_QE_REL_INT))
				lpfc_sli4_cq_release(cq, LPFC_QUEUE_NOARM);
		}
		break;
	case LPFC_RCQ:
		while ((cqe = lpfc_sli4_cq_get(cq))) {
			workposted |= lpfc_sli4_sp_handle_rcqe(phba, cqe);
			if (!(++ecount % LPFC_GET_QE_REL_INT))
				lpfc_sli4_cq_release(cq, LPFC_QUEUE_NOARM);
		}
		break;
	default:
		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
				"0370 Invalid completion queue type (%d)\n",
				cq->type);
		return;
	}

	/* Catch the no cq entry condition, log an error */
	if (unlikely(ecount == 0))
		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
				"0371 No entry from the CQ: identifier "
				"(x%x), type (%d)\n", cq->queue_id, cq->type);

	/* In any case, flash and re-arm the SCQ */
	lpfc_sli4_cq_release(cq, LPFC_QUEUE_REARM);

	/* wake up worker thread if there are works to be done */
	if (workposted)
		lpfc_worker_wake_up(phba);
}
8893
8894
8895
8896
8897
8898
8899
8900
/**
 * lpfc_sli4_fp_handle_fcp_wcqe - Process a fast-path FCP work completion
 * @phba: Pointer to HBA context object.
 * @wcqe: Pointer to the (already host-copied) work-queue completion entry.
 *
 * Looks up the command iocb matching the completion's request tag on the
 * FCP ring and invokes its completion callback with a response iocb
 * synthesized from the WCQE fields.  A local-reject/no-resources status
 * triggers SCSI queue-depth ramp-down before the error is logged.
 */
static void
lpfc_sli4_fp_handle_fcp_wcqe(struct lpfc_hba *phba,
			     struct lpfc_wcqe_complete *wcqe)
{
	struct lpfc_sli_ring *pring = &phba->sli.ring[LPFC_FCP_RING];
	struct lpfc_iocbq *cmdiocbq;
	/* NOTE(review): the response iocb lives on this stack frame and is
	 * passed to the callback by address; the callback must not retain
	 * the pointer past its return — confirm against iocb_cmpl users.
	 */
	struct lpfc_iocbq irspiocbq;
	unsigned long iflags;

	/* Count the event under the host lock */
	spin_lock_irqsave(&phba->hbalock, iflags);
	pring->stats.iocb_event++;
	spin_unlock_irqrestore(&phba->hbalock, iflags);

	/* Check for a non-success completion status */
	if (unlikely(bf_get(lpfc_wcqe_c_status, wcqe))) {
		/* If the HBA reported resource exhaustion, reduce the
		 * device queue depth.
		 */
		if ((bf_get(lpfc_wcqe_c_status, wcqe) ==
		     IOSTAT_LOCAL_REJECT) &&
		    (wcqe->parameter == IOERR_NO_RESOURCES)) {
			phba->lpfc_rampdown_queue_depth(phba);
		}
		/* Log the error status */
		lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
				"0373 FCP complete error: status=x%x, "
				"hw_status=x%x, total_data_specified=%d, "
				"parameter=x%x, word3=x%x\n",
				bf_get(lpfc_wcqe_c_status, wcqe),
				bf_get(lpfc_wcqe_c_hw_status, wcqe),
				wcqe->total_data_placed, wcqe->parameter,
				wcqe->word3);
	}

	/* Look up the FCP command iocb by its request tag */
	spin_lock_irqsave(&phba->hbalock, iflags);
	cmdiocbq = lpfc_sli_iocbq_lookup_by_tag(phba, pring,
				bf_get(lpfc_wcqe_c_request_tag, wcqe));
	spin_unlock_irqrestore(&phba->hbalock, iflags);
	if (unlikely(!cmdiocbq)) {
		lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
				"0374 FCP complete with no corresponding "
				"cmdiocb: iotag (%d)\n",
				bf_get(lpfc_wcqe_c_request_tag, wcqe));
		return;
	}
	if (unlikely(!cmdiocbq->iocb_cmpl)) {
		lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
				"0375 FCP cmdiocb not callback function "
				"iotag: (%d)\n",
				bf_get(lpfc_wcqe_c_request_tag, wcqe));
		return;
	}

	/* Translate the WCQE fields into a pseudo response iocb */
	lpfc_sli4_iocb_param_transfer(&irspiocbq, cmdiocbq, wcqe);

	/* Complete the command's iocb with the synthesized response */
	(cmdiocbq->iocb_cmpl)(phba, cmdiocbq, &irspiocbq);
}
8961
8962
8963
8964
8965
8966
8967
8968
8969
8970
8971static void
8972lpfc_sli4_fp_handle_rel_wcqe(struct lpfc_hba *phba, struct lpfc_queue *cq,
8973 struct lpfc_wcqe_release *wcqe)
8974{
8975 struct lpfc_queue *childwq;
8976 bool wqid_matched = false;
8977 uint16_t fcp_wqid;
8978
8979
8980 fcp_wqid = bf_get(lpfc_wcqe_r_wq_id, wcqe);
8981 list_for_each_entry(childwq, &cq->child_list, list) {
8982 if (childwq->queue_id == fcp_wqid) {
8983 lpfc_sli4_wq_release(childwq,
8984 bf_get(lpfc_wcqe_r_wqe_index, wcqe));
8985 wqid_matched = true;
8986 break;
8987 }
8988 }
8989
8990 if (wqid_matched != true)
8991 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
8992 "2580 Fast-path wqe consume event carries "
8993 "miss-matched qid: wcqe-qid=x%x\n", fcp_wqid);
8994}
8995
8996
8997
8998
8999
9000
9001
9002
9003
9004static int
9005lpfc_sli4_fp_handle_wcqe(struct lpfc_hba *phba, struct lpfc_queue *cq,
9006 struct lpfc_cqe *cqe)
9007{
9008 struct lpfc_wcqe_release wcqe;
9009 bool workposted = false;
9010
9011
9012 lpfc_sli_pcimem_bcopy(cqe, &wcqe, sizeof(struct lpfc_cqe));
9013
9014
9015 switch (bf_get(lpfc_wcqe_c_code, &wcqe)) {
9016 case CQE_CODE_COMPL_WQE:
9017
9018 lpfc_sli4_fp_handle_fcp_wcqe(phba,
9019 (struct lpfc_wcqe_complete *)&wcqe);
9020 break;
9021 case CQE_CODE_RELEASE_WQE:
9022
9023 lpfc_sli4_fp_handle_rel_wcqe(phba, cq,
9024 (struct lpfc_wcqe_release *)&wcqe);
9025 break;
9026 case CQE_CODE_XRI_ABORTED:
9027
9028 workposted = lpfc_sli4_sp_handle_abort_xri_wcqe(phba, cq,
9029 (struct sli4_wcqe_xri_aborted *)&wcqe);
9030 break;
9031 default:
9032 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
9033 "0144 Not a valid WCQE code: x%x\n",
9034 bf_get(lpfc_wcqe_c_code, &wcqe));
9035 break;
9036 }
9037 return workposted;
9038}
9039
9040
9041
9042
9043
9044
9045
9046
9047
9048
9049
9050
9051
/**
 * lpfc_sli4_fp_handle_eqe - Process a fast-path event queue entry
 * @phba: Pointer to HBA context object.
 * @eqe: Pointer to the event queue entry popped off the fast-path EQ.
 * @fcp_cqidx: Index of the fast-path FCP completion queue to service.
 *
 * Validates the EQE against the indexed FCP CQ, then drains the CQ,
 * dispatching every entry through lpfc_sli4_fp_handle_wcqe().  The CQ is
 * released without re-arm every LPFC_GET_QE_REL_INT entries and re-armed
 * when empty.  Wakes the worker thread if any handler posted work.
 */
static void
lpfc_sli4_fp_handle_eqe(struct lpfc_hba *phba, struct lpfc_eqe *eqe,
			uint32_t fcp_cqidx)
{
	struct lpfc_queue *cq;
	struct lpfc_cqe *cqe;
	bool workposted = false;
	uint16_t cqid;
	int ecount = 0;

	/* Non-zero major/minor codes mark a non-completion event */
	if (unlikely(bf_get(lpfc_eqe_major_code, eqe) != 0) ||
	    unlikely(bf_get(lpfc_eqe_minor_code, eqe) != 0)) {
		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
				"0366 Not a valid fast-path completion "
				"event: majorcode=x%x, minorcode=x%x\n",
				bf_get(lpfc_eqe_major_code, eqe),
				bf_get(lpfc_eqe_minor_code, eqe));
		return;
	}

	cq = phba->sli4_hba.fcp_cq[fcp_cqidx];
	if (unlikely(!cq)) {
		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
				"0367 Fast-path completion queue does not "
				"exist\n");
		return;
	}

	/* The EQE's resource id must name the CQ we are about to drain */
	cqid = bf_get(lpfc_eqe_resource_id, eqe);
	if (unlikely(cqid != cq->queue_id)) {
		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
				"0368 Miss-matched fast-path completion "
				"queue identifier: eqcqid=%d, fcpcqid=%d\n",
				cqid, cq->queue_id);
		return;
	}

	/* Drain the CQ, releasing slots back to hardware in batches */
	while ((cqe = lpfc_sli4_cq_get(cq))) {
		workposted |= lpfc_sli4_fp_handle_wcqe(phba, cq, cqe);
		if (!(++ecount % LPFC_GET_QE_REL_INT))
			lpfc_sli4_cq_release(cq, LPFC_QUEUE_NOARM);
	}

	/* Catch the no-cq-entry condition; an EQE should imply work */
	if (unlikely(ecount == 0))
		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
				"0369 No entry from fast-path completion "
				"queue fcpcqid=%d\n", cq->queue_id);

	/* In any case, flush and re-arm the CQ */
	lpfc_sli4_cq_release(cq, LPFC_QUEUE_REARM);

	/* Wake up the worker thread if any handler deferred work to it */
	if (workposted)
		lpfc_worker_wake_up(phba);
}
9110
9111static void
9112lpfc_sli4_eq_flush(struct lpfc_hba *phba, struct lpfc_queue *eq)
9113{
9114 struct lpfc_eqe *eqe;
9115
9116
9117 while ((eqe = lpfc_sli4_eq_get(eq)))
9118 ;
9119
9120
9121 lpfc_sli4_eq_release(eq, LPFC_QUEUE_REARM);
9122}
9123
9124
9125
9126
9127
9128
9129
9130
9131
9132
9133
9134
9135
9136
9137
9138
9139
9140
9141
9142
9143
/**
 * lpfc_sli4_sp_intr_handler - Slow-path interrupt handler (SLI-4)
 * @irq: Interrupt number.
 * @dev_id: The device context pointer (the lpfc_hba for this vector).
 *
 * Services the slow-path event queue: each EQE is dispatched through
 * lpfc_sli4_sp_handle_eqe() and the EQ is released in batches, then
 * re-armed.  If the adapter is offline or resetting, pending EQEs are
 * flushed instead of processed.
 *
 * Return: IRQ_HANDLED if events were processed (or the vector is MSI-X
 * and the spurious interrupt was logged), IRQ_NONE otherwise.
 */
irqreturn_t
lpfc_sli4_sp_intr_handler(int irq, void *dev_id)
{
	struct lpfc_hba *phba;
	struct lpfc_queue *speq;
	struct lpfc_eqe *eqe;
	unsigned long iflag;
	int ecount = 0;

	/*
	 * Get the driver's phba structure from the dev_id
	 */
	phba = (struct lpfc_hba *)dev_id;

	if (unlikely(!phba))
		return IRQ_NONE;

	/* Get to the EQ struct associated with this vector */
	speq = phba->sli4_hba.sp_eq;

	/* Check device state for handling interrupt */
	if (unlikely(lpfc_intr_state_check(phba))) {
		/* Check again for link_state with lock held */
		spin_lock_irqsave(&phba->hbalock, iflag);
		if (phba->link_state < LPFC_LINK_DOWN)
			/* Flush, clear interrupt, and rearm the EQ */
			lpfc_sli4_eq_flush(phba, speq);
		spin_unlock_irqrestore(&phba->hbalock, iflag);
		return IRQ_NONE;
	}

	/*
	 * Process all the event on FCP slow-path EQ
	 */
	while ((eqe = lpfc_sli4_eq_get(speq))) {
		lpfc_sli4_sp_handle_eqe(phba, eqe);
		if (!(++ecount % LPFC_GET_QE_REL_INT))
			lpfc_sli4_eq_release(speq, LPFC_QUEUE_NOARM);
	}

	/* Always clear and re-arm the slow-path EQ */
	lpfc_sli4_eq_release(speq, LPFC_QUEUE_REARM);

	/* Catch the no-EQ-entry condition */
	if (unlikely(ecount == 0)) {
		if (phba->intr_type == MSIX)
			/* MSI-X treated on default handler */
			lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
					"0357 MSI-X interrupt with no EQE\n");
		else
			/* Non MSI-X treated on interrupt handler */
			return IRQ_NONE;
	}

	return IRQ_HANDLED;
}
9200
9201
9202
9203
9204
9205
9206
9207
9208
9209
9210
9211
9212
9213
9214
9215
9216
9217
9218
9219
9220
9221
/**
 * lpfc_sli4_fp_intr_handler - Fast-path interrupt handler (SLI-4)
 * @irq: Interrupt number.
 * @dev_id: The per-vector lpfc_fcp_eq_hdl, giving the HBA and EQ index.
 *
 * Services one fast-path FCP event queue: each EQE is dispatched through
 * lpfc_sli4_fp_handle_eqe() and the EQ is released in batches, then
 * re-armed.  If the adapter is offline or resetting, pending EQEs are
 * flushed instead of processed.
 *
 * Return: IRQ_HANDLED if events were processed (or the vector is MSI-X
 * and the spurious interrupt was logged), IRQ_NONE otherwise.
 */
irqreturn_t
lpfc_sli4_fp_intr_handler(int irq, void *dev_id)
{
	struct lpfc_hba *phba;
	struct lpfc_fcp_eq_hdl *fcp_eq_hdl;
	struct lpfc_queue *fpeq;
	struct lpfc_eqe *eqe;
	unsigned long iflag;
	int ecount = 0;
	uint32_t fcp_eqidx;

	/* Get the driver's phba structure from the dev_id */
	fcp_eq_hdl = (struct lpfc_fcp_eq_hdl *)dev_id;
	phba = fcp_eq_hdl->phba;
	fcp_eqidx = fcp_eq_hdl->idx;

	if (unlikely(!phba))
		return IRQ_NONE;

	/* Get to the EQ struct associated with this vector */
	fpeq = phba->sli4_hba.fp_eq[fcp_eqidx];

	/* Check device state for handling interrupt */
	if (unlikely(lpfc_intr_state_check(phba))) {
		/* Check again for link_state with lock held */
		spin_lock_irqsave(&phba->hbalock, iflag);
		if (phba->link_state < LPFC_LINK_DOWN)
			/* Flush, clear interrupt, and rearm the EQ */
			lpfc_sli4_eq_flush(phba, fpeq);
		spin_unlock_irqrestore(&phba->hbalock, iflag);
		return IRQ_NONE;
	}

	/*
	 * Process all the event on FCP fast-path EQ
	 */
	while ((eqe = lpfc_sli4_eq_get(fpeq))) {
		lpfc_sli4_fp_handle_eqe(phba, eqe, fcp_eqidx);
		if (!(++ecount % LPFC_GET_QE_REL_INT))
			lpfc_sli4_eq_release(fpeq, LPFC_QUEUE_NOARM);
	}

	/* Always clear and re-arm the fast-path EQ */
	lpfc_sli4_eq_release(fpeq, LPFC_QUEUE_REARM);

	/* Catch the no-EQ-entry condition */
	if (unlikely(ecount == 0)) {
		if (phba->intr_type == MSIX)
			/* MSI-X treated on default handler */
			lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
					"0358 MSI-X interrupt with no EQE\n");
		else
			/* Non MSI-X treated on interrupt handler */
			return IRQ_NONE;
	}

	return IRQ_HANDLED;
}
9279
9280
9281
9282
9283
9284
9285
9286
9287
9288
9289
9290
9291
9292
9293
9294
9295
9296
9297irqreturn_t
9298lpfc_sli4_intr_handler(int irq, void *dev_id)
9299{
9300 struct lpfc_hba *phba;
9301 irqreturn_t sp_irq_rc, fp_irq_rc;
9302 bool fp_handled = false;
9303 uint32_t fcp_eqidx;
9304
9305
9306 phba = (struct lpfc_hba *)dev_id;
9307
9308 if (unlikely(!phba))
9309 return IRQ_NONE;
9310
9311
9312
9313
9314 sp_irq_rc = lpfc_sli4_sp_intr_handler(irq, dev_id);
9315
9316
9317
9318
9319 for (fcp_eqidx = 0; fcp_eqidx < phba->cfg_fcp_eq_count; fcp_eqidx++) {
9320 fp_irq_rc = lpfc_sli4_fp_intr_handler(irq,
9321 &phba->sli4_hba.fcp_eq_hdl[fcp_eqidx]);
9322 if (fp_irq_rc == IRQ_HANDLED)
9323 fp_handled |= true;
9324 }
9325
9326 return (fp_handled == true) ? IRQ_HANDLED : sp_irq_rc;
9327}
9328
9329
9330
9331
9332
9333
9334
9335
9336
9337void
9338lpfc_sli4_queue_free(struct lpfc_queue *queue)
9339{
9340 struct lpfc_dmabuf *dmabuf;
9341
9342 if (!queue)
9343 return;
9344
9345 while (!list_empty(&queue->page_list)) {
9346 list_remove_head(&queue->page_list, dmabuf, struct lpfc_dmabuf,
9347 list);
9348 dma_free_coherent(&queue->phba->pcidev->dev, PAGE_SIZE,
9349 dmabuf->virt, dmabuf->phys);
9350 kfree(dmabuf);
9351 }
9352 kfree(queue);
9353 return;
9354}
9355
9356
9357
9358
9359
9360
9361
9362
9363
9364
9365
9366struct lpfc_queue *
9367lpfc_sli4_queue_alloc(struct lpfc_hba *phba, uint32_t entry_size,
9368 uint32_t entry_count)
9369{
9370 struct lpfc_queue *queue;
9371 struct lpfc_dmabuf *dmabuf;
9372 int x, total_qe_count;
9373 void *dma_pointer;
9374
9375
9376 queue = kzalloc(sizeof(struct lpfc_queue) +
9377 (sizeof(union sli4_qe) * entry_count), GFP_KERNEL);
9378 if (!queue)
9379 return NULL;
9380 queue->page_count = (PAGE_ALIGN(entry_size * entry_count))/PAGE_SIZE;
9381 INIT_LIST_HEAD(&queue->list);
9382 INIT_LIST_HEAD(&queue->page_list);
9383 INIT_LIST_HEAD(&queue->child_list);
9384 for (x = 0, total_qe_count = 0; x < queue->page_count; x++) {
9385 dmabuf = kzalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
9386 if (!dmabuf)
9387 goto out_fail;
9388 dmabuf->virt = dma_alloc_coherent(&phba->pcidev->dev,
9389 PAGE_SIZE, &dmabuf->phys,
9390 GFP_KERNEL);
9391 if (!dmabuf->virt) {
9392 kfree(dmabuf);
9393 goto out_fail;
9394 }
9395 memset(dmabuf->virt, 0, PAGE_SIZE);
9396 dmabuf->buffer_tag = x;
9397 list_add_tail(&dmabuf->list, &queue->page_list);
9398
9399 dma_pointer = dmabuf->virt;
9400 for (; total_qe_count < entry_count &&
9401 dma_pointer < (PAGE_SIZE + dmabuf->virt);
9402 total_qe_count++, dma_pointer += entry_size) {
9403 queue->qe[total_qe_count].address = dma_pointer;
9404 }
9405 }
9406 queue->entry_size = entry_size;
9407 queue->entry_count = entry_count;
9408 queue->phba = phba;
9409
9410 return queue;
9411out_fail:
9412 lpfc_sli4_queue_free(queue);
9413 return NULL;
9414}
9415
9416
9417
9418
9419
9420
9421
9422
9423
9424
9425
9426
9427
9428
9429
9430
9431
9432
9433
9434
9435
9436uint32_t
9437lpfc_eq_create(struct lpfc_hba *phba, struct lpfc_queue *eq, uint16_t imax)
9438{
9439 struct lpfc_mbx_eq_create *eq_create;
9440 LPFC_MBOXQ_t *mbox;
9441 int rc, length, status = 0;
9442 struct lpfc_dmabuf *dmabuf;
9443 uint32_t shdr_status, shdr_add_status;
9444 union lpfc_sli4_cfg_shdr *shdr;
9445 uint16_t dmult;
9446
9447 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
9448 if (!mbox)
9449 return -ENOMEM;
9450 length = (sizeof(struct lpfc_mbx_eq_create) -
9451 sizeof(struct lpfc_sli4_cfg_mhdr));
9452 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON,
9453 LPFC_MBOX_OPCODE_EQ_CREATE,
9454 length, LPFC_SLI4_MBX_EMBED);
9455 eq_create = &mbox->u.mqe.un.eq_create;
9456 bf_set(lpfc_mbx_eq_create_num_pages, &eq_create->u.request,
9457 eq->page_count);
9458 bf_set(lpfc_eq_context_size, &eq_create->u.request.context,
9459 LPFC_EQE_SIZE);
9460 bf_set(lpfc_eq_context_valid, &eq_create->u.request.context, 1);
9461
9462 dmult = LPFC_DMULT_CONST/imax - 1;
9463 bf_set(lpfc_eq_context_delay_multi, &eq_create->u.request.context,
9464 dmult);
9465 switch (eq->entry_count) {
9466 default:
9467 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
9468 "0360 Unsupported EQ count. (%d)\n",
9469 eq->entry_count);
9470 if (eq->entry_count < 256)
9471 return -EINVAL;
9472
9473 case 256:
9474 bf_set(lpfc_eq_context_count, &eq_create->u.request.context,
9475 LPFC_EQ_CNT_256);
9476 break;
9477 case 512:
9478 bf_set(lpfc_eq_context_count, &eq_create->u.request.context,
9479 LPFC_EQ_CNT_512);
9480 break;
9481 case 1024:
9482 bf_set(lpfc_eq_context_count, &eq_create->u.request.context,
9483 LPFC_EQ_CNT_1024);
9484 break;
9485 case 2048:
9486 bf_set(lpfc_eq_context_count, &eq_create->u.request.context,
9487 LPFC_EQ_CNT_2048);
9488 break;
9489 case 4096:
9490 bf_set(lpfc_eq_context_count, &eq_create->u.request.context,
9491 LPFC_EQ_CNT_4096);
9492 break;
9493 }
9494 list_for_each_entry(dmabuf, &eq->page_list, list) {
9495 eq_create->u.request.page[dmabuf->buffer_tag].addr_lo =
9496 putPaddrLow(dmabuf->phys);
9497 eq_create->u.request.page[dmabuf->buffer_tag].addr_hi =
9498 putPaddrHigh(dmabuf->phys);
9499 }
9500 mbox->vport = phba->pport;
9501 mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
9502 mbox->context1 = NULL;
9503 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
9504 shdr = (union lpfc_sli4_cfg_shdr *) &eq_create->header.cfg_shdr;
9505 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
9506 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
9507 if (shdr_status || shdr_add_status || rc) {
9508 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
9509 "2500 EQ_CREATE mailbox failed with "
9510 "status x%x add_status x%x, mbx status x%x\n",
9511 shdr_status, shdr_add_status, rc);
9512 status = -ENXIO;
9513 }
9514 eq->type = LPFC_EQ;
9515 eq->subtype = LPFC_NONE;
9516 eq->queue_id = bf_get(lpfc_mbx_eq_create_q_id, &eq_create->u.response);
9517 if (eq->queue_id == 0xFFFF)
9518 status = -ENXIO;
9519 eq->host_index = 0;
9520 eq->hba_index = 0;
9521
9522 mempool_free(mbox, phba->mbox_mem_pool);
9523 return status;
9524}
9525
9526
9527
9528
9529
9530
9531
9532
9533
9534
9535
9536
9537
9538
9539
9540
9541
9542
9543
9544
9545
9546
9547uint32_t
9548lpfc_cq_create(struct lpfc_hba *phba, struct lpfc_queue *cq,
9549 struct lpfc_queue *eq, uint32_t type, uint32_t subtype)
9550{
9551 struct lpfc_mbx_cq_create *cq_create;
9552 struct lpfc_dmabuf *dmabuf;
9553 LPFC_MBOXQ_t *mbox;
9554 int rc, length, status = 0;
9555 uint32_t shdr_status, shdr_add_status;
9556 union lpfc_sli4_cfg_shdr *shdr;
9557
9558 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
9559 if (!mbox)
9560 return -ENOMEM;
9561 length = (sizeof(struct lpfc_mbx_cq_create) -
9562 sizeof(struct lpfc_sli4_cfg_mhdr));
9563 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON,
9564 LPFC_MBOX_OPCODE_CQ_CREATE,
9565 length, LPFC_SLI4_MBX_EMBED);
9566 cq_create = &mbox->u.mqe.un.cq_create;
9567 bf_set(lpfc_mbx_cq_create_num_pages, &cq_create->u.request,
9568 cq->page_count);
9569 bf_set(lpfc_cq_context_event, &cq_create->u.request.context, 1);
9570 bf_set(lpfc_cq_context_valid, &cq_create->u.request.context, 1);
9571 bf_set(lpfc_cq_eq_id, &cq_create->u.request.context, eq->queue_id);
9572 switch (cq->entry_count) {
9573 default:
9574 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
9575 "0361 Unsupported CQ count. (%d)\n",
9576 cq->entry_count);
9577 if (cq->entry_count < 256)
9578 return -EINVAL;
9579
9580 case 256:
9581 bf_set(lpfc_cq_context_count, &cq_create->u.request.context,
9582 LPFC_CQ_CNT_256);
9583 break;
9584 case 512:
9585 bf_set(lpfc_cq_context_count, &cq_create->u.request.context,
9586 LPFC_CQ_CNT_512);
9587 break;
9588 case 1024:
9589 bf_set(lpfc_cq_context_count, &cq_create->u.request.context,
9590 LPFC_CQ_CNT_1024);
9591 break;
9592 }
9593 list_for_each_entry(dmabuf, &cq->page_list, list) {
9594 cq_create->u.request.page[dmabuf->buffer_tag].addr_lo =
9595 putPaddrLow(dmabuf->phys);
9596 cq_create->u.request.page[dmabuf->buffer_tag].addr_hi =
9597 putPaddrHigh(dmabuf->phys);
9598 }
9599 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
9600
9601
9602 shdr = (union lpfc_sli4_cfg_shdr *) &cq_create->header.cfg_shdr;
9603 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
9604 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
9605 if (shdr_status || shdr_add_status || rc) {
9606 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
9607 "2501 CQ_CREATE mailbox failed with "
9608 "status x%x add_status x%x, mbx status x%x\n",
9609 shdr_status, shdr_add_status, rc);
9610 status = -ENXIO;
9611 goto out;
9612 }
9613 cq->queue_id = bf_get(lpfc_mbx_cq_create_q_id, &cq_create->u.response);
9614 if (cq->queue_id == 0xFFFF) {
9615 status = -ENXIO;
9616 goto out;
9617 }
9618
9619 list_add_tail(&cq->list, &eq->child_list);
9620
9621 cq->type = type;
9622 cq->subtype = subtype;
9623 cq->queue_id = bf_get(lpfc_mbx_cq_create_q_id, &cq_create->u.response);
9624 cq->host_index = 0;
9625 cq->hba_index = 0;
9626
9627out:
9628 mempool_free(mbox, phba->mbox_mem_pool);
9629 return status;
9630}
9631
9632
9633
9634
9635
9636
9637
9638
9639
9640
9641
9642
9643
9644
9645
9646
9647
9648
9649
9650
9651uint32_t
9652lpfc_mq_create(struct lpfc_hba *phba, struct lpfc_queue *mq,
9653 struct lpfc_queue *cq, uint32_t subtype)
9654{
9655 struct lpfc_mbx_mq_create *mq_create;
9656 struct lpfc_dmabuf *dmabuf;
9657 LPFC_MBOXQ_t *mbox;
9658 int rc, length, status = 0;
9659 uint32_t shdr_status, shdr_add_status;
9660 union lpfc_sli4_cfg_shdr *shdr;
9661
9662 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
9663 if (!mbox)
9664 return -ENOMEM;
9665 length = (sizeof(struct lpfc_mbx_mq_create) -
9666 sizeof(struct lpfc_sli4_cfg_mhdr));
9667 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON,
9668 LPFC_MBOX_OPCODE_MQ_CREATE,
9669 length, LPFC_SLI4_MBX_EMBED);
9670 mq_create = &mbox->u.mqe.un.mq_create;
9671 bf_set(lpfc_mbx_mq_create_num_pages, &mq_create->u.request,
9672 mq->page_count);
9673 bf_set(lpfc_mq_context_cq_id, &mq_create->u.request.context,
9674 cq->queue_id);
9675 bf_set(lpfc_mq_context_valid, &mq_create->u.request.context, 1);
9676 switch (mq->entry_count) {
9677 default:
9678 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
9679 "0362 Unsupported MQ count. (%d)\n",
9680 mq->entry_count);
9681 if (mq->entry_count < 16)
9682 return -EINVAL;
9683
9684 case 16:
9685 bf_set(lpfc_mq_context_count, &mq_create->u.request.context,
9686 LPFC_MQ_CNT_16);
9687 break;
9688 case 32:
9689 bf_set(lpfc_mq_context_count, &mq_create->u.request.context,
9690 LPFC_MQ_CNT_32);
9691 break;
9692 case 64:
9693 bf_set(lpfc_mq_context_count, &mq_create->u.request.context,
9694 LPFC_MQ_CNT_64);
9695 break;
9696 case 128:
9697 bf_set(lpfc_mq_context_count, &mq_create->u.request.context,
9698 LPFC_MQ_CNT_128);
9699 break;
9700 }
9701 list_for_each_entry(dmabuf, &mq->page_list, list) {
9702 mq_create->u.request.page[dmabuf->buffer_tag].addr_lo =
9703 putPaddrLow(dmabuf->phys);
9704 mq_create->u.request.page[dmabuf->buffer_tag].addr_hi =
9705 putPaddrHigh(dmabuf->phys);
9706 }
9707 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
9708
9709 shdr = (union lpfc_sli4_cfg_shdr *) &mq_create->header.cfg_shdr;
9710 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
9711 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
9712 if (shdr_status || shdr_add_status || rc) {
9713 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
9714 "2502 MQ_CREATE mailbox failed with "
9715 "status x%x add_status x%x, mbx status x%x\n",
9716 shdr_status, shdr_add_status, rc);
9717 status = -ENXIO;
9718 goto out;
9719 }
9720 mq->queue_id = bf_get(lpfc_mbx_mq_create_q_id, &mq_create->u.response);
9721 if (mq->queue_id == 0xFFFF) {
9722 status = -ENXIO;
9723 goto out;
9724 }
9725 mq->type = LPFC_MQ;
9726 mq->subtype = subtype;
9727 mq->host_index = 0;
9728 mq->hba_index = 0;
9729
9730
9731 list_add_tail(&mq->list, &cq->child_list);
9732out:
9733 mempool_free(mbox, phba->mbox_mem_pool);
9734 return status;
9735}
9736
9737
9738
9739
9740
9741
9742
9743
9744
9745
9746
9747
9748
9749
9750
9751
9752
9753
9754
9755
9756
9757
9758
/**
 * lpfc_wq_create - Create a work queue on the HBA
 * @phba: Pointer to HBA context object.
 * @wq: The work queue to create; pages already allocated.
 * @cq: The parent completion queue WQ completions post to.
 * @subtype: Queue subtype recorded on the queue structure.
 *
 * Issues an FCoE WQ_CREATE mailbox command, records the hardware-assigned
 * queue id, and links the WQ onto the CQ's child list on success.
 *
 * Return: 0 on success, -ENOMEM if no mailbox is available, or -ENXIO on
 * mailbox failure.
 */
uint32_t
lpfc_wq_create(struct lpfc_hba *phba, struct lpfc_queue *wq,
	       struct lpfc_queue *cq, uint32_t subtype)
{
	struct lpfc_mbx_wq_create *wq_create;
	struct lpfc_dmabuf *dmabuf;
	LPFC_MBOXQ_t *mbox;
	int rc, length, status = 0;
	uint32_t shdr_status, shdr_add_status;
	union lpfc_sli4_cfg_shdr *shdr;

	mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!mbox)
		return -ENOMEM;
	length = (sizeof(struct lpfc_mbx_wq_create) -
		  sizeof(struct lpfc_sli4_cfg_mhdr));
	lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_FCOE,
			 LPFC_MBOX_OPCODE_FCOE_WQ_CREATE,
			 length, LPFC_SLI4_MBX_EMBED);
	wq_create = &mbox->u.mqe.un.wq_create;
	bf_set(lpfc_mbx_wq_create_num_pages, &wq_create->u.request,
	       wq->page_count);
	bf_set(lpfc_mbx_wq_create_cq_id, &wq_create->u.request,
	       cq->queue_id);
	/* Describe every DMA page backing the queue */
	list_for_each_entry(dmabuf, &wq->page_list, list) {
		wq_create->u.request.page[dmabuf->buffer_tag].addr_lo =
					putPaddrLow(dmabuf->phys);
		wq_create->u.request.page[dmabuf->buffer_tag].addr_hi =
					putPaddrHigh(dmabuf->phys);
	}
	rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
	/* The IOCTL status is embedded in the mailbox subheader. */
	shdr = (union lpfc_sli4_cfg_shdr *) &wq_create->header.cfg_shdr;
	shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
	shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
	if (shdr_status || shdr_add_status || rc) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"2503 WQ_CREATE mailbox failed with "
				"status x%x add_status x%x, mbx status x%x\n",
				shdr_status, shdr_add_status, rc);
		status = -ENXIO;
		goto out;
	}
	wq->queue_id = bf_get(lpfc_mbx_wq_create_q_id, &wq_create->u.response);
	if (wq->queue_id == 0xFFFF) {
		status = -ENXIO;
		goto out;
	}
	wq->type = LPFC_WQ;
	wq->subtype = subtype;
	wq->host_index = 0;
	wq->hba_index = 0;

	/* link the wq onto the parent cq child list */
	list_add_tail(&wq->list, &cq->child_list);
out:
	mempool_free(mbox, phba->mbox_mem_pool);
	return status;
}
9818
9819
9820
9821
9822
9823
9824
9825
9826
9827
9828
9829
9830
9831
9832
9833
9834
9835
9836
9837
9838
9839
9840
9841
9842uint32_t
9843lpfc_rq_create(struct lpfc_hba *phba, struct lpfc_queue *hrq,
9844 struct lpfc_queue *drq, struct lpfc_queue *cq, uint32_t subtype)
9845{
9846 struct lpfc_mbx_rq_create *rq_create;
9847 struct lpfc_dmabuf *dmabuf;
9848 LPFC_MBOXQ_t *mbox;
9849 int rc, length, status = 0;
9850 uint32_t shdr_status, shdr_add_status;
9851 union lpfc_sli4_cfg_shdr *shdr;
9852
9853 if (hrq->entry_count != drq->entry_count)
9854 return -EINVAL;
9855 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
9856 if (!mbox)
9857 return -ENOMEM;
9858 length = (sizeof(struct lpfc_mbx_rq_create) -
9859 sizeof(struct lpfc_sli4_cfg_mhdr));
9860 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_FCOE,
9861 LPFC_MBOX_OPCODE_FCOE_RQ_CREATE,
9862 length, LPFC_SLI4_MBX_EMBED);
9863 rq_create = &mbox->u.mqe.un.rq_create;
9864 switch (hrq->entry_count) {
9865 default:
9866 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
9867 "2535 Unsupported RQ count. (%d)\n",
9868 hrq->entry_count);
9869 if (hrq->entry_count < 512)
9870 return -EINVAL;
9871
9872 case 512:
9873 bf_set(lpfc_rq_context_rq_size, &rq_create->u.request.context,
9874 LPFC_RQ_RING_SIZE_512);
9875 break;
9876 case 1024:
9877 bf_set(lpfc_rq_context_rq_size, &rq_create->u.request.context,
9878 LPFC_RQ_RING_SIZE_1024);
9879 break;
9880 case 2048:
9881 bf_set(lpfc_rq_context_rq_size, &rq_create->u.request.context,
9882 LPFC_RQ_RING_SIZE_2048);
9883 break;
9884 case 4096:
9885 bf_set(lpfc_rq_context_rq_size, &rq_create->u.request.context,
9886 LPFC_RQ_RING_SIZE_4096);
9887 break;
9888 }
9889 bf_set(lpfc_rq_context_cq_id, &rq_create->u.request.context,
9890 cq->queue_id);
9891 bf_set(lpfc_mbx_rq_create_num_pages, &rq_create->u.request,
9892 hrq->page_count);
9893 bf_set(lpfc_rq_context_buf_size, &rq_create->u.request.context,
9894 LPFC_HDR_BUF_SIZE);
9895 list_for_each_entry(dmabuf, &hrq->page_list, list) {
9896 rq_create->u.request.page[dmabuf->buffer_tag].addr_lo =
9897 putPaddrLow(dmabuf->phys);
9898 rq_create->u.request.page[dmabuf->buffer_tag].addr_hi =
9899 putPaddrHigh(dmabuf->phys);
9900 }
9901 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
9902
9903 shdr = (union lpfc_sli4_cfg_shdr *) &rq_create->header.cfg_shdr;
9904 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
9905 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
9906 if (shdr_status || shdr_add_status || rc) {
9907 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
9908 "2504 RQ_CREATE mailbox failed with "
9909 "status x%x add_status x%x, mbx status x%x\n",
9910 shdr_status, shdr_add_status, rc);
9911 status = -ENXIO;
9912 goto out;
9913 }
9914 hrq->queue_id = bf_get(lpfc_mbx_rq_create_q_id, &rq_create->u.response);
9915 if (hrq->queue_id == 0xFFFF) {
9916 status = -ENXIO;
9917 goto out;
9918 }
9919 hrq->type = LPFC_HRQ;
9920 hrq->subtype = subtype;
9921 hrq->host_index = 0;
9922 hrq->hba_index = 0;
9923
9924
9925 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_FCOE,
9926 LPFC_MBOX_OPCODE_FCOE_RQ_CREATE,
9927 length, LPFC_SLI4_MBX_EMBED);
9928 switch (drq->entry_count) {
9929 default:
9930 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
9931 "2536 Unsupported RQ count. (%d)\n",
9932 drq->entry_count);
9933 if (drq->entry_count < 512)
9934 return -EINVAL;
9935
9936 case 512:
9937 bf_set(lpfc_rq_context_rq_size, &rq_create->u.request.context,
9938 LPFC_RQ_RING_SIZE_512);
9939 break;
9940 case 1024:
9941 bf_set(lpfc_rq_context_rq_size, &rq_create->u.request.context,
9942 LPFC_RQ_RING_SIZE_1024);
9943 break;
9944 case 2048:
9945 bf_set(lpfc_rq_context_rq_size, &rq_create->u.request.context,
9946 LPFC_RQ_RING_SIZE_2048);
9947 break;
9948 case 4096:
9949 bf_set(lpfc_rq_context_rq_size, &rq_create->u.request.context,
9950 LPFC_RQ_RING_SIZE_4096);
9951 break;
9952 }
9953 bf_set(lpfc_rq_context_cq_id, &rq_create->u.request.context,
9954 cq->queue_id);
9955 bf_set(lpfc_mbx_rq_create_num_pages, &rq_create->u.request,
9956 drq->page_count);
9957 bf_set(lpfc_rq_context_buf_size, &rq_create->u.request.context,
9958 LPFC_DATA_BUF_SIZE);
9959 list_for_each_entry(dmabuf, &drq->page_list, list) {
9960 rq_create->u.request.page[dmabuf->buffer_tag].addr_lo =
9961 putPaddrLow(dmabuf->phys);
9962 rq_create->u.request.page[dmabuf->buffer_tag].addr_hi =
9963 putPaddrHigh(dmabuf->phys);
9964 }
9965 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
9966
9967 shdr = (union lpfc_sli4_cfg_shdr *) &rq_create->header.cfg_shdr;
9968 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
9969 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
9970 if (shdr_status || shdr_add_status || rc) {
9971 status = -ENXIO;
9972 goto out;
9973 }
9974 drq->queue_id = bf_get(lpfc_mbx_rq_create_q_id, &rq_create->u.response);
9975 if (drq->queue_id == 0xFFFF) {
9976 status = -ENXIO;
9977 goto out;
9978 }
9979 drq->type = LPFC_DRQ;
9980 drq->subtype = subtype;
9981 drq->host_index = 0;
9982 drq->hba_index = 0;
9983
9984
9985 list_add_tail(&hrq->list, &cq->child_list);
9986 list_add_tail(&drq->list, &cq->child_list);
9987
9988out:
9989 mempool_free(mbox, phba->mbox_mem_pool);
9990 return status;
9991}
9992
9993
9994
9995
9996
9997
9998
9999
10000
10001
10002
10003
10004
/**
 * lpfc_eq_destroy - Destroy an event queue on the HBA
 * @phba: Pointer to HBA context object.
 * @eq: The event queue to destroy; may be NULL.
 *
 * Issues an EQ_DESTROY mailbox command for @eq, then unlinks the queue
 * from any parent list.  The queue's own memory is not freed here.
 *
 * Return: 0 on success, -ENODEV if @eq is NULL, -ENOMEM if no mailbox is
 * available, or -ENXIO on mailbox failure.
 */
uint32_t
lpfc_eq_destroy(struct lpfc_hba *phba, struct lpfc_queue *eq)
{
	LPFC_MBOXQ_t *mbox;
	int rc, length, status = 0;
	uint32_t shdr_status, shdr_add_status;
	union lpfc_sli4_cfg_shdr *shdr;

	if (!eq)
		return -ENODEV;
	mbox = mempool_alloc(eq->phba->mbox_mem_pool, GFP_KERNEL);
	if (!mbox)
		return -ENOMEM;
	length = (sizeof(struct lpfc_mbx_eq_destroy) -
		  sizeof(struct lpfc_sli4_cfg_mhdr));
	lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON,
			 LPFC_MBOX_OPCODE_EQ_DESTROY,
			 length, LPFC_SLI4_MBX_EMBED);
	bf_set(lpfc_mbx_eq_destroy_q_id, &mbox->u.mqe.un.eq_destroy.u.request,
	       eq->queue_id);
	mbox->vport = eq->phba->pport;
	mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl;

	rc = lpfc_sli_issue_mbox(eq->phba, mbox, MBX_POLL);
	/* The IOCTL status is embedded in the mailbox subheader. */
	shdr = (union lpfc_sli4_cfg_shdr *)
		&mbox->u.mqe.un.eq_destroy.header.cfg_shdr;
	shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
	shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
	if (shdr_status || shdr_add_status || rc) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"2505 EQ_DESTROY mailbox failed with "
				"status x%x add_status x%x, mbx status x%x\n",
				shdr_status, shdr_add_status, rc);
		status = -ENXIO;
	}

	/* Remove eq from any list */
	list_del_init(&eq->list);
	mempool_free(mbox, eq->phba->mbox_mem_pool);
	return status;
}
10047
10048
10049
10050
10051
10052
10053
10054
10055
10056
10057
10058
10059
10060uint32_t
10061lpfc_cq_destroy(struct lpfc_hba *phba, struct lpfc_queue *cq)
10062{
10063 LPFC_MBOXQ_t *mbox;
10064 int rc, length, status = 0;
10065 uint32_t shdr_status, shdr_add_status;
10066 union lpfc_sli4_cfg_shdr *shdr;
10067
10068 if (!cq)
10069 return -ENODEV;
10070 mbox = mempool_alloc(cq->phba->mbox_mem_pool, GFP_KERNEL);
10071 if (!mbox)
10072 return -ENOMEM;
10073 length = (sizeof(struct lpfc_mbx_cq_destroy) -
10074 sizeof(struct lpfc_sli4_cfg_mhdr));
10075 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON,
10076 LPFC_MBOX_OPCODE_CQ_DESTROY,
10077 length, LPFC_SLI4_MBX_EMBED);
10078 bf_set(lpfc_mbx_cq_destroy_q_id, &mbox->u.mqe.un.cq_destroy.u.request,
10079 cq->queue_id);
10080 mbox->vport = cq->phba->pport;
10081 mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
10082 rc = lpfc_sli_issue_mbox(cq->phba, mbox, MBX_POLL);
10083
10084 shdr = (union lpfc_sli4_cfg_shdr *)
10085 &mbox->u.mqe.un.wq_create.header.cfg_shdr;
10086 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
10087 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
10088 if (shdr_status || shdr_add_status || rc) {
10089 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
10090 "2506 CQ_DESTROY mailbox failed with "
10091 "status x%x add_status x%x, mbx status x%x\n",
10092 shdr_status, shdr_add_status, rc);
10093 status = -ENXIO;
10094 }
10095
10096 list_del_init(&cq->list);
10097 mempool_free(mbox, cq->phba->mbox_mem_pool);
10098 return status;
10099}
10100
10101
10102
10103
10104
10105
10106
10107
10108
10109
10110
10111
10112
10113uint32_t
10114lpfc_mq_destroy(struct lpfc_hba *phba, struct lpfc_queue *mq)
10115{
10116 LPFC_MBOXQ_t *mbox;
10117 int rc, length, status = 0;
10118 uint32_t shdr_status, shdr_add_status;
10119 union lpfc_sli4_cfg_shdr *shdr;
10120
10121 if (!mq)
10122 return -ENODEV;
10123 mbox = mempool_alloc(mq->phba->mbox_mem_pool, GFP_KERNEL);
10124 if (!mbox)
10125 return -ENOMEM;
10126 length = (sizeof(struct lpfc_mbx_mq_destroy) -
10127 sizeof(struct lpfc_sli4_cfg_mhdr));
10128 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON,
10129 LPFC_MBOX_OPCODE_MQ_DESTROY,
10130 length, LPFC_SLI4_MBX_EMBED);
10131 bf_set(lpfc_mbx_mq_destroy_q_id, &mbox->u.mqe.un.mq_destroy.u.request,
10132 mq->queue_id);
10133 mbox->vport = mq->phba->pport;
10134 mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
10135 rc = lpfc_sli_issue_mbox(mq->phba, mbox, MBX_POLL);
10136
10137 shdr = (union lpfc_sli4_cfg_shdr *)
10138 &mbox->u.mqe.un.mq_destroy.header.cfg_shdr;
10139 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
10140 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
10141 if (shdr_status || shdr_add_status || rc) {
10142 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
10143 "2507 MQ_DESTROY mailbox failed with "
10144 "status x%x add_status x%x, mbx status x%x\n",
10145 shdr_status, shdr_add_status, rc);
10146 status = -ENXIO;
10147 }
10148
10149 list_del_init(&mq->list);
10150 mempool_free(mbox, mq->phba->mbox_mem_pool);
10151 return status;
10152}
10153
10154
10155
10156
10157
10158
10159
10160
10161
10162
10163
10164
10165
10166uint32_t
10167lpfc_wq_destroy(struct lpfc_hba *phba, struct lpfc_queue *wq)
10168{
10169 LPFC_MBOXQ_t *mbox;
10170 int rc, length, status = 0;
10171 uint32_t shdr_status, shdr_add_status;
10172 union lpfc_sli4_cfg_shdr *shdr;
10173
10174 if (!wq)
10175 return -ENODEV;
10176 mbox = mempool_alloc(wq->phba->mbox_mem_pool, GFP_KERNEL);
10177 if (!mbox)
10178 return -ENOMEM;
10179 length = (sizeof(struct lpfc_mbx_wq_destroy) -
10180 sizeof(struct lpfc_sli4_cfg_mhdr));
10181 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_FCOE,
10182 LPFC_MBOX_OPCODE_FCOE_WQ_DESTROY,
10183 length, LPFC_SLI4_MBX_EMBED);
10184 bf_set(lpfc_mbx_wq_destroy_q_id, &mbox->u.mqe.un.wq_destroy.u.request,
10185 wq->queue_id);
10186 mbox->vport = wq->phba->pport;
10187 mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
10188 rc = lpfc_sli_issue_mbox(wq->phba, mbox, MBX_POLL);
10189 shdr = (union lpfc_sli4_cfg_shdr *)
10190 &mbox->u.mqe.un.wq_destroy.header.cfg_shdr;
10191 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
10192 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
10193 if (shdr_status || shdr_add_status || rc) {
10194 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
10195 "2508 WQ_DESTROY mailbox failed with "
10196 "status x%x add_status x%x, mbx status x%x\n",
10197 shdr_status, shdr_add_status, rc);
10198 status = -ENXIO;
10199 }
10200
10201 list_del_init(&wq->list);
10202 mempool_free(mbox, wq->phba->mbox_mem_pool);
10203 return status;
10204}
10205
10206
10207
10208
10209
10210
10211
10212
10213
10214
10215
10216
10217
/**
 * lpfc_rq_destroy - Destroy a Receive Queue pair on the HBA
 * @phba: HBA structure that indicates port to destroy the queues on.
 * @hrq: The header receive queue to destroy.
 * @drq: The data receive queue to destroy.
 *
 * Issues FCOE_RQ_DESTROY twice with the same mailbox: first for the
 * header RQ, then (reusing the mailbox with a new queue id) for the data
 * RQ.  On success both queues are unlinked from their lists.
 *
 * Return: 0 on success, -ENODEV if either queue pointer is NULL, -ENOMEM
 * on mailbox allocation failure, -ENXIO if either destroy fails.
 **/
uint32_t
lpfc_rq_destroy(struct lpfc_hba *phba, struct lpfc_queue *hrq,
		struct lpfc_queue *drq)
{
	LPFC_MBOXQ_t *mbox;
	int rc, length, status = 0;
	uint32_t shdr_status, shdr_add_status;
	union lpfc_sli4_cfg_shdr *shdr;

	if (!hrq || !drq)
		return -ENODEV;
	mbox = mempool_alloc(hrq->phba->mbox_mem_pool, GFP_KERNEL);
	if (!mbox)
		return -ENOMEM;
	/* Payload length excludes the common mailbox header. */
	length = (sizeof(struct lpfc_mbx_rq_destroy) -
		  sizeof(struct mbox_header));
	lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_FCOE,
			 LPFC_MBOX_OPCODE_FCOE_RQ_DESTROY,
			 length, LPFC_SLI4_MBX_EMBED);
	/* First pass: destroy the header receive queue. */
	bf_set(lpfc_mbx_rq_destroy_q_id, &mbox->u.mqe.un.rq_destroy.u.request,
	       hrq->queue_id);
	mbox->vport = hrq->phba->pport;
	mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
	rc = lpfc_sli_issue_mbox(hrq->phba, mbox, MBX_POLL);

	shdr = (union lpfc_sli4_cfg_shdr *)
		&mbox->u.mqe.un.rq_destroy.header.cfg_shdr;
	shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
	shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
	if (shdr_status || shdr_add_status || rc) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"2509 RQ_DESTROY mailbox failed with "
				"status x%x add_status x%x, mbx status x%x\n",
				shdr_status, shdr_add_status, rc);
		/* On timeout the firmware still owns the mailbox, so it
		 * must not be freed here.
		 */
		if (rc != MBX_TIMEOUT)
			mempool_free(mbox, hrq->phba->mbox_mem_pool);
		return -ENXIO;
	}
	/* Second pass: reuse the mailbox to destroy the data RQ. */
	bf_set(lpfc_mbx_rq_destroy_q_id, &mbox->u.mqe.un.rq_destroy.u.request,
	       drq->queue_id);
	rc = lpfc_sli_issue_mbox(drq->phba, mbox, MBX_POLL);
	shdr = (union lpfc_sli4_cfg_shdr *)
		&mbox->u.mqe.un.rq_destroy.header.cfg_shdr;
	shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
	shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
	if (shdr_status || shdr_add_status || rc) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"2510 RQ_DESTROY mailbox failed with "
				"status x%x add_status x%x, mbx status x%x\n",
				shdr_status, shdr_add_status, rc);
		status = -ENXIO;
	}
	/* NOTE(review): unlike the first-pass error path, the mailbox is
	 * freed below even when rc == MBX_TIMEOUT — confirm that is the
	 * intended ownership behavior for the second command.
	 */
	list_del_init(&hrq->list);
	list_del_init(&drq->list);
	mempool_free(mbox, hrq->phba->mbox_mem_pool);
	return status;
}
10275
10276
10277
10278
10279
10280
10281
10282
10283
10284
10285
10286
10287
10288
10289
10290
10291
10292
10293
10294
10295
10296
10297
10298int
10299lpfc_sli4_post_sgl(struct lpfc_hba *phba,
10300 dma_addr_t pdma_phys_addr0,
10301 dma_addr_t pdma_phys_addr1,
10302 uint16_t xritag)
10303{
10304 struct lpfc_mbx_post_sgl_pages *post_sgl_pages;
10305 LPFC_MBOXQ_t *mbox;
10306 int rc;
10307 uint32_t shdr_status, shdr_add_status;
10308 union lpfc_sli4_cfg_shdr *shdr;
10309
10310 if (xritag == NO_XRI) {
10311 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
10312 "0364 Invalid param:\n");
10313 return -EINVAL;
10314 }
10315
10316 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
10317 if (!mbox)
10318 return -ENOMEM;
10319
10320 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_FCOE,
10321 LPFC_MBOX_OPCODE_FCOE_POST_SGL_PAGES,
10322 sizeof(struct lpfc_mbx_post_sgl_pages) -
10323 sizeof(struct mbox_header), LPFC_SLI4_MBX_EMBED);
10324
10325 post_sgl_pages = (struct lpfc_mbx_post_sgl_pages *)
10326 &mbox->u.mqe.un.post_sgl_pages;
10327 bf_set(lpfc_post_sgl_pages_xri, post_sgl_pages, xritag);
10328 bf_set(lpfc_post_sgl_pages_xricnt, post_sgl_pages, 1);
10329
10330 post_sgl_pages->sgl_pg_pairs[0].sgl_pg0_addr_lo =
10331 cpu_to_le32(putPaddrLow(pdma_phys_addr0));
10332 post_sgl_pages->sgl_pg_pairs[0].sgl_pg0_addr_hi =
10333 cpu_to_le32(putPaddrHigh(pdma_phys_addr0));
10334
10335 post_sgl_pages->sgl_pg_pairs[0].sgl_pg1_addr_lo =
10336 cpu_to_le32(putPaddrLow(pdma_phys_addr1));
10337 post_sgl_pages->sgl_pg_pairs[0].sgl_pg1_addr_hi =
10338 cpu_to_le32(putPaddrHigh(pdma_phys_addr1));
10339 if (!phba->sli4_hba.intr_enable)
10340 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
10341 else
10342 rc = lpfc_sli_issue_mbox_wait(phba, mbox, LPFC_MBOX_TMO);
10343
10344 shdr = (union lpfc_sli4_cfg_shdr *) &post_sgl_pages->header.cfg_shdr;
10345 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
10346 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
10347 if (rc != MBX_TIMEOUT)
10348 mempool_free(mbox, phba->mbox_mem_pool);
10349 if (shdr_status || shdr_add_status || rc) {
10350 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
10351 "2511 POST_SGL mailbox failed with "
10352 "status x%x add_status x%x, mbx status x%x\n",
10353 shdr_status, shdr_add_status, rc);
10354 rc = -ENXIO;
10355 }
10356 return 0;
10357}
10358
10359
10360
10361
10362
10363
10364
10365
10366
10367
10368int
10369lpfc_sli4_remove_all_sgl_pages(struct lpfc_hba *phba)
10370{
10371 LPFC_MBOXQ_t *mbox;
10372 int rc;
10373 uint32_t shdr_status, shdr_add_status;
10374 union lpfc_sli4_cfg_shdr *shdr;
10375
10376 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
10377 if (!mbox)
10378 return -ENOMEM;
10379
10380 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_FCOE,
10381 LPFC_MBOX_OPCODE_FCOE_REMOVE_SGL_PAGES, 0,
10382 LPFC_SLI4_MBX_EMBED);
10383 if (!phba->sli4_hba.intr_enable)
10384 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
10385 else
10386 rc = lpfc_sli_issue_mbox_wait(phba, mbox, LPFC_MBOX_TMO);
10387
10388 shdr = (union lpfc_sli4_cfg_shdr *)
10389 &mbox->u.mqe.un.sli4_config.header.cfg_shdr;
10390 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
10391 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
10392 if (rc != MBX_TIMEOUT)
10393 mempool_free(mbox, phba->mbox_mem_pool);
10394 if (shdr_status || shdr_add_status || rc) {
10395 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
10396 "2512 REMOVE_ALL_SGL_PAGES mailbox failed with "
10397 "status x%x add_status x%x, mbx status x%x\n",
10398 shdr_status, shdr_add_status, rc);
10399 rc = -ENXIO;
10400 }
10401 return rc;
10402}
10403
10404
10405
10406
10407
10408
10409
10410
10411
10412
10413
10414uint16_t
10415lpfc_sli4_next_xritag(struct lpfc_hba *phba)
10416{
10417 uint16_t xritag;
10418
10419 spin_lock_irq(&phba->hbalock);
10420 xritag = phba->sli4_hba.next_xri;
10421 if ((xritag != (uint16_t) -1) && xritag <
10422 (phba->sli4_hba.max_cfg_param.max_xri
10423 + phba->sli4_hba.max_cfg_param.xri_base)) {
10424 phba->sli4_hba.next_xri++;
10425 phba->sli4_hba.max_cfg_param.xri_used++;
10426 spin_unlock_irq(&phba->hbalock);
10427 return xritag;
10428 }
10429 spin_unlock_irq(&phba->hbalock);
10430
10431 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
10432 "2004 Failed to allocate XRI.last XRITAG is %d"
10433 " Max XRI is %d, Used XRI is %d\n",
10434 phba->sli4_hba.next_xri,
10435 phba->sli4_hba.max_cfg_param.max_xri,
10436 phba->sli4_hba.max_cfg_param.xri_used);
10437 return -1;
10438}
10439
10440
10441
10442
10443
10444
10445
10446
10447
10448
/**
 * lpfc_sli4_post_sgl_list - Post the ELS SGL list to the port in one block
 * @phba: pointer to lpfc hba data structure.
 *
 * Builds a non-embedded FCOE_POST_SGL_PAGES mailbox command containing a
 * page pair for every entry of the ELS SGL array and issues it (polled if
 * interrupts are disabled, waited otherwise).  The whole request must fit
 * in one DMA page.
 *
 * Return: 0 on success, -ENOMEM on sizing/allocation failure, -ENXIO if
 * the mailbox command fails.
 **/
int
lpfc_sli4_post_sgl_list(struct lpfc_hba *phba)
{
	struct lpfc_sglq *sglq_entry;
	struct lpfc_mbx_post_uembed_sgl_page1 *sgl;
	struct sgl_page_pairs *sgl_pg_pairs;
	void *viraddr;
	LPFC_MBOXQ_t *mbox;
	uint32_t reqlen, alloclen, pg_pairs;
	uint32_t mbox_tmo;
	uint16_t xritag_start = 0;
	int els_xri_cnt, rc = 0;
	uint32_t shdr_status, shdr_add_status;
	union lpfc_sli4_cfg_shdr *shdr;

	/* The number of sgls to be posted */
	els_xri_cnt = lpfc_sli4_get_els_iocb_cnt(phba);

	/* One page pair per ELS XRI, plus the config header and a word. */
	reqlen = els_xri_cnt * sizeof(struct sgl_page_pairs) +
		 sizeof(union lpfc_sli4_cfg_shdr) + sizeof(uint32_t);
	if (reqlen > PAGE_SIZE) {
		lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
				"2559 Block sgl registration required DMA "
				"size (%d) great than a page\n", reqlen);
		return -ENOMEM;
	}
	mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!mbox) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"2560 Failed to allocate mbox cmd memory\n");
		return -ENOMEM;
	}

	/* Allocate DMA memory and set up the non-embedded mailbox command */
	alloclen = lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_FCOE,
			 LPFC_MBOX_OPCODE_FCOE_POST_SGL_PAGES, reqlen,
			 LPFC_SLI4_MBX_NEMBED);

	if (alloclen < reqlen) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"0285 Allocated DMA memory size (%d) is "
				"less than the requested DMA memory "
				"size (%d)\n", alloclen, reqlen);
		lpfc_sli4_mbox_cmd_free(phba, mbox);
		return -ENOMEM;
	}

	/* Get the first SGE entry from the non-embedded DMA memory */
	if (unlikely(!mbox->sge_array)) {
		lpfc_printf_log(phba, KERN_ERR, LOG_MBOX,
				"2525 Failed to get the non-embedded SGE "
				"virtual address\n");
		lpfc_sli4_mbox_cmd_free(phba, mbox);
		return -ENOMEM;
	}
	viraddr = mbox->sge_array->addr[0];

	/* Set up the SGL pages in the non-embedded DMA pages */
	sgl = (struct lpfc_mbx_post_uembed_sgl_page1 *)viraddr;
	sgl_pg_pairs = &sgl->sgl_pg_pairs;

	for (pg_pairs = 0; pg_pairs < els_xri_cnt; pg_pairs++) {
		sglq_entry = phba->sli4_hba.lpfc_els_sgl_array[pg_pairs];
		/* Each ELS SGL occupies a single page; page 1 is unused. */
		sgl_pg_pairs->sgl_pg0_addr_lo =
				cpu_to_le32(putPaddrLow(sglq_entry->phys));
		sgl_pg_pairs->sgl_pg0_addr_hi =
				cpu_to_le32(putPaddrHigh(sglq_entry->phys));
		sgl_pg_pairs->sgl_pg1_addr_lo =
				cpu_to_le32(putPaddrLow(0));
		sgl_pg_pairs->sgl_pg1_addr_hi =
				cpu_to_le32(putPaddrHigh(0));
		/* Keep the first xritag of the block for the command. */
		if (pg_pairs == 0)
			xritag_start = sglq_entry->sli4_xritag;
		sgl_pg_pairs++;
	}
	bf_set(lpfc_post_sgl_pages_xri, sgl, xritag_start);
	/* NOTE(review): the count is decremented by one here — presumably
	 * the xricnt field is 0-based in the hardware; confirm against the
	 * SLI-4 command spec.
	 */
	pg_pairs = (pg_pairs > 0) ? (pg_pairs - 1) : pg_pairs;
	bf_set(lpfc_post_sgl_pages_xricnt, sgl, pg_pairs);
	/* Perform endian conversion if necessary */
	sgl->word0 = cpu_to_le32(sgl->word0);

	if (!phba->sli4_hba.intr_enable)
		rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
	else {
		mbox_tmo = lpfc_mbox_tmo_val(phba, MBX_SLI4_CONFIG);
		rc = lpfc_sli_issue_mbox_wait(phba, mbox, mbox_tmo);
	}
	shdr = (union lpfc_sli4_cfg_shdr *) &sgl->cfg_shdr;
	shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
	shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
	/* Firmware keeps the mailbox on timeout; don't free it then. */
	if (rc != MBX_TIMEOUT)
		lpfc_sli4_mbox_cmd_free(phba, mbox);
	if (shdr_status || shdr_add_status || rc) {
		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
				"2513 POST_SGL_BLOCK mailbox command failed "
				"status x%x add_status x%x mbx status x%x\n",
				shdr_status, shdr_add_status, rc);
		rc = -ENXIO;
	}
	return rc;
}
10552
10553
10554
10555
10556
10557
10558
10559
10560
10561
10562
10563
/**
 * lpfc_sli4_post_scsi_sgl_block - Post a block of SCSI buffer SGLs
 * @phba: pointer to lpfc hba data structure.
 * @sblist: list of lpfc_scsi_buf entries whose SGLs are to be posted.
 * @cnt: number of buffers on @sblist.
 *
 * Builds a non-embedded FCOE_POST_SGL_PAGES mailbox command with one page
 * pair per SCSI buffer on @sblist and issues it (polled if interrupts are
 * disabled, waited otherwise).  The whole request must fit in one DMA
 * page.
 *
 * Return: 0 on success, -ENOMEM on sizing/allocation failure, -ENXIO if
 * the mailbox command fails.
 **/
int
lpfc_sli4_post_scsi_sgl_block(struct lpfc_hba *phba, struct list_head *sblist,
			      int cnt)
{
	struct lpfc_scsi_buf *psb;
	struct lpfc_mbx_post_uembed_sgl_page1 *sgl;
	struct sgl_page_pairs *sgl_pg_pairs;
	void *viraddr;
	LPFC_MBOXQ_t *mbox;
	uint32_t reqlen, alloclen, pg_pairs;
	uint32_t mbox_tmo;
	uint16_t xritag_start = 0;
	int rc = 0;
	uint32_t shdr_status, shdr_add_status;
	dma_addr_t pdma_phys_bpl1;
	union lpfc_sli4_cfg_shdr *shdr;

	/* One page pair per buffer, plus the config header and a word. */
	reqlen = cnt * sizeof(struct sgl_page_pairs) +
		 sizeof(union lpfc_sli4_cfg_shdr) + sizeof(uint32_t);
	if (reqlen > PAGE_SIZE) {
		lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
				"0217 Block sgl registration required DMA "
				"size (%d) great than a page\n", reqlen);
		return -ENOMEM;
	}
	mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!mbox) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"0283 Failed to allocate mbox cmd memory\n");
		return -ENOMEM;
	}

	/* Allocate DMA memory and set up the non-embedded mailbox command */
	alloclen = lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_FCOE,
				LPFC_MBOX_OPCODE_FCOE_POST_SGL_PAGES, reqlen,
				LPFC_SLI4_MBX_NEMBED);
	if (alloclen < reqlen) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"2561 Allocated DMA memory size (%d) is "
				"less than the requested DMA memory "
				"size (%d)\n", alloclen, reqlen);
		lpfc_sli4_mbox_cmd_free(phba, mbox);
		return -ENOMEM;
	}

	/* Get the first SGE entry from the non-embedded DMA memory */
	if (unlikely(!mbox->sge_array)) {
		lpfc_printf_log(phba, KERN_ERR, LOG_MBOX,
				"2565 Failed to get the non-embedded SGE "
				"virtual address\n");
		lpfc_sli4_mbox_cmd_free(phba, mbox);
		return -ENOMEM;
	}
	viraddr = mbox->sge_array->addr[0];

	/* Set up the SGL pages in the non-embedded DMA pages */
	sgl = (struct lpfc_mbx_post_uembed_sgl_page1 *)viraddr;
	sgl_pg_pairs = &sgl->sgl_pg_pairs;

	pg_pairs = 0;
	list_for_each_entry(psb, sblist, list) {
		/* Set up the sge entry */
		sgl_pg_pairs->sgl_pg0_addr_lo =
			cpu_to_le32(putPaddrLow(psb->dma_phys_bpl));
		sgl_pg_pairs->sgl_pg0_addr_hi =
			cpu_to_le32(putPaddrHigh(psb->dma_phys_bpl));
		/* A second page is only present when the BPL spills past
		 * one SGL page.
		 */
		if (phba->cfg_sg_dma_buf_size > SGL_PAGE_SIZE)
			pdma_phys_bpl1 = psb->dma_phys_bpl + SGL_PAGE_SIZE;
		else
			pdma_phys_bpl1 = 0;
		sgl_pg_pairs->sgl_pg1_addr_lo =
			cpu_to_le32(putPaddrLow(pdma_phys_bpl1));
		sgl_pg_pairs->sgl_pg1_addr_hi =
			cpu_to_le32(putPaddrHigh(pdma_phys_bpl1));
		/* Keep the first xritag of the block for the command. */
		if (pg_pairs == 0)
			xritag_start = psb->cur_iocbq.sli4_xritag;
		sgl_pg_pairs++;
		pg_pairs++;
	}
	bf_set(lpfc_post_sgl_pages_xri, sgl, xritag_start);
	bf_set(lpfc_post_sgl_pages_xricnt, sgl, pg_pairs);
	/* Perform endian conversion if necessary */
	sgl->word0 = cpu_to_le32(sgl->word0);

	if (!phba->sli4_hba.intr_enable)
		rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
	else {
		mbox_tmo = lpfc_mbox_tmo_val(phba, MBX_SLI4_CONFIG);
		rc = lpfc_sli_issue_mbox_wait(phba, mbox, mbox_tmo);
	}
	shdr = (union lpfc_sli4_cfg_shdr *) &sgl->cfg_shdr;
	shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
	shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
	/* Firmware keeps the mailbox on timeout; don't free it then. */
	if (rc != MBX_TIMEOUT)
		lpfc_sli4_mbox_cmd_free(phba, mbox);
	if (shdr_status || shdr_add_status || rc) {
		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
				"2564 POST_SGL_BLOCK mailbox command failed "
				"status x%x add_status x%x mbx status x%x\n",
				shdr_status, shdr_add_status, rc);
		rc = -ENXIO;
	}
	return rc;
}
10671
10672
10673
10674
10675
10676
10677
10678
10679
10680
10681
/**
 * lpfc_fc_frame_check - Sanity-check a received FC frame header
 * @phba: pointer to lpfc hba data structure.
 * @fc_hdr: the frame header of the received frame.
 *
 * Accepts the frame only if both its R_CTL and TYPE fields carry values
 * this driver knows how to handle.  A VFT-tagged frame is unwrapped and
 * the inner header re-checked recursively.
 *
 * Return: 0 when the frame is acceptable, 1 when it should be dropped.
 **/
static int
lpfc_fc_frame_check(struct lpfc_hba *phba, struct fc_frame_header *fc_hdr)
{
	char *rctl_names[] = FC_RCTL_NAMES_INIT;
	char *type_names[] = FC_TYPE_NAMES_INIT;
	struct fc_vft_header *fc_vft_hdr;

	switch (fc_hdr->fh_r_ctl) {
	case FC_RCTL_DD_UNCAT:		/* uncategorized information */
	case FC_RCTL_DD_SOL_DATA:	/* solicited data */
	case FC_RCTL_DD_UNSOL_CTL:	/* unsolicited control */
	case FC_RCTL_DD_SOL_CTL:	/* solicited control or reply */
	case FC_RCTL_DD_UNSOL_DATA:	/* unsolicited data */
	case FC_RCTL_DD_DATA_DESC:	/* data descriptor */
	case FC_RCTL_DD_UNSOL_CMD:	/* unsolicited command */
	case FC_RCTL_DD_CMD_STATUS:	/* command status */
	case FC_RCTL_ELS_REQ:		/* extended link services request */
	case FC_RCTL_ELS_REP:		/* extended link services reply */
	case FC_RCTL_ELS4_REQ:		/* FC-4 ELS request */
	case FC_RCTL_ELS4_REP:		/* FC-4 ELS reply */
	case FC_RCTL_BA_NOP:		/* basic link service NOP */
	case FC_RCTL_BA_ABTS:		/* basic link service abort */
	case FC_RCTL_BA_RMC:		/* remove connection */
	case FC_RCTL_BA_ACC:		/* basic accept */
	case FC_RCTL_BA_RJT:		/* basic reject */
	case FC_RCTL_BA_PRMT:
	case FC_RCTL_ACK_1:		/* acknowledge_1 */
	case FC_RCTL_ACK_0:		/* acknowledge_0 */
	case FC_RCTL_P_RJT:		/* port reject */
	case FC_RCTL_F_RJT:		/* fabric reject */
	case FC_RCTL_P_BSY:		/* port busy */
	case FC_RCTL_F_BSY:		/* fabric busy to data frame */
	case FC_RCTL_F_BSYL:		/* fabric busy to link control frame */
	case FC_RCTL_LCR:		/* link credit reset */
	case FC_RCTL_END:		/* end */
		break;
	case FC_RCTL_VFTH:		/* Virtual Fabric tagging Header */
		/* Unwrap the VFT header and validate the inner frame. */
		fc_vft_hdr = (struct fc_vft_header *)fc_hdr;
		fc_hdr = &((struct fc_frame_header *)fc_vft_hdr)[1];
		return lpfc_fc_frame_check(phba, fc_hdr);
	default:
		goto drop;
	}
	switch (fc_hdr->fh_type) {
	case FC_TYPE_BLS:
	case FC_TYPE_ELS:
	case FC_TYPE_FCP:
	case FC_TYPE_CT:
		break;
	case FC_TYPE_IP:
	case FC_TYPE_ILS:
	default:
		goto drop;
	}
	lpfc_printf_log(phba, KERN_INFO, LOG_ELS,
			"2538 Received frame rctl:%s type:%s\n",
			rctl_names[fc_hdr->fh_r_ctl],
			type_names[fc_hdr->fh_type]);
	return 0;
drop:
	/* NOTE(review): on the drop path fh_r_ctl/fh_type are arbitrary
	 * wire values; confirm the FC_RCTL_NAMES_INIT/FC_TYPE_NAMES_INIT
	 * tables cover the full 0-255 index range, otherwise this reads
	 * out of bounds.
	 */
	lpfc_printf_log(phba, KERN_WARNING, LOG_ELS,
			"2539 Dropped frame rctl:%s type:%s\n",
			rctl_names[fc_hdr->fh_r_ctl],
			type_names[fc_hdr->fh_type]);
	return 1;
}
10748
10749
10750
10751
10752
10753
10754
10755
10756
10757static uint32_t
10758lpfc_fc_hdr_get_vfi(struct fc_frame_header *fc_hdr)
10759{
10760 struct fc_vft_header *fc_vft_hdr = (struct fc_vft_header *)fc_hdr;
10761
10762 if (fc_hdr->fh_r_ctl != FC_RCTL_VFTH)
10763 return 0;
10764 return bf_get(fc_vft_hdr_vf_id, fc_vft_hdr);
10765}
10766
10767
10768
10769
10770
10771
10772
10773
10774
10775
10776
10777
10778
10779static struct lpfc_vport *
10780lpfc_fc_frame_to_vport(struct lpfc_hba *phba, struct fc_frame_header *fc_hdr,
10781 uint16_t fcfi)
10782{
10783 struct lpfc_vport **vports;
10784 struct lpfc_vport *vport = NULL;
10785 int i;
10786 uint32_t did = (fc_hdr->fh_d_id[0] << 16 |
10787 fc_hdr->fh_d_id[1] << 8 |
10788 fc_hdr->fh_d_id[2]);
10789
10790 vports = lpfc_create_vport_work_array(phba);
10791 if (vports != NULL)
10792 for (i = 0; i <= phba->max_vpi && vports[i] != NULL; i++) {
10793 if (phba->fcf.fcfi == fcfi &&
10794 vports[i]->vfi == lpfc_fc_hdr_get_vfi(fc_hdr) &&
10795 vports[i]->fc_myDID == did) {
10796 vport = vports[i];
10797 break;
10798 }
10799 }
10800 lpfc_destroy_vport_work_array(phba, vports);
10801 return vport;
10802}
10803
10804
10805
10806
10807
10808
10809
10810
10811
10812
10813
10814
10815
/**
 * lpfc_fc_frame_add - Add a received frame to its partially-received sequence
 * @vport: the vport the frame was received on.
 * @dmabuf: the buffer holding the newly received frame.
 *
 * Searches the vport's receive-buffer list for a sequence matching the
 * frame's SEQ_ID, OX_ID and S_ID.  A frame with no matching sequence
 * starts a new one; otherwise the frame is inserted into the existing
 * sequence in SEQ_CNT order.
 *
 * Return: the head buffer of the sequence the frame was added to, @dmabuf
 * itself when it starts a new sequence or becomes the new head, or NULL
 * when no insertion point is found (duplicate/out-of-order frame).
 **/
static struct hbq_dmabuf *
lpfc_fc_frame_add(struct lpfc_vport *vport, struct hbq_dmabuf *dmabuf)
{
	struct fc_frame_header *new_hdr;
	struct fc_frame_header *temp_hdr;
	struct lpfc_dmabuf *d_buf;
	struct lpfc_dmabuf *h_buf;
	struct hbq_dmabuf *seq_dmabuf = NULL;
	struct hbq_dmabuf *temp_dmabuf = NULL;

	new_hdr = (struct fc_frame_header *)dmabuf->hbuf.virt;
	/* Look for the matching sequence: same SEQ_ID, OX_ID and S_ID. */
	list_for_each_entry(h_buf, &vport->rcv_buffer_list, list) {
		temp_hdr = (struct fc_frame_header *)h_buf->virt;
		if ((temp_hdr->fh_seq_id != new_hdr->fh_seq_id) ||
		    (temp_hdr->fh_ox_id != new_hdr->fh_ox_id) ||
		    (memcmp(&temp_hdr->fh_s_id, &new_hdr->fh_s_id, 3)))
			continue;
		/* found a pending sequence that matches this frame */
		seq_dmabuf = container_of(h_buf, struct hbq_dmabuf, hbuf);
		break;
	}
	if (!seq_dmabuf) {
		/*
		 * This indicates first frame received for this sequence.
		 * Queue the buffer on the vport's rcv_buffer_list.
		 */
		list_add_tail(&dmabuf->hbuf.list, &vport->rcv_buffer_list);
		return dmabuf;
	}
	temp_hdr = seq_dmabuf->hbuf.virt;
	/* NOTE(review): fh_seq_cnt is compared without byte-swapping here
	 * and below — confirm the ordering is still correct for the
	 * on-wire big-endian 16-bit counts on little-endian hosts.
	 */
	if (new_hdr->fh_seq_cnt < temp_hdr->fh_seq_cnt) {
		/* New frame precedes the current head of the sequence. */
		list_add(&seq_dmabuf->dbuf.list, &dmabuf->dbuf.list);
		return dmabuf;
	}
	/* Walk backwards to find the spot that keeps SEQ_CNT ordering. */
	list_for_each_entry_reverse(d_buf, &seq_dmabuf->dbuf.list, list) {
		temp_dmabuf = container_of(d_buf, struct hbq_dmabuf, dbuf);
		temp_hdr = (struct fc_frame_header *)temp_dmabuf->hbuf.virt;
		/*
		 * If the frame's sequence count is greater than the frame on
		 * the list then insert the frame right after this frame
		 */
		if (new_hdr->fh_seq_cnt > temp_hdr->fh_seq_cnt) {
			list_add(&dmabuf->dbuf.list, &temp_dmabuf->dbuf.list);
			return seq_dmabuf;
		}
	}
	/* No insertion point found (e.g. duplicate SEQ_CNT). */
	return NULL;
}
10866
10867
10868
10869
10870
10871
10872
10873
10874
10875
10876
10877
10878
/**
 * lpfc_seq_complete - Check whether a received sequence is complete
 * @dmabuf: the head buffer of the (SEQ_CNT-ordered) sequence.
 *
 * A sequence is complete when its frames start at SEQ_CNT 0, run with no
 * gaps, and one of them carries the END_SEQ bit in F_CTL.
 *
 * Return: 1 when the sequence is complete, 0 otherwise.
 **/
static int
lpfc_seq_complete(struct hbq_dmabuf *dmabuf)
{
	struct fc_frame_header *hdr;
	struct lpfc_dmabuf *d_buf;
	struct hbq_dmabuf *seq_dmabuf;
	uint32_t fctl;
	int seq_count = 0;

	hdr = (struct fc_frame_header *)dmabuf->hbuf.virt;
	/* NOTE(review): fh_seq_cnt is compared without be16 conversion;
	 * works for the 0 check but confirm the increment comparison below
	 * matches the on-wire byte order.
	 */
	if (hdr->fh_seq_cnt != seq_count)
		return 0;
	fctl = (hdr->fh_f_ctl[0] << 16 |
		hdr->fh_f_ctl[1] << 8 |
		hdr->fh_f_ctl[2]);
	/* If last frame of sequence we can return success. */
	if (fctl & FC_FC_END_SEQ)
		return 1;
	/* Walk the remaining frames: each must continue the count. */
	list_for_each_entry(d_buf, &dmabuf->dbuf.list, list) {
		seq_dmabuf = container_of(d_buf, struct hbq_dmabuf, dbuf);
		hdr = (struct fc_frame_header *)seq_dmabuf->hbuf.virt;
		/* If there is a hole in the sequence count then fail. */
		if (++seq_count != hdr->fh_seq_cnt)
			return 0;
		fctl = (hdr->fh_f_ctl[0] << 16 |
			hdr->fh_f_ctl[1] << 8 |
			hdr->fh_f_ctl[2]);
		/* If last frame of sequence we can return success. */
		if (fctl & FC_FC_END_SEQ)
			return 1;
	}
	return 0;
}
10913
10914
10915
10916
10917
10918
10919
10920
10921
10922
10923
10924
10925
10926
/**
 * lpfc_prep_seq - Turn a complete received sequence into an iocb chain
 * @vport: the vport the sequence was received on.
 * @seq_dmabuf: the head buffer of the complete sequence.
 *
 * Converts the frames of @seq_dmabuf into a chain of unsolicited receive
 * iocbs.  Each iocb carries up to two buffers (context2/context3); extra
 * frames allocate additional iocbs linked off the first one.  The first
 * iocb accumulates the total received length.
 *
 * Return: the first iocb of the chain, or NULL when no iocb could be
 * allocated at all.
 **/
static struct lpfc_iocbq *
lpfc_prep_seq(struct lpfc_vport *vport, struct hbq_dmabuf *seq_dmabuf)
{
	struct lpfc_dmabuf *d_buf, *n_buf;
	struct lpfc_iocbq *first_iocbq, *iocbq;
	struct fc_frame_header *fc_hdr;
	uint32_t sid;

	fc_hdr = (struct fc_frame_header *)seq_dmabuf->hbuf.virt;
	/* remove from receive buffer list */
	list_del_init(&seq_dmabuf->hbuf.list);
	/* get the Remote Port's SID, assembled from the 3-byte S_ID. */
	sid = (fc_hdr->fh_s_id[0] << 16 |
	       fc_hdr->fh_s_id[1] << 8 |
	       fc_hdr->fh_s_id[2]);
	/* Get an iocbq struct to fill in. */
	first_iocbq = lpfc_sli_get_iocbq(vport->phba);
	if (first_iocbq) {
		/* Initialize the first IOCB. */
		first_iocbq->iocb.unsli3.rcvsli3.acc_len = 0;
		first_iocbq->iocb.ulpStatus = IOSTAT_SUCCESS;
		first_iocbq->iocb.ulpCommand = CMD_IOCB_RCV_SEQ64_CX;
		first_iocbq->iocb.ulpContext = be16_to_cpu(fc_hdr->fh_ox_id);
		first_iocbq->iocb.unsli3.rcvsli3.vpi =
					vport->vpi + vport->phba->vpi_base;
		/* put the first buffer into the first IOCB */
		first_iocbq->context2 = &seq_dmabuf->dbuf;
		first_iocbq->context3 = NULL;
		first_iocbq->iocb.ulpBdeCount = 1;
		first_iocbq->iocb.un.cont64[0].tus.f.bdeSize =
							LPFC_DATA_BUF_SIZE;
		first_iocbq->iocb.un.rcvels.remoteID = sid;
		first_iocbq->iocb.unsli3.rcvsli3.acc_len +=
				bf_get(lpfc_rcqe_length, &seq_dmabuf->rcqe);
	}
	iocbq = first_iocbq;
	/*
	 * Each IOCB holds a pair of Buffers, hang remaining frames off
	 * the current iocb's second slot, or a freshly allocated iocb.
	 */
	list_for_each_entry_safe(d_buf, n_buf, &seq_dmabuf->dbuf.list, list) {
		if (!iocbq) {
			/* No iocb to attach to: drop the buffer. */
			lpfc_in_buf_free(vport->phba, d_buf);
			continue;
		}
		if (!iocbq->context3) {
			/* Second buffer slot of the current iocb is free. */
			iocbq->context3 = d_buf;
			iocbq->iocb.ulpBdeCount++;
			iocbq->iocb.unsli3.rcvsli3.bde2.tus.f.bdeSize =
							LPFC_DATA_BUF_SIZE;
			/* NOTE(review): acc_len is bumped by the head
			 * rcqe length for every frame — confirm each
			 * frame's own length shouldn't be used instead.
			 */
			first_iocbq->iocb.unsli3.rcvsli3.acc_len +=
				bf_get(lpfc_rcqe_length, &seq_dmabuf->rcqe);
		} else {
			/* Current iocb is full: chain a new one. */
			iocbq = lpfc_sli_get_iocbq(vport->phba);
			if (!iocbq) {
				if (first_iocbq) {
					/* Flag the chain as truncated. */
					first_iocbq->iocb.ulpStatus =
							IOSTAT_FCP_RSP_ERROR;
					first_iocbq->iocb.un.ulpWord[4] =
							IOERR_NO_RESOURCES;
				}
				lpfc_in_buf_free(vport->phba, d_buf);
				continue;
			}
			iocbq->context2 = d_buf;
			iocbq->context3 = NULL;
			iocbq->iocb.ulpBdeCount = 1;
			iocbq->iocb.un.cont64[0].tus.f.bdeSize =
							LPFC_DATA_BUF_SIZE;
			first_iocbq->iocb.unsli3.rcvsli3.acc_len +=
				bf_get(lpfc_rcqe_length, &seq_dmabuf->rcqe);
			iocbq->iocb.un.rcvels.remoteID = sid;
			list_add_tail(&iocbq->list, &first_iocbq->list);
		}
	}
	return first_iocbq;
}
11004
11005
11006
11007
11008
11009
11010
11011
11012
11013
11014
11015
11016
/**
 * lpfc_sli4_handle_received_buffer - Process pending received buffers
 * @phba: pointer to lpfc hba data structure.
 *
 * Drains the port's pending receive-buffer list and processes each
 * frame: validates the header, resolves the owning vport, collects the
 * frame into its sequence, and — once a sequence is complete — converts
 * it to an iocb chain and hands it to the unsolicited iocb handler on
 * the ELS ring.  Invalid or unclaimed frames are freed.
 *
 * Return: always 0.
 **/
int
lpfc_sli4_handle_received_buffer(struct lpfc_hba *phba)
{
	LIST_HEAD(cmplq);
	struct hbq_dmabuf *dmabuf, *seq_dmabuf;
	struct fc_frame_header *fc_hdr;
	struct lpfc_vport *vport;
	uint32_t fcfi;
	struct lpfc_iocbq *iocbq;

	/* Clear hba flag and get all received buffers into the cmplq */
	spin_lock_irq(&phba->hbalock);
	phba->hba_flag &= ~HBA_RECEIVE_BUFFER;
	list_splice_init(&phba->rb_pend_list, &cmplq);
	spin_unlock_irq(&phba->hbalock);

	/* Process each received buffer */
	while ((dmabuf = lpfc_sli_hbqbuf_get(&cmplq)) != NULL) {
		fc_hdr = (struct fc_frame_header *)dmabuf->hbuf.virt;
		/* check to see if this a valid type of frame */
		if (lpfc_fc_frame_check(phba, fc_hdr)) {
			lpfc_in_buf_free(phba, &dmabuf->dbuf);
			continue;
		}
		fcfi = bf_get(lpfc_rcqe_fcf_id, &dmabuf->rcqe);
		vport = lpfc_fc_frame_to_vport(phba, fc_hdr, fcfi);
		if (!vport) {
			/* throw out the frame */
			lpfc_in_buf_free(phba, &dmabuf->dbuf);
			continue;
		}
		/* Link this frame to its (possibly partial) sequence. */
		seq_dmabuf = lpfc_fc_frame_add(vport, dmabuf);
		if (!seq_dmabuf) {
			/* unable to add frame to vport - throw it out */
			lpfc_in_buf_free(phba, &dmabuf->dbuf);
			continue;
		}
		/* If not last frame in sequence continue processing frames. */
		if (!lpfc_seq_complete(seq_dmabuf)) {
			/*
			 * When saving off frames post a new one and mark this
			 * frame to be freed when it is finished.
			 **/
			lpfc_sli_hbqbuf_fill_hbqs(phba, LPFC_ELS_HBQ, 1);
			dmabuf->tag = -1;
			continue;
		}
		/* Sequence is complete: build the iocb chain and deliver. */
		fc_hdr = (struct fc_frame_header *)seq_dmabuf->hbuf.virt;
		iocbq = lpfc_prep_seq(vport, seq_dmabuf);
		if (!lpfc_complete_unsol_iocb(phba,
					      &phba->sli.ring[LPFC_ELS_RING],
					      iocbq, fc_hdr->fh_r_ctl,
					      fc_hdr->fh_type))
			lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
					"2540 Ring %d handler: unexpected Rctl "
					"x%x Type x%x received\n",
					LPFC_ELS_RING,
					fc_hdr->fh_r_ctl, fc_hdr->fh_type);
	};
	return 0;
}
11079
11080
11081
11082
11083
11084
11085
11086
11087
11088
11089
11090
11091
11092
11093
11094
11095
11096
11097
11098
11099
11100
11101int
11102lpfc_sli4_post_all_rpi_hdrs(struct lpfc_hba *phba)
11103{
11104 struct lpfc_rpi_hdr *rpi_page;
11105 uint32_t rc = 0;
11106
11107
11108 list_for_each_entry(rpi_page, &phba->sli4_hba.lpfc_rpi_hdr_list, list) {
11109 rc = lpfc_sli4_post_rpi_hdr(phba, rpi_page);
11110 if (rc != MBX_SUCCESS) {
11111 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
11112 "2008 Error %d posting all rpi "
11113 "headers\n", rc);
11114 rc = -EIO;
11115 break;
11116 }
11117 }
11118
11119 return rc;
11120}
11121
11122
11123
11124
11125
11126
11127
11128
11129
11130
11131
11132
11133
11134
11135
/**
 * lpfc_sli4_post_rpi_hdr - Post one RPI header template page to the port
 * @phba: pointer to lpfc hba data structure.
 * @rpi_page: the RPI header page to post.
 *
 * Issues an embedded FCOE_POST_HDR_TEMPLATE mailbox command (polled)
 * describing @rpi_page's page count, starting RPI offset, and DMA
 * address.  The mailbox is freed unless the command timed out (the
 * firmware still owns it in that case).
 *
 * Return: 0 on success, -ENOMEM if no mailbox memory is available,
 * -ENXIO if the mailbox command fails.
 **/
int
lpfc_sli4_post_rpi_hdr(struct lpfc_hba *phba, struct lpfc_rpi_hdr *rpi_page)
{
	LPFC_MBOXQ_t *mboxq;
	struct lpfc_mbx_post_hdr_tmpl *hdr_tmpl;
	uint32_t rc = 0;
	uint32_t mbox_tmo;
	uint32_t shdr_status, shdr_add_status;
	union lpfc_sli4_cfg_shdr *shdr;

	/* The port is notified of the header region via a mailbox command. */
	mboxq = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!mboxq) {
		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
				"2001 Unable to allocate memory for issuing "
				"SLI_CONFIG_SPECIAL mailbox command\n");
		return -ENOMEM;
	}

	/* Post all rpi memory regions to the port. */
	hdr_tmpl = &mboxq->u.mqe.un.hdr_tmpl;
	mbox_tmo = lpfc_mbox_tmo_val(phba, MBX_SLI4_CONFIG);
	lpfc_sli4_config(phba, mboxq, LPFC_MBOX_SUBSYSTEM_FCOE,
			 LPFC_MBOX_OPCODE_FCOE_POST_HDR_TEMPLATE,
			 sizeof(struct lpfc_mbx_post_hdr_tmpl) -
			 sizeof(struct mbox_header), LPFC_SLI4_MBX_EMBED);
	bf_set(lpfc_mbx_post_hdr_tmpl_page_cnt,
	       hdr_tmpl, rpi_page->page_count);
	bf_set(lpfc_mbx_post_hdr_tmpl_rpi_offset, hdr_tmpl,
	       rpi_page->start_rpi);
	hdr_tmpl->rpi_paddr_lo = putPaddrLow(rpi_page->dmabuf->phys);
	hdr_tmpl->rpi_paddr_hi = putPaddrHigh(rpi_page->dmabuf->phys);
	rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
	shdr = (union lpfc_sli4_cfg_shdr *) &hdr_tmpl->header.cfg_shdr;
	shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
	shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
	/* Firmware keeps the mailbox on timeout; don't free it then. */
	if (rc != MBX_TIMEOUT)
		mempool_free(mboxq, phba->mbox_mem_pool);
	if (shdr_status || shdr_add_status || rc) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"2514 POST_RPI_HDR mailbox failed with "
				"status x%x add_status x%x, mbx status x%x\n",
				shdr_status, shdr_add_status, rc);
		rc = -ENXIO;
	}
	return rc;
}
11183
11184
11185
11186
11187
11188
11189
11190
11191
11192
11193
11194
11195
11196
/**
 * lpfc_sli4_alloc_rpi - Allocate an RPI from the port's RPI bitmask
 * @phba: pointer to lpfc hba data structure.
 *
 * Finds and claims the next free RPI in the bitmask under the hbalock.
 * When the pool of posted-but-unused RPIs runs low (and the max hasn't
 * been reached), a new RPI header page is created and posted so future
 * allocations can succeed.
 *
 * Return: the allocated RPI, or LPFC_RPI_ALLOC_ERROR when none is
 * available.
 **/
int
lpfc_sli4_alloc_rpi(struct lpfc_hba *phba)
{
	int rpi;
	uint16_t max_rpi, rpi_base, rpi_limit;
	uint16_t rpi_remaining;
	struct lpfc_rpi_hdr *rpi_hdr;

	max_rpi = phba->sli4_hba.max_cfg_param.max_rpi;
	rpi_base = phba->sli4_hba.max_cfg_param.rpi_base;
	rpi_limit = phba->sli4_hba.next_rpi;

	/*
	 * The valid rpi range is not guaranteed to be zero-based.  Start
	 * the search at the rpi_base as reported by the port.
	 */
	spin_lock_irq(&phba->hbalock);
	rpi = find_next_zero_bit(phba->sli4_hba.rpi_bmask, rpi_limit, rpi_base);
	if (rpi >= rpi_limit || rpi < rpi_base)
		rpi = LPFC_RPI_ALLOC_ERROR;
	else {
		/* Claim the bit and account for the usage. */
		set_bit(rpi, phba->sli4_hba.rpi_bmask);
		phba->sli4_hba.max_cfg_param.rpi_used++;
		phba->sli4_hba.rpi_count++;
	}

	/*
	 * Don't try to allocate more rpi header regions if the device limit
	 * on available rpis max has been exhausted.
	 */
	if ((rpi == LPFC_RPI_ALLOC_ERROR) &&
	    (phba->sli4_hba.rpi_count >= max_rpi)) {
		spin_unlock_irq(&phba->hbalock);
		return rpi;
	}

	/*
	 * If the driver is running low on rpi resources, allocate another
	 * page now.  Note that the next_rpi value is used because
	 * it represents how many are actually in use whereas max_rpi notes
	 * how many are supported max by the device.
	 */
	rpi_remaining = phba->sli4_hba.next_rpi - rpi_base -
		phba->sli4_hba.rpi_count;
	spin_unlock_irq(&phba->hbalock);
	if (rpi_remaining < LPFC_RPI_LOW_WATER_MARK) {
		/* Grow the pool and post the new header page to the port. */
		rpi_hdr = lpfc_sli4_create_rpi_hdr(phba);
		if (!rpi_hdr) {
			lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
					"2002 Error Could not grow rpi "
					"count\n");
		} else {
			lpfc_sli4_post_rpi_hdr(phba, rpi_hdr);
		}
	}

	return rpi;
}
11255
11256
11257
11258
11259
11260
11261
11262
11263void
11264lpfc_sli4_free_rpi(struct lpfc_hba *phba, int rpi)
11265{
11266 spin_lock_irq(&phba->hbalock);
11267 clear_bit(rpi, phba->sli4_hba.rpi_bmask);
11268 phba->sli4_hba.rpi_count--;
11269 phba->sli4_hba.max_cfg_param.rpi_used--;
11270 spin_unlock_irq(&phba->hbalock);
11271}
11272
11273
11274
11275
11276
11277
11278
11279
11280void
11281lpfc_sli4_remove_rpis(struct lpfc_hba *phba)
11282{
11283 kfree(phba->sli4_hba.rpi_bmask);
11284}
11285
11286
11287
11288
11289
11290
11291
11292
11293int
11294lpfc_sli4_resume_rpi(struct lpfc_nodelist *ndlp)
11295{
11296 LPFC_MBOXQ_t *mboxq;
11297 struct lpfc_hba *phba = ndlp->phba;
11298 int rc;
11299
11300
11301 mboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
11302 if (!mboxq)
11303 return -ENOMEM;
11304
11305
11306 lpfc_resume_rpi(mboxq, ndlp);
11307 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_NOWAIT);
11308 if (rc == MBX_NOT_FINISHED) {
11309 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
11310 "2010 Resume RPI Mailbox failed "
11311 "status %d, mbxStatus x%x\n", rc,
11312 bf_get(lpfc_mqe_status, &mboxq->u.mqe));
11313 mempool_free(mboxq, phba->mbox_mem_pool);
11314 return -EIO;
11315 }
11316 return 0;
11317}
11318
11319
11320
11321
11322
11323
11324
11325
11326
11327
11328
11329
11330
11331
11332int
11333lpfc_sli4_init_vpi(struct lpfc_hba *phba, uint16_t vpi)
11334{
11335 LPFC_MBOXQ_t *mboxq;
11336 int rc = 0;
11337 uint32_t mbox_tmo;
11338
11339 if (vpi == 0)
11340 return -EINVAL;
11341 mboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
11342 if (!mboxq)
11343 return -ENOMEM;
11344 lpfc_init_vpi(phba, mboxq, vpi);
11345 mbox_tmo = lpfc_mbox_tmo_val(phba, MBX_INIT_VPI);
11346 rc = lpfc_sli_issue_mbox_wait(phba, mboxq, mbox_tmo);
11347 if (rc != MBX_TIMEOUT)
11348 mempool_free(mboxq, phba->mbox_mem_pool);
11349 if (rc != MBX_SUCCESS) {
11350 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
11351 "2022 INIT VPI Mailbox failed "
11352 "status %d, mbxStatus x%x\n", rc,
11353 bf_get(lpfc_mqe_status, &mboxq->u.mqe));
11354 rc = -EIO;
11355 }
11356 return rc;
11357}
11358
11359
11360
11361
11362
11363
11364
11365
11366
11367
11368static void
11369lpfc_mbx_cmpl_add_fcf_record(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
11370{
11371 void *virt_addr;
11372 union lpfc_sli4_cfg_shdr *shdr;
11373 uint32_t shdr_status, shdr_add_status;
11374
11375 virt_addr = mboxq->sge_array->addr[0];
11376
11377 shdr = (union lpfc_sli4_cfg_shdr *) virt_addr;
11378 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
11379 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
11380
11381 if ((shdr_status || shdr_add_status) &&
11382 (shdr_status != STATUS_FCF_IN_USE))
11383 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
11384 "2558 ADD_FCF_RECORD mailbox failed with "
11385 "status x%x add_status x%x\n",
11386 shdr_status, shdr_add_status);
11387
11388 lpfc_sli4_mbox_cmd_free(phba, mboxq);
11389}
11390
11391
11392
11393
11394
11395
11396
11397
11398
11399
11400int
11401lpfc_sli4_add_fcf_record(struct lpfc_hba *phba, struct fcf_record *fcf_record)
11402{
11403 int rc = 0;
11404 LPFC_MBOXQ_t *mboxq;
11405 uint8_t *bytep;
11406 void *virt_addr;
11407 dma_addr_t phys_addr;
11408 struct lpfc_mbx_sge sge;
11409 uint32_t alloc_len, req_len;
11410 uint32_t fcfindex;
11411
11412 mboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
11413 if (!mboxq) {
11414 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
11415 "2009 Failed to allocate mbox for ADD_FCF cmd\n");
11416 return -ENOMEM;
11417 }
11418
11419 req_len = sizeof(struct fcf_record) + sizeof(union lpfc_sli4_cfg_shdr) +
11420 sizeof(uint32_t);
11421
11422
11423 alloc_len = lpfc_sli4_config(phba, mboxq, LPFC_MBOX_SUBSYSTEM_FCOE,
11424 LPFC_MBOX_OPCODE_FCOE_ADD_FCF,
11425 req_len, LPFC_SLI4_MBX_NEMBED);
11426 if (alloc_len < req_len) {
11427 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
11428 "2523 Allocated DMA memory size (x%x) is "
11429 "less than the requested DMA memory "
11430 "size (x%x)\n", alloc_len, req_len);
11431 lpfc_sli4_mbox_cmd_free(phba, mboxq);
11432 return -ENOMEM;
11433 }
11434
11435
11436
11437
11438
11439 lpfc_sli4_mbx_sge_get(mboxq, 0, &sge);
11440 phys_addr = getPaddr(sge.pa_hi, sge.pa_lo);
11441 if (unlikely(!mboxq->sge_array)) {
11442 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX,
11443 "2526 Failed to get the non-embedded SGE "
11444 "virtual address\n");
11445 lpfc_sli4_mbox_cmd_free(phba, mboxq);
11446 return -ENOMEM;
11447 }
11448 virt_addr = mboxq->sge_array->addr[0];
11449
11450
11451
11452
11453 fcfindex = bf_get(lpfc_fcf_record_fcf_index, fcf_record);
11454 bytep = virt_addr + sizeof(union lpfc_sli4_cfg_shdr);
11455 lpfc_sli_pcimem_bcopy(&fcfindex, bytep, sizeof(uint32_t));
11456
11457
11458
11459
11460
11461
11462 bytep += sizeof(uint32_t);
11463 lpfc_sli_pcimem_bcopy(fcf_record, bytep, sizeof(struct fcf_record));
11464 mboxq->vport = phba->pport;
11465 mboxq->mbox_cmpl = lpfc_mbx_cmpl_add_fcf_record;
11466 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_NOWAIT);
11467 if (rc == MBX_NOT_FINISHED) {
11468 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
11469 "2515 ADD_FCF_RECORD mailbox failed with "
11470 "status 0x%x\n", rc);
11471 lpfc_sli4_mbox_cmd_free(phba, mboxq);
11472 rc = -EIO;
11473 } else
11474 rc = 0;
11475
11476 return rc;
11477}
11478
11479
11480
11481
11482
11483
11484
11485
11486
11487
11488
/**
 * lpfc_sli4_build_dflt_fcf_record - Build the driver's default FCF record
 * @phba: pointer to the HBA context object.
 * @fcf_record: caller-provided record to initialize.
 * @fcf_index: FCF table index to place in the record.
 *
 * Zeroes @fcf_record and fills in the driver defaults: maximum receive
 * size, FKA advertisement period, FIP priority, the FC-MAP based MAC
 * address and FC map bytes, valid/available flags, the given index, and
 * both FPMA and SPMA MAC-address provisioning.  If a VLAN is configured
 * (phba->valid_vlan), the corresponding bit in the record's VLAN bitmap
 * is set.
 */
void
lpfc_sli4_build_dflt_fcf_record(struct lpfc_hba *phba,
				struct fcf_record *fcf_record,
				uint16_t fcf_index)
{
	memset(fcf_record, 0, sizeof(struct fcf_record));
	fcf_record->max_rcv_size = LPFC_FCOE_MAX_RCV_SIZE;
	fcf_record->fka_adv_period = LPFC_FCOE_FKA_ADV_PER;
	fcf_record->fip_priority = LPFC_FCOE_FIP_PRIORITY;
	/* MAC address: first three bytes from the FC map, rest fixed. */
	bf_set(lpfc_fcf_record_mac_0, fcf_record, phba->fc_map[0]);
	bf_set(lpfc_fcf_record_mac_1, fcf_record, phba->fc_map[1]);
	bf_set(lpfc_fcf_record_mac_2, fcf_record, phba->fc_map[2]);
	bf_set(lpfc_fcf_record_mac_3, fcf_record, LPFC_FCOE_FCF_MAC3);
	bf_set(lpfc_fcf_record_mac_4, fcf_record, LPFC_FCOE_FCF_MAC4);
	bf_set(lpfc_fcf_record_mac_5, fcf_record, LPFC_FCOE_FCF_MAC5);
	bf_set(lpfc_fcf_record_fc_map_0, fcf_record, phba->fc_map[0]);
	bf_set(lpfc_fcf_record_fc_map_1, fcf_record, phba->fc_map[1]);
	bf_set(lpfc_fcf_record_fc_map_2, fcf_record, phba->fc_map[2]);
	bf_set(lpfc_fcf_record_fcf_valid, fcf_record, 1);
	bf_set(lpfc_fcf_record_fcf_avail, fcf_record, 1);
	bf_set(lpfc_fcf_record_fcf_index, fcf_record, fcf_index);
	bf_set(lpfc_fcf_record_mac_addr_prov, fcf_record,
		LPFC_FCF_FPMA | LPFC_FCF_SPMA);
	/* Set the VLAN bit map */
	if (phba->valid_vlan) {
		fcf_record->vlan_bitmap[phba->vlan_id / 8]
			= 1 << (phba->vlan_id % 8);
	}
}
11518
11519
11520
11521
11522
11523
11524
11525
11526
11527int
11528lpfc_sli4_read_fcf_record(struct lpfc_hba *phba, uint16_t fcf_index)
11529{
11530 int rc = 0, error;
11531 LPFC_MBOXQ_t *mboxq;
11532 void *virt_addr;
11533 dma_addr_t phys_addr;
11534 uint8_t *bytep;
11535 struct lpfc_mbx_sge sge;
11536 uint32_t alloc_len, req_len;
11537 struct lpfc_mbx_read_fcf_tbl *read_fcf;
11538
11539 phba->fcoe_eventtag_at_fcf_scan = phba->fcoe_eventtag;
11540 mboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
11541 if (!mboxq) {
11542 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
11543 "2000 Failed to allocate mbox for "
11544 "READ_FCF cmd\n");
11545 return -ENOMEM;
11546 }
11547
11548 req_len = sizeof(struct fcf_record) +
11549 sizeof(union lpfc_sli4_cfg_shdr) + 2 * sizeof(uint32_t);
11550
11551
11552 alloc_len = lpfc_sli4_config(phba, mboxq, LPFC_MBOX_SUBSYSTEM_FCOE,
11553 LPFC_MBOX_OPCODE_FCOE_READ_FCF_TABLE, req_len,
11554 LPFC_SLI4_MBX_NEMBED);
11555
11556 if (alloc_len < req_len) {
11557 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
11558 "0291 Allocated DMA memory size (x%x) is "
11559 "less than the requested DMA memory "
11560 "size (x%x)\n", alloc_len, req_len);
11561 lpfc_sli4_mbox_cmd_free(phba, mboxq);
11562 return -ENOMEM;
11563 }
11564
11565
11566
11567
11568 lpfc_sli4_mbx_sge_get(mboxq, 0, &sge);
11569 phys_addr = getPaddr(sge.pa_hi, sge.pa_lo);
11570 if (unlikely(!mboxq->sge_array)) {
11571 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX,
11572 "2527 Failed to get the non-embedded SGE "
11573 "virtual address\n");
11574 lpfc_sli4_mbox_cmd_free(phba, mboxq);
11575 return -ENOMEM;
11576 }
11577 virt_addr = mboxq->sge_array->addr[0];
11578 read_fcf = (struct lpfc_mbx_read_fcf_tbl *)virt_addr;
11579
11580
11581 bf_set(lpfc_mbx_read_fcf_tbl_indx, &read_fcf->u.request, fcf_index);
11582
11583 bytep = virt_addr + sizeof(union lpfc_sli4_cfg_shdr);
11584 lpfc_sli_pcimem_bcopy(bytep, bytep, sizeof(uint32_t));
11585 mboxq->vport = phba->pport;
11586 mboxq->mbox_cmpl = lpfc_mbx_cmpl_read_fcf_record;
11587 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_NOWAIT);
11588 if (rc == MBX_NOT_FINISHED) {
11589 lpfc_sli4_mbox_cmd_free(phba, mboxq);
11590 error = -EIO;
11591 } else {
11592 spin_lock_irq(&phba->hbalock);
11593 phba->hba_flag |= FCF_DISC_INPROGRESS;
11594 spin_unlock_irq(&phba->hbalock);
11595 error = 0;
11596 }
11597 return error;
11598}
11599
11600
11601
11602
11603
11604
11605
11606
11607
11608void
11609lpfc_sli_read_link_ste(struct lpfc_hba *phba)
11610{
11611 LPFC_MBOXQ_t *pmb = NULL;
11612 MAILBOX_t *mb;
11613 uint8_t *rgn23_data = NULL;
11614 uint32_t offset = 0, data_size, sub_tlv_len, tlv_offset;
11615 int rc;
11616
11617 pmb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
11618 if (!pmb) {
11619 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
11620 "2600 lpfc_sli_read_serdes_param failed to"
11621 " allocate mailbox memory\n");
11622 goto out;
11623 }
11624 mb = &pmb->u.mb;
11625
11626
11627 rgn23_data = kzalloc(DMP_RGN23_SIZE, GFP_KERNEL);
11628 if (!rgn23_data)
11629 goto out;
11630
11631 do {
11632 lpfc_dump_mem(phba, pmb, offset, DMP_REGION_23);
11633 rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL);
11634
11635 if (rc != MBX_SUCCESS) {
11636 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
11637 "2601 lpfc_sli_read_link_ste failed to"
11638 " read config region 23 rc 0x%x Status 0x%x\n",
11639 rc, mb->mbxStatus);
11640 mb->un.varDmp.word_cnt = 0;
11641 }
11642
11643
11644
11645
11646 if (mb->un.varDmp.word_cnt == 0)
11647 break;
11648 if (mb->un.varDmp.word_cnt > DMP_RGN23_SIZE - offset)
11649 mb->un.varDmp.word_cnt = DMP_RGN23_SIZE - offset;
11650
11651 lpfc_sli_pcimem_bcopy(((uint8_t *)mb) + DMP_RSP_OFFSET,
11652 rgn23_data + offset,
11653 mb->un.varDmp.word_cnt);
11654 offset += mb->un.varDmp.word_cnt;
11655 } while (mb->un.varDmp.word_cnt && offset < DMP_RGN23_SIZE);
11656
11657 data_size = offset;
11658 offset = 0;
11659
11660 if (!data_size)
11661 goto out;
11662
11663
11664 if (memcmp(&rgn23_data[offset], LPFC_REGION23_SIGNATURE, 4)) {
11665 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
11666 "2619 Config region 23 has bad signature\n");
11667 goto out;
11668 }
11669 offset += 4;
11670
11671
11672 if (rgn23_data[offset] != LPFC_REGION23_VERSION) {
11673 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
11674 "2620 Config region 23 has bad version\n");
11675 goto out;
11676 }
11677 offset += 4;
11678
11679
11680 while (offset < data_size) {
11681 if (rgn23_data[offset] == LPFC_REGION23_LAST_REC)
11682 break;
11683
11684
11685
11686
11687 if ((rgn23_data[offset] != DRIVER_SPECIFIC_TYPE) ||
11688 (rgn23_data[offset + 2] != LINUX_DRIVER_ID) ||
11689 (rgn23_data[offset + 3] != 0)) {
11690 offset += rgn23_data[offset + 1] * 4 + 4;
11691 continue;
11692 }
11693
11694
11695 sub_tlv_len = rgn23_data[offset + 1] * 4;
11696 offset += 4;
11697 tlv_offset = 0;
11698
11699
11700
11701
11702 while ((offset < data_size) &&
11703 (tlv_offset < sub_tlv_len)) {
11704 if (rgn23_data[offset] == LPFC_REGION23_LAST_REC) {
11705 offset += 4;
11706 tlv_offset += 4;
11707 break;
11708 }
11709 if (rgn23_data[offset] != PORT_STE_TYPE) {
11710 offset += rgn23_data[offset + 1] * 4 + 4;
11711 tlv_offset += rgn23_data[offset + 1] * 4 + 4;
11712 continue;
11713 }
11714
11715
11716 if (!rgn23_data[offset + 2])
11717 phba->hba_flag |= LINK_DISABLED;
11718
11719 goto out;
11720 }
11721 }
11722out:
11723 if (pmb)
11724 mempool_free(pmb, phba->mbox_mem_pool);
11725 kfree(rgn23_data);
11726 return;
11727}
11728