1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22#include <linux/blkdev.h>
23#include <linux/pci.h>
24#include <linux/interrupt.h>
25#include <linux/delay.h>
26#include <linux/slab.h>
27#include <linux/lockdep.h>
28
29#include <scsi/scsi.h>
30#include <scsi/scsi_cmnd.h>
31#include <scsi/scsi_device.h>
32#include <scsi/scsi_host.h>
33#include <scsi/scsi_transport_fc.h>
34#include <scsi/fc/fc_fs.h>
35#include <linux/aer.h>
36
37#include "lpfc_hw4.h"
38#include "lpfc_hw.h"
39#include "lpfc_sli.h"
40#include "lpfc_sli4.h"
41#include "lpfc_nl.h"
42#include "lpfc_disc.h"
43#include "lpfc_scsi.h"
44#include "lpfc.h"
45#include "lpfc_crtn.h"
46#include "lpfc_logmsg.h"
47#include "lpfc_compat.h"
48#include "lpfc_debugfs.h"
49#include "lpfc_vport.h"
50
51
/* Classification of SLI-3 IOCB commands; produced by
 * lpfc_sli_iocb_cmd_type() and used to dispatch ring completion handling.
 */
typedef enum _lpfc_iocb_type {
	LPFC_UNKNOWN_IOCB,	/* unrecognized or unhandled command */
	LPFC_UNSOL_IOCB,	/* unsolicited (receive-side) entry */
	LPFC_SOL_IOCB,		/* solicited command issued by the host */
	LPFC_ABORT_IOCB		/* abort/close exchange command */
} lpfc_iocb_type;
58
59
60
61static int lpfc_sli_issue_mbox_s4(struct lpfc_hba *, LPFC_MBOXQ_t *,
62 uint32_t);
63static int lpfc_sli4_read_rev(struct lpfc_hba *, LPFC_MBOXQ_t *,
64 uint8_t *, uint32_t *);
65static struct lpfc_iocbq *lpfc_sli4_els_wcqe_to_rspiocbq(struct lpfc_hba *,
66 struct lpfc_iocbq *);
67static void lpfc_sli4_send_seq_to_ulp(struct lpfc_vport *,
68 struct hbq_dmabuf *);
69static int lpfc_sli4_fp_handle_wcqe(struct lpfc_hba *, struct lpfc_queue *,
70 struct lpfc_cqe *);
71static int lpfc_sli4_post_els_sgl_list(struct lpfc_hba *, struct list_head *,
72 int);
73static void lpfc_sli4_hba_handle_eqe(struct lpfc_hba *, struct lpfc_eqe *,
74 uint32_t);
75static bool lpfc_sli4_mbox_completions_pending(struct lpfc_hba *phba);
76static bool lpfc_sli4_process_missed_mbox_completions(struct lpfc_hba *phba);
77
/**
 * lpfc_get_iocb_from_iocbq - Get the iocb from the iocbq
 * @iocbq: driver iocb object.
 *
 * Returns a pointer to the SLI-3 IOCB embedded in the driver iocbq
 * wrapper.  Never NULL for a valid @iocbq.
 **/
static IOCB_t *
lpfc_get_iocb_from_iocbq(struct lpfc_iocbq *iocbq)
{
	return &iocbq->iocb;
}
83
84
85
86
87
88
89
90
91
92
93
94
95
96static uint32_t
97lpfc_sli4_wq_put(struct lpfc_queue *q, union lpfc_wqe *wqe)
98{
99 union lpfc_wqe *temp_wqe;
100 struct lpfc_register doorbell;
101 uint32_t host_index;
102 uint32_t idx;
103
104
105 if (unlikely(!q))
106 return -ENOMEM;
107 temp_wqe = q->qe[q->host_index].wqe;
108
109
110 idx = ((q->host_index + 1) % q->entry_count);
111 if (idx == q->hba_index) {
112 q->WQ_overflow++;
113 return -ENOMEM;
114 }
115 q->WQ_posted++;
116
117 if (!((q->host_index + 1) % q->entry_repost))
118 bf_set(wqe_wqec, &wqe->generic.wqe_com, 1);
119 if (q->phba->sli3_options & LPFC_SLI4_PHWQ_ENABLED)
120 bf_set(wqe_wqid, &wqe->generic.wqe_com, q->queue_id);
121 lpfc_sli_pcimem_bcopy(wqe, temp_wqe, q->entry_size);
122
123
124 host_index = q->host_index;
125
126 q->host_index = idx;
127
128
129 doorbell.word0 = 0;
130 if (q->db_format == LPFC_DB_LIST_FORMAT) {
131 bf_set(lpfc_wq_db_list_fm_num_posted, &doorbell, 1);
132 bf_set(lpfc_wq_db_list_fm_index, &doorbell, host_index);
133 bf_set(lpfc_wq_db_list_fm_id, &doorbell, q->queue_id);
134 } else if (q->db_format == LPFC_DB_RING_FORMAT) {
135 bf_set(lpfc_wq_db_ring_fm_num_posted, &doorbell, 1);
136 bf_set(lpfc_wq_db_ring_fm_id, &doorbell, q->queue_id);
137 } else {
138 return -EINVAL;
139 }
140 writel(doorbell.word0, q->db_regaddr);
141
142 return 0;
143}
144
145
146
147
148
149
150
151
152
153
154
155
156static uint32_t
157lpfc_sli4_wq_release(struct lpfc_queue *q, uint32_t index)
158{
159 uint32_t released = 0;
160
161
162 if (unlikely(!q))
163 return 0;
164
165 if (q->hba_index == index)
166 return 0;
167 do {
168 q->hba_index = ((q->hba_index + 1) % q->entry_count);
169 released++;
170 } while (q->hba_index != index);
171 return released;
172}
173
174
175
176
177
178
179
180
181
182
183
184
185
186static uint32_t
187lpfc_sli4_mq_put(struct lpfc_queue *q, struct lpfc_mqe *mqe)
188{
189 struct lpfc_mqe *temp_mqe;
190 struct lpfc_register doorbell;
191
192
193 if (unlikely(!q))
194 return -ENOMEM;
195 temp_mqe = q->qe[q->host_index].mqe;
196
197
198 if (((q->host_index + 1) % q->entry_count) == q->hba_index)
199 return -ENOMEM;
200 lpfc_sli_pcimem_bcopy(mqe, temp_mqe, q->entry_size);
201
202 q->phba->mbox = (MAILBOX_t *)temp_mqe;
203
204
205 q->host_index = ((q->host_index + 1) % q->entry_count);
206
207
208 doorbell.word0 = 0;
209 bf_set(lpfc_mq_doorbell_num_posted, &doorbell, 1);
210 bf_set(lpfc_mq_doorbell_id, &doorbell, q->queue_id);
211 writel(doorbell.word0, q->phba->sli4_hba.MQDBregaddr);
212 return 0;
213}
214
215
216
217
218
219
220
221
222
223
224
225static uint32_t
226lpfc_sli4_mq_release(struct lpfc_queue *q)
227{
228
229 if (unlikely(!q))
230 return 0;
231
232
233 q->phba->mbox = NULL;
234 q->hba_index = ((q->hba_index + 1) % q->entry_count);
235 return 1;
236}
237
238
239
240
241
242
243
244
245
246
/**
 * lpfc_sli4_eq_get - Gets the next valid EQE from a EQ
 * @q: The Event Queue to get the first valid EQE from
 *
 * Returns a pointer to the next valid Event Queue Entry on @q and advances
 * q->hba_index past it.  Returns NULL when the queue is empty (next entry
 * not yet marked valid by the HBA) or when consuming it would collide with
 * the host index.  The entry's valid bit is NOT cleared here; that is done
 * later by lpfc_sli4_eq_release().
 **/
static struct lpfc_eqe *
lpfc_sli4_eq_get(struct lpfc_queue *q)
{
	struct lpfc_eqe *eqe;
	uint32_t idx;

	/* sanity check on queue memory */
	if (unlikely(!q))
		return NULL;
	eqe = q->qe[q->hba_index].eqe;

	/* If the next EQE is not valid then we are done */
	if (!bf_get_le32(lpfc_eqe_valid, eqe))
		return NULL;
	/* If the host has not yet processed the next entry then we are done */
	idx = ((q->hba_index + 1) % q->entry_count);
	if (idx == q->host_index)
		return NULL;

	q->hba_index = idx;

	/*
	 * Full barrier: make sure the valid-bit read above completes before
	 * the caller reads/copies the EQE contents, so speculative loads
	 * cannot observe entry data from before the valid bit was set.
	 */
	mb();
	return eqe;
}
279
280
281
282
283
284
285static inline void
286lpfc_sli4_eq_clr_intr(struct lpfc_queue *q)
287{
288 struct lpfc_register doorbell;
289
290 doorbell.word0 = 0;
291 bf_set(lpfc_eqcq_doorbell_eqci, &doorbell, 1);
292 bf_set(lpfc_eqcq_doorbell_qt, &doorbell, LPFC_QUEUE_TYPE_EVENT);
293 bf_set(lpfc_eqcq_doorbell_eqid_hi, &doorbell,
294 (q->queue_id >> LPFC_EQID_HI_FIELD_SHIFT));
295 bf_set(lpfc_eqcq_doorbell_eqid_lo, &doorbell, q->queue_id);
296 writel(doorbell.word0, q->phba->sli4_hba.EQCQDBregaddr);
297}
298
299
300
301
302
303
304
305
306
307
308
309
310
311
312
313
/**
 * lpfc_sli4_eq_release - Indicates the host has finished processing an EQ
 * @q: The Event Queue that the host has completed processing for.
 * @arm: Indicates whether the host wants to arm the EQ.
 *
 * Clears the valid bit on every EQE consumed since the last release (from
 * q->hba_index up to q->host_index), then rings the EQ doorbell to hand
 * those entries back to the HBA and, if @arm, rearm the queue so it can
 * raise a new interrupt.  Returns the number of EQEs released.
 **/
uint32_t
lpfc_sli4_eq_release(struct lpfc_queue *q, bool arm)
{
	uint32_t released = 0;
	struct lpfc_eqe *temp_eqe;
	struct lpfc_register doorbell;

	/* sanity check on queue memory */
	if (unlikely(!q))
		return 0;

	/* Invalidate every entry the host has consumed since last release */
	while (q->hba_index != q->host_index) {
		temp_eqe = q->qe[q->host_index].eqe;
		bf_set_le32(lpfc_eqe_valid, temp_eqe, 0);
		released++;
		q->host_index = ((q->host_index + 1) % q->entry_count);
	}
	/* Nothing released and no rearm requested: skip the doorbell */
	if (unlikely(released == 0 && !arm))
		return 0;

	/* ring doorbell for number popped */
	doorbell.word0 = 0;
	if (arm) {
		bf_set(lpfc_eqcq_doorbell_arm, &doorbell, 1);
		bf_set(lpfc_eqcq_doorbell_eqci, &doorbell, 1);
	}
	bf_set(lpfc_eqcq_doorbell_num_released, &doorbell, released);
	bf_set(lpfc_eqcq_doorbell_qt, &doorbell, LPFC_QUEUE_TYPE_EVENT);
	bf_set(lpfc_eqcq_doorbell_eqid_hi, &doorbell,
			(q->queue_id >> LPFC_EQID_HI_FIELD_SHIFT));
	bf_set(lpfc_eqcq_doorbell_eqid_lo, &doorbell, q->queue_id);
	writel(doorbell.word0, q->phba->sli4_hba.EQCQDBregaddr);
	/* Read back to flush the PCI write pipeline when rearming in INTx
	 * mode.  NOTE(review): comparing bool @arm against LPFC_QUEUE_REARM
	 * assumes the macro evaluates to true - confirm in lpfc_sli4.h. */
	if ((q->phba->intr_type == INTx) && (arm == LPFC_QUEUE_REARM))
		readl(q->phba->sli4_hba.EQCQDBregaddr);
	return released;
}
352
353
354
355
356
357
358
359
360
361
/**
 * lpfc_sli4_cq_get - Gets the next valid CQE from a CQ
 * @q: The Completion Queue to get the first valid CQE from
 *
 * Returns a pointer to the next valid Completion Queue Entry on @q and
 * advances q->hba_index past it.  Returns NULL when the queue is empty
 * (next entry not yet marked valid) or when consuming it would collide
 * with the host index.  The valid bit is cleared later, in
 * lpfc_sli4_cq_release().
 **/
static struct lpfc_cqe *
lpfc_sli4_cq_get(struct lpfc_queue *q)
{
	struct lpfc_cqe *cqe;
	uint32_t idx;

	/* sanity check on queue memory */
	if (unlikely(!q))
		return NULL;

	/* If the next CQE is not valid then we are done */
	if (!bf_get_le32(lpfc_cqe_valid, q->qe[q->hba_index].cqe))
		return NULL;
	/* If the host has not yet processed the next entry then we are done */
	idx = ((q->hba_index + 1) % q->entry_count);
	if (idx == q->host_index)
		return NULL;

	cqe = q->qe[q->hba_index].cqe;
	q->hba_index = idx;

	/*
	 * Full barrier: make sure the valid-bit read above completes before
	 * the caller copies or acts on the CQE contents, so speculative
	 * loads cannot observe stale entry data.
	 */
	mb();
	return cqe;
}
395
396
397
398
399
400
401
402
403
404
405
406
407
408
409
410
/**
 * lpfc_sli4_cq_release - Indicates the host has finished processing a CQ
 * @q: The Completion Queue that the host has completed processing for.
 * @arm: Indicates whether the host wants to arm the CQ.
 *
 * Clears the valid bit on every CQE consumed since the last release (from
 * q->hba_index up to q->host_index), then rings the CQ doorbell to hand
 * those entries back to the HBA and, if @arm, rearm the queue.  Returns
 * the number of CQEs released.
 **/
uint32_t
lpfc_sli4_cq_release(struct lpfc_queue *q, bool arm)
{
	uint32_t released = 0;
	struct lpfc_cqe *temp_qe;
	struct lpfc_register doorbell;

	/* sanity check on queue memory */
	if (unlikely(!q))
		return 0;
	/* Invalidate every entry the host has consumed since last release */
	while (q->hba_index != q->host_index) {
		temp_qe = q->qe[q->host_index].cqe;
		bf_set_le32(lpfc_cqe_valid, temp_qe, 0);
		released++;
		q->host_index = ((q->host_index + 1) % q->entry_count);
	}
	/* Nothing released and no rearm requested: skip the doorbell */
	if (unlikely(released == 0 && !arm))
		return 0;

	/* ring doorbell for number popped */
	doorbell.word0 = 0;
	if (arm)
		bf_set(lpfc_eqcq_doorbell_arm, &doorbell, 1);
	bf_set(lpfc_eqcq_doorbell_num_released, &doorbell, released);
	bf_set(lpfc_eqcq_doorbell_qt, &doorbell, LPFC_QUEUE_TYPE_COMPLETION);
	bf_set(lpfc_eqcq_doorbell_cqid_hi, &doorbell,
			(q->queue_id >> LPFC_CQID_HI_FIELD_SHIFT));
	bf_set(lpfc_eqcq_doorbell_cqid_lo, &doorbell, q->queue_id);
	writel(doorbell.word0, q->phba->sli4_hba.EQCQDBregaddr);
	return released;
}
443
444
445
446
447
448
449
450
451
452
453
454
455
/**
 * lpfc_sli4_rq_put - Put a Receive Buffer Queue Entry on a Receive Queue
 * @hq: The Header Receive Queue to operate on.
 * @dq: The Data Receive Queue to operate on.
 * @hrqe: The Header Receive Queue Entry to put on the Receive queue.
 * @drqe: The Data Receive Queue Entry to put on the Receive queue.
 *
 * Copies @hrqe/@drqe into the next slots of the paired header/data RQs and
 * advances both host indexes in lockstep (the two queues must stay in
 * sync).  The doorbell is only rung once every hq->entry_repost postings.
 *
 * Return: the index the entries were posted at, -ENOMEM if either queue is
 * NULL or the header queue is full, -EINVAL on queue-type or index
 * mismatch or unknown doorbell format.
 **/
static int
lpfc_sli4_rq_put(struct lpfc_queue *hq, struct lpfc_queue *dq,
		 struct lpfc_rqe *hrqe, struct lpfc_rqe *drqe)
{
	struct lpfc_rqe *temp_hrqe;
	struct lpfc_rqe *temp_drqe;
	struct lpfc_register doorbell;
	int put_index;

	/* sanity check on queue memory */
	if (unlikely(!hq) || unlikely(!dq))
		return -ENOMEM;
	put_index = hq->host_index;
	temp_hrqe = hq->qe[hq->host_index].rqe;
	temp_drqe = dq->qe[dq->host_index].rqe;

	/* hq and dq must be a matched header/data pair, kept in lockstep */
	if (hq->type != LPFC_HRQ || dq->type != LPFC_DRQ)
		return -EINVAL;
	if (hq->host_index != dq->host_index)
		return -EINVAL;
	/* If the host has not yet processed the next entry then we are done */
	if (((hq->host_index + 1) % hq->entry_count) == hq->hba_index)
		return -EBUSY;
	lpfc_sli_pcimem_bcopy(hrqe, temp_hrqe, hq->entry_size);
	lpfc_sli_pcimem_bcopy(drqe, temp_drqe, dq->entry_size);

	/* Update the host index to point to the next slot */
	hq->host_index = ((hq->host_index + 1) % hq->entry_count);
	dq->host_index = ((dq->host_index + 1) % dq->entry_count);

	/* Ring The Header Receive Queue Doorbell, once per entry_repost */
	if (!(hq->host_index % hq->entry_repost)) {
		doorbell.word0 = 0;
		if (hq->db_format == LPFC_DB_RING_FORMAT) {
			bf_set(lpfc_rq_db_ring_fm_num_posted, &doorbell,
			       hq->entry_repost);
			bf_set(lpfc_rq_db_ring_fm_id, &doorbell, hq->queue_id);
		} else if (hq->db_format == LPFC_DB_LIST_FORMAT) {
			bf_set(lpfc_rq_db_list_fm_num_posted, &doorbell,
			       hq->entry_repost);
			bf_set(lpfc_rq_db_list_fm_index, &doorbell,
			       hq->host_index);
			bf_set(lpfc_rq_db_list_fm_id, &doorbell, hq->queue_id);
		} else {
			return -EINVAL;
		}
		writel(doorbell.word0, hq->db_regaddr);
	}
	return put_index;
}
506
507
508
509
510
511
512
513
514
515
516
517static uint32_t
518lpfc_sli4_rq_release(struct lpfc_queue *hq, struct lpfc_queue *dq)
519{
520
521 if (unlikely(!hq) || unlikely(!dq))
522 return 0;
523
524 if ((hq->type != LPFC_HRQ) || (dq->type != LPFC_DRQ))
525 return 0;
526 hq->hba_index = ((hq->hba_index + 1) % hq->entry_count);
527 dq->hba_index = ((dq->hba_index + 1) % dq->entry_count);
528 return 1;
529}
530
531
532
533
534
535
536
537
538
539
540
/**
 * lpfc_cmd_iocb - Get next command iocb entry in the ring
 * @phba: Pointer to HBA context object.
 * @pring: Pointer to driver SLI ring object.
 *
 * Returns a pointer to the command ring slot at the ring's current
 * cmdidx.  Caller is responsible for index validity.
 **/
static inline IOCB_t *
lpfc_cmd_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring)
{
	return (IOCB_t *) (((char *) pring->sli.sli3.cmdringaddr) +
			   pring->sli.sli3.cmdidx * phba->iocb_cmd_size);
}
547
548
549
550
551
552
553
554
555
556
557
/**
 * lpfc_resp_iocb - Get next response iocb entry in the ring
 * @phba: Pointer to HBA context object.
 * @pring: Pointer to driver SLI ring object.
 *
 * Returns a pointer to the response ring slot at the ring's current
 * rspidx.  Caller is responsible for index validity.
 **/
static inline IOCB_t *
lpfc_resp_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring)
{
	return (IOCB_t *) (((char *) pring->sli.sli3.rspringaddr) +
			   pring->sli.sli3.rspidx * phba->iocb_rsp_size);
}
564
565
566
567
568
569
570
571
572
573
574struct lpfc_iocbq *
575__lpfc_sli_get_iocbq(struct lpfc_hba *phba)
576{
577 struct list_head *lpfc_iocb_list = &phba->lpfc_iocb_list;
578 struct lpfc_iocbq * iocbq = NULL;
579
580 lockdep_assert_held(&phba->hbalock);
581
582 list_remove_head(lpfc_iocb_list, iocbq, struct lpfc_iocbq, list);
583 if (iocbq)
584 phba->iocb_cnt++;
585 if (phba->iocb_cnt > phba->iocb_max)
586 phba->iocb_max = phba->iocb_cnt;
587 return iocbq;
588}
589
590
591
592
593
594
595
596
597
598
599
600
601
602static struct lpfc_sglq *
603__lpfc_clear_active_sglq(struct lpfc_hba *phba, uint16_t xritag)
604{
605 struct lpfc_sglq *sglq;
606
607 sglq = phba->sli4_hba.lpfc_sglq_active_list[xritag];
608 phba->sli4_hba.lpfc_sglq_active_list[xritag] = NULL;
609 return sglq;
610}
611
612
613
614
615
616
617
618
619
620
621
622
623
624struct lpfc_sglq *
625__lpfc_get_active_sglq(struct lpfc_hba *phba, uint16_t xritag)
626{
627 struct lpfc_sglq *sglq;
628
629 sglq = phba->sli4_hba.lpfc_sglq_active_list[xritag];
630 return sglq;
631}
632
633
634
635
636
637
638
639
640void
641lpfc_clr_rrq_active(struct lpfc_hba *phba,
642 uint16_t xritag,
643 struct lpfc_node_rrq *rrq)
644{
645 struct lpfc_nodelist *ndlp = NULL;
646
647 if ((rrq->vport) && NLP_CHK_NODE_ACT(rrq->ndlp))
648 ndlp = lpfc_findnode_did(rrq->vport, rrq->nlp_DID);
649
650
651
652
653
654 if ((!ndlp) && rrq->ndlp)
655 ndlp = rrq->ndlp;
656
657 if (!ndlp)
658 goto out;
659
660 if (test_and_clear_bit(xritag, ndlp->active_rrqs_xri_bitmap)) {
661 rrq->send_rrq = 0;
662 rrq->xritag = 0;
663 rrq->rrq_stop_time = 0;
664 }
665out:
666 mempool_free(rrq, phba->rrq_pool);
667}
668
669
670
671
672
673
674
675
676
677
678
679
680
681
682
/**
 * lpfc_handle_rrq_active - Checks if RRQ has waited RATOV.
 * @phba: Pointer to HBA context object.
 *
 * Under hbalock, moves every expired rrq from the HBA's active list onto a
 * private list and computes the earliest remaining expiry.  After dropping
 * the lock, rearms the rrq timer for that expiry (unless the port is
 * unloading) and then, lock-free, either sends an RRQ for each expired
 * entry or clears it directly.  lpfc_clr_rrq_active() frees each rrq.
 **/
void
lpfc_handle_rrq_active(struct lpfc_hba *phba)
{
	struct lpfc_node_rrq *rrq;
	struct lpfc_node_rrq *nextrrq;
	unsigned long next_time;
	unsigned long iflags;
	LIST_HEAD(send_rrq);

	spin_lock_irqsave(&phba->hbalock, iflags);
	phba->hba_flag &= ~HBA_RRQ_ACTIVE;
	/* Default next wakeup: one RATOV (+1s) from now */
	next_time = jiffies + msecs_to_jiffies(1000 * (phba->fc_ratov + 1));
	list_for_each_entry_safe(rrq, nextrrq,
				 &phba->active_rrq_list, list) {
		if (time_after(jiffies, rrq->rrq_stop_time))
			/* Expired: queue for processing outside the lock */
			list_move(&rrq->list, &send_rrq);
		else if (time_before(rrq->rrq_stop_time, next_time))
			next_time = rrq->rrq_stop_time;
	}
	spin_unlock_irqrestore(&phba->hbalock, iflags);
	if ((!list_empty(&phba->active_rrq_list)) &&
	    (!(phba->pport->load_flag & FC_UNLOADING)))
		mod_timer(&phba->rrq_tmr, next_time);
	list_for_each_entry_safe(rrq, nextrrq, &send_rrq, list) {
		list_del(&rrq->list);
		if (!rrq->send_rrq)
			/* this call will free the rrq */
			lpfc_clr_rrq_active(phba, rrq->xritag, rrq);
		else if (lpfc_send_rrq(phba, rrq)) {
			/* if we send the rrq then the completion handler
			 * will clear the bit in the xribitmap; on failure
			 * clear (and free) it here.
			 */
			lpfc_clr_rrq_active(phba, rrq->xritag,
					    rrq);
		}
	}
}
720
721
722
723
724
725
726
727
728
729
730struct lpfc_node_rrq *
731lpfc_get_active_rrq(struct lpfc_vport *vport, uint16_t xri, uint32_t did)
732{
733 struct lpfc_hba *phba = vport->phba;
734 struct lpfc_node_rrq *rrq;
735 struct lpfc_node_rrq *nextrrq;
736 unsigned long iflags;
737
738 if (phba->sli_rev != LPFC_SLI_REV4)
739 return NULL;
740 spin_lock_irqsave(&phba->hbalock, iflags);
741 list_for_each_entry_safe(rrq, nextrrq, &phba->active_rrq_list, list) {
742 if (rrq->vport == vport && rrq->xritag == xri &&
743 rrq->nlp_DID == did){
744 list_del(&rrq->list);
745 spin_unlock_irqrestore(&phba->hbalock, iflags);
746 return rrq;
747 }
748 }
749 spin_unlock_irqrestore(&phba->hbalock, iflags);
750 return NULL;
751}
752
753
754
755
756
757
758
759
760
761void
762lpfc_cleanup_vports_rrqs(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
763
764{
765 struct lpfc_hba *phba = vport->phba;
766 struct lpfc_node_rrq *rrq;
767 struct lpfc_node_rrq *nextrrq;
768 unsigned long iflags;
769 LIST_HEAD(rrq_list);
770
771 if (phba->sli_rev != LPFC_SLI_REV4)
772 return;
773 if (!ndlp) {
774 lpfc_sli4_vport_delete_els_xri_aborted(vport);
775 lpfc_sli4_vport_delete_fcp_xri_aborted(vport);
776 }
777 spin_lock_irqsave(&phba->hbalock, iflags);
778 list_for_each_entry_safe(rrq, nextrrq, &phba->active_rrq_list, list)
779 if ((rrq->vport == vport) && (!ndlp || rrq->ndlp == ndlp))
780 list_move(&rrq->list, &rrq_list);
781 spin_unlock_irqrestore(&phba->hbalock, iflags);
782
783 list_for_each_entry_safe(rrq, nextrrq, &rrq_list, list) {
784 list_del(&rrq->list);
785 lpfc_clr_rrq_active(phba, rrq->xritag, rrq);
786 }
787}
788
789
790
791
792
793
794
795
796
797
798
799int
800lpfc_test_rrq_active(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp,
801 uint16_t xritag)
802{
803 lockdep_assert_held(&phba->hbalock);
804 if (!ndlp)
805 return 0;
806 if (!ndlp->active_rrqs_xri_bitmap)
807 return 0;
808 if (test_bit(xritag, ndlp->active_rrqs_xri_bitmap))
809 return 1;
810 else
811 return 0;
812}
813
814
815
816
817
818
819
820
821
822
823
824
825
826
827
828
/**
 * lpfc_set_rrq_active - set RRQ active bit in xri_bitmap.
 * @phba: Pointer to HBA context object.
 * @ndlp: nodelist pointer for this target.
 * @xritag: xri used in this exchange.
 * @rxid: Remote Exchange ID.
 * @send_rrq: Flag used to determine if we should send rrq els cmd.
 *
 * Marks the xri active in the node's bitmap (under hbalock), then - with
 * the lock dropped so GFP_KERNEL is safe - allocates an rrq object,
 * fills it in and queues it on the HBA's active rrq list, waking the
 * worker thread if the list was empty.
 *
 * Return: 0 on success, -EINVAL on any failure (disabled RRQ, unloading,
 * node being freed, missing bitmap, bit already set, or alloc failure).
 **/
int
lpfc_set_rrq_active(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp,
		    uint16_t xritag, uint16_t rxid, uint16_t send_rrq)
{
	unsigned long iflags;
	struct lpfc_node_rrq *rrq;
	int empty;

	if (!ndlp)
		return -EINVAL;

	if (!phba->cfg_enable_rrq)
		return -EINVAL;

	spin_lock_irqsave(&phba->hbalock, iflags);
	if (phba->pport->load_flag & FC_UNLOADING) {
		phba->hba_flag &= ~HBA_RRQ_ACTIVE;
		goto out;
	}

	/*
	 * set the active bit even if there is no mem available.
	 */
	if (NLP_CHK_FREE_REQ(ndlp))
		goto out;

	if (ndlp->vport && (ndlp->vport->load_flag & FC_UNLOADING))
		goto out;

	if (!ndlp->active_rrqs_xri_bitmap)
		goto out;

	/* Bit already set: an RRQ is already outstanding for this xri */
	if (test_and_set_bit(xritag, ndlp->active_rrqs_xri_bitmap))
		goto out;

	/* Drop the lock so a blocking (GFP_KERNEL) allocation is safe */
	spin_unlock_irqrestore(&phba->hbalock, iflags);
	rrq = mempool_alloc(phba->rrq_pool, GFP_KERNEL);
	if (!rrq) {
		/* NOTE(review): on allocation failure the xri bit set above
		 * is left set - verify this is the intended behavior. */
		lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
				"3155 Unable to allocate RRQ xri:0x%x rxid:0x%x"
				" DID:0x%x Send:%d\n",
				xritag, rxid, ndlp->nlp_DID, send_rrq);
		return -EINVAL;
	}
	if (phba->cfg_enable_rrq == 1)
		rrq->send_rrq = send_rrq;
	else
		rrq->send_rrq = 0;
	rrq->xritag = xritag;
	/* Expire one RATOV (+1s) from now */
	rrq->rrq_stop_time = jiffies +
				msecs_to_jiffies(1000 * (phba->fc_ratov + 1));
	rrq->ndlp = ndlp;
	rrq->nlp_DID = ndlp->nlp_DID;
	rrq->vport = ndlp->vport;
	rrq->rxid = rxid;
	spin_lock_irqsave(&phba->hbalock, iflags);
	empty = list_empty(&phba->active_rrq_list);
	list_add_tail(&rrq->list, &phba->active_rrq_list);
	phba->hba_flag |= HBA_RRQ_ACTIVE;
	if (empty)
		/* First entry: kick the worker to start the rrq timer */
		lpfc_worker_wake_up(phba);
	spin_unlock_irqrestore(&phba->hbalock, iflags);
	return 0;
out:
	spin_unlock_irqrestore(&phba->hbalock, iflags);
	lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
			"2921 Can't set rrq active xri:0x%x rxid:0x%x"
			" DID:0x%x Send:%d\n",
			xritag, rxid, ndlp->nlp_DID, send_rrq);
	return -EINVAL;
}
900
901
902
903
904
905
906
907
908
909
910
/**
 * __lpfc_sli_get_sglq - Allocates an iocb object from sgl pool
 * @phba: Pointer to HBA context object.
 * @piocbq: Pointer to the iocbq.
 *
 * Removes a sglq from the free list, skipping (and re-queueing at the
 * tail) any sglq whose xri still has an active RRQ for the target node.
 * Stops after one full rotation of the list.  On success the sglq is
 * recorded in the active list and marked SGL_ALLOCATED.
 * Returns NULL if the pool is empty or every sglq is RRQ-blocked.
 * Caller must hold hbalock (asserted).
 **/
static struct lpfc_sglq *
__lpfc_sli_get_sglq(struct lpfc_hba *phba, struct lpfc_iocbq *piocbq)
{
	struct list_head *lpfc_sgl_list = &phba->sli4_hba.lpfc_sgl_list;
	struct lpfc_sglq *sglq = NULL;
	struct lpfc_sglq *start_sglq = NULL;
	struct lpfc_scsi_buf *lpfc_cmd;
	struct lpfc_nodelist *ndlp;
	int found = 0;

	lockdep_assert_held(&phba->hbalock);

	/* Resolve the target node; where it lives depends on the iocb type */
	if (piocbq->iocb_flag &  LPFC_IO_FCP) {
		lpfc_cmd = (struct lpfc_scsi_buf *) piocbq->context1;
		ndlp = lpfc_cmd->rdata->pnode;
	} else  if ((piocbq->iocb.ulpCommand == CMD_GEN_REQUEST64_CR) &&
			!(piocbq->iocb_flag & LPFC_IO_LIBDFC)) {
		ndlp = piocbq->context_un.ndlp;
	} else  if (piocbq->iocb_flag & LPFC_IO_LIBDFC) {
		if (piocbq->iocb_flag & LPFC_IO_LOOPBACK)
			ndlp = NULL;
		else
			ndlp = piocbq->context_un.ndlp;
	} else {
		ndlp = piocbq->context1;
	}

	list_remove_head(lpfc_sgl_list, sglq, struct lpfc_sglq, list);
	start_sglq = sglq;
	while (!found) {
		if (!sglq)
			return NULL;
		if (lpfc_test_rrq_active(phba, ndlp, sglq->sli4_lxritag)) {
			/* This xri has an rrq outstanding for this DID.
			 * put it back in the list and get another xri.
			 */
			list_add_tail(&sglq->list, lpfc_sgl_list);
			sglq = NULL;
			list_remove_head(lpfc_sgl_list, sglq,
						struct lpfc_sglq, list);
			if (sglq == start_sglq) {
				/* Full rotation: every sglq is blocked */
				sglq = NULL;
				break;
			} else
				continue;
		}
		sglq->ndlp = ndlp;
		found = 1;
		phba->sli4_hba.lpfc_sglq_active_list[sglq->sli4_lxritag] = sglq;
		sglq->state = SGL_ALLOCATED;
	}
	return sglq;
}
964
965
966
967
968
969
970
971
972
973
974struct lpfc_iocbq *
975lpfc_sli_get_iocbq(struct lpfc_hba *phba)
976{
977 struct lpfc_iocbq * iocbq = NULL;
978 unsigned long iflags;
979
980 spin_lock_irqsave(&phba->hbalock, iflags);
981 iocbq = __lpfc_sli_get_iocbq(phba);
982 spin_unlock_irqrestore(&phba->hbalock, iflags);
983 return iocbq;
984}
985
986
987
988
989
990
991
992
993
994
995
996
997
998
999
1000
1001
1002
1003
/**
 * __lpfc_sli_release_iocbq_s4 - Release iocb to the iocb pool (SLI-4)
 * @phba: Pointer to HBA context object.
 * @iocbq: Pointer to driver iocb object.
 *
 * Releases the iocbq's sglq: if the exchange is still busy on the wire
 * (LPFC_EXCHANGE_BUSY and not already XRI-aborted) the sglq is parked on
 * the aborted-els list until the xri abort completes; otherwise it goes
 * straight back to the free sgl list and the worker is woken if the ELS
 * txq has pending work.  The iocbq itself is then scrubbed (everything
 * from the iocb member onward) and returned to the free iocb list.
 * Caller must hold hbalock (asserted); the sgl/ring locks are taken here.
 **/
static void
__lpfc_sli_release_iocbq_s4(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq)
{
	struct lpfc_sglq *sglq;
	size_t start_clean = offsetof(struct lpfc_iocbq, iocb);
	unsigned long iflag = 0;
	struct lpfc_sli_ring *pring = &phba->sli.ring[LPFC_ELS_RING];

	lockdep_assert_held(&phba->hbalock);

	if (iocbq->sli4_xritag == NO_XRI)
		sglq = NULL;
	else
		sglq = __lpfc_clear_active_sglq(phba, iocbq->sli4_lxritag);

	if (sglq)  {
		if ((iocbq->iocb_flag & LPFC_EXCHANGE_BUSY) &&
			(sglq->state != SGL_XRI_ABORTED)) {
			/* Exchange still live: defer until xri abort cmpl */
			spin_lock_irqsave(&phba->sli4_hba.abts_sgl_list_lock,
					iflag);
			list_add(&sglq->list,
				&phba->sli4_hba.lpfc_abts_els_sgl_list);
			spin_unlock_irqrestore(
				&phba->sli4_hba.abts_sgl_list_lock, iflag);
		} else {
			spin_lock_irqsave(&pring->ring_lock, iflag);
			sglq->state = SGL_FREED;
			sglq->ndlp = NULL;
			list_add_tail(&sglq->list,
				&phba->sli4_hba.lpfc_sgl_list);
			spin_unlock_irqrestore(&pring->ring_lock, iflag);

			/* Check if TXQ queue needs to be serviced */
			if (!list_empty(&pring->txq))
				lpfc_worker_wake_up(phba);
		}
	}


	/*
	 * Clean all volatile data fields, preserve iotag and node struct.
	 */
	memset((char *)iocbq + start_clean, 0, sizeof(*iocbq) - start_clean);
	iocbq->sli4_lxritag = NO_XRI;
	iocbq->sli4_xritag = NO_XRI;
	list_add_tail(&iocbq->list, &phba->lpfc_iocb_list);
}
1052
1053
1054
1055
1056
1057
1058
1059
1060
1061
1062
1063
1064static void
1065__lpfc_sli_release_iocbq_s3(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq)
1066{
1067 size_t start_clean = offsetof(struct lpfc_iocbq, iocb);
1068
1069 lockdep_assert_held(&phba->hbalock);
1070
1071
1072
1073
1074 memset((char*)iocbq + start_clean, 0, sizeof(*iocbq) - start_clean);
1075 iocbq->sli4_xritag = NO_XRI;
1076 list_add_tail(&iocbq->list, &phba->lpfc_iocb_list);
1077}
1078
1079
1080
1081
1082
1083
1084
1085
1086
1087
1088
/**
 * __lpfc_sli_release_iocbq - Release iocb to the iocb pool
 * @phba: Pointer to HBA context object.
 * @iocbq: Pointer to driver iocb object.
 *
 * Dispatches to the SLI-rev specific release routine installed in @phba
 * (s3 or s4 variant), then decrements the outstanding-iocb counter.
 * Caller must hold hbalock (asserted).
 **/
static void
__lpfc_sli_release_iocbq(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq)
{
	lockdep_assert_held(&phba->hbalock);

	phba->__lpfc_sli_release_iocbq(phba, iocbq);
	phba->iocb_cnt--;
}
1097
1098
1099
1100
1101
1102
1103
1104
1105
1106void
1107lpfc_sli_release_iocbq(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq)
1108{
1109 unsigned long iflags;
1110
1111
1112
1113
1114 spin_lock_irqsave(&phba->hbalock, iflags);
1115 __lpfc_sli_release_iocbq(phba, iocbq);
1116 spin_unlock_irqrestore(&phba->hbalock, iflags);
1117}
1118
1119
1120
1121
1122
1123
1124
1125
1126
1127
1128
1129
1130
1131void
1132lpfc_sli_cancel_iocbs(struct lpfc_hba *phba, struct list_head *iocblist,
1133 uint32_t ulpstatus, uint32_t ulpWord4)
1134{
1135 struct lpfc_iocbq *piocb;
1136
1137 while (!list_empty(iocblist)) {
1138 list_remove_head(iocblist, piocb, struct lpfc_iocbq, list);
1139 if (!piocb->iocb_cmpl)
1140 lpfc_sli_release_iocbq(phba, piocb);
1141 else {
1142 piocb->iocb.ulpStatus = ulpstatus;
1143 piocb->iocb.un.ulpWord[4] = ulpWord4;
1144 (piocb->iocb_cmpl) (phba, piocb, piocb);
1145 }
1146 }
1147 return;
1148}
1149
1150
1151
1152
1153
1154
1155
1156
1157
1158
1159
1160
1161
1162
1163
1164
1165static lpfc_iocb_type
1166lpfc_sli_iocb_cmd_type(uint8_t iocb_cmnd)
1167{
1168 lpfc_iocb_type type = LPFC_UNKNOWN_IOCB;
1169
1170 if (iocb_cmnd > CMD_MAX_IOCB_CMD)
1171 return 0;
1172
1173 switch (iocb_cmnd) {
1174 case CMD_XMIT_SEQUENCE_CR:
1175 case CMD_XMIT_SEQUENCE_CX:
1176 case CMD_XMIT_BCAST_CN:
1177 case CMD_XMIT_BCAST_CX:
1178 case CMD_ELS_REQUEST_CR:
1179 case CMD_ELS_REQUEST_CX:
1180 case CMD_CREATE_XRI_CR:
1181 case CMD_CREATE_XRI_CX:
1182 case CMD_GET_RPI_CN:
1183 case CMD_XMIT_ELS_RSP_CX:
1184 case CMD_GET_RPI_CR:
1185 case CMD_FCP_IWRITE_CR:
1186 case CMD_FCP_IWRITE_CX:
1187 case CMD_FCP_IREAD_CR:
1188 case CMD_FCP_IREAD_CX:
1189 case CMD_FCP_ICMND_CR:
1190 case CMD_FCP_ICMND_CX:
1191 case CMD_FCP_TSEND_CX:
1192 case CMD_FCP_TRSP_CX:
1193 case CMD_FCP_TRECEIVE_CX:
1194 case CMD_FCP_AUTO_TRSP_CX:
1195 case CMD_ADAPTER_MSG:
1196 case CMD_ADAPTER_DUMP:
1197 case CMD_XMIT_SEQUENCE64_CR:
1198 case CMD_XMIT_SEQUENCE64_CX:
1199 case CMD_XMIT_BCAST64_CN:
1200 case CMD_XMIT_BCAST64_CX:
1201 case CMD_ELS_REQUEST64_CR:
1202 case CMD_ELS_REQUEST64_CX:
1203 case CMD_FCP_IWRITE64_CR:
1204 case CMD_FCP_IWRITE64_CX:
1205 case CMD_FCP_IREAD64_CR:
1206 case CMD_FCP_IREAD64_CX:
1207 case CMD_FCP_ICMND64_CR:
1208 case CMD_FCP_ICMND64_CX:
1209 case CMD_FCP_TSEND64_CX:
1210 case CMD_FCP_TRSP64_CX:
1211 case CMD_FCP_TRECEIVE64_CX:
1212 case CMD_GEN_REQUEST64_CR:
1213 case CMD_GEN_REQUEST64_CX:
1214 case CMD_XMIT_ELS_RSP64_CX:
1215 case DSSCMD_IWRITE64_CR:
1216 case DSSCMD_IWRITE64_CX:
1217 case DSSCMD_IREAD64_CR:
1218 case DSSCMD_IREAD64_CX:
1219 type = LPFC_SOL_IOCB;
1220 break;
1221 case CMD_ABORT_XRI_CN:
1222 case CMD_ABORT_XRI_CX:
1223 case CMD_CLOSE_XRI_CN:
1224 case CMD_CLOSE_XRI_CX:
1225 case CMD_XRI_ABORTED_CX:
1226 case CMD_ABORT_MXRI64_CN:
1227 case CMD_XMIT_BLS_RSP64_CX:
1228 type = LPFC_ABORT_IOCB;
1229 break;
1230 case CMD_RCV_SEQUENCE_CX:
1231 case CMD_RCV_ELS_REQ_CX:
1232 case CMD_RCV_SEQUENCE64_CX:
1233 case CMD_RCV_ELS_REQ64_CX:
1234 case CMD_ASYNC_STATUS:
1235 case CMD_IOCB_RCV_SEQ64_CX:
1236 case CMD_IOCB_RCV_ELS64_CX:
1237 case CMD_IOCB_RCV_CONT64_CX:
1238 case CMD_IOCB_RET_XRI64_CX:
1239 type = LPFC_UNSOL_IOCB;
1240 break;
1241 case CMD_IOCB_XMIT_MSEQ64_CR:
1242 case CMD_IOCB_XMIT_MSEQ64_CX:
1243 case CMD_IOCB_RCV_SEQ_LIST64_CX:
1244 case CMD_IOCB_RCV_ELS_LIST64_CX:
1245 case CMD_IOCB_CLOSE_EXTENDED_CN:
1246 case CMD_IOCB_ABORT_EXTENDED_CN:
1247 case CMD_IOCB_RET_HBQE64_CN:
1248 case CMD_IOCB_FCP_IBIDIR64_CR:
1249 case CMD_IOCB_FCP_IBIDIR64_CX:
1250 case CMD_IOCB_FCP_ITASKMGT64_CX:
1251 case CMD_IOCB_LOGENTRY_CN:
1252 case CMD_IOCB_LOGENTRY_ASYNC_CN:
1253 printk("%s - Unhandled SLI-3 Command x%x\n",
1254 __func__, iocb_cmnd);
1255 type = LPFC_UNKNOWN_IOCB;
1256 break;
1257 default:
1258 type = LPFC_UNKNOWN_IOCB;
1259 break;
1260 }
1261
1262 return type;
1263}
1264
1265
1266
1267
1268
1269
1270
1271
1272
1273
1274
1275
1276static int
1277lpfc_sli_ring_map(struct lpfc_hba *phba)
1278{
1279 struct lpfc_sli *psli = &phba->sli;
1280 LPFC_MBOXQ_t *pmb;
1281 MAILBOX_t *pmbox;
1282 int i, rc, ret = 0;
1283
1284 pmb = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
1285 if (!pmb)
1286 return -ENOMEM;
1287 pmbox = &pmb->u.mb;
1288 phba->link_state = LPFC_INIT_MBX_CMDS;
1289 for (i = 0; i < psli->num_rings; i++) {
1290 lpfc_config_ring(phba, i, pmb);
1291 rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL);
1292 if (rc != MBX_SUCCESS) {
1293 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
1294 "0446 Adapter failed to init (%d), "
1295 "mbxCmd x%x CFG_RING, mbxStatus x%x, "
1296 "ring %d\n",
1297 rc, pmbox->mbxCommand,
1298 pmbox->mbxStatus, i);
1299 phba->link_state = LPFC_HBA_ERROR;
1300 ret = -ENXIO;
1301 break;
1302 }
1303 }
1304 mempool_free(pmb, phba->mbox_mem_pool);
1305 return ret;
1306}
1307
1308
1309
1310
1311
1312
1313
1314
1315
1316
1317
1318
1319
1320static int
1321lpfc_sli_ringtxcmpl_put(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
1322 struct lpfc_iocbq *piocb)
1323{
1324 lockdep_assert_held(&phba->hbalock);
1325
1326 list_add_tail(&piocb->list, &pring->txcmplq);
1327 piocb->iocb_flag |= LPFC_IO_ON_TXCMPLQ;
1328
1329 if ((unlikely(pring->ringno == LPFC_ELS_RING)) &&
1330 (piocb->iocb.ulpCommand != CMD_ABORT_XRI_CN) &&
1331 (piocb->iocb.ulpCommand != CMD_CLOSE_XRI_CN) &&
1332 (!(piocb->vport->load_flag & FC_UNLOADING))) {
1333 if (!piocb->vport)
1334 BUG();
1335 else
1336 mod_timer(&piocb->vport->els_tmofunc,
1337 jiffies +
1338 msecs_to_jiffies(1000 * (phba->fc_ratov << 1)));
1339 }
1340
1341
1342 return 0;
1343}
1344
1345
1346
1347
1348
1349
1350
1351
1352
1353
1354
/**
 * lpfc_sli_ringtx_get - Get first element of the txq
 * @phba: Pointer to HBA context object.
 * @pring: Pointer to driver SLI ring object.
 *
 * Removes and returns the first iocb on the ring's txq, or NULL when the
 * txq is empty.  Caller must hold hbalock (asserted).
 **/
struct lpfc_iocbq *
lpfc_sli_ringtx_get(struct lpfc_hba *phba, struct lpfc_sli_ring *pring)
{
	struct lpfc_iocbq *cmd_iocb;

	lockdep_assert_held(&phba->hbalock);

	list_remove_head((&pring->txq), cmd_iocb, struct lpfc_iocbq, list);
	return cmd_iocb;
}
1365
1366
1367
1368
1369
1370
1371
1372
1373
1374
1375
1376
1377
1378
1379
/**
 * lpfc_sli_next_iocb_slot - Get next iocb slot in the ring
 * @phba: Pointer to HBA context object.
 * @pring: Pointer to driver SLI ring object.
 *
 * Returns a pointer to the next free command-ring slot, or NULL when the
 * ring is full.  When the cached consumer index suggests the ring may be
 * full, the adapter's cmdGetInx is re-read; an out-of-range value from
 * the adapter puts the HBA into error state and wakes the worker thread.
 * Caller must hold hbalock (asserted).
 **/
static IOCB_t *
lpfc_sli_next_iocb_slot (struct lpfc_hba *phba, struct lpfc_sli_ring *pring)
{
	struct lpfc_pgp *pgp = &phba->port_gp[pring->ringno];
	uint32_t  max_cmd_idx = pring->sli.sli3.numCiocb;

	lockdep_assert_held(&phba->hbalock);

	/* Advance next_cmdidx (with wrap) past the current slot */
	if ((pring->sli.sli3.next_cmdidx == pring->sli.sli3.cmdidx) &&
	   (++pring->sli.sli3.next_cmdidx >= max_cmd_idx))
		pring->sli.sli3.next_cmdidx = 0;

	if (unlikely(pring->sli.sli3.local_getidx ==
		pring->sli.sli3.next_cmdidx)) {
		/* Possibly full: refresh the consumer index from the port */
		pring->sli.sli3.local_getidx = le32_to_cpu(pgp->cmdGetInx);

		if (unlikely(pring->sli.sli3.local_getidx >= max_cmd_idx)) {
			lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
					"0315 Ring %d issue: portCmdGet %d "
					"is bigger than cmd ring %d\n",
					pring->ringno,
					pring->sli.sli3.local_getidx,
					max_cmd_idx);

			phba->link_state = LPFC_HBA_ERROR;
			/*
			 * All error attention handlers are posted to
			 * worker thread
			 */
			phba->work_ha |= HA_ERATT;
			phba->work_hs = HS_FFER3;

			lpfc_worker_wake_up(phba);

			return NULL;
		}

		if (pring->sli.sli3.local_getidx == pring->sli.sli3.next_cmdidx)
			return NULL;
	}

	return lpfc_cmd_iocb(phba, pring);
}
1424
1425
1426
1427
1428
1429
1430
1431
1432
1433
1434
1435
1436
1437uint16_t
1438lpfc_sli_next_iotag(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq)
1439{
1440 struct lpfc_iocbq **new_arr;
1441 struct lpfc_iocbq **old_arr;
1442 size_t new_len;
1443 struct lpfc_sli *psli = &phba->sli;
1444 uint16_t iotag;
1445
1446 spin_lock_irq(&phba->hbalock);
1447 iotag = psli->last_iotag;
1448 if(++iotag < psli->iocbq_lookup_len) {
1449 psli->last_iotag = iotag;
1450 psli->iocbq_lookup[iotag] = iocbq;
1451 spin_unlock_irq(&phba->hbalock);
1452 iocbq->iotag = iotag;
1453 return iotag;
1454 } else if (psli->iocbq_lookup_len < (0xffff
1455 - LPFC_IOCBQ_LOOKUP_INCREMENT)) {
1456 new_len = psli->iocbq_lookup_len + LPFC_IOCBQ_LOOKUP_INCREMENT;
1457 spin_unlock_irq(&phba->hbalock);
1458 new_arr = kzalloc(new_len * sizeof (struct lpfc_iocbq *),
1459 GFP_KERNEL);
1460 if (new_arr) {
1461 spin_lock_irq(&phba->hbalock);
1462 old_arr = psli->iocbq_lookup;
1463 if (new_len <= psli->iocbq_lookup_len) {
1464
1465 kfree(new_arr);
1466 iotag = psli->last_iotag;
1467 if(++iotag < psli->iocbq_lookup_len) {
1468 psli->last_iotag = iotag;
1469 psli->iocbq_lookup[iotag] = iocbq;
1470 spin_unlock_irq(&phba->hbalock);
1471 iocbq->iotag = iotag;
1472 return iotag;
1473 }
1474 spin_unlock_irq(&phba->hbalock);
1475 return 0;
1476 }
1477 if (psli->iocbq_lookup)
1478 memcpy(new_arr, old_arr,
1479 ((psli->last_iotag + 1) *
1480 sizeof (struct lpfc_iocbq *)));
1481 psli->iocbq_lookup = new_arr;
1482 psli->iocbq_lookup_len = new_len;
1483 psli->last_iotag = iotag;
1484 psli->iocbq_lookup[iotag] = iocbq;
1485 spin_unlock_irq(&phba->hbalock);
1486 iocbq->iotag = iotag;
1487 kfree(old_arr);
1488 return iotag;
1489 }
1490 } else
1491 spin_unlock_irq(&phba->hbalock);
1492
1493 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
1494 "0318 Failed to allocate IOTAG.last IOTAG is %d\n",
1495 psli->last_iotag);
1496
1497 return 0;
1498}
1499
1500
1501
1502
1503
1504
1505
1506
1507
1508
1509
1510
1511
1512
1513
/**
 * lpfc_sli_submit_iocb - Submit an iocb to the firmware
 * @phba: Pointer to HBA context object.
 * @pring: Pointer to driver SLI ring object.
 * @iocb: Pointer to iocb slot in the ring.
 * @nextiocb: Pointer to driver iocb object which need to be
 *            posted to firmware.
 *
 * Copies @nextiocb into the ring slot @iocb, queues it on the txcmplq if
 * it has a completion handler (otherwise releases it immediately), and
 * advances the ring's cmdPutInx so the HBA sees the new entry.
 * Caller must hold hbalock (asserted).
 **/
static void
lpfc_sli_submit_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
		IOCB_t *iocb, struct lpfc_iocbq *nextiocb)
{
	lockdep_assert_held(&phba->hbalock);

	/*
	 * Set up an iotag: only commands with a completion handler get one.
	 */
	nextiocb->iocb.ulpIoTag = (nextiocb->iocb_cmpl) ? nextiocb->iotag : 0;


	if (pring->ringno == LPFC_ELS_RING) {
		lpfc_debugfs_slow_ring_trc(phba,
			"IOCB cmd ring:   wd4:x%08x wd6:x%08x wd7:x%08x",
			*(((uint32_t *) &nextiocb->iocb) + 4),
			*(((uint32_t *) &nextiocb->iocb) + 6),
			*(((uint32_t *) &nextiocb->iocb) + 7));
	}

	/*
	 * Issue iocb command to adapter; the wmb() orders the ring-memory
	 * copy before the index update below becomes visible.
	 */
	lpfc_sli_pcimem_bcopy(&nextiocb->iocb, iocb, phba->iocb_cmd_size);
	wmb();
	pring->stats.iocb_cmd++;

	/*
	 * If there is no completion routine to call, we can release the
	 * IOCB buffer back right now. For IOCBs, like QUE_RING_BUF,
	 * that have no rsp ring completion, iocb_cmpl MUST be NULL.
	 */
	if (nextiocb->iocb_cmpl)
		lpfc_sli_ringtxcmpl_put(phba, pring, nextiocb);
	else
		__lpfc_sli_release_iocbq(phba, nextiocb);

	/*
	 * Let the HBA know what IOCB slot will be the next one the
	 * driver will put a command into.
	 */
	pring->sli.sli3.cmdidx = pring->sli.sli3.next_cmdidx;
	writel(pring->sli.sli3.cmdidx, &phba->host_gp[pring->ringno].cmdPutInx);
}
1557
1558
1559
1560
1561
1562
1563
1564
1565
1566
1567
1568
1569
/**
 * lpfc_sli_update_full_ring - Ring the chip, requesting ring-available
 * @phba: Pointer to HBA context object.
 * @pring: Pointer to driver SLI ring object.
 *
 * Called when the ring is full: flags the ring so the driver gets
 * notified when slots free up, rings the chip attention register for
 * this ring and bumps the ring-full statistic.
 */
static void
lpfc_sli_update_full_ring(struct lpfc_hba *phba, struct lpfc_sli_ring *pring)
{
	int ringno = pring->ringno;

	pring->flag |= LPFC_CALL_RING_AVAILABLE;

	wmb();	/* order flag update before the doorbell write */

	/*
	 * Set ring 'ringno' to SET R0CE_REQ in Chip Att register.
	 * The HBA will tell us when an iocb entry is available.
	 */
	writel((CA_R0ATT|CA_R0CE_REQ) << (ringno*4), phba->CAregaddr);
	readl(phba->CAregaddr); /* flush the posted write */

	pring->stats.iocb_cmd_full++;
}
1588
1589
1590
1591
1592
1593
1594
1595
1596
1597
/**
 * lpfc_sli_update_ring - Ring the chip attention register for a ring
 * @phba: Pointer to HBA context object.
 * @pring: Pointer to driver SLI ring object.
 *
 * Tells the HBA that there may be work to do on @pring. The doorbell
 * write is skipped when LPFC_SLI3_CRP_ENABLED is set -- presumably the
 * port then polls the put index itself; confirm against the SLI spec.
 */
static void
lpfc_sli_update_ring(struct lpfc_hba *phba, struct lpfc_sli_ring *pring)
{
	int ringno = pring->ringno;

	/*
	 * Tell the HBA that there is work to do in this ring.
	 */
	if (!(phba->sli3_options & LPFC_SLI3_CRP_ENABLED)) {
		wmb();	/* make ring updates visible before the doorbell */
		writel(CA_R0ATT << (ringno * 4), phba->CAregaddr);
		readl(phba->CAregaddr); /* flush the posted write */
	}
}
1612
1613
1614
1615
1616
1617
1618
1619
1620
1621
/**
 * lpfc_sli_resume_iocb - Submit queued iocbs to the HBA
 * @phba: Pointer to HBA context object.
 * @pring: Pointer to driver SLI ring object.
 *
 * Drains @pring's txq into free ring slots while the link is up (the
 * FCP ring additionally requires LPFC_PROCESS_LA). If the ring fills
 * before the txq empties, lpfc_sli_update_full_ring() arms a
 * ring-available notification; otherwise the chip is simply chimed via
 * lpfc_sli_update_ring(). Caller must hold hbalock (lockdep-asserted).
 */
static void
lpfc_sli_resume_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring)
{
	IOCB_t *iocb;
	struct lpfc_iocbq *nextiocb;

	lockdep_assert_held(&phba->hbalock);

	/*
	 * Check to see if:
	 *  (a) there is anything on the txq to send
	 *  (b) link is up
	 *  (c) link attention events can be processed (fcp ring only)
	 */
	if (lpfc_is_link_up(phba) &&
	    (!list_empty(&pring->txq)) &&
	    (pring->ringno != phba->sli.fcp_ring ||
	     phba->sli.sli_flag & LPFC_PROCESS_LA)) {

		/* Post until either the txq or the ring is exhausted */
		while ((iocb = lpfc_sli_next_iocb_slot(phba, pring)) &&
		       (nextiocb = lpfc_sli_ringtx_get(phba, pring)))
			lpfc_sli_submit_iocb(phba, pring, iocb, nextiocb);

		/* iocb != NULL means the ring still had room */
		if (iocb)
			lpfc_sli_update_ring(phba, pring);
		else
			lpfc_sli_update_full_ring(phba, pring);
	}

	return;
}
1655
1656
1657
1658
1659
1660
1661
1662
1663
1664
1665
/**
 * lpfc_sli_next_hbq_slot - Get next available HBQ entry
 * @phba: Pointer to HBA context object.
 * @hbqno: HBQ number.
 *
 * Returns a pointer to the next free slot of HBQ @hbqno, or NULL when
 * the queue is full. When put would catch up with the locally cached
 * get index, the get index is refreshed from the port (phba->hbq_get);
 * an out-of-range value from the port is treated as fatal and puts the
 * HBA into error state. Caller must hold hbalock (lockdep-asserted).
 */
static struct lpfc_hbq_entry *
lpfc_sli_next_hbq_slot(struct lpfc_hba *phba, uint32_t hbqno)
{
	struct hbq_s *hbqp = &phba->hbqs[hbqno];

	lockdep_assert_held(&phba->hbalock);

	/* Advance next_hbqPutIdx, wrapping at entry_count */
	if (hbqp->next_hbqPutIdx == hbqp->hbqPutIdx &&
	    ++hbqp->next_hbqPutIdx >= hbqp->entry_count)
		hbqp->next_hbqPutIdx = 0;

	/* Queue looks full: refresh the get index from the port */
	if (unlikely(hbqp->local_hbqGetIdx == hbqp->next_hbqPutIdx)) {
		uint32_t raw_index = phba->hbq_get[hbqno];
		uint32_t getidx = le32_to_cpu(raw_index);

		hbqp->local_hbqGetIdx = getidx;

		if (unlikely(hbqp->local_hbqGetIdx >= hbqp->entry_count)) {
			/* Port returned a bogus index -- fatal */
			lpfc_printf_log(phba, KERN_ERR,
					LOG_SLI | LOG_VPORT,
					"1802 HBQ %d: local_hbqGetIdx "
					"%u is > than hbqp->entry_count %u\n",
					hbqno, hbqp->local_hbqGetIdx,
					hbqp->entry_count);

			phba->link_state = LPFC_HBA_ERROR;
			return NULL;
		}

		if (hbqp->local_hbqGetIdx == hbqp->next_hbqPutIdx)
			return NULL;
	}

	return (struct lpfc_hbq_entry *) phba->hbqs[hbqno].hbq_virt +
			hbqp->hbqPutIdx;
}
1702
1703
1704
1705
1706
1707
1708
1709
1710
1711
/**
 * lpfc_sli_hbqbuf_free_all - Free all the HBQ buffers
 * @phba: Pointer to HBA context object.
 *
 * Frees every buffer on each HBQ's buffer list and on the pending
 * receive list (rb_pend_list), then marks the HBQs not-in-use. The
 * high 16 bits of a buffer's tag select which HBQ's free routine is
 * used; untagged (-1) or out-of-range tags fall back to the ELS HBQ
 * free routine. Runs under hbalock with interrupts disabled.
 */
void
lpfc_sli_hbqbuf_free_all(struct lpfc_hba *phba)
{
	struct lpfc_dmabuf *dmabuf, *next_dmabuf;
	struct hbq_dmabuf *hbq_buf;
	unsigned long flags;
	int i, hbq_count;
	uint32_t hbqno;

	hbq_count = lpfc_sli_hbq_count();
	/* Return all memory used by all HBQs */
	spin_lock_irqsave(&phba->hbalock, flags);
	for (i = 0; i < hbq_count; ++i) {
		list_for_each_entry_safe(dmabuf, next_dmabuf,
				&phba->hbqs[i].hbq_buffer_list, list) {
			hbq_buf = container_of(dmabuf, struct hbq_dmabuf, dbuf);
			list_del(&hbq_buf->dbuf.list);
			(phba->hbqs[i].hbq_free_buffer)(phba, hbq_buf);
		}
		phba->hbqs[i].buffer_count = 0;
	}
	/* Return all HBQ buffer that are in-fly */
	list_for_each_entry_safe(dmabuf, next_dmabuf, &phba->rb_pend_list,
				 list) {
		hbq_buf = container_of(dmabuf, struct hbq_dmabuf, dbuf);
		list_del(&hbq_buf->dbuf.list);
		if (hbq_buf->tag == -1) {
			(phba->hbqs[LPFC_ELS_HBQ].hbq_free_buffer)
				(phba, hbq_buf);
		} else {
			hbqno = hbq_buf->tag >> 16;
			if (hbqno >= LPFC_MAX_HBQS)
				(phba->hbqs[LPFC_ELS_HBQ].hbq_free_buffer)
					(phba, hbq_buf);
			else
				(phba->hbqs[hbqno].hbq_free_buffer)(phba,
					hbq_buf);
		}
	}

	/* Mark the HBQs not in use */
	phba->hbq_in_use = 0;
	spin_unlock_irqrestore(&phba->hbalock, flags);
}
1756
1757
1758
1759
1760
1761
1762
1763
1764
1765
1766
1767
1768
/**
 * lpfc_sli_hbq_to_firmware - Post an HBQ buffer to the port
 * @phba: Pointer to HBA context object.
 * @hbqno: HBQ number.
 * @hbq_buf: Pointer to the HBQ buffer.
 *
 * Thin dispatcher to the SLI-rev specific handler installed in
 * phba->lpfc_sli_hbq_to_firmware (the _s3/_s4 variants below).
 * Caller must hold hbalock (lockdep-asserted). Returns 0 on success.
 */
static int
lpfc_sli_hbq_to_firmware(struct lpfc_hba *phba, uint32_t hbqno,
			 struct hbq_dmabuf *hbq_buf)
{
	lockdep_assert_held(&phba->hbalock);
	return phba->lpfc_sli_hbq_to_firmware(phba, hbqno, hbq_buf);
}
1776
1777
1778
1779
1780
1781
1782
1783
1784
1785
1786
1787
/**
 * lpfc_sli_hbq_to_firmware_s3 - Post an HBQ buffer to SLI-3 firmware
 * @phba: Pointer to HBA context object.
 * @hbqno: HBQ number.
 * @hbq_buf: Pointer to the HBQ buffer.
 *
 * Writes the buffer's physical address, size and tag into the next
 * free HBQ entry, advances the put index and rings the HBQ doorbell
 * (phba->hbq_put). On success the buffer is moved onto the HBQ's
 * buffer list. Returns -ENOMEM when no HBQ slot is free. Caller must
 * hold hbalock (lockdep-asserted).
 */
static int
lpfc_sli_hbq_to_firmware_s3(struct lpfc_hba *phba, uint32_t hbqno,
			    struct hbq_dmabuf *hbq_buf)
{
	struct lpfc_hbq_entry *hbqe;
	dma_addr_t physaddr = hbq_buf->dbuf.phys;

	lockdep_assert_held(&phba->hbalock);
	/* Get next HBQ entry slot to use */
	hbqe = lpfc_sli_next_hbq_slot(phba, hbqno);
	if (hbqe) {
		struct hbq_s *hbqp = &phba->hbqs[hbqno];

		hbqe->bde.addrHigh = le32_to_cpu(putPaddrHigh(physaddr));
		hbqe->bde.addrLow  = le32_to_cpu(putPaddrLow(physaddr));
		hbqe->bde.tus.f.bdeSize = hbq_buf->size;
		hbqe->bde.tus.f.bdeFlags = 0;
		hbqe->bde.tus.w = le32_to_cpu(hbqe->bde.tus.w);
		hbqe->buffer_tag = le32_to_cpu(hbq_buf->tag);
		/* Sync SLIM: publish the new put index to the port */
		hbqp->hbqPutIdx = hbqp->next_hbqPutIdx;
		writel(hbqp->hbqPutIdx, phba->hbq_put + hbqno);
		/* flush */
		readl(phba->hbq_put + hbqno);
		list_add_tail(&hbq_buf->dbuf.list, &hbqp->hbq_buffer_list);
		return 0;
	} else
		return -ENOMEM;
}
1817
1818
1819
1820
1821
1822
1823
1824
1825
1826
1827
/**
 * lpfc_sli_hbq_to_firmware_s4 - Post an HBQ buffer to SLI-4 firmware
 * @phba: Pointer to HBA context object.
 * @hbqno: HBQ number.
 * @hbq_buf: Pointer to the HBQ buffer.
 *
 * Builds header/data receive queue entries from the buffer's hbuf and
 * dbuf DMA addresses and posts them to the hdr/dat receive queues.
 * The RQE index returned by lpfc_sli4_rq_put() becomes the buffer's
 * tag. Returns 0 on success or the negative error from the RQ put.
 * Caller must hold hbalock (lockdep-asserted).
 */
static int
lpfc_sli_hbq_to_firmware_s4(struct lpfc_hba *phba, uint32_t hbqno,
			    struct hbq_dmabuf *hbq_buf)
{
	int rc;
	struct lpfc_rqe hrqe;
	struct lpfc_rqe drqe;

	lockdep_assert_held(&phba->hbalock);
	hrqe.address_lo = putPaddrLow(hbq_buf->hbuf.phys);
	hrqe.address_hi = putPaddrHigh(hbq_buf->hbuf.phys);
	drqe.address_lo = putPaddrLow(hbq_buf->dbuf.phys);
	drqe.address_hi = putPaddrHigh(hbq_buf->dbuf.phys);
	rc = lpfc_sli4_rq_put(phba->sli4_hba.hdr_rq, phba->sli4_hba.dat_rq,
			      &hrqe, &drqe);
	if (rc < 0)
		return rc;
	hbq_buf->tag = rc;	/* RQE index identifies the buffer later */
	list_add_tail(&hbq_buf->dbuf.list, &phba->hbqs[hbqno].hbq_buffer_list);
	return 0;
}
1849
1850
/* HBQ definition for receive buffers on the ELS ring */
static struct lpfc_hbq_init lpfc_els_hbq = {
	.rn = 1,
	.entry_count = 256,
	.mask_count = 0,
	.profile = 0,
	.ring_mask = (1 << LPFC_ELS_RING),
	.buffer_count = 0,
	.init_count = 40,
	.add_count = 40,
};
1861
1862
/* HBQ definition for receive buffers on the extra ring */
static struct lpfc_hbq_init lpfc_extra_hbq = {
	.rn = 1,
	.entry_count = 200,
	.mask_count = 0,
	.profile = 0,
	.ring_mask = (1 << LPFC_EXTRA_RING),
	.buffer_count = 0,
	.init_count = 0,
	.add_count = 5,
};
1873
1874
/* Table of HBQ definitions, indexed by HBQ number */
struct lpfc_hbq_init *lpfc_hbq_defs[] = {
	&lpfc_els_hbq,
	&lpfc_extra_hbq,
};
1879
1880
1881
1882
1883
1884
1885
1886
1887
1888
1889
/**
 * lpfc_sli_hbqbuf_fill_hbqs - Post buffers to an HBQ
 * @phba: Pointer to HBA context object.
 * @hbqno: HBQ number.
 * @count: Number of buffers requested.
 *
 * Allocates up to @count buffers (clamped so the HBQ never exceeds its
 * entry_count), then posts them to the firmware under hbalock. Buffers
 * that cannot be posted -- or all of them, if the HBQs were torn down
 * in the meantime (!hbq_in_use) -- are freed. Returns the number of
 * buffers successfully posted.
 */
static int
lpfc_sli_hbqbuf_fill_hbqs(struct lpfc_hba *phba, uint32_t hbqno, uint32_t count)
{
	uint32_t i, posted = 0;
	unsigned long flags;
	struct hbq_dmabuf *hbq_buffer;
	LIST_HEAD(hbq_buf_list);
	if (!phba->hbqs[hbqno].hbq_alloc_buffer)
		return 0;

	/* Clamp so buffer_count never exceeds the HBQ's entry_count */
	if ((phba->hbqs[hbqno].buffer_count + count) >
	    lpfc_hbq_defs[hbqno]->entry_count)
		count = lpfc_hbq_defs[hbqno]->entry_count -
					phba->hbqs[hbqno].buffer_count;
	if (!count)
		return 0;
	/* Allocate HBQ entries outside the lock */
	for (i = 0; i < count; i++) {
		hbq_buffer = (phba->hbqs[hbqno].hbq_alloc_buffer)(phba);
		if (!hbq_buffer)
			break;
		list_add_tail(&hbq_buffer->dbuf.list, &hbq_buf_list);
	}
	/* Check whether HBQ is still in use */
	spin_lock_irqsave(&phba->hbalock, flags);
	if (!phba->hbq_in_use)
		goto err;
	while (!list_empty(&hbq_buf_list)) {
		list_remove_head(&hbq_buf_list, hbq_buffer, struct hbq_dmabuf,
				 dbuf.list);
		/* Tag = low 16 bits buffer index, high 16 bits HBQ number */
		hbq_buffer->tag = (phba->hbqs[hbqno].buffer_count |
				      (hbqno << 16));
		if (!lpfc_sli_hbq_to_firmware(phba, hbqno, hbq_buffer)) {
			phba->hbqs[hbqno].buffer_count++;
			posted++;
		} else
			(phba->hbqs[hbqno].hbq_free_buffer)(phba, hbq_buffer);
	}
	spin_unlock_irqrestore(&phba->hbalock, flags);
	return posted;
err:
	spin_unlock_irqrestore(&phba->hbalock, flags);
	while (!list_empty(&hbq_buf_list)) {
		list_remove_head(&hbq_buf_list, hbq_buffer, struct hbq_dmabuf,
				 dbuf.list);
		(phba->hbqs[hbqno].hbq_free_buffer)(phba, hbq_buffer);
	}
	return 0;
}
1939
1940
1941
1942
1943
1944
1945
1946
1947
1948
1949int
1950lpfc_sli_hbqbuf_add_hbqs(struct lpfc_hba *phba, uint32_t qno)
1951{
1952 if (phba->sli_rev == LPFC_SLI_REV4)
1953 return 0;
1954 else
1955 return lpfc_sli_hbqbuf_fill_hbqs(phba, qno,
1956 lpfc_hbq_defs[qno]->add_count);
1957}
1958
1959
1960
1961
1962
1963
1964
1965
1966
1967
1968static int
1969lpfc_sli_hbqbuf_init_hbqs(struct lpfc_hba *phba, uint32_t qno)
1970{
1971 if (phba->sli_rev == LPFC_SLI_REV4)
1972 return lpfc_sli_hbqbuf_fill_hbqs(phba, qno,
1973 lpfc_hbq_defs[qno]->entry_count);
1974 else
1975 return lpfc_sli_hbqbuf_fill_hbqs(phba, qno,
1976 lpfc_hbq_defs[qno]->init_count);
1977}
1978
1979
1980
1981
1982
1983
1984
1985
1986
/**
 * lpfc_sli_hbqbuf_get - Remove the first buffer from an HBQ list
 * @rb_list: List of HBQ buffers.
 *
 * Pops the head of @rb_list and returns the enclosing hbq_dmabuf, or
 * NULL when the list is empty. Locking is the caller's responsibility.
 */
static struct hbq_dmabuf *
lpfc_sli_hbqbuf_get(struct list_head *rb_list)
{
	struct lpfc_dmabuf *d_buf;

	list_remove_head(rb_list, d_buf, struct lpfc_dmabuf, list);
	if (!d_buf)
		return NULL;
	return container_of(d_buf, struct hbq_dmabuf, dbuf);
}
1997
1998
1999
2000
2001
2002
2003
2004
2005
2006
2007
2008static struct hbq_dmabuf *
2009lpfc_sli_hbqbuf_find(struct lpfc_hba *phba, uint32_t tag)
2010{
2011 struct lpfc_dmabuf *d_buf;
2012 struct hbq_dmabuf *hbq_buf;
2013 uint32_t hbqno;
2014
2015 lockdep_assert_held(&phba->hbalock);
2016
2017 hbqno = tag >> 16;
2018 if (hbqno >= LPFC_MAX_HBQS)
2019 return NULL;
2020
2021 spin_lock_irq(&phba->hbalock);
2022 list_for_each_entry(d_buf, &phba->hbqs[hbqno].hbq_buffer_list, list) {
2023 hbq_buf = container_of(d_buf, struct hbq_dmabuf, dbuf);
2024 if (hbq_buf->tag == tag) {
2025 spin_unlock_irq(&phba->hbalock);
2026 return hbq_buf;
2027 }
2028 }
2029 spin_unlock_irq(&phba->hbalock);
2030 lpfc_printf_log(phba, KERN_ERR, LOG_SLI | LOG_VPORT,
2031 "1803 Bad hbq tag. Data: x%x x%x\n",
2032 tag, phba->hbqs[tag >> 16].buffer_count);
2033 return NULL;
2034}
2035
2036
2037
2038
2039
2040
2041
2042
2043
2044
/**
 * lpfc_sli_free_hbq - Give back an HBQ buffer
 * @phba: Pointer to HBA context object.
 * @hbq_buffer: Pointer to the HBQ buffer (may be NULL).
 *
 * Tries to re-post @hbq_buffer to the HBQ selected by the top 16 bits
 * of its tag; if the post fails, the buffer is freed instead.
 * NOTE(review): lpfc_sli_hbq_to_firmware() asserts hbalock is held,
 * so callers of this routine must hold it too -- confirm call sites.
 */
void
lpfc_sli_free_hbq(struct lpfc_hba *phba, struct hbq_dmabuf *hbq_buffer)
{
	uint32_t hbqno;

	if (hbq_buffer) {
		hbqno = hbq_buffer->tag >> 16;
		if (lpfc_sli_hbq_to_firmware(phba, hbqno, hbq_buffer))
			(phba->hbqs[hbqno].hbq_free_buffer)(phba, hbq_buffer);
	}
}
2056
2057
2058
2059
2060
2061
2062
2063
2064
2065
/**
 * lpfc_sli_chk_mbx_command - Check if the mailbox command is known
 * @mbxCommand: Mailbox command code.
 *
 * Returns @mbxCommand unchanged when it is one of the commands the
 * driver understands, or MBX_SHUTDOWN for anything else.
 */
static int
lpfc_sli_chk_mbx_command(uint8_t mbxCommand)
{
	uint8_t ret;

	switch (mbxCommand) {
	case MBX_LOAD_SM:
	case MBX_READ_NV:
	case MBX_WRITE_NV:
	case MBX_WRITE_VPARMS:
	case MBX_RUN_BIU_DIAG:
	case MBX_INIT_LINK:
	case MBX_DOWN_LINK:
	case MBX_CONFIG_LINK:
	case MBX_CONFIG_RING:
	case MBX_RESET_RING:
	case MBX_READ_CONFIG:
	case MBX_READ_RCONFIG:
	case MBX_READ_SPARM:
	case MBX_READ_STATUS:
	case MBX_READ_RPI:
	case MBX_READ_XRI:
	case MBX_READ_REV:
	case MBX_READ_LNK_STAT:
	case MBX_REG_LOGIN:
	case MBX_UNREG_LOGIN:
	case MBX_CLEAR_LA:
	case MBX_DUMP_MEMORY:
	case MBX_DUMP_CONTEXT:
	case MBX_RUN_DIAGS:
	case MBX_RESTART:
	case MBX_UPDATE_CFG:
	case MBX_DOWN_LOAD:
	case MBX_DEL_LD_ENTRY:
	case MBX_RUN_PROGRAM:
	case MBX_SET_MASK:
	case MBX_SET_VARIABLE:
	case MBX_UNREG_D_ID:
	case MBX_KILL_BOARD:
	case MBX_CONFIG_FARP:
	case MBX_BEACON:
	case MBX_LOAD_AREA:
	case MBX_RUN_BIU_DIAG64:
	case MBX_CONFIG_PORT:
	case MBX_READ_SPARM64:
	case MBX_READ_RPI64:
	case MBX_REG_LOGIN64:
	case MBX_READ_TOPOLOGY:
	case MBX_WRITE_WWN:
	case MBX_SET_DEBUG:
	case MBX_LOAD_EXP_ROM:
	case MBX_ASYNCEVT_ENABLE:
	case MBX_REG_VPI:
	case MBX_UNREG_VPI:
	case MBX_HEARTBEAT:
	case MBX_PORT_CAPABILITIES:
	case MBX_PORT_IOV_CONTROL:
	case MBX_SLI4_CONFIG:
	case MBX_SLI4_REQ_FTRS:
	case MBX_REG_FCFI:
	case MBX_UNREG_FCFI:
	case MBX_REG_VFI:
	case MBX_UNREG_VFI:
	case MBX_INIT_VPI:
	case MBX_INIT_VFI:
	case MBX_RESUME_RPI:
	case MBX_READ_EVENT_LOG_STATUS:
	case MBX_READ_EVENT_LOG:
	case MBX_SECURITY_MGMT:
	case MBX_AUTH_PORT:
	case MBX_ACCESS_VDATA:
		ret = mbxCommand;
		break;
	default:
		ret = MBX_SHUTDOWN;
		break;
	}
	return ret;
}
2145
2146
2147
2148
2149
2150
2151
2152
2153
2154
2155
2156
/**
 * lpfc_sli_wake_mbox_wait - Completion handler for synchronous mailboxes
 * @phba: Pointer to HBA context object.
 * @pmboxq: Pointer to the mailbox object.
 *
 * Sets LPFC_MBX_WAKE and wakes the thread sleeping on the wait queue
 * stashed in pmboxq->context1, if any. The wait-queue pointer is read
 * under hbalock; a NULL pointer means the waiter already gave up
 * (e.g. timed out) and continued running.
 */
void
lpfc_sli_wake_mbox_wait(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmboxq)
{
	wait_queue_head_t *pdone_q;
	unsigned long drvr_flag;

	/*
	 * If pdone_q is empty, the driver thread gave up waiting and
	 * continued running.
	 */
	pmboxq->mbox_flag |= LPFC_MBX_WAKE;
	spin_lock_irqsave(&phba->hbalock, drvr_flag);
	pdone_q = (wait_queue_head_t *) pmboxq->context1;
	if (pdone_q)
		wake_up_interruptible(pdone_q);
	spin_unlock_irqrestore(&phba->hbalock, drvr_flag);
	return;
}
2175
2176
2177
2178
2179
2180
2181
2182
2183
2184
2185
2186
/**
 * lpfc_sli_def_mbox_cmpl - Default mailbox completion handler
 * @phba: Pointer to HBA context object.
 * @pmb: Pointer to the completed mailbox object.
 *
 * Frees the DMA buffer attached in context1. For a successful
 * REG_LOGIN64 while not unloading, recycles the mailbox to issue the
 * matching UNREG_LOGIN. For a successful REG_VPI, marks the vport
 * registered. Drops the nodelist reference taken for REG_LOGIN64,
 * warns when INIT_LINK failed for lack of SLI authentication, and
 * finally releases the mailbox object.
 */
void
lpfc_sli_def_mbox_cmpl(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
{
	struct lpfc_vport  *vport = pmb->vport;
	struct lpfc_dmabuf *mp;
	struct lpfc_nodelist *ndlp;
	struct Scsi_Host *shost;
	uint16_t rpi, vpi;
	int rc;

	mp = (struct lpfc_dmabuf *) (pmb->context1);

	if (mp) {
		lpfc_mbuf_free(phba, mp->virt, mp->phys);
		kfree(mp);
	}

	/*
	 * If a REG_LOGIN succeeded after the HBA's removal was requested,
	 * the rpi must still be cleaned up with an UNREG_LOGIN. Reuse this
	 * mailbox for that, completing back through this same handler.
	 */
	if (!(phba->pport->load_flag & FC_UNLOADING) &&
	    pmb->u.mb.mbxCommand == MBX_REG_LOGIN64 &&
	    !pmb->u.mb.mbxStatus) {
		rpi = pmb->u.mb.un.varWords[0];
		vpi = pmb->u.mb.un.varRegLogin.vpi;
		lpfc_unreg_login(phba, vpi, rpi, pmb);
		pmb->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
		rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT);
		if (rc != MBX_NOT_FINISHED)
			return;
	}

	if ((pmb->u.mb.mbxCommand == MBX_REG_VPI) &&
		!(phba->pport->load_flag & FC_UNLOADING) &&
		!pmb->u.mb.mbxStatus) {
		shost = lpfc_shost_from_vport(vport);
		spin_lock_irq(shost->host_lock);
		vport->vpi_state |= LPFC_VPI_REGISTERED;
		vport->fc_flag &= ~FC_VPORT_NEEDS_REG_VPI;
		spin_unlock_irq(shost->host_lock);
	}

	/* Drop the node reference held across REG_LOGIN64 */
	if (pmb->u.mb.mbxCommand == MBX_REG_LOGIN64) {
		ndlp = (struct lpfc_nodelist *)pmb->context2;
		lpfc_nlp_put(ndlp);
		pmb->context2 = NULL;
	}

	/* Check security permission status on INIT_LINK mailbox command */
	if ((pmb->u.mb.mbxCommand == MBX_INIT_LINK) &&
	    (pmb->u.mb.mbxStatus == MBXERR_SEC_NO_PERMISSION))
		lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
				"2860 SLI authentication is required "
				"for INIT_LINK but has not done yet\n");

	if (bf_get(lpfc_mqe_command, &pmb->u.mqe) == MBX_SLI4_CONFIG)
		lpfc_sli4_mbox_cmd_free(phba, pmb);
	else
		mempool_free(pmb, phba->mbox_mem_pool);
}
2248
2249
2250
2251
2252
2253
2254
2255
2256
2257
2258
2259
2260
/**
 * lpfc_sli4_unreg_rpi_cmpl_clr - UNREG_LOGIN mailbox completion handler
 * @phba: Pointer to HBA context object.
 * @pmb: Pointer to the completed mailbox object.
 *
 * On SLI-4 if-type-2 ports, logs the unreg, clears NLP_LOGO_ACC and
 * drops the node reference held in context1 across the UNREG_LOGIN
 * mailbox, then frees the mailbox object.
 */
void
lpfc_sli4_unreg_rpi_cmpl_clr(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
{
	struct lpfc_vport  *vport = pmb->vport;
	struct lpfc_nodelist *ndlp;

	ndlp = pmb->context1;
	if (pmb->u.mb.mbxCommand == MBX_UNREG_LOGIN) {
		if (phba->sli_rev == LPFC_SLI_REV4 &&
		    (bf_get(lpfc_sli_intf_if_type,
		     &phba->sli4_hba.sli_intf) ==
		     LPFC_SLI_INTF_IF_TYPE_2)) {
			if (ndlp) {
				lpfc_printf_vlog(vport, KERN_INFO, LOG_SLI,
						 "0010 UNREG_LOGIN vpi:%x "
						 "rpi:%x DID:%x map:%x %p\n",
						 vport->vpi, ndlp->nlp_rpi,
						 ndlp->nlp_DID,
						 ndlp->nlp_usg_map, ndlp);
				ndlp->nlp_flag &= ~NLP_LOGO_ACC;
				lpfc_nlp_put(ndlp);
			}
		}
	}

	mempool_free(pmb, phba->mbox_mem_pool);
}
2288
2289
2290
2291
2292
2293
2294
2295
2296
2297
2298
2299
2300
2301
/**
 * lpfc_sli_handle_mb_event - Process completed mailbox commands
 * @phba: Pointer to HBA context object.
 *
 * Splices the mboxq_cmpl list under hbalock, then processes each
 * completed mailbox: traces the completion, puts the HBA into error
 * state on an unknown command, retries commands that failed with
 * MBXERR_NO_RESOURCES, and invokes the command's mbox_cmpl callback.
 * Always returns 0.
 */
int
lpfc_sli_handle_mb_event(struct lpfc_hba *phba)
{
	MAILBOX_t *pmbox;
	LPFC_MBOXQ_t *pmb;
	int rc;
	LIST_HEAD(cmplq);

	phba->sli.slistat.mbox_event++;

	/* Take all completed mailboxes off the list in one shot */
	spin_lock_irq(&phba->hbalock);
	list_splice_init(&phba->sli.mboxq_cmpl, &cmplq);
	spin_unlock_irq(&phba->hbalock);

	/* Get a Mailbox buffer to setup mailbox commands for callback */
	do {
		list_remove_head(&cmplq, pmb, LPFC_MBOXQ_t, list);
		if (pmb == NULL)
			break;

		pmbox = &pmb->u.mb;

		if (pmbox->mbxCommand != MBX_HEARTBEAT) {
			if (pmb->vport) {
				lpfc_debugfs_disc_trc(pmb->vport,
					LPFC_DISC_TRC_MBOX_VPORT,
					"MBOX cmpl vport: cmd:x%x mb:x%x x%x",
					(uint32_t)pmbox->mbxCommand,
					pmbox->un.varWords[0],
					pmbox->un.varWords[1]);
			}
			else {
				lpfc_debugfs_disc_trc(phba->pport,
					LPFC_DISC_TRC_MBOX,
					"MBOX cmpl:       cmd:x%x mb:x%x x%x",
					(uint32_t)pmbox->mbxCommand,
					pmbox->un.varWords[0],
					pmbox->un.varWords[1]);
			}
		}

		/*
		 * An unknown command completing is fatal: the mailbox
		 * stream is out of sync with the port.
		 */
		if (lpfc_sli_chk_mbx_command(pmbox->mbxCommand) ==
		    MBX_SHUTDOWN) {
			/* Unknown mailbox command compl */
			lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
					"(%d):0323 Unknown Mailbox command "
					"x%x (x%x/x%x) Cmpl\n",
					pmb->vport ? pmb->vport->vpi : 0,
					pmbox->mbxCommand,
					lpfc_sli_config_mbox_subsys_get(phba,
									pmb),
					lpfc_sli_config_mbox_opcode_get(phba,
									pmb));
			phba->link_state = LPFC_HBA_ERROR;
			phba->work_hs = HS_FFER3;
			lpfc_handle_eratt(phba);
			continue;
		}

		if (pmbox->mbxStatus) {
			phba->sli.slistat.mbox_stat_err++;
			if (pmbox->mbxStatus == MBXERR_NO_RESOURCES) {
				/* Mbox cmd cmpl error - RETRYing */
				lpfc_printf_log(phba, KERN_INFO,
					LOG_MBOX | LOG_SLI,
					"(%d):0305 Mbox cmd cmpl "
					"error - RETRYing Data: x%x "
					"(x%x/x%x) x%x x%x x%x\n",
					pmb->vport ? pmb->vport->vpi : 0,
					pmbox->mbxCommand,
					lpfc_sli_config_mbox_subsys_get(phba,
									pmb),
					lpfc_sli_config_mbox_opcode_get(phba,
									pmb),
					pmbox->mbxStatus,
					pmbox->un.varWords[0],
					pmb->vport->port_state);
				pmbox->mbxStatus = 0;
				pmbox->mbxOwner = OWN_HOST;
				rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT);
				if (rc != MBX_NOT_FINISHED)
					continue;
			}
		}

		/* Mailbox cmd <cmd> Cmpl <cmpl> */
		lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI,
				"(%d):0307 Mailbox cmd x%x (x%x/x%x) Cmpl x%p "
				"Data: x%x x%x x%x x%x x%x x%x x%x x%x x%x "
				"x%x x%x x%x\n",
				pmb->vport ? pmb->vport->vpi : 0,
				pmbox->mbxCommand,
				lpfc_sli_config_mbox_subsys_get(phba, pmb),
				lpfc_sli_config_mbox_opcode_get(phba, pmb),
				pmb->mbox_cmpl,
				*((uint32_t *) pmbox),
				pmbox->un.varWords[0],
				pmbox->un.varWords[1],
				pmbox->un.varWords[2],
				pmbox->un.varWords[3],
				pmbox->un.varWords[4],
				pmbox->un.varWords[5],
				pmbox->un.varWords[6],
				pmbox->un.varWords[7],
				pmbox->un.varWords[8],
				pmbox->un.varWords[9],
				pmbox->un.varWords[10]);

		if (pmb->mbox_cmpl)
			pmb->mbox_cmpl(phba,pmb);
	} while (1);
	return 0;
}
2419
2420
2421
2422
2423
2424
2425
2426
2427
2428
2429
2430
2431
/**
 * lpfc_sli_get_buff - Map a buffer tag to its driver buffer
 * @phba: Pointer to HBA context object.
 * @pring: Pointer to driver SLI ring object.
 * @tag: Buffer tag.
 *
 * Tags with QUE_BUFTAG_BIT set come from the ring's tagged-buffer
 * list; all other tags identify an HBQ buffer. Returns the matching
 * DMA buffer, or NULL if none is found.
 */
static struct lpfc_dmabuf *
lpfc_sli_get_buff(struct lpfc_hba *phba,
		  struct lpfc_sli_ring *pring,
		  uint32_t tag)
{
	struct hbq_dmabuf *hbq_entry;

	if (tag & QUE_BUFTAG_BIT)
		return lpfc_sli_ring_taggedbuf_get(phba, pring, tag);
	hbq_entry = lpfc_sli_hbqbuf_find(phba, tag);
	if (!hbq_entry)
		return NULL;
	return &hbq_entry->dbuf;
}
2446
2447
2448
2449
2450
2451
2452
2453
2454
2455
2456
2457
2458
/**
 * lpfc_complete_unsol_iocb - Dispatch an unsolicited iocb to a handler
 * @phba: Pointer to HBA context object.
 * @pring: Pointer to driver SLI ring object.
 * @saveq: Pointer to the unsolicited iocb.
 * @fch_r_ctl: R_CTL field from the FC header.
 * @fch_type: TYPE field from the FC header.
 *
 * If ring mask slot 0 has a profile its handler receives everything;
 * otherwise the (r_ctl, type) pair is matched against the ring's
 * masks. Returns 1 when a mask matched (even if that mask had no
 * callback registered), 0 when nothing matched.
 */
static int
lpfc_complete_unsol_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
			 struct lpfc_iocbq *saveq, uint32_t fch_r_ctl,
			 uint32_t fch_type)
{
	int i;

	/* unSolicited Responses */
	if (pring->prt[0].profile) {
		if (pring->prt[0].lpfc_sli_rcv_unsol_event)
			(pring->prt[0].lpfc_sli_rcv_unsol_event) (phba, pring,
									saveq);
		return 1;
	}

	/* We must search, based on rctl / type, for the right routine */
	for (i = 0; i < pring->num_mask; i++) {
		if ((pring->prt[i].rctl == fch_r_ctl) &&
		    (pring->prt[i].type == fch_type)) {
			if (pring->prt[i].lpfc_sli_rcv_unsol_event)
				(pring->prt[i].lpfc_sli_rcv_unsol_event)
						(phba, pring, saveq);
			return 1;
		}
	}
	return 0;
}
2486
2487
2488
2489
2490
2491
2492
2493
2494
2495
2496
2497
2498
2499
2500
/**
 * lpfc_sli_process_unsol_iocb - Handle an unsolicited iocb
 * @phba: Pointer to HBA context object.
 * @pring: Pointer to driver SLI ring object.
 * @saveq: Pointer to the unsolicited iocb.
 *
 * Handles async status iocbs, RET_XRI buffer returns, attaches HBQ
 * receive buffers to the iocb(s), stitches multi-iocb sequences
 * together on iocb_continue_saveq (keyed by ox_id), derives the FC
 * R_CTL/TYPE and hands the completed sequence to
 * lpfc_complete_unsol_iocb(). Returns 1 when the iocb was consumed,
 * 0 when a continuation is still pending.
 */
static int
lpfc_sli_process_unsol_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
			    struct lpfc_iocbq *saveq)
{
	IOCB_t           * irsp;
	WORD5            * w5p;
	uint32_t           Rctl, Type;
	struct lpfc_iocbq *iocbq;
	struct lpfc_dmabuf *dmzbuf;

	irsp = &(saveq->iocb);

	/* Async status events have their own ring handler */
	if (irsp->ulpCommand == CMD_ASYNC_STATUS) {
		if (pring->lpfc_sli_rcv_async_status)
			pring->lpfc_sli_rcv_async_status(phba, pring, saveq);
		else
			lpfc_printf_log(phba,
					KERN_WARNING,
					LOG_SLI,
					"0316 Ring %d handler: unexpected "
					"ASYNC_STATUS iocb received evt_code "
					"0x%x\n",
					pring->ringno,
					irsp->un.asyncstat.evt_code);
		return 1;
	}

	/* RET_XRI just gives buffers back; free up to three of them */
	if ((irsp->ulpCommand == CMD_IOCB_RET_XRI64_CX) &&
		(phba->sli3_options & LPFC_SLI3_HBQ_ENABLED)) {
		if (irsp->ulpBdeCount > 0) {
			dmzbuf = lpfc_sli_get_buff(phba, pring,
					irsp->un.ulpWord[3]);
			lpfc_in_buf_free(phba, dmzbuf);
		}

		if (irsp->ulpBdeCount > 1) {
			dmzbuf = lpfc_sli_get_buff(phba, pring,
					irsp->unsli3.sli3Words[3]);
			lpfc_in_buf_free(phba, dmzbuf);
		}

		if (irsp->ulpBdeCount > 2) {
			dmzbuf = lpfc_sli_get_buff(phba, pring,
				irsp->unsli3.sli3Words[7]);
			lpfc_in_buf_free(phba, dmzbuf);
		}

		return 1;
	}

	/* Attach the HBQ receive buffers referenced by the iocb tags */
	if (phba->sli3_options & LPFC_SLI3_HBQ_ENABLED) {
		if (irsp->ulpBdeCount != 0) {
			saveq->context2 = lpfc_sli_get_buff(phba, pring,
						irsp->un.ulpWord[3]);
			if (!saveq->context2)
				lpfc_printf_log(phba,
					KERN_ERR,
					LOG_SLI,
					"0341 Ring %d Cannot find buffer for "
					"an unsolicited iocb. tag 0x%x\n",
					pring->ringno,
					irsp->un.ulpWord[3]);
		}
		if (irsp->ulpBdeCount == 2) {
			saveq->context3 = lpfc_sli_get_buff(phba, pring,
						irsp->unsli3.sli3Words[7]);
			if (!saveq->context3)
				lpfc_printf_log(phba,
					KERN_ERR,
					LOG_SLI,
					"0342 Ring %d Cannot find buffer for an"
					" unsolicited iocb. tag 0x%x\n",
					pring->ringno,
					irsp->unsli3.sli3Words[7]);
		}
		/* Same for every chained continuation iocb */
		list_for_each_entry(iocbq, &saveq->list, list) {
			irsp = &(iocbq->iocb);
			if (irsp->ulpBdeCount != 0) {
				iocbq->context2 = lpfc_sli_get_buff(phba, pring,
							irsp->un.ulpWord[3]);
				if (!iocbq->context2)
					lpfc_printf_log(phba,
						KERN_ERR,
						LOG_SLI,
						"0343 Ring %d Cannot find "
						"buffer for an unsolicited iocb"
						". tag 0x%x\n", pring->ringno,
						irsp->un.ulpWord[3]);
			}
			if (irsp->ulpBdeCount == 2) {
				iocbq->context3 = lpfc_sli_get_buff(phba, pring,
						irsp->unsli3.sli3Words[7]);
				if (!iocbq->context3)
					lpfc_printf_log(phba,
						KERN_ERR,
						LOG_SLI,
						"0344 Ring %d Cannot find "
						"buffer for an unsolicited "
						"iocb. tag 0x%x\n",
						pring->ringno,
						irsp->unsli3.sli3Words[7]);
			}
		}
	}
	/*
	 * Multi-iocb sequence: chain this iocb onto the saved sequence
	 * with the same ox_id, or start a new saved sequence. Only when
	 * the final (non-INTERMED_RSP) piece arrives is the whole
	 * sequence dispatched below.
	 */
	if (irsp->ulpBdeCount != 0 &&
	    (irsp->ulpCommand == CMD_IOCB_RCV_CONT64_CX ||
	     irsp->ulpStatus == IOSTAT_INTERMED_RSP)) {
		int found = 0;

		/* search continue save q for same XRI */
		list_for_each_entry(iocbq, &pring->iocb_continue_saveq, clist) {
			if (iocbq->iocb.unsli3.rcvsli3.ox_id ==
				saveq->iocb.unsli3.rcvsli3.ox_id) {
				list_add_tail(&saveq->list, &iocbq->list);
				found = 1;
				break;
			}
		}
		if (!found)
			list_add_tail(&saveq->clist,
				      &pring->iocb_continue_saveq);
		if (saveq->iocb.ulpStatus != IOSTAT_INTERMED_RSP) {
			list_del_init(&iocbq->clist);
			saveq = iocbq;
			irsp = &(saveq->iocb);
		} else
			return 0;
	}
	if ((irsp->ulpCommand == CMD_RCV_ELS_REQ64_CX) ||
	    (irsp->ulpCommand == CMD_RCV_ELS_REQ_CX) ||
	    (irsp->ulpCommand == CMD_IOCB_RCV_ELS64_CX)) {
		Rctl = FC_RCTL_ELS_REQ;
		Type = FC_TYPE_ELS;
	} else {
		w5p = (WORD5 *)&(saveq->iocb.un.ulpWord[5]);
		Rctl = w5p->hcsw.Rctl;
		Type = w5p->hcsw.Type;

		/* Firmware Workaround */
		if ((Rctl == 0) && (pring->ringno == LPFC_ELS_RING) &&
			(irsp->ulpCommand == CMD_RCV_SEQUENCE64_CX ||
			 irsp->ulpCommand == CMD_IOCB_RCV_SEQ64_CX)) {
			Rctl = FC_RCTL_ELS_REQ;
			Type = FC_TYPE_ELS;
			w5p->hcsw.Rctl = Rctl;
			w5p->hcsw.Type = Type;
		}
	}

	if (!lpfc_complete_unsol_iocb(phba, pring, saveq, Rctl, Type))
		lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
				"0313 Ring %d handler: unexpected Rctl x%x "
				"Type x%x received\n",
				pring->ringno, Rctl, Type);

	return 1;
}
2658
2659
2660
2661
2662
2663
2664
2665
2666
2667
2668
2669
2670
/**
 * lpfc_sli_iocbq_lookup - Find the command iocb for a response iocb
 * @phba: Pointer to HBA context object.
 * @pring: Pointer to driver SLI ring object.
 * @prspiocb: Pointer to the response iocb.
 *
 * Looks up the command iocb by the response's ulpIoTag, unlinks it
 * from whatever list it is on and clears LPFC_IO_ON_TXCMPLQ. Returns
 * NULL (after logging) for a tag outside [1, last_iotag]. Caller must
 * hold hbalock (lockdep-asserted).
 */
static struct lpfc_iocbq *
lpfc_sli_iocbq_lookup(struct lpfc_hba *phba,
		      struct lpfc_sli_ring *pring,
		      struct lpfc_iocbq *prspiocb)
{
	struct lpfc_iocbq *cmd_iocb = NULL;
	uint16_t iotag;
	lockdep_assert_held(&phba->hbalock);

	iotag = prspiocb->iocb.ulpIoTag;

	if (iotag != 0 && iotag <= phba->sli.last_iotag) {
		cmd_iocb = phba->sli.iocbq_lookup[iotag];
		list_del_init(&cmd_iocb->list);
		if (cmd_iocb->iocb_flag & LPFC_IO_ON_TXCMPLQ) {
			cmd_iocb->iocb_flag &= ~LPFC_IO_ON_TXCMPLQ;
		}
		return cmd_iocb;
	}

	lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
			"0317 iotag x%x is out off "
			"range: max iotag x%x wd0 x%x\n",
			iotag, phba->sli.last_iotag,
			*(((uint32_t *) &prspiocb->iocb) + 7));
	return NULL;
}
2698
2699
2700
2701
2702
2703
2704
2705
2706
2707
2708
2709
2710
/**
 * lpfc_sli_iocbq_lookup_by_tag - Find a command iocb by iotag
 * @phba: Pointer to HBA context object.
 * @pring: Pointer to driver SLI ring object.
 * @iotag: Iotag of the iocb.
 *
 * Like lpfc_sli_iocbq_lookup() but takes the iotag directly, and only
 * returns the iocb when it is still on the txcmplq (unlinking it and
 * clearing LPFC_IO_ON_TXCMPLQ). Returns NULL (after logging) for a bad
 * tag or an iocb not on the txcmplq. Caller must hold hbalock.
 */
static struct lpfc_iocbq *
lpfc_sli_iocbq_lookup_by_tag(struct lpfc_hba *phba,
			     struct lpfc_sli_ring *pring, uint16_t iotag)
{
	struct lpfc_iocbq *cmd_iocb;

	lockdep_assert_held(&phba->hbalock);
	if (iotag != 0 && iotag <= phba->sli.last_iotag) {
		cmd_iocb = phba->sli.iocbq_lookup[iotag];
		if (cmd_iocb->iocb_flag & LPFC_IO_ON_TXCMPLQ) {
			/* remove from txcmpl queue list */
			list_del_init(&cmd_iocb->list);
			cmd_iocb->iocb_flag &= ~LPFC_IO_ON_TXCMPLQ;
			return cmd_iocb;
		}
	}
	lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
			"0372 iotag x%x is out off range: max iotag (x%x)\n",
			iotag, phba->sli.last_iotag);
	return NULL;
}
2732
2733
2734
2735
2736
2737
2738
2739
2740
2741
2742
2743
2744
2745
2746
2747
2748
2749
/**
 * lpfc_sli_process_sol_iocb - Process a solicited (response) iocb
 * @phba: Pointer to HBA context object.
 * @pring: Pointer to driver SLI ring object.
 * @saveq: Pointer to the response iocb.
 *
 * Looks up the originating command iocb by iotag and calls its
 * completion handler. For the ELS ring it also reconciles driver-abort
 * and exchange-busy state between the command and response iocbs
 * before the callback runs (flag updates are done under hbalock; the
 * callback itself is invoked unlocked). Without a handler the command
 * iocb is simply released; an unmatched completion on a non-ELS ring
 * is logged. Always returns 1.
 */
static int
lpfc_sli_process_sol_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
			  struct lpfc_iocbq *saveq)
{
	struct lpfc_iocbq *cmdiocbp;
	int rc = 1;
	unsigned long iflag;

	/* Based on the iotag field, get the cmd IOCB from the txcmplq */
	spin_lock_irqsave(&phba->hbalock, iflag);
	cmdiocbp = lpfc_sli_iocbq_lookup(phba, pring, saveq);
	spin_unlock_irqrestore(&phba->hbalock, iflag);

	if (cmdiocbp) {
		if (cmdiocbp->iocb_cmpl) {
			/*
			 * If an ELS command failed send an event to mgmt
			 * application.
			 */
			if (saveq->iocb.ulpStatus &&
			     (pring->ringno == LPFC_ELS_RING) &&
			     (cmdiocbp->iocb.ulpCommand ==
				CMD_ELS_REQUEST64_CR))
				lpfc_send_els_failure_event(phba,
					cmdiocbp, saveq);

			/*
			 * Post all ELS completions to the worker thread.
			 * All other are passed to the completion callback.
			 */
			if (pring->ringno == LPFC_ELS_RING) {
				if ((phba->sli_rev < LPFC_SLI_REV4) &&
				    (cmdiocbp->iocb_flag &
							LPFC_DRIVER_ABORTED)) {
					/*
					 * Driver aborted this command: report
					 * it as aborted and delay freeing the
					 * buffers.
					 */
					spin_lock_irqsave(&phba->hbalock,
							  iflag);
					cmdiocbp->iocb_flag &=
						~LPFC_DRIVER_ABORTED;
					spin_unlock_irqrestore(&phba->hbalock,
							       iflag);
					saveq->iocb.ulpStatus =
						IOSTAT_LOCAL_REJECT;
					saveq->iocb.un.ulpWord[4] =
						IOERR_SLI_ABORTED;

					/*
					 * Delay the release of the ELS
					 * buffers until the abort XRI
					 * has fully completed.
					 */
					spin_lock_irqsave(&phba->hbalock,
							  iflag);
					saveq->iocb_flag |= LPFC_DELAY_MEM_FREE;
					spin_unlock_irqrestore(&phba->hbalock,
							       iflag);
				}
				if (phba->sli_rev == LPFC_SLI_REV4) {
					if (saveq->iocb_flag &
					    LPFC_EXCHANGE_BUSY) {
						/*
						 * The exchange is still held
						 * by the port: propagate the
						 * busy flag to the command so
						 * the XRI is not reused until
						 * the port releases it.
						 */
						spin_lock_irqsave(
							&phba->hbalock, iflag);
						cmdiocbp->iocb_flag |=
							LPFC_EXCHANGE_BUSY;
						spin_unlock_irqrestore(
							&phba->hbalock, iflag);
					}
					if (cmdiocbp->iocb_flag &
					    LPFC_DRIVER_ABORTED) {
						/*
						 * Clear the driver-abort flag
						 * and rewrite both command and
						 * response status as aborted.
						 */
						spin_lock_irqsave(
							&phba->hbalock, iflag);
						cmdiocbp->iocb_flag &=
							~LPFC_DRIVER_ABORTED;
						spin_unlock_irqrestore(
							&phba->hbalock, iflag);
						cmdiocbp->iocb.ulpStatus =
							IOSTAT_LOCAL_REJECT;
						cmdiocbp->iocb.un.ulpWord[4] =
							IOERR_ABORT_REQUESTED;
						/*
						 * For SLI4, irsiocb contains
						 * NO_XRI in sli_xritag, it
						 * shall not affect releasing
						 * sgl (xri) process.
						 */
						saveq->iocb.ulpStatus =
							IOSTAT_LOCAL_REJECT;
						saveq->iocb.un.ulpWord[4] =
							IOERR_SLI_ABORTED;
						spin_lock_irqsave(
							&phba->hbalock, iflag);
						saveq->iocb_flag |=
							LPFC_DELAY_MEM_FREE;
						spin_unlock_irqrestore(
							&phba->hbalock, iflag);
					}
				}
			}
			(cmdiocbp->iocb_cmpl) (phba, cmdiocbp, saveq);
		} else
			lpfc_sli_release_iocbq(phba, cmdiocbp);
	} else {
		/*
		 * Unknown initiating command based on the response iotag.
		 * This could be the case on the ELS ring because of
		 * lpfc_els_abort().
		 */
		if (pring->ringno != LPFC_ELS_RING) {
			/*
			 * Ring <ringno> handler: unexpected completion IoTag
			 * <IoTag>
			 */
			lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
					 "0322 Ring %d handler: "
					 "unexpected completion IoTag x%x "
					 "Data: x%x x%x x%x x%x\n",
					 pring->ringno,
					 saveq->iocb.ulpIoTag,
					 saveq->iocb.ulpStatus,
					 saveq->iocb.un.ulpWord[4],
					 saveq->iocb.ulpCommand,
					 saveq->iocb.ulpContext);
		}
	}

	return rc;
}
2886
2887
2888
2889
2890
2891
2892
2893
2894
2895
2896
/**
 * lpfc_sli_rsp_pointers_error - Handle a bad response-ring put index
 * @phba: Pointer to HBA context object.
 * @pring: Pointer to driver SLI ring object.
 *
 * Called when the port's response put index exceeds the ring size.
 * Logs the condition, puts the HBA into error state and wakes the
 * worker thread so the error attention is processed.
 */
static void
lpfc_sli_rsp_pointers_error(struct lpfc_hba *phba, struct lpfc_sli_ring *pring)
{
	struct lpfc_pgp *pgp = &phba->port_gp[pring->ringno];
	/*
	 * Ring <ringno> handler: portRspPut <portRspPut> is bigger than
	 * rsp ring <portRspMax>
	 */
	lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
			"0312 Ring %d handler: portRspPut %d "
			"is bigger than rsp ring %d\n",
			pring->ringno, le32_to_cpu(pgp->rspPutInx),
			pring->sli.sli3.numRiocb);

	phba->link_state = LPFC_HBA_ERROR;

	/*
	 * All error attention handlers are posted to worker thread.
	 */
	phba->work_ha |= HA_ERATT;
	phba->work_hs = HS_FFER3;

	lpfc_worker_wake_up(phba);

	return;
}
2924
2925
2926
2927
2928
2929
2930
2931
2932
2933
2934
/**
 * lpfc_poll_eratt - Error-attention polling timer callback
 * @ptr: Timer argument: pointer to the HBA context object.
 *
 * Updates the interrupts-per-interval statistic (handling wrap of the
 * 64-bit interrupt counter), then checks for an error attention.
 * If one is pending, wakes the worker thread to handle it; otherwise
 * rearms the poll timer for another interval.
 */
void lpfc_poll_eratt(unsigned long ptr)
{
	struct lpfc_hba *phba;
	uint32_t eratt = 0;
	uint64_t sli_intr, cnt;

	phba = (struct lpfc_hba *)ptr;

	/* Here we will also keep track of interrupts per sec of the hba */
	sli_intr = phba->sli.slistat.sli_intr;

	if (phba->sli.slistat.sli_prev_intr > sli_intr)
		cnt = (((uint64_t)(-1) - phba->sli.slistat.sli_prev_intr) +
			sli_intr);
	else
		cnt = (sli_intr - phba->sli.slistat.sli_prev_intr);

	/* 64-bit integer division not supported on 32-bit x86 - use do_div */
	do_div(cnt, LPFC_ERATT_POLL_INTERVAL);
	phba->sli.slistat.sli_ips = cnt;

	phba->sli.slistat.sli_prev_intr = sli_intr;

	/* Check chip HA register for error event */
	eratt = lpfc_sli_check_eratt(phba);

	if (eratt)
		/* Tell the worker thread there is work to do */
		lpfc_worker_wake_up(phba);
	else
		/* Restart the timer for next eratt poll */
		mod_timer(&phba->eratt_poll,
			  jiffies +
			  msecs_to_jiffies(1000 * LPFC_ERATT_POLL_INTERVAL));
	return;
}
2971
2972
2973
2974
2975
2976
2977
2978
2979
2980
2981
2982
2983
2984
2985
2986
2987
2988
2989
/**
 * lpfc_sli_handle_fast_ring_event - Service completions on an SLI-3 fast ring
 * @phba: pointer to the lpfc HBA context object.
 * @pring: pointer to the driver SLI ring object to service.
 * @mask: Host Attention register bits relevant to this ring.
 *
 * Walks the port's response ring from the driver get index up to the port's
 * put index, copying each response IOCB into a local @rspiocbq and dispatching
 * it by IOCB type.  The hbalock is dropped around every upcall (completion
 * handlers, unsolicited processing, queue-depth ramp-down) and re-taken
 * afterwards.  @phba->fcp_ring_in_use is used as a single-servicer flag so
 * only one context walks this ring at a time.  Always returns 1.
 */
int
lpfc_sli_handle_fast_ring_event(struct lpfc_hba *phba,
				struct lpfc_sli_ring *pring, uint32_t mask)
{
	struct lpfc_pgp *pgp = &phba->port_gp[pring->ringno];
	IOCB_t *irsp = NULL;
	IOCB_t *entry = NULL;
	struct lpfc_iocbq *cmdiocbq = NULL;
	struct lpfc_iocbq rspiocbq;
	uint32_t status;
	uint32_t portRspPut, portRspMax;
	int rc = 1;
	lpfc_iocb_type type;
	unsigned long iflag;
	uint32_t rsp_cmpl = 0;

	spin_lock_irqsave(&phba->hbalock, iflag);
	pring->stats.iocb_event++;

	/*
	 * Snapshot the port's response put index and reject it if it lies
	 * outside the ring before walking any entries.
	 */
	portRspMax = pring->sli.sli3.numRiocb;
	portRspPut = le32_to_cpu(pgp->rspPutInx);
	if (unlikely(portRspPut >= portRspMax)) {
		lpfc_sli_rsp_pointers_error(phba, pring);
		spin_unlock_irqrestore(&phba->hbalock, iflag);
		return 1;
	}
	/* Only one context services this ring at a time. */
	if (phba->fcp_ring_in_use) {
		spin_unlock_irqrestore(&phba->hbalock, iflag);
		return 1;
	} else
		phba->fcp_ring_in_use = 1;

	/* Order the put-index read against the entry reads below. */
	rmb();
	while (pring->sli.sli3.rspidx != portRspPut) {
		/*
		 * Copy the next response entry into the on-stack rspiocbq
		 * so it can be handed to completion routines after the
		 * ring slot is consumed.
		 */
		entry = lpfc_resp_iocb(phba, pring);
		phba->last_completion_time = jiffies;

		if (++pring->sli.sli3.rspidx >= portRspMax)
			pring->sli.sli3.rspidx = 0;

		lpfc_sli_pcimem_bcopy((uint32_t *) entry,
				      (uint32_t *) &rspiocbq.iocb,
				      phba->iocb_rsp_size);
		INIT_LIST_HEAD(&(rspiocbq.list));
		irsp = &rspiocbq.iocb;

		type = lpfc_sli_iocb_cmd_type(irsp->ulpCommand & CMD_IOCB_MASK);
		pring->stats.iocb_rsp++;
		rsp_cmpl++;

		if (unlikely(irsp->ulpStatus)) {
			/*
			 * LOCAL_REJECT/NO_RESOURCES: ask the HBA-specific
			 * handler to ramp down queue depth (lock dropped
			 * around the upcall).
			 */
			if ((irsp->ulpStatus == IOSTAT_LOCAL_REJECT) &&
			    ((irsp->un.ulpWord[4] & IOERR_PARAM_MASK) ==
			     IOERR_NO_RESOURCES)) {
				spin_unlock_irqrestore(&phba->hbalock, iflag);
				phba->lpfc_rampdown_queue_depth(phba);
				spin_lock_irqsave(&phba->hbalock, iflag);
			}

			/* Log the raw IOCB words of the errored response. */
			lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
					"0336 Rsp Ring %d error: IOCB Data: "
					"x%x x%x x%x x%x x%x x%x x%x x%x\n",
					pring->ringno,
					irsp->un.ulpWord[0],
					irsp->un.ulpWord[1],
					irsp->un.ulpWord[2],
					irsp->un.ulpWord[3],
					irsp->un.ulpWord[4],
					irsp->un.ulpWord[5],
					*(uint32_t *)&irsp->un1,
					*((uint32_t *)&irsp->un1 + 1));
		}

		switch (type) {
		case LPFC_ABORT_IOCB:
		case LPFC_SOL_IOCB:
			/*
			 * XRI_ABORTED_CX responses carry no completion to
			 * run; skip the lookup entirely.
			 */
			if (unlikely(irsp->ulpCommand == CMD_XRI_ABORTED_CX)) {
				lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
						"0333 IOCB cmd 0x%x"
						" processed. Skipping"
						" completion\n",
						irsp->ulpCommand);
				break;
			}

			/* Match the response back to its command IOCB. */
			cmdiocbq = lpfc_sli_iocbq_lookup(phba, pring,
							 &rspiocbq);
			if (unlikely(!cmdiocbq))
				break;
			if (cmdiocbq->iocb_flag & LPFC_DRIVER_ABORTED)
				cmdiocbq->iocb_flag &= ~LPFC_DRIVER_ABORTED;
			if (cmdiocbq->iocb_cmpl) {
				/* Completion runs without the hbalock held. */
				spin_unlock_irqrestore(&phba->hbalock, iflag);
				(cmdiocbq->iocb_cmpl)(phba, cmdiocbq,
						      &rspiocbq);
				spin_lock_irqsave(&phba->hbalock, iflag);
			}
			break;
		case LPFC_UNSOL_IOCB:
			spin_unlock_irqrestore(&phba->hbalock, iflag);
			lpfc_sli_process_unsol_iocb(phba, pring, &rspiocbq);
			spin_lock_irqsave(&phba->hbalock, iflag);
			break;
		default:
			if (irsp->ulpCommand == CMD_ADAPTER_MSG) {
				/* Adapter message: raw ASCII in the IOCB. */
				char adaptermsg[LPFC_MAX_ADPTMSG];
				memset(adaptermsg, 0, LPFC_MAX_ADPTMSG);
				memcpy(&adaptermsg[0], (uint8_t *) irsp,
				       MAX_MSG_DATA);
				dev_warn(&((phba->pcidev)->dev),
					 "lpfc%d: %s\n",
					 phba->brd_no, adaptermsg);
			} else {
				/* Unknown command type: log and drop. */
				lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
						"0334 Unknown IOCB command "
						"Data: x%x, x%x x%x x%x x%x\n",
						type, irsp->ulpCommand,
						irsp->ulpStatus,
						irsp->ulpIoTag,
						irsp->ulpContext);
			}
			break;
		}

		/*
		 * Tell the port how far the driver has consumed, then
		 * re-read the put index in case more responses arrived
		 * while this batch was processed.
		 */
		writel(pring->sli.sli3.rspidx,
		       &phba->host_gp[pring->ringno].rspGetInx);

		if (pring->sli.sli3.rspidx == portRspPut)
			portRspPut = le32_to_cpu(pgp->rspPutInx);
	}

	/* Ring was full: acknowledge and re-enable ring attention. */
	if ((rsp_cmpl > 0) && (mask & HA_R0RE_REQ)) {
		pring->stats.iocb_rsp_full++;
		status = ((CA_R0ATT | CA_R0RE_RSP) << (pring->ringno * 4));
		writel(status, phba->CAregaddr);
		readl(phba->CAregaddr);
	}
	if ((mask & HA_R0CE_RSP) && (pring->flag & LPFC_CALL_RING_AVAILABLE)) {
		pring->flag &= ~LPFC_CALL_RING_AVAILABLE;
		pring->stats.iocb_cmd_empty++;

		/* Command ring drained: resume any queued transmit work. */
		pring->sli.sli3.local_getidx = le32_to_cpu(pgp->cmdGetInx);
		lpfc_sli_resume_iocb(phba, pring);

		if ((pring->lpfc_sli_cmd_available))
			(pring->lpfc_sli_cmd_available) (phba, pring);

	}

	phba->fcp_ring_in_use = 0;
	spin_unlock_irqrestore(&phba->hbalock, iflag);
	return rc;
}
3169
3170
3171
3172
3173
3174
3175
3176
3177
3178
3179
3180
3181
3182
3183
3184
3185
3186
3187
/**
 * lpfc_sli_sp_handle_rspiocb - Process one slow-path response IOCB
 * @phba: pointer to the lpfc HBA context object.
 * @pring: pointer to the driver SLI ring object.
 * @rspiocbp: pointer to the response IOCB just taken off the ring.
 *
 * Queues @rspiocbp onto the ring's continuation list.  When the entry has
 * the ulpLe (last entry) bit set, the whole continuation chain (headed by
 * @saveq) is dispatched according to its IOCB command type, with the hbalock
 * dropped around each upcall.  Unless an unsolicited handler claims the
 * buffers (returns 0), the chain is released back to the IOCB pool.
 *
 * Return: NULL once the entry has been queued/consumed.  (The non-NULL
 * path is never reached here since @rspiocbp is set to NULL before exit.)
 */
static struct lpfc_iocbq *
lpfc_sli_sp_handle_rspiocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
			   struct lpfc_iocbq *rspiocbp)
{
	struct lpfc_iocbq *saveq;
	struct lpfc_iocbq *cmdiocbp;
	struct lpfc_iocbq *next_iocb;
	IOCB_t *irsp = NULL;
	uint32_t free_saveq;
	uint8_t iocb_cmd_type;
	lpfc_iocb_type type;
	unsigned long iflag;
	int rc;

	spin_lock_irqsave(&phba->hbalock, iflag);
	/* Accumulate multi-entry responses until the last-entry bit. */
	list_add_tail(&rspiocbp->list, &(pring->iocb_continueq));
	pring->iocb_continueq_cnt++;

	/* ulpLe marks the final entry of the response sequence. */
	irsp = &rspiocbp->iocb;
	if (irsp->ulpLe) {
		/*
		 * Detach the complete continuation chain; saveq heads it
		 * and its iocb carries the status for the whole sequence.
		 */
		free_saveq = 1;
		saveq = list_get_first(&pring->iocb_continueq,
				       struct lpfc_iocbq, list);
		irsp = &(saveq->iocb);
		list_del_init(&pring->iocb_continueq);
		pring->iocb_continueq_cnt = 0;

		pring->stats.iocb_rsp++;

		/*
		 * LOCAL_REJECT/NO_RESOURCES: ramp down queue depth via the
		 * HBA-specific handler (lock dropped around the upcall).
		 */
		if ((irsp->ulpStatus == IOSTAT_LOCAL_REJECT) &&
		    ((irsp->un.ulpWord[4] & IOERR_PARAM_MASK) ==
		     IOERR_NO_RESOURCES)) {
			spin_unlock_irqrestore(&phba->hbalock, iflag);
			phba->lpfc_rampdown_queue_depth(phba);
			spin_lock_irqsave(&phba->hbalock, iflag);
		}

		if (irsp->ulpStatus) {
			/* Dump the first 16 words of the errored IOCB. */
			lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
					"0328 Rsp Ring %d error: "
					"IOCB Data: "
					"x%x x%x x%x x%x "
					"x%x x%x x%x x%x "
					"x%x x%x x%x x%x "
					"x%x x%x x%x x%x\n",
					pring->ringno,
					irsp->un.ulpWord[0],
					irsp->un.ulpWord[1],
					irsp->un.ulpWord[2],
					irsp->un.ulpWord[3],
					irsp->un.ulpWord[4],
					irsp->un.ulpWord[5],
					*(((uint32_t *) irsp) + 6),
					*(((uint32_t *) irsp) + 7),
					*(((uint32_t *) irsp) + 8),
					*(((uint32_t *) irsp) + 9),
					*(((uint32_t *) irsp) + 10),
					*(((uint32_t *) irsp) + 11),
					*(((uint32_t *) irsp) + 12),
					*(((uint32_t *) irsp) + 13),
					*(((uint32_t *) irsp) + 14),
					*(((uint32_t *) irsp) + 15));
		}

		/*
		 * Dispatch the chain by command type; each handler runs
		 * without the hbalock held.
		 */
		iocb_cmd_type = irsp->ulpCommand & CMD_IOCB_MASK;
		type = lpfc_sli_iocb_cmd_type(iocb_cmd_type);
		switch (type) {
		case LPFC_SOL_IOCB:
			spin_unlock_irqrestore(&phba->hbalock, iflag);
			rc = lpfc_sli_process_sol_iocb(phba, pring, saveq);
			spin_lock_irqsave(&phba->hbalock, iflag);
			break;

		case LPFC_UNSOL_IOCB:
			spin_unlock_irqrestore(&phba->hbalock, iflag);
			rc = lpfc_sli_process_unsol_iocb(phba, pring, saveq);
			spin_lock_irqsave(&phba->hbalock, iflag);
			/* rc == 0 means the handler kept the buffers. */
			if (!rc)
				free_saveq = 0;
			break;

		case LPFC_ABORT_IOCB:
			cmdiocbp = NULL;
			if (irsp->ulpCommand != CMD_XRI_ABORTED_CX)
				cmdiocbp = lpfc_sli_iocbq_lookup(phba, pring,
								 saveq);
			if (cmdiocbp) {
				/* Complete the aborted command, if any. */
				if (cmdiocbp->iocb_cmpl) {
					spin_unlock_irqrestore(&phba->hbalock,
							       iflag);
					(cmdiocbp->iocb_cmpl)(phba, cmdiocbp,
							      saveq);
					spin_lock_irqsave(&phba->hbalock,
							  iflag);
				} else
					__lpfc_sli_release_iocbq(phba,
								 cmdiocbp);
			}
			break;

		case LPFC_UNKNOWN_IOCB:
			if (irsp->ulpCommand == CMD_ADAPTER_MSG) {
				/* Adapter message: raw ASCII in the IOCB. */
				char adaptermsg[LPFC_MAX_ADPTMSG];
				memset(adaptermsg, 0, LPFC_MAX_ADPTMSG);
				memcpy(&adaptermsg[0], (uint8_t *)irsp,
				       MAX_MSG_DATA);
				dev_warn(&((phba->pcidev)->dev),
					 "lpfc%d: %s\n",
					 phba->brd_no, adaptermsg);
			} else {
				/* Unknown command type: log and drop. */
				lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
						"0335 Unknown IOCB "
						"command Data: x%x "
						"x%x x%x x%x\n",
						irsp->ulpCommand,
						irsp->ulpStatus,
						irsp->ulpIoTag,
						irsp->ulpContext);
			}
			break;
		}

		/* Release the whole continuation chain unless claimed. */
		if (free_saveq) {
			list_for_each_entry_safe(rspiocbp, next_iocb,
						 &saveq->list, list) {
				list_del_init(&rspiocbp->list);
				__lpfc_sli_release_iocbq(phba, rspiocbp);
			}
			__lpfc_sli_release_iocbq(phba, saveq);
		}
		rspiocbp = NULL;
	}
	spin_unlock_irqrestore(&phba->hbalock, iflag);
	return rspiocbp;
}
3342
3343
3344
3345
3346
3347
3348
3349
3350
3351
3352void
3353lpfc_sli_handle_slow_ring_event(struct lpfc_hba *phba,
3354 struct lpfc_sli_ring *pring, uint32_t mask)
3355{
3356 phba->lpfc_sli_handle_slow_ring_event(phba, pring, mask);
3357}
3358
3359
3360
3361
3362
3363
3364
3365
3366
3367
3368
3369
/**
 * lpfc_sli_handle_slow_ring_event_s3 - Service an SLI-3 slow (ELS) ring
 * @phba: pointer to the lpfc HBA context object.
 * @pring: pointer to the driver SLI ring object.
 * @mask: Host Attention register bits for this ring.
 *
 * Walks the response ring from the driver get index to the port put index,
 * copying each entry into a freshly allocated IOCB and handing it to
 * lpfc_sli_sp_handle_rspiocb() with the hbalock dropped.  An out-of-range
 * put index is treated as a fatal adapter error (HS_FFER3 recovery path).
 */
static void
lpfc_sli_handle_slow_ring_event_s3(struct lpfc_hba *phba,
				   struct lpfc_sli_ring *pring, uint32_t mask)
{
	struct lpfc_pgp *pgp;
	IOCB_t *entry;
	IOCB_t *irsp = NULL;
	struct lpfc_iocbq *rspiocbp = NULL;
	uint32_t portRspPut, portRspMax;
	unsigned long iflag;
	uint32_t status;

	pgp = &phba->port_gp[pring->ringno];
	spin_lock_irqsave(&phba->hbalock, iflag);
	pring->stats.iocb_event++;

	/*
	 * Snapshot the port's response put index and validate it against
	 * the ring size before consuming entries.
	 */
	portRspMax = pring->sli.sli3.numRiocb;
	portRspPut = le32_to_cpu(pgp->rspPutInx);
	if (portRspPut >= portRspMax) {
		/*
		 * Put index outside the ring: the adapter state cannot be
		 * trusted.  Mark the HBA errored and kick error recovery.
		 */
		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
				"0303 Ring %d handler: portRspPut %d "
				"is bigger than rsp ring %d\n",
				pring->ringno, portRspPut, portRspMax);

		phba->link_state = LPFC_HBA_ERROR;
		spin_unlock_irqrestore(&phba->hbalock, iflag);

		phba->work_hs = HS_FFER3;
		lpfc_handle_eratt(phba);

		return;
	}

	/* Order the put-index read against the entry reads below. */
	rmb();
	while (pring->sli.sli3.rspidx != portRspPut) {
		/*
		 * Copy the next ring entry into a driver IOCB before
		 * advancing the get index; the slot is reused by the port
		 * once the get index is written back.
		 */
		entry = lpfc_resp_iocb(phba, pring);

		phba->last_completion_time = jiffies;
		rspiocbp = __lpfc_sli_get_iocbq(phba);
		if (rspiocbp == NULL) {
			/* No IOCB available: this completion is dropped. */
			printk(KERN_ERR "%s: out of buffers! Failing "
			       "completion.\n", __func__);
			break;
		}

		lpfc_sli_pcimem_bcopy(entry, &rspiocbp->iocb,
				      phba->iocb_rsp_size);
		irsp = &rspiocbp->iocb;

		if (++pring->sli.sli3.rspidx >= portRspMax)
			pring->sli.sli3.rspidx = 0;

		if (pring->ringno == LPFC_ELS_RING) {
			lpfc_debugfs_slow_ring_trc(phba,
			"IOCB rsp ring:   wd4:x%08x wd6:x%08x wd7:x%08x",
				*(((uint32_t *) irsp) + 4),
				*(((uint32_t *) irsp) + 6),
				*(((uint32_t *) irsp) + 7));
		}

		/* Publish the consumed entry back to the port. */
		writel(pring->sli.sli3.rspidx,
		       &phba->host_gp[pring->ringno].rspGetInx);

		spin_unlock_irqrestore(&phba->hbalock, iflag);
		/* Handler runs unlocked; returns NULL once it owns the IOCB. */
		rspiocbp = lpfc_sli_sp_handle_rspiocb(phba, pring, rspiocbp);
		spin_lock_irqsave(&phba->hbalock, iflag);

		/*
		 * Once the driver catches up, re-read the put index in case
		 * the port posted more responses meanwhile.
		 */
		if (pring->sli.sli3.rspidx == portRspPut) {
			portRspPut = le32_to_cpu(pgp->rspPutInx);
		}
	}

	/* Ring-full attention: acknowledge and request response release. */
	if ((rspiocbp != NULL) && (mask & HA_R0RE_REQ)) {

		pring->stats.iocb_rsp_full++;

		status = ((CA_R0ATT | CA_R0RE_RSP) << (pring->ringno * 4));
		writel(status, phba->CAregaddr);
		readl(phba->CAregaddr);
	}
	if ((mask & HA_R0CE_RSP) && (pring->flag & LPFC_CALL_RING_AVAILABLE)) {
		pring->flag &= ~LPFC_CALL_RING_AVAILABLE;
		pring->stats.iocb_cmd_empty++;

		/* Command ring drained: resume any queued transmit work. */
		pring->sli.sli3.local_getidx = le32_to_cpu(pgp->cmdGetInx);
		lpfc_sli_resume_iocb(phba, pring);

		if ((pring->lpfc_sli_cmd_available))
			(pring->lpfc_sli_cmd_available) (phba, pring);

	}

	spin_unlock_irqrestore(&phba->hbalock, iflag);
	return;
}
3493
3494
3495
3496
3497
3498
3499
3500
3501
3502
3503
3504
3505
/**
 * lpfc_sli_handle_slow_ring_event_s4 - Drain the SLI-4 slow-path event queue
 * @phba: pointer to the lpfc HBA context object.
 * @pring: pointer to the driver SLI ring object.
 * @mask: Host Attention register mask (unused in the SLI-4 path).
 *
 * Clears HBA_SP_QUEUE_EVT and drains @phba->sli4_hba.sp_queue_event,
 * dispatching each CQ event: WQE completions are converted to response
 * IOCBs and fed to lpfc_sli_sp_handle_rspiocb(); receive CQEs are handed
 * to the unsolicited-buffer path.  The hbalock protects only the list
 * manipulation, not the handlers.
 */
static void
lpfc_sli_handle_slow_ring_event_s4(struct lpfc_hba *phba,
				   struct lpfc_sli_ring *pring, uint32_t mask)
{
	struct lpfc_iocbq *irspiocbq;
	struct hbq_dmabuf *dmabuf;
	struct lpfc_cq_event *cq_event;
	unsigned long iflag;

	/* Clear the pending-event flag before draining the queue. */
	spin_lock_irqsave(&phba->hbalock, iflag);
	phba->hba_flag &= ~HBA_SP_QUEUE_EVT;
	spin_unlock_irqrestore(&phba->hbalock, iflag);
	while (!list_empty(&phba->sli4_hba.sp_queue_event)) {
		/* Pop one event under the lock; process it unlocked. */
		spin_lock_irqsave(&phba->hbalock, iflag);
		list_remove_head(&phba->sli4_hba.sp_queue_event,
				 cq_event, struct lpfc_cq_event, list);
		spin_unlock_irqrestore(&phba->hbalock, iflag);

		switch (bf_get(lpfc_wcqe_c_code, &cq_event->cqe.wcqe_cmpl)) {
		case CQE_CODE_COMPL_WQE:
			irspiocbq = container_of(cq_event, struct lpfc_iocbq,
						 cq_event);
			/* Translate the WCQE into a response IOCB. */
			irspiocbq = lpfc_sli4_els_wcqe_to_rspiocbq(phba,
								   irspiocbq);
			if (irspiocbq)
				lpfc_sli_sp_handle_rspiocb(phba, pring,
							   irspiocbq);
			break;
		case CQE_CODE_RECEIVE:
		case CQE_CODE_RECEIVE_V1:
			dmabuf = container_of(cq_event, struct hbq_dmabuf,
					      cq_event);
			lpfc_sli4_handle_received_buffer(phba, dmabuf);
			break;
		default:
			/* Unrecognized CQE code: silently discarded. */
			break;
		}
	}
}
3547
3548
3549
3550
3551
3552
3553
3554
3555
3556
3557
/**
 * lpfc_sli_abort_iocb_ring - Abort all pending and outstanding IOCBs on a ring
 * @phba: pointer to the lpfc HBA context object.
 * @pring: pointer to the driver SLI ring object.
 *
 * Moves every queued (txq) IOCB onto a local list and issues an abort for
 * every outstanding (txcmplq) IOCB, then cancels the queued ones with
 * IOSTAT_LOCAL_REJECT/IOERR_SLI_ABORTED.  On SLI-4 the txq is protected by
 * the per-ring ring_lock while the txcmplq walk uses the hbalock; on SLI-3
 * the hbalock covers both.  For the ELS ring, fabric IOCBs are aborted
 * first via lpfc_fabric_abort_hba().
 */
void
lpfc_sli_abort_iocb_ring(struct lpfc_hba *phba, struct lpfc_sli_ring *pring)
{
	LIST_HEAD(completions);
	struct lpfc_iocbq *iocb, *next_iocb;

	if (pring->ringno == LPFC_ELS_RING) {
		lpfc_fabric_abort_hba(phba);
	}

	/*
	 * Lock choice differs by SLI revision: SLI-4 rings have their own
	 * ring_lock for txq; the txcmplq abort walk uses the hbalock.
	 */
	if (phba->sli_rev >= LPFC_SLI_REV4) {
		spin_lock_irq(&pring->ring_lock);
		list_splice_init(&pring->txq, &completions);
		pring->txq_cnt = 0;
		spin_unlock_irq(&pring->ring_lock);

		spin_lock_irq(&phba->hbalock);
		/* Issue aborts for everything still outstanding. */
		list_for_each_entry_safe(iocb, next_iocb, &pring->txcmplq, list)
			lpfc_sli_issue_abort_iotag(phba, pring, iocb);
		spin_unlock_irq(&phba->hbalock);
	} else {
		spin_lock_irq(&phba->hbalock);
		list_splice_init(&pring->txq, &completions);
		pring->txq_cnt = 0;

		/* Issue aborts for everything still outstanding. */
		list_for_each_entry_safe(iocb, next_iocb, &pring->txcmplq, list)
			lpfc_sli_issue_abort_iotag(phba, pring, iocb);
		spin_unlock_irq(&phba->hbalock);
	}

	/* Complete the never-issued txq entries as aborted. */
	lpfc_sli_cancel_iocbs(phba, &completions, IOSTAT_LOCAL_REJECT,
			      IOERR_SLI_ABORTED);
}
3597
3598
3599
3600
3601
3602
3603
3604
3605
3606
3607
3608void
3609lpfc_sli_abort_fcp_rings(struct lpfc_hba *phba)
3610{
3611 struct lpfc_sli *psli = &phba->sli;
3612 struct lpfc_sli_ring *pring;
3613 uint32_t i;
3614
3615
3616 if (phba->sli_rev >= LPFC_SLI_REV4) {
3617 for (i = 0; i < phba->cfg_fcp_io_channel; i++) {
3618 pring = &psli->ring[i + MAX_SLI3_CONFIGURED_RINGS];
3619 lpfc_sli_abort_iocb_ring(phba, pring);
3620 }
3621 } else {
3622 pring = &psli->ring[psli->fcp_ring];
3623 lpfc_sli_abort_iocb_ring(phba, pring);
3624 }
3625}
3626
3627
3628
3629
3630
3631
3632
3633
3634
3635
3636
3637
/**
 * lpfc_sli_flush_fcp_rings - Flush all queued and outstanding FCP IOCBs
 * @phba: pointer to the lpfc HBA context object.
 *
 * Sets HBA_FCP_IOQ_FLUSH, then for every FCP ring splices both the txq and
 * txcmplq onto local lists and completes every IOCB with
 * IOSTAT_LOCAL_REJECT/IOERR_SLI_DOWN (no aborts are issued — everything is
 * failed locally).  SLI-4 uses the per-ring ring_lock for the splice;
 * SLI-3 uses the hbalock.
 */
void
lpfc_sli_flush_fcp_rings(struct lpfc_hba *phba)
{
	LIST_HEAD(txq);
	LIST_HEAD(txcmplq);
	struct lpfc_sli *psli = &phba->sli;
	struct lpfc_sli_ring *pring;
	uint32_t i;

	spin_lock_irq(&phba->hbalock);
	/* Flag that FCP I/O queues are being flushed. */
	phba->hba_flag |= HBA_FCP_IOQ_FLUSH;
	spin_unlock_irq(&phba->hbalock);

	/* SLI-4 has one ring per configured FCP I/O channel. */
	if (phba->sli_rev >= LPFC_SLI_REV4) {
		for (i = 0; i < phba->cfg_fcp_io_channel; i++) {
			pring = &psli->ring[i + MAX_SLI3_CONFIGURED_RINGS];

			spin_lock_irq(&pring->ring_lock);
			/* Take everything pending transmit... */
			list_splice_init(&pring->txq, &txq);
			/* ...and everything outstanding on the wire. */
			list_splice_init(&pring->txcmplq, &txcmplq);
			pring->txq_cnt = 0;
			pring->txcmplq_cnt = 0;
			spin_unlock_irq(&pring->ring_lock);

			/* Fail both sets locally as SLI-down. */
			lpfc_sli_cancel_iocbs(phba, &txq,
					      IOSTAT_LOCAL_REJECT,
					      IOERR_SLI_DOWN);

			lpfc_sli_cancel_iocbs(phba, &txcmplq,
					      IOSTAT_LOCAL_REJECT,
					      IOERR_SLI_DOWN);
		}
	} else {
		pring = &psli->ring[psli->fcp_ring];

		spin_lock_irq(&phba->hbalock);
		/* Take everything pending transmit... */
		list_splice_init(&pring->txq, &txq);
		/* ...and everything outstanding on the wire. */
		list_splice_init(&pring->txcmplq, &txcmplq);
		pring->txq_cnt = 0;
		pring->txcmplq_cnt = 0;
		spin_unlock_irq(&phba->hbalock);

		/* Fail both sets locally as SLI-down. */
		lpfc_sli_cancel_iocbs(phba, &txq, IOSTAT_LOCAL_REJECT,
				      IOERR_SLI_DOWN);

		lpfc_sli_cancel_iocbs(phba, &txcmplq, IOSTAT_LOCAL_REJECT,
				      IOERR_SLI_DOWN);
	}
}
3695
3696
3697
3698
3699
3700
3701
3702
3703
3704
3705
3706
3707
3708
/**
 * lpfc_sli_brdready_s3 - Poll an SLI-3 HBA until the requested state bits set
 * @phba: pointer to the lpfc HBA context object.
 * @mask: Host Status register bits to wait for.
 *
 * Polls the HS register up to 20 iterations with escalating delays
 * (10 ms, then 500 ms, then 2.5 s).  On the 15th iteration the board is
 * restarted once as a recovery attempt.  Bails out early on HS_FFERM
 * (firmware error) or a failed register read.
 *
 * Return: 0 when @mask bits are set; 1 on error/timeout (link state is
 * then set to LPFC_HBA_ERROR).
 */
static int
lpfc_sli_brdready_s3(struct lpfc_hba *phba, uint32_t mask)
{
	uint32_t status;
	int i = 0;
	int retval = 0;

	/* Initial read; a failed readl means the device is gone. */
	if (lpfc_readl(phba->HSregaddr, &status))
		return 1;

	/*
	 * Poll with escalating delays:
	 *   iterations 1-5  : 10 ms each
	 *   iterations 6-10 : 500 ms each
	 *   iterations 11-20: 2.5 s each
	 * Give up on HS_FFERM or after 20 iterations.
	 */
	while (((status & mask) != mask) &&
	       !(status & HS_FFERM) &&
	       i++ < 20) {

		if (i <= 5)
			msleep(10);
		else if (i <= 10)
			msleep(500);
		else
			msleep(2500);

		if (i == 15) {
			/* Mid-poll recovery: restart the board once. */
			phba->pport->port_state = LPFC_VPORT_UNKNOWN;
			lpfc_sli_brdrestart(phba);
		}

		if (lpfc_readl(phba->HSregaddr, &status)) {
			retval = 1;
			break;
		}
	}

	/* Firmware error or timeout: log FW scratch words and fail. */
	if ((status & HS_FFERM) || (i >= 20)) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"2751 Adapter failed to restart, "
				"status reg x%x, FW Data: A8 x%x AC x%x\n",
				status,
				readl(phba->MBslimaddr + 0xa8),
				readl(phba->MBslimaddr + 0xac));
		phba->link_state = LPFC_HBA_ERROR;
		retval = 1;
	}

	return retval;
}
3763
3764
3765
3766
3767
3768
3769
3770
3771
3772
3773
3774
3775static int
3776lpfc_sli_brdready_s4(struct lpfc_hba *phba, uint32_t mask)
3777{
3778 uint32_t status;
3779 int retval = 0;
3780
3781
3782 status = lpfc_sli4_post_status_check(phba);
3783
3784 if (status) {
3785 phba->pport->port_state = LPFC_VPORT_UNKNOWN;
3786 lpfc_sli_brdrestart(phba);
3787 status = lpfc_sli4_post_status_check(phba);
3788 }
3789
3790
3791 if (status) {
3792 phba->link_state = LPFC_HBA_ERROR;
3793 retval = 1;
3794 } else
3795 phba->sli4_hba.intr_enable = 0;
3796
3797 return retval;
3798}
3799
3800
3801
3802
3803
3804
3805
3806
3807
3808int
3809lpfc_sli_brdready(struct lpfc_hba *phba, uint32_t mask)
3810{
3811 return phba->lpfc_sli_brdready(phba, mask);
3812}
3813
3814#define BARRIER_TEST_PATTERN (0xdeadbeef)
3815
3816
3817
3818
3819
3820
3821
3822
/**
 * lpfc_reset_barrier - Perform the reset-barrier handshake with the HBA
 * @phba: pointer to the lpfc HBA context object.
 *
 * Applies only to Helios/Thor JEDEC parts with PCI header type 0x80; all
 * other hardware returns immediately.  With error attention masked
 * (LS_IGNORE_ERATT + HC_ERINT_ENA cleared), writes a KILL_BOARD mailbox
 * word into SLIM, then polls SLIM for the barrier test-pattern complement
 * and mailbox ownership handoff before restoring the saved HC register.
 * Caller must hold the hbalock (asserted via lockdep).
 */
void lpfc_reset_barrier(struct lpfc_hba *phba)
{
	uint32_t __iomem *resp_buf;
	uint32_t __iomem *mbox_buf;
	volatile uint32_t mbox;
	uint32_t hc_copy, ha_copy, resp_data;
	int i;
	uint8_t hdrtype;

	lockdep_assert_held(&phba->hbalock);

	/* Barrier applies only to specific JEDEC parts (header type 0x80). */
	pci_read_config_byte(phba->pcidev, PCI_HEADER_TYPE, &hdrtype);
	if (hdrtype != 0x80 ||
	    (FC_JEDEC_ID(phba->vpd.rev.biuRev) != HELIOS_JEDEC_ID &&
	     FC_JEDEC_ID(phba->vpd.rev.biuRev) != THOR_JEDEC_ID))
		return;

	/* SLIM doubles as the response buffer for this handshake. */
	resp_buf = phba->MBslimaddr;

	/* Mask error-attention interrupts for the duration; save HC. */
	if (lpfc_readl(phba->HCregaddr, &hc_copy))
		return;
	writel((hc_copy & ~HC_ERINT_ENA), phba->HCregaddr);
	readl(phba->HCregaddr);
	phba->link_flag |= LS_IGNORE_ERATT;

	if (lpfc_readl(phba->HAregaddr, &ha_copy))
		return;
	if (ha_copy & HA_ERATT) {
		/* Clear any pending error attention before starting. */
		writel(HA_ERATT, phba->HAregaddr);
		phba->pport->stopped = 1;
	}

	mbox = 0;
	((MAILBOX_t *)&mbox)->mbxCommand = MBX_KILL_BOARD;
	((MAILBOX_t *)&mbox)->mbxOwner = OWN_CHIP;

	/* Seed SLIM word 1 with the test pattern, then post the mailbox. */
	writel(BARRIER_TEST_PATTERN, (resp_buf + 1));
	mbox_buf = phba->MBslimaddr;
	writel(mbox, mbox_buf);

	/* Wait up to ~50 ms for the chip to complement the test pattern. */
	for (i = 0; i < 50; i++) {
		if (lpfc_readl((resp_buf + 1), &resp_data))
			return;
		if (resp_data != ~(BARRIER_TEST_PATTERN))
			mdelay(1);
		else
			break;
	}
	resp_data = 0;
	if (lpfc_readl((resp_buf + 1), &resp_data))
		return;
	if (resp_data != ~(BARRIER_TEST_PATTERN)) {
		/* No handshake: either bail out or just clear ERATT. */
		if (phba->sli.sli_flag & LPFC_SLI_ACTIVE ||
		    phba->pport->stopped)
			goto restore_hc;
		else
			goto clear_errat;
	}

	/* Hand the mailbox back to the host and wait for echo (~500 ms). */
	((MAILBOX_t *)&mbox)->mbxOwner = OWN_HOST;
	resp_data = 0;
	for (i = 0; i < 500; i++) {
		if (lpfc_readl(resp_buf, &resp_data))
			return;
		if (resp_data != mbox)
			mdelay(1);
		else
			break;
	}

clear_errat:
	/* Wait for the expected error attention raised by KILL_BOARD. */
	while (++i < 500) {
		if (lpfc_readl(phba->HAregaddr, &ha_copy))
			return;
		if (!(ha_copy & HA_ERATT))
			mdelay(1);
		else
			break;
	}

	if (readl(phba->HAregaddr) & HA_ERATT) {
		writel(HA_ERATT, phba->HAregaddr);
		phba->pport->stopped = 1;
	}

restore_hc:
	/* Re-enable error attention and restore the saved HC register. */
	phba->link_flag &= ~LS_IGNORE_ERATT;
	writel(hc_copy, phba->HCregaddr);
	readl(phba->HCregaddr);
}
3920
3921
3922
3923
3924
3925
3926
3927
3928
3929
3930
3931
/**
 * lpfc_sli_brdkill - Issue a KILL_BOARD mailbox command to stop the HBA
 * @phba: pointer to the lpfc HBA context object.
 *
 * Masks error-attention interrupts, issues KILL_BOARD via the mailbox
 * (MBX_NOWAIT), then polls the HA register up to ~3 s for the error
 * attention that signals the board has stopped.  Finishes by clearing
 * mailbox-active state, running the HBA-down post handling, and marking
 * the link state LPFC_HBA_ERROR.
 *
 * Return: 0 if the expected error attention was seen, 1 otherwise
 * (allocation failure, register read failure, or mailbox issue failure).
 */
int
lpfc_sli_brdkill(struct lpfc_hba *phba)
{
	struct lpfc_sli *psli;
	LPFC_MBOXQ_t *pmb;
	uint32_t status;
	uint32_t ha_copy;
	int retval;
	int i = 0;

	psli = &phba->sli;

	lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
			"0329 Kill HBA Data: x%x x%x\n",
			phba->pport->port_state, psli->sli_flag);

	pmb = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!pmb)
		return 1;

	/* Mask error-attention interrupts while killing the board. */
	spin_lock_irq(&phba->hbalock);
	if (lpfc_readl(phba->HCregaddr, &status)) {
		spin_unlock_irq(&phba->hbalock);
		mempool_free(pmb, phba->mbox_mem_pool);
		return 1;
	}
	status &= ~HC_ERINT_ENA;
	writel(status, phba->HCregaddr);
	readl(phba->HCregaddr);
	phba->link_flag |= LS_IGNORE_ERATT;
	spin_unlock_irq(&phba->hbalock);

	lpfc_kill_board(phba, pmb);
	pmb->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
	retval = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT);

	if (retval != MBX_SUCCESS) {
		/* MBX_BUSY means the mailbox layer still owns pmb. */
		if (retval != MBX_BUSY)
			mempool_free(pmb, phba->mbox_mem_pool);
		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
				"2752 KILL_BOARD command failed retval %d\n",
				retval);
		spin_lock_irq(&phba->hbalock);
		phba->link_flag &= ~LS_IGNORE_ERATT;
		spin_unlock_irq(&phba->hbalock);
		return 1;
	}

	spin_lock_irq(&phba->hbalock);
	psli->sli_flag &= ~LPFC_SLI_ACTIVE;
	spin_unlock_irq(&phba->hbalock);

	mempool_free(pmb, phba->mbox_mem_pool);

	/*
	 * Poll the HA register for the error attention the board raises
	 * once killed — up to 30 x 100 ms.
	 */
	if (lpfc_readl(phba->HAregaddr, &ha_copy))
		return 1;
	while ((i++ < 30) && !(ha_copy & HA_ERATT)) {
		mdelay(100);
		if (lpfc_readl(phba->HAregaddr, &ha_copy))
			return 1;
	}

	del_timer_sync(&psli->mbox_tmo);
	if (ha_copy & HA_ERATT) {
		writel(HA_ERATT, phba->HAregaddr);
		phba->pport->stopped = 1;
	}
	spin_lock_irq(&phba->hbalock);
	psli->sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
	psli->mbox_active = NULL;
	phba->link_flag &= ~LS_IGNORE_ERATT;
	spin_unlock_irq(&phba->hbalock);

	lpfc_hba_down_post(phba);
	phba->link_state = LPFC_HBA_ERROR;

	/* Success only if the board actually signalled error attention. */
	return ha_copy & HA_ERATT ? 0 : 1;
}
4017
4018
4019
4020
4021
4022
4023
4024
4025
4026
4027
4028
/**
 * lpfc_sli_brdreset - Reset an SLI-2/SLI-3 HBA
 * @phba: pointer to the lpfc HBA context object.
 *
 * Clears driver link/DID state, temporarily disables PCI parity/SERR
 * checking, toggles HC_INITFF to reset the chip, then restores the PCI
 * command register and reinitializes every ring's SLI-3 index state.
 * Leaves the link state at LPFC_WARM_START.
 *
 * Return: always 0.
 */
int
lpfc_sli_brdreset(struct lpfc_hba *phba)
{
	struct lpfc_sli *psli;
	struct lpfc_sli_ring *pring;
	uint16_t cfg_value;
	int i;

	psli = &phba->sli;

	lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
			"0325 Reset HBA Data: x%x x%x\n",
			phba->pport->port_state, psli->sli_flag);

	/* Forget fabric identity; it must be re-acquired after reset. */
	phba->fc_eventTag = 0;
	phba->link_events = 0;
	phba->pport->fc_myDID = 0;
	phba->pport->fc_prevDID = 0;

	/* Suppress parity/SERR reporting across the reset pulse. */
	pci_read_config_word(phba->pcidev, PCI_COMMAND, &cfg_value);
	pci_write_config_word(phba->pcidev, PCI_COMMAND,
			      (cfg_value &
			       ~(PCI_COMMAND_PARITY | PCI_COMMAND_SERR)));

	psli->sli_flag &= ~(LPFC_SLI_ACTIVE | LPFC_PROCESS_LA);

	/* Pulse INITFF: assert, hold 1 ms, deassert (reads flush writes). */
	writel(HC_INITFF, phba->HCregaddr);
	mdelay(1);
	readl(phba->HCregaddr);
	writel(0, phba->HCregaddr);
	readl(phba->HCregaddr);

	/* Restore the original PCI command register. */
	pci_write_config_word(phba->pcidev, PCI_COMMAND, cfg_value);

	/* All ring indices restart from zero after a chip reset. */
	for (i = 0; i < psli->num_rings; i++) {
		pring = &psli->ring[i];
		pring->flag = 0;
		pring->sli.sli3.rspidx = 0;
		pring->sli.sli3.next_cmdidx = 0;
		pring->sli.sli3.local_getidx = 0;
		pring->sli.sli3.cmdidx = 0;
		pring->missbufcnt = 0;
	}

	phba->link_state = LPFC_WARM_START;
	return 0;
}
4082
4083
4084
4085
4086
4087
4088
4089
4090
4091
4092
/**
 * lpfc_sli4_brdreset - Reset an SLI-4 HBA via PCI function reset
 * @phba: pointer to the lpfc HBA context object.
 *
 * Clears driver link/DID/FCF state, then — unless a firmware dump is in
 * progress (HBA_FW_DUMP_OP, in which case the reset is skipped) — disables
 * PCI parity/SERR checking, performs a PCI function reset, destroys the
 * SLI-4 queues, and restores the PCI command register.
 *
 * Return: 0 on success or when skipped for a FW dump; otherwise the
 * result of lpfc_pci_function_reset().
 */
int
lpfc_sli4_brdreset(struct lpfc_hba *phba)
{
	struct lpfc_sli *psli = &phba->sli;
	uint16_t cfg_value;
	int rc = 0;

	lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
			"0295 Reset HBA Data: x%x x%x x%x\n",
			phba->pport->port_state, psli->sli_flag,
			phba->hba_flag);

	/* Forget fabric identity; it must be re-acquired after reset. */
	phba->fc_eventTag = 0;
	phba->link_events = 0;
	phba->pport->fc_myDID = 0;
	phba->pport->fc_prevDID = 0;

	spin_lock_irq(&phba->hbalock);
	psli->sli_flag &= ~(LPFC_PROCESS_LA);
	phba->fcf.fcf_flag = 0;
	spin_unlock_irq(&phba->hbalock);

	/* Firmware dump in progress: skip the actual reset this time. */
	if (phba->hba_flag & HBA_FW_DUMP_OP) {
		phba->hba_flag &= ~HBA_FW_DUMP_OP;
		return rc;
	}

	lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
			"0389 Performing PCI function reset!\n");

	/* Suppress parity/SERR reporting across the function reset. */
	pci_read_config_word(phba->pcidev, PCI_COMMAND, &cfg_value);
	pci_write_config_word(phba->pcidev, PCI_COMMAND, (cfg_value &
			      ~(PCI_COMMAND_PARITY | PCI_COMMAND_SERR)));

	/* Reset the PCI function and tear down all SLI-4 queues. */
	rc = lpfc_pci_function_reset(phba);
	lpfc_sli4_queue_destroy(phba);

	/* Restore the original PCI command register. */
	pci_write_config_word(phba->pcidev, PCI_COMMAND, cfg_value);

	return rc;
}
4141
4142
4143
4144
4145
4146
4147
4148
4149
4150
4151
4152
4153
4154
/**
 * lpfc_sli_brdrestart_s3 - Restart an SLI-3 HBA
 * @phba: pointer to the lpfc HBA context object.
 *
 * Writes an MBX_RESTART mailbox word (and a port-state hint word) directly
 * into SLIM after running the reset barrier, then resets the board,
 * clears driver flags, and runs HBA-down post handling.  Called with the
 * hbalock released; it takes the lock itself for the SLIM write sequence.
 *
 * Return: always 0.
 */
static int
lpfc_sli_brdrestart_s3(struct lpfc_hba *phba)
{
	MAILBOX_t *mb;
	struct lpfc_sli *psli;
	volatile uint32_t word0;
	void __iomem *to_slim;
	uint32_t hba_aer_enabled;

	spin_lock_irq(&phba->hbalock);

	/* Remember AER state; it is torn down after the restart below. */
	hba_aer_enabled = phba->hba_flag & HBA_AER_ENABLED;

	psli = &phba->sli;

	lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
			"0337 Restart HBA Data: x%x x%x\n",
			phba->pport->port_state, psli->sli_flag);

	/* Build the RESTART mailbox word in place (mb aliases word0). */
	word0 = 0;
	mb = (MAILBOX_t *) &word0;
	mb->mbxCommand = MBX_RESTART;
	mb->mbxHc = 1;

	lpfc_reset_barrier(phba);

	/* Post the RESTART command into SLIM word 0. */
	to_slim = phba->MBslimaddr;
	writel(*(uint32_t *) mb, to_slim);
	readl(to_slim);

	/* SLIM word 1: non-zero tells firmware the port was initialized. */
	if (phba->pport->port_state)
		word0 = 1;
	else
		word0 = 0;
	to_slim = phba->MBslimaddr + sizeof (uint32_t);
	writel(*(uint32_t *) mb, to_slim);
	readl(to_slim);

	lpfc_sli_brdreset(phba);
	phba->pport->stopped = 0;
	phba->link_state = LPFC_INIT_START;
	phba->hba_flag = 0;
	spin_unlock_irq(&phba->hbalock);

	memset(&psli->lnk_stat_offsets, 0, sizeof(psli->lnk_stat_offsets));
	psli->stats_start = get_seconds();

	/* Give the HBA a moment to settle after the restart. */
	mdelay(100);

	/* AER was cleared with hba_flag; disable PCIe error reporting too. */
	if (hba_aer_enabled)
		pci_disable_pcie_error_reporting(phba->pcidev);

	lpfc_hba_down_post(phba);

	return 0;
}
4216
4217
4218
4219
4220
4221
4222
4223
4224
4225
/**
 * lpfc_sli_brdrestart_s4 - Restart an SLI-4 HBA
 * @phba: pointer to the lpfc HBA context object.
 *
 * Performs an SLI-4 board reset, then clears driver state (stopped flag,
 * link state, hba_flag), resets link statistics, tears down AER reporting
 * if it had been enabled, and runs HBA-down post handling.
 *
 * Return: the result of lpfc_sli4_brdreset().
 */
static int
lpfc_sli_brdrestart_s4(struct lpfc_hba *phba)
{
	struct lpfc_sli *psli = &phba->sli;
	uint32_t hba_aer_enabled;
	int rc;

	lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
			"0296 Restart HBA Data: x%x x%x\n",
			phba->pport->port_state, psli->sli_flag);

	/* Remember AER state; it is torn down after the reset below. */
	hba_aer_enabled = phba->hba_flag & HBA_AER_ENABLED;

	rc = lpfc_sli4_brdreset(phba);

	spin_lock_irq(&phba->hbalock);
	phba->pport->stopped = 0;
	phba->link_state = LPFC_INIT_START;
	phba->hba_flag = 0;
	spin_unlock_irq(&phba->hbalock);

	memset(&psli->lnk_stat_offsets, 0, sizeof(psli->lnk_stat_offsets));
	psli->stats_start = get_seconds();

	/* AER was cleared with hba_flag; disable PCIe error reporting too. */
	if (hba_aer_enabled)
		pci_disable_pcie_error_reporting(phba->pcidev);

	lpfc_hba_down_post(phba);

	return rc;
}
4260
4261
4262
4263
4264
4265
4266
4267
4268int
4269lpfc_sli_brdrestart(struct lpfc_hba *phba)
4270{
4271 return phba->lpfc_sli_brdrestart(phba);
4272}
4273
4274
4275
4276
4277
4278
4279
4280
4281
4282
4283
/**
 * lpfc_sli_chipset_init - Wait for an SLI-3 chipset to finish initializing
 * @phba: pointer to the lpfc HBA context object.
 *
 * Polls the Host Status register until both HS_FFRDY and HS_MBRDY are set,
 * with escalating delays (10 ms, 100 ms, 1 s) up to 200 iterations.  On
 * iteration 150 the board is restarted once as a recovery attempt.
 * HS_FFERM at any point is a fatal chipset error.  On success, interrupts
 * are left disabled (HC cleared) and all host attentions acknowledged.
 *
 * Return: 0 on success; -EIO on register-read failure or chipset error;
 * -ETIMEDOUT if the ready bits never assert.
 */
static int
lpfc_sli_chipset_init(struct lpfc_hba *phba)
{
	uint32_t status, i = 0;

	/* Initial read; a failed readl means the device is gone. */
	if (lpfc_readl(phba->HSregaddr, &status))
		return -EIO;

	/* Poll until both firmware-ready and mailbox-ready bits are set. */
	i = 0;
	while ((status & (HS_FFRDY | HS_MBRDY)) != (HS_FFRDY | HS_MBRDY)) {

		/*
		 * Give up after 200 iterations; with the delay ladder below
		 * that amounts to well over a minute of waiting.
		 */
		if (i++ >= 200) {
			/* Timed out: log FW scratch words and fail. */
			lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
					"0436 Adapter failed to init, "
					"timeout, status reg x%x, "
					"FW Data: A8 x%x AC x%x\n", status,
					readl(phba->MBslimaddr + 0xa8),
					readl(phba->MBslimaddr + 0xac));
			phba->link_state = LPFC_HBA_ERROR;
			return -ETIMEDOUT;
		}

		/* Firmware error bit asserted mid-init: fatal. */
		if (status & HS_FFERM) {
			lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
					"0437 Adapter failed to init, "
					"chipset, status reg x%x, "
					"FW Data: A8 x%x AC x%x\n", status,
					readl(phba->MBslimaddr + 0xa8),
					readl(phba->MBslimaddr + 0xac));
			phba->link_state = LPFC_HBA_ERROR;
			return -EIO;
		}

		/* Delay ladder: 10 ms, then 100 ms, then 1 s per iteration. */
		if (i <= 10)
			msleep(10);
		else if (i <= 100)
			msleep(100);
		else
			msleep(1000);

		if (i == 150) {
			/* Mid-poll recovery: restart the board once. */
			phba->pport->port_state = LPFC_VPORT_UNKNOWN;
			lpfc_sli_brdrestart(phba);
		}

		if (lpfc_readl(phba->HSregaddr, &status))
			return -EIO;
	}

	/* Final check: ready bits set but firmware error also raised. */
	if (status & HS_FFERM) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"0438 Adapter failed to init, chipset, "
				"status reg x%x, "
				"FW Data: A8 x%x AC x%x\n", status,
				readl(phba->MBslimaddr + 0xa8),
				readl(phba->MBslimaddr + 0xac));
		phba->link_state = LPFC_HBA_ERROR;
		return -EIO;
	}

	/* Leave interrupts disabled until setup completes. */
	writel(0, phba->HCregaddr);
	readl(phba->HCregaddr);

	/* Acknowledge all pending host attentions. */
	writel(0xffffffff, phba->HAregaddr);
	readl(phba->HAregaddr);
	return 0;
}
4373
4374
4375
4376
4377
4378
4379
4380int
4381lpfc_sli_hbq_count(void)
4382{
4383 return ARRAY_SIZE(lpfc_hbq_defs);
4384}
4385
4386
4387
4388
4389
4390
4391
4392
4393static int
4394lpfc_sli_hbq_entry_count(void)
4395{
4396 int hbq_count = lpfc_sli_hbq_count();
4397 int count = 0;
4398 int i;
4399
4400 for (i = 0; i < hbq_count; ++i)
4401 count += lpfc_hbq_defs[i]->entry_count;
4402 return count;
4403}
4404
4405
4406
4407
4408
4409
4410
4411int
4412lpfc_sli_hbq_size(void)
4413{
4414 return lpfc_sli_hbq_entry_count() * sizeof(struct lpfc_hbq_entry);
4415}
4416
4417
4418
4419
4420
4421
4422
4423
4424
4425
4426static int
4427lpfc_sli_hbq_setup(struct lpfc_hba *phba)
4428{
4429 int hbq_count = lpfc_sli_hbq_count();
4430 LPFC_MBOXQ_t *pmb;
4431 MAILBOX_t *pmbox;
4432 uint32_t hbqno;
4433 uint32_t hbq_entry_index;
4434
4435
4436
4437
4438 pmb = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
4439
4440 if (!pmb)
4441 return -ENOMEM;
4442
4443 pmbox = &pmb->u.mb;
4444
4445
4446 phba->link_state = LPFC_INIT_MBX_CMDS;
4447 phba->hbq_in_use = 1;
4448
4449 hbq_entry_index = 0;
4450 for (hbqno = 0; hbqno < hbq_count; ++hbqno) {
4451 phba->hbqs[hbqno].next_hbqPutIdx = 0;
4452 phba->hbqs[hbqno].hbqPutIdx = 0;
4453 phba->hbqs[hbqno].local_hbqGetIdx = 0;
4454 phba->hbqs[hbqno].entry_count =
4455 lpfc_hbq_defs[hbqno]->entry_count;
4456 lpfc_config_hbq(phba, hbqno, lpfc_hbq_defs[hbqno],
4457 hbq_entry_index, pmb);
4458 hbq_entry_index += phba->hbqs[hbqno].entry_count;
4459
4460 if (lpfc_sli_issue_mbox(phba, pmb, MBX_POLL) != MBX_SUCCESS) {
4461
4462
4463
4464 lpfc_printf_log(phba, KERN_ERR,
4465 LOG_SLI | LOG_VPORT,
4466 "1805 Adapter failed to init. "
4467 "Data: x%x x%x x%x\n",
4468 pmbox->mbxCommand,
4469 pmbox->mbxStatus, hbqno);
4470
4471 phba->link_state = LPFC_HBA_ERROR;
4472 mempool_free(pmb, phba->mbox_mem_pool);
4473 return -ENXIO;
4474 }
4475 }
4476 phba->hbq_count = hbq_count;
4477
4478 mempool_free(pmb, phba->mbox_mem_pool);
4479
4480
4481 for (hbqno = 0; hbqno < hbq_count; ++hbqno)
4482 lpfc_sli_hbqbuf_init_hbqs(phba, hbqno);
4483 return 0;
4484}
4485
4486
4487
4488
4489
4490
4491
4492
4493
4494
4495static int
4496lpfc_sli4_rb_setup(struct lpfc_hba *phba)
4497{
4498 phba->hbq_in_use = 1;
4499 phba->hbqs[0].entry_count = lpfc_hbq_defs[0]->entry_count;
4500 phba->hbq_count = 1;
4501
4502 lpfc_sli_hbqbuf_init_hbqs(phba, 0);
4503 return 0;
4504}
4505
4506
4507
4508
4509
4510
4511
4512
4513
4514
4515
4516
4517
4518
/**
 * lpfc_sli_config_port - Issue the CONFIG_PORT mailbox command
 * @phba: Pointer to HBA context object.
 * @sli_mode: SLI revision (2 or 3) to configure the port for.
 *
 * Restarts the HBA (at most two attempts) and issues a CONFIG_PORT
 * mailbox command in polled mode.  On SLI-3 success, records which
 * optional features (NPIV, HBQs, CRP, BlockGuard, DSS) the port
 * granted in phba->sli3_options and sets up the host group pointers.
 *
 * Return: 0 on success; negative errno on failure.
 */
int
lpfc_sli_config_port(struct lpfc_hba *phba, int sli_mode)
{
	LPFC_MBOXQ_t *pmb;
	uint32_t resetcount = 0, rc = 0, done = 0;

	pmb = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!pmb) {
		phba->link_state = LPFC_HBA_ERROR;
		return -ENOMEM;
	}

	phba->sli_rev = sli_mode;
	/* Try at most twice: a -ERESTART from the prep step below restarts
	 * the loop for one more reset attempt.
	 */
	while (resetcount < 2 && !done) {
		spin_lock_irq(&phba->hbalock);
		phba->sli.sli_flag |= LPFC_SLI_MBOX_ACTIVE;
		spin_unlock_irq(&phba->hbalock);
		phba->pport->port_state = LPFC_VPORT_UNKNOWN;
		lpfc_sli_brdrestart(phba);
		rc = lpfc_sli_chipset_init(phba);
		if (rc)
			break;

		spin_lock_irq(&phba->hbalock);
		phba->sli.sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
		spin_unlock_irq(&phba->hbalock);
		resetcount++;

		/* Pre-CONFIG_PORT initialization.  0 means success; -ERESTART
		 * asks for another board reset/retry; any other nonzero value
		 * is a hard failure.
		 */
		rc = lpfc_config_port_prep(phba);
		if (rc == -ERESTART) {
			phba->link_state = LPFC_LINK_UNKNOWN;
			continue;
		} else if (rc)
			break;

		phba->link_state = LPFC_INIT_MBX_CMDS;
		lpfc_config_port(phba, pmb);
		rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL);
		/* Clear all optional-feature flags; they are re-set further
		 * down based on what the port actually granted.
		 */
		phba->sli3_options &= ~(LPFC_SLI3_NPIV_ENABLED |
					LPFC_SLI3_HBQ_ENABLED |
					LPFC_SLI3_CRP_ENABLED |
					LPFC_SLI3_BG_ENABLED |
					LPFC_SLI3_DSS_ENABLED);
		if (rc != MBX_SUCCESS) {
			lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"0442 Adapter failed to init, mbxCmd x%x "
				"CONFIG_PORT, mbxStatus x%x Data: x%x\n",
				pmb->u.mb.mbxCommand, pmb->u.mb.mbxStatus, 0);
			spin_lock_irq(&phba->hbalock);
			phba->sli.sli_flag &= ~LPFC_SLI_ACTIVE;
			spin_unlock_irq(&phba->hbalock);
			rc = -ENXIO;
		} else {
			/* Allow asynchronous mailbox commands again. */
			spin_lock_irq(&phba->hbalock);
			phba->sli.sli_flag &= ~LPFC_SLI_ASYNC_MBX_BLK;
			spin_unlock_irq(&phba->hbalock);
			done = 1;

			if ((pmb->u.mb.un.varCfgPort.casabt == 1) &&
			    (pmb->u.mb.un.varCfgPort.gasabt == 0))
				lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
						"3110 Port did not grant ASABT\n");
		}
	}
	if (!done) {
		rc = -EINVAL;
		goto do_prep_failed;
	}
	if (pmb->u.mb.un.varCfgPort.sli_mode == 3) {
		/* Port must grant max-cmd support in SLI-3 mode. */
		if (!pmb->u.mb.un.varCfgPort.cMA) {
			rc = -ENXIO;
			goto do_prep_failed;
		}
		/* NPIV is usable only if the port granted multi-VPI. */
		if (phba->max_vpi && pmb->u.mb.un.varCfgPort.gmv) {
			phba->sli3_options |= LPFC_SLI3_NPIV_ENABLED;
			phba->max_vpi = pmb->u.mb.un.varCfgPort.max_vpi;
			phba->max_vports = (phba->max_vpi > phba->max_vports) ?
				phba->max_vpi : phba->max_vports;

		} else
			phba->max_vpi = 0;
		phba->fips_level = 0;
		phba->fips_spec_rev = 0;
		if (pmb->u.mb.un.varCfgPort.gdss) {
			phba->sli3_options |= LPFC_SLI3_DSS_ENABLED;
			phba->fips_level = pmb->u.mb.un.varCfgPort.fips_level;
			phba->fips_spec_rev = pmb->u.mb.un.varCfgPort.fips_rev;
			lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
					"2850 Security Crypto Active. FIPS x%d "
					"(Spec Rev: x%d)",
					phba->fips_level, phba->fips_spec_rev);
		}
		if (pmb->u.mb.un.varCfgPort.sec_err) {
			lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
					"2856 Config Port Security Crypto "
					"Error: x%x ",
					pmb->u.mb.un.varCfgPort.sec_err);
		}
		if (pmb->u.mb.un.varCfgPort.gerbm)
			phba->sli3_options |= LPFC_SLI3_HBQ_ENABLED;
		if (pmb->u.mb.un.varCfgPort.gcrp)
			phba->sli3_options |= LPFC_SLI3_CRP_ENABLED;

		/* SLI-3 host group pointers live in the s3_pgp layout. */
		phba->hbq_get = phba->mbox->us.s3_pgp.hbq_get;
		phba->port_gp = phba->mbox->us.s3_pgp.port;

		if (phba->cfg_enable_bg) {
			if (pmb->u.mb.un.varCfgPort.gbg)
				phba->sli3_options |= LPFC_SLI3_BG_ENABLED;
			else
				lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
						"0443 Adapter did not grant "
						"BlockGuard\n");
		}
	} else {
		/* SLI-2: no HBQs, no NPIV; use the s2 group pointer layout. */
		phba->hbq_get = NULL;
		phba->port_gp = phba->mbox->us.s2.port;
		phba->max_vpi = 0;
	}
do_prep_failed:
	mempool_free(pmb, phba->mbox_mem_pool);
	return rc;
}
4648
4649
4650
4651
4652
4653
4654
4655
4656
4657
4658
4659
4660
4661
4662
/**
 * lpfc_sli_hba_setup - SLI initialization entry point
 * @phba: Pointer to HBA context object.
 *
 * Selects the SLI mode (honoring the lpfc_sli_mode module parameter
 * and falling back from SLI-3 to SLI-2 when the adapter refuses),
 * optionally enables PCIe AER, sizes the IOCB command/response
 * entries for the negotiated mode, maps the rings, allocates SLI-3
 * VPI bookkeeping, sets up HBQs when the port granted them, and runs
 * post-CONFIG_PORT initialization.
 *
 * Return: 0 on success; nonzero on failure (link_state is set to
 * LPFC_HBA_ERROR on the error path).
 */
int
lpfc_sli_hba_setup(struct lpfc_hba *phba)
{
	uint32_t rc;
	int mode = 3, i;
	int longs;

	/* Resolve the requested SLI mode; NPIV requires SLI-3. */
	switch (lpfc_sli_mode) {
	case 2:
		if (phba->cfg_enable_npiv) {
			lpfc_printf_log(phba, KERN_ERR, LOG_INIT | LOG_VPORT,
				"1824 NPIV enabled: Override lpfc_sli_mode "
				"parameter (%d) to auto (0).\n",
				lpfc_sli_mode);
			break;
		}
		mode = 2;
		break;
	case 0:
	case 3:
		break;
	default:
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT | LOG_VPORT,
				"1819 Unrecognized lpfc_sli_mode "
				"parameter: %d.\n", lpfc_sli_mode);

		break;
	}

	rc = lpfc_sli_config_port(phba, mode);

	if (rc && lpfc_sli_mode == 3)
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT | LOG_VPORT,
				"1820 Unable to select SLI-3. "
				"Not supported by adapter.\n");
	/* SLI-3 failed: retry in SLI-2 mode unless that's what failed. */
	if (rc && mode != 2)
		rc = lpfc_sli_config_port(phba, 2);
	if (rc)
		goto lpfc_sli_hba_setup_error;

	/* Enable PCIe Advanced Error Reporting if requested and possible. */
	if (phba->cfg_aer_support == 1 && !(phba->hba_flag & HBA_AER_ENABLED)) {
		rc = pci_enable_pcie_error_reporting(phba->pcidev);
		if (!rc) {
			lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
					"2709 This device supports "
					"Advanced Error Reporting (AER)\n");
			spin_lock_irq(&phba->hbalock);
			phba->hba_flag |= HBA_AER_ENABLED;
			spin_unlock_irq(&phba->hbalock);
		} else {
			lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
					"2708 This device does not support "
					"Advanced Error Reporting (AER): %d\n",
					rc);
			phba->cfg_aer_support = 0;
		}
	}

	/* IOCB entry sizes differ between SLI-2 and SLI-3. */
	if (phba->sli_rev == 3) {
		phba->iocb_cmd_size = SLI3_IOCB_CMD_SIZE;
		phba->iocb_rsp_size = SLI3_IOCB_RSP_SIZE;
	} else {
		phba->iocb_cmd_size = SLI2_IOCB_CMD_SIZE;
		phba->iocb_rsp_size = SLI2_IOCB_RSP_SIZE;
		phba->sli3_options = 0;
	}

	lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
			"0444 Firmware in SLI %x mode. Max_vpi %d\n",
			phba->sli_rev, phba->max_vpi);
	rc = lpfc_sli_ring_map(phba);

	if (rc)
		goto lpfc_sli_hba_setup_error;

	/* Initialize VPI bookkeeping (SLI-3 only).  The bitmask and id
	 * array may survive a reset, so only allocate when both are absent.
	 */
	if (phba->sli_rev == LPFC_SLI_REV3) {
		/* Allocate the bitmask (covers vpi indices 0..max_vpi) and
		 * the 1:1 vpi id translation array.
		 */
		if ((phba->vpi_bmask == NULL) && (phba->vpi_ids == NULL)) {
			longs = (phba->max_vpi + BITS_PER_LONG) / BITS_PER_LONG;
			phba->vpi_bmask = kzalloc(longs * sizeof(unsigned long),
						  GFP_KERNEL);
			if (!phba->vpi_bmask) {
				rc = -ENOMEM;
				goto lpfc_sli_hba_setup_error;
			}

			phba->vpi_ids = kzalloc(
					(phba->max_vpi+1) * sizeof(uint16_t),
					GFP_KERNEL);
			if (!phba->vpi_ids) {
				kfree(phba->vpi_bmask);
				rc = -ENOMEM;
				goto lpfc_sli_hba_setup_error;
			}
			for (i = 0; i < phba->max_vpi; i++)
				phba->vpi_ids[i] = i;
		}
	}

	/* Set up HBQs only if the port granted them in CONFIG_PORT. */
	if (phba->sli3_options & LPFC_SLI3_HBQ_ENABLED) {
		rc = lpfc_sli_hbq_setup(phba);
		if (rc)
			goto lpfc_sli_hba_setup_error;
	}
	spin_lock_irq(&phba->hbalock);
	phba->sli.sli_flag |= LPFC_PROCESS_LA;
	spin_unlock_irq(&phba->hbalock);

	rc = lpfc_config_port_post(phba);
	if (rc)
		goto lpfc_sli_hba_setup_error;

	return rc;

lpfc_sli_hba_setup_error:
	phba->link_state = LPFC_HBA_ERROR;
	lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
			"0445 Firmware initialization failed\n");
	return rc;
}
4790
4791
4792
4793
4794
4795
4796
4797
4798
/**
 * lpfc_sli4_read_fcoe_params - Read FCoE parameters from config region 23
 * @phba: Pointer to HBA context object.
 *
 * Seeds the default FC map and VLAN state, then reads config region 23
 * with a polled DUMP mailbox command and parses any FCoE parameter
 * record found there.
 *
 * Return: 0 on success, -ENOMEM on allocation failure, -EIO when the
 * mailbox command fails or the returned length exceeds DMP_RGN23_SIZE.
 */
static int
lpfc_sli4_read_fcoe_params(struct lpfc_hba *phba)
{
	LPFC_MBOXQ_t *mboxq;
	struct lpfc_dmabuf *mp;
	struct lpfc_mqe *mqe;
	uint32_t data_length;
	int rc;

	/* Defaults used if region 23 provides no override. */
	phba->valid_vlan = 0;
	phba->fc_map[0] = LPFC_FCOE_FCF_MAP0;
	phba->fc_map[1] = LPFC_FCOE_FCF_MAP1;
	phba->fc_map[2] = LPFC_FCOE_FCF_MAP2;

	mboxq = (LPFC_MBOXQ_t *)mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!mboxq)
		return -ENOMEM;

	mqe = &mboxq->u.mqe;
	/* Sets up the DUMP command and attaches a DMA buffer (context1). */
	if (lpfc_sli4_dump_cfg_rg23(phba, mboxq)) {
		rc = -ENOMEM;
		goto out_free_mboxq;
	}

	mp = (struct lpfc_dmabuf *) mboxq->context1;
	rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);

	lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI,
			"(%d):2571 Mailbox cmd x%x Status x%x "
			"Data: x%x x%x x%x x%x x%x x%x x%x x%x x%x "
			"x%x x%x x%x x%x x%x x%x x%x x%x x%x "
			"CQ: x%x x%x x%x x%x\n",
			mboxq->vport ? mboxq->vport->vpi : 0,
			bf_get(lpfc_mqe_command, mqe),
			bf_get(lpfc_mqe_status, mqe),
			mqe->un.mb_words[0], mqe->un.mb_words[1],
			mqe->un.mb_words[2], mqe->un.mb_words[3],
			mqe->un.mb_words[4], mqe->un.mb_words[5],
			mqe->un.mb_words[6], mqe->un.mb_words[7],
			mqe->un.mb_words[8], mqe->un.mb_words[9],
			mqe->un.mb_words[10], mqe->un.mb_words[11],
			mqe->un.mb_words[12], mqe->un.mb_words[13],
			mqe->un.mb_words[14], mqe->un.mb_words[15],
			mqe->un.mb_words[16], mqe->un.mb_words[50],
			mboxq->mcqe.word0,
			mboxq->mcqe.mcqe_tag0, mboxq->mcqe.mcqe_tag1,
			mboxq->mcqe.trailer);

	if (rc) {
		lpfc_mbuf_free(phba, mp->virt, mp->phys);
		kfree(mp);
		rc = -EIO;
		goto out_free_mboxq;
	}
	/* Word 5 of the response carries the region data byte count. */
	data_length = mqe->un.mb_words[5];
	if (data_length > DMP_RGN23_SIZE) {
		lpfc_mbuf_free(phba, mp->virt, mp->phys);
		kfree(mp);
		rc = -EIO;
		goto out_free_mboxq;
	}

	lpfc_parse_fcoe_conf(phba, mp->virt, data_length);
	lpfc_mbuf_free(phba, mp->virt, mp->phys);
	kfree(mp);
	rc = 0;

out_free_mboxq:
	mempool_free(mboxq, phba->mbox_mem_pool);
	return rc;
}
4871
4872
4873
4874
4875
4876
4877
4878
4879
4880
4881
4882
4883
4884
4885
4886
/**
 * lpfc_sli4_read_rev - Issue READ_REV and collect VPD data
 * @phba: Pointer to HBA context object.
 * @mboxq: Caller-supplied mailbox command to use.
 * @vpd: Buffer to receive the VPD data.
 * @vpd_size: On entry, the size of @vpd; on exit, the number of bytes
 *            actually copied.
 *
 * Allocates a DMA-coherent buffer, points the READ_REV mailbox command
 * at it, issues the command in polled mode, and copies the returned
 * VPD into @vpd.
 *
 * Return: 0 on success, -ENOMEM on allocation failure, -EIO if the
 * mailbox command fails.
 */
static int
lpfc_sli4_read_rev(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq,
		   uint8_t *vpd, uint32_t *vpd_size)
{
	int rc = 0;
	uint32_t dma_size;
	struct lpfc_dmabuf *dmabuf;
	struct lpfc_mqe *mqe;

	dmabuf = kzalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
	if (!dmabuf)
		return -ENOMEM;

	/* DMA buffer sized to the caller's VPD buffer; the port writes
	 * the VPD data here.
	 */
	dma_size = *vpd_size;
	dmabuf->virt = dma_zalloc_coherent(&phba->pcidev->dev, dma_size,
					   &dmabuf->phys, GFP_KERNEL);
	if (!dmabuf->virt) {
		kfree(dmabuf);
		return -ENOMEM;
	}

	/* Build READ_REV: point the command at the DMA buffer, request
	 * VPD, and advertise the available buffer length.
	 */
	lpfc_read_rev(phba, mboxq);
	mqe = &mboxq->u.mqe;
	mqe->un.read_rev.vpd_paddr_high = putPaddrHigh(dmabuf->phys);
	mqe->un.read_rev.vpd_paddr_low = putPaddrLow(dmabuf->phys);
	mqe->un.read_rev.word1 &= 0x0000FFFF;
	bf_set(lpfc_mbx_rd_rev_vpd, &mqe->un.read_rev, 1);
	bf_set(lpfc_mbx_rd_rev_avail_len, &mqe->un.read_rev, dma_size);

	rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
	if (rc) {
		dma_free_coherent(&phba->pcidev->dev, dma_size,
				  dmabuf->virt, dmabuf->phys);
		kfree(dmabuf);
		return -EIO;
	}

	/* The port may return less VPD than requested; shrink the copy
	 * (and the reported size) accordingly.
	 */
	if (mqe->un.read_rev.avail_vpd_len < *vpd_size)
		*vpd_size = mqe->un.read_rev.avail_vpd_len;

	memcpy(vpd, dmabuf->virt, *vpd_size);

	dma_free_coherent(&phba->pcidev->dev, dma_size,
			  dmabuf->virt, dmabuf->phys);
	kfree(dmabuf);
	return 0;
}
4948
4949
4950
4951
4952
4953
4954
4955
4956
4957
4958
4959
/**
 * lpfc_sli4_retrieve_pport_name - Retrieve the SLI-4 physical port name
 * @phba: Pointer to HBA context object.
 *
 * Determines the link type/number (via READ_CONFIG, or a
 * GET_CNTL_ATTRIBUTES mailbox command when READ_CONFIG did not supply
 * them) and then issues GET_PORT_NAME to fetch the single-character
 * port name for that link, storing it in phba->Port.
 *
 * Return: 0 on success; -ENOMEM or -ENXIO on failure.  Note that on
 * the success path the returned value is the last mailbox status
 * (0 == MBX_SUCCESS).
 */
static int
lpfc_sli4_retrieve_pport_name(struct lpfc_hba *phba)
{
	LPFC_MBOXQ_t *mboxq;
	struct lpfc_mbx_get_cntl_attributes *mbx_cntl_attr;
	struct lpfc_controller_attribute *cntl_attr;
	struct lpfc_mbx_get_port_name *get_port_name;
	void *virtaddr = NULL;
	uint32_t alloclen, reqlen;
	uint32_t shdr_status, shdr_add_status;
	union lpfc_sli4_cfg_shdr *shdr;
	char cport_name = 0;
	int rc;

	/* Start pessimistic: no valid link data, no port name yet. */
	phba->sli4_hba.lnk_info.lnk_dv = LPFC_LNK_DAT_INVAL;
	phba->sli4_hba.pport_name_sta = LPFC_SLI4_PPNAME_NON;

	mboxq = (LPFC_MBOXQ_t *)mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!mboxq)
		return -ENOMEM;

	/* READ_CONFIG may fill in lnk_info; if it does, skip the
	 * GET_CNTL_ATTRIBUTES round trip below.
	 */
	phba->sli4_hba.lnk_info.lnk_dv = LPFC_LNK_DAT_INVAL;
	lpfc_sli4_read_config(phba);
	if (phba->sli4_hba.lnk_info.lnk_dv == LPFC_LNK_DAT_VAL)
		goto retrieve_ppname;

	/* Fetch controller attributes (non-embedded SGE payload). */
	reqlen = sizeof(struct lpfc_mbx_get_cntl_attributes);
	alloclen = lpfc_sli4_config(phba, mboxq, LPFC_MBOX_SUBSYSTEM_COMMON,
			LPFC_MBOX_OPCODE_GET_CNTL_ATTRIBUTES, reqlen,
			LPFC_SLI4_MBX_NEMBED);
	if (alloclen < reqlen) {
		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
				"3084 Allocated DMA memory size (%d) is "
				"less than the requested DMA memory size "
				"(%d)\n", alloclen, reqlen);
		rc = -ENOMEM;
		goto out_free_mboxq;
	}
	rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
	virtaddr = mboxq->sge_array->addr[0];
	mbx_cntl_attr = (struct lpfc_mbx_get_cntl_attributes *)virtaddr;
	shdr = &mbx_cntl_attr->cfg_shdr;
	shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
	shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
	if (shdr_status || shdr_add_status || rc) {
		lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
				"3085 Mailbox x%x (x%x/x%x) failed, "
				"rc:x%x, status:x%x, add_status:x%x\n",
				bf_get(lpfc_mqe_command, &mboxq->u.mqe),
				lpfc_sli_config_mbox_subsys_get(phba, mboxq),
				lpfc_sli_config_mbox_opcode_get(phba, mboxq),
				rc, shdr_status, shdr_add_status);
		rc = -ENXIO;
		goto out_free_mboxq;
	}
	/* Cache the link type/number from the controller attributes. */
	cntl_attr = &mbx_cntl_attr->cntl_attr;
	phba->sli4_hba.lnk_info.lnk_dv = LPFC_LNK_DAT_VAL;
	phba->sli4_hba.lnk_info.lnk_tp =
		bf_get(lpfc_cntl_attr_lnk_type, cntl_attr);
	phba->sli4_hba.lnk_info.lnk_no =
		bf_get(lpfc_cntl_attr_lnk_numb, cntl_attr);
	lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
			"3086 lnk_type:%d, lnk_numb:%d\n",
			phba->sli4_hba.lnk_info.lnk_tp,
			phba->sli4_hba.lnk_info.lnk_no);

retrieve_ppname:
	/* Reuse the same mailbox for GET_PORT_NAME (embedded, version 1). */
	lpfc_sli4_config(phba, mboxq, LPFC_MBOX_SUBSYSTEM_COMMON,
		LPFC_MBOX_OPCODE_GET_PORT_NAME,
		sizeof(struct lpfc_mbx_get_port_name) -
		sizeof(struct lpfc_sli4_cfg_mhdr),
		LPFC_SLI4_MBX_EMBED);
	get_port_name = &mboxq->u.mqe.un.get_port_name;
	shdr = (union lpfc_sli4_cfg_shdr *)&get_port_name->header.cfg_shdr;
	bf_set(lpfc_mbox_hdr_version, &shdr->request, LPFC_OPCODE_VERSION_1);
	bf_set(lpfc_mbx_get_port_name_lnk_type, &get_port_name->u.request,
		phba->sli4_hba.lnk_info.lnk_tp);
	rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
	shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
	shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
	if (shdr_status || shdr_add_status || rc) {
		lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
				"3087 Mailbox x%x (x%x/x%x) failed: "
				"rc:x%x, status:x%x, add_status:x%x\n",
				bf_get(lpfc_mqe_command, &mboxq->u.mqe),
				lpfc_sli_config_mbox_subsys_get(phba, mboxq),
				lpfc_sli_config_mbox_opcode_get(phba, mboxq),
				rc, shdr_status, shdr_add_status);
		rc = -ENXIO;
		goto out_free_mboxq;
	}
	/* The response packs one name character per link number. */
	switch (phba->sli4_hba.lnk_info.lnk_no) {
	case LPFC_LINK_NUMBER_0:
		cport_name = bf_get(lpfc_mbx_get_port_name_name0,
				&get_port_name->u.response);
		phba->sli4_hba.pport_name_sta = LPFC_SLI4_PPNAME_GET;
		break;
	case LPFC_LINK_NUMBER_1:
		cport_name = bf_get(lpfc_mbx_get_port_name_name1,
				&get_port_name->u.response);
		phba->sli4_hba.pport_name_sta = LPFC_SLI4_PPNAME_GET;
		break;
	case LPFC_LINK_NUMBER_2:
		cport_name = bf_get(lpfc_mbx_get_port_name_name2,
				&get_port_name->u.response);
		phba->sli4_hba.pport_name_sta = LPFC_SLI4_PPNAME_GET;
		break;
	case LPFC_LINK_NUMBER_3:
		cport_name = bf_get(lpfc_mbx_get_port_name_name3,
				&get_port_name->u.response);
		phba->sli4_hba.pport_name_sta = LPFC_SLI4_PPNAME_GET;
		break;
	default:
		break;
	}

	if (phba->sli4_hba.pport_name_sta == LPFC_SLI4_PPNAME_GET) {
		phba->Port[0] = cport_name;
		phba->Port[1] = '\0';
		lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
				"3091 SLI get port name: %s\n", phba->Port);
	}

out_free_mboxq:
	/* On MBX_TIMEOUT the mailbox is not freed here — presumably the
	 * port may still own it; confirm against the completion path.
	 */
	if (rc != MBX_TIMEOUT) {
		if (bf_get(lpfc_mqe_command, &mboxq->u.mqe) == MBX_SLI4_CONFIG)
			lpfc_sli4_mbox_cmd_free(phba, mboxq);
		else
			mempool_free(mboxq, phba->mbox_mem_pool);
	}
	return rc;
}
5094
5095
5096
5097
5098
5099
5100
5101
5102static void
5103lpfc_sli4_arm_cqeq_intr(struct lpfc_hba *phba)
5104{
5105 int fcp_eqidx;
5106
5107 lpfc_sli4_cq_release(phba->sli4_hba.mbx_cq, LPFC_QUEUE_REARM);
5108 lpfc_sli4_cq_release(phba->sli4_hba.els_cq, LPFC_QUEUE_REARM);
5109 fcp_eqidx = 0;
5110 if (phba->sli4_hba.fcp_cq) {
5111 do {
5112 lpfc_sli4_cq_release(phba->sli4_hba.fcp_cq[fcp_eqidx],
5113 LPFC_QUEUE_REARM);
5114 } while (++fcp_eqidx < phba->cfg_fcp_io_channel);
5115 }
5116
5117 if (phba->cfg_fof)
5118 lpfc_sli4_cq_release(phba->sli4_hba.oas_cq, LPFC_QUEUE_REARM);
5119
5120 if (phba->sli4_hba.hba_eq) {
5121 for (fcp_eqidx = 0; fcp_eqidx < phba->cfg_fcp_io_channel;
5122 fcp_eqidx++)
5123 lpfc_sli4_eq_release(phba->sli4_hba.hba_eq[fcp_eqidx],
5124 LPFC_QUEUE_REARM);
5125 }
5126
5127 if (phba->cfg_fof)
5128 lpfc_sli4_eq_release(phba->sli4_hba.fof_eq, LPFC_QUEUE_REARM);
5129}
5130
5131
5132
5133
5134
5135
5136
5137
5138
5139
5140
5141
5142
/**
 * lpfc_sli4_get_avail_extnt_rsrc - Query available extent resources
 * @phba: Pointer to HBA context object.
 * @type: The extent resource type to query (RPI/VPI/XRI/VFI).
 * @extnt_count: Output, number of extents available for @type.
 * @extnt_size: Output, number of resource identifiers per extent.
 *
 * Issues a GET_RSRC_EXTENT_INFO mailbox command (embedded) and returns
 * the port's advertised extent count and size for @type.
 *
 * Return: 0 on success; -ENOMEM on allocation failure; -EIO on
 * mailbox setup, issue, or status failure.
 */
int
lpfc_sli4_get_avail_extnt_rsrc(struct lpfc_hba *phba, uint16_t type,
			       uint16_t *extnt_count, uint16_t *extnt_size)
{
	int rc = 0;
	uint32_t length;
	uint32_t mbox_tmo;
	struct lpfc_mbx_get_rsrc_extent_info *rsrc_info;
	LPFC_MBOXQ_t *mbox;

	mbox = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!mbox)
		return -ENOMEM;

	/* Payload length excludes the common config header. */
	length = (sizeof(struct lpfc_mbx_get_rsrc_extent_info) -
		  sizeof(struct lpfc_sli4_cfg_mhdr));
	lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON,
			 LPFC_MBOX_OPCODE_GET_RSRC_EXTENT_INFO,
			 length, LPFC_SLI4_MBX_EMBED);

	/* Fill in the requested resource type. */
	rc = lpfc_sli4_mbox_rsrc_extent(phba, mbox, 0, type,
					LPFC_SLI4_MBX_EMBED);
	if (unlikely(rc)) {
		rc = -EIO;
		goto err_exit;
	}

	/* Poll when interrupts are not yet enabled; otherwise wait. */
	if (!phba->sli4_hba.intr_enable)
		rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
	else {
		mbox_tmo = lpfc_mbox_tmo_val(phba, mbox);
		rc = lpfc_sli_issue_mbox_wait(phba, mbox, mbox_tmo);
	}
	if (unlikely(rc)) {
		rc = -EIO;
		goto err_exit;
	}

	rsrc_info = &mbox->u.mqe.un.rsrc_extent_info;
	if (bf_get(lpfc_mbox_hdr_status,
		   &rsrc_info->header.cfg_shdr.response)) {
		lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_INIT,
				"2930 Failed to get resource extents "
				"Status 0x%x Add'l Status 0x%x\n",
				bf_get(lpfc_mbox_hdr_status,
				       &rsrc_info->header.cfg_shdr.response),
				bf_get(lpfc_mbox_hdr_add_status,
				       &rsrc_info->header.cfg_shdr.response));
		rc = -EIO;
		goto err_exit;
	}

	*extnt_count = bf_get(lpfc_mbx_get_rsrc_extent_info_cnt,
			      &rsrc_info->u.rsp);
	*extnt_size = bf_get(lpfc_mbx_get_rsrc_extent_info_size,
			     &rsrc_info->u.rsp);

	lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
			"3162 Retrieved extents type-%d from port: count:%d, "
			"size:%d\n", type, *extnt_count, *extnt_size);

err_exit:
	mempool_free(mbox, phba->mbox_mem_pool);
	return rc;
}
5210
5211
5212
5213
5214
5215
5216
5217
5218
5219
5220
5221
5222
5223
5224
5225
5226static int
5227lpfc_sli4_chk_avail_extnt_rsrc(struct lpfc_hba *phba, uint16_t type)
5228{
5229 uint16_t curr_ext_cnt, rsrc_ext_cnt;
5230 uint16_t size_diff, rsrc_ext_size;
5231 int rc = 0;
5232 struct lpfc_rsrc_blks *rsrc_entry;
5233 struct list_head *rsrc_blk_list = NULL;
5234
5235 size_diff = 0;
5236 curr_ext_cnt = 0;
5237 rc = lpfc_sli4_get_avail_extnt_rsrc(phba, type,
5238 &rsrc_ext_cnt,
5239 &rsrc_ext_size);
5240 if (unlikely(rc))
5241 return -EIO;
5242
5243 switch (type) {
5244 case LPFC_RSC_TYPE_FCOE_RPI:
5245 rsrc_blk_list = &phba->sli4_hba.lpfc_rpi_blk_list;
5246 break;
5247 case LPFC_RSC_TYPE_FCOE_VPI:
5248 rsrc_blk_list = &phba->lpfc_vpi_blk_list;
5249 break;
5250 case LPFC_RSC_TYPE_FCOE_XRI:
5251 rsrc_blk_list = &phba->sli4_hba.lpfc_xri_blk_list;
5252 break;
5253 case LPFC_RSC_TYPE_FCOE_VFI:
5254 rsrc_blk_list = &phba->sli4_hba.lpfc_vfi_blk_list;
5255 break;
5256 default:
5257 break;
5258 }
5259
5260 list_for_each_entry(rsrc_entry, rsrc_blk_list, list) {
5261 curr_ext_cnt++;
5262 if (rsrc_entry->rsrc_size != rsrc_ext_size)
5263 size_diff++;
5264 }
5265
5266 if (curr_ext_cnt != rsrc_ext_cnt || size_diff != 0)
5267 rc = 1;
5268
5269 return rc;
5270}
5271
5272
5273
5274
5275
5276
5277
5278
5279
5280
5281
5282
5283
5284
5285
5286
5287
5288
5289static int
5290lpfc_sli4_cfg_post_extnts(struct lpfc_hba *phba, uint16_t extnt_cnt,
5291 uint16_t type, bool *emb, LPFC_MBOXQ_t *mbox)
5292{
5293 int rc = 0;
5294 uint32_t req_len;
5295 uint32_t emb_len;
5296 uint32_t alloc_len, mbox_tmo;
5297
5298
5299 req_len = extnt_cnt * sizeof(uint16_t);
5300
5301
5302
5303
5304
5305 emb_len = sizeof(MAILBOX_t) - sizeof(struct mbox_header) -
5306 sizeof(uint32_t);
5307
5308
5309
5310
5311
5312 *emb = LPFC_SLI4_MBX_EMBED;
5313 if (req_len > emb_len) {
5314 req_len = extnt_cnt * sizeof(uint16_t) +
5315 sizeof(union lpfc_sli4_cfg_shdr) +
5316 sizeof(uint32_t);
5317 *emb = LPFC_SLI4_MBX_NEMBED;
5318 }
5319
5320 alloc_len = lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON,
5321 LPFC_MBOX_OPCODE_ALLOC_RSRC_EXTENT,
5322 req_len, *emb);
5323 if (alloc_len < req_len) {
5324 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
5325 "2982 Allocated DMA memory size (x%x) is "
5326 "less than the requested DMA memory "
5327 "size (x%x)\n", alloc_len, req_len);
5328 return -ENOMEM;
5329 }
5330 rc = lpfc_sli4_mbox_rsrc_extent(phba, mbox, extnt_cnt, type, *emb);
5331 if (unlikely(rc))
5332 return -EIO;
5333
5334 if (!phba->sli4_hba.intr_enable)
5335 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
5336 else {
5337 mbox_tmo = lpfc_mbox_tmo_val(phba, mbox);
5338 rc = lpfc_sli_issue_mbox_wait(phba, mbox, mbox_tmo);
5339 }
5340
5341 if (unlikely(rc))
5342 rc = -EIO;
5343 return rc;
5344}
5345
5346
5347
5348
5349
5350
5351
5352
5353
5354static int
5355lpfc_sli4_alloc_extent(struct lpfc_hba *phba, uint16_t type)
5356{
5357 bool emb = false;
5358 uint16_t rsrc_id_cnt, rsrc_cnt, rsrc_size;
5359 uint16_t rsrc_id, rsrc_start, j, k;
5360 uint16_t *ids;
5361 int i, rc;
5362 unsigned long longs;
5363 unsigned long *bmask;
5364 struct lpfc_rsrc_blks *rsrc_blks;
5365 LPFC_MBOXQ_t *mbox;
5366 uint32_t length;
5367 struct lpfc_id_range *id_array = NULL;
5368 void *virtaddr = NULL;
5369 struct lpfc_mbx_nembed_rsrc_extent *n_rsrc;
5370 struct lpfc_mbx_alloc_rsrc_extents *rsrc_ext;
5371 struct list_head *ext_blk_list;
5372
5373 rc = lpfc_sli4_get_avail_extnt_rsrc(phba, type,
5374 &rsrc_cnt,
5375 &rsrc_size);
5376 if (unlikely(rc))
5377 return -EIO;
5378
5379 if ((rsrc_cnt == 0) || (rsrc_size == 0)) {
5380 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_INIT,
5381 "3009 No available Resource Extents "
5382 "for resource type 0x%x: Count: 0x%x, "
5383 "Size 0x%x\n", type, rsrc_cnt,
5384 rsrc_size);
5385 return -ENOMEM;
5386 }
5387
5388 lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_INIT | LOG_SLI,
5389 "2903 Post resource extents type-0x%x: "
5390 "count:%d, size %d\n", type, rsrc_cnt, rsrc_size);
5391
5392 mbox = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
5393 if (!mbox)
5394 return -ENOMEM;
5395
5396 rc = lpfc_sli4_cfg_post_extnts(phba, rsrc_cnt, type, &emb, mbox);
5397 if (unlikely(rc)) {
5398 rc = -EIO;
5399 goto err_exit;
5400 }
5401
5402
5403
5404
5405
5406
5407
5408 if (emb == LPFC_SLI4_MBX_EMBED) {
5409 rsrc_ext = &mbox->u.mqe.un.alloc_rsrc_extents;
5410 id_array = &rsrc_ext->u.rsp.id[0];
5411 rsrc_cnt = bf_get(lpfc_mbx_rsrc_cnt, &rsrc_ext->u.rsp);
5412 } else {
5413 virtaddr = mbox->sge_array->addr[0];
5414 n_rsrc = (struct lpfc_mbx_nembed_rsrc_extent *) virtaddr;
5415 rsrc_cnt = bf_get(lpfc_mbx_rsrc_cnt, n_rsrc);
5416 id_array = &n_rsrc->id;
5417 }
5418
5419 longs = ((rsrc_cnt * rsrc_size) + BITS_PER_LONG - 1) / BITS_PER_LONG;
5420 rsrc_id_cnt = rsrc_cnt * rsrc_size;
5421
5422
5423
5424
5425
5426 length = sizeof(struct lpfc_rsrc_blks);
5427 switch (type) {
5428 case LPFC_RSC_TYPE_FCOE_RPI:
5429 phba->sli4_hba.rpi_bmask = kzalloc(longs *
5430 sizeof(unsigned long),
5431 GFP_KERNEL);
5432 if (unlikely(!phba->sli4_hba.rpi_bmask)) {
5433 rc = -ENOMEM;
5434 goto err_exit;
5435 }
5436 phba->sli4_hba.rpi_ids = kzalloc(rsrc_id_cnt *
5437 sizeof(uint16_t),
5438 GFP_KERNEL);
5439 if (unlikely(!phba->sli4_hba.rpi_ids)) {
5440 kfree(phba->sli4_hba.rpi_bmask);
5441 rc = -ENOMEM;
5442 goto err_exit;
5443 }
5444
5445
5446
5447
5448
5449
5450 phba->sli4_hba.next_rpi = rsrc_id_cnt;
5451
5452
5453 bmask = phba->sli4_hba.rpi_bmask;
5454 ids = phba->sli4_hba.rpi_ids;
5455 ext_blk_list = &phba->sli4_hba.lpfc_rpi_blk_list;
5456 break;
5457 case LPFC_RSC_TYPE_FCOE_VPI:
5458 phba->vpi_bmask = kzalloc(longs *
5459 sizeof(unsigned long),
5460 GFP_KERNEL);
5461 if (unlikely(!phba->vpi_bmask)) {
5462 rc = -ENOMEM;
5463 goto err_exit;
5464 }
5465 phba->vpi_ids = kzalloc(rsrc_id_cnt *
5466 sizeof(uint16_t),
5467 GFP_KERNEL);
5468 if (unlikely(!phba->vpi_ids)) {
5469 kfree(phba->vpi_bmask);
5470 rc = -ENOMEM;
5471 goto err_exit;
5472 }
5473
5474
5475 bmask = phba->vpi_bmask;
5476 ids = phba->vpi_ids;
5477 ext_blk_list = &phba->lpfc_vpi_blk_list;
5478 break;
5479 case LPFC_RSC_TYPE_FCOE_XRI:
5480 phba->sli4_hba.xri_bmask = kzalloc(longs *
5481 sizeof(unsigned long),
5482 GFP_KERNEL);
5483 if (unlikely(!phba->sli4_hba.xri_bmask)) {
5484 rc = -ENOMEM;
5485 goto err_exit;
5486 }
5487 phba->sli4_hba.max_cfg_param.xri_used = 0;
5488 phba->sli4_hba.xri_ids = kzalloc(rsrc_id_cnt *
5489 sizeof(uint16_t),
5490 GFP_KERNEL);
5491 if (unlikely(!phba->sli4_hba.xri_ids)) {
5492 kfree(phba->sli4_hba.xri_bmask);
5493 rc = -ENOMEM;
5494 goto err_exit;
5495 }
5496
5497
5498 bmask = phba->sli4_hba.xri_bmask;
5499 ids = phba->sli4_hba.xri_ids;
5500 ext_blk_list = &phba->sli4_hba.lpfc_xri_blk_list;
5501 break;
5502 case LPFC_RSC_TYPE_FCOE_VFI:
5503 phba->sli4_hba.vfi_bmask = kzalloc(longs *
5504 sizeof(unsigned long),
5505 GFP_KERNEL);
5506 if (unlikely(!phba->sli4_hba.vfi_bmask)) {
5507 rc = -ENOMEM;
5508 goto err_exit;
5509 }
5510 phba->sli4_hba.vfi_ids = kzalloc(rsrc_id_cnt *
5511 sizeof(uint16_t),
5512 GFP_KERNEL);
5513 if (unlikely(!phba->sli4_hba.vfi_ids)) {
5514 kfree(phba->sli4_hba.vfi_bmask);
5515 rc = -ENOMEM;
5516 goto err_exit;
5517 }
5518
5519
5520 bmask = phba->sli4_hba.vfi_bmask;
5521 ids = phba->sli4_hba.vfi_ids;
5522 ext_blk_list = &phba->sli4_hba.lpfc_vfi_blk_list;
5523 break;
5524 default:
5525
5526 id_array = NULL;
5527 bmask = NULL;
5528 ids = NULL;
5529 ext_blk_list = NULL;
5530 goto err_exit;
5531 }
5532
5533
5534
5535
5536
5537
5538
5539 for (i = 0, j = 0, k = 0; i < rsrc_cnt; i++) {
5540 if ((i % 2) == 0)
5541 rsrc_id = bf_get(lpfc_mbx_rsrc_id_word4_0,
5542 &id_array[k]);
5543 else
5544 rsrc_id = bf_get(lpfc_mbx_rsrc_id_word4_1,
5545 &id_array[k]);
5546
5547 rsrc_blks = kzalloc(length, GFP_KERNEL);
5548 if (unlikely(!rsrc_blks)) {
5549 rc = -ENOMEM;
5550 kfree(bmask);
5551 kfree(ids);
5552 goto err_exit;
5553 }
5554 rsrc_blks->rsrc_start = rsrc_id;
5555 rsrc_blks->rsrc_size = rsrc_size;
5556 list_add_tail(&rsrc_blks->list, ext_blk_list);
5557 rsrc_start = rsrc_id;
5558 if ((type == LPFC_RSC_TYPE_FCOE_XRI) && (j == 0))
5559 phba->sli4_hba.scsi_xri_start = rsrc_start +
5560 lpfc_sli4_get_els_iocb_cnt(phba);
5561
5562 while (rsrc_id < (rsrc_start + rsrc_size)) {
5563 ids[j] = rsrc_id;
5564 rsrc_id++;
5565 j++;
5566 }
5567
5568 if ((i % 2) == 1)
5569 k++;
5570 }
5571 err_exit:
5572 lpfc_sli4_mbox_cmd_free(phba, mbox);
5573 return rc;
5574}
5575
5576
5577
5578
5579
5580
5581
5582
5583
5584
/**
 * lpfc_sli4_dealloc_extent - Release port extent resources of a given type
 * @phba: Pointer to HBA context object.
 * @type: The resource extent type to release (RPI/VPI/XRI/VFI).
 *
 * Issues a DEALLOC_RSRC_EXTENT mailbox command for @type and, on
 * success, frees the driver's per-type bitmask/id arrays and empties
 * the corresponding extent block list.
 *
 * Return: 0 on success; -ENOMEM on allocation failure; -EIO on
 * mailbox setup, issue, or status failure.
 */
static int
lpfc_sli4_dealloc_extent(struct lpfc_hba *phba, uint16_t type)
{
	int rc;
	uint32_t length, mbox_tmo = 0;
	LPFC_MBOXQ_t *mbox;
	struct lpfc_mbx_dealloc_rsrc_extents *dealloc_rsrc;
	struct lpfc_rsrc_blks *rsrc_blk, *rsrc_blk_next;

	mbox = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!mbox)
		return -ENOMEM;

	/* Payload length excludes the common config header. */
	length = (sizeof(struct lpfc_mbx_dealloc_rsrc_extents) -
		  sizeof(struct lpfc_sli4_cfg_mhdr));
	lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON,
			 LPFC_MBOX_OPCODE_DEALLOC_RSRC_EXTENT,
			 length, LPFC_SLI4_MBX_EMBED);

	/* Fill in the resource type being released. */
	rc = lpfc_sli4_mbox_rsrc_extent(phba, mbox, 0, type,
					LPFC_SLI4_MBX_EMBED);
	if (unlikely(rc)) {
		rc = -EIO;
		goto out_free_mbox;
	}
	/* Poll when interrupts are not yet enabled; otherwise wait. */
	if (!phba->sli4_hba.intr_enable)
		rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
	else {
		mbox_tmo = lpfc_mbox_tmo_val(phba, mbox);
		rc = lpfc_sli_issue_mbox_wait(phba, mbox, mbox_tmo);
	}
	if (unlikely(rc)) {
		rc = -EIO;
		goto out_free_mbox;
	}

	dealloc_rsrc = &mbox->u.mqe.un.dealloc_rsrc_extents;
	if (bf_get(lpfc_mbox_hdr_status,
		   &dealloc_rsrc->header.cfg_shdr.response)) {
		lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_INIT,
				"2919 Failed to release resource extents "
				"for type %d - Status 0x%x Add'l Status 0x%x. "
				"Resource memory not released.\n",
				type,
				bf_get(lpfc_mbox_hdr_status,
				       &dealloc_rsrc->header.cfg_shdr.response),
				bf_get(lpfc_mbox_hdr_add_status,
				       &dealloc_rsrc->header.cfg_shdr.response));
		rc = -EIO;
		goto out_free_mbox;
	}

	/* Port released the extents: free the matching driver state. */
	switch (type) {
	case LPFC_RSC_TYPE_FCOE_VPI:
		kfree(phba->vpi_bmask);
		kfree(phba->vpi_ids);
		bf_set(lpfc_vpi_rsrc_rdy, &phba->sli4_hba.sli4_flags, 0);
		list_for_each_entry_safe(rsrc_blk, rsrc_blk_next,
					 &phba->lpfc_vpi_blk_list, list) {
			list_del_init(&rsrc_blk->list);
			kfree(rsrc_blk);
		}
		phba->sli4_hba.max_cfg_param.vpi_used = 0;
		break;
	case LPFC_RSC_TYPE_FCOE_XRI:
		kfree(phba->sli4_hba.xri_bmask);
		kfree(phba->sli4_hba.xri_ids);
		list_for_each_entry_safe(rsrc_blk, rsrc_blk_next,
					 &phba->sli4_hba.lpfc_xri_blk_list, list) {
			list_del_init(&rsrc_blk->list);
			kfree(rsrc_blk);
		}
		break;
	case LPFC_RSC_TYPE_FCOE_VFI:
		kfree(phba->sli4_hba.vfi_bmask);
		kfree(phba->sli4_hba.vfi_ids);
		bf_set(lpfc_vfi_rsrc_rdy, &phba->sli4_hba.sli4_flags, 0);
		list_for_each_entry_safe(rsrc_blk, rsrc_blk_next,
					 &phba->sli4_hba.lpfc_vfi_blk_list, list) {
			list_del_init(&rsrc_blk->list);
			kfree(rsrc_blk);
		}
		break;
	case LPFC_RSC_TYPE_FCOE_RPI:
		/* RPI bitmask and id array are not freed here — presumably
		 * released on a separate RPI teardown path; confirm before
		 * changing.  Only the extent block list is emptied.
		 */
		list_for_each_entry_safe(rsrc_blk, rsrc_blk_next,
					 &phba->sli4_hba.lpfc_rpi_blk_list, list) {
			list_del_init(&rsrc_blk->list);
			kfree(rsrc_blk);
		}
		break;
	default:
		break;
	}

	bf_set(lpfc_idx_rsrc_rdy, &phba->sli4_hba.sli4_flags, 0);

 out_free_mbox:
	mempool_free(mbox, phba->mbox_mem_pool);
	return rc;
}
5693
5694
5695
5696
5697
5698
5699
/**
 * lpfc_sli4_alloc_resource_identifiers - Provision VFI/VPI/XRI/RPI ids.
 * @phba: Pointer to HBA context object.
 *
 * Sets up the driver's bookkeeping for all four FCoE resource-identifier
 * types.  Two provisioning models exist:
 *
 *  - Extent-based (port reports extents_in_use): each type is allocated
 *    from the port as resource extents via lpfc_sli4_alloc_extent().
 *  - Static: the port's max_cfg_param counts/bases are used to build a
 *    bitmask plus an id-translation array per type.
 *
 * Return: 0 on success, -ENOMEM/-EINVAL or a mailbox error code on failure.
 */
int
lpfc_sli4_alloc_resource_identifiers(struct lpfc_hba *phba)
{
	int i, rc, error = 0;
	uint16_t count, base;
	unsigned long longs;

	/* Without RPI headers the port's static max is the RPI limit. */
	if (!phba->sli4_hba.rpi_hdrs_in_use)
		phba->sli4_hba.next_rpi = phba->sli4_hba.max_cfg_param.max_rpi;
	if (phba->sli4_hba.extents_in_use) {
		/*
		 * The port supports resource extents.  If ids were already
		 * provisioned (e.g. this is a re-initialization), verify the
		 * port still reports the same extent configuration before
		 * reusing them.
		 */
		if (bf_get(lpfc_idx_rsrc_rdy, &phba->sli4_hba.sli4_flags) ==
		    LPFC_IDX_RSRC_RDY) {
			/*
			 * Check each resource type for a change in extent
			 * count or size; accumulate failures in 'error'
			 * rather than aborting on the first one.
			 */
			rc = lpfc_sli4_chk_avail_extnt_rsrc(phba,
						 LPFC_RSC_TYPE_FCOE_VFI);
			if (rc != 0)
				error++;
			rc = lpfc_sli4_chk_avail_extnt_rsrc(phba,
						 LPFC_RSC_TYPE_FCOE_VPI);
			if (rc != 0)
				error++;
			rc = lpfc_sli4_chk_avail_extnt_rsrc(phba,
						 LPFC_RSC_TYPE_FCOE_XRI);
			if (rc != 0)
				error++;
			rc = lpfc_sli4_chk_avail_extnt_rsrc(phba,
						 LPFC_RSC_TYPE_FCOE_RPI);
			if (rc != 0)
				error++;

			/*
			 * Any change means the existing extents are stale:
			 * release every type and fall through to a full
			 * re-allocation.  Otherwise the current ids are
			 * still valid and nothing more is needed.
			 */
			if (error) {
				lpfc_printf_log(phba, KERN_INFO,
						LOG_MBOX | LOG_INIT,
						"2931 Detected extent resource "
						"change. Reallocating all "
						"extents.\n");
				rc = lpfc_sli4_dealloc_extent(phba,
						 LPFC_RSC_TYPE_FCOE_VFI);
				rc = lpfc_sli4_dealloc_extent(phba,
						 LPFC_RSC_TYPE_FCOE_VPI);
				rc = lpfc_sli4_dealloc_extent(phba,
						 LPFC_RSC_TYPE_FCOE_XRI);
				rc = lpfc_sli4_dealloc_extent(phba,
						 LPFC_RSC_TYPE_FCOE_RPI);
			} else
				return 0;
		}

		rc = lpfc_sli4_alloc_extent(phba, LPFC_RSC_TYPE_FCOE_VFI);
		if (unlikely(rc))
			goto err_exit;

		rc = lpfc_sli4_alloc_extent(phba, LPFC_RSC_TYPE_FCOE_VPI);
		if (unlikely(rc))
			goto err_exit;

		rc = lpfc_sli4_alloc_extent(phba, LPFC_RSC_TYPE_FCOE_RPI);
		if (unlikely(rc))
			goto err_exit;

		rc = lpfc_sli4_alloc_extent(phba, LPFC_RSC_TYPE_FCOE_XRI);
		if (unlikely(rc))
			goto err_exit;
		bf_set(lpfc_idx_rsrc_rdy, &phba->sli4_hba.sli4_flags,
		       LPFC_IDX_RSRC_RDY);
		return rc;
	} else {
		/*
		 * Static (non-extent) provisioning.  If ids already exist,
		 * release them first so the arrays below are rebuilt from
		 * the port's current configuration.
		 */
		if (bf_get(lpfc_idx_rsrc_rdy, &phba->sli4_hba.sli4_flags) ==
		    LPFC_IDX_RSRC_RDY) {
			lpfc_sli4_dealloc_resource_identifiers(phba);
			lpfc_sli4_remove_rpis(phba);
		}

		/* RPIs. */
		count = phba->sli4_hba.max_cfg_param.max_rpi;
		/* NOTE(review): count is uint16_t, so "<= 0" only catches 0;
		 * same applies to the vpi/xri/vfi checks below. */
		if (count <= 0) {
			lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
					"3279 Invalid provisioning of "
					"rpi:%d\n", count);
			rc = -EINVAL;
			goto err_exit;
		}
		base = phba->sli4_hba.max_cfg_param.rpi_base;
		longs = (count + BITS_PER_LONG - 1) / BITS_PER_LONG;
		phba->sli4_hba.rpi_bmask = kzalloc(longs *
						   sizeof(unsigned long),
						   GFP_KERNEL);
		if (unlikely(!phba->sli4_hba.rpi_bmask)) {
			rc = -ENOMEM;
			goto err_exit;
		}
		phba->sli4_hba.rpi_ids = kzalloc(count *
						 sizeof(uint16_t),
						 GFP_KERNEL);
		if (unlikely(!phba->sli4_hba.rpi_ids)) {
			rc = -ENOMEM;
			goto free_rpi_bmask;
		}

		/* Identity map: logical index i -> physical id base + i. */
		for (i = 0; i < count; i++)
			phba->sli4_hba.rpi_ids[i] = base + i;

		/* VPIs. */
		count = phba->sli4_hba.max_cfg_param.max_vpi;
		if (count <= 0) {
			lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
					"3280 Invalid provisioning of "
					"vpi:%d\n", count);
			rc = -EINVAL;
			goto free_rpi_ids;
		}
		base = phba->sli4_hba.max_cfg_param.vpi_base;
		longs = (count + BITS_PER_LONG - 1) / BITS_PER_LONG;
		phba->vpi_bmask = kzalloc(longs *
					  sizeof(unsigned long),
					  GFP_KERNEL);
		if (unlikely(!phba->vpi_bmask)) {
			rc = -ENOMEM;
			goto free_rpi_ids;
		}
		phba->vpi_ids = kzalloc(count *
					sizeof(uint16_t),
					GFP_KERNEL);
		if (unlikely(!phba->vpi_ids)) {
			rc = -ENOMEM;
			goto free_vpi_bmask;
		}

		for (i = 0; i < count; i++)
			phba->vpi_ids[i] = base + i;

		/* XRIs. */
		count = phba->sli4_hba.max_cfg_param.max_xri;
		if (count <= 0) {
			lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
					"3281 Invalid provisioning of "
					"xri:%d\n", count);
			rc = -EINVAL;
			goto free_vpi_ids;
		}
		base = phba->sli4_hba.max_cfg_param.xri_base;
		longs = (count + BITS_PER_LONG - 1) / BITS_PER_LONG;
		phba->sli4_hba.xri_bmask = kzalloc(longs *
						   sizeof(unsigned long),
						   GFP_KERNEL);
		if (unlikely(!phba->sli4_hba.xri_bmask)) {
			rc = -ENOMEM;
			goto free_vpi_ids;
		}
		phba->sli4_hba.max_cfg_param.xri_used = 0;
		phba->sli4_hba.xri_ids = kzalloc(count *
						 sizeof(uint16_t),
						 GFP_KERNEL);
		if (unlikely(!phba->sli4_hba.xri_ids)) {
			rc = -ENOMEM;
			goto free_xri_bmask;
		}

		for (i = 0; i < count; i++)
			phba->sli4_hba.xri_ids[i] = base + i;

		/* VFIs. */
		count = phba->sli4_hba.max_cfg_param.max_vfi;
		if (count <= 0) {
			lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
					"3282 Invalid provisioning of "
					"vfi:%d\n", count);
			rc = -EINVAL;
			goto free_xri_ids;
		}
		base = phba->sli4_hba.max_cfg_param.vfi_base;
		longs = (count + BITS_PER_LONG - 1) / BITS_PER_LONG;
		phba->sli4_hba.vfi_bmask = kzalloc(longs *
						   sizeof(unsigned long),
						   GFP_KERNEL);
		if (unlikely(!phba->sli4_hba.vfi_bmask)) {
			rc = -ENOMEM;
			goto free_xri_ids;
		}
		phba->sli4_hba.vfi_ids = kzalloc(count *
						 sizeof(uint16_t),
						 GFP_KERNEL);
		if (unlikely(!phba->sli4_hba.vfi_ids)) {
			rc = -ENOMEM;
			goto free_vfi_bmask;
		}

		for (i = 0; i < count; i++)
			phba->sli4_hba.vfi_ids[i] = base + i;

		/*
		 * All ids built; mark the HBA's resource indices ready so a
		 * later call can detect the ids are already provisioned.
		 */
		bf_set(lpfc_idx_rsrc_rdy, &phba->sli4_hba.sli4_flags,
		       LPFC_IDX_RSRC_RDY);
		return 0;
	}

	/* Unwind in reverse allocation order on failure. */
 free_vfi_bmask:
	kfree(phba->sli4_hba.vfi_bmask);
 free_xri_ids:
	kfree(phba->sli4_hba.xri_ids);
 free_xri_bmask:
	kfree(phba->sli4_hba.xri_bmask);
 free_vpi_ids:
	kfree(phba->vpi_ids);
 free_vpi_bmask:
	kfree(phba->vpi_bmask);
 free_rpi_ids:
	kfree(phba->sli4_hba.rpi_ids);
 free_rpi_bmask:
	kfree(phba->sli4_hba.rpi_bmask);
 err_exit:
	return rc;
}
5937
5938
5939
5940
5941
5942
5943
5944
5945int
5946lpfc_sli4_dealloc_resource_identifiers(struct lpfc_hba *phba)
5947{
5948 if (phba->sli4_hba.extents_in_use) {
5949 lpfc_sli4_dealloc_extent(phba, LPFC_RSC_TYPE_FCOE_VPI);
5950 lpfc_sli4_dealloc_extent(phba, LPFC_RSC_TYPE_FCOE_RPI);
5951 lpfc_sli4_dealloc_extent(phba, LPFC_RSC_TYPE_FCOE_XRI);
5952 lpfc_sli4_dealloc_extent(phba, LPFC_RSC_TYPE_FCOE_VFI);
5953 } else {
5954 kfree(phba->vpi_bmask);
5955 phba->sli4_hba.max_cfg_param.vpi_used = 0;
5956 kfree(phba->vpi_ids);
5957 bf_set(lpfc_vpi_rsrc_rdy, &phba->sli4_hba.sli4_flags, 0);
5958 kfree(phba->sli4_hba.xri_bmask);
5959 kfree(phba->sli4_hba.xri_ids);
5960 kfree(phba->sli4_hba.vfi_bmask);
5961 kfree(phba->sli4_hba.vfi_ids);
5962 bf_set(lpfc_vfi_rsrc_rdy, &phba->sli4_hba.sli4_flags, 0);
5963 bf_set(lpfc_idx_rsrc_rdy, &phba->sli4_hba.sli4_flags, 0);
5964 }
5965
5966 return 0;
5967}
5968
5969
5970
5971
5972
5973
5974
5975
5976
5977
5978
5979int
5980lpfc_sli4_get_allocated_extnts(struct lpfc_hba *phba, uint16_t type,
5981 uint16_t *extnt_cnt, uint16_t *extnt_size)
5982{
5983 bool emb;
5984 int rc = 0;
5985 uint16_t curr_blks = 0;
5986 uint32_t req_len, emb_len;
5987 uint32_t alloc_len, mbox_tmo;
5988 struct list_head *blk_list_head;
5989 struct lpfc_rsrc_blks *rsrc_blk;
5990 LPFC_MBOXQ_t *mbox;
5991 void *virtaddr = NULL;
5992 struct lpfc_mbx_nembed_rsrc_extent *n_rsrc;
5993 struct lpfc_mbx_alloc_rsrc_extents *rsrc_ext;
5994 union lpfc_sli4_cfg_shdr *shdr;
5995
5996 switch (type) {
5997 case LPFC_RSC_TYPE_FCOE_VPI:
5998 blk_list_head = &phba->lpfc_vpi_blk_list;
5999 break;
6000 case LPFC_RSC_TYPE_FCOE_XRI:
6001 blk_list_head = &phba->sli4_hba.lpfc_xri_blk_list;
6002 break;
6003 case LPFC_RSC_TYPE_FCOE_VFI:
6004 blk_list_head = &phba->sli4_hba.lpfc_vfi_blk_list;
6005 break;
6006 case LPFC_RSC_TYPE_FCOE_RPI:
6007 blk_list_head = &phba->sli4_hba.lpfc_rpi_blk_list;
6008 break;
6009 default:
6010 return -EIO;
6011 }
6012
6013
6014 list_for_each_entry(rsrc_blk, blk_list_head, list) {
6015 if (curr_blks == 0) {
6016
6017
6018
6019
6020
6021
6022
6023 *extnt_size = rsrc_blk->rsrc_size;
6024 }
6025 curr_blks++;
6026 }
6027
6028
6029
6030
6031
6032 emb_len = sizeof(MAILBOX_t) - sizeof(struct mbox_header) -
6033 sizeof(uint32_t);
6034
6035
6036
6037
6038
6039 emb = LPFC_SLI4_MBX_EMBED;
6040 req_len = emb_len;
6041 if (req_len > emb_len) {
6042 req_len = curr_blks * sizeof(uint16_t) +
6043 sizeof(union lpfc_sli4_cfg_shdr) +
6044 sizeof(uint32_t);
6045 emb = LPFC_SLI4_MBX_NEMBED;
6046 }
6047
6048 mbox = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
6049 if (!mbox)
6050 return -ENOMEM;
6051 memset(mbox, 0, sizeof(LPFC_MBOXQ_t));
6052
6053 alloc_len = lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON,
6054 LPFC_MBOX_OPCODE_GET_ALLOC_RSRC_EXTENT,
6055 req_len, emb);
6056 if (alloc_len < req_len) {
6057 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
6058 "2983 Allocated DMA memory size (x%x) is "
6059 "less than the requested DMA memory "
6060 "size (x%x)\n", alloc_len, req_len);
6061 rc = -ENOMEM;
6062 goto err_exit;
6063 }
6064 rc = lpfc_sli4_mbox_rsrc_extent(phba, mbox, curr_blks, type, emb);
6065 if (unlikely(rc)) {
6066 rc = -EIO;
6067 goto err_exit;
6068 }
6069
6070 if (!phba->sli4_hba.intr_enable)
6071 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
6072 else {
6073 mbox_tmo = lpfc_mbox_tmo_val(phba, mbox);
6074 rc = lpfc_sli_issue_mbox_wait(phba, mbox, mbox_tmo);
6075 }
6076
6077 if (unlikely(rc)) {
6078 rc = -EIO;
6079 goto err_exit;
6080 }
6081
6082
6083
6084
6085
6086
6087
6088 if (emb == LPFC_SLI4_MBX_EMBED) {
6089 rsrc_ext = &mbox->u.mqe.un.alloc_rsrc_extents;
6090 shdr = &rsrc_ext->header.cfg_shdr;
6091 *extnt_cnt = bf_get(lpfc_mbx_rsrc_cnt, &rsrc_ext->u.rsp);
6092 } else {
6093 virtaddr = mbox->sge_array->addr[0];
6094 n_rsrc = (struct lpfc_mbx_nembed_rsrc_extent *) virtaddr;
6095 shdr = &n_rsrc->cfg_shdr;
6096 *extnt_cnt = bf_get(lpfc_mbx_rsrc_cnt, n_rsrc);
6097 }
6098
6099 if (bf_get(lpfc_mbox_hdr_status, &shdr->response)) {
6100 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_INIT,
6101 "2984 Failed to read allocated resources "
6102 "for type %d - Status 0x%x Add'l Status 0x%x.\n",
6103 type,
6104 bf_get(lpfc_mbox_hdr_status, &shdr->response),
6105 bf_get(lpfc_mbox_hdr_add_status, &shdr->response));
6106 rc = -EIO;
6107 goto err_exit;
6108 }
6109 err_exit:
6110 lpfc_sli4_mbox_cmd_free(phba, mbox);
6111 return rc;
6112}
6113
6114
6115
6116
6117
6118
6119
6120
6121
6122
6123
6124
6125
6126
6127
/**
 * lpfc_sli4_repost_els_sgl_list - Re-post the ELS sgl pool to the port.
 * @phba: Pointer to HBA context object.
 *
 * Removes every ELS sgl from the driver's pool and re-posts it to the
 * port.  Entries with consecutive XRIs are batched into blocks (up to
 * LPFC_NEMBED_MBOX_SGL_CNT per mailbox); a gap in the XRI sequence closes
 * the current block.  Entries that fail to post are freed and subtracted
 * from els_xri_cnt.
 *
 * Return: 0 on success, -EIO when nothing could be posted.
 */
static int
lpfc_sli4_repost_els_sgl_list(struct lpfc_hba *phba)
{
	struct lpfc_sglq *sglq_entry = NULL;
	struct lpfc_sglq *sglq_entry_next = NULL;
	struct lpfc_sglq *sglq_entry_first = NULL;
	int status, total_cnt, post_cnt = 0, num_posted = 0, block_cnt = 0;
	int last_xritag = NO_XRI;
	struct lpfc_sli_ring *pring;
	LIST_HEAD(prep_sgl_list);
	LIST_HEAD(blck_sgl_list);
	LIST_HEAD(allc_sgl_list);
	LIST_HEAD(post_sgl_list);
	LIST_HEAD(free_sgl_list);

	/* Take the whole sgl pool private under both locks. */
	pring = &phba->sli.ring[LPFC_ELS_RING];
	spin_lock_irq(&phba->hbalock);
	spin_lock(&pring->ring_lock);
	list_splice_init(&phba->sli4_hba.lpfc_sgl_list, &allc_sgl_list);
	spin_unlock(&pring->ring_lock);
	spin_unlock_irq(&phba->hbalock);

	total_cnt = phba->sli4_hba.els_xri_cnt;
	list_for_each_entry_safe(sglq_entry, sglq_entry_next,
				 &allc_sgl_list, list) {
		list_del_init(&sglq_entry->list);
		block_cnt++;
		if ((last_xritag != NO_XRI) &&
		    (sglq_entry->sli4_xritag != last_xritag + 1)) {
			/* Hole in the xri sequence: close the current block
			 * for posting... */
			list_splice_init(&prep_sgl_list, &blck_sgl_list);
			post_cnt = block_cnt - 1;
			/* ...and start a new block with this entry. */
			list_add_tail(&sglq_entry->list, &prep_sgl_list);
			block_cnt = 1;
		} else {
			/* Consecutive xri: keep growing the current block. */
			list_add_tail(&sglq_entry->list, &prep_sgl_list);
			/* Block full: queue it for posting. */
			if (block_cnt == LPFC_NEMBED_MBOX_SGL_CNT) {
				list_splice_init(&prep_sgl_list,
						 &blck_sgl_list);
				post_cnt = block_cnt;
				block_cnt = 0;
			}
		}
		num_posted++;

		/* Remember the xri to detect sequence gaps next iteration. */
		last_xritag = sglq_entry->sli4_xritag;

		/* Last entry of the pool: flush whatever remains. */
		if (num_posted == phba->sli4_hba.els_xri_cnt) {
			if (post_cnt == 0) {
				list_splice_init(&prep_sgl_list,
						 &blck_sgl_list);
				post_cnt = block_cnt;
			} else if (block_cnt == 1) {
				/* A single leftover sgl is posted alone. */
				status = lpfc_sli4_post_sgl(phba,
						sglq_entry->phys, 0,
						sglq_entry->sli4_xritag);
				if (!status) {
					/* Posted: keep it for the pool. */
					list_add_tail(&sglq_entry->list,
						      &post_sgl_list);
				} else {
					/* Failed: discard and shrink count. */
					lpfc_printf_log(phba, KERN_WARNING,
						LOG_SLI,
						"3159 Failed to post els "
						"sgl, xritag:x%x\n",
						sglq_entry->sli4_xritag);
					list_add_tail(&sglq_entry->list,
						      &free_sgl_list);
					total_cnt--;
				}
			}
		}

		/* Nothing queued for a block post this iteration. */
		if (post_cnt == 0)
			continue;

		/* Post the current block of consecutive-xri sgls. */
		status = lpfc_sli4_post_els_sgl_list(phba, &blck_sgl_list,
						     post_cnt);

		if (!status) {
			/* Whole block posted: keep it for the pool. */
			list_splice_init(&blck_sgl_list, &post_sgl_list);
		} else {
			/* Block post failed: discard all of its entries. */
			sglq_entry_first = list_first_entry(&blck_sgl_list,
							    struct lpfc_sglq,
							    list);
			lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
					"3160 Failed to post els sgl-list, "
					"xritag:x%x-x%x\n",
					sglq_entry_first->sli4_xritag,
					(sglq_entry_first->sli4_xritag +
					 post_cnt - 1));
			list_splice_init(&blck_sgl_list, &free_sgl_list);
			total_cnt -= post_cnt;
		}

		/* A fresh block starts: forget the previous xri. */
		if (block_cnt == 0)
			last_xritag = NO_XRI;

		/* Reset for the next block. */
		post_cnt = 0;
	}

	/* Account for any sgls that failed to post. */
	phba->sli4_hba.els_xri_cnt = total_cnt;

	/* Free the sgls that could not be posted. */
	lpfc_free_sgl_list(phba, &free_sgl_list);

	/* Return the successfully posted sgls to the driver's pool. */
	if (!list_empty(&post_sgl_list)) {
		spin_lock_irq(&phba->hbalock);
		spin_lock(&pring->ring_lock);
		list_splice_init(&post_sgl_list,
				 &phba->sli4_hba.lpfc_sgl_list);
		spin_unlock(&pring->ring_lock);
		spin_unlock_irq(&phba->hbalock);
	} else {
		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
				"3161 Failure to post els sgl to port.\n");
		return -EIO;
	}
	return 0;
}
6261
6262
6263
6264
6265
6266
6267
6268
6269
6270
/**
 * lpfc_sli4_hba_setup - Main SLI4 HBA initialization sequence.
 * @phba: Pointer to HBA context object.
 *
 * Brings an SLI4 port from PCI-reset to link-ready: resets the function,
 * reads revision/VPD, negotiates features, provisions resource ids,
 * posts sgls and RPI headers, creates and sets up the queue set, starts
 * timers, and finally registers the FCFI / initializes the link.
 *
 * Return: 0 on success, a negative errno on failure.  Errors unwind in
 * reverse order through the labels at the bottom.
 */
int
lpfc_sli4_hba_setup(struct lpfc_hba *phba)
{
	int rc;
	LPFC_MBOXQ_t *mboxq;
	struct lpfc_mqe *mqe;
	uint8_t *vpd;
	uint32_t vpd_size;
	uint32_t ftr_rsp = 0;
	struct Scsi_Host *shost = lpfc_shost_from_vport(phba->pport);
	struct lpfc_vport *vport = phba->pport;
	struct lpfc_dmabuf *mp;

	/* Perform a PCI function reset to start from a clean state. */
	rc = lpfc_pci_function_reset(phba);
	if (unlikely(rc))
		return -ENODEV;

	/* Check the port's readiness registers before talking to it. */
	rc = lpfc_sli4_post_status_check(phba);
	if (unlikely(rc))
		return -ENODEV;
	else {
		spin_lock_irq(&phba->hbalock);
		phba->sli.sli_flag |= LPFC_SLI_ACTIVE;
		spin_unlock_irq(&phba->hbalock);
	}

	/*
	 * Allocate a single mailbox container that is reused for every
	 * mailbox command issued below.
	 */
	mboxq = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!mboxq)
		return -ENOMEM;

	/* Issue READ_REV to get VPD data and the port's SLI level. */
	vpd_size = SLI4_PAGE_SIZE;
	vpd = kzalloc(vpd_size, GFP_KERNEL);
	if (!vpd) {
		rc = -ENOMEM;
		goto out_free_mbox;
	}

	rc = lpfc_sli4_read_rev(phba, mboxq, vpd, &vpd_size);
	if (unlikely(rc)) {
		kfree(vpd);
		goto out_free_mbox;
	}

	mqe = &mboxq->u.mqe;
	phba->sli_rev = bf_get(lpfc_mbx_rd_rev_sli_lvl, &mqe->un.read_rev);
	if (bf_get(lpfc_mbx_rd_rev_fcoe, &mqe->un.read_rev))
		phba->hba_flag |= HBA_FCOE_MODE;
	else
		phba->hba_flag &= ~HBA_FCOE_MODE;

	if (bf_get(lpfc_mbx_rd_rev_cee_ver, &mqe->un.read_rev) ==
		LPFC_DCBX_CEE_MODE)
		phba->hba_flag |= HBA_FIP_SUPPORT;
	else
		phba->hba_flag &= ~HBA_FIP_SUPPORT;

	phba->hba_flag &= ~HBA_FCP_IOQ_FLUSH;

	/* This driver path only supports SLI level 4 ports. */
	if (phba->sli_rev != LPFC_SLI_REV4) {
		lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
			"0376 READ_REV Error. SLI Level %d "
			"FCoE enabled %d\n",
			phba->sli_rev, phba->hba_flag & HBA_FCOE_MODE);
		rc = -EIO;
		kfree(vpd);
		goto out_free_mbox;
	}

	/*
	 * Continue initialization with default values even if driver failed
	 * to read FCoE param config regions, only read parameters if the
	 * board is FCoE
	 */
	if (phba->hba_flag & HBA_FCOE_MODE &&
	    lpfc_sli4_read_fcoe_params(phba))
		lpfc_printf_log(phba, KERN_WARNING, LOG_MBOX | LOG_INIT,
			"2570 Failed to read FCoE parameters\n");

	/*
	 * Retrieve sli4 device physical port name, failure of doing it
	 * is considered as non-fatal.
	 */
	rc = lpfc_sli4_retrieve_pport_name(phba);
	if (!rc)
		lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI,
				"3080 Successful retrieving SLI4 device "
				"physical port name: %s.\n", phba->Port);

	/*
	 * Evaluate the read rev and vpd data.  Populate the driver
	 * state with the results.  If this routine fails, the failure
	 * is not fatal as the driver will use generic values.
	 */
	rc = lpfc_parse_vpd(phba, vpd, vpd_size);
	if (unlikely(!rc)) {
		lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
				"0377 Error %d parsing vpd. "
				"Using defaults.\n", rc);
		rc = 0;
	}
	kfree(vpd);

	/* Save information as VPD data */
	phba->vpd.rev.biuRev = mqe->un.read_rev.first_hw_rev;
	phba->vpd.rev.smRev = mqe->un.read_rev.second_hw_rev;
	phba->vpd.rev.endecRev = mqe->un.read_rev.third_hw_rev;
	phba->vpd.rev.fcphHigh = bf_get(lpfc_mbx_rd_rev_fcph_high,
					 &mqe->un.read_rev);
	phba->vpd.rev.fcphLow = bf_get(lpfc_mbx_rd_rev_fcph_low,
				       &mqe->un.read_rev);
	phba->vpd.rev.feaLevelHigh = bf_get(lpfc_mbx_rd_rev_ftr_lvl_high,
					    &mqe->un.read_rev);
	phba->vpd.rev.feaLevelLow = bf_get(lpfc_mbx_rd_rev_ftr_lvl_low,
					   &mqe->un.read_rev);
	phba->vpd.rev.sli1FwRev = mqe->un.read_rev.fw_id_rev;
	memcpy(phba->vpd.rev.sli1FwName, mqe->un.read_rev.fw_name, 16);
	phba->vpd.rev.sli2FwRev = mqe->un.read_rev.ulp_fw_id_rev;
	memcpy(phba->vpd.rev.sli2FwName, mqe->un.read_rev.ulp_fw_name, 16);
	phba->vpd.rev.opFwRev = mqe->un.read_rev.fw_id_rev;
	memcpy(phba->vpd.rev.opFwName, mqe->un.read_rev.fw_name, 16);
	lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI,
			"(%d):0380 READ_REV Status x%x "
			"fw_rev:%s fcphHi:%x fcphLo:%x flHi:%x flLo:%x\n",
			mboxq->vport ? mboxq->vport->vpi : 0,
			bf_get(lpfc_mqe_status, mqe),
			phba->vpd.rev.opFwName,
			phba->vpd.rev.fcphHigh, phba->vpd.rev.fcphLow,
			phba->vpd.rev.feaLevelHigh, phba->vpd.rev.feaLevelLow);

	/* Cap the LUN queue depth to 1/8 of the port's XRI budget. */
	rc = (phba->sli4_hba.max_cfg_param.max_xri >> 3);
	if (phba->pport->cfg_lun_queue_depth > rc) {
		lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
				"3362 LUN queue depth changed from %d to %d\n",
				phba->pport->cfg_lun_queue_depth, rc);
		phba->pport->cfg_lun_queue_depth = rc;
	}

	/*
	 * Discover the port's supported feature set and negotiate the
	 * driver's desired features with REQUEST_FEATURES.
	 */
	lpfc_request_features(phba, mboxq);
	rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
	if (unlikely(rc)) {
		rc = -EIO;
		goto out_free_mbox;
	}

	/*
	 * The port must support FCP initiator mode as this is the
	 * only mode currently supported by the driver.
	 */
	if (!(bf_get(lpfc_mbx_rq_ftr_rsp_fcpi, &mqe->un.req_ftrs))) {
		lpfc_printf_log(phba, KERN_WARNING, LOG_MBOX | LOG_SLI,
				"0378 No support for fcpi mode.\n");
		ftr_rsp++;
	}
	if (bf_get(lpfc_mbx_rq_ftr_rsp_perfh, &mqe->un.req_ftrs))
		phba->sli3_options |= LPFC_SLI4_PERFH_ENABLED;
	else
		phba->sli3_options &= ~LPFC_SLI4_PERFH_ENABLED;

	/*
	 * If the port cannot support the host's requested features
	 * then turn off the global config parameters to disable the
	 * feature in the driver.  This is not a fatal error.
	 */
	phba->sli3_options &= ~LPFC_SLI3_BG_ENABLED;
	if (phba->cfg_enable_bg) {
		if (bf_get(lpfc_mbx_rq_ftr_rsp_dif, &mqe->un.req_ftrs))
			phba->sli3_options |= LPFC_SLI3_BG_ENABLED;
		else
			ftr_rsp++;
	}

	if (phba->max_vpi && phba->cfg_enable_npiv &&
	    !(bf_get(lpfc_mbx_rq_ftr_rsp_npiv, &mqe->un.req_ftrs)))
		ftr_rsp++;

	if (ftr_rsp) {
		lpfc_printf_log(phba, KERN_WARNING, LOG_MBOX | LOG_SLI,
				"0379 Feature Mismatch Data: x%08x %08x "
				"x%x x%x x%x\n", mqe->un.req_ftrs.word2,
				mqe->un.req_ftrs.word3, phba->cfg_enable_bg,
				phba->cfg_enable_npiv, phba->max_vpi);
		if (!(bf_get(lpfc_mbx_rq_ftr_rsp_dif, &mqe->un.req_ftrs)))
			phba->cfg_enable_bg = 0;
		if (!(bf_get(lpfc_mbx_rq_ftr_rsp_npiv, &mqe->un.req_ftrs)))
			phba->cfg_enable_npiv = 0;
	}

	/* These SLI3 features are assumed in SLI4 */
	spin_lock_irq(&phba->hbalock);
	phba->sli3_options |= (LPFC_SLI3_NPIV_ENABLED | LPFC_SLI3_HBQ_ENABLED);
	spin_unlock_irq(&phba->hbalock);

	/*
	 * Allocate all resources (xri,rpi,vpi,vfi) now.  Subsequent
	 * calls depend on these resources to complete port setup.
	 */
	rc = lpfc_sli4_alloc_resource_identifiers(phba);
	if (rc) {
		lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
				"2920 Failed to alloc Resource IDs "
				"rc = x%x\n", rc);
		goto out_free_mbox;
	}

	/* Read the port's service parameters for the physical vport. */
	rc = lpfc_read_sparam(phba, mboxq, vport->vpi);
	if (rc) {
		phba->link_state = LPFC_HBA_ERROR;
		rc = -ENOMEM;
		goto out_free_mbox;
	}

	mboxq->vport = vport;
	rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
	mp = (struct lpfc_dmabuf *) mboxq->context1;
	if (rc == MBX_SUCCESS) {
		memcpy(&vport->fc_sparam, mp->virt, sizeof(struct serv_parm));
		rc = 0;
	}

	/*
	 * This memory was allocated by the lpfc_read_sparam routine. Release
	 * it to the mbuf pool.
	 */
	lpfc_mbuf_free(phba, mp->virt, mp->phys);
	kfree(mp);
	mboxq->context1 = NULL;
	if (unlikely(rc)) {
		lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
				"0382 READ_SPARAM command failed "
				"status %d, mbxStatus x%x\n",
				rc, bf_get(lpfc_mqe_status, mqe));
		phba->link_state = LPFC_HBA_ERROR;
		rc = -EIO;
		goto out_free_mbox;
	}

	lpfc_update_vport_wwn(vport);

	/* Update the fc_host data structures with new wwn. */
	fc_host_node_name(shost) = wwn_to_u64(vport->fc_nodename.u.wwn);
	fc_host_port_name(shost) = wwn_to_u64(vport->fc_portname.u.wwn);

	/* Update the xri-sgl sizing to match the port's extent config. */
	rc = lpfc_sli4_xri_sgl_update(phba);
	if (unlikely(rc)) {
		lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
				"1400 Failed to update xri-sgl size and "
				"mapping: %d\n", rc);
		goto out_free_mbox;
	}

	/* Register the ELS sgl pool with the port. */
	rc = lpfc_sli4_repost_els_sgl_list(phba);
	if (unlikely(rc)) {
		lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
				"0582 Error %d during els sgl post "
				"operation\n", rc);
		rc = -ENODEV;
		goto out_free_mbox;
	}

	/* Register the SCSI sgl pool with the port. */
	rc = lpfc_sli4_repost_scsi_sgl_list(phba);
	if (unlikely(rc)) {
		lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
				"0383 Error %d during scsi sgl post "
				"operation\n", rc);
		/* Some Scsi buffers were moved to the abort scsi list */
		/* A pci function reset will repost them */
		rc = -ENODEV;
		goto out_free_mbox;
	}

	/* Post the rpi header region to the device. */
	rc = lpfc_sli4_post_all_rpi_hdrs(phba);
	if (unlikely(rc)) {
		lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
				"0393 Error %d during rpi post operation\n",
				rc);
		rc = -ENODEV;
		goto out_free_mbox;
	}
	lpfc_sli4_node_prep(phba);

	/* Create all the SLI4 queues */
	rc = lpfc_sli4_queue_create(phba);
	if (rc) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"3089 Failed to allocate queues\n");
		rc = -ENODEV;
		goto out_stop_timers;
	}
	/* Set up all the queues to the device */
	rc = lpfc_sli4_queue_setup(phba);
	if (unlikely(rc)) {
		lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
				"0381 Error %d during queue setup.\n ", rc);
		goto out_destroy_queue;
	}

	/* Arm the CQs and then EQs on device */
	lpfc_sli4_arm_cqeq_intr(phba);

	/* Indicate device interrupt mode */
	phba->sli4_hba.intr_enable = 1;

	/* Allow asynchronous mailbox command to go through */
	spin_lock_irq(&phba->hbalock);
	phba->sli.sli_flag &= ~LPFC_SLI_ASYNC_MBX_BLK;
	spin_unlock_irq(&phba->hbalock);

	/* Post receive buffers to the device */
	lpfc_sli4_rb_setup(phba);

	/* Reset HBA FCF states after HBA reset */
	phba->fcf.fcf_flag = 0;
	phba->fcf.current_rec.flag = 0;

	/* Start the ELS watchdog timer */
	mod_timer(&vport->els_tmofunc,
		  jiffies + msecs_to_jiffies(1000 * (phba->fc_ratov * 2)));

	/* Start heart beat timer */
	mod_timer(&phba->hb_tmofunc,
		  jiffies + msecs_to_jiffies(1000 * LPFC_HB_MBOX_INTERVAL));
	phba->hb_outstanding = 0;
	phba->last_completion_time = jiffies;

	/* Start error attention (ERATT) polling timer */
	mod_timer(&phba->eratt_poll,
		  jiffies + msecs_to_jiffies(1000 * LPFC_ERATT_POLL_INTERVAL));

	/* Enable PCIe device Advanced Error Reporting (AER) if configured */
	if (phba->cfg_aer_support == 1 && !(phba->hba_flag & HBA_AER_ENABLED)) {
		rc = pci_enable_pcie_error_reporting(phba->pcidev);
		if (!rc) {
			lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
					"2829 This device supports "
					"Advanced Error Reporting (AER)\n");
			spin_lock_irq(&phba->hbalock);
			phba->hba_flag |= HBA_AER_ENABLED;
			spin_unlock_irq(&phba->hbalock);
		} else {
			lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
					"2830 This device does not support "
					"Advanced Error Reporting (AER)\n");
			phba->cfg_aer_support = 0;
		}
		rc = 0;
	}

	if (!(phba->hba_flag & HBA_FCOE_MODE)) {
		/*
		 * The FC Port needs to register FCFI (index 0)
		 */
		lpfc_reg_fcfi(phba, mboxq);
		mboxq->vport = phba->pport;
		rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
		if (rc != MBX_SUCCESS)
			goto out_unset_queue;
		rc = 0;
		phba->fcf.fcfi = bf_get(lpfc_reg_fcfi_fcfi,
					&mboxq->u.mqe.un.reg_fcfi);

		/* Check if the port is configured to be disabled */
		lpfc_sli_read_link_ste(phba);
	}

	/*
	 * The port is ready, set the host's link state to LINK_DOWN
	 * in preparation for link interrupts.
	 */
	spin_lock_irq(&phba->hbalock);
	phba->link_state = LPFC_LINK_DOWN;
	spin_unlock_irq(&phba->hbalock);
	if (!(phba->hba_flag & HBA_FCOE_MODE) &&
	    (phba->hba_flag & LINK_DISABLED)) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT | LOG_SLI,
				"3103 Adapter Link is disabled.\n");
		lpfc_down_link(phba, mboxq);
		rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
		if (rc != MBX_SUCCESS) {
			lpfc_printf_log(phba, KERN_ERR, LOG_INIT | LOG_SLI,
					"3104 Adapter failed to issue "
					"DOWN_LINK mbox cmd, rc:x%x\n", rc);
			goto out_unset_queue;
		}
	} else if (phba->cfg_suppress_link_up == LPFC_INITIALIZE_LINK) {
		/* don't perform init_link on SLI4 FC port loopback test */
		if (!(phba->link_flag & LS_LOOPBACK_MODE)) {
			rc = phba->lpfc_hba_init_link(phba, MBX_NOWAIT);
			if (rc)
				goto out_unset_queue;
		}
	}
	mempool_free(mboxq, phba->mbox_mem_pool);
	return rc;
out_unset_queue:
	/* Unset all the queues set up in this routine when error out */
	lpfc_sli4_queue_unset(phba);
out_destroy_queue:
	lpfc_sli4_queue_destroy(phba);
out_stop_timers:
	lpfc_stop_hba_timers(phba);
out_free_mbox:
	mempool_free(mboxq, phba->mbox_mem_pool);
	return rc;
}
6692
6693
6694
6695
6696
6697
6698
6699
6700
6701
6702
6703
6704
6705void
6706lpfc_mbox_timeout(unsigned long ptr)
6707{
6708 struct lpfc_hba *phba = (struct lpfc_hba *) ptr;
6709 unsigned long iflag;
6710 uint32_t tmo_posted;
6711
6712 spin_lock_irqsave(&phba->pport->work_port_lock, iflag);
6713 tmo_posted = phba->pport->work_port_events & WORKER_MBOX_TMO;
6714 if (!tmo_posted)
6715 phba->pport->work_port_events |= WORKER_MBOX_TMO;
6716 spin_unlock_irqrestore(&phba->pport->work_port_lock, iflag);
6717
6718 if (!tmo_posted)
6719 lpfc_worker_wake_up(phba);
6720 return;
6721}
6722
6723
6724
6725
6726
6727
6728
6729
6730
6731static bool
6732lpfc_sli4_mbox_completions_pending(struct lpfc_hba *phba)
6733{
6734
6735 uint32_t idx;
6736 struct lpfc_queue *mcq;
6737 struct lpfc_mcqe *mcqe;
6738 bool pending_completions = false;
6739
6740 if (unlikely(!phba) || (phba->sli_rev != LPFC_SLI_REV4))
6741 return false;
6742
6743
6744
6745 mcq = phba->sli4_hba.mbx_cq;
6746 idx = mcq->hba_index;
6747 while (bf_get_le32(lpfc_cqe_valid, mcq->qe[idx].cqe)) {
6748 mcqe = (struct lpfc_mcqe *)mcq->qe[idx].cqe;
6749 if (bf_get_le32(lpfc_trailer_completed, mcqe) &&
6750 (!bf_get_le32(lpfc_trailer_async, mcqe))) {
6751 pending_completions = true;
6752 break;
6753 }
6754 idx = (idx + 1) % mcq->entry_count;
6755 if (mcq->hba_index == idx)
6756 break;
6757 }
6758 return pending_completions;
6759
6760}
6761
6762
6763
6764
6765
6766
6767
6768
6769
6770
6771
6772
/**
 * lpfc_sli4_process_missed_mbox_completions - Recover a lost MBX interrupt.
 * @phba: Pointer to HBA context object.
 *
 * Called from the mailbox timeout path.  If a mailbox completion is
 * actually sitting unprocessed on the MBX CQ (i.e. the interrupt was
 * missed), processes all events on the EQ associated with the MBX CQ so
 * the completion is delivered instead of declaring a timeout.
 *
 * Return: true if a pending mailbox completion was found (and the EQ was
 * processed), false otherwise.
 */
bool
lpfc_sli4_process_missed_mbox_completions(struct lpfc_hba *phba)
{

	uint32_t eqidx;
	struct lpfc_queue *fpeq = NULL;
	struct lpfc_eqe *eqe;
	bool mbox_pending;

	if (unlikely(!phba) || (phba->sli_rev != LPFC_SLI_REV4))
		return false;

	/* Find the EQ that the mailbox CQ is associated with. */
	if (phba->sli4_hba.hba_eq)
		for (eqidx = 0; eqidx < phba->cfg_fcp_io_channel; eqidx++)
			if (phba->sli4_hba.hba_eq[eqidx]->queue_id ==
			    phba->sli4_hba.mbx_cq->assoc_qid) {
				fpeq = phba->sli4_hba.hba_eq[eqidx];
				break;
			}
	if (!fpeq)
		return false;

	/* Turn off interrupts from this EQ before inspecting the CQ, so
	 * the check below cannot race a concurrent interrupt. */
	lpfc_sli4_eq_clr_intr(fpeq);

	/* Check to see if a mbox completion is pending. */
	mbox_pending = lpfc_sli4_mbox_completions_pending(phba);

	/*
	 * If a mbox completion is pending, process all the events on the EQ
	 * associated with the mbox completion queue (this could include
	 * mailbox commands, async events, els commands, receive queue data
	 * and fcp commands).
	 */
	if (mbox_pending)
		while ((eqe = lpfc_sli4_eq_get(fpeq))) {
			lpfc_sli4_hba_handle_eqe(phba, eqe, eqidx);
			fpeq->EQ_processed++;
		}

	/* Always clear and re-arm the EQ (interrupts were disabled above). */
	lpfc_sli4_eq_release(fpeq, LPFC_QUEUE_REARM);

	return mbox_pending;

}
6825
6826
6827
6828
6829
6830
6831
6832
6833
/**
 * lpfc_mbox_timeout_handler - Worker-thread handler for a mailbox timeout.
 * @phba: Pointer to HBA context object.
 *
 * Invoked by the worker thread when WORKER_MBOX_TMO is posted.  First
 * checks whether the "timeout" was really a missed interrupt with the
 * completion still on the CQ; if so, the completion is processed and no
 * recovery is needed.  Otherwise the mailbox subsystem is considered
 * hung: the link state is marked unknown, outstanding FCP rings are
 * aborted, and the board is reset.
 */
void
lpfc_mbox_timeout_handler(struct lpfc_hba *phba)
{
	LPFC_MBOXQ_t *pmbox = phba->sli.mbox_active;
	MAILBOX_t *mb = NULL;

	struct lpfc_sli *psli = &phba->sli;

	/* If the mailbox completed but the interrupt was lost, process the
	 * completion and skip the board reset entirely. */
	if (lpfc_sli4_process_missed_mbox_completions(phba))
		return;

	if (pmbox != NULL)
		mb = &pmbox->u.mb;
	/*
	 * A mailbox completion that raced in before this handler ran may
	 * have cleared mbox_active; re-check under the lock and bail out
	 * if there is no longer an active mailbox to recover.
	 */
	spin_lock_irq(&phba->hbalock);
	if (pmbox == NULL) {
		lpfc_printf_log(phba, KERN_WARNING,
				LOG_MBOX | LOG_SLI,
				"0353 Active Mailbox cleared - mailbox timeout "
				"exiting\n");
		spin_unlock_irq(&phba->hbalock);
		return;
	}

	/* Mbox cmd <mbxCommand> timeout */
	lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
			"0310 Mailbox command x%x timeout Data: x%x x%x x%p\n",
			mb->mbxCommand,
			phba->pport->port_state,
			phba->sli.sli_flag,
			phba->sli.mbox_active);
	spin_unlock_irq(&phba->hbalock);

	/*
	 * Clear the pending timeout event, mark the link state unknown and
	 * take the SLI layer inactive before aborting outstanding I/O.
	 */
	spin_lock_irq(&phba->pport->work_port_lock);
	phba->pport->work_port_events &= ~WORKER_MBOX_TMO;
	spin_unlock_irq(&phba->pport->work_port_lock);
	spin_lock_irq(&phba->hbalock);
	phba->link_state = LPFC_LINK_UNKNOWN;
	psli->sli_flag &= ~LPFC_SLI_ACTIVE;
	spin_unlock_irq(&phba->hbalock);

	lpfc_sli_abort_fcp_rings(phba);

	lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
			"0345 Resetting board due to mailbox timeout\n");

	/* Reset the HBA device */
	lpfc_reset_hba(phba);
}
6892
6893
6894
6895
6896
6897
6898
6899
6900
6901
6902
6903
6904
6905
6906
6907
6908
6909
6910
6911
6912
6913
6914
6915
6916
6917
6918
6919static int
6920lpfc_sli_issue_mbox_s3(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmbox,
6921 uint32_t flag)
6922{
6923 MAILBOX_t *mbx;
6924 struct lpfc_sli *psli = &phba->sli;
6925 uint32_t status, evtctr;
6926 uint32_t ha_copy, hc_copy;
6927 int i;
6928 unsigned long timeout;
6929 unsigned long drvr_flag = 0;
6930 uint32_t word0, ldata;
6931 void __iomem *to_slim;
6932 int processing_queue = 0;
6933
6934 spin_lock_irqsave(&phba->hbalock, drvr_flag);
6935 if (!pmbox) {
6936 phba->sli.sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
6937
6938 if (unlikely(psli->sli_flag & LPFC_SLI_ASYNC_MBX_BLK)) {
6939 spin_unlock_irqrestore(&phba->hbalock, drvr_flag);
6940 return MBX_SUCCESS;
6941 }
6942 processing_queue = 1;
6943 pmbox = lpfc_mbox_get(phba);
6944 if (!pmbox) {
6945 spin_unlock_irqrestore(&phba->hbalock, drvr_flag);
6946 return MBX_SUCCESS;
6947 }
6948 }
6949
6950 if (pmbox->mbox_cmpl && pmbox->mbox_cmpl != lpfc_sli_def_mbox_cmpl &&
6951 pmbox->mbox_cmpl != lpfc_sli_wake_mbox_wait) {
6952 if(!pmbox->vport) {
6953 spin_unlock_irqrestore(&phba->hbalock, drvr_flag);
6954 lpfc_printf_log(phba, KERN_ERR,
6955 LOG_MBOX | LOG_VPORT,
6956 "1806 Mbox x%x failed. No vport\n",
6957 pmbox->u.mb.mbxCommand);
6958 dump_stack();
6959 goto out_not_finished;
6960 }
6961 }
6962
6963
6964 if (unlikely(pci_channel_offline(phba->pcidev))) {
6965 spin_unlock_irqrestore(&phba->hbalock, drvr_flag);
6966 goto out_not_finished;
6967 }
6968
6969
6970 if (unlikely(phba->hba_flag & DEFER_ERATT)) {
6971 spin_unlock_irqrestore(&phba->hbalock, drvr_flag);
6972 goto out_not_finished;
6973 }
6974
6975 psli = &phba->sli;
6976
6977 mbx = &pmbox->u.mb;
6978 status = MBX_SUCCESS;
6979
6980 if (phba->link_state == LPFC_HBA_ERROR) {
6981 spin_unlock_irqrestore(&phba->hbalock, drvr_flag);
6982
6983
6984 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
6985 "(%d):0311 Mailbox command x%x cannot "
6986 "issue Data: x%x x%x\n",
6987 pmbox->vport ? pmbox->vport->vpi : 0,
6988 pmbox->u.mb.mbxCommand, psli->sli_flag, flag);
6989 goto out_not_finished;
6990 }
6991
6992 if (mbx->mbxCommand != MBX_KILL_BOARD && flag & MBX_NOWAIT) {
6993 if (lpfc_readl(phba->HCregaddr, &hc_copy) ||
6994 !(hc_copy & HC_MBINT_ENA)) {
6995 spin_unlock_irqrestore(&phba->hbalock, drvr_flag);
6996 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
6997 "(%d):2528 Mailbox command x%x cannot "
6998 "issue Data: x%x x%x\n",
6999 pmbox->vport ? pmbox->vport->vpi : 0,
7000 pmbox->u.mb.mbxCommand, psli->sli_flag, flag);
7001 goto out_not_finished;
7002 }
7003 }
7004
7005 if (psli->sli_flag & LPFC_SLI_MBOX_ACTIVE) {
7006
7007
7008
7009
7010
7011 if (flag & MBX_POLL) {
7012 spin_unlock_irqrestore(&phba->hbalock, drvr_flag);
7013
7014
7015 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
7016 "(%d):2529 Mailbox command x%x "
7017 "cannot issue Data: x%x x%x\n",
7018 pmbox->vport ? pmbox->vport->vpi : 0,
7019 pmbox->u.mb.mbxCommand,
7020 psli->sli_flag, flag);
7021 goto out_not_finished;
7022 }
7023
7024 if (!(psli->sli_flag & LPFC_SLI_ACTIVE)) {
7025 spin_unlock_irqrestore(&phba->hbalock, drvr_flag);
7026
7027 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
7028 "(%d):2530 Mailbox command x%x "
7029 "cannot issue Data: x%x x%x\n",
7030 pmbox->vport ? pmbox->vport->vpi : 0,
7031 pmbox->u.mb.mbxCommand,
7032 psli->sli_flag, flag);
7033 goto out_not_finished;
7034 }
7035
7036
7037
7038
7039 lpfc_mbox_put(phba, pmbox);
7040
7041
7042 lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI,
7043 "(%d):0308 Mbox cmd issue - BUSY Data: "
7044 "x%x x%x x%x x%x\n",
7045 pmbox->vport ? pmbox->vport->vpi : 0xffffff,
7046 mbx->mbxCommand, phba->pport->port_state,
7047 psli->sli_flag, flag);
7048
7049 psli->slistat.mbox_busy++;
7050 spin_unlock_irqrestore(&phba->hbalock, drvr_flag);
7051
7052 if (pmbox->vport) {
7053 lpfc_debugfs_disc_trc(pmbox->vport,
7054 LPFC_DISC_TRC_MBOX_VPORT,
7055 "MBOX Bsy vport: cmd:x%x mb:x%x x%x",
7056 (uint32_t)mbx->mbxCommand,
7057 mbx->un.varWords[0], mbx->un.varWords[1]);
7058 }
7059 else {
7060 lpfc_debugfs_disc_trc(phba->pport,
7061 LPFC_DISC_TRC_MBOX,
7062 "MBOX Bsy: cmd:x%x mb:x%x x%x",
7063 (uint32_t)mbx->mbxCommand,
7064 mbx->un.varWords[0], mbx->un.varWords[1]);
7065 }
7066
7067 return MBX_BUSY;
7068 }
7069
7070 psli->sli_flag |= LPFC_SLI_MBOX_ACTIVE;
7071
7072
7073 if (flag != MBX_POLL) {
7074 if (!(psli->sli_flag & LPFC_SLI_ACTIVE) &&
7075 (mbx->mbxCommand != MBX_KILL_BOARD)) {
7076 psli->sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
7077 spin_unlock_irqrestore(&phba->hbalock, drvr_flag);
7078
7079 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
7080 "(%d):2531 Mailbox command x%x "
7081 "cannot issue Data: x%x x%x\n",
7082 pmbox->vport ? pmbox->vport->vpi : 0,
7083 pmbox->u.mb.mbxCommand,
7084 psli->sli_flag, flag);
7085 goto out_not_finished;
7086 }
7087
7088 timeout = msecs_to_jiffies(lpfc_mbox_tmo_val(phba, pmbox) *
7089 1000);
7090 mod_timer(&psli->mbox_tmo, jiffies + timeout);
7091 }
7092
7093
7094 lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI,
7095 "(%d):0309 Mailbox cmd x%x issue Data: x%x x%x "
7096 "x%x\n",
7097 pmbox->vport ? pmbox->vport->vpi : 0,
7098 mbx->mbxCommand, phba->pport->port_state,
7099 psli->sli_flag, flag);
7100
7101 if (mbx->mbxCommand != MBX_HEARTBEAT) {
7102 if (pmbox->vport) {
7103 lpfc_debugfs_disc_trc(pmbox->vport,
7104 LPFC_DISC_TRC_MBOX_VPORT,
7105 "MBOX Send vport: cmd:x%x mb:x%x x%x",
7106 (uint32_t)mbx->mbxCommand,
7107 mbx->un.varWords[0], mbx->un.varWords[1]);
7108 }
7109 else {
7110 lpfc_debugfs_disc_trc(phba->pport,
7111 LPFC_DISC_TRC_MBOX,
7112 "MBOX Send: cmd:x%x mb:x%x x%x",
7113 (uint32_t)mbx->mbxCommand,
7114 mbx->un.varWords[0], mbx->un.varWords[1]);
7115 }
7116 }
7117
7118 psli->slistat.mbox_cmd++;
7119 evtctr = psli->slistat.mbox_event;
7120
7121
7122 mbx->mbxOwner = OWN_CHIP;
7123
7124 if (psli->sli_flag & LPFC_SLI_ACTIVE) {
7125
7126 if (pmbox->in_ext_byte_len || pmbox->out_ext_byte_len) {
7127 *(((uint32_t *)mbx) + pmbox->mbox_offset_word)
7128 = (uint8_t *)phba->mbox_ext
7129 - (uint8_t *)phba->mbox;
7130 }
7131
7132
7133 if (pmbox->in_ext_byte_len && pmbox->context2) {
7134 lpfc_sli_pcimem_bcopy(pmbox->context2,
7135 (uint8_t *)phba->mbox_ext,
7136 pmbox->in_ext_byte_len);
7137 }
7138
7139 lpfc_sli_pcimem_bcopy(mbx, phba->mbox, MAILBOX_CMD_SIZE);
7140 } else {
7141
7142 if (pmbox->in_ext_byte_len || pmbox->out_ext_byte_len)
7143 *(((uint32_t *)mbx) + pmbox->mbox_offset_word)
7144 = MAILBOX_HBA_EXT_OFFSET;
7145
7146
7147 if (pmbox->in_ext_byte_len && pmbox->context2) {
7148 lpfc_memcpy_to_slim(phba->MBslimaddr +
7149 MAILBOX_HBA_EXT_OFFSET,
7150 pmbox->context2, pmbox->in_ext_byte_len);
7151
7152 }
7153 if (mbx->mbxCommand == MBX_CONFIG_PORT) {
7154
7155 lpfc_sli_pcimem_bcopy(mbx, phba->mbox, MAILBOX_CMD_SIZE);
7156 }
7157
7158
7159
7160 to_slim = phba->MBslimaddr + sizeof (uint32_t);
7161 lpfc_memcpy_to_slim(to_slim, &mbx->un.varWords[0],
7162 MAILBOX_CMD_SIZE - sizeof (uint32_t));
7163
7164
7165 ldata = *((uint32_t *)mbx);
7166 to_slim = phba->MBslimaddr;
7167 writel(ldata, to_slim);
7168 readl(to_slim);
7169
7170 if (mbx->mbxCommand == MBX_CONFIG_PORT) {
7171
7172 psli->sli_flag |= LPFC_SLI_ACTIVE;
7173 }
7174 }
7175
7176 wmb();
7177
7178 switch (flag) {
7179 case MBX_NOWAIT:
7180
7181 psli->mbox_active = pmbox;
7182
7183 writel(CA_MBATT, phba->CAregaddr);
7184 readl(phba->CAregaddr);
7185
7186 break;
7187
7188 case MBX_POLL:
7189
7190 psli->mbox_active = NULL;
7191
7192 writel(CA_MBATT, phba->CAregaddr);
7193 readl(phba->CAregaddr);
7194
7195 if (psli->sli_flag & LPFC_SLI_ACTIVE) {
7196
7197 word0 = *((uint32_t *)phba->mbox);
7198 word0 = le32_to_cpu(word0);
7199 } else {
7200
7201 if (lpfc_readl(phba->MBslimaddr, &word0)) {
7202 spin_unlock_irqrestore(&phba->hbalock,
7203 drvr_flag);
7204 goto out_not_finished;
7205 }
7206 }
7207
7208
7209 if (lpfc_readl(phba->HAregaddr, &ha_copy)) {
7210 spin_unlock_irqrestore(&phba->hbalock,
7211 drvr_flag);
7212 goto out_not_finished;
7213 }
7214 timeout = msecs_to_jiffies(lpfc_mbox_tmo_val(phba, pmbox) *
7215 1000) + jiffies;
7216 i = 0;
7217
7218 while (((word0 & OWN_CHIP) == OWN_CHIP) ||
7219 (!(ha_copy & HA_MBATT) &&
7220 (phba->link_state > LPFC_WARM_START))) {
7221 if (time_after(jiffies, timeout)) {
7222 psli->sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
7223 spin_unlock_irqrestore(&phba->hbalock,
7224 drvr_flag);
7225 goto out_not_finished;
7226 }
7227
7228
7229
7230 if (((word0 & OWN_CHIP) != OWN_CHIP)
7231 && (evtctr != psli->slistat.mbox_event))
7232 break;
7233
7234 if (i++ > 10) {
7235 spin_unlock_irqrestore(&phba->hbalock,
7236 drvr_flag);
7237 msleep(1);
7238 spin_lock_irqsave(&phba->hbalock, drvr_flag);
7239 }
7240
7241 if (psli->sli_flag & LPFC_SLI_ACTIVE) {
7242
7243 word0 = *((uint32_t *)phba->mbox);
7244 word0 = le32_to_cpu(word0);
7245 if (mbx->mbxCommand == MBX_CONFIG_PORT) {
7246 MAILBOX_t *slimmb;
7247 uint32_t slimword0;
7248
7249 slimword0 = readl(phba->MBslimaddr);
7250 slimmb = (MAILBOX_t *) & slimword0;
7251 if (((slimword0 & OWN_CHIP) != OWN_CHIP)
7252 && slimmb->mbxStatus) {
7253 psli->sli_flag &=
7254 ~LPFC_SLI_ACTIVE;
7255 word0 = slimword0;
7256 }
7257 }
7258 } else {
7259
7260 word0 = readl(phba->MBslimaddr);
7261 }
7262
7263 if (lpfc_readl(phba->HAregaddr, &ha_copy)) {
7264 spin_unlock_irqrestore(&phba->hbalock,
7265 drvr_flag);
7266 goto out_not_finished;
7267 }
7268 }
7269
7270 if (psli->sli_flag & LPFC_SLI_ACTIVE) {
7271
7272 lpfc_sli_pcimem_bcopy(phba->mbox, mbx, MAILBOX_CMD_SIZE);
7273
7274 if (pmbox->out_ext_byte_len && pmbox->context2) {
7275 lpfc_sli_pcimem_bcopy(phba->mbox_ext,
7276 pmbox->context2,
7277 pmbox->out_ext_byte_len);
7278 }
7279 } else {
7280
7281 lpfc_memcpy_from_slim(mbx, phba->MBslimaddr,
7282 MAILBOX_CMD_SIZE);
7283
7284 if (pmbox->out_ext_byte_len && pmbox->context2) {
7285 lpfc_memcpy_from_slim(pmbox->context2,
7286 phba->MBslimaddr +
7287 MAILBOX_HBA_EXT_OFFSET,
7288 pmbox->out_ext_byte_len);
7289 }
7290 }
7291
7292 writel(HA_MBATT, phba->HAregaddr);
7293 readl(phba->HAregaddr);
7294
7295 psli->sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
7296 status = mbx->mbxStatus;
7297 }
7298
7299 spin_unlock_irqrestore(&phba->hbalock, drvr_flag);
7300 return status;
7301
7302out_not_finished:
7303 if (processing_queue) {
7304 pmbox->u.mb.mbxStatus = MBX_NOT_FINISHED;
7305 lpfc_mbox_cmpl_put(phba, pmbox);
7306 }
7307 return MBX_NOT_FINISHED;
7308}
7309
7310
7311
7312
7313
7314
7315
7316
7317
7318
7319
7320
7321
/**
 * lpfc_sli4_async_mbox_block - Block posting of asynchronous mailbox commands
 * @phba: Pointer to HBA context object.
 *
 * Sets the LPFC_SLI_ASYNC_MBX_BLK flag so that no new asynchronous mailbox
 * command will be posted, then waits for any currently active mailbox
 * command to complete.
 *
 * Return: 0 if the outstanding command (if any) completed within its
 * command-specific timeout; 1 on timeout.  On timeout the block flag is
 * cleared again, since the queue could not be cleanly quiesced.
 */
static int
lpfc_sli4_async_mbox_block(struct lpfc_hba *phba)
{
	struct lpfc_sli *psli = &phba->sli;
	int rc = 0;
	unsigned long timeout = 0;

	/* Mark the asynchronous mailbox command posting as blocked */
	spin_lock_irq(&phba->hbalock);
	psli->sli_flag |= LPFC_SLI_ASYNC_MBX_BLK;
	/*
	 * Determine how long to wait: the active command's own timeout
	 * value (seconds), converted to a jiffies deadline.  timeout stays
	 * 0 when no command is active.
	 */
	if (phba->sli.mbox_active)
		timeout = msecs_to_jiffies(lpfc_mbox_tmo_val(phba,
						phba->sli.mbox_active) *
						1000) + jiffies;
	spin_unlock_irq(&phba->hbalock);

	/*
	 * A command was active: give any missed mailbox completions a
	 * chance to be processed before we start polling.
	 */
	if (timeout)
		lpfc_sli4_process_missed_mbox_completions(phba);

	/* Wait for the outstanding mailbox command to complete */
	while (phba->sli.mbox_active) {
		/* Check active mailbox complete status every 2ms */
		msleep(2);
		if (time_after(jiffies, timeout)) {
			/* Timeout: outstanding command did not complete */
			rc = 1;
			break;
		}
	}

	/* Could not cleanly block async mailbox commands: undo the block */
	if (rc) {
		spin_lock_irq(&phba->hbalock);
		psli->sli_flag &= ~LPFC_SLI_ASYNC_MBX_BLK;
		spin_unlock_irq(&phba->hbalock);
	}
	return rc;
}
7364
7365
7366
7367
7368
7369
7370
7371
7372
7373
7374
7375
7376static void
7377lpfc_sli4_async_mbox_unblock(struct lpfc_hba *phba)
7378{
7379 struct lpfc_sli *psli = &phba->sli;
7380
7381 spin_lock_irq(&phba->hbalock);
7382 if (!(psli->sli_flag & LPFC_SLI_ASYNC_MBX_BLK)) {
7383
7384 spin_unlock_irq(&phba->hbalock);
7385 return;
7386 }
7387
7388
7389
7390
7391
7392
7393 psli->sli_flag &= ~LPFC_SLI_ASYNC_MBX_BLK;
7394 spin_unlock_irq(&phba->hbalock);
7395
7396
7397 lpfc_worker_wake_up(phba);
7398}
7399
7400
7401
7402
7403
7404
7405
7406
7407
7408
7409
7410
7411static int
7412lpfc_sli4_wait_bmbx_ready(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
7413{
7414 uint32_t db_ready;
7415 unsigned long timeout;
7416 struct lpfc_register bmbx_reg;
7417
7418 timeout = msecs_to_jiffies(lpfc_mbox_tmo_val(phba, mboxq)
7419 * 1000) + jiffies;
7420
7421 do {
7422 bmbx_reg.word0 = readl(phba->sli4_hba.BMBXregaddr);
7423 db_ready = bf_get(lpfc_bmbx_rdy, &bmbx_reg);
7424 if (!db_ready)
7425 msleep(2);
7426
7427 if (time_after(jiffies, timeout))
7428 return MBXERR_ERROR;
7429 } while (!db_ready);
7430
7431 return 0;
7432}
7433
7434
7435
7436
7437
7438
7439
7440
7441
7442
7443
7444
7445
7446
7447
7448
7449
/**
 * lpfc_sli4_post_sync_mbox - Post a mailbox via the SLI-4 bootstrap mailbox
 * @phba: Pointer to HBA context object.
 * @mboxq: Pointer to mailbox object.
 *
 * Synchronously posts @mboxq through the bootstrap mailbox (BMBX) region
 * and polls for completion.  The write sequence (high DMA address, wait
 * ready, low DMA address, wait ready) follows the bootstrap protocol;
 * do not reorder it.  On success the completed MQE and MCQE are copied
 * back into @mboxq.
 *
 * Return: MBX_SUCCESS, or MBXERR_ERROR on busy/timeout/MCQE failure.
 */
static int
lpfc_sli4_post_sync_mbox(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
{
	int rc = MBX_SUCCESS;
	unsigned long iflag;
	uint32_t mcqe_status;
	uint32_t mbx_cmnd;
	struct lpfc_sli *psli = &phba->sli;
	struct lpfc_mqe *mb = &mboxq->u.mqe;
	struct lpfc_bmbx_create *mbox_rgn;
	struct dma_address *dma_address;

	/*
	 * Only one mailbox command may be outstanding at a time; claim
	 * the active slot under the hba lock or bail out.
	 */
	spin_lock_irqsave(&phba->hbalock, iflag);
	if (psli->sli_flag & LPFC_SLI_MBOX_ACTIVE) {
		spin_unlock_irqrestore(&phba->hbalock, iflag);
		lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
				"(%d):2532 Mailbox command x%x (x%x/x%x) "
				"cannot issue Data: x%x x%x\n",
				mboxq->vport ? mboxq->vport->vpi : 0,
				mboxq->u.mb.mbxCommand,
				lpfc_sli_config_mbox_subsys_get(phba, mboxq),
				lpfc_sli_config_mbox_opcode_get(phba, mboxq),
				psli->sli_flag, MBX_POLL);
		return MBXERR_ERROR;
	}
	/* The write to sli_flag / mbox_active marks this command active. */
	psli->sli_flag |= LPFC_SLI_MBOX_ACTIVE;
	phba->sli.mbox_active = mboxq;
	spin_unlock_irqrestore(&phba->hbalock, iflag);

	/* Wait for the bootstrap mailbox register to be ready */
	rc = lpfc_sli4_wait_bmbx_ready(phba, mboxq);
	if (rc)
		goto exit;

	/*
	 * Initialize the bootstrap memory region and copy the caller's
	 * MQE into it before ringing the doorbell.
	 */
	mbx_cmnd = bf_get(lpfc_mqe_command, mb);
	memset(phba->sli4_hba.bmbx.avirt, 0, sizeof(struct lpfc_bmbx_create));
	lpfc_sli_pcimem_bcopy(mb, phba->sli4_hba.bmbx.avirt,
			      sizeof(struct lpfc_mqe));

	/* Post the high DMA address of the bootstrap region first. */
	dma_address = &phba->sli4_hba.bmbx.dma_address;
	writel(dma_address->addr_hi, phba->sli4_hba.BMBXregaddr);

	/* Wait for bootstrap mailbox register for hi-address write done */
	rc = lpfc_sli4_wait_bmbx_ready(phba, mboxq);
	if (rc)
		goto exit;

	/* Post the low DMA address; this kicks off command execution. */
	writel(dma_address->addr_lo, phba->sli4_hba.BMBXregaddr);

	/* Wait for bootstrap mailbox register for low-address write done */
	rc = lpfc_sli4_wait_bmbx_ready(phba, mboxq);
	if (rc)
		goto exit;

	/*
	 * Command completed.  Copy the MQE and the trailing MCQE
	 * completion entry back out of the bootstrap region.
	 */
	lpfc_sli_pcimem_bcopy(phba->sli4_hba.bmbx.avirt, mb,
			      sizeof(struct lpfc_mqe));
	mbox_rgn = (struct lpfc_bmbx_create *) phba->sli4_hba.bmbx.avirt;
	lpfc_sli_pcimem_bcopy(&mbox_rgn->mcqe, &mboxq->mcqe,
			      sizeof(struct lpfc_mcqe));
	mcqe_status = bf_get(lpfc_mcqe_status, &mbox_rgn->mcqe);

	/*
	 * If the MCQE reports failure, propagate the MCQE status into the
	 * MQE status field (tagged with LPFC_MBX_ERROR_RANGE) unless the
	 * MQE already carries an error of its own.
	 */
	if (mcqe_status != MB_CQE_STATUS_SUCCESS) {
		if (bf_get(lpfc_mqe_status, mb) == MBX_SUCCESS)
			bf_set(lpfc_mqe_status, mb,
			       (LPFC_MBX_ERROR_RANGE | mcqe_status));
		rc = MBXERR_ERROR;
	} else
		lpfc_sli4_swap_str(phba, mboxq);

	lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI,
			"(%d):0356 Mailbox cmd x%x (x%x/x%x) Status x%x "
			"Data: x%x x%x x%x x%x x%x x%x x%x x%x x%x x%x x%x"
			" x%x x%x CQ: x%x x%x x%x x%x\n",
			mboxq->vport ? mboxq->vport->vpi : 0, mbx_cmnd,
			lpfc_sli_config_mbox_subsys_get(phba, mboxq),
			lpfc_sli_config_mbox_opcode_get(phba, mboxq),
			bf_get(lpfc_mqe_status, mb),
			mb->un.mb_words[0], mb->un.mb_words[1],
			mb->un.mb_words[2], mb->un.mb_words[3],
			mb->un.mb_words[4], mb->un.mb_words[5],
			mb->un.mb_words[6], mb->un.mb_words[7],
			mb->un.mb_words[8], mb->un.mb_words[9],
			mb->un.mb_words[10], mb->un.mb_words[11],
			mb->un.mb_words[12], mboxq->mcqe.word0,
			mboxq->mcqe.mcqe_tag0, mboxq->mcqe.mcqe_tag1,
			mboxq->mcqe.trailer);
exit:
	/* Release the active-mailbox slot on every path, success or error. */
	spin_lock_irqsave(&phba->hbalock, iflag);
	psli->sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
	phba->sli.mbox_active = NULL;
	spin_unlock_irqrestore(&phba->hbalock, iflag);
	return rc;
}
7565
7566
7567
7568
7569
7570
7571
7572
7573
7574
7575
7576
7577
7578static int
7579lpfc_sli_issue_mbox_s4(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq,
7580 uint32_t flag)
7581{
7582 struct lpfc_sli *psli = &phba->sli;
7583 unsigned long iflags;
7584 int rc;
7585
7586
7587 lpfc_idiag_mbxacc_dump_issue_mbox(phba, &mboxq->u.mb);
7588
7589 rc = lpfc_mbox_dev_check(phba);
7590 if (unlikely(rc)) {
7591 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
7592 "(%d):2544 Mailbox command x%x (x%x/x%x) "
7593 "cannot issue Data: x%x x%x\n",
7594 mboxq->vport ? mboxq->vport->vpi : 0,
7595 mboxq->u.mb.mbxCommand,
7596 lpfc_sli_config_mbox_subsys_get(phba, mboxq),
7597 lpfc_sli_config_mbox_opcode_get(phba, mboxq),
7598 psli->sli_flag, flag);
7599 goto out_not_finished;
7600 }
7601
7602
7603 if (!phba->sli4_hba.intr_enable) {
7604 if (flag == MBX_POLL)
7605 rc = lpfc_sli4_post_sync_mbox(phba, mboxq);
7606 else
7607 rc = -EIO;
7608 if (rc != MBX_SUCCESS)
7609 lpfc_printf_log(phba, KERN_WARNING, LOG_MBOX | LOG_SLI,
7610 "(%d):2541 Mailbox command x%x "
7611 "(x%x/x%x) failure: "
7612 "mqe_sta: x%x mcqe_sta: x%x/x%x "
7613 "Data: x%x x%x\n,",
7614 mboxq->vport ? mboxq->vport->vpi : 0,
7615 mboxq->u.mb.mbxCommand,
7616 lpfc_sli_config_mbox_subsys_get(phba,
7617 mboxq),
7618 lpfc_sli_config_mbox_opcode_get(phba,
7619 mboxq),
7620 bf_get(lpfc_mqe_status, &mboxq->u.mqe),
7621 bf_get(lpfc_mcqe_status, &mboxq->mcqe),
7622 bf_get(lpfc_mcqe_ext_status,
7623 &mboxq->mcqe),
7624 psli->sli_flag, flag);
7625 return rc;
7626 } else if (flag == MBX_POLL) {
7627 lpfc_printf_log(phba, KERN_WARNING, LOG_MBOX | LOG_SLI,
7628 "(%d):2542 Try to issue mailbox command "
7629 "x%x (x%x/x%x) synchronously ahead of async"
7630 "mailbox command queue: x%x x%x\n",
7631 mboxq->vport ? mboxq->vport->vpi : 0,
7632 mboxq->u.mb.mbxCommand,
7633 lpfc_sli_config_mbox_subsys_get(phba, mboxq),
7634 lpfc_sli_config_mbox_opcode_get(phba, mboxq),
7635 psli->sli_flag, flag);
7636
7637 rc = lpfc_sli4_async_mbox_block(phba);
7638 if (!rc) {
7639
7640 rc = lpfc_sli4_post_sync_mbox(phba, mboxq);
7641 if (rc != MBX_SUCCESS)
7642 lpfc_printf_log(phba, KERN_WARNING,
7643 LOG_MBOX | LOG_SLI,
7644 "(%d):2597 Sync Mailbox command "
7645 "x%x (x%x/x%x) failure: "
7646 "mqe_sta: x%x mcqe_sta: x%x/x%x "
7647 "Data: x%x x%x\n,",
7648 mboxq->vport ? mboxq->vport->vpi : 0,
7649 mboxq->u.mb.mbxCommand,
7650 lpfc_sli_config_mbox_subsys_get(phba,
7651 mboxq),
7652 lpfc_sli_config_mbox_opcode_get(phba,
7653 mboxq),
7654 bf_get(lpfc_mqe_status, &mboxq->u.mqe),
7655 bf_get(lpfc_mcqe_status, &mboxq->mcqe),
7656 bf_get(lpfc_mcqe_ext_status,
7657 &mboxq->mcqe),
7658 psli->sli_flag, flag);
7659
7660 lpfc_sli4_async_mbox_unblock(phba);
7661 }
7662 return rc;
7663 }
7664
7665
7666 rc = lpfc_mbox_cmd_check(phba, mboxq);
7667 if (rc) {
7668 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
7669 "(%d):2543 Mailbox command x%x (x%x/x%x) "
7670 "cannot issue Data: x%x x%x\n",
7671 mboxq->vport ? mboxq->vport->vpi : 0,
7672 mboxq->u.mb.mbxCommand,
7673 lpfc_sli_config_mbox_subsys_get(phba, mboxq),
7674 lpfc_sli_config_mbox_opcode_get(phba, mboxq),
7675 psli->sli_flag, flag);
7676 goto out_not_finished;
7677 }
7678
7679
7680 psli->slistat.mbox_busy++;
7681 spin_lock_irqsave(&phba->hbalock, iflags);
7682 lpfc_mbox_put(phba, mboxq);
7683 spin_unlock_irqrestore(&phba->hbalock, iflags);
7684 lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI,
7685 "(%d):0354 Mbox cmd issue - Enqueue Data: "
7686 "x%x (x%x/x%x) x%x x%x x%x\n",
7687 mboxq->vport ? mboxq->vport->vpi : 0xffffff,
7688 bf_get(lpfc_mqe_command, &mboxq->u.mqe),
7689 lpfc_sli_config_mbox_subsys_get(phba, mboxq),
7690 lpfc_sli_config_mbox_opcode_get(phba, mboxq),
7691 phba->pport->port_state,
7692 psli->sli_flag, MBX_NOWAIT);
7693
7694 lpfc_worker_wake_up(phba);
7695
7696 return MBX_BUSY;
7697
7698out_not_finished:
7699 return MBX_NOT_FINISHED;
7700}
7701
7702
7703
7704
7705
7706
7707
7708
7709
/**
 * lpfc_sli4_post_async_mbox - Post the next queued mailbox to the MQ
 * @phba: Pointer to HBA context object.
 *
 * Called (by the worker thread path) to dequeue the next mailbox command
 * from the driver's internal FIFO and post it to the SLI-4 mailbox work
 * queue.  Requires interrupts enabled; refuses to run while the async
 * queue is blocked or another mailbox command is active.
 *
 * Return: MBX_SUCCESS when a command was posted (or none was pending),
 * MBX_NOT_FINISHED otherwise.
 */
int
lpfc_sli4_post_async_mbox(struct lpfc_hba *phba)
{
	struct lpfc_sli *psli = &phba->sli;
	LPFC_MBOXQ_t *mboxq;
	int rc = MBX_SUCCESS;
	unsigned long iflags;
	struct lpfc_mqe *mqe;
	uint32_t mbx_cmnd;

	/* Check interrupt mode before post async mailbox command */
	if (unlikely(!phba->sli4_hba.intr_enable))
		return MBX_NOT_FINISHED;

	/* Check for mailbox command service token (blocked/active states) */
	spin_lock_irqsave(&phba->hbalock, iflags);
	if (unlikely(psli->sli_flag & LPFC_SLI_ASYNC_MBX_BLK)) {
		spin_unlock_irqrestore(&phba->hbalock, iflags);
		return MBX_NOT_FINISHED;
	}
	if (psli->sli_flag & LPFC_SLI_MBOX_ACTIVE) {
		spin_unlock_irqrestore(&phba->hbalock, iflags);
		return MBX_NOT_FINISHED;
	}
	if (unlikely(phba->sli.mbox_active)) {
		spin_unlock_irqrestore(&phba->hbalock, iflags);
		lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
				"0384 There is pending active mailbox cmd\n");
		return MBX_NOT_FINISHED;
	}
	/* Take the mailbox command service token */
	psli->sli_flag |= LPFC_SLI_MBOX_ACTIVE;

	/* Get the next mailbox command from head of queue */
	mboxq = lpfc_mbox_get(phba);

	/* If no more mailbox command waiting for post, we're done */
	if (!mboxq) {
		psli->sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
		spin_unlock_irqrestore(&phba->hbalock, iflags);
		return MBX_SUCCESS;
	}
	phba->sli.mbox_active = mboxq;
	spin_unlock_irqrestore(&phba->hbalock, iflags);

	/* Check device readiness for posting mailbox command */
	rc = lpfc_mbox_dev_check(phba);
	if (unlikely(rc))
		/* Driver clean routine will clean up pending mailbox */
		goto out_not_finished;

	/* Prepare the mbox command to be posted */
	mqe = &mboxq->u.mqe;
	mbx_cmnd = bf_get(lpfc_mqe_command, mqe);

	/* Start timer for the mbox_tmo and log some mailbox post messages */
	mod_timer(&psli->mbox_tmo, (jiffies +
		  msecs_to_jiffies(1000 * lpfc_mbox_tmo_val(phba, mboxq))));

	lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI,
			"(%d):0355 Mailbox cmd x%x (x%x/x%x) issue Data: "
			"x%x x%x\n",
			mboxq->vport ? mboxq->vport->vpi : 0, mbx_cmnd,
			lpfc_sli_config_mbox_subsys_get(phba, mboxq),
			lpfc_sli_config_mbox_opcode_get(phba, mboxq),
			phba->pport->port_state, psli->sli_flag);

	/* Heartbeats are not traced to avoid debugfs trace noise. */
	if (mbx_cmnd != MBX_HEARTBEAT) {
		if (mboxq->vport) {
			lpfc_debugfs_disc_trc(mboxq->vport,
				LPFC_DISC_TRC_MBOX_VPORT,
				"MBOX Send vport: cmd:x%x mb:x%x x%x",
				mbx_cmnd, mqe->un.mb_words[0],
				mqe->un.mb_words[1]);
		} else {
			lpfc_debugfs_disc_trc(phba->pport,
				LPFC_DISC_TRC_MBOX,
				"MBOX Send: cmd:x%x mb:x%x x%x",
				mbx_cmnd, mqe->un.mb_words[0],
				mqe->un.mb_words[1]);
		}
	}
	psli->slistat.mbox_cmd++;

	/* Post the mailbox command to the port */
	rc = lpfc_sli4_mq_put(phba->sli4_hba.mbx_wq, mqe);
	if (rc != MBX_SUCCESS) {
		lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
				"(%d):2533 Mailbox command x%x (x%x/x%x) "
				"cannot issue Data: x%x x%x\n",
				mboxq->vport ? mboxq->vport->vpi : 0,
				mboxq->u.mb.mbxCommand,
				lpfc_sli_config_mbox_subsys_get(phba, mboxq),
				lpfc_sli_config_mbox_opcode_get(phba, mboxq),
				psli->sli_flag, MBX_NOWAIT);
		goto out_not_finished;
	}

	return rc;

out_not_finished:
	/* Fail the command, complete it to the caller, release the token. */
	spin_lock_irqsave(&phba->hbalock, iflags);
	if (phba->sli.mbox_active) {
		mboxq->u.mb.mbxStatus = MBX_NOT_FINISHED;
		__lpfc_mbox_cmpl_put(phba, mboxq);
		/* Release the token */
		psli->sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
		phba->sli.mbox_active = NULL;
	}
	spin_unlock_irqrestore(&phba->hbalock, iflags);

	return MBX_NOT_FINISHED;
}
7823
7824
7825
7826
7827
7828
7829
7830
7831
7832
7833
7834
7835
7836int
7837lpfc_sli_issue_mbox(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmbox, uint32_t flag)
7838{
7839 return phba->lpfc_sli_issue_mbox(phba, pmbox, flag);
7840}
7841
7842
7843
7844
7845
7846
7847
7848
7849
7850
7851int
7852lpfc_mbox_api_table_setup(struct lpfc_hba *phba, uint8_t dev_grp)
7853{
7854
7855 switch (dev_grp) {
7856 case LPFC_PCI_DEV_LP:
7857 phba->lpfc_sli_issue_mbox = lpfc_sli_issue_mbox_s3;
7858 phba->lpfc_sli_handle_slow_ring_event =
7859 lpfc_sli_handle_slow_ring_event_s3;
7860 phba->lpfc_sli_hbq_to_firmware = lpfc_sli_hbq_to_firmware_s3;
7861 phba->lpfc_sli_brdrestart = lpfc_sli_brdrestart_s3;
7862 phba->lpfc_sli_brdready = lpfc_sli_brdready_s3;
7863 break;
7864 case LPFC_PCI_DEV_OC:
7865 phba->lpfc_sli_issue_mbox = lpfc_sli_issue_mbox_s4;
7866 phba->lpfc_sli_handle_slow_ring_event =
7867 lpfc_sli_handle_slow_ring_event_s4;
7868 phba->lpfc_sli_hbq_to_firmware = lpfc_sli_hbq_to_firmware_s4;
7869 phba->lpfc_sli_brdrestart = lpfc_sli_brdrestart_s4;
7870 phba->lpfc_sli_brdready = lpfc_sli_brdready_s4;
7871 break;
7872 default:
7873 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
7874 "1420 Invalid HBA PCI-device group: 0x%x\n",
7875 dev_grp);
7876 return -ENODEV;
7877 break;
7878 }
7879 return 0;
7880}
7881
7882
7883
7884
7885
7886
7887
7888
7889
7890
7891
/**
 * __lpfc_sli_ringtx_put - Add an iocb to the ring's txq
 * @phba: Pointer to HBA context object.
 * @pring: Pointer to driver SLI ring object.
 * @piocb: Pointer to the driver iocb object.
 *
 * Appends @piocb to the tail of the ring's transmit queue.  The caller
 * must hold phba->hbalock (enforced via lockdep_assert_held).
 */
void
__lpfc_sli_ringtx_put(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
		      struct lpfc_iocbq *piocb)
{
	lockdep_assert_held(&phba->hbalock);

	/* Insert the caller's iocb in the txq tail for later processing. */
	list_add_tail(&piocb->list, &pring->txq);
}
7900
7901
7902
7903
7904
7905
7906
7907
7908
7909
7910
7911
7912
7913
7914
7915
7916
7917
7918static struct lpfc_iocbq *
7919lpfc_sli_next_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
7920 struct lpfc_iocbq **piocb)
7921{
7922 struct lpfc_iocbq * nextiocb;
7923
7924 lockdep_assert_held(&phba->hbalock);
7925
7926 nextiocb = lpfc_sli_ringtx_get(phba, pring);
7927 if (!nextiocb) {
7928 nextiocb = *piocb;
7929 *piocb = NULL;
7930 }
7931
7932 return nextiocb;
7933}
7934
7935
7936
7937
7938
7939
7940
7941
7942
7943
7944
7945
7946
7947
7948
7949
7950
7951
7952
7953
7954
7955
7956
/**
 * __lpfc_sli_issue_iocb_s3 - Issue an iocb command to an SLI-3 ring (locked)
 * @phba: Pointer to HBA context object.
 * @ring_number: SLI ring number to issue the iocb on.
 * @piocb: Pointer to the driver iocb object.
 * @flag: SLI_IOCB_RET_IOCB to return IOCB_BUSY instead of queueing on txq.
 *
 * Caller must hold phba->hbalock (enforced via lockdep_assert_held).
 * Submits as many iocbs as the ring accepts; when the ring (or link
 * state) cannot take the iocb, either queues it on the txq or returns
 * IOCB_BUSY depending on @flag.
 *
 * Return: IOCB_SUCCESS, IOCB_BUSY, or IOCB_ERROR.
 */
static int
__lpfc_sli_issue_iocb_s3(struct lpfc_hba *phba, uint32_t ring_number,
		    struct lpfc_iocbq *piocb, uint32_t flag)
{
	struct lpfc_iocbq *nextiocb;
	IOCB_t *iocb;
	struct lpfc_sli_ring *pring = &phba->sli.ring[ring_number];

	lockdep_assert_held(&phba->hbalock);

	/*
	 * A completion handler without a vport is only legal for abort
	 * and close commands; anything else is a driver bug.
	 */
	if (piocb->iocb_cmpl && (!piocb->vport) &&
	   (piocb->iocb.ulpCommand != CMD_ABORT_XRI_CN) &&
	   (piocb->iocb.ulpCommand != CMD_CLOSE_XRI_CN)) {
		lpfc_printf_log(phba, KERN_ERR,
				LOG_SLI | LOG_VPORT,
				"1807 IOCB x%x failed. No vport\n",
				piocb->iocb.ulpCommand);
		dump_stack();
		return IOCB_ERROR;
	}


	/* If the PCI channel is in offline state, do not post iocbs. */
	if (unlikely(pci_channel_offline(phba->pcidev)))
		return IOCB_ERROR;

	/* If HBA has a deferred error attention, fail the iocb. */
	if (unlikely(phba->hba_flag & DEFER_ERATT))
		return IOCB_ERROR;

	/*
	 * We should never get an IOCB if we are in a < LINK_DOWN state
	 */
	if (unlikely(phba->link_state < LPFC_LINK_DOWN))
		return IOCB_ERROR;

	/*
	 * Check to see if we are blocking IOCB processing because of a
	 * outstanding event.
	 */
	if (unlikely(pring->flag & LPFC_STOP_IOCB_EVENT))
		goto iocb_busy;

	if (unlikely(phba->link_state == LPFC_LINK_DOWN)) {
		/*
		 * Only CREATE_XRI, CLOSE_XRI, and QUE_RING_BUF
		 * can be issued if the link is not up.  GEN_REQUEST64 is
		 * additionally allowed in Menlo maintenance mode only.
		 */
		switch (piocb->iocb.ulpCommand) {
		case CMD_GEN_REQUEST64_CR:
		case CMD_GEN_REQUEST64_CX:
			if (!(phba->sli.sli_flag & LPFC_MENLO_MAINT) ||
				(piocb->iocb.un.genreq64.w5.hcsw.Rctl !=
					FC_RCTL_DD_UNSOL_CMD) ||
				(piocb->iocb.un.genreq64.w5.hcsw.Type !=
					MENLO_TRANSPORT_TYPE))

				goto iocb_busy;
			break;
		case CMD_QUE_RING_BUF_CN:
		case CMD_QUE_RING_BUF64_CN:
			/*
			 * For IOCBs, like QUE_RING_BUF, that have no rsp ring
			 * completion, iocb_cmpl MUST be 0.
			 */
			if (piocb->iocb_cmpl)
				piocb->iocb_cmpl = NULL;
			/* fall through */
		case CMD_CREATE_XRI_CR:
		case CMD_CLOSE_XRI_CN:
		case CMD_CLOSE_XRI_CX:
			break;
		default:
			goto iocb_busy;
		}

	/*
	 * For FCP commands, we must be in a state where we can process link
	 * attention events.
	 */
	} else if (unlikely(pring->ringno == phba->sli.fcp_ring &&
			    !(phba->sli.sli_flag & LPFC_PROCESS_LA))) {
		goto iocb_busy;
	}

	/* Submit as long as the ring has free iocb slots and we have work. */
	while ((iocb = lpfc_sli_next_iocb_slot(phba, pring)) &&
	       (nextiocb = lpfc_sli_next_iocb(phba, pring, &piocb)))
		lpfc_sli_submit_iocb(phba, pring, iocb, nextiocb);

	if (iocb)
		lpfc_sli_update_ring(phba, pring);
	else
		lpfc_sli_update_full_ring(phba, pring);

	/* piocb == NULL means the caller's iocb was submitted. */
	if (!piocb)
		return IOCB_SUCCESS;

	goto out_busy;

 iocb_busy:
	pring->stats.iocb_cmd_delay++;

 out_busy:
	/* Queue on txq unless the caller asked for the iocb back. */
	if (!(flag & SLI_IOCB_RET_IOCB)) {
		__lpfc_sli_ringtx_put(phba, pring, piocb);
		return IOCB_SUCCESS;
	}

	return IOCB_BUSY;
}
8068
8069
8070
8071
8072
8073
8074
8075
8076
8077
8078
8079
8080
8081
8082
8083
8084
8085
/**
 * lpfc_sli4_bpl2sgl - Convert an SLI-3 BPL to an SLI-4 SGL
 * @phba: Pointer to HBA context object.
 * @piocbq: Pointer to the command iocbq (carries the BPL or single BDE).
 * @sglq: Pointer to the SGL queue entry whose sgl will be filled in.
 *
 * Copies the buffer pointer list (BPL) referenced by @piocbq into the
 * scatter-gather list of @sglq, converting each ulp_bde64 entry into a
 * sli4_sge.  Handles both the BLP_64 (list in piocbq->context3) and the
 * single-BDE_64 (embedded in the iocb) layouts.
 *
 * Return: the sglq's sli4_xritag on success, NO_XRI on missing inputs.
 */
static uint16_t
lpfc_sli4_bpl2sgl(struct lpfc_hba *phba, struct lpfc_iocbq *piocbq,
		struct lpfc_sglq *sglq)
{
	uint16_t xritag = NO_XRI;
	struct ulp_bde64 *bpl = NULL;
	struct ulp_bde64 bde;
	struct sli4_sge *sgl  = NULL;
	struct lpfc_dmabuf *dmabuf;
	IOCB_t *icmd;
	int numBdes = 0;
	int i = 0;
	uint32_t offset = 0; /* accumulated offset in the sg request */
	int inbound = 0; /* number of sg reply entries inbound from firmware */

	if (!piocbq || !sglq)
		return xritag;

	sgl  = (struct sli4_sge *)sglq->sgl;
	icmd = &piocbq->iocb;
	/* BLS responses carry no payload buffers; nothing to convert. */
	if (icmd->ulpCommand == CMD_XMIT_BLS_RSP64_CX)
		return sglq->sli4_xritag;
	if (icmd->un.genreq64.bdl.bdeFlags == BUFF_TYPE_BLP_64) {
		numBdes = icmd->un.genreq64.bdl.bdeSize /
				sizeof(struct ulp_bde64);
		/*
		 * The addrHigh and addrLow fields within the IOCB
		 * have not been byteswapped yet so there is no
		 * need to swap them back.
		 */
		if (piocbq->context3)
			dmabuf = (struct lpfc_dmabuf *)piocbq->context3;
		else
			return xritag;

		bpl  = (struct ulp_bde64 *)dmabuf->virt;
		if (!bpl)
			return xritag;

		for (i = 0; i < numBdes; i++) {
			/* Should already be byte swapped. */
			sgl->addr_hi = bpl->addrHigh;
			sgl->addr_lo = bpl->addrLow;

			sgl->word2 = le32_to_cpu(sgl->word2);
			if ((i+1) == numBdes)
				bf_set(lpfc_sli4_sge_last, sgl, 1);
			else
				bf_set(lpfc_sli4_sge_last, sgl, 0);
			/*
			 * swap the size field back to the cpu so we
			 * can assign it to the sgl.
			 */
			bde.tus.w = le32_to_cpu(bpl->tus.w);
			sgl->sge_len = cpu_to_le32(bde.tus.f.bdeSize);
			/*
			 * The offsets in the sgl need to be accumulated
			 * separately for the request and reply lists.
			 * The request is always first, the reply follows.
			 */
			if (piocbq->iocb.ulpCommand == CMD_GEN_REQUEST64_CR) {
				/* add up the reply sg entries */
				if (bpl->tus.f.bdeFlags == BUFF_TYPE_BDE_64I)
					inbound++;
				/* first inbound? reset the offset */
				if (inbound == 1)
					offset = 0;
				bf_set(lpfc_sli4_sge_offset, sgl, offset);
				bf_set(lpfc_sli4_sge_type, sgl,
					LPFC_SGE_TYPE_DATA);
				offset += bde.tus.f.bdeSize;
			}
			sgl->word2 = cpu_to_le32(sgl->word2);
			bpl++;
			sgl++;
		}
	} else if (icmd->un.genreq64.bdl.bdeFlags == BUFF_TYPE_BDE_64) {
		/*
		 * The addrHigh and addrLow fields of the BDE have not
		 * been byteswapped yet so they need to be swapped
		 * before putting them in the sgl.
		 */
		sgl->addr_hi =
			cpu_to_le32(icmd->un.genreq64.bdl.addrHigh);
		sgl->addr_lo =
			cpu_to_le32(icmd->un.genreq64.bdl.addrLow);
		sgl->word2 = le32_to_cpu(sgl->word2);
		bf_set(lpfc_sli4_sge_last, sgl, 1);
		sgl->word2 = cpu_to_le32(sgl->word2);
		sgl->sge_len =
			cpu_to_le32(icmd->un.genreq64.bdl.bdeSize);
	}
	return sglq->sli4_xritag;
}
8176
8177
8178
8179
8180
8181
8182
8183
8184
8185
8186
8187
8188
8189
8190
8191static int
8192lpfc_sli4_iocb2wqe(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq,
8193 union lpfc_wqe *wqe)
8194{
8195 uint32_t xmit_len = 0, total_len = 0;
8196 uint8_t ct = 0;
8197 uint32_t fip;
8198 uint32_t abort_tag;
8199 uint8_t command_type = ELS_COMMAND_NON_FIP;
8200 uint8_t cmnd;
8201 uint16_t xritag;
8202 uint16_t abrt_iotag;
8203 struct lpfc_iocbq *abrtiocbq;
8204 struct ulp_bde64 *bpl = NULL;
8205 uint32_t els_id = LPFC_ELS_ID_DEFAULT;
8206 int numBdes, i;
8207 struct ulp_bde64 bde;
8208 struct lpfc_nodelist *ndlp;
8209 uint32_t *pcmd;
8210 uint32_t if_type;
8211
8212 fip = phba->hba_flag & HBA_FIP_SUPPORT;
8213
8214 if (iocbq->iocb_flag & LPFC_IO_FCP)
8215 command_type = FCP_COMMAND;
8216 else if (fip && (iocbq->iocb_flag & LPFC_FIP_ELS_ID_MASK))
8217 command_type = ELS_COMMAND_FIP;
8218 else
8219 command_type = ELS_COMMAND_NON_FIP;
8220
8221
8222 memcpy(wqe, &iocbq->iocb, sizeof(union lpfc_wqe));
8223 abort_tag = (uint32_t) iocbq->iotag;
8224 xritag = iocbq->sli4_xritag;
8225 wqe->generic.wqe_com.word7 = 0;
8226 wqe->generic.wqe_com.word10 = 0;
8227
8228 if (iocbq->iocb.un.genreq64.bdl.bdeFlags == BUFF_TYPE_BLP_64) {
8229 numBdes = iocbq->iocb.un.genreq64.bdl.bdeSize /
8230 sizeof(struct ulp_bde64);
8231 bpl = (struct ulp_bde64 *)
8232 ((struct lpfc_dmabuf *)iocbq->context3)->virt;
8233 if (!bpl)
8234 return IOCB_ERROR;
8235
8236
8237 wqe->generic.bde.addrHigh = le32_to_cpu(bpl->addrHigh);
8238 wqe->generic.bde.addrLow = le32_to_cpu(bpl->addrLow);
8239
8240
8241
8242 wqe->generic.bde.tus.w = le32_to_cpu(bpl->tus.w);
8243 xmit_len = wqe->generic.bde.tus.f.bdeSize;
8244 total_len = 0;
8245 for (i = 0; i < numBdes; i++) {
8246 bde.tus.w = le32_to_cpu(bpl[i].tus.w);
8247 total_len += bde.tus.f.bdeSize;
8248 }
8249 } else
8250 xmit_len = iocbq->iocb.un.fcpi64.bdl.bdeSize;
8251
8252 iocbq->iocb.ulpIoTag = iocbq->iotag;
8253 cmnd = iocbq->iocb.ulpCommand;
8254
8255 switch (iocbq->iocb.ulpCommand) {
8256 case CMD_ELS_REQUEST64_CR:
8257 if (iocbq->iocb_flag & LPFC_IO_LIBDFC)
8258 ndlp = iocbq->context_un.ndlp;
8259 else
8260 ndlp = (struct lpfc_nodelist *)iocbq->context1;
8261 if (!iocbq->iocb.ulpLe) {
8262 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
8263 "2007 Only Limited Edition cmd Format"
8264 " supported 0x%x\n",
8265 iocbq->iocb.ulpCommand);
8266 return IOCB_ERROR;
8267 }
8268
8269 wqe->els_req.payload_len = xmit_len;
8270
8271 bf_set(wqe_tmo, &wqe->els_req.wqe_com,
8272 iocbq->iocb.ulpTimeout);
8273
8274 bf_set(els_req64_vf, &wqe->els_req, 0);
8275
8276 bf_set(els_req64_vfid, &wqe->els_req, 0);
8277 ct = ((iocbq->iocb.ulpCt_h << 1) | iocbq->iocb.ulpCt_l);
8278 bf_set(wqe_ctxt_tag, &wqe->els_req.wqe_com,
8279 iocbq->iocb.ulpContext);
8280 bf_set(wqe_ct, &wqe->els_req.wqe_com, ct);
8281 bf_set(wqe_pu, &wqe->els_req.wqe_com, 0);
8282
8283 if (command_type == ELS_COMMAND_FIP)
8284 els_id = ((iocbq->iocb_flag & LPFC_FIP_ELS_ID_MASK)
8285 >> LPFC_FIP_ELS_ID_SHIFT);
8286 pcmd = (uint32_t *) (((struct lpfc_dmabuf *)
8287 iocbq->context2)->virt);
8288 if_type = bf_get(lpfc_sli_intf_if_type,
8289 &phba->sli4_hba.sli_intf);
8290 if (if_type == LPFC_SLI_INTF_IF_TYPE_2) {
8291 if (pcmd && (*pcmd == ELS_CMD_FLOGI ||
8292 *pcmd == ELS_CMD_SCR ||
8293 *pcmd == ELS_CMD_FDISC ||
8294 *pcmd == ELS_CMD_LOGO ||
8295 *pcmd == ELS_CMD_PLOGI)) {
8296 bf_set(els_req64_sp, &wqe->els_req, 1);
8297 bf_set(els_req64_sid, &wqe->els_req,
8298 iocbq->vport->fc_myDID);
8299 if ((*pcmd == ELS_CMD_FLOGI) &&
8300 !(phba->fc_topology ==
8301 LPFC_TOPOLOGY_LOOP))
8302 bf_set(els_req64_sid, &wqe->els_req, 0);
8303 bf_set(wqe_ct, &wqe->els_req.wqe_com, 1);
8304 bf_set(wqe_ctxt_tag, &wqe->els_req.wqe_com,
8305 phba->vpi_ids[iocbq->vport->vpi]);
8306 } else if (pcmd && iocbq->context1) {
8307 bf_set(wqe_ct, &wqe->els_req.wqe_com, 0);
8308 bf_set(wqe_ctxt_tag, &wqe->els_req.wqe_com,
8309 phba->sli4_hba.rpi_ids[ndlp->nlp_rpi]);
8310 }
8311 }
8312 bf_set(wqe_temp_rpi, &wqe->els_req.wqe_com,
8313 phba->sli4_hba.rpi_ids[ndlp->nlp_rpi]);
8314 bf_set(wqe_els_id, &wqe->els_req.wqe_com, els_id);
8315 bf_set(wqe_dbde, &wqe->els_req.wqe_com, 1);
8316 bf_set(wqe_iod, &wqe->els_req.wqe_com, LPFC_WQE_IOD_READ);
8317 bf_set(wqe_qosd, &wqe->els_req.wqe_com, 1);
8318 bf_set(wqe_lenloc, &wqe->els_req.wqe_com, LPFC_WQE_LENLOC_NONE);
8319 bf_set(wqe_ebde_cnt, &wqe->els_req.wqe_com, 0);
8320 wqe->els_req.max_response_payload_len = total_len - xmit_len;
8321 break;
8322 case CMD_XMIT_SEQUENCE64_CX:
8323 bf_set(wqe_ctxt_tag, &wqe->xmit_sequence.wqe_com,
8324 iocbq->iocb.un.ulpWord[3]);
8325 bf_set(wqe_rcvoxid, &wqe->xmit_sequence.wqe_com,
8326 iocbq->iocb.unsli3.rcvsli3.ox_id);
8327
8328 xmit_len = total_len;
8329 cmnd = CMD_XMIT_SEQUENCE64_CR;
8330 if (phba->link_flag & LS_LOOPBACK_MODE)
8331 bf_set(wqe_xo, &wqe->xmit_sequence.wge_ctl, 1);
8332 case CMD_XMIT_SEQUENCE64_CR:
8333
8334 wqe->xmit_sequence.rsvd3 = 0;
8335
8336
8337 bf_set(wqe_pu, &wqe->xmit_sequence.wqe_com, 0);
8338 bf_set(wqe_dbde, &wqe->xmit_sequence.wqe_com, 1);
8339 bf_set(wqe_iod, &wqe->xmit_sequence.wqe_com,
8340 LPFC_WQE_IOD_WRITE);
8341 bf_set(wqe_lenloc, &wqe->xmit_sequence.wqe_com,
8342 LPFC_WQE_LENLOC_WORD12);
8343 bf_set(wqe_ebde_cnt, &wqe->xmit_sequence.wqe_com, 0);
8344 wqe->xmit_sequence.xmit_len = xmit_len;
8345 command_type = OTHER_COMMAND;
8346 break;
8347 case CMD_XMIT_BCAST64_CN:
8348
8349 wqe->xmit_bcast64.seq_payload_len = xmit_len;
8350
8351
8352
8353 bf_set(wqe_ct, &wqe->xmit_bcast64.wqe_com,
8354 ((iocbq->iocb.ulpCt_h << 1) | iocbq->iocb.ulpCt_l));
8355 bf_set(wqe_dbde, &wqe->xmit_bcast64.wqe_com, 1);
8356 bf_set(wqe_iod, &wqe->xmit_bcast64.wqe_com, LPFC_WQE_IOD_WRITE);
8357 bf_set(wqe_lenloc, &wqe->xmit_bcast64.wqe_com,
8358 LPFC_WQE_LENLOC_WORD3);
8359 bf_set(wqe_ebde_cnt, &wqe->xmit_bcast64.wqe_com, 0);
8360 break;
8361 case CMD_FCP_IWRITE64_CR:
8362 command_type = FCP_COMMAND_DATA_OUT;
8363
8364
8365 bf_set(payload_offset_len, &wqe->fcp_iwrite,
8366 xmit_len + sizeof(struct fcp_rsp));
8367 bf_set(cmd_buff_len, &wqe->fcp_iwrite,
8368 0);
8369
8370
8371 bf_set(wqe_erp, &wqe->fcp_iwrite.wqe_com,
8372 iocbq->iocb.ulpFCP2Rcvy);
8373 bf_set(wqe_lnk, &wqe->fcp_iwrite.wqe_com, iocbq->iocb.ulpXS);
8374
8375 bf_set(wqe_xc, &wqe->fcp_iwrite.wqe_com, 0);
8376 bf_set(wqe_iod, &wqe->fcp_iwrite.wqe_com, LPFC_WQE_IOD_WRITE);
8377 bf_set(wqe_lenloc, &wqe->fcp_iwrite.wqe_com,
8378 LPFC_WQE_LENLOC_WORD4);
8379 bf_set(wqe_ebde_cnt, &wqe->fcp_iwrite.wqe_com, 0);
8380 bf_set(wqe_pu, &wqe->fcp_iwrite.wqe_com, iocbq->iocb.ulpPU);
8381 bf_set(wqe_dbde, &wqe->fcp_iwrite.wqe_com, 1);
8382 if (iocbq->iocb_flag & LPFC_IO_OAS) {
8383 bf_set(wqe_oas, &wqe->fcp_iwrite.wqe_com, 1);
8384 if (phba->cfg_XLanePriority) {
8385 bf_set(wqe_ccpe, &wqe->fcp_iwrite.wqe_com, 1);
8386 bf_set(wqe_ccp, &wqe->fcp_iwrite.wqe_com,
8387 (phba->cfg_XLanePriority << 1));
8388 }
8389 }
8390 break;
8391 case CMD_FCP_IREAD64_CR:
8392
8393
8394 bf_set(payload_offset_len, &wqe->fcp_iread,
8395 xmit_len + sizeof(struct fcp_rsp));
8396 bf_set(cmd_buff_len, &wqe->fcp_iread,
8397 0);
8398
8399
8400 bf_set(wqe_erp, &wqe->fcp_iread.wqe_com,
8401 iocbq->iocb.ulpFCP2Rcvy);
8402 bf_set(wqe_lnk, &wqe->fcp_iread.wqe_com, iocbq->iocb.ulpXS);
8403
8404 bf_set(wqe_xc, &wqe->fcp_iread.wqe_com, 0);
8405 bf_set(wqe_iod, &wqe->fcp_iread.wqe_com, LPFC_WQE_IOD_READ);
8406 bf_set(wqe_lenloc, &wqe->fcp_iread.wqe_com,
8407 LPFC_WQE_LENLOC_WORD4);
8408 bf_set(wqe_ebde_cnt, &wqe->fcp_iread.wqe_com, 0);
8409 bf_set(wqe_pu, &wqe->fcp_iread.wqe_com, iocbq->iocb.ulpPU);
8410 bf_set(wqe_dbde, &wqe->fcp_iread.wqe_com, 1);
8411 if (iocbq->iocb_flag & LPFC_IO_OAS) {
8412 bf_set(wqe_oas, &wqe->fcp_iread.wqe_com, 1);
8413 if (phba->cfg_XLanePriority) {
8414 bf_set(wqe_ccpe, &wqe->fcp_iread.wqe_com, 1);
8415 bf_set(wqe_ccp, &wqe->fcp_iread.wqe_com,
8416 (phba->cfg_XLanePriority << 1));
8417 }
8418 }
8419 break;
8420 case CMD_FCP_ICMND64_CR:
8421
8422
8423 bf_set(payload_offset_len, &wqe->fcp_icmd,
8424 xmit_len + sizeof(struct fcp_rsp));
8425 bf_set(cmd_buff_len, &wqe->fcp_icmd,
8426 0);
8427
8428 bf_set(wqe_pu, &wqe->fcp_icmd.wqe_com, 0);
8429
8430 bf_set(wqe_xc, &wqe->fcp_icmd.wqe_com, 0);
8431 bf_set(wqe_dbde, &wqe->fcp_icmd.wqe_com, 1);
8432 bf_set(wqe_iod, &wqe->fcp_icmd.wqe_com, LPFC_WQE_IOD_WRITE);
8433 bf_set(wqe_qosd, &wqe->fcp_icmd.wqe_com, 1);
8434 bf_set(wqe_lenloc, &wqe->fcp_icmd.wqe_com,
8435 LPFC_WQE_LENLOC_NONE);
8436 bf_set(wqe_ebde_cnt, &wqe->fcp_icmd.wqe_com, 0);
8437 bf_set(wqe_erp, &wqe->fcp_icmd.wqe_com,
8438 iocbq->iocb.ulpFCP2Rcvy);
8439 if (iocbq->iocb_flag & LPFC_IO_OAS) {
8440 bf_set(wqe_oas, &wqe->fcp_icmd.wqe_com, 1);
8441 if (phba->cfg_XLanePriority) {
8442 bf_set(wqe_ccpe, &wqe->fcp_icmd.wqe_com, 1);
8443 bf_set(wqe_ccp, &wqe->fcp_icmd.wqe_com,
8444 (phba->cfg_XLanePriority << 1));
8445 }
8446 }
8447 break;
8448 case CMD_GEN_REQUEST64_CR:
8449
8450
8451
8452 xmit_len = 0;
8453 numBdes = iocbq->iocb.un.genreq64.bdl.bdeSize /
8454 sizeof(struct ulp_bde64);
8455 for (i = 0; i < numBdes; i++) {
8456 bde.tus.w = le32_to_cpu(bpl[i].tus.w);
8457 if (bde.tus.f.bdeFlags != BUFF_TYPE_BDE_64)
8458 break;
8459 xmit_len += bde.tus.f.bdeSize;
8460 }
8461
8462 wqe->gen_req.request_payload_len = xmit_len;
8463
8464
8465
8466 if (iocbq->iocb.ulpCt_h || iocbq->iocb.ulpCt_l) {
8467 ct = ((iocbq->iocb.ulpCt_h << 1) | iocbq->iocb.ulpCt_l);
8468 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
8469 "2015 Invalid CT %x command 0x%x\n",
8470 ct, iocbq->iocb.ulpCommand);
8471 return IOCB_ERROR;
8472 }
8473 bf_set(wqe_ct, &wqe->gen_req.wqe_com, 0);
8474 bf_set(wqe_tmo, &wqe->gen_req.wqe_com, iocbq->iocb.ulpTimeout);
8475 bf_set(wqe_pu, &wqe->gen_req.wqe_com, iocbq->iocb.ulpPU);
8476 bf_set(wqe_dbde, &wqe->gen_req.wqe_com, 1);
8477 bf_set(wqe_iod, &wqe->gen_req.wqe_com, LPFC_WQE_IOD_READ);
8478 bf_set(wqe_qosd, &wqe->gen_req.wqe_com, 1);
8479 bf_set(wqe_lenloc, &wqe->gen_req.wqe_com, LPFC_WQE_LENLOC_NONE);
8480 bf_set(wqe_ebde_cnt, &wqe->gen_req.wqe_com, 0);
8481 wqe->gen_req.max_response_payload_len = total_len - xmit_len;
8482 command_type = OTHER_COMMAND;
8483 break;
8484 case CMD_XMIT_ELS_RSP64_CX:
8485 ndlp = (struct lpfc_nodelist *)iocbq->context1;
8486
8487
8488 wqe->xmit_els_rsp.response_payload_len = xmit_len;
8489
8490 wqe->xmit_els_rsp.word4 = 0;
8491
8492 bf_set(wqe_els_did, &wqe->xmit_els_rsp.wqe_dest,
8493 iocbq->iocb.un.xseq64.xmit_els_remoteID);
8494
8495 if_type = bf_get(lpfc_sli_intf_if_type,
8496 &phba->sli4_hba.sli_intf);
8497 if (if_type == LPFC_SLI_INTF_IF_TYPE_2) {
8498 if (iocbq->vport->fc_flag & FC_PT2PT) {
8499 bf_set(els_rsp64_sp, &wqe->xmit_els_rsp, 1);
8500 bf_set(els_rsp64_sid, &wqe->xmit_els_rsp,
8501 iocbq->vport->fc_myDID);
8502 if (iocbq->vport->fc_myDID == Fabric_DID) {
8503 bf_set(wqe_els_did,
8504 &wqe->xmit_els_rsp.wqe_dest, 0);
8505 }
8506 }
8507 }
8508 bf_set(wqe_ct, &wqe->xmit_els_rsp.wqe_com,
8509 ((iocbq->iocb.ulpCt_h << 1) | iocbq->iocb.ulpCt_l));
8510 bf_set(wqe_pu, &wqe->xmit_els_rsp.wqe_com, iocbq->iocb.ulpPU);
8511 bf_set(wqe_rcvoxid, &wqe->xmit_els_rsp.wqe_com,
8512 iocbq->iocb.unsli3.rcvsli3.ox_id);
8513 if (!iocbq->iocb.ulpCt_h && iocbq->iocb.ulpCt_l)
8514 bf_set(wqe_ctxt_tag, &wqe->xmit_els_rsp.wqe_com,
8515 phba->vpi_ids[iocbq->vport->vpi]);
8516 bf_set(wqe_dbde, &wqe->xmit_els_rsp.wqe_com, 1);
8517 bf_set(wqe_iod, &wqe->xmit_els_rsp.wqe_com, LPFC_WQE_IOD_WRITE);
8518 bf_set(wqe_qosd, &wqe->xmit_els_rsp.wqe_com, 1);
8519 bf_set(wqe_lenloc, &wqe->xmit_els_rsp.wqe_com,
8520 LPFC_WQE_LENLOC_WORD3);
8521 bf_set(wqe_ebde_cnt, &wqe->xmit_els_rsp.wqe_com, 0);
8522 bf_set(wqe_rsp_temp_rpi, &wqe->xmit_els_rsp,
8523 phba->sli4_hba.rpi_ids[ndlp->nlp_rpi]);
8524 pcmd = (uint32_t *) (((struct lpfc_dmabuf *)
8525 iocbq->context2)->virt);
8526 if (phba->fc_topology == LPFC_TOPOLOGY_LOOP) {
8527 bf_set(els_rsp64_sp, &wqe->xmit_els_rsp, 1);
8528 bf_set(els_rsp64_sid, &wqe->xmit_els_rsp,
8529 iocbq->vport->fc_myDID);
8530 bf_set(wqe_ct, &wqe->xmit_els_rsp.wqe_com, 1);
8531 bf_set(wqe_ctxt_tag, &wqe->xmit_els_rsp.wqe_com,
8532 phba->vpi_ids[phba->pport->vpi]);
8533 }
8534 command_type = OTHER_COMMAND;
8535 break;
8536 case CMD_CLOSE_XRI_CN:
8537 case CMD_ABORT_XRI_CN:
8538 case CMD_ABORT_XRI_CX:
8539
8540
8541 abrt_iotag = iocbq->iocb.un.acxri.abortContextTag;
8542 if (abrt_iotag != 0 && abrt_iotag <= phba->sli.last_iotag) {
8543 abrtiocbq = phba->sli.iocbq_lookup[abrt_iotag];
8544 fip = abrtiocbq->iocb_flag & LPFC_FIP_ELS_ID_MASK;
8545 } else
8546 fip = 0;
8547
8548 if ((iocbq->iocb.ulpCommand == CMD_CLOSE_XRI_CN) || fip)
8549
8550
8551
8552
8553
8554 bf_set(abort_cmd_ia, &wqe->abort_cmd, 1);
8555 else
8556 bf_set(abort_cmd_ia, &wqe->abort_cmd, 0);
8557 bf_set(abort_cmd_criteria, &wqe->abort_cmd, T_XRI_TAG);
8558
8559 wqe->abort_cmd.rsrvd5 = 0;
8560 bf_set(wqe_ct, &wqe->abort_cmd.wqe_com,
8561 ((iocbq->iocb.ulpCt_h << 1) | iocbq->iocb.ulpCt_l));
8562 abort_tag = iocbq->iocb.un.acxri.abortIoTag;
8563
8564
8565
8566
8567 bf_set(wqe_cmnd, &wqe->abort_cmd.wqe_com, CMD_ABORT_XRI_CX);
8568 bf_set(wqe_qosd, &wqe->abort_cmd.wqe_com, 1);
8569 bf_set(wqe_lenloc, &wqe->abort_cmd.wqe_com,
8570 LPFC_WQE_LENLOC_NONE);
8571 cmnd = CMD_ABORT_XRI_CX;
8572 command_type = OTHER_COMMAND;
8573 xritag = 0;
8574 break;
8575 case CMD_XMIT_BLS_RSP64_CX:
8576 ndlp = (struct lpfc_nodelist *)iocbq->context1;
8577
8578
8579
8580
8581 memset(wqe, 0, sizeof(union lpfc_wqe));
8582
8583 bf_set(xmit_bls_rsp64_oxid, &wqe->xmit_bls_rsp,
8584 bf_get(lpfc_abts_oxid, &iocbq->iocb.un.bls_rsp));
8585 if (bf_get(lpfc_abts_orig, &iocbq->iocb.un.bls_rsp) ==
8586 LPFC_ABTS_UNSOL_INT) {
8587
8588
8589
8590
8591 bf_set(xmit_bls_rsp64_rxid, &wqe->xmit_bls_rsp,
8592 iocbq->sli4_xritag);
8593 } else {
8594
8595
8596
8597
8598 bf_set(xmit_bls_rsp64_rxid, &wqe->xmit_bls_rsp,
8599 bf_get(lpfc_abts_rxid, &iocbq->iocb.un.bls_rsp));
8600 }
8601 bf_set(xmit_bls_rsp64_seqcnthi, &wqe->xmit_bls_rsp, 0xffff);
8602 bf_set(wqe_xmit_bls_pt, &wqe->xmit_bls_rsp.wqe_dest, 0x1);
8603
8604
8605 bf_set(wqe_els_did, &wqe->xmit_bls_rsp.wqe_dest,
8606 ndlp->nlp_DID);
8607 bf_set(xmit_bls_rsp64_temprpi, &wqe->xmit_bls_rsp,
8608 iocbq->iocb.ulpContext);
8609 bf_set(wqe_ct, &wqe->xmit_bls_rsp.wqe_com, 1);
8610 bf_set(wqe_ctxt_tag, &wqe->xmit_bls_rsp.wqe_com,
8611 phba->vpi_ids[phba->pport->vpi]);
8612 bf_set(wqe_qosd, &wqe->xmit_bls_rsp.wqe_com, 1);
8613 bf_set(wqe_lenloc, &wqe->xmit_bls_rsp.wqe_com,
8614 LPFC_WQE_LENLOC_NONE);
8615
8616 command_type = OTHER_COMMAND;
8617 if (iocbq->iocb.un.xseq64.w5.hcsw.Rctl == FC_RCTL_BA_RJT) {
8618 bf_set(xmit_bls_rsp64_rjt_vspec, &wqe->xmit_bls_rsp,
8619 bf_get(lpfc_vndr_code, &iocbq->iocb.un.bls_rsp));
8620 bf_set(xmit_bls_rsp64_rjt_expc, &wqe->xmit_bls_rsp,
8621 bf_get(lpfc_rsn_expln, &iocbq->iocb.un.bls_rsp));
8622 bf_set(xmit_bls_rsp64_rjt_rsnc, &wqe->xmit_bls_rsp,
8623 bf_get(lpfc_rsn_code, &iocbq->iocb.un.bls_rsp));
8624 }
8625
8626 break;
8627 case CMD_XRI_ABORTED_CX:
8628 case CMD_CREATE_XRI_CR:
8629 case CMD_IOCB_FCP_IBIDIR64_CR:
8630 case CMD_FCP_TSEND64_CX:
8631 case CMD_FCP_TRSP64_CX:
8632 case CMD_FCP_AUTO_TRSP_CX:
8633 default:
8634 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
8635 "2014 Invalid command 0x%x\n",
8636 iocbq->iocb.ulpCommand);
8637 return IOCB_ERROR;
8638 break;
8639 }
8640
8641 if (iocbq->iocb_flag & LPFC_IO_DIF_PASS)
8642 bf_set(wqe_dif, &wqe->generic.wqe_com, LPFC_WQE_DIF_PASSTHRU);
8643 else if (iocbq->iocb_flag & LPFC_IO_DIF_STRIP)
8644 bf_set(wqe_dif, &wqe->generic.wqe_com, LPFC_WQE_DIF_STRIP);
8645 else if (iocbq->iocb_flag & LPFC_IO_DIF_INSERT)
8646 bf_set(wqe_dif, &wqe->generic.wqe_com, LPFC_WQE_DIF_INSERT);
8647 iocbq->iocb_flag &= ~(LPFC_IO_DIF_PASS | LPFC_IO_DIF_STRIP |
8648 LPFC_IO_DIF_INSERT);
8649 bf_set(wqe_xri_tag, &wqe->generic.wqe_com, xritag);
8650 bf_set(wqe_reqtag, &wqe->generic.wqe_com, iocbq->iotag);
8651 wqe->generic.wqe_com.abort_tag = abort_tag;
8652 bf_set(wqe_cmd_type, &wqe->generic.wqe_com, command_type);
8653 bf_set(wqe_cmnd, &wqe->generic.wqe_com, cmnd);
8654 bf_set(wqe_class, &wqe->generic.wqe_com, iocbq->iocb.ulpClass);
8655 bf_set(wqe_cqid, &wqe->generic.wqe_com, LPFC_WQE_CQ_ID_DEFAULT);
8656 return 0;
8657}
8658
8659
8660
8661
8662
8663
8664
8665
8666
8667
8668
8669
8670
8671
8672
/**
 * __lpfc_sli_issue_iocb_s4 - SLI4 device lockless version of iocb issue
 * @phba: Pointer to HBA context object.
 * @ring_number: SLI ring number to issue the iocb on.
 * @piocb: Pointer to the command iocb.
 * @flag: SLI_IOCB_RET_IOCB means return IOCB_BUSY instead of queueing to txq.
 *
 * Resolves an sglq/XRI for the iocb if needed, converts the iocb to a WQE,
 * and posts it to the appropriate SLI4 work queue (FCP/OAS or ELS).
 * Must be called with the appropriate lock held (asserted below).
 *
 * Returns IOCB_SUCCESS when submitted or deferred to the txq, IOCB_BUSY
 * when the caller asked not to queue and resources are exhausted, and
 * IOCB_ERROR on conversion/posting failure. Returns 0 (== IOCB_SUCCESS)
 * on the successful submit path.
 **/
static int
__lpfc_sli_issue_iocb_s4(struct lpfc_hba *phba, uint32_t ring_number,
			 struct lpfc_iocbq *piocb, uint32_t flag)
{
	struct lpfc_sglq *sglq;
	union lpfc_wqe wqe;
	struct lpfc_queue *wq;
	struct lpfc_sli_ring *pring = &phba->sli.ring[ring_number];

	lockdep_assert_held(&phba->hbalock);

	if (piocb->sli4_xritag == NO_XRI) {
		/* Abort/close commands do not need an sglq of their own. */
		if (piocb->iocb.ulpCommand == CMD_ABORT_XRI_CN ||
		    piocb->iocb.ulpCommand == CMD_CLOSE_XRI_CN)
			sglq = NULL;
		else {
			if (!list_empty(&pring->txq)) {
				/* Keep ordering: queue behind pending iocbs
				 * unless the caller wants IOCB_BUSY back.
				 */
				if (!(flag & SLI_IOCB_RET_IOCB)) {
					__lpfc_sli_ringtx_put(phba,
						pring, piocb);
					return IOCB_SUCCESS;
				} else {
					return IOCB_BUSY;
				}
			} else {
				sglq = __lpfc_sli_get_sglq(phba, piocb);
				if (!sglq) {
					/* No sglq available: defer or bail. */
					if (!(flag & SLI_IOCB_RET_IOCB)) {
						__lpfc_sli_ringtx_put(phba,
								pring,
								piocb);
						return IOCB_SUCCESS;
					} else
						return IOCB_BUSY;
				}
			}
		}
	} else if (piocb->iocb_flag & LPFC_IO_FCP) {
		/* FCP iocbs already carry an XRI and mapped sgl. */
		sglq = NULL;
	} else {
		/*
		 * The iocb already has an XRI (e.g. a retried ELS command);
		 * look up the sglq that is still active for it.
		 */
		sglq = __lpfc_get_active_sglq(phba, piocb->sli4_lxritag);
		if (!sglq)
			return IOCB_ERROR;
	}

	if (sglq) {
		piocb->sli4_lxritag = sglq->sli4_lxritag;
		piocb->sli4_xritag = sglq->sli4_xritag;
		/* Convert the BPL to an SGL attached to this XRI. */
		if (NO_XRI == lpfc_sli4_bpl2sgl(phba, piocb, sglq))
			return IOCB_ERROR;
	}

	/* Translate the SLI3-format iocb into an SLI4 WQE. */
	if (lpfc_sli4_iocb2wqe(phba, piocb, &wqe))
		return IOCB_ERROR;

	if ((piocb->iocb_flag & LPFC_IO_FCP) ||
	    (piocb->iocb_flag & LPFC_USE_FCPWQIDX)) {
		/* FCP traffic: regular per-index WQ, or the OAS WQ when
		 * optimized access storage is configured and requested.
		 */
		if (!phba->cfg_fof || (!(piocb->iocb_flag & LPFC_IO_OAS))) {
			wq = phba->sli4_hba.fcp_wq[piocb->fcp_wqidx];
		} else {
			wq = phba->sli4_hba.oas_wq;
		}
		if (lpfc_sli4_wq_put(wq, &wqe))
			return IOCB_ERROR;
	} else {
		/* Everything else goes out on the ELS work queue. */
		if (unlikely(!phba->sli4_hba.els_wq))
			return IOCB_ERROR;
		if (lpfc_sli4_wq_put(phba->sli4_hba.els_wq, &wqe))
			return IOCB_ERROR;
	}
	/* Track the in-flight command on the ring's txcmpl queue. */
	lpfc_sli_ringtxcmpl_put(phba, pring, piocb);

	return 0;
}
8752
8753
8754
8755
8756
8757
8758
8759
8760
8761
8762
8763
/**
 * __lpfc_sli_issue_iocb - Wrapper for the lockless iocb issue function
 * @phba: Pointer to HBA context object.
 * @ring_number: SLI ring number to issue the iocb on.
 * @piocb: Pointer to the command iocb.
 * @flag: Flag indicating if this command can be put into the txq.
 *
 * Dispatches to the SLI revision specific implementation installed in
 * the phba jump table by lpfc_sli_api_table_setup (s3 or s4 variant).
 * Returns whatever the underlying implementation returns.
 **/
int
__lpfc_sli_issue_iocb(struct lpfc_hba *phba, uint32_t ring_number,
		      struct lpfc_iocbq *piocb, uint32_t flag)
{
	return phba->__lpfc_sli_issue_iocb(phba, ring_number, piocb, flag);
}
8770
8771
8772
8773
8774
8775
8776
8777
8778
8779
8780int
8781lpfc_sli_api_table_setup(struct lpfc_hba *phba, uint8_t dev_grp)
8782{
8783
8784 switch (dev_grp) {
8785 case LPFC_PCI_DEV_LP:
8786 phba->__lpfc_sli_issue_iocb = __lpfc_sli_issue_iocb_s3;
8787 phba->__lpfc_sli_release_iocbq = __lpfc_sli_release_iocbq_s3;
8788 break;
8789 case LPFC_PCI_DEV_OC:
8790 phba->__lpfc_sli_issue_iocb = __lpfc_sli_issue_iocb_s4;
8791 phba->__lpfc_sli_release_iocbq = __lpfc_sli_release_iocbq_s4;
8792 break;
8793 default:
8794 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
8795 "1419 Invalid HBA PCI-device group: 0x%x\n",
8796 dev_grp);
8797 return -ENODEV;
8798 break;
8799 }
8800 phba->lpfc_get_iocb_from_iocbq = lpfc_get_iocb_from_iocbq;
8801 return 0;
8802}
8803
8804
8805
8806
8807
8808
8809
8810
8811
8812
8813
8814
/**
 * lpfc_sli_calc_ring - Calculate which ring an iocb should use
 * @phba: Pointer to HBA context object.
 * @ring_number: Caller-suggested ring number.
 * @piocb: Pointer to the command iocb.
 *
 * On SLI revisions before SLI4 the suggested ring number is used as-is.
 * On SLI4, FCP iocbs are mapped to a per-WQ ring index (or the dedicated
 * OAS ring when first-burst/OAS is in effect) and piocb->fcp_wqidx is
 * filled in as a side effect.
 *
 * Returns the ring number to use, or LPFC_HBA_ERROR if the required
 * work queues have not been set up.
 **/
int
lpfc_sli_calc_ring(struct lpfc_hba *phba, uint32_t ring_number,
		   struct lpfc_iocbq *piocb)
{
	if (phba->sli_rev < LPFC_SLI_REV4)
		return ring_number;

	if (piocb->iocb_flag & (LPFC_IO_FCP | LPFC_USE_FCPWQIDX)) {
		if (!(phba->cfg_fof) ||
		    (!(piocb->iocb_flag & LPFC_IO_FOF))) {
			if (unlikely(!phba->sli4_hba.fcp_wq))
				return LPFC_HBA_ERROR;
			/*
			 * Distribute the command across the FCP WQs unless
			 * the caller already pinned a WQ index.
			 */
			if (!(piocb->iocb_flag & LPFC_USE_FCPWQIDX))
				piocb->fcp_wqidx =
					lpfc_sli4_scmd_to_wqidx_distr(phba,
							piocb->context1);
			/* FCP rings sit after the SLI3-configured rings. */
			ring_number = MAX_SLI3_CONFIGURED_RINGS +
				piocb->fcp_wqidx;
		} else {
			if (unlikely(!phba->sli4_hba.oas_wq))
				return LPFC_HBA_ERROR;
			piocb->fcp_wqidx = 0;
			ring_number = LPFC_FCP_OAS_RING;
		}
	}
	return ring_number;
}
8846
8847
8848
8849
8850
8851
8852
8853
8854
8855
8856
8857
8858
8859
/**
 * lpfc_sli_issue_iocb - Locking wrapper for __lpfc_sli_issue_iocb
 * @phba: Pointer to HBA context object.
 * @ring_number: Ring number on which to issue the iocb.
 * @piocb: Pointer to the command iocb.
 * @flag: Flag indicating if this command can be put into the txq.
 *
 * Acquires the appropriate lock (per-ring lock on SLI4, hbalock on
 * SLI2/3) around the lockless issue routine. On SLI4 with FCP look-ahead
 * enabled, it also polls and re-arms the FCP event queue associated with
 * the chosen WQ index so completions are serviced promptly.
 *
 * Returns the value from __lpfc_sli_issue_iocb, or IOCB_ERROR if the
 * ring could not be resolved.
 **/
int
lpfc_sli_issue_iocb(struct lpfc_hba *phba, uint32_t ring_number,
		    struct lpfc_iocbq *piocb, uint32_t flag)
{
	struct lpfc_fcp_eq_hdl *fcp_eq_hdl;
	struct lpfc_sli_ring *pring;
	struct lpfc_queue *fpeq;
	struct lpfc_eqe *eqe;
	unsigned long iflags;
	int rc, idx;

	if (phba->sli_rev == LPFC_SLI_REV4) {
		/* Map the iocb onto the correct SLI4 ring / WQ index. */
		ring_number = lpfc_sli_calc_ring(phba, ring_number, piocb);
		if (unlikely(ring_number == LPFC_HBA_ERROR))
			return IOCB_ERROR;
		idx = piocb->fcp_wqidx;

		pring = &phba->sli.ring[ring_number];
		spin_lock_irqsave(&pring->ring_lock, iflags);
		rc = __lpfc_sli_issue_iocb(phba, ring_number, piocb, flag);
		spin_unlock_irqrestore(&pring->ring_lock, iflags);

		if (lpfc_fcp_look_ahead && (piocb->iocb_flag & LPFC_IO_FCP)) {
			fcp_eq_hdl = &phba->sli4_hba.fcp_eq_hdl[idx];

			/* Only poll the EQ when no one else is using it. */
			if (atomic_dec_and_test(&fcp_eq_hdl->
				fcp_eq_in_use)) {

				/* Get the EQ associated with this WQ index */
				fpeq = phba->sli4_hba.hba_eq[idx];

				/* Turn off interrupts from this EQ */
				lpfc_sli4_eq_clr_intr(fpeq);

				/* Process all pending events on the EQ */
				while ((eqe = lpfc_sli4_eq_get(fpeq))) {
					lpfc_sli4_hba_handle_eqe(phba,
						eqe, idx);
					fpeq->EQ_processed++;
				}

				/* Always clear and re-arm the EQ */
				lpfc_sli4_eq_release(fpeq,
					LPFC_QUEUE_REARM);
			}
			atomic_inc(&fcp_eq_hdl->fcp_eq_in_use);
		}
	} else {
		/* SLI2/3 path still serializes on the global hbalock. */
		spin_lock_irqsave(&phba->hbalock, iflags);
		rc = __lpfc_sli_issue_iocb(phba, ring_number, piocb, flag);
		spin_unlock_irqrestore(&phba->hbalock, iflags);
	}
	return rc;
}
8917
8918
8919
8920
8921
8922
8923
8924
8925
8926
8927
8928
/**
 * lpfc_extra_ring_setup - Configure the extra SLI ring
 * @phba: Pointer to HBA context object.
 *
 * Moves the extra iocb entries from the FCP ring to the extra ring so
 * the extra ring can carry a separate traffic class (rctl/type taken
 * from the multi-ring module parameters). Called when
 * cfg_multi_ring_support is enabled.
 *
 * Always returns 0.
 **/
static int
lpfc_extra_ring_setup( struct lpfc_hba *phba)
{
	struct lpfc_sli *psli;
	struct lpfc_sli_ring *pring;

	psli = &phba->sli;

	/* Adjust cmd/rsp ring iocb entries count: take the extra-ring
	 * entries away from the FCP ring...
	 */
	pring = &psli->ring[psli->fcp_ring];
	pring->sli.sli3.numCiocb -= SLI2_IOCB_CMD_R1XTRA_ENTRIES;
	pring->sli.sli3.numRiocb -= SLI2_IOCB_RSP_R1XTRA_ENTRIES;
	pring->sli.sli3.numCiocb -= SLI2_IOCB_CMD_R3XTRA_ENTRIES;
	pring->sli.sli3.numRiocb -= SLI2_IOCB_RSP_R3XTRA_ENTRIES;

	/* ...and give them to the extra ring. */
	pring = &psli->ring[psli->extra_ring];

	pring->sli.sli3.numCiocb += SLI2_IOCB_CMD_R1XTRA_ENTRIES;
	pring->sli.sli3.numRiocb += SLI2_IOCB_RSP_R1XTRA_ENTRIES;
	pring->sli.sli3.numCiocb += SLI2_IOCB_CMD_R3XTRA_ENTRIES;
	pring->sli.sli3.numRiocb += SLI2_IOCB_RSP_R3XTRA_ENTRIES;

	/* Setup default profile for this ring: single unsolicited-event
	 * mask using the configured rctl/type, with no handler attached.
	 */
	pring->iotag_max = 4096;
	pring->num_mask = 1;
	pring->prt[0].profile = 0;
	pring->prt[0].rctl = phba->cfg_multi_ring_rctl;
	pring->prt[0].type = phba->cfg_multi_ring_type;
	pring->prt[0].lpfc_sli_rcv_unsol_event = NULL;
	return 0;
}
8963
8964
8965
8966
8967
8968
8969
8970
8971
8972
8973
8974
8975
8976static void
8977lpfc_sli_abts_err_handler(struct lpfc_hba *phba,
8978 struct lpfc_iocbq *iocbq)
8979{
8980 struct lpfc_nodelist *ndlp = NULL;
8981 uint16_t rpi = 0, vpi = 0;
8982 struct lpfc_vport *vport = NULL;
8983
8984
8985 vpi = iocbq->iocb.un.asyncstat.sub_ctxt_tag;
8986 rpi = iocbq->iocb.ulpContext;
8987
8988 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
8989 "3092 Port generated ABTS async event "
8990 "on vpi %d rpi %d status 0x%x\n",
8991 vpi, rpi, iocbq->iocb.ulpStatus);
8992
8993 vport = lpfc_find_vport_by_vpid(phba, vpi);
8994 if (!vport)
8995 goto err_exit;
8996 ndlp = lpfc_findnode_rpi(vport, rpi);
8997 if (!ndlp || !NLP_CHK_NODE_ACT(ndlp))
8998 goto err_exit;
8999
9000 if (iocbq->iocb.ulpStatus == IOSTAT_LOCAL_REJECT)
9001 lpfc_sli_abts_recover_port(vport, ndlp);
9002 return;
9003
9004 err_exit:
9005 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
9006 "3095 Event Context not found, no "
9007 "action on vpi %d rpi %d status 0x%x, reason 0x%x\n",
9008 iocbq->iocb.ulpContext, iocbq->iocb.ulpStatus,
9009 vpi, rpi);
9010}
9011
9012
9013
9014
9015
9016
9017
9018
9019
9020
9021
/**
 * lpfc_sli4_abts_err_handler - handle a failed ABTS request from an SLI4 port
 * @phba: Pointer to HBA context object.
 * @ndlp: Node the aborted XRI belonged to (may be NULL/inactive).
 * @axri: Pointer to the XRI-aborted WCQE.
 *
 * Logs the XRI abort event and, for a local reject whose extended status
 * is a sequence timeout (or zero), initiates port recovery for the node.
 **/
void
lpfc_sli4_abts_err_handler(struct lpfc_hba *phba,
			   struct lpfc_nodelist *ndlp,
			   struct sli4_wcqe_xri_aborted *axri)
{
	struct lpfc_vport *vport;
	uint32_t ext_status = 0;

	if (!ndlp || !NLP_CHK_NODE_ACT(ndlp)) {
		lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
				"3115 Node Context not found, driver "
				"ignoring abts err event\n");
		return;
	}

	vport = ndlp->vport;
	lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
			"3116 Port generated FCP XRI ABORT event on "
			"vpi %d rpi %d xri x%x status 0x%x parameter x%x\n",
			ndlp->vport->vpi, phba->sli4_hba.rpi_ids[ndlp->nlp_rpi],
			bf_get(lpfc_wcqe_xa_xri, axri),
			bf_get(lpfc_wcqe_xa_status, axri),
			axri->parameter);

	/*
	 * Only the low-order bits of the parameter carry the IO error;
	 * mask off the rest before comparing. Recovery is only attempted
	 * for LOCAL_REJECT with a sequence timeout (or no extended
	 * status at all).
	 */
	ext_status = axri->parameter & IOERR_PARAM_MASK;
	if ((bf_get(lpfc_wcqe_xa_status, axri) == IOSTAT_LOCAL_REJECT) &&
	    ((ext_status == IOERR_SEQUENCE_TIMEOUT) || (ext_status == 0)))
		lpfc_sli_abts_recover_port(vport, ndlp);
}
9056
9057
9058
9059
9060
9061
9062
9063
9064
9065
9066
9067
9068
9069
/**
 * lpfc_sli_async_event_handler - Handle an asynchronous event iocb
 * @phba: Pointer to HBA context object.
 * @pring: Pointer to the driver SLI ring object the event arrived on.
 * @iocbq: Pointer to the iocb carrying the async event.
 *
 * Decodes the async event code: temperature warnings/normalizations are
 * forwarded to user space as FC vendor events, ABTS status events are
 * routed to the ABTS error handler, and unknown events are dumped word
 * by word to the log.
 **/
static void
lpfc_sli_async_event_handler(struct lpfc_hba * phba,
	struct lpfc_sli_ring * pring, struct lpfc_iocbq * iocbq)
{
	IOCB_t *icmd;
	uint16_t evt_code;
	struct temp_event temp_event_data;
	struct Scsi_Host *shost;
	uint32_t *iocb_w;

	icmd = &iocbq->iocb;
	evt_code = icmd->un.asyncstat.evt_code;

	switch (evt_code) {
	case ASYNC_TEMP_WARN:
	case ASYNC_TEMP_SAFE:
		/* ulpContext carries the temperature in Celsius. */
		temp_event_data.data = (uint32_t) icmd->ulpContext;
		temp_event_data.event_type = FC_REG_TEMPERATURE_EVENT;
		if (evt_code == ASYNC_TEMP_WARN) {
			temp_event_data.event_code = LPFC_THRESHOLD_TEMP;
			lpfc_printf_log(phba, KERN_ERR, LOG_TEMP,
				"0347 Adapter is very hot, please take "
				"corrective action. temperature : %d Celsius\n",
				(uint32_t) icmd->ulpContext);
		} else {
			temp_event_data.event_code = LPFC_NORMAL_TEMP;
			lpfc_printf_log(phba, KERN_ERR, LOG_TEMP,
				"0340 Adapter temperature is OK now. "
				"temperature : %d Celsius\n",
				(uint32_t) icmd->ulpContext);
		}

		/* Send temperature change event to applications */
		shost = lpfc_shost_from_vport(phba->pport);
		fc_host_post_vendor_event(shost, fc_get_event_number(),
			sizeof(temp_event_data), (char *) &temp_event_data,
			LPFC_NL_VENDOR_ID);
		break;
	case ASYNC_STATUS_CN:
		lpfc_sli_abts_err_handler(phba, iocbq);
		break;
	default:
		/* Unknown event: dump the raw iocb words for diagnosis. */
		iocb_w = (uint32_t *) icmd;
		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
			"0346 Ring %d handler: unexpected ASYNC_STATUS"
			" evt_code 0x%x\n"
			"W0  0x%08x W1  0x%08x W2  0x%08x W3  0x%08x\n"
			"W4  0x%08x W5  0x%08x W6  0x%08x W7  0x%08x\n"
			"W8  0x%08x W9  0x%08x W10 0x%08x W11 0x%08x\n"
			"W12 0x%08x W13 0x%08x W14 0x%08x W15 0x%08x\n",
			pring->ringno, icmd->un.asyncstat.evt_code,
			iocb_w[0], iocb_w[1], iocb_w[2], iocb_w[3],
			iocb_w[4], iocb_w[5], iocb_w[6], iocb_w[7],
			iocb_w[8], iocb_w[9], iocb_w[10], iocb_w[11],
			iocb_w[12], iocb_w[13], iocb_w[14], iocb_w[15]);

		break;
	}
}
9129
9130
9131
9132
9133
9134
9135
9136
9137
9138
9139
9140
9141
/**
 * lpfc_sli_setup - SLI ring setup function
 * @phba: Pointer to HBA context object.
 *
 * Initializes the per-ring attributes (iocb counts and sizes, iotag
 * limits, and unsolicited-event masks for the ELS ring). On SLI4 the
 * ring count is extended by the number of FCP IO channels. Verifies the
 * total iocb footprint fits in the SLI2 SLIM and sets up the extra ring
 * when multi-ring support is configured.
 *
 * Always returns 0.
 **/
int
lpfc_sli_setup(struct lpfc_hba *phba)
{
	int i, totiocbsize = 0;
	struct lpfc_sli *psli = &phba->sli;
	struct lpfc_sli_ring *pring;

	psli->num_rings = MAX_SLI3_CONFIGURED_RINGS;
	if (phba->sli_rev == LPFC_SLI_REV4)
		psli->num_rings += phba->cfg_fcp_io_channel;
	psli->sli_flag = 0;
	psli->fcp_ring = LPFC_FCP_RING;
	psli->next_ring = LPFC_FCP_NEXT_RING;
	psli->extra_ring = LPFC_EXTRA_RING;

	psli->iocbq_lookup = NULL;
	psli->iocbq_lookup_len = 0;
	psli->last_iotag = 0;

	for (i = 0; i < psli->num_rings; i++) {
		pring = &psli->ring[i];
		switch (i) {
		case LPFC_FCP_RING:	/* ring 0 - FCP */
			/* numCiocb and numRiocb are used in config_port */
			pring->sli.sli3.numCiocb = SLI2_IOCB_CMD_R0_ENTRIES;
			pring->sli.sli3.numRiocb = SLI2_IOCB_RSP_R0_ENTRIES;
			pring->sli.sli3.numCiocb +=
				SLI2_IOCB_CMD_R1XTRA_ENTRIES;
			pring->sli.sli3.numRiocb +=
				SLI2_IOCB_RSP_R1XTRA_ENTRIES;
			pring->sli.sli3.numCiocb +=
				SLI2_IOCB_CMD_R3XTRA_ENTRIES;
			pring->sli.sli3.numRiocb +=
				SLI2_IOCB_RSP_R3XTRA_ENTRIES;
			pring->sli.sli3.sizeCiocb = (phba->sli_rev == 3) ?
							SLI3_IOCB_CMD_SIZE :
							SLI2_IOCB_CMD_SIZE;
			pring->sli.sli3.sizeRiocb = (phba->sli_rev == 3) ?
							SLI3_IOCB_RSP_SIZE :
							SLI2_IOCB_RSP_SIZE;
			pring->iotag_ctr = 0;
			pring->iotag_max =
			    (phba->cfg_hba_queue_depth * 2);
			pring->fast_iotag = pring->iotag_max;
			pring->num_mask = 0;
			break;
		case LPFC_EXTRA_RING:	/* ring 1 - EXTRA */
			/* numCiocb and numRiocb are used in config_port */
			pring->sli.sli3.numCiocb = SLI2_IOCB_CMD_R1_ENTRIES;
			pring->sli.sli3.numRiocb = SLI2_IOCB_RSP_R1_ENTRIES;
			pring->sli.sli3.sizeCiocb = (phba->sli_rev == 3) ?
							SLI3_IOCB_CMD_SIZE :
							SLI2_IOCB_CMD_SIZE;
			pring->sli.sli3.sizeRiocb = (phba->sli_rev == 3) ?
							SLI3_IOCB_RSP_SIZE :
							SLI2_IOCB_RSP_SIZE;
			pring->iotag_max = phba->cfg_hba_queue_depth;
			pring->num_mask = 0;
			break;
		case LPFC_ELS_RING:	/* ring 2 - ELS / CT */
			/* numCiocb and numRiocb are used in config_port */
			pring->sli.sli3.numCiocb = SLI2_IOCB_CMD_R2_ENTRIES;
			pring->sli.sli3.numRiocb = SLI2_IOCB_RSP_R2_ENTRIES;
			pring->sli.sli3.sizeCiocb = (phba->sli_rev == 3) ?
							SLI3_IOCB_CMD_SIZE :
							SLI2_IOCB_CMD_SIZE;
			pring->sli.sli3.sizeRiocb = (phba->sli_rev == 3) ?
							SLI3_IOCB_RSP_SIZE :
							SLI2_IOCB_RSP_SIZE;
			pring->fast_iotag = 0;
			pring->iotag_ctr = 0;
			pring->iotag_max = 4096;
			pring->lpfc_sli_rcv_async_status =
				lpfc_sli_async_event_handler;
			pring->num_mask = LPFC_MAX_RING_MASK;
			/* Unsolicited event routing for the ELS ring:
			 * ELS request/response and CT traffic.
			 */
			pring->prt[0].profile = 0;	/* Mask 0 */
			pring->prt[0].rctl = FC_RCTL_ELS_REQ;
			pring->prt[0].type = FC_TYPE_ELS;
			pring->prt[0].lpfc_sli_rcv_unsol_event =
			    lpfc_els_unsol_event;
			pring->prt[1].profile = 0;	/* Mask 1 */
			pring->prt[1].rctl = FC_RCTL_ELS_REP;
			pring->prt[1].type = FC_TYPE_ELS;
			pring->prt[1].lpfc_sli_rcv_unsol_event =
			    lpfc_els_unsol_event;
			pring->prt[2].profile = 0;	/* Mask 2 */
			/* NameServer Inquiry */
			pring->prt[2].rctl = FC_RCTL_DD_UNSOL_CTL;
			/* NameServer */
			pring->prt[2].type = FC_TYPE_CT;
			pring->prt[2].lpfc_sli_rcv_unsol_event =
			    lpfc_ct_unsol_event;
			pring->prt[3].profile = 0;	/* Mask 3 */
			/* NameServer response */
			pring->prt[3].rctl = FC_RCTL_DD_SOL_CTL;
			/* NameServer */
			pring->prt[3].type = FC_TYPE_CT;
			pring->prt[3].lpfc_sli_rcv_unsol_event =
			    lpfc_ct_unsol_event;
			break;
		}
		totiocbsize += (pring->sli.sli3.numCiocb *
			pring->sli.sli3.sizeCiocb) +
			(pring->sli.sli3.numRiocb * pring->sli.sli3.sizeRiocb);
	}
	if (totiocbsize > MAX_SLIM_IOCB_SIZE) {
		/* Too many cmd / rsp ring entries in SLI2 SLIM */
		printk(KERN_ERR "%d:0462 Too many cmd / rsp ring entries in "
		       "SLI2 SLIM Data: x%x x%lx\n",
		       phba->brd_no, totiocbsize,
		       (unsigned long) MAX_SLIM_IOCB_SIZE);
	}
	if (phba->cfg_multi_ring_support == 2)
		lpfc_extra_ring_setup(phba);

	return 0;
}
9259
9260
9261
9262
9263
9264
9265
9266
9267
9268
9269
9270
/**
 * lpfc_sli_queue_setup - Queue initialization function
 * @phba: Pointer to HBA context object.
 *
 * Initializes the mailbox queues and the per-ring bookkeeping (indices,
 * flags, and the txq/txcmplq/postbuf lists plus the ring lock). Runs
 * under hbalock.
 *
 * Always returns 1.
 **/
int
lpfc_sli_queue_setup(struct lpfc_hba *phba)
{
	struct lpfc_sli *psli;
	struct lpfc_sli_ring *pring;
	int i;

	psli = &phba->sli;
	spin_lock_irq(&phba->hbalock);
	INIT_LIST_HEAD(&psli->mboxq);
	INIT_LIST_HEAD(&psli->mboxq_cmpl);
	/* Initialize list headers for txq and txcmplq as circular lists */
	for (i = 0; i < psli->num_rings; i++) {
		pring = &psli->ring[i];
		pring->ringno = i;
		pring->sli.sli3.next_cmdidx  = 0;
		pring->sli.sli3.local_getidx = 0;
		pring->sli.sli3.cmdidx = 0;
		pring->flag = 0;
		INIT_LIST_HEAD(&pring->txq);
		INIT_LIST_HEAD(&pring->txcmplq);
		INIT_LIST_HEAD(&pring->iocb_continueq);
		INIT_LIST_HEAD(&pring->iocb_continue_saveq);
		INIT_LIST_HEAD(&pring->postbufq);
		spin_lock_init(&pring->ring_lock);
	}
	spin_unlock_irq(&phba->hbalock);
	return 1;
}
9300
9301
9302
9303
9304
9305
9306
9307
9308
9309
9310
9311
9312
9313
9314
9315
/**
 * lpfc_sli_mbox_sys_flush - Flush the mailbox command sub-system
 * @phba: Pointer to HBA context object.
 *
 * Collects every pending, active, and completed-but-unprocessed mailbox
 * command under hbalock, then completes each one outside the lock with
 * status MBX_NOT_FINISHED so callers/completion handlers can clean up.
 * Used during shutdown/error paths to drain the mailbox machinery.
 **/
static void
lpfc_sli_mbox_sys_flush(struct lpfc_hba *phba)
{
	LIST_HEAD(completions);
	struct lpfc_sli *psli = &phba->sli;
	LPFC_MBOXQ_t *pmb;
	unsigned long iflag;

	/* Flush all the mailbox commands in the mbox system */
	spin_lock_irqsave(&phba->hbalock, iflag);
	/* The pending mailbox command queue */
	list_splice_init(&phba->sli.mboxq, &completions);
	/* The outstanding active mailbox command */
	if (psli->mbox_active) {
		list_add_tail(&psli->mbox_active->list, &completions);
		psli->mbox_active = NULL;
		psli->sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
	}
	/* The completed mailbox command queue */
	list_splice_init(&phba->sli.mboxq_cmpl, &completions);
	spin_unlock_irqrestore(&phba->hbalock, iflag);

	/* Return all flushed mailbox commands with MBX_NOT_FINISHED status */
	while (!list_empty(&completions)) {
		list_remove_head(&completions, pmb, LPFC_MBOXQ_t, list);
		pmb->u.mb.mbxStatus = MBX_NOT_FINISHED;
		if (pmb->mbox_cmpl)
			pmb->mbox_cmpl(phba, pmb);
	}
}
9346
9347
9348
9349
9350
9351
9352
9353
9354
9355
9356
9357
9358
9359
9360
9361
9362
9363
/**
 * lpfc_sli_host_down - Vport cleanup function
 * @vport: Pointer to virtual port object.
 *
 * Cleans up this vport's discovery resources, cancels its not-yet-issued
 * iocbs from every ring's txq, and aborts its in-flight iocbs from the
 * txcmpl queues. Iocbs belonging to other vports are left untouched.
 * The collected txq iocbs are completed with IOSTAT_LOCAL_REJECT /
 * IOERR_SLI_DOWN outside the lock.
 *
 * Always returns 1.
 **/
int
lpfc_sli_host_down(struct lpfc_vport *vport)
{
	LIST_HEAD(completions);
	struct lpfc_hba *phba = vport->phba;
	struct lpfc_sli *psli = &phba->sli;
	struct lpfc_sli_ring *pring;
	struct lpfc_iocbq *iocb, *next_iocb;
	int i;
	unsigned long flags = 0;
	uint16_t prev_pring_flag;

	lpfc_cleanup_discovery_resources(vport);

	spin_lock_irqsave(&phba->hbalock, flags);
	for (i = 0; i < psli->num_rings; i++) {
		pring = &psli->ring[i];
		prev_pring_flag = pring->flag;
		/* Only slow rings */
		if (pring->ringno == LPFC_ELS_RING) {
			pring->flag |= LPFC_DEFERRED_RING_EVENT;
			/* Set the lpfc data pending flag */
			set_bit(LPFC_DATA_READY, &phba->data_flags);
		}
		/*
		 * Error everything on the txq since these iocbs have not
		 * been given to the FW yet. Skip iocbs of other vports.
		 */
		list_for_each_entry_safe(iocb, next_iocb, &pring->txq, list) {
			if (iocb->vport != vport)
				continue;
			list_move_tail(&iocb->list, &completions);
		}

		/* Next issue ABTS for everything on the txcmplq */
		list_for_each_entry_safe(iocb, next_iocb, &pring->txcmplq,
									list) {
			if (iocb->vport != vport)
				continue;
			lpfc_sli_issue_abort_iotag(phba, pring, iocb);
		}

		pring->flag = prev_pring_flag;
	}

	spin_unlock_irqrestore(&phba->hbalock, flags);

	/* Cancel all the IOCBs from the completions list */
	lpfc_sli_cancel_iocbs(phba, &completions, IOSTAT_LOCAL_REJECT,
			      IOERR_SLI_DOWN);
	return 1;
}
9416
9417
9418
9419
9420
9421
9422
9423
9424
9425
9426
9427
9428
9429
9430
9431
/**
 * lpfc_sli_hba_down - Resource cleanup function for the HBA
 * @phba: Pointer to HBA context object.
 *
 * Shuts down the mailbox sub-system, aborts fabric iocbs, cancels all
 * not-yet-issued iocbs on every ring's txq (completed with
 * IOSTAT_LOCAL_REJECT / IOERR_SLI_DOWN), frees the cached ELS buffers,
 * and stops the mailbox timeout timer/worker event. Called when bringing
 * the whole HBA down.
 *
 * Always returns 1.
 **/
int
lpfc_sli_hba_down(struct lpfc_hba *phba)
{
	LIST_HEAD(completions);
	struct lpfc_sli *psli = &phba->sli;
	struct lpfc_sli_ring *pring;
	struct lpfc_dmabuf *buf_ptr;
	unsigned long flags = 0;
	int i;

	/* Shutdown the mailbox command sub-system */
	lpfc_sli_mbox_sys_shutdown(phba, LPFC_MBX_WAIT);

	lpfc_hba_down_prep(phba);

	lpfc_fabric_abort_hba(phba);

	spin_lock_irqsave(&phba->hbalock, flags);
	for (i = 0; i < psli->num_rings; i++) {
		pring = &psli->ring[i];
		/* Only slow rings */
		if (pring->ringno == LPFC_ELS_RING) {
			pring->flag |= LPFC_DEFERRED_RING_EVENT;
			/* Set the lpfc data pending flag */
			set_bit(LPFC_DATA_READY, &phba->data_flags);
		}

		/*
		 * Error everything on the txq since these iocbs have not been
		 * given to the FW yet.
		 */
		list_splice_init(&pring->txq, &completions);
	}
	spin_unlock_irqrestore(&phba->hbalock, flags);

	/* Cancel all the IOCBs from the completions list */
	lpfc_sli_cancel_iocbs(phba, &completions, IOSTAT_LOCAL_REJECT,
			      IOERR_SLI_DOWN);

	/* Free the cached unsolicited ELS buffers. */
	spin_lock_irqsave(&phba->hbalock, flags);
	list_splice_init(&phba->elsbuf, &completions);
	phba->elsbuf_cnt = 0;
	phba->elsbuf_prev_cnt = 0;
	spin_unlock_irqrestore(&phba->hbalock, flags);

	while (!list_empty(&completions)) {
		list_remove_head(&completions, buf_ptr,
			struct lpfc_dmabuf, list);
		lpfc_mbuf_free(phba, buf_ptr->virt, buf_ptr->phys);
		kfree(buf_ptr);
	}

	/* Return any active mbox cmds */
	del_timer_sync(&psli->mbox_tmo);

	spin_lock_irqsave(&phba->pport->work_port_lock, flags);
	phba->pport->work_port_events &= ~WORKER_MBOX_TMO;
	spin_unlock_irqrestore(&phba->pport->work_port_lock, flags);

	return 1;
}
9493
9494
9495
9496
9497
9498
9499
9500
9501
9502
9503
9504
9505
9506void
9507lpfc_sli_pcimem_bcopy(void *srcp, void *destp, uint32_t cnt)
9508{
9509 uint32_t *src = srcp;
9510 uint32_t *dest = destp;
9511 uint32_t ldata;
9512 int i;
9513
9514 for (i = 0; i < (int)cnt; i += sizeof (uint32_t)) {
9515 ldata = *src;
9516 ldata = le32_to_cpu(ldata);
9517 *dest = ldata;
9518 src++;
9519 dest++;
9520 }
9521}
9522
9523
9524
9525
9526
9527
9528
9529
9530
9531
9532
9533
9534void
9535lpfc_sli_bemem_bcopy(void *srcp, void *destp, uint32_t cnt)
9536{
9537 uint32_t *src = srcp;
9538 uint32_t *dest = destp;
9539 uint32_t ldata;
9540 int i;
9541
9542 for (i = 0; i < (int)cnt; i += sizeof(uint32_t)) {
9543 ldata = *src;
9544 ldata = be32_to_cpu(ldata);
9545 *dest = ldata;
9546 src++;
9547 dest++;
9548 }
9549}
9550
9551
9552
9553
9554
9555
9556
9557
9558
9559
9560
/**
 * lpfc_sli_ringpostbuf_put - Add a buffer to the ring's postbufq
 * @phba: Pointer to HBA context object.
 * @pring: Pointer to the driver SLI ring object.
 * @mp: Buffer to post.
 *
 * Adds @mp to the tail of the ring's posted-buffer queue under hbalock
 * and bumps the queue count. The buffer is later retrieved by tag or by
 * physical address when the firmware consumes it.
 *
 * Always returns 0.
 **/
int
lpfc_sli_ringpostbuf_put(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
			 struct lpfc_dmabuf *mp)
{
	/* Stick struct lpfc_dmabuf at end of postbufq so driver can look it
	   up later */
	spin_lock_irq(&phba->hbalock);
	list_add_tail(&mp->list, &pring->postbufq);
	pring->postbufq_cnt++;
	spin_unlock_irq(&phba->hbalock);
	return 0;
}
9573
9574
9575
9576
9577
9578
9579
9580
9581
9582
9583
9584
/**
 * lpfc_sli_get_buffer_tag - Allocate a buffer tag
 * @phba: Pointer to HBA context object.
 *
 * Increments the HBA-wide buffer tag counter under hbalock and returns
 * the new tag with QUE_BUFTAG_BIT set, so a buffer tag can never collide
 * with an iocb iotag. Note the returned value is re-read after the lock
 * is dropped, matching the driver's long-standing behavior.
 **/
uint32_t
lpfc_sli_get_buffer_tag(struct lpfc_hba *phba)
{
	spin_lock_irq(&phba->hbalock);
	phba->buffer_tag_count++;
	/*
	 * Always set the QUE_BUFTAG_BIT to distinguish between
	 * a tag assigned by HBQ.
	 */
	phba->buffer_tag_count |= QUE_BUFTAG_BIT;
	spin_unlock_irq(&phba->hbalock);
	return phba->buffer_tag_count;
}
9598
9599
9600
9601
9602
9603
9604
9605
9606
9607
9608
9609
9610
9611
9612
9613
9614struct lpfc_dmabuf *
9615lpfc_sli_ring_taggedbuf_get(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
9616 uint32_t tag)
9617{
9618 struct lpfc_dmabuf *mp, *next_mp;
9619 struct list_head *slp = &pring->postbufq;
9620
9621
9622 spin_lock_irq(&phba->hbalock);
9623 list_for_each_entry_safe(mp, next_mp, &pring->postbufq, list) {
9624 if (mp->buffer_tag == tag) {
9625 list_del_init(&mp->list);
9626 pring->postbufq_cnt--;
9627 spin_unlock_irq(&phba->hbalock);
9628 return mp;
9629 }
9630 }
9631
9632 spin_unlock_irq(&phba->hbalock);
9633 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
9634 "0402 Cannot find virtual addr for buffer tag on "
9635 "ring %d Data x%lx x%p x%p x%x\n",
9636 pring->ringno, (unsigned long) tag,
9637 slp->next, slp->prev, pring->postbufq_cnt);
9638
9639 return NULL;
9640}
9641
9642
9643
9644
9645
9646
9647
9648
9649
9650
9651
9652
9653
9654
9655
9656
9657
9658struct lpfc_dmabuf *
9659lpfc_sli_ringpostbuf_get(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
9660 dma_addr_t phys)
9661{
9662 struct lpfc_dmabuf *mp, *next_mp;
9663 struct list_head *slp = &pring->postbufq;
9664
9665
9666 spin_lock_irq(&phba->hbalock);
9667 list_for_each_entry_safe(mp, next_mp, &pring->postbufq, list) {
9668 if (mp->phys == phys) {
9669 list_del_init(&mp->list);
9670 pring->postbufq_cnt--;
9671 spin_unlock_irq(&phba->hbalock);
9672 return mp;
9673 }
9674 }
9675
9676 spin_unlock_irq(&phba->hbalock);
9677 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
9678 "0410 Cannot find virtual addr for mapped buf on "
9679 "ring %d Data x%llx x%p x%p x%x\n",
9680 pring->ringno, (unsigned long long)phys,
9681 slp->next, slp->prev, pring->postbufq_cnt);
9682 return NULL;
9683}
9684
9685
9686
9687
9688
9689
9690
9691
9692
9693
9694
9695
9696static void
9697lpfc_sli_abort_els_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
9698 struct lpfc_iocbq *rspiocb)
9699{
9700 IOCB_t *irsp = &rspiocb->iocb;
9701 uint16_t abort_iotag, abort_context;
9702 struct lpfc_iocbq *abort_iocb = NULL;
9703
9704 if (irsp->ulpStatus) {
9705
9706
9707
9708
9709
9710 abort_context = cmdiocb->iocb.un.acxri.abortContextTag;
9711 abort_iotag = cmdiocb->iocb.un.acxri.abortIoTag;
9712
9713 spin_lock_irq(&phba->hbalock);
9714 if (phba->sli_rev < LPFC_SLI_REV4) {
9715 if (abort_iotag != 0 &&
9716 abort_iotag <= phba->sli.last_iotag)
9717 abort_iocb =
9718 phba->sli.iocbq_lookup[abort_iotag];
9719 } else
9720
9721
9722
9723
9724
9725 abort_iocb = phba->sli.iocbq_lookup[abort_context];
9726
9727 lpfc_printf_log(phba, KERN_WARNING, LOG_ELS | LOG_SLI,
9728 "0327 Cannot abort els iocb %p "
9729 "with tag %x context %x, abort status %x, "
9730 "abort code %x\n",
9731 abort_iocb, abort_iotag, abort_context,
9732 irsp->ulpStatus, irsp->un.ulpWord[4]);
9733
9734 spin_unlock_irq(&phba->hbalock);
9735 }
9736 lpfc_sli_release_iocbq(phba, cmdiocb);
9737 return;
9738}
9739
9740
9741
9742
9743
9744
9745
9746
9747
9748
9749
9750
9751static void
9752lpfc_ignore_els_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
9753 struct lpfc_iocbq *rspiocb)
9754{
9755 IOCB_t *irsp = &rspiocb->iocb;
9756
9757
9758 lpfc_printf_log(phba, KERN_INFO, LOG_ELS,
9759 "0139 Ignoring ELS cmd tag x%x completion Data: "
9760 "x%x x%x x%x\n",
9761 irsp->ulpIoTag, irsp->ulpStatus,
9762 irsp->un.ulpWord[4], irsp->ulpTimeout);
9763 if (cmdiocb->iocb.ulpCommand == CMD_GEN_REQUEST64_CR)
9764 lpfc_ct_free_iocb(phba, cmdiocb);
9765 else
9766 lpfc_els_free_iocb(phba, cmdiocb);
9767 return;
9768}
9769
9770
9771
9772
9773
9774
9775
9776
9777
9778
9779
9780
9781
9782static int
9783lpfc_sli_abort_iotag_issue(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
9784 struct lpfc_iocbq *cmdiocb)
9785{
9786 struct lpfc_vport *vport = cmdiocb->vport;
9787 struct lpfc_iocbq *abtsiocbp;
9788 IOCB_t *icmd = NULL;
9789 IOCB_t *iabt = NULL;
9790 int ring_number;
9791 int retval;
9792 unsigned long iflags;
9793
9794 lockdep_assert_held(&phba->hbalock);
9795
9796
9797
9798
9799
9800
9801 icmd = &cmdiocb->iocb;
9802 if (icmd->ulpCommand == CMD_ABORT_XRI_CN ||
9803 icmd->ulpCommand == CMD_CLOSE_XRI_CN ||
9804 (cmdiocb->iocb_flag & LPFC_DRIVER_ABORTED) != 0)
9805 return 0;
9806
9807
9808 abtsiocbp = __lpfc_sli_get_iocbq(phba);
9809 if (abtsiocbp == NULL)
9810 return 0;
9811
9812
9813
9814
9815 cmdiocb->iocb_flag |= LPFC_DRIVER_ABORTED;
9816
9817 iabt = &abtsiocbp->iocb;
9818 iabt->un.acxri.abortType = ABORT_TYPE_ABTS;
9819 iabt->un.acxri.abortContextTag = icmd->ulpContext;
9820 if (phba->sli_rev == LPFC_SLI_REV4) {
9821 iabt->un.acxri.abortIoTag = cmdiocb->sli4_xritag;
9822 iabt->un.acxri.abortContextTag = cmdiocb->iotag;
9823 }
9824 else
9825 iabt->un.acxri.abortIoTag = icmd->ulpIoTag;
9826 iabt->ulpLe = 1;
9827 iabt->ulpClass = icmd->ulpClass;
9828
9829
9830 abtsiocbp->fcp_wqidx = cmdiocb->fcp_wqidx;
9831 if (cmdiocb->iocb_flag & LPFC_IO_FCP)
9832 abtsiocbp->iocb_flag |= LPFC_USE_FCPWQIDX;
9833 if (cmdiocb->iocb_flag & LPFC_IO_FOF)
9834 abtsiocbp->iocb_flag |= LPFC_IO_FOF;
9835
9836 if (phba->link_state >= LPFC_LINK_UP)
9837 iabt->ulpCommand = CMD_ABORT_XRI_CN;
9838 else
9839 iabt->ulpCommand = CMD_CLOSE_XRI_CN;
9840
9841 abtsiocbp->iocb_cmpl = lpfc_sli_abort_els_cmpl;
9842
9843 lpfc_printf_vlog(vport, KERN_INFO, LOG_SLI,
9844 "0339 Abort xri x%x, original iotag x%x, "
9845 "abort cmd iotag x%x\n",
9846 iabt->un.acxri.abortIoTag,
9847 iabt->un.acxri.abortContextTag,
9848 abtsiocbp->iotag);
9849
9850 if (phba->sli_rev == LPFC_SLI_REV4) {
9851 ring_number =
9852 lpfc_sli_calc_ring(phba, pring->ringno, abtsiocbp);
9853 if (unlikely(ring_number == LPFC_HBA_ERROR))
9854 return 0;
9855 pring = &phba->sli.ring[ring_number];
9856
9857 spin_lock_irqsave(&pring->ring_lock, iflags);
9858 retval = __lpfc_sli_issue_iocb(phba, pring->ringno,
9859 abtsiocbp, 0);
9860 spin_unlock_irqrestore(&pring->ring_lock, iflags);
9861 } else {
9862 retval = __lpfc_sli_issue_iocb(phba, pring->ringno,
9863 abtsiocbp, 0);
9864 }
9865
9866 if (retval)
9867 __lpfc_sli_release_iocbq(phba, abtsiocbp);
9868
9869
9870
9871
9872
9873
9874 return retval;
9875}
9876
9877
9878
9879
9880
9881
9882
9883
9884
9885
9886
9887
9888
9889
/**
 * lpfc_sli_issue_abort_iotag - Abort an outstanding command iocb
 * @phba: pointer to HBA context object.
 * @pring: ring the command was issued on.
 * @cmdiocb: the command iocb to abort.
 *
 * Entry point for aborting an iocb by iotag.  During driver unload on the
 * ELS ring, no real abort is issued: the command's completion handler is
 * simply redirected to lpfc_ignore_els_cmpl so the eventual completion is
 * discarded.  Otherwise the abort is built and issued through
 * lpfc_sli_abort_iotag_issue().
 *
 * Must be called with hbalock held (asserted below).
 *
 * Return: 0 if no abort was needed/issued, otherwise the result of
 * lpfc_sli_abort_iotag_issue() (IOCB_ERROR if redirection was taken).
 */
int
lpfc_sli_issue_abort_iotag(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
			   struct lpfc_iocbq *cmdiocb)
{
	struct lpfc_vport *vport = cmdiocb->vport;
	int retval = IOCB_ERROR;
	IOCB_t *icmd = NULL;

	lockdep_assert_held(&phba->hbalock);

	/* Never abort an abort, and never abort a command that the driver
	 * has already marked aborted.
	 */
	icmd = &cmdiocb->iocb;
	if (icmd->ulpCommand == CMD_ABORT_XRI_CN ||
	    icmd->ulpCommand == CMD_CLOSE_XRI_CN ||
	    (cmdiocb->iocb_flag & LPFC_DRIVER_ABORTED) != 0)
		return 0;

	/* While unloading, ELS completions are no longer wanted: swap in
	 * the ignore handler (fabric iocbs use their own cmpl slot) and
	 * skip issuing a real abort.
	 */
	if ((vport->load_flag & FC_UNLOADING) &&
	    (pring->ringno == LPFC_ELS_RING)) {
		if (cmdiocb->iocb_flag & LPFC_IO_FABRIC)
			cmdiocb->fabric_iocb_cmpl = lpfc_ignore_els_cmpl;
		else
			cmdiocb->iocb_cmpl = lpfc_ignore_els_cmpl;
		goto abort_iotag_exit;
	}

	/* Build and issue the ABTS/CLOSE for this iocb. */
	retval = lpfc_sli_abort_iotag_issue(phba, pring, cmdiocb);

abort_iotag_exit:
	/* The caller is responsible for completing the original iocb;
	 * the abort completion only cleans up the abort itself.
	 */
	return retval;
}
9935
9936
9937
9938
9939
9940
9941
9942void
9943lpfc_sli_hba_iocb_abort(struct lpfc_hba *phba)
9944{
9945 struct lpfc_sli *psli = &phba->sli;
9946 struct lpfc_sli_ring *pring;
9947 int i;
9948
9949 for (i = 0; i < psli->num_rings; i++) {
9950 pring = &psli->ring[i];
9951 lpfc_sli_abort_iocb_ring(phba, pring);
9952 }
9953}
9954
9955
9956
9957
9958
9959
9960
9961
9962
9963
9964
9965
9966
9967
9968
9969
9970
9971
9972
9973
9974
9975
9976
9977static int
9978lpfc_sli_validate_fcp_iocb(struct lpfc_iocbq *iocbq, struct lpfc_vport *vport,
9979 uint16_t tgt_id, uint64_t lun_id,
9980 lpfc_ctx_cmd ctx_cmd)
9981{
9982 struct lpfc_scsi_buf *lpfc_cmd;
9983 int rc = 1;
9984
9985 if (!(iocbq->iocb_flag & LPFC_IO_FCP))
9986 return rc;
9987
9988 if (iocbq->vport != vport)
9989 return rc;
9990
9991 lpfc_cmd = container_of(iocbq, struct lpfc_scsi_buf, cur_iocbq);
9992
9993 if (lpfc_cmd->pCmd == NULL)
9994 return rc;
9995
9996 switch (ctx_cmd) {
9997 case LPFC_CTX_LUN:
9998 if ((lpfc_cmd->rdata->pnode) &&
9999 (lpfc_cmd->rdata->pnode->nlp_sid == tgt_id) &&
10000 (scsilun_to_int(&lpfc_cmd->fcp_cmnd->fcp_lun) == lun_id))
10001 rc = 0;
10002 break;
10003 case LPFC_CTX_TGT:
10004 if ((lpfc_cmd->rdata->pnode) &&
10005 (lpfc_cmd->rdata->pnode->nlp_sid == tgt_id))
10006 rc = 0;
10007 break;
10008 case LPFC_CTX_HOST:
10009 rc = 0;
10010 break;
10011 default:
10012 printk(KERN_ERR "%s: Unknown context cmd type, value %d\n",
10013 __func__, ctx_cmd);
10014 break;
10015 }
10016
10017 return rc;
10018}
10019
10020
10021
10022
10023
10024
10025
10026
10027
10028
10029
10030
10031
10032
10033
10034
10035
10036
10037
10038
10039int
10040lpfc_sli_sum_iocb(struct lpfc_vport *vport, uint16_t tgt_id, uint64_t lun_id,
10041 lpfc_ctx_cmd ctx_cmd)
10042{
10043 struct lpfc_hba *phba = vport->phba;
10044 struct lpfc_iocbq *iocbq;
10045 int sum, i;
10046
10047 for (i = 1, sum = 0; i <= phba->sli.last_iotag; i++) {
10048 iocbq = phba->sli.iocbq_lookup[i];
10049
10050 if (lpfc_sli_validate_fcp_iocb (iocbq, vport, tgt_id, lun_id,
10051 ctx_cmd) == 0)
10052 sum++;
10053 }
10054
10055 return sum;
10056}
10057
10058
10059
10060
10061
10062
10063
10064
10065
10066
10067
/**
 * lpfc_sli_abort_fcp_cmpl - Completion handler for FCP abort iocbs
 * @phba: pointer to HBA context object.
 * @cmdiocb: the ABORT/CLOSE_XRI_CN iocb that completed.
 * @rspiocb: response iocb carrying the abort's completion status.
 *
 * Logs the abort completion and releases the abort iocbq back to the
 * driver pool.  The aborted command completes through its own path;
 * nothing else is done here.
 */
void
lpfc_sli_abort_fcp_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
			struct lpfc_iocbq *rspiocb)
{
	lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
			"3096 ABORT_XRI_CN completing on rpi x%x "
			"original iotag x%x, abort cmd iotag x%x "
			"status 0x%x, reason 0x%x\n",
			cmdiocb->iocb.un.acxri.abortContextTag,
			cmdiocb->iocb.un.acxri.abortIoTag,
			cmdiocb->iotag, rspiocb->iocb.ulpStatus,
			rspiocb->iocb.un.ulpWord[4]);
	lpfc_sli_release_iocbq(phba, cmdiocb);
	return;
}
10083
10084
10085
10086
10087
10088
10089
10090
10091
10092
10093
10094
10095
10096
10097
10098
10099
10100
10101
10102
10103
10104
/**
 * lpfc_sli_abort_iocb - Issue aborts for all matching outstanding iocbs
 * @vport: vport whose commands are to be aborted.
 * @pring: ring to issue the abort iocbs on.
 * @tgt_id: SCSI ID of the target (for LUN/TGT context).
 * @lun_id: LUN of the target (for LUN context).
 * @abort_cmd: match granularity — LUN, target, or whole host.
 *
 * Walks the active iotag table, and for every FCP iocb matching the
 * context (per lpfc_sli_validate_fcp_iocb) that is not already being
 * aborted, builds an ABORT_XRI_CN (link up) or CLOSE_XRI_CN (link down)
 * iocb and issues it with lpfc_sli_abort_fcp_cmpl as completion handler.
 *
 * NOTE(review): the table walk and issue are done without hbalock here —
 * presumably the callers provide the necessary serialization; confirm
 * against lpfc_sli_abort_taskmgmt, which does the same walk under the lock.
 *
 * Return: number of iocbs that could not be aborted (alloc or issue
 * failure); 0 means every matching iocb had an abort issued.
 */
int
lpfc_sli_abort_iocb(struct lpfc_vport *vport, struct lpfc_sli_ring *pring,
		    uint16_t tgt_id, uint64_t lun_id, lpfc_ctx_cmd abort_cmd)
{
	struct lpfc_hba *phba = vport->phba;
	struct lpfc_iocbq *iocbq;
	struct lpfc_iocbq *abtsiocb;
	IOCB_t *cmd = NULL;
	int errcnt = 0, ret_val = 0;
	int i;

	for (i = 1; i <= phba->sli.last_iotag; i++) {
		iocbq = phba->sli.iocbq_lookup[i];

		if (lpfc_sli_validate_fcp_iocb(iocbq, vport, tgt_id, lun_id,
					       abort_cmd) != 0)
			continue;

		/* Skip commands that already have an abort in flight. */
		if (iocbq->iocb_flag & LPFC_DRIVER_ABORTED)
			continue;

		/* issue ABTS for this IOCB based on iotag */
		abtsiocb = lpfc_sli_get_iocbq(phba);
		if (abtsiocb == NULL) {
			errcnt++;
			continue;
		}

		/* Mark the command so its completion reports the abort. */
		iocbq->iocb_flag |= LPFC_DRIVER_ABORTED;

		cmd = &iocbq->iocb;
		abtsiocb->iocb.un.acxri.abortType = ABORT_TYPE_ABTS;
		abtsiocb->iocb.un.acxri.abortContextTag = cmd->ulpContext;
		if (phba->sli_rev == LPFC_SLI_REV4)
			abtsiocb->iocb.un.acxri.abortIoTag = iocbq->sli4_xritag;
		else
			abtsiocb->iocb.un.acxri.abortIoTag = cmd->ulpIoTag;
		abtsiocb->iocb.ulpLe = 1;
		abtsiocb->iocb.ulpClass = cmd->ulpClass;
		abtsiocb->vport = vport;

		/* ABTS must go out on the same WQ as the aborted command. */
		abtsiocb->fcp_wqidx = iocbq->fcp_wqidx;
		if (iocbq->iocb_flag & LPFC_IO_FCP)
			abtsiocb->iocb_flag |= LPFC_USE_FCPWQIDX;
		if (iocbq->iocb_flag & LPFC_IO_FOF)
			abtsiocb->iocb_flag |= LPFC_IO_FOF;

		/* ABTS needs a live link; otherwise just close the xri. */
		if (lpfc_is_link_up(phba))
			abtsiocb->iocb.ulpCommand = CMD_ABORT_XRI_CN;
		else
			abtsiocb->iocb.ulpCommand = CMD_CLOSE_XRI_CN;

		/* Setup callback routine and issue the command. */
		abtsiocb->iocb_cmpl = lpfc_sli_abort_fcp_cmpl;
		ret_val = lpfc_sli_issue_iocb(phba, pring->ringno,
					      abtsiocb, 0);
		if (ret_val == IOCB_ERROR) {
			lpfc_sli_release_iocbq(phba, abtsiocb);
			errcnt++;
			continue;
		}
	}

	return errcnt;
}
10176
10177
10178
10179
10180
10181
10182
10183
10184
10185
10186
10187
10188
10189
10190
10191
10192
10193
10194
10195
10196
10197
10198
/**
 * lpfc_sli_abort_taskmgmt - Abort matching iocbs for a task-management op
 * @vport: vport whose commands are to be aborted.
 * @pring: ring to issue the abort iocbs on (SLI3 path).
 * @tgt_id: SCSI ID of the target (for LUN/TGT context).
 * @lun_id: LUN of the target (for LUN context).
 * @cmd: match granularity — LUN, target, or whole host.
 *
 * Like lpfc_sli_abort_iocb() but runs entirely under hbalock and, on
 * SLI4, issues each abort on the per-FCP-WQ ring (nesting ring_lock
 * under hbalock).  Bails out immediately if an FCP IO-queue flush is in
 * progress (HBA_FCP_IOQ_FLUSH).
 *
 * Return: number of aborts successfully issued.
 */
int
lpfc_sli_abort_taskmgmt(struct lpfc_vport *vport, struct lpfc_sli_ring *pring,
			uint16_t tgt_id, uint64_t lun_id, lpfc_ctx_cmd cmd)
{
	struct lpfc_hba *phba = vport->phba;
	struct lpfc_scsi_buf *lpfc_cmd;
	struct lpfc_iocbq *abtsiocbq;
	struct lpfc_nodelist *ndlp;
	struct lpfc_iocbq *iocbq;
	IOCB_t *icmd;
	int sum, i, ret_val;
	unsigned long iflags;
	struct lpfc_sli_ring *pring_s4;
	uint32_t ring_number;

	spin_lock_irq(&phba->hbalock);

	/* A flush of the FCP IO queues is in progress; nothing to abort. */
	if (phba->hba_flag & HBA_FCP_IOQ_FLUSH) {
		spin_unlock_irq(&phba->hbalock);
		return 0;
	}
	sum = 0;

	for (i = 1; i <= phba->sli.last_iotag; i++) {
		iocbq = phba->sli.iocbq_lookup[i];

		if (lpfc_sli_validate_fcp_iocb(iocbq, vport, tgt_id, lun_id,
					       cmd) != 0)
			continue;

		/* Skip commands that already have an abort in flight. */
		if (iocbq->iocb_flag & LPFC_DRIVER_ABORTED)
			continue;

		/* issue ABTS for this IOCB based on iotag */
		abtsiocbq = __lpfc_sli_get_iocbq(phba);
		if (abtsiocbq == NULL)
			continue;

		icmd = &iocbq->iocb;
		abtsiocbq->iocb.un.acxri.abortType = ABORT_TYPE_ABTS;
		abtsiocbq->iocb.un.acxri.abortContextTag = icmd->ulpContext;
		if (phba->sli_rev == LPFC_SLI_REV4)
			abtsiocbq->iocb.un.acxri.abortIoTag =
							 iocbq->sli4_xritag;
		else
			abtsiocbq->iocb.un.acxri.abortIoTag = icmd->ulpIoTag;
		abtsiocbq->iocb.ulpLe = 1;
		abtsiocbq->iocb.ulpClass = icmd->ulpClass;
		abtsiocbq->vport = vport;

		/* ABTS must go out on the same WQ as the aborted command. */
		abtsiocbq->fcp_wqidx = iocbq->fcp_wqidx;
		if (iocbq->iocb_flag & LPFC_IO_FCP)
			abtsiocbq->iocb_flag |= LPFC_USE_FCPWQIDX;
		if (iocbq->iocb_flag & LPFC_IO_FOF)
			abtsiocbq->iocb_flag |= LPFC_IO_FOF;

		/* NOTE(review): rdata is dereferenced unconditionally here;
		 * validate_fcp_iocb has already confirmed pCmd != NULL.
		 */
		lpfc_cmd = container_of(iocbq, struct lpfc_scsi_buf, cur_iocbq);
		ndlp = lpfc_cmd->rdata->pnode;

		/* ABTS only when the link is up and the node is mapped;
		 * otherwise close the exchange locally.
		 */
		if (lpfc_is_link_up(phba) &&
		    (ndlp && ndlp->nlp_state == NLP_STE_MAPPED_NODE))
			abtsiocbq->iocb.ulpCommand = CMD_ABORT_XRI_CN;
		else
			abtsiocbq->iocb.ulpCommand = CMD_CLOSE_XRI_CN;

		/* Setup callback routine and issue the command. */
		abtsiocbq->iocb_cmpl = lpfc_sli_abort_fcp_cmpl;

		/*
		 * Indicate the IO is being aborted by the driver so the
		 * original command's completion reports the abort.
		 */
		iocbq->iocb_flag |= LPFC_DRIVER_ABORTED;

		if (phba->sli_rev == LPFC_SLI_REV4) {
			/* SLI4: route to the per-FCP-WQ ring, nesting the
			 * ring lock under the already-held hbalock.
			 */
			ring_number = MAX_SLI3_CONFIGURED_RINGS +
					 iocbq->fcp_wqidx;
			pring_s4 = &phba->sli.ring[ring_number];
			/* Note: both hbalock and ring_lock must be held */
			spin_lock_irqsave(&pring_s4->ring_lock, iflags);
			ret_val = __lpfc_sli_issue_iocb(phba, pring_s4->ringno,
							abtsiocbq, 0);
			spin_unlock_irqrestore(&pring_s4->ring_lock, iflags);
		} else {
			ret_val = __lpfc_sli_issue_iocb(phba, pring->ringno,
							abtsiocbq, 0);
		}


		if (ret_val == IOCB_ERROR)
			__lpfc_sli_release_iocbq(phba, abtsiocbq);
		else
			sum++;
	}
	spin_unlock_irq(&phba->hbalock);
	return sum;
}
10302
10303
10304
10305
10306
10307
10308
10309
10310
10311
10312
10313
10314
10315
10316
10317
10318
10319
/**
 * lpfc_sli_wake_iocb_wait - Completion handler for synchronous iocbs
 * @phba: pointer to HBA context object.
 * @cmdiocbq: the command iocb issued by lpfc_sli_issue_iocb_wait().
 * @rspiocbq: response iocb.
 *
 * Installed by lpfc_sli_issue_iocb_wait().  Two outcomes, resolved under
 * hbalock:
 *
 *  - If the waiter already timed out (LPFC_IO_WAKE_TMO is set), the
 *    waiter is gone; restore the original completion handler and invoke
 *    it (or release the iocbq if there was none).
 *
 *  - Otherwise set LPFC_IO_WAKE, copy the response into the waiter's
 *    context2 iocb, propagate exchange-busy state for FCP commands, and
 *    wake the waiter sleeping on context_un.wait_queue.
 */
static void
lpfc_sli_wake_iocb_wait(struct lpfc_hba *phba,
			struct lpfc_iocbq *cmdiocbq,
			struct lpfc_iocbq *rspiocbq)
{
	wait_queue_head_t *pdone_q;
	unsigned long iflags;
	struct lpfc_scsi_buf *lpfc_cmd;

	spin_lock_irqsave(&phba->hbalock, iflags);
	if (cmdiocbq->iocb_flag & LPFC_IO_WAKE_TMO) {
		/* The waiter timed out and has already torn down its wait
		 * state; the stack-allocated wait queue must not be
		 * touched.  Hand the completion back to the original
		 * handler instead.
		 */
		spin_unlock_irqrestore(&phba->hbalock, iflags);
		cmdiocbq->iocb_cmpl = cmdiocbq->wait_iocb_cmpl;
		cmdiocbq->wait_iocb_cmpl = NULL;
		if (cmdiocbq->iocb_cmpl)
			(cmdiocbq->iocb_cmpl)(phba, cmdiocbq, NULL);
		else
			lpfc_sli_release_iocbq(phba, cmdiocbq);
		return;
	}

	/* Normal path: mark completion and pass the response up. */
	cmdiocbq->iocb_flag |= LPFC_IO_WAKE;
	if (cmdiocbq->context2 && rspiocbq)
		memcpy(&((struct lpfc_iocbq *)cmdiocbq->context2)->iocb,
		       &rspiocbq->iocb, sizeof(IOCB_t));

	/* Propagate exchange-busy so the SCSI buffer is not reused early. */
	if ((cmdiocbq->iocb_flag & LPFC_IO_FCP) &&
		!(cmdiocbq->iocb_flag & LPFC_IO_LIBDFC)) {
		lpfc_cmd = container_of(cmdiocbq, struct lpfc_scsi_buf,
			cur_iocbq);
		lpfc_cmd->exch_busy = rspiocbq->iocb_flag & LPFC_EXCHANGE_BUSY;
	}

	pdone_q = cmdiocbq->context_un.wait_queue;
	if (pdone_q)
		wake_up(pdone_q);
	spin_unlock_irqrestore(&phba->hbalock, iflags);
	return;
}
10367
10368
10369
10370
10371
10372
10373
10374
10375
10376
10377
10378
10379
10380static int
10381lpfc_chk_iocb_flg(struct lpfc_hba *phba,
10382 struct lpfc_iocbq *piocbq, uint32_t flag)
10383{
10384 unsigned long iflags;
10385 int ret;
10386
10387 spin_lock_irqsave(&phba->hbalock, iflags);
10388 ret = piocbq->iocb_flag & flag;
10389 spin_unlock_irqrestore(&phba->hbalock, iflags);
10390 return ret;
10391
10392}
10393
10394
10395
10396
10397
10398
10399
10400
10401
10402
10403
10404
10405
10406
10407
10408
10409
10410
10411
10412
10413
10414
10415
10416
10417
10418
10419
10420
10421
10422
10423
10424
10425
10426
10427
10428
10429
/**
 * lpfc_sli_issue_iocb_wait - Issue an iocb and wait for its completion
 * @phba: pointer to HBA context object.
 * @ring_number: ring to issue on.
 * @piocb: command iocb to issue.
 * @prspiocbq: optional iocb to receive the response (must be non-NULL
 *             only when piocb->context2 is free; context2 is borrowed).
 * @timeout: wait limit in seconds.
 *
 * Synchronous wrapper around lpfc_sli_issue_iocb(): installs
 * lpfc_sli_wake_iocb_wait() as the completion handler (saving any
 * existing handler in wait_iocb_cmpl) and sleeps on a stack wait queue
 * until LPFC_IO_WAKE is set or @timeout expires.  On timeout,
 * LPFC_IO_WAKE_TMO is set under hbalock so a late completion hands the
 * iocb back to the original handler instead of touching the dead waiter
 * state.  With polling mode (DISABLE_FCP_RING_INT) the FCP ring
 * interrupt is temporarily enabled around the wait.
 *
 * Return: IOCB_SUCCESS, IOCB_TIMEDOUT, IOCB_BUSY or IOCB_ERROR.
 */
int
lpfc_sli_issue_iocb_wait(struct lpfc_hba *phba,
			 uint32_t ring_number,
			 struct lpfc_iocbq *piocb,
			 struct lpfc_iocbq *prspiocbq,
			 uint32_t timeout)
{
	DECLARE_WAIT_QUEUE_HEAD_ONSTACK(done_q);
	long timeleft, timeout_req = 0;
	int retval = IOCB_SUCCESS;
	uint32_t creg_val;
	struct lpfc_iocbq *iocb;
	int txq_cnt = 0;
	int txcmplq_cnt = 0;
	struct lpfc_sli_ring *pring = &phba->sli.ring[LPFC_ELS_RING];
	unsigned long iflags;
	bool iocb_completed = true;

	/*
	 * If the caller supplied a response iocb, borrow context2 to carry
	 * it; refuse if context2 is already in use.
	 */
	if (prspiocbq) {
		if (piocb->context2)
			return IOCB_ERROR;
		piocb->context2 = prspiocbq;
	}

	/* Save the existing handler so a late completion can restore it. */
	piocb->wait_iocb_cmpl = piocb->iocb_cmpl;
	piocb->iocb_cmpl = lpfc_sli_wake_iocb_wait;
	piocb->context_un.wait_queue = &done_q;
	piocb->iocb_flag &= ~(LPFC_IO_WAKE | LPFC_IO_WAKE_TMO);

	/* Polling mode: enable the FCP ring interrupt for the wait. */
	if (phba->cfg_poll & DISABLE_FCP_RING_INT) {
		if (lpfc_readl(phba->HCregaddr, &creg_val))
			return IOCB_ERROR;
		creg_val |= (HC_R0INT_ENA << LPFC_FCP_RING);
		writel(creg_val, phba->HCregaddr);
		readl(phba->HCregaddr); /* flush */
	}

	retval = lpfc_sli_issue_iocb(phba, ring_number, piocb,
				     SLI_IOCB_RET_IOCB);
	if (retval == IOCB_SUCCESS) {
		timeout_req = msecs_to_jiffies(timeout * 1000);
		timeleft = wait_event_timeout(done_q,
				lpfc_chk_iocb_flg(phba, piocb, LPFC_IO_WAKE),
				timeout_req);
		spin_lock_irqsave(&phba->hbalock, iflags);
		if (!(piocb->iocb_flag & LPFC_IO_WAKE)) {
			/*
			 * Timed out before the completion arrived.  Flag
			 * LPFC_IO_WAKE_TMO under hbalock so the (still
			 * possible) late completion routes through the
			 * saved handler and never touches done_q, which
			 * lives on this stack frame.
			 */
			iocb_completed = false;
			piocb->iocb_flag |= LPFC_IO_WAKE_TMO;
		}
		spin_unlock_irqrestore(&phba->hbalock, iflags);
		if (iocb_completed) {
			lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
					"0331 IOCB wake signaled\n");
			/* Note: the response (if requested) has already
			 * been copied into prspiocbq by the completion
			 * handler.
			 */
		} else if (timeleft == 0) {
			lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
					"0338 IOCB wait timeout error - no "
					"wake response Data x%x\n", timeout);
			retval = IOCB_TIMEDOUT;
		} else {
			/* NOTE(review): "(timeleft / jiffies)" divides the
			 * remaining wait by the current jiffies counter,
			 * which is not a meaningful quantity — looks like
			 * it was meant to print timeleft itself; log-only,
			 * so behavior is otherwise unaffected.
			 */
			lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
					"0330 IOCB wake NOT set, "
					"Data x%x x%lx\n",
					timeout, (timeleft / jiffies));
			retval = IOCB_TIMEDOUT;
		}
	} else if (retval == IOCB_BUSY) {
		if (phba->cfg_log_verbose & LOG_SLI) {
			list_for_each_entry(iocb, &pring->txq, list) {
				txq_cnt++;
			}
			list_for_each_entry(iocb, &pring->txcmplq, list) {
				txcmplq_cnt++;
			}
			lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
				"2818 Max IOCBs %d txq cnt %d txcmplq cnt %d\n",
				phba->iocb_cnt, txq_cnt, txcmplq_cnt);
		}
		return retval;
	} else {
		lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
				"0332 IOCB wait issue failed, Data x%x\n",
				retval);
		retval = IOCB_ERROR;
	}

	/* Polling mode: restore the disabled FCP ring interrupt. */
	if (phba->cfg_poll & DISABLE_FCP_RING_INT) {
		if (lpfc_readl(phba->HCregaddr, &creg_val))
			return IOCB_ERROR;
		creg_val &= ~(HC_R0INT_ENA << LPFC_FCP_RING);
		writel(creg_val, phba->HCregaddr);
		readl(phba->HCregaddr); /* flush */
	}

	/* Give context2 back to the caller and clear the wait state. */
	if (prspiocbq)
		piocb->context2 = NULL;

	piocb->context_un.wait_queue = NULL;
	piocb->iocb_cmpl = NULL;
	return retval;
}
10545
10546
10547
10548
10549
10550
10551
10552
10553
10554
10555
10556
10557
10558
10559
10560
10561
10562
10563
10564
10565
10566
10567
10568
10569
10570
10571
/**
 * lpfc_sli_issue_mbox_wait - Issue a mailbox command and wait for completion
 * @phba: pointer to HBA context object.
 * @pmboxq: mailbox command to issue.
 * @timeout: wait limit in seconds.
 *
 * Issues @pmboxq with MBX_NOWAIT and sleeps on a stack wait queue until
 * lpfc_sli_wake_mbox_wait() sets LPFC_MBX_WAKE or @timeout expires.
 * context1 is temporarily repurposed to carry the wait queue and is
 * restored before returning.  On timeout the completion handler is
 * swapped to lpfc_sli_def_mbox_cmpl (under hbalock) so a late completion
 * cleans up after itself instead of touching the dead waiter.
 *
 * NOTE(review): the wait is interruptible, so a signal can end the wait
 * early; that case is folded into the LPFC_MBX_WAKE check and reported
 * as MBX_TIMEOUT — confirm that is intended for signal delivery.
 *
 * Return: MBX_SUCCESS, MBX_TIMEOUT, or the lpfc_sli_issue_mbox() failure
 * code when the command could not be issued.
 */
int
lpfc_sli_issue_mbox_wait(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmboxq,
			 uint32_t timeout)
{
	DECLARE_WAIT_QUEUE_HEAD_ONSTACK(done_q);
	MAILBOX_t *mb = NULL;
	int retval;
	unsigned long flag;

	/* Remember the caller's context1 so it can be restored. */
	if (pmboxq->context1)
		mb = (MAILBOX_t *)pmboxq->context1;

	pmboxq->mbox_flag &= ~LPFC_MBX_WAKE;
	/* setup wake call as IOCB callback */
	pmboxq->mbox_cmpl = lpfc_sli_wake_mbox_wait;
	/* setup context field to pass wait_queue pointer to wake function */
	pmboxq->context1 = &done_q;

	/* now issue the command */
	retval = lpfc_sli_issue_mbox(phba, pmboxq, MBX_NOWAIT);
	if (retval == MBX_BUSY || retval == MBX_SUCCESS) {
		wait_event_interruptible_timeout(done_q,
				pmboxq->mbox_flag & LPFC_MBX_WAKE,
				msecs_to_jiffies(timeout * 1000));

		spin_lock_irqsave(&phba->hbalock, flag);
		/* restore the possible extended buffer for free resource */
		pmboxq->context1 = (uint8_t *)mb;
		/*
		 * if LPFC_MBX_WAKE flag is set the mailbox is completed
		 * else do not free the resources.
		 */
		if (pmboxq->mbox_flag & LPFC_MBX_WAKE) {
			retval = MBX_SUCCESS;
		} else {
			retval = MBX_TIMEOUT;
			/* Late completion must not wake the dead waiter;
			 * route it to the default cleanup handler.
			 */
			pmboxq->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
		}
		spin_unlock_irqrestore(&phba->hbalock, flag);
	} else {
		/* restore the possible extended buffer for free resource */
		pmboxq->context1 = (uint8_t *)mb;
	}

	return retval;
}
10619
10620
10621
10622
10623
10624
10625
10626
10627
10628
10629
10630
10631
10632
10633
10634
/**
 * lpfc_sli_mbox_sys_shutdown - Shut down the mailbox subsystem
 * @phba: pointer to HBA context object.
 * @mbx_action: LPFC_MBX_WAIT to drain the active mailbox first, or
 *              LPFC_MBX_NO_WAIT to flush immediately.
 *
 * Blocks further asynchronous mailbox posting (LPFC_SLI_ASYNC_MBX_BLK),
 * then — in the WAIT case — polls until the currently active mailbox
 * command completes or its command-specific timeout expires, and finally
 * flushes everything still queued via lpfc_sli_mbox_sys_flush().
 */
void
lpfc_sli_mbox_sys_shutdown(struct lpfc_hba *phba, int mbx_action)
{
	struct lpfc_sli *psli = &phba->sli;
	unsigned long timeout;

	if (mbx_action == LPFC_MBX_NO_WAIT) {
		/* Brief delay to let an in-flight command land, then flush
		 * whatever remains without waiting.
		 */
		msleep(100);
		lpfc_sli_mbox_sys_flush(phba);
		return;
	}
	timeout = msecs_to_jiffies(LPFC_MBOX_TMO * 1000) + jiffies;

	spin_lock_irq(&phba->hbalock);
	psli->sli_flag |= LPFC_SLI_ASYNC_MBX_BLK;

	if (psli->sli_flag & LPFC_SLI_ACTIVE) {
		/* Use the command-specific timeout for the command that is
		 * actually in flight, if any.
		 */
		if (phba->sli.mbox_active)
			timeout = msecs_to_jiffies(lpfc_mbox_tmo_val(phba,
						phba->sli.mbox_active) *
						1000) + jiffies;
		spin_unlock_irq(&phba->hbalock);

		/* Poll for the active mailbox command to finish. */
		while (phba->sli.mbox_active) {
			/* Check active mailbox complete status every 2ms */
			msleep(2);
			if (time_after(jiffies, timeout))
				/* Timeout: proceed to the flush anyway;
				 * the command is abandoned.
				 */
				break;
		}
	} else
		spin_unlock_irq(&phba->hbalock);

	lpfc_sli_mbox_sys_flush(phba);
}
10676
10677
10678
10679
10680
10681
10682
10683
10684
10685
10686
10687
/**
 * lpfc_sli_eratt_read - Check the SLI3 Host Attention register for errors
 * @phba: pointer to HBA context object.
 *
 * Reads the HA register; when HA_ERATT is set, reads the host status,
 * detects the deferred-error condition (HS_FFER1 plus any HS_FFER2..8)
 * — in which case interrupts are disabled via HC — and records the error
 * attention in work_ha / hba_flag for the worker thread.  A failed
 * register read is treated as an unplugged adapter (UNPLUG_ERR).
 *
 * Return: 1 when an error attention (or unplug) was detected and posted,
 * 0 otherwise.
 */
static int
lpfc_sli_eratt_read(struct lpfc_hba *phba)
{
	uint32_t ha_copy;

	/* Read chip Host Attention (HA) register */
	if (lpfc_readl(phba->HAregaddr, &ha_copy))
		goto unplug_err;

	if (ha_copy & HA_ERATT) {
		/* Read host status register to retrieve error event */
		if (lpfc_sli_read_hs(phba))
			goto unplug_err;

		/* Check if there is a deferred error condition is active:
		 * FFER1 together with any other FFER bit.
		 */
		if ((HS_FFER1 & phba->work_hs) &&
		    ((HS_FFER2 | HS_FFER3 | HS_FFER4 | HS_FFER5 |
		      HS_FFER6 | HS_FFER7 | HS_FFER8) & phba->work_hs)) {
			phba->hba_flag |= DEFER_ERATT;
			/* Clear all interrupt enable conditions */
			writel(0, phba->HCregaddr);
			readl(phba->HCregaddr);
		}

		/* Set the driver HA work bitmap */
		phba->work_ha |= HA_ERATT;
		/* Indicate polling handles this ERATT */
		phba->hba_flag |= HBA_ERATT_HANDLED;
		return 1;
	}
	return 0;

unplug_err:
	/* Set the driver HS work bitmap */
	phba->work_hs |= UNPLUG_ERR;
	/* Set the driver HA work bitmap */
	phba->work_ha |= HA_ERATT;
	/* Indicate polling handles this ERATT */
	phba->hba_flag |= HBA_ERATT_HANDLED;
	return 1;
}
10729
10730
10731
10732
10733
10734
10735
10736
10737
10738
10739
10740
/**
 * lpfc_sli4_eratt_read - Check SLI4 error registers for an error attention
 * @phba: pointer to HBA context object.
 *
 * Dispatches on the SLI interface type.  For if_type 0 the unrecoverable
 * error status registers are compared against the UE masks; for if_type 2
 * the port status register's error bit is checked and the two error
 * detail registers captured.  Detected errors are recorded in
 * work_status / work_ha / hba_flag for the worker thread; failed register
 * reads are treated as an unplugged adapter.
 *
 * Return: 1 when an error attention was detected and posted (also for
 * unsupported interface types), 0 otherwise.
 */
static int
lpfc_sli4_eratt_read(struct lpfc_hba *phba)
{
	uint32_t uerr_sta_hi, uerr_sta_lo;
	uint32_t if_type, portsmphr;
	struct lpfc_register portstat_reg;

	/*
	 * For now, use the SLI4 device internal unrecoverable error
	 * registers for error attention. This can be changed later.
	 */
	if_type = bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf);
	switch (if_type) {
	case LPFC_SLI_INTF_IF_TYPE_0:
		if (lpfc_readl(phba->sli4_hba.u.if_type0.UERRLOregaddr,
			&uerr_sta_lo) ||
			lpfc_readl(phba->sli4_hba.u.if_type0.UERRHIregaddr,
			&uerr_sta_hi)) {
			phba->work_hs |= UNPLUG_ERR;
			phba->work_ha |= HA_ERATT;
			phba->hba_flag |= HBA_ERATT_HANDLED;
			return 1;
		}
		/* An error is unrecoverable only if it is not masked. */
		if ((~phba->sli4_hba.ue_mask_lo & uerr_sta_lo) ||
		    (~phba->sli4_hba.ue_mask_hi & uerr_sta_hi)) {
			lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
					"1423 HBA Unrecoverable error: "
					"uerr_lo_reg=0x%x, uerr_hi_reg=0x%x, "
					"ue_mask_lo_reg=0x%x, "
					"ue_mask_hi_reg=0x%x\n",
					uerr_sta_lo, uerr_sta_hi,
					phba->sli4_hba.ue_mask_lo,
					phba->sli4_hba.ue_mask_hi);
			phba->work_status[0] = uerr_sta_lo;
			phba->work_status[1] = uerr_sta_hi;
			phba->work_ha |= HA_ERATT;
			phba->hba_flag |= HBA_ERATT_HANDLED;
			return 1;
		}
		break;
	case LPFC_SLI_INTF_IF_TYPE_2:
		if (lpfc_readl(phba->sli4_hba.u.if_type2.STATUSregaddr,
			&portstat_reg.word0) ||
			lpfc_readl(phba->sli4_hba.PSMPHRregaddr,
			&portsmphr)){
			phba->work_hs |= UNPLUG_ERR;
			phba->work_ha |= HA_ERATT;
			phba->hba_flag |= HBA_ERATT_HANDLED;
			return 1;
		}
		if (bf_get(lpfc_sliport_status_err, &portstat_reg)) {
			/* Capture the detail registers for the worker. */
			phba->work_status[0] =
				readl(phba->sli4_hba.u.if_type2.ERR1regaddr);
			phba->work_status[1] =
				readl(phba->sli4_hba.u.if_type2.ERR2regaddr);
			lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
					"2885 Port Status Event: "
					"port status reg 0x%x, "
					"port smphr reg 0x%x, "
					"error 1=0x%x, error 2=0x%x\n",
					portstat_reg.word0,
					portsmphr,
					phba->work_status[0],
					phba->work_status[1]);
			phba->work_ha |= HA_ERATT;
			phba->hba_flag |= HBA_ERATT_HANDLED;
			return 1;
		}
		break;
	case LPFC_SLI_INTF_IF_TYPE_1:
	default:
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"2886 HBA Error Attention on unsupported "
				"if type %d.", if_type);
		return 1;
	}

	return 0;
}
10820
10821
10822
10823
10824
10825
10826
10827
10828
10829
10830
/**
 * lpfc_sli_check_eratt - Poll for an HBA error attention
 * @phba: pointer to HBA context object.
 *
 * Called from the polling path.  Skips the check when error attention is
 * deliberately ignored (LS_IGNORE_ERATT), already claimed by another
 * context (HBA_ERATT_HANDLED), deferred (DEFER_ERATT), or the PCI
 * channel is offline.  Otherwise dispatches to the SLI-revision-specific
 * register read under hbalock.
 *
 * Return: 1 if an error attention was found and posted, 0 otherwise.
 */
int
lpfc_sli_check_eratt(struct lpfc_hba *phba)
{
	uint32_t ha_copy;

	/* If somebody is waiting to handle an eratt, don't process it
	 * here. The brdkill function will do this.
	 */
	if (phba->link_flag & LS_IGNORE_ERATT)
		return 0;

	/* Check if interrupt handler handles this ERATT */
	spin_lock_irq(&phba->hbalock);
	if (phba->hba_flag & HBA_ERATT_HANDLED) {
		/* Interrupt handler has handled ERATT */
		spin_unlock_irq(&phba->hbalock);
		return 0;
	}

	/*
	 * If there is deferred error attention, do not check for error
	 * attention
	 */
	if (unlikely(phba->hba_flag & DEFER_ERATT)) {
		spin_unlock_irq(&phba->hbalock);
		return 0;
	}

	/* If PCI channel is offline, don't process it */
	if (unlikely(pci_channel_offline(phba->pcidev))) {
		spin_unlock_irq(&phba->hbalock);
		return 0;
	}

	switch (phba->sli_rev) {
	case LPFC_SLI_REV2:
	case LPFC_SLI_REV3:
		/* Read chip Host Attention (HA) register */
		ha_copy = lpfc_sli_eratt_read(phba);
		break;
	case LPFC_SLI_REV4:
		/* Read device Uncoverable Error (UERR) registers */
		ha_copy = lpfc_sli4_eratt_read(phba);
		break;
	default:
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"0299 Invalid SLI revision (%d)\n",
				phba->sli_rev);
		ha_copy = 0;
		break;
	}
	spin_unlock_irq(&phba->hbalock);

	return ha_copy;
}
10886
10887
10888
10889
10890
10891
10892
10893
10894
10895
10896
/**
 * lpfc_intr_state_check - Validate device state from an interrupt handler
 * @phba: pointer to HBA context object.
 *
 * Rejects the interrupt when the PCI channel is offline or the device is
 * still below LPFC_LINK_DOWN (not yet initialized).  The sli_intr
 * statistic is bumped between the two checks, i.e. only for interrupts
 * that reached a live PCI channel.
 *
 * Return: 0 when the interrupt should be processed, -EIO otherwise.
 */
static inline int
lpfc_intr_state_check(struct lpfc_hba *phba)
{
	/* If the pci channel is offline, ignore all the interrupts */
	if (unlikely(pci_channel_offline(phba->pcidev)))
		return -EIO;

	/* Update device level interrupt statistics */
	phba->sli.slistat.sli_intr++;

	/* Ignore all interrupts during initialization. */
	if (unlikely(phba->link_state < LPFC_LINK_DOWN))
		return -EIO;

	return 0;
}
10913
10914
10915
10916
10917
10918
10919
10920
10921
10922
10923
10924
10925
10926
10927
10928
10929
10930
10931
10932
10933
10934
/**
 * lpfc_sli_sp_intr_handler - Slow-path interrupt handler for SLI3 devices
 * @irq: interrupt number (unused).
 * @dev_id: pointer to the lpfc_hba as registered with request_irq.
 *
 * Handles the slow-path attentions: link attention (HA_LATT), ELS ring
 * events, error attention (HA_ERATT) and mailbox completion (HA_MBATT).
 * In MSI-X mode it reads and acks the HA register itself; in INTx mode
 * the shared dispatcher has already stashed the value in phba->ha_copy.
 * Work that cannot be done in interrupt context is recorded in
 * phba->work_ha and the worker thread is woken.
 *
 * Return: IRQ_HANDLED when the interrupt was consumed (including
 * register-read failures, treated as unplug), IRQ_NONE otherwise.
 */
irqreturn_t
lpfc_sli_sp_intr_handler(int irq, void *dev_id)
{
	struct lpfc_hba  *phba;
	uint32_t ha_copy, hc_copy;
	uint32_t work_ha_copy;
	unsigned long status;
	unsigned long iflag;
	uint32_t control;

	MAILBOX_t *mbox, *pmbox;
	struct lpfc_vport *vport;
	struct lpfc_nodelist *ndlp;
	struct lpfc_dmabuf *mp;
	LPFC_MBOXQ_t *pmb;
	int rc;

	/*
	 * Get the driver's phba structure from the dev_id and
	 * assume the HBA is not interrupting.
	 */
	phba = (struct lpfc_hba *)dev_id;

	if (unlikely(!phba))
		return IRQ_NONE;

	/*
	 * Stuff needs to be attented to when this function is invoked as an
	 * individual interrupt handler in MSI-X multi-message interrupt mode
	 */
	if (phba->intr_type == MSIX) {
		/* Check device state for handling interrupt */
		if (lpfc_intr_state_check(phba))
			return IRQ_NONE;
		/* Need to read HA REG for slow-path events */
		spin_lock_irqsave(&phba->hbalock, iflag);
		if (lpfc_readl(phba->HAregaddr, &ha_copy))
			goto unplug_error;
		/* If somebody is waiting to handle an eratt don't process it
		 * here. The brdkill function will do this.
		 */
		if (phba->link_flag & LS_IGNORE_ERATT)
			ha_copy &= ~HA_ERATT;
		/* Check the need for handling ERATT in interrupt handler */
		if (ha_copy & HA_ERATT) {
			if (phba->hba_flag & HBA_ERATT_HANDLED)
				/* ERATT polling has handled ERATT */
				ha_copy &= ~HA_ERATT;
			else
				/* Indicate interrupt handler handles ERATT */
				phba->hba_flag |= HBA_ERATT_HANDLED;
		}

		/*
		 * If there is deferred error attention, do not check for any
		 * interrupt.
		 */
		if (unlikely(phba->hba_flag & DEFER_ERATT)) {
			spin_unlock_irqrestore(&phba->hbalock, iflag);
			return IRQ_NONE;
		}

		/* Clear up only attention source related to slow-path:
		 * mask out fast-path bits, ack what we took, then restore
		 * the HC enables.
		 */
		if (lpfc_readl(phba->HCregaddr, &hc_copy))
			goto unplug_error;

		writel(hc_copy & ~(HC_MBINT_ENA | HC_R2INT_ENA |
			HC_LAINT_ENA | HC_ERINT_ENA),
			phba->HCregaddr);
		writel((ha_copy & (HA_MBATT | HA_R2_CLR_MSK)),
			phba->HAregaddr);
		writel(hc_copy, phba->HCregaddr);
		readl(phba->HAregaddr); /* flush */
		spin_unlock_irqrestore(&phba->hbalock, iflag);
	} else
		/* INTx: the shared dispatcher already captured HA. */
		ha_copy = phba->ha_copy;

	work_ha_copy = ha_copy & phba->work_ha_mask;

	if (work_ha_copy) {
		if (work_ha_copy & HA_LATT) {
			if (phba->sli.sli_flag & LPFC_PROCESS_LA) {
				/*
				 * Turn off Link Attention interrupts
				 * until CLEAR_LA done
				 */
				spin_lock_irqsave(&phba->hbalock, iflag);
				phba->sli.sli_flag &= ~LPFC_PROCESS_LA;
				if (lpfc_readl(phba->HCregaddr, &control))
					goto unplug_error;
				control &= ~HC_LAINT_ENA;
				writel(control, phba->HCregaddr);
				readl(phba->HCregaddr); /* flush */
				spin_unlock_irqrestore(&phba->hbalock, iflag);
			}
			else
				work_ha_copy &= ~HA_LATT;
		}

		if (work_ha_copy & ~(HA_ERATT | HA_MBATT | HA_LATT)) {
			/*
			 * Turn off Slow Rings interrupts, LPFC_ELS_RING is
			 * the only slow ring.
			 */
			status = (work_ha_copy &
				  (HA_RXMASK  << (4*LPFC_ELS_RING)));
			status >>= (4*LPFC_ELS_RING);
			if (status & HA_RXMASK) {
				spin_lock_irqsave(&phba->hbalock, iflag);
				if (lpfc_readl(phba->HCregaddr, &control))
					goto unplug_error;

				lpfc_debugfs_slow_ring_trc(phba,
				"ISR slow ring:   ctl:x%x stat:x%x isrcnt:x%x",
				control, status,
				(uint32_t)phba->sli.slistat.sli_intr);

				if (control & (HC_R0INT_ENA << LPFC_ELS_RING)) {
					lpfc_debugfs_slow_ring_trc(phba,
						"ISR Disable ring:"
						"pwork:x%x hawork:x%x wait:x%x",
						phba->work_ha, work_ha_copy,
						(uint32_t)((unsigned long)
						&phba->work_waitq));

					control &=
					    ~(HC_R0INT_ENA << LPFC_ELS_RING);
					writel(control, phba->HCregaddr);
					readl(phba->HCregaddr); /* flush */
				}
				else {
					lpfc_debugfs_slow_ring_trc(phba,
						"ISR slow ring:   pwork:"
						"x%x hawork:x%x wait:x%x",
						phba->work_ha, work_ha_copy,
						(uint32_t)((unsigned long)
						&phba->work_waitq));
				}
				spin_unlock_irqrestore(&phba->hbalock, iflag);
			}
		}
		spin_lock_irqsave(&phba->hbalock, iflag);
		if (work_ha_copy & HA_ERATT) {
			if (lpfc_sli_read_hs(phba))
				goto unplug_error;
			/*
			 * Check if there is a deferred error condition
			 * is active
			 */
			if ((HS_FFER1 & phba->work_hs) &&
				((HS_FFER2 | HS_FFER3 | HS_FFER4 | HS_FFER5 |
				  HS_FFER6 | HS_FFER7 | HS_FFER8) &
				  phba->work_hs)) {
				phba->hba_flag |= DEFER_ERATT;
				/* Clear all interrupt enable conditions */
				writel(0, phba->HCregaddr);
				readl(phba->HCregaddr);
			}
		}

		if ((work_ha_copy & HA_MBATT) && (phba->sli.mbox_active)) {
			pmb = phba->sli.mbox_active;
			pmbox = &pmb->u.mb;
			mbox = phba->mbox;
			vport = pmb->vport;

			/* First check out the status word */
			lpfc_sli_pcimem_bcopy(mbox, pmbox, sizeof(uint32_t));
			if (pmbox->mbxOwner != OWN_HOST) {
				spin_unlock_irqrestore(&phba->hbalock, iflag);
				/*
				 * Stray Mailbox Interrupt, mbxCommand <cmd>
				 * mbxStatus <status>
				 */
				lpfc_printf_log(phba, KERN_ERR, LOG_MBOX |
						LOG_SLI,
						"(%d):0304 Stray Mailbox "
						"Interrupt mbxCommand x%x "
						"mbxStatus x%x\n",
						(vport ? vport->vpi : 0),
						pmbox->mbxCommand,
						pmbox->mbxStatus);
				/* clear mailbox attention bit */
				work_ha_copy &= ~HA_MBATT;
			} else {
				phba->sli.mbox_active = NULL;
				spin_unlock_irqrestore(&phba->hbalock, iflag);
				phba->last_completion_time = jiffies;
				del_timer(&phba->sli.mbox_tmo);
				if (pmb->mbox_cmpl) {
					lpfc_sli_pcimem_bcopy(mbox, pmbox,
							MAILBOX_CMD_SIZE);
					if (pmb->out_ext_byte_len &&
						pmb->context2)
						lpfc_sli_pcimem_bcopy(
						phba->mbox_ext,
						pmb->context2,
						pmb->out_ext_byte_len);
				}
				if (pmb->mbox_flag & LPFC_MBX_IMED_UNREG) {
					pmb->mbox_flag &= ~LPFC_MBX_IMED_UNREG;

					lpfc_debugfs_disc_trc(vport,
						LPFC_DISC_TRC_MBOX_VPORT,
						"MBOX dflt rpi: : "
						"status:x%x rpi:x%x",
						(uint32_t)pmbox->mbxStatus,
						pmbox->un.varWords[0], 0);

					if (!pmbox->mbxStatus) {
						mp = (struct lpfc_dmabuf *)
							(pmb->context1);
						ndlp = (struct lpfc_nodelist *)
							pmb->context2;

						/* Reg_LOGIN of dflt RPI was
						 * successful. new lets get
						 * rid of the RPI using the
						 * same mbox buffer.
						 */
						lpfc_unreg_login(phba,
							vport->vpi,
							pmbox->un.varWords[0],
							pmb);
						pmb->mbox_cmpl =
							lpfc_mbx_cmpl_dflt_rpi;
						pmb->context1 = mp;
						pmb->context2 = ndlp;
						pmb->vport = vport;
						rc = lpfc_sli_issue_mbox(phba,
								pmb,
								MBX_NOWAIT);
						/* NOTE(review): the two log
						 * string fragments below
						 * concatenate to "should
						 * havebeen" — missing space
						 * in the message text.
						 */
						if (rc != MBX_BUSY)
							lpfc_printf_log(phba,
							KERN_ERR,
							LOG_MBOX | LOG_SLI,
							"0350 rc should have"
							"been MBX_BUSY\n");
						if (rc != MBX_NOT_FINISHED)
							goto send_current_mbox;
					}
				}
				spin_lock_irqsave(
						&phba->pport->work_port_lock,
						iflag);
				phba->pport->work_port_events &=
					~WORKER_MBOX_TMO;
				spin_unlock_irqrestore(
						&phba->pport->work_port_lock,
						iflag);
				lpfc_mbox_cmpl_put(phba, pmb);
			}
		} else
			spin_unlock_irqrestore(&phba->hbalock, iflag);

		if ((work_ha_copy & HA_MBATT) &&
		    (phba->sli.mbox_active == NULL)) {
send_current_mbox:
			/* Process next mailbox command if there is one */
			do {
				rc = lpfc_sli_issue_mbox(phba, NULL,
							 MBX_NOWAIT);
			} while (rc == MBX_NOT_FINISHED);
			if (rc != MBX_SUCCESS)
				lpfc_printf_log(phba, KERN_ERR, LOG_MBOX |
						LOG_SLI, "0349 rc should be "
						"MBX_SUCCESS\n");
		}

		/* Hand the accumulated work to the worker thread. */
		spin_lock_irqsave(&phba->hbalock, iflag);
		phba->work_ha |= work_ha_copy;
		spin_unlock_irqrestore(&phba->hbalock, iflag);
		lpfc_worker_wake_up(phba);
	}
	return IRQ_HANDLED;
unplug_error:
	spin_unlock_irqrestore(&phba->hbalock, iflag);
	return IRQ_HANDLED;

}
11215
11216
11217
11218
11219
11220
11221
11222
11223
11224
11225
11226
11227
11228
11229
11230
11231
11232
11233
11234
/**
 * lpfc_sli_fp_intr_handler - Fast-path interrupt handler to SLI-3 device
 * @irq: Interrupt number.
 * @dev_id: The device context pointer (an lpfc_hba *).
 *
 * Services fast-path (FCP ring and, when multi-ring support is configured,
 * the extra ring) host attention events.  In MSI-X mode this runs as its
 * own vector and must read and clear the HA register itself; in INTx/MSI
 * mode the shared device-level handler has already latched the attention
 * bits into phba->ha_copy.
 *
 * Returns IRQ_HANDLED when the interrupt was serviced (or when a register
 * read failed), IRQ_NONE when the device is not in a state to handle it.
 **/
irqreturn_t
lpfc_sli_fp_intr_handler(int irq, void *dev_id)
{
	struct lpfc_hba *phba;
	uint32_t ha_copy;
	unsigned long status;
	unsigned long iflag;

	/* Get the driver's phba structure from the dev_id */
	phba = (struct lpfc_hba *) dev_id;

	if (unlikely(!phba))
		return IRQ_NONE;

	/*
	 * Stuff needs to be attended to when this function is invoked as an
	 * individual interrupt handler in MSI-X multi-message interrupt mode.
	 */
	if (phba->intr_type == MSIX) {
		/* Check device state for handling interrupt */
		if (lpfc_intr_state_check(phba))
			return IRQ_NONE;
		/* Need to read HA REG for FCP ring and other ring events */
		if (lpfc_readl(phba->HAregaddr, &ha_copy))
			return IRQ_HANDLED;
		/* Clear up only attention source related to fast-path */
		spin_lock_irqsave(&phba->hbalock, iflag);
		/*
		 * If there is deferred error attention, do not check for
		 * any interrupt.
		 */
		if (unlikely(phba->hba_flag & DEFER_ERATT)) {
			spin_unlock_irqrestore(&phba->hbalock, iflag);
			return IRQ_NONE;
		}
		writel((ha_copy & (HA_R0_CLR_MSK | HA_R1_CLR_MSK)),
			phba->HAregaddr);
		readl(phba->HAregaddr); /* flush the clear to the chip */
		spin_unlock_irqrestore(&phba->hbalock, iflag);
	} else
		/* INTx/MSI: attention bits were latched by the shared handler */
		ha_copy = phba->ha_copy;

	/*
	 * Process all events on FCP ring.  Mask off bits the worker thread
	 * is already responsible for.
	 */
	ha_copy &= ~(phba->work_ha_mask);

	status = (ha_copy & (HA_RXMASK << (4*LPFC_FCP_RING)));
	status >>= (4*LPFC_FCP_RING);
	if (status & HA_RXMASK)
		lpfc_sli_handle_fast_ring_event(phba,
						&phba->sli.ring[LPFC_FCP_RING],
						status);

	if (phba->cfg_multi_ring_support == 2) {
		/*
		 * Process all events on the extra ring.  Take the optimized
		 * path for the extra ring IO.
		 */
		status = (ha_copy & (HA_RXMASK << (4*LPFC_EXTRA_RING)));
		status >>= (4*LPFC_EXTRA_RING);
		if (status & HA_RXMASK) {
			lpfc_sli_handle_fast_ring_event(phba,
					&phba->sli.ring[LPFC_EXTRA_RING],
					status);
		}
	}
	return IRQ_HANDLED;
}
11306
11307
11308
11309
11310
11311
11312
11313
11314
11315
11316
11317
11318
11319
11320
11321
11322
11323
/**
 * lpfc_sli_intr_handler - Device-level interrupt handler to SLI-3 device
 * @irq: Interrupt number.
 * @dev_id: The device context pointer (an lpfc_hba *).
 *
 * Single-vector (INTx/MSI) entry point.  Latches the Host Attention
 * register into phba->ha_copy, temporarily masks interrupt enables while
 * clearing serviced attention bits (link and error attentions excepted),
 * then splits the work between the slow-path handler (mailbox, link,
 * error, ELS ring) and the fast-path handler (FCP/extra ring).
 *
 * Returns IRQ_HANDLED if either sub-handler serviced work, else IRQ_NONE.
 **/
irqreturn_t
lpfc_sli_intr_handler(int irq, void *dev_id)
{
	struct lpfc_hba *phba;
	irqreturn_t sp_irq_rc, fp_irq_rc;
	unsigned long status1, status2;
	uint32_t hc_copy;

	/*
	 * Get the driver's phba structure from the dev_id and
	 * check whether the interrupt is for us (shared IRQ line).
	 */
	phba = (struct lpfc_hba *) dev_id;

	if (unlikely(!phba))
		return IRQ_NONE;

	/* Check device state for handling interrupt */
	if (lpfc_intr_state_check(phba))
		return IRQ_NONE;

	spin_lock(&phba->hbalock);
	if (lpfc_readl(phba->HAregaddr, &phba->ha_copy)) {
		spin_unlock(&phba->hbalock);
		return IRQ_HANDLED;
	}

	if (unlikely(!phba->ha_copy)) {
		/* No attention bits set: not our interrupt */
		spin_unlock(&phba->hbalock);
		return IRQ_NONE;
	} else if (phba->ha_copy & HA_ERATT) {
		if (phba->hba_flag & HBA_ERATT_HANDLED)
			/* ERATT polling has handled ERATT */
			phba->ha_copy &= ~HA_ERATT;
		else
			/* Indicate interrupt handler handles ERATT */
			phba->hba_flag |= HBA_ERATT_HANDLED;
	}

	/*
	 * If there is deferred error attention, do not check for any interrupt.
	 */
	if (unlikely(phba->hba_flag & DEFER_ERATT)) {
		spin_unlock(&phba->hbalock);
		return IRQ_NONE;
	}

	/* Clear attention sources except link and error attentions */
	if (lpfc_readl(phba->HCregaddr, &hc_copy)) {
		spin_unlock(&phba->hbalock);
		return IRQ_HANDLED;
	}
	writel(hc_copy & ~(HC_MBINT_ENA | HC_R0INT_ENA | HC_R1INT_ENA
		| HC_R2INT_ENA | HC_LAINT_ENA | HC_ERINT_ENA),
		phba->HCregaddr);
	writel((phba->ha_copy & ~(HA_LATT | HA_ERATT)), phba->HAregaddr);
	writel(hc_copy, phba->HCregaddr);
	readl(phba->HAregaddr); /* flush */
	spin_unlock(&phba->hbalock);

	/*
	 * Invoke slow-path host attention interrupt handling as appropriate.
	 */

	/* status of events with mailbox and link attention */
	status1 = phba->ha_copy & (HA_MBATT | HA_LATT | HA_ERATT);

	/* status of events with ELS ring */
	status2 = (phba->ha_copy & (HA_RXMASK << (4*LPFC_ELS_RING)));
	status2 >>= (4*LPFC_ELS_RING);

	if (status1 || (status2 & HA_RXMASK))
		sp_irq_rc = lpfc_sli_sp_intr_handler(irq, dev_id);
	else
		sp_irq_rc = IRQ_NONE;

	/*
	 * Invoke fast-path host attention interrupt handling as appropriate.
	 */

	/* status of events with FCP ring */
	status1 = (phba->ha_copy & (HA_RXMASK << (4*LPFC_FCP_RING)));
	status1 >>= (4*LPFC_FCP_RING);

	/* status of events with extra ring */
	if (phba->cfg_multi_ring_support == 2) {
		status2 = (phba->ha_copy & (HA_RXMASK << (4*LPFC_EXTRA_RING)));
		status2 >>= (4*LPFC_EXTRA_RING);
	} else
		status2 = 0;

	if ((status1 & HA_RXMASK) || (status2 & HA_RXMASK))
		fp_irq_rc = lpfc_sli_fp_intr_handler(irq, dev_id);
	else
		fp_irq_rc = IRQ_NONE;

	/* Return device-level interrupt handling status */
	return (sp_irq_rc == IRQ_HANDLED) ? sp_irq_rc : fp_irq_rc;
}
11423
11424
11425
11426
11427
11428
11429
11430
11431void lpfc_sli4_fcp_xri_abort_event_proc(struct lpfc_hba *phba)
11432{
11433 struct lpfc_cq_event *cq_event;
11434
11435
11436 spin_lock_irq(&phba->hbalock);
11437 phba->hba_flag &= ~FCP_XRI_ABORT_EVENT;
11438 spin_unlock_irq(&phba->hbalock);
11439
11440 while (!list_empty(&phba->sli4_hba.sp_fcp_xri_aborted_work_queue)) {
11441
11442 spin_lock_irq(&phba->hbalock);
11443 list_remove_head(&phba->sli4_hba.sp_fcp_xri_aborted_work_queue,
11444 cq_event, struct lpfc_cq_event, list);
11445 spin_unlock_irq(&phba->hbalock);
11446
11447 lpfc_sli4_fcp_xri_aborted(phba, &cq_event->cqe.wcqe_axri);
11448
11449 lpfc_sli4_cq_event_release(phba, cq_event);
11450 }
11451}
11452
11453
11454
11455
11456
11457
11458
11459
11460void lpfc_sli4_els_xri_abort_event_proc(struct lpfc_hba *phba)
11461{
11462 struct lpfc_cq_event *cq_event;
11463
11464
11465 spin_lock_irq(&phba->hbalock);
11466 phba->hba_flag &= ~ELS_XRI_ABORT_EVENT;
11467 spin_unlock_irq(&phba->hbalock);
11468
11469 while (!list_empty(&phba->sli4_hba.sp_els_xri_aborted_work_queue)) {
11470
11471 spin_lock_irq(&phba->hbalock);
11472 list_remove_head(&phba->sli4_hba.sp_els_xri_aborted_work_queue,
11473 cq_event, struct lpfc_cq_event, list);
11474 spin_unlock_irq(&phba->hbalock);
11475
11476 lpfc_sli4_els_xri_aborted(phba, &cq_event->cqe.wcqe_axri);
11477
11478 lpfc_sli4_cq_event_release(phba, cq_event);
11479 }
11480}
11481
11482
11483
11484
11485
11486
11487
11488
11489
11490
11491
11492
/**
 * lpfc_sli4_iocb_param_transfer - Transfer cmd IOCB and WCQE status to rsp IOCB
 * @phba: Pointer to HBA context object.
 * @pIocbIn: Response IOCB being built (destination).
 * @pIocbOut: Command IOCB that completed (source).
 * @wcqe: Work-queue completion entry reported by the HBA.
 *
 * Copies the command IOCB from the 'iocb' member onward into the response
 * IOCB (fields before 'iocb' in pIocbIn are deliberately preserved), then
 * overlays the completion status, residual/parameter words, a clamped
 * response BDE size for ELS/GEN requests, DIF/BlockGuard error state, and
 * the exchange-busy flag from the WCQE.
 **/
static void
lpfc_sli4_iocb_param_transfer(struct lpfc_hba *phba,
			      struct lpfc_iocbq *pIocbIn,
			      struct lpfc_iocbq *pIocbOut,
			      struct lpfc_wcqe_complete *wcqe)
{
	int numBdes, i;
	unsigned long iflags;
	uint32_t status, max_response;
	struct lpfc_dmabuf *dmabuf;
	struct ulp_bde64 *bpl, bde;
	size_t offset = offsetof(struct lpfc_iocbq, iocb);

	/* Bulk-copy everything from the iocb member to the end of the iocbq */
	memcpy((char *)pIocbIn + offset, (char *)pIocbOut + offset,
	       sizeof(struct lpfc_iocbq) - offset);
	/* Map WCQE status into the IOCB status field */
	status = bf_get(lpfc_wcqe_c_status, wcqe);
	pIocbIn->iocb.ulpStatus = (status & LPFC_IOCB_STATUS_MASK);
	if (pIocbOut->iocb_flag & LPFC_IO_FCP)
		/* For FCP RSP errors, word4 carries the residual count */
		if (pIocbIn->iocb.ulpStatus == IOSTAT_FCP_RSP_ERROR)
			pIocbIn->iocb.un.fcpi.fcpi_parm =
					pIocbOut->iocb.un.fcpi.fcpi_parm -
					wcqe->total_data_placed;
		else
			pIocbIn->iocb.un.ulpWord[4] = wcqe->parameter;
	else {
		pIocbIn->iocb.un.ulpWord[4] = wcqe->parameter;
		/* Determine the maximum response length from the command BPL
		 * so that total_data_placed can be sanity-clamped below.
		 */
		switch (pIocbOut->iocb.ulpCommand) {
		case CMD_ELS_REQUEST64_CR:
			/* ELS: second BDE in the BPL is the response buffer */
			dmabuf = (struct lpfc_dmabuf *)pIocbOut->context3;
			bpl  = (struct ulp_bde64 *)dmabuf->virt;
			bde.tus.w = le32_to_cpu(bpl[1].tus.w);
			max_response = bde.tus.f.bdeSize;
			break;
		case CMD_GEN_REQUEST64_CR:
			max_response = 0;
			if (!pIocbOut->context3)
				break;
			/* GEN: sum sizes of all non-64-bit-BDE entries */
			numBdes = pIocbOut->iocb.un.genreq64.bdl.bdeSize/
					sizeof(struct ulp_bde64);
			dmabuf = (struct lpfc_dmabuf *)pIocbOut->context3;
			bpl = (struct ulp_bde64 *)dmabuf->virt;
			for (i = 0; i < numBdes; i++) {
				bde.tus.w = le32_to_cpu(bpl[i].tus.w);
				if (bde.tus.f.bdeFlags != BUFF_TYPE_BDE_64)
					max_response += bde.tus.f.bdeSize;
			}
			break;
		default:
			max_response = wcqe->total_data_placed;
			break;
		}
		/* Report whichever is smaller: what fit or what was placed */
		if (max_response < wcqe->total_data_placed)
			pIocbIn->iocb.un.genreq64.bdl.bdeSize = max_response;
		else
			pIocbIn->iocb.un.genreq64.bdl.bdeSize =
				wcqe->total_data_placed;
	}

	/* Convert BG errors for completion status */
	if (status == CQE_STATUS_DI_ERROR) {
		pIocbIn->iocb.ulpStatus = IOSTAT_LOCAL_REJECT;

		/* Direction of the DIF error selects the DMA error code */
		if (bf_get(lpfc_wcqe_c_bg_edir, wcqe))
			pIocbIn->iocb.un.ulpWord[4] = IOERR_RX_DMA_FAILED;
		else
			pIocbIn->iocb.un.ulpWord[4] = IOERR_TX_DMA_FAILED;

		/* Accumulate the BlockGuard error status bits */
		pIocbIn->iocb.unsli3.sli3_bg.bgstat = 0;
		if (bf_get(lpfc_wcqe_c_bg_ge, wcqe)) /* Guard Check failed */
			pIocbIn->iocb.unsli3.sli3_bg.bgstat |=
				BGS_GUARD_ERR_MASK;
		if (bf_get(lpfc_wcqe_c_bg_ae, wcqe)) /* App Tag Check failed */
			pIocbIn->iocb.unsli3.sli3_bg.bgstat |=
				BGS_APPTAG_ERR_MASK;
		if (bf_get(lpfc_wcqe_c_bg_re, wcqe)) /* Ref Tag Check failed */
			pIocbIn->iocb.unsli3.sli3_bg.bgstat |=
				BGS_REFTAG_ERR_MASK;

		/* Check to see if the HBA reported where the error occurred */
		if (bf_get(lpfc_wcqe_c_bg_tdpv, wcqe)) {
			pIocbIn->iocb.unsli3.sli3_bg.bgstat |=
				BGS_HI_WATER_MARK_PRESENT_MASK;
			pIocbIn->iocb.unsli3.sli3_bg.bghm =
				wcqe->total_data_placed;
		}

		/*
		 * A DI error was reported but no individual error bit was
		 * set: set all three so the upper layer sees an error.
		 */
		if (!pIocbIn->iocb.unsli3.sli3_bg.bgstat)
			pIocbIn->iocb.unsli3.sli3_bg.bgstat |=
				(BGS_REFTAG_ERR_MASK | BGS_APPTAG_ERR_MASK |
				BGS_GUARD_ERR_MASK);
	}

	/* Pick up HBA exchange busy condition */
	if (bf_get(lpfc_wcqe_c_xb, wcqe)) {
		spin_lock_irqsave(&phba->hbalock, iflags);
		pIocbIn->iocb_flag |= LPFC_EXCHANGE_BUSY;
		spin_unlock_irqrestore(&phba->hbalock, iflags);
	}
}
11597
11598
11599
11600
11601
11602
11603
11604
11605
11606
11607
11608
11609static struct lpfc_iocbq *
11610lpfc_sli4_els_wcqe_to_rspiocbq(struct lpfc_hba *phba,
11611 struct lpfc_iocbq *irspiocbq)
11612{
11613 struct lpfc_sli_ring *pring = &phba->sli.ring[LPFC_ELS_RING];
11614 struct lpfc_iocbq *cmdiocbq;
11615 struct lpfc_wcqe_complete *wcqe;
11616 unsigned long iflags;
11617
11618 wcqe = &irspiocbq->cq_event.cqe.wcqe_cmpl;
11619 spin_lock_irqsave(&pring->ring_lock, iflags);
11620 pring->stats.iocb_event++;
11621
11622 cmdiocbq = lpfc_sli_iocbq_lookup_by_tag(phba, pring,
11623 bf_get(lpfc_wcqe_c_request_tag, wcqe));
11624 spin_unlock_irqrestore(&pring->ring_lock, iflags);
11625
11626 if (unlikely(!cmdiocbq)) {
11627 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
11628 "0386 ELS complete with no corresponding "
11629 "cmdiocb: iotag (%d)\n",
11630 bf_get(lpfc_wcqe_c_request_tag, wcqe));
11631 lpfc_sli_release_iocbq(phba, irspiocbq);
11632 return NULL;
11633 }
11634
11635
11636 lpfc_sli4_iocb_param_transfer(phba, irspiocbq, cmdiocbq, wcqe);
11637
11638 return irspiocbq;
11639}
11640
11641
11642
11643
11644
11645
11646
11647
11648
11649
11650
11651static bool
11652lpfc_sli4_sp_handle_async_event(struct lpfc_hba *phba, struct lpfc_mcqe *mcqe)
11653{
11654 struct lpfc_cq_event *cq_event;
11655 unsigned long iflags;
11656
11657 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
11658 "0392 Async Event: word0:x%x, word1:x%x, "
11659 "word2:x%x, word3:x%x\n", mcqe->word0,
11660 mcqe->mcqe_tag0, mcqe->mcqe_tag1, mcqe->trailer);
11661
11662
11663 cq_event = lpfc_sli4_cq_event_alloc(phba);
11664 if (!cq_event) {
11665 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
11666 "0394 Failed to allocate CQ_EVENT entry\n");
11667 return false;
11668 }
11669
11670
11671 memcpy(&cq_event->cqe, mcqe, sizeof(struct lpfc_mcqe));
11672 spin_lock_irqsave(&phba->hbalock, iflags);
11673 list_add_tail(&cq_event->list, &phba->sli4_hba.sp_asynce_work_queue);
11674
11675 phba->hba_flag |= ASYNC_EVENT;
11676 spin_unlock_irqrestore(&phba->hbalock, iflags);
11677
11678 return true;
11679}
11680
11681
11682
11683
11684
11685
11686
11687
11688
11689
11690
/**
 * lpfc_sli4_sp_handle_mbox_event - Handle a mailbox completion MCQE
 * @phba: Pointer to HBA context object.
 * @mcqe: Pointer to mailbox completion queue entry.
 *
 * Completes the active mailbox command: stops the mailbox timeout timer,
 * copies completion data back to the caller's mailbox, propagates MCQE
 * error status, optionally issues a follow-on UNREG_LOGIN for the
 * LPFC_MBX_IMED_UNREG case, releases the mailbox posting token, and wakes
 * the worker thread.  Finally releases the MQ entry the HBA consumed.
 *
 * Return: true if completion work was queued for the worker thread.
 **/
static bool
lpfc_sli4_sp_handle_mbox_event(struct lpfc_hba *phba, struct lpfc_mcqe *mcqe)
{
	uint32_t mcqe_status;
	MAILBOX_t *mbox, *pmbox;
	struct lpfc_mqe *mqe;
	struct lpfc_vport *vport;
	struct lpfc_nodelist *ndlp;
	struct lpfc_dmabuf *mp;
	unsigned long iflags;
	LPFC_MBOXQ_t *pmb;
	bool workposted = false;
	int rc;

	/* If not a mailbox complete MCQE, go release the consumed MQ entry */
	if (!bf_get(lpfc_trailer_completed, mcqe))
		goto out_no_mqe_complete;

	/* Get the reference to the active mbox command */
	spin_lock_irqsave(&phba->hbalock, iflags);
	pmb = phba->sli.mbox_active;
	if (unlikely(!pmb)) {
		lpfc_printf_log(phba, KERN_ERR, LOG_MBOX,
				"1832 No pending MBOX command to handle\n");
		spin_unlock_irqrestore(&phba->hbalock, iflags);
		goto out_no_mqe_complete;
	}
	spin_unlock_irqrestore(&phba->hbalock, iflags);
	mqe = &pmb->u.mqe;
	pmbox = (MAILBOX_t *)&pmb->u.mqe;
	mbox = phba->mbox;
	vport = pmb->vport;

	/* Reset heartbeat timer and stop the mailbox timeout timer */
	phba->last_completion_time = jiffies;
	del_timer(&phba->sli.mbox_tmo);

	/* Move mbox data to caller's mailbox region, do endian swapping */
	if (pmb->mbox_cmpl && mbox)
		lpfc_sli_pcimem_bcopy(mbox, mqe, sizeof(struct lpfc_mqe));

	/*
	 * For mcqe errors, conditionally move a modified error code to
	 * the mbox so that the error will not be missed.
	 */
	mcqe_status = bf_get(lpfc_mcqe_status, mcqe);
	if (mcqe_status != MB_CQE_STATUS_SUCCESS) {
		if (bf_get(lpfc_mqe_status, mqe) == MBX_SUCCESS)
			bf_set(lpfc_mqe_status, mqe,
			       (LPFC_MBX_ERROR_RANGE | mcqe_status));
	}
	if (pmb->mbox_flag & LPFC_MBX_IMED_UNREG) {
		pmb->mbox_flag &= ~LPFC_MBX_IMED_UNREG;
		lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_MBOX_VPORT,
				      "MBOX dflt rpi: status:x%x rpi:x%x",
				      mcqe_status,
				      pmbox->un.varWords[0], 0);
		if (mcqe_status == MB_CQE_STATUS_SUCCESS) {
			mp = (struct lpfc_dmabuf *)(pmb->context1);
			ndlp = (struct lpfc_nodelist *)pmb->context2;
			/* Reuse this mailbox to immediately unregister the
			 * default RPI just registered.
			 */
			lpfc_unreg_login(phba, vport->vpi,
					 pmbox->un.varWords[0], pmb);
			pmb->mbox_cmpl = lpfc_mbx_cmpl_dflt_rpi;
			pmb->context1 = mp;
			pmb->context2 = ndlp;
			pmb->vport = vport;
			rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT);
			if (rc != MBX_BUSY)
				lpfc_printf_log(phba, KERN_ERR, LOG_MBOX |
						LOG_SLI, "0385 rc should "
						"have been MBX_BUSY\n");
			if (rc != MBX_NOT_FINISHED)
				goto send_current_mbox;
		}
	}
	spin_lock_irqsave(&phba->pport->work_port_lock, iflags);
	phba->pport->work_port_events &= ~WORKER_MBOX_TMO;
	spin_unlock_irqrestore(&phba->pport->work_port_lock, iflags);

	/* There is mailbox completion work, queue processing to worker */
	spin_lock_irqsave(&phba->hbalock, iflags);
	__lpfc_mbox_cmpl_put(phba, pmb);
	phba->work_ha |= HA_MBATT;
	spin_unlock_irqrestore(&phba->hbalock, iflags);
	workposted = true;

send_current_mbox:
	spin_lock_irqsave(&phba->hbalock, iflags);
	/* Release the mailbox command posting token */
	phba->sli.sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
	/* Setting active mailbox pointer need to be in sync to flag clear */
	phba->sli.mbox_active = NULL;
	spin_unlock_irqrestore(&phba->hbalock, iflags);
	/* Wake up worker thread to post the next pending mailbox command */
	lpfc_worker_wake_up(phba);
out_no_mqe_complete:
	/* Give the consumed MQ entry back to the mailbox work queue */
	if (bf_get(lpfc_trailer_consumed, mcqe))
		lpfc_sli4_mq_release(phba->sli4_hba.mbx_wq);
	return workposted;
}
11794
11795
11796
11797
11798
11799
11800
11801
11802
11803
11804
11805
11806static bool
11807lpfc_sli4_sp_handle_mcqe(struct lpfc_hba *phba, struct lpfc_cqe *cqe)
11808{
11809 struct lpfc_mcqe mcqe;
11810 bool workposted;
11811
11812
11813 lpfc_sli_pcimem_bcopy(cqe, &mcqe, sizeof(struct lpfc_mcqe));
11814
11815
11816 if (!bf_get(lpfc_trailer_async, &mcqe))
11817 workposted = lpfc_sli4_sp_handle_mbox_event(phba, &mcqe);
11818 else
11819 workposted = lpfc_sli4_sp_handle_async_event(phba, &mcqe);
11820 return workposted;
11821}
11822
11823
11824
11825
11826
11827
11828
11829
11830
11831
11832
11833static bool
11834lpfc_sli4_sp_handle_els_wcqe(struct lpfc_hba *phba, struct lpfc_queue *cq,
11835 struct lpfc_wcqe_complete *wcqe)
11836{
11837 struct lpfc_iocbq *irspiocbq;
11838 unsigned long iflags;
11839 struct lpfc_sli_ring *pring = cq->pring;
11840 int txq_cnt = 0;
11841 int txcmplq_cnt = 0;
11842 int fcp_txcmplq_cnt = 0;
11843
11844
11845 irspiocbq = lpfc_sli_get_iocbq(phba);
11846 if (!irspiocbq) {
11847 if (!list_empty(&pring->txq))
11848 txq_cnt++;
11849 if (!list_empty(&pring->txcmplq))
11850 txcmplq_cnt++;
11851 if (!list_empty(&phba->sli.ring[LPFC_FCP_RING].txcmplq))
11852 fcp_txcmplq_cnt++;
11853 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
11854 "0387 NO IOCBQ data: txq_cnt=%d iocb_cnt=%d "
11855 "fcp_txcmplq_cnt=%d, els_txcmplq_cnt=%d\n",
11856 txq_cnt, phba->iocb_cnt,
11857 fcp_txcmplq_cnt,
11858 txcmplq_cnt);
11859 return false;
11860 }
11861
11862
11863 memcpy(&irspiocbq->cq_event.cqe.wcqe_cmpl, wcqe, sizeof(*wcqe));
11864 spin_lock_irqsave(&phba->hbalock, iflags);
11865 list_add_tail(&irspiocbq->cq_event.list,
11866 &phba->sli4_hba.sp_queue_event);
11867 phba->hba_flag |= HBA_SP_QUEUE_EVT;
11868 spin_unlock_irqrestore(&phba->hbalock, iflags);
11869
11870 return true;
11871}
11872
11873
11874
11875
11876
11877
11878
11879
11880
11881static void
11882lpfc_sli4_sp_handle_rel_wcqe(struct lpfc_hba *phba,
11883 struct lpfc_wcqe_release *wcqe)
11884{
11885
11886 if (unlikely(!phba->sli4_hba.els_wq))
11887 return;
11888
11889 if (bf_get(lpfc_wcqe_r_wq_id, wcqe) == phba->sli4_hba.els_wq->queue_id)
11890 lpfc_sli4_wq_release(phba->sli4_hba.els_wq,
11891 bf_get(lpfc_wcqe_r_wqe_index, wcqe));
11892 else
11893 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
11894 "2579 Slow-path wqe consume event carries "
11895 "miss-matched qid: wcqe-qid=x%x, sp-qid=x%x\n",
11896 bf_get(lpfc_wcqe_r_wqe_index, wcqe),
11897 phba->sli4_hba.els_wq->queue_id);
11898}
11899
11900
11901
11902
11903
11904
11905
11906
11907
11908
11909
11910static bool
11911lpfc_sli4_sp_handle_abort_xri_wcqe(struct lpfc_hba *phba,
11912 struct lpfc_queue *cq,
11913 struct sli4_wcqe_xri_aborted *wcqe)
11914{
11915 bool workposted = false;
11916 struct lpfc_cq_event *cq_event;
11917 unsigned long iflags;
11918
11919
11920 cq_event = lpfc_sli4_cq_event_alloc(phba);
11921 if (!cq_event) {
11922 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
11923 "0602 Failed to allocate CQ_EVENT entry\n");
11924 return false;
11925 }
11926
11927
11928 memcpy(&cq_event->cqe, wcqe, sizeof(struct sli4_wcqe_xri_aborted));
11929 switch (cq->subtype) {
11930 case LPFC_FCP:
11931 spin_lock_irqsave(&phba->hbalock, iflags);
11932 list_add_tail(&cq_event->list,
11933 &phba->sli4_hba.sp_fcp_xri_aborted_work_queue);
11934
11935 phba->hba_flag |= FCP_XRI_ABORT_EVENT;
11936 spin_unlock_irqrestore(&phba->hbalock, iflags);
11937 workposted = true;
11938 break;
11939 case LPFC_ELS:
11940 spin_lock_irqsave(&phba->hbalock, iflags);
11941 list_add_tail(&cq_event->list,
11942 &phba->sli4_hba.sp_els_xri_aborted_work_queue);
11943
11944 phba->hba_flag |= ELS_XRI_ABORT_EVENT;
11945 spin_unlock_irqrestore(&phba->hbalock, iflags);
11946 workposted = true;
11947 break;
11948 default:
11949 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
11950 "0603 Invalid work queue CQE subtype (x%x)\n",
11951 cq->subtype);
11952 workposted = false;
11953 break;
11954 }
11955 return workposted;
11956}
11957
11958
11959
11960
11961
11962
11963
11964
11965
11966
/**
 * lpfc_sli4_sp_handle_rcqe - Process an unsolicited receive-queue completion
 * @phba: Pointer to HBA context object.
 * @rcqe: Pointer to the receive-queue completion entry.
 *
 * For a successful (or truncated) receive, pulls a buffer off the HBQ
 * buffer list, copies the RCQE into it and posts it on the slow-path
 * queue-event list for the worker thread.  When the HBA reports buffer
 * starvation, flags HBA_POST_RECEIVE_BUFFER so the worker replenishes.
 *
 * Return: true when work was posted to the worker thread.
 **/
static bool
lpfc_sli4_sp_handle_rcqe(struct lpfc_hba *phba, struct lpfc_rcqe *rcqe)
{
	bool workposted = false;
	struct lpfc_queue *hrq = phba->sli4_hba.hdr_rq;
	struct lpfc_queue *drq = phba->sli4_hba.dat_rq;
	struct hbq_dmabuf *dma_buf;
	uint32_t status, rq_id;
	unsigned long iflags;

	/* sanity check on queue memory */
	if (unlikely(!hrq) || unlikely(!drq))
		return workposted;

	/* The V1 RCQE carries the RQ id in a different field */
	if (bf_get(lpfc_cqe_code, rcqe) == CQE_CODE_RECEIVE_V1)
		rq_id = bf_get(lpfc_rcqe_rq_id_v1, rcqe);
	else
		rq_id = bf_get(lpfc_rcqe_rq_id, rcqe);
	if (rq_id != hrq->queue_id)
		goto out;

	status = bf_get(lpfc_rcqe_status, rcqe);
	switch (status) {
	case FC_STATUS_RQ_BUF_LEN_EXCEEDED:
		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
				"2537 Receive Frame Truncated!!\n");
		hrq->RQ_buf_trunc++;
		/* fall through - truncated frames are still handed to ULP */
	case FC_STATUS_RQ_SUCCESS:
		lpfc_sli4_rq_release(hrq, drq);
		spin_lock_irqsave(&phba->hbalock, iflags);
		dma_buf = lpfc_sli_hbqbuf_get(&phba->hbqs[0].hbq_buffer_list);
		if (!dma_buf) {
			hrq->RQ_no_buf_found++;
			spin_unlock_irqrestore(&phba->hbalock, iflags);
			goto out;
		}
		hrq->RQ_rcv_buf++;
		memcpy(&dma_buf->cq_event.cqe.rcqe_cmpl, rcqe, sizeof(*rcqe));
		/* save off the frame for the worker thread to process */
		list_add_tail(&dma_buf->cq_event.list,
			      &phba->sli4_hba.sp_queue_event);
		/* Frame received */
		phba->hba_flag |= HBA_SP_QUEUE_EVT;
		spin_unlock_irqrestore(&phba->hbalock, iflags);
		workposted = true;
		break;
	case FC_STATUS_INSUFF_BUF_NEED_BUF:
	case FC_STATUS_INSUFF_BUF_FRM_DISC:
		hrq->RQ_no_posted_buf++;
		/* Post more buffers if possible */
		spin_lock_irqsave(&phba->hbalock, iflags);
		phba->hba_flag |= HBA_POST_RECEIVE_BUFFER;
		spin_unlock_irqrestore(&phba->hbalock, iflags);
		workposted = true;
		break;
	}
out:
	return workposted;
}
12026
12027
12028
12029
12030
12031
12032
12033
12034
12035
12036
12037
12038static bool
12039lpfc_sli4_sp_handle_cqe(struct lpfc_hba *phba, struct lpfc_queue *cq,
12040 struct lpfc_cqe *cqe)
12041{
12042 struct lpfc_cqe cqevt;
12043 bool workposted = false;
12044
12045
12046 lpfc_sli_pcimem_bcopy(cqe, &cqevt, sizeof(struct lpfc_cqe));
12047
12048
12049 switch (bf_get(lpfc_cqe_code, &cqevt)) {
12050 case CQE_CODE_COMPL_WQE:
12051
12052 phba->last_completion_time = jiffies;
12053 workposted = lpfc_sli4_sp_handle_els_wcqe(phba, cq,
12054 (struct lpfc_wcqe_complete *)&cqevt);
12055 break;
12056 case CQE_CODE_RELEASE_WQE:
12057
12058 lpfc_sli4_sp_handle_rel_wcqe(phba,
12059 (struct lpfc_wcqe_release *)&cqevt);
12060 break;
12061 case CQE_CODE_XRI_ABORTED:
12062
12063 phba->last_completion_time = jiffies;
12064 workposted = lpfc_sli4_sp_handle_abort_xri_wcqe(phba, cq,
12065 (struct sli4_wcqe_xri_aborted *)&cqevt);
12066 break;
12067 case CQE_CODE_RECEIVE:
12068 case CQE_CODE_RECEIVE_V1:
12069
12070 phba->last_completion_time = jiffies;
12071 workposted = lpfc_sli4_sp_handle_rcqe(phba,
12072 (struct lpfc_rcqe *)&cqevt);
12073 break;
12074 default:
12075 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
12076 "0388 Not a valid WCQE code: x%x\n",
12077 bf_get(lpfc_cqe_code, &cqevt));
12078 break;
12079 }
12080 return workposted;
12081}
12082
12083
12084
12085
12086
12087
12088
12089
12090
12091
12092
12093
12094
12095
/**
 * lpfc_sli4_sp_handle_eqe - Process a slow-path event queue entry
 * @phba: Pointer to HBA context object.
 * @eqe: Pointer to the event queue entry.
 * @speq: Pointer to slow-path event queue the EQE came from.
 *
 * Finds the child completion queue the EQE refers to and drains it,
 * dispatching each CQE to the mailbox handler (LPFC_MCQ) or to the
 * fast/slow-path work-queue handlers (LPFC_WCQ).  Consumed entries are
 * periodically released without rearm while draining; the CQ is rearmed
 * once at the end.  Wakes the worker thread if any handler queued work.
 **/
static void
lpfc_sli4_sp_handle_eqe(struct lpfc_hba *phba, struct lpfc_eqe *eqe,
	struct lpfc_queue *speq)
{
	struct lpfc_queue *cq = NULL, *childq;
	struct lpfc_cqe *cqe;
	bool workposted = false;
	int ecount = 0;
	uint16_t cqid;

	/* Get the reference to the corresponding CQ */
	cqid = bf_get_le32(lpfc_eqe_resource_id, eqe);

	list_for_each_entry(childq, &speq->child_list, list) {
		if (childq->queue_id == cqid) {
			cq = childq;
			break;
		}
	}
	if (unlikely(!cq)) {
		if (phba->sli.sli_flag & LPFC_SLI_ACTIVE)
			lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
					"0365 Slow-path CQ identifier "
					"(%d) does not exist\n", cqid);
		return;
	}

	/* Process all the entries to the CQ */
	switch (cq->type) {
	case LPFC_MCQ:
		while ((cqe = lpfc_sli4_cq_get(cq))) {
			workposted |= lpfc_sli4_sp_handle_mcqe(phba, cqe);
			/* Periodically release consumed entries (no rearm) */
			if (!(++ecount % cq->entry_repost))
				lpfc_sli4_cq_release(cq, LPFC_QUEUE_NOARM);
			cq->CQ_mbox++;
		}
		break;
	case LPFC_WCQ:
		while ((cqe = lpfc_sli4_cq_get(cq))) {
			/* FCP subtype entries go to the fast-path handler */
			if (cq->subtype == LPFC_FCP)
				workposted |= lpfc_sli4_fp_handle_wcqe(phba, cq,
								       cqe);
			else
				workposted |= lpfc_sli4_sp_handle_cqe(phba, cq,
								      cqe);
			if (!(++ecount % cq->entry_repost))
				lpfc_sli4_cq_release(cq, LPFC_QUEUE_NOARM);
		}

		/* Track the max number of CQEs processed in 1 EQ */
		if (ecount > cq->CQ_max_cqe)
			cq->CQ_max_cqe = ecount;
		break;
	default:
		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
				"0370 Invalid completion queue type (%d)\n",
				cq->type);
		return;
	}

	/* Catch the no cq entry condition, log an error */
	if (unlikely(ecount == 0))
		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
				"0371 No entry from the CQ: identifier "
				"(x%x), type (%d)\n", cq->queue_id, cq->type);

	/* In any case, flash and re-arm the RCQ */
	lpfc_sli4_cq_release(cq, LPFC_QUEUE_REARM);

	/* wake up worker thread if there are works to be done */
	if (workposted)
		lpfc_worker_wake_up(phba);
}
12169
12170
12171
12172
12173
12174
12175
12176
12177
12178
/**
 * lpfc_sli4_fp_handle_fcp_wcqe - Process a fast-path FCP work-queue completion
 * @phba: Pointer to HBA context object.
 * @cq: Completion queue the WCQE arrived on.
 * @wcqe: Pointer to the work-queue completion entry.
 *
 * Looks up the command IOCB by the WCQE request tag, builds a response
 * IOCB on the stack from the command plus the WCQE fields, clears the
 * driver-aborted flag if set, and invokes the command's completion
 * callback.  Resource-shortage errors trigger queue-depth ramp-down.
 **/
static void
lpfc_sli4_fp_handle_fcp_wcqe(struct lpfc_hba *phba, struct lpfc_queue *cq,
			     struct lpfc_wcqe_complete *wcqe)
{
	struct lpfc_sli_ring *pring = cq->pring;
	struct lpfc_iocbq *cmdiocbq;
	struct lpfc_iocbq irspiocbq;	/* response IOCB built on the stack */
	unsigned long iflags;

	/* Check for response status */
	if (unlikely(bf_get(lpfc_wcqe_c_status, wcqe))) {
		/* If resource errors reported from HBA, reduce queue depth */
		if (((bf_get(lpfc_wcqe_c_status, wcqe) ==
		     IOSTAT_LOCAL_REJECT)) &&
		    ((wcqe->parameter & IOERR_PARAM_MASK) ==
		     IOERR_NO_RESOURCES))
			phba->lpfc_rampdown_queue_depth(phba);

		/* Log the error status */
		lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
				"0373 FCP complete error: status=x%x, "
				"hw_status=x%x, total_data_specified=%d, "
				"parameter=x%x, word3=x%x\n",
				bf_get(lpfc_wcqe_c_status, wcqe),
				bf_get(lpfc_wcqe_c_hw_status, wcqe),
				wcqe->total_data_placed, wcqe->parameter,
				wcqe->word3);
	}

	/* Look up the FCP command IOCB by its request tag */
	spin_lock_irqsave(&pring->ring_lock, iflags);
	pring->stats.iocb_event++;
	cmdiocbq = lpfc_sli_iocbq_lookup_by_tag(phba, pring,
				bf_get(lpfc_wcqe_c_request_tag, wcqe));
	spin_unlock_irqrestore(&pring->ring_lock, iflags);
	if (unlikely(!cmdiocbq)) {
		lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
				"0374 FCP complete with no corresponding "
				"cmdiocb: iotag (%d)\n",
				bf_get(lpfc_wcqe_c_request_tag, wcqe));
		return;
	}
	if (unlikely(!cmdiocbq->iocb_cmpl)) {
		lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
				"0375 FCP cmdiocb not callback function "
				"iotag: (%d)\n",
				bf_get(lpfc_wcqe_c_request_tag, wcqe));
		return;
	}

	/* Fake the irspiocb and copy necessary response information */
	lpfc_sli4_iocb_param_transfer(phba, &irspiocbq, cmdiocbq, wcqe);

	if (cmdiocbq->iocb_flag & LPFC_DRIVER_ABORTED) {
		spin_lock_irqsave(&phba->hbalock, iflags);
		cmdiocbq->iocb_flag &= ~LPFC_DRIVER_ABORTED;
		spin_unlock_irqrestore(&phba->hbalock, iflags);
	}

	/* Pass the cmd_iocb and the rsp state to the upper layer */
	(cmdiocbq->iocb_cmpl)(phba, cmdiocbq, &irspiocbq);
}
12243
12244
12245
12246
12247
12248
12249
12250
12251
12252
12253static void
12254lpfc_sli4_fp_handle_rel_wcqe(struct lpfc_hba *phba, struct lpfc_queue *cq,
12255 struct lpfc_wcqe_release *wcqe)
12256{
12257 struct lpfc_queue *childwq;
12258 bool wqid_matched = false;
12259 uint16_t fcp_wqid;
12260
12261
12262 fcp_wqid = bf_get(lpfc_wcqe_r_wq_id, wcqe);
12263 list_for_each_entry(childwq, &cq->child_list, list) {
12264 if (childwq->queue_id == fcp_wqid) {
12265 lpfc_sli4_wq_release(childwq,
12266 bf_get(lpfc_wcqe_r_wqe_index, wcqe));
12267 wqid_matched = true;
12268 break;
12269 }
12270 }
12271
12272 if (wqid_matched != true)
12273 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
12274 "2580 Fast-path wqe consume event carries "
12275 "miss-matched qid: wcqe-qid=x%x\n", fcp_wqid);
12276}
12277
12278
12279
12280
12281
12282
12283
12284
12285
12286static int
12287lpfc_sli4_fp_handle_wcqe(struct lpfc_hba *phba, struct lpfc_queue *cq,
12288 struct lpfc_cqe *cqe)
12289{
12290 struct lpfc_wcqe_release wcqe;
12291 bool workposted = false;
12292
12293
12294 lpfc_sli_pcimem_bcopy(cqe, &wcqe, sizeof(struct lpfc_cqe));
12295
12296
12297 switch (bf_get(lpfc_wcqe_c_code, &wcqe)) {
12298 case CQE_CODE_COMPL_WQE:
12299 cq->CQ_wq++;
12300
12301 phba->last_completion_time = jiffies;
12302 lpfc_sli4_fp_handle_fcp_wcqe(phba, cq,
12303 (struct lpfc_wcqe_complete *)&wcqe);
12304 break;
12305 case CQE_CODE_RELEASE_WQE:
12306 cq->CQ_release_wqe++;
12307
12308 lpfc_sli4_fp_handle_rel_wcqe(phba, cq,
12309 (struct lpfc_wcqe_release *)&wcqe);
12310 break;
12311 case CQE_CODE_XRI_ABORTED:
12312 cq->CQ_xri_aborted++;
12313
12314 phba->last_completion_time = jiffies;
12315 workposted = lpfc_sli4_sp_handle_abort_xri_wcqe(phba, cq,
12316 (struct sli4_wcqe_xri_aborted *)&wcqe);
12317 break;
12318 default:
12319 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
12320 "0144 Not a valid WCQE code: x%x\n",
12321 bf_get(lpfc_wcqe_c_code, &wcqe));
12322 break;
12323 }
12324 return workposted;
12325}
12326
12327
12328
12329
12330
12331
12332
12333
12334
12335
12336
12337
12338
/**
 * lpfc_sli4_hba_handle_eqe - Process a fast-path event queue entry
 * @phba: Pointer to HBA context object.
 * @eqe: Pointer to the event queue entry.
 * @qidx: Index of the fast-path EQ/CQ pair this EQE belongs to.
 *
 * Validates the EQE, resolves its CQ id against the fast-path CQ map
 * (EQEs not destined for the fast-path CQ are re-routed to the slow-path
 * handler), then drains the CQ, dispatching each entry through
 * lpfc_sli4_fp_handle_wcqe().  Consumed entries are periodically released
 * without rearm; the CQ is rearmed once at the end.  Wakes the worker
 * thread if any handler queued work.
 **/
static void
lpfc_sli4_hba_handle_eqe(struct lpfc_hba *phba, struct lpfc_eqe *eqe,
			uint32_t qidx)
{
	struct lpfc_queue *cq;
	struct lpfc_cqe *cqe;
	bool workposted = false;
	uint16_t cqid;
	int ecount = 0;

	if (unlikely(bf_get_le32(lpfc_eqe_major_code, eqe) != 0)) {
		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
				"0366 Not a valid completion "
				"event: majorcode=x%x, minorcode=x%x\n",
				bf_get_le32(lpfc_eqe_major_code, eqe),
				bf_get_le32(lpfc_eqe_minor_code, eqe));
		return;
	}

	/* Get the reference to the corresponding CQ */
	cqid = bf_get_le32(lpfc_eqe_resource_id, eqe);

	/* Check if this is a Slow path event: hand off if the CQ id does
	 * not match this index's fast-path CQ.
	 */
	if (unlikely(cqid != phba->sli4_hba.fcp_cq_map[qidx])) {
		lpfc_sli4_sp_handle_eqe(phba, eqe,
			phba->sli4_hba.hba_eq[qidx]);
		return;
	}

	if (unlikely(!phba->sli4_hba.fcp_cq)) {
		lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
				"3146 Fast-path completion queues "
				"does not exist\n");
		return;
	}
	cq = phba->sli4_hba.fcp_cq[qidx];
	if (unlikely(!cq)) {
		if (phba->sli.sli_flag & LPFC_SLI_ACTIVE)
			lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
					"0367 Fast-path completion queue "
					"(%d) does not exist\n", qidx);
		return;
	}

	if (unlikely(cqid != cq->queue_id)) {
		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
				"0368 Miss-matched fast-path completion "
				"queue identifier: eqcqid=%d, fcpcqid=%d\n",
				cqid, cq->queue_id);
		return;
	}

	/* Process all the entries to the CQ */
	while ((cqe = lpfc_sli4_cq_get(cq))) {
		workposted |= lpfc_sli4_fp_handle_wcqe(phba, cq, cqe);
		/* Periodically release consumed entries (no rearm) */
		if (!(++ecount % cq->entry_repost))
			lpfc_sli4_cq_release(cq, LPFC_QUEUE_NOARM);
	}

	/* Track the max number of CQEs processed in 1 EQ */
	if (ecount > cq->CQ_max_cqe)
		cq->CQ_max_cqe = ecount;

	/* Catch the no cq entry condition */
	if (unlikely(ecount == 0))
		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
				"0369 No entry from fast-path completion "
				"queue fcpcqid=%d\n", cq->queue_id);

	/* In any case, flash and re-arm the CQ */
	lpfc_sli4_cq_release(cq, LPFC_QUEUE_REARM);

	/* wake up worker thread if there are works to be done */
	if (workposted)
		lpfc_worker_wake_up(phba);
}
12415
12416static void
12417lpfc_sli4_eq_flush(struct lpfc_hba *phba, struct lpfc_queue *eq)
12418{
12419 struct lpfc_eqe *eqe;
12420
12421
12422 while ((eqe = lpfc_sli4_eq_get(eq)))
12423 ;
12424
12425
12426 lpfc_sli4_eq_release(eq, LPFC_QUEUE_REARM);
12427}
12428
12429
12430
12431
12432
12433
12434
12435
12436
12437
12438
12439
12440
12441
12442
12443static void
12444lpfc_sli4_fof_handle_eqe(struct lpfc_hba *phba, struct lpfc_eqe *eqe)
12445{
12446 struct lpfc_queue *cq;
12447 struct lpfc_cqe *cqe;
12448 bool workposted = false;
12449 uint16_t cqid;
12450 int ecount = 0;
12451
12452 if (unlikely(bf_get_le32(lpfc_eqe_major_code, eqe) != 0)) {
12453 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
12454 "9147 Not a valid completion "
12455 "event: majorcode=x%x, minorcode=x%x\n",
12456 bf_get_le32(lpfc_eqe_major_code, eqe),
12457 bf_get_le32(lpfc_eqe_minor_code, eqe));
12458 return;
12459 }
12460
12461
12462 cqid = bf_get_le32(lpfc_eqe_resource_id, eqe);
12463
12464
12465 cq = phba->sli4_hba.oas_cq;
12466 if (unlikely(!cq)) {
12467 if (phba->sli.sli_flag & LPFC_SLI_ACTIVE)
12468 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
12469 "9148 OAS completion queue "
12470 "does not exist\n");
12471 return;
12472 }
12473
12474 if (unlikely(cqid != cq->queue_id)) {
12475 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
12476 "9149 Miss-matched fast-path compl "
12477 "queue id: eqcqid=%d, fcpcqid=%d\n",
12478 cqid, cq->queue_id);
12479 return;
12480 }
12481
12482
12483 while ((cqe = lpfc_sli4_cq_get(cq))) {
12484 workposted |= lpfc_sli4_fp_handle_wcqe(phba, cq, cqe);
12485 if (!(++ecount % cq->entry_repost))
12486 lpfc_sli4_cq_release(cq, LPFC_QUEUE_NOARM);
12487 }
12488
12489
12490 if (ecount > cq->CQ_max_cqe)
12491 cq->CQ_max_cqe = ecount;
12492
12493
12494 if (unlikely(ecount == 0))
12495 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
12496 "9153 No entry from fast-path completion "
12497 "queue fcpcqid=%d\n", cq->queue_id);
12498
12499
12500 lpfc_sli4_cq_release(cq, LPFC_QUEUE_REARM);
12501
12502
12503 if (workposted)
12504 lpfc_worker_wake_up(phba);
12505}
12506
12507
12508
12509
12510
12511
12512
12513
12514
12515
12516
12517
12518
12519
12520
12521
12522
12523
12524
12525
12526
12527
/**
 * lpfc_sli4_fof_intr_handler - OAS (fof) event queue interrupt handler
 * @irq: interrupt line number (unused beyond the IRQ core contract).
 * @dev_id: the struct lpfc_fcp_eq_hdl registered for this vector.
 *
 * Drains the fof EQ, dispatching each EQE to lpfc_sli4_fof_handle_eqe(),
 * and re-arms the EQ before returning.
 *
 * Return: IRQ_HANDLED when the EQ was serviced; IRQ_NONE when the device
 * context is missing/not ready, or when a non-MSI-X interrupt found no EQE
 * (shared line — likely not ours).
 */
irqreturn_t
lpfc_sli4_fof_intr_handler(int irq, void *dev_id)
{
	struct lpfc_hba *phba;
	struct lpfc_fcp_eq_hdl *fcp_eq_hdl;
	struct lpfc_queue *eq;
	struct lpfc_eqe *eqe;
	unsigned long iflag;
	int ecount = 0;

	/* Recover the driver context from the registered dev_id. */
	fcp_eq_hdl = (struct lpfc_fcp_eq_hdl *)dev_id;
	phba = fcp_eq_hdl->phba;

	if (unlikely(!phba))
		return IRQ_NONE;

	/* The fof EQ may not have been set up for this adapter. */
	eq = phba->sli4_hba.fof_eq;
	if (unlikely(!eq))
		return IRQ_NONE;

	/* Device not in a state to handle interrupts. */
	if (unlikely(lpfc_intr_state_check(phba))) {
		eq->EQ_badstate++;

		/* Re-check link_state under the lock before flushing. */
		spin_lock_irqsave(&phba->hbalock, iflag);
		if (phba->link_state < LPFC_LINK_DOWN)
			/* Drain without processing, then re-arm. */
			lpfc_sli4_eq_flush(phba, eq);
		spin_unlock_irqrestore(&phba->hbalock, iflag);
		return IRQ_NONE;
	}

	/*
	 * Process all pending EQEs, giving batches back to the port
	 * (un-armed) every entry_repost entries.
	 */
	while ((eqe = lpfc_sli4_eq_get(eq))) {
		lpfc_sli4_fof_handle_eqe(phba, eqe);
		if (!(++ecount % eq->entry_repost))
			lpfc_sli4_eq_release(eq, LPFC_QUEUE_NOARM);
		eq->EQ_processed++;
	}

	/* Track the max number of EQEs processed in one interrupt. */
	if (ecount > eq->EQ_max_eqe)
		eq->EQ_max_eqe = ecount;

	/* No entries found: spurious or shared-line interrupt. */
	if (unlikely(ecount == 0)) {
		eq->EQ_no_entry++;

		if (phba->intr_type == MSIX)
			/* MSI-X: log only; still re-arm the EQ below. */
			lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
					"9145 MSI-X interrupt with no EQE\n");
		else {
			lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
					"9146 ISR interrupt with no EQE\n");
			/* Non-MSI-X: not ours; note the EQ is NOT re-armed. */
			return IRQ_NONE;
		}
	}

	/* Release consumed entries and re-arm the fof EQ. */
	lpfc_sli4_eq_release(eq, LPFC_QUEUE_REARM);
	return IRQ_HANDLED;
}
12595
12596
12597
12598
12599
12600
12601
12602
12603
12604
12605
12606
12607
12608
12609
12610
12611
12612
12613
12614
12615
12616
12617
12618
12619
12620
12621
12622irqreturn_t
12623lpfc_sli4_hba_intr_handler(int irq, void *dev_id)
12624{
12625 struct lpfc_hba *phba;
12626 struct lpfc_fcp_eq_hdl *fcp_eq_hdl;
12627 struct lpfc_queue *fpeq;
12628 struct lpfc_eqe *eqe;
12629 unsigned long iflag;
12630 int ecount = 0;
12631 int fcp_eqidx;
12632
12633
12634 fcp_eq_hdl = (struct lpfc_fcp_eq_hdl *)dev_id;
12635 phba = fcp_eq_hdl->phba;
12636 fcp_eqidx = fcp_eq_hdl->idx;
12637
12638 if (unlikely(!phba))
12639 return IRQ_NONE;
12640 if (unlikely(!phba->sli4_hba.hba_eq))
12641 return IRQ_NONE;
12642
12643
12644 fpeq = phba->sli4_hba.hba_eq[fcp_eqidx];
12645 if (unlikely(!fpeq))
12646 return IRQ_NONE;
12647
12648 if (lpfc_fcp_look_ahead) {
12649 if (atomic_dec_and_test(&fcp_eq_hdl->fcp_eq_in_use))
12650 lpfc_sli4_eq_clr_intr(fpeq);
12651 else {
12652 atomic_inc(&fcp_eq_hdl->fcp_eq_in_use);
12653 return IRQ_NONE;
12654 }
12655 }
12656
12657
12658 if (unlikely(lpfc_intr_state_check(phba))) {
12659 fpeq->EQ_badstate++;
12660
12661 spin_lock_irqsave(&phba->hbalock, iflag);
12662 if (phba->link_state < LPFC_LINK_DOWN)
12663
12664 lpfc_sli4_eq_flush(phba, fpeq);
12665 spin_unlock_irqrestore(&phba->hbalock, iflag);
12666 if (lpfc_fcp_look_ahead)
12667 atomic_inc(&fcp_eq_hdl->fcp_eq_in_use);
12668 return IRQ_NONE;
12669 }
12670
12671
12672
12673
12674 while ((eqe = lpfc_sli4_eq_get(fpeq))) {
12675 if (eqe == NULL)
12676 break;
12677
12678 lpfc_sli4_hba_handle_eqe(phba, eqe, fcp_eqidx);
12679 if (!(++ecount % fpeq->entry_repost))
12680 lpfc_sli4_eq_release(fpeq, LPFC_QUEUE_NOARM);
12681 fpeq->EQ_processed++;
12682 }
12683
12684
12685 if (ecount > fpeq->EQ_max_eqe)
12686 fpeq->EQ_max_eqe = ecount;
12687
12688
12689 lpfc_sli4_eq_release(fpeq, LPFC_QUEUE_REARM);
12690
12691 if (unlikely(ecount == 0)) {
12692 fpeq->EQ_no_entry++;
12693
12694 if (lpfc_fcp_look_ahead) {
12695 atomic_inc(&fcp_eq_hdl->fcp_eq_in_use);
12696 return IRQ_NONE;
12697 }
12698
12699 if (phba->intr_type == MSIX)
12700
12701 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
12702 "0358 MSI-X interrupt with no EQE\n");
12703 else
12704
12705 return IRQ_NONE;
12706 }
12707
12708 if (lpfc_fcp_look_ahead)
12709 atomic_inc(&fcp_eq_hdl->fcp_eq_in_use);
12710 return IRQ_HANDLED;
12711}
12712
12713
12714
12715
12716
12717
12718
12719
12720
12721
12722
12723
12724
12725
12726
12727
12728
12729
12730irqreturn_t
12731lpfc_sli4_intr_handler(int irq, void *dev_id)
12732{
12733 struct lpfc_hba *phba;
12734 irqreturn_t hba_irq_rc;
12735 bool hba_handled = false;
12736 int fcp_eqidx;
12737
12738
12739 phba = (struct lpfc_hba *)dev_id;
12740
12741 if (unlikely(!phba))
12742 return IRQ_NONE;
12743
12744
12745
12746
12747 for (fcp_eqidx = 0; fcp_eqidx < phba->cfg_fcp_io_channel; fcp_eqidx++) {
12748 hba_irq_rc = lpfc_sli4_hba_intr_handler(irq,
12749 &phba->sli4_hba.fcp_eq_hdl[fcp_eqidx]);
12750 if (hba_irq_rc == IRQ_HANDLED)
12751 hba_handled |= true;
12752 }
12753
12754 if (phba->cfg_fof) {
12755 hba_irq_rc = lpfc_sli4_fof_intr_handler(irq,
12756 &phba->sli4_hba.fcp_eq_hdl[0]);
12757 if (hba_irq_rc == IRQ_HANDLED)
12758 hba_handled |= true;
12759 }
12760
12761 return (hba_handled == true) ? IRQ_HANDLED : IRQ_NONE;
12762}
12763
12764
12765
12766
12767
12768
12769
12770
12771
12772void
12773lpfc_sli4_queue_free(struct lpfc_queue *queue)
12774{
12775 struct lpfc_dmabuf *dmabuf;
12776
12777 if (!queue)
12778 return;
12779
12780 while (!list_empty(&queue->page_list)) {
12781 list_remove_head(&queue->page_list, dmabuf, struct lpfc_dmabuf,
12782 list);
12783 dma_free_coherent(&queue->phba->pcidev->dev, SLI4_PAGE_SIZE,
12784 dmabuf->virt, dmabuf->phys);
12785 kfree(dmabuf);
12786 }
12787 kfree(queue);
12788 return;
12789}
12790
12791
12792
12793
12794
12795
12796
12797
12798
12799
12800
/**
 * lpfc_sli4_queue_alloc - Allocate and initialize a generic SLI-4 queue
 * @phba: HBA context.
 * @entry_size: size in bytes of one queue entry.
 * @entry_count: number of entries the queue must hold.
 *
 * Allocates the queue tracking structure (with an embedded entry-address
 * table sized for @entry_count), then enough DMA-coherent pages to hold all
 * entries, recording the kernel virtual address of every entry.
 *
 * Return: the new queue (caller frees with lpfc_sli4_queue_free()), or
 * NULL on any allocation failure (partial builds are torn down).
 */
struct lpfc_queue *
lpfc_sli4_queue_alloc(struct lpfc_hba *phba, uint32_t entry_size,
		      uint32_t entry_count)
{
	struct lpfc_queue *queue;
	struct lpfc_dmabuf *dmabuf;
	int x, total_qe_count;
	void *dma_pointer;
	uint32_t hw_page_size = phba->sli4_hba.pc_sli4_params.if_page_sz;

	if (!phba->sli4_hba.pc_sli4_params.supported)
		hw_page_size = SLI4_PAGE_SIZE;

	/* Single allocation: queue header plus entry-pointer table. */
	queue = kzalloc(sizeof(struct lpfc_queue) +
			(sizeof(union sli4_qe) * entry_count), GFP_KERNEL);
	if (!queue)
		return NULL;
	/* Pages needed to cover entry_size * entry_count, rounded up. */
	queue->page_count = (ALIGN(entry_size * entry_count,
			hw_page_size))/hw_page_size;
	INIT_LIST_HEAD(&queue->list);
	INIT_LIST_HEAD(&queue->page_list);
	INIT_LIST_HEAD(&queue->child_list);
	for (x = 0, total_qe_count = 0; x < queue->page_count; x++) {
		dmabuf = kzalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
		if (!dmabuf)
			goto out_fail;
		/* NOTE(review): pages are allocated hw_page_size bytes but
		 * lpfc_sli4_queue_free() passes SLI4_PAGE_SIZE to
		 * dma_free_coherent(); these agree only when
		 * if_page_sz == SLI4_PAGE_SIZE — confirm.
		 */
		dmabuf->virt = dma_zalloc_coherent(&phba->pcidev->dev,
						   hw_page_size, &dmabuf->phys,
						   GFP_KERNEL);
		if (!dmabuf->virt) {
			kfree(dmabuf);
			goto out_fail;
		}
		dmabuf->buffer_tag = x;
		list_add_tail(&dmabuf->list, &queue->page_list);

		/* Record the virtual address of each entry in this page. */
		dma_pointer = dmabuf->virt;
		for (; total_qe_count < entry_count &&
		     dma_pointer < (hw_page_size + dmabuf->virt);
		     total_qe_count++, dma_pointer += entry_size) {
			queue->qe[total_qe_count].address = dma_pointer;
		}
	}
	queue->entry_size = entry_size;
	queue->entry_count = entry_count;

	/*
	 * entry_repost: interval (in consumed entries) at which entries
	 * are handed back to the port without re-arming — one eighth of
	 * the ring, floored at LPFC_QUEUE_MIN_REPOST.
	 */
	queue->entry_repost = (entry_count >> 3);
	if (queue->entry_repost < LPFC_QUEUE_MIN_REPOST)
		queue->entry_repost = LPFC_QUEUE_MIN_REPOST;
	queue->phba = phba;

	return queue;
out_fail:
	/* Tears down any pages already linked onto page_list. */
	lpfc_sli4_queue_free(queue);
	return NULL;
}
12862
12863
12864
12865
12866
12867
12868
12869
12870
12871
12872static void __iomem *
12873lpfc_dual_chute_pci_bar_map(struct lpfc_hba *phba, uint16_t pci_barset)
12874{
12875 if (!phba->pcidev)
12876 return NULL;
12877
12878 switch (pci_barset) {
12879 case WQ_PCI_BAR_0_AND_1:
12880 return phba->pci_bar0_memmap_p;
12881 case WQ_PCI_BAR_2_AND_3:
12882 return phba->pci_bar2_memmap_p;
12883 case WQ_PCI_BAR_4_AND_5:
12884 return phba->pci_bar4_memmap_p;
12885 default:
12886 break;
12887 }
12888 return NULL;
12889}
12890
12891
12892
12893
12894
12895
12896
12897
12898
12899
12900
12901
12902
12903
12904
12905
12906
12907int
12908lpfc_modify_fcp_eq_delay(struct lpfc_hba *phba, uint32_t startq)
12909{
12910 struct lpfc_mbx_modify_eq_delay *eq_delay;
12911 LPFC_MBOXQ_t *mbox;
12912 struct lpfc_queue *eq;
12913 int cnt, rc, length, status = 0;
12914 uint32_t shdr_status, shdr_add_status;
12915 uint32_t result;
12916 int fcp_eqidx;
12917 union lpfc_sli4_cfg_shdr *shdr;
12918 uint16_t dmult;
12919
12920 if (startq >= phba->cfg_fcp_io_channel)
12921 return 0;
12922
12923 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
12924 if (!mbox)
12925 return -ENOMEM;
12926 length = (sizeof(struct lpfc_mbx_modify_eq_delay) -
12927 sizeof(struct lpfc_sli4_cfg_mhdr));
12928 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON,
12929 LPFC_MBOX_OPCODE_MODIFY_EQ_DELAY,
12930 length, LPFC_SLI4_MBX_EMBED);
12931 eq_delay = &mbox->u.mqe.un.eq_delay;
12932
12933
12934 result = phba->cfg_fcp_imax / phba->cfg_fcp_io_channel;
12935 if (result > LPFC_DMULT_CONST)
12936 dmult = 0;
12937 else
12938 dmult = LPFC_DMULT_CONST/result - 1;
12939
12940 cnt = 0;
12941 for (fcp_eqidx = startq; fcp_eqidx < phba->cfg_fcp_io_channel;
12942 fcp_eqidx++) {
12943 eq = phba->sli4_hba.hba_eq[fcp_eqidx];
12944 if (!eq)
12945 continue;
12946 eq_delay->u.request.eq[cnt].eq_id = eq->queue_id;
12947 eq_delay->u.request.eq[cnt].phase = 0;
12948 eq_delay->u.request.eq[cnt].delay_multi = dmult;
12949 cnt++;
12950 if (cnt >= LPFC_MAX_EQ_DELAY)
12951 break;
12952 }
12953 eq_delay->u.request.num_eq = cnt;
12954
12955 mbox->vport = phba->pport;
12956 mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
12957 mbox->context1 = NULL;
12958 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
12959 shdr = (union lpfc_sli4_cfg_shdr *) &eq_delay->header.cfg_shdr;
12960 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
12961 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
12962 if (shdr_status || shdr_add_status || rc) {
12963 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
12964 "2512 MODIFY_EQ_DELAY mailbox failed with "
12965 "status x%x add_status x%x, mbx status x%x\n",
12966 shdr_status, shdr_add_status, rc);
12967 status = -ENXIO;
12968 }
12969 mempool_free(mbox, phba->mbox_mem_pool);
12970 return status;
12971}
12972
12973
12974
12975
12976
12977
12978
12979
12980
12981
12982
12983
12984
12985
12986
12987
12988
12989
12990
12991
12992
12993int
12994lpfc_eq_create(struct lpfc_hba *phba, struct lpfc_queue *eq, uint32_t imax)
12995{
12996 struct lpfc_mbx_eq_create *eq_create;
12997 LPFC_MBOXQ_t *mbox;
12998 int rc, length, status = 0;
12999 struct lpfc_dmabuf *dmabuf;
13000 uint32_t shdr_status, shdr_add_status;
13001 union lpfc_sli4_cfg_shdr *shdr;
13002 uint16_t dmult;
13003 uint32_t hw_page_size = phba->sli4_hba.pc_sli4_params.if_page_sz;
13004
13005
13006 if (!eq)
13007 return -ENODEV;
13008 if (!phba->sli4_hba.pc_sli4_params.supported)
13009 hw_page_size = SLI4_PAGE_SIZE;
13010
13011 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
13012 if (!mbox)
13013 return -ENOMEM;
13014 length = (sizeof(struct lpfc_mbx_eq_create) -
13015 sizeof(struct lpfc_sli4_cfg_mhdr));
13016 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON,
13017 LPFC_MBOX_OPCODE_EQ_CREATE,
13018 length, LPFC_SLI4_MBX_EMBED);
13019 eq_create = &mbox->u.mqe.un.eq_create;
13020 bf_set(lpfc_mbx_eq_create_num_pages, &eq_create->u.request,
13021 eq->page_count);
13022 bf_set(lpfc_eq_context_size, &eq_create->u.request.context,
13023 LPFC_EQE_SIZE);
13024 bf_set(lpfc_eq_context_valid, &eq_create->u.request.context, 1);
13025
13026 dmult = 0;
13027 bf_set(lpfc_eq_context_delay_multi, &eq_create->u.request.context,
13028 dmult);
13029 switch (eq->entry_count) {
13030 default:
13031 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
13032 "0360 Unsupported EQ count. (%d)\n",
13033 eq->entry_count);
13034 if (eq->entry_count < 256)
13035 return -EINVAL;
13036
13037 case 256:
13038 bf_set(lpfc_eq_context_count, &eq_create->u.request.context,
13039 LPFC_EQ_CNT_256);
13040 break;
13041 case 512:
13042 bf_set(lpfc_eq_context_count, &eq_create->u.request.context,
13043 LPFC_EQ_CNT_512);
13044 break;
13045 case 1024:
13046 bf_set(lpfc_eq_context_count, &eq_create->u.request.context,
13047 LPFC_EQ_CNT_1024);
13048 break;
13049 case 2048:
13050 bf_set(lpfc_eq_context_count, &eq_create->u.request.context,
13051 LPFC_EQ_CNT_2048);
13052 break;
13053 case 4096:
13054 bf_set(lpfc_eq_context_count, &eq_create->u.request.context,
13055 LPFC_EQ_CNT_4096);
13056 break;
13057 }
13058 list_for_each_entry(dmabuf, &eq->page_list, list) {
13059 memset(dmabuf->virt, 0, hw_page_size);
13060 eq_create->u.request.page[dmabuf->buffer_tag].addr_lo =
13061 putPaddrLow(dmabuf->phys);
13062 eq_create->u.request.page[dmabuf->buffer_tag].addr_hi =
13063 putPaddrHigh(dmabuf->phys);
13064 }
13065 mbox->vport = phba->pport;
13066 mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
13067 mbox->context1 = NULL;
13068 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
13069 shdr = (union lpfc_sli4_cfg_shdr *) &eq_create->header.cfg_shdr;
13070 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
13071 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
13072 if (shdr_status || shdr_add_status || rc) {
13073 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
13074 "2500 EQ_CREATE mailbox failed with "
13075 "status x%x add_status x%x, mbx status x%x\n",
13076 shdr_status, shdr_add_status, rc);
13077 status = -ENXIO;
13078 }
13079 eq->type = LPFC_EQ;
13080 eq->subtype = LPFC_NONE;
13081 eq->queue_id = bf_get(lpfc_mbx_eq_create_q_id, &eq_create->u.response);
13082 if (eq->queue_id == 0xFFFF)
13083 status = -ENXIO;
13084 eq->host_index = 0;
13085 eq->hba_index = 0;
13086
13087 mempool_free(mbox, phba->mbox_mem_pool);
13088 return status;
13089}
13090
13091
13092
13093
13094
13095
13096
13097
13098
13099
13100
13101
13102
13103
13104
13105
13106
13107
13108
13109
13110
13111
13112int
13113lpfc_cq_create(struct lpfc_hba *phba, struct lpfc_queue *cq,
13114 struct lpfc_queue *eq, uint32_t type, uint32_t subtype)
13115{
13116 struct lpfc_mbx_cq_create *cq_create;
13117 struct lpfc_dmabuf *dmabuf;
13118 LPFC_MBOXQ_t *mbox;
13119 int rc, length, status = 0;
13120 uint32_t shdr_status, shdr_add_status;
13121 union lpfc_sli4_cfg_shdr *shdr;
13122 uint32_t hw_page_size = phba->sli4_hba.pc_sli4_params.if_page_sz;
13123
13124
13125 if (!cq || !eq)
13126 return -ENODEV;
13127 if (!phba->sli4_hba.pc_sli4_params.supported)
13128 hw_page_size = SLI4_PAGE_SIZE;
13129
13130 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
13131 if (!mbox)
13132 return -ENOMEM;
13133 length = (sizeof(struct lpfc_mbx_cq_create) -
13134 sizeof(struct lpfc_sli4_cfg_mhdr));
13135 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON,
13136 LPFC_MBOX_OPCODE_CQ_CREATE,
13137 length, LPFC_SLI4_MBX_EMBED);
13138 cq_create = &mbox->u.mqe.un.cq_create;
13139 shdr = (union lpfc_sli4_cfg_shdr *) &cq_create->header.cfg_shdr;
13140 bf_set(lpfc_mbx_cq_create_num_pages, &cq_create->u.request,
13141 cq->page_count);
13142 bf_set(lpfc_cq_context_event, &cq_create->u.request.context, 1);
13143 bf_set(lpfc_cq_context_valid, &cq_create->u.request.context, 1);
13144 bf_set(lpfc_mbox_hdr_version, &shdr->request,
13145 phba->sli4_hba.pc_sli4_params.cqv);
13146 if (phba->sli4_hba.pc_sli4_params.cqv == LPFC_Q_CREATE_VERSION_2) {
13147
13148 bf_set(lpfc_mbx_cq_create_page_size, &cq_create->u.request, 1);
13149 bf_set(lpfc_cq_eq_id_2, &cq_create->u.request.context,
13150 eq->queue_id);
13151 } else {
13152 bf_set(lpfc_cq_eq_id, &cq_create->u.request.context,
13153 eq->queue_id);
13154 }
13155 switch (cq->entry_count) {
13156 default:
13157 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
13158 "0361 Unsupported CQ count. (%d)\n",
13159 cq->entry_count);
13160 if (cq->entry_count < 256) {
13161 status = -EINVAL;
13162 goto out;
13163 }
13164
13165 case 256:
13166 bf_set(lpfc_cq_context_count, &cq_create->u.request.context,
13167 LPFC_CQ_CNT_256);
13168 break;
13169 case 512:
13170 bf_set(lpfc_cq_context_count, &cq_create->u.request.context,
13171 LPFC_CQ_CNT_512);
13172 break;
13173 case 1024:
13174 bf_set(lpfc_cq_context_count, &cq_create->u.request.context,
13175 LPFC_CQ_CNT_1024);
13176 break;
13177 }
13178 list_for_each_entry(dmabuf, &cq->page_list, list) {
13179 memset(dmabuf->virt, 0, hw_page_size);
13180 cq_create->u.request.page[dmabuf->buffer_tag].addr_lo =
13181 putPaddrLow(dmabuf->phys);
13182 cq_create->u.request.page[dmabuf->buffer_tag].addr_hi =
13183 putPaddrHigh(dmabuf->phys);
13184 }
13185 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
13186
13187
13188 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
13189 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
13190 if (shdr_status || shdr_add_status || rc) {
13191 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
13192 "2501 CQ_CREATE mailbox failed with "
13193 "status x%x add_status x%x, mbx status x%x\n",
13194 shdr_status, shdr_add_status, rc);
13195 status = -ENXIO;
13196 goto out;
13197 }
13198 cq->queue_id = bf_get(lpfc_mbx_cq_create_q_id, &cq_create->u.response);
13199 if (cq->queue_id == 0xFFFF) {
13200 status = -ENXIO;
13201 goto out;
13202 }
13203
13204 list_add_tail(&cq->list, &eq->child_list);
13205
13206 cq->type = type;
13207 cq->subtype = subtype;
13208 cq->queue_id = bf_get(lpfc_mbx_cq_create_q_id, &cq_create->u.response);
13209 cq->assoc_qid = eq->queue_id;
13210 cq->host_index = 0;
13211 cq->hba_index = 0;
13212
13213out:
13214 mempool_free(mbox, phba->mbox_mem_pool);
13215 return status;
13216}
13217
13218
13219
13220
13221
13222
13223
13224
13225
13226
13227
13228
13229
13230
13231
/**
 * lpfc_mq_create_fb_init - Build a fallback (non-ext) MQ_CREATE mailbox
 * @phba: HBA context.
 * @mq: the driver mailbox queue being created.
 * @mbox: mailbox command buffer to (re)initialize.
 * @cq: completion queue associated with the MQ.
 *
 * Rewrites @mbox as a plain MQ_CREATE request — used when the port rejects
 * MQ_CREATE_EXT: page count, parent CQ id, ring-size encoding and the DMA
 * address of every MQ page.  Does not issue the command; the caller does.
 */
static void
lpfc_mq_create_fb_init(struct lpfc_hba *phba, struct lpfc_queue *mq,
		       LPFC_MBOXQ_t *mbox, struct lpfc_queue *cq)
{
	struct lpfc_mbx_mq_create *mq_create;
	struct lpfc_dmabuf *dmabuf;
	int length;

	length = (sizeof(struct lpfc_mbx_mq_create) -
		  sizeof(struct lpfc_sli4_cfg_mhdr));
	lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON,
			 LPFC_MBOX_OPCODE_MQ_CREATE,
			 length, LPFC_SLI4_MBX_EMBED);
	mq_create = &mbox->u.mqe.un.mq_create;
	bf_set(lpfc_mbx_mq_create_num_pages, &mq_create->u.request,
	       mq->page_count);
	bf_set(lpfc_mq_context_cq_id, &mq_create->u.request.context,
	       cq->queue_id);
	bf_set(lpfc_mq_context_valid, &mq_create->u.request.context, 1);
	/* Ring size is an encoded constant; only 16/32/64/128 are handled
	 * here (no default case — other counts leave the field unset).
	 */
	switch (mq->entry_count) {
	case 16:
		bf_set(lpfc_mq_context_ring_size, &mq_create->u.request.context,
		       LPFC_MQ_RING_SIZE_16);
		break;
	case 32:
		bf_set(lpfc_mq_context_ring_size, &mq_create->u.request.context,
		       LPFC_MQ_RING_SIZE_32);
		break;
	case 64:
		bf_set(lpfc_mq_context_ring_size, &mq_create->u.request.context,
		       LPFC_MQ_RING_SIZE_64);
		break;
	case 128:
		bf_set(lpfc_mq_context_ring_size, &mq_create->u.request.context,
		       LPFC_MQ_RING_SIZE_128);
		break;
	}
	/* Program the DMA address of each MQ page. */
	list_for_each_entry(dmabuf, &mq->page_list, list) {
		mq_create->u.request.page[dmabuf->buffer_tag].addr_lo =
			putPaddrLow(dmabuf->phys);
		mq_create->u.request.page[dmabuf->buffer_tag].addr_hi =
			putPaddrHigh(dmabuf->phys);
	}
}
13276
13277
13278
13279
13280
13281
13282
13283
13284
13285
13286
13287
13288
13289
13290
13291
13292
13293
13294
13295
13296
13297
/**
 * lpfc_mq_create - Create a mailbox queue on the HBA
 * @phba: HBA context.
 * @mq: the driver mailbox queue to create (pages already allocated).
 * @cq: completion queue associated with the MQ.
 * @subtype: queue subtype to record on success.
 *
 * First issues MQ_CREATE_EXT (which also subscribes the MQ to async
 * events); if the port rejects it, rebuilds the mailbox as a plain
 * MQ_CREATE via lpfc_mq_create_fb_init() and retries.  On success links
 * the MQ under @cq.
 *
 * Return: 0 on success; -ENODEV for bad arguments; -ENOMEM if no mailbox
 * memory; -EINVAL for an unsupported entry count; -ENXIO on mailbox
 * failure or an invalid returned queue id.
 */
int32_t
lpfc_mq_create(struct lpfc_hba *phba, struct lpfc_queue *mq,
	       struct lpfc_queue *cq, uint32_t subtype)
{
	struct lpfc_mbx_mq_create *mq_create;
	struct lpfc_mbx_mq_create_ext *mq_create_ext;
	struct lpfc_dmabuf *dmabuf;
	LPFC_MBOXQ_t *mbox;
	int rc, length, status = 0;
	uint32_t shdr_status, shdr_add_status;
	union lpfc_sli4_cfg_shdr *shdr;
	uint32_t hw_page_size = phba->sli4_hba.pc_sli4_params.if_page_sz;

	/* sanity check on queue memory */
	if (!mq || !cq)
		return -ENODEV;
	if (!phba->sli4_hba.pc_sli4_params.supported)
		hw_page_size = SLI4_PAGE_SIZE;

	mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!mbox)
		return -ENOMEM;
	length = (sizeof(struct lpfc_mbx_mq_create_ext) -
		  sizeof(struct lpfc_sli4_cfg_mhdr));
	lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON,
			 LPFC_MBOX_OPCODE_MQ_CREATE_EXT,
			 length, LPFC_SLI4_MBX_EMBED);

	mq_create_ext = &mbox->u.mqe.un.mq_create_ext;
	shdr = (union lpfc_sli4_cfg_shdr *) &mq_create_ext->header.cfg_shdr;
	bf_set(lpfc_mbx_mq_create_ext_num_pages,
	       &mq_create_ext->u.request, mq->page_count);
	/* Subscribe this MQ to every async event category. */
	bf_set(lpfc_mbx_mq_create_ext_async_evt_link,
	       &mq_create_ext->u.request, 1);
	bf_set(lpfc_mbx_mq_create_ext_async_evt_fip,
	       &mq_create_ext->u.request, 1);
	bf_set(lpfc_mbx_mq_create_ext_async_evt_group5,
	       &mq_create_ext->u.request, 1);
	bf_set(lpfc_mbx_mq_create_ext_async_evt_fc,
	       &mq_create_ext->u.request, 1);
	bf_set(lpfc_mbx_mq_create_ext_async_evt_sli,
	       &mq_create_ext->u.request, 1);
	bf_set(lpfc_mq_context_valid, &mq_create_ext->u.request.context, 1);
	bf_set(lpfc_mbox_hdr_version, &shdr->request,
	       phba->sli4_hba.pc_sli4_params.mqv);
	/* The CQ id field moved between create versions. */
	if (phba->sli4_hba.pc_sli4_params.mqv == LPFC_Q_CREATE_VERSION_1)
		bf_set(lpfc_mbx_mq_create_ext_cq_id, &mq_create_ext->u.request,
		       cq->queue_id);
	else
		bf_set(lpfc_mq_context_cq_id, &mq_create_ext->u.request.context,
		       cq->queue_id);
	switch (mq->entry_count) {
	default:
		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
				"0362 Unsupported MQ count. (%d)\n",
				mq->entry_count);
		if (mq->entry_count < 16) {
			status = -EINVAL;
			goto out;
		}
		/* fall through - otherwise default to smallest count */
	case 16:
		bf_set(lpfc_mq_context_ring_size,
		       &mq_create_ext->u.request.context,
		       LPFC_MQ_RING_SIZE_16);
		break;
	case 32:
		bf_set(lpfc_mq_context_ring_size,
		       &mq_create_ext->u.request.context,
		       LPFC_MQ_RING_SIZE_32);
		break;
	case 64:
		bf_set(lpfc_mq_context_ring_size,
		       &mq_create_ext->u.request.context,
		       LPFC_MQ_RING_SIZE_64);
		break;
	case 128:
		bf_set(lpfc_mq_context_ring_size,
		       &mq_create_ext->u.request.context,
		       LPFC_MQ_RING_SIZE_128);
		break;
	}
	/* Program the DMA address of every MQ page. */
	list_for_each_entry(dmabuf, &mq->page_list, list) {
		memset(dmabuf->virt, 0, hw_page_size);
		mq_create_ext->u.request.page[dmabuf->buffer_tag].addr_lo =
					putPaddrLow(dmabuf->phys);
		mq_create_ext->u.request.page[dmabuf->buffer_tag].addr_hi =
					putPaddrHigh(dmabuf->phys);
	}
	rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
	mq->queue_id = bf_get(lpfc_mbx_mq_create_q_id,
			      &mq_create_ext->u.response);
	if (rc != MBX_SUCCESS) {
		/* Port rejected MQ_CREATE_EXT: rebuild the same mailbox as a
		 * plain MQ_CREATE and retry once.
		 */
		lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
				"2795 MQ_CREATE_EXT failed with "
				"status x%x. Failback to MQ_CREATE.\n",
				rc);
		lpfc_mq_create_fb_init(phba, mq, mbox, cq);
		mq_create = &mbox->u.mqe.un.mq_create;
		rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
		shdr = (union lpfc_sli4_cfg_shdr *) &mq_create->header.cfg_shdr;
		mq->queue_id = bf_get(lpfc_mbx_mq_create_q_id,
				      &mq_create->u.response);
	}

	/* Command status is embedded in the mailbox sub-header. */
	shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
	shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
	if (shdr_status || shdr_add_status || rc) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"2502 MQ_CREATE mailbox failed with "
				"status x%x add_status x%x, mbx status x%x\n",
				shdr_status, shdr_add_status, rc);
		status = -ENXIO;
		goto out;
	}
	if (mq->queue_id == 0xFFFF) {
		status = -ENXIO;
		goto out;
	}
	mq->type = LPFC_MQ;
	mq->assoc_qid = cq->queue_id;
	mq->subtype = subtype;
	mq->host_index = 0;
	mq->hba_index = 0;

	/* link the mq onto the parent cq child list */
	list_add_tail(&mq->list, &cq->child_list);
out:
	mempool_free(mbox, phba->mbox_mem_pool);
	return status;
}
13430
13431
13432
13433
13434
13435
13436
13437
13438
13439
13440
13441
13442
13443
13444
13445
13446
13447
13448
13449
13450
13451
13452
/**
 * lpfc_wq_create - Create a work queue on the HBA
 * @phba: HBA context.
 * @wq: the driver work queue to create (pages already allocated).
 * @cq: completion queue the WQ will report into.
 * @subtype: queue subtype to record on success.
 *
 * Issues a polled WQ_CREATE mailbox command.  The request layout depends
 * on the port's create version (wqv): version 0 uses the basic request
 * unless 128-byte WQEs force an upgrade to a version-1 request; version 1
 * always carries entry count, entry size and page size.  On success the
 * doorbell register is resolved — from the dual-chute response fields when
 * the port runs in DUA mode, otherwise the default WQ doorbell — and the
 * WQ is linked under @cq.
 *
 * Return: 0 on success; -ENODEV for bad arguments; -ENOMEM for mailbox or
 * BAR-map failure; -ERANGE for an unsupported size/version combination;
 * -ENXIO on mailbox failure; -EINVAL for an unsupported doorbell
 * format/offset.
 */
int
lpfc_wq_create(struct lpfc_hba *phba, struct lpfc_queue *wq,
	       struct lpfc_queue *cq, uint32_t subtype)
{
	struct lpfc_mbx_wq_create *wq_create;
	struct lpfc_dmabuf *dmabuf;
	LPFC_MBOXQ_t *mbox;
	int rc, length, status = 0;
	uint32_t shdr_status, shdr_add_status;
	union lpfc_sli4_cfg_shdr *shdr;
	uint32_t hw_page_size = phba->sli4_hba.pc_sli4_params.if_page_sz;
	struct dma_address *page;
	void __iomem *bar_memmap_p;
	uint32_t db_offset;
	uint16_t pci_barset;

	/* sanity check on queue memory */
	if (!wq || !cq)
		return -ENODEV;
	if (!phba->sli4_hba.pc_sli4_params.supported)
		hw_page_size = SLI4_PAGE_SIZE;

	mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!mbox)
		return -ENOMEM;
	length = (sizeof(struct lpfc_mbx_wq_create) -
		  sizeof(struct lpfc_sli4_cfg_mhdr));
	lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_FCOE,
			 LPFC_MBOX_OPCODE_FCOE_WQ_CREATE,
			 length, LPFC_SLI4_MBX_EMBED);
	wq_create = &mbox->u.mqe.un.wq_create;
	shdr = (union lpfc_sli4_cfg_shdr *) &wq_create->header.cfg_shdr;
	bf_set(lpfc_mbx_wq_create_num_pages, &wq_create->u.request,
		    wq->page_count);
	bf_set(lpfc_mbx_wq_create_cq_id, &wq_create->u.request,
		    cq->queue_id);

	/* wqv is the earliest version supported, NOT the latest */
	bf_set(lpfc_mbox_hdr_version, &shdr->request,
	       phba->sli4_hba.pc_sli4_params.wqv);

	switch (phba->sli4_hba.pc_sli4_params.wqv) {
	case LPFC_Q_CREATE_VERSION_0:
		switch (wq->entry_size) {
		default:
		case 64:
			/* Nothing to do: version 0 assumes 64-byte WQEs. */
			page = wq_create->u.request.page;
			break;
		case 128:
			if (!(phba->sli4_hba.pc_sli4_params.wqsize &
			    LPFC_WQ_SZ128_SUPPORT)) {
				status = -ERANGE;
				goto out;
			}
			/* 128-byte WQEs require a version-1 request even
			 * though the port only advertised version 0.
			 */
			bf_set(lpfc_mbox_hdr_version, &shdr->request,
			       LPFC_Q_CREATE_VERSION_1);

			bf_set(lpfc_mbx_wq_create_wqe_count,
			       &wq_create->u.request_1, wq->entry_count);
			bf_set(lpfc_mbx_wq_create_wqe_size,
			       &wq_create->u.request_1,
			       LPFC_WQ_WQE_SIZE_128);
			bf_set(lpfc_mbx_wq_create_page_size,
			       &wq_create->u.request_1,
			       (PAGE_SIZE/SLI4_PAGE_SIZE));
			page = wq_create->u.request_1.page;
			break;
		}
		break;
	case LPFC_Q_CREATE_VERSION_1:
		bf_set(lpfc_mbx_wq_create_wqe_count, &wq_create->u.request_1,
		       wq->entry_count);
		switch (wq->entry_size) {
		default:
		case 64:
			bf_set(lpfc_mbx_wq_create_wqe_size,
			       &wq_create->u.request_1,
			       LPFC_WQ_WQE_SIZE_64);
			break;
		case 128:
			if (!(phba->sli4_hba.pc_sli4_params.wqsize &
				LPFC_WQ_SZ128_SUPPORT)) {
				status = -ERANGE;
				goto out;
			}
			bf_set(lpfc_mbx_wq_create_wqe_size,
			       &wq_create->u.request_1,
			       LPFC_WQ_WQE_SIZE_128);
			break;
		}
		bf_set(lpfc_mbx_wq_create_page_size, &wq_create->u.request_1,
		       (PAGE_SIZE/SLI4_PAGE_SIZE));
		page = wq_create->u.request_1.page;
		break;
	default:
		status = -ERANGE;
		goto out;
	}

	/* Program the DMA address of every WQ page. */
	list_for_each_entry(dmabuf, &wq->page_list, list) {
		memset(dmabuf->virt, 0, hw_page_size);
		page[dmabuf->buffer_tag].addr_lo = putPaddrLow(dmabuf->phys);
		page[dmabuf->buffer_tag].addr_hi = putPaddrHigh(dmabuf->phys);
	}

	/* Request dual-chute doorbell info when in DUA mode. */
	if (phba->sli4_hba.fw_func_mode & LPFC_DUA_MODE)
		bf_set(lpfc_mbx_wq_create_dua, &wq_create->u.request, 1);

	rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
	/* Command status is embedded in the mailbox sub-header. */
	shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
	shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
	if (shdr_status || shdr_add_status || rc) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"2503 WQ_CREATE mailbox failed with "
				"status x%x add_status x%x, mbx status x%x\n",
				shdr_status, shdr_add_status, rc);
		status = -ENXIO;
		goto out;
	}
	wq->queue_id = bf_get(lpfc_mbx_wq_create_q_id, &wq_create->u.response);
	if (wq->queue_id == 0xFFFF) {
		status = -ENXIO;
		goto out;
	}
	if (phba->sli4_hba.fw_func_mode & LPFC_DUA_MODE) {
		/* DUA mode: doorbell format/BAR/offset come from the
		 * response; validate each before use.
		 */
		wq->db_format = bf_get(lpfc_mbx_wq_create_db_format,
				       &wq_create->u.response);
		if ((wq->db_format != LPFC_DB_LIST_FORMAT) &&
		    (wq->db_format != LPFC_DB_RING_FORMAT)) {
			lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
					"3265 WQ[%d] doorbell format not "
					"supported: x%x\n", wq->queue_id,
					wq->db_format);
			status = -EINVAL;
			goto out;
		}
		pci_barset = bf_get(lpfc_mbx_wq_create_bar_set,
				    &wq_create->u.response);
		bar_memmap_p = lpfc_dual_chute_pci_bar_map(phba, pci_barset);
		if (!bar_memmap_p) {
			lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
					"3263 WQ[%d] failed to memmap pci "
					"barset:x%x\n", wq->queue_id,
					pci_barset);
			status = -ENOMEM;
			goto out;
		}
		db_offset = wq_create->u.response.doorbell_offset;
		if ((db_offset != LPFC_ULP0_WQ_DOORBELL) &&
		    (db_offset != LPFC_ULP1_WQ_DOORBELL)) {
			lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
					"3252 WQ[%d] doorbell offset not "
					"supported: x%x\n", wq->queue_id,
					db_offset);
			status = -EINVAL;
			goto out;
		}
		wq->db_regaddr = bar_memmap_p + db_offset;
		lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
				"3264 WQ[%d]: barset:x%x, offset:x%x, "
				"format:x%x\n", wq->queue_id, pci_barset,
				db_offset, wq->db_format);
	} else {
		/* Non-DUA: use the default WQ doorbell register. */
		wq->db_format = LPFC_DB_LIST_FORMAT;
		wq->db_regaddr = phba->sli4_hba.WQDBregaddr;
	}
	wq->type = LPFC_WQ;
	wq->assoc_qid = cq->queue_id;
	wq->subtype = subtype;
	wq->host_index = 0;
	wq->hba_index = 0;
	wq->entry_repost = LPFC_RELEASE_NOTIFICATION_INTERVAL;

	/* link the wq onto the parent cq child list */
	list_add_tail(&wq->list, &cq->child_list);
out:
	mempool_free(mbox, phba->mbox_mem_pool);
	return status;
}
13637
13638
13639
13640
13641
13642
13643
13644
13645
13646
13647
13648void
13649lpfc_rq_adjust_repost(struct lpfc_hba *phba, struct lpfc_queue *rq, int qno)
13650{
13651 uint32_t cnt;
13652
13653
13654 if (!rq)
13655 return;
13656 cnt = lpfc_hbq_defs[qno]->entry_count;
13657
13658
13659 cnt = (cnt >> 3);
13660 if (cnt < LPFC_QUEUE_MIN_REPOST)
13661 cnt = LPFC_QUEUE_MIN_REPOST;
13662
13663 rq->entry_repost = cnt;
13664}
13665
13666
13667
13668
13669
13670
13671
13672
13673
13674
13675
13676
13677
13678
13679
13680
13681
13682
13683
13684
13685
13686
13687
13688
/**
 * lpfc_rq_create - Create an SLI4 receive queue pair (header + data)
 * @phba: HBA structure that indicates port to create the queues on.
 * @hrq: Queue structure for the header receive queue.
 * @drq: Queue structure for the data receive queue.
 * @cq: Completion queue both receive queues report to.
 * @subtype: Functional subtype recorded on both queues.
 *
 * Issues two RQ_CREATE mailbox commands through a single reused mailbox:
 * first for the header RQ (@hrq), then for the data RQ (@drq).  On success
 * both queues are bound to @cq and linked onto its child list.
 *
 * Return: 0 on success; -ENODEV for NULL queue pointers; -EINVAL for
 * mismatched/unsupported entry counts or doorbell parameters; -ENOMEM when
 * mailbox or BAR mapping resources are unavailable; -ENXIO on mailbox
 * command failure.
 */
int
lpfc_rq_create(struct lpfc_hba *phba, struct lpfc_queue *hrq,
	       struct lpfc_queue *drq, struct lpfc_queue *cq, uint32_t subtype)
{
	struct lpfc_mbx_rq_create *rq_create;
	struct lpfc_dmabuf *dmabuf;
	LPFC_MBOXQ_t *mbox;
	int rc, length, status = 0;
	uint32_t shdr_status, shdr_add_status;
	union lpfc_sli4_cfg_shdr *shdr;
	uint32_t hw_page_size = phba->sli4_hba.pc_sli4_params.if_page_sz;
	void __iomem *bar_memmap_p;
	uint32_t db_offset;
	uint16_t pci_barset;

	/* sanity check on queue memory */
	if (!hrq || !drq || !cq)
		return -ENODEV;
	if (!phba->sli4_hba.pc_sli4_params.supported)
		hw_page_size = SLI4_PAGE_SIZE;

	/* Header and data RQs are posted in lock-step and must match. */
	if (hrq->entry_count != drq->entry_count)
		return -EINVAL;
	mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!mbox)
		return -ENOMEM;
	length = (sizeof(struct lpfc_mbx_rq_create) -
		  sizeof(struct lpfc_sli4_cfg_mhdr));
	lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_FCOE,
			 LPFC_MBOX_OPCODE_FCOE_RQ_CREATE,
			 length, LPFC_SLI4_MBX_EMBED);
	rq_create = &mbox->u.mqe.un.rq_create;
	shdr = (union lpfc_sli4_cfg_shdr *) &rq_create->header.cfg_shdr;
	bf_set(lpfc_mbox_hdr_version, &shdr->request,
	       phba->sli4_hba.pc_sli4_params.rqv);
	if (phba->sli4_hba.pc_sli4_params.rqv == LPFC_Q_CREATE_VERSION_1) {
		/* Version-1 RQ_CREATE: explicit count, buffer and page size */
		bf_set(lpfc_rq_context_rqe_count_1,
		       &rq_create->u.request.context,
		       hrq->entry_count);
		rq_create->u.request.context.buffer_size = LPFC_HDR_BUF_SIZE;
		bf_set(lpfc_rq_context_rqe_size,
		       &rq_create->u.request.context,
		       LPFC_RQE_SIZE_8);
		bf_set(lpfc_rq_context_page_size,
		       &rq_create->u.request.context,
		       (PAGE_SIZE/SLI4_PAGE_SIZE));
	} else {
		/* Version-0 only supports a fixed set of ring sizes. */
		switch (hrq->entry_count) {
		default:
			lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
					"2535 Unsupported RQ count. (%d)\n",
					hrq->entry_count);
			if (hrq->entry_count < 512) {
				status = -EINVAL;
				goto out;
			}
			/* fall through - larger counts default to 512 */
		case 512:
			bf_set(lpfc_rq_context_rqe_count,
			       &rq_create->u.request.context,
			       LPFC_RQ_RING_SIZE_512);
			break;
		case 1024:
			bf_set(lpfc_rq_context_rqe_count,
			       &rq_create->u.request.context,
			       LPFC_RQ_RING_SIZE_1024);
			break;
		case 2048:
			bf_set(lpfc_rq_context_rqe_count,
			       &rq_create->u.request.context,
			       LPFC_RQ_RING_SIZE_2048);
			break;
		case 4096:
			bf_set(lpfc_rq_context_rqe_count,
			       &rq_create->u.request.context,
			       LPFC_RQ_RING_SIZE_4096);
			break;
		}
		bf_set(lpfc_rq_context_buf_size, &rq_create->u.request.context,
		       LPFC_HDR_BUF_SIZE);
	}
	bf_set(lpfc_rq_context_cq_id, &rq_create->u.request.context,
	       cq->queue_id);
	bf_set(lpfc_mbx_rq_create_num_pages, &rq_create->u.request,
	       hrq->page_count);
	/* Post the header RQ's page addresses (pages zeroed first). */
	list_for_each_entry(dmabuf, &hrq->page_list, list) {
		memset(dmabuf->virt, 0, hw_page_size);
		rq_create->u.request.page[dmabuf->buffer_tag].addr_lo =
					putPaddrLow(dmabuf->phys);
		rq_create->u.request.page[dmabuf->buffer_tag].addr_hi =
					putPaddrHigh(dmabuf->phys);
	}
	if (phba->sli4_hba.fw_func_mode & LPFC_DUA_MODE)
		bf_set(lpfc_mbx_rq_create_dua, &rq_create->u.request, 1);

	rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
	/* The IOCTL status is embedded in the mailbox subheader. */
	shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
	shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
	if (shdr_status || shdr_add_status || rc) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"2504 RQ_CREATE mailbox failed with "
				"status x%x add_status x%x, mbx status x%x\n",
				shdr_status, shdr_add_status, rc);
		status = -ENXIO;
		goto out;
	}
	hrq->queue_id = bf_get(lpfc_mbx_rq_create_q_id, &rq_create->u.response);
	if (hrq->queue_id == 0xFFFF) {
		status = -ENXIO;
		goto out;
	}

	if (phba->sli4_hba.fw_func_mode & LPFC_DUA_MODE) {
		/* Dual-chute: firmware dictates doorbell format/BAR/offset. */
		hrq->db_format = bf_get(lpfc_mbx_rq_create_db_format,
					&rq_create->u.response);
		if ((hrq->db_format != LPFC_DB_LIST_FORMAT) &&
		    (hrq->db_format != LPFC_DB_RING_FORMAT)) {
			lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
					"3262 RQ [%d] doorbell format not "
					"supported: x%x\n", hrq->queue_id,
					hrq->db_format);
			status = -EINVAL;
			goto out;
		}

		pci_barset = bf_get(lpfc_mbx_rq_create_bar_set,
				    &rq_create->u.response);
		bar_memmap_p = lpfc_dual_chute_pci_bar_map(phba, pci_barset);
		if (!bar_memmap_p) {
			lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
					"3269 RQ[%d] failed to memmap pci "
					"barset:x%x\n", hrq->queue_id,
					pci_barset);
			status = -ENOMEM;
			goto out;
		}

		db_offset = rq_create->u.response.doorbell_offset;
		if ((db_offset != LPFC_ULP0_RQ_DOORBELL) &&
		    (db_offset != LPFC_ULP1_RQ_DOORBELL)) {
			lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
					"3270 RQ[%d] doorbell offset not "
					"supported: x%x\n", hrq->queue_id,
					db_offset);
			status = -EINVAL;
			goto out;
		}
		hrq->db_regaddr = bar_memmap_p + db_offset;
		lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
				"3266 RQ[qid:%d]: barset:x%x, offset:x%x, "
				"format:x%x\n", hrq->queue_id, pci_barset,
				db_offset, hrq->db_format);
	} else {
		/* Single-chute: use the default ring-format RQ doorbell. */
		hrq->db_format = LPFC_DB_RING_FORMAT;
		hrq->db_regaddr = phba->sli4_hba.RQDBregaddr;
	}
	hrq->type = LPFC_HRQ;
	hrq->assoc_qid = cq->queue_id;
	hrq->subtype = subtype;
	hrq->host_index = 0;
	hrq->hba_index = 0;

	/* Reuse the same mailbox to now create the data receive queue. */
	lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_FCOE,
			 LPFC_MBOX_OPCODE_FCOE_RQ_CREATE,
			 length, LPFC_SLI4_MBX_EMBED);
	bf_set(lpfc_mbox_hdr_version, &shdr->request,
	       phba->sli4_hba.pc_sli4_params.rqv);
	if (phba->sli4_hba.pc_sli4_params.rqv == LPFC_Q_CREATE_VERSION_1) {
		/*
		 * hrq->entry_count is used here; it was verified equal to
		 * drq->entry_count at function entry.
		 */
		bf_set(lpfc_rq_context_rqe_count_1,
		       &rq_create->u.request.context, hrq->entry_count);
		rq_create->u.request.context.buffer_size = LPFC_DATA_BUF_SIZE;
		bf_set(lpfc_rq_context_rqe_size, &rq_create->u.request.context,
		       LPFC_RQE_SIZE_8);
		bf_set(lpfc_rq_context_page_size, &rq_create->u.request.context,
		       (PAGE_SIZE/SLI4_PAGE_SIZE));
	} else {
		switch (drq->entry_count) {
		default:
			lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
					"2536 Unsupported RQ count. (%d)\n",
					drq->entry_count);
			if (drq->entry_count < 512) {
				status = -EINVAL;
				goto out;
			}
			/* fall through - larger counts default to 512 */
		case 512:
			bf_set(lpfc_rq_context_rqe_count,
			       &rq_create->u.request.context,
			       LPFC_RQ_RING_SIZE_512);
			break;
		case 1024:
			bf_set(lpfc_rq_context_rqe_count,
			       &rq_create->u.request.context,
			       LPFC_RQ_RING_SIZE_1024);
			break;
		case 2048:
			bf_set(lpfc_rq_context_rqe_count,
			       &rq_create->u.request.context,
			       LPFC_RQ_RING_SIZE_2048);
			break;
		case 4096:
			bf_set(lpfc_rq_context_rqe_count,
			       &rq_create->u.request.context,
			       LPFC_RQ_RING_SIZE_4096);
			break;
		}
		bf_set(lpfc_rq_context_buf_size, &rq_create->u.request.context,
		       LPFC_DATA_BUF_SIZE);
	}
	bf_set(lpfc_rq_context_cq_id, &rq_create->u.request.context,
	       cq->queue_id);
	bf_set(lpfc_mbx_rq_create_num_pages, &rq_create->u.request,
	       drq->page_count);
	/* Data RQ pages are posted as-is (not zeroed like the header RQ). */
	list_for_each_entry(dmabuf, &drq->page_list, list) {
		rq_create->u.request.page[dmabuf->buffer_tag].addr_lo =
					putPaddrLow(dmabuf->phys);
		rq_create->u.request.page[dmabuf->buffer_tag].addr_hi =
					putPaddrHigh(dmabuf->phys);
	}
	if (phba->sli4_hba.fw_func_mode & LPFC_DUA_MODE)
		bf_set(lpfc_mbx_rq_create_dua, &rq_create->u.request, 1);
	rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
	/* Re-derive the subheader pointer for the second command. */
	shdr = (union lpfc_sli4_cfg_shdr *) &rq_create->header.cfg_shdr;
	shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
	shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
	if (shdr_status || shdr_add_status || rc) {
		status = -ENXIO;
		goto out;
	}
	drq->queue_id = bf_get(lpfc_mbx_rq_create_q_id, &rq_create->u.response);
	if (drq->queue_id == 0xFFFF) {
		status = -ENXIO;
		goto out;
	}
	drq->type = LPFC_DRQ;
	drq->assoc_qid = cq->queue_id;
	drq->subtype = subtype;
	drq->host_index = 0;
	drq->hba_index = 0;

	/* link the header and data RQs onto the parent cq child list */
	list_add_tail(&hrq->list, &cq->child_list);
	list_add_tail(&drq->list, &cq->child_list);

out:
	mempool_free(mbox, phba->mbox_mem_pool);
	return status;
}
13941
13942
13943
13944
13945
13946
13947
13948
13949
13950
13951
13952
13953
13954int
13955lpfc_eq_destroy(struct lpfc_hba *phba, struct lpfc_queue *eq)
13956{
13957 LPFC_MBOXQ_t *mbox;
13958 int rc, length, status = 0;
13959 uint32_t shdr_status, shdr_add_status;
13960 union lpfc_sli4_cfg_shdr *shdr;
13961
13962
13963 if (!eq)
13964 return -ENODEV;
13965 mbox = mempool_alloc(eq->phba->mbox_mem_pool, GFP_KERNEL);
13966 if (!mbox)
13967 return -ENOMEM;
13968 length = (sizeof(struct lpfc_mbx_eq_destroy) -
13969 sizeof(struct lpfc_sli4_cfg_mhdr));
13970 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON,
13971 LPFC_MBOX_OPCODE_EQ_DESTROY,
13972 length, LPFC_SLI4_MBX_EMBED);
13973 bf_set(lpfc_mbx_eq_destroy_q_id, &mbox->u.mqe.un.eq_destroy.u.request,
13974 eq->queue_id);
13975 mbox->vport = eq->phba->pport;
13976 mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
13977
13978 rc = lpfc_sli_issue_mbox(eq->phba, mbox, MBX_POLL);
13979
13980 shdr = (union lpfc_sli4_cfg_shdr *)
13981 &mbox->u.mqe.un.eq_destroy.header.cfg_shdr;
13982 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
13983 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
13984 if (shdr_status || shdr_add_status || rc) {
13985 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
13986 "2505 EQ_DESTROY mailbox failed with "
13987 "status x%x add_status x%x, mbx status x%x\n",
13988 shdr_status, shdr_add_status, rc);
13989 status = -ENXIO;
13990 }
13991
13992
13993 list_del_init(&eq->list);
13994 mempool_free(mbox, eq->phba->mbox_mem_pool);
13995 return status;
13996}
13997
13998
13999
14000
14001
14002
14003
14004
14005
14006
14007
14008
14009
14010int
14011lpfc_cq_destroy(struct lpfc_hba *phba, struct lpfc_queue *cq)
14012{
14013 LPFC_MBOXQ_t *mbox;
14014 int rc, length, status = 0;
14015 uint32_t shdr_status, shdr_add_status;
14016 union lpfc_sli4_cfg_shdr *shdr;
14017
14018
14019 if (!cq)
14020 return -ENODEV;
14021 mbox = mempool_alloc(cq->phba->mbox_mem_pool, GFP_KERNEL);
14022 if (!mbox)
14023 return -ENOMEM;
14024 length = (sizeof(struct lpfc_mbx_cq_destroy) -
14025 sizeof(struct lpfc_sli4_cfg_mhdr));
14026 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON,
14027 LPFC_MBOX_OPCODE_CQ_DESTROY,
14028 length, LPFC_SLI4_MBX_EMBED);
14029 bf_set(lpfc_mbx_cq_destroy_q_id, &mbox->u.mqe.un.cq_destroy.u.request,
14030 cq->queue_id);
14031 mbox->vport = cq->phba->pport;
14032 mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
14033 rc = lpfc_sli_issue_mbox(cq->phba, mbox, MBX_POLL);
14034
14035 shdr = (union lpfc_sli4_cfg_shdr *)
14036 &mbox->u.mqe.un.wq_create.header.cfg_shdr;
14037 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
14038 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
14039 if (shdr_status || shdr_add_status || rc) {
14040 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
14041 "2506 CQ_DESTROY mailbox failed with "
14042 "status x%x add_status x%x, mbx status x%x\n",
14043 shdr_status, shdr_add_status, rc);
14044 status = -ENXIO;
14045 }
14046
14047 list_del_init(&cq->list);
14048 mempool_free(mbox, cq->phba->mbox_mem_pool);
14049 return status;
14050}
14051
14052
14053
14054
14055
14056
14057
14058
14059
14060
14061
14062
14063
14064int
14065lpfc_mq_destroy(struct lpfc_hba *phba, struct lpfc_queue *mq)
14066{
14067 LPFC_MBOXQ_t *mbox;
14068 int rc, length, status = 0;
14069 uint32_t shdr_status, shdr_add_status;
14070 union lpfc_sli4_cfg_shdr *shdr;
14071
14072
14073 if (!mq)
14074 return -ENODEV;
14075 mbox = mempool_alloc(mq->phba->mbox_mem_pool, GFP_KERNEL);
14076 if (!mbox)
14077 return -ENOMEM;
14078 length = (sizeof(struct lpfc_mbx_mq_destroy) -
14079 sizeof(struct lpfc_sli4_cfg_mhdr));
14080 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON,
14081 LPFC_MBOX_OPCODE_MQ_DESTROY,
14082 length, LPFC_SLI4_MBX_EMBED);
14083 bf_set(lpfc_mbx_mq_destroy_q_id, &mbox->u.mqe.un.mq_destroy.u.request,
14084 mq->queue_id);
14085 mbox->vport = mq->phba->pport;
14086 mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
14087 rc = lpfc_sli_issue_mbox(mq->phba, mbox, MBX_POLL);
14088
14089 shdr = (union lpfc_sli4_cfg_shdr *)
14090 &mbox->u.mqe.un.mq_destroy.header.cfg_shdr;
14091 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
14092 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
14093 if (shdr_status || shdr_add_status || rc) {
14094 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
14095 "2507 MQ_DESTROY mailbox failed with "
14096 "status x%x add_status x%x, mbx status x%x\n",
14097 shdr_status, shdr_add_status, rc);
14098 status = -ENXIO;
14099 }
14100
14101 list_del_init(&mq->list);
14102 mempool_free(mbox, mq->phba->mbox_mem_pool);
14103 return status;
14104}
14105
14106
14107
14108
14109
14110
14111
14112
14113
14114
14115
14116
14117
14118int
14119lpfc_wq_destroy(struct lpfc_hba *phba, struct lpfc_queue *wq)
14120{
14121 LPFC_MBOXQ_t *mbox;
14122 int rc, length, status = 0;
14123 uint32_t shdr_status, shdr_add_status;
14124 union lpfc_sli4_cfg_shdr *shdr;
14125
14126
14127 if (!wq)
14128 return -ENODEV;
14129 mbox = mempool_alloc(wq->phba->mbox_mem_pool, GFP_KERNEL);
14130 if (!mbox)
14131 return -ENOMEM;
14132 length = (sizeof(struct lpfc_mbx_wq_destroy) -
14133 sizeof(struct lpfc_sli4_cfg_mhdr));
14134 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_FCOE,
14135 LPFC_MBOX_OPCODE_FCOE_WQ_DESTROY,
14136 length, LPFC_SLI4_MBX_EMBED);
14137 bf_set(lpfc_mbx_wq_destroy_q_id, &mbox->u.mqe.un.wq_destroy.u.request,
14138 wq->queue_id);
14139 mbox->vport = wq->phba->pport;
14140 mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
14141 rc = lpfc_sli_issue_mbox(wq->phba, mbox, MBX_POLL);
14142 shdr = (union lpfc_sli4_cfg_shdr *)
14143 &mbox->u.mqe.un.wq_destroy.header.cfg_shdr;
14144 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
14145 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
14146 if (shdr_status || shdr_add_status || rc) {
14147 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
14148 "2508 WQ_DESTROY mailbox failed with "
14149 "status x%x add_status x%x, mbx status x%x\n",
14150 shdr_status, shdr_add_status, rc);
14151 status = -ENXIO;
14152 }
14153
14154 list_del_init(&wq->list);
14155 mempool_free(mbox, wq->phba->mbox_mem_pool);
14156 return status;
14157}
14158
14159
14160
14161
14162
14163
14164
14165
14166
14167
14168
14169
14170
/**
 * lpfc_rq_destroy - Destroy a receive queue pair on the HBA
 * @phba: HBA structure that indicates port to destroy the queues on.
 * @hrq: Header receive queue to destroy.
 * @drq: Data receive queue to destroy.
 *
 * Issues two RQ_DESTROY mailbox commands through a single reused mailbox:
 * first for @hrq, then for @drq.  On success both queues are unlinked from
 * any list they are on.
 *
 * Return: 0 on success, -ENODEV for NULL queue pointers, -ENOMEM when no
 * mailbox memory is available, or -ENXIO when a mailbox command fails.
 */
int
lpfc_rq_destroy(struct lpfc_hba *phba, struct lpfc_queue *hrq,
		struct lpfc_queue *drq)
{
	LPFC_MBOXQ_t *mbox;
	int rc, length, status = 0;
	uint32_t shdr_status, shdr_add_status;
	union lpfc_sli4_cfg_shdr *shdr;

	/* sanity check on queue memory */
	if (!hrq || !drq)
		return -ENODEV;
	mbox = mempool_alloc(hrq->phba->mbox_mem_pool, GFP_KERNEL);
	if (!mbox)
		return -ENOMEM;
	length = (sizeof(struct lpfc_mbx_rq_destroy) -
		  sizeof(struct lpfc_sli4_cfg_mhdr));
	lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_FCOE,
			 LPFC_MBOX_OPCODE_FCOE_RQ_DESTROY,
			 length, LPFC_SLI4_MBX_EMBED);
	/* First destroy the header receive queue. */
	bf_set(lpfc_mbx_rq_destroy_q_id, &mbox->u.mqe.un.rq_destroy.u.request,
	       hrq->queue_id);
	mbox->vport = hrq->phba->pport;
	mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
	rc = lpfc_sli_issue_mbox(hrq->phba, mbox, MBX_POLL);

	shdr = (union lpfc_sli4_cfg_shdr *)
		&mbox->u.mqe.un.rq_destroy.header.cfg_shdr;
	shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
	shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
	if (shdr_status || shdr_add_status || rc) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"2509 RQ_DESTROY mailbox failed with "
				"status x%x add_status x%x, mbx status x%x\n",
				shdr_status, shdr_add_status, rc);
		/*
		 * On MBX_TIMEOUT the mailbox is intentionally not freed --
		 * presumably because the firmware may still complete it;
		 * NOTE(review): confirm this matches the driver's mailbox
		 * ownership rules.
		 */
		if (rc != MBX_TIMEOUT)
			mempool_free(mbox, hrq->phba->mbox_mem_pool);
		return -ENXIO;
	}
	/* Reuse the same mailbox to destroy the data receive queue. */
	bf_set(lpfc_mbx_rq_destroy_q_id, &mbox->u.mqe.un.rq_destroy.u.request,
	       drq->queue_id);
	rc = lpfc_sli_issue_mbox(drq->phba, mbox, MBX_POLL);
	shdr = (union lpfc_sli4_cfg_shdr *)
		&mbox->u.mqe.un.rq_destroy.header.cfg_shdr;
	shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
	shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
	if (shdr_status || shdr_add_status || rc) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"2510 RQ_DESTROY mailbox failed with "
				"status x%x add_status x%x, mbx status x%x\n",
				shdr_status, shdr_add_status, rc);
		status = -ENXIO;
	}
	/* Unlink both queues from their parent's child list. */
	list_del_init(&hrq->list);
	list_del_init(&drq->list);
	mempool_free(mbox, hrq->phba->mbox_mem_pool);
	return status;
}
14229
14230
14231
14232
14233
14234
14235
14236
14237
14238
14239
14240
14241
14242
14243
14244
14245
14246
14247
14248
14249
14250
14251
14252int
14253lpfc_sli4_post_sgl(struct lpfc_hba *phba,
14254 dma_addr_t pdma_phys_addr0,
14255 dma_addr_t pdma_phys_addr1,
14256 uint16_t xritag)
14257{
14258 struct lpfc_mbx_post_sgl_pages *post_sgl_pages;
14259 LPFC_MBOXQ_t *mbox;
14260 int rc;
14261 uint32_t shdr_status, shdr_add_status;
14262 uint32_t mbox_tmo;
14263 union lpfc_sli4_cfg_shdr *shdr;
14264
14265 if (xritag == NO_XRI) {
14266 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
14267 "0364 Invalid param:\n");
14268 return -EINVAL;
14269 }
14270
14271 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
14272 if (!mbox)
14273 return -ENOMEM;
14274
14275 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_FCOE,
14276 LPFC_MBOX_OPCODE_FCOE_POST_SGL_PAGES,
14277 sizeof(struct lpfc_mbx_post_sgl_pages) -
14278 sizeof(struct lpfc_sli4_cfg_mhdr), LPFC_SLI4_MBX_EMBED);
14279
14280 post_sgl_pages = (struct lpfc_mbx_post_sgl_pages *)
14281 &mbox->u.mqe.un.post_sgl_pages;
14282 bf_set(lpfc_post_sgl_pages_xri, post_sgl_pages, xritag);
14283 bf_set(lpfc_post_sgl_pages_xricnt, post_sgl_pages, 1);
14284
14285 post_sgl_pages->sgl_pg_pairs[0].sgl_pg0_addr_lo =
14286 cpu_to_le32(putPaddrLow(pdma_phys_addr0));
14287 post_sgl_pages->sgl_pg_pairs[0].sgl_pg0_addr_hi =
14288 cpu_to_le32(putPaddrHigh(pdma_phys_addr0));
14289
14290 post_sgl_pages->sgl_pg_pairs[0].sgl_pg1_addr_lo =
14291 cpu_to_le32(putPaddrLow(pdma_phys_addr1));
14292 post_sgl_pages->sgl_pg_pairs[0].sgl_pg1_addr_hi =
14293 cpu_to_le32(putPaddrHigh(pdma_phys_addr1));
14294 if (!phba->sli4_hba.intr_enable)
14295 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
14296 else {
14297 mbox_tmo = lpfc_mbox_tmo_val(phba, mbox);
14298 rc = lpfc_sli_issue_mbox_wait(phba, mbox, mbox_tmo);
14299 }
14300
14301 shdr = (union lpfc_sli4_cfg_shdr *) &post_sgl_pages->header.cfg_shdr;
14302 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
14303 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
14304 if (rc != MBX_TIMEOUT)
14305 mempool_free(mbox, phba->mbox_mem_pool);
14306 if (shdr_status || shdr_add_status || rc) {
14307 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
14308 "2511 POST_SGL mailbox failed with "
14309 "status x%x add_status x%x, mbx status x%x\n",
14310 shdr_status, shdr_add_status, rc);
14311 }
14312 return 0;
14313}
14314
14315
14316
14317
14318
14319
14320
14321
14322
14323
14324
14325
14326
14327
14328static uint16_t
14329lpfc_sli4_alloc_xri(struct lpfc_hba *phba)
14330{
14331 unsigned long xri;
14332
14333
14334
14335
14336
14337 spin_lock_irq(&phba->hbalock);
14338 xri = find_next_zero_bit(phba->sli4_hba.xri_bmask,
14339 phba->sli4_hba.max_cfg_param.max_xri, 0);
14340 if (xri >= phba->sli4_hba.max_cfg_param.max_xri) {
14341 spin_unlock_irq(&phba->hbalock);
14342 return NO_XRI;
14343 } else {
14344 set_bit(xri, phba->sli4_hba.xri_bmask);
14345 phba->sli4_hba.max_cfg_param.xri_used++;
14346 }
14347 spin_unlock_irq(&phba->hbalock);
14348 return xri;
14349}
14350
14351
14352
14353
14354
14355
14356
14357
14358static void
14359__lpfc_sli4_free_xri(struct lpfc_hba *phba, int xri)
14360{
14361 if (test_and_clear_bit(xri, phba->sli4_hba.xri_bmask)) {
14362 phba->sli4_hba.max_cfg_param.xri_used--;
14363 }
14364}
14365
14366
14367
14368
14369
14370
14371
14372
/**
 * lpfc_sli4_free_xri - Release an XRI back to the free pool
 * @phba: Pointer to HBA context object.
 * @xri: XRI to release.
 *
 * Locking wrapper: takes the hba lock and delegates the actual bitmask
 * update to __lpfc_sli4_free_xri().
 */
void
lpfc_sli4_free_xri(struct lpfc_hba *phba, int xri)
{
	spin_lock_irq(&phba->hbalock);
	__lpfc_sli4_free_xri(phba, xri);
	spin_unlock_irq(&phba->hbalock);
}
14380
14381
14382
14383
14384
14385
14386
14387
14388
14389
14390
14391uint16_t
14392lpfc_sli4_next_xritag(struct lpfc_hba *phba)
14393{
14394 uint16_t xri_index;
14395
14396 xri_index = lpfc_sli4_alloc_xri(phba);
14397 if (xri_index == NO_XRI)
14398 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
14399 "2004 Failed to allocate XRI.last XRITAG is %d"
14400 " Max XRI is %d, Used XRI is %d\n",
14401 xri_index,
14402 phba->sli4_hba.max_cfg_param.max_xri,
14403 phba->sli4_hba.max_cfg_param.xri_used);
14404 return xri_index;
14405}
14406
14407
14408
14409
14410
14411
14412
14413
14414
14415
14416
14417
/**
 * lpfc_sli4_post_els_sgl_list - Post a block of ELS sgl pages to the port
 * @phba: Pointer to HBA context object.
 * @post_sgl_list: List of lpfc_sglq entries whose pages are posted.
 * @post_cnt: Number of entries in @post_sgl_list.
 *            NOTE(review): @post_cnt is currently unused -- the request
 *            length and xricnt are derived from phba->sli4_hba.els_xri_cnt
 *            instead; confirm the list always holds exactly that many.
 *
 * Builds a non-embedded POST_SGL_PAGES mailbox command holding one
 * page-pair per sglq (second page always 0) and issues it, by polling or
 * by waiting on the completion depending on whether interrupts are
 * enabled.
 *
 * Return: 0 on success, -ENOMEM on allocation/size failure, or -ENXIO
 * when the mailbox command fails.
 */
static int
lpfc_sli4_post_els_sgl_list(struct lpfc_hba *phba,
			    struct list_head *post_sgl_list,
			    int post_cnt)
{
	struct lpfc_sglq *sglq_entry = NULL, *sglq_next = NULL;
	struct lpfc_mbx_post_uembed_sgl_page1 *sgl;
	struct sgl_page_pairs *sgl_pg_pairs;
	void *viraddr;
	LPFC_MBOXQ_t *mbox;
	uint32_t reqlen, alloclen, pg_pairs;
	uint32_t mbox_tmo;
	uint16_t xritag_start = 0;
	int rc = 0;
	uint32_t shdr_status, shdr_add_status;
	union lpfc_sli4_cfg_shdr *shdr;

	/* The whole request must fit in one non-embedded SGE page. */
	reqlen = phba->sli4_hba.els_xri_cnt * sizeof(struct sgl_page_pairs) +
		 sizeof(union lpfc_sli4_cfg_shdr) + sizeof(uint32_t);
	if (reqlen > SLI4_PAGE_SIZE) {
		lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
				"2559 Block sgl registration required DMA "
				"size (%d) great than a page\n", reqlen);
		return -ENOMEM;
	}
	mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!mbox)
		return -ENOMEM;

	/* Allocate DMA memory and set up the non-embedded mailbox command */
	alloclen = lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_FCOE,
			 LPFC_MBOX_OPCODE_FCOE_POST_SGL_PAGES, reqlen,
			 LPFC_SLI4_MBX_NEMBED);

	if (alloclen < reqlen) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"0285 Allocated DMA memory size (%d) is "
				"less than the requested DMA memory "
				"size (%d)\n", alloclen, reqlen);
		lpfc_sli4_mbox_cmd_free(phba, mbox);
		return -ENOMEM;
	}
	/* The command payload lives in the first external SGE. */
	viraddr = mbox->sge_array->addr[0];
	sgl = (struct lpfc_mbx_post_uembed_sgl_page1 *)viraddr;
	sgl_pg_pairs = &sgl->sgl_pg_pairs;

	pg_pairs = 0;
	list_for_each_entry_safe(sglq_entry, sglq_next, post_sgl_list, list) {
		/* Each sglq uses a single page (second page is zero). */
		sgl_pg_pairs->sgl_pg0_addr_lo =
				cpu_to_le32(putPaddrLow(sglq_entry->phys));
		sgl_pg_pairs->sgl_pg0_addr_hi =
				cpu_to_le32(putPaddrHigh(sglq_entry->phys));
		sgl_pg_pairs->sgl_pg1_addr_lo =
				cpu_to_le32(putPaddrLow(0));
		sgl_pg_pairs->sgl_pg1_addr_hi =
				cpu_to_le32(putPaddrHigh(0));

		/* Track the starting sgl index for the sgl page */
		if (pg_pairs == 0)
			xritag_start = sglq_entry->sli4_xritag;
		sgl_pg_pairs++;
		pg_pairs++;
	}

	/* Complete initialization and perform endian conversion. */
	bf_set(lpfc_post_sgl_pages_xri, sgl, xritag_start);
	bf_set(lpfc_post_sgl_pages_xricnt, sgl, phba->sli4_hba.els_xri_cnt);
	sgl->word0 = cpu_to_le32(sgl->word0);
	if (!phba->sli4_hba.intr_enable)
		rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
	else {
		mbox_tmo = lpfc_mbox_tmo_val(phba, mbox);
		rc = lpfc_sli_issue_mbox_wait(phba, mbox, mbox_tmo);
	}
	shdr = (union lpfc_sli4_cfg_shdr *) &sgl->cfg_shdr;
	shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
	shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
	/* Keep the mailbox on timeout; the firmware may still own it. */
	if (rc != MBX_TIMEOUT)
		lpfc_sli4_mbox_cmd_free(phba, mbox);
	if (shdr_status || shdr_add_status || rc) {
		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
				"2513 POST_SGL_BLOCK mailbox command failed "
				"status x%x add_status x%x mbx status x%x\n",
				shdr_status, shdr_add_status, rc);
		rc = -ENXIO;
	}
	return rc;
}
14508
14509
14510
14511
14512
14513
14514
14515
14516
14517
14518
14519
/**
 * lpfc_sli4_post_scsi_sgl_block - Post a block of SCSI sgl pages to the port
 * @phba: Pointer to HBA context object.
 * @sblist: List of lpfc_scsi_buf entries whose BPL pages are posted.
 * @count: Number of SCSI buffers in @sblist.
 *
 * Builds a non-embedded POST_SGL_PAGES mailbox command holding one
 * page-pair per SCSI buffer and issues it, by polling or by waiting on
 * the completion depending on whether interrupts are enabled.  When the
 * per-command DMA buffer exceeds one SGL page, the second page of the
 * pair points SGL_PAGE_SIZE past the first; otherwise it is zero.
 *
 * Return: 0 on success, -ENOMEM on allocation/size failure, or -ENXIO
 * when the mailbox command fails.
 */
int
lpfc_sli4_post_scsi_sgl_block(struct lpfc_hba *phba,
			      struct list_head *sblist,
			      int count)
{
	struct lpfc_scsi_buf *psb;
	struct lpfc_mbx_post_uembed_sgl_page1 *sgl;
	struct sgl_page_pairs *sgl_pg_pairs;
	void *viraddr;
	LPFC_MBOXQ_t *mbox;
	uint32_t reqlen, alloclen, pg_pairs;
	uint32_t mbox_tmo;
	uint16_t xritag_start = 0;
	int rc = 0;
	uint32_t shdr_status, shdr_add_status;
	dma_addr_t pdma_phys_bpl1;
	union lpfc_sli4_cfg_shdr *shdr;

	/* The whole request must fit in one non-embedded SGE page. */
	reqlen = count * sizeof(struct sgl_page_pairs) +
		 sizeof(union lpfc_sli4_cfg_shdr) + sizeof(uint32_t);
	if (reqlen > SLI4_PAGE_SIZE) {
		lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
				"0217 Block sgl registration required DMA "
				"size (%d) great than a page\n", reqlen);
		return -ENOMEM;
	}
	mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!mbox) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"0283 Failed to allocate mbox cmd memory\n");
		return -ENOMEM;
	}

	/* Allocate DMA memory and set up the non-embedded mailbox command */
	alloclen = lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_FCOE,
				LPFC_MBOX_OPCODE_FCOE_POST_SGL_PAGES, reqlen,
				LPFC_SLI4_MBX_NEMBED);

	if (alloclen < reqlen) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"2561 Allocated DMA memory size (%d) is "
				"less than the requested DMA memory "
				"size (%d)\n", alloclen, reqlen);
		lpfc_sli4_mbox_cmd_free(phba, mbox);
		return -ENOMEM;
	}

	/* Get the first SGE entry from the non-embedded DMA memory */
	viraddr = mbox->sge_array->addr[0];

	/* Set up the SGL pages in the non-embedded DMA pages */
	sgl = (struct lpfc_mbx_post_uembed_sgl_page1 *)viraddr;
	sgl_pg_pairs = &sgl->sgl_pg_pairs;

	pg_pairs = 0;
	list_for_each_entry(psb, sblist, list) {
		/* Set up the sge entry */
		sgl_pg_pairs->sgl_pg0_addr_lo =
			cpu_to_le32(putPaddrLow(psb->dma_phys_bpl));
		sgl_pg_pairs->sgl_pg0_addr_hi =
			cpu_to_le32(putPaddrHigh(psb->dma_phys_bpl));
		/* Second page only used for oversized DMA buffers. */
		if (phba->cfg_sg_dma_buf_size > SGL_PAGE_SIZE)
			pdma_phys_bpl1 = psb->dma_phys_bpl + SGL_PAGE_SIZE;
		else
			pdma_phys_bpl1 = 0;
		sgl_pg_pairs->sgl_pg1_addr_lo =
			cpu_to_le32(putPaddrLow(pdma_phys_bpl1));
		sgl_pg_pairs->sgl_pg1_addr_hi =
			cpu_to_le32(putPaddrHigh(pdma_phys_bpl1));
		/* Keep the first xritag on the list */
		if (pg_pairs == 0)
			xritag_start = psb->cur_iocbq.sli4_xritag;
		sgl_pg_pairs++;
		pg_pairs++;
	}
	bf_set(lpfc_post_sgl_pages_xri, sgl, xritag_start);
	bf_set(lpfc_post_sgl_pages_xricnt, sgl, pg_pairs);
	/* Perform endian conversion if necessary */
	sgl->word0 = cpu_to_le32(sgl->word0);

	if (!phba->sli4_hba.intr_enable)
		rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
	else {
		mbox_tmo = lpfc_mbox_tmo_val(phba, mbox);
		rc = lpfc_sli_issue_mbox_wait(phba, mbox, mbox_tmo);
	}
	shdr = (union lpfc_sli4_cfg_shdr *) &sgl->cfg_shdr;
	shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
	shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
	/* Keep the mailbox on timeout; the firmware may still own it. */
	if (rc != MBX_TIMEOUT)
		lpfc_sli4_mbox_cmd_free(phba, mbox);
	if (shdr_status || shdr_add_status || rc) {
		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
				"2564 POST_SGL_BLOCK mailbox command failed "
				"status x%x add_status x%x mbx status x%x\n",
				shdr_status, shdr_add_status, rc);
		rc = -ENXIO;
	}
	return rc;
}
14621
14622
14623
14624
14625
14626
14627
14628
14629
14630
14631
14632static int
14633lpfc_fc_frame_check(struct lpfc_hba *phba, struct fc_frame_header *fc_hdr)
14634{
14635
14636 static char *rctl_names[] = FC_RCTL_NAMES_INIT;
14637 char *type_names[] = FC_TYPE_NAMES_INIT;
14638 struct fc_vft_header *fc_vft_hdr;
14639 uint32_t *header = (uint32_t *) fc_hdr;
14640
14641 switch (fc_hdr->fh_r_ctl) {
14642 case FC_RCTL_DD_UNCAT:
14643 case FC_RCTL_DD_SOL_DATA:
14644 case FC_RCTL_DD_UNSOL_CTL:
14645 case FC_RCTL_DD_SOL_CTL:
14646 case FC_RCTL_DD_UNSOL_DATA:
14647 case FC_RCTL_DD_DATA_DESC:
14648 case FC_RCTL_DD_UNSOL_CMD:
14649 case FC_RCTL_DD_CMD_STATUS:
14650 case FC_RCTL_ELS_REQ:
14651 case FC_RCTL_ELS_REP:
14652 case FC_RCTL_ELS4_REQ:
14653 case FC_RCTL_ELS4_REP:
14654 case FC_RCTL_BA_NOP:
14655 case FC_RCTL_BA_ABTS:
14656 case FC_RCTL_BA_RMC:
14657 case FC_RCTL_BA_ACC:
14658 case FC_RCTL_BA_RJT:
14659 case FC_RCTL_BA_PRMT:
14660 case FC_RCTL_ACK_1:
14661 case FC_RCTL_ACK_0:
14662 case FC_RCTL_P_RJT:
14663 case FC_RCTL_F_RJT:
14664 case FC_RCTL_P_BSY:
14665 case FC_RCTL_F_BSY:
14666 case FC_RCTL_F_BSYL:
14667 case FC_RCTL_LCR:
14668 case FC_RCTL_END:
14669 break;
14670 case FC_RCTL_VFTH:
14671 fc_vft_hdr = (struct fc_vft_header *)fc_hdr;
14672 fc_hdr = &((struct fc_frame_header *)fc_vft_hdr)[1];
14673 return lpfc_fc_frame_check(phba, fc_hdr);
14674 default:
14675 goto drop;
14676 }
14677 switch (fc_hdr->fh_type) {
14678 case FC_TYPE_BLS:
14679 case FC_TYPE_ELS:
14680 case FC_TYPE_FCP:
14681 case FC_TYPE_CT:
14682 break;
14683 case FC_TYPE_IP:
14684 case FC_TYPE_ILS:
14685 default:
14686 goto drop;
14687 }
14688
14689 lpfc_printf_log(phba, KERN_INFO, LOG_ELS,
14690 "2538 Received frame rctl:%s (x%x), type:%s (x%x), "
14691 "frame Data:%08x %08x %08x %08x %08x %08x %08x\n",
14692 rctl_names[fc_hdr->fh_r_ctl], fc_hdr->fh_r_ctl,
14693 type_names[fc_hdr->fh_type], fc_hdr->fh_type,
14694 be32_to_cpu(header[0]), be32_to_cpu(header[1]),
14695 be32_to_cpu(header[2]), be32_to_cpu(header[3]),
14696 be32_to_cpu(header[4]), be32_to_cpu(header[5]),
14697 be32_to_cpu(header[6]));
14698 return 0;
14699drop:
14700 lpfc_printf_log(phba, KERN_WARNING, LOG_ELS,
14701 "2539 Dropped frame rctl:%s type:%s\n",
14702 rctl_names[fc_hdr->fh_r_ctl],
14703 type_names[fc_hdr->fh_type]);
14704 return 1;
14705}
14706
14707
14708
14709
14710
14711
14712
14713
14714
14715static uint32_t
14716lpfc_fc_hdr_get_vfi(struct fc_frame_header *fc_hdr)
14717{
14718 struct fc_vft_header *fc_vft_hdr = (struct fc_vft_header *)fc_hdr;
14719
14720 if (fc_hdr->fh_r_ctl != FC_RCTL_VFTH)
14721 return 0;
14722 return bf_get(fc_vft_hdr_vf_id, fc_vft_hdr);
14723}
14724
14725
14726
14727
14728
14729
14730
14731
14732
14733
14734
14735
14736
14737static struct lpfc_vport *
14738lpfc_fc_frame_to_vport(struct lpfc_hba *phba, struct fc_frame_header *fc_hdr,
14739 uint16_t fcfi)
14740{
14741 struct lpfc_vport **vports;
14742 struct lpfc_vport *vport = NULL;
14743 int i;
14744 uint32_t did = (fc_hdr->fh_d_id[0] << 16 |
14745 fc_hdr->fh_d_id[1] << 8 |
14746 fc_hdr->fh_d_id[2]);
14747
14748 if (did == Fabric_DID)
14749 return phba->pport;
14750 if ((phba->pport->fc_flag & FC_PT2PT) &&
14751 !(phba->link_state == LPFC_HBA_READY))
14752 return phba->pport;
14753
14754 vports = lpfc_create_vport_work_array(phba);
14755 if (vports != NULL)
14756 for (i = 0; i <= phba->max_vpi && vports[i] != NULL; i++) {
14757 if (phba->fcf.fcfi == fcfi &&
14758 vports[i]->vfi == lpfc_fc_hdr_get_vfi(fc_hdr) &&
14759 vports[i]->fc_myDID == did) {
14760 vport = vports[i];
14761 break;
14762 }
14763 }
14764 lpfc_destroy_vport_work_array(phba, vports);
14765 return vport;
14766}
14767
14768
14769
14770
14771
14772
14773
14774
14775
14776
14777
14778static void
14779lpfc_update_rcv_time_stamp(struct lpfc_vport *vport)
14780{
14781 struct lpfc_dmabuf *h_buf;
14782 struct hbq_dmabuf *dmabuf = NULL;
14783
14784
14785 h_buf = list_get_first(&vport->rcv_buffer_list,
14786 struct lpfc_dmabuf, list);
14787 if (!h_buf)
14788 return;
14789 dmabuf = container_of(h_buf, struct hbq_dmabuf, hbuf);
14790 vport->rcv_buffer_time_stamp = dmabuf->time_stamp;
14791}
14792
14793
14794
14795
14796
14797
14798
14799
14800
/**
 * lpfc_cleanup_rcv_buffers - Free all partially received sequences
 * @vport: pointer to a virtual port.
 *
 * Walks vport->rcv_buffer_list and frees every partially assembled
 * receive sequence: first each data frame queued on the sequence
 * header's dbuf list, then the header buffer itself.
 **/
void
lpfc_cleanup_rcv_buffers(struct lpfc_vport *vport)
{
	struct lpfc_dmabuf *h_buf, *hnext;
	struct lpfc_dmabuf *d_buf, *dnext;
	struct hbq_dmabuf *dmabuf = NULL;

	/* each h_buf is the header frame of one partial sequence */
	list_for_each_entry_safe(h_buf, hnext, &vport->rcv_buffer_list, list) {
		dmabuf = container_of(h_buf, struct hbq_dmabuf, hbuf);
		list_del_init(&dmabuf->hbuf.list);
		/* free all frames chained behind this sequence header */
		list_for_each_entry_safe(d_buf, dnext,
					 &dmabuf->dbuf.list, list) {
			list_del_init(&d_buf->list);
			lpfc_in_buf_free(vport->phba, d_buf);
		}
		lpfc_in_buf_free(vport->phba, &dmabuf->dbuf);
	}
}
14820
14821
14822
14823
14824
14825
14826
14827
14828
14829
14830
14831
14832
/**
 * lpfc_rcv_seq_check_edtov - Free partial sequences that exceeded E_D_TOV
 * @vport: pointer to a virtual port.
 *
 * Frees every partially assembled receive sequence whose per-sequence
 * time stamp is older than E_D_TOV (vport->phba->fc_edtov, in ms).
 * The list is maintained oldest-first, so the scan stops at the first
 * sequence that has not yet expired.  If any sequences were discarded,
 * the vport's oldest-sequence time stamp is refreshed.
 **/
void
lpfc_rcv_seq_check_edtov(struct lpfc_vport *vport)
{
	struct lpfc_dmabuf *h_buf, *hnext;
	struct lpfc_dmabuf *d_buf, *dnext;
	struct hbq_dmabuf *dmabuf = NULL;
	unsigned long timeout;
	int abort_count = 0;

	/* Quick exit: nothing queued, or even the oldest is still fresh */
	timeout = (msecs_to_jiffies(vport->phba->fc_edtov) +
		   vport->rcv_buffer_time_stamp);
	if (list_empty(&vport->rcv_buffer_list) ||
	    time_before(jiffies, timeout))
		return;

	/* start with the oldest sequence on the rcv buffer list */
	list_for_each_entry_safe(h_buf, hnext, &vport->rcv_buffer_list, list) {
		dmabuf = container_of(h_buf, struct hbq_dmabuf, hbuf);
		timeout = (msecs_to_jiffies(vport->phba->fc_edtov) +
			   dmabuf->time_stamp);
		if (time_before(jiffies, timeout))
			break;
		abort_count++;
		list_del_init(&dmabuf->hbuf.list);
		/* free all frames chained behind this sequence header */
		list_for_each_entry_safe(d_buf, dnext,
					 &dmabuf->dbuf.list, list) {
			list_del_init(&d_buf->list);
			lpfc_in_buf_free(vport->phba, d_buf);
		}
		lpfc_in_buf_free(vport->phba, &dmabuf->dbuf);
	}
	if (abort_count)
		lpfc_update_rcv_time_stamp(vport);
}
14866
14867
14868
14869
14870
14871
14872
14873
14874
14875
14876
14877
14878
/**
 * lpfc_fc_frame_add - Add a received frame to the sequence it belongs to
 * @vport: pointer to a virtual port.
 * @dmabuf: buffer holding the newly received frame.
 *
 * Locates the partially assembled sequence (matched by SEQ_ID, OX_ID
 * and S_ID) on vport->rcv_buffer_list and inserts the frame into it in
 * SEQ_CNT order.  If no matching sequence exists, the frame starts a
 * new one.  Returns the sequence's header hbq_dmabuf on success, or
 * NULL when no insertion point was found (e.g. a frame whose SEQ_CNT
 * duplicates one already queued).
 **/
static struct hbq_dmabuf *
lpfc_fc_frame_add(struct lpfc_vport *vport, struct hbq_dmabuf *dmabuf)
{
	struct fc_frame_header *new_hdr;
	struct fc_frame_header *temp_hdr;
	struct lpfc_dmabuf *d_buf;
	struct lpfc_dmabuf *h_buf;
	struct hbq_dmabuf *seq_dmabuf = NULL;
	struct hbq_dmabuf *temp_dmabuf = NULL;
	uint8_t	found = 0;

	INIT_LIST_HEAD(&dmabuf->dbuf.list);
	dmabuf->time_stamp = jiffies;
	new_hdr = (struct fc_frame_header *)dmabuf->hbuf.virt;

	/* Find the sequence this frame belongs to: same SEQ_ID/OX_ID/S_ID */
	list_for_each_entry(h_buf, &vport->rcv_buffer_list, list) {
		temp_hdr = (struct fc_frame_header *)h_buf->virt;
		if ((temp_hdr->fh_seq_id != new_hdr->fh_seq_id) ||
		    (temp_hdr->fh_ox_id != new_hdr->fh_ox_id) ||
		    (memcmp(&temp_hdr->fh_s_id, &new_hdr->fh_s_id, 3)))
			continue;
		/* found a pending sequence that matches this frame */
		seq_dmabuf = container_of(h_buf, struct hbq_dmabuf, hbuf);
		break;
	}
	if (!seq_dmabuf) {
		/*
		 * This indicates first frame received for this sequence.
		 * Queue the buffer on the vport's rcv_buffer_list.
		 */
		list_add_tail(&dmabuf->hbuf.list, &vport->rcv_buffer_list);
		lpfc_update_rcv_time_stamp(vport);
		return dmabuf;
	}
	temp_hdr = seq_dmabuf->hbuf.virt;
	if (be16_to_cpu(new_hdr->fh_seq_cnt) <
		be16_to_cpu(temp_hdr->fh_seq_cnt)) {
		/* New frame precedes the current header: make it the new
		 * sequence header and splice the old chain behind it.
		 */
		list_del_init(&seq_dmabuf->hbuf.list);
		list_add_tail(&dmabuf->hbuf.list, &vport->rcv_buffer_list);
		list_add_tail(&dmabuf->dbuf.list, &seq_dmabuf->dbuf.list);
		lpfc_update_rcv_time_stamp(vport);
		return dmabuf;
	}
	/* move this sequence to the tail to mark it the youngest one */
	list_move_tail(&seq_dmabuf->hbuf.list, &vport->rcv_buffer_list);
	seq_dmabuf->time_stamp = jiffies;
	lpfc_update_rcv_time_stamp(vport);
	if (list_empty(&seq_dmabuf->dbuf.list)) {
		temp_hdr = dmabuf->hbuf.virt;
		list_add_tail(&dmabuf->dbuf.list, &seq_dmabuf->dbuf.list);
		return seq_dmabuf;
	}
	/* find the correct place in the sequence to insert this frame,
	 * scanning from the tail (highest SEQ_CNT) backwards.
	 */
	d_buf = list_entry(seq_dmabuf->dbuf.list.prev, typeof(*d_buf), list);
	while (!found) {
		temp_dmabuf = container_of(d_buf, struct hbq_dmabuf, dbuf);
		temp_hdr = (struct fc_frame_header *)temp_dmabuf->hbuf.virt;
		/*
		 * If the frame's sequence count is greater than the frame on
		 * the list then insert the frame right after this frame
		 */
		if (be16_to_cpu(new_hdr->fh_seq_cnt) >
			be16_to_cpu(temp_hdr->fh_seq_cnt)) {
			list_add(&dmabuf->dbuf.list, &temp_dmabuf->dbuf.list);
			found = 1;
			break;
		}

		if (&d_buf->list == &seq_dmabuf->dbuf.list)
			break;
		d_buf = list_entry(d_buf->list.prev, typeof(*d_buf), list);
	}

	if (found)
		return seq_dmabuf;
	/* no insertion point (duplicate SEQ_CNT): caller drops the frame */
	return NULL;
}
14957
14958
14959
14960
14961
14962
14963
14964
14965
14966
14967
14968
14969
14970
14971
14972
14973
/**
 * lpfc_sli4_abort_partial_seq - Abort a partially assembled receive sequence
 * @vport: pointer to a virtual port.
 * @dmabuf: buffer holding the received ABTS frame.
 *
 * Searches vport->rcv_buffer_list for a partial sequence matching the
 * ABTS frame's SEQ_ID, OX_ID and S_ID.  If found, frees all data
 * frames queued on it and returns true; otherwise returns false.
 *
 * NOTE(review): only the queued data frames are freed here; the
 * sequence header buffer itself stays on the rcv_buffer_list —
 * presumably reclaimed later by the E_D_TOV scan; verify.
 **/
static bool
lpfc_sli4_abort_partial_seq(struct lpfc_vport *vport,
			    struct hbq_dmabuf *dmabuf)
{
	struct fc_frame_header *new_hdr;
	struct fc_frame_header *temp_hdr;
	struct lpfc_dmabuf *d_buf, *n_buf, *h_buf;
	struct hbq_dmabuf *seq_dmabuf = NULL;

	/* Use the hdr_buf to find the sequence that matches this frame */
	INIT_LIST_HEAD(&dmabuf->dbuf.list);
	INIT_LIST_HEAD(&dmabuf->hbuf.list);
	new_hdr = (struct fc_frame_header *)dmabuf->hbuf.virt;
	list_for_each_entry(h_buf, &vport->rcv_buffer_list, list) {
		temp_hdr = (struct fc_frame_header *)h_buf->virt;
		if ((temp_hdr->fh_seq_id != new_hdr->fh_seq_id) ||
		    (temp_hdr->fh_ox_id != new_hdr->fh_ox_id) ||
		    (memcmp(&temp_hdr->fh_s_id, &new_hdr->fh_s_id, 3)))
			continue;
		/* found a pending sequence that matches this frame */
		seq_dmabuf = container_of(h_buf, struct hbq_dmabuf, hbuf);
		break;
	}

	/* Free up all the frames from the partially assembled sequence */
	if (seq_dmabuf) {
		list_for_each_entry_safe(d_buf, n_buf,
					 &seq_dmabuf->dbuf.list, list) {
			list_del_init(&d_buf->list);
			lpfc_in_buf_free(vport->phba, d_buf);
		}
		return true;
	}
	return false;
}
15009
15010
15011
15012
15013
15014
15015
15016
15017
15018
15019
15020
15021
15022
15023
15024
15025
15026static bool
15027lpfc_sli4_abort_ulp_seq(struct lpfc_vport *vport, struct hbq_dmabuf *dmabuf)
15028{
15029 struct lpfc_hba *phba = vport->phba;
15030 int handled;
15031
15032
15033 if (phba->sli_rev < LPFC_SLI_REV4)
15034 return false;
15035
15036
15037 handled = lpfc_ct_handle_unsol_abort(phba, dmabuf);
15038 if (handled)
15039 return true;
15040
15041 return false;
15042}
15043
15044
15045
15046
15047
15048
15049
15050
15051
15052
15053
/**
 * lpfc_sli4_seq_abort_rsp_cmpl - BLS ABORT RSP sequence abort iocb completion
 * @phba: pointer to the HBA structure.
 * @cmd_iocbq: the completed BLS response iocb (may be NULL).
 * @rsp_iocbq: the response iocb (may be NULL).
 *
 * Releases the node reference taken when the BLS response was built
 * (and drops the node if it is no longer in use), then frees the
 * command iocb.  Logs an error if the response carries a failure
 * status.
 **/
static void
lpfc_sli4_seq_abort_rsp_cmpl(struct lpfc_hba *phba,
			     struct lpfc_iocbq *cmd_iocbq,
			     struct lpfc_iocbq *rsp_iocbq)
{
	struct lpfc_nodelist *ndlp;

	if (cmd_iocbq) {
		ndlp = (struct lpfc_nodelist *)cmd_iocbq->context1;
		/* drop the reference taken at issue time, then let the
		 * node go if nothing else holds it.
		 */
		lpfc_nlp_put(ndlp);
		lpfc_nlp_not_used(ndlp);
		lpfc_sli_release_iocbq(phba, cmd_iocbq);
	}

	/* Failure means BLS ABORT RSP did not get delivered to remote node*/
	if (rsp_iocbq && rsp_iocbq->iocb.ulpStatus)
		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
			"3154 BLS ABORT RSP failed, data:  x%x/x%x\n",
			rsp_iocbq->iocb.ulpStatus,
			rsp_iocbq->iocb.un.ulpWord[4]);
}
15075
15076
15077
15078
15079
15080
15081
15082
15083
15084uint16_t
15085lpfc_sli4_xri_inrange(struct lpfc_hba *phba,
15086 uint16_t xri)
15087{
15088 uint16_t i;
15089
15090 for (i = 0; i < phba->sli4_hba.max_cfg_param.max_xri; i++) {
15091 if (xri == phba->sli4_hba.xri_ids[i])
15092 return i;
15093 }
15094 return NO_XRI;
15095}
15096
15097
15098
15099
15100
15101
15102
15103
15104
15105static void
15106lpfc_sli4_seq_abort_rsp(struct lpfc_vport *vport,
15107 struct fc_frame_header *fc_hdr, bool aborted)
15108{
15109 struct lpfc_hba *phba = vport->phba;
15110 struct lpfc_iocbq *ctiocb = NULL;
15111 struct lpfc_nodelist *ndlp;
15112 uint16_t oxid, rxid, xri, lxri;
15113 uint32_t sid, fctl;
15114 IOCB_t *icmd;
15115 int rc;
15116
15117 if (!lpfc_is_link_up(phba))
15118 return;
15119
15120 sid = sli4_sid_from_fc_hdr(fc_hdr);
15121 oxid = be16_to_cpu(fc_hdr->fh_ox_id);
15122 rxid = be16_to_cpu(fc_hdr->fh_rx_id);
15123
15124 ndlp = lpfc_findnode_did(vport, sid);
15125 if (!ndlp) {
15126 ndlp = mempool_alloc(phba->nlp_mem_pool, GFP_KERNEL);
15127 if (!ndlp) {
15128 lpfc_printf_vlog(vport, KERN_WARNING, LOG_ELS,
15129 "1268 Failed to allocate ndlp for "
15130 "oxid:x%x SID:x%x\n", oxid, sid);
15131 return;
15132 }
15133 lpfc_nlp_init(vport, ndlp, sid);
15134
15135 lpfc_enqueue_node(vport, ndlp);
15136 } else if (!NLP_CHK_NODE_ACT(ndlp)) {
15137
15138 ndlp = lpfc_enable_node(vport, ndlp, NLP_STE_UNUSED_NODE);
15139 if (!ndlp) {
15140 lpfc_printf_vlog(vport, KERN_WARNING, LOG_ELS,
15141 "3275 Failed to active ndlp found "
15142 "for oxid:x%x SID:x%x\n", oxid, sid);
15143 return;
15144 }
15145 }
15146
15147
15148 ctiocb = lpfc_sli_get_iocbq(phba);
15149 if (!ctiocb)
15150 return;
15151
15152
15153 fctl = sli4_fctl_from_fc_hdr(fc_hdr);
15154
15155 icmd = &ctiocb->iocb;
15156 icmd->un.xseq64.bdl.bdeSize = 0;
15157 icmd->un.xseq64.bdl.ulpIoTag32 = 0;
15158 icmd->un.xseq64.w5.hcsw.Dfctl = 0;
15159 icmd->un.xseq64.w5.hcsw.Rctl = FC_RCTL_BA_ACC;
15160 icmd->un.xseq64.w5.hcsw.Type = FC_TYPE_BLS;
15161
15162
15163 icmd->ulpCommand = CMD_XMIT_BLS_RSP64_CX;
15164 icmd->ulpBdeCount = 0;
15165 icmd->ulpLe = 1;
15166 icmd->ulpClass = CLASS3;
15167 icmd->ulpContext = phba->sli4_hba.rpi_ids[ndlp->nlp_rpi];
15168 ctiocb->context1 = lpfc_nlp_get(ndlp);
15169
15170 ctiocb->iocb_cmpl = NULL;
15171 ctiocb->vport = phba->pport;
15172 ctiocb->iocb_cmpl = lpfc_sli4_seq_abort_rsp_cmpl;
15173 ctiocb->sli4_lxritag = NO_XRI;
15174 ctiocb->sli4_xritag = NO_XRI;
15175
15176 if (fctl & FC_FC_EX_CTX)
15177
15178
15179
15180 xri = oxid;
15181 else
15182 xri = rxid;
15183 lxri = lpfc_sli4_xri_inrange(phba, xri);
15184 if (lxri != NO_XRI)
15185 lpfc_set_rrq_active(phba, ndlp, lxri,
15186 (xri == oxid) ? rxid : oxid, 0);
15187
15188
15189
15190
15191
15192 if ((fctl & FC_FC_EX_CTX) &&
15193 (lxri > lpfc_sli4_get_els_iocb_cnt(phba))) {
15194 icmd->un.xseq64.w5.hcsw.Rctl = FC_RCTL_BA_RJT;
15195 bf_set(lpfc_vndr_code, &icmd->un.bls_rsp, 0);
15196 bf_set(lpfc_rsn_expln, &icmd->un.bls_rsp, FC_BA_RJT_INV_XID);
15197 bf_set(lpfc_rsn_code, &icmd->un.bls_rsp, FC_BA_RJT_UNABLE);
15198 }
15199
15200
15201
15202
15203
15204 if (aborted == false) {
15205 icmd->un.xseq64.w5.hcsw.Rctl = FC_RCTL_BA_RJT;
15206 bf_set(lpfc_vndr_code, &icmd->un.bls_rsp, 0);
15207 bf_set(lpfc_rsn_expln, &icmd->un.bls_rsp, FC_BA_RJT_INV_XID);
15208 bf_set(lpfc_rsn_code, &icmd->un.bls_rsp, FC_BA_RJT_UNABLE);
15209 }
15210
15211 if (fctl & FC_FC_EX_CTX) {
15212
15213
15214
15215
15216 bf_set(lpfc_abts_orig, &icmd->un.bls_rsp, LPFC_ABTS_UNSOL_RSP);
15217 } else {
15218
15219
15220
15221
15222 bf_set(lpfc_abts_orig, &icmd->un.bls_rsp, LPFC_ABTS_UNSOL_INT);
15223 }
15224 bf_set(lpfc_abts_rxid, &icmd->un.bls_rsp, rxid);
15225 bf_set(lpfc_abts_oxid, &icmd->un.bls_rsp, oxid);
15226
15227
15228 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
15229 "1200 Send BLS cmd x%x on oxid x%x Data: x%x\n",
15230 icmd->un.xseq64.w5.hcsw.Rctl, oxid, phba->link_state);
15231
15232 rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, ctiocb, 0);
15233 if (rc == IOCB_ERROR) {
15234 lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS,
15235 "2925 Failed to issue CT ABTS RSP x%x on "
15236 "xri x%x, Data x%x\n",
15237 icmd->un.xseq64.w5.hcsw.Rctl, oxid,
15238 phba->link_state);
15239 lpfc_nlp_put(ndlp);
15240 ctiocb->context1 = NULL;
15241 lpfc_sli_release_iocbq(phba, ctiocb);
15242 }
15243}
15244
15245
15246
15247
15248
15249
15250
15251
15252
15253
15254
15255
15256
15257
/**
 * lpfc_sli4_handle_unsol_abort - Handle an unsolicited ABTS frame
 * @vport: pointer to a virtual port.
 * @dmabuf: buffer holding the received ABTS frame.
 *
 * Decides whether the ABTS targets a sequence the local side owns
 * (F_CTL exchange-context bit set: nothing to abort locally, accept
 * immediately) or a sequence the driver is assembling / the ULP owns.
 * Frees the ABTS frame buffer and sends the BA_ACC or BA_RJT response.
 **/
static void
lpfc_sli4_handle_unsol_abort(struct lpfc_vport *vport,
			     struct hbq_dmabuf *dmabuf)
{
	struct lpfc_hba *phba = vport->phba;
	struct fc_frame_header fc_hdr;
	uint32_t fctl;
	bool aborted;

	/* Copy the header before the frame buffer is freed below */
	memcpy(&fc_hdr, dmabuf->hbuf.virt, sizeof(struct fc_frame_header));
	fctl = sli4_fctl_from_fc_hdr(&fc_hdr);

	if (fctl & FC_FC_EX_CTX) {
		/* ABTS from the exchange responder: nothing held locally */
		aborted = true;
	} else {
		/* try aborting a partially assembled sequence first, then
		 * let the ULP (CT) layer handle it.
		 */
		aborted = lpfc_sli4_abort_partial_seq(vport, dmabuf);
		if (aborted == false)
			aborted = lpfc_sli4_abort_ulp_seq(vport, dmabuf);
	}
	lpfc_in_buf_free(phba, &dmabuf->dbuf);

	/* Respond with BA_ACC or BA_RJT accordingly */
	lpfc_sli4_seq_abort_rsp(vport, &fc_hdr, aborted);
}
15285
15286
15287
15288
15289
15290
15291
15292
15293
15294
15295
15296
15297
/**
 * lpfc_seq_complete - Determine if a sequence is fully assembled
 * @dmabuf: header buffer of the sequence under assembly.
 *
 * Returns 1 when the sequence starting at @dmabuf has a frame with
 * SEQ_CNT 0, every queued frame's SEQ_CNT is consecutive, and one of
 * the frames has the END_SEQ bit set in F_CTL; 0 otherwise.
 **/
static int
lpfc_seq_complete(struct hbq_dmabuf *dmabuf)
{
	struct fc_frame_header *hdr;
	struct lpfc_dmabuf *d_buf;
	struct hbq_dmabuf *seq_dmabuf;
	uint32_t fctl;
	int seq_count = 0;

	hdr = (struct fc_frame_header *)dmabuf->hbuf.virt;
	/* header frame must be SEQ_CNT 0 (zero is endian-neutral, so no
	 * byte swap is needed for this comparison).
	 */
	if (hdr->fh_seq_cnt != seq_count)
		return 0;
	fctl = (hdr->fh_f_ctl[0] << 16 |
		hdr->fh_f_ctl[1] << 8 |
		hdr->fh_f_ctl[2]);
	/* If last frame of sequence we can return success. */
	if (fctl & FC_FC_END_SEQ)
		return 1;
	list_for_each_entry(d_buf, &dmabuf->dbuf.list, list) {
		seq_dmabuf = container_of(d_buf, struct hbq_dmabuf, dbuf);
		hdr = (struct fc_frame_header *)seq_dmabuf->hbuf.virt;
		/* If there is a hole in the sequence count then fail. */
		if (++seq_count != be16_to_cpu(hdr->fh_seq_cnt))
			return 0;
		fctl = (hdr->fh_f_ctl[0] << 16 |
			hdr->fh_f_ctl[1] << 8 |
			hdr->fh_f_ctl[2]);
		/* If last frame of sequence we can return success. */
		if (fctl & FC_FC_END_SEQ)
			return 1;
	}
	return 0;
}
15332
15333
15334
15335
15336
15337
15338
15339
15340
15341
15342
15343
15344
15345
/**
 * lpfc_prep_seq - Prepare an iocb chain from a completed receive sequence
 * @vport: pointer to a virtual port.
 * @seq_dmabuf: header buffer of the fully assembled sequence.
 *
 * Converts the sequence's frame buffers into a chain of receive iocbs
 * for delivery to the upper layer.  Each iocb carries up to two frame
 * buffers (context2 and context3); additional iocbs are linked on
 * first_iocbq->list.  The accumulated length of the whole sequence is
 * tracked in unsli3.rcvsli3.acc_len.  If an iocb allocation fails
 * mid-sequence, the first iocb is marked IOSTAT_FCP_RSP_ERROR /
 * IOERR_NO_RESOURCES and the remaining frames are freed.
 *
 * Returns the first iocb of the chain, or NULL if no iocb could be
 * allocated at all.
 **/
static struct lpfc_iocbq *
lpfc_prep_seq(struct lpfc_vport *vport, struct hbq_dmabuf *seq_dmabuf)
{
	struct hbq_dmabuf *hbq_buf;
	struct lpfc_dmabuf *d_buf, *n_buf;
	struct lpfc_iocbq *first_iocbq, *iocbq;
	struct fc_frame_header *fc_hdr;
	uint32_t sid;
	uint32_t len, tot_len;
	struct ulp_bde64 *pbde;

	fc_hdr = (struct fc_frame_header *)seq_dmabuf->hbuf.virt;
	/* remove from receive buffer list */
	list_del_init(&seq_dmabuf->hbuf.list);
	lpfc_update_rcv_time_stamp(vport);
	/* get the Remote Port's SID */
	sid = sli4_sid_from_fc_hdr(fc_hdr);
	tot_len = 0;
	/* Get an iocbq struct to fill in. */
	first_iocbq = lpfc_sli_get_iocbq(vport->phba);
	if (first_iocbq) {
		/* Initialize the first IOCB. */
		first_iocbq->iocb.unsli3.rcvsli3.acc_len = 0;
		first_iocbq->iocb.ulpStatus = IOSTAT_SUCCESS;

		/* Check FC Header to see what TYPE of frame we are rcv'ing */
		if (sli4_type_from_fc_hdr(fc_hdr) == FC_TYPE_ELS) {
			first_iocbq->iocb.ulpCommand = CMD_IOCB_RCV_ELS64_CX;
			first_iocbq->iocb.un.rcvels.parmRo =
				sli4_did_from_fc_hdr(fc_hdr);
			first_iocbq->iocb.ulpPU = PARM_NPIV_DID;
		} else
			first_iocbq->iocb.ulpCommand = CMD_IOCB_RCV_SEQ64_CX;
		first_iocbq->iocb.ulpContext = NO_XRI;
		first_iocbq->iocb.unsli3.rcvsli3.ox_id =
			be16_to_cpu(fc_hdr->fh_ox_id);
		/* iocbq is prepped for internal consumption.  Physical vpi. */
		first_iocbq->iocb.unsli3.rcvsli3.vpi =
			vport->phba->vpi_ids[vport->vpi];
		/* put the first buffer into the first IOCBq */
		tot_len = bf_get(lpfc_rcqe_length,
				 &seq_dmabuf->cq_event.cqe.rcqe_cmpl);

		first_iocbq->context2 = &seq_dmabuf->dbuf;
		first_iocbq->context3 = NULL;
		first_iocbq->iocb.ulpBdeCount = 1;
		if (tot_len > LPFC_DATA_BUF_SIZE)
			first_iocbq->iocb.un.cont64[0].tus.f.bdeSize =
				LPFC_DATA_BUF_SIZE;
		else
			first_iocbq->iocb.un.cont64[0].tus.f.bdeSize = tot_len;

		first_iocbq->iocb.un.rcvels.remoteID = sid;

		first_iocbq->iocb.unsli3.rcvsli3.acc_len = tot_len;
	}
	iocbq = first_iocbq;
	/*
	 * Each IOCBq can have two Buffers assigned, so go through the list
	 * of buffers for this sequence and save two buffers in each IOCBq
	 */
	list_for_each_entry_safe(d_buf, n_buf, &seq_dmabuf->dbuf.list, list) {
		if (!iocbq) {
			lpfc_in_buf_free(vport->phba, d_buf);
			continue;
		}
		if (!iocbq->context3) {
			/* second buffer slot of the current iocb is free */
			iocbq->context3 = d_buf;
			iocbq->iocb.ulpBdeCount++;
			/* We need to get the size out of the right CQE */
			hbq_buf = container_of(d_buf, struct hbq_dmabuf, dbuf);
			len = bf_get(lpfc_rcqe_length,
				       &hbq_buf->cq_event.cqe.rcqe_cmpl);
			pbde = (struct ulp_bde64 *)
					&iocbq->iocb.unsli3.sli3Words[4];
			if (len > LPFC_DATA_BUF_SIZE)
				pbde->tus.f.bdeSize = LPFC_DATA_BUF_SIZE;
			else
				pbde->tus.f.bdeSize = len;

			iocbq->iocb.unsli3.rcvsli3.acc_len += len;
			tot_len += len;
		} else {
			/* current iocb is full: chain a new one */
			iocbq = lpfc_sli_get_iocbq(vport->phba);
			if (!iocbq) {
				if (first_iocbq) {
					first_iocbq->iocb.ulpStatus =
							IOSTAT_FCP_RSP_ERROR;
					first_iocbq->iocb.un.ulpWord[4] =
							IOERR_NO_RESOURCES;
				}
				lpfc_in_buf_free(vport->phba, d_buf);
				continue;
			}
			/* We need to get the size out of the right CQE */
			hbq_buf = container_of(d_buf, struct hbq_dmabuf, dbuf);
			len = bf_get(lpfc_rcqe_length,
				       &hbq_buf->cq_event.cqe.rcqe_cmpl);
			iocbq->context2 = d_buf;
			iocbq->context3 = NULL;
			iocbq->iocb.ulpBdeCount = 1;
			if (len > LPFC_DATA_BUF_SIZE)
				iocbq->iocb.un.cont64[0].tus.f.bdeSize =
							LPFC_DATA_BUF_SIZE;
			else
				iocbq->iocb.un.cont64[0].tus.f.bdeSize = len;

			tot_len += len;
			iocbq->iocb.unsli3.rcvsli3.acc_len = tot_len;

			iocbq->iocb.un.rcvels.remoteID = sid;
			list_add_tail(&iocbq->list, &first_iocbq->list);
		}
	}
	return first_iocbq;
}
15462
/**
 * lpfc_sli4_send_seq_to_ulp - Deliver a completed sequence to the ULP
 * @vport: pointer to a virtual port.
 * @seq_dmabuf: header buffer of the fully assembled sequence.
 *
 * Builds the receive iocb chain for the sequence and passes it to the
 * unsolicited-iocb handler on the ELS ring.  All iocbs in the chain
 * are released afterwards regardless of outcome.
 **/
static void
lpfc_sli4_send_seq_to_ulp(struct lpfc_vport *vport,
			  struct hbq_dmabuf *seq_dmabuf)
{
	struct fc_frame_header *fc_hdr;
	struct lpfc_iocbq *iocbq, *curr_iocb, *next_iocb;
	struct lpfc_hba *phba = vport->phba;

	fc_hdr = (struct fc_frame_header *)seq_dmabuf->hbuf.virt;
	iocbq = lpfc_prep_seq(vport, seq_dmabuf);
	if (!iocbq) {
		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
				"2707 Ring %d handler: Failed to allocate "
				"iocb Rctl x%x Type x%x received\n",
				LPFC_ELS_RING,
				fc_hdr->fh_r_ctl, fc_hdr->fh_type);
		return;
	}
	if (!lpfc_complete_unsol_iocb(phba,
				      &phba->sli.ring[LPFC_ELS_RING],
				      iocbq, fc_hdr->fh_r_ctl,
				      fc_hdr->fh_type))
		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
				"2540 Ring %d handler: unexpected Rctl "
				"x%x Type x%x received\n",
				LPFC_ELS_RING,
				fc_hdr->fh_r_ctl, fc_hdr->fh_type);

	/* Free iocb created in lpfc_prep_seq */
	list_for_each_entry_safe(curr_iocb, next_iocb,
		&iocbq->list, list) {
		list_del_init(&curr_iocb->list);
		lpfc_sli_release_iocbq(phba, curr_iocb);
	}
	lpfc_sli_release_iocbq(phba, iocbq);
}
15499
15500
15501
15502
15503
15504
15505
15506
15507
15508
15509
15510
15511
/**
 * lpfc_sli4_handle_received_buffer - Process a received unsolicited frame
 * @phba: pointer to the HBA structure.
 * @dmabuf: buffer holding the received frame.
 *
 * Validates the frame, resolves the owning vport (by FCFI and D_ID),
 * and dispatches it: unsolicited ABTS frames go to the abort handler,
 * other frames are added to their partial sequence and, once the
 * sequence is complete, delivered to the upper layer.  Invalid or
 * unroutable frames are dropped and their buffer freed.
 **/
void
lpfc_sli4_handle_received_buffer(struct lpfc_hba *phba,
				 struct hbq_dmabuf *dmabuf)
{
	struct hbq_dmabuf *seq_dmabuf;
	struct fc_frame_header *fc_hdr;
	struct lpfc_vport *vport;
	uint32_t fcfi;
	uint32_t did;

	/* Process each received buffer */
	fc_hdr = (struct fc_frame_header *)dmabuf->hbuf.virt;
	/* check to see if this a valid type of frame */
	if (lpfc_fc_frame_check(phba, fc_hdr)) {
		lpfc_in_buf_free(phba, &dmabuf->dbuf);
		return;
	}
	/* the FCFI field location differs between the V1 and V0 CQE */
	if ((bf_get(lpfc_cqe_code,
		    &dmabuf->cq_event.cqe.rcqe_cmpl) == CQE_CODE_RECEIVE_V1))
		fcfi = bf_get(lpfc_rcqe_fcf_id_v1,
			      &dmabuf->cq_event.cqe.rcqe_cmpl);
	else
		fcfi = bf_get(lpfc_rcqe_fcf_id,
			      &dmabuf->cq_event.cqe.rcqe_cmpl);

	vport = lpfc_fc_frame_to_vport(phba, fc_hdr, fcfi);
	if (!vport) {
		/* throw out the frame */
		lpfc_in_buf_free(phba, &dmabuf->dbuf);
		return;
	}

	/* d_id this frame is directed to */
	did = sli4_did_from_fc_hdr(fc_hdr);

	/* vport-registration check: unregistered vports may only see
	 * fabric-directed frames, except early pt2pt traffic.
	 */
	if (!(vport->vpi_state & LPFC_VPI_REGISTERED) &&
		(did != Fabric_DID)) {
		/*
		 * Throw out the frame if we are not pt2pt.
		 * The pt2pt protocol allows for discovery frames
		 * to be received without a registered VPI.
		 */
		if (!(vport->fc_flag & FC_PT2PT) ||
			(phba->link_state == LPFC_HBA_READY)) {
			lpfc_in_buf_free(phba, &dmabuf->dbuf);
			return;
		}
	}

	/* Handle the basic abort sequence (BA_ABTS) event */
	if (fc_hdr->fh_r_ctl == FC_RCTL_BA_ABTS) {
		lpfc_sli4_handle_unsol_abort(vport, dmabuf);
		return;
	}

	/* Link this frame */
	seq_dmabuf = lpfc_fc_frame_add(vport, dmabuf);
	if (!seq_dmabuf) {
		/* unable to add frame to vport - throw it out */
		lpfc_in_buf_free(phba, &dmabuf->dbuf);
		return;
	}
	/* If not last frame in sequence continue processing frames. */
	if (!lpfc_seq_complete(seq_dmabuf))
		return;

	/* Send the complete sequence to the upper layer protocol */
	lpfc_sli4_send_seq_to_ulp(vport, seq_dmabuf);
}
15582
15583
15584
15585
15586
15587
15588
15589
15590
15591
15592
15593
15594
15595
15596
15597
15598
15599
15600
15601
15602
15603
/**
 * lpfc_sli4_post_all_rpi_hdrs - Post all RPI header pages to the port
 * @phba: pointer to the HBA structure.
 *
 * Posts every RPI header page on lpfc_rpi_hdr_list to the SLI-4 port.
 * Returns 0 on success, -EIO if RPI extents are in use (headers are
 * not applicable then) or a posting fails.
 *
 * NOTE(review): the RPI-resources-ready flag is set at the exit label
 * even when a posting failed mid-loop — confirm this is intentional.
 **/
int
lpfc_sli4_post_all_rpi_hdrs(struct lpfc_hba *phba)
{
	struct lpfc_rpi_hdr *rpi_page;
	uint32_t rc = 0;
	uint16_t lrpi = 0;

	/* SLI4 ports that support extents do not require RPI headers. */
	if (!phba->sli4_hba.rpi_hdrs_in_use)
		goto exit;
	if (phba->sli4_hba.extents_in_use)
		return -EIO;

	list_for_each_entry(rpi_page, &phba->sli4_hba.lpfc_rpi_hdr_list, list) {
		/*
		 * Assign the rpi headers a physical rpi only if the driver
		 * has not initialized those resources.  A port reset only
		 * needs the headers posted.
		 */
		if (bf_get(lpfc_rpi_rsrc_rdy, &phba->sli4_hba.sli4_flags) !=
		    LPFC_RPI_RSRC_RDY)
			rpi_page->start_rpi = phba->sli4_hba.rpi_ids[lrpi];

		rc = lpfc_sli4_post_rpi_hdr(phba, rpi_page);
		if (rc != MBX_SUCCESS) {
			lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
					"2008 Error %d posting all rpi "
					"headers\n", rc);
			rc = -EIO;
			break;
		}
	}

 exit:
	bf_set(lpfc_rpi_rsrc_rdy, &phba->sli4_hba.sli4_flags,
	       LPFC_RPI_RSRC_RDY);
	return rc;
}
15642
15643
15644
15645
15646
15647
15648
15649
15650
15651
15652
15653
15654
15655
15656
/**
 * lpfc_sli4_post_rpi_hdr - Post one RPI header page to the port
 * @phba: pointer to the HBA structure.
 * @rpi_page: RPI header page to post.
 *
 * Issues the FCOE_POST_HDR_TEMPLATE mailbox command (polled) for the
 * given header page.  Returns 0 on success, -ENOMEM if the mailbox
 * cannot be allocated, -EIO if extents are in use, or -ENXIO on a
 * mailbox failure.  On MBX_TIMEOUT the mailbox memory is deliberately
 * not freed (the firmware may still complete it).
 **/
int
lpfc_sli4_post_rpi_hdr(struct lpfc_hba *phba, struct lpfc_rpi_hdr *rpi_page)
{
	LPFC_MBOXQ_t *mboxq;
	struct lpfc_mbx_post_hdr_tmpl *hdr_tmpl;
	uint32_t rc = 0;
	uint32_t shdr_status, shdr_add_status;
	union lpfc_sli4_cfg_shdr *shdr;

	/* SLI4 ports that support extents do not require RPI headers. */
	if (!phba->sli4_hba.rpi_hdrs_in_use)
		return rc;
	if (phba->sli4_hba.extents_in_use)
		return -EIO;

	/* The port is notified of the header region via a mailbox command. */
	mboxq = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!mboxq) {
		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
				"2001 Unable to allocate memory for issuing "
				"SLI_CONFIG_SPECIAL mailbox command\n");
		return -ENOMEM;
	}

	/* Post all rpi memory regions to the port. */
	hdr_tmpl = &mboxq->u.mqe.un.hdr_tmpl;
	lpfc_sli4_config(phba, mboxq, LPFC_MBOX_SUBSYSTEM_FCOE,
			 LPFC_MBOX_OPCODE_FCOE_POST_HDR_TEMPLATE,
			 sizeof(struct lpfc_mbx_post_hdr_tmpl) -
			 sizeof(struct lpfc_sli4_cfg_mhdr),
			 LPFC_SLI4_MBX_EMBED);


	/* Post the physical rpi to the port for this rpi header. */
	bf_set(lpfc_mbx_post_hdr_tmpl_rpi_offset, hdr_tmpl,
	       rpi_page->start_rpi);
	bf_set(lpfc_mbx_post_hdr_tmpl_page_cnt,
	       hdr_tmpl, rpi_page->page_count);

	hdr_tmpl->rpi_paddr_lo = putPaddrLow(rpi_page->dmabuf->phys);
	hdr_tmpl->rpi_paddr_hi = putPaddrHigh(rpi_page->dmabuf->phys);
	rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
	shdr = (union lpfc_sli4_cfg_shdr *) &hdr_tmpl->header.cfg_shdr;
	shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
	shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
	if (rc != MBX_TIMEOUT)
		mempool_free(mboxq, phba->mbox_mem_pool);
	if (shdr_status || shdr_add_status || rc) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"2514 POST_RPI_HDR mailbox failed with "
				"status x%x add_status x%x, mbx status x%x\n",
				shdr_status, shdr_add_status, rc);
		rc = -ENXIO;
	}
	return rc;
}
15713
15714
15715
15716
15717
15718
15719
15720
15721
15722
15723
15724
15725
15726
/**
 * lpfc_sli4_alloc_rpi - Allocate an available RPI
 * @phba: pointer to the HBA structure.
 *
 * Finds and claims the next free RPI in the bitmask under the hba
 * lock.  If the pool of provisioned RPIs drops below
 * LPFC_RPI_LOW_WATER_MARK and RPI headers are in use, a new RPI header
 * page is created and posted (outside the lock) to grow the pool.
 *
 * Returns the allocated RPI, or LPFC_RPI_ALLOC_ERROR when none is
 * available.
 **/
int
lpfc_sli4_alloc_rpi(struct lpfc_hba *phba)
{
	unsigned long rpi;
	uint16_t max_rpi, rpi_limit;
	uint16_t rpi_remaining, lrpi = 0;
	struct lpfc_rpi_hdr *rpi_hdr;
	unsigned long iflag;

	/*
	 * Fetch the next logical rpi.  Because this index is logical,
	 * the driver starts at 0 each time.
	 */
	spin_lock_irqsave(&phba->hbalock, iflag);
	max_rpi = phba->sli4_hba.max_cfg_param.max_rpi;
	rpi_limit = phba->sli4_hba.next_rpi;

	rpi = find_next_zero_bit(phba->sli4_hba.rpi_bmask, rpi_limit, 0);
	if (rpi >= rpi_limit)
		rpi = LPFC_RPI_ALLOC_ERROR;
	else {
		set_bit(rpi, phba->sli4_hba.rpi_bmask);
		phba->sli4_hba.max_cfg_param.rpi_used++;
		phba->sli4_hba.rpi_count++;
	}
	lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
			"0001 rpi:%x max:%x lim:%x\n",
			(int) rpi, max_rpi, rpi_limit);

	/*
	 * Don't try to allocate more rpi header regions if the device limit
	 * has been exhausted.
	 */
	if ((rpi == LPFC_RPI_ALLOC_ERROR) &&
	    (phba->sli4_hba.rpi_count >= max_rpi)) {
		spin_unlock_irqrestore(&phba->hbalock, iflag);
		return rpi;
	}

	/*
	 * RPI header postings are not required for SLI4 ports capable of
	 * extents.
	 */
	if (!phba->sli4_hba.rpi_hdrs_in_use) {
		spin_unlock_irqrestore(&phba->hbalock, iflag);
		return rpi;
	}

	/*
	 * If the driver is running low on rpi resources, allocate another
	 * page now.  Note that the next_rpi value is used because
	 * it represents how many are actually in use whereas max_rpi notes
	 * how many are supported max by the device.
	 */
	rpi_remaining = phba->sli4_hba.next_rpi - phba->sli4_hba.rpi_count;
	spin_unlock_irqrestore(&phba->hbalock, iflag);
	if (rpi_remaining < LPFC_RPI_LOW_WATER_MARK) {
		rpi_hdr = lpfc_sli4_create_rpi_hdr(phba);
		if (!rpi_hdr) {
			lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
					"2002 Error Could not grow rpi "
					"count\n");
		} else {
			lrpi = rpi_hdr->start_rpi;
			rpi_hdr->start_rpi = phba->sli4_hba.rpi_ids[lrpi];
			lpfc_sli4_post_rpi_hdr(phba, rpi_hdr);
		}
	}

	return rpi;
}
15798
15799
15800
15801
15802
15803
15804
15805
15806static void
15807__lpfc_sli4_free_rpi(struct lpfc_hba *phba, int rpi)
15808{
15809 if (test_and_clear_bit(rpi, phba->sli4_hba.rpi_bmask)) {
15810 phba->sli4_hba.rpi_count--;
15811 phba->sli4_hba.max_cfg_param.rpi_used--;
15812 }
15813}
15814
15815
15816
15817
15818
15819
15820
15821
/**
 * lpfc_sli4_free_rpi - Release an RPI to the pool
 * @phba: pointer to the HBA structure.
 * @rpi: the RPI to release.
 *
 * Locked wrapper around __lpfc_sli4_free_rpi().
 **/
void
lpfc_sli4_free_rpi(struct lpfc_hba *phba, int rpi)
{
	spin_lock_irq(&phba->hbalock);
	__lpfc_sli4_free_rpi(phba, rpi);
	spin_unlock_irq(&phba->hbalock);
}
15829
15830
15831
15832
15833
15834
15835
15836
/**
 * lpfc_sli4_remove_rpis - Free the RPI bitmask and id table
 * @phba: pointer to the HBA structure.
 *
 * Frees the RPI allocation bitmask and id array and clears the
 * RPI-resources-ready flag.  Called when RPI resources are torn down.
 **/
void
lpfc_sli4_remove_rpis(struct lpfc_hba *phba)
{
	kfree(phba->sli4_hba.rpi_bmask);
	kfree(phba->sli4_hba.rpi_ids);
	bf_set(lpfc_rpi_rsrc_rdy, &phba->sli4_hba.sli4_flags, 0);
}
15844
15845
15846
15847
15848
15849
15850
15851
/**
 * lpfc_sli4_resume_rpi - Issue a RESUME_RPI mailbox command for a node
 * @ndlp: node whose RPI is to be resumed.
 * @cmpl: optional completion handler for the mailbox command.
 * @arg: context passed to @cmpl via mboxq->context1.
 *
 * Allocates and issues a non-blocking RESUME_RPI mailbox command.
 * When @cmpl is NULL, the default mailbox completion is used.
 * Returns 0 on successful issue, -ENOMEM or -EIO on failure.
 **/
int
lpfc_sli4_resume_rpi(struct lpfc_nodelist *ndlp,
	void (*cmpl)(struct lpfc_hba *, LPFC_MBOXQ_t *), void *arg)
{
	LPFC_MBOXQ_t *mboxq;
	struct lpfc_hba *phba = ndlp->phba;
	int rc;

	/* The port is notified of the header region via a mailbox command. */
	mboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!mboxq)
		return -ENOMEM;

	/* Post all rpi memory regions to the port. */
	lpfc_resume_rpi(mboxq, ndlp);
	if (cmpl) {
		mboxq->mbox_cmpl = cmpl;
		mboxq->context1 = arg;
		mboxq->context2 = ndlp;
	} else
		mboxq->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
	mboxq->vport = ndlp->vport;
	rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_NOWAIT);
	if (rc == MBX_NOT_FINISHED) {
		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
				"2010 Resume RPI Mailbox failed "
				"status %d, mbxStatus x%x\n", rc,
				bf_get(lpfc_mqe_status, &mboxq->u.mqe));
		mempool_free(mboxq, phba->mbox_mem_pool);
		return -EIO;
	}
	return 0;
}
15885
15886
15887
15888
15889
15890
15891
15892
15893
15894
15895
/**
 * lpfc_sli4_init_vpi - Issue an INIT_VPI mailbox command for a vport
 * @vport: the vport whose VPI is to be initialized.
 *
 * Issues a blocking INIT_VPI mailbox command.  Returns MBX_SUCCESS
 * (0) on success, -ENOMEM if the mailbox cannot be allocated, or -EIO
 * on mailbox failure.  On MBX_TIMEOUT the mailbox memory is
 * deliberately not freed (the firmware may still complete it).
 **/
int
lpfc_sli4_init_vpi(struct lpfc_vport *vport)
{
	LPFC_MBOXQ_t *mboxq;
	int rc = 0;
	int retval = MBX_SUCCESS;
	uint32_t mbox_tmo;
	struct lpfc_hba *phba = vport->phba;
	mboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!mboxq)
		return -ENOMEM;
	lpfc_init_vpi(phba, mboxq, vport->vpi);
	mbox_tmo = lpfc_mbox_tmo_val(phba, mboxq);
	rc = lpfc_sli_issue_mbox_wait(phba, mboxq, mbox_tmo);
	if (rc != MBX_SUCCESS) {
		lpfc_printf_vlog(vport, KERN_ERR, LOG_SLI,
				"2022 INIT VPI Mailbox failed "
				"status %d, mbxStatus x%x\n", rc,
				bf_get(lpfc_mqe_status, &mboxq->u.mqe));
		retval = -EIO;
	}
	if (rc != MBX_TIMEOUT)
		mempool_free(mboxq, vport->phba->mbox_mem_pool);

	return retval;
}
15922
15923
15924
15925
15926
15927
15928
15929
15930
15931
/**
 * lpfc_mbx_cmpl_add_fcf_record - Completion handler for ADD_FCF_RECORD
 * @phba: pointer to the HBA structure.
 * @mboxq: the completed mailbox command.
 *
 * Checks the non-embedded mailbox response status in the first SGE and
 * logs an error on failure (STATUS_FCF_IN_USE is tolerated).  Always
 * frees the mailbox command and its SGE pages.
 **/
static void
lpfc_mbx_cmpl_add_fcf_record(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
{
	void *virt_addr;
	union lpfc_sli4_cfg_shdr *shdr;
	uint32_t shdr_status, shdr_add_status;

	virt_addr = mboxq->sge_array->addr[0];
	/* The IOCTL status is embedded in the mailbox subheader. */
	shdr = (union lpfc_sli4_cfg_shdr *) virt_addr;
	shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
	shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);

	if ((shdr_status || shdr_add_status) &&
		(shdr_status != STATUS_FCF_IN_USE))
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
			"2558 ADD_FCF_RECORD mailbox failed with "
			"status x%x add_status x%x\n",
			shdr_status, shdr_add_status);

	lpfc_sli4_mbox_cmd_free(phba, mboxq);
}
15954
15955
15956
15957
15958
15959
15960
15961
15962
15963
/**
 * lpfc_sli4_add_fcf_record - Issue an ADD_FCF_RECORD mailbox command
 * @phba: pointer to the HBA structure.
 * @fcf_record: the FCF record to add.
 *
 * Builds a non-embedded FCOE_ADD_FCF mailbox command carrying the FCF
 * index followed by the full FCF record, and issues it asynchronously.
 * Returns 0 on successful issue, -ENOMEM on allocation failure, or
 * -EIO when the mailbox cannot be issued.
 **/
int
lpfc_sli4_add_fcf_record(struct lpfc_hba *phba, struct fcf_record *fcf_record)
{
	int rc = 0;
	LPFC_MBOXQ_t *mboxq;
	uint8_t *bytep;
	void *virt_addr;
	struct lpfc_mbx_sge sge;
	uint32_t alloc_len, req_len;
	uint32_t fcfindex;

	mboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!mboxq) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
			"2009 Failed to allocate mbox for ADD_FCF cmd\n");
		return -ENOMEM;
	}

	/* payload: shdr + FCF index word + the FCF record itself */
	req_len = sizeof(struct fcf_record) + sizeof(union lpfc_sli4_cfg_shdr) +
		  sizeof(uint32_t);

	/* Allocate DMA memory and set up the non-embedded mailbox command */
	alloc_len = lpfc_sli4_config(phba, mboxq, LPFC_MBOX_SUBSYSTEM_FCOE,
				     LPFC_MBOX_OPCODE_FCOE_ADD_FCF,
				     req_len, LPFC_SLI4_MBX_NEMBED);
	if (alloc_len < req_len) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
			"2523 Allocated DMA memory size (x%x) is "
			"less than the requested DMA memory "
			"size (x%x)\n", alloc_len, req_len);
		lpfc_sli4_mbox_cmd_free(phba, mboxq);
		return -ENOMEM;
	}

	/*
	 * Get the first SGE entry from the non-embedded DMA memory.  This
	 * routine only uses a single SGE.
	 */
	lpfc_sli4_mbx_sge_get(mboxq, 0, &sge);
	virt_addr = mboxq->sge_array->addr[0];
	/*
	 * Configure the FCF record for FCFI 0.  This is the driver's
	 * hardcoded default and gets used in nonFIP mode.
	 */
	fcfindex = bf_get(lpfc_fcf_record_fcf_index, fcf_record);
	bytep = virt_addr + sizeof(union lpfc_sli4_cfg_shdr);
	lpfc_sli_pcimem_bcopy(&fcfindex, bytep, sizeof(uint32_t));

	/*
	 * Copy the fcf_index and the FCF Record Data.  The data starts after
	 * the FCoE header plus word10.  The data copy needs to be endian
	 * correct.
	 */
	bytep += sizeof(uint32_t);
	lpfc_sli_pcimem_bcopy(fcf_record, bytep, sizeof(struct fcf_record));
	mboxq->vport = phba->pport;
	mboxq->mbox_cmpl = lpfc_mbx_cmpl_add_fcf_record;
	rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_NOWAIT);
	if (rc == MBX_NOT_FINISHED) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
			"2515 ADD_FCF_RECORD mailbox failed with "
			"status 0x%x\n", rc);
		lpfc_sli4_mbox_cmd_free(phba, mboxq);
		rc = -EIO;
	} else
		rc = 0;

	return rc;
}
16033
16034
16035
16036
16037
16038
16039
16040
16041
16042
16043
/**
 * lpfc_sli4_build_dflt_fcf_record - Build the driver's default FCF record
 * @phba: pointer to the HBA structure.
 * @fcf_record: FCF record to populate (zeroed first).
 * @fcf_index: FCF index to assign to the record.
 *
 * Fills @fcf_record with the driver's default FCF settings: max
 * receive size, FKA advertisement period, FIP priority, the FC-MAP
 * based MAC address and FC map, valid/available flags, FPMA|SPMA MAC
 * provisioning, and — when a VLAN is configured — the VLAN bitmap bit
 * for phba->vlan_id.
 **/
void
lpfc_sli4_build_dflt_fcf_record(struct lpfc_hba *phba,
				struct fcf_record *fcf_record,
				uint16_t fcf_index)
{
	memset(fcf_record, 0, sizeof(struct fcf_record));
	fcf_record->max_rcv_size = LPFC_FCOE_MAX_RCV_SIZE;
	fcf_record->fka_adv_period = LPFC_FCOE_FKA_ADV_PER;
	fcf_record->fip_priority = LPFC_FCOE_FIP_PRIORITY;
	bf_set(lpfc_fcf_record_mac_0, fcf_record, phba->fc_map[0]);
	bf_set(lpfc_fcf_record_mac_1, fcf_record, phba->fc_map[1]);
	bf_set(lpfc_fcf_record_mac_2, fcf_record, phba->fc_map[2]);
	bf_set(lpfc_fcf_record_mac_3, fcf_record, LPFC_FCOE_FCF_MAC3);
	bf_set(lpfc_fcf_record_mac_4, fcf_record, LPFC_FCOE_FCF_MAC4);
	bf_set(lpfc_fcf_record_mac_5, fcf_record, LPFC_FCOE_FCF_MAC5);
	bf_set(lpfc_fcf_record_fc_map_0, fcf_record, phba->fc_map[0]);
	bf_set(lpfc_fcf_record_fc_map_1, fcf_record, phba->fc_map[1]);
	bf_set(lpfc_fcf_record_fc_map_2, fcf_record, phba->fc_map[2]);
	bf_set(lpfc_fcf_record_fcf_valid, fcf_record, 1);
	bf_set(lpfc_fcf_record_fcf_avail, fcf_record, 1);
	bf_set(lpfc_fcf_record_fcf_index, fcf_record, fcf_index);
	bf_set(lpfc_fcf_record_mac_addr_prov, fcf_record,
		LPFC_FCF_FPMA | LPFC_FCF_SPMA);
	/* Set the VLAN bit map */
	if (phba->valid_vlan) {
		fcf_record->vlan_bitmap[phba->vlan_id / 8]
			= 1 << (phba->vlan_id % 8);
	}
}
16073
16074
16075
16076
16077
16078
16079
16080
16081
16082
16083
16084
16085
/**
 * lpfc_sli4_fcf_scan_read_fcf_rec - Start an FCF table scan from an index
 * @phba: pointer to lpfc hba data structure.
 * @fcf_index: FCF table entry offset at which to start reading.
 *
 * Issues a non-blocking READ_FCF_TABLE mailbox command; the completion
 * handler lpfc_mbx_cmpl_fcf_scan_read_fcf_rec continues the scan.  Sets
 * the FCF_TS_INPROG flag while the command is outstanding and clears it
 * again on any failure within this function.
 *
 * Return 0 on successful issue, otherwise -ENOMEM, -EINVAL or -EIO.
 */
int
lpfc_sli4_fcf_scan_read_fcf_rec(struct lpfc_hba *phba, uint16_t fcf_index)
{
	int rc = 0, error;
	LPFC_MBOXQ_t *mboxq;

	/* Snapshot the FCoE event tags current at scan start time */
	phba->fcoe_eventtag_at_fcf_scan = phba->fcoe_eventtag;
	phba->fcoe_cvl_eventtag_attn = phba->fcoe_cvl_eventtag;
	mboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!mboxq) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"2000 Failed to allocate mbox for "
				"READ_FCF cmd\n");
		error = -ENOMEM;
		goto fail_fcf_scan;
	}
	/* Construct the read FCF record mailbox command */
	rc = lpfc_sli4_mbx_read_fcf_rec(phba, mboxq, fcf_index);
	if (rc) {
		error = -EINVAL;
		goto fail_fcf_scan;
	}
	/* Issue the mailbox command asynchronously */
	mboxq->vport = phba->pport;
	mboxq->mbox_cmpl = lpfc_mbx_cmpl_fcf_scan_read_fcf_rec;

	/* Mark the FCF table scan as in progress */
	spin_lock_irq(&phba->hbalock);
	phba->hba_flag |= FCF_TS_INPROG;
	spin_unlock_irq(&phba->hbalock);

	rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_NOWAIT);
	if (rc == MBX_NOT_FINISHED)
		error = -EIO;
	else {
		/* Starting a fresh scan: reset the eligible FCF count */
		if (fcf_index == LPFC_FCOE_FCF_GET_FIRST)
			phba->fcf.eligible_fcf_cnt = 0;
		error = 0;
	}
fail_fcf_scan:
	if (error) {
		if (mboxq)
			lpfc_sli4_mbox_cmd_free(phba, mboxq);
		/* Scan failed: clear the in-progress flag */
		spin_lock_irq(&phba->hbalock);
		phba->hba_flag &= ~FCF_TS_INPROG;
		spin_unlock_irq(&phba->hbalock);
	}
	return error;
}
16136
16137
16138
16139
16140
16141
16142
16143
16144
16145
16146
16147
16148int
16149lpfc_sli4_fcf_rr_read_fcf_rec(struct lpfc_hba *phba, uint16_t fcf_index)
16150{
16151 int rc = 0, error;
16152 LPFC_MBOXQ_t *mboxq;
16153
16154 mboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
16155 if (!mboxq) {
16156 lpfc_printf_log(phba, KERN_ERR, LOG_FIP | LOG_INIT,
16157 "2763 Failed to allocate mbox for "
16158 "READ_FCF cmd\n");
16159 error = -ENOMEM;
16160 goto fail_fcf_read;
16161 }
16162
16163 rc = lpfc_sli4_mbx_read_fcf_rec(phba, mboxq, fcf_index);
16164 if (rc) {
16165 error = -EINVAL;
16166 goto fail_fcf_read;
16167 }
16168
16169 mboxq->vport = phba->pport;
16170 mboxq->mbox_cmpl = lpfc_mbx_cmpl_fcf_rr_read_fcf_rec;
16171 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_NOWAIT);
16172 if (rc == MBX_NOT_FINISHED)
16173 error = -EIO;
16174 else
16175 error = 0;
16176
16177fail_fcf_read:
16178 if (error && mboxq)
16179 lpfc_sli4_mbox_cmd_free(phba, mboxq);
16180 return error;
16181}
16182
16183
16184
16185
16186
16187
16188
16189
16190
16191
16192
16193
16194int
16195lpfc_sli4_read_fcf_rec(struct lpfc_hba *phba, uint16_t fcf_index)
16196{
16197 int rc = 0, error;
16198 LPFC_MBOXQ_t *mboxq;
16199
16200 mboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
16201 if (!mboxq) {
16202 lpfc_printf_log(phba, KERN_ERR, LOG_FIP | LOG_INIT,
16203 "2758 Failed to allocate mbox for "
16204 "READ_FCF cmd\n");
16205 error = -ENOMEM;
16206 goto fail_fcf_read;
16207 }
16208
16209 rc = lpfc_sli4_mbx_read_fcf_rec(phba, mboxq, fcf_index);
16210 if (rc) {
16211 error = -EINVAL;
16212 goto fail_fcf_read;
16213 }
16214
16215 mboxq->vport = phba->pport;
16216 mboxq->mbox_cmpl = lpfc_mbx_cmpl_read_fcf_rec;
16217 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_NOWAIT);
16218 if (rc == MBX_NOT_FINISHED)
16219 error = -EIO;
16220 else
16221 error = 0;
16222
16223fail_fcf_read:
16224 if (error && mboxq)
16225 lpfc_sli4_mbox_cmd_free(phba, mboxq);
16226 return error;
16227}
16228
16229
16230
16231
16232
16233
16234
16235
16236
16237
16238
16239
16240
16241
/**
 * lpfc_check_next_fcf_pri_level - Repopulate rr bmask for next priority level
 * @phba: pointer to lpfc hba data structure.
 *
 * Rebuilds the roundrobin FCF failover bitmask from fcf_pri_list,
 * admitting only entries at the first priority level found that has not
 * yet failed FLOGI.  If every entry has failed FLOGI, the failed flags
 * are cleared and the list is walked again so the same set can be
 * retried from the top.
 *
 * Return 1 when the bitmask was repopulated from entries that had not
 * all failed (roundrobin can continue), 0 otherwise.
 */
static int
lpfc_check_next_fcf_pri_level(struct lpfc_hba *phba)
{
	uint16_t next_fcf_pri;
	uint16_t last_index;
	struct lpfc_fcf_pri *fcf_pri;
	int rc;
	int ret = 0;

	last_index = find_first_bit(phba->fcf.fcf_rr_bmask,
			LPFC_SLI4_FCF_TBL_INDX_MAX);
	lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
			"3060 Last IDX %d\n", last_index);

	/* No alternate to fail over to if the list has zero or one entry */
	spin_lock_irq(&phba->hbalock);
	if (list_empty(&phba->fcf.fcf_pri_list) ||
	    list_is_singular(&phba->fcf.fcf_pri_list)) {
		spin_unlock_irq(&phba->hbalock);
		lpfc_printf_log(phba, KERN_ERR, LOG_FIP,
			"3061 Last IDX %d\n", last_index);
		return 0;
	}
	spin_unlock_irq(&phba->hbalock);

	next_fcf_pri = 0;
	/*
	 * Clear the rr_bmask and refill it from the priority list.
	 * NOTE(review): the memset length is sizeof(*fcf_rr_bmask) — one
	 * unsigned long, not necessarily the full bitmap allocation.  This
	 * only clears the whole mask when LPFC_SLI4_FCF_TBL_INDX_MAX fits
	 * in a single long — confirm against the bmask allocation.
	 */
	memset(phba->fcf.fcf_rr_bmask, 0,
			sizeof(*phba->fcf.fcf_rr_bmask));
	spin_lock_irq(&phba->hbalock);
	list_for_each_entry(fcf_pri, &phba->fcf.fcf_pri_list, list) {
		if (fcf_pri->fcf_rec.flag & LPFC_FCF_FLOGI_FAILED)
			continue;
		/*
		 * The first priority seen that has not FLOGI-failed is
		 * taken as the level to populate.
		 */
		if (!next_fcf_pri)
			next_fcf_pri = fcf_pri->fcf_rec.priority;
		/*
		 * NOTE(review): hbalock is dropped here and re-taken at the
		 * bottom of the loop while iteration continues — this
		 * relies on the list not being mutated concurrently.
		 */
		spin_unlock_irq(&phba->hbalock);
		if (fcf_pri->fcf_rec.priority == next_fcf_pri) {
			rc = lpfc_sli4_fcf_rr_index_set(phba,
						fcf_pri->fcf_rec.fcf_index);
			if (rc)
				return 0;
		}
		spin_lock_irq(&phba->hbalock);
	}
	/*
	 * If next_fcf_pri was not set above and the list is not empty,
	 * every entry has failed FLOGI.  Reset the failed flags and
	 * repopulate from the beginning.
	 */
	if (!next_fcf_pri && !list_empty(&phba->fcf.fcf_pri_list)) {
		list_for_each_entry(fcf_pri, &phba->fcf.fcf_pri_list, list) {
			fcf_pri->fcf_rec.flag &= ~LPFC_FCF_FLOGI_FAILED;
			/*
			 * The first priority seen after clearing the failed
			 * flags becomes the level to populate.
			 */
			if (!next_fcf_pri)
				next_fcf_pri = fcf_pri->fcf_rec.priority;
			/* Same lock drop/reacquire pattern as above */
			spin_unlock_irq(&phba->hbalock);
			if (fcf_pri->fcf_rec.priority == next_fcf_pri) {
				rc = lpfc_sli4_fcf_rr_index_set(phba,
						fcf_pri->fcf_rec.fcf_index);
				if (rc)
					return 0;
			}
			spin_lock_irq(&phba->hbalock);
		}
	} else
		ret = 1;
	spin_unlock_irq(&phba->hbalock);

	return ret;
}
16322
16323
16324
16325
16326
16327
16328
16329
16330
16331
/**
 * lpfc_sli4_fcf_rr_next_index_get - Get next eligible roundrobin fcf index
 * @phba: pointer to lpfc hba data structure.
 *
 * Walks the roundrobin FCF failover bitmask starting just past the
 * currently registered FCF, wrapping to the beginning when the end of
 * the bitmask is reached.  When no other candidate exists, tries to
 * rebuild the bitmask for the next priority level via
 * lpfc_check_next_fcf_pri_level() and restarts the search.
 *
 * Return the next FCF table index to try, or LPFC_FCOE_FCF_NEXT_NONE
 * when the roundrobin bitmask is exhausted.
 */
uint16_t
lpfc_sli4_fcf_rr_next_index_get(struct lpfc_hba *phba)
{
	uint16_t next_fcf_index;

initial_priority:
	/* Search starts from the currently registered FCF index */
	next_fcf_index = phba->fcf.current_rec.fcf_indx;

next_priority:
	/* Advance to the next set bit past the current index */
	next_fcf_index = (next_fcf_index + 1) % LPFC_SLI4_FCF_TBL_INDX_MAX;
	next_fcf_index = find_next_bit(phba->fcf.fcf_rr_bmask,
				       LPFC_SLI4_FCF_TBL_INDX_MAX,
				       next_fcf_index);

	/* Wrap around to the beginning of the bitmask if needed */
	if (next_fcf_index >= LPFC_SLI4_FCF_TBL_INDX_MAX) {
		/*
		 * Nothing set at or past the current index; retry the
		 * search from bit 0.
		 */
		next_fcf_index = find_next_bit(phba->fcf.fcf_rr_bmask,
					       LPFC_SLI4_FCF_TBL_INDX_MAX, 0);
	}

	/*
	 * Either the bitmask is empty or the only candidate is the FCF
	 * already in use — the roundrobin list is effectively exhausted.
	 */
	if (next_fcf_index >= LPFC_SLI4_FCF_TBL_INDX_MAX ||
		next_fcf_index == phba->fcf.current_rec.fcf_indx) {
		/*
		 * Check whether lower-priority FCFs exist on the priority
		 * list; if so the bmask is repopulated at that level and
		 * the selection restarts from the current FCF.
		 */
		if (lpfc_check_next_fcf_pri_level(phba))
			goto initial_priority;
		lpfc_printf_log(phba, KERN_WARNING, LOG_FIP,
				"2844 No roundrobin failover FCF available\n");
		if (next_fcf_index >= LPFC_SLI4_FCF_TBL_INDX_MAX)
			return LPFC_FCOE_FCF_NEXT_NONE;
		else {
			/* Only the current FCF remains usable */
			lpfc_printf_log(phba, KERN_WARNING, LOG_FIP,
				"3063 Only FCF available idx %d, flag %x\n",
				next_fcf_index,
			phba->fcf.fcf_pri[next_fcf_index].fcf_rec.flag);
			return next_fcf_index;
		}
	}

	/* Skip an entry that already failed FLOGI, unless it is the only one */
	if (next_fcf_index < LPFC_SLI4_FCF_TBL_INDX_MAX &&
		phba->fcf.fcf_pri[next_fcf_index].fcf_rec.flag &
		LPFC_FCF_FLOGI_FAILED) {
		if (list_is_singular(&phba->fcf.fcf_pri_list))
			return LPFC_FCOE_FCF_NEXT_NONE;

		goto next_priority;
	}

	lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
			"2845 Get next roundrobin failover FCF (x%x)\n",
			next_fcf_index);

	return next_fcf_index;
}
16399
16400
16401
16402
16403
16404
16405
16406
16407
16408
16409
16410
16411
16412int
16413lpfc_sli4_fcf_rr_index_set(struct lpfc_hba *phba, uint16_t fcf_index)
16414{
16415 if (fcf_index >= LPFC_SLI4_FCF_TBL_INDX_MAX) {
16416 lpfc_printf_log(phba, KERN_ERR, LOG_FIP,
16417 "2610 FCF (x%x) reached driver's book "
16418 "keeping dimension:x%x\n",
16419 fcf_index, LPFC_SLI4_FCF_TBL_INDX_MAX);
16420 return -EINVAL;
16421 }
16422
16423 set_bit(fcf_index, phba->fcf.fcf_rr_bmask);
16424
16425 lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
16426 "2790 Set FCF (x%x) to roundrobin FCF failover "
16427 "bmask\n", fcf_index);
16428
16429 return 0;
16430}
16431
16432
16433
16434
16435
16436
16437
16438
16439
16440
16441void
16442lpfc_sli4_fcf_rr_index_clear(struct lpfc_hba *phba, uint16_t fcf_index)
16443{
16444 struct lpfc_fcf_pri *fcf_pri, *fcf_pri_next;
16445 if (fcf_index >= LPFC_SLI4_FCF_TBL_INDX_MAX) {
16446 lpfc_printf_log(phba, KERN_ERR, LOG_FIP,
16447 "2762 FCF (x%x) reached driver's book "
16448 "keeping dimension:x%x\n",
16449 fcf_index, LPFC_SLI4_FCF_TBL_INDX_MAX);
16450 return;
16451 }
16452
16453 spin_lock_irq(&phba->hbalock);
16454 list_for_each_entry_safe(fcf_pri, fcf_pri_next, &phba->fcf.fcf_pri_list,
16455 list) {
16456 if (fcf_pri->fcf_rec.fcf_index == fcf_index) {
16457 list_del_init(&fcf_pri->list);
16458 break;
16459 }
16460 }
16461 spin_unlock_irq(&phba->hbalock);
16462 clear_bit(fcf_index, phba->fcf.fcf_rr_bmask);
16463
16464 lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
16465 "2791 Clear FCF (x%x) from roundrobin failover "
16466 "bmask\n", fcf_index);
16467}
16468
16469
16470
16471
16472
16473
16474
16475
16476
/**
 * lpfc_mbx_cmpl_redisc_fcf_table - Completion handler for REDISCOVER_FCF
 * @phba: pointer to lpfc hba data structure.
 * @mbox: pointer to the completed mailbox command.
 *
 * Checks the config-header shadow status of the rediscover-FCF-table
 * request.  On failure, clears whichever rediscovery flag triggered the
 * request (FCF_ACVL_DISC or FCF_DEAD_DISC) and falls back to the
 * corresponding recovery path; on success, starts the FCF rediscovery
 * quiescent timer.  Always frees the mailbox back to the pool.
 */
static void
lpfc_mbx_cmpl_redisc_fcf_table(struct lpfc_hba *phba, LPFC_MBOXQ_t *mbox)
{
	struct lpfc_mbx_redisc_fcf_tbl *redisc_fcf;
	uint32_t shdr_status, shdr_add_status;

	redisc_fcf = &mbox->u.mqe.un.redisc_fcf_tbl;

	shdr_status = bf_get(lpfc_mbox_hdr_status,
			     &redisc_fcf->header.cfg_shdr.response);
	shdr_add_status = bf_get(lpfc_mbox_hdr_add_status,
			     &redisc_fcf->header.cfg_shdr.response);
	if (shdr_status || shdr_add_status) {
		lpfc_printf_log(phba, KERN_ERR, LOG_FIP,
				"2746 Requesting for FCF rediscovery failed "
				"status x%x add_status x%x\n",
				shdr_status, shdr_add_status);
		if (phba->fcf.fcf_flag & FCF_ACVL_DISC) {
			spin_lock_irq(&phba->hbalock);
			phba->fcf.fcf_flag &= ~FCF_ACVL_DISC;
			spin_unlock_irq(&phba->hbalock);
			/*
			 * CVL-triggered rediscovery failed: last resort is
			 * to retry discovery on the registered FCF entry.
			 */
			lpfc_retry_pport_discovery(phba);
		} else {
			spin_lock_irq(&phba->hbalock);
			phba->fcf.fcf_flag &= ~FCF_DEAD_DISC;
			spin_unlock_irq(&phba->hbalock);
			/*
			 * FCF-dead-triggered rediscovery failed: last
			 * resort is to treat it as a link-down fail-through
			 * to FCF registration.
			 */
			lpfc_sli4_fcf_dead_failthrough(phba);
		}
	} else {
		lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
				"2775 Start FCF rediscover quiescent timer\n");
		/*
		 * Rediscovery accepted: start the wait timer before
		 * rescanning the FCF record table.
		 */
		lpfc_fcf_redisc_wait_start_timer(phba);
	}

	mempool_free(mbox, phba->mbox_mem_pool);
}
16526
16527
16528
16529
16530
16531
16532
16533
16534int
16535lpfc_sli4_redisc_fcf_table(struct lpfc_hba *phba)
16536{
16537 LPFC_MBOXQ_t *mbox;
16538 struct lpfc_mbx_redisc_fcf_tbl *redisc_fcf;
16539 int rc, length;
16540
16541
16542 lpfc_cancel_all_vport_retry_delay_timer(phba);
16543
16544 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
16545 if (!mbox) {
16546 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
16547 "2745 Failed to allocate mbox for "
16548 "requesting FCF rediscover.\n");
16549 return -ENOMEM;
16550 }
16551
16552 length = (sizeof(struct lpfc_mbx_redisc_fcf_tbl) -
16553 sizeof(struct lpfc_sli4_cfg_mhdr));
16554 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_FCOE,
16555 LPFC_MBOX_OPCODE_FCOE_REDISCOVER_FCF,
16556 length, LPFC_SLI4_MBX_EMBED);
16557
16558 redisc_fcf = &mbox->u.mqe.un.redisc_fcf_tbl;
16559
16560 bf_set(lpfc_mbx_redisc_fcf_count, redisc_fcf, 0);
16561
16562
16563 mbox->vport = phba->pport;
16564 mbox->mbox_cmpl = lpfc_mbx_cmpl_redisc_fcf_table;
16565 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT);
16566
16567 if (rc == MBX_NOT_FINISHED) {
16568 mempool_free(mbox, phba->mbox_mem_pool);
16569 return -EIO;
16570 }
16571 return 0;
16572}
16573
16574
16575
16576
16577
16578
16579
16580
16581void
16582lpfc_sli4_fcf_dead_failthrough(struct lpfc_hba *phba)
16583{
16584 uint32_t link_state;
16585
16586
16587
16588
16589
16590
16591 link_state = phba->link_state;
16592 lpfc_linkdown(phba);
16593 phba->link_state = link_state;
16594
16595
16596 lpfc_unregister_unused_fcf(phba);
16597}
16598
16599
16600
16601
16602
16603
16604
16605
16606
16607
/**
 * lpfc_sli_get_config_region23 - Get sli3 port config region 23 data
 * @phba: pointer to lpfc hba data structure.
 * @rgn23_data: destination buffer, at least DMP_RGN23_SIZE bytes.
 *
 * Reads config region 23 from the adapter via repeated polled DUMP
 * mailbox commands, copying each returned chunk into @rgn23_data until
 * the region is exhausted or the buffer is full.
 *
 * Return the number of bytes copied into @rgn23_data (0 on failure).
 */
static uint32_t
lpfc_sli_get_config_region23(struct lpfc_hba *phba, char *rgn23_data)
{
	LPFC_MBOXQ_t *pmb = NULL;
	MAILBOX_t *mb;
	uint32_t offset = 0;
	int rc;

	if (!rgn23_data)
		return 0;

	pmb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!pmb) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"2600 failed to allocate mailbox memory\n");
		return 0;
	}
	mb = &pmb->u.mb;

	do {
		/* Dump the next chunk of region 23 at the current offset */
		lpfc_dump_mem(phba, pmb, offset, DMP_REGION_23);
		rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL);

		if (rc != MBX_SUCCESS) {
			lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
					"2601 failed to read config "
					"region 23, rc 0x%x Status 0x%x\n",
					rc, mb->mbxStatus);
			mb->un.varDmp.word_cnt = 0;
		}
		/*
		 * A zero count means the dump is finished (or a mailbox
		 * error occurred above) — either way we are done.
		 * NOTE(review): despite the name, word_cnt is compared
		 * against and copied as a byte count here — confirm
		 * against the DUMP mailbox definition.
		 */
		if (mb->un.varDmp.word_cnt == 0)
			break;
		if (mb->un.varDmp.word_cnt > DMP_RGN23_SIZE - offset)
			mb->un.varDmp.word_cnt = DMP_RGN23_SIZE - offset;

		lpfc_sli_pcimem_bcopy(((uint8_t *)mb) + DMP_RSP_OFFSET,
				      rgn23_data + offset,
				      mb->un.varDmp.word_cnt);
		offset += mb->un.varDmp.word_cnt;
	} while (mb->un.varDmp.word_cnt && offset < DMP_RGN23_SIZE);

	mempool_free(pmb, phba->mbox_mem_pool);
	return offset;
}
16656
16657
16658
16659
16660
16661
16662
16663
16664
16665
/**
 * lpfc_sli4_get_config_region23 - Get sli4 port config region 23 data
 * @phba: pointer to lpfc hba data structure.
 * @rgn23_data: destination buffer, at least DMP_RGN23_SIZE bytes.
 *
 * Reads config region 23 via a single polled SLI4 dump mailbox command.
 * The data arrives in a DMA buffer attached to the mailbox (context1)
 * and is copied into @rgn23_data.  The mailbox and DMA buffer are
 * released on all exit paths.
 *
 * Return the number of bytes copied into @rgn23_data (0 on failure).
 */
static uint32_t
lpfc_sli4_get_config_region23(struct lpfc_hba *phba, char *rgn23_data)
{
	LPFC_MBOXQ_t *mboxq = NULL;
	struct lpfc_dmabuf *mp = NULL;
	struct lpfc_mqe *mqe;
	uint32_t data_length = 0;
	int rc;

	if (!rgn23_data)
		return 0;

	mboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!mboxq) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"3105 failed to allocate mailbox memory\n");
		return 0;
	}

	if (lpfc_sli4_dump_cfg_rg23(phba, mboxq))
		goto out;
	mqe = &mboxq->u.mqe;
	/* dump_cfg_rg23 attached the DMA buffer to the mailbox */
	mp = (struct lpfc_dmabuf *) mboxq->context1;
	rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
	if (rc)
		goto out;
	/* mb_words[5] carries the returned byte count */
	data_length = mqe->un.mb_words[5];
	if (data_length == 0)
		goto out;
	if (data_length > DMP_RGN23_SIZE) {
		/* Firmware returned more than the region can hold — reject */
		data_length = 0;
		goto out;
	}
	lpfc_sli_pcimem_bcopy((char *)mp->virt, rgn23_data, data_length);
out:
	mempool_free(mboxq, phba->mbox_mem_pool);
	if (mp) {
		lpfc_mbuf_free(phba, mp->virt, mp->phys);
		kfree(mp);
	}
	return data_length;
}
16708
16709
16710
16711
16712
16713
16714
16715
16716
16717void
16718lpfc_sli_read_link_ste(struct lpfc_hba *phba)
16719{
16720 uint8_t *rgn23_data = NULL;
16721 uint32_t if_type, data_size, sub_tlv_len, tlv_offset;
16722 uint32_t offset = 0;
16723
16724
16725 rgn23_data = kzalloc(DMP_RGN23_SIZE, GFP_KERNEL);
16726 if (!rgn23_data)
16727 goto out;
16728
16729 if (phba->sli_rev < LPFC_SLI_REV4)
16730 data_size = lpfc_sli_get_config_region23(phba, rgn23_data);
16731 else {
16732 if_type = bf_get(lpfc_sli_intf_if_type,
16733 &phba->sli4_hba.sli_intf);
16734 if (if_type == LPFC_SLI_INTF_IF_TYPE_0)
16735 goto out;
16736 data_size = lpfc_sli4_get_config_region23(phba, rgn23_data);
16737 }
16738
16739 if (!data_size)
16740 goto out;
16741
16742
16743 if (memcmp(&rgn23_data[offset], LPFC_REGION23_SIGNATURE, 4)) {
16744 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
16745 "2619 Config region 23 has bad signature\n");
16746 goto out;
16747 }
16748 offset += 4;
16749
16750
16751 if (rgn23_data[offset] != LPFC_REGION23_VERSION) {
16752 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
16753 "2620 Config region 23 has bad version\n");
16754 goto out;
16755 }
16756 offset += 4;
16757
16758
16759 while (offset < data_size) {
16760 if (rgn23_data[offset] == LPFC_REGION23_LAST_REC)
16761 break;
16762
16763
16764
16765
16766 if ((rgn23_data[offset] != DRIVER_SPECIFIC_TYPE) ||
16767 (rgn23_data[offset + 2] != LINUX_DRIVER_ID) ||
16768 (rgn23_data[offset + 3] != 0)) {
16769 offset += rgn23_data[offset + 1] * 4 + 4;
16770 continue;
16771 }
16772
16773
16774 sub_tlv_len = rgn23_data[offset + 1] * 4;
16775 offset += 4;
16776 tlv_offset = 0;
16777
16778
16779
16780
16781 while ((offset < data_size) &&
16782 (tlv_offset < sub_tlv_len)) {
16783 if (rgn23_data[offset] == LPFC_REGION23_LAST_REC) {
16784 offset += 4;
16785 tlv_offset += 4;
16786 break;
16787 }
16788 if (rgn23_data[offset] != PORT_STE_TYPE) {
16789 offset += rgn23_data[offset + 1] * 4 + 4;
16790 tlv_offset += rgn23_data[offset + 1] * 4 + 4;
16791 continue;
16792 }
16793
16794
16795 if (!rgn23_data[offset + 2])
16796 phba->hba_flag |= LINK_DISABLED;
16797
16798 goto out;
16799 }
16800 }
16801
16802out:
16803 kfree(rgn23_data);
16804 return;
16805}
16806
16807
16808
16809
16810
16811
16812
16813
16814
16815
16816
16817
16818
16819
16820
16821
16822
16823
16824
16825
/**
 * lpfc_wr_object - Write an object (e.g. firmware image chunk) to the port
 * @phba: pointer to lpfc hba data structure.
 * @dmabuf_list: list of DMA buffers holding the data to write.
 * @size: total number of bytes to write across all buffers.
 * @offset: in/out — starting write offset; advanced on success by the
 *          byte count the firmware reports actually written.
 *
 * Builds a WRITE_OBJECT mailbox command from up to
 * LPFC_MBX_WR_CONFIG_MAX_BDE buffers (one SLI4 page each), setting the
 * EOF bit on the BDE that completes @size, and issues it polled or
 * blocking depending on interrupt mode.
 *
 * Return 0 on success, -ENOMEM if no mailbox could be allocated,
 * -ENXIO on any mailbox/shadow-status failure.
 */
int
lpfc_wr_object(struct lpfc_hba *phba, struct list_head *dmabuf_list,
	       uint32_t size, uint32_t *offset)
{
	struct lpfc_mbx_wr_object *wr_object;
	LPFC_MBOXQ_t *mbox;
	int rc = 0, i = 0;
	uint32_t shdr_status, shdr_add_status;
	uint32_t mbox_tmo;
	union lpfc_sli4_cfg_shdr *shdr;
	struct lpfc_dmabuf *dmabuf;
	uint32_t written = 0;

	mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!mbox)
		return -ENOMEM;

	lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON,
			LPFC_MBOX_OPCODE_WRITE_OBJECT,
			sizeof(struct lpfc_mbx_wr_object) -
			sizeof(struct lpfc_sli4_cfg_mhdr), LPFC_SLI4_MBX_EMBED);

	wr_object = (struct lpfc_mbx_wr_object *)&mbox->u.mqe.un.wr_object;
	wr_object->u.request.write_offset = *offset;
	/* Object name "/" — byte-swapped into the request's LE32 field */
	sprintf((uint8_t *)wr_object->u.request.object_name, "/");
	wr_object->u.request.object_name[0] =
		cpu_to_le32(wr_object->u.request.object_name[0]);
	bf_set(lpfc_wr_object_eof, &wr_object->u.request, 0);
	/* Map each dmabuf into a BDE, up to one SLI4 page per buffer */
	list_for_each_entry(dmabuf, dmabuf_list, list) {
		if (i >= LPFC_MBX_WR_CONFIG_MAX_BDE || written >= size)
			break;
		wr_object->u.request.bde[i].addrLow = putPaddrLow(dmabuf->phys);
		wr_object->u.request.bde[i].addrHigh =
			putPaddrHigh(dmabuf->phys);
		if (written + SLI4_PAGE_SIZE >= size) {
			/* Final chunk: trim to the remainder and flag EOF */
			wr_object->u.request.bde[i].tus.f.bdeSize =
				(size - written);
			written += (size - written);
			bf_set(lpfc_wr_object_eof, &wr_object->u.request, 1);
		} else {
			wr_object->u.request.bde[i].tus.f.bdeSize =
				SLI4_PAGE_SIZE;
			written += SLI4_PAGE_SIZE;
		}
		i++;
	}
	wr_object->u.request.bde_count = i;
	bf_set(lpfc_wr_object_write_length, &wr_object->u.request, written);
	if (!phba->sli4_hba.intr_enable)
		rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
	else {
		mbox_tmo = lpfc_mbox_tmo_val(phba, mbox);
		rc = lpfc_sli_issue_mbox_wait(phba, mbox, mbox_tmo);
	}
	/* The command status lives in the config shadow header */
	shdr = (union lpfc_sli4_cfg_shdr *) &wr_object->header.cfg_shdr;
	shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
	shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
	/* On timeout the firmware may still own the mailbox — don't free */
	if (rc != MBX_TIMEOUT)
		mempool_free(mbox, phba->mbox_mem_pool);
	if (shdr_status || shdr_add_status || rc) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"3025 Write Object mailbox failed with "
				"status x%x add_status x%x, mbx status x%x\n",
				shdr_status, shdr_add_status, rc);
		rc = -ENXIO;
	} else
		*offset += wr_object->u.response.actual_write_length;
	return rc;
}
16896
16897
16898
16899
16900
16901
16902
16903
16904
16905
/**
 * lpfc_cleanup_pending_mbox - Free up a vport's discovery mailbox commands
 * @vport: pointer to the vport being torn down.
 *
 * Cleans up REG_LOGIN64/REG_VPI mailbox commands belonging to @vport:
 * commands still on the pending queue are moved to a local list and
 * freed; the active command (if any) and commands already on the
 * completion queue are retargeted to the default completion handler and
 * flagged LPFC_MBX_IMED_UNREG so their RPIs are unregistered when they
 * complete.  Node references taken for REG_LOGIN64 commands are
 * released and NLP_IGNR_REG_CMPL is cleared on the affected nodes.
 */
void
lpfc_cleanup_pending_mbox(struct lpfc_vport *vport)
{
	struct lpfc_hba *phba = vport->phba;
	LPFC_MBOXQ_t *mb, *nextmb;
	struct lpfc_dmabuf *mp;
	struct lpfc_nodelist *ndlp;
	struct lpfc_nodelist *act_mbx_ndlp = NULL;
	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
	LIST_HEAD(mbox_cmd_list);
	uint8_t restart_loop;

	/* Phase 1: pull this vport's pending commands onto a local list */
	spin_lock_irq(&phba->hbalock);
	list_for_each_entry_safe(mb, nextmb, &phba->sli.mboxq, list) {
		if (mb->vport != vport)
			continue;

		if ((mb->u.mb.mbxCommand != MBX_REG_LOGIN64) &&
			(mb->u.mb.mbxCommand != MBX_REG_VPI))
			continue;

		list_del(&mb->list);
		list_add_tail(&mb->list, &mbox_cmd_list);
	}
	/* Phase 2: retarget the active mailbox command, if it is ours */
	mb = phba->sli.mbox_active;
	if (mb && (mb->vport == vport)) {
		if ((mb->u.mb.mbxCommand == MBX_REG_LOGIN64) ||
			(mb->u.mb.mbxCommand == MBX_REG_VPI))
			mb->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
		if (mb->u.mb.mbxCommand == MBX_REG_LOGIN64) {
			act_mbx_ndlp = (struct lpfc_nodelist *)mb->context2;
			/* Hold a node reference until the cleanup below */
			act_mbx_ndlp = lpfc_nlp_get(act_mbx_ndlp);
			/* Have the RPI unregistered at completion time */
			mb->mbox_flag |= LPFC_MBX_IMED_UNREG;
		}
	}
	/* Phase 3: retarget commands sitting on the completion queue */
	do {
		restart_loop = 0;
		list_for_each_entry(mb, &phba->sli.mboxq_cmpl, list) {
			/*
			 * Skip commands for other vports and commands this
			 * loop has already processed (IMED_UNREG set).
			 */
			if ((mb->vport != vport) ||
				(mb->mbox_flag & LPFC_MBX_IMED_UNREG))
				continue;

			if ((mb->u.mb.mbxCommand != MBX_REG_LOGIN64) &&
				(mb->u.mb.mbxCommand != MBX_REG_VPI))
				continue;

			mb->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
			if (mb->u.mb.mbxCommand == MBX_REG_LOGIN64) {
				ndlp = (struct lpfc_nodelist *)mb->context2;
				/* Have the RPI unregistered at completion */
				mb->mbox_flag |= LPFC_MBX_IMED_UNREG;
				restart_loop = 1;
				/*
				 * hbalock must be dropped to take the host
				 * lock; restart the scan afterwards since
				 * the list may have changed meanwhile.
				 */
				spin_unlock_irq(&phba->hbalock);
				spin_lock(shost->host_lock);
				ndlp->nlp_flag &= ~NLP_IGNR_REG_CMPL;
				spin_unlock(shost->host_lock);
				spin_lock_irq(&phba->hbalock);
				break;
			}
		}
	} while (restart_loop);

	spin_unlock_irq(&phba->hbalock);

	/* Free the commands captured from the pending queue */
	while (!list_empty(&mbox_cmd_list)) {
		list_remove_head(&mbox_cmd_list, mb, LPFC_MBOXQ_t, list);
		if (mb->u.mb.mbxCommand == MBX_REG_LOGIN64) {
			mp = (struct lpfc_dmabuf *) (mb->context1);
			if (mp) {
				__lpfc_mbuf_free(phba, mp->virt, mp->phys);
				kfree(mp);
			}
			ndlp = (struct lpfc_nodelist *) mb->context2;
			mb->context2 = NULL;
			if (ndlp) {
				spin_lock(shost->host_lock);
				ndlp->nlp_flag &= ~NLP_IGNR_REG_CMPL;
				spin_unlock(shost->host_lock);
				lpfc_nlp_put(ndlp);
			}
		}
		mempool_free(mb, phba->mbox_mem_pool);
	}

	/* Release the node reference taken for the active command */
	if (act_mbx_ndlp) {
		spin_lock(shost->host_lock);
		act_mbx_ndlp->nlp_flag &= ~NLP_IGNR_REG_CMPL;
		spin_unlock(shost->host_lock);
		lpfc_nlp_put(act_mbx_ndlp);
	}
}
17008
17009
17010
17011
17012
17013
17014
17015
17016
17017
17018
17019
17020uint32_t
17021lpfc_drain_txq(struct lpfc_hba *phba)
17022{
17023 LIST_HEAD(completions);
17024 struct lpfc_sli_ring *pring = &phba->sli.ring[LPFC_ELS_RING];
17025 struct lpfc_iocbq *piocbq = NULL;
17026 unsigned long iflags = 0;
17027 char *fail_msg = NULL;
17028 struct lpfc_sglq *sglq;
17029 union lpfc_wqe wqe;
17030 uint32_t txq_cnt = 0;
17031
17032 spin_lock_irqsave(&pring->ring_lock, iflags);
17033 list_for_each_entry(piocbq, &pring->txq, list) {
17034 txq_cnt++;
17035 }
17036
17037 if (txq_cnt > pring->txq_max)
17038 pring->txq_max = txq_cnt;
17039
17040 spin_unlock_irqrestore(&pring->ring_lock, iflags);
17041
17042 while (!list_empty(&pring->txq)) {
17043 spin_lock_irqsave(&pring->ring_lock, iflags);
17044
17045 piocbq = lpfc_sli_ringtx_get(phba, pring);
17046 if (!piocbq) {
17047 spin_unlock_irqrestore(&pring->ring_lock, iflags);
17048 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
17049 "2823 txq empty and txq_cnt is %d\n ",
17050 txq_cnt);
17051 break;
17052 }
17053 sglq = __lpfc_sli_get_sglq(phba, piocbq);
17054 if (!sglq) {
17055 __lpfc_sli_ringtx_put(phba, pring, piocbq);
17056 spin_unlock_irqrestore(&pring->ring_lock, iflags);
17057 break;
17058 }
17059 txq_cnt--;
17060
17061
17062
17063
17064 piocbq->sli4_lxritag = sglq->sli4_lxritag;
17065 piocbq->sli4_xritag = sglq->sli4_xritag;
17066 if (NO_XRI == lpfc_sli4_bpl2sgl(phba, piocbq, sglq))
17067 fail_msg = "to convert bpl to sgl";
17068 else if (lpfc_sli4_iocb2wqe(phba, piocbq, &wqe))
17069 fail_msg = "to convert iocb to wqe";
17070 else if (lpfc_sli4_wq_put(phba->sli4_hba.els_wq, &wqe))
17071 fail_msg = " - Wq is full";
17072 else
17073 lpfc_sli_ringtxcmpl_put(phba, pring, piocbq);
17074
17075 if (fail_msg) {
17076
17077 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
17078 "2822 IOCB failed %s iotag 0x%x "
17079 "xri 0x%x\n",
17080 fail_msg,
17081 piocbq->iotag, piocbq->sli4_xritag);
17082 list_add_tail(&piocbq->list, &completions);
17083 }
17084 spin_unlock_irqrestore(&pring->ring_lock, iflags);
17085 }
17086
17087
17088 lpfc_sli_cancel_iocbs(phba, &completions, IOSTAT_LOCAL_REJECT,
17089 IOERR_SLI_ABORTED);
17090
17091 return txq_cnt;
17092}
17093