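/*
 * lpfc_sli.c -- SLI (Service Level Interface) queue, ring, and mailbox
 * handling for the Emulex LightPulse Fibre Channel driver (lpfc).
 */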
#include <linux/blkdev.h>
#include <linux/pci.h>
#include <linux/interrupt.h>
#include <linux/delay.h>
#include <linux/slab.h>
#include <linux/lockdep.h>

#include <scsi/scsi.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_transport_fc.h>
#include <scsi/fc/fc_fs.h>
#include <linux/aer.h>

#include "lpfc_hw4.h"
#include "lpfc_hw.h"
#include "lpfc_sli.h"
#include "lpfc_sli4.h"
#include "lpfc_nl.h"
#include "lpfc_disc.h"
#include "lpfc_scsi.h"
#include "lpfc.h"
#include "lpfc_crtn.h"
#include "lpfc_logmsg.h"
#include "lpfc_compat.h"
#include "lpfc_debugfs.h"
#include "lpfc_vport.h"
#include "lpfc_version.h"

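/* Classification of the SLI-3 IOCB command codes handled by this file. */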
typedef enum _lpfc_iocb_type {
	LPFC_UNKNOWN_IOCB,
	LPFC_UNSOL_IOCB,
	LPFC_SOL_IOCB,
	LPFC_ABORT_IOCB
} lpfc_iocb_type;

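/* Forward declarations for routines local to this file. */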
static int lpfc_sli_issue_mbox_s4(struct lpfc_hba *, LPFC_MBOXQ_t *,
				  uint32_t);
static int lpfc_sli4_read_rev(struct lpfc_hba *, LPFC_MBOXQ_t *,
			      uint8_t *, uint32_t *);
static struct lpfc_iocbq *lpfc_sli4_els_wcqe_to_rspiocbq(struct lpfc_hba *,
							 struct lpfc_iocbq *);
static void lpfc_sli4_send_seq_to_ulp(struct lpfc_vport *,
				      struct hbq_dmabuf *);
static int lpfc_sli4_fp_handle_wcqe(struct lpfc_hba *, struct lpfc_queue *,
				    struct lpfc_cqe *);
static int lpfc_sli4_post_els_sgl_list(struct lpfc_hba *, struct list_head *,
				       int);
static void lpfc_sli4_hba_handle_eqe(struct lpfc_hba *, struct lpfc_eqe *,
				     uint32_t);
static bool lpfc_sli4_mbox_completions_pending(struct lpfc_hba *phba);
static bool lpfc_sli4_process_missed_mbox_completions(struct lpfc_hba *phba);

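/**
 * lpfc_get_iocb_from_iocbq - Get the iocb from the iocbq
 * @iocbq: I/O Control Block (IOCB) queue entry.
 *
 * Returns a pointer to the IOCB embedded in the driver iocb object.
 **/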
static IOCB_t *
lpfc_get_iocb_from_iocbq(struct lpfc_iocbq *iocbq)
{
	return &iocbq->iocb;
}

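/**
 * lpfc_sli4_wq_put - Put a Work Queue Entry on a Work Queue
 * @q: The Work Queue to operate on.
 * @wqe: The Work Queue Entry to put on the Work queue.
 *
 * Copies @wqe to the next available entry on @q and rings the Work Queue
 * doorbell to tell the HBA to start processing it. Returns 0 on success,
 * -ENOMEM if no entry is available, or -EINVAL on an unknown doorbell
 * format. The caller is expected to hold the hbalock.
 **/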
static int
lpfc_sli4_wq_put(struct lpfc_queue *q, union lpfc_wqe *wqe)
{
	union lpfc_wqe *temp_wqe;
	struct lpfc_register doorbell;
	uint32_t host_index;
	uint32_t idx;

	/* sanity check on queue memory */
	if (unlikely(!q))
		return -ENOMEM;
	temp_wqe = q->qe[q->host_index].wqe;

	/* If the host has not yet processed the next entry then we are done */
	idx = ((q->host_index + 1) % q->entry_count);
	if (idx == q->hba_index) {
		q->WQ_overflow++;
		return -ENOMEM;
	}
	q->WQ_posted++;
	/* set consumption flag every once in a while */
	if (!((q->host_index + 1) % q->entry_repost))
		bf_set(wqe_wqec, &wqe->generic.wqe_com, 1);
	if (q->phba->sli3_options & LPFC_SLI4_PHWQ_ENABLED)
		bf_set(wqe_wqid, &wqe->generic.wqe_com, q->queue_id);
	lpfc_sli_pcimem_bcopy(wqe, temp_wqe, q->entry_size);

	/* Update the host index before invoking device */
	host_index = q->host_index;

	q->host_index = idx;

	/* Ring Doorbell */
	doorbell.word0 = 0;
	if (q->db_format == LPFC_DB_LIST_FORMAT) {
		bf_set(lpfc_wq_db_list_fm_num_posted, &doorbell, 1);
		bf_set(lpfc_wq_db_list_fm_index, &doorbell, host_index);
		bf_set(lpfc_wq_db_list_fm_id, &doorbell, q->queue_id);
	} else if (q->db_format == LPFC_DB_RING_FORMAT) {
		bf_set(lpfc_wq_db_ring_fm_num_posted, &doorbell, 1);
		bf_set(lpfc_wq_db_ring_fm_id, &doorbell, q->queue_id);
	} else {
		return -EINVAL;
	}
	writel(doorbell.word0, q->db_regaddr);

	return 0;
}

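/**
 * lpfc_sli4_wq_release - Updates internal hba index for WQ
 * @q: The Work Queue to operate on.
 * @index: The index to advance the hba index to.
 *
 * Advances the queue's hba index up to @index, releasing the consumed
 * entries for reuse. Returns the number of entries released.
 **/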
static uint32_t
lpfc_sli4_wq_release(struct lpfc_queue *q, uint32_t index)
{
	uint32_t released = 0;

	/* sanity check on queue memory */
	if (unlikely(!q))
		return 0;

	if (q->hba_index == index)
		return 0;
	do {
		q->hba_index = ((q->hba_index + 1) % q->entry_count);
		released++;
	} while (q->hba_index != index);
	return released;
}

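/**
 * lpfc_sli4_mq_put - Put a Mailbox Queue Entry on a Mailbox Queue
 * @q: The Mailbox Queue to operate on.
 * @mqe: The Mailbox Queue Entry to put on the queue.
 *
 * Copies @mqe to the next available entry on @q, saves the mailbox
 * pointer for completion, and rings the MQ doorbell. Returns 0 on
 * success or -ENOMEM if no entry is available.
 **/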
static int
lpfc_sli4_mq_put(struct lpfc_queue *q, struct lpfc_mqe *mqe)
{
	struct lpfc_mqe *temp_mqe;
	struct lpfc_register doorbell;

	/* sanity check on queue memory */
	if (unlikely(!q))
		return -ENOMEM;
	temp_mqe = q->qe[q->host_index].mqe;

	/* If the host has not yet processed the next entry then we are done */
	if (((q->host_index + 1) % q->entry_count) == q->hba_index)
		return -ENOMEM;
	lpfc_sli_pcimem_bcopy(mqe, temp_mqe, q->entry_size);
	/* Save off the mailbox pointer for completion */
	q->phba->mbox = (MAILBOX_t *)temp_mqe;

	/* Update the host index before invoking device */
	q->host_index = ((q->host_index + 1) % q->entry_count);

	/* Ring Doorbell */
	doorbell.word0 = 0;
	bf_set(lpfc_mq_doorbell_num_posted, &doorbell, 1);
	bf_set(lpfc_mq_doorbell_id, &doorbell, q->queue_id);
	writel(doorbell.word0, q->phba->sli4_hba.MQDBregaddr);
	return 0;
}

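/**
 * lpfc_sli4_mq_release - Updates internal hba index for MQ
 * @q: The Mailbox Queue to operate on.
 *
 * Clears the saved mailbox pointer and advances the hba index by one.
 * Returns the number of entries released (1), or 0 if @q is NULL.
 **/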
static uint32_t
lpfc_sli4_mq_release(struct lpfc_queue *q)
{
	/* sanity check on queue memory */
	if (unlikely(!q))
		return 0;

	/* Clear the mailbox pointer for completion */
	q->phba->mbox = NULL;
	q->hba_index = ((q->hba_index + 1) % q->entry_count);
	return 1;
}

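/**
 * lpfc_sli4_eq_get - Gets the next valid EQE from an EQ
 * @q: The Event Queue to get the first valid EQE from.
 *
 * Returns the next valid Event Queue Entry on @q and advances the hba
 * index, or NULL when no valid entry is available. The EQE is not marked
 * consumed here; the caller is expected to release it via
 * lpfc_sli4_eq_release().
 **/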
static struct lpfc_eqe *
lpfc_sli4_eq_get(struct lpfc_queue *q)
{
	struct lpfc_eqe *eqe;
	uint32_t idx;

	/* sanity check on queue memory */
	if (unlikely(!q))
		return NULL;
	eqe = q->qe[q->hba_index].eqe;

	/* If the next EQE is not valid then we are done */
	if (!bf_get_le32(lpfc_eqe_valid, eqe))
		return NULL;
	/* If the host has not yet processed the next entry then we are done */
	idx = ((q->hba_index + 1) % q->entry_count);
	if (idx == q->host_index)
		return NULL;

	q->hba_index = idx;

	/*
	 * Insert barrier for instruction interlock: the EQE's valid bit
	 * must be checked before its contents are read and acted upon,
	 * so keep speculative loads from running ahead of the check above.
	 */
	mb();
	return eqe;
}

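/**
 * lpfc_sli4_eq_clr_intr - Turn off interrupts from an EQ
 * @q: The Event Queue to disable interrupts on.
 *
 * Rings the EQ doorbell with the EQCI bit set to clear the interrupt.
 **/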
static inline void
lpfc_sli4_eq_clr_intr(struct lpfc_queue *q)
{
	struct lpfc_register doorbell;

	doorbell.word0 = 0;
	bf_set(lpfc_eqcq_doorbell_eqci, &doorbell, 1);
	bf_set(lpfc_eqcq_doorbell_qt, &doorbell, LPFC_QUEUE_TYPE_EVENT);
	bf_set(lpfc_eqcq_doorbell_eqid_hi, &doorbell,
	       (q->queue_id >> LPFC_EQID_HI_FIELD_SHIFT));
	bf_set(lpfc_eqcq_doorbell_eqid_lo, &doorbell, q->queue_id);
	writel(doorbell.word0, q->phba->sli4_hba.EQCQDBregaddr);
}

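/**
 * lpfc_sli4_eq_release - Indicates the host has finished processing an EQ
 * @q: The Event Queue that the host has completed processing for.
 * @arm: Indicates whether the host wants to arm the EQ.
 *
 * Marks all consumed EQEs invalid, rings the doorbell with the number of
 * entries popped, and rearms the EQ when @arm is true. Returns the
 * number of EQEs released.
 **/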
uint32_t
lpfc_sli4_eq_release(struct lpfc_queue *q, bool arm)
{
	uint32_t released = 0;
	struct lpfc_eqe *temp_eqe;
	struct lpfc_register doorbell;

	/* sanity check on queue memory */
	if (unlikely(!q))
		return 0;

	/* while there are valid entries */
	while (q->hba_index != q->host_index) {
		temp_eqe = q->qe[q->host_index].eqe;
		bf_set_le32(lpfc_eqe_valid, temp_eqe, 0);
		released++;
		q->host_index = ((q->host_index + 1) % q->entry_count);
	}
	if (unlikely(released == 0 && !arm))
		return 0;

	/* ring doorbell for number popped */
	doorbell.word0 = 0;
	if (arm) {
		bf_set(lpfc_eqcq_doorbell_arm, &doorbell, 1);
		bf_set(lpfc_eqcq_doorbell_eqci, &doorbell, 1);
	}
	bf_set(lpfc_eqcq_doorbell_num_released, &doorbell, released);
	bf_set(lpfc_eqcq_doorbell_qt, &doorbell, LPFC_QUEUE_TYPE_EVENT);
	bf_set(lpfc_eqcq_doorbell_eqid_hi, &doorbell,
	       (q->queue_id >> LPFC_EQID_HI_FIELD_SHIFT));
	bf_set(lpfc_eqcq_doorbell_eqid_lo, &doorbell, q->queue_id);
	writel(doorbell.word0, q->phba->sli4_hba.EQCQDBregaddr);
	/* PCI read to flush PCI pipeline on re-arming for INTx mode */
	if ((q->phba->intr_type == INTx) && (arm == LPFC_QUEUE_REARM))
		readl(q->phba->sli4_hba.EQCQDBregaddr);
	return released;
}

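/**
 * lpfc_sli4_cq_get - Gets the next valid CQE from a CQ
 * @q: The Completion Queue to get the first valid CQE from.
 *
 * Returns the next valid Completion Queue Entry on @q and advances the
 * hba index, or NULL when no valid entry is available. The caller is
 * expected to release consumed CQEs via lpfc_sli4_cq_release().
 **/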
static struct lpfc_cqe *
lpfc_sli4_cq_get(struct lpfc_queue *q)
{
	struct lpfc_cqe *cqe;
	uint32_t idx;

	/* sanity check on queue memory */
	if (unlikely(!q))
		return NULL;

	/* If the next CQE is not valid then we are done */
	if (!bf_get_le32(lpfc_cqe_valid, q->qe[q->hba_index].cqe))
		return NULL;
	/* If the host has not yet processed the next entry then we are done */
	idx = ((q->hba_index + 1) % q->entry_count);
	if (idx == q->host_index)
		return NULL;

	cqe = q->qe[q->hba_index].cqe;
	q->hba_index = idx;

	/*
	 * Insert barrier for instruction interlock: the CQE's valid bit
	 * must be checked before the entry is copied and acted upon.
	 * Speculative loads could otherwise copy the CQE (e.g. at the
	 * start of lpfc_sli4_fp_handle_wcqe(), called immediately after
	 * our return) before the valid bit check above was done, acting
	 * on stale data.
	 */
	mb();
	return cqe;
}

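/**
 * lpfc_sli4_cq_release - Indicates the host has finished processing a CQ
 * @q: The Completion Queue that the host has completed processing for.
 * @arm: Indicates whether the host wants to arm the CQ.
 *
 * Marks all consumed CQEs invalid, rings the doorbell with the number of
 * entries popped, and rearms the CQ when @arm is true. Returns the
 * number of CQEs released.
 **/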
uint32_t
lpfc_sli4_cq_release(struct lpfc_queue *q, bool arm)
{
	uint32_t released = 0;
	struct lpfc_cqe *temp_qe;
	struct lpfc_register doorbell;

	/* sanity check on queue memory */
	if (unlikely(!q))
		return 0;
	/* while there are valid entries */
	while (q->hba_index != q->host_index) {
		temp_qe = q->qe[q->host_index].cqe;
		bf_set_le32(lpfc_cqe_valid, temp_qe, 0);
		released++;
		q->host_index = ((q->host_index + 1) % q->entry_count);
	}
	if (unlikely(released == 0 && !arm))
		return 0;

	/* ring doorbell for number popped */
	doorbell.word0 = 0;
	if (arm)
		bf_set(lpfc_eqcq_doorbell_arm, &doorbell, 1);
	bf_set(lpfc_eqcq_doorbell_num_released, &doorbell, released);
	bf_set(lpfc_eqcq_doorbell_qt, &doorbell, LPFC_QUEUE_TYPE_COMPLETION);
	bf_set(lpfc_eqcq_doorbell_cqid_hi, &doorbell,
	       (q->queue_id >> LPFC_CQID_HI_FIELD_SHIFT));
	bf_set(lpfc_eqcq_doorbell_cqid_lo, &doorbell, q->queue_id);
	writel(doorbell.word0, q->phba->sli4_hba.EQCQDBregaddr);
	return released;
}

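/**
 * lpfc_sli4_rq_put - Put a Receive Buffer Queue Entry on a Receive Queue
 * @hq: The Header Receive Queue to operate on.
 * @dq: The Data Receive Queue to operate on.
 * @hrqe: The Header Receive Queue Entry to put on the header queue.
 * @drqe: The Data Receive Queue Entry to put on the data queue.
 *
 * Copies the entries into the next available slots and rings the RQ
 * doorbell once entry_repost entries have been posted. Returns the put
 * index on success, -EINVAL on a queue type or index mismatch, -EBUSY if
 * the queue is full, or -ENOMEM if a queue is missing.
 **/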
static int
lpfc_sli4_rq_put(struct lpfc_queue *hq, struct lpfc_queue *dq,
		 struct lpfc_rqe *hrqe, struct lpfc_rqe *drqe)
{
	struct lpfc_rqe *temp_hrqe;
	struct lpfc_rqe *temp_drqe;
	struct lpfc_register doorbell;
	int put_index;

	/* sanity check on queue memory */
	if (unlikely(!hq) || unlikely(!dq))
		return -ENOMEM;
	put_index = hq->host_index;
	temp_hrqe = hq->qe[hq->host_index].rqe;
	temp_drqe = dq->qe[dq->host_index].rqe;

	if (hq->type != LPFC_HRQ || dq->type != LPFC_DRQ)
		return -EINVAL;
	if (hq->host_index != dq->host_index)
		return -EINVAL;
	/* If the host has not yet processed the next entry then we are done */
	if (((hq->host_index + 1) % hq->entry_count) == hq->hba_index)
		return -EBUSY;
	lpfc_sli_pcimem_bcopy(hrqe, temp_hrqe, hq->entry_size);
	lpfc_sli_pcimem_bcopy(drqe, temp_drqe, dq->entry_size);

	/* Update the host index to point to the next slot */
	hq->host_index = ((hq->host_index + 1) % hq->entry_count);
	dq->host_index = ((dq->host_index + 1) % dq->entry_count);

	/* Ring The Header Receive Queue Doorbell */
	if (!(hq->host_index % hq->entry_repost)) {
		doorbell.word0 = 0;
		if (hq->db_format == LPFC_DB_RING_FORMAT) {
			bf_set(lpfc_rq_db_ring_fm_num_posted, &doorbell,
			       hq->entry_repost);
			bf_set(lpfc_rq_db_ring_fm_id, &doorbell, hq->queue_id);
		} else if (hq->db_format == LPFC_DB_LIST_FORMAT) {
			bf_set(lpfc_rq_db_list_fm_num_posted, &doorbell,
			       hq->entry_repost);
			bf_set(lpfc_rq_db_list_fm_index, &doorbell,
			       hq->host_index);
			bf_set(lpfc_rq_db_list_fm_id, &doorbell, hq->queue_id);
		} else {
			return -EINVAL;
		}
		writel(doorbell.word0, hq->db_regaddr);
	}
	return put_index;
}

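/**
 * lpfc_sli4_rq_release - Updates internal hba index for RQ
 * @hq: The Header Receive Queue to operate on.
 * @dq: The Data Receive Queue to operate on.
 *
 * Advances the hba index on both receive queues. Returns 1 when an
 * entry pair is released, or 0 on error.
 **/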
static uint32_t
lpfc_sli4_rq_release(struct lpfc_queue *hq, struct lpfc_queue *dq)
{
	/* sanity check on queue memory */
	if (unlikely(!hq) || unlikely(!dq))
		return 0;

	if ((hq->type != LPFC_HRQ) || (dq->type != LPFC_DRQ))
		return 0;
	hq->hba_index = ((hq->hba_index + 1) % hq->entry_count);
	dq->hba_index = ((dq->hba_index + 1) % dq->entry_count);
	return 1;
}

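/**
 * lpfc_cmd_iocb - Get next command iocb entry in the ring
 * @phba: Pointer to HBA context object.
 * @pring: Pointer to driver SLI ring object.
 *
 * Returns a pointer to the command ring entry at the current cmdidx.
 * The caller is expected to hold the hbalock.
 **/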
static inline IOCB_t *
lpfc_cmd_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring)
{
	return (IOCB_t *) (((char *) pring->sli.sli3.cmdringaddr) +
			   pring->sli.sli3.cmdidx * phba->iocb_cmd_size);
}

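/**
 * lpfc_resp_iocb - Get next response iocb entry in the ring
 * @phba: Pointer to HBA context object.
 * @pring: Pointer to driver SLI ring object.
 *
 * Returns a pointer to the response ring entry at the current rspidx.
 * The caller is expected to hold the hbalock.
 **/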
static inline IOCB_t *
lpfc_resp_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring)
{
	return (IOCB_t *) (((char *) pring->sli.sli3.rspringaddr) +
			   pring->sli.sli3.rspidx * phba->iocb_rsp_size);
}

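/**
 * __lpfc_sli_get_iocbq - Allocates an iocb object from the iocb pool
 * @phba: Pointer to HBA context object.
 *
 * Lock-free version: removes and returns an iocb object from the iocb
 * pool, or NULL if the pool is empty. The caller is expected to hold
 * the hbalock.
 **/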
struct lpfc_iocbq *
__lpfc_sli_get_iocbq(struct lpfc_hba *phba)
{
	struct list_head *lpfc_iocb_list = &phba->lpfc_iocb_list;
	struct lpfc_iocbq *iocbq = NULL;

	lockdep_assert_held(&phba->hbalock);

	list_remove_head(lpfc_iocb_list, iocbq, struct lpfc_iocbq, list);
	if (iocbq)
		phba->iocb_cnt++;
	if (phba->iocb_cnt > phba->iocb_max)
		phba->iocb_max = phba->iocb_cnt;
	return iocbq;
}

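/**
 * __lpfc_clear_active_sglq - Remove the active sglq for this XRI.
 * @phba: Pointer to HBA context object.
 * @xritag: XRI value.
 *
 * Removes the sglq for @xritag from the active array and returns it.
 * The caller is expected to hold the hbalock.
 **/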
static struct lpfc_sglq *
__lpfc_clear_active_sglq(struct lpfc_hba *phba, uint16_t xritag)
{
	struct lpfc_sglq *sglq;

	sglq = phba->sli4_hba.lpfc_sglq_active_list[xritag];
	phba->sli4_hba.lpfc_sglq_active_list[xritag] = NULL;
	return sglq;
}

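/**
 * __lpfc_get_active_sglq - Get the active sglq for this XRI.
 * @phba: Pointer to HBA context object.
 * @xritag: XRI value.
 *
 * Returns the sglq for @xritag from the active array without removing
 * it. The caller is expected to hold the hbalock.
 **/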
struct lpfc_sglq *
__lpfc_get_active_sglq(struct lpfc_hba *phba, uint16_t xritag)
{
	struct lpfc_sglq *sglq;

	sglq = phba->sli4_hba.lpfc_sglq_active_list[xritag];
	return sglq;
}

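/**
 * lpfc_clr_rrq_active - Clears RRQ active bit in xri_bitmap.
 * @phba: Pointer to HBA context object.
 * @xritag: XRI value.
 * @rrq: Pointer to the rrq to clear.
 *
 * Clears the RRQ active bit for @xritag in the node's bitmap and frees
 * the rrq object back to the rrq pool.
 **/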
void
lpfc_clr_rrq_active(struct lpfc_hba *phba,
		    uint16_t xritag,
		    struct lpfc_node_rrq *rrq)
{
	struct lpfc_nodelist *ndlp = NULL;

	if ((rrq->vport) && NLP_CHK_NODE_ACT(rrq->ndlp))
		ndlp = lpfc_findnode_did(rrq->vport, rrq->nlp_DID);

	/* The target DID could have been swapped (cable swap)
	 * we should use the ndlp from the findnode if it is
	 * available.
	 */
	if ((!ndlp) && rrq->ndlp)
		ndlp = rrq->ndlp;

	if (!ndlp)
		goto out;

	if (test_and_clear_bit(xritag, ndlp->active_rrqs_xri_bitmap)) {
		rrq->send_rrq = 0;
		rrq->xritag = 0;
		rrq->rrq_stop_time = 0;
	}
out:
	mempool_free(rrq, phba->rrq_pool);
}

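/**
 * lpfc_handle_rrq_active - Checks if RRQ has waited RATOV.
 * @phba: Pointer to HBA context object.
 *
 * Walks the active RRQ list, moves RRQs that have waited RATOV onto a
 * local list and clears them (sending an RRQ ELS when requested), and
 * restarts the RRQ timer for the next expiry.
 **/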
void
lpfc_handle_rrq_active(struct lpfc_hba *phba)
{
	struct lpfc_node_rrq *rrq;
	struct lpfc_node_rrq *nextrrq;
	unsigned long next_time;
	unsigned long iflags;
	LIST_HEAD(send_rrq);

	spin_lock_irqsave(&phba->hbalock, iflags);
	phba->hba_flag &= ~HBA_RRQ_ACTIVE;
	next_time = jiffies + msecs_to_jiffies(1000 * (phba->fc_ratov + 1));
	list_for_each_entry_safe(rrq, nextrrq,
				 &phba->active_rrq_list, list) {
		if (time_after(jiffies, rrq->rrq_stop_time))
			list_move(&rrq->list, &send_rrq);
		else if (time_before(rrq->rrq_stop_time, next_time))
			next_time = rrq->rrq_stop_time;
	}
	spin_unlock_irqrestore(&phba->hbalock, iflags);
	if ((!list_empty(&phba->active_rrq_list)) &&
	    (!(phba->pport->load_flag & FC_UNLOADING)))
		mod_timer(&phba->rrq_tmr, next_time);
	list_for_each_entry_safe(rrq, nextrrq, &send_rrq, list) {
		list_del(&rrq->list);
		if (!rrq->send_rrq)
			/* this call will free the rrq */
			lpfc_clr_rrq_active(phba, rrq->xritag, rrq);
		else if (lpfc_send_rrq(phba, rrq)) {
			/* if we send the rrq then the completion handler
			 * will clear the bit in the xribitmap.
			 */
			lpfc_clr_rrq_active(phba, rrq->xritag,
					    rrq);
		}
	}
}

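/**
 * lpfc_get_active_rrq - Get the active RRQ for this exchange.
 * @vport: Pointer to vport object.
 * @xri: The xri used in the exchange.
 * @did: The targets DID for this exchange.
 *
 * Removes and returns the active RRQ matching @vport/@xri/@did, or NULL.
 **/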
struct lpfc_node_rrq *
lpfc_get_active_rrq(struct lpfc_vport *vport, uint16_t xri, uint32_t did)
{
	struct lpfc_hba *phba = vport->phba;
	struct lpfc_node_rrq *rrq;
	struct lpfc_node_rrq *nextrrq;
	unsigned long iflags;

	if (phba->sli_rev != LPFC_SLI_REV4)
		return NULL;
	spin_lock_irqsave(&phba->hbalock, iflags);
	list_for_each_entry_safe(rrq, nextrrq, &phba->active_rrq_list, list) {
		if (rrq->vport == vport && rrq->xritag == xri &&
		    rrq->nlp_DID == did) {
			list_del(&rrq->list);
			spin_unlock_irqrestore(&phba->hbalock, iflags);
			return rrq;
		}
	}
	spin_unlock_irqrestore(&phba->hbalock, iflags);
	return NULL;
}

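/**
 * lpfc_cleanup_vports_rrqs - Remove and clear the active RRQ for this vport.
 * @vport: Pointer to vport object.
 * @ndlp: Pointer to the lpfc_node_list structure.
 *
 * If @ndlp is NULL removes all RRQs for this vport, otherwise it removes
 * all RRQs for that node, and clears each one.
 **/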
void
lpfc_cleanup_vports_rrqs(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
{
	struct lpfc_hba *phba = vport->phba;
	struct lpfc_node_rrq *rrq;
	struct lpfc_node_rrq *nextrrq;
	unsigned long iflags;
	LIST_HEAD(rrq_list);

	if (phba->sli_rev != LPFC_SLI_REV4)
		return;
	if (!ndlp) {
		lpfc_sli4_vport_delete_els_xri_aborted(vport);
		lpfc_sli4_vport_delete_fcp_xri_aborted(vport);
	}
	spin_lock_irqsave(&phba->hbalock, iflags);
	list_for_each_entry_safe(rrq, nextrrq, &phba->active_rrq_list, list)
		if ((rrq->vport == vport) && (!ndlp || rrq->ndlp == ndlp))
			list_move(&rrq->list, &rrq_list);
	spin_unlock_irqrestore(&phba->hbalock, iflags);

	list_for_each_entry_safe(rrq, nextrrq, &rrq_list, list) {
		list_del(&rrq->list);
		lpfc_clr_rrq_active(phba, rrq->xritag, rrq);
	}
}

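/**
 * lpfc_test_rrq_active - Test RRQ bit in xri_bitmap.
 * @phba: Pointer to HBA context object.
 * @ndlp: Targets nodelist pointer for this exchange.
 * @xritag: The xri in the exchange.
 *
 * Returns 1 if an RRQ is active for @xritag on @ndlp, else 0. The caller
 * is expected to hold the hbalock.
 **/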
int
lpfc_test_rrq_active(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp,
		     uint16_t xritag)
{
	lockdep_assert_held(&phba->hbalock);
	if (!ndlp)
		return 0;
	if (!ndlp->active_rrqs_xri_bitmap)
		return 0;
	if (test_bit(xritag, ndlp->active_rrqs_xri_bitmap))
		return 1;
	else
		return 0;
}

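/**
 * lpfc_set_rrq_active - set RRQ active bit in xri_bitmap.
 * @phba: Pointer to HBA context object.
 * @ndlp: Targets nodelist pointer for this exchange.
 * @xritag: The xri in the exchange.
 * @rxid: The remote exchange id.
 * @send_rrq: Flag indicating whether an RRQ ELS should be sent.
 *
 * Sets the RRQ active bit, allocates an RRQ object and adds it to the
 * active list, waking the worker thread if the list was empty.
 * Returns 0 on success, -EINVAL on failure.
 **/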
int
lpfc_set_rrq_active(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp,
		    uint16_t xritag, uint16_t rxid, uint16_t send_rrq)
{
	unsigned long iflags;
	struct lpfc_node_rrq *rrq;
	int empty;

	if (!ndlp)
		return -EINVAL;

	if (!phba->cfg_enable_rrq)
		return -EINVAL;

	spin_lock_irqsave(&phba->hbalock, iflags);
	if (phba->pport->load_flag & FC_UNLOADING) {
		phba->hba_flag &= ~HBA_RRQ_ACTIVE;
		goto out;
	}

	/*
	 * set the active bit even if there is no mem available.
	 */
	if (NLP_CHK_FREE_REQ(ndlp))
		goto out;

	if (ndlp->vport && (ndlp->vport->load_flag & FC_UNLOADING))
		goto out;

	if (!ndlp->active_rrqs_xri_bitmap)
		goto out;

	if (test_and_set_bit(xritag, ndlp->active_rrqs_xri_bitmap))
		goto out;

	spin_unlock_irqrestore(&phba->hbalock, iflags);
	rrq = mempool_alloc(phba->rrq_pool, GFP_KERNEL);
	if (!rrq) {
		lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
				"3155 Unable to allocate RRQ xri:0x%x rxid:0x%x"
				" DID:0x%x Send:%d\n",
				xritag, rxid, ndlp->nlp_DID, send_rrq);
		return -EINVAL;
	}
	if (phba->cfg_enable_rrq == 1)
		rrq->send_rrq = send_rrq;
	else
		rrq->send_rrq = 0;
	rrq->xritag = xritag;
	rrq->rrq_stop_time = jiffies +
			     msecs_to_jiffies(1000 * (phba->fc_ratov + 1));
	rrq->ndlp = ndlp;
	rrq->nlp_DID = ndlp->nlp_DID;
	rrq->vport = ndlp->vport;
	rrq->rxid = rxid;
	spin_lock_irqsave(&phba->hbalock, iflags);
	empty = list_empty(&phba->active_rrq_list);
	list_add_tail(&rrq->list, &phba->active_rrq_list);
	phba->hba_flag |= HBA_RRQ_ACTIVE;
	if (empty)
		lpfc_worker_wake_up(phba);
	spin_unlock_irqrestore(&phba->hbalock, iflags);
	return 0;
out:
	spin_unlock_irqrestore(&phba->hbalock, iflags);
	lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
			"2921 Can't set rrq active xri:0x%x rxid:0x%x"
			" DID:0x%x Send:%d\n",
			xritag, rxid, ndlp->nlp_DID, send_rrq);
	return -EINVAL;
}

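/**
 * __lpfc_sli_get_sglq - Allocates an iocb object from sgl pool
 * @phba: Pointer to HBA context object.
 * @piocbq: Pointer to the iocbq.
 *
 * Removes a sglq from the sgl list, skipping XRIs that still have an RRQ
 * outstanding for the target node. The chosen sglq is marked allocated
 * and recorded in the active array. The caller is expected to hold the
 * hbalock. Returns NULL if no usable sglq is found.
 **/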
static struct lpfc_sglq *
__lpfc_sli_get_sglq(struct lpfc_hba *phba, struct lpfc_iocbq *piocbq)
{
	struct list_head *lpfc_sgl_list = &phba->sli4_hba.lpfc_sgl_list;
	struct lpfc_sglq *sglq = NULL;
	struct lpfc_sglq *start_sglq = NULL;
	struct lpfc_scsi_buf *lpfc_cmd;
	struct lpfc_nodelist *ndlp;
	int found = 0;

	lockdep_assert_held(&phba->hbalock);

	if (piocbq->iocb_flag & LPFC_IO_FCP) {
		lpfc_cmd = (struct lpfc_scsi_buf *) piocbq->context1;
		ndlp = lpfc_cmd->rdata->pnode;
	} else if ((piocbq->iocb.ulpCommand == CMD_GEN_REQUEST64_CR) &&
		   !(piocbq->iocb_flag & LPFC_IO_LIBDFC)) {
		ndlp = piocbq->context_un.ndlp;
	} else if (piocbq->iocb_flag & LPFC_IO_LIBDFC) {
		if (piocbq->iocb_flag & LPFC_IO_LOOPBACK)
			ndlp = NULL;
		else
			ndlp = piocbq->context_un.ndlp;
	} else {
		ndlp = piocbq->context1;
	}

	list_remove_head(lpfc_sgl_list, sglq, struct lpfc_sglq, list);
	start_sglq = sglq;
	while (!found) {
		if (!sglq)
			return NULL;
		if (lpfc_test_rrq_active(phba, ndlp, sglq->sli4_lxritag)) {
			/* This xri has an rrq outstanding for this DID.
			 * put it back in the list and get another xri.
			 */
			list_add_tail(&sglq->list, lpfc_sgl_list);
			sglq = NULL;
			list_remove_head(lpfc_sgl_list, sglq,
					 struct lpfc_sglq, list);
			if (sglq == start_sglq) {
				sglq = NULL;
				break;
			} else
				continue;
		}
		sglq->ndlp = ndlp;
		found = 1;
		phba->sli4_hba.lpfc_sglq_active_list[sglq->sli4_lxritag] = sglq;
		sglq->state = SGL_ALLOCATED;
	}
	return sglq;
}

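/**
 * lpfc_sli_get_iocbq - Allocates an iocb object from iocb pool
 * @phba: Pointer to HBA context object.
 *
 * Locking wrapper around __lpfc_sli_get_iocbq(); can be called from any
 * context.
 **/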
struct lpfc_iocbq *
lpfc_sli_get_iocbq(struct lpfc_hba *phba)
{
	struct lpfc_iocbq *iocbq = NULL;
	unsigned long iflags;

	spin_lock_irqsave(&phba->hbalock, iflags);
	iocbq = __lpfc_sli_get_iocbq(phba);
	spin_unlock_irqrestore(&phba->hbalock, iflags);
	return iocbq;
}

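/**
 * __lpfc_sli_release_iocbq_s4 - Release iocb to the iocb pool (SLI4)
 * @phba: Pointer to HBA context object.
 * @iocbq: Pointer to driver iocb object.
 *
 * Returns the iocb's sglq to the aborted list when the exchange is still
 * busy, or to the free sgl list otherwise, then clears all volatile
 * fields and returns the iocb to the pool. The caller is expected to
 * hold the hbalock.
 **/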
static void
__lpfc_sli_release_iocbq_s4(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq)
{
	struct lpfc_sglq *sglq;
	size_t start_clean = offsetof(struct lpfc_iocbq, iocb);
	unsigned long iflag = 0;
	struct lpfc_sli_ring *pring = &phba->sli.ring[LPFC_ELS_RING];

	lockdep_assert_held(&phba->hbalock);

	if (iocbq->sli4_xritag == NO_XRI)
		sglq = NULL;
	else
		sglq = __lpfc_clear_active_sglq(phba, iocbq->sli4_lxritag);

	if (sglq) {
		if ((iocbq->iocb_flag & LPFC_EXCHANGE_BUSY) &&
		    (sglq->state != SGL_XRI_ABORTED)) {
			spin_lock_irqsave(&phba->sli4_hba.abts_sgl_list_lock,
					  iflag);
			list_add(&sglq->list,
				 &phba->sli4_hba.lpfc_abts_els_sgl_list);
			spin_unlock_irqrestore(
				&phba->sli4_hba.abts_sgl_list_lock, iflag);
		} else {
			spin_lock_irqsave(&pring->ring_lock, iflag);
			sglq->state = SGL_FREED;
			sglq->ndlp = NULL;
			list_add_tail(&sglq->list,
				      &phba->sli4_hba.lpfc_sgl_list);
			spin_unlock_irqrestore(&pring->ring_lock, iflag);

			/* Check if TXQ queue needs to be serviced */
			if (!list_empty(&pring->txq))
				lpfc_worker_wake_up(phba);
		}
	}

	/*
	 * Clean all volatile data fields, preserve iotag and node struct.
	 */
	memset((char *)iocbq + start_clean, 0, sizeof(*iocbq) - start_clean);
	iocbq->sli4_lxritag = NO_XRI;
	iocbq->sli4_xritag = NO_XRI;
	list_add_tail(&iocbq->list, &phba->lpfc_iocb_list);
}

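/**
 * __lpfc_sli_release_iocbq_s3 - Release iocb to the iocb pool (SLI3)
 * @phba: Pointer to HBA context object.
 * @iocbq: Pointer to driver iocb object.
 *
 * Clears all volatile fields and returns the iocb to the pool. The
 * caller is expected to hold the hbalock.
 **/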
static void
__lpfc_sli_release_iocbq_s3(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq)
{
	size_t start_clean = offsetof(struct lpfc_iocbq, iocb);

	lockdep_assert_held(&phba->hbalock);

	/*
	 * Clean all volatile data fields, preserve iotag and node struct.
	 */
	memset((char *)iocbq + start_clean, 0, sizeof(*iocbq) - start_clean);
	iocbq->sli4_xritag = NO_XRI;
	list_add_tail(&iocbq->list, &phba->lpfc_iocb_list);
}

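/**
 * __lpfc_sli_release_iocbq - Release iocb to the iocb pool
 * @phba: Pointer to HBA context object.
 * @iocbq: Pointer to driver iocb object.
 *
 * Dispatches to the SLI revision specific release routine. The caller is
 * expected to hold the hbalock.
 **/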
static void
__lpfc_sli_release_iocbq(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq)
{
	lockdep_assert_held(&phba->hbalock);

	phba->__lpfc_sli_release_iocbq(phba, iocbq);
	phba->iocb_cnt--;
}

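/**
 * lpfc_sli_release_iocbq - Release iocb to the iocb pool
 * @phba: Pointer to HBA context object.
 * @iocbq: Pointer to driver iocb object.
 *
 * Locking wrapper around __lpfc_sli_release_iocbq().
 **/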
void
lpfc_sli_release_iocbq(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq)
{
	unsigned long iflags;

	/*
	 * Clean all volatile data fields, preserve iotag and node struct.
	 */
	spin_lock_irqsave(&phba->hbalock, iflags);
	__lpfc_sli_release_iocbq(phba, iocbq);
	spin_unlock_irqrestore(&phba->hbalock, iflags);
}

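/**
 * lpfc_sli_cancel_iocbs - Cancel all iocbs from a list.
 * @phba: Pointer to HBA context object.
 * @iocblist: List of IOCBs.
 * @ulpstatus: ULP status in IOCB command field.
 * @ulpWord4: ULP word-4 in IOCB command field.
 *
 * Completes every iocb on @iocblist with the supplied status, releasing
 * iocbs that have no completion handler.
 **/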
void
lpfc_sli_cancel_iocbs(struct lpfc_hba *phba, struct list_head *iocblist,
		      uint32_t ulpstatus, uint32_t ulpWord4)
{
	struct lpfc_iocbq *piocb;

	while (!list_empty(iocblist)) {
		list_remove_head(iocblist, piocb, struct lpfc_iocbq, list);
		if (!piocb->iocb_cmpl)
			lpfc_sli_release_iocbq(phba, piocb);
		else {
			piocb->iocb.ulpStatus = ulpstatus;
			piocb->iocb.un.ulpWord[4] = ulpWord4;
			(piocb->iocb_cmpl) (phba, piocb, piocb);
		}
	}
	return;
}

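/**
 * lpfc_sli_iocb_cmd_type - Get the iocb type
 * @iocb_cmnd: iocb command code.
 *
 * Classifies an SLI-3 iocb command as solicited, unsolicited, abort, or
 * unknown.
 **/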
static lpfc_iocb_type
lpfc_sli_iocb_cmd_type(uint8_t iocb_cmnd)
{
	lpfc_iocb_type type = LPFC_UNKNOWN_IOCB;

	if (iocb_cmnd > CMD_MAX_IOCB_CMD)
		return LPFC_UNKNOWN_IOCB;

	switch (iocb_cmnd) {
	case CMD_XMIT_SEQUENCE_CR:
	case CMD_XMIT_SEQUENCE_CX:
	case CMD_XMIT_BCAST_CN:
	case CMD_XMIT_BCAST_CX:
	case CMD_ELS_REQUEST_CR:
	case CMD_ELS_REQUEST_CX:
	case CMD_CREATE_XRI_CR:
	case CMD_CREATE_XRI_CX:
	case CMD_GET_RPI_CN:
	case CMD_XMIT_ELS_RSP_CX:
	case CMD_GET_RPI_CR:
	case CMD_FCP_IWRITE_CR:
	case CMD_FCP_IWRITE_CX:
	case CMD_FCP_IREAD_CR:
	case CMD_FCP_IREAD_CX:
	case CMD_FCP_ICMND_CR:
	case CMD_FCP_ICMND_CX:
	case CMD_FCP_TSEND_CX:
	case CMD_FCP_TRSP_CX:
	case CMD_FCP_TRECEIVE_CX:
	case CMD_FCP_AUTO_TRSP_CX:
	case CMD_ADAPTER_MSG:
	case CMD_ADAPTER_DUMP:
	case CMD_XMIT_SEQUENCE64_CR:
	case CMD_XMIT_SEQUENCE64_CX:
	case CMD_XMIT_BCAST64_CN:
	case CMD_XMIT_BCAST64_CX:
	case CMD_ELS_REQUEST64_CR:
	case CMD_ELS_REQUEST64_CX:
	case CMD_FCP_IWRITE64_CR:
	case CMD_FCP_IWRITE64_CX:
	case CMD_FCP_IREAD64_CR:
	case CMD_FCP_IREAD64_CX:
	case CMD_FCP_ICMND64_CR:
	case CMD_FCP_ICMND64_CX:
	case CMD_FCP_TSEND64_CX:
	case CMD_FCP_TRSP64_CX:
	case CMD_FCP_TRECEIVE64_CX:
	case CMD_GEN_REQUEST64_CR:
	case CMD_GEN_REQUEST64_CX:
	case CMD_XMIT_ELS_RSP64_CX:
	case DSSCMD_IWRITE64_CR:
	case DSSCMD_IWRITE64_CX:
	case DSSCMD_IREAD64_CR:
	case DSSCMD_IREAD64_CX:
		type = LPFC_SOL_IOCB;
		break;
	case CMD_ABORT_XRI_CN:
	case CMD_ABORT_XRI_CX:
	case CMD_CLOSE_XRI_CN:
	case CMD_CLOSE_XRI_CX:
	case CMD_XRI_ABORTED_CX:
	case CMD_ABORT_MXRI64_CN:
	case CMD_XMIT_BLS_RSP64_CX:
		type = LPFC_ABORT_IOCB;
		break;
	case CMD_RCV_SEQUENCE_CX:
	case CMD_RCV_ELS_REQ_CX:
	case CMD_RCV_SEQUENCE64_CX:
	case CMD_RCV_ELS_REQ64_CX:
	case CMD_ASYNC_STATUS:
	case CMD_IOCB_RCV_SEQ64_CX:
	case CMD_IOCB_RCV_ELS64_CX:
	case CMD_IOCB_RCV_CONT64_CX:
	case CMD_IOCB_RET_XRI64_CX:
		type = LPFC_UNSOL_IOCB;
		break;
	case CMD_IOCB_XMIT_MSEQ64_CR:
	case CMD_IOCB_XMIT_MSEQ64_CX:
	case CMD_IOCB_RCV_SEQ_LIST64_CX:
	case CMD_IOCB_RCV_ELS_LIST64_CX:
	case CMD_IOCB_CLOSE_EXTENDED_CN:
	case CMD_IOCB_ABORT_EXTENDED_CN:
	case CMD_IOCB_RET_HBQE64_CN:
	case CMD_IOCB_FCP_IBIDIR64_CR:
	case CMD_IOCB_FCP_IBIDIR64_CX:
	case CMD_IOCB_FCP_ITASKMGT64_CX:
	case CMD_IOCB_LOGENTRY_CN:
	case CMD_IOCB_LOGENTRY_ASYNC_CN:
		printk("%s - Unhandled SLI-3 Command x%x\n",
		       __func__, iocb_cmnd);
		type = LPFC_UNKNOWN_IOCB;
		break;
	default:
		type = LPFC_UNKNOWN_IOCB;
		break;
	}

	return type;
}

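/**
 * lpfc_sli_ring_map - Issue config_ring mbox for all rings
 * @phba: Pointer to HBA context object.
 *
 * Issues a CONFIG_RING mailbox command for each ring in polling mode.
 * Returns 0 on success, -ENOMEM or -ENXIO on failure.
 **/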
static int
lpfc_sli_ring_map(struct lpfc_hba *phba)
{
	struct lpfc_sli *psli = &phba->sli;
	LPFC_MBOXQ_t *pmb;
	MAILBOX_t *pmbox;
	int i, rc, ret = 0;

	pmb = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!pmb)
		return -ENOMEM;
	pmbox = &pmb->u.mb;
	phba->link_state = LPFC_INIT_MBX_CMDS;
	for (i = 0; i < psli->num_rings; i++) {
		lpfc_config_ring(phba, i, pmb);
		rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL);
		if (rc != MBX_SUCCESS) {
			lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
					"0446 Adapter failed to init (%d), "
					"mbxCmd x%x CFG_RING, mbxStatus x%x, "
					"ring %d\n",
					rc, pmbox->mbxCommand,
					pmbox->mbxStatus, i);
			phba->link_state = LPFC_HBA_ERROR;
			ret = -ENXIO;
			break;
		}
	}
	mempool_free(pmb, phba->mbox_mem_pool);
	return ret;
}

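/**
 * lpfc_sli_ringtxcmpl_put - Adds new iocb to the txcmplq
 * @phba: Pointer to HBA context object.
 * @pring: Pointer to driver SLI ring object.
 * @piocb: Pointer to the driver iocb object.
 *
 * Adds @piocb to the ring's txcmplq and, for ELS commands, restarts the
 * ELS timeout timer. The caller is expected to hold the hbalock.
 **/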
static int
lpfc_sli_ringtxcmpl_put(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
			struct lpfc_iocbq *piocb)
{
	lockdep_assert_held(&phba->hbalock);

	BUG_ON(!piocb);

	list_add_tail(&piocb->list, &pring->txcmplq);
	piocb->iocb_flag |= LPFC_IO_ON_TXCMPLQ;

	if ((unlikely(pring->ringno == LPFC_ELS_RING)) &&
	    (piocb->iocb.ulpCommand != CMD_ABORT_XRI_CN) &&
	    (piocb->iocb.ulpCommand != CMD_CLOSE_XRI_CN)) {
		BUG_ON(!piocb->vport);
		if (!(piocb->vport->load_flag & FC_UNLOADING))
			mod_timer(&piocb->vport->els_tmofunc,
				  jiffies +
				  msecs_to_jiffies(1000 * (phba->fc_ratov << 1)));
	}

	return 0;
}

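/**
 * lpfc_sli_ringtx_get - Get first element of the txq
 * @phba: Pointer to HBA context object.
 * @pring: Pointer to driver SLI ring object.
 *
 * Removes and returns the first iocb on the ring's txq, or NULL. The
 * caller is expected to hold the hbalock.
 **/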
struct lpfc_iocbq *
lpfc_sli_ringtx_get(struct lpfc_hba *phba, struct lpfc_sli_ring *pring)
{
	struct lpfc_iocbq *cmd_iocb;

	lockdep_assert_held(&phba->hbalock);

	list_remove_head((&pring->txq), cmd_iocb, struct lpfc_iocbq, list);
	return cmd_iocb;
}

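/**
 * lpfc_sli_next_iocb_slot - Get next iocb slot in the ring
 * @phba: Pointer to HBA context object.
 * @pring: Pointer to driver SLI ring object.
 *
 * Returns a pointer to the next free command iocb slot, or NULL if the
 * ring is full or the port's get index is corrupted (in which case a
 * host attention error is raised). The caller is expected to hold the
 * hbalock.
 **/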
static IOCB_t *
lpfc_sli_next_iocb_slot(struct lpfc_hba *phba, struct lpfc_sli_ring *pring)
{
	struct lpfc_pgp *pgp = &phba->port_gp[pring->ringno];
	uint32_t max_cmd_idx = pring->sli.sli3.numCiocb;

	lockdep_assert_held(&phba->hbalock);

	if ((pring->sli.sli3.next_cmdidx == pring->sli.sli3.cmdidx) &&
	    (++pring->sli.sli3.next_cmdidx >= max_cmd_idx))
		pring->sli.sli3.next_cmdidx = 0;

	if (unlikely(pring->sli.sli3.local_getidx ==
		     pring->sli.sli3.next_cmdidx)) {

		pring->sli.sli3.local_getidx = le32_to_cpu(pgp->cmdGetInx);

		if (unlikely(pring->sli.sli3.local_getidx >= max_cmd_idx)) {
			lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
					"0315 Ring %d issue: portCmdGet %d "
					"is bigger than cmd ring %d\n",
					pring->ringno,
					pring->sli.sli3.local_getidx,
					max_cmd_idx);

			phba->link_state = LPFC_HBA_ERROR;
			/*
			 * All error attention handlers are posted to
			 * worker thread
			 */
			phba->work_ha |= HA_ERATT;
			phba->work_hs = HS_FFER3;

			lpfc_worker_wake_up(phba);

			return NULL;
		}

		if (pring->sli.sli3.local_getidx == pring->sli.sli3.next_cmdidx)
			return NULL;
	}

	return lpfc_cmd_iocb(phba, pring);
}

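/**
 * lpfc_sli_next_iotag - Get an iotag for the iocb
 * @phba: Pointer to HBA context object.
 * @iocbq: Pointer to driver iocb object.
 *
 * Allocates a new driver iotag for @iocbq, growing the iocbq_lookup
 * array when needed. Returns the new iotag, or 0 when allocation fails.
 **/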
uint16_t
lpfc_sli_next_iotag(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq)
{
	struct lpfc_iocbq **new_arr;
	struct lpfc_iocbq **old_arr;
	size_t new_len;
	struct lpfc_sli *psli = &phba->sli;
	uint16_t iotag;

	spin_lock_irq(&phba->hbalock);
	iotag = psli->last_iotag;
	if (++iotag < psli->iocbq_lookup_len) {
		psli->last_iotag = iotag;
		psli->iocbq_lookup[iotag] = iocbq;
		spin_unlock_irq(&phba->hbalock);
		iocbq->iotag = iotag;
		return iotag;
	} else if (psli->iocbq_lookup_len < (0xffff
					     - LPFC_IOCBQ_LOOKUP_INCREMENT)) {
		new_len = psli->iocbq_lookup_len + LPFC_IOCBQ_LOOKUP_INCREMENT;
		spin_unlock_irq(&phba->hbalock);
		new_arr = kzalloc(new_len * sizeof (struct lpfc_iocbq *),
				  GFP_KERNEL);
		if (new_arr) {
			spin_lock_irq(&phba->hbalock);
			old_arr = psli->iocbq_lookup;
			if (new_len <= psli->iocbq_lookup_len) {
				/* another thread already grew the array */
				kfree(new_arr);
				iotag = psli->last_iotag;
				if (++iotag < psli->iocbq_lookup_len) {
					psli->last_iotag = iotag;
					psli->iocbq_lookup[iotag] = iocbq;
					spin_unlock_irq(&phba->hbalock);
					iocbq->iotag = iotag;
					return iotag;
				}
				spin_unlock_irq(&phba->hbalock);
				return 0;
			}
			if (psli->iocbq_lookup)
				memcpy(new_arr, old_arr,
				       ((psli->last_iotag + 1) *
					sizeof (struct lpfc_iocbq *)));
			psli->iocbq_lookup = new_arr;
			psli->iocbq_lookup_len = new_len;
			psli->last_iotag = iotag;
			psli->iocbq_lookup[iotag] = iocbq;
			spin_unlock_irq(&phba->hbalock);
			iocbq->iotag = iotag;
			kfree(old_arr);
			return iotag;
		}
	} else
		spin_unlock_irq(&phba->hbalock);

	lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
			"0318 Failed to allocate IOTAG.last IOTAG is %d\n",
			psli->last_iotag);

	return 0;
}

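/**
 * lpfc_sli_submit_iocb - Submit an iocb to the firmware
 * @phba: Pointer to HBA context object.
 * @pring: Pointer to driver SLI ring object.
 * @iocb: Pointer to iocb slot in the ring.
 * @nextiocb: Pointer to driver iocb object which needs to be posted.
 *
 * Copies @nextiocb into the ring slot, updates the chip's put index, and
 * either queues the command on the txcmplq or releases it immediately
 * when it has no completion handler. The caller is expected to hold the
 * hbalock.
 **/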
static void
lpfc_sli_submit_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
		     IOCB_t *iocb, struct lpfc_iocbq *nextiocb)
{
	lockdep_assert_held(&phba->hbalock);
	/*
	 * Set up an iotag
	 */
	nextiocb->iocb.ulpIoTag = (nextiocb->iocb_cmpl) ? nextiocb->iotag : 0;


	if (pring->ringno == LPFC_ELS_RING) {
		lpfc_debugfs_slow_ring_trc(phba,
			"IOCB cmd ring: wd4:x%08x wd6:x%08x wd7:x%08x",
			*(((uint32_t *) &nextiocb->iocb) + 4),
			*(((uint32_t *) &nextiocb->iocb) + 6),
			*(((uint32_t *) &nextiocb->iocb) + 7));
	}

	/*
	 * Issue iocb command to adapter
	 */
	lpfc_sli_pcimem_bcopy(&nextiocb->iocb, iocb, phba->iocb_cmd_size);
	wmb();
	pring->stats.iocb_cmd++;

	/*
	 * If there is no completion routine to call, we can release the
	 * IOCB buffer back right now. For IOCBs, like QUE_RING_BUF,
	 * that have no rsp ring completion, iocb_cmpl MUST be NULL.
	 */
	if (nextiocb->iocb_cmpl)
		lpfc_sli_ringtxcmpl_put(phba, pring, nextiocb);
	else
		__lpfc_sli_release_iocbq(phba, nextiocb);

	/*
	 * Let the HBA know what IOCB slot will be the next one the
	 * driver will put a command into.
	 */
	pring->sli.sli3.cmdidx = pring->sli.sli3.next_cmdidx;
	writel(pring->sli.sli3.cmdidx, &phba->host_gp[pring->ringno].cmdPutInx);
}

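/**
 * lpfc_sli_update_full_ring - Update the chip attention register
 * @phba: Pointer to HBA context object.
 * @pring: Pointer to driver SLI ring object.
 *
 * Marks the ring as full and asks the HBA to interrupt when iocb slots
 * become available.
 **/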
static void
lpfc_sli_update_full_ring(struct lpfc_hba *phba, struct lpfc_sli_ring *pring)
{
	int ringno = pring->ringno;

	pring->flag |= LPFC_CALL_RING_AVAILABLE;

	wmb();

	/*
	 * Set ring 'ringno' to SET R0CE_REQ in Chip Att register.
	 * The HBA will tell us when an IOCB entry is available.
	 */
	writel((CA_R0ATT|CA_R0CE_REQ) << (ringno*4), phba->CAregaddr);
	readl(phba->CAregaddr); /* flush */

	pring->stats.iocb_cmd_full++;
}

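/**
 * lpfc_sli_update_ring - Update chip attention register
 * @phba: Pointer to HBA context object.
 * @pring: Pointer to driver SLI ring object.
 *
 * Tells the HBA that there is work to do in this ring, unless the chip
 * handles the command ring put pointer itself (CRP enabled).
 **/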
static void
lpfc_sli_update_ring(struct lpfc_hba *phba, struct lpfc_sli_ring *pring)
{
	int ringno = pring->ringno;

	/*
	 * Tell the HBA that there is work to do in this ring.
	 */
	if (!(phba->sli3_options & LPFC_SLI3_CRP_ENABLED)) {
		wmb();
		writel(CA_R0ATT << (ringno * 4), phba->CAregaddr);
		readl(phba->CAregaddr); /* flush */
	}
}

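/**
 * lpfc_sli_resume_iocb - Process iocbs in the txq
 * @phba: Pointer to HBA context object.
 * @pring: Pointer to driver SLI ring object.
 *
 * Transfers iocbs from the ring's txq to the firmware while slots are
 * available, then updates the chip attention register. The caller is
 * expected to hold the hbalock.
 **/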
static void
lpfc_sli_resume_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring)
{
	IOCB_t *iocb;
	struct lpfc_iocbq *nextiocb;

	lockdep_assert_held(&phba->hbalock);

	/*
	 * Check to see if:
	 *  (a) there is anything on the txq to send
	 *  (b) link is up
	 *  (c) link attention events can be processed (fcp ring only)
	 *  (d) IOCB processing is not blocked by the outstanding mbox command.
	 */
	if (lpfc_is_link_up(phba) &&
	    (!list_empty(&pring->txq)) &&
	    (pring->ringno != phba->sli.fcp_ring ||
	     phba->sli.sli_flag & LPFC_PROCESS_LA)) {

		while ((iocb = lpfc_sli_next_iocb_slot(phba, pring)) &&
		       (nextiocb = lpfc_sli_ringtx_get(phba, pring)))
			lpfc_sli_submit_iocb(phba, pring, iocb, nextiocb);

		if (iocb)
			lpfc_sli_update_ring(phba, pring);
		else
			lpfc_sli_update_full_ring(phba, pring);
	}

	return;
}

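/**
 * lpfc_sli_next_hbq_slot - Get next hbq entry for the HBQ
 * @phba: Pointer to HBA context object.
 * @hbqno: HBQ number.
 *
 * Returns a pointer to the next available HBQ entry, or NULL if the HBQ
 * is full or the chip's get index is corrupted. The caller is expected
 * to hold the hbalock.
 **/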
static struct lpfc_hbq_entry *
lpfc_sli_next_hbq_slot(struct lpfc_hba *phba, uint32_t hbqno)
{
	struct hbq_s *hbqp = &phba->hbqs[hbqno];

	lockdep_assert_held(&phba->hbalock);

	if (hbqp->next_hbqPutIdx == hbqp->hbqPutIdx &&
	    ++hbqp->next_hbqPutIdx >= hbqp->entry_count)
		hbqp->next_hbqPutIdx = 0;

	if (unlikely(hbqp->local_hbqGetIdx == hbqp->next_hbqPutIdx)) {
		uint32_t raw_index = phba->hbq_get[hbqno];
		uint32_t getidx = le32_to_cpu(raw_index);

		hbqp->local_hbqGetIdx = getidx;

		if (unlikely(hbqp->local_hbqGetIdx >= hbqp->entry_count)) {
			lpfc_printf_log(phba, KERN_ERR,
					LOG_SLI | LOG_VPORT,
					"1802 HBQ %d: local_hbqGetIdx "
					"%u is > than hbqp->entry_count %u\n",
					hbqno, hbqp->local_hbqGetIdx,
					hbqp->entry_count);

			phba->link_state = LPFC_HBA_ERROR;
			return NULL;
		}

		if (hbqp->local_hbqGetIdx == hbqp->next_hbqPutIdx)
			return NULL;
	}

	return (struct lpfc_hbq_entry *) phba->hbqs[hbqno].hbq_virt +
			hbqp->hbqPutIdx;
}

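/**
 * lpfc_sli_hbqbuf_free_all - Free all the hbq buffers
 * @phba: Pointer to HBA context object.
 *
 * Frees every buffer currently posted to the HBQs, including in-flight
 * buffers on the rb_pend_list, and marks the HBQs not in use.
 **/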
void
lpfc_sli_hbqbuf_free_all(struct lpfc_hba *phba)
{
	struct lpfc_dmabuf *dmabuf, *next_dmabuf;
	struct hbq_dmabuf *hbq_buf;
	unsigned long flags;
	int i, hbq_count;
	uint32_t hbqno;

	hbq_count = lpfc_sli_hbq_count();
	/* Return all memory used by all HBQs */
	spin_lock_irqsave(&phba->hbalock, flags);
	for (i = 0; i < hbq_count; ++i) {
		list_for_each_entry_safe(dmabuf, next_dmabuf,
					 &phba->hbqs[i].hbq_buffer_list, list) {
			hbq_buf = container_of(dmabuf, struct hbq_dmabuf, dbuf);
			list_del(&hbq_buf->dbuf.list);
			(phba->hbqs[i].hbq_free_buffer)(phba, hbq_buf);
		}
		phba->hbqs[i].buffer_count = 0;
	}
	/* Return all HBQ buffer that are in-fly */
	list_for_each_entry_safe(dmabuf, next_dmabuf, &phba->rb_pend_list,
				 list) {
		hbq_buf = container_of(dmabuf, struct hbq_dmabuf, dbuf);
		list_del(&hbq_buf->dbuf.list);
		if (hbq_buf->tag == -1) {
			(phba->hbqs[LPFC_ELS_HBQ].hbq_free_buffer)
				(phba, hbq_buf);
		} else {
			hbqno = hbq_buf->tag >> 16;
			if (hbqno >= LPFC_MAX_HBQS)
				(phba->hbqs[LPFC_ELS_HBQ].hbq_free_buffer)
					(phba, hbq_buf);
			else
				(phba->hbqs[hbqno].hbq_free_buffer)(phba,
								    hbq_buf);
		}
	}

	/* Mark the HBQs not in use */
	phba->hbq_in_use = 0;
	spin_unlock_irqrestore(&phba->hbalock, flags);
}

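/**
 * lpfc_sli_hbq_to_firmware - Post the hbq buffer to firmware
 * @phba: Pointer to HBA context object.
 * @hbqno: HBQ number.
 * @hbq_buf: Pointer to HBQ buffer.
 *
 * Dispatches to the SLI revision specific routine that posts an HBQ
 * buffer to the firmware. The caller is expected to hold the hbalock.
 **/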
static int
lpfc_sli_hbq_to_firmware(struct lpfc_hba *phba, uint32_t hbqno,
			 struct hbq_dmabuf *hbq_buf)
{
	lockdep_assert_held(&phba->hbalock);
	return phba->lpfc_sli_hbq_to_firmware(phba, hbqno, hbq_buf);
}

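/**
 * lpfc_sli_hbq_to_firmware_s3 - Post the hbq buffer to SLI3 firmware
 * @phba: Pointer to HBA context object.
 * @hbqno: HBQ number.
 * @hbq_buf: Pointer to HBQ buffer.
 *
 * Fills the next HBQ entry with the buffer's physical address and tag
 * and updates the chip's put index. Returns 0 on success, -ENOMEM if no
 * slot is available. The caller is expected to hold the hbalock.
 **/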
static int
lpfc_sli_hbq_to_firmware_s3(struct lpfc_hba *phba, uint32_t hbqno,
			    struct hbq_dmabuf *hbq_buf)
{
	struct lpfc_hbq_entry *hbqe;
	dma_addr_t physaddr = hbq_buf->dbuf.phys;

	lockdep_assert_held(&phba->hbalock);
	/* Get next HBQ entry slot to use */
	hbqe = lpfc_sli_next_hbq_slot(phba, hbqno);
	if (hbqe) {
		struct hbq_s *hbqp = &phba->hbqs[hbqno];

		hbqe->bde.addrHigh = le32_to_cpu(putPaddrHigh(physaddr));
		hbqe->bde.addrLow = le32_to_cpu(putPaddrLow(physaddr));
		hbqe->bde.tus.f.bdeSize = hbq_buf->size;
		hbqe->bde.tus.f.bdeFlags = 0;
		hbqe->bde.tus.w = le32_to_cpu(hbqe->bde.tus.w);
		hbqe->buffer_tag = le32_to_cpu(hbq_buf->tag);
		/* Sync SLIM */
		hbqp->hbqPutIdx = hbqp->next_hbqPutIdx;
		writel(hbqp->hbqPutIdx, phba->hbq_put + hbqno);
		/* flush */
		readl(phba->hbq_put + hbqno);
		list_add_tail(&hbq_buf->dbuf.list, &hbqp->hbq_buffer_list);
		return 0;
	} else
		return -ENOMEM;
}

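/**
 * lpfc_sli_hbq_to_firmware_s4 - Post the hbq buffer to SLI4 firmware
 * @phba: Pointer to HBA context object.
 * @hbqno: HBQ number.
 * @hbq_buf: Pointer to HBQ buffer.
 *
 * Posts the buffer pair to the header and data receive queues. On
 * success the RQ put index is saved as the buffer tag. The caller is
 * expected to hold the hbalock.
 **/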
static int
lpfc_sli_hbq_to_firmware_s4(struct lpfc_hba *phba, uint32_t hbqno,
			    struct hbq_dmabuf *hbq_buf)
{
	int rc;
	struct lpfc_rqe hrqe;
	struct lpfc_rqe drqe;

	lockdep_assert_held(&phba->hbalock);
	hrqe.address_lo = putPaddrLow(hbq_buf->hbuf.phys);
	hrqe.address_hi = putPaddrHigh(hbq_buf->hbuf.phys);
	drqe.address_lo = putPaddrLow(hbq_buf->dbuf.phys);
	drqe.address_hi = putPaddrHigh(hbq_buf->dbuf.phys);
	rc = lpfc_sli4_rq_put(phba->sli4_hba.hdr_rq, phba->sli4_hba.dat_rq,
			      &hrqe, &drqe);
	if (rc < 0)
		return rc;
	hbq_buf->tag = rc;
	list_add_tail(&hbq_buf->dbuf.list, &phba->hbqs[hbqno].hbq_buffer_list);
	return 0;
}

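/* HBQ for ELS and CT traffic. */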
static struct lpfc_hbq_init lpfc_els_hbq = {
	.rn = 1,
	.entry_count = 256,
	.mask_count = 0,
	.profile = 0,
	.ring_mask = (1 << LPFC_ELS_RING),
	.buffer_count = 0,
	.init_count = 40,
	.add_count = 40,
};

/* HBQ for the extra ring if needed */
static struct lpfc_hbq_init lpfc_extra_hbq = {
	.rn = 1,
	.entry_count = 200,
	.mask_count = 0,
	.profile = 0,
	.ring_mask = (1 << LPFC_EXTRA_RING),
	.buffer_count = 0,
	.init_count = 0,
	.add_count = 5,
};

/* Array of HBQs */
struct lpfc_hbq_init *lpfc_hbq_defs[] = {
	&lpfc_els_hbq,
	&lpfc_extra_hbq,
};

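/**
 * lpfc_sli_hbqbuf_fill_hbqs - Post more hbq buffers to HBQ
 * @phba: Pointer to HBA context object.
 * @hbqno: HBQ number.
 * @count: Number of HBQ buffers to be posted.
 *
 * Allocates up to @count buffers (capped by the HBQ's entry_count) and
 * posts them to the firmware. Returns the number of buffers
 * successfully posted.
 **/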
static int
lpfc_sli_hbqbuf_fill_hbqs(struct lpfc_hba *phba, uint32_t hbqno, uint32_t count)
{
	uint32_t i, posted = 0;
	unsigned long flags;
	struct hbq_dmabuf *hbq_buffer;
	LIST_HEAD(hbq_buf_list);
	if (!phba->hbqs[hbqno].hbq_alloc_buffer)
		return 0;

	if ((phba->hbqs[hbqno].buffer_count + count) >
	    lpfc_hbq_defs[hbqno]->entry_count)
		count = lpfc_hbq_defs[hbqno]->entry_count -
			phba->hbqs[hbqno].buffer_count;
	if (!count)
		return 0;
	/* Allocate HBQ entries */
	for (i = 0; i < count; i++) {
		hbq_buffer = (phba->hbqs[hbqno].hbq_alloc_buffer)(phba);
		if (!hbq_buffer)
			break;
		list_add_tail(&hbq_buffer->dbuf.list, &hbq_buf_list);
	}
	/* Check whether HBQ is still in use */
	spin_lock_irqsave(&phba->hbalock, flags);
	if (!phba->hbq_in_use)
		goto err;
	while (!list_empty(&hbq_buf_list)) {
		list_remove_head(&hbq_buf_list, hbq_buffer, struct hbq_dmabuf,
				 dbuf.list);
		hbq_buffer->tag = (phba->hbqs[hbqno].buffer_count |
				   (hbqno << 16));
		if (!lpfc_sli_hbq_to_firmware(phba, hbqno, hbq_buffer)) {
			phba->hbqs[hbqno].buffer_count++;
			posted++;
		} else
			(phba->hbqs[hbqno].hbq_free_buffer)(phba, hbq_buffer);
	}
	spin_unlock_irqrestore(&phba->hbalock, flags);
	return posted;
err:
	spin_unlock_irqrestore(&phba->hbalock, flags);
	while (!list_empty(&hbq_buf_list)) {
		list_remove_head(&hbq_buf_list, hbq_buffer, struct hbq_dmabuf,
				 dbuf.list);
		(phba->hbqs[hbqno].hbq_free_buffer)(phba, hbq_buffer);
	}
	return 0;
}

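/**
 * lpfc_sli_hbqbuf_add_hbqs - Post more HBQ buffers to firmware
 * @phba: Pointer to HBA context object.
 * @qno: HBQ number.
 *
 * Posts add_count more buffers to the given HBQ (no-op on SLI-4).
 **/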
int
lpfc_sli_hbqbuf_add_hbqs(struct lpfc_hba *phba, uint32_t qno)
{
	if (phba->sli_rev == LPFC_SLI_REV4)
		return 0;
	else
		return lpfc_sli_hbqbuf_fill_hbqs(phba, qno,
						 lpfc_hbq_defs[qno]->add_count);
}

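/**
 * lpfc_sli_hbqbuf_init_hbqs - Post initial hbq buffers to firmware
 * @phba: Pointer to HBA context object.
 * @qno: HBQ queue number.
 *
 * Posts the initial set of buffers to the HBQ: entry_count buffers on
 * SLI-4, init_count on earlier revisions.
 **/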
static int
lpfc_sli_hbqbuf_init_hbqs(struct lpfc_hba *phba, uint32_t qno)
{
	if (phba->sli_rev == LPFC_SLI_REV4)
		return lpfc_sli_hbqbuf_fill_hbqs(phba, qno,
						 lpfc_hbq_defs[qno]->entry_count);
	else
		return lpfc_sli_hbqbuf_fill_hbqs(phba, qno,
						 lpfc_hbq_defs[qno]->init_count);
}

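/**
 * lpfc_sli_hbqbuf_get - Remove the first hbq buffer on an hbq list
 * @rb_list: Pointer to the driver buffer list.
 *
 * Removes and returns the first hbq_dmabuf on the list, or NULL.
 **/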
static struct hbq_dmabuf *
lpfc_sli_hbqbuf_get(struct list_head *rb_list)
{
	struct lpfc_dmabuf *d_buf;

	list_remove_head(rb_list, d_buf, struct lpfc_dmabuf, list);
	if (!d_buf)
		return NULL;
	return container_of(d_buf, struct hbq_dmabuf, dbuf);
}

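/**
 * lpfc_sli_hbqbuf_find - Find the hbq buffer associated with a tag
 * @phba: Pointer to HBA context object.
 * @tag: Tag of the hbq buffer.
 *
 * Looks up the in-use HBQ buffer for @tag; returns NULL and logs an
 * error when it cannot be found.
 **/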
static struct hbq_dmabuf *
lpfc_sli_hbqbuf_find(struct lpfc_hba *phba, uint32_t tag)
{
	struct lpfc_dmabuf *d_buf;
	struct hbq_dmabuf *hbq_buf;
	uint32_t hbqno;

	hbqno = tag >> 16;
	if (hbqno >= LPFC_MAX_HBQS)
		return NULL;

	spin_lock_irq(&phba->hbalock);
	list_for_each_entry(d_buf, &phba->hbqs[hbqno].hbq_buffer_list, list) {
		hbq_buf = container_of(d_buf, struct hbq_dmabuf, dbuf);
		if (hbq_buf->tag == tag) {
			spin_unlock_irq(&phba->hbalock);
			return hbq_buf;
		}
	}
	spin_unlock_irq(&phba->hbalock);
	lpfc_printf_log(phba, KERN_ERR, LOG_SLI | LOG_VPORT,
			"1803 Bad hbq tag. Data: x%x x%x\n",
			tag, phba->hbqs[tag >> 16].buffer_count);
	return NULL;
}

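/**
 * lpfc_sli_free_hbq - Give back the hbq buffer to firmware
 * @phba: Pointer to HBA context object.
 * @hbq_buffer: Pointer to HBQ buffer.
 *
 * Reposts @hbq_buffer to the firmware, freeing it if the repost fails.
 **/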
void
lpfc_sli_free_hbq(struct lpfc_hba *phba, struct hbq_dmabuf *hbq_buffer)
{
	uint32_t hbqno;

	if (hbq_buffer) {
		hbqno = hbq_buffer->tag >> 16;
		if (lpfc_sli_hbq_to_firmware(phba, hbqno, hbq_buffer))
			(phba->hbqs[hbqno].hbq_free_buffer)(phba, hbq_buffer);
	}
}

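/**
 * lpfc_sli_chk_mbx_command - Check if the mailbox is a legitimate mailbox
 * @mbxCommand: mailbox command code.
 *
 * Returns @mbxCommand if it is a known command, or MBX_SHUTDOWN for
 * unknown commands.
 **/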
static int
lpfc_sli_chk_mbx_command(uint8_t mbxCommand)
{
	uint8_t ret;

	switch (mbxCommand) {
	case MBX_LOAD_SM:
	case MBX_READ_NV:
	case MBX_WRITE_NV:
	case MBX_WRITE_VPARMS:
	case MBX_RUN_BIU_DIAG:
	case MBX_INIT_LINK:
	case MBX_DOWN_LINK:
	case MBX_CONFIG_LINK:
	case MBX_CONFIG_RING:
	case MBX_RESET_RING:
	case MBX_READ_CONFIG:
	case MBX_READ_RCONFIG:
	case MBX_READ_SPARM:
	case MBX_READ_STATUS:
	case MBX_READ_RPI:
	case MBX_READ_XRI:
	case MBX_READ_REV:
	case MBX_READ_LNK_STAT:
	case MBX_REG_LOGIN:
	case MBX_UNREG_LOGIN:
	case MBX_CLEAR_LA:
	case MBX_DUMP_MEMORY:
	case MBX_DUMP_CONTEXT:
	case MBX_RUN_DIAGS:
	case MBX_RESTART:
	case MBX_UPDATE_CFG:
	case MBX_DOWN_LOAD:
	case MBX_DEL_LD_ENTRY:
	case MBX_RUN_PROGRAM:
	case MBX_SET_MASK:
	case MBX_SET_VARIABLE:
	case MBX_UNREG_D_ID:
	case MBX_KILL_BOARD:
	case MBX_CONFIG_FARP:
	case MBX_BEACON:
	case MBX_LOAD_AREA:
	case MBX_RUN_BIU_DIAG64:
	case MBX_CONFIG_PORT:
	case MBX_READ_SPARM64:
	case MBX_READ_RPI64:
	case MBX_REG_LOGIN64:
	case MBX_READ_TOPOLOGY:
	case MBX_WRITE_WWN:
	case MBX_SET_DEBUG:
	case MBX_LOAD_EXP_ROM:
	case MBX_ASYNCEVT_ENABLE:
	case MBX_REG_VPI:
	case MBX_UNREG_VPI:
	case MBX_HEARTBEAT:
	case MBX_PORT_CAPABILITIES:
	case MBX_PORT_IOV_CONTROL:
	case MBX_SLI4_CONFIG:
	case MBX_SLI4_REQ_FTRS:
	case MBX_REG_FCFI:
	case MBX_UNREG_FCFI:
	case MBX_REG_VFI:
	case MBX_UNREG_VFI:
	case MBX_INIT_VPI:
	case MBX_INIT_VFI:
	case MBX_RESUME_RPI:
	case MBX_READ_EVENT_LOG_STATUS:
	case MBX_READ_EVENT_LOG:
	case MBX_SECURITY_MGMT:
	case MBX_AUTH_PORT:
	case MBX_ACCESS_VDATA:
		ret = mbxCommand;
		break;
	default:
		ret = MBX_SHUTDOWN;
		break;
	}
	return ret;
}

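/**
 * lpfc_sli_wake_mbox_wait - lpfc_sli_issue_mbox_wait mbox completion handler
 * @phba: Pointer to HBA context object.
 * @pmboxq: Pointer to mailbox command.
 *
 * Completion handler for mailbox commands issued with MBX_SLEEP: sets
 * LPFC_MBX_WAKE and wakes up the thread waiting on the mailbox.
 **/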
void
lpfc_sli_wake_mbox_wait(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmboxq)
{
	wait_queue_head_t *pdone_q;
	unsigned long drvr_flag;

	/*
	 * If pdone_q is empty, the driver thread gave up waiting and
	 * continued running.
	 */
	pmboxq->mbox_flag |= LPFC_MBX_WAKE;
	spin_lock_irqsave(&phba->hbalock, drvr_flag);
	pdone_q = (wait_queue_head_t *) pmboxq->context1;
	if (pdone_q)
		wake_up_interruptible(pdone_q);
	spin_unlock_irqrestore(&phba->hbalock, drvr_flag);
	return;
}

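/**
 * lpfc_sli_def_mbox_cmpl - Default mailbox completion handler
 * @phba: Pointer to HBA context object.
 * @pmb: Pointer to mailbox object.
 *
 * Frees memory associated with the mailbox command, cleans up the RPI
 * from a stale REG_LOGIN, records VPI registration state, and frees the
 * mailbox object.
 **/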
void
lpfc_sli_def_mbox_cmpl(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
{
	struct lpfc_vport *vport = pmb->vport;
	struct lpfc_dmabuf *mp;
	struct lpfc_nodelist *ndlp;
	struct Scsi_Host *shost;
	uint16_t rpi, vpi;
	int rc;

	mp = (struct lpfc_dmabuf *) (pmb->context1);

	if (mp) {
		lpfc_mbuf_free(phba, mp->virt, mp->phys);
		kfree(mp);
	}

	/*
	 * If a REG_LOGIN succeeded after node is destroyed or node
	 * is in re-discovery driver need to cleanup the RPI.
	 */
	if (!(phba->pport->load_flag & FC_UNLOADING) &&
	    pmb->u.mb.mbxCommand == MBX_REG_LOGIN64 &&
	    !pmb->u.mb.mbxStatus) {
		rpi = pmb->u.mb.un.varWords[0];
		vpi = pmb->u.mb.un.varRegLogin.vpi;
		lpfc_unreg_login(phba, vpi, rpi, pmb);
		pmb->vport = vport;
		pmb->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
		rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT);
		if (rc != MBX_NOT_FINISHED)
			return;
	}

	if ((pmb->u.mb.mbxCommand == MBX_REG_VPI) &&
	    !(phba->pport->load_flag & FC_UNLOADING) &&
	    !pmb->u.mb.mbxStatus) {
		shost = lpfc_shost_from_vport(vport);
		spin_lock_irq(shost->host_lock);
		vport->vpi_state |= LPFC_VPI_REGISTERED;
		vport->fc_flag &= ~FC_VPORT_NEEDS_REG_VPI;
		spin_unlock_irq(shost->host_lock);
	}

	if (pmb->u.mb.mbxCommand == MBX_REG_LOGIN64) {
		ndlp = (struct lpfc_nodelist *)pmb->context2;
		lpfc_nlp_put(ndlp);
		pmb->context2 = NULL;
	}

	/* Check security permission status on INIT_LINK mailbox command */
	if ((pmb->u.mb.mbxCommand == MBX_INIT_LINK) &&
	    (pmb->u.mb.mbxStatus == MBXERR_SEC_NO_PERMISSION))
		lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
				"2860 SLI authentication is required "
				"for INIT_LINK but has not done yet\n");

	if (bf_get(lpfc_mqe_command, &pmb->u.mqe) == MBX_SLI4_CONFIG)
		lpfc_sli4_mbox_cmd_free(phba, pmb);
	else
		mempool_free(pmb, phba->mbox_mem_pool);
}

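/**
 * lpfc_sli4_unreg_rpi_cmpl_clr - mailbox completion handler for unreg_login
 * @phba: Pointer to HBA context object.
 * @pmb: Pointer to mailbox object.
 *
 * Completion handler for UNREG_LOGIN issued as part of a LOGO accept
 * cleanup: drops the node reference once the RPI has been unregistered
 * (SLI-4 if_type 2 only).
 **/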
void
lpfc_sli4_unreg_rpi_cmpl_clr(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
{
	struct lpfc_vport *vport = pmb->vport;
	struct lpfc_nodelist *ndlp;

	ndlp = pmb->context1;
	if (pmb->u.mb.mbxCommand == MBX_UNREG_LOGIN) {
		if (phba->sli_rev == LPFC_SLI_REV4 &&
		    (bf_get(lpfc_sli_intf_if_type,
			    &phba->sli4_hba.sli_intf) ==
		     LPFC_SLI_INTF_IF_TYPE_2)) {
			if (ndlp) {
				lpfc_printf_vlog(vport, KERN_INFO, LOG_SLI,
						 "0010 UNREG_LOGIN vpi:%x "
						 "rpi:%x DID:%x map:%x %p\n",
						 vport->vpi, ndlp->nlp_rpi,
						 ndlp->nlp_DID,
						 ndlp->nlp_usg_map, ndlp);
				ndlp->nlp_flag &= ~NLP_LOGO_ACC;
				lpfc_nlp_put(ndlp);
			}
		}
	}

	mempool_free(pmb, phba->mbox_mem_pool);
}

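/**
 * lpfc_sli_handle_mb_event - Handle mailbox completions from firmware
 * @phba: Pointer to HBA context object.
 *
 * Processes completed mailbox commands from mboxq_cmpl: retries commands
 * that failed for lack of resources, treats unknown commands as a fatal
 * error, and invokes each command's completion handler. Always returns 0.
 **/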
int
lpfc_sli_handle_mb_event(struct lpfc_hba *phba)
{
	MAILBOX_t *pmbox;
	LPFC_MBOXQ_t *pmb;
	int rc;
	LIST_HEAD(cmplq);

	phba->sli.slistat.mbox_event++;

	/* Get all completed mailbox buffers into the cmplq */
	spin_lock_irq(&phba->hbalock);
	list_splice_init(&phba->sli.mboxq_cmpl, &cmplq);
	spin_unlock_irq(&phba->hbalock);

	/* Get a Mailbox buffer to setup mailbox commands for callback */
	do {
		list_remove_head(&cmplq, pmb, LPFC_MBOXQ_t, list);
		if (pmb == NULL)
			break;

		pmbox = &pmb->u.mb;

		if (pmbox->mbxCommand != MBX_HEARTBEAT) {
			if (pmb->vport) {
				lpfc_debugfs_disc_trc(pmb->vport,
					LPFC_DISC_TRC_MBOX_VPORT,
					"MBOX cmpl vport: cmd:x%x mb:x%x x%x",
					(uint32_t)pmbox->mbxCommand,
					pmbox->un.varWords[0],
					pmbox->un.varWords[1]);
			}
			else {
				lpfc_debugfs_disc_trc(phba->pport,
					LPFC_DISC_TRC_MBOX,
					"MBOX cmpl: cmd:x%x mb:x%x x%x",
					(uint32_t)pmbox->mbxCommand,
					pmbox->un.varWords[0],
					pmbox->un.varWords[1]);
			}
		}

		/*
		 * It is a fatal error if unknown mbox command completion.
		 */
		if (lpfc_sli_chk_mbx_command(pmbox->mbxCommand) ==
		    MBX_SHUTDOWN) {
			/* Unknown mailbox command compl */
			lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
					"(%d):0323 Unknown Mailbox command "
					"x%x (x%x/x%x) Cmpl\n",
					pmb->vport ? pmb->vport->vpi : 0,
					pmbox->mbxCommand,
					lpfc_sli_config_mbox_subsys_get(phba,
									pmb),
					lpfc_sli_config_mbox_opcode_get(phba,
									pmb));
			phba->link_state = LPFC_HBA_ERROR;
			phba->work_hs = HS_FFER3;
			lpfc_handle_eratt(phba);
			continue;
		}

		if (pmbox->mbxStatus) {
			phba->sli.slistat.mbox_stat_err++;
			if (pmbox->mbxStatus == MBXERR_NO_RESOURCES) {
				/* Mbox cmd cmpl error - RETRYing */
				lpfc_printf_log(phba, KERN_INFO,
					LOG_MBOX | LOG_SLI,
					"(%d):0305 Mbox cmd cmpl "
					"error - RETRYing Data: x%x "
					"(x%x/x%x) x%x x%x x%x\n",
					pmb->vport ? pmb->vport->vpi : 0,
					pmbox->mbxCommand,
					lpfc_sli_config_mbox_subsys_get(phba,
									pmb),
					lpfc_sli_config_mbox_opcode_get(phba,
									pmb),
					pmbox->mbxStatus,
					pmbox->un.varWords[0],
					pmb->vport->port_state);
				pmbox->mbxStatus = 0;
				pmbox->mbxOwner = OWN_HOST;
				rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT);
				if (rc != MBX_NOT_FINISHED)
					continue;
			}
		}

		/* Mailbox cmd <cmd> Cmpl <cmpl> */
		lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI,
				"(%d):0307 Mailbox cmd x%x (x%x/x%x) Cmpl x%p "
				"Data: x%x x%x x%x x%x x%x x%x x%x x%x x%x "
				"x%x x%x x%x\n",
				pmb->vport ? pmb->vport->vpi : 0,
				pmbox->mbxCommand,
				lpfc_sli_config_mbox_subsys_get(phba, pmb),
				lpfc_sli_config_mbox_opcode_get(phba, pmb),
				pmb->mbox_cmpl,
				*((uint32_t *) pmbox),
				pmbox->un.varWords[0],
				pmbox->un.varWords[1],
				pmbox->un.varWords[2],
				pmbox->un.varWords[3],
				pmbox->un.varWords[4],
				pmbox->un.varWords[5],
				pmbox->un.varWords[6],
				pmbox->un.varWords[7],
				pmbox->un.varWords[8],
				pmbox->un.varWords[9],
				pmbox->un.varWords[10]);

		if (pmb->mbox_cmpl)
			pmb->mbox_cmpl(phba, pmb);
	} while (1);
	return 0;
}

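/**
 * lpfc_sli_get_buff - Get the buffer associated with an unsolicited iocb tag
 * @phba: Pointer to HBA context object.
 * @pring: Pointer to driver SLI ring object.
 * @tag: buffer tag.
 *
 * Returns the buffer from the QUE_BUFTAG pool when the tag has the
 * QUE_BUFTAG_BIT set, otherwise from the HBQs.
 **/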
static struct lpfc_dmabuf *
lpfc_sli_get_buff(struct lpfc_hba *phba,
		  struct lpfc_sli_ring *pring,
		  uint32_t tag)
{
	struct hbq_dmabuf *hbq_entry;

	if (tag & QUE_BUFTAG_BIT)
		return lpfc_sli_ring_taggedbuf_get(phba, pring, tag);
	hbq_entry = lpfc_sli_hbqbuf_find(phba, tag);
	if (!hbq_entry)
		return NULL;
	return &hbq_entry->dbuf;
}

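/**
 * lpfc_complete_unsol_iocb - Complete an unsolicited sequence
 * @phba: Pointer to HBA context object.
 * @pring: Pointer to driver SLI ring object.
 * @saveq: Pointer to the iocbq struct representing the sequence starting
 *         frame.
 * @fch_r_ctl: the r_ctl for the first frame of the sequence.
 * @fch_type: the type for the first frame of the sequence.
 *
 * Hands the sequence to the ring's registered handler, matched on
 * R_CTL/TYPE (or the profile-0 catch-all). Returns 1 if handled, else 0.
 **/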
static int
lpfc_complete_unsol_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
			 struct lpfc_iocbq *saveq, uint32_t fch_r_ctl,
			 uint32_t fch_type)
{
	int i;

	/* unSolicited Responses */
	if (pring->prt[0].profile) {
		if (pring->prt[0].lpfc_sli_rcv_unsol_event)
			(pring->prt[0].lpfc_sli_rcv_unsol_event) (phba, pring,
								  saveq);
		return 1;
	}

	/* We must search, based on rctl / type
	   for the right routine */
	for (i = 0; i < pring->num_mask; i++) {
		if ((pring->prt[i].rctl == fch_r_ctl) &&
		    (pring->prt[i].type == fch_type)) {
			if (pring->prt[i].lpfc_sli_rcv_unsol_event)
				(pring->prt[i].lpfc_sli_rcv_unsol_event)
					(phba, pring, saveq);
			return 1;
		}
	}
	return 0;
}

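/**
 * lpfc_sli_process_unsol_iocb - Unsolicited iocb handler
 * @phba: Pointer to HBA context object.
 * @pring: Pointer to driver SLI ring object.
 * @saveq: Pointer to the unsolicited iocb.
 *
 * Handles ASYNC_STATUS iocbs, frees buffers returned with RET_XRI,
 * attaches HBQ buffers to the iocb, links sequence continuations by
 * OX_ID, and passes the completed sequence to the upper layer handler.
 **/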
static int
lpfc_sli_process_unsol_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
			    struct lpfc_iocbq *saveq)
{
	IOCB_t *irsp;
	WORD5 *w5p;
	uint32_t Rctl, Type;
	struct lpfc_iocbq *iocbq;
	struct lpfc_dmabuf *dmzbuf;

	irsp = &(saveq->iocb);

	if (irsp->ulpCommand == CMD_ASYNC_STATUS) {
		if (pring->lpfc_sli_rcv_async_status)
			pring->lpfc_sli_rcv_async_status(phba, pring, saveq);
		else
			lpfc_printf_log(phba,
					KERN_WARNING,
					LOG_SLI,
					"0316 Ring %d handler: unexpected "
					"ASYNC_STATUS iocb received evt_code "
					"0x%x\n",
					pring->ringno,
					irsp->un.asyncstat.evt_code);
		return 1;
	}

	if ((irsp->ulpCommand == CMD_IOCB_RET_XRI64_CX) &&
	    (phba->sli3_options & LPFC_SLI3_HBQ_ENABLED)) {
		if (irsp->ulpBdeCount > 0) {
			dmzbuf = lpfc_sli_get_buff(phba, pring,
						   irsp->un.ulpWord[3]);
			lpfc_in_buf_free(phba, dmzbuf);
		}

		if (irsp->ulpBdeCount > 1) {
			dmzbuf = lpfc_sli_get_buff(phba, pring,
						   irsp->unsli3.sli3Words[3]);
			lpfc_in_buf_free(phba, dmzbuf);
		}

		if (irsp->ulpBdeCount > 2) {
			dmzbuf = lpfc_sli_get_buff(phba, pring,
						   irsp->unsli3.sli3Words[7]);
			lpfc_in_buf_free(phba, dmzbuf);
		}

		return 1;
	}

	if (phba->sli3_options & LPFC_SLI3_HBQ_ENABLED) {
		if (irsp->ulpBdeCount != 0) {
			saveq->context2 = lpfc_sli_get_buff(phba, pring,
							    irsp->un.ulpWord[3]);
			if (!saveq->context2)
				lpfc_printf_log(phba,
					KERN_ERR,
					LOG_SLI,
					"0341 Ring %d Cannot find buffer for "
					"an unsolicited iocb. tag 0x%x\n",
					pring->ringno,
					irsp->un.ulpWord[3]);
		}
		if (irsp->ulpBdeCount == 2) {
			saveq->context3 = lpfc_sli_get_buff(phba, pring,
							    irsp->unsli3.sli3Words[7]);
			if (!saveq->context3)
				lpfc_printf_log(phba,
					KERN_ERR,
					LOG_SLI,
					"0342 Ring %d Cannot find buffer for an"
					" unsolicited iocb. tag 0x%x\n",
					pring->ringno,
					irsp->unsli3.sli3Words[7]);
		}
		list_for_each_entry(iocbq, &saveq->list, list) {
			irsp = &(iocbq->iocb);
			if (irsp->ulpBdeCount != 0) {
				iocbq->context2 = lpfc_sli_get_buff(phba, pring,
								    irsp->un.ulpWord[3]);
				if (!iocbq->context2)
					lpfc_printf_log(phba,
						KERN_ERR,
						LOG_SLI,
						"0343 Ring %d Cannot find "
						"buffer for an unsolicited iocb"
						". tag 0x%x\n", pring->ringno,
						irsp->un.ulpWord[3]);
			}
			if (irsp->ulpBdeCount == 2) {
				iocbq->context3 = lpfc_sli_get_buff(phba, pring,
								    irsp->unsli3.sli3Words[7]);
				if (!iocbq->context3)
					lpfc_printf_log(phba,
						KERN_ERR,
						LOG_SLI,
						"0344 Ring %d Cannot find "
						"buffer for an unsolicited "
						"iocb. tag 0x%x\n",
						pring->ringno,
						irsp->unsli3.sli3Words[7]);
			}
		}
	}
	if (irsp->ulpBdeCount != 0 &&
	    (irsp->ulpCommand == CMD_IOCB_RCV_CONT64_CX ||
	     irsp->ulpStatus == IOSTAT_INTERMED_RSP)) {
		int found = 0;

		/* search continue save q for same XRI */
		list_for_each_entry(iocbq, &pring->iocb_continue_saveq, clist) {
			if (iocbq->iocb.unsli3.rcvsli3.ox_id ==
			    saveq->iocb.unsli3.rcvsli3.ox_id) {
				list_add_tail(&saveq->list, &iocbq->list);
				found = 1;
				break;
			}
		}
		if (!found)
			list_add_tail(&saveq->clist,
				      &pring->iocb_continue_saveq);
		if (saveq->iocb.ulpStatus != IOSTAT_INTERMED_RSP) {
			list_del_init(&iocbq->clist);
			saveq = iocbq;
			irsp = &(saveq->iocb);
		} else
			return 0;
	}
	if ((irsp->ulpCommand == CMD_RCV_ELS_REQ64_CX) ||
	    (irsp->ulpCommand == CMD_RCV_ELS_REQ_CX) ||
	    (irsp->ulpCommand == CMD_IOCB_RCV_ELS64_CX)) {
		Rctl = FC_RCTL_ELS_REQ;
		Type = FC_TYPE_ELS;
	} else {
		w5p = (WORD5 *)&(saveq->iocb.un.ulpWord[5]);
		Rctl = w5p->hcsw.Rctl;
		Type = w5p->hcsw.Type;

		/* Firmware Workaround */
		if ((Rctl == 0) && (pring->ringno == LPFC_ELS_RING) &&
		    (irsp->ulpCommand == CMD_RCV_SEQUENCE64_CX ||
		     irsp->ulpCommand == CMD_IOCB_RCV_SEQ64_CX)) {
			Rctl = FC_RCTL_ELS_REQ;
			Type = FC_TYPE_ELS;
			w5p->hcsw.Rctl = Rctl;
			w5p->hcsw.Type = Type;
		}
	}

	if (!lpfc_complete_unsol_iocb(phba, pring, saveq, Rctl, Type))
		lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
				"0313 Ring %d handler: unexpected Rctl x%x "
				"Type x%x received\n",
				pring->ringno, Rctl, Type);

	return 1;
}

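/**
 * lpfc_sli_iocbq_lookup - Find command iocb for the given response iocb
 * @phba: Pointer to HBA context object.
 * @pring: Pointer to driver SLI ring object.
 * @prspiocb: Pointer to response iocb object.
 *
 * Looks up the command iocb matching the response iocb's iotag and
 * removes it from the txcmplq; returns NULL (with an error log) when the
 * iotag is out of range. The caller is expected to hold the hbalock.
 **/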
2669static struct lpfc_iocbq *
2670lpfc_sli_iocbq_lookup(struct lpfc_hba *phba,
2671 struct lpfc_sli_ring *pring,
2672 struct lpfc_iocbq *prspiocb)
2673{
2674 struct lpfc_iocbq *cmd_iocb = NULL;
2675 uint16_t iotag;
2676 lockdep_assert_held(&phba->hbalock);
2677
2678 iotag = prspiocb->iocb.ulpIoTag;
2679
2680 if (iotag != 0 && iotag <= phba->sli.last_iotag) {
2681 cmd_iocb = phba->sli.iocbq_lookup[iotag];
2682 if (cmd_iocb->iocb_flag & LPFC_IO_ON_TXCMPLQ) {
2683
2684 list_del_init(&cmd_iocb->list);
2685 cmd_iocb->iocb_flag &= ~LPFC_IO_ON_TXCMPLQ;
2686 return cmd_iocb;
2687 }
2688 }
2689
2690 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
2691 "0317 iotag x%x is out of "
2692 "range: max iotag x%x wd0 x%x\n",
2693 iotag, phba->sli.last_iotag,
2694 *(((uint32_t *) &prspiocb->iocb) + 7));
2695 return NULL;
2696}
2697
2698
2699
2700
2701
2702
2703
2704
2705
2706
2707
2708
2709
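/**
 * lpfc_sli_iocbq_lookup_by_tag - Look up a command iocb by iotag
 * @phba: Pointer to HBA context object.
 * @pring: Pointer to driver SLI ring object.
 * @iotag: IOCB tag.
 *
 * Same as lpfc_sli_iocbq_lookup(), but takes the iotag directly instead
 * of extracting it from a response iocb. Must be called with hbalock
 * held.
 **/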
2710static struct lpfc_iocbq *
2711lpfc_sli_iocbq_lookup_by_tag(struct lpfc_hba *phba,
2712 struct lpfc_sli_ring *pring, uint16_t iotag)
2713{
2714 struct lpfc_iocbq *cmd_iocb;
2715
2716 lockdep_assert_held(&phba->hbalock);
2717 if (iotag != 0 && iotag <= phba->sli.last_iotag) {
2718 cmd_iocb = phba->sli.iocbq_lookup[iotag];
2719 if (cmd_iocb->iocb_flag & LPFC_IO_ON_TXCMPLQ) {
2720
2721 list_del_init(&cmd_iocb->list);
2722 cmd_iocb->iocb_flag &= ~LPFC_IO_ON_TXCMPLQ;
2723 return cmd_iocb;
2724 }
2725 }
2726
2727 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
2728 "0372 iotag x%x is out of range: max iotag (x%x)\n",
2729 iotag, phba->sli.last_iotag);
2730 return NULL;
2731}
2732
2733
2734
2735
2736
2737
2738
2739
2740
2741
2742
2743
2744
2745
2746
2747
2748
2749
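/**
 * lpfc_sli_process_sol_iocb - Process a solicited iocb completion
 * @phba: Pointer to HBA context object.
 * @pring: Pointer to driver SLI ring object.
 * @saveq: Pointer to the response iocb to be processed.
 *
 * Looks up the matching command iocb and invokes its completion handler,
 * translating driver-aborted and exchange-busy conditions into the
 * status words the handler expects. If the command iocb has no
 * completion handler it is simply released; an unexpected completion on
 * a non-ELS ring is logged. Returns 1.
 **/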
2750static int
2751lpfc_sli_process_sol_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
2752 struct lpfc_iocbq *saveq)
2753{
2754 struct lpfc_iocbq *cmdiocbp;
2755 int rc = 1;
2756 unsigned long iflag;
2757
2758
2759 spin_lock_irqsave(&phba->hbalock, iflag);
2760 cmdiocbp = lpfc_sli_iocbq_lookup(phba, pring, saveq);
2761 spin_unlock_irqrestore(&phba->hbalock, iflag);
2762
2763 if (cmdiocbp) {
2764 if (cmdiocbp->iocb_cmpl) {
2765
2766
2767
2768
2769 if (saveq->iocb.ulpStatus &&
2770 (pring->ringno == LPFC_ELS_RING) &&
2771 (cmdiocbp->iocb.ulpCommand ==
2772 CMD_ELS_REQUEST64_CR))
2773 lpfc_send_els_failure_event(phba,
2774 cmdiocbp, saveq);
2775
2776
2777
2778
2779
2780 if (pring->ringno == LPFC_ELS_RING) {
2781 if ((phba->sli_rev < LPFC_SLI_REV4) &&
2782 (cmdiocbp->iocb_flag &
2783 LPFC_DRIVER_ABORTED)) {
2784 spin_lock_irqsave(&phba->hbalock,
2785 iflag);
2786 cmdiocbp->iocb_flag &=
2787 ~LPFC_DRIVER_ABORTED;
2788 spin_unlock_irqrestore(&phba->hbalock,
2789 iflag);
2790 saveq->iocb.ulpStatus =
2791 IOSTAT_LOCAL_REJECT;
2792 saveq->iocb.un.ulpWord[4] =
2793 IOERR_SLI_ABORTED;
2794
2795
2796
2797
2798
2799 spin_lock_irqsave(&phba->hbalock,
2800 iflag);
2801 saveq->iocb_flag |= LPFC_DELAY_MEM_FREE;
2802 spin_unlock_irqrestore(&phba->hbalock,
2803 iflag);
2804 }
2805 if (phba->sli_rev == LPFC_SLI_REV4) {
2806 if (saveq->iocb_flag &
2807 LPFC_EXCHANGE_BUSY) {
2808
2809
2810
2811
2812
2813
2814 spin_lock_irqsave(
2815 &phba->hbalock, iflag);
2816 cmdiocbp->iocb_flag |=
2817 LPFC_EXCHANGE_BUSY;
2818 spin_unlock_irqrestore(
2819 &phba->hbalock, iflag);
2820 }
2821 if (cmdiocbp->iocb_flag &
2822 LPFC_DRIVER_ABORTED) {
2823
2824
2825
2826
2827
2828 spin_lock_irqsave(
2829 &phba->hbalock, iflag);
2830 cmdiocbp->iocb_flag &=
2831 ~LPFC_DRIVER_ABORTED;
2832 spin_unlock_irqrestore(
2833 &phba->hbalock, iflag);
2834 cmdiocbp->iocb.ulpStatus =
2835 IOSTAT_LOCAL_REJECT;
2836 cmdiocbp->iocb.un.ulpWord[4] =
2837 IOERR_ABORT_REQUESTED;
2838
2839
2840
2841
2842
2843
2844 saveq->iocb.ulpStatus =
2845 IOSTAT_LOCAL_REJECT;
2846 saveq->iocb.un.ulpWord[4] =
2847 IOERR_SLI_ABORTED;
2848 spin_lock_irqsave(
2849 &phba->hbalock, iflag);
2850 saveq->iocb_flag |=
2851 LPFC_DELAY_MEM_FREE;
2852 spin_unlock_irqrestore(
2853 &phba->hbalock, iflag);
2854 }
2855 }
2856 }
2857 (cmdiocbp->iocb_cmpl) (phba, cmdiocbp, saveq);
2858 } else
2859 lpfc_sli_release_iocbq(phba, cmdiocbp);
2860 } else {
2861
2862
2863
2864
2865
2866 if (pring->ringno != LPFC_ELS_RING) {
2867
2868
2869
2870
2871 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
2872 "0322 Ring %d handler: "
2873 "unexpected completion IoTag x%x "
2874 "Data: x%x x%x x%x x%x\n",
2875 pring->ringno,
2876 saveq->iocb.ulpIoTag,
2877 saveq->iocb.ulpStatus,
2878 saveq->iocb.un.ulpWord[4],
2879 saveq->iocb.ulpCommand,
2880 saveq->iocb.ulpContext);
2881 }
2882 }
2883
2884 return rc;
2885}
2886
2887
2888
2889
2890
2891
2892
2893
2894
2895
2896
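/**
 * lpfc_sli_rsp_pointers_error - Report a response ring pointer error
 * @phba: Pointer to HBA context object.
 * @pring: Pointer to driver SLI ring object.
 *
 * Called when the port's rspPutInx is outside the response ring. Logs
 * the error, marks the HBA as failed, and wakes the worker thread to
 * process the error attention event.
 **/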
2897static void
2898lpfc_sli_rsp_pointers_error(struct lpfc_hba *phba, struct lpfc_sli_ring *pring)
2899{
2900 struct lpfc_pgp *pgp = &phba->port_gp[pring->ringno];
2901
2902
2903
2904
2905 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
2906 "0312 Ring %d handler: portRspPut %d "
2907 "is bigger than rsp ring %d\n",
2908 pring->ringno, le32_to_cpu(pgp->rspPutInx),
2909 pring->sli.sli3.numRiocb);
2910
2911 phba->link_state = LPFC_HBA_ERROR;
2912
2913
2914
2915
2916
2917 phba->work_ha |= HA_ERATT;
2918 phba->work_hs = HS_FFER3;
2919
2920 lpfc_worker_wake_up(phba);
2921
2922 return;
2923}
2924
2925
2926
2927
2928
2929
2930
2931
2932
2933
2934
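/**
 * lpfc_poll_eratt - Error attention polling timer handler
 * @ptr: Pointer to address of HBA context object.
 *
 * Updates the interrupts-per-second statistic, checks the HBA for an
 * error attention event, and either wakes the worker thread to handle
 * it or rearms the polling timer.
 **/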
2935void lpfc_poll_eratt(unsigned long ptr)
2936{
2937 struct lpfc_hba *phba;
2938 uint32_t eratt = 0;
2939 uint64_t sli_intr, cnt;
2940
2941 phba = (struct lpfc_hba *)ptr;
2942
2943
2944 sli_intr = phba->sli.slistat.sli_intr;
2945
2946 if (phba->sli.slistat.sli_prev_intr > sli_intr)
2947 cnt = (((uint64_t)(-1) - phba->sli.slistat.sli_prev_intr) +
2948 sli_intr);
2949 else
2950 cnt = (sli_intr - phba->sli.slistat.sli_prev_intr);
2951
2952
2953 do_div(cnt, phba->eratt_poll_interval);
2954 phba->sli.slistat.sli_ips = cnt;
2955
2956 phba->sli.slistat.sli_prev_intr = sli_intr;
2957
2958
2959 eratt = lpfc_sli_check_eratt(phba);
2960
2961 if (eratt)
2962
2963 lpfc_worker_wake_up(phba);
2964 else
2965
2966 mod_timer(&phba->eratt_poll,
2967 jiffies +
2968 msecs_to_jiffies(1000 * phba->eratt_poll_interval));
2969 return;
2970}
2971
2972
2973
2974
2975
2976
2977
2978
2979
2980
2981
2982
2983
2984
2985
2986
2987
2988
2989
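/**
 * lpfc_sli_handle_fast_ring_event - Handle ring events on an FCP ring
 * @phba: Pointer to HBA context object.
 * @pring: Pointer to driver SLI ring object.
 * @mask: Host attention register mask for this ring.
 *
 * Walks the response ring from the driver's get index to the port's put
 * index, copies each response entry into a local iocbq, and completes
 * the matching command iocbs. Reentry is avoided via the
 * fcp_ring_in_use flag, and hbalock is dropped around completion
 * handlers. Always returns 1.
 **/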
2990int
2991lpfc_sli_handle_fast_ring_event(struct lpfc_hba *phba,
2992 struct lpfc_sli_ring *pring, uint32_t mask)
2993{
2994 struct lpfc_pgp *pgp = &phba->port_gp[pring->ringno];
2995 IOCB_t *irsp = NULL;
2996 IOCB_t *entry = NULL;
2997 struct lpfc_iocbq *cmdiocbq = NULL;
2998 struct lpfc_iocbq rspiocbq;
2999 uint32_t status;
3000 uint32_t portRspPut, portRspMax;
3001 int rc = 1;
3002 lpfc_iocb_type type;
3003 unsigned long iflag;
3004 uint32_t rsp_cmpl = 0;
3005
3006 spin_lock_irqsave(&phba->hbalock, iflag);
3007 pring->stats.iocb_event++;
3008
3009
3010
3011
3012
3013 portRspMax = pring->sli.sli3.numRiocb;
3014 portRspPut = le32_to_cpu(pgp->rspPutInx);
3015 if (unlikely(portRspPut >= portRspMax)) {
3016 lpfc_sli_rsp_pointers_error(phba, pring);
3017 spin_unlock_irqrestore(&phba->hbalock, iflag);
3018 return 1;
3019 }
3020 if (phba->fcp_ring_in_use) {
3021 spin_unlock_irqrestore(&phba->hbalock, iflag);
3022 return 1;
3023 } else
3024 phba->fcp_ring_in_use = 1;
3025
3026 rmb();
3027 while (pring->sli.sli3.rspidx != portRspPut) {
3028
3029
3030
3031
3032
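		/*
		 * Fetch the next response entry and advance the local get
		 * index, wrapping at the end of the ring. The entry is
		 * copied into a local iocbq so the ring slot can be
		 * reused by the port as soon as possible.
		 */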
3033 entry = lpfc_resp_iocb(phba, pring);
3034 phba->last_completion_time = jiffies;
3035
3036 if (++pring->sli.sli3.rspidx >= portRspMax)
3037 pring->sli.sli3.rspidx = 0;
3038
3039 lpfc_sli_pcimem_bcopy((uint32_t *) entry,
3040 (uint32_t *) &rspiocbq.iocb,
3041 phba->iocb_rsp_size);
3042 INIT_LIST_HEAD(&(rspiocbq.list));
3043 irsp = &rspiocbq.iocb;
3044
3045 type = lpfc_sli_iocb_cmd_type(irsp->ulpCommand & CMD_IOCB_MASK);
3046 pring->stats.iocb_rsp++;
3047 rsp_cmpl++;
3048
3049 if (unlikely(irsp->ulpStatus)) {
3050
3051
3052
3053
3054 if ((irsp->ulpStatus == IOSTAT_LOCAL_REJECT) &&
3055 ((irsp->un.ulpWord[4] & IOERR_PARAM_MASK) ==
3056 IOERR_NO_RESOURCES)) {
3057 spin_unlock_irqrestore(&phba->hbalock, iflag);
3058 phba->lpfc_rampdown_queue_depth(phba);
3059 spin_lock_irqsave(&phba->hbalock, iflag);
3060 }
3061
3062
3063 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
3064 "0336 Rsp Ring %d error: IOCB Data: "
3065 "x%x x%x x%x x%x x%x x%x x%x x%x\n",
3066 pring->ringno,
3067 irsp->un.ulpWord[0],
3068 irsp->un.ulpWord[1],
3069 irsp->un.ulpWord[2],
3070 irsp->un.ulpWord[3],
3071 irsp->un.ulpWord[4],
3072 irsp->un.ulpWord[5],
3073 *(uint32_t *)&irsp->un1,
3074 *((uint32_t *)&irsp->un1 + 1));
3075 }
3076
3077 switch (type) {
3078 case LPFC_ABORT_IOCB:
3079 case LPFC_SOL_IOCB:
3080
3081
3082
3083
3084 if (unlikely(irsp->ulpCommand == CMD_XRI_ABORTED_CX)) {
3085 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
3086 "0333 IOCB cmd 0x%x"
3087 " processed. Skipping"
3088 " completion\n",
3089 irsp->ulpCommand);
3090 break;
3091 }
3092
3093 cmdiocbq = lpfc_sli_iocbq_lookup(phba, pring,
3094 &rspiocbq);
3095 if (unlikely(!cmdiocbq))
3096 break;
3097 if (cmdiocbq->iocb_flag & LPFC_DRIVER_ABORTED)
3098 cmdiocbq->iocb_flag &= ~LPFC_DRIVER_ABORTED;
3099 if (cmdiocbq->iocb_cmpl) {
3100 spin_unlock_irqrestore(&phba->hbalock, iflag);
3101 (cmdiocbq->iocb_cmpl)(phba, cmdiocbq,
3102 &rspiocbq);
3103 spin_lock_irqsave(&phba->hbalock, iflag);
3104 }
3105 break;
3106 case LPFC_UNSOL_IOCB:
3107 spin_unlock_irqrestore(&phba->hbalock, iflag);
3108 lpfc_sli_process_unsol_iocb(phba, pring, &rspiocbq);
3109 spin_lock_irqsave(&phba->hbalock, iflag);
3110 break;
3111 default:
3112 if (irsp->ulpCommand == CMD_ADAPTER_MSG) {
3113 char adaptermsg[LPFC_MAX_ADPTMSG];
3114 memset(adaptermsg, 0, LPFC_MAX_ADPTMSG);
3115 memcpy(&adaptermsg[0], (uint8_t *) irsp,
3116 MAX_MSG_DATA);
3117 dev_warn(&((phba->pcidev)->dev),
3118 "lpfc%d: %s\n",
3119 phba->brd_no, adaptermsg);
3120 } else {
3121
3122 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
3123 "0334 Unknown IOCB command "
3124 "Data: x%x, x%x x%x x%x x%x\n",
3125 type, irsp->ulpCommand,
3126 irsp->ulpStatus,
3127 irsp->ulpIoTag,
3128 irsp->ulpContext);
3129 }
3130 break;
3131 }
3132
3133
3134
3135
3136
3137
3138
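		/*
		 * Update the host's response get index so the port knows
		 * which ring entries have been consumed and may be
		 * reused.
		 */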
3139 writel(pring->sli.sli3.rspidx,
3140 &phba->host_gp[pring->ringno].rspGetInx);
3141
3142 if (pring->sli.sli3.rspidx == portRspPut)
3143 portRspPut = le32_to_cpu(pgp->rspPutInx);
3144 }
3145
3146 if ((rsp_cmpl > 0) && (mask & HA_R0RE_REQ)) {
3147 pring->stats.iocb_rsp_full++;
3148 status = ((CA_R0ATT | CA_R0RE_RSP) << (pring->ringno * 4));
3149 writel(status, phba->CAregaddr);
3150 readl(phba->CAregaddr);
3151 }
3152 if ((mask & HA_R0CE_RSP) && (pring->flag & LPFC_CALL_RING_AVAILABLE)) {
3153 pring->flag &= ~LPFC_CALL_RING_AVAILABLE;
3154 pring->stats.iocb_cmd_empty++;
3155
3156
3157 pring->sli.sli3.local_getidx = le32_to_cpu(pgp->cmdGetInx);
3158 lpfc_sli_resume_iocb(phba, pring);
3159
3160 if ((pring->lpfc_sli_cmd_available))
3161 (pring->lpfc_sli_cmd_available) (phba, pring);
3162
3163 }
3164
3165 phba->fcp_ring_in_use = 0;
3166 spin_unlock_irqrestore(&phba->hbalock, iflag);
3167 return rc;
3168}
3169
3170
3171
3172
3173
3174
3175
3176
3177
3178
3179
3180
3181
3182
3183
3184
3185
3186
3187
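/**
 * lpfc_sli_sp_handle_rspiocb - Process a slow-path response iocb
 * @phba: Pointer to HBA context object.
 * @pring: Pointer to driver SLI ring object.
 * @rspiocbp: Pointer to driver response iocb object.
 *
 * Adds the response iocb to the ring's continuation list. When the
 * final entry of the sequence arrives (ulpLe set), dispatches the
 * assembled response to the solicited, unsolicited, abort, or unknown
 * iocb handler and frees the continuation list as appropriate. Returns
 * NULL once a complete sequence has been consumed; otherwise returns
 * @rspiocbp, which stays queued on the continuation list.
 **/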
3188static struct lpfc_iocbq *
3189lpfc_sli_sp_handle_rspiocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
3190 struct lpfc_iocbq *rspiocbp)
3191{
3192 struct lpfc_iocbq *saveq;
3193 struct lpfc_iocbq *cmdiocbp;
3194 struct lpfc_iocbq *next_iocb;
3195 IOCB_t *irsp = NULL;
3196 uint32_t free_saveq;
3197 uint8_t iocb_cmd_type;
3198 lpfc_iocb_type type;
3199 unsigned long iflag;
3200 int rc;
3201
3202 spin_lock_irqsave(&phba->hbalock, iflag);
3203
3204 list_add_tail(&rspiocbp->list, &(pring->iocb_continueq));
3205 pring->iocb_continueq_cnt++;
3206
3207
3208 irsp = &rspiocbp->iocb;
3209 if (irsp->ulpLe) {
3210
3211
3212
3213
3214 free_saveq = 1;
3215 saveq = list_get_first(&pring->iocb_continueq,
3216 struct lpfc_iocbq, list);
3217 irsp = &(saveq->iocb);
3218 list_del_init(&pring->iocb_continueq);
3219 pring->iocb_continueq_cnt = 0;
3220
3221 pring->stats.iocb_rsp++;
3222
3223
3224
3225
3226
3227 if ((irsp->ulpStatus == IOSTAT_LOCAL_REJECT) &&
3228 ((irsp->un.ulpWord[4] & IOERR_PARAM_MASK) ==
3229 IOERR_NO_RESOURCES)) {
3230 spin_unlock_irqrestore(&phba->hbalock, iflag);
3231 phba->lpfc_rampdown_queue_depth(phba);
3232 spin_lock_irqsave(&phba->hbalock, iflag);
3233 }
3234
3235 if (irsp->ulpStatus) {
3236
3237 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
3238 "0328 Rsp Ring %d error: "
3239 "IOCB Data: "
3240 "x%x x%x x%x x%x "
3241 "x%x x%x x%x x%x "
3242 "x%x x%x x%x x%x "
3243 "x%x x%x x%x x%x\n",
3244 pring->ringno,
3245 irsp->un.ulpWord[0],
3246 irsp->un.ulpWord[1],
3247 irsp->un.ulpWord[2],
3248 irsp->un.ulpWord[3],
3249 irsp->un.ulpWord[4],
3250 irsp->un.ulpWord[5],
3251 *(((uint32_t *) irsp) + 6),
3252 *(((uint32_t *) irsp) + 7),
3253 *(((uint32_t *) irsp) + 8),
3254 *(((uint32_t *) irsp) + 9),
3255 *(((uint32_t *) irsp) + 10),
3256 *(((uint32_t *) irsp) + 11),
3257 *(((uint32_t *) irsp) + 12),
3258 *(((uint32_t *) irsp) + 13),
3259 *(((uint32_t *) irsp) + 14),
3260 *(((uint32_t *) irsp) + 15));
3261 }
3262
3263
3264
3265
3266
3267
3268
3269 iocb_cmd_type = irsp->ulpCommand & CMD_IOCB_MASK;
3270 type = lpfc_sli_iocb_cmd_type(iocb_cmd_type);
3271 switch (type) {
3272 case LPFC_SOL_IOCB:
3273 spin_unlock_irqrestore(&phba->hbalock, iflag);
3274 rc = lpfc_sli_process_sol_iocb(phba, pring, saveq);
3275 spin_lock_irqsave(&phba->hbalock, iflag);
3276 break;
3277
3278 case LPFC_UNSOL_IOCB:
3279 spin_unlock_irqrestore(&phba->hbalock, iflag);
3280 rc = lpfc_sli_process_unsol_iocb(phba, pring, saveq);
3281 spin_lock_irqsave(&phba->hbalock, iflag);
3282 if (!rc)
3283 free_saveq = 0;
3284 break;
3285
3286 case LPFC_ABORT_IOCB:
3287 cmdiocbp = NULL;
3288 if (irsp->ulpCommand != CMD_XRI_ABORTED_CX)
3289 cmdiocbp = lpfc_sli_iocbq_lookup(phba, pring,
3290 saveq);
3291 if (cmdiocbp) {
3292
3293 if (cmdiocbp->iocb_cmpl) {
3294 spin_unlock_irqrestore(&phba->hbalock,
3295 iflag);
3296 (cmdiocbp->iocb_cmpl)(phba, cmdiocbp,
3297 saveq);
3298 spin_lock_irqsave(&phba->hbalock,
3299 iflag);
3300 } else
3301 __lpfc_sli_release_iocbq(phba,
3302 cmdiocbp);
3303 }
3304 break;
3305
3306 case LPFC_UNKNOWN_IOCB:
3307 if (irsp->ulpCommand == CMD_ADAPTER_MSG) {
3308 char adaptermsg[LPFC_MAX_ADPTMSG];
3309 memset(adaptermsg, 0, LPFC_MAX_ADPTMSG);
3310 memcpy(&adaptermsg[0], (uint8_t *)irsp,
3311 MAX_MSG_DATA);
3312 dev_warn(&((phba->pcidev)->dev),
3313 "lpfc%d: %s\n",
3314 phba->brd_no, adaptermsg);
3315 } else {
3316
3317 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
3318 "0335 Unknown IOCB "
3319 "command Data: x%x "
3320 "x%x x%x x%x\n",
3321 irsp->ulpCommand,
3322 irsp->ulpStatus,
3323 irsp->ulpIoTag,
3324 irsp->ulpContext);
3325 }
3326 break;
3327 }
3328
3329 if (free_saveq) {
3330 list_for_each_entry_safe(rspiocbp, next_iocb,
3331 &saveq->list, list) {
3332 list_del_init(&rspiocbp->list);
3333 __lpfc_sli_release_iocbq(phba, rspiocbp);
3334 }
3335 __lpfc_sli_release_iocbq(phba, saveq);
3336 }
3337 rspiocbp = NULL;
3338 }
3339 spin_unlock_irqrestore(&phba->hbalock, iflag);
3340 return rspiocbp;
3341}
3342
3343
3344
3345
3346
3347
3348
3349
3350
3351
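/**
 * lpfc_sli_handle_slow_ring_event - Wrapper for slow-path ring events
 * @phba: Pointer to HBA context object.
 * @pring: Pointer to driver SLI ring object.
 * @mask: Host attention register mask for this ring.
 *
 * Invokes the SLI-3 or SLI-4 specific slow-path handler installed for
 * this HBA at initialization time.
 **/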
3352void
3353lpfc_sli_handle_slow_ring_event(struct lpfc_hba *phba,
3354 struct lpfc_sli_ring *pring, uint32_t mask)
3355{
3356 phba->lpfc_sli_handle_slow_ring_event(phba, pring, mask);
3357}
3358
3359
3360
3361
3362
3363
3364
3365
3366
3367
3368
3369
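/**
 * lpfc_sli_handle_slow_ring_event_s3 - SLI-3 slow-path ring handler
 * @phba: Pointer to HBA context object.
 * @pring: Pointer to driver SLI ring object.
 * @mask: Host attention register mask for this ring.
 *
 * Drains the response ring, copying each entry into a freshly allocated
 * iocbq and handing it to lpfc_sli_sp_handle_rspiocb(). On a response
 * ring pointer error the HBA is failed and error attention handling is
 * triggered.
 **/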
3370static void
3371lpfc_sli_handle_slow_ring_event_s3(struct lpfc_hba *phba,
3372 struct lpfc_sli_ring *pring, uint32_t mask)
3373{
3374 struct lpfc_pgp *pgp;
3375 IOCB_t *entry;
3376 IOCB_t *irsp = NULL;
3377 struct lpfc_iocbq *rspiocbp = NULL;
3378 uint32_t portRspPut, portRspMax;
3379 unsigned long iflag;
3380 uint32_t status;
3381
3382 pgp = &phba->port_gp[pring->ringno];
3383 spin_lock_irqsave(&phba->hbalock, iflag);
3384 pring->stats.iocb_event++;
3385
3386
3387
3388
3389
3390 portRspMax = pring->sli.sli3.numRiocb;
3391 portRspPut = le32_to_cpu(pgp->rspPutInx);
3392 if (portRspPut >= portRspMax) {
3393
3394
3395
3396
3397 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
3398 "0303 Ring %d handler: portRspPut %d "
3399 "is bigger than rsp ring %d\n",
3400 pring->ringno, portRspPut, portRspMax);
3401
3402 phba->link_state = LPFC_HBA_ERROR;
3403 spin_unlock_irqrestore(&phba->hbalock, iflag);
3404
3405 phba->work_hs = HS_FFER3;
3406 lpfc_handle_eratt(phba);
3407
3408 return;
3409 }
3410
3411 rmb();
3412 while (pring->sli.sli3.rspidx != portRspPut) {
3413
3414
3415
3416
3417
3418
3419
3420
3421
3422
3423
3424
3425
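		/*
		 * Copy the next response entry into a freshly allocated
		 * iocbq so the ring slot is freed quickly; the iocb is
		 * small, so the copy cost is minor. Chained entries
		 * accumulate on the ring's continuation queue until an
		 * entry with ulpLe set completes the sequence.
		 */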
3426 entry = lpfc_resp_iocb(phba, pring);
3427
3428 phba->last_completion_time = jiffies;
3429 rspiocbp = __lpfc_sli_get_iocbq(phba);
3430 if (rspiocbp == NULL) {
3431 printk(KERN_ERR "%s: out of buffers! Failing "
3432 "completion.\n", __func__);
3433 break;
3434 }
3435
3436 lpfc_sli_pcimem_bcopy(entry, &rspiocbp->iocb,
3437 phba->iocb_rsp_size);
3438 irsp = &rspiocbp->iocb;
3439
3440 if (++pring->sli.sli3.rspidx >= portRspMax)
3441 pring->sli.sli3.rspidx = 0;
3442
3443 if (pring->ringno == LPFC_ELS_RING) {
3444 lpfc_debugfs_slow_ring_trc(phba,
3445 "IOCB rsp ring: wd4:x%08x wd6:x%08x wd7:x%08x",
3446 *(((uint32_t *) irsp) + 4),
3447 *(((uint32_t *) irsp) + 6),
3448 *(((uint32_t *) irsp) + 7));
3449 }
3450
3451 writel(pring->sli.sli3.rspidx,
3452 &phba->host_gp[pring->ringno].rspGetInx);
3453
3454 spin_unlock_irqrestore(&phba->hbalock, iflag);
3455
3456 rspiocbp = lpfc_sli_sp_handle_rspiocb(phba, pring, rspiocbp);
3457 spin_lock_irqsave(&phba->hbalock, iflag);
3458
3459
3460
3461
3462
3463
3464 if (pring->sli.sli3.rspidx == portRspPut) {
3465 portRspPut = le32_to_cpu(pgp->rspPutInx);
3466 }
3467 }
3468
3469 if ((rspiocbp != NULL) && (mask & HA_R0RE_REQ)) {
3470
3471 pring->stats.iocb_rsp_full++;
3472
3473 status = ((CA_R0ATT | CA_R0RE_RSP) << (pring->ringno * 4));
3474 writel(status, phba->CAregaddr);
3475 readl(phba->CAregaddr);
3476 }
3477 if ((mask & HA_R0CE_RSP) && (pring->flag & LPFC_CALL_RING_AVAILABLE)) {
3478 pring->flag &= ~LPFC_CALL_RING_AVAILABLE;
3479 pring->stats.iocb_cmd_empty++;
3480
3481
3482 pring->sli.sli3.local_getidx = le32_to_cpu(pgp->cmdGetInx);
3483 lpfc_sli_resume_iocb(phba, pring);
3484
3485 if ((pring->lpfc_sli_cmd_available))
3486 (pring->lpfc_sli_cmd_available) (phba, pring);
3487
3488 }
3489
3490 spin_unlock_irqrestore(&phba->hbalock, iflag);
3491 return;
3492}
3493
3494
3495
3496
3497
3498
3499
3500
3501
3502
3503
3504
3505
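/**
 * lpfc_sli_handle_slow_ring_event_s4 - SLI-4 slow-path queue handler
 * @phba: Pointer to HBA context object.
 * @pring: Pointer to driver SLI ring object.
 * @mask: Host attention register mask (unused on SLI-4).
 *
 * Drains the slow-path queue event list, converting ELS work queue
 * completions into response iocbs and passing received buffers to the
 * unsolicited receive handler.
 **/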
3506static void
3507lpfc_sli_handle_slow_ring_event_s4(struct lpfc_hba *phba,
3508 struct lpfc_sli_ring *pring, uint32_t mask)
3509{
3510 struct lpfc_iocbq *irspiocbq;
3511 struct hbq_dmabuf *dmabuf;
3512 struct lpfc_cq_event *cq_event;
3513 unsigned long iflag;
3514
3515 spin_lock_irqsave(&phba->hbalock, iflag);
3516 phba->hba_flag &= ~HBA_SP_QUEUE_EVT;
3517 spin_unlock_irqrestore(&phba->hbalock, iflag);
3518 while (!list_empty(&phba->sli4_hba.sp_queue_event)) {
3519
3520 spin_lock_irqsave(&phba->hbalock, iflag);
3521 list_remove_head(&phba->sli4_hba.sp_queue_event,
3522 cq_event, struct lpfc_cq_event, list);
3523 spin_unlock_irqrestore(&phba->hbalock, iflag);
3524
3525 switch (bf_get(lpfc_wcqe_c_code, &cq_event->cqe.wcqe_cmpl)) {
3526 case CQE_CODE_COMPL_WQE:
3527 irspiocbq = container_of(cq_event, struct lpfc_iocbq,
3528 cq_event);
3529
3530 irspiocbq = lpfc_sli4_els_wcqe_to_rspiocbq(phba,
3531 irspiocbq);
3532 if (irspiocbq)
3533 lpfc_sli_sp_handle_rspiocb(phba, pring,
3534 irspiocbq);
3535 break;
3536 case CQE_CODE_RECEIVE:
3537 case CQE_CODE_RECEIVE_V1:
3538 dmabuf = container_of(cq_event, struct hbq_dmabuf,
3539 cq_event);
3540 lpfc_sli4_handle_received_buffer(phba, dmabuf);
3541 break;
3542 default:
3543 break;
3544 }
3545 }
3546}
3547
3548
3549
3550
3551
3552
3553
3554
3555
3556
3557
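/**
 * lpfc_sli_abort_iocb_ring - Abort all iocbs on a ring
 * @phba: Pointer to HBA context object.
 * @pring: Pointer to driver SLI ring object.
 *
 * Cancels every pending iocb on the ring's txq with
 * IOSTAT_LOCAL_REJECT/IOERR_SLI_ABORTED and issues abort iotags for
 * everything outstanding on the txcmplq. For the ELS ring, fabric
 * iocbs are aborted first.
 **/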
3558void
3559lpfc_sli_abort_iocb_ring(struct lpfc_hba *phba, struct lpfc_sli_ring *pring)
3560{
3561 LIST_HEAD(completions);
3562 struct lpfc_iocbq *iocb, *next_iocb;
3563
3564 if (pring->ringno == LPFC_ELS_RING) {
3565 lpfc_fabric_abort_hba(phba);
3566 }
3567
3568
3569
3570
3571 if (phba->sli_rev >= LPFC_SLI_REV4) {
3572 spin_lock_irq(&pring->ring_lock);
3573 list_splice_init(&pring->txq, &completions);
3574 pring->txq_cnt = 0;
3575 spin_unlock_irq(&pring->ring_lock);
3576
3577 spin_lock_irq(&phba->hbalock);
3578
3579 list_for_each_entry_safe(iocb, next_iocb, &pring->txcmplq, list)
3580 lpfc_sli_issue_abort_iotag(phba, pring, iocb);
3581 spin_unlock_irq(&phba->hbalock);
3582 } else {
3583 spin_lock_irq(&phba->hbalock);
3584 list_splice_init(&pring->txq, &completions);
3585 pring->txq_cnt = 0;
3586
3587
3588 list_for_each_entry_safe(iocb, next_iocb, &pring->txcmplq, list)
3589 lpfc_sli_issue_abort_iotag(phba, pring, iocb);
3590 spin_unlock_irq(&phba->hbalock);
3591 }
3592
3593
3594 lpfc_sli_cancel_iocbs(phba, &completions, IOSTAT_LOCAL_REJECT,
3595 IOERR_SLI_ABORTED);
3596}
3597
3598
3599
3600
3601
3602
3603
3604
3605
3606
3607
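/**
 * lpfc_sli_abort_fcp_rings - Abort all iocbs on the FCP rings
 * @phba: Pointer to HBA context object.
 *
 * Applies lpfc_sli_abort_iocb_ring() to every FCP I/O channel on SLI-4,
 * or to the single FCP ring on SLI-3.
 **/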
3608void
3609lpfc_sli_abort_fcp_rings(struct lpfc_hba *phba)
3610{
3611 struct lpfc_sli *psli = &phba->sli;
3612 struct lpfc_sli_ring *pring;
3613 uint32_t i;
3614
3615
3616 if (phba->sli_rev >= LPFC_SLI_REV4) {
3617 for (i = 0; i < phba->cfg_fcp_io_channel; i++) {
3618 pring = &psli->ring[i + MAX_SLI3_CONFIGURED_RINGS];
3619 lpfc_sli_abort_iocb_ring(phba, pring);
3620 }
3621 } else {
3622 pring = &psli->ring[psli->fcp_ring];
3623 lpfc_sli_abort_iocb_ring(phba, pring);
3624 }
3625}
3626
3627
3628
3629
3630
3631
3632
3633
3634
3635
3636
3637
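/**
 * lpfc_sli_flush_fcp_rings - Flush all iocbs on the FCP rings
 * @phba: Pointer to HBA context object.
 *
 * Splices both the txq and txcmplq off every FCP ring and cancels the
 * iocbs with IOSTAT_LOCAL_REJECT/IOERR_SLI_DOWN. Sets
 * HBA_FCP_IOQ_FLUSH first so no new FCP iocbs are queued while the
 * flush is in progress.
 **/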
3638void
3639lpfc_sli_flush_fcp_rings(struct lpfc_hba *phba)
3640{
3641 LIST_HEAD(txq);
3642 LIST_HEAD(txcmplq);
3643 struct lpfc_sli *psli = &phba->sli;
3644 struct lpfc_sli_ring *pring;
3645 uint32_t i;
3646
3647 spin_lock_irq(&phba->hbalock);
3648
3649 phba->hba_flag |= HBA_FCP_IOQ_FLUSH;
3650 spin_unlock_irq(&phba->hbalock);
3651
3652
3653 if (phba->sli_rev >= LPFC_SLI_REV4) {
3654 for (i = 0; i < phba->cfg_fcp_io_channel; i++) {
3655 pring = &psli->ring[i + MAX_SLI3_CONFIGURED_RINGS];
3656
3657 spin_lock_irq(&pring->ring_lock);
3658
3659 list_splice_init(&pring->txq, &txq);
3660
3661 list_splice_init(&pring->txcmplq, &txcmplq);
3662 pring->txq_cnt = 0;
3663 pring->txcmplq_cnt = 0;
3664 spin_unlock_irq(&pring->ring_lock);
3665
3666
3667 lpfc_sli_cancel_iocbs(phba, &txq,
3668 IOSTAT_LOCAL_REJECT,
3669 IOERR_SLI_DOWN);
3670
3671 lpfc_sli_cancel_iocbs(phba, &txcmplq,
3672 IOSTAT_LOCAL_REJECT,
3673 IOERR_SLI_DOWN);
3674 }
3675 } else {
3676 pring = &psli->ring[psli->fcp_ring];
3677
3678 spin_lock_irq(&phba->hbalock);
3679
3680 list_splice_init(&pring->txq, &txq);
3681
3682 list_splice_init(&pring->txcmplq, &txcmplq);
3683 pring->txq_cnt = 0;
3684 pring->txcmplq_cnt = 0;
3685 spin_unlock_irq(&phba->hbalock);
3686
3687
3688 lpfc_sli_cancel_iocbs(phba, &txq, IOSTAT_LOCAL_REJECT,
3689 IOERR_SLI_DOWN);
3690
3691 lpfc_sli_cancel_iocbs(phba, &txcmplq, IOSTAT_LOCAL_REJECT,
3692 IOERR_SLI_DOWN);
3693 }
3694}
3695
3696
3697
3698
3699
3700
3701
3702
3703
3704
3705
3706
3707
3708
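/**
 * lpfc_sli_brdready_s3 - Check for SLI-3 HBA readiness
 * @phba: Pointer to HBA context object.
 * @mask: Bit mask to be checked against the host status register.
 *
 * Polls the host status register with increasing delays until the
 * requested bits are set, a hard error (HS_FFERM) is seen, or roughly
 * 30 seconds elapse; the board is restarted once along the way.
 * Returns 0 when ready, 1 on error or timeout.
 **/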
3709static int
3710lpfc_sli_brdready_s3(struct lpfc_hba *phba, uint32_t mask)
3711{
3712 uint32_t status;
3713 int i = 0;
3714 int retval = 0;
3715
3716
3717 if (lpfc_readl(phba->HSregaddr, &status))
3718 return 1;
3719
3720
3721
3722
3723
3724
3725
3726 while (((status & mask) != mask) &&
3727 !(status & HS_FFERM) &&
3728 i++ < 20) {
3729
3730 if (i <= 5)
3731 msleep(10);
3732 else if (i <= 10)
3733 msleep(500);
3734 else
3735 msleep(2500);
3736
3737 if (i == 15) {
3738
3739 phba->pport->port_state = LPFC_VPORT_UNKNOWN;
3740 lpfc_sli_brdrestart(phba);
3741 }
3742
3743 if (lpfc_readl(phba->HSregaddr, &status)) {
3744 retval = 1;
3745 break;
3746 }
3747 }
3748
3749
3750 if ((status & HS_FFERM) || (i >= 20)) {
3751 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
3752 "2751 Adapter failed to restart, "
3753 "status reg x%x, FW Data: A8 x%x AC x%x\n",
3754 status,
3755 readl(phba->MBslimaddr + 0xa8),
3756 readl(phba->MBslimaddr + 0xac));
3757 phba->link_state = LPFC_HBA_ERROR;
3758 retval = 1;
3759 }
3760
3761 return retval;
3762}
3763
3764
3765
3766
3767
3768
3769
3770
3771
3772
3773
3774
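/**
 * lpfc_sli_brdready_s4 - Check for SLI-4 HBA readiness
 * @phba: Pointer to HBA context object.
 * @mask: Bit mask (unused on SLI-4).
 *
 * Runs the SLI-4 POST status check, restarting the board once if the
 * first check fails. Returns 0 when ready (and marks device interrupts
 * disabled), 1 on error.
 **/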
3775static int
3776lpfc_sli_brdready_s4(struct lpfc_hba *phba, uint32_t mask)
3777{
3778 uint32_t status;
3779 int retval = 0;
3780
3781
3782 status = lpfc_sli4_post_status_check(phba);
3783
3784 if (status) {
3785 phba->pport->port_state = LPFC_VPORT_UNKNOWN;
3786 lpfc_sli_brdrestart(phba);
3787 status = lpfc_sli4_post_status_check(phba);
3788 }
3789
3790
3791 if (status) {
3792 phba->link_state = LPFC_HBA_ERROR;
3793 retval = 1;
3794 } else
3795 phba->sli4_hba.intr_enable = 0;
3796
3797 return retval;
3798}
3799
3800
3801
3802
3803
3804
3805
3806
3807
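/**
 * lpfc_sli_brdready - Wrapper for the HBA readiness check
 * @phba: Pointer to HBA context object.
 * @mask: Bit mask to be checked against the host status register.
 *
 * Invokes the SLI-3 or SLI-4 specific readiness routine.
 **/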
3808int
3809lpfc_sli_brdready(struct lpfc_hba *phba, uint32_t mask)
3810{
3811 return phba->lpfc_sli_brdready(phba, mask);
3812}
3813
3814#define BARRIER_TEST_PATTERN (0xdeadbeef)
3815
3816
3817
3818
3819
3820
3821
3822
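/**
 * lpfc_reset_barrier - Issue the reset barrier to older HBAs
 * @phba: Pointer to HBA context object.
 *
 * Quiesces Helios and Thor chips before a reset by writing a KILL_BOARD
 * mailbox through SLIM and waiting for the chip to echo back the
 * complement of a test pattern. Error attention is ignored while the
 * barrier is in progress, and the host control register is restored on
 * exit. Must be called with hbalock held.
 **/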
3823void lpfc_reset_barrier(struct lpfc_hba *phba)
3824{
3825 uint32_t __iomem *resp_buf;
3826 uint32_t __iomem *mbox_buf;
3827 volatile uint32_t mbox;
3828 uint32_t hc_copy, ha_copy, resp_data;
3829 int i;
3830 uint8_t hdrtype;
3831
3832 lockdep_assert_held(&phba->hbalock);
3833
3834 pci_read_config_byte(phba->pcidev, PCI_HEADER_TYPE, &hdrtype);
3835 if (hdrtype != 0x80 ||
3836 (FC_JEDEC_ID(phba->vpd.rev.biuRev) != HELIOS_JEDEC_ID &&
3837 FC_JEDEC_ID(phba->vpd.rev.biuRev) != THOR_JEDEC_ID))
3838 return;
3839
3840
3841
3842
3843
3844 resp_buf = phba->MBslimaddr;
3845
3846
3847 if (lpfc_readl(phba->HCregaddr, &hc_copy))
3848 return;
3849 writel((hc_copy & ~HC_ERINT_ENA), phba->HCregaddr);
3850 readl(phba->HCregaddr);
3851 phba->link_flag |= LS_IGNORE_ERATT;
3852
3853 if (lpfc_readl(phba->HAregaddr, &ha_copy))
3854 return;
3855 if (ha_copy & HA_ERATT) {
3856
3857 writel(HA_ERATT, phba->HAregaddr);
3858 phba->pport->stopped = 1;
3859 }
3860
3861 mbox = 0;
3862 ((MAILBOX_t *)&mbox)->mbxCommand = MBX_KILL_BOARD;
3863 ((MAILBOX_t *)&mbox)->mbxOwner = OWN_CHIP;
3864
3865 writel(BARRIER_TEST_PATTERN, (resp_buf + 1));
3866 mbox_buf = phba->MBslimaddr;
3867 writel(mbox, mbox_buf);
3868
3869 for (i = 0; i < 50; i++) {
3870 if (lpfc_readl((resp_buf + 1), &resp_data))
3871 return;
3872 if (resp_data != ~(BARRIER_TEST_PATTERN))
3873 mdelay(1);
3874 else
3875 break;
3876 }
3877 resp_data = 0;
3878 if (lpfc_readl((resp_buf + 1), &resp_data))
3879 return;
3880 if (resp_data != ~(BARRIER_TEST_PATTERN)) {
3881 if (phba->sli.sli_flag & LPFC_SLI_ACTIVE ||
3882 phba->pport->stopped)
3883 goto restore_hc;
3884 else
3885 goto clear_errat;
3886 }
3887
3888 ((MAILBOX_t *)&mbox)->mbxOwner = OWN_HOST;
3889 resp_data = 0;
3890 for (i = 0; i < 500; i++) {
3891 if (lpfc_readl(resp_buf, &resp_data))
3892 return;
3893 if (resp_data != mbox)
3894 mdelay(1);
3895 else
3896 break;
3897 }
3898
3899clear_errat:
3900
3901 while (++i < 500) {
3902 if (lpfc_readl(phba->HAregaddr, &ha_copy))
3903 return;
3904 if (!(ha_copy & HA_ERATT))
3905 mdelay(1);
3906 else
3907 break;
3908 }
3909
3910 if (readl(phba->HAregaddr) & HA_ERATT) {
3911 writel(HA_ERATT, phba->HAregaddr);
3912 phba->pport->stopped = 1;
3913 }
3914
3915restore_hc:
3916 phba->link_flag &= ~LS_IGNORE_ERATT;
3917 writel(hc_copy, phba->HCregaddr);
3918 readl(phba->HCregaddr);
3919}
3920
3921
3922
3923
3924
3925
3926
3927
3928
3929
3930
3931
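/**
 * lpfc_sli_brdkill - Issue a KILL_BOARD mailbox command to the HBA
 * @phba: Pointer to HBA context object.
 *
 * Disables error attention interrupts, sends KILL_BOARD, and waits up
 * to about three seconds for the resulting error attention. Cleans up
 * the active mailbox state and posts HBA-down processing. Returns 0 if
 * the board reported error attention (the kill took effect), 1
 * otherwise.
 **/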
3932int
3933lpfc_sli_brdkill(struct lpfc_hba *phba)
3934{
3935 struct lpfc_sli *psli;
3936 LPFC_MBOXQ_t *pmb;
3937 uint32_t status;
3938 uint32_t ha_copy;
3939 int retval;
3940 int i = 0;
3941
3942 psli = &phba->sli;
3943
3944
3945 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
3946 "0329 Kill HBA Data: x%x x%x\n",
3947 phba->pport->port_state, psli->sli_flag);
3948
3949 pmb = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
3950 if (!pmb)
3951 return 1;
3952
3953
3954 spin_lock_irq(&phba->hbalock);
3955 if (lpfc_readl(phba->HCregaddr, &status)) {
3956 spin_unlock_irq(&phba->hbalock);
3957 mempool_free(pmb, phba->mbox_mem_pool);
3958 return 1;
3959 }
3960 status &= ~HC_ERINT_ENA;
3961 writel(status, phba->HCregaddr);
3962 readl(phba->HCregaddr);
3963 phba->link_flag |= LS_IGNORE_ERATT;
3964 spin_unlock_irq(&phba->hbalock);
3965
3966 lpfc_kill_board(phba, pmb);
3967 pmb->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
3968 retval = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT);
3969
3970 if (retval != MBX_SUCCESS) {
3971 if (retval != MBX_BUSY)
3972 mempool_free(pmb, phba->mbox_mem_pool);
3973 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
3974 "2752 KILL_BOARD command failed retval %d\n",
3975 retval);
3976 spin_lock_irq(&phba->hbalock);
3977 phba->link_flag &= ~LS_IGNORE_ERATT;
3978 spin_unlock_irq(&phba->hbalock);
3979 return 1;
3980 }
3981
3982 spin_lock_irq(&phba->hbalock);
3983 psli->sli_flag &= ~LPFC_SLI_ACTIVE;
3984 spin_unlock_irq(&phba->hbalock);
3985
3986 mempool_free(pmb, phba->mbox_mem_pool);
3987
3988
3989
3990
3991
3992
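	/*
	 * There is no completion for a KILL_BOARD mailbox command, so
	 * poll the host attention register for the error attention that
	 * signals the board has been killed (up to about three seconds).
	 */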
3993 if (lpfc_readl(phba->HAregaddr, &ha_copy))
3994 return 1;
3995 while ((i++ < 30) && !(ha_copy & HA_ERATT)) {
3996 mdelay(100);
3997 if (lpfc_readl(phba->HAregaddr, &ha_copy))
3998 return 1;
3999 }
4000
4001 del_timer_sync(&psli->mbox_tmo);
4002 if (ha_copy & HA_ERATT) {
4003 writel(HA_ERATT, phba->HAregaddr);
4004 phba->pport->stopped = 1;
4005 }
4006 spin_lock_irq(&phba->hbalock);
4007 psli->sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
4008 psli->mbox_active = NULL;
4009 phba->link_flag &= ~LS_IGNORE_ERATT;
4010 spin_unlock_irq(&phba->hbalock);
4011
4012 lpfc_hba_down_post(phba);
4013 phba->link_state = LPFC_HBA_ERROR;
4014
4015 return ha_copy & HA_ERATT ? 0 : 1;
4016}
4017
4018
4019
4020
4021
4022
4023
4024
4025
4026
4027
4028
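/**
 * lpfc_sli_brdreset - Reset an SLI-2/SLI-3 HBA
 * @phba: Pointer to HBA context object.
 *
 * Masks PCI parity/SERR reporting, toggles HC_INITFF in the host
 * control register to reset the chip, restores the PCI command
 * register, and reinitializes the driver's ring indices. Leaves the
 * link state at LPFC_WARM_START. Returns 0.
 **/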
4029int
4030lpfc_sli_brdreset(struct lpfc_hba *phba)
4031{
4032 struct lpfc_sli *psli;
4033 struct lpfc_sli_ring *pring;
4034 uint16_t cfg_value;
4035 int i;
4036
4037 psli = &phba->sli;
4038
4039
4040 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
4041 "0325 Reset HBA Data: x%x x%x\n",
4042 phba->pport->port_state, psli->sli_flag);
4043
4044
4045 phba->fc_eventTag = 0;
4046 phba->link_events = 0;
4047 phba->pport->fc_myDID = 0;
4048 phba->pport->fc_prevDID = 0;
4049
4050
4051 pci_read_config_word(phba->pcidev, PCI_COMMAND, &cfg_value);
4052 pci_write_config_word(phba->pcidev, PCI_COMMAND,
4053 (cfg_value &
4054 ~(PCI_COMMAND_PARITY | PCI_COMMAND_SERR)));
4055
4056 psli->sli_flag &= ~(LPFC_SLI_ACTIVE | LPFC_PROCESS_LA);
4057
4058
4059 writel(HC_INITFF, phba->HCregaddr);
4060 mdelay(1);
4061 readl(phba->HCregaddr);
4062 writel(0, phba->HCregaddr);
4063 readl(phba->HCregaddr);
4064
4065
4066 pci_write_config_word(phba->pcidev, PCI_COMMAND, cfg_value);
4067
4068
4069 for (i = 0; i < psli->num_rings; i++) {
4070 pring = &psli->ring[i];
4071 pring->flag = 0;
4072 pring->sli.sli3.rspidx = 0;
4073 pring->sli.sli3.next_cmdidx = 0;
4074 pring->sli.sli3.local_getidx = 0;
4075 pring->sli.sli3.cmdidx = 0;
4076 pring->missbufcnt = 0;
4077 }
4078
4079 phba->link_state = LPFC_WARM_START;
4080 return 0;
4081}
4082
4083
4084
4085
4086
4087
4088
4089
4090
4091
4092
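/**
 * lpfc_sli4_brdreset - Reset an SLI-4 HBA
 * @phba: Pointer to HBA context object.
 *
 * Clears driver link and FCF state and performs a PCI function reset,
 * destroying the SLI-4 queues. The reset is skipped when a firmware
 * dump is pending (HBA_FW_DUMP_OP). Returns the status of the PCI
 * function reset.
 **/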
4093int
4094lpfc_sli4_brdreset(struct lpfc_hba *phba)
4095{
4096 struct lpfc_sli *psli = &phba->sli;
4097 uint16_t cfg_value;
4098 int rc = 0;
4099
4100
4101 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
4102 "0295 Reset HBA Data: x%x x%x x%x\n",
4103 phba->pport->port_state, psli->sli_flag,
4104 phba->hba_flag);
4105
4106
4107 phba->fc_eventTag = 0;
4108 phba->link_events = 0;
4109 phba->pport->fc_myDID = 0;
4110 phba->pport->fc_prevDID = 0;
4111
4112 spin_lock_irq(&phba->hbalock);
4113 psli->sli_flag &= ~(LPFC_PROCESS_LA);
4114 phba->fcf.fcf_flag = 0;
4115 spin_unlock_irq(&phba->hbalock);
4116
4117
4118 if (phba->hba_flag & HBA_FW_DUMP_OP) {
4119 phba->hba_flag &= ~HBA_FW_DUMP_OP;
4120 return rc;
4121 }
4122
4123
4124 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
4125 "0389 Performing PCI function reset!\n");
4126
4127
4128 pci_read_config_word(phba->pcidev, PCI_COMMAND, &cfg_value);
4129 pci_write_config_word(phba->pcidev, PCI_COMMAND, (cfg_value &
4130 ~(PCI_COMMAND_PARITY | PCI_COMMAND_SERR)));
4131
4132
4133 rc = lpfc_pci_function_reset(phba);
4134 lpfc_sli4_queue_destroy(phba);
4135
4136
4137 pci_write_config_word(phba->pcidev, PCI_COMMAND, cfg_value);
4138
4139 return rc;
4140}
4141
4142
4143
4144
4145
4146
4147
4148
4149
4150
4151
4152
4153
4154
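/**
 * lpfc_sli_brdrestart_s3 - Restart an SLI-3 HBA
 * @phba: Pointer to HBA context object.
 *
 * Issues the reset barrier, writes an MBX_RESTART mailbox word to SLIM
 * word 0 and a skip-POST flag to word 1 when the port was previously
 * initialized, then resets the board, disables PCIe AER reporting if
 * it was enabled, and posts HBA-down processing. Returns 0.
 **/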
4155static int
4156lpfc_sli_brdrestart_s3(struct lpfc_hba *phba)
4157{
4158 MAILBOX_t *mb;
4159 struct lpfc_sli *psli;
4160 volatile uint32_t word0;
4161 void __iomem *to_slim;
4162 uint32_t hba_aer_enabled;
4163
4164 spin_lock_irq(&phba->hbalock);
4165
4166
4167 hba_aer_enabled = phba->hba_flag & HBA_AER_ENABLED;
4168
4169 psli = &phba->sli;
4170
4171
4172 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
4173 "0337 Restart HBA Data: x%x x%x\n",
4174 phba->pport->port_state, psli->sli_flag);
4175
4176 word0 = 0;
4177 mb = (MAILBOX_t *) &word0;
4178 mb->mbxCommand = MBX_RESTART;
4179 mb->mbxHc = 1;
4180
4181 lpfc_reset_barrier(phba);
4182
4183 to_slim = phba->MBslimaddr;
4184 writel(*(uint32_t *) mb, to_slim);
4185 readl(to_slim);
4186
4187
4188 if (phba->pport->port_state)
4189 word0 = 1;
4190 else
4191 word0 = 0;
4192 to_slim = phba->MBslimaddr + sizeof (uint32_t);
4193 writel(*(uint32_t *) mb, to_slim);
4194 readl(to_slim);
4195
4196 lpfc_sli_brdreset(phba);
4197 phba->pport->stopped = 0;
4198 phba->link_state = LPFC_INIT_START;
4199 phba->hba_flag = 0;
4200 spin_unlock_irq(&phba->hbalock);
4201
4202 memset(&psli->lnk_stat_offsets, 0, sizeof(psli->lnk_stat_offsets));
4203 psli->stats_start = get_seconds();
4204
4205
4206 mdelay(100);
4207
4208
4209 if (hba_aer_enabled)
4210 pci_disable_pcie_error_reporting(phba->pcidev);
4211
4212 lpfc_hba_down_post(phba);
4213
4214 return 0;
4215}
4216
4217
4218
4219
4220
4221
4222
4223
4224
4225
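/**
 * lpfc_sli_brdrestart_s4 - Restart an SLI-4 HBA
 * @phba: Pointer to HBA context object.
 *
 * Performs an SLI-4 board reset, clears the stopped state, and posts
 * HBA-down processing. Returns the reset status.
 **/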
4226static int
4227lpfc_sli_brdrestart_s4(struct lpfc_hba *phba)
4228{
4229 struct lpfc_sli *psli = &phba->sli;
4230 uint32_t hba_aer_enabled;
4231 int rc;
4232
4233
4234 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
4235 "0296 Restart HBA Data: x%x x%x\n",
4236 phba->pport->port_state, psli->sli_flag);
4237
4238
4239 hba_aer_enabled = phba->hba_flag & HBA_AER_ENABLED;
4240
4241 rc = lpfc_sli4_brdreset(phba);
4242
4243 spin_lock_irq(&phba->hbalock);
4244 phba->pport->stopped = 0;
4245 phba->link_state = LPFC_INIT_START;
4246 phba->hba_flag = 0;
4247 spin_unlock_irq(&phba->hbalock);
4248
4249 memset(&psli->lnk_stat_offsets, 0, sizeof(psli->lnk_stat_offsets));
4250 psli->stats_start = get_seconds();
4251
4252
4253 if (hba_aer_enabled)
4254 pci_disable_pcie_error_reporting(phba->pcidev);
4255
4256 lpfc_hba_down_post(phba);
4257
4258 return rc;
4259}
4260
4261
4262
4263
4264
4265
4266
4267
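/**
 * lpfc_sli_brdrestart - Wrapper for restarting the HBA
 * @phba: Pointer to HBA context object.
 *
 * Invokes the SLI-3 or SLI-4 specific restart routine.
 **/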
4268int
4269lpfc_sli_brdrestart(struct lpfc_hba *phba)
4270{
4271 return phba->lpfc_sli_brdrestart(phba);
4272}
4273
4274
4275
4276
4277
4278
4279
4280
4281
4282
4283
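/**
 * lpfc_sli_chipset_init - Initialize the chipset after a restart
 * @phba: Pointer to HBA context object.
 *
 * Polls the host status register until both HS_FFRDY and HS_MBRDY are
 * set, restarting the board once partway through and giving up after
 * about two minutes or on a hard error (HS_FFERM). On success, masks
 * host interrupts and clears all pending host attention bits. Returns
 * 0 on success, -ETIMEDOUT or -EIO on failure.
 **/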
4284static int
4285lpfc_sli_chipset_init(struct lpfc_hba *phba)
4286{
4287 uint32_t status, i = 0;
4288
4289
4290 if (lpfc_readl(phba->HSregaddr, &status))
4291 return -EIO;
4292
4293
4294 i = 0;
4295 while ((status & (HS_FFRDY | HS_MBRDY)) != (HS_FFRDY | HS_MBRDY)) {
4296
4297
4298
4299
4300
4301
4302
4303
4304
4305 if (i++ >= 200) {
4306
4307
4308 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
4309 "0436 Adapter failed to init, "
4310 "timeout, status reg x%x, "
4311 "FW Data: A8 x%x AC x%x\n", status,
4312 readl(phba->MBslimaddr + 0xa8),
4313 readl(phba->MBslimaddr + 0xac));
4314 phba->link_state = LPFC_HBA_ERROR;
4315 return -ETIMEDOUT;
4316 }
4317
4318
4319 if (status & HS_FFERM) {
4320
4321
4322
4323 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
4324 "0437 Adapter failed to init, "
4325 "chipset, status reg x%x, "
4326 "FW Data: A8 x%x AC x%x\n", status,
4327 readl(phba->MBslimaddr + 0xa8),
4328 readl(phba->MBslimaddr + 0xac));
4329 phba->link_state = LPFC_HBA_ERROR;
4330 return -EIO;
4331 }
4332
4333 if (i <= 10)
4334 msleep(10);
4335 else if (i <= 100)
4336 msleep(100);
4337 else
4338 msleep(1000);
4339
4340 if (i == 150) {
4341
4342 phba->pport->port_state = LPFC_VPORT_UNKNOWN;
4343 lpfc_sli_brdrestart(phba);
4344 }
4345
4346 if (lpfc_readl(phba->HSregaddr, &status))
4347 return -EIO;
4348 }
4349
4350
4351 if (status & HS_FFERM) {
4352
4353
4354 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
4355 "0438 Adapter failed to init, chipset, "
4356 "status reg x%x, "
4357 "FW Data: A8 x%x AC x%x\n", status,
4358 readl(phba->MBslimaddr + 0xa8),
4359 readl(phba->MBslimaddr + 0xac));
4360 phba->link_state = LPFC_HBA_ERROR;
4361 return -EIO;
4362 }
4363
4364
4365 writel(0, phba->HCregaddr);
4366 readl(phba->HCregaddr);
4367
4368
4369 writel(0xffffffff, phba->HAregaddr);
4370 readl(phba->HAregaddr);
4371 return 0;
4372}
4373
4374
4375
4376
4377
4378
4379
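/**
 * lpfc_sli_hbq_count - Get the number of HBQs to configure
 *
 * Returns the size of the lpfc_hbq_defs table.
 **/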
4380int
4381lpfc_sli_hbq_count(void)
4382{
4383 return ARRAY_SIZE(lpfc_hbq_defs);
4384}
4385
4386
4387
4388
4389
4390
4391
4392
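/**
 * lpfc_sli_hbq_entry_count - Get the total number of HBQ entries
 *
 * Returns the sum of the entry counts of all configured HBQs.
 **/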
4393static int
4394lpfc_sli_hbq_entry_count(void)
4395{
4396 int hbq_count = lpfc_sli_hbq_count();
4397 int count = 0;
4398 int i;
4399
4400 for (i = 0; i < hbq_count; ++i)
4401 count += lpfc_hbq_defs[i]->entry_count;
4402 return count;
4403}
4404
4405
4406
4407
4408
4409
4410
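/**
 * lpfc_sli_hbq_size - Get the memory size needed for all HBQ entries
 *
 * Returns the total HBQ entry count multiplied by the size of one HBQ
 * entry.
 **/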
4411int
4412lpfc_sli_hbq_size(void)
4413{
4414 return lpfc_sli_hbq_entry_count() * sizeof(struct lpfc_hbq_entry);
4415}
4416
4417
4418
4419
4420
4421
4422
4423
4424
4425
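/**
 * lpfc_sli_hbq_setup - Configure and initialize the HBQs
 * @phba: Pointer to HBA context object.
 *
 * Issues a CONFIG_HBQ mailbox command for each HBQ described in
 * lpfc_hbq_defs and then posts the initial receive buffers. Returns 0
 * on success, -ENOMEM or -ENXIO on failure (the HBA is failed on a
 * mailbox error).
 **/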
4426static int
4427lpfc_sli_hbq_setup(struct lpfc_hba *phba)
4428{
4429 int hbq_count = lpfc_sli_hbq_count();
4430 LPFC_MBOXQ_t *pmb;
4431 MAILBOX_t *pmbox;
4432 uint32_t hbqno;
4433 uint32_t hbq_entry_index;
4434
4435
4436
4437
4438 pmb = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
4439
4440 if (!pmb)
4441 return -ENOMEM;
4442
4443 pmbox = &pmb->u.mb;
4444
4445
4446 phba->link_state = LPFC_INIT_MBX_CMDS;
4447 phba->hbq_in_use = 1;
4448
4449 hbq_entry_index = 0;
4450 for (hbqno = 0; hbqno < hbq_count; ++hbqno) {
4451 phba->hbqs[hbqno].next_hbqPutIdx = 0;
4452 phba->hbqs[hbqno].hbqPutIdx = 0;
4453 phba->hbqs[hbqno].local_hbqGetIdx = 0;
4454 phba->hbqs[hbqno].entry_count =
4455 lpfc_hbq_defs[hbqno]->entry_count;
4456 lpfc_config_hbq(phba, hbqno, lpfc_hbq_defs[hbqno],
4457 hbq_entry_index, pmb);
4458 hbq_entry_index += phba->hbqs[hbqno].entry_count;
4459
4460 if (lpfc_sli_issue_mbox(phba, pmb, MBX_POLL) != MBX_SUCCESS) {
4461
4462
4463
4464 lpfc_printf_log(phba, KERN_ERR,
4465 LOG_SLI | LOG_VPORT,
4466 "1805 Adapter failed to init. "
4467 "Data: x%x x%x x%x\n",
4468 pmbox->mbxCommand,
4469 pmbox->mbxStatus, hbqno);
4470
4471 phba->link_state = LPFC_HBA_ERROR;
4472 mempool_free(pmb, phba->mbox_mem_pool);
4473 return -ENXIO;
4474 }
4475 }
4476 phba->hbq_count = hbq_count;
4477
4478 mempool_free(pmb, phba->mbox_mem_pool);
4479
4480
4481 for (hbqno = 0; hbqno < hbq_count; ++hbqno)
4482 lpfc_sli_hbqbuf_init_hbqs(phba, hbqno);
4483 return 0;
4484}
4485
4486
4487
4488
4489
4490
4491
4492
4493
4494
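/**
 * lpfc_sli4_rb_setup - Initialize the SLI-4 receive buffer queue
 * @phba: Pointer to HBA context object.
 *
 * SLI-4 ports use a single receive queue in place of the SLI-3 HBQs;
 * this posts the initial set of receive buffers to it. Returns 0.
 **/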
4495static int
4496lpfc_sli4_rb_setup(struct lpfc_hba *phba)
4497{
4498 phba->hbq_in_use = 1;
4499 phba->hbqs[0].entry_count = lpfc_hbq_defs[0]->entry_count;
4500 phba->hbq_count = 1;
4501
4502 lpfc_sli_hbqbuf_init_hbqs(phba, 0);
4503 return 0;
4504}
4505
4506
4507
4508
4509
4510
4511
4512
4513
4514
4515
4516
4517
4518
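/**
 * lpfc_sli_config_port - Issue the CONFIG_PORT mailbox command
 * @phba: Pointer to HBA context object.
 * @sli_mode: SLI mode (2 or 3) to request from the firmware.
 *
 * Restarts and reinitializes the chip (up to twice) and issues
 * CONFIG_PORT. On success in SLI-3 mode, records the port's grants for
 * NPIV, HBQs, command ring polling, BlockGuard, and security crypto,
 * and sets up the host group pointers. Returns 0 on success, negative
 * error code otherwise.
 **/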
4519int
4520lpfc_sli_config_port(struct lpfc_hba *phba, int sli_mode)
4521{
4522 LPFC_MBOXQ_t *pmb;
4523 uint32_t resetcount = 0, rc = 0, done = 0;
4524
4525 pmb = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
4526 if (!pmb) {
4527 phba->link_state = LPFC_HBA_ERROR;
4528 return -ENOMEM;
4529 }
4530
4531 phba->sli_rev = sli_mode;
4532 while (resetcount < 2 && !done) {
4533 spin_lock_irq(&phba->hbalock);
4534 phba->sli.sli_flag |= LPFC_SLI_MBOX_ACTIVE;
4535 spin_unlock_irq(&phba->hbalock);
4536 phba->pport->port_state = LPFC_VPORT_UNKNOWN;
4537 lpfc_sli_brdrestart(phba);
4538 rc = lpfc_sli_chipset_init(phba);
4539 if (rc)
4540 break;
4541
4542 spin_lock_irq(&phba->hbalock);
4543 phba->sli.sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
4544 spin_unlock_irq(&phba->hbalock);
4545 resetcount++;
4546
4547
4548
4549
4550
4551
4552 rc = lpfc_config_port_prep(phba);
4553 if (rc == -ERESTART) {
4554 phba->link_state = LPFC_LINK_UNKNOWN;
4555 continue;
4556 } else if (rc)
4557 break;
4558
4559 phba->link_state = LPFC_INIT_MBX_CMDS;
4560 lpfc_config_port(phba, pmb);
4561 rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL);
4562 phba->sli3_options &= ~(LPFC_SLI3_NPIV_ENABLED |
4563 LPFC_SLI3_HBQ_ENABLED |
4564 LPFC_SLI3_CRP_ENABLED |
4565 LPFC_SLI3_BG_ENABLED |
4566 LPFC_SLI3_DSS_ENABLED);
4567 if (rc != MBX_SUCCESS) {
4568 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
4569 "0442 Adapter failed to init, mbxCmd x%x "
4570 "CONFIG_PORT, mbxStatus x%x Data: x%x\n",
4571 pmb->u.mb.mbxCommand, pmb->u.mb.mbxStatus, 0);
4572 spin_lock_irq(&phba->hbalock);
4573 phba->sli.sli_flag &= ~LPFC_SLI_ACTIVE;
4574 spin_unlock_irq(&phba->hbalock);
4575 rc = -ENXIO;
4576 } else {
4577
4578 spin_lock_irq(&phba->hbalock);
4579 phba->sli.sli_flag &= ~LPFC_SLI_ASYNC_MBX_BLK;
4580 spin_unlock_irq(&phba->hbalock);
4581 done = 1;
4582
4583 if ((pmb->u.mb.un.varCfgPort.casabt == 1) &&
4584 (pmb->u.mb.un.varCfgPort.gasabt == 0))
4585 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
4586 "3110 Port did not grant ASABT\n");
4587 }
4588 }
4589 if (!done) {
4590 rc = -EINVAL;
4591 goto do_prep_failed;
4592 }
4593 if (pmb->u.mb.un.varCfgPort.sli_mode == 3) {
4594 if (!pmb->u.mb.un.varCfgPort.cMA) {
4595 rc = -ENXIO;
4596 goto do_prep_failed;
4597 }
4598 if (phba->max_vpi && pmb->u.mb.un.varCfgPort.gmv) {
4599 phba->sli3_options |= LPFC_SLI3_NPIV_ENABLED;
4600 phba->max_vpi = pmb->u.mb.un.varCfgPort.max_vpi;
4601 phba->max_vports = (phba->max_vpi > phba->max_vports) ?
4602 phba->max_vpi : phba->max_vports;
4603
4604 } else
4605 phba->max_vpi = 0;
4606 phba->fips_level = 0;
4607 phba->fips_spec_rev = 0;
4608 if (pmb->u.mb.un.varCfgPort.gdss) {
4609 phba->sli3_options |= LPFC_SLI3_DSS_ENABLED;
4610 phba->fips_level = pmb->u.mb.un.varCfgPort.fips_level;
4611 phba->fips_spec_rev = pmb->u.mb.un.varCfgPort.fips_rev;
4612 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
4613 "2850 Security Crypto Active. FIPS x%d "
4614 "(Spec Rev: x%d)",
4615 phba->fips_level, phba->fips_spec_rev);
4616 }
4617 if (pmb->u.mb.un.varCfgPort.sec_err) {
4618 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
4619 "2856 Config Port Security Crypto "
4620 "Error: x%x ",
4621 pmb->u.mb.un.varCfgPort.sec_err);
4622 }
4623 if (pmb->u.mb.un.varCfgPort.gerbm)
4624 phba->sli3_options |= LPFC_SLI3_HBQ_ENABLED;
4625 if (pmb->u.mb.un.varCfgPort.gcrp)
4626 phba->sli3_options |= LPFC_SLI3_CRP_ENABLED;
4627
4628 phba->hbq_get = phba->mbox->us.s3_pgp.hbq_get;
4629 phba->port_gp = phba->mbox->us.s3_pgp.port;
4630
4631 if (phba->cfg_enable_bg) {
4632 if (pmb->u.mb.un.varCfgPort.gbg)
4633 phba->sli3_options |= LPFC_SLI3_BG_ENABLED;
4634 else
4635 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
4636 "0443 Adapter did not grant "
4637 "BlockGuard\n");
4638 }
4639 } else {
4640 phba->hbq_get = NULL;
4641 phba->port_gp = phba->mbox->us.s2.port;
4642 phba->max_vpi = 0;
4643 }
4644do_prep_failed:
4645 mempool_free(pmb, phba->mbox_mem_pool);
4646 return rc;
4647}
4648
4649
4650
4651
4652
4653
4654
4655
4656
4657
4658
4659
4660
4661
4662
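/**
 * lpfc_sli_hba_setup - Main SLI-2/SLI-3 HBA setup function
 * @phba: Pointer to HBA context object.
 *
 * Configures the port in the requested SLI mode (falling back between
 * modes 2 and 3 as needed), enables PCIe AER when requested, maps the
 * rings, allocates the VPI bitmask and ids for SLI-3, sets up the
 * HBQs, and runs the post-CONFIG_PORT initialization. On any failure
 * the HBA is put into the error state.
 **/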
4663int
4664lpfc_sli_hba_setup(struct lpfc_hba *phba)
4665{
4666 uint32_t rc;
4667 int mode = 3, i;
4668 int longs;
4669
4670 switch (phba->cfg_sli_mode) {
4671 case 2:
4672 if (phba->cfg_enable_npiv) {
4673 lpfc_printf_log(phba, KERN_ERR, LOG_INIT | LOG_VPORT,
4674 "1824 NPIV enabled: Override sli_mode "
4675 "parameter (%d) to auto (0).\n",
4676 phba->cfg_sli_mode);
4677 break;
4678 }
4679 mode = 2;
4680 break;
4681 case 0:
4682 case 3:
4683 break;
4684 default:
4685 lpfc_printf_log(phba, KERN_ERR, LOG_INIT | LOG_VPORT,
4686 "1819 Unrecognized sli_mode parameter: %d.\n",
4687 phba->cfg_sli_mode);
4688
4689 break;
4690 }
4691 phba->fcp_embed_io = 0;
4692
4693 rc = lpfc_sli_config_port(phba, mode);
4694
4695 if (rc && phba->cfg_sli_mode == 3)
4696 lpfc_printf_log(phba, KERN_ERR, LOG_INIT | LOG_VPORT,
4697 "1820 Unable to select SLI-3. "
4698 "Not supported by adapter.\n");
4699 if (rc && mode != 2)
4700 rc = lpfc_sli_config_port(phba, 2);
4701 else if (rc && mode == 2)
4702 rc = lpfc_sli_config_port(phba, 3);
4703 if (rc)
4704 goto lpfc_sli_hba_setup_error;
4705
4706
4707 if (phba->cfg_aer_support == 1 && !(phba->hba_flag & HBA_AER_ENABLED)) {
4708 rc = pci_enable_pcie_error_reporting(phba->pcidev);
4709 if (!rc) {
4710 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
4711 "2709 This device supports "
4712 "Advanced Error Reporting (AER)\n");
4713 spin_lock_irq(&phba->hbalock);
4714 phba->hba_flag |= HBA_AER_ENABLED;
4715 spin_unlock_irq(&phba->hbalock);
4716 } else {
4717 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
4718 "2708 This device does not support "
4719 "Advanced Error Reporting (AER): %d\n",
4720 rc);
4721 phba->cfg_aer_support = 0;
4722 }
4723 }
4724
4725 if (phba->sli_rev == 3) {
4726 phba->iocb_cmd_size = SLI3_IOCB_CMD_SIZE;
4727 phba->iocb_rsp_size = SLI3_IOCB_RSP_SIZE;
4728 } else {
4729 phba->iocb_cmd_size = SLI2_IOCB_CMD_SIZE;
4730 phba->iocb_rsp_size = SLI2_IOCB_RSP_SIZE;
4731 phba->sli3_options = 0;
4732 }
4733
4734 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
4735 "0444 Firmware in SLI %x mode. Max_vpi %d\n",
4736 phba->sli_rev, phba->max_vpi);
4737 rc = lpfc_sli_ring_map(phba);
4738
4739 if (rc)
4740 goto lpfc_sli_hba_setup_error;
4741
4742
4743 if (phba->sli_rev == LPFC_SLI_REV3) {
4744
4745
4746
4747
4748
4749 if ((phba->vpi_bmask == NULL) && (phba->vpi_ids == NULL)) {
4750 longs = (phba->max_vpi + BITS_PER_LONG) / BITS_PER_LONG;
4751 phba->vpi_bmask = kzalloc(longs * sizeof(unsigned long),
4752 GFP_KERNEL);
4753 if (!phba->vpi_bmask) {
4754 rc = -ENOMEM;
4755 goto lpfc_sli_hba_setup_error;
4756 }
4757
4758 phba->vpi_ids = kzalloc(
4759 (phba->max_vpi+1) * sizeof(uint16_t),
4760 GFP_KERNEL);
4761 if (!phba->vpi_ids) {
4762 kfree(phba->vpi_bmask);
4763 rc = -ENOMEM;
4764 goto lpfc_sli_hba_setup_error;
4765 }
4766 for (i = 0; i < phba->max_vpi; i++)
4767 phba->vpi_ids[i] = i;
4768 }
4769 }
4770
4771
4772 if (phba->sli3_options & LPFC_SLI3_HBQ_ENABLED) {
4773 rc = lpfc_sli_hbq_setup(phba);
4774 if (rc)
4775 goto lpfc_sli_hba_setup_error;
4776 }
4777 spin_lock_irq(&phba->hbalock);
4778 phba->sli.sli_flag |= LPFC_PROCESS_LA;
4779 spin_unlock_irq(&phba->hbalock);
4780
4781 rc = lpfc_config_port_post(phba);
4782 if (rc)
4783 goto lpfc_sli_hba_setup_error;
4784
4785 return rc;
4786
4787lpfc_sli_hba_setup_error:
4788 phba->link_state = LPFC_HBA_ERROR;
4789 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
4790 "0445 Firmware initialization failed\n");
4791 return rc;
4792}
4793
4794
4795
4796
4797
4798
4799
4800
4801
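/**
 * lpfc_sli4_read_fcoe_params - Read FCoE parameters from config region 23
 * @phba: Pointer to HBA context object.
 *
 * Sets default FC map values, dumps config region 23 via a mailbox
 * command, and parses the region contents through
 * lpfc_parse_fcoe_conf(). Returns 0 on success, -ENOMEM or -EIO on
 * failure.
 **/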
4802static int
4803lpfc_sli4_read_fcoe_params(struct lpfc_hba *phba)
4804{
4805 LPFC_MBOXQ_t *mboxq;
4806 struct lpfc_dmabuf *mp;
4807 struct lpfc_mqe *mqe;
4808 uint32_t data_length;
4809 int rc;
4810
4811
4812 phba->valid_vlan = 0;
4813 phba->fc_map[0] = LPFC_FCOE_FCF_MAP0;
4814 phba->fc_map[1] = LPFC_FCOE_FCF_MAP1;
4815 phba->fc_map[2] = LPFC_FCOE_FCF_MAP2;
4816
4817 mboxq = (LPFC_MBOXQ_t *)mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
4818 if (!mboxq)
4819 return -ENOMEM;
4820
4821 mqe = &mboxq->u.mqe;
4822 if (lpfc_sli4_dump_cfg_rg23(phba, mboxq)) {
4823 rc = -ENOMEM;
4824 goto out_free_mboxq;
4825 }
4826
4827 mp = (struct lpfc_dmabuf *) mboxq->context1;
4828 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
4829
4830 lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI,
4831 "(%d):2571 Mailbox cmd x%x Status x%x "
4832 "Data: x%x x%x x%x x%x x%x x%x x%x x%x x%x "
4833 "x%x x%x x%x x%x x%x x%x x%x x%x x%x "
4834 "CQ: x%x x%x x%x x%x\n",
4835 mboxq->vport ? mboxq->vport->vpi : 0,
4836 bf_get(lpfc_mqe_command, mqe),
4837 bf_get(lpfc_mqe_status, mqe),
4838 mqe->un.mb_words[0], mqe->un.mb_words[1],
4839 mqe->un.mb_words[2], mqe->un.mb_words[3],
4840 mqe->un.mb_words[4], mqe->un.mb_words[5],
4841 mqe->un.mb_words[6], mqe->un.mb_words[7],
4842 mqe->un.mb_words[8], mqe->un.mb_words[9],
4843 mqe->un.mb_words[10], mqe->un.mb_words[11],
4844 mqe->un.mb_words[12], mqe->un.mb_words[13],
4845 mqe->un.mb_words[14], mqe->un.mb_words[15],
4846 mqe->un.mb_words[16], mqe->un.mb_words[50],
4847 mboxq->mcqe.word0,
4848 mboxq->mcqe.mcqe_tag0, mboxq->mcqe.mcqe_tag1,
4849 mboxq->mcqe.trailer);
4850
4851 if (rc) {
4852 lpfc_mbuf_free(phba, mp->virt, mp->phys);
4853 kfree(mp);
4854 rc = -EIO;
4855 goto out_free_mboxq;
4856 }
4857 data_length = mqe->un.mb_words[5];
4858 if (data_length > DMP_RGN23_SIZE) {
4859 lpfc_mbuf_free(phba, mp->virt, mp->phys);
4860 kfree(mp);
4861 rc = -EIO;
4862 goto out_free_mboxq;
4863 }
4864
4865 lpfc_parse_fcoe_conf(phba, mp->virt, data_length);
4866 lpfc_mbuf_free(phba, mp->virt, mp->phys);
4867 kfree(mp);
4868 rc = 0;
4869
4870out_free_mboxq:
4871 mempool_free(mboxq, phba->mbox_mem_pool);
4872 return rc;
4873}
4874
4875
4876
4877
4878
4879
4880
4881
4882
4883
4884
4885
4886
4887
4888
4889
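/**
 * lpfc_sli4_read_rev - Issue a READ_REV mailbox command and collect VPD
 * @phba: Pointer to HBA context object.
 * @mboxq: Pointer to the driver internal mailbox command.
 * @vpd: Pointer to the caller's VPD buffer.
 * @vpd_size: On entry, the size of @vpd; on return, the amount of VPD
 *            data actually copied.
 *
 * Allocates a DMA buffer for the VPD payload, issues READ_REV by
 * polling, and copies the returned VPD data back to the caller.
 * Returns 0 on success, -ENOMEM or -EIO on failure.
 **/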
4890static int
4891lpfc_sli4_read_rev(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq,
4892 uint8_t *vpd, uint32_t *vpd_size)
4893{
4894 int rc = 0;
4895 uint32_t dma_size;
4896 struct lpfc_dmabuf *dmabuf;
4897 struct lpfc_mqe *mqe;
4898
4899 dmabuf = kzalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
4900 if (!dmabuf)
4901 return -ENOMEM;
4902
4903
4904
4905
4906
4907 dma_size = *vpd_size;
4908 dmabuf->virt = dma_zalloc_coherent(&phba->pcidev->dev, dma_size,
4909 &dmabuf->phys, GFP_KERNEL);
4910 if (!dmabuf->virt) {
4911 kfree(dmabuf);
4912 return -ENOMEM;
4913 }
4914
4915
4916
4917
4918
4919
4920 lpfc_read_rev(phba, mboxq);
4921 mqe = &mboxq->u.mqe;
4922 mqe->un.read_rev.vpd_paddr_high = putPaddrHigh(dmabuf->phys);
4923 mqe->un.read_rev.vpd_paddr_low = putPaddrLow(dmabuf->phys);
4924 mqe->un.read_rev.word1 &= 0x0000FFFF;
4925 bf_set(lpfc_mbx_rd_rev_vpd, &mqe->un.read_rev, 1);
4926 bf_set(lpfc_mbx_rd_rev_avail_len, &mqe->un.read_rev, dma_size);
4927
4928 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
4929 if (rc) {
4930 dma_free_coherent(&phba->pcidev->dev, dma_size,
4931 dmabuf->virt, dmabuf->phys);
4932 kfree(dmabuf);
4933 return -EIO;
4934 }
4935
4936
4937
4938
4939
4940
4941 if (mqe->un.read_rev.avail_vpd_len < *vpd_size)
4942 *vpd_size = mqe->un.read_rev.avail_vpd_len;
4943
4944 memcpy(vpd, dmabuf->virt, *vpd_size);
4945
4946 dma_free_coherent(&phba->pcidev->dev, dma_size,
4947 dmabuf->virt, dmabuf->phys);
4948 kfree(dmabuf);
4949 return 0;
4950}
4951
4952
4953
4954
4955
4956
4957
4958
4959
4960
4961
4962
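/**
 * lpfc_sli4_retrieve_pport_name - Retrieve the SLI-4 physical port name
 * @phba: Pointer to HBA context object.
 *
 * Determines the link type and number (via GET_CNTL_ATTRIBUTES when the
 * read-config data does not provide them), then issues GET_PORT_NAME to
 * fetch the single-character physical port name, which is stored in
 * phba->Port. Returns 0 on success, negative error code otherwise.
 **/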
4963static int
4964lpfc_sli4_retrieve_pport_name(struct lpfc_hba *phba)
4965{
4966 LPFC_MBOXQ_t *mboxq;
4967 struct lpfc_mbx_get_cntl_attributes *mbx_cntl_attr;
4968 struct lpfc_controller_attribute *cntl_attr;
4969 struct lpfc_mbx_get_port_name *get_port_name;
4970 void *virtaddr = NULL;
4971 uint32_t alloclen, reqlen;
4972 uint32_t shdr_status, shdr_add_status;
4973 union lpfc_sli4_cfg_shdr *shdr;
4974 char cport_name = 0;
4975 int rc;
4976
4977
4978 phba->sli4_hba.lnk_info.lnk_dv = LPFC_LNK_DAT_INVAL;
4979 phba->sli4_hba.pport_name_sta = LPFC_SLI4_PPNAME_NON;
4980
4981 mboxq = (LPFC_MBOXQ_t *)mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
4982 if (!mboxq)
4983 return -ENOMEM;
4984
4985 phba->sli4_hba.lnk_info.lnk_dv = LPFC_LNK_DAT_INVAL;
4986 lpfc_sli4_read_config(phba);
4987 if (phba->sli4_hba.lnk_info.lnk_dv == LPFC_LNK_DAT_VAL)
4988 goto retrieve_ppname;
4989
4990
4991 reqlen = sizeof(struct lpfc_mbx_get_cntl_attributes);
4992 alloclen = lpfc_sli4_config(phba, mboxq, LPFC_MBOX_SUBSYSTEM_COMMON,
4993 LPFC_MBOX_OPCODE_GET_CNTL_ATTRIBUTES, reqlen,
4994 LPFC_SLI4_MBX_NEMBED);
4995 if (alloclen < reqlen) {
4996 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
4997 "3084 Allocated DMA memory size (%d) is "
4998 "less than the requested DMA memory size "
4999 "(%d)\n", alloclen, reqlen);
5000 rc = -ENOMEM;
5001 goto out_free_mboxq;
5002 }
5003 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
5004 virtaddr = mboxq->sge_array->addr[0];
5005 mbx_cntl_attr = (struct lpfc_mbx_get_cntl_attributes *)virtaddr;
5006 shdr = &mbx_cntl_attr->cfg_shdr;
5007 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
5008 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
5009 if (shdr_status || shdr_add_status || rc) {
5010 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
5011 "3085 Mailbox x%x (x%x/x%x) failed, "
5012 "rc:x%x, status:x%x, add_status:x%x\n",
5013 bf_get(lpfc_mqe_command, &mboxq->u.mqe),
5014 lpfc_sli_config_mbox_subsys_get(phba, mboxq),
5015 lpfc_sli_config_mbox_opcode_get(phba, mboxq),
5016 rc, shdr_status, shdr_add_status);
5017 rc = -ENXIO;
5018 goto out_free_mboxq;
5019 }
5020 cntl_attr = &mbx_cntl_attr->cntl_attr;
5021 phba->sli4_hba.lnk_info.lnk_dv = LPFC_LNK_DAT_VAL;
5022 phba->sli4_hba.lnk_info.lnk_tp =
5023 bf_get(lpfc_cntl_attr_lnk_type, cntl_attr);
5024 phba->sli4_hba.lnk_info.lnk_no =
5025 bf_get(lpfc_cntl_attr_lnk_numb, cntl_attr);
5026 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
5027 "3086 lnk_type:%d, lnk_numb:%d\n",
5028 phba->sli4_hba.lnk_info.lnk_tp,
5029 phba->sli4_hba.lnk_info.lnk_no);
5030
5031retrieve_ppname:
5032 lpfc_sli4_config(phba, mboxq, LPFC_MBOX_SUBSYSTEM_COMMON,
5033 LPFC_MBOX_OPCODE_GET_PORT_NAME,
5034 sizeof(struct lpfc_mbx_get_port_name) -
5035 sizeof(struct lpfc_sli4_cfg_mhdr),
5036 LPFC_SLI4_MBX_EMBED);
5037 get_port_name = &mboxq->u.mqe.un.get_port_name;
5038 shdr = (union lpfc_sli4_cfg_shdr *)&get_port_name->header.cfg_shdr;
5039 bf_set(lpfc_mbox_hdr_version, &shdr->request, LPFC_OPCODE_VERSION_1);
5040 bf_set(lpfc_mbx_get_port_name_lnk_type, &get_port_name->u.request,
5041 phba->sli4_hba.lnk_info.lnk_tp);
5042 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
5043 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
5044 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
5045 if (shdr_status || shdr_add_status || rc) {
5046 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
5047 "3087 Mailbox x%x (x%x/x%x) failed: "
5048 "rc:x%x, status:x%x, add_status:x%x\n",
5049 bf_get(lpfc_mqe_command, &mboxq->u.mqe),
5050 lpfc_sli_config_mbox_subsys_get(phba, mboxq),
5051 lpfc_sli_config_mbox_opcode_get(phba, mboxq),
5052 rc, shdr_status, shdr_add_status);
5053 rc = -ENXIO;
5054 goto out_free_mboxq;
5055 }
5056 switch (phba->sli4_hba.lnk_info.lnk_no) {
5057 case LPFC_LINK_NUMBER_0:
5058 cport_name = bf_get(lpfc_mbx_get_port_name_name0,
5059 &get_port_name->u.response);
5060 phba->sli4_hba.pport_name_sta = LPFC_SLI4_PPNAME_GET;
5061 break;
5062 case LPFC_LINK_NUMBER_1:
5063 cport_name = bf_get(lpfc_mbx_get_port_name_name1,
5064 &get_port_name->u.response);
5065 phba->sli4_hba.pport_name_sta = LPFC_SLI4_PPNAME_GET;
5066 break;
5067 case LPFC_LINK_NUMBER_2:
5068 cport_name = bf_get(lpfc_mbx_get_port_name_name2,
5069 &get_port_name->u.response);
5070 phba->sli4_hba.pport_name_sta = LPFC_SLI4_PPNAME_GET;
5071 break;
5072 case LPFC_LINK_NUMBER_3:
5073 cport_name = bf_get(lpfc_mbx_get_port_name_name3,
5074 &get_port_name->u.response);
5075 phba->sli4_hba.pport_name_sta = LPFC_SLI4_PPNAME_GET;
5076 break;
5077 default:
5078 break;
5079 }
5080
5081 if (phba->sli4_hba.pport_name_sta == LPFC_SLI4_PPNAME_GET) {
5082 phba->Port[0] = cport_name;
5083 phba->Port[1] = '\0';
5084 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
5085 "3091 SLI get port name: %s\n", phba->Port);
5086 }
5087
5088out_free_mboxq:
5089 if (rc != MBX_TIMEOUT) {
5090 if (bf_get(lpfc_mqe_command, &mboxq->u.mqe) == MBX_SLI4_CONFIG)
5091 lpfc_sli4_mbox_cmd_free(phba, mboxq);
5092 else
5093 mempool_free(mboxq, phba->mbox_mem_pool);
5094 }
5095 return rc;
5096}
5097
5098
5099
5100
5101
5102
5103
5104
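/**
 * lpfc_sli4_arm_cqeq_intr - Arm the SLI-4 completion and event queues
 * @phba: Pointer to HBA context object.
 *
 * Releases all consumed entries on the mailbox, ELS, FCP, and OAS
 * completion queues and on the event queues, rearming each so the port
 * can signal further interrupts.
 **/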
5105static void
5106lpfc_sli4_arm_cqeq_intr(struct lpfc_hba *phba)
5107{
5108 int fcp_eqidx;
5109
5110 lpfc_sli4_cq_release(phba->sli4_hba.mbx_cq, LPFC_QUEUE_REARM);
5111 lpfc_sli4_cq_release(phba->sli4_hba.els_cq, LPFC_QUEUE_REARM);
5112 fcp_eqidx = 0;
5113 if (phba->sli4_hba.fcp_cq) {
5114 do {
5115 lpfc_sli4_cq_release(phba->sli4_hba.fcp_cq[fcp_eqidx],
5116 LPFC_QUEUE_REARM);
5117 } while (++fcp_eqidx < phba->cfg_fcp_io_channel);
5118 }
5119
5120 if (phba->cfg_fof)
5121 lpfc_sli4_cq_release(phba->sli4_hba.oas_cq, LPFC_QUEUE_REARM);
5122
5123 if (phba->sli4_hba.hba_eq) {
5124 for (fcp_eqidx = 0; fcp_eqidx < phba->cfg_fcp_io_channel;
5125 fcp_eqidx++)
5126 lpfc_sli4_eq_release(phba->sli4_hba.hba_eq[fcp_eqidx],
5127 LPFC_QUEUE_REARM);
5128 }
5129
5130 if (phba->cfg_fof)
5131 lpfc_sli4_eq_release(phba->sli4_hba.fof_eq, LPFC_QUEUE_REARM);
5132}
5133
5134
5135
5136
5137
5138
5139
5140
5141
5142
5143
5144
5145
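/**
 * lpfc_sli4_get_avail_extnt_rsrc - Get available resource extent counts
 * @phba: Pointer to HBA context object.
 * @type: The resource extent type (RPI, VPI, XRI, or VFI).
 * @extnt_count: Returned number of available extents.
 * @extnt_size: Returned size of each extent.
 *
 * Issues GET_RSRC_EXTENT_INFO and returns the port's available extent
 * count and extent size for the given type. Returns 0 on success,
 * -ENOMEM or -EIO on failure.
 **/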
int
lpfc_sli4_get_avail_extnt_rsrc(struct lpfc_hba *phba, uint16_t type,
			       uint16_t *extnt_count, uint16_t *extnt_size)
{
	int rc = 0;
	uint32_t length;
	uint32_t mbox_tmo;
	struct lpfc_mbx_get_rsrc_extent_info *rsrc_info;
	LPFC_MBOXQ_t *mbox;

	mbox = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!mbox)
		return -ENOMEM;

	/* Find out how many extents are available for this resource type */
	length = (sizeof(struct lpfc_mbx_get_rsrc_extent_info) -
		  sizeof(struct lpfc_sli4_cfg_mhdr));
	lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON,
			 LPFC_MBOX_OPCODE_GET_RSRC_EXTENT_INFO,
			 length, LPFC_SLI4_MBX_EMBED);

	/* Send an extents count of 0 - the GET doesn't use it. */
	rc = lpfc_sli4_mbox_rsrc_extent(phba, mbox, 0, type,
					LPFC_SLI4_MBX_EMBED);
	if (unlikely(rc)) {
		rc = -EIO;
		goto err_exit;
	}

	if (!phba->sli4_hba.intr_enable)
		rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
	else {
		mbox_tmo = lpfc_mbox_tmo_val(phba, mbox);
		rc = lpfc_sli_issue_mbox_wait(phba, mbox, mbox_tmo);
	}
	if (unlikely(rc)) {
		rc = -EIO;
		goto err_exit;
	}

	rsrc_info = &mbox->u.mqe.un.rsrc_extent_info;
	if (bf_get(lpfc_mbox_hdr_status,
		   &rsrc_info->header.cfg_shdr.response)) {
		lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_INIT,
				"2930 Failed to get resource extents "
				"Status 0x%x Add'l Status 0x%x\n",
				bf_get(lpfc_mbox_hdr_status,
				       &rsrc_info->header.cfg_shdr.response),
				bf_get(lpfc_mbox_hdr_add_status,
				       &rsrc_info->header.cfg_shdr.response));
		rc = -EIO;
		goto err_exit;
	}

	*extnt_count = bf_get(lpfc_mbx_get_rsrc_extent_info_cnt,
			      &rsrc_info->u.rsp);
	*extnt_size = bf_get(lpfc_mbx_get_rsrc_extent_info_size,
			     &rsrc_info->u.rsp);

	lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
			"3162 Retrieved extents type-%d from port: count:%d, "
			"size:%d\n", type, *extnt_count, *extnt_size);

err_exit:
	mempool_free(mbox, phba->mbox_mem_pool);
	return rc;
}
5213
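/**
 * lpfc_sli4_chk_avail_extnt_rsrc - Detect extent provisioning changes
 * @phba: pointer to lpfc hba data structure.
 * @type: resource extent type to verify.
 *
 * Compares the extent count and size the driver currently holds for
 * @type with what the port now reports. Returns 0 if nothing changed,
 * 1 if the extents must be reallocated, or -EIO on query failure.
 **/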
static int
lpfc_sli4_chk_avail_extnt_rsrc(struct lpfc_hba *phba, uint16_t type)
{
	uint16_t curr_ext_cnt, rsrc_ext_cnt;
	uint16_t size_diff, rsrc_ext_size;
	int rc = 0;
	struct lpfc_rsrc_blks *rsrc_entry;
	struct list_head *rsrc_blk_list = NULL;

	size_diff = 0;
	curr_ext_cnt = 0;
	rc = lpfc_sli4_get_avail_extnt_rsrc(phba, type,
					    &rsrc_ext_cnt,
					    &rsrc_ext_size);
	if (unlikely(rc))
		return -EIO;

	switch (type) {
	case LPFC_RSC_TYPE_FCOE_RPI:
		rsrc_blk_list = &phba->sli4_hba.lpfc_rpi_blk_list;
		break;
	case LPFC_RSC_TYPE_FCOE_VPI:
		rsrc_blk_list = &phba->lpfc_vpi_blk_list;
		break;
	case LPFC_RSC_TYPE_FCOE_XRI:
		rsrc_blk_list = &phba->sli4_hba.lpfc_xri_blk_list;
		break;
	case LPFC_RSC_TYPE_FCOE_VFI:
		rsrc_blk_list = &phba->sli4_hba.lpfc_vfi_blk_list;
		break;
	default:
		break;
	}

	/* Defensive: an unrecognized type leaves the list pointer NULL */
	if (!rsrc_blk_list)
		return -EIO;

	list_for_each_entry(rsrc_entry, rsrc_blk_list, list) {
		curr_ext_cnt++;
		if (rsrc_entry->rsrc_size != rsrc_ext_size)
			size_diff++;
	}

	if (curr_ext_cnt != rsrc_ext_cnt || size_diff != 0)
		rc = 1;

	return rc;
}
5274
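/**
 * lpfc_sli4_cfg_post_extnts - Issue an ALLOC_RSRC_EXTENT mailbox command
 * @phba: pointer to lpfc hba data structure.
 * @extnt_cnt: number of extents to request.
 * @type: resource extent type to allocate.
 * @emb: set on return to embedded or non-embedded mailbox format.
 * @mbox: caller-allocated mailbox command to build and issue.
 *
 * Sizes the request, falling back to a non-embedded (SGE based) mailbox
 * when the extent ids cannot fit in the embedded payload, and issues it
 * by polling or by waiting, depending on whether interrupts are enabled.
 * Returns 0 on success, -ENOMEM or -EIO on failure.
 **/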
static int
lpfc_sli4_cfg_post_extnts(struct lpfc_hba *phba, uint16_t extnt_cnt,
			  uint16_t type, bool *emb, LPFC_MBOXQ_t *mbox)
{
	int rc = 0;
	uint32_t req_len;
	uint32_t emb_len;
	uint32_t alloc_len, mbox_tmo;

	/* Calculate the total requested length of the dma memory */
	req_len = extnt_cnt * sizeof(uint16_t);

	/*
	 * Calculate the size of an embedded mailbox.  The uint32_t
	 * accounts for the extents-specific word.
	 */
	emb_len = sizeof(MAILBOX_t) - sizeof(struct mbox_header) -
		sizeof(uint32_t);

	/*
	 * Presume the allocation and response will fit into an embedded
	 * mailbox.  If not true, reconfigure to a non-embedded mailbox.
	 */
	*emb = LPFC_SLI4_MBX_EMBED;
	if (req_len > emb_len) {
		req_len = extnt_cnt * sizeof(uint16_t) +
			sizeof(union lpfc_sli4_cfg_shdr) +
			sizeof(uint32_t);
		*emb = LPFC_SLI4_MBX_NEMBED;
	}

	alloc_len = lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON,
				     LPFC_MBOX_OPCODE_ALLOC_RSRC_EXTENT,
				     req_len, *emb);
	if (alloc_len < req_len) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
			"2982 Allocated DMA memory size (x%x) is "
			"less than the requested DMA memory "
			"size (x%x)\n", alloc_len, req_len);
		return -ENOMEM;
	}
	rc = lpfc_sli4_mbox_rsrc_extent(phba, mbox, extnt_cnt, type, *emb);
	if (unlikely(rc))
		return -EIO;

	if (!phba->sli4_hba.intr_enable)
		rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
	else {
		mbox_tmo = lpfc_mbox_tmo_val(phba, mbox);
		rc = lpfc_sli_issue_mbox_wait(phba, mbox, mbox_tmo);
	}

	if (unlikely(rc))
		rc = -EIO;
	return rc;
}
5348
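/**
 * lpfc_sli4_alloc_extent - Allocate port extents and driver id resources
 * @phba: pointer to lpfc hba data structure.
 * @type: resource extent type to allocate.
 *
 * Queries the port for the available extents of @type, posts an
 * allocation request, and builds the driver-side bookkeeping for the
 * extents actually granted: a bitmask for id allocation, an id array
 * mapping driver indices to port resource ids, and a block list
 * describing each extent.
 **/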
5357static int
5358lpfc_sli4_alloc_extent(struct lpfc_hba *phba, uint16_t type)
5359{
5360 bool emb = false;
5361 uint16_t rsrc_id_cnt, rsrc_cnt, rsrc_size;
5362 uint16_t rsrc_id, rsrc_start, j, k;
5363 uint16_t *ids;
5364 int i, rc;
5365 unsigned long longs;
5366 unsigned long *bmask;
5367 struct lpfc_rsrc_blks *rsrc_blks;
5368 LPFC_MBOXQ_t *mbox;
5369 uint32_t length;
5370 struct lpfc_id_range *id_array = NULL;
5371 void *virtaddr = NULL;
5372 struct lpfc_mbx_nembed_rsrc_extent *n_rsrc;
5373 struct lpfc_mbx_alloc_rsrc_extents *rsrc_ext;
5374 struct list_head *ext_blk_list;
5375
5376 rc = lpfc_sli4_get_avail_extnt_rsrc(phba, type,
5377 &rsrc_cnt,
5378 &rsrc_size);
5379 if (unlikely(rc))
5380 return -EIO;
5381
5382 if ((rsrc_cnt == 0) || (rsrc_size == 0)) {
5383 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_INIT,
5384 "3009 No available Resource Extents "
5385 "for resource type 0x%x: Count: 0x%x, "
5386 "Size 0x%x\n", type, rsrc_cnt,
5387 rsrc_size);
5388 return -ENOMEM;
5389 }
5390
5391 lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_INIT | LOG_SLI,
5392 "2903 Post resource extents type-0x%x: "
5393 "count:%d, size %d\n", type, rsrc_cnt, rsrc_size);
5394
5395 mbox = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
5396 if (!mbox)
5397 return -ENOMEM;
5398
5399 rc = lpfc_sli4_cfg_post_extnts(phba, rsrc_cnt, type, &emb, mbox);
5400 if (unlikely(rc)) {
5401 rc = -EIO;
5402 goto err_exit;
5403 }
5404
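	/*
	 * Figure out where the response is located and take local pointers
	 * to it. The port is not required to grant the full extent count
	 * requested, so rsrc_cnt is refreshed from the response.
	 */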
5411 if (emb == LPFC_SLI4_MBX_EMBED) {
5412 rsrc_ext = &mbox->u.mqe.un.alloc_rsrc_extents;
5413 id_array = &rsrc_ext->u.rsp.id[0];
5414 rsrc_cnt = bf_get(lpfc_mbx_rsrc_cnt, &rsrc_ext->u.rsp);
5415 } else {
5416 virtaddr = mbox->sge_array->addr[0];
5417 n_rsrc = (struct lpfc_mbx_nembed_rsrc_extent *) virtaddr;
5418 rsrc_cnt = bf_get(lpfc_mbx_rsrc_cnt, n_rsrc);
5419 id_array = &n_rsrc->id;
5420 }
5421
5422 longs = ((rsrc_cnt * rsrc_size) + BITS_PER_LONG - 1) / BITS_PER_LONG;
5423 rsrc_id_cnt = rsrc_cnt * rsrc_size;
5424
5425
5426
5427
5428
5429 length = sizeof(struct lpfc_rsrc_blks);
5430 switch (type) {
5431 case LPFC_RSC_TYPE_FCOE_RPI:
5432 phba->sli4_hba.rpi_bmask = kzalloc(longs *
5433 sizeof(unsigned long),
5434 GFP_KERNEL);
5435 if (unlikely(!phba->sli4_hba.rpi_bmask)) {
5436 rc = -ENOMEM;
5437 goto err_exit;
5438 }
5439 phba->sli4_hba.rpi_ids = kzalloc(rsrc_id_cnt *
5440 sizeof(uint16_t),
5441 GFP_KERNEL);
5442 if (unlikely(!phba->sli4_hba.rpi_ids)) {
5443 kfree(phba->sli4_hba.rpi_bmask);
5444 rc = -ENOMEM;
5445 goto err_exit;
5446 }
5447
5448
5449
5450
5451
5452
5453 phba->sli4_hba.next_rpi = rsrc_id_cnt;
5454
5455
5456 bmask = phba->sli4_hba.rpi_bmask;
5457 ids = phba->sli4_hba.rpi_ids;
5458 ext_blk_list = &phba->sli4_hba.lpfc_rpi_blk_list;
5459 break;
5460 case LPFC_RSC_TYPE_FCOE_VPI:
5461 phba->vpi_bmask = kzalloc(longs *
5462 sizeof(unsigned long),
5463 GFP_KERNEL);
5464 if (unlikely(!phba->vpi_bmask)) {
5465 rc = -ENOMEM;
5466 goto err_exit;
5467 }
5468 phba->vpi_ids = kzalloc(rsrc_id_cnt *
5469 sizeof(uint16_t),
5470 GFP_KERNEL);
5471 if (unlikely(!phba->vpi_ids)) {
5472 kfree(phba->vpi_bmask);
5473 rc = -ENOMEM;
5474 goto err_exit;
5475 }
5476
5477
5478 bmask = phba->vpi_bmask;
5479 ids = phba->vpi_ids;
5480 ext_blk_list = &phba->lpfc_vpi_blk_list;
5481 break;
5482 case LPFC_RSC_TYPE_FCOE_XRI:
5483 phba->sli4_hba.xri_bmask = kzalloc(longs *
5484 sizeof(unsigned long),
5485 GFP_KERNEL);
5486 if (unlikely(!phba->sli4_hba.xri_bmask)) {
5487 rc = -ENOMEM;
5488 goto err_exit;
5489 }
5490 phba->sli4_hba.max_cfg_param.xri_used = 0;
5491 phba->sli4_hba.xri_ids = kzalloc(rsrc_id_cnt *
5492 sizeof(uint16_t),
5493 GFP_KERNEL);
5494 if (unlikely(!phba->sli4_hba.xri_ids)) {
5495 kfree(phba->sli4_hba.xri_bmask);
5496 rc = -ENOMEM;
5497 goto err_exit;
5498 }
5499
5500
5501 bmask = phba->sli4_hba.xri_bmask;
5502 ids = phba->sli4_hba.xri_ids;
5503 ext_blk_list = &phba->sli4_hba.lpfc_xri_blk_list;
5504 break;
5505 case LPFC_RSC_TYPE_FCOE_VFI:
5506 phba->sli4_hba.vfi_bmask = kzalloc(longs *
5507 sizeof(unsigned long),
5508 GFP_KERNEL);
5509 if (unlikely(!phba->sli4_hba.vfi_bmask)) {
5510 rc = -ENOMEM;
5511 goto err_exit;
5512 }
5513 phba->sli4_hba.vfi_ids = kzalloc(rsrc_id_cnt *
5514 sizeof(uint16_t),
5515 GFP_KERNEL);
5516 if (unlikely(!phba->sli4_hba.vfi_ids)) {
5517 kfree(phba->sli4_hba.vfi_bmask);
5518 rc = -ENOMEM;
5519 goto err_exit;
5520 }
5521
5522
5523 bmask = phba->sli4_hba.vfi_bmask;
5524 ids = phba->sli4_hba.vfi_ids;
5525 ext_blk_list = &phba->sli4_hba.lpfc_vfi_blk_list;
5526 break;
	default:
		/* Unknown extent type: fail the request (defensive; callers
		 * only pass the four FCoE resource types handled above).
		 */
		id_array = NULL;
		bmask = NULL;
		ids = NULL;
		ext_blk_list = NULL;
		rc = -EIO;
		goto err_exit;
	}
5535
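	/*
	 * The response packs two 16-bit extent ids into each 32-bit word:
	 * even loop indices read the low halfword of id_array[k], odd
	 * indices read the high halfword, and k advances after each odd
	 * index.
	 */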
5542 for (i = 0, j = 0, k = 0; i < rsrc_cnt; i++) {
5543 if ((i % 2) == 0)
5544 rsrc_id = bf_get(lpfc_mbx_rsrc_id_word4_0,
5545 &id_array[k]);
5546 else
5547 rsrc_id = bf_get(lpfc_mbx_rsrc_id_word4_1,
5548 &id_array[k]);
5549
5550 rsrc_blks = kzalloc(length, GFP_KERNEL);
5551 if (unlikely(!rsrc_blks)) {
5552 rc = -ENOMEM;
5553 kfree(bmask);
5554 kfree(ids);
5555 goto err_exit;
5556 }
5557 rsrc_blks->rsrc_start = rsrc_id;
5558 rsrc_blks->rsrc_size = rsrc_size;
5559 list_add_tail(&rsrc_blks->list, ext_blk_list);
5560 rsrc_start = rsrc_id;
5561 if ((type == LPFC_RSC_TYPE_FCOE_XRI) && (j == 0))
5562 phba->sli4_hba.scsi_xri_start = rsrc_start +
5563 lpfc_sli4_get_els_iocb_cnt(phba);
5564
5565 while (rsrc_id < (rsrc_start + rsrc_size)) {
5566 ids[j] = rsrc_id;
5567 rsrc_id++;
5568 j++;
5569 }
5570
5571 if ((i % 2) == 1)
5572 k++;
5573 }
5574 err_exit:
5575 lpfc_sli4_mbox_cmd_free(phba, mbox);
5576 return rc;
5577}
5578
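/**
 * lpfc_sli4_dealloc_extent - Release all extents of a resource type
 * @phba: pointer to lpfc hba data structure.
 * @type: resource extent type to release.
 *
 * Issues a DEALLOC_RSRC_EXTENT mailbox command and, on success, frees
 * the driver's bitmask, id array and extent block list for @type. For
 * RPIs only the extent block list is released here; the bitmask and id
 * array are managed elsewhere with the rpi headers.
 **/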
5588static int
5589lpfc_sli4_dealloc_extent(struct lpfc_hba *phba, uint16_t type)
5590{
5591 int rc;
5592 uint32_t length, mbox_tmo = 0;
5593 LPFC_MBOXQ_t *mbox;
5594 struct lpfc_mbx_dealloc_rsrc_extents *dealloc_rsrc;
5595 struct lpfc_rsrc_blks *rsrc_blk, *rsrc_blk_next;
5596
5597 mbox = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
5598 if (!mbox)
5599 return -ENOMEM;
5600
5601
5602
5603
5604
5605
5606 length = (sizeof(struct lpfc_mbx_dealloc_rsrc_extents) -
5607 sizeof(struct lpfc_sli4_cfg_mhdr));
5608 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON,
5609 LPFC_MBOX_OPCODE_DEALLOC_RSRC_EXTENT,
5610 length, LPFC_SLI4_MBX_EMBED);
5611
5612
5613 rc = lpfc_sli4_mbox_rsrc_extent(phba, mbox, 0, type,
5614 LPFC_SLI4_MBX_EMBED);
5615 if (unlikely(rc)) {
5616 rc = -EIO;
5617 goto out_free_mbox;
5618 }
5619 if (!phba->sli4_hba.intr_enable)
5620 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
5621 else {
5622 mbox_tmo = lpfc_mbox_tmo_val(phba, mbox);
5623 rc = lpfc_sli_issue_mbox_wait(phba, mbox, mbox_tmo);
5624 }
5625 if (unlikely(rc)) {
5626 rc = -EIO;
5627 goto out_free_mbox;
5628 }
5629
5630 dealloc_rsrc = &mbox->u.mqe.un.dealloc_rsrc_extents;
5631 if (bf_get(lpfc_mbox_hdr_status,
5632 &dealloc_rsrc->header.cfg_shdr.response)) {
5633 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_INIT,
5634 "2919 Failed to release resource extents "
5635 "for type %d - Status 0x%x Add'l Status 0x%x. "
5636 "Resource memory not released.\n",
5637 type,
5638 bf_get(lpfc_mbox_hdr_status,
5639 &dealloc_rsrc->header.cfg_shdr.response),
5640 bf_get(lpfc_mbox_hdr_add_status,
5641 &dealloc_rsrc->header.cfg_shdr.response));
5642 rc = -EIO;
5643 goto out_free_mbox;
5644 }
5645
5646
5647 switch (type) {
5648 case LPFC_RSC_TYPE_FCOE_VPI:
5649 kfree(phba->vpi_bmask);
5650 kfree(phba->vpi_ids);
5651 bf_set(lpfc_vpi_rsrc_rdy, &phba->sli4_hba.sli4_flags, 0);
5652 list_for_each_entry_safe(rsrc_blk, rsrc_blk_next,
5653 &phba->lpfc_vpi_blk_list, list) {
5654 list_del_init(&rsrc_blk->list);
5655 kfree(rsrc_blk);
5656 }
5657 phba->sli4_hba.max_cfg_param.vpi_used = 0;
5658 break;
5659 case LPFC_RSC_TYPE_FCOE_XRI:
5660 kfree(phba->sli4_hba.xri_bmask);
5661 kfree(phba->sli4_hba.xri_ids);
5662 list_for_each_entry_safe(rsrc_blk, rsrc_blk_next,
5663 &phba->sli4_hba.lpfc_xri_blk_list, list) {
5664 list_del_init(&rsrc_blk->list);
5665 kfree(rsrc_blk);
5666 }
5667 break;
5668 case LPFC_RSC_TYPE_FCOE_VFI:
5669 kfree(phba->sli4_hba.vfi_bmask);
5670 kfree(phba->sli4_hba.vfi_ids);
5671 bf_set(lpfc_vfi_rsrc_rdy, &phba->sli4_hba.sli4_flags, 0);
5672 list_for_each_entry_safe(rsrc_blk, rsrc_blk_next,
5673 &phba->sli4_hba.lpfc_vfi_blk_list, list) {
5674 list_del_init(&rsrc_blk->list);
5675 kfree(rsrc_blk);
5676 }
5677 break;
5678 case LPFC_RSC_TYPE_FCOE_RPI:
5679
5680 list_for_each_entry_safe(rsrc_blk, rsrc_blk_next,
5681 &phba->sli4_hba.lpfc_rpi_blk_list, list) {
5682 list_del_init(&rsrc_blk->list);
5683 kfree(rsrc_blk);
5684 }
5685 break;
5686 default:
5687 break;
5688 }
5689
5690 bf_set(lpfc_idx_rsrc_rdy, &phba->sli4_hba.sli4_flags, 0);
5691
5692 out_free_mbox:
5693 mempool_free(mbox, phba->mbox_mem_pool);
5694 return rc;
5695}
5696
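/**
 * lpfc_set_features - Build a SET_FEATURES mailbox command
 * @phba: pointer to lpfc hba data structure.
 * @mbox: caller-allocated mailbox to populate.
 * @feature: feature to request (UE recovery or MDS diagnostics).
 *
 * Only constructs the command; the caller is responsible for issuing it
 * and for checking the completion status.
 **/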
static void
lpfc_set_features(struct lpfc_hba *phba, LPFC_MBOXQ_t *mbox,
		  uint32_t feature)
{
	uint32_t len;

	len = sizeof(struct lpfc_mbx_set_feature) -
		sizeof(struct lpfc_sli4_cfg_mhdr);
	lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON,
			 LPFC_MBOX_OPCODE_SET_FEATURES, len,
			 LPFC_SLI4_MBX_EMBED);

	switch (feature) {
	case LPFC_SET_UE_RECOVERY:
		bf_set(lpfc_mbx_set_feature_UER,
		       &mbox->u.mqe.un.set_feature, 1);
		mbox->u.mqe.un.set_feature.feature = LPFC_SET_UE_RECOVERY;
		mbox->u.mqe.un.set_feature.param_len = 8;
		break;
	case LPFC_SET_MDS_DIAGS:
		bf_set(lpfc_mbx_set_feature_mds,
		       &mbox->u.mqe.un.set_feature, 1);
		bf_set(lpfc_mbx_set_feature_mds_deep_loopbk,
		       &mbox->u.mqe.un.set_feature, 0);
		mbox->u.mqe.un.set_feature.feature = LPFC_SET_MDS_DIAGS;
		mbox->u.mqe.un.set_feature.param_len = 8;
		break;
	}

	return;
}
5728
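/**
 * lpfc_sli4_alloc_resource_identifiers - Set up the port resource ids
 * @phba: pointer to lpfc hba data structure.
 *
 * Allocates VFI, VPI, RPI and XRI resources either through extents,
 * when the port is provisioned that way, or as contiguous ranges based
 * at the port-reported base values. When ids were already provisioned,
 * extents are revalidated and reallocated only if the provisioning has
 * changed. Returns 0 on success or a negative error code.
 **/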
5735int
5736lpfc_sli4_alloc_resource_identifiers(struct lpfc_hba *phba)
5737{
5738 int i, rc, error = 0;
5739 uint16_t count, base;
5740 unsigned long longs;
5741
5742 if (!phba->sli4_hba.rpi_hdrs_in_use)
5743 phba->sli4_hba.next_rpi = phba->sli4_hba.max_cfg_param.max_rpi;
5744 if (phba->sli4_hba.extents_in_use) {
5745
5746
5747
5748
5749
5750 if (bf_get(lpfc_idx_rsrc_rdy, &phba->sli4_hba.sli4_flags) ==
5751 LPFC_IDX_RSRC_RDY) {
5752
5753
5754
5755
5756
5757 rc = lpfc_sli4_chk_avail_extnt_rsrc(phba,
5758 LPFC_RSC_TYPE_FCOE_VFI);
5759 if (rc != 0)
5760 error++;
5761 rc = lpfc_sli4_chk_avail_extnt_rsrc(phba,
5762 LPFC_RSC_TYPE_FCOE_VPI);
5763 if (rc != 0)
5764 error++;
5765 rc = lpfc_sli4_chk_avail_extnt_rsrc(phba,
5766 LPFC_RSC_TYPE_FCOE_XRI);
5767 if (rc != 0)
5768 error++;
5769 rc = lpfc_sli4_chk_avail_extnt_rsrc(phba,
5770 LPFC_RSC_TYPE_FCOE_RPI);
5771 if (rc != 0)
5772 error++;
5773
5774
5775
5776
5777
5778
5779
5780 if (error) {
5781 lpfc_printf_log(phba, KERN_INFO,
5782 LOG_MBOX | LOG_INIT,
5783 "2931 Detected extent resource "
5784 "change. Reallocating all "
5785 "extents.\n");
5786 rc = lpfc_sli4_dealloc_extent(phba,
5787 LPFC_RSC_TYPE_FCOE_VFI);
5788 rc = lpfc_sli4_dealloc_extent(phba,
5789 LPFC_RSC_TYPE_FCOE_VPI);
5790 rc = lpfc_sli4_dealloc_extent(phba,
5791 LPFC_RSC_TYPE_FCOE_XRI);
5792 rc = lpfc_sli4_dealloc_extent(phba,
5793 LPFC_RSC_TYPE_FCOE_RPI);
5794 } else
5795 return 0;
5796 }
5797
5798 rc = lpfc_sli4_alloc_extent(phba, LPFC_RSC_TYPE_FCOE_VFI);
5799 if (unlikely(rc))
5800 goto err_exit;
5801
5802 rc = lpfc_sli4_alloc_extent(phba, LPFC_RSC_TYPE_FCOE_VPI);
5803 if (unlikely(rc))
5804 goto err_exit;
5805
5806 rc = lpfc_sli4_alloc_extent(phba, LPFC_RSC_TYPE_FCOE_RPI);
5807 if (unlikely(rc))
5808 goto err_exit;
5809
5810 rc = lpfc_sli4_alloc_extent(phba, LPFC_RSC_TYPE_FCOE_XRI);
5811 if (unlikely(rc))
5812 goto err_exit;
5813 bf_set(lpfc_idx_rsrc_rdy, &phba->sli4_hba.sli4_flags,
5814 LPFC_IDX_RSRC_RDY);
5815 return rc;
5816 } else {
5817
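		/*
		 * The port does not use resource extents; ids are assigned
		 * as contiguous ranges from the port-reported base values.
		 * If ids were provisioned before, release them first.
		 */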
5824 if (bf_get(lpfc_idx_rsrc_rdy, &phba->sli4_hba.sli4_flags) ==
5825 LPFC_IDX_RSRC_RDY) {
5826 lpfc_sli4_dealloc_resource_identifiers(phba);
5827 lpfc_sli4_remove_rpis(phba);
5828 }
5829
5830 count = phba->sli4_hba.max_cfg_param.max_rpi;
5831 if (count <= 0) {
5832 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
5833 "3279 Invalid provisioning of "
5834 "rpi:%d\n", count);
5835 rc = -EINVAL;
5836 goto err_exit;
5837 }
5838 base = phba->sli4_hba.max_cfg_param.rpi_base;
5839 longs = (count + BITS_PER_LONG - 1) / BITS_PER_LONG;
5840 phba->sli4_hba.rpi_bmask = kzalloc(longs *
5841 sizeof(unsigned long),
5842 GFP_KERNEL);
5843 if (unlikely(!phba->sli4_hba.rpi_bmask)) {
5844 rc = -ENOMEM;
5845 goto err_exit;
5846 }
5847 phba->sli4_hba.rpi_ids = kzalloc(count *
5848 sizeof(uint16_t),
5849 GFP_KERNEL);
5850 if (unlikely(!phba->sli4_hba.rpi_ids)) {
5851 rc = -ENOMEM;
5852 goto free_rpi_bmask;
5853 }
5854
5855 for (i = 0; i < count; i++)
5856 phba->sli4_hba.rpi_ids[i] = base + i;
5857
5858
5859 count = phba->sli4_hba.max_cfg_param.max_vpi;
5860 if (count <= 0) {
5861 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
5862 "3280 Invalid provisioning of "
5863 "vpi:%d\n", count);
5864 rc = -EINVAL;
5865 goto free_rpi_ids;
5866 }
5867 base = phba->sli4_hba.max_cfg_param.vpi_base;
5868 longs = (count + BITS_PER_LONG - 1) / BITS_PER_LONG;
5869 phba->vpi_bmask = kzalloc(longs *
5870 sizeof(unsigned long),
5871 GFP_KERNEL);
5872 if (unlikely(!phba->vpi_bmask)) {
5873 rc = -ENOMEM;
5874 goto free_rpi_ids;
5875 }
5876 phba->vpi_ids = kzalloc(count *
5877 sizeof(uint16_t),
5878 GFP_KERNEL);
5879 if (unlikely(!phba->vpi_ids)) {
5880 rc = -ENOMEM;
5881 goto free_vpi_bmask;
5882 }
5883
5884 for (i = 0; i < count; i++)
5885 phba->vpi_ids[i] = base + i;
5886
5887
5888 count = phba->sli4_hba.max_cfg_param.max_xri;
5889 if (count <= 0) {
5890 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
5891 "3281 Invalid provisioning of "
5892 "xri:%d\n", count);
5893 rc = -EINVAL;
5894 goto free_vpi_ids;
5895 }
5896 base = phba->sli4_hba.max_cfg_param.xri_base;
5897 longs = (count + BITS_PER_LONG - 1) / BITS_PER_LONG;
5898 phba->sli4_hba.xri_bmask = kzalloc(longs *
5899 sizeof(unsigned long),
5900 GFP_KERNEL);
5901 if (unlikely(!phba->sli4_hba.xri_bmask)) {
5902 rc = -ENOMEM;
5903 goto free_vpi_ids;
5904 }
5905 phba->sli4_hba.max_cfg_param.xri_used = 0;
5906 phba->sli4_hba.xri_ids = kzalloc(count *
5907 sizeof(uint16_t),
5908 GFP_KERNEL);
5909 if (unlikely(!phba->sli4_hba.xri_ids)) {
5910 rc = -ENOMEM;
5911 goto free_xri_bmask;
5912 }
5913
5914 for (i = 0; i < count; i++)
5915 phba->sli4_hba.xri_ids[i] = base + i;
5916
5917
5918 count = phba->sli4_hba.max_cfg_param.max_vfi;
5919 if (count <= 0) {
5920 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
5921 "3282 Invalid provisioning of "
5922 "vfi:%d\n", count);
5923 rc = -EINVAL;
5924 goto free_xri_ids;
5925 }
5926 base = phba->sli4_hba.max_cfg_param.vfi_base;
5927 longs = (count + BITS_PER_LONG - 1) / BITS_PER_LONG;
5928 phba->sli4_hba.vfi_bmask = kzalloc(longs *
5929 sizeof(unsigned long),
5930 GFP_KERNEL);
5931 if (unlikely(!phba->sli4_hba.vfi_bmask)) {
5932 rc = -ENOMEM;
5933 goto free_xri_ids;
5934 }
5935 phba->sli4_hba.vfi_ids = kzalloc(count *
5936 sizeof(uint16_t),
5937 GFP_KERNEL);
5938 if (unlikely(!phba->sli4_hba.vfi_ids)) {
5939 rc = -ENOMEM;
5940 goto free_vfi_bmask;
5941 }
5942
5943 for (i = 0; i < count; i++)
5944 phba->sli4_hba.vfi_ids[i] = base + i;
5945
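		/* All resource id ranges are provisioned; mark them ready */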
5950 bf_set(lpfc_idx_rsrc_rdy, &phba->sli4_hba.sli4_flags,
5951 LPFC_IDX_RSRC_RDY);
5952 return 0;
5953 }
5954
5955 free_vfi_bmask:
5956 kfree(phba->sli4_hba.vfi_bmask);
5957 phba->sli4_hba.vfi_bmask = NULL;
5958 free_xri_ids:
5959 kfree(phba->sli4_hba.xri_ids);
5960 phba->sli4_hba.xri_ids = NULL;
5961 free_xri_bmask:
5962 kfree(phba->sli4_hba.xri_bmask);
5963 phba->sli4_hba.xri_bmask = NULL;
5964 free_vpi_ids:
5965 kfree(phba->vpi_ids);
5966 phba->vpi_ids = NULL;
5967 free_vpi_bmask:
5968 kfree(phba->vpi_bmask);
5969 phba->vpi_bmask = NULL;
5970 free_rpi_ids:
5971 kfree(phba->sli4_hba.rpi_ids);
5972 phba->sli4_hba.rpi_ids = NULL;
5973 free_rpi_bmask:
5974 kfree(phba->sli4_hba.rpi_bmask);
5975 phba->sli4_hba.rpi_bmask = NULL;
5976 err_exit:
5977 return rc;
5978}
5979
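/**
 * lpfc_sli4_dealloc_resource_identifiers - Release all port resource ids
 * @phba: pointer to lpfc hba data structure.
 *
 * Frees the VPI, RPI, XRI and VFI resources set up by
 * lpfc_sli4_alloc_resource_identifiers(), through extents when the port
 * uses them, otherwise by freeing the contiguous-range bitmasks and id
 * arrays directly.
 **/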
int
lpfc_sli4_dealloc_resource_identifiers(struct lpfc_hba *phba)
{
	if (phba->sli4_hba.extents_in_use) {
		lpfc_sli4_dealloc_extent(phba, LPFC_RSC_TYPE_FCOE_VPI);
		lpfc_sli4_dealloc_extent(phba, LPFC_RSC_TYPE_FCOE_RPI);
		lpfc_sli4_dealloc_extent(phba, LPFC_RSC_TYPE_FCOE_XRI);
		lpfc_sli4_dealloc_extent(phba, LPFC_RSC_TYPE_FCOE_VFI);
	} else {
		kfree(phba->vpi_bmask);
		phba->sli4_hba.max_cfg_param.vpi_used = 0;
		kfree(phba->vpi_ids);
		bf_set(lpfc_vpi_rsrc_rdy, &phba->sli4_hba.sli4_flags, 0);
		kfree(phba->sli4_hba.xri_bmask);
		kfree(phba->sli4_hba.xri_ids);
		kfree(phba->sli4_hba.vfi_bmask);
		kfree(phba->sli4_hba.vfi_ids);
		bf_set(lpfc_vfi_rsrc_rdy, &phba->sli4_hba.sli4_flags, 0);
		bf_set(lpfc_idx_rsrc_rdy, &phba->sli4_hba.sli4_flags, 0);
	}

	return 0;
}
6010
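/**
 * lpfc_sli4_get_allocated_extnts - Read back extents held by this function
 * @phba: pointer to lpfc hba data structure.
 * @type: resource extent type to query.
 * @extnt_cnt: returned number of extents currently allocated to this port.
 * @extnt_size: returned number of resource ids in each extent.
 *
 * Issues a GET_ALLOC_RSRC_EXTENT mailbox command to discover what the
 * port has already allocated to this PCI function, sizing the mailbox
 * from the driver's current extent block list. Returns 0, -ENOMEM or
 * -EIO.
 **/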
6021int
6022lpfc_sli4_get_allocated_extnts(struct lpfc_hba *phba, uint16_t type,
6023 uint16_t *extnt_cnt, uint16_t *extnt_size)
6024{
6025 bool emb;
6026 int rc = 0;
6027 uint16_t curr_blks = 0;
6028 uint32_t req_len, emb_len;
6029 uint32_t alloc_len, mbox_tmo;
6030 struct list_head *blk_list_head;
6031 struct lpfc_rsrc_blks *rsrc_blk;
6032 LPFC_MBOXQ_t *mbox;
6033 void *virtaddr = NULL;
6034 struct lpfc_mbx_nembed_rsrc_extent *n_rsrc;
6035 struct lpfc_mbx_alloc_rsrc_extents *rsrc_ext;
6036 union lpfc_sli4_cfg_shdr *shdr;
6037
6038 switch (type) {
6039 case LPFC_RSC_TYPE_FCOE_VPI:
6040 blk_list_head = &phba->lpfc_vpi_blk_list;
6041 break;
6042 case LPFC_RSC_TYPE_FCOE_XRI:
6043 blk_list_head = &phba->sli4_hba.lpfc_xri_blk_list;
6044 break;
6045 case LPFC_RSC_TYPE_FCOE_VFI:
6046 blk_list_head = &phba->sli4_hba.lpfc_vfi_blk_list;
6047 break;
6048 case LPFC_RSC_TYPE_FCOE_RPI:
6049 blk_list_head = &phba->sli4_hba.lpfc_rpi_blk_list;
6050 break;
6051 default:
6052 return -EIO;
6053 }
6054
6055
6056 list_for_each_entry(rsrc_blk, blk_list_head, list) {
6057 if (curr_blks == 0) {
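			/*
			 * The GET_ALLOCATED reply does not return the extent
			 * size, only the count. Every extent of a given type
			 * has the same size, so record it from the first
			 * block on the list.
			 */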
6065 *extnt_size = rsrc_blk->rsrc_size;
6066 }
6067 curr_blks++;
6068 }
6069
6070
6071
6072
6073
6074 emb_len = sizeof(MAILBOX_t) - sizeof(struct mbox_header) -
6075 sizeof(uint32_t);
6076
6077
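	/*
	 * Presume the allocation and response will fit into an embedded
	 * mailbox.  If not true, reconfigure to a non-embedded mailbox.
	 */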
	emb = LPFC_SLI4_MBX_EMBED;
	req_len = curr_blks * sizeof(uint16_t);
	if (req_len > emb_len) {
		req_len = curr_blks * sizeof(uint16_t) +
			sizeof(union lpfc_sli4_cfg_shdr) +
			sizeof(uint32_t);
		emb = LPFC_SLI4_MBX_NEMBED;
	}
6089
6090 mbox = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
6091 if (!mbox)
6092 return -ENOMEM;
6093 memset(mbox, 0, sizeof(LPFC_MBOXQ_t));
6094
6095 alloc_len = lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON,
6096 LPFC_MBOX_OPCODE_GET_ALLOC_RSRC_EXTENT,
6097 req_len, emb);
6098 if (alloc_len < req_len) {
6099 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
6100 "2983 Allocated DMA memory size (x%x) is "
6101 "less than the requested DMA memory "
6102 "size (x%x)\n", alloc_len, req_len);
6103 rc = -ENOMEM;
6104 goto err_exit;
6105 }
6106 rc = lpfc_sli4_mbox_rsrc_extent(phba, mbox, curr_blks, type, emb);
6107 if (unlikely(rc)) {
6108 rc = -EIO;
6109 goto err_exit;
6110 }
6111
6112 if (!phba->sli4_hba.intr_enable)
6113 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
6114 else {
6115 mbox_tmo = lpfc_mbox_tmo_val(phba, mbox);
6116 rc = lpfc_sli_issue_mbox_wait(phba, mbox, mbox_tmo);
6117 }
6118
6119 if (unlikely(rc)) {
6120 rc = -EIO;
6121 goto err_exit;
6122 }
6123
6124
6125
6126
6127
6128
6129
6130 if (emb == LPFC_SLI4_MBX_EMBED) {
6131 rsrc_ext = &mbox->u.mqe.un.alloc_rsrc_extents;
6132 shdr = &rsrc_ext->header.cfg_shdr;
6133 *extnt_cnt = bf_get(lpfc_mbx_rsrc_cnt, &rsrc_ext->u.rsp);
6134 } else {
6135 virtaddr = mbox->sge_array->addr[0];
6136 n_rsrc = (struct lpfc_mbx_nembed_rsrc_extent *) virtaddr;
6137 shdr = &n_rsrc->cfg_shdr;
6138 *extnt_cnt = bf_get(lpfc_mbx_rsrc_cnt, n_rsrc);
6139 }
6140
6141 if (bf_get(lpfc_mbox_hdr_status, &shdr->response)) {
6142 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_INIT,
6143 "2984 Failed to read allocated resources "
6144 "for type %d - Status 0x%x Add'l Status 0x%x.\n",
6145 type,
6146 bf_get(lpfc_mbox_hdr_status, &shdr->response),
6147 bf_get(lpfc_mbox_hdr_add_status, &shdr->response));
6148 rc = -EIO;
6149 goto err_exit;
6150 }
6151 err_exit:
6152 lpfc_sli4_mbox_cmd_free(phba, mbox);
6153 return rc;
6154}
6155
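/**
 * lpfc_sli4_repost_els_sgl_list - Repost all ELS buffer sgls to the port
 * @phba: pointer to lpfc hba data structure.
 *
 * Posts the ELS sgl list back to the port in blocks of up to
 * LPFC_NEMBED_MBOX_SGL_CNT xri-contiguous entries; a single trailing
 * sgl is posted individually. Entries that fail to post are freed and
 * subtracted from the ELS xri count. Returns 0 on success or -EIO if
 * nothing could be posted.
 **/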
6170static int
6171lpfc_sli4_repost_els_sgl_list(struct lpfc_hba *phba)
6172{
6173 struct lpfc_sglq *sglq_entry = NULL;
6174 struct lpfc_sglq *sglq_entry_next = NULL;
6175 struct lpfc_sglq *sglq_entry_first = NULL;
6176 int status, total_cnt, post_cnt = 0, num_posted = 0, block_cnt = 0;
6177 int last_xritag = NO_XRI;
6178 struct lpfc_sli_ring *pring;
6179 LIST_HEAD(prep_sgl_list);
6180 LIST_HEAD(blck_sgl_list);
6181 LIST_HEAD(allc_sgl_list);
6182 LIST_HEAD(post_sgl_list);
6183 LIST_HEAD(free_sgl_list);
6184
6185 pring = &phba->sli.ring[LPFC_ELS_RING];
6186 spin_lock_irq(&phba->hbalock);
6187 spin_lock(&pring->ring_lock);
6188 list_splice_init(&phba->sli4_hba.lpfc_sgl_list, &allc_sgl_list);
6189 spin_unlock(&pring->ring_lock);
6190 spin_unlock_irq(&phba->hbalock);
6191
6192 total_cnt = phba->sli4_hba.els_xri_cnt;
6193 list_for_each_entry_safe(sglq_entry, sglq_entry_next,
6194 &allc_sgl_list, list) {
6195 list_del_init(&sglq_entry->list);
6196 block_cnt++;
6197 if ((last_xritag != NO_XRI) &&
6198 (sglq_entry->sli4_xritag != last_xritag + 1)) {
6199
6200 list_splice_init(&prep_sgl_list, &blck_sgl_list);
6201 post_cnt = block_cnt - 1;
6202
6203 list_add_tail(&sglq_entry->list, &prep_sgl_list);
6204 block_cnt = 1;
6205 } else {
6206
6207 list_add_tail(&sglq_entry->list, &prep_sgl_list);
6208
6209 if (block_cnt == LPFC_NEMBED_MBOX_SGL_CNT) {
6210 list_splice_init(&prep_sgl_list,
6211 &blck_sgl_list);
6212 post_cnt = block_cnt;
6213 block_cnt = 0;
6214 }
6215 }
6216 num_posted++;
6217
6218
6219 last_xritag = sglq_entry->sli4_xritag;
6220
6221
6222 if (num_posted == phba->sli4_hba.els_xri_cnt) {
6223 if (post_cnt == 0) {
6224 list_splice_init(&prep_sgl_list,
6225 &blck_sgl_list);
6226 post_cnt = block_cnt;
6227 } else if (block_cnt == 1) {
6228 status = lpfc_sli4_post_sgl(phba,
6229 sglq_entry->phys, 0,
6230 sglq_entry->sli4_xritag);
6231 if (!status) {
6232
6233 list_add_tail(&sglq_entry->list,
6234 &post_sgl_list);
6235 } else {
6236
6237 lpfc_printf_log(phba, KERN_WARNING,
6238 LOG_SLI,
6239 "3159 Failed to post els "
6240 "sgl, xritag:x%x\n",
6241 sglq_entry->sli4_xritag);
6242 list_add_tail(&sglq_entry->list,
6243 &free_sgl_list);
6244 total_cnt--;
6245 }
6246 }
6247 }
6248
6249
6250 if (post_cnt == 0)
6251 continue;
6252
6253
6254 status = lpfc_sli4_post_els_sgl_list(phba, &blck_sgl_list,
6255 post_cnt);
6256
6257 if (!status) {
6258
6259 list_splice_init(&blck_sgl_list, &post_sgl_list);
6260 } else {
6261
6262 sglq_entry_first = list_first_entry(&blck_sgl_list,
6263 struct lpfc_sglq,
6264 list);
6265 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
6266 "3160 Failed to post els sgl-list, "
6267 "xritag:x%x-x%x\n",
6268 sglq_entry_first->sli4_xritag,
6269 (sglq_entry_first->sli4_xritag +
6270 post_cnt - 1));
6271 list_splice_init(&blck_sgl_list, &free_sgl_list);
6272 total_cnt -= post_cnt;
6273 }
6274
6275
6276 if (block_cnt == 0)
6277 last_xritag = NO_XRI;
6278
6279
6280 post_cnt = 0;
6281 }
6282
6283 phba->sli4_hba.els_xri_cnt = total_cnt;
6284
6285
6286 lpfc_free_sgl_list(phba, &free_sgl_list);
6287
6288
6289 if (!list_empty(&post_sgl_list)) {
6290 spin_lock_irq(&phba->hbalock);
6291 spin_lock(&pring->ring_lock);
6292 list_splice_init(&post_sgl_list,
6293 &phba->sli4_hba.lpfc_sgl_list);
6294 spin_unlock(&pring->ring_lock);
6295 spin_unlock_irq(&phba->hbalock);
6296 } else {
6297 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
6298 "3161 Failure to post els sgl to port.\n");
6299 return -EIO;
6300 }
6301 return 0;
6302}
6303
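/**
 * lpfc_set_host_data - Build a SET_HOST_DATA mailbox command
 * @phba: pointer to lpfc hba data structure.
 * @mbox: caller-allocated mailbox to populate.
 *
 * Reports the host OS and driver version string to the firmware. The
 * caller issues the command.
 **/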
6304void
6305lpfc_set_host_data(struct lpfc_hba *phba, LPFC_MBOXQ_t *mbox)
6306{
6307 uint32_t len;
6308
6309 len = sizeof(struct lpfc_mbx_set_host_data) -
6310 sizeof(struct lpfc_sli4_cfg_mhdr);
6311 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON,
6312 LPFC_MBOX_OPCODE_SET_HOST_DATA, len,
6313 LPFC_SLI4_MBX_EMBED);
6314
6315 mbox->u.mqe.un.set_host_data.param_id = LPFC_SET_HOST_OS_DRIVER_VERSION;
6316 mbox->u.mqe.un.set_host_data.param_len = 8;
6317 snprintf(mbox->u.mqe.un.set_host_data.data,
6318 LPFC_HOST_OS_DRIVER_VERSION_SIZE,
6319 "Linux %s v"LPFC_DRIVER_VERSION,
6320 (phba->hba_flag & HBA_FCOE_MODE) ? "FCoE" : "FC");
6321}
6322
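/**
 * lpfc_sli4_hba_setup - The main SLI-4 device initialization function
 * @phba: pointer to lpfc hba data structure.
 *
 * Resets the PCI function, reads the port revision and VPD, negotiates
 * features, allocates the resource identifiers, posts the ELS and SCSI
 * sgl lists and rpi headers, creates and arms the queue set, starts the
 * driver timers and finally initializes the link unless it has been
 * administratively disabled. Returns zero on success; on failure it
 * returns an error status after unwinding whatever had been set up.
 **/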
6332int
6333lpfc_sli4_hba_setup(struct lpfc_hba *phba)
6334{
6335 int rc;
6336 LPFC_MBOXQ_t *mboxq;
6337 struct lpfc_mqe *mqe;
6338 uint8_t *vpd;
6339 uint32_t vpd_size;
6340 uint32_t ftr_rsp = 0;
6341 struct Scsi_Host *shost = lpfc_shost_from_vport(phba->pport);
6342 struct lpfc_vport *vport = phba->pport;
6343 struct lpfc_dmabuf *mp;
6344
6345
6346 rc = lpfc_pci_function_reset(phba);
6347 if (unlikely(rc))
6348 return -ENODEV;
6349
6350
	rc = lpfc_sli4_post_status_check(phba);
	if (unlikely(rc))
		return -ENODEV;
	spin_lock_irq(&phba->hbalock);
	phba->sli.sli_flag |= LPFC_SLI_ACTIVE;
	spin_unlock_irq(&phba->hbalock);
6359
6360
6361
6362
6363
6364 mboxq = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
6365 if (!mboxq)
6366 return -ENOMEM;
6367
6368
6369 vpd_size = SLI4_PAGE_SIZE;
6370 vpd = kzalloc(vpd_size, GFP_KERNEL);
6371 if (!vpd) {
6372 rc = -ENOMEM;
6373 goto out_free_mbox;
6374 }
6375
6376 rc = lpfc_sli4_read_rev(phba, mboxq, vpd, &vpd_size);
6377 if (unlikely(rc)) {
6378 kfree(vpd);
6379 goto out_free_mbox;
6380 }
6381
6382 mqe = &mboxq->u.mqe;
6383 phba->sli_rev = bf_get(lpfc_mbx_rd_rev_sli_lvl, &mqe->un.read_rev);
6384 if (bf_get(lpfc_mbx_rd_rev_fcoe, &mqe->un.read_rev)) {
6385 phba->hba_flag |= HBA_FCOE_MODE;
6386 phba->fcp_embed_io = 0;
6387 } else {
6388 phba->hba_flag &= ~HBA_FCOE_MODE;
6389 }
6390
6391 if (bf_get(lpfc_mbx_rd_rev_cee_ver, &mqe->un.read_rev) ==
6392 LPFC_DCBX_CEE_MODE)
6393 phba->hba_flag |= HBA_FIP_SUPPORT;
6394 else
6395 phba->hba_flag &= ~HBA_FIP_SUPPORT;
6396
6397 phba->hba_flag &= ~HBA_FCP_IOQ_FLUSH;
6398
6399 if (phba->sli_rev != LPFC_SLI_REV4) {
6400 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
6401 "0376 READ_REV Error. SLI Level %d "
6402 "FCoE enabled %d\n",
6403 phba->sli_rev, phba->hba_flag & HBA_FCOE_MODE);
6404 rc = -EIO;
6405 kfree(vpd);
6406 goto out_free_mbox;
6407 }
6408
6409
6410
6411
6412
6413
6414 if (phba->hba_flag & HBA_FCOE_MODE &&
6415 lpfc_sli4_read_fcoe_params(phba))
6416 lpfc_printf_log(phba, KERN_WARNING, LOG_MBOX | LOG_INIT,
6417 "2570 Failed to read FCoE parameters\n");
6418
6419
6420
6421
6422
6423 rc = lpfc_sli4_retrieve_pport_name(phba);
6424 if (!rc)
6425 lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI,
6426 "3080 Successful retrieving SLI4 device "
6427 "physical port name: %s.\n", phba->Port);
6428
6429
6430
6431
6432
6433
6434 rc = lpfc_parse_vpd(phba, vpd, vpd_size);
6435 if (unlikely(!rc)) {
6436 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
6437 "0377 Error %d parsing vpd. "
6438 "Using defaults.\n", rc);
6439 rc = 0;
6440 }
6441 kfree(vpd);
6442
6443
6444 phba->vpd.rev.biuRev = mqe->un.read_rev.first_hw_rev;
6445 phba->vpd.rev.smRev = mqe->un.read_rev.second_hw_rev;
6446 phba->vpd.rev.endecRev = mqe->un.read_rev.third_hw_rev;
6447 phba->vpd.rev.fcphHigh = bf_get(lpfc_mbx_rd_rev_fcph_high,
6448 &mqe->un.read_rev);
6449 phba->vpd.rev.fcphLow = bf_get(lpfc_mbx_rd_rev_fcph_low,
6450 &mqe->un.read_rev);
6451 phba->vpd.rev.feaLevelHigh = bf_get(lpfc_mbx_rd_rev_ftr_lvl_high,
6452 &mqe->un.read_rev);
6453 phba->vpd.rev.feaLevelLow = bf_get(lpfc_mbx_rd_rev_ftr_lvl_low,
6454 &mqe->un.read_rev);
6455 phba->vpd.rev.sli1FwRev = mqe->un.read_rev.fw_id_rev;
6456 memcpy(phba->vpd.rev.sli1FwName, mqe->un.read_rev.fw_name, 16);
6457 phba->vpd.rev.sli2FwRev = mqe->un.read_rev.ulp_fw_id_rev;
6458 memcpy(phba->vpd.rev.sli2FwName, mqe->un.read_rev.ulp_fw_name, 16);
6459 phba->vpd.rev.opFwRev = mqe->un.read_rev.fw_id_rev;
6460 memcpy(phba->vpd.rev.opFwName, mqe->un.read_rev.fw_name, 16);
6461 lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI,
6462 "(%d):0380 READ_REV Status x%x "
6463 "fw_rev:%s fcphHi:%x fcphLo:%x flHi:%x flLo:%x\n",
6464 mboxq->vport ? mboxq->vport->vpi : 0,
6465 bf_get(lpfc_mqe_status, mqe),
6466 phba->vpd.rev.opFwName,
6467 phba->vpd.rev.fcphHigh, phba->vpd.rev.fcphLow,
6468 phba->vpd.rev.feaLevelHigh, phba->vpd.rev.feaLevelLow);
6469
6470
6471 rc = (phba->sli4_hba.max_cfg_param.max_xri >> 3);
6472 if (phba->pport->cfg_lun_queue_depth > rc) {
6473 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
6474 "3362 LUN queue depth changed from %d to %d\n",
6475 phba->pport->cfg_lun_queue_depth, rc);
6476 phba->pport->cfg_lun_queue_depth = rc;
6477 }
6478
6479 if (bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) ==
6480 LPFC_SLI_INTF_IF_TYPE_0) {
6481 lpfc_set_features(phba, mboxq, LPFC_SET_UE_RECOVERY);
6482 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
6483 if (rc == MBX_SUCCESS) {
6484 phba->hba_flag |= HBA_RECOVERABLE_UE;
6485
6486 phba->eratt_poll_interval = 1;
6487 phba->sli4_hba.ue_to_sr = bf_get(
6488 lpfc_mbx_set_feature_UESR,
6489 &mboxq->u.mqe.un.set_feature);
6490 phba->sli4_hba.ue_to_rp = bf_get(
6491 lpfc_mbx_set_feature_UERP,
6492 &mboxq->u.mqe.un.set_feature);
6493 }
6494 }
6495
6496 if (phba->cfg_enable_mds_diags && phba->mds_diags_support) {
6497
6498 lpfc_set_features(phba, mboxq, LPFC_SET_MDS_DIAGS);
6499 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
6500 if (rc != MBX_SUCCESS)
6501 phba->mds_diags_support = 0;
6502 }
6503
6504
6505
6506
6507
6508 lpfc_request_features(phba, mboxq);
6509 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
6510 if (unlikely(rc)) {
6511 rc = -EIO;
6512 goto out_free_mbox;
6513 }
6514
6515
6516
6517
6518
6519 if (!(bf_get(lpfc_mbx_rq_ftr_rsp_fcpi, &mqe->un.req_ftrs))) {
6520 lpfc_printf_log(phba, KERN_WARNING, LOG_MBOX | LOG_SLI,
6521 "0378 No support for fcpi mode.\n");
6522 ftr_rsp++;
6523 }
6524 if (bf_get(lpfc_mbx_rq_ftr_rsp_perfh, &mqe->un.req_ftrs))
6525 phba->sli3_options |= LPFC_SLI4_PERFH_ENABLED;
6526 else
6527 phba->sli3_options &= ~LPFC_SLI4_PERFH_ENABLED;
6528
6529
6530
6531
6532
6533 phba->sli3_options &= ~LPFC_SLI3_BG_ENABLED;
6534 if (phba->cfg_enable_bg) {
6535 if (bf_get(lpfc_mbx_rq_ftr_rsp_dif, &mqe->un.req_ftrs))
6536 phba->sli3_options |= LPFC_SLI3_BG_ENABLED;
6537 else
6538 ftr_rsp++;
6539 }
6540
6541 if (phba->max_vpi && phba->cfg_enable_npiv &&
6542 !(bf_get(lpfc_mbx_rq_ftr_rsp_npiv, &mqe->un.req_ftrs)))
6543 ftr_rsp++;
6544
6545 if (ftr_rsp) {
6546 lpfc_printf_log(phba, KERN_WARNING, LOG_MBOX | LOG_SLI,
6547 "0379 Feature Mismatch Data: x%08x %08x "
6548 "x%x x%x x%x\n", mqe->un.req_ftrs.word2,
6549 mqe->un.req_ftrs.word3, phba->cfg_enable_bg,
6550 phba->cfg_enable_npiv, phba->max_vpi);
6551 if (!(bf_get(lpfc_mbx_rq_ftr_rsp_dif, &mqe->un.req_ftrs)))
6552 phba->cfg_enable_bg = 0;
6553 if (!(bf_get(lpfc_mbx_rq_ftr_rsp_npiv, &mqe->un.req_ftrs)))
6554 phba->cfg_enable_npiv = 0;
6555 }
6556
6557
6558 spin_lock_irq(&phba->hbalock);
6559 phba->sli3_options |= (LPFC_SLI3_NPIV_ENABLED | LPFC_SLI3_HBQ_ENABLED);
6560 spin_unlock_irq(&phba->hbalock);
6561
6562
6563
6564
6565
6566 rc = lpfc_sli4_alloc_resource_identifiers(phba);
6567 if (rc) {
6568 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
6569 "2920 Failed to alloc Resource IDs "
6570 "rc = x%x\n", rc);
6571 goto out_free_mbox;
6572 }
6573
6574 lpfc_set_host_data(phba, mboxq);
6575
6576 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
	if (rc) {
		lpfc_printf_log(phba, KERN_WARNING, LOG_MBOX | LOG_SLI,
				"2134 Failed to set host os driver version "
				"%x\n", rc);
	}
6582
6583
6584 rc = lpfc_read_sparam(phba, mboxq, vport->vpi);
6585 if (rc) {
6586 phba->link_state = LPFC_HBA_ERROR;
6587 rc = -ENOMEM;
6588 goto out_free_mbox;
6589 }
6590
6591 mboxq->vport = vport;
6592 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
6593 mp = (struct lpfc_dmabuf *) mboxq->context1;
6594 if (rc == MBX_SUCCESS) {
6595 memcpy(&vport->fc_sparam, mp->virt, sizeof(struct serv_parm));
6596 rc = 0;
6597 }
6598
6599
6600
6601
6602
6603 lpfc_mbuf_free(phba, mp->virt, mp->phys);
6604 kfree(mp);
6605 mboxq->context1 = NULL;
6606 if (unlikely(rc)) {
6607 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
6608 "0382 READ_SPARAM command failed "
6609 "status %d, mbxStatus x%x\n",
6610 rc, bf_get(lpfc_mqe_status, mqe));
6611 phba->link_state = LPFC_HBA_ERROR;
6612 rc = -EIO;
6613 goto out_free_mbox;
6614 }
6615
6616 lpfc_update_vport_wwn(vport);
6617
6618
6619 fc_host_node_name(shost) = wwn_to_u64(vport->fc_nodename.u.wwn);
6620 fc_host_port_name(shost) = wwn_to_u64(vport->fc_portname.u.wwn);
6621
6622
6623 rc = lpfc_sli4_xri_sgl_update(phba);
6624 if (unlikely(rc)) {
6625 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
6626 "1400 Failed to update xri-sgl size and "
6627 "mapping: %d\n", rc);
6628 goto out_free_mbox;
6629 }
6630
6631
6632 rc = lpfc_sli4_repost_els_sgl_list(phba);
6633 if (unlikely(rc)) {
6634 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
6635 "0582 Error %d during els sgl post "
6636 "operation\n", rc);
6637 rc = -ENODEV;
6638 goto out_free_mbox;
6639 }
6640
6641
6642 rc = lpfc_sli4_repost_scsi_sgl_list(phba);
6643 if (unlikely(rc)) {
6644 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
6645 "0383 Error %d during scsi sgl post "
6646 "operation\n", rc);
6647
6648
6649 rc = -ENODEV;
6650 goto out_free_mbox;
6651 }
6652
6653
6654 rc = lpfc_sli4_post_all_rpi_hdrs(phba);
6655 if (unlikely(rc)) {
6656 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
6657 "0393 Error %d during rpi post operation\n",
6658 rc);
6659 rc = -ENODEV;
6660 goto out_free_mbox;
6661 }
6662 lpfc_sli4_node_prep(phba);
6663
6664
6665 rc = lpfc_sli4_queue_create(phba);
6666 if (rc) {
6667 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
6668 "3089 Failed to allocate queues\n");
6669 rc = -ENODEV;
6670 goto out_stop_timers;
6671 }
6672
6673 rc = lpfc_sli4_queue_setup(phba);
6674 if (unlikely(rc)) {
		lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
				"0381 Error %d during queue setup\n", rc);
6677 goto out_destroy_queue;
6678 }
6679
6680
6681 lpfc_sli4_arm_cqeq_intr(phba);
6682
6683
6684 phba->sli4_hba.intr_enable = 1;
6685
6686
6687 spin_lock_irq(&phba->hbalock);
6688 phba->sli.sli_flag &= ~LPFC_SLI_ASYNC_MBX_BLK;
6689 spin_unlock_irq(&phba->hbalock);
6690
6691
6692 lpfc_sli4_rb_setup(phba);
6693
6694
6695 phba->fcf.fcf_flag = 0;
6696 phba->fcf.current_rec.flag = 0;
6697
6698
6699 mod_timer(&vport->els_tmofunc,
6700 jiffies + msecs_to_jiffies(1000 * (phba->fc_ratov * 2)));
6701
6702
6703 mod_timer(&phba->hb_tmofunc,
6704 jiffies + msecs_to_jiffies(1000 * LPFC_HB_MBOX_INTERVAL));
6705 phba->hb_outstanding = 0;
6706 phba->last_completion_time = jiffies;
6707
6708
6709 mod_timer(&phba->eratt_poll,
6710 jiffies + msecs_to_jiffies(1000 * phba->eratt_poll_interval));
6711
6712
6713 if (phba->cfg_aer_support == 1 && !(phba->hba_flag & HBA_AER_ENABLED)) {
6714 rc = pci_enable_pcie_error_reporting(phba->pcidev);
6715 if (!rc) {
6716 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
6717 "2829 This device supports "
6718 "Advanced Error Reporting (AER)\n");
6719 spin_lock_irq(&phba->hbalock);
6720 phba->hba_flag |= HBA_AER_ENABLED;
6721 spin_unlock_irq(&phba->hbalock);
6722 } else {
6723 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
6724 "2830 This device does not support "
6725 "Advanced Error Reporting (AER)\n");
6726 phba->cfg_aer_support = 0;
6727 }
6728 rc = 0;
6729 }
6730
6731 if (!(phba->hba_flag & HBA_FCOE_MODE)) {
6732
6733
6734
6735 lpfc_reg_fcfi(phba, mboxq);
6736 mboxq->vport = phba->pport;
6737 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
6738 if (rc != MBX_SUCCESS)
6739 goto out_unset_queue;
6740 rc = 0;
6741 phba->fcf.fcfi = bf_get(lpfc_reg_fcfi_fcfi,
6742 &mboxq->u.mqe.un.reg_fcfi);
6743
6744
6745 lpfc_sli_read_link_ste(phba);
6746 }
6747
6748
6749
6750
6751
6752 spin_lock_irq(&phba->hbalock);
6753 phba->link_state = LPFC_LINK_DOWN;
6754 spin_unlock_irq(&phba->hbalock);
6755 if (!(phba->hba_flag & HBA_FCOE_MODE) &&
6756 (phba->hba_flag & LINK_DISABLED)) {
6757 lpfc_printf_log(phba, KERN_ERR, LOG_INIT | LOG_SLI,
6758 "3103 Adapter Link is disabled.\n");
6759 lpfc_down_link(phba, mboxq);
6760 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
6761 if (rc != MBX_SUCCESS) {
6762 lpfc_printf_log(phba, KERN_ERR, LOG_INIT | LOG_SLI,
6763 "3104 Adapter failed to issue "
6764 "DOWN_LINK mbox cmd, rc:x%x\n", rc);
6765 goto out_unset_queue;
6766 }
6767 } else if (phba->cfg_suppress_link_up == LPFC_INITIALIZE_LINK) {
6768
6769 if (!(phba->link_flag & LS_LOOPBACK_MODE)) {
6770 rc = phba->lpfc_hba_init_link(phba, MBX_NOWAIT);
6771 if (rc)
6772 goto out_unset_queue;
6773 }
6774 }
6775 mempool_free(mboxq, phba->mbox_mem_pool);
6776 return rc;
6777out_unset_queue:
6778
6779 lpfc_sli4_queue_unset(phba);
6780out_destroy_queue:
6781 lpfc_sli4_queue_destroy(phba);
6782out_stop_timers:
6783 lpfc_stop_hba_timers(phba);
6784out_free_mbox:
6785 mempool_free(mboxq, phba->mbox_mem_pool);
6786 return rc;
6787}
6788
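/**
 * lpfc_mbox_timeout - Timer callback for an outstanding mailbox command
 * @ptr: pointer to the lpfc hba data structure, cast from unsigned long.
 *
 * Runs in timer context, so it only posts the WORKER_MBOX_TMO event and
 * wakes the worker thread; lpfc_mbox_timeout_handler() does the actual
 * timeout processing.
 **/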
void
lpfc_mbox_timeout(unsigned long ptr)
{
	struct lpfc_hba *phba = (struct lpfc_hba *) ptr;
	unsigned long iflag;
	uint32_t tmo_posted;

	/* Post the event only if it is not already posted */
	spin_lock_irqsave(&phba->pport->work_port_lock, iflag);
	tmo_posted = phba->pport->work_port_events & WORKER_MBOX_TMO;
	if (!tmo_posted)
		phba->pport->work_port_events |= WORKER_MBOX_TMO;
	spin_unlock_irqrestore(&phba->pport->work_port_lock, iflag);

	if (!tmo_posted)
		lpfc_worker_wake_up(phba);
	return;
}
6818
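/**
 * lpfc_sli4_mbox_completions_pending - Check the MCQ for pending work
 * @phba: pointer to lpfc hba data structure.
 *
 * Scans the valid entries of the mailbox completion queue from the
 * current hba_index and returns true if any synchronous (non-async)
 * mailbox completion is waiting to be processed.
 **/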
static bool
lpfc_sli4_mbox_completions_pending(struct lpfc_hba *phba)
{
	uint32_t idx;
	struct lpfc_queue *mcq;
	struct lpfc_mcqe *mcqe;
	bool pending_completions = false;

	if (unlikely(!phba) || (phba->sli_rev != LPFC_SLI_REV4))
		return false;

	/* Check for completions on the mailbox completion queue */
	mcq = phba->sli4_hba.mbx_cq;
	idx = mcq->hba_index;
	while (bf_get_le32(lpfc_cqe_valid, mcq->qe[idx].cqe)) {
		mcqe = (struct lpfc_mcqe *)mcq->qe[idx].cqe;
		if (bf_get_le32(lpfc_trailer_completed, mcqe) &&
		    (!bf_get_le32(lpfc_trailer_async, mcqe))) {
			pending_completions = true;
			break;
		}
		idx = (idx + 1) % mcq->entry_count;
		if (mcq->hba_index == idx)
			break;
	}
	return pending_completions;
}
6857
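/**
 * lpfc_sli4_process_missed_mbox_completions - Recover a lost mbox event
 * @phba: pointer to lpfc hba data structure.
 *
 * Called when a mailbox completion may have been missed. Finds the
 * event queue that serves the mailbox completion queue, disables its
 * interrupt, and if a completion is pending processes the event queue
 * by hand before rearming it. Returns true if pending mailbox work was
 * found and processed.
 **/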
bool
lpfc_sli4_process_missed_mbox_completions(struct lpfc_hba *phba)
{
	uint32_t eqidx;
	struct lpfc_queue *fpeq = NULL;
	struct lpfc_eqe *eqe;
	bool mbox_pending;

	if (unlikely(!phba) || (phba->sli_rev != LPFC_SLI_REV4))
		return false;

	/* Find the event queue associated with the mailbox CQ */
	if (phba->sli4_hba.hba_eq)
		for (eqidx = 0; eqidx < phba->cfg_fcp_io_channel; eqidx++)
			if (phba->sli4_hba.hba_eq[eqidx]->queue_id ==
			    phba->sli4_hba.mbx_cq->assoc_qid) {
				fpeq = phba->sli4_hba.hba_eq[eqidx];
				break;
			}
	if (!fpeq)
		return false;

	/* Turn off interrupts from this EQ */
	lpfc_sli4_eq_clr_intr(fpeq);

	/* Check to see if a mbox completion is pending */
	mbox_pending = lpfc_sli4_mbox_completions_pending(phba);

	/*
	 * If a mbox completion is pending, process all the events on the
	 * EQ by hand; the mailbox completion is handled as part of the
	 * completion-queue processing triggered here.
	 */
	if (mbox_pending)
		while ((eqe = lpfc_sli4_eq_get(fpeq))) {
			lpfc_sli4_hba_handle_eqe(phba, eqe, eqidx);
			fpeq->EQ_processed++;
		}

	/* Always clear and re-arm the EQ */
	lpfc_sli4_eq_release(fpeq, LPFC_QUEUE_REARM);

	return mbox_pending;
}
6921
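/**
 * lpfc_mbox_timeout_handler - Worker-thread mailbox timeout handling
 * @phba: pointer to lpfc hba data structure.
 *
 * First gives the port a chance to deliver a missed completion via
 * lpfc_sli4_process_missed_mbox_completions(). If the mailbox really is
 * stuck, logs the timeout, marks the link state unknown, aborts the
 * outstanding FCP rings and resets the board.
 **/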
void
lpfc_mbox_timeout_handler(struct lpfc_hba *phba)
{
	LPFC_MBOXQ_t *pmbox = phba->sli.mbox_active;
	MAILBOX_t *mb = NULL;
	struct lpfc_sli *psli = &phba->sli;

	/* If the mailbox completed, process the completion and return */
	if (lpfc_sli4_process_missed_mbox_completions(phba))
		return;

	if (pmbox != NULL)
		mb = &pmbox->u.mb;
	/*
	 * Check the pmbox pointer first.  There is a race between the
	 * timeout handler running on the worklist and the mailbox
	 * actually completing; when that race occurs, mbox_active is
	 * already NULL.
	 */
	spin_lock_irq(&phba->hbalock);
	if (pmbox == NULL) {
		lpfc_printf_log(phba, KERN_WARNING,
				LOG_MBOX | LOG_SLI,
				"0353 Active Mailbox cleared - mailbox timeout "
				"exiting\n");
		spin_unlock_irq(&phba->hbalock);
		return;
	}

	/* Mbox cmd <mbxCommand> timeout */
	lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
			"0310 Mailbox command x%x timeout Data: x%x x%x x%p\n",
			mb->mbxCommand,
			phba->pport->port_state,
			phba->sli.sli_flag,
			phba->sli.mbox_active);
	spin_unlock_irq(&phba->hbalock);

	/*
	 * Set link state to unknown and clear LPFC_SLI_ACTIVE so that
	 * outstanding SCSI I/O can be failed before the board is reset.
	 */
	spin_lock_irq(&phba->pport->work_port_lock);
	phba->pport->work_port_events &= ~WORKER_MBOX_TMO;
	spin_unlock_irq(&phba->pport->work_port_lock);
	spin_lock_irq(&phba->hbalock);
	phba->link_state = LPFC_LINK_UNKNOWN;
	psli->sli_flag &= ~LPFC_SLI_ACTIVE;
	spin_unlock_irq(&phba->hbalock);

	lpfc_sli_abort_fcp_rings(phba);

	lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
			"0345 Resetting board due to mailbox timeout\n");

	/* Reset the HBA device */
	lpfc_reset_hba(phba);
}
6988
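/**
 * lpfc_sli_issue_mbox_s3 - Issue a mailbox command on an SLI-3 port
 * @phba: pointer to lpfc hba data structure.
 * @pmbox: mailbox command to issue, or NULL to pump the pending queue.
 * @flag: MBX_NOWAIT to return right after issuing, or MBX_POLL to spin
 *        for completion.
 *
 * Copies the command into host memory or directly into SLIM, depending
 * on whether the SLI layer is active, rings the chip-attention doorbell
 * and either returns immediately (MBX_NOWAIT) or polls the ownership
 * and attention bits until the command completes or times out
 * (MBX_POLL). Returns the mailbox status on completion, MBX_BUSY when
 * another command already owns the mailbox, or MBX_NOT_FINISHED on
 * failure.
 **/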
7015static int
7016lpfc_sli_issue_mbox_s3(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmbox,
7017 uint32_t flag)
7018{
7019 MAILBOX_t *mbx;
7020 struct lpfc_sli *psli = &phba->sli;
7021 uint32_t status, evtctr;
7022 uint32_t ha_copy, hc_copy;
7023 int i;
7024 unsigned long timeout;
7025 unsigned long drvr_flag = 0;
7026 uint32_t word0, ldata;
7027 void __iomem *to_slim;
7028 int processing_queue = 0;
7029
7030 spin_lock_irqsave(&phba->hbalock, drvr_flag);
7031 if (!pmbox) {
7032 phba->sli.sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
7033
7034 if (unlikely(psli->sli_flag & LPFC_SLI_ASYNC_MBX_BLK)) {
7035 spin_unlock_irqrestore(&phba->hbalock, drvr_flag);
7036 return MBX_SUCCESS;
7037 }
7038 processing_queue = 1;
7039 pmbox = lpfc_mbox_get(phba);
7040 if (!pmbox) {
7041 spin_unlock_irqrestore(&phba->hbalock, drvr_flag);
7042 return MBX_SUCCESS;
7043 }
7044 }
7045
	if (pmbox->mbox_cmpl && pmbox->mbox_cmpl != lpfc_sli_def_mbox_cmpl &&
	    pmbox->mbox_cmpl != lpfc_sli_wake_mbox_wait) {
		if (!pmbox->vport) {
			spin_unlock_irqrestore(&phba->hbalock, drvr_flag);
			lpfc_printf_log(phba, KERN_ERR,
					LOG_MBOX | LOG_VPORT,
					"1806 Mbox x%x failed. No vport\n",
					pmbox->u.mb.mbxCommand);
			dump_stack();
			goto out_not_finished;
		}
	}
7058
7059
7060 if (unlikely(pci_channel_offline(phba->pcidev))) {
7061 spin_unlock_irqrestore(&phba->hbalock, drvr_flag);
7062 goto out_not_finished;
7063 }
7064
7065
7066 if (unlikely(phba->hba_flag & DEFER_ERATT)) {
7067 spin_unlock_irqrestore(&phba->hbalock, drvr_flag);
7068 goto out_not_finished;
7069 }
7070
7071 psli = &phba->sli;
7072
7073 mbx = &pmbox->u.mb;
7074 status = MBX_SUCCESS;
7075
7076 if (phba->link_state == LPFC_HBA_ERROR) {
7077 spin_unlock_irqrestore(&phba->hbalock, drvr_flag);
7078
7079
7080 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
7081 "(%d):0311 Mailbox command x%x cannot "
7082 "issue Data: x%x x%x\n",
7083 pmbox->vport ? pmbox->vport->vpi : 0,
7084 pmbox->u.mb.mbxCommand, psli->sli_flag, flag);
7085 goto out_not_finished;
7086 }
7087
7088 if (mbx->mbxCommand != MBX_KILL_BOARD && flag & MBX_NOWAIT) {
7089 if (lpfc_readl(phba->HCregaddr, &hc_copy) ||
7090 !(hc_copy & HC_MBINT_ENA)) {
7091 spin_unlock_irqrestore(&phba->hbalock, drvr_flag);
7092 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
7093 "(%d):2528 Mailbox command x%x cannot "
7094 "issue Data: x%x x%x\n",
7095 pmbox->vport ? pmbox->vport->vpi : 0,
7096 pmbox->u.mb.mbxCommand, psli->sli_flag, flag);
7097 goto out_not_finished;
7098 }
7099 }
7100
7101 if (psli->sli_flag & LPFC_SLI_MBOX_ACTIVE) {
7102
7103
7104
7105
7106
7107 if (flag & MBX_POLL) {
7108 spin_unlock_irqrestore(&phba->hbalock, drvr_flag);
7109
7110
7111 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
7112 "(%d):2529 Mailbox command x%x "
7113 "cannot issue Data: x%x x%x\n",
7114 pmbox->vport ? pmbox->vport->vpi : 0,
7115 pmbox->u.mb.mbxCommand,
7116 psli->sli_flag, flag);
7117 goto out_not_finished;
7118 }
7119
7120 if (!(psli->sli_flag & LPFC_SLI_ACTIVE)) {
7121 spin_unlock_irqrestore(&phba->hbalock, drvr_flag);
7122
7123 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
7124 "(%d):2530 Mailbox command x%x "
7125 "cannot issue Data: x%x x%x\n",
7126 pmbox->vport ? pmbox->vport->vpi : 0,
7127 pmbox->u.mb.mbxCommand,
7128 psli->sli_flag, flag);
7129 goto out_not_finished;
7130 }
7131
7132
7133
7134
7135 lpfc_mbox_put(phba, pmbox);
7136
7137
7138 lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI,
7139 "(%d):0308 Mbox cmd issue - BUSY Data: "
7140 "x%x x%x x%x x%x\n",
7141 pmbox->vport ? pmbox->vport->vpi : 0xffffff,
7142 mbx->mbxCommand, phba->pport->port_state,
7143 psli->sli_flag, flag);
7144
7145 psli->slistat.mbox_busy++;
7146 spin_unlock_irqrestore(&phba->hbalock, drvr_flag);
7147
7148 if (pmbox->vport) {
7149 lpfc_debugfs_disc_trc(pmbox->vport,
7150 LPFC_DISC_TRC_MBOX_VPORT,
7151 "MBOX Bsy vport: cmd:x%x mb:x%x x%x",
7152 (uint32_t)mbx->mbxCommand,
7153 mbx->un.varWords[0], mbx->un.varWords[1]);
7154 }
7155 else {
7156 lpfc_debugfs_disc_trc(phba->pport,
7157 LPFC_DISC_TRC_MBOX,
7158 "MBOX Bsy: cmd:x%x mb:x%x x%x",
7159 (uint32_t)mbx->mbxCommand,
7160 mbx->un.varWords[0], mbx->un.varWords[1]);
7161 }
7162
7163 return MBX_BUSY;
7164 }
7165
7166 psli->sli_flag |= LPFC_SLI_MBOX_ACTIVE;
7167
7168
7169 if (flag != MBX_POLL) {
7170 if (!(psli->sli_flag & LPFC_SLI_ACTIVE) &&
7171 (mbx->mbxCommand != MBX_KILL_BOARD)) {
7172 psli->sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
7173 spin_unlock_irqrestore(&phba->hbalock, drvr_flag);
7174
7175 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
7176 "(%d):2531 Mailbox command x%x "
7177 "cannot issue Data: x%x x%x\n",
7178 pmbox->vport ? pmbox->vport->vpi : 0,
7179 pmbox->u.mb.mbxCommand,
7180 psli->sli_flag, flag);
7181 goto out_not_finished;
7182 }
7183
7184 timeout = msecs_to_jiffies(lpfc_mbox_tmo_val(phba, pmbox) *
7185 1000);
7186 mod_timer(&psli->mbox_tmo, jiffies + timeout);
7187 }
7188
7189
7190 lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI,
7191 "(%d):0309 Mailbox cmd x%x issue Data: x%x x%x "
7192 "x%x\n",
7193 pmbox->vport ? pmbox->vport->vpi : 0,
7194 mbx->mbxCommand, phba->pport->port_state,
7195 psli->sli_flag, flag);
7196
7197 if (mbx->mbxCommand != MBX_HEARTBEAT) {
7198 if (pmbox->vport) {
7199 lpfc_debugfs_disc_trc(pmbox->vport,
7200 LPFC_DISC_TRC_MBOX_VPORT,
7201 "MBOX Send vport: cmd:x%x mb:x%x x%x",
7202 (uint32_t)mbx->mbxCommand,
7203 mbx->un.varWords[0], mbx->un.varWords[1]);
7204 }
7205 else {
7206 lpfc_debugfs_disc_trc(phba->pport,
7207 LPFC_DISC_TRC_MBOX,
7208 "MBOX Send: cmd:x%x mb:x%x x%x",
7209 (uint32_t)mbx->mbxCommand,
7210 mbx->un.varWords[0], mbx->un.varWords[1]);
7211 }
7212 }
7213
7214 psli->slistat.mbox_cmd++;
7215 evtctr = psli->slistat.mbox_event;
7216
7217
7218 mbx->mbxOwner = OWN_CHIP;
7219
7220 if (psli->sli_flag & LPFC_SLI_ACTIVE) {
7221
7222 if (pmbox->in_ext_byte_len || pmbox->out_ext_byte_len) {
7223 *(((uint32_t *)mbx) + pmbox->mbox_offset_word)
7224 = (uint8_t *)phba->mbox_ext
7225 - (uint8_t *)phba->mbox;
7226 }
7227
7228
7229 if (pmbox->in_ext_byte_len && pmbox->context2) {
7230 lpfc_sli_pcimem_bcopy(pmbox->context2,
7231 (uint8_t *)phba->mbox_ext,
7232 pmbox->in_ext_byte_len);
7233 }
7234
7235 lpfc_sli_pcimem_bcopy(mbx, phba->mbox, MAILBOX_CMD_SIZE);
7236 } else {
7237
7238 if (pmbox->in_ext_byte_len || pmbox->out_ext_byte_len)
7239 *(((uint32_t *)mbx) + pmbox->mbox_offset_word)
7240 = MAILBOX_HBA_EXT_OFFSET;
7241
7242
7243 if (pmbox->in_ext_byte_len && pmbox->context2) {
7244 lpfc_memcpy_to_slim(phba->MBslimaddr +
7245 MAILBOX_HBA_EXT_OFFSET,
7246 pmbox->context2, pmbox->in_ext_byte_len);
7247
7248 }
7249 if (mbx->mbxCommand == MBX_CONFIG_PORT) {
7250
7251 lpfc_sli_pcimem_bcopy(mbx, phba->mbox, MAILBOX_CMD_SIZE);
7252 }
7253
7254
7255
7256 to_slim = phba->MBslimaddr + sizeof (uint32_t);
7257 lpfc_memcpy_to_slim(to_slim, &mbx->un.varWords[0],
7258 MAILBOX_CMD_SIZE - sizeof (uint32_t));
7259
7260
7261 ldata = *((uint32_t *)mbx);
7262 to_slim = phba->MBslimaddr;
7263 writel(ldata, to_slim);
7264 readl(to_slim);
7265
7266 if (mbx->mbxCommand == MBX_CONFIG_PORT) {
7267
7268 psli->sli_flag |= LPFC_SLI_ACTIVE;
7269 }
7270 }
7271
7272 wmb();
7273
7274 switch (flag) {
7275 case MBX_NOWAIT:
7276
7277 psli->mbox_active = pmbox;
7278
7279 writel(CA_MBATT, phba->CAregaddr);
7280 readl(phba->CAregaddr);
7281
7282 break;
7283
7284 case MBX_POLL:
7285
7286 psli->mbox_active = NULL;
7287
7288 writel(CA_MBATT, phba->CAregaddr);
7289 readl(phba->CAregaddr);
7290
7291 if (psli->sli_flag & LPFC_SLI_ACTIVE) {
7292
7293 word0 = *((uint32_t *)phba->mbox);
7294 word0 = le32_to_cpu(word0);
7295 } else {
7296
7297 if (lpfc_readl(phba->MBslimaddr, &word0)) {
7298 spin_unlock_irqrestore(&phba->hbalock,
7299 drvr_flag);
7300 goto out_not_finished;
7301 }
7302 }
7303
7304
7305 if (lpfc_readl(phba->HAregaddr, &ha_copy)) {
7306 spin_unlock_irqrestore(&phba->hbalock,
7307 drvr_flag);
7308 goto out_not_finished;
7309 }
7310 timeout = msecs_to_jiffies(lpfc_mbox_tmo_val(phba, pmbox) *
7311 1000) + jiffies;
7312 i = 0;
7313
7314 while (((word0 & OWN_CHIP) == OWN_CHIP) ||
7315 (!(ha_copy & HA_MBATT) &&
7316 (phba->link_state > LPFC_WARM_START))) {
7317 if (time_after(jiffies, timeout)) {
7318 psli->sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
7319 spin_unlock_irqrestore(&phba->hbalock,
7320 drvr_flag);
7321 goto out_not_finished;
7322 }
7323
7324
7325
7326 if (((word0 & OWN_CHIP) != OWN_CHIP)
7327 && (evtctr != psli->slistat.mbox_event))
7328 break;
7329
7330 if (i++ > 10) {
7331 spin_unlock_irqrestore(&phba->hbalock,
7332 drvr_flag);
7333 msleep(1);
7334 spin_lock_irqsave(&phba->hbalock, drvr_flag);
7335 }
7336
7337 if (psli->sli_flag & LPFC_SLI_ACTIVE) {
7338
7339 word0 = *((uint32_t *)phba->mbox);
7340 word0 = le32_to_cpu(word0);
7341 if (mbx->mbxCommand == MBX_CONFIG_PORT) {
7342 MAILBOX_t *slimmb;
7343 uint32_t slimword0;
7344
7345 slimword0 = readl(phba->MBslimaddr);
7346 slimmb = (MAILBOX_t *) & slimword0;
7347 if (((slimword0 & OWN_CHIP) != OWN_CHIP)
7348 && slimmb->mbxStatus) {
7349 psli->sli_flag &=
7350 ~LPFC_SLI_ACTIVE;
7351 word0 = slimword0;
7352 }
7353 }
7354 } else {
7355
7356 word0 = readl(phba->MBslimaddr);
7357 }
7358
7359 if (lpfc_readl(phba->HAregaddr, &ha_copy)) {
7360 spin_unlock_irqrestore(&phba->hbalock,
7361 drvr_flag);
7362 goto out_not_finished;
7363 }
7364 }
7365
7366 if (psli->sli_flag & LPFC_SLI_ACTIVE) {
7367
7368 lpfc_sli_pcimem_bcopy(phba->mbox, mbx, MAILBOX_CMD_SIZE);
7369
7370 if (pmbox->out_ext_byte_len && pmbox->context2) {
7371 lpfc_sli_pcimem_bcopy(phba->mbox_ext,
7372 pmbox->context2,
7373 pmbox->out_ext_byte_len);
7374 }
7375 } else {
7376
7377 lpfc_memcpy_from_slim(mbx, phba->MBslimaddr,
7378 MAILBOX_CMD_SIZE);
7379
7380 if (pmbox->out_ext_byte_len && pmbox->context2) {
7381 lpfc_memcpy_from_slim(pmbox->context2,
7382 phba->MBslimaddr +
7383 MAILBOX_HBA_EXT_OFFSET,
7384 pmbox->out_ext_byte_len);
7385 }
7386 }
7387
7388 writel(HA_MBATT, phba->HAregaddr);
7389 readl(phba->HAregaddr);
7390
7391 psli->sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
7392 status = mbx->mbxStatus;
7393 }
7394
7395 spin_unlock_irqrestore(&phba->hbalock, drvr_flag);
7396 return status;
7397
7398out_not_finished:
7399 if (processing_queue) {
7400 pmbox->u.mb.mbxStatus = MBX_NOT_FINISHED;
7401 lpfc_mbox_cmpl_put(phba, pmbox);
7402 }
7403 return MBX_NOT_FINISHED;
7404}
7405
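/**
 * lpfc_sli4_async_mbox_block - Block posting of asynchronous mailboxes
 * @phba: pointer to lpfc hba data structure.
 *
 * Sets LPFC_SLI_ASYNC_MBX_BLK and waits for any active mailbox command
 * to drain, bounded by that command's own timeout value. Returns 0 once
 * the mailbox is quiet, or 1 (clearing the block again) if the wait
 * timed out.
 **/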
static int
lpfc_sli4_async_mbox_block(struct lpfc_hba *phba)
{
	struct lpfc_sli *psli = &phba->sli;
	int rc = 0;
	unsigned long timeout = 0;

	/* Mark asynchronous mailbox command posting as blocked */
	spin_lock_irq(&phba->hbalock);
	psli->sli_flag |= LPFC_SLI_ASYNC_MBX_BLK;
	/* Determine how long we might wait for the active mailbox
	 * command to be gracefully completed by firmware.
	 */
	if (phba->sli.mbox_active)
		timeout = msecs_to_jiffies(lpfc_mbox_tmo_val(phba,
						phba->sli.mbox_active) *
						1000) + jiffies;
	spin_unlock_irq(&phba->hbalock);

	/* Make sure the mailbox is really active */
	if (timeout)
		lpfc_sli4_process_missed_mbox_completions(phba);

	/* Wait for the outstanding mailbox command to complete */
	while (phba->sli.mbox_active) {
		/* Check active mailbox complete status every 2ms */
		msleep(2);
		if (time_after(jiffies, timeout)) {
			/* Timed out; the outstanding command is stuck */
			rc = 1;
			break;
		}
	}

	/* Cannot cleanly block async mailbox commands, undo the block */
	if (rc) {
		spin_lock_irq(&phba->hbalock);
		psli->sli_flag &= ~LPFC_SLI_ASYNC_MBX_BLK;
		spin_unlock_irq(&phba->hbalock);
	}
	return rc;
}
7460
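/**
 * lpfc_sli4_async_mbox_unblock - Re-enable asynchronous mailbox posting
 * @phba: pointer to lpfc hba data structure.
 *
 * Clears LPFC_SLI_ASYNC_MBX_BLK and wakes the worker thread so any
 * mailbox commands queued while posting was blocked get issued.
 **/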
static void
lpfc_sli4_async_mbox_unblock(struct lpfc_hba *phba)
{
	struct lpfc_sli *psli = &phba->sli;

	spin_lock_irq(&phba->hbalock);
	if (!(psli->sli_flag & LPFC_SLI_ASYNC_MBX_BLK)) {
		/* Asynchronous mailbox posting is not blocked, do nothing */
		spin_unlock_irq(&phba->hbalock);
		return;
	}

	/*
	 * Clear the blocked flag before waking the worker thread so that
	 * queued mailbox commands can be issued.
	 */
	psli->sli_flag &= ~LPFC_SLI_ASYNC_MBX_BLK;
	spin_unlock_irq(&phba->hbalock);

	/* Wake up worker thread to transport queued mailbox commands */
	lpfc_worker_wake_up(phba);
}
7495
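/**
 * lpfc_sli4_wait_bmbx_ready - Wait for the bootstrap mailbox ready bit
 * @phba: pointer to lpfc hba data structure.
 * @mboxq: mailbox whose command timeout value bounds the wait.
 *
 * Polls the bootstrap mailbox register, sleeping 2 ms between reads,
 * until the ready bit is set. Returns 0 when ready or MBXERR_ERROR on
 * timeout.
 **/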
7507static int
7508lpfc_sli4_wait_bmbx_ready(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
7509{
7510 uint32_t db_ready;
7511 unsigned long timeout;
7512 struct lpfc_register bmbx_reg;
7513
7514 timeout = msecs_to_jiffies(lpfc_mbox_tmo_val(phba, mboxq)
7515 * 1000) + jiffies;
7516
7517 do {
7518 bmbx_reg.word0 = readl(phba->sli4_hba.BMBXregaddr);
7519 db_ready = bf_get(lpfc_bmbx_rdy, &bmbx_reg);
7520 if (!db_ready) {
7521 if (time_after(jiffies, timeout))
7522 return MBXERR_ERROR;
7523 msleep(2);
7524 }
7525 } while (!db_ready);
7526
7527 return 0;
7528}
7529
7530
7531
7532
7533
7534
7535
7536
7537
7538
7539
7540
7541
7542
7543
7544
7545
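/**
 * lpfc_sli4_post_sync_mbox - Post a mailbox through the bootstrap mailbox
 * @phba: Pointer to HBA context object.
 * @mboxq: Pointer to the mailbox command to post.
 *
 * Synchronously posts the command through the SLI4 bootstrap mailbox
 * region: the high and low words of the region's dma address are written
 * to the bootstrap register in turn, waiting for the port to report
 * ready at each step. On completion the mailbox payload and MCQE are
 * copied back and a failing MCQE status is folded into the MQE status.
 *
 * Returns MBX_SUCCESS on success, MBXERR_ERROR otherwise.
 **/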
7546static int
7547lpfc_sli4_post_sync_mbox(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
7548{
7549 int rc = MBX_SUCCESS;
7550 unsigned long iflag;
7551 uint32_t mcqe_status;
7552 uint32_t mbx_cmnd;
7553 struct lpfc_sli *psli = &phba->sli;
7554 struct lpfc_mqe *mb = &mboxq->u.mqe;
7555 struct lpfc_bmbx_create *mbox_rgn;
7556 struct dma_address *dma_address;
7557
7558
7559
7560
7561
7562 spin_lock_irqsave(&phba->hbalock, iflag);
7563 if (psli->sli_flag & LPFC_SLI_MBOX_ACTIVE) {
7564 spin_unlock_irqrestore(&phba->hbalock, iflag);
7565 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
7566 "(%d):2532 Mailbox command x%x (x%x/x%x) "
7567 "cannot issue Data: x%x x%x\n",
7568 mboxq->vport ? mboxq->vport->vpi : 0,
7569 mboxq->u.mb.mbxCommand,
7570 lpfc_sli_config_mbox_subsys_get(phba, mboxq),
7571 lpfc_sli_config_mbox_opcode_get(phba, mboxq),
7572 psli->sli_flag, MBX_POLL);
7573 return MBXERR_ERROR;
7574 }
7575
7576 psli->sli_flag |= LPFC_SLI_MBOX_ACTIVE;
7577 phba->sli.mbox_active = mboxq;
7578 spin_unlock_irqrestore(&phba->hbalock, iflag);
7579
7580
7581 rc = lpfc_sli4_wait_bmbx_ready(phba, mboxq);
7582 if (rc)
7583 goto exit;
7584
7585
7586
7587
7588
7589
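/* Clear the bootstrap mailbox region and copy in the new command */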
7590 mbx_cmnd = bf_get(lpfc_mqe_command, mb);
7591 memset(phba->sli4_hba.bmbx.avirt, 0, sizeof(struct lpfc_bmbx_create));
7592 lpfc_sli_pcimem_bcopy(mb, phba->sli4_hba.bmbx.avirt,
7593 sizeof(struct lpfc_mqe));
7594
7595
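/* Post the high word of the bootstrap mailbox dma address */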
7596 dma_address = &phba->sli4_hba.bmbx.dma_address;
7597 writel(dma_address->addr_hi, phba->sli4_hba.BMBXregaddr);
7598
7599
7600 rc = lpfc_sli4_wait_bmbx_ready(phba, mboxq);
7601 if (rc)
7602 goto exit;
7603
7604
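/* Post the low word of the bootstrap mailbox dma address */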
7605 writel(dma_address->addr_lo, phba->sli4_hba.BMBXregaddr);
7606
7607
7608 rc = lpfc_sli4_wait_bmbx_ready(phba, mboxq);
7609 if (rc)
7610 goto exit;
7611
7612
7613
7614
7615
7616
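/* Copy the mailbox result and MCQE back from the bootstrap region */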
7617 lpfc_sli_pcimem_bcopy(phba->sli4_hba.bmbx.avirt, mb,
7618 sizeof(struct lpfc_mqe));
7619 mbox_rgn = (struct lpfc_bmbx_create *) phba->sli4_hba.bmbx.avirt;
7620 lpfc_sli_pcimem_bcopy(&mbox_rgn->mcqe, &mboxq->mcqe,
7621 sizeof(struct lpfc_mcqe));
7622 mcqe_status = bf_get(lpfc_mcqe_status, &mbox_rgn->mcqe);
7623
7624
7625
7626
7627
7628 if (mcqe_status != MB_CQE_STATUS_SUCCESS) {
7629 if (bf_get(lpfc_mqe_status, mb) == MBX_SUCCESS)
7630 bf_set(lpfc_mqe_status, mb,
7631 (LPFC_MBX_ERROR_RANGE | mcqe_status));
7632 rc = MBXERR_ERROR;
7633 } else
7634 lpfc_sli4_swap_str(phba, mboxq);
7635
7636 lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI,
7637 "(%d):0356 Mailbox cmd x%x (x%x/x%x) Status x%x "
7638 "Data: x%x x%x x%x x%x x%x x%x x%x x%x x%x x%x x%x"
7639 " x%x x%x CQ: x%x x%x x%x x%x\n",
7640 mboxq->vport ? mboxq->vport->vpi : 0, mbx_cmnd,
7641 lpfc_sli_config_mbox_subsys_get(phba, mboxq),
7642 lpfc_sli_config_mbox_opcode_get(phba, mboxq),
7643 bf_get(lpfc_mqe_status, mb),
7644 mb->un.mb_words[0], mb->un.mb_words[1],
7645 mb->un.mb_words[2], mb->un.mb_words[3],
7646 mb->un.mb_words[4], mb->un.mb_words[5],
7647 mb->un.mb_words[6], mb->un.mb_words[7],
7648 mb->un.mb_words[8], mb->un.mb_words[9],
7649 mb->un.mb_words[10], mb->un.mb_words[11],
7650 mb->un.mb_words[12], mboxq->mcqe.word0,
7651 mboxq->mcqe.mcqe_tag0, mboxq->mcqe.mcqe_tag1,
7652 mboxq->mcqe.trailer);
7653exit:
7654
7655 spin_lock_irqsave(&phba->hbalock, iflag);
7656 psli->sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
7657 phba->sli.mbox_active = NULL;
7658 spin_unlock_irqrestore(&phba->hbalock, iflag);
7659 return rc;
7660}
7661
7662
7663
7664
7665
7666
7667
7668
7669
7670
7671
7672
7673
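/**
 * lpfc_sli_issue_mbox_s4 - Issue a mailbox command to an SLI4 port
 * @phba: Pointer to HBA context object.
 * @mboxq: Pointer to the mailbox command to issue.
 * @flag: MBX_POLL for synchronous or MBX_NOWAIT for asynchronous operation.
 *
 * With interrupts disabled, or when MBX_POLL is requested, the command is
 * posted synchronously through the bootstrap mailbox, blocking
 * asynchronous posting for the duration. Otherwise the command is
 * enqueued on the driver's mailbox queue and the worker thread is woken
 * to post it.
 *
 * Returns MBX_SUCCESS or MBX_BUSY on success, MBX_NOT_FINISHED or a
 * mailbox error code on failure.
 **/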
7674static int
7675lpfc_sli_issue_mbox_s4(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq,
7676 uint32_t flag)
7677{
7678 struct lpfc_sli *psli = &phba->sli;
7679 unsigned long iflags;
7680 int rc;
7681
7682
7683 lpfc_idiag_mbxacc_dump_issue_mbox(phba, &mboxq->u.mb);
7684
7685 rc = lpfc_mbox_dev_check(phba);
7686 if (unlikely(rc)) {
7687 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
7688 "(%d):2544 Mailbox command x%x (x%x/x%x) "
7689 "cannot issue Data: x%x x%x\n",
7690 mboxq->vport ? mboxq->vport->vpi : 0,
7691 mboxq->u.mb.mbxCommand,
7692 lpfc_sli_config_mbox_subsys_get(phba, mboxq),
7693 lpfc_sli_config_mbox_opcode_get(phba, mboxq),
7694 psli->sli_flag, flag);
7695 goto out_not_finished;
7696 }
7697
7698
7699 if (!phba->sli4_hba.intr_enable) {
7700 if (flag == MBX_POLL)
7701 rc = lpfc_sli4_post_sync_mbox(phba, mboxq);
7702 else
7703 rc = -EIO;
7704 if (rc != MBX_SUCCESS)
7705 lpfc_printf_log(phba, KERN_WARNING, LOG_MBOX | LOG_SLI,
7706 "(%d):2541 Mailbox command x%x "
7707 "(x%x/x%x) failure: "
7708 "mqe_sta: x%x mcqe_sta: x%x/x%x "
7709 "Data: x%x x%x\n,",
7710 mboxq->vport ? mboxq->vport->vpi : 0,
7711 mboxq->u.mb.mbxCommand,
7712 lpfc_sli_config_mbox_subsys_get(phba,
7713 mboxq),
7714 lpfc_sli_config_mbox_opcode_get(phba,
7715 mboxq),
7716 bf_get(lpfc_mqe_status, &mboxq->u.mqe),
7717 bf_get(lpfc_mcqe_status, &mboxq->mcqe),
7718 bf_get(lpfc_mcqe_ext_status,
7719 &mboxq->mcqe),
7720 psli->sli_flag, flag);
7721 return rc;
7722 } else if (flag == MBX_POLL) {
7723 lpfc_printf_log(phba, KERN_WARNING, LOG_MBOX | LOG_SLI,
7724 "(%d):2542 Try to issue mailbox command "
7725 "x%x (x%x/x%x) synchronously ahead of async"
7726 "mailbox command queue: x%x x%x\n",
7727 mboxq->vport ? mboxq->vport->vpi : 0,
7728 mboxq->u.mb.mbxCommand,
7729 lpfc_sli_config_mbox_subsys_get(phba, mboxq),
7730 lpfc_sli_config_mbox_opcode_get(phba, mboxq),
7731 psli->sli_flag, flag);
7732
7733 rc = lpfc_sli4_async_mbox_block(phba);
7734 if (!rc) {
7735
7736 rc = lpfc_sli4_post_sync_mbox(phba, mboxq);
7737 if (rc != MBX_SUCCESS)
7738 lpfc_printf_log(phba, KERN_WARNING,
7739 LOG_MBOX | LOG_SLI,
7740 "(%d):2597 Sync Mailbox command "
7741 "x%x (x%x/x%x) failure: "
7742 "mqe_sta: x%x mcqe_sta: x%x/x%x "
7743 "Data: x%x x%x\n,",
7744 mboxq->vport ? mboxq->vport->vpi : 0,
7745 mboxq->u.mb.mbxCommand,
7746 lpfc_sli_config_mbox_subsys_get(phba,
7747 mboxq),
7748 lpfc_sli_config_mbox_opcode_get(phba,
7749 mboxq),
7750 bf_get(lpfc_mqe_status, &mboxq->u.mqe),
7751 bf_get(lpfc_mcqe_status, &mboxq->mcqe),
7752 bf_get(lpfc_mcqe_ext_status,
7753 &mboxq->mcqe),
7754 psli->sli_flag, flag);
7755
7756 lpfc_sli4_async_mbox_unblock(phba);
7757 }
7758 return rc;
7759 }
7760
7761
7762 rc = lpfc_mbox_cmd_check(phba, mboxq);
7763 if (rc) {
7764 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
7765 "(%d):2543 Mailbox command x%x (x%x/x%x) "
7766 "cannot issue Data: x%x x%x\n",
7767 mboxq->vport ? mboxq->vport->vpi : 0,
7768 mboxq->u.mb.mbxCommand,
7769 lpfc_sli_config_mbox_subsys_get(phba, mboxq),
7770 lpfc_sli_config_mbox_opcode_get(phba, mboxq),
7771 psli->sli_flag, flag);
7772 goto out_not_finished;
7773 }
7774
7775
7776 psli->slistat.mbox_busy++;
7777 spin_lock_irqsave(&phba->hbalock, iflags);
7778 lpfc_mbox_put(phba, mboxq);
7779 spin_unlock_irqrestore(&phba->hbalock, iflags);
7780 lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI,
7781 "(%d):0354 Mbox cmd issue - Enqueue Data: "
7782 "x%x (x%x/x%x) x%x x%x x%x\n",
7783 mboxq->vport ? mboxq->vport->vpi : 0xffffff,
7784 bf_get(lpfc_mqe_command, &mboxq->u.mqe),
7785 lpfc_sli_config_mbox_subsys_get(phba, mboxq),
7786 lpfc_sli_config_mbox_opcode_get(phba, mboxq),
7787 phba->pport->port_state,
7788 psli->sli_flag, MBX_NOWAIT);
7789
7790 lpfc_worker_wake_up(phba);
7791
7792 return MBX_BUSY;
7793
7794out_not_finished:
7795 return MBX_NOT_FINISHED;
7796}
7797
7798
7799
7800
7801
7802
7803
7804
7805
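/**
 * lpfc_sli4_post_async_mbox - Post the next queued mailbox command
 * @phba: Pointer to HBA context object.
 *
 * Called from the worker thread to move the next command from the
 * driver's mailbox queue onto the SLI4 mailbox work queue. Nothing is
 * posted while interrupts are disabled, asynchronous posting is blocked,
 * or another mailbox command is still active.
 *
 * Returns MBX_SUCCESS when a command was posted or none was pending,
 * MBX_NOT_FINISHED otherwise.
 **/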
7806int
7807lpfc_sli4_post_async_mbox(struct lpfc_hba *phba)
7808{
7809 struct lpfc_sli *psli = &phba->sli;
7810 LPFC_MBOXQ_t *mboxq;
7811 int rc = MBX_SUCCESS;
7812 unsigned long iflags;
7813 struct lpfc_mqe *mqe;
7814 uint32_t mbx_cmnd;
7815
7816
7817 if (unlikely(!phba->sli4_hba.intr_enable))
7818 return MBX_NOT_FINISHED;
7819
7820
7821 spin_lock_irqsave(&phba->hbalock, iflags);
7822 if (unlikely(psli->sli_flag & LPFC_SLI_ASYNC_MBX_BLK)) {
7823 spin_unlock_irqrestore(&phba->hbalock, iflags);
7824 return MBX_NOT_FINISHED;
7825 }
7826 if (psli->sli_flag & LPFC_SLI_MBOX_ACTIVE) {
7827 spin_unlock_irqrestore(&phba->hbalock, iflags);
7828 return MBX_NOT_FINISHED;
7829 }
7830 if (unlikely(phba->sli.mbox_active)) {
7831 spin_unlock_irqrestore(&phba->hbalock, iflags);
7832 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
7833 "0384 There is pending active mailbox cmd\n");
7834 return MBX_NOT_FINISHED;
7835 }
7836
7837 psli->sli_flag |= LPFC_SLI_MBOX_ACTIVE;
7838
7839
7840 mboxq = lpfc_mbox_get(phba);
7841
7842
7843 if (!mboxq) {
7844 psli->sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
7845 spin_unlock_irqrestore(&phba->hbalock, iflags);
7846 return MBX_SUCCESS;
7847 }
7848 phba->sli.mbox_active = mboxq;
7849 spin_unlock_irqrestore(&phba->hbalock, iflags);
7850
7851
7852 rc = lpfc_mbox_dev_check(phba);
7853 if (unlikely(rc))
7854
7855 goto out_not_finished;
7856
7857
7858 mqe = &mboxq->u.mqe;
7859 mbx_cmnd = bf_get(lpfc_mqe_command, mqe);
7860
7861
7862 mod_timer(&psli->mbox_tmo, (jiffies +
7863 msecs_to_jiffies(1000 * lpfc_mbox_tmo_val(phba, mboxq))));
7864
7865 lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI,
7866 "(%d):0355 Mailbox cmd x%x (x%x/x%x) issue Data: "
7867 "x%x x%x\n",
7868 mboxq->vport ? mboxq->vport->vpi : 0, mbx_cmnd,
7869 lpfc_sli_config_mbox_subsys_get(phba, mboxq),
7870 lpfc_sli_config_mbox_opcode_get(phba, mboxq),
7871 phba->pport->port_state, psli->sli_flag);
7872
7873 if (mbx_cmnd != MBX_HEARTBEAT) {
7874 if (mboxq->vport) {
7875 lpfc_debugfs_disc_trc(mboxq->vport,
7876 LPFC_DISC_TRC_MBOX_VPORT,
7877 "MBOX Send vport: cmd:x%x mb:x%x x%x",
7878 mbx_cmnd, mqe->un.mb_words[0],
7879 mqe->un.mb_words[1]);
7880 } else {
7881 lpfc_debugfs_disc_trc(phba->pport,
7882 LPFC_DISC_TRC_MBOX,
7883 "MBOX Send: cmd:x%x mb:x%x x%x",
7884 mbx_cmnd, mqe->un.mb_words[0],
7885 mqe->un.mb_words[1]);
7886 }
7887 }
7888 psli->slistat.mbox_cmd++;
7889
7890
7891 rc = lpfc_sli4_mq_put(phba->sli4_hba.mbx_wq, mqe);
7892 if (rc != MBX_SUCCESS) {
7893 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
7894 "(%d):2533 Mailbox command x%x (x%x/x%x) "
7895 "cannot issue Data: x%x x%x\n",
7896 mboxq->vport ? mboxq->vport->vpi : 0,
7897 mboxq->u.mb.mbxCommand,
7898 lpfc_sli_config_mbox_subsys_get(phba, mboxq),
7899 lpfc_sli_config_mbox_opcode_get(phba, mboxq),
7900 psli->sli_flag, MBX_NOWAIT);
7901 goto out_not_finished;
7902 }
7903
7904 return rc;
7905
7906out_not_finished:
7907 spin_lock_irqsave(&phba->hbalock, iflags);
7908 if (phba->sli.mbox_active) {
7909 mboxq->u.mb.mbxStatus = MBX_NOT_FINISHED;
7910 __lpfc_mbox_cmpl_put(phba, mboxq);
7911
7912 psli->sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
7913 phba->sli.mbox_active = NULL;
7914 }
7915 spin_unlock_irqrestore(&phba->hbalock, iflags);
7916
7917 return MBX_NOT_FINISHED;
7918}
7919
7920
7921
7922
7923
7924
7925
7926
7927
7928
7929
7930
7931
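/**
 * lpfc_sli_issue_mbox - Wrapper for issuing a mailbox command
 * @phba: Pointer to HBA context object.
 * @pmbox: Pointer to the mailbox command to issue.
 * @flag: MBX_POLL or MBX_NOWAIT.
 *
 * Dispatches to the SLI revision specific routine installed by
 * lpfc_mbox_api_table_setup().
 **/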
7932int
7933lpfc_sli_issue_mbox(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmbox, uint32_t flag)
7934{
7935 return phba->lpfc_sli_issue_mbox(phba, pmbox, flag);
7936}
7937
7938
7939
7940
7941
7942
7943
7944
7945
7946
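/**
 * lpfc_mbox_api_table_setup - Set up the mbox API jump table
 * @phba: Pointer to HBA context object.
 * @dev_grp: HBA PCI-device group (LPFC_PCI_DEV_LP or LPFC_PCI_DEV_OC).
 *
 * Returns 0 on success, -ENODEV for an unrecognized device group.
 **/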
7947int
7948lpfc_mbox_api_table_setup(struct lpfc_hba *phba, uint8_t dev_grp)
7949{
7950
7951 switch (dev_grp) {
7952 case LPFC_PCI_DEV_LP:
7953 phba->lpfc_sli_issue_mbox = lpfc_sli_issue_mbox_s3;
7954 phba->lpfc_sli_handle_slow_ring_event =
7955 lpfc_sli_handle_slow_ring_event_s3;
7956 phba->lpfc_sli_hbq_to_firmware = lpfc_sli_hbq_to_firmware_s3;
7957 phba->lpfc_sli_brdrestart = lpfc_sli_brdrestart_s3;
7958 phba->lpfc_sli_brdready = lpfc_sli_brdready_s3;
7959 break;
7960 case LPFC_PCI_DEV_OC:
7961 phba->lpfc_sli_issue_mbox = lpfc_sli_issue_mbox_s4;
7962 phba->lpfc_sli_handle_slow_ring_event =
7963 lpfc_sli_handle_slow_ring_event_s4;
7964 phba->lpfc_sli_hbq_to_firmware = lpfc_sli_hbq_to_firmware_s4;
7965 phba->lpfc_sli_brdrestart = lpfc_sli_brdrestart_s4;
7966 phba->lpfc_sli_brdready = lpfc_sli_brdready_s4;
7967 break;
7968 default:
7969 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
7970 "1420 Invalid HBA PCI-device group: 0x%x\n",
7971 dev_grp);
7972 return -ENODEV;
7974 }
7975 return 0;
7976}
7977
7978
7979
7980
7981
7982
7983
7984
7985
7986
7987
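/**
 * __lpfc_sli_ringtx_put - Add an iocb to the ring's txq
 * @phba: Pointer to HBA context object.
 * @pring: Pointer to driver SLI ring object.
 * @piocb: Pointer to the driver iocb object.
 *
 * Caller must hold the hbalock.
 **/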
7988void
7989__lpfc_sli_ringtx_put(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
7990 struct lpfc_iocbq *piocb)
7991{
7992 lockdep_assert_held(&phba->hbalock);
7993
7994 list_add_tail(&piocb->list, &pring->txq);
7995}
7996
7997
7998
7999
8000
8001
8002
8003
8004
8005
8006
8007
8008
8009
8010
8011
8012
8013
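/**
 * lpfc_sli_next_iocb - Get the next iocb to submit to the firmware
 * @phba: Pointer to HBA context object.
 * @pring: Pointer to driver SLI ring object.
 * @piocb: Pointer to the caller's new iocb pointer.
 *
 * Returns the next iocb queued on the ring's txq or, if the txq is
 * empty, the caller's iocb, clearing *piocb so the caller can tell it
 * was consumed. Caller must hold the hbalock.
 **/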
8014static struct lpfc_iocbq *
8015lpfc_sli_next_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
8016 struct lpfc_iocbq **piocb)
8017{
8018 struct lpfc_iocbq *nextiocb;
8019
8020 lockdep_assert_held(&phba->hbalock);
8021
8022 nextiocb = lpfc_sli_ringtx_get(phba, pring);
8023 if (!nextiocb) {
8024 nextiocb = *piocb;
8025 *piocb = NULL;
8026 }
8027
8028 return nextiocb;
8029}
8030
8031
8032
8033
8034
8035
8036
8037
8038
8039
8040
8041
8042
8043
8044
8045
8046
8047
8048
8049
8050
8051
8052
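/**
 * __lpfc_sli_issue_iocb_s3 - SLI3 lockless version of iocb issue
 * @phba: Pointer to HBA context object.
 * @ring_number: SLI ring number on which the command is issued.
 * @piocb: Pointer to the command iocb object.
 * @flag: SLI_IOCB_RET_IOCB to return a busy iocb instead of queuing it.
 *
 * Validates adapter and link state, then submits as many iocbs as the
 * ring has room for. While the link is down only a small set of commands
 * is allowed through. An iocb that cannot be submitted is queued to the
 * txq unless the caller asked for it back. Caller must hold the hbalock.
 *
 * Returns IOCB_SUCCESS, IOCB_BUSY, or IOCB_ERROR.
 **/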
8053static int
8054__lpfc_sli_issue_iocb_s3(struct lpfc_hba *phba, uint32_t ring_number,
8055 struct lpfc_iocbq *piocb, uint32_t flag)
8056{
8057 struct lpfc_iocbq *nextiocb;
8058 IOCB_t *iocb;
8059 struct lpfc_sli_ring *pring = &phba->sli.ring[ring_number];
8060
8061 lockdep_assert_held(&phba->hbalock);
8062
8063 if (piocb->iocb_cmpl && (!piocb->vport) &&
8064 (piocb->iocb.ulpCommand != CMD_ABORT_XRI_CN) &&
8065 (piocb->iocb.ulpCommand != CMD_CLOSE_XRI_CN)) {
8066 lpfc_printf_log(phba, KERN_ERR,
8067 LOG_SLI | LOG_VPORT,
8068 "1807 IOCB x%x failed. No vport\n",
8069 piocb->iocb.ulpCommand);
8070 dump_stack();
8071 return IOCB_ERROR;
8072 }
8073
8074
8075
8076 if (unlikely(pci_channel_offline(phba->pcidev)))
8077 return IOCB_ERROR;
8078
8079
8080 if (unlikely(phba->hba_flag & DEFER_ERATT))
8081 return IOCB_ERROR;
8082
8083
8084
8085
8086 if (unlikely(phba->link_state < LPFC_LINK_DOWN))
8087 return IOCB_ERROR;
8088
8089
8090
8091
8092
8093 if (unlikely(pring->flag & LPFC_STOP_IOCB_EVENT))
8094 goto iocb_busy;
8095
8096 if (unlikely(phba->link_state == LPFC_LINK_DOWN)) {
8097
8098
8099
8100
8101 switch (piocb->iocb.ulpCommand) {
8102 case CMD_GEN_REQUEST64_CR:
8103 case CMD_GEN_REQUEST64_CX:
8104 if (!(phba->sli.sli_flag & LPFC_MENLO_MAINT) ||
8105 (piocb->iocb.un.genreq64.w5.hcsw.Rctl !=
8106 FC_RCTL_DD_UNSOL_CMD) ||
8107 (piocb->iocb.un.genreq64.w5.hcsw.Type !=
8108 MENLO_TRANSPORT_TYPE))
8109
8110 goto iocb_busy;
8111 break;
8112 case CMD_QUE_RING_BUF_CN:
8113 case CMD_QUE_RING_BUF64_CN:
8114
8115
8116
8117
8118 if (piocb->iocb_cmpl)
8119 piocb->iocb_cmpl = NULL;
8120
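/* Fallthrough */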
8121 case CMD_CREATE_XRI_CR:
8122 case CMD_CLOSE_XRI_CN:
8123 case CMD_CLOSE_XRI_CX:
8124 break;
8125 default:
8126 goto iocb_busy;
8127 }
8128
8129
8130
8131
8132
8133 } else if (unlikely(pring->ringno == phba->sli.fcp_ring &&
8134 !(phba->sli.sli_flag & LPFC_PROCESS_LA))) {
8135 goto iocb_busy;
8136 }
8137
8138 while ((iocb = lpfc_sli_next_iocb_slot(phba, pring)) &&
8139 (nextiocb = lpfc_sli_next_iocb(phba, pring, &piocb)))
8140 lpfc_sli_submit_iocb(phba, pring, iocb, nextiocb);
8141
8142 if (iocb)
8143 lpfc_sli_update_ring(phba, pring);
8144 else
8145 lpfc_sli_update_full_ring(phba, pring);
8146
8147 if (!piocb)
8148 return IOCB_SUCCESS;
8149
8150 goto out_busy;
8151
8152 iocb_busy:
8153 pring->stats.iocb_cmd_delay++;
8154
8155 out_busy:
8156
8157 if (!(flag & SLI_IOCB_RET_IOCB)) {
8158 __lpfc_sli_ringtx_put(phba, pring, piocb);
8159 return IOCB_SUCCESS;
8160 }
8161
8162 return IOCB_BUSY;
8163}
8164
8165
8166
8167
8168
8169
8170
8171
8172
8173
8174
8175
8176
8177
8178
8179
8180
8181
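/**
 * lpfc_sli4_bpl2sgl - Convert a BPL or BDE into an SGL
 * @phba: Pointer to HBA context object.
 * @piocbq: Pointer to the command iocb.
 * @sglq: Pointer to the scatter/gather queue entry to fill.
 *
 * Walks the buffer pointer list attached to the iocb and rewrites each
 * BDE as an SLI4 scatter/gather entry, marking the last entry and, for
 * GEN_REQUEST commands, keeping a running offset over the inbound data
 * buffers. A single embedded BDE is converted in place.
 *
 * Returns the sglq's XRI on success, NO_XRI on failure.
 **/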
8182static uint16_t
8183lpfc_sli4_bpl2sgl(struct lpfc_hba *phba, struct lpfc_iocbq *piocbq,
8184 struct lpfc_sglq *sglq)
8185{
8186 uint16_t xritag = NO_XRI;
8187 struct ulp_bde64 *bpl = NULL;
8188 struct ulp_bde64 bde;
8189 struct sli4_sge *sgl = NULL;
8190 struct lpfc_dmabuf *dmabuf;
8191 IOCB_t *icmd;
8192 int numBdes = 0;
8193 int i = 0;
8194 uint32_t offset = 0;
8195 int inbound = 0;
8196
8197 if (!piocbq || !sglq)
8198 return xritag;
8199
8200 sgl = (struct sli4_sge *)sglq->sgl;
8201 icmd = &piocbq->iocb;
8202 if (icmd->ulpCommand == CMD_XMIT_BLS_RSP64_CX)
8203 return sglq->sli4_xritag;
8204 if (icmd->un.genreq64.bdl.bdeFlags == BUFF_TYPE_BLP_64) {
8205 numBdes = icmd->un.genreq64.bdl.bdeSize /
8206 sizeof(struct ulp_bde64);
8207
8208
8209
8210
8211 if (piocbq->context3)
8212 dmabuf = (struct lpfc_dmabuf *)piocbq->context3;
8213 else
8214 return xritag;
8215
8216 bpl = (struct ulp_bde64 *)dmabuf->virt;
8217 if (!bpl)
8218 return xritag;
8219
8220 for (i = 0; i < numBdes; i++) {
8221
8222 sgl->addr_hi = bpl->addrHigh;
8223 sgl->addr_lo = bpl->addrLow;
8224
8225 sgl->word2 = le32_to_cpu(sgl->word2);
8226 if ((i+1) == numBdes)
8227 bf_set(lpfc_sli4_sge_last, sgl, 1);
8228 else
8229 bf_set(lpfc_sli4_sge_last, sgl, 0);
8230
8231
8232
8233 bde.tus.w = le32_to_cpu(bpl->tus.w);
8234 sgl->sge_len = cpu_to_le32(bde.tus.f.bdeSize);
8235
8236
8237
8238
8239 if (piocbq->iocb.ulpCommand == CMD_GEN_REQUEST64_CR) {
8240
8241 if (bpl->tus.f.bdeFlags == BUFF_TYPE_BDE_64I)
8242 inbound++;
8243
8244 if (inbound == 1)
8245 offset = 0;
8246 bf_set(lpfc_sli4_sge_offset, sgl, offset);
8247 bf_set(lpfc_sli4_sge_type, sgl,
8248 LPFC_SGE_TYPE_DATA);
8249 offset += bde.tus.f.bdeSize;
8250 }
8251 sgl->word2 = cpu_to_le32(sgl->word2);
8252 bpl++;
8253 sgl++;
8254 }
8255 } else if (icmd->un.genreq64.bdl.bdeFlags == BUFF_TYPE_BDE_64) {
8256
8257
8258
8259
8260 sgl->addr_hi =
8261 cpu_to_le32(icmd->un.genreq64.bdl.addrHigh);
8262 sgl->addr_lo =
8263 cpu_to_le32(icmd->un.genreq64.bdl.addrLow);
8264 sgl->word2 = le32_to_cpu(sgl->word2);
8265 bf_set(lpfc_sli4_sge_last, sgl, 1);
8266 sgl->word2 = cpu_to_le32(sgl->word2);
8267 sgl->sge_len =
8268 cpu_to_le32(icmd->un.genreq64.bdl.bdeSize);
8269 }
8270 return sglq->sli4_xritag;
8271}
8272
8273
8274
8275
8276
8277
8278
8279
8280
8281
8282
8283
8284
8285
8286
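/**
 * lpfc_sli4_iocb2wqe - Translate an iocb into an SLI4 work queue entry
 * @phba: Pointer to HBA context object.
 * @iocbq: Pointer to the command iocb to translate.
 * @wqe: Pointer to the work queue entry to fill in.
 *
 * Fills in the command-specific WQE fields from the iocb: payload
 * lengths, context tags, timeout and class, DIF handling, and, when
 * fcp_embed_io is enabled, the FCP command embedded directly in the WQE.
 *
 * Returns 0 on success, IOCB_ERROR for unsupported commands or malformed
 * buffer lists.
 **/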
8287static int
8288lpfc_sli4_iocb2wqe(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq,
8289 union lpfc_wqe *wqe)
8290{
8291 uint32_t xmit_len = 0, total_len = 0;
8292 uint8_t ct = 0;
8293 uint32_t fip;
8294 uint32_t abort_tag;
8295 uint8_t command_type = ELS_COMMAND_NON_FIP;
8296 uint8_t cmnd;
8297 uint16_t xritag;
8298 uint16_t abrt_iotag;
8299 struct lpfc_iocbq *abrtiocbq;
8300 struct ulp_bde64 *bpl = NULL;
8301 uint32_t els_id = LPFC_ELS_ID_DEFAULT;
8302 int numBdes, i;
8303 struct ulp_bde64 bde;
8304 struct lpfc_nodelist *ndlp;
8305 uint32_t *pcmd;
8306 uint32_t if_type;
8307
8308 fip = phba->hba_flag & HBA_FIP_SUPPORT;
8309
8310 if (iocbq->iocb_flag & LPFC_IO_FCP)
8311 command_type = FCP_COMMAND;
8312 else if (fip && (iocbq->iocb_flag & LPFC_FIP_ELS_ID_MASK))
8313 command_type = ELS_COMMAND_FIP;
8314 else
8315 command_type = ELS_COMMAND_NON_FIP;
8316
8317 if (phba->fcp_embed_io)
8318 memset(wqe, 0, sizeof(union lpfc_wqe128));
8319
8320 memcpy(wqe, &iocbq->iocb, sizeof(union lpfc_wqe));
8321 wqe->generic.wqe_com.word7 = 0;
8322 wqe->generic.wqe_com.word10 = 0;
8323
8324 abort_tag = (uint32_t) iocbq->iotag;
8325 xritag = iocbq->sli4_xritag;
8326
8327 if (iocbq->iocb.un.genreq64.bdl.bdeFlags == BUFF_TYPE_BLP_64) {
8328 numBdes = iocbq->iocb.un.genreq64.bdl.bdeSize /
8329 sizeof(struct ulp_bde64);
8330 bpl = (struct ulp_bde64 *)
8331 ((struct lpfc_dmabuf *)iocbq->context3)->virt;
8332 if (!bpl)
8333 return IOCB_ERROR;
8334
8335
8336 wqe->generic.bde.addrHigh = le32_to_cpu(bpl->addrHigh);
8337 wqe->generic.bde.addrLow = le32_to_cpu(bpl->addrLow);
8338
8339
8340
8341 wqe->generic.bde.tus.w = le32_to_cpu(bpl->tus.w);
8342 xmit_len = wqe->generic.bde.tus.f.bdeSize;
8343 total_len = 0;
8344 for (i = 0; i < numBdes; i++) {
8345 bde.tus.w = le32_to_cpu(bpl[i].tus.w);
8346 total_len += bde.tus.f.bdeSize;
8347 }
8348 } else
8349 xmit_len = iocbq->iocb.un.fcpi64.bdl.bdeSize;
8350
8351 iocbq->iocb.ulpIoTag = iocbq->iotag;
8352 cmnd = iocbq->iocb.ulpCommand;
8353
8354 switch (iocbq->iocb.ulpCommand) {
8355 case CMD_ELS_REQUEST64_CR:
8356 if (iocbq->iocb_flag & LPFC_IO_LIBDFC)
8357 ndlp = iocbq->context_un.ndlp;
8358 else
8359 ndlp = (struct lpfc_nodelist *)iocbq->context1;
8360 if (!iocbq->iocb.ulpLe) {
8361 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
8362 "2007 Only Limited Edition cmd Format"
8363 " supported 0x%x\n",
8364 iocbq->iocb.ulpCommand);
8365 return IOCB_ERROR;
8366 }
8367
8368 wqe->els_req.payload_len = xmit_len;
8369
8370 bf_set(wqe_tmo, &wqe->els_req.wqe_com,
8371 iocbq->iocb.ulpTimeout);
8372
8373 bf_set(els_req64_vf, &wqe->els_req, 0);
8374
8375 bf_set(els_req64_vfid, &wqe->els_req, 0);
8376 ct = ((iocbq->iocb.ulpCt_h << 1) | iocbq->iocb.ulpCt_l);
8377 bf_set(wqe_ctxt_tag, &wqe->els_req.wqe_com,
8378 iocbq->iocb.ulpContext);
8379 bf_set(wqe_ct, &wqe->els_req.wqe_com, ct);
8380 bf_set(wqe_pu, &wqe->els_req.wqe_com, 0);
8381
8382 if (command_type == ELS_COMMAND_FIP)
8383 els_id = ((iocbq->iocb_flag & LPFC_FIP_ELS_ID_MASK)
8384 >> LPFC_FIP_ELS_ID_SHIFT);
8385 pcmd = (uint32_t *) (((struct lpfc_dmabuf *)
8386 iocbq->context2)->virt);
8387 if_type = bf_get(lpfc_sli_intf_if_type,
8388 &phba->sli4_hba.sli_intf);
8389 if (if_type == LPFC_SLI_INTF_IF_TYPE_2) {
8390 if (pcmd && (*pcmd == ELS_CMD_FLOGI ||
8391 *pcmd == ELS_CMD_SCR ||
8392 *pcmd == ELS_CMD_FDISC ||
8393 *pcmd == ELS_CMD_LOGO ||
8394 *pcmd == ELS_CMD_PLOGI)) {
8395 bf_set(els_req64_sp, &wqe->els_req, 1);
8396 bf_set(els_req64_sid, &wqe->els_req,
8397 iocbq->vport->fc_myDID);
8398 if ((*pcmd == ELS_CMD_FLOGI) &&
8399 !(phba->fc_topology ==
8400 LPFC_TOPOLOGY_LOOP))
8401 bf_set(els_req64_sid, &wqe->els_req, 0);
8402 bf_set(wqe_ct, &wqe->els_req.wqe_com, 1);
8403 bf_set(wqe_ctxt_tag, &wqe->els_req.wqe_com,
8404 phba->vpi_ids[iocbq->vport->vpi]);
8405 } else if (pcmd && iocbq->context1) {
8406 bf_set(wqe_ct, &wqe->els_req.wqe_com, 0);
8407 bf_set(wqe_ctxt_tag, &wqe->els_req.wqe_com,
8408 phba->sli4_hba.rpi_ids[ndlp->nlp_rpi]);
8409 }
8410 }
8411 bf_set(wqe_temp_rpi, &wqe->els_req.wqe_com,
8412 phba->sli4_hba.rpi_ids[ndlp->nlp_rpi]);
8413 bf_set(wqe_els_id, &wqe->els_req.wqe_com, els_id);
8414 bf_set(wqe_dbde, &wqe->els_req.wqe_com, 1);
8415 bf_set(wqe_iod, &wqe->els_req.wqe_com, LPFC_WQE_IOD_READ);
8416 bf_set(wqe_qosd, &wqe->els_req.wqe_com, 1);
8417 bf_set(wqe_lenloc, &wqe->els_req.wqe_com, LPFC_WQE_LENLOC_NONE);
8418 bf_set(wqe_ebde_cnt, &wqe->els_req.wqe_com, 0);
8419 wqe->els_req.max_response_payload_len = total_len - xmit_len;
8420 break;
8421 case CMD_XMIT_SEQUENCE64_CX:
8422 bf_set(wqe_ctxt_tag, &wqe->xmit_sequence.wqe_com,
8423 iocbq->iocb.un.ulpWord[3]);
8424 bf_set(wqe_rcvoxid, &wqe->xmit_sequence.wqe_com,
8425 iocbq->iocb.unsli3.rcvsli3.ox_id);
8426
8427 xmit_len = total_len;
8428 cmnd = CMD_XMIT_SEQUENCE64_CR;
8429 if (phba->link_flag & LS_LOOPBACK_MODE)
8430 bf_set(wqe_xo, &wqe->xmit_sequence.wge_ctl, 1);
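/* Fallthrough */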
8431 case CMD_XMIT_SEQUENCE64_CR:
8432
8433 wqe->xmit_sequence.rsvd3 = 0;
8434
8435
8436 bf_set(wqe_pu, &wqe->xmit_sequence.wqe_com, 0);
8437 bf_set(wqe_dbde, &wqe->xmit_sequence.wqe_com, 1);
8438 bf_set(wqe_iod, &wqe->xmit_sequence.wqe_com,
8439 LPFC_WQE_IOD_WRITE);
8440 bf_set(wqe_lenloc, &wqe->xmit_sequence.wqe_com,
8441 LPFC_WQE_LENLOC_WORD12);
8442 bf_set(wqe_ebde_cnt, &wqe->xmit_sequence.wqe_com, 0);
8443 wqe->xmit_sequence.xmit_len = xmit_len;
8444 command_type = OTHER_COMMAND;
8445 break;
8446 case CMD_XMIT_BCAST64_CN:
8447
8448 wqe->xmit_bcast64.seq_payload_len = xmit_len;
8449
8450
8451
8452 bf_set(wqe_ct, &wqe->xmit_bcast64.wqe_com,
8453 ((iocbq->iocb.ulpCt_h << 1) | iocbq->iocb.ulpCt_l));
8454 bf_set(wqe_dbde, &wqe->xmit_bcast64.wqe_com, 1);
8455 bf_set(wqe_iod, &wqe->xmit_bcast64.wqe_com, LPFC_WQE_IOD_WRITE);
8456 bf_set(wqe_lenloc, &wqe->xmit_bcast64.wqe_com,
8457 LPFC_WQE_LENLOC_WORD3);
8458 bf_set(wqe_ebde_cnt, &wqe->xmit_bcast64.wqe_com, 0);
8459 break;
8460 case CMD_FCP_IWRITE64_CR:
8461 command_type = FCP_COMMAND_DATA_OUT;
8462
8463
8464 bf_set(payload_offset_len, &wqe->fcp_iwrite,
8465 xmit_len + sizeof(struct fcp_rsp));
8466 bf_set(cmd_buff_len, &wqe->fcp_iwrite,
8467 0);
8468
8469
8470 bf_set(wqe_erp, &wqe->fcp_iwrite.wqe_com,
8471 iocbq->iocb.ulpFCP2Rcvy);
8472 bf_set(wqe_lnk, &wqe->fcp_iwrite.wqe_com, iocbq->iocb.ulpXS);
8473
8474 bf_set(wqe_iod, &wqe->fcp_iwrite.wqe_com, LPFC_WQE_IOD_WRITE);
8475 bf_set(wqe_lenloc, &wqe->fcp_iwrite.wqe_com,
8476 LPFC_WQE_LENLOC_WORD4);
8477 bf_set(wqe_pu, &wqe->fcp_iwrite.wqe_com, iocbq->iocb.ulpPU);
8478 bf_set(wqe_dbde, &wqe->fcp_iwrite.wqe_com, 1);
8479 if (iocbq->iocb_flag & LPFC_IO_OAS) {
8480 bf_set(wqe_oas, &wqe->fcp_iwrite.wqe_com, 1);
8481 bf_set(wqe_ccpe, &wqe->fcp_iwrite.wqe_com, 1);
8482 if (iocbq->priority) {
8483 bf_set(wqe_ccp, &wqe->fcp_iwrite.wqe_com,
8484 (iocbq->priority << 1));
8485 } else {
8486 bf_set(wqe_ccp, &wqe->fcp_iwrite.wqe_com,
8487 (phba->cfg_XLanePriority << 1));
8488 }
8489 }
8490
8491
8492 if (phba->fcp_embed_io) {
8493 struct lpfc_scsi_buf *lpfc_cmd;
8494 struct sli4_sge *sgl;
8495 union lpfc_wqe128 *wqe128;
8496 struct fcp_cmnd *fcp_cmnd;
8497 uint32_t *ptr;
8498
8499
8500 wqe128 = (union lpfc_wqe128 *)wqe;
8501
8502 lpfc_cmd = iocbq->context1;
8503 sgl = (struct sli4_sge *)lpfc_cmd->fcp_bpl;
8504 fcp_cmnd = lpfc_cmd->fcp_cmnd;
8505
8506
8507 wqe128->generic.bde.tus.f.bdeFlags =
8508 BUFF_TYPE_BDE_IMMED;
8509 wqe128->generic.bde.tus.f.bdeSize = sgl->sge_len;
8510 wqe128->generic.bde.addrHigh = 0;
8511 wqe128->generic.bde.addrLow = 88;
8512
8513 bf_set(wqe_wqes, &wqe128->fcp_iwrite.wqe_com, 1);
8514
8515
8516 ptr = &wqe128->words[22];
8517 memcpy(ptr, fcp_cmnd, sizeof(struct fcp_cmnd));
8518 }
8519 break;
8520 case CMD_FCP_IREAD64_CR:
8521
8522
8523 bf_set(payload_offset_len, &wqe->fcp_iread,
8524 xmit_len + sizeof(struct fcp_rsp));
8525 bf_set(cmd_buff_len, &wqe->fcp_iread,
8526 0);
8527
8528
8529 bf_set(wqe_erp, &wqe->fcp_iread.wqe_com,
8530 iocbq->iocb.ulpFCP2Rcvy);
8531 bf_set(wqe_lnk, &wqe->fcp_iread.wqe_com, iocbq->iocb.ulpXS);
8532
8533 bf_set(wqe_iod, &wqe->fcp_iread.wqe_com, LPFC_WQE_IOD_READ);
8534 bf_set(wqe_lenloc, &wqe->fcp_iread.wqe_com,
8535 LPFC_WQE_LENLOC_WORD4);
8536 bf_set(wqe_pu, &wqe->fcp_iread.wqe_com, iocbq->iocb.ulpPU);
8537 bf_set(wqe_dbde, &wqe->fcp_iread.wqe_com, 1);
8538 if (iocbq->iocb_flag & LPFC_IO_OAS) {
8539 bf_set(wqe_oas, &wqe->fcp_iread.wqe_com, 1);
8540 bf_set(wqe_ccpe, &wqe->fcp_iread.wqe_com, 1);
8541 if (iocbq->priority) {
8542 bf_set(wqe_ccp, &wqe->fcp_iread.wqe_com,
8543 (iocbq->priority << 1));
8544 } else {
8545 bf_set(wqe_ccp, &wqe->fcp_iread.wqe_com,
8546 (phba->cfg_XLanePriority << 1));
8547 }
8548 }
8549
8550
8551 if (phba->fcp_embed_io) {
8552 struct lpfc_scsi_buf *lpfc_cmd;
8553 struct sli4_sge *sgl;
8554 union lpfc_wqe128 *wqe128;
8555 struct fcp_cmnd *fcp_cmnd;
8556 uint32_t *ptr;
8557
8558
8559 wqe128 = (union lpfc_wqe128 *)wqe;
8560
8561 lpfc_cmd = iocbq->context1;
8562 sgl = (struct sli4_sge *)lpfc_cmd->fcp_bpl;
8563 fcp_cmnd = lpfc_cmd->fcp_cmnd;
8564
8565
8566 wqe128->generic.bde.tus.f.bdeFlags =
8567 BUFF_TYPE_BDE_IMMED;
8568 wqe128->generic.bde.tus.f.bdeSize = sgl->sge_len;
8569 wqe128->generic.bde.addrHigh = 0;
8570 wqe128->generic.bde.addrLow = 88;
8571
8572 bf_set(wqe_wqes, &wqe128->fcp_iread.wqe_com, 1);
8573
8574
8575 ptr = &wqe128->words[22];
8576 memcpy(ptr, fcp_cmnd, sizeof(struct fcp_cmnd));
8577 }
8578 break;
8579 case CMD_FCP_ICMND64_CR:
8580
8581
8582 bf_set(payload_offset_len, &wqe->fcp_icmd,
8583 xmit_len + sizeof(struct fcp_rsp));
8584 bf_set(cmd_buff_len, &wqe->fcp_icmd,
8585 0);
8586
8587 bf_set(wqe_pu, &wqe->fcp_icmd.wqe_com, 0);
8588
8589 bf_set(wqe_dbde, &wqe->fcp_icmd.wqe_com, 1);
8590 bf_set(wqe_iod, &wqe->fcp_icmd.wqe_com, LPFC_WQE_IOD_WRITE);
8591 bf_set(wqe_qosd, &wqe->fcp_icmd.wqe_com, 1);
8592 bf_set(wqe_lenloc, &wqe->fcp_icmd.wqe_com,
8593 LPFC_WQE_LENLOC_NONE);
8594 bf_set(wqe_erp, &wqe->fcp_icmd.wqe_com,
8595 iocbq->iocb.ulpFCP2Rcvy);
8596 if (iocbq->iocb_flag & LPFC_IO_OAS) {
8597 bf_set(wqe_oas, &wqe->fcp_icmd.wqe_com, 1);
8598 bf_set(wqe_ccpe, &wqe->fcp_icmd.wqe_com, 1);
8599 if (iocbq->priority) {
8600 bf_set(wqe_ccp, &wqe->fcp_icmd.wqe_com,
8601 (iocbq->priority << 1));
8602 } else {
8603 bf_set(wqe_ccp, &wqe->fcp_icmd.wqe_com,
8604 (phba->cfg_XLanePriority << 1));
8605 }
8606 }
8607
8608
8609 if (phba->fcp_embed_io) {
8610 struct lpfc_scsi_buf *lpfc_cmd;
8611 struct sli4_sge *sgl;
8612 union lpfc_wqe128 *wqe128;
8613 struct fcp_cmnd *fcp_cmnd;
8614 uint32_t *ptr;
8615
8616
8617 wqe128 = (union lpfc_wqe128 *)wqe;
8618
8619 lpfc_cmd = iocbq->context1;
8620 sgl = (struct sli4_sge *)lpfc_cmd->fcp_bpl;
8621 fcp_cmnd = lpfc_cmd->fcp_cmnd;
8622
8623
8624 wqe128->generic.bde.tus.f.bdeFlags =
8625 BUFF_TYPE_BDE_IMMED;
8626 wqe128->generic.bde.tus.f.bdeSize = sgl->sge_len;
8627 wqe128->generic.bde.addrHigh = 0;
8628 wqe128->generic.bde.addrLow = 88;
8629
8630 bf_set(wqe_wqes, &wqe128->fcp_icmd.wqe_com, 1);
8631
8632
8633 ptr = &wqe128->words[22];
8634 memcpy(ptr, fcp_cmnd, sizeof(struct fcp_cmnd));
8635 }
8636 break;
8637 case CMD_GEN_REQUEST64_CR:
8638
8639
8640
8641 xmit_len = 0;
8642 numBdes = iocbq->iocb.un.genreq64.bdl.bdeSize /
8643 sizeof(struct ulp_bde64);
8644 for (i = 0; i < numBdes; i++) {
8645 bde.tus.w = le32_to_cpu(bpl[i].tus.w);
8646 if (bde.tus.f.bdeFlags != BUFF_TYPE_BDE_64)
8647 break;
8648 xmit_len += bde.tus.f.bdeSize;
8649 }
8650
8651 wqe->gen_req.request_payload_len = xmit_len;
8652
8653
8654
8655 if (iocbq->iocb.ulpCt_h || iocbq->iocb.ulpCt_l) {
8656 ct = ((iocbq->iocb.ulpCt_h << 1) | iocbq->iocb.ulpCt_l);
8657 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
8658 "2015 Invalid CT %x command 0x%x\n",
8659 ct, iocbq->iocb.ulpCommand);
8660 return IOCB_ERROR;
8661 }
8662 bf_set(wqe_ct, &wqe->gen_req.wqe_com, 0);
8663 bf_set(wqe_tmo, &wqe->gen_req.wqe_com, iocbq->iocb.ulpTimeout);
8664 bf_set(wqe_pu, &wqe->gen_req.wqe_com, iocbq->iocb.ulpPU);
8665 bf_set(wqe_dbde, &wqe->gen_req.wqe_com, 1);
8666 bf_set(wqe_iod, &wqe->gen_req.wqe_com, LPFC_WQE_IOD_READ);
8667 bf_set(wqe_qosd, &wqe->gen_req.wqe_com, 1);
8668 bf_set(wqe_lenloc, &wqe->gen_req.wqe_com, LPFC_WQE_LENLOC_NONE);
8669 bf_set(wqe_ebde_cnt, &wqe->gen_req.wqe_com, 0);
8670 wqe->gen_req.max_response_payload_len = total_len - xmit_len;
8671 command_type = OTHER_COMMAND;
8672 break;
8673 case CMD_XMIT_ELS_RSP64_CX:
8674 ndlp = (struct lpfc_nodelist *)iocbq->context1;
8675
8676
8677 wqe->xmit_els_rsp.response_payload_len = xmit_len;
8678
8679 wqe->xmit_els_rsp.word4 = 0;
8680
8681 bf_set(wqe_els_did, &wqe->xmit_els_rsp.wqe_dest,
8682 iocbq->iocb.un.xseq64.xmit_els_remoteID);
8683
8684 if_type = bf_get(lpfc_sli_intf_if_type,
8685 &phba->sli4_hba.sli_intf);
8686 if (if_type == LPFC_SLI_INTF_IF_TYPE_2) {
8687 if (iocbq->vport->fc_flag & FC_PT2PT) {
8688 bf_set(els_rsp64_sp, &wqe->xmit_els_rsp, 1);
8689 bf_set(els_rsp64_sid, &wqe->xmit_els_rsp,
8690 iocbq->vport->fc_myDID);
8691 if (iocbq->vport->fc_myDID == Fabric_DID) {
8692 bf_set(wqe_els_did,
8693 &wqe->xmit_els_rsp.wqe_dest, 0);
8694 }
8695 }
8696 }
8697 bf_set(wqe_ct, &wqe->xmit_els_rsp.wqe_com,
8698 ((iocbq->iocb.ulpCt_h << 1) | iocbq->iocb.ulpCt_l));
8699 bf_set(wqe_pu, &wqe->xmit_els_rsp.wqe_com, iocbq->iocb.ulpPU);
8700 bf_set(wqe_rcvoxid, &wqe->xmit_els_rsp.wqe_com,
8701 iocbq->iocb.unsli3.rcvsli3.ox_id);
8702 if (!iocbq->iocb.ulpCt_h && iocbq->iocb.ulpCt_l)
8703 bf_set(wqe_ctxt_tag, &wqe->xmit_els_rsp.wqe_com,
8704 phba->vpi_ids[iocbq->vport->vpi]);
8705 bf_set(wqe_dbde, &wqe->xmit_els_rsp.wqe_com, 1);
8706 bf_set(wqe_iod, &wqe->xmit_els_rsp.wqe_com, LPFC_WQE_IOD_WRITE);
8707 bf_set(wqe_qosd, &wqe->xmit_els_rsp.wqe_com, 1);
8708 bf_set(wqe_lenloc, &wqe->xmit_els_rsp.wqe_com,
8709 LPFC_WQE_LENLOC_WORD3);
8710 bf_set(wqe_ebde_cnt, &wqe->xmit_els_rsp.wqe_com, 0);
8711 bf_set(wqe_rsp_temp_rpi, &wqe->xmit_els_rsp,
8712 phba->sli4_hba.rpi_ids[ndlp->nlp_rpi]);
8713 pcmd = (uint32_t *) (((struct lpfc_dmabuf *)
8714 iocbq->context2)->virt);
8715 if (phba->fc_topology == LPFC_TOPOLOGY_LOOP) {
8716 bf_set(els_rsp64_sp, &wqe->xmit_els_rsp, 1);
8717 bf_set(els_rsp64_sid, &wqe->xmit_els_rsp,
8718 iocbq->vport->fc_myDID);
8719 bf_set(wqe_ct, &wqe->xmit_els_rsp.wqe_com, 1);
8720 bf_set(wqe_ctxt_tag, &wqe->xmit_els_rsp.wqe_com,
8721 phba->vpi_ids[phba->pport->vpi]);
8722 }
8723 command_type = OTHER_COMMAND;
8724 break;
8725 case CMD_CLOSE_XRI_CN:
8726 case CMD_ABORT_XRI_CN:
8727 case CMD_ABORT_XRI_CX:
8728
8729
8730 abrt_iotag = iocbq->iocb.un.acxri.abortContextTag;
8731 if (abrt_iotag != 0 && abrt_iotag <= phba->sli.last_iotag) {
8732 abrtiocbq = phba->sli.iocbq_lookup[abrt_iotag];
8733 fip = abrtiocbq->iocb_flag & LPFC_FIP_ELS_ID_MASK;
8734 } else
8735 fip = 0;
8736
8737 if ((iocbq->iocb.ulpCommand == CMD_CLOSE_XRI_CN) || fip)
8738
8739
8740
8741
8742
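/* For a close or a FIP exchange, set IA so no ABTS goes on the wire */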
8743 bf_set(abort_cmd_ia, &wqe->abort_cmd, 1);
8744 else
8745 bf_set(abort_cmd_ia, &wqe->abort_cmd, 0);
8746 bf_set(abort_cmd_criteria, &wqe->abort_cmd, T_XRI_TAG);
8747
8748 wqe->abort_cmd.rsrvd5 = 0;
8749 bf_set(wqe_ct, &wqe->abort_cmd.wqe_com,
8750 ((iocbq->iocb.ulpCt_h << 1) | iocbq->iocb.ulpCt_l));
8751 abort_tag = iocbq->iocb.un.acxri.abortIoTag;
8752
8753
8754
8755
8756 bf_set(wqe_cmnd, &wqe->abort_cmd.wqe_com, CMD_ABORT_XRI_CX);
8757 bf_set(wqe_qosd, &wqe->abort_cmd.wqe_com, 1);
8758 bf_set(wqe_lenloc, &wqe->abort_cmd.wqe_com,
8759 LPFC_WQE_LENLOC_NONE);
8760 cmnd = CMD_ABORT_XRI_CX;
8761 command_type = OTHER_COMMAND;
8762 xritag = 0;
8763 break;
8764 case CMD_XMIT_BLS_RSP64_CX:
8765 ndlp = (struct lpfc_nodelist *)iocbq->context1;
8766
8767
8768
8769
8770 memset(wqe, 0, sizeof(union lpfc_wqe));
8771
8772 bf_set(xmit_bls_rsp64_oxid, &wqe->xmit_bls_rsp,
8773 bf_get(lpfc_abts_oxid, &iocbq->iocb.un.bls_rsp));
8774 if (bf_get(lpfc_abts_orig, &iocbq->iocb.un.bls_rsp) ==
8775 LPFC_ABTS_UNSOL_INT) {
8776
8777
8778
8779
8780 bf_set(xmit_bls_rsp64_rxid, &wqe->xmit_bls_rsp,
8781 iocbq->sli4_xritag);
8782 } else {
8783
8784
8785
8786
8787 bf_set(xmit_bls_rsp64_rxid, &wqe->xmit_bls_rsp,
8788 bf_get(lpfc_abts_rxid, &iocbq->iocb.un.bls_rsp));
8789 }
8790 bf_set(xmit_bls_rsp64_seqcnthi, &wqe->xmit_bls_rsp, 0xffff);
8791 bf_set(wqe_xmit_bls_pt, &wqe->xmit_bls_rsp.wqe_dest, 0x1);
8792
8793
8794 bf_set(wqe_els_did, &wqe->xmit_bls_rsp.wqe_dest,
8795 ndlp->nlp_DID);
8796 bf_set(xmit_bls_rsp64_temprpi, &wqe->xmit_bls_rsp,
8797 iocbq->iocb.ulpContext);
8798 bf_set(wqe_ct, &wqe->xmit_bls_rsp.wqe_com, 1);
8799 bf_set(wqe_ctxt_tag, &wqe->xmit_bls_rsp.wqe_com,
8800 phba->vpi_ids[phba->pport->vpi]);
8801 bf_set(wqe_qosd, &wqe->xmit_bls_rsp.wqe_com, 1);
8802 bf_set(wqe_lenloc, &wqe->xmit_bls_rsp.wqe_com,
8803 LPFC_WQE_LENLOC_NONE);
8804
8805 command_type = OTHER_COMMAND;
8806 if (iocbq->iocb.un.xseq64.w5.hcsw.Rctl == FC_RCTL_BA_RJT) {
8807 bf_set(xmit_bls_rsp64_rjt_vspec, &wqe->xmit_bls_rsp,
8808 bf_get(lpfc_vndr_code, &iocbq->iocb.un.bls_rsp));
8809 bf_set(xmit_bls_rsp64_rjt_expc, &wqe->xmit_bls_rsp,
8810 bf_get(lpfc_rsn_expln, &iocbq->iocb.un.bls_rsp));
8811 bf_set(xmit_bls_rsp64_rjt_rsnc, &wqe->xmit_bls_rsp,
8812 bf_get(lpfc_rsn_code, &iocbq->iocb.un.bls_rsp));
8813 }
8814
8815 break;
8816 case CMD_XRI_ABORTED_CX:
8817 case CMD_CREATE_XRI_CR:
8818 case CMD_IOCB_FCP_IBIDIR64_CR:
8819 case CMD_FCP_TSEND64_CX:
8820 case CMD_FCP_TRSP64_CX:
8821 case CMD_FCP_AUTO_TRSP_CX:
8822 default:
8823 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
8824 "2014 Invalid command 0x%x\n",
8825 iocbq->iocb.ulpCommand);
8826 return IOCB_ERROR;
8827 break;
8828 }
8829
8830 if (iocbq->iocb_flag & LPFC_IO_DIF_PASS)
8831 bf_set(wqe_dif, &wqe->generic.wqe_com, LPFC_WQE_DIF_PASSTHRU);
8832 else if (iocbq->iocb_flag & LPFC_IO_DIF_STRIP)
8833 bf_set(wqe_dif, &wqe->generic.wqe_com, LPFC_WQE_DIF_STRIP);
8834 else if (iocbq->iocb_flag & LPFC_IO_DIF_INSERT)
8835 bf_set(wqe_dif, &wqe->generic.wqe_com, LPFC_WQE_DIF_INSERT);
8836 iocbq->iocb_flag &= ~(LPFC_IO_DIF_PASS | LPFC_IO_DIF_STRIP |
8837 LPFC_IO_DIF_INSERT);
8838 bf_set(wqe_xri_tag, &wqe->generic.wqe_com, xritag);
8839 bf_set(wqe_reqtag, &wqe->generic.wqe_com, iocbq->iotag);
8840 wqe->generic.wqe_com.abort_tag = abort_tag;
8841 bf_set(wqe_cmd_type, &wqe->generic.wqe_com, command_type);
8842 bf_set(wqe_cmnd, &wqe->generic.wqe_com, cmnd);
8843 bf_set(wqe_class, &wqe->generic.wqe_com, iocbq->iocb.ulpClass);
8844 bf_set(wqe_cqid, &wqe->generic.wqe_com, LPFC_WQE_CQ_ID_DEFAULT);
8845 return 0;
8846}
8847
8848
8849
8850
8851
8852
8853
8854
8855
8856
8857
8858
8859
8860
8861
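/**
 * __lpfc_sli_issue_iocb_s4 - SLI4 lockless version of iocb issue
 * @phba: Pointer to HBA context object.
 * @ring_number: SLI ring number on which the command is issued.
 * @piocb: Pointer to the command iocb object.
 * @flag: SLI_IOCB_RET_IOCB to return a busy iocb instead of queuing it.
 *
 * Allocates an sglq when the command needs one, converts the iocb to a
 * work queue entry, and posts it to the matching FCP, OAS, or ELS work
 * queue. Caller must hold the hbalock.
 *
 * Returns 0 when posted, IOCB_SUCCESS when queued to the txq, IOCB_BUSY
 * or IOCB_ERROR otherwise.
 **/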
8862static int
8863__lpfc_sli_issue_iocb_s4(struct lpfc_hba *phba, uint32_t ring_number,
8864 struct lpfc_iocbq *piocb, uint32_t flag)
8865{
8866 struct lpfc_sglq *sglq;
8867 union lpfc_wqe *wqe;
8868 union lpfc_wqe128 wqe128;
8869 struct lpfc_queue *wq;
8870 struct lpfc_sli_ring *pring = &phba->sli.ring[ring_number];
8871
8872 lockdep_assert_held(&phba->hbalock);
8873
8874
8875
8876
8877
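/* Build the command in a local 128-byte WQE; the wq_put copy uses the queue's entry_size */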
8878 wqe = (union lpfc_wqe *)&wqe128;
8879
8880 if (piocb->sli4_xritag == NO_XRI) {
8881 if (piocb->iocb.ulpCommand == CMD_ABORT_XRI_CN ||
8882 piocb->iocb.ulpCommand == CMD_CLOSE_XRI_CN)
8883 sglq = NULL;
8884 else {
8885 if (!list_empty(&pring->txq)) {
8886 if (!(flag & SLI_IOCB_RET_IOCB)) {
8887 __lpfc_sli_ringtx_put(phba,
8888 pring, piocb);
8889 return IOCB_SUCCESS;
8890 } else {
8891 return IOCB_BUSY;
8892 }
8893 } else {
8894 sglq = __lpfc_sli_get_sglq(phba, piocb);
8895 if (!sglq) {
8896 if (!(flag & SLI_IOCB_RET_IOCB)) {
8897 __lpfc_sli_ringtx_put(phba,
8898 pring,
8899 piocb);
8900 return IOCB_SUCCESS;
8901 } else
8902 return IOCB_BUSY;
8903 }
8904 }
8905 }
8906 } else if (piocb->iocb_flag & LPFC_IO_FCP) {
8907
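/* FCP commands already own an XRI and a mapped sgl */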
8908 sglq = NULL;
8909 } else {
8910
8911
8912
8913
8914 sglq = __lpfc_get_active_sglq(phba, piocb->sli4_lxritag);
8915 if (!sglq)
8916 return IOCB_ERROR;
8917 }
8918
8919 if (sglq) {
8920 piocb->sli4_lxritag = sglq->sli4_lxritag;
8921 piocb->sli4_xritag = sglq->sli4_xritag;
8922 if (NO_XRI == lpfc_sli4_bpl2sgl(phba, piocb, sglq))
8923 return IOCB_ERROR;
8924 }
8925
8926 if (lpfc_sli4_iocb2wqe(phba, piocb, wqe))
8927 return IOCB_ERROR;
8928
8929 if ((piocb->iocb_flag & LPFC_IO_FCP) ||
8930 (piocb->iocb_flag & LPFC_USE_FCPWQIDX)) {
8931 if (!phba->cfg_fof || (!(piocb->iocb_flag & LPFC_IO_OAS))) {
8932 wq = phba->sli4_hba.fcp_wq[piocb->fcp_wqidx];
8933 } else {
8934 wq = phba->sli4_hba.oas_wq;
8935 }
8936 if (lpfc_sli4_wq_put(wq, wqe))
8937 return IOCB_ERROR;
8938 } else {
8939 if (unlikely(!phba->sli4_hba.els_wq))
8940 return IOCB_ERROR;
8941 if (lpfc_sli4_wq_put(phba->sli4_hba.els_wq, wqe))
8942 return IOCB_ERROR;
8943 }
8944 lpfc_sli_ringtxcmpl_put(phba, pring, piocb);
8945
8946 return 0;
8947}
8948
8949
8950
8951
8952
8953
8954
8955
8956
8957
8958
8959
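/**
 * __lpfc_sli_issue_iocb - Wrapper for the lockless iocb issue routine
 * @phba: Pointer to HBA context object.
 * @ring_number: SLI ring number on which the command is issued.
 * @piocb: Pointer to the command iocb object.
 * @flag: Issue flags, e.g. SLI_IOCB_RET_IOCB.
 *
 * Dispatches to the SLI revision specific routine installed by
 * lpfc_sli_api_table_setup().
 **/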
8960int
8961__lpfc_sli_issue_iocb(struct lpfc_hba *phba, uint32_t ring_number,
8962 struct lpfc_iocbq *piocb, uint32_t flag)
8963{
8964 return phba->__lpfc_sli_issue_iocb(phba, ring_number, piocb, flag);
8965}
8966
8967
8968
8969
8970
8971
8972
8973
8974
8975
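/**
 * lpfc_sli_api_table_setup - Set up the SLI API jump table
 * @phba: Pointer to HBA context object.
 * @dev_grp: HBA PCI-device group (LPFC_PCI_DEV_LP or LPFC_PCI_DEV_OC).
 *
 * Returns 0 on success, -ENODEV for an unrecognized device group.
 **/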
8976int
8977lpfc_sli_api_table_setup(struct lpfc_hba *phba, uint8_t dev_grp)
8978{
8979
8980 switch (dev_grp) {
8981 case LPFC_PCI_DEV_LP:
8982 phba->__lpfc_sli_issue_iocb = __lpfc_sli_issue_iocb_s3;
8983 phba->__lpfc_sli_release_iocbq = __lpfc_sli_release_iocbq_s3;
8984 break;
8985 case LPFC_PCI_DEV_OC:
8986 phba->__lpfc_sli_issue_iocb = __lpfc_sli_issue_iocb_s4;
8987 phba->__lpfc_sli_release_iocbq = __lpfc_sli_release_iocbq_s4;
8988 break;
8989 default:
8990 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
8991 "1419 Invalid HBA PCI-device group: 0x%x\n",
8992 dev_grp);
8993 return -ENODEV;
8995 }
8996 phba->lpfc_get_iocb_from_iocbq = lpfc_get_iocb_from_iocbq;
8997 return 0;
8998}
8999
9000
9001
9002
9003
9004
9005
9006
9007
9008
9009
9010
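/**
 * lpfc_sli_calc_ring - Select the ring an iocb should be issued on
 * @phba: Pointer to HBA context object.
 * @ring_number: The caller's requested ring number.
 * @piocb: Pointer to the command iocb object.
 *
 * On SLI4, FCP commands are redirected to the ring backing the selected
 * FCP work queue, or to the OAS ring for optimized access commands;
 * other SLI revisions keep the caller's ring number.
 *
 * Returns the ring number to use, or LPFC_HBA_ERROR when the required
 * work queues do not exist.
 **/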
9011static int
9012lpfc_sli_calc_ring(struct lpfc_hba *phba, uint32_t ring_number,
9013 struct lpfc_iocbq *piocb)
9014{
9015 if (phba->sli_rev < LPFC_SLI_REV4)
9016 return ring_number;
9017
9018 if (piocb->iocb_flag & (LPFC_IO_FCP | LPFC_USE_FCPWQIDX)) {
9019 if (!(phba->cfg_fof) ||
9020 (!(piocb->iocb_flag & LPFC_IO_FOF))) {
9021 if (unlikely(!phba->sli4_hba.fcp_wq))
9022 return LPFC_HBA_ERROR;
9023
9024
9025
9026
9027 if (!(piocb->iocb_flag & LPFC_USE_FCPWQIDX))
9028 piocb->fcp_wqidx =
9029 lpfc_sli4_scmd_to_wqidx_distr(phba,
9030 piocb->context1);
9031 ring_number = MAX_SLI3_CONFIGURED_RINGS +
9032 piocb->fcp_wqidx;
9033 } else {
9034 if (unlikely(!phba->sli4_hba.oas_wq))
9035 return LPFC_HBA_ERROR;
9036 piocb->fcp_wqidx = 0;
9037 ring_number = LPFC_FCP_OAS_RING;
9038 }
9039 }
9040 return ring_number;
9041}
9042
9043
9044
9045
9046
9047
9048
9049
9050
9051
9052
9053
9054
9055
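/**
 * lpfc_sli_issue_iocb - Wrapper function for issuing an iocb
 * @phba: Pointer to HBA context object.
 * @ring_number: SLI ring number on which the command is issued.
 * @piocb: Pointer to the command iocb object.
 * @flag: Issue flags, e.g. SLI_IOCB_RET_IOCB.
 *
 * Takes the appropriate lock (per-ring for SLI4, hbalock otherwise),
 * issues the iocb, and, when FCP look-ahead is enabled and the channel
 * is otherwise idle, drains and re-arms the channel's event queue.
 **/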
9056int
9057lpfc_sli_issue_iocb(struct lpfc_hba *phba, uint32_t ring_number,
9058 struct lpfc_iocbq *piocb, uint32_t flag)
9059{
9060 struct lpfc_fcp_eq_hdl *fcp_eq_hdl;
9061 struct lpfc_sli_ring *pring;
9062 struct lpfc_queue *fpeq;
9063 struct lpfc_eqe *eqe;
9064 unsigned long iflags;
9065 int rc, idx;
9066
9067 if (phba->sli_rev == LPFC_SLI_REV4) {
9068 ring_number = lpfc_sli_calc_ring(phba, ring_number, piocb);
9069 if (unlikely(ring_number == LPFC_HBA_ERROR))
9070 return IOCB_ERROR;
9071 idx = piocb->fcp_wqidx;
9072
9073 pring = &phba->sli.ring[ring_number];
9074 spin_lock_irqsave(&pring->ring_lock, iflags);
9075 rc = __lpfc_sli_issue_iocb(phba, ring_number, piocb, flag);
9076 spin_unlock_irqrestore(&pring->ring_lock, iflags);
9077
9078 if (lpfc_fcp_look_ahead && (piocb->iocb_flag & LPFC_IO_FCP)) {
9079 fcp_eq_hdl = &phba->sli4_hba.fcp_eq_hdl[idx];
9080
9081 if (atomic_dec_and_test(&fcp_eq_hdl->
9082 fcp_eq_in_use)) {
9083
9084
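/* Channel is idle: drain its event queue and re-arm it */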
9085 fpeq = phba->sli4_hba.hba_eq[idx];
9086
9087
9088 lpfc_sli4_eq_clr_intr(fpeq);
9089
9090
9091
9092
9093 while ((eqe = lpfc_sli4_eq_get(fpeq))) {
9094 lpfc_sli4_hba_handle_eqe(phba,
9095 eqe, idx);
9096 fpeq->EQ_processed++;
9097 }
9098
9099
9100 lpfc_sli4_eq_release(fpeq,
9101 LPFC_QUEUE_REARM);
9102 }
9103 atomic_inc(&fcp_eq_hdl->fcp_eq_in_use);
9104 }
9105 } else {
9106
9107 spin_lock_irqsave(&phba->hbalock, iflags);
9108 rc = __lpfc_sli_issue_iocb(phba, ring_number, piocb, flag);
9109 spin_unlock_irqrestore(&phba->hbalock, iflags);
9110 }
9111 return rc;
9112}
9113
9114
9115
9116
9117
9118
9119
9120
9121
9122
9123
9124
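/**
 * lpfc_extra_ring_setup - Configure the extra SLI ring
 * @phba: Pointer to HBA context object.
 *
 * Moves the extra command and response ring entries from the FCP ring to
 * the extra ring and programs the extra ring's single unsolicited event
 * mask from the multi-ring configuration parameters.
 *
 * Always returns 0.
 **/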
9125static int
9126lpfc_extra_ring_setup(struct lpfc_hba *phba)
9127{
9128 struct lpfc_sli *psli;
9129 struct lpfc_sli_ring *pring;
9130
9131 psli = &phba->sli;
9132
9133
9134
9135
9136 pring = &psli->ring[psli->fcp_ring];
9137 pring->sli.sli3.numCiocb -= SLI2_IOCB_CMD_R1XTRA_ENTRIES;
9138 pring->sli.sli3.numRiocb -= SLI2_IOCB_RSP_R1XTRA_ENTRIES;
9139 pring->sli.sli3.numCiocb -= SLI2_IOCB_CMD_R3XTRA_ENTRIES;
9140 pring->sli.sli3.numRiocb -= SLI2_IOCB_RSP_R3XTRA_ENTRIES;
9141
9142
9143 pring = &psli->ring[psli->extra_ring];
9144
9145 pring->sli.sli3.numCiocb += SLI2_IOCB_CMD_R1XTRA_ENTRIES;
9146 pring->sli.sli3.numRiocb += SLI2_IOCB_RSP_R1XTRA_ENTRIES;
9147 pring->sli.sli3.numCiocb += SLI2_IOCB_CMD_R3XTRA_ENTRIES;
9148 pring->sli.sli3.numRiocb += SLI2_IOCB_RSP_R3XTRA_ENTRIES;
9149
9150
9151 pring->iotag_max = 4096;
9152 pring->num_mask = 1;
9153 pring->prt[0].profile = 0;
9154 pring->prt[0].rctl = phba->cfg_multi_ring_rctl;
9155 pring->prt[0].type = phba->cfg_multi_ring_type;
9156 pring->prt[0].lpfc_sli_rcv_unsol_event = NULL;
9157 return 0;
9158}
9159
9160
9161
9162
9163
9164
9165
9166
9167
9168
9169
9170
9171
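/**
 * lpfc_sli_abts_err_handler - Handle a port-generated ABTS async event
 * @phba: Pointer to HBA context object.
 * @iocbq: Pointer to the async status iocb carrying the event.
 *
 * Looks up the vport and node the aborted exchange belongs to and, for a
 * local-reject status, starts port recovery on that node.
 **/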
9172static void
9173lpfc_sli_abts_err_handler(struct lpfc_hba *phba,
9174 struct lpfc_iocbq *iocbq)
9175{
9176 struct lpfc_nodelist *ndlp = NULL;
9177 uint16_t rpi = 0, vpi = 0;
9178 struct lpfc_vport *vport = NULL;
9179
9180
9181 vpi = iocbq->iocb.un.asyncstat.sub_ctxt_tag;
9182 rpi = iocbq->iocb.ulpContext;
9183
9184 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
9185 "3092 Port generated ABTS async event "
9186 "on vpi %d rpi %d status 0x%x\n",
9187 vpi, rpi, iocbq->iocb.ulpStatus);
9188
9189 vport = lpfc_find_vport_by_vpid(phba, vpi);
9190 if (!vport)
9191 goto err_exit;
9192 ndlp = lpfc_findnode_rpi(vport, rpi);
9193 if (!ndlp || !NLP_CHK_NODE_ACT(ndlp))
9194 goto err_exit;
9195
9196 if (iocbq->iocb.ulpStatus == IOSTAT_LOCAL_REJECT)
9197 lpfc_sli_abts_recover_port(vport, ndlp);
9198 return;
9199
9200 err_exit:
9201 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
9202 "3095 Event Context not found, no "
9203 "action on vpi %d rpi %d status 0x%x, reason 0x%x\n",
9204 vpi, rpi, iocbq->iocb.ulpStatus,
9205 iocbq->iocb.ulpContext);
9206}
9207
9208
9209
9210
9211
9212
9213
9214
9215
9216
9217
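/**
 * lpfc_sli4_abts_err_handler - Handle a port-generated SLI4 XRI abort event
 * @phba: Pointer to HBA context object.
 * @ndlp: Pointer to the node the aborted XRI belongs to.
 * @axri: Pointer to the XRI-aborted work queue completion entry.
 *
 * Logs the FCP XRI abort event and, for a local reject whose extended
 * status is a sequence timeout (or empty), starts port recovery on the
 * node.
 **/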
9218void
9219lpfc_sli4_abts_err_handler(struct lpfc_hba *phba,
9220 struct lpfc_nodelist *ndlp,
9221 struct sli4_wcqe_xri_aborted *axri)
9222{
9223 struct lpfc_vport *vport;
9224 uint32_t ext_status = 0;
9225
9226 if (!ndlp || !NLP_CHK_NODE_ACT(ndlp)) {
9227 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
9228 "3115 Node Context not found, driver "
9229 "ignoring abts err event\n");
9230 return;
9231 }
9232
9233 vport = ndlp->vport;
9234 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
9235 "3116 Port generated FCP XRI ABORT event on "
9236 "vpi %d rpi %d xri x%x status 0x%x parameter x%x\n",
9237 ndlp->vport->vpi, phba->sli4_hba.rpi_ids[ndlp->nlp_rpi],
9238 bf_get(lpfc_wcqe_xa_xri, axri),
9239 bf_get(lpfc_wcqe_xa_status, axri),
9240 axri->parameter);
9241
9242
9243
9244
9245
9246
9247 ext_status = axri->parameter & IOERR_PARAM_MASK;
9248 if ((bf_get(lpfc_wcqe_xa_status, axri) == IOSTAT_LOCAL_REJECT) &&
9249 ((ext_status == IOERR_SEQUENCE_TIMEOUT) || (ext_status == 0)))
9250 lpfc_sli_abts_recover_port(vport, ndlp);
9251}
9252
9253
9254
9255
9256
9257
9258
9259
9260
9261
9262
9263
9264
9265
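/**
 * lpfc_sli_async_event_handler - Handle asynchronous slow ring events
 * @phba: Pointer to HBA context object.
 * @pring: Pointer to driver SLI ring object.
 * @iocbq: Pointer to the iocb carrying the async event.
 *
 * Posts temperature threshold events to the FC transport as vendor
 * events and hands ABTS status events to the abort error handler;
 * unrecognized event codes are logged along with a dump of the iocb
 * words.
 **/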
9266static void
9267lpfc_sli_async_event_handler(struct lpfc_hba *phba,
9268 struct lpfc_sli_ring *pring, struct lpfc_iocbq *iocbq)
9269{
9270 IOCB_t *icmd;
9271 uint16_t evt_code;
9272 struct temp_event temp_event_data;
9273 struct Scsi_Host *shost;
9274 uint32_t *iocb_w;
9275
9276 icmd = &iocbq->iocb;
9277 evt_code = icmd->un.asyncstat.evt_code;
9278
9279 switch (evt_code) {
9280 case ASYNC_TEMP_WARN:
9281 case ASYNC_TEMP_SAFE:
9282 temp_event_data.data = (uint32_t) icmd->ulpContext;
9283 temp_event_data.event_type = FC_REG_TEMPERATURE_EVENT;
9284 if (evt_code == ASYNC_TEMP_WARN) {
9285 temp_event_data.event_code = LPFC_THRESHOLD_TEMP;
9286 lpfc_printf_log(phba, KERN_ERR, LOG_TEMP,
9287 "0347 Adapter is very hot, please take "
9288 "corrective action. temperature : %d Celsius\n",
9289 (uint32_t) icmd->ulpContext);
9290 } else {
9291 temp_event_data.event_code = LPFC_NORMAL_TEMP;
9292 lpfc_printf_log(phba, KERN_ERR, LOG_TEMP,
9293 "0340 Adapter temperature is OK now. "
9294 "temperature : %d Celsius\n",
9295 (uint32_t) icmd->ulpContext);
9296 }
9297
9298
9299 shost = lpfc_shost_from_vport(phba->pport);
9300 fc_host_post_vendor_event(shost, fc_get_event_number(),
9301 sizeof(temp_event_data), (char *) &temp_event_data,
9302 LPFC_NL_VENDOR_ID);
9303 break;
9304 case ASYNC_STATUS_CN:
9305 lpfc_sli_abts_err_handler(phba, iocbq);
9306 break;
9307 default:
9308 iocb_w = (uint32_t *) icmd;
9309 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
9310 "0346 Ring %d handler: unexpected ASYNC_STATUS"
9311 " evt_code 0x%x\n"
9312 "W0 0x%08x W1 0x%08x W2 0x%08x W3 0x%08x\n"
9313 "W4 0x%08x W5 0x%08x W6 0x%08x W7 0x%08x\n"
9314 "W8 0x%08x W9 0x%08x W10 0x%08x W11 0x%08x\n"
9315 "W12 0x%08x W13 0x%08x W14 0x%08x W15 0x%08x\n",
9316 pring->ringno, icmd->un.asyncstat.evt_code,
9317 iocb_w[0], iocb_w[1], iocb_w[2], iocb_w[3],
9318 iocb_w[4], iocb_w[5], iocb_w[6], iocb_w[7],
9319 iocb_w[8], iocb_w[9], iocb_w[10], iocb_w[11],
9320 iocb_w[12], iocb_w[13], iocb_w[14], iocb_w[15]);
9321
9322 break;
9323 }
9324}
9325
9326
9327
9328
9329
9330
9331
9332
9333
9334
9335
9336
9337
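/**
 * lpfc_sli_setup - SLI ring setup function
 * @phba: Pointer to HBA context object.
 *
 * Sets the number of rings and, for each ring, the command and response
 * entry counts and sizes, iotag ranges, and unsolicited event handlers,
 * then checks that everything fits in the SLIM and optionally configures
 * the extra ring.
 *
 * Always returns 0.
 **/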
9338int
9339lpfc_sli_setup(struct lpfc_hba *phba)
9340{
9341 int i, totiocbsize = 0;
9342 struct lpfc_sli *psli = &phba->sli;
9343 struct lpfc_sli_ring *pring;
9344
9345 psli->num_rings = MAX_SLI3_CONFIGURED_RINGS;
9346 if (phba->sli_rev == LPFC_SLI_REV4)
9347 psli->num_rings += phba->cfg_fcp_io_channel;
9348 psli->sli_flag = 0;
9349 psli->fcp_ring = LPFC_FCP_RING;
9350 psli->next_ring = LPFC_FCP_NEXT_RING;
9351 psli->extra_ring = LPFC_EXTRA_RING;
9352
9353 psli->iocbq_lookup = NULL;
9354 psli->iocbq_lookup_len = 0;
9355 psli->last_iotag = 0;
9356
9357 for (i = 0; i < psli->num_rings; i++) {
9358 pring = &psli->ring[i];
9359 switch (i) {
9360 case LPFC_FCP_RING:
9361
9362 pring->sli.sli3.numCiocb = SLI2_IOCB_CMD_R0_ENTRIES;
9363 pring->sli.sli3.numRiocb = SLI2_IOCB_RSP_R0_ENTRIES;
9364 pring->sli.sli3.numCiocb +=
9365 SLI2_IOCB_CMD_R1XTRA_ENTRIES;
9366 pring->sli.sli3.numRiocb +=
9367 SLI2_IOCB_RSP_R1XTRA_ENTRIES;
9368 pring->sli.sli3.numCiocb +=
9369 SLI2_IOCB_CMD_R3XTRA_ENTRIES;
9370 pring->sli.sli3.numRiocb +=
9371 SLI2_IOCB_RSP_R3XTRA_ENTRIES;
9372 pring->sli.sli3.sizeCiocb = (phba->sli_rev == 3) ?
9373 SLI3_IOCB_CMD_SIZE :
9374 SLI2_IOCB_CMD_SIZE;
9375 pring->sli.sli3.sizeRiocb = (phba->sli_rev == 3) ?
9376 SLI3_IOCB_RSP_SIZE :
9377 SLI2_IOCB_RSP_SIZE;
9378 pring->iotag_ctr = 0;
9379 pring->iotag_max =
9380 (phba->cfg_hba_queue_depth * 2);
9381 pring->fast_iotag = pring->iotag_max;
9382 pring->num_mask = 0;
9383 break;
9384 case LPFC_EXTRA_RING:
9385
9386 pring->sli.sli3.numCiocb = SLI2_IOCB_CMD_R1_ENTRIES;
9387 pring->sli.sli3.numRiocb = SLI2_IOCB_RSP_R1_ENTRIES;
9388 pring->sli.sli3.sizeCiocb = (phba->sli_rev == 3) ?
9389 SLI3_IOCB_CMD_SIZE :
9390 SLI2_IOCB_CMD_SIZE;
9391 pring->sli.sli3.sizeRiocb = (phba->sli_rev == 3) ?
9392 SLI3_IOCB_RSP_SIZE :
9393 SLI2_IOCB_RSP_SIZE;
9394 pring->iotag_max = phba->cfg_hba_queue_depth;
9395 pring->num_mask = 0;
9396 break;
9397 case LPFC_ELS_RING:
9398
9399 pring->sli.sli3.numCiocb = SLI2_IOCB_CMD_R2_ENTRIES;
9400 pring->sli.sli3.numRiocb = SLI2_IOCB_RSP_R2_ENTRIES;
9401 pring->sli.sli3.sizeCiocb = (phba->sli_rev == 3) ?
9402 SLI3_IOCB_CMD_SIZE :
9403 SLI2_IOCB_CMD_SIZE;
9404 pring->sli.sli3.sizeRiocb = (phba->sli_rev == 3) ?
9405 SLI3_IOCB_RSP_SIZE :
9406 SLI2_IOCB_RSP_SIZE;
9407 pring->fast_iotag = 0;
9408 pring->iotag_ctr = 0;
9409 pring->iotag_max = 4096;
9410 pring->lpfc_sli_rcv_async_status =
9411 lpfc_sli_async_event_handler;
9412 pring->num_mask = LPFC_MAX_RING_MASK;
9413 pring->prt[0].profile = 0;
9414 pring->prt[0].rctl = FC_RCTL_ELS_REQ;
9415 pring->prt[0].type = FC_TYPE_ELS;
9416 pring->prt[0].lpfc_sli_rcv_unsol_event =
9417 lpfc_els_unsol_event;
9418 pring->prt[1].profile = 0;
9419 pring->prt[1].rctl = FC_RCTL_ELS_REP;
9420 pring->prt[1].type = FC_TYPE_ELS;
9421 pring->prt[1].lpfc_sli_rcv_unsol_event =
9422 lpfc_els_unsol_event;
9423 pring->prt[2].profile = 0;
9424
9425 pring->prt[2].rctl = FC_RCTL_DD_UNSOL_CTL;
9426
9427 pring->prt[2].type = FC_TYPE_CT;
9428 pring->prt[2].lpfc_sli_rcv_unsol_event =
9429 lpfc_ct_unsol_event;
9430 pring->prt[3].profile = 0;
9431
9432 pring->prt[3].rctl = FC_RCTL_DD_SOL_CTL;
9433
9434 pring->prt[3].type = FC_TYPE_CT;
9435 pring->prt[3].lpfc_sli_rcv_unsol_event =
9436 lpfc_ct_unsol_event;
9437 break;
9438 }
9439 totiocbsize += (pring->sli.sli3.numCiocb *
9440 pring->sli.sli3.sizeCiocb) +
9441 (pring->sli.sli3.numRiocb * pring->sli.sli3.sizeRiocb);
9442 }
9443 if (totiocbsize > MAX_SLIM_IOCB_SIZE) {
9444
9445 printk(KERN_ERR "%d:0462 Too many cmd / rsp ring entries in "
9446 "SLI2 SLIM Data: x%x x%lx\n",
9447 phba->brd_no, totiocbsize,
9448 (unsigned long) MAX_SLIM_IOCB_SIZE);
9449 }
9450 if (phba->cfg_multi_ring_support == 2)
9451 lpfc_extra_ring_setup(phba);
9452
9453 return 0;
9454}
9455
9456
9457
9458
9459
9460
9461
9462
9463
9464
9465
9466
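/**
 * lpfc_sli_queue_setup - Initialize the driver's internal SLI queues
 * @phba: Pointer to HBA context object.
 *
 * Initializes the mailbox queues and every ring's indices, list heads,
 * and ring lock.
 *
 * Always returns 1.
 **/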
9467int
9468lpfc_sli_queue_setup(struct lpfc_hba *phba)
9469{
9470 struct lpfc_sli *psli;
9471 struct lpfc_sli_ring *pring;
9472 int i;
9473
9474 psli = &phba->sli;
9475 spin_lock_irq(&phba->hbalock);
9476 INIT_LIST_HEAD(&psli->mboxq);
9477 INIT_LIST_HEAD(&psli->mboxq_cmpl);
9478
9479 for (i = 0; i < psli->num_rings; i++) {
9480 pring = &psli->ring[i];
9481 pring->ringno = i;
9482 pring->sli.sli3.next_cmdidx = 0;
9483 pring->sli.sli3.local_getidx = 0;
9484 pring->sli.sli3.cmdidx = 0;
9485 pring->flag = 0;
9486 INIT_LIST_HEAD(&pring->txq);
9487 INIT_LIST_HEAD(&pring->txcmplq);
9488 INIT_LIST_HEAD(&pring->iocb_continueq);
9489 INIT_LIST_HEAD(&pring->iocb_continue_saveq);
9490 INIT_LIST_HEAD(&pring->postbufq);
9491 spin_lock_init(&pring->ring_lock);
9492 }
9493 spin_unlock_irq(&phba->hbalock);
9494 return 1;
9495}
9496
9497
9498
9499
9500
9501
9502
9503
9504
9505
9506
9507
9508
9509
9510
9511
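/**
 * lpfc_sli_mbox_sys_flush - Flush the mailbox subsystem
 * @phba: Pointer to HBA context object.
 *
 * Collects the pending, active, and completed-but-unprocessed mailbox
 * commands onto a local list and completes each with MBX_NOT_FINISHED
 * status.
 **/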
9512static void
9513lpfc_sli_mbox_sys_flush(struct lpfc_hba *phba)
9514{
9515 LIST_HEAD(completions);
9516 struct lpfc_sli *psli = &phba->sli;
9517 LPFC_MBOXQ_t *pmb;
9518 unsigned long iflag;
9519
9520
9521 spin_lock_irqsave(&phba->hbalock, iflag);
9522
9523 list_splice_init(&phba->sli.mboxq, &completions);
9524
9525 if (psli->mbox_active) {
9526 list_add_tail(&psli->mbox_active->list, &completions);
9527 psli->mbox_active = NULL;
9528 psli->sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
9529 }
9530
9531 list_splice_init(&phba->sli.mboxq_cmpl, &completions);
9532 spin_unlock_irqrestore(&phba->hbalock, iflag);
9533
9534
9535 while (!list_empty(&completions)) {
9536 list_remove_head(&completions, pmb, LPFC_MBOXQ_t, list);
9537 pmb->u.mb.mbxStatus = MBX_NOT_FINISHED;
9538 if (pmb->mbox_cmpl)
9539 pmb->mbox_cmpl(phba, pmb);
9540 }
9541}
9542
9543
9544
9545
9546
9547
9548
9549
9550
9551
9552
9553
9554
9555
9556
9557
9558
9559
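/**
 * lpfc_sli_host_down - Stop SLI activity for a vport
 * @vport: Pointer to the virtual port object.
 *
 * Cleans up the vport's discovery resources, pulls its iocbs off every
 * ring's txq, aborts its outstanding iocbs on the txcmplq, and cancels
 * the collected iocbs with a local-reject/SLI-down status.
 *
 * Always returns 1.
 **/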
9560int
9561lpfc_sli_host_down(struct lpfc_vport *vport)
9562{
9563 LIST_HEAD(completions);
9564 struct lpfc_hba *phba = vport->phba;
9565 struct lpfc_sli *psli = &phba->sli;
9566 struct lpfc_sli_ring *pring;
9567 struct lpfc_iocbq *iocb, *next_iocb;
9568 int i;
9569 unsigned long flags = 0;
9570 uint16_t prev_pring_flag;
9571
9572 lpfc_cleanup_discovery_resources(vport);
9573
9574 spin_lock_irqsave(&phba->hbalock, flags);
9575 for (i = 0; i < psli->num_rings; i++) {
9576 pring = &psli->ring[i];
9577 prev_pring_flag = pring->flag;
9578
9579 if (pring->ringno == LPFC_ELS_RING) {
9580 pring->flag |= LPFC_DEFERRED_RING_EVENT;
9581
9582 set_bit(LPFC_DATA_READY, &phba->data_flags);
9583 }
9584
9585
9586
9587
9588 list_for_each_entry_safe(iocb, next_iocb, &pring->txq, list) {
9589 if (iocb->vport != vport)
9590 continue;
9591 list_move_tail(&iocb->list, &completions);
9592 }
9593
9594
9595 list_for_each_entry_safe(iocb, next_iocb, &pring->txcmplq,
9596 list) {
9597 if (iocb->vport != vport)
9598 continue;
9599 lpfc_sli_issue_abort_iotag(phba, pring, iocb);
9600 }
9601
9602 pring->flag = prev_pring_flag;
9603 }
9604
9605 spin_unlock_irqrestore(&phba->hbalock, flags);
9606
9607
9608 lpfc_sli_cancel_iocbs(phba, &completions, IOSTAT_LOCAL_REJECT,
9609 IOERR_SLI_DOWN);
9610 return 1;
9611}
9612
9613
9614
9615
9616
9617
9618
9619
9620
9621
9622
9623
9624
9625
9626
9627
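/**
 * lpfc_sli_hba_down - Stop SLI layer activity for the HBA
 * @phba: Pointer to HBA context object.
 *
 * Shuts down the mailbox subsystem, flushes every ring's txq and cancels
 * the flushed iocbs with a local-reject/SLI-down status, frees the
 * posted ELS buffers, and stops the mailbox timeout handling.
 *
 * Always returns 1.
 **/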
9628int
9629lpfc_sli_hba_down(struct lpfc_hba *phba)
9630{
9631 LIST_HEAD(completions);
9632 struct lpfc_sli *psli = &phba->sli;
9633 struct lpfc_sli_ring *pring;
9634 struct lpfc_dmabuf *buf_ptr;
9635 unsigned long flags = 0;
9636 int i;
9637
9638
9639 lpfc_sli_mbox_sys_shutdown(phba, LPFC_MBX_WAIT);
9640
9641 lpfc_hba_down_prep(phba);
9642
9643 lpfc_fabric_abort_hba(phba);
9644
9645 spin_lock_irqsave(&phba->hbalock, flags);
9646 for (i = 0; i < psli->num_rings; i++) {
9647 pring = &psli->ring[i];
9648
9649 if (pring->ringno == LPFC_ELS_RING) {
9650 pring->flag |= LPFC_DEFERRED_RING_EVENT;
9651
9652 set_bit(LPFC_DATA_READY, &phba->data_flags);
9653 }
9654
9655
9656
9657
9658
9659 list_splice_init(&pring->txq, &completions);
9660 }
9661 spin_unlock_irqrestore(&phba->hbalock, flags);
9662
9663
9664 lpfc_sli_cancel_iocbs(phba, &completions, IOSTAT_LOCAL_REJECT,
9665 IOERR_SLI_DOWN);
9666
9667 spin_lock_irqsave(&phba->hbalock, flags);
9668 list_splice_init(&phba->elsbuf, &completions);
9669 phba->elsbuf_cnt = 0;
9670 phba->elsbuf_prev_cnt = 0;
9671 spin_unlock_irqrestore(&phba->hbalock, flags);
9672
9673 while (!list_empty(&completions)) {
9674 list_remove_head(&completions, buf_ptr,
9675 struct lpfc_dmabuf, list);
9676 lpfc_mbuf_free(phba, buf_ptr->virt, buf_ptr->phys);
9677 kfree(buf_ptr);
9678 }
9679
9680
9681 del_timer_sync(&psli->mbox_tmo);
9682
9683 spin_lock_irqsave(&phba->pport->work_port_lock, flags);
9684 phba->pport->work_port_events &= ~WORKER_MBOX_TMO;
9685 spin_unlock_irqrestore(&phba->pport->work_port_lock, flags);
9686
9687 return 1;
9688}
9689
9690
9691
9692
9693
9694
9695
9696
9697
9698
9699
9700
9701
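/**
 * lpfc_sli_pcimem_bcopy - Copy little-endian PCI memory word by word
 * @srcp: Source buffer pointer.
 * @destp: Destination buffer pointer.
 * @cnt: Number of bytes to copy (a multiple of the 32-bit word size).
 *
 * Copies 32-bit words from source to destination, converting each word
 * from little-endian to CPU byte order.
 **/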
9702void
9703lpfc_sli_pcimem_bcopy(void *srcp, void *destp, uint32_t cnt)
9704{
9705 uint32_t *src = srcp;
9706 uint32_t *dest = destp;
9707 uint32_t ldata;
9708 int i;
9709
9710 for (i = 0; i < (int)cnt; i += sizeof(uint32_t)) {
9711 ldata = *src;
9712 ldata = le32_to_cpu(ldata);
9713 *dest = ldata;
9714 src++;
9715 dest++;
9716 }
9717}
9718
9719
9720
9721
9722
9723
9724
9725
9726
9727
9728
9729
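/**
 * lpfc_sli_bemem_bcopy - Copy big-endian memory word by word
 * @srcp: Source buffer pointer.
 * @destp: Destination buffer pointer.
 * @cnt: Number of bytes to copy (a multiple of the 32-bit word size).
 *
 * Copies 32-bit words from source to destination, converting each word
 * from big-endian to CPU byte order.
 **/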
9730void
9731lpfc_sli_bemem_bcopy(void *srcp, void *destp, uint32_t cnt)
9732{
9733 uint32_t *src = srcp;
9734 uint32_t *dest = destp;
9735 uint32_t ldata;
9736 int i;
9737
9738 for (i = 0; i < (int)cnt; i += sizeof(uint32_t)) {
9739 ldata = *src;
9740 ldata = be32_to_cpu(ldata);
9741 *dest = ldata;
9742 src++;
9743 dest++;
9744 }
9745}
9746
9747
9748
9749
9750
9751
9752
9753
9754
9755
9756
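/**
 * lpfc_sli_ringpostbuf_put - Function to add a buffer to postbufq
 * @phba: Pointer to HBA context object.
 * @pring: Pointer to driver SLI ring object.
 * @mp: Pointer to driver buffer object.
 *
 * Adds the buffer to the ring's postbufq under the hbalock so that it
 * can later be matched against an unsolicited iocb. Called with no
 * lock held; always returns 0.
 **/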
9757int
9758lpfc_sli_ringpostbuf_put(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
9759 struct lpfc_dmabuf *mp)
9760{
9761
9762
9763 spin_lock_irq(&phba->hbalock);
9764 list_add_tail(&mp->list, &pring->postbufq);
9765 pring->postbufq_cnt++;
9766 spin_unlock_irq(&phba->hbalock);
9767 return 0;
9768}
9769
9770
9771
9772
9773
9774
9775
9776
9777
9778
9779
9780
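/**
 * lpfc_sli_get_buffer_tag - allocates a tag for a CMD_QUE_XRI64_CX buffer
 * @phba: Pointer to HBA context object.
 *
 * Allocates, under the hbalock, a driver-wide tag for a buffer posted
 * with a QUE_XRI64_CX iocb. The QUE_BUFTAG_BIT is always set so that
 * these tags can be distinguished from tags assigned by the HBQ code.
 **/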
9781uint32_t
9782lpfc_sli_get_buffer_tag(struct lpfc_hba *phba)
9783{
9784 spin_lock_irq(&phba->hbalock);
9785 phba->buffer_tag_count++;
9786
9787
9788
9789
9790 phba->buffer_tag_count |= QUE_BUFTAG_BIT;
9791 spin_unlock_irq(&phba->hbalock);
9792 return phba->buffer_tag_count;
9793}
9794
9795
9796
9797
9798
9799
9800
9801
9802
9803
9804
9805
9806
9807
9808
9809
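/**
 * lpfc_sli_ring_taggedbuf_get - find a buffer in postbufq matching a tag
 * @phba: Pointer to HBA context object.
 * @pring: Pointer to driver SLI ring object.
 * @tag: Buffer tag.
 *
 * Searches the ring's postbufq, under the hbalock, for a buffer whose
 * buffer_tag matches @tag. A matching buffer is removed from the queue
 * and returned; otherwise an error is logged and NULL is returned.
 **/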
9810struct lpfc_dmabuf *
9811lpfc_sli_ring_taggedbuf_get(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
9812 uint32_t tag)
9813{
9814 struct lpfc_dmabuf *mp, *next_mp;
9815 struct list_head *slp = &pring->postbufq;
9816
9817
9818 spin_lock_irq(&phba->hbalock);
9819 list_for_each_entry_safe(mp, next_mp, &pring->postbufq, list) {
9820 if (mp->buffer_tag == tag) {
9821 list_del_init(&mp->list);
9822 pring->postbufq_cnt--;
9823 spin_unlock_irq(&phba->hbalock);
9824 return mp;
9825 }
9826 }
9827
9828 spin_unlock_irq(&phba->hbalock);
9829 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
9830 "0402 Cannot find virtual addr for buffer tag on "
9831 "ring %d Data x%lx x%p x%p x%x\n",
9832 pring->ringno, (unsigned long) tag,
9833 slp->next, slp->prev, pring->postbufq_cnt);
9834
9835 return NULL;
9836}
9837
9838
9839
9840
9841
9842
9843
9844
9845
9846
9847
9848
9849
9850
9851
9852
9853
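/**
 * lpfc_sli_ringpostbuf_get - search buffers in postbufq matching a phys addr
 * @phba: Pointer to HBA context object.
 * @pring: Pointer to driver SLI ring object.
 * @phys: DMA address of the buffer.
 *
 * Searches the ring's postbufq, under the hbalock, for a buffer whose
 * physical address matches @phys. A matching buffer is removed from
 * the queue and returned; otherwise an error is logged and NULL is
 * returned.
 **/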
9854struct lpfc_dmabuf *
9855lpfc_sli_ringpostbuf_get(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
9856 dma_addr_t phys)
9857{
9858 struct lpfc_dmabuf *mp, *next_mp;
9859 struct list_head *slp = &pring->postbufq;
9860
9861
9862 spin_lock_irq(&phba->hbalock);
9863 list_for_each_entry_safe(mp, next_mp, &pring->postbufq, list) {
9864 if (mp->phys == phys) {
9865 list_del_init(&mp->list);
9866 pring->postbufq_cnt--;
9867 spin_unlock_irq(&phba->hbalock);
9868 return mp;
9869 }
9870 }
9871
9872 spin_unlock_irq(&phba->hbalock);
9873 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
9874 "0410 Cannot find virtual addr for mapped buf on "
9875 "ring %d Data x%llx x%p x%p x%x\n",
9876 pring->ringno, (unsigned long long)phys,
9877 slp->next, slp->prev, pring->postbufq_cnt);
9878 return NULL;
9879}
9880
9881
9882
9883
9884
9885
9886
9887
9888
9889
9890
9891
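/**
 * lpfc_sli_abort_els_cmpl - Completion handler for the els abort iocbs
 * @phba: Pointer to HBA context object.
 * @cmdiocb: Pointer to driver command iocb object.
 * @rspiocb: Pointer to driver response iocb object.
 *
 * Called when the abort iocb sent to abort an ELS command completes.
 * If the abort failed, the iocb that could not be aborted is looked up
 * and the failure is logged. The abort iocb is then released.
 **/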
9892static void
9893lpfc_sli_abort_els_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
9894 struct lpfc_iocbq *rspiocb)
9895{
9896 IOCB_t *irsp = &rspiocb->iocb;
9897 uint16_t abort_iotag, abort_context;
9898 struct lpfc_iocbq *abort_iocb = NULL;
9899
9900 if (irsp->ulpStatus) {
9901
9902
9903
9904
9905
9906 abort_context = cmdiocb->iocb.un.acxri.abortContextTag;
9907 abort_iotag = cmdiocb->iocb.un.acxri.abortIoTag;
9908
9909 spin_lock_irq(&phba->hbalock);
9910 if (phba->sli_rev < LPFC_SLI_REV4) {
9911 if (abort_iotag != 0 &&
9912 abort_iotag <= phba->sli.last_iotag)
9913 abort_iocb =
9914 phba->sli.iocbq_lookup[abort_iotag];
9915 } else
9916 /*
9917  * For SLI4 the abort tag is the XRI, so the abort routine
9918  * puts the iotag of the iocb being aborted in the context
9919  * field of the abort iocb.
9920  */
9921 abort_iocb = phba->sli.iocbq_lookup[abort_context];
9922
9923 lpfc_printf_log(phba, KERN_WARNING, LOG_ELS | LOG_SLI,
9924 "0327 Cannot abort els iocb %p "
9925 "with tag %x context %x, abort status %x, "
9926 "abort code %x\n",
9927 abort_iocb, abort_iotag, abort_context,
9928 irsp->ulpStatus, irsp->un.ulpWord[4]);
9929
9930 spin_unlock_irq(&phba->hbalock);
9931 }
9932 lpfc_sli_release_iocbq(phba, cmdiocb);
9933 return;
9934}
9935
9936
9937
9938
9939
9940
9941
9942
9943
9944
9945
9946
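/**
 * lpfc_ignore_els_cmpl - Completion handler for aborted ELS command
 * @phba: Pointer to HBA context object.
 * @cmdiocb: Pointer to driver command iocb object.
 * @rspiocb: Pointer to driver response iocb object.
 *
 * Installed as the completion handler of an ELS command that is being
 * aborted while the driver unloads. It logs the completion and frees
 * the iocb through the CT or ELS free routine, depending on the
 * command type.
 **/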
9947static void
9948lpfc_ignore_els_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
9949 struct lpfc_iocbq *rspiocb)
9950{
9951 IOCB_t *irsp = &rspiocb->iocb;
9952
9953
9954 lpfc_printf_log(phba, KERN_INFO, LOG_ELS,
9955 "0139 Ignoring ELS cmd tag x%x completion Data: "
9956 "x%x x%x x%x\n",
9957 irsp->ulpIoTag, irsp->ulpStatus,
9958 irsp->un.ulpWord[4], irsp->ulpTimeout);
9959 if (cmdiocb->iocb.ulpCommand == CMD_GEN_REQUEST64_CR)
9960 lpfc_ct_free_iocb(phba, cmdiocb);
9961 else
9962 lpfc_els_free_iocb(phba, cmdiocb);
9963 return;
9964}
9965
9966
9967
9968
9969
9970
9971
9972
9973
9974
9975
9976
9977
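/**
 * lpfc_sli_abort_iotag_issue - Issue abort for a command iocb
 * @phba: Pointer to HBA context object.
 * @pring: Pointer to driver SLI ring object.
 * @cmdiocb: Pointer to driver command iocb object.
 *
 * Issues an ABORT_XRI_CN (link up) or CLOSE_XRI_CN (link down) iocb to
 * abort @cmdiocb, unless @cmdiocb is itself an abort or is already
 * being aborted. Must be called with the hbalock held; the caller must
 * hold a reference on @cmdiocb for the duration of the call. Returns 0
 * when no abort was issued, otherwise the result of the issue routine.
 **/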
9978static int
9979lpfc_sli_abort_iotag_issue(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
9980 struct lpfc_iocbq *cmdiocb)
9981{
9982 struct lpfc_vport *vport = cmdiocb->vport;
9983 struct lpfc_iocbq *abtsiocbp;
9984 IOCB_t *icmd = NULL;
9985 IOCB_t *iabt = NULL;
9986 int ring_number;
9987 int retval;
9988 unsigned long iflags;
9989
9990 lockdep_assert_held(&phba->hbalock);
9991
9992
9993
9994
9995
9996
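	/*
	 * There are certain command types we don't want to abort.  And we
	 * don't want to abort commands that are already in the process of
	 * being aborted.
	 */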
9997 icmd = &cmdiocb->iocb;
9998 if (icmd->ulpCommand == CMD_ABORT_XRI_CN ||
9999 icmd->ulpCommand == CMD_CLOSE_XRI_CN ||
10000 (cmdiocb->iocb_flag & LPFC_DRIVER_ABORTED) != 0)
10001 return 0;
10002
10003
10004 abtsiocbp = __lpfc_sli_get_iocbq(phba);
10005 if (abtsiocbp == NULL)
10006 return 0;
10007
10008
10009
10010
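	/* This signals the response to set the correct status
	 * before calling the completion handler
	 */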
10011 cmdiocb->iocb_flag |= LPFC_DRIVER_ABORTED;
10012
10013 iabt = &abtsiocbp->iocb;
10014 iabt->un.acxri.abortType = ABORT_TYPE_ABTS;
10015 iabt->un.acxri.abortContextTag = icmd->ulpContext;
10016 if (phba->sli_rev == LPFC_SLI_REV4) {
10017 iabt->un.acxri.abortIoTag = cmdiocb->sli4_xritag;
10018 iabt->un.acxri.abortContextTag = cmdiocb->iotag;
10019 }
10020 else
10021 iabt->un.acxri.abortIoTag = icmd->ulpIoTag;
10022 iabt->ulpLe = 1;
10023 iabt->ulpClass = icmd->ulpClass;
10024
10025
10026 abtsiocbp->fcp_wqidx = cmdiocb->fcp_wqidx;
10027 if (cmdiocb->iocb_flag & LPFC_IO_FCP)
10028 abtsiocbp->iocb_flag |= LPFC_USE_FCPWQIDX;
10029 if (cmdiocb->iocb_flag & LPFC_IO_FOF)
10030 abtsiocbp->iocb_flag |= LPFC_IO_FOF;
10031
10032 if (phba->link_state >= LPFC_LINK_UP)
10033 iabt->ulpCommand = CMD_ABORT_XRI_CN;
10034 else
10035 iabt->ulpCommand = CMD_CLOSE_XRI_CN;
10036
10037 abtsiocbp->iocb_cmpl = lpfc_sli_abort_els_cmpl;
10038
10039 lpfc_printf_vlog(vport, KERN_INFO, LOG_SLI,
10040 "0339 Abort xri x%x, original iotag x%x, "
10041 "abort cmd iotag x%x\n",
10042 iabt->un.acxri.abortIoTag,
10043 iabt->un.acxri.abortContextTag,
10044 abtsiocbp->iotag);
10045
10046 if (phba->sli_rev == LPFC_SLI_REV4) {
10047 ring_number =
10048 lpfc_sli_calc_ring(phba, pring->ringno, abtsiocbp);
10049 if (unlikely(ring_number == LPFC_HBA_ERROR))
10050 return 0;
10051 pring = &phba->sli.ring[ring_number];
10052
10053 spin_lock_irqsave(&pring->ring_lock, iflags);
10054 retval = __lpfc_sli_issue_iocb(phba, pring->ringno,
10055 abtsiocbp, 0);
10056 spin_unlock_irqrestore(&pring->ring_lock, iflags);
10057 } else {
10058 retval = __lpfc_sli_issue_iocb(phba, pring->ringno,
10059 abtsiocbp, 0);
10060 }
10061
10062 if (retval)
10063 __lpfc_sli_release_iocbq(phba, abtsiocbp);
10064
10065
10066
10067
10068
10069
10070 return retval;
10071}
10072
10073
10074
10075
10076
10077
10078
10079
10080
10081
10082
10083
10084
10085
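/**
 * lpfc_sli_issue_abort_iotag - Abort function for a command iocb
 * @phba: Pointer to HBA context object.
 * @pring: Pointer to driver SLI ring object.
 * @cmdiocb: Pointer to driver command iocb object.
 *
 * When the vport is unloading, an ELS command is not aborted on the
 * wire; its completion handler is replaced with lpfc_ignore_els_cmpl()
 * instead. Otherwise the abort is issued through
 * lpfc_sli_abort_iotag_issue(). Must be called with the hbalock held.
 **/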
10086int
10087lpfc_sli_issue_abort_iotag(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
10088 struct lpfc_iocbq *cmdiocb)
10089{
10090 struct lpfc_vport *vport = cmdiocb->vport;
10091 int retval = IOCB_ERROR;
10092 IOCB_t *icmd = NULL;
10093
10094 lockdep_assert_held(&phba->hbalock);
10095
10096
10097
10098
10099
10100
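	/*
	 * There are certain command types we don't want to abort.  And we
	 * don't want to abort commands that are already in the process of
	 * being aborted.
	 */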
10101 icmd = &cmdiocb->iocb;
10102 if (icmd->ulpCommand == CMD_ABORT_XRI_CN ||
10103 icmd->ulpCommand == CMD_CLOSE_XRI_CN ||
10104 (cmdiocb->iocb_flag & LPFC_DRIVER_ABORTED) != 0)
10105 return 0;
10106
10107
10108
10109
10110
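	/*
	 * If we're unloading, don't abort iocb on the ELS ring, but change
	 * the callback so that nothing happens when it finishes.
	 */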
10111 if ((vport->load_flag & FC_UNLOADING) &&
10112 (pring->ringno == LPFC_ELS_RING)) {
10113 if (cmdiocb->iocb_flag & LPFC_IO_FABRIC)
10114 cmdiocb->fabric_iocb_cmpl = lpfc_ignore_els_cmpl;
10115 else
10116 cmdiocb->iocb_cmpl = lpfc_ignore_els_cmpl;
10117 goto abort_iotag_exit;
10118 }
10119
10120
10121 retval = lpfc_sli_abort_iotag_issue(phba, pring, cmdiocb);
10122
10123abort_iotag_exit:
10124
10125
10126
10127
10128
10129 return retval;
10130}
10131
10132
10133
10134
10135
10136
10137
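/**
 * lpfc_sli_hba_iocb_abort - Abort all iocbs to an hba.
 * @phba: pointer to lpfc HBA data structure.
 *
 * Aborts the outstanding iocbs on every SLI ring of the HBA.
 **/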
10138void
10139lpfc_sli_hba_iocb_abort(struct lpfc_hba *phba)
10140{
10141 struct lpfc_sli *psli = &phba->sli;
10142 struct lpfc_sli_ring *pring;
10143 int i;
10144
10145 for (i = 0; i < psli->num_rings; i++) {
10146 pring = &psli->ring[i];
10147 lpfc_sli_abort_iocb_ring(phba, pring);
10148 }
10149}
10150
10151
10152
10153
10154
10155
10156
10157
10158
10159
10160
10161
10162
10163
10164
10165
10166
10167
10168
10169
10170
10171
10172
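/**
 * lpfc_sli_validate_fcp_iocb - find commands associated with a vport or LUN
 * @iocbq: Pointer to driver iocb object.
 * @vport: Pointer to driver virtual port object.
 * @tgt_id: SCSI ID of the target.
 * @lun_id: LUN ID of the scsi command.
 * @ctx_cmd: LPFC_CTX_LUN/LPFC_CTX_TGT/LPFC_CTX_HOST.
 *
 * Returns 0 when @iocbq is an active FCP command that belongs to
 * @vport and, depending on @ctx_cmd, matches the given target and LUN;
 * returns 1 otherwise.
 **/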
10173static int
10174lpfc_sli_validate_fcp_iocb(struct lpfc_iocbq *iocbq, struct lpfc_vport *vport,
10175 uint16_t tgt_id, uint64_t lun_id,
10176 lpfc_ctx_cmd ctx_cmd)
10177{
10178 struct lpfc_scsi_buf *lpfc_cmd;
10179 int rc = 1;
10180
10181 if (!(iocbq->iocb_flag & LPFC_IO_FCP))
10182 return rc;
10183
10184 if (iocbq->vport != vport)
10185 return rc;
10186
10187 lpfc_cmd = container_of(iocbq, struct lpfc_scsi_buf, cur_iocbq);
10188
10189 if (lpfc_cmd->pCmd == NULL)
10190 return rc;
10191
10192 switch (ctx_cmd) {
10193 case LPFC_CTX_LUN:
10194 if ((lpfc_cmd->rdata->pnode) &&
10195 (lpfc_cmd->rdata->pnode->nlp_sid == tgt_id) &&
10196 (scsilun_to_int(&lpfc_cmd->fcp_cmnd->fcp_lun) == lun_id))
10197 rc = 0;
10198 break;
10199 case LPFC_CTX_TGT:
10200 if ((lpfc_cmd->rdata->pnode) &&
10201 (lpfc_cmd->rdata->pnode->nlp_sid == tgt_id))
10202 rc = 0;
10203 break;
10204 case LPFC_CTX_HOST:
10205 rc = 0;
10206 break;
10207 default:
10208 printk(KERN_ERR "%s: Unknown context cmd type, value %d\n",
10209 __func__, ctx_cmd);
10210 break;
10211 }
10212
10213 return rc;
10214}
10215
10216
10217
10218
10219
10220
10221
10222
10223
10224
10225
10226
10227
10228
10229
10230
10231
10232
10233
10234
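/**
 * lpfc_sli_sum_iocb - Function to count the number of FCP iocbs pending
 * @vport: Pointer to virtual port.
 * @tgt_id: SCSI ID of the target.
 * @lun_id: LUN ID of the scsi command.
 * @ctx_cmd: LPFC_CTX_LUN/LPFC_CTX_TGT/LPFC_CTX_HOST.
 *
 * Walks the iotag lookup array under the hbalock and returns the
 * number of pending FCP iocbs that match the given context.
 **/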
10235int
10236lpfc_sli_sum_iocb(struct lpfc_vport *vport, uint16_t tgt_id, uint64_t lun_id,
10237 lpfc_ctx_cmd ctx_cmd)
10238{
10239 struct lpfc_hba *phba = vport->phba;
10240 struct lpfc_iocbq *iocbq;
10241 int sum, i;
10242
10243 spin_lock_irq(&phba->hbalock);
10244 for (i = 1, sum = 0; i <= phba->sli.last_iotag; i++) {
10245 iocbq = phba->sli.iocbq_lookup[i];
10246
10247 if (lpfc_sli_validate_fcp_iocb(iocbq, vport, tgt_id, lun_id,
10248 ctx_cmd) == 0)
10249 sum++;
10250 }
10251 spin_unlock_irq(&phba->hbalock);
10252
10253 return sum;
10254}
10255
10256
10257
10258
10259
10260
10261
10262
10263
10264
10265
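/**
 * lpfc_sli_abort_fcp_cmpl - Completion handler function for aborted FCP iocbs
 * @phba: Pointer to HBA context object
 * @cmdiocb: Pointer to command iocb object.
 * @rspiocb: Pointer to response iocb object.
 *
 * Completion handler for the abort iocbs issued for FCP commands: logs
 * the completion status and releases the abort iocb.
 **/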
10266void
10267lpfc_sli_abort_fcp_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
10268 struct lpfc_iocbq *rspiocb)
10269{
10270 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
10271 "3096 ABORT_XRI_CN completing on rpi x%x "
10272 "original iotag x%x, abort cmd iotag x%x "
10273 "status 0x%x, reason 0x%x\n",
10274 cmdiocb->iocb.un.acxri.abortContextTag,
10275 cmdiocb->iocb.un.acxri.abortIoTag,
10276 cmdiocb->iotag, rspiocb->iocb.ulpStatus,
10277 rspiocb->iocb.un.ulpWord[4]);
10278 lpfc_sli_release_iocbq(phba, cmdiocb);
10279 return;
10280}
10281
10282
10283
10284
10285
10286
10287
10288
10289
10290
10291
10292
10293
10294
10295
10296
10297
10298
10299
10300
10301
10302
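/**
 * lpfc_sli_abort_iocb - issue abort for all commands on a host/target/LUN
 * @vport: Pointer to virtual port.
 * @pring: Pointer to driver SLI ring object.
 * @tgt_id: SCSI ID of the target.
 * @lun_id: LUN ID of the scsi command.
 * @abort_cmd: LPFC_CTX_LUN/LPFC_CTX_TGT/LPFC_CTX_HOST.
 *
 * Issues an abort iocb, with lpfc_sli_abort_fcp_cmpl() as completion
 * handler, for every pending FCP command that matches the given
 * context. Returns the number of commands that could not be aborted.
 **/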
10303int
10304lpfc_sli_abort_iocb(struct lpfc_vport *vport, struct lpfc_sli_ring *pring,
10305 uint16_t tgt_id, uint64_t lun_id, lpfc_ctx_cmd abort_cmd)
10306{
10307 struct lpfc_hba *phba = vport->phba;
10308 struct lpfc_iocbq *iocbq;
10309 struct lpfc_iocbq *abtsiocb;
10310 IOCB_t *cmd = NULL;
10311 int errcnt = 0, ret_val = 0;
10312 int i;
10313
10314 for (i = 1; i <= phba->sli.last_iotag; i++) {
10315 iocbq = phba->sli.iocbq_lookup[i];
10316
10317 if (lpfc_sli_validate_fcp_iocb(iocbq, vport, tgt_id, lun_id,
10318 abort_cmd) != 0)
10319 continue;
10320
10321
10322
10323
10324
10325 if (iocbq->iocb_flag & LPFC_DRIVER_ABORTED)
10326 continue;
10327
10328
10329 abtsiocb = lpfc_sli_get_iocbq(phba);
10330 if (abtsiocb == NULL) {
10331 errcnt++;
10332 continue;
10333 }
10334
10335
10336 iocbq->iocb_flag |= LPFC_DRIVER_ABORTED;
10337
10338 cmd = &iocbq->iocb;
10339 abtsiocb->iocb.un.acxri.abortType = ABORT_TYPE_ABTS;
10340 abtsiocb->iocb.un.acxri.abortContextTag = cmd->ulpContext;
10341 if (phba->sli_rev == LPFC_SLI_REV4)
10342 abtsiocb->iocb.un.acxri.abortIoTag = iocbq->sli4_xritag;
10343 else
10344 abtsiocb->iocb.un.acxri.abortIoTag = cmd->ulpIoTag;
10345 abtsiocb->iocb.ulpLe = 1;
10346 abtsiocb->iocb.ulpClass = cmd->ulpClass;
10347 abtsiocb->vport = vport;
10348
10349
10350 abtsiocb->fcp_wqidx = iocbq->fcp_wqidx;
10351 if (iocbq->iocb_flag & LPFC_IO_FCP)
10352 abtsiocb->iocb_flag |= LPFC_USE_FCPWQIDX;
10353 if (iocbq->iocb_flag & LPFC_IO_FOF)
10354 abtsiocb->iocb_flag |= LPFC_IO_FOF;
10355
10356 if (lpfc_is_link_up(phba))
10357 abtsiocb->iocb.ulpCommand = CMD_ABORT_XRI_CN;
10358 else
10359 abtsiocb->iocb.ulpCommand = CMD_CLOSE_XRI_CN;
10360
10361
10362 abtsiocb->iocb_cmpl = lpfc_sli_abort_fcp_cmpl;
10363 ret_val = lpfc_sli_issue_iocb(phba, pring->ringno,
10364 abtsiocb, 0);
10365 if (ret_val == IOCB_ERROR) {
10366 lpfc_sli_release_iocbq(phba, abtsiocb);
10367 errcnt++;
10368 continue;
10369 }
10370 }
10371
10372 return errcnt;
10373}
10374
10375
10376
10377
10378
10379
10380
10381
10382
10383
10384
10385
10386
10387
10388
10389
10390
10391
10392
10393
10394
10395
10396
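/**
 * lpfc_sli_abort_taskmgmt - issue abort for all commands on a host/target/LUN
 * @vport: Pointer to virtual port.
 * @pring: Pointer to driver SLI ring object.
 * @tgt_id: SCSI ID of the target.
 * @lun_id: LUN ID of the scsi command.
 * @cmd: LPFC_CTX_LUN/LPFC_CTX_TGT/LPFC_CTX_HOST.
 *
 * Like lpfc_sli_abort_iocb() but runs entirely under the hbalock, does
 * nothing while an FCP I/O queue flush is in progress, and returns the
 * number of iocbs successfully aborted instead of an error count.
 **/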
10397int
10398lpfc_sli_abort_taskmgmt(struct lpfc_vport *vport, struct lpfc_sli_ring *pring,
10399 uint16_t tgt_id, uint64_t lun_id, lpfc_ctx_cmd cmd)
10400{
10401 struct lpfc_hba *phba = vport->phba;
10402 struct lpfc_scsi_buf *lpfc_cmd;
10403 struct lpfc_iocbq *abtsiocbq;
10404 struct lpfc_nodelist *ndlp;
10405 struct lpfc_iocbq *iocbq;
10406 IOCB_t *icmd;
10407 int sum, i, ret_val;
10408 unsigned long iflags;
10409 struct lpfc_sli_ring *pring_s4;
10410 uint32_t ring_number;
10411
10412 spin_lock_irq(&phba->hbalock);
10413
10414
10415 if (phba->hba_flag & HBA_FCP_IOQ_FLUSH) {
10416 spin_unlock_irq(&phba->hbalock);
10417 return 0;
10418 }
10419 sum = 0;
10420
10421 for (i = 1; i <= phba->sli.last_iotag; i++) {
10422 iocbq = phba->sli.iocbq_lookup[i];
10423
10424 if (lpfc_sli_validate_fcp_iocb(iocbq, vport, tgt_id, lun_id,
10425 cmd) != 0)
10426 continue;
10427
10428
10429
10430
10431
10432 if (iocbq->iocb_flag & LPFC_DRIVER_ABORTED)
10433 continue;
10434
10435
10436 abtsiocbq = __lpfc_sli_get_iocbq(phba);
10437 if (abtsiocbq == NULL)
10438 continue;
10439
10440 icmd = &iocbq->iocb;
10441 abtsiocbq->iocb.un.acxri.abortType = ABORT_TYPE_ABTS;
10442 abtsiocbq->iocb.un.acxri.abortContextTag = icmd->ulpContext;
10443 if (phba->sli_rev == LPFC_SLI_REV4)
10444 abtsiocbq->iocb.un.acxri.abortIoTag =
10445 iocbq->sli4_xritag;
10446 else
10447 abtsiocbq->iocb.un.acxri.abortIoTag = icmd->ulpIoTag;
10448 abtsiocbq->iocb.ulpLe = 1;
10449 abtsiocbq->iocb.ulpClass = icmd->ulpClass;
10450 abtsiocbq->vport = vport;
10451
10452
10453 abtsiocbq->fcp_wqidx = iocbq->fcp_wqidx;
10454 if (iocbq->iocb_flag & LPFC_IO_FCP)
10455 abtsiocbq->iocb_flag |= LPFC_USE_FCPWQIDX;
10456 if (iocbq->iocb_flag & LPFC_IO_FOF)
10457 abtsiocbq->iocb_flag |= LPFC_IO_FOF;
10458
10459 lpfc_cmd = container_of(iocbq, struct lpfc_scsi_buf, cur_iocbq);
10460 ndlp = lpfc_cmd->rdata->pnode;
10461
10462 if (lpfc_is_link_up(phba) &&
10463 (ndlp && ndlp->nlp_state == NLP_STE_MAPPED_NODE))
10464 abtsiocbq->iocb.ulpCommand = CMD_ABORT_XRI_CN;
10465 else
10466 abtsiocbq->iocb.ulpCommand = CMD_CLOSE_XRI_CN;
10467
10468
10469 abtsiocbq->iocb_cmpl = lpfc_sli_abort_fcp_cmpl;
10470
10471
10472
10473
10474
10475 iocbq->iocb_flag |= LPFC_DRIVER_ABORTED;
10476
10477 if (phba->sli_rev == LPFC_SLI_REV4) {
10478 ring_number = MAX_SLI3_CONFIGURED_RINGS +
10479 iocbq->fcp_wqidx;
10480 pring_s4 = &phba->sli.ring[ring_number];
10481
10482 spin_lock_irqsave(&pring_s4->ring_lock, iflags);
10483 ret_val = __lpfc_sli_issue_iocb(phba, pring_s4->ringno,
10484 abtsiocbq, 0);
10485 spin_unlock_irqrestore(&pring_s4->ring_lock, iflags);
10486 } else {
10487 ret_val = __lpfc_sli_issue_iocb(phba, pring->ringno,
10488 abtsiocbq, 0);
10489 }
10490
10491
10492 if (ret_val == IOCB_ERROR)
10493 __lpfc_sli_release_iocbq(phba, abtsiocbq);
10494 else
10495 sum++;
10496 }
10497 spin_unlock_irq(&phba->hbalock);
10498 return sum;
10499}
10500
10501
10502
10503
10504
10505
10506
10507
10508
10509
10510
10511
10512
10513
10514
10515
10516
10517
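/**
 * lpfc_sli_wake_iocb_wait - lpfc_sli_issue_iocb_wait's completion handler
 * @phba: Pointer to HBA context object.
 * @cmdiocbq: Pointer to command iocb.
 * @rspiocbq: Pointer to response iocb.
 *
 * If the waiter has already timed out (LPFC_IO_WAKE_TMO set), the
 * original completion handler is restored and invoked, or the iocb is
 * freed when there is none. Otherwise the response iocb is copied into
 * the waiter's buffer, LPFC_IO_WAKE is set and the sleeping thread is
 * woken up.
 **/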
10518static void
10519lpfc_sli_wake_iocb_wait(struct lpfc_hba *phba,
10520 struct lpfc_iocbq *cmdiocbq,
10521 struct lpfc_iocbq *rspiocbq)
10522{
10523 wait_queue_head_t *pdone_q;
10524 unsigned long iflags;
10525 struct lpfc_scsi_buf *lpfc_cmd;
10526
10527 spin_lock_irqsave(&phba->hbalock, iflags);
10528 if (cmdiocbq->iocb_flag & LPFC_IO_WAKE_TMO) {
10529
10530
10531
10532
10533
10534
10535
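		/*
		 * A time out has occurred for the iocb.  If a time out
		 * completion handler has been supplied, call it.  Otherwise,
		 * just free the iocbq.
		 */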
10536 spin_unlock_irqrestore(&phba->hbalock, iflags);
10537 cmdiocbq->iocb_cmpl = cmdiocbq->wait_iocb_cmpl;
10538 cmdiocbq->wait_iocb_cmpl = NULL;
10539 if (cmdiocbq->iocb_cmpl)
10540 (cmdiocbq->iocb_cmpl)(phba, cmdiocbq, NULL);
10541 else
10542 lpfc_sli_release_iocbq(phba, cmdiocbq);
10543 return;
10544 }
10545
10546 cmdiocbq->iocb_flag |= LPFC_IO_WAKE;
10547 if (cmdiocbq->context2 && rspiocbq)
10548 memcpy(&((struct lpfc_iocbq *)cmdiocbq->context2)->iocb,
10549 &rspiocbq->iocb, sizeof(IOCB_t));
10550
10551
10552 if ((cmdiocbq->iocb_flag & LPFC_IO_FCP) &&
10553 !(cmdiocbq->iocb_flag & LPFC_IO_LIBDFC)) {
10554 lpfc_cmd = container_of(cmdiocbq, struct lpfc_scsi_buf,
10555 cur_iocbq);
10556 lpfc_cmd->exch_busy = rspiocbq->iocb_flag & LPFC_EXCHANGE_BUSY;
10557 }
10558
10559 pdone_q = cmdiocbq->context_un.wait_queue;
10560 if (pdone_q)
10561 wake_up(pdone_q);
10562 spin_unlock_irqrestore(&phba->hbalock, iflags);
10563 return;
10564}
10565
10566
10567
10568
10569
10570
10571
10572
10573
10574
10575
10576
10577
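/**
 * lpfc_chk_iocb_flg - Test iocb flags under the hbalock
 * @phba: Pointer to HBA context object.
 * @piocbq: Pointer to command iocb.
 * @flag: Flag bits to test.
 *
 * Reads the iocb flag field under the hbalock so that
 * lpfc_sli_issue_iocb_wait() can safely poll for LPFC_IO_WAKE from
 * wait_event_timeout().
 **/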
10578static int
10579lpfc_chk_iocb_flg(struct lpfc_hba *phba,
10580 struct lpfc_iocbq *piocbq, uint32_t flag)
10581{
10582 unsigned long iflags;
10583 int ret;
10584
10585 spin_lock_irqsave(&phba->hbalock, iflags);
10586 ret = piocbq->iocb_flag & flag;
10587 spin_unlock_irqrestore(&phba->hbalock, iflags);
10588 return ret;
10589
10590}
10591
10592
10593
10594
10595
10596
10597
10598
10599
10600
10601
10602
10603
10604
10605
10606
10607
10608
10609
10610
10611
10612
10613
10614
10615
10616
10617
10618
10619
10620
10621
10622
10623
10624
10625
10626
10627
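/**
 * lpfc_sli_issue_iocb_wait - Synchronous function to issue iocb commands
 * @phba: Pointer to HBA context object.
 * @ring_number: Ring number.
 * @piocb: Pointer to command iocb.
 * @prspiocbq: Pointer to response iocb.
 * @timeout: Timeout in number of seconds.
 *
 * Issues @piocb and sleeps until the command completes or the timeout
 * expires. When @prspiocbq is supplied, @piocb->context2 must be NULL
 * on entry since it is borrowed to carry the response. Returns
 * IOCB_SUCCESS, IOCB_TIMEDOUT, IOCB_BUSY or IOCB_ERROR. Must not be
 * called from interrupt context.
 **/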
10628int
10629lpfc_sli_issue_iocb_wait(struct lpfc_hba *phba,
10630 uint32_t ring_number,
10631 struct lpfc_iocbq *piocb,
10632 struct lpfc_iocbq *prspiocbq,
10633 uint32_t timeout)
10634{
10635 DECLARE_WAIT_QUEUE_HEAD_ONSTACK(done_q);
10636 long timeleft, timeout_req = 0;
10637 int retval = IOCB_SUCCESS;
10638 uint32_t creg_val;
10639 struct lpfc_iocbq *iocb;
10640 int txq_cnt = 0;
10641 int txcmplq_cnt = 0;
10642 struct lpfc_sli_ring *pring = &phba->sli.ring[LPFC_ELS_RING];
10643 unsigned long iflags;
10644 bool iocb_completed = true;
10645
10646
10647
10648
10649
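	/*
	 * If the caller has provided a response iocbq buffer, then context2
	 * is NULL or its an error.
	 */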
10650 if (prspiocbq) {
10651 if (piocb->context2)
10652 return IOCB_ERROR;
10653 piocb->context2 = prspiocbq;
10654 }
10655
10656 piocb->wait_iocb_cmpl = piocb->iocb_cmpl;
10657 piocb->iocb_cmpl = lpfc_sli_wake_iocb_wait;
10658 piocb->context_un.wait_queue = &done_q;
10659 piocb->iocb_flag &= ~(LPFC_IO_WAKE | LPFC_IO_WAKE_TMO);
10660
10661 if (phba->cfg_poll & DISABLE_FCP_RING_INT) {
10662 if (lpfc_readl(phba->HCregaddr, &creg_val))
10663 return IOCB_ERROR;
10664 creg_val |= (HC_R0INT_ENA << LPFC_FCP_RING);
10665 writel(creg_val, phba->HCregaddr);
10666 readl(phba->HCregaddr);
10667 }
10668
10669 retval = lpfc_sli_issue_iocb(phba, ring_number, piocb,
10670 SLI_IOCB_RET_IOCB);
10671 if (retval == IOCB_SUCCESS) {
10672 timeout_req = msecs_to_jiffies(timeout * 1000);
10673 timeleft = wait_event_timeout(done_q,
10674 lpfc_chk_iocb_flg(phba, piocb, LPFC_IO_WAKE),
10675 timeout_req);
10676 spin_lock_irqsave(&phba->hbalock, iflags);
10677 if (!(piocb->iocb_flag & LPFC_IO_WAKE)) {
10678
10679
10680
10681
10682
10683
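			/*
			 * IOCB timed out.  Inform the wake iocb wait
			 * completion function and set local status
			 */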
10684 iocb_completed = false;
10685 piocb->iocb_flag |= LPFC_IO_WAKE_TMO;
10686 }
10687 spin_unlock_irqrestore(&phba->hbalock, iflags);
10688 if (iocb_completed) {
10689 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
10690 "0331 IOCB wake signaled\n");
10691
10692
10693
10694
10695
10696 } else if (timeleft == 0) {
10697 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
10698 "0338 IOCB wait timeout error - no "
10699 "wake response Data x%x\n", timeout);
10700 retval = IOCB_TIMEDOUT;
10701 } else {
10702 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
10703 "0330 IOCB wake NOT set, "
10704 "Data x%x x%lx\n",
10705 timeout, (timeleft / jiffies));
10706 retval = IOCB_TIMEDOUT;
10707 }
10708 } else if (retval == IOCB_BUSY) {
10709 if (phba->cfg_log_verbose & LOG_SLI) {
10710 list_for_each_entry(iocb, &pring->txq, list) {
10711 txq_cnt++;
10712 }
10713 list_for_each_entry(iocb, &pring->txcmplq, list) {
10714 txcmplq_cnt++;
10715 }
10716 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
10717 "2818 Max IOCBs %d txq cnt %d txcmplq cnt %d\n",
10718 phba->iocb_cnt, txq_cnt, txcmplq_cnt);
10719 }
10720 return retval;
10721 } else {
10722 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
10723 "0332 IOCB wait issue failed, Data x%x\n",
10724 retval);
10725 retval = IOCB_ERROR;
10726 }
10727
10728 if (phba->cfg_poll & DISABLE_FCP_RING_INT) {
10729 if (lpfc_readl(phba->HCregaddr, &creg_val))
10730 return IOCB_ERROR;
10731 creg_val &= ~(HC_R0INT_ENA << LPFC_FCP_RING);
10732 writel(creg_val, phba->HCregaddr);
10733 readl(phba->HCregaddr);
10734 }
10735
10736 if (prspiocbq)
10737 piocb->context2 = NULL;
10738
10739 piocb->context_un.wait_queue = NULL;
10740 piocb->iocb_cmpl = NULL;
10741 return retval;
10742}
10743
10744
10745
10746
10747
10748
10749
10750
10751
10752
10753
10754
10755
10756
10757
10758
10759
10760
10761
10762
10763
10764
10765
10766
10767
10768
10769
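/**
 * lpfc_sli_issue_mbox_wait - Synchronous function to issue mailbox
 * @phba: Pointer to HBA context object.
 * @pmboxq: Pointer to driver mailbox object.
 * @timeout: Timeout in number of seconds.
 *
 * Issues the mailbox command and waits, interruptibly, for it to
 * complete. The caller's context1 is saved and restored across the
 * call. Returns MBX_SUCCESS when the command completed, MBX_TIMEOUT
 * when the wait expired, or the issue-time error code. Must not be
 * called from interrupt context.
 **/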
10770int
10771lpfc_sli_issue_mbox_wait(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmboxq,
10772 uint32_t timeout)
10773{
10774 DECLARE_WAIT_QUEUE_HEAD_ONSTACK(done_q);
10775 MAILBOX_t *mb = NULL;
10776 int retval;
10777 unsigned long flag;
10778
10779
10780 if (pmboxq->context1)
10781 mb = (MAILBOX_t *)pmboxq->context1;
10782
10783 pmboxq->mbox_flag &= ~LPFC_MBX_WAKE;
10784
10785 pmboxq->mbox_cmpl = lpfc_sli_wake_mbox_wait;
10786
10787 pmboxq->context1 = &done_q;
10788
10789
10790 retval = lpfc_sli_issue_mbox(phba, pmboxq, MBX_NOWAIT);
10791 if (retval == MBX_BUSY || retval == MBX_SUCCESS) {
10792 wait_event_interruptible_timeout(done_q,
10793 pmboxq->mbox_flag & LPFC_MBX_WAKE,
10794 msecs_to_jiffies(timeout * 1000));
10795
10796 spin_lock_irqsave(&phba->hbalock, flag);
10797
10798 pmboxq->context1 = (uint8_t *)mb;
10799
10800
10801
10802
10803 if (pmboxq->mbox_flag & LPFC_MBX_WAKE) {
10804 retval = MBX_SUCCESS;
10805 } else {
10806 retval = MBX_TIMEOUT;
10807 pmboxq->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
10808 }
10809 spin_unlock_irqrestore(&phba->hbalock, flag);
10810 } else {
10811
10812 pmboxq->context1 = (uint8_t *)mb;
10813 }
10814
10815 return retval;
10816}
10817
10818
10819
10820
10821
10822
10823
10824
10825
10826
10827
10828
10829
10830
10831
10832
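/**
 * lpfc_sli_mbox_sys_shutdown - shutdown mailbox command sub-system
 * @phba: Pointer to HBA context.
 * @mbx_action: LPFC_MBX_WAIT or LPFC_MBX_NO_WAIT.
 *
 * Blocks the posting of new asynchronous mailbox commands. With
 * LPFC_MBX_WAIT it polls, up to the mailbox command timeout, for the
 * outstanding mailbox command to complete before flushing the mailbox
 * system; with LPFC_MBX_NO_WAIT it flushes right away.
 **/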
10833void
10834lpfc_sli_mbox_sys_shutdown(struct lpfc_hba *phba, int mbx_action)
10835{
10836 struct lpfc_sli *psli = &phba->sli;
10837 unsigned long timeout;
10838
10839 if (mbx_action == LPFC_MBX_NO_WAIT) {
10840
10841 msleep(100);
10842 lpfc_sli_mbox_sys_flush(phba);
10843 return;
10844 }
10845 timeout = msecs_to_jiffies(LPFC_MBOX_TMO * 1000) + jiffies;
10846
10847 spin_lock_irq(&phba->hbalock);
10848 psli->sli_flag |= LPFC_SLI_ASYNC_MBX_BLK;
10849
10850 if (psli->sli_flag & LPFC_SLI_ACTIVE) {
10851
10852
10853
10854 if (phba->sli.mbox_active)
10855 timeout = msecs_to_jiffies(lpfc_mbox_tmo_val(phba,
10856 phba->sli.mbox_active) *
10857 1000) + jiffies;
10858 spin_unlock_irq(&phba->hbalock);
10859
10860 while (phba->sli.mbox_active) {
10861
10862 msleep(2);
10863 if (time_after(jiffies, timeout))
10864
10865
10866
10867 break;
10868 }
10869 } else
10870 spin_unlock_irq(&phba->hbalock);
10871
10872 lpfc_sli_mbox_sys_flush(phba);
10873}
10874
10875
10876
10877
10878
10879
10880
10881
10882
10883
10884
10885
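/**
 * lpfc_sli_eratt_read - read sli-3 error attention events
 * @phba: Pointer to HBA context.
 *
 * Reads the host attention register. On HA_ERATT it captures the host
 * status registers, marks the attention as deferred when the firmware
 * asks for it, and posts the event to the worker thread. A register
 * read failure is treated as a surprise device removal. Returns 1 when
 * an error attention was detected, otherwise 0.
 **/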
10886static int
10887lpfc_sli_eratt_read(struct lpfc_hba *phba)
10888{
10889 uint32_t ha_copy;
10890
10891
10892 if (lpfc_readl(phba->HAregaddr, &ha_copy))
10893 goto unplug_err;
10894
10895 if (ha_copy & HA_ERATT) {
10896
10897 if (lpfc_sli_read_hs(phba))
10898 goto unplug_err;
10899
10900
10901 if ((HS_FFER1 & phba->work_hs) &&
10902 ((HS_FFER2 | HS_FFER3 | HS_FFER4 | HS_FFER5 |
10903 HS_FFER6 | HS_FFER7 | HS_FFER8) & phba->work_hs)) {
10904 phba->hba_flag |= DEFER_ERATT;
10905
10906 writel(0, phba->HCregaddr);
10907 readl(phba->HCregaddr);
10908 }
10909
10910
10911 phba->work_ha |= HA_ERATT;
10912
10913 phba->hba_flag |= HBA_ERATT_HANDLED;
10914 return 1;
10915 }
10916 return 0;
10917
10918unplug_err:
10919
10920 phba->work_hs |= UNPLUG_ERR;
10921
10922 phba->work_ha |= HA_ERATT;
10923
10924 phba->hba_flag |= HBA_ERATT_HANDLED;
10925 return 1;
10926}
10927
10928
10929
10930
10931
10932
10933
10934
10935
10936
10937
10938
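/**
 * lpfc_sli4_eratt_read - read sli-4 error attention events
 * @phba: Pointer to HBA context.
 *
 * Reads the SLI4 unrecoverable-error registers (if_type 0) or the port
 * status register (if_type 2), depending on the interface type, and
 * posts any error attention to the worker thread. Returns 1 when an
 * error attention was detected, otherwise 0.
 **/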
10939static int
10940lpfc_sli4_eratt_read(struct lpfc_hba *phba)
10941{
10942 uint32_t uerr_sta_hi, uerr_sta_lo;
10943 uint32_t if_type, portsmphr;
10944 struct lpfc_register portstat_reg;
10945
10946
10947
10948
10949
10950 if_type = bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf);
10951 switch (if_type) {
10952 case LPFC_SLI_INTF_IF_TYPE_0:
10953 if (lpfc_readl(phba->sli4_hba.u.if_type0.UERRLOregaddr,
10954 &uerr_sta_lo) ||
10955 lpfc_readl(phba->sli4_hba.u.if_type0.UERRHIregaddr,
10956 &uerr_sta_hi)) {
10957 phba->work_hs |= UNPLUG_ERR;
10958 phba->work_ha |= HA_ERATT;
10959 phba->hba_flag |= HBA_ERATT_HANDLED;
10960 return 1;
10961 }
10962 if ((~phba->sli4_hba.ue_mask_lo & uerr_sta_lo) ||
10963 (~phba->sli4_hba.ue_mask_hi & uerr_sta_hi)) {
10964 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
10965 "1423 HBA Unrecoverable error: "
10966 "uerr_lo_reg=0x%x, uerr_hi_reg=0x%x, "
10967 "ue_mask_lo_reg=0x%x, "
10968 "ue_mask_hi_reg=0x%x\n",
10969 uerr_sta_lo, uerr_sta_hi,
10970 phba->sli4_hba.ue_mask_lo,
10971 phba->sli4_hba.ue_mask_hi);
10972 phba->work_status[0] = uerr_sta_lo;
10973 phba->work_status[1] = uerr_sta_hi;
10974 phba->work_ha |= HA_ERATT;
10975 phba->hba_flag |= HBA_ERATT_HANDLED;
10976 return 1;
10977 }
10978 break;
10979 case LPFC_SLI_INTF_IF_TYPE_2:
10980 if (lpfc_readl(phba->sli4_hba.u.if_type2.STATUSregaddr,
10981 &portstat_reg.word0) ||
10982 lpfc_readl(phba->sli4_hba.PSMPHRregaddr,
10983 &portsmphr)){
10984 phba->work_hs |= UNPLUG_ERR;
10985 phba->work_ha |= HA_ERATT;
10986 phba->hba_flag |= HBA_ERATT_HANDLED;
10987 return 1;
10988 }
10989 if (bf_get(lpfc_sliport_status_err, &portstat_reg)) {
10990 phba->work_status[0] =
10991 readl(phba->sli4_hba.u.if_type2.ERR1regaddr);
10992 phba->work_status[1] =
10993 readl(phba->sli4_hba.u.if_type2.ERR2regaddr);
10994 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
10995 "2885 Port Status Event: "
10996 "port status reg 0x%x, "
10997 "port smphr reg 0x%x, "
10998 "error 1=0x%x, error 2=0x%x\n",
10999 portstat_reg.word0,
11000 portsmphr,
11001 phba->work_status[0],
11002 phba->work_status[1]);
11003 phba->work_ha |= HA_ERATT;
11004 phba->hba_flag |= HBA_ERATT_HANDLED;
11005 return 1;
11006 }
11007 break;
11008 case LPFC_SLI_INTF_IF_TYPE_1:
11009 default:
11010 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
11011 "2886 HBA Error Attention on unsupported "
11012 "if type %d.", if_type);
11013 return 1;
11014 }
11015
11016 return 0;
11017}
11018
11019
11020
11021
11022
11023
11024
11025
11026
11027
11028
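/**
 * lpfc_sli_check_eratt - check error attention events
 * @phba: Pointer to HBA context.
 *
 * Called outside of interrupt context to check for error attention.
 * The check is skipped when error attention is being ignored, has
 * already been handled or deferred, or when the PCI channel is
 * offline; otherwise it dispatches to the SLI revision specific read
 * routine. Returns non-zero on error attention, otherwise 0.
 **/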
11029int
11030lpfc_sli_check_eratt(struct lpfc_hba *phba)
11031{
11032 uint32_t ha_copy;
11033
11034
11035
11036
11037 if (phba->link_flag & LS_IGNORE_ERATT)
11038 return 0;
11039
11040
11041 spin_lock_irq(&phba->hbalock);
11042 if (phba->hba_flag & HBA_ERATT_HANDLED) {
11043
11044 spin_unlock_irq(&phba->hbalock);
11045 return 0;
11046 }
11047
11048
11049
11050
11051
11052 if (unlikely(phba->hba_flag & DEFER_ERATT)) {
11053 spin_unlock_irq(&phba->hbalock);
11054 return 0;
11055 }
11056
11057
11058 if (unlikely(pci_channel_offline(phba->pcidev))) {
11059 spin_unlock_irq(&phba->hbalock);
11060 return 0;
11061 }
11062
11063 switch (phba->sli_rev) {
11064 case LPFC_SLI_REV2:
11065 case LPFC_SLI_REV3:
11066
11067 ha_copy = lpfc_sli_eratt_read(phba);
11068 break;
11069 case LPFC_SLI_REV4:
11070
11071 ha_copy = lpfc_sli4_eratt_read(phba);
11072 break;
11073 default:
11074 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
11075 "0299 Invalid SLI revision (%d)\n",
11076 phba->sli_rev);
11077 ha_copy = 0;
11078 break;
11079 }
11080 spin_unlock_irq(&phba->hbalock);
11081
11082 return ha_copy;
11083}
11084
11085
11086
11087
11088
11089
11090
11091
11092
11093
11094
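/**
 * lpfc_intr_state_check - Check device state for interrupt handling
 * @phba: Pointer to HBA context.
 *
 * Counts the interrupt in the driver statistics and checks that the
 * device is present and has been brought up far enough to service
 * interrupts. Returns 0 when the interrupt should be processed, -EIO
 * otherwise.
 **/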
11095static inline int
11096lpfc_intr_state_check(struct lpfc_hba *phba)
11097{
11098
11099 if (unlikely(pci_channel_offline(phba->pcidev)))
11100 return -EIO;
11101
11102
11103 phba->sli.slistat.sli_intr++;
11104
11105
11106 if (unlikely(phba->link_state < LPFC_LINK_DOWN))
11107 return -EIO;
11108
11109 return 0;
11110}
11111
11112
11113
11114
11115
11116
11117
11118
11119
11120
11121
11122
11123
11124
11125
11126
11127
11128
11129
11130
11131
11132
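/**
 * lpfc_sli_sp_intr_handler - Slow-path interrupt handler to SLI-3 device
 * @irq: Interrupt number.
 * @dev_id: The device context pointer.
 *
 * Invoked directly in MSI-X multi-message mode, or from
 * lpfc_sli_intr_handler() in INTx/MSI mode, to service slow-path
 * events: link attention, mailbox completion, ELS ring events and
 * error attention. Work discovered here is posted to the worker
 * thread. Returns IRQ_HANDLED when an interrupt was serviced,
 * IRQ_NONE otherwise.
 **/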
11133irqreturn_t
11134lpfc_sli_sp_intr_handler(int irq, void *dev_id)
11135{
11136 struct lpfc_hba *phba;
11137 uint32_t ha_copy, hc_copy;
11138 uint32_t work_ha_copy;
11139 unsigned long status;
11140 unsigned long iflag;
11141 uint32_t control;
11142
11143 MAILBOX_t *mbox, *pmbox;
11144 struct lpfc_vport *vport;
11145 struct lpfc_nodelist *ndlp;
11146 struct lpfc_dmabuf *mp;
11147 LPFC_MBOXQ_t *pmb;
11148 int rc;
11149
11150
11151
11152
11153
11154 phba = (struct lpfc_hba *)dev_id;
11155
11156 if (unlikely(!phba))
11157 return IRQ_NONE;
11158
11159
11160
11161
11162
11163 if (phba->intr_type == MSIX) {
11164
11165 if (lpfc_intr_state_check(phba))
11166 return IRQ_NONE;
11167
11168 spin_lock_irqsave(&phba->hbalock, iflag);
11169 if (lpfc_readl(phba->HAregaddr, &ha_copy))
11170 goto unplug_error;
11171
11172
11173
11174 if (phba->link_flag & LS_IGNORE_ERATT)
11175 ha_copy &= ~HA_ERATT;
11176
11177 if (ha_copy & HA_ERATT) {
11178 if (phba->hba_flag & HBA_ERATT_HANDLED)
11179
11180 ha_copy &= ~HA_ERATT;
11181 else
11182
11183 phba->hba_flag |= HBA_ERATT_HANDLED;
11184 }
11185
11186
11187
11188
11189
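		/*
		 * If there is deferred error attention, do not check for any
		 * interrupt.
		 */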
11190 if (unlikely(phba->hba_flag & DEFER_ERATT)) {
11191 spin_unlock_irqrestore(&phba->hbalock, iflag);
11192 return IRQ_NONE;
11193 }
11194
11195
11196 if (lpfc_readl(phba->HCregaddr, &hc_copy))
11197 goto unplug_error;
11198
11199 writel(hc_copy & ~(HC_MBINT_ENA | HC_R2INT_ENA |
11200 HC_LAINT_ENA | HC_ERINT_ENA),
11201 phba->HCregaddr);
11202 writel((ha_copy & (HA_MBATT | HA_R2_CLR_MSK)),
11203 phba->HAregaddr);
11204 writel(hc_copy, phba->HCregaddr);
11205 readl(phba->HAregaddr);
11206 spin_unlock_irqrestore(&phba->hbalock, iflag);
11207 } else
11208 ha_copy = phba->ha_copy;
11209
11210 work_ha_copy = ha_copy & phba->work_ha_mask;
11211
11212 if (work_ha_copy) {
11213 if (work_ha_copy & HA_LATT) {
11214 if (phba->sli.sli_flag & LPFC_PROCESS_LA) {
11215
11216
11217
11218
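				/*
				 * Turn off Link Attention interrupts
				 * until CLEAR_LA done
				 */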
11219 spin_lock_irqsave(&phba->hbalock, iflag);
11220 phba->sli.sli_flag &= ~LPFC_PROCESS_LA;
11221 if (lpfc_readl(phba->HCregaddr, &control))
11222 goto unplug_error;
11223 control &= ~HC_LAINT_ENA;
11224 writel(control, phba->HCregaddr);
11225 readl(phba->HCregaddr);
11226 spin_unlock_irqrestore(&phba->hbalock, iflag);
11227 }
11228 else
11229 work_ha_copy &= ~HA_LATT;
11230 }
11231
11232 if (work_ha_copy & ~(HA_ERATT | HA_MBATT | HA_LATT)) {
11233
11234
11235
11236
11237 status = (work_ha_copy &
11238 (HA_RXMASK << (4*LPFC_ELS_RING)));
11239 status >>= (4*LPFC_ELS_RING);
11240 if (status & HA_RXMASK) {
11241 spin_lock_irqsave(&phba->hbalock, iflag);
11242 if (lpfc_readl(phba->HCregaddr, &control))
11243 goto unplug_error;
11244
11245 lpfc_debugfs_slow_ring_trc(phba,
11246 "ISR slow ring: ctl:x%x stat:x%x isrcnt:x%x",
11247 control, status,
11248 (uint32_t)phba->sli.slistat.sli_intr);
11249
11250 if (control & (HC_R0INT_ENA << LPFC_ELS_RING)) {
11251 lpfc_debugfs_slow_ring_trc(phba,
11252 "ISR Disable ring:"
11253 "pwork:x%x hawork:x%x wait:x%x",
11254 phba->work_ha, work_ha_copy,
11255 (uint32_t)((unsigned long)
11256 &phba->work_waitq));
11257
11258 control &=
11259 ~(HC_R0INT_ENA << LPFC_ELS_RING);
11260 writel(control, phba->HCregaddr);
11261 readl(phba->HCregaddr);
11262 }
11263 else {
11264 lpfc_debugfs_slow_ring_trc(phba,
11265 "ISR slow ring: pwork:"
11266 "x%x hawork:x%x wait:x%x",
11267 phba->work_ha, work_ha_copy,
11268 (uint32_t)((unsigned long)
11269 &phba->work_waitq));
11270 }
11271 spin_unlock_irqrestore(&phba->hbalock, iflag);
11272 }
11273 }
11274 spin_lock_irqsave(&phba->hbalock, iflag);
11275 if (work_ha_copy & HA_ERATT) {
11276 if (lpfc_sli_read_hs(phba))
11277 goto unplug_error;
11278
11279
11280
11281
11282 if ((HS_FFER1 & phba->work_hs) &&
11283 ((HS_FFER2 | HS_FFER3 | HS_FFER4 | HS_FFER5 |
11284 HS_FFER6 | HS_FFER7 | HS_FFER8) &
11285 phba->work_hs)) {
11286 phba->hba_flag |= DEFER_ERATT;
11287
11288 writel(0, phba->HCregaddr);
11289 readl(phba->HCregaddr);
11290 }
11291 }
11292
11293 if ((work_ha_copy & HA_MBATT) && (phba->sli.mbox_active)) {
11294 pmb = phba->sli.mbox_active;
11295 pmbox = &pmb->u.mb;
11296 mbox = phba->mbox;
11297 vport = pmb->vport;
11298
11299
11300 lpfc_sli_pcimem_bcopy(mbox, pmbox, sizeof(uint32_t));
11301 if (pmbox->mbxOwner != OWN_HOST) {
11302 spin_unlock_irqrestore(&phba->hbalock, iflag);
11303
11304
11305
11306
11307 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX |
11308 LOG_SLI,
11309 "(%d):0304 Stray Mailbox "
11310 "Interrupt mbxCommand x%x "
11311 "mbxStatus x%x\n",
11312 (vport ? vport->vpi : 0),
11313 pmbox->mbxCommand,
11314 pmbox->mbxStatus);
11315
11316 work_ha_copy &= ~HA_MBATT;
11317 } else {
11318 phba->sli.mbox_active = NULL;
11319 spin_unlock_irqrestore(&phba->hbalock, iflag);
11320 phba->last_completion_time = jiffies;
11321 del_timer(&phba->sli.mbox_tmo);
11322 if (pmb->mbox_cmpl) {
11323 lpfc_sli_pcimem_bcopy(mbox, pmbox,
11324 MAILBOX_CMD_SIZE);
11325 if (pmb->out_ext_byte_len &&
11326 pmb->context2)
11327 lpfc_sli_pcimem_bcopy(
11328 phba->mbox_ext,
11329 pmb->context2,
11330 pmb->out_ext_byte_len);
11331 }
11332 if (pmb->mbox_flag & LPFC_MBX_IMED_UNREG) {
11333 pmb->mbox_flag &= ~LPFC_MBX_IMED_UNREG;
11334
11335 lpfc_debugfs_disc_trc(vport,
11336 LPFC_DISC_TRC_MBOX_VPORT,
11337 "MBOX dflt rpi: : "
11338 "status:x%x rpi:x%x",
11339 (uint32_t)pmbox->mbxStatus,
11340 pmbox->un.varWords[0], 0);
11341
11342 if (!pmbox->mbxStatus) {
11343 mp = (struct lpfc_dmabuf *)
11344 (pmb->context1);
11345 ndlp = (struct lpfc_nodelist *)
11346 pmb->context2;
11347
11348
11349
11350
11351
11352
11353 lpfc_unreg_login(phba,
11354 vport->vpi,
11355 pmbox->un.varWords[0],
11356 pmb);
11357 pmb->mbox_cmpl =
11358 lpfc_mbx_cmpl_dflt_rpi;
11359 pmb->context1 = mp;
11360 pmb->context2 = ndlp;
11361 pmb->vport = vport;
11362 rc = lpfc_sli_issue_mbox(phba,
11363 pmb,
11364 MBX_NOWAIT);
11365 if (rc != MBX_BUSY)
11366 lpfc_printf_log(phba,
11367 KERN_ERR,
11368 LOG_MBOX | LOG_SLI,
11369 "0350 rc should have"
11370 "been MBX_BUSY\n");
11371 if (rc != MBX_NOT_FINISHED)
11372 goto send_current_mbox;
11373 }
11374 }
11375 spin_lock_irqsave(
11376 &phba->pport->work_port_lock,
11377 iflag);
11378 phba->pport->work_port_events &=
11379 ~WORKER_MBOX_TMO;
11380 spin_unlock_irqrestore(
11381 &phba->pport->work_port_lock,
11382 iflag);
11383 lpfc_mbox_cmpl_put(phba, pmb);
11384 }
11385 } else
11386 spin_unlock_irqrestore(&phba->hbalock, iflag);
11387
11388 if ((work_ha_copy & HA_MBATT) &&
11389 (phba->sli.mbox_active == NULL)) {
11390send_current_mbox:
11391
11392 do {
11393 rc = lpfc_sli_issue_mbox(phba, NULL,
11394 MBX_NOWAIT);
11395 } while (rc == MBX_NOT_FINISHED);
11396 if (rc != MBX_SUCCESS)
11397 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX |
11398 LOG_SLI, "0349 rc should be "
11399 "MBX_SUCCESS\n");
11400 }
11401
11402 spin_lock_irqsave(&phba->hbalock, iflag);
11403 phba->work_ha |= work_ha_copy;
11404 spin_unlock_irqrestore(&phba->hbalock, iflag);
11405 lpfc_worker_wake_up(phba);
11406 }
11407 return IRQ_HANDLED;
11408unplug_error:
11409 spin_unlock_irqrestore(&phba->hbalock, iflag);
11410 return IRQ_HANDLED;
11411
11412}
11413
11414
11415
11416
11417
11418
11419
11420
11421
11422
11423
11424
11425
11426
11427
11428
11429
11430
11431
11432
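/**
 * lpfc_sli_fp_intr_handler - Fast-path interrupt handler to SLI-3 device.
 * @irq: Interrupt number.
 * @dev_id: The device context pointer.
 *
 * Invoked directly in MSI-X multi-message mode, or from
 * lpfc_sli_intr_handler() in INTx/MSI mode, to service fast-path
 * events on the FCP ring and, when multi-ring support is configured,
 * the extra ring. Returns IRQ_HANDLED when an interrupt was serviced,
 * IRQ_NONE otherwise.
 **/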
11433irqreturn_t
11434lpfc_sli_fp_intr_handler(int irq, void *dev_id)
11435{
11436 struct lpfc_hba *phba;
11437 uint32_t ha_copy;
11438 unsigned long status;
11439 unsigned long iflag;
11440
11441
11442
11443
11444 phba = (struct lpfc_hba *) dev_id;
11445
11446 if (unlikely(!phba))
11447 return IRQ_NONE;
11448
11449
11450
11451
11452
11453 if (phba->intr_type == MSIX) {
11454
11455 if (lpfc_intr_state_check(phba))
11456 return IRQ_NONE;
11457
11458 if (lpfc_readl(phba->HAregaddr, &ha_copy))
11459 return IRQ_HANDLED;
11460
11461 spin_lock_irqsave(&phba->hbalock, iflag);
11462
11463
11464
11465
11466 if (unlikely(phba->hba_flag & DEFER_ERATT)) {
11467 spin_unlock_irqrestore(&phba->hbalock, iflag);
11468 return IRQ_NONE;
11469 }
11470 writel((ha_copy & (HA_R0_CLR_MSK | HA_R1_CLR_MSK)),
11471 phba->HAregaddr);
11472 readl(phba->HAregaddr);
11473 spin_unlock_irqrestore(&phba->hbalock, iflag);
11474 } else
11475 ha_copy = phba->ha_copy;
11476
11477
11478
11479
11480 ha_copy &= ~(phba->work_ha_mask);
11481
11482 status = (ha_copy & (HA_RXMASK << (4*LPFC_FCP_RING)));
11483 status >>= (4*LPFC_FCP_RING);
11484 if (status & HA_RXMASK)
11485 lpfc_sli_handle_fast_ring_event(phba,
11486 &phba->sli.ring[LPFC_FCP_RING],
11487 status);
11488
11489 if (phba->cfg_multi_ring_support == 2) {
11490
11491
11492
11493
11494 status = (ha_copy & (HA_RXMASK << (4*LPFC_EXTRA_RING)));
11495 status >>= (4*LPFC_EXTRA_RING);
11496 if (status & HA_RXMASK) {
11497 lpfc_sli_handle_fast_ring_event(phba,
11498 &phba->sli.ring[LPFC_EXTRA_RING],
11499 status);
11500 }
11501 }
11502 return IRQ_HANDLED;
11503}
11504
11505
11506
11507
11508
11509
11510
11511
11512
11513
11514
11515
11516
11517
11518
11519
11520
11521
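/**
 * lpfc_sli_intr_handler - Device-level interrupt handler to SLI-3 device
 * @irq: Interrupt number.
 * @dev_id: The device context pointer.
 *
 * INTx/MSI mode entry point. Reads and clears the host attention
 * register, then invokes the slow-path and fast-path handlers for the
 * attention bits that are set. Returns IRQ_HANDLED when an interrupt
 * was serviced, IRQ_NONE otherwise.
 **/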
11522irqreturn_t
11523lpfc_sli_intr_handler(int irq, void *dev_id)
11524{
11525 struct lpfc_hba *phba;
11526 irqreturn_t sp_irq_rc, fp_irq_rc;
11527 unsigned long status1, status2;
11528 uint32_t hc_copy;
11529
11530
11531
11532
11533
11534 phba = (struct lpfc_hba *) dev_id;
11535
11536 if (unlikely(!phba))
11537 return IRQ_NONE;
11538
11539
11540 if (lpfc_intr_state_check(phba))
11541 return IRQ_NONE;
11542
11543 spin_lock(&phba->hbalock);
11544 if (lpfc_readl(phba->HAregaddr, &phba->ha_copy)) {
11545 spin_unlock(&phba->hbalock);
11546 return IRQ_HANDLED;
11547 }
11548
11549 if (unlikely(!phba->ha_copy)) {
11550 spin_unlock(&phba->hbalock);
11551 return IRQ_NONE;
11552 } else if (phba->ha_copy & HA_ERATT) {
11553 if (phba->hba_flag & HBA_ERATT_HANDLED)
11554
11555 phba->ha_copy &= ~HA_ERATT;
11556 else
11557
11558 phba->hba_flag |= HBA_ERATT_HANDLED;
11559 }
11560
11561
11562
11563
11564 if (unlikely(phba->hba_flag & DEFER_ERATT)) {
11565 spin_unlock(&phba->hbalock);
11566 return IRQ_NONE;
11567 }
11568
11569
11570 if (lpfc_readl(phba->HCregaddr, &hc_copy)) {
11571 spin_unlock(&phba->hbalock);
11572 return IRQ_HANDLED;
11573 }
11574 writel(hc_copy & ~(HC_MBINT_ENA | HC_R0INT_ENA | HC_R1INT_ENA
11575 | HC_R2INT_ENA | HC_LAINT_ENA | HC_ERINT_ENA),
11576 phba->HCregaddr);
11577 writel((phba->ha_copy & ~(HA_LATT | HA_ERATT)), phba->HAregaddr);
11578 writel(hc_copy, phba->HCregaddr);
11579 readl(phba->HAregaddr);
11580 spin_unlock(&phba->hbalock);
11581
11582
11583
11584
11585
11586
11587 status1 = phba->ha_copy & (HA_MBATT | HA_LATT | HA_ERATT);
11588
11589
11590 status2 = (phba->ha_copy & (HA_RXMASK << (4*LPFC_ELS_RING)));
11591 status2 >>= (4*LPFC_ELS_RING);
11592
11593 if (status1 || (status2 & HA_RXMASK))
11594 sp_irq_rc = lpfc_sli_sp_intr_handler(irq, dev_id);
11595 else
11596 sp_irq_rc = IRQ_NONE;
11597
11598
11599
11600
11601
11602
11603 status1 = (phba->ha_copy & (HA_RXMASK << (4*LPFC_FCP_RING)));
11604 status1 >>= (4*LPFC_FCP_RING);
11605
11606
11607 if (phba->cfg_multi_ring_support == 2) {
11608 status2 = (phba->ha_copy & (HA_RXMASK << (4*LPFC_EXTRA_RING)));
11609 status2 >>= (4*LPFC_EXTRA_RING);
11610 } else
11611 status2 = 0;
11612
11613 if ((status1 & HA_RXMASK) || (status2 & HA_RXMASK))
11614 fp_irq_rc = lpfc_sli_fp_intr_handler(irq, dev_id);
11615 else
11616 fp_irq_rc = IRQ_NONE;
11617
11618
11619 return (sp_irq_rc == IRQ_HANDLED) ? sp_irq_rc : fp_irq_rc;
11620}
11621
11622
11623
11624
11625
11626
11627
11628
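/**
 * lpfc_sli4_fcp_xri_abort_event_proc - Process fcp xri abort event
 * @phba: pointer to lpfc hba data structure.
 *
 * Invoked by the worker thread to drain the queue of pending FCP XRI
 * abort events, processing and releasing each event in turn.
 **/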
11629void lpfc_sli4_fcp_xri_abort_event_proc(struct lpfc_hba *phba)
11630{
11631 struct lpfc_cq_event *cq_event;
11632
11633
11634 spin_lock_irq(&phba->hbalock);
11635 phba->hba_flag &= ~FCP_XRI_ABORT_EVENT;
11636 spin_unlock_irq(&phba->hbalock);
11637
11638 while (!list_empty(&phba->sli4_hba.sp_fcp_xri_aborted_work_queue)) {
11639
11640 spin_lock_irq(&phba->hbalock);
11641 list_remove_head(&phba->sli4_hba.sp_fcp_xri_aborted_work_queue,
11642 cq_event, struct lpfc_cq_event, list);
11643 spin_unlock_irq(&phba->hbalock);
11644
11645 lpfc_sli4_fcp_xri_aborted(phba, &cq_event->cqe.wcqe_axri);
11646
11647 lpfc_sli4_cq_event_release(phba, cq_event);
11648 }
11649}
11650
11651
11652
11653
11654
11655
11656
11657
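/**
 * lpfc_sli4_els_xri_abort_event_proc - Process els xri abort event
 * @phba: pointer to lpfc hba data structure.
 *
 * Invoked by the worker thread to drain the queue of pending ELS XRI
 * abort events, processing and releasing each event in turn.
 **/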
11658void lpfc_sli4_els_xri_abort_event_proc(struct lpfc_hba *phba)
11659{
11660 struct lpfc_cq_event *cq_event;
11661
11662
11663 spin_lock_irq(&phba->hbalock);
11664 phba->hba_flag &= ~ELS_XRI_ABORT_EVENT;
11665 spin_unlock_irq(&phba->hbalock);
11666
11667 while (!list_empty(&phba->sli4_hba.sp_els_xri_aborted_work_queue)) {
11668
11669 spin_lock_irq(&phba->hbalock);
11670 list_remove_head(&phba->sli4_hba.sp_els_xri_aborted_work_queue,
11671 cq_event, struct lpfc_cq_event, list);
11672 spin_unlock_irq(&phba->hbalock);
11673
11674 lpfc_sli4_els_xri_aborted(phba, &cq_event->cqe.wcqe_axri);
11675
11676 lpfc_sli4_cq_event_release(phba, cq_event);
11677 }
11678}
11679
11680
11681
11682
11683
11684
11685
11686
11687
11688
11689
11690
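/**
 * lpfc_sli4_iocb_param_transfer - Transfer pIocbOut and cmpl status to pIocbIn
 * @phba: pointer to lpfc hba data structure.
 * @pIocbIn: pointer to the rspiocbq.
 * @pIocbOut: pointer to the cmdiocbq.
 * @wcqe: pointer to the complete wcqe.
 *
 * Builds an SLI-3 style response iocb from the command iocb and the
 * work-queue completion entry: copies the non-response fields,
 * translates the WCQE status (including BlockGuard/DIF errors) and
 * records the exchange-busy state reported by the firmware.
 **/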
11691static void
11692lpfc_sli4_iocb_param_transfer(struct lpfc_hba *phba,
11693 struct lpfc_iocbq *pIocbIn,
11694 struct lpfc_iocbq *pIocbOut,
11695 struct lpfc_wcqe_complete *wcqe)
11696{
11697 int numBdes, i;
11698 unsigned long iflags;
11699 uint32_t status, max_response;
11700 struct lpfc_dmabuf *dmabuf;
11701 struct ulp_bde64 *bpl, bde;
11702 size_t offset = offsetof(struct lpfc_iocbq, iocb);
11703
11704 memcpy((char *)pIocbIn + offset, (char *)pIocbOut + offset,
11705 sizeof(struct lpfc_iocbq) - offset);
11706
11707 status = bf_get(lpfc_wcqe_c_status, wcqe);
11708 pIocbIn->iocb.ulpStatus = (status & LPFC_IOCB_STATUS_MASK);
11709 if (pIocbOut->iocb_flag & LPFC_IO_FCP)
11710 if (pIocbIn->iocb.ulpStatus == IOSTAT_FCP_RSP_ERROR)
11711 pIocbIn->iocb.un.fcpi.fcpi_parm =
11712 pIocbOut->iocb.un.fcpi.fcpi_parm -
11713 wcqe->total_data_placed;
11714 else
11715 pIocbIn->iocb.un.ulpWord[4] = wcqe->parameter;
11716 else {
11717 pIocbIn->iocb.un.ulpWord[4] = wcqe->parameter;
11718 switch (pIocbOut->iocb.ulpCommand) {
11719 case CMD_ELS_REQUEST64_CR:
11720 dmabuf = (struct lpfc_dmabuf *)pIocbOut->context3;
11721 bpl = (struct ulp_bde64 *)dmabuf->virt;
11722 bde.tus.w = le32_to_cpu(bpl[1].tus.w);
11723 max_response = bde.tus.f.bdeSize;
11724 break;
11725 case CMD_GEN_REQUEST64_CR:
11726 max_response = 0;
11727 if (!pIocbOut->context3)
11728 break;
11729 numBdes = pIocbOut->iocb.un.genreq64.bdl.bdeSize/
11730 sizeof(struct ulp_bde64);
11731 dmabuf = (struct lpfc_dmabuf *)pIocbOut->context3;
11732 bpl = (struct ulp_bde64 *)dmabuf->virt;
11733 for (i = 0; i < numBdes; i++) {
11734 bde.tus.w = le32_to_cpu(bpl[i].tus.w);
11735 if (bde.tus.f.bdeFlags != BUFF_TYPE_BDE_64)
11736 max_response += bde.tus.f.bdeSize;
11737 }
11738 break;
11739 default:
11740 max_response = wcqe->total_data_placed;
11741 break;
11742 }
11743 if (max_response < wcqe->total_data_placed)
11744 pIocbIn->iocb.un.genreq64.bdl.bdeSize = max_response;
11745 else
11746 pIocbIn->iocb.un.genreq64.bdl.bdeSize =
11747 wcqe->total_data_placed;
11748 }
11749
11750
11751 if (status == CQE_STATUS_DI_ERROR) {
11752 pIocbIn->iocb.ulpStatus = IOSTAT_LOCAL_REJECT;
11753
11754 if (bf_get(lpfc_wcqe_c_bg_edir, wcqe))
11755 pIocbIn->iocb.un.ulpWord[4] = IOERR_RX_DMA_FAILED;
11756 else
11757 pIocbIn->iocb.un.ulpWord[4] = IOERR_TX_DMA_FAILED;
11758
11759 pIocbIn->iocb.unsli3.sli3_bg.bgstat = 0;
11760 if (bf_get(lpfc_wcqe_c_bg_ge, wcqe))
11761 pIocbIn->iocb.unsli3.sli3_bg.bgstat |=
11762 BGS_GUARD_ERR_MASK;
11763 if (bf_get(lpfc_wcqe_c_bg_ae, wcqe))
11764 pIocbIn->iocb.unsli3.sli3_bg.bgstat |=
11765 BGS_APPTAG_ERR_MASK;
11766 if (bf_get(lpfc_wcqe_c_bg_re, wcqe))
11767 pIocbIn->iocb.unsli3.sli3_bg.bgstat |=
11768 BGS_REFTAG_ERR_MASK;
11769
11770
11771 if (bf_get(lpfc_wcqe_c_bg_tdpv, wcqe)) {
11772 pIocbIn->iocb.unsli3.sli3_bg.bgstat |=
11773 BGS_HI_WATER_MARK_PRESENT_MASK;
11774 pIocbIn->iocb.unsli3.sli3_bg.bghm =
11775 wcqe->total_data_placed;
11776 }
11777
11778
11779
11780
11781
11782 if (!pIocbIn->iocb.unsli3.sli3_bg.bgstat)
11783 pIocbIn->iocb.unsli3.sli3_bg.bgstat |=
11784 (BGS_REFTAG_ERR_MASK | BGS_APPTAG_ERR_MASK |
11785 BGS_GUARD_ERR_MASK);
11786 }
11787
11788
11789 if (bf_get(lpfc_wcqe_c_xb, wcqe)) {
11790 spin_lock_irqsave(&phba->hbalock, iflags);
11791 pIocbIn->iocb_flag |= LPFC_EXCHANGE_BUSY;
11792 spin_unlock_irqrestore(&phba->hbalock, iflags);
11793 }
11794}
11795
11796
11797
11798
11799
11800
11801
11802
11803
11804
11805
11806
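/**
 * lpfc_sli4_els_wcqe_to_rspiocbq - Get response iocbq from els wcqe
 * @phba: Pointer to HBA context object.
 * @irspiocbq: Pointer to work-queue completion iocb.
 *
 * Looks up the ELS command iocb matching the WCQE's request tag, puts
 * it back on the txcmplq for the normal completion path, and transfers
 * the completion parameters into @irspiocbq. Returns @irspiocbq, or
 * NULL (after releasing it) when no matching command iocb is found.
 **/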
11807static struct lpfc_iocbq *
11808lpfc_sli4_els_wcqe_to_rspiocbq(struct lpfc_hba *phba,
11809 struct lpfc_iocbq *irspiocbq)
11810{
11811 struct lpfc_sli_ring *pring = &phba->sli.ring[LPFC_ELS_RING];
11812 struct lpfc_iocbq *cmdiocbq;
11813 struct lpfc_wcqe_complete *wcqe;
11814 unsigned long iflags;
11815
11816 wcqe = &irspiocbq->cq_event.cqe.wcqe_cmpl;
11817 spin_lock_irqsave(&pring->ring_lock, iflags);
11818 pring->stats.iocb_event++;
11819
11820 cmdiocbq = lpfc_sli_iocbq_lookup_by_tag(phba, pring,
11821 bf_get(lpfc_wcqe_c_request_tag, wcqe));
11822
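	/* Put the iocb back on the txcmplq */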
11823 lpfc_sli_ringtxcmpl_put(phba, pring, cmdiocbq);
11824 spin_unlock_irqrestore(&pring->ring_lock, iflags);
11825
11826 if (unlikely(!cmdiocbq)) {
11827 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
11828 "0386 ELS complete with no corresponding "
11829 "cmdiocb: iotag (%d)\n",
11830 bf_get(lpfc_wcqe_c_request_tag, wcqe));
11831 lpfc_sli_release_iocbq(phba, irspiocbq);
11832 return NULL;
11833 }
11834
11835
11836 lpfc_sli4_iocb_param_transfer(phba, irspiocbq, cmdiocbq, wcqe);
11837
11838 return irspiocbq;
11839}
11840
11841
11842
11843
11844
11845
11846
11847
11848
11849
11850
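/**
 * lpfc_sli4_sp_handle_async_event - Handle an asynchronous event
 * @phba: Pointer to HBA context object.
 * @mcqe: Pointer to mailbox completion queue entry.
 *
 * Copies the asynchronous MCQE into a freshly allocated CQ event,
 * queues it on the slow-path async work queue and sets ASYNC_EVENT.
 * Returns true when work was posted to the worker thread, false when
 * no CQ event could be allocated.
 **/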
11851static bool
11852lpfc_sli4_sp_handle_async_event(struct lpfc_hba *phba, struct lpfc_mcqe *mcqe)
11853{
11854 struct lpfc_cq_event *cq_event;
11855 unsigned long iflags;
11856
11857 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
11858 "0392 Async Event: word0:x%x, word1:x%x, "
11859 "word2:x%x, word3:x%x\n", mcqe->word0,
11860 mcqe->mcqe_tag0, mcqe->mcqe_tag1, mcqe->trailer);
11861
11862
11863 cq_event = lpfc_sli4_cq_event_alloc(phba);
11864 if (!cq_event) {
11865 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
11866 "0394 Failed to allocate CQ_EVENT entry\n");
11867 return false;
11868 }
11869
11870
11871 memcpy(&cq_event->cqe, mcqe, sizeof(struct lpfc_mcqe));
11872 spin_lock_irqsave(&phba->hbalock, iflags);
11873 list_add_tail(&cq_event->list, &phba->sli4_hba.sp_asynce_work_queue);
11874
11875 phba->hba_flag |= ASYNC_EVENT;
11876 spin_unlock_irqrestore(&phba->hbalock, iflags);
11877
11878 return true;
11879}
11880
11881
11882
11883
11884
11885
11886
11887
11888
11889
11890
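/**
 * lpfc_sli4_sp_handle_mbox_event - Handle a mailbox completion event
 * @phba: Pointer to HBA context object.
 * @mcqe: Pointer to mailbox completion queue entry.
 *
 * Completes the active mailbox command: copies the mailbox data back,
 * folds a non-success MCQE status into the MQE status, performs the
 * implicit UNREG_LOGIN for default RPI mailboxes, queues the command
 * on the completion list and wakes the worker thread. Returns true
 * when work was posted to the worker thread.
 **/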
11891static bool
11892lpfc_sli4_sp_handle_mbox_event(struct lpfc_hba *phba, struct lpfc_mcqe *mcqe)
11893{
11894 uint32_t mcqe_status;
11895 MAILBOX_t *mbox, *pmbox;
11896 struct lpfc_mqe *mqe;
11897 struct lpfc_vport *vport;
11898 struct lpfc_nodelist *ndlp;
11899 struct lpfc_dmabuf *mp;
11900 unsigned long iflags;
11901 LPFC_MBOXQ_t *pmb;
11902 bool workposted = false;
11903 int rc;
11904
11905
11906 if (!bf_get(lpfc_trailer_completed, mcqe))
11907 goto out_no_mqe_complete;
11908
11909
11910 spin_lock_irqsave(&phba->hbalock, iflags);
11911 pmb = phba->sli.mbox_active;
11912 if (unlikely(!pmb)) {
11913 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX,
11914 "1832 No pending MBOX command to handle\n");
11915 spin_unlock_irqrestore(&phba->hbalock, iflags);
11916 goto out_no_mqe_complete;
11917 }
11918 spin_unlock_irqrestore(&phba->hbalock, iflags);
11919 mqe = &pmb->u.mqe;
11920 pmbox = (MAILBOX_t *)&pmb->u.mqe;
11921 mbox = phba->mbox;
11922 vport = pmb->vport;
11923
11924
11925 phba->last_completion_time = jiffies;
11926 del_timer(&phba->sli.mbox_tmo);
11927
11928
11929 if (pmb->mbox_cmpl && mbox)
11930 lpfc_sli_pcimem_bcopy(mbox, mqe, sizeof(struct lpfc_mqe));
11931
11932
11933
11934
11935
11936 mcqe_status = bf_get(lpfc_mcqe_status, mcqe);
11937 if (mcqe_status != MB_CQE_STATUS_SUCCESS) {
11938 if (bf_get(lpfc_mqe_status, mqe) == MBX_SUCCESS)
11939 bf_set(lpfc_mqe_status, mqe,
11940 (LPFC_MBX_ERROR_RANGE | mcqe_status));
11941 }
11942 if (pmb->mbox_flag & LPFC_MBX_IMED_UNREG) {
11943 pmb->mbox_flag &= ~LPFC_MBX_IMED_UNREG;
11944 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_MBOX_VPORT,
11945 "MBOX dflt rpi: status:x%x rpi:x%x",
11946 mcqe_status,
11947 pmbox->un.varWords[0], 0);
11948 if (mcqe_status == MB_CQE_STATUS_SUCCESS) {
11949 mp = (struct lpfc_dmabuf *)(pmb->context1);
11950 ndlp = (struct lpfc_nodelist *)pmb->context2;
11951
11952
11953
11954 lpfc_unreg_login(phba, vport->vpi,
11955 pmbox->un.varWords[0], pmb);
11956 pmb->mbox_cmpl = lpfc_mbx_cmpl_dflt_rpi;
11957 pmb->context1 = mp;
11958 pmb->context2 = ndlp;
11959 pmb->vport = vport;
11960 rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT);
11961 if (rc != MBX_BUSY)
11962 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX |
11963 LOG_SLI, "0385 rc should "
11964 "have been MBX_BUSY\n");
11965 if (rc != MBX_NOT_FINISHED)
11966 goto send_current_mbox;
11967 }
11968 }
11969 spin_lock_irqsave(&phba->pport->work_port_lock, iflags);
11970 phba->pport->work_port_events &= ~WORKER_MBOX_TMO;
11971 spin_unlock_irqrestore(&phba->pport->work_port_lock, iflags);
11972
11973
11974 spin_lock_irqsave(&phba->hbalock, iflags);
11975 __lpfc_mbox_cmpl_put(phba, pmb);
11976 phba->work_ha |= HA_MBATT;
11977 spin_unlock_irqrestore(&phba->hbalock, iflags);
11978 workposted = true;
11979
11980send_current_mbox:
11981 spin_lock_irqsave(&phba->hbalock, iflags);
11982
11983 phba->sli.sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
11984
11985 phba->sli.mbox_active = NULL;
11986 spin_unlock_irqrestore(&phba->hbalock, iflags);
11987
11988 lpfc_worker_wake_up(phba);
11989out_no_mqe_complete:
11990 if (bf_get(lpfc_trailer_consumed, mcqe))
11991 lpfc_sli4_mq_release(phba->sli4_hba.mbx_wq);
11992 return workposted;
11993}
11994
11995
11996
11997
11998
11999
12000
12001
12002
12003
12004
12005
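/**
 * lpfc_sli4_sp_handle_mcqe - Process a mailbox completion queue entry
 * @phba: Pointer to HBA context object.
 * @cqe: Pointer to mailbox completion queue entry.
 *
 * Copies the MCQE out of the completion queue and dispatches it either
 * as an asynchronous event or as a mailbox completion. Returns true
 * when work was posted to the worker thread.
 **/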
12006static bool
12007lpfc_sli4_sp_handle_mcqe(struct lpfc_hba *phba, struct lpfc_cqe *cqe)
12008{
12009 struct lpfc_mcqe mcqe;
12010 bool workposted;
12011
12012
12013 lpfc_sli_pcimem_bcopy(cqe, &mcqe, sizeof(struct lpfc_mcqe));
12014
12015
12016 if (!bf_get(lpfc_trailer_async, &mcqe))
12017 workposted = lpfc_sli4_sp_handle_mbox_event(phba, &mcqe);
12018 else
12019 workposted = lpfc_sli4_sp_handle_async_event(phba, &mcqe);
12020 return workposted;
12021}
12022
12023
12024
12025
12026
12027
12028
12029
12030
12031
12032
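/**
 * lpfc_sli4_sp_handle_els_wcqe - Handle els work-queue completion event
 * @phba: Pointer to HBA context object.
 * @cq: Pointer to the completion queue.
 * @wcqe: Pointer to work-queue completion queue entry.
 *
 * Saves the ELS WCQE into a newly allocated response iocbq and queues
 * it on the slow-path queue-event list for the worker thread. Returns
 * true when work was posted, false when no iocbq could be allocated.
 **/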
12033static bool
12034lpfc_sli4_sp_handle_els_wcqe(struct lpfc_hba *phba, struct lpfc_queue *cq,
12035 struct lpfc_wcqe_complete *wcqe)
12036{
12037 struct lpfc_iocbq *irspiocbq;
12038 unsigned long iflags;
12039 struct lpfc_sli_ring *pring = cq->pring;
12040 int txq_cnt = 0;
12041 int txcmplq_cnt = 0;
12042 int fcp_txcmplq_cnt = 0;
12043
12044
12045 irspiocbq = lpfc_sli_get_iocbq(phba);
12046 if (!irspiocbq) {
12047 if (!list_empty(&pring->txq))
12048 txq_cnt++;
12049 if (!list_empty(&pring->txcmplq))
12050 txcmplq_cnt++;
12051 if (!list_empty(&phba->sli.ring[LPFC_FCP_RING].txcmplq))
12052 fcp_txcmplq_cnt++;
12053 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
12054 "0387 NO IOCBQ data: txq_cnt=%d iocb_cnt=%d "
12055 "fcp_txcmplq_cnt=%d, els_txcmplq_cnt=%d\n",
12056 txq_cnt, phba->iocb_cnt,
12057 fcp_txcmplq_cnt,
12058 txcmplq_cnt);
12059 return false;
12060 }
12061
12062
12063 memcpy(&irspiocbq->cq_event.cqe.wcqe_cmpl, wcqe, sizeof(*wcqe));
12064 spin_lock_irqsave(&phba->hbalock, iflags);
12065 list_add_tail(&irspiocbq->cq_event.list,
12066 &phba->sli4_hba.sp_queue_event);
12067 phba->hba_flag |= HBA_SP_QUEUE_EVT;
12068 spin_unlock_irqrestore(&phba->hbalock, iflags);
12069
12070 return true;
12071}
12072
12073
12074
12075
12076
12077
12078
12079
12080
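/**
 * lpfc_sli4_sp_handle_rel_wcqe - Handle slow-path WQ entry consumed event
 * @phba: Pointer to HBA context object.
 * @wcqe: Pointer to work-queue completion queue entry.
 *
 * Releases the consumed entries from the ELS work queue after checking
 * that the WCQE carries the ELS work queue's id.
 **/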
12081static void
12082lpfc_sli4_sp_handle_rel_wcqe(struct lpfc_hba *phba,
12083 struct lpfc_wcqe_release *wcqe)
12084{
12085
12086 if (unlikely(!phba->sli4_hba.els_wq))
12087 return;
12088
12089 if (bf_get(lpfc_wcqe_r_wq_id, wcqe) == phba->sli4_hba.els_wq->queue_id)
12090 lpfc_sli4_wq_release(phba->sli4_hba.els_wq,
12091 bf_get(lpfc_wcqe_r_wqe_index, wcqe));
12092 else
12093 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
12094 "2579 Slow-path wqe consume event carries "
12095 "miss-matched qid: wcqe-qid=x%x, sp-qid=x%x\n",
12096 bf_get(lpfc_wcqe_r_wqe_index, wcqe),
12097 phba->sli4_hba.els_wq->queue_id);
12098}
12099
12100
12101
12102
12103
12104
12105
12106
12107
12108
12109
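/**
 * lpfc_sli4_sp_handle_abort_xri_wcqe - Handle a xri abort event
 * @phba: Pointer to HBA context object.
 * @cq: Pointer to the completion queue that generated the event.
 * @wcqe: Pointer to the XRI-aborted completion queue entry.
 *
 * Copies the XRI-aborted WCQE into a CQ event and queues it on the FCP
 * or ELS abort work queue based on the CQ subtype. Returns true when
 * work was posted to the worker thread, false otherwise.
 **/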
12110static bool
12111lpfc_sli4_sp_handle_abort_xri_wcqe(struct lpfc_hba *phba,
12112 struct lpfc_queue *cq,
12113 struct sli4_wcqe_xri_aborted *wcqe)
12114{
12115 bool workposted = false;
12116 struct lpfc_cq_event *cq_event;
12117 unsigned long iflags;
12118
12119
12120 cq_event = lpfc_sli4_cq_event_alloc(phba);
12121 if (!cq_event) {
12122 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
12123 "0602 Failed to allocate CQ_EVENT entry\n");
12124 return false;
12125 }
12126
12127
12128 memcpy(&cq_event->cqe, wcqe, sizeof(struct sli4_wcqe_xri_aborted));
12129 switch (cq->subtype) {
12130 case LPFC_FCP:
12131 spin_lock_irqsave(&phba->hbalock, iflags);
12132 list_add_tail(&cq_event->list,
12133 &phba->sli4_hba.sp_fcp_xri_aborted_work_queue);
12134
12135 phba->hba_flag |= FCP_XRI_ABORT_EVENT;
12136 spin_unlock_irqrestore(&phba->hbalock, iflags);
12137 workposted = true;
12138 break;
12139 case LPFC_ELS:
12140 spin_lock_irqsave(&phba->hbalock, iflags);
12141 list_add_tail(&cq_event->list,
12142 &phba->sli4_hba.sp_els_xri_aborted_work_queue);
12143
12144 phba->hba_flag |= ELS_XRI_ABORT_EVENT;
12145 spin_unlock_irqrestore(&phba->hbalock, iflags);
12146 workposted = true;
12147 break;
12148 default:
12149 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
12150 "0603 Invalid work queue CQE subtype (x%x)\n",
12151 cq->subtype);
12152 workposted = false;
12153 break;
12154 }
12155 return workposted;
12156}
12157
12158
12159
12160
12161
12162
12163
12164
12165
12166
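/**
 * lpfc_sli4_sp_handle_rcqe - Process unsolicited receive-queue completion
 * @phba: Pointer to HBA context object.
 * @rcqe: Pointer to receive-queue completion queue entry.
 *
 * Handles an unsolicited frame: takes the matching buffer off the
 * header/data receive queue pair and queues it on the slow-path
 * queue-event list, or asks the worker thread to post more receive
 * buffers when the queues have run dry. Returns true when work was
 * posted to the worker thread.
 **/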
12167static bool
12168lpfc_sli4_sp_handle_rcqe(struct lpfc_hba *phba, struct lpfc_rcqe *rcqe)
12169{
12170 bool workposted = false;
12171 struct lpfc_queue *hrq = phba->sli4_hba.hdr_rq;
12172 struct lpfc_queue *drq = phba->sli4_hba.dat_rq;
12173 struct hbq_dmabuf *dma_buf;
12174 uint32_t status, rq_id;
12175 unsigned long iflags;
12176
12177
12178 if (unlikely(!hrq) || unlikely(!drq))
12179 return workposted;
12180
12181 if (bf_get(lpfc_cqe_code, rcqe) == CQE_CODE_RECEIVE_V1)
12182 rq_id = bf_get(lpfc_rcqe_rq_id_v1, rcqe);
12183 else
12184 rq_id = bf_get(lpfc_rcqe_rq_id, rcqe);
12185 if (rq_id != hrq->queue_id)
12186 goto out;
12187
12188 status = bf_get(lpfc_rcqe_status, rcqe);
12189 switch (status) {
12190 case FC_STATUS_RQ_BUF_LEN_EXCEEDED:
12191 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
12192 "2537 Receive Frame Truncated!!\n");
12193 hrq->RQ_buf_trunc++;
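		/* Fall through - the truncated frame is still queued
		 * like a successful receive so its buffer is reclaimed.
		 */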
12194 case FC_STATUS_RQ_SUCCESS:
12195 lpfc_sli4_rq_release(hrq, drq);
12196 spin_lock_irqsave(&phba->hbalock, iflags);
12197 dma_buf = lpfc_sli_hbqbuf_get(&phba->hbqs[0].hbq_buffer_list);
12198 if (!dma_buf) {
12199 hrq->RQ_no_buf_found++;
12200 spin_unlock_irqrestore(&phba->hbalock, iflags);
12201 goto out;
12202 }
12203 hrq->RQ_rcv_buf++;
12204 memcpy(&dma_buf->cq_event.cqe.rcqe_cmpl, rcqe, sizeof(*rcqe));
12205
12206 list_add_tail(&dma_buf->cq_event.list,
12207 &phba->sli4_hba.sp_queue_event);
12208
12209 phba->hba_flag |= HBA_SP_QUEUE_EVT;
12210 spin_unlock_irqrestore(&phba->hbalock, iflags);
12211 workposted = true;
12212 break;
12213 case FC_STATUS_INSUFF_BUF_NEED_BUF:
12214 case FC_STATUS_INSUFF_BUF_FRM_DISC:
12215 hrq->RQ_no_posted_buf++;
12216
12217 spin_lock_irqsave(&phba->hbalock, iflags);
12218 phba->hba_flag |= HBA_POST_RECEIVE_BUFFER;
12219 spin_unlock_irqrestore(&phba->hbalock, iflags);
12220 workposted = true;
12221 break;
12222 }
12223out:
12224 return workposted;
12225}
12226
12227
12228
12229
12230
12231
12232
12233
12234
12235
12236
12237
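/**
 * lpfc_sli4_sp_handle_cqe - Process a slow path completion queue entry
 * @phba: Pointer to HBA context object.
 * @cq: Pointer to the completion queue.
 * @cqe: Pointer to the completion queue entry.
 *
 * Copies the CQE out of the completion queue and dispatches it by its
 * code: WQE completion, WQE release, XRI abort or unsolicited receive.
 * Returns true when work was posted to the worker thread.
 **/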
12238static bool
12239lpfc_sli4_sp_handle_cqe(struct lpfc_hba *phba, struct lpfc_queue *cq,
12240 struct lpfc_cqe *cqe)
12241{
12242 struct lpfc_cqe cqevt;
12243 bool workposted = false;
12244
12245
12246 lpfc_sli_pcimem_bcopy(cqe, &cqevt, sizeof(struct lpfc_cqe));
12247
12248
12249 switch (bf_get(lpfc_cqe_code, &cqevt)) {
12250 case CQE_CODE_COMPL_WQE:
12251
12252 phba->last_completion_time = jiffies;
12253 workposted = lpfc_sli4_sp_handle_els_wcqe(phba, cq,
12254 (struct lpfc_wcqe_complete *)&cqevt);
12255 break;
12256 case CQE_CODE_RELEASE_WQE:
12257
12258 lpfc_sli4_sp_handle_rel_wcqe(phba,
12259 (struct lpfc_wcqe_release *)&cqevt);
12260 break;
12261 case CQE_CODE_XRI_ABORTED:
12262
12263 phba->last_completion_time = jiffies;
12264 workposted = lpfc_sli4_sp_handle_abort_xri_wcqe(phba, cq,
12265 (struct sli4_wcqe_xri_aborted *)&cqevt);
12266 break;
12267 case CQE_CODE_RECEIVE:
12268 case CQE_CODE_RECEIVE_V1:
12269
12270 phba->last_completion_time = jiffies;
12271 workposted = lpfc_sli4_sp_handle_rcqe(phba,
12272 (struct lpfc_rcqe *)&cqevt);
12273 break;
12274 default:
12275 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
				"0388 Not a valid CQE code: x%x\n",
12277 bf_get(lpfc_cqe_code, &cqevt));
12278 break;
12279 }
12280 return workposted;
12281}
12282
12283
12284
12285
12286
12287
12288
12289
12290
12291
12292
12293
12294
12295
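/**
 * lpfc_sli4_sp_handle_eqe - Process a slow-path event queue entry
 * @phba: Pointer to HBA context object.
 * @eqe: Pointer to the event queue entry.
 * @speq: Pointer to slow-path event queue.
 *
 * This routine processes an event queue entry from the slow-path event
 * queue. It looks up the child completion queue identified by the EQE,
 * processes all entries on that completion queue, rearms the completion
 * queue, and wakes up the worker thread if any work was posted.
 **/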
12296static void
12297lpfc_sli4_sp_handle_eqe(struct lpfc_hba *phba, struct lpfc_eqe *eqe,
12298 struct lpfc_queue *speq)
12299{
12300 struct lpfc_queue *cq = NULL, *childq;
12301 struct lpfc_cqe *cqe;
12302 bool workposted = false;
12303 int ecount = 0;
12304 uint16_t cqid;
12305
12306
12307 cqid = bf_get_le32(lpfc_eqe_resource_id, eqe);
12308
12309 list_for_each_entry(childq, &speq->child_list, list) {
12310 if (childq->queue_id == cqid) {
12311 cq = childq;
12312 break;
12313 }
12314 }
12315 if (unlikely(!cq)) {
12316 if (phba->sli.sli_flag & LPFC_SLI_ACTIVE)
12317 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
12318 "0365 Slow-path CQ identifier "
12319 "(%d) does not exist\n", cqid);
12320 return;
12321 }
12322
12323
12324 switch (cq->type) {
12325 case LPFC_MCQ:
12326 while ((cqe = lpfc_sli4_cq_get(cq))) {
12327 workposted |= lpfc_sli4_sp_handle_mcqe(phba, cqe);
12328 if (!(++ecount % cq->entry_repost))
12329 lpfc_sli4_cq_release(cq, LPFC_QUEUE_NOARM);
12330 cq->CQ_mbox++;
12331 }
12332 break;
12333 case LPFC_WCQ:
12334 while ((cqe = lpfc_sli4_cq_get(cq))) {
12335 if (cq->subtype == LPFC_FCP)
12336 workposted |= lpfc_sli4_fp_handle_wcqe(phba, cq,
12337 cqe);
12338 else
12339 workposted |= lpfc_sli4_sp_handle_cqe(phba, cq,
12340 cqe);
12341 if (!(++ecount % cq->entry_repost))
12342 lpfc_sli4_cq_release(cq, LPFC_QUEUE_NOARM);
12343 }
12344
12345
12346 if (ecount > cq->CQ_max_cqe)
12347 cq->CQ_max_cqe = ecount;
12348 break;
12349 default:
12350 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
12351 "0370 Invalid completion queue type (%d)\n",
12352 cq->type);
12353 return;
12354 }
12355
12356
12357 if (unlikely(ecount == 0))
12358 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
12359 "0371 No entry from the CQ: identifier "
12360 "(x%x), type (%d)\n", cq->queue_id, cq->type);
12361
12362
12363 lpfc_sli4_cq_release(cq, LPFC_QUEUE_REARM);
12364
12365
12366 if (workposted)
12367 lpfc_worker_wake_up(phba);
12368}
12369
12370
12371
12372
12373
12374
12375
12376
12377
12378
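/**
 * lpfc_sli4_fp_handle_fcp_wcqe - Process a fast-path FCP work-queue completion
 * @phba: Pointer to HBA context object.
 * @cq: Pointer to the associated completion queue.
 * @wcqe: Pointer to work-queue completion queue entry.
 *
 * This routine processes a fast-path work-queue completion entry for an FCP
 * command response: it looks up the command iocb by its request tag and
 * invokes the iocb's completion handler.
 **/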
12379static void
12380lpfc_sli4_fp_handle_fcp_wcqe(struct lpfc_hba *phba, struct lpfc_queue *cq,
12381 struct lpfc_wcqe_complete *wcqe)
12382{
12383 struct lpfc_sli_ring *pring = cq->pring;
12384 struct lpfc_iocbq *cmdiocbq;
12385 struct lpfc_iocbq irspiocbq;
12386 unsigned long iflags;
12387
12388
12389 if (unlikely(bf_get(lpfc_wcqe_c_status, wcqe))) {
12390
12391
12392
		if ((bf_get(lpfc_wcqe_c_status, wcqe) ==
		     IOSTAT_LOCAL_REJECT) &&
		    ((wcqe->parameter & IOERR_PARAM_MASK) ==
		     IOERR_NO_RESOURCES))
12397 phba->lpfc_rampdown_queue_depth(phba);
12398
12399
12400 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
12401 "0373 FCP complete error: status=x%x, "
12402 "hw_status=x%x, total_data_specified=%d, "
12403 "parameter=x%x, word3=x%x\n",
12404 bf_get(lpfc_wcqe_c_status, wcqe),
12405 bf_get(lpfc_wcqe_c_hw_status, wcqe),
12406 wcqe->total_data_placed, wcqe->parameter,
12407 wcqe->word3);
12408 }
12409
12410
12411 spin_lock_irqsave(&pring->ring_lock, iflags);
12412 pring->stats.iocb_event++;
12413 cmdiocbq = lpfc_sli_iocbq_lookup_by_tag(phba, pring,
12414 bf_get(lpfc_wcqe_c_request_tag, wcqe));
12415 spin_unlock_irqrestore(&pring->ring_lock, iflags);
12416 if (unlikely(!cmdiocbq)) {
12417 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
12418 "0374 FCP complete with no corresponding "
12419 "cmdiocb: iotag (%d)\n",
12420 bf_get(lpfc_wcqe_c_request_tag, wcqe));
12421 return;
12422 }
12423 if (unlikely(!cmdiocbq->iocb_cmpl)) {
12424 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
				"0375 FCP cmdiocb has no callback function, "
				"iotag: (%d)\n",
12427 bf_get(lpfc_wcqe_c_request_tag, wcqe));
12428 return;
12429 }
12430
12431
12432 lpfc_sli4_iocb_param_transfer(phba, &irspiocbq, cmdiocbq, wcqe);
12433
12434 if (cmdiocbq->iocb_flag & LPFC_DRIVER_ABORTED) {
12435 spin_lock_irqsave(&phba->hbalock, iflags);
12436 cmdiocbq->iocb_flag &= ~LPFC_DRIVER_ABORTED;
12437 spin_unlock_irqrestore(&phba->hbalock, iflags);
12438 }
12439
12440
12441 (cmdiocbq->iocb_cmpl)(phba, cmdiocbq, &irspiocbq);
12442}
12443
12444
12445
12446
12447
12448
12449
12450
12451
12452
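/**
 * lpfc_sli4_fp_handle_rel_wcqe - Handle a fast-path WQ entry consumed event
 * @phba: Pointer to HBA context object.
 * @cq: Pointer to completion queue.
 * @wcqe: Pointer to work-queue release completion queue entry.
 *
 * This routine handles a fast-path WQ entry consumed event by releasing the
 * consumed entries on the matching child work queue.
 **/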
12453static void
12454lpfc_sli4_fp_handle_rel_wcqe(struct lpfc_hba *phba, struct lpfc_queue *cq,
12455 struct lpfc_wcqe_release *wcqe)
12456{
12457 struct lpfc_queue *childwq;
12458 bool wqid_matched = false;
12459 uint16_t fcp_wqid;
12460
12461
12462 fcp_wqid = bf_get(lpfc_wcqe_r_wq_id, wcqe);
12463 list_for_each_entry(childwq, &cq->child_list, list) {
12464 if (childwq->queue_id == fcp_wqid) {
12465 lpfc_sli4_wq_release(childwq,
12466 bf_get(lpfc_wcqe_r_wqe_index, wcqe));
12467 wqid_matched = true;
12468 break;
12469 }
12470 }
12471
	if (!wqid_matched)
		lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
				"2580 Fast-path wqe consume event carries "
				"mismatched qid: wcqe-qid=x%x\n", fcp_wqid);
12476}
12477
12478
12479
12480
12481
12482
12483
12484
12485
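/**
 * lpfc_sli4_fp_handle_wcqe - Process a fast-path work queue completion entry
 * @phba: Pointer to HBA context object.
 * @cq: Pointer to the completion queue.
 * @cqe: Pointer to fast-path completion queue entry.
 *
 * This routine processes a fast-path completion queue entry by dispatching
 * on the WCQE code.
 *
 * Return: true if work posted to worker thread, otherwise false.
 **/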
12486static int
12487lpfc_sli4_fp_handle_wcqe(struct lpfc_hba *phba, struct lpfc_queue *cq,
12488 struct lpfc_cqe *cqe)
12489{
12490 struct lpfc_wcqe_release wcqe;
12491 bool workposted = false;
12492
12493
12494 lpfc_sli_pcimem_bcopy(cqe, &wcqe, sizeof(struct lpfc_cqe));
12495
12496
12497 switch (bf_get(lpfc_wcqe_c_code, &wcqe)) {
12498 case CQE_CODE_COMPL_WQE:
12499 cq->CQ_wq++;
12500
12501 phba->last_completion_time = jiffies;
12502 lpfc_sli4_fp_handle_fcp_wcqe(phba, cq,
12503 (struct lpfc_wcqe_complete *)&wcqe);
12504 break;
12505 case CQE_CODE_RELEASE_WQE:
12506 cq->CQ_release_wqe++;
12507
12508 lpfc_sli4_fp_handle_rel_wcqe(phba, cq,
12509 (struct lpfc_wcqe_release *)&wcqe);
12510 break;
12511 case CQE_CODE_XRI_ABORTED:
12512 cq->CQ_xri_aborted++;
12513
12514 phba->last_completion_time = jiffies;
12515 workposted = lpfc_sli4_sp_handle_abort_xri_wcqe(phba, cq,
12516 (struct sli4_wcqe_xri_aborted *)&wcqe);
12517 break;
12518 default:
12519 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
12520 "0144 Not a valid WCQE code: x%x\n",
12521 bf_get(lpfc_wcqe_c_code, &wcqe));
12522 break;
12523 }
12524 return workposted;
12525}
12526
12527
12528
12529
12530
12531
12532
12533
12534
12535
12536
12537
12538
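/**
 * lpfc_sli4_hba_handle_eqe - Process a fast-path event queue entry
 * @phba: Pointer to HBA context object.
 * @eqe: Pointer to fast-path event queue entry.
 * @qidx: Index to the fast-path queue.
 *
 * This routine processes an event queue entry from a fast-path event queue.
 * If the EQE is not for this index's fast-path completion queue, it is
 * handed to the slow-path handler; otherwise all entries on the completion
 * queue are processed, the queue is rearmed, and the worker thread is
 * woken up if any work was posted.
 **/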
12539static void
12540lpfc_sli4_hba_handle_eqe(struct lpfc_hba *phba, struct lpfc_eqe *eqe,
12541 uint32_t qidx)
12542{
12543 struct lpfc_queue *cq;
12544 struct lpfc_cqe *cqe;
12545 bool workposted = false;
12546 uint16_t cqid;
12547 int ecount = 0;
12548
12549 if (unlikely(bf_get_le32(lpfc_eqe_major_code, eqe) != 0)) {
12550 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
12551 "0366 Not a valid completion "
12552 "event: majorcode=x%x, minorcode=x%x\n",
12553 bf_get_le32(lpfc_eqe_major_code, eqe),
12554 bf_get_le32(lpfc_eqe_minor_code, eqe));
12555 return;
12556 }
12557
12558
12559 cqid = bf_get_le32(lpfc_eqe_resource_id, eqe);
12560
12561
12562 if (unlikely(cqid != phba->sli4_hba.fcp_cq_map[qidx])) {
12563 lpfc_sli4_sp_handle_eqe(phba, eqe,
12564 phba->sli4_hba.hba_eq[qidx]);
12565 return;
12566 }
12567
12568 if (unlikely(!phba->sli4_hba.fcp_cq)) {
12569 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
				"3146 Fast-path completion queues "
				"do not exist\n");
12572 return;
12573 }
12574 cq = phba->sli4_hba.fcp_cq[qidx];
12575 if (unlikely(!cq)) {
12576 if (phba->sli.sli_flag & LPFC_SLI_ACTIVE)
12577 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
12578 "0367 Fast-path completion queue "
12579 "(%d) does not exist\n", qidx);
12580 return;
12581 }
12582
12583 if (unlikely(cqid != cq->queue_id)) {
12584 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
				"0368 Mismatched fast-path completion "
12586 "queue identifier: eqcqid=%d, fcpcqid=%d\n",
12587 cqid, cq->queue_id);
12588 return;
12589 }
12590
12591
12592 while ((cqe = lpfc_sli4_cq_get(cq))) {
12593 workposted |= lpfc_sli4_fp_handle_wcqe(phba, cq, cqe);
12594 if (!(++ecount % cq->entry_repost))
12595 lpfc_sli4_cq_release(cq, LPFC_QUEUE_NOARM);
12596 }
12597
12598
12599 if (ecount > cq->CQ_max_cqe)
12600 cq->CQ_max_cqe = ecount;
12601
12602
12603 if (unlikely(ecount == 0))
12604 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
12605 "0369 No entry from fast-path completion "
12606 "queue fcpcqid=%d\n", cq->queue_id);
12607
12608
12609 lpfc_sli4_cq_release(cq, LPFC_QUEUE_REARM);
12610
12611
12612 if (workposted)
12613 lpfc_worker_wake_up(phba);
12614}
12615
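/**
 * lpfc_sli4_eq_flush - Discard all pending event queue entries and rearm
 * @phba: Pointer to HBA context object.
 * @eq: Pointer to the event queue to flush.
 **/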
12616static void
12617lpfc_sli4_eq_flush(struct lpfc_hba *phba, struct lpfc_queue *eq)
12618{
12619 struct lpfc_eqe *eqe;
12620
12621
12622 while ((eqe = lpfc_sli4_eq_get(eq)))
12623 ;
12624
12625
12626 lpfc_sli4_eq_release(eq, LPFC_QUEUE_REARM);
12627}
12628
12629
12630
12631
12632
12633
12634
12635
12636
12637
12638
12639
12640
12641
12642
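/**
 * lpfc_sli4_fof_handle_eqe - Process a Flash Optimized Fabric event queue entry
 * @phba: Pointer to HBA context object.
 * @eqe: Pointer to the event queue entry.
 *
 * This routine processes an event queue entry from the Flash Optimized
 * Fabric (OAS) event queue: it validates the EQE, processes all entries on
 * the OAS completion queue, rearms the queue, and wakes up the worker
 * thread if any work was posted.
 **/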
12643static void
12644lpfc_sli4_fof_handle_eqe(struct lpfc_hba *phba, struct lpfc_eqe *eqe)
12645{
12646 struct lpfc_queue *cq;
12647 struct lpfc_cqe *cqe;
12648 bool workposted = false;
12649 uint16_t cqid;
12650 int ecount = 0;
12651
12652 if (unlikely(bf_get_le32(lpfc_eqe_major_code, eqe) != 0)) {
12653 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
12654 "9147 Not a valid completion "
12655 "event: majorcode=x%x, minorcode=x%x\n",
12656 bf_get_le32(lpfc_eqe_major_code, eqe),
12657 bf_get_le32(lpfc_eqe_minor_code, eqe));
12658 return;
12659 }
12660
12661
12662 cqid = bf_get_le32(lpfc_eqe_resource_id, eqe);
12663
12664
12665 cq = phba->sli4_hba.oas_cq;
12666 if (unlikely(!cq)) {
12667 if (phba->sli.sli_flag & LPFC_SLI_ACTIVE)
12668 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
12669 "9148 OAS completion queue "
12670 "does not exist\n");
12671 return;
12672 }
12673
12674 if (unlikely(cqid != cq->queue_id)) {
12675 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
				"9149 Mismatched fast-path compl "
12677 "queue id: eqcqid=%d, fcpcqid=%d\n",
12678 cqid, cq->queue_id);
12679 return;
12680 }
12681
12682
12683 while ((cqe = lpfc_sli4_cq_get(cq))) {
12684 workposted |= lpfc_sli4_fp_handle_wcqe(phba, cq, cqe);
12685 if (!(++ecount % cq->entry_repost))
12686 lpfc_sli4_cq_release(cq, LPFC_QUEUE_NOARM);
12687 }
12688
12689
12690 if (ecount > cq->CQ_max_cqe)
12691 cq->CQ_max_cqe = ecount;
12692
12693
12694 if (unlikely(ecount == 0))
12695 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
12696 "9153 No entry from fast-path completion "
12697 "queue fcpcqid=%d\n", cq->queue_id);
12698
12699
12700 lpfc_sli4_cq_release(cq, LPFC_QUEUE_REARM);
12701
12702
12703 if (workposted)
12704 lpfc_worker_wake_up(phba);
12705}
12706
12707
12708
12709
12710
12711
12712
12713
12714
12715
12716
12717
12718
12719
12720
12721
12722
12723
12724
12725
12726
12727
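/**
 * lpfc_sli4_fof_intr_handler - Interrupt handler for the FOF (OAS) event queue
 * @irq: Interrupt number.
 * @dev_id: The device context pointer.
 *
 * This function is called from the PCI layer as the interrupt service
 * routine for the Flash Optimized Fabric event queue on an SLI-4 device.
 * When the PCI slot is in error recovery or the HBA is undergoing
 * initialization, the interrupt is not processed and the EQ is flushed.
 *
 * Returns IRQ_HANDLED when the interrupt is handled, else IRQ_NONE.
 **/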
12728irqreturn_t
12729lpfc_sli4_fof_intr_handler(int irq, void *dev_id)
12730{
12731 struct lpfc_hba *phba;
12732 struct lpfc_fcp_eq_hdl *fcp_eq_hdl;
12733 struct lpfc_queue *eq;
12734 struct lpfc_eqe *eqe;
12735 unsigned long iflag;
12736 int ecount = 0;
12737
12738
12739 fcp_eq_hdl = (struct lpfc_fcp_eq_hdl *)dev_id;
12740 phba = fcp_eq_hdl->phba;
12741
12742 if (unlikely(!phba))
12743 return IRQ_NONE;
12744
12745
12746 eq = phba->sli4_hba.fof_eq;
12747 if (unlikely(!eq))
12748 return IRQ_NONE;
12749
12750
12751 if (unlikely(lpfc_intr_state_check(phba))) {
12752 eq->EQ_badstate++;
12753
12754 spin_lock_irqsave(&phba->hbalock, iflag);
12755 if (phba->link_state < LPFC_LINK_DOWN)
12756
12757 lpfc_sli4_eq_flush(phba, eq);
12758 spin_unlock_irqrestore(&phba->hbalock, iflag);
12759 return IRQ_NONE;
12760 }
12761
12762
12763
12764
12765 while ((eqe = lpfc_sli4_eq_get(eq))) {
12766 lpfc_sli4_fof_handle_eqe(phba, eqe);
12767 if (!(++ecount % eq->entry_repost))
12768 lpfc_sli4_eq_release(eq, LPFC_QUEUE_NOARM);
12769 eq->EQ_processed++;
12770 }
12771
12772
12773 if (ecount > eq->EQ_max_eqe)
12774 eq->EQ_max_eqe = ecount;
12775
12776
12777 if (unlikely(ecount == 0)) {
12778 eq->EQ_no_entry++;
12779
12780 if (phba->intr_type == MSIX)
12781
12782 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
12783 "9145 MSI-X interrupt with no EQE\n");
12784 else {
12785 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
12786 "9146 ISR interrupt with no EQE\n");
12787
12788 return IRQ_NONE;
12789 }
12790 }
12791
12792 lpfc_sli4_eq_release(eq, LPFC_QUEUE_REARM);
12793 return IRQ_HANDLED;
12794}
12795
12796
12797
12798
12799
12800
12801
12802
12803
12804
12805
12806
12807
12808
12809
12810
12811
12812
12813
12814
12815
12816
12817
12818
12819
12820
12821
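/**
 * lpfc_sli4_hba_intr_handler - HBA interrupt handler to SLI-4 device
 * @irq: Interrupt number.
 * @dev_id: The device context pointer.
 *
 * This function is called from the PCI layer as the interrupt service
 * routine when an SLI-4 device runs in MSI-X multi-message interrupt mode
 * and there is an event on a fast-path event queue. When the device runs in
 * MSI or Pin-IRQ interrupt mode, it is called per EQ from the device-level
 * interrupt handler. When the PCI slot is in error recovery or the HBA is
 * undergoing initialization, the interrupt is not processed and the EQ is
 * flushed.
 *
 * Returns IRQ_HANDLED when the interrupt is handled, else IRQ_NONE.
 **/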
12822irqreturn_t
12823lpfc_sli4_hba_intr_handler(int irq, void *dev_id)
12824{
12825 struct lpfc_hba *phba;
12826 struct lpfc_fcp_eq_hdl *fcp_eq_hdl;
12827 struct lpfc_queue *fpeq;
12828 struct lpfc_eqe *eqe;
12829 unsigned long iflag;
12830 int ecount = 0;
12831 int fcp_eqidx;
12832
12833
12834 fcp_eq_hdl = (struct lpfc_fcp_eq_hdl *)dev_id;
12835 phba = fcp_eq_hdl->phba;
12836 fcp_eqidx = fcp_eq_hdl->idx;
12837
12838 if (unlikely(!phba))
12839 return IRQ_NONE;
12840 if (unlikely(!phba->sli4_hba.hba_eq))
12841 return IRQ_NONE;
12842
12843
12844 fpeq = phba->sli4_hba.hba_eq[fcp_eqidx];
12845 if (unlikely(!fpeq))
12846 return IRQ_NONE;
12847
12848 if (lpfc_fcp_look_ahead) {
12849 if (atomic_dec_and_test(&fcp_eq_hdl->fcp_eq_in_use))
12850 lpfc_sli4_eq_clr_intr(fpeq);
12851 else {
12852 atomic_inc(&fcp_eq_hdl->fcp_eq_in_use);
12853 return IRQ_NONE;
12854 }
12855 }
12856
12857
12858 if (unlikely(lpfc_intr_state_check(phba))) {
12859 fpeq->EQ_badstate++;
12860
12861 spin_lock_irqsave(&phba->hbalock, iflag);
12862 if (phba->link_state < LPFC_LINK_DOWN)
12863
12864 lpfc_sli4_eq_flush(phba, fpeq);
12865 spin_unlock_irqrestore(&phba->hbalock, iflag);
12866 if (lpfc_fcp_look_ahead)
12867 atomic_inc(&fcp_eq_hdl->fcp_eq_in_use);
12868 return IRQ_NONE;
12869 }
12870
12871
12872
12873
12874 while ((eqe = lpfc_sli4_eq_get(fpeq))) {
12878 lpfc_sli4_hba_handle_eqe(phba, eqe, fcp_eqidx);
12879 if (!(++ecount % fpeq->entry_repost))
12880 lpfc_sli4_eq_release(fpeq, LPFC_QUEUE_NOARM);
12881 fpeq->EQ_processed++;
12882 }
12883
12884
12885 if (ecount > fpeq->EQ_max_eqe)
12886 fpeq->EQ_max_eqe = ecount;
12887
12888
12889 lpfc_sli4_eq_release(fpeq, LPFC_QUEUE_REARM);
12890
12891 if (unlikely(ecount == 0)) {
12892 fpeq->EQ_no_entry++;
12893
12894 if (lpfc_fcp_look_ahead) {
12895 atomic_inc(&fcp_eq_hdl->fcp_eq_in_use);
12896 return IRQ_NONE;
12897 }
12898
12899 if (phba->intr_type == MSIX)
12900
12901 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
12902 "0358 MSI-X interrupt with no EQE\n");
12903 else
12904
12905 return IRQ_NONE;
12906 }
12907
12908 if (lpfc_fcp_look_ahead)
12909 atomic_inc(&fcp_eq_hdl->fcp_eq_in_use);
12910 return IRQ_HANDLED;
12911}
12912
12913
12914
12915
12916
12917
12918
12919
12920
12921
12922
12923
12924
12925
12926
12927
12928
12929
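/**
 * lpfc_sli4_intr_handler - Device-level interrupt handler for SLI-4 device
 * @irq: Interrupt number.
 * @dev_id: The device context pointer.
 *
 * This function is the device-level interrupt handler for an SLI-4 device,
 * used when the driver runs in MSI or Pin-IRQ interrupt mode. It iterates
 * over all fast-path event queues (and the FOF event queue when enabled)
 * and invokes the per-EQ interrupt handler for each.
 *
 * Returns IRQ_HANDLED when any EQ handled an event, else IRQ_NONE.
 **/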
12930irqreturn_t
12931lpfc_sli4_intr_handler(int irq, void *dev_id)
12932{
12933 struct lpfc_hba *phba;
12934 irqreturn_t hba_irq_rc;
12935 bool hba_handled = false;
12936 int fcp_eqidx;
12937
12938
12939 phba = (struct lpfc_hba *)dev_id;
12940
12941 if (unlikely(!phba))
12942 return IRQ_NONE;
12943
12944
12945
12946
12947 for (fcp_eqidx = 0; fcp_eqidx < phba->cfg_fcp_io_channel; fcp_eqidx++) {
12948 hba_irq_rc = lpfc_sli4_hba_intr_handler(irq,
12949 &phba->sli4_hba.fcp_eq_hdl[fcp_eqidx]);
12950 if (hba_irq_rc == IRQ_HANDLED)
			hba_handled = true;
12952 }
12953
12954 if (phba->cfg_fof) {
12955 hba_irq_rc = lpfc_sli4_fof_intr_handler(irq,
12956 &phba->sli4_hba.fcp_eq_hdl[0]);
12957 if (hba_irq_rc == IRQ_HANDLED)
			hba_handled = true;
12959 }
12960
	return hba_handled ? IRQ_HANDLED : IRQ_NONE;
12962}
12963
12964
12965
12966
12967
12968
12969
12970
12971
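/**
 * lpfc_sli4_queue_free - Free a queue structure and associated memory
 * @queue: The queue structure to free.
 *
 * This function frees a queue structure and the DMA-able memory used for
 * the host resident queue. It must be called after the queue has been
 * destroyed on the HBA.
 **/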
12972void
12973lpfc_sli4_queue_free(struct lpfc_queue *queue)
12974{
12975 struct lpfc_dmabuf *dmabuf;
12976
12977 if (!queue)
12978 return;
12979
12980 while (!list_empty(&queue->page_list)) {
12981 list_remove_head(&queue->page_list, dmabuf, struct lpfc_dmabuf,
12982 list);
12983 dma_free_coherent(&queue->phba->pcidev->dev, SLI4_PAGE_SIZE,
12984 dmabuf->virt, dmabuf->phys);
12985 kfree(dmabuf);
12986 }
12987 kfree(queue);
12988 return;
12989}
12990
12991
12992
12993
12994
12995
12996
12997
12998
12999
13000
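/**
 * lpfc_sli4_queue_alloc - Allocate and initialize a queue structure
 * @phba: The HBA that this queue is being created on.
 * @entry_size: The size, in bytes, of each entry in this queue.
 * @entry_count: The number of entries that this queue will handle.
 *
 * This function allocates a queue structure and the DMA-able memory used
 * for the host resident queue. It must be called before creating the queue
 * on the HBA.
 *
 * Return: pointer to the allocated queue, or NULL on failure.
 **/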
13001struct lpfc_queue *
13002lpfc_sli4_queue_alloc(struct lpfc_hba *phba, uint32_t entry_size,
13003 uint32_t entry_count)
13004{
13005 struct lpfc_queue *queue;
13006 struct lpfc_dmabuf *dmabuf;
13007 int x, total_qe_count;
13008 void *dma_pointer;
13009 uint32_t hw_page_size = phba->sli4_hba.pc_sli4_params.if_page_sz;
13010
13011 if (!phba->sli4_hba.pc_sli4_params.supported)
13012 hw_page_size = SLI4_PAGE_SIZE;
13013
13014 queue = kzalloc(sizeof(struct lpfc_queue) +
13015 (sizeof(union sli4_qe) * entry_count), GFP_KERNEL);
13016 if (!queue)
13017 return NULL;
13018 queue->page_count = (ALIGN(entry_size * entry_count,
13019 hw_page_size))/hw_page_size;
13020 INIT_LIST_HEAD(&queue->list);
13021 INIT_LIST_HEAD(&queue->page_list);
13022 INIT_LIST_HEAD(&queue->child_list);
13023 for (x = 0, total_qe_count = 0; x < queue->page_count; x++) {
13024 dmabuf = kzalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
13025 if (!dmabuf)
13026 goto out_fail;
13027 dmabuf->virt = dma_zalloc_coherent(&phba->pcidev->dev,
13028 hw_page_size, &dmabuf->phys,
13029 GFP_KERNEL);
13030 if (!dmabuf->virt) {
13031 kfree(dmabuf);
13032 goto out_fail;
13033 }
13034 dmabuf->buffer_tag = x;
13035 list_add_tail(&dmabuf->list, &queue->page_list);
13036
13037 dma_pointer = dmabuf->virt;
13038 for (; total_qe_count < entry_count &&
13039 dma_pointer < (hw_page_size + dmabuf->virt);
13040 total_qe_count++, dma_pointer += entry_size) {
13041 queue->qe[total_qe_count].address = dma_pointer;
13042 }
13043 }
13044 queue->entry_size = entry_size;
13045 queue->entry_count = entry_count;
13046
13047
13048
13049
13050
13051
13052 queue->entry_repost = (entry_count >> 3);
13053 if (queue->entry_repost < LPFC_QUEUE_MIN_REPOST)
13054 queue->entry_repost = LPFC_QUEUE_MIN_REPOST;
13055 queue->phba = phba;
13056
13057 return queue;
13058out_fail:
13059 lpfc_sli4_queue_free(queue);
13060 return NULL;
13061}
13062
13063
13064
13065
13066
13067
13068
13069
13070
13071
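/**
 * lpfc_dual_chute_pci_bar_map - Map a PCI BAR set to its host memory address
 * @phba: HBA structure that indicates port to create a queue on.
 * @pci_barset: PCI BAR set flag.
 *
 * This function returns the previously iomapped host memory address for the
 * specified PCI BAR set. The returned address may be NULL.
 **/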
13072static void __iomem *
13073lpfc_dual_chute_pci_bar_map(struct lpfc_hba *phba, uint16_t pci_barset)
13074{
13075 if (!phba->pcidev)
13076 return NULL;
13077
13078 switch (pci_barset) {
13079 case WQ_PCI_BAR_0_AND_1:
13080 return phba->pci_bar0_memmap_p;
13081 case WQ_PCI_BAR_2_AND_3:
13082 return phba->pci_bar2_memmap_p;
13083 case WQ_PCI_BAR_4_AND_5:
13084 return phba->pci_bar4_memmap_p;
13085 default:
13086 break;
13087 }
13088 return NULL;
13089}
13090
13091
13092
13093
13094
13095
13096
13097
13098
13099
13100
13101
13102
13103
13104
13105
13106
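/**
 * lpfc_modify_fcp_eq_delay - Modify the delay multiplier on FCP EQs
 * @phba: HBA structure that indicates port to create a queue on.
 * @startq: The starting FCP EQ to modify.
 *
 * This function sends a MODIFY_EQ_DELAY mailbox command to the HBA to set
 * the interrupt coalescing delay multiplier on the FCP event queues,
 * starting at @startq. The mailbox is issued in polled mode.
 *
 * Return: 0 on success, -ENOMEM if the mailbox cannot be allocated, or
 * -ENXIO if the mailbox command fails.
 **/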
13107int
13108lpfc_modify_fcp_eq_delay(struct lpfc_hba *phba, uint32_t startq)
13109{
13110 struct lpfc_mbx_modify_eq_delay *eq_delay;
13111 LPFC_MBOXQ_t *mbox;
13112 struct lpfc_queue *eq;
13113 int cnt, rc, length, status = 0;
13114 uint32_t shdr_status, shdr_add_status;
13115 uint32_t result;
13116 int fcp_eqidx;
13117 union lpfc_sli4_cfg_shdr *shdr;
13118 uint16_t dmult;
13119
13120 if (startq >= phba->cfg_fcp_io_channel)
13121 return 0;
13122
13123 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
13124 if (!mbox)
13125 return -ENOMEM;
13126 length = (sizeof(struct lpfc_mbx_modify_eq_delay) -
13127 sizeof(struct lpfc_sli4_cfg_mhdr));
13128 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON,
13129 LPFC_MBOX_OPCODE_MODIFY_EQ_DELAY,
13130 length, LPFC_SLI4_MBX_EMBED);
13131 eq_delay = &mbox->u.mqe.un.eq_delay;
13132
13133
13134 result = phba->cfg_fcp_imax / phba->cfg_fcp_io_channel;
13135 if (result > LPFC_DMULT_CONST)
13136 dmult = 0;
13137 else
13138 dmult = LPFC_DMULT_CONST/result - 1;
13139
13140 cnt = 0;
13141 for (fcp_eqidx = startq; fcp_eqidx < phba->cfg_fcp_io_channel;
13142 fcp_eqidx++) {
13143 eq = phba->sli4_hba.hba_eq[fcp_eqidx];
13144 if (!eq)
13145 continue;
13146 eq_delay->u.request.eq[cnt].eq_id = eq->queue_id;
13147 eq_delay->u.request.eq[cnt].phase = 0;
13148 eq_delay->u.request.eq[cnt].delay_multi = dmult;
13149 cnt++;
13150 if (cnt >= LPFC_MAX_EQ_DELAY)
13151 break;
13152 }
13153 eq_delay->u.request.num_eq = cnt;
13154
13155 mbox->vport = phba->pport;
13156 mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
13157 mbox->context1 = NULL;
13158 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
13159 shdr = (union lpfc_sli4_cfg_shdr *) &eq_delay->header.cfg_shdr;
13160 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
13161 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
13162 if (shdr_status || shdr_add_status || rc) {
13163 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
13164 "2512 MODIFY_EQ_DELAY mailbox failed with "
13165 "status x%x add_status x%x, mbx status x%x\n",
13166 shdr_status, shdr_add_status, rc);
13167 status = -ENXIO;
13168 }
13169 mempool_free(mbox, phba->mbox_mem_pool);
13170 return status;
13171}
13172
13173
13174
13175
13176
13177
13178
13179
13180
13181
13182
13183
13184
13185
13186
13187
13188
13189
13190
13191
13192
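/**
 * lpfc_eq_create - Create an Event Queue on the HBA
 * @phba: HBA structure that indicates port to create a queue on.
 * @eq: The queue structure to use to create the event queue.
 * @imax: The maximum interrupt per second limit.
 *
 * This function creates the event queue described by @eq on the port by
 * sending an EQ_CREATE mailbox command, in polled mode, to the HBA. The
 * entry count and entry size in @eq determine the number of pages posted
 * for the queue.
 *
 * Return: 0 on success, -ENOMEM if the mailbox cannot be allocated, or
 * -ENXIO if the mailbox command fails.
 **/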
13193int
13194lpfc_eq_create(struct lpfc_hba *phba, struct lpfc_queue *eq, uint32_t imax)
13195{
13196 struct lpfc_mbx_eq_create *eq_create;
13197 LPFC_MBOXQ_t *mbox;
13198 int rc, length, status = 0;
13199 struct lpfc_dmabuf *dmabuf;
13200 uint32_t shdr_status, shdr_add_status;
13201 union lpfc_sli4_cfg_shdr *shdr;
13202 uint16_t dmult;
13203 uint32_t hw_page_size = phba->sli4_hba.pc_sli4_params.if_page_sz;
13204
13205
13206 if (!eq)
13207 return -ENODEV;
13208 if (!phba->sli4_hba.pc_sli4_params.supported)
13209 hw_page_size = SLI4_PAGE_SIZE;
13210
13211 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
13212 if (!mbox)
13213 return -ENOMEM;
13214 length = (sizeof(struct lpfc_mbx_eq_create) -
13215 sizeof(struct lpfc_sli4_cfg_mhdr));
13216 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON,
13217 LPFC_MBOX_OPCODE_EQ_CREATE,
13218 length, LPFC_SLI4_MBX_EMBED);
13219 eq_create = &mbox->u.mqe.un.eq_create;
13220 bf_set(lpfc_mbx_eq_create_num_pages, &eq_create->u.request,
13221 eq->page_count);
13222 bf_set(lpfc_eq_context_size, &eq_create->u.request.context,
13223 LPFC_EQE_SIZE);
13224 bf_set(lpfc_eq_context_valid, &eq_create->u.request.context, 1);
13225
13226 dmult = 0;
13227 bf_set(lpfc_eq_context_delay_multi, &eq_create->u.request.context,
13228 dmult);
13229 switch (eq->entry_count) {
13230 default:
13231 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
13232 "0360 Unsupported EQ count. (%d)\n",
13233 eq->entry_count);
		if (eq->entry_count < 256) {
			/* avoid leaking the mailbox on the error path */
			status = -EINVAL;
			goto out;
		}
		/* otherwise default to smallest count (drop through) */
13237 case 256:
13238 bf_set(lpfc_eq_context_count, &eq_create->u.request.context,
13239 LPFC_EQ_CNT_256);
13240 break;
13241 case 512:
13242 bf_set(lpfc_eq_context_count, &eq_create->u.request.context,
13243 LPFC_EQ_CNT_512);
13244 break;
13245 case 1024:
13246 bf_set(lpfc_eq_context_count, &eq_create->u.request.context,
13247 LPFC_EQ_CNT_1024);
13248 break;
13249 case 2048:
13250 bf_set(lpfc_eq_context_count, &eq_create->u.request.context,
13251 LPFC_EQ_CNT_2048);
13252 break;
13253 case 4096:
13254 bf_set(lpfc_eq_context_count, &eq_create->u.request.context,
13255 LPFC_EQ_CNT_4096);
13256 break;
13257 }
13258 list_for_each_entry(dmabuf, &eq->page_list, list) {
13259 memset(dmabuf->virt, 0, hw_page_size);
13260 eq_create->u.request.page[dmabuf->buffer_tag].addr_lo =
13261 putPaddrLow(dmabuf->phys);
13262 eq_create->u.request.page[dmabuf->buffer_tag].addr_hi =
13263 putPaddrHigh(dmabuf->phys);
13264 }
13265 mbox->vport = phba->pport;
13266 mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
13267 mbox->context1 = NULL;
13268 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
13269 shdr = (union lpfc_sli4_cfg_shdr *) &eq_create->header.cfg_shdr;
13270 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
13271 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
13272 if (shdr_status || shdr_add_status || rc) {
13273 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
13274 "2500 EQ_CREATE mailbox failed with "
13275 "status x%x add_status x%x, mbx status x%x\n",
13276 shdr_status, shdr_add_status, rc);
13277 status = -ENXIO;
13278 }
13279 eq->type = LPFC_EQ;
13280 eq->subtype = LPFC_NONE;
13281 eq->queue_id = bf_get(lpfc_mbx_eq_create_q_id, &eq_create->u.response);
13282 if (eq->queue_id == 0xFFFF)
13283 status = -ENXIO;
13284 eq->host_index = 0;
13285 eq->hba_index = 0;
13286
out:
	mempool_free(mbox, phba->mbox_mem_pool);
13288 return status;
13289}
13290
13291
13292
13293
13294
13295
13296
13297
13298
13299
13300
13301
13302
13303
13304
13305
13306
13307
13308
13309
13310
13311
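/**
 * lpfc_cq_create - Create a Completion Queue on the HBA
 * @phba: HBA structure that indicates port to create a queue on.
 * @cq: The queue structure to use to create the completion queue.
 * @eq: The event queue to bind this completion queue to.
 * @type: The queue type (e.g. LPFC_MCQ, LPFC_WCQ).
 * @subtype: The queue subtype (e.g. LPFC_FCP, LPFC_ELS).
 *
 * This function creates the completion queue described by @cq on the port,
 * bound to event queue @eq, by sending a CQ_CREATE mailbox command, in
 * polled mode, to the HBA.
 *
 * Return: 0 on success, -ENOMEM if the mailbox cannot be allocated, or
 * -ENXIO if the mailbox command fails.
 **/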
13312int
13313lpfc_cq_create(struct lpfc_hba *phba, struct lpfc_queue *cq,
13314 struct lpfc_queue *eq, uint32_t type, uint32_t subtype)
13315{
13316 struct lpfc_mbx_cq_create *cq_create;
13317 struct lpfc_dmabuf *dmabuf;
13318 LPFC_MBOXQ_t *mbox;
13319 int rc, length, status = 0;
13320 uint32_t shdr_status, shdr_add_status;
13321 union lpfc_sli4_cfg_shdr *shdr;
13322 uint32_t hw_page_size = phba->sli4_hba.pc_sli4_params.if_page_sz;
13323
13324
13325 if (!cq || !eq)
13326 return -ENODEV;
13327 if (!phba->sli4_hba.pc_sli4_params.supported)
13328 hw_page_size = SLI4_PAGE_SIZE;
13329
13330 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
13331 if (!mbox)
13332 return -ENOMEM;
13333 length = (sizeof(struct lpfc_mbx_cq_create) -
13334 sizeof(struct lpfc_sli4_cfg_mhdr));
13335 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON,
13336 LPFC_MBOX_OPCODE_CQ_CREATE,
13337 length, LPFC_SLI4_MBX_EMBED);
13338 cq_create = &mbox->u.mqe.un.cq_create;
13339 shdr = (union lpfc_sli4_cfg_shdr *) &cq_create->header.cfg_shdr;
13340 bf_set(lpfc_mbx_cq_create_num_pages, &cq_create->u.request,
13341 cq->page_count);
13342 bf_set(lpfc_cq_context_event, &cq_create->u.request.context, 1);
13343 bf_set(lpfc_cq_context_valid, &cq_create->u.request.context, 1);
13344 bf_set(lpfc_mbox_hdr_version, &shdr->request,
13345 phba->sli4_hba.pc_sli4_params.cqv);
13346 if (phba->sli4_hba.pc_sli4_params.cqv == LPFC_Q_CREATE_VERSION_2) {
13347
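		/* FW only supports 1. Should be PAGE_SIZE/SLI4_PAGE_SIZE */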
13348 bf_set(lpfc_mbx_cq_create_page_size, &cq_create->u.request, 1);
13349 bf_set(lpfc_cq_eq_id_2, &cq_create->u.request.context,
13350 eq->queue_id);
13351 } else {
13352 bf_set(lpfc_cq_eq_id, &cq_create->u.request.context,
13353 eq->queue_id);
13354 }
13355 switch (cq->entry_count) {
13356 default:
13357 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
13358 "0361 Unsupported CQ count. (%d)\n",
13359 cq->entry_count);
13360 if (cq->entry_count < 256) {
13361 status = -EINVAL;
13362 goto out;
13363 }
13364
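		/* otherwise default to smallest count (drop through) */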
13365 case 256:
13366 bf_set(lpfc_cq_context_count, &cq_create->u.request.context,
13367 LPFC_CQ_CNT_256);
13368 break;
13369 case 512:
13370 bf_set(lpfc_cq_context_count, &cq_create->u.request.context,
13371 LPFC_CQ_CNT_512);
13372 break;
13373 case 1024:
13374 bf_set(lpfc_cq_context_count, &cq_create->u.request.context,
13375 LPFC_CQ_CNT_1024);
13376 break;
13377 }
13378 list_for_each_entry(dmabuf, &cq->page_list, list) {
13379 memset(dmabuf->virt, 0, hw_page_size);
13380 cq_create->u.request.page[dmabuf->buffer_tag].addr_lo =
13381 putPaddrLow(dmabuf->phys);
13382 cq_create->u.request.page[dmabuf->buffer_tag].addr_hi =
13383 putPaddrHigh(dmabuf->phys);
13384 }
13385 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
13386
13387
13388 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
13389 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
13390 if (shdr_status || shdr_add_status || rc) {
13391 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
13392 "2501 CQ_CREATE mailbox failed with "
13393 "status x%x add_status x%x, mbx status x%x\n",
13394 shdr_status, shdr_add_status, rc);
13395 status = -ENXIO;
13396 goto out;
13397 }
13398 cq->queue_id = bf_get(lpfc_mbx_cq_create_q_id, &cq_create->u.response);
13399 if (cq->queue_id == 0xFFFF) {
13400 status = -ENXIO;
13401 goto out;
13402 }
13403
13404 list_add_tail(&cq->list, &eq->child_list);
13405
13406 cq->type = type;
13407 cq->subtype = subtype;
13408 cq->queue_id = bf_get(lpfc_mbx_cq_create_q_id, &cq_create->u.response);
13409 cq->assoc_qid = eq->queue_id;
13410 cq->host_index = 0;
13411 cq->hba_index = 0;
13412
13413out:
13414 mempool_free(mbox, phba->mbox_mem_pool);
13415 return status;
13416}
13417
13418
13419
13420
13421
13422
13423
13424
13425
13426
13427
13428
13429
13430
13431
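/**
 * lpfc_mq_create_fb_init - Prepare an MQ_CREATE without async-event registration
 * @phba: HBA structure that indicates port to create a queue on.
 * @mq: The queue structure to use to create the mailbox queue.
 * @mbox: An allocated pointer of type LPFC_MBOXQ_t.
 * @cq: The completion queue to associate with this mailbox queue.
 *
 * This function provides the fallback path used when MQ_CREATE_EXT fails on
 * older firmware: it only populates @mbox; the caller issues the mailbox.
 **/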
13432static void
13433lpfc_mq_create_fb_init(struct lpfc_hba *phba, struct lpfc_queue *mq,
13434 LPFC_MBOXQ_t *mbox, struct lpfc_queue *cq)
13435{
13436 struct lpfc_mbx_mq_create *mq_create;
13437 struct lpfc_dmabuf *dmabuf;
13438 int length;
13439
13440 length = (sizeof(struct lpfc_mbx_mq_create) -
13441 sizeof(struct lpfc_sli4_cfg_mhdr));
13442 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON,
13443 LPFC_MBOX_OPCODE_MQ_CREATE,
13444 length, LPFC_SLI4_MBX_EMBED);
13445 mq_create = &mbox->u.mqe.un.mq_create;
13446 bf_set(lpfc_mbx_mq_create_num_pages, &mq_create->u.request,
13447 mq->page_count);
13448 bf_set(lpfc_mq_context_cq_id, &mq_create->u.request.context,
13449 cq->queue_id);
13450 bf_set(lpfc_mq_context_valid, &mq_create->u.request.context, 1);
13451 switch (mq->entry_count) {
13452 case 16:
13453 bf_set(lpfc_mq_context_ring_size, &mq_create->u.request.context,
13454 LPFC_MQ_RING_SIZE_16);
13455 break;
13456 case 32:
13457 bf_set(lpfc_mq_context_ring_size, &mq_create->u.request.context,
13458 LPFC_MQ_RING_SIZE_32);
13459 break;
13460 case 64:
13461 bf_set(lpfc_mq_context_ring_size, &mq_create->u.request.context,
13462 LPFC_MQ_RING_SIZE_64);
13463 break;
13464 case 128:
13465 bf_set(lpfc_mq_context_ring_size, &mq_create->u.request.context,
13466 LPFC_MQ_RING_SIZE_128);
13467 break;
13468 }
13469 list_for_each_entry(dmabuf, &mq->page_list, list) {
13470 mq_create->u.request.page[dmabuf->buffer_tag].addr_lo =
13471 putPaddrLow(dmabuf->phys);
13472 mq_create->u.request.page[dmabuf->buffer_tag].addr_hi =
13473 putPaddrHigh(dmabuf->phys);
13474 }
13475}
13476
13477
13478
13479
13480
13481
13482
13483
13484
13485
13486
13487
13488
13489
13490
13491
13492
13493
13494
13495
13496
13497
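/**
 * lpfc_mq_create - Create a Mailbox Queue on the HBA
 * @phba: HBA structure that indicates port to create a queue on.
 * @mq: The queue structure to use to create the mailbox queue.
 * @cq: The completion queue to associate with this mailbox queue.
 * @subtype: The queue's subtype.
 *
 * This function creates the mailbox queue described by @mq on the port,
 * bound to completion queue @cq, by sending an MQ_CREATE_EXT mailbox
 * command, in polled mode, to the HBA; on failure it falls back to the
 * plain MQ_CREATE command.
 *
 * Return: 0 on success, -ENOMEM if the mailbox cannot be allocated, or
 * -ENXIO if the mailbox command fails.
 **/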
13498int32_t
13499lpfc_mq_create(struct lpfc_hba *phba, struct lpfc_queue *mq,
13500 struct lpfc_queue *cq, uint32_t subtype)
13501{
13502 struct lpfc_mbx_mq_create *mq_create;
13503 struct lpfc_mbx_mq_create_ext *mq_create_ext;
13504 struct lpfc_dmabuf *dmabuf;
13505 LPFC_MBOXQ_t *mbox;
13506 int rc, length, status = 0;
13507 uint32_t shdr_status, shdr_add_status;
13508 union lpfc_sli4_cfg_shdr *shdr;
13509 uint32_t hw_page_size = phba->sli4_hba.pc_sli4_params.if_page_sz;
13510
13511
13512 if (!mq || !cq)
13513 return -ENODEV;
13514 if (!phba->sli4_hba.pc_sli4_params.supported)
13515 hw_page_size = SLI4_PAGE_SIZE;
13516
13517 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
13518 if (!mbox)
13519 return -ENOMEM;
13520 length = (sizeof(struct lpfc_mbx_mq_create_ext) -
13521 sizeof(struct lpfc_sli4_cfg_mhdr));
13522 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON,
13523 LPFC_MBOX_OPCODE_MQ_CREATE_EXT,
13524 length, LPFC_SLI4_MBX_EMBED);
13525
13526 mq_create_ext = &mbox->u.mqe.un.mq_create_ext;
13527 shdr = (union lpfc_sli4_cfg_shdr *) &mq_create_ext->header.cfg_shdr;
13528 bf_set(lpfc_mbx_mq_create_ext_num_pages,
13529 &mq_create_ext->u.request, mq->page_count);
13530 bf_set(lpfc_mbx_mq_create_ext_async_evt_link,
13531 &mq_create_ext->u.request, 1);
13532 bf_set(lpfc_mbx_mq_create_ext_async_evt_fip,
13533 &mq_create_ext->u.request, 1);
13534 bf_set(lpfc_mbx_mq_create_ext_async_evt_group5,
13535 &mq_create_ext->u.request, 1);
13536 bf_set(lpfc_mbx_mq_create_ext_async_evt_fc,
13537 &mq_create_ext->u.request, 1);
13538 bf_set(lpfc_mbx_mq_create_ext_async_evt_sli,
13539 &mq_create_ext->u.request, 1);
13540 bf_set(lpfc_mq_context_valid, &mq_create_ext->u.request.context, 1);
13541 bf_set(lpfc_mbox_hdr_version, &shdr->request,
13542 phba->sli4_hba.pc_sli4_params.mqv);
13543 if (phba->sli4_hba.pc_sli4_params.mqv == LPFC_Q_CREATE_VERSION_1)
13544 bf_set(lpfc_mbx_mq_create_ext_cq_id, &mq_create_ext->u.request,
13545 cq->queue_id);
13546 else
13547 bf_set(lpfc_mq_context_cq_id, &mq_create_ext->u.request.context,
13548 cq->queue_id);
13549 switch (mq->entry_count) {
13550 default:
13551 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
13552 "0362 Unsupported MQ count. (%d)\n",
13553 mq->entry_count);
13554 if (mq->entry_count < 16) {
13555 status = -EINVAL;
13556 goto out;
13557 }
13558
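		/* otherwise default to smallest count (drop through) */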
13559 case 16:
13560 bf_set(lpfc_mq_context_ring_size,
13561 &mq_create_ext->u.request.context,
13562 LPFC_MQ_RING_SIZE_16);
13563 break;
13564 case 32:
13565 bf_set(lpfc_mq_context_ring_size,
13566 &mq_create_ext->u.request.context,
13567 LPFC_MQ_RING_SIZE_32);
13568 break;
13569 case 64:
13570 bf_set(lpfc_mq_context_ring_size,
13571 &mq_create_ext->u.request.context,
13572 LPFC_MQ_RING_SIZE_64);
13573 break;
13574 case 128:
13575 bf_set(lpfc_mq_context_ring_size,
13576 &mq_create_ext->u.request.context,
13577 LPFC_MQ_RING_SIZE_128);
13578 break;
13579 }
13580 list_for_each_entry(dmabuf, &mq->page_list, list) {
13581 memset(dmabuf->virt, 0, hw_page_size);
13582 mq_create_ext->u.request.page[dmabuf->buffer_tag].addr_lo =
13583 putPaddrLow(dmabuf->phys);
13584 mq_create_ext->u.request.page[dmabuf->buffer_tag].addr_hi =
13585 putPaddrHigh(dmabuf->phys);
13586 }
13587 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
13588 mq->queue_id = bf_get(lpfc_mbx_mq_create_q_id,
13589 &mq_create_ext->u.response);
13590 if (rc != MBX_SUCCESS) {
13591 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
13592 "2795 MQ_CREATE_EXT failed with "
				"status x%x. Falling back to MQ_CREATE.\n",
13594 rc);
13595 lpfc_mq_create_fb_init(phba, mq, mbox, cq);
13596 mq_create = &mbox->u.mqe.un.mq_create;
13597 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
13598 shdr = (union lpfc_sli4_cfg_shdr *) &mq_create->header.cfg_shdr;
13599 mq->queue_id = bf_get(lpfc_mbx_mq_create_q_id,
13600 &mq_create->u.response);
13601 }
13602
13603
13604 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
13605 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
13606 if (shdr_status || shdr_add_status || rc) {
13607 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
13608 "2502 MQ_CREATE mailbox failed with "
13609 "status x%x add_status x%x, mbx status x%x\n",
13610 shdr_status, shdr_add_status, rc);
13611 status = -ENXIO;
13612 goto out;
13613 }
13614 if (mq->queue_id == 0xFFFF) {
13615 status = -ENXIO;
13616 goto out;
13617 }
13618 mq->type = LPFC_MQ;
13619 mq->assoc_qid = cq->queue_id;
13620 mq->subtype = subtype;
13621 mq->host_index = 0;
13622 mq->hba_index = 0;
13623
13624
13625 list_add_tail(&mq->list, &cq->child_list);
13626out:
13627 mempool_free(mbox, phba->mbox_mem_pool);
13628 return status;
13629}
13630
13631
13632
13633
13634
13635
13636
13637
13638
13639
13640
13641
13642
13643
13644
13645
13646
13647
13648
13649
13650
13651
13652
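/**
 * lpfc_wq_create - Create a Work Queue on the HBA
 * @phba: HBA structure that indicates port to create a queue on.
 * @wq: The queue structure to use to create the work queue.
 * @cq: The completion queue to bind this work queue to.
 * @subtype: The subtype of the work queue indicating its functionality.
 *
 * This function creates the work queue described by @wq on the port, bound
 * to completion queue @cq, by sending a WQ_CREATE mailbox command, in
 * polled mode, to the HBA. When dual-chute (DUA) mode is active, the
 * returned doorbell format and BAR offset are validated and mapped.
 *
 * Return: 0 on success, -ENOMEM if the mailbox cannot be allocated, or a
 * negative errno if the mailbox command fails.
 **/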
13653int
13654lpfc_wq_create(struct lpfc_hba *phba, struct lpfc_queue *wq,
13655 struct lpfc_queue *cq, uint32_t subtype)
13656{
13657 struct lpfc_mbx_wq_create *wq_create;
13658 struct lpfc_dmabuf *dmabuf;
13659 LPFC_MBOXQ_t *mbox;
13660 int rc, length, status = 0;
13661 uint32_t shdr_status, shdr_add_status;
13662 union lpfc_sli4_cfg_shdr *shdr;
13663 uint32_t hw_page_size = phba->sli4_hba.pc_sli4_params.if_page_sz;
13664 struct dma_address *page;
13665 void __iomem *bar_memmap_p;
13666 uint32_t db_offset;
13667 uint16_t pci_barset;
13668
13669
13670 if (!wq || !cq)
13671 return -ENODEV;
13672 if (!phba->sli4_hba.pc_sli4_params.supported)
13673 hw_page_size = SLI4_PAGE_SIZE;
13674
13675 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
13676 if (!mbox)
13677 return -ENOMEM;
13678 length = (sizeof(struct lpfc_mbx_wq_create) -
13679 sizeof(struct lpfc_sli4_cfg_mhdr));
13680 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_FCOE,
13681 LPFC_MBOX_OPCODE_FCOE_WQ_CREATE,
13682 length, LPFC_SLI4_MBX_EMBED);
13683 wq_create = &mbox->u.mqe.un.wq_create;
13684 shdr = (union lpfc_sli4_cfg_shdr *) &wq_create->header.cfg_shdr;
13685 bf_set(lpfc_mbx_wq_create_num_pages, &wq_create->u.request,
13686 wq->page_count);
13687 bf_set(lpfc_mbx_wq_create_cq_id, &wq_create->u.request,
13688 cq->queue_id);
13689
13690
13691 bf_set(lpfc_mbox_hdr_version, &shdr->request,
13692 phba->sli4_hba.pc_sli4_params.wqv);
13693
13694 switch (phba->sli4_hba.pc_sli4_params.wqv) {
13695 case LPFC_Q_CREATE_VERSION_0:
13696 switch (wq->entry_size) {
13697 default:
13698 case 64:
13699
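			/* Nothing to do, version 0 ONLY supports 64 byte */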
13700 page = wq_create->u.request.page;
13701 break;
13702 case 128:
13703 if (!(phba->sli4_hba.pc_sli4_params.wqsize &
13704 LPFC_WQ_SZ128_SUPPORT)) {
13705 status = -ERANGE;
13706 goto out;
13707 }
13708
13709
13710
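			/* If we get here the HBA MUST also support V1 and
			 * we MUST use it
			 */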
13711 bf_set(lpfc_mbox_hdr_version, &shdr->request,
13712 LPFC_Q_CREATE_VERSION_1);
13713
13714 bf_set(lpfc_mbx_wq_create_wqe_count,
13715 &wq_create->u.request_1, wq->entry_count);
13716 bf_set(lpfc_mbx_wq_create_wqe_size,
13717 &wq_create->u.request_1,
13718 LPFC_WQ_WQE_SIZE_128);
13719 bf_set(lpfc_mbx_wq_create_page_size,
13720 &wq_create->u.request_1,
13721 (PAGE_SIZE/SLI4_PAGE_SIZE));
13722 page = wq_create->u.request_1.page;
13723 break;
13724 }
13725 break;
13726 case LPFC_Q_CREATE_VERSION_1:
13727 bf_set(lpfc_mbx_wq_create_wqe_count, &wq_create->u.request_1,
13728 wq->entry_count);
13729 switch (wq->entry_size) {
13730 default:
13731 case 64:
13732 bf_set(lpfc_mbx_wq_create_wqe_size,
13733 &wq_create->u.request_1,
13734 LPFC_WQ_WQE_SIZE_64);
13735 break;
13736 case 128:
13737 if (!(phba->sli4_hba.pc_sli4_params.wqsize &
13738 LPFC_WQ_SZ128_SUPPORT)) {
13739 status = -ERANGE;
13740 goto out;
13741 }
13742 bf_set(lpfc_mbx_wq_create_wqe_size,
13743 &wq_create->u.request_1,
13744 LPFC_WQ_WQE_SIZE_128);
13745 break;
13746 }
13747 bf_set(lpfc_mbx_wq_create_page_size, &wq_create->u.request_1,
13748 (PAGE_SIZE/SLI4_PAGE_SIZE));
13749 page = wq_create->u.request_1.page;
13750 break;
13751 default:
13752 status = -ERANGE;
13753 goto out;
13754 }
13755
13756 list_for_each_entry(dmabuf, &wq->page_list, list) {
13757 memset(dmabuf->virt, 0, hw_page_size);
13758 page[dmabuf->buffer_tag].addr_lo = putPaddrLow(dmabuf->phys);
13759 page[dmabuf->buffer_tag].addr_hi = putPaddrHigh(dmabuf->phys);
13760 }
13761
13762 if (phba->sli4_hba.fw_func_mode & LPFC_DUA_MODE)
13763 bf_set(lpfc_mbx_wq_create_dua, &wq_create->u.request, 1);
13764
13765 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
13766
13767 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
13768 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
13769 if (shdr_status || shdr_add_status || rc) {
13770 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
13771 "2503 WQ_CREATE mailbox failed with "
13772 "status x%x add_status x%x, mbx status x%x\n",
13773 shdr_status, shdr_add_status, rc);
13774 status = -ENXIO;
13775 goto out;
13776 }
13777 wq->queue_id = bf_get(lpfc_mbx_wq_create_q_id, &wq_create->u.response);
13778 if (wq->queue_id == 0xFFFF) {
13779 status = -ENXIO;
13780 goto out;
13781 }
13782 if (phba->sli4_hba.fw_func_mode & LPFC_DUA_MODE) {
13783 wq->db_format = bf_get(lpfc_mbx_wq_create_db_format,
13784 &wq_create->u.response);
13785 if ((wq->db_format != LPFC_DB_LIST_FORMAT) &&
13786 (wq->db_format != LPFC_DB_RING_FORMAT)) {
13787 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
13788 "3265 WQ[%d] doorbell format not "
13789 "supported: x%x\n", wq->queue_id,
13790 wq->db_format);
13791 status = -EINVAL;
13792 goto out;
13793 }
13794 pci_barset = bf_get(lpfc_mbx_wq_create_bar_set,
13795 &wq_create->u.response);
13796 bar_memmap_p = lpfc_dual_chute_pci_bar_map(phba, pci_barset);
13797 if (!bar_memmap_p) {
13798 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
13799 "3263 WQ[%d] failed to memmap pci "
13800 "barset:x%x\n", wq->queue_id,
13801 pci_barset);
13802 status = -ENOMEM;
13803 goto out;
13804 }
13805 db_offset = wq_create->u.response.doorbell_offset;
13806 if ((db_offset != LPFC_ULP0_WQ_DOORBELL) &&
13807 (db_offset != LPFC_ULP1_WQ_DOORBELL)) {
13808 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
13809 "3252 WQ[%d] doorbell offset not "
13810 "supported: x%x\n", wq->queue_id,
13811 db_offset);
13812 status = -EINVAL;
13813 goto out;
13814 }
13815 wq->db_regaddr = bar_memmap_p + db_offset;
13816 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
13817 "3264 WQ[%d]: barset:x%x, offset:x%x, "
13818 "format:x%x\n", wq->queue_id, pci_barset,
13819 db_offset, wq->db_format);
13820 } else {
13821 wq->db_format = LPFC_DB_LIST_FORMAT;
13822 wq->db_regaddr = phba->sli4_hba.WQDBregaddr;
13823 }
13824 wq->type = LPFC_WQ;
13825 wq->assoc_qid = cq->queue_id;
13826 wq->subtype = subtype;
13827 wq->host_index = 0;
13828 wq->hba_index = 0;
13829 wq->entry_repost = LPFC_RELEASE_NOTIFICATION_INTERVAL;
13830
13831
13832 list_add_tail(&wq->list, &cq->child_list);
13833out:
13834 mempool_free(mbox, phba->mbox_mem_pool);
13835 return status;
13836}
13837
13838
13839
13840
13841
13842
13843
13844
13845
13846
13847
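/**
 * lpfc_rq_adjust_repost - Adjust entry_repost for an RQ
 * @phba: HBA structure that indicates port to create a queue on.
 * @rq: The queue structure to adjust.
 * @qno: The HBQ number whose entry count is reused.
 *
 * This function derives the RQ repost interval from the entry count of the
 * HBQ definition indexed by @qno: one eighth of the ring, with a minimum of
 * LPFC_QUEUE_MIN_REPOST.
 **/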
13848void
13849lpfc_rq_adjust_repost(struct lpfc_hba *phba, struct lpfc_queue *rq, int qno)
13850{
13851 uint32_t cnt;
13852
13853
13854 if (!rq)
13855 return;
13856 cnt = lpfc_hbq_defs[qno]->entry_count;
13857
13858
13859 cnt = (cnt >> 3);
13860 if (cnt < LPFC_QUEUE_MIN_REPOST)
13861 cnt = LPFC_QUEUE_MIN_REPOST;
13862
13863 rq->entry_repost = cnt;
13864}
13865
13866
13867
13868
13869
13870
13871
13872
13873
13874
13875
13876
13877
13878
13879
13880
13881
13882
13883
13884
13885
13886
13887
13888
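/**
 * lpfc_rq_create - Create a Receive Queue pair on the HBA
 * @phba: HBA structure that indicates port to create a queue on.
 * @hrq: The queue structure to use to create the header receive queue.
 * @drq: The queue structure to use to create the data receive queue.
 * @cq: The completion queue to bind these receive queues to.
 * @subtype: The subtype of the receive queues.
 *
 * This function creates the header/data receive queue pair described by
 * @hrq and @drq on the port, bound to completion queue @cq, by sending two
 * RQ_CREATE mailbox commands, in polled mode, to the HBA. The two queues
 * must have the same entry count.
 *
 * Return: 0 on success, -ENOMEM if the mailbox cannot be allocated, or a
 * negative errno if a mailbox command fails.
 **/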
13889int
13890lpfc_rq_create(struct lpfc_hba *phba, struct lpfc_queue *hrq,
13891 struct lpfc_queue *drq, struct lpfc_queue *cq, uint32_t subtype)
13892{
13893 struct lpfc_mbx_rq_create *rq_create;
13894 struct lpfc_dmabuf *dmabuf;
13895 LPFC_MBOXQ_t *mbox;
13896 int rc, length, status = 0;
13897 uint32_t shdr_status, shdr_add_status;
13898 union lpfc_sli4_cfg_shdr *shdr;
13899 uint32_t hw_page_size = phba->sli4_hba.pc_sli4_params.if_page_sz;
13900 void __iomem *bar_memmap_p;
13901 uint32_t db_offset;
13902 uint16_t pci_barset;
13903
13904
13905 if (!hrq || !drq || !cq)
13906 return -ENODEV;
13907 if (!phba->sli4_hba.pc_sli4_params.supported)
13908 hw_page_size = SLI4_PAGE_SIZE;
13909
13910 if (hrq->entry_count != drq->entry_count)
13911 return -EINVAL;
13912 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
13913 if (!mbox)
13914 return -ENOMEM;
13915 length = (sizeof(struct lpfc_mbx_rq_create) -
13916 sizeof(struct lpfc_sli4_cfg_mhdr));
13917 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_FCOE,
13918 LPFC_MBOX_OPCODE_FCOE_RQ_CREATE,
13919 length, LPFC_SLI4_MBX_EMBED);
13920 rq_create = &mbox->u.mqe.un.rq_create;
13921 shdr = (union lpfc_sli4_cfg_shdr *) &rq_create->header.cfg_shdr;
13922 bf_set(lpfc_mbox_hdr_version, &shdr->request,
13923 phba->sli4_hba.pc_sli4_params.rqv);
13924 if (phba->sli4_hba.pc_sli4_params.rqv == LPFC_Q_CREATE_VERSION_1) {
13925 bf_set(lpfc_rq_context_rqe_count_1,
13926 &rq_create->u.request.context,
13927 hrq->entry_count);
13928 rq_create->u.request.context.buffer_size = LPFC_HDR_BUF_SIZE;
13929 bf_set(lpfc_rq_context_rqe_size,
13930 &rq_create->u.request.context,
13931 LPFC_RQE_SIZE_8);
13932 bf_set(lpfc_rq_context_page_size,
13933 &rq_create->u.request.context,
13934 (PAGE_SIZE/SLI4_PAGE_SIZE));
13935 } else {
13936 switch (hrq->entry_count) {
13937 default:
13938 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
13939 "2535 Unsupported RQ count. (%d)\n",
13940 hrq->entry_count);
13941 if (hrq->entry_count < 512) {
13942 status = -EINVAL;
13943 goto out;
13944 }
13945
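		/* otherwise default to smallest count (drop through) */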
13946 case 512:
13947 bf_set(lpfc_rq_context_rqe_count,
13948 &rq_create->u.request.context,
13949 LPFC_RQ_RING_SIZE_512);
13950 break;
13951 case 1024:
13952 bf_set(lpfc_rq_context_rqe_count,
13953 &rq_create->u.request.context,
13954 LPFC_RQ_RING_SIZE_1024);
13955 break;
13956 case 2048:
13957 bf_set(lpfc_rq_context_rqe_count,
13958 &rq_create->u.request.context,
13959 LPFC_RQ_RING_SIZE_2048);
13960 break;
13961 case 4096:
13962 bf_set(lpfc_rq_context_rqe_count,
13963 &rq_create->u.request.context,
13964 LPFC_RQ_RING_SIZE_4096);
13965 break;
13966 }
13967 bf_set(lpfc_rq_context_buf_size, &rq_create->u.request.context,
13968 LPFC_HDR_BUF_SIZE);
13969 }
13970 bf_set(lpfc_rq_context_cq_id, &rq_create->u.request.context,
13971 cq->queue_id);
13972 bf_set(lpfc_mbx_rq_create_num_pages, &rq_create->u.request,
13973 hrq->page_count);
13974 list_for_each_entry(dmabuf, &hrq->page_list, list) {
13975 memset(dmabuf->virt, 0, hw_page_size);
13976 rq_create->u.request.page[dmabuf->buffer_tag].addr_lo =
13977 putPaddrLow(dmabuf->phys);
13978 rq_create->u.request.page[dmabuf->buffer_tag].addr_hi =
13979 putPaddrHigh(dmabuf->phys);
13980 }
13981 if (phba->sli4_hba.fw_func_mode & LPFC_DUA_MODE)
13982 bf_set(lpfc_mbx_rq_create_dua, &rq_create->u.request, 1);
13983
13984 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
13985
13986 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
13987 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
13988 if (shdr_status || shdr_add_status || rc) {
13989 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
13990 "2504 RQ_CREATE mailbox failed with "
13991 "status x%x add_status x%x, mbx status x%x\n",
13992 shdr_status, shdr_add_status, rc);
13993 status = -ENXIO;
13994 goto out;
13995 }
13996 hrq->queue_id = bf_get(lpfc_mbx_rq_create_q_id, &rq_create->u.response);
13997 if (hrq->queue_id == 0xFFFF) {
13998 status = -ENXIO;
13999 goto out;
14000 }
14001
14002 if (phba->sli4_hba.fw_func_mode & LPFC_DUA_MODE) {
14003 hrq->db_format = bf_get(lpfc_mbx_rq_create_db_format,
14004 &rq_create->u.response);
14005 if ((hrq->db_format != LPFC_DB_LIST_FORMAT) &&
14006 (hrq->db_format != LPFC_DB_RING_FORMAT)) {
14007 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
14008 "3262 RQ [%d] doorbell format not "
14009 "supported: x%x\n", hrq->queue_id,
14010 hrq->db_format);
14011 status = -EINVAL;
14012 goto out;
14013 }
14014
14015 pci_barset = bf_get(lpfc_mbx_rq_create_bar_set,
14016 &rq_create->u.response);
14017 bar_memmap_p = lpfc_dual_chute_pci_bar_map(phba, pci_barset);
14018 if (!bar_memmap_p) {
14019 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
14020 "3269 RQ[%d] failed to memmap pci "
14021 "barset:x%x\n", hrq->queue_id,
14022 pci_barset);
14023 status = -ENOMEM;
14024 goto out;
14025 }
14026
14027 db_offset = rq_create->u.response.doorbell_offset;
14028 if ((db_offset != LPFC_ULP0_RQ_DOORBELL) &&
14029 (db_offset != LPFC_ULP1_RQ_DOORBELL)) {
14030 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
14031 "3270 RQ[%d] doorbell offset not "
14032 "supported: x%x\n", hrq->queue_id,
14033 db_offset);
14034 status = -EINVAL;
14035 goto out;
14036 }
14037 hrq->db_regaddr = bar_memmap_p + db_offset;
14038 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
14039 "3266 RQ[qid:%d]: barset:x%x, offset:x%x, "
14040 "format:x%x\n", hrq->queue_id, pci_barset,
14041 db_offset, hrq->db_format);
14042 } else {
14043 hrq->db_format = LPFC_DB_RING_FORMAT;
14044 hrq->db_regaddr = phba->sli4_hba.RQDBregaddr;
14045 }
14046 hrq->type = LPFC_HRQ;
14047 hrq->assoc_qid = cq->queue_id;
14048 hrq->subtype = subtype;
14049 hrq->host_index = 0;
14050 hrq->hba_index = 0;
14051
14052
14053 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_FCOE,
14054 LPFC_MBOX_OPCODE_FCOE_RQ_CREATE,
14055 length, LPFC_SLI4_MBX_EMBED);
14056 bf_set(lpfc_mbox_hdr_version, &shdr->request,
14057 phba->sli4_hba.pc_sli4_params.rqv);
14058 if (phba->sli4_hba.pc_sli4_params.rqv == LPFC_Q_CREATE_VERSION_1) {
14059 bf_set(lpfc_rq_context_rqe_count_1,
14060 &rq_create->u.request.context, hrq->entry_count);
14061 rq_create->u.request.context.buffer_size = LPFC_DATA_BUF_SIZE;
14062 bf_set(lpfc_rq_context_rqe_size, &rq_create->u.request.context,
14063 LPFC_RQE_SIZE_8);
14064 bf_set(lpfc_rq_context_page_size, &rq_create->u.request.context,
14065 (PAGE_SIZE/SLI4_PAGE_SIZE));
14066 } else {
14067 switch (drq->entry_count) {
14068 default:
14069 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
14070 "2536 Unsupported RQ count. (%d)\n",
14071 drq->entry_count);
14072 if (drq->entry_count < 512) {
14073 status = -EINVAL;
14074 goto out;
14075 }
14076
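		/* otherwise default to smallest count (drop through) */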
14077 case 512:
14078 bf_set(lpfc_rq_context_rqe_count,
14079 &rq_create->u.request.context,
14080 LPFC_RQ_RING_SIZE_512);
14081 break;
14082 case 1024:
14083 bf_set(lpfc_rq_context_rqe_count,
14084 &rq_create->u.request.context,
14085 LPFC_RQ_RING_SIZE_1024);
14086 break;
14087 case 2048:
14088 bf_set(lpfc_rq_context_rqe_count,
14089 &rq_create->u.request.context,
14090 LPFC_RQ_RING_SIZE_2048);
14091 break;
14092 case 4096:
14093 bf_set(lpfc_rq_context_rqe_count,
14094 &rq_create->u.request.context,
14095 LPFC_RQ_RING_SIZE_4096);
14096 break;
14097 }
14098 bf_set(lpfc_rq_context_buf_size, &rq_create->u.request.context,
14099 LPFC_DATA_BUF_SIZE);
14100 }
14101 bf_set(lpfc_rq_context_cq_id, &rq_create->u.request.context,
14102 cq->queue_id);
14103 bf_set(lpfc_mbx_rq_create_num_pages, &rq_create->u.request,
14104 drq->page_count);
14105 list_for_each_entry(dmabuf, &drq->page_list, list) {
14106 rq_create->u.request.page[dmabuf->buffer_tag].addr_lo =
14107 putPaddrLow(dmabuf->phys);
14108 rq_create->u.request.page[dmabuf->buffer_tag].addr_hi =
14109 putPaddrHigh(dmabuf->phys);
14110 }
14111 if (phba->sli4_hba.fw_func_mode & LPFC_DUA_MODE)
14112 bf_set(lpfc_mbx_rq_create_dua, &rq_create->u.request, 1);
14113 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
14114
14115 shdr = (union lpfc_sli4_cfg_shdr *) &rq_create->header.cfg_shdr;
14116 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
14117 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
14118 if (shdr_status || shdr_add_status || rc) {
14119 status = -ENXIO;
14120 goto out;
14121 }
14122 drq->queue_id = bf_get(lpfc_mbx_rq_create_q_id, &rq_create->u.response);
14123 if (drq->queue_id == 0xFFFF) {
14124 status = -ENXIO;
14125 goto out;
14126 }
14127 drq->type = LPFC_DRQ;
14128 drq->assoc_qid = cq->queue_id;
14129 drq->subtype = subtype;
14130 drq->host_index = 0;
14131 drq->hba_index = 0;
14132
14133
14134 list_add_tail(&hrq->list, &cq->child_list);
14135 list_add_tail(&drq->list, &cq->child_list);
14136
14137out:
14138 mempool_free(mbox, phba->mbox_mem_pool);
14139 return status;
14140}
14141
14142
14143
14144
14145
14146
14147
14148
14149
14150
14151
14152
14153
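/**
 * lpfc_eq_destroy - Destroy an Event Queue on the HBA
 * @phba: HBA structure that indicates port to destroy a queue on.
 * @eq: The queue structure associated with the queue to destroy.
 *
 * This function destroys the event queue identified by @eq by sending an
 * EQ_DESTROY mailbox command, in polled mode, to the HBA.
 *
 * Return: 0 on success, -ENOMEM if the mailbox cannot be allocated, or
 * -ENXIO if the mailbox command fails.
 **/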
14154int
14155lpfc_eq_destroy(struct lpfc_hba *phba, struct lpfc_queue *eq)
14156{
14157 LPFC_MBOXQ_t *mbox;
14158 int rc, length, status = 0;
14159 uint32_t shdr_status, shdr_add_status;
14160 union lpfc_sli4_cfg_shdr *shdr;
14161
14162
14163 if (!eq)
14164 return -ENODEV;
14165 mbox = mempool_alloc(eq->phba->mbox_mem_pool, GFP_KERNEL);
14166 if (!mbox)
14167 return -ENOMEM;
14168 length = (sizeof(struct lpfc_mbx_eq_destroy) -
14169 sizeof(struct lpfc_sli4_cfg_mhdr));
14170 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON,
14171 LPFC_MBOX_OPCODE_EQ_DESTROY,
14172 length, LPFC_SLI4_MBX_EMBED);
14173 bf_set(lpfc_mbx_eq_destroy_q_id, &mbox->u.mqe.un.eq_destroy.u.request,
14174 eq->queue_id);
14175 mbox->vport = eq->phba->pport;
14176 mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
14177
14178 rc = lpfc_sli_issue_mbox(eq->phba, mbox, MBX_POLL);
14179
14180 shdr = (union lpfc_sli4_cfg_shdr *)
14181 &mbox->u.mqe.un.eq_destroy.header.cfg_shdr;
14182 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
14183 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
14184 if (shdr_status || shdr_add_status || rc) {
14185 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
14186 "2505 EQ_DESTROY mailbox failed with "
14187 "status x%x add_status x%x, mbx status x%x\n",
14188 shdr_status, shdr_add_status, rc);
14189 status = -ENXIO;
14190 }
14191
14192
14193 list_del_init(&eq->list);
14194 mempool_free(mbox, eq->phba->mbox_mem_pool);
14195 return status;
14196}
14197
14198
14199
14200
14201
14202
14203
14204
14205
14206
14207
14208
14209
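/**
 * lpfc_cq_destroy - Destroy a Completion Queue on the HBA
 * @phba: HBA structure that indicates port to destroy a queue on.
 * @cq: The queue structure associated with the queue to destroy.
 *
 * This function destroys the completion queue identified by @cq by sending
 * a CQ_DESTROY mailbox command, in polled mode, to the HBA.
 *
 * Return: 0 on success, -ENOMEM if the mailbox cannot be allocated, or
 * -ENXIO if the mailbox command fails.
 **/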
14210int
14211lpfc_cq_destroy(struct lpfc_hba *phba, struct lpfc_queue *cq)
14212{
14213 LPFC_MBOXQ_t *mbox;
14214 int rc, length, status = 0;
14215 uint32_t shdr_status, shdr_add_status;
14216 union lpfc_sli4_cfg_shdr *shdr;
14217
14218
14219 if (!cq)
14220 return -ENODEV;
14221 mbox = mempool_alloc(cq->phba->mbox_mem_pool, GFP_KERNEL);
14222 if (!mbox)
14223 return -ENOMEM;
14224 length = (sizeof(struct lpfc_mbx_cq_destroy) -
14225 sizeof(struct lpfc_sli4_cfg_mhdr));
14226 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON,
14227 LPFC_MBOX_OPCODE_CQ_DESTROY,
14228 length, LPFC_SLI4_MBX_EMBED);
14229 bf_set(lpfc_mbx_cq_destroy_q_id, &mbox->u.mqe.un.cq_destroy.u.request,
14230 cq->queue_id);
14231 mbox->vport = cq->phba->pport;
14232 mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
14233 rc = lpfc_sli_issue_mbox(cq->phba, mbox, MBX_POLL);
14234
14235 shdr = (union lpfc_sli4_cfg_shdr *)
		&mbox->u.mqe.un.cq_destroy.header.cfg_shdr;
14237 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
14238 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
14239 if (shdr_status || shdr_add_status || rc) {
14240 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
14241 "2506 CQ_DESTROY mailbox failed with "
14242 "status x%x add_status x%x, mbx status x%x\n",
14243 shdr_status, shdr_add_status, rc);
14244 status = -ENXIO;
14245 }
14246
14247 list_del_init(&cq->list);
14248 mempool_free(mbox, cq->phba->mbox_mem_pool);
14249 return status;
14250}
14251
14252
14253
14254
14255
14256
14257
14258
14259
14260
14261
14262
14263
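/**
 * lpfc_mq_destroy - Destroy a Mailbox Queue on the HBA
 * @phba: HBA structure that indicates port to destroy a queue on.
 * @mq: The queue structure associated with the queue to destroy.
 *
 * This function destroys the mailbox queue identified by @mq by sending an
 * MQ_DESTROY mailbox command, in polled mode, to the HBA.
 *
 * Return: 0 on success, -ENOMEM if the mailbox cannot be allocated, or
 * -ENXIO if the mailbox command fails.
 **/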
14264int
14265lpfc_mq_destroy(struct lpfc_hba *phba, struct lpfc_queue *mq)
14266{
14267 LPFC_MBOXQ_t *mbox;
14268 int rc, length, status = 0;
14269 uint32_t shdr_status, shdr_add_status;
14270 union lpfc_sli4_cfg_shdr *shdr;
14271
14272
14273 if (!mq)
14274 return -ENODEV;
14275 mbox = mempool_alloc(mq->phba->mbox_mem_pool, GFP_KERNEL);
14276 if (!mbox)
14277 return -ENOMEM;
14278 length = (sizeof(struct lpfc_mbx_mq_destroy) -
14279 sizeof(struct lpfc_sli4_cfg_mhdr));
14280 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON,
14281 LPFC_MBOX_OPCODE_MQ_DESTROY,
14282 length, LPFC_SLI4_MBX_EMBED);
14283 bf_set(lpfc_mbx_mq_destroy_q_id, &mbox->u.mqe.un.mq_destroy.u.request,
14284 mq->queue_id);
14285 mbox->vport = mq->phba->pport;
14286 mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
14287 rc = lpfc_sli_issue_mbox(mq->phba, mbox, MBX_POLL);
14288
14289 shdr = (union lpfc_sli4_cfg_shdr *)
14290 &mbox->u.mqe.un.mq_destroy.header.cfg_shdr;
14291 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
14292 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
14293 if (shdr_status || shdr_add_status || rc) {
14294 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
14295 "2507 MQ_DESTROY mailbox failed with "
14296 "status x%x add_status x%x, mbx status x%x\n",
14297 shdr_status, shdr_add_status, rc);
14298 status = -ENXIO;
14299 }
14300
14301 list_del_init(&mq->list);
14302 mempool_free(mbox, mq->phba->mbox_mem_pool);
14303 return status;
14304}
14305
14306
14307
14308
14309
14310
14311
14312
14313
14314
14315
14316
14317
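/**
 * lpfc_wq_destroy - Destroy a Work Queue on the HBA
 * @phba: HBA structure that indicates port to destroy a queue on.
 * @wq: The queue structure associated with the queue to destroy.
 *
 * This function destroys the work queue identified by @wq by sending a
 * WQ_DESTROY mailbox command, in polled mode, to the HBA.
 *
 * Return: 0 on success, -ENOMEM if the mailbox cannot be allocated, or
 * -ENXIO if the mailbox command fails.
 **/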
14318int
14319lpfc_wq_destroy(struct lpfc_hba *phba, struct lpfc_queue *wq)
14320{
14321 LPFC_MBOXQ_t *mbox;
14322 int rc, length, status = 0;
14323 uint32_t shdr_status, shdr_add_status;
14324 union lpfc_sli4_cfg_shdr *shdr;
14325
14326
14327 if (!wq)
14328 return -ENODEV;
14329 mbox = mempool_alloc(wq->phba->mbox_mem_pool, GFP_KERNEL);
14330 if (!mbox)
14331 return -ENOMEM;
14332 length = (sizeof(struct lpfc_mbx_wq_destroy) -
14333 sizeof(struct lpfc_sli4_cfg_mhdr));
14334 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_FCOE,
14335 LPFC_MBOX_OPCODE_FCOE_WQ_DESTROY,
14336 length, LPFC_SLI4_MBX_EMBED);
14337 bf_set(lpfc_mbx_wq_destroy_q_id, &mbox->u.mqe.un.wq_destroy.u.request,
14338 wq->queue_id);
14339 mbox->vport = wq->phba->pport;
14340 mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
14341 rc = lpfc_sli_issue_mbox(wq->phba, mbox, MBX_POLL);
14342 shdr = (union lpfc_sli4_cfg_shdr *)
14343 &mbox->u.mqe.un.wq_destroy.header.cfg_shdr;
14344 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
14345 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
14346 if (shdr_status || shdr_add_status || rc) {
14347 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
14348 "2508 WQ_DESTROY mailbox failed with "
14349 "status x%x add_status x%x, mbx status x%x\n",
14350 shdr_status, shdr_add_status, rc);
14351 status = -ENXIO;
14352 }
14353
14354 list_del_init(&wq->list);
14355 mempool_free(mbox, wq->phba->mbox_mem_pool);
14356 return status;
14357}
14358
14359
14360
14361
14362
14363
14364
14365
14366
14367
14368
14369
14370
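/**
 * lpfc_rq_destroy - Destroy a Receive Queue pair on the HBA
 * @phba: HBA structure that indicates port to destroy a queue on.
 * @hrq: The header receive queue to destroy.
 * @drq: The data receive queue to destroy.
 *
 * This function destroys the receive queue pair identified by @hrq and
 * @drq by sending two RQ_DESTROY mailbox commands, in polled mode, to the
 * HBA.
 *
 * Return: 0 on success, -ENOMEM if the mailbox cannot be allocated, or
 * -ENXIO if a mailbox command fails.
 **/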
14371int
14372lpfc_rq_destroy(struct lpfc_hba *phba, struct lpfc_queue *hrq,
14373 struct lpfc_queue *drq)
14374{
14375 LPFC_MBOXQ_t *mbox;
14376 int rc, length, status = 0;
14377 uint32_t shdr_status, shdr_add_status;
14378 union lpfc_sli4_cfg_shdr *shdr;
14379
14380
14381 if (!hrq || !drq)
14382 return -ENODEV;
14383 mbox = mempool_alloc(hrq->phba->mbox_mem_pool, GFP_KERNEL);
14384 if (!mbox)
14385 return -ENOMEM;
14386 length = (sizeof(struct lpfc_mbx_rq_destroy) -
14387 sizeof(struct lpfc_sli4_cfg_mhdr));
14388 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_FCOE,
14389 LPFC_MBOX_OPCODE_FCOE_RQ_DESTROY,
14390 length, LPFC_SLI4_MBX_EMBED);
14391 bf_set(lpfc_mbx_rq_destroy_q_id, &mbox->u.mqe.un.rq_destroy.u.request,
14392 hrq->queue_id);
14393 mbox->vport = hrq->phba->pport;
14394 mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
14395 rc = lpfc_sli_issue_mbox(hrq->phba, mbox, MBX_POLL);
14396
14397 shdr = (union lpfc_sli4_cfg_shdr *)
14398 &mbox->u.mqe.un.rq_destroy.header.cfg_shdr;
14399 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
14400 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
14401 if (shdr_status || shdr_add_status || rc) {
14402 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
14403 "2509 RQ_DESTROY mailbox failed with "
14404 "status x%x add_status x%x, mbx status x%x\n",
14405 shdr_status, shdr_add_status, rc);
14406 if (rc != MBX_TIMEOUT)
14407 mempool_free(mbox, hrq->phba->mbox_mem_pool);
14408 return -ENXIO;
14409 }
14410 bf_set(lpfc_mbx_rq_destroy_q_id, &mbox->u.mqe.un.rq_destroy.u.request,
14411 drq->queue_id);
14412 rc = lpfc_sli_issue_mbox(drq->phba, mbox, MBX_POLL);
14413 shdr = (union lpfc_sli4_cfg_shdr *)
14414 &mbox->u.mqe.un.rq_destroy.header.cfg_shdr;
14415 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
14416 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
14417 if (shdr_status || shdr_add_status || rc) {
14418 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
14419 "2510 RQ_DESTROY mailbox failed with "
14420 "status x%x add_status x%x, mbx status x%x\n",
14421 shdr_status, shdr_add_status, rc);
14422 status = -ENXIO;
14423 }
14424 list_del_init(&hrq->list);
14425 list_del_init(&drq->list);
14426 mempool_free(mbox, hrq->phba->mbox_mem_pool);
14427 return status;
14428}
14429
14430
14431
14432
14433
14434
14435
14436
14437
14438
14439
14440
14441
14442
14443
14444
14445
14446
14447
14448
14449
14450
14451
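/**
 * lpfc_sli4_post_sgl - Post a pair of sgl pages for an xritag
 * @phba: pointer to lpfc hba data structure.
 * @pdma_phys_addr0: Physical address of the first sgl page.
 * @pdma_phys_addr1: Physical address of the second sgl page (may be 0).
 * @xritag: The XRI the sgl pages are registered against.
 *
 * Issues a POST_SGL_PAGES mailbox command, polled when interrupts are
 * not yet enabled.
 *
 * Return: 0 on success; -EINVAL for NO_XRI; -ENOMEM if no mailbox
 * memory is available; -ENXIO if the mailbox command fails.
 **/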
14452int
14453lpfc_sli4_post_sgl(struct lpfc_hba *phba,
14454 dma_addr_t pdma_phys_addr0,
14455 dma_addr_t pdma_phys_addr1,
14456 uint16_t xritag)
14457{
14458 struct lpfc_mbx_post_sgl_pages *post_sgl_pages;
14459 LPFC_MBOXQ_t *mbox;
14460 int rc;
14461 uint32_t shdr_status, shdr_add_status;
14462 uint32_t mbox_tmo;
14463 union lpfc_sli4_cfg_shdr *shdr;
14464
14465 if (xritag == NO_XRI) {
14466 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
14467 "0364 Invalid param:\n");
14468 return -EINVAL;
14469 }
14470
14471 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
14472 if (!mbox)
14473 return -ENOMEM;
14474
14475 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_FCOE,
14476 LPFC_MBOX_OPCODE_FCOE_POST_SGL_PAGES,
14477 sizeof(struct lpfc_mbx_post_sgl_pages) -
14478 sizeof(struct lpfc_sli4_cfg_mhdr), LPFC_SLI4_MBX_EMBED);
14479
14480 post_sgl_pages = (struct lpfc_mbx_post_sgl_pages *)
14481 &mbox->u.mqe.un.post_sgl_pages;
14482 bf_set(lpfc_post_sgl_pages_xri, post_sgl_pages, xritag);
14483 bf_set(lpfc_post_sgl_pages_xricnt, post_sgl_pages, 1);
14484
14485 post_sgl_pages->sgl_pg_pairs[0].sgl_pg0_addr_lo =
14486 cpu_to_le32(putPaddrLow(pdma_phys_addr0));
14487 post_sgl_pages->sgl_pg_pairs[0].sgl_pg0_addr_hi =
14488 cpu_to_le32(putPaddrHigh(pdma_phys_addr0));
14489
14490 post_sgl_pages->sgl_pg_pairs[0].sgl_pg1_addr_lo =
14491 cpu_to_le32(putPaddrLow(pdma_phys_addr1));
14492 post_sgl_pages->sgl_pg_pairs[0].sgl_pg1_addr_hi =
14493 cpu_to_le32(putPaddrHigh(pdma_phys_addr1));
14494 if (!phba->sli4_hba.intr_enable)
14495 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
14496 else {
14497 mbox_tmo = lpfc_mbox_tmo_val(phba, mbox);
14498 rc = lpfc_sli_issue_mbox_wait(phba, mbox, mbox_tmo);
14499 }
14500
14501 shdr = (union lpfc_sli4_cfg_shdr *) &post_sgl_pages->header.cfg_shdr;
14502 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
14503 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
14504 if (rc != MBX_TIMEOUT)
14505 mempool_free(mbox, phba->mbox_mem_pool);
	if (shdr_status || shdr_add_status || rc) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"2511 POST_SGL mailbox failed with "
				"status x%x add_status x%x, mbx status x%x\n",
				shdr_status, shdr_add_status, rc);
		rc = -ENXIO;
	}
	return rc;
14513}
14514
14515
14516
14517
14518
14519
14520
14521
14522
14523
14524
14525
14526
14527
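/**
 * lpfc_sli4_alloc_xri - Allocate an available XRI
 * @phba: pointer to lpfc hba data structure.
 *
 * Finds the next zero bit in the XRI bitmask and marks it in use.
 *
 * Return: the allocated XRI, or NO_XRI if the pool is exhausted.
 **/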
14528static uint16_t
14529lpfc_sli4_alloc_xri(struct lpfc_hba *phba)
14530{
14531 unsigned long xri;
14532
14533
14534
14535
14536
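	/*
	 * Take the next available XRI from the shared bitmask under the
	 * hbalock to serialize against other allocators.
	 */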
14537 spin_lock_irq(&phba->hbalock);
14538 xri = find_next_zero_bit(phba->sli4_hba.xri_bmask,
14539 phba->sli4_hba.max_cfg_param.max_xri, 0);
14540 if (xri >= phba->sli4_hba.max_cfg_param.max_xri) {
14541 spin_unlock_irq(&phba->hbalock);
14542 return NO_XRI;
14543 } else {
14544 set_bit(xri, phba->sli4_hba.xri_bmask);
14545 phba->sli4_hba.max_cfg_param.xri_used++;
14546 }
14547 spin_unlock_irq(&phba->hbalock);
14548 return xri;
14549}
14550
14551
14552
14553
14554
14555
14556
14557
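/**
 * __lpfc_sli4_free_xri - Release an XRI back to the pool
 * @phba: pointer to lpfc hba data structure.
 * @xri: the XRI to release.
 *
 * Lock-free version; the caller must hold the hbalock.
 **/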
14558static void
14559__lpfc_sli4_free_xri(struct lpfc_hba *phba, int xri)
14560{
14561 if (test_and_clear_bit(xri, phba->sli4_hba.xri_bmask)) {
14562 phba->sli4_hba.max_cfg_param.xri_used--;
14563 }
14564}
14565
14566
14567
14568
14569
14570
14571
14572
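/**
 * lpfc_sli4_free_xri - Release an XRI, taking the hbalock
 * @phba: pointer to lpfc hba data structure.
 * @xri: the XRI to release.
 **/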
14573void
14574lpfc_sli4_free_xri(struct lpfc_hba *phba, int xri)
14575{
14576 spin_lock_irq(&phba->hbalock);
14577 __lpfc_sli4_free_xri(phba, xri);
14578 spin_unlock_irq(&phba->hbalock);
14579}
14580
14581
14582
14583
14584
14585
14586
14587
14588
14589
14590
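/**
 * lpfc_sli4_next_xritag - Allocate the next available xritag
 * @phba: pointer to lpfc hba data structure.
 *
 * Thin wrapper around lpfc_sli4_alloc_xri() that logs a warning when
 * the XRI pool is exhausted.
 *
 * Return: the allocated xritag, or NO_XRI on failure.
 **/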
14591uint16_t
14592lpfc_sli4_next_xritag(struct lpfc_hba *phba)
14593{
14594 uint16_t xri_index;
14595
14596 xri_index = lpfc_sli4_alloc_xri(phba);
14597 if (xri_index == NO_XRI)
14598 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
14599 "2004 Failed to allocate XRI.last XRITAG is %d"
14600 " Max XRI is %d, Used XRI is %d\n",
14601 xri_index,
14602 phba->sli4_hba.max_cfg_param.max_xri,
14603 phba->sli4_hba.max_cfg_param.xri_used);
14604 return xri_index;
14605}
14606
14607
14608
14609
14610
14611
14612
14613
14614
14615
14616
14617
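/**
 * lpfc_sli4_post_els_sgl_list - Post a block of ELS sgl entries
 * @phba: pointer to lpfc hba data structure.
 * @post_sgl_list: list of sgl queue entries to post.
 * @post_cnt: number of entries on the list.
 *
 * Builds a non-embedded POST_SGL_PAGES mailbox command covering the
 * ELS sgl entries and issues it, polled or waited depending on whether
 * interrupts are enabled.
 *
 * Return: 0 on success; -ENOMEM or -ENXIO on failure.
 **/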
14618static int
14619lpfc_sli4_post_els_sgl_list(struct lpfc_hba *phba,
14620 struct list_head *post_sgl_list,
14621 int post_cnt)
14622{
14623 struct lpfc_sglq *sglq_entry = NULL, *sglq_next = NULL;
14624 struct lpfc_mbx_post_uembed_sgl_page1 *sgl;
14625 struct sgl_page_pairs *sgl_pg_pairs;
14626 void *viraddr;
14627 LPFC_MBOXQ_t *mbox;
14628 uint32_t reqlen, alloclen, pg_pairs;
14629 uint32_t mbox_tmo;
14630 uint16_t xritag_start = 0;
14631 int rc = 0;
14632 uint32_t shdr_status, shdr_add_status;
14633 union lpfc_sli4_cfg_shdr *shdr;
14634
14635 reqlen = phba->sli4_hba.els_xri_cnt * sizeof(struct sgl_page_pairs) +
14636 sizeof(union lpfc_sli4_cfg_shdr) + sizeof(uint32_t);
14637 if (reqlen > SLI4_PAGE_SIZE) {
14638 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
14639 "2559 Block sgl registration required DMA "
14640 "size (%d) great than a page\n", reqlen);
14641 return -ENOMEM;
14642 }
14643 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
14644 if (!mbox)
14645 return -ENOMEM;
14646
14647
14648 alloclen = lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_FCOE,
14649 LPFC_MBOX_OPCODE_FCOE_POST_SGL_PAGES, reqlen,
14650 LPFC_SLI4_MBX_NEMBED);
14651
14652 if (alloclen < reqlen) {
14653 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
14654 "0285 Allocated DMA memory size (%d) is "
14655 "less than the requested DMA memory "
14656 "size (%d)\n", alloclen, reqlen);
14657 lpfc_sli4_mbox_cmd_free(phba, mbox);
14658 return -ENOMEM;
14659 }
14660
14661 viraddr = mbox->sge_array->addr[0];
14662 sgl = (struct lpfc_mbx_post_uembed_sgl_page1 *)viraddr;
14663 sgl_pg_pairs = &sgl->sgl_pg_pairs;
14664
14665 pg_pairs = 0;
14666 list_for_each_entry_safe(sglq_entry, sglq_next, post_sgl_list, list) {
14667
14668 sgl_pg_pairs->sgl_pg0_addr_lo =
14669 cpu_to_le32(putPaddrLow(sglq_entry->phys));
14670 sgl_pg_pairs->sgl_pg0_addr_hi =
14671 cpu_to_le32(putPaddrHigh(sglq_entry->phys));
14672 sgl_pg_pairs->sgl_pg1_addr_lo =
14673 cpu_to_le32(putPaddrLow(0));
14674 sgl_pg_pairs->sgl_pg1_addr_hi =
14675 cpu_to_le32(putPaddrHigh(0));
14676
14677
14678 if (pg_pairs == 0)
14679 xritag_start = sglq_entry->sli4_xritag;
14680 sgl_pg_pairs++;
14681 pg_pairs++;
14682 }
14683
14684
14685 bf_set(lpfc_post_sgl_pages_xri, sgl, xritag_start);
14686 bf_set(lpfc_post_sgl_pages_xricnt, sgl, phba->sli4_hba.els_xri_cnt);
14687 sgl->word0 = cpu_to_le32(sgl->word0);
14688 if (!phba->sli4_hba.intr_enable)
14689 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
14690 else {
14691 mbox_tmo = lpfc_mbox_tmo_val(phba, mbox);
14692 rc = lpfc_sli_issue_mbox_wait(phba, mbox, mbox_tmo);
14693 }
14694 shdr = (union lpfc_sli4_cfg_shdr *) &sgl->cfg_shdr;
14695 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
14696 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
14697 if (rc != MBX_TIMEOUT)
14698 lpfc_sli4_mbox_cmd_free(phba, mbox);
14699 if (shdr_status || shdr_add_status || rc) {
14700 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
14701 "2513 POST_SGL_BLOCK mailbox command failed "
14702 "status x%x add_status x%x mbx status x%x\n",
14703 shdr_status, shdr_add_status, rc);
14704 rc = -ENXIO;
14705 }
14706 return rc;
14707}
14708
14709
14710
14711
14712
14713
14714
14715
14716
14717
14718
14719
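/**
 * lpfc_sli4_post_scsi_sgl_block - Post a block of SCSI buffer sgls
 * @phba: pointer to lpfc hba data structure.
 * @sblist: list of SCSI buffers whose sgls are to be posted.
 * @count: number of SCSI buffers on the list.
 *
 * Builds and issues a non-embedded POST_SGL_PAGES mailbox command for
 * the sgl pages of the SCSI buffers on @sblist.
 *
 * Return: 0 on success; -ENOMEM or -ENXIO on failure.
 **/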
14720int
14721lpfc_sli4_post_scsi_sgl_block(struct lpfc_hba *phba,
14722 struct list_head *sblist,
14723 int count)
14724{
14725 struct lpfc_scsi_buf *psb;
14726 struct lpfc_mbx_post_uembed_sgl_page1 *sgl;
14727 struct sgl_page_pairs *sgl_pg_pairs;
14728 void *viraddr;
14729 LPFC_MBOXQ_t *mbox;
14730 uint32_t reqlen, alloclen, pg_pairs;
14731 uint32_t mbox_tmo;
14732 uint16_t xritag_start = 0;
14733 int rc = 0;
14734 uint32_t shdr_status, shdr_add_status;
14735 dma_addr_t pdma_phys_bpl1;
14736 union lpfc_sli4_cfg_shdr *shdr;
14737
14738
14739 reqlen = count * sizeof(struct sgl_page_pairs) +
14740 sizeof(union lpfc_sli4_cfg_shdr) + sizeof(uint32_t);
14741 if (reqlen > SLI4_PAGE_SIZE) {
14742 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
14743 "0217 Block sgl registration required DMA "
14744 "size (%d) great than a page\n", reqlen);
14745 return -ENOMEM;
14746 }
14747 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
14748 if (!mbox) {
14749 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
14750 "0283 Failed to allocate mbox cmd memory\n");
14751 return -ENOMEM;
14752 }
14753
14754
14755 alloclen = lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_FCOE,
14756 LPFC_MBOX_OPCODE_FCOE_POST_SGL_PAGES, reqlen,
14757 LPFC_SLI4_MBX_NEMBED);
14758
14759 if (alloclen < reqlen) {
14760 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
14761 "2561 Allocated DMA memory size (%d) is "
14762 "less than the requested DMA memory "
14763 "size (%d)\n", alloclen, reqlen);
14764 lpfc_sli4_mbox_cmd_free(phba, mbox);
14765 return -ENOMEM;
14766 }
14767
14768
14769 viraddr = mbox->sge_array->addr[0];
14770
14771
14772 sgl = (struct lpfc_mbx_post_uembed_sgl_page1 *)viraddr;
14773 sgl_pg_pairs = &sgl->sgl_pg_pairs;
14774
14775 pg_pairs = 0;
14776 list_for_each_entry(psb, sblist, list) {
14777
14778 sgl_pg_pairs->sgl_pg0_addr_lo =
14779 cpu_to_le32(putPaddrLow(psb->dma_phys_bpl));
14780 sgl_pg_pairs->sgl_pg0_addr_hi =
14781 cpu_to_le32(putPaddrHigh(psb->dma_phys_bpl));
14782 if (phba->cfg_sg_dma_buf_size > SGL_PAGE_SIZE)
14783 pdma_phys_bpl1 = psb->dma_phys_bpl + SGL_PAGE_SIZE;
14784 else
14785 pdma_phys_bpl1 = 0;
14786 sgl_pg_pairs->sgl_pg1_addr_lo =
14787 cpu_to_le32(putPaddrLow(pdma_phys_bpl1));
14788 sgl_pg_pairs->sgl_pg1_addr_hi =
14789 cpu_to_le32(putPaddrHigh(pdma_phys_bpl1));
14790
14791 if (pg_pairs == 0)
14792 xritag_start = psb->cur_iocbq.sli4_xritag;
14793 sgl_pg_pairs++;
14794 pg_pairs++;
14795 }
14796 bf_set(lpfc_post_sgl_pages_xri, sgl, xritag_start);
14797 bf_set(lpfc_post_sgl_pages_xricnt, sgl, pg_pairs);
14798
14799 sgl->word0 = cpu_to_le32(sgl->word0);
14800
14801 if (!phba->sli4_hba.intr_enable)
14802 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
14803 else {
14804 mbox_tmo = lpfc_mbox_tmo_val(phba, mbox);
14805 rc = lpfc_sli_issue_mbox_wait(phba, mbox, mbox_tmo);
14806 }
14807 shdr = (union lpfc_sli4_cfg_shdr *) &sgl->cfg_shdr;
14808 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
14809 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
14810 if (rc != MBX_TIMEOUT)
14811 lpfc_sli4_mbox_cmd_free(phba, mbox);
14812 if (shdr_status || shdr_add_status || rc) {
14813 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
14814 "2564 POST_SGL_BLOCK mailbox command failed "
14815 "status x%x add_status x%x mbx status x%x\n",
14816 shdr_status, shdr_add_status, rc);
14817 rc = -ENXIO;
14818 }
14819 return rc;
14820}
14821
14822
14823
14824
14825
14826
14827
14828
14829
14830
14831
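/**
 * lpfc_fc_frame_check - Sanity check a received Fibre Channel frame
 * @phba: pointer to lpfc hba data structure.
 * @fc_hdr: pointer to the frame's FC header.
 *
 * Validates the R_CTL and TYPE fields of an unsolicited frame,
 * recursing past a VFT header when one is present.
 *
 * Return: 0 if the frame should be processed; 1 if it must be dropped.
 **/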
14832static int
14833lpfc_fc_frame_check(struct lpfc_hba *phba, struct fc_frame_header *fc_hdr)
14834{
14835
	static char *rctl_names[] = FC_RCTL_NAMES_INIT;
	static char *type_names[] = FC_TYPE_NAMES_INIT;
14838 struct fc_vft_header *fc_vft_hdr;
14839 uint32_t *header = (uint32_t *) fc_hdr;
14840
14841 switch (fc_hdr->fh_r_ctl) {
14842 case FC_RCTL_DD_UNCAT:
14843 case FC_RCTL_DD_SOL_DATA:
14844 case FC_RCTL_DD_UNSOL_CTL:
14845 case FC_RCTL_DD_SOL_CTL:
14846 case FC_RCTL_DD_UNSOL_DATA:
14847 case FC_RCTL_DD_DATA_DESC:
14848 case FC_RCTL_DD_UNSOL_CMD:
14849 case FC_RCTL_DD_CMD_STATUS:
14850 case FC_RCTL_ELS_REQ:
14851 case FC_RCTL_ELS_REP:
14852 case FC_RCTL_ELS4_REQ:
14853 case FC_RCTL_ELS4_REP:
14854 case FC_RCTL_BA_NOP:
14855 case FC_RCTL_BA_ABTS:
14856 case FC_RCTL_BA_RMC:
14857 case FC_RCTL_BA_ACC:
14858 case FC_RCTL_BA_RJT:
14859 case FC_RCTL_BA_PRMT:
14860 case FC_RCTL_ACK_1:
14861 case FC_RCTL_ACK_0:
14862 case FC_RCTL_P_RJT:
14863 case FC_RCTL_F_RJT:
14864 case FC_RCTL_P_BSY:
14865 case FC_RCTL_F_BSY:
14866 case FC_RCTL_F_BSYL:
14867 case FC_RCTL_LCR:
14868 case FC_RCTL_END:
14869 break;
14870 case FC_RCTL_VFTH:
14871 fc_vft_hdr = (struct fc_vft_header *)fc_hdr;
14872 fc_hdr = &((struct fc_frame_header *)fc_vft_hdr)[1];
14873 return lpfc_fc_frame_check(phba, fc_hdr);
14874 default:
14875 goto drop;
14876 }
14877 switch (fc_hdr->fh_type) {
14878 case FC_TYPE_BLS:
14879 case FC_TYPE_ELS:
14880 case FC_TYPE_FCP:
14881 case FC_TYPE_CT:
14882 break;
14883 case FC_TYPE_IP:
14884 case FC_TYPE_ILS:
14885 default:
14886 goto drop;
14887 }
14888
14889 lpfc_printf_log(phba, KERN_INFO, LOG_ELS,
14890 "2538 Received frame rctl:%s (x%x), type:%s (x%x), "
14891 "frame Data:%08x %08x %08x %08x %08x %08x %08x\n",
14892 rctl_names[fc_hdr->fh_r_ctl], fc_hdr->fh_r_ctl,
14893 type_names[fc_hdr->fh_type], fc_hdr->fh_type,
14894 be32_to_cpu(header[0]), be32_to_cpu(header[1]),
14895 be32_to_cpu(header[2]), be32_to_cpu(header[3]),
14896 be32_to_cpu(header[4]), be32_to_cpu(header[5]),
14897 be32_to_cpu(header[6]));
14898 return 0;
14899drop:
14900 lpfc_printf_log(phba, KERN_WARNING, LOG_ELS,
14901 "2539 Dropped frame rctl:%s type:%s\n",
14902 rctl_names[fc_hdr->fh_r_ctl],
14903 type_names[fc_hdr->fh_type]);
14904 return 1;
14905}
14906
14907
14908
14909
14910
14911
14912
14913
14914
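/**
 * lpfc_fc_hdr_get_vfi - Extract the VF ID from a VFT header
 * @fc_hdr: pointer to the frame's FC header.
 *
 * Return: the virtual fabric ID, or 0 when no VFT header is present.
 **/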
14915static uint32_t
14916lpfc_fc_hdr_get_vfi(struct fc_frame_header *fc_hdr)
14917{
14918 struct fc_vft_header *fc_vft_hdr = (struct fc_vft_header *)fc_hdr;
14919
14920 if (fc_hdr->fh_r_ctl != FC_RCTL_VFTH)
14921 return 0;
14922 return bf_get(fc_vft_hdr_vf_id, fc_vft_hdr);
14923}
14924
14925
14926
14927
14928
14929
14930
14931
14932
14933
14934
14935
14936
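/**
 * lpfc_fc_frame_to_vport - Resolve the vport a received frame belongs to
 * @phba: pointer to lpfc hba data structure.
 * @fc_hdr: pointer to the frame's FC header.
 * @fcfi: the FCF index the frame was received on.
 *
 * Matches the frame's D_ID, VFI and FCFI against the active vports.
 * Frames addressed to the fabric, or received in pt2pt mode before the
 * link is fully up, are handed to the physical port.
 *
 * Return: the matching vport, or NULL if none is found.
 **/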
14937static struct lpfc_vport *
14938lpfc_fc_frame_to_vport(struct lpfc_hba *phba, struct fc_frame_header *fc_hdr,
14939 uint16_t fcfi)
14940{
14941 struct lpfc_vport **vports;
14942 struct lpfc_vport *vport = NULL;
14943 int i;
14944 uint32_t did = (fc_hdr->fh_d_id[0] << 16 |
14945 fc_hdr->fh_d_id[1] << 8 |
14946 fc_hdr->fh_d_id[2]);
14947
14948 if (did == Fabric_DID)
14949 return phba->pport;
14950 if ((phba->pport->fc_flag & FC_PT2PT) &&
14951 !(phba->link_state == LPFC_HBA_READY))
14952 return phba->pport;
14953
14954 vports = lpfc_create_vport_work_array(phba);
14955 if (vports != NULL)
14956 for (i = 0; i <= phba->max_vpi && vports[i] != NULL; i++) {
14957 if (phba->fcf.fcfi == fcfi &&
14958 vports[i]->vfi == lpfc_fc_hdr_get_vfi(fc_hdr) &&
14959 vports[i]->fc_myDID == did) {
14960 vport = vports[i];
14961 break;
14962 }
14963 }
14964 lpfc_destroy_vport_work_array(phba, vports);
14965 return vport;
14966}
14967
14968
14969
14970
14971
14972
14973
14974
14975
14976
14977
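/**
 * lpfc_update_rcv_time_stamp - Refresh the vport's receive time stamp
 * @vport: the vport whose pending receive buffers are inspected.
 *
 * Copies the time stamp of the first (least recently updated) partial
 * sequence on the receive buffer list into the vport.
 **/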
14978static void
14979lpfc_update_rcv_time_stamp(struct lpfc_vport *vport)
14980{
14981 struct lpfc_dmabuf *h_buf;
14982 struct hbq_dmabuf *dmabuf = NULL;
14983
14984
14985 h_buf = list_get_first(&vport->rcv_buffer_list,
14986 struct lpfc_dmabuf, list);
14987 if (!h_buf)
14988 return;
14989 dmabuf = container_of(h_buf, struct hbq_dmabuf, hbuf);
14990 vport->rcv_buffer_time_stamp = dmabuf->time_stamp;
14991}
14992
14993
14994
14995
14996
14997
14998
14999
15000
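/**
 * lpfc_cleanup_rcv_buffers - Free all partially received sequences
 * @vport: the vport whose receive buffer list is cleaned up.
 *
 * Frees every header and data buffer queued on the vport's receive
 * buffer list.
 **/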
15001void
15002lpfc_cleanup_rcv_buffers(struct lpfc_vport *vport)
15003{
15004 struct lpfc_dmabuf *h_buf, *hnext;
15005 struct lpfc_dmabuf *d_buf, *dnext;
15006 struct hbq_dmabuf *dmabuf = NULL;
15007
15008
15009 list_for_each_entry_safe(h_buf, hnext, &vport->rcv_buffer_list, list) {
15010 dmabuf = container_of(h_buf, struct hbq_dmabuf, hbuf);
15011 list_del_init(&dmabuf->hbuf.list);
15012 list_for_each_entry_safe(d_buf, dnext,
15013 &dmabuf->dbuf.list, list) {
15014 list_del_init(&d_buf->list);
15015 lpfc_in_buf_free(vport->phba, d_buf);
15016 }
15017 lpfc_in_buf_free(vport->phba, &dmabuf->dbuf);
15018 }
15019}
15020
15021
15022
15023
15024
15025
15026
15027
15028
15029
15030
15031
15032
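/**
 * lpfc_rcv_seq_check_edtov - Time out stale partial sequences
 * @vport: the vport whose receive buffer list is checked.
 *
 * Frees any partial sequence that has not seen a new frame within
 * E_D_TOV, then refreshes the vport's receive time stamp.
 **/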
15033void
15034lpfc_rcv_seq_check_edtov(struct lpfc_vport *vport)
15035{
15036 struct lpfc_dmabuf *h_buf, *hnext;
15037 struct lpfc_dmabuf *d_buf, *dnext;
15038 struct hbq_dmabuf *dmabuf = NULL;
15039 unsigned long timeout;
15040 int abort_count = 0;
15041
15042 timeout = (msecs_to_jiffies(vport->phba->fc_edtov) +
15043 vport->rcv_buffer_time_stamp);
15044 if (list_empty(&vport->rcv_buffer_list) ||
15045 time_before(jiffies, timeout))
15046 return;
15047
15048 list_for_each_entry_safe(h_buf, hnext, &vport->rcv_buffer_list, list) {
15049 dmabuf = container_of(h_buf, struct hbq_dmabuf, hbuf);
15050 timeout = (msecs_to_jiffies(vport->phba->fc_edtov) +
15051 dmabuf->time_stamp);
15052 if (time_before(jiffies, timeout))
15053 break;
15054 abort_count++;
15055 list_del_init(&dmabuf->hbuf.list);
15056 list_for_each_entry_safe(d_buf, dnext,
15057 &dmabuf->dbuf.list, list) {
15058 list_del_init(&d_buf->list);
15059 lpfc_in_buf_free(vport->phba, d_buf);
15060 }
15061 lpfc_in_buf_free(vport->phba, &dmabuf->dbuf);
15062 }
15063 if (abort_count)
15064 lpfc_update_rcv_time_stamp(vport);
15065}
15066
15067
15068
15069
15070
15071
15072
15073
15074
15075
15076
15077
15078
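/**
 * lpfc_fc_frame_add - Add a received frame to its partial sequence
 * @vport: the vport the frame was received on.
 * @dmabuf: the buffer holding the new frame.
 *
 * Looks for a pending sequence with matching SEQ_ID, OX_ID and S_ID.
 * If none exists the frame starts a new sequence; otherwise it is
 * inserted in SEQ_CNT order.
 *
 * Return: the sequence the frame was linked into, or NULL when the
 * frame duplicates one already queued.
 **/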
15079static struct hbq_dmabuf *
15080lpfc_fc_frame_add(struct lpfc_vport *vport, struct hbq_dmabuf *dmabuf)
15081{
15082 struct fc_frame_header *new_hdr;
15083 struct fc_frame_header *temp_hdr;
15084 struct lpfc_dmabuf *d_buf;
15085 struct lpfc_dmabuf *h_buf;
15086 struct hbq_dmabuf *seq_dmabuf = NULL;
15087 struct hbq_dmabuf *temp_dmabuf = NULL;
15088 uint8_t found = 0;
15089
15090 INIT_LIST_HEAD(&dmabuf->dbuf.list);
15091 dmabuf->time_stamp = jiffies;
15092 new_hdr = (struct fc_frame_header *)dmabuf->hbuf.virt;
15093
15094
15095 list_for_each_entry(h_buf, &vport->rcv_buffer_list, list) {
15096 temp_hdr = (struct fc_frame_header *)h_buf->virt;
15097 if ((temp_hdr->fh_seq_id != new_hdr->fh_seq_id) ||
15098 (temp_hdr->fh_ox_id != new_hdr->fh_ox_id) ||
15099 (memcmp(&temp_hdr->fh_s_id, &new_hdr->fh_s_id, 3)))
15100 continue;
15101
15102 seq_dmabuf = container_of(h_buf, struct hbq_dmabuf, hbuf);
15103 break;
15104 }
15105 if (!seq_dmabuf) {
15106
15107
15108
15109
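		/* First frame of this sequence: start a new pending list */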
15110 list_add_tail(&dmabuf->hbuf.list, &vport->rcv_buffer_list);
15111 lpfc_update_rcv_time_stamp(vport);
15112 return dmabuf;
15113 }
15114 temp_hdr = seq_dmabuf->hbuf.virt;
15115 if (be16_to_cpu(new_hdr->fh_seq_cnt) <
15116 be16_to_cpu(temp_hdr->fh_seq_cnt)) {
15117 list_del_init(&seq_dmabuf->hbuf.list);
15118 list_add_tail(&dmabuf->hbuf.list, &vport->rcv_buffer_list);
15119 list_add_tail(&dmabuf->dbuf.list, &seq_dmabuf->dbuf.list);
15120 lpfc_update_rcv_time_stamp(vport);
15121 return dmabuf;
15122 }
15123
15124 list_move_tail(&seq_dmabuf->hbuf.list, &vport->rcv_buffer_list);
15125 seq_dmabuf->time_stamp = jiffies;
15126 lpfc_update_rcv_time_stamp(vport);
15127 if (list_empty(&seq_dmabuf->dbuf.list)) {
15128 temp_hdr = dmabuf->hbuf.virt;
15129 list_add_tail(&dmabuf->dbuf.list, &seq_dmabuf->dbuf.list);
15130 return seq_dmabuf;
15131 }
15132
15133 d_buf = list_entry(seq_dmabuf->dbuf.list.prev, typeof(*d_buf), list);
15134 while (!found) {
15135 temp_dmabuf = container_of(d_buf, struct hbq_dmabuf, dbuf);
15136 temp_hdr = (struct fc_frame_header *)temp_dmabuf->hbuf.virt;
15137
15138
15139
15140
15141 if (be16_to_cpu(new_hdr->fh_seq_cnt) >
15142 be16_to_cpu(temp_hdr->fh_seq_cnt)) {
15143 list_add(&dmabuf->dbuf.list, &temp_dmabuf->dbuf.list);
15144 found = 1;
15145 break;
15146 }
15147
15148 if (&d_buf->list == &seq_dmabuf->dbuf.list)
15149 break;
15150 d_buf = list_entry(d_buf->list.prev, typeof(*d_buf), list);
15151 }
15152
15153 if (found)
15154 return seq_dmabuf;
15155 return NULL;
15156}
15157
15158
15159
15160
15161
15162
15163
15164
15165
15166
15167
15168
15169
15170
15171
15172
15173
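/**
 * lpfc_sli4_abort_partial_seq - Drop a partial sequence on ABTS
 * @vport: the vport the ABTS was received on.
 * @dmabuf: the buffer holding the ABTS frame.
 *
 * Frees any buffered frames belonging to the aborted sequence.
 *
 * Return: true if a matching partial sequence was found and freed.
 **/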
15174static bool
15175lpfc_sli4_abort_partial_seq(struct lpfc_vport *vport,
15176 struct hbq_dmabuf *dmabuf)
15177{
15178 struct fc_frame_header *new_hdr;
15179 struct fc_frame_header *temp_hdr;
15180 struct lpfc_dmabuf *d_buf, *n_buf, *h_buf;
15181 struct hbq_dmabuf *seq_dmabuf = NULL;
15182
15183
15184 INIT_LIST_HEAD(&dmabuf->dbuf.list);
15185 INIT_LIST_HEAD(&dmabuf->hbuf.list);
15186 new_hdr = (struct fc_frame_header *)dmabuf->hbuf.virt;
15187 list_for_each_entry(h_buf, &vport->rcv_buffer_list, list) {
15188 temp_hdr = (struct fc_frame_header *)h_buf->virt;
15189 if ((temp_hdr->fh_seq_id != new_hdr->fh_seq_id) ||
15190 (temp_hdr->fh_ox_id != new_hdr->fh_ox_id) ||
15191 (memcmp(&temp_hdr->fh_s_id, &new_hdr->fh_s_id, 3)))
15192 continue;
15193
15194 seq_dmabuf = container_of(h_buf, struct hbq_dmabuf, hbuf);
15195 break;
15196 }
15197
15198
15199 if (seq_dmabuf) {
15200 list_for_each_entry_safe(d_buf, n_buf,
15201 &seq_dmabuf->dbuf.list, list) {
15202 list_del_init(&d_buf->list);
15203 lpfc_in_buf_free(vport->phba, d_buf);
15204 }
15205 return true;
15206 }
15207 return false;
15208}
15209
15210
15211
15212
15213
15214
15215
15216
15217
15218
15219
15220
15221
15222
15223
15224
15225
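/**
 * lpfc_sli4_abort_ulp_seq - Let the upper layer handle an abort
 * @vport: the vport the ABTS was received on.
 * @dmabuf: the buffer holding the ABTS frame.
 *
 * Offers the unsolicited abort to the CT handler.
 *
 * Return: true if the abort was accepted by the upper layer.
 **/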
15226static bool
15227lpfc_sli4_abort_ulp_seq(struct lpfc_vport *vport, struct hbq_dmabuf *dmabuf)
15228{
15229 struct lpfc_hba *phba = vport->phba;
15230 int handled;
15231
15232
15233 if (phba->sli_rev < LPFC_SLI_REV4)
15234 return false;
15235
15236
15237 handled = lpfc_ct_handle_unsol_abort(phba, dmabuf);
15238 if (handled)
15239 return true;
15240
15241 return false;
15242}
15243
15244
15245
15246
15247
15248
15249
15250
15251
15252
15253
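/**
 * lpfc_sli4_seq_abort_rsp_cmpl - BLS sequence abort response completion
 * @phba: pointer to lpfc hba data structure.
 * @cmd_iocbq: the completed BA_ACC/BA_RJT command iocb.
 * @rsp_iocbq: the response iocb, if any.
 *
 * Drops the node reference taken when the response was issued,
 * releases the iocb and logs any response failure.
 **/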
15254static void
15255lpfc_sli4_seq_abort_rsp_cmpl(struct lpfc_hba *phba,
15256 struct lpfc_iocbq *cmd_iocbq,
15257 struct lpfc_iocbq *rsp_iocbq)
15258{
15259 struct lpfc_nodelist *ndlp;
15260
15261 if (cmd_iocbq) {
15262 ndlp = (struct lpfc_nodelist *)cmd_iocbq->context1;
15263 lpfc_nlp_put(ndlp);
15264 lpfc_nlp_not_used(ndlp);
15265 lpfc_sli_release_iocbq(phba, cmd_iocbq);
15266 }
15267
15268
15269 if (rsp_iocbq && rsp_iocbq->iocb.ulpStatus)
15270 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
15271 "3154 BLS ABORT RSP failed, data: x%x/x%x\n",
15272 rsp_iocbq->iocb.ulpStatus,
15273 rsp_iocbq->iocb.un.ulpWord[4]);
15274}
15275
15276
15277
15278
15279
15280
15281
15282
15283
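/**
 * lpfc_sli4_xri_inrange - Map a physical XRI to its driver index
 * @phba: pointer to lpfc hba data structure.
 * @xri: the physical XRI value to look up.
 *
 * Return: the driver's logical index for @xri, or NO_XRI if the value
 * is not in the port's XRI range.
 **/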
15284uint16_t
15285lpfc_sli4_xri_inrange(struct lpfc_hba *phba,
15286 uint16_t xri)
15287{
15288 uint16_t i;
15289
15290 for (i = 0; i < phba->sli4_hba.max_cfg_param.max_xri; i++) {
15291 if (xri == phba->sli4_hba.xri_ids[i])
15292 return i;
15293 }
15294 return NO_XRI;
15295}
15296
15297
15298
15299
15300
15301
15302
15303
15304
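/**
 * lpfc_sli4_seq_abort_rsp - Respond to an unsolicited sequence abort
 * @vport: the vport the ABTS was received on.
 * @fc_hdr: the FC header of the ABTS frame.
 * @aborted: whether the exchange was successfully aborted locally.
 *
 * Builds and issues an XMIT_BLS_RSP iocb carrying either a BA_ACC or,
 * when the exchange could not be aborted or is out of range, a BA_RJT.
 **/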
15305static void
15306lpfc_sli4_seq_abort_rsp(struct lpfc_vport *vport,
15307 struct fc_frame_header *fc_hdr, bool aborted)
15308{
15309 struct lpfc_hba *phba = vport->phba;
15310 struct lpfc_iocbq *ctiocb = NULL;
15311 struct lpfc_nodelist *ndlp;
15312 uint16_t oxid, rxid, xri, lxri;
15313 uint32_t sid, fctl;
15314 IOCB_t *icmd;
15315 int rc;
15316
15317 if (!lpfc_is_link_up(phba))
15318 return;
15319
15320 sid = sli4_sid_from_fc_hdr(fc_hdr);
15321 oxid = be16_to_cpu(fc_hdr->fh_ox_id);
15322 rxid = be16_to_cpu(fc_hdr->fh_rx_id);
15323
15324 ndlp = lpfc_findnode_did(vport, sid);
15325 if (!ndlp) {
15326 ndlp = mempool_alloc(phba->nlp_mem_pool, GFP_KERNEL);
15327 if (!ndlp) {
15328 lpfc_printf_vlog(vport, KERN_WARNING, LOG_ELS,
15329 "1268 Failed to allocate ndlp for "
15330 "oxid:x%x SID:x%x\n", oxid, sid);
15331 return;
15332 }
15333 lpfc_nlp_init(vport, ndlp, sid);
15334
15335 lpfc_enqueue_node(vport, ndlp);
15336 } else if (!NLP_CHK_NODE_ACT(ndlp)) {
15337
15338 ndlp = lpfc_enable_node(vport, ndlp, NLP_STE_UNUSED_NODE);
15339 if (!ndlp) {
15340 lpfc_printf_vlog(vport, KERN_WARNING, LOG_ELS,
15341 "3275 Failed to active ndlp found "
15342 "for oxid:x%x SID:x%x\n", oxid, sid);
15343 return;
15344 }
15345 }
15346
15347
15348 ctiocb = lpfc_sli_get_iocbq(phba);
15349 if (!ctiocb)
15350 return;
15351
15352
15353 fctl = sli4_fctl_from_fc_hdr(fc_hdr);
15354
15355 icmd = &ctiocb->iocb;
15356 icmd->un.xseq64.bdl.bdeSize = 0;
15357 icmd->un.xseq64.bdl.ulpIoTag32 = 0;
15358 icmd->un.xseq64.w5.hcsw.Dfctl = 0;
15359 icmd->un.xseq64.w5.hcsw.Rctl = FC_RCTL_BA_ACC;
15360 icmd->un.xseq64.w5.hcsw.Type = FC_TYPE_BLS;
15361
15362
15363 icmd->ulpCommand = CMD_XMIT_BLS_RSP64_CX;
15364 icmd->ulpBdeCount = 0;
15365 icmd->ulpLe = 1;
15366 icmd->ulpClass = CLASS3;
15367 icmd->ulpContext = phba->sli4_hba.rpi_ids[ndlp->nlp_rpi];
15368 ctiocb->context1 = lpfc_nlp_get(ndlp);
15369
	ctiocb->vport = phba->pport;
	ctiocb->iocb_cmpl = lpfc_sli4_seq_abort_rsp_cmpl;
15373 ctiocb->sli4_lxritag = NO_XRI;
15374 ctiocb->sli4_xritag = NO_XRI;
15375
15376 if (fctl & FC_FC_EX_CTX)
15377
15378
15379
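		/*
		 * ABTS sent by the exchange responder: the OX_ID
		 * identifies our local exchange.
		 */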
15380 xri = oxid;
15381 else
15382 xri = rxid;
15383 lxri = lpfc_sli4_xri_inrange(phba, xri);
15384 if (lxri != NO_XRI)
15385 lpfc_set_rrq_active(phba, ndlp, lxri,
15386 (xri == oxid) ? rxid : oxid, 0);
15387
15388
15389
15390
15391
15392 if ((fctl & FC_FC_EX_CTX) &&
15393 (lxri > lpfc_sli4_get_els_iocb_cnt(phba))) {
15394 icmd->un.xseq64.w5.hcsw.Rctl = FC_RCTL_BA_RJT;
15395 bf_set(lpfc_vndr_code, &icmd->un.bls_rsp, 0);
15396 bf_set(lpfc_rsn_expln, &icmd->un.bls_rsp, FC_BA_RJT_INV_XID);
15397 bf_set(lpfc_rsn_code, &icmd->un.bls_rsp, FC_BA_RJT_UNABLE);
15398 }
15399
15400
15401
15402
15403
	if (!aborted) {
15405 icmd->un.xseq64.w5.hcsw.Rctl = FC_RCTL_BA_RJT;
15406 bf_set(lpfc_vndr_code, &icmd->un.bls_rsp, 0);
15407 bf_set(lpfc_rsn_expln, &icmd->un.bls_rsp, FC_BA_RJT_INV_XID);
15408 bf_set(lpfc_rsn_code, &icmd->un.bls_rsp, FC_BA_RJT_UNABLE);
15409 }
15410
15411 if (fctl & FC_FC_EX_CTX) {
15412
15413
15414
15415
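		/*
		 * ABTS sent by the exchange responder: the BA_ACC/BA_RJT
		 * uses the OX_ID and RX_ID from the ABTS frame.
		 */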
15416 bf_set(lpfc_abts_orig, &icmd->un.bls_rsp, LPFC_ABTS_UNSOL_RSP);
15417 } else {
15418
15419
15420
15421
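		/*
		 * ABTS sent by the exchange originator: the response
		 * requires a new XRI for its XRI_TAG field.
		 */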
15422 bf_set(lpfc_abts_orig, &icmd->un.bls_rsp, LPFC_ABTS_UNSOL_INT);
15423 }
15424 bf_set(lpfc_abts_rxid, &icmd->un.bls_rsp, rxid);
15425 bf_set(lpfc_abts_oxid, &icmd->un.bls_rsp, oxid);
15426
15427
15428 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
15429 "1200 Send BLS cmd x%x on oxid x%x Data: x%x\n",
15430 icmd->un.xseq64.w5.hcsw.Rctl, oxid, phba->link_state);
15431
15432 rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, ctiocb, 0);
15433 if (rc == IOCB_ERROR) {
15434 lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS,
15435 "2925 Failed to issue CT ABTS RSP x%x on "
15436 "xri x%x, Data x%x\n",
15437 icmd->un.xseq64.w5.hcsw.Rctl, oxid,
15438 phba->link_state);
15439 lpfc_nlp_put(ndlp);
15440 ctiocb->context1 = NULL;
15441 lpfc_sli_release_iocbq(phba, ctiocb);
15442 }
15443}
15444
15445
15446
15447
15448
15449
15450
15451
15452
15453
15454
15455
15456
15457
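/**
 * lpfc_sli4_handle_unsol_abort - Handle an unsolicited ABTS event
 * @vport: the vport the ABTS was received on.
 * @dmabuf: the buffer holding the ABTS frame.
 *
 * Aborts the targeted partial or ULP-owned sequence, frees the frame
 * buffer and sends the appropriate BLS response.
 **/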
15458static void
15459lpfc_sli4_handle_unsol_abort(struct lpfc_vport *vport,
15460 struct hbq_dmabuf *dmabuf)
15461{
15462 struct lpfc_hba *phba = vport->phba;
15463 struct fc_frame_header fc_hdr;
15464 uint32_t fctl;
15465 bool aborted;
15466
15467
15468 memcpy(&fc_hdr, dmabuf->hbuf.virt, sizeof(struct fc_frame_header));
15469 fctl = sli4_fctl_from_fc_hdr(&fc_hdr);
15470
15471 if (fctl & FC_FC_EX_CTX) {
15472
15473 aborted = true;
15474 } else {
15475
15476 aborted = lpfc_sli4_abort_partial_seq(vport, dmabuf);
		if (!aborted)
15478 aborted = lpfc_sli4_abort_ulp_seq(vport, dmabuf);
15479 }
15480 lpfc_in_buf_free(phba, &dmabuf->dbuf);
15481
15482
15483 lpfc_sli4_seq_abort_rsp(vport, &fc_hdr, aborted);
15484}
15485
15486
15487
15488
15489
15490
15491
15492
15493
15494
15495
15496
15497
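/**
 * lpfc_seq_complete - Check whether a buffered sequence is complete
 * @dmabuf: the first frame of the sequence.
 *
 * Walks the frames linked to @dmabuf verifying consecutive SEQ_CNT
 * values and looking for a frame with END_SEQ set.
 *
 * Return: 1 if the sequence is complete, otherwise 0.
 **/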
15498static int
15499lpfc_seq_complete(struct hbq_dmabuf *dmabuf)
15500{
15501 struct fc_frame_header *hdr;
15502 struct lpfc_dmabuf *d_buf;
15503 struct hbq_dmabuf *seq_dmabuf;
15504 uint32_t fctl;
15505 int seq_count = 0;
15506
15507 hdr = (struct fc_frame_header *)dmabuf->hbuf.virt;
15508
	if (be16_to_cpu(hdr->fh_seq_cnt) != seq_count)
15510 return 0;
15511 fctl = (hdr->fh_f_ctl[0] << 16 |
15512 hdr->fh_f_ctl[1] << 8 |
15513 hdr->fh_f_ctl[2]);
15514
15515 if (fctl & FC_FC_END_SEQ)
15516 return 1;
15517 list_for_each_entry(d_buf, &dmabuf->dbuf.list, list) {
15518 seq_dmabuf = container_of(d_buf, struct hbq_dmabuf, dbuf);
15519 hdr = (struct fc_frame_header *)seq_dmabuf->hbuf.virt;
15520
15521 if (++seq_count != be16_to_cpu(hdr->fh_seq_cnt))
15522 return 0;
15523 fctl = (hdr->fh_f_ctl[0] << 16 |
15524 hdr->fh_f_ctl[1] << 8 |
15525 hdr->fh_f_ctl[2]);
15526
15527 if (fctl & FC_FC_END_SEQ)
15528 return 1;
15529 }
15530 return 0;
15531}
15532
15533
15534
15535
15536
15537
15538
15539
15540
15541
15542
15543
15544
15545
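/**
 * lpfc_prep_seq - Build an iocb chain from a received sequence
 * @vport: the vport the sequence was received on.
 * @seq_dmabuf: the buffered frames making up the sequence.
 *
 * Converts the frames of a completed sequence into a list of receive
 * iocbs, packing up to two payload buffers per iocb, so the sequence
 * can be presented through the existing unsolicited iocb path.
 *
 * Return: the first iocb of the chain, or NULL on allocation failure.
 **/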
15546static struct lpfc_iocbq *
15547lpfc_prep_seq(struct lpfc_vport *vport, struct hbq_dmabuf *seq_dmabuf)
15548{
15549 struct hbq_dmabuf *hbq_buf;
15550 struct lpfc_dmabuf *d_buf, *n_buf;
15551 struct lpfc_iocbq *first_iocbq, *iocbq;
15552 struct fc_frame_header *fc_hdr;
15553 uint32_t sid;
15554 uint32_t len, tot_len;
15555 struct ulp_bde64 *pbde;
15556
15557 fc_hdr = (struct fc_frame_header *)seq_dmabuf->hbuf.virt;
15558
15559 list_del_init(&seq_dmabuf->hbuf.list);
15560 lpfc_update_rcv_time_stamp(vport);
15561
15562 sid = sli4_sid_from_fc_hdr(fc_hdr);
15563 tot_len = 0;
15564
15565 first_iocbq = lpfc_sli_get_iocbq(vport->phba);
15566 if (first_iocbq) {
15567
15568 first_iocbq->iocb.unsli3.rcvsli3.acc_len = 0;
15569 first_iocbq->iocb.ulpStatus = IOSTAT_SUCCESS;
15570
15571
15572 if (sli4_type_from_fc_hdr(fc_hdr) == FC_TYPE_ELS) {
15573 first_iocbq->iocb.ulpCommand = CMD_IOCB_RCV_ELS64_CX;
15574 first_iocbq->iocb.un.rcvels.parmRo =
15575 sli4_did_from_fc_hdr(fc_hdr);
15576 first_iocbq->iocb.ulpPU = PARM_NPIV_DID;
15577 } else
15578 first_iocbq->iocb.ulpCommand = CMD_IOCB_RCV_SEQ64_CX;
15579 first_iocbq->iocb.ulpContext = NO_XRI;
15580 first_iocbq->iocb.unsli3.rcvsli3.ox_id =
15581 be16_to_cpu(fc_hdr->fh_ox_id);
15582
15583 first_iocbq->iocb.unsli3.rcvsli3.vpi =
15584 vport->phba->vpi_ids[vport->vpi];
15585
15586 tot_len = bf_get(lpfc_rcqe_length,
15587 &seq_dmabuf->cq_event.cqe.rcqe_cmpl);
15588
15589 first_iocbq->context2 = &seq_dmabuf->dbuf;
15590 first_iocbq->context3 = NULL;
15591 first_iocbq->iocb.ulpBdeCount = 1;
15592 if (tot_len > LPFC_DATA_BUF_SIZE)
15593 first_iocbq->iocb.un.cont64[0].tus.f.bdeSize =
15594 LPFC_DATA_BUF_SIZE;
15595 else
15596 first_iocbq->iocb.un.cont64[0].tus.f.bdeSize = tot_len;
15597
15598 first_iocbq->iocb.un.rcvels.remoteID = sid;
15599
15600 first_iocbq->iocb.unsli3.rcvsli3.acc_len = tot_len;
15601 }
15602 iocbq = first_iocbq;
15603
15604
15605
15606
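	/*
	 * Attach the remaining frames of the sequence, packing up to two
	 * payload buffers into each receive iocb.
	 */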
15607 list_for_each_entry_safe(d_buf, n_buf, &seq_dmabuf->dbuf.list, list) {
15608 if (!iocbq) {
15609 lpfc_in_buf_free(vport->phba, d_buf);
15610 continue;
15611 }
15612 if (!iocbq->context3) {
15613 iocbq->context3 = d_buf;
15614 iocbq->iocb.ulpBdeCount++;
15615
15616 hbq_buf = container_of(d_buf, struct hbq_dmabuf, dbuf);
15617 len = bf_get(lpfc_rcqe_length,
15618 &hbq_buf->cq_event.cqe.rcqe_cmpl);
15619 pbde = (struct ulp_bde64 *)
15620 &iocbq->iocb.unsli3.sli3Words[4];
15621 if (len > LPFC_DATA_BUF_SIZE)
15622 pbde->tus.f.bdeSize = LPFC_DATA_BUF_SIZE;
15623 else
15624 pbde->tus.f.bdeSize = len;
15625
15626 iocbq->iocb.unsli3.rcvsli3.acc_len += len;
15627 tot_len += len;
15628 } else {
15629 iocbq = lpfc_sli_get_iocbq(vport->phba);
15630 if (!iocbq) {
15631 if (first_iocbq) {
15632 first_iocbq->iocb.ulpStatus =
15633 IOSTAT_FCP_RSP_ERROR;
15634 first_iocbq->iocb.un.ulpWord[4] =
15635 IOERR_NO_RESOURCES;
15636 }
15637 lpfc_in_buf_free(vport->phba, d_buf);
15638 continue;
15639 }
15640
15641 hbq_buf = container_of(d_buf, struct hbq_dmabuf, dbuf);
15642 len = bf_get(lpfc_rcqe_length,
15643 &hbq_buf->cq_event.cqe.rcqe_cmpl);
15644 iocbq->context2 = d_buf;
15645 iocbq->context3 = NULL;
15646 iocbq->iocb.ulpBdeCount = 1;
15647 if (len > LPFC_DATA_BUF_SIZE)
15648 iocbq->iocb.un.cont64[0].tus.f.bdeSize =
15649 LPFC_DATA_BUF_SIZE;
15650 else
15651 iocbq->iocb.un.cont64[0].tus.f.bdeSize = len;
15652
15653 tot_len += len;
15654 iocbq->iocb.unsli3.rcvsli3.acc_len = tot_len;
15655
15656 iocbq->iocb.un.rcvels.remoteID = sid;
15657 list_add_tail(&iocbq->list, &first_iocbq->list);
15658 }
15659 }
15660 return first_iocbq;
15661}
15662
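/**
 * lpfc_sli4_send_seq_to_ulp - Deliver a complete sequence to the ULP
 * @vport: the vport the sequence was received on.
 * @seq_dmabuf: the buffered frames making up the sequence.
 *
 * Wraps the sequence in iocbs via lpfc_prep_seq() and hands it to the
 * unsolicited iocb handler, then releases the iocbs.
 **/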
15663static void
15664lpfc_sli4_send_seq_to_ulp(struct lpfc_vport *vport,
15665 struct hbq_dmabuf *seq_dmabuf)
15666{
15667 struct fc_frame_header *fc_hdr;
15668 struct lpfc_iocbq *iocbq, *curr_iocb, *next_iocb;
15669 struct lpfc_hba *phba = vport->phba;
15670
15671 fc_hdr = (struct fc_frame_header *)seq_dmabuf->hbuf.virt;
15672 iocbq = lpfc_prep_seq(vport, seq_dmabuf);
15673 if (!iocbq) {
15674 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
15675 "2707 Ring %d handler: Failed to allocate "
15676 "iocb Rctl x%x Type x%x received\n",
15677 LPFC_ELS_RING,
15678 fc_hdr->fh_r_ctl, fc_hdr->fh_type);
15679 return;
15680 }
15681 if (!lpfc_complete_unsol_iocb(phba,
15682 &phba->sli.ring[LPFC_ELS_RING],
15683 iocbq, fc_hdr->fh_r_ctl,
15684 fc_hdr->fh_type))
15685 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
15686 "2540 Ring %d handler: unexpected Rctl "
15687 "x%x Type x%x received\n",
15688 LPFC_ELS_RING,
15689 fc_hdr->fh_r_ctl, fc_hdr->fh_type);
15690
15691
15692 list_for_each_entry_safe(curr_iocb, next_iocb,
15693 &iocbq->list, list) {
15694 list_del_init(&curr_iocb->list);
15695 lpfc_sli_release_iocbq(phba, curr_iocb);
15696 }
15697 lpfc_sli_release_iocbq(phba, iocbq);
15698}
15699
15700
15701
15702
15703
15704
15705
15706
15707
15708
15709
15710
15711
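/**
 * lpfc_sli4_handle_received_buffer - Process an unsolicited receive frame
 * @phba: pointer to lpfc hba data structure.
 * @dmabuf: the buffer holding the received frame.
 *
 * Entry point for frames taken off the receive queue: checks the frame,
 * resolves its vport, handles unsolicited ABTS directly, and otherwise
 * accumulates the frame until its sequence is complete before passing
 * it to the upper layer.
 **/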
15712void
15713lpfc_sli4_handle_received_buffer(struct lpfc_hba *phba,
15714 struct hbq_dmabuf *dmabuf)
15715{
15716 struct hbq_dmabuf *seq_dmabuf;
15717 struct fc_frame_header *fc_hdr;
15718 struct lpfc_vport *vport;
15719 uint32_t fcfi;
15720 uint32_t did;
15721
15722
15723 fc_hdr = (struct fc_frame_header *)dmabuf->hbuf.virt;
15724
15725 if (lpfc_fc_frame_check(phba, fc_hdr)) {
15726 lpfc_in_buf_free(phba, &dmabuf->dbuf);
15727 return;
15728 }
15729 if ((bf_get(lpfc_cqe_code,
15730 &dmabuf->cq_event.cqe.rcqe_cmpl) == CQE_CODE_RECEIVE_V1))
15731 fcfi = bf_get(lpfc_rcqe_fcf_id_v1,
15732 &dmabuf->cq_event.cqe.rcqe_cmpl);
15733 else
15734 fcfi = bf_get(lpfc_rcqe_fcf_id,
15735 &dmabuf->cq_event.cqe.rcqe_cmpl);
15736
15737 vport = lpfc_fc_frame_to_vport(phba, fc_hdr, fcfi);
15738 if (!vport) {
15739
15740 lpfc_in_buf_free(phba, &dmabuf->dbuf);
15741 return;
15742 }
15743
15744
15745 did = sli4_did_from_fc_hdr(fc_hdr);
15746
15747
15748 if (!(vport->vpi_state & LPFC_VPI_REGISTERED) &&
15749 (did != Fabric_DID)) {
15750
15751
15752
15753
15754
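		/*
		 * The VPI is not yet registered: drop the frame unless
		 * this is a pt2pt link that is still coming up.
		 */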
15755 if (!(vport->fc_flag & FC_PT2PT) ||
15756 (phba->link_state == LPFC_HBA_READY)) {
15757 lpfc_in_buf_free(phba, &dmabuf->dbuf);
15758 return;
15759 }
15760 }
15761
15762
15763 if (fc_hdr->fh_r_ctl == FC_RCTL_BA_ABTS) {
15764 lpfc_sli4_handle_unsol_abort(vport, dmabuf);
15765 return;
15766 }
15767
15768
15769 seq_dmabuf = lpfc_fc_frame_add(vport, dmabuf);
15770 if (!seq_dmabuf) {
15771
15772 lpfc_in_buf_free(phba, &dmabuf->dbuf);
15773 return;
15774 }
15775
15776 if (!lpfc_seq_complete(seq_dmabuf))
15777 return;
15778
15779
15780 lpfc_sli4_send_seq_to_ulp(vport, seq_dmabuf);
15781}
15782
15783
15784
15785
15786
15787
15788
15789
15790
15791
15792
15793
15794
15795
15796
15797
15798
15799
15800
15801
15802
15803
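/**
 * lpfc_sli4_post_all_rpi_hdrs - Post all rpi header pages to the port
 * @phba: pointer to lpfc hba data structure.
 *
 * Posts every rpi header page on the driver's list and then marks the
 * RPI resources ready.
 *
 * Return: 0 on success; -EIO if extents are in use or a post fails.
 **/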
15804int
15805lpfc_sli4_post_all_rpi_hdrs(struct lpfc_hba *phba)
15806{
15807 struct lpfc_rpi_hdr *rpi_page;
	int rc = 0;
15809 uint16_t lrpi = 0;
15810
15811
15812 if (!phba->sli4_hba.rpi_hdrs_in_use)
15813 goto exit;
15814 if (phba->sli4_hba.extents_in_use)
15815 return -EIO;
15816
15817 list_for_each_entry(rpi_page, &phba->sli4_hba.lpfc_rpi_hdr_list, list) {
15818
15819
15820
15821
15822
15823 if (bf_get(lpfc_rpi_rsrc_rdy, &phba->sli4_hba.sli4_flags) !=
15824 LPFC_RPI_RSRC_RDY)
15825 rpi_page->start_rpi = phba->sli4_hba.rpi_ids[lrpi];
15826
15827 rc = lpfc_sli4_post_rpi_hdr(phba, rpi_page);
15828 if (rc != MBX_SUCCESS) {
15829 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
15830 "2008 Error %d posting all rpi "
15831 "headers\n", rc);
15832 rc = -EIO;
15833 break;
15834 }
15835 }
15836
15837 exit:
15838 bf_set(lpfc_rpi_rsrc_rdy, &phba->sli4_hba.sli4_flags,
15839 LPFC_RPI_RSRC_RDY);
15840 return rc;
15841}
15842
15843
15844
15845
15846
15847
15848
15849
15850
15851
15852
15853
15854
15855
15856
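/**
 * lpfc_sli4_post_rpi_hdr - Post one rpi header page to the port
 * @phba: pointer to lpfc hba data structure.
 * @rpi_page: the rpi header page to post.
 *
 * Issues a POST_HDR_TEMPLATE mailbox command describing the physical
 * address and rpi offset of @rpi_page.
 *
 * Return: 0 on success; -EIO, -ENOMEM or -ENXIO on failure.
 **/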
15857int
15858lpfc_sli4_post_rpi_hdr(struct lpfc_hba *phba, struct lpfc_rpi_hdr *rpi_page)
15859{
15860 LPFC_MBOXQ_t *mboxq;
15861 struct lpfc_mbx_post_hdr_tmpl *hdr_tmpl;
	int rc = 0;
15863 uint32_t shdr_status, shdr_add_status;
15864 union lpfc_sli4_cfg_shdr *shdr;
15865
15866
15867 if (!phba->sli4_hba.rpi_hdrs_in_use)
15868 return rc;
15869 if (phba->sli4_hba.extents_in_use)
15870 return -EIO;
15871
15872
15873 mboxq = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
15874 if (!mboxq) {
15875 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
15876 "2001 Unable to allocate memory for issuing "
15877 "SLI_CONFIG_SPECIAL mailbox command\n");
15878 return -ENOMEM;
15879 }
15880
15881
15882 hdr_tmpl = &mboxq->u.mqe.un.hdr_tmpl;
15883 lpfc_sli4_config(phba, mboxq, LPFC_MBOX_SUBSYSTEM_FCOE,
15884 LPFC_MBOX_OPCODE_FCOE_POST_HDR_TEMPLATE,
15885 sizeof(struct lpfc_mbx_post_hdr_tmpl) -
15886 sizeof(struct lpfc_sli4_cfg_mhdr),
15887 LPFC_SLI4_MBX_EMBED);
15888
15889
15890
15891 bf_set(lpfc_mbx_post_hdr_tmpl_rpi_offset, hdr_tmpl,
15892 rpi_page->start_rpi);
15893 bf_set(lpfc_mbx_post_hdr_tmpl_page_cnt,
15894 hdr_tmpl, rpi_page->page_count);
15895
15896 hdr_tmpl->rpi_paddr_lo = putPaddrLow(rpi_page->dmabuf->phys);
15897 hdr_tmpl->rpi_paddr_hi = putPaddrHigh(rpi_page->dmabuf->phys);
15898 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
15899 shdr = (union lpfc_sli4_cfg_shdr *) &hdr_tmpl->header.cfg_shdr;
15900 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
15901 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
15902 if (rc != MBX_TIMEOUT)
15903 mempool_free(mboxq, phba->mbox_mem_pool);
15904 if (shdr_status || shdr_add_status || rc) {
15905 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
15906 "2514 POST_RPI_HDR mailbox failed with "
15907 "status x%x add_status x%x, mbx status x%x\n",
15908 shdr_status, shdr_add_status, rc);
15909 rc = -ENXIO;
15910 }
15911 return rc;
15912}
15913
15914
15915
15916
15917
15918
15919
15920
15921
15922
15923
15924
15925
15926
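/**
 * lpfc_sli4_alloc_rpi - Allocate an available rpi
 * @phba: pointer to lpfc hba data structure.
 *
 * Allocates the next free rpi from the bitmask and, when the number of
 * remaining rpis drops below the low water mark, posts an additional
 * rpi header page so the pool can keep growing.
 *
 * Return: the allocated rpi, or LPFC_RPI_ALLOC_ERROR when exhausted.
 **/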
15927int
15928lpfc_sli4_alloc_rpi(struct lpfc_hba *phba)
15929{
15930 unsigned long rpi;
15931 uint16_t max_rpi, rpi_limit;
15932 uint16_t rpi_remaining, lrpi = 0;
15933 struct lpfc_rpi_hdr *rpi_hdr;
15934 unsigned long iflag;
15935
15936
15937
15938
15939
15940 spin_lock_irqsave(&phba->hbalock, iflag);
15941 max_rpi = phba->sli4_hba.max_cfg_param.max_rpi;
15942 rpi_limit = phba->sli4_hba.next_rpi;
15943
15944 rpi = find_next_zero_bit(phba->sli4_hba.rpi_bmask, rpi_limit, 0);
15945 if (rpi >= rpi_limit)
15946 rpi = LPFC_RPI_ALLOC_ERROR;
15947 else {
15948 set_bit(rpi, phba->sli4_hba.rpi_bmask);
15949 phba->sli4_hba.max_cfg_param.rpi_used++;
15950 phba->sli4_hba.rpi_count++;
15951 }
15952 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
15953 "0001 rpi:%x max:%x lim:%x\n",
15954 (int) rpi, max_rpi, rpi_limit);
15955
15956
15957
15958
15959
15960 if ((rpi == LPFC_RPI_ALLOC_ERROR) &&
15961 (phba->sli4_hba.rpi_count >= max_rpi)) {
15962 spin_unlock_irqrestore(&phba->hbalock, iflag);
15963 return rpi;
15964 }
15965
15966
15967
15968
15969
15970 if (!phba->sli4_hba.rpi_hdrs_in_use) {
15971 spin_unlock_irqrestore(&phba->hbalock, iflag);
15972 return rpi;
15973 }
15974
15975
15976
15977
15978
15979
15980
15981 rpi_remaining = phba->sli4_hba.next_rpi - phba->sli4_hba.rpi_count;
15982 spin_unlock_irqrestore(&phba->hbalock, iflag);
15983 if (rpi_remaining < LPFC_RPI_LOW_WATER_MARK) {
15984 rpi_hdr = lpfc_sli4_create_rpi_hdr(phba);
15985 if (!rpi_hdr) {
15986 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
15987 "2002 Error Could not grow rpi "
15988 "count\n");
15989 } else {
15990 lrpi = rpi_hdr->start_rpi;
15991 rpi_hdr->start_rpi = phba->sli4_hba.rpi_ids[lrpi];
15992 lpfc_sli4_post_rpi_hdr(phba, rpi_hdr);
15993 }
15994 }
15995
15996 return rpi;
15997}
15998
15999
16000
16001
16002
16003
16004
16005
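/**
 * __lpfc_sli4_free_rpi - Release an rpi back to the pool
 * @phba: pointer to lpfc hba data structure.
 * @rpi: the rpi to release.
 *
 * Lock-free version; the caller must hold the hbalock.
 **/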
16006static void
16007__lpfc_sli4_free_rpi(struct lpfc_hba *phba, int rpi)
16008{
16009 if (test_and_clear_bit(rpi, phba->sli4_hba.rpi_bmask)) {
16010 phba->sli4_hba.rpi_count--;
16011 phba->sli4_hba.max_cfg_param.rpi_used--;
16012 }
16013}
16014
16015
16016
16017
16018
16019
16020
16021
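/**
 * lpfc_sli4_free_rpi - Release an rpi, taking the hbalock
 * @phba: pointer to lpfc hba data structure.
 * @rpi: the rpi to release.
 **/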
16022void
16023lpfc_sli4_free_rpi(struct lpfc_hba *phba, int rpi)
16024{
16025 spin_lock_irq(&phba->hbalock);
16026 __lpfc_sli4_free_rpi(phba, rpi);
16027 spin_unlock_irq(&phba->hbalock);
16028}
16029
16030
16031
16032
16033
16034
16035
16036
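/**
 * lpfc_sli4_remove_rpis - Free the driver's rpi tracking structures
 * @phba: pointer to lpfc hba data structure.
 *
 * Frees the rpi bitmask and id array and clears the resource-ready flag.
 **/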
16037void
16038lpfc_sli4_remove_rpis(struct lpfc_hba *phba)
16039{
16040 kfree(phba->sli4_hba.rpi_bmask);
16041 kfree(phba->sli4_hba.rpi_ids);
16042 bf_set(lpfc_rpi_rsrc_rdy, &phba->sli4_hba.sli4_flags, 0);
16043}
16044
16045
16046
16047
16048
16049
16050
16051
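/**
 * lpfc_sli4_resume_rpi - Issue a RESUME_RPI mailbox command
 * @ndlp: the node whose rpi is resumed.
 * @cmpl: optional completion handler for the mailbox command.
 * @arg: context passed to @cmpl.
 *
 * Return: 0 on success; -ENOMEM or -EIO on failure.
 **/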
16052int
16053lpfc_sli4_resume_rpi(struct lpfc_nodelist *ndlp,
16054 void (*cmpl)(struct lpfc_hba *, LPFC_MBOXQ_t *), void *arg)
16055{
16056 LPFC_MBOXQ_t *mboxq;
16057 struct lpfc_hba *phba = ndlp->phba;
16058 int rc;
16059
16060
16061 mboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
16062 if (!mboxq)
16063 return -ENOMEM;
16064
16065
16066 lpfc_resume_rpi(mboxq, ndlp);
16067 if (cmpl) {
16068 mboxq->mbox_cmpl = cmpl;
16069 mboxq->context1 = arg;
16070 mboxq->context2 = ndlp;
16071 } else
16072 mboxq->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
16073 mboxq->vport = ndlp->vport;
16074 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_NOWAIT);
16075 if (rc == MBX_NOT_FINISHED) {
16076 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
16077 "2010 Resume RPI Mailbox failed "
16078 "status %d, mbxStatus x%x\n", rc,
16079 bf_get(lpfc_mqe_status, &mboxq->u.mqe));
16080 mempool_free(mboxq, phba->mbox_mem_pool);
16081 return -EIO;
16082 }
16083 return 0;
16084}
16085
16086
16087
16088
16089
16090
16091
16092
16093
16094
16095
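/**
 * lpfc_sli4_init_vpi - Issue an INIT_VPI mailbox command
 * @vport: the vport whose vpi is initialized.
 *
 * Issues INIT_VPI synchronously and waits for completion.
 *
 * Return: 0 on success; -ENOMEM or -EIO on failure.
 **/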
16096int
16097lpfc_sli4_init_vpi(struct lpfc_vport *vport)
16098{
16099 LPFC_MBOXQ_t *mboxq;
16100 int rc = 0;
16101 int retval = MBX_SUCCESS;
16102 uint32_t mbox_tmo;
16103 struct lpfc_hba *phba = vport->phba;

	mboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
16105 if (!mboxq)
16106 return -ENOMEM;
16107 lpfc_init_vpi(phba, mboxq, vport->vpi);
16108 mbox_tmo = lpfc_mbox_tmo_val(phba, mboxq);
16109 rc = lpfc_sli_issue_mbox_wait(phba, mboxq, mbox_tmo);
16110 if (rc != MBX_SUCCESS) {
16111 lpfc_printf_vlog(vport, KERN_ERR, LOG_SLI,
16112 "2022 INIT VPI Mailbox failed "
16113 "status %d, mbxStatus x%x\n", rc,
16114 bf_get(lpfc_mqe_status, &mboxq->u.mqe));
16115 retval = -EIO;
16116 }
16117 if (rc != MBX_TIMEOUT)
		mempool_free(mboxq, phba->mbox_mem_pool);
16119
16120 return retval;
16121}
16122
16123
16124
16125
16126
16127
16128
16129
16130
16131
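/**
 * lpfc_mbx_cmpl_add_fcf_record - Completion handler for ADD_FCF_RECORD
 * @phba: pointer to lpfc hba data structure.
 * @mboxq: the completed mailbox command.
 *
 * Logs any failure (other than the FCF already being in use) and frees
 * the mailbox resources.
 **/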
16132static void
16133lpfc_mbx_cmpl_add_fcf_record(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
16134{
16135 void *virt_addr;
16136 union lpfc_sli4_cfg_shdr *shdr;
16137 uint32_t shdr_status, shdr_add_status;
16138
16139 virt_addr = mboxq->sge_array->addr[0];
16140
16141 shdr = (union lpfc_sli4_cfg_shdr *) virt_addr;
16142 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
16143 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
16144
16145 if ((shdr_status || shdr_add_status) &&
16146 (shdr_status != STATUS_FCF_IN_USE))
16147 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
16148 "2558 ADD_FCF_RECORD mailbox failed with "
16149 "status x%x add_status x%x\n",
16150 shdr_status, shdr_add_status);
16151
16152 lpfc_sli4_mbox_cmd_free(phba, mboxq);
16153}
16154
16155
16156
16157
16158
16159
16160
16161
16162
16163
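/**
 * lpfc_sli4_add_fcf_record - Add an FCF record to the port's FCF table
 * @phba: pointer to lpfc hba data structure.
 * @fcf_record: the FCF record to write.
 *
 * Builds a non-embedded ADD_FCF_RECORD mailbox command carrying the
 * FCF index and record, and issues it asynchronously.
 *
 * Return: 0 on success; -ENOMEM or -EIO on failure.
 **/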
16164int
16165lpfc_sli4_add_fcf_record(struct lpfc_hba *phba, struct fcf_record *fcf_record)
16166{
16167 int rc = 0;
16168 LPFC_MBOXQ_t *mboxq;
16169 uint8_t *bytep;
16170 void *virt_addr;
16171 struct lpfc_mbx_sge sge;
16172 uint32_t alloc_len, req_len;
16173 uint32_t fcfindex;
16174
16175 mboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
16176 if (!mboxq) {
16177 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
16178 "2009 Failed to allocate mbox for ADD_FCF cmd\n");
16179 return -ENOMEM;
16180 }
16181
16182 req_len = sizeof(struct fcf_record) + sizeof(union lpfc_sli4_cfg_shdr) +
16183 sizeof(uint32_t);
16184
16185
16186 alloc_len = lpfc_sli4_config(phba, mboxq, LPFC_MBOX_SUBSYSTEM_FCOE,
16187 LPFC_MBOX_OPCODE_FCOE_ADD_FCF,
16188 req_len, LPFC_SLI4_MBX_NEMBED);
16189 if (alloc_len < req_len) {
16190 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
16191 "2523 Allocated DMA memory size (x%x) is "
16192 "less than the requested DMA memory "
16193 "size (x%x)\n", alloc_len, req_len);
16194 lpfc_sli4_mbox_cmd_free(phba, mboxq);
16195 return -ENOMEM;
16196 }
16197
16198
16199
16200
16201
16202 lpfc_sli4_mbx_sge_get(mboxq, 0, &sge);
16203 virt_addr = mboxq->sge_array->addr[0];
16204
16205
16206
16207
16208 fcfindex = bf_get(lpfc_fcf_record_fcf_index, fcf_record);
16209 bytep = virt_addr + sizeof(union lpfc_sli4_cfg_shdr);
16210 lpfc_sli_pcimem_bcopy(&fcfindex, bytep, sizeof(uint32_t));
16211
16212
16213
16214
16215
16216
16217 bytep += sizeof(uint32_t);
16218 lpfc_sli_pcimem_bcopy(fcf_record, bytep, sizeof(struct fcf_record));
16219 mboxq->vport = phba->pport;
16220 mboxq->mbox_cmpl = lpfc_mbx_cmpl_add_fcf_record;
16221 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_NOWAIT);
16222 if (rc == MBX_NOT_FINISHED) {
16223 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
16224 "2515 ADD_FCF_RECORD mailbox failed with "
16225 "status 0x%x\n", rc);
16226 lpfc_sli4_mbox_cmd_free(phba, mboxq);
16227 rc = -EIO;
16228 } else
16229 rc = 0;
16230
16231 return rc;
16232}
16233
16234
16235
16236
16237
16238
16239
16240
16241
16242
16243
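/**
 * lpfc_sli4_build_dflt_fcf_record - Build the driver default FCF record
 * @phba: pointer to lpfc hba data structure.
 * @fcf_record: the record to initialize.
 * @fcf_index: the FCF index to assign to the record.
 *
 * Fills @fcf_record with the driver's default FCoE parameters,
 * including the FC map, FCF MAC and, when valid, the VLAN bitmap.
 **/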
16244void
16245lpfc_sli4_build_dflt_fcf_record(struct lpfc_hba *phba,
16246 struct fcf_record *fcf_record,
16247 uint16_t fcf_index)
16248{
16249 memset(fcf_record, 0, sizeof(struct fcf_record));
16250 fcf_record->max_rcv_size = LPFC_FCOE_MAX_RCV_SIZE;
16251 fcf_record->fka_adv_period = LPFC_FCOE_FKA_ADV_PER;
16252 fcf_record->fip_priority = LPFC_FCOE_FIP_PRIORITY;
16253 bf_set(lpfc_fcf_record_mac_0, fcf_record, phba->fc_map[0]);
16254 bf_set(lpfc_fcf_record_mac_1, fcf_record, phba->fc_map[1]);
16255 bf_set(lpfc_fcf_record_mac_2, fcf_record, phba->fc_map[2]);
16256 bf_set(lpfc_fcf_record_mac_3, fcf_record, LPFC_FCOE_FCF_MAC3);
16257 bf_set(lpfc_fcf_record_mac_4, fcf_record, LPFC_FCOE_FCF_MAC4);
16258 bf_set(lpfc_fcf_record_mac_5, fcf_record, LPFC_FCOE_FCF_MAC5);
16259 bf_set(lpfc_fcf_record_fc_map_0, fcf_record, phba->fc_map[0]);
16260 bf_set(lpfc_fcf_record_fc_map_1, fcf_record, phba->fc_map[1]);
16261 bf_set(lpfc_fcf_record_fc_map_2, fcf_record, phba->fc_map[2]);
16262 bf_set(lpfc_fcf_record_fcf_valid, fcf_record, 1);
16263 bf_set(lpfc_fcf_record_fcf_avail, fcf_record, 1);
16264 bf_set(lpfc_fcf_record_fcf_index, fcf_record, fcf_index);
16265 bf_set(lpfc_fcf_record_mac_addr_prov, fcf_record,
16266 LPFC_FCF_FPMA | LPFC_FCF_SPMA);
16267
16268 if (phba->valid_vlan) {
16269 fcf_record->vlan_bitmap[phba->vlan_id / 8]
16270 = 1 << (phba->vlan_id % 8);
16271 }
16272}
16273
16274
16275
16276
16277
16278
16279
16280
16281
16282
16283
16284
16285
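/**
 * lpfc_sli4_fcf_scan_read_fcf_rec - Start an FCF table scan
 * @phba: pointer to lpfc hba data structure.
 * @fcf_index: the FCF index to start reading from.
 *
 * Issues READ_FCF_RECORD with the table-scan completion handler and
 * marks an FCF table scan in progress.
 *
 * Return: 0 on success; -ENOMEM, -EINVAL or -EIO on failure.
 **/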
16286int
16287lpfc_sli4_fcf_scan_read_fcf_rec(struct lpfc_hba *phba, uint16_t fcf_index)
16288{
16289 int rc = 0, error;
16290 LPFC_MBOXQ_t *mboxq;
16291
16292 phba->fcoe_eventtag_at_fcf_scan = phba->fcoe_eventtag;
16293 phba->fcoe_cvl_eventtag_attn = phba->fcoe_cvl_eventtag;
16294 mboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
16295 if (!mboxq) {
16296 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
16297 "2000 Failed to allocate mbox for "
16298 "READ_FCF cmd\n");
16299 error = -ENOMEM;
16300 goto fail_fcf_scan;
16301 }
16302
16303 rc = lpfc_sli4_mbx_read_fcf_rec(phba, mboxq, fcf_index);
16304 if (rc) {
16305 error = -EINVAL;
16306 goto fail_fcf_scan;
16307 }
16308
16309 mboxq->vport = phba->pport;
16310 mboxq->mbox_cmpl = lpfc_mbx_cmpl_fcf_scan_read_fcf_rec;
16311
16312 spin_lock_irq(&phba->hbalock);
16313 phba->hba_flag |= FCF_TS_INPROG;
16314 spin_unlock_irq(&phba->hbalock);
16315
16316 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_NOWAIT);
16317 if (rc == MBX_NOT_FINISHED)
16318 error = -EIO;
16319 else {
16320
16321 if (fcf_index == LPFC_FCOE_FCF_GET_FIRST)
16322 phba->fcf.eligible_fcf_cnt = 0;
16323 error = 0;
16324 }
16325fail_fcf_scan:
16326 if (error) {
16327 if (mboxq)
16328 lpfc_sli4_mbox_cmd_free(phba, mboxq);
16329
16330 spin_lock_irq(&phba->hbalock);
16331 phba->hba_flag &= ~FCF_TS_INPROG;
16332 spin_unlock_irq(&phba->hbalock);
16333 }
16334 return error;
16335}
16336
16337
16338
16339
16340
16341
16342
16343
16344
16345
16346
16347
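/**
 * lpfc_sli4_fcf_rr_read_fcf_rec - Read an FCF record for rr failover
 * @phba: pointer to lpfc hba data structure.
 * @fcf_index: the FCF index to read.
 *
 * Issues READ_FCF_RECORD with the roundrobin failover completion
 * handler.
 *
 * Return: 0 on success; -ENOMEM, -EINVAL or -EIO on failure.
 **/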
16348int
16349lpfc_sli4_fcf_rr_read_fcf_rec(struct lpfc_hba *phba, uint16_t fcf_index)
16350{
16351 int rc = 0, error;
16352 LPFC_MBOXQ_t *mboxq;
16353
16354 mboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
16355 if (!mboxq) {
16356 lpfc_printf_log(phba, KERN_ERR, LOG_FIP | LOG_INIT,
16357 "2763 Failed to allocate mbox for "
16358 "READ_FCF cmd\n");
16359 error = -ENOMEM;
16360 goto fail_fcf_read;
16361 }
16362
16363 rc = lpfc_sli4_mbx_read_fcf_rec(phba, mboxq, fcf_index);
16364 if (rc) {
16365 error = -EINVAL;
16366 goto fail_fcf_read;
16367 }
16368
16369 mboxq->vport = phba->pport;
16370 mboxq->mbox_cmpl = lpfc_mbx_cmpl_fcf_rr_read_fcf_rec;
16371 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_NOWAIT);
16372 if (rc == MBX_NOT_FINISHED)
16373 error = -EIO;
16374 else
16375 error = 0;
16376
16377fail_fcf_read:
16378 if (error && mboxq)
16379 lpfc_sli4_mbox_cmd_free(phba, mboxq);
16380 return error;
16381}
16382
16383
16384
16385
16386
16387
16388
16389
16390
16391
16392
16393
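/**
 * lpfc_sli4_read_fcf_rec - Read an FCF record for record update
 * @phba: pointer to lpfc hba data structure.
 * @fcf_index: the FCF index to read.
 *
 * Issues READ_FCF_RECORD with the record-update completion handler.
 *
 * Return: 0 on success; -ENOMEM, -EINVAL or -EIO on failure.
 **/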
16394int
16395lpfc_sli4_read_fcf_rec(struct lpfc_hba *phba, uint16_t fcf_index)
16396{
16397 int rc = 0, error;
16398 LPFC_MBOXQ_t *mboxq;
16399
16400 mboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
16401 if (!mboxq) {
16402 lpfc_printf_log(phba, KERN_ERR, LOG_FIP | LOG_INIT,
16403 "2758 Failed to allocate mbox for "
16404 "READ_FCF cmd\n");
16405 error = -ENOMEM;
16406 goto fail_fcf_read;
16407 }
16408
16409 rc = lpfc_sli4_mbx_read_fcf_rec(phba, mboxq, fcf_index);
16410 if (rc) {
16411 error = -EINVAL;
16412 goto fail_fcf_read;
16413 }
16414
16415 mboxq->vport = phba->pport;
16416 mboxq->mbox_cmpl = lpfc_mbx_cmpl_read_fcf_rec;
16417 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_NOWAIT);
16418 if (rc == MBX_NOT_FINISHED)
16419 error = -EIO;
16420 else
16421 error = 0;
16422
16423fail_fcf_read:
16424 if (error && mboxq)
16425 lpfc_sli4_mbox_cmd_free(phba, mboxq);
16426 return error;
16427}
16428
16429
16430
16431
16432
16433
16434
16435
16436
16437
16438
16439
16440
16441
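/**
 * lpfc_check_next_fcf_pri_level - Move roundrobin to the next priority
 * @phba: pointer to lpfc hba data structure.
 *
 * Rebuilds the roundrobin failover bitmask from the FCFs at the next
 * usable priority level; when every entry has failed FLOGI, the failed
 * flags are cleared so the list can be retried from the start.
 *
 * Return: 1 if the bitmask was repopulated with a new priority level,
 * otherwise 0.
 **/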
16442static int
16443lpfc_check_next_fcf_pri_level(struct lpfc_hba *phba)
16444{
16445 uint16_t next_fcf_pri;
16446 uint16_t last_index;
16447 struct lpfc_fcf_pri *fcf_pri;
16448 int rc;
16449 int ret = 0;
16450
16451 last_index = find_first_bit(phba->fcf.fcf_rr_bmask,
16452 LPFC_SLI4_FCF_TBL_INDX_MAX);
16453 lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
16454 "3060 Last IDX %d\n", last_index);
16455
16456
16457 spin_lock_irq(&phba->hbalock);
16458 if (list_empty(&phba->fcf.fcf_pri_list) ||
16459 list_is_singular(&phba->fcf.fcf_pri_list)) {
16460 spin_unlock_irq(&phba->hbalock);
16461 lpfc_printf_log(phba, KERN_ERR, LOG_FIP,
16462 "3061 Last IDX %d\n", last_index);
16463 return 0;
16464 }
16465 spin_unlock_irq(&phba->hbalock);
16466
16467 next_fcf_pri = 0;
16468
16469
16470
16471
16472 memset(phba->fcf.fcf_rr_bmask, 0,
16473 sizeof(*phba->fcf.fcf_rr_bmask));
16474 spin_lock_irq(&phba->hbalock);
16475 list_for_each_entry(fcf_pri, &phba->fcf.fcf_pri_list, list) {
16476 if (fcf_pri->fcf_rec.flag & LPFC_FCF_FLOGI_FAILED)
16477 continue;
16478
16479
16480
16481
16482 if (!next_fcf_pri)
16483 next_fcf_pri = fcf_pri->fcf_rec.priority;
16484 spin_unlock_irq(&phba->hbalock);
16485 if (fcf_pri->fcf_rec.priority == next_fcf_pri) {
16486 rc = lpfc_sli4_fcf_rr_index_set(phba,
16487 fcf_pri->fcf_rec.fcf_index);
16488 if (rc)
16489 return 0;
16490 }
16491 spin_lock_irq(&phba->hbalock);
16492 }
16493
16494
16495
16496
16497
16498 if (!next_fcf_pri && !list_empty(&phba->fcf.fcf_pri_list)) {
16499 list_for_each_entry(fcf_pri, &phba->fcf.fcf_pri_list, list) {
16500 fcf_pri->fcf_rec.flag &= ~LPFC_FCF_FLOGI_FAILED;
16501
16502
16503
16504
16505 if (!next_fcf_pri)
16506 next_fcf_pri = fcf_pri->fcf_rec.priority;
16507 spin_unlock_irq(&phba->hbalock);
16508 if (fcf_pri->fcf_rec.priority == next_fcf_pri) {
16509 rc = lpfc_sli4_fcf_rr_index_set(phba,
16510 fcf_pri->fcf_rec.fcf_index);
16511 if (rc)
16512 return 0;
16513 }
16514 spin_lock_irq(&phba->hbalock);
16515 }
16516 } else
16517 ret = 1;
16518 spin_unlock_irq(&phba->hbalock);
16519
16520 return ret;
16521}
16522
16523
16524
16525
16526
16527
16528
16529
16530
16531
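/**
 * lpfc_sli4_fcf_rr_next_index_get - Get the next roundrobin FCF index
 * @phba: pointer to lpfc hba data structure.
 *
 * Finds the next eligible FCF index in the roundrobin bitmask, wrapping
 * around and dropping to the next priority level when required.
 *
 * Return: the next FCF index, or LPFC_FCOE_FCF_NEXT_NONE when no
 * eligible FCF remains.
 **/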
16532uint16_t
16533lpfc_sli4_fcf_rr_next_index_get(struct lpfc_hba *phba)
16534{
16535 uint16_t next_fcf_index;
16536
16537initial_priority:
16538
16539 next_fcf_index = phba->fcf.current_rec.fcf_indx;
16540
16541next_priority:
16542
16543 next_fcf_index = (next_fcf_index + 1) % LPFC_SLI4_FCF_TBL_INDX_MAX;
16544 next_fcf_index = find_next_bit(phba->fcf.fcf_rr_bmask,
16545 LPFC_SLI4_FCF_TBL_INDX_MAX,
16546 next_fcf_index);
16547
16548
16549 if (next_fcf_index >= LPFC_SLI4_FCF_TBL_INDX_MAX) {
16550
16551
16552
16553
16554
16555 next_fcf_index = find_next_bit(phba->fcf.fcf_rr_bmask,
16556 LPFC_SLI4_FCF_TBL_INDX_MAX, 0);
16557 }
16558
16559
16560
16561 if (next_fcf_index >= LPFC_SLI4_FCF_TBL_INDX_MAX ||
16562 next_fcf_index == phba->fcf.current_rec.fcf_indx) {
16563
16564
16565
16566
16567
16568
16569 if (lpfc_check_next_fcf_pri_level(phba))
16570 goto initial_priority;
16571 lpfc_printf_log(phba, KERN_WARNING, LOG_FIP,
16572 "2844 No roundrobin failover FCF available\n");
16573 if (next_fcf_index >= LPFC_SLI4_FCF_TBL_INDX_MAX)
16574 return LPFC_FCOE_FCF_NEXT_NONE;
16575 else {
16576 lpfc_printf_log(phba, KERN_WARNING, LOG_FIP,
16577 "3063 Only FCF available idx %d, flag %x\n",
16578 next_fcf_index,
16579 phba->fcf.fcf_pri[next_fcf_index].fcf_rec.flag);
16580 return next_fcf_index;
16581 }
16582 }
16583
16584 if (next_fcf_index < LPFC_SLI4_FCF_TBL_INDX_MAX &&
16585 phba->fcf.fcf_pri[next_fcf_index].fcf_rec.flag &
16586 LPFC_FCF_FLOGI_FAILED) {
16587 if (list_is_singular(&phba->fcf.fcf_pri_list))
16588 return LPFC_FCOE_FCF_NEXT_NONE;
16589
16590 goto next_priority;
16591 }
16592
16593 lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
16594 "2845 Get next roundrobin failover FCF (x%x)\n",
16595 next_fcf_index);
16596
16597 return next_fcf_index;
16598}
16599
16600
16601
16602
16603
16604
16605
16606
16607
16608
16609
16610
16611
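/**
 * lpfc_sli4_fcf_rr_index_set - Add an FCF index to the roundrobin bmask
 * @phba: pointer to lpfc hba data structure.
 * @fcf_index: the FCF index to set.
 *
 * Return: 0 on success; -EINVAL if @fcf_index is out of range.
 **/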
16612int
16613lpfc_sli4_fcf_rr_index_set(struct lpfc_hba *phba, uint16_t fcf_index)
16614{
16615 if (fcf_index >= LPFC_SLI4_FCF_TBL_INDX_MAX) {
16616 lpfc_printf_log(phba, KERN_ERR, LOG_FIP,
16617 "2610 FCF (x%x) reached driver's book "
16618 "keeping dimension:x%x\n",
16619 fcf_index, LPFC_SLI4_FCF_TBL_INDX_MAX);
16620 return -EINVAL;
16621 }
16622
16623 set_bit(fcf_index, phba->fcf.fcf_rr_bmask);
16624
16625 lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
16626 "2790 Set FCF (x%x) to roundrobin FCF failover "
16627 "bmask\n", fcf_index);
16628
16629 return 0;
16630}
16631
16632
16633
16634
16635
16636
16637
16638
16639
16640
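/**
 * lpfc_sli4_fcf_rr_index_clear - Remove an FCF index from the rr bmask
 * @phba: pointer to lpfc hba data structure.
 * @fcf_index: the FCF index to clear.
 *
 * Drops the index from the priority list and clears its bit in the
 * roundrobin failover bitmask.
 **/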
void
lpfc_sli4_fcf_rr_index_clear(struct lpfc_hba *phba, uint16_t fcf_index)
{
	struct lpfc_fcf_pri *fcf_pri, *fcf_pri_next;

	if (fcf_index >= LPFC_SLI4_FCF_TBL_INDX_MAX) {
		lpfc_printf_log(phba, KERN_ERR, LOG_FIP,
				"2762 FCF (x%x) reached driver's book "
				"keeping dimension:x%x\n",
				fcf_index, LPFC_SLI4_FCF_TBL_INDX_MAX);
		return;
	}

	spin_lock_irq(&phba->hbalock);
	list_for_each_entry_safe(fcf_pri, fcf_pri_next, &phba->fcf.fcf_pri_list,
				 list) {
		if (fcf_pri->fcf_rec.fcf_index == fcf_index) {
			list_del_init(&fcf_pri->list);
			break;
		}
	}
	spin_unlock_irq(&phba->hbalock);
	clear_bit(fcf_index, phba->fcf.fcf_rr_bmask);

	lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
			"2791 Clear FCF (x%x) from roundrobin failover "
			"bmask\n", fcf_index);
}

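/**
 * lpfc_mbx_cmpl_redisc_fcf_table - completion routine for rediscover FCF table
 * @phba: pointer to lpfc hba data structure.
 * @mbox: pointer to the driver internal queue element for mailbox command.
 *
 * This routine is the completion routine for the rediscover FCF table mailbox
 * command. If the mailbox command returned failure, it will try to stop the
 * FCF rediscover wait timer.
 **/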
static void
lpfc_mbx_cmpl_redisc_fcf_table(struct lpfc_hba *phba, LPFC_MBOXQ_t *mbox)
{
	struct lpfc_mbx_redisc_fcf_tbl *redisc_fcf;
	uint32_t shdr_status, shdr_add_status;

	redisc_fcf = &mbox->u.mqe.un.redisc_fcf_tbl;

	shdr_status = bf_get(lpfc_mbox_hdr_status,
			     &redisc_fcf->header.cfg_shdr.response);
	shdr_add_status = bf_get(lpfc_mbox_hdr_add_status,
				 &redisc_fcf->header.cfg_shdr.response);
	if (shdr_status || shdr_add_status) {
		lpfc_printf_log(phba, KERN_ERR, LOG_FIP,
				"2746 Requesting for FCF rediscovery failed "
				"status x%x add_status x%x\n",
				shdr_status, shdr_add_status);
		if (phba->fcf.fcf_flag & FCF_ACVL_DISC) {
			spin_lock_irq(&phba->hbalock);
			phba->fcf.fcf_flag &= ~FCF_ACVL_DISC;
			spin_unlock_irq(&phba->hbalock);
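			/*
			 * CVL event triggered FCF rediscover request failed,
			 * last resort to re-try current registered FCF entry.
			 */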
			lpfc_retry_pport_discovery(phba);
		} else {
			spin_lock_irq(&phba->hbalock);
			phba->fcf.fcf_flag &= ~FCF_DEAD_DISC;
			spin_unlock_irq(&phba->hbalock);
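			/*
			 * DEAD FCF event triggered FCF rediscover request
			 * failed, last resort to fail over as a link down
			 * to FCF registration.
			 */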
			lpfc_sli4_fcf_dead_failthrough(phba);
		}
	} else {
		lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
				"2775 Start FCF rediscover quiescent timer\n");
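		/*
		 * Start FCF rediscovery wait timer for pending FCF
		 * before rediscover FCF table.
		 */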
		lpfc_fcf_redisc_wait_start_timer(phba);
	}

	mempool_free(mbox, phba->mbox_mem_pool);
}

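/**
 * lpfc_sli4_redisc_fcf_table - Request to rediscover entire FCF table by port.
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked to request for rediscovery of the entire FCF table
 * by the port.
 **/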
int
lpfc_sli4_redisc_fcf_table(struct lpfc_hba *phba)
{
	LPFC_MBOXQ_t *mbox;
	struct lpfc_mbx_redisc_fcf_tbl *redisc_fcf;
	int rc, length;
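	/* Cancel retry delay timers to all vports before FCF rediscover */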
	lpfc_cancel_all_vport_retry_delay_timer(phba);

	mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!mbox) {
		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
				"2745 Failed to allocate mbox for "
				"requesting FCF rediscover.\n");
		return -ENOMEM;
	}

	length = (sizeof(struct lpfc_mbx_redisc_fcf_tbl) -
		  sizeof(struct lpfc_sli4_cfg_mhdr));
	lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_FCOE,
			 LPFC_MBOX_OPCODE_FCOE_REDISCOVER_FCF,
			 length, LPFC_SLI4_MBX_EMBED);

	redisc_fcf = &mbox->u.mqe.un.redisc_fcf_tbl;
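	/* Request to rediscover the entire FCF table for roundrobin failover */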
	bf_set(lpfc_mbx_redisc_fcf_count, redisc_fcf, 0);

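	/* Issue the mailbox command asynchronously */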
	mbox->vport = phba->pport;
	mbox->mbox_cmpl = lpfc_mbx_cmpl_redisc_fcf_table;
	rc = lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT);

	if (rc == MBX_NOT_FINISHED) {
		mempool_free(mbox, phba->mbox_mem_pool);
		return -EIO;
	}
	return 0;
}

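/**
 * lpfc_sli4_fcf_dead_failthrough - Failthrough routine to fcf dead event
 * @phba: pointer to lpfc hba data structure.
 *
 * This function is the failover routine as a last resort to the FCF DEAD
 * event when driver failed to perform fast FCF failover.
 **/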
void
lpfc_sli4_fcf_dead_failthrough(struct lpfc_hba *phba)
{
	uint32_t link_state;
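	/*
	 * Last resort as FCF DEAD event failover will treat this as
	 * a link down, but save the link state because we don't want
	 * it to be changed to Link Down unless it is already down.
	 */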
	link_state = phba->link_state;
	lpfc_linkdown(phba);
	phba->link_state = link_state;

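	/* Unregister FCF if no devices connected to it */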
	lpfc_unregister_unused_fcf(phba);
}

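/**
 * lpfc_sli_get_config_region23 - Get sli3 port region 23 data.
 * @phba: pointer to lpfc hba data structure.
 * @rgn23_data: pointer to configure region 23 data.
 *
 * This function gets SLI3 port configure region 23 data through memory dump
 * mailbox command. When it successfully retrieves data, the size of the data
 * will be returned, otherwise, 0 will be returned.
 **/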
static uint32_t
lpfc_sli_get_config_region23(struct lpfc_hba *phba, char *rgn23_data)
{
	LPFC_MBOXQ_t *pmb = NULL;
	MAILBOX_t *mb;
	uint32_t offset = 0;
	int rc;

	if (!rgn23_data)
		return 0;

	pmb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!pmb) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"2600 failed to allocate mailbox memory\n");
		return 0;
	}
	mb = &pmb->u.mb;

	do {
		lpfc_dump_mem(phba, pmb, offset, DMP_REGION_23);
		rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL);

		if (rc != MBX_SUCCESS) {
			lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
					"2601 failed to read config "
					"region 23, rc 0x%x Status 0x%x\n",
					rc, mb->mbxStatus);
			mb->un.varDmp.word_cnt = 0;
		}
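		/*
		 * dump mem may return a zero when finished or we got a
		 * mailbox error, either way we are done.
		 */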
		if (mb->un.varDmp.word_cnt == 0)
			break;
		if (mb->un.varDmp.word_cnt > DMP_RGN23_SIZE - offset)
			mb->un.varDmp.word_cnt = DMP_RGN23_SIZE - offset;

		lpfc_sli_pcimem_bcopy(((uint8_t *)mb) + DMP_RSP_OFFSET,
				      rgn23_data + offset,
				      mb->un.varDmp.word_cnt);
		offset += mb->un.varDmp.word_cnt;
	} while (mb->un.varDmp.word_cnt && offset < DMP_RGN23_SIZE);

	mempool_free(pmb, phba->mbox_mem_pool);
	return offset;
}

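/**
 * lpfc_sli4_get_config_region23 - Get sli4 port region 23 data.
 * @phba: pointer to lpfc hba data structure.
 * @rgn23_data: pointer to configure region 23 data.
 *
 * This function gets SLI4 port configure region 23 data through memory dump
 * mailbox command. When it successfully retrieves data, the size of the data
 * will be returned, otherwise, 0 will be returned.
 **/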
static uint32_t
lpfc_sli4_get_config_region23(struct lpfc_hba *phba, char *rgn23_data)
{
	LPFC_MBOXQ_t *mboxq = NULL;
	struct lpfc_dmabuf *mp = NULL;
	struct lpfc_mqe *mqe;
	uint32_t data_length = 0;
	int rc;

	if (!rgn23_data)
		return 0;

	mboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!mboxq) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"3105 failed to allocate mailbox memory\n");
		return 0;
	}

	if (lpfc_sli4_dump_cfg_rg23(phba, mboxq))
		goto out;
	mqe = &mboxq->u.mqe;
	mp = (struct lpfc_dmabuf *)mboxq->context1;
	rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
	if (rc)
		goto out;
	data_length = mqe->un.mb_words[5];
	if (data_length == 0)
		goto out;
	if (data_length > DMP_RGN23_SIZE) {
		data_length = 0;
		goto out;
	}
	lpfc_sli_pcimem_bcopy((char *)mp->virt, rgn23_data, data_length);
out:
	mempool_free(mboxq, phba->mbox_mem_pool);
	if (mp) {
		lpfc_mbuf_free(phba, mp->virt, mp->phys);
		kfree(mp);
	}
	return data_length;
}

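/**
 * lpfc_sli_read_link_ste - Read region 23 to decide if link is disabled.
 * @phba: pointer to lpfc hba data structure.
 *
 * This function reads region 23 and parses the TLVs for the port state
 * record to decide if the user disabled the port. If the TLV indicates
 * the port is disabled, the hba_flag is set accordingly.
 **/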
void
lpfc_sli_read_link_ste(struct lpfc_hba *phba)
{
	uint8_t *rgn23_data = NULL;
	uint32_t if_type, data_size, sub_tlv_len, tlv_offset;
	uint32_t offset = 0;

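	/* Get adapter Region 23 data */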
	rgn23_data = kzalloc(DMP_RGN23_SIZE, GFP_KERNEL);
	if (!rgn23_data)
		goto out;

	if (phba->sli_rev < LPFC_SLI_REV4)
		data_size = lpfc_sli_get_config_region23(phba, rgn23_data);
	else {
		if_type = bf_get(lpfc_sli_intf_if_type,
				 &phba->sli4_hba.sli_intf);
		if (if_type == LPFC_SLI_INTF_IF_TYPE_0)
			goto out;
		data_size = lpfc_sli4_get_config_region23(phba, rgn23_data);
	}

	if (!data_size)
		goto out;

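	/* Check the region signature first */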
	if (memcmp(&rgn23_data[offset], LPFC_REGION23_SIGNATURE, 4)) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"2619 Config region 23 has bad signature\n");
		goto out;
	}
	offset += 4;

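	/* Check the data structure version */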
	if (rgn23_data[offset] != LPFC_REGION23_VERSION) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"2620 Config region 23 has bad version\n");
		goto out;
	}
	offset += 4;

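	/* Parse TLV info */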
	while (offset < data_size) {
		if (rgn23_data[offset] == LPFC_REGION23_LAST_REC)
			break;
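		/*
		 * If the TLV is not driver specific TLV or driver wasn't
		 * the one who wrote it, skip it.
		 */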
		if ((rgn23_data[offset] != DRIVER_SPECIFIC_TYPE) ||
		    (rgn23_data[offset + 2] != LINUX_DRIVER_ID) ||
		    (rgn23_data[offset + 3] != 0)) {
			offset += rgn23_data[offset + 1] * 4 + 4;
			continue;
		}
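		/* Driver found a driver specific TLV in the config region */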
		sub_tlv_len = rgn23_data[offset + 1] * 4;
		offset += 4;
		tlv_offset = 0;

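		/*
		 * Search for configured port state sub-TLV.
		 */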
		while ((offset < data_size) &&
		       (tlv_offset < sub_tlv_len)) {
			if (rgn23_data[offset] == LPFC_REGION23_LAST_REC) {
				offset += 4;
				tlv_offset += 4;
				break;
			}
			if (rgn23_data[offset] != PORT_STE_TYPE) {
				/* Advance tlv_offset before offset moves past
				 * this record, so both counters use this
				 * record's length field.
				 */
				tlv_offset += rgn23_data[offset + 1] * 4 + 4;
				offset += rgn23_data[offset + 1] * 4 + 4;
				continue;
			}
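			/* This HBA contains PORT_STE configured */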
			if (!rgn23_data[offset + 2])
				phba->hba_flag |= LINK_DISABLED;

			goto out;
		}
	}

out:
	kfree(rgn23_data);
	return;
}

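/**
 * lpfc_wr_object - write an object to the firmware
 * @phba: HBA structure that indicates port to create a queue on.
 * @dmabuf_list: list of dmabufs to write to the port.
 * @size: the total byte value of the objects to write to the port.
 * @offset: the current offset to be used to start the transfer.
 *
 * This routine will create a wr_object mailbox command to send to the port.
 * The mailbox command will be constructed using the dma buffers described in
 * @dmabuf_list to create a list of BDE's. This routine will fill in as many
 * BDE's that the embedded mailbox can support. The @offset variable will be
 * used to indicate the starting offset of the transfer and will also return
 * the offset after the write object mailbox has completed. @size is used to
 * determine the end of the object and whether the eof bit should be set.
 *
 * Return 0 if successful; @offset will contain the new offset to use
 * for the next write.
 * Return negative value for error cases.
 **/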
int
lpfc_wr_object(struct lpfc_hba *phba, struct list_head *dmabuf_list,
	       uint32_t size, uint32_t *offset)
{
	struct lpfc_mbx_wr_object *wr_object;
	LPFC_MBOXQ_t *mbox;
	int rc = 0, i = 0;
	uint32_t shdr_status, shdr_add_status;
	uint32_t mbox_tmo;
	union lpfc_sli4_cfg_shdr *shdr;
	struct lpfc_dmabuf *dmabuf;
	uint32_t written = 0;

	mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!mbox)
		return -ENOMEM;

	lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON,
			 LPFC_MBOX_OPCODE_WRITE_OBJECT,
			 sizeof(struct lpfc_mbx_wr_object) -
			 sizeof(struct lpfc_sli4_cfg_mhdr), LPFC_SLI4_MBX_EMBED);

	wr_object = (struct lpfc_mbx_wr_object *)&mbox->u.mqe.un.wr_object;
	wr_object->u.request.write_offset = *offset;
	sprintf((uint8_t *)wr_object->u.request.object_name, "/");
	wr_object->u.request.object_name[0] =
		cpu_to_le32(wr_object->u.request.object_name[0]);
	bf_set(lpfc_wr_object_eof, &wr_object->u.request, 0);
	list_for_each_entry(dmabuf, dmabuf_list, list) {
		if (i >= LPFC_MBX_WR_CONFIG_MAX_BDE || written >= size)
			break;
		wr_object->u.request.bde[i].addrLow = putPaddrLow(dmabuf->phys);
		wr_object->u.request.bde[i].addrHigh =
			putPaddrHigh(dmabuf->phys);
		if (written + SLI4_PAGE_SIZE >= size) {
			wr_object->u.request.bde[i].tus.f.bdeSize =
				(size - written);
			written += (size - written);
			bf_set(lpfc_wr_object_eof, &wr_object->u.request, 1);
		} else {
			wr_object->u.request.bde[i].tus.f.bdeSize =
				SLI4_PAGE_SIZE;
			written += SLI4_PAGE_SIZE;
		}
		i++;
	}
	wr_object->u.request.bde_count = i;
	bf_set(lpfc_wr_object_write_length, &wr_object->u.request, written);
	if (!phba->sli4_hba.intr_enable)
		rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
	else {
		mbox_tmo = lpfc_mbox_tmo_val(phba, mbox);
		rc = lpfc_sli_issue_mbox_wait(phba, mbox, mbox_tmo);
	}

	shdr = (union lpfc_sli4_cfg_shdr *)&wr_object->header.cfg_shdr;
	shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
	shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
	if (rc != MBX_TIMEOUT)
		mempool_free(mbox, phba->mbox_mem_pool);
	if (shdr_status || shdr_add_status || rc) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"3025 Write Object mailbox failed with "
				"status x%x add_status x%x, mbx status x%x\n",
				shdr_status, shdr_add_status, rc);
		rc = -ENXIO;
	} else
		*offset += wr_object->u.response.actual_write_length;
	return rc;
}

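/**
 * lpfc_cleanup_pending_mbox - Free up vport discovery mailbox commands.
 * @vport: pointer to vport data structure.
 *
 * This function iterates through the mailboxq and cleans up all REG_LOGIN
 * and REG_VPI mailbox commands associated with the vport. This function
 * is called when driver wants to restart discovery of the vport due to
 * a Clear Virtual Link event.
 **/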
void
lpfc_cleanup_pending_mbox(struct lpfc_vport *vport)
{
	struct lpfc_hba *phba = vport->phba;
	LPFC_MBOXQ_t *mb, *nextmb;
	struct lpfc_dmabuf *mp;
	struct lpfc_nodelist *ndlp;
	struct lpfc_nodelist *act_mbx_ndlp = NULL;
	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
	LIST_HEAD(mbox_cmd_list);
	uint8_t restart_loop;

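	/* Clean up internally queued mailbox commands with the vport */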
	spin_lock_irq(&phba->hbalock);
	list_for_each_entry_safe(mb, nextmb, &phba->sli.mboxq, list) {
		if (mb->vport != vport)
			continue;

		if ((mb->u.mb.mbxCommand != MBX_REG_LOGIN64) &&
		    (mb->u.mb.mbxCommand != MBX_REG_VPI))
			continue;

		list_del(&mb->list);
		list_add_tail(&mb->list, &mbox_cmd_list);
	}

	mb = phba->sli.mbox_active;
	if (mb && (mb->vport == vport)) {
		if ((mb->u.mb.mbxCommand == MBX_REG_LOGIN64) ||
		    (mb->u.mb.mbxCommand == MBX_REG_VPI))
			mb->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
		if (mb->u.mb.mbxCommand == MBX_REG_LOGIN64) {
			act_mbx_ndlp = (struct lpfc_nodelist *)mb->context2;
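			/* Put reference count for delayed processing */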
			act_mbx_ndlp = lpfc_nlp_get(act_mbx_ndlp);
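			/* Unregister the RPI when mailbox complete */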
			mb->mbox_flag |= LPFC_MBX_IMED_UNREG;
		}
	}

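	/* Cleanup any mailbox completions which are not yet processed */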
	do {
		restart_loop = 0;
		list_for_each_entry(mb, &phba->sli.mboxq_cmpl, list) {
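			/*
			 * If this mailbox is already processed or it is
			 * for another vport ignore it.
			 */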
			if ((mb->vport != vport) ||
			    (mb->mbox_flag & LPFC_MBX_IMED_UNREG))
				continue;

			if ((mb->u.mb.mbxCommand != MBX_REG_LOGIN64) &&
			    (mb->u.mb.mbxCommand != MBX_REG_VPI))
				continue;

			mb->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
			if (mb->u.mb.mbxCommand == MBX_REG_LOGIN64) {
				ndlp = (struct lpfc_nodelist *)mb->context2;
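				/* Unregister the RPI when mailbox complete */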
				mb->mbox_flag |= LPFC_MBX_IMED_UNREG;
				restart_loop = 1;
				spin_unlock_irq(&phba->hbalock);
				spin_lock(shost->host_lock);
				ndlp->nlp_flag &= ~NLP_IGNR_REG_CMPL;
				spin_unlock(shost->host_lock);
				spin_lock_irq(&phba->hbalock);
				break;
			}
		}
	} while (restart_loop);

	spin_unlock_irq(&phba->hbalock);

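	/* Release the cleaned-up mailbox commands */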
	while (!list_empty(&mbox_cmd_list)) {
		list_remove_head(&mbox_cmd_list, mb, LPFC_MBOXQ_t, list);
		if (mb->u.mb.mbxCommand == MBX_REG_LOGIN64) {
			mp = (struct lpfc_dmabuf *)(mb->context1);
			if (mp) {
				__lpfc_mbuf_free(phba, mp->virt, mp->phys);
				kfree(mp);
			}
			ndlp = (struct lpfc_nodelist *)mb->context2;
			mb->context2 = NULL;
			if (ndlp) {
				spin_lock(shost->host_lock);
				ndlp->nlp_flag &= ~NLP_IGNR_REG_CMPL;
				spin_unlock(shost->host_lock);
				lpfc_nlp_put(ndlp);
			}
		}
		mempool_free(mb, phba->mbox_mem_pool);
	}

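	/* Release the ndlp with the cleaned-up active mailbox command */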
	if (act_mbx_ndlp) {
		spin_lock(shost->host_lock);
		act_mbx_ndlp->nlp_flag &= ~NLP_IGNR_REG_CMPL;
		spin_unlock(shost->host_lock);
		lpfc_nlp_put(act_mbx_ndlp);
	}
}

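/**
 * lpfc_drain_txq - Drain the txq
 * @phba: Pointer to HBA context object.
 *
 * This function attempts to submit IOCBs on the txq
 * to the adapter.  For SLI4 adapters, the txq contains
 * ELS IOCBs that have been deferred because there
 * are no SGLs.  This congestion can occur with large
 * vport counts during node discovery.
 **/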
uint32_t
lpfc_drain_txq(struct lpfc_hba *phba)
{
	LIST_HEAD(completions);
	struct lpfc_sli_ring *pring = &phba->sli.ring[LPFC_ELS_RING];
	struct lpfc_iocbq *piocbq = NULL;
	unsigned long iflags = 0;
	char *fail_msg = NULL;
	struct lpfc_sglq *sglq;
	union lpfc_wqe wqe;
	uint32_t txq_cnt = 0;

	spin_lock_irqsave(&pring->ring_lock, iflags);
	list_for_each_entry(piocbq, &pring->txq, list) {
		txq_cnt++;
	}

	if (txq_cnt > pring->txq_max)
		pring->txq_max = txq_cnt;

	spin_unlock_irqrestore(&pring->ring_lock, iflags);

	while (!list_empty(&pring->txq)) {
		spin_lock_irqsave(&pring->ring_lock, iflags);

		piocbq = lpfc_sli_ringtx_get(phba, pring);
		if (!piocbq) {
			spin_unlock_irqrestore(&pring->ring_lock, iflags);
			lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
					"2823 txq empty and txq_cnt is %d\n",
					txq_cnt);
			break;
		}
		sglq = __lpfc_sli_get_sglq(phba, piocbq);
		if (!sglq) {
			__lpfc_sli_ringtx_put(phba, pring, piocbq);
			spin_unlock_irqrestore(&pring->ring_lock, iflags);
			break;
		}
		txq_cnt--;

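		/* The xri and iocb resources secured,
		 * attempt to issue request
		 */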
		piocbq->sli4_lxritag = sglq->sli4_lxritag;
		piocbq->sli4_xritag = sglq->sli4_xritag;
		if (NO_XRI == lpfc_sli4_bpl2sgl(phba, piocbq, sglq))
			fail_msg = "to convert bpl to sgl";
		else if (lpfc_sli4_iocb2wqe(phba, piocbq, &wqe))
			fail_msg = "to convert iocb to wqe";
		else if (lpfc_sli4_wq_put(phba->sli4_hba.els_wq, &wqe))
			fail_msg = " - Wq is full";
		else
			lpfc_sli_ringtxcmpl_put(phba, pring, piocbq);

		if (fail_msg) {
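			/* Failed means we can't issue and need to cancel */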
			lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
					"2822 IOCB failed %s iotag 0x%x "
					"xri 0x%x\n",
					fail_msg,
					piocbq->iotag, piocbq->sli4_xritag);
			list_add_tail(&piocbq->list, &completions);
		}
		spin_unlock_irqrestore(&pring->ring_lock, iflags);
	}

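	/* Cancel all the IOCBs that cannot be issued */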
	lpfc_sli_cancel_iocbs(phba, &completions, IOSTAT_LOCAL_REJECT,
			      IOERR_SLI_ABORTED);

	return txq_cnt;
}