1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24#include <linux/blkdev.h>
25#include <linux/pci.h>
26#include <linux/interrupt.h>
27#include <linux/delay.h>
28#include <linux/slab.h>
29#include <linux/lockdep.h>
30
31#include <scsi/scsi.h>
32#include <scsi/scsi_cmnd.h>
33#include <scsi/scsi_device.h>
34#include <scsi/scsi_host.h>
35#include <scsi/scsi_transport_fc.h>
36#include <scsi/fc/fc_fs.h>
37#include <linux/aer.h>
38#ifdef CONFIG_X86
39#include <asm/set_memory.h>
40#endif
41
42#include <linux/nvme-fc-driver.h>
43
44#include "lpfc_hw4.h"
45#include "lpfc_hw.h"
46#include "lpfc_sli.h"
47#include "lpfc_sli4.h"
48#include "lpfc_nl.h"
49#include "lpfc_disc.h"
50#include "lpfc.h"
51#include "lpfc_scsi.h"
52#include "lpfc_nvme.h"
53#include "lpfc_nvmet.h"
54#include "lpfc_crtn.h"
55#include "lpfc_logmsg.h"
56#include "lpfc_compat.h"
57#include "lpfc_debugfs.h"
58#include "lpfc_vport.h"
59#include "lpfc_version.h"
60
61
/* Classification of SLI-3 IOCB commands, as determined by
 * lpfc_sli_iocb_cmd_type() from the ulpCommand opcode.
 */
typedef enum _lpfc_iocb_type {
	LPFC_UNKNOWN_IOCB,	/* unrecognized or unhandled command */
	LPFC_UNSOL_IOCB,	/* unsolicited (HBA-initiated receive) */
	LPFC_SOL_IOCB,		/* solicited (host-initiated command) */
	LPFC_ABORT_IOCB		/* abort/close exchange commands */
} lpfc_iocb_type;
68
69
70
71static int lpfc_sli_issue_mbox_s4(struct lpfc_hba *, LPFC_MBOXQ_t *,
72 uint32_t);
73static int lpfc_sli4_read_rev(struct lpfc_hba *, LPFC_MBOXQ_t *,
74 uint8_t *, uint32_t *);
75static struct lpfc_iocbq *lpfc_sli4_els_wcqe_to_rspiocbq(struct lpfc_hba *,
76 struct lpfc_iocbq *);
77static void lpfc_sli4_send_seq_to_ulp(struct lpfc_vport *,
78 struct hbq_dmabuf *);
79static void lpfc_sli4_handle_mds_loopback(struct lpfc_vport *vport,
80 struct hbq_dmabuf *dmabuf);
81static int lpfc_sli4_fp_handle_cqe(struct lpfc_hba *, struct lpfc_queue *,
82 struct lpfc_cqe *);
83static int lpfc_sli4_post_sgl_list(struct lpfc_hba *, struct list_head *,
84 int);
85static void lpfc_sli4_hba_handle_eqe(struct lpfc_hba *phba,
86 struct lpfc_eqe *eqe, uint32_t qidx);
87static bool lpfc_sli4_mbox_completions_pending(struct lpfc_hba *phba);
88static bool lpfc_sli4_process_missed_mbox_completions(struct lpfc_hba *phba);
89static int lpfc_sli4_abort_nvme_io(struct lpfc_hba *phba,
90 struct lpfc_sli_ring *pring,
91 struct lpfc_iocbq *cmdiocb);
92
/**
 * lpfc_get_iocb_from_iocbq - Get the IOCB embedded in an iocbq entry
 * @iocbq: pointer to the driver iocb object (must be non-NULL)
 *
 * Returns a pointer to the SLI-3 IOCB structure embedded in @iocbq.
 */
static IOCB_t *
lpfc_get_iocb_from_iocbq(struct lpfc_iocbq *iocbq)
{
	return &iocbq->iocb;
}
98
99
100
101
102
103
104
105
106
107
108
109
110
/**
 * lpfc_sli4_wq_put - Queue a Work Queue Entry on a Work Queue
 * @q: the Work Queue to post to
 * @wqe: the Work Queue Entry to copy into the queue
 *
 * Copies @wqe into the next free entry on @q, optionally mirrors it to the
 * Direct Packet Push (DPP) window, then rings the WQ doorbell to notify the
 * HBA and advances the host index.  The caller must serialize access to @q.
 *
 * Returns 0 on success, -ENOMEM if @q is NULL, -EBUSY if the queue is full,
 * or -EINVAL on an unrecognized doorbell format.
 */
static int
lpfc_sli4_wq_put(struct lpfc_queue *q, union lpfc_wqe128 *wqe)
{
	union lpfc_wqe *temp_wqe;
	struct lpfc_register doorbell;
	uint32_t host_index;
	uint32_t idx;
	uint32_t i = 0;
	uint8_t *tmp;

	/* sanity check on queue memory */
	if (unlikely(!q))
		return -ENOMEM;
	temp_wqe = q->qe[q->host_index].wqe;

	/* If the host has not yet processed the next entry then we are done */
	idx = ((q->host_index + 1) % q->entry_count);
	if (idx == q->hba_index) {
		q->WQ_overflow++;
		return -EBUSY;
	}
	q->WQ_posted++;
	/* set consumption flag every once in a while so the HBA reports
	 * back how far it has progressed (every entry_repost entries)
	 */
	if (!((q->host_index + 1) % q->entry_repost))
		bf_set(wqe_wqec, &wqe->generic.wqe_com, 1);
	else
		bf_set(wqe_wqec, &wqe->generic.wqe_com, 0);
	if (q->phba->sli3_options & LPFC_SLI4_PHWQ_ENABLED)
		bf_set(wqe_wqid, &wqe->generic.wqe_com, q->queue_id);
	lpfc_sli_pcimem_bcopy(wqe, temp_wqe, q->entry_size);
	if (q->dpp_enable && q->phba->cfg_enable_dpp) {
		/* write to DPP aperture taking advantage of dual BAR MPS */
		tmp = (uint8_t *)temp_wqe;
#ifdef __raw_writeq
		for (i = 0; i < q->entry_size; i += sizeof(uint64_t))
			__raw_writeq(*((uint64_t *)(tmp + i)),
					q->dpp_regaddr + i);
#else
		for (i = 0; i < q->entry_size; i += sizeof(uint32_t))
			__raw_writel(*((uint32_t *)(tmp + i)),
					q->dpp_regaddr + i);
#endif
	}
	/* ensure WQE bcopy and DPP flushed before doorbell write */
	wmb();

	/* Save off the index the doorbell (list format) must report */
	host_index = q->host_index;

	q->host_index = idx;

	/* Ring Doorbell */
	doorbell.word0 = 0;
	if (q->db_format == LPFC_DB_LIST_FORMAT) {
		if (q->dpp_enable && q->phba->cfg_enable_dpp) {
			bf_set(lpfc_if6_wq_db_list_fm_num_posted, &doorbell, 1);
			bf_set(lpfc_if6_wq_db_list_fm_dpp, &doorbell, 1);
			bf_set(lpfc_if6_wq_db_list_fm_dpp_id, &doorbell,
			    q->dpp_id);
			bf_set(lpfc_if6_wq_db_list_fm_id, &doorbell,
			    q->queue_id);
		} else {
			bf_set(lpfc_wq_db_list_fm_num_posted, &doorbell, 1);
			bf_set(lpfc_wq_db_list_fm_index, &doorbell, host_index);
			bf_set(lpfc_wq_db_list_fm_id, &doorbell, q->queue_id);
		}
	} else if (q->db_format == LPFC_DB_RING_FORMAT) {
		bf_set(lpfc_wq_db_ring_fm_num_posted, &doorbell, 1);
		bf_set(lpfc_wq_db_ring_fm_id, &doorbell, q->queue_id);
	} else {
		return -EINVAL;
	}
	writel(doorbell.word0, q->db_regaddr);

	return 0;
}
187
188
189
190
191
192
193
194
195
196
197
198
199static uint32_t
200lpfc_sli4_wq_release(struct lpfc_queue *q, uint32_t index)
201{
202 uint32_t released = 0;
203
204
205 if (unlikely(!q))
206 return 0;
207
208 if (q->hba_index == index)
209 return 0;
210 do {
211 q->hba_index = ((q->hba_index + 1) % q->entry_count);
212 released++;
213 } while (q->hba_index != index);
214 return released;
215}
216
217
218
219
220
221
222
223
224
225
226
227
228
/**
 * lpfc_sli4_mq_put - Queue a Mailbox Queue Entry on a Mailbox Queue
 * @q: the Mailbox Queue to post to
 * @mqe: the Mailbox Queue Entry to copy into the queue
 *
 * Copies @mqe into the next free entry on @q, rings the MQ doorbell and
 * advances the host index.  The caller must serialize access to @q.
 *
 * Returns 0 on success, or -ENOMEM if @q is NULL or the queue is full.
 *
 * NOTE(review): the return type is uint32_t but -ENOMEM (a negative errno)
 * is returned on failure; callers must treat any nonzero value as failure.
 * Consider changing the return type to int — verify all callers first.
 */
static uint32_t
lpfc_sli4_mq_put(struct lpfc_queue *q, struct lpfc_mqe *mqe)
{
	struct lpfc_mqe *temp_mqe;
	struct lpfc_register doorbell;

	/* sanity check on queue memory */
	if (unlikely(!q))
		return -ENOMEM;
	temp_mqe = q->qe[q->host_index].mqe;

	/* If the host has not yet processed the next entry then we are done */
	if (((q->host_index + 1) % q->entry_count) == q->hba_index)
		return -ENOMEM;
	lpfc_sli_pcimem_bcopy(mqe, temp_mqe, q->entry_size);
	/* Save off the mailbox pointer for completion */
	q->phba->mbox = (MAILBOX_t *)temp_mqe;

	/* Update the host index before invoking device */
	q->host_index = ((q->host_index + 1) % q->entry_count);

	/* Ring Doorbell */
	doorbell.word0 = 0;
	bf_set(lpfc_mq_doorbell_num_posted, &doorbell, 1);
	bf_set(lpfc_mq_doorbell_id, &doorbell, q->queue_id);
	writel(doorbell.word0, q->phba->sli4_hba.MQDBregaddr);
	return 0;
}
257
258
259
260
261
262
263
264
265
266
267
268static uint32_t
269lpfc_sli4_mq_release(struct lpfc_queue *q)
270{
271
272 if (unlikely(!q))
273 return 0;
274
275
276 q->phba->mbox = NULL;
277 q->hba_index = ((q->hba_index + 1) % q->entry_count);
278 return 1;
279}
280
281
282
283
284
285
286
287
288
289
/**
 * lpfc_sli4_eq_get - Gets the next valid EQE from an EQ
 * @q: the Event Queue to read from
 *
 * Returns a pointer to the next valid Event Queue Entry on @q, advancing
 * the internal hba index, or NULL if @q is NULL, the next entry is not
 * yet valid, or the queue has wrapped into unprocessed host entries.
 * The caller must serialize access to @q.
 */
static struct lpfc_eqe *
lpfc_sli4_eq_get(struct lpfc_queue *q)
{
	struct lpfc_hba *phba;
	struct lpfc_eqe *eqe;
	uint32_t idx;

	/* sanity check on queue memory */
	if (unlikely(!q))
		return NULL;
	phba = q->phba;
	eqe = q->qe[q->hba_index].eqe;

	/* If the next EQE is not valid then we are done */
	if (bf_get_le32(lpfc_eqe_valid, eqe) != q->qe_valid)
		return NULL;

	/* If the host has not yet processed the next entry then we are done */
	idx = ((q->hba_index + 1) % q->entry_count);
	if (idx == q->host_index)
		return NULL;

	q->hba_index = idx;
	/* When autovalid is used, the expected valid sense flips each time
	 * the queue wraps back to index 0.
	 */
	if (phba->sli4_hba.pc_sli4_params.eqav && !q->hba_index)
		q->qe_valid = (q->qe_valid) ? 0 : 1;

	/* Full barrier: keep the CPU from speculating/reordering reads of
	 * the EQE payload ahead of the valid-bit check above.
	 */
	mb();
	return eqe;
}
329
330
331
332
333
334
335inline void
336lpfc_sli4_eq_clr_intr(struct lpfc_queue *q)
337{
338 struct lpfc_register doorbell;
339
340 doorbell.word0 = 0;
341 bf_set(lpfc_eqcq_doorbell_eqci, &doorbell, 1);
342 bf_set(lpfc_eqcq_doorbell_qt, &doorbell, LPFC_QUEUE_TYPE_EVENT);
343 bf_set(lpfc_eqcq_doorbell_eqid_hi, &doorbell,
344 (q->queue_id >> LPFC_EQID_HI_FIELD_SHIFT));
345 bf_set(lpfc_eqcq_doorbell_eqid_lo, &doorbell, q->queue_id);
346 writel(doorbell.word0, q->phba->sli4_hba.EQDBregaddr);
347}
348
349
350
351
352
353
354inline void
355lpfc_sli4_if6_eq_clr_intr(struct lpfc_queue *q)
356{
357 struct lpfc_register doorbell;
358
359 doorbell.word0 = 0;
360 bf_set(lpfc_eqcq_doorbell_eqci, &doorbell, 1);
361 bf_set(lpfc_eqcq_doorbell_qt, &doorbell, LPFC_QUEUE_TYPE_EVENT);
362 bf_set(lpfc_eqcq_doorbell_eqid_hi, &doorbell,
363 (q->queue_id >> LPFC_EQID_HI_FIELD_SHIFT));
364 bf_set(lpfc_eqcq_doorbell_eqid_lo, &doorbell, q->queue_id);
365 writel(doorbell.word0, q->phba->sli4_hba.EQDBregaddr);
366}
367
368
369
370
371
372
373
374
375
376
377
378
379
380
381
382
/**
 * lpfc_sli4_eq_release - Indicates the host has finished processing an EQ
 * @q: the Event Queue that the host has completed processing for
 * @arm: true to rearm the queue so the HBA can generate another event
 *
 * Marks every EQE the host has consumed invalid (unless autovalid is in
 * use), then rings the doorbell reporting the number of released entries
 * and, when @arm, rearming the queue.  The caller must serialize access.
 *
 * Returns the number of EQEs released.
 */
uint32_t
lpfc_sli4_eq_release(struct lpfc_queue *q, bool arm)
{
	uint32_t released = 0;
	struct lpfc_hba *phba;
	struct lpfc_eqe *temp_eqe;
	struct lpfc_register doorbell;

	/* sanity check on queue memory */
	if (unlikely(!q))
		return 0;
	phba = q->phba;

	/* while there are valid entries */
	while (q->hba_index != q->host_index) {
		if (!phba->sli4_hba.pc_sli4_params.eqav) {
			temp_eqe = q->qe[q->host_index].eqe;
			bf_set_le32(lpfc_eqe_valid, temp_eqe, 0);
		}
		released++;
		q->host_index = ((q->host_index + 1) % q->entry_count);
	}
	if (unlikely(released == 0 && !arm))
		return 0;

	/* ring doorbell for number of entries released, optionally rearm */
	doorbell.word0 = 0;
	if (arm) {
		bf_set(lpfc_eqcq_doorbell_arm, &doorbell, 1);
		bf_set(lpfc_eqcq_doorbell_eqci, &doorbell, 1);
	}
	bf_set(lpfc_eqcq_doorbell_num_released, &doorbell, released);
	bf_set(lpfc_eqcq_doorbell_qt, &doorbell, LPFC_QUEUE_TYPE_EVENT);
	bf_set(lpfc_eqcq_doorbell_eqid_hi, &doorbell,
			(q->queue_id >> LPFC_EQID_HI_FIELD_SHIFT));
	bf_set(lpfc_eqcq_doorbell_eqid_lo, &doorbell, q->queue_id);
	writel(doorbell.word0, q->phba->sli4_hba.EQDBregaddr);
	/* PCI read to flush PCI pipeline on re-arming for INTx mode */
	if ((q->phba->intr_type == INTx) && (arm == LPFC_QUEUE_REARM))
		readl(q->phba->sli4_hba.EQDBregaddr);
	return released;
}
425
426
427
428
429
430
431
432
433
434
435
436
437
438
439
440
/**
 * lpfc_sli4_if6_eq_release - Indicates the host has finished processing an EQ
 * @q: the Event Queue that the host has completed processing for
 * @arm: true to rearm the queue so the HBA can generate another event
 *
 * if_type 6 variant of lpfc_sli4_eq_release(): same entry-invalidation
 * walk, but the doorbell is built with the if6 register layout.
 *
 * Returns the number of EQEs released.
 */
uint32_t
lpfc_sli4_if6_eq_release(struct lpfc_queue *q, bool arm)
{
	uint32_t released = 0;
	struct lpfc_hba *phba;
	struct lpfc_eqe *temp_eqe;
	struct lpfc_register doorbell;

	/* sanity check on queue memory */
	if (unlikely(!q))
		return 0;
	phba = q->phba;

	/* while there are valid entries */
	while (q->hba_index != q->host_index) {
		if (!phba->sli4_hba.pc_sli4_params.eqav) {
			temp_eqe = q->qe[q->host_index].eqe;
			bf_set_le32(lpfc_eqe_valid, temp_eqe, 0);
		}
		released++;
		q->host_index = ((q->host_index + 1) % q->entry_count);
	}
	if (unlikely(released == 0 && !arm))
		return 0;

	/* ring doorbell for number of entries released, optionally rearm */
	doorbell.word0 = 0;
	if (arm)
		bf_set(lpfc_if6_eq_doorbell_arm, &doorbell, 1);
	bf_set(lpfc_if6_eq_doorbell_num_released, &doorbell, released);
	bf_set(lpfc_if6_eq_doorbell_eqid, &doorbell, q->queue_id);
	writel(doorbell.word0, q->phba->sli4_hba.EQDBregaddr);
	/* PCI read to flush PCI pipeline on re-arming for INTx mode */
	if ((q->phba->intr_type == INTx) && (arm == LPFC_QUEUE_REARM))
		readl(q->phba->sli4_hba.EQDBregaddr);
	return released;
}
478
479
480
481
482
483
484
485
486
487
/**
 * lpfc_sli4_cq_get - Gets the next valid CQE from a CQ
 * @q: the Completion Queue to read from
 *
 * Returns a pointer to the next valid Completion Queue Entry on @q,
 * advancing the internal hba index, or NULL if @q is NULL, the next entry
 * is not yet valid, or the queue has wrapped into unprocessed host
 * entries.  The caller must serialize access to @q.
 */
static struct lpfc_cqe *
lpfc_sli4_cq_get(struct lpfc_queue *q)
{
	struct lpfc_hba *phba;
	struct lpfc_cqe *cqe;
	uint32_t idx;

	/* sanity check on queue memory */
	if (unlikely(!q))
		return NULL;
	phba = q->phba;
	cqe = q->qe[q->hba_index].cqe;

	/* If the next CQE is not valid then we are done */
	if (bf_get_le32(lpfc_cqe_valid, cqe) != q->qe_valid)
		return NULL;

	/* If the host has not yet processed the next entry then we are done */
	idx = ((q->hba_index + 1) % q->entry_count);
	if (idx == q->host_index)
		return NULL;

	q->hba_index = idx;
	/* When autovalid is used, the expected valid sense flips each time
	 * the queue wraps back to index 0.
	 */
	if (phba->sli4_hba.pc_sli4_params.cqav && !q->hba_index)
		q->qe_valid = (q->qe_valid) ? 0 : 1;

	/* Full barrier: keep the CPU from speculating/reordering reads of
	 * the CQE payload ahead of the valid-bit check above.
	 */
	mb();
	return cqe;
}
525
526
527
528
529
530
531
532
533
534
535
536
537
538
539
540
/**
 * lpfc_sli4_cq_release - Indicates the host has finished processing a CQ
 * @q: the Completion Queue that the host has completed processing for
 * @arm: true to rearm the queue so the HBA can generate another event
 *
 * Marks every CQE the host has consumed invalid (unless autovalid is in
 * use), then rings the doorbell reporting the number of released entries
 * and, when @arm, rearming the queue.  The caller must serialize access.
 *
 * Returns the number of CQEs released.
 */
uint32_t
lpfc_sli4_cq_release(struct lpfc_queue *q, bool arm)
{
	uint32_t released = 0;
	struct lpfc_hba *phba;
	struct lpfc_cqe *temp_qe;
	struct lpfc_register doorbell;

	/* sanity check on queue memory */
	if (unlikely(!q))
		return 0;
	phba = q->phba;

	/* while there are valid entries */
	while (q->hba_index != q->host_index) {
		if (!phba->sli4_hba.pc_sli4_params.cqav) {
			temp_qe = q->qe[q->host_index].cqe;
			bf_set_le32(lpfc_cqe_valid, temp_qe, 0);
		}
		released++;
		q->host_index = ((q->host_index + 1) % q->entry_count);
	}
	if (unlikely(released == 0 && !arm))
		return 0;

	/* ring doorbell for number of entries released, optionally rearm */
	doorbell.word0 = 0;
	if (arm)
		bf_set(lpfc_eqcq_doorbell_arm, &doorbell, 1);
	bf_set(lpfc_eqcq_doorbell_num_released, &doorbell, released);
	bf_set(lpfc_eqcq_doorbell_qt, &doorbell, LPFC_QUEUE_TYPE_COMPLETION);
	bf_set(lpfc_eqcq_doorbell_cqid_hi, &doorbell,
			(q->queue_id >> LPFC_CQID_HI_FIELD_SHIFT));
	bf_set(lpfc_eqcq_doorbell_cqid_lo, &doorbell, q->queue_id);
	writel(doorbell.word0, q->phba->sli4_hba.CQDBregaddr);
	return released;
}
578
579
580
581
582
583
584
585
586
587
588
589
590
591
592
593
/**
 * lpfc_sli4_if6_cq_release - Indicates the host has finished processing a CQ
 * @q: the Completion Queue that the host has completed processing for
 * @arm: true to rearm the queue so the HBA can generate another event
 *
 * if_type 6 variant of lpfc_sli4_cq_release(): same entry-invalidation
 * walk, but the doorbell is built with the if6 register layout.
 *
 * Returns the number of CQEs released.
 */
uint32_t
lpfc_sli4_if6_cq_release(struct lpfc_queue *q, bool arm)
{
	uint32_t released = 0;
	struct lpfc_hba *phba;
	struct lpfc_cqe *temp_qe;
	struct lpfc_register doorbell;

	/* sanity check on queue memory */
	if (unlikely(!q))
		return 0;
	phba = q->phba;

	/* while there are valid entries */
	while (q->hba_index != q->host_index) {
		if (!phba->sli4_hba.pc_sli4_params.cqav) {
			temp_qe = q->qe[q->host_index].cqe;
			bf_set_le32(lpfc_cqe_valid, temp_qe, 0);
		}
		released++;
		q->host_index = ((q->host_index + 1) % q->entry_count);
	}
	if (unlikely(released == 0 && !arm))
		return 0;

	/* ring doorbell for number of entries released, optionally rearm */
	doorbell.word0 = 0;
	if (arm)
		bf_set(lpfc_if6_cq_doorbell_arm, &doorbell, 1);
	bf_set(lpfc_if6_cq_doorbell_num_released, &doorbell, released);
	bf_set(lpfc_if6_cq_doorbell_cqid, &doorbell, q->queue_id);
	writel(doorbell.word0, q->phba->sli4_hba.CQDBregaddr);
	return released;
}
628
629
630
631
632
633
634
635
636
637
638
639
640
/**
 * lpfc_sli4_rq_put - Put a Receive Buffer Queue Entry on a Receive Queue
 * @hq: the header Receive Queue
 * @dq: the data Receive Queue (must track @hq in lockstep)
 * @hrqe: the header Receive Queue Entry to post
 * @drqe: the data Receive Queue Entry to post
 *
 * Copies @hrqe/@drqe into the next free entries of the paired queues and,
 * every @hq->entry_repost postings, rings the RQ doorbell to hand the new
 * buffers to the HBA.  The caller must serialize access to both queues.
 *
 * Returns the index the entries were posted at on success, -ENOMEM if a
 * queue pointer is NULL, -EBUSY if the queues are full, or -EINVAL if the
 * queue types/indices are inconsistent or the doorbell format is unknown.
 */
int
lpfc_sli4_rq_put(struct lpfc_queue *hq, struct lpfc_queue *dq,
		 struct lpfc_rqe *hrqe, struct lpfc_rqe *drqe)
{
	struct lpfc_rqe *temp_hrqe;
	struct lpfc_rqe *temp_drqe;
	struct lpfc_register doorbell;
	int hq_put_index;
	int dq_put_index;

	/* sanity check on queue memory */
	if (unlikely(!hq) || unlikely(!dq))
		return -ENOMEM;
	hq_put_index = hq->host_index;
	dq_put_index = dq->host_index;
	temp_hrqe = hq->qe[hq_put_index].rqe;
	temp_drqe = dq->qe[dq_put_index].rqe;
	/* header and data queues must be a matched pair, posted in lockstep */
	if (hq->type != LPFC_HRQ || dq->type != LPFC_DRQ)
		return -EINVAL;
	if (hq_put_index != dq_put_index)
		return -EINVAL;
	/* If the host has not yet processed the next entry then we are done */
	if (((hq_put_index + 1) % hq->entry_count) == hq->hba_index)
		return -EBUSY;
	lpfc_sli_pcimem_bcopy(hrqe, temp_hrqe, hq->entry_size);
	lpfc_sli_pcimem_bcopy(drqe, temp_drqe, dq->entry_size);

	/* Update the host index to point to the next slot */
	hq->host_index = ((hq_put_index + 1) % hq->entry_count);
	dq->host_index = ((dq_put_index + 1) % dq->entry_count);
	hq->RQ_buf_posted++;

	/* Ring The Header Receive Queue Doorbell (batched per entry_repost) */
	if (!(hq->host_index % hq->entry_repost)) {
		doorbell.word0 = 0;
		if (hq->db_format == LPFC_DB_RING_FORMAT) {
			bf_set(lpfc_rq_db_ring_fm_num_posted, &doorbell,
			       hq->entry_repost);
			bf_set(lpfc_rq_db_ring_fm_id, &doorbell, hq->queue_id);
		} else if (hq->db_format == LPFC_DB_LIST_FORMAT) {
			bf_set(lpfc_rq_db_list_fm_num_posted, &doorbell,
			       hq->entry_repost);
			bf_set(lpfc_rq_db_list_fm_index, &doorbell,
			       hq->host_index);
			bf_set(lpfc_rq_db_list_fm_id, &doorbell, hq->queue_id);
		} else {
			return -EINVAL;
		}
		writel(doorbell.word0, hq->db_regaddr);
	}
	return hq_put_index;
}
694
695
696
697
698
699
700
701
702
703
704
705static uint32_t
706lpfc_sli4_rq_release(struct lpfc_queue *hq, struct lpfc_queue *dq)
707{
708
709 if (unlikely(!hq) || unlikely(!dq))
710 return 0;
711
712 if ((hq->type != LPFC_HRQ) || (dq->type != LPFC_DRQ))
713 return 0;
714 hq->hba_index = ((hq->hba_index + 1) % hq->entry_count);
715 dq->hba_index = ((dq->hba_index + 1) % dq->entry_count);
716 return 1;
717}
718
719
720
721
722
723
724
725
726
727
728
729static inline IOCB_t *
730lpfc_cmd_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring)
731{
732 return (IOCB_t *) (((char *) pring->sli.sli3.cmdringaddr) +
733 pring->sli.sli3.cmdidx * phba->iocb_cmd_size);
734}
735
736
737
738
739
740
741
742
743
744
745
746static inline IOCB_t *
747lpfc_resp_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring)
748{
749 return (IOCB_t *) (((char *) pring->sli.sli3.rspringaddr) +
750 pring->sli.sli3.rspidx * phba->iocb_rsp_size);
751}
752
753
754
755
756
757
758
759
760
761
762struct lpfc_iocbq *
763__lpfc_sli_get_iocbq(struct lpfc_hba *phba)
764{
765 struct list_head *lpfc_iocb_list = &phba->lpfc_iocb_list;
766 struct lpfc_iocbq * iocbq = NULL;
767
768 lockdep_assert_held(&phba->hbalock);
769
770 list_remove_head(lpfc_iocb_list, iocbq, struct lpfc_iocbq, list);
771 if (iocbq)
772 phba->iocb_cnt++;
773 if (phba->iocb_cnt > phba->iocb_max)
774 phba->iocb_max = phba->iocb_cnt;
775 return iocbq;
776}
777
778
779
780
781
782
783
784
785
786
787
788
789
790struct lpfc_sglq *
791__lpfc_clear_active_sglq(struct lpfc_hba *phba, uint16_t xritag)
792{
793 struct lpfc_sglq *sglq;
794
795 sglq = phba->sli4_hba.lpfc_sglq_active_list[xritag];
796 phba->sli4_hba.lpfc_sglq_active_list[xritag] = NULL;
797 return sglq;
798}
799
800
801
802
803
804
805
806
807
808
809
810
811
812struct lpfc_sglq *
813__lpfc_get_active_sglq(struct lpfc_hba *phba, uint16_t xritag)
814{
815 struct lpfc_sglq *sglq;
816
817 sglq = phba->sli4_hba.lpfc_sglq_active_list[xritag];
818 return sglq;
819}
820
821
822
823
824
825
826
827
/**
 * lpfc_clr_rrq_active - Clears RRQ active bit in xri_bitmap
 * @phba: pointer to the HBA context object
 * @xritag: XRI whose RRQ-active bit should be cleared
 * @rrq: the RRQ being retired; always freed back to the pool on return
 *
 * Looks up the node the RRQ belongs to, clears its RRQ-active bit for
 * @xritag, and frees @rrq.  If no node can be resolved, @rrq is freed
 * without touching any bitmap.
 */
void
lpfc_clr_rrq_active(struct lpfc_hba *phba,
		    uint16_t xritag,
		    struct lpfc_node_rrq *rrq)
{
	struct lpfc_nodelist *ndlp = NULL;

	/* NOTE(review): the node-active check is done on rrq->ndlp but the
	 * lookup goes through the vport by DID — presumably to get a fresh,
	 * still-registered node pointer; confirm against nodelist lifetime
	 * rules.
	 */
	if ((rrq->vport) && NLP_CHK_NODE_ACT(rrq->ndlp))
		ndlp = lpfc_findnode_did(rrq->vport, rrq->nlp_DID);

	/* Fall back to the node cached in the rrq itself if the DID lookup
	 * found nothing.
	 */
	if ((!ndlp) && rrq->ndlp)
		ndlp = rrq->ndlp;

	if (!ndlp)
		goto out;

	/* Only reset the rrq bookkeeping if the bit was actually set */
	if (test_and_clear_bit(xritag, ndlp->active_rrqs_xri_bitmap)) {
		rrq->send_rrq = 0;
		rrq->xritag = 0;
		rrq->rrq_stop_time = 0;
	}
out:
	mempool_free(rrq, phba->rrq_pool);
}
856
857
858
859
860
861
862
863
864
865
866
867
868
869
870
/**
 * lpfc_handle_rrq_active - Checks if RRQ has waited RATOV for xri to be rel
 * @phba: pointer to the HBA context object
 *
 * Walks the active RRQ list under the hbalock: entries whose stop time has
 * elapsed are moved to a private list; for the rest, the soonest stop time
 * is computed and the RRQ timer re-armed.  The moved entries are then
 * processed outside the lock — either an RRQ ELS is sent or the RRQ is
 * simply cleared.
 */
void
lpfc_handle_rrq_active(struct lpfc_hba *phba)
{
	struct lpfc_node_rrq *rrq;
	struct lpfc_node_rrq *nextrrq;
	unsigned long next_time;
	unsigned long iflags;
	LIST_HEAD(send_rrq);

	spin_lock_irqsave(&phba->hbalock, iflags);
	phba->hba_flag &= ~HBA_RRQ_ACTIVE;
	/* default next wakeup: one RATOV (+1s) from now */
	next_time = jiffies + msecs_to_jiffies(1000 * (phba->fc_ratov + 1));
	list_for_each_entry_safe(rrq, nextrrq,
				 &phba->active_rrq_list, list) {
		if (time_after(jiffies, rrq->rrq_stop_time))
			list_move(&rrq->list, &send_rrq);
		else if (time_before(rrq->rrq_stop_time, next_time))
			next_time = rrq->rrq_stop_time;
	}
	spin_unlock_irqrestore(&phba->hbalock, iflags);
	/* re-arm the timer only if work remains and we are not unloading */
	if ((!list_empty(&phba->active_rrq_list)) &&
	    (!(phba->pport->load_flag & FC_UNLOADING)))
		mod_timer(&phba->rrq_tmr, next_time);
	/* process expired entries outside the lock */
	list_for_each_entry_safe(rrq, nextrrq, &send_rrq, list) {
		list_del(&rrq->list);
		if (!rrq->send_rrq)
			/* this call will free the rrq */
			lpfc_clr_rrq_active(phba, rrq->xritag, rrq);
		else if (lpfc_send_rrq(phba, rrq)) {
			/* if we send the rrq then the completion handler
			 * will clear the bit in the xribitmap; on failure
			 * clear (and free) it here instead.
			 */
			lpfc_clr_rrq_active(phba, rrq->xritag,
					    rrq);
		}
	}
}
908
909
910
911
912
913
914
915
916
917
918struct lpfc_node_rrq *
919lpfc_get_active_rrq(struct lpfc_vport *vport, uint16_t xri, uint32_t did)
920{
921 struct lpfc_hba *phba = vport->phba;
922 struct lpfc_node_rrq *rrq;
923 struct lpfc_node_rrq *nextrrq;
924 unsigned long iflags;
925
926 if (phba->sli_rev != LPFC_SLI_REV4)
927 return NULL;
928 spin_lock_irqsave(&phba->hbalock, iflags);
929 list_for_each_entry_safe(rrq, nextrrq, &phba->active_rrq_list, list) {
930 if (rrq->vport == vport && rrq->xritag == xri &&
931 rrq->nlp_DID == did){
932 list_del(&rrq->list);
933 spin_unlock_irqrestore(&phba->hbalock, iflags);
934 return rrq;
935 }
936 }
937 spin_unlock_irqrestore(&phba->hbalock, iflags);
938 return NULL;
939}
940
941
942
943
944
945
946
947
948
/**
 * lpfc_cleanup_vports_rrqs - Remove and clear the active RRQ for this vport
 * @vport: pointer to the vport object
 * @ndlp: the node to clean up for, or NULL to clean the whole vport
 *
 * Moves all matching RRQs off the active list under the hbalock, then
 * clears and frees them outside the lock.  When @ndlp is NULL the vport's
 * aborted-XRI lists are also purged.  SLI-4 only.
 */
void
lpfc_cleanup_vports_rrqs(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)

{
	struct lpfc_hba *phba = vport->phba;
	struct lpfc_node_rrq *rrq;
	struct lpfc_node_rrq *nextrrq;
	unsigned long iflags;
	LIST_HEAD(rrq_list);

	if (phba->sli_rev != LPFC_SLI_REV4)
		return;
	/* vport-wide cleanup also flushes the aborted-XRI event lists */
	if (!ndlp) {
		lpfc_sli4_vport_delete_els_xri_aborted(vport);
		lpfc_sli4_vport_delete_fcp_xri_aborted(vport);
	}
	spin_lock_irqsave(&phba->hbalock, iflags);
	list_for_each_entry_safe(rrq, nextrrq, &phba->active_rrq_list, list)
		if ((rrq->vport == vport) && (!ndlp || rrq->ndlp == ndlp))
			list_move(&rrq->list, &rrq_list);
	spin_unlock_irqrestore(&phba->hbalock, iflags);

	/* clear/free outside the lock; lpfc_clr_rrq_active frees each rrq */
	list_for_each_entry_safe(rrq, nextrrq, &rrq_list, list) {
		list_del(&rrq->list);
		lpfc_clr_rrq_active(phba, rrq->xritag, rrq);
	}
}
976
977
978
979
980
981
982
983
984
985
986
987int
988lpfc_test_rrq_active(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp,
989 uint16_t xritag)
990{
991 lockdep_assert_held(&phba->hbalock);
992 if (!ndlp)
993 return 0;
994 if (!ndlp->active_rrqs_xri_bitmap)
995 return 0;
996 if (test_bit(xritag, ndlp->active_rrqs_xri_bitmap))
997 return 1;
998 else
999 return 0;
1000}
1001
1002
1003
1004
1005
1006
1007
1008
1009
1010
1011
1012
1013
1014
1015
1016
/**
 * lpfc_set_rrq_active - set RRQ active bit in xri_bitmap.
 * @phba: pointer to the HBA context object
 * @ndlp: the node the XRI is associated with
 * @xritag: the XRI to mark RRQ-active
 * @rxid: the RX exchange id recorded in the RRQ
 * @send_rrq: nonzero to request that an RRQ ELS be sent later
 *
 * Atomically sets the RRQ-active bit for @xritag on @ndlp, allocates a
 * tracking rrq object, and queues it on the active RRQ list (waking the
 * worker if the list was empty).
 *
 * Returns 0 on success, -EINVAL on any failure (unloading, inactive node,
 * bit already set, or allocation failure).
 */
int
lpfc_set_rrq_active(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp,
		    uint16_t xritag, uint16_t rxid, uint16_t send_rrq)
{
	unsigned long iflags;
	struct lpfc_node_rrq *rrq;
	int empty;

	if (!ndlp)
		return -EINVAL;

	if (!phba->cfg_enable_rrq)
		return -EINVAL;

	spin_lock_irqsave(&phba->hbalock, iflags);
	if (phba->pport->load_flag & FC_UNLOADING) {
		phba->hba_flag &= ~HBA_RRQ_ACTIVE;
		goto out;
	}

	/*
	 * set the active bit even if there is no mem available.
	 */
	if (NLP_CHK_FREE_REQ(ndlp))
		goto out;

	if (ndlp->vport && (ndlp->vport->load_flag & FC_UNLOADING))
		goto out;

	if (!ndlp->active_rrqs_xri_bitmap)
		goto out;

	/* already marked active — nothing to do */
	if (test_and_set_bit(xritag, ndlp->active_rrqs_xri_bitmap))
		goto out;

	/* drop the lock for the GFP_KERNEL allocation (may sleep) */
	spin_unlock_irqrestore(&phba->hbalock, iflags);
	rrq = mempool_alloc(phba->rrq_pool, GFP_KERNEL);
	if (!rrq) {
		/* NOTE(review): on allocation failure the xri bit set above
		 * is left set with no rrq object to ever clear it — confirm
		 * whether that is the intended conservative behavior.
		 */
		lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
				"3155 Unable to allocate RRQ xri:0x%x rxid:0x%x"
				" DID:0x%x Send:%d\n",
				xritag, rxid, ndlp->nlp_DID, send_rrq);
		return -EINVAL;
	}
	if (phba->cfg_enable_rrq == 1)
		rrq->send_rrq = send_rrq;
	else
		rrq->send_rrq = 0;
	rrq->xritag = xritag;
	/* the rrq expires one RATOV (+1s) from now */
	rrq->rrq_stop_time = jiffies +
				msecs_to_jiffies(1000 * (phba->fc_ratov + 1));
	rrq->ndlp = ndlp;
	rrq->nlp_DID = ndlp->nlp_DID;
	rrq->vport = ndlp->vport;
	rrq->rxid = rxid;
	spin_lock_irqsave(&phba->hbalock, iflags);
	empty = list_empty(&phba->active_rrq_list);
	list_add_tail(&rrq->list, &phba->active_rrq_list);
	phba->hba_flag |= HBA_RRQ_ACTIVE;
	/* first entry on the list: kick the worker to arm the rrq timer */
	if (empty)
		lpfc_worker_wake_up(phba);
	spin_unlock_irqrestore(&phba->hbalock, iflags);
	return 0;
out:
	spin_unlock_irqrestore(&phba->hbalock, iflags);
	lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
			"2921 Can't set rrq active xri:0x%x rxid:0x%x"
			" DID:0x%x Send:%d\n",
			xritag, rxid, ndlp->nlp_DID, send_rrq);
	return -EINVAL;
}
1088
1089
1090
1091
1092
1093
1094
1095
1096
1097
1098
/**
 * __lpfc_sli_get_els_sglq - Allocates an iocb object from sgl pool
 * @phba: pointer to the HBA context object (hbalock must be held)
 * @piocbq: the iocb the sglq is being allocated for
 *
 * Removes a sglq from the free ELS sgl list, skipping any whose XRI is
 * still RRQ-active for the target node (such entries are rotated to the
 * back of the list).  If every free sglq is RRQ-blocked the search gives
 * up when it sees the first-tried entry again.
 *
 * Returns the allocated sglq (marked SGL_ALLOCATED and registered in the
 * active list), or NULL if none is usable.
 */
static struct lpfc_sglq *
__lpfc_sli_get_els_sglq(struct lpfc_hba *phba, struct lpfc_iocbq *piocbq)
{
	struct list_head *lpfc_els_sgl_list = &phba->sli4_hba.lpfc_els_sgl_list;
	struct lpfc_sglq *sglq = NULL;
	struct lpfc_sglq *start_sglq = NULL;
	struct lpfc_scsi_buf *lpfc_cmd;
	struct lpfc_nodelist *ndlp;
	int found = 0;

	lockdep_assert_held(&phba->hbalock);

	/* Resolve the target node from whichever context field the iocb
	 * type stores it in.
	 */
	if (piocbq->iocb_flag &  LPFC_IO_FCP) {
		lpfc_cmd = (struct lpfc_scsi_buf *) piocbq->context1;
		ndlp = lpfc_cmd->rdata->pnode;
	} else  if ((piocbq->iocb.ulpCommand == CMD_GEN_REQUEST64_CR) &&
			!(piocbq->iocb_flag & LPFC_IO_LIBDFC)) {
		ndlp = piocbq->context_un.ndlp;
	} else  if (piocbq->iocb_flag & LPFC_IO_LIBDFC) {
		if (piocbq->iocb_flag & LPFC_IO_LOOPBACK)
			ndlp = NULL;
		else
			ndlp = piocbq->context_un.ndlp;
	} else {
		ndlp = piocbq->context1;
	}

	spin_lock(&phba->sli4_hba.sgl_list_lock);
	list_remove_head(lpfc_els_sgl_list, sglq, struct lpfc_sglq, list);
	start_sglq = sglq;
	while (!found) {
		if (!sglq)
			break;
		if (ndlp && ndlp->active_rrqs_xri_bitmap &&
		    test_bit(sglq->sli4_lxritag,
		    ndlp->active_rrqs_xri_bitmap)) {
			/* This xri has an rrq outstanding for this DID.
			 * put it back in the list and try another.
			 */
			list_add_tail(&sglq->list, lpfc_els_sgl_list);
			sglq = NULL;
			list_remove_head(lpfc_els_sgl_list, sglq,
						struct lpfc_sglq, list);
			if (sglq == start_sglq) {
				/* wrapped all the way around — every free
				 * sglq is rrq-blocked; give up.
				 */
				list_add_tail(&sglq->list, lpfc_els_sgl_list);
				sglq = NULL;
				break;
			} else
				continue;
		}
		sglq->ndlp = ndlp;
		found = 1;
		phba->sli4_hba.lpfc_sglq_active_list[sglq->sli4_lxritag] = sglq;
		sglq->state = SGL_ALLOCATED;
	}
	spin_unlock(&phba->sli4_hba.sgl_list_lock);
	return sglq;
}
1157
1158
1159
1160
1161
1162
1163
1164
1165
1166
1167
1168struct lpfc_sglq *
1169__lpfc_sli_get_nvmet_sglq(struct lpfc_hba *phba, struct lpfc_iocbq *piocbq)
1170{
1171 struct list_head *lpfc_nvmet_sgl_list;
1172 struct lpfc_sglq *sglq = NULL;
1173
1174 lpfc_nvmet_sgl_list = &phba->sli4_hba.lpfc_nvmet_sgl_list;
1175
1176 lockdep_assert_held(&phba->sli4_hba.sgl_list_lock);
1177
1178 list_remove_head(lpfc_nvmet_sgl_list, sglq, struct lpfc_sglq, list);
1179 if (!sglq)
1180 return NULL;
1181 phba->sli4_hba.lpfc_sglq_active_list[sglq->sli4_lxritag] = sglq;
1182 sglq->state = SGL_ALLOCATED;
1183 return sglq;
1184}
1185
1186
1187
1188
1189
1190
1191
1192
1193
1194
1195struct lpfc_iocbq *
1196lpfc_sli_get_iocbq(struct lpfc_hba *phba)
1197{
1198 struct lpfc_iocbq * iocbq = NULL;
1199 unsigned long iflags;
1200
1201 spin_lock_irqsave(&phba->hbalock, iflags);
1202 iocbq = __lpfc_sli_get_iocbq(phba);
1203 spin_unlock_irqrestore(&phba->hbalock, iflags);
1204 return iocbq;
1205}
1206
1207
1208
1209
1210
1211
1212
1213
1214
1215
1216
1217
1218
1219
1220
1221
1222
1223
1224
/**
 * __lpfc_sli_release_iocbq_s4 - Release iocb to the iocb pool (SLI-4)
 * @phba: pointer to the HBA context object (hbalock must be held)
 * @iocbq: the iocb object to release
 *
 * Returns the iocb's sglq to the appropriate free list (nvmet, abort, or
 * ELS depending on the iocb flags and sgl state), clears the iocb's
 * command/response fields and returns it to the iocb pool.
 */
static void
__lpfc_sli_release_iocbq_s4(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq)
{
	struct lpfc_sglq *sglq;
	size_t start_clean = offsetof(struct lpfc_iocbq, iocb);
	unsigned long iflag = 0;
	struct lpfc_sli_ring *pring;

	lockdep_assert_held(&phba->hbalock);

	if (iocbq->sli4_xritag == NO_XRI)
		sglq = NULL;
	else
		sglq = __lpfc_clear_active_sglq(phba, iocbq->sli4_lxritag);

	/* Return the sglq to the correct free list */
	if (sglq)  {
		if (iocbq->iocb_flag & LPFC_IO_NVMET) {
			spin_lock_irqsave(&phba->sli4_hba.sgl_list_lock,
					  iflag);
			sglq->state = SGL_FREED;
			sglq->ndlp = NULL;
			list_add_tail(&sglq->list,
				      &phba->sli4_hba.lpfc_nvmet_sgl_list);
			spin_unlock_irqrestore(
				&phba->sli4_hba.sgl_list_lock, iflag);
			goto out;
		}

		pring = phba->sli4_hba.els_wq->pring;
		/* If the exchange is still busy the sgl cannot be reused
		 * yet — park it on the aborted list until the XRI_ABORTED
		 * event arrives.
		 */
		if ((iocbq->iocb_flag & LPFC_EXCHANGE_BUSY) &&
			(sglq->state != SGL_XRI_ABORTED)) {
			spin_lock_irqsave(&phba->sli4_hba.sgl_list_lock,
					  iflag);
			list_add(&sglq->list,
				 &phba->sli4_hba.lpfc_abts_els_sgl_list);
			spin_unlock_irqrestore(
				&phba->sli4_hba.sgl_list_lock, iflag);
		} else {
			spin_lock_irqsave(&phba->sli4_hba.sgl_list_lock,
					  iflag);
			sglq->state = SGL_FREED;
			sglq->ndlp = NULL;
			list_add_tail(&sglq->list,
				      &phba->sli4_hba.lpfc_els_sgl_list);
			spin_unlock_irqrestore(
				&phba->sli4_hba.sgl_list_lock, iflag);

			/* Check if TXQ queue needs to be serviced now that
			 * an sgl is available again.
			 */
			if (!list_empty(&pring->txq))
				lpfc_worker_wake_up(phba);
		}
	}

out:
	/*
	 * Clean all volatile data fields, preserve iotag and node struct.
	 */
	memset((char *)iocbq + start_clean, 0, sizeof(*iocbq) - start_clean);
	iocbq->sli4_lxritag = NO_XRI;
	iocbq->sli4_xritag = NO_XRI;
	iocbq->iocb_flag &= ~(LPFC_IO_NVME | LPFC_IO_NVMET |
			      LPFC_IO_NVME_LS);
	list_add_tail(&iocbq->list, &phba->lpfc_iocb_list);
}
1290
1291
1292
1293
1294
1295
1296
1297
1298
1299
1300
1301
1302static void
1303__lpfc_sli_release_iocbq_s3(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq)
1304{
1305 size_t start_clean = offsetof(struct lpfc_iocbq, iocb);
1306
1307 lockdep_assert_held(&phba->hbalock);
1308
1309
1310
1311
1312 memset((char*)iocbq + start_clean, 0, sizeof(*iocbq) - start_clean);
1313 iocbq->sli4_xritag = NO_XRI;
1314 list_add_tail(&iocbq->list, &phba->lpfc_iocb_list);
1315}
1316
1317
1318
1319
1320
1321
1322
1323
1324
1325
1326
/**
 * __lpfc_sli_release_iocbq - Release iocb to the iocb pool
 * @phba: pointer to the HBA context object (hbalock must be held)
 * @iocbq: the iocb object to release
 *
 * Dispatches to the SLI-3 or SLI-4 release routine via the per-HBA
 * function pointer, then decrements the outstanding-iocb count.
 */
static void
__lpfc_sli_release_iocbq(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq)
{
	lockdep_assert_held(&phba->hbalock);

	phba->__lpfc_sli_release_iocbq(phba, iocbq);
	phba->iocb_cnt--;
}
1335
1336
1337
1338
1339
1340
1341
1342
1343
1344void
1345lpfc_sli_release_iocbq(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq)
1346{
1347 unsigned long iflags;
1348
1349
1350
1351
1352 spin_lock_irqsave(&phba->hbalock, iflags);
1353 __lpfc_sli_release_iocbq(phba, iocbq);
1354 spin_unlock_irqrestore(&phba->hbalock, iflags);
1355}
1356
1357
1358
1359
1360
1361
1362
1363
1364
1365
1366
1367
1368
1369void
1370lpfc_sli_cancel_iocbs(struct lpfc_hba *phba, struct list_head *iocblist,
1371 uint32_t ulpstatus, uint32_t ulpWord4)
1372{
1373 struct lpfc_iocbq *piocb;
1374
1375 while (!list_empty(iocblist)) {
1376 list_remove_head(iocblist, piocb, struct lpfc_iocbq, list);
1377 if (!piocb->iocb_cmpl)
1378 lpfc_sli_release_iocbq(phba, piocb);
1379 else {
1380 piocb->iocb.ulpStatus = ulpstatus;
1381 piocb->iocb.un.ulpWord[4] = ulpWord4;
1382 (piocb->iocb_cmpl) (phba, piocb, piocb);
1383 }
1384 }
1385 return;
1386}
1387
1388
1389
1390
1391
1392
1393
1394
1395
1396
1397
1398
1399
1400
1401
1402
1403static lpfc_iocb_type
1404lpfc_sli_iocb_cmd_type(uint8_t iocb_cmnd)
1405{
1406 lpfc_iocb_type type = LPFC_UNKNOWN_IOCB;
1407
1408 if (iocb_cmnd > CMD_MAX_IOCB_CMD)
1409 return 0;
1410
1411 switch (iocb_cmnd) {
1412 case CMD_XMIT_SEQUENCE_CR:
1413 case CMD_XMIT_SEQUENCE_CX:
1414 case CMD_XMIT_BCAST_CN:
1415 case CMD_XMIT_BCAST_CX:
1416 case CMD_ELS_REQUEST_CR:
1417 case CMD_ELS_REQUEST_CX:
1418 case CMD_CREATE_XRI_CR:
1419 case CMD_CREATE_XRI_CX:
1420 case CMD_GET_RPI_CN:
1421 case CMD_XMIT_ELS_RSP_CX:
1422 case CMD_GET_RPI_CR:
1423 case CMD_FCP_IWRITE_CR:
1424 case CMD_FCP_IWRITE_CX:
1425 case CMD_FCP_IREAD_CR:
1426 case CMD_FCP_IREAD_CX:
1427 case CMD_FCP_ICMND_CR:
1428 case CMD_FCP_ICMND_CX:
1429 case CMD_FCP_TSEND_CX:
1430 case CMD_FCP_TRSP_CX:
1431 case CMD_FCP_TRECEIVE_CX:
1432 case CMD_FCP_AUTO_TRSP_CX:
1433 case CMD_ADAPTER_MSG:
1434 case CMD_ADAPTER_DUMP:
1435 case CMD_XMIT_SEQUENCE64_CR:
1436 case CMD_XMIT_SEQUENCE64_CX:
1437 case CMD_XMIT_BCAST64_CN:
1438 case CMD_XMIT_BCAST64_CX:
1439 case CMD_ELS_REQUEST64_CR:
1440 case CMD_ELS_REQUEST64_CX:
1441 case CMD_FCP_IWRITE64_CR:
1442 case CMD_FCP_IWRITE64_CX:
1443 case CMD_FCP_IREAD64_CR:
1444 case CMD_FCP_IREAD64_CX:
1445 case CMD_FCP_ICMND64_CR:
1446 case CMD_FCP_ICMND64_CX:
1447 case CMD_FCP_TSEND64_CX:
1448 case CMD_FCP_TRSP64_CX:
1449 case CMD_FCP_TRECEIVE64_CX:
1450 case CMD_GEN_REQUEST64_CR:
1451 case CMD_GEN_REQUEST64_CX:
1452 case CMD_XMIT_ELS_RSP64_CX:
1453 case DSSCMD_IWRITE64_CR:
1454 case DSSCMD_IWRITE64_CX:
1455 case DSSCMD_IREAD64_CR:
1456 case DSSCMD_IREAD64_CX:
1457 type = LPFC_SOL_IOCB;
1458 break;
1459 case CMD_ABORT_XRI_CN:
1460 case CMD_ABORT_XRI_CX:
1461 case CMD_CLOSE_XRI_CN:
1462 case CMD_CLOSE_XRI_CX:
1463 case CMD_XRI_ABORTED_CX:
1464 case CMD_ABORT_MXRI64_CN:
1465 case CMD_XMIT_BLS_RSP64_CX:
1466 type = LPFC_ABORT_IOCB;
1467 break;
1468 case CMD_RCV_SEQUENCE_CX:
1469 case CMD_RCV_ELS_REQ_CX:
1470 case CMD_RCV_SEQUENCE64_CX:
1471 case CMD_RCV_ELS_REQ64_CX:
1472 case CMD_ASYNC_STATUS:
1473 case CMD_IOCB_RCV_SEQ64_CX:
1474 case CMD_IOCB_RCV_ELS64_CX:
1475 case CMD_IOCB_RCV_CONT64_CX:
1476 case CMD_IOCB_RET_XRI64_CX:
1477 type = LPFC_UNSOL_IOCB;
1478 break;
1479 case CMD_IOCB_XMIT_MSEQ64_CR:
1480 case CMD_IOCB_XMIT_MSEQ64_CX:
1481 case CMD_IOCB_RCV_SEQ_LIST64_CX:
1482 case CMD_IOCB_RCV_ELS_LIST64_CX:
1483 case CMD_IOCB_CLOSE_EXTENDED_CN:
1484 case CMD_IOCB_ABORT_EXTENDED_CN:
1485 case CMD_IOCB_RET_HBQE64_CN:
1486 case CMD_IOCB_FCP_IBIDIR64_CR:
1487 case CMD_IOCB_FCP_IBIDIR64_CX:
1488 case CMD_IOCB_FCP_ITASKMGT64_CX:
1489 case CMD_IOCB_LOGENTRY_CN:
1490 case CMD_IOCB_LOGENTRY_ASYNC_CN:
1491 printk("%s - Unhandled SLI-3 Command x%x\n",
1492 __func__, iocb_cmnd);
1493 type = LPFC_UNKNOWN_IOCB;
1494 break;
1495 default:
1496 type = LPFC_UNKNOWN_IOCB;
1497 break;
1498 }
1499
1500 return type;
1501}
1502
1503
1504
1505
1506
1507
1508
1509
1510
1511
1512
1513
1514static int
1515lpfc_sli_ring_map(struct lpfc_hba *phba)
1516{
1517 struct lpfc_sli *psli = &phba->sli;
1518 LPFC_MBOXQ_t *pmb;
1519 MAILBOX_t *pmbox;
1520 int i, rc, ret = 0;
1521
1522 pmb = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
1523 if (!pmb)
1524 return -ENOMEM;
1525 pmbox = &pmb->u.mb;
1526 phba->link_state = LPFC_INIT_MBX_CMDS;
1527 for (i = 0; i < psli->num_rings; i++) {
1528 lpfc_config_ring(phba, i, pmb);
1529 rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL);
1530 if (rc != MBX_SUCCESS) {
1531 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
1532 "0446 Adapter failed to init (%d), "
1533 "mbxCmd x%x CFG_RING, mbxStatus x%x, "
1534 "ring %d\n",
1535 rc, pmbox->mbxCommand,
1536 pmbox->mbxStatus, i);
1537 phba->link_state = LPFC_HBA_ERROR;
1538 ret = -ENXIO;
1539 break;
1540 }
1541 }
1542 mempool_free(pmb, phba->mbox_mem_pool);
1543 return ret;
1544}
1545
1546
1547
1548
1549
1550
1551
1552
1553
1554
1555
1556
1557
1558static int
1559lpfc_sli_ringtxcmpl_put(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
1560 struct lpfc_iocbq *piocb)
1561{
1562 lockdep_assert_held(&phba->hbalock);
1563
1564 BUG_ON(!piocb);
1565
1566 list_add_tail(&piocb->list, &pring->txcmplq);
1567 piocb->iocb_flag |= LPFC_IO_ON_TXCMPLQ;
1568
1569 if ((unlikely(pring->ringno == LPFC_ELS_RING)) &&
1570 (piocb->iocb.ulpCommand != CMD_ABORT_XRI_CN) &&
1571 (piocb->iocb.ulpCommand != CMD_CLOSE_XRI_CN)) {
1572 BUG_ON(!piocb->vport);
1573 if (!(piocb->vport->load_flag & FC_UNLOADING))
1574 mod_timer(&piocb->vport->els_tmofunc,
1575 jiffies +
1576 msecs_to_jiffies(1000 * (phba->fc_ratov << 1)));
1577 }
1578
1579 return 0;
1580}
1581
1582
1583
1584
1585
1586
1587
1588
1589
1590
1591
1592struct lpfc_iocbq *
1593lpfc_sli_ringtx_get(struct lpfc_hba *phba, struct lpfc_sli_ring *pring)
1594{
1595 struct lpfc_iocbq *cmd_iocb;
1596
1597 lockdep_assert_held(&phba->hbalock);
1598
1599 list_remove_head((&pring->txq), cmd_iocb, struct lpfc_iocbq, list);
1600 return cmd_iocb;
1601}
1602
1603
1604
1605
1606
1607
1608
1609
1610
1611
1612
1613
1614
1615
1616
/**
 * lpfc_sli_next_iocb_slot - Get next iocb slot in the command ring
 * @phba: pointer to the HBA context object.
 * @pring: pointer to the driver SLI ring object.
 *
 * Returns a pointer to the next free command iocb entry, or NULL when
 * the ring is full.  Must be called with hbalock held.  If the port's
 * cmdGetInx is out of range, the HBA is marked in error state and the
 * worker thread is woken to process the error attention.
 */
static IOCB_t *
lpfc_sli_next_iocb_slot (struct lpfc_hba *phba, struct lpfc_sli_ring *pring)
{
	struct lpfc_pgp *pgp = &phba->port_gp[pring->ringno];
	uint32_t max_cmd_idx = pring->sli.sli3.numCiocb;

	lockdep_assert_held(&phba->hbalock);

	/* Advance next_cmdidx (wrapping at ring end) once it has caught
	 * up with cmdidx.
	 */
	if ((pring->sli.sli3.next_cmdidx == pring->sli.sli3.cmdidx) &&
	   (++pring->sli.sli3.next_cmdidx >= max_cmd_idx))
		pring->sli.sli3.next_cmdidx = 0;

	if (unlikely(pring->sli.sli3.local_getidx ==
		pring->sli.sli3.next_cmdidx)) {
		/* Ring looks full: refresh the cached get index from the
		 * port's shared memory area.
		 */
		pring->sli.sli3.local_getidx = le32_to_cpu(pgp->cmdGetInx);

		if (unlikely(pring->sli.sli3.local_getidx >= max_cmd_idx)) {
			lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
					"0315 Ring %d issue: portCmdGet %d "
					"is bigger than cmd ring %d\n",
					pring->ringno,
					pring->sli.sli3.local_getidx,
					max_cmd_idx);

			phba->link_state = LPFC_HBA_ERROR;
			/*
			 * Out-of-range get index is treated as an error
			 * attention; hand it to the worker thread.
			 */
			phba->work_ha |= HA_ERATT;
			phba->work_hs = HS_FFER3;

			lpfc_worker_wake_up(phba);

			return NULL;
		}

		/* Still full even with the refreshed get index. */
		if (pring->sli.sli3.local_getidx == pring->sli.sli3.next_cmdidx)
			return NULL;
	}

	return lpfc_cmd_iocb(phba, pring);
}
1661
1662
1663
1664
1665
1666
1667
1668
1669
1670
1671
1672
1673
1674uint16_t
1675lpfc_sli_next_iotag(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq)
1676{
1677 struct lpfc_iocbq **new_arr;
1678 struct lpfc_iocbq **old_arr;
1679 size_t new_len;
1680 struct lpfc_sli *psli = &phba->sli;
1681 uint16_t iotag;
1682
1683 spin_lock_irq(&phba->hbalock);
1684 iotag = psli->last_iotag;
1685 if(++iotag < psli->iocbq_lookup_len) {
1686 psli->last_iotag = iotag;
1687 psli->iocbq_lookup[iotag] = iocbq;
1688 spin_unlock_irq(&phba->hbalock);
1689 iocbq->iotag = iotag;
1690 return iotag;
1691 } else if (psli->iocbq_lookup_len < (0xffff
1692 - LPFC_IOCBQ_LOOKUP_INCREMENT)) {
1693 new_len = psli->iocbq_lookup_len + LPFC_IOCBQ_LOOKUP_INCREMENT;
1694 spin_unlock_irq(&phba->hbalock);
1695 new_arr = kzalloc(new_len * sizeof (struct lpfc_iocbq *),
1696 GFP_KERNEL);
1697 if (new_arr) {
1698 spin_lock_irq(&phba->hbalock);
1699 old_arr = psli->iocbq_lookup;
1700 if (new_len <= psli->iocbq_lookup_len) {
1701
1702 kfree(new_arr);
1703 iotag = psli->last_iotag;
1704 if(++iotag < psli->iocbq_lookup_len) {
1705 psli->last_iotag = iotag;
1706 psli->iocbq_lookup[iotag] = iocbq;
1707 spin_unlock_irq(&phba->hbalock);
1708 iocbq->iotag = iotag;
1709 return iotag;
1710 }
1711 spin_unlock_irq(&phba->hbalock);
1712 return 0;
1713 }
1714 if (psli->iocbq_lookup)
1715 memcpy(new_arr, old_arr,
1716 ((psli->last_iotag + 1) *
1717 sizeof (struct lpfc_iocbq *)));
1718 psli->iocbq_lookup = new_arr;
1719 psli->iocbq_lookup_len = new_len;
1720 psli->last_iotag = iotag;
1721 psli->iocbq_lookup[iotag] = iocbq;
1722 spin_unlock_irq(&phba->hbalock);
1723 iocbq->iotag = iotag;
1724 kfree(old_arr);
1725 return iotag;
1726 }
1727 } else
1728 spin_unlock_irq(&phba->hbalock);
1729
1730 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
1731 "0318 Failed to allocate IOTAG.last IOTAG is %d\n",
1732 psli->last_iotag);
1733
1734 return 0;
1735}
1736
1737
1738
1739
1740
1741
1742
1743
1744
1745
1746
1747
1748
1749
1750
/**
 * lpfc_sli_submit_iocb - Submit an iocb to the firmware
 * @phba: pointer to the HBA context object.
 * @pring: pointer to the driver SLI ring object.
 * @iocb: pointer to the next available command slot in the ring.
 * @nextiocb: pointer to the driver iocb object to submit.
 *
 * Copies @nextiocb into the ring slot and updates the host put index to
 * hand it to the HBA.  Must be called with hbalock held.
 */
static void
lpfc_sli_submit_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
		IOCB_t *iocb, struct lpfc_iocbq *nextiocb)
{
	lockdep_assert_held(&phba->hbalock);

	/* Only carry an iotag when a completion handler will consume it. */
	nextiocb->iocb.ulpIoTag = (nextiocb->iocb_cmpl) ? nextiocb->iotag : 0;

	/* Trace ELS ring commands to the slow-ring debugfs buffer. */
	if (pring->ringno == LPFC_ELS_RING) {
		lpfc_debugfs_slow_ring_trc(phba,
			"IOCB cmd ring: wd4:x%08x wd6:x%08x wd7:x%08x",
			*(((uint32_t *) &nextiocb->iocb) + 4),
			*(((uint32_t *) &nextiocb->iocb) + 6),
			*(((uint32_t *) &nextiocb->iocb) + 7));
	}

	/*
	 * Copy the command into the ring slot; the write barrier makes the
	 * copy visible before the put index update below.
	 */
	lpfc_sli_pcimem_bcopy(&nextiocb->iocb, iocb, phba->iocb_cmd_size);
	wmb();
	pring->stats.iocb_cmd++;

	/*
	 * With a completion routine, the iocb must be tracked on the
	 * txcmplq until its response arrives; otherwise it can be released
	 * back to the pool right away.
	 */
	if (nextiocb->iocb_cmpl)
		lpfc_sli_ringtxcmpl_put(phba, pring, nextiocb);
	else
		__lpfc_sli_release_iocbq(phba, nextiocb);

	/*
	 * Tell the HBA which slot the driver will fill next by advancing
	 * the host put index.
	 */
	pring->sli.sli3.cmdidx = pring->sli.sli3.next_cmdidx;
	writel(pring->sli.sli3.cmdidx, &phba->host_gp[pring->ringno].cmdPutInx);
}
1794
1795
1796
1797
1798
1799
1800
1801
1802
1803
1804
1805
1806
/**
 * lpfc_sli_update_full_ring - Update the chip attention register (ring full)
 * @phba: pointer to the HBA context object.
 * @pring: pointer to the driver SLI ring object.
 *
 * Marks the ring full and requests the HBA to interrupt the host when
 * command ring entries become available again.
 */
static void
lpfc_sli_update_full_ring(struct lpfc_hba *phba, struct lpfc_sli_ring *pring)
{
	int ringno = pring->ringno;

	pring->flag |= LPFC_CALL_RING_AVAILABLE;

	wmb();

	/*
	 * Set ring 'ringno' to SET R0CE_REQ in the Chip Attention
	 * register; the HBA will tell us when an IOCB entry is available.
	 */
	writel((CA_R0ATT|CA_R0CE_REQ) << (ringno*4), phba->CAregaddr);
	readl(phba->CAregaddr); /* flush the posted write */

	pring->stats.iocb_cmd_full++;
}
1825
1826
1827
1828
1829
1830
1831
1832
1833
1834
/**
 * lpfc_sli_update_ring - Update the chip attention register
 * @phba: pointer to the HBA context object.
 * @pring: pointer to the driver SLI ring object.
 *
 * Rings the ring-attention doorbell so the HBA processes the newly
 * posted commands.  Skipped when CRP is enabled (no doorbell needed).
 */
static void
lpfc_sli_update_ring(struct lpfc_hba *phba, struct lpfc_sli_ring *pring)
{
	int ringno = pring->ringno;

	/*
	 * Tell the HBA that there is work to do in this ring.
	 */
	if (!(phba->sli3_options & LPFC_SLI3_CRP_ENABLED)) {
		wmb();
		writel(CA_R0ATT << (ringno * 4), phba->CAregaddr);
		readl(phba->CAregaddr); /* flush the posted write */
	}
}
1849
1850
1851
1852
1853
1854
1855
1856
1857
1858
/**
 * lpfc_sli_resume_iocb - Process pending iocbs in the txq
 * @phba: pointer to the HBA context object.
 * @pring: pointer to the driver SLI ring object.
 *
 * Submits as many txq iocbs as the command ring has room for, then
 * updates the chip attention register (full-ring variant if the ring
 * filled up first).  Must be called with hbalock held.
 */
static void
lpfc_sli_resume_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring)
{
	IOCB_t *iocb;
	struct lpfc_iocbq *nextiocb;

	lockdep_assert_held(&phba->hbalock);

	/*
	 * Submit only when:
	 *  (a) the link is up,
	 *  (b) there is something on the txq to send, and
	 *  (c) for the FCP ring, link attention processing allows it
	 *      (LPFC_PROCESS_LA set).
	 */
	if (lpfc_is_link_up(phba) &&
	    (!list_empty(&pring->txq)) &&
	    (pring->ringno != LPFC_FCP_RING ||
	     phba->sli.sli_flag & LPFC_PROCESS_LA)) {

		/* Drain txq into the ring while both a free slot and a
		 * pending iocb exist.
		 */
		while ((iocb = lpfc_sli_next_iocb_slot(phba, pring)) &&
		       (nextiocb = lpfc_sli_ringtx_get(phba, pring)))
			lpfc_sli_submit_iocb(phba, pring, iocb, nextiocb);

		/* iocb == NULL means the ring filled before the txq drained */
		if (iocb)
			lpfc_sli_update_ring(phba, pring);
		else
			lpfc_sli_update_full_ring(phba, pring);
	}

	return;
}
1892
1893
1894
1895
1896
1897
1898
1899
1900
1901
1902
/**
 * lpfc_sli_next_hbq_slot - Get next hbq entry for the HBQ
 * @phba: pointer to the HBA context object.
 * @hbqno: HBQ number.
 *
 * Returns a pointer to the next free slot of the given HBQ, or NULL when
 * the HBQ is full.  Must be called with hbalock held.  An out-of-range
 * get index from the port puts the HBA into error state.
 */
static struct lpfc_hbq_entry *
lpfc_sli_next_hbq_slot(struct lpfc_hba *phba, uint32_t hbqno)
{
	struct hbq_s *hbqp = &phba->hbqs[hbqno];

	lockdep_assert_held(&phba->hbalock);

	/* Advance next put index (wrapping) once it catches the put index */
	if (hbqp->next_hbqPutIdx == hbqp->hbqPutIdx &&
	    ++hbqp->next_hbqPutIdx >= hbqp->entry_count)
		hbqp->next_hbqPutIdx = 0;

	if (unlikely(hbqp->local_hbqGetIdx == hbqp->next_hbqPutIdx)) {
		/* Looks full: refresh the cached get index from the port */
		uint32_t raw_index = phba->hbq_get[hbqno];
		uint32_t getidx = le32_to_cpu(raw_index);

		hbqp->local_hbqGetIdx = getidx;

		if (unlikely(hbqp->local_hbqGetIdx >= hbqp->entry_count)) {
			lpfc_printf_log(phba, KERN_ERR,
					LOG_SLI | LOG_VPORT,
					"1802 HBQ %d: local_hbqGetIdx "
					"%u is > than hbqp->entry_count %u\n",
					hbqno, hbqp->local_hbqGetIdx,
					hbqp->entry_count);

			phba->link_state = LPFC_HBA_ERROR;
			return NULL;
		}

		if (hbqp->local_hbqGetIdx == hbqp->next_hbqPutIdx)
			return NULL;
	}

	return (struct lpfc_hbq_entry *) phba->hbqs[hbqno].hbq_virt +
			hbqp->hbqPutIdx;
}
1939
1940
1941
1942
1943
1944
1945
1946
1947
1948
1949void
1950lpfc_sli_hbqbuf_free_all(struct lpfc_hba *phba)
1951{
1952 struct lpfc_dmabuf *dmabuf, *next_dmabuf;
1953 struct hbq_dmabuf *hbq_buf;
1954 unsigned long flags;
1955 int i, hbq_count;
1956
1957 hbq_count = lpfc_sli_hbq_count();
1958
1959 spin_lock_irqsave(&phba->hbalock, flags);
1960 for (i = 0; i < hbq_count; ++i) {
1961 list_for_each_entry_safe(dmabuf, next_dmabuf,
1962 &phba->hbqs[i].hbq_buffer_list, list) {
1963 hbq_buf = container_of(dmabuf, struct hbq_dmabuf, dbuf);
1964 list_del(&hbq_buf->dbuf.list);
1965 (phba->hbqs[i].hbq_free_buffer)(phba, hbq_buf);
1966 }
1967 phba->hbqs[i].buffer_count = 0;
1968 }
1969
1970
1971 phba->hbq_in_use = 0;
1972 spin_unlock_irqrestore(&phba->hbalock, flags);
1973}
1974
1975
1976
1977
1978
1979
1980
1981
1982
1983
1984
1985
1986
/**
 * lpfc_sli_hbq_to_firmware - Post the hbq buffer to firmware
 * @phba: pointer to the HBA context object.
 * @hbqno: HBQ number.
 * @hbq_buf: pointer to the HBQ buffer.
 *
 * Indirect call to the SLI-revision specific post routine (set up
 * elsewhere to the s3 or s4 variant).  Must be called with hbalock held.
 * Returns whatever the revision-specific routine returns (0 on success).
 */
static int
lpfc_sli_hbq_to_firmware(struct lpfc_hba *phba, uint32_t hbqno,
			 struct hbq_dmabuf *hbq_buf)
{
	lockdep_assert_held(&phba->hbalock);
	return phba->lpfc_sli_hbq_to_firmware(phba, hbqno, hbq_buf);
}
1994
1995
1996
1997
1998
1999
2000
2001
2002
2003
2004
2005
2006static int
2007lpfc_sli_hbq_to_firmware_s3(struct lpfc_hba *phba, uint32_t hbqno,
2008 struct hbq_dmabuf *hbq_buf)
2009{
2010 struct lpfc_hbq_entry *hbqe;
2011 dma_addr_t physaddr = hbq_buf->dbuf.phys;
2012
2013 lockdep_assert_held(&phba->hbalock);
2014
2015 hbqe = lpfc_sli_next_hbq_slot(phba, hbqno);
2016 if (hbqe) {
2017 struct hbq_s *hbqp = &phba->hbqs[hbqno];
2018
2019 hbqe->bde.addrHigh = le32_to_cpu(putPaddrHigh(physaddr));
2020 hbqe->bde.addrLow = le32_to_cpu(putPaddrLow(physaddr));
2021 hbqe->bde.tus.f.bdeSize = hbq_buf->total_size;
2022 hbqe->bde.tus.f.bdeFlags = 0;
2023 hbqe->bde.tus.w = le32_to_cpu(hbqe->bde.tus.w);
2024 hbqe->buffer_tag = le32_to_cpu(hbq_buf->tag);
2025
2026 hbqp->hbqPutIdx = hbqp->next_hbqPutIdx;
2027 writel(hbqp->hbqPutIdx, phba->hbq_put + hbqno);
2028
2029 readl(phba->hbq_put + hbqno);
2030 list_add_tail(&hbq_buf->dbuf.list, &hbqp->hbq_buffer_list);
2031 return 0;
2032 } else
2033 return -ENOMEM;
2034}
2035
2036
2037
2038
2039
2040
2041
2042
2043
2044
2045
/**
 * lpfc_sli_hbq_to_firmware_s4 - Post an hbq buffer to SLI-4 firmware
 * @phba: pointer to the HBA context object.
 * @hbqno: HBQ number.
 * @hbq_buf: pointer to the HBQ buffer.
 *
 * Posts the buffer's header/data pages to the SLI-4 header and data
 * receive queue pair.  Only the ELS HBQ exists on SLI-4; any other
 * @hbqno returns 1.  Must be called with hbalock held.  Returns 0 on
 * success or the negative RQ-put error code.
 */
static int
lpfc_sli_hbq_to_firmware_s4(struct lpfc_hba *phba, uint32_t hbqno,
			    struct hbq_dmabuf *hbq_buf)
{
	int rc;
	struct lpfc_rqe hrqe;
	struct lpfc_rqe drqe;
	struct lpfc_queue *hrq;
	struct lpfc_queue *drq;

	if (hbqno != LPFC_ELS_HBQ)
		return 1;
	hrq = phba->sli4_hba.hdr_rq;
	drq = phba->sli4_hba.dat_rq;

	lockdep_assert_held(&phba->hbalock);
	hrqe.address_lo = putPaddrLow(hbq_buf->hbuf.phys);
	hrqe.address_hi = putPaddrHigh(hbq_buf->hbuf.phys);
	drqe.address_lo = putPaddrLow(hbq_buf->dbuf.phys);
	drqe.address_hi = putPaddrHigh(hbq_buf->dbuf.phys);
	rc = lpfc_sli4_rq_put(hrq, drq, &hrqe, &drqe);
	if (rc < 0)
		return rc;
	/* Tag: RQ-put index in the low half, HBQ number in the high half. */
	hbq_buf->tag = (rc | (hbqno << 16));
	list_add_tail(&hbq_buf->dbuf.list, &phba->hbqs[hbqno].hbq_buffer_list);
	return 0;
}
2073
2074
/* HBQ definition for the ELS HBQ: 256 entries, posted to the ELS ring,
 * initially seeded with 40 buffers and refilled 40 at a time.
 */
static struct lpfc_hbq_init lpfc_els_hbq = {
	.rn = 1,
	.entry_count = 256,
	.mask_count = 0,
	.profile = 0,
	.ring_mask = (1 << LPFC_ELS_RING),
	.buffer_count = 0,
	.init_count = 40,
	.add_count = 40,
};

/* Table of HBQ definitions, indexed by HBQ number. */
struct lpfc_hbq_init *lpfc_hbq_defs[] = {
	&lpfc_els_hbq,
};
2090
2091
2092
2093
2094
2095
2096
2097
2098
2099
2100
/**
 * lpfc_sli_hbqbuf_fill_hbqs - Post more hbq buffers to an HBQ
 * @phba: pointer to the HBA context object.
 * @hbqno: HBQ number.
 * @count: number of HBQ buffers to post.
 *
 * Allocates up to @count buffers (capped so the HBQ never holds more
 * than its entry count) outside the lock, then posts them to firmware
 * under hbalock.  If the HBQs went out of use meanwhile, all allocated
 * buffers are freed.  Returns the number of buffers actually posted.
 */
static int
lpfc_sli_hbqbuf_fill_hbqs(struct lpfc_hba *phba, uint32_t hbqno, uint32_t count)
{
	uint32_t i, posted = 0;
	unsigned long flags;
	struct hbq_dmabuf *hbq_buffer;
	LIST_HEAD(hbq_buf_list);
	if (!phba->hbqs[hbqno].hbq_alloc_buffer)
		return 0;

	/* Cap the request at the remaining HBQ capacity. */
	if ((phba->hbqs[hbqno].buffer_count + count) >
	    lpfc_hbq_defs[hbqno]->entry_count)
		count = lpfc_hbq_defs[hbqno]->entry_count -
					phba->hbqs[hbqno].buffer_count;
	if (!count)
		return 0;
	/* Allocate HBQ buffers without holding the lock. */
	for (i = 0; i < count; i++) {
		hbq_buffer = (phba->hbqs[hbqno].hbq_alloc_buffer)(phba);
		if (!hbq_buffer)
			break;
		list_add_tail(&hbq_buffer->dbuf.list, &hbq_buf_list);
	}
	/* Check whether the HBQs are still in use. */
	spin_lock_irqsave(&phba->hbalock, flags);
	if (!phba->hbq_in_use)
		goto err;
	while (!list_empty(&hbq_buf_list)) {
		list_remove_head(&hbq_buf_list, hbq_buffer, struct hbq_dmabuf,
				 dbuf.list);
		hbq_buffer->tag = (phba->hbqs[hbqno].buffer_count |
				      (hbqno << 16));
		if (!lpfc_sli_hbq_to_firmware(phba, hbqno, hbq_buffer)) {
			phba->hbqs[hbqno].buffer_count++;
			posted++;
		} else
			/* Post failed: release the buffer immediately. */
			(phba->hbqs[hbqno].hbq_free_buffer)(phba, hbq_buffer);
	}
	spin_unlock_irqrestore(&phba->hbalock, flags);
	return posted;
err:
	spin_unlock_irqrestore(&phba->hbalock, flags);
	/* HBQs no longer in use: free everything that was allocated. */
	while (!list_empty(&hbq_buf_list)) {
		list_remove_head(&hbq_buf_list, hbq_buffer, struct hbq_dmabuf,
				 dbuf.list);
		(phba->hbqs[hbqno].hbq_free_buffer)(phba, hbq_buffer);
	}
	return 0;
}
2150
2151
2152
2153
2154
2155
2156
2157
2158
2159
2160int
2161lpfc_sli_hbqbuf_add_hbqs(struct lpfc_hba *phba, uint32_t qno)
2162{
2163 if (phba->sli_rev == LPFC_SLI_REV4)
2164 return 0;
2165 else
2166 return lpfc_sli_hbqbuf_fill_hbqs(phba, qno,
2167 lpfc_hbq_defs[qno]->add_count);
2168}
2169
2170
2171
2172
2173
2174
2175
2176
2177
2178
2179static int
2180lpfc_sli_hbqbuf_init_hbqs(struct lpfc_hba *phba, uint32_t qno)
2181{
2182 if (phba->sli_rev == LPFC_SLI_REV4)
2183 return lpfc_sli_hbqbuf_fill_hbqs(phba, qno,
2184 lpfc_hbq_defs[qno]->entry_count);
2185 else
2186 return lpfc_sli_hbqbuf_fill_hbqs(phba, qno,
2187 lpfc_hbq_defs[qno]->init_count);
2188}
2189
2190
2191
2192
2193
2194
2195
2196
2197
2198static struct hbq_dmabuf *
2199lpfc_sli_hbqbuf_get(struct list_head *rb_list)
2200{
2201 struct lpfc_dmabuf *d_buf;
2202
2203 list_remove_head(rb_list, d_buf, struct lpfc_dmabuf, list);
2204 if (!d_buf)
2205 return NULL;
2206 return container_of(d_buf, struct hbq_dmabuf, dbuf);
2207}
2208
2209
2210
2211
2212
2213
2214
2215
2216
2217static struct rqb_dmabuf *
2218lpfc_sli_rqbuf_get(struct lpfc_hba *phba, struct lpfc_queue *hrq)
2219{
2220 struct lpfc_dmabuf *h_buf;
2221 struct lpfc_rqb *rqbp;
2222
2223 rqbp = hrq->rqbp;
2224 list_remove_head(&rqbp->rqb_buffer_list, h_buf,
2225 struct lpfc_dmabuf, list);
2226 if (!h_buf)
2227 return NULL;
2228 rqbp->buffer_count--;
2229 return container_of(h_buf, struct rqb_dmabuf, hbuf);
2230}
2231
2232
2233
2234
2235
2236
2237
2238
2239
2240
/**
 * lpfc_sli_hbqbuf_find - Find the hbq buffer associated with a tag
 * @phba: pointer to the HBA context object.
 * @tag: buffer tag (HBQ number in the upper 16 bits).
 *
 * Walks the HBQ's buffer list under hbalock looking for a matching tag.
 * Returns the buffer, or NULL (after logging) when no match exists or
 * the encoded HBQ number is out of range.
 */
static struct hbq_dmabuf *
lpfc_sli_hbqbuf_find(struct lpfc_hba *phba, uint32_t tag)
{
	struct lpfc_dmabuf *d_buf;
	struct hbq_dmabuf *hbq_buf;
	uint32_t hbqno;

	hbqno = tag >> 16;
	if (hbqno >= LPFC_MAX_HBQS)
		return NULL;

	spin_lock_irq(&phba->hbalock);
	list_for_each_entry(d_buf, &phba->hbqs[hbqno].hbq_buffer_list, list) {
		hbq_buf = container_of(d_buf, struct hbq_dmabuf, dbuf);
		if (hbq_buf->tag == tag) {
			spin_unlock_irq(&phba->hbalock);
			return hbq_buf;
		}
	}
	spin_unlock_irq(&phba->hbalock);
	lpfc_printf_log(phba, KERN_ERR, LOG_SLI | LOG_VPORT,
			"1803 Bad hbq tag. Data: x%x x%x\n",
			tag, phba->hbqs[tag >> 16].buffer_count);
	return NULL;
}
2266
2267
2268
2269
2270
2271
2272
2273
2274
2275
2276void
2277lpfc_sli_free_hbq(struct lpfc_hba *phba, struct hbq_dmabuf *hbq_buffer)
2278{
2279 uint32_t hbqno;
2280
2281 if (hbq_buffer) {
2282 hbqno = hbq_buffer->tag >> 16;
2283 if (lpfc_sli_hbq_to_firmware(phba, hbqno, hbq_buffer))
2284 (phba->hbqs[hbqno].hbq_free_buffer)(phba, hbq_buffer);
2285 }
2286}
2287
2288
2289
2290
2291
2292
2293
2294
2295
2296
/**
 * lpfc_sli_chk_mbx_command - Check if a mailbox command code is known
 * @mbxCommand: mailbox command code.
 *
 * Returns @mbxCommand unchanged for every recognized command, or
 * MBX_SHUTDOWN for anything unrecognized (the caller treats that as a
 * fatal condition).
 */
static int
lpfc_sli_chk_mbx_command(uint8_t mbxCommand)
{
	uint8_t ret;

	switch (mbxCommand) {
	case MBX_LOAD_SM:
	case MBX_READ_NV:
	case MBX_WRITE_NV:
	case MBX_WRITE_VPARMS:
	case MBX_RUN_BIU_DIAG:
	case MBX_INIT_LINK:
	case MBX_DOWN_LINK:
	case MBX_CONFIG_LINK:
	case MBX_CONFIG_RING:
	case MBX_RESET_RING:
	case MBX_READ_CONFIG:
	case MBX_READ_RCONFIG:
	case MBX_READ_SPARM:
	case MBX_READ_STATUS:
	case MBX_READ_RPI:
	case MBX_READ_XRI:
	case MBX_READ_REV:
	case MBX_READ_LNK_STAT:
	case MBX_REG_LOGIN:
	case MBX_UNREG_LOGIN:
	case MBX_CLEAR_LA:
	case MBX_DUMP_MEMORY:
	case MBX_DUMP_CONTEXT:
	case MBX_RUN_DIAGS:
	case MBX_RESTART:
	case MBX_UPDATE_CFG:
	case MBX_DOWN_LOAD:
	case MBX_DEL_LD_ENTRY:
	case MBX_RUN_PROGRAM:
	case MBX_SET_MASK:
	case MBX_SET_VARIABLE:
	case MBX_UNREG_D_ID:
	case MBX_KILL_BOARD:
	case MBX_CONFIG_FARP:
	case MBX_BEACON:
	case MBX_LOAD_AREA:
	case MBX_RUN_BIU_DIAG64:
	case MBX_CONFIG_PORT:
	case MBX_READ_SPARM64:
	case MBX_READ_RPI64:
	case MBX_REG_LOGIN64:
	case MBX_READ_TOPOLOGY:
	case MBX_WRITE_WWN:
	case MBX_SET_DEBUG:
	case MBX_LOAD_EXP_ROM:
	case MBX_ASYNCEVT_ENABLE:
	case MBX_REG_VPI:
	case MBX_UNREG_VPI:
	case MBX_HEARTBEAT:
	case MBX_PORT_CAPABILITIES:
	case MBX_PORT_IOV_CONTROL:
	case MBX_SLI4_CONFIG:
	case MBX_SLI4_REQ_FTRS:
	case MBX_REG_FCFI:
	case MBX_UNREG_FCFI:
	case MBX_REG_VFI:
	case MBX_UNREG_VFI:
	case MBX_INIT_VPI:
	case MBX_INIT_VFI:
	case MBX_RESUME_RPI:
	case MBX_READ_EVENT_LOG_STATUS:
	case MBX_READ_EVENT_LOG:
	case MBX_SECURITY_MGMT:
	case MBX_AUTH_PORT:
	case MBX_ACCESS_VDATA:
		ret = mbxCommand;
		break;
	default:
		/* Unknown command: signal the caller to shut down. */
		ret = MBX_SHUTDOWN;
		break;
	}
	return ret;
}
2376
2377
2378
2379
2380
2381
2382
2383
2384
2385
2386
2387
/**
 * lpfc_sli_wake_mbox_wait - Completion handler for a mbox issued in MBX_WAIT
 * @phba: pointer to the HBA context object.
 * @pmboxq: pointer to the driver internal queue element for mailbox command.
 *
 * Sets the wake flag (before taking the lock, so a waiter that times out
 * can observe it) and completes the waiter's completion if it is still
 * registered in context3.
 */
void
lpfc_sli_wake_mbox_wait(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmboxq)
{
	unsigned long drvr_flag;
	struct completion *pmbox_done;

	/*
	 * If pmbox_done is NULL, the waiting thread gave up and has
	 * already continued; the completion must not be signalled then.
	 */
	pmboxq->mbox_flag |= LPFC_MBX_WAKE;
	spin_lock_irqsave(&phba->hbalock, drvr_flag);
	pmbox_done = (struct completion *)pmboxq->context3;
	if (pmbox_done)
		complete(pmbox_done);
	spin_unlock_irqrestore(&phba->hbalock, drvr_flag);
	return;
}
2406
2407
2408
2409
2410
2411
2412
2413
2414
2415
2416
2417
/**
 * lpfc_sli_def_mbox_cmpl - Default mailbox completion handler
 * @phba: pointer to the HBA context object.
 * @pmb: pointer to the driver internal queue element for mailbox command.
 *
 * Frees the mailbox's DMA buffer and the mailbox object itself.  Special
 * cases: a successful REG_LOGIN64 while not unloading is immediately
 * undone with an UNREG_LOGIN that reuses @pmb; a successful REG_VPI
 * updates the vport's registration state; REG_LOGIN64 completions drop
 * the node reference held in context2.
 */
void
lpfc_sli_def_mbox_cmpl(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
{
	struct lpfc_vport *vport = pmb->vport;
	struct lpfc_dmabuf *mp;
	struct lpfc_nodelist *ndlp;
	struct Scsi_Host *shost;
	uint16_t rpi, vpi;
	int rc;

	mp = (struct lpfc_dmabuf *) (pmb->context1);

	if (mp) {
		lpfc_mbuf_free(phba, mp->virt, mp->phys);
		kfree(mp);
	}

	/*
	 * A REG_LOGIN64 that completed successfully while the driver is
	 * not unloading is undone here: the mailbox is reused to issue
	 * an UNREG_LOGIN for the same vpi/rpi; on success the mailbox is
	 * freed by that command's own completion instead.
	 */
	if (!(phba->pport->load_flag & FC_UNLOADING) &&
	    pmb->u.mb.mbxCommand == MBX_REG_LOGIN64 &&
	    !pmb->u.mb.mbxStatus) {
		rpi = pmb->u.mb.un.varWords[0];
		vpi = pmb->u.mb.un.varRegLogin.vpi;
		lpfc_unreg_login(phba, vpi, rpi, pmb);
		pmb->vport = vport;
		pmb->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
		rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT);
		if (rc != MBX_NOT_FINISHED)
			return;
	}

	/* Successful REG_VPI: record the registration on the vport. */
	if ((pmb->u.mb.mbxCommand == MBX_REG_VPI) &&
		!(phba->pport->load_flag & FC_UNLOADING) &&
		!pmb->u.mb.mbxStatus) {
		shost = lpfc_shost_from_vport(vport);
		spin_lock_irq(shost->host_lock);
		vport->vpi_state |= LPFC_VPI_REGISTERED;
		vport->fc_flag &= ~FC_VPORT_NEEDS_REG_VPI;
		spin_unlock_irq(shost->host_lock);
	}

	/* Drop the node reference taken when the REG_LOGIN64 was issued. */
	if (pmb->u.mb.mbxCommand == MBX_REG_LOGIN64) {
		ndlp = (struct lpfc_nodelist *)pmb->context2;
		lpfc_nlp_put(ndlp);
		pmb->context2 = NULL;
	}

	/* Check security permission status on INIT_LINK mailbox command */
	if ((pmb->u.mb.mbxCommand == MBX_INIT_LINK) &&
	    (pmb->u.mb.mbxStatus == MBXERR_SEC_NO_PERMISSION))
		lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
				"2860 SLI authentication is required "
				"for INIT_LINK but has not done yet\n");

	if (bf_get(lpfc_mqe_command, &pmb->u.mqe) == MBX_SLI4_CONFIG)
		lpfc_sli4_mbox_cmd_free(phba, pmb);
	else
		mempool_free(pmb, phba->mbox_mem_pool);
}
2480
2481
2482
2483
2484
2485
2486
2487
2488
2489
2490
2491
2492
2493void
2494lpfc_sli4_unreg_rpi_cmpl_clr(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
2495{
2496 struct lpfc_vport *vport = pmb->vport;
2497 struct lpfc_nodelist *ndlp;
2498
2499 ndlp = pmb->context1;
2500 if (pmb->u.mb.mbxCommand == MBX_UNREG_LOGIN) {
2501 if (phba->sli_rev == LPFC_SLI_REV4 &&
2502 (bf_get(lpfc_sli_intf_if_type,
2503 &phba->sli4_hba.sli_intf) >=
2504 LPFC_SLI_INTF_IF_TYPE_2)) {
2505 if (ndlp) {
2506 lpfc_printf_vlog(vport, KERN_INFO, LOG_SLI,
2507 "0010 UNREG_LOGIN vpi:%x "
2508 "rpi:%x DID:%x map:%x %p\n",
2509 vport->vpi, ndlp->nlp_rpi,
2510 ndlp->nlp_DID,
2511 ndlp->nlp_usg_map, ndlp);
2512 ndlp->nlp_flag &= ~NLP_LOGO_ACC;
2513 lpfc_nlp_put(ndlp);
2514 }
2515 }
2516 }
2517
2518 mempool_free(pmb, phba->mbox_mem_pool);
2519}
2520
2521
2522
2523
2524
2525
2526
2527
2528
2529
2530
2531
2532
2533
/**
 * lpfc_sli_handle_mb_event - Handle mailbox completions from firmware
 * @phba: pointer to the HBA context object.
 *
 * Splices the completed-mailbox list off the phba under hbalock and
 * processes each entry: an unknown command shuts the HBA down through
 * the error-attention path, an MBXERR_NO_RESOURCES failure is retried,
 * and every other completion is handed to its mbox_cmpl routine.
 * Always returns 0.
 */
int
lpfc_sli_handle_mb_event(struct lpfc_hba *phba)
{
	MAILBOX_t *pmbox;
	LPFC_MBOXQ_t *pmb;
	int rc;
	LIST_HEAD(cmplq);

	phba->sli.slistat.mbox_event++;

	/* Move all completed mailboxes onto a private list. */
	spin_lock_irq(&phba->hbalock);
	list_splice_init(&phba->sli.mboxq_cmpl, &cmplq);
	spin_unlock_irq(&phba->hbalock);

	/* Process each completed mailbox in turn. */
	do {
		list_remove_head(&cmplq, pmb, LPFC_MBOXQ_t, list);
		if (pmb == NULL)
			break;

		pmbox = &pmb->u.mb;

		/* Heartbeats are not traced to debugfs. */
		if (pmbox->mbxCommand != MBX_HEARTBEAT) {
			if (pmb->vport) {
				lpfc_debugfs_disc_trc(pmb->vport,
					LPFC_DISC_TRC_MBOX_VPORT,
					"MBOX cmpl vport: cmd:x%x mb:x%x x%x",
					(uint32_t)pmbox->mbxCommand,
					pmbox->un.varWords[0],
					pmbox->un.varWords[1]);
			}
			else {
				lpfc_debugfs_disc_trc(phba->pport,
					LPFC_DISC_TRC_MBOX,
					"MBOX cmpl: cmd:x%x mb:x%x x%x",
					(uint32_t)pmbox->mbxCommand,
					pmbox->un.varWords[0],
					pmbox->un.varWords[1]);
			}
		}

		/*
		 * A completion for a command the driver does not know is
		 * treated as fatal: raise error attention.
		 */
		if (lpfc_sli_chk_mbx_command(pmbox->mbxCommand) ==
		    MBX_SHUTDOWN) {
			/* Unknown mailbox command compl */
			lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
					"(%d):0323 Unknown Mailbox command "
					"x%x (x%x/x%x) Cmpl\n",
					pmb->vport ? pmb->vport->vpi : 0,
					pmbox->mbxCommand,
					lpfc_sli_config_mbox_subsys_get(phba,
									pmb),
					lpfc_sli_config_mbox_opcode_get(phba,
									pmb));
			phba->link_state = LPFC_HBA_ERROR;
			phba->work_hs = HS_FFER3;
			lpfc_handle_eratt(phba);
			continue;
		}

		if (pmbox->mbxStatus) {
			phba->sli.slistat.mbox_stat_err++;
			if (pmbox->mbxStatus == MBXERR_NO_RESOURCES) {
				/* Mbox cmd cmpl error - RETRYing */
				lpfc_printf_log(phba, KERN_INFO,
					LOG_MBOX | LOG_SLI,
					"(%d):0305 Mbox cmd cmpl "
					"error - RETRYing Data: x%x "
					"(x%x/x%x) x%x x%x x%x\n",
					pmb->vport ? pmb->vport->vpi : 0,
					pmbox->mbxCommand,
					lpfc_sli_config_mbox_subsys_get(phba,
									pmb),
					lpfc_sli_config_mbox_opcode_get(phba,
									pmb),
					pmbox->mbxStatus,
					pmbox->un.varWords[0],
					pmb->vport->port_state);
				pmbox->mbxStatus = 0;
				pmbox->mbxOwner = OWN_HOST;
				rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT);
				if (rc != MBX_NOT_FINISHED)
					continue;
			}
		}

		/* Mailbox cmd <cmd> Cmpl <cmpl> */
		lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI,
				"(%d):0307 Mailbox cmd x%x (x%x/x%x) Cmpl x%p "
				"Data: x%x x%x x%x x%x x%x x%x x%x x%x x%x "
				"x%x x%x x%x\n",
				pmb->vport ? pmb->vport->vpi : 0,
				pmbox->mbxCommand,
				lpfc_sli_config_mbox_subsys_get(phba, pmb),
				lpfc_sli_config_mbox_opcode_get(phba, pmb),
				pmb->mbox_cmpl,
				*((uint32_t *) pmbox),
				pmbox->un.varWords[0],
				pmbox->un.varWords[1],
				pmbox->un.varWords[2],
				pmbox->un.varWords[3],
				pmbox->un.varWords[4],
				pmbox->un.varWords[5],
				pmbox->un.varWords[6],
				pmbox->un.varWords[7],
				pmbox->un.varWords[8],
				pmbox->un.varWords[9],
				pmbox->un.varWords[10]);

		if (pmb->mbox_cmpl)
			pmb->mbox_cmpl(phba,pmb);
	} while (1);
	return 0;
}
2651
2652
2653
2654
2655
2656
2657
2658
2659
2660
2661
2662
2663
2664static struct lpfc_dmabuf *
2665lpfc_sli_get_buff(struct lpfc_hba *phba,
2666 struct lpfc_sli_ring *pring,
2667 uint32_t tag)
2668{
2669 struct hbq_dmabuf *hbq_entry;
2670
2671 if (tag & QUE_BUFTAG_BIT)
2672 return lpfc_sli_ring_taggedbuf_get(phba, pring, tag);
2673 hbq_entry = lpfc_sli_hbqbuf_find(phba, tag);
2674 if (!hbq_entry)
2675 return NULL;
2676 return &hbq_entry->dbuf;
2677}
2678
2679
2680
2681
2682
2683
2684
2685
2686
2687
2688
2689
2690
2691static int
2692lpfc_complete_unsol_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
2693 struct lpfc_iocbq *saveq, uint32_t fch_r_ctl,
2694 uint32_t fch_type)
2695{
2696 int i;
2697
2698 switch (fch_type) {
2699 case FC_TYPE_NVME:
2700 lpfc_nvmet_unsol_ls_event(phba, pring, saveq);
2701 return 1;
2702 default:
2703 break;
2704 }
2705
2706
2707 if (pring->prt[0].profile) {
2708 if (pring->prt[0].lpfc_sli_rcv_unsol_event)
2709 (pring->prt[0].lpfc_sli_rcv_unsol_event) (phba, pring,
2710 saveq);
2711 return 1;
2712 }
2713
2714
2715 for (i = 0; i < pring->num_mask; i++) {
2716 if ((pring->prt[i].rctl == fch_r_ctl) &&
2717 (pring->prt[i].type == fch_type)) {
2718 if (pring->prt[i].lpfc_sli_rcv_unsol_event)
2719 (pring->prt[i].lpfc_sli_rcv_unsol_event)
2720 (phba, pring, saveq);
2721 return 1;
2722 }
2723 }
2724 return 0;
2725}
2726
2727
2728
2729
2730
2731
2732
2733
2734
2735
2736
2737
2738
2739
2740
/**
 * lpfc_sli_process_unsol_iocb - Handle an unsolicited iocb
 * @phba: pointer to the HBA context object.
 * @pring: pointer to the driver SLI ring object.
 * @saveq: pointer to the unsolicited iocb.
 *
 * Resolves the DMA buffers referenced by the iocb (HBQ mode), stitches
 * multi-iocb sequences together via the ring's iocb_continue_saveq list,
 * then derives R_CTL/TYPE and hands the completed sequence to
 * lpfc_complete_unsol_iocb().  Returns 1 when the iocb was consumed,
 * 0 when it was queued awaiting the rest of its sequence.
 */
static int
lpfc_sli_process_unsol_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
			    struct lpfc_iocbq *saveq)
{
	IOCB_t * irsp;
	WORD5 * w5p;
	uint32_t Rctl, Type;
	struct lpfc_iocbq *iocbq;
	struct lpfc_dmabuf *dmzbuf;

	irsp = &(saveq->iocb);

	/* Async status events have their own (optional) ring handler. */
	if (irsp->ulpCommand == CMD_ASYNC_STATUS) {
		if (pring->lpfc_sli_rcv_async_status)
			pring->lpfc_sli_rcv_async_status(phba, pring, saveq);
		else
			lpfc_printf_log(phba,
					KERN_WARNING,
					LOG_SLI,
					"0316 Ring %d handler: unexpected "
					"ASYNC_STATUS iocb received evt_code "
					"0x%x\n",
					pring->ringno,
					irsp->un.asyncstat.evt_code);
		return 1;
	}

	/* RET_XRI64 in HBQ mode just returns up to three buffers. */
	if ((irsp->ulpCommand == CMD_IOCB_RET_XRI64_CX) &&
		(phba->sli3_options & LPFC_SLI3_HBQ_ENABLED)) {
		if (irsp->ulpBdeCount > 0) {
			dmzbuf = lpfc_sli_get_buff(phba, pring,
					irsp->un.ulpWord[3]);
			lpfc_in_buf_free(phba, dmzbuf);
		}

		if (irsp->ulpBdeCount > 1) {
			dmzbuf = lpfc_sli_get_buff(phba, pring,
					irsp->unsli3.sli3Words[3]);
			lpfc_in_buf_free(phba, dmzbuf);
		}

		if (irsp->ulpBdeCount > 2) {
			dmzbuf = lpfc_sli_get_buff(phba, pring,
				irsp->unsli3.sli3Words[7]);
			lpfc_in_buf_free(phba, dmzbuf);
		}

		return 1;
	}

	/* In HBQ mode, resolve the buffer tags carried by each iocb of
	 * the sequence into driver buffers (context2/context3).
	 */
	if (phba->sli3_options & LPFC_SLI3_HBQ_ENABLED) {
		if (irsp->ulpBdeCount != 0) {
			saveq->context2 = lpfc_sli_get_buff(phba, pring,
						irsp->un.ulpWord[3]);
			if (!saveq->context2)
				lpfc_printf_log(phba,
					KERN_ERR,
					LOG_SLI,
					"0341 Ring %d Cannot find buffer for "
					"an unsolicited iocb. tag 0x%x\n",
					pring->ringno,
					irsp->un.ulpWord[3]);
		}
		if (irsp->ulpBdeCount == 2) {
			saveq->context3 = lpfc_sli_get_buff(phba, pring,
						irsp->unsli3.sli3Words[7]);
			if (!saveq->context3)
				lpfc_printf_log(phba,
					KERN_ERR,
					LOG_SLI,
					"0342 Ring %d Cannot find buffer for an"
					" unsolicited iocb. tag 0x%x\n",
					pring->ringno,
					irsp->unsli3.sli3Words[7]);
		}
		list_for_each_entry(iocbq, &saveq->list, list) {
			irsp = &(iocbq->iocb);
			if (irsp->ulpBdeCount != 0) {
				iocbq->context2 = lpfc_sli_get_buff(phba, pring,
							irsp->un.ulpWord[3]);
				if (!iocbq->context2)
					lpfc_printf_log(phba,
						KERN_ERR,
						LOG_SLI,
						"0343 Ring %d Cannot find "
						"buffer for an unsolicited iocb"
						". tag 0x%x\n", pring->ringno,
						irsp->un.ulpWord[3]);
			}
			if (irsp->ulpBdeCount == 2) {
				iocbq->context3 = lpfc_sli_get_buff(phba, pring,
						irsp->unsli3.sli3Words[7]);
				if (!iocbq->context3)
					lpfc_printf_log(phba,
						KERN_ERR,
						LOG_SLI,
						"0344 Ring %d Cannot find "
						"buffer for an unsolicited "
						"iocb. tag 0x%x\n",
						pring->ringno,
						irsp->unsli3.sli3Words[7]);
			}
		}
	}
	/* Continuation iocbs of the same exchange (ox_id) are collected
	 * on iocb_continue_saveq until the final (non-INTERMED) response
	 * arrives; only then is the whole sequence processed.
	 */
	if (irsp->ulpBdeCount != 0 &&
	    (irsp->ulpCommand == CMD_IOCB_RCV_CONT64_CX ||
	     irsp->ulpStatus == IOSTAT_INTERMED_RSP)) {
		int found = 0;

		/* search continue save q for same XRI */
		list_for_each_entry(iocbq, &pring->iocb_continue_saveq, clist) {
			if (iocbq->iocb.unsli3.rcvsli3.ox_id ==
				saveq->iocb.unsli3.rcvsli3.ox_id) {
				list_add_tail(&saveq->list, &iocbq->list);
				found = 1;
				break;
			}
		}
		if (!found)
			list_add_tail(&saveq->clist,
				      &pring->iocb_continue_saveq);
		if (saveq->iocb.ulpStatus != IOSTAT_INTERMED_RSP) {
			list_del_init(&iocbq->clist);
			saveq = iocbq;
			irsp = &(saveq->iocb);
		} else
			return 0;
	}
	if ((irsp->ulpCommand == CMD_RCV_ELS_REQ64_CX) ||
	    (irsp->ulpCommand == CMD_RCV_ELS_REQ_CX) ||
	    (irsp->ulpCommand == CMD_IOCB_RCV_ELS64_CX)) {
		Rctl = FC_RCTL_ELS_REQ;
		Type = FC_TYPE_ELS;
	} else {
		w5p = (WORD5 *)&(saveq->iocb.un.ulpWord[5]);
		Rctl = w5p->hcsw.Rctl;
		Type = w5p->hcsw.Type;

		/* A zero Rctl on an ELS-ring receive is patched up to
		 * ELS request.
		 */
		if ((Rctl == 0) && (pring->ringno == LPFC_ELS_RING) &&
			(irsp->ulpCommand == CMD_RCV_SEQUENCE64_CX ||
			 irsp->ulpCommand == CMD_IOCB_RCV_SEQ64_CX)) {
			Rctl = FC_RCTL_ELS_REQ;
			Type = FC_TYPE_ELS;
			w5p->hcsw.Rctl = Rctl;
			w5p->hcsw.Type = Type;
		}
	}

	if (!lpfc_complete_unsol_iocb(phba, pring, saveq, Rctl, Type))
		lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
				"0313 Ring %d handler: unexpected Rctl x%x "
				"Type x%x received\n",
				pring->ringno, Rctl, Type);

	return 1;
}
2898
2899
2900
2901
2902
2903
2904
2905
2906
2907
2908
2909
2910
2911
/**
 * lpfc_sli_iocbq_lookup - Find the command iocb for a response iocb
 * @phba: pointer to the HBA context object.
 * @pring: pointer to the driver SLI ring object.
 * @prspiocb: pointer to the response iocb.
 *
 * Looks up the originating command iocb by the response's iotag and, if
 * it is still on the txcmplq, detaches it and returns it.  Must be
 * called with hbalock held.  Returns NULL (after logging) when the iotag
 * is out of range or the command is no longer on the txcmplq.
 */
static struct lpfc_iocbq *
lpfc_sli_iocbq_lookup(struct lpfc_hba *phba,
		      struct lpfc_sli_ring *pring,
		      struct lpfc_iocbq *prspiocb)
{
	struct lpfc_iocbq *cmd_iocb = NULL;
	uint16_t iotag;
	lockdep_assert_held(&phba->hbalock);

	iotag = prspiocb->iocb.ulpIoTag;

	if (iotag != 0 && iotag <= phba->sli.last_iotag) {
		cmd_iocb = phba->sli.iocbq_lookup[iotag];
		if (cmd_iocb->iocb_flag & LPFC_IO_ON_TXCMPLQ) {
			/* remove from txcmpl queue list */
			list_del_init(&cmd_iocb->list);
			cmd_iocb->iocb_flag &= ~LPFC_IO_ON_TXCMPLQ;
			return cmd_iocb;
		}
	}

	lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
			"0317 iotag x%x is out of "
			"range: max iotag x%x wd0 x%x\n",
			iotag, phba->sli.last_iotag,
			*(((uint32_t *) &prspiocb->iocb) + 7));
	return NULL;
}
2940
2941
2942
2943
2944
2945
2946
2947
2948
2949
2950
2951
2952
/**
 * lpfc_sli_iocbq_lookup_by_tag - Find a command iocb by iotag
 * @phba: pointer to the HBA context object.
 * @pring: pointer to the driver SLI ring object.
 * @iotag: iotag of the iocb.
 *
 * Same as lpfc_sli_iocbq_lookup() but takes the iotag directly: if the
 * command is still on the txcmplq it is detached and returned, otherwise
 * NULL is returned after logging.  Must be called with hbalock held.
 */
static struct lpfc_iocbq *
lpfc_sli_iocbq_lookup_by_tag(struct lpfc_hba *phba,
			     struct lpfc_sli_ring *pring, uint16_t iotag)
{
	struct lpfc_iocbq *cmd_iocb = NULL;

	lockdep_assert_held(&phba->hbalock);
	if (iotag != 0 && iotag <= phba->sli.last_iotag) {
		cmd_iocb = phba->sli.iocbq_lookup[iotag];
		if (cmd_iocb->iocb_flag & LPFC_IO_ON_TXCMPLQ) {
			/* remove from txcmpl queue list */
			list_del_init(&cmd_iocb->list);
			cmd_iocb->iocb_flag &= ~LPFC_IO_ON_TXCMPLQ;
			return cmd_iocb;
		}
	}

	lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
			"0372 iotag x%x lookup error: max iotag (x%x) "
			"iocb_flag x%x\n",
			iotag, phba->sli.last_iotag,
			cmd_iocb ? cmd_iocb->iocb_flag : 0xffff);
	return NULL;
}
2977
2978
2979
2980
2981
2982
2983
2984
2985
2986
2987
2988
2989
2990
2991
2992
2993
2994
/**
 * lpfc_sli_process_sol_iocb - Process a solicited (response) iocb completion
 * @phba: Pointer to HBA context object.
 * @pring: Pointer to driver SLI ring object.
 * @saveq: Pointer to the response iocb to be processed.
 *
 * Looks up the originating command iocb for @saveq and invokes its
 * completion handler.  Contains special handling for ELS-ring commands
 * that were aborted by the driver: the response status is rewritten to
 * IOSTAT_LOCAL_REJECT with an abort reason code, and the response iocb is
 * flagged LPFC_DELAY_MEM_FREE so its memory is released later (presumably
 * because the exchange may still be referenced — TODO confirm).
 *
 * Always returns 1.
 */
static int
lpfc_sli_process_sol_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
			  struct lpfc_iocbq *saveq)
{
	struct lpfc_iocbq *cmdiocbp;
	int rc = 1;
	unsigned long iflag;

	/* The iotag lookup table is protected by the ring_lock on SLI-4
	 * and by the global hbalock on SLI-3.
	 */
	if (phba->sli_rev == LPFC_SLI_REV4)
		spin_lock_irqsave(&pring->ring_lock, iflag);
	else
		spin_lock_irqsave(&phba->hbalock, iflag);
	cmdiocbp = lpfc_sli_iocbq_lookup(phba, pring, saveq);
	if (phba->sli_rev == LPFC_SLI_REV4)
		spin_unlock_irqrestore(&pring->ring_lock, iflag);
	else
		spin_unlock_irqrestore(&phba->hbalock, iflag);

	if (cmdiocbp) {
		if (cmdiocbp->iocb_cmpl) {
			/*
			 * If an ELS command failed, send an event to
			 * upper layers (management applications).
			 */
			if (saveq->iocb.ulpStatus &&
			     (pring->ringno == LPFC_ELS_RING) &&
			     (cmdiocbp->iocb.ulpCommand ==
				CMD_ELS_REQUEST64_CR))
				lpfc_send_els_failure_event(phba,
					cmdiocbp, saveq);

			/*
			 * Post-process driver-aborted ELS commands so that
			 * the completion handler sees a consistent
			 * "aborted" status.
			 */
			if (pring->ringno == LPFC_ELS_RING) {
				if ((phba->sli_rev < LPFC_SLI_REV4) &&
				    (cmdiocbp->iocb_flag &
							LPFC_DRIVER_ABORTED)) {
					spin_lock_irqsave(&phba->hbalock,
							  iflag);
					cmdiocbp->iocb_flag &=
						~LPFC_DRIVER_ABORTED;
					spin_unlock_irqrestore(&phba->hbalock,
							       iflag);
					saveq->iocb.ulpStatus =
						IOSTAT_LOCAL_REJECT;
					saveq->iocb.un.ulpWord[4] =
						IOERR_SLI_ABORTED;

					/* Delay freeing the response iocb's
					 * memory until later.
					 */
					spin_lock_irqsave(&phba->hbalock,
							  iflag);
					saveq->iocb_flag |= LPFC_DELAY_MEM_FREE;
					spin_unlock_irqrestore(&phba->hbalock,
							       iflag);
				}
				if (phba->sli_rev == LPFC_SLI_REV4) {
					if (saveq->iocb_flag &
					    LPFC_EXCHANGE_BUSY) {
						/* Propagate EXCHANGE_BUSY
						 * from the response to the
						 * command iocb so the XRI is
						 * not reused until the port
						 * releases the exchange.
						 */
						spin_lock_irqsave(
							&phba->hbalock, iflag);
						cmdiocbp->iocb_flag |=
							LPFC_EXCHANGE_BUSY;
						spin_unlock_irqrestore(
							&phba->hbalock, iflag);
					}
					if (cmdiocbp->iocb_flag &
					    LPFC_DRIVER_ABORTED) {
						/* Clear the driver-abort flag
						 * and rewrite both iocbs'
						 * status to reflect the
						 * abort.
						 */
						spin_lock_irqsave(
							&phba->hbalock, iflag);
						cmdiocbp->iocb_flag &=
							~LPFC_DRIVER_ABORTED;
						spin_unlock_irqrestore(
							&phba->hbalock, iflag);
						cmdiocbp->iocb.ulpStatus =
							IOSTAT_LOCAL_REJECT;
						cmdiocbp->iocb.un.ulpWord[4] =
							IOERR_ABORT_REQUESTED;
						/*
						 * The response presents an
						 * SLI-aborted status to the
						 * completion handler.
						 */
						saveq->iocb.ulpStatus =
							IOSTAT_LOCAL_REJECT;
						saveq->iocb.un.ulpWord[4] =
							IOERR_SLI_ABORTED;
						spin_lock_irqsave(
							&phba->hbalock, iflag);
						saveq->iocb_flag |=
							LPFC_DELAY_MEM_FREE;
						spin_unlock_irqrestore(
							&phba->hbalock, iflag);
					}
				}
			}
			(cmdiocbp->iocb_cmpl) (phba, cmdiocbp, saveq);
		} else
			lpfc_sli_release_iocbq(phba, cmdiocbp);
	} else {
		/*
		 * No matching command iocb.  On the ELS ring this can be a
		 * normal unsolicited entry, so only warn on other rings.
		 */
		if (pring->ringno != LPFC_ELS_RING) {
			/*
			 * Ring <ringno> handler: unexpected completion IoTag
			 * <IoTag>
			 */
			lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
					 "0322 Ring %d handler: "
					 "unexpected completion IoTag x%x "
					 "Data: x%x x%x x%x x%x\n",
					 pring->ringno,
					 saveq->iocb.ulpIoTag,
					 saveq->iocb.ulpStatus,
					 saveq->iocb.un.ulpWord[4],
					 saveq->iocb.ulpCommand,
					 saveq->iocb.ulpContext);
		}
	}

	return rc;
}
3137
3138
3139
3140
3141
3142
3143
3144
3145
3146
3147
3148static void
3149lpfc_sli_rsp_pointers_error(struct lpfc_hba *phba, struct lpfc_sli_ring *pring)
3150{
3151 struct lpfc_pgp *pgp = &phba->port_gp[pring->ringno];
3152
3153
3154
3155
3156 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
3157 "0312 Ring %d handler: portRspPut %d "
3158 "is bigger than rsp ring %d\n",
3159 pring->ringno, le32_to_cpu(pgp->rspPutInx),
3160 pring->sli.sli3.numRiocb);
3161
3162 phba->link_state = LPFC_HBA_ERROR;
3163
3164
3165
3166
3167
3168 phba->work_ha |= HA_ERATT;
3169 phba->work_hs = HS_FFER3;
3170
3171 lpfc_worker_wake_up(phba);
3172
3173 return;
3174}
3175
3176
3177
3178
3179
3180
3181
3182
3183
3184
3185
/**
 * lpfc_poll_eratt - Error-attention polling timer handler
 * @t: timer context, embedded in the HBA's eratt_poll timer.
 *
 * Periodically checks the HBA for an error attention condition.  Also
 * maintains the interrupts-per-second statistic (sli_ips) from the
 * running interrupt counter.  If an error attention is detected the
 * worker thread is woken to process it; otherwise the timer is re-armed
 * for another eratt_poll_interval seconds.
 */
void lpfc_poll_eratt(struct timer_list *t)
{
	struct lpfc_hba *phba;
	uint32_t eratt = 0;
	uint64_t sli_intr, cnt;

	phba = from_timer(phba, t, eratt_poll);

	/* Snapshot the interrupt counter and compute the delta since the
	 * previous poll, accounting for 64-bit counter wraparound.
	 * NOTE(review): the wrap branch appears to be short by one count
	 * ((MAX - prev) + intr, not +1) — harmless for a statistic, but
	 * confirm if exactness ever matters.
	 */
	sli_intr = phba->sli.slistat.sli_intr;

	if (phba->sli.slistat.sli_prev_intr > sli_intr)
		cnt = (((uint64_t)(-1) - phba->sli.slistat.sli_prev_intr) +
			sli_intr);
	else
		cnt = (sli_intr - phba->sli.slistat.sli_prev_intr);

	/* 64-bit division not supported on 32-bit archs; use do_div */
	do_div(cnt, phba->eratt_poll_interval);
	phba->sli.slistat.sli_ips = cnt;

	phba->sli.slistat.sli_prev_intr = sli_intr;

	/* Check for error attention events */
	eratt = lpfc_sli_check_eratt(phba);

	if (eratt)
		/* Tell the worker thread there is work to do */
		lpfc_worker_wake_up(phba);
	else
		/* Restart the timer for the next poll */
		mod_timer(&phba->eratt_poll,
			  jiffies +
			  msecs_to_jiffies(1000 * phba->eratt_poll_interval));
	return;
}
3222
3223
3224
3225
3226
3227
3228
3229
3230
3231
3232
3233
3234
3235
3236
3237
3238
3239
3240
/**
 * lpfc_sli_handle_fast_ring_event - Handle response completions on the
 * fast-path (FCP) ring
 * @phba: Pointer to HBA context object.
 * @pring: Pointer to driver SLI ring object.
 * @mask: Host attention register mask for this ring.
 *
 * Walks the response ring from the driver's rspidx up to the port's
 * rspPutInx, copying each entry to a local iocb and dispatching it by
 * type.  Completion handlers are invoked with hbalock dropped.  The
 * fcp_ring_in_use flag serializes processing so only one context works
 * the ring at a time.  Always returns 1.
 */
int
lpfc_sli_handle_fast_ring_event(struct lpfc_hba *phba,
				struct lpfc_sli_ring *pring, uint32_t mask)
{
	struct lpfc_pgp *pgp = &phba->port_gp[pring->ringno];
	IOCB_t *irsp = NULL;
	IOCB_t *entry = NULL;
	struct lpfc_iocbq *cmdiocbq = NULL;
	struct lpfc_iocbq rspiocbq;
	uint32_t status;
	uint32_t portRspPut, portRspMax;
	int rc = 1;
	lpfc_iocb_type type;
	unsigned long iflag;
	uint32_t rsp_cmpl = 0;

	spin_lock_irqsave(&phba->hbalock, iflag);
	pring->stats.iocb_event++;

	/*
	 * The next available response entry should never exceed the maximum
	 * entries.  If it does, treat it as an adapter hardware error.
	 */
	portRspMax = pring->sli.sli3.numRiocb;
	portRspPut = le32_to_cpu(pgp->rspPutInx);
	if (unlikely(portRspPut >= portRspMax)) {
		lpfc_sli_rsp_pointers_error(phba, pring);
		spin_unlock_irqrestore(&phba->hbalock, iflag);
		return 1;
	}
	/* Only one context at a time may process this ring */
	if (phba->fcp_ring_in_use) {
		spin_unlock_irqrestore(&phba->hbalock, iflag);
		return 1;
	} else
		phba->fcp_ring_in_use = 1;

	rmb();	/* order the rspPutInx read before reading ring entries */
	while (pring->sli.sli3.rspidx != portRspPut) {
		/*
		 * Fetch an entry off the ring and copy it into a local data
		 * structure.  The copy involves a byte-swap since the
		 * network byte order and pci byte orders are different.
		 */
		entry = lpfc_resp_iocb(phba, pring);
		phba->last_completion_time = jiffies;

		if (++pring->sli.sli3.rspidx >= portRspMax)
			pring->sli.sli3.rspidx = 0;

		lpfc_sli_pcimem_bcopy((uint32_t *) entry,
				      (uint32_t *) &rspiocbq.iocb,
				      phba->iocb_rsp_size);
		INIT_LIST_HEAD(&(rspiocbq.list));
		irsp = &rspiocbq.iocb;

		type = lpfc_sli_iocb_cmd_type(irsp->ulpCommand & CMD_IOCB_MASK);
		pring->stats.iocb_rsp++;
		rsp_cmpl++;

		if (unlikely(irsp->ulpStatus)) {
			/*
			 * If resource errors reported from HBA, reduce
			 * queuedepths of the SCSI devices.
			 */
			if ((irsp->ulpStatus == IOSTAT_LOCAL_REJECT) &&
			    ((irsp->un.ulpWord[4] & IOERR_PARAM_MASK) ==
			     IOERR_NO_RESOURCES)) {
				spin_unlock_irqrestore(&phba->hbalock, iflag);
				phba->lpfc_rampdown_queue_depth(phba);
				spin_lock_irqsave(&phba->hbalock, iflag);
			}

			/* Rsp ring <ringno> error: IOCB */
			lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
					"0336 Rsp Ring %d error: IOCB Data: "
					"x%x x%x x%x x%x x%x x%x x%x x%x\n",
					pring->ringno,
					irsp->un.ulpWord[0],
					irsp->un.ulpWord[1],
					irsp->un.ulpWord[2],
					irsp->un.ulpWord[3],
					irsp->un.ulpWord[4],
					irsp->un.ulpWord[5],
					*(uint32_t *)&irsp->un1,
					*((uint32_t *)&irsp->un1 + 1));
		}

		switch (type) {
		case LPFC_ABORT_IOCB:
		case LPFC_SOL_IOCB:
			/*
			 * Idle exchange closed via ABTS from port.  No iocb
			 * resources need to be recovered.
			 */
			if (unlikely(irsp->ulpCommand == CMD_XRI_ABORTED_CX)) {
				lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
						"0333 IOCB cmd 0x%x"
						" processed. Skipping"
						" completion\n",
						irsp->ulpCommand);
				break;
			}

			cmdiocbq = lpfc_sli_iocbq_lookup(phba, pring,
							 &rspiocbq);
			if (unlikely(!cmdiocbq))
				break;
			if (cmdiocbq->iocb_flag & LPFC_DRIVER_ABORTED)
				cmdiocbq->iocb_flag &= ~LPFC_DRIVER_ABORTED;
			if (cmdiocbq->iocb_cmpl) {
				/* drop the lock around the handler call */
				spin_unlock_irqrestore(&phba->hbalock, iflag);
				(cmdiocbq->iocb_cmpl)(phba, cmdiocbq,
						      &rspiocbq);
				spin_lock_irqsave(&phba->hbalock, iflag);
			}
			break;
		case LPFC_UNSOL_IOCB:
			spin_unlock_irqrestore(&phba->hbalock, iflag);
			lpfc_sli_process_unsol_iocb(phba, pring, &rspiocbq);
			spin_lock_irqsave(&phba->hbalock, iflag);
			break;
		default:
			if (irsp->ulpCommand == CMD_ADAPTER_MSG) {
				char adaptermsg[LPFC_MAX_ADPTMSG];
				memset(adaptermsg, 0, LPFC_MAX_ADPTMSG);
				memcpy(&adaptermsg[0], (uint8_t *) irsp,
				       MAX_MSG_DATA);
				dev_warn(&((phba->pcidev)->dev),
					 "lpfc%d: %s\n",
					 phba->brd_no, adaptermsg);
			} else {
				/* Unknown IOCB command */
				lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
						"0334 Unknown IOCB command "
						"Data: x%x, x%x x%x x%x x%x\n",
						type, irsp->ulpCommand,
						irsp->ulpStatus,
						irsp->ulpIoTag,
						irsp->ulpContext);
			}
			break;
		}

		/*
		 * The response IOCB has been processed.  Update the ring
		 * pointer in SLIM.  If the port response put pointer has not
		 * been updated, sync the pgp->rspPutInx and fetch the new port
		 * response put pointer.
		 */
		writel(pring->sli.sli3.rspidx,
			&phba->host_gp[pring->ringno].rspGetInx);

		if (pring->sli.sli3.rspidx == portRspPut)
			portRspPut = le32_to_cpu(pgp->rspPutInx);
	}

	if ((rsp_cmpl > 0) && (mask & HA_R0RE_REQ)) {
		pring->stats.iocb_rsp_full++;
		status = ((CA_R0ATT | CA_R0RE_RSP) << (pring->ringno * 4));
		writel(status, phba->CAregaddr);
		readl(phba->CAregaddr);	/* flush */
	}
	if ((mask & HA_R0CE_RSP) && (pring->flag & LPFC_CALL_RING_AVAILABLE)) {
		pring->flag &= ~LPFC_CALL_RING_AVAILABLE;
		pring->stats.iocb_cmd_empty++;

		/* Force update of the local copy of cmdGetInx */
		pring->sli.sli3.local_getidx = le32_to_cpu(pgp->cmdGetInx);
		lpfc_sli_resume_iocb(phba, pring);

		if ((pring->lpfc_sli_cmd_available))
			(pring->lpfc_sli_cmd_available) (phba, pring);

	}

	phba->fcp_ring_in_use = 0;
	spin_unlock_irqrestore(&phba->hbalock, iflag);
	return rc;
}
3420
3421
3422
3423
3424
3425
3426
3427
3428
3429
3430
3431
3432
3433
3434
3435
3436
3437
3438
/**
 * lpfc_sli_sp_handle_rspiocb - Process a slow-path response iocb
 * @phba: Pointer to HBA context object.
 * @pring: Pointer to driver SLI ring object.
 * @rspiocbp: Pointer to driver response iocb object.
 *
 * Appends @rspiocbp to the ring's continuation queue.  When the entry's
 * ulpLe bit indicates the last iocb of a sequence, the accumulated
 * sequence (headed by the first queued iocb, "saveq") is dispatched by
 * iocb type — solicited, unsolicited, abort, or unknown — and the queued
 * iocbs are released unless the unsolicited handler took ownership.
 *
 * Returns NULL after consuming the iocb; callers must not reuse the
 * passed pointer afterward.
 */
static struct lpfc_iocbq *
lpfc_sli_sp_handle_rspiocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
			struct lpfc_iocbq *rspiocbp)
{
	struct lpfc_iocbq *saveq;
	struct lpfc_iocbq *cmdiocbp;
	struct lpfc_iocbq *next_iocb;
	IOCB_t *irsp = NULL;
	uint32_t free_saveq;
	uint8_t iocb_cmd_type;
	lpfc_iocb_type type;
	unsigned long iflag;
	int rc;

	spin_lock_irqsave(&phba->hbalock, iflag);
	/* First add the response iocb to the countinueq list */
	list_add_tail(&rspiocbp->list, &(pring->iocb_continueq));
	pring->iocb_continueq_cnt++;

	/* Now, determine whether the list is completed for processing */
	irsp = &rspiocbp->iocb;
	if (irsp->ulpLe) {
		/*
		 * By default, the driver expects to free all resources
		 * associated with this iocb completion.
		 */
		free_saveq = 1;
		saveq = list_get_first(&pring->iocb_continueq,
				       struct lpfc_iocbq, list);
		irsp = &(saveq->iocb);
		list_del_init(&pring->iocb_continueq);
		pring->iocb_continueq_cnt = 0;

		pring->stats.iocb_rsp++;

		/*
		 * If resource errors reported from HBA, reduce
		 * queuedepths of the SCSI devices.
		 */
		if ((irsp->ulpStatus == IOSTAT_LOCAL_REJECT) &&
		    ((irsp->un.ulpWord[4] & IOERR_PARAM_MASK) ==
		     IOERR_NO_RESOURCES)) {
			spin_unlock_irqrestore(&phba->hbalock, iflag);
			phba->lpfc_rampdown_queue_depth(phba);
			spin_lock_irqsave(&phba->hbalock, iflag);
		}

		if (irsp->ulpStatus) {
			/* Rsp ring <ringno> error: IOCB */
			lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
					"0328 Rsp Ring %d error: "
					"IOCB Data: "
					"x%x x%x x%x x%x "
					"x%x x%x x%x x%x "
					"x%x x%x x%x x%x "
					"x%x x%x x%x x%x\n",
					pring->ringno,
					irsp->un.ulpWord[0],
					irsp->un.ulpWord[1],
					irsp->un.ulpWord[2],
					irsp->un.ulpWord[3],
					irsp->un.ulpWord[4],
					irsp->un.ulpWord[5],
					*(((uint32_t *) irsp) + 6),
					*(((uint32_t *) irsp) + 7),
					*(((uint32_t *) irsp) + 8),
					*(((uint32_t *) irsp) + 9),
					*(((uint32_t *) irsp) + 10),
					*(((uint32_t *) irsp) + 11),
					*(((uint32_t *) irsp) + 12),
					*(((uint32_t *) irsp) + 13),
					*(((uint32_t *) irsp) + 14),
					*(((uint32_t *) irsp) + 15));
		}

		/*
		 * Fetch the iocb command type and call the correct completion
		 * routine.  Solicited and Unsolicited IOCBs on the ELS ring
		 * get freed back to the lpfc_iocb_list by the discovery
		 * kernel thread.
		 */
		iocb_cmd_type = irsp->ulpCommand & CMD_IOCB_MASK;
		type = lpfc_sli_iocb_cmd_type(iocb_cmd_type);
		switch (type) {
		case LPFC_SOL_IOCB:
			spin_unlock_irqrestore(&phba->hbalock, iflag);
			rc = lpfc_sli_process_sol_iocb(phba, pring, saveq);
			spin_lock_irqsave(&phba->hbalock, iflag);
			break;

		case LPFC_UNSOL_IOCB:
			spin_unlock_irqrestore(&phba->hbalock, iflag);
			rc = lpfc_sli_process_unsol_iocb(phba, pring, saveq);
			spin_lock_irqsave(&phba->hbalock, iflag);
			if (!rc)
				free_saveq = 0;
			break;

		case LPFC_ABORT_IOCB:
			cmdiocbp = NULL;
			if (irsp->ulpCommand != CMD_XRI_ABORTED_CX)
				cmdiocbp = lpfc_sli_iocbq_lookup(phba, pring,
								 saveq);
			if (cmdiocbp) {
				/* Call the specified completion routine */
				if (cmdiocbp->iocb_cmpl) {
					spin_unlock_irqrestore(&phba->hbalock,
							       iflag);
					(cmdiocbp->iocb_cmpl)(phba, cmdiocbp,
							      saveq);
					spin_lock_irqsave(&phba->hbalock,
							  iflag);
				} else
					__lpfc_sli_release_iocbq(phba,
								 cmdiocbp);
			}
			break;

		case LPFC_UNKNOWN_IOCB:
			if (irsp->ulpCommand == CMD_ADAPTER_MSG) {
				char adaptermsg[LPFC_MAX_ADPTMSG];
				memset(adaptermsg, 0, LPFC_MAX_ADPTMSG);
				memcpy(&adaptermsg[0], (uint8_t *)irsp,
				       MAX_MSG_DATA);
				dev_warn(&((phba->pcidev)->dev),
					 "lpfc%d: %s\n",
					 phba->brd_no, adaptermsg);
			} else {
				/* Unknown IOCB command */
				lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
						"0335 Unknown IOCB "
						"command Data: x%x "
						"x%x x%x x%x\n",
						irsp->ulpCommand,
						irsp->ulpStatus,
						irsp->ulpIoTag,
						irsp->ulpContext);
			}
			break;
		}

		if (free_saveq) {
			list_for_each_entry_safe(rspiocbp, next_iocb,
						 &saveq->list, list) {
				list_del_init(&rspiocbp->list);
				__lpfc_sli_release_iocbq(phba, rspiocbp);
			}
			__lpfc_sli_release_iocbq(phba, saveq);
		}
		rspiocbp = NULL;
	}
	spin_unlock_irqrestore(&phba->hbalock, iflag);
	return rspiocbp;
}
3593
3594
3595
3596
3597
3598
3599
3600
3601
3602
/**
 * lpfc_sli_handle_slow_ring_event - Wrapper for slow-path ring event handling
 * @phba: Pointer to HBA context object.
 * @pring: Pointer to driver SLI ring object.
 * @mask: Host attention register mask for this ring.
 *
 * Dispatches to the SLI-revision-specific handler installed in
 * phba->lpfc_sli_handle_slow_ring_event (s3 or s4 variant).
 */
void
lpfc_sli_handle_slow_ring_event(struct lpfc_hba *phba,
				struct lpfc_sli_ring *pring, uint32_t mask)
{
	phba->lpfc_sli_handle_slow_ring_event(phba, pring, mask);
}
3609
3610
3611
3612
3613
3614
3615
3616
3617
3618
3619
3620
/**
 * lpfc_sli_handle_slow_ring_event_s3 - Handle SLI-3 slow-path ring events
 * @phba: Pointer to HBA context object.
 * @pring: Pointer to driver SLI ring object.
 * @mask: Host attention register mask for this ring.
 *
 * Walks the response ring, copying each entry into a freshly allocated
 * iocbq and handing it to lpfc_sli_sp_handle_rspiocb() with hbalock
 * dropped.  On a bogus port put index the HBA is marked errored and
 * error-attention handling is invoked directly.
 */
static void
lpfc_sli_handle_slow_ring_event_s3(struct lpfc_hba *phba,
				   struct lpfc_sli_ring *pring, uint32_t mask)
{
	struct lpfc_pgp *pgp;
	IOCB_t *entry;
	IOCB_t *irsp = NULL;
	struct lpfc_iocbq *rspiocbp = NULL;
	uint32_t portRspPut, portRspMax;
	unsigned long iflag;
	uint32_t status;

	pgp = &phba->port_gp[pring->ringno];
	spin_lock_irqsave(&phba->hbalock, iflag);
	pring->stats.iocb_event++;

	/*
	 * The next available response entry should never exceed the maximum
	 * entries.  If it does, treat it as an adapter hardware error.
	 */
	portRspMax = pring->sli.sli3.numRiocb;
	portRspPut = le32_to_cpu(pgp->rspPutInx);
	if (portRspPut >= portRspMax) {
		/*
		 * Ring <ringno> handler: portRspPut <portRspPut> is bigger than
		 * rsp ring <portRspMax>
		 */
		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
				"0303 Ring %d handler: portRspPut %d "
				"is bigger than rsp ring %d\n",
				pring->ringno, portRspPut, portRspMax);

		phba->link_state = LPFC_HBA_ERROR;
		spin_unlock_irqrestore(&phba->hbalock, iflag);

		phba->work_hs = HS_FFER3;
		lpfc_handle_eratt(phba);

		return;
	}

	rmb();	/* order the rspPutInx read before reading ring entries */
	while (pring->sli.sli3.rspidx != portRspPut) {
		/*
		 * Build a completion list and call the appropriate handler.
		 * The process is to get the next available response iocb, get
		 * a free iocb from the list, copy the response data into the
		 * free iocb, insert to the continuation list, and update the
		 * next response index to slim.  This process makes response
		 * iocb's in the ring available to DMA as fast as possible but
		 * pays a penalty for a copy operation.  Since the iocb is
		 * only 32 bytes, this penalty is considered small relative to
		 * the PCI reads for register values and a slim write.  When
		 * the ulpLe field is set, the entire Command has been
		 * received.
		 */
		entry = lpfc_resp_iocb(phba, pring);

		phba->last_completion_time = jiffies;
		rspiocbp = __lpfc_sli_get_iocbq(phba);
		if (rspiocbp == NULL) {
			/* out of iocb resources; abandon the remaining
			 * entries rather than dereference NULL
			 */
			printk(KERN_ERR "%s: out of buffers! Failing "
			       "completion.\n", __func__);
			break;
		}

		lpfc_sli_pcimem_bcopy(entry, &rspiocbp->iocb,
				      phba->iocb_rsp_size);
		irsp = &rspiocbp->iocb;

		if (++pring->sli.sli3.rspidx >= portRspMax)
			pring->sli.sli3.rspidx = 0;

		if (pring->ringno == LPFC_ELS_RING) {
			lpfc_debugfs_slow_ring_trc(phba,
			"IOCB rsp ring:   wd4:x%08x wd6:x%08x wd7:x%08x",
				*(((uint32_t *) irsp) + 4),
				*(((uint32_t *) irsp) + 6),
				*(((uint32_t *) irsp) + 7));
		}

		writel(pring->sli.sli3.rspidx,
			&phba->host_gp[pring->ringno].rspGetInx);

		spin_unlock_irqrestore(&phba->hbalock, iflag);
		/* Handle the response IOCB */
		rspiocbp = lpfc_sli_sp_handle_rspiocb(phba, pring, rspiocbp);
		spin_lock_irqsave(&phba->hbalock, iflag);

		/*
		 * If the port response put pointer has not been updated, sync
		 * the pgp->rspPutInx in the MAILBOX_tand fetch the new port
		 * response put pointer.
		 */
		if (pring->sli.sli3.rspidx == portRspPut) {
			portRspPut = le32_to_cpu(pgp->rspPutInx);
		}
	} /* while (pring->sli.sli3.rspidx != portRspPut) */

	if ((rspiocbp != NULL) && (mask & HA_R0RE_REQ)) {
		/* At least one response entry has been freed */
		pring->stats.iocb_rsp_full++;
		/* SET RxRE_RSP in Chip Att register */
		status = ((CA_R0ATT | CA_R0RE_RSP) << (pring->ringno * 4));
		writel(status, phba->CAregaddr);
		readl(phba->CAregaddr); /* flush */
	}
	if ((mask & HA_R0CE_RSP) && (pring->flag & LPFC_CALL_RING_AVAILABLE)) {
		pring->flag &= ~LPFC_CALL_RING_AVAILABLE;
		pring->stats.iocb_cmd_empty++;

		/* Force update of the local copy of cmdGetInx */
		pring->sli.sli3.local_getidx = le32_to_cpu(pgp->cmdGetInx);
		lpfc_sli_resume_iocb(phba, pring);

		if ((pring->lpfc_sli_cmd_available))
			(pring->lpfc_sli_cmd_available) (phba, pring);

	}

	spin_unlock_irqrestore(&phba->hbalock, iflag);
	return;
}
3744
3745
3746
3747
3748
3749
3750
3751
3752
3753
3754
3755
3756
/**
 * lpfc_sli_handle_slow_ring_event_s4 - Handle SLI-4 slow-path queue events
 * @phba: Pointer to HBA context object.
 * @pring: Pointer to driver SLI ring object.
 * @mask: Host attention register mask (unused on SLI-4 path).
 *
 * Drains the sli4_hba.sp_queue_event list.  Work-queue completions are
 * converted to response iocbs and passed to lpfc_sli_sp_handle_rspiocb();
 * receive-queue events are handed to the unsolicited-buffer handler.
 * The HBA_SP_QUEUE_EVT flag is cleared up front so a new interrupt can
 * re-arm processing for events posted after this pass.
 */
static void
lpfc_sli_handle_slow_ring_event_s4(struct lpfc_hba *phba,
				   struct lpfc_sli_ring *pring, uint32_t mask)
{
	struct lpfc_iocbq *irspiocbq;
	struct hbq_dmabuf *dmabuf;
	struct lpfc_cq_event *cq_event;
	unsigned long iflag;

	spin_lock_irqsave(&phba->hbalock, iflag);
	phba->hba_flag &= ~HBA_SP_QUEUE_EVT;
	spin_unlock_irqrestore(&phba->hbalock, iflag);
	while (!list_empty(&phba->sli4_hba.sp_queue_event)) {
		/* Get the response iocb from the head of work queue */
		spin_lock_irqsave(&phba->hbalock, iflag);
		list_remove_head(&phba->sli4_hba.sp_queue_event,
				 cq_event, struct lpfc_cq_event, list);
		spin_unlock_irqrestore(&phba->hbalock, iflag);

		switch (bf_get(lpfc_wcqe_c_code, &cq_event->cqe.wcqe_cmpl)) {
		case CQE_CODE_COMPL_WQE:
			irspiocbq = container_of(cq_event, struct lpfc_iocbq,
						 cq_event);
			/* Translate ELS WCQE to response IOCBQ */
			irspiocbq = lpfc_sli4_els_wcqe_to_rspiocbq(phba,
								   irspiocbq);
			if (irspiocbq)
				lpfc_sli_sp_handle_rspiocb(phba, pring,
							   irspiocbq);
			break;
		case CQE_CODE_RECEIVE:
		case CQE_CODE_RECEIVE_V1:
			dmabuf = container_of(cq_event, struct hbq_dmabuf,
					      cq_event);
			lpfc_sli4_handle_received_buffer(phba, dmabuf);
			break;
		default:
			break;
		}
	}
}
3798
3799
3800
3801
3802
3803
3804
3805
3806
3807
3808
/**
 * lpfc_sli_abort_iocb_ring - Abort all iocbs in the given ring
 * @phba: Pointer to HBA context object.
 * @pring: Pointer to driver SLI ring object.
 *
 * Cancels every pending iocb on the ring's txq with
 * IOSTAT_LOCAL_REJECT/IOERR_SLI_ABORTED and issues an abort iotag for
 * every iocb outstanding on the txcmplq.  For the ELS ring, fabric iocbs
 * are aborted first.  SLI-4 protects the txq with the per-ring ring_lock;
 * SLI-3 uses the global hbalock for both lists.
 */
void
lpfc_sli_abort_iocb_ring(struct lpfc_hba *phba, struct lpfc_sli_ring *pring)
{
	LIST_HEAD(completions);
	struct lpfc_iocbq *iocb, *next_iocb;

	if (pring->ringno == LPFC_ELS_RING) {
		lpfc_fabric_abort_hba(phba);
	}

	/* Error everything on txq and txcmplq
	 * First do the txq.
	 */
	if (phba->sli_rev >= LPFC_SLI_REV4) {
		spin_lock_irq(&pring->ring_lock);
		list_splice_init(&pring->txq, &completions);
		pring->txq_cnt = 0;
		spin_unlock_irq(&pring->ring_lock);

		spin_lock_irq(&phba->hbalock);
		/* Next issue ABTS for everything on the txcmplq */
		list_for_each_entry_safe(iocb, next_iocb, &pring->txcmplq, list)
			lpfc_sli_issue_abort_iotag(phba, pring, iocb);
		spin_unlock_irq(&phba->hbalock);
	} else {
		spin_lock_irq(&phba->hbalock);
		list_splice_init(&pring->txq, &completions);
		pring->txq_cnt = 0;

		/* Next issue ABTS for everything on the txcmplq */
		list_for_each_entry_safe(iocb, next_iocb, &pring->txcmplq, list)
			lpfc_sli_issue_abort_iotag(phba, pring, iocb);
		spin_unlock_irq(&phba->hbalock);
	}

	/* Cancel all the IOCBs from the completions list */
	lpfc_sli_cancel_iocbs(phba, &completions, IOSTAT_LOCAL_REJECT,
			      IOERR_SLI_ABORTED);
}
3848
3849
3850
3851
3852
3853
3854
3855
3856
3857
3858
3859void
3860lpfc_sli_abort_wqe_ring(struct lpfc_hba *phba, struct lpfc_sli_ring *pring)
3861{
3862 LIST_HEAD(completions);
3863 struct lpfc_iocbq *iocb, *next_iocb;
3864
3865 if (pring->ringno == LPFC_ELS_RING)
3866 lpfc_fabric_abort_hba(phba);
3867
3868 spin_lock_irq(&phba->hbalock);
3869
3870 list_for_each_entry_safe(iocb, next_iocb, &pring->txcmplq, list)
3871 lpfc_sli4_abort_nvme_io(phba, pring, iocb);
3872 spin_unlock_irq(&phba->hbalock);
3873}
3874
3875
3876
3877
3878
3879
3880
3881
3882
3883
3884
3885
3886void
3887lpfc_sli_abort_fcp_rings(struct lpfc_hba *phba)
3888{
3889 struct lpfc_sli *psli = &phba->sli;
3890 struct lpfc_sli_ring *pring;
3891 uint32_t i;
3892
3893
3894 if (phba->sli_rev >= LPFC_SLI_REV4) {
3895 for (i = 0; i < phba->cfg_fcp_io_channel; i++) {
3896 pring = phba->sli4_hba.fcp_wq[i]->pring;
3897 lpfc_sli_abort_iocb_ring(phba, pring);
3898 }
3899 } else {
3900 pring = &psli->sli3_ring[LPFC_FCP_RING];
3901 lpfc_sli_abort_iocb_ring(phba, pring);
3902 }
3903}
3904
3905
3906
3907
3908
3909
3910
3911
3912
3913
3914void
3915lpfc_sli_abort_nvme_rings(struct lpfc_hba *phba)
3916{
3917 struct lpfc_sli_ring *pring;
3918 uint32_t i;
3919
3920 if (phba->sli_rev < LPFC_SLI_REV4)
3921 return;
3922
3923
3924 for (i = 0; i < phba->cfg_nvme_io_channel; i++) {
3925 pring = phba->sli4_hba.nvme_wq[i]->pring;
3926 lpfc_sli_abort_wqe_ring(phba, pring);
3927 }
3928}
3929
3930
3931
3932
3933
3934
3935
3936
3937
3938
3939
3940
/**
 * lpfc_sli_flush_fcp_rings - Flush all iocbs on the FCP rings
 * @phba: Pointer to HBA context object.
 *
 * Marks the HBA with HBA_FCP_IOQ_FLUSH, then empties every FCP ring's
 * txq and txcmplq and cancels the flushed iocbs with
 * IOSTAT_LOCAL_REJECT/IOERR_SLI_DOWN.  The LPFC_IO_ON_TXCMPLQ flag is
 * cleared on each txcmplq entry before it is moved, since the iocb is no
 * longer tracked on that queue.  Used when the port is being taken down.
 */
void
lpfc_sli_flush_fcp_rings(struct lpfc_hba *phba)
{
	LIST_HEAD(txq);
	LIST_HEAD(txcmplq);
	struct lpfc_sli *psli = &phba->sli;
	struct lpfc_sli_ring  *pring;
	uint32_t i;
	struct lpfc_iocbq *piocb, *next_iocb;

	spin_lock_irq(&phba->hbalock);
	/* Indicate the I/O queues are flushed */
	phba->hba_flag |= HBA_FCP_IOQ_FLUSH;
	spin_unlock_irq(&phba->hbalock);

	/* Look on all the FCP Rings for the iotag */
	if (phba->sli_rev >= LPFC_SLI_REV4) {
		for (i = 0; i < phba->cfg_fcp_io_channel; i++) {
			pring = phba->sli4_hba.fcp_wq[i]->pring;

			spin_lock_irq(&pring->ring_lock);
			/* Retrieve everything on txq */
			list_splice_init(&pring->txq, &txq);
			list_for_each_entry_safe(piocb, next_iocb,
						 &pring->txcmplq, list)
				piocb->iocb_flag &= ~LPFC_IO_ON_TXCMPLQ;
			/* Retrieve everything on the txcmplq */
			list_splice_init(&pring->txcmplq, &txcmplq);
			pring->txq_cnt = 0;
			pring->txcmplq_cnt = 0;
			spin_unlock_irq(&pring->ring_lock);

			/* Flush the txq */
			lpfc_sli_cancel_iocbs(phba, &txq,
					      IOSTAT_LOCAL_REJECT,
					      IOERR_SLI_DOWN);
			/* Flush the txcmpq */
			lpfc_sli_cancel_iocbs(phba, &txcmplq,
					      IOSTAT_LOCAL_REJECT,
					      IOERR_SLI_DOWN);
		}
	} else {
		pring = &psli->sli3_ring[LPFC_FCP_RING];

		spin_lock_irq(&phba->hbalock);
		/* Retrieve everything on txq */
		list_splice_init(&pring->txq, &txq);
		list_for_each_entry_safe(piocb, next_iocb,
					 &pring->txcmplq, list)
			piocb->iocb_flag &= ~LPFC_IO_ON_TXCMPLQ;
		/* Retrieve everything on the txcmplq */
		list_splice_init(&pring->txcmplq, &txcmplq);
		pring->txq_cnt = 0;
		pring->txcmplq_cnt = 0;
		spin_unlock_irq(&phba->hbalock);

		/* Flush the txq */
		lpfc_sli_cancel_iocbs(phba, &txq, IOSTAT_LOCAL_REJECT,
				      IOERR_SLI_DOWN);
		/* Flush the txcmpq */
		lpfc_sli_cancel_iocbs(phba, &txcmplq, IOSTAT_LOCAL_REJECT,
				      IOERR_SLI_DOWN);
	}
}
4005
4006
4007
4008
4009
4010
4011
4012
4013
4014
4015
/**
 * lpfc_sli_flush_nvme_rings - Flush all outstanding WQEs on the NVMe rings
 * @phba: Pointer to HBA context object.
 *
 * SLI-4 only.  Marks the HBA with HBA_NVME_IOQ_FLUSH, then empties every
 * NVMe ring's txcmplq (clearing LPFC_IO_ON_TXCMPLQ on each entry) and
 * cancels the flushed iocbs with IOSTAT_LOCAL_REJECT/IOERR_SLI_DOWN.
 * Note that, unlike the FCP flush, only the txcmplq is flushed here.
 */
void
lpfc_sli_flush_nvme_rings(struct lpfc_hba *phba)
{
	LIST_HEAD(txcmplq);
	struct lpfc_sli_ring  *pring;
	uint32_t i;
	struct lpfc_iocbq *piocb, *next_iocb;

	if (phba->sli_rev < LPFC_SLI_REV4)
		return;

	/* Hint to other driver paths that a flush is in progress */
	spin_lock_irq(&phba->hbalock);
	phba->hba_flag |= HBA_NVME_IOQ_FLUSH;
	spin_unlock_irq(&phba->hbalock);

	/* Cycle through all NVME rings and complete each IO with
	 * a local driver reason code.
	 */
	for (i = 0; i < phba->cfg_nvme_io_channel; i++) {
		pring = phba->sli4_hba.nvme_wq[i]->pring;

		spin_lock_irq(&pring->ring_lock);
		list_for_each_entry_safe(piocb, next_iocb,
					 &pring->txcmplq, list)
			piocb->iocb_flag &= ~LPFC_IO_ON_TXCMPLQ;
		/* Retrieve everything on the txcmplq */
		list_splice_init(&pring->txcmplq, &txcmplq);
		pring->txcmplq_cnt = 0;
		spin_unlock_irq(&pring->ring_lock);

		/* Flush the txcmpq */
		lpfc_sli_cancel_iocbs(phba, &txcmplq,
				      IOSTAT_LOCAL_REJECT,
				      IOERR_SLI_DOWN);
	}
}
4054
4055
4056
4057
4058
4059
4060
4061
4062
4063
4064
4065
4066
4067
/**
 * lpfc_sli_brdready_s3 - Wait for an SLI-3 HBA to become ready
 * @phba: Pointer to HBA context object.
 * @mask: Bit mask of HS register bits that must all be set for "ready".
 *
 * Polls the Host Status register with increasing delays (10 ms, then
 * 500 ms, then 2.5 s across up to 20 iterations; roughly 30 s total)
 * until (status & mask) == mask, an error (HS_FFERM) is seen, or the
 * attempts are exhausted.  Halfway through (iteration 15) the board is
 * restarted once as a recovery attempt.
 *
 * Returns 0 when the HBA is ready; 1 on register-read failure, firmware
 * error, or timeout (in the latter two cases link_state is set to
 * LPFC_HBA_ERROR).
 */
static int
lpfc_sli_brdready_s3(struct lpfc_hba *phba, uint32_t mask)
{
	uint32_t status;
	int i = 0;
	int retval = 0;

	/* Read the HBA Host Status Register */
	if (lpfc_readl(phba->HSregaddr, &status))
		return 1;

	/*
	 * Check status register every 10ms for 5 retries, then every 100ms
	 * for 5 retries, then every 500ms for 5 retries.  There is no need
	 * to wait longer than 30 seconds total.
	 * NOTE(review): actual delays below are 10ms/500ms/2500ms — the
	 * loop still bounds out near 30s, but confirm the schedule intent.
	 */
	while (((status & mask) != mask) &&
	       !(status & HS_FFERM) &&
	       i++ < 20) {

		if (i <= 5)
			msleep(10);
		else if (i <= 10)
			msleep(500);
		else
			msleep(2500);

		if (i == 15) {
			/* Do post, then retry the ready check */
			phba->pport->port_state = LPFC_VPORT_UNKNOWN;
			lpfc_sli_brdrestart(phba);
		}
		/* Read the HBA Host Status Register */
		if (lpfc_readl(phba->HSregaddr, &status)) {
			retval = 1;
			break;
		}
	}

	/* Check to see if any errors occurred during init */
	if ((status & HS_FFERM) || (i >= 20)) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"2751 Adapter failed to restart, "
				"status reg x%x, FW Data: A8 x%x AC x%x\n",
				status,
				readl(phba->MBslimaddr + 0xa8),
				readl(phba->MBslimaddr + 0xac));
		phba->link_state = LPFC_HBA_ERROR;
		retval = 1;
	}

	return retval;
}
4122
4123
4124
4125
4126
4127
4128
4129
4130
4131
4132
4133
4134static int
4135lpfc_sli_brdready_s4(struct lpfc_hba *phba, uint32_t mask)
4136{
4137 uint32_t status;
4138 int retval = 0;
4139
4140
4141 status = lpfc_sli4_post_status_check(phba);
4142
4143 if (status) {
4144 phba->pport->port_state = LPFC_VPORT_UNKNOWN;
4145 lpfc_sli_brdrestart(phba);
4146 status = lpfc_sli4_post_status_check(phba);
4147 }
4148
4149
4150 if (status) {
4151 phba->link_state = LPFC_HBA_ERROR;
4152 retval = 1;
4153 } else
4154 phba->sli4_hba.intr_enable = 0;
4155
4156 return retval;
4157}
4158
4159
4160
4161
4162
4163
4164
4165
4166
/**
 * lpfc_sli_brdready - Wrapper for board-ready check
 * @phba: Pointer to HBA context object.
 * @mask: Bit mask of required ready bits (SLI-3 only; ignored on SLI-4).
 *
 * Dispatches to the SLI-revision-specific routine installed in
 * phba->lpfc_sli_brdready (s3 or s4 variant).  Returns that routine's
 * result: 0 when ready, 1 otherwise.
 */
int
lpfc_sli_brdready(struct lpfc_hba *phba, uint32_t mask)
{
	return phba->lpfc_sli_brdready(phba, mask);
}
4172
4173#define BARRIER_TEST_PATTERN (0xdeadbeef)
4174
4175
4176
4177
4178
4179
4180
4181
/**
 * lpfc_reset_barrier - Make the HBA ready for a reset
 * @phba: Pointer to HBA context object.
 *
 * Implements a chip-specific barrier handshake (only for PCI-to-PCI
 * bridge header type 0x80 with HELIOS/THOR JEDEC IDs) before the board
 * is reset: error interrupts are masked (LS_IGNORE_ERATT), a
 * MBX_KILL_BOARD mailbox word is written to SLIM, and the code polls for
 * the chip to overwrite a test pattern as acknowledgment.  On exit the
 * original HC register value is restored.
 *
 * Context: caller must hold phba->hbalock (asserted below).
 */
void lpfc_reset_barrier(struct lpfc_hba *phba)
{
	uint32_t __iomem *resp_buf;
	uint32_t __iomem *mbox_buf;
	volatile uint32_t mbox;
	uint32_t hc_copy, ha_copy, resp_data;
	int  i;
	uint8_t hdrtype;

	lockdep_assert_held(&phba->hbalock);

	pci_read_config_byte(phba->pcidev, PCI_HEADER_TYPE, &hdrtype);
	if (hdrtype != 0x80 ||
	    (FC_JEDEC_ID(phba->vpd.rev.biuRev) != HELIOS_JEDEC_ID &&
	     FC_JEDEC_ID(phba->vpd.rev.biuRev) != THOR_JEDEC_ID))
		return;

	/*
	 * Tell the other part of the chip to suspend temporarily all
	 * its DMA activity.
	 */
	resp_buf = phba->MBslimaddr;

	/* Disable the error attention */
	if (lpfc_readl(phba->HCregaddr, &hc_copy))
		return;
	writel((hc_copy & ~HC_ERINT_ENA), phba->HCregaddr);
	readl(phba->HCregaddr); /* flush */
	phba->link_flag |= LS_IGNORE_ERATT;

	if (lpfc_readl(phba->HAregaddr, &ha_copy))
		return;
	if (ha_copy & HA_ERATT) {
		/* Clear Chip error bit */
		writel(HA_ERATT, phba->HAregaddr);
		phba->pport->stopped = 1;
	}

	mbox = 0;
	((MAILBOX_t *)&mbox)->mbxCommand = MBX_KILL_BOARD;
	((MAILBOX_t *)&mbox)->mbxOwner = OWN_CHIP;

	/* Write the test pattern; the chip acks by complementing it */
	writel(BARRIER_TEST_PATTERN, (resp_buf + 1));
	mbox_buf = phba->MBslimaddr;
	writel(mbox, mbox_buf);

	/* Poll up to ~50ms for the pattern complement */
	for (i = 0; i < 50; i++) {
		if (lpfc_readl((resp_buf + 1), &resp_data))
			return;
		if (resp_data != ~(BARRIER_TEST_PATTERN))
			mdelay(1);
		else
			break;
	}
	resp_data = 0;
	if (lpfc_readl((resp_buf + 1), &resp_data))
		return;
	if (resp_data  != ~(BARRIER_TEST_PATTERN)) {
		if (phba->sli.sli_flag & LPFC_SLI_ACTIVE ||
		    phba->pport->stopped)
			goto restore_hc;
		else
			goto clear_errat;
	}

	/* Hand the mailbox back to the host and wait for echo */
	((MAILBOX_t *)&mbox)->mbxOwner = OWN_HOST;
	resp_data = 0;
	for (i = 0; i < 500; i++) {
		if (lpfc_readl(resp_buf, &resp_data))
			return;
		if (resp_data != mbox)
			mdelay(1);
		else
			break;
	}

clear_errat:
	/* Wait up to ~500ms more for error attention to assert */
	while (++i < 500) {
		if (lpfc_readl(phba->HAregaddr, &ha_copy))
			return;
		if (!(ha_copy & HA_ERATT))
			mdelay(1);
		else
			break;
	}

	if (readl(phba->HAregaddr) & HA_ERATT) {
		writel(HA_ERATT, phba->HAregaddr);
		phba->pport->stopped = 1;
	}

restore_hc:
	/* Re-enable error attention and restore the original HC value */
	phba->link_flag &= ~LS_IGNORE_ERATT;
	writel(hc_copy, phba->HCregaddr);
	readl(phba->HCregaddr); /* flush */
}
4279
4280
4281
4282
4283
4284
4285
4286
4287
4288
4289
4290
/**
 * lpfc_sli_brdkill - Issue a kill-board mailbox command
 * @phba: Pointer to HBA context object.
 *
 * Disables error-attention interrupts, issues MBX_KILL_BOARD (no-wait),
 * then polls the HA register for up to ~3 seconds waiting for the
 * resulting error attention.  On completion the mailbox state is cleaned
 * up, outstanding I/O is failed via lpfc_hba_down_post(), and the HBA is
 * left in LPFC_HBA_ERROR.
 *
 * Returns 0 if the error attention was observed (board killed), 1 on any
 * failure (allocation, register read, or mailbox issue).
 */
int
lpfc_sli_brdkill(struct lpfc_hba *phba)
{
	struct lpfc_sli *psli;
	LPFC_MBOXQ_t *pmb;
	uint32_t status;
	uint32_t ha_copy;
	int retval;
	int i = 0;

	psli = &phba->sli;

	/* Kill HBA */
	lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
			"0329 Kill HBA Data: x%x x%x\n",
			phba->pport->port_state, psli->sli_flag);

	pmb = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!pmb)
		return 1;

	/* Disable the error attention */
	spin_lock_irq(&phba->hbalock);
	if (lpfc_readl(phba->HCregaddr, &status)) {
		spin_unlock_irq(&phba->hbalock);
		mempool_free(pmb, phba->mbox_mem_pool);
		return 1;
	}
	status &= ~HC_ERINT_ENA;
	writel(status, phba->HCregaddr);
	readl(phba->HCregaddr); /* flush */
	phba->link_flag |= LS_IGNORE_ERATT;
	spin_unlock_irq(&phba->hbalock);

	lpfc_kill_board(phba, pmb);
	pmb->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
	retval = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT);

	if (retval != MBX_SUCCESS) {
		/* MBX_BUSY means the mailbox layer queued pmb and will
		 * free it via the completion path; only free it here for
		 * other failures.
		 */
		if (retval != MBX_BUSY)
			mempool_free(pmb, phba->mbox_mem_pool);
		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
				"2752 KILL_BOARD command failed retval %d\n",
				retval);
		spin_lock_irq(&phba->hbalock);
		phba->link_flag &= ~LS_IGNORE_ERATT;
		spin_unlock_irq(&phba->hbalock);
		return 1;
	}

	spin_lock_irq(&phba->hbalock);
	psli->sli_flag &= ~LPFC_SLI_ACTIVE;
	spin_unlock_irq(&phba->hbalock);

	mempool_free(pmb, phba->mbox_mem_pool);

	/* There is no completion for a KILL_BOARD mbox cmd.  Check for an
	 * error attention every 100ms for 3 seconds.  If we don't get ERATT
	 * after 3 seconds we still set HBA_ERROR state because the status of
	 * the board is now undefined.
	 */
	if (lpfc_readl(phba->HAregaddr, &ha_copy))
		return 1;
	while ((i++ < 30) && !(ha_copy & HA_ERATT)) {
		mdelay(100);
		if (lpfc_readl(phba->HAregaddr, &ha_copy))
			return 1;
	}

	del_timer_sync(&psli->mbox_tmo);
	if (ha_copy & HA_ERATT) {
		writel(HA_ERATT, phba->HAregaddr);
		phba->pport->stopped = 1;
	}
	spin_lock_irq(&phba->hbalock);
	psli->sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
	psli->mbox_active = NULL;
	phba->link_flag &= ~LS_IGNORE_ERATT;
	spin_unlock_irq(&phba->hbalock);

	lpfc_hba_down_post(phba);
	phba->link_state = LPFC_HBA_ERROR;

	return ha_copy & HA_ERATT ? 0 : 1;
}
4376
4377
4378
4379
4380
4381
4382
4383
4384
4385
4386
4387
/**
 * lpfc_sli_brdreset - Reset an SLI-3 HBA
 * @phba: Pointer to HBA context object.
 *
 * Performs the SLI-3 board reset: PCI parity/SERR reporting is turned
 * off across the reset, HC_INITFF is toggled in the Host Control
 * register, the driver's per-ring indices are reinitialized, and the
 * link state is set to LPFC_WARM_START.
 *
 * Always returns 0.
 */
int
lpfc_sli_brdreset(struct lpfc_hba *phba)
{
	struct lpfc_sli *psli;
	struct lpfc_sli_ring *pring;
	uint16_t cfg_value;
	int i;

	psli = &phba->sli;

	/* Reset HBA */
	lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
			"0325 Reset HBA Data: x%x x%x\n",
			(phba->pport) ? phba->pport->port_state : 0,
			psli->sli_flag);

	/* perform board reset */
	phba->fc_eventTag = 0;
	phba->link_events = 0;
	if (phba->pport) {
		phba->pport->fc_myDID = 0;
		phba->pport->fc_prevDID = 0;
	}

	/* Turn off parity checking and serr during the physical reset */
	pci_read_config_word(phba->pcidev, PCI_COMMAND, &cfg_value);
	pci_write_config_word(phba->pcidev, PCI_COMMAND,
			      (cfg_value &
			       ~(PCI_COMMAND_PARITY | PCI_COMMAND_SERR)));

	psli->sli_flag &= ~(LPFC_SLI_ACTIVE | LPFC_PROCESS_LA);

	/* Now toggle INITFF bit in the Host Control Register */
	writel(HC_INITFF, phba->HCregaddr);
	mdelay(1);
	readl(phba->HCregaddr); /* flush */
	writel(0, phba->HCregaddr);
	readl(phba->HCregaddr); /* flush */

	/* Restore PCI cmd register */
	pci_write_config_word(phba->pcidev, PCI_COMMAND, cfg_value);

	/* Initialize relevant SLI info */
	for (i = 0; i < psli->num_rings; i++) {
		pring = &psli->sli3_ring[i];
		pring->flag = 0;
		pring->sli.sli3.rspidx = 0;
		pring->sli.sli3.next_cmdidx  = 0;
		pring->sli.sli3.local_getidx = 0;
		pring->sli.sli3.cmdidx = 0;
		pring->missbufcnt = 0;
	}

	phba->link_state = LPFC_WARM_START;
	return 0;
}
4444
4445
4446
4447
4448
4449
4450
4451
4452
4453
4454
/**
 * lpfc_sli4_brdreset - Reset an SLI-4 HBA
 * @phba: Pointer to HBA context object.
 *
 * Resets driver state (event tags, DIDs, FCF flags) and performs a PCI
 * function reset, with PCI parity/SERR reporting disabled across the
 * reset.  When HBA_FW_DUMP_OP is set, the function reset is skipped so
 * the firmware dump operation can proceed; the flag is consumed here.
 *
 * Returns the result of lpfc_pci_function_reset(), or 0 when the reset
 * is skipped for a firmware dump.
 */
int
lpfc_sli4_brdreset(struct lpfc_hba *phba)
{
	struct lpfc_sli *psli = &phba->sli;
	uint16_t cfg_value;
	int rc = 0;

	/* Reset HBA */
	lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
			"0295 Reset HBA Data: x%x x%x x%x\n",
			phba->pport->port_state, psli->sli_flag,
			phba->hba_flag);

	/* perform board reset */
	phba->fc_eventTag = 0;
	phba->link_events = 0;
	phba->pport->fc_myDID = 0;
	phba->pport->fc_prevDID = 0;

	spin_lock_irq(&phba->hbalock);
	psli->sli_flag &= ~(LPFC_PROCESS_LA);
	phba->fcf.fcf_flag = 0;
	spin_unlock_irq(&phba->hbalock);

	/* SLI4 INTF 2: if FW dump is being taken skip INIT_PORT */
	if (phba->hba_flag & HBA_FW_DUMP_OP) {
		phba->hba_flag &= ~HBA_FW_DUMP_OP;
		return rc;
	}

	/* Now physically reset the device */
	lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
			"0389 Performing PCI function reset!\n");

	/* Turn off parity checking and serr during the physical reset */
	pci_read_config_word(phba->pcidev, PCI_COMMAND, &cfg_value);
	pci_write_config_word(phba->pcidev, PCI_COMMAND, (cfg_value &
			      ~(PCI_COMMAND_PARITY | PCI_COMMAND_SERR)));

	/* Perform FCoE PCI function reset before freeing queue memory */
	rc = lpfc_pci_function_reset(phba);

	/* Restore PCI cmd register */
	pci_write_config_word(phba->pcidev, PCI_COMMAND, cfg_value);

	return rc;
}
4502
4503
4504
4505
4506
4507
4508
4509
4510
4511
4512
4513
4514
4515
/**
 * lpfc_sli_brdrestart_s3 - Restart a sli-3 hba
 * @phba: Pointer to HBA context object.
 *
 * Restarts an SLI-3 HBA: writes an MBX_RESTART mailbox command (and a
 * warm/cold start hint word) directly into SLIM, resets the board via
 * lpfc_sli_brdreset(), then flushes outstanding I/O with
 * lpfc_hba_down_post(). Runs under hbalock for the register sequence.
 *
 * Always returns 0.
 **/
static int
lpfc_sli_brdrestart_s3(struct lpfc_hba *phba)
{
	MAILBOX_t *mb;
	struct lpfc_sli *psli;
	volatile uint32_t word0;
	void __iomem *to_slim;
	uint32_t hba_aer_enabled;

	spin_lock_irq(&phba->hbalock);

	/* Take PCIe device Advanced Error Reporting (AER) state */
	hba_aer_enabled = phba->hba_flag & HBA_AER_ENABLED;

	psli = &phba->sli;

	/* Restart HBA */
	lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
			"0337 Restart HBA Data: x%x x%x\n",
			(phba->pport) ? phba->pport->port_state : 0,
			psli->sli_flag);

	/* Build an MBX_RESTART command in a single 32-bit word and write
	 * it to the start of SLIM.
	 */
	word0 = 0;
	mb = (MAILBOX_t *) &word0;
	mb->mbxCommand = MBX_RESTART;
	mb->mbxHc = 1;

	lpfc_reset_barrier(phba);

	to_slim = phba->MBslimaddr;
	writel(*(uint32_t *) mb, to_slim);
	readl(to_slim); /* flush */

	/* Only skip post after fc_ffinit is completed: second SLIM word
	 * tells the firmware whether this is a warm (1) or cold (0) start.
	 */
	if (phba->pport && phba->pport->port_state)
		word0 = 1;	/* This is really setting up word1 */
	else
		word0 = 0;	/* This is really setting up word1 */
	to_slim = phba->MBslimaddr + sizeof (uint32_t);
	writel(*(uint32_t *) mb, to_slim);
	readl(to_slim); /* flush */

	lpfc_sli_brdreset(phba);
	if (phba->pport)
		phba->pport->stopped = 0;
	phba->link_state = LPFC_INIT_START;
	phba->hba_flag = 0;
	spin_unlock_irq(&phba->hbalock);

	memset(&psli->lnk_stat_offsets, 0, sizeof(psli->lnk_stat_offsets));
	psli->stats_start = get_seconds();

	/* Give the INITFF and Post time to settle. */
	mdelay(100);

	/* Reset HBA AER if it was enabled, note hba_flag was reset above */
	if (hba_aer_enabled)
		pci_disable_pcie_error_reporting(phba->pcidev);

	lpfc_hba_down_post(phba);

	return 0;
}
4579
4580
4581
4582
4583
4584
4585
4586
4587
4588
/**
 * lpfc_sli_brdrestart_s4 - Restart the sli-4 hba
 * @phba: Pointer to HBA context object.
 *
 * Restarts an SLI-4 HBA: issues the SLI-4 board reset, clears the
 * driver's link/host state, flushes outstanding I/O and destroys the
 * SLI-4 queues so they can be recreated on the next setup pass.
 *
 * Returns the lpfc_sli4_brdreset() result (0 on success).
 **/
static int
lpfc_sli_brdrestart_s4(struct lpfc_hba *phba)
{
	struct lpfc_sli *psli = &phba->sli;
	uint32_t hba_aer_enabled;
	int rc;

	/* Restart HBA */
	lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
			"0296 Restart HBA Data: x%x x%x\n",
			phba->pport->port_state, psli->sli_flag);

	/* Take PCIe device Advanced Error Reporting (AER) state */
	hba_aer_enabled = phba->hba_flag & HBA_AER_ENABLED;

	rc = lpfc_sli4_brdreset(phba);

	spin_lock_irq(&phba->hbalock);
	phba->pport->stopped = 0;
	phba->link_state = LPFC_INIT_START;
	phba->hba_flag = 0;
	spin_unlock_irq(&phba->hbalock);

	memset(&psli->lnk_stat_offsets, 0, sizeof(psli->lnk_stat_offsets));
	psli->stats_start = get_seconds();

	/* Reset HBA AER if it was enabled, note hba_flag was reset above */
	if (hba_aer_enabled)
		pci_disable_pcie_error_reporting(phba->pcidev);

	lpfc_hba_down_post(phba);
	lpfc_sli4_queue_destroy(phba);

	return rc;
}
4624
4625
4626
4627
4628
4629
4630
4631
/**
 * lpfc_sli_brdrestart - Wrapper func for restarting hba
 * @phba: Pointer to HBA context object.
 *
 * Dispatches to the SLI-3 or SLI-4 restart routine through the
 * phba->lpfc_sli_brdrestart function pointer (set up per API revision).
 *
 * Returns the underlying routine's return code (0 on success).
 **/
int
lpfc_sli_brdrestart(struct lpfc_hba *phba)
{
	return phba->lpfc_sli_brdrestart(phba);
}
4637
4638
4639
4640
4641
4642
4643
4644
4645
4646
4647
/**
 * lpfc_sli_chipset_init - Wait for the restart of the HBA after a restart
 * @phba: Pointer to HBA context object.
 *
 * Polls the host status register until the HBA reports ready
 * (HS_FFRDY | HS_MBRDY), backing off from 10ms to 100ms to 1s between
 * polls. A board restart is attempted once mid-wait (iteration 150).
 * Bails out immediately if the adapter reports an error (HS_FFERM) or
 * after ~200 iterations.
 *
 * Returns 0 when the HBA is ready; -EIO on register read failure or
 * adapter error; -ETIMEDOUT if the HBA never becomes ready. On failure
 * the link state is set to LPFC_HBA_ERROR.
 **/
int
lpfc_sli_chipset_init(struct lpfc_hba *phba)
{
	uint32_t status, i = 0;

	/* Read the HBA Host Status Register */
	if (lpfc_readl(phba->HSregaddr, &status))
		return -EIO;

	/* Check status register to see what current state is */
	i = 0;
	while ((status & (HS_FFRDY | HS_MBRDY)) != (HS_FFRDY | HS_MBRDY)) {

		/* Check every 10ms for 10 retries, then every 100ms for 90
		 * retries, then every 1 sec for the remainder.
		 * Adapter should be ready well before the total wait
		 * expires.
		 */
		if (i++ >= 200) {
			/* Adapter failed to init, timeout, status reg
			   <status> */
			lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
					"0436 Adapter failed to init, "
					"timeout, status reg x%x, "
					"FW Data: A8 x%x AC x%x\n", status,
					readl(phba->MBslimaddr + 0xa8),
					readl(phba->MBslimaddr + 0xac));
			phba->link_state = LPFC_HBA_ERROR;
			return -ETIMEDOUT;
		}

		/* Check to see if any errors occurred during init */
		if (status & HS_FFERM) {
			/* ERROR: During chipset initialization */
			/* Adapter failed to init, chipset, status reg
			   <status> */
			lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
					"0437 Adapter failed to init, "
					"chipset, status reg x%x, "
					"FW Data: A8 x%x AC x%x\n", status,
					readl(phba->MBslimaddr + 0xa8),
					readl(phba->MBslimaddr + 0xac));
			phba->link_state = LPFC_HBA_ERROR;
			return -EIO;
		}

		if (i <= 10)
			msleep(10);
		else if (i <= 100)
			msleep(100);
		else
			msleep(1000);

		if (i == 150) {
			/* Do post: give the board one mid-wait restart */
			phba->pport->port_state = LPFC_VPORT_UNKNOWN;
			lpfc_sli_brdrestart(phba);
		}
		/* Read the HBA Host Status Register */
		if (lpfc_readl(phba->HSregaddr, &status))
			return -EIO;
	}

	/* Check to see if any errors occurred during init */
	if (status & HS_FFERM) {
		/* ERROR: During chipset initialization */
		/* Adapter failed to init, chipset, status reg <status> */
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"0438 Adapter failed to init, chipset, "
				"status reg x%x, "
				"FW Data: A8 x%x AC x%x\n", status,
				readl(phba->MBslimaddr + 0xa8),
				readl(phba->MBslimaddr + 0xac));
		phba->link_state = LPFC_HBA_ERROR;
		return -EIO;
	}

	/* Clear all interrupt enable conditions */
	writel(0, phba->HCregaddr);
	readl(phba->HCregaddr); /* flush */

	/* setup host attn register */
	writel(0xffffffff, phba->HAregaddr);
	readl(phba->HAregaddr); /* flush */
	return 0;
}
4737
4738
4739
4740
4741
4742
4743
4744int
4745lpfc_sli_hbq_count(void)
4746{
4747 return ARRAY_SIZE(lpfc_hbq_defs);
4748}
4749
4750
4751
4752
4753
4754
4755
4756
4757static int
4758lpfc_sli_hbq_entry_count(void)
4759{
4760 int hbq_count = lpfc_sli_hbq_count();
4761 int count = 0;
4762 int i;
4763
4764 for (i = 0; i < hbq_count; ++i)
4765 count += lpfc_hbq_defs[i]->entry_count;
4766 return count;
4767}
4768
4769
4770
4771
4772
4773
4774
4775int
4776lpfc_sli_hbq_size(void)
4777{
4778 return lpfc_sli_hbq_entry_count() * sizeof(struct lpfc_hbq_entry);
4779}
4780
4781
4782
4783
4784
4785
4786
4787
4788
4789
/**
 * lpfc_sli_hbq_setup - configure and initialize HBQs
 * @phba: Pointer to HBA context object.
 *
 * Configures every HBQ on the HBA by issuing one CONFIG_HBQ mailbox
 * command (polled) per HBQ, then posts initial buffers to each. All
 * HBQs share one contiguous entry region; hbq_entry_index tracks each
 * HBQ's offset within it.
 *
 * Returns 0 on success; -ENOMEM if the mailbox cannot be allocated;
 * -ENXIO (with link_state set to LPFC_HBA_ERROR) if a mailbox command
 * fails.
 **/
static int
lpfc_sli_hbq_setup(struct lpfc_hba *phba)
{
	int hbq_count = lpfc_sli_hbq_count();
	LPFC_MBOXQ_t *pmb;
	MAILBOX_t *pmbox;
	uint32_t hbqno;
	uint32_t hbq_entry_index;

	/* Get a Mailbox buffer to setup mailbox
	 * commands for HBA initialization
	 */
	pmb = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);

	if (!pmb)
		return -ENOMEM;

	pmbox = &pmb->u.mb;

	/* Initialize the struct lpfc_sli_hbq structure for each hbq */
	phba->link_state = LPFC_INIT_MBX_CMDS;
	phba->hbq_in_use = 1;

	hbq_entry_index = 0;
	for (hbqno = 0; hbqno < hbq_count; ++hbqno) {
		phba->hbqs[hbqno].next_hbqPutIdx = 0;
		phba->hbqs[hbqno].hbqPutIdx      = 0;
		phba->hbqs[hbqno].local_hbqGetIdx   = 0;
		phba->hbqs[hbqno].entry_count =
			lpfc_hbq_defs[hbqno]->entry_count;
		lpfc_config_hbq(phba, hbqno, lpfc_hbq_defs[hbqno],
			hbq_entry_index, pmb);
		hbq_entry_index += phba->hbqs[hbqno].entry_count;

		if (lpfc_sli_issue_mbox(phba, pmb, MBX_POLL) != MBX_SUCCESS) {
			/* Adapter failed to init, mbxCmd <cmd> CFG_RING,
			   mbxStatus <status>, ring <num> */

			lpfc_printf_log(phba, KERN_ERR,
					LOG_SLI | LOG_VPORT,
					"1805 Adapter failed to init. "
					"Data: x%x x%x x%x\n",
					pmbox->mbxCommand,
					pmbox->mbxStatus, hbqno);

			phba->link_state = LPFC_HBA_ERROR;
			mempool_free(pmb, phba->mbox_mem_pool);
			return -ENXIO;
		}
	}
	phba->hbq_count = hbq_count;

	mempool_free(pmb, phba->mbox_mem_pool);

	/* Initially populate or replenish the HBQs */
	for (hbqno = 0; hbqno < hbq_count; ++hbqno)
		lpfc_sli_hbqbuf_init_hbqs(phba, hbqno);
	return 0;
}
4849
4850
4851
4852
4853
4854
4855
4856
4857
4858
4859static int
4860lpfc_sli4_rb_setup(struct lpfc_hba *phba)
4861{
4862 phba->hbq_in_use = 1;
4863 phba->hbqs[LPFC_ELS_HBQ].entry_count =
4864 lpfc_hbq_defs[LPFC_ELS_HBQ]->entry_count;
4865 phba->hbq_count = 1;
4866 lpfc_sli_hbqbuf_init_hbqs(phba, LPFC_ELS_HBQ);
4867
4868 return 0;
4869}
4870
4871
4872
4873
4874
4875
4876
4877
4878
4879
4880
4881
4882
4883
/**
 * lpfc_sli_config_port - Issue config port mailbox command
 * @phba: Pointer to HBA context object.
 * @sli_mode: sli mode - 2/3
 *
 * Issues a CONFIG_PORT mailbox to select the SLI mode, retrying the
 * restart/init/config sequence up to twice. On SLI-3 success, picks up
 * the features the port granted (NPIV, HBQ, CRP, BlockGuard, DSS/FIPS)
 * and sets up host group pointers accordingly; on SLI-2, sets up the
 * SLI-2 port group pointers.
 *
 * Returns 0 on success; -ENOMEM, -EINVAL, -ENXIO or the error from
 * chipset init / port prep on failure.
 **/
int
lpfc_sli_config_port(struct lpfc_hba *phba, int sli_mode)
{
	LPFC_MBOXQ_t *pmb;
	uint32_t resetcount = 0, rc = 0, done = 0;

	pmb = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!pmb) {
		phba->link_state = LPFC_HBA_ERROR;
		return -ENOMEM;
	}

	phba->sli_rev = sli_mode;
	while (resetcount < 2 && !done) {
		spin_lock_irq(&phba->hbalock);
		phba->sli.sli_flag |= LPFC_SLI_MBOX_ACTIVE;
		spin_unlock_irq(&phba->hbalock);
		phba->pport->port_state = LPFC_VPORT_UNKNOWN;
		lpfc_sli_brdrestart(phba);
		rc = lpfc_sli_chipset_init(phba);
		if (rc)
			break;

		spin_lock_irq(&phba->hbalock);
		phba->sli.sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
		spin_unlock_irq(&phba->hbalock);
		resetcount++;

		/* Call pre CONFIG_PORT mailbox command initialization. A
		 * value of 0 means the call was successful. Any other
		 * nonzero value is a failure, but if ERESTART is returned,
		 * the driver may reset the HBA and try again.
		 */
		rc = lpfc_config_port_prep(phba);
		if (rc == -ERESTART) {
			phba->link_state = LPFC_LINK_UNKNOWN;
			continue;
		} else if (rc)
			break;

		phba->link_state = LPFC_INIT_MBX_CMDS;
		lpfc_config_port(phba, pmb);
		rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL);
		phba->sli3_options &= ~(LPFC_SLI3_NPIV_ENABLED |
					LPFC_SLI3_HBQ_ENABLED |
					LPFC_SLI3_CRP_ENABLED |
					LPFC_SLI3_BG_ENABLED |
					LPFC_SLI3_DSS_ENABLED);
		if (rc != MBX_SUCCESS) {
			lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"0442 Adapter failed to init, mbxCmd x%x "
				"CONFIG_PORT, mbxStatus x%x Data: x%x\n",
				pmb->u.mb.mbxCommand, pmb->u.mb.mbxStatus, 0);
			spin_lock_irq(&phba->hbalock);
			phba->sli.sli_flag &= ~LPFC_SLI_ACTIVE;
			spin_unlock_irq(&phba->hbalock);
			rc = -ENXIO;
		} else {
			/* Allow asynchronous mailbox command to go through */
			spin_lock_irq(&phba->hbalock);
			phba->sli.sli_flag &= ~LPFC_SLI_ASYNC_MBX_BLK;
			spin_unlock_irq(&phba->hbalock);
			done = 1;

			if ((pmb->u.mb.un.varCfgPort.casabt == 1) &&
			    (pmb->u.mb.un.varCfgPort.gasabt == 0))
				lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
					"3110 Port did not grant ASABT\n");
		}
	}
	if (!done) {
		rc = -EINVAL;
		goto do_prep_failed;
	}
	if (pmb->u.mb.un.varCfgPort.sli_mode == 3) {
		if (!pmb->u.mb.un.varCfgPort.cMA) {
			rc = -ENXIO;
			goto do_prep_failed;
		}
		if (phba->max_vpi && pmb->u.mb.un.varCfgPort.gmv) {
			phba->sli3_options |= LPFC_SLI3_NPIV_ENABLED;
			phba->max_vpi = pmb->u.mb.un.varCfgPort.max_vpi;
			phba->max_vports = (phba->max_vpi > phba->max_vports) ?
				phba->max_vpi : phba->max_vports;

		} else
			phba->max_vpi = 0;
		phba->fips_level = 0;
		phba->fips_spec_rev = 0;
		if (pmb->u.mb.un.varCfgPort.gdss) {
			phba->sli3_options |= LPFC_SLI3_DSS_ENABLED;
			phba->fips_level = pmb->u.mb.un.varCfgPort.fips_level;
			phba->fips_spec_rev = pmb->u.mb.un.varCfgPort.fips_rev;
			lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
					"2850 Security Crypto Active. FIPS x%d "
					"(Spec Rev: x%d)",
					phba->fips_level, phba->fips_spec_rev);
		}
		if (pmb->u.mb.un.varCfgPort.sec_err) {
			lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
					"2856 Config Port Security Crypto "
					"Error: x%x ",
					pmb->u.mb.un.varCfgPort.sec_err);
		}
		if (pmb->u.mb.un.varCfgPort.gerbm)
			phba->sli3_options |= LPFC_SLI3_HBQ_ENABLED;
		if (pmb->u.mb.un.varCfgPort.gcrp)
			phba->sli3_options |= LPFC_SLI3_CRP_ENABLED;

		phba->hbq_get = phba->mbox->us.s3_pgp.hbq_get;
		phba->port_gp = phba->mbox->us.s3_pgp.port;

		if (phba->sli3_options & LPFC_SLI3_BG_ENABLED) {
			if (pmb->u.mb.un.varCfgPort.gbg == 0) {
				phba->cfg_enable_bg = 0;
				phba->sli3_options &= ~LPFC_SLI3_BG_ENABLED;
				lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
						"0443 Adapter did not grant "
						"BlockGuard\n");
			}
		}
	} else {
		phba->hbq_get = NULL;
		phba->port_gp = phba->mbox->us.s2.port;
		phba->max_vpi = 0;
	}
do_prep_failed:
	mempool_free(pmb, phba->mbox_mem_pool);
	return rc;
}
5014
5015
5016
5017
5018
5019
5020
5021
5022
5023
5024
5025
5026
5027
5028
5029int
5030lpfc_sli_hba_setup(struct lpfc_hba *phba)
5031{
5032 uint32_t rc;
5033 int mode = 3, i;
5034 int longs;
5035
5036 switch (phba->cfg_sli_mode) {
5037 case 2:
5038 if (phba->cfg_enable_npiv) {
5039 lpfc_printf_log(phba, KERN_ERR, LOG_INIT | LOG_VPORT,
5040 "1824 NPIV enabled: Override sli_mode "
5041 "parameter (%d) to auto (0).\n",
5042 phba->cfg_sli_mode);
5043 break;
5044 }
5045 mode = 2;
5046 break;
5047 case 0:
5048 case 3:
5049 break;
5050 default:
5051 lpfc_printf_log(phba, KERN_ERR, LOG_INIT | LOG_VPORT,
5052 "1819 Unrecognized sli_mode parameter: %d.\n",
5053 phba->cfg_sli_mode);
5054
5055 break;
5056 }
5057 phba->fcp_embed_io = 0;
5058
5059 rc = lpfc_sli_config_port(phba, mode);
5060
5061 if (rc && phba->cfg_sli_mode == 3)
5062 lpfc_printf_log(phba, KERN_ERR, LOG_INIT | LOG_VPORT,
5063 "1820 Unable to select SLI-3. "
5064 "Not supported by adapter.\n");
5065 if (rc && mode != 2)
5066 rc = lpfc_sli_config_port(phba, 2);
5067 else if (rc && mode == 2)
5068 rc = lpfc_sli_config_port(phba, 3);
5069 if (rc)
5070 goto lpfc_sli_hba_setup_error;
5071
5072
5073 if (phba->cfg_aer_support == 1 && !(phba->hba_flag & HBA_AER_ENABLED)) {
5074 rc = pci_enable_pcie_error_reporting(phba->pcidev);
5075 if (!rc) {
5076 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
5077 "2709 This device supports "
5078 "Advanced Error Reporting (AER)\n");
5079 spin_lock_irq(&phba->hbalock);
5080 phba->hba_flag |= HBA_AER_ENABLED;
5081 spin_unlock_irq(&phba->hbalock);
5082 } else {
5083 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
5084 "2708 This device does not support "
5085 "Advanced Error Reporting (AER): %d\n",
5086 rc);
5087 phba->cfg_aer_support = 0;
5088 }
5089 }
5090
5091 if (phba->sli_rev == 3) {
5092 phba->iocb_cmd_size = SLI3_IOCB_CMD_SIZE;
5093 phba->iocb_rsp_size = SLI3_IOCB_RSP_SIZE;
5094 } else {
5095 phba->iocb_cmd_size = SLI2_IOCB_CMD_SIZE;
5096 phba->iocb_rsp_size = SLI2_IOCB_RSP_SIZE;
5097 phba->sli3_options = 0;
5098 }
5099
5100 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
5101 "0444 Firmware in SLI %x mode. Max_vpi %d\n",
5102 phba->sli_rev, phba->max_vpi);
5103 rc = lpfc_sli_ring_map(phba);
5104
5105 if (rc)
5106 goto lpfc_sli_hba_setup_error;
5107
5108
5109 if (phba->sli_rev == LPFC_SLI_REV3) {
5110
5111
5112
5113
5114
5115 if ((phba->vpi_bmask == NULL) && (phba->vpi_ids == NULL)) {
5116 longs = (phba->max_vpi + BITS_PER_LONG) / BITS_PER_LONG;
5117 phba->vpi_bmask = kzalloc(longs * sizeof(unsigned long),
5118 GFP_KERNEL);
5119 if (!phba->vpi_bmask) {
5120 rc = -ENOMEM;
5121 goto lpfc_sli_hba_setup_error;
5122 }
5123
5124 phba->vpi_ids = kzalloc(
5125 (phba->max_vpi+1) * sizeof(uint16_t),
5126 GFP_KERNEL);
5127 if (!phba->vpi_ids) {
5128 kfree(phba->vpi_bmask);
5129 rc = -ENOMEM;
5130 goto lpfc_sli_hba_setup_error;
5131 }
5132 for (i = 0; i < phba->max_vpi; i++)
5133 phba->vpi_ids[i] = i;
5134 }
5135 }
5136
5137
5138 if (phba->sli3_options & LPFC_SLI3_HBQ_ENABLED) {
5139 rc = lpfc_sli_hbq_setup(phba);
5140 if (rc)
5141 goto lpfc_sli_hba_setup_error;
5142 }
5143 spin_lock_irq(&phba->hbalock);
5144 phba->sli.sli_flag |= LPFC_PROCESS_LA;
5145 spin_unlock_irq(&phba->hbalock);
5146
5147 rc = lpfc_config_port_post(phba);
5148 if (rc)
5149 goto lpfc_sli_hba_setup_error;
5150
5151 return rc;
5152
5153lpfc_sli_hba_setup_error:
5154 phba->link_state = LPFC_HBA_ERROR;
5155 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
5156 "0445 Firmware initialization failed\n");
5157 return rc;
5158}
5159
5160
5161
5162
5163
5164
5165
5166
5167
/**
 * lpfc_sli4_read_fcoe_params - Read fcoe params from conf region
 * @phba: Pointer to HBA context object.
 *
 * Reads config region 23 via a DUMP mailbox command (polled) and parses
 * the FCoE parameters from it. Default FC map values are installed
 * first so the HBA is usable even if the region read fails or the
 * region is absent.
 *
 * Returns 0 on success; -ENOMEM on allocation failure; -EIO if the
 * mailbox fails or the returned length exceeds DMP_RGN23_SIZE.
 **/
static int
lpfc_sli4_read_fcoe_params(struct lpfc_hba *phba)
{
	LPFC_MBOXQ_t *mboxq;
	struct lpfc_dmabuf *mp;
	struct lpfc_mqe *mqe;
	uint32_t data_length;
	int rc;

	/* Program the default value of vlan_id and fc_map */
	phba->valid_vlan = 0;
	phba->fc_map[0] = LPFC_FCOE_FCF_MAP0;
	phba->fc_map[1] = LPFC_FCOE_FCF_MAP1;
	phba->fc_map[2] = LPFC_FCOE_FCF_MAP2;

	mboxq = (LPFC_MBOXQ_t *)mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!mboxq)
		return -ENOMEM;

	mqe = &mboxq->u.mqe;
	if (lpfc_sli4_dump_cfg_rg23(phba, mboxq)) {
		rc = -ENOMEM;
		goto out_free_mboxq;
	}

	/* DMA buffer for the region data was attached as context1 */
	mp = (struct lpfc_dmabuf *) mboxq->context1;
	rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);

	lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI,
			"(%d):2571 Mailbox cmd x%x Status x%x "
			"Data: x%x x%x x%x x%x x%x x%x x%x x%x x%x "
			"x%x x%x x%x x%x x%x x%x x%x x%x x%x "
			"CQ: x%x x%x x%x x%x\n",
			mboxq->vport ? mboxq->vport->vpi : 0,
			bf_get(lpfc_mqe_command, mqe),
			bf_get(lpfc_mqe_status, mqe),
			mqe->un.mb_words[0], mqe->un.mb_words[1],
			mqe->un.mb_words[2], mqe->un.mb_words[3],
			mqe->un.mb_words[4], mqe->un.mb_words[5],
			mqe->un.mb_words[6], mqe->un.mb_words[7],
			mqe->un.mb_words[8], mqe->un.mb_words[9],
			mqe->un.mb_words[10], mqe->un.mb_words[11],
			mqe->un.mb_words[12], mqe->un.mb_words[13],
			mqe->un.mb_words[14], mqe->un.mb_words[15],
			mqe->un.mb_words[16], mqe->un.mb_words[50],
			mboxq->mcqe.word0,
			mboxq->mcqe.mcqe_tag0, mboxq->mcqe.mcqe_tag1,
			mboxq->mcqe.trailer);

	if (rc) {
		lpfc_mbuf_free(phba, mp->virt, mp->phys);
		kfree(mp);
		rc = -EIO;
		goto out_free_mboxq;
	}
	data_length = mqe->un.mb_words[5];
	if (data_length > DMP_RGN23_SIZE) {
		lpfc_mbuf_free(phba, mp->virt, mp->phys);
		kfree(mp);
		rc = -EIO;
		goto out_free_mboxq;
	}

	lpfc_parse_fcoe_conf(phba, mp->virt, data_length);
	lpfc_mbuf_free(phba, mp->virt, mp->phys);
	kfree(mp);
	rc = 0;

out_free_mboxq:
	mempool_free(mboxq, phba->mbox_mem_pool);
	return rc;
}
5240
5241
5242
5243
5244
5245
5246
5247
5248
5249
5250
5251
5252
5253
5254
5255
/**
 * lpfc_sli4_read_rev - Issue a READ_REV mailbox command and read VPD
 * @phba: Pointer to HBA context object.
 * @mboxq: mailbox pointer (caller-allocated, reused for READ_REV).
 * @vpd: pointer to the buffer that receives the VPD data.
 * @vpd_size: in: size of @vpd; out: actual VPD length copied.
 *
 * Allocates a coherent DMA buffer, points the READ_REV command at it,
 * issues the command by polling, and copies the returned VPD into @vpd.
 * *vpd_size is shrunk to the length the port actually returned when
 * that is smaller than the caller's buffer.
 *
 * Returns 0 on success; -ENOMEM on allocation failure; -EIO if the
 * mailbox command fails.
 **/
static int
lpfc_sli4_read_rev(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq,
		    uint8_t *vpd, uint32_t *vpd_size)
{
	int rc = 0;
	uint32_t dma_size;
	struct lpfc_dmabuf *dmabuf;
	struct lpfc_mqe *mqe;

	dmabuf = kzalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
	if (!dmabuf)
		return -ENOMEM;

	/*
	 * Get a DMA buffer for the vpd data resulting from the READ_REV
	 * mailbox command.
	 */
	dma_size = *vpd_size;
	dmabuf->virt = dma_zalloc_coherent(&phba->pcidev->dev, dma_size,
					   &dmabuf->phys, GFP_KERNEL);
	if (!dmabuf->virt) {
		kfree(dmabuf);
		return -ENOMEM;
	}

	/*
	 * The SLI4 implementation of READ_REV conflicts at word1,
	 * bits 31:16 and SLI4 adds vpd functionality not present
	 * in SLI3.  This code corrects the conflicts.
	 */
	lpfc_read_rev(phba, mboxq);
	mqe = &mboxq->u.mqe;
	mqe->un.read_rev.vpd_paddr_high = putPaddrHigh(dmabuf->phys);
	mqe->un.read_rev.vpd_paddr_low = putPaddrLow(dmabuf->phys);
	mqe->un.read_rev.word1 &= 0x0000FFFF;
	bf_set(lpfc_mbx_rd_rev_vpd, &mqe->un.read_rev, 1);
	bf_set(lpfc_mbx_rd_rev_avail_len, &mqe->un.read_rev, dma_size);

	rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
	if (rc) {
		dma_free_coherent(&phba->pcidev->dev, dma_size,
				  dmabuf->virt, dmabuf->phys);
		kfree(dmabuf);
		return -EIO;
	}

	/*
	 * The available vpd length cannot be bigger than the
	 * DMA buffer passed to the port.  Catch the less than
	 * case and update the caller's size.
	 */
	if (mqe->un.read_rev.avail_vpd_len < *vpd_size)
		*vpd_size = mqe->un.read_rev.avail_vpd_len;

	memcpy(vpd, dmabuf->virt, *vpd_size);

	dma_free_coherent(&phba->pcidev->dev, dma_size,
			  dmabuf->virt, dmabuf->phys);
	kfree(dmabuf);
	return 0;
}
5317
5318
5319
5320
5321
5322
5323
5324
5325
5326
5327
5328
/**
 * lpfc_sli4_retrieve_pport_name - Retrieve SLI4 device physical port name
 * @phba: pointer to lpfc hba data structure.
 *
 * Retrieves the SLI-4 device physical port name. If the link type/number
 * are not already known from READ_CONFIG, it first issues a
 * GET_CNTL_ATTRIBUTES mailbox to learn them, then issues GET_PORT_NAME
 * and selects the name character matching the link number. On success
 * the single-character port name is stored in phba->Port.
 *
 * Returns 0 on success; -ENOMEM on allocation failure; -ENXIO when a
 * mailbox command is rejected.
 * NOTE(review): the final `rc != MBX_TIMEOUT` comparison mixes negative
 * errno values with mailbox status codes — works because -ENXIO never
 * equals MBX_TIMEOUT, but worth confirming upstream intent.
 **/
static int
lpfc_sli4_retrieve_pport_name(struct lpfc_hba *phba)
{
	LPFC_MBOXQ_t *mboxq;
	struct lpfc_mbx_get_cntl_attributes *mbx_cntl_attr;
	struct lpfc_controller_attribute *cntl_attr;
	struct lpfc_mbx_get_port_name *get_port_name;
	void *virtaddr = NULL;
	uint32_t alloclen, reqlen;
	uint32_t shdr_status, shdr_add_status;
	union lpfc_sli4_cfg_shdr *shdr;
	char cport_name = 0;
	int rc;

	/* We assume nothing at this point */
	phba->sli4_hba.lnk_info.lnk_dv = LPFC_LNK_DAT_INVAL;
	phba->sli4_hba.pport_name_sta = LPFC_SLI4_PPNAME_NON;

	mboxq = (LPFC_MBOXQ_t *)mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!mboxq)
		return -ENOMEM;
	/* obtain link type and link number via READ_CONFIG */
	phba->sli4_hba.lnk_info.lnk_dv = LPFC_LNK_DAT_INVAL;
	lpfc_sli4_read_config(phba);
	if (phba->sli4_hba.lnk_info.lnk_dv == LPFC_LNK_DAT_VAL)
		goto retrieve_ppname;

	/* obtain link type and link number via COMMON_GET_CNTL_ATTRIBUTES */
	reqlen = sizeof(struct lpfc_mbx_get_cntl_attributes);
	alloclen = lpfc_sli4_config(phba, mboxq, LPFC_MBOX_SUBSYSTEM_COMMON,
			LPFC_MBOX_OPCODE_GET_CNTL_ATTRIBUTES, reqlen,
			LPFC_SLI4_MBX_NEMBED);
	if (alloclen < reqlen) {
		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
				"3084 Allocated DMA memory size (%d) is "
				"less than the requested DMA memory size "
				"(%d)\n", alloclen, reqlen);
		rc = -ENOMEM;
		goto out_free_mboxq;
	}
	rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
	virtaddr = mboxq->sge_array->addr[0];
	mbx_cntl_attr = (struct lpfc_mbx_get_cntl_attributes *)virtaddr;
	shdr = &mbx_cntl_attr->cfg_shdr;
	shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
	shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
	if (shdr_status || shdr_add_status || rc) {
		lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
				"3085 Mailbox x%x (x%x/x%x) failed, "
				"rc:x%x, status:x%x, add_status:x%x\n",
				bf_get(lpfc_mqe_command, &mboxq->u.mqe),
				lpfc_sli_config_mbox_subsys_get(phba, mboxq),
				lpfc_sli_config_mbox_opcode_get(phba, mboxq),
				rc, shdr_status, shdr_add_status);
		rc = -ENXIO;
		goto out_free_mboxq;
	}
	cntl_attr = &mbx_cntl_attr->cntl_attr;
	phba->sli4_hba.lnk_info.lnk_dv = LPFC_LNK_DAT_VAL;
	phba->sli4_hba.lnk_info.lnk_tp =
		bf_get(lpfc_cntl_attr_lnk_type, cntl_attr);
	phba->sli4_hba.lnk_info.lnk_no =
		bf_get(lpfc_cntl_attr_lnk_numb, cntl_attr);
	lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
			"3086 lnk_type:%d, lnk_numb:%d\n",
			phba->sli4_hba.lnk_info.lnk_tp,
			phba->sli4_hba.lnk_info.lnk_no);

retrieve_ppname:
	lpfc_sli4_config(phba, mboxq, LPFC_MBOX_SUBSYSTEM_COMMON,
		LPFC_MBOX_OPCODE_GET_PORT_NAME,
		sizeof(struct lpfc_mbx_get_port_name) -
		sizeof(struct lpfc_sli4_cfg_mhdr),
		LPFC_SLI4_MBX_EMBED);
	get_port_name = &mboxq->u.mqe.un.get_port_name;
	shdr = (union lpfc_sli4_cfg_shdr *)&get_port_name->header.cfg_shdr;
	bf_set(lpfc_mbox_hdr_version, &shdr->request, LPFC_OPCODE_VERSION_1);
	bf_set(lpfc_mbx_get_port_name_lnk_type, &get_port_name->u.request,
		phba->sli4_hba.lnk_info.lnk_tp);
	rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
	shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
	shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
	if (shdr_status || shdr_add_status || rc) {
		lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
				"3087 Mailbox x%x (x%x/x%x) failed: "
				"rc:x%x, status:x%x, add_status:x%x\n",
				bf_get(lpfc_mqe_command, &mboxq->u.mqe),
				lpfc_sli_config_mbox_subsys_get(phba, mboxq),
				lpfc_sli_config_mbox_opcode_get(phba, mboxq),
				rc, shdr_status, shdr_add_status);
		rc = -ENXIO;
		goto out_free_mboxq;
	}
	/* Pick the name byte that corresponds to this link number */
	switch (phba->sli4_hba.lnk_info.lnk_no) {
	case LPFC_LINK_NUMBER_0:
		cport_name = bf_get(lpfc_mbx_get_port_name_name0,
				&get_port_name->u.response);
		phba->sli4_hba.pport_name_sta = LPFC_SLI4_PPNAME_GET;
		break;
	case LPFC_LINK_NUMBER_1:
		cport_name = bf_get(lpfc_mbx_get_port_name_name1,
				&get_port_name->u.response);
		phba->sli4_hba.pport_name_sta = LPFC_SLI4_PPNAME_GET;
		break;
	case LPFC_LINK_NUMBER_2:
		cport_name = bf_get(lpfc_mbx_get_port_name_name2,
				&get_port_name->u.response);
		phba->sli4_hba.pport_name_sta = LPFC_SLI4_PPNAME_GET;
		break;
	case LPFC_LINK_NUMBER_3:
		cport_name = bf_get(lpfc_mbx_get_port_name_name3,
				&get_port_name->u.response);
		phba->sli4_hba.pport_name_sta = LPFC_SLI4_PPNAME_GET;
		break;
	default:
		break;
	}

	if (phba->sli4_hba.pport_name_sta == LPFC_SLI4_PPNAME_GET) {
		phba->Port[0] = cport_name;
		phba->Port[1] = '\0';
		lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
				"3091 SLI get port name: %s\n", phba->Port);
	}

out_free_mboxq:
	/* On timeout the mailbox is still owned by the port: don't free */
	if (rc != MBX_TIMEOUT) {
		if (bf_get(lpfc_mqe_command, &mboxq->u.mqe) == MBX_SLI4_CONFIG)
			lpfc_sli4_mbox_cmd_free(phba, mboxq);
		else
			mempool_free(mboxq, phba->mbox_mem_pool);
	}
	return rc;
}
5463
5464
5465
5466
5467
5468
5469
5470
/**
 * lpfc_sli4_arm_cqeq_intr - Arm sli-4 device completion and event queues
 * @phba: pointer to lpfc hba data structure.
 *
 * Re-arms every SLI-4 completion queue and event queue (mailbox, ELS,
 * NVME LS, FCP, NVME, OAS/fof, NVMET sets, and the HBA EQs) so the port
 * will generate interrupts again. Queues whose pointers are not set up
 * are skipped.
 **/
static void
lpfc_sli4_arm_cqeq_intr(struct lpfc_hba *phba)
{
	int qidx;
	struct lpfc_sli4_hba *sli4_hba = &phba->sli4_hba;

	sli4_hba->sli4_cq_release(sli4_hba->mbx_cq, LPFC_QUEUE_REARM);
	sli4_hba->sli4_cq_release(sli4_hba->els_cq, LPFC_QUEUE_REARM);
	if (sli4_hba->nvmels_cq)
		sli4_hba->sli4_cq_release(sli4_hba->nvmels_cq,
						LPFC_QUEUE_REARM);

	if (sli4_hba->fcp_cq)
		for (qidx = 0; qidx < phba->cfg_fcp_io_channel; qidx++)
			sli4_hba->sli4_cq_release(sli4_hba->fcp_cq[qidx],
						LPFC_QUEUE_REARM);

	if (sli4_hba->nvme_cq)
		for (qidx = 0; qidx < phba->cfg_nvme_io_channel; qidx++)
			sli4_hba->sli4_cq_release(sli4_hba->nvme_cq[qidx],
						LPFC_QUEUE_REARM);

	/* OAS (Optimized Access Storage) uses a dedicated CQ/EQ pair */
	if (phba->cfg_fof)
		sli4_hba->sli4_cq_release(sli4_hba->oas_cq, LPFC_QUEUE_REARM);

	if (sli4_hba->hba_eq)
		for (qidx = 0; qidx < phba->io_channel_irqs; qidx++)
			sli4_hba->sli4_eq_release(sli4_hba->hba_eq[qidx],
						LPFC_QUEUE_REARM);

	if (phba->nvmet_support) {
		for (qidx = 0; qidx < phba->cfg_nvmet_mrq; qidx++) {
			sli4_hba->sli4_cq_release(
				sli4_hba->nvmet_cqset[qidx],
				LPFC_QUEUE_REARM);
		}
	}

	if (phba->cfg_fof)
		sli4_hba->sli4_eq_release(sli4_hba->fof_eq, LPFC_QUEUE_REARM);
}
5512
5513
5514
5515
5516
5517
5518
5519
5520
5521
5522
5523
5524
/**
 * lpfc_sli4_get_avail_extnt_rsrc - Get available resource extent count.
 * @phba: Pointer to HBA context object.
 * @type: The resource extent type.
 * @extnt_count: buffer to hold port available extent count.
 * @extnt_size: buffer to hold element count per extent.
 *
 * Issues a GET_RSRC_EXTENT_INFO mailbox (polled when interrupts are
 * disabled, waited otherwise) and returns the number of available
 * extents and the size of each for the given resource @type.
 *
 * Returns 0 on success (with @extnt_count and @extnt_size filled in);
 * -ENOMEM if the mailbox cannot be allocated; -EIO on any mailbox
 * failure or error status from the port.
 **/
int
lpfc_sli4_get_avail_extnt_rsrc(struct lpfc_hba *phba, uint16_t type,
			       uint16_t *extnt_count, uint16_t *extnt_size)
{
	int rc = 0;
	uint32_t length;
	uint32_t mbox_tmo;
	struct lpfc_mbx_get_rsrc_extent_info *rsrc_info;
	LPFC_MBOXQ_t *mbox;

	mbox = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!mbox)
		return -ENOMEM;

	/* Find out how many extents are available for this resource type */
	length = (sizeof(struct lpfc_mbx_get_rsrc_extent_info) -
		  sizeof(struct lpfc_sli4_cfg_mhdr));
	lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON,
			 LPFC_MBOX_OPCODE_GET_RSRC_EXTENT_INFO,
			 length, LPFC_SLI4_MBX_EMBED);

	/* Send an extents count of 0 - the GET doesn't use it. */
	rc = lpfc_sli4_mbox_rsrc_extent(phba, mbox, 0, type,
					LPFC_SLI4_MBX_EMBED);
	if (unlikely(rc)) {
		rc = -EIO;
		goto err_exit;
	}

	if (!phba->sli4_hba.intr_enable)
		rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
	else {
		mbox_tmo = lpfc_mbox_tmo_val(phba, mbox);
		rc = lpfc_sli_issue_mbox_wait(phba, mbox, mbox_tmo);
	}
	if (unlikely(rc)) {
		rc = -EIO;
		goto err_exit;
	}

	rsrc_info = &mbox->u.mqe.un.rsrc_extent_info;
	if (bf_get(lpfc_mbox_hdr_status,
		   &rsrc_info->header.cfg_shdr.response)) {
		lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_INIT,
				"2930 Failed to get resource extents "
				"Status 0x%x Add'l Status 0x%x\n",
				bf_get(lpfc_mbox_hdr_status,
				       &rsrc_info->header.cfg_shdr.response),
				bf_get(lpfc_mbox_hdr_add_status,
				       &rsrc_info->header.cfg_shdr.response));
		rc = -EIO;
		goto err_exit;
	}

	*extnt_count = bf_get(lpfc_mbx_get_rsrc_extent_info_cnt,
			      &rsrc_info->u.rsp);
	*extnt_size = bf_get(lpfc_mbx_get_rsrc_extent_info_size,
			     &rsrc_info->u.rsp);

	lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
			"3162 Retrieved extents type-%d from port: count:%d, "
			"size:%d\n", type, *extnt_count, *extnt_size);

err_exit:
	mempool_free(mbox, phba->mbox_mem_pool);
	return rc;
}
5592
5593
5594
5595
5596
5597
5598
5599
5600
5601
5602
5603
5604
5605
5606
5607
5608static int
5609lpfc_sli4_chk_avail_extnt_rsrc(struct lpfc_hba *phba, uint16_t type)
5610{
5611 uint16_t curr_ext_cnt, rsrc_ext_cnt;
5612 uint16_t size_diff, rsrc_ext_size;
5613 int rc = 0;
5614 struct lpfc_rsrc_blks *rsrc_entry;
5615 struct list_head *rsrc_blk_list = NULL;
5616
5617 size_diff = 0;
5618 curr_ext_cnt = 0;
5619 rc = lpfc_sli4_get_avail_extnt_rsrc(phba, type,
5620 &rsrc_ext_cnt,
5621 &rsrc_ext_size);
5622 if (unlikely(rc))
5623 return -EIO;
5624
5625 switch (type) {
5626 case LPFC_RSC_TYPE_FCOE_RPI:
5627 rsrc_blk_list = &phba->sli4_hba.lpfc_rpi_blk_list;
5628 break;
5629 case LPFC_RSC_TYPE_FCOE_VPI:
5630 rsrc_blk_list = &phba->lpfc_vpi_blk_list;
5631 break;
5632 case LPFC_RSC_TYPE_FCOE_XRI:
5633 rsrc_blk_list = &phba->sli4_hba.lpfc_xri_blk_list;
5634 break;
5635 case LPFC_RSC_TYPE_FCOE_VFI:
5636 rsrc_blk_list = &phba->sli4_hba.lpfc_vfi_blk_list;
5637 break;
5638 default:
5639 break;
5640 }
5641
5642 list_for_each_entry(rsrc_entry, rsrc_blk_list, list) {
5643 curr_ext_cnt++;
5644 if (rsrc_entry->rsrc_size != rsrc_ext_size)
5645 size_diff++;
5646 }
5647
5648 if (curr_ext_cnt != rsrc_ext_cnt || size_diff != 0)
5649 rc = 1;
5650
5651 return rc;
5652}
5653
5654
5655
5656
5657
5658
5659
5660
5661
5662
5663
5664
5665
5666
5667
5668
5669
5670
/**
 * lpfc_sli4_cfg_post_extnts - Post resource extents to the port.
 * @phba: Pointer to HBA context object.
 * @extnt_cnt: number of available extents.
 * @type: The extent type to allocate.
 * @emb: out: whether the command was sent embedded or non-embedded.
 * @mbox: caller-allocated mailbox used for the command.
 *
 * Builds and issues an ALLOC_RSRC_EXTENT mailbox command. If the
 * extent-id payload fits in the mailbox itself the embedded form is
 * used; otherwise a non-embedded (SGE) form is built and *emb is set
 * accordingly so the caller knows where to find the response.
 *
 * Returns 0 on success; -ENOMEM if the config allocation is too small;
 * -EIO on any mailbox failure.
 **/
static int
lpfc_sli4_cfg_post_extnts(struct lpfc_hba *phba, uint16_t extnt_cnt,
			  uint16_t type, bool *emb, LPFC_MBOXQ_t *mbox)
{
	int rc = 0;
	uint32_t req_len;
	uint32_t emb_len;
	uint32_t alloc_len, mbox_tmo;

	/* Calculate the total requested length of the dma memory */
	req_len = extnt_cnt * sizeof(uint16_t);

	/*
	 * Calculate the size of an embedded mailbox.  The uint32_t
	 * accounts for extents-specific word.
	 */
	emb_len = sizeof(MAILBOX_t) - sizeof(struct mbox_header) -
		sizeof(uint32_t);

	/*
	 * Presume the allocation and response will fit into an embedded
	 * mailbox.  If not true, reconfigure to a non-embedded mailbox.
	 */
	*emb = LPFC_SLI4_MBX_EMBED;
	if (req_len > emb_len) {
		req_len = extnt_cnt * sizeof(uint16_t) +
			sizeof(union lpfc_sli4_cfg_shdr) +
			sizeof(uint32_t);
		*emb = LPFC_SLI4_MBX_NEMBED;
	}

	alloc_len = lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON,
				     LPFC_MBOX_OPCODE_ALLOC_RSRC_EXTENT,
				     req_len, *emb);
	if (alloc_len < req_len) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
			"2982 Allocated DMA memory size (x%x) is "
			"less than the requested DMA memory "
			"size (x%x)\n", alloc_len, req_len);
		return -ENOMEM;
	}
	rc = lpfc_sli4_mbox_rsrc_extent(phba, mbox, extnt_cnt, type, *emb);
	if (unlikely(rc))
		return -EIO;

	if (!phba->sli4_hba.intr_enable)
		rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
	else {
		mbox_tmo = lpfc_mbox_tmo_val(phba, mbox);
		rc = lpfc_sli_issue_mbox_wait(phba, mbox, mbox_tmo);
	}

	if (unlikely(rc))
		rc = -EIO;
	return rc;
}
5727
5728
5729
5730
5731
5732
5733
5734
5735
5736static int
5737lpfc_sli4_alloc_extent(struct lpfc_hba *phba, uint16_t type)
5738{
5739 bool emb = false;
5740 uint16_t rsrc_id_cnt, rsrc_cnt, rsrc_size;
5741 uint16_t rsrc_id, rsrc_start, j, k;
5742 uint16_t *ids;
5743 int i, rc;
5744 unsigned long longs;
5745 unsigned long *bmask;
5746 struct lpfc_rsrc_blks *rsrc_blks;
5747 LPFC_MBOXQ_t *mbox;
5748 uint32_t length;
5749 struct lpfc_id_range *id_array = NULL;
5750 void *virtaddr = NULL;
5751 struct lpfc_mbx_nembed_rsrc_extent *n_rsrc;
5752 struct lpfc_mbx_alloc_rsrc_extents *rsrc_ext;
5753 struct list_head *ext_blk_list;
5754
5755 rc = lpfc_sli4_get_avail_extnt_rsrc(phba, type,
5756 &rsrc_cnt,
5757 &rsrc_size);
5758 if (unlikely(rc))
5759 return -EIO;
5760
5761 if ((rsrc_cnt == 0) || (rsrc_size == 0)) {
5762 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_INIT,
5763 "3009 No available Resource Extents "
5764 "for resource type 0x%x: Count: 0x%x, "
5765 "Size 0x%x\n", type, rsrc_cnt,
5766 rsrc_size);
5767 return -ENOMEM;
5768 }
5769
5770 lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_INIT | LOG_SLI,
5771 "2903 Post resource extents type-0x%x: "
5772 "count:%d, size %d\n", type, rsrc_cnt, rsrc_size);
5773
5774 mbox = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
5775 if (!mbox)
5776 return -ENOMEM;
5777
5778 rc = lpfc_sli4_cfg_post_extnts(phba, rsrc_cnt, type, &emb, mbox);
5779 if (unlikely(rc)) {
5780 rc = -EIO;
5781 goto err_exit;
5782 }
5783
5784
5785
5786
5787
5788
5789
5790 if (emb == LPFC_SLI4_MBX_EMBED) {
5791 rsrc_ext = &mbox->u.mqe.un.alloc_rsrc_extents;
5792 id_array = &rsrc_ext->u.rsp.id[0];
5793 rsrc_cnt = bf_get(lpfc_mbx_rsrc_cnt, &rsrc_ext->u.rsp);
5794 } else {
5795 virtaddr = mbox->sge_array->addr[0];
5796 n_rsrc = (struct lpfc_mbx_nembed_rsrc_extent *) virtaddr;
5797 rsrc_cnt = bf_get(lpfc_mbx_rsrc_cnt, n_rsrc);
5798 id_array = &n_rsrc->id;
5799 }
5800
5801 longs = ((rsrc_cnt * rsrc_size) + BITS_PER_LONG - 1) / BITS_PER_LONG;
5802 rsrc_id_cnt = rsrc_cnt * rsrc_size;
5803
5804
5805
5806
5807
5808 length = sizeof(struct lpfc_rsrc_blks);
5809 switch (type) {
5810 case LPFC_RSC_TYPE_FCOE_RPI:
5811 phba->sli4_hba.rpi_bmask = kzalloc(longs *
5812 sizeof(unsigned long),
5813 GFP_KERNEL);
5814 if (unlikely(!phba->sli4_hba.rpi_bmask)) {
5815 rc = -ENOMEM;
5816 goto err_exit;
5817 }
5818 phba->sli4_hba.rpi_ids = kzalloc(rsrc_id_cnt *
5819 sizeof(uint16_t),
5820 GFP_KERNEL);
5821 if (unlikely(!phba->sli4_hba.rpi_ids)) {
5822 kfree(phba->sli4_hba.rpi_bmask);
5823 rc = -ENOMEM;
5824 goto err_exit;
5825 }
5826
5827
5828
5829
5830
5831
5832 phba->sli4_hba.next_rpi = rsrc_id_cnt;
5833
5834
5835 bmask = phba->sli4_hba.rpi_bmask;
5836 ids = phba->sli4_hba.rpi_ids;
5837 ext_blk_list = &phba->sli4_hba.lpfc_rpi_blk_list;
5838 break;
5839 case LPFC_RSC_TYPE_FCOE_VPI:
5840 phba->vpi_bmask = kzalloc(longs *
5841 sizeof(unsigned long),
5842 GFP_KERNEL);
5843 if (unlikely(!phba->vpi_bmask)) {
5844 rc = -ENOMEM;
5845 goto err_exit;
5846 }
5847 phba->vpi_ids = kzalloc(rsrc_id_cnt *
5848 sizeof(uint16_t),
5849 GFP_KERNEL);
5850 if (unlikely(!phba->vpi_ids)) {
5851 kfree(phba->vpi_bmask);
5852 rc = -ENOMEM;
5853 goto err_exit;
5854 }
5855
5856
5857 bmask = phba->vpi_bmask;
5858 ids = phba->vpi_ids;
5859 ext_blk_list = &phba->lpfc_vpi_blk_list;
5860 break;
5861 case LPFC_RSC_TYPE_FCOE_XRI:
5862 phba->sli4_hba.xri_bmask = kzalloc(longs *
5863 sizeof(unsigned long),
5864 GFP_KERNEL);
5865 if (unlikely(!phba->sli4_hba.xri_bmask)) {
5866 rc = -ENOMEM;
5867 goto err_exit;
5868 }
5869 phba->sli4_hba.max_cfg_param.xri_used = 0;
5870 phba->sli4_hba.xri_ids = kzalloc(rsrc_id_cnt *
5871 sizeof(uint16_t),
5872 GFP_KERNEL);
5873 if (unlikely(!phba->sli4_hba.xri_ids)) {
5874 kfree(phba->sli4_hba.xri_bmask);
5875 rc = -ENOMEM;
5876 goto err_exit;
5877 }
5878
5879
5880 bmask = phba->sli4_hba.xri_bmask;
5881 ids = phba->sli4_hba.xri_ids;
5882 ext_blk_list = &phba->sli4_hba.lpfc_xri_blk_list;
5883 break;
5884 case LPFC_RSC_TYPE_FCOE_VFI:
5885 phba->sli4_hba.vfi_bmask = kzalloc(longs *
5886 sizeof(unsigned long),
5887 GFP_KERNEL);
5888 if (unlikely(!phba->sli4_hba.vfi_bmask)) {
5889 rc = -ENOMEM;
5890 goto err_exit;
5891 }
5892 phba->sli4_hba.vfi_ids = kzalloc(rsrc_id_cnt *
5893 sizeof(uint16_t),
5894 GFP_KERNEL);
5895 if (unlikely(!phba->sli4_hba.vfi_ids)) {
5896 kfree(phba->sli4_hba.vfi_bmask);
5897 rc = -ENOMEM;
5898 goto err_exit;
5899 }
5900
5901
5902 bmask = phba->sli4_hba.vfi_bmask;
5903 ids = phba->sli4_hba.vfi_ids;
5904 ext_blk_list = &phba->sli4_hba.lpfc_vfi_blk_list;
5905 break;
5906 default:
5907
5908 id_array = NULL;
5909 bmask = NULL;
5910 ids = NULL;
5911 ext_blk_list = NULL;
5912 goto err_exit;
5913 }
5914
5915
5916
5917
5918
5919
5920
5921 for (i = 0, j = 0, k = 0; i < rsrc_cnt; i++) {
5922 if ((i % 2) == 0)
5923 rsrc_id = bf_get(lpfc_mbx_rsrc_id_word4_0,
5924 &id_array[k]);
5925 else
5926 rsrc_id = bf_get(lpfc_mbx_rsrc_id_word4_1,
5927 &id_array[k]);
5928
5929 rsrc_blks = kzalloc(length, GFP_KERNEL);
5930 if (unlikely(!rsrc_blks)) {
5931 rc = -ENOMEM;
5932 kfree(bmask);
5933 kfree(ids);
5934 goto err_exit;
5935 }
5936 rsrc_blks->rsrc_start = rsrc_id;
5937 rsrc_blks->rsrc_size = rsrc_size;
5938 list_add_tail(&rsrc_blks->list, ext_blk_list);
5939 rsrc_start = rsrc_id;
5940 if ((type == LPFC_RSC_TYPE_FCOE_XRI) && (j == 0)) {
5941 phba->sli4_hba.scsi_xri_start = rsrc_start +
5942 lpfc_sli4_get_iocb_cnt(phba);
5943 phba->sli4_hba.nvme_xri_start =
5944 phba->sli4_hba.scsi_xri_start +
5945 phba->sli4_hba.scsi_xri_max;
5946 }
5947
5948 while (rsrc_id < (rsrc_start + rsrc_size)) {
5949 ids[j] = rsrc_id;
5950 rsrc_id++;
5951 j++;
5952 }
5953
5954 if ((i % 2) == 1)
5955 k++;
5956 }
5957 err_exit:
5958 lpfc_sli4_mbox_cmd_free(phba, mbox);
5959 return rc;
5960}
5961
5962
5963
5964
5965
5966
5967
5968
5969
5970
5971
5972
/**
 * lpfc_sli4_dealloc_extent - Deallocate a port's resource extents of a type
 * @phba: pointer to lpfc hba data structure.
 * @type: the extent resource type (RPI, VPI, XRI or VFI).
 *
 * Issues a DEALLOC_RSRC_EXTENT mailbox command so the port releases all
 * extents of @type, then frees the driver-side bookkeeping (id bitmask,
 * id array, and extent block list) for that type.
 *
 * Return: 0 on success, -ENOMEM when a mailbox can't be allocated, -EIO
 * when the mailbox command fails or the port reports an error status.
 **/
static int
lpfc_sli4_dealloc_extent(struct lpfc_hba *phba, uint16_t type)
{
	int rc;
	uint32_t length, mbox_tmo = 0;
	LPFC_MBOXQ_t *mbox;
	struct lpfc_mbx_dealloc_rsrc_extents *dealloc_rsrc;
	struct lpfc_rsrc_blks *rsrc_blk, *rsrc_blk_next;

	mbox = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!mbox)
		return -ENOMEM;

	/*
	 * An embedded mailbox suffices here: only the resource type is sent
	 * and the port releases every extent of that type.
	 */
	length = (sizeof(struct lpfc_mbx_dealloc_rsrc_extents) -
		  sizeof(struct lpfc_sli4_cfg_mhdr));
	lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON,
			 LPFC_MBOX_OPCODE_DEALLOC_RSRC_EXTENT,
			 length, LPFC_SLI4_MBX_EMBED);

	/* An extent count of 0 is sent; the dealloc command ignores it. */
	rc = lpfc_sli4_mbox_rsrc_extent(phba, mbox, 0, type,
					LPFC_SLI4_MBX_EMBED);
	if (unlikely(rc)) {
		rc = -EIO;
		goto out_free_mbox;
	}
	/* Poll when interrupts aren't enabled yet (early init/teardown). */
	if (!phba->sli4_hba.intr_enable)
		rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
	else {
		mbox_tmo = lpfc_mbox_tmo_val(phba, mbox);
		rc = lpfc_sli_issue_mbox_wait(phba, mbox, mbox_tmo);
	}
	if (unlikely(rc)) {
		rc = -EIO;
		goto out_free_mbox;
	}

	/* Check the command status reported by the port. */
	dealloc_rsrc = &mbox->u.mqe.un.dealloc_rsrc_extents;
	if (bf_get(lpfc_mbox_hdr_status,
		   &dealloc_rsrc->header.cfg_shdr.response)) {
		lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_INIT,
				"2919 Failed to release resource extents "
				"for type %d - Status 0x%x Add'l Status 0x%x. "
				"Resource memory not released.\n",
				type,
				bf_get(lpfc_mbox_hdr_status,
				       &dealloc_rsrc->header.cfg_shdr.response),
				bf_get(lpfc_mbox_hdr_add_status,
				       &dealloc_rsrc->header.cfg_shdr.response));
		rc = -EIO;
		goto out_free_mbox;
	}

	/* Release the driver-side memory for this resource type. */
	switch (type) {
	case LPFC_RSC_TYPE_FCOE_VPI:
		kfree(phba->vpi_bmask);
		kfree(phba->vpi_ids);
		bf_set(lpfc_vpi_rsrc_rdy, &phba->sli4_hba.sli4_flags, 0);
		list_for_each_entry_safe(rsrc_blk, rsrc_blk_next,
					 &phba->lpfc_vpi_blk_list, list) {
			list_del_init(&rsrc_blk->list);
			kfree(rsrc_blk);
		}
		phba->sli4_hba.max_cfg_param.vpi_used = 0;
		break;
	case LPFC_RSC_TYPE_FCOE_XRI:
		kfree(phba->sli4_hba.xri_bmask);
		kfree(phba->sli4_hba.xri_ids);
		list_for_each_entry_safe(rsrc_blk, rsrc_blk_next,
					 &phba->sli4_hba.lpfc_xri_blk_list, list) {
			list_del_init(&rsrc_blk->list);
			kfree(rsrc_blk);
		}
		break;
	case LPFC_RSC_TYPE_FCOE_VFI:
		kfree(phba->sli4_hba.vfi_bmask);
		kfree(phba->sli4_hba.vfi_ids);
		bf_set(lpfc_vfi_rsrc_rdy, &phba->sli4_hba.sli4_flags, 0);
		list_for_each_entry_safe(rsrc_blk, rsrc_blk_next,
					 &phba->sli4_hba.lpfc_vfi_blk_list, list) {
			list_del_init(&rsrc_blk->list);
			kfree(rsrc_blk);
		}
		break;
	case LPFC_RSC_TYPE_FCOE_RPI:
		/*
		 * Only the extent block list is emptied here; the rpi
		 * bitmask/id array are NOT freed in this case - presumably
		 * released on the rpi header teardown path. NOTE(review):
		 * confirm against the rpi cleanup code.
		 */
		list_for_each_entry_safe(rsrc_blk, rsrc_blk_next,
					 &phba->sli4_hba.lpfc_rpi_blk_list, list) {
			list_del_init(&rsrc_blk->list);
			kfree(rsrc_blk);
		}
		break;
	default:
		break;
	}

	bf_set(lpfc_idx_rsrc_rdy, &phba->sli4_hba.sli4_flags, 0);

 out_free_mbox:
	mempool_free(mbox, phba->mbox_mem_pool);
	return rc;
}
6081
6082static void
6083lpfc_set_features(struct lpfc_hba *phba, LPFC_MBOXQ_t *mbox,
6084 uint32_t feature)
6085{
6086 uint32_t len;
6087
6088 len = sizeof(struct lpfc_mbx_set_feature) -
6089 sizeof(struct lpfc_sli4_cfg_mhdr);
6090 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON,
6091 LPFC_MBOX_OPCODE_SET_FEATURES, len,
6092 LPFC_SLI4_MBX_EMBED);
6093
6094 switch (feature) {
6095 case LPFC_SET_UE_RECOVERY:
6096 bf_set(lpfc_mbx_set_feature_UER,
6097 &mbox->u.mqe.un.set_feature, 1);
6098 mbox->u.mqe.un.set_feature.feature = LPFC_SET_UE_RECOVERY;
6099 mbox->u.mqe.un.set_feature.param_len = 8;
6100 break;
6101 case LPFC_SET_MDS_DIAGS:
6102 bf_set(lpfc_mbx_set_feature_mds,
6103 &mbox->u.mqe.un.set_feature, 1);
6104 bf_set(lpfc_mbx_set_feature_mds_deep_loopbk,
6105 &mbox->u.mqe.un.set_feature, 1);
6106 mbox->u.mqe.un.set_feature.feature = LPFC_SET_MDS_DIAGS;
6107 mbox->u.mqe.un.set_feature.param_len = 8;
6108 break;
6109 }
6110
6111 return;
6112}
6113
6114
6115
6116
6117
6118
6119
6120int
6121lpfc_sli4_alloc_resource_identifiers(struct lpfc_hba *phba)
6122{
6123 int i, rc, error = 0;
6124 uint16_t count, base;
6125 unsigned long longs;
6126
6127 if (!phba->sli4_hba.rpi_hdrs_in_use)
6128 phba->sli4_hba.next_rpi = phba->sli4_hba.max_cfg_param.max_rpi;
6129 if (phba->sli4_hba.extents_in_use) {
6130
6131
6132
6133
6134
6135 if (bf_get(lpfc_idx_rsrc_rdy, &phba->sli4_hba.sli4_flags) ==
6136 LPFC_IDX_RSRC_RDY) {
6137
6138
6139
6140
6141
6142 rc = lpfc_sli4_chk_avail_extnt_rsrc(phba,
6143 LPFC_RSC_TYPE_FCOE_VFI);
6144 if (rc != 0)
6145 error++;
6146 rc = lpfc_sli4_chk_avail_extnt_rsrc(phba,
6147 LPFC_RSC_TYPE_FCOE_VPI);
6148 if (rc != 0)
6149 error++;
6150 rc = lpfc_sli4_chk_avail_extnt_rsrc(phba,
6151 LPFC_RSC_TYPE_FCOE_XRI);
6152 if (rc != 0)
6153 error++;
6154 rc = lpfc_sli4_chk_avail_extnt_rsrc(phba,
6155 LPFC_RSC_TYPE_FCOE_RPI);
6156 if (rc != 0)
6157 error++;
6158
6159
6160
6161
6162
6163
6164
6165 if (error) {
6166 lpfc_printf_log(phba, KERN_INFO,
6167 LOG_MBOX | LOG_INIT,
6168 "2931 Detected extent resource "
6169 "change. Reallocating all "
6170 "extents.\n");
6171 rc = lpfc_sli4_dealloc_extent(phba,
6172 LPFC_RSC_TYPE_FCOE_VFI);
6173 rc = lpfc_sli4_dealloc_extent(phba,
6174 LPFC_RSC_TYPE_FCOE_VPI);
6175 rc = lpfc_sli4_dealloc_extent(phba,
6176 LPFC_RSC_TYPE_FCOE_XRI);
6177 rc = lpfc_sli4_dealloc_extent(phba,
6178 LPFC_RSC_TYPE_FCOE_RPI);
6179 } else
6180 return 0;
6181 }
6182
6183 rc = lpfc_sli4_alloc_extent(phba, LPFC_RSC_TYPE_FCOE_VFI);
6184 if (unlikely(rc))
6185 goto err_exit;
6186
6187 rc = lpfc_sli4_alloc_extent(phba, LPFC_RSC_TYPE_FCOE_VPI);
6188 if (unlikely(rc))
6189 goto err_exit;
6190
6191 rc = lpfc_sli4_alloc_extent(phba, LPFC_RSC_TYPE_FCOE_RPI);
6192 if (unlikely(rc))
6193 goto err_exit;
6194
6195 rc = lpfc_sli4_alloc_extent(phba, LPFC_RSC_TYPE_FCOE_XRI);
6196 if (unlikely(rc))
6197 goto err_exit;
6198 bf_set(lpfc_idx_rsrc_rdy, &phba->sli4_hba.sli4_flags,
6199 LPFC_IDX_RSRC_RDY);
6200 return rc;
6201 } else {
6202
6203
6204
6205
6206
6207
6208
6209 if (bf_get(lpfc_idx_rsrc_rdy, &phba->sli4_hba.sli4_flags) ==
6210 LPFC_IDX_RSRC_RDY) {
6211 lpfc_sli4_dealloc_resource_identifiers(phba);
6212 lpfc_sli4_remove_rpis(phba);
6213 }
6214
6215 count = phba->sli4_hba.max_cfg_param.max_rpi;
6216 if (count <= 0) {
6217 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
6218 "3279 Invalid provisioning of "
6219 "rpi:%d\n", count);
6220 rc = -EINVAL;
6221 goto err_exit;
6222 }
6223 base = phba->sli4_hba.max_cfg_param.rpi_base;
6224 longs = (count + BITS_PER_LONG - 1) / BITS_PER_LONG;
6225 phba->sli4_hba.rpi_bmask = kzalloc(longs *
6226 sizeof(unsigned long),
6227 GFP_KERNEL);
6228 if (unlikely(!phba->sli4_hba.rpi_bmask)) {
6229 rc = -ENOMEM;
6230 goto err_exit;
6231 }
6232 phba->sli4_hba.rpi_ids = kzalloc(count *
6233 sizeof(uint16_t),
6234 GFP_KERNEL);
6235 if (unlikely(!phba->sli4_hba.rpi_ids)) {
6236 rc = -ENOMEM;
6237 goto free_rpi_bmask;
6238 }
6239
6240 for (i = 0; i < count; i++)
6241 phba->sli4_hba.rpi_ids[i] = base + i;
6242
6243
6244 count = phba->sli4_hba.max_cfg_param.max_vpi;
6245 if (count <= 0) {
6246 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
6247 "3280 Invalid provisioning of "
6248 "vpi:%d\n", count);
6249 rc = -EINVAL;
6250 goto free_rpi_ids;
6251 }
6252 base = phba->sli4_hba.max_cfg_param.vpi_base;
6253 longs = (count + BITS_PER_LONG - 1) / BITS_PER_LONG;
6254 phba->vpi_bmask = kzalloc(longs *
6255 sizeof(unsigned long),
6256 GFP_KERNEL);
6257 if (unlikely(!phba->vpi_bmask)) {
6258 rc = -ENOMEM;
6259 goto free_rpi_ids;
6260 }
6261 phba->vpi_ids = kzalloc(count *
6262 sizeof(uint16_t),
6263 GFP_KERNEL);
6264 if (unlikely(!phba->vpi_ids)) {
6265 rc = -ENOMEM;
6266 goto free_vpi_bmask;
6267 }
6268
6269 for (i = 0; i < count; i++)
6270 phba->vpi_ids[i] = base + i;
6271
6272
6273 count = phba->sli4_hba.max_cfg_param.max_xri;
6274 if (count <= 0) {
6275 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
6276 "3281 Invalid provisioning of "
6277 "xri:%d\n", count);
6278 rc = -EINVAL;
6279 goto free_vpi_ids;
6280 }
6281 base = phba->sli4_hba.max_cfg_param.xri_base;
6282 longs = (count + BITS_PER_LONG - 1) / BITS_PER_LONG;
6283 phba->sli4_hba.xri_bmask = kzalloc(longs *
6284 sizeof(unsigned long),
6285 GFP_KERNEL);
6286 if (unlikely(!phba->sli4_hba.xri_bmask)) {
6287 rc = -ENOMEM;
6288 goto free_vpi_ids;
6289 }
6290 phba->sli4_hba.max_cfg_param.xri_used = 0;
6291 phba->sli4_hba.xri_ids = kzalloc(count *
6292 sizeof(uint16_t),
6293 GFP_KERNEL);
6294 if (unlikely(!phba->sli4_hba.xri_ids)) {
6295 rc = -ENOMEM;
6296 goto free_xri_bmask;
6297 }
6298
6299 for (i = 0; i < count; i++)
6300 phba->sli4_hba.xri_ids[i] = base + i;
6301
6302
6303 count = phba->sli4_hba.max_cfg_param.max_vfi;
6304 if (count <= 0) {
6305 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
6306 "3282 Invalid provisioning of "
6307 "vfi:%d\n", count);
6308 rc = -EINVAL;
6309 goto free_xri_ids;
6310 }
6311 base = phba->sli4_hba.max_cfg_param.vfi_base;
6312 longs = (count + BITS_PER_LONG - 1) / BITS_PER_LONG;
6313 phba->sli4_hba.vfi_bmask = kzalloc(longs *
6314 sizeof(unsigned long),
6315 GFP_KERNEL);
6316 if (unlikely(!phba->sli4_hba.vfi_bmask)) {
6317 rc = -ENOMEM;
6318 goto free_xri_ids;
6319 }
6320 phba->sli4_hba.vfi_ids = kzalloc(count *
6321 sizeof(uint16_t),
6322 GFP_KERNEL);
6323 if (unlikely(!phba->sli4_hba.vfi_ids)) {
6324 rc = -ENOMEM;
6325 goto free_vfi_bmask;
6326 }
6327
6328 for (i = 0; i < count; i++)
6329 phba->sli4_hba.vfi_ids[i] = base + i;
6330
6331
6332
6333
6334
6335 bf_set(lpfc_idx_rsrc_rdy, &phba->sli4_hba.sli4_flags,
6336 LPFC_IDX_RSRC_RDY);
6337 return 0;
6338 }
6339
6340 free_vfi_bmask:
6341 kfree(phba->sli4_hba.vfi_bmask);
6342 phba->sli4_hba.vfi_bmask = NULL;
6343 free_xri_ids:
6344 kfree(phba->sli4_hba.xri_ids);
6345 phba->sli4_hba.xri_ids = NULL;
6346 free_xri_bmask:
6347 kfree(phba->sli4_hba.xri_bmask);
6348 phba->sli4_hba.xri_bmask = NULL;
6349 free_vpi_ids:
6350 kfree(phba->vpi_ids);
6351 phba->vpi_ids = NULL;
6352 free_vpi_bmask:
6353 kfree(phba->vpi_bmask);
6354 phba->vpi_bmask = NULL;
6355 free_rpi_ids:
6356 kfree(phba->sli4_hba.rpi_ids);
6357 phba->sli4_hba.rpi_ids = NULL;
6358 free_rpi_bmask:
6359 kfree(phba->sli4_hba.rpi_bmask);
6360 phba->sli4_hba.rpi_bmask = NULL;
6361 err_exit:
6362 return rc;
6363}
6364
6365
6366
6367
6368
6369
6370
6371
6372int
6373lpfc_sli4_dealloc_resource_identifiers(struct lpfc_hba *phba)
6374{
6375 if (phba->sli4_hba.extents_in_use) {
6376 lpfc_sli4_dealloc_extent(phba, LPFC_RSC_TYPE_FCOE_VPI);
6377 lpfc_sli4_dealloc_extent(phba, LPFC_RSC_TYPE_FCOE_RPI);
6378 lpfc_sli4_dealloc_extent(phba, LPFC_RSC_TYPE_FCOE_XRI);
6379 lpfc_sli4_dealloc_extent(phba, LPFC_RSC_TYPE_FCOE_VFI);
6380 } else {
6381 kfree(phba->vpi_bmask);
6382 phba->sli4_hba.max_cfg_param.vpi_used = 0;
6383 kfree(phba->vpi_ids);
6384 bf_set(lpfc_vpi_rsrc_rdy, &phba->sli4_hba.sli4_flags, 0);
6385 kfree(phba->sli4_hba.xri_bmask);
6386 kfree(phba->sli4_hba.xri_ids);
6387 kfree(phba->sli4_hba.vfi_bmask);
6388 kfree(phba->sli4_hba.vfi_ids);
6389 bf_set(lpfc_vfi_rsrc_rdy, &phba->sli4_hba.sli4_flags, 0);
6390 bf_set(lpfc_idx_rsrc_rdy, &phba->sli4_hba.sli4_flags, 0);
6391 }
6392
6393 return 0;
6394}
6395
6396
6397
6398
6399
6400
6401
6402
6403
6404
6405
6406int
6407lpfc_sli4_get_allocated_extnts(struct lpfc_hba *phba, uint16_t type,
6408 uint16_t *extnt_cnt, uint16_t *extnt_size)
6409{
6410 bool emb;
6411 int rc = 0;
6412 uint16_t curr_blks = 0;
6413 uint32_t req_len, emb_len;
6414 uint32_t alloc_len, mbox_tmo;
6415 struct list_head *blk_list_head;
6416 struct lpfc_rsrc_blks *rsrc_blk;
6417 LPFC_MBOXQ_t *mbox;
6418 void *virtaddr = NULL;
6419 struct lpfc_mbx_nembed_rsrc_extent *n_rsrc;
6420 struct lpfc_mbx_alloc_rsrc_extents *rsrc_ext;
6421 union lpfc_sli4_cfg_shdr *shdr;
6422
6423 switch (type) {
6424 case LPFC_RSC_TYPE_FCOE_VPI:
6425 blk_list_head = &phba->lpfc_vpi_blk_list;
6426 break;
6427 case LPFC_RSC_TYPE_FCOE_XRI:
6428 blk_list_head = &phba->sli4_hba.lpfc_xri_blk_list;
6429 break;
6430 case LPFC_RSC_TYPE_FCOE_VFI:
6431 blk_list_head = &phba->sli4_hba.lpfc_vfi_blk_list;
6432 break;
6433 case LPFC_RSC_TYPE_FCOE_RPI:
6434 blk_list_head = &phba->sli4_hba.lpfc_rpi_blk_list;
6435 break;
6436 default:
6437 return -EIO;
6438 }
6439
6440
6441 list_for_each_entry(rsrc_blk, blk_list_head, list) {
6442 if (curr_blks == 0) {
6443
6444
6445
6446
6447
6448
6449
6450 *extnt_size = rsrc_blk->rsrc_size;
6451 }
6452 curr_blks++;
6453 }
6454
6455
6456
6457
6458
6459 emb_len = sizeof(MAILBOX_t) - sizeof(struct mbox_header) -
6460 sizeof(uint32_t);
6461
6462
6463
6464
6465
6466 emb = LPFC_SLI4_MBX_EMBED;
6467 req_len = emb_len;
6468 if (req_len > emb_len) {
6469 req_len = curr_blks * sizeof(uint16_t) +
6470 sizeof(union lpfc_sli4_cfg_shdr) +
6471 sizeof(uint32_t);
6472 emb = LPFC_SLI4_MBX_NEMBED;
6473 }
6474
6475 mbox = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
6476 if (!mbox)
6477 return -ENOMEM;
6478 memset(mbox, 0, sizeof(LPFC_MBOXQ_t));
6479
6480 alloc_len = lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON,
6481 LPFC_MBOX_OPCODE_GET_ALLOC_RSRC_EXTENT,
6482 req_len, emb);
6483 if (alloc_len < req_len) {
6484 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
6485 "2983 Allocated DMA memory size (x%x) is "
6486 "less than the requested DMA memory "
6487 "size (x%x)\n", alloc_len, req_len);
6488 rc = -ENOMEM;
6489 goto err_exit;
6490 }
6491 rc = lpfc_sli4_mbox_rsrc_extent(phba, mbox, curr_blks, type, emb);
6492 if (unlikely(rc)) {
6493 rc = -EIO;
6494 goto err_exit;
6495 }
6496
6497 if (!phba->sli4_hba.intr_enable)
6498 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
6499 else {
6500 mbox_tmo = lpfc_mbox_tmo_val(phba, mbox);
6501 rc = lpfc_sli_issue_mbox_wait(phba, mbox, mbox_tmo);
6502 }
6503
6504 if (unlikely(rc)) {
6505 rc = -EIO;
6506 goto err_exit;
6507 }
6508
6509
6510
6511
6512
6513
6514
6515 if (emb == LPFC_SLI4_MBX_EMBED) {
6516 rsrc_ext = &mbox->u.mqe.un.alloc_rsrc_extents;
6517 shdr = &rsrc_ext->header.cfg_shdr;
6518 *extnt_cnt = bf_get(lpfc_mbx_rsrc_cnt, &rsrc_ext->u.rsp);
6519 } else {
6520 virtaddr = mbox->sge_array->addr[0];
6521 n_rsrc = (struct lpfc_mbx_nembed_rsrc_extent *) virtaddr;
6522 shdr = &n_rsrc->cfg_shdr;
6523 *extnt_cnt = bf_get(lpfc_mbx_rsrc_cnt, n_rsrc);
6524 }
6525
6526 if (bf_get(lpfc_mbox_hdr_status, &shdr->response)) {
6527 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_INIT,
6528 "2984 Failed to read allocated resources "
6529 "for type %d - Status 0x%x Add'l Status 0x%x.\n",
6530 type,
6531 bf_get(lpfc_mbox_hdr_status, &shdr->response),
6532 bf_get(lpfc_mbox_hdr_add_status, &shdr->response));
6533 rc = -EIO;
6534 goto err_exit;
6535 }
6536 err_exit:
6537 lpfc_sli4_mbox_cmd_free(phba, mbox);
6538 return rc;
6539}
6540
6541
6542
6543
6544
6545
6546
6547
6548
6549
6550
6551
6552
6553
6554
6555
6556
6557
/**
 * lpfc_sli4_repost_sgl_list - Repost a list of sgl buffers to the port
 * @phba: pointer to lpfc hba data structure.
 * @sgl_list: linked list of sgl buffers to post.
 * @cnt: number of buffers on @sgl_list.
 *
 * Walks @sgl_list and reposts each sgl to the port.  Runs of contiguous
 * XRIs are batched (up to LPFC_NEMBED_MBOX_SGL_CNT) and posted with the
 * non-embedded block-post mailbox; a lone trailing sgl is posted with the
 * single-sgl embedded command.  Successfully posted sgls are spliced back
 * onto @sgl_list; failed ones are freed.
 *
 * Return: the number of sgls successfully posted, or -EIO when none
 * could be posted at all.
 **/
static int
lpfc_sli4_repost_sgl_list(struct lpfc_hba *phba,
			  struct list_head *sgl_list, int cnt)
{
	struct lpfc_sglq *sglq_entry = NULL;
	struct lpfc_sglq *sglq_entry_next = NULL;
	struct lpfc_sglq *sglq_entry_first = NULL;
	int status, total_cnt;
	int post_cnt = 0, num_posted = 0, block_cnt = 0;
	int last_xritag = NO_XRI;
	LIST_HEAD(prep_sgl_list);
	LIST_HEAD(blck_sgl_list);
	LIST_HEAD(allc_sgl_list);
	LIST_HEAD(post_sgl_list);
	LIST_HEAD(free_sgl_list);

	/* Detach the whole input list under both locks for repartitioning. */
	spin_lock_irq(&phba->hbalock);
	spin_lock(&phba->sli4_hba.sgl_list_lock);
	list_splice_init(sgl_list, &allc_sgl_list);
	spin_unlock(&phba->sli4_hba.sgl_list_lock);
	spin_unlock_irq(&phba->hbalock);

	total_cnt = cnt;
	list_for_each_entry_safe(sglq_entry, sglq_entry_next,
				 &allc_sgl_list, list) {
		list_del_init(&sglq_entry->list);
		block_cnt++;
		if ((last_xritag != NO_XRI) &&
		    (sglq_entry->sli4_xritag != last_xritag + 1)) {
			/* Hole in the xri sequence: close the current block */
			list_splice_init(&prep_sgl_list, &blck_sgl_list);
			post_cnt = block_cnt - 1;
			/* and start a new block with this sgl. */
			list_add_tail(&sglq_entry->list, &prep_sgl_list);
			block_cnt = 1;
		} else {
			/* Contiguous xri: keep growing the current block. */
			list_add_tail(&sglq_entry->list, &prep_sgl_list);
			/* Block full: ready for a non-embedded block post. */
			if (block_cnt == LPFC_NEMBED_MBOX_SGL_CNT) {
				list_splice_init(&prep_sgl_list,
						 &blck_sgl_list);
				post_cnt = block_cnt;
				block_cnt = 0;
			}
		}
		num_posted++;

		/* Track the last xri to detect sequence holes next round. */
		last_xritag = sglq_entry->sli4_xritag;

		/* Last entry: flush whatever is still pending. */
		if (num_posted == total_cnt) {
			if (post_cnt == 0) {
				/* No block closed this round; close it now. */
				list_splice_init(&prep_sgl_list,
						 &blck_sgl_list);
				post_cnt = block_cnt;
			} else if (block_cnt == 1) {
				/* Single leftover sgl: embedded single post. */
				status = lpfc_sli4_post_sgl(phba,
						sglq_entry->phys, 0,
						sglq_entry->sli4_xritag);
				if (!status) {
					/* Success: move to the posted list. */
					list_add_tail(&sglq_entry->list,
						      &post_sgl_list);
				} else {
					/* Failure: move to the free list. */
					lpfc_printf_log(phba, KERN_WARNING,
							LOG_SLI,
							"3159 Failed to post "
							"sgl, xritag:x%x\n",
							sglq_entry->sli4_xritag);
					list_add_tail(&sglq_entry->list,
						      &free_sgl_list);
					total_cnt--;
				}
			}
		}

		/* Nothing ready to post this iteration: keep accumulating. */
		if (post_cnt == 0)
			continue;

		/* Post the closed block with one non-embedded command. */
		status = lpfc_sli4_post_sgl_list(phba, &blck_sgl_list,
						 post_cnt);

		if (!status) {
			/* Success: the whole block joins the posted list. */
			list_splice_init(&blck_sgl_list, &post_sgl_list);
		} else {
			/* Failure: the whole block goes to the free list. */
			sglq_entry_first = list_first_entry(&blck_sgl_list,
							    struct lpfc_sglq,
							    list);
			lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
					"3160 Failed to post sgl-list, "
					"xritag:x%x-x%x\n",
					sglq_entry_first->sli4_xritag,
					(sglq_entry_first->sli4_xritag +
					 post_cnt - 1));
			list_splice_init(&blck_sgl_list, &free_sgl_list);
			total_cnt -= post_cnt;
		}

		/* Don't reset last_xritag when a hole started a new block. */
		if (block_cnt == 0)
			last_xritag = NO_XRI;

		/* Reset the post count for the next block. */
		post_cnt = 0;
	}

	/* Free the sgls that failed to post. */
	lpfc_free_sgl_list(phba, &free_sgl_list);

	/* Splice the successfully posted sgls back onto the caller's list. */
	if (!list_empty(&post_sgl_list)) {
		spin_lock_irq(&phba->hbalock);
		spin_lock(&phba->sli4_hba.sgl_list_lock);
		list_splice_init(&post_sgl_list, sgl_list);
		spin_unlock(&phba->sli4_hba.sgl_list_lock);
		spin_unlock_irq(&phba->hbalock);
	} else {
		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
				"3161 Failure to post sgl to port.\n");
		return -EIO;
	}

	/* Return the number of XRIs actually posted. */
	return total_cnt;
}
6690
6691void
6692lpfc_set_host_data(struct lpfc_hba *phba, LPFC_MBOXQ_t *mbox)
6693{
6694 uint32_t len;
6695
6696 len = sizeof(struct lpfc_mbx_set_host_data) -
6697 sizeof(struct lpfc_sli4_cfg_mhdr);
6698 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON,
6699 LPFC_MBOX_OPCODE_SET_HOST_DATA, len,
6700 LPFC_SLI4_MBX_EMBED);
6701
6702 mbox->u.mqe.un.set_host_data.param_id = LPFC_SET_HOST_OS_DRIVER_VERSION;
6703 mbox->u.mqe.un.set_host_data.param_len =
6704 LPFC_HOST_OS_DRIVER_VERSION_SIZE;
6705 snprintf(mbox->u.mqe.un.set_host_data.data,
6706 LPFC_HOST_OS_DRIVER_VERSION_SIZE,
6707 "Linux %s v"LPFC_DRIVER_VERSION,
6708 (phba->hba_flag & HBA_FCOE_MODE) ? "FCoE" : "FC");
6709}
6710
/**
 * lpfc_post_rq_buffer - Post buffer pairs to a header/data receive queue pair
 * @phba: pointer to lpfc hba data structure.
 * @hrq: the header receive queue.
 * @drq: the companion data receive queue.
 * @count: number of buffer pairs to attempt to allocate and post.
 * @idx: queue index recorded in each posted buffer.
 *
 * Allocates up to @count receive buffers - stopping early when allocation
 * fails or the RQ is nearly full - and posts each header/data DMA address
 * pair to the port.  Successfully posted buffers are moved onto the RQ's
 * buffer list; buffers the port refused are freed.  The entire operation
 * runs with phba->hbalock held (irqsave).
 *
 * Return: always 1, regardless of how many buffers were actually posted.
 **/
int
lpfc_post_rq_buffer(struct lpfc_hba *phba, struct lpfc_queue *hrq,
		    struct lpfc_queue *drq, int count, int idx)
{
	int rc, i;
	struct lpfc_rqe hrqe;
	struct lpfc_rqe drqe;
	struct lpfc_rqb *rqbp;
	unsigned long flags;
	struct rqb_dmabuf *rqb_buffer;
	LIST_HEAD(rqb_buf_list);

	spin_lock_irqsave(&phba->hbalock, flags);
	rqbp = hrq->rqbp;
	for (i = 0; i < count; i++) {
		/* If the RQ is already (nearly) full, don't bother. */
		if (rqbp->buffer_count + i >= rqbp->entry_count - 1)
			break;
		rqb_buffer = rqbp->rqb_alloc_buffer(phba);
		if (!rqb_buffer)
			break;
		rqb_buffer->hrq = hrq;
		rqb_buffer->drq = drq;
		rqb_buffer->idx = idx;
		list_add_tail(&rqb_buffer->hbuf.list, &rqb_buf_list);
	}
	/* Post each allocated header/data pair to the port. */
	while (!list_empty(&rqb_buf_list)) {
		list_remove_head(&rqb_buf_list, rqb_buffer, struct rqb_dmabuf,
				 hbuf.list);

		hrqe.address_lo = putPaddrLow(rqb_buffer->hbuf.phys);
		hrqe.address_hi = putPaddrHigh(rqb_buffer->hbuf.phys);
		drqe.address_lo = putPaddrLow(rqb_buffer->dbuf.phys);
		drqe.address_hi = putPaddrHigh(rqb_buffer->dbuf.phys);
		rc = lpfc_sli4_rq_put(hrq, drq, &hrqe, &drqe);
		if (rc < 0) {
			/* Port refused the entry: log and free the buffer. */
			lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
					"6421 Cannot post to HRQ %d: %x %x %x "
					"DRQ %x %x\n",
					hrq->queue_id,
					hrq->host_index,
					hrq->hba_index,
					hrq->entry_count,
					drq->host_index,
					drq->hba_index);
			rqbp->rqb_free_buffer(phba, rqb_buffer);
		} else {
			/* Posted: track the buffer on the RQ's list. */
			list_add_tail(&rqb_buffer->hbuf.list,
				      &rqbp->rqb_buffer_list);
			rqbp->buffer_count++;
		}
	}
	spin_unlock_irqrestore(&phba->hbalock, flags);
	return 1;
}
6766
6767
6768
6769
6770
6771
6772
6773
6774
6775
6776int
6777lpfc_sli4_hba_setup(struct lpfc_hba *phba)
6778{
6779 int rc, i, cnt;
6780 LPFC_MBOXQ_t *mboxq;
6781 struct lpfc_mqe *mqe;
6782 uint8_t *vpd;
6783 uint32_t vpd_size;
6784 uint32_t ftr_rsp = 0;
6785 struct Scsi_Host *shost = lpfc_shost_from_vport(phba->pport);
6786 struct lpfc_vport *vport = phba->pport;
6787 struct lpfc_dmabuf *mp;
6788 struct lpfc_rqb *rqbp;
6789
6790
6791 rc = lpfc_pci_function_reset(phba);
6792 if (unlikely(rc))
6793 return -ENODEV;
6794
6795
6796 rc = lpfc_sli4_post_status_check(phba);
6797 if (unlikely(rc))
6798 return -ENODEV;
6799 else {
6800 spin_lock_irq(&phba->hbalock);
6801 phba->sli.sli_flag |= LPFC_SLI_ACTIVE;
6802 spin_unlock_irq(&phba->hbalock);
6803 }
6804
6805
6806
6807
6808
6809 mboxq = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
6810 if (!mboxq)
6811 return -ENOMEM;
6812
6813
6814 vpd_size = SLI4_PAGE_SIZE;
6815 vpd = kzalloc(vpd_size, GFP_KERNEL);
6816 if (!vpd) {
6817 rc = -ENOMEM;
6818 goto out_free_mbox;
6819 }
6820
6821 rc = lpfc_sli4_read_rev(phba, mboxq, vpd, &vpd_size);
6822 if (unlikely(rc)) {
6823 kfree(vpd);
6824 goto out_free_mbox;
6825 }
6826
6827 mqe = &mboxq->u.mqe;
6828 phba->sli_rev = bf_get(lpfc_mbx_rd_rev_sli_lvl, &mqe->un.read_rev);
6829 if (bf_get(lpfc_mbx_rd_rev_fcoe, &mqe->un.read_rev)) {
6830 phba->hba_flag |= HBA_FCOE_MODE;
6831 phba->fcp_embed_io = 0;
6832 } else {
6833 phba->hba_flag &= ~HBA_FCOE_MODE;
6834 }
6835
6836 if (bf_get(lpfc_mbx_rd_rev_cee_ver, &mqe->un.read_rev) ==
6837 LPFC_DCBX_CEE_MODE)
6838 phba->hba_flag |= HBA_FIP_SUPPORT;
6839 else
6840 phba->hba_flag &= ~HBA_FIP_SUPPORT;
6841
6842 phba->hba_flag &= ~HBA_FCP_IOQ_FLUSH;
6843
6844 if (phba->sli_rev != LPFC_SLI_REV4) {
6845 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
6846 "0376 READ_REV Error. SLI Level %d "
6847 "FCoE enabled %d\n",
6848 phba->sli_rev, phba->hba_flag & HBA_FCOE_MODE);
6849 rc = -EIO;
6850 kfree(vpd);
6851 goto out_free_mbox;
6852 }
6853
6854
6855
6856
6857
6858
6859 if (phba->hba_flag & HBA_FCOE_MODE &&
6860 lpfc_sli4_read_fcoe_params(phba))
6861 lpfc_printf_log(phba, KERN_WARNING, LOG_MBOX | LOG_INIT,
6862 "2570 Failed to read FCoE parameters\n");
6863
6864
6865
6866
6867
6868 rc = lpfc_sli4_retrieve_pport_name(phba);
6869 if (!rc)
6870 lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI,
6871 "3080 Successful retrieving SLI4 device "
6872 "physical port name: %s.\n", phba->Port);
6873
6874
6875
6876
6877
6878
6879 rc = lpfc_parse_vpd(phba, vpd, vpd_size);
6880 if (unlikely(!rc)) {
6881 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
6882 "0377 Error %d parsing vpd. "
6883 "Using defaults.\n", rc);
6884 rc = 0;
6885 }
6886 kfree(vpd);
6887
6888
6889 phba->vpd.rev.biuRev = mqe->un.read_rev.first_hw_rev;
6890 phba->vpd.rev.smRev = mqe->un.read_rev.second_hw_rev;
6891
6892
6893
6894
6895
6896 if ((bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) ==
6897 LPFC_SLI_INTF_IF_TYPE_6) &&
6898 (phba->vpd.rev.biuRev == LPFC_G7_ASIC_1) &&
6899 (phba->vpd.rev.smRev == 0) &&
6900 (phba->cfg_nvme_embed_cmd == 1))
6901 phba->cfg_nvme_embed_cmd = 0;
6902
6903 phba->vpd.rev.endecRev = mqe->un.read_rev.third_hw_rev;
6904 phba->vpd.rev.fcphHigh = bf_get(lpfc_mbx_rd_rev_fcph_high,
6905 &mqe->un.read_rev);
6906 phba->vpd.rev.fcphLow = bf_get(lpfc_mbx_rd_rev_fcph_low,
6907 &mqe->un.read_rev);
6908 phba->vpd.rev.feaLevelHigh = bf_get(lpfc_mbx_rd_rev_ftr_lvl_high,
6909 &mqe->un.read_rev);
6910 phba->vpd.rev.feaLevelLow = bf_get(lpfc_mbx_rd_rev_ftr_lvl_low,
6911 &mqe->un.read_rev);
6912 phba->vpd.rev.sli1FwRev = mqe->un.read_rev.fw_id_rev;
6913 memcpy(phba->vpd.rev.sli1FwName, mqe->un.read_rev.fw_name, 16);
6914 phba->vpd.rev.sli2FwRev = mqe->un.read_rev.ulp_fw_id_rev;
6915 memcpy(phba->vpd.rev.sli2FwName, mqe->un.read_rev.ulp_fw_name, 16);
6916 phba->vpd.rev.opFwRev = mqe->un.read_rev.fw_id_rev;
6917 memcpy(phba->vpd.rev.opFwName, mqe->un.read_rev.fw_name, 16);
6918 lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI,
6919 "(%d):0380 READ_REV Status x%x "
6920 "fw_rev:%s fcphHi:%x fcphLo:%x flHi:%x flLo:%x\n",
6921 mboxq->vport ? mboxq->vport->vpi : 0,
6922 bf_get(lpfc_mqe_status, mqe),
6923 phba->vpd.rev.opFwName,
6924 phba->vpd.rev.fcphHigh, phba->vpd.rev.fcphLow,
6925 phba->vpd.rev.feaLevelHigh, phba->vpd.rev.feaLevelLow);
6926
6927
6928 rc = (phba->sli4_hba.max_cfg_param.max_xri >> 3);
6929 if (phba->pport->cfg_lun_queue_depth > rc) {
6930 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
6931 "3362 LUN queue depth changed from %d to %d\n",
6932 phba->pport->cfg_lun_queue_depth, rc);
6933 phba->pport->cfg_lun_queue_depth = rc;
6934 }
6935
6936 if (bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) ==
6937 LPFC_SLI_INTF_IF_TYPE_0) {
6938 lpfc_set_features(phba, mboxq, LPFC_SET_UE_RECOVERY);
6939 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
6940 if (rc == MBX_SUCCESS) {
6941 phba->hba_flag |= HBA_RECOVERABLE_UE;
6942
6943 phba->eratt_poll_interval = 1;
6944 phba->sli4_hba.ue_to_sr = bf_get(
6945 lpfc_mbx_set_feature_UESR,
6946 &mboxq->u.mqe.un.set_feature);
6947 phba->sli4_hba.ue_to_rp = bf_get(
6948 lpfc_mbx_set_feature_UERP,
6949 &mboxq->u.mqe.un.set_feature);
6950 }
6951 }
6952
6953 if (phba->cfg_enable_mds_diags && phba->mds_diags_support) {
6954
6955 lpfc_set_features(phba, mboxq, LPFC_SET_MDS_DIAGS);
6956 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
6957 if (rc != MBX_SUCCESS)
6958 phba->mds_diags_support = 0;
6959 }
6960
6961
6962
6963
6964
6965 lpfc_request_features(phba, mboxq);
6966 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
6967 if (unlikely(rc)) {
6968 rc = -EIO;
6969 goto out_free_mbox;
6970 }
6971
6972
6973
6974
6975
6976 if (!(bf_get(lpfc_mbx_rq_ftr_rsp_fcpi, &mqe->un.req_ftrs))) {
6977 lpfc_printf_log(phba, KERN_WARNING, LOG_MBOX | LOG_SLI,
6978 "0378 No support for fcpi mode.\n");
6979 ftr_rsp++;
6980 }
6981
6982
6983 if (phba->hba_flag & HBA_FCOE_MODE) {
6984 if (bf_get(lpfc_mbx_rq_ftr_rsp_perfh, &mqe->un.req_ftrs))
6985 phba->sli3_options |= LPFC_SLI4_PERFH_ENABLED;
6986 else
6987 phba->sli3_options &= ~LPFC_SLI4_PERFH_ENABLED;
6988 }
6989
6990
6991
6992
6993
6994
6995 if (phba->sli3_options & LPFC_SLI3_BG_ENABLED) {
6996 if (!(bf_get(lpfc_mbx_rq_ftr_rsp_dif, &mqe->un.req_ftrs))) {
6997 phba->cfg_enable_bg = 0;
6998 phba->sli3_options &= ~LPFC_SLI3_BG_ENABLED;
6999 ftr_rsp++;
7000 }
7001 }
7002
7003 if (phba->max_vpi && phba->cfg_enable_npiv &&
7004 !(bf_get(lpfc_mbx_rq_ftr_rsp_npiv, &mqe->un.req_ftrs)))
7005 ftr_rsp++;
7006
7007 if (ftr_rsp) {
7008 lpfc_printf_log(phba, KERN_WARNING, LOG_MBOX | LOG_SLI,
7009 "0379 Feature Mismatch Data: x%08x %08x "
7010 "x%x x%x x%x\n", mqe->un.req_ftrs.word2,
7011 mqe->un.req_ftrs.word3, phba->cfg_enable_bg,
7012 phba->cfg_enable_npiv, phba->max_vpi);
7013 if (!(bf_get(lpfc_mbx_rq_ftr_rsp_dif, &mqe->un.req_ftrs)))
7014 phba->cfg_enable_bg = 0;
7015 if (!(bf_get(lpfc_mbx_rq_ftr_rsp_npiv, &mqe->un.req_ftrs)))
7016 phba->cfg_enable_npiv = 0;
7017 }
7018
7019
7020 spin_lock_irq(&phba->hbalock);
7021 phba->sli3_options |= (LPFC_SLI3_NPIV_ENABLED | LPFC_SLI3_HBQ_ENABLED);
7022 spin_unlock_irq(&phba->hbalock);
7023
7024
7025
7026
7027
7028 rc = lpfc_sli4_alloc_resource_identifiers(phba);
7029 if (rc) {
7030 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
7031 "2920 Failed to alloc Resource IDs "
7032 "rc = x%x\n", rc);
7033 goto out_free_mbox;
7034 }
7035
7036 lpfc_set_host_data(phba, mboxq);
7037
7038 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
7039 if (rc) {
7040 lpfc_printf_log(phba, KERN_WARNING, LOG_MBOX | LOG_SLI,
7041 "2134 Failed to set host os driver version %x",
7042 rc);
7043 }
7044
7045
7046 rc = lpfc_read_sparam(phba, mboxq, vport->vpi);
7047 if (rc) {
7048 phba->link_state = LPFC_HBA_ERROR;
7049 rc = -ENOMEM;
7050 goto out_free_mbox;
7051 }
7052
7053 mboxq->vport = vport;
7054 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
7055 mp = (struct lpfc_dmabuf *) mboxq->context1;
7056 if (rc == MBX_SUCCESS) {
7057 memcpy(&vport->fc_sparam, mp->virt, sizeof(struct serv_parm));
7058 rc = 0;
7059 }
7060
7061
7062
7063
7064
7065 lpfc_mbuf_free(phba, mp->virt, mp->phys);
7066 kfree(mp);
7067 mboxq->context1 = NULL;
7068 if (unlikely(rc)) {
7069 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
7070 "0382 READ_SPARAM command failed "
7071 "status %d, mbxStatus x%x\n",
7072 rc, bf_get(lpfc_mqe_status, mqe));
7073 phba->link_state = LPFC_HBA_ERROR;
7074 rc = -EIO;
7075 goto out_free_mbox;
7076 }
7077
7078 lpfc_update_vport_wwn(vport);
7079
7080
7081 fc_host_node_name(shost) = wwn_to_u64(vport->fc_nodename.u.wwn);
7082 fc_host_port_name(shost) = wwn_to_u64(vport->fc_portname.u.wwn);
7083
7084
7085 rc = lpfc_sli4_queue_create(phba);
7086 if (rc) {
7087 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
7088 "3089 Failed to allocate queues\n");
7089 rc = -ENODEV;
7090 goto out_free_mbox;
7091 }
7092
7093 rc = lpfc_sli4_queue_setup(phba);
7094 if (unlikely(rc)) {
7095 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
7096 "0381 Error %d during queue setup.\n ", rc);
7097 goto out_stop_timers;
7098 }
7099
7100 lpfc_sli4_setup(phba);
7101 lpfc_sli4_queue_init(phba);
7102
7103
7104 rc = lpfc_sli4_els_sgl_update(phba);
7105 if (unlikely(rc)) {
7106 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
7107 "1400 Failed to update xri-sgl size and "
7108 "mapping: %d\n", rc);
7109 goto out_destroy_queue;
7110 }
7111
7112
7113 rc = lpfc_sli4_repost_sgl_list(phba, &phba->sli4_hba.lpfc_els_sgl_list,
7114 phba->sli4_hba.els_xri_cnt);
7115 if (unlikely(rc < 0)) {
7116 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
7117 "0582 Error %d during els sgl post "
7118 "operation\n", rc);
7119 rc = -ENODEV;
7120 goto out_destroy_queue;
7121 }
7122 phba->sli4_hba.els_xri_cnt = rc;
7123
7124 if (phba->nvmet_support) {
7125
7126 rc = lpfc_sli4_nvmet_sgl_update(phba);
7127 if (unlikely(rc)) {
7128 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
7129 "6308 Failed to update nvmet-sgl size "
7130 "and mapping: %d\n", rc);
7131 goto out_destroy_queue;
7132 }
7133
7134
7135 rc = lpfc_sli4_repost_sgl_list(
7136 phba,
7137 &phba->sli4_hba.lpfc_nvmet_sgl_list,
7138 phba->sli4_hba.nvmet_xri_cnt);
7139 if (unlikely(rc < 0)) {
7140 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
7141 "3117 Error %d during nvmet "
7142 "sgl post\n", rc);
7143 rc = -ENODEV;
7144 goto out_destroy_queue;
7145 }
7146 phba->sli4_hba.nvmet_xri_cnt = rc;
7147
7148 cnt = phba->cfg_iocb_cnt * 1024;
7149
7150 cnt += phba->sli4_hba.nvmet_xri_cnt;
7151 } else {
7152
7153 rc = lpfc_sli4_scsi_sgl_update(phba);
7154 if (unlikely(rc)) {
7155 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
7156 "6309 Failed to update scsi-sgl size "
7157 "and mapping: %d\n", rc);
7158 goto out_destroy_queue;
7159 }
7160
7161
7162 rc = lpfc_sli4_nvme_sgl_update(phba);
7163 if (unlikely(rc)) {
7164 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
7165 "6082 Failed to update nvme-sgl size "
7166 "and mapping: %d\n", rc);
7167 goto out_destroy_queue;
7168 }
7169
7170 cnt = phba->cfg_iocb_cnt * 1024;
7171 }
7172
7173 if (!phba->sli.iocbq_lookup) {
7174
7175 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
7176 "2821 initialize iocb list %d total %d\n",
7177 phba->cfg_iocb_cnt, cnt);
7178 rc = lpfc_init_iocb_list(phba, cnt);
7179 if (rc) {
7180 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
7181 "1413 Failed to init iocb list.\n");
7182 goto out_destroy_queue;
7183 }
7184 }
7185
7186 if (phba->nvmet_support)
7187 lpfc_nvmet_create_targetport(phba);
7188
7189 if (phba->nvmet_support && phba->cfg_nvmet_mrq) {
7190
7191 for (i = 0; i < phba->cfg_nvmet_mrq; i++) {
7192 rqbp = phba->sli4_hba.nvmet_mrq_hdr[i]->rqbp;
7193 INIT_LIST_HEAD(&rqbp->rqb_buffer_list);
7194 rqbp->rqb_alloc_buffer = lpfc_sli4_nvmet_alloc;
7195 rqbp->rqb_free_buffer = lpfc_sli4_nvmet_free;
7196 rqbp->entry_count = LPFC_NVMET_RQE_DEF_COUNT;
7197 rqbp->buffer_count = 0;
7198
7199 lpfc_post_rq_buffer(
7200 phba, phba->sli4_hba.nvmet_mrq_hdr[i],
7201 phba->sli4_hba.nvmet_mrq_data[i],
7202 LPFC_NVMET_RQE_DEF_COUNT, i);
7203 }
7204 }
7205
7206 if (phba->cfg_enable_fc4_type & LPFC_ENABLE_FCP) {
7207
7208 rc = lpfc_sli4_repost_scsi_sgl_list(phba);
7209 if (unlikely(rc)) {
7210 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
7211 "0383 Error %d during scsi sgl post "
7212 "operation\n", rc);
7213
7214
7215 rc = -ENODEV;
7216 goto out_destroy_queue;
7217 }
7218 }
7219
7220 if ((phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) &&
7221 (phba->nvmet_support == 0)) {
7222
7223
7224 rc = lpfc_repost_nvme_sgl_list(phba);
7225 if (unlikely(rc)) {
7226 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
7227 "6116 Error %d during nvme sgl post "
7228 "operation\n", rc);
7229
7230
7231 rc = -ENODEV;
7232 goto out_destroy_queue;
7233 }
7234 }
7235
7236
7237 rc = lpfc_sli4_post_all_rpi_hdrs(phba);
7238 if (unlikely(rc)) {
7239 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
7240 "0393 Error %d during rpi post operation\n",
7241 rc);
7242 rc = -ENODEV;
7243 goto out_destroy_queue;
7244 }
7245 lpfc_sli4_node_prep(phba);
7246
7247 if (!(phba->hba_flag & HBA_FCOE_MODE)) {
7248 if ((phba->nvmet_support == 0) || (phba->cfg_nvmet_mrq == 1)) {
7249
7250
7251
7252 lpfc_reg_fcfi(phba, mboxq);
7253 mboxq->vport = phba->pport;
7254 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
7255 if (rc != MBX_SUCCESS)
7256 goto out_unset_queue;
7257 rc = 0;
7258 phba->fcf.fcfi = bf_get(lpfc_reg_fcfi_fcfi,
7259 &mboxq->u.mqe.un.reg_fcfi);
7260 } else {
7261
7262
7263
7264 lpfc_reg_fcfi_mrq(phba, mboxq, 0);
7265 mboxq->vport = phba->pport;
7266 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
7267 if (rc != MBX_SUCCESS)
7268 goto out_unset_queue;
7269 rc = 0;
7270 phba->fcf.fcfi = bf_get(lpfc_reg_fcfi_mrq_fcfi,
7271 &mboxq->u.mqe.un.reg_fcfi_mrq);
7272
7273
7274 lpfc_reg_fcfi_mrq(phba, mboxq, 1);
7275 mboxq->vport = phba->pport;
7276 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
7277 if (rc != MBX_SUCCESS)
7278 goto out_unset_queue;
7279 rc = 0;
7280 }
7281
7282 lpfc_sli_read_link_ste(phba);
7283 }
7284
7285
7286 lpfc_sli4_arm_cqeq_intr(phba);
7287
7288
7289 phba->sli4_hba.intr_enable = 1;
7290
7291
7292 spin_lock_irq(&phba->hbalock);
7293 phba->sli.sli_flag &= ~LPFC_SLI_ASYNC_MBX_BLK;
7294 spin_unlock_irq(&phba->hbalock);
7295
7296
7297 lpfc_sli4_rb_setup(phba);
7298
7299
7300 phba->fcf.fcf_flag = 0;
7301 phba->fcf.current_rec.flag = 0;
7302
7303
7304 mod_timer(&vport->els_tmofunc,
7305 jiffies + msecs_to_jiffies(1000 * (phba->fc_ratov * 2)));
7306
7307
7308 mod_timer(&phba->hb_tmofunc,
7309 jiffies + msecs_to_jiffies(1000 * LPFC_HB_MBOX_INTERVAL));
7310 phba->hb_outstanding = 0;
7311 phba->last_completion_time = jiffies;
7312
7313
7314 mod_timer(&phba->eratt_poll,
7315 jiffies + msecs_to_jiffies(1000 * phba->eratt_poll_interval));
7316
7317
7318 if (phba->cfg_aer_support == 1 && !(phba->hba_flag & HBA_AER_ENABLED)) {
7319 rc = pci_enable_pcie_error_reporting(phba->pcidev);
7320 if (!rc) {
7321 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
7322 "2829 This device supports "
7323 "Advanced Error Reporting (AER)\n");
7324 spin_lock_irq(&phba->hbalock);
7325 phba->hba_flag |= HBA_AER_ENABLED;
7326 spin_unlock_irq(&phba->hbalock);
7327 } else {
7328 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
7329 "2830 This device does not support "
7330 "Advanced Error Reporting (AER)\n");
7331 phba->cfg_aer_support = 0;
7332 }
7333 rc = 0;
7334 }
7335
7336
7337
7338
7339
7340 spin_lock_irq(&phba->hbalock);
7341 phba->link_state = LPFC_LINK_DOWN;
7342 spin_unlock_irq(&phba->hbalock);
7343 if (!(phba->hba_flag & HBA_FCOE_MODE) &&
7344 (phba->hba_flag & LINK_DISABLED)) {
7345 lpfc_printf_log(phba, KERN_ERR, LOG_INIT | LOG_SLI,
7346 "3103 Adapter Link is disabled.\n");
7347 lpfc_down_link(phba, mboxq);
7348 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
7349 if (rc != MBX_SUCCESS) {
7350 lpfc_printf_log(phba, KERN_ERR, LOG_INIT | LOG_SLI,
7351 "3104 Adapter failed to issue "
7352 "DOWN_LINK mbox cmd, rc:x%x\n", rc);
7353 goto out_unset_queue;
7354 }
7355 } else if (phba->cfg_suppress_link_up == LPFC_INITIALIZE_LINK) {
7356
7357 if (!(phba->link_flag & LS_LOOPBACK_MODE)) {
7358 rc = phba->lpfc_hba_init_link(phba, MBX_NOWAIT);
7359 if (rc)
7360 goto out_unset_queue;
7361 }
7362 }
7363 mempool_free(mboxq, phba->mbox_mem_pool);
7364 return rc;
7365out_unset_queue:
7366
7367 lpfc_sli4_queue_unset(phba);
7368out_destroy_queue:
7369 lpfc_free_iocb_list(phba);
7370 lpfc_sli4_queue_destroy(phba);
7371out_stop_timers:
7372 lpfc_stop_hba_timers(phba);
7373out_free_mbox:
7374 mempool_free(mboxq, phba->mbox_mem_pool);
7375 return rc;
7376}
7377
7378
7379
7380
7381
7382
7383
7384
7385
7386
7387
7388
7389
7390void
7391lpfc_mbox_timeout(struct timer_list *t)
7392{
7393 struct lpfc_hba *phba = from_timer(phba, t, sli.mbox_tmo);
7394 unsigned long iflag;
7395 uint32_t tmo_posted;
7396
7397 spin_lock_irqsave(&phba->pport->work_port_lock, iflag);
7398 tmo_posted = phba->pport->work_port_events & WORKER_MBOX_TMO;
7399 if (!tmo_posted)
7400 phba->pport->work_port_events |= WORKER_MBOX_TMO;
7401 spin_unlock_irqrestore(&phba->pport->work_port_lock, iflag);
7402
7403 if (!tmo_posted)
7404 lpfc_worker_wake_up(phba);
7405 return;
7406}
7407
7408
7409
7410
7411
7412
7413
7414
7415
/**
 * lpfc_sli4_mbox_completions_pending - Check for unprocessed mbox completions
 * @phba: pointer to the HBA context object.
 *
 * Scans the mailbox completion queue (mbx_cq) starting at the driver's
 * current consumer index for any valid entry that is a completed,
 * non-asynchronous MCQE, i.e. a mailbox command completion the driver
 * has not yet processed.  The queue state itself is not modified.
 *
 * Return: true if such a completion is pending, false otherwise.
 **/
static bool
lpfc_sli4_mbox_completions_pending(struct lpfc_hba *phba)
{

	uint32_t idx;
	struct lpfc_queue *mcq;
	struct lpfc_mcqe *mcqe;
	bool pending_completions = false;
	uint8_t qe_valid;

	/* Only meaningful on SLI-4 hardware. */
	if (unlikely(!phba) || (phba->sli_rev != LPFC_SLI_REV4))
		return false;

	/* Walk the ring of valid CQEs beginning at hba_index.  An entry
	 * is "valid" while its valid bit matches the expected qe_valid.
	 */
	mcq = phba->sli4_hba.mbx_cq;
	idx = mcq->hba_index;
	qe_valid = mcq->qe_valid;
	while (bf_get_le32(lpfc_cqe_valid, mcq->qe[idx].cqe) == qe_valid) {
		mcqe = (struct lpfc_mcqe *)mcq->qe[idx].cqe;
		if (bf_get_le32(lpfc_trailer_completed, mcqe) &&
		    (!bf_get_le32(lpfc_trailer_async, mcqe))) {
			/* Found a completed, non-async mailbox CQE. */
			pending_completions = true;
			break;
		}
		idx = (idx + 1) % mcq->entry_count;
		/* Stop after one full pass around the ring. */
		if (mcq->hba_index == idx)
			break;

		/* On auto-valid (cqav) capable queues the expected valid
		 * bit flips each time the ring wraps to index 0.
		 */
		if (phba->sli4_hba.pc_sli4_params.cqav && !idx)
			qe_valid = (qe_valid) ? 0 : 1;
	}
	return pending_completions;

}
7452
7453
7454
7455
7456
7457
7458
7459
7460
7461
7462
7463
/**
 * lpfc_sli4_process_missed_mbox_completions - Process a missed mbox completion
 * @phba: pointer to the HBA context object.
 *
 * Checks whether a mailbox completion is sitting unprocessed on the
 * mailbox CQ (e.g. because its interrupt was lost) and, if so, drives
 * the event queue associated with that CQ by hand so the completion is
 * handled.  The EQ is re-armed before returning in either case.
 *
 * Return: true if a pending mailbox completion was found and processed,
 * false otherwise (including non-SLI4 HBAs or no matching EQ).
 **/
bool
lpfc_sli4_process_missed_mbox_completions(struct lpfc_hba *phba)
{
	struct lpfc_sli4_hba *sli4_hba = &phba->sli4_hba;
	uint32_t eqidx;
	struct lpfc_queue *fpeq = NULL;
	struct lpfc_eqe *eqe;
	bool mbox_pending;

	/* Only meaningful on SLI-4 hardware. */
	if (unlikely(!phba) || (phba->sli_rev != LPFC_SLI_REV4))
		return false;

	/* Find the EQ that services the mailbox CQ by matching the CQ's
	 * associated queue id against each fast-path EQ's queue id.
	 */
	if (sli4_hba->hba_eq)
		for (eqidx = 0; eqidx < phba->io_channel_irqs; eqidx++)
			if (sli4_hba->hba_eq[eqidx]->queue_id ==
			    sli4_hba->mbx_cq->assoc_qid) {
				fpeq = sli4_hba->hba_eq[eqidx];
				break;
			}
	if (!fpeq)
		return false;

	/* Disarm the EQ's interrupt while we poll it manually. */
	sli4_hba->sli4_eq_clr_intr(fpeq);

	/* Check for a mailbox completion the ISR never saw. */
	mbox_pending = lpfc_sli4_mbox_completions_pending(phba);

	/* If one is pending, consume and handle every EQE on this EQ
	 * (which will in turn process the mailbox CQ entry).
	 */
	if (mbox_pending)
		while ((eqe = lpfc_sli4_eq_get(fpeq))) {
			lpfc_sli4_hba_handle_eqe(phba, eqe, eqidx);
			fpeq->EQ_processed++;
		}

	/* Always re-arm the EQ so normal interrupt delivery resumes. */
	sli4_hba->sli4_eq_release(fpeq, LPFC_QUEUE_REARM);

	return mbox_pending;

}
7516
7517
7518
7519
7520
7521
7522
7523
7524
/**
 * lpfc_mbox_timeout_handler - Worker-thread handler for a mailbox timeout
 * @phba: pointer to the HBA context object.
 *
 * Runs in worker-thread context after lpfc_mbox_timeout() posted a
 * WORKER_MBOX_TMO event.  First tries to recover a completion that was
 * merely missed; failing that, it declares the mailbox truly hung,
 * aborts outstanding FCP I/O, and resets the HBA.
 **/
void
lpfc_mbox_timeout_handler(struct lpfc_hba *phba)
{
	LPFC_MBOXQ_t *pmbox = phba->sli.mbox_active;
	MAILBOX_t *mb = NULL;

	struct lpfc_sli *psli = &phba->sli;

	/* If the completion was simply missed, processing it is
	 * sufficient recovery - no reset needed.
	 */
	if (lpfc_sli4_process_missed_mbox_completions(phba))
		return;

	if (pmbox != NULL)
		mb = &pmbox->u.mb;

	/* Re-check mbox_active under the lock: the command may have
	 * completed between the timer firing and this handler running,
	 * in which case there is nothing left to do.
	 */
	spin_lock_irq(&phba->hbalock);
	if (pmbox == NULL) {
		lpfc_printf_log(phba, KERN_WARNING,
				LOG_MBOX | LOG_SLI,
				"0353 Active Mailbox cleared - mailbox timeout "
				"exiting\n");
		spin_unlock_irq(&phba->hbalock);
		return;
	}

	/* The mailbox is genuinely hung - log the offending command. */
	lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
			"0310 Mailbox command x%x timeout Data: x%x x%x x%p\n",
			mb->mbxCommand,
			phba->pport->port_state,
			phba->sli.sli_flag,
			phba->sli.mbox_active);
	spin_unlock_irq(&phba->hbalock);

	/* Clear the pending timeout event and take the SLI layer
	 * offline (link state unknown, SLI no longer active) before
	 * starting recovery.
	 */
	spin_lock_irq(&phba->pport->work_port_lock);
	phba->pport->work_port_events &= ~WORKER_MBOX_TMO;
	spin_unlock_irq(&phba->pport->work_port_lock);
	spin_lock_irq(&phba->hbalock);
	phba->link_state = LPFC_LINK_UNKNOWN;
	psli->sli_flag &= ~LPFC_SLI_ACTIVE;
	spin_unlock_irq(&phba->hbalock);

	lpfc_sli_abort_fcp_rings(phba);

	lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
			"0345 Resetting board due to mailbox timeout\n");

	/* Full HBA reset is the only remaining recovery option. */
	lpfc_reset_hba(phba);
}
7583
7584
7585
7586
7587
7588
7589
7590
7591
7592
7593
7594
7595
7596
7597
7598
7599
7600
7601
7602
7603
7604
7605
7606
7607
7608
7609
/**
 * lpfc_sli_issue_mbox_s3 - Issue an SLI-3 mailbox command to firmware
 * @phba: pointer to the HBA context object.
 * @pmbox: mailbox command to issue, or NULL to pull the next queued one.
 * @flag: MBX_POLL (busy-wait for completion) or MBX_NOWAIT (async).
 *
 * Posts a mailbox command either through the host-memory mailbox (when
 * LPFC_SLI_ACTIVE is set) or directly through SLIM.  In MBX_POLL mode
 * the function spins until the chip releases ownership or the command
 * times out; in MBX_NOWAIT mode it rings the attention doorbell and
 * returns, leaving completion to the interrupt handler.
 *
 * Return: MBX_SUCCESS, MBX_BUSY, MBX_NOT_FINISHED, or (in poll mode)
 * the command's mbxStatus.
 **/
static int
lpfc_sli_issue_mbox_s3(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmbox,
		       uint32_t flag)
{
	MAILBOX_t *mbx;
	struct lpfc_sli *psli = &phba->sli;
	uint32_t status, evtctr;
	uint32_t ha_copy, hc_copy;
	int i;
	unsigned long timeout;
	unsigned long drvr_flag = 0;
	uint32_t word0, ldata;
	void __iomem *to_slim;
	int processing_queue = 0;

	spin_lock_irqsave(&phba->hbalock, drvr_flag);
	if (!pmbox) {
		phba->sli.sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
		/* Called without a command: service the mbox queue,
		 * unless async posting is currently blocked.
		 */
		if (unlikely(psli->sli_flag & LPFC_SLI_ASYNC_MBX_BLK)) {
			spin_unlock_irqrestore(&phba->hbalock, drvr_flag);
			return MBX_SUCCESS;
		}
		processing_queue = 1;
		pmbox = lpfc_mbox_get(phba);
		if (!pmbox) {
			spin_unlock_irqrestore(&phba->hbalock, drvr_flag);
			return MBX_SUCCESS;
		}
	}

	/* Commands with a custom completion handler must carry a vport. */
	if (pmbox->mbox_cmpl && pmbox->mbox_cmpl != lpfc_sli_def_mbox_cmpl &&
	    pmbox->mbox_cmpl != lpfc_sli_wake_mbox_wait) {
		if(!pmbox->vport) {
			spin_unlock_irqrestore(&phba->hbalock, drvr_flag);
			lpfc_printf_log(phba, KERN_ERR,
					LOG_MBOX | LOG_VPORT,
					"1806 Mbox x%x failed. No vport\n",
					pmbox->u.mb.mbxCommand);
			dump_stack();
			goto out_not_finished;
		}
	}

	/* If the PCI channel is offline, do not post the mailbox. */
	if (unlikely(pci_channel_offline(phba->pcidev))) {
		spin_unlock_irqrestore(&phba->hbalock, drvr_flag);
		goto out_not_finished;
	}

	/* A deferred error attention also prevents posting. */
	if (unlikely(phba->hba_flag & DEFER_ERATT)) {
		spin_unlock_irqrestore(&phba->hbalock, drvr_flag);
		goto out_not_finished;
	}

	psli = &phba->sli;

	mbx = &pmbox->u.mb;
	status = MBX_SUCCESS;

	if (phba->link_state == LPFC_HBA_ERROR) {
		spin_unlock_irqrestore(&phba->hbalock, drvr_flag);

		/* Mbox command <mbxCommand> cannot issue */
		lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
				"(%d):0311 Mailbox command x%x cannot "
				"issue Data: x%x x%x\n",
				pmbox->vport ? pmbox->vport->vpi : 0,
				pmbox->u.mb.mbxCommand, psli->sli_flag, flag);
		goto out_not_finished;
	}

	/* Async commands (other than KILL_BOARD) require the mailbox
	 * interrupt to be enabled in the Host Control register.
	 */
	if (mbx->mbxCommand != MBX_KILL_BOARD && flag & MBX_NOWAIT) {
		if (lpfc_readl(phba->HCregaddr, &hc_copy) ||
			!(hc_copy & HC_MBINT_ENA)) {
			spin_unlock_irqrestore(&phba->hbalock, drvr_flag);
			lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
				"(%d):2528 Mailbox command x%x cannot "
				"issue Data: x%x x%x\n",
				pmbox->vport ? pmbox->vport->vpi : 0,
				pmbox->u.mb.mbxCommand, psli->sli_flag, flag);
			goto out_not_finished;
		}
	}

	if (psli->sli_flag & LPFC_SLI_MBOX_ACTIVE) {
		/* Another mailbox command is already in flight.  Polled
		 * commands cannot queue behind it - fail them here.
		 */
		if (flag & MBX_POLL) {
			spin_unlock_irqrestore(&phba->hbalock, drvr_flag);

			/* Mbox command <mbxCommand> cannot issue */
			lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
					"(%d):2529 Mailbox command x%x "
					"cannot issue Data: x%x x%x\n",
					pmbox->vport ? pmbox->vport->vpi : 0,
					pmbox->u.mb.mbxCommand,
					psli->sli_flag, flag);
			goto out_not_finished;
		}

		/* If SLI is not active, only KILL_BOARD may proceed;
		 * queuing behind an active command is not possible.
		 */
		if (!(psli->sli_flag & LPFC_SLI_ACTIVE)) {
			spin_unlock_irqrestore(&phba->hbalock, drvr_flag);
			/* Mbox command <mbxCommand> cannot issue */
			lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
					"(%d):2530 Mailbox command x%x "
					"cannot issue Data: x%x x%x\n",
					pmbox->vport ? pmbox->vport->vpi : 0,
					pmbox->u.mb.mbxCommand,
					psli->sli_flag, flag);
			goto out_not_finished;
		}

		/* Queue the command for later; the interrupt handler
		 * will issue it when the active command completes.
		 */
		lpfc_mbox_put(phba, pmbox);

		/* Mbox cmd issue - BUSY */
		lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI,
				"(%d):0308 Mbox cmd issue - BUSY Data: "
				"x%x x%x x%x x%x\n",
				pmbox->vport ? pmbox->vport->vpi : 0xffffff,
				mbx->mbxCommand,
				phba->pport ? phba->pport->port_state : 0xff,
				psli->sli_flag, flag);

		psli->slistat.mbox_busy++;
		spin_unlock_irqrestore(&phba->hbalock, drvr_flag);

		if (pmbox->vport) {
			lpfc_debugfs_disc_trc(pmbox->vport,
				LPFC_DISC_TRC_MBOX_VPORT,
				"MBOX Bsy vport:  cmd:x%x mb:x%x x%x",
				(uint32_t)mbx->mbxCommand,
				mbx->un.varWords[0], mbx->un.varWords[1]);
		}
		else {
			lpfc_debugfs_disc_trc(phba->pport,
				LPFC_DISC_TRC_MBOX,
				"MBOX Bsy:        cmd:x%x mb:x%x x%x",
				(uint32_t)mbx->mbxCommand,
				mbx->un.varWords[0], mbx->un.varWords[1]);
		}

		return MBX_BUSY;
	}

	psli->sli_flag |= LPFC_SLI_MBOX_ACTIVE;

	/* For async commands, start the mailbox timeout timer now. */
	if (flag != MBX_POLL) {
		if (!(psli->sli_flag & LPFC_SLI_ACTIVE) &&
		    (mbx->mbxCommand != MBX_KILL_BOARD)) {
			psli->sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
			spin_unlock_irqrestore(&phba->hbalock, drvr_flag);
			/* Mbox command <mbxCommand> cannot issue */
			lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
					"(%d):2531 Mailbox command x%x "
					"cannot issue Data: x%x x%x\n",
					pmbox->vport ? pmbox->vport->vpi : 0,
					pmbox->u.mb.mbxCommand,
					psli->sli_flag, flag);
			goto out_not_finished;
		}
		/* timeout active mbox command */
		timeout = msecs_to_jiffies(lpfc_mbox_tmo_val(phba, pmbox) *
					   1000);
		mod_timer(&psli->mbox_tmo, jiffies + timeout);
	}

	/* Mailbox cmd <cmd> issue */
	lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI,
			"(%d):0309 Mailbox cmd x%x issue Data: x%x x%x "
			"x%x\n",
			pmbox->vport ? pmbox->vport->vpi : 0,
			mbx->mbxCommand,
			phba->pport ? phba->pport->port_state : 0xff,
			psli->sli_flag, flag);

	if (mbx->mbxCommand != MBX_HEARTBEAT) {
		if (pmbox->vport) {
			lpfc_debugfs_disc_trc(pmbox->vport,
				LPFC_DISC_TRC_MBOX_VPORT,
				"MBOX Send vport: cmd:x%x mb:x%x x%x",
				(uint32_t)mbx->mbxCommand,
				mbx->un.varWords[0], mbx->un.varWords[1]);
		}
		else {
			lpfc_debugfs_disc_trc(phba->pport,
				LPFC_DISC_TRC_MBOX,
				"MBOX Send:       cmd:x%x mb:x%x x%x",
				(uint32_t)mbx->mbxCommand,
				mbx->un.varWords[0], mbx->un.varWords[1]);
		}
	}

	psli->slistat.mbox_cmd++;
	evtctr = psli->slistat.mbox_event;

	/* Hand the command over to the chip. */
	mbx->mbxOwner = OWN_CHIP;

	if (psli->sli_flag & LPFC_SLI_ACTIVE) {
		/* Host-memory mailbox path: record the extension offset
		 * relative to the base mailbox, copy in any extension
		 * data, then copy the command into host mailbox memory.
		 */
		if (pmbox->in_ext_byte_len || pmbox->out_ext_byte_len) {
			*(((uint32_t *)mbx) + pmbox->mbox_offset_word)
				= (uint8_t *)phba->mbox_ext
				  - (uint8_t *)phba->mbox;
		}

		/* Copy the mailbox extension data */
		if (pmbox->in_ext_byte_len && pmbox->context2) {
			lpfc_sli_pcimem_bcopy(pmbox->context2,
				(uint8_t *)phba->mbox_ext,
				pmbox->in_ext_byte_len);
		}
		/* Copy command data to host SLIM area */
		lpfc_sli_pcimem_bcopy(mbx, phba->mbox, MAILBOX_CMD_SIZE);
	} else {
		/* SLIM path: extension data lives at a fixed offset in
		 * adapter SLIM instead of host memory.
		 */
		if (pmbox->in_ext_byte_len || pmbox->out_ext_byte_len)
			*(((uint32_t *)mbx) + pmbox->mbox_offset_word)
				= MAILBOX_HBA_EXT_OFFSET;

		/* Copy the mailbox extension data */
		if (pmbox->in_ext_byte_len && pmbox->context2)
			lpfc_memcpy_to_slim(phba->MBslimaddr +
				MAILBOX_HBA_EXT_OFFSET,
				pmbox->context2, pmbox->in_ext_byte_len);

		if (mbx->mbxCommand == MBX_CONFIG_PORT)
			/* copy command data into host mbox for cmpl */
			lpfc_sli_pcimem_bcopy(mbx, phba->mbox,
					      MAILBOX_CMD_SIZE);

		/* First copy mbox command data to HBA SLIM, skip past first
		   word */
		to_slim = phba->MBslimaddr + sizeof (uint32_t);
		lpfc_memcpy_to_slim(to_slim, &mbx->un.varWords[0],
			    MAILBOX_CMD_SIZE - sizeof (uint32_t));

		/* Next copy over first word, with mbxOwner set - writing
		 * word 0 last hands ownership to the chip atomically.
		 */
		ldata = *((uint32_t *)mbx);
		to_slim = phba->MBslimaddr;
		writel(ldata, to_slim);
		readl(to_slim); /* flush */

		if (mbx->mbxCommand == MBX_CONFIG_PORT)
			/* switch over to host mailbox */
			psli->sli_flag |= LPFC_SLI_ACTIVE;
	}

	wmb();

	switch (flag) {
	case MBX_NOWAIT:
		/* Set up reference to the active mailbox command. */
		psli->mbox_active = pmbox;
		/* Interrupt board to do it; completion via ISR. */
		writel(CA_MBATT, phba->CAregaddr);
		readl(phba->CAregaddr); /* flush */
		/* Don't wait for it to finish, just return */
		break;

	case MBX_POLL:
		/* No active command in poll mode - we wait in place. */
		psli->mbox_active = NULL;
		/* Interrupt board to do it */
		writel(CA_MBATT, phba->CAregaddr);
		readl(phba->CAregaddr); /* flush */

		if (psli->sli_flag & LPFC_SLI_ACTIVE) {
			/* First read mbox status word */
			word0 = *((uint32_t *)phba->mbox);
			word0 = le32_to_cpu(word0);
		} else {
			/* First read mbox status word from SLIM */
			if (lpfc_readl(phba->MBslimaddr, &word0)) {
				spin_unlock_irqrestore(&phba->hbalock,
						       drvr_flag);
				goto out_not_finished;
			}
		}

		/* Read the HBA Host Attention Register */
		if (lpfc_readl(phba->HAregaddr, &ha_copy)) {
			spin_unlock_irqrestore(&phba->hbalock,
						       drvr_flag);
			goto out_not_finished;
		}
		timeout = msecs_to_jiffies(lpfc_mbox_tmo_val(phba, pmbox) *
							1000) + jiffies;
		i = 0;
		/* Spin until the chip releases ownership and raises the
		 * mailbox attention bit, or the command times out.
		 */
		while (((word0 & OWN_CHIP) == OWN_CHIP) ||
		       (!(ha_copy & HA_MBATT) &&
			(phba->link_state > LPFC_WARM_START))) {
			if (time_after(jiffies, timeout)) {
				psli->sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
				spin_unlock_irqrestore(&phba->hbalock,
						       drvr_flag);
				goto out_not_finished;
			}

			/* Check if we took a mbox interrupt while we were
			   polling */
			if (((word0 & OWN_CHIP) != OWN_CHIP)
			    && (evtctr != psli->slistat.mbox_event))
				break;

			/* After ~10 fast spins, back off with msleep(1),
			 * dropping the lock while sleeping.
			 */
			if (i++ > 10) {
				spin_unlock_irqrestore(&phba->hbalock,
						       drvr_flag);
				msleep(1);
				spin_lock_irqsave(&phba->hbalock, drvr_flag);
			}

			if (psli->sli_flag & LPFC_SLI_ACTIVE) {
				/* First copy command data */
				word0 = *((uint32_t *)phba->mbox);
				word0 = le32_to_cpu(word0);
				if (mbx->mbxCommand == MBX_CONFIG_PORT) {
					MAILBOX_t *slimmb;
					uint32_t slimword0;
					/* Check real SLIM for any errors -
					 * CONFIG_PORT may have failed before
					 * the host mailbox took effect.
					 */
					slimword0 = readl(phba->MBslimaddr);
					slimmb = (MAILBOX_t *) & slimword0;
					if (((slimword0 & OWN_CHIP) != OWN_CHIP)
					    && slimmb->mbxStatus) {
						psli->sli_flag &=
						    ~LPFC_SLI_ACTIVE;
						word0 = slimword0;
					}
				}
			} else {
				/* First copy command data */
				word0 = readl(phba->MBslimaddr);
			}
			/* Read the HBA Host Attention Register */
			if (lpfc_readl(phba->HAregaddr, &ha_copy)) {
				spin_unlock_irqrestore(&phba->hbalock,
						       drvr_flag);
				goto out_not_finished;
			}
		}

		if (psli->sli_flag & LPFC_SLI_ACTIVE) {
			/* copy results back to user */
			lpfc_sli_pcimem_bcopy(phba->mbox, mbx,
						MAILBOX_CMD_SIZE);
			/* Copy the mailbox extension data */
			if (pmbox->out_ext_byte_len && pmbox->context2) {
				lpfc_sli_pcimem_bcopy(phba->mbox_ext,
						      pmbox->context2,
						      pmbox->out_ext_byte_len);
			}
		} else {
			/* First copy command data */
			lpfc_memcpy_from_slim(mbx, phba->MBslimaddr,
						MAILBOX_CMD_SIZE);
			/* Copy the mailbox extension data */
			if (pmbox->out_ext_byte_len && pmbox->context2) {
				lpfc_memcpy_from_slim(pmbox->context2,
					phba->MBslimaddr +
					MAILBOX_HBA_EXT_OFFSET,
					pmbox->out_ext_byte_len);
			}
		}

		/* Acknowledge the mailbox attention. */
		writel(HA_MBATT, phba->HAregaddr);
		readl(phba->HAregaddr); /* flush */

		psli->sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
		status = mbx->mbxStatus;
	}

	spin_unlock_irqrestore(&phba->hbalock, drvr_flag);
	return status;

out_not_finished:
	/* If this command came off the queue, complete it with an error
	 * status so its completion handler still runs.
	 */
	if (processing_queue) {
		pmbox->u.mb.mbxStatus = MBX_NOT_FINISHED;
		lpfc_mbox_cmpl_put(phba, pmbox);
	}
	return MBX_NOT_FINISHED;
}
8001
8002
8003
8004
8005
8006
8007
8008
8009
8010
8011
8012
8013
/**
 * lpfc_sli4_async_mbox_block - Block posting of async mailbox commands
 * @phba: pointer to the HBA context object.
 *
 * Sets LPFC_SLI_ASYNC_MBX_BLK to stop new asynchronous mailbox posting,
 * then waits for any currently-active mailbox command to drain, up to
 * that command's own timeout.  On timeout the block flag is cleared
 * again so callers do not leave the port wedged.
 *
 * NOTE(review): if no command is active when the flag is taken, timeout
 * stays 0; should a command become active in the window before the wait
 * loop, time_after(jiffies, 0) fails it immediately.  Presumably the
 * ASYNC_MBX_BLK flag closes that window - TODO confirm against the
 * posting paths.
 *
 * Return: 0 when the port quiesced, 1 on timeout.
 **/
static int
lpfc_sli4_async_mbox_block(struct lpfc_hba *phba)
{
	struct lpfc_sli *psli = &phba->sli;
	int rc = 0;
	unsigned long timeout = 0;

	/* Mark async mailbox posting as blocked */
	spin_lock_irq(&phba->hbalock);
	psli->sli_flag |= LPFC_SLI_ASYNC_MBX_BLK;

	/* Derive the wait deadline from the active command's own
	 * mailbox timeout, if there is an active command at all.
	 */
	if (phba->sli.mbox_active)
		timeout = msecs_to_jiffies(lpfc_mbox_tmo_val(phba,
						phba->sli.mbox_active) *
						1000) + jiffies;
	spin_unlock_irq(&phba->hbalock);

	/* Recover any completion whose interrupt may have been missed,
	 * so the wait below isn't for a command that already finished.
	 */
	if (timeout)
		lpfc_sli4_process_missed_mbox_completions(phba);

	/* Wait for the outstanding mailbox command to complete */
	while (phba->sli.mbox_active) {
		/* Check active mailbox complete status every 2ms */
		msleep(2);
		if (time_after(jiffies, timeout)) {
			/* Timeout: the outstanding command never completed */
			rc = 1;
			break;
		}
	}

	/* Could not cleanly block - undo the block flag before failing */
	if (rc) {
		spin_lock_irq(&phba->hbalock);
		psli->sli_flag &= ~LPFC_SLI_ASYNC_MBX_BLK;
		spin_unlock_irq(&phba->hbalock);
	}
	return rc;
}
8056
8057
8058
8059
8060
8061
8062
8063
8064
8065
8066
8067
8068static void
8069lpfc_sli4_async_mbox_unblock(struct lpfc_hba *phba)
8070{
8071 struct lpfc_sli *psli = &phba->sli;
8072
8073 spin_lock_irq(&phba->hbalock);
8074 if (!(psli->sli_flag & LPFC_SLI_ASYNC_MBX_BLK)) {
8075
8076 spin_unlock_irq(&phba->hbalock);
8077 return;
8078 }
8079
8080
8081
8082
8083
8084
8085 psli->sli_flag &= ~LPFC_SLI_ASYNC_MBX_BLK;
8086 spin_unlock_irq(&phba->hbalock);
8087
8088
8089 lpfc_worker_wake_up(phba);
8090}
8091
8092
8093
8094
8095
8096
8097
8098
8099
8100
8101
8102
8103static int
8104lpfc_sli4_wait_bmbx_ready(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
8105{
8106 uint32_t db_ready;
8107 unsigned long timeout;
8108 struct lpfc_register bmbx_reg;
8109
8110 timeout = msecs_to_jiffies(lpfc_mbox_tmo_val(phba, mboxq)
8111 * 1000) + jiffies;
8112
8113 do {
8114 bmbx_reg.word0 = readl(phba->sli4_hba.BMBXregaddr);
8115 db_ready = bf_get(lpfc_bmbx_rdy, &bmbx_reg);
8116 if (!db_ready)
8117 msleep(2);
8118
8119 if (time_after(jiffies, timeout))
8120 return MBXERR_ERROR;
8121 } while (!db_ready);
8122
8123 return 0;
8124}
8125
8126
8127
8128
8129
8130
8131
8132
8133
8134
8135
8136
8137
8138
8139
8140
8141
/**
 * lpfc_sli4_post_sync_mbox - Post a mailbox via the SLI-4 bootstrap mailbox
 * @phba: pointer to the HBA context object.
 * @mboxq: mailbox command to post.
 *
 * Issues @mboxq synchronously through the bootstrap mailbox (BMBX)
 * register pair: the MQE is copied into the bootstrap DMA region, the
 * high then low halves of its physical address are written to the BMBX
 * register (waiting for the ready bit between each step), and the
 * completed MQE and MCQE are copied back on completion.  The caller
 * must not have another mailbox command active.
 *
 * Return: MBX_SUCCESS on success, MBXERR_ERROR on busy/timeout/failure
 * (with the MQE status updated from the MCQE status when it failed).
 **/
static int
lpfc_sli4_post_sync_mbox(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
{
	int rc = MBX_SUCCESS;
	unsigned long iflag;
	uint32_t mcqe_status;
	uint32_t mbx_cmnd;
	struct lpfc_sli *psli = &phba->sli;
	struct lpfc_mqe *mb = &mboxq->u.mqe;
	struct lpfc_bmbx_create *mbox_rgn;
	struct dma_address *dma_address;

	/* Only one mailbox command may be outstanding; take ownership
	 * of the mailbox under the lock or fail with a busy error.
	 */
	spin_lock_irqsave(&phba->hbalock, iflag);
	if (psli->sli_flag & LPFC_SLI_MBOX_ACTIVE) {
		spin_unlock_irqrestore(&phba->hbalock, iflag);
		lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
				"(%d):2532 Mailbox command x%x (x%x/x%x) "
				"cannot issue Data: x%x x%x\n",
				mboxq->vport ? mboxq->vport->vpi : 0,
				mboxq->u.mb.mbxCommand,
				lpfc_sli_config_mbox_subsys_get(phba, mboxq),
				lpfc_sli_config_mbox_opcode_get(phba, mboxq),
				psli->sli_flag, MBX_POLL);
		return MBXERR_ERROR;
	}
	/* The device must be ready before posting the mailbox command */
	psli->sli_flag |= LPFC_SLI_MBOX_ACTIVE;
	phba->sli.mbox_active = mboxq;
	spin_unlock_irqrestore(&phba->hbalock, iflag);

	/* wait for bootstrap mbox register for readyness */
	rc = lpfc_sli4_wait_bmbx_ready(phba, mboxq);
	if (rc)
		goto exit;

	/* Copy the caller's MQE into the bootstrap mailbox DMA region.
	 * The region is cleared first so stale completion data from a
	 * previous command cannot be misread later.
	 */
	mbx_cmnd = bf_get(lpfc_mqe_command, mb);
	memset(phba->sli4_hba.bmbx.avirt, 0, sizeof(struct lpfc_bmbx_create));
	lpfc_sli_pcimem_bcopy(mb, phba->sli4_hba.bmbx.avirt,
			      sizeof(struct lpfc_mqe));

	/* Post the high mailbox dma address to the port and wait for ready. */
	dma_address = &phba->sli4_hba.bmbx.dma_address;
	writel(dma_address->addr_hi, phba->sli4_hba.BMBXregaddr);

	/* wait for bootstrap mbox register for hi-address write done */
	rc = lpfc_sli4_wait_bmbx_ready(phba, mboxq);
	if (rc)
		goto exit;

	/* Post the low mailbox dma address to the port. */
	writel(dma_address->addr_lo, phba->sli4_hba.BMBXregaddr);

	/* wait for bootstrap mbox register for low address write done */
	rc = lpfc_sli4_wait_bmbx_ready(phba, mboxq);
	if (rc)
		goto exit;

	/* Command completed: copy the updated MQE back to the caller's
	 * mailbox, then capture the MCQE completion status.
	 */
	lpfc_sli_pcimem_bcopy(phba->sli4_hba.bmbx.avirt, mb,
			      sizeof(struct lpfc_mqe));
	mbox_rgn = (struct lpfc_bmbx_create *) phba->sli4_hba.bmbx.avirt;
	lpfc_sli_pcimem_bcopy(&mbox_rgn->mcqe, &mboxq->mcqe,
			      sizeof(struct lpfc_mcqe));
	mcqe_status = bf_get(lpfc_mcqe_status, &mbox_rgn->mcqe);

	/* If the MCQE reports failure but the MQE status still says
	 * success, propagate the MCQE status into the MQE (tagged with
	 * LPFC_MBX_ERROR_RANGE) so callers see the real outcome.
	 */
	if (mcqe_status != MB_CQE_STATUS_SUCCESS) {
		if (bf_get(lpfc_mqe_status, mb) == MBX_SUCCESS)
			bf_set(lpfc_mqe_status, mb,
			       (LPFC_MBX_ERROR_RANGE | mcqe_status));
		rc = MBXERR_ERROR;
	} else
		lpfc_sli4_swap_str(phba, mboxq);

	lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI,
			"(%d):0356 Mailbox cmd x%x (x%x/x%x) Status x%x "
			"Data: x%x x%x x%x x%x x%x x%x x%x x%x x%x x%x x%x"
			" x%x x%x CQ: x%x x%x x%x x%x\n",
			mboxq->vport ? mboxq->vport->vpi : 0, mbx_cmnd,
			lpfc_sli_config_mbox_subsys_get(phba, mboxq),
			lpfc_sli_config_mbox_opcode_get(phba, mboxq),
			bf_get(lpfc_mqe_status, mb),
			mb->un.mb_words[0], mb->un.mb_words[1],
			mb->un.mb_words[2], mb->un.mb_words[3],
			mb->un.mb_words[4], mb->un.mb_words[5],
			mb->un.mb_words[6], mb->un.mb_words[7],
			mb->un.mb_words[8], mb->un.mb_words[9],
			mb->un.mb_words[10], mb->un.mb_words[11],
			mb->un.mb_words[12], mboxq->mcqe.word0,
			mboxq->mcqe.mcqe_tag0, 	mboxq->mcqe.mcqe_tag1,
			mboxq->mcqe.trailer);
exit:
	/* We are holding the token, no needed for lock when release */
	spin_lock_irqsave(&phba->hbalock, iflag);
	psli->sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
	phba->sli.mbox_active = NULL;
	spin_unlock_irqrestore(&phba->hbalock, iflag);
	return rc;
}
8257
8258
8259
8260
8261
8262
8263
8264
8265
8266
8267
8268
8269
/**
 * lpfc_sli_issue_mbox_s4 - Issue an SLI-4 mailbox command to firmware
 * @phba: pointer to the HBA context object.
 * @mboxq: mailbox command to issue.
 * @flag: MBX_POLL (synchronous via bootstrap mailbox) or MBX_NOWAIT
 *        (enqueue for the worker thread to post asynchronously).
 *
 * With interrupts disabled only MBX_POLL is possible, handled through
 * lpfc_sli4_post_sync_mbox().  With interrupts enabled, MBX_POLL first
 * blocks async posting, runs synchronously, then unblocks; MBX_NOWAIT
 * validates the command, queues it, and wakes the worker thread.
 *
 * Return: MBX_SUCCESS / mailbox status, MBX_BUSY when enqueued, or
 * MBX_NOT_FINISHED on validation failure; -EIO for async-with-no-intr.
 **/
static int
lpfc_sli_issue_mbox_s4(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq,
		       uint32_t flag)
{
	struct lpfc_sli *psli = &phba->sli;
	unsigned long iflags;
	int rc;

	/* dump from issue mailbox command for debug */
	lpfc_idiag_mbxacc_dump_issue_mbox(phba, &mboxq->u.mb);

	/* Detect polling mode and jump to a handler */
	rc = lpfc_mbox_dev_check(phba);
	if (unlikely(rc)) {
		lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
				"(%d):2544 Mailbox command x%x (x%x/x%x) "
				"cannot issue Data: x%x x%x\n",
				mboxq->vport ? mboxq->vport->vpi : 0,
				mboxq->u.mb.mbxCommand,
				lpfc_sli_config_mbox_subsys_get(phba, mboxq),
				lpfc_sli_config_mbox_opcode_get(phba, mboxq),
				psli->sli_flag, flag);
		goto out_not_finished;
	}

	/* Interrupts disabled: only polled issue through the bootstrap
	 * mailbox is possible; async requests fail with -EIO.
	 */
	if (!phba->sli4_hba.intr_enable) {
		if (flag == MBX_POLL)
			rc = lpfc_sli4_post_sync_mbox(phba, mboxq);
		else
			rc = -EIO;
		if (rc != MBX_SUCCESS)
			lpfc_printf_log(phba, KERN_WARNING, LOG_MBOX | LOG_SLI,
					"(%d):2541 Mailbox command x%x "
					"(x%x/x%x) failure: "
					"mqe_sta: x%x mcqe_sta: x%x/x%x "
					"Data: x%x x%x\n,",
					mboxq->vport ? mboxq->vport->vpi : 0,
					mboxq->u.mb.mbxCommand,
					lpfc_sli_config_mbox_subsys_get(phba,
					mboxq),
					lpfc_sli_config_mbox_opcode_get(phba,
					mboxq),
					bf_get(lpfc_mqe_status, &mboxq->u.mqe),
					bf_get(lpfc_mcqe_status, &mboxq->mcqe),
					bf_get(lpfc_mcqe_ext_status,
					       &mboxq->mcqe),
					psli->sli_flag, flag);
		return rc;
	} else if (flag == MBX_POLL) {
		/* Interrupts enabled but a synchronous issue requested:
		 * block the async queue first, then post synchronously.
		 */
		lpfc_printf_log(phba, KERN_WARNING, LOG_MBOX | LOG_SLI,
				"(%d):2542 Try to issue mailbox command "
				"x%x (x%x/x%x) synchronously ahead of async "
				"mailbox command queue: x%x x%x\n",
				mboxq->vport ? mboxq->vport->vpi : 0,
				mboxq->u.mb.mbxCommand,
				lpfc_sli_config_mbox_subsys_get(phba, mboxq),
				lpfc_sli_config_mbox_opcode_get(phba, mboxq),
				psli->sli_flag, flag);
		/* Try to block the asynchronous mailbox posting */
		rc = lpfc_sli4_async_mbox_block(phba);
		if (!rc) {
			/* Successfully blocked, now issue sync mbox cmd */
			rc = lpfc_sli4_post_sync_mbox(phba, mboxq);
			if (rc != MBX_SUCCESS)
				lpfc_printf_log(phba, KERN_WARNING,
					LOG_MBOX | LOG_SLI,
					"(%d):2597 Sync Mailbox command "
					"x%x (x%x/x%x) failure: "
					"mqe_sta: x%x mcqe_sta: x%x/x%x "
					"Data: x%x x%x\n,",
					mboxq->vport ? mboxq->vport->vpi : 0,
					mboxq->u.mb.mbxCommand,
					lpfc_sli_config_mbox_subsys_get(phba,
					mboxq),
					lpfc_sli_config_mbox_opcode_get(phba,
					mboxq),
					bf_get(lpfc_mqe_status, &mboxq->u.mqe),
					bf_get(lpfc_mcqe_status, &mboxq->mcqe),
					bf_get(lpfc_mcqe_ext_status,
					       &mboxq->mcqe),
					psli->sli_flag, flag);
			/* Unblock the async mailbox posting afterward */
			lpfc_sli4_async_mbox_unblock(phba);
		}
		return rc;
	}

	/* Now, interrupt mode asynchronous mailbox command */
	rc = lpfc_mbox_cmd_check(phba, mboxq);
	if (rc) {
		lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
				"(%d):2543 Mailbox command x%x (x%x/x%x) "
				"cannot issue Data: x%x x%x\n",
				mboxq->vport ? mboxq->vport->vpi : 0,
				mboxq->u.mb.mbxCommand,
				lpfc_sli_config_mbox_subsys_get(phba, mboxq),
				lpfc_sli_config_mbox_opcode_get(phba, mboxq),
				psli->sli_flag, flag);
		goto out_not_finished;
	}

	/* Put the mailbox command to the driver internal FIFO */
	psli->slistat.mbox_busy++;
	spin_lock_irqsave(&phba->hbalock, iflags);
	lpfc_mbox_put(phba, mboxq);
	spin_unlock_irqrestore(&phba->hbalock, iflags);
	lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI,
			"(%d):0354 Mbox cmd issue - Enqueue Data: "
			"x%x (x%x/x%x) x%x x%x x%x\n",
			mboxq->vport ? mboxq->vport->vpi : 0xffffff,
			bf_get(lpfc_mqe_command, &mboxq->u.mqe),
			lpfc_sli_config_mbox_subsys_get(phba, mboxq),
			lpfc_sli_config_mbox_opcode_get(phba, mboxq),
			phba->pport->port_state,
			psli->sli_flag, MBX_NOWAIT);
	/* Wake up worker thread to transport mailbox command from head */
	lpfc_worker_wake_up(phba);

	return MBX_BUSY;

out_not_finished:
	return MBX_NOT_FINISHED;
}
8393
8394
8395
8396
8397
8398
8399
8400
8401
/**
 * lpfc_sli4_post_async_mbox - Post an SLI4 mailbox command to the firmware
 * @phba: Pointer to HBA context object.
 *
 * Takes the next command off the driver's asynchronous mailbox queue and
 * posts it to the SLI4 mailbox work queue (MQ) for interrupt-driven
 * completion. Only one mailbox command may be outstanding at a time; the
 * LPFC_SLI_MBOX_ACTIVE flag serves as the service token.
 *
 * Return: MBX_SUCCESS if a command was posted (or the queue was empty),
 * MBX_NOT_FINISHED if the command could not be posted (interrupts disabled,
 * async posting blocked, a command already active, or the MQ put failed).
 **/
int
lpfc_sli4_post_async_mbox(struct lpfc_hba *phba)
{
	struct lpfc_sli *psli = &phba->sli;
	LPFC_MBOXQ_t *mboxq;
	int rc = MBX_SUCCESS;
	unsigned long iflags;
	struct lpfc_mqe *mqe;
	uint32_t mbx_cmnd;

	/* Async posting requires interrupt mode on the device */
	if (unlikely(!phba->sli4_hba.intr_enable))
		return MBX_NOT_FINISHED;

	/* Check for the mailbox command service token under hbalock */
	spin_lock_irqsave(&phba->hbalock, iflags);
	if (unlikely(psli->sli_flag & LPFC_SLI_ASYNC_MBX_BLK)) {
		/* Async mailbox posting is currently blocked (e.g. a
		 * synchronous command has taken precedence).
		 */
		spin_unlock_irqrestore(&phba->hbalock, iflags);
		return MBX_NOT_FINISHED;
	}
	if (psli->sli_flag & LPFC_SLI_MBOX_ACTIVE) {
		/* Another mailbox command already holds the token */
		spin_unlock_irqrestore(&phba->hbalock, iflags);
		return MBX_NOT_FINISHED;
	}
	if (unlikely(phba->sli.mbox_active)) {
		/* Token free but mbox_active set: inconsistent state */
		spin_unlock_irqrestore(&phba->hbalock, iflags);
		lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
				"0384 There is pending active mailbox cmd\n");
		return MBX_NOT_FINISHED;
	}
	/* Take the mailbox command service token */
	psli->sli_flag |= LPFC_SLI_MBOX_ACTIVE;

	/* Get the next mailbox command from the head of the queue */
	mboxq = lpfc_mbox_get(phba);

	/* If no more mailbox commands waiting for post, we're done */
	if (!mboxq) {
		psli->sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
		spin_unlock_irqrestore(&phba->hbalock, iflags);
		return MBX_SUCCESS;
	}
	phba->sli.mbox_active = mboxq;
	spin_unlock_irqrestore(&phba->hbalock, iflags);

	/* Check device readiness before posting the mailbox command */
	rc = lpfc_mbox_dev_check(phba);
	if (unlikely(rc))
		/* Driver cleanup routine will clean up the pending mailbox */
		goto out_not_finished;

	/* Prepare the mbox command to be posted */
	mqe = &mboxq->u.mqe;
	mbx_cmnd = bf_get(lpfc_mqe_command, mqe);

	/* Start the mailbox timeout timer and log the issue */
	mod_timer(&psli->mbox_tmo, (jiffies +
		  msecs_to_jiffies(1000 * lpfc_mbox_tmo_val(phba, mboxq))));

	lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI,
			"(%d):0355 Mailbox cmd x%x (x%x/x%x) issue Data: "
			"x%x x%x\n",
			mboxq->vport ? mboxq->vport->vpi : 0, mbx_cmnd,
			lpfc_sli_config_mbox_subsys_get(phba, mboxq),
			lpfc_sli_config_mbox_opcode_get(phba, mboxq),
			phba->pport->port_state, psli->sli_flag);

	if (mbx_cmnd != MBX_HEARTBEAT) {
		/* Trace the send in debugfs, per-vport when available */
		if (mboxq->vport) {
			lpfc_debugfs_disc_trc(mboxq->vport,
				LPFC_DISC_TRC_MBOX_VPORT,
				"MBOX Send vport: cmd:x%x mb:x%x x%x",
				mbx_cmnd, mqe->un.mb_words[0],
				mqe->un.mb_words[1]);
		} else {
			lpfc_debugfs_disc_trc(phba->pport,
				LPFC_DISC_TRC_MBOX,
				"MBOX Send: cmd:x%x mb:x%x x%x",
				mbx_cmnd, mqe->un.mb_words[0],
				mqe->un.mb_words[1]);
		}
	}
	psli->slistat.mbox_cmd++;

	/* Post the mailbox command to the port's mailbox work queue */
	rc = lpfc_sli4_mq_put(phba->sli4_hba.mbx_wq, mqe);
	if (rc != MBX_SUCCESS) {
		lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
				"(%d):2533 Mailbox command x%x (x%x/x%x) "
				"cannot issue Data: x%x x%x\n",
				mboxq->vport ? mboxq->vport->vpi : 0,
				mboxq->u.mb.mbxCommand,
				lpfc_sli_config_mbox_subsys_get(phba, mboxq),
				lpfc_sli_config_mbox_opcode_get(phba, mboxq),
				psli->sli_flag, MBX_NOWAIT);
		goto out_not_finished;
	}

	return rc;

out_not_finished:
	spin_lock_irqsave(&phba->hbalock, iflags);
	if (phba->sli.mbox_active) {
		/* Fail the command and run its completion handler */
		mboxq->u.mb.mbxStatus = MBX_NOT_FINISHED;
		__lpfc_mbox_cmpl_put(phba, mboxq);
		/* Release the mailbox command service token */
		psli->sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
		phba->sli.mbox_active = NULL;
	}
	spin_unlock_irqrestore(&phba->hbalock, iflags);

	return MBX_NOT_FINISHED;
}
8515
8516
8517
8518
8519
8520
8521
8522
8523
8524
8525
8526
8527
/**
 * lpfc_sli_issue_mbox - Wrapper func for issuing a mailbox command
 * @phba: Pointer to HBA context object.
 * @pmbox: Pointer to mailbox object.
 * @flag: Flag indicating how the mailbox is issued (e.g. MBX_POLL,
 *        MBX_NOWAIT).
 *
 * Dispatches to the SLI-revision-specific issue routine installed in
 * @phba by lpfc_mbox_api_table_setup().
 *
 * Return: the return code of the dispatched routine.
 **/
int
lpfc_sli_issue_mbox(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmbox, uint32_t flag)
{
	return phba->lpfc_sli_issue_mbox(phba, pmbox, flag);
}
8533
8534
8535
8536
8537
8538
8539
8540
8541
8542
8543int
8544lpfc_mbox_api_table_setup(struct lpfc_hba *phba, uint8_t dev_grp)
8545{
8546
8547 switch (dev_grp) {
8548 case LPFC_PCI_DEV_LP:
8549 phba->lpfc_sli_issue_mbox = lpfc_sli_issue_mbox_s3;
8550 phba->lpfc_sli_handle_slow_ring_event =
8551 lpfc_sli_handle_slow_ring_event_s3;
8552 phba->lpfc_sli_hbq_to_firmware = lpfc_sli_hbq_to_firmware_s3;
8553 phba->lpfc_sli_brdrestart = lpfc_sli_brdrestart_s3;
8554 phba->lpfc_sli_brdready = lpfc_sli_brdready_s3;
8555 break;
8556 case LPFC_PCI_DEV_OC:
8557 phba->lpfc_sli_issue_mbox = lpfc_sli_issue_mbox_s4;
8558 phba->lpfc_sli_handle_slow_ring_event =
8559 lpfc_sli_handle_slow_ring_event_s4;
8560 phba->lpfc_sli_hbq_to_firmware = lpfc_sli_hbq_to_firmware_s4;
8561 phba->lpfc_sli_brdrestart = lpfc_sli_brdrestart_s4;
8562 phba->lpfc_sli_brdready = lpfc_sli_brdready_s4;
8563 break;
8564 default:
8565 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
8566 "1420 Invalid HBA PCI-device group: 0x%x\n",
8567 dev_grp);
8568 return -ENODEV;
8569 break;
8570 }
8571 return 0;
8572}
8573
8574
8575
8576
8577
8578
8579
8580
8581
8582
8583
/**
 * __lpfc_sli_ringtx_put - Add an iocb to the txq
 * @phba: Pointer to HBA context object.
 * @pring: Pointer to driver SLI ring object.
 * @piocb: Pointer to the driver iocb object.
 *
 * Adds @piocb to the tail of the ring's transmit queue. The caller must
 * hold the hbalock; this is enforced via lockdep below.
 **/
void
__lpfc_sli_ringtx_put(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
		    struct lpfc_iocbq *piocb)
{
	lockdep_assert_held(&phba->hbalock);
	/* Insert the caller's buffer at the end of the txq */
	list_add_tail(&piocb->list, &pring->txq);
}
8592
8593
8594
8595
8596
8597
8598
8599
8600
8601
8602
8603
8604
8605
8606
8607
8608
8609
8610static struct lpfc_iocbq *
8611lpfc_sli_next_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
8612 struct lpfc_iocbq **piocb)
8613{
8614 struct lpfc_iocbq * nextiocb;
8615
8616 lockdep_assert_held(&phba->hbalock);
8617
8618 nextiocb = lpfc_sli_ringtx_get(phba, pring);
8619 if (!nextiocb) {
8620 nextiocb = *piocb;
8621 *piocb = NULL;
8622 }
8623
8624 return nextiocb;
8625}
8626
8627
8628
8629
8630
8631
8632
8633
8634
8635
8636
8637
8638
8639
8640
8641
8642
8643
8644
8645
8646
8647
8648
/**
 * __lpfc_sli_issue_iocb_s3 - SLI3 device lockless version of issue iocb
 * @phba: Pointer to HBA context object.
 * @ring_number: SLI ring number on which to issue the iocb.
 * @piocb: Pointer to the command iocb.
 * @flag: SLI_IOCB_RET_IOCB means return IOCB_BUSY instead of queueing to
 *        the txq when the iocb cannot be submitted right away.
 *
 * Submits @piocb (preceded by anything already on the ring's txq) to the
 * SLI-3 ring, subject to link state and ring-flag gating. Caller must
 * hold the hbalock.
 *
 * Return: IOCB_SUCCESS, IOCB_BUSY, or IOCB_ERROR.
 **/
static int
__lpfc_sli_issue_iocb_s3(struct lpfc_hba *phba, uint32_t ring_number,
		    struct lpfc_iocbq *piocb, uint32_t flag)
{
	struct lpfc_iocbq *nextiocb;
	IOCB_t *iocb;
	struct lpfc_sli_ring *pring = &phba->sli.sli3_ring[ring_number];

	lockdep_assert_held(&phba->hbalock);

	/* A completion-carrying iocb must name a vport, except for the
	 * abort/close commands which do not need one.
	 */
	if (piocb->iocb_cmpl && (!piocb->vport) &&
	   (piocb->iocb.ulpCommand != CMD_ABORT_XRI_CN) &&
	   (piocb->iocb.ulpCommand != CMD_CLOSE_XRI_CN)) {
		lpfc_printf_log(phba, KERN_ERR,
				LOG_SLI | LOG_VPORT,
				"1807 IOCB x%x failed. No vport\n",
				piocb->iocb.ulpCommand);
		dump_stack();
		return IOCB_ERROR;
	}

	/* If the PCI channel is in offline state, do not post iocbs. */
	if (unlikely(pci_channel_offline(phba->pcidev)))
		return IOCB_ERROR;

	/* If HBA has a deferred error attention, fail the iocb. */
	if (unlikely(phba->hba_flag & DEFER_ERATT))
		return IOCB_ERROR;

	/*
	 * We should never get an IOCB if we are in a < LINK_DOWN state
	 */
	if (unlikely(phba->link_state < LPFC_LINK_DOWN))
		return IOCB_ERROR;

	/*
	 * Check to see if we are blocking IOCB processing because of a
	 * outstanding event.
	 */
	if (unlikely(pring->flag & LPFC_STOP_IOCB_EVENT))
		goto iocb_busy;

	if (unlikely(phba->link_state == LPFC_LINK_DOWN)) {
		/*
		 * Only CREATE_XRI, CLOSE_XRI, and QUE_RING_BUF
		 * can be issued if the link is not up.
		 */
		switch (piocb->iocb.ulpCommand) {
		case CMD_GEN_REQUEST64_CR:
		case CMD_GEN_REQUEST64_CX:
			/* Menlo maintenance-mode frames are the only GEN
			 * requests allowed while the link is down.
			 */
			if (!(phba->sli.sli_flag & LPFC_MENLO_MAINT) ||
				(piocb->iocb.un.genreq64.w5.hcsw.Rctl !=
					FC_RCTL_DD_UNSOL_CMD) ||
				(piocb->iocb.un.genreq64.w5.hcsw.Type !=
					MENLO_TRANSPORT_TYPE))

				goto iocb_busy;
			break;
		case CMD_QUE_RING_BUF_CN:
		case CMD_QUE_RING_BUF64_CN:
			/*
			 * For IOCBs, like QUE_RING_BUF, that have no rsp ring
			 * completion, iocb_cmpl MUST be 0.
			 */
			if (piocb->iocb_cmpl)
				piocb->iocb_cmpl = NULL;
			/* fall through */
		case CMD_CREATE_XRI_CR:
		case CMD_CLOSE_XRI_CN:
		case CMD_CLOSE_XRI_CX:
			break;
		default:
			goto iocb_busy;
		}

	/*
	 * For FCP commands, we must be in a state where we can process link
	 * attention events.
	 */
	} else if (unlikely(pring->ringno == LPFC_FCP_RING &&
			    !(phba->sli.sli_flag & LPFC_PROCESS_LA))) {
		goto iocb_busy;
	}

	/* Drain the txq first, then the caller's iocb, while slots remain */
	while ((iocb = lpfc_sli_next_iocb_slot(phba, pring)) &&
	       (nextiocb = lpfc_sli_next_iocb(phba, pring, &piocb)))
		lpfc_sli_submit_iocb(phba, pring, iocb, nextiocb);

	if (iocb)
		lpfc_sli_update_ring(phba, pring);
	else
		lpfc_sli_update_full_ring(phba, pring);

	/* piocb was consumed by lpfc_sli_next_iocb: it was submitted */
	if (!piocb)
		return IOCB_SUCCESS;

	goto out_busy;

 iocb_busy:
	pring->stats.iocb_cmd_delay++;

 out_busy:
	/* Queue to the txq unless the caller asked for the iocb back */
	if (!(flag & SLI_IOCB_RET_IOCB)) {
		__lpfc_sli_ringtx_put(phba, pring, piocb);
		return IOCB_SUCCESS;
	}

	return IOCB_BUSY;
}
8760
8761
8762
8763
8764
8765
8766
8767
8768
8769
8770
8771
8772
8773
8774
8775
8776
8777
/**
 * lpfc_sli4_bpl2sgl - Convert the bpl, bde to a sgl.
 * @phba: Pointer to HBA context object.
 * @piocbq: Pointer to command iocb.
 * @sglq: Pointer to the scatter gather queue object.
 *
 * Converts the buffer pointer list (BPL) or single BDE in the IOCB to the
 * SLI4 scatter/gather list format in @sglq. The IOCB contents are still in
 * cpu endianness, so BPL entries can be copied without byte swapping; only
 * the length/flags word needs conversion.
 *
 * Return: the sglq's XRI on success, NO_XRI on failure.
 **/
static uint16_t
lpfc_sli4_bpl2sgl(struct lpfc_hba *phba, struct lpfc_iocbq *piocbq,
		struct lpfc_sglq *sglq)
{
	uint16_t xritag = NO_XRI;
	struct ulp_bde64 *bpl = NULL;
	struct ulp_bde64 bde;
	struct sli4_sge *sgl = NULL;
	struct lpfc_dmabuf *dmabuf;
	IOCB_t *icmd;
	int numBdes = 0;
	int i = 0;
	uint32_t offset = 0;
	int inbound = 0;

	if (!piocbq || !sglq)
		return xritag;

	sgl = (struct sli4_sge *)sglq->sgl;
	icmd = &piocbq->iocb;
	/* BLS responses carry no payload; nothing to convert */
	if (icmd->ulpCommand == CMD_XMIT_BLS_RSP64_CX)
		return sglq->sli4_xritag;
	if (icmd->un.genreq64.bdl.bdeFlags == BUFF_TYPE_BLP_64) {
		numBdes = icmd->un.genreq64.bdl.bdeSize /
				sizeof(struct ulp_bde64);
		/* The BPL lives in the DMA buffer attached as context3;
		 * without it there is nothing to convert.
		 */
		if (piocbq->context3)
			dmabuf = (struct lpfc_dmabuf *)piocbq->context3;
		else
			return xritag;

		bpl = (struct ulp_bde64 *)dmabuf->virt;
		if (!bpl)
			return xritag;

		for (i = 0; i < numBdes; i++) {
			/* Addresses should already be byte swapped. */
			sgl->addr_hi = bpl->addrHigh;
			sgl->addr_lo = bpl->addrLow;

			sgl->word2 = le32_to_cpu(sgl->word2);
			if ((i+1) == numBdes)
				bf_set(lpfc_sli4_sge_last, sgl, 1);
			else
				bf_set(lpfc_sli4_sge_last, sgl, 0);
			/* swap the size field back to the cpu so we
			 * can assign it to the sgl.
			 */
			bde.tus.w = le32_to_cpu(bpl->tus.w);
			sgl->sge_len = cpu_to_le32(bde.tus.f.bdeSize);
			/* The offsets in the sgl need to be accumulated
			 * separately for the request and reply lists.
			 * The request is always first, the reply follows.
			 */
			if (piocbq->iocb.ulpCommand == CMD_GEN_REQUEST64_CR) {
				/* count the inbound (reply) sg entries */
				if (bpl->tus.f.bdeFlags == BUFF_TYPE_BDE_64I)
					inbound++;
				/* first inbound entry? restart the offset */
				if (inbound == 1)
					offset = 0;
				bf_set(lpfc_sli4_sge_offset, sgl, offset);
				bf_set(lpfc_sli4_sge_type, sgl,
					LPFC_SGE_TYPE_DATA);
				offset += bde.tus.f.bdeSize;
			}
			sgl->word2 = cpu_to_le32(sgl->word2);
			bpl++;
			sgl++;
		}
	} else if (icmd->un.genreq64.bdl.bdeFlags == BUFF_TYPE_BDE_64) {
		/* A single embedded BDE: its address fields have not been
		 * byteswapped yet, so swap them while filling the lone sge.
		 */
		sgl->addr_hi =
			cpu_to_le32(icmd->un.genreq64.bdl.addrHigh);
		sgl->addr_lo =
			cpu_to_le32(icmd->un.genreq64.bdl.addrLow);
		sgl->word2 = le32_to_cpu(sgl->word2);
		bf_set(lpfc_sli4_sge_last, sgl, 1);
		sgl->word2 = cpu_to_le32(sgl->word2);
		sgl->sge_len =
			cpu_to_le32(icmd->un.genreq64.bdl.bdeSize);
	}
	return sglq->sli4_xritag;
}
8868
8869
8870
8871
8872
8873
8874
8875
8876
8877
8878
8879
8880
8881
8882
/**
 * lpfc_sli4_iocb2wqe - Convert the iocb to a wqe
 * @phba: Pointer to HBA context object.
 * @iocbq: Pointer to command iocb.
 * @wqe: Pointer to the work queue entry to construct.
 *
 * Translates an SLI-3 style IOCB into an SLI-4 work queue entry (WQE).
 * A bulk memcpy of the IOCB lands many fields in the right place; the
 * per-command switch below fixes up the fields that moved.
 *
 * Return: 0 on success, IOCB_ERROR if the command cannot be converted.
 **/
static int
lpfc_sli4_iocb2wqe(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq,
		union lpfc_wqe128 *wqe)
{
	uint32_t xmit_len = 0, total_len = 0;
	uint8_t ct = 0;
	uint32_t fip;
	uint32_t abort_tag;
	uint8_t command_type = ELS_COMMAND_NON_FIP;
	uint8_t cmnd;
	uint16_t xritag;
	uint16_t abrt_iotag;
	struct lpfc_iocbq *abrtiocbq;
	struct ulp_bde64 *bpl = NULL;
	uint32_t els_id = LPFC_ELS_ID_DEFAULT;
	int numBdes, i;
	struct ulp_bde64 bde;
	struct lpfc_nodelist *ndlp;
	uint32_t *pcmd;
	uint32_t if_type;

	fip = phba->hba_flag & HBA_FIP_SUPPORT;
	/* The fcp commands will set command type */
	if (iocbq->iocb_flag & LPFC_IO_FCP)
		command_type = FCP_COMMAND;
	else if (fip && (iocbq->iocb_flag & LPFC_FIP_ELS_ID_MASK))
		command_type = ELS_COMMAND_FIP;
	else
		command_type = ELS_COMMAND_NON_FIP;

	if (phba->fcp_embed_io)
		memset(wqe, 0, sizeof(union lpfc_wqe128));
	/* Some of the fields are in the right position already */
	memcpy(wqe, &iocbq->iocb, sizeof(union lpfc_wqe));
	if (iocbq->iocb.ulpCommand != CMD_SEND_FRAME) {
		/* The ct field has moved so reset */
		wqe->generic.wqe_com.word7 = 0;
		wqe->generic.wqe_com.word10 = 0;
	}

	abort_tag = (uint32_t) iocbq->iotag;
	xritag = iocbq->sli4_xritag;
	/* words0-2: convert a bpl BDE if present */
	if (iocbq->iocb.un.genreq64.bdl.bdeFlags == BUFF_TYPE_BLP_64) {
		numBdes = iocbq->iocb.un.genreq64.bdl.bdeSize /
				sizeof(struct ulp_bde64);
		bpl = (struct ulp_bde64 *)
			((struct lpfc_dmabuf *)iocbq->context3)->virt;
		if (!bpl)
			return IOCB_ERROR;

		/* Should already be byte swapped. */
		wqe->generic.bde.addrHigh = le32_to_cpu(bpl->addrHigh);
		wqe->generic.bde.addrLow = le32_to_cpu(bpl->addrLow);
		/* swap the size field back to the cpu so we
		 * can assign it to the sgl.
		 */
		wqe->generic.bde.tus.w = le32_to_cpu(bpl->tus.w);
		xmit_len = wqe->generic.bde.tus.f.bdeSize;
		total_len = 0;
		for (i = 0; i < numBdes; i++) {
			bde.tus.w = le32_to_cpu(bpl[i].tus.w);
			total_len += bde.tus.f.bdeSize;
		}
	} else
		xmit_len = iocbq->iocb.un.fcpi64.bdl.bdeSize;

	iocbq->iocb.ulpIoTag = iocbq->iotag;
	cmnd = iocbq->iocb.ulpCommand;

	switch (iocbq->iocb.ulpCommand) {
	case CMD_ELS_REQUEST64_CR:
		if (iocbq->iocb_flag & LPFC_IO_LIBDFC)
			ndlp = iocbq->context_un.ndlp;
		else
			ndlp = (struct lpfc_nodelist *)iocbq->context1;
		if (!iocbq->iocb.ulpLe) {
			lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
				"2007 Only Limited Edition cmd Format"
				" supported 0x%x\n",
				iocbq->iocb.ulpCommand);
			return IOCB_ERROR;
		}

		wqe->els_req.payload_len = xmit_len;
		/* Els_reguest64 has a TMO */
		bf_set(wqe_tmo, &wqe->els_req.wqe_com,
			iocbq->iocb.ulpTimeout);
		/* Need a VF for word 4 set the vf bit */
		bf_set(els_req64_vf, &wqe->els_req, 0);
		/* And a VFID for word 12 */
		bf_set(els_req64_vfid, &wqe->els_req, 0);
		ct = ((iocbq->iocb.ulpCt_h << 1) | iocbq->iocb.ulpCt_l);
		bf_set(wqe_ctxt_tag, &wqe->els_req.wqe_com,
		       iocbq->iocb.ulpContext);
		bf_set(wqe_ct, &wqe->els_req.wqe_com, ct);
		bf_set(wqe_pu, &wqe->els_req.wqe_com, 0);
		/* CCP CCPE PV PRI in word10 were set in the memcpy */
		if (command_type == ELS_COMMAND_FIP)
			els_id = ((iocbq->iocb_flag & LPFC_FIP_ELS_ID_MASK)
					>> LPFC_FIP_ELS_ID_SHIFT);
		pcmd = (uint32_t *) (((struct lpfc_dmabuf *)
					iocbq->context2)->virt);
		if_type = bf_get(lpfc_sli_intf_if_type,
					&phba->sli4_hba.sli_intf);
		if (if_type >= LPFC_SLI_INTF_IF_TYPE_2) {
			/* Fabric-facing ELS commands carry the source ID
			 * and use the VPI as context.
			 */
			if (pcmd && (*pcmd == ELS_CMD_FLOGI ||
				*pcmd == ELS_CMD_SCR ||
				*pcmd == ELS_CMD_FDISC ||
				*pcmd == ELS_CMD_LOGO ||
				*pcmd == ELS_CMD_PLOGI)) {
				bf_set(els_req64_sp, &wqe->els_req, 1);
				bf_set(els_req64_sid, &wqe->els_req,
					iocbq->vport->fc_myDID);
				if ((*pcmd == ELS_CMD_FLOGI) &&
					!(phba->fc_topology ==
						LPFC_TOPOLOGY_LOOP))
					bf_set(els_req64_sid, &wqe->els_req, 0);
				bf_set(wqe_ct, &wqe->els_req.wqe_com, 1);
				bf_set(wqe_ctxt_tag, &wqe->els_req.wqe_com,
					phba->vpi_ids[iocbq->vport->vpi]);
			} else if (pcmd && iocbq->context1) {
				/* Other ELS commands use the RPI context */
				bf_set(wqe_ct, &wqe->els_req.wqe_com, 0);
				bf_set(wqe_ctxt_tag, &wqe->els_req.wqe_com,
					phba->sli4_hba.rpi_ids[ndlp->nlp_rpi]);
			}
		}
		bf_set(wqe_temp_rpi, &wqe->els_req.wqe_com,
		       phba->sli4_hba.rpi_ids[ndlp->nlp_rpi]);
		bf_set(wqe_els_id, &wqe->els_req.wqe_com, els_id);
		bf_set(wqe_dbde, &wqe->els_req.wqe_com, 1);
		bf_set(wqe_iod, &wqe->els_req.wqe_com, LPFC_WQE_IOD_READ);
		bf_set(wqe_qosd, &wqe->els_req.wqe_com, 1);
		bf_set(wqe_lenloc, &wqe->els_req.wqe_com, LPFC_WQE_LENLOC_NONE);
		bf_set(wqe_ebde_cnt, &wqe->els_req.wqe_com, 0);
		wqe->els_req.max_response_payload_len = total_len - xmit_len;
		break;
	case CMD_XMIT_SEQUENCE64_CX:
		bf_set(wqe_ctxt_tag, &wqe->xmit_sequence.wqe_com,
		       iocbq->iocb.un.ulpWord[3]);
		bf_set(wqe_rcvoxid, &wqe->xmit_sequence.wqe_com,
		       iocbq->iocb.unsli3.rcvsli3.ox_id);
		/* The entire sequence is transmitted for this IOCB */
		xmit_len = total_len;
		cmnd = CMD_XMIT_SEQUENCE64_CR;
		if (phba->link_flag & LS_LOOPBACK_MODE)
			bf_set(wqe_xo, &wqe->xmit_sequence.wge_ctl, 1);
		/* fall through */
	case CMD_XMIT_SEQUENCE64_CR:
		/* word3 iocb=io_tag32 wqe=reserved */
		wqe->xmit_sequence.rsvd3 = 0;
		/* word4 relative_offset memcpy */
		/* word5 r_ctl/df_ctl memcpy */
		bf_set(wqe_pu, &wqe->xmit_sequence.wqe_com, 0);
		bf_set(wqe_dbde, &wqe->xmit_sequence.wqe_com, 1);
		bf_set(wqe_iod, &wqe->xmit_sequence.wqe_com,
		       LPFC_WQE_IOD_WRITE);
		bf_set(wqe_lenloc, &wqe->xmit_sequence.wqe_com,
		       LPFC_WQE_LENLOC_WORD12);
		bf_set(wqe_ebde_cnt, &wqe->xmit_sequence.wqe_com, 0);
		wqe->xmit_sequence.xmit_len = xmit_len;
		command_type = OTHER_COMMAND;
		break;
	case CMD_XMIT_BCAST64_CN:
		/* word3 iocb=iotag32 wqe=seq_payload_len */
		wqe->xmit_bcast64.seq_payload_len = xmit_len;
		/* word4 iocb=rsvd wqe=rsvd */
		/* word5 iocb=rctl/type/df_ctl wqe=rctl/type/df_ctl memcpy */
		/* word6 iocb=ctxt_tag/io_tag wqe=ctxt_tag/xri */
		bf_set(wqe_ct, &wqe->xmit_bcast64.wqe_com,
			((iocbq->iocb.ulpCt_h << 1) | iocbq->iocb.ulpCt_l));
		bf_set(wqe_dbde, &wqe->xmit_bcast64.wqe_com, 1);
		bf_set(wqe_iod, &wqe->xmit_bcast64.wqe_com, LPFC_WQE_IOD_WRITE);
		bf_set(wqe_lenloc, &wqe->xmit_bcast64.wqe_com,
		       LPFC_WQE_LENLOC_WORD3);
		bf_set(wqe_ebde_cnt, &wqe->xmit_bcast64.wqe_com, 0);
		break;
	case CMD_FCP_IWRITE64_CR:
		command_type = FCP_COMMAND_DATA_OUT;
		/* word3 iocb=iotag wqe=payload_offset_len */
		/* Add the FCP_CMD and FCP_RSP sizes to get the offset */
		bf_set(payload_offset_len, &wqe->fcp_iwrite,
		       xmit_len + sizeof(struct fcp_rsp));
		bf_set(cmd_buff_len, &wqe->fcp_iwrite,
		       0);
		/* word4 iocb=parameter wqe=total_xfer_length memcpy */
		/* word5 iocb=initial_xfer_len wqe=initial_xfer_len memcpy */
		bf_set(wqe_erp, &wqe->fcp_iwrite.wqe_com,
		       iocbq->iocb.ulpFCP2Rcvy);
		bf_set(wqe_lnk, &wqe->fcp_iwrite.wqe_com, iocbq->iocb.ulpXS);
		/* Always open the exchange */
		bf_set(wqe_iod, &wqe->fcp_iwrite.wqe_com, LPFC_WQE_IOD_WRITE);
		bf_set(wqe_lenloc, &wqe->fcp_iwrite.wqe_com,
		       LPFC_WQE_LENLOC_WORD4);
		bf_set(wqe_pu, &wqe->fcp_iwrite.wqe_com, iocbq->iocb.ulpPU);
		bf_set(wqe_dbde, &wqe->fcp_iwrite.wqe_com, 1);
		if (iocbq->iocb_flag & LPFC_IO_OAS) {
			/* Optimized access storage: set OAS and a CCP value */
			bf_set(wqe_oas, &wqe->fcp_iwrite.wqe_com, 1);
			bf_set(wqe_ccpe, &wqe->fcp_iwrite.wqe_com, 1);
			if (iocbq->priority) {
				bf_set(wqe_ccp, &wqe->fcp_iwrite.wqe_com,
				       (iocbq->priority << 1));
			} else {
				bf_set(wqe_ccp, &wqe->fcp_iwrite.wqe_com,
				       (phba->cfg_XLanePriority << 1));
			}
		}

		/* Set PBDE support if configured on this port */
		if (phba->fcp_embed_pbde)
			bf_set(wqe_pbde, &wqe->fcp_iwrite.wqe_com, 1);
		else
			bf_set(wqe_pbde, &wqe->fcp_iwrite.wqe_com, 0);

		if (phba->fcp_embed_io) {
			struct lpfc_scsi_buf *lpfc_cmd;
			struct sli4_sge *sgl;
			struct fcp_cmnd *fcp_cmnd;
			uint32_t *ptr;

			/* 128 byte wqe support here: embed the FCP_CMND
			 * payload directly in the WQE.
			 */
			lpfc_cmd = iocbq->context1;
			sgl = (struct sli4_sge *)lpfc_cmd->fcp_bpl;
			fcp_cmnd = lpfc_cmd->fcp_cmnd;

			/* Word 0-2 - FCP_CMND as an immediate BDE */
			wqe->generic.bde.tus.f.bdeFlags =
				BUFF_TYPE_BDE_IMMED;
			wqe->generic.bde.tus.f.bdeSize = sgl->sge_len;
			wqe->generic.bde.addrHigh = 0;
			wqe->generic.bde.addrLow = 88;	/* Word 22 */

			bf_set(wqe_wqes, &wqe->fcp_iwrite.wqe_com, 1);
			bf_set(wqe_dbde, &wqe->fcp_iwrite.wqe_com, 0);

			/* Word 22-29 - FCP CMND Payload */
			ptr = &wqe->words[22];
			memcpy(ptr, fcp_cmnd, sizeof(struct fcp_cmnd));
		}
		break;
	case CMD_FCP_IREAD64_CR:
		/* word3 iocb=iotag wqe=payload_offset_len */
		/* Add the FCP_CMD and FCP_RSP sizes to get the offset */
		bf_set(payload_offset_len, &wqe->fcp_iread,
		       xmit_len + sizeof(struct fcp_rsp));
		bf_set(cmd_buff_len, &wqe->fcp_iread,
		       0);
		/* word4 iocb=parameter wqe=total_xfer_length memcpy */
		/* word5 iocb=initial_xfer_len wqe=initial_xfer_len memcpy */
		bf_set(wqe_erp, &wqe->fcp_iread.wqe_com,
		       iocbq->iocb.ulpFCP2Rcvy);
		bf_set(wqe_lnk, &wqe->fcp_iread.wqe_com, iocbq->iocb.ulpXS);
		/* Always open the exchange */
		bf_set(wqe_iod, &wqe->fcp_iread.wqe_com, LPFC_WQE_IOD_READ);
		bf_set(wqe_lenloc, &wqe->fcp_iread.wqe_com,
		       LPFC_WQE_LENLOC_WORD4);
		bf_set(wqe_pu, &wqe->fcp_iread.wqe_com, iocbq->iocb.ulpPU);
		bf_set(wqe_dbde, &wqe->fcp_iread.wqe_com, 1);
		if (iocbq->iocb_flag & LPFC_IO_OAS) {
			/* Optimized access storage: set OAS and a CCP value */
			bf_set(wqe_oas, &wqe->fcp_iread.wqe_com, 1);
			bf_set(wqe_ccpe, &wqe->fcp_iread.wqe_com, 1);
			if (iocbq->priority) {
				bf_set(wqe_ccp, &wqe->fcp_iread.wqe_com,
				       (iocbq->priority << 1));
			} else {
				bf_set(wqe_ccp, &wqe->fcp_iread.wqe_com,
				       (phba->cfg_XLanePriority << 1));
			}
		}

		/* Set PBDE support if configured on this port */
		if (phba->fcp_embed_pbde)
			bf_set(wqe_pbde, &wqe->fcp_iread.wqe_com, 1);
		else
			bf_set(wqe_pbde, &wqe->fcp_iread.wqe_com, 0);

		if (phba->fcp_embed_io) {
			struct lpfc_scsi_buf *lpfc_cmd;
			struct sli4_sge *sgl;
			struct fcp_cmnd *fcp_cmnd;
			uint32_t *ptr;

			/* 128 byte wqe support here: embed the FCP_CMND
			 * payload directly in the WQE.
			 */
			lpfc_cmd = iocbq->context1;
			sgl = (struct sli4_sge *)lpfc_cmd->fcp_bpl;
			fcp_cmnd = lpfc_cmd->fcp_cmnd;

			/* Word 0-2 - FCP_CMND as an immediate BDE */
			wqe->generic.bde.tus.f.bdeFlags =
				BUFF_TYPE_BDE_IMMED;
			wqe->generic.bde.tus.f.bdeSize = sgl->sge_len;
			wqe->generic.bde.addrHigh = 0;
			wqe->generic.bde.addrLow = 88;	/* Word 22 */

			bf_set(wqe_wqes, &wqe->fcp_iread.wqe_com, 1);
			bf_set(wqe_dbde, &wqe->fcp_iread.wqe_com, 0);

			/* Word 22-29 - FCP CMND Payload */
			ptr = &wqe->words[22];
			memcpy(ptr, fcp_cmnd, sizeof(struct fcp_cmnd));
		}
		break;
	case CMD_FCP_ICMND64_CR:
		/* word3 iocb=iotag wqe=payload_offset_len */
		/* Add the FCP_CMD and FCP_RSP sizes to get the offset */
		bf_set(payload_offset_len, &wqe->fcp_icmd,
		       xmit_len + sizeof(struct fcp_rsp));
		bf_set(cmd_buff_len, &wqe->fcp_icmd,
		       0);
		/* word3 iocb=IO_TAG wqe=reserved */
		bf_set(wqe_pu, &wqe->fcp_icmd.wqe_com, 0);
		/* Always open the exchange */
		bf_set(wqe_dbde, &wqe->fcp_icmd.wqe_com, 1);
		bf_set(wqe_iod, &wqe->fcp_icmd.wqe_com, LPFC_WQE_IOD_WRITE);
		bf_set(wqe_qosd, &wqe->fcp_icmd.wqe_com, 1);
		bf_set(wqe_lenloc, &wqe->fcp_icmd.wqe_com,
		       LPFC_WQE_LENLOC_NONE);
		bf_set(wqe_erp, &wqe->fcp_icmd.wqe_com,
		       iocbq->iocb.ulpFCP2Rcvy);
		if (iocbq->iocb_flag & LPFC_IO_OAS) {
			/* Optimized access storage: set OAS and a CCP value */
			bf_set(wqe_oas, &wqe->fcp_icmd.wqe_com, 1);
			bf_set(wqe_ccpe, &wqe->fcp_icmd.wqe_com, 1);
			if (iocbq->priority) {
				bf_set(wqe_ccp, &wqe->fcp_icmd.wqe_com,
				       (iocbq->priority << 1));
			} else {
				bf_set(wqe_ccp, &wqe->fcp_icmd.wqe_com,
				       (phba->cfg_XLanePriority << 1));
			}
		}

		if (phba->fcp_embed_io) {
			struct lpfc_scsi_buf *lpfc_cmd;
			struct sli4_sge *sgl;
			struct fcp_cmnd *fcp_cmnd;
			uint32_t *ptr;

			/* 128 byte wqe support here: embed the FCP_CMND
			 * payload directly in the WQE.
			 */
			lpfc_cmd = iocbq->context1;
			sgl = (struct sli4_sge *)lpfc_cmd->fcp_bpl;
			fcp_cmnd = lpfc_cmd->fcp_cmnd;

			/* Word 0-2 - FCP_CMND as an immediate BDE */
			wqe->generic.bde.tus.f.bdeFlags =
				BUFF_TYPE_BDE_IMMED;
			wqe->generic.bde.tus.f.bdeSize = sgl->sge_len;
			wqe->generic.bde.addrHigh = 0;
			wqe->generic.bde.addrLow = 88;	/* Word 22 */

			bf_set(wqe_wqes, &wqe->fcp_icmd.wqe_com, 1);
			bf_set(wqe_dbde, &wqe->fcp_icmd.wqe_com, 0);

			/* Word 22-29 - FCP CMND Payload */
			ptr = &wqe->words[22];
			memcpy(ptr, fcp_cmnd, sizeof(struct fcp_cmnd));
		}
		break;
	case CMD_GEN_REQUEST64_CR:
		/* For this command calculate the xmit length of the
		 * request bde: only the outbound (64) BDEs count.
		 */
		xmit_len = 0;
		numBdes = iocbq->iocb.un.genreq64.bdl.bdeSize /
			sizeof(struct ulp_bde64);
		for (i = 0; i < numBdes; i++) {
			bde.tus.w = le32_to_cpu(bpl[i].tus.w);
			if (bde.tus.f.bdeFlags != BUFF_TYPE_BDE_64)
				break;
			xmit_len += bde.tus.f.bdeSize;
		}
		/* word3 iocb=IO_TAG wqe=request_payload_len */
		wqe->gen_req.request_payload_len = xmit_len;
		/* word4 iocb=parameter wqe=relative_offset memcpy */
		/* word5 [rctl, type, df_ctl, la] copied in memcpy */
		/* word6 context tag copied in memcpy */
		if (iocbq->iocb.ulpCt_h || iocbq->iocb.ulpCt_l) {
			ct = ((iocbq->iocb.ulpCt_h << 1) | iocbq->iocb.ulpCt_l);
			lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
				"2015 Invalid CT %x command 0x%x\n",
				ct, iocbq->iocb.ulpCommand);
			return IOCB_ERROR;
		}
		bf_set(wqe_ct, &wqe->gen_req.wqe_com, 0);
		bf_set(wqe_tmo, &wqe->gen_req.wqe_com, iocbq->iocb.ulpTimeout);
		bf_set(wqe_pu, &wqe->gen_req.wqe_com, iocbq->iocb.ulpPU);
		bf_set(wqe_dbde, &wqe->gen_req.wqe_com, 1);
		bf_set(wqe_iod, &wqe->gen_req.wqe_com, LPFC_WQE_IOD_READ);
		bf_set(wqe_qosd, &wqe->gen_req.wqe_com, 1);
		bf_set(wqe_lenloc, &wqe->gen_req.wqe_com, LPFC_WQE_LENLOC_NONE);
		bf_set(wqe_ebde_cnt, &wqe->gen_req.wqe_com, 0);
		wqe->gen_req.max_response_payload_len = total_len - xmit_len;
		command_type = OTHER_COMMAND;
		break;
	case CMD_XMIT_ELS_RSP64_CX:
		ndlp = (struct lpfc_nodelist *)iocbq->context1;
		/* words0-2 BDE memcpy */
		/* word3 iocb=iotag32 wqe=response_payload_len */
		wqe->xmit_els_rsp.response_payload_len = xmit_len;
		/* word4 */
		wqe->xmit_els_rsp.word4 = 0;
		/* word5 iocb=rsvd wqe=did */
		bf_set(wqe_els_did, &wqe->xmit_els_rsp.wqe_dest,
			 iocbq->iocb.un.xseq64.xmit_els_remoteID);

		if_type = bf_get(lpfc_sli_intf_if_type,
					&phba->sli4_hba.sli_intf);
		if (if_type >= LPFC_SLI_INTF_IF_TYPE_2) {
			if (iocbq->vport->fc_flag & FC_PT2PT) {
				/* pt2pt: respond with our own S_ID */
				bf_set(els_rsp64_sp, &wqe->xmit_els_rsp, 1);
				bf_set(els_rsp64_sid, &wqe->xmit_els_rsp,
					iocbq->vport->fc_myDID);
				if (iocbq->vport->fc_myDID == Fabric_DID) {
					bf_set(wqe_els_did,
						&wqe->xmit_els_rsp.wqe_dest, 0);
				}
			}
		}
		bf_set(wqe_ct, &wqe->xmit_els_rsp.wqe_com,
		       ((iocbq->iocb.ulpCt_h << 1) | iocbq->iocb.ulpCt_l));
		bf_set(wqe_pu, &wqe->xmit_els_rsp.wqe_com, iocbq->iocb.ulpPU);
		bf_set(wqe_rcvoxid, &wqe->xmit_els_rsp.wqe_com,
		       iocbq->iocb.unsli3.rcvsli3.ox_id);
		if (!iocbq->iocb.ulpCt_h && iocbq->iocb.ulpCt_l)
			bf_set(wqe_ctxt_tag, &wqe->xmit_els_rsp.wqe_com,
			       phba->vpi_ids[iocbq->vport->vpi]);
		bf_set(wqe_dbde, &wqe->xmit_els_rsp.wqe_com, 1);
		bf_set(wqe_iod, &wqe->xmit_els_rsp.wqe_com, LPFC_WQE_IOD_WRITE);
		bf_set(wqe_qosd, &wqe->xmit_els_rsp.wqe_com, 1);
		bf_set(wqe_lenloc, &wqe->xmit_els_rsp.wqe_com,
		       LPFC_WQE_LENLOC_WORD3);
		bf_set(wqe_ebde_cnt, &wqe->xmit_els_rsp.wqe_com, 0);
		bf_set(wqe_rsp_temp_rpi, &wqe->xmit_els_rsp,
		       phba->sli4_hba.rpi_ids[ndlp->nlp_rpi]);
		pcmd = (uint32_t *) (((struct lpfc_dmabuf *)
					iocbq->context2)->virt);
		if (phba->fc_topology == LPFC_TOPOLOGY_LOOP) {
			/* loop topology: respond with our S_ID and VPI */
			bf_set(els_rsp64_sp, &wqe->xmit_els_rsp, 1);
			bf_set(els_rsp64_sid, &wqe->xmit_els_rsp,
				 iocbq->vport->fc_myDID);
			bf_set(wqe_ct, &wqe->xmit_els_rsp.wqe_com, 1);
			bf_set(wqe_ctxt_tag, &wqe->xmit_els_rsp.wqe_com,
				phba->vpi_ids[phba->pport->vpi]);
		}
		command_type = OTHER_COMMAND;
		break;
	case CMD_CLOSE_XRI_CN:
	case CMD_ABORT_XRI_CN:
	case CMD_ABORT_XRI_CX:
		/* words 0-2 memcpy should be 0 reserved */
		/* Determine whether the aborted iocb was a FIP ELS cmd */
		abrt_iotag = iocbq->iocb.un.acxri.abortContextTag;
		if (abrt_iotag != 0 && abrt_iotag <= phba->sli.last_iotag) {
			abrtiocbq = phba->sli.iocbq_lookup[abrt_iotag];
			fip = abrtiocbq->iocb_flag & LPFC_FIP_ELS_ID_MASK;
		} else
			fip = 0;

		if ((iocbq->iocb.ulpCommand == CMD_CLOSE_XRI_CN) || fip)
			/*
			 * The command is a close or the aborted command was
			 * a FIP ELS, so the fw does not need to send an abts
			 * on the wire.
			 */
			bf_set(abort_cmd_ia, &wqe->abort_cmd, 1);
		else
			bf_set(abort_cmd_ia, &wqe->abort_cmd, 0);
		bf_set(abort_cmd_criteria, &wqe->abort_cmd, T_XRI_TAG);
		/* word5 iocb=CONTEXT_TAG|IO_TAG wqe=reserved */
		wqe->abort_cmd.rsrvd5 = 0;
		bf_set(wqe_ct, &wqe->abort_cmd.wqe_com,
			((iocbq->iocb.ulpCt_h << 1) | iocbq->iocb.ulpCt_l));
		abort_tag = iocbq->iocb.un.acxri.abortIoTag;
		/*
		 * The abort handler may send us CMD_ABORT_XRI_CN or
		 * CMD_CLOSE_XRI_CN, but the firmware only accepts
		 * CMD_ABORT_XRI_CX, so force the command here.
		 */
		bf_set(wqe_cmnd, &wqe->abort_cmd.wqe_com, CMD_ABORT_XRI_CX);
		bf_set(wqe_qosd, &wqe->abort_cmd.wqe_com, 1);
		bf_set(wqe_lenloc, &wqe->abort_cmd.wqe_com,
		       LPFC_WQE_LENLOC_NONE);
		cmnd = CMD_ABORT_XRI_CX;
		command_type = OTHER_COMMAND;
		xritag = 0;
		break;
	case CMD_XMIT_BLS_RSP64_CX:
		ndlp = (struct lpfc_nodelist *)iocbq->context1;
		/* As BLS ABTS RSP WQE is very different from other WQEs,
		 * we re-construct this WQE here based on information in
		 * iocbq from scratch.
		 */
		memset(wqe, 0, sizeof(union lpfc_wqe));
		/* OX_ID is invariable to who sent ABTS to CT exchange */
		bf_set(xmit_bls_rsp64_oxid, &wqe->xmit_bls_rsp,
		       bf_get(lpfc_abts_oxid, &iocbq->iocb.un.bls_rsp));
		if (bf_get(lpfc_abts_orig, &iocbq->iocb.un.bls_rsp) ==
		    LPFC_ABTS_UNSOL_INT) {
			/* ABTS sent by initiator to CT exchange, the
			 * RX_ID field will be filled with the newly
			 * allocated responder XRI.
			 */
			bf_set(xmit_bls_rsp64_rxid, &wqe->xmit_bls_rsp,
			       iocbq->sli4_xritag);
		} else {
			/* ABTS sent by responder to CT exchange, the
			 * RX_ID field will be filled with the responder
			 * RX_ID from ABTS.
			 */
			bf_set(xmit_bls_rsp64_rxid, &wqe->xmit_bls_rsp,
			       bf_get(lpfc_abts_rxid, &iocbq->iocb.un.bls_rsp));
		}
		bf_set(xmit_bls_rsp64_seqcnthi, &wqe->xmit_bls_rsp, 0xffff);
		bf_set(wqe_xmit_bls_pt, &wqe->xmit_bls_rsp.wqe_dest, 0x1);

		/* Use CT=VPI */
		bf_set(wqe_els_did, &wqe->xmit_bls_rsp.wqe_dest,
			ndlp->nlp_DID);
		bf_set(xmit_bls_rsp64_temprpi, &wqe->xmit_bls_rsp,
			iocbq->iocb.ulpContext);
		bf_set(wqe_ct, &wqe->xmit_bls_rsp.wqe_com, 1);
		bf_set(wqe_ctxt_tag, &wqe->xmit_bls_rsp.wqe_com,
			phba->vpi_ids[phba->pport->vpi]);
		bf_set(wqe_qosd, &wqe->xmit_bls_rsp.wqe_com, 1);
		bf_set(wqe_lenloc, &wqe->xmit_bls_rsp.wqe_com,
		       LPFC_WQE_LENLOC_NONE);
		/* Overwrite the pre-set command type with OTHER_COMMAND */
		command_type = OTHER_COMMAND;
		if (iocbq->iocb.un.xseq64.w5.hcsw.Rctl == FC_RCTL_BA_RJT) {
			/* BA_RJT: copy the reject reason/explanation codes */
			bf_set(xmit_bls_rsp64_rjt_vspec, &wqe->xmit_bls_rsp,
			       bf_get(lpfc_vndr_code, &iocbq->iocb.un.bls_rsp));
			bf_set(xmit_bls_rsp64_rjt_expc, &wqe->xmit_bls_rsp,
			       bf_get(lpfc_rsn_expln, &iocbq->iocb.un.bls_rsp));
			bf_set(xmit_bls_rsp64_rjt_rsnc, &wqe->xmit_bls_rsp,
			       bf_get(lpfc_rsn_code, &iocbq->iocb.un.bls_rsp));
		}

		break;
	case CMD_SEND_FRAME:
		bf_set(wqe_xri_tag, &wqe->generic.wqe_com, xritag);
		bf_set(wqe_reqtag, &wqe->generic.wqe_com, iocbq->iotag);
		return 0;
	case CMD_XRI_ABORTED_CX:
	case CMD_CREATE_XRI_CR: /* Do we expect to use this? */
	case CMD_IOCB_FCP_IBIDIR64_CR: /* bidirectional xfer */
	case CMD_FCP_TSEND64_CX:
	case CMD_FCP_TRSP64_CX:
	case CMD_FCP_AUTO_TRSP_CX:
	default:
		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
				"2014 Invalid command 0x%x\n",
				iocbq->iocb.ulpCommand);
		return IOCB_ERROR;
		break;
	}

	/* Propagate any T10 DIF setting requested by the upper layer */
	if (iocbq->iocb_flag & LPFC_IO_DIF_PASS)
		bf_set(wqe_dif, &wqe->generic.wqe_com, LPFC_WQE_DIF_PASSTHRU);
	else if (iocbq->iocb_flag & LPFC_IO_DIF_STRIP)
		bf_set(wqe_dif, &wqe->generic.wqe_com, LPFC_WQE_DIF_STRIP);
	else if (iocbq->iocb_flag & LPFC_IO_DIF_INSERT)
		bf_set(wqe_dif, &wqe->generic.wqe_com, LPFC_WQE_DIF_INSERT);
	iocbq->iocb_flag &= ~(LPFC_IO_DIF_PASS | LPFC_IO_DIF_STRIP |
			      LPFC_IO_DIF_INSERT);
	/* Fill in the common fields for all commands */
	bf_set(wqe_xri_tag, &wqe->generic.wqe_com, xritag);
	bf_set(wqe_reqtag, &wqe->generic.wqe_com, iocbq->iotag);
	wqe->generic.wqe_com.abort_tag = abort_tag;
	bf_set(wqe_cmd_type, &wqe->generic.wqe_com, command_type);
	bf_set(wqe_cmnd, &wqe->generic.wqe_com, cmnd);
	bf_set(wqe_class, &wqe->generic.wqe_com, iocbq->iocb.ulpClass);
	bf_set(wqe_cqid, &wqe->generic.wqe_com, LPFC_WQE_CQ_ID_DEFAULT);
	return 0;
}
9459
9460
9461
9462
9463
9464
9465
9466
9467
9468
9469
9470
9471
9472
9473
/**
 * __lpfc_sli_issue_iocb_s4 - SLI4 device lockless version of issue iocb
 * @phba: Pointer to HBA context object.
 * @ring_number: SLI ring number (unused here; the WQ is chosen from the
 *               iocb flags).
 * @piocb: Pointer to the command iocb.
 * @flag: SLI_IOCB_RET_IOCB means return IOCB_BUSY instead of queueing to
 *        the txq when the iocb cannot be submitted right away.
 *
 * Selects the work queue (FCP per-channel WQ, OAS WQ, or ELS WQ), obtains
 * an sglq/XRI where needed, converts the iocb to a WQE, and posts it.
 * Caller must hold the hbalock.
 *
 * Return: 0 on success, IOCB_SUCCESS (queued), IOCB_BUSY, or IOCB_ERROR.
 **/
static int
__lpfc_sli_issue_iocb_s4(struct lpfc_hba *phba, uint32_t ring_number,
			 struct lpfc_iocbq *piocb, uint32_t flag)
{
	struct lpfc_sglq *sglq;
	union lpfc_wqe128 wqe;
	struct lpfc_queue *wq;
	struct lpfc_sli_ring *pring;

	/* Get the WQ: FCP I/O goes to its hashed channel WQ (or the OAS WQ
	 * when OAS is configured and requested); everything else to els_wq.
	 */
	if ((piocb->iocb_flag & LPFC_IO_FCP) ||
	    (piocb->iocb_flag & LPFC_USE_FCPWQIDX)) {
		if (!phba->cfg_fof || (!(piocb->iocb_flag & LPFC_IO_OAS)))
			wq = phba->sli4_hba.fcp_wq[piocb->hba_wqidx];
		else
			wq = phba->sli4_hba.oas_wq;
	} else {
		wq = phba->sli4_hba.els_wq;
	}

	/* Get the corresponding ring */
	pring = wq->pring;

	/*
	 * The WQE can be either 64 or 128 bytes,
	 */

	lockdep_assert_held(&phba->hbalock);

	if (piocb->sli4_xritag == NO_XRI) {
		if (piocb->iocb.ulpCommand == CMD_ABORT_XRI_CN ||
		    piocb->iocb.ulpCommand == CMD_CLOSE_XRI_CN)
			/* aborts/closes do not need an sglq */
			sglq = NULL;
		else {
			if (!list_empty(&pring->txq)) {
				/* Preserve ordering: queue behind the
				 * iocbs already waiting on the txq.
				 */
				if (!(flag & SLI_IOCB_RET_IOCB)) {
					__lpfc_sli_ringtx_put(phba,
						pring, piocb);
					return IOCB_SUCCESS;
				} else {
					return IOCB_BUSY;
				}
			} else {
				sglq = __lpfc_sli_get_els_sglq(phba, piocb);
				if (!sglq) {
					/* no free sglq: queue or bounce */
					if (!(flag & SLI_IOCB_RET_IOCB)) {
						__lpfc_sli_ringtx_put(phba,
								pring,
								piocb);
						return IOCB_SUCCESS;
					} else
						return IOCB_BUSY;
				}
			}
		}
	} else if (piocb->iocb_flag & LPFC_IO_FCP)
		/* These IO's already have an XRI and a mapped sgl. */
		sglq = NULL;
	else {
		/*
		 * This is a continuation of a command (CX), so the sglq
		 * is already on the active list.
		 */
		sglq = __lpfc_get_active_sglq(phba, piocb->sli4_lxritag);
		if (!sglq)
			return IOCB_ERROR;
	}

	if (sglq) {
		piocb->sli4_lxritag = sglq->sli4_lxritag;
		piocb->sli4_xritag = sglq->sli4_xritag;
		if (NO_XRI == lpfc_sli4_bpl2sgl(phba, piocb, sglq))
			return IOCB_ERROR;
	}

	if (lpfc_sli4_iocb2wqe(phba, piocb, &wqe))
		return IOCB_ERROR;

	if (lpfc_sli4_wq_put(wq, &wqe))
		return IOCB_ERROR;
	lpfc_sli_ringtxcmpl_put(phba, pring, piocb);

	return 0;
}
9558
9559
9560
9561
9562
9563
9564
9565
9566
9567
9568
9569
9570int
9571__lpfc_sli_issue_iocb(struct lpfc_hba *phba, uint32_t ring_number,
9572 struct lpfc_iocbq *piocb, uint32_t flag)
9573{
9574 return phba->__lpfc_sli_issue_iocb(phba, ring_number, piocb, flag);
9575}
9576
9577
9578
9579
9580
9581
9582
9583
9584
9585
9586int
9587lpfc_sli_api_table_setup(struct lpfc_hba *phba, uint8_t dev_grp)
9588{
9589
9590 switch (dev_grp) {
9591 case LPFC_PCI_DEV_LP:
9592 phba->__lpfc_sli_issue_iocb = __lpfc_sli_issue_iocb_s3;
9593 phba->__lpfc_sli_release_iocbq = __lpfc_sli_release_iocbq_s3;
9594 break;
9595 case LPFC_PCI_DEV_OC:
9596 phba->__lpfc_sli_issue_iocb = __lpfc_sli_issue_iocb_s4;
9597 phba->__lpfc_sli_release_iocbq = __lpfc_sli_release_iocbq_s4;
9598 break;
9599 default:
9600 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
9601 "1419 Invalid HBA PCI-device group: 0x%x\n",
9602 dev_grp);
9603 return -ENODEV;
9604 break;
9605 }
9606 phba->lpfc_get_iocb_from_iocbq = lpfc_get_iocb_from_iocbq;
9607 return 0;
9608}
9609
9610
9611
9612
9613
9614
9615
9616
9617
9618
9619
/**
 * lpfc_sli4_calc_ring - Calculates which ring an iocb should be posted to
 * @phba: Pointer to HBA context object.
 * @piocb: Pointer to command iocb.
 *
 * SLI-4 only. Selects the work queue pring for @piocb: one of the FCP WQs
 * (hashed via lpfc_sli4_scmd_to_wqidx_distr unless LPFC_USE_FCPWQIDX pinned
 * the index already, e.g. for aborts), the OAS WQ when OAS is configured and
 * requested, or the ELS WQ for everything else. Also records the chosen WQ
 * index in piocb->hba_wqidx.
 *
 * Returns the selected ring, or NULL if the needed WQ is not allocated.
 */
struct lpfc_sli_ring *
lpfc_sli4_calc_ring(struct lpfc_hba *phba, struct lpfc_iocbq *piocb)
{
	if (piocb->iocb_flag &  (LPFC_IO_FCP | LPFC_USE_FCPWQIDX)) {
		if (!(phba->cfg_fof) ||
		    (!(piocb->iocb_flag & LPFC_IO_FOF))) {
			if (unlikely(!phba->sli4_hba.fcp_wq))
				return NULL;
			/*
			 * for abort iocb hba_wqidx should already
			 * be setup based on what work queue we used.
			 */
			if (!(piocb->iocb_flag & LPFC_USE_FCPWQIDX)) {
				piocb->hba_wqidx =
					lpfc_sli4_scmd_to_wqidx_distr(phba,
							      piocb->context1);
				piocb->hba_wqidx =  piocb->hba_wqidx %
					phba->cfg_fcp_io_channel;
			}
			return phba->sli4_hba.fcp_wq[piocb->hba_wqidx]->pring;
		} else {
			if (unlikely(!phba->sli4_hba.oas_wq))
				return NULL;
			piocb->hba_wqidx = 0;
			return phba->sli4_hba.oas_wq->pring;
		}
	} else {
		if (unlikely(!phba->sli4_hba.els_wq))
			return NULL;
		piocb->hba_wqidx = 0;
		return phba->sli4_hba.els_wq->pring;
	}
}
9653
9654
9655
9656
9657
9658
9659
9660
9661
9662
9663
9664
9665
9666
/**
 * lpfc_sli_issue_iocb - Wrapper function for __lpfc_sli_issue_iocb
 * @phba: Pointer to HBA context object.
 * @ring_number: Ring number to which the iocb will be issued.
 * @piocb: Pointer to command iocb.
 * @flag: Flag indicating if this command can be put into txq.
 *
 * lpfc_sli_issue_iocb is a wrapper around __lpfc_sli_issue_iocb that takes
 * the appropriate lock for the SLI revision (the per-ring lock for SLI-4,
 * hbalock for SLI-3). On SLI-4, when fcp look-ahead is enabled and this CPU
 * is the sole user of the EQ, it also polls the event queue inline to reap
 * completions without waiting for the interrupt.
 */
int
lpfc_sli_issue_iocb(struct lpfc_hba *phba, uint32_t ring_number,
		    struct lpfc_iocbq *piocb, uint32_t flag)
{
	struct lpfc_hba_eq_hdl *hba_eq_hdl;
	struct lpfc_sli_ring *pring;
	struct lpfc_queue *fpeq;
	struct lpfc_eqe *eqe;
	unsigned long iflags;
	int rc, idx;

	if (phba->sli_rev == LPFC_SLI_REV4) {
		pring = lpfc_sli4_calc_ring(phba, piocb);
		if (unlikely(pring == NULL))
			return IOCB_ERROR;

		spin_lock_irqsave(&pring->ring_lock, iflags);
		rc = __lpfc_sli_issue_iocb(phba, ring_number, piocb, flag);
		spin_unlock_irqrestore(&pring->ring_lock, iflags);

		if (lpfc_fcp_look_ahead && (piocb->iocb_flag &  LPFC_IO_FCP)) {
			idx = piocb->hba_wqidx;
			hba_eq_hdl = &phba->sli4_hba.hba_eq_hdl[idx];

			/* Only poll when no one else is using this EQ. */
			if (atomic_dec_and_test(&hba_eq_hdl->hba_eq_in_use)) {

				/* Get associated EQ with this index */
				fpeq = phba->sli4_hba.hba_eq[idx];

				/* Turn off interrupts from this EQ */
				phba->sli4_hba.sli4_eq_clr_intr(fpeq);

				/*
				 * Process all the events on FCP EQ
				 */
				while ((eqe = lpfc_sli4_eq_get(fpeq))) {
					lpfc_sli4_hba_handle_eqe(phba,
						eqe, idx);
					fpeq->EQ_processed++;
				}

				/* Always clear and re-arm the EQ */
				phba->sli4_hba.sli4_eq_release(fpeq,
					LPFC_QUEUE_REARM);
			}
			atomic_inc(&hba_eq_hdl->hba_eq_in_use);
		}
	} else {
		/* For now, SLI2/3 will still use hbalock */
		spin_lock_irqsave(&phba->hbalock, iflags);
		rc = __lpfc_sli_issue_iocb(phba, ring_number, piocb, flag);
		spin_unlock_irqrestore(&phba->hbalock, iflags);
	}
	return rc;
}
9722
9723
9724
9725
9726
9727
9728
9729
9730
9731
9732
9733
9734static int
9735lpfc_extra_ring_setup( struct lpfc_hba *phba)
9736{
9737 struct lpfc_sli *psli;
9738 struct lpfc_sli_ring *pring;
9739
9740 psli = &phba->sli;
9741
9742
9743
9744
9745 pring = &psli->sli3_ring[LPFC_FCP_RING];
9746 pring->sli.sli3.numCiocb -= SLI2_IOCB_CMD_R1XTRA_ENTRIES;
9747 pring->sli.sli3.numRiocb -= SLI2_IOCB_RSP_R1XTRA_ENTRIES;
9748 pring->sli.sli3.numCiocb -= SLI2_IOCB_CMD_R3XTRA_ENTRIES;
9749 pring->sli.sli3.numRiocb -= SLI2_IOCB_RSP_R3XTRA_ENTRIES;
9750
9751
9752 pring = &psli->sli3_ring[LPFC_EXTRA_RING];
9753
9754 pring->sli.sli3.numCiocb += SLI2_IOCB_CMD_R1XTRA_ENTRIES;
9755 pring->sli.sli3.numRiocb += SLI2_IOCB_RSP_R1XTRA_ENTRIES;
9756 pring->sli.sli3.numCiocb += SLI2_IOCB_CMD_R3XTRA_ENTRIES;
9757 pring->sli.sli3.numRiocb += SLI2_IOCB_RSP_R3XTRA_ENTRIES;
9758
9759
9760 pring->iotag_max = 4096;
9761 pring->num_mask = 1;
9762 pring->prt[0].profile = 0;
9763 pring->prt[0].rctl = phba->cfg_multi_ring_rctl;
9764 pring->prt[0].type = phba->cfg_multi_ring_type;
9765 pring->prt[0].lpfc_sli_rcv_unsol_event = NULL;
9766 return 0;
9767}
9768
9769
9770
9771
9772
9773
9774
9775
9776
9777
9778
9779
9780
9781static void
9782lpfc_sli_abts_err_handler(struct lpfc_hba *phba,
9783 struct lpfc_iocbq *iocbq)
9784{
9785 struct lpfc_nodelist *ndlp = NULL;
9786 uint16_t rpi = 0, vpi = 0;
9787 struct lpfc_vport *vport = NULL;
9788
9789
9790 vpi = iocbq->iocb.un.asyncstat.sub_ctxt_tag;
9791 rpi = iocbq->iocb.ulpContext;
9792
9793 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
9794 "3092 Port generated ABTS async event "
9795 "on vpi %d rpi %d status 0x%x\n",
9796 vpi, rpi, iocbq->iocb.ulpStatus);
9797
9798 vport = lpfc_find_vport_by_vpid(phba, vpi);
9799 if (!vport)
9800 goto err_exit;
9801 ndlp = lpfc_findnode_rpi(vport, rpi);
9802 if (!ndlp || !NLP_CHK_NODE_ACT(ndlp))
9803 goto err_exit;
9804
9805 if (iocbq->iocb.ulpStatus == IOSTAT_LOCAL_REJECT)
9806 lpfc_sli_abts_recover_port(vport, ndlp);
9807 return;
9808
9809 err_exit:
9810 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
9811 "3095 Event Context not found, no "
9812 "action on vpi %d rpi %d status 0x%x, reason 0x%x\n",
9813 iocbq->iocb.ulpContext, iocbq->iocb.ulpStatus,
9814 vpi, rpi);
9815}
9816
9817
9818
9819
9820
9821
9822
9823
9824
9825
9826
/**
 * lpfc_sli4_abts_err_handler - handle a failed ABTS request from an SLI4 port.
 * @phba: pointer to HBA context object.
 * @ndlp: nodelist pointer for the impacted rport.
 * @axri: pointer to the wcqe containing the failed exchange.
 *
 * The driver calls this routine when it receives an ABORT_XRI_FCP CQE.
 * The function examines the status and, for the failure cases described
 * below, triggers port recovery on the rport.
 */
void
lpfc_sli4_abts_err_handler(struct lpfc_hba *phba,
			   struct lpfc_nodelist *ndlp,
			   struct sli4_wcqe_xri_aborted *axri)
{
	struct lpfc_vport *vport;
	uint32_t ext_status = 0;

	if (!ndlp || !NLP_CHK_NODE_ACT(ndlp)) {
		lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
				"3115 Node Context not found, driver "
				"ignoring abts err event\n");
		return;
	}

	vport = ndlp->vport;
	lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
			"3116 Port generated FCP XRI ABORT event on "
			"vpi %d rpi %d xri x%x status 0x%x parameter x%x\n",
			ndlp->vport->vpi, phba->sli4_hba.rpi_ids[ndlp->nlp_rpi],
			bf_get(lpfc_wcqe_xa_xri, axri),
			bf_get(lpfc_wcqe_xa_status, axri),
			axri->parameter);

	/*
	 * Catch the ABTS protocol failure case.  Older OCe FW releases
	 * returned LOCAL_REJECT and 0 for a failed ABTS exchange and later
	 * OCe and LPe FW releases returned LOCAL_REJECT and
	 * SEQUENCE_TIMEOUT.
	 */
	ext_status = axri->parameter & IOERR_PARAM_MASK;
	if ((bf_get(lpfc_wcqe_xa_status, axri) == IOSTAT_LOCAL_REJECT) &&
	    ((ext_status == IOERR_SEQUENCE_TIMEOUT) || (ext_status == 0)))
		lpfc_sli_abts_recover_port(vport, ndlp);
}
9861
9862
9863
9864
9865
9866
9867
9868
9869
9870
9871
9872
9873
9874
/**
 * lpfc_sli_async_event_handler - ASYNC iocb handler function
 * @phba: Pointer to HBA context object.
 * @pring: Pointer to driver SLI ring object.
 * @iocbq: Pointer to iocb object.
 *
 * This function is called by the slow ring event handler function to handle
 * an ASYNC_STATUS iocb. For temperature events it posts a vendor event to the
 * FC transport; for ABTS errors it delegates to lpfc_sli_abts_err_handler;
 * anything else is dumped word-by-word to the log.
 */
static void
lpfc_sli_async_event_handler(struct lpfc_hba * phba,
	struct lpfc_sli_ring * pring, struct lpfc_iocbq * iocbq)
{
	IOCB_t *icmd;
	uint16_t evt_code;
	struct temp_event temp_event_data;
	struct Scsi_Host *shost;
	uint32_t *iocb_w;

	icmd = &iocbq->iocb;
	evt_code = icmd->un.asyncstat.evt_code;

	switch (evt_code) {
	case ASYNC_TEMP_WARN:
	case ASYNC_TEMP_SAFE:
		/* Temperature (Celsius) is carried in ulpContext. */
		temp_event_data.data = (uint32_t) icmd->ulpContext;
		temp_event_data.event_type = FC_REG_TEMPERATURE_EVENT;
		if (evt_code == ASYNC_TEMP_WARN) {
			temp_event_data.event_code = LPFC_THRESHOLD_TEMP;
			lpfc_printf_log(phba, KERN_ERR, LOG_TEMP,
				"0347 Adapter is very hot, please take "
				"corrective action. temperature : %d Celsius\n",
				(uint32_t) icmd->ulpContext);
		} else {
			temp_event_data.event_code = LPFC_NORMAL_TEMP;
			lpfc_printf_log(phba, KERN_ERR, LOG_TEMP,
				"0340 Adapter temperature is OK now. "
				"temperature : %d Celsius\n",
				(uint32_t) icmd->ulpContext);
		}

		/* Send temperature change event to applications */
		shost = lpfc_shost_from_vport(phba->pport);
		fc_host_post_vendor_event(shost, fc_get_event_number(),
			sizeof(temp_event_data), (char *) &temp_event_data,
			LPFC_NL_VENDOR_ID);
		break;
	case ASYNC_STATUS_CN:
		lpfc_sli_abts_err_handler(phba, iocbq);
		break;
	default:
		/* Unknown event code: dump the raw iocb words. */
		iocb_w = (uint32_t *) icmd;
		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
			"0346 Ring %d handler: unexpected ASYNC_STATUS"
			" evt_code 0x%x\n"
			"W0  0x%08x W1  0x%08x W2  0x%08x W3  0x%08x\n"
			"W4  0x%08x W5  0x%08x W6  0x%08x W7  0x%08x\n"
			"W8  0x%08x W9  0x%08x W10 0x%08x W11 0x%08x\n"
			"W12 0x%08x W13 0x%08x W14 0x%08x W15 0x%08x\n",
			pring->ringno, icmd->un.asyncstat.evt_code,
			iocb_w[0], iocb_w[1], iocb_w[2], iocb_w[3],
			iocb_w[4], iocb_w[5], iocb_w[6], iocb_w[7],
			iocb_w[8], iocb_w[9], iocb_w[10], iocb_w[11],
			iocb_w[12], iocb_w[13], iocb_w[14], iocb_w[15]);

		break;
	}
}
9934
9935
9936
9937
9938
9939
9940
9941
9942
9943
9944
9945
9946
9947int
9948lpfc_sli4_setup(struct lpfc_hba *phba)
9949{
9950 struct lpfc_sli_ring *pring;
9951
9952 pring = phba->sli4_hba.els_wq->pring;
9953 pring->num_mask = LPFC_MAX_RING_MASK;
9954 pring->prt[0].profile = 0;
9955 pring->prt[0].rctl = FC_RCTL_ELS_REQ;
9956 pring->prt[0].type = FC_TYPE_ELS;
9957 pring->prt[0].lpfc_sli_rcv_unsol_event =
9958 lpfc_els_unsol_event;
9959 pring->prt[1].profile = 0;
9960 pring->prt[1].rctl = FC_RCTL_ELS_REP;
9961 pring->prt[1].type = FC_TYPE_ELS;
9962 pring->prt[1].lpfc_sli_rcv_unsol_event =
9963 lpfc_els_unsol_event;
9964 pring->prt[2].profile = 0;
9965
9966 pring->prt[2].rctl = FC_RCTL_DD_UNSOL_CTL;
9967
9968 pring->prt[2].type = FC_TYPE_CT;
9969 pring->prt[2].lpfc_sli_rcv_unsol_event =
9970 lpfc_ct_unsol_event;
9971 pring->prt[3].profile = 0;
9972
9973 pring->prt[3].rctl = FC_RCTL_DD_SOL_CTL;
9974
9975 pring->prt[3].type = FC_TYPE_CT;
9976 pring->prt[3].lpfc_sli_rcv_unsol_event =
9977 lpfc_ct_unsol_event;
9978 return 0;
9979}
9980
9981
9982
9983
9984
9985
9986
9987
9988
9989
9990
9991
/**
 * lpfc_sli_setup - SLI ring setup function
 * @phba: Pointer to HBA context object.
 *
 * lpfc_sli_setup sets up rings of the SLI interface with no lock held. This
 * function initializes all the SLI-3 rings: the number of command and
 * response iocbs, the per-iocb sizes (SLI-2 vs SLI-3), iotag limits, and the
 * unsolicited-frame routing masks on the ELS ring. It also warns when the
 * total iocb footprint would not fit into the SLIM. Always returns 0.
 */
int
lpfc_sli_setup(struct lpfc_hba *phba)
{
	int i, totiocbsize = 0;
	struct lpfc_sli *psli = &phba->sli;
	struct lpfc_sli_ring *pring;

	psli->num_rings = MAX_SLI3_CONFIGURED_RINGS;
	psli->sli_flag = 0;

	psli->iocbq_lookup = NULL;
	psli->iocbq_lookup_len = 0;
	psli->last_iotag = 0;

	for (i = 0; i < psli->num_rings; i++) {
		pring = &psli->sli3_ring[i];
		switch (i) {
		case LPFC_FCP_RING:	/* ring 0 - FCP */
			/* numCiocb and numRiocb are used in lpfc_sli_resume_iocb */
			pring->sli.sli3.numCiocb = SLI2_IOCB_CMD_R0_ENTRIES;
			pring->sli.sli3.numRiocb = SLI2_IOCB_RSP_R0_ENTRIES;
			pring->sli.sli3.numCiocb +=
				SLI2_IOCB_CMD_R1XTRA_ENTRIES;
			pring->sli.sli3.numRiocb +=
				SLI2_IOCB_RSP_R1XTRA_ENTRIES;
			pring->sli.sli3.numCiocb +=
				SLI2_IOCB_CMD_R3XTRA_ENTRIES;
			pring->sli.sli3.numRiocb +=
				SLI2_IOCB_RSP_R3XTRA_ENTRIES;
			pring->sli.sli3.sizeCiocb = (phba->sli_rev == 3) ?
							SLI3_IOCB_CMD_SIZE :
							SLI2_IOCB_CMD_SIZE;
			pring->sli.sli3.sizeRiocb = (phba->sli_rev == 3) ?
							SLI3_IOCB_RSP_SIZE :
							SLI2_IOCB_RSP_SIZE;
			pring->iotag_ctr = 0;
			pring->iotag_max =
			    (phba->cfg_hba_queue_depth * 2);
			pring->fast_iotag = pring->iotag_max;
			pring->num_mask = 0;
			break;
		case LPFC_EXTRA_RING:	/* ring 1 - EXTRA */
			/* numCiocb and numRiocb are used in lpfc_sli_resume_iocb */
			pring->sli.sli3.numCiocb = SLI2_IOCB_CMD_R1_ENTRIES;
			pring->sli.sli3.numRiocb = SLI2_IOCB_RSP_R1_ENTRIES;
			pring->sli.sli3.sizeCiocb = (phba->sli_rev == 3) ?
							SLI3_IOCB_CMD_SIZE :
							SLI2_IOCB_CMD_SIZE;
			pring->sli.sli3.sizeRiocb = (phba->sli_rev == 3) ?
							SLI3_IOCB_RSP_SIZE :
							SLI2_IOCB_RSP_SIZE;
			pring->iotag_max = phba->cfg_hba_queue_depth;
			pring->num_mask = 0;
			break;
		case LPFC_ELS_RING:	/* ring 2 - ELS / CT */
			/* numCiocb and numRiocb are used in lpfc_sli_resume_iocb */
			pring->sli.sli3.numCiocb = SLI2_IOCB_CMD_R2_ENTRIES;
			pring->sli.sli3.numRiocb = SLI2_IOCB_RSP_R2_ENTRIES;
			pring->sli.sli3.sizeCiocb = (phba->sli_rev == 3) ?
							SLI3_IOCB_CMD_SIZE :
							SLI2_IOCB_CMD_SIZE;
			pring->sli.sli3.sizeRiocb = (phba->sli_rev == 3) ?
							SLI3_IOCB_RSP_SIZE :
							SLI2_IOCB_RSP_SIZE;
			pring->fast_iotag = 0;
			pring->iotag_ctr = 0;
			pring->iotag_max = 4096;
			pring->lpfc_sli_rcv_async_status =
				lpfc_sli_async_event_handler;
			pring->num_mask = LPFC_MAX_RING_MASK;
			pring->prt[0].profile = 0;	/* Mask 0 */
			pring->prt[0].rctl = FC_RCTL_ELS_REQ;
			pring->prt[0].type = FC_TYPE_ELS;
			pring->prt[0].lpfc_sli_rcv_unsol_event =
			    lpfc_els_unsol_event;
			pring->prt[1].profile = 0;	/* Mask 1 */
			pring->prt[1].rctl = FC_RCTL_ELS_REP;
			pring->prt[1].type = FC_TYPE_ELS;
			pring->prt[1].lpfc_sli_rcv_unsol_event =
			    lpfc_els_unsol_event;
			pring->prt[2].profile = 0;	/* Mask 2 */
			/* NameServer Inquiry */
			pring->prt[2].rctl = FC_RCTL_DD_UNSOL_CTL;
			/* NameServer */
			pring->prt[2].type = FC_TYPE_CT;
			pring->prt[2].lpfc_sli_rcv_unsol_event =
			    lpfc_ct_unsol_event;
			pring->prt[3].profile = 0;	/* Mask 3 */
			/* NameServer response */
			pring->prt[3].rctl = FC_RCTL_DD_SOL_CTL;
			/* NameServer */
			pring->prt[3].type = FC_TYPE_CT;
			pring->prt[3].lpfc_sli_rcv_unsol_event =
			    lpfc_ct_unsol_event;
			break;
		}
		totiocbsize += (pring->sli.sli3.numCiocb *
			pring->sli.sli3.sizeCiocb) +
			(pring->sli.sli3.numRiocb * pring->sli.sli3.sizeRiocb);
	}
	if (totiocbsize > MAX_SLIM_IOCB_SIZE) {
		/* Too many cmd / rsp ring entries in SLI2 SLIM */
		printk(KERN_ERR "%d:0462 Too many cmd / rsp ring entries in "
		       "SLI2 SLIM Data: x%x x%lx\n",
		       phba->brd_no, totiocbsize,
		       (unsigned long) MAX_SLIM_IOCB_SIZE);
	}
	if (phba->cfg_multi_ring_support == 2)
		lpfc_extra_ring_setup(phba);

	return 0;
}
10104
10105
10106
10107
10108
10109
10110
10111
10112
10113
10114
10115
10116void
10117lpfc_sli4_queue_init(struct lpfc_hba *phba)
10118{
10119 struct lpfc_sli *psli;
10120 struct lpfc_sli_ring *pring;
10121 int i;
10122
10123 psli = &phba->sli;
10124 spin_lock_irq(&phba->hbalock);
10125 INIT_LIST_HEAD(&psli->mboxq);
10126 INIT_LIST_HEAD(&psli->mboxq_cmpl);
10127
10128 for (i = 0; i < phba->cfg_fcp_io_channel; i++) {
10129 pring = phba->sli4_hba.fcp_wq[i]->pring;
10130 pring->flag = 0;
10131 pring->ringno = LPFC_FCP_RING;
10132 INIT_LIST_HEAD(&pring->txq);
10133 INIT_LIST_HEAD(&pring->txcmplq);
10134 INIT_LIST_HEAD(&pring->iocb_continueq);
10135 spin_lock_init(&pring->ring_lock);
10136 }
10137 for (i = 0; i < phba->cfg_nvme_io_channel; i++) {
10138 pring = phba->sli4_hba.nvme_wq[i]->pring;
10139 pring->flag = 0;
10140 pring->ringno = LPFC_FCP_RING;
10141 INIT_LIST_HEAD(&pring->txq);
10142 INIT_LIST_HEAD(&pring->txcmplq);
10143 INIT_LIST_HEAD(&pring->iocb_continueq);
10144 spin_lock_init(&pring->ring_lock);
10145 }
10146 pring = phba->sli4_hba.els_wq->pring;
10147 pring->flag = 0;
10148 pring->ringno = LPFC_ELS_RING;
10149 INIT_LIST_HEAD(&pring->txq);
10150 INIT_LIST_HEAD(&pring->txcmplq);
10151 INIT_LIST_HEAD(&pring->iocb_continueq);
10152 spin_lock_init(&pring->ring_lock);
10153
10154 if (phba->cfg_nvme_io_channel) {
10155 pring = phba->sli4_hba.nvmels_wq->pring;
10156 pring->flag = 0;
10157 pring->ringno = LPFC_ELS_RING;
10158 INIT_LIST_HEAD(&pring->txq);
10159 INIT_LIST_HEAD(&pring->txcmplq);
10160 INIT_LIST_HEAD(&pring->iocb_continueq);
10161 spin_lock_init(&pring->ring_lock);
10162 }
10163
10164 if (phba->cfg_fof) {
10165 pring = phba->sli4_hba.oas_wq->pring;
10166 pring->flag = 0;
10167 pring->ringno = LPFC_FCP_RING;
10168 INIT_LIST_HEAD(&pring->txq);
10169 INIT_LIST_HEAD(&pring->txcmplq);
10170 INIT_LIST_HEAD(&pring->iocb_continueq);
10171 spin_lock_init(&pring->ring_lock);
10172 }
10173
10174 spin_unlock_irq(&phba->hbalock);
10175}
10176
10177
10178
10179
10180
10181
10182
10183
10184
10185
10186
10187
10188void
10189lpfc_sli_queue_init(struct lpfc_hba *phba)
10190{
10191 struct lpfc_sli *psli;
10192 struct lpfc_sli_ring *pring;
10193 int i;
10194
10195 psli = &phba->sli;
10196 spin_lock_irq(&phba->hbalock);
10197 INIT_LIST_HEAD(&psli->mboxq);
10198 INIT_LIST_HEAD(&psli->mboxq_cmpl);
10199
10200 for (i = 0; i < psli->num_rings; i++) {
10201 pring = &psli->sli3_ring[i];
10202 pring->ringno = i;
10203 pring->sli.sli3.next_cmdidx = 0;
10204 pring->sli.sli3.local_getidx = 0;
10205 pring->sli.sli3.cmdidx = 0;
10206 INIT_LIST_HEAD(&pring->iocb_continueq);
10207 INIT_LIST_HEAD(&pring->iocb_continue_saveq);
10208 INIT_LIST_HEAD(&pring->postbufq);
10209 pring->flag = 0;
10210 INIT_LIST_HEAD(&pring->txq);
10211 INIT_LIST_HEAD(&pring->txcmplq);
10212 spin_lock_init(&pring->ring_lock);
10213 }
10214 spin_unlock_irq(&phba->hbalock);
10215}
10216
10217
10218
10219
10220
10221
10222
10223
10224
10225
10226
10227
10228
10229
10230
10231
/**
 * lpfc_sli_mbox_sys_flush - Flush mailbox command sub-system
 * @phba: Pointer to HBA context object.
 *
 * This routine flushes the mailbox command subsystem. It will undo any
 * outstanding pending mailbox command: the in-progress command (if any), all
 * queued commands, and all completed-but-unprocessed commands. Each is
 * completed back to its owner with MBX_NOT_FINISHED status. The completion
 * handlers run outside hbalock.
 */
static void
lpfc_sli_mbox_sys_flush(struct lpfc_hba *phba)
{
	LIST_HEAD(completions);
	struct lpfc_sli *psli = &phba->sli;
	LPFC_MBOXQ_t *pmb;
	unsigned long iflag;

	/* Flush all the mailbox commands in the mbox system */
	spin_lock_irqsave(&phba->hbalock, iflag);
	/* The pending mailbox command queue */
	list_splice_init(&phba->sli.mboxq, &completions);
	/* The outstanding active mailbox command */
	if (psli->mbox_active) {
		list_add_tail(&psli->mbox_active->list, &completions);
		psli->mbox_active = NULL;
		psli->sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
	}
	/* The completed mailbox command queue */
	list_splice_init(&phba->sli.mboxq_cmpl, &completions);
	spin_unlock_irqrestore(&phba->hbalock, iflag);

	/* Return all flushed mailbox commands with MBX_NOT_FINISHED status */
	while (!list_empty(&completions)) {
		list_remove_head(&completions, pmb, LPFC_MBOXQ_t, list);
		pmb->u.mb.mbxStatus = MBX_NOT_FINISHED;
		if (pmb->mbox_cmpl)
			pmb->mbox_cmpl(phba, pmb);
	}
}
10262
10263
10264
10265
10266
10267
10268
10269
10270
10271
10272
10273
10274
10275
10276
10277
10278
10279
/**
 * lpfc_sli_host_down - Vport cleanup function
 * @vport: Pointer to virtual port object.
 *
 * lpfc_sli_host_down is called in the HBA interface reset and vport delete
 * paths. This function aborts all the outstanding calls to the HBA that
 * belong to @vport: txq iocbs are moved to a local list and cancelled with
 * IOSTAT_LOCAL_REJECT/IOERR_SLI_DOWN; txcmplq iocbs already with the HBA get
 * an abort iotag issued. This function always returns 1.
 */
int
lpfc_sli_host_down(struct lpfc_vport *vport)
{
	LIST_HEAD(completions);
	struct lpfc_hba *phba = vport->phba;
	struct lpfc_sli *psli = &phba->sli;
	struct lpfc_queue *qp = NULL;
	struct lpfc_sli_ring *pring;
	struct lpfc_iocbq *iocb, *next_iocb;
	int i;
	unsigned long flags = 0;
	uint16_t prev_pring_flag;

	lpfc_cleanup_discovery_resources(vport);

	spin_lock_irqsave(&phba->hbalock, flags);

	/*
	 * Error everything on the txq since these iocbs
	 * have not been given to the FW yet.
	 * Also issue ABTS for everything on the txcmplq
	 */
	if (phba->sli_rev != LPFC_SLI_REV4) {
		for (i = 0; i < psli->num_rings; i++) {
			pring = &psli->sli3_ring[i];
			prev_pring_flag = pring->flag;
			/* Only slow rings */
			if (pring->ringno == LPFC_ELS_RING) {
				pring->flag |= LPFC_DEFERRED_RING_EVENT;
				/* Set the lpfc data pending flag */
				set_bit(LPFC_DATA_READY, &phba->data_flags);
			}
			list_for_each_entry_safe(iocb, next_iocb,
						 &pring->txq, list) {
				if (iocb->vport != vport)
					continue;
				list_move_tail(&iocb->list, &completions);
			}
			list_for_each_entry_safe(iocb, next_iocb,
						 &pring->txcmplq, list) {
				if (iocb->vport != vport)
					continue;
				lpfc_sli_issue_abort_iotag(phba, pring, iocb);
			}
			pring->flag = prev_pring_flag;
		}
	} else {
		list_for_each_entry(qp, &phba->sli4_hba.lpfc_wq_list, wq_list) {
			pring = qp->pring;
			if (!pring)
				continue;
			if (pring == phba->sli4_hba.els_wq->pring) {
				pring->flag |= LPFC_DEFERRED_RING_EVENT;
				/* Set the lpfc data pending flag */
				set_bit(LPFC_DATA_READY, &phba->data_flags);
			}
			prev_pring_flag = pring->flag;
			spin_lock_irq(&pring->ring_lock);
			list_for_each_entry_safe(iocb, next_iocb,
						 &pring->txq, list) {
				if (iocb->vport != vport)
					continue;
				list_move_tail(&iocb->list, &completions);
			}
			spin_unlock_irq(&pring->ring_lock);
			list_for_each_entry_safe(iocb, next_iocb,
						 &pring->txcmplq, list) {
				if (iocb->vport != vport)
					continue;
				lpfc_sli_issue_abort_iotag(phba, pring, iocb);
			}
			pring->flag = prev_pring_flag;
		}
	}
	spin_unlock_irqrestore(&phba->hbalock, flags);

	/* Cancel all the IOCBs from the completions list */
	lpfc_sli_cancel_iocbs(phba, &completions, IOSTAT_LOCAL_REJECT,
			      IOERR_SLI_DOWN);
	return 1;
}
10361
10362
10363
10364
10365
10366
10367
10368
10369
10370
10371
10372
10373
10374
10375
10376
/**
 * lpfc_sli_hba_down - Resource cleanup function for the HBA
 * @phba: Pointer to HBA context object.
 *
 * This function cleans up all iocb, buffers and mailbox commands while
 * shutting down the HBA. It first shuts down the mailbox subsystem, then
 * flushes every txq to a local list and cancels those iocbs with
 * IOSTAT_LOCAL_REJECT/IOERR_SLI_DOWN, frees the posted ELS buffers, and
 * finally stops the mailbox timer. This function always returns 1.
 */
int
lpfc_sli_hba_down(struct lpfc_hba *phba)
{
	LIST_HEAD(completions);
	struct lpfc_sli *psli = &phba->sli;
	struct lpfc_queue *qp = NULL;
	struct lpfc_sli_ring *pring;
	struct lpfc_dmabuf *buf_ptr;
	unsigned long flags = 0;
	int i;

	/* Shutdown the mailbox command sub-system */
	lpfc_sli_mbox_sys_shutdown(phba, LPFC_MBX_WAIT);

	lpfc_hba_down_prep(phba);

	lpfc_fabric_abort_hba(phba);

	spin_lock_irqsave(&phba->hbalock, flags);

	/*
	 * Error everything on the txq since these iocbs
	 * have not been given to the FW yet.
	 */
	if (phba->sli_rev != LPFC_SLI_REV4) {
		for (i = 0; i < psli->num_rings; i++) {
			pring = &psli->sli3_ring[i];
			/* Only slow rings */
			if (pring->ringno == LPFC_ELS_RING) {
				pring->flag |= LPFC_DEFERRED_RING_EVENT;
				/* Set the lpfc data pending flag */
				set_bit(LPFC_DATA_READY, &phba->data_flags);
			}
			list_splice_init(&pring->txq, &completions);
		}
	} else {
		list_for_each_entry(qp, &phba->sli4_hba.lpfc_wq_list, wq_list) {
			pring = qp->pring;
			if (!pring)
				continue;
			spin_lock_irq(&pring->ring_lock);
			list_splice_init(&pring->txq, &completions);
			spin_unlock_irq(&pring->ring_lock);
			if (pring == phba->sli4_hba.els_wq->pring) {
				pring->flag |= LPFC_DEFERRED_RING_EVENT;
				/* Set the lpfc data pending flag */
				set_bit(LPFC_DATA_READY, &phba->data_flags);
			}
		}
	}
	spin_unlock_irqrestore(&phba->hbalock, flags);

	/* Cancel all the IOCBs from the completions list */
	lpfc_sli_cancel_iocbs(phba, &completions, IOSTAT_LOCAL_REJECT,
			      IOERR_SLI_DOWN);

	spin_lock_irqsave(&phba->hbalock, flags);
	list_splice_init(&phba->elsbuf, &completions);
	phba->elsbuf_cnt = 0;
	phba->elsbuf_prev_cnt = 0;
	spin_unlock_irqrestore(&phba->hbalock, flags);

	while (!list_empty(&completions)) {
		list_remove_head(&completions, buf_ptr,
			struct lpfc_dmabuf, list);
		lpfc_mbuf_free(phba, buf_ptr->virt, buf_ptr->phys);
		kfree(buf_ptr);
	}

	/* Return any active mbox cmds */
	del_timer_sync(&psli->mbox_tmo);

	spin_lock_irqsave(&phba->pport->work_port_lock, flags);
	phba->pport->work_port_events &= ~WORKER_MBOX_TMO;
	spin_unlock_irqrestore(&phba->pport->work_port_lock, flags);

	return 1;
}
10455
10456
10457
10458
10459
10460
10461
10462
10463
10464
10465
10466
10467
10468void
10469lpfc_sli_pcimem_bcopy(void *srcp, void *destp, uint32_t cnt)
10470{
10471 uint32_t *src = srcp;
10472 uint32_t *dest = destp;
10473 uint32_t ldata;
10474 int i;
10475
10476 for (i = 0; i < (int)cnt; i += sizeof (uint32_t)) {
10477 ldata = *src;
10478 ldata = le32_to_cpu(ldata);
10479 *dest = ldata;
10480 src++;
10481 dest++;
10482 }
10483}
10484
10485
10486
10487
10488
10489
10490
10491
10492
10493
10494
10495
10496void
10497lpfc_sli_bemem_bcopy(void *srcp, void *destp, uint32_t cnt)
10498{
10499 uint32_t *src = srcp;
10500 uint32_t *dest = destp;
10501 uint32_t ldata;
10502 int i;
10503
10504 for (i = 0; i < (int)cnt; i += sizeof(uint32_t)) {
10505 ldata = *src;
10506 ldata = be32_to_cpu(ldata);
10507 *dest = ldata;
10508 src++;
10509 dest++;
10510 }
10511}
10512
10513
10514
10515
10516
10517
10518
10519
10520
10521
10522
/**
 * lpfc_sli_ringpostbuf_put - Function to add a buffer to postbufq
 * @phba: Pointer to HBA context object.
 * @pring: Pointer to driver SLI ring object.
 * @mp: Pointer to driver buffer object.
 *
 * This function is called with no lock held. It adds a buffer to the ring's
 * postbufq and bumps the count, under hbalock. Always returns 0. The caller
 * retains ownership of @mp until it is retrieved again via one of the
 * postbufq get routines.
 */
int
lpfc_sli_ringpostbuf_put(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
			 struct lpfc_dmabuf *mp)
{
	/* Stick struct lpfc_dmabuf at end of postbufq so driver can look it
	   up when the frame arrives. */
	spin_lock_irq(&phba->hbalock);
	list_add_tail(&mp->list, &pring->postbufq);
	pring->postbufq_cnt++;
	spin_unlock_irq(&phba->hbalock);
	return 0;
}
10535
10536
10537
10538
10539
10540
10541
10542
10543
10544
10545
10546
/**
 * lpfc_sli_get_buffer_tag - allocates a tag for a CMD_QUE_XRI64_CX buffer
 * @phba: Pointer to HBA context object.
 *
 * When HBQ is enabled, buffers are searched based on tags. This function
 * allocates a new tag for the buffer posted via the CMD_QUE_XRI64_CX iocb.
 * The function returns the allocated tag. The function is not guaranteed to
 * return the same new tag as the previous call if the counter wraps, but the
 * tag bit below keeps the namespaces distinct.
 */
uint32_t
lpfc_sli_get_buffer_tag(struct lpfc_hba *phba)
{
	spin_lock_irq(&phba->hbalock);
	phba->buffer_tag_count++;
	/*
	 * Always set the QUE_BUFTAG_BIT to distiguish between
	 * a tag assigned by HBQ.
	 */
	phba->buffer_tag_count |= QUE_BUFTAG_BIT;
	spin_unlock_irq(&phba->hbalock);
	return phba->buffer_tag_count;
}
10560
10561
10562
10563
10564
10565
10566
10567
10568
10569
10570
10571
10572
10573
10574
10575
10576struct lpfc_dmabuf *
10577lpfc_sli_ring_taggedbuf_get(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
10578 uint32_t tag)
10579{
10580 struct lpfc_dmabuf *mp, *next_mp;
10581 struct list_head *slp = &pring->postbufq;
10582
10583
10584 spin_lock_irq(&phba->hbalock);
10585 list_for_each_entry_safe(mp, next_mp, &pring->postbufq, list) {
10586 if (mp->buffer_tag == tag) {
10587 list_del_init(&mp->list);
10588 pring->postbufq_cnt--;
10589 spin_unlock_irq(&phba->hbalock);
10590 return mp;
10591 }
10592 }
10593
10594 spin_unlock_irq(&phba->hbalock);
10595 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
10596 "0402 Cannot find virtual addr for buffer tag on "
10597 "ring %d Data x%lx x%p x%p x%x\n",
10598 pring->ringno, (unsigned long) tag,
10599 slp->next, slp->prev, pring->postbufq_cnt);
10600
10601 return NULL;
10602}
10603
10604
10605
10606
10607
10608
10609
10610
10611
10612
10613
10614
10615
10616
10617
10618
10619
10620struct lpfc_dmabuf *
10621lpfc_sli_ringpostbuf_get(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
10622 dma_addr_t phys)
10623{
10624 struct lpfc_dmabuf *mp, *next_mp;
10625 struct list_head *slp = &pring->postbufq;
10626
10627
10628 spin_lock_irq(&phba->hbalock);
10629 list_for_each_entry_safe(mp, next_mp, &pring->postbufq, list) {
10630 if (mp->phys == phys) {
10631 list_del_init(&mp->list);
10632 pring->postbufq_cnt--;
10633 spin_unlock_irq(&phba->hbalock);
10634 return mp;
10635 }
10636 }
10637
10638 spin_unlock_irq(&phba->hbalock);
10639 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
10640 "0410 Cannot find virtual addr for mapped buf on "
10641 "ring %d Data x%llx x%p x%p x%x\n",
10642 pring->ringno, (unsigned long long)phys,
10643 slp->next, slp->prev, pring->postbufq_cnt);
10644 return NULL;
10645}
10646
10647
10648
10649
10650
10651
10652
10653
10654
10655
10656
10657
/**
 * lpfc_sli_abort_els_cmpl - Completion handler for the els abort iocbs
 * @phba: Pointer to HBA context object.
 * @cmdiocb: Pointer to driver command iocb object.
 * @rspiocb: Pointer to driver response iocb object.
 *
 * This function is the completion handler for the abort iocbs for ELS
 * commands. If the abort itself failed, it logs which original iocb could
 * not be aborted; in all cases it releases the abort iocb back to the pool.
 */
static void
lpfc_sli_abort_els_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
			struct lpfc_iocbq *rspiocb)
{
	IOCB_t *irsp = &rspiocb->iocb;
	uint16_t abort_iotag, abort_context;
	struct lpfc_iocbq *abort_iocb = NULL;

	if (irsp->ulpStatus) {

		/*
		 * Assume that the port already completed and returned, or
		 * will return the iocb. Just Log the message.
		 */
		abort_context = cmdiocb->iocb.un.acxri.abortContextTag;
		abort_iotag = cmdiocb->iocb.un.acxri.abortIoTag;

		spin_lock_irq(&phba->hbalock);
		if (phba->sli_rev < LPFC_SLI_REV4) {
			if (abort_iotag != 0 &&
				abort_iotag <= phba->sli.last_iotag)
				abort_iocb =
					phba->sli.iocbq_lookup[abort_iotag];
		} else
			/* For sli4 the abort_tag is the XRI,
			 * so the abort routine puts the iotag  of the iocb
			 * being aborted in the context field of the abort
			 * IOCB.
			 */
			abort_iocb = phba->sli.iocbq_lookup[abort_context];

		lpfc_printf_log(phba, KERN_WARNING, LOG_ELS | LOG_SLI,
				"0327 Cannot abort els iocb %p "
				"with tag %x context %x, abort status %x, "
				"abort code %x\n",
				abort_iocb, abort_iotag, abort_context,
				irsp->ulpStatus, irsp->un.ulpWord[4]);

		spin_unlock_irq(&phba->hbalock);
	}
	lpfc_sli_release_iocbq(phba, cmdiocb);
	return;
}
10701
10702
10703
10704
10705
10706
10707
10708
10709
10710
10711
10712
10713static void
10714lpfc_ignore_els_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
10715 struct lpfc_iocbq *rspiocb)
10716{
10717 IOCB_t *irsp = &rspiocb->iocb;
10718
10719
10720 lpfc_printf_log(phba, KERN_INFO, LOG_ELS,
10721 "0139 Ignoring ELS cmd tag x%x completion Data: "
10722 "x%x x%x x%x\n",
10723 irsp->ulpIoTag, irsp->ulpStatus,
10724 irsp->un.ulpWord[4], irsp->ulpTimeout);
10725 if (cmdiocb->iocb.ulpCommand == CMD_GEN_REQUEST64_CR)
10726 lpfc_ct_free_iocb(phba, cmdiocb);
10727 else
10728 lpfc_els_free_iocb(phba, cmdiocb);
10729 return;
10730}
10731
10732
10733
10734
10735
10736
10737
10738
10739
10740
10741
10742
10743
10744static int
10745lpfc_sli_abort_iotag_issue(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
10746 struct lpfc_iocbq *cmdiocb)
10747{
10748 struct lpfc_vport *vport = cmdiocb->vport;
10749 struct lpfc_iocbq *abtsiocbp;
10750 IOCB_t *icmd = NULL;
10751 IOCB_t *iabt = NULL;
10752 int retval;
10753 unsigned long iflags;
10754
10755 lockdep_assert_held(&phba->hbalock);
10756
10757
10758
10759
10760
10761
10762 icmd = &cmdiocb->iocb;
10763 if (icmd->ulpCommand == CMD_ABORT_XRI_CN ||
10764 icmd->ulpCommand == CMD_CLOSE_XRI_CN ||
10765 (cmdiocb->iocb_flag & LPFC_DRIVER_ABORTED) != 0)
10766 return 0;
10767
10768
10769 abtsiocbp = __lpfc_sli_get_iocbq(phba);
10770 if (abtsiocbp == NULL)
10771 return 0;
10772
10773
10774
10775
10776 cmdiocb->iocb_flag |= LPFC_DRIVER_ABORTED;
10777
10778 iabt = &abtsiocbp->iocb;
10779 iabt->un.acxri.abortType = ABORT_TYPE_ABTS;
10780 iabt->un.acxri.abortContextTag = icmd->ulpContext;
10781 if (phba->sli_rev == LPFC_SLI_REV4) {
10782 iabt->un.acxri.abortIoTag = cmdiocb->sli4_xritag;
10783 iabt->un.acxri.abortContextTag = cmdiocb->iotag;
10784 }
10785 else
10786 iabt->un.acxri.abortIoTag = icmd->ulpIoTag;
10787 iabt->ulpLe = 1;
10788 iabt->ulpClass = icmd->ulpClass;
10789
10790
10791 abtsiocbp->hba_wqidx = cmdiocb->hba_wqidx;
10792 if (cmdiocb->iocb_flag & LPFC_IO_FCP)
10793 abtsiocbp->iocb_flag |= LPFC_USE_FCPWQIDX;
10794 if (cmdiocb->iocb_flag & LPFC_IO_FOF)
10795 abtsiocbp->iocb_flag |= LPFC_IO_FOF;
10796
10797 if (phba->link_state >= LPFC_LINK_UP)
10798 iabt->ulpCommand = CMD_ABORT_XRI_CN;
10799 else
10800 iabt->ulpCommand = CMD_CLOSE_XRI_CN;
10801
10802 abtsiocbp->iocb_cmpl = lpfc_sli_abort_els_cmpl;
10803 abtsiocbp->vport = vport;
10804
10805 lpfc_printf_vlog(vport, KERN_INFO, LOG_SLI,
10806 "0339 Abort xri x%x, original iotag x%x, "
10807 "abort cmd iotag x%x\n",
10808 iabt->un.acxri.abortIoTag,
10809 iabt->un.acxri.abortContextTag,
10810 abtsiocbp->iotag);
10811
10812 if (phba->sli_rev == LPFC_SLI_REV4) {
10813 pring = lpfc_sli4_calc_ring(phba, abtsiocbp);
10814 if (unlikely(pring == NULL))
10815 return 0;
10816
10817 spin_lock_irqsave(&pring->ring_lock, iflags);
10818 retval = __lpfc_sli_issue_iocb(phba, pring->ringno,
10819 abtsiocbp, 0);
10820 spin_unlock_irqrestore(&pring->ring_lock, iflags);
10821 } else {
10822 retval = __lpfc_sli_issue_iocb(phba, pring->ringno,
10823 abtsiocbp, 0);
10824 }
10825
10826 if (retval)
10827 __lpfc_sli_release_iocbq(phba, abtsiocbp);
10828
10829
10830
10831
10832
10833
10834 return retval;
10835}
10836
10837
10838
10839
10840
10841
10842
10843
10844
10845
10846
10847
10848
10849
/**
 * lpfc_sli_issue_abort_iotag - Abort function for a command iocb
 * @phba: Pointer to HBA context object.
 * @pring: Pointer to driver SLI ring object.
 * @cmdiocb: Pointer to driver command iocb object.
 *
 * This function issues an abort iocb for the provided command iocb. In case
 * of unloading, the abort iocb will not be issued to commands on the ELS
 * ring; instead, the callback function shall be changed to issue the abort
 * later. This function is called with hbalock held. The function returns 0
 * when the command iocb is an abort request.
 */
int
lpfc_sli_issue_abort_iotag(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
			   struct lpfc_iocbq *cmdiocb)
{
	struct lpfc_vport *vport = cmdiocb->vport;
	int retval = IOCB_ERROR;
	IOCB_t *icmd = NULL;

	lockdep_assert_held(&phba->hbalock);

	/*
	 * There are certain command types we don't want to abort.  And we
	 * don't want to abort commands that are already in the process of
	 * being aborted.
	 */
	icmd = &cmdiocb->iocb;
	if (icmd->ulpCommand == CMD_ABORT_XRI_CN ||
	    icmd->ulpCommand == CMD_CLOSE_XRI_CN ||
	    (cmdiocb->iocb_flag & LPFC_DRIVER_ABORTED) != 0)
		return 0;

	if (!pring) {
		if (cmdiocb->iocb_flag & LPFC_IO_FABRIC)
			cmdiocb->fabric_iocb_cmpl = lpfc_ignore_els_cmpl;
		else
			cmdiocb->iocb_cmpl = lpfc_ignore_els_cmpl;
		goto abort_iotag_exit;
	}

	/*
	 * If we're unloading, don't abort iocb on the ELS ring, but change
	 * the callback so that nothing happens when it finishes.
	 */
	if ((vport->load_flag & FC_UNLOADING) &&
	    (pring->ringno == LPFC_ELS_RING)) {
		if (cmdiocb->iocb_flag & LPFC_IO_FABRIC)
			cmdiocb->fabric_iocb_cmpl = lpfc_ignore_els_cmpl;
		else
			cmdiocb->iocb_cmpl = lpfc_ignore_els_cmpl;
		goto abort_iotag_exit;
	}

	/* Now, we try to issue the abort to the cmdiocb out */
	retval = lpfc_sli_abort_iotag_issue(phba, pring, cmdiocb);

abort_iotag_exit:
	/*
	 * Caller to this routine should check for IOCB_ERROR
	 * and handle it properly.  This routine no longer removes
	 * iocb off txcmplq and call compl in case of IOCB_ERROR.
	 */
	return retval;
}
10903
10904
10905
10906
10907
10908
10909
10910
10911
10912
10913
10914
10915
/**
 * lpfc_sli4_abort_nvme_io - Issue an ABORT_XRI_CX WQE for an NVME FCP I/O
 * @phba: Pointer to HBA context object.
 * @pring: Pointer to driver SLI ring object (not dereferenced here).
 * @cmdiocb: Pointer to the outstanding NVME command iocb to abort.
 *
 * Builds an ABORT_XRI_CX work-queue entry targeting @cmdiocb's XRI and
 * posts it to the FCP work queue with lpfc_sli4_issue_wqe().  On success
 * the abort completes asynchronously via lpfc_nvme_abort_fcreq_cmpl.
 *
 * Return: 0 when the command is not abortable (already an abort/close, or
 * already marked LPFC_DRIVER_ABORTED) or when no abort iocbq could be
 * allocated; otherwise the status from lpfc_sli4_issue_wqe().
 **/
static int
lpfc_sli4_abort_nvme_io(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
			struct lpfc_iocbq *cmdiocb)
{
	struct lpfc_vport *vport = cmdiocb->vport;
	struct lpfc_iocbq *abtsiocbp;
	union lpfc_wqe128 *abts_wqe;
	int retval;

	/*
	 * There are certain command types we don't want to abort.  And we
	 * don't want to abort commands that are already in the process of
	 * being aborted.
	 */
	if (cmdiocb->iocb.ulpCommand == CMD_ABORT_XRI_CN ||
	    cmdiocb->iocb.ulpCommand == CMD_CLOSE_XRI_CN ||
	    (cmdiocb->iocb_flag & LPFC_DRIVER_ABORTED) != 0)
		return 0;

	/* Allocate an iocbq to carry the abort WQE.
	 * NOTE(review): the lock-held variant __lpfc_sli_get_iocbq() is
	 * used here, but the failure path below frees with
	 * lpfc_sli_release_iocbq(), which acquires hbalock itself —
	 * confirm the caller's locking context is consistent with both.
	 */
	abtsiocbp = __lpfc_sli_get_iocbq(phba);
	if (abtsiocbp == NULL)
		return 0;

	/* Mark the command so the response path knows an abort is in
	 * flight before its completion handler runs.
	 */
	cmdiocb->iocb_flag |= LPFC_DRIVER_ABORTED;

	/* Build the abort WQE: abort (not close) the exchange, by XRI. */
	abts_wqe = &abtsiocbp->wqe;
	bf_set(abort_cmd_ia, &abts_wqe->abort_cmd, 0);
	bf_set(abort_cmd_criteria, &abts_wqe->abort_cmd, T_XRI_TAG);

	/* Clear reserved fields of the abort command. */
	abts_wqe->abort_cmd.rsrvd4 = 0;
	abts_wqe->abort_cmd.rsrvd5 = 0;

	/* XRI and context tags are left zero in the WQE common area;
	 * the target exchange is identified through abort_tag below.
	 */
	bf_set(wqe_xri_tag, &abts_wqe->abort_cmd.wqe_com, 0);
	bf_set(wqe_ctxt_tag, &abts_wqe->abort_cmd.wqe_com, 0);

	/* Abort command type and class of the original command. */
	bf_set(wqe_ct, &abts_wqe->abort_cmd.wqe_com, 0);
	bf_set(wqe_cmnd, &abts_wqe->abort_cmd.wqe_com, CMD_ABORT_XRI_CX);
	bf_set(wqe_class, &abts_wqe->abort_cmd.wqe_com,
	       cmdiocb->iocb.ulpClass);

	/* The exchange to abort is identified by the command's SLI-4
	 * XRI, carried in the abort_tag field.
	 */
	abts_wqe->abort_cmd.wqe_com.abort_tag = cmdiocb->sli4_xritag;

	/* Request tag identifies this abort iocbq on completion. */
	bf_set(wqe_reqtag, &abts_wqe->abort_cmd.wqe_com,
	       abtsiocbp->iotag);

	/* Route the abort to the same WQ as the command being aborted. */
	bf_set(wqe_wqid, &abts_wqe->abort_cmd.wqe_com, cmdiocb->hba_wqidx);
	bf_set(wqe_qosd, &abts_wqe->abort_cmd.wqe_com, 1);
	bf_set(wqe_lenloc, &abts_wqe->abort_cmd.wqe_com, LPFC_WQE_LENLOC_NONE);

	/* Completion queue selection and WQE completion request. */
	bf_set(wqe_cmd_type, &abts_wqe->abort_cmd.wqe_com, OTHER_COMMAND);
	bf_set(wqe_wqec, &abts_wqe->abort_cmd.wqe_com, 1);
	bf_set(wqe_cqid, &abts_wqe->abort_cmd.wqe_com, LPFC_WQE_CQ_ID_DEFAULT);

	/* Issue the abort and mark it as an NVME-path iocb. */
	abtsiocbp->iocb_flag |= LPFC_IO_NVME;
	abtsiocbp->vport = vport;
	abtsiocbp->wqe_cmpl = lpfc_nvme_abort_fcreq_cmpl;
	retval = lpfc_sli4_issue_wqe(phba, LPFC_FCP_RING, abtsiocbp);
	if (retval) {
		lpfc_printf_vlog(vport, KERN_ERR, LOG_NVME,
				 "6147 Failed abts issue_wqe with status x%x "
				 "for oxid x%x\n",
				 retval, cmdiocb->sli4_xritag);
		lpfc_sli_release_iocbq(phba, abtsiocbp);
		return retval;
	}

	lpfc_printf_vlog(vport, KERN_ERR, LOG_NVME,
			 "6148 Drv Abort NVME Request Issued for "
			 "ox_id x%x on reqtag x%x\n",
			 cmdiocb->sli4_xritag,
			 abtsiocbp->iotag);

	return retval;
}
11005
11006
11007
11008
11009
11010
11011
11012void
11013lpfc_sli_hba_iocb_abort(struct lpfc_hba *phba)
11014{
11015 struct lpfc_sli *psli = &phba->sli;
11016 struct lpfc_sli_ring *pring;
11017 struct lpfc_queue *qp = NULL;
11018 int i;
11019
11020 if (phba->sli_rev != LPFC_SLI_REV4) {
11021 for (i = 0; i < psli->num_rings; i++) {
11022 pring = &psli->sli3_ring[i];
11023 lpfc_sli_abort_iocb_ring(phba, pring);
11024 }
11025 return;
11026 }
11027 list_for_each_entry(qp, &phba->sli4_hba.lpfc_wq_list, wq_list) {
11028 pring = qp->pring;
11029 if (!pring)
11030 continue;
11031 lpfc_sli_abort_iocb_ring(phba, pring);
11032 }
11033}
11034
11035
11036
11037
11038
11039
11040
11041
11042
11043
11044
11045
11046
11047
11048
11049
11050
11051
11052
11053
11054
11055
11056
11057static int
11058lpfc_sli_validate_fcp_iocb(struct lpfc_iocbq *iocbq, struct lpfc_vport *vport,
11059 uint16_t tgt_id, uint64_t lun_id,
11060 lpfc_ctx_cmd ctx_cmd)
11061{
11062 struct lpfc_scsi_buf *lpfc_cmd;
11063 int rc = 1;
11064
11065 if (!(iocbq->iocb_flag & LPFC_IO_FCP))
11066 return rc;
11067
11068 if (iocbq->vport != vport)
11069 return rc;
11070
11071 lpfc_cmd = container_of(iocbq, struct lpfc_scsi_buf, cur_iocbq);
11072
11073 if (lpfc_cmd->pCmd == NULL)
11074 return rc;
11075
11076 switch (ctx_cmd) {
11077 case LPFC_CTX_LUN:
11078 if ((lpfc_cmd->rdata->pnode) &&
11079 (lpfc_cmd->rdata->pnode->nlp_sid == tgt_id) &&
11080 (scsilun_to_int(&lpfc_cmd->fcp_cmnd->fcp_lun) == lun_id))
11081 rc = 0;
11082 break;
11083 case LPFC_CTX_TGT:
11084 if ((lpfc_cmd->rdata->pnode) &&
11085 (lpfc_cmd->rdata->pnode->nlp_sid == tgt_id))
11086 rc = 0;
11087 break;
11088 case LPFC_CTX_HOST:
11089 rc = 0;
11090 break;
11091 default:
11092 printk(KERN_ERR "%s: Unknown context cmd type, value %d\n",
11093 __func__, ctx_cmd);
11094 break;
11095 }
11096
11097 return rc;
11098}
11099
11100
11101
11102
11103
11104
11105
11106
11107
11108
11109
11110
11111
11112
11113
11114
11115
11116
11117
11118
11119int
11120lpfc_sli_sum_iocb(struct lpfc_vport *vport, uint16_t tgt_id, uint64_t lun_id,
11121 lpfc_ctx_cmd ctx_cmd)
11122{
11123 struct lpfc_hba *phba = vport->phba;
11124 struct lpfc_iocbq *iocbq;
11125 int sum, i;
11126
11127 spin_lock_irq(&phba->hbalock);
11128 for (i = 1, sum = 0; i <= phba->sli.last_iotag; i++) {
11129 iocbq = phba->sli.iocbq_lookup[i];
11130
11131 if (lpfc_sli_validate_fcp_iocb (iocbq, vport, tgt_id, lun_id,
11132 ctx_cmd) == 0)
11133 sum++;
11134 }
11135 spin_unlock_irq(&phba->hbalock);
11136
11137 return sum;
11138}
11139
11140
11141
11142
11143
11144
11145
11146
11147
11148
11149
11150void
11151lpfc_sli_abort_fcp_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
11152 struct lpfc_iocbq *rspiocb)
11153{
11154 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
11155 "3096 ABORT_XRI_CN completing on rpi x%x "
11156 "original iotag x%x, abort cmd iotag x%x "
11157 "status 0x%x, reason 0x%x\n",
11158 cmdiocb->iocb.un.acxri.abortContextTag,
11159 cmdiocb->iocb.un.acxri.abortIoTag,
11160 cmdiocb->iotag, rspiocb->iocb.ulpStatus,
11161 rspiocb->iocb.un.ulpWord[4]);
11162 lpfc_sli_release_iocbq(phba, cmdiocb);
11163 return;
11164}
11165
11166
11167
11168
11169
11170
11171
11172
11173
11174
11175
11176
11177
11178
11179
11180
11181
11182
11183
11184
11185
11186
11187int
11188lpfc_sli_abort_iocb(struct lpfc_vport *vport, struct lpfc_sli_ring *pring,
11189 uint16_t tgt_id, uint64_t lun_id, lpfc_ctx_cmd abort_cmd)
11190{
11191 struct lpfc_hba *phba = vport->phba;
11192 struct lpfc_iocbq *iocbq;
11193 struct lpfc_iocbq *abtsiocb;
11194 struct lpfc_sli_ring *pring_s4;
11195 IOCB_t *cmd = NULL;
11196 int errcnt = 0, ret_val = 0;
11197 int i;
11198
11199 for (i = 1; i <= phba->sli.last_iotag; i++) {
11200 iocbq = phba->sli.iocbq_lookup[i];
11201
11202 if (lpfc_sli_validate_fcp_iocb(iocbq, vport, tgt_id, lun_id,
11203 abort_cmd) != 0)
11204 continue;
11205
11206
11207
11208
11209
11210 if (iocbq->iocb_flag & LPFC_DRIVER_ABORTED)
11211 continue;
11212
11213
11214 abtsiocb = lpfc_sli_get_iocbq(phba);
11215 if (abtsiocb == NULL) {
11216 errcnt++;
11217 continue;
11218 }
11219
11220
11221 iocbq->iocb_flag |= LPFC_DRIVER_ABORTED;
11222
11223 cmd = &iocbq->iocb;
11224 abtsiocb->iocb.un.acxri.abortType = ABORT_TYPE_ABTS;
11225 abtsiocb->iocb.un.acxri.abortContextTag = cmd->ulpContext;
11226 if (phba->sli_rev == LPFC_SLI_REV4)
11227 abtsiocb->iocb.un.acxri.abortIoTag = iocbq->sli4_xritag;
11228 else
11229 abtsiocb->iocb.un.acxri.abortIoTag = cmd->ulpIoTag;
11230 abtsiocb->iocb.ulpLe = 1;
11231 abtsiocb->iocb.ulpClass = cmd->ulpClass;
11232 abtsiocb->vport = vport;
11233
11234
11235 abtsiocb->hba_wqidx = iocbq->hba_wqidx;
11236 if (iocbq->iocb_flag & LPFC_IO_FCP)
11237 abtsiocb->iocb_flag |= LPFC_USE_FCPWQIDX;
11238 if (iocbq->iocb_flag & LPFC_IO_FOF)
11239 abtsiocb->iocb_flag |= LPFC_IO_FOF;
11240
11241 if (lpfc_is_link_up(phba))
11242 abtsiocb->iocb.ulpCommand = CMD_ABORT_XRI_CN;
11243 else
11244 abtsiocb->iocb.ulpCommand = CMD_CLOSE_XRI_CN;
11245
11246
11247 abtsiocb->iocb_cmpl = lpfc_sli_abort_fcp_cmpl;
11248 if (phba->sli_rev == LPFC_SLI_REV4) {
11249 pring_s4 = lpfc_sli4_calc_ring(phba, iocbq);
11250 if (!pring_s4)
11251 continue;
11252 ret_val = lpfc_sli_issue_iocb(phba, pring_s4->ringno,
11253 abtsiocb, 0);
11254 } else
11255 ret_val = lpfc_sli_issue_iocb(phba, pring->ringno,
11256 abtsiocb, 0);
11257 if (ret_val == IOCB_ERROR) {
11258 lpfc_sli_release_iocbq(phba, abtsiocb);
11259 errcnt++;
11260 continue;
11261 }
11262 }
11263
11264 return errcnt;
11265}
11266
11267
11268
11269
11270
11271
11272
11273
11274
11275
11276
11277
11278
11279
11280
11281
11282
11283
11284
11285
11286
11287
11288
11289int
11290lpfc_sli_abort_taskmgmt(struct lpfc_vport *vport, struct lpfc_sli_ring *pring,
11291 uint16_t tgt_id, uint64_t lun_id, lpfc_ctx_cmd cmd)
11292{
11293 struct lpfc_hba *phba = vport->phba;
11294 struct lpfc_scsi_buf *lpfc_cmd;
11295 struct lpfc_iocbq *abtsiocbq;
11296 struct lpfc_nodelist *ndlp;
11297 struct lpfc_iocbq *iocbq;
11298 IOCB_t *icmd;
11299 int sum, i, ret_val;
11300 unsigned long iflags;
11301 struct lpfc_sli_ring *pring_s4;
11302
11303 spin_lock_irq(&phba->hbalock);
11304
11305
11306 if (phba->hba_flag & HBA_FCP_IOQ_FLUSH) {
11307 spin_unlock_irq(&phba->hbalock);
11308 return 0;
11309 }
11310 sum = 0;
11311
11312 for (i = 1; i <= phba->sli.last_iotag; i++) {
11313 iocbq = phba->sli.iocbq_lookup[i];
11314
11315 if (lpfc_sli_validate_fcp_iocb(iocbq, vport, tgt_id, lun_id,
11316 cmd) != 0)
11317 continue;
11318
11319
11320
11321
11322
11323 if (iocbq->iocb_flag & LPFC_DRIVER_ABORTED)
11324 continue;
11325
11326
11327 abtsiocbq = __lpfc_sli_get_iocbq(phba);
11328 if (abtsiocbq == NULL)
11329 continue;
11330
11331 icmd = &iocbq->iocb;
11332 abtsiocbq->iocb.un.acxri.abortType = ABORT_TYPE_ABTS;
11333 abtsiocbq->iocb.un.acxri.abortContextTag = icmd->ulpContext;
11334 if (phba->sli_rev == LPFC_SLI_REV4)
11335 abtsiocbq->iocb.un.acxri.abortIoTag =
11336 iocbq->sli4_xritag;
11337 else
11338 abtsiocbq->iocb.un.acxri.abortIoTag = icmd->ulpIoTag;
11339 abtsiocbq->iocb.ulpLe = 1;
11340 abtsiocbq->iocb.ulpClass = icmd->ulpClass;
11341 abtsiocbq->vport = vport;
11342
11343
11344 abtsiocbq->hba_wqidx = iocbq->hba_wqidx;
11345 if (iocbq->iocb_flag & LPFC_IO_FCP)
11346 abtsiocbq->iocb_flag |= LPFC_USE_FCPWQIDX;
11347 if (iocbq->iocb_flag & LPFC_IO_FOF)
11348 abtsiocbq->iocb_flag |= LPFC_IO_FOF;
11349
11350 lpfc_cmd = container_of(iocbq, struct lpfc_scsi_buf, cur_iocbq);
11351 ndlp = lpfc_cmd->rdata->pnode;
11352
11353 if (lpfc_is_link_up(phba) &&
11354 (ndlp && ndlp->nlp_state == NLP_STE_MAPPED_NODE))
11355 abtsiocbq->iocb.ulpCommand = CMD_ABORT_XRI_CN;
11356 else
11357 abtsiocbq->iocb.ulpCommand = CMD_CLOSE_XRI_CN;
11358
11359
11360 abtsiocbq->iocb_cmpl = lpfc_sli_abort_fcp_cmpl;
11361
11362
11363
11364
11365
11366 iocbq->iocb_flag |= LPFC_DRIVER_ABORTED;
11367
11368 if (phba->sli_rev == LPFC_SLI_REV4) {
11369 pring_s4 = lpfc_sli4_calc_ring(phba, iocbq);
11370 if (pring_s4 == NULL)
11371 continue;
11372
11373 spin_lock_irqsave(&pring_s4->ring_lock, iflags);
11374 ret_val = __lpfc_sli_issue_iocb(phba, pring_s4->ringno,
11375 abtsiocbq, 0);
11376 spin_unlock_irqrestore(&pring_s4->ring_lock, iflags);
11377 } else {
11378 ret_val = __lpfc_sli_issue_iocb(phba, pring->ringno,
11379 abtsiocbq, 0);
11380 }
11381
11382
11383 if (ret_val == IOCB_ERROR)
11384 __lpfc_sli_release_iocbq(phba, abtsiocbq);
11385 else
11386 sum++;
11387 }
11388 spin_unlock_irq(&phba->hbalock);
11389 return sum;
11390}
11391
11392
11393
11394
11395
11396
11397
11398
11399
11400
11401
11402
11403
11404
11405
11406
11407
11408
/**
 * lpfc_sli_wake_iocb_wait - completion handler for synchronously issued iocbs
 * @phba: Pointer to HBA context object.
 * @cmdiocbq: Pointer to the command iocb that completed.
 * @rspiocbq: Pointer to the response iocb (may be NULL on timeout path).
 *
 * Installed by lpfc_sli_issue_iocb_wait() as the iocb_cmpl.  If the
 * waiter already timed out (LPFC_IO_WAKE_TMO set), the original handler
 * saved in wait_iocb_cmpl is invoked — or the iocb freed — since nobody
 * is waiting any more.  Otherwise the response is copied into the
 * waiter's buffer (context2), LPFC_IO_WAKE is set, and the waiter's
 * wait queue is woken.
 **/
static void
lpfc_sli_wake_iocb_wait(struct lpfc_hba *phba,
			struct lpfc_iocbq *cmdiocbq,
			struct lpfc_iocbq *rspiocbq)
{
	wait_queue_head_t *pdone_q;
	unsigned long iflags;
	struct lpfc_scsi_buf *lpfc_cmd;

	spin_lock_irqsave(&phba->hbalock, iflags);
	if (cmdiocbq->iocb_flag & LPFC_IO_WAKE_TMO) {

		/*
		 * The waiter timed out and gave up.  Restore the original
		 * completion handler and call it with a NULL response (or,
		 * if none was saved, just free the iocb).  The lock is
		 * dropped first because the handler may sleep or reacquire
		 * it.
		 */

		spin_unlock_irqrestore(&phba->hbalock, iflags);
		cmdiocbq->iocb_cmpl = cmdiocbq->wait_iocb_cmpl;
		cmdiocbq->wait_iocb_cmpl = NULL;
		if (cmdiocbq->iocb_cmpl)
			(cmdiocbq->iocb_cmpl)(phba, cmdiocbq, NULL);
		else
			lpfc_sli_release_iocbq(phba, cmdiocbq);
		return;
	}

	/* Waiter still present: hand over the response and wake it. */
	cmdiocbq->iocb_flag |= LPFC_IO_WAKE;
	if (cmdiocbq->context2 && rspiocbq)
		memcpy(&((struct lpfc_iocbq *)cmdiocbq->context2)->iocb,
		       &rspiocbq->iocb, sizeof(IOCB_t));

	/* For FCP (non-libdfc) commands, propagate exchange-busy status. */
	if ((cmdiocbq->iocb_flag & LPFC_IO_FCP) &&
	    !(cmdiocbq->iocb_flag & LPFC_IO_LIBDFC)) {
		lpfc_cmd = container_of(cmdiocbq, struct lpfc_scsi_buf,
					cur_iocbq);
		lpfc_cmd->exch_busy = rspiocbq->iocb_flag & LPFC_EXCHANGE_BUSY;
	}

	pdone_q = cmdiocbq->context_un.wait_queue;
	if (pdone_q)
		wake_up(pdone_q);
	spin_unlock_irqrestore(&phba->hbalock, iflags);
	return;
}
11456
11457
11458
11459
11460
11461
11462
11463
11464
11465
11466
11467
11468
11469static int
11470lpfc_chk_iocb_flg(struct lpfc_hba *phba,
11471 struct lpfc_iocbq *piocbq, uint32_t flag)
11472{
11473 unsigned long iflags;
11474 int ret;
11475
11476 spin_lock_irqsave(&phba->hbalock, iflags);
11477 ret = piocbq->iocb_flag & flag;
11478 spin_unlock_irqrestore(&phba->hbalock, iflags);
11479 return ret;
11480
11481}
11482
11483
11484
11485
11486
11487
11488
11489
11490
11491
11492
11493
11494
11495
11496
11497
11498
11499
11500
11501
11502
11503
11504
11505
11506
11507
11508
11509
11510
11511
11512
11513
11514
11515
11516
11517
11518
/**
 * lpfc_sli_issue_iocb_wait - Synchronous function to issue iocb commands
 * @phba: Pointer to HBA context object.
 * @ring_number: Ring number (SLI-3; on SLI-4 the ring is derived from
 *               @piocb via lpfc_sli4_calc_ring()).
 * @piocb: Pointer to command iocb.
 * @prspiocbq: Optional pointer to a response iocb buffer.
 * @timeout: Timeout in seconds.
 *
 * Issues @piocb and sleeps on an on-stack wait queue until the command
 * completes (lpfc_sli_wake_iocb_wait sets LPFC_IO_WAKE) or @timeout
 * expires.  When @prspiocbq is supplied, piocb->context2 must be NULL on
 * entry since it is borrowed to carry the response; the response IOCB is
 * copied into @prspiocbq by the completion handler.  On timeout,
 * LPFC_IO_WAKE_TMO is set so the eventual interrupt-context completion
 * invokes the original handler (saved in wait_iocb_cmpl) or frees the
 * iocb instead of waking a waiter that already left.
 *
 * Must not be called from interrupt context.
 *
 * Return: IOCB_SUCCESS, IOCB_TIMEDOUT, IOCB_BUSY or IOCB_ERROR.
 **/
int
lpfc_sli_issue_iocb_wait(struct lpfc_hba *phba,
			 uint32_t ring_number,
			 struct lpfc_iocbq *piocb,
			 struct lpfc_iocbq *prspiocbq,
			 uint32_t timeout)
{
	DECLARE_WAIT_QUEUE_HEAD_ONSTACK(done_q);
	long timeleft, timeout_req = 0;
	int retval = IOCB_SUCCESS;
	uint32_t creg_val;
	struct lpfc_iocbq *iocb;
	int txq_cnt = 0;
	int txcmplq_cnt = 0;
	struct lpfc_sli_ring *pring;
	unsigned long iflags;
	bool iocb_completed = true;

	if (phba->sli_rev >= LPFC_SLI_REV4)
		pring = lpfc_sli4_calc_ring(phba, piocb);
	else
		pring = &phba->sli.sli3_ring[ring_number];
	/*
	 * If the caller has provided a response iocbq buffer, then
	 * context2 of the iocbq must be NULL or it's an error.
	 */
	if (prspiocbq) {
		if (piocb->context2)
			return IOCB_ERROR;
		piocb->context2 = prspiocbq;
	}

	/* Redirect completion to the waker, saving any original handler
	 * for the timeout path, and clear the wake/timeout flags.
	 */
	piocb->wait_iocb_cmpl = piocb->iocb_cmpl;
	piocb->iocb_cmpl = lpfc_sli_wake_iocb_wait;
	piocb->context_un.wait_queue = &done_q;
	piocb->iocb_flag &= ~(LPFC_IO_WAKE | LPFC_IO_WAKE_TMO);

	if (phba->cfg_poll & DISABLE_FCP_RING_INT) {
		/* Temporarily enable the FCP ring interrupt so this
		 * synchronous command can complete.
		 */
		if (lpfc_readl(phba->HCregaddr, &creg_val))
			return IOCB_ERROR;
		creg_val |= (HC_R0INT_ENA << LPFC_FCP_RING);
		writel(creg_val, phba->HCregaddr);
		readl(phba->HCregaddr); /* flush */
	}

	retval = lpfc_sli_issue_iocb(phba, ring_number, piocb,
				     SLI_IOCB_RET_IOCB);
	if (retval == IOCB_SUCCESS) {
		timeout_req = msecs_to_jiffies(timeout * 1000);
		timeleft = wait_event_timeout(done_q,
				lpfc_chk_iocb_flg(phba, piocb, LPFC_IO_WAKE),
				timeout_req);
		spin_lock_irqsave(&phba->hbalock, iflags);
		if (!(piocb->iocb_flag & LPFC_IO_WAKE)) {

			/*
			 * IOCB timed out.  Inform the wake iocb wait
			 * completion function (which will run later in
			 * interrupt context) and record the timeout
			 * locally.
			 */

			iocb_completed = false;
			piocb->iocb_flag |= LPFC_IO_WAKE_TMO;
		}
		spin_unlock_irqrestore(&phba->hbalock, iflags);
		if (iocb_completed) {
			lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
					"0331 IOCB wake signaled\n");
			/* IOCB_SUCCESS only means the command was sent
			 * and completed — the caller must still inspect
			 * the response status itself.
			 */
		} else if (timeleft == 0) {
			lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
					"0338 IOCB wait timeout error - no "
					"wake response Data x%x\n", timeout);
			retval = IOCB_TIMEDOUT;
		} else {
			/* Woken before the timeout but without
			 * LPFC_IO_WAKE being set.
			 * NOTE(review): the second datum logs
			 * (timeleft / jiffies) — remaining ticks divided
			 * by the current jiffies counter, which looks
			 * unintended; confirm whether
			 * jiffies_to_msecs(timeleft) was meant.
			 */
			lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
					"0330 IOCB wake NOT set, "
					"Data x%x x%lx\n",
					timeout, (timeleft / jiffies));
			retval = IOCB_TIMEDOUT;
		}
	} else if (retval == IOCB_BUSY) {
		if (phba->cfg_log_verbose & LOG_SLI) {
			list_for_each_entry(iocb, &pring->txq, list) {
				txq_cnt++;
			}
			list_for_each_entry(iocb, &pring->txcmplq, list) {
				txcmplq_cnt++;
			}
			lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
				"2818 Max IOCBs %d txq cnt %d txcmplq cnt %d\n",
				phba->iocb_cnt, txq_cnt, txcmplq_cnt);
		}
		/* NOTE(review): this early return leaves context2,
		 * context_un.wait_queue and iocb_cmpl pointing at this
		 * call's waiter state — confirm callers never reuse the
		 * iocb without re-initializing it.
		 */
		return retval;
	} else {
		lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
				"0332 IOCB wait issue failed, Data x%x\n",
				retval);
		retval = IOCB_ERROR;
	}

	if (phba->cfg_poll & DISABLE_FCP_RING_INT) {
		/* Restore the FCP ring interrupt to disabled. */
		if (lpfc_readl(phba->HCregaddr, &creg_val))
			return IOCB_ERROR;
		creg_val &= ~(HC_R0INT_ENA << LPFC_FCP_RING);
		writel(creg_val, phba->HCregaddr);
		readl(phba->HCregaddr); /* flush */
	}

	if (prspiocbq)
		piocb->context2 = NULL;

	piocb->context_un.wait_queue = NULL;
	piocb->iocb_cmpl = NULL;
	return retval;
}
11638
11639
11640
11641
11642
11643
11644
11645
11646
11647
11648
11649
11650
11651
11652
11653
11654
11655
11656
11657
11658
11659
11660
11661
11662
11663
11664
/**
 * lpfc_sli_issue_mbox_wait - Synchronous function to issue mailbox
 * @phba: Pointer to HBA context object.
 * @pmboxq: Pointer to driver mailbox object.
 * @timeout: Timeout in seconds.
 *
 * Issues @pmboxq with MBX_NOWAIT and blocks on an on-stack completion
 * (passed via pmboxq->context3) until lpfc_sli_wake_mbox_wait signals it
 * or @timeout seconds elapse.
 *
 * Return: MBX_SUCCESS when LPFC_MBX_WAKE shows the command completed;
 * MBX_TIMEOUT when it did not — in that case ownership of @pmboxq is
 * handed to lpfc_sli_def_mbox_cmpl so a late completion cleans up and
 * the caller must NOT free the mailbox; otherwise the failure code from
 * lpfc_sli_issue_mbox().  Must not be called from interrupt context.
 **/
int
lpfc_sli_issue_mbox_wait(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmboxq,
			 uint32_t timeout)
{
	struct completion mbox_done;
	int retval;
	unsigned long flag;

	pmboxq->mbox_flag &= ~LPFC_MBX_WAKE;
	/* setup wake call as mailbox completion callback */
	pmboxq->mbox_cmpl = lpfc_sli_wake_mbox_wait;

	/* context3 carries the completion for the wake function */
	init_completion(&mbox_done);
	pmboxq->context3 = &mbox_done;
	/* now issue the command */
	retval = lpfc_sli_issue_mbox(phba, pmboxq, MBX_NOWAIT);
	if (retval == MBX_BUSY || retval == MBX_SUCCESS) {
		wait_for_completion_timeout(&mbox_done,
					    msecs_to_jiffies(timeout * 1000));

		spin_lock_irqsave(&phba->hbalock, flag);
		pmboxq->context3 = NULL;
		/*
		 * If LPFC_MBX_WAKE is set the command completed in time;
		 * otherwise leave the resources for the deferred default
		 * completion handler to release.
		 */
		if (pmboxq->mbox_flag & LPFC_MBX_WAKE) {
			retval = MBX_SUCCESS;
		} else {
			retval = MBX_TIMEOUT;
			pmboxq->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
		}
		spin_unlock_irqrestore(&phba->hbalock, flag);
	}
	return retval;
}
11702
11703
11704
11705
11706
11707
11708
11709
11710
11711
11712
11713
11714
11715
11716
11717
/**
 * lpfc_sli_mbox_sys_shutdown - shutdown the mailbox command sub-system
 * @phba: Pointer to HBA context object.
 * @mbx_action: LPFC_MBX_NO_WAIT or LPFC_MBX_WAIT behavior selector.
 *
 * Blocks further asynchronous mailbox commands (LPFC_SLI_ASYNC_MBX_BLK)
 * and flushes the mailbox sub-system.  With LPFC_MBX_NO_WAIT the flush
 * happens after a short fixed delay; otherwise the routine polls with
 * msleep() for any active mailbox command to finish, bounded by that
 * command's own timeout, before flushing.
 **/
void
lpfc_sli_mbox_sys_shutdown(struct lpfc_hba *phba, int mbx_action)
{
	struct lpfc_sli *psli = &phba->sli;
	unsigned long timeout;

	if (mbx_action == LPFC_MBX_NO_WAIT) {
		/* brief delay, then flush without waiting for active cmd */
		msleep(100);
		lpfc_sli_mbox_sys_flush(phba);
		return;
	}
	timeout = msecs_to_jiffies(LPFC_MBOX_TMO * 1000) + jiffies;

	spin_lock_irq(&phba->hbalock);
	psli->sli_flag |= LPFC_SLI_ASYNC_MBX_BLK;

	if (psli->sli_flag & LPFC_SLI_ACTIVE) {
		/* Bound the wait by the active command's own mailbox
		 * timeout when one is in flight.
		 */
		if (phba->sli.mbox_active)
			timeout = msecs_to_jiffies(lpfc_mbox_tmo_val(phba,
						phba->sli.mbox_active) *
						1000) + jiffies;
		spin_unlock_irq(&phba->hbalock);

		while (phba->sli.mbox_active) {
			/* Check active mailbox complete status every 2ms */
			msleep(2);
			if (time_after(jiffies, timeout))
				/* Timeout: let the flush routine below
				 * forcefully release the active command.
				 */
				break;
		}
	} else
		spin_unlock_irq(&phba->hbalock);

	lpfc_sli_mbox_sys_flush(phba);
}
11759
11760
11761
11762
11763
11764
11765
11766
11767
11768
11769
11770
/**
 * lpfc_sli_eratt_read - Detect SLI-3 error attention from the HA register
 * @phba: Pointer to HBA context object.
 *
 * Reads the Host Attention register and, when HA_ERATT is set, captures
 * the host status into phba->work_hs via lpfc_sli_read_hs().  A firmware
 * error combining HS_FFER1 with any of FFER2..FFER8 is marked deferred
 * (DEFER_ERATT) and all host interrupt enables are cleared.  A failed
 * register read is treated as a surprise device unplug.
 *
 * Context: called with phba->hbalock held (by lpfc_sli_check_eratt).
 *
 * Return: 1 when an error attention (or unplug) was latched into
 * phba->work_ha / hba_flag, 0 otherwise.
 **/
static int
lpfc_sli_eratt_read(struct lpfc_hba *phba)
{
	uint32_t ha_copy;

	/* Read chip Host Attention (HA) register */
	if (lpfc_readl(phba->HAregaddr, &ha_copy))
		goto unplug_err;

	if (ha_copy & HA_ERATT) {
		/* Read host status register to retrieve error event */
		if (lpfc_sli_read_hs(phba))
			goto unplug_err;

		/* Check if there is a deferred error condition active */
		if ((HS_FFER1 & phba->work_hs) &&
		    ((HS_FFER2 | HS_FFER3 | HS_FFER4 | HS_FFER5 |
		      HS_FFER6 | HS_FFER7 | HS_FFER8) & phba->work_hs)) {
			phba->hba_flag |= DEFER_ERATT;
			/* Clear all host interrupt enable conditions */
			writel(0, phba->HCregaddr);
			readl(phba->HCregaddr); /* flush */
		}

		/* Set the driver HA work bitmap for the worker thread */
		phba->work_ha |= HA_ERATT;
		/* Indicate this ERATT has been claimed */
		phba->hba_flag |= HBA_ERATT_HANDLED;
		return 1;
	}
	return 0;

unplug_err:
	/* Register read failed: flag device unplug in the HS work state */
	phba->work_hs |= UNPLUG_ERR;
	/* Set the driver HA work bitmap for the worker thread */
	phba->work_ha |= HA_ERATT;
	/* Indicate this ERATT has been claimed */
	phba->hba_flag |= HBA_ERATT_HANDLED;
	return 1;
}
11812
11813
11814
11815
11816
11817
11818
11819
11820
11821
11822
11823
/**
 * lpfc_sli4_eratt_read - Detect SLI-4 error attention from port registers
 * @phba: Pointer to HBA context object.
 *
 * Inspects the port registers appropriate for the SLI interface type:
 * IF_TYPE_0 checks the unrecoverable-error status pair against the
 * ue_mask values; IF_TYPE_2/6 check the port status register's error
 * bit.  Failed register reads are treated as a surprise unplug.  An
 * unsupported interface type is itself reported as an error attention.
 *
 * Return: 1 when an error attention was latched into phba->work_ha /
 * work_status / hba_flag, 0 otherwise.
 **/
static int
lpfc_sli4_eratt_read(struct lpfc_hba *phba)
{
	uint32_t uerr_sta_hi, uerr_sta_lo;
	uint32_t if_type, portsmphr;
	struct lpfc_register portstat_reg;

	/*
	 * For now, use the SLI4 device internal unrecoverable error
	 * registers for error attention. This can be changed later.
	 */
	if_type = bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf);
	switch (if_type) {
	case LPFC_SLI_INTF_IF_TYPE_0:
		if (lpfc_readl(phba->sli4_hba.u.if_type0.UERRLOregaddr,
			&uerr_sta_lo) ||
			lpfc_readl(phba->sli4_hba.u.if_type0.UERRHIregaddr,
			&uerr_sta_hi)) {
			/* Register read failed: treat as unplug */
			phba->work_hs |= UNPLUG_ERR;
			phba->work_ha |= HA_ERATT;
			phba->hba_flag |= HBA_ERATT_HANDLED;
			return 1;
		}
		/* Any unmasked unrecoverable-error bit set? */
		if ((~phba->sli4_hba.ue_mask_lo & uerr_sta_lo) ||
		    (~phba->sli4_hba.ue_mask_hi & uerr_sta_hi)) {
			lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
					"1423 HBA Unrecoverable error: "
					"uerr_lo_reg=0x%x, uerr_hi_reg=0x%x, "
					"ue_mask_lo_reg=0x%x, "
					"ue_mask_hi_reg=0x%x\n",
					uerr_sta_lo, uerr_sta_hi,
					phba->sli4_hba.ue_mask_lo,
					phba->sli4_hba.ue_mask_hi);
			phba->work_status[0] = uerr_sta_lo;
			phba->work_status[1] = uerr_sta_hi;
			phba->work_ha |= HA_ERATT;
			phba->hba_flag |= HBA_ERATT_HANDLED;
			return 1;
		}
		break;
	case LPFC_SLI_INTF_IF_TYPE_2:
	case LPFC_SLI_INTF_IF_TYPE_6:
		/* NOTE(review): IF_TYPE_6 is read through the if_type2
		 * register block — presumably the layouts are shared;
		 * confirm against the SLI-4 interface spec.
		 */
		if (lpfc_readl(phba->sli4_hba.u.if_type2.STATUSregaddr,
			&portstat_reg.word0) ||
			lpfc_readl(phba->sli4_hba.PSMPHRregaddr,
			&portsmphr)){
			/* Register read failed: treat as unplug */
			phba->work_hs |= UNPLUG_ERR;
			phba->work_ha |= HA_ERATT;
			phba->hba_flag |= HBA_ERATT_HANDLED;
			return 1;
		}
		if (bf_get(lpfc_sliport_status_err, &portstat_reg)) {
			phba->work_status[0] =
				readl(phba->sli4_hba.u.if_type2.ERR1regaddr);
			phba->work_status[1] =
				readl(phba->sli4_hba.u.if_type2.ERR2regaddr);
			lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
					"2885 Port Status Event: "
					"port status reg 0x%x, "
					"port smphr reg 0x%x, "
					"error 1=0x%x, error 2=0x%x\n",
					portstat_reg.word0,
					portsmphr,
					phba->work_status[0],
					phba->work_status[1]);
			phba->work_ha |= HA_ERATT;
			phba->hba_flag |= HBA_ERATT_HANDLED;
			return 1;
		}
		break;
	case LPFC_SLI_INTF_IF_TYPE_1:
	default:
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"2886 HBA Error Attention on unsupported "
				"if type %d.", if_type);
		return 1;
	}

	return 0;
}
11904
11905
11906
11907
11908
11909
11910
11911
11912
11913
11914
11915int
11916lpfc_sli_check_eratt(struct lpfc_hba *phba)
11917{
11918 uint32_t ha_copy;
11919
11920
11921
11922
11923 if (phba->link_flag & LS_IGNORE_ERATT)
11924 return 0;
11925
11926
11927 spin_lock_irq(&phba->hbalock);
11928 if (phba->hba_flag & HBA_ERATT_HANDLED) {
11929
11930 spin_unlock_irq(&phba->hbalock);
11931 return 0;
11932 }
11933
11934
11935
11936
11937
11938 if (unlikely(phba->hba_flag & DEFER_ERATT)) {
11939 spin_unlock_irq(&phba->hbalock);
11940 return 0;
11941 }
11942
11943
11944 if (unlikely(pci_channel_offline(phba->pcidev))) {
11945 spin_unlock_irq(&phba->hbalock);
11946 return 0;
11947 }
11948
11949 switch (phba->sli_rev) {
11950 case LPFC_SLI_REV2:
11951 case LPFC_SLI_REV3:
11952
11953 ha_copy = lpfc_sli_eratt_read(phba);
11954 break;
11955 case LPFC_SLI_REV4:
11956
11957 ha_copy = lpfc_sli4_eratt_read(phba);
11958 break;
11959 default:
11960 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
11961 "0299 Invalid SLI revision (%d)\n",
11962 phba->sli_rev);
11963 ha_copy = 0;
11964 break;
11965 }
11966 spin_unlock_irq(&phba->hbalock);
11967
11968 return ha_copy;
11969}
11970
11971
11972
11973
11974
11975
11976
11977
11978
11979
11980
11981static inline int
11982lpfc_intr_state_check(struct lpfc_hba *phba)
11983{
11984
11985 if (unlikely(pci_channel_offline(phba->pcidev)))
11986 return -EIO;
11987
11988
11989 phba->sli.slistat.sli_intr++;
11990
11991
11992 if (unlikely(phba->link_state < LPFC_LINK_DOWN))
11993 return -EIO;
11994
11995 return 0;
11996}
11997
11998
11999
12000
12001
12002
12003
12004
12005
12006
12007
12008
12009
12010
12011
12012
12013
12014
12015
12016
12017
12018
/**
 * lpfc_sli_sp_intr_handler - Slow-path interrupt handler for SLI-3 devices
 * @irq: Interrupt number.
 * @dev_id: The device context pointer (struct lpfc_hba *).
 *
 * Services "slow path" events: link attention (HA_LATT), mailbox
 * completion (HA_MBATT), error attention (HA_ERATT) and ELS-ring
 * attention.  When invoked as the MSI-X slow-path vector it reads and
 * clears the Host Attention register itself; in INTx/MSI mode the
 * combined handler (lpfc_sli_intr_handler) has already captured it into
 * phba->ha_copy.  Deferred work is recorded in phba->work_ha and the
 * worker thread is woken.
 *
 * Return: IRQ_HANDLED when an event was processed (or register access
 * failed on an unplugged device), IRQ_NONE otherwise.
 **/
irqreturn_t
lpfc_sli_sp_intr_handler(int irq, void *dev_id)
{
	struct lpfc_hba *phba;
	uint32_t ha_copy, hc_copy;
	uint32_t work_ha_copy;
	unsigned long status;
	unsigned long iflag;
	uint32_t control;

	MAILBOX_t *mbox, *pmbox;
	struct lpfc_vport *vport;
	struct lpfc_nodelist *ndlp;
	struct lpfc_dmabuf *mp;
	LPFC_MBOXQ_t *pmb;
	int rc;

	/*
	 * Get the driver's phba structure from the dev_id and
	 * assume the HBA is not interrupting.
	 */
	phba = (struct lpfc_hba *)dev_id;

	if (unlikely(!phba))
		return IRQ_NONE;

	/*
	 * Extra work is needed when this routine is invoked as an
	 * individual MSI-X vector: read and clear the HA register here.
	 */
	if (phba->intr_type == MSIX) {
		/* Check device state for handling interrupt */
		if (lpfc_intr_state_check(phba))
			return IRQ_NONE;
		/* Need to read HA REG for slow-path events */
		spin_lock_irqsave(&phba->hbalock, iflag);
		if (lpfc_readl(phba->HAregaddr, &ha_copy))
			goto unplug_error;
		/* While a board kill is in progress, ignore ERATT. */
		if (phba->link_flag & LS_IGNORE_ERATT)
			ha_copy &= ~HA_ERATT;
		/* Check the need for handling ERATT in this handler */
		if (ha_copy & HA_ERATT) {
			if (phba->hba_flag & HBA_ERATT_HANDLED)
				/* ERATT polling has already handled it */
				ha_copy &= ~HA_ERATT;
			else
				/* This handler claims the ERATT */
				phba->hba_flag |= HBA_ERATT_HANDLED;
		}

		/*
		 * If there is deferred error attention, do not check for
		 * any interrupt.
		 */
		if (unlikely(phba->hba_flag & DEFER_ERATT)) {
			spin_unlock_irqrestore(&phba->hbalock, iflag);
			return IRQ_NONE;
		}

		/* Clear only the attention sources this handler owns,
		 * with their interrupt enables masked during the clear.
		 */
		if (lpfc_readl(phba->HCregaddr, &hc_copy))
			goto unplug_error;

		writel(hc_copy & ~(HC_MBINT_ENA | HC_R2INT_ENA |
			HC_LAINT_ENA | HC_ERINT_ENA),
			phba->HCregaddr);
		writel((ha_copy & (HA_MBATT | HA_R2_CLR_MSK)),
			phba->HAregaddr);
		writel(hc_copy, phba->HCregaddr);
		readl(phba->HAregaddr); /* flush */
		spin_unlock_irqrestore(&phba->hbalock, iflag);
	} else
		ha_copy = phba->ha_copy;

	/* Only handle events within this handler's work mask. */
	work_ha_copy = ha_copy & phba->work_ha_mask;

	if (work_ha_copy) {
		if (work_ha_copy & HA_LATT) {
			if (phba->sli.sli_flag & LPFC_PROCESS_LA) {
				/*
				 * Turn off Link Attention interrupts
				 * until CLEAR_LA is done.
				 */
				spin_lock_irqsave(&phba->hbalock, iflag);
				phba->sli.sli_flag &= ~LPFC_PROCESS_LA;
				if (lpfc_readl(phba->HCregaddr, &control))
					goto unplug_error;
				control &= ~HC_LAINT_ENA;
				writel(control, phba->HCregaddr);
				readl(phba->HCregaddr); /* flush */
				spin_unlock_irqrestore(&phba->hbalock, iflag);
			}
			else
				work_ha_copy &= ~HA_LATT;
		}

		if (work_ha_copy & ~(HA_ERATT | HA_MBATT | HA_LATT)) {
			/*
			 * ELS ring attention: disable the ring's interrupt
			 * enable while the worker thread drains it.
			 */
			status = (work_ha_copy &
				(HA_RXMASK << (4*LPFC_ELS_RING)));
			status >>= (4*LPFC_ELS_RING);
			if (status & HA_RXMASK) {
				spin_lock_irqsave(&phba->hbalock, iflag);
				if (lpfc_readl(phba->HCregaddr, &control))
					goto unplug_error;

				lpfc_debugfs_slow_ring_trc(phba,
				"ISR slow ring: ctl:x%x stat:x%x isrcnt:x%x",
				control, status,
				(uint32_t)phba->sli.slistat.sli_intr);

				if (control & (HC_R0INT_ENA << LPFC_ELS_RING)) {
					lpfc_debugfs_slow_ring_trc(phba,
						"ISR Disable ring:"
						"pwork:x%x hawork:x%x wait:x%x",
						phba->work_ha, work_ha_copy,
						(uint32_t)((unsigned long)
						&phba->work_waitq));

					control &=
					    ~(HC_R0INT_ENA << LPFC_ELS_RING);
					writel(control, phba->HCregaddr);
					readl(phba->HCregaddr); /* flush */
				}
				else {
					lpfc_debugfs_slow_ring_trc(phba,
						"ISR slow ring: pwork:"
						"x%x hawork:x%x wait:x%x",
						phba->work_ha, work_ha_copy,
						(uint32_t)((unsigned long)
						&phba->work_waitq));
				}
				spin_unlock_irqrestore(&phba->hbalock, iflag);
			}
		}
		spin_lock_irqsave(&phba->hbalock, iflag);
		if (work_ha_copy & HA_ERATT) {
			if (lpfc_sli_read_hs(phba))
				goto unplug_error;
			/*
			 * Check if there is a deferred error condition
			 * is active: HS_FFER1 together with any of
			 * FFER2..FFER8 defers handling and masks all
			 * host interrupt enables.
			 */
			if ((HS_FFER1 & phba->work_hs) &&
				((HS_FFER2 | HS_FFER3 | HS_FFER4 | HS_FFER5 |
				  HS_FFER6 | HS_FFER7 | HS_FFER8) &
				  phba->work_hs)) {
				phba->hba_flag |= DEFER_ERATT;
				/* Clear all interrupt enable conditions */
				writel(0, phba->HCregaddr);
				readl(phba->HCregaddr);
			}
		}

		if ((work_ha_copy & HA_MBATT) && (phba->sli.mbox_active)) {
			pmb = phba->sli.mbox_active;
			pmbox = &pmb->u.mb;
			mbox = phba->mbox;
			vport = pmb->vport;

			/* First check out the status word */
			lpfc_sli_pcimem_bcopy(mbox, pmbox, sizeof(uint32_t));
			if (pmbox->mbxOwner != OWN_HOST) {
				spin_unlock_irqrestore(&phba->hbalock, iflag);
				/*
				 * Stray Mailbox Interrupt: the mailbox is
				 * still owned by the adapter; log and drop
				 * the MBATT bit.
				 */
				lpfc_printf_log(phba, KERN_ERR, LOG_MBOX |
						LOG_SLI,
						"(%d):0304 Stray Mailbox "
						"Interrupt mbxCommand x%x "
						"mbxStatus x%x\n",
						(vport ? vport->vpi : 0),
						pmbox->mbxCommand,
						pmbox->mbxStatus);
				/* clear mailbox attention bit */
				work_ha_copy &= ~HA_MBATT;
			} else {
				phba->sli.mbox_active = NULL;
				spin_unlock_irqrestore(&phba->hbalock, iflag);
				phba->last_completion_time = jiffies;
				del_timer(&phba->sli.mbox_tmo);
				if (pmb->mbox_cmpl) {
					lpfc_sli_pcimem_bcopy(mbox, pmbox,
							MAILBOX_CMD_SIZE);
					if (pmb->out_ext_byte_len &&
						pmb->context2)
						lpfc_sli_pcimem_bcopy(
						phba->mbox_ext,
						pmb->context2,
						pmb->out_ext_byte_len);
				}
				if (pmb->mbox_flag & LPFC_MBX_IMED_UNREG) {
					pmb->mbox_flag &= ~LPFC_MBX_IMED_UNREG;

					lpfc_debugfs_disc_trc(vport,
						LPFC_DISC_TRC_MBOX_VPORT,
						"MBOX dflt rpi: : "
						"status:x%x rpi:x%x",
						(uint32_t)pmbox->mbxStatus,
						pmbox->un.varWords[0], 0);

					if (!pmbox->mbxStatus) {
						mp = (struct lpfc_dmabuf *)
							(pmb->context1);
						ndlp = (struct lpfc_nodelist *)
							pmb->context2;

						/* Reuse this mailbox to
						 * immediately unregister
						 * the RPI, preserving the
						 * saved context pointers.
						 */
						lpfc_unreg_login(phba,
							vport->vpi,
							pmbox->un.varWords[0],
							pmb);
						pmb->mbox_cmpl =
							lpfc_mbx_cmpl_dflt_rpi;
						pmb->context1 = mp;
						pmb->context2 = ndlp;
						pmb->vport = vport;
						rc = lpfc_sli_issue_mbox(phba,
								pmb,
								MBX_NOWAIT);
						if (rc != MBX_BUSY)
							lpfc_printf_log(phba,
							KERN_ERR,
							LOG_MBOX | LOG_SLI,
							"0350 rc should have"
							"been MBX_BUSY\n");
						if (rc != MBX_NOT_FINISHED)
							goto send_current_mbox;
					}
				}
				spin_lock_irqsave(
						&phba->pport->work_port_lock,
						iflag);
				phba->pport->work_port_events &=
					~WORKER_MBOX_TMO;
				spin_unlock_irqrestore(
						&phba->pport->work_port_lock,
						iflag);
				lpfc_mbox_cmpl_put(phba, pmb);
			}
		} else
			spin_unlock_irqrestore(&phba->hbalock, iflag);

		if ((work_ha_copy & HA_MBATT) &&
		    (phba->sli.mbox_active == NULL)) {
send_current_mbox:
			/* Process next mailbox command if there is one */
			do {
				rc = lpfc_sli_issue_mbox(phba, NULL,
							 MBX_NOWAIT);
			} while (rc == MBX_NOT_FINISHED);
			if (rc != MBX_SUCCESS)
				lpfc_printf_log(phba, KERN_ERR, LOG_MBOX |
					LOG_SLI, "0349 rc should be "
					"MBX_SUCCESS\n");
		}

		/* Hand remaining work to the worker thread. */
		spin_lock_irqsave(&phba->hbalock, iflag);
		phba->work_ha |= work_ha_copy;
		spin_unlock_irqrestore(&phba->hbalock, iflag);
		lpfc_worker_wake_up(phba);
	}
	return IRQ_HANDLED;
unplug_error:
	spin_unlock_irqrestore(&phba->hbalock, iflag);
	return IRQ_HANDLED;

}
12299
12300
12301
12302
12303
12304
12305
12306
12307
12308
12309
12310
12311
12312
12313
12314
12315
12316
12317
12318
/**
 * lpfc_sli_fp_intr_handler - Fast-path interrupt handler for SLI-3 devices
 * @irq: Interrupt number.
 * @dev_id: The device context pointer (struct lpfc_hba *).
 *
 * Services "fast path" ring events: the FCP ring and, when multi-ring
 * support is configured, the extra ring.  When invoked as the MSI-X
 * fast-path vector it reads and clears the Host Attention register; in
 * INTx/MSI mode the combined handler has already captured it into
 * phba->ha_copy.  Slow-path bits (phba->work_ha_mask) are masked off
 * before processing.
 *
 * Return: IRQ_HANDLED when processed (or register access failed),
 * IRQ_NONE otherwise.
 **/
irqreturn_t
lpfc_sli_fp_intr_handler(int irq, void *dev_id)
{
	struct lpfc_hba *phba;
	uint32_t ha_copy;
	unsigned long status;
	unsigned long iflag;
	struct lpfc_sli_ring *pring;

	/* Get the driver's phba structure from the dev_id and
	 * assume the HBA is not interrupting.
	 */
	phba = (struct lpfc_hba *) dev_id;

	if (unlikely(!phba))
		return IRQ_NONE;

	/*
	 * Extra work is needed when this routine is invoked as an
	 * individual MSI-X vector: read and clear the HA register here.
	 */
	if (phba->intr_type == MSIX) {
		/* Check device state for handling interrupt */
		if (lpfc_intr_state_check(phba))
			return IRQ_NONE;
		/* Need to read HA REG for FCP ring and other ring events */
		if (lpfc_readl(phba->HAregaddr, &ha_copy))
			return IRQ_HANDLED;

		spin_lock_irqsave(&phba->hbalock, iflag);
		/*
		 * If there is deferred error attention, do not check for
		 * any interrupt.
		 */
		if (unlikely(phba->hba_flag & DEFER_ERATT)) {
			spin_unlock_irqrestore(&phba->hbalock, iflag);
			return IRQ_NONE;
		}
		/* Clear up only attention source related to fast-path */
		writel((ha_copy & (HA_R0_CLR_MSK | HA_R1_CLR_MSK)),
			phba->HAregaddr);
		readl(phba->HAregaddr); /* flush */
		spin_unlock_irqrestore(&phba->hbalock, iflag);
	} else
		ha_copy = phba->ha_copy;

	/*
	 * Process all events on the FCP ring; slow-path attention bits
	 * belong to the other handler and are masked off first.
	 */
	ha_copy &= ~(phba->work_ha_mask);

	status = (ha_copy & (HA_RXMASK << (4*LPFC_FCP_RING)));
	status >>= (4*LPFC_FCP_RING);
	pring = &phba->sli.sli3_ring[LPFC_FCP_RING];
	if (status & HA_RXMASK)
		lpfc_sli_handle_fast_ring_event(phba, pring, status);

	if (phba->cfg_multi_ring_support == 2) {
		/*
		 * Process all events on the extra ring as well when
		 * multi-ring support is configured.
		 */
		status = (ha_copy & (HA_RXMASK << (4*LPFC_EXTRA_RING)));
		status >>= (4*LPFC_EXTRA_RING);
		if (status & HA_RXMASK) {
			lpfc_sli_handle_fast_ring_event(phba,
					&phba->sli.sli3_ring[LPFC_EXTRA_RING],
					status);
		}
	}
	return IRQ_HANDLED;
}
12390
12391
12392
12393
12394
12395
12396
12397
12398
12399
12400
12401
12402
12403
12404
12405
12406
12407
/**
 * lpfc_sli_intr_handler - Device-level interrupt handler to SLI-3 device
 * @irq: Interrupt request number.
 * @dev_id: The device context pointer (struct lpfc_hba *).
 *
 * Entry-level interrupt handler for INTx/MSI (single-vector) mode.  Reads
 * and clears the Host Attention register under hbalock, then demultiplexes
 * the captured attention bits to the slow-path handler (mailbox, link,
 * error and ELS ring attention) and/or the fast-path handler (FCP ring and
 * extra ring).
 *
 * Return: IRQ_HANDLED when any attention was serviced (or a register read
 * failed), IRQ_NONE otherwise.
 */
irqreturn_t
lpfc_sli_intr_handler(int irq, void *dev_id)
{
	struct lpfc_hba *phba;
	irqreturn_t sp_irq_rc, fp_irq_rc;
	unsigned long status1, status2;
	uint32_t hc_copy;

	/*
	 * Get the driver's phba structure from the dev_id and
	 * assume the HBA is not interrupting.
	 */
	phba = (struct lpfc_hba *) dev_id;

	if (unlikely(!phba))
		return IRQ_NONE;

	/* Check device state for handling interrupt */
	if (lpfc_intr_state_check(phba))
		return IRQ_NONE;

	spin_lock(&phba->hbalock);
	if (lpfc_readl(phba->HAregaddr, &phba->ha_copy)) {
		spin_unlock(&phba->hbalock);
		return IRQ_HANDLED;
	}

	if (unlikely(!phba->ha_copy)) {
		spin_unlock(&phba->hbalock);
		return IRQ_NONE;
	} else if (phba->ha_copy & HA_ERATT) {
		if (phba->hba_flag & HBA_ERATT_HANDLED)
			/* ERATT polling has handled ERATT */
			phba->ha_copy &= ~HA_ERATT;
		else
			/* Indicate interrupt handler handles ERATT */
			phba->hba_flag |= HBA_ERATT_HANDLED;
	}

	/*
	 * If there is deferred error attention, do not check for any interrupt.
	 */
	if (unlikely(phba->hba_flag & DEFER_ERATT)) {
		spin_unlock(&phba->hbalock);
		return IRQ_NONE;
	}

	/* Clear attention sources except link and error attentions */
	if (lpfc_readl(phba->HCregaddr, &hc_copy)) {
		spin_unlock(&phba->hbalock);
		return IRQ_HANDLED;
	}
	/* Temporarily mask interrupts while clearing the HA register */
	writel(hc_copy & ~(HC_MBINT_ENA | HC_R0INT_ENA | HC_R1INT_ENA
		| HC_R2INT_ENA | HC_LAINT_ENA | HC_ERINT_ENA),
		phba->HCregaddr);
	writel((phba->ha_copy & ~(HA_LATT | HA_ERATT)), phba->HAregaddr);
	writel(hc_copy, phba->HCregaddr);
	readl(phba->HAregaddr); /* flush */
	spin_unlock(&phba->hbalock);

	/*
	 * Invoke slow-path host attention interrupt handling as appropriate.
	 */

	/* status of events with mailbox and link attention */
	status1 = phba->ha_copy & (HA_MBATT | HA_LATT | HA_ERATT);

	/* status of events with ELS ring */
	status2 = (phba->ha_copy & (HA_RXMASK << (4*LPFC_ELS_RING)));
	status2 >>= (4*LPFC_ELS_RING);

	if (status1 || (status2 & HA_RXMASK))
		sp_irq_rc = lpfc_sli_sp_intr_handler(irq, dev_id);
	else
		sp_irq_rc = IRQ_NONE;

	/*
	 * Invoke fast-path host attention interrupt handling as appropriate.
	 */

	/* status of events with FCP ring */
	status1 = (phba->ha_copy & (HA_RXMASK << (4*LPFC_FCP_RING)));
	status1 >>= (4*LPFC_FCP_RING);

	/* status of events with extra ring */
	if (phba->cfg_multi_ring_support == 2) {
		status2 = (phba->ha_copy & (HA_RXMASK << (4*LPFC_EXTRA_RING)));
		status2 >>= (4*LPFC_EXTRA_RING);
	} else
		status2 = 0;

	if ((status1 & HA_RXMASK) || (status2 & HA_RXMASK))
		fp_irq_rc = lpfc_sli_fp_intr_handler(irq, dev_id);
	else
		fp_irq_rc = IRQ_NONE;

	/* Return device-level interrupt handling status */
	return (sp_irq_rc == IRQ_HANDLED) ? sp_irq_rc : fp_irq_rc;
}
12507
12508
12509
12510
12511
12512
12513
12514
/**
 * lpfc_sli4_fcp_xri_abort_event_proc - Process fcp xri abort event
 * @phba: pointer to lpfc hba data structure.
 *
 * Worker-thread routine that drains the deferred FCP XRI abort work queue,
 * invoking the FCP XRI-aborted handler for each queued event and then
 * releasing the event back to the free pool.
 */
void lpfc_sli4_fcp_xri_abort_event_proc(struct lpfc_hba *phba)
{
	struct lpfc_cq_event *cq_event;

	/* First, declare the fcp xri abort event has been handled */
	spin_lock_irq(&phba->hbalock);
	phba->hba_flag &= ~FCP_XRI_ABORT_EVENT;
	spin_unlock_irq(&phba->hbalock);
	/* Now, handle all the fcp xri abort events */
	while (!list_empty(&phba->sli4_hba.sp_fcp_xri_aborted_work_queue)) {
		/* Get the first event from the head of the event queue */
		spin_lock_irq(&phba->hbalock);
		list_remove_head(&phba->sli4_hba.sp_fcp_xri_aborted_work_queue,
				 cq_event, struct lpfc_cq_event, list);
		spin_unlock_irq(&phba->hbalock);
		/* Notify aborted XRI for FCP work queue */
		lpfc_sli4_fcp_xri_aborted(phba, &cq_event->cqe.wcqe_axri);
		/* Free the event processed back to the free pool */
		lpfc_sli4_cq_event_release(phba, cq_event);
	}
}
12536
12537
12538
12539
12540
12541
12542
12543
/**
 * lpfc_sli4_els_xri_abort_event_proc - Process els xri abort event
 * @phba: pointer to lpfc hba data structure.
 *
 * Worker-thread routine that drains the deferred ELS XRI abort work queue,
 * invoking the ELS XRI-aborted handler for each queued event and then
 * releasing the event back to the free pool.
 */
void lpfc_sli4_els_xri_abort_event_proc(struct lpfc_hba *phba)
{
	struct lpfc_cq_event *cq_event;

	/* First, declare the els xri abort event has been handled */
	spin_lock_irq(&phba->hbalock);
	phba->hba_flag &= ~ELS_XRI_ABORT_EVENT;
	spin_unlock_irq(&phba->hbalock);
	/* Now, handle all the els xri abort events */
	while (!list_empty(&phba->sli4_hba.sp_els_xri_aborted_work_queue)) {
		/* Get the first event from the head of the event queue */
		spin_lock_irq(&phba->hbalock);
		list_remove_head(&phba->sli4_hba.sp_els_xri_aborted_work_queue,
				 cq_event, struct lpfc_cq_event, list);
		spin_unlock_irq(&phba->hbalock);
		/* Notify aborted XRI for ELS work queue */
		lpfc_sli4_els_xri_aborted(phba, &cq_event->cqe.wcqe_axri);
		/* Free the event processed back to the free pool */
		lpfc_sli4_cq_event_release(phba, cq_event);
	}
}
12565
12566
12567
12568
12569
12570
12571
12572
12573
12574
12575
12576
/**
 * lpfc_sli4_iocb_param_transfer - Transfer pIocbOut and cmpl status to pIocbIn
 * @phba: Pointer to HBA context object.
 * @pIocbIn: Pointer to the driver's internal response iocb being built.
 * @pIocbOut: Pointer to the driver's original command iocb.
 * @wcqe: Pointer to the work-queue completion queue entry.
 *
 * Copies the command iocb (from the iocb member onward) into the response
 * iocb and translates the SLI-4 WCQE completion fields into the legacy
 * SLI-3 style iocb fields (ulpStatus, un.ulpWord[4], fcpi_parm, response
 * BDE size) so upper layers consume a uniform response format.  Also
 * decodes DIF/BlockGuard (DI_ERROR) details and latches the exchange-busy
 * (XB) indication into the iocb flags.
 */
static void
lpfc_sli4_iocb_param_transfer(struct lpfc_hba *phba,
			      struct lpfc_iocbq *pIocbIn,
			      struct lpfc_iocbq *pIocbOut,
			      struct lpfc_wcqe_complete *wcqe)
{
	int numBdes, i;
	unsigned long iflags;
	uint32_t status, max_response;
	struct lpfc_dmabuf *dmabuf;
	struct ulp_bde64 *bpl, bde;
	size_t offset = offsetof(struct lpfc_iocbq, iocb);

	/* Copy everything from the iocb member to the end of the struct */
	memcpy((char *)pIocbIn + offset, (char *)pIocbOut + offset,
	       sizeof(struct lpfc_iocbq) - offset);
	/* Map WCQE parameters to the legacy IOCB parameters */
	status = bf_get(lpfc_wcqe_c_status, wcqe);
	pIocbIn->iocb.ulpStatus = (status & LPFC_IOCB_STATUS_MASK);
	if (pIocbOut->iocb_flag & LPFC_IO_FCP)
		if (pIocbIn->iocb.ulpStatus == IOSTAT_FCP_RSP_ERROR)
			/* residual count = requested - bytes placed */
			pIocbIn->iocb.un.fcpi.fcpi_parm =
					pIocbOut->iocb.un.fcpi.fcpi_parm -
					wcqe->total_data_placed;
		else
			pIocbIn->iocb.un.ulpWord[4] = wcqe->parameter;
	else {
		pIocbIn->iocb.un.ulpWord[4] = wcqe->parameter;
		switch (pIocbOut->iocb.ulpCommand) {
		case CMD_ELS_REQUEST64_CR:
			/* Max response size is in the 2nd BDE of the BPL */
			dmabuf = (struct lpfc_dmabuf *)pIocbOut->context3;
			bpl = (struct ulp_bde64 *)dmabuf->virt;
			bde.tus.w = le32_to_cpu(bpl[1].tus.w);
			max_response = bde.tus.f.bdeSize;
			break;
		case CMD_GEN_REQUEST64_CR:
			max_response = 0;
			if (!pIocbOut->context3)
				break;
			/* Sum the sizes of the non-payload BDEs in the BPL */
			numBdes = pIocbOut->iocb.un.genreq64.bdl.bdeSize/
					sizeof(struct ulp_bde64);
			dmabuf = (struct lpfc_dmabuf *)pIocbOut->context3;
			bpl = (struct ulp_bde64 *)dmabuf->virt;
			for (i = 0; i < numBdes; i++) {
				bde.tus.w = le32_to_cpu(bpl[i].tus.w);
				if (bde.tus.f.bdeFlags != BUFF_TYPE_BDE_64)
					max_response += bde.tus.f.bdeSize;
			}
			break;
		default:
			max_response = wcqe->total_data_placed;
			break;
		}
		/* Report the smaller of max response and data placed */
		if (max_response < wcqe->total_data_placed)
			pIocbIn->iocb.un.genreq64.bdl.bdeSize = max_response;
		else
			pIocbIn->iocb.un.genreq64.bdl.bdeSize =
				wcqe->total_data_placed;
	}

	/* Convert BG errors for completion status */
	if (status == CQE_STATUS_DI_ERROR) {
		pIocbIn->iocb.ulpStatus = IOSTAT_LOCAL_REJECT;

		if (bf_get(lpfc_wcqe_c_bg_edir, wcqe))
			pIocbIn->iocb.un.ulpWord[4] = IOERR_RX_DMA_FAILED;
		else
			pIocbIn->iocb.un.ulpWord[4] = IOERR_TX_DMA_FAILED;

		pIocbIn->iocb.unsli3.sli3_bg.bgstat = 0;
		if (bf_get(lpfc_wcqe_c_bg_ge, wcqe)) /* guard error */
			pIocbIn->iocb.unsli3.sli3_bg.bgstat |=
				BGS_GUARD_ERR_MASK;
		if (bf_get(lpfc_wcqe_c_bg_ae, wcqe)) /* app tag error */
			pIocbIn->iocb.unsli3.sli3_bg.bgstat |=
				BGS_APPTAG_ERR_MASK;
		if (bf_get(lpfc_wcqe_c_bg_re, wcqe)) /* ref tag error */
			pIocbIn->iocb.unsli3.sli3_bg.bgstat |=
				BGS_REFTAG_ERR_MASK;

		/* Check to see if there was any good data before the error */
		if (bf_get(lpfc_wcqe_c_bg_tdpv, wcqe)) {
			pIocbIn->iocb.unsli3.sli3_bg.bgstat |=
				BGS_HI_WATER_MARK_PRESENT_MASK;
			pIocbIn->iocb.unsli3.sli3_bg.bghm =
				wcqe->total_data_placed;
		}

		/*
		 * Set ALL the error bits to indicate we don't know what
		 * type of error it is.
		 */
		if (!pIocbIn->iocb.unsli3.sli3_bg.bgstat)
			pIocbIn->iocb.unsli3.sli3_bg.bgstat |=
				(BGS_REFTAG_ERR_MASK | BGS_APPTAG_ERR_MASK |
				BGS_GUARD_ERR_MASK);
	}

	/* Pick up HBA exchange busy condition */
	if (bf_get(lpfc_wcqe_c_xb, wcqe)) {
		spin_lock_irqsave(&phba->hbalock, iflags);
		pIocbIn->iocb_flag |= LPFC_EXCHANGE_BUSY;
		spin_unlock_irqrestore(&phba->hbalock, iflags);
	}
}
12681
12682
12683
12684
12685
12686
12687
12688
12689
12690
12691
12692
/**
 * lpfc_sli4_els_wcqe_to_rspiocbq - Get response iocbq from els wcqe
 * @phba: Pointer to HBA context object.
 * @irspiocbq: Pointer to the work-queue completion queue entry wrapper.
 *
 * Handles an ELS work-queue completion: looks up the original command
 * iocb by the WCQE request tag, puts it back on the txcmpl queue and
 * transfers the completion parameters into @irspiocbq.
 *
 * Return: Pointer to the response IOCBQ on success; NULL when the ELS
 * ring is not set up or no matching command iocb exists (in which case
 * @irspiocbq is released back to the pool).
 */
static struct lpfc_iocbq *
lpfc_sli4_els_wcqe_to_rspiocbq(struct lpfc_hba *phba,
			       struct lpfc_iocbq *irspiocbq)
{
	struct lpfc_sli_ring *pring;
	struct lpfc_iocbq *cmdiocbq;
	struct lpfc_wcqe_complete *wcqe;
	unsigned long iflags;

	pring = lpfc_phba_elsring(phba);
	if (unlikely(!pring))
		return NULL;

	wcqe = &irspiocbq->cq_event.cqe.wcqe_cmpl;
	spin_lock_irqsave(&pring->ring_lock, iflags);
	pring->stats.iocb_event++;
	/* Look up the ELS command IOCB by the WCQE request tag */
	cmdiocbq = lpfc_sli_iocbq_lookup_by_tag(phba, pring,
				bf_get(lpfc_wcqe_c_request_tag, wcqe));
	if (unlikely(!cmdiocbq)) {
		spin_unlock_irqrestore(&pring->ring_lock, iflags);
		lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
				"0386 ELS complete with no corresponding "
				"cmdiocb: 0x%x 0x%x 0x%x 0x%x\n",
				wcqe->word0, wcqe->total_data_placed,
				wcqe->parameter, wcqe->word3);
		lpfc_sli_release_iocbq(phba, irspiocbq);
		return NULL;
	}

	/* Put the iocb back on the txcmplq */
	lpfc_sli_ringtxcmpl_put(phba, pring, cmdiocbq);
	spin_unlock_irqrestore(&pring->ring_lock, iflags);

	/* Fake the irspiocbq and copy necessary response information */
	lpfc_sli4_iocb_param_transfer(phba, irspiocbq, cmdiocbq, wcqe);

	return irspiocbq;
}
12732
12733inline struct lpfc_cq_event *
12734lpfc_cq_event_setup(struct lpfc_hba *phba, void *entry, int size)
12735{
12736 struct lpfc_cq_event *cq_event;
12737
12738
12739 cq_event = lpfc_sli4_cq_event_alloc(phba);
12740 if (!cq_event) {
12741 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
12742 "0602 Failed to alloc CQ_EVENT entry\n");
12743 return NULL;
12744 }
12745
12746
12747 memcpy(&cq_event->cqe, entry, size);
12748 return cq_event;
12749}
12750
12751
12752
12753
12754
12755
12756
12757
12758
12759
12760
12761static bool
12762lpfc_sli4_sp_handle_async_event(struct lpfc_hba *phba, struct lpfc_mcqe *mcqe)
12763{
12764 struct lpfc_cq_event *cq_event;
12765 unsigned long iflags;
12766
12767 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
12768 "0392 Async Event: word0:x%x, word1:x%x, "
12769 "word2:x%x, word3:x%x\n", mcqe->word0,
12770 mcqe->mcqe_tag0, mcqe->mcqe_tag1, mcqe->trailer);
12771
12772 cq_event = lpfc_cq_event_setup(phba, mcqe, sizeof(struct lpfc_mcqe));
12773 if (!cq_event)
12774 return false;
12775 spin_lock_irqsave(&phba->hbalock, iflags);
12776 list_add_tail(&cq_event->list, &phba->sli4_hba.sp_asynce_work_queue);
12777
12778 phba->hba_flag |= ASYNC_EVENT;
12779 spin_unlock_irqrestore(&phba->hbalock, iflags);
12780
12781 return true;
12782}
12783
12784
12785
12786
12787
12788
12789
12790
12791
12792
12793
/**
 * lpfc_sli4_sp_handle_mbox_event - Handle a mailbox completion event
 * @phba: Pointer to HBA context object.
 * @mcqe: Pointer to mailbox completion queue entry.
 *
 * Completes the currently active mailbox command: copies the hardware
 * mailbox contents back into the driver's command, folds the MCQE status
 * into the MQE status, handles the immediate-unreg-rpi special case
 * (which re-issues an UNREG_LOGIN on the same mailbox), and finally
 * releases the mailbox posting token so the worker thread can issue the
 * next queued command.
 *
 * Return: true when deferred work was posted for the worker thread,
 * false otherwise.
 */
static bool
lpfc_sli4_sp_handle_mbox_event(struct lpfc_hba *phba, struct lpfc_mcqe *mcqe)
{
	uint32_t mcqe_status;
	MAILBOX_t *mbox, *pmbox;
	struct lpfc_mqe *mqe;
	struct lpfc_vport *vport;
	struct lpfc_nodelist *ndlp;
	struct lpfc_dmabuf *mp;
	unsigned long iflags;
	LPFC_MBOXQ_t *pmb;
	bool workposted = false;
	int rc;

	/* If not a mailbox complete MCQE, out by checking mailbox consume */
	if (!bf_get(lpfc_trailer_completed, mcqe))
		goto out_no_mqe_complete;

	/* Get the pending/active mailbox command */
	spin_lock_irqsave(&phba->hbalock, iflags);
	pmb = phba->sli.mbox_active;
	if (unlikely(!pmb)) {
		lpfc_printf_log(phba, KERN_ERR, LOG_MBOX,
				"1832 No pending MBOX command to handle\n");
		spin_unlock_irqrestore(&phba->hbalock, iflags);
		goto out_no_mqe_complete;
	}
	spin_unlock_irqrestore(&phba->hbalock, iflags);
	mqe = &pmb->u.mqe;
	pmbox = (MAILBOX_t *)&pmb->u.mqe;
	mbox = phba->mbox;
	vport = pmb->vport;

	/* Reset heartbeat timer */
	phba->last_completion_time = jiffies;
	del_timer(&phba->sli.mbox_tmo);

	/* Move mbox data to caller's mailbox region, do endian swapping */
	if (pmb->mbox_cmpl && mbox)
		lpfc_sli_pcimem_bcopy(mbox, mqe, sizeof(struct lpfc_mqe));

	/*
	 * For mcqe errors, conditionally move a modified error code to
	 * the mbox so that the error will not be missed.
	 */
	mcqe_status = bf_get(lpfc_mcqe_status, mcqe);
	if (mcqe_status != MB_CQE_STATUS_SUCCESS) {
		if (bf_get(lpfc_mqe_status, mqe) == MBX_SUCCESS)
			bf_set(lpfc_mqe_status, mqe,
			       (LPFC_MBX_ERROR_RANGE | mcqe_status));
	}
	if (pmb->mbox_flag & LPFC_MBX_IMED_UNREG) {
		pmb->mbox_flag &= ~LPFC_MBX_IMED_UNREG;
		lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_MBOX_VPORT,
				      "MBOX dflt rpi: status:x%x rpi:x%x",
				      mcqe_status,
				      pmbox->un.varWords[0], 0);
		if (mcqe_status == MB_CQE_STATUS_SUCCESS) {
			mp = (struct lpfc_dmabuf *)(pmb->context1);
			ndlp = (struct lpfc_nodelist *)pmb->context2;
			/* Reuse this mailbox to immediately unreg the
			 * default RPI that was just registered.
			 */
			lpfc_unreg_login(phba, vport->vpi,
					 pmbox->un.varWords[0], pmb);
			pmb->mbox_cmpl = lpfc_mbx_cmpl_dflt_rpi;
			pmb->context1 = mp;
			pmb->context2 = ndlp;
			pmb->vport = vport;
			rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT);
			if (rc != MBX_BUSY)
				lpfc_printf_log(phba, KERN_ERR, LOG_MBOX |
						LOG_SLI, "0385 rc should "
						"have been MBX_BUSY\n");
			if (rc != MBX_NOT_FINISHED)
				goto send_current_mbox;
		}
	}
	spin_lock_irqsave(&phba->pport->work_port_lock, iflags);
	phba->pport->work_port_events &= ~WORKER_MBOX_TMO;
	spin_unlock_irqrestore(&phba->pport->work_port_lock, iflags);

	/* There is mailbox completion work to do */
	spin_lock_irqsave(&phba->hbalock, iflags);
	__lpfc_mbox_cmpl_put(phba, pmb);
	phba->work_ha |= HA_MBATT;
	spin_unlock_irqrestore(&phba->hbalock, iflags);
	workposted = true;

send_current_mbox:
	spin_lock_irqsave(&phba->hbalock, iflags);
	/* Release the mailbox command posting token */
	phba->sli.sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
	/* Setting active mailbox pointer need to be in sync to flag clear */
	phba->sli.mbox_active = NULL;
	spin_unlock_irqrestore(&phba->hbalock, iflags);
	/* Wake up worker thread to post the next pending mailbox command */
	lpfc_worker_wake_up(phba);
out_no_mqe_complete:
	if (bf_get(lpfc_trailer_consumed, mcqe))
		lpfc_sli4_mq_release(phba->sli4_hba.mbx_wq);
	return workposted;
}
12897
12898
12899
12900
12901
12902
12903
12904
12905
12906
12907
12908
12909static bool
12910lpfc_sli4_sp_handle_mcqe(struct lpfc_hba *phba, struct lpfc_cqe *cqe)
12911{
12912 struct lpfc_mcqe mcqe;
12913 bool workposted;
12914
12915
12916 lpfc_sli_pcimem_bcopy(cqe, &mcqe, sizeof(struct lpfc_mcqe));
12917
12918
12919 if (!bf_get(lpfc_trailer_async, &mcqe))
12920 workposted = lpfc_sli4_sp_handle_mbox_event(phba, &mcqe);
12921 else
12922 workposted = lpfc_sli4_sp_handle_async_event(phba, &mcqe);
12923 return workposted;
12924}
12925
12926
12927
12928
12929
12930
12931
12932
12933
12934
12935
/**
 * lpfc_sli4_sp_handle_els_wcqe - Handle els work-queue completion event
 * @phba: Pointer to HBA context object.
 * @cq: Pointer to the completion queue the WCQE arrived on.
 * @wcqe: Pointer to work-queue completion queue entry.
 *
 * Copies the WCQE into a freshly allocated response iocbq and queues it
 * on the slow-path queue-event list for the worker thread to process.
 *
 * Return: true when an event was queued for the worker thread, false
 * when no iocbq could be allocated (the event is dropped and logged).
 */
static bool
lpfc_sli4_sp_handle_els_wcqe(struct lpfc_hba *phba, struct lpfc_queue *cq,
			     struct lpfc_wcqe_complete *wcqe)
{
	struct lpfc_iocbq *irspiocbq;
	unsigned long iflags;
	struct lpfc_sli_ring *pring = cq->pring;
	int txq_cnt = 0;
	int txcmplq_cnt = 0;
	/* NOTE(review): never incremented here — legacy log field, always 0 */
	int fcp_txcmplq_cnt = 0;

	/* Get an irspiocbq for later ELS response processing use */
	irspiocbq = lpfc_sli_get_iocbq(phba);
	if (!irspiocbq) {
		if (!list_empty(&pring->txq))
			txq_cnt++;
		if (!list_empty(&pring->txcmplq))
			txcmplq_cnt++;
		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
			"0387 NO IOCBQ data: txq_cnt=%d iocb_cnt=%d "
			"fcp_txcmplq_cnt=%d, els_txcmplq_cnt=%d\n",
			txq_cnt, phba->iocb_cnt,
			fcp_txcmplq_cnt,
			txcmplq_cnt);
		return false;
	}

	/* Save off the slow-path queue event for work thread to process */
	memcpy(&irspiocbq->cq_event.cqe.wcqe_cmpl, wcqe, sizeof(*wcqe));
	spin_lock_irqsave(&phba->hbalock, iflags);
	list_add_tail(&irspiocbq->cq_event.list,
		      &phba->sli4_hba.sp_queue_event);
	phba->hba_flag |= HBA_SP_QUEUE_EVT;
	spin_unlock_irqrestore(&phba->hbalock, iflags);

	return true;
}
12973
12974
12975
12976
12977
12978
12979
12980
12981
12982static void
12983lpfc_sli4_sp_handle_rel_wcqe(struct lpfc_hba *phba,
12984 struct lpfc_wcqe_release *wcqe)
12985{
12986
12987 if (unlikely(!phba->sli4_hba.els_wq))
12988 return;
12989
12990 if (bf_get(lpfc_wcqe_r_wq_id, wcqe) == phba->sli4_hba.els_wq->queue_id)
12991 lpfc_sli4_wq_release(phba->sli4_hba.els_wq,
12992 bf_get(lpfc_wcqe_r_wqe_index, wcqe));
12993 else
12994 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
12995 "2579 Slow-path wqe consume event carries "
12996 "miss-matched qid: wcqe-qid=x%x, sp-qid=x%x\n",
12997 bf_get(lpfc_wcqe_r_wqe_index, wcqe),
12998 phba->sli4_hba.els_wq->queue_id);
12999}
13000
13001
13002
13003
13004
13005
13006
13007
13008
13009
13010
/**
 * lpfc_sli4_sp_handle_abort_xri_wcqe - Process a WCQE for aborted XRI
 * @phba: Pointer to HBA context object.
 * @cq: Pointer to the completion queue that carried the event.
 * @wcqe: Pointer to the XRI-aborted WCQE.
 *
 * Dispatches an XRI-abort completion on the CQ subtype: FCP and
 * ELS/NVME-LS events are copied into CQ events and queued for the worker
 * thread; NVME (including NVMET) events are handled inline.
 *
 * Return: true when deferred work was posted for the worker thread,
 * false otherwise.
 */
static bool
lpfc_sli4_sp_handle_abort_xri_wcqe(struct lpfc_hba *phba,
				   struct lpfc_queue *cq,
				   struct sli4_wcqe_xri_aborted *wcqe)
{
	bool workposted = false;
	struct lpfc_cq_event *cq_event;
	unsigned long iflags;

	switch (cq->subtype) {
	case LPFC_FCP:
		cq_event = lpfc_cq_event_setup(
			phba, wcqe, sizeof(struct sli4_wcqe_xri_aborted));
		if (!cq_event)
			return false;
		spin_lock_irqsave(&phba->hbalock, iflags);
		list_add_tail(&cq_event->list,
			      &phba->sli4_hba.sp_fcp_xri_aborted_work_queue);
		/* Set the fcp xri abort event flag */
		phba->hba_flag |= FCP_XRI_ABORT_EVENT;
		spin_unlock_irqrestore(&phba->hbalock, iflags);
		workposted = true;
		break;
	case LPFC_NVME_LS: /* NVME LS uses ELS resources */
	case LPFC_ELS:
		cq_event = lpfc_cq_event_setup(
			phba, wcqe, sizeof(struct sli4_wcqe_xri_aborted));
		if (!cq_event)
			return false;
		spin_lock_irqsave(&phba->hbalock, iflags);
		list_add_tail(&cq_event->list,
			      &phba->sli4_hba.sp_els_xri_aborted_work_queue);
		/* Set the els xri abort event flag */
		phba->hba_flag |= ELS_XRI_ABORT_EVENT;
		spin_unlock_irqrestore(&phba->hbalock, iflags);
		workposted = true;
		break;
	case LPFC_NVME:
		/* Notify aborted XRI for NVME work queue */
		if (phba->nvmet_support)
			lpfc_sli4_nvmet_xri_aborted(phba, wcqe);
		else
			lpfc_sli4_nvme_xri_aborted(phba, wcqe);

		workposted = false;
		break;
	default:
		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
				"0603 Invalid CQ subtype %d: "
				"%08x %08x %08x %08x\n",
				cq->subtype, wcqe->word0, wcqe->parameter,
				wcqe->word2, wcqe->word3);
		workposted = false;
		break;
	}
	return workposted;
}
13068
13069
13070
13071
13072
13073
13074
13075
13076
13077
/**
 * lpfc_sli4_sp_handle_rcqe - Process unsolicited rcqe
 * @phba: Pointer to HBA context object.
 * @rcqe: Pointer to receive-queue completion queue entry.
 *
 * Handles an unsolicited receive on the slow-path (hdr/data) receive
 * queue pair: on success the received frame's buffer is queued on the
 * slow-path queue-event list for the worker thread; on a buffer shortage
 * the HBA_POST_RECEIVE_BUFFER flag is raised so the worker reposts RQ
 * buffers.
 *
 * Return: true when work was posted for the worker thread, false
 * otherwise.
 */
static bool
lpfc_sli4_sp_handle_rcqe(struct lpfc_hba *phba, struct lpfc_rcqe *rcqe)
{
	bool workposted = false;
	struct fc_frame_header *fc_hdr;
	struct lpfc_queue *hrq = phba->sli4_hba.hdr_rq;
	struct lpfc_queue *drq = phba->sli4_hba.dat_rq;
	struct lpfc_nvmet_tgtport *tgtp;
	struct hbq_dmabuf *dma_buf;
	uint32_t status, rq_id;
	unsigned long iflags;

	/* sanity check on queue memory */
	if (unlikely(!hrq) || unlikely(!drq))
		return workposted;

	if (bf_get(lpfc_cqe_code, rcqe) == CQE_CODE_RECEIVE_V1)
		rq_id = bf_get(lpfc_rcqe_rq_id_v1, rcqe);
	else
		rq_id = bf_get(lpfc_rcqe_rq_id, rcqe);
	if (rq_id != hrq->queue_id)
		goto out;

	status = bf_get(lpfc_rcqe_status, rcqe);
	switch (status) {
	case FC_STATUS_RQ_BUF_LEN_EXCEEDED:
		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
				"2537 Receive Frame Truncated!!\n");
		/* fall through - still consume the truncated frame */
	case FC_STATUS_RQ_SUCCESS:
		spin_lock_irqsave(&phba->hbalock, iflags);
		lpfc_sli4_rq_release(hrq, drq);
		dma_buf = lpfc_sli_hbqbuf_get(&phba->hbqs[0].hbq_buffer_list);
		if (!dma_buf) {
			hrq->RQ_no_buf_found++;
			spin_unlock_irqrestore(&phba->hbalock, iflags);
			goto out;
		}
		hrq->RQ_rcv_buf++;
		hrq->RQ_buf_posted--;
		memcpy(&dma_buf->cq_event.cqe.rcqe_cmpl, rcqe, sizeof(*rcqe));

		/* frame header captured; not otherwise inspected here */
		fc_hdr = (struct fc_frame_header *)dma_buf->hbuf.virt;

		/* save off the frame for the work thread to process */
		list_add_tail(&dma_buf->cq_event.list,
			      &phba->sli4_hba.sp_queue_event);
		/* Frame received */
		phba->hba_flag |= HBA_SP_QUEUE_EVT;
		spin_unlock_irqrestore(&phba->hbalock, iflags);
		workposted = true;
		break;
	case FC_STATUS_INSUFF_BUF_FRM_DISC:
		if (phba->nvmet_support) {
			tgtp = phba->targetport->private;
			lpfc_printf_log(phba, KERN_ERR, LOG_SLI | LOG_NVME,
					"6402 RQE Error x%x, posted %d err_cnt "
					"%d: %x %x %x\n",
					status, hrq->RQ_buf_posted,
					hrq->RQ_no_posted_buf,
					atomic_read(&tgtp->rcv_fcp_cmd_in),
					atomic_read(&tgtp->rcv_fcp_cmd_out),
					atomic_read(&tgtp->xmt_fcp_release));
		}
		/* fall through */
	case FC_STATUS_INSUFF_BUF_NEED_BUF:
		hrq->RQ_no_posted_buf++;
		/* Post more buffers if possible */
		spin_lock_irqsave(&phba->hbalock, iflags);
		phba->hba_flag |= HBA_POST_RECEIVE_BUFFER;
		spin_unlock_irqrestore(&phba->hbalock, iflags);
		workposted = true;
		break;
	}
out:
	return workposted;
}
13156
13157
13158
13159
13160
13161
13162
13163
13164
13165
13166
13167
13168static bool
13169lpfc_sli4_sp_handle_cqe(struct lpfc_hba *phba, struct lpfc_queue *cq,
13170 struct lpfc_cqe *cqe)
13171{
13172 struct lpfc_cqe cqevt;
13173 bool workposted = false;
13174
13175
13176 lpfc_sli_pcimem_bcopy(cqe, &cqevt, sizeof(struct lpfc_cqe));
13177
13178
13179 switch (bf_get(lpfc_cqe_code, &cqevt)) {
13180 case CQE_CODE_COMPL_WQE:
13181
13182 phba->last_completion_time = jiffies;
13183 workposted = lpfc_sli4_sp_handle_els_wcqe(phba, cq,
13184 (struct lpfc_wcqe_complete *)&cqevt);
13185 break;
13186 case CQE_CODE_RELEASE_WQE:
13187
13188 lpfc_sli4_sp_handle_rel_wcqe(phba,
13189 (struct lpfc_wcqe_release *)&cqevt);
13190 break;
13191 case CQE_CODE_XRI_ABORTED:
13192
13193 phba->last_completion_time = jiffies;
13194 workposted = lpfc_sli4_sp_handle_abort_xri_wcqe(phba, cq,
13195 (struct sli4_wcqe_xri_aborted *)&cqevt);
13196 break;
13197 case CQE_CODE_RECEIVE:
13198 case CQE_CODE_RECEIVE_V1:
13199
13200 phba->last_completion_time = jiffies;
13201 workposted = lpfc_sli4_sp_handle_rcqe(phba,
13202 (struct lpfc_rcqe *)&cqevt);
13203 break;
13204 default:
13205 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
13206 "0388 Not a valid WCQE code: x%x\n",
13207 bf_get(lpfc_cqe_code, &cqevt));
13208 break;
13209 }
13210 return workposted;
13211}
13212
13213
13214
13215
13216
13217
13218
13219
13220
13221
13222
13223
13224
13225
13226static void
13227lpfc_sli4_sp_handle_eqe(struct lpfc_hba *phba, struct lpfc_eqe *eqe,
13228 struct lpfc_queue *speq)
13229{
13230 struct lpfc_queue *cq = NULL, *childq;
13231 uint16_t cqid;
13232
13233
13234 cqid = bf_get_le32(lpfc_eqe_resource_id, eqe);
13235
13236 list_for_each_entry(childq, &speq->child_list, list) {
13237 if (childq->queue_id == cqid) {
13238 cq = childq;
13239 break;
13240 }
13241 }
13242 if (unlikely(!cq)) {
13243 if (phba->sli.sli_flag & LPFC_SLI_ACTIVE)
13244 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
13245 "0365 Slow-path CQ identifier "
13246 "(%d) does not exist\n", cqid);
13247 return;
13248 }
13249
13250
13251 cq->assoc_qp = speq;
13252
13253 if (!queue_work(phba->wq, &cq->spwork))
13254 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
13255 "0390 Cannot schedule soft IRQ "
13256 "for CQ eqcqid=%d, cqid=%d on CPU %d\n",
13257 cqid, cq->queue_id, smp_processor_id());
13258}
13259
13260
13261
13262
13263
13264
13265
13266
13267
13268
13269
13270
13271
/**
 * lpfc_sli4_sp_process_cq - Process a slow-path completion queue
 * @work: Pointer to the work_struct embedded in the CQ (cq->spwork).
 *
 * Workqueue routine that drains a slow-path completion queue (mailbox CQ
 * or slow-path WCQ).  Consumes at most cq->entry_repost entries per pass,
 * dispatching each CQE to the matching handler, then re-arms the CQ and
 * wakes the worker thread when any handler posted deferred work.
 */
static void
lpfc_sli4_sp_process_cq(struct work_struct *work)
{
	struct lpfc_queue *cq =
		container_of(work, struct lpfc_queue, spwork);
	struct lpfc_hba *phba = cq->phba;
	struct lpfc_cqe *cqe;
	bool workposted = false;
	int ccount = 0;

	/* Process all the entries to the CQ */
	switch (cq->type) {
	case LPFC_MCQ:
		while ((cqe = lpfc_sli4_cq_get(cq))) {
			workposted |= lpfc_sli4_sp_handle_mcqe(phba, cqe);
			/* Budget: stop after entry_repost entries */
			if (!(++ccount % cq->entry_repost))
				break;
			cq->CQ_mbox++;
		}
		break;
	case LPFC_WCQ:
		while ((cqe = lpfc_sli4_cq_get(cq))) {
			if (cq->subtype == LPFC_FCP ||
			    cq->subtype == LPFC_NVME) {
#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
				if (phba->ktime_on)
					cq->isr_timestamp = ktime_get_ns();
				else
					cq->isr_timestamp = 0;
#endif
				workposted |= lpfc_sli4_fp_handle_cqe(phba, cq,
								       cqe);
			} else {
				workposted |= lpfc_sli4_sp_handle_cqe(phba, cq,
								       cqe);
			}
			/* Budget: stop after entry_repost entries */
			if (!(++ccount % cq->entry_repost))
				break;
		}

		/* Track the max number of CQEs processed in 1 EQ */
		if (ccount > cq->CQ_max_cqe)
			cq->CQ_max_cqe = ccount;
		break;
	default:
		/* Slow-path CQ should be MCQ or WCQ only */
		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
				"0370 Invalid completion queue type (%d)\n",
				cq->type);
		return;
	}

	/* Catch the no cq entry condition, log an error */
	if (unlikely(ccount == 0))
		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
				"0371 No entry from the CQ: identifier "
				"(x%x), type (%d)\n", cq->queue_id, cq->type);

	/* In any case, flash and re-arm the CQ */
	phba->sli4_hba.sli4_cq_release(cq, LPFC_QUEUE_REARM);

	/* wake up worker thread if there are works to be done */
	if (workposted)
		lpfc_worker_wake_up(phba);
}
13336
13337
13338
13339
13340
13341
13342
13343
13344
13345
/**
 * lpfc_sli4_fp_handle_fcp_wcqe - Process fast-path work queue completion entry
 * @phba: Pointer to HBA context object.
 * @cq: Pointer to completion queue.
 * @wcqe: Pointer to work-queue completion queue entry.
 *
 * Handles a fast-path WQ completion: looks up the command iocb by its
 * request tag and invokes its completion callback.  Commands using the
 * wqe_cmpl path receive the WCQE directly; commands using the legacy
 * iocb_cmpl path have the WCQE translated into a pseudo response iocb
 * first.  On resource-shortage errors the device queue depth is ramped
 * down.
 */
static void
lpfc_sli4_fp_handle_fcp_wcqe(struct lpfc_hba *phba, struct lpfc_queue *cq,
			     struct lpfc_wcqe_complete *wcqe)
{
	struct lpfc_sli_ring *pring = cq->pring;
	struct lpfc_iocbq *cmdiocbq;
	struct lpfc_iocbq irspiocbq;
	unsigned long iflags;

	/* Check for response status */
	if (unlikely(bf_get(lpfc_wcqe_c_status, wcqe))) {
		/* If resource errors reported from HBA, reduce
		 * queuedepths of the SCSI device.
		 */
		if (((bf_get(lpfc_wcqe_c_status, wcqe) ==
		     IOSTAT_LOCAL_REJECT)) &&
		    ((wcqe->parameter & IOERR_PARAM_MASK) ==
		     IOERR_NO_RESOURCES))
			phba->lpfc_rampdown_queue_depth(phba);

		/* Log the error status */
		lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
				"0373 FCP complete error: status=x%x, "
				"hw_status=x%x, total_data_specified=%d, "
				"parameter=x%x, word3=x%x\n",
				bf_get(lpfc_wcqe_c_status, wcqe),
				bf_get(lpfc_wcqe_c_hw_status, wcqe),
				wcqe->total_data_placed, wcqe->parameter,
				wcqe->word3);
	}

	/* Look up the FCP command IOCB and create pseudo response IOCB */
	spin_lock_irqsave(&pring->ring_lock, iflags);
	pring->stats.iocb_event++;
	cmdiocbq = lpfc_sli_iocbq_lookup_by_tag(phba, pring,
				bf_get(lpfc_wcqe_c_request_tag, wcqe));
	spin_unlock_irqrestore(&pring->ring_lock, iflags);
	if (unlikely(!cmdiocbq)) {
		lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
				"0374 FCP complete with no corresponding "
				"cmdiocb: iotag (%d)\n",
				bf_get(lpfc_wcqe_c_request_tag, wcqe));
		return;
	}
#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
	cmdiocbq->isr_timestamp = cq->isr_timestamp;
#endif
	if (cmdiocbq->iocb_cmpl == NULL) {
		if (cmdiocbq->wqe_cmpl) {
			if (cmdiocbq->iocb_flag & LPFC_DRIVER_ABORTED) {
				spin_lock_irqsave(&phba->hbalock, iflags);
				cmdiocbq->iocb_flag &= ~LPFC_DRIVER_ABORTED;
				spin_unlock_irqrestore(&phba->hbalock, iflags);
			}

			/* Pass the cmd_iocb and the wcqe to the upper layer */
			(cmdiocbq->wqe_cmpl)(phba, cmdiocbq, wcqe);
			return;
		}
		lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
				"0375 FCP cmdiocb not callback function "
				"iotag: (%d)\n",
				bf_get(lpfc_wcqe_c_request_tag, wcqe));
		return;
	}

	/* Fake the irspiocb and copy necessary response information */
	lpfc_sli4_iocb_param_transfer(phba, &irspiocbq, cmdiocbq, wcqe);

	if (cmdiocbq->iocb_flag & LPFC_DRIVER_ABORTED) {
		spin_lock_irqsave(&phba->hbalock, iflags);
		cmdiocbq->iocb_flag &= ~LPFC_DRIVER_ABORTED;
		spin_unlock_irqrestore(&phba->hbalock, iflags);
	}

	/* Pass the cmd_iocb and the rsp state to the upper layer */
	(cmdiocbq->iocb_cmpl)(phba, cmdiocbq, &irspiocbq);
}
13424
13425
13426
13427
13428
13429
13430
13431
13432
13433
13434static void
13435lpfc_sli4_fp_handle_rel_wcqe(struct lpfc_hba *phba, struct lpfc_queue *cq,
13436 struct lpfc_wcqe_release *wcqe)
13437{
13438 struct lpfc_queue *childwq;
13439 bool wqid_matched = false;
13440 uint16_t hba_wqid;
13441
13442
13443 hba_wqid = bf_get(lpfc_wcqe_r_wq_id, wcqe);
13444 list_for_each_entry(childwq, &cq->child_list, list) {
13445 if (childwq->queue_id == hba_wqid) {
13446 lpfc_sli4_wq_release(childwq,
13447 bf_get(lpfc_wcqe_r_wqe_index, wcqe));
13448 if (childwq->q_flag & HBA_NVMET_WQFULL)
13449 lpfc_nvmet_wqfull_process(phba, childwq);
13450 wqid_matched = true;
13451 break;
13452 }
13453 }
13454
13455 if (wqid_matched != true)
13456 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
13457 "2580 Fast-path wqe consume event carries "
13458 "miss-matched qid: wcqe-qid=x%x\n", hba_wqid);
13459}
13460
13461
13462
13463
13464
13465
13466
13467
13468
13469
/**
 * lpfc_sli4_nvmet_handle_rcqe - Process an NVMET unsolicited rcqe
 * @phba: Pointer to HBA context object.
 * @cq: Pointer to the completion queue the RCQE arrived on.
 * @rcqe: Pointer to receive-queue completion queue entry.
 *
 * Handles unsolicited receive completions on the NVMET multi-receive
 * queues: selects the hdr/data RQ pair from the CQ index, performs basic
 * FC header sanity checks and forwards FCP command frames to the NVMET
 * unsolicited event handler.  Frames failing the checks are dropped and
 * their buffers freed.
 *
 * Return: true when work was posted for the worker thread, false
 * otherwise (including all frames handled or dropped inline).
 */
static bool
lpfc_sli4_nvmet_handle_rcqe(struct lpfc_hba *phba, struct lpfc_queue *cq,
			    struct lpfc_rcqe *rcqe)
{
	bool workposted = false;
	struct lpfc_queue *hrq;
	struct lpfc_queue *drq;
	struct rqb_dmabuf *dma_buf;
	struct fc_frame_header *fc_hdr;
	struct lpfc_nvmet_tgtport *tgtp;
	uint32_t status, rq_id;
	unsigned long iflags;
	uint32_t fctl, idx;

	if ((phba->nvmet_support == 0) ||
	    (phba->sli4_hba.nvmet_cqset == NULL))
		return workposted;

	/* CQs and MRQ pairs are index-aligned */
	idx = cq->queue_id - phba->sli4_hba.nvmet_cqset[0]->queue_id;
	hrq = phba->sli4_hba.nvmet_mrq_hdr[idx];
	drq = phba->sli4_hba.nvmet_mrq_data[idx];

	/* sanity check on queue memory */
	if (unlikely(!hrq) || unlikely(!drq))
		return workposted;

	if (bf_get(lpfc_cqe_code, rcqe) == CQE_CODE_RECEIVE_V1)
		rq_id = bf_get(lpfc_rcqe_rq_id_v1, rcqe);
	else
		rq_id = bf_get(lpfc_rcqe_rq_id, rcqe);

	if ((phba->nvmet_support == 0) ||
	    (rq_id != hrq->queue_id))
		return workposted;

	status = bf_get(lpfc_rcqe_status, rcqe);
	switch (status) {
	case FC_STATUS_RQ_BUF_LEN_EXCEEDED:
		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
				"6126 Receive Frame Truncated!!\n");
		/* fall through - still consume the truncated frame */
	case FC_STATUS_RQ_SUCCESS:
		spin_lock_irqsave(&phba->hbalock, iflags);
		lpfc_sli4_rq_release(hrq, drq);
		dma_buf = lpfc_sli_rqbuf_get(phba, hrq);
		if (!dma_buf) {
			hrq->RQ_no_buf_found++;
			spin_unlock_irqrestore(&phba->hbalock, iflags);
			goto out;
		}
		spin_unlock_irqrestore(&phba->hbalock, iflags);
		hrq->RQ_rcv_buf++;
		hrq->RQ_buf_posted--;
		fc_hdr = (struct fc_frame_header *)dma_buf->hbuf.virt;

		/* Just some basic sanity checks on FCP Command frame */
		fctl = (fc_hdr->fh_f_ctl[0] << 16 |
		fc_hdr->fh_f_ctl[1] << 8 |
		fc_hdr->fh_f_ctl[2]);
		if (((fctl &
		    (FC_FC_FIRST_SEQ | FC_FC_END_SEQ | FC_FC_SEQ_INIT)) !=
		    (FC_FC_FIRST_SEQ | FC_FC_END_SEQ | FC_FC_SEQ_INIT)) ||
		    (fc_hdr->fh_seq_cnt != 0)) /* 0 byte swapped is still 0 */
			goto drop;

		if (fc_hdr->fh_type == FC_TYPE_FCP) {
			dma_buf->bytes_recv = bf_get(lpfc_rcqe_length, rcqe);
			lpfc_nvmet_unsol_fcp_event(
				phba, idx, dma_buf,
				cq->isr_timestamp);
			return false;
		}
drop:
		lpfc_in_buf_free(phba, &dma_buf->dbuf);
		break;
	case FC_STATUS_INSUFF_BUF_FRM_DISC:
		if (phba->nvmet_support) {
			tgtp = phba->targetport->private;
			lpfc_printf_log(phba, KERN_ERR, LOG_SLI | LOG_NVME,
					"6401 RQE Error x%x, posted %d err_cnt "
					"%d: %x %x %x\n",
					status, hrq->RQ_buf_posted,
					hrq->RQ_no_posted_buf,
					atomic_read(&tgtp->rcv_fcp_cmd_in),
					atomic_read(&tgtp->rcv_fcp_cmd_out),
					atomic_read(&tgtp->xmt_fcp_release));
		}
		/* fall through */
	case FC_STATUS_INSUFF_BUF_NEED_BUF:
		hrq->RQ_no_posted_buf++;
		/* Post more buffers if possible */
		break;
	}
out:
	return workposted;
}
13567
13568
13569
13570
13571
13572
13573
13574
13575
13576static int
13577lpfc_sli4_fp_handle_cqe(struct lpfc_hba *phba, struct lpfc_queue *cq,
13578 struct lpfc_cqe *cqe)
13579{
13580 struct lpfc_wcqe_release wcqe;
13581 bool workposted = false;
13582
13583
13584 lpfc_sli_pcimem_bcopy(cqe, &wcqe, sizeof(struct lpfc_cqe));
13585
13586
13587 switch (bf_get(lpfc_wcqe_c_code, &wcqe)) {
13588 case CQE_CODE_COMPL_WQE:
13589 case CQE_CODE_NVME_ERSP:
13590 cq->CQ_wq++;
13591
13592 phba->last_completion_time = jiffies;
13593 if ((cq->subtype == LPFC_FCP) || (cq->subtype == LPFC_NVME))
13594 lpfc_sli4_fp_handle_fcp_wcqe(phba, cq,
13595 (struct lpfc_wcqe_complete *)&wcqe);
13596 if (cq->subtype == LPFC_NVME_LS)
13597 lpfc_sli4_fp_handle_fcp_wcqe(phba, cq,
13598 (struct lpfc_wcqe_complete *)&wcqe);
13599 break;
13600 case CQE_CODE_RELEASE_WQE:
13601 cq->CQ_release_wqe++;
13602
13603 lpfc_sli4_fp_handle_rel_wcqe(phba, cq,
13604 (struct lpfc_wcqe_release *)&wcqe);
13605 break;
13606 case CQE_CODE_XRI_ABORTED:
13607 cq->CQ_xri_aborted++;
13608
13609 phba->last_completion_time = jiffies;
13610 workposted = lpfc_sli4_sp_handle_abort_xri_wcqe(phba, cq,
13611 (struct sli4_wcqe_xri_aborted *)&wcqe);
13612 break;
13613 case CQE_CODE_RECEIVE_V1:
13614 case CQE_CODE_RECEIVE:
13615 phba->last_completion_time = jiffies;
13616 if (cq->subtype == LPFC_NVMET) {
13617 workposted = lpfc_sli4_nvmet_handle_rcqe(
13618 phba, cq, (struct lpfc_rcqe *)&wcqe);
13619 }
13620 break;
13621 default:
13622 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
13623 "0144 Not a valid CQE code: x%x\n",
13624 bf_get(lpfc_wcqe_c_code, &wcqe));
13625 break;
13626 }
13627 return workposted;
13628}
13629
13630
13631
13632
13633
13634
13635
13636
13637
13638
13639
13640
13641
/**
 * lpfc_sli4_hba_handle_eqe - Route a fast-path event queue entry to its CQ
 * @phba: Pointer to HBA context object.
 * @eqe: Pointer to the fast-path event queue entry.
 * @qidx: Index of the hardware event queue the entry arrived on.
 *
 * Reads the CQ id out of the EQE and searches, in priority order, the
 * NVMET CQ set, the per-channel NVME CQ, the per-channel FCP CQ and the
 * NVME LS CQ for a match.  If none matches, the entry is assumed to belong
 * to a slow-path queue and is forwarded to lpfc_sli4_sp_handle_eqe().
 * Otherwise the matched CQ is queued onto the HBA workqueue for soft-IRQ
 * style processing by lpfc_sli4_hba_process_cq().
 **/
static void
lpfc_sli4_hba_handle_eqe(struct lpfc_hba *phba, struct lpfc_eqe *eqe,
			 uint32_t qidx)
{
	struct lpfc_queue *cq = NULL;
	uint16_t cqid, id;

	/* Only completion events (major code 0) are valid here */
	if (unlikely(bf_get_le32(lpfc_eqe_major_code, eqe) != 0)) {
		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
				"0366 Not a valid completion "
				"event: majorcode=x%x, minorcode=x%x\n",
				bf_get_le32(lpfc_eqe_major_code, eqe),
				bf_get_le32(lpfc_eqe_minor_code, eqe));
		return;
	}

	/* Get the reference to the corresponding CQ */
	cqid = bf_get_le32(lpfc_eqe_resource_id, eqe);

	/* NVMET CQ ids are allocated contiguously starting at cqset[0] */
	if (phba->cfg_nvmet_mrq && phba->sli4_hba.nvmet_cqset) {
		id = phba->sli4_hba.nvmet_cqset[0]->queue_id;
		if ((cqid >= id) && (cqid < (id + phba->cfg_nvmet_mrq))) {
			/* Process NVMET unsol rcv */
			cq = phba->sli4_hba.nvmet_cqset[cqid - id];
			goto process_cq;
		}
	}

	if (phba->sli4_hba.nvme_cq_map &&
	    (cqid == phba->sli4_hba.nvme_cq_map[qidx])) {
		/* Process NVME / NVMET command completion */
		cq = phba->sli4_hba.nvme_cq[qidx];
		goto process_cq;
	}

	if (phba->sli4_hba.fcp_cq_map &&
	    (cqid == phba->sli4_hba.fcp_cq_map[qidx])) {
		/* Process FCP command completion */
		cq = phba->sli4_hba.fcp_cq[qidx];
		goto process_cq;
	}

	if (phba->sli4_hba.nvmels_cq &&
	    (cqid == phba->sli4_hba.nvmels_cq->queue_id)) {
		/* Process NVME unsol rcv; falls into process_cq below */
		cq = phba->sli4_hba.nvmels_cq;
	}

	/* Otherwise this is a Slow path event */
	if (cq == NULL) {
		lpfc_sli4_sp_handle_eqe(phba, eqe, phba->sli4_hba.hba_eq[qidx]);
		return;
	}

process_cq:
	if (unlikely(cqid != cq->queue_id)) {
		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
				"0368 Miss-matched fast-path completion "
				"queue identifier: eqcqid=%d, fcpcqid=%d\n",
				cqid, cq->queue_id);
		return;
	}

	/* Save EQ associated with this CQ for later doorbell accounting */
	cq->assoc_qp = phba->sli4_hba.hba_eq[qidx];

	/* Defer CQ processing to the HBA workqueue */
	if (!queue_work(phba->wq, &cq->irqwork))
		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
				"0363 Cannot schedule soft IRQ "
				"for CQ eqcqid=%d, cqid=%d on CPU %d\n",
				cqid, cq->queue_id, smp_processor_id());
}
13714
13715
13716
13717
13718
13719
13720
13721
13722
13723
13724
13725
13726
/**
 * lpfc_sli4_hba_process_cq - Process a fast-path completion queue (workqueue)
 * @work: Embedded work_struct inside the lpfc_queue being processed.
 *
 * Workqueue handler scheduled from lpfc_sli4_hba_handle_eqe().  Drains CQEs
 * from the queue, dispatching each through lpfc_sli4_fp_handle_cqe(), up to
 * the queue's entry_repost budget per invocation.  Afterwards it updates
 * statistics, rearms the CQ doorbell and wakes the worker thread if any
 * handler posted deferred work.
 **/
static void
lpfc_sli4_hba_process_cq(struct work_struct *work)
{
	struct lpfc_queue *cq =
		container_of(work, struct lpfc_queue, irqwork);
	struct lpfc_hba *phba = cq->phba;
	struct lpfc_cqe *cqe;
	bool workposted = false;
	int ccount = 0;

	/* Process all the entries to the CQ */
	while ((cqe = lpfc_sli4_cq_get(cq))) {
#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
		/* Timestamp only captured when ktime debugfs stats enabled */
		if (phba->ktime_on)
			cq->isr_timestamp = ktime_get_ns();
		else
			cq->isr_timestamp = 0;
#endif
		workposted |= lpfc_sli4_fp_handle_cqe(phba, cq, cqe);
		/* Bound the work done per invocation to entry_repost CQEs */
		if (!(++ccount % cq->entry_repost))
			break;
	}

	/* Track the max number of CQEs processed in 1 EQ */
	if (ccount > cq->CQ_max_cqe)
		cq->CQ_max_cqe = ccount;
	cq->assoc_qp->EQ_cqe_cnt += ccount;

	/* Catch the no cq entry condition */
	if (unlikely(ccount == 0))
		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
				"0369 No entry from fast-path completion "
				"queue fcpcqid=%d\n", cq->queue_id);

	/* In any case, flash and re-arm the CQ */
	phba->sli4_hba.sli4_cq_release(cq, LPFC_QUEUE_REARM);

	/* wake up worker thread if there are works to be done */
	if (workposted)
		lpfc_worker_wake_up(phba);
}
13768
13769static void
13770lpfc_sli4_eq_flush(struct lpfc_hba *phba, struct lpfc_queue *eq)
13771{
13772 struct lpfc_eqe *eqe;
13773
13774
13775 while ((eqe = lpfc_sli4_eq_get(eq)))
13776 ;
13777
13778
13779 phba->sli4_hba.sli4_eq_release(eq, LPFC_QUEUE_REARM);
13780}
13781
13782
13783
13784
13785
13786
13787
13788
13789
13790
13791
13792
13793
13794
13795
13796static void
13797lpfc_sli4_fof_handle_eqe(struct lpfc_hba *phba, struct lpfc_eqe *eqe)
13798{
13799 struct lpfc_queue *cq;
13800 uint16_t cqid;
13801
13802 if (unlikely(bf_get_le32(lpfc_eqe_major_code, eqe) != 0)) {
13803 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
13804 "9147 Not a valid completion "
13805 "event: majorcode=x%x, minorcode=x%x\n",
13806 bf_get_le32(lpfc_eqe_major_code, eqe),
13807 bf_get_le32(lpfc_eqe_minor_code, eqe));
13808 return;
13809 }
13810
13811
13812 cqid = bf_get_le32(lpfc_eqe_resource_id, eqe);
13813
13814
13815 cq = phba->sli4_hba.oas_cq;
13816 if (unlikely(!cq)) {
13817 if (phba->sli.sli_flag & LPFC_SLI_ACTIVE)
13818 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
13819 "9148 OAS completion queue "
13820 "does not exist\n");
13821 return;
13822 }
13823
13824 if (unlikely(cqid != cq->queue_id)) {
13825 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
13826 "9149 Miss-matched fast-path compl "
13827 "queue id: eqcqid=%d, fcpcqid=%d\n",
13828 cqid, cq->queue_id);
13829 return;
13830 }
13831
13832
13833 cq->assoc_qp = phba->sli4_hba.fof_eq;
13834
13835
13836 if (!queue_work(phba->wq, &cq->irqwork))
13837 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
13838 "0367 Cannot schedule soft IRQ "
13839 "for CQ eqcqid=%d, cqid=%d on CPU %d\n",
13840 cqid, cq->queue_id, smp_processor_id());
13841}
13842
13843
13844
13845
13846
13847
13848
13849
13850
13851
13852
13853
13854
13855
13856
13857
13858
13859
13860
13861
13862
13863
/**
 * lpfc_sli4_fof_intr_handler - Interrupt handler for the OAS (fof) EQ
 * @irq: Interrupt number.
 * @dev_id: Pointer to the lpfc_hba_eq_hdl for the fof vector.
 *
 * Services the Optimized Access Storage event queue: validates device and
 * queue state, drains up to entry_repost EQEs through
 * lpfc_sli4_fof_handle_eqe(), updates statistics and rearms the EQ.
 *
 * Return: IRQ_HANDLED if at least one EQE was consumed (or an MSI-X spurious
 * interrupt was logged), IRQ_NONE otherwise.
 **/
irqreturn_t
lpfc_sli4_fof_intr_handler(int irq, void *dev_id)
{
	struct lpfc_hba *phba;
	struct lpfc_hba_eq_hdl *hba_eq_hdl;
	struct lpfc_queue *eq;
	struct lpfc_eqe *eqe;
	unsigned long iflag;
	int ecount = 0;

	/* Get the driver's phba structure from the dev_id */
	hba_eq_hdl = (struct lpfc_hba_eq_hdl *)dev_id;
	phba = hba_eq_hdl->phba;

	if (unlikely(!phba))
		return IRQ_NONE;

	/* Get to the EQ struct associated with this vector */
	eq = phba->sli4_hba.fof_eq;
	if (unlikely(!eq))
		return IRQ_NONE;

	/* Check device state for handling interrupt */
	if (unlikely(lpfc_intr_state_check(phba))) {
		/* Check again for link_state with lock held */
		spin_lock_irqsave(&phba->hbalock, iflag);
		if (phba->link_state < LPFC_LINK_DOWN)
			/* Flush, clear interrupt, and rearm the EQ */
			lpfc_sli4_eq_flush(phba, eq);
		spin_unlock_irqrestore(&phba->hbalock, iflag);
		return IRQ_NONE;
	}

	/*
	 * Process all the events on the fof EQ, bounded per invocation by
	 * the queue's entry_repost budget.
	 */
	while ((eqe = lpfc_sli4_eq_get(eq))) {
		lpfc_sli4_fof_handle_eqe(phba, eqe);
		if (!(++ecount % eq->entry_repost))
			break;
		eq->EQ_processed++;
	}

	/* Track the max number of EQEs processed in 1 intr */
	if (ecount > eq->EQ_max_eqe)
		eq->EQ_max_eqe = ecount;

	/* Catch the no entry (spurious interrupt) condition */
	if (unlikely(ecount == 0)) {
		eq->EQ_no_entry++;

		if (phba->intr_type == MSIX)
			/* MSI-X treated on default handler without delay */
			lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
					"9145 MSI-X interrupt with no EQE\n");
		else {
			lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
					"9146 ISR interrupt with no EQE\n");
			/* Non MSI-X treated on interrupt handler and rearm */
			return IRQ_NONE;
		}
	}
	/* Always clear and re-arm the fast-path EQ */
	phba->sli4_hba.sli4_eq_release(eq, LPFC_QUEUE_REARM);
	return IRQ_HANDLED;
}
13930
13931
13932
13933
13934
13935
13936
13937
13938
13939
13940
13941
13942
13943
13944
13945
13946
13947
13948
13949
13950
13951
13952
13953
13954
13955
13956
/**
 * lpfc_sli4_hba_intr_handler - Interrupt handler for one fast-path HBA EQ
 * @irq: Interrupt number.
 * @dev_id: Pointer to the lpfc_hba_eq_hdl carrying the phba and EQ index.
 *
 * Per-vector fast-path interrupt handler.  Optionally applies the
 * lpfc_fcp_look_ahead scheme (only the caller that drops hba_eq_in_use to
 * zero services the EQ; everyone else backs off), validates device state,
 * drains up to entry_repost EQEs through lpfc_sli4_hba_handle_eqe(),
 * updates statistics and rearms the EQ.
 *
 * Return: IRQ_HANDLED if work was done (or an MSI-X spurious interrupt was
 * logged), IRQ_NONE otherwise.
 **/
irqreturn_t
lpfc_sli4_hba_intr_handler(int irq, void *dev_id)
{
	struct lpfc_hba *phba;
	struct lpfc_hba_eq_hdl *hba_eq_hdl;
	struct lpfc_queue *fpeq;
	struct lpfc_eqe *eqe;
	unsigned long iflag;
	int ecount = 0;
	int hba_eqidx;

	/* Get the driver's phba structure from the dev_id */
	hba_eq_hdl = (struct lpfc_hba_eq_hdl *)dev_id;
	phba = hba_eq_hdl->phba;
	hba_eqidx = hba_eq_hdl->idx;

	if (unlikely(!phba))
		return IRQ_NONE;
	if (unlikely(!phba->sli4_hba.hba_eq))
		return IRQ_NONE;

	/* Get to the EQ struct associated with this vector */
	fpeq = phba->sli4_hba.hba_eq[hba_eqidx];
	if (unlikely(!fpeq))
		return IRQ_NONE;

	/* Claim the EQ: only the claimer (count reaches 0) disables intr */
	if (lpfc_fcp_look_ahead) {
		if (atomic_dec_and_test(&hba_eq_hdl->hba_eq_in_use))
			phba->sli4_hba.sli4_eq_clr_intr(fpeq);
		else {
			/* Someone else holds it; restore the count and exit */
			atomic_inc(&hba_eq_hdl->hba_eq_in_use);
			return IRQ_NONE;
		}
	}

	/* Check device state for handling interrupt */
	if (unlikely(lpfc_intr_state_check(phba))) {
		/* Check again for link_state with lock held */
		spin_lock_irqsave(&phba->hbalock, iflag);
		if (phba->link_state < LPFC_LINK_DOWN)
			/* Flush, clear interrupt, and rearm the EQ */
			lpfc_sli4_eq_flush(phba, fpeq);
		spin_unlock_irqrestore(&phba->hbalock, iflag);
		if (lpfc_fcp_look_ahead)
			atomic_inc(&hba_eq_hdl->hba_eq_in_use);
		return IRQ_NONE;
	}

	/*
	 * Process all the events on the fast-path EQ, bounded per invocation
	 * by the queue's entry_repost budget.
	 */
	while ((eqe = lpfc_sli4_eq_get(fpeq))) {
		lpfc_sli4_hba_handle_eqe(phba, eqe, hba_eqidx);
		if (!(++ecount % fpeq->entry_repost))
			break;
		fpeq->EQ_processed++;
	}

	/* Track the max number of EQEs processed in 1 intr */
	if (ecount > fpeq->EQ_max_eqe)
		fpeq->EQ_max_eqe = ecount;

	/* Always clear and re-arm the fast-path EQ */
	phba->sli4_hba.sli4_eq_release(fpeq, LPFC_QUEUE_REARM);

	if (unlikely(ecount == 0)) {
		fpeq->EQ_no_entry++;

		/* Under look-ahead a no-entry pass just releases the claim */
		if (lpfc_fcp_look_ahead) {
			atomic_inc(&hba_eq_hdl->hba_eq_in_use);
			return IRQ_NONE;
		}

		if (phba->intr_type == MSIX)
			/* MSI-X treated on default handler without delay */
			lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
					"0358 MSI-X interrupt with no EQE\n");
		else
			/* Non MSI-X treated on interrupt handler and rearm */
			return IRQ_NONE;
	}

	if (lpfc_fcp_look_ahead)
		atomic_inc(&hba_eq_hdl->hba_eq_in_use);

	return IRQ_HANDLED;
}
14044
14045
14046
14047
14048
14049
14050
14051
14052
14053
14054
14055
14056
14057
14058
14059
14060
14061
14062irqreturn_t
14063lpfc_sli4_intr_handler(int irq, void *dev_id)
14064{
14065 struct lpfc_hba *phba;
14066 irqreturn_t hba_irq_rc;
14067 bool hba_handled = false;
14068 int qidx;
14069
14070
14071 phba = (struct lpfc_hba *)dev_id;
14072
14073 if (unlikely(!phba))
14074 return IRQ_NONE;
14075
14076
14077
14078
14079 for (qidx = 0; qidx < phba->io_channel_irqs; qidx++) {
14080 hba_irq_rc = lpfc_sli4_hba_intr_handler(irq,
14081 &phba->sli4_hba.hba_eq_hdl[qidx]);
14082 if (hba_irq_rc == IRQ_HANDLED)
14083 hba_handled |= true;
14084 }
14085
14086 if (phba->cfg_fof) {
14087 hba_irq_rc = lpfc_sli4_fof_intr_handler(irq,
14088 &phba->sli4_hba.hba_eq_hdl[qidx]);
14089 if (hba_irq_rc == IRQ_HANDLED)
14090 hba_handled |= true;
14091 }
14092
14093 return (hba_handled == true) ? IRQ_HANDLED : IRQ_NONE;
14094}
14095
14096
14097
14098
14099
14100
14101
14102
14103
14104void
14105lpfc_sli4_queue_free(struct lpfc_queue *queue)
14106{
14107 struct lpfc_dmabuf *dmabuf;
14108
14109 if (!queue)
14110 return;
14111
14112 while (!list_empty(&queue->page_list)) {
14113 list_remove_head(&queue->page_list, dmabuf, struct lpfc_dmabuf,
14114 list);
14115 dma_free_coherent(&queue->phba->pcidev->dev, queue->page_size,
14116 dmabuf->virt, dmabuf->phys);
14117 kfree(dmabuf);
14118 }
14119 if (queue->rqbp) {
14120 lpfc_free_rq_buffer(queue->phba, queue);
14121 kfree(queue->rqbp);
14122 }
14123
14124 if (!list_empty(&queue->wq_list))
14125 list_del(&queue->wq_list);
14126
14127 kfree(queue);
14128 return;
14129}
14130
14131
14132
14133
14134
14135
14136
14137
14138
14139
14140
14141
/**
 * lpfc_sli4_queue_alloc - Allocate and map the memory for an SLI4 queue
 * @phba: Pointer to HBA context object.
 * @page_size: Requested page size for the queue, used when the SLI4
 *             parameters page is not supported by the port.
 * @entry_size: Size in bytes of one queue entry.
 * @entry_count: Number of entries the queue must hold.
 *
 * Allocates the queue structure (with a trailing array of entry address
 * slots), then allocates DMA-coherent pages and records each entry's
 * address.  Also initializes the queue's list heads and its irq/sp work
 * items.
 *
 * Return: pointer to the new queue, or NULL on allocation failure (all
 * partially allocated memory is released via lpfc_sli4_queue_free()).
 **/
struct lpfc_queue *
lpfc_sli4_queue_alloc(struct lpfc_hba *phba, uint32_t page_size,
		      uint32_t entry_size, uint32_t entry_count)
{
	struct lpfc_queue *queue;
	struct lpfc_dmabuf *dmabuf;
	int x, total_qe_count;
	void *dma_pointer;
	uint32_t hw_page_size = phba->sli4_hba.pc_sli4_params.if_page_sz;

	/* Fall back to caller's page size when params page unsupported */
	if (!phba->sli4_hba.pc_sli4_params.supported)
		hw_page_size = page_size;

	/* Queue struct plus one address slot per entry */
	queue = kzalloc(sizeof(struct lpfc_queue) +
			(sizeof(union sli4_qe) * entry_count), GFP_KERNEL);
	if (!queue)
		return NULL;
	queue->page_count = (ALIGN(entry_size * entry_count,
			hw_page_size))/hw_page_size;

	/* It doesn't make sense to have more pages than the port supports */
	if (queue->page_count > phba->sli4_hba.pc_sli4_params.wqpcnt)
		queue->page_count = phba->sli4_hba.pc_sli4_params.wqpcnt;

	INIT_LIST_HEAD(&queue->list);
	INIT_LIST_HEAD(&queue->wq_list);
	INIT_LIST_HEAD(&queue->wqfull_list);
	INIT_LIST_HEAD(&queue->page_list);
	INIT_LIST_HEAD(&queue->child_list);

	/* Set queue parameters now.  If the system cannot provide memory
	 * resources, the free routine needs to know what was allocated.
	 */
	queue->entry_size = entry_size;
	queue->entry_count = entry_count;
	queue->page_size = hw_page_size;
	queue->phba = phba;

	for (x = 0, total_qe_count = 0; x < queue->page_count; x++) {
		dmabuf = kzalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
		if (!dmabuf)
			goto out_fail;
		dmabuf->virt = dma_zalloc_coherent(&phba->pcidev->dev,
						   hw_page_size, &dmabuf->phys,
						   GFP_KERNEL);
		if (!dmabuf->virt) {
			kfree(dmabuf);
			goto out_fail;
		}
		dmabuf->buffer_tag = x;
		list_add_tail(&dmabuf->list, &queue->page_list);
		/* record each entry's address within this page */
		dma_pointer = dmabuf->virt;
		for (; total_qe_count < entry_count &&
		     dma_pointer < (hw_page_size + dmabuf->virt);
		     total_qe_count++, dma_pointer += entry_size) {
			queue->qe[total_qe_count].address = dma_pointer;
		}
	}
	INIT_WORK(&queue->irqwork, lpfc_sli4_hba_process_cq);
	INIT_WORK(&queue->spwork, lpfc_sli4_sp_process_cq);

	/* entry_repost will be set during q creation */

	return queue;
out_fail:
	lpfc_sli4_queue_free(queue);
	return NULL;
}
14211
14212
14213
14214
14215
14216
14217
14218
14219
14220
14221static void __iomem *
14222lpfc_dual_chute_pci_bar_map(struct lpfc_hba *phba, uint16_t pci_barset)
14223{
14224 if (!phba->pcidev)
14225 return NULL;
14226
14227 switch (pci_barset) {
14228 case WQ_PCI_BAR_0_AND_1:
14229 return phba->pci_bar0_memmap_p;
14230 case WQ_PCI_BAR_2_AND_3:
14231 return phba->pci_bar2_memmap_p;
14232 case WQ_PCI_BAR_4_AND_5:
14233 return phba->pci_bar4_memmap_p;
14234 default:
14235 break;
14236 }
14237 return NULL;
14238}
14239
14240
14241
14242
14243
14244
14245
14246
14247
14248
14249
14250
14251
14252
14253
14254
14255
14256
14257
14258int
14259lpfc_modify_hba_eq_delay(struct lpfc_hba *phba, uint32_t startq,
14260 uint32_t numq, uint32_t imax)
14261{
14262 struct lpfc_mbx_modify_eq_delay *eq_delay;
14263 LPFC_MBOXQ_t *mbox;
14264 struct lpfc_queue *eq;
14265 int cnt, rc, length, status = 0;
14266 uint32_t shdr_status, shdr_add_status;
14267 uint32_t result, val;
14268 int qidx;
14269 union lpfc_sli4_cfg_shdr *shdr;
14270 uint16_t dmult;
14271
14272 if (startq >= phba->io_channel_irqs)
14273 return 0;
14274
14275 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
14276 if (!mbox)
14277 return -ENOMEM;
14278 length = (sizeof(struct lpfc_mbx_modify_eq_delay) -
14279 sizeof(struct lpfc_sli4_cfg_mhdr));
14280 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON,
14281 LPFC_MBOX_OPCODE_MODIFY_EQ_DELAY,
14282 length, LPFC_SLI4_MBX_EMBED);
14283 eq_delay = &mbox->u.mqe.un.eq_delay;
14284
14285
14286 result = imax / phba->io_channel_irqs;
14287 if (result > LPFC_DMULT_CONST || result == 0)
14288 dmult = 0;
14289 else
14290 dmult = LPFC_DMULT_CONST/result - 1;
14291 if (dmult > LPFC_DMULT_MAX)
14292 dmult = LPFC_DMULT_MAX;
14293
14294 cnt = 0;
14295 for (qidx = startq; qidx < phba->io_channel_irqs; qidx++) {
14296 eq = phba->sli4_hba.hba_eq[qidx];
14297 if (!eq)
14298 continue;
14299 eq->q_mode = imax;
14300 eq_delay->u.request.eq[cnt].eq_id = eq->queue_id;
14301 eq_delay->u.request.eq[cnt].phase = 0;
14302 eq_delay->u.request.eq[cnt].delay_multi = dmult;
14303 cnt++;
14304
14305
14306 if (phba->sli.sli_flag & LPFC_SLI_USE_EQDR) {
14307
14308
14309
14310 val = phba->cfg_fcp_imax;
14311 if (val) {
14312
14313 val = phba->cfg_fcp_imax /
14314 phba->io_channel_irqs;
14315
14316
14317 val = LPFC_SEC_TO_USEC / val;
14318 }
14319 eq->q_mode = val;
14320 } else {
14321 eq->q_mode = imax;
14322 }
14323
14324 if (cnt >= numq)
14325 break;
14326 }
14327 eq_delay->u.request.num_eq = cnt;
14328
14329 mbox->vport = phba->pport;
14330 mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
14331 mbox->context1 = NULL;
14332 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
14333 shdr = (union lpfc_sli4_cfg_shdr *) &eq_delay->header.cfg_shdr;
14334 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
14335 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
14336 if (shdr_status || shdr_add_status || rc) {
14337 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
14338 "2512 MODIFY_EQ_DELAY mailbox failed with "
14339 "status x%x add_status x%x, mbx status x%x\n",
14340 shdr_status, shdr_add_status, rc);
14341 status = -ENXIO;
14342 }
14343 mempool_free(mbox, phba->mbox_mem_pool);
14344 return status;
14345}
14346
14347
14348
14349
14350
14351
14352
14353
14354
14355
14356
14357
14358
14359
14360
14361
14362
14363
14364
14365
14366
14367int
14368lpfc_eq_create(struct lpfc_hba *phba, struct lpfc_queue *eq, uint32_t imax)
14369{
14370 struct lpfc_mbx_eq_create *eq_create;
14371 LPFC_MBOXQ_t *mbox;
14372 int rc, length, status = 0;
14373 struct lpfc_dmabuf *dmabuf;
14374 uint32_t shdr_status, shdr_add_status;
14375 union lpfc_sli4_cfg_shdr *shdr;
14376 uint16_t dmult;
14377 uint32_t hw_page_size = phba->sli4_hba.pc_sli4_params.if_page_sz;
14378
14379
14380 if (!eq)
14381 return -ENODEV;
14382 if (!phba->sli4_hba.pc_sli4_params.supported)
14383 hw_page_size = SLI4_PAGE_SIZE;
14384
14385 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
14386 if (!mbox)
14387 return -ENOMEM;
14388 length = (sizeof(struct lpfc_mbx_eq_create) -
14389 sizeof(struct lpfc_sli4_cfg_mhdr));
14390 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON,
14391 LPFC_MBOX_OPCODE_EQ_CREATE,
14392 length, LPFC_SLI4_MBX_EMBED);
14393 eq_create = &mbox->u.mqe.un.eq_create;
14394 shdr = (union lpfc_sli4_cfg_shdr *) &eq_create->header.cfg_shdr;
14395 bf_set(lpfc_mbx_eq_create_num_pages, &eq_create->u.request,
14396 eq->page_count);
14397 bf_set(lpfc_eq_context_size, &eq_create->u.request.context,
14398 LPFC_EQE_SIZE);
14399 bf_set(lpfc_eq_context_valid, &eq_create->u.request.context, 1);
14400
14401
14402 if (phba->sli4_hba.pc_sli4_params.eqav) {
14403 bf_set(lpfc_mbox_hdr_version, &shdr->request,
14404 LPFC_Q_CREATE_VERSION_2);
14405 bf_set(lpfc_eq_context_autovalid, &eq_create->u.request.context,
14406 phba->sli4_hba.pc_sli4_params.eqav);
14407 }
14408
14409
14410 dmult = 0;
14411 bf_set(lpfc_eq_context_delay_multi, &eq_create->u.request.context,
14412 dmult);
14413 switch (eq->entry_count) {
14414 default:
14415 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
14416 "0360 Unsupported EQ count. (%d)\n",
14417 eq->entry_count);
14418 if (eq->entry_count < 256)
14419 return -EINVAL;
14420
14421 case 256:
14422 bf_set(lpfc_eq_context_count, &eq_create->u.request.context,
14423 LPFC_EQ_CNT_256);
14424 break;
14425 case 512:
14426 bf_set(lpfc_eq_context_count, &eq_create->u.request.context,
14427 LPFC_EQ_CNT_512);
14428 break;
14429 case 1024:
14430 bf_set(lpfc_eq_context_count, &eq_create->u.request.context,
14431 LPFC_EQ_CNT_1024);
14432 break;
14433 case 2048:
14434 bf_set(lpfc_eq_context_count, &eq_create->u.request.context,
14435 LPFC_EQ_CNT_2048);
14436 break;
14437 case 4096:
14438 bf_set(lpfc_eq_context_count, &eq_create->u.request.context,
14439 LPFC_EQ_CNT_4096);
14440 break;
14441 }
14442 list_for_each_entry(dmabuf, &eq->page_list, list) {
14443 memset(dmabuf->virt, 0, hw_page_size);
14444 eq_create->u.request.page[dmabuf->buffer_tag].addr_lo =
14445 putPaddrLow(dmabuf->phys);
14446 eq_create->u.request.page[dmabuf->buffer_tag].addr_hi =
14447 putPaddrHigh(dmabuf->phys);
14448 }
14449 mbox->vport = phba->pport;
14450 mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
14451 mbox->context1 = NULL;
14452 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
14453 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
14454 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
14455 if (shdr_status || shdr_add_status || rc) {
14456 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
14457 "2500 EQ_CREATE mailbox failed with "
14458 "status x%x add_status x%x, mbx status x%x\n",
14459 shdr_status, shdr_add_status, rc);
14460 status = -ENXIO;
14461 }
14462 eq->type = LPFC_EQ;
14463 eq->subtype = LPFC_NONE;
14464 eq->queue_id = bf_get(lpfc_mbx_eq_create_q_id, &eq_create->u.response);
14465 if (eq->queue_id == 0xFFFF)
14466 status = -ENXIO;
14467 eq->host_index = 0;
14468 eq->hba_index = 0;
14469 eq->entry_repost = LPFC_EQ_REPOST;
14470
14471 mempool_free(mbox, phba->mbox_mem_pool);
14472 return status;
14473}
14474
14475
14476
14477
14478
14479
14480
14481
14482
14483
14484
14485
14486
14487
14488
14489
14490
14491
14492
14493
14494
14495
/**
 * lpfc_cq_create - Create a completion queue and attach it to an EQ
 * @phba: Pointer to HBA context object.
 * @cq: The driver queue structure to bind to the new hardware CQ.
 * @eq: The event queue this CQ will report through.
 * @type: Queue type to record in @cq (e.g. LPFC_CQ).
 * @subtype: Queue subtype to record in @cq (e.g. LPFC_FCP).
 *
 * Issues a CQ_CREATE mailbox command describing @cq's DMA pages and its
 * parent @eq, then links the CQ onto the EQ's child list and fills in the
 * queue bookkeeping from the mailbox response.
 *
 * Return: 0 on success, -ENODEV if @cq or @eq is NULL, -ENOMEM if no
 * mailbox is available, -EINVAL for an unsupported entry count, -ENXIO on
 * mailbox failure.
 **/
int
lpfc_cq_create(struct lpfc_hba *phba, struct lpfc_queue *cq,
	       struct lpfc_queue *eq, uint32_t type, uint32_t subtype)
{
	struct lpfc_mbx_cq_create *cq_create;
	struct lpfc_dmabuf *dmabuf;
	LPFC_MBOXQ_t *mbox;
	int rc, length, status = 0;
	uint32_t shdr_status, shdr_add_status;
	union lpfc_sli4_cfg_shdr *shdr;
	uint32_t hw_page_size = phba->sli4_hba.pc_sli4_params.if_page_sz;

	/* sanity check on queue memory */
	if (!cq || !eq)
		return -ENODEV;
	if (!phba->sli4_hba.pc_sli4_params.supported)
		hw_page_size = cq->page_size;

	mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!mbox)
		return -ENOMEM;
	length = (sizeof(struct lpfc_mbx_cq_create) -
		  sizeof(struct lpfc_sli4_cfg_mhdr));
	lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON,
			 LPFC_MBOX_OPCODE_CQ_CREATE,
			 length, LPFC_SLI4_MBX_EMBED);
	cq_create = &mbox->u.mqe.un.cq_create;
	shdr = (union lpfc_sli4_cfg_shdr *) &cq_create->header.cfg_shdr;
	bf_set(lpfc_mbx_cq_create_num_pages, &cq_create->u.request,
		    cq->page_count);
	bf_set(lpfc_cq_context_event, &cq_create->u.request.context, 1);
	bf_set(lpfc_cq_context_valid, &cq_create->u.request.context, 1);
	bf_set(lpfc_mbox_hdr_version, &shdr->request,
	       phba->sli4_hba.pc_sli4_params.cqv);
	/* Version 2 encodes page size and parent EQ id differently */
	if (phba->sli4_hba.pc_sli4_params.cqv == LPFC_Q_CREATE_VERSION_2) {
		bf_set(lpfc_mbx_cq_create_page_size, &cq_create->u.request,
		       (cq->page_size / SLI4_PAGE_SIZE));
		bf_set(lpfc_cq_eq_id_2, &cq_create->u.request.context,
		       eq->queue_id);
		bf_set(lpfc_cq_context_autovalid, &cq_create->u.request.context,
		       phba->sli4_hba.pc_sli4_params.cqav);
	} else {
		bf_set(lpfc_cq_eq_id, &cq_create->u.request.context,
		       eq->queue_id);
	}
	switch (cq->entry_count) {
	case 2048:
	case 4096:
		/* Large counts need v2: raw count in the count word plus
		 * the CNT_WORD7 selector in the context count field.
		 */
		if (phba->sli4_hba.pc_sli4_params.cqv ==
		    LPFC_Q_CREATE_VERSION_2) {
			cq_create->u.request.context.lpfc_cq_context_count =
				cq->entry_count;
			bf_set(lpfc_cq_context_count,
			       &cq_create->u.request.context,
			       LPFC_CQ_CNT_WORD7);
			break;
		}
		/* fall through - v1 treats large counts as unsupported */
	default:
		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
				"0361 Unsupported CQ count: "
				"entry cnt %d sz %d pg cnt %d\n",
				cq->entry_count, cq->entry_size,
				cq->page_count);
		if (cq->entry_count < 256) {
			status = -EINVAL;
			goto out;
		}
		/* fall through - otherwise default to smallest count */
	case 256:
		bf_set(lpfc_cq_context_count, &cq_create->u.request.context,
		       LPFC_CQ_CNT_256);
		break;
	case 512:
		bf_set(lpfc_cq_context_count, &cq_create->u.request.context,
		       LPFC_CQ_CNT_512);
		break;
	case 1024:
		bf_set(lpfc_cq_context_count, &cq_create->u.request.context,
		       LPFC_CQ_CNT_1024);
		break;
	}
	/* Record the DMA address of every page backing the queue */
	list_for_each_entry(dmabuf, &cq->page_list, list) {
		memset(dmabuf->virt, 0, cq->page_size);
		cq_create->u.request.page[dmabuf->buffer_tag].addr_lo =
					putPaddrLow(dmabuf->phys);
		cq_create->u.request.page[dmabuf->buffer_tag].addr_hi =
					putPaddrHigh(dmabuf->phys);
	}
	rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);

	/* The IOCTL status is embedded in the mailbox subheader. */
	shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
	shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
	if (shdr_status || shdr_add_status || rc) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"2501 CQ_CREATE mailbox failed with "
				"status x%x add_status x%x, mbx status x%x\n",
				shdr_status, shdr_add_status, rc);
		status = -ENXIO;
		goto out;
	}
	cq->queue_id = bf_get(lpfc_mbx_cq_create_q_id, &cq_create->u.response);
	if (cq->queue_id == 0xFFFF) {
		status = -ENXIO;
		goto out;
	}
	/* link the cq onto the parent eq child list */
	list_add_tail(&cq->list, &eq->child_list);
	/* Set up completion queue's type and subtype */
	cq->type = type;
	cq->subtype = subtype;
	cq->queue_id = bf_get(lpfc_mbx_cq_create_q_id, &cq_create->u.response);
	cq->assoc_qid = eq->queue_id;
	cq->host_index = 0;
	cq->hba_index = 0;
	cq->entry_repost = LPFC_CQ_REPOST;

out:
	mempool_free(mbox, phba->mbox_mem_pool);
	return status;
}
14618
14619
14620
14621
14622
14623
14624
14625
14626
14627
14628
14629
14630
14631
14632
14633
14634
14635
14636
14637
14638
14639
14640
/**
 * lpfc_cq_create_set - Create a set of NVMET completion queues in one command
 * @phba: Pointer to HBA context object.
 * @cqp: Array of driver queue structures to bind (cfg_nvmet_mrq entries).
 * @eqp: Array of parent event queues, one per CQ.
 * @type: Queue type to record in each CQ.
 * @subtype: Queue subtype to record in each CQ.
 *
 * Builds a non-embedded CQ_CREATE_SET mailbox command covering all
 * cfg_nvmet_mrq completion queues, records every queue's DMA pages and
 * parent EQ id, issues the command and assigns the contiguous queue ids
 * from the response (base id + index).
 *
 * Return: 0 on success, -ENODEV for missing parameters, -ENOMEM on
 * allocation shortfall, -EINVAL for a bad entry count, -ENXIO on mailbox
 * failure.
 **/
int
lpfc_cq_create_set(struct lpfc_hba *phba, struct lpfc_queue **cqp,
		   struct lpfc_queue **eqp, uint32_t type, uint32_t subtype)
{
	struct lpfc_queue *cq;
	struct lpfc_queue *eq;
	struct lpfc_mbx_cq_create_set *cq_set;
	struct lpfc_dmabuf *dmabuf;
	LPFC_MBOXQ_t *mbox;
	int rc, length, alloclen, status = 0;
	int cnt, idx, numcq, page_idx = 0;
	uint32_t shdr_status, shdr_add_status;
	union lpfc_sli4_cfg_shdr *shdr;
	uint32_t hw_page_size = phba->sli4_hba.pc_sli4_params.if_page_sz;

	/* sanity check on queue memory */
	numcq = phba->cfg_nvmet_mrq;
	if (!cqp || !eqp || !numcq)
		return -ENODEV;

	mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!mbox)
		return -ENOMEM;

	length = sizeof(struct lpfc_mbx_cq_create_set);
	length += ((numcq * cqp[0]->page_count) *
		   sizeof(struct dma_address));
	alloclen = lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_FCOE,
			LPFC_MBOX_OPCODE_FCOE_CQ_CREATE_SET, length,
			LPFC_SLI4_MBX_NEMBED);
	if (alloclen < length) {
		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
				"3098 Allocated DMA memory size (%d) is "
				"less than the requested DMA memory size "
				"(%d)\n", alloclen, length);
		status = -ENOMEM;
		goto out;
	}
	cq_set = mbox->sge_array->addr[0];
	shdr = (union lpfc_sli4_cfg_shdr *)&cq_set->cfg_shdr;
	bf_set(lpfc_mbox_hdr_version, &shdr->request, 0);

	for (idx = 0; idx < numcq; idx++) {
		cq = cqp[idx];
		eq = eqp[idx];
		if (!cq || !eq) {
			status = -ENOMEM;
			goto out;
		}
		if (!phba->sli4_hba.pc_sli4_params.supported)
			hw_page_size = cq->page_size;

		switch (idx) {
		case 0:
			/* First CQ carries the set-wide parameters */
			bf_set(lpfc_mbx_cq_create_set_page_size,
			       &cq_set->u.request,
			       (hw_page_size / SLI4_PAGE_SIZE));
			bf_set(lpfc_mbx_cq_create_set_num_pages,
			       &cq_set->u.request, cq->page_count);
			bf_set(lpfc_mbx_cq_create_set_evt,
			       &cq_set->u.request, 1);
			bf_set(lpfc_mbx_cq_create_set_valid,
			       &cq_set->u.request, 1);
			bf_set(lpfc_mbx_cq_create_set_cqe_size,
			       &cq_set->u.request, 0);
			bf_set(lpfc_mbx_cq_create_set_num_cq,
			       &cq_set->u.request, numcq);
			bf_set(lpfc_mbx_cq_create_set_autovalid,
			       &cq_set->u.request,
			       phba->sli4_hba.pc_sli4_params.cqav);
			switch (cq->entry_count) {
			case 2048:
			case 4096:
				/* NOTE(review): both bf_set calls target the
				 * same cqe_cnt field, so the raw entry_count
				 * written first is immediately overwritten
				 * by LPFC_CQ_CNT_WORD7 — confirm where the
				 * raw count is meant to be stored for v2.
				 */
				if (phba->sli4_hba.pc_sli4_params.cqv ==
				    LPFC_Q_CREATE_VERSION_2) {
					bf_set(lpfc_mbx_cq_create_set_cqe_cnt,
					       &cq_set->u.request,
						cq->entry_count);
					bf_set(lpfc_mbx_cq_create_set_cqe_cnt,
					       &cq_set->u.request,
					       LPFC_CQ_CNT_WORD7);
					break;
				}
				/* fall through - treat as unsupported */
			default:
				lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
						"3118 Bad CQ count. (%d)\n",
						cq->entry_count);
				if (cq->entry_count < 256) {
					status = -EINVAL;
					goto out;
				}
				/* fall through - default to smallest */
			case 256:
				bf_set(lpfc_mbx_cq_create_set_cqe_cnt,
				       &cq_set->u.request, LPFC_CQ_CNT_256);
				break;
			case 512:
				bf_set(lpfc_mbx_cq_create_set_cqe_cnt,
				       &cq_set->u.request, LPFC_CQ_CNT_512);
				break;
			case 1024:
				bf_set(lpfc_mbx_cq_create_set_cqe_cnt,
				       &cq_set->u.request, LPFC_CQ_CNT_1024);
				break;
			}
			bf_set(lpfc_mbx_cq_create_set_eq_id0,
			       &cq_set->u.request, eq->queue_id);
			break;
		case 1:
			bf_set(lpfc_mbx_cq_create_set_eq_id1,
			       &cq_set->u.request, eq->queue_id);
			break;
		case 2:
			bf_set(lpfc_mbx_cq_create_set_eq_id2,
			       &cq_set->u.request, eq->queue_id);
			break;
		case 3:
			bf_set(lpfc_mbx_cq_create_set_eq_id3,
			       &cq_set->u.request, eq->queue_id);
			break;
		case 4:
			bf_set(lpfc_mbx_cq_create_set_eq_id4,
			       &cq_set->u.request, eq->queue_id);
			break;
		case 5:
			bf_set(lpfc_mbx_cq_create_set_eq_id5,
			       &cq_set->u.request, eq->queue_id);
			break;
		case 6:
			bf_set(lpfc_mbx_cq_create_set_eq_id6,
			       &cq_set->u.request, eq->queue_id);
			break;
		case 7:
			bf_set(lpfc_mbx_cq_create_set_eq_id7,
			       &cq_set->u.request, eq->queue_id);
			break;
		case 8:
			bf_set(lpfc_mbx_cq_create_set_eq_id8,
			       &cq_set->u.request, eq->queue_id);
			break;
		case 9:
			bf_set(lpfc_mbx_cq_create_set_eq_id9,
			       &cq_set->u.request, eq->queue_id);
			break;
		case 10:
			bf_set(lpfc_mbx_cq_create_set_eq_id10,
			       &cq_set->u.request, eq->queue_id);
			break;
		case 11:
			bf_set(lpfc_mbx_cq_create_set_eq_id11,
			       &cq_set->u.request, eq->queue_id);
			break;
		case 12:
			bf_set(lpfc_mbx_cq_create_set_eq_id12,
			       &cq_set->u.request, eq->queue_id);
			break;
		case 13:
			bf_set(lpfc_mbx_cq_create_set_eq_id13,
			       &cq_set->u.request, eq->queue_id);
			break;
		case 14:
			bf_set(lpfc_mbx_cq_create_set_eq_id14,
			       &cq_set->u.request, eq->queue_id);
			break;
		case 15:
			bf_set(lpfc_mbx_cq_create_set_eq_id15,
			       &cq_set->u.request, eq->queue_id);
			break;
		}

		/* link the cq onto the parent eq child list */
		list_add_tail(&cq->list, &eq->child_list);
		/* Set up completion queue's type and subtype */
		cq->type = type;
		cq->subtype = subtype;
		cq->assoc_qid = eq->queue_id;
		cq->host_index = 0;
		cq->hba_index = 0;
		cq->entry_repost = LPFC_CQ_REPOST;
		cq->chann = idx;

		/* Record each page of this CQ; rc counts pages added */
		rc = 0;
		list_for_each_entry(dmabuf, &cq->page_list, list) {
			memset(dmabuf->virt, 0, hw_page_size);
			cnt = page_idx + dmabuf->buffer_tag;
			cq_set->u.request.page[cnt].addr_lo =
					putPaddrLow(dmabuf->phys);
			cq_set->u.request.page[cnt].addr_hi =
					putPaddrHigh(dmabuf->phys);
			rc++;
		}
		page_idx += rc;
	}

	rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);

	/* The IOCTL status is embedded in the mailbox subheader. */
	shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
	shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
	if (shdr_status || shdr_add_status || rc) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"3119 CQ_CREATE_SET mailbox failed with "
				"status x%x add_status x%x, mbx status x%x\n",
				shdr_status, shdr_add_status, rc);
		status = -ENXIO;
		goto out;
	}
	rc = bf_get(lpfc_mbx_cq_create_set_base_id, &cq_set->u.response);
	if (rc == 0xFFFF) {
		status = -ENXIO;
		goto out;
	}

	/* Queue ids are assigned contiguously starting at the base id */
	for (idx = 0; idx < numcq; idx++) {
		cq = cqp[idx];
		cq->queue_id = rc + idx;
	}

out:
	lpfc_sli4_mbox_cmd_free(phba, mbox);
	return status;
}
14864
14865
14866
14867
14868
14869
14870
14871
14872
14873
14874
14875
14876
14877
14878
14879static void
14880lpfc_mq_create_fb_init(struct lpfc_hba *phba, struct lpfc_queue *mq,
14881 LPFC_MBOXQ_t *mbox, struct lpfc_queue *cq)
14882{
14883 struct lpfc_mbx_mq_create *mq_create;
14884 struct lpfc_dmabuf *dmabuf;
14885 int length;
14886
14887 length = (sizeof(struct lpfc_mbx_mq_create) -
14888 sizeof(struct lpfc_sli4_cfg_mhdr));
14889 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON,
14890 LPFC_MBOX_OPCODE_MQ_CREATE,
14891 length, LPFC_SLI4_MBX_EMBED);
14892 mq_create = &mbox->u.mqe.un.mq_create;
14893 bf_set(lpfc_mbx_mq_create_num_pages, &mq_create->u.request,
14894 mq->page_count);
14895 bf_set(lpfc_mq_context_cq_id, &mq_create->u.request.context,
14896 cq->queue_id);
14897 bf_set(lpfc_mq_context_valid, &mq_create->u.request.context, 1);
14898 switch (mq->entry_count) {
14899 case 16:
14900 bf_set(lpfc_mq_context_ring_size, &mq_create->u.request.context,
14901 LPFC_MQ_RING_SIZE_16);
14902 break;
14903 case 32:
14904 bf_set(lpfc_mq_context_ring_size, &mq_create->u.request.context,
14905 LPFC_MQ_RING_SIZE_32);
14906 break;
14907 case 64:
14908 bf_set(lpfc_mq_context_ring_size, &mq_create->u.request.context,
14909 LPFC_MQ_RING_SIZE_64);
14910 break;
14911 case 128:
14912 bf_set(lpfc_mq_context_ring_size, &mq_create->u.request.context,
14913 LPFC_MQ_RING_SIZE_128);
14914 break;
14915 }
14916 list_for_each_entry(dmabuf, &mq->page_list, list) {
14917 mq_create->u.request.page[dmabuf->buffer_tag].addr_lo =
14918 putPaddrLow(dmabuf->phys);
14919 mq_create->u.request.page[dmabuf->buffer_tag].addr_hi =
14920 putPaddrHigh(dmabuf->phys);
14921 }
14922}
14923
14924
14925
14926
14927
14928
14929
14930
14931
14932
14933
14934
14935
14936
14937
14938
14939
14940
14941
14942
14943
14944
/**
 * lpfc_mq_create - Create a Mailbox Queue on the HBA
 * @phba: HBA structure that indicates port to create a queue on.
 * @mq: the mailbox queue to create.
 * @cq: the completion queue the MQ will be bound to.
 * @subtype: the queue's subtype.
 *
 * Builds and issues an MQ_CREATE_EXT mailbox command from the entry count
 * in @mq and the DMA pages on @mq->page_list, subscribing the queue to the
 * port's asynchronous event classes.  If the port rejects the extended
 * command, the same mailbox is rebuilt as a plain MQ_CREATE via
 * lpfc_mq_create_fb_init() and reissued.  On success the queue id is read
 * from the response and @mq is linked onto @cq's child list.
 *
 * Return: 0 on success; -ENODEV if @mq or @cq is NULL; -ENOMEM if no
 * mailbox is available; -EINVAL for an entry count below 16; -ENXIO on a
 * mailbox/firmware failure.
 */
int32_t
lpfc_mq_create(struct lpfc_hba *phba, struct lpfc_queue *mq,
	       struct lpfc_queue *cq, uint32_t subtype)
{
	struct lpfc_mbx_mq_create *mq_create;
	struct lpfc_mbx_mq_create_ext *mq_create_ext;
	struct lpfc_dmabuf *dmabuf;
	LPFC_MBOXQ_t *mbox;
	int rc, length, status = 0;
	uint32_t shdr_status, shdr_add_status;
	union lpfc_sli4_cfg_shdr *shdr;
	uint32_t hw_page_size = phba->sli4_hba.pc_sli4_params.if_page_sz;

	/* sanity check on queue memory */
	if (!mq || !cq)
		return -ENODEV;
	if (!phba->sli4_hba.pc_sli4_params.supported)
		hw_page_size = SLI4_PAGE_SIZE;

	mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!mbox)
		return -ENOMEM;
	length = (sizeof(struct lpfc_mbx_mq_create_ext) -
		  sizeof(struct lpfc_sli4_cfg_mhdr));
	lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON,
			 LPFC_MBOX_OPCODE_MQ_CREATE_EXT,
			 length, LPFC_SLI4_MBX_EMBED);

	mq_create_ext = &mbox->u.mqe.un.mq_create_ext;
	shdr = (union lpfc_sli4_cfg_shdr *) &mq_create_ext->header.cfg_shdr;
	bf_set(lpfc_mbx_mq_create_ext_num_pages,
	       &mq_create_ext->u.request, mq->page_count);
	/* Subscribe this MQ to every async event class. */
	bf_set(lpfc_mbx_mq_create_ext_async_evt_link,
	       &mq_create_ext->u.request, 1);
	bf_set(lpfc_mbx_mq_create_ext_async_evt_fip,
	       &mq_create_ext->u.request, 1);
	bf_set(lpfc_mbx_mq_create_ext_async_evt_group5,
	       &mq_create_ext->u.request, 1);
	bf_set(lpfc_mbx_mq_create_ext_async_evt_fc,
	       &mq_create_ext->u.request, 1);
	bf_set(lpfc_mbx_mq_create_ext_async_evt_sli,
	       &mq_create_ext->u.request, 1);
	bf_set(lpfc_mq_context_valid, &mq_create_ext->u.request.context, 1);
	bf_set(lpfc_mbox_hdr_version, &shdr->request,
	       phba->sli4_hba.pc_sli4_params.mqv);
	/* The CQ id lives in different request fields for V1 vs V0. */
	if (phba->sli4_hba.pc_sli4_params.mqv == LPFC_Q_CREATE_VERSION_1)
		bf_set(lpfc_mbx_mq_create_ext_cq_id, &mq_create_ext->u.request,
		       cq->queue_id);
	else
		bf_set(lpfc_mq_context_cq_id, &mq_create_ext->u.request.context,
		       cq->queue_id);
	switch (mq->entry_count) {
	default:
		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
				"0362 Unsupported MQ count. (%d)\n",
				mq->entry_count);
		if (mq->entry_count < 16) {
			status = -EINVAL;
			goto out;
		}
		/* fall through - otherwise default to smallest count */
	case 16:
		bf_set(lpfc_mq_context_ring_size,
		       &mq_create_ext->u.request.context,
		       LPFC_MQ_RING_SIZE_16);
		break;
	case 32:
		bf_set(lpfc_mq_context_ring_size,
		       &mq_create_ext->u.request.context,
		       LPFC_MQ_RING_SIZE_32);
		break;
	case 64:
		bf_set(lpfc_mq_context_ring_size,
		       &mq_create_ext->u.request.context,
		       LPFC_MQ_RING_SIZE_64);
		break;
	case 128:
		bf_set(lpfc_mq_context_ring_size,
		       &mq_create_ext->u.request.context,
		       LPFC_MQ_RING_SIZE_128);
		break;
	}
	/* Post the physical address of every page backing the queue. */
	list_for_each_entry(dmabuf, &mq->page_list, list) {
		memset(dmabuf->virt, 0, hw_page_size);
		mq_create_ext->u.request.page[dmabuf->buffer_tag].addr_lo =
			putPaddrLow(dmabuf->phys);
		mq_create_ext->u.request.page[dmabuf->buffer_tag].addr_hi =
			putPaddrHigh(dmabuf->phys);
	}
	rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
	mq->queue_id = bf_get(lpfc_mbx_mq_create_q_id,
			      &mq_create_ext->u.response);
	if (rc != MBX_SUCCESS) {
		/* Port rejected MQ_CREATE_EXT: rebuild the same mailbox as
		 * a plain MQ_CREATE, reissue, and repoint shdr at its header.
		 */
		lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
				"2795 MQ_CREATE_EXT failed with "
				"status x%x. Failback to MQ_CREATE.\n",
				rc);
		lpfc_mq_create_fb_init(phba, mq, mbox, cq);
		mq_create = &mbox->u.mqe.un.mq_create;
		rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
		shdr = (union lpfc_sli4_cfg_shdr *) &mq_create->header.cfg_shdr;
		mq->queue_id = bf_get(lpfc_mbx_mq_create_q_id,
				      &mq_create->u.response);
	}

	/* The IOCTL status is embedded in the mailbox subheader. */
	shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
	shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
	if (shdr_status || shdr_add_status || rc) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"2502 MQ_CREATE mailbox failed with "
				"status x%x add_status x%x, mbx status x%x\n",
				shdr_status, shdr_add_status, rc);
		status = -ENXIO;
		goto out;
	}
	if (mq->queue_id == 0xFFFF) {
		status = -ENXIO;
		goto out;
	}
	mq->type = LPFC_MQ;
	mq->assoc_qid = cq->queue_id;
	mq->subtype = subtype;
	mq->host_index = 0;
	mq->hba_index = 0;
	mq->entry_repost = LPFC_MQ_REPOST;

	/* link the mq onto the parent cq child list */
	list_add_tail(&mq->list, &cq->child_list);
out:
	mempool_free(mbox, phba->mbox_mem_pool);
	return status;
}
15078
15079
15080
15081
15082
15083
15084
15085
15086
15087
15088
15089
15090
15091
15092
15093
15094
15095
15096
15097
15098
15099
15100
15101int
15102lpfc_wq_create(struct lpfc_hba *phba, struct lpfc_queue *wq,
15103 struct lpfc_queue *cq, uint32_t subtype)
15104{
15105 struct lpfc_mbx_wq_create *wq_create;
15106 struct lpfc_dmabuf *dmabuf;
15107 LPFC_MBOXQ_t *mbox;
15108 int rc, length, status = 0;
15109 uint32_t shdr_status, shdr_add_status;
15110 union lpfc_sli4_cfg_shdr *shdr;
15111 uint32_t hw_page_size = phba->sli4_hba.pc_sli4_params.if_page_sz;
15112 struct dma_address *page;
15113 void __iomem *bar_memmap_p;
15114 uint32_t db_offset;
15115 uint16_t pci_barset;
15116 uint8_t dpp_barset;
15117 uint32_t dpp_offset;
15118 unsigned long pg_addr;
15119 uint8_t wq_create_version;
15120
15121
15122 if (!wq || !cq)
15123 return -ENODEV;
15124 if (!phba->sli4_hba.pc_sli4_params.supported)
15125 hw_page_size = wq->page_size;
15126
15127 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
15128 if (!mbox)
15129 return -ENOMEM;
15130 length = (sizeof(struct lpfc_mbx_wq_create) -
15131 sizeof(struct lpfc_sli4_cfg_mhdr));
15132 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_FCOE,
15133 LPFC_MBOX_OPCODE_FCOE_WQ_CREATE,
15134 length, LPFC_SLI4_MBX_EMBED);
15135 wq_create = &mbox->u.mqe.un.wq_create;
15136 shdr = (union lpfc_sli4_cfg_shdr *) &wq_create->header.cfg_shdr;
15137 bf_set(lpfc_mbx_wq_create_num_pages, &wq_create->u.request,
15138 wq->page_count);
15139 bf_set(lpfc_mbx_wq_create_cq_id, &wq_create->u.request,
15140 cq->queue_id);
15141
15142
15143 bf_set(lpfc_mbox_hdr_version, &shdr->request,
15144 phba->sli4_hba.pc_sli4_params.wqv);
15145
15146 if ((phba->sli4_hba.pc_sli4_params.wqsize & LPFC_WQ_SZ128_SUPPORT) ||
15147 (wq->page_size > SLI4_PAGE_SIZE))
15148 wq_create_version = LPFC_Q_CREATE_VERSION_1;
15149 else
15150 wq_create_version = LPFC_Q_CREATE_VERSION_0;
15151
15152
15153 if (phba->sli4_hba.pc_sli4_params.wqsize & LPFC_WQ_SZ128_SUPPORT)
15154 wq_create_version = LPFC_Q_CREATE_VERSION_1;
15155 else
15156 wq_create_version = LPFC_Q_CREATE_VERSION_0;
15157
15158 switch (wq_create_version) {
15159 case LPFC_Q_CREATE_VERSION_1:
15160 bf_set(lpfc_mbx_wq_create_wqe_count, &wq_create->u.request_1,
15161 wq->entry_count);
15162 bf_set(lpfc_mbox_hdr_version, &shdr->request,
15163 LPFC_Q_CREATE_VERSION_1);
15164
15165 switch (wq->entry_size) {
15166 default:
15167 case 64:
15168 bf_set(lpfc_mbx_wq_create_wqe_size,
15169 &wq_create->u.request_1,
15170 LPFC_WQ_WQE_SIZE_64);
15171 break;
15172 case 128:
15173 bf_set(lpfc_mbx_wq_create_wqe_size,
15174 &wq_create->u.request_1,
15175 LPFC_WQ_WQE_SIZE_128);
15176 break;
15177 }
15178
15179 bf_set(lpfc_mbx_wq_create_dpp_req, &wq_create->u.request_1, 1);
15180 bf_set(lpfc_mbx_wq_create_page_size,
15181 &wq_create->u.request_1,
15182 (wq->page_size / SLI4_PAGE_SIZE));
15183 page = wq_create->u.request_1.page;
15184 break;
15185 default:
15186 page = wq_create->u.request.page;
15187 break;
15188 }
15189
15190 list_for_each_entry(dmabuf, &wq->page_list, list) {
15191 memset(dmabuf->virt, 0, hw_page_size);
15192 page[dmabuf->buffer_tag].addr_lo = putPaddrLow(dmabuf->phys);
15193 page[dmabuf->buffer_tag].addr_hi = putPaddrHigh(dmabuf->phys);
15194 }
15195
15196 if (phba->sli4_hba.fw_func_mode & LPFC_DUA_MODE)
15197 bf_set(lpfc_mbx_wq_create_dua, &wq_create->u.request, 1);
15198
15199 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
15200
15201 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
15202 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
15203 if (shdr_status || shdr_add_status || rc) {
15204 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
15205 "2503 WQ_CREATE mailbox failed with "
15206 "status x%x add_status x%x, mbx status x%x\n",
15207 shdr_status, shdr_add_status, rc);
15208 status = -ENXIO;
15209 goto out;
15210 }
15211
15212 if (wq_create_version == LPFC_Q_CREATE_VERSION_0)
15213 wq->queue_id = bf_get(lpfc_mbx_wq_create_q_id,
15214 &wq_create->u.response);
15215 else
15216 wq->queue_id = bf_get(lpfc_mbx_wq_create_v1_q_id,
15217 &wq_create->u.response_1);
15218
15219 if (wq->queue_id == 0xFFFF) {
15220 status = -ENXIO;
15221 goto out;
15222 }
15223
15224 wq->db_format = LPFC_DB_LIST_FORMAT;
15225 if (wq_create_version == LPFC_Q_CREATE_VERSION_0) {
15226 if (phba->sli4_hba.fw_func_mode & LPFC_DUA_MODE) {
15227 wq->db_format = bf_get(lpfc_mbx_wq_create_db_format,
15228 &wq_create->u.response);
15229 if ((wq->db_format != LPFC_DB_LIST_FORMAT) &&
15230 (wq->db_format != LPFC_DB_RING_FORMAT)) {
15231 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
15232 "3265 WQ[%d] doorbell format "
15233 "not supported: x%x\n",
15234 wq->queue_id, wq->db_format);
15235 status = -EINVAL;
15236 goto out;
15237 }
15238 pci_barset = bf_get(lpfc_mbx_wq_create_bar_set,
15239 &wq_create->u.response);
15240 bar_memmap_p = lpfc_dual_chute_pci_bar_map(phba,
15241 pci_barset);
15242 if (!bar_memmap_p) {
15243 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
15244 "3263 WQ[%d] failed to memmap "
15245 "pci barset:x%x\n",
15246 wq->queue_id, pci_barset);
15247 status = -ENOMEM;
15248 goto out;
15249 }
15250 db_offset = wq_create->u.response.doorbell_offset;
15251 if ((db_offset != LPFC_ULP0_WQ_DOORBELL) &&
15252 (db_offset != LPFC_ULP1_WQ_DOORBELL)) {
15253 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
15254 "3252 WQ[%d] doorbell offset "
15255 "not supported: x%x\n",
15256 wq->queue_id, db_offset);
15257 status = -EINVAL;
15258 goto out;
15259 }
15260 wq->db_regaddr = bar_memmap_p + db_offset;
15261 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
15262 "3264 WQ[%d]: barset:x%x, offset:x%x, "
15263 "format:x%x\n", wq->queue_id,
15264 pci_barset, db_offset, wq->db_format);
15265 } else
15266 wq->db_regaddr = phba->sli4_hba.WQDBregaddr;
15267 } else {
15268
15269 wq->dpp_enable = bf_get(lpfc_mbx_wq_create_dpp_rsp,
15270 &wq_create->u.response_1);
15271 if (wq->dpp_enable) {
15272 pci_barset = bf_get(lpfc_mbx_wq_create_v1_bar_set,
15273 &wq_create->u.response_1);
15274 bar_memmap_p = lpfc_dual_chute_pci_bar_map(phba,
15275 pci_barset);
15276 if (!bar_memmap_p) {
15277 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
15278 "3267 WQ[%d] failed to memmap "
15279 "pci barset:x%x\n",
15280 wq->queue_id, pci_barset);
15281 status = -ENOMEM;
15282 goto out;
15283 }
15284 db_offset = wq_create->u.response_1.doorbell_offset;
15285 wq->db_regaddr = bar_memmap_p + db_offset;
15286 wq->dpp_id = bf_get(lpfc_mbx_wq_create_dpp_id,
15287 &wq_create->u.response_1);
15288 dpp_barset = bf_get(lpfc_mbx_wq_create_dpp_bar,
15289 &wq_create->u.response_1);
15290 bar_memmap_p = lpfc_dual_chute_pci_bar_map(phba,
15291 dpp_barset);
15292 if (!bar_memmap_p) {
15293 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
15294 "3268 WQ[%d] failed to memmap "
15295 "pci barset:x%x\n",
15296 wq->queue_id, dpp_barset);
15297 status = -ENOMEM;
15298 goto out;
15299 }
15300 dpp_offset = wq_create->u.response_1.dpp_offset;
15301 wq->dpp_regaddr = bar_memmap_p + dpp_offset;
15302 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
15303 "3271 WQ[%d]: barset:x%x, offset:x%x, "
15304 "dpp_id:x%x dpp_barset:x%x "
15305 "dpp_offset:x%x\n",
15306 wq->queue_id, pci_barset, db_offset,
15307 wq->dpp_id, dpp_barset, dpp_offset);
15308
15309
15310 pg_addr = (unsigned long)(wq->dpp_regaddr) & PAGE_MASK;
15311#ifdef CONFIG_X86
15312 rc = set_memory_wc(pg_addr, 1);
15313 if (rc) {
15314 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
15315 "3272 Cannot setup Combined "
15316 "Write on WQ[%d] - disable DPP\n",
15317 wq->queue_id);
15318 phba->cfg_enable_dpp = 0;
15319 }
15320#else
15321 phba->cfg_enable_dpp = 0;
15322#endif
15323 } else
15324 wq->db_regaddr = phba->sli4_hba.WQDBregaddr;
15325 }
15326 wq->pring = kzalloc(sizeof(struct lpfc_sli_ring), GFP_KERNEL);
15327 if (wq->pring == NULL) {
15328 status = -ENOMEM;
15329 goto out;
15330 }
15331 wq->type = LPFC_WQ;
15332 wq->assoc_qid = cq->queue_id;
15333 wq->subtype = subtype;
15334 wq->host_index = 0;
15335 wq->hba_index = 0;
15336 wq->entry_repost = LPFC_RELEASE_NOTIFICATION_INTERVAL;
15337
15338
15339 list_add_tail(&wq->list, &cq->child_list);
15340out:
15341 mempool_free(mbox, phba->mbox_mem_pool);
15342 return status;
15343}
15344
15345
15346
15347
15348
15349
15350
15351
15352
15353
15354
15355
15356
15357
15358
15359
15360
15361
15362
15363
15364
15365
15366
15367
/**
 * lpfc_rq_create - Create a header/data receive queue pair on the HBA
 * @phba: HBA structure that indicates port to create a queue on.
 * @hrq: the header receive queue to create.
 * @drq: the data receive queue to create.
 * @cq: the completion queue the RQ pair will be bound to.
 * @subtype: the queue's subtype (e.g. LPFC_NVMET selects a larger data
 *           buffer size).
 *
 * Issues an RQ_CREATE mailbox command for the header RQ, then rebuilds and
 * reuses the same mailbox for a second RQ_CREATE for the data RQ.  The two
 * queues must have identical entry counts.  On success both queues are
 * linked onto @cq's child list.
 *
 * Return: 0 on success; -ENODEV on NULL queue pointers; -EINVAL if the
 * entry counts differ or are unsupported; -ENOMEM on allocation/BAR-map
 * failure; -ENXIO on a mailbox/firmware error.
 */
int
lpfc_rq_create(struct lpfc_hba *phba, struct lpfc_queue *hrq,
	       struct lpfc_queue *drq, struct lpfc_queue *cq, uint32_t subtype)
{
	struct lpfc_mbx_rq_create *rq_create;
	struct lpfc_dmabuf *dmabuf;
	LPFC_MBOXQ_t *mbox;
	int rc, length, status = 0;
	uint32_t shdr_status, shdr_add_status;
	union lpfc_sli4_cfg_shdr *shdr;
	uint32_t hw_page_size = phba->sli4_hba.pc_sli4_params.if_page_sz;
	void __iomem *bar_memmap_p;
	uint32_t db_offset;
	uint16_t pci_barset;

	/* sanity check on queue memory */
	if (!hrq || !drq || !cq)
		return -ENODEV;
	if (!phba->sli4_hba.pc_sli4_params.supported)
		hw_page_size = SLI4_PAGE_SIZE;

	if (hrq->entry_count != drq->entry_count)
		return -EINVAL;
	mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!mbox)
		return -ENOMEM;
	length = (sizeof(struct lpfc_mbx_rq_create) -
		  sizeof(struct lpfc_sli4_cfg_mhdr));
	lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_FCOE,
			 LPFC_MBOX_OPCODE_FCOE_RQ_CREATE,
			 length, LPFC_SLI4_MBX_EMBED);
	rq_create = &mbox->u.mqe.un.rq_create;
	shdr = (union lpfc_sli4_cfg_shdr *) &rq_create->header.cfg_shdr;
	bf_set(lpfc_mbox_hdr_version, &shdr->request,
	       phba->sli4_hba.pc_sli4_params.rqv);
	if (phba->sli4_hba.pc_sli4_params.rqv == LPFC_Q_CREATE_VERSION_1) {
		/* V1 carries the entry count directly */
		bf_set(lpfc_rq_context_rqe_count_1,
		       &rq_create->u.request.context,
		       hrq->entry_count);
		rq_create->u.request.context.buffer_size = LPFC_HDR_BUF_SIZE;
		bf_set(lpfc_rq_context_rqe_size,
		       &rq_create->u.request.context,
		       LPFC_RQE_SIZE_8);
		bf_set(lpfc_rq_context_page_size,
		       &rq_create->u.request.context,
		       LPFC_RQ_PAGE_SIZE_4096);
	} else {
		switch (hrq->entry_count) {
		default:
			lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
					"2535 Unsupported RQ count. (%d)\n",
					hrq->entry_count);
			if (hrq->entry_count < 512) {
				status = -EINVAL;
				goto out;
			}
			/* fall through - otherwise default to smallest count */
		case 512:
			bf_set(lpfc_rq_context_rqe_count,
			       &rq_create->u.request.context,
			       LPFC_RQ_RING_SIZE_512);
			break;
		case 1024:
			bf_set(lpfc_rq_context_rqe_count,
			       &rq_create->u.request.context,
			       LPFC_RQ_RING_SIZE_1024);
			break;
		case 2048:
			bf_set(lpfc_rq_context_rqe_count,
			       &rq_create->u.request.context,
			       LPFC_RQ_RING_SIZE_2048);
			break;
		case 4096:
			bf_set(lpfc_rq_context_rqe_count,
			       &rq_create->u.request.context,
			       LPFC_RQ_RING_SIZE_4096);
			break;
		}
		bf_set(lpfc_rq_context_buf_size, &rq_create->u.request.context,
		       LPFC_HDR_BUF_SIZE);
	}
	bf_set(lpfc_rq_context_cq_id, &rq_create->u.request.context,
	       cq->queue_id);
	bf_set(lpfc_mbx_rq_create_num_pages, &rq_create->u.request,
	       hrq->page_count);
	/* Post the physical address of every page backing the header RQ. */
	list_for_each_entry(dmabuf, &hrq->page_list, list) {
		memset(dmabuf->virt, 0, hw_page_size);
		rq_create->u.request.page[dmabuf->buffer_tag].addr_lo =
			putPaddrLow(dmabuf->phys);
		rq_create->u.request.page[dmabuf->buffer_tag].addr_hi =
			putPaddrHigh(dmabuf->phys);
	}
	if (phba->sli4_hba.fw_func_mode & LPFC_DUA_MODE)
		bf_set(lpfc_mbx_rq_create_dua, &rq_create->u.request, 1);

	rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
	/* The IOCTL status is embedded in the mailbox subheader. */
	shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
	shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
	if (shdr_status || shdr_add_status || rc) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"2504 RQ_CREATE mailbox failed with "
				"status x%x add_status x%x, mbx status x%x\n",
				shdr_status, shdr_add_status, rc);
		status = -ENXIO;
		goto out;
	}
	hrq->queue_id = bf_get(lpfc_mbx_rq_create_q_id, &rq_create->u.response);
	if (hrq->queue_id == 0xFFFF) {
		status = -ENXIO;
		goto out;
	}

	if (phba->sli4_hba.fw_func_mode & LPFC_DUA_MODE) {
		/* DUA mode: doorbell format/BAR come from the response */
		hrq->db_format = bf_get(lpfc_mbx_rq_create_db_format,
					&rq_create->u.response);
		if ((hrq->db_format != LPFC_DB_LIST_FORMAT) &&
		    (hrq->db_format != LPFC_DB_RING_FORMAT)) {
			lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
					"3262 RQ [%d] doorbell format not "
					"supported: x%x\n", hrq->queue_id,
					hrq->db_format);
			status = -EINVAL;
			goto out;
		}

		pci_barset = bf_get(lpfc_mbx_rq_create_bar_set,
				    &rq_create->u.response);
		bar_memmap_p = lpfc_dual_chute_pci_bar_map(phba, pci_barset);
		if (!bar_memmap_p) {
			lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
					"3269 RQ[%d] failed to memmap pci "
					"barset:x%x\n", hrq->queue_id,
					pci_barset);
			status = -ENOMEM;
			goto out;
		}

		db_offset = rq_create->u.response.doorbell_offset;
		if ((db_offset != LPFC_ULP0_RQ_DOORBELL) &&
		    (db_offset != LPFC_ULP1_RQ_DOORBELL)) {
			lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
					"3270 RQ[%d] doorbell offset not "
					"supported: x%x\n", hrq->queue_id,
					db_offset);
			status = -EINVAL;
			goto out;
		}
		hrq->db_regaddr = bar_memmap_p + db_offset;
		lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
				"3266 RQ[qid:%d]: barset:x%x, offset:x%x, "
				"format:x%x\n", hrq->queue_id, pci_barset,
				db_offset, hrq->db_format);
	} else {
		hrq->db_format = LPFC_DB_RING_FORMAT;
		hrq->db_regaddr = phba->sli4_hba.RQDBregaddr;
	}
	hrq->type = LPFC_HRQ;
	hrq->assoc_qid = cq->queue_id;
	hrq->subtype = subtype;
	hrq->host_index = 0;
	hrq->hba_index = 0;
	hrq->entry_repost = LPFC_RQ_REPOST;

	/* Reuse the same mailbox to create the data receive queue. */
	lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_FCOE,
			 LPFC_MBOX_OPCODE_FCOE_RQ_CREATE,
			 length, LPFC_SLI4_MBX_EMBED);
	bf_set(lpfc_mbox_hdr_version, &shdr->request,
	       phba->sli4_hba.pc_sli4_params.rqv);
	if (phba->sli4_hba.pc_sli4_params.rqv == LPFC_Q_CREATE_VERSION_1) {
		bf_set(lpfc_rq_context_rqe_count_1,
		       &rq_create->u.request.context, hrq->entry_count);
		if (subtype == LPFC_NVMET)
			rq_create->u.request.context.buffer_size =
				LPFC_NVMET_DATA_BUF_SIZE;
		else
			rq_create->u.request.context.buffer_size =
				LPFC_DATA_BUF_SIZE;
		bf_set(lpfc_rq_context_rqe_size, &rq_create->u.request.context,
		       LPFC_RQE_SIZE_8);
		bf_set(lpfc_rq_context_page_size, &rq_create->u.request.context,
		       (PAGE_SIZE/SLI4_PAGE_SIZE));
	} else {
		switch (drq->entry_count) {
		default:
			lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
					"2536 Unsupported RQ count. (%d)\n",
					drq->entry_count);
			if (drq->entry_count < 512) {
				status = -EINVAL;
				goto out;
			}
			/* fall through - otherwise default to smallest count */
		case 512:
			bf_set(lpfc_rq_context_rqe_count,
			       &rq_create->u.request.context,
			       LPFC_RQ_RING_SIZE_512);
			break;
		case 1024:
			bf_set(lpfc_rq_context_rqe_count,
			       &rq_create->u.request.context,
			       LPFC_RQ_RING_SIZE_1024);
			break;
		case 2048:
			bf_set(lpfc_rq_context_rqe_count,
			       &rq_create->u.request.context,
			       LPFC_RQ_RING_SIZE_2048);
			break;
		case 4096:
			bf_set(lpfc_rq_context_rqe_count,
			       &rq_create->u.request.context,
			       LPFC_RQ_RING_SIZE_4096);
			break;
		}
		if (subtype == LPFC_NVMET)
			bf_set(lpfc_rq_context_buf_size,
			       &rq_create->u.request.context,
			       LPFC_NVMET_DATA_BUF_SIZE);
		else
			bf_set(lpfc_rq_context_buf_size,
			       &rq_create->u.request.context,
			       LPFC_DATA_BUF_SIZE);
	}
	bf_set(lpfc_rq_context_cq_id, &rq_create->u.request.context,
	       cq->queue_id);
	bf_set(lpfc_mbx_rq_create_num_pages, &rq_create->u.request,
	       drq->page_count);
	list_for_each_entry(dmabuf, &drq->page_list, list) {
		rq_create->u.request.page[dmabuf->buffer_tag].addr_lo =
			putPaddrLow(dmabuf->phys);
		rq_create->u.request.page[dmabuf->buffer_tag].addr_hi =
			putPaddrHigh(dmabuf->phys);
	}
	if (phba->sli4_hba.fw_func_mode & LPFC_DUA_MODE)
		bf_set(lpfc_mbx_rq_create_dua, &rq_create->u.request, 1);
	rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
	/* Re-fetch shdr: lpfc_sli4_config reinitialized the mailbox. */
	shdr = (union lpfc_sli4_cfg_shdr *) &rq_create->header.cfg_shdr;
	shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
	shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
	if (shdr_status || shdr_add_status || rc) {
		status = -ENXIO;
		goto out;
	}
	drq->queue_id = bf_get(lpfc_mbx_rq_create_q_id, &rq_create->u.response);
	if (drq->queue_id == 0xFFFF) {
		status = -ENXIO;
		goto out;
	}
	drq->type = LPFC_DRQ;
	drq->assoc_qid = cq->queue_id;
	drq->subtype = subtype;
	drq->host_index = 0;
	drq->hba_index = 0;
	drq->entry_repost = LPFC_RQ_REPOST;

	/* link the header and data RQs onto the parent cq child list */
	list_add_tail(&hrq->list, &cq->child_list);
	list_add_tail(&drq->list, &cq->child_list);

out:
	mempool_free(mbox, phba->mbox_mem_pool);
	return status;
}
15633
15634
15635
15636
15637
15638
15639
15640
15641
15642
15643
15644
15645
15646
15647
15648
15649
15650
15651
15652
15653
15654
15655
15656
/**
 * lpfc_mrq_create - Create multiple NVMET receive queue pairs on the HBA
 * @phba: HBA structure that indicates port to create the queues on.
 * @hrqp: array of header receive queues (one per MRQ).
 * @drqp: array of data receive queues (one per MRQ).
 * @cqp: array of completion queues the RQ pairs will be bound to.
 * @subtype: the queues' subtype.
 *
 * Builds a single non-embedded V2 RQ_CREATE mailbox command that creates
 * phba->cfg_nvmet_mrq header/data RQ pairs at once.  The request's page
 * array carries the DMA pages of every header and data queue in order.
 * On success the firmware returns a base queue id; each pair is assigned
 * ids (base + 2*idx, base + 2*idx + 1).
 *
 * Return: 0 on success; -ENODEV on NULL arguments or missing queues;
 * -EINVAL on mismatched entry counts; -ENOMEM on allocation failure;
 * -ENXIO on a mailbox/firmware error.
 */
int
lpfc_mrq_create(struct lpfc_hba *phba, struct lpfc_queue **hrqp,
		struct lpfc_queue **drqp, struct lpfc_queue **cqp,
		uint32_t subtype)
{
	struct lpfc_queue *hrq, *drq, *cq;
	struct lpfc_mbx_rq_create_v2 *rq_create;
	struct lpfc_dmabuf *dmabuf;
	LPFC_MBOXQ_t *mbox;
	int rc, length, alloclen, status = 0;
	int cnt, idx, numrq, page_idx = 0;
	uint32_t shdr_status, shdr_add_status;
	union lpfc_sli4_cfg_shdr *shdr;
	uint32_t hw_page_size = phba->sli4_hba.pc_sli4_params.if_page_sz;

	numrq = phba->cfg_nvmet_mrq;
	/* sanity check on array memory */
	if (!hrqp || !drqp || !cqp || !numrq)
		return -ENODEV;
	if (!phba->sli4_hba.pc_sli4_params.supported)
		hw_page_size = SLI4_PAGE_SIZE;

	mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!mbox)
		return -ENOMEM;

	/* Room for the fixed request plus a page entry per HRQ/DRQ page. */
	length = sizeof(struct lpfc_mbx_rq_create_v2);
	length += ((2 * numrq * hrqp[0]->page_count) *
		   sizeof(struct dma_address));

	alloclen = lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_FCOE,
				    LPFC_MBOX_OPCODE_FCOE_RQ_CREATE, length,
				    LPFC_SLI4_MBX_NEMBED);
	if (alloclen < length) {
		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
				"3099 Allocated DMA memory size (%d) is "
				"less than the requested DMA memory size "
				"(%d)\n", alloclen, length);
		status = -ENOMEM;
		goto out;
	}

	/* Non-embedded command: the request lives in the first SGE page. */
	rq_create = mbox->sge_array->addr[0];
	shdr = (union lpfc_sli4_cfg_shdr *)&rq_create->cfg_shdr;

	bf_set(lpfc_mbox_hdr_version, &shdr->request, LPFC_Q_CREATE_VERSION_2);
	cnt = 0;

	for (idx = 0; idx < numrq; idx++) {
		hrq = hrqp[idx];
		drq = drqp[idx];
		cq  = cqp[idx];

		/* sanity check on queue memory */
		if (!hrq || !drq || !cq) {
			status = -ENODEV;
			goto out;
		}

		if (hrq->entry_count != drq->entry_count) {
			status = -EINVAL;
			goto out;
		}

		/* Shared context fields are only written once, for idx 0. */
		if (idx == 0) {
			bf_set(lpfc_mbx_rq_create_num_pages,
			       &rq_create->u.request,
			       hrq->page_count);
			bf_set(lpfc_mbx_rq_create_rq_cnt,
			       &rq_create->u.request, (numrq * 2));
			bf_set(lpfc_mbx_rq_create_dnb, &rq_create->u.request,
			       1);
			bf_set(lpfc_rq_context_base_cq,
			       &rq_create->u.request.context,
			       cq->queue_id);
			bf_set(lpfc_rq_context_data_size,
			       &rq_create->u.request.context,
			       LPFC_NVMET_DATA_BUF_SIZE);
			bf_set(lpfc_rq_context_hdr_size,
			       &rq_create->u.request.context,
			       LPFC_HDR_BUF_SIZE);
			bf_set(lpfc_rq_context_rqe_count_1,
			       &rq_create->u.request.context,
			       hrq->entry_count);
			bf_set(lpfc_rq_context_rqe_size,
			       &rq_create->u.request.context,
			       LPFC_RQE_SIZE_8);
			bf_set(lpfc_rq_context_page_size,
			       &rq_create->u.request.context,
			       (PAGE_SIZE/SLI4_PAGE_SIZE));
		}
		/* Append this pair's header-queue pages to the page array. */
		rc = 0;
		list_for_each_entry(dmabuf, &hrq->page_list, list) {
			memset(dmabuf->virt, 0, hw_page_size);
			cnt = page_idx + dmabuf->buffer_tag;
			rq_create->u.request.page[cnt].addr_lo =
				putPaddrLow(dmabuf->phys);
			rq_create->u.request.page[cnt].addr_hi =
				putPaddrHigh(dmabuf->phys);
			rc++;
		}
		page_idx += rc;

		/* ... followed by its data-queue pages. */
		rc = 0;
		list_for_each_entry(dmabuf, &drq->page_list, list) {
			memset(dmabuf->virt, 0, hw_page_size);
			cnt = page_idx + dmabuf->buffer_tag;
			rq_create->u.request.page[cnt].addr_lo =
				putPaddrLow(dmabuf->phys);
			rq_create->u.request.page[cnt].addr_hi =
				putPaddrHigh(dmabuf->phys);
			rc++;
		}
		page_idx += rc;

		hrq->db_format = LPFC_DB_RING_FORMAT;
		hrq->db_regaddr = phba->sli4_hba.RQDBregaddr;
		hrq->type = LPFC_HRQ;
		hrq->assoc_qid = cq->queue_id;
		hrq->subtype = subtype;
		hrq->host_index = 0;
		hrq->hba_index = 0;
		hrq->entry_repost = LPFC_RQ_REPOST;

		drq->db_format = LPFC_DB_RING_FORMAT;
		drq->db_regaddr = phba->sli4_hba.RQDBregaddr;
		drq->type = LPFC_DRQ;
		drq->assoc_qid = cq->queue_id;
		drq->subtype = subtype;
		drq->host_index = 0;
		drq->hba_index = 0;
		drq->entry_repost = LPFC_RQ_REPOST;

		/* NOTE(review): queues are linked onto the cq child list
		 * before the mailbox is issued; on failure they remain
		 * linked - confirm callers tear them down.
		 */
		list_add_tail(&hrq->list, &cq->child_list);
		list_add_tail(&drq->list, &cq->child_list);
	}

	rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
	/* The IOCTL status is embedded in the mailbox subheader. */
	shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
	shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
	if (shdr_status || shdr_add_status || rc) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"3120 RQ_CREATE mailbox failed with "
				"status x%x add_status x%x, mbx status x%x\n",
				shdr_status, shdr_add_status, rc);
		status = -ENXIO;
		goto out;
	}
	rc = bf_get(lpfc_mbx_rq_create_q_id, &rq_create->u.response);
	if (rc == 0xFFFF) {
		status = -ENXIO;
		goto out;
	}

	/* Firmware assigns consecutive ids starting at the returned base. */
	for (idx = 0; idx < numrq; idx++) {
		hrq = hrqp[idx];
		hrq->queue_id = rc + (2 * idx);
		drq = drqp[idx];
		drq->queue_id = rc + (2 * idx) + 1;
	}

out:
	lpfc_sli4_mbox_cmd_free(phba, mbox);
	return status;
}
15826
15827
15828
15829
15830
15831
15832
15833
15834
15835
15836
15837
15838
15839int
15840lpfc_eq_destroy(struct lpfc_hba *phba, struct lpfc_queue *eq)
15841{
15842 LPFC_MBOXQ_t *mbox;
15843 int rc, length, status = 0;
15844 uint32_t shdr_status, shdr_add_status;
15845 union lpfc_sli4_cfg_shdr *shdr;
15846
15847
15848 if (!eq)
15849 return -ENODEV;
15850 mbox = mempool_alloc(eq->phba->mbox_mem_pool, GFP_KERNEL);
15851 if (!mbox)
15852 return -ENOMEM;
15853 length = (sizeof(struct lpfc_mbx_eq_destroy) -
15854 sizeof(struct lpfc_sli4_cfg_mhdr));
15855 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON,
15856 LPFC_MBOX_OPCODE_EQ_DESTROY,
15857 length, LPFC_SLI4_MBX_EMBED);
15858 bf_set(lpfc_mbx_eq_destroy_q_id, &mbox->u.mqe.un.eq_destroy.u.request,
15859 eq->queue_id);
15860 mbox->vport = eq->phba->pport;
15861 mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
15862
15863 rc = lpfc_sli_issue_mbox(eq->phba, mbox, MBX_POLL);
15864
15865 shdr = (union lpfc_sli4_cfg_shdr *)
15866 &mbox->u.mqe.un.eq_destroy.header.cfg_shdr;
15867 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
15868 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
15869 if (shdr_status || shdr_add_status || rc) {
15870 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
15871 "2505 EQ_DESTROY mailbox failed with "
15872 "status x%x add_status x%x, mbx status x%x\n",
15873 shdr_status, shdr_add_status, rc);
15874 status = -ENXIO;
15875 }
15876
15877
15878 list_del_init(&eq->list);
15879 mempool_free(mbox, eq->phba->mbox_mem_pool);
15880 return status;
15881}
15882
15883
15884
15885
15886
15887
15888
15889
15890
15891
15892
15893
15894
15895int
15896lpfc_cq_destroy(struct lpfc_hba *phba, struct lpfc_queue *cq)
15897{
15898 LPFC_MBOXQ_t *mbox;
15899 int rc, length, status = 0;
15900 uint32_t shdr_status, shdr_add_status;
15901 union lpfc_sli4_cfg_shdr *shdr;
15902
15903
15904 if (!cq)
15905 return -ENODEV;
15906 mbox = mempool_alloc(cq->phba->mbox_mem_pool, GFP_KERNEL);
15907 if (!mbox)
15908 return -ENOMEM;
15909 length = (sizeof(struct lpfc_mbx_cq_destroy) -
15910 sizeof(struct lpfc_sli4_cfg_mhdr));
15911 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON,
15912 LPFC_MBOX_OPCODE_CQ_DESTROY,
15913 length, LPFC_SLI4_MBX_EMBED);
15914 bf_set(lpfc_mbx_cq_destroy_q_id, &mbox->u.mqe.un.cq_destroy.u.request,
15915 cq->queue_id);
15916 mbox->vport = cq->phba->pport;
15917 mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
15918 rc = lpfc_sli_issue_mbox(cq->phba, mbox, MBX_POLL);
15919
15920 shdr = (union lpfc_sli4_cfg_shdr *)
15921 &mbox->u.mqe.un.wq_create.header.cfg_shdr;
15922 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
15923 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
15924 if (shdr_status || shdr_add_status || rc) {
15925 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
15926 "2506 CQ_DESTROY mailbox failed with "
15927 "status x%x add_status x%x, mbx status x%x\n",
15928 shdr_status, shdr_add_status, rc);
15929 status = -ENXIO;
15930 }
15931
15932 list_del_init(&cq->list);
15933 mempool_free(mbox, cq->phba->mbox_mem_pool);
15934 return status;
15935}
15936
15937
15938
15939
15940
15941
15942
15943
15944
15945
15946
15947
15948
15949int
15950lpfc_mq_destroy(struct lpfc_hba *phba, struct lpfc_queue *mq)
15951{
15952 LPFC_MBOXQ_t *mbox;
15953 int rc, length, status = 0;
15954 uint32_t shdr_status, shdr_add_status;
15955 union lpfc_sli4_cfg_shdr *shdr;
15956
15957
15958 if (!mq)
15959 return -ENODEV;
15960 mbox = mempool_alloc(mq->phba->mbox_mem_pool, GFP_KERNEL);
15961 if (!mbox)
15962 return -ENOMEM;
15963 length = (sizeof(struct lpfc_mbx_mq_destroy) -
15964 sizeof(struct lpfc_sli4_cfg_mhdr));
15965 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON,
15966 LPFC_MBOX_OPCODE_MQ_DESTROY,
15967 length, LPFC_SLI4_MBX_EMBED);
15968 bf_set(lpfc_mbx_mq_destroy_q_id, &mbox->u.mqe.un.mq_destroy.u.request,
15969 mq->queue_id);
15970 mbox->vport = mq->phba->pport;
15971 mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
15972 rc = lpfc_sli_issue_mbox(mq->phba, mbox, MBX_POLL);
15973
15974 shdr = (union lpfc_sli4_cfg_shdr *)
15975 &mbox->u.mqe.un.mq_destroy.header.cfg_shdr;
15976 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
15977 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
15978 if (shdr_status || shdr_add_status || rc) {
15979 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
15980 "2507 MQ_DESTROY mailbox failed with "
15981 "status x%x add_status x%x, mbx status x%x\n",
15982 shdr_status, shdr_add_status, rc);
15983 status = -ENXIO;
15984 }
15985
15986 list_del_init(&mq->list);
15987 mempool_free(mbox, mq->phba->mbox_mem_pool);
15988 return status;
15989}
15990
15991
15992
15993
15994
15995
15996
15997
15998
15999
16000
16001
16002
16003int
16004lpfc_wq_destroy(struct lpfc_hba *phba, struct lpfc_queue *wq)
16005{
16006 LPFC_MBOXQ_t *mbox;
16007 int rc, length, status = 0;
16008 uint32_t shdr_status, shdr_add_status;
16009 union lpfc_sli4_cfg_shdr *shdr;
16010
16011
16012 if (!wq)
16013 return -ENODEV;
16014 mbox = mempool_alloc(wq->phba->mbox_mem_pool, GFP_KERNEL);
16015 if (!mbox)
16016 return -ENOMEM;
16017 length = (sizeof(struct lpfc_mbx_wq_destroy) -
16018 sizeof(struct lpfc_sli4_cfg_mhdr));
16019 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_FCOE,
16020 LPFC_MBOX_OPCODE_FCOE_WQ_DESTROY,
16021 length, LPFC_SLI4_MBX_EMBED);
16022 bf_set(lpfc_mbx_wq_destroy_q_id, &mbox->u.mqe.un.wq_destroy.u.request,
16023 wq->queue_id);
16024 mbox->vport = wq->phba->pport;
16025 mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
16026 rc = lpfc_sli_issue_mbox(wq->phba, mbox, MBX_POLL);
16027 shdr = (union lpfc_sli4_cfg_shdr *)
16028 &mbox->u.mqe.un.wq_destroy.header.cfg_shdr;
16029 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
16030 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
16031 if (shdr_status || shdr_add_status || rc) {
16032 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
16033 "2508 WQ_DESTROY mailbox failed with "
16034 "status x%x add_status x%x, mbx status x%x\n",
16035 shdr_status, shdr_add_status, rc);
16036 status = -ENXIO;
16037 }
16038
16039 list_del_init(&wq->list);
16040 kfree(wq->pring);
16041 wq->pring = NULL;
16042 mempool_free(mbox, wq->phba->mbox_mem_pool);
16043 return status;
16044}
16045
16046
16047
16048
16049
16050
16051
16052
16053
16054
16055
16056
16057
/**
 * lpfc_rq_destroy - Destroy a Receive Queue pair on the HBA
 * @phba: HBA structure that indicates port to destroy a queue on.
 * @hrq: The header receive queue to destroy.
 * @drq: The data receive queue to destroy.
 *
 * Issues one RQ_DESTROY mailbox command (polled) for the header queue
 * and, if that succeeds, reuses the same mailbox for the data queue.
 * Both queue structures are then unlinked from the driver's queue lists;
 * the memory backing them is the caller's responsibility.
 *
 * Return: 0 on success, -ENODEV for NULL queues, -ENOMEM on mailbox
 * allocation failure, or -ENXIO if either mailbox command fails.
 **/
int
lpfc_rq_destroy(struct lpfc_hba *phba, struct lpfc_queue *hrq,
		struct lpfc_queue *drq)
{
	LPFC_MBOXQ_t *mbox;
	int rc, length, status = 0;
	uint32_t shdr_status, shdr_add_status;
	union lpfc_sli4_cfg_shdr *shdr;

	/* sanity check on queue memory */
	if (!hrq || !drq)
		return -ENODEV;
	mbox = mempool_alloc(hrq->phba->mbox_mem_pool, GFP_KERNEL);
	if (!mbox)
		return -ENOMEM;
	/* Payload length excludes the common SLI4 config header. */
	length = (sizeof(struct lpfc_mbx_rq_destroy) -
		  sizeof(struct lpfc_sli4_cfg_mhdr));
	lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_FCOE,
			 LPFC_MBOX_OPCODE_FCOE_RQ_DESTROY,
			 length, LPFC_SLI4_MBX_EMBED);
	/* First destroy the header RQ. */
	bf_set(lpfc_mbx_rq_destroy_q_id, &mbox->u.mqe.un.rq_destroy.u.request,
	       hrq->queue_id);
	mbox->vport = hrq->phba->pport;
	mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
	rc = lpfc_sli_issue_mbox(hrq->phba, mbox, MBX_POLL);
	/* The IOCTL status is embedded in the mailbox subheader. */
	shdr = (union lpfc_sli4_cfg_shdr *)
		&mbox->u.mqe.un.rq_destroy.header.cfg_shdr;
	shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
	shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
	if (shdr_status || shdr_add_status || rc) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"2509 RQ_DESTROY mailbox failed with "
				"status x%x add_status x%x, mbx status x%x\n",
				shdr_status, shdr_add_status, rc);
		/* On timeout the default completion handler owns (and
		 * frees) the mailbox, so only free it here otherwise.
		 */
		if (rc != MBX_TIMEOUT)
			mempool_free(mbox, hrq->phba->mbox_mem_pool);
		return -ENXIO;
	}
	/* Reuse the same mailbox to destroy the companion data RQ. */
	bf_set(lpfc_mbx_rq_destroy_q_id, &mbox->u.mqe.un.rq_destroy.u.request,
	       drq->queue_id);
	rc = lpfc_sli_issue_mbox(drq->phba, mbox, MBX_POLL);
	shdr = (union lpfc_sli4_cfg_shdr *)
		&mbox->u.mqe.un.rq_destroy.header.cfg_shdr;
	shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
	shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
	if (shdr_status || shdr_add_status || rc) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"2510 RQ_DESTROY mailbox failed with "
				"status x%x add_status x%x, mbx status x%x\n",
				shdr_status, shdr_add_status, rc);
		status = -ENXIO;
	}
	/* Unlink both queues regardless of the second command's outcome. */
	list_del_init(&hrq->list);
	list_del_init(&drq->list);
	mempool_free(mbox, hrq->phba->mbox_mem_pool);
	return status;
}
16116
16117
16118
16119
16120
16121
16122
16123
16124
16125
16126
16127
16128
16129
16130
16131
16132
16133
16134
16135
16136
16137
16138
16139int
16140lpfc_sli4_post_sgl(struct lpfc_hba *phba,
16141 dma_addr_t pdma_phys_addr0,
16142 dma_addr_t pdma_phys_addr1,
16143 uint16_t xritag)
16144{
16145 struct lpfc_mbx_post_sgl_pages *post_sgl_pages;
16146 LPFC_MBOXQ_t *mbox;
16147 int rc;
16148 uint32_t shdr_status, shdr_add_status;
16149 uint32_t mbox_tmo;
16150 union lpfc_sli4_cfg_shdr *shdr;
16151
16152 if (xritag == NO_XRI) {
16153 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
16154 "0364 Invalid param:\n");
16155 return -EINVAL;
16156 }
16157
16158 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
16159 if (!mbox)
16160 return -ENOMEM;
16161
16162 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_FCOE,
16163 LPFC_MBOX_OPCODE_FCOE_POST_SGL_PAGES,
16164 sizeof(struct lpfc_mbx_post_sgl_pages) -
16165 sizeof(struct lpfc_sli4_cfg_mhdr), LPFC_SLI4_MBX_EMBED);
16166
16167 post_sgl_pages = (struct lpfc_mbx_post_sgl_pages *)
16168 &mbox->u.mqe.un.post_sgl_pages;
16169 bf_set(lpfc_post_sgl_pages_xri, post_sgl_pages, xritag);
16170 bf_set(lpfc_post_sgl_pages_xricnt, post_sgl_pages, 1);
16171
16172 post_sgl_pages->sgl_pg_pairs[0].sgl_pg0_addr_lo =
16173 cpu_to_le32(putPaddrLow(pdma_phys_addr0));
16174 post_sgl_pages->sgl_pg_pairs[0].sgl_pg0_addr_hi =
16175 cpu_to_le32(putPaddrHigh(pdma_phys_addr0));
16176
16177 post_sgl_pages->sgl_pg_pairs[0].sgl_pg1_addr_lo =
16178 cpu_to_le32(putPaddrLow(pdma_phys_addr1));
16179 post_sgl_pages->sgl_pg_pairs[0].sgl_pg1_addr_hi =
16180 cpu_to_le32(putPaddrHigh(pdma_phys_addr1));
16181 if (!phba->sli4_hba.intr_enable)
16182 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
16183 else {
16184 mbox_tmo = lpfc_mbox_tmo_val(phba, mbox);
16185 rc = lpfc_sli_issue_mbox_wait(phba, mbox, mbox_tmo);
16186 }
16187
16188 shdr = (union lpfc_sli4_cfg_shdr *) &post_sgl_pages->header.cfg_shdr;
16189 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
16190 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
16191 if (rc != MBX_TIMEOUT)
16192 mempool_free(mbox, phba->mbox_mem_pool);
16193 if (shdr_status || shdr_add_status || rc) {
16194 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
16195 "2511 POST_SGL mailbox failed with "
16196 "status x%x add_status x%x, mbx status x%x\n",
16197 shdr_status, shdr_add_status, rc);
16198 }
16199 return 0;
16200}
16201
16202
16203
16204
16205
16206
16207
16208
16209
16210
16211
16212
16213
16214
16215static uint16_t
16216lpfc_sli4_alloc_xri(struct lpfc_hba *phba)
16217{
16218 unsigned long xri;
16219
16220
16221
16222
16223
16224 spin_lock_irq(&phba->hbalock);
16225 xri = find_next_zero_bit(phba->sli4_hba.xri_bmask,
16226 phba->sli4_hba.max_cfg_param.max_xri, 0);
16227 if (xri >= phba->sli4_hba.max_cfg_param.max_xri) {
16228 spin_unlock_irq(&phba->hbalock);
16229 return NO_XRI;
16230 } else {
16231 set_bit(xri, phba->sli4_hba.xri_bmask);
16232 phba->sli4_hba.max_cfg_param.xri_used++;
16233 }
16234 spin_unlock_irq(&phba->hbalock);
16235 return xri;
16236}
16237
16238
16239
16240
16241
16242
16243
16244
16245static void
16246__lpfc_sli4_free_xri(struct lpfc_hba *phba, int xri)
16247{
16248 if (test_and_clear_bit(xri, phba->sli4_hba.xri_bmask)) {
16249 phba->sli4_hba.max_cfg_param.xri_used--;
16250 }
16251}
16252
16253
16254
16255
16256
16257
16258
16259
/**
 * lpfc_sli4_free_xri - Release an xri back to the pool
 * @phba: pointer to lpfc hba data structure.
 * @xri: xri to release.
 *
 * Locked wrapper around __lpfc_sli4_free_xri(): takes hbalock so the
 * shared xri bitmask and usage count are updated atomically.
 **/
void
lpfc_sli4_free_xri(struct lpfc_hba *phba, int xri)
{
	spin_lock_irq(&phba->hbalock);
	__lpfc_sli4_free_xri(phba, xri);
	spin_unlock_irq(&phba->hbalock);
}
16267
16268
16269
16270
16271
16272
16273
16274
16275
16276
16277
16278uint16_t
16279lpfc_sli4_next_xritag(struct lpfc_hba *phba)
16280{
16281 uint16_t xri_index;
16282
16283 xri_index = lpfc_sli4_alloc_xri(phba);
16284 if (xri_index == NO_XRI)
16285 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
16286 "2004 Failed to allocate XRI.last XRITAG is %d"
16287 " Max XRI is %d, Used XRI is %d\n",
16288 xri_index,
16289 phba->sli4_hba.max_cfg_param.max_xri,
16290 phba->sli4_hba.max_cfg_param.xri_used);
16291 return xri_index;
16292}
16293
16294
16295
16296
16297
16298
16299
16300
16301
16302
16303
16304
/**
 * lpfc_sli4_post_sgl_list - Post a block of sgl pages to the port
 * @phba: pointer to lpfc hba data structure.
 * @post_sgl_list: list of lpfc_sglq entries to register.
 * @post_cnt: number of entries on @post_sgl_list.
 *
 * Registers a block of SGL pages with the port via a single
 * non-embedded POST_SGL_PAGES mailbox command.  The command reports the
 * first XRI on the list; the port presumably assumes the XRIs are
 * contiguous from there (TODO confirm against the callers).
 *
 * Return: 0 on success, -ENOMEM if the request does not fit in one SLI4
 * page or allocation fails, or -ENXIO if the mailbox command fails.
 **/
static int
lpfc_sli4_post_sgl_list(struct lpfc_hba *phba,
			struct list_head *post_sgl_list,
			int post_cnt)
{
	struct lpfc_sglq *sglq_entry = NULL, *sglq_next = NULL;
	struct lpfc_mbx_post_uembed_sgl_page1 *sgl;
	struct sgl_page_pairs *sgl_pg_pairs;
	void *viraddr;
	LPFC_MBOXQ_t *mbox;
	uint32_t reqlen, alloclen, pg_pairs;
	uint32_t mbox_tmo;
	uint16_t xritag_start = 0;
	int rc = 0;
	uint32_t shdr_status, shdr_add_status;
	union lpfc_sli4_cfg_shdr *shdr;

	/* One page-pair descriptor per sgl entry plus the config header. */
	reqlen = post_cnt * sizeof(struct sgl_page_pairs) +
		 sizeof(union lpfc_sli4_cfg_shdr) + sizeof(uint32_t);
	if (reqlen > SLI4_PAGE_SIZE) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"2559 Block sgl registration required DMA "
				"size (%d) great than a page\n", reqlen);
		return -ENOMEM;
	}

	mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!mbox)
		return -ENOMEM;

	/* Allocate DMA memory and set up the non-embedded mailbox command */
	alloclen = lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_FCOE,
				    LPFC_MBOX_OPCODE_FCOE_POST_SGL_PAGES, reqlen,
				    LPFC_SLI4_MBX_NEMBED);

	if (alloclen < reqlen) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"0285 Allocated DMA memory size (%d) is "
				"less than the requested DMA memory "
				"size (%d)\n", alloclen, reqlen);
		lpfc_sli4_mbox_cmd_free(phba, mbox);
		return -ENOMEM;
	}
	/* Build the page-pair array directly in the command's DMA page. */
	viraddr = mbox->sge_array->addr[0];
	sgl = (struct lpfc_mbx_post_uembed_sgl_page1 *)viraddr;
	sgl_pg_pairs = &sgl->sgl_pg_pairs;

	pg_pairs = 0;
	list_for_each_entry_safe(sglq_entry, sglq_next, post_sgl_list, list) {
		/* Set up the sge entry */
		sgl_pg_pairs->sgl_pg0_addr_lo =
			cpu_to_le32(putPaddrLow(sglq_entry->phys));
		sgl_pg_pairs->sgl_pg0_addr_hi =
			cpu_to_le32(putPaddrHigh(sglq_entry->phys));
		/* Page 1 is unused for these sgls, so post a zero address. */
		sgl_pg_pairs->sgl_pg1_addr_lo =
			cpu_to_le32(putPaddrLow(0));
		sgl_pg_pairs->sgl_pg1_addr_hi =
			cpu_to_le32(putPaddrHigh(0));

		/* Remember the first XRI; the command posts from here. */
		if (pg_pairs == 0)
			xritag_start = sglq_entry->sli4_xritag;
		sgl_pg_pairs++;
		pg_pairs++;
	}

	/* Complete initialization and perform endian conversion. */
	bf_set(lpfc_post_sgl_pages_xri, sgl, xritag_start);
	bf_set(lpfc_post_sgl_pages_xricnt, sgl, post_cnt);
	sgl->word0 = cpu_to_le32(sgl->word0);

	if (!phba->sli4_hba.intr_enable)
		rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
	else {
		mbox_tmo = lpfc_mbox_tmo_val(phba, mbox);
		rc = lpfc_sli_issue_mbox_wait(phba, mbox, mbox_tmo);
	}
	shdr = (union lpfc_sli4_cfg_shdr *) &sgl->cfg_shdr;
	shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
	shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
	/* On timeout the completion path frees the mailbox. */
	if (rc != MBX_TIMEOUT)
		lpfc_sli4_mbox_cmd_free(phba, mbox);
	if (shdr_status || shdr_add_status || rc) {
		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
				"2513 POST_SGL_BLOCK mailbox command failed "
				"status x%x add_status x%x mbx status x%x\n",
				shdr_status, shdr_add_status, rc);
		rc = -ENXIO;
	}
	return rc;
}
16397
16398
16399
16400
16401
16402
16403
16404
16405
16406
16407
16408
/**
 * lpfc_sli4_post_scsi_sgl_block - Post a block of scsi sgl pages to the port
 * @phba: pointer to lpfc hba data structure.
 * @sblist: list of lpfc_scsi_buf entries to register.
 * @count: number of entries on @sblist.
 *
 * Registers the SGL pages of a block of SCSI buffers with the port via
 * a single non-embedded POST_SGL_PAGES mailbox command.  Buffers whose
 * sgl spans more than one SGL page get a second page posted as well.
 *
 * Return: 0 on success, -ENOMEM if the request does not fit in one SLI4
 * page or allocation fails, or -ENXIO if the mailbox command fails.
 **/
int
lpfc_sli4_post_scsi_sgl_block(struct lpfc_hba *phba,
			      struct list_head *sblist,
			      int count)
{
	struct lpfc_scsi_buf *psb;
	struct lpfc_mbx_post_uembed_sgl_page1 *sgl;
	struct sgl_page_pairs *sgl_pg_pairs;
	void *viraddr;
	LPFC_MBOXQ_t *mbox;
	uint32_t reqlen, alloclen, pg_pairs;
	uint32_t mbox_tmo;
	uint16_t xritag_start = 0;
	int rc = 0;
	uint32_t shdr_status, shdr_add_status;
	dma_addr_t pdma_phys_bpl1;
	union lpfc_sli4_cfg_shdr *shdr;

	/* Calculate the requested length of the dma memory */
	reqlen = count * sizeof(struct sgl_page_pairs) +
		 sizeof(union lpfc_sli4_cfg_shdr) + sizeof(uint32_t);
	if (reqlen > SLI4_PAGE_SIZE) {
		lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
				"0217 Block sgl registration required DMA "
				"size (%d) great than a page\n", reqlen);
		return -ENOMEM;
	}
	mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!mbox) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"0283 Failed to allocate mbox cmd memory\n");
		return -ENOMEM;
	}

	/* Allocate DMA memory and set up the non-embedded mailbox command */
	alloclen = lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_FCOE,
				    LPFC_MBOX_OPCODE_FCOE_POST_SGL_PAGES, reqlen,
				    LPFC_SLI4_MBX_NEMBED);

	if (alloclen < reqlen) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"2561 Allocated DMA memory size (%d) is "
				"less than the requested DMA memory "
				"size (%d)\n", alloclen, reqlen);
		lpfc_sli4_mbox_cmd_free(phba, mbox);
		return -ENOMEM;
	}

	/* Get the first SGE entry from the non-embedded DMA memory */
	viraddr = mbox->sge_array->addr[0];

	/* Set up the SGL pages in the non-embedded DMA pages */
	sgl = (struct lpfc_mbx_post_uembed_sgl_page1 *)viraddr;
	sgl_pg_pairs = &sgl->sgl_pg_pairs;

	pg_pairs = 0;
	list_for_each_entry(psb, sblist, list) {
		/* Set up the sge entry */
		sgl_pg_pairs->sgl_pg0_addr_lo =
			cpu_to_le32(putPaddrLow(psb->dma_phys_bpl));
		sgl_pg_pairs->sgl_pg0_addr_hi =
			cpu_to_le32(putPaddrHigh(psb->dma_phys_bpl));
		/* A buffer bigger than one SGL page needs a second page. */
		if (phba->cfg_sg_dma_buf_size > SGL_PAGE_SIZE)
			pdma_phys_bpl1 = psb->dma_phys_bpl + SGL_PAGE_SIZE;
		else
			pdma_phys_bpl1 = 0;
		sgl_pg_pairs->sgl_pg1_addr_lo =
			cpu_to_le32(putPaddrLow(pdma_phys_bpl1));
		sgl_pg_pairs->sgl_pg1_addr_hi =
			cpu_to_le32(putPaddrHigh(pdma_phys_bpl1));
		/* Keep the first xritag on the list */
		if (pg_pairs == 0)
			xritag_start = psb->cur_iocbq.sli4_xritag;
		sgl_pg_pairs++;
		pg_pairs++;
	}
	bf_set(lpfc_post_sgl_pages_xri, sgl, xritag_start);
	bf_set(lpfc_post_sgl_pages_xricnt, sgl, pg_pairs);
	/* Perform endian conversion if necessary */
	sgl->word0 = cpu_to_le32(sgl->word0);

	if (!phba->sli4_hba.intr_enable)
		rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
	else {
		mbox_tmo = lpfc_mbox_tmo_val(phba, mbox);
		rc = lpfc_sli_issue_mbox_wait(phba, mbox, mbox_tmo);
	}
	shdr = (union lpfc_sli4_cfg_shdr *) &sgl->cfg_shdr;
	shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
	shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
	/* On timeout the completion path frees the mailbox. */
	if (rc != MBX_TIMEOUT)
		lpfc_sli4_mbox_cmd_free(phba, mbox);
	if (shdr_status || shdr_add_status || rc) {
		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
				"2564 POST_SGL_BLOCK mailbox command failed "
				"status x%x add_status x%x mbx status x%x\n",
				shdr_status, shdr_add_status, rc);
		rc = -ENXIO;
	}
	return rc;
}
16510
16511
16512
16513
16514
16515
16516
16517
16518
16519
16520
/**
 * lpfc_fc_frame_check - Check that a received frame is a valid FC frame
 * @phba: Pointer to the HBA structure.
 * @fc_hdr: Pointer to the received frame's FC header.
 *
 * Validates the R_CTL and TYPE fields of an unsolicited frame.
 * VFT-tagged frames are unwrapped and the inner header re-checked
 * recursively.
 *
 * Return: 0 if the frame should be processed, 1 if it must be dropped.
 **/
static int
lpfc_fc_frame_check(struct lpfc_hba *phba, struct fc_frame_header *fc_hdr)
{
	struct fc_vft_header *fc_vft_hdr;
	/* Raw header words, used only for the debug trace below. */
	uint32_t *header = (uint32_t *) fc_hdr;

#define FC_RCTL_MDS_DIAGS 0xF4

	/* Accept only R_CTL values known to the driver. */
	switch (fc_hdr->fh_r_ctl) {
	case FC_RCTL_DD_UNCAT:
	case FC_RCTL_DD_SOL_DATA:
	case FC_RCTL_DD_UNSOL_CTL:
	case FC_RCTL_DD_SOL_CTL:
	case FC_RCTL_DD_UNSOL_DATA:
	case FC_RCTL_DD_DATA_DESC:
	case FC_RCTL_DD_UNSOL_CMD:
	case FC_RCTL_DD_CMD_STATUS:
	case FC_RCTL_ELS_REQ:
	case FC_RCTL_ELS_REP:
	case FC_RCTL_ELS4_REQ:
	case FC_RCTL_ELS4_REP:
	case FC_RCTL_BA_NOP:
	case FC_RCTL_BA_ABTS:
	case FC_RCTL_BA_RMC:
	case FC_RCTL_BA_ACC:
	case FC_RCTL_BA_RJT:
	case FC_RCTL_BA_PRMT:
	case FC_RCTL_ACK_1:
	case FC_RCTL_ACK_0:
	case FC_RCTL_P_RJT:
	case FC_RCTL_F_RJT:
	case FC_RCTL_P_BSY:
	case FC_RCTL_F_BSY:
	case FC_RCTL_F_BSYL:
	case FC_RCTL_LCR:
	case FC_RCTL_MDS_DIAGS:
	case FC_RCTL_END:
		break;
	case FC_RCTL_VFTH:
		/* Virtual fabric tagging header: re-check the frame header
		 * that immediately follows the VFT header.
		 * NOTE(review): recursion assumes at most one VFT wrapper
		 * per frame - confirm against the receive path.
		 */
		fc_vft_hdr = (struct fc_vft_header *)fc_hdr;
		fc_hdr = &((struct fc_frame_header *)fc_vft_hdr)[1];
		return lpfc_fc_frame_check(phba, fc_hdr);
	default:
		goto drop;
	}

#define FC_TYPE_VENDOR_UNIQUE 0xFF

	/* Accept only frame TYPEs the driver can deliver to a ULP. */
	switch (fc_hdr->fh_type) {
	case FC_TYPE_BLS:
	case FC_TYPE_ELS:
	case FC_TYPE_FCP:
	case FC_TYPE_CT:
	case FC_TYPE_NVME:
	case FC_TYPE_VENDOR_UNIQUE:
		break;
	case FC_TYPE_IP:
	case FC_TYPE_ILS:
	default:
		goto drop;
	}

	lpfc_printf_log(phba, KERN_INFO, LOG_ELS,
			"2538 Received frame rctl:x%x, type:x%x, "
			"frame Data:%08x %08x %08x %08x %08x %08x %08x\n",
			fc_hdr->fh_r_ctl, fc_hdr->fh_type,
			be32_to_cpu(header[0]), be32_to_cpu(header[1]),
			be32_to_cpu(header[2]), be32_to_cpu(header[3]),
			be32_to_cpu(header[4]), be32_to_cpu(header[5]),
			be32_to_cpu(header[6]));
	return 0;
drop:
	lpfc_printf_log(phba, KERN_WARNING, LOG_ELS,
			"2539 Dropped frame rctl:x%x type:x%x\n",
			fc_hdr->fh_r_ctl, fc_hdr->fh_type);
	return 1;
}
16599
16600
16601
16602
16603
16604
16605
16606
16607
16608static uint32_t
16609lpfc_fc_hdr_get_vfi(struct fc_frame_header *fc_hdr)
16610{
16611 struct fc_vft_header *fc_vft_hdr = (struct fc_vft_header *)fc_hdr;
16612
16613 if (fc_hdr->fh_r_ctl != FC_RCTL_VFTH)
16614 return 0;
16615 return bf_get(fc_vft_hdr_vf_id, fc_vft_hdr);
16616}
16617
16618
16619
16620
16621
16622
16623
16624
16625
16626
16627
16628
16629
16630static struct lpfc_vport *
16631lpfc_fc_frame_to_vport(struct lpfc_hba *phba, struct fc_frame_header *fc_hdr,
16632 uint16_t fcfi, uint32_t did)
16633{
16634 struct lpfc_vport **vports;
16635 struct lpfc_vport *vport = NULL;
16636 int i;
16637
16638 if (did == Fabric_DID)
16639 return phba->pport;
16640 if ((phba->pport->fc_flag & FC_PT2PT) &&
16641 !(phba->link_state == LPFC_HBA_READY))
16642 return phba->pport;
16643
16644 vports = lpfc_create_vport_work_array(phba);
16645 if (vports != NULL) {
16646 for (i = 0; i <= phba->max_vpi && vports[i] != NULL; i++) {
16647 if (phba->fcf.fcfi == fcfi &&
16648 vports[i]->vfi == lpfc_fc_hdr_get_vfi(fc_hdr) &&
16649 vports[i]->fc_myDID == did) {
16650 vport = vports[i];
16651 break;
16652 }
16653 }
16654 }
16655 lpfc_destroy_vport_work_array(phba, vports);
16656 return vport;
16657}
16658
16659
16660
16661
16662
16663
16664
16665
16666
16667
16668
16669static void
16670lpfc_update_rcv_time_stamp(struct lpfc_vport *vport)
16671{
16672 struct lpfc_dmabuf *h_buf;
16673 struct hbq_dmabuf *dmabuf = NULL;
16674
16675
16676 h_buf = list_get_first(&vport->rcv_buffer_list,
16677 struct lpfc_dmabuf, list);
16678 if (!h_buf)
16679 return;
16680 dmabuf = container_of(h_buf, struct hbq_dmabuf, hbuf);
16681 vport->rcv_buffer_time_stamp = dmabuf->time_stamp;
16682}
16683
16684
16685
16686
16687
16688
16689
16690
16691
/**
 * lpfc_cleanup_rcv_buffers - Free all partially received sequences
 * @vport: The vport whose receive buffer list is to be flushed.
 *
 * Walks @vport's rcv_buffer_list and frees every queued sequence header
 * buffer together with all data buffers chained to it.  Used when the
 * pending unsolicited sequences can never complete.
 **/
void
lpfc_cleanup_rcv_buffers(struct lpfc_vport *vport)
{
	struct lpfc_dmabuf *h_buf, *hnext;
	struct lpfc_dmabuf *d_buf, *dnext;
	struct hbq_dmabuf *dmabuf = NULL;

	/* start with the oldest sequence on the rcv buffer list */
	list_for_each_entry_safe(h_buf, hnext, &vport->rcv_buffer_list, list) {
		dmabuf = container_of(h_buf, struct hbq_dmabuf, hbuf);
		list_del_init(&dmabuf->hbuf.list);
		/* Free the frames chained behind the sequence header. */
		list_for_each_entry_safe(d_buf, dnext,
					 &dmabuf->dbuf.list, list) {
			list_del_init(&d_buf->list);
			lpfc_in_buf_free(vport->phba, d_buf);
		}
		/* Finally release the sequence header frame itself. */
		lpfc_in_buf_free(vport->phba, &dmabuf->dbuf);
	}
}
16711
16712
16713
16714
16715
16716
16717
16718
16719
16720
16721
16722
16723
/**
 * lpfc_rcv_seq_check_edtov - Clean up timed out receive sequences
 * @vport: The vport the received sequences belong to.
 *
 * Frees every partially assembled sequence on @vport's receive buffer
 * list that has been held longer than E_D_TOV.  The list is ordered
 * oldest-first, so the scan stops at the first un-expired entry.
 **/
void
lpfc_rcv_seq_check_edtov(struct lpfc_vport *vport)
{
	struct lpfc_dmabuf *h_buf, *hnext;
	struct lpfc_dmabuf *d_buf, *dnext;
	struct hbq_dmabuf *dmabuf = NULL;
	unsigned long timeout;
	int abort_count = 0;

	/* Quick exit while even the oldest sequence is within E_D_TOV. */
	timeout = (msecs_to_jiffies(vport->phba->fc_edtov) +
		   vport->rcv_buffer_time_stamp);
	if (list_empty(&vport->rcv_buffer_list) ||
	    time_before(jiffies, timeout))
		return;
	/* start with the oldest sequence on the rcv buffer list */
	list_for_each_entry_safe(h_buf, hnext, &vport->rcv_buffer_list, list) {
		dmabuf = container_of(h_buf, struct hbq_dmabuf, hbuf);
		timeout = (msecs_to_jiffies(vport->phba->fc_edtov) +
			   dmabuf->time_stamp);
		/* List is oldest-first: first un-expired entry ends scan. */
		if (time_before(jiffies, timeout))
			break;
		abort_count++;
		list_del_init(&dmabuf->hbuf.list);
		/* Free all frames chained to this expired sequence. */
		list_for_each_entry_safe(d_buf, dnext,
					 &dmabuf->dbuf.list, list) {
			list_del_init(&d_buf->list);
			lpfc_in_buf_free(vport->phba, d_buf);
		}
		lpfc_in_buf_free(vport->phba, &dmabuf->dbuf);
	}
	/* Refresh the vport stamp if anything was removed. */
	if (abort_count)
		lpfc_update_rcv_time_stamp(vport);
}
16757
16758
16759
16760
16761
16762
16763
16764
16765
16766
16767
16768
16769
/**
 * lpfc_fc_frame_add - Add a frame to the vport's list of received sequences
 * @vport: The vport the frame was received on.
 * @dmabuf: The newly received frame.
 *
 * Searches rcv_buffer_list for a partial sequence matching the frame's
 * SEQ_ID, OX_ID and S_ID.  If none exists, the frame starts a new
 * sequence.  Otherwise the frame is inserted into the sequence's dbuf
 * list in SEQ_CNT order, searching backwards from the tail since
 * in-order delivery is the common case.
 *
 * Return: the sequence head after linking the frame (the frame itself
 * when it starts a sequence or becomes the new head), or NULL when no
 * insertion point was found (same SEQ_CNT already queued), in which
 * case the caller should drop the frame.
 **/
static struct hbq_dmabuf *
lpfc_fc_frame_add(struct lpfc_vport *vport, struct hbq_dmabuf *dmabuf)
{
	struct fc_frame_header *new_hdr;
	struct fc_frame_header *temp_hdr;
	struct lpfc_dmabuf *d_buf;
	struct lpfc_dmabuf *h_buf;
	struct hbq_dmabuf *seq_dmabuf = NULL;
	struct hbq_dmabuf *temp_dmabuf = NULL;
	uint8_t found = 0;

	INIT_LIST_HEAD(&dmabuf->dbuf.list);
	dmabuf->time_stamp = jiffies;
	new_hdr = (struct fc_frame_header *)dmabuf->hbuf.virt;

	/* Use the header buffer to find the sequence this frame belongs to */
	list_for_each_entry(h_buf, &vport->rcv_buffer_list, list) {
		temp_hdr = (struct fc_frame_header *)h_buf->virt;
		if ((temp_hdr->fh_seq_id != new_hdr->fh_seq_id) ||
		    (temp_hdr->fh_ox_id != new_hdr->fh_ox_id) ||
		    (memcmp(&temp_hdr->fh_s_id, &new_hdr->fh_s_id, 3)))
			continue;
		/* found a pending sequence that matches this frame */
		seq_dmabuf = container_of(h_buf, struct hbq_dmabuf, hbuf);
		break;
	}
	if (!seq_dmabuf) {
		/* This frame starts a new sequence: queue it on the vport
		 * list and hand it back as the sequence head.
		 */
		list_add_tail(&dmabuf->hbuf.list, &vport->rcv_buffer_list);
		lpfc_update_rcv_time_stamp(vport);
		return dmabuf;
	}
	temp_hdr = seq_dmabuf->hbuf.virt;
	if (be16_to_cpu(new_hdr->fh_seq_cnt) <
		be16_to_cpu(temp_hdr->fh_seq_cnt)) {
		/* New frame precedes the current head: it becomes the new
		 * sequence head with the old head chained behind it.
		 */
		list_del_init(&seq_dmabuf->hbuf.list);
		list_add_tail(&dmabuf->hbuf.list, &vport->rcv_buffer_list);
		list_add_tail(&dmabuf->dbuf.list, &seq_dmabuf->dbuf.list);
		lpfc_update_rcv_time_stamp(vport);
		return dmabuf;
	}
	/* Touch the sequence: move it to the tail and refresh its stamp. */
	list_move_tail(&seq_dmabuf->hbuf.list, &vport->rcv_buffer_list);
	seq_dmabuf->time_stamp = jiffies;
	lpfc_update_rcv_time_stamp(vport);
	if (list_empty(&seq_dmabuf->dbuf.list)) {
		temp_hdr = dmabuf->hbuf.virt;
		list_add_tail(&dmabuf->dbuf.list, &seq_dmabuf->dbuf.list);
		return seq_dmabuf;
	}
	/* Search backwards from the tail for the insertion point. */
	d_buf = list_entry(seq_dmabuf->dbuf.list.prev, typeof(*d_buf), list);
	while (!found) {
		temp_dmabuf = container_of(d_buf, struct hbq_dmabuf, dbuf);
		temp_hdr = (struct fc_frame_header *)temp_dmabuf->hbuf.virt;
		/* Insert after the first frame with a smaller SEQ_CNT;
		 * with in-order arrival this hits on the first iteration.
		 */
		if (be16_to_cpu(new_hdr->fh_seq_cnt) >
			be16_to_cpu(temp_hdr->fh_seq_cnt)) {
			list_add(&dmabuf->dbuf.list, &temp_dmabuf->dbuf.list);
			found = 1;
			break;
		}
		/* Stop once the walk wraps back to the list head. */
		if (&d_buf->list == &seq_dmabuf->dbuf.list)
			break;
		d_buf = list_entry(d_buf->list.prev, typeof(*d_buf), list);
	}

	if (found)
		return seq_dmabuf;
	/* No insertion point: caller treats this as a dropped frame. */
	return NULL;
}
16848
16849
16850
16851
16852
16853
16854
16855
16856
16857
16858
16859
16860
16861
16862
16863
16864
/**
 * lpfc_sli4_abort_partial_seq - Abort a partially assembled unsol sequence
 * @vport: Pointer to the vport on which this sequence was received.
 * @dmabuf: Pointer to the ABTS frame received.
 *
 * Looks up the pending partial sequence matching the ABTS frame's
 * SEQ_ID, OX_ID and S_ID and, if found, frees every data frame queued
 * behind its header.
 * NOTE(review): the sequence header buffer itself stays on the vport's
 * rcv_buffer_list here - confirm it is released by a later path.
 *
 * Return: true if a matching partial sequence was found and its frames
 * discarded, false otherwise.
 **/
static bool
lpfc_sli4_abort_partial_seq(struct lpfc_vport *vport,
			    struct hbq_dmabuf *dmabuf)
{
	struct fc_frame_header *new_hdr;
	struct fc_frame_header *temp_hdr;
	struct lpfc_dmabuf *d_buf, *n_buf, *h_buf;
	struct hbq_dmabuf *seq_dmabuf = NULL;

	/* Use the header buffer to find the sequence matching this frame */
	INIT_LIST_HEAD(&dmabuf->dbuf.list);
	INIT_LIST_HEAD(&dmabuf->hbuf.list);
	new_hdr = (struct fc_frame_header *)dmabuf->hbuf.virt;
	list_for_each_entry(h_buf, &vport->rcv_buffer_list, list) {
		temp_hdr = (struct fc_frame_header *)h_buf->virt;
		if ((temp_hdr->fh_seq_id != new_hdr->fh_seq_id) ||
		    (temp_hdr->fh_ox_id != new_hdr->fh_ox_id) ||
		    (memcmp(&temp_hdr->fh_s_id, &new_hdr->fh_s_id, 3)))
			continue;
		/* found a pending sequence that matches this frame */
		seq_dmabuf = container_of(h_buf, struct hbq_dmabuf, hbuf);
		break;
	}

	/* Free up all the frames from the partially assembled sequence */
	if (seq_dmabuf) {
		list_for_each_entry_safe(d_buf, n_buf,
					 &seq_dmabuf->dbuf.list, list) {
			list_del_init(&d_buf->list);
			lpfc_in_buf_free(vport->phba, d_buf);
		}
		return true;
	}
	return false;
}
16900
16901
16902
16903
16904
16905
16906
16907
16908
16909
16910
16911
16912
16913
16914
16915
16916
16917static bool
16918lpfc_sli4_abort_ulp_seq(struct lpfc_vport *vport, struct hbq_dmabuf *dmabuf)
16919{
16920 struct lpfc_hba *phba = vport->phba;
16921 int handled;
16922
16923
16924 if (phba->sli_rev < LPFC_SLI_REV4)
16925 return false;
16926
16927
16928 handled = lpfc_ct_handle_unsol_abort(phba, dmabuf);
16929 if (handled)
16930 return true;
16931
16932 return false;
16933}
16934
16935
16936
16937
16938
16939
16940
16941
16942
16943
16944
/**
 * lpfc_sli4_seq_abort_rsp_cmpl - BLS ABORT RSP completion handler
 * @phba: Pointer to HBA context object.
 * @cmd_iocbq: pointer to the command iocbq structure.
 * @rsp_iocbq: pointer to the response iocbq structure.
 *
 * Drops the node reference taken when the BLS ABORT response was
 * issued, releases the command iocb, and logs any failure status
 * reported in the response.
 **/
static void
lpfc_sli4_seq_abort_rsp_cmpl(struct lpfc_hba *phba,
			     struct lpfc_iocbq *cmd_iocbq,
			     struct lpfc_iocbq *rsp_iocbq)
{
	struct lpfc_nodelist *ndlp;

	if (cmd_iocbq) {
		ndlp = (struct lpfc_nodelist *)cmd_iocbq->context1;
		/* Drop the issue-time reference, then let the node go if
		 * nothing else holds it.
		 * NOTE(review): ndlp is used after lpfc_nlp_put() -
		 * presumably another reference keeps it alive; confirm.
		 */
		lpfc_nlp_put(ndlp);
		lpfc_nlp_not_used(ndlp);
		lpfc_sli_release_iocbq(phba, cmd_iocbq);
	}

	/* A non-zero status means the response was not delivered. */
	if (rsp_iocbq && rsp_iocbq->iocb.ulpStatus)
		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
			"3154 BLS ABORT RSP failed, data: x%x/x%x\n",
			rsp_iocbq->iocb.ulpStatus,
			rsp_iocbq->iocb.un.ulpWord[4]);
}
16966
16967
16968
16969
16970
16971
16972
16973
16974
16975uint16_t
16976lpfc_sli4_xri_inrange(struct lpfc_hba *phba,
16977 uint16_t xri)
16978{
16979 uint16_t i;
16980
16981 for (i = 0; i < phba->sli4_hba.max_cfg_param.max_xri; i++) {
16982 if (xri == phba->sli4_hba.xri_ids[i])
16983 return i;
16984 }
16985 return NO_XRI;
16986}
16987
16988
16989
16990
16991
16992
16993
16994
16995
/**
 * lpfc_sli4_seq_abort_rsp - Issue a BLS response (BA_ACC/BA_RJT) to an ABTS
 * @vport: Pointer to the vport the ABTS was received on.
 * @fc_hdr: FC header of the received ABTS frame.
 * @aborted: true if the local exchange was successfully aborted.
 *
 * Builds and issues an XMIT_BLS_RSP64 iocb carrying BA_ACC when the
 * exchange was aborted, or BA_RJT when it was not (or when the XRI of a
 * responder-initiated abort is out of the driver's iocb range).
 **/
void
lpfc_sli4_seq_abort_rsp(struct lpfc_vport *vport,
			struct fc_frame_header *fc_hdr, bool aborted)
{
	struct lpfc_hba *phba = vport->phba;
	struct lpfc_iocbq *ctiocb = NULL;
	struct lpfc_nodelist *ndlp;
	uint16_t oxid, rxid, xri, lxri;
	uint32_t sid, fctl;
	IOCB_t *icmd;
	int rc;

	if (!lpfc_is_link_up(phba))
		return;

	sid = sli4_sid_from_fc_hdr(fc_hdr);
	oxid = be16_to_cpu(fc_hdr->fh_ox_id);
	rxid = be16_to_cpu(fc_hdr->fh_rx_id);

	ndlp = lpfc_findnode_did(vport, sid);
	if (!ndlp) {
		/* No node for the sender: create one so the response can
		 * be addressed.
		 */
		ndlp = lpfc_nlp_init(vport, sid);
		if (!ndlp) {
			lpfc_printf_vlog(vport, KERN_WARNING, LOG_ELS,
					 "1268 Failed to allocate ndlp for "
					 "oxid:x%x SID:x%x\n", oxid, sid);
			return;
		}
		/* Put ndlp onto node list */
		lpfc_enqueue_node(vport, ndlp);
	} else if (!NLP_CHK_NODE_ACT(ndlp)) {
		/* Re-activate the node without removing it from the list. */
		ndlp = lpfc_enable_node(vport, ndlp, NLP_STE_UNUSED_NODE);
		if (!ndlp) {
			lpfc_printf_vlog(vport, KERN_WARNING, LOG_ELS,
					 "3275 Failed to active ndlp found "
					 "for oxid:x%x SID:x%x\n", oxid, sid);
			return;
		}
	}

	/* Allocate buffer for rsp iocb */
	ctiocb = lpfc_sli_get_iocbq(phba);
	if (!ctiocb)
		return;

	/* Extract the F_CTL field from FC_HDR */
	fctl = sli4_fctl_from_fc_hdr(fc_hdr);

	icmd = &ctiocb->iocb;
	icmd->un.xseq64.bdl.bdeSize = 0;
	icmd->un.xseq64.bdl.ulpIoTag32 = 0;
	icmd->un.xseq64.w5.hcsw.Dfctl = 0;
	/* Default to BA_ACC; may be changed to BA_RJT below. */
	icmd->un.xseq64.w5.hcsw.Rctl = FC_RCTL_BA_ACC;
	icmd->un.xseq64.w5.hcsw.Type = FC_TYPE_BLS;

	/* Fill in the rest of iocb fields */
	icmd->ulpCommand = CMD_XMIT_BLS_RSP64_CX;
	icmd->ulpBdeCount = 0;
	icmd->ulpLe = 1;
	icmd->ulpClass = CLASS3;
	icmd->ulpContext = phba->sli4_hba.rpi_ids[ndlp->nlp_rpi];
	/* Reference is dropped in lpfc_sli4_seq_abort_rsp_cmpl(). */
	ctiocb->context1 = lpfc_nlp_get(ndlp);

	/* NOTE(review): this NULL assignment is immediately overwritten
	 * two lines below; kept as-is to leave behavior untouched.
	 */
	ctiocb->iocb_cmpl = NULL;
	ctiocb->vport = phba->pport;
	ctiocb->iocb_cmpl = lpfc_sli4_seq_abort_rsp_cmpl;
	ctiocb->sli4_lxritag = NO_XRI;
	ctiocb->sli4_xritag = NO_XRI;

	if (fctl & FC_FC_EX_CTX)
		/* Exchange responder sent the abort, so this side owns
		 * the OX_ID.
		 */
		xri = oxid;
	else
		xri = rxid;
	lxri = lpfc_sli4_xri_inrange(phba, xri);
	if (lxri != NO_XRI)
		/* Mark the exchange busy until RRQ completes. */
		lpfc_set_rrq_active(phba, ndlp, lxri,
			(xri == oxid) ? rxid : oxid, 0);
	/* A responder-side ABTS whose logical xri exceeds the driver's
	 * iocb count cannot be matched here, so reject it.
	 * NOTE(review): presumably such xris belong to a range handled by
	 * upper layers - confirm against lpfc_sli4_get_iocb_cnt().
	 */
	if ((fctl & FC_FC_EX_CTX) &&
	    (lxri > lpfc_sli4_get_iocb_cnt(phba))) {
		icmd->un.xseq64.w5.hcsw.Rctl = FC_RCTL_BA_RJT;
		bf_set(lpfc_vndr_code, &icmd->un.bls_rsp, 0);
		bf_set(lpfc_rsn_expln, &icmd->un.bls_rsp, FC_BA_RJT_INV_XID);
		bf_set(lpfc_rsn_code, &icmd->un.bls_rsp, FC_BA_RJT_UNABLE);
	}

	/* If the exchange was not actually aborted, answer with BA_RJT
	 * instead of BA_ACC.
	 */
	if (aborted == false) {
		icmd->un.xseq64.w5.hcsw.Rctl = FC_RCTL_BA_RJT;
		bf_set(lpfc_vndr_code, &icmd->un.bls_rsp, 0);
		bf_set(lpfc_rsn_expln, &icmd->un.bls_rsp, FC_BA_RJT_INV_XID);
		bf_set(lpfc_rsn_code, &icmd->un.bls_rsp, FC_BA_RJT_UNABLE);
	}

	if (fctl & FC_FC_EX_CTX) {
		/* ABTS sent by the exchange responder: mark the response
		 * as answering an unsolicited responder abort.
		 */
		bf_set(lpfc_abts_orig, &icmd->un.bls_rsp, LPFC_ABTS_UNSOL_RSP);
	} else {
		/* ABTS sent by the exchange initiator: mark the response
		 * as answering an unsolicited initiator abort.
		 */
		bf_set(lpfc_abts_orig, &icmd->un.bls_rsp, LPFC_ABTS_UNSOL_INT);
	}
	bf_set(lpfc_abts_rxid, &icmd->un.bls_rsp, rxid);
	bf_set(lpfc_abts_oxid, &icmd->un.bls_rsp, oxid);

	/* Xmit CT abts response on exchange <xid> */
	lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
			 "1200 Send BLS cmd x%x on oxid x%x Data: x%x\n",
			 icmd->un.xseq64.w5.hcsw.Rctl, oxid, phba->link_state);

	rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, ctiocb, 0);
	if (rc == IOCB_ERROR) {
		lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS,
				 "2925 Failed to issue CT ABTS RSP x%x on "
				 "xri x%x, Data x%x\n",
				 icmd->un.xseq64.w5.hcsw.Rctl, oxid,
				 phba->link_state);
		/* Undo the node reference and release the unsent iocb. */
		lpfc_nlp_put(ndlp);
		ctiocb->context1 = NULL;
		lpfc_sli_release_iocbq(phba, ctiocb);
	}
}
17134
17135
17136
17137
17138
17139
17140
17141
17142
17143
17144
17145
17146
17147
17148static void
17149lpfc_sli4_handle_unsol_abort(struct lpfc_vport *vport,
17150 struct hbq_dmabuf *dmabuf)
17151{
17152 struct lpfc_hba *phba = vport->phba;
17153 struct fc_frame_header fc_hdr;
17154 uint32_t fctl;
17155 bool aborted;
17156
17157
17158 memcpy(&fc_hdr, dmabuf->hbuf.virt, sizeof(struct fc_frame_header));
17159 fctl = sli4_fctl_from_fc_hdr(&fc_hdr);
17160
17161 if (fctl & FC_FC_EX_CTX) {
17162
17163 aborted = true;
17164 } else {
17165
17166 aborted = lpfc_sli4_abort_partial_seq(vport, dmabuf);
17167 if (aborted == false)
17168 aborted = lpfc_sli4_abort_ulp_seq(vport, dmabuf);
17169 }
17170 lpfc_in_buf_free(phba, &dmabuf->dbuf);
17171
17172 if (phba->nvmet_support) {
17173 lpfc_nvmet_rcv_unsol_abort(vport, &fc_hdr);
17174 return;
17175 }
17176
17177
17178 lpfc_sli4_seq_abort_rsp(vport, &fc_hdr, aborted);
17179}
17180
17181
17182
17183
17184
17185
17186
17187
17188
17189
17190
17191
17192
/**
 * lpfc_seq_complete - Determine if a received sequence is complete
 * @dmabuf: The sequence head (first frame) of a partial sequence.
 *
 * Walks the frames chained to the sequence head, verifying that SEQ_CNT
 * values are consecutive starting at zero and that a frame carrying the
 * END_SEQ F_CTL bit has arrived.
 *
 * Return: 1 if the sequence is complete, otherwise 0.
 **/
static int
lpfc_seq_complete(struct hbq_dmabuf *dmabuf)
{
	struct fc_frame_header *hdr;
	struct lpfc_dmabuf *d_buf;
	struct hbq_dmabuf *seq_dmabuf;
	uint32_t fctl;
	int seq_count = 0;

	hdr = (struct fc_frame_header *)dmabuf->hbuf.virt;
	/* The head must be frame 0 (0 is identical in either byte order,
	 * so no endian conversion is needed for this compare).
	 */
	if (hdr->fh_seq_cnt != seq_count)
		return 0;
	fctl = (hdr->fh_f_ctl[0] << 16 |
		hdr->fh_f_ctl[1] << 8 |
		hdr->fh_f_ctl[2]);
	/* A single-frame sequence is complete when it carries END_SEQ. */
	if (fctl & FC_FC_END_SEQ)
		return 1;
	list_for_each_entry(d_buf, &dmabuf->dbuf.list, list) {
		seq_dmabuf = container_of(d_buf, struct hbq_dmabuf, dbuf);
		hdr = (struct fc_frame_header *)seq_dmabuf->hbuf.virt;
		/* Frame counts must be consecutive with no gaps. */
		if (++seq_count != be16_to_cpu(hdr->fh_seq_cnt))
			return 0;
		fctl = (hdr->fh_f_ctl[0] << 16 |
			hdr->fh_f_ctl[1] << 8 |
			hdr->fh_f_ctl[2]);
		/* The last frame of a complete sequence sets END_SEQ. */
		if (fctl & FC_FC_END_SEQ)
			return 1;
	}
	return 0;
}
17227
17228
17229
17230
17231
17232
17233
17234
17235
17236
17237
17238
17239
17240
/**
 * lpfc_prep_seq - Convert a completed receive sequence into an iocbq chain
 * @vport: virtual N_Port the sequence arrived on.
 * @seq_dmabuf: first frame of the completed sequence.
 *
 * Each iocbq can carry two receive buffers (context2 and context3), so this
 * walks the sequence's buffer list packing two buffers per iocbq and chains
 * any additional iocbqs on the first one's list.  ELS frames get the
 * RCV_ELS64 command form with the NPIV DID; everything else gets RCV_SEQ64.
 * Returns the first iocbq, or NULL if none could be allocated.  When a later
 * iocbq allocation fails, the first iocbq is flagged with
 * IOSTAT_FCP_RSP_ERROR/IOERR_NO_RESOURCES and the orphaned buffers are freed.
 */
static struct lpfc_iocbq *
lpfc_prep_seq(struct lpfc_vport *vport, struct hbq_dmabuf *seq_dmabuf)
{
	struct hbq_dmabuf *hbq_buf;
	struct lpfc_dmabuf *d_buf, *n_buf;
	struct lpfc_iocbq *first_iocbq, *iocbq;
	struct fc_frame_header *fc_hdr;
	uint32_t sid;
	uint32_t len, tot_len;
	struct ulp_bde64 *pbde;

	fc_hdr = (struct fc_frame_header *)seq_dmabuf->hbuf.virt;
	/* remove from receive buffer list */
	list_del_init(&seq_dmabuf->hbuf.list);
	lpfc_update_rcv_time_stamp(vport);
	/* get the Remote Port's SID */
	sid = sli4_sid_from_fc_hdr(fc_hdr);
	tot_len = 0;
	/* Get an iocbq struct to fill in. */
	first_iocbq = lpfc_sli_get_iocbq(vport->phba);
	if (first_iocbq) {
		/* Initialize the first IOCB. */
		first_iocbq->iocb.unsli3.rcvsli3.acc_len = 0;
		first_iocbq->iocb.ulpStatus = IOSTAT_SUCCESS;
		first_iocbq->vport = vport;

		/* Check FC Header to see what TYPE of frame we are rcv'ing */
		if (sli4_type_from_fc_hdr(fc_hdr) == FC_TYPE_ELS) {
			first_iocbq->iocb.ulpCommand = CMD_IOCB_RCV_ELS64_CX;
			first_iocbq->iocb.un.rcvels.parmRo =
				sli4_did_from_fc_hdr(fc_hdr);
			first_iocbq->iocb.ulpPU = PARM_NPIV_DID;
		} else
			first_iocbq->iocb.ulpCommand = CMD_IOCB_RCV_SEQ64_CX;
		first_iocbq->iocb.ulpContext = NO_XRI;
		first_iocbq->iocb.unsli3.rcvsli3.ox_id =
			be16_to_cpu(fc_hdr->fh_ox_id);
		/* iocbq is prepped for internal consumption.  Physical vpi. */
		first_iocbq->iocb.unsli3.rcvsli3.vpi =
			vport->phba->vpi_ids[vport->vpi];
		/* put the first buffer into the first IOCBq */
		tot_len = bf_get(lpfc_rcqe_length,
				 &seq_dmabuf->cq_event.cqe.rcqe_cmpl);

		first_iocbq->context2 = &seq_dmabuf->dbuf;
		first_iocbq->context3 = NULL;
		first_iocbq->iocb.ulpBdeCount = 1;
		/* BDE size is capped at the receive buffer size */
		if (tot_len > LPFC_DATA_BUF_SIZE)
			first_iocbq->iocb.un.cont64[0].tus.f.bdeSize =
				LPFC_DATA_BUF_SIZE;
		else
			first_iocbq->iocb.un.cont64[0].tus.f.bdeSize = tot_len;

		first_iocbq->iocb.un.rcvels.remoteID = sid;

		first_iocbq->iocb.unsli3.rcvsli3.acc_len = tot_len;
	}
	iocbq = first_iocbq;
	/*
	 * Each IOCBq can have two Buffers assigned, so go through the list
	 * of buffers assigning each one to the IOCBq or creating a new one.
	 */
	list_for_each_entry_safe(d_buf, n_buf, &seq_dmabuf->dbuf.list, list) {
		if (!iocbq) {
			/* No iocbq to hold this buffer - discard it */
			lpfc_in_buf_free(vport->phba, d_buf);
			continue;
		}
		if (!iocbq->context3) {
			/* Second buffer slot of the current iocbq is free */
			iocbq->context3 = d_buf;
			iocbq->iocb.ulpBdeCount++;
			/* We need to get the size out of the right CQE */
			hbq_buf = container_of(d_buf, struct hbq_dmabuf, dbuf);
			len = bf_get(lpfc_rcqe_length,
				       &hbq_buf->cq_event.cqe.rcqe_cmpl);
			pbde = (struct ulp_bde64 *)
					&iocbq->iocb.unsli3.sli3Words[4];
			if (len > LPFC_DATA_BUF_SIZE)
				pbde->tus.f.bdeSize = LPFC_DATA_BUF_SIZE;
			else
				pbde->tus.f.bdeSize = len;

			iocbq->iocb.unsli3.rcvsli3.acc_len += len;
			tot_len += len;
		} else {
			/* Current iocbq is full - start a new one */
			iocbq = lpfc_sli_get_iocbq(vport->phba);
			if (!iocbq) {
				if (first_iocbq) {
					/* Flag the whole chain as failed */
					first_iocbq->iocb.ulpStatus =
							IOSTAT_FCP_RSP_ERROR;
					first_iocbq->iocb.un.ulpWord[4] =
							IOERR_NO_RESOURCES;
				}
				lpfc_in_buf_free(vport->phba, d_buf);
				continue;
			}
			/* We need to get the size out of the right CQE */
			hbq_buf = container_of(d_buf, struct hbq_dmabuf, dbuf);
			len = bf_get(lpfc_rcqe_length,
				       &hbq_buf->cq_event.cqe.rcqe_cmpl);
			iocbq->context2 = d_buf;
			iocbq->context3 = NULL;
			iocbq->iocb.ulpBdeCount = 1;
			if (len > LPFC_DATA_BUF_SIZE)
				iocbq->iocb.un.cont64[0].tus.f.bdeSize =
					LPFC_DATA_BUF_SIZE;
			else
				iocbq->iocb.un.cont64[0].tus.f.bdeSize = len;

			tot_len += len;
			iocbq->iocb.unsli3.rcvsli3.acc_len = tot_len;

			iocbq->iocb.un.rcvels.remoteID = sid;
			list_add_tail(&iocbq->list, &first_iocbq->list);
		}
	}
	return first_iocbq;
}
17358
/**
 * lpfc_sli4_send_seq_to_ulp - Hand a completed sequence to the upper layer
 * @vport: virtual N_Port the sequence arrived on.
 * @seq_dmabuf: first frame of the completed sequence.
 *
 * Converts the sequence into an iocbq chain via lpfc_prep_seq(), passes it
 * to lpfc_complete_unsol_iocb() on the ELS ring, then releases every iocbq
 * in the chain (the ULP consumes the data during the callback, not after).
 */
static void
lpfc_sli4_send_seq_to_ulp(struct lpfc_vport *vport,
			  struct hbq_dmabuf *seq_dmabuf)
{
	struct fc_frame_header *fc_hdr;
	struct lpfc_iocbq *iocbq, *curr_iocb, *next_iocb;
	struct lpfc_hba *phba = vport->phba;

	fc_hdr = (struct fc_frame_header *)seq_dmabuf->hbuf.virt;
	iocbq = lpfc_prep_seq(vport, seq_dmabuf);
	if (!iocbq) {
		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
				"2707 Ring %d handler: Failed to allocate "
				"iocb Rctl x%x Type x%x received\n",
				LPFC_ELS_RING,
				fc_hdr->fh_r_ctl, fc_hdr->fh_type);
		return;
	}
	if (!lpfc_complete_unsol_iocb(phba,
				      phba->sli4_hba.els_wq->pring,
				      iocbq, fc_hdr->fh_r_ctl,
				      fc_hdr->fh_type))
		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
			"2540 Ring %d handler: unexpected Rctl "
			"x%x Type x%x received\n",
			LPFC_ELS_RING,
			fc_hdr->fh_r_ctl, fc_hdr->fh_type);

	/* Free iocb created in lpfc_prep_seq */
	list_for_each_entry_safe(curr_iocb, next_iocb,
		&iocbq->list, list) {
		list_del_init(&curr_iocb->list);
		lpfc_sli_release_iocbq(phba, curr_iocb);
	}
	lpfc_sli_release_iocbq(phba, iocbq);
}
17395
17396static void
17397lpfc_sli4_mds_loopback_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
17398 struct lpfc_iocbq *rspiocb)
17399{
17400 struct lpfc_dmabuf *pcmd = cmdiocb->context2;
17401
17402 if (pcmd && pcmd->virt)
17403 dma_pool_free(phba->lpfc_drb_pool, pcmd->virt, pcmd->phys);
17404 kfree(pcmd);
17405 lpfc_sli_release_iocbq(phba, cmdiocb);
17406}
17407
/**
 * lpfc_sli4_handle_mds_loopback - Echo an MDS diagnostic frame back out
 * @vport: virtual N_Port the frame was received on.
 * @dmabuf: receive buffer holding the MDS loopback frame.
 *
 * Copies the received payload into a fresh DMA buffer, builds a SEND_FRAME
 * WQE (overlaid on the iocb) that carries the original FC header words, and
 * issues it on the ELS ring so the frame is transmitted back to the sender.
 * On any failure the partially built resources are torn down and the
 * receive buffer is freed.
 */
static void
lpfc_sli4_handle_mds_loopback(struct lpfc_vport *vport,
			      struct hbq_dmabuf *dmabuf)
{
	struct fc_frame_header *fc_hdr;
	struct lpfc_hba *phba = vport->phba;
	struct lpfc_iocbq *iocbq = NULL;
	union lpfc_wqe *wqe;
	struct lpfc_dmabuf *pcmd = NULL;
	uint32_t frame_len;
	int rc;

	fc_hdr = (struct fc_frame_header *)dmabuf->hbuf.virt;
	frame_len = bf_get(lpfc_rcqe_length, &dmabuf->cq_event.cqe.rcqe_cmpl);

	/* Send the received frame back */
	iocbq = lpfc_sli_get_iocbq(phba);
	if (!iocbq)
		goto exit;

	/* Allocate buffer for command payload */
	pcmd = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
	if (pcmd)
		pcmd->virt = dma_pool_alloc(phba->lpfc_drb_pool, GFP_KERNEL,
					    &pcmd->phys);
	if (!pcmd || !pcmd->virt)
		goto exit;

	INIT_LIST_HEAD(&pcmd->list);

	/* copyin the payload
	 * NOTE(review): assumes frame_len never exceeds the lpfc_drb_pool
	 * buffer size - no bound check here; confirm against the RQ setup.
	 */
	memcpy(pcmd->virt, dmabuf->dbuf.virt, frame_len);

	/* fill in BDE's for command */
	iocbq->iocb.un.xseq64.bdl.addrHigh = putPaddrHigh(pcmd->phys);
	iocbq->iocb.un.xseq64.bdl.addrLow = putPaddrLow(pcmd->phys);
	iocbq->iocb.un.xseq64.bdl.bdeFlags = BUFF_TYPE_BDE_64;
	iocbq->iocb.un.xseq64.bdl.bdeSize = frame_len;

	iocbq->context2 = pcmd;
	iocbq->vport = vport;
	iocbq->iocb_flag &= ~LPFC_FIP_ELS_ID_MASK;
	iocbq->iocb_flag |= LPFC_USE_FCPWQIDX;

	/*
	 * Setup rest of the iocb as though it were a WQE
	 * Build the SEND_FRAME WQE with the original FC header words
	 */
	wqe = (union lpfc_wqe *)&iocbq->iocb;

	wqe->send_frame.frame_len = frame_len;
	wqe->send_frame.fc_hdr_wd0 = be32_to_cpu(*((uint32_t *)fc_hdr));
	wqe->send_frame.fc_hdr_wd1 = be32_to_cpu(*((uint32_t *)fc_hdr + 1));
	wqe->send_frame.fc_hdr_wd2 = be32_to_cpu(*((uint32_t *)fc_hdr + 2));
	wqe->send_frame.fc_hdr_wd3 = be32_to_cpu(*((uint32_t *)fc_hdr + 3));
	wqe->send_frame.fc_hdr_wd4 = be32_to_cpu(*((uint32_t *)fc_hdr + 4));
	wqe->send_frame.fc_hdr_wd5 = be32_to_cpu(*((uint32_t *)fc_hdr + 5));

	iocbq->iocb.ulpCommand = CMD_SEND_FRAME;
	iocbq->iocb.ulpLe = 1;
	iocbq->iocb_cmpl = lpfc_sli4_mds_loopback_cmpl;
	rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, iocbq, 0);
	if (rc == IOCB_ERROR)
		goto exit;

	lpfc_in_buf_free(phba, &dmabuf->dbuf);
	return;

exit:
	/* Roll back whatever was allocated before the failure */
	lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
			"2023 Unable to process MDS loopback frame\n");
	if (pcmd && pcmd->virt)
		dma_pool_free(phba->lpfc_drb_pool, pcmd->virt, pcmd->phys);
	kfree(pcmd);
	if (iocbq)
		lpfc_sli_release_iocbq(phba, iocbq);
	lpfc_in_buf_free(phba, &dmabuf->dbuf);
}
17486
17487
17488
17489
17490
17491
17492
17493
17494
17495
17496
17497
/**
 * lpfc_sli4_handle_received_buffer - Dispatch an unsolicited received frame
 * @phba: pointer to the HBA context object.
 * @dmabuf: receive buffer holding the frame.
 *
 * Validates the frame, extracts the FCF index from the receive CQE (V1 or
 * original layout), and routes the frame: MDS loopback frames go to the
 * loopback handler, ABTS frames to the unsolicited-abort handler, and all
 * others are linked into a sequence; once the sequence is complete it is
 * handed to the upper layer.  Every rejection path frees the buffer.
 */
void
lpfc_sli4_handle_received_buffer(struct lpfc_hba *phba,
				 struct hbq_dmabuf *dmabuf)
{
	struct hbq_dmabuf *seq_dmabuf;
	struct fc_frame_header *fc_hdr;
	struct lpfc_vport *vport;
	uint32_t fcfi;
	uint32_t did;

	/* Process each received buffer */
	fc_hdr = (struct fc_frame_header *)dmabuf->hbuf.virt;

	/* check to see if this a valid type of frame */
	if (lpfc_fc_frame_check(phba, fc_hdr)) {
		lpfc_in_buf_free(phba, &dmabuf->dbuf);
		return;
	}

	/* The FCF index field moved between CQE layout versions */
	if ((bf_get(lpfc_cqe_code,
		    &dmabuf->cq_event.cqe.rcqe_cmpl) == CQE_CODE_RECEIVE_V1))
		fcfi = bf_get(lpfc_rcqe_fcf_id_v1,
			      &dmabuf->cq_event.cqe.rcqe_cmpl);
	else
		fcfi = bf_get(lpfc_rcqe_fcf_id,
			      &dmabuf->cq_event.cqe.rcqe_cmpl);

	/* R_CTL 0xF4 / TYPE 0xFF marks an MDS diagnostic loopback frame */
	if (fc_hdr->fh_r_ctl == 0xF4 && fc_hdr->fh_type == 0xFF) {
		vport = phba->pport;
		/* Handle MDS Loopback frames */
		lpfc_sli4_handle_mds_loopback(vport, dmabuf);
		return;
	}

	/* d_id this frame is directed to */
	did = sli4_did_from_fc_hdr(fc_hdr);

	vport = lpfc_fc_frame_to_vport(phba, fc_hdr, fcfi, did);
	if (!vport) {
		/* throw out the frame */
		lpfc_in_buf_free(phba, &dmabuf->dbuf);
		return;
	}

	/* vport is not registered unless pt2pt discovery is in progress */
	if (!(vport->vpi_state & LPFC_VPI_REGISTERED) &&
	    (did != Fabric_DID)) {
		/*
		 * Throw out the frame if we are not pt2pt.
		 * The pt2pt protocol allows for discovery frames
		 * to be received without a registered VPI.
		 */
		if (!(vport->fc_flag & FC_PT2PT) ||
		    (phba->link_state == LPFC_HBA_READY)) {
			lpfc_in_buf_free(phba, &dmabuf->dbuf);
			return;
		}
	}

	/* Handle the basic abort sequence (BA_ABTS) event */
	if (fc_hdr->fh_r_ctl == FC_RCTL_BA_ABTS) {
		lpfc_sli4_handle_unsol_abort(vport, dmabuf);
		return;
	}

	/* Link this frame into its sequence */
	seq_dmabuf = lpfc_fc_frame_add(vport, dmabuf);
	if (!seq_dmabuf) {
		/* unable to add frame to vport - throw it out */
		lpfc_in_buf_free(phba, &dmabuf->dbuf);
		return;
	}
	/* If not last frame in sequence continue processing frames. */
	if (!lpfc_seq_complete(seq_dmabuf))
		return;

	/* Send the complete sequence to the upper layer protocol */
	lpfc_sli4_send_seq_to_ulp(vport, seq_dmabuf);
}
17577
17578
17579
17580
17581
17582
17583
17584
17585
17586
17587
17588
17589
17590
17591
17592
17593
17594
17595
17596
17597
17598
/**
 * lpfc_sli4_post_all_rpi_hdrs - Post all RPI header pages to the port
 * @phba: pointer to the HBA context object.
 *
 * Walks the driver's RPI header list and posts each page to the firmware.
 * Ports that do not use RPI headers skip straight to marking the resource
 * ready; ports using extents must not call this and get -EIO.  Returns 0
 * on success or -EIO if any page fails to post.
 */
int
lpfc_sli4_post_all_rpi_hdrs(struct lpfc_hba *phba)
{
	struct lpfc_rpi_hdr *rpi_page;
	uint32_t rc = 0;
	uint16_t lrpi = 0;

	/* SLI4 ports that support extents do not require RPI headers. */
	if (!phba->sli4_hba.rpi_hdrs_in_use)
		goto exit;
	if (phba->sli4_hba.extents_in_use)
		return -EIO;

	list_for_each_entry(rpi_page, &phba->sli4_hba.lpfc_rpi_hdr_list, list) {
		/*
		 * Assign the rpi headers a physical rpi only if the driver
		 * has not initialized those resources.  A port reset only
		 * needs the headers posted.
		 * NOTE(review): lrpi is never advanced, so every page gets
		 * rpi_ids[0] here - confirm this is the intended base.
		 */
		if (bf_get(lpfc_rpi_rsrc_rdy, &phba->sli4_hba.sli4_flags) !=
		    LPFC_RPI_RSRC_RDY)
			rpi_page->start_rpi = phba->sli4_hba.rpi_ids[lrpi];

		rc = lpfc_sli4_post_rpi_hdr(phba, rpi_page);
		if (rc != MBX_SUCCESS) {
			lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
					"2008 Error %d posting all rpi "
					"headers\n", rc);
			rc = -EIO;
			break;
		}
	}

 exit:
	/* Mark the RPI resources as initialized for subsequent resets */
	bf_set(lpfc_rpi_rsrc_rdy, &phba->sli4_hba.sli4_flags,
	       LPFC_RPI_RSRC_RDY);
	return rc;
}
17637
17638
17639
17640
17641
17642
17643
17644
17645
17646
17647
17648
17649
17650
17651
/**
 * lpfc_sli4_post_rpi_hdr - Post one RPI header page to the port
 * @phba: pointer to the HBA context object.
 * @rpi_page: the RPI header page to post.
 *
 * Issues an embedded POST_HDR_TEMPLATE mailbox command (polled) carrying
 * the page's physical address, start RPI and page count.  On success the
 * HBA's next_rpi high-water mark is advanced under hbalock.  Returns 0 on
 * success, -EIO on mailbox/status failure, -ENOMEM if no mailbox memory.
 */
int
lpfc_sli4_post_rpi_hdr(struct lpfc_hba *phba, struct lpfc_rpi_hdr *rpi_page)
{
	LPFC_MBOXQ_t *mboxq;
	struct lpfc_mbx_post_hdr_tmpl *hdr_tmpl;
	uint32_t rc = 0;
	uint32_t shdr_status, shdr_add_status;
	union lpfc_sli4_cfg_shdr *shdr;

	/* SLI4 ports that support extents do not require RPI headers. */
	if (!phba->sli4_hba.rpi_hdrs_in_use)
		return rc;
	if (phba->sli4_hba.extents_in_use)
		return -EIO;

	/* The port DMA's the rpi header anytime during an rpi operation. */
	mboxq = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!mboxq) {
		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
				"2001 Unable to allocate memory for issuing "
				"SLI_CONFIG_SPECIAL mailbox command\n");
		return -ENOMEM;
	}

	/* Post all rpi memory regions to the port. */
	hdr_tmpl = &mboxq->u.mqe.un.hdr_tmpl;
	lpfc_sli4_config(phba, mboxq, LPFC_MBOX_SUBSYSTEM_FCOE,
			 LPFC_MBOX_OPCODE_FCOE_POST_HDR_TEMPLATE,
			 sizeof(struct lpfc_mbx_post_hdr_tmpl) -
			 sizeof(struct lpfc_sli4_cfg_mhdr),
			 LPFC_SLI4_MBX_EMBED);


	/* Post the physical rpi to the port for this rpi header. */
	bf_set(lpfc_mbx_post_hdr_tmpl_rpi_offset, hdr_tmpl,
	       rpi_page->start_rpi);
	bf_set(lpfc_mbx_post_hdr_tmpl_page_cnt,
	       hdr_tmpl, rpi_page->page_count);

	hdr_tmpl->rpi_paddr_lo = putPaddrLow(rpi_page->dmabuf->phys);
	hdr_tmpl->rpi_paddr_hi = putPaddrHigh(rpi_page->dmabuf->phys);
	rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
	shdr = (union lpfc_sli4_cfg_shdr *) &hdr_tmpl->header.cfg_shdr;
	shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
	shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
	/* On timeout the firmware may still own the mailbox - do not free */
	if (rc != MBX_TIMEOUT)
		mempool_free(mboxq, phba->mbox_mem_pool);
	if (shdr_status || shdr_add_status || rc) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"2514 POST_RPI_HDR mailbox failed with "
				"status x%x add_status x%x, mbx status x%x\n",
				shdr_status, shdr_add_status, rc);
		rc = -ENXIO;
	} else {
		/*
		 * The next_rpi stores the next logical module-64 rpi value
		 * used to post physical rpis in subsequent rpi postings.
		 */
		spin_lock_irq(&phba->hbalock);
		phba->sli4_hba.next_rpi = rpi_page->next_rpi;
		spin_unlock_irq(&phba->hbalock);
	}
	return rc;
}
17716
17717
17718
17719
17720
17721
17722
17723
17724
17725
17726
17727
17728
17729
/**
 * lpfc_sli4_alloc_rpi - Allocate an available RPI from the bitmap
 * @phba: pointer to the HBA context object.
 *
 * Finds and claims the first free RPI under hbalock, updating the usage
 * counters.  If the pool is running low (and RPI headers are in use) a new
 * header page is created and posted outside the lock to grow the pool.
 * Returns the allocated RPI, or LPFC_RPI_ALLOC_ERROR when none is free.
 */
int
lpfc_sli4_alloc_rpi(struct lpfc_hba *phba)
{
	unsigned long rpi;
	uint16_t max_rpi, rpi_limit;
	uint16_t rpi_remaining, lrpi = 0;
	struct lpfc_rpi_hdr *rpi_hdr;
	unsigned long iflag;

	/*
	 * Fetch the next logical rpi.  Because this index is logical,
	 * the driver starts at 0 each time.
	 */
	spin_lock_irqsave(&phba->hbalock, iflag);
	max_rpi = phba->sli4_hba.max_cfg_param.max_rpi;
	rpi_limit = phba->sli4_hba.next_rpi;

	rpi = find_next_zero_bit(phba->sli4_hba.rpi_bmask, rpi_limit, 0);
	if (rpi >= rpi_limit)
		rpi = LPFC_RPI_ALLOC_ERROR;
	else {
		/* Claim the rpi and account for it */
		set_bit(rpi, phba->sli4_hba.rpi_bmask);
		phba->sli4_hba.max_cfg_param.rpi_used++;
		phba->sli4_hba.rpi_count++;
	}
	lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
			"0001 rpi:%x max:%x lim:%x\n",
			(int) rpi, max_rpi, rpi_limit);

	/*
	 * Don't try to allocate more rpi header regions if the device limit
	 * has been exhausted.
	 */
	if ((rpi == LPFC_RPI_ALLOC_ERROR) &&
	    (phba->sli4_hba.rpi_count >= max_rpi)) {
		spin_unlock_irqrestore(&phba->hbalock, iflag);
		return rpi;
	}

	/*
	 * RPI header postings are not required for SLI4 ports capable of
	 * extents.
	 */
	if (!phba->sli4_hba.rpi_hdrs_in_use) {
		spin_unlock_irqrestore(&phba->hbalock, iflag);
		return rpi;
	}

	/*
	 * If the driver is running low on rpi resources, allocate another
	 * page now.  Note that the next_rpi value is used because
	 * it represents how many are actually in use whereas max_rpi notes
	 * how many are supported max by the device.
	 */
	rpi_remaining = phba->sli4_hba.next_rpi - phba->sli4_hba.rpi_count;
	spin_unlock_irqrestore(&phba->hbalock, iflag);
	if (rpi_remaining < LPFC_RPI_LOW_WATER_MARK) {
		/* Grow the pool outside the lock (allocation may sleep) */
		rpi_hdr = lpfc_sli4_create_rpi_hdr(phba);
		if (!rpi_hdr) {
			lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
					"2002 Error Could not grow rpi "
					"count\n");
		} else {
			lrpi = rpi_hdr->start_rpi;
			rpi_hdr->start_rpi = phba->sli4_hba.rpi_ids[lrpi];
			lpfc_sli4_post_rpi_hdr(phba, rpi_hdr);
		}
	}

	return rpi;
}
17801
17802
17803
17804
17805
17806
17807
17808
17809static void
17810__lpfc_sli4_free_rpi(struct lpfc_hba *phba, int rpi)
17811{
17812 if (test_and_clear_bit(rpi, phba->sli4_hba.rpi_bmask)) {
17813 phba->sli4_hba.rpi_count--;
17814 phba->sli4_hba.max_cfg_param.rpi_used--;
17815 }
17816}
17817
17818
17819
17820
17821
17822
17823
17824
/**
 * lpfc_sli4_free_rpi - Release an RPI back to the bitmap
 * @phba: pointer to the HBA context object.
 * @rpi: the RPI to release.
 *
 * Locked wrapper around __lpfc_sli4_free_rpi().
 */
void
lpfc_sli4_free_rpi(struct lpfc_hba *phba, int rpi)
{
	spin_lock_irq(&phba->hbalock);
	__lpfc_sli4_free_rpi(phba, rpi);
	spin_unlock_irq(&phba->hbalock);
}
17832
17833
17834
17835
17836
17837
17838
17839
/**
 * lpfc_sli4_remove_rpis - Free all RPI tracking resources
 * @phba: pointer to the HBA context object.
 *
 * Frees the RPI bitmap and id array and clears the resource-ready flag.
 */
void
lpfc_sli4_remove_rpis(struct lpfc_hba *phba)
{
	kfree(phba->sli4_hba.rpi_bmask);
	kfree(phba->sli4_hba.rpi_ids);
	bf_set(lpfc_rpi_rsrc_rdy, &phba->sli4_hba.sli4_flags, 0);
}
17847
17848
17849
17850
17851
17852
17853
17854
/**
 * lpfc_sli4_resume_rpi - Issue a RESUME_RPI mailbox command for a node
 * @ndlp: the remote node whose RPI is resumed.
 * @cmpl: optional completion handler; when NULL a default completion
 *        (lpfc_sli_def_mbox_cmpl) is used.
 * @arg: caller context stored in the mailbox's context1 for @cmpl.
 *
 * Allocates and issues the mailbox with MBX_NOWAIT.  On successful issue
 * the mailbox is owned by the completion path; on MBX_NOT_FINISHED it is
 * freed here and -EIO returned.  Returns 0 on success, -ENOMEM if no
 * mailbox memory.
 */
int
lpfc_sli4_resume_rpi(struct lpfc_nodelist *ndlp,
	void (*cmpl)(struct lpfc_hba *, LPFC_MBOXQ_t *), void *arg)
{
	LPFC_MBOXQ_t *mboxq;
	struct lpfc_hba *phba = ndlp->phba;
	int rc;

	/* The port is notified of the header region via a mailbox command. */
	mboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!mboxq)
		return -ENOMEM;

	/* Post all rpi memory regions to the port. */
	lpfc_resume_rpi(mboxq, ndlp);
	if (cmpl) {
		mboxq->mbox_cmpl = cmpl;
		mboxq->context1 = arg;
		mboxq->context2 = ndlp;
	} else
		mboxq->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
	mboxq->vport = ndlp->vport;
	rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_NOWAIT);
	if (rc == MBX_NOT_FINISHED) {
		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
				"2010 Resume RPI Mailbox failed "
				"status %d, mbxStatus x%x\n", rc,
				bf_get(lpfc_mqe_status, &mboxq->u.mqe));
		mempool_free(mboxq, phba->mbox_mem_pool);
		return -EIO;
	}
	return 0;
}
17888
17889
17890
17891
17892
17893
17894
17895
17896
17897
17898
/**
 * lpfc_sli4_init_vpi - Issue an INIT_VPI mailbox command for a vport
 * @vport: the virtual N_Port whose VPI is initialized.
 *
 * Issues the mailbox synchronously (wait with command-specific timeout).
 * Returns 0 (MBX_SUCCESS) on success, -ENOMEM if no mailbox memory, or
 * -EIO on mailbox failure.  On MBX_TIMEOUT the mailbox is deliberately
 * not freed because the firmware may still complete it later.
 */
int
lpfc_sli4_init_vpi(struct lpfc_vport *vport)
{
	LPFC_MBOXQ_t *mboxq;
	int rc = 0;
	int retval = MBX_SUCCESS;
	uint32_t mbox_tmo;
	struct lpfc_hba *phba = vport->phba;
	mboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!mboxq)
		return -ENOMEM;
	lpfc_init_vpi(phba, mboxq, vport->vpi);
	mbox_tmo = lpfc_mbox_tmo_val(phba, mboxq);
	rc = lpfc_sli_issue_mbox_wait(phba, mboxq, mbox_tmo);
	if (rc != MBX_SUCCESS) {
		lpfc_printf_vlog(vport, KERN_ERR, LOG_SLI,
				 "2022 INIT VPI Mailbox failed "
				 "status %d, mbxStatus x%x\n", rc,
				 bf_get(lpfc_mqe_status, &mboxq->u.mqe));
		retval = -EIO;
	}
	/* On timeout the firmware may still own the mailbox - do not free */
	if (rc != MBX_TIMEOUT)
		mempool_free(mboxq, vport->phba->mbox_mem_pool);

	return retval;
}
17925
17926
17927
17928
17929
17930
17931
17932
17933
17934
17935static void
17936lpfc_mbx_cmpl_add_fcf_record(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
17937{
17938 void *virt_addr;
17939 union lpfc_sli4_cfg_shdr *shdr;
17940 uint32_t shdr_status, shdr_add_status;
17941
17942 virt_addr = mboxq->sge_array->addr[0];
17943
17944 shdr = (union lpfc_sli4_cfg_shdr *) virt_addr;
17945 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
17946 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
17947
17948 if ((shdr_status || shdr_add_status) &&
17949 (shdr_status != STATUS_FCF_IN_USE))
17950 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
17951 "2558 ADD_FCF_RECORD mailbox failed with "
17952 "status x%x add_status x%x\n",
17953 shdr_status, shdr_add_status);
17954
17955 lpfc_sli4_mbox_cmd_free(phba, mboxq);
17956}
17957
17958
17959
17960
17961
17962
17963
17964
17965
17966
/**
 * lpfc_sli4_add_fcf_record - Issue an ADD_FCF_RECORD mailbox command
 * @phba: pointer to the HBA context object.
 * @fcf_record: the FCF record to program into the port.
 *
 * Builds a non-embedded SLI_CONFIG mailbox whose SGE payload is laid out
 * as: config header, FCF index word, then the FCF record itself.  Issued
 * with MBX_NOWAIT; completion and freeing are handled by
 * lpfc_mbx_cmpl_add_fcf_record().  Returns 0 on successful issue, -ENOMEM
 * on allocation failure, -EIO if the mailbox could not be issued.
 */
int
lpfc_sli4_add_fcf_record(struct lpfc_hba *phba, struct fcf_record *fcf_record)
{
	int rc = 0;
	LPFC_MBOXQ_t *mboxq;
	uint8_t *bytep;
	void *virt_addr;
	struct lpfc_mbx_sge sge;
	uint32_t alloc_len, req_len;
	uint32_t fcfindex;

	mboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!mboxq) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
			"2009 Failed to allocate mbox for ADD_FCF cmd\n");
		return -ENOMEM;
	}

	req_len = sizeof(struct fcf_record) + sizeof(union lpfc_sli4_cfg_shdr) +
		  sizeof(uint32_t);

	/* Allocate DMA memory and set up the non-embedded mailbox command */
	alloc_len = lpfc_sli4_config(phba, mboxq, LPFC_MBOX_SUBSYSTEM_FCOE,
				     LPFC_MBOX_OPCODE_FCOE_ADD_FCF,
				     req_len, LPFC_SLI4_MBX_NEMBED);
	if (alloc_len < req_len) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
			"2523 Allocated DMA memory size (x%x) is "
			"less than the requested DMA memory "
			"size (x%x)\n", alloc_len, req_len);
		lpfc_sli4_mbox_cmd_free(phba, mboxq);
		return -ENOMEM;
	}

	/*
	 * Get the first SGE entry from the non-embedded DMA memory.  This
	 * routine only uses a single SGE.
	 */
	lpfc_sli4_mbx_sge_get(mboxq, 0, &sge);
	virt_addr = mboxq->sge_array->addr[0];
	/*
	 * Configure the FCF record for FCFI 0.  This is the driver's
	 * hardcoded default and gets used in nonFIP mode.
	 */
	fcfindex = bf_get(lpfc_fcf_record_fcf_index, fcf_record);
	bytep = virt_addr + sizeof(union lpfc_sli4_cfg_shdr);
	lpfc_sli_pcimem_bcopy(&fcfindex, bytep, sizeof(uint32_t));

	/*
	 * Copy the fcf_index and the FCF Record Data.  The data starts after
	 * the FCoE header plus word10.  The data copy needs to be endian
	 * correct.
	 */
	bytep += sizeof(uint32_t);
	lpfc_sli_pcimem_bcopy(fcf_record, bytep, sizeof(struct fcf_record));
	mboxq->vport = phba->pport;
	mboxq->mbox_cmpl = lpfc_mbx_cmpl_add_fcf_record;
	rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_NOWAIT);
	if (rc == MBX_NOT_FINISHED) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
			"2515 ADD_FCF_RECORD mailbox failed with "
			"status 0x%x\n", rc);
		lpfc_sli4_mbox_cmd_free(phba, mboxq);
		rc = -EIO;
	} else
		rc = 0;

	return rc;
}
18036
18037
18038
18039
18040
18041
18042
18043
18044
18045
18046
/**
 * lpfc_sli4_build_dflt_fcf_record - Build the driver's default FCF record
 * @phba: pointer to the HBA context object (supplies the FC map bytes).
 * @fcf_record: caller-provided record to fill in.
 * @fcf_index: FCF table index to assign to the record.
 *
 * Zeroes the record and fills in defaults: max receive size, FKA
 * advertisement period, FIP priority, a MAC built from the FC map plus
 * fixed low bytes, valid/available flags, FPMA|SPMA addressing mode, and
 * the VLAN bitmap bit when a VLAN id is configured.
 */
void
lpfc_sli4_build_dflt_fcf_record(struct lpfc_hba *phba,
				struct fcf_record *fcf_record,
				uint16_t fcf_index)
{
	memset(fcf_record, 0, sizeof(struct fcf_record));
	fcf_record->max_rcv_size = LPFC_FCOE_MAX_RCV_SIZE;
	fcf_record->fka_adv_period = LPFC_FCOE_FKA_ADV_PER;
	fcf_record->fip_priority = LPFC_FCOE_FIP_PRIORITY;
	/* MAC address: first three bytes come from the fabric's FC map */
	bf_set(lpfc_fcf_record_mac_0, fcf_record, phba->fc_map[0]);
	bf_set(lpfc_fcf_record_mac_1, fcf_record, phba->fc_map[1]);
	bf_set(lpfc_fcf_record_mac_2, fcf_record, phba->fc_map[2]);
	bf_set(lpfc_fcf_record_mac_3, fcf_record, LPFC_FCOE_FCF_MAC3);
	bf_set(lpfc_fcf_record_mac_4, fcf_record, LPFC_FCOE_FCF_MAC4);
	bf_set(lpfc_fcf_record_mac_5, fcf_record, LPFC_FCOE_FCF_MAC5);
	bf_set(lpfc_fcf_record_fc_map_0, fcf_record, phba->fc_map[0]);
	bf_set(lpfc_fcf_record_fc_map_1, fcf_record, phba->fc_map[1]);
	bf_set(lpfc_fcf_record_fc_map_2, fcf_record, phba->fc_map[2]);
	bf_set(lpfc_fcf_record_fcf_valid, fcf_record, 1);
	bf_set(lpfc_fcf_record_fcf_avail, fcf_record, 1);
	bf_set(lpfc_fcf_record_fcf_index, fcf_record, fcf_index);
	bf_set(lpfc_fcf_record_mac_addr_prov, fcf_record,
		LPFC_FCF_FPMA | LPFC_FCF_SPMA);
	/* Set the VLAN bit map */
	if (phba->valid_vlan) {
		fcf_record->vlan_bitmap[phba->vlan_id / 8]
			= 1 << (phba->vlan_id % 8);
	}
}
18076
18077
18078
18079
18080
18081
18082
18083
18084
18085
18086
18087
18088
/**
 * lpfc_sli4_fcf_scan_read_fcf_rec - Start an FCF table scan at an index
 * @phba: pointer to the HBA context object.
 * @fcf_index: FCF table index to start reading from.
 *
 * Snapshots the FCoE event tags, builds a READ_FCF_RECORD mailbox with
 * lpfc_mbx_cmpl_fcf_scan_read_fcf_rec as completion, sets the
 * FCF_TS_INPROG flag under hbalock, and issues the mailbox MBX_NOWAIT.
 * On any failure the mailbox is freed and FCF_TS_INPROG is cleared.
 * Returns 0 on success, -ENOMEM, -EINVAL or -EIO on failure.
 */
int
lpfc_sli4_fcf_scan_read_fcf_rec(struct lpfc_hba *phba, uint16_t fcf_index)
{
	int rc = 0, error;
	LPFC_MBOXQ_t *mboxq;

	phba->fcoe_eventtag_at_fcf_scan = phba->fcoe_eventtag;
	phba->fcoe_cvl_eventtag_attn = phba->fcoe_cvl_eventtag;
	mboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!mboxq) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"2000 Failed to allocate mbox for "
				"READ_FCF cmd\n");
		error = -ENOMEM;
		goto fail_fcf_scan;
	}
	/* Construct the read FCF record mailbox command */
	rc = lpfc_sli4_mbx_read_fcf_rec(phba, mboxq, fcf_index);
	if (rc) {
		error = -EINVAL;
		goto fail_fcf_scan;
	}
	/* Issue the mailbox command asynchronously */
	mboxq->vport = phba->pport;
	mboxq->mbox_cmpl = lpfc_mbx_cmpl_fcf_scan_read_fcf_rec;

	spin_lock_irq(&phba->hbalock);
	phba->hba_flag |= FCF_TS_INPROG;
	spin_unlock_irq(&phba->hbalock);

	rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_NOWAIT);
	if (rc == MBX_NOT_FINISHED)
		error = -EIO;
	else {
		/* Reset eligible FCF count for new scan */
		if (fcf_index == LPFC_FCOE_FCF_GET_FIRST)
			phba->fcf.eligible_fcf_cnt = 0;
		error = 0;
	}
fail_fcf_scan:
	if (error) {
		if (mboxq)
			lpfc_sli4_mbox_cmd_free(phba, mboxq);
		/* FCF scan failed, clear FCF_TS_INPROG flag */
		spin_lock_irq(&phba->hbalock);
		phba->hba_flag &= ~FCF_TS_INPROG;
		spin_unlock_irq(&phba->hbalock);
	}
	return error;
}
18139
18140
18141
18142
18143
18144
18145
18146
18147
18148
18149
18150
18151int
18152lpfc_sli4_fcf_rr_read_fcf_rec(struct lpfc_hba *phba, uint16_t fcf_index)
18153{
18154 int rc = 0, error;
18155 LPFC_MBOXQ_t *mboxq;
18156
18157 mboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
18158 if (!mboxq) {
18159 lpfc_printf_log(phba, KERN_ERR, LOG_FIP | LOG_INIT,
18160 "2763 Failed to allocate mbox for "
18161 "READ_FCF cmd\n");
18162 error = -ENOMEM;
18163 goto fail_fcf_read;
18164 }
18165
18166 rc = lpfc_sli4_mbx_read_fcf_rec(phba, mboxq, fcf_index);
18167 if (rc) {
18168 error = -EINVAL;
18169 goto fail_fcf_read;
18170 }
18171
18172 mboxq->vport = phba->pport;
18173 mboxq->mbox_cmpl = lpfc_mbx_cmpl_fcf_rr_read_fcf_rec;
18174 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_NOWAIT);
18175 if (rc == MBX_NOT_FINISHED)
18176 error = -EIO;
18177 else
18178 error = 0;
18179
18180fail_fcf_read:
18181 if (error && mboxq)
18182 lpfc_sli4_mbox_cmd_free(phba, mboxq);
18183 return error;
18184}
18185
18186
18187
18188
18189
18190
18191
18192
18193
18194
18195
18196
/**
 * lpfc_sli4_read_fcf_rec - Read a single FCF record from the port
 * @phba: pointer to the HBA context object.
 * @fcf_index: FCF table index of the record to read.
 *
 * Builds a READ_FCF_RECORD mailbox with the generic read completion
 * handler and issues it MBX_NOWAIT.  Returns 0 on successful issue,
 * -ENOMEM, -EINVAL or -EIO on failure; the mailbox is freed on any
 * failure after allocation.
 */
int
lpfc_sli4_read_fcf_rec(struct lpfc_hba *phba, uint16_t fcf_index)
{
	int rc = 0, error;
	LPFC_MBOXQ_t *mboxq;

	mboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!mboxq) {
		lpfc_printf_log(phba, KERN_ERR, LOG_FIP | LOG_INIT,
				"2758 Failed to allocate mbox for "
				"READ_FCF cmd\n");
		error = -ENOMEM;
		goto fail_fcf_read;
	}
	/* Construct the read FCF record mailbox command */
	rc = lpfc_sli4_mbx_read_fcf_rec(phba, mboxq, fcf_index);
	if (rc) {
		error = -EINVAL;
		goto fail_fcf_read;
	}
	/* Issue the mailbox command asynchronously */
	mboxq->vport = phba->pport;
	mboxq->mbox_cmpl = lpfc_mbx_cmpl_read_fcf_rec;
	rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_NOWAIT);
	if (rc == MBX_NOT_FINISHED)
		error = -EIO;
	else
		error = 0;

fail_fcf_read:
	/* mboxq may be NULL here when allocation itself failed */
	if (error && mboxq)
		lpfc_sli4_mbox_cmd_free(phba, mboxq);
	return error;
}
18231
18232
18233
18234
18235
18236
18237
18238
18239
18240
18241
18242
18243
18244
/**
 * lpfc_check_next_fcf_pri_level - Repopulate the roundrobin FCF bitmap
 * @phba: pointer to the HBA context object.
 *
 * Called when the roundrobin bitmap is exhausted.  Clears the bitmap and
 * re-seeds it from the priority-sorted FCF list: first with entries at the
 * best priority level that have not failed FLOGI; if every entry has
 * failed, the FLOGI-failed flags are cleared and the best level is seeded
 * again.  Returns 1 when a new set of candidates was installed, 0 when the
 * list is empty/singular or a bitmap update failed.
 *
 * NOTE(review): hbalock is released and re-acquired inside the
 * list_for_each_entry() walks; this relies on the list not changing while
 * the lock is dropped - confirm callers serialize FCF list updates.
 */
static int
lpfc_check_next_fcf_pri_level(struct lpfc_hba *phba)
{
	uint16_t next_fcf_pri;
	uint16_t last_index;
	struct lpfc_fcf_pri *fcf_pri;
	int rc;
	int ret = 0;

	last_index = find_first_bit(phba->fcf.fcf_rr_bmask,
			LPFC_SLI4_FCF_TBL_INDX_MAX);
	lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
			"3060 Last IDX %d\n", last_index);

	/* Verify the priority list has 2 or more entries */
	spin_lock_irq(&phba->hbalock);
	if (list_empty(&phba->fcf.fcf_pri_list) ||
	    list_is_singular(&phba->fcf.fcf_pri_list)) {
		spin_unlock_irq(&phba->hbalock);
		lpfc_printf_log(phba, KERN_ERR, LOG_FIP,
			"3061 Last IDX %d\n", last_index);
		return 0; /* Empty rr list */
	}
	spin_unlock_irq(&phba->hbalock);

	next_fcf_pri = 0;
	/*
	 * Clear the rr_bmask and set all of the bits that are at this
	 * priority.
	 */
	memset(phba->fcf.fcf_rr_bmask, 0,
			sizeof(*phba->fcf.fcf_rr_bmask));
	spin_lock_irq(&phba->hbalock);
	list_for_each_entry(fcf_pri, &phba->fcf.fcf_pri_list, list) {
		if (fcf_pri->fcf_rec.flag & LPFC_FCF_FLOGI_FAILED)
			continue;
		/*
		 * the 1st priority that has not FLOGI failed
		 * will be the highest.
		 */
		if (!next_fcf_pri)
			next_fcf_pri = fcf_pri->fcf_rec.priority;
		spin_unlock_irq(&phba->hbalock);
		if (fcf_pri->fcf_rec.priority == next_fcf_pri) {
			rc = lpfc_sli4_fcf_rr_index_set(phba,
						fcf_pri->fcf_rec.fcf_index);
			if (rc)
				return 0;
		}
		spin_lock_irq(&phba->hbalock);
	}
	/*
	 * if next_fcf_pri was not set above and the list is not empty then
	 * we have failed flogis on all of them. So reset flogi failed
	 * and start at the beginning.
	 */
	if (!next_fcf_pri && !list_empty(&phba->fcf.fcf_pri_list)) {
		list_for_each_entry(fcf_pri, &phba->fcf.fcf_pri_list, list) {
			fcf_pri->fcf_rec.flag &= ~LPFC_FCF_FLOGI_FAILED;
			/*
			 * the 1st priority that has not FLOGI failed
			 * will be the highest.
			 */
			if (!next_fcf_pri)
				next_fcf_pri = fcf_pri->fcf_rec.priority;
			spin_unlock_irq(&phba->hbalock);
			if (fcf_pri->fcf_rec.priority == next_fcf_pri) {
				rc = lpfc_sli4_fcf_rr_index_set(phba,
						fcf_pri->fcf_rec.fcf_index);
				if (rc)
					return 0;
			}
			spin_lock_irq(&phba->hbalock);
		}
	} else
		ret = 1;
	spin_unlock_irq(&phba->hbalock);

	return ret;
}
18325
18326
18327
18328
18329
18330
18331
18332
18333
18334
/**
 * lpfc_sli4_fcf_rr_next_index_get - Pick the next roundrobin failover FCF
 * @phba: pointer to the HBA context object.
 *
 * Scans the roundrobin bitmap for the index after the current FCF,
 * wrapping to the start when the end of the table is reached.  If no
 * candidate is left (or only the current one), the priority list is asked
 * to reseed the bitmap and the scan restarts.  Indexes whose FLOGI
 * previously failed are skipped unless they are the only entry.
 * Returns the chosen FCF index, or LPFC_FCOE_FCF_NEXT_NONE when no
 * failover candidate exists.
 */
uint16_t
lpfc_sli4_fcf_rr_next_index_get(struct lpfc_hba *phba)
{
	uint16_t next_fcf_index;

initial_priority:
	/* Search start from next bit of currently registered FCF index */
	next_fcf_index = phba->fcf.current_rec.fcf_indx;

next_priority:
	/* Determine the next fcf index to check */
	next_fcf_index = (next_fcf_index + 1) % LPFC_SLI4_FCF_TBL_INDX_MAX;
	next_fcf_index = find_next_bit(phba->fcf.fcf_rr_bmask,
				       LPFC_SLI4_FCF_TBL_INDX_MAX,
				       next_fcf_index);

	/* Wrap around condition on phba->fcf.fcf_rr_bmask */
	if (next_fcf_index >= LPFC_SLI4_FCF_TBL_INDX_MAX) {
		/*
		 * If we have wrapped then we need to clear the bits that
		 * have been tested so that we can detect when we should
		 * change the priority level.
		 */
		next_fcf_index = find_next_bit(phba->fcf.fcf_rr_bmask,
					       LPFC_SLI4_FCF_TBL_INDX_MAX, 0);
	}


	/* Check roundrobin failover list empty condition */
	if (next_fcf_index >= LPFC_SLI4_FCF_TBL_INDX_MAX ||
		next_fcf_index == phba->fcf.current_rec.fcf_indx) {
		/*
		 * If next fcf index is not found check if there are lower
		 * Priority level fcf's in the fcf_priority list.
		 * Set up the rr_bmask with all of the avaiable fcf bits
		 * at that level and continue the selection process.
		 */
		if (lpfc_check_next_fcf_pri_level(phba))
			goto initial_priority;
		lpfc_printf_log(phba, KERN_WARNING, LOG_FIP,
				"2844 No roundrobin failover FCF available\n");
		if (next_fcf_index >= LPFC_SLI4_FCF_TBL_INDX_MAX)
			return LPFC_FCOE_FCF_NEXT_NONE;
		else {
			lpfc_printf_log(phba, KERN_WARNING, LOG_FIP,
				"3063 Only FCF available idx %d, flag %x\n",
				next_fcf_index,
			phba->fcf.fcf_pri[next_fcf_index].fcf_rec.flag);
			return next_fcf_index;
		}
	}

	/* Skip entries that already failed FLOGI, unless it is the last one */
	if (next_fcf_index < LPFC_SLI4_FCF_TBL_INDX_MAX &&
		phba->fcf.fcf_pri[next_fcf_index].fcf_rec.flag &
		LPFC_FCF_FLOGI_FAILED) {
		if (list_is_singular(&phba->fcf.fcf_pri_list))
			return LPFC_FCOE_FCF_NEXT_NONE;

		goto next_priority;
	}

	lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
			"2845 Get next roundrobin failover FCF (x%x)\n",
			next_fcf_index);

	return next_fcf_index;
}
18402
18403
18404
18405
18406
18407
18408
18409
18410
18411
18412
18413
18414
18415int
18416lpfc_sli4_fcf_rr_index_set(struct lpfc_hba *phba, uint16_t fcf_index)
18417{
18418 if (fcf_index >= LPFC_SLI4_FCF_TBL_INDX_MAX) {
18419 lpfc_printf_log(phba, KERN_ERR, LOG_FIP,
18420 "2610 FCF (x%x) reached driver's book "
18421 "keeping dimension:x%x\n",
18422 fcf_index, LPFC_SLI4_FCF_TBL_INDX_MAX);
18423 return -EINVAL;
18424 }
18425
18426 set_bit(fcf_index, phba->fcf.fcf_rr_bmask);
18427
18428 lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
18429 "2790 Set FCF (x%x) to roundrobin FCF failover "
18430 "bmask\n", fcf_index);
18431
18432 return 0;
18433}
18434
18435
18436
18437
18438
18439
18440
18441
18442
18443
18444void
18445lpfc_sli4_fcf_rr_index_clear(struct lpfc_hba *phba, uint16_t fcf_index)
18446{
18447 struct lpfc_fcf_pri *fcf_pri, *fcf_pri_next;
18448 if (fcf_index >= LPFC_SLI4_FCF_TBL_INDX_MAX) {
18449 lpfc_printf_log(phba, KERN_ERR, LOG_FIP,
18450 "2762 FCF (x%x) reached driver's book "
18451 "keeping dimension:x%x\n",
18452 fcf_index, LPFC_SLI4_FCF_TBL_INDX_MAX);
18453 return;
18454 }
18455
18456 spin_lock_irq(&phba->hbalock);
18457 list_for_each_entry_safe(fcf_pri, fcf_pri_next, &phba->fcf.fcf_pri_list,
18458 list) {
18459 if (fcf_pri->fcf_rec.fcf_index == fcf_index) {
18460 list_del_init(&fcf_pri->list);
18461 break;
18462 }
18463 }
18464 spin_unlock_irq(&phba->hbalock);
18465 clear_bit(fcf_index, phba->fcf.fcf_rr_bmask);
18466
18467 lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
18468 "2791 Clear FCF (x%x) from roundrobin failover "
18469 "bmask\n", fcf_index);
18470}
18471
18472
18473
18474
18475
18476
18477
18478
18479
/**
 * lpfc_mbx_cmpl_redisc_fcf_table - Completion for the REDISCOVER_FCF command
 * @phba: pointer to the HBA context object.
 * @mbox: the completed mailbox command.
 *
 * On failure the recovery path depends on which discovery triggered the
 * rediscovery: a CVL-driven rediscovery (FCF_ACVL_DISC) retries physical
 * port discovery, while a dead-FCF rediscovery falls through to the
 * link-down handling.  On success the rediscover quiescent timer is
 * started.  The mailbox is freed in all cases.
 */
static void
lpfc_mbx_cmpl_redisc_fcf_table(struct lpfc_hba *phba, LPFC_MBOXQ_t *mbox)
{
	struct lpfc_mbx_redisc_fcf_tbl *redisc_fcf;
	uint32_t shdr_status, shdr_add_status;

	redisc_fcf = &mbox->u.mqe.un.redisc_fcf_tbl;

	shdr_status = bf_get(lpfc_mbox_hdr_status,
			     &redisc_fcf->header.cfg_shdr.response);
	shdr_add_status = bf_get(lpfc_mbox_hdr_add_status,
			     &redisc_fcf->header.cfg_shdr.response);
	if (shdr_status || shdr_add_status) {
		lpfc_printf_log(phba, KERN_ERR, LOG_FIP,
				"2746 Requesting for FCF rediscovery failed "
				"status x%x add_status x%x\n",
				shdr_status, shdr_add_status);
		if (phba->fcf.fcf_flag & FCF_ACVL_DISC) {
			spin_lock_irq(&phba->hbalock);
			phba->fcf.fcf_flag &= ~FCF_ACVL_DISC;
			spin_unlock_irq(&phba->hbalock);
			/*
			 * CVL event triggered FCF rediscover request failed,
			 * last resort to re-try current registered FCF entry.
			 */
			lpfc_retry_pport_discovery(phba);
		} else {
			spin_lock_irq(&phba->hbalock);
			phba->fcf.fcf_flag &= ~FCF_DEAD_DISC;
			spin_unlock_irq(&phba->hbalock);
			/*
			 * DEAD FCF event triggered FCF rediscover request
			 * failed, last resort to fail over as a link down
			 * to FCF registration.
			 */
			lpfc_sli4_fcf_dead_failthrough(phba);
		}
	} else {
		lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
				"2775 Start FCF rediscover quiescent timer\n");
		/*
		 * Start FCF rediscovery wait timer for pending FCF
		 * before rescan FCF record table.
		 */
		lpfc_fcf_redisc_wait_start_timer(phba);
	}

	mempool_free(mbox, phba->mbox_mem_pool);
}
18529
18530
18531
18532
18533
18534
18535
18536
/**
 * lpfc_sli4_redisc_fcf_table - Request port to rediscover the FCF table
 * @phba: pointer to lpfc hba data structure.
 *
 * Builds and issues (MBX_NOWAIT) an FCOE_REDISCOVER_FCF SLI4 config
 * mailbox; the result is handled by lpfc_mbx_cmpl_redisc_fcf_table().
 * All vport retry-delay timers are cancelled before issuing the request.
 *
 * Return: 0 if the mailbox was issued, -ENOMEM if mailbox allocation
 * failed, -EIO if the mailbox could not be issued.
 */
int
lpfc_sli4_redisc_fcf_table(struct lpfc_hba *phba)
{
	LPFC_MBOXQ_t *mbox;
	struct lpfc_mbx_redisc_fcf_tbl *redisc_fcf;
	int rc, length;

	/* Cancel retry delay timers to all vports before FCF rediscover */
	lpfc_cancel_all_vport_retry_delay_timer(phba);

	mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!mbox) {
		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
				"2745 Failed to allocate mbox for "
				"requesting FCF rediscover.\n");
		return -ENOMEM;
	}

	length = (sizeof(struct lpfc_mbx_redisc_fcf_tbl) -
		  sizeof(struct lpfc_sli4_cfg_mhdr));
	lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_FCOE,
			 LPFC_MBOX_OPCODE_FCOE_REDISCOVER_FCF,
			 length, LPFC_SLI4_MBX_EMBED);

	redisc_fcf = &mbox->u.mqe.un.redisc_fcf_tbl;
	/* fcf_count of 0 — presumably requests rediscovery of the entire
	 * FCF table; confirm against the FCOE_REDISCOVER_FCF opcode spec.
	 */
	bf_set(lpfc_mbx_redisc_fcf_count, redisc_fcf, 0);

	/* Issue the mailbox command asynchronously */
	mbox->vport = phba->pport;
	mbox->mbox_cmpl = lpfc_mbx_cmpl_redisc_fcf_table;
	rc = lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT);

	if (rc == MBX_NOT_FINISHED) {
		mempool_free(mbox, phba->mbox_mem_pool);
		return -EIO;
	}
	return 0;
}
18576
18577
18578
18579
18580
18581
18582
18583
18584void
18585lpfc_sli4_fcf_dead_failthrough(struct lpfc_hba *phba)
18586{
18587 uint32_t link_state;
18588
18589
18590
18591
18592
18593
18594 link_state = phba->link_state;
18595 lpfc_linkdown(phba);
18596 phba->link_state = link_state;
18597
18598
18599 lpfc_unregister_unused_fcf(phba);
18600}
18601
18602
18603
18604
18605
18606
18607
18608
18609
18610
/**
 * lpfc_sli_get_config_region23 - Read config region 23 (SLI3 path)
 * @phba: pointer to lpfc hba data structure.
 * @rgn23_data: destination buffer, at least DMP_RGN23_SIZE bytes.
 *
 * Repeatedly issues polled DUMP mailbox commands for DMP_REGION_23,
 * copying each returned chunk from the mailbox response area into
 * @rgn23_data until the region is exhausted or DMP_RGN23_SIZE is reached.
 * A mailbox failure terminates the loop by forcing word_cnt to 0.
 *
 * NOTE(review): word_cnt is used directly as the bcopy length and is
 * compared against the byte-sized DMP_RGN23_SIZE — it appears to carry a
 * byte count here despite its name; confirm against the DUMP mailbox
 * definition before changing.
 *
 * Return: number of bytes read into @rgn23_data, or 0 on failure.
 */
static uint32_t
lpfc_sli_get_config_region23(struct lpfc_hba *phba, char *rgn23_data)
{
	LPFC_MBOXQ_t *pmb = NULL;
	MAILBOX_t *mb;
	uint32_t offset = 0;
	int rc;

	if (!rgn23_data)
		return 0;

	pmb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!pmb) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"2600 failed to allocate mailbox memory\n");
		return 0;
	}
	mb = &pmb->u.mb;

	do {
		lpfc_dump_mem(phba, pmb, offset, DMP_REGION_23);
		rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL);

		if (rc != MBX_SUCCESS) {
			lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
					"2601 failed to read config "
					"region 23, rc 0x%x Status 0x%x\n",
					rc, mb->mbxStatus);
			mb->un.varDmp.word_cnt = 0;
		}
		/*
		 * dump mem may return a zero when finished or we got a
		 * mailbox error, either way we are done.
		 */
		if (mb->un.varDmp.word_cnt == 0)
			break;
		/* Clamp the copy so we never run past the caller's buffer */
		if (mb->un.varDmp.word_cnt > DMP_RGN23_SIZE - offset)
			mb->un.varDmp.word_cnt = DMP_RGN23_SIZE - offset;

		lpfc_sli_pcimem_bcopy(((uint8_t *)mb) + DMP_RSP_OFFSET,
				       rgn23_data + offset,
				       mb->un.varDmp.word_cnt);
		offset += mb->un.varDmp.word_cnt;
	} while (mb->un.varDmp.word_cnt && offset < DMP_RGN23_SIZE);

	mempool_free(pmb, phba->mbox_mem_pool);
	return offset;
}
18659
18660
18661
18662
18663
18664
18665
18666
18667
18668
/**
 * lpfc_sli4_get_config_region23 - Read config region 23 (SLI4 path)
 * @phba: pointer to lpfc hba data structure.
 * @rgn23_data: destination buffer, at least DMP_RGN23_SIZE bytes.
 *
 * Sets up and polls the SLI4 dump of config region 23, then copies the
 * returned data (length taken from mailbox word 5) from the DMA buffer
 * attached to the mailbox into @rgn23_data.  Lengths larger than
 * DMP_RGN23_SIZE are rejected.  The mailbox and its DMA buffer are freed
 * on every path.
 *
 * Return: number of bytes copied into @rgn23_data, or 0 on failure.
 */
static uint32_t
lpfc_sli4_get_config_region23(struct lpfc_hba *phba, char *rgn23_data)
{
	LPFC_MBOXQ_t *mboxq = NULL;
	struct lpfc_dmabuf *mp = NULL;
	struct lpfc_mqe *mqe;
	uint32_t data_length = 0;
	int rc;

	if (!rgn23_data)
		return 0;

	mboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!mboxq) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"3105 failed to allocate mailbox memory\n");
		return 0;
	}

	if (lpfc_sli4_dump_cfg_rg23(phba, mboxq))
		goto out;
	mqe = &mboxq->u.mqe;
	/* DMA buffer attached to the mailbox by lpfc_sli4_dump_cfg_rg23() */
	mp = (struct lpfc_dmabuf *) mboxq->context1;
	rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
	if (rc)
		goto out;
	data_length = mqe->un.mb_words[5];
	if (data_length == 0)
		goto out;
	/* Never copy more than the caller's buffer can hold */
	if (data_length > DMP_RGN23_SIZE) {
		data_length = 0;
		goto out;
	}
	lpfc_sli_pcimem_bcopy((char *)mp->virt, rgn23_data, data_length);
out:
	mempool_free(mboxq, phba->mbox_mem_pool);
	if (mp) {
		lpfc_mbuf_free(phba, mp->virt, mp->phys);
		kfree(mp);
	}
	return data_length;
}
18711
18712
18713
18714
18715
18716
18717
18718
18719
18720void
18721lpfc_sli_read_link_ste(struct lpfc_hba *phba)
18722{
18723 uint8_t *rgn23_data = NULL;
18724 uint32_t if_type, data_size, sub_tlv_len, tlv_offset;
18725 uint32_t offset = 0;
18726
18727
18728 rgn23_data = kzalloc(DMP_RGN23_SIZE, GFP_KERNEL);
18729 if (!rgn23_data)
18730 goto out;
18731
18732 if (phba->sli_rev < LPFC_SLI_REV4)
18733 data_size = lpfc_sli_get_config_region23(phba, rgn23_data);
18734 else {
18735 if_type = bf_get(lpfc_sli_intf_if_type,
18736 &phba->sli4_hba.sli_intf);
18737 if (if_type == LPFC_SLI_INTF_IF_TYPE_0)
18738 goto out;
18739 data_size = lpfc_sli4_get_config_region23(phba, rgn23_data);
18740 }
18741
18742 if (!data_size)
18743 goto out;
18744
18745
18746 if (memcmp(&rgn23_data[offset], LPFC_REGION23_SIGNATURE, 4)) {
18747 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
18748 "2619 Config region 23 has bad signature\n");
18749 goto out;
18750 }
18751 offset += 4;
18752
18753
18754 if (rgn23_data[offset] != LPFC_REGION23_VERSION) {
18755 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
18756 "2620 Config region 23 has bad version\n");
18757 goto out;
18758 }
18759 offset += 4;
18760
18761
18762 while (offset < data_size) {
18763 if (rgn23_data[offset] == LPFC_REGION23_LAST_REC)
18764 break;
18765
18766
18767
18768
18769 if ((rgn23_data[offset] != DRIVER_SPECIFIC_TYPE) ||
18770 (rgn23_data[offset + 2] != LINUX_DRIVER_ID) ||
18771 (rgn23_data[offset + 3] != 0)) {
18772 offset += rgn23_data[offset + 1] * 4 + 4;
18773 continue;
18774 }
18775
18776
18777 sub_tlv_len = rgn23_data[offset + 1] * 4;
18778 offset += 4;
18779 tlv_offset = 0;
18780
18781
18782
18783
18784 while ((offset < data_size) &&
18785 (tlv_offset < sub_tlv_len)) {
18786 if (rgn23_data[offset] == LPFC_REGION23_LAST_REC) {
18787 offset += 4;
18788 tlv_offset += 4;
18789 break;
18790 }
18791 if (rgn23_data[offset] != PORT_STE_TYPE) {
18792 offset += rgn23_data[offset + 1] * 4 + 4;
18793 tlv_offset += rgn23_data[offset + 1] * 4 + 4;
18794 continue;
18795 }
18796
18797
18798 if (!rgn23_data[offset + 2])
18799 phba->hba_flag |= LINK_DISABLED;
18800
18801 goto out;
18802 }
18803 }
18804
18805out:
18806 kfree(rgn23_data);
18807 return;
18808}
18809
18810
18811
18812
18813
18814
18815
18816
18817
18818
18819
18820
18821
18822
18823
18824
18825
18826
18827
18828
/**
 * lpfc_wr_object - Write an object to the port via WRITE_OBJECT mailbox
 * @phba: pointer to lpfc hba data structure.
 * @dmabuf_list: list of DMA buffers holding the object data.
 * @size: total number of bytes to write.
 * @offset: in/out byte offset of this transfer within the object.
 *
 * Builds an embedded WRITE_OBJECT SLI4 config mailbox carrying up to
 * LPFC_MBX_WR_CONFIG_MAX_BDE BDEs (each at most SLI4_PAGE_SIZE bytes)
 * that describe @dmabuf_list, setting EOF on the chunk that completes
 * @size, and issues it polled (interrupts disabled) or waited.  On
 * success *@offset is advanced by the actual write length reported by
 * the port; on failure *@offset is overwritten with the additional
 * status from the response header.
 *
 * Return: 0 on success, -ENOMEM if no mailbox could be allocated, or
 * -ENXIO on any mailbox/port error.
 */
int
lpfc_wr_object(struct lpfc_hba *phba, struct list_head *dmabuf_list,
	       uint32_t size, uint32_t *offset)
{
	struct lpfc_mbx_wr_object *wr_object;
	LPFC_MBOXQ_t *mbox;
	int rc = 0, i = 0;
	uint32_t shdr_status, shdr_add_status;
	uint32_t mbox_tmo;
	union lpfc_sli4_cfg_shdr *shdr;
	struct lpfc_dmabuf *dmabuf;
	uint32_t written = 0;

	mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!mbox)
		return -ENOMEM;

	lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON,
			LPFC_MBOX_OPCODE_WRITE_OBJECT,
			sizeof(struct lpfc_mbx_wr_object) -
			sizeof(struct lpfc_sli4_cfg_mhdr), LPFC_SLI4_MBX_EMBED);

	wr_object = (struct lpfc_mbx_wr_object *)&mbox->u.mqe.un.wr_object;
	wr_object->u.request.write_offset = *offset;
	/* Object name "/", converted to little-endian word order below */
	sprintf((uint8_t *)wr_object->u.request.object_name, "/");
	wr_object->u.request.object_name[0] =
		cpu_to_le32(wr_object->u.request.object_name[0]);
	bf_set(lpfc_wr_object_eof, &wr_object->u.request, 0);
	list_for_each_entry(dmabuf, dmabuf_list, list) {
		if (i >= LPFC_MBX_WR_CONFIG_MAX_BDE || written >= size)
			break;
		wr_object->u.request.bde[i].addrLow = putPaddrLow(dmabuf->phys);
		wr_object->u.request.bde[i].addrHigh =
			putPaddrHigh(dmabuf->phys);
		if (written + SLI4_PAGE_SIZE >= size) {
			/* Final (possibly short) chunk: mark end of file */
			wr_object->u.request.bde[i].tus.f.bdeSize =
				(size - written);
			written += (size - written);
			bf_set(lpfc_wr_object_eof, &wr_object->u.request, 1);
		} else {
			wr_object->u.request.bde[i].tus.f.bdeSize =
				SLI4_PAGE_SIZE;
			written += SLI4_PAGE_SIZE;
		}
		i++;
	}
	wr_object->u.request.bde_count = i;
	bf_set(lpfc_wr_object_write_length, &wr_object->u.request, written);
	if (!phba->sli4_hba.intr_enable)
		rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
	else {
		mbox_tmo = lpfc_mbox_tmo_val(phba, mbox);
		rc = lpfc_sli_issue_mbox_wait(phba, mbox, mbox_tmo);
	}
	/* The IOCTL status is embedded in the mailbox subheader. */
	shdr = (union lpfc_sli4_cfg_shdr *) &wr_object->header.cfg_shdr;
	shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
	shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
	/* On MBX_TIMEOUT the mailbox is deliberately not freed here —
	 * presumably the completion may still arrive later; confirm
	 * against the driver's mailbox timeout handling.
	 */
	if (rc != MBX_TIMEOUT)
		mempool_free(mbox, phba->mbox_mem_pool);
	if (shdr_status || shdr_add_status || rc) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"3025 Write Object mailbox failed with "
				"status x%x add_status x%x, mbx status x%x\n",
				shdr_status, shdr_add_status, rc);
		rc = -ENXIO;
		*offset = shdr_add_status;
	} else
		*offset += wr_object->u.response.actual_write_length;
	return rc;
}
18900
18901
18902
18903
18904
18905
18906
18907
18908
18909
/**
 * lpfc_cleanup_pending_mbox - Clean up a vport's REG_LOGIN/REG_VPI mailboxes
 * @vport: pointer to lpfc vport data structure.
 *
 * Cleans up this vport's pending MBX_REG_LOGIN64 and MBX_REG_VPI mailbox
 * commands in three stages, all under phba->hbalock:
 *  1. Move matching commands still on the issue queue (sli.mboxq) to a
 *     local list for disposal.
 *  2. If the currently active mailbox belongs to this vport, redirect its
 *     completion to lpfc_sli_def_mbox_cmpl and, for REG_LOGIN64, take a
 *     node reference and flag LPFC_MBX_IMED_UNREG so the login is
 *     unregistered immediately on completion.
 *  3. Walk the completed-command queue (sli.mboxq_cmpl) the same way;
 *     the loop restarts after each modification because the lock is
 *     dropped to take the host lock for the node-flag update.
 * Finally the local list is drained: REG_LOGIN64 DMA buffers are freed,
 * node references dropped, and each mailbox returned to the pool.
 */
void
lpfc_cleanup_pending_mbox(struct lpfc_vport *vport)
{
	struct lpfc_hba *phba = vport->phba;
	LPFC_MBOXQ_t *mb, *nextmb;
	struct lpfc_dmabuf *mp;
	struct lpfc_nodelist *ndlp;
	struct lpfc_nodelist *act_mbx_ndlp = NULL;
	struct Scsi_Host  *shost = lpfc_shost_from_vport(vport);
	LIST_HEAD(mbox_cmd_list);
	uint8_t restart_loop;

	/* Clean up internally queued mailbox commands with the vport */
	spin_lock_irq(&phba->hbalock);
	list_for_each_entry_safe(mb, nextmb, &phba->sli.mboxq, list) {
		if (mb->vport != vport)
			continue;

		if ((mb->u.mb.mbxCommand != MBX_REG_LOGIN64) &&
			(mb->u.mb.mbxCommand != MBX_REG_VPI))
			continue;

		list_del(&mb->list);
		list_add_tail(&mb->list, &mbox_cmd_list);
	}
	/* Clean up active mailbox command with the vport */
	mb = phba->sli.mbox_active;
	if (mb && (mb->vport == vport)) {
		if ((mb->u.mb.mbxCommand == MBX_REG_LOGIN64) ||
			(mb->u.mb.mbxCommand == MBX_REG_VPI))
			mb->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
		if (mb->u.mb.mbxCommand == MBX_REG_LOGIN64) {
			act_mbx_ndlp = (struct lpfc_nodelist *)mb->context2;
			/* Put reference count for delayed processing */
			act_mbx_ndlp = lpfc_nlp_get(act_mbx_ndlp);
			/* Unregister the RPI when mailbox complete */
			mb->mbox_flag |= LPFC_MBX_IMED_UNREG;
		}
	}
	/* Cleanup any mailbox completions which are not yet processed */
	do {
		restart_loop = 0;
		list_for_each_entry(mb, &phba->sli.mboxq_cmpl, list) {
			/*
			 * If this mailox is already processed or it is
			 * for another vport ignore it.
			 */
			if ((mb->vport != vport) ||
				(mb->mbox_flag & LPFC_MBX_IMED_UNREG))
				continue;

			if ((mb->u.mb.mbxCommand != MBX_REG_LOGIN64) &&
				(mb->u.mb.mbxCommand != MBX_REG_VPI))
				continue;

			mb->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
			if (mb->u.mb.mbxCommand == MBX_REG_LOGIN64) {
				ndlp = (struct lpfc_nodelist *)mb->context2;
				/* Unregister the RPI when mailbox complete */
				mb->mbox_flag |= LPFC_MBX_IMED_UNREG;
				restart_loop = 1;
				/* Drop hbalock to take the host lock, then
				 * restart the scan from the top.
				 */
				spin_unlock_irq(&phba->hbalock);
				spin_lock(shost->host_lock);
				ndlp->nlp_flag &= ~NLP_IGNR_REG_CMPL;
				spin_unlock(shost->host_lock);
				spin_lock_irq(&phba->hbalock);
				break;
			}
		}
	} while (restart_loop);

	spin_unlock_irq(&phba->hbalock);

	/* Release the cleaned-up mailbox commands */
	while (!list_empty(&mbox_cmd_list)) {
		list_remove_head(&mbox_cmd_list, mb, LPFC_MBOXQ_t, list);
		if (mb->u.mb.mbxCommand == MBX_REG_LOGIN64) {
			mp = (struct lpfc_dmabuf *) (mb->context1);
			if (mp) {
				__lpfc_mbuf_free(phba, mp->virt, mp->phys);
				kfree(mp);
			}
			ndlp = (struct lpfc_nodelist *) mb->context2;
			mb->context2 = NULL;
			if (ndlp) {
				spin_lock(shost->host_lock);
				ndlp->nlp_flag &= ~NLP_IGNR_REG_CMPL;
				spin_unlock(shost->host_lock);
				lpfc_nlp_put(ndlp);
			}
		}
		mempool_free(mb, phba->mbox_mem_pool);
	}

	/* Release the ndlp with the cleaned-up active mailbox command */
	if (act_mbx_ndlp) {
		spin_lock(shost->host_lock);
		act_mbx_ndlp->nlp_flag &= ~NLP_IGNR_REG_CMPL;
		spin_unlock(shost->host_lock);
		lpfc_nlp_put(act_mbx_ndlp);
	}
}
19012
19013
19014
19015
19016
19017
19018
19019
19020
19021
19022
19023
19024uint32_t
19025lpfc_drain_txq(struct lpfc_hba *phba)
19026{
19027 LIST_HEAD(completions);
19028 struct lpfc_sli_ring *pring;
19029 struct lpfc_iocbq *piocbq = NULL;
19030 unsigned long iflags = 0;
19031 char *fail_msg = NULL;
19032 struct lpfc_sglq *sglq;
19033 union lpfc_wqe128 wqe;
19034 uint32_t txq_cnt = 0;
19035
19036 pring = lpfc_phba_elsring(phba);
19037 if (unlikely(!pring))
19038 return 0;
19039
19040 spin_lock_irqsave(&pring->ring_lock, iflags);
19041 list_for_each_entry(piocbq, &pring->txq, list) {
19042 txq_cnt++;
19043 }
19044
19045 if (txq_cnt > pring->txq_max)
19046 pring->txq_max = txq_cnt;
19047
19048 spin_unlock_irqrestore(&pring->ring_lock, iflags);
19049
19050 while (!list_empty(&pring->txq)) {
19051 spin_lock_irqsave(&pring->ring_lock, iflags);
19052
19053 piocbq = lpfc_sli_ringtx_get(phba, pring);
19054 if (!piocbq) {
19055 spin_unlock_irqrestore(&pring->ring_lock, iflags);
19056 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
19057 "2823 txq empty and txq_cnt is %d\n ",
19058 txq_cnt);
19059 break;
19060 }
19061 sglq = __lpfc_sli_get_els_sglq(phba, piocbq);
19062 if (!sglq) {
19063 __lpfc_sli_ringtx_put(phba, pring, piocbq);
19064 spin_unlock_irqrestore(&pring->ring_lock, iflags);
19065 break;
19066 }
19067 txq_cnt--;
19068
19069
19070
19071
19072 piocbq->sli4_lxritag = sglq->sli4_lxritag;
19073 piocbq->sli4_xritag = sglq->sli4_xritag;
19074 if (NO_XRI == lpfc_sli4_bpl2sgl(phba, piocbq, sglq))
19075 fail_msg = "to convert bpl to sgl";
19076 else if (lpfc_sli4_iocb2wqe(phba, piocbq, &wqe))
19077 fail_msg = "to convert iocb to wqe";
19078 else if (lpfc_sli4_wq_put(phba->sli4_hba.els_wq, &wqe))
19079 fail_msg = " - Wq is full";
19080 else
19081 lpfc_sli_ringtxcmpl_put(phba, pring, piocbq);
19082
19083 if (fail_msg) {
19084
19085 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
19086 "2822 IOCB failed %s iotag 0x%x "
19087 "xri 0x%x\n",
19088 fail_msg,
19089 piocbq->iotag, piocbq->sli4_xritag);
19090 list_add_tail(&piocbq->list, &completions);
19091 }
19092 spin_unlock_irqrestore(&pring->ring_lock, iflags);
19093 }
19094
19095
19096 lpfc_sli_cancel_iocbs(phba, &completions, IOSTAT_LOCAL_REJECT,
19097 IOERR_SLI_ABORTED);
19098
19099 return txq_cnt;
19100}
19101
19102
19103
19104
19105
19106
19107
19108
19109
19110
19111
19112
19113
19114
19115
19116
19117
19118
/**
 * lpfc_wqe_bpl2sgl - Convert a WQE's buffer pointer list to an SGL
 * @phba: pointer to lpfc hba data structure.
 * @pwqeq: the WQE request to convert (BPL in context3, BDE count in rsvd2).
 * @sglq: the sglq whose SGL is to be filled in.
 *
 * Copies the BDEs of the request's BPL into @sglq's SGL, marking the last
 * entry and setting per-entry type/offset fields according to the WQE
 * command (GEN_REQUEST64 tracks a running inbound offset; FCP_TRSP64 uses
 * offset 0; FCP_TSEND64/TRECEIVE64 take the type from the BDE flags and
 * reset the offset for the first three entries).  XMIT_BLS_RSP64 needs no
 * conversion.  If there is no BPL but the embedded BDE is a plain 64-bit
 * BDE, a single SGL entry is built from it.
 *
 * Return: the sglq's XRI tag on success, NO_XRI on any failure (missing
 * arguments, missing BPL).
 */
static uint16_t
lpfc_wqe_bpl2sgl(struct lpfc_hba *phba, struct lpfc_iocbq *pwqeq,
		 struct lpfc_sglq *sglq)
{
	uint16_t xritag = NO_XRI;
	struct ulp_bde64 *bpl = NULL;
	struct ulp_bde64 bde;
	struct sli4_sge *sgl  = NULL;
	struct lpfc_dmabuf *dmabuf;
	union lpfc_wqe128 *wqe;
	int numBdes = 0;
	int i = 0;
	uint32_t offset = 0;	/* accumulated offset in the sg request list */
	int inbound = 0;	/* number of sg reply entries inbound from firmware */
	uint32_t cmd;

	if (!pwqeq || !sglq)
		return xritag;

	sgl  = (struct sli4_sge *)sglq->sgl;
	wqe = &pwqeq->wqe;
	pwqeq->iocb.ulpIoTag = pwqeq->iotag;

	cmd = bf_get(wqe_cmnd, &wqe->generic.wqe_com);
	if (cmd == CMD_XMIT_BLS_RSP64_WQE)
		return sglq->sli4_xritag;
	numBdes = pwqeq->rsvd2;
	if (numBdes) {
		/* The addrHigh and addrLow fields within the WQE
		 * have not been byteswapped yet so there is no
		 * need to swap them back.
		 */
		if (pwqeq->context3)
			dmabuf = (struct lpfc_dmabuf *)pwqeq->context3;
		else
			return xritag;

		bpl  = (struct ulp_bde64 *)dmabuf->virt;
		if (!bpl)
			return xritag;

		for (i = 0; i < numBdes; i++) {
			/* Should already be byte swapped. */
			sgl->addr_hi = bpl->addrHigh;
			sgl->addr_lo = bpl->addrLow;

			sgl->word2 = le32_to_cpu(sgl->word2);
			if ((i+1) == numBdes)
				bf_set(lpfc_sli4_sge_last, sgl, 1);
			else
				bf_set(lpfc_sli4_sge_last, sgl, 0);

			/* swap the size field back to the cpu so we
			 * can assign it to the sgl.
			 */
			bde.tus.w = le32_to_cpu(bpl->tus.w);
			sgl->sge_len = cpu_to_le32(bde.tus.f.bdeSize);
			/* The offsets in the sgl need to be accumulated
			 * separately for the request and reply lists.
			 * The request is always first, the reply follows.
			 */
			switch (cmd) {
			case CMD_GEN_REQUEST64_WQE:
				/* add up the reply sg entries */
				if (bpl->tus.f.bdeFlags == BUFF_TYPE_BDE_64I)
					inbound++;
				/* first inbound? reset the offset */
				if (inbound == 1)
					offset = 0;
				bf_set(lpfc_sli4_sge_offset, sgl, offset);
				bf_set(lpfc_sli4_sge_type, sgl,
					LPFC_SGE_TYPE_DATA);
				offset += bde.tus.f.bdeSize;
				break;
			case CMD_FCP_TRSP64_WQE:
				bf_set(lpfc_sli4_sge_offset, sgl, 0);
				bf_set(lpfc_sli4_sge_type, sgl,
					LPFC_SGE_TYPE_DATA);
				break;
			case CMD_FCP_TSEND64_WQE:
			case CMD_FCP_TRECEIVE64_WQE:
				bf_set(lpfc_sli4_sge_type, sgl,
					bpl->tus.f.bdeFlags);
				/* NOTE(review): the first three entries
				 * keep offset 0 here — presumably skip
				 * command resp/sense entries; confirm.
				 */
				if (i < 3)
					offset = 0;
				else
					offset += bde.tus.f.bdeSize;
				bf_set(lpfc_sli4_sge_offset, sgl, offset);
				break;
			}
			sgl->word2 = cpu_to_le32(sgl->word2);
			bpl++;
			sgl++;
		}
	} else if (wqe->gen_req.bde.tus.f.bdeFlags == BUFF_TYPE_BDE_64) {
		/* The addrHigh and addrLow fields of the BDE have not
		 * been byteswapped yet so they need to be swapped
		 * before putting them in the sgl.
		 */
		sgl->addr_hi = cpu_to_le32(wqe->gen_req.bde.addrHigh);
		sgl->addr_lo = cpu_to_le32(wqe->gen_req.bde.addrLow);
		sgl->word2 = le32_to_cpu(sgl->word2);
		bf_set(lpfc_sli4_sge_last, sgl, 1);
		sgl->word2 = cpu_to_le32(sgl->word2);
		sgl->sge_len = cpu_to_le32(wqe->gen_req.bde.tus.f.bdeSize);
	}
	return sglq->sli4_xritag;
}
19226
19227
19228
19229
19230
19231
19232
/**
 * lpfc_sli4_issue_wqe - Issue an SLI4 Work Queue Entry (WQE)
 * @phba: pointer to lpfc hba data structure.
 * @ring_number: ring number (currently unused; the ring is selected from
 *               the iocb_flag / hba_wqidx of @pwqe instead).
 * @pwqe: pointer to the command WQE to post.
 *
 * Dispatches @pwqe to the proper work queue based on its iocb_flag:
 *  - LPFC_IO_NVME_LS: allocate an ELS sglq, convert BPL to SGL, set the
 *    XRI tag and post to the NVME LS work queue.
 *  - LPFC_IO_NVME: post to the per-index NVME work queue with the
 *    matching completion queue id.
 *  - LPFC_IO_NVMET: take the sglq from the receive context buffer
 *    (context2) when no XRI is assigned yet, set the XRI tag and post
 *    to the per-index NVME work queue.
 * Each path holds the target ring's ring_lock and, on success, puts the
 * request on the ring's txcmplq.
 *
 * Return: 0 on success; WQE_BUSY if no sglq is available; WQE_ERROR on
 * conversion failure or unrecognized iocb_flag; otherwise the nonzero
 * status from lpfc_sli4_wq_put().
 */
int
lpfc_sli4_issue_wqe(struct lpfc_hba *phba, uint32_t ring_number,
		    struct lpfc_iocbq *pwqe)
{
	union lpfc_wqe128 *wqe = &pwqe->wqe;
	struct lpfc_nvmet_rcv_ctx *ctxp;
	struct lpfc_queue *wq;
	struct lpfc_sglq *sglq;
	struct lpfc_sli_ring *pring;
	unsigned long iflags;
	uint32_t ret = 0;

	/* NVME_LS and NVME_LS ABTS requests. */
	if (pwqe->iocb_flag & LPFC_IO_NVME_LS) {
		pring =  phba->sli4_hba.nvmels_wq->pring;
		spin_lock_irqsave(&pring->ring_lock, iflags);
		sglq = __lpfc_sli_get_els_sglq(phba, pwqe);
		if (!sglq) {
			spin_unlock_irqrestore(&pring->ring_lock, iflags);
			return WQE_BUSY;
		}
		pwqe->sli4_lxritag = sglq->sli4_lxritag;
		pwqe->sli4_xritag = sglq->sli4_xritag;
		if (lpfc_wqe_bpl2sgl(phba, pwqe, sglq) == NO_XRI) {
			spin_unlock_irqrestore(&pring->ring_lock, iflags);
			return WQE_ERROR;
		}
		bf_set(wqe_xri_tag, &pwqe->wqe.xmit_bls_rsp.wqe_com,
		       pwqe->sli4_xritag);
		ret = lpfc_sli4_wq_put(phba->sli4_hba.nvmels_wq, wqe);
		if (ret) {
			spin_unlock_irqrestore(&pring->ring_lock, iflags);
			return ret;
		}

		lpfc_sli_ringtxcmpl_put(phba, pring, pwqe);
		spin_unlock_irqrestore(&pring->ring_lock, iflags);
		return 0;
	}

	/* NVME_FCREQ and NVME_ABTS requests */
	if (pwqe->iocb_flag & LPFC_IO_NVME) {
		/* Get the IO distribution (hba_wqidx) for WQ assignment. */
		pring =  phba->sli4_hba.nvme_wq[pwqe->hba_wqidx]->pring;

		spin_lock_irqsave(&pring->ring_lock, iflags);
		wq = phba->sli4_hba.nvme_wq[pwqe->hba_wqidx];
		bf_set(wqe_cqid, &wqe->generic.wqe_com,
		      phba->sli4_hba.nvme_cq[pwqe->hba_wqidx]->queue_id);
		ret = lpfc_sli4_wq_put(wq, wqe);
		if (ret) {
			spin_unlock_irqrestore(&pring->ring_lock, iflags);
			return ret;
		}
		lpfc_sli_ringtxcmpl_put(phba, pring, pwqe);
		spin_unlock_irqrestore(&pring->ring_lock, iflags);
		return 0;
	}

	/* NVMET requests */
	if (pwqe->iocb_flag & LPFC_IO_NVMET) {
		/* Get the IO distribution (hba_wqidx) for WQ assignment. */
		pring =  phba->sli4_hba.nvme_wq[pwqe->hba_wqidx]->pring;

		spin_lock_irqsave(&pring->ring_lock, iflags);
		ctxp = pwqe->context2;
		sglq = ctxp->ctxbuf->sglq;
		if (pwqe->sli4_xritag ==  NO_XRI) {
			pwqe->sli4_lxritag = sglq->sli4_lxritag;
			pwqe->sli4_xritag = sglq->sli4_xritag;
		}
		bf_set(wqe_xri_tag, &pwqe->wqe.xmit_bls_rsp.wqe_com,
		       pwqe->sli4_xritag);
		wq = phba->sli4_hba.nvme_wq[pwqe->hba_wqidx];
		bf_set(wqe_cqid, &wqe->generic.wqe_com,
		      phba->sli4_hba.nvme_cq[pwqe->hba_wqidx]->queue_id);
		ret = lpfc_sli4_wq_put(wq, wqe);
		if (ret) {
			spin_unlock_irqrestore(&pring->ring_lock, iflags);
			return ret;
		}
		lpfc_sli_ringtxcmpl_put(phba, pring, pwqe);
		spin_unlock_irqrestore(&pring->ring_lock, iflags);
		return 0;
	}
	return WQE_ERROR;
}
19320