1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24#include <linux/blkdev.h>
25#include <linux/pci.h>
26#include <linux/interrupt.h>
27#include <linux/delay.h>
28#include <linux/slab.h>
29#include <linux/lockdep.h>
30
31#include <scsi/scsi.h>
32#include <scsi/scsi_cmnd.h>
33#include <scsi/scsi_device.h>
34#include <scsi/scsi_host.h>
35#include <scsi/scsi_transport_fc.h>
36#include <scsi/fc/fc_fs.h>
37#include <linux/aer.h>
38#ifdef CONFIG_X86
39#include <asm/set_memory.h>
40#endif
41
42#include <linux/nvme-fc-driver.h>
43
44#include "lpfc_hw4.h"
45#include "lpfc_hw.h"
46#include "lpfc_sli.h"
47#include "lpfc_sli4.h"
48#include "lpfc_nl.h"
49#include "lpfc_disc.h"
50#include "lpfc.h"
51#include "lpfc_scsi.h"
52#include "lpfc_nvme.h"
53#include "lpfc_nvmet.h"
54#include "lpfc_crtn.h"
55#include "lpfc_logmsg.h"
56#include "lpfc_compat.h"
57#include "lpfc_debugfs.h"
58#include "lpfc_vport.h"
59#include "lpfc_version.h"
60
61
/* Classification of SLI-3 IOCB commands, as produced by
 * lpfc_sli_iocb_cmd_type(); drives how the response path handles a
 * completed or received IOCB.
 */
typedef enum _lpfc_iocb_type {
	LPFC_UNKNOWN_IOCB,	/* command not recognized/handled */
	LPFC_UNSOL_IOCB,	/* unsolicited (receive side) IOCB */
	LPFC_SOL_IOCB,		/* solicited command issued by the driver */
	LPFC_ABORT_IOCB		/* abort/close exchange IOCB */
} lpfc_iocb_type;
68
69
70
71static int lpfc_sli_issue_mbox_s4(struct lpfc_hba *, LPFC_MBOXQ_t *,
72 uint32_t);
73static int lpfc_sli4_read_rev(struct lpfc_hba *, LPFC_MBOXQ_t *,
74 uint8_t *, uint32_t *);
75static struct lpfc_iocbq *lpfc_sli4_els_wcqe_to_rspiocbq(struct lpfc_hba *,
76 struct lpfc_iocbq *);
77static void lpfc_sli4_send_seq_to_ulp(struct lpfc_vport *,
78 struct hbq_dmabuf *);
79static void lpfc_sli4_handle_mds_loopback(struct lpfc_vport *vport,
80 struct hbq_dmabuf *dmabuf);
81static bool lpfc_sli4_fp_handle_cqe(struct lpfc_hba *phba,
82 struct lpfc_queue *cq, struct lpfc_cqe *cqe);
83static int lpfc_sli4_post_sgl_list(struct lpfc_hba *, struct list_head *,
84 int);
85static void lpfc_sli4_hba_handle_eqe(struct lpfc_hba *phba,
86 struct lpfc_queue *eq,
87 struct lpfc_eqe *eqe);
88static bool lpfc_sli4_mbox_completions_pending(struct lpfc_hba *phba);
89static bool lpfc_sli4_process_missed_mbox_completions(struct lpfc_hba *phba);
90
/**
 * lpfc_get_iocb_from_iocbq - Get the iocb from the iocbq
 * @iocbq: Pointer to driver iocb object.
 *
 * This function is called with no lock held. This function
 * returns a pointer to the firmware IOCB embedded in the driver
 * iocb queue entry.
 */
static IOCB_t *
lpfc_get_iocb_from_iocbq(struct lpfc_iocbq *iocbq)
{
	return &iocbq->iocb;
}
96
97#if defined(CONFIG_64BIT) && defined(__LITTLE_ENDIAN)
98
99
100
101
102
103
104
105
106
107
108
109
110
/**
 * lpfc_sli4_pcimem_bcopy - Copy SLI-4 queue memory in 64-bit words
 * @srcp: source buffer (64-bit aligned)
 * @destp: destination buffer (64-bit aligned)
 * @cnt: number of bytes to copy
 *
 * Fast copy used on 64-bit little-endian builds (see the surrounding
 * #if guard); no byte swapping is performed. A trailing partial word
 * is copied as a whole 64-bit word, matching the generic
 * lpfc_sli_pcimem_bcopy() fallback used on other configurations.
 */
static void
lpfc_sli4_pcimem_bcopy(void *srcp, void *destp, uint32_t cnt)
{
	uint64_t *from = srcp;
	uint64_t *to = destp;
	uint32_t copied;

	for (copied = 0; copied < cnt; copied += sizeof(uint64_t))
		*to++ = *from++;
}
121#else
122#define lpfc_sli4_pcimem_bcopy(a, b, c) lpfc_sli_pcimem_bcopy(a, b, c)
123#endif
124
125
126
127
128
129
130
131
132
133
134
135
136
/**
 * lpfc_sli4_wq_put - Put a Work Queue Entry on a Work Queue
 * @q: The Work Queue to operate on.
 * @wqe: The Work Queue Entry to put on the Work queue.
 *
 * This routine copies the contents of @wqe to the next available entry on
 * @q and then rings the Work Queue doorbell to signal the HBA to start
 * processing the entry. When the queue supports Direct Packet Push (DPP),
 * the WQE is additionally pushed through the DPP aperture.
 *
 * Return: 0 on success, -EBUSY if the queue is full, -ENOMEM if @q is
 * invalid, -EINVAL on an unrecognized doorbell format.
 */
static int
lpfc_sli4_wq_put(struct lpfc_queue *q, union lpfc_wqe128 *wqe)
{
	union lpfc_wqe *temp_wqe;
	struct lpfc_register doorbell;
	uint32_t host_index;
	uint32_t idx;
	uint32_t i = 0;
	uint8_t *tmp;
	u32 if_type;

	/* sanity check on queue memory */
	if (unlikely(!q))
		return -ENOMEM;
	temp_wqe = lpfc_sli4_qe(q, q->host_index);

	/* If the host has not yet processed the next entry then we are done */
	idx = ((q->host_index + 1) % q->entry_count);
	if (idx == q->hba_index) {
		q->WQ_overflow++;
		return -EBUSY;
	}
	q->WQ_posted++;
	/* set consumption flag every once in a while */
	if (!((q->host_index + 1) % q->notify_interval))
		bf_set(wqe_wqec, &wqe->generic.wqe_com, 1);
	else
		bf_set(wqe_wqec, &wqe->generic.wqe_com, 0);
	if (q->phba->sli3_options & LPFC_SLI4_PHWQ_ENABLED)
		bf_set(wqe_wqid, &wqe->generic.wqe_com, q->queue_id);
	lpfc_sli4_pcimem_bcopy(wqe, temp_wqe, q->entry_size);
	if (q->dpp_enable && q->phba->cfg_enable_dpp) {
		/* push the WQE through the DPP aperture as well */
		tmp = (uint8_t *)temp_wqe;
#ifdef __raw_writeq
		for (i = 0; i < q->entry_size; i += sizeof(uint64_t))
			__raw_writeq(*((uint64_t *)(tmp + i)),
					q->dpp_regaddr + i);
#else
		for (i = 0; i < q->entry_size; i += sizeof(uint32_t))
			__raw_writel(*((uint32_t *)(tmp + i)),
					q->dpp_regaddr + i);
#endif
	}
	/* ensure WQE bcopy and DPP flushed before doorbell write */
	wmb();

	/* Save off the old host index before advancing past this entry */
	host_index = q->host_index;

	q->host_index = idx;

	/* Ring Doorbell */
	doorbell.word0 = 0;
	if (q->db_format == LPFC_DB_LIST_FORMAT) {
		if (q->dpp_enable && q->phba->cfg_enable_dpp) {
			bf_set(lpfc_if6_wq_db_list_fm_num_posted, &doorbell, 1);
			bf_set(lpfc_if6_wq_db_list_fm_dpp, &doorbell, 1);
			bf_set(lpfc_if6_wq_db_list_fm_dpp_id, &doorbell,
			       q->dpp_id);
			bf_set(lpfc_if6_wq_db_list_fm_id, &doorbell,
			       q->queue_id);
		} else {
			bf_set(lpfc_wq_db_list_fm_num_posted, &doorbell, 1);
			bf_set(lpfc_wq_db_list_fm_id, &doorbell, q->queue_id);

			/* Leave the WQE index bits clear for if_type 6 */
			if_type = bf_get(lpfc_sli_intf_if_type,
					 &q->phba->sli4_hba.sli_intf);
			if (if_type != LPFC_SLI_INTF_IF_TYPE_6)
				bf_set(lpfc_wq_db_list_fm_index, &doorbell,
				       host_index);
		}
	} else if (q->db_format == LPFC_DB_RING_FORMAT) {
		bf_set(lpfc_wq_db_ring_fm_num_posted, &doorbell, 1);
		bf_set(lpfc_wq_db_ring_fm_id, &doorbell, q->queue_id);
	} else {
		return -EINVAL;
	}
	writel(doorbell.word0, q->db_regaddr);

	return 0;
}
220
221
222
223
224
225
226
227
228
229
230
231
232static uint32_t
233lpfc_sli4_wq_release(struct lpfc_queue *q, uint32_t index)
234{
235 uint32_t released = 0;
236
237
238 if (unlikely(!q))
239 return 0;
240
241 if (q->hba_index == index)
242 return 0;
243 do {
244 q->hba_index = ((q->hba_index + 1) % q->entry_count);
245 released++;
246 } while (q->hba_index != index);
247 return released;
248}
249
250
251
252
253
254
255
256
257
258
259
260
261
/**
 * lpfc_sli4_mq_put - Put a Mailbox Queue Entry on a Mailbox Queue
 * @q: The Mailbox Queue to operate on.
 * @mqe: The Mailbox Queue Entry to put on the Mailbox queue.
 *
 * Copies @mqe into the next available slot on @q, records the slot as the
 * active mailbox for completion handling, and rings the MQ doorbell.
 *
 * Return: 0 on success; -ENOMEM if @q is invalid or the queue is full.
 *
 * NOTE(review): the return type is uint32_t but -ENOMEM is returned on
 * failure; callers appear to only test for non-zero, so this works, but
 * an int return would be cleaner — confirm callers before changing.
 */
static uint32_t
lpfc_sli4_mq_put(struct lpfc_queue *q, struct lpfc_mqe *mqe)
{
	struct lpfc_mqe *temp_mqe;
	struct lpfc_register doorbell;

	/* sanity check on queue memory */
	if (unlikely(!q))
		return -ENOMEM;
	temp_mqe = lpfc_sli4_qe(q, q->host_index);

	/* If the host has not yet processed the next entry then we are done */
	if (((q->host_index + 1) % q->entry_count) == q->hba_index)
		return -ENOMEM;
	lpfc_sli4_pcimem_bcopy(mqe, temp_mqe, q->entry_size);
	/* Save off the mailbox pointer for completion */
	q->phba->mbox = (MAILBOX_t *)temp_mqe;

	/* Update the host index before invoking device */
	q->host_index = ((q->host_index + 1) % q->entry_count);

	/* Ring Doorbell */
	doorbell.word0 = 0;
	bf_set(lpfc_mq_doorbell_num_posted, &doorbell, 1);
	bf_set(lpfc_mq_doorbell_id, &doorbell, q->queue_id);
	writel(doorbell.word0, q->phba->sli4_hba.MQDBregaddr);
	return 0;
}
290
291
292
293
294
295
296
297
298
299
300
301static uint32_t
302lpfc_sli4_mq_release(struct lpfc_queue *q)
303{
304
305 if (unlikely(!q))
306 return 0;
307
308
309 q->phba->mbox = NULL;
310 q->hba_index = ((q->hba_index + 1) % q->entry_count);
311 return 1;
312}
313
314
315
316
317
318
319
320
321
322
/**
 * lpfc_sli4_eq_get - Gets the next valid EQE from a EQ
 * @q: The Event Queue to get the first valid EQE from
 *
 * This routine will get the first valid Event Queue Entry from @q, update
 * nothing, and return a pointer to that entry. If no valid EQEs are in @q
 * then this routine will return NULL.
 */
static struct lpfc_eqe *
lpfc_sli4_eq_get(struct lpfc_queue *q)
{
	struct lpfc_eqe *eqe;

	/* sanity check on queue memory */
	if (unlikely(!q))
		return NULL;
	eqe = lpfc_sli4_qe(q, q->host_index);

	/* If the next EQE is not valid then we are done */
	if (bf_get_le32(lpfc_eqe_valid, eqe) != q->qe_valid)
		return NULL;

	/*
	 * insert barrier for instruction interlock : data from the hardware
	 * must have the valid bit checked before it can be copied and acted
	 * upon. Speculative instructions could otherwise copy EQE data
	 * before the valid-bit check in the caller.
	 */
	mb();
	return eqe;
}
349
350
351
352
353
354
/**
 * lpfc_sli4_eq_clr_intr - Turn off interrupts from this EQ
 * @q: The Event Queue to disable interrupts
 *
 * Writes the EQ doorbell with the clear-interrupt bit set (and no
 * released entries), silencing further interrupts from this EQ.
 */
void
lpfc_sli4_eq_clr_intr(struct lpfc_queue *q)
{
	struct lpfc_register doorbell;

	doorbell.word0 = 0;
	bf_set(lpfc_eqcq_doorbell_eqci, &doorbell, 1);
	bf_set(lpfc_eqcq_doorbell_qt, &doorbell, LPFC_QUEUE_TYPE_EVENT);
	bf_set(lpfc_eqcq_doorbell_eqid_hi, &doorbell,
	       (q->queue_id >> LPFC_EQID_HI_FIELD_SHIFT));
	bf_set(lpfc_eqcq_doorbell_eqid_lo, &doorbell, q->queue_id);
	writel(doorbell.word0, q->phba->sli4_hba.EQDBregaddr);
}
368
369
370
371
372
373
/**
 * lpfc_sli4_if6_eq_clr_intr - Turn off interrupts from this EQ (if_type 6)
 * @q: The Event Queue to disable interrupts
 *
 * if_type 6 variant of lpfc_sli4_eq_clr_intr(): writing the EQ id with
 * no entries released and the arm bit clear quiesces the EQ interrupt.
 */
void
lpfc_sli4_if6_eq_clr_intr(struct lpfc_queue *q)
{
	struct lpfc_register doorbell;

	doorbell.word0 = 0;
	bf_set(lpfc_if6_eq_doorbell_eqid, &doorbell, q->queue_id);
	writel(doorbell.word0, q->phba->sli4_hba.EQDBregaddr);
}
383
384
385
386
387
388
389
390
391
392
393
394
/**
 * lpfc_sli4_write_eq_db - write EQ DB for eqe's consumed or arm state
 * @phba: adapter with EQ
 * @q: The Event Queue that the host has completed processing for.
 * @count: Number of elements that have been consumed
 * @arm: Indicates whether the host wants to arm this EQ.
 *
 * This routine notifies the HBA, by ringing the doorbell, that @count
 * EQEs have been processed and may be reused, optionally re-arming the
 * EQ to raise another interrupt.
 */
void
lpfc_sli4_write_eq_db(struct lpfc_hba *phba, struct lpfc_queue *q,
		      uint32_t count, bool arm)
{
	struct lpfc_register doorbell;

	/* sanity check on queue memory */
	if (unlikely(!q || (count == 0 && !arm)))
		return;

	/* ring doorbell for number popped */
	doorbell.word0 = 0;
	if (arm) {
		bf_set(lpfc_eqcq_doorbell_arm, &doorbell, 1);
		bf_set(lpfc_eqcq_doorbell_eqci, &doorbell, 1);
	}
	bf_set(lpfc_eqcq_doorbell_num_released, &doorbell, count);
	bf_set(lpfc_eqcq_doorbell_qt, &doorbell, LPFC_QUEUE_TYPE_EVENT);
	bf_set(lpfc_eqcq_doorbell_eqid_hi, &doorbell,
	       (q->queue_id >> LPFC_EQID_HI_FIELD_SHIFT));
	bf_set(lpfc_eqcq_doorbell_eqid_lo, &doorbell, q->queue_id);
	writel(doorbell.word0, q->phba->sli4_hba.EQDBregaddr);
	/* PCI read to flush PCI pipeline on re-arming for INTx mode */
	if ((q->phba->intr_type == INTx) && (arm == LPFC_QUEUE_REARM))
		readl(q->phba->sli4_hba.EQDBregaddr);
}
421
422
423
424
425
426
427
428
429
430
431
432
/**
 * lpfc_sli4_if6_write_eq_db - write EQ DB for eqe's consumed or arm state
 * @phba: adapter with EQ
 * @q: The Event Queue that the host has completed processing for.
 * @count: Number of elements that have been consumed
 * @arm: Indicates whether the host wants to arm this EQ.
 *
 * if_type 6 variant of lpfc_sli4_write_eq_db(): releases @count EQEs
 * back to the hardware and optionally re-arms the EQ.
 */
void
lpfc_sli4_if6_write_eq_db(struct lpfc_hba *phba, struct lpfc_queue *q,
			  uint32_t count, bool arm)
{
	struct lpfc_register doorbell;

	/* sanity check on queue memory */
	if (unlikely(!q || (count == 0 && !arm)))
		return;

	/* ring doorbell for number popped */
	doorbell.word0 = 0;
	if (arm)
		bf_set(lpfc_if6_eq_doorbell_arm, &doorbell, 1);
	bf_set(lpfc_if6_eq_doorbell_num_released, &doorbell, count);
	bf_set(lpfc_if6_eq_doorbell_eqid, &doorbell, q->queue_id);
	writel(doorbell.word0, q->phba->sli4_hba.EQDBregaddr);
	/* PCI read to flush PCI pipeline on re-arming for INTx mode */
	if ((q->phba->intr_type == INTx) && (arm == LPFC_QUEUE_REARM))
		readl(q->phba->sli4_hba.EQDBregaddr);
}
454
455static void
456__lpfc_sli4_consume_eqe(struct lpfc_hba *phba, struct lpfc_queue *eq,
457 struct lpfc_eqe *eqe)
458{
459 if (!phba->sli4_hba.pc_sli4_params.eqav)
460 bf_set_le32(lpfc_eqe_valid, eqe, 0);
461
462 eq->host_index = ((eq->host_index + 1) % eq->entry_count);
463
464
465 if (phba->sli4_hba.pc_sli4_params.eqav && !eq->host_index)
466 eq->qe_valid = (eq->qe_valid) ? 0 : 1;
467}
468
469static void
470lpfc_sli4_eq_flush(struct lpfc_hba *phba, struct lpfc_queue *eq)
471{
472 struct lpfc_eqe *eqe;
473 uint32_t count = 0;
474
475
476 eqe = lpfc_sli4_eq_get(eq);
477 while (eqe) {
478 __lpfc_sli4_consume_eqe(phba, eq, eqe);
479 count++;
480 eqe = lpfc_sli4_eq_get(eq);
481 }
482
483
484 phba->sli4_hba.sli4_write_eq_db(phba, eq, count, LPFC_QUEUE_REARM);
485}
486
/**
 * lpfc_sli4_process_eq - process one EQ's pending entries
 * @phba: adapter with EQ
 * @eq: the Event Queue to process
 *
 * Claims @eq (so only one context processes it at a time), dispatches
 * each valid EQE to lpfc_sli4_hba_handle_eqe(), periodically releasing
 * consumed entries to the hardware, and finally re-arms the EQ.
 * Processing stops early once max_proc_limit entries have been handled.
 *
 * Return: the number of EQEs processed (0 if the claim failed).
 *
 * NOTE(review): when the cmpxchg claim fails we still jump to
 * rearm_and_exit and write the REARM doorbell (with consumed == 0)
 * while another context owns the queue — presumably intentional so the
 * EQ is never left unarmed; confirm against the interrupt handlers.
 */
static int
lpfc_sli4_process_eq(struct lpfc_hba *phba, struct lpfc_queue *eq)
{
	struct lpfc_eqe *eqe;
	int count = 0, consumed = 0;

	/* Only one context may process the EQ at a time */
	if (cmpxchg(&eq->queue_claimed, 0, 1) != 0)
		goto rearm_and_exit;

	eqe = lpfc_sli4_eq_get(eq);
	while (eqe) {
		lpfc_sli4_hba_handle_eqe(phba, eq, eqe);
		__lpfc_sli4_consume_eqe(phba, eq, eqe);

		consumed++;
		/* Bound the work done in one invocation */
		if (!(++count % eq->max_proc_limit))
			break;

		/* Periodically hand consumed entries back, without arming */
		if (!(count % eq->notify_interval)) {
			phba->sli4_hba.sli4_write_eq_db(phba, eq, consumed,
							LPFC_QUEUE_NOARM);
			consumed = 0;
		}

		eqe = lpfc_sli4_eq_get(eq);
	}
	eq->EQ_processed += count;

	/* Track the max number of EQEs processed in 1 intr */
	if (count > eq->EQ_max_eqe)
		eq->EQ_max_eqe = count;

	eq->queue_claimed = 0;

rearm_and_exit:
	/* Always clear and re-arm the EQ */
	phba->sli4_hba.sli4_write_eq_db(phba, eq, consumed, LPFC_QUEUE_REARM);

	return count;
}
527
528
529
530
531
532
533
534
535
536
/**
 * lpfc_sli4_cq_get - Gets the next valid CQE from a CQ
 * @q: The Completion Queue to get the first valid CQE from
 *
 * This routine will get the first valid Completion Queue Entry from @q
 * and return a pointer to that entry. If no valid CQEs are in @q then
 * this routine will return NULL.
 */
static struct lpfc_cqe *
lpfc_sli4_cq_get(struct lpfc_queue *q)
{
	struct lpfc_cqe *cqe;

	/* sanity check on queue memory */
	if (unlikely(!q))
		return NULL;
	cqe = lpfc_sli4_qe(q, q->host_index);

	/* If the next CQE is not valid then we are done */
	if (bf_get_le32(lpfc_cqe_valid, cqe) != q->qe_valid)
		return NULL;

	/*
	 * insert barrier for instruction interlock : data from the hardware
	 * must have the valid bit checked before it can be copied and acted
	 * upon. Speculative instructions could otherwise copy CQE data
	 * before the valid-bit check in the caller.
	 */
	mb();
	return cqe;
}
562
563static void
564__lpfc_sli4_consume_cqe(struct lpfc_hba *phba, struct lpfc_queue *cq,
565 struct lpfc_cqe *cqe)
566{
567 if (!phba->sli4_hba.pc_sli4_params.cqav)
568 bf_set_le32(lpfc_cqe_valid, cqe, 0);
569
570 cq->host_index = ((cq->host_index + 1) % cq->entry_count);
571
572
573 if (phba->sli4_hba.pc_sli4_params.cqav && !cq->host_index)
574 cq->qe_valid = (cq->qe_valid) ? 0 : 1;
575}
576
577
578
579
580
581
582
583
584
585
586
587
/**
 * lpfc_sli4_write_cq_db - write cq DB for entries consumed or arm state.
 * @phba: the adapter with the CQ
 * @q: The Completion Queue that the host has completed processing for.
 * @count: the number of elements that were consumed
 * @arm: Indicates whether the host wants to arm this CQ.
 *
 * This routine will notify the HBA, by ringing the doorbell, that the
 * CQEs have been processed. The @arm parameter specifies whether the
 * queue should be rearmed when ringing the doorbell.
 */
void
lpfc_sli4_write_cq_db(struct lpfc_hba *phba, struct lpfc_queue *q,
		      uint32_t count, bool arm)
{
	struct lpfc_register doorbell;

	/* sanity check on queue memory */
	if (unlikely(!q || (count == 0 && !arm)))
		return;

	/* ring doorbell for number popped */
	doorbell.word0 = 0;
	if (arm)
		bf_set(lpfc_eqcq_doorbell_arm, &doorbell, 1);
	bf_set(lpfc_eqcq_doorbell_num_released, &doorbell, count);
	bf_set(lpfc_eqcq_doorbell_qt, &doorbell, LPFC_QUEUE_TYPE_COMPLETION);
	bf_set(lpfc_eqcq_doorbell_cqid_hi, &doorbell,
	       (q->queue_id >> LPFC_CQID_HI_FIELD_SHIFT));
	bf_set(lpfc_eqcq_doorbell_cqid_lo, &doorbell, q->queue_id);
	writel(doorbell.word0, q->phba->sli4_hba.CQDBregaddr);
}
609
610
611
612
613
614
615
616
617
618
619
620
/**
 * lpfc_sli4_if6_write_cq_db - write cq DB for entries consumed or arm state.
 * @phba: the adapter with the CQ
 * @q: The Completion Queue that the host has completed processing for.
 * @count: the number of elements that were consumed
 * @arm: Indicates whether the host wants to arm this CQ.
 *
 * if_type 6 variant of lpfc_sli4_write_cq_db(): releases @count CQEs
 * back to the hardware and optionally re-arms the CQ.
 */
void
lpfc_sli4_if6_write_cq_db(struct lpfc_hba *phba, struct lpfc_queue *q,
			  uint32_t count, bool arm)
{
	struct lpfc_register doorbell;

	/* sanity check on queue memory */
	if (unlikely(!q || (count == 0 && !arm)))
		return;

	/* ring doorbell for number popped */
	doorbell.word0 = 0;
	if (arm)
		bf_set(lpfc_if6_cq_doorbell_arm, &doorbell, 1);
	bf_set(lpfc_if6_cq_doorbell_num_released, &doorbell, count);
	bf_set(lpfc_if6_cq_doorbell_cqid, &doorbell, q->queue_id);
	writel(doorbell.word0, q->phba->sli4_hba.CQDBregaddr);
}
639
640
641
642
643
644
645
646
647
648
649
650
651
/**
 * lpfc_sli4_rq_put - Put a Receive Buffer Queue Entry on a Receive Queue
 * @hq: The Header Receive Queue to operate on.
 * @dq: The Data Receive Queue to operate on.
 * @hrqe: The Header Receive Queue Entry to put on the Receive queue.
 * @drqe: The Data Receive Queue Entry to put on the Receive queue.
 *
 * Posts a paired header/data RQE to the next available slot of the two
 * queues (which must stay in lock-step: same put index), then rings the
 * header RQ doorbell every notify_interval postings.
 *
 * Return: the put index used on success; -ENOMEM if a queue pointer is
 * invalid, -EINVAL on type/index mismatch, -EBUSY if the queue is full.
 */
int
lpfc_sli4_rq_put(struct lpfc_queue *hq, struct lpfc_queue *dq,
		 struct lpfc_rqe *hrqe, struct lpfc_rqe *drqe)
{
	struct lpfc_rqe *temp_hrqe;
	struct lpfc_rqe *temp_drqe;
	struct lpfc_register doorbell;
	int hq_put_index;
	int dq_put_index;

	/* sanity check on queue memory */
	if (unlikely(!hq) || unlikely(!dq))
		return -ENOMEM;
	hq_put_index = hq->host_index;
	dq_put_index = dq->host_index;
	temp_hrqe = lpfc_sli4_qe(hq, hq_put_index);
	temp_drqe = lpfc_sli4_qe(dq, dq_put_index);

	if (hq->type != LPFC_HRQ || dq->type != LPFC_DRQ)
		return -EINVAL;
	/* Header and data queues must advance together */
	if (hq_put_index != dq_put_index)
		return -EINVAL;
	/* If the host has not yet processed the next entry then we are done */
	if (((hq_put_index + 1) % hq->entry_count) == hq->hba_index)
		return -EBUSY;
	lpfc_sli4_pcimem_bcopy(hrqe, temp_hrqe, hq->entry_size);
	lpfc_sli4_pcimem_bcopy(drqe, temp_drqe, dq->entry_size);

	/* Update the host index to point to the next slot */
	hq->host_index = ((hq_put_index + 1) % hq->entry_count);
	dq->host_index = ((dq_put_index + 1) % dq->entry_count);
	hq->RQ_buf_posted++;

	/* Ring The Header Receive Queue Doorbell */
	if (!(hq->host_index % hq->notify_interval)) {
		doorbell.word0 = 0;
		if (hq->db_format == LPFC_DB_RING_FORMAT) {
			bf_set(lpfc_rq_db_ring_fm_num_posted, &doorbell,
			       hq->notify_interval);
			bf_set(lpfc_rq_db_ring_fm_id, &doorbell, hq->queue_id);
		} else if (hq->db_format == LPFC_DB_LIST_FORMAT) {
			bf_set(lpfc_rq_db_list_fm_num_posted, &doorbell,
			       hq->notify_interval);
			bf_set(lpfc_rq_db_list_fm_index, &doorbell,
			       hq->host_index);
			bf_set(lpfc_rq_db_list_fm_id, &doorbell, hq->queue_id);
		} else {
			return -EINVAL;
		}
		writel(doorbell.word0, hq->db_regaddr);
	}
	return hq_put_index;
}
705
706
707
708
709
710
711
712
713
714
715
716static uint32_t
717lpfc_sli4_rq_release(struct lpfc_queue *hq, struct lpfc_queue *dq)
718{
719
720 if (unlikely(!hq) || unlikely(!dq))
721 return 0;
722
723 if ((hq->type != LPFC_HRQ) || (dq->type != LPFC_DRQ))
724 return 0;
725 hq->hba_index = ((hq->hba_index + 1) % hq->entry_count);
726 dq->hba_index = ((dq->hba_index + 1) % dq->entry_count);
727 return 1;
728}
729
730
731
732
733
734
735
736
737
738
739
/**
 * lpfc_cmd_iocb - Get next command iocb entry in the ring
 * @phba: Pointer to HBA context object.
 * @pring: Pointer to driver SLI ring object.
 *
 * This function returns pointer to next command iocb entry
 * in the command ring. The caller must hold hbalock to prevent
 * other threads consuming the next command iocb.
 * SLI-2/SLI-3 provide different sized iocbs.
 */
static inline IOCB_t *
lpfc_cmd_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring)
{
	return (IOCB_t *) (((char *) pring->sli.sli3.cmdringaddr) +
			   pring->sli.sli3.cmdidx * phba->iocb_cmd_size);
}
746
747
748
749
750
751
752
753
754
755
756
/**
 * lpfc_resp_iocb - Get next response iocb entry in the ring
 * @phba: Pointer to HBA context object.
 * @pring: Pointer to driver SLI ring object.
 *
 * This function returns pointer to next response iocb entry
 * in the response ring. The caller must hold hbalock to make sure
 * that no other thread consume the next response iocb.
 * SLI-2/SLI-3 provide different sized iocbs.
 */
static inline IOCB_t *
lpfc_resp_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring)
{
	return (IOCB_t *) (((char *) pring->sli.sli3.rspringaddr) +
			   pring->sli.sli3.rspidx * phba->iocb_rsp_size);
}
763
764
765
766
767
768
769
770
771
772
773struct lpfc_iocbq *
774__lpfc_sli_get_iocbq(struct lpfc_hba *phba)
775{
776 struct list_head *lpfc_iocb_list = &phba->lpfc_iocb_list;
777 struct lpfc_iocbq * iocbq = NULL;
778
779 lockdep_assert_held(&phba->hbalock);
780
781 list_remove_head(lpfc_iocb_list, iocbq, struct lpfc_iocbq, list);
782 if (iocbq)
783 phba->iocb_cnt++;
784 if (phba->iocb_cnt > phba->iocb_max)
785 phba->iocb_max = phba->iocb_cnt;
786 return iocbq;
787}
788
789
790
791
792
793
794
795
796
797
798
799
800
801struct lpfc_sglq *
802__lpfc_clear_active_sglq(struct lpfc_hba *phba, uint16_t xritag)
803{
804 struct lpfc_sglq *sglq;
805
806 sglq = phba->sli4_hba.lpfc_sglq_active_list[xritag];
807 phba->sli4_hba.lpfc_sglq_active_list[xritag] = NULL;
808 return sglq;
809}
810
811
812
813
814
815
816
817
818
819
820
821
822
823struct lpfc_sglq *
824__lpfc_get_active_sglq(struct lpfc_hba *phba, uint16_t xritag)
825{
826 struct lpfc_sglq *sglq;
827
828 sglq = phba->sli4_hba.lpfc_sglq_active_list[xritag];
829 return sglq;
830}
831
832
833
834
835
836
837
838
/**
 * lpfc_clr_rrq_active - Clears RRQ active bit in xri_bitmap.
 * @phba: Pointer to HBA context object.
 * @xritag: xri used in this exchange.
 * @rrq: The RRQ to be cleared.
 *
 * Clears the XRI's bit in the node's active-RRQ bitmap (resetting the
 * rrq bookkeeping fields if the bit was set) and frees the rrq object
 * back to its mempool. The rrq is always freed, even when no node can
 * be resolved.
 */
void
lpfc_clr_rrq_active(struct lpfc_hba *phba,
		    uint16_t xritag,
		    struct lpfc_node_rrq *rrq)
{
	struct lpfc_nodelist *ndlp = NULL;

	/* Prefer a fresh lookup of the node by DID when available */
	if ((rrq->vport) && NLP_CHK_NODE_ACT(rrq->ndlp))
		ndlp = lpfc_findnode_did(rrq->vport, rrq->nlp_DID);

	/* The target DID could have been swapped (cable swap)
	 * we should use the ndlp from the findnode if it is
	 * available.
	 */
	if ((!ndlp) && rrq->ndlp)
		ndlp = rrq->ndlp;

	if (!ndlp)
		goto out;

	if (test_and_clear_bit(xritag, ndlp->active_rrqs_xri_bitmap)) {
		rrq->send_rrq = 0;
		rrq->xritag = 0;
		rrq->rrq_stop_time = 0;
	}
out:
	mempool_free(rrq, phba->rrq_pool);
}
867
868
869
870
871
872
873
874
875
876
877
878
879
880
881
/**
 * lpfc_handle_rrq_active - Checks if RRQ has waited RATOV.
 * @phba: Pointer to HBA context object.
 *
 * Walks the active RRQ list under hbalock, moving every expired entry
 * onto a private list and computing the next timer expiry from the
 * remaining entries. The expired entries are then processed outside
 * the lock: an RRQ ELS is sent if requested, otherwise (or if the send
 * fails) the active bit is cleared and the entry freed.
 */
void
lpfc_handle_rrq_active(struct lpfc_hba *phba)
{
	struct lpfc_node_rrq *rrq;
	struct lpfc_node_rrq *nextrrq;
	unsigned long next_time;
	unsigned long iflags;
	LIST_HEAD(send_rrq);

	spin_lock_irqsave(&phba->hbalock, iflags);
	phba->hba_flag &= ~HBA_RRQ_ACTIVE;
	/* Default next expiry: one RATOV from now */
	next_time = jiffies + msecs_to_jiffies(1000 * (phba->fc_ratov + 1));
	list_for_each_entry_safe(rrq, nextrrq,
				 &phba->active_rrq_list, list) {
		if (time_after(jiffies, rrq->rrq_stop_time))
			list_move(&rrq->list, &send_rrq);
		else if (time_before(rrq->rrq_stop_time, next_time))
			next_time = rrq->rrq_stop_time;
	}
	spin_unlock_irqrestore(&phba->hbalock, iflags);
	if ((!list_empty(&phba->active_rrq_list)) &&
	    (!(phba->pport->load_flag & FC_UNLOADING)))
		mod_timer(&phba->rrq_tmr, next_time);
	list_for_each_entry_safe(rrq, nextrrq, &send_rrq, list) {
		list_del(&rrq->list);
		if (!rrq->send_rrq) {
			/* this call will free the rrq */
			lpfc_clr_rrq_active(phba, rrq->xritag, rrq);
		} else if (lpfc_send_rrq(phba, rrq)) {
			/* if we send the rrq then the completion handler
			 *  will clear the bit in the xribitmap.
			 */
			lpfc_clr_rrq_active(phba, rrq->xritag,
					    rrq);
		}
	}
}
919
920
921
922
923
924
925
926
927
928
929struct lpfc_node_rrq *
930lpfc_get_active_rrq(struct lpfc_vport *vport, uint16_t xri, uint32_t did)
931{
932 struct lpfc_hba *phba = vport->phba;
933 struct lpfc_node_rrq *rrq;
934 struct lpfc_node_rrq *nextrrq;
935 unsigned long iflags;
936
937 if (phba->sli_rev != LPFC_SLI_REV4)
938 return NULL;
939 spin_lock_irqsave(&phba->hbalock, iflags);
940 list_for_each_entry_safe(rrq, nextrrq, &phba->active_rrq_list, list) {
941 if (rrq->vport == vport && rrq->xritag == xri &&
942 rrq->nlp_DID == did){
943 list_del(&rrq->list);
944 spin_unlock_irqrestore(&phba->hbalock, iflags);
945 return rrq;
946 }
947 }
948 spin_unlock_irqrestore(&phba->hbalock, iflags);
949 return NULL;
950}
951
952
953
954
955
956
957
958
959
/**
 * lpfc_cleanup_vports_rrqs - Remove and clear the active RRQ for this vport.
 * @vport: Pointer to vport object.
 * @ndlp: Pointer to the lpfc_node_list structure.
 *
 * If @ndlp is NULL remove all active RRQs for this vport (and purge the
 * vport's aborted-XRI lists); otherwise remove only the RRQs for that
 * specific node. Matching entries are collected under hbalock and then
 * cleared/freed outside the lock.
 */
void
lpfc_cleanup_vports_rrqs(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
{
	struct lpfc_hba *phba = vport->phba;
	struct lpfc_node_rrq *rrq;
	struct lpfc_node_rrq *nextrrq;
	unsigned long iflags;
	LIST_HEAD(rrq_list);

	if (phba->sli_rev != LPFC_SLI_REV4)
		return;
	if (!ndlp) {
		lpfc_sli4_vport_delete_els_xri_aborted(vport);
		lpfc_sli4_vport_delete_fcp_xri_aborted(vport);
	}
	spin_lock_irqsave(&phba->hbalock, iflags);
	list_for_each_entry_safe(rrq, nextrrq, &phba->active_rrq_list, list)
		if ((rrq->vport == vport) && (!ndlp || rrq->ndlp == ndlp))
			list_move(&rrq->list, &rrq_list);
	spin_unlock_irqrestore(&phba->hbalock, iflags);

	list_for_each_entry_safe(rrq, nextrrq, &rrq_list, list) {
		list_del(&rrq->list);
		lpfc_clr_rrq_active(phba, rrq->xritag, rrq);
	}
}
987
988
989
990
991
992
993
994
995
996
997
998int
999lpfc_test_rrq_active(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp,
1000 uint16_t xritag)
1001{
1002 if (!ndlp)
1003 return 0;
1004 if (!ndlp->active_rrqs_xri_bitmap)
1005 return 0;
1006 if (test_bit(xritag, ndlp->active_rrqs_xri_bitmap))
1007 return 1;
1008 else
1009 return 0;
1010}
1011
1012
1013
1014
1015
1016
1017
1018
1019
1020
1021
1022
1023
1024
1025
1026
/**
 * lpfc_set_rrq_active - set RRQ active bit in xri_bitmap.
 * @phba: Pointer to HBA context object.
 * @ndlp: nodelist pointer for this target.
 * @xritag: xri used in this exchange.
 * @rxid: Remote Exchange ID.
 * @send_rrq: Flag used to determine if we should send rrq els cmd.
 *
 * Sets the XRI's bit in the node's active-RRQ bitmap, then (after
 * dropping hbalock for the GFP_KERNEL allocation) queues a new rrq
 * entry on the adapter's active list and wakes the worker if the list
 * was empty. Various unload/teardown states make this a no-op.
 *
 * Return: 0 on success, -EINVAL on any failure (bit already set, node
 * being freed, unloading, allocation failure, ...).
 */
int
lpfc_set_rrq_active(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp,
		    uint16_t xritag, uint16_t rxid, uint16_t send_rrq)
{
	unsigned long iflags;
	struct lpfc_node_rrq *rrq;
	int empty;

	if (!ndlp)
		return -EINVAL;

	if (!phba->cfg_enable_rrq)
		return -EINVAL;

	spin_lock_irqsave(&phba->hbalock, iflags);
	if (phba->pport->load_flag & FC_UNLOADING) {
		phba->hba_flag &= ~HBA_RRQ_ACTIVE;
		goto out;
	}

	/*
	 * Bail out if the node is being freed or its vport is unloading.
	 */
	if (NLP_CHK_FREE_REQ(ndlp))
		goto out;

	if (ndlp->vport && (ndlp->vport->load_flag & FC_UNLOADING))
		goto out;

	if (!ndlp->active_rrqs_xri_bitmap)
		goto out;

	/* Already active for this xri — nothing to do */
	if (test_and_set_bit(xritag, ndlp->active_rrqs_xri_bitmap))
		goto out;

	/* Drop the lock: mempool_alloc(GFP_KERNEL) may sleep */
	spin_unlock_irqrestore(&phba->hbalock, iflags);
	rrq = mempool_alloc(phba->rrq_pool, GFP_KERNEL);
	if (!rrq) {
		lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
				"3155 Unable to allocate RRQ xri:0x%x rxid:0x%x"
				" DID:0x%x Send:%d\n",
				xritag, rxid, ndlp->nlp_DID, send_rrq);
		return -EINVAL;
	}
	if (phba->cfg_enable_rrq == 1)
		rrq->send_rrq = send_rrq;
	else
		rrq->send_rrq = 0;
	rrq->xritag = xritag;
	/* RRQ expires one RATOV from now */
	rrq->rrq_stop_time = jiffies +
			     msecs_to_jiffies(1000 * (phba->fc_ratov + 1));
	rrq->ndlp = ndlp;
	rrq->nlp_DID = ndlp->nlp_DID;
	rrq->vport = ndlp->vport;
	rrq->rxid = rxid;
	spin_lock_irqsave(&phba->hbalock, iflags);
	empty = list_empty(&phba->active_rrq_list);
	list_add_tail(&rrq->list, &phba->active_rrq_list);
	phba->hba_flag |= HBA_RRQ_ACTIVE;
	if (empty)
		lpfc_worker_wake_up(phba);
	spin_unlock_irqrestore(&phba->hbalock, iflags);
	return 0;
out:
	spin_unlock_irqrestore(&phba->hbalock, iflags);
	lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
			"2921 Can't set rrq active xri:0x%x rxid:0x%x"
			" DID:0x%x Send:%d\n",
			xritag, rxid, ndlp->nlp_DID, send_rrq);
	return -EINVAL;
}
1098
1099
1100
1101
1102
1103
1104
1105
1106
1107
1108
1109
/**
 * __lpfc_sli_get_els_sglq - Allocates an iocb object from sgl pool
 * @phba: Pointer to HBA context object.
 * @piocbq: Pointer to the iocbq.
 *
 * The caller must hold the appropriate ring lock (asserted below).
 * This function picks a free sglq off the ELS sgl list, skipping any
 * whose XRI still has an active RRQ for the destination node (those
 * are requeued at the tail). The chosen sglq is recorded in the
 * active-sglq table and marked SGL_ALLOCATED.
 *
 * Return: the allocated sglq, or NULL when the list is empty or every
 * candidate is blocked by an active RRQ.
 */
static struct lpfc_sglq *
__lpfc_sli_get_els_sglq(struct lpfc_hba *phba, struct lpfc_iocbq *piocbq)
{
	struct list_head *lpfc_els_sgl_list = &phba->sli4_hba.lpfc_els_sgl_list;
	struct lpfc_sglq *sglq = NULL;
	struct lpfc_sglq *start_sglq = NULL;
	struct lpfc_io_buf *lpfc_cmd;
	struct lpfc_nodelist *ndlp;
	struct lpfc_sli_ring *pring = NULL;
	int found = 0;

	if (piocbq->iocb_flag & LPFC_IO_NVME_LS)
		pring = phba->sli4_hba.nvmels_wq->pring;
	else
		pring = lpfc_phba_elsring(phba);

	lockdep_assert_held(&pring->ring_lock);

	/* Resolve the destination node for the RRQ check below */
	if (piocbq->iocb_flag & LPFC_IO_FCP) {
		lpfc_cmd = (struct lpfc_io_buf *) piocbq->context1;
		ndlp = lpfc_cmd->rdata->pnode;
	} else if ((piocbq->iocb.ulpCommand == CMD_GEN_REQUEST64_CR) &&
		   !(piocbq->iocb_flag & LPFC_IO_LIBDFC)) {
		ndlp = piocbq->context_un.ndlp;
	} else if (piocbq->iocb_flag & LPFC_IO_LIBDFC) {
		if (piocbq->iocb_flag & LPFC_IO_LOOPBACK)
			ndlp = NULL;
		else
			ndlp = piocbq->context_un.ndlp;
	} else {
		ndlp = piocbq->context1;
	}

	spin_lock(&phba->sli4_hba.sgl_list_lock);
	list_remove_head(lpfc_els_sgl_list, sglq, struct lpfc_sglq, list);
	start_sglq = sglq;
	while (!found) {
		if (!sglq)
			break;
		if (ndlp && ndlp->active_rrqs_xri_bitmap &&
		    test_bit(sglq->sli4_lxritag,
			     ndlp->active_rrqs_xri_bitmap)) {
			/* This xri has an rrq outstanding for this DID.
			 * put it back in the list and get another xri.
			 */
			list_add_tail(&sglq->list, lpfc_els_sgl_list);
			sglq = NULL;
			list_remove_head(lpfc_els_sgl_list, sglq,
					 struct lpfc_sglq, list);
			if (sglq == start_sglq) {
				/* Wrapped around: every xri is blocked */
				list_add_tail(&sglq->list, lpfc_els_sgl_list);
				sglq = NULL;
				break;
			} else
				continue;
		}
		sglq->ndlp = ndlp;
		found = 1;
		phba->sli4_hba.lpfc_sglq_active_list[sglq->sli4_lxritag] = sglq;
		sglq->state = SGL_ALLOCATED;
	}
	spin_unlock(&phba->sli4_hba.sgl_list_lock);
	return sglq;
}
1174
1175
1176
1177
1178
1179
1180
1181
1182
1183
1184
1185struct lpfc_sglq *
1186__lpfc_sli_get_nvmet_sglq(struct lpfc_hba *phba, struct lpfc_iocbq *piocbq)
1187{
1188 struct list_head *lpfc_nvmet_sgl_list;
1189 struct lpfc_sglq *sglq = NULL;
1190
1191 lpfc_nvmet_sgl_list = &phba->sli4_hba.lpfc_nvmet_sgl_list;
1192
1193 lockdep_assert_held(&phba->sli4_hba.sgl_list_lock);
1194
1195 list_remove_head(lpfc_nvmet_sgl_list, sglq, struct lpfc_sglq, list);
1196 if (!sglq)
1197 return NULL;
1198 phba->sli4_hba.lpfc_sglq_active_list[sglq->sli4_lxritag] = sglq;
1199 sglq->state = SGL_ALLOCATED;
1200 return sglq;
1201}
1202
1203
1204
1205
1206
1207
1208
1209
1210
1211
/**
 * lpfc_sli_get_iocbq - Allocates an iocb object from iocb pool
 * @phba: Pointer to HBA context object.
 *
 * Lock-taking wrapper around __lpfc_sli_get_iocbq(): acquires hbalock,
 * allocates an iocb object from the free pool and releases the lock.
 *
 * Return: the allocated iocbq, or NULL when the pool is empty.
 */
struct lpfc_iocbq *
lpfc_sli_get_iocbq(struct lpfc_hba *phba)
{
	struct lpfc_iocbq * iocbq = NULL;
	unsigned long iflags;

	spin_lock_irqsave(&phba->hbalock, iflags);
	iocbq = __lpfc_sli_get_iocbq(phba);
	spin_unlock_irqrestore(&phba->hbalock, iflags);
	return iocbq;
}
1223
1224
1225
1226
1227
1228
1229
1230
1231
1232
1233
1234
1235
1236
1237
1238
1239
1240
1241
/**
 * __lpfc_sli_release_iocbq_s4 - Release iocb to the iocb pool (SLI-4)
 * @phba: Pointer to HBA context object.
 * @iocbq: Pointer to driver iocb object.
 *
 * Called with hbalock held. Returns the iocb's sglq (if it had an XRI)
 * to the appropriate list: NVMET sglqs back to the NVMET list; sglqs
 * whose exchange is still busy on the wire to the aborted-ELS list
 * (to be recycled when the XRI-abort event arrives); all others back
 * to the free ELS list. The iocbq itself is then scrubbed (everything
 * from the firmware IOCB onward) and put back on the free iocb list.
 */
static void
__lpfc_sli_release_iocbq_s4(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq)
{
	struct lpfc_sglq *sglq;
	size_t start_clean = offsetof(struct lpfc_iocbq, iocb);
	unsigned long iflag = 0;
	struct lpfc_sli_ring *pring;

	lockdep_assert_held(&phba->hbalock);

	if (iocbq->sli4_xritag == NO_XRI)
		sglq = NULL;
	else
		sglq = __lpfc_clear_active_sglq(phba, iocbq->sli4_lxritag);

	if (sglq) {
		if (iocbq->iocb_flag & LPFC_IO_NVMET) {
			/* NVMET sglqs have their own free list */
			spin_lock_irqsave(&phba->sli4_hba.sgl_list_lock,
					  iflag);
			sglq->state = SGL_FREED;
			sglq->ndlp = NULL;
			list_add_tail(&sglq->list,
				      &phba->sli4_hba.lpfc_nvmet_sgl_list);
			spin_unlock_irqrestore(
				&phba->sli4_hba.sgl_list_lock, iflag);
			goto out;
		}

		pring = phba->sli4_hba.els_wq->pring;
		if ((iocbq->iocb_flag & LPFC_EXCHANGE_BUSY) &&
		    (sglq->state != SGL_XRI_ABORTED)) {
			/* Exchange still busy: park the sglq until the
			 * XRI-aborted completion releases it.
			 */
			spin_lock_irqsave(&phba->sli4_hba.sgl_list_lock,
					  iflag);
			list_add(&sglq->list,
				 &phba->sli4_hba.lpfc_abts_els_sgl_list);
			spin_unlock_irqrestore(
				&phba->sli4_hba.sgl_list_lock, iflag);
		} else {
			spin_lock_irqsave(&phba->sli4_hba.sgl_list_lock,
					  iflag);
			sglq->state = SGL_FREED;
			sglq->ndlp = NULL;
			list_add_tail(&sglq->list,
				      &phba->sli4_hba.lpfc_els_sgl_list);
			spin_unlock_irqrestore(
				&phba->sli4_hba.sgl_list_lock, iflag);

			/* Check if TXQ queue needs to be serviced */
			if (!list_empty(&pring->txq))
				lpfc_worker_wake_up(phba);
		}
	}

out:
	/*
	 * Clean all volatile data fields, preserve iotag and node struct.
	 */
	memset((char *)iocbq + start_clean, 0, sizeof(*iocbq) - start_clean);
	iocbq->sli4_lxritag = NO_XRI;
	iocbq->sli4_xritag = NO_XRI;
	iocbq->iocb_flag &= ~(LPFC_IO_NVME | LPFC_IO_NVMET |
			      LPFC_IO_NVME_LS);
	list_add_tail(&iocbq->list, &phba->lpfc_iocb_list);
}
1307
1308
1309
1310
1311
1312
1313
1314
1315
1316
1317
1318
1319static void
1320__lpfc_sli_release_iocbq_s3(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq)
1321{
1322 size_t start_clean = offsetof(struct lpfc_iocbq, iocb);
1323
1324 lockdep_assert_held(&phba->hbalock);
1325
1326
1327
1328
1329 memset((char*)iocbq + start_clean, 0, sizeof(*iocbq) - start_clean);
1330 iocbq->sli4_xritag = NO_XRI;
1331 list_add_tail(&iocbq->list, &phba->lpfc_iocb_list);
1332}
1333
1334
1335
1336
1337
1338
1339
1340
1341
1342
1343
/**
 * __lpfc_sli_release_iocbq - Release iocb to the iocb pool
 * @phba: Pointer to HBA context object.
 * @iocbq: Pointer to driver iocb object.
 *
 * Called with hbalock held. Dispatches to the SLI-3 or SLI-4 specific
 * release routine (installed in phba at setup) and decrements the
 * outstanding-iocb count.
 */
static void
__lpfc_sli_release_iocbq(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq)
{
	lockdep_assert_held(&phba->hbalock);

	phba->__lpfc_sli_release_iocbq(phba, iocbq);
	phba->iocb_cnt--;
}
1352
1353
1354
1355
1356
1357
1358
1359
1360
/**
 * lpfc_sli_release_iocbq - Release iocb to the iocb pool
 * @phba: Pointer to HBA context object.
 * @iocbq: Pointer to driver iocb object.
 *
 * Lock-taking wrapper: acquires hbalock and releases the iocb object
 * back to the free pool via __lpfc_sli_release_iocbq().
 */
void
lpfc_sli_release_iocbq(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq)
{
	unsigned long iflags;

	/*
	 * Clean all volatile data fields, preserve iotag and node struct.
	 */
	spin_lock_irqsave(&phba->hbalock, iflags);
	__lpfc_sli_release_iocbq(phba, iocbq);
	spin_unlock_irqrestore(&phba->hbalock, iflags);
}
1373
1374
1375
1376
1377
1378
1379
1380
1381
1382
1383
1384
1385
1386void
1387lpfc_sli_cancel_iocbs(struct lpfc_hba *phba, struct list_head *iocblist,
1388 uint32_t ulpstatus, uint32_t ulpWord4)
1389{
1390 struct lpfc_iocbq *piocb;
1391
1392 while (!list_empty(iocblist)) {
1393 list_remove_head(iocblist, piocb, struct lpfc_iocbq, list);
1394 if (!piocb->iocb_cmpl)
1395 lpfc_sli_release_iocbq(phba, piocb);
1396 else {
1397 piocb->iocb.ulpStatus = ulpstatus;
1398 piocb->iocb.un.ulpWord[4] = ulpWord4;
1399 (piocb->iocb_cmpl) (phba, piocb, piocb);
1400 }
1401 }
1402 return;
1403}
1404
1405
1406
1407
1408
1409
1410
1411
1412
1413
1414
1415
1416
1417
1418
1419
1420static lpfc_iocb_type
1421lpfc_sli_iocb_cmd_type(uint8_t iocb_cmnd)
1422{
1423 lpfc_iocb_type type = LPFC_UNKNOWN_IOCB;
1424
1425 if (iocb_cmnd > CMD_MAX_IOCB_CMD)
1426 return 0;
1427
1428 switch (iocb_cmnd) {
1429 case CMD_XMIT_SEQUENCE_CR:
1430 case CMD_XMIT_SEQUENCE_CX:
1431 case CMD_XMIT_BCAST_CN:
1432 case CMD_XMIT_BCAST_CX:
1433 case CMD_ELS_REQUEST_CR:
1434 case CMD_ELS_REQUEST_CX:
1435 case CMD_CREATE_XRI_CR:
1436 case CMD_CREATE_XRI_CX:
1437 case CMD_GET_RPI_CN:
1438 case CMD_XMIT_ELS_RSP_CX:
1439 case CMD_GET_RPI_CR:
1440 case CMD_FCP_IWRITE_CR:
1441 case CMD_FCP_IWRITE_CX:
1442 case CMD_FCP_IREAD_CR:
1443 case CMD_FCP_IREAD_CX:
1444 case CMD_FCP_ICMND_CR:
1445 case CMD_FCP_ICMND_CX:
1446 case CMD_FCP_TSEND_CX:
1447 case CMD_FCP_TRSP_CX:
1448 case CMD_FCP_TRECEIVE_CX:
1449 case CMD_FCP_AUTO_TRSP_CX:
1450 case CMD_ADAPTER_MSG:
1451 case CMD_ADAPTER_DUMP:
1452 case CMD_XMIT_SEQUENCE64_CR:
1453 case CMD_XMIT_SEQUENCE64_CX:
1454 case CMD_XMIT_BCAST64_CN:
1455 case CMD_XMIT_BCAST64_CX:
1456 case CMD_ELS_REQUEST64_CR:
1457 case CMD_ELS_REQUEST64_CX:
1458 case CMD_FCP_IWRITE64_CR:
1459 case CMD_FCP_IWRITE64_CX:
1460 case CMD_FCP_IREAD64_CR:
1461 case CMD_FCP_IREAD64_CX:
1462 case CMD_FCP_ICMND64_CR:
1463 case CMD_FCP_ICMND64_CX:
1464 case CMD_FCP_TSEND64_CX:
1465 case CMD_FCP_TRSP64_CX:
1466 case CMD_FCP_TRECEIVE64_CX:
1467 case CMD_GEN_REQUEST64_CR:
1468 case CMD_GEN_REQUEST64_CX:
1469 case CMD_XMIT_ELS_RSP64_CX:
1470 case DSSCMD_IWRITE64_CR:
1471 case DSSCMD_IWRITE64_CX:
1472 case DSSCMD_IREAD64_CR:
1473 case DSSCMD_IREAD64_CX:
1474 type = LPFC_SOL_IOCB;
1475 break;
1476 case CMD_ABORT_XRI_CN:
1477 case CMD_ABORT_XRI_CX:
1478 case CMD_CLOSE_XRI_CN:
1479 case CMD_CLOSE_XRI_CX:
1480 case CMD_XRI_ABORTED_CX:
1481 case CMD_ABORT_MXRI64_CN:
1482 case CMD_XMIT_BLS_RSP64_CX:
1483 type = LPFC_ABORT_IOCB;
1484 break;
1485 case CMD_RCV_SEQUENCE_CX:
1486 case CMD_RCV_ELS_REQ_CX:
1487 case CMD_RCV_SEQUENCE64_CX:
1488 case CMD_RCV_ELS_REQ64_CX:
1489 case CMD_ASYNC_STATUS:
1490 case CMD_IOCB_RCV_SEQ64_CX:
1491 case CMD_IOCB_RCV_ELS64_CX:
1492 case CMD_IOCB_RCV_CONT64_CX:
1493 case CMD_IOCB_RET_XRI64_CX:
1494 type = LPFC_UNSOL_IOCB;
1495 break;
1496 case CMD_IOCB_XMIT_MSEQ64_CR:
1497 case CMD_IOCB_XMIT_MSEQ64_CX:
1498 case CMD_IOCB_RCV_SEQ_LIST64_CX:
1499 case CMD_IOCB_RCV_ELS_LIST64_CX:
1500 case CMD_IOCB_CLOSE_EXTENDED_CN:
1501 case CMD_IOCB_ABORT_EXTENDED_CN:
1502 case CMD_IOCB_RET_HBQE64_CN:
1503 case CMD_IOCB_FCP_IBIDIR64_CR:
1504 case CMD_IOCB_FCP_IBIDIR64_CX:
1505 case CMD_IOCB_FCP_ITASKMGT64_CX:
1506 case CMD_IOCB_LOGENTRY_CN:
1507 case CMD_IOCB_LOGENTRY_ASYNC_CN:
1508 printk("%s - Unhandled SLI-3 Command x%x\n",
1509 __func__, iocb_cmnd);
1510 type = LPFC_UNKNOWN_IOCB;
1511 break;
1512 default:
1513 type = LPFC_UNKNOWN_IOCB;
1514 break;
1515 }
1516
1517 return type;
1518}
1519
1520
1521
1522
1523
1524
1525
1526
1527
1528
1529
1530
/**
 * lpfc_sli_ring_map - Issue config_ring mbox for all rings
 * @phba: Pointer to HBA context object.
 *
 * Called during initialization: issues a CONFIG_RING mailbox command
 * (polled) for each SLI ring. A failure marks the HBA as errored and
 * stops the loop.
 *
 * Return: 0 on success, -ENXIO if any CONFIG_RING command fails,
 * -ENOMEM if the mailbox allocation fails.
 */
static int
lpfc_sli_ring_map(struct lpfc_hba *phba)
{
	struct lpfc_sli *psli = &phba->sli;
	LPFC_MBOXQ_t *pmb;
	MAILBOX_t *pmbox;
	int i, rc, ret = 0;

	pmb = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!pmb)
		return -ENOMEM;
	pmbox = &pmb->u.mb;
	phba->link_state = LPFC_INIT_MBX_CMDS;
	for (i = 0; i < psli->num_rings; i++) {
		lpfc_config_ring(phba, i, pmb);
		rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL);
		if (rc != MBX_SUCCESS) {
			lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
					"0446 Adapter failed to init (%d), "
					"mbxCmd x%x CFG_RING, mbxStatus x%x, "
					"ring %d\n",
					rc, pmbox->mbxCommand,
					pmbox->mbxStatus, i);
			phba->link_state = LPFC_HBA_ERROR;
			ret = -ENXIO;
			break;
		}
	}
	mempool_free(pmb, phba->mbox_mem_pool);
	return ret;
}
1562
1563
1564
1565
1566
1567
1568
1569
1570
1571
1572
1573
1574
1575
/**
 * lpfc_sli_ringtxcmpl_put - Queue a command iocb on the ring's txcmplq
 * @phba: pointer to the HBA context object.
 * @pring: pointer to the driver SLI ring object.
 * @piocb: iocb being queued; must not be NULL (BUG otherwise).
 *
 * Adds @piocb to the ring's txcmplq (commands issued to the HBA that are
 * awaiting completion), marks it LPFC_IO_ON_TXCMPLQ and bumps the queue
 * count.  For ELS ring traffic other than abort/close commands, the
 * vport's ELS timeout timer is (re)armed at twice fc_ratov, unless the
 * vport is unloading.
 *
 * Locking: caller must hold pring->ring_lock (SLI4) or phba->hbalock
 * (SLI3), as asserted below.
 *
 * Return: always 0.
 */
static int
lpfc_sli_ringtxcmpl_put(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
			struct lpfc_iocbq *piocb)
{
	if (phba->sli_rev == LPFC_SLI_REV4)
		lockdep_assert_held(&pring->ring_lock);
	else
		lockdep_assert_held(&phba->hbalock);

	BUG_ON(!piocb);

	list_add_tail(&piocb->list, &pring->txcmplq);
	piocb->iocb_flag |= LPFC_IO_ON_TXCMPLQ;
	pring->txcmplq_cnt++;

	if ((unlikely(pring->ringno == LPFC_ELS_RING)) &&
	    (piocb->iocb.ulpCommand != CMD_ABORT_XRI_CN) &&
	    (piocb->iocb.ulpCommand != CMD_CLOSE_XRI_CN)) {
		BUG_ON(!piocb->vport);
		if (!(piocb->vport->load_flag & FC_UNLOADING))
			mod_timer(&piocb->vport->els_tmofunc,
				  jiffies +
				  msecs_to_jiffies(1000 * (phba->fc_ratov << 1)));
	}

	return 0;
}
1603
1604
1605
1606
1607
1608
1609
1610
1611
1612
1613
1614struct lpfc_iocbq *
1615lpfc_sli_ringtx_get(struct lpfc_hba *phba, struct lpfc_sli_ring *pring)
1616{
1617 struct lpfc_iocbq *cmd_iocb;
1618
1619 lockdep_assert_held(&phba->hbalock);
1620
1621 list_remove_head((&pring->txq), cmd_iocb, struct lpfc_iocbq, list);
1622 return cmd_iocb;
1623}
1624
1625
1626
1627
1628
1629
1630
1631
1632
1633
1634
1635
1636
1637
1638
/**
 * lpfc_sli_next_iocb_slot - Get next available slot in the SLI-3 command ring
 * @phba: pointer to the HBA context object.
 * @pring: pointer to the driver SLI ring object.
 *
 * Advances next_cmdidx (wrapping at numCiocb) and checks whether the ring
 * is full, i.e. the next put index would collide with the port's get
 * index.  The cached local_getidx is refreshed from the port get pointer
 * (pgp->cmdGetInx) only when a collision is seen, to avoid reading shared
 * memory on every call.  A port get index beyond the ring size indicates
 * a sick adapter: the HBA is put in error state and the worker thread is
 * woken to handle the error attention.
 *
 * Locking: caller must hold phba->hbalock.
 *
 * Return: pointer to the next free IOCB slot, or NULL if the ring is full
 * or the adapter reported a bogus get index.
 */
static IOCB_t *
lpfc_sli_next_iocb_slot (struct lpfc_hba *phba, struct lpfc_sli_ring *pring)
{
	struct lpfc_pgp *pgp = &phba->port_gp[pring->ringno];
	uint32_t  max_cmd_idx = pring->sli.sli3.numCiocb;

	lockdep_assert_held(&phba->hbalock);

	/* Advance and wrap the next put index. */
	if ((pring->sli.sli3.next_cmdidx == pring->sli.sli3.cmdidx) &&
	   (++pring->sli.sli3.next_cmdidx >= max_cmd_idx))
		pring->sli.sli3.next_cmdidx = 0;

	if (unlikely(pring->sli.sli3.local_getidx ==
		pring->sli.sli3.next_cmdidx)) {

		/* Re-read the port's get index; the cached copy is stale. */
		pring->sli.sli3.local_getidx = le32_to_cpu(pgp->cmdGetInx);

		if (unlikely(pring->sli.sli3.local_getidx >= max_cmd_idx)) {
			lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
					"0315 Ring %d issue: portCmdGet %d "
					"is bigger than cmd ring %d\n",
					pring->ringno,
					pring->sli.sli3.local_getidx,
					max_cmd_idx);

			phba->link_state = LPFC_HBA_ERROR;

			/*
			 * Flag an error attention so the worker thread
			 * drives adapter recovery.
			 */
			phba->work_ha |= HA_ERATT;
			phba->work_hs = HS_FFER3;

			lpfc_worker_wake_up(phba);

			return NULL;
		}

		/* Still full after refreshing the get index. */
		if (pring->sli.sli3.local_getidx == pring->sli.sli3.next_cmdidx)
			return NULL;
	}

	return lpfc_cmd_iocb(phba, pring);
}
1683
1684
1685
1686
1687
1688
1689
1690
1691
1692
1693
1694
1695
/**
 * lpfc_sli_next_iotag - Allocate an iotag for an iocb
 * @phba: pointer to the HBA context object.
 * @iocbq: iocb object to which the new iotag is assigned.
 *
 * Assigns the next free iotag and records @iocbq in the iocbq_lookup
 * array so completions can map an iotag back to the command.  When the
 * lookup array is exhausted, it is grown by LPFC_IOCBQ_LOOKUP_INCREMENT
 * entries: hbalock is dropped for the GFP_KERNEL allocation and then
 * re-taken, so the code re-checks whether another thread already grew
 * the array in the meantime and, if so, retries the fast path with the
 * (possibly) new array before giving up.
 *
 * Return: the allocated iotag (> 0), or 0 on allocation failure or when
 * the 16-bit iotag space is exhausted.
 */
uint16_t
lpfc_sli_next_iotag(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq)
{
	struct lpfc_iocbq **new_arr;
	struct lpfc_iocbq **old_arr;
	size_t new_len;
	struct lpfc_sli *psli = &phba->sli;
	uint16_t iotag;

	spin_lock_irq(&phba->hbalock);
	iotag = psli->last_iotag;
	if(++iotag < psli->iocbq_lookup_len) {
		/* Fast path: a slot is free in the current lookup array. */
		psli->last_iotag = iotag;
		psli->iocbq_lookup[iotag] = iocbq;
		spin_unlock_irq(&phba->hbalock);
		iocbq->iotag = iotag;
		return iotag;
	} else if (psli->iocbq_lookup_len < (0xffff
					   - LPFC_IOCBQ_LOOKUP_INCREMENT)) {
		new_len = psli->iocbq_lookup_len + LPFC_IOCBQ_LOOKUP_INCREMENT;
		/* Drop the lock for the sleeping allocation. */
		spin_unlock_irq(&phba->hbalock);
		new_arr = kcalloc(new_len, sizeof(struct lpfc_iocbq *),
				  GFP_KERNEL);
		if (new_arr) {
			spin_lock_irq(&phba->hbalock);
			old_arr = psli->iocbq_lookup;
			if (new_len <= psli->iocbq_lookup_len) {
				/* Another thread grew the array while the
				 * lock was dropped; discard ours and retry
				 * the fast path against the current array.
				 */
				kfree(new_arr);
				iotag = psli->last_iotag;
				if(++iotag < psli->iocbq_lookup_len) {
					psli->last_iotag = iotag;
					psli->iocbq_lookup[iotag] = iocbq;
					spin_unlock_irq(&phba->hbalock);
					iocbq->iotag = iotag;
					return iotag;
				}
				spin_unlock_irq(&phba->hbalock);
				return 0;
			}
			if (psli->iocbq_lookup)
				memcpy(new_arr, old_arr,
				       ((psli->last_iotag  + 1) *
					sizeof (struct lpfc_iocbq *)));
			psli->iocbq_lookup = new_arr;
			psli->iocbq_lookup_len = new_len;
			psli->last_iotag = iotag;
			psli->iocbq_lookup[iotag] = iocbq;
			spin_unlock_irq(&phba->hbalock);
			iocbq->iotag = iotag;
			/* Old array is only freed after the swap, outside
			 * the assignment, so lookups never see a stale
			 * pointer.
			 */
			kfree(old_arr);
			return iotag;
		}
	} else
		spin_unlock_irq(&phba->hbalock);

	lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
			"0318 Failed to allocate IOTAG.last IOTAG is %d\n",
			psli->last_iotag);

	return 0;
}
1758
1759
1760
1761
1762
1763
1764
1765
1766
1767
1768
1769
1770
1771
1772
/**
 * lpfc_sli_submit_iocb - Submit an iocb to the firmware (SLI-3)
 * @phba: pointer to the HBA context object.
 * @pring: pointer to the driver SLI ring object.
 * @iocb: pointer to the ring slot obtained from lpfc_sli_next_iocb_slot().
 * @nextiocb: iocb being submitted.
 *
 * Copies @nextiocb into the ring slot, issues a write barrier so the
 * entry is visible before the put index is updated, then either queues
 * the iocb on the txcmplq (if it has a completion handler) or releases
 * it immediately.  Finally the host put index is written to the chip to
 * let the HBA process the new entry.
 *
 * Locking: caller must hold phba->hbalock.
 */
static void
lpfc_sli_submit_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
		IOCB_t *iocb, struct lpfc_iocbq *nextiocb)
{
	lockdep_assert_held(&phba->hbalock);

	/*
	 * Only an iotag is reported back for iocbs that expect a
	 * completion; otherwise the tag field is cleared.
	 */
	nextiocb->iocb.ulpIoTag = (nextiocb->iocb_cmpl) ? nextiocb->iotag : 0;


	if (pring->ringno == LPFC_ELS_RING) {
		lpfc_debugfs_slow_ring_trc(phba,
			"IOCB cmd ring:   wd4:x%08x wd6:x%08x wd7:x%08x",
			*(((uint32_t *) &nextiocb->iocb) + 4),
			*(((uint32_t *) &nextiocb->iocb) + 6),
			*(((uint32_t *) &nextiocb->iocb) + 7));
	}

	/*
	 * Issue iocb command to adapter; wmb() orders the copy before the
	 * put-index update below.
	 */
	lpfc_sli_pcimem_bcopy(&nextiocb->iocb, iocb, phba->iocb_cmd_size);
	wmb();
	pring->stats.iocb_cmd++;

	/*
	 * If there is no completion routine to call, we can release the
	 * IOCB buffer back right now. For IOCBs, like QUE_RING_BUF,
	 * that have no rsp ring completion, iocb_cmpl MUST be NULL.
	 */
	if (nextiocb->iocb_cmpl)
		lpfc_sli_ringtxcmpl_put(phba, pring, nextiocb);
	else
		__lpfc_sli_release_iocbq(phba, nextiocb);

	/*
	 * Let the HBA know what iocb slot will be the next one the
	 * driver will put a command into.
	 */
	pring->sli.sli3.cmdidx = pring->sli.sli3.next_cmdidx;
	writel(pring->sli.sli3.cmdidx, &phba->host_gp[pring->ringno].cmdPutInx);
}
1816
1817
1818
1819
1820
1821
1822
1823
1824
1825
1826
1827
1828
/**
 * lpfc_sli_update_full_ring - Update chip attention register for a full ring
 * @phba: pointer to the HBA context object.
 * @pring: pointer to the driver SLI ring object.
 *
 * Called when the ring had no free command slots.  Sets the
 * LPFC_CALL_RING_AVAILABLE flag so the driver is notified when slots free
 * up, then rings the chip attention register (with the ring-open-for-
 * commands bit) for this ring.  The barrier orders the flag update
 * before the doorbell; the trailing readl flushes the PCI write.
 */
static void
lpfc_sli_update_full_ring(struct lpfc_hba *phba, struct lpfc_sli_ring *pring)
{
	int ringno = pring->ringno;

	pring->flag |= LPFC_CALL_RING_AVAILABLE;

	wmb();

	/*
	 * Set ring 'ringno' to SET R0CE_REQ in Chip Att register.
	 * The HBA will tell us when an IOCB entry is available.
	 */
	writel((CA_R0ATT|CA_R0CE_REQ) << (ringno*4), phba->CAregaddr);
	readl(phba->CAregaddr);

	pring->stats.iocb_cmd_full++;
}
1847
1848
1849
1850
1851
1852
1853
1854
1855
1856
/**
 * lpfc_sli_update_ring - Ring the chip attention register for a ring
 * @phba: pointer to the HBA context object.
 * @pring: pointer to the driver SLI ring object.
 *
 * Notifies the HBA that new commands were posted to the ring by writing
 * the ring attention bit to the chip attention register.  Skipped when
 * the port supports the consumed-resource provided (CRP) feature, in
 * which case no doorbell is needed.  The readl flushes the PCI write.
 */
static void
lpfc_sli_update_ring(struct lpfc_hba *phba, struct lpfc_sli_ring *pring)
{
	int ringno = pring->ringno;

	/*
	 * Tell the HBA that there is work to do in this ring.
	 */
	if (!(phba->sli3_options & LPFC_SLI3_CRP_ENABLED)) {
		wmb();
		writel(CA_R0ATT << (ringno * 4), phba->CAregaddr);
		readl(phba->CAregaddr);
	}
}
1871
1872
1873
1874
1875
1876
1877
1878
1879
1880
/**
 * lpfc_sli_resume_iocb - Drain the ring's txq into the command ring
 * @phba: pointer to the HBA context object.
 * @pring: pointer to the driver SLI ring object.
 *
 * Submits pending iocbs from the ring's txq while the link is up and
 * command slots are available.  FCP-ring traffic is additionally gated
 * on the LPFC_PROCESS_LA flag.  When the loop stops because the ring
 * filled up (iocb == NULL), the full-ring doorbell is used so the HBA
 * notifies us when slots free; otherwise the normal ring doorbell is
 * written.
 *
 * Locking: caller must hold phba->hbalock.
 */
static void
lpfc_sli_resume_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring)
{
	IOCB_t *iocb;
	struct lpfc_iocbq *nextiocb;

	lockdep_assert_held(&phba->hbalock);

	/*
	 * Check to see if:
	 *  (a) there is anything on the txq to send
	 *  (b) link is up
	 *  (c) link attention events can be processed (fcp ring only)
	 *  (d) IOCB processing is not blocked by the outstanding mbox command.
	 */

	if (lpfc_is_link_up(phba) &&
	    (!list_empty(&pring->txq)) &&
	    (pring->ringno != LPFC_FCP_RING ||
	     phba->sli.sli_flag & LPFC_PROCESS_LA)) {

		while ((iocb = lpfc_sli_next_iocb_slot(phba, pring)) &&
		       (nextiocb = lpfc_sli_ringtx_get(phba, pring)))
			lpfc_sli_submit_iocb(phba, pring, iocb, nextiocb);

		if (iocb)
			lpfc_sli_update_ring(phba, pring);
		else
			lpfc_sli_update_full_ring(phba, pring);
	}

	return;
}
1914
1915
1916
1917
1918
1919
1920
1921
1922
1923
1924
/**
 * lpfc_sli_next_hbq_slot - Get next available HBQ entry
 * @phba: pointer to the HBA context object.
 * @hbqno: HBQ number.
 *
 * Advances next_hbqPutIdx (wrapping at entry_count) and checks for a
 * full HBQ against the cached local get index, refreshing that cache
 * from the port-updated phba->hbq_get[] only on an apparent collision.
 * A get index beyond entry_count means the adapter is misbehaving: the
 * HBA is put in error state and NULL is returned.
 *
 * Locking: caller must hold phba->hbalock.
 *
 * Return: pointer to the free HBQ entry at hbqPutIdx, or NULL if the
 * queue is full or the adapter reported a bogus get index.
 */
static struct lpfc_hbq_entry *
lpfc_sli_next_hbq_slot(struct lpfc_hba *phba, uint32_t hbqno)
{
	struct hbq_s *hbqp = &phba->hbqs[hbqno];

	lockdep_assert_held(&phba->hbalock);

	if (hbqp->next_hbqPutIdx == hbqp->hbqPutIdx &&
	    ++hbqp->next_hbqPutIdx >= hbqp->entry_count)
		hbqp->next_hbqPutIdx = 0;

	if (unlikely(hbqp->local_hbqGetIdx == hbqp->next_hbqPutIdx)) {
		uint32_t raw_index = phba->hbq_get[hbqno];
		uint32_t getidx = le32_to_cpu(raw_index);

		hbqp->local_hbqGetIdx = getidx;

		if (unlikely(hbqp->local_hbqGetIdx >= hbqp->entry_count)) {
			lpfc_printf_log(phba, KERN_ERR,
					LOG_SLI | LOG_VPORT,
					"1802 HBQ %d: local_hbqGetIdx "
					"%u is > than hbqp->entry_count %u\n",
					hbqno, hbqp->local_hbqGetIdx,
					hbqp->entry_count);

			phba->link_state = LPFC_HBA_ERROR;
			return NULL;
		}

		if (hbqp->local_hbqGetIdx == hbqp->next_hbqPutIdx)
			return NULL;
	}

	return (struct lpfc_hbq_entry *) phba->hbqs[hbqno].hbq_virt +
			hbqp->hbqPutIdx;
}
1961
1962
1963
1964
1965
1966
1967
1968
1969
1970
/**
 * lpfc_sli_hbqbuf_free_all - Free all the HBQ buffers
 * @phba: pointer to the HBA context object.
 *
 * Walks every HBQ's buffer list, removes each buffer and returns it via
 * the queue's hbq_free_buffer callback, then clears the per-queue buffer
 * count and marks the HBQs not in use.  Everything is done under hbalock
 * so posting paths see a consistent hbq_in_use.
 */
void
lpfc_sli_hbqbuf_free_all(struct lpfc_hba *phba)
{
	struct lpfc_dmabuf *dmabuf, *next_dmabuf;
	struct hbq_dmabuf *hbq_buf;
	unsigned long flags;
	int i, hbq_count;

	hbq_count = lpfc_sli_hbq_count();
	/* Return all memory used by all HBQs */
	spin_lock_irqsave(&phba->hbalock, flags);
	for (i = 0; i < hbq_count; ++i) {
		list_for_each_entry_safe(dmabuf, next_dmabuf,
				&phba->hbqs[i].hbq_buffer_list, list) {
			hbq_buf = container_of(dmabuf, struct hbq_dmabuf, dbuf);
			list_del(&hbq_buf->dbuf.list);
			(phba->hbqs[i].hbq_free_buffer)(phba, hbq_buf);
		}
		phba->hbqs[i].buffer_count = 0;
	}

	/* Mark the HBQs not in use */
	phba->hbq_in_use = 0;
	spin_unlock_irqrestore(&phba->hbalock, flags);
}
1996
1997
1998
1999
2000
2001
2002
2003
2004
2005
2006
2007
2008
/**
 * lpfc_sli_hbq_to_firmware - Post an HBQ buffer to the firmware
 * @phba: pointer to the HBA context object.
 * @hbqno: HBQ number.
 * @hbq_buf: buffer to post.
 *
 * Dispatches to the SLI-revision specific posting routine installed in
 * phba->lpfc_sli_hbq_to_firmware (the _s3 or _s4 variant below).
 *
 * Locking: caller must hold phba->hbalock.
 *
 * Return: 0 if the buffer was posted, nonzero otherwise.
 */
static int
lpfc_sli_hbq_to_firmware(struct lpfc_hba *phba, uint32_t hbqno,
			 struct hbq_dmabuf *hbq_buf)
{
	lockdep_assert_held(&phba->hbalock);
	return phba->lpfc_sli_hbq_to_firmware(phba, hbqno, hbq_buf);
}
2016
2017
2018
2019
2020
2021
2022
2023
2024
2025
2026
2027
/**
 * lpfc_sli_hbq_to_firmware_s3 - Post an HBQ buffer to an SLI-3 port
 * @phba: pointer to the HBA context object.
 * @hbqno: HBQ number.
 * @hbq_buf: buffer to post.
 *
 * Fills the next free HBQ entry with the buffer's DMA address, size and
 * tag, advances the put index, rings the HBQ doorbell (the readl flushes
 * the PCI write) and moves the buffer onto the queue's in-use list.
 *
 * Locking: caller must hold phba->hbalock.
 *
 * Return: 0 on success, -ENOMEM if no HBQ entry was available.
 */
static int
lpfc_sli_hbq_to_firmware_s3(struct lpfc_hba *phba, uint32_t hbqno,
			    struct hbq_dmabuf *hbq_buf)
{
	struct lpfc_hbq_entry *hbqe;
	dma_addr_t physaddr = hbq_buf->dbuf.phys;

	lockdep_assert_held(&phba->hbalock);
	/* Get next HBQ entry slot to use */
	hbqe = lpfc_sli_next_hbq_slot(phba, hbqno);
	if (hbqe) {
		struct hbq_s *hbqp = &phba->hbqs[hbqno];

		hbqe->bde.addrHigh = le32_to_cpu(putPaddrHigh(physaddr));
		hbqe->bde.addrLow  = le32_to_cpu(putPaddrLow(physaddr));
		hbqe->bde.tus.f.bdeSize = hbq_buf->total_size;
		hbqe->bde.tus.f.bdeFlags = 0;
		hbqe->bde.tus.w = le32_to_cpu(hbqe->bde.tus.w);
		hbqe->buffer_tag = le32_to_cpu(hbq_buf->tag);
		/* Sync SLIM */
		hbqp->hbqPutIdx = hbqp->next_hbqPutIdx;
		writel(hbqp->hbqPutIdx, phba->hbq_put + hbqno);
		/* flush */
		readl(phba->hbq_put + hbqno);
		list_add_tail(&hbq_buf->dbuf.list, &hbqp->hbq_buffer_list);
		return 0;
	} else
		return -ENOMEM;
}
2057
2058
2059
2060
2061
2062
2063
2064
2065
2066
2067
/**
 * lpfc_sli_hbq_to_firmware_s4 - Post an "HBQ" buffer to an SLI-4 port
 * @phba: pointer to the HBA context object.
 * @hbqno: HBQ number; only LPFC_ELS_HBQ is supported on SLI-4.
 * @hbq_buf: buffer to post.
 *
 * On SLI-4 the ELS HBQ is emulated over the header/data receive queue
 * pair: the buffer's hbuf and dbuf DMA addresses are posted as one
 * HRQ/DRQ entry.  The RQE index returned by lpfc_sli4_rq_put() is
 * combined with the HBQ number (in the upper 16 bits) to form the
 * buffer tag.
 *
 * Locking: caller must hold phba->hbalock.
 *
 * Return: 0 on success, 1 if @hbqno is not the ELS HBQ, or the negative
 * value returned by lpfc_sli4_rq_put() on RQ-post failure.
 */
static int
lpfc_sli_hbq_to_firmware_s4(struct lpfc_hba *phba, uint32_t hbqno,
			    struct hbq_dmabuf *hbq_buf)
{
	int rc;
	struct lpfc_rqe hrqe;
	struct lpfc_rqe drqe;
	struct lpfc_queue *hrq;
	struct lpfc_queue *drq;

	if (hbqno != LPFC_ELS_HBQ)
		return 1;
	hrq = phba->sli4_hba.hdr_rq;
	drq = phba->sli4_hba.dat_rq;

	lockdep_assert_held(&phba->hbalock);
	hrqe.address_lo = putPaddrLow(hbq_buf->hbuf.phys);
	hrqe.address_hi = putPaddrHigh(hbq_buf->hbuf.phys);
	drqe.address_lo = putPaddrLow(hbq_buf->dbuf.phys);
	drqe.address_hi = putPaddrHigh(hbq_buf->dbuf.phys);
	rc = lpfc_sli4_rq_put(hrq, drq, &hrqe, &drqe);
	if (rc < 0)
		return rc;
	hbq_buf->tag = (rc | (hbqno << 16));
	list_add_tail(&hbq_buf->dbuf.list, &phba->hbqs[hbqno].hbq_buffer_list);
	return 0;
}
2095
2096
/* HBQ definition for ELS traffic: 256 entries, posted to the ELS ring;
 * 40 buffers are posted at init and 40 more per replenish pass.
 */
static struct lpfc_hbq_init lpfc_els_hbq = {
	.rn = 1,
	.entry_count = 256,
	.mask_count = 0,
	.profile = 0,
	.ring_mask = (1 << LPFC_ELS_RING),
	.buffer_count = 0,
	.init_count = 40,
	.add_count = 40,
};

/* Table of HBQ definitions, indexed by HBQ number. */
struct lpfc_hbq_init *lpfc_hbq_defs[] = {
	&lpfc_els_hbq,
};
2112
2113
2114
2115
2116
2117
2118
2119
2120
2121
2122
/**
 * lpfc_sli_hbqbuf_fill_hbqs - Post more buffers to an HBQ
 * @phba: pointer to the HBA context object.
 * @hbqno: HBQ number.
 * @count: number of buffers to post (capped to the HBQ's free capacity).
 *
 * Allocates up to @count buffers outside the lock (allocation may sleep),
 * then posts them to the firmware under hbalock.  If the HBQs were torn
 * down in the meantime (!hbq_in_use), or a post fails, the unposted
 * buffers are freed back.
 *
 * Return: the number of buffers successfully posted (0 on any failure).
 */
static int
lpfc_sli_hbqbuf_fill_hbqs(struct lpfc_hba *phba, uint32_t hbqno, uint32_t count)
{
	uint32_t i, posted = 0;
	unsigned long flags;
	struct hbq_dmabuf *hbq_buffer;
	LIST_HEAD(hbq_buf_list);
	if (!phba->hbqs[hbqno].hbq_alloc_buffer)
		return 0;

	/* Never exceed the HBQ's configured entry count. */
	if ((phba->hbqs[hbqno].buffer_count + count) >
	    lpfc_hbq_defs[hbqno]->entry_count)
		count = lpfc_hbq_defs[hbqno]->entry_count -
					phba->hbqs[hbqno].buffer_count;
	if (!count)
		return 0;
	/* Allocate HBQ entries on a private list, outside the lock. */
	for (i = 0; i < count; i++) {
		hbq_buffer = (phba->hbqs[hbqno].hbq_alloc_buffer)(phba);
		if (!hbq_buffer)
			break;
		list_add_tail(&hbq_buffer->dbuf.list, &hbq_buf_list);
	}
	/* Check whether HBQ is still in use */
	spin_lock_irqsave(&phba->hbalock, flags);
	if (!phba->hbq_in_use)
		goto err;
	while (!list_empty(&hbq_buf_list)) {
		list_remove_head(&hbq_buf_list, hbq_buffer, struct hbq_dmabuf,
				 dbuf.list);
		/* Tag is current buffer index plus the HBQ number in the
		 * upper 16 bits.
		 */
		hbq_buffer->tag = (phba->hbqs[hbqno].buffer_count |
				      (hbqno << 16));
		if (!lpfc_sli_hbq_to_firmware(phba, hbqno, hbq_buffer)) {
			phba->hbqs[hbqno].buffer_count++;
			posted++;
		} else
			(phba->hbqs[hbqno].hbq_free_buffer)(phba, hbq_buffer);
	}
	spin_unlock_irqrestore(&phba->hbalock, flags);
	return posted;
err:
	spin_unlock_irqrestore(&phba->hbalock, flags);
	while (!list_empty(&hbq_buf_list)) {
		list_remove_head(&hbq_buf_list, hbq_buffer, struct hbq_dmabuf,
				 dbuf.list);
		(phba->hbqs[hbqno].hbq_free_buffer)(phba, hbq_buffer);
	}
	return 0;
}
2172
2173
2174
2175
2176
2177
2178
2179
2180
2181
2182int
2183lpfc_sli_hbqbuf_add_hbqs(struct lpfc_hba *phba, uint32_t qno)
2184{
2185 if (phba->sli_rev == LPFC_SLI_REV4)
2186 return 0;
2187 else
2188 return lpfc_sli_hbqbuf_fill_hbqs(phba, qno,
2189 lpfc_hbq_defs[qno]->add_count);
2190}
2191
2192
2193
2194
2195
2196
2197
2198
2199
2200
2201static int
2202lpfc_sli_hbqbuf_init_hbqs(struct lpfc_hba *phba, uint32_t qno)
2203{
2204 if (phba->sli_rev == LPFC_SLI_REV4)
2205 return lpfc_sli_hbqbuf_fill_hbqs(phba, qno,
2206 lpfc_hbq_defs[qno]->entry_count);
2207 else
2208 return lpfc_sli_hbqbuf_fill_hbqs(phba, qno,
2209 lpfc_hbq_defs[qno]->init_count);
2210}
2211
2212
2213
2214
2215
2216
2217
2218
2219
2220static struct hbq_dmabuf *
2221lpfc_sli_hbqbuf_get(struct list_head *rb_list)
2222{
2223 struct lpfc_dmabuf *d_buf;
2224
2225 list_remove_head(rb_list, d_buf, struct lpfc_dmabuf, list);
2226 if (!d_buf)
2227 return NULL;
2228 return container_of(d_buf, struct hbq_dmabuf, dbuf);
2229}
2230
2231
2232
2233
2234
2235
2236
2237
2238
2239static struct rqb_dmabuf *
2240lpfc_sli_rqbuf_get(struct lpfc_hba *phba, struct lpfc_queue *hrq)
2241{
2242 struct lpfc_dmabuf *h_buf;
2243 struct lpfc_rqb *rqbp;
2244
2245 rqbp = hrq->rqbp;
2246 list_remove_head(&rqbp->rqb_buffer_list, h_buf,
2247 struct lpfc_dmabuf, list);
2248 if (!h_buf)
2249 return NULL;
2250 rqbp->buffer_count--;
2251 return container_of(h_buf, struct rqb_dmabuf, hbuf);
2252}
2253
2254
2255
2256
2257
2258
2259
2260
2261
2262
/**
 * lpfc_sli_hbqbuf_find - Find the HBQ buffer associated with a tag
 * @phba: pointer to the HBA context object.
 * @tag: buffer tag; HBQ number in the upper 16 bits, buffer index below.
 *
 * Searches the tagged HBQ's in-use buffer list, under hbalock, for the
 * buffer carrying @tag.  An out-of-range HBQ number or an unmatched tag
 * returns NULL (the latter after logging an error).
 *
 * Return: matching hbq_dmabuf, or NULL if not found.
 */
static struct hbq_dmabuf *
lpfc_sli_hbqbuf_find(struct lpfc_hba *phba, uint32_t tag)
{
	struct lpfc_dmabuf *d_buf;
	struct hbq_dmabuf *hbq_buf;
	uint32_t hbqno;

	hbqno = tag >> 16;
	if (hbqno >= LPFC_MAX_HBQS)
		return NULL;

	spin_lock_irq(&phba->hbalock);
	list_for_each_entry(d_buf, &phba->hbqs[hbqno].hbq_buffer_list, list) {
		hbq_buf = container_of(d_buf, struct hbq_dmabuf, dbuf);
		if (hbq_buf->tag == tag) {
			spin_unlock_irq(&phba->hbalock);
			return hbq_buf;
		}
	}
	spin_unlock_irq(&phba->hbalock);
	lpfc_printf_log(phba, KERN_ERR, LOG_SLI | LOG_VPORT,
			"1803 Bad hbq tag. Data: x%x x%x\n",
			tag, phba->hbqs[tag >> 16].buffer_count);
	return NULL;
}
2288
2289
2290
2291
2292
2293
2294
2295
2296
2297
2298void
2299lpfc_sli_free_hbq(struct lpfc_hba *phba, struct hbq_dmabuf *hbq_buffer)
2300{
2301 uint32_t hbqno;
2302
2303 if (hbq_buffer) {
2304 hbqno = hbq_buffer->tag >> 16;
2305 if (lpfc_sli_hbq_to_firmware(phba, hbqno, hbq_buffer))
2306 (phba->hbqs[hbqno].hbq_free_buffer)(phba, hbq_buffer);
2307 }
2308}
2309
2310
2311
2312
2313
2314
2315
2316
2317
2318
2319static int
2320lpfc_sli_chk_mbx_command(uint8_t mbxCommand)
2321{
2322 uint8_t ret;
2323
2324 switch (mbxCommand) {
2325 case MBX_LOAD_SM:
2326 case MBX_READ_NV:
2327 case MBX_WRITE_NV:
2328 case MBX_WRITE_VPARMS:
2329 case MBX_RUN_BIU_DIAG:
2330 case MBX_INIT_LINK:
2331 case MBX_DOWN_LINK:
2332 case MBX_CONFIG_LINK:
2333 case MBX_CONFIG_RING:
2334 case MBX_RESET_RING:
2335 case MBX_READ_CONFIG:
2336 case MBX_READ_RCONFIG:
2337 case MBX_READ_SPARM:
2338 case MBX_READ_STATUS:
2339 case MBX_READ_RPI:
2340 case MBX_READ_XRI:
2341 case MBX_READ_REV:
2342 case MBX_READ_LNK_STAT:
2343 case MBX_REG_LOGIN:
2344 case MBX_UNREG_LOGIN:
2345 case MBX_CLEAR_LA:
2346 case MBX_DUMP_MEMORY:
2347 case MBX_DUMP_CONTEXT:
2348 case MBX_RUN_DIAGS:
2349 case MBX_RESTART:
2350 case MBX_UPDATE_CFG:
2351 case MBX_DOWN_LOAD:
2352 case MBX_DEL_LD_ENTRY:
2353 case MBX_RUN_PROGRAM:
2354 case MBX_SET_MASK:
2355 case MBX_SET_VARIABLE:
2356 case MBX_UNREG_D_ID:
2357 case MBX_KILL_BOARD:
2358 case MBX_CONFIG_FARP:
2359 case MBX_BEACON:
2360 case MBX_LOAD_AREA:
2361 case MBX_RUN_BIU_DIAG64:
2362 case MBX_CONFIG_PORT:
2363 case MBX_READ_SPARM64:
2364 case MBX_READ_RPI64:
2365 case MBX_REG_LOGIN64:
2366 case MBX_READ_TOPOLOGY:
2367 case MBX_WRITE_WWN:
2368 case MBX_SET_DEBUG:
2369 case MBX_LOAD_EXP_ROM:
2370 case MBX_ASYNCEVT_ENABLE:
2371 case MBX_REG_VPI:
2372 case MBX_UNREG_VPI:
2373 case MBX_HEARTBEAT:
2374 case MBX_PORT_CAPABILITIES:
2375 case MBX_PORT_IOV_CONTROL:
2376 case MBX_SLI4_CONFIG:
2377 case MBX_SLI4_REQ_FTRS:
2378 case MBX_REG_FCFI:
2379 case MBX_UNREG_FCFI:
2380 case MBX_REG_VFI:
2381 case MBX_UNREG_VFI:
2382 case MBX_INIT_VPI:
2383 case MBX_INIT_VFI:
2384 case MBX_RESUME_RPI:
2385 case MBX_READ_EVENT_LOG_STATUS:
2386 case MBX_READ_EVENT_LOG:
2387 case MBX_SECURITY_MGMT:
2388 case MBX_AUTH_PORT:
2389 case MBX_ACCESS_VDATA:
2390 ret = mbxCommand;
2391 break;
2392 default:
2393 ret = MBX_SHUTDOWN;
2394 break;
2395 }
2396 return ret;
2397}
2398
2399
2400
2401
2402
2403
2404
2405
2406
2407
2408
2409
2410void
2411lpfc_sli_wake_mbox_wait(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmboxq)
2412{
2413 unsigned long drvr_flag;
2414 struct completion *pmbox_done;
2415
2416
2417
2418
2419
2420 pmboxq->mbox_flag |= LPFC_MBX_WAKE;
2421 spin_lock_irqsave(&phba->hbalock, drvr_flag);
2422 pmbox_done = (struct completion *)pmboxq->context3;
2423 if (pmbox_done)
2424 complete(pmbox_done);
2425 spin_unlock_irqrestore(&phba->hbalock, drvr_flag);
2426 return;
2427}
2428
2429
2430
2431
2432
2433
2434
2435
2436
2437
2438
2439
/**
 * lpfc_sli_def_mbox_cmpl - Default mailbox completion handler
 * @phba: pointer to the HBA context object.
 * @pmb: completed mailbox command.
 *
 * Frees the command's DMA buffer and the mailbox object itself, plus
 * per-command cleanup:
 *  - a successful REG_LOGIN64 with no dedicated handler is immediately
 *    undone with an UNREG_LOGIN (reusing @pmb, so this function returns
 *    without freeing it when the unreg was issued);
 *  - a successful REG_VPI marks the vport registered and clears the
 *    needs-reg-vpi flag;
 *  - REG_LOGIN64 drops the node reference taken when the command was
 *    issued;
 *  - UNREG_LOGIN clears NLP_UNREG_INP and, if a PLOGI was deferred
 *    behind the unreg, issues it now.
 */
void
lpfc_sli_def_mbox_cmpl(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
{
	struct lpfc_vport  *vport = pmb->vport;
	struct lpfc_dmabuf *mp;
	struct lpfc_nodelist *ndlp;
	struct Scsi_Host *shost;
	uint16_t rpi, vpi;
	int rc;

	mp = (struct lpfc_dmabuf *)(pmb->ctx_buf);

	if (mp) {
		lpfc_mbuf_free(phba, mp->virt, mp->phys);
		kfree(mp);
	}

	/*
	 * If a REG_LOGIN succeeded after the driver started unwinding the
	 * login (no completion routine was installed), unregister the RPI
	 * right away, reusing this mailbox object.
	 */
	if (!(phba->pport->load_flag & FC_UNLOADING) &&
	    pmb->u.mb.mbxCommand == MBX_REG_LOGIN64 &&
	    !pmb->u.mb.mbxStatus) {
		rpi = pmb->u.mb.un.varWords[0];
		vpi = pmb->u.mb.un.varRegLogin.vpi;
		lpfc_unreg_login(phba, vpi, rpi, pmb);
		pmb->vport = vport;
		pmb->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
		rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT);
		if (rc != MBX_NOT_FINISHED)
			/* pmb was requeued for the UNREG_LOGIN; do not
			 * free it here.
			 */
			return;
	}

	if ((pmb->u.mb.mbxCommand == MBX_REG_VPI) &&
		!(phba->pport->load_flag & FC_UNLOADING) &&
		!pmb->u.mb.mbxStatus) {
		shost = lpfc_shost_from_vport(vport);
		spin_lock_irq(shost->host_lock);
		vport->vpi_state |= LPFC_VPI_REGISTERED;
		vport->fc_flag &= ~FC_VPORT_NEEDS_REG_VPI;
		spin_unlock_irq(shost->host_lock);
	}

	if (pmb->u.mb.mbxCommand == MBX_REG_LOGIN64) {
		/* Drop the node reference taken when the REG_LOGIN was
		 * submitted.
		 */
		ndlp = (struct lpfc_nodelist *)pmb->ctx_ndlp;
		lpfc_nlp_put(ndlp);
		pmb->ctx_buf = NULL;
		pmb->ctx_ndlp = NULL;
	}

	if (pmb->u.mb.mbxCommand == MBX_UNREG_LOGIN) {
		ndlp = (struct lpfc_nodelist *)pmb->ctx_ndlp;

		/* Check to see if there are any deferred events to process */
		if (ndlp) {
			lpfc_printf_vlog(
				vport,
				KERN_INFO, LOG_MBOX | LOG_DISCOVERY,
				"1438 UNREG cmpl deferred mbox x%x "
				"on NPort x%x Data: x%x x%x %p\n",
				ndlp->nlp_rpi, ndlp->nlp_DID,
				ndlp->nlp_flag, ndlp->nlp_defer_did, ndlp);

			if ((ndlp->nlp_flag & NLP_UNREG_INP) &&
			    (ndlp->nlp_defer_did != NLP_EVT_NOTHING_PENDING)) {
				/* A PLOGI was deferred until the unreg
				 * finished; issue it now.
				 */
				ndlp->nlp_flag &= ~NLP_UNREG_INP;
				ndlp->nlp_defer_did = NLP_EVT_NOTHING_PENDING;
				lpfc_issue_els_plogi(vport, ndlp->nlp_DID, 0);
			} else {
				ndlp->nlp_flag &= ~NLP_UNREG_INP;
			}
			pmb->ctx_ndlp = NULL;
		}
	}

	/* Check security permission status on INIT_LINK mailbox command */
	if ((pmb->u.mb.mbxCommand == MBX_INIT_LINK) &&
	    (pmb->u.mb.mbxStatus == MBXERR_SEC_NO_PERMISSION))
		lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
				"2860 SLI authentication is required "
				"for INIT_LINK but has not done yet\n");

	if (bf_get(lpfc_mqe_command, &pmb->u.mqe) == MBX_SLI4_CONFIG)
		lpfc_sli4_mbox_cmd_free(phba, pmb);
	else
		mempool_free(pmb, phba->mbox_mem_pool);
}
2528
2529
2530
2531
2532
2533
2534
2535
2536
2537
2538
2539
2540
/**
 * lpfc_sli4_unreg_rpi_cmpl_clr - Completion handler for UNREG_LOGIN (SLI-4)
 * @phba: pointer to the HBA context object.
 * @pmb: completed mailbox command.
 *
 * On SLI-4 interface type 2 or later, clears the node's NLP_LOGO_ACC
 * state, drops the node reference held for the unreg, and if a PLOGI was
 * deferred behind the unreg (NLP_UNREG_INP with a pending defer DID),
 * issues it now.  Frees @pmb in all cases.
 *
 * NOTE(review): ndlp is dereferenced after lpfc_nlp_put(); this appears
 * to rely on another reference keeping the node alive — confirm against
 * the node refcounting rules.
 */
void
lpfc_sli4_unreg_rpi_cmpl_clr(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
{
	struct lpfc_vport  *vport = pmb->vport;
	struct lpfc_nodelist *ndlp;

	ndlp = pmb->ctx_ndlp;
	if (pmb->u.mb.mbxCommand == MBX_UNREG_LOGIN) {
		if (phba->sli_rev == LPFC_SLI_REV4 &&
		    (bf_get(lpfc_sli_intf_if_type,
				&phba->sli4_hba.sli_intf) >=
				LPFC_SLI_INTF_IF_TYPE_2)) {
			if (ndlp) {
				lpfc_printf_vlog(
					vport, KERN_INFO, LOG_MBOX | LOG_SLI,
					 "0010 UNREG_LOGIN vpi:%x "
					 "rpi:%x DID:%x defer x%x flg x%x "
					 "map:%x %p\n",
					 vport->vpi, ndlp->nlp_rpi,
					 ndlp->nlp_DID, ndlp->nlp_defer_did,
					 ndlp->nlp_flag,
					 ndlp->nlp_usg_map, ndlp);
				ndlp->nlp_flag &= ~NLP_LOGO_ACC;
				lpfc_nlp_put(ndlp);

				/* Check to see if there are any deferred
				 * events to process
				 */
				if ((ndlp->nlp_flag & NLP_UNREG_INP) &&
				    (ndlp->nlp_defer_did !=
				    NLP_EVT_NOTHING_PENDING)) {
					lpfc_printf_vlog(
						vport, KERN_INFO, LOG_DISCOVERY,
						"4111 UNREG cmpl deferred "
						"clr x%x on "
						"NPort x%x Data: x%x %p\n",
						ndlp->nlp_rpi, ndlp->nlp_DID,
						ndlp->nlp_defer_did, ndlp);
					ndlp->nlp_flag &= ~NLP_UNREG_INP;
					ndlp->nlp_defer_did =
						NLP_EVT_NOTHING_PENDING;
					lpfc_issue_els_plogi(
						vport, ndlp->nlp_DID, 0);
				} else {
					ndlp->nlp_flag &= ~NLP_UNREG_INP;
				}
			}
		}
	}

	mempool_free(pmb, phba->mbox_mem_pool);
}
2593
2594
2595
2596
2597
2598
2599
2600
2601
2602
2603
2604
2605
2606
2607int
2608lpfc_sli_handle_mb_event(struct lpfc_hba *phba)
2609{
2610 MAILBOX_t *pmbox;
2611 LPFC_MBOXQ_t *pmb;
2612 int rc;
2613 LIST_HEAD(cmplq);
2614
2615 phba->sli.slistat.mbox_event++;
2616
2617
2618 spin_lock_irq(&phba->hbalock);
2619 list_splice_init(&phba->sli.mboxq_cmpl, &cmplq);
2620 spin_unlock_irq(&phba->hbalock);
2621
2622
2623 do {
2624 list_remove_head(&cmplq, pmb, LPFC_MBOXQ_t, list);
2625 if (pmb == NULL)
2626 break;
2627
2628 pmbox = &pmb->u.mb;
2629
2630 if (pmbox->mbxCommand != MBX_HEARTBEAT) {
2631 if (pmb->vport) {
2632 lpfc_debugfs_disc_trc(pmb->vport,
2633 LPFC_DISC_TRC_MBOX_VPORT,
2634 "MBOX cmpl vport: cmd:x%x mb:x%x x%x",
2635 (uint32_t)pmbox->mbxCommand,
2636 pmbox->un.varWords[0],
2637 pmbox->un.varWords[1]);
2638 }
2639 else {
2640 lpfc_debugfs_disc_trc(phba->pport,
2641 LPFC_DISC_TRC_MBOX,
2642 "MBOX cmpl: cmd:x%x mb:x%x x%x",
2643 (uint32_t)pmbox->mbxCommand,
2644 pmbox->un.varWords[0],
2645 pmbox->un.varWords[1]);
2646 }
2647 }
2648
2649
2650
2651
2652 if (lpfc_sli_chk_mbx_command(pmbox->mbxCommand) ==
2653 MBX_SHUTDOWN) {
2654
2655 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
2656 "(%d):0323 Unknown Mailbox command "
2657 "x%x (x%x/x%x) Cmpl\n",
2658 pmb->vport ? pmb->vport->vpi : 0,
2659 pmbox->mbxCommand,
2660 lpfc_sli_config_mbox_subsys_get(phba,
2661 pmb),
2662 lpfc_sli_config_mbox_opcode_get(phba,
2663 pmb));
2664 phba->link_state = LPFC_HBA_ERROR;
2665 phba->work_hs = HS_FFER3;
2666 lpfc_handle_eratt(phba);
2667 continue;
2668 }
2669
2670 if (pmbox->mbxStatus) {
2671 phba->sli.slistat.mbox_stat_err++;
2672 if (pmbox->mbxStatus == MBXERR_NO_RESOURCES) {
2673
2674 lpfc_printf_log(phba, KERN_INFO,
2675 LOG_MBOX | LOG_SLI,
2676 "(%d):0305 Mbox cmd cmpl "
2677 "error - RETRYing Data: x%x "
2678 "(x%x/x%x) x%x x%x x%x\n",
2679 pmb->vport ? pmb->vport->vpi : 0,
2680 pmbox->mbxCommand,
2681 lpfc_sli_config_mbox_subsys_get(phba,
2682 pmb),
2683 lpfc_sli_config_mbox_opcode_get(phba,
2684 pmb),
2685 pmbox->mbxStatus,
2686 pmbox->un.varWords[0],
2687 pmb->vport->port_state);
2688 pmbox->mbxStatus = 0;
2689 pmbox->mbxOwner = OWN_HOST;
2690 rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT);
2691 if (rc != MBX_NOT_FINISHED)
2692 continue;
2693 }
2694 }
2695
2696
2697 lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI,
2698 "(%d):0307 Mailbox cmd x%x (x%x/x%x) Cmpl x%p "
2699 "Data: x%x x%x x%x x%x x%x x%x x%x x%x x%x "
2700 "x%x x%x x%x\n",
2701 pmb->vport ? pmb->vport->vpi : 0,
2702 pmbox->mbxCommand,
2703 lpfc_sli_config_mbox_subsys_get(phba, pmb),
2704 lpfc_sli_config_mbox_opcode_get(phba, pmb),
2705 pmb->mbox_cmpl,
2706 *((uint32_t *) pmbox),
2707 pmbox->un.varWords[0],
2708 pmbox->un.varWords[1],
2709 pmbox->un.varWords[2],
2710 pmbox->un.varWords[3],
2711 pmbox->un.varWords[4],
2712 pmbox->un.varWords[5],
2713 pmbox->un.varWords[6],
2714 pmbox->un.varWords[7],
2715 pmbox->un.varWords[8],
2716 pmbox->un.varWords[9],
2717 pmbox->un.varWords[10]);
2718
2719 if (pmb->mbox_cmpl)
2720 pmb->mbox_cmpl(phba,pmb);
2721 } while (1);
2722 return 0;
2723}
2724
2725
2726
2727
2728
2729
2730
2731
2732
2733
2734
2735
2736
2737static struct lpfc_dmabuf *
2738lpfc_sli_get_buff(struct lpfc_hba *phba,
2739 struct lpfc_sli_ring *pring,
2740 uint32_t tag)
2741{
2742 struct hbq_dmabuf *hbq_entry;
2743
2744 if (tag & QUE_BUFTAG_BIT)
2745 return lpfc_sli_ring_taggedbuf_get(phba, pring, tag);
2746 hbq_entry = lpfc_sli_hbqbuf_find(phba, tag);
2747 if (!hbq_entry)
2748 return NULL;
2749 return &hbq_entry->dbuf;
2750}
2751
2752
2753
2754
2755
2756
2757
2758
2759
2760
2761
2762
2763
2764static int
2765lpfc_complete_unsol_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
2766 struct lpfc_iocbq *saveq, uint32_t fch_r_ctl,
2767 uint32_t fch_type)
2768{
2769 int i;
2770
2771 switch (fch_type) {
2772 case FC_TYPE_NVME:
2773 lpfc_nvmet_unsol_ls_event(phba, pring, saveq);
2774 return 1;
2775 default:
2776 break;
2777 }
2778
2779
2780 if (pring->prt[0].profile) {
2781 if (pring->prt[0].lpfc_sli_rcv_unsol_event)
2782 (pring->prt[0].lpfc_sli_rcv_unsol_event) (phba, pring,
2783 saveq);
2784 return 1;
2785 }
2786
2787
2788 for (i = 0; i < pring->num_mask; i++) {
2789 if ((pring->prt[i].rctl == fch_r_ctl) &&
2790 (pring->prt[i].type == fch_type)) {
2791 if (pring->prt[i].lpfc_sli_rcv_unsol_event)
2792 (pring->prt[i].lpfc_sli_rcv_unsol_event)
2793 (phba, pring, saveq);
2794 return 1;
2795 }
2796 }
2797 return 0;
2798}
2799
2800
2801
2802
2803
2804
2805
2806
2807
2808
2809
2810
2811
2812
2813
/**
 * lpfc_sli_process_unsol_iocb - Handle an unsolicited iocb from a ring
 * @phba: pointer to the HBA context object.
 * @pring: pointer to the driver SLI ring object.
 * @saveq: unsolicited iocb.
 *
 * Handles the different flavours of unsolicited iocbs:
 *  - CMD_ASYNC_STATUS goes to the ring's async status handler;
 *  - CMD_IOCB_RET_XRI64_CX (with HBQs enabled) just returns up to three
 *    buffers the port handed back;
 *  - otherwise, with HBQs enabled, the frame buffers referenced by the
 *    iocb (and any chained continuation iocbs) are resolved into
 *    context2/context3;
 *  - multi-iocb sequences (RCV_CONT64 / IOSTAT_INTERMED_RSP) are
 *    accumulated on pring->iocb_continue_saveq, keyed by OX_ID, until
 *    the final piece arrives;
 *  - the assembled frame's R_CTL/TYPE (from word 5, defaulted to
 *    ELS_REQ for the ELS receive commands) is dispatched via
 *    lpfc_complete_unsol_iocb().
 *
 * Return: 1 when handled (or a discard case), 0 when the iocb was only
 * queued as part of an incomplete sequence.
 */
static int
lpfc_sli_process_unsol_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
			    struct lpfc_iocbq *saveq)
{
	IOCB_t           * irsp;
	WORD5            * w5p;
	uint32_t           Rctl, Type;
	struct lpfc_iocbq *iocbq;
	struct lpfc_dmabuf *dmzbuf;

	irsp = &(saveq->iocb);

	if (irsp->ulpCommand == CMD_ASYNC_STATUS) {
		if (pring->lpfc_sli_rcv_async_status)
			pring->lpfc_sli_rcv_async_status(phba, pring, saveq);
		else
			lpfc_printf_log(phba,
					KERN_WARNING,
					LOG_SLI,
					"0316 Ring %d handler: unexpected "
					"ASYNC_STATUS iocb received evt_code "
					"0x%x\n",
					pring->ringno,
					irsp->un.asyncstat.evt_code);
		return 1;
	}

	/* Port returned unused HBQ buffers: free up to three of them. */
	if ((irsp->ulpCommand == CMD_IOCB_RET_XRI64_CX) &&
		(phba->sli3_options & LPFC_SLI3_HBQ_ENABLED)) {
		if (irsp->ulpBdeCount > 0) {
			dmzbuf = lpfc_sli_get_buff(phba, pring,
					irsp->un.ulpWord[3]);
			lpfc_in_buf_free(phba, dmzbuf);
		}

		if (irsp->ulpBdeCount > 1) {
			dmzbuf = lpfc_sli_get_buff(phba, pring,
					irsp->unsli3.sli3Words[3]);
			lpfc_in_buf_free(phba, dmzbuf);
		}

		if (irsp->ulpBdeCount > 2) {
			dmzbuf = lpfc_sli_get_buff(phba, pring,
					irsp->unsli3.sli3Words[7]);
			lpfc_in_buf_free(phba, dmzbuf);
		}

		return 1;
	}

	if (phba->sli3_options & LPFC_SLI3_HBQ_ENABLED) {
		/* Resolve the buffer tags carried in the iocb into the
		 * actual receive buffers.
		 */
		if (irsp->ulpBdeCount != 0) {
			saveq->context2 = lpfc_sli_get_buff(phba, pring,
						irsp->un.ulpWord[3]);
			if (!saveq->context2)
				lpfc_printf_log(phba,
					KERN_ERR,
					LOG_SLI,
					"0341 Ring %d Cannot find buffer for "
					"an unsolicited iocb. tag 0x%x\n",
					pring->ringno,
					irsp->un.ulpWord[3]);
		}
		if (irsp->ulpBdeCount == 2) {
			saveq->context3 = lpfc_sli_get_buff(phba, pring,
						irsp->unsli3.sli3Words[7]);
			if (!saveq->context3)
				lpfc_printf_log(phba,
					KERN_ERR,
					LOG_SLI,
					"0342 Ring %d Cannot find buffer for an"
					" unsolicited iocb. tag 0x%x\n",
					pring->ringno,
					irsp->unsli3.sli3Words[7]);
		}
		/* Same resolution for every chained continuation iocb. */
		list_for_each_entry(iocbq, &saveq->list, list) {
			irsp = &(iocbq->iocb);
			if (irsp->ulpBdeCount != 0) {
				iocbq->context2 = lpfc_sli_get_buff(phba, pring,
							irsp->un.ulpWord[3]);
				if (!iocbq->context2)
					lpfc_printf_log(phba,
						KERN_ERR,
						LOG_SLI,
						"0343 Ring %d Cannot find "
						"buffer for an unsolicited iocb"
						". tag 0x%x\n", pring->ringno,
						irsp->un.ulpWord[3]);
			}
			if (irsp->ulpBdeCount == 2) {
				iocbq->context3 = lpfc_sli_get_buff(phba, pring,
						irsp->unsli3.sli3Words[7]);
				if (!iocbq->context3)
					lpfc_printf_log(phba,
						KERN_ERR,
						LOG_SLI,
						"0344 Ring %d Cannot find "
						"buffer for an unsolicited "
						"iocb. tag 0x%x\n",
						pring->ringno,
						irsp->unsli3.sli3Words[7]);
			}
		}
	}
	if (irsp->ulpBdeCount != 0 &&
	    (irsp->ulpCommand == CMD_IOCB_RCV_CONT64_CX ||
	     irsp->ulpStatus == IOSTAT_INTERMED_RSP)) {
		int found = 0;

		/* search continue save q for same XRI */
		list_for_each_entry(iocbq, &pring->iocb_continue_saveq, clist) {
			if (iocbq->iocb.unsli3.rcvsli3.ox_id ==
				saveq->iocb.unsli3.rcvsli3.ox_id) {
				list_add_tail(&saveq->list, &iocbq->list);
				found = 1;
				break;
			}
		}
		if (!found)
			list_add_tail(&saveq->clist,
				      &pring->iocb_continue_saveq);
		if (saveq->iocb.ulpStatus != IOSTAT_INTERMED_RSP) {
			/* Final piece: process the whole accumulated
			 * sequence, headed by the first queued iocb.
			 */
			list_del_init(&iocbq->clist);
			saveq = iocbq;
			irsp = &(saveq->iocb);
		} else
			return 0;
	}
	if ((irsp->ulpCommand == CMD_RCV_ELS_REQ64_CX) ||
	    (irsp->ulpCommand == CMD_RCV_ELS_REQ_CX) ||
	    (irsp->ulpCommand == CMD_IOCB_RCV_ELS64_CX)) {
		Rctl = FC_RCTL_ELS_REQ;
		Type = FC_TYPE_ELS;
	} else {
		w5p = (WORD5 *)&(saveq->iocb.un.ulpWord[5]);
		Rctl = w5p->hcsw.Rctl;
		Type = w5p->hcsw.Type;

		/* Firmware Workaround */
		if ((Rctl == 0) && (pring->ringno == LPFC_ELS_RING) &&
			(irsp->ulpCommand == CMD_RCV_SEQUENCE64_CX ||
			 irsp->ulpCommand == CMD_IOCB_RCV_SEQ64_CX)) {
			Rctl = FC_RCTL_ELS_REQ;
			Type = FC_TYPE_ELS;
			w5p->hcsw.Rctl = Rctl;
			w5p->hcsw.Type = Type;
		}
	}

	if (!lpfc_complete_unsol_iocb(phba, pring, saveq, Rctl, Type))
		lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
				"0313 Ring %d handler: unexpected Rctl x%x "
				"Type x%x received\n",
				pring->ringno, Rctl, Type);

	return 1;
}
2971
2972
2973
2974
2975
2976
2977
2978
2979
2980
2981
2982
2983
2984
2985static struct lpfc_iocbq *
2986lpfc_sli_iocbq_lookup(struct lpfc_hba *phba,
2987 struct lpfc_sli_ring *pring,
2988 struct lpfc_iocbq *prspiocb)
2989{
2990 struct lpfc_iocbq *cmd_iocb = NULL;
2991 uint16_t iotag;
2992 spinlock_t *temp_lock = NULL;
2993 unsigned long iflag = 0;
2994
2995 if (phba->sli_rev == LPFC_SLI_REV4)
2996 temp_lock = &pring->ring_lock;
2997 else
2998 temp_lock = &phba->hbalock;
2999
3000 spin_lock_irqsave(temp_lock, iflag);
3001 iotag = prspiocb->iocb.ulpIoTag;
3002
3003 if (iotag != 0 && iotag <= phba->sli.last_iotag) {
3004 cmd_iocb = phba->sli.iocbq_lookup[iotag];
3005 if (cmd_iocb->iocb_flag & LPFC_IO_ON_TXCMPLQ) {
3006
3007 list_del_init(&cmd_iocb->list);
3008 cmd_iocb->iocb_flag &= ~LPFC_IO_ON_TXCMPLQ;
3009 pring->txcmplq_cnt--;
3010 spin_unlock_irqrestore(temp_lock, iflag);
3011 return cmd_iocb;
3012 }
3013 }
3014
3015 spin_unlock_irqrestore(temp_lock, iflag);
3016 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
3017 "0317 iotag x%x is out of "
3018 "range: max iotag x%x wd0 x%x\n",
3019 iotag, phba->sli.last_iotag,
3020 *(((uint32_t *) &prspiocb->iocb) + 7));
3021 return NULL;
3022}
3023
3024
3025
3026
3027
3028
3029
3030
3031
3032
3033
3034
3035
3036static struct lpfc_iocbq *
3037lpfc_sli_iocbq_lookup_by_tag(struct lpfc_hba *phba,
3038 struct lpfc_sli_ring *pring, uint16_t iotag)
3039{
3040 struct lpfc_iocbq *cmd_iocb = NULL;
3041 spinlock_t *temp_lock = NULL;
3042 unsigned long iflag = 0;
3043
3044 if (phba->sli_rev == LPFC_SLI_REV4)
3045 temp_lock = &pring->ring_lock;
3046 else
3047 temp_lock = &phba->hbalock;
3048
3049 spin_lock_irqsave(temp_lock, iflag);
3050 if (iotag != 0 && iotag <= phba->sli.last_iotag) {
3051 cmd_iocb = phba->sli.iocbq_lookup[iotag];
3052 if (cmd_iocb->iocb_flag & LPFC_IO_ON_TXCMPLQ) {
3053
3054 list_del_init(&cmd_iocb->list);
3055 cmd_iocb->iocb_flag &= ~LPFC_IO_ON_TXCMPLQ;
3056 pring->txcmplq_cnt--;
3057 spin_unlock_irqrestore(temp_lock, iflag);
3058 return cmd_iocb;
3059 }
3060 }
3061
3062 spin_unlock_irqrestore(temp_lock, iflag);
3063 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
3064 "0372 iotag x%x lookup error: max iotag (x%x) "
3065 "iocb_flag x%x\n",
3066 iotag, phba->sli.last_iotag,
3067 cmd_iocb ? cmd_iocb->iocb_flag : 0xffff);
3068 return NULL;
3069}
3070
3071
3072
3073
3074
3075
3076
3077
3078
3079
3080
3081
3082
3083
3084
3085
3086
3087
/**
 * lpfc_sli_process_sol_iocb - Process a solicited iocb completion
 * @phba: Pointer to HBA context object.
 * @pring: Pointer to driver SLI ring object.
 * @saveq: Pointer to the response iocb to be processed.
 *
 * Looks up the originating command iocb via the response's iotag and,
 * if one is found with a completion handler, invokes that handler.
 * On the ELS ring the routine first patches abort / exchange-busy state
 * into the command and response iocbs so the handler sees a consistent
 * aborted status.  Always returns 1; the caller owns freeing @saveq.
 */
static int
lpfc_sli_process_sol_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
			  struct lpfc_iocbq *saveq)
{
	struct lpfc_iocbq *cmdiocbp;
	int rc = 1;
	unsigned long iflag;

	/* Find the matching command iocb (removes it from the txcmplq) */
	cmdiocbp = lpfc_sli_iocbq_lookup(phba, pring, saveq);
	if (cmdiocbp) {
		if (cmdiocbp->iocb_cmpl) {
			/*
			 * A failed ELS_REQUEST64 on the ELS ring generates
			 * an ELS failure event before completion runs.
			 */
			if (saveq->iocb.ulpStatus &&
			     (pring->ringno == LPFC_ELS_RING) &&
			     (cmdiocbp->iocb.ulpCommand ==
				CMD_ELS_REQUEST64_CR))
				lpfc_send_els_failure_event(phba,
					cmdiocbp, saveq);

			/*
			 * ELS-ring fixups: translate driver-initiated aborts
			 * into a LOCAL_REJECT/SLI_ABORTED status so the
			 * completion handler observes the abort.
			 */
			if (pring->ringno == LPFC_ELS_RING) {
				if ((phba->sli_rev < LPFC_SLI_REV4) &&
				    (cmdiocbp->iocb_flag &
							LPFC_DRIVER_ABORTED)) {
					spin_lock_irqsave(&phba->hbalock,
							  iflag);
					cmdiocbp->iocb_flag &=
						~LPFC_DRIVER_ABORTED;
					spin_unlock_irqrestore(&phba->hbalock,
							       iflag);
					saveq->iocb.ulpStatus =
						IOSTAT_LOCAL_REJECT;
					saveq->iocb.un.ulpWord[4] =
						IOERR_SLI_ABORTED;

					/*
					 * Defer freeing the response buffer
					 * memory until later.
					 */
					spin_lock_irqsave(&phba->hbalock,
							  iflag);
					saveq->iocb_flag |= LPFC_DELAY_MEM_FREE;
					spin_unlock_irqrestore(&phba->hbalock,
							       iflag);
				}
				if (phba->sli_rev == LPFC_SLI_REV4) {
					if (saveq->iocb_flag &
					    LPFC_EXCHANGE_BUSY) {
						/*
						 * Propagate exchange-busy from
						 * the response onto the command
						 * so the XRI is not reused
						 * until the exchange is done.
						 */
						spin_lock_irqsave(
							&phba->hbalock, iflag);
						cmdiocbp->iocb_flag |=
							LPFC_EXCHANGE_BUSY;
						spin_unlock_irqrestore(
							&phba->hbalock, iflag);
					}
					if (cmdiocbp->iocb_flag &
					    LPFC_DRIVER_ABORTED) {
						/*
						 * Clear the abort flag and mark
						 * the command as aborted by the
						 * driver.
						 */
						spin_lock_irqsave(
							&phba->hbalock, iflag);
						cmdiocbp->iocb_flag &=
							~LPFC_DRIVER_ABORTED;
						spin_unlock_irqrestore(
							&phba->hbalock, iflag);
						cmdiocbp->iocb.ulpStatus =
							IOSTAT_LOCAL_REJECT;
						cmdiocbp->iocb.un.ulpWord[4] =
							IOERR_ABORT_REQUESTED;

						/*
						 * Reflect the abort in the
						 * response and delay freeing
						 * its memory.
						 */
						saveq->iocb.ulpStatus =
							IOSTAT_LOCAL_REJECT;
						saveq->iocb.un.ulpWord[4] =
							IOERR_SLI_ABORTED;
						spin_lock_irqsave(
							&phba->hbalock, iflag);
						saveq->iocb_flag |=
							LPFC_DELAY_MEM_FREE;
						spin_unlock_irqrestore(
							&phba->hbalock, iflag);
					}
				}
			}
			(cmdiocbp->iocb_cmpl) (phba, cmdiocbp, saveq);
		} else
			lpfc_sli_release_iocbq(phba, cmdiocbp);
	} else {
		/*
		 * No matching command was found.  On the ELS ring this can
		 * be legitimate (aborts complete the original early); on
		 * other rings it is unexpected and logged.
		 */
		if (pring->ringno != LPFC_ELS_RING) {
			/*
			 * Ring <ringno> handler: unexpected completion IoTag
			 * <IoTag>
			 */
			lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
					 "0322 Ring %d handler: "
					 "unexpected completion IoTag x%x "
					 "Data: x%x x%x x%x x%x\n",
					 pring->ringno,
					 saveq->iocb.ulpIoTag,
					 saveq->iocb.ulpStatus,
					 saveq->iocb.un.ulpWord[4],
					 saveq->iocb.ulpCommand,
					 saveq->iocb.ulpContext);
		}
	}

	return rc;
}
3220
3221
3222
3223
3224
3225
3226
3227
3228
3229
3230
3231static void
3232lpfc_sli_rsp_pointers_error(struct lpfc_hba *phba, struct lpfc_sli_ring *pring)
3233{
3234 struct lpfc_pgp *pgp = &phba->port_gp[pring->ringno];
3235
3236
3237
3238
3239 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
3240 "0312 Ring %d handler: portRspPut %d "
3241 "is bigger than rsp ring %d\n",
3242 pring->ringno, le32_to_cpu(pgp->rspPutInx),
3243 pring->sli.sli3.numRiocb);
3244
3245 phba->link_state = LPFC_HBA_ERROR;
3246
3247
3248
3249
3250
3251 phba->work_ha |= HA_ERATT;
3252 phba->work_hs = HS_FFER3;
3253
3254 lpfc_worker_wake_up(phba);
3255
3256 return;
3257}
3258
3259
3260
3261
3262
3263
3264
3265
3266
3267
3268
3269void lpfc_poll_eratt(struct timer_list *t)
3270{
3271 struct lpfc_hba *phba;
3272 uint32_t eratt = 0;
3273 uint64_t sli_intr, cnt;
3274
3275 phba = from_timer(phba, t, eratt_poll);
3276
3277
3278 sli_intr = phba->sli.slistat.sli_intr;
3279
3280 if (phba->sli.slistat.sli_prev_intr > sli_intr)
3281 cnt = (((uint64_t)(-1) - phba->sli.slistat.sli_prev_intr) +
3282 sli_intr);
3283 else
3284 cnt = (sli_intr - phba->sli.slistat.sli_prev_intr);
3285
3286
3287 do_div(cnt, phba->eratt_poll_interval);
3288 phba->sli.slistat.sli_ips = cnt;
3289
3290 phba->sli.slistat.sli_prev_intr = sli_intr;
3291
3292
3293 eratt = lpfc_sli_check_eratt(phba);
3294
3295 if (eratt)
3296
3297 lpfc_worker_wake_up(phba);
3298 else
3299
3300 mod_timer(&phba->eratt_poll,
3301 jiffies +
3302 msecs_to_jiffies(1000 * phba->eratt_poll_interval));
3303 return;
3304}
3305
3306
3307
3308
3309
3310
3311
3312
3313
3314
3315
3316
3317
3318
3319
3320
3321
3322
3323
/**
 * lpfc_sli_handle_fast_ring_event - Handle response-ring completions (FCP ring)
 * @phba: Pointer to HBA context object.
 * @pring: Pointer to driver SLI ring object.
 * @mask: Host attention register mask for this ring.
 *
 * Walks the SLI3 response ring from the driver's get index up to the
 * port's put index, copying each response entry and dispatching it by
 * iocb type: solicited/abort completions call the command's completion
 * handler, unsolicited iocbs go to lpfc_sli_process_unsol_iocb(), and
 * adapter messages / unknown commands are logged.  hbalock is held
 * across ring index manipulation and dropped around upcalls.  Returns 1.
 */
int
lpfc_sli_handle_fast_ring_event(struct lpfc_hba *phba,
				struct lpfc_sli_ring *pring, uint32_t mask)
{
	struct lpfc_pgp *pgp = &phba->port_gp[pring->ringno];
	IOCB_t *irsp = NULL;
	IOCB_t *entry = NULL;
	struct lpfc_iocbq *cmdiocbq = NULL;
	struct lpfc_iocbq rspiocbq;
	uint32_t status;
	uint32_t portRspPut, portRspMax;
	int rc = 1;
	lpfc_iocb_type type;
	unsigned long iflag;
	uint32_t rsp_cmpl = 0;

	spin_lock_irqsave(&phba->hbalock, iflag);
	pring->stats.iocb_event++;

	/*
	 * The put index must lie within the ring; otherwise the port
	 * is confused and the HBA is failed.
	 */
	portRspMax = pring->sli.sli3.numRiocb;
	portRspPut = le32_to_cpu(pgp->rspPutInx);
	if (unlikely(portRspPut >= portRspMax)) {
		lpfc_sli_rsp_pointers_error(phba, pring);
		spin_unlock_irqrestore(&phba->hbalock, iflag);
		return 1;
	}
	/* Only one context processes the FCP ring at a time */
	if (phba->fcp_ring_in_use) {
		spin_unlock_irqrestore(&phba->hbalock, iflag);
		return 1;
	} else
		phba->fcp_ring_in_use = 1;

	rmb();
	while (pring->sli.sli3.rspidx != portRspPut) {
		/*
		 * Fetch an entry off the ring and copy it into a local
		 * data structure.  The copy involves a byte-swap since
		 * the network byte order and PCI byte orders are
		 * different.
		 */
		entry = lpfc_resp_iocb(phba, pring);
		phba->last_completion_time = jiffies;

		if (++pring->sli.sli3.rspidx >= portRspMax)
			pring->sli.sli3.rspidx = 0;

		lpfc_sli_pcimem_bcopy((uint32_t *) entry,
				      (uint32_t *) &rspiocbq.iocb,
				      phba->iocb_rsp_size);
		INIT_LIST_HEAD(&(rspiocbq.list));
		irsp = &rspiocbq.iocb;

		type = lpfc_sli_iocb_cmd_type(irsp->ulpCommand & CMD_IOCB_MASK);
		pring->stats.iocb_rsp++;
		rsp_cmpl++;

		if (unlikely(irsp->ulpStatus)) {
			/*
			 * If a resource response came back with
			 * NO_RESOURCES, ramp down the queue depth
			 * (hbalock dropped around the upcall).
			 */
			if ((irsp->ulpStatus == IOSTAT_LOCAL_REJECT) &&
			    ((irsp->un.ulpWord[4] & IOERR_PARAM_MASK) ==
			     IOERR_NO_RESOURCES)) {
				spin_unlock_irqrestore(&phba->hbalock, iflag);
				phba->lpfc_rampdown_queue_depth(phba);
				spin_lock_irqsave(&phba->hbalock, iflag);
			}

			/* Rsp ring <ringno> error: IOCB */
			lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
					"0336 Rsp Ring %d error: IOCB Data: "
					"x%x x%x x%x x%x x%x x%x x%x x%x\n",
					pring->ringno,
					irsp->un.ulpWord[0],
					irsp->un.ulpWord[1],
					irsp->un.ulpWord[2],
					irsp->un.ulpWord[3],
					irsp->un.ulpWord[4],
					irsp->un.ulpWord[5],
					*(uint32_t *)&irsp->un1,
					*((uint32_t *)&irsp->un1 + 1));
		}

		switch (type) {
		case LPFC_ABORT_IOCB:
		case LPFC_SOL_IOCB:
			/*
			 * Idle exchange closed via ABTS from port.  No iocb
			 * resources need to be recovered.
			 */
			if (unlikely(irsp->ulpCommand == CMD_XRI_ABORTED_CX)) {
				lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
						"0333 IOCB cmd 0x%x"
						" processed. Skipping"
						" completion\n",
						irsp->ulpCommand);
				break;
			}

			/* Lookup takes its own lock; drop hbalock around it */
			spin_unlock_irqrestore(&phba->hbalock, iflag);
			cmdiocbq = lpfc_sli_iocbq_lookup(phba, pring,
							 &rspiocbq);
			spin_lock_irqsave(&phba->hbalock, iflag);
			if (unlikely(!cmdiocbq))
				break;
			if (cmdiocbq->iocb_flag & LPFC_DRIVER_ABORTED)
				cmdiocbq->iocb_flag &= ~LPFC_DRIVER_ABORTED;
			if (cmdiocbq->iocb_cmpl) {
				/* hbalock dropped around the completion call */
				spin_unlock_irqrestore(&phba->hbalock, iflag);
				(cmdiocbq->iocb_cmpl)(phba, cmdiocbq,
						      &rspiocbq);
				spin_lock_irqsave(&phba->hbalock, iflag);
			}
			break;
		case LPFC_UNSOL_IOCB:
			spin_unlock_irqrestore(&phba->hbalock, iflag);
			lpfc_sli_process_unsol_iocb(phba, pring, &rspiocbq);
			spin_lock_irqsave(&phba->hbalock, iflag);
			break;
		default:
			if (irsp->ulpCommand == CMD_ADAPTER_MSG) {
				char adaptermsg[LPFC_MAX_ADPTMSG];
				memset(adaptermsg, 0, LPFC_MAX_ADPTMSG);
				memcpy(&adaptermsg[0], (uint8_t *) irsp,
				       MAX_MSG_DATA);
				dev_warn(&((phba->pcidev)->dev),
					 "lpfc%d: %s\n",
					 phba->brd_no, adaptermsg);
			} else {
				/* Unknown IOCB command */
				lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
						"0334 Unknown IOCB command "
						"Data: x%x, x%x x%x x%x x%x\n",
						type, irsp->ulpCommand,
						irsp->ulpStatus,
						irsp->ulpIoTag,
						irsp->ulpContext);
			}
			break;
		}

		/*
		 * The response IOCB has been processed.  Update the ring
		 * pointer in SLIM.  If the port response put pointer has not
		 * been updated, sync the pgp->rspPutInx and fetch the new port
		 * response put pointer.
		 */
		writel(pring->sli.sli3.rspidx,
		       &phba->host_gp[pring->ringno].rspGetInx);

		if (pring->sli.sli3.rspidx == portRspPut)
			portRspPut = le32_to_cpu(pgp->rspPutInx);
	}

	if ((rsp_cmpl > 0) && (mask & HA_R0RE_REQ)) {
		pring->stats.iocb_rsp_full++;
		status = ((CA_R0ATT | CA_R0RE_RSP) << (pring->ringno * 4));
		writel(status, phba->CAregaddr);
		readl(phba->CAregaddr);
	}
	if ((mask & HA_R0CE_RSP) && (pring->flag & LPFC_CALL_RING_AVAILABLE)) {
		pring->flag &= ~LPFC_CALL_RING_AVAILABLE;
		pring->stats.iocb_cmd_empty++;

		/* Force update of the local copy of cmdGetInx */
		pring->sli.sli3.local_getidx = le32_to_cpu(pgp->cmdGetInx);
		lpfc_sli_resume_iocb(phba, pring);

		if ((pring->lpfc_sli_cmd_available))
			(pring->lpfc_sli_cmd_available) (phba, pring);

	}

	phba->fcp_ring_in_use = 0;
	spin_unlock_irqrestore(&phba->hbalock, iflag);
	return rc;
}
3505
3506
3507
3508
3509
3510
3511
3512
3513
3514
3515
3516
3517
3518
3519
3520
3521
3522
3523
/**
 * lpfc_sli_sp_handle_rspiocb - Process a slow-path response iocb
 * @phba: Pointer to HBA context object.
 * @pring: Pointer to driver SLI ring object.
 * @rspiocbp: Pointer to the response iocb entry just received.
 *
 * Accumulates response iocbs on the ring's continuation queue until the
 * entry marked ulpLe (last entry) arrives, then processes the whole
 * sequence headed by the first queued iocb.  Dispatch is by iocb type:
 * solicited, unsolicited, abort, or unknown.  Unless an unsolicited
 * handler keeps the buffers, the sequence is released back to the iocb
 * pool.  Returns NULL once the sequence is consumed, or @rspiocbp when
 * the entry was only queued awaiting its continuation.
 */
static struct lpfc_iocbq *
lpfc_sli_sp_handle_rspiocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
			struct lpfc_iocbq *rspiocbp)
{
	struct lpfc_iocbq *saveq;
	struct lpfc_iocbq *cmdiocbp;
	struct lpfc_iocbq *next_iocb;
	IOCB_t *irsp = NULL;
	uint32_t free_saveq;
	uint8_t iocb_cmd_type;
	lpfc_iocb_type type;
	unsigned long iflag;
	int rc;

	spin_lock_irqsave(&phba->hbalock, iflag);
	/* First add the response iocb to the countinueq list */
	list_add_tail(&rspiocbp->list, &(pring->iocb_continueq));
	pring->iocb_continueq_cnt++;

	/* Now, determine whether the list is completed for processing */
	irsp = &rspiocbp->iocb;
	if (irsp->ulpLe) {
		/*
		 * By default, the driver expects to free all resources
		 * associated with this iocb completion.
		 */
		free_saveq = 1;
		saveq = list_get_first(&pring->iocb_continueq,
				       struct lpfc_iocbq, list);
		irsp = &(saveq->iocb);
		list_del_init(&pring->iocb_continueq);
		pring->iocb_continueq_cnt = 0;

		pring->stats.iocb_rsp++;

		/*
		 * If resource errors reported from HBA, reduce
		 * queuedepths of the SCSI device.
		 */
		if ((irsp->ulpStatus == IOSTAT_LOCAL_REJECT) &&
		    ((irsp->un.ulpWord[4] & IOERR_PARAM_MASK) ==
		     IOERR_NO_RESOURCES)) {
			spin_unlock_irqrestore(&phba->hbalock, iflag);
			phba->lpfc_rampdown_queue_depth(phba);
			spin_lock_irqsave(&phba->hbalock, iflag);
		}

		if (irsp->ulpStatus) {
			/* Rsp ring <ringno> error: IOCB */
			lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
					"0328 Rsp Ring %d error: "
					"IOCB Data: "
					"x%x x%x x%x x%x "
					"x%x x%x x%x x%x "
					"x%x x%x x%x x%x "
					"x%x x%x x%x x%x\n",
					pring->ringno,
					irsp->un.ulpWord[0],
					irsp->un.ulpWord[1],
					irsp->un.ulpWord[2],
					irsp->un.ulpWord[3],
					irsp->un.ulpWord[4],
					irsp->un.ulpWord[5],
					*(((uint32_t *) irsp) + 6),
					*(((uint32_t *) irsp) + 7),
					*(((uint32_t *) irsp) + 8),
					*(((uint32_t *) irsp) + 9),
					*(((uint32_t *) irsp) + 10),
					*(((uint32_t *) irsp) + 11),
					*(((uint32_t *) irsp) + 12),
					*(((uint32_t *) irsp) + 13),
					*(((uint32_t *) irsp) + 14),
					*(((uint32_t *) irsp) + 15));
		}

		/*
		 * Fetch the IOCB command type and call the correct completion
		 * routine.  Solicited and Unsolicited IOCBs on the ELS ring
		 * get freed back to the lpfc_iocb_list by the discovery
		 * kernel thread.
		 */
		iocb_cmd_type = irsp->ulpCommand & CMD_IOCB_MASK;
		type = lpfc_sli_iocb_cmd_type(iocb_cmd_type);
		switch (type) {
		case LPFC_SOL_IOCB:
			spin_unlock_irqrestore(&phba->hbalock, iflag);
			rc = lpfc_sli_process_sol_iocb(phba, pring, saveq);
			spin_lock_irqsave(&phba->hbalock, iflag);
			break;

		case LPFC_UNSOL_IOCB:
			spin_unlock_irqrestore(&phba->hbalock, iflag);
			rc = lpfc_sli_process_unsol_iocb(phba, pring, saveq);
			spin_lock_irqsave(&phba->hbalock, iflag);
			/* handler kept the buffers; do not free them here */
			if (!rc)
				free_saveq = 0;
			break;

		case LPFC_ABORT_IOCB:
			cmdiocbp = NULL;
			if (irsp->ulpCommand != CMD_XRI_ABORTED_CX) {
				spin_unlock_irqrestore(&phba->hbalock, iflag);
				cmdiocbp = lpfc_sli_iocbq_lookup(phba, pring,
								 saveq);
				spin_lock_irqsave(&phba->hbalock, iflag);
			}
			if (cmdiocbp) {
				/* Call the specified completion routine */
				if (cmdiocbp->iocb_cmpl) {
					spin_unlock_irqrestore(&phba->hbalock,
							       iflag);
					(cmdiocbp->iocb_cmpl)(phba, cmdiocbp,
							      saveq);
					spin_lock_irqsave(&phba->hbalock,
							  iflag);
				} else
					__lpfc_sli_release_iocbq(phba,
								 cmdiocbp);
			}
			break;

		case LPFC_UNKNOWN_IOCB:
			if (irsp->ulpCommand == CMD_ADAPTER_MSG) {
				char adaptermsg[LPFC_MAX_ADPTMSG];
				memset(adaptermsg, 0, LPFC_MAX_ADPTMSG);
				memcpy(&adaptermsg[0], (uint8_t *)irsp,
				       MAX_MSG_DATA);
				dev_warn(&((phba->pcidev)->dev),
					 "lpfc%d: %s\n",
					 phba->brd_no, adaptermsg);
			} else {
				/* Unknown IOCB command */
				lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
						"0335 Unknown IOCB "
						"command Data: x%x "
						"x%x x%x x%x\n",
						irsp->ulpCommand,
						irsp->ulpStatus,
						irsp->ulpIoTag,
						irsp->ulpContext);
			}
			break;
		}

		if (free_saveq) {
			/* Release every iocb of the completed sequence */
			list_for_each_entry_safe(rspiocbp, next_iocb,
						 &saveq->list, list) {
				list_del_init(&rspiocbp->list);
				__lpfc_sli_release_iocbq(phba, rspiocbp);
			}
			__lpfc_sli_release_iocbq(phba, saveq);
		}
		rspiocbp = NULL;
	}
	spin_unlock_irqrestore(&phba->hbalock, iflag);
	return rspiocbp;
}
3681
3682
3683
3684
3685
3686
3687
3688
3689
3690
/**
 * lpfc_sli_handle_slow_ring_event - Wrapper func for handling slow ring events
 * @phba: Pointer to HBA context object.
 * @pring: Pointer to driver SLI ring object.
 * @mask: Host attention register mask for this ring.
 *
 * Dispatches to the SLI-rev specific slow-ring handler installed in
 * @phba (the _s3 or _s4 variant).
 */
void
lpfc_sli_handle_slow_ring_event(struct lpfc_hba *phba,
				struct lpfc_sli_ring *pring, uint32_t mask)
{
	phba->lpfc_sli_handle_slow_ring_event(phba, pring, mask);
}
3697
3698
3699
3700
3701
3702
3703
3704
3705
3706
3707
3708
/**
 * lpfc_sli_handle_slow_ring_event_s3 - Handle SLI3 ring event for non-FCP rings
 * @phba: Pointer to HBA context object.
 * @pring: Pointer to driver SLI ring object.
 * @mask: Host attention register mask for this ring.
 *
 * Walks the SLI3 response ring from the driver's get index up to the
 * port's put index.  Each entry is copied into a freshly allocated iocbq
 * and handed to lpfc_sli_sp_handle_rspiocb() (hbalock dropped across
 * the call).  Afterwards the chip attention register is updated if the
 * response ring went full, and queued commands are resumed if the port
 * signalled command-ring space.
 */
static void
lpfc_sli_handle_slow_ring_event_s3(struct lpfc_hba *phba,
				   struct lpfc_sli_ring *pring, uint32_t mask)
{
	struct lpfc_pgp *pgp;
	IOCB_t *entry;
	IOCB_t *irsp = NULL;
	struct lpfc_iocbq *rspiocbp = NULL;
	uint32_t portRspPut, portRspMax;
	unsigned long iflag;
	uint32_t status;

	pgp = &phba->port_gp[pring->ringno];
	spin_lock_irqsave(&phba->hbalock, iflag);
	pring->stats.iocb_event++;

	/*
	 * The next available response entry should never exceed the maximum
	 * entries.  If it does, treat it as an adapter hardware error.
	 */
	portRspMax = pring->sli.sli3.numRiocb;
	portRspPut = le32_to_cpu(pgp->rspPutInx);
	if (portRspPut >= portRspMax) {
		/*
		 * Ring <ringno> handler: portRspPut <portRspPut> is bigger than
		 * rsp ring <portRspMax>
		 */
		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
				"0303 Ring %d handler: portRspPut %d "
				"is bigger than rsp ring %d\n",
				pring->ringno, portRspPut, portRspMax);

		phba->link_state = LPFC_HBA_ERROR;
		spin_unlock_irqrestore(&phba->hbalock, iflag);

		phba->work_hs = HS_FFER3;
		lpfc_handle_eratt(phba);

		return;
	}

	rmb();
	while (pring->sli.sli3.rspidx != portRspPut) {
		/*
		 * Build a completion list and call the appropriate handler.
		 * The process is to get the next available response iocb, get
		 * a free iocb from the list, copy the response data into the
		 * free iocb, insert to the continuation list, and update the
		 * next response index to slim.  This process makes response
		 * iocb's in the ring available to DMA as fast as possible but
		 * pays a penalty for a copy operation.  Since the iocb is
		 * only 32 bytes, this penalty is considered small relative to
		 * the PCI reads for register values and a slim write.  When
		 * the ulpLe field is set, the entire Command has been
		 * received.
		 */
		entry = lpfc_resp_iocb(phba, pring);

		phba->last_completion_time = jiffies;
		rspiocbp = __lpfc_sli_get_iocbq(phba);
		if (rspiocbp == NULL) {
			printk(KERN_ERR "%s: out of buffers! Failing "
			       "completion.\n", __func__);
			break;
		}

		lpfc_sli_pcimem_bcopy(entry, &rspiocbp->iocb,
				      phba->iocb_rsp_size);
		irsp = &rspiocbp->iocb;

		if (++pring->sli.sli3.rspidx >= portRspMax)
			pring->sli.sli3.rspidx = 0;

		if (pring->ringno == LPFC_ELS_RING) {
			lpfc_debugfs_slow_ring_trc(phba,
			"IOCB rsp ring:   wd4:x%08x wd6:x%08x wd7:x%08x",
				*(((uint32_t *) irsp) + 4),
				*(((uint32_t *) irsp) + 6),
				*(((uint32_t *) irsp) + 7));
		}

		writel(pring->sli.sli3.rspidx,
		       &phba->host_gp[pring->ringno].rspGetInx);

		spin_unlock_irqrestore(&phba->hbalock, iflag);
		/* Handle the response IOCB */
		rspiocbp = lpfc_sli_sp_handle_rspiocb(phba, pring, rspiocbp);
		spin_lock_irqsave(&phba->hbalock, iflag);

		/*
		 * If the port response put pointer has not been updated, sync
		 * the pgp->rspPutInx in the MAILBOX_tand fetch the new port
		 * response put pointer.
		 */
		if (pring->sli.sli3.rspidx == portRspPut) {
			portRspPut = le32_to_cpu(pgp->rspPutInx);
		}
	} /* while (pring->sli.sli3.rspidx != portRspPut) */

	if ((rspiocbp != NULL) && (mask & HA_R0RE_REQ)) {
		/* At least one response entry has been freed */
		pring->stats.iocb_rsp_full++;
		/* SET RxRE_RSP in Chip Att register */
		status = ((CA_R0ATT | CA_R0RE_RSP) << (pring->ringno * 4));
		writel(status, phba->CAregaddr);
		readl(phba->CAregaddr); /* flush */
	}
	if ((mask & HA_R0CE_RSP) && (pring->flag & LPFC_CALL_RING_AVAILABLE)) {
		pring->flag &= ~LPFC_CALL_RING_AVAILABLE;
		pring->stats.iocb_cmd_empty++;

		/* Force update of the local copy of cmdGetInx */
		pring->sli.sli3.local_getidx = le32_to_cpu(pgp->cmdGetInx);
		lpfc_sli_resume_iocb(phba, pring);

		if ((pring->lpfc_sli_cmd_available))
			(pring->lpfc_sli_cmd_available) (phba, pring);

	}

	spin_unlock_irqrestore(&phba->hbalock, iflag);
	return;
}
3832
3833
3834
3835
3836
3837
3838
3839
3840
3841
3842
3843
3844
/**
 * lpfc_sli_handle_slow_ring_event_s4 - Handle SLI4 slow-path queue events
 * @phba: Pointer to HBA context object.
 * @pring: Pointer to driver SLI ring object.
 * @mask: Host attention register mask for this ring (unused on SLI4).
 *
 * Drains the sp_queue_event list: ELS work-queue completions are
 * converted to response iocbs and processed through
 * lpfc_sli_sp_handle_rspiocb(); receive-queue events are handed to the
 * received-buffer handler.  Processing is capped at 64 events per
 * invocation so other work is not starved.
 */
static void
lpfc_sli_handle_slow_ring_event_s4(struct lpfc_hba *phba,
				   struct lpfc_sli_ring *pring, uint32_t mask)
{
	struct lpfc_iocbq *irspiocbq;
	struct hbq_dmabuf *dmabuf;
	struct lpfc_cq_event *cq_event;
	unsigned long iflag;
	int count = 0;

	spin_lock_irqsave(&phba->hbalock, iflag);
	phba->hba_flag &= ~HBA_SP_QUEUE_EVT;
	spin_unlock_irqrestore(&phba->hbalock, iflag);
	while (!list_empty(&phba->sli4_hba.sp_queue_event)) {
		/* Get the response iocb from the head of work queue */
		spin_lock_irqsave(&phba->hbalock, iflag);
		list_remove_head(&phba->sli4_hba.sp_queue_event,
				 cq_event, struct lpfc_cq_event, list);
		spin_unlock_irqrestore(&phba->hbalock, iflag);

		switch (bf_get(lpfc_wcqe_c_code, &cq_event->cqe.wcqe_cmpl)) {
		case CQE_CODE_COMPL_WQE:
			irspiocbq = container_of(cq_event, struct lpfc_iocbq,
						 cq_event);
			/* Translate ELS WCQE to response IOCBQ */
			irspiocbq = lpfc_sli4_els_wcqe_to_rspiocbq(phba,
								   irspiocbq);
			if (irspiocbq)
				lpfc_sli_sp_handle_rspiocb(phba, pring,
							   irspiocbq);
			count++;
			break;
		case CQE_CODE_RECEIVE:
		case CQE_CODE_RECEIVE_V1:
			dmabuf = container_of(cq_event, struct hbq_dmabuf,
					      cq_event);
			lpfc_sli4_handle_received_buffer(phba, dmabuf);
			count++;
			break;
		default:
			break;
		}

		/* Limit the number of events to 64 to avoid soft lockups */
		if (count == 64)
			break;
	}
}
3893
3894
3895
3896
3897
3898
3899
3900
3901
3902
3903
/**
 * lpfc_sli_abort_iocb_ring - Abort all iocbs in the ring
 * @phba: Pointer to HBA context object.
 * @pring: Pointer to driver SLI ring object.
 *
 * Aborts all iocbs on the given ring: pending iocbs on the txq are
 * moved to a local list and cancelled with LOCAL_REJECT/SLI_ABORTED;
 * iocbs already issued to the HBA (txcmplq) get an abort iotag issued
 * for each of them.  SLI4 protects the txq with the per-ring lock,
 * earlier revs with hbalock; the txcmplq walk is always under hbalock.
 */
void
lpfc_sli_abort_iocb_ring(struct lpfc_hba *phba, struct lpfc_sli_ring *pring)
{
	LIST_HEAD(completions);
	struct lpfc_iocbq *iocb, *next_iocb;

	if (pring->ringno == LPFC_ELS_RING) {
		lpfc_fabric_abort_hba(phba);
	}

	/* Error everything on txq and txcmplq
	 * First do the txq.
	 */
	if (phba->sli_rev >= LPFC_SLI_REV4) {
		spin_lock_irq(&pring->ring_lock);
		list_splice_init(&pring->txq, &completions);
		pring->txq_cnt = 0;
		spin_unlock_irq(&pring->ring_lock);

		spin_lock_irq(&phba->hbalock);
		/* Next issue ABTS for everything on the txcmplq */
		list_for_each_entry_safe(iocb, next_iocb, &pring->txcmplq, list)
			lpfc_sli_issue_abort_iotag(phba, pring, iocb);
		spin_unlock_irq(&phba->hbalock);
	} else {
		spin_lock_irq(&phba->hbalock);
		list_splice_init(&pring->txq, &completions);
		pring->txq_cnt = 0;

		/* Next issue ABTS for everything on the txcmplq */
		list_for_each_entry_safe(iocb, next_iocb, &pring->txcmplq, list)
			lpfc_sli_issue_abort_iotag(phba, pring, iocb);
		spin_unlock_irq(&phba->hbalock);
	}

	/* Cancel all the IOCBs that were pending on the txq */
	lpfc_sli_cancel_iocbs(phba, &completions, IOSTAT_LOCAL_REJECT,
			      IOERR_SLI_ABORTED);
}
3943
3944
3945
3946
3947
3948
3949
3950
3951
3952
3953
3954void
3955lpfc_sli_abort_fcp_rings(struct lpfc_hba *phba)
3956{
3957 struct lpfc_sli *psli = &phba->sli;
3958 struct lpfc_sli_ring *pring;
3959 uint32_t i;
3960
3961
3962 if (phba->sli_rev >= LPFC_SLI_REV4) {
3963 for (i = 0; i < phba->cfg_hdw_queue; i++) {
3964 pring = phba->sli4_hba.hdwq[i].fcp_wq->pring;
3965 lpfc_sli_abort_iocb_ring(phba, pring);
3966 }
3967 } else {
3968 pring = &psli->sli3_ring[LPFC_FCP_RING];
3969 lpfc_sli_abort_iocb_ring(phba, pring);
3970 }
3971}
3972
3973
3974
3975
3976
3977
3978
3979
3980
3981
3982
/**
 * lpfc_sli_flush_fcp_rings - Flush all iocbs in all FCP rings
 * @phba: Pointer to HBA context object.
 *
 * Sets HBA_FCP_IOQ_FLUSH, then empties the txq and txcmplq of every
 * FCP ring and cancels all removed iocbs with LOCAL_REJECT/SLI_DOWN.
 * Outstanding iocbs have LPFC_IO_ON_TXCMPLQ cleared before being pulled
 * off the txcmplq.  SLI4 uses the per-ring lock; SLI3 uses hbalock.
 */
void
lpfc_sli_flush_fcp_rings(struct lpfc_hba *phba)
{
	LIST_HEAD(txq);
	LIST_HEAD(txcmplq);
	struct lpfc_sli *psli = &phba->sli;
	struct lpfc_sli_ring  *pring;
	uint32_t i;
	struct lpfc_iocbq *piocb, *next_iocb;

	spin_lock_irq(&phba->hbalock);
	/* Indicate the I/O queues are flushed */
	phba->hba_flag |= HBA_FCP_IOQ_FLUSH;
	spin_unlock_irq(&phba->hbalock);

	/* Look on all the FCP Rings for the iotag */
	if (phba->sli_rev >= LPFC_SLI_REV4) {
		for (i = 0; i < phba->cfg_hdw_queue; i++) {
			pring = phba->sli4_hba.hdwq[i].fcp_wq->pring;

			spin_lock_irq(&pring->ring_lock);
			/* Retrieve everything on txq */
			list_splice_init(&pring->txq, &txq);
			list_for_each_entry_safe(piocb, next_iocb,
						 &pring->txcmplq, list)
				piocb->iocb_flag &= ~LPFC_IO_ON_TXCMPLQ;
			/* Retrieve everything on the txcmplq */
			list_splice_init(&pring->txcmplq, &txcmplq);
			pring->txq_cnt = 0;
			pring->txcmplq_cnt = 0;
			spin_unlock_irq(&pring->ring_lock);

			/* Flush the txq */
			lpfc_sli_cancel_iocbs(phba, &txq,
					      IOSTAT_LOCAL_REJECT,
					      IOERR_SLI_DOWN);
			/* Flush the txcmpq */
			lpfc_sli_cancel_iocbs(phba, &txcmplq,
					      IOSTAT_LOCAL_REJECT,
					      IOERR_SLI_DOWN);
		}
	} else {
		pring = &psli->sli3_ring[LPFC_FCP_RING];

		spin_lock_irq(&phba->hbalock);
		/* Retrieve everything on txq */
		list_splice_init(&pring->txq, &txq);
		list_for_each_entry_safe(piocb, next_iocb,
					 &pring->txcmplq, list)
			piocb->iocb_flag &= ~LPFC_IO_ON_TXCMPLQ;
		/* Retrieve everything on the txcmplq */
		list_splice_init(&pring->txcmplq, &txcmplq);
		pring->txq_cnt = 0;
		pring->txcmplq_cnt = 0;
		spin_unlock_irq(&phba->hbalock);

		/* Flush the txq */
		lpfc_sli_cancel_iocbs(phba, &txq, IOSTAT_LOCAL_REJECT,
				      IOERR_SLI_DOWN);
		/* Flush the txcmpq */
		lpfc_sli_cancel_iocbs(phba, &txcmplq, IOSTAT_LOCAL_REJECT,
				      IOERR_SLI_DOWN);
	}
}
4047
4048
4049
4050
4051
4052
4053
4054
4055
4056
4057
/**
 * lpfc_sli_flush_nvme_rings - Flush all wqes in the nvme rings
 * @phba: Pointer to HBA context object.
 *
 * No-op unless the HBA is SLI4 with NVME enabled.  Otherwise sets
 * HBA_NVME_IOQ_FLUSH, then for every hardware queue clears
 * LPFC_IO_ON_TXCMPLQ on each outstanding wqe, empties the txcmplq
 * under the per-ring lock, and cancels the removed entries with
 * LOCAL_REJECT/SLI_DOWN.
 */
void
lpfc_sli_flush_nvme_rings(struct lpfc_hba *phba)
{
	LIST_HEAD(txcmplq);
	struct lpfc_sli_ring  *pring;
	uint32_t i;
	struct lpfc_iocbq *piocb, *next_iocb;

	if ((phba->sli_rev < LPFC_SLI_REV4) ||
	    !(phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME))
		return;

	/* Hint to other driver operations that a flush is in progress. */
	spin_lock_irq(&phba->hbalock);
	phba->hba_flag |= HBA_NVME_IOQ_FLUSH;
	spin_unlock_irq(&phba->hbalock);

	/*
	 * Cycle through all NVME rings and complete each IO with
	 * a local driver reason code.  This is a flush so no
	 * abort exchange to FW.
	 */
	for (i = 0; i < phba->cfg_hdw_queue; i++) {
		pring = phba->sli4_hba.hdwq[i].nvme_wq->pring;

		spin_lock_irq(&pring->ring_lock);
		list_for_each_entry_safe(piocb, next_iocb,
					 &pring->txcmplq, list)
			piocb->iocb_flag &= ~LPFC_IO_ON_TXCMPLQ;
		/* Retrieve everything on the txcmplq */
		list_splice_init(&pring->txcmplq, &txcmplq);
		pring->txcmplq_cnt = 0;
		spin_unlock_irq(&pring->ring_lock);

		/* Flush the txcmpq &&&PAE */
		lpfc_sli_cancel_iocbs(phba, &txcmplq,
				      IOSTAT_LOCAL_REJECT,
				      IOERR_SLI_DOWN);
	}
}
4097
4098
4099
4100
4101
4102
4103
4104
4105
4106
4107
4108
4109
4110
/**
 * lpfc_sli_brdready_s3 - Check for SLI3 host ready status
 * @phba: Pointer to HBA context object.
 * @mask: Bit mask to be checked against the host status register.
 *
 * Polls the host status register until all @mask bits are set, an
 * FFERM error is seen, or ~20 iterations with escalating sleeps
 * (10ms, then 500ms, then 2500ms) have elapsed.  On the 15th iteration
 * the board is restarted as a last attempt.  Returns 0 when ready,
 * 1 on register read failure, firmware error, or timeout (in which
 * case the HBA is marked in error state).
 */
static int
lpfc_sli_brdready_s3(struct lpfc_hba *phba, uint32_t mask)
{
	uint32_t status;
	int i = 0;
	int retval = 0;

	/* Read the HBA Host Status Register */
	if (lpfc_readl(phba->HSregaddr, &status))
		return 1;

	/*
	 * Check status register every 100ms for 5 retries, then every
	 * 500ms for 5, then every 2.5 sec for 5, then reset board and
	 * every 2.5 sec for 4.
	 * This is a total of 40 seconds.
	 */
	while (((status & mask) != mask) &&
	       !(status & HS_FFERM) &&
	       i++ < 20) {

		if (i <= 5)
			msleep(10);
		else if (i <= 10)
			msleep(500);
		else
			msleep(2500);

		if (i == 15) {
			/* Do post: one last try to bring the board up */
			phba->pport->port_state = LPFC_VPORT_UNKNOWN;
			lpfc_sli_brdrestart(phba);
		}
		/* Read the HBA Host Status Register */
		if (lpfc_readl(phba->HSregaddr, &status)) {
			retval = 1;
			break;
		}
	}

	/* Check to see if any errors occurred during init */
	if ((status & HS_FFERM) || (i >= 20)) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"2751 Adapter failed to restart, "
				"status reg x%x, FW Data: A8 x%x AC x%x\n",
				status,
				readl(phba->MBslimaddr + 0xa8),
				readl(phba->MBslimaddr + 0xac));
		phba->link_state = LPFC_HBA_ERROR;
		retval = 1;
	}

	return retval;
}
4165
4166
4167
4168
4169
4170
4171
4172
4173
4174
4175
4176
4177static int
4178lpfc_sli_brdready_s4(struct lpfc_hba *phba, uint32_t mask)
4179{
4180 uint32_t status;
4181 int retval = 0;
4182
4183
4184 status = lpfc_sli4_post_status_check(phba);
4185
4186 if (status) {
4187 phba->pport->port_state = LPFC_VPORT_UNKNOWN;
4188 lpfc_sli_brdrestart(phba);
4189 status = lpfc_sli4_post_status_check(phba);
4190 }
4191
4192
4193 if (status) {
4194 phba->link_state = LPFC_HBA_ERROR;
4195 retval = 1;
4196 } else
4197 phba->sli4_hba.intr_enable = 0;
4198
4199 return retval;
4200}
4201
4202
4203
4204
4205
4206
4207
4208
4209
/**
 * lpfc_sli_brdready - Wrapper func for checking the hba readyness
 * @phba: Pointer to HBA context object.
 * @mask: Bit mask to be checked against the host status register.
 *
 * Dispatches to the SLI-rev specific readiness check installed in
 * @phba (the _s3 or _s4 variant).  Returns its result.
 */
int
lpfc_sli_brdready(struct lpfc_hba *phba, uint32_t mask)
{
	return phba->lpfc_sli_brdready(phba, mask);
}
4215
4216#define BARRIER_TEST_PATTERN (0xdeadbeef)
4217
4218
4219
4220
4221
4222
4223
4224
/**
 * lpfc_reset_barrier - Make HBA ready for HBA reset
 * @phba: Pointer to HBA context object.
 *
 * Posts a KILL_BOARD mailbox via SLIM as a barrier so the chip quiesces
 * DMA before a reset.  Only applies to single-function (header type
 * 0x80) HELIOS/THOR parts; otherwise returns immediately.  Error
 * attention interrupts are disabled and LS_IGNORE_ERATT set for the
 * duration, and restored before returning.  Caller must hold hbalock
 * (lockdep-asserted).
 */
void lpfc_reset_barrier(struct lpfc_hba *phba)
{
	uint32_t __iomem *resp_buf;
	uint32_t __iomem *mbox_buf;
	volatile uint32_t mbox;
	uint32_t hc_copy, ha_copy, resp_data;
	int  i;
	uint8_t hdrtype;

	lockdep_assert_held(&phba->hbalock);

	/* Barrier applies only to single-function HELIOS/THOR adapters */
	pci_read_config_byte(phba->pcidev, PCI_HEADER_TYPE, &hdrtype);
	if (hdrtype != 0x80 ||
	    (FC_JEDEC_ID(phba->vpd.rev.biuRev) != HELIOS_JEDEC_ID &&
	     FC_JEDEC_ID(phba->vpd.rev.biuRev) != THOR_JEDEC_ID))
		return;

	/*
	 * Tell the other part of the chip to suspend temporarily all
	 * its DMA activity.
	 */
	resp_buf = phba->MBslimaddr;

	/* Disable the error attention */
	if (lpfc_readl(phba->HCregaddr, &hc_copy))
		return;
	writel((hc_copy & ~HC_ERINT_ENA), phba->HCregaddr);
	readl(phba->HCregaddr); /* flush */
	phba->link_flag |= LS_IGNORE_ERATT;

	if (lpfc_readl(phba->HAregaddr, &ha_copy))
		return;
	if (ha_copy & HA_ERATT) {
		/* Clear Chip error bit */
		writel(HA_ERATT, phba->HAregaddr);
		phba->pport->stopped = 1;
	}

	mbox = 0;
	((MAILBOX_t *)&mbox)->mbxCommand = MBX_KILL_BOARD;
	((MAILBOX_t *)&mbox)->mbxOwner = OWN_CHIP;

	/* Seed SLIM+4 with a pattern the chip will invert on completion */
	writel(BARRIER_TEST_PATTERN, (resp_buf + 1));
	mbox_buf = phba->MBslimaddr;
	writel(mbox, mbox_buf);

	/* Wait up to ~50ms for the pattern to be inverted */
	for (i = 0; i < 50; i++) {
		if (lpfc_readl((resp_buf + 1), &resp_data))
			return;
		if (resp_data != ~(BARRIER_TEST_PATTERN))
			mdelay(1);
		else
			break;
	}
	resp_data = 0;
	if (lpfc_readl((resp_buf + 1), &resp_data))
		return;
	if (resp_data  != ~(BARRIER_TEST_PATTERN)) {
		if (phba->sli.sli_flag & LPFC_SLI_ACTIVE ||
		    phba->pport->stopped)
			goto restore_hc;
		else
			goto clear_errat;
	}

	/* Hand the mailbox back to the host and wait for acceptance */
	((MAILBOX_t *)&mbox)->mbxOwner = OWN_HOST;
	resp_data = 0;
	for (i = 0; i < 500; i++) {
		if (lpfc_readl(resp_buf, &resp_data))
			return;
		if (resp_data != mbox)
			mdelay(1);
		else
			break;
	}

clear_errat:

	/* Wait for the resulting error attention (i carries over) */
	while (++i < 500) {
		if (lpfc_readl(phba->HAregaddr, &ha_copy))
			return;
		if (!(ha_copy & HA_ERATT))
			mdelay(1);
		else
			break;
	}

	if (readl(phba->HAregaddr) & HA_ERATT) {
		writel(HA_ERATT, phba->HAregaddr);
		phba->pport->stopped = 1;
	}

restore_hc:
	/* Re-enable error attention and restore the HC register */
	phba->link_flag &= ~LS_IGNORE_ERATT;
	writel(hc_copy, phba->HCregaddr);
	readl(phba->HCregaddr); /* flush */
}
4322
4323
4324
4325
4326
4327
4328
4329
4330
4331
4332
4333
/**
 * lpfc_sli_brdkill - Issue a kill_board mailbox command
 * @phba: Pointer to HBA context object.
 *
 * Issues a KILL_BOARD mailbox command to bring the HBA down cleanly:
 * error-attention interrupts are masked, the mailbox is issued with
 * MBX_NOWAIT, then the routine polls (up to ~3s) for the resulting
 * error attention before cleaning up mailbox state and posting HBA-down
 * processing.  Returns 0 when the kill completed (error attention
 * seen), 1 on any failure along the way.
 */
int
lpfc_sli_brdkill(struct lpfc_hba *phba)
{
	struct lpfc_sli *psli;
	LPFC_MBOXQ_t *pmb;
	uint32_t status;
	uint32_t ha_copy;
	int retval;
	int i = 0;

	psli = &phba->sli;

	/* Kill HBA */
	lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
			"0329 Kill HBA Data: x%x x%x\n",
			phba->pport->port_state, psli->sli_flag);

	pmb = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!pmb)
		return 1;

	/* Disable the error attention */
	spin_lock_irq(&phba->hbalock);
	if (lpfc_readl(phba->HCregaddr, &status)) {
		spin_unlock_irq(&phba->hbalock);
		mempool_free(pmb, phba->mbox_mem_pool);
		return 1;
	}
	status &= ~HC_ERINT_ENA;
	writel(status, phba->HCregaddr);
	readl(phba->HCregaddr); /* flush */
	phba->link_flag |= LS_IGNORE_ERATT;
	spin_unlock_irq(&phba->hbalock);

	lpfc_kill_board(phba, pmb);
	pmb->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
	retval = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT);

	if (retval != MBX_SUCCESS) {
		/* On MBX_BUSY the mbox stays queued; do not free it */
		if (retval != MBX_BUSY)
			mempool_free(pmb, phba->mbox_mem_pool);
		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
				"2752 KILL_BOARD command failed retval %d\n",
				retval);
		spin_lock_irq(&phba->hbalock);
		phba->link_flag &= ~LS_IGNORE_ERATT;
		spin_unlock_irq(&phba->hbalock);
		return 1;
	}

	spin_lock_irq(&phba->hbalock);
	psli->sli_flag &= ~LPFC_SLI_ACTIVE;
	spin_unlock_irq(&phba->hbalock);

	mempool_free(pmb, phba->mbox_mem_pool);

	/*
	 * There is no completion for a KILL_BOARD mbox cmd.  Check for an
	 * error attention every 100ms for 3 seconds.  If we don't get ERATT
	 * after 3 seconds we still set HBA_ERROR state because the status of
	 * the board is now undefined.
	 */
	if (lpfc_readl(phba->HAregaddr, &ha_copy))
		return 1;
	while ((i++ < 30) && !(ha_copy & HA_ERATT)) {
		mdelay(100);
		if (lpfc_readl(phba->HAregaddr, &ha_copy))
			return 1;
	}

	del_timer_sync(&psli->mbox_tmo);
	if (ha_copy & HA_ERATT) {
		writel(HA_ERATT, phba->HAregaddr);
		phba->pport->stopped = 1;
	}
	spin_lock_irq(&phba->hbalock);
	psli->sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
	psli->mbox_active = NULL;
	phba->link_flag &= ~LS_IGNORE_ERATT;
	spin_unlock_irq(&phba->hbalock);

	lpfc_hba_down_post(phba);
	phba->link_state = LPFC_HBA_ERROR;

	return ha_copy & HA_ERATT ? 0 : 1;
}
4419
4420
4421
4422
4423
4424
4425
4426
4427
4428
4429
4430
/**
 * lpfc_sli_brdreset - Reset a sli-2 or sli-3 HBA
 * @phba: Pointer to HBA context object.
 *
 * Resets the HBA: clears link/DID state, temporarily turns off PCI
 * parity/SERR checking, pulses HC_INITFF in the host control register,
 * restores the PCI command word, and reinitializes every ring's
 * index/state fields.  Leaves the HBA in LPFC_WARM_START state.
 * Returns 0 on success, -EIO if the PCI command register cannot be
 * read.
 */
int
lpfc_sli_brdreset(struct lpfc_hba *phba)
{
	struct lpfc_sli *psli;
	struct lpfc_sli_ring *pring;
	uint16_t cfg_value;
	int i;

	psli = &phba->sli;

	/* Reset HBA */
	lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
			"0325 Reset HBA Data: x%x x%x\n",
			(phba->pport) ? phba->pport->port_state : 0,
			psli->sli_flag);

	/* perform board reset */
	phba->fc_eventTag = 0;
	phba->link_events = 0;
	if (phba->pport) {
		phba->pport->fc_myDID = 0;
		phba->pport->fc_prevDID = 0;
	}

	/* Turn off parity checking and serr during the physical reset */
	if (pci_read_config_word(phba->pcidev, PCI_COMMAND, &cfg_value))
		return -EIO;

	pci_write_config_word(phba->pcidev, PCI_COMMAND,
			      (cfg_value &
			       ~(PCI_COMMAND_PARITY | PCI_COMMAND_SERR)));

	psli->sli_flag &= ~(LPFC_SLI_ACTIVE | LPFC_PROCESS_LA);

	/* Now toggle INITFF bit in the Host Control Register */
	writel(HC_INITFF, phba->HCregaddr);
	mdelay(1);
	readl(phba->HCregaddr); /* flush */
	writel(0, phba->HCregaddr);
	readl(phba->HCregaddr); /* flush */

	/* Restore PCI cmd register */
	pci_write_config_word(phba->pcidev, PCI_COMMAND, cfg_value);

	/* Initialize relevant SLI info */
	for (i = 0; i < psli->num_rings; i++) {
		pring = &psli->sli3_ring[i];
		pring->flag = 0;
		pring->sli.sli3.rspidx = 0;
		pring->sli.sli3.next_cmdidx  = 0;
		pring->sli.sli3.local_getidx = 0;
		pring->sli.sli3.cmdidx = 0;
		pring->missbufcnt = 0;
	}

	phba->link_state = LPFC_WARM_START;
	return 0;
}
4489
4490
4491
4492
4493
4494
4495
4496
4497
4498
4499
/**
 * lpfc_sli4_brdreset - Reset a SLI-4 HBA
 * @phba: Pointer to HBA context object.
 *
 * Resets a SLI-4 port by issuing a PCI function reset, with PCI parity
 * and SERR reporting masked for the duration of the reset.  Link and
 * discovery state is cleared first.  If the HBA_FW_DUMP_OP flag is set
 * the actual function reset is skipped (the port is being reset as part
 * of a firmware-dump operation and must not be disturbed).
 *
 * Return: 0 on success, -EIO on PCI config read failure, otherwise the
 * return value of lpfc_pci_function_reset().
 **/
int
lpfc_sli4_brdreset(struct lpfc_hba *phba)
{
	struct lpfc_sli *psli = &phba->sli;
	uint16_t cfg_value;
	int rc = 0;

	/* Reset HBA */
	lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
			"0295 Reset HBA Data: x%x x%x x%x\n",
			phba->pport->port_state, psli->sli_flag,
			phba->hba_flag);

	/* perform board reset: discard stale link/discovery state */
	phba->fc_eventTag = 0;
	phba->link_events = 0;
	phba->pport->fc_myDID = 0;
	phba->pport->fc_prevDID = 0;

	spin_lock_irq(&phba->hbalock);
	psli->sli_flag &= ~(LPFC_PROCESS_LA);
	phba->fcf.fcf_flag = 0;
	spin_unlock_irq(&phba->hbalock);

	/* SLI4 INTF 2: if FW dump is being taken skip INIT_PORT */
	if (phba->hba_flag & HBA_FW_DUMP_OP) {
		phba->hba_flag &= ~HBA_FW_DUMP_OP;
		return rc;
	}

	/* Now physically reset the device */
	lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
			"0389 Performing PCI function reset!\n");

	/* Turn off parity checking and serr during the physical reset */
	if (pci_read_config_word(phba->pcidev, PCI_COMMAND, &cfg_value)) {
		lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
				"3205 PCI read Config failed\n");
		return -EIO;
	}

	pci_write_config_word(phba->pcidev, PCI_COMMAND, (cfg_value &
			      ~(PCI_COMMAND_PARITY | PCI_COMMAND_SERR)));

	/* Perform FCoE PCI function reset before freeing queue memory */
	rc = lpfc_pci_function_reset(phba);

	/* Restore PCI cmd register */
	pci_write_config_word(phba->pcidev, PCI_COMMAND, cfg_value);

	return rc;
}
4552
4553
4554
4555
4556
4557
4558
4559
4560
4561
4562
4563
4564
4565
/**
 * lpfc_sli_brdrestart_s3 - Restart a SLI-2/SLI-3 HBA
 * @phba: Pointer to HBA context object.
 *
 * Restarts the HBA by writing an MBX_RESTART mailbox command directly
 * into SLIM (with the second word indicating whether the port was
 * previously initialized), then resetting the board via
 * lpfc_sli_brdreset().  The hbalock is held across the SLIM writes and
 * the reset to serialize against other register access.
 *
 * Return: 0 (always; failures surface through the subsequent init path).
 **/
static int
lpfc_sli_brdrestart_s3(struct lpfc_hba *phba)
{
	MAILBOX_t *mb;
	struct lpfc_sli *psli;
	volatile uint32_t word0;
	void __iomem *to_slim;
	uint32_t hba_aer_enabled;

	spin_lock_irq(&phba->hbalock);

	/* Take PCIe device Advanced Error Reporting (AER) state */
	hba_aer_enabled = phba->hba_flag & HBA_AER_ENABLED;

	psli = &phba->sli;

	/* Restart HBA */
	lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
			"0337 Restart HBA Data: x%x x%x\n",
			(phba->pport) ? phba->pport->port_state : 0,
			psli->sli_flag);

	/* Build an MBX_RESTART command in word0 and write it to SLIM */
	word0 = 0;
	mb = (MAILBOX_t *) &word0;
	mb->mbxCommand = MBX_RESTART;
	mb->mbxHc = 1;

	lpfc_reset_barrier(phba);

	to_slim = phba->MBslimaddr;
	writel(*(uint32_t *) mb, to_slim);
	readl(to_slim); /* flush */

	/* Only skip post (word0 = 1) after a fatal error or when the
	 * port was already up; otherwise request a full POST.
	 */
	if (phba->pport && phba->pport->port_state)
		word0 = 1;	/* This is really setting up word1 */
	else
		word0 = 0;	/* This is really setting up word1 */
	to_slim = phba->MBslimaddr + sizeof (uint32_t);
	writel(*(uint32_t *) mb, to_slim);
	readl(to_slim); /* flush */

	lpfc_sli_brdreset(phba);
	if (phba->pport)
		phba->pport->stopped = 0;
	phba->link_state = LPFC_INIT_START;
	phba->hba_flag = 0;
	spin_unlock_irq(&phba->hbalock);

	memset(&psli->lnk_stat_offsets, 0, sizeof(psli->lnk_stat_offsets));
	psli->stats_start = ktime_get_seconds();

	/* Give the INITFF and Post time to settle. */
	mdelay(100);

	/* Reset HBA AER if it was enabled, note hba_flag was reset above */
	if (hba_aer_enabled)
		pci_disable_pcie_error_reporting(phba->pcidev);

	lpfc_hba_down_post(phba);

	return 0;
}
4629
4630
4631
4632
4633
4634
4635
4636
4637
4638
/**
 * lpfc_sli_brdrestart_s4 - Restart a SLI-4 HBA
 * @phba: Pointer to HBA context object.
 *
 * Restarts the SLI-4 port by performing a board reset
 * (lpfc_sli4_brdreset), then reinitializing software state, flushing
 * outstanding I/O via lpfc_hba_down_post() and destroying the SLI-4
 * queue set so it can be recreated during setup.
 *
 * Return: 0 on success, or the error from lpfc_sli4_brdreset().
 **/
static int
lpfc_sli_brdrestart_s4(struct lpfc_hba *phba)
{
	struct lpfc_sli *psli = &phba->sli;
	uint32_t hba_aer_enabled;
	int rc;

	/* Restart HBA */
	lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
			"0296 Restart HBA Data: x%x x%x\n",
			phba->pport->port_state, psli->sli_flag);

	/* Take PCIe device Advanced Error Reporting (AER) state */
	hba_aer_enabled = phba->hba_flag & HBA_AER_ENABLED;

	rc = lpfc_sli4_brdreset(phba);
	if (rc)
		return rc;

	spin_lock_irq(&phba->hbalock);
	phba->pport->stopped = 0;
	phba->link_state = LPFC_INIT_START;
	phba->hba_flag = 0;
	spin_unlock_irq(&phba->hbalock);

	memset(&psli->lnk_stat_offsets, 0, sizeof(psli->lnk_stat_offsets));
	psli->stats_start = ktime_get_seconds();

	/* Reset HBA AER if it was enabled, note hba_flag was reset above */
	if (hba_aer_enabled)
		pci_disable_pcie_error_reporting(phba->pcidev);

	lpfc_hba_down_post(phba);
	lpfc_sli4_queue_destroy(phba);

	return rc;
}
4676
4677
4678
4679
4680
4681
4682
4683
4684int
4685lpfc_sli_brdrestart(struct lpfc_hba *phba)
4686{
4687 return phba->lpfc_sli_brdrestart(phba);
4688}
4689
4690
4691
4692
4693
4694
4695
4696
4697
4698
4699
/**
 * lpfc_sli_chipset_init - Wait for the restart of the HBA
 * @phba: Pointer to HBA context object.
 *
 * Polls the Host Status register until both HS_FFRDY and HS_MBRDY are
 * set, indicating the adapter completed POST after a restart.  The poll
 * interval backs off (10ms, then 100ms, then 1s); around the 150th
 * iteration the board is restarted once more, and after roughly 200
 * iterations the wait times out.  An HS_FFERM error at any point aborts
 * the wait.  On success, host interrupts are disabled and all host
 * attention conditions are cleared.
 *
 * Return: 0 on success; -EIO on register read failure or chipset error;
 * -ETIMEDOUT if the adapter never becomes ready.
 **/
int
lpfc_sli_chipset_init(struct lpfc_hba *phba)
{
	uint32_t status, i = 0;

	/* Read the HBA Host Status Register */
	if (lpfc_readl(phba->HSregaddr, &status))
		return -EIO;

	/* Check status register to see what current state is */
	i = 0;
	while ((status & (HS_FFRDY | HS_MBRDY)) != (HS_FFRDY | HS_MBRDY)) {

		/* Check every 10ms for 10 retries, then every 100ms for 90
		 * retries, then every 1 sec for 50 retires for a total of
		 * ~60 seconds before reporting a timeout error.
		 */
		if (i++ >= 200) {
			/* Adapter failed to init, timeout, status reg
			   <status> */
			lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
					"0436 Adapter failed to init, "
					"timeout, status reg x%x, "
					"FW Data: A8 x%x AC x%x\n", status,
					readl(phba->MBslimaddr + 0xa8),
					readl(phba->MBslimaddr + 0xac));
			phba->link_state = LPFC_HBA_ERROR;
			return -ETIMEDOUT;
		}

		/* Check to see if any errors occurred during init */
		if (status & HS_FFERM) {
			/* ERROR: During chipset initialization */
			/* Adapter failed to init, chipset, status reg
			   <status> */
			lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
					"0437 Adapter failed to init, "
					"chipset, status reg x%x, "
					"FW Data: A8 x%x AC x%x\n", status,
					readl(phba->MBslimaddr + 0xa8),
					readl(phba->MBslimaddr + 0xac));
			phba->link_state = LPFC_HBA_ERROR;
			return -EIO;
		}

		/* Back off the poll interval as retries accumulate */
		if (i <= 10)
			msleep(10);
		else if (i <= 100)
			msleep(100);
		else
			msleep(1000);

		if (i == 150) {
			/* Do post: one more kick to get the board going */
			phba->pport->port_state = LPFC_VPORT_UNKNOWN;
			lpfc_sli_brdrestart(phba);
		}
		/* Read the HBA Host Status Register */
		if (lpfc_readl(phba->HSregaddr, &status))
			return -EIO;
	}

	/* Check to see if any errors occurred during init */
	if (status & HS_FFERM) {
		/* ERROR: During chipset initialization */
		/* Adapter failed to init, chipset, status reg <status> */
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"0438 Adapter failed to init, chipset, "
				"status reg x%x, "
				"FW Data: A8 x%x AC x%x\n", status,
				readl(phba->MBslimaddr + 0xa8),
				readl(phba->MBslimaddr + 0xac));
		phba->link_state = LPFC_HBA_ERROR;
		return -EIO;
	}

	/* Clear all interrupt enable conditions */
	writel(0, phba->HCregaddr);
	readl(phba->HCregaddr); /* flush */

	/* setup host attn register */
	writel(0xffffffff, phba->HAregaddr);
	readl(phba->HAregaddr); /* flush */
	return 0;
}
4789
4790
4791
4792
4793
4794
4795
/**
 * lpfc_sli_hbq_count - Get the number of HBQs to be configured
 *
 * Return: the number of entries in the static lpfc_hbq_defs[] table.
 **/
int
lpfc_sli_hbq_count(void)
{
	return ARRAY_SIZE(lpfc_hbq_defs);
}
4801
4802
4803
4804
4805
4806
4807
4808
4809static int
4810lpfc_sli_hbq_entry_count(void)
4811{
4812 int hbq_count = lpfc_sli_hbq_count();
4813 int count = 0;
4814 int i;
4815
4816 for (i = 0; i < hbq_count; ++i)
4817 count += lpfc_hbq_defs[i]->entry_count;
4818 return count;
4819}
4820
4821
4822
4823
4824
4825
4826
/**
 * lpfc_sli_hbq_size - Calculate memory required for all hbq entries
 *
 * Return: the number of bytes needed to hold every HBQ entry across all
 * configured host buffer queues.
 **/
int
lpfc_sli_hbq_size(void)
{
	return lpfc_sli_hbq_entry_count() * sizeof(struct lpfc_hbq_entry);
}
4832
4833
4834
4835
4836
4837
4838
4839
4840
4841
4842static int
4843lpfc_sli_hbq_setup(struct lpfc_hba *phba)
4844{
4845 int hbq_count = lpfc_sli_hbq_count();
4846 LPFC_MBOXQ_t *pmb;
4847 MAILBOX_t *pmbox;
4848 uint32_t hbqno;
4849 uint32_t hbq_entry_index;
4850
4851
4852
4853
4854 pmb = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
4855
4856 if (!pmb)
4857 return -ENOMEM;
4858
4859 pmbox = &pmb->u.mb;
4860
4861
4862 phba->link_state = LPFC_INIT_MBX_CMDS;
4863 phba->hbq_in_use = 1;
4864
4865 hbq_entry_index = 0;
4866 for (hbqno = 0; hbqno < hbq_count; ++hbqno) {
4867 phba->hbqs[hbqno].next_hbqPutIdx = 0;
4868 phba->hbqs[hbqno].hbqPutIdx = 0;
4869 phba->hbqs[hbqno].local_hbqGetIdx = 0;
4870 phba->hbqs[hbqno].entry_count =
4871 lpfc_hbq_defs[hbqno]->entry_count;
4872 lpfc_config_hbq(phba, hbqno, lpfc_hbq_defs[hbqno],
4873 hbq_entry_index, pmb);
4874 hbq_entry_index += phba->hbqs[hbqno].entry_count;
4875
4876 if (lpfc_sli_issue_mbox(phba, pmb, MBX_POLL) != MBX_SUCCESS) {
4877
4878
4879
4880 lpfc_printf_log(phba, KERN_ERR,
4881 LOG_SLI | LOG_VPORT,
4882 "1805 Adapter failed to init. "
4883 "Data: x%x x%x x%x\n",
4884 pmbox->mbxCommand,
4885 pmbox->mbxStatus, hbqno);
4886
4887 phba->link_state = LPFC_HBA_ERROR;
4888 mempool_free(pmb, phba->mbox_mem_pool);
4889 return -ENXIO;
4890 }
4891 }
4892 phba->hbq_count = hbq_count;
4893
4894 mempool_free(pmb, phba->mbox_mem_pool);
4895
4896
4897 for (hbqno = 0; hbqno < hbq_count; ++hbqno)
4898 lpfc_sli_hbqbuf_init_hbqs(phba, hbqno);
4899 return 0;
4900}
4901
4902
4903
4904
4905
4906
4907
4908
4909
4910
/**
 * lpfc_sli4_rb_setup - Initialize and post RBs to HBA
 * @phba: Pointer to HBA context object.
 *
 * SLI-4 uses a single receive-buffer queue (the ELS HBQ): mark HBQs in
 * use, set its entry count from the static definition table, and post
 * the initial buffers.
 *
 * Return: 0 always.
 **/
static int
lpfc_sli4_rb_setup(struct lpfc_hba *phba)
{
	phba->hbq_in_use = 1;
	phba->hbqs[LPFC_ELS_HBQ].entry_count =
		lpfc_hbq_defs[LPFC_ELS_HBQ]->entry_count;
	phba->hbq_count = 1;
	lpfc_sli_hbqbuf_init_hbqs(phba, LPFC_ELS_HBQ);
	/* Initially populate or replenish the HBQs */
	return 0;
}
4922
4923
4924
4925
4926
4927
4928
4929
4930
4931
4932
4933
4934
4935
/**
 * lpfc_sli_config_port - Issue config port mailbox command
 * @phba: Pointer to HBA context object.
 * @sli_mode: requested SLI mode (2 or 3).
 *
 * Restarts and reinitializes the chipset (up to two attempts), then
 * issues a CONFIG_PORT mailbox command in polled mode.  On success the
 * firmware-granted SLI-3 options (NPIV, HBQ, CRP, DSS, BlockGuard,
 * async-abort) and limits (max_vpi) are recorded in the HBA context,
 * and the host group pointers (hbq_get/port_gp) are set according to
 * the negotiated SLI mode.
 *
 * Return: 0 on success; -ENOMEM on mailbox allocation failure; -ENXIO
 * if CONFIG_PORT fails or mode-3 mailbox attention is not granted;
 * -EINVAL if no attempt succeeded; or an error from the chipset
 * init/prep steps.
 **/
int
lpfc_sli_config_port(struct lpfc_hba *phba, int sli_mode)
{
	LPFC_MBOXQ_t *pmb;
	uint32_t resetcount = 0, rc = 0, done = 0;

	pmb = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!pmb) {
		phba->link_state = LPFC_HBA_ERROR;
		return -ENOMEM;
	}

	phba->sli_rev = sli_mode;
	/* Try at most twice: restart, init chipset, then CONFIG_PORT */
	while (resetcount < 2 && !done) {
		spin_lock_irq(&phba->hbalock);
		phba->sli.sli_flag |= LPFC_SLI_MBOX_ACTIVE;
		spin_unlock_irq(&phba->hbalock);
		phba->pport->port_state = LPFC_VPORT_UNKNOWN;
		lpfc_sli_brdrestart(phba);
		rc = lpfc_sli_chipset_init(phba);
		if (rc)
			break;

		spin_lock_irq(&phba->hbalock);
		phba->sli.sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
		spin_unlock_irq(&phba->hbalock);
		resetcount++;

		/* Call pre CONFIG_PORT mailbox command initialization.  A
		 * value of 0 means the call was successful.  Any other
		 * nonzero value is a failure, but if ERESTART is returned,
		 * the driver may reset the HBA and try again.
		 */
		rc = lpfc_config_port_prep(phba);
		if (rc == -ERESTART) {
			phba->link_state = LPFC_LINK_UNKNOWN;
			continue;
		} else if (rc)
			break;

		phba->link_state = LPFC_INIT_MBX_CMDS;
		lpfc_config_port(phba, pmb);
		rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL);
		phba->sli3_options &= ~(LPFC_SLI3_NPIV_ENABLED |
					LPFC_SLI3_HBQ_ENABLED |
					LPFC_SLI3_CRP_ENABLED |
					LPFC_SLI3_DSS_ENABLED);
		if (rc != MBX_SUCCESS) {
			lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"0442 Adapter failed to init, mbxCmd x%x "
				"CONFIG_PORT, mbxStatus x%x Data: x%x\n",
				pmb->u.mb.mbxCommand, pmb->u.mb.mbxStatus, 0);
			spin_lock_irq(&phba->hbalock);
			phba->sli.sli_flag &= ~LPFC_SLI_ACTIVE;
			spin_unlock_irq(&phba->hbalock);
			rc = -ENXIO;
		} else {
			/* Allow asynchronous mailbox command to go through */
			spin_lock_irq(&phba->hbalock);
			phba->sli.sli_flag &= ~LPFC_SLI_ASYNC_MBX_BLK;
			spin_unlock_irq(&phba->hbalock);
			done = 1;

			if ((pmb->u.mb.un.varCfgPort.casabt == 1) &&
			    (pmb->u.mb.un.varCfgPort.gasabt == 0))
				lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
					"3110 Port did not grant ASABT\n");
		}
	}
	if (!done) {
		rc = -EINVAL;
		goto do_prep_failed;
	}
	/* Record which SLI-3 features the firmware actually granted */
	if (pmb->u.mb.un.varCfgPort.sli_mode == 3) {
		if (!pmb->u.mb.un.varCfgPort.cMA) {
			rc = -ENXIO;
			goto do_prep_failed;
		}
		if (phba->max_vpi && pmb->u.mb.un.varCfgPort.gmv) {
			phba->sli3_options |= LPFC_SLI3_NPIV_ENABLED;
			phba->max_vpi = pmb->u.mb.un.varCfgPort.max_vpi;
			phba->max_vports = (phba->max_vpi > phba->max_vports) ?
				phba->max_vpi : phba->max_vports;

		} else
			phba->max_vpi = 0;
		phba->fips_level = 0;
		phba->fips_spec_rev = 0;
		if (pmb->u.mb.un.varCfgPort.gdss) {
			phba->sli3_options |= LPFC_SLI3_DSS_ENABLED;
			phba->fips_level = pmb->u.mb.un.varCfgPort.fips_level;
			phba->fips_spec_rev = pmb->u.mb.un.varCfgPort.fips_rev;
			lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
					"2850 Security Crypto Active. FIPS x%d "
					"(Spec Rev: x%d)",
					phba->fips_level, phba->fips_spec_rev);
		}
		if (pmb->u.mb.un.varCfgPort.sec_err) {
			lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
					"2856 Config Port Security Crypto "
					"Error: x%x ",
					pmb->u.mb.un.varCfgPort.sec_err);
		}
		if (pmb->u.mb.un.varCfgPort.gerbm)
			phba->sli3_options |= LPFC_SLI3_HBQ_ENABLED;
		if (pmb->u.mb.un.varCfgPort.gcrp)
			phba->sli3_options |= LPFC_SLI3_CRP_ENABLED;

		phba->hbq_get = phba->mbox->us.s3_pgp.hbq_get;
		phba->port_gp = phba->mbox->us.s3_pgp.port;

		if (phba->sli3_options & LPFC_SLI3_BG_ENABLED) {
			if (pmb->u.mb.un.varCfgPort.gbg == 0) {
				phba->cfg_enable_bg = 0;
				phba->sli3_options &= ~LPFC_SLI3_BG_ENABLED;
				lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
						"0443 Adapter did not grant "
						"BlockGuard\n");
			}
		}
	} else {
		phba->hbq_get = NULL;
		phba->port_gp = phba->mbox->us.s2.port;
		phba->max_vpi = 0;
	}
do_prep_failed:
	mempool_free(pmb, phba->mbox_mem_pool);
	return rc;
}
5065
5066
5067
5068
5069
5070
5071
5072
5073
5074
5075
5076
5077
5078
5079
5080int
5081lpfc_sli_hba_setup(struct lpfc_hba *phba)
5082{
5083 uint32_t rc;
5084 int mode = 3, i;
5085 int longs;
5086
5087 switch (phba->cfg_sli_mode) {
5088 case 2:
5089 if (phba->cfg_enable_npiv) {
5090 lpfc_printf_log(phba, KERN_ERR, LOG_INIT | LOG_VPORT,
5091 "1824 NPIV enabled: Override sli_mode "
5092 "parameter (%d) to auto (0).\n",
5093 phba->cfg_sli_mode);
5094 break;
5095 }
5096 mode = 2;
5097 break;
5098 case 0:
5099 case 3:
5100 break;
5101 default:
5102 lpfc_printf_log(phba, KERN_ERR, LOG_INIT | LOG_VPORT,
5103 "1819 Unrecognized sli_mode parameter: %d.\n",
5104 phba->cfg_sli_mode);
5105
5106 break;
5107 }
5108 phba->fcp_embed_io = 0;
5109
5110 rc = lpfc_sli_config_port(phba, mode);
5111
5112 if (rc && phba->cfg_sli_mode == 3)
5113 lpfc_printf_log(phba, KERN_ERR, LOG_INIT | LOG_VPORT,
5114 "1820 Unable to select SLI-3. "
5115 "Not supported by adapter.\n");
5116 if (rc && mode != 2)
5117 rc = lpfc_sli_config_port(phba, 2);
5118 else if (rc && mode == 2)
5119 rc = lpfc_sli_config_port(phba, 3);
5120 if (rc)
5121 goto lpfc_sli_hba_setup_error;
5122
5123
5124 if (phba->cfg_aer_support == 1 && !(phba->hba_flag & HBA_AER_ENABLED)) {
5125 rc = pci_enable_pcie_error_reporting(phba->pcidev);
5126 if (!rc) {
5127 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
5128 "2709 This device supports "
5129 "Advanced Error Reporting (AER)\n");
5130 spin_lock_irq(&phba->hbalock);
5131 phba->hba_flag |= HBA_AER_ENABLED;
5132 spin_unlock_irq(&phba->hbalock);
5133 } else {
5134 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
5135 "2708 This device does not support "
5136 "Advanced Error Reporting (AER): %d\n",
5137 rc);
5138 phba->cfg_aer_support = 0;
5139 }
5140 }
5141
5142 if (phba->sli_rev == 3) {
5143 phba->iocb_cmd_size = SLI3_IOCB_CMD_SIZE;
5144 phba->iocb_rsp_size = SLI3_IOCB_RSP_SIZE;
5145 } else {
5146 phba->iocb_cmd_size = SLI2_IOCB_CMD_SIZE;
5147 phba->iocb_rsp_size = SLI2_IOCB_RSP_SIZE;
5148 phba->sli3_options = 0;
5149 }
5150
5151 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
5152 "0444 Firmware in SLI %x mode. Max_vpi %d\n",
5153 phba->sli_rev, phba->max_vpi);
5154 rc = lpfc_sli_ring_map(phba);
5155
5156 if (rc)
5157 goto lpfc_sli_hba_setup_error;
5158
5159
5160 if (phba->sli_rev == LPFC_SLI_REV3) {
5161
5162
5163
5164
5165
5166 if ((phba->vpi_bmask == NULL) && (phba->vpi_ids == NULL)) {
5167 longs = (phba->max_vpi + BITS_PER_LONG) / BITS_PER_LONG;
5168 phba->vpi_bmask = kcalloc(longs,
5169 sizeof(unsigned long),
5170 GFP_KERNEL);
5171 if (!phba->vpi_bmask) {
5172 rc = -ENOMEM;
5173 goto lpfc_sli_hba_setup_error;
5174 }
5175
5176 phba->vpi_ids = kcalloc(phba->max_vpi + 1,
5177 sizeof(uint16_t),
5178 GFP_KERNEL);
5179 if (!phba->vpi_ids) {
5180 kfree(phba->vpi_bmask);
5181 rc = -ENOMEM;
5182 goto lpfc_sli_hba_setup_error;
5183 }
5184 for (i = 0; i < phba->max_vpi; i++)
5185 phba->vpi_ids[i] = i;
5186 }
5187 }
5188
5189
5190 if (phba->sli3_options & LPFC_SLI3_HBQ_ENABLED) {
5191 rc = lpfc_sli_hbq_setup(phba);
5192 if (rc)
5193 goto lpfc_sli_hba_setup_error;
5194 }
5195 spin_lock_irq(&phba->hbalock);
5196 phba->sli.sli_flag |= LPFC_PROCESS_LA;
5197 spin_unlock_irq(&phba->hbalock);
5198
5199 rc = lpfc_config_port_post(phba);
5200 if (rc)
5201 goto lpfc_sli_hba_setup_error;
5202
5203 return rc;
5204
5205lpfc_sli_hba_setup_error:
5206 phba->link_state = LPFC_HBA_ERROR;
5207 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
5208 "0445 Firmware initialization failed\n");
5209 return rc;
5210}
5211
5212
5213
5214
5215
5216
5217
5218
5219
5220static int
5221lpfc_sli4_read_fcoe_params(struct lpfc_hba *phba)
5222{
5223 LPFC_MBOXQ_t *mboxq;
5224 struct lpfc_dmabuf *mp;
5225 struct lpfc_mqe *mqe;
5226 uint32_t data_length;
5227 int rc;
5228
5229
5230 phba->valid_vlan = 0;
5231 phba->fc_map[0] = LPFC_FCOE_FCF_MAP0;
5232 phba->fc_map[1] = LPFC_FCOE_FCF_MAP1;
5233 phba->fc_map[2] = LPFC_FCOE_FCF_MAP2;
5234
5235 mboxq = (LPFC_MBOXQ_t *)mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
5236 if (!mboxq)
5237 return -ENOMEM;
5238
5239 mqe = &mboxq->u.mqe;
5240 if (lpfc_sli4_dump_cfg_rg23(phba, mboxq)) {
5241 rc = -ENOMEM;
5242 goto out_free_mboxq;
5243 }
5244
5245 mp = (struct lpfc_dmabuf *)mboxq->ctx_buf;
5246 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
5247
5248 lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI,
5249 "(%d):2571 Mailbox cmd x%x Status x%x "
5250 "Data: x%x x%x x%x x%x x%x x%x x%x x%x x%x "
5251 "x%x x%x x%x x%x x%x x%x x%x x%x x%x "
5252 "CQ: x%x x%x x%x x%x\n",
5253 mboxq->vport ? mboxq->vport->vpi : 0,
5254 bf_get(lpfc_mqe_command, mqe),
5255 bf_get(lpfc_mqe_status, mqe),
5256 mqe->un.mb_words[0], mqe->un.mb_words[1],
5257 mqe->un.mb_words[2], mqe->un.mb_words[3],
5258 mqe->un.mb_words[4], mqe->un.mb_words[5],
5259 mqe->un.mb_words[6], mqe->un.mb_words[7],
5260 mqe->un.mb_words[8], mqe->un.mb_words[9],
5261 mqe->un.mb_words[10], mqe->un.mb_words[11],
5262 mqe->un.mb_words[12], mqe->un.mb_words[13],
5263 mqe->un.mb_words[14], mqe->un.mb_words[15],
5264 mqe->un.mb_words[16], mqe->un.mb_words[50],
5265 mboxq->mcqe.word0,
5266 mboxq->mcqe.mcqe_tag0, mboxq->mcqe.mcqe_tag1,
5267 mboxq->mcqe.trailer);
5268
5269 if (rc) {
5270 lpfc_mbuf_free(phba, mp->virt, mp->phys);
5271 kfree(mp);
5272 rc = -EIO;
5273 goto out_free_mboxq;
5274 }
5275 data_length = mqe->un.mb_words[5];
5276 if (data_length > DMP_RGN23_SIZE) {
5277 lpfc_mbuf_free(phba, mp->virt, mp->phys);
5278 kfree(mp);
5279 rc = -EIO;
5280 goto out_free_mboxq;
5281 }
5282
5283 lpfc_parse_fcoe_conf(phba, mp->virt, data_length);
5284 lpfc_mbuf_free(phba, mp->virt, mp->phys);
5285 kfree(mp);
5286 rc = 0;
5287
5288out_free_mboxq:
5289 mempool_free(mboxq, phba->mbox_mem_pool);
5290 return rc;
5291}
5292
5293
5294
5295
5296
5297
5298
5299
5300
5301
5302
5303
5304
5305
5306
5307
5308static int
5309lpfc_sli4_read_rev(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq,
5310 uint8_t *vpd, uint32_t *vpd_size)
5311{
5312 int rc = 0;
5313 uint32_t dma_size;
5314 struct lpfc_dmabuf *dmabuf;
5315 struct lpfc_mqe *mqe;
5316
5317 dmabuf = kzalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
5318 if (!dmabuf)
5319 return -ENOMEM;
5320
5321
5322
5323
5324
5325 dma_size = *vpd_size;
5326 dmabuf->virt = dma_alloc_coherent(&phba->pcidev->dev, dma_size,
5327 &dmabuf->phys, GFP_KERNEL);
5328 if (!dmabuf->virt) {
5329 kfree(dmabuf);
5330 return -ENOMEM;
5331 }
5332
5333
5334
5335
5336
5337
5338 lpfc_read_rev(phba, mboxq);
5339 mqe = &mboxq->u.mqe;
5340 mqe->un.read_rev.vpd_paddr_high = putPaddrHigh(dmabuf->phys);
5341 mqe->un.read_rev.vpd_paddr_low = putPaddrLow(dmabuf->phys);
5342 mqe->un.read_rev.word1 &= 0x0000FFFF;
5343 bf_set(lpfc_mbx_rd_rev_vpd, &mqe->un.read_rev, 1);
5344 bf_set(lpfc_mbx_rd_rev_avail_len, &mqe->un.read_rev, dma_size);
5345
5346 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
5347 if (rc) {
5348 dma_free_coherent(&phba->pcidev->dev, dma_size,
5349 dmabuf->virt, dmabuf->phys);
5350 kfree(dmabuf);
5351 return -EIO;
5352 }
5353
5354
5355
5356
5357
5358
5359 if (mqe->un.read_rev.avail_vpd_len < *vpd_size)
5360 *vpd_size = mqe->un.read_rev.avail_vpd_len;
5361
5362 memcpy(vpd, dmabuf->virt, *vpd_size);
5363
5364 dma_free_coherent(&phba->pcidev->dev, dma_size,
5365 dmabuf->virt, dmabuf->phys);
5366 kfree(dmabuf);
5367 return 0;
5368}
5369
5370
5371
5372
5373
5374
5375
5376
5377
5378
5379
5380
/**
 * lpfc_sli4_get_ctl_attr - Retrieve SLI4 device controller attributes
 * @phba: pointer to lpfc hba data structure.
 *
 * Issues a non-embedded GET_CNTL_ATTRIBUTES mailbox command (polled)
 * and records the link type, link number and BIOS version string in the
 * HBA context.  On exit the mailbox is freed with
 * lpfc_sli4_mbox_cmd_free() (which also releases the non-embedded SGE
 * pages) unless the command timed out, in which case the mailbox is
 * still owned by the port and must not be freed here.
 *
 * Return: 0 on success, -ENOMEM on allocation failure, -ENXIO if the
 * mailbox command or its status header reports an error.
 **/
static int
lpfc_sli4_get_ctl_attr(struct lpfc_hba *phba)
{
	LPFC_MBOXQ_t *mboxq;
	struct lpfc_mbx_get_cntl_attributes *mbx_cntl_attr;
	struct lpfc_controller_attribute *cntl_attr;
	void *virtaddr = NULL;
	uint32_t alloclen, reqlen;
	uint32_t shdr_status, shdr_add_status;
	union lpfc_sli4_cfg_shdr *shdr;
	int rc;

	mboxq = (LPFC_MBOXQ_t *)mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!mboxq)
		return -ENOMEM;

	/* Send COMMON_GET_CNTL_ATTRIBUTES mbox cmd (non-embedded: the
	 * response is delivered via externally allocated SGE pages)
	 */
	reqlen = sizeof(struct lpfc_mbx_get_cntl_attributes);
	alloclen = lpfc_sli4_config(phba, mboxq, LPFC_MBOX_SUBSYSTEM_COMMON,
			LPFC_MBOX_OPCODE_GET_CNTL_ATTRIBUTES, reqlen,
			LPFC_SLI4_MBX_NEMBED);

	if (alloclen < reqlen) {
		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
				"3084 Allocated DMA memory size (%d) is "
				"less than the requested DMA memory size "
				"(%d)\n", alloclen, reqlen);
		rc = -ENOMEM;
		goto out_free_mboxq;
	}
	rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
	virtaddr = mboxq->sge_array->addr[0];
	mbx_cntl_attr = (struct lpfc_mbx_get_cntl_attributes *)virtaddr;
	shdr = &mbx_cntl_attr->cfg_shdr;
	shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
	shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
	if (shdr_status || shdr_add_status || rc) {
		lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
				"3085 Mailbox x%x (x%x/x%x) failed, "
				"rc:x%x, status:x%x, add_status:x%x\n",
				bf_get(lpfc_mqe_command, &mboxq->u.mqe),
				lpfc_sli_config_mbox_subsys_get(phba, mboxq),
				lpfc_sli_config_mbox_opcode_get(phba, mboxq),
				rc, shdr_status, shdr_add_status);
		rc = -ENXIO;
		goto out_free_mboxq;
	}

	/* Record link type/number and BIOS version from the response */
	cntl_attr = &mbx_cntl_attr->cntl_attr;
	phba->sli4_hba.lnk_info.lnk_dv = LPFC_LNK_DAT_VAL;
	phba->sli4_hba.lnk_info.lnk_tp =
		bf_get(lpfc_cntl_attr_lnk_type, cntl_attr);
	phba->sli4_hba.lnk_info.lnk_no =
		bf_get(lpfc_cntl_attr_lnk_numb, cntl_attr);

	memset(phba->BIOSVersion, 0, sizeof(phba->BIOSVersion));
	strlcat(phba->BIOSVersion, (char *)cntl_attr->bios_ver_str,
		sizeof(phba->BIOSVersion));

	lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
			"3086 lnk_type:%d, lnk_numb:%d, bios_ver:%s\n",
			phba->sli4_hba.lnk_info.lnk_tp,
			phba->sli4_hba.lnk_info.lnk_no,
			phba->BIOSVersion);
out_free_mboxq:
	/* On timeout the port still owns the mailbox - do not free it */
	if (rc != MBX_TIMEOUT) {
		if (bf_get(lpfc_mqe_command, &mboxq->u.mqe) == MBX_SLI4_CONFIG)
			lpfc_sli4_mbox_cmd_free(phba, mboxq);
		else
			mempool_free(mboxq, phba->mbox_mem_pool);
	}
	return rc;
}
5454
5455
5456
5457
5458
5459
5460
5461
5462
5463
5464
5465
/**
 * lpfc_sli4_retrieve_pport_name - Retrieve SLI4 device physical port name
 * @phba: pointer to lpfc hba data structure.
 *
 * Determines the physical port name of the SLI-4 device.  The link
 * type/number is obtained from READ_CONFIG (lpfc_sli4_read_config) or,
 * failing that, from GET_CNTL_ATTRIBUTES; a GET_PORT_NAME mailbox
 * command then yields the single-character port name for that link
 * number, which is stored in phba->Port.
 *
 * Return: 0 on success, -ENOMEM on allocation failure, -ENXIO if the
 * GET_PORT_NAME command fails, or an error from lpfc_sli4_get_ctl_attr.
 **/
static int
lpfc_sli4_retrieve_pport_name(struct lpfc_hba *phba)
{
	LPFC_MBOXQ_t *mboxq;
	struct lpfc_mbx_get_port_name *get_port_name;
	uint32_t shdr_status, shdr_add_status;
	union lpfc_sli4_cfg_shdr *shdr;
	char cport_name = 0;
	int rc;

	/* Assume the port-name retrieval fails until proven otherwise */
	phba->sli4_hba.lnk_info.lnk_dv = LPFC_LNK_DAT_INVAL;
	phba->sli4_hba.pport_name_sta = LPFC_SLI4_PPNAME_NON;

	mboxq = (LPFC_MBOXQ_t *)mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!mboxq)
		return -ENOMEM;

	/* NOTE(review): lnk_dv is set to LPFC_LNK_DAT_INVAL again here,
	 * duplicating the assignment a few lines above - appears redundant.
	 */
	phba->sli4_hba.lnk_info.lnk_dv = LPFC_LNK_DAT_INVAL;
	lpfc_sli4_read_config(phba);
	if (phba->sli4_hba.lnk_info.lnk_dv == LPFC_LNK_DAT_VAL)
		goto retrieve_ppname;

	/* Get the link type/number via controller attributes instead */
	rc = lpfc_sli4_get_ctl_attr(phba);
	if (rc)
		goto out_free_mboxq;

retrieve_ppname:
	lpfc_sli4_config(phba, mboxq, LPFC_MBOX_SUBSYSTEM_COMMON,
		LPFC_MBOX_OPCODE_GET_PORT_NAME,
		sizeof(struct lpfc_mbx_get_port_name) -
		sizeof(struct lpfc_sli4_cfg_mhdr),
		LPFC_SLI4_MBX_EMBED);
	get_port_name = &mboxq->u.mqe.un.get_port_name;
	shdr = (union lpfc_sli4_cfg_shdr *)&get_port_name->header.cfg_shdr;
	bf_set(lpfc_mbox_hdr_version, &shdr->request, LPFC_OPCODE_VERSION_1);
	bf_set(lpfc_mbx_get_port_name_lnk_type, &get_port_name->u.request,
		phba->sli4_hba.lnk_info.lnk_tp);
	rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
	shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
	shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
	if (shdr_status || shdr_add_status || rc) {
		lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
				"3087 Mailbox x%x (x%x/x%x) failed: "
				"rc:x%x, status:x%x, add_status:x%x\n",
				bf_get(lpfc_mqe_command, &mboxq->u.mqe),
				lpfc_sli_config_mbox_subsys_get(phba, mboxq),
				lpfc_sli_config_mbox_opcode_get(phba, mboxq),
				rc, shdr_status, shdr_add_status);
		rc = -ENXIO;
		goto out_free_mboxq;
	}
	/* Pick the name field matching this port's link number */
	switch (phba->sli4_hba.lnk_info.lnk_no) {
	case LPFC_LINK_NUMBER_0:
		cport_name = bf_get(lpfc_mbx_get_port_name_name0,
				&get_port_name->u.response);
		phba->sli4_hba.pport_name_sta = LPFC_SLI4_PPNAME_GET;
		break;
	case LPFC_LINK_NUMBER_1:
		cport_name = bf_get(lpfc_mbx_get_port_name_name1,
				&get_port_name->u.response);
		phba->sli4_hba.pport_name_sta = LPFC_SLI4_PPNAME_GET;
		break;
	case LPFC_LINK_NUMBER_2:
		cport_name = bf_get(lpfc_mbx_get_port_name_name2,
				&get_port_name->u.response);
		phba->sli4_hba.pport_name_sta = LPFC_SLI4_PPNAME_GET;
		break;
	case LPFC_LINK_NUMBER_3:
		cport_name = bf_get(lpfc_mbx_get_port_name_name3,
				&get_port_name->u.response);
		phba->sli4_hba.pport_name_sta = LPFC_SLI4_PPNAME_GET;
		break;
	default:
		break;
	}

	if (phba->sli4_hba.pport_name_sta == LPFC_SLI4_PPNAME_GET) {
		phba->Port[0] = cport_name;
		phba->Port[1] = '\0';
		lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
				"3091 SLI get port name: %s\n", phba->Port);
	}

out_free_mboxq:
	/* On timeout the port still owns the mailbox - do not free it */
	if (rc != MBX_TIMEOUT) {
		if (bf_get(lpfc_mqe_command, &mboxq->u.mqe) == MBX_SLI4_CONFIG)
			lpfc_sli4_mbox_cmd_free(phba, mboxq);
		else
			mempool_free(mboxq, phba->mbox_mem_pool);
	}
	return rc;
}
5560
5561
5562
5563
5564
5565
5566
5567
/**
 * lpfc_sli4_arm_cqeq_intr - Arm sli-4 device completion and event queues
 * @phba: pointer to lpfc hba data structure.
 *
 * Writes the doorbell of every completion queue (mailbox, ELS, NVME LS,
 * per-hardware-queue FCP/NVME, and NVMET MRQ sets when nvmet is
 * supported) and every event queue with LPFC_QUEUE_REARM so the port
 * will generate interrupts for subsequent entries.
 **/
static void
lpfc_sli4_arm_cqeq_intr(struct lpfc_hba *phba)
{
	int qidx;
	struct lpfc_sli4_hba *sli4_hba = &phba->sli4_hba;
	struct lpfc_sli4_hdw_queue *qp;
	struct lpfc_queue *eq;

	sli4_hba->sli4_write_cq_db(phba, sli4_hba->mbx_cq, 0, LPFC_QUEUE_REARM);
	sli4_hba->sli4_write_cq_db(phba, sli4_hba->els_cq, 0, LPFC_QUEUE_REARM);
	if (sli4_hba->nvmels_cq)
		sli4_hba->sli4_write_cq_db(phba, sli4_hba->nvmels_cq, 0,
					   LPFC_QUEUE_REARM);

	if (sli4_hba->hdwq) {
		/* Loop thru all Hardware Queues */
		for (qidx = 0; qidx < phba->cfg_hdw_queue; qidx++) {
			qp = &sli4_hba->hdwq[qidx];
			/* ARM the corresponding CQ */
			sli4_hba->sli4_write_cq_db(phba, qp->fcp_cq, 0,
						   LPFC_QUEUE_REARM);
			sli4_hba->sli4_write_cq_db(phba, qp->nvme_cq, 0,
						   LPFC_QUEUE_REARM);
		}

		/* Loop thru all IRQ vectors */
		for (qidx = 0; qidx < phba->cfg_irq_chann; qidx++) {
			eq = sli4_hba->hba_eq_hdl[qidx].eq;
			/* ARM the corresponding EQ */
			sli4_hba->sli4_write_eq_db(phba, eq,
						   0, LPFC_QUEUE_REARM);
		}
	}

	if (phba->nvmet_support) {
		for (qidx = 0; qidx < phba->cfg_nvmet_mrq; qidx++) {
			sli4_hba->sli4_write_cq_db(phba,
				sli4_hba->nvmet_cqset[qidx], 0,
				LPFC_QUEUE_REARM);
		}
	}
}
5610
5611
5612
5613
5614
5615
5616
5617
5618
5619
5620
5621
5622
/**
 * lpfc_sli4_get_avail_extnt_rsrc - Get available resource extent count
 * @phba: Pointer to HBA context object.
 * @type: The resource extent type (RPI/VPI/XRI/VFI).
 * @extnt_count: output: number of available extents of this type.
 * @extnt_size: output: size (number of resources) of each extent.
 *
 * Issues a GET_RSRC_EXTENT_INFO mailbox command (polled when interrupts
 * are not yet enabled, otherwise waited) and returns the port's
 * available extent count and size for the requested resource type.
 *
 * Return: 0 on success, -ENOMEM on allocation failure, -EIO on any
 * mailbox or status-header failure.
 **/
int
lpfc_sli4_get_avail_extnt_rsrc(struct lpfc_hba *phba, uint16_t type,
			       uint16_t *extnt_count, uint16_t *extnt_size)
{
	int rc = 0;
	uint32_t length;
	uint32_t mbox_tmo;
	struct lpfc_mbx_get_rsrc_extent_info *rsrc_info;
	LPFC_MBOXQ_t *mbox;

	mbox = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!mbox)
		return -ENOMEM;

	/* Find out how many extents are available for this resource type */
	length = (sizeof(struct lpfc_mbx_get_rsrc_extent_info) -
		  sizeof(struct lpfc_sli4_cfg_mhdr));
	lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON,
			 LPFC_MBOX_OPCODE_GET_RSRC_EXTENT_INFO,
			 length, LPFC_SLI4_MBX_EMBED);

	/* Send an extents count of 0 - the GET doesn't use it. */
	rc = lpfc_sli4_mbox_rsrc_extent(phba, mbox, 0, type,
					LPFC_SLI4_MBX_EMBED);
	if (unlikely(rc)) {
		rc = -EIO;
		goto err_exit;
	}

	if (!phba->sli4_hba.intr_enable)
		rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
	else {
		mbox_tmo = lpfc_mbox_tmo_val(phba, mbox);
		rc = lpfc_sli_issue_mbox_wait(phba, mbox, mbox_tmo);
	}
	if (unlikely(rc)) {
		rc = -EIO;
		goto err_exit;
	}

	rsrc_info = &mbox->u.mqe.un.rsrc_extent_info;
	if (bf_get(lpfc_mbox_hdr_status,
		   &rsrc_info->header.cfg_shdr.response)) {
		lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_INIT,
				"2930 Failed to get resource extents "
				"Status 0x%x Add'l Status 0x%x\n",
				bf_get(lpfc_mbox_hdr_status,
				       &rsrc_info->header.cfg_shdr.response),
				bf_get(lpfc_mbox_hdr_add_status,
				       &rsrc_info->header.cfg_shdr.response));
		rc = -EIO;
		goto err_exit;
	}

	*extnt_count = bf_get(lpfc_mbx_get_rsrc_extent_info_cnt,
			      &rsrc_info->u.rsp);
	*extnt_size = bf_get(lpfc_mbx_get_rsrc_extent_info_size,
			     &rsrc_info->u.rsp);

	lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
			"3162 Retrieved extents type-%d from port: count:%d, "
			"size:%d\n", type, *extnt_count, *extnt_size);

err_exit:
	mempool_free(mbox, phba->mbox_mem_pool);
	return rc;
}
5690
5691
5692
5693
5694
5695
5696
5697
5698
5699
5700
5701
5702
5703
5704
5705
5706static int
5707lpfc_sli4_chk_avail_extnt_rsrc(struct lpfc_hba *phba, uint16_t type)
5708{
5709 uint16_t curr_ext_cnt, rsrc_ext_cnt;
5710 uint16_t size_diff, rsrc_ext_size;
5711 int rc = 0;
5712 struct lpfc_rsrc_blks *rsrc_entry;
5713 struct list_head *rsrc_blk_list = NULL;
5714
5715 size_diff = 0;
5716 curr_ext_cnt = 0;
5717 rc = lpfc_sli4_get_avail_extnt_rsrc(phba, type,
5718 &rsrc_ext_cnt,
5719 &rsrc_ext_size);
5720 if (unlikely(rc))
5721 return -EIO;
5722
5723 switch (type) {
5724 case LPFC_RSC_TYPE_FCOE_RPI:
5725 rsrc_blk_list = &phba->sli4_hba.lpfc_rpi_blk_list;
5726 break;
5727 case LPFC_RSC_TYPE_FCOE_VPI:
5728 rsrc_blk_list = &phba->lpfc_vpi_blk_list;
5729 break;
5730 case LPFC_RSC_TYPE_FCOE_XRI:
5731 rsrc_blk_list = &phba->sli4_hba.lpfc_xri_blk_list;
5732 break;
5733 case LPFC_RSC_TYPE_FCOE_VFI:
5734 rsrc_blk_list = &phba->sli4_hba.lpfc_vfi_blk_list;
5735 break;
5736 default:
5737 break;
5738 }
5739
5740 list_for_each_entry(rsrc_entry, rsrc_blk_list, list) {
5741 curr_ext_cnt++;
5742 if (rsrc_entry->rsrc_size != rsrc_ext_size)
5743 size_diff++;
5744 }
5745
5746 if (curr_ext_cnt != rsrc_ext_cnt || size_diff != 0)
5747 rc = 1;
5748
5749 return rc;
5750}
5751
5752
5753
5754
5755
5756
5757
5758
5759
5760
5761
5762
5763
5764
5765
5766
5767
5768
/**
 * lpfc_sli4_cfg_post_extnts - Post resource extents to the port
 * @phba: Pointer to HBA context object.
 * @extnt_cnt: number of available extents.
 * @type: The extent type (RPI/VPI/XRI/VFI).
 * @emb: output: whether the command was embedded (LPFC_SLI4_MBX_EMBED)
 *       or non-embedded, decided here from the payload size.
 * @mbox: caller-supplied mailbox to build the command in.
 *
 * Builds and issues an ALLOC_RSRC_EXTENT mailbox command.  If the
 * response (extnt_cnt id words) fits in the embedded mailbox payload
 * the embedded form is used; otherwise a non-embedded command with
 * external SGE pages is allocated.  Issued polled before interrupts
 * are enabled, otherwise waited.
 *
 * Return: 0 on success, -ENOMEM if the SGE allocation came up short,
 * -EIO on any mailbox failure.
 **/
static int
lpfc_sli4_cfg_post_extnts(struct lpfc_hba *phba, uint16_t extnt_cnt,
			  uint16_t type, bool *emb, LPFC_MBOXQ_t *mbox)
{
	int rc = 0;
	uint32_t req_len;
	uint32_t emb_len;
	uint32_t alloc_len, mbox_tmo;

	/* Calculate the total requested length of the dma memory */
	req_len = extnt_cnt * sizeof(uint16_t);

	/*
	 * Calculate the size of an embedded mailbox.  The uint32_t
	 * accounts for extents-specific word.
	 */
	emb_len = sizeof(MAILBOX_t) - sizeof(struct mbox_header) -
		sizeof(uint32_t);

	/*
	 * Presume the allocation and response will fit into an embedded
	 * mailbox.  If not true, reconfigure to a non-embedded mailbox.
	 */
	*emb = LPFC_SLI4_MBX_EMBED;
	if (req_len > emb_len) {
		req_len = extnt_cnt * sizeof(uint16_t) +
			sizeof(union lpfc_sli4_cfg_shdr) +
			sizeof(uint32_t);
		*emb = LPFC_SLI4_MBX_NEMBED;
	}

	alloc_len = lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON,
				     LPFC_MBOX_OPCODE_ALLOC_RSRC_EXTENT,
				     req_len, *emb);
	if (alloc_len < req_len) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
			"2982 Allocated DMA memory size (x%x) is "
			"less than the requested DMA memory "
			"size (x%x)\n", alloc_len, req_len);
		return -ENOMEM;
	}
	rc = lpfc_sli4_mbox_rsrc_extent(phba, mbox, extnt_cnt, type, *emb);
	if (unlikely(rc))
		return -EIO;

	if (!phba->sli4_hba.intr_enable)
		rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
	else {
		mbox_tmo = lpfc_mbox_tmo_val(phba, mbox);
		rc = lpfc_sli_issue_mbox_wait(phba, mbox, mbox_tmo);
	}

	if (unlikely(rc))
		rc = -EIO;
	return rc;
}
5825
5826
5827
5828
5829
5830
5831
5832
5833
5834static int
5835lpfc_sli4_alloc_extent(struct lpfc_hba *phba, uint16_t type)
5836{
5837 bool emb = false;
5838 uint16_t rsrc_id_cnt, rsrc_cnt, rsrc_size;
5839 uint16_t rsrc_id, rsrc_start, j, k;
5840 uint16_t *ids;
5841 int i, rc;
5842 unsigned long longs;
5843 unsigned long *bmask;
5844 struct lpfc_rsrc_blks *rsrc_blks;
5845 LPFC_MBOXQ_t *mbox;
5846 uint32_t length;
5847 struct lpfc_id_range *id_array = NULL;
5848 void *virtaddr = NULL;
5849 struct lpfc_mbx_nembed_rsrc_extent *n_rsrc;
5850 struct lpfc_mbx_alloc_rsrc_extents *rsrc_ext;
5851 struct list_head *ext_blk_list;
5852
5853 rc = lpfc_sli4_get_avail_extnt_rsrc(phba, type,
5854 &rsrc_cnt,
5855 &rsrc_size);
5856 if (unlikely(rc))
5857 return -EIO;
5858
5859 if ((rsrc_cnt == 0) || (rsrc_size == 0)) {
5860 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_INIT,
5861 "3009 No available Resource Extents "
5862 "for resource type 0x%x: Count: 0x%x, "
5863 "Size 0x%x\n", type, rsrc_cnt,
5864 rsrc_size);
5865 return -ENOMEM;
5866 }
5867
5868 lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_INIT | LOG_SLI,
5869 "2903 Post resource extents type-0x%x: "
5870 "count:%d, size %d\n", type, rsrc_cnt, rsrc_size);
5871
5872 mbox = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
5873 if (!mbox)
5874 return -ENOMEM;
5875
5876 rc = lpfc_sli4_cfg_post_extnts(phba, rsrc_cnt, type, &emb, mbox);
5877 if (unlikely(rc)) {
5878 rc = -EIO;
5879 goto err_exit;
5880 }
5881
5882
5883
5884
5885
5886
5887
5888 if (emb == LPFC_SLI4_MBX_EMBED) {
5889 rsrc_ext = &mbox->u.mqe.un.alloc_rsrc_extents;
5890 id_array = &rsrc_ext->u.rsp.id[0];
5891 rsrc_cnt = bf_get(lpfc_mbx_rsrc_cnt, &rsrc_ext->u.rsp);
5892 } else {
5893 virtaddr = mbox->sge_array->addr[0];
5894 n_rsrc = (struct lpfc_mbx_nembed_rsrc_extent *) virtaddr;
5895 rsrc_cnt = bf_get(lpfc_mbx_rsrc_cnt, n_rsrc);
5896 id_array = &n_rsrc->id;
5897 }
5898
5899 longs = ((rsrc_cnt * rsrc_size) + BITS_PER_LONG - 1) / BITS_PER_LONG;
5900 rsrc_id_cnt = rsrc_cnt * rsrc_size;
5901
5902
5903
5904
5905
5906 length = sizeof(struct lpfc_rsrc_blks);
5907 switch (type) {
5908 case LPFC_RSC_TYPE_FCOE_RPI:
5909 phba->sli4_hba.rpi_bmask = kcalloc(longs,
5910 sizeof(unsigned long),
5911 GFP_KERNEL);
5912 if (unlikely(!phba->sli4_hba.rpi_bmask)) {
5913 rc = -ENOMEM;
5914 goto err_exit;
5915 }
5916 phba->sli4_hba.rpi_ids = kcalloc(rsrc_id_cnt,
5917 sizeof(uint16_t),
5918 GFP_KERNEL);
5919 if (unlikely(!phba->sli4_hba.rpi_ids)) {
5920 kfree(phba->sli4_hba.rpi_bmask);
5921 rc = -ENOMEM;
5922 goto err_exit;
5923 }
5924
5925
5926
5927
5928
5929
5930 phba->sli4_hba.next_rpi = rsrc_id_cnt;
5931
5932
5933 bmask = phba->sli4_hba.rpi_bmask;
5934 ids = phba->sli4_hba.rpi_ids;
5935 ext_blk_list = &phba->sli4_hba.lpfc_rpi_blk_list;
5936 break;
5937 case LPFC_RSC_TYPE_FCOE_VPI:
5938 phba->vpi_bmask = kcalloc(longs, sizeof(unsigned long),
5939 GFP_KERNEL);
5940 if (unlikely(!phba->vpi_bmask)) {
5941 rc = -ENOMEM;
5942 goto err_exit;
5943 }
5944 phba->vpi_ids = kcalloc(rsrc_id_cnt, sizeof(uint16_t),
5945 GFP_KERNEL);
5946 if (unlikely(!phba->vpi_ids)) {
5947 kfree(phba->vpi_bmask);
5948 rc = -ENOMEM;
5949 goto err_exit;
5950 }
5951
5952
5953 bmask = phba->vpi_bmask;
5954 ids = phba->vpi_ids;
5955 ext_blk_list = &phba->lpfc_vpi_blk_list;
5956 break;
5957 case LPFC_RSC_TYPE_FCOE_XRI:
5958 phba->sli4_hba.xri_bmask = kcalloc(longs,
5959 sizeof(unsigned long),
5960 GFP_KERNEL);
5961 if (unlikely(!phba->sli4_hba.xri_bmask)) {
5962 rc = -ENOMEM;
5963 goto err_exit;
5964 }
5965 phba->sli4_hba.max_cfg_param.xri_used = 0;
5966 phba->sli4_hba.xri_ids = kcalloc(rsrc_id_cnt,
5967 sizeof(uint16_t),
5968 GFP_KERNEL);
5969 if (unlikely(!phba->sli4_hba.xri_ids)) {
5970 kfree(phba->sli4_hba.xri_bmask);
5971 rc = -ENOMEM;
5972 goto err_exit;
5973 }
5974
5975
5976 bmask = phba->sli4_hba.xri_bmask;
5977 ids = phba->sli4_hba.xri_ids;
5978 ext_blk_list = &phba->sli4_hba.lpfc_xri_blk_list;
5979 break;
5980 case LPFC_RSC_TYPE_FCOE_VFI:
5981 phba->sli4_hba.vfi_bmask = kcalloc(longs,
5982 sizeof(unsigned long),
5983 GFP_KERNEL);
5984 if (unlikely(!phba->sli4_hba.vfi_bmask)) {
5985 rc = -ENOMEM;
5986 goto err_exit;
5987 }
5988 phba->sli4_hba.vfi_ids = kcalloc(rsrc_id_cnt,
5989 sizeof(uint16_t),
5990 GFP_KERNEL);
5991 if (unlikely(!phba->sli4_hba.vfi_ids)) {
5992 kfree(phba->sli4_hba.vfi_bmask);
5993 rc = -ENOMEM;
5994 goto err_exit;
5995 }
5996
5997
5998 bmask = phba->sli4_hba.vfi_bmask;
5999 ids = phba->sli4_hba.vfi_ids;
6000 ext_blk_list = &phba->sli4_hba.lpfc_vfi_blk_list;
6001 break;
6002 default:
6003
6004 id_array = NULL;
6005 bmask = NULL;
6006 ids = NULL;
6007 ext_blk_list = NULL;
6008 goto err_exit;
6009 }
6010
6011
6012
6013
6014
6015
6016
6017 for (i = 0, j = 0, k = 0; i < rsrc_cnt; i++) {
6018 if ((i % 2) == 0)
6019 rsrc_id = bf_get(lpfc_mbx_rsrc_id_word4_0,
6020 &id_array[k]);
6021 else
6022 rsrc_id = bf_get(lpfc_mbx_rsrc_id_word4_1,
6023 &id_array[k]);
6024
6025 rsrc_blks = kzalloc(length, GFP_KERNEL);
6026 if (unlikely(!rsrc_blks)) {
6027 rc = -ENOMEM;
6028 kfree(bmask);
6029 kfree(ids);
6030 goto err_exit;
6031 }
6032 rsrc_blks->rsrc_start = rsrc_id;
6033 rsrc_blks->rsrc_size = rsrc_size;
6034 list_add_tail(&rsrc_blks->list, ext_blk_list);
6035 rsrc_start = rsrc_id;
6036 if ((type == LPFC_RSC_TYPE_FCOE_XRI) && (j == 0)) {
6037 phba->sli4_hba.io_xri_start = rsrc_start +
6038 lpfc_sli4_get_iocb_cnt(phba);
6039 }
6040
6041 while (rsrc_id < (rsrc_start + rsrc_size)) {
6042 ids[j] = rsrc_id;
6043 rsrc_id++;
6044 j++;
6045 }
6046
6047 if ((i % 2) == 1)
6048 k++;
6049 }
6050 err_exit:
6051 lpfc_sli4_mbox_cmd_free(phba, mbox);
6052 return rc;
6053}
6054
6055
6056
6057
6058
6059
6060
6061
6062
6063
6064
6065
/**
 * lpfc_sli4_dealloc_extent - Release SLI4 resource extents of a given type.
 * @phba: Pointer to HBA context object.
 * @type: The resource extent type to release.
 *
 * Issues a DEALLOC_RSRC_EXTENT mailbox command for @type and, on success,
 * frees the driver-side id arrays/bitmasks owned here and empties the
 * cached extent block list for that type.
 *
 * Return: 0 on success, -ENOMEM if the mailbox could not be allocated,
 * -EIO on any mailbox or port failure.
 */
static int
lpfc_sli4_dealloc_extent(struct lpfc_hba *phba, uint16_t type)
{
	int rc;
	uint32_t length, mbox_tmo = 0;
	LPFC_MBOXQ_t *mbox;
	struct lpfc_mbx_dealloc_rsrc_extents *dealloc_rsrc;
	struct lpfc_rsrc_blks *rsrc_blk, *rsrc_blk_next;

	mbox = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!mbox)
		return -ENOMEM;

	/*
	 * An embedded mailbox is sufficient: the request carries only the
	 * resource type; the port releases all extents of that type.
	 */
	length = (sizeof(struct lpfc_mbx_dealloc_rsrc_extents) -
		  sizeof(struct lpfc_sli4_cfg_mhdr));
	lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON,
			 LPFC_MBOX_OPCODE_DEALLOC_RSRC_EXTENT,
			 length, LPFC_SLI4_MBX_EMBED);

	/* Extent count of 0: the dealloc request does not use it. */
	rc = lpfc_sli4_mbox_rsrc_extent(phba, mbox, 0, type,
					LPFC_SLI4_MBX_EMBED);
	if (unlikely(rc)) {
		rc = -EIO;
		goto out_free_mbox;
	}
	/* Poll when interrupts are not enabled (early init / reset path). */
	if (!phba->sli4_hba.intr_enable)
		rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
	else {
		mbox_tmo = lpfc_mbox_tmo_val(phba, mbox);
		rc = lpfc_sli_issue_mbox_wait(phba, mbox, mbox_tmo);
	}
	if (unlikely(rc)) {
		rc = -EIO;
		goto out_free_mbox;
	}

	/* Check the common config header status in the response. */
	dealloc_rsrc = &mbox->u.mqe.un.dealloc_rsrc_extents;
	if (bf_get(lpfc_mbox_hdr_status,
		   &dealloc_rsrc->header.cfg_shdr.response)) {
		lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_INIT,
				"2919 Failed to release resource extents "
				"for type %d - Status 0x%x Add'l Status 0x%x. "
				"Resource memory not released.\n",
				type,
				bf_get(lpfc_mbox_hdr_status,
				       &dealloc_rsrc->header.cfg_shdr.response),
				bf_get(lpfc_mbox_hdr_add_status,
				       &dealloc_rsrc->header.cfg_shdr.response));
		rc = -EIO;
		goto out_free_mbox;
	}

	/* Port released the extents - free the host-side tracking memory. */
	switch (type) {
	case LPFC_RSC_TYPE_FCOE_VPI:
		kfree(phba->vpi_bmask);
		kfree(phba->vpi_ids);
		bf_set(lpfc_vpi_rsrc_rdy, &phba->sli4_hba.sli4_flags, 0);
		list_for_each_entry_safe(rsrc_blk, rsrc_blk_next,
					 &phba->lpfc_vpi_blk_list, list) {
			list_del_init(&rsrc_blk->list);
			kfree(rsrc_blk);
		}
		phba->sli4_hba.max_cfg_param.vpi_used = 0;
		break;
	case LPFC_RSC_TYPE_FCOE_XRI:
		kfree(phba->sli4_hba.xri_bmask);
		kfree(phba->sli4_hba.xri_ids);
		list_for_each_entry_safe(rsrc_blk, rsrc_blk_next,
					 &phba->sli4_hba.lpfc_xri_blk_list, list) {
			list_del_init(&rsrc_blk->list);
			kfree(rsrc_blk);
		}
		break;
	case LPFC_RSC_TYPE_FCOE_VFI:
		kfree(phba->sli4_hba.vfi_bmask);
		kfree(phba->sli4_hba.vfi_ids);
		bf_set(lpfc_vfi_rsrc_rdy, &phba->sli4_hba.sli4_flags, 0);
		list_for_each_entry_safe(rsrc_blk, rsrc_blk_next,
					 &phba->sli4_hba.lpfc_vfi_blk_list, list) {
			list_del_init(&rsrc_blk->list);
			kfree(rsrc_blk);
		}
		break;
	case LPFC_RSC_TYPE_FCOE_RPI:
		/*
		 * Unlike the other types, the RPI bitmask/id arrays are not
		 * freed here; only the cached extent block list is emptied.
		 * NOTE(review): presumably the RPI arrays are owned by the
		 * RPI header management code - confirm against
		 * lpfc_sli4_remove_rpis()/rpi header teardown.
		 */
		list_for_each_entry_safe(rsrc_blk, rsrc_blk_next,
					 &phba->sli4_hba.lpfc_rpi_blk_list, list) {
			list_del_init(&rsrc_blk->list);
			kfree(rsrc_blk);
		}
		break;
	default:
		break;
	}

	/* Resource identifiers must be re-provisioned before reuse. */
	bf_set(lpfc_idx_rsrc_rdy, &phba->sli4_hba.sli4_flags, 0);

 out_free_mbox:
	mempool_free(mbox, phba->mbox_mem_pool);
	return rc;
}
6174
6175static void
6176lpfc_set_features(struct lpfc_hba *phba, LPFC_MBOXQ_t *mbox,
6177 uint32_t feature)
6178{
6179 uint32_t len;
6180
6181 len = sizeof(struct lpfc_mbx_set_feature) -
6182 sizeof(struct lpfc_sli4_cfg_mhdr);
6183 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON,
6184 LPFC_MBOX_OPCODE_SET_FEATURES, len,
6185 LPFC_SLI4_MBX_EMBED);
6186
6187 switch (feature) {
6188 case LPFC_SET_UE_RECOVERY:
6189 bf_set(lpfc_mbx_set_feature_UER,
6190 &mbox->u.mqe.un.set_feature, 1);
6191 mbox->u.mqe.un.set_feature.feature = LPFC_SET_UE_RECOVERY;
6192 mbox->u.mqe.un.set_feature.param_len = 8;
6193 break;
6194 case LPFC_SET_MDS_DIAGS:
6195 bf_set(lpfc_mbx_set_feature_mds,
6196 &mbox->u.mqe.un.set_feature, 1);
6197 bf_set(lpfc_mbx_set_feature_mds_deep_loopbk,
6198 &mbox->u.mqe.un.set_feature, 1);
6199 mbox->u.mqe.un.set_feature.feature = LPFC_SET_MDS_DIAGS;
6200 mbox->u.mqe.un.set_feature.param_len = 8;
6201 break;
6202 }
6203
6204 return;
6205}
6206
6207
6208
6209
6210
6211
6212
6213
6214void
6215lpfc_ras_stop_fwlog(struct lpfc_hba *phba)
6216{
6217 struct lpfc_ras_fwlog *ras_fwlog = &phba->ras_fwlog;
6218
6219 ras_fwlog->ras_active = false;
6220
6221
6222 writel(LPFC_CTL_PDEV_CTL_DDL_RAS,
6223 phba->sli4_hba.conf_regs_memmap_p + LPFC_CTL_PDEV_CTL_OFFSET);
6224}
6225
6226
6227
6228
6229
6230
6231
6232
6233void
6234lpfc_sli4_ras_dma_free(struct lpfc_hba *phba)
6235{
6236 struct lpfc_ras_fwlog *ras_fwlog = &phba->ras_fwlog;
6237 struct lpfc_dmabuf *dmabuf, *next;
6238
6239 if (!list_empty(&ras_fwlog->fwlog_buff_list)) {
6240 list_for_each_entry_safe(dmabuf, next,
6241 &ras_fwlog->fwlog_buff_list,
6242 list) {
6243 list_del(&dmabuf->list);
6244 dma_free_coherent(&phba->pcidev->dev,
6245 LPFC_RAS_MAX_ENTRY_SIZE,
6246 dmabuf->virt, dmabuf->phys);
6247 kfree(dmabuf);
6248 }
6249 }
6250
6251 if (ras_fwlog->lwpd.virt) {
6252 dma_free_coherent(&phba->pcidev->dev,
6253 sizeof(uint32_t) * 2,
6254 ras_fwlog->lwpd.virt,
6255 ras_fwlog->lwpd.phys);
6256 ras_fwlog->lwpd.virt = NULL;
6257 }
6258
6259 ras_fwlog->ras_active = false;
6260}
6261
6262
6263
6264
6265
6266
6267
6268
6269
6270
6271
6272
6273static int
6274lpfc_sli4_ras_dma_alloc(struct lpfc_hba *phba,
6275 uint32_t fwlog_buff_count)
6276{
6277 struct lpfc_ras_fwlog *ras_fwlog = &phba->ras_fwlog;
6278 struct lpfc_dmabuf *dmabuf;
6279 int rc = 0, i = 0;
6280
6281
6282 INIT_LIST_HEAD(&ras_fwlog->fwlog_buff_list);
6283
6284
6285 ras_fwlog->lwpd.virt = dma_alloc_coherent(&phba->pcidev->dev,
6286 sizeof(uint32_t) * 2,
6287 &ras_fwlog->lwpd.phys,
6288 GFP_KERNEL);
6289 if (!ras_fwlog->lwpd.virt) {
6290 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
6291 "6185 LWPD Memory Alloc Failed\n");
6292
6293 return -ENOMEM;
6294 }
6295
6296 ras_fwlog->fw_buffcount = fwlog_buff_count;
6297 for (i = 0; i < ras_fwlog->fw_buffcount; i++) {
6298 dmabuf = kzalloc(sizeof(struct lpfc_dmabuf),
6299 GFP_KERNEL);
6300 if (!dmabuf) {
6301 rc = -ENOMEM;
6302 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
6303 "6186 Memory Alloc failed FW logging");
6304 goto free_mem;
6305 }
6306
6307 dmabuf->virt = dma_alloc_coherent(&phba->pcidev->dev,
6308 LPFC_RAS_MAX_ENTRY_SIZE,
6309 &dmabuf->phys, GFP_KERNEL);
6310 if (!dmabuf->virt) {
6311 kfree(dmabuf);
6312 rc = -ENOMEM;
6313 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
6314 "6187 DMA Alloc Failed FW logging");
6315 goto free_mem;
6316 }
6317 dmabuf->buffer_tag = i;
6318 list_add_tail(&dmabuf->list, &ras_fwlog->fwlog_buff_list);
6319 }
6320
6321free_mem:
6322 if (rc)
6323 lpfc_sli4_ras_dma_free(phba);
6324
6325 return rc;
6326}
6327
6328
6329
6330
6331
6332
6333
6334
6335static void
6336lpfc_sli4_ras_mbox_cmpl(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
6337{
6338 MAILBOX_t *mb;
6339 union lpfc_sli4_cfg_shdr *shdr;
6340 uint32_t shdr_status, shdr_add_status;
6341 struct lpfc_ras_fwlog *ras_fwlog = &phba->ras_fwlog;
6342
6343 mb = &pmb->u.mb;
6344
6345 shdr = (union lpfc_sli4_cfg_shdr *)
6346 &pmb->u.mqe.un.ras_fwlog.header.cfg_shdr;
6347 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
6348 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
6349
6350 if (mb->mbxStatus != MBX_SUCCESS || shdr_status) {
6351 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX,
6352 "6188 FW LOG mailbox "
6353 "completed with status x%x add_status x%x,"
6354 " mbx status x%x\n",
6355 shdr_status, shdr_add_status, mb->mbxStatus);
6356
6357 ras_fwlog->ras_hwsupport = false;
6358 goto disable_ras;
6359 }
6360
6361 ras_fwlog->ras_active = true;
6362 mempool_free(pmb, phba->mbox_mem_pool);
6363
6364 return;
6365
6366disable_ras:
6367
6368 lpfc_sli4_ras_dma_free(phba);
6369 mempool_free(pmb, phba->mbox_mem_pool);
6370}
6371
6372
6373
6374
6375
6376
6377
6378
6379
6380
/**
 * lpfc_sli4_ras_fwlog_init - Enable firmware logging via mailbox command.
 * @phba: Pointer to HBA context object.
 * @fwlog_level: Firmware log verbosity level to request.
 * @fwlog_enable: Enable/disable selector passed to the port.
 *
 * Allocates the RAS DMA buffers (if not already present), builds a
 * SET_DIAG_LOG_OPTION mailbox command carrying the buffer and LWPD
 * addresses, and issues it asynchronously; lpfc_sli4_ras_mbox_cmpl()
 * finishes the setup on completion.
 *
 * Return: 0 if the command was issued, -ENOMEM or -EIO on failure (DMA
 * buffers are freed on failure).
 */
int
lpfc_sli4_ras_fwlog_init(struct lpfc_hba *phba,
			 uint32_t fwlog_level,
			 uint32_t fwlog_enable)
{
	struct lpfc_ras_fwlog *ras_fwlog = &phba->ras_fwlog;
	struct lpfc_mbx_set_ras_fwlog *mbx_fwlog = NULL;
	struct lpfc_dmabuf *dmabuf;
	LPFC_MBOXQ_t *mbox;
	uint32_t len = 0, fwlog_buffsize, fwlog_entry_count;
	int rc = 0;

	/* Total log size scales with the configured buffer-size multiplier. */
	fwlog_buffsize = (LPFC_RAS_MIN_BUFF_POST_SIZE *
			  phba->cfg_ras_fwlog_buffsize);
	fwlog_entry_count = (fwlog_buffsize/LPFC_RAS_MAX_ENTRY_SIZE);

	/*
	 * Allocate the DMA buffers only once; lwpd.virt doubles as the
	 * "already allocated" marker.
	 */
	if (!ras_fwlog->lwpd.virt) {
		rc = lpfc_sli4_ras_dma_alloc(phba, fwlog_entry_count);
		if (rc) {
			lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
					"6189 FW Log Memory Allocation Failed");
			return rc;
		}
	}

	/* Setup Mailbox command */
	mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!mbox) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"6190 RAS MBX Alloc Failed");
		rc = -ENOMEM;
		goto mem_free;
	}

	ras_fwlog->fw_loglevel = fwlog_level;
	len = (sizeof(struct lpfc_mbx_set_ras_fwlog) -
	       sizeof(struct lpfc_sli4_cfg_mhdr));

	lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_LOWLEVEL,
			 LPFC_MBOX_OPCODE_SET_DIAG_LOG_OPTION,
			 len, LPFC_SLI4_MBX_EMBED);

	/* Fill in the request words: enable flag, level, count, size. */
	mbx_fwlog = (struct lpfc_mbx_set_ras_fwlog *)&mbox->u.mqe.un.ras_fwlog;
	bf_set(lpfc_fwlog_enable, &mbx_fwlog->u.request,
	       fwlog_enable);
	bf_set(lpfc_fwlog_loglvl, &mbx_fwlog->u.request,
	       ras_fwlog->fw_loglevel);
	bf_set(lpfc_fwlog_buffcnt, &mbx_fwlog->u.request,
	       ras_fwlog->fw_buffcount);
	bf_set(lpfc_fwlog_buffsz, &mbx_fwlog->u.request,
	       LPFC_RAS_MAX_ENTRY_SIZE/SLI4_PAGE_SIZE);

	/* Zero each log buffer and record its DMA address by buffer tag. */
	list_for_each_entry(dmabuf, &ras_fwlog->fwlog_buff_list, list) {
		memset(dmabuf->virt, 0, LPFC_RAS_MAX_ENTRY_SIZE);

		mbx_fwlog->u.request.buff_fwlog[dmabuf->buffer_tag].addr_lo =
			putPaddrLow(dmabuf->phys);

		mbx_fwlog->u.request.buff_fwlog[dmabuf->buffer_tag].addr_hi =
			putPaddrHigh(dmabuf->phys);
	}

	/* Address of the Log Write Position Data block. */
	mbx_fwlog->u.request.lwpd.addr_lo = putPaddrLow(ras_fwlog->lwpd.phys);
	mbx_fwlog->u.request.lwpd.addr_hi = putPaddrHigh(ras_fwlog->lwpd.phys);

	mbox->vport = phba->pport;
	mbox->mbox_cmpl = lpfc_sli4_ras_mbox_cmpl;

	rc = lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT);

	if (rc == MBX_NOT_FINISHED) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"6191 FW-Log Mailbox failed. "
				"status %d mbxStatus : x%x", rc,
				bf_get(lpfc_mqe_status, &mbox->u.mqe));
		mempool_free(mbox, phba->mbox_mem_pool);
		rc = -EIO;
		goto mem_free;
	} else
		rc = 0;
mem_free:
	/* On any failure, release the DMA buffers allocated above. */
	if (rc)
		lpfc_sli4_ras_dma_free(phba);

	return rc;
}
6473
6474
6475
6476
6477
6478
6479
6480void
6481lpfc_sli4_ras_setup(struct lpfc_hba *phba)
6482{
6483
6484 if (lpfc_check_fwlog_support(phba))
6485 return;
6486
6487 lpfc_sli4_ras_fwlog_init(phba, phba->cfg_ras_fwlog_level,
6488 LPFC_RAS_ENABLE_LOGGING);
6489}
6490
6491
6492
6493
6494
6495
6496
/**
 * lpfc_sli4_alloc_resource_identifiers - Provision SLI4 resource ids.
 * @phba: Pointer to HBA context object.
 *
 * Provisions VFI, VPI, RPI and XRI identifiers by one of two schemes:
 *
 * - Extents in use: verify any previously provisioned extents still match
 *   the port's configuration; if anything changed, deallocate and
 *   reallocate all four extent types.
 * - Extents not in use: build flat bitmask + id arrays directly from the
 *   base/max values the port reported, after tearing down any prior
 *   provisioning.
 *
 * On success the lpfc_idx_rsrc_rdy flag is set so a later call can detect
 * that ids are already provisioned.
 *
 * Return: 0 on success; -EINVAL for invalid port provisioning; -ENOMEM or
 * the error from the extent allocation helpers otherwise.
 */
int
lpfc_sli4_alloc_resource_identifiers(struct lpfc_hba *phba)
{
	int i, rc, error = 0;
	uint16_t count, base;
	unsigned long longs;

	/* Without RPI headers, all max_rpi ids are usable immediately. */
	if (!phba->sli4_hba.rpi_hdrs_in_use)
		phba->sli4_hba.next_rpi = phba->sli4_hba.max_cfg_param.max_rpi;
	if (phba->sli4_hba.extents_in_use) {
		/*
		 * Extent-based provisioning.  If ids were provisioned
		 * before (e.g. across a reset), first check whether the
		 * port's extent configuration changed.
		 */
		if (bf_get(lpfc_idx_rsrc_rdy, &phba->sli4_hba.sli4_flags) ==
		    LPFC_IDX_RSRC_RDY) {
			/*
			 * Each helper returns nonzero if the cached extent
			 * count/size for that type no longer matches the
			 * port; tally the mismatches.
			 */
			rc = lpfc_sli4_chk_avail_extnt_rsrc(phba,
						 LPFC_RSC_TYPE_FCOE_VFI);
			if (rc != 0)
				error++;
			rc = lpfc_sli4_chk_avail_extnt_rsrc(phba,
						 LPFC_RSC_TYPE_FCOE_VPI);
			if (rc != 0)
				error++;
			rc = lpfc_sli4_chk_avail_extnt_rsrc(phba,
						 LPFC_RSC_TYPE_FCOE_XRI);
			if (rc != 0)
				error++;
			rc = lpfc_sli4_chk_avail_extnt_rsrc(phba,
						 LPFC_RSC_TYPE_FCOE_RPI);
			if (rc != 0)
				error++;

			/*
			 * Any mismatch forces a full teardown and
			 * reallocation of all extent types; otherwise the
			 * existing provisioning is still valid.
			 */
			if (error) {
				lpfc_printf_log(phba, KERN_INFO,
						LOG_MBOX | LOG_INIT,
						"2931 Detected extent resource "
						"change. Reallocating all "
						"extents.\n");
				rc = lpfc_sli4_dealloc_extent(phba,
						 LPFC_RSC_TYPE_FCOE_VFI);
				rc = lpfc_sli4_dealloc_extent(phba,
						 LPFC_RSC_TYPE_FCOE_VPI);
				rc = lpfc_sli4_dealloc_extent(phba,
						 LPFC_RSC_TYPE_FCOE_XRI);
				rc = lpfc_sli4_dealloc_extent(phba,
						 LPFC_RSC_TYPE_FCOE_RPI);
			} else
				return 0;
		}

		rc = lpfc_sli4_alloc_extent(phba, LPFC_RSC_TYPE_FCOE_VFI);
		if (unlikely(rc))
			goto err_exit;

		rc = lpfc_sli4_alloc_extent(phba, LPFC_RSC_TYPE_FCOE_VPI);
		if (unlikely(rc))
			goto err_exit;

		rc = lpfc_sli4_alloc_extent(phba, LPFC_RSC_TYPE_FCOE_RPI);
		if (unlikely(rc))
			goto err_exit;

		rc = lpfc_sli4_alloc_extent(phba, LPFC_RSC_TYPE_FCOE_XRI);
		if (unlikely(rc))
			goto err_exit;
		bf_set(lpfc_idx_rsrc_rdy, &phba->sli4_hba.sli4_flags,
		       LPFC_IDX_RSRC_RDY);
		return rc;
	} else {
		/*
		 * Flat (non-extent) provisioning: ids are a contiguous
		 * range [base, base + max) per type.  Release any prior
		 * provisioning first so a reset starts clean.
		 */
		if (bf_get(lpfc_idx_rsrc_rdy, &phba->sli4_hba.sli4_flags) ==
		    LPFC_IDX_RSRC_RDY) {
			lpfc_sli4_dealloc_resource_identifiers(phba);
			lpfc_sli4_remove_rpis(phba);
		}

		/* RPIs. */
		count = phba->sli4_hba.max_cfg_param.max_rpi;
		if (count <= 0) {
			lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
					"3279 Invalid provisioning of "
					"rpi:%d\n", count);
			rc = -EINVAL;
			goto err_exit;
		}
		base = phba->sli4_hba.max_cfg_param.rpi_base;
		longs = (count + BITS_PER_LONG - 1) / BITS_PER_LONG;
		phba->sli4_hba.rpi_bmask = kcalloc(longs,
						   sizeof(unsigned long),
						   GFP_KERNEL);
		if (unlikely(!phba->sli4_hba.rpi_bmask)) {
			rc = -ENOMEM;
			goto err_exit;
		}
		phba->sli4_hba.rpi_ids = kcalloc(count, sizeof(uint16_t),
						 GFP_KERNEL);
		if (unlikely(!phba->sli4_hba.rpi_ids)) {
			rc = -ENOMEM;
			goto free_rpi_bmask;
		}

		for (i = 0; i < count; i++)
			phba->sli4_hba.rpi_ids[i] = base + i;

		/* VPIs. */
		count = phba->sli4_hba.max_cfg_param.max_vpi;
		if (count <= 0) {
			lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
					"3280 Invalid provisioning of "
					"vpi:%d\n", count);
			rc = -EINVAL;
			goto free_rpi_ids;
		}
		base = phba->sli4_hba.max_cfg_param.vpi_base;
		longs = (count + BITS_PER_LONG - 1) / BITS_PER_LONG;
		phba->vpi_bmask = kcalloc(longs, sizeof(unsigned long),
					  GFP_KERNEL);
		if (unlikely(!phba->vpi_bmask)) {
			rc = -ENOMEM;
			goto free_rpi_ids;
		}
		phba->vpi_ids = kcalloc(count, sizeof(uint16_t),
					GFP_KERNEL);
		if (unlikely(!phba->vpi_ids)) {
			rc = -ENOMEM;
			goto free_vpi_bmask;
		}

		for (i = 0; i < count; i++)
			phba->vpi_ids[i] = base + i;

		/* XRIs. */
		count = phba->sli4_hba.max_cfg_param.max_xri;
		if (count <= 0) {
			lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
					"3281 Invalid provisioning of "
					"xri:%d\n", count);
			rc = -EINVAL;
			goto free_vpi_ids;
		}
		base = phba->sli4_hba.max_cfg_param.xri_base;
		longs = (count + BITS_PER_LONG - 1) / BITS_PER_LONG;
		phba->sli4_hba.xri_bmask = kcalloc(longs,
						   sizeof(unsigned long),
						   GFP_KERNEL);
		if (unlikely(!phba->sli4_hba.xri_bmask)) {
			rc = -ENOMEM;
			goto free_vpi_ids;
		}
		phba->sli4_hba.max_cfg_param.xri_used = 0;
		phba->sli4_hba.xri_ids = kcalloc(count, sizeof(uint16_t),
						 GFP_KERNEL);
		if (unlikely(!phba->sli4_hba.xri_ids)) {
			rc = -ENOMEM;
			goto free_xri_bmask;
		}

		for (i = 0; i < count; i++)
			phba->sli4_hba.xri_ids[i] = base + i;

		/* VFIs. */
		count = phba->sli4_hba.max_cfg_param.max_vfi;
		if (count <= 0) {
			lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
					"3282 Invalid provisioning of "
					"vfi:%d\n", count);
			rc = -EINVAL;
			goto free_xri_ids;
		}
		base = phba->sli4_hba.max_cfg_param.vfi_base;
		longs = (count + BITS_PER_LONG - 1) / BITS_PER_LONG;
		phba->sli4_hba.vfi_bmask = kcalloc(longs,
						   sizeof(unsigned long),
						   GFP_KERNEL);
		if (unlikely(!phba->sli4_hba.vfi_bmask)) {
			rc = -ENOMEM;
			goto free_xri_ids;
		}
		phba->sli4_hba.vfi_ids = kcalloc(count, sizeof(uint16_t),
						 GFP_KERNEL);
		if (unlikely(!phba->sli4_hba.vfi_ids)) {
			rc = -ENOMEM;
			goto free_vfi_bmask;
		}

		for (i = 0; i < count; i++)
			phba->sli4_hba.vfi_ids[i] = base + i;

		/* All ids provisioned - mark ready and return. */
		bf_set(lpfc_idx_rsrc_rdy, &phba->sli4_hba.sli4_flags,
		       LPFC_IDX_RSRC_RDY);
		return 0;
	}

	/* Error unwind for the flat-provisioning path (reverse order). */
 free_vfi_bmask:
	kfree(phba->sli4_hba.vfi_bmask);
	phba->sli4_hba.vfi_bmask = NULL;
 free_xri_ids:
	kfree(phba->sli4_hba.xri_ids);
	phba->sli4_hba.xri_ids = NULL;
 free_xri_bmask:
	kfree(phba->sli4_hba.xri_bmask);
	phba->sli4_hba.xri_bmask = NULL;
 free_vpi_ids:
	kfree(phba->vpi_ids);
	phba->vpi_ids = NULL;
 free_vpi_bmask:
	kfree(phba->vpi_bmask);
	phba->vpi_bmask = NULL;
 free_rpi_ids:
	kfree(phba->sli4_hba.rpi_ids);
	phba->sli4_hba.rpi_ids = NULL;
 free_rpi_bmask:
	kfree(phba->sli4_hba.rpi_bmask);
	phba->sli4_hba.rpi_bmask = NULL;
 err_exit:
	return rc;
}
6736
6737
6738
6739
6740
6741
6742
6743
6744int
6745lpfc_sli4_dealloc_resource_identifiers(struct lpfc_hba *phba)
6746{
6747 if (phba->sli4_hba.extents_in_use) {
6748 lpfc_sli4_dealloc_extent(phba, LPFC_RSC_TYPE_FCOE_VPI);
6749 lpfc_sli4_dealloc_extent(phba, LPFC_RSC_TYPE_FCOE_RPI);
6750 lpfc_sli4_dealloc_extent(phba, LPFC_RSC_TYPE_FCOE_XRI);
6751 lpfc_sli4_dealloc_extent(phba, LPFC_RSC_TYPE_FCOE_VFI);
6752 } else {
6753 kfree(phba->vpi_bmask);
6754 phba->sli4_hba.max_cfg_param.vpi_used = 0;
6755 kfree(phba->vpi_ids);
6756 bf_set(lpfc_vpi_rsrc_rdy, &phba->sli4_hba.sli4_flags, 0);
6757 kfree(phba->sli4_hba.xri_bmask);
6758 kfree(phba->sli4_hba.xri_ids);
6759 kfree(phba->sli4_hba.vfi_bmask);
6760 kfree(phba->sli4_hba.vfi_ids);
6761 bf_set(lpfc_vfi_rsrc_rdy, &phba->sli4_hba.sli4_flags, 0);
6762 bf_set(lpfc_idx_rsrc_rdy, &phba->sli4_hba.sli4_flags, 0);
6763 }
6764
6765 return 0;
6766}
6767
6768
6769
6770
6771
6772
6773
6774
6775
6776
6777
6778int
6779lpfc_sli4_get_allocated_extnts(struct lpfc_hba *phba, uint16_t type,
6780 uint16_t *extnt_cnt, uint16_t *extnt_size)
6781{
6782 bool emb;
6783 int rc = 0;
6784 uint16_t curr_blks = 0;
6785 uint32_t req_len, emb_len;
6786 uint32_t alloc_len, mbox_tmo;
6787 struct list_head *blk_list_head;
6788 struct lpfc_rsrc_blks *rsrc_blk;
6789 LPFC_MBOXQ_t *mbox;
6790 void *virtaddr = NULL;
6791 struct lpfc_mbx_nembed_rsrc_extent *n_rsrc;
6792 struct lpfc_mbx_alloc_rsrc_extents *rsrc_ext;
6793 union lpfc_sli4_cfg_shdr *shdr;
6794
6795 switch (type) {
6796 case LPFC_RSC_TYPE_FCOE_VPI:
6797 blk_list_head = &phba->lpfc_vpi_blk_list;
6798 break;
6799 case LPFC_RSC_TYPE_FCOE_XRI:
6800 blk_list_head = &phba->sli4_hba.lpfc_xri_blk_list;
6801 break;
6802 case LPFC_RSC_TYPE_FCOE_VFI:
6803 blk_list_head = &phba->sli4_hba.lpfc_vfi_blk_list;
6804 break;
6805 case LPFC_RSC_TYPE_FCOE_RPI:
6806 blk_list_head = &phba->sli4_hba.lpfc_rpi_blk_list;
6807 break;
6808 default:
6809 return -EIO;
6810 }
6811
6812
6813 list_for_each_entry(rsrc_blk, blk_list_head, list) {
6814 if (curr_blks == 0) {
6815
6816
6817
6818
6819
6820
6821
6822 *extnt_size = rsrc_blk->rsrc_size;
6823 }
6824 curr_blks++;
6825 }
6826
6827
6828
6829
6830
6831 emb_len = sizeof(MAILBOX_t) - sizeof(struct mbox_header) -
6832 sizeof(uint32_t);
6833
6834
6835
6836
6837
6838 emb = LPFC_SLI4_MBX_EMBED;
6839 req_len = emb_len;
6840 if (req_len > emb_len) {
6841 req_len = curr_blks * sizeof(uint16_t) +
6842 sizeof(union lpfc_sli4_cfg_shdr) +
6843 sizeof(uint32_t);
6844 emb = LPFC_SLI4_MBX_NEMBED;
6845 }
6846
6847 mbox = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
6848 if (!mbox)
6849 return -ENOMEM;
6850 memset(mbox, 0, sizeof(LPFC_MBOXQ_t));
6851
6852 alloc_len = lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON,
6853 LPFC_MBOX_OPCODE_GET_ALLOC_RSRC_EXTENT,
6854 req_len, emb);
6855 if (alloc_len < req_len) {
6856 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
6857 "2983 Allocated DMA memory size (x%x) is "
6858 "less than the requested DMA memory "
6859 "size (x%x)\n", alloc_len, req_len);
6860 rc = -ENOMEM;
6861 goto err_exit;
6862 }
6863 rc = lpfc_sli4_mbox_rsrc_extent(phba, mbox, curr_blks, type, emb);
6864 if (unlikely(rc)) {
6865 rc = -EIO;
6866 goto err_exit;
6867 }
6868
6869 if (!phba->sli4_hba.intr_enable)
6870 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
6871 else {
6872 mbox_tmo = lpfc_mbox_tmo_val(phba, mbox);
6873 rc = lpfc_sli_issue_mbox_wait(phba, mbox, mbox_tmo);
6874 }
6875
6876 if (unlikely(rc)) {
6877 rc = -EIO;
6878 goto err_exit;
6879 }
6880
6881
6882
6883
6884
6885
6886
6887 if (emb == LPFC_SLI4_MBX_EMBED) {
6888 rsrc_ext = &mbox->u.mqe.un.alloc_rsrc_extents;
6889 shdr = &rsrc_ext->header.cfg_shdr;
6890 *extnt_cnt = bf_get(lpfc_mbx_rsrc_cnt, &rsrc_ext->u.rsp);
6891 } else {
6892 virtaddr = mbox->sge_array->addr[0];
6893 n_rsrc = (struct lpfc_mbx_nembed_rsrc_extent *) virtaddr;
6894 shdr = &n_rsrc->cfg_shdr;
6895 *extnt_cnt = bf_get(lpfc_mbx_rsrc_cnt, n_rsrc);
6896 }
6897
6898 if (bf_get(lpfc_mbox_hdr_status, &shdr->response)) {
6899 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_INIT,
6900 "2984 Failed to read allocated resources "
6901 "for type %d - Status 0x%x Add'l Status 0x%x.\n",
6902 type,
6903 bf_get(lpfc_mbox_hdr_status, &shdr->response),
6904 bf_get(lpfc_mbox_hdr_add_status, &shdr->response));
6905 rc = -EIO;
6906 goto err_exit;
6907 }
6908 err_exit:
6909 lpfc_sli4_mbox_cmd_free(phba, mbox);
6910 return rc;
6911}
6912
6913
6914
6915
6916
6917
6918
6919
6920
6921
6922
6923
6924
6925
6926
6927
6928
6929
/**
 * lpfc_sli4_repost_sgl_list - Repost a driver SGL list to the port.
 * @phba: Pointer to HBA context object.
 * @sgl_list: The driver's SGL list (e.g. ELS sgls) to repost.
 * @cnt: Number of entries expected on @sgl_list.
 *
 * Walks the list accumulating runs of XRI-contiguous sgl entries and posts
 * each run as one block mailbox command; an entry that breaks XRI
 * contiguity terminates the current run.  Entries whose post fails are
 * freed and excluded; successfully posted entries are spliced back onto
 * @sgl_list.
 *
 * Return: the number of sgl entries successfully posted, or -EIO if none
 * were posted.
 */
static int
lpfc_sli4_repost_sgl_list(struct lpfc_hba *phba,
			  struct list_head *sgl_list, int cnt)
{
	struct lpfc_sglq *sglq_entry = NULL;
	struct lpfc_sglq *sglq_entry_next = NULL;
	struct lpfc_sglq *sglq_entry_first = NULL;
	int status, total_cnt;
	int post_cnt = 0, num_posted = 0, block_cnt = 0;
	int last_xritag = NO_XRI;
	LIST_HEAD(prep_sgl_list);	/* current contiguous run being built */
	LIST_HEAD(blck_sgl_list);	/* run ready to post as one block */
	LIST_HEAD(allc_sgl_list);	/* private snapshot of sgl_list */
	LIST_HEAD(post_sgl_list);	/* successfully posted entries */
	LIST_HEAD(free_sgl_list);	/* failed entries, to be freed */

	/* Take the whole list under lock so we can walk it unlocked. */
	spin_lock_irq(&phba->hbalock);
	spin_lock(&phba->sli4_hba.sgl_list_lock);
	list_splice_init(sgl_list, &allc_sgl_list);
	spin_unlock(&phba->sli4_hba.sgl_list_lock);
	spin_unlock_irq(&phba->hbalock);

	total_cnt = cnt;
	list_for_each_entry_safe(sglq_entry, sglq_entry_next,
				 &allc_sgl_list, list) {
		list_del_init(&sglq_entry->list);
		block_cnt++;
		if ((last_xritag != NO_XRI) &&
		    (sglq_entry->sli4_xritag != last_xritag + 1)) {
			/* XRI gap: close out the run built so far ... */
			list_splice_init(&prep_sgl_list, &blck_sgl_list);
			post_cnt = block_cnt - 1;
			/* ... and start a new run with this entry. */
			list_add_tail(&sglq_entry->list, &prep_sgl_list);
			block_cnt = 1;
		} else {
			/* Contiguous: extend the current run. */
			list_add_tail(&sglq_entry->list, &prep_sgl_list);
			/* Post when the run fills one block mailbox. */
			if (block_cnt == LPFC_NEMBED_MBOX_SGL_CNT) {
				list_splice_init(&prep_sgl_list,
						 &blck_sgl_list);
				post_cnt = block_cnt;
				block_cnt = 0;
			}
		}
		num_posted++;

		/* Track contiguity against the next entry. */
		last_xritag = sglq_entry->sli4_xritag;

		/* Last entry: flush whatever run remains. */
		if (num_posted == total_cnt) {
			if (post_cnt == 0) {
				list_splice_init(&prep_sgl_list,
						 &blck_sgl_list);
				post_cnt = block_cnt;
			} else if (block_cnt == 1) {
				/* A single leftover sgl is posted alone. */
				status = lpfc_sli4_post_sgl(phba,
						sglq_entry->phys, 0,
						sglq_entry->sli4_xritag);
				if (!status) {
					/* Posted - keep it. */
					list_add_tail(&sglq_entry->list,
						      &post_sgl_list);
				} else {
					/* Failed - drop it from the pool. */
					lpfc_printf_log(phba, KERN_WARNING,
						LOG_SLI,
						"3159 Failed to post "
						"sgl, xritag:x%x\n",
						sglq_entry->sli4_xritag);
					list_add_tail(&sglq_entry->list,
						      &free_sgl_list);
					total_cnt--;
				}
			}
		}

		/* Nothing staged to post on this iteration. */
		if (post_cnt == 0)
			continue;

		/* Post the staged run as one block mailbox command. */
		status = lpfc_sli4_post_sgl_list(phba, &blck_sgl_list,
						 post_cnt);

		if (!status) {
			/* Whole block posted - keep all its entries. */
			list_splice_init(&blck_sgl_list, &post_sgl_list);
		} else {
			/* Block failed - drop all its entries. */
			sglq_entry_first = list_first_entry(&blck_sgl_list,
							    struct lpfc_sglq,
							    list);
			lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
					"3160 Failed to post sgl-list, "
					"xritag:x%x-x%x\n",
					sglq_entry_first->sli4_xritag,
					(sglq_entry_first->sli4_xritag +
					 post_cnt - 1));
			list_splice_init(&blck_sgl_list, &free_sgl_list);
			total_cnt -= post_cnt;
		}

		/* No run in progress: reset contiguity tracking. */
		if (block_cnt == 0)
			last_xritag = NO_XRI;

		/* Reset for the next run. */
		post_cnt = 0;
	}

	/* Free the sgl entries the port refused. */
	lpfc_free_sgl_list(phba, &free_sgl_list);

	/* Return the posted entries to the caller's list, under lock. */
	if (!list_empty(&post_sgl_list)) {
		spin_lock_irq(&phba->hbalock);
		spin_lock(&phba->sli4_hba.sgl_list_lock);
		list_splice_init(&post_sgl_list, sgl_list);
		spin_unlock(&phba->sli4_hba.sgl_list_lock);
		spin_unlock_irq(&phba->hbalock);
	} else {
		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
				"3161 Failure to post sgl to port.\n");
		return -EIO;
	}

	/* Number of sgl entries actually posted. */
	return total_cnt;
}
7062
7063
7064
7065
7066
7067
7068
7069
7070
7071
7072
7073
7074
7075static int
7076lpfc_sli4_repost_io_sgl_list(struct lpfc_hba *phba)
7077{
7078 LIST_HEAD(post_nblist);
7079 int num_posted, rc = 0;
7080
7081
7082 lpfc_io_buf_flush(phba, &post_nblist);
7083
7084
7085 if (!list_empty(&post_nblist)) {
7086 num_posted = lpfc_sli4_post_io_sgl_list(
7087 phba, &post_nblist, phba->sli4_hba.io_xri_cnt);
7088
7089 if (num_posted == 0)
7090 rc = -EIO;
7091 }
7092 return rc;
7093}
7094
7095static void
7096lpfc_set_host_data(struct lpfc_hba *phba, LPFC_MBOXQ_t *mbox)
7097{
7098 uint32_t len;
7099
7100 len = sizeof(struct lpfc_mbx_set_host_data) -
7101 sizeof(struct lpfc_sli4_cfg_mhdr);
7102 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON,
7103 LPFC_MBOX_OPCODE_SET_HOST_DATA, len,
7104 LPFC_SLI4_MBX_EMBED);
7105
7106 mbox->u.mqe.un.set_host_data.param_id = LPFC_SET_HOST_OS_DRIVER_VERSION;
7107 mbox->u.mqe.un.set_host_data.param_len =
7108 LPFC_HOST_OS_DRIVER_VERSION_SIZE;
7109 snprintf(mbox->u.mqe.un.set_host_data.data,
7110 LPFC_HOST_OS_DRIVER_VERSION_SIZE,
7111 "Linux %s v"LPFC_DRIVER_VERSION,
7112 (phba->hba_flag & HBA_FCOE_MODE) ? "FCoE" : "FC");
7113}
7114
7115int
7116lpfc_post_rq_buffer(struct lpfc_hba *phba, struct lpfc_queue *hrq,
7117 struct lpfc_queue *drq, int count, int idx)
7118{
7119 int rc, i;
7120 struct lpfc_rqe hrqe;
7121 struct lpfc_rqe drqe;
7122 struct lpfc_rqb *rqbp;
7123 unsigned long flags;
7124 struct rqb_dmabuf *rqb_buffer;
7125 LIST_HEAD(rqb_buf_list);
7126
7127 spin_lock_irqsave(&phba->hbalock, flags);
7128 rqbp = hrq->rqbp;
7129 for (i = 0; i < count; i++) {
7130
7131 if (rqbp->buffer_count + i >= rqbp->entry_count - 1)
7132 break;
7133 rqb_buffer = rqbp->rqb_alloc_buffer(phba);
7134 if (!rqb_buffer)
7135 break;
7136 rqb_buffer->hrq = hrq;
7137 rqb_buffer->drq = drq;
7138 rqb_buffer->idx = idx;
7139 list_add_tail(&rqb_buffer->hbuf.list, &rqb_buf_list);
7140 }
7141 while (!list_empty(&rqb_buf_list)) {
7142 list_remove_head(&rqb_buf_list, rqb_buffer, struct rqb_dmabuf,
7143 hbuf.list);
7144
7145 hrqe.address_lo = putPaddrLow(rqb_buffer->hbuf.phys);
7146 hrqe.address_hi = putPaddrHigh(rqb_buffer->hbuf.phys);
7147 drqe.address_lo = putPaddrLow(rqb_buffer->dbuf.phys);
7148 drqe.address_hi = putPaddrHigh(rqb_buffer->dbuf.phys);
7149 rc = lpfc_sli4_rq_put(hrq, drq, &hrqe, &drqe);
7150 if (rc < 0) {
7151 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
7152 "6421 Cannot post to HRQ %d: %x %x %x "
7153 "DRQ %x %x\n",
7154 hrq->queue_id,
7155 hrq->host_index,
7156 hrq->hba_index,
7157 hrq->entry_count,
7158 drq->host_index,
7159 drq->hba_index);
7160 rqbp->rqb_free_buffer(phba, rqb_buffer);
7161 } else {
7162 list_add_tail(&rqb_buffer->hbuf.list,
7163 &rqbp->rqb_buffer_list);
7164 rqbp->buffer_count++;
7165 }
7166 }
7167 spin_unlock_irqrestore(&phba->hbalock, flags);
7168 return 1;
7169}
7170
7171
7172
7173
7174
7175
7176
7177
7178
7179
/**
 * lpfc_sli4_hba_setup - Main SLI4 device initialization
 * @phba: pointer to lpfc hba data structure.
 *
 * Brings up an SLI-4 port: resets the PCI function, reads revision/VPD,
 * negotiates features, allocates resource identifiers, creates and sets
 * up the queue set, reposts SGL lists, registers the FCFI (FC mode) and
 * finally starts timers and (optionally) initializes the link.
 *
 * Return: 0 on success; negative errno (or a raw mailbox status from the
 * FCFI registration path) on failure.  All error paths release @mboxq.
 */
int
lpfc_sli4_hba_setup(struct lpfc_hba *phba)
{
	int rc, i, cnt, len;
	LPFC_MBOXQ_t *mboxq;
	struct lpfc_mqe *mqe;
	uint8_t *vpd;
	uint32_t vpd_size;
	uint32_t ftr_rsp = 0;
	struct Scsi_Host *shost = lpfc_shost_from_vport(phba->pport);
	struct lpfc_vport *vport = phba->pport;
	struct lpfc_dmabuf *mp;
	struct lpfc_rqb *rqbp;

	/* Perform a PCI function reset to start from a clean state. */
	rc = lpfc_pci_function_reset(phba);
	if (unlikely(rc))
		return -ENODEV;

	/* Check the HBA POST status; mark SLI active on success. */
	rc = lpfc_sli4_post_status_check(phba);
	if (unlikely(rc))
		return -ENODEV;
	else {
		spin_lock_irq(&phba->hbalock);
		phba->sli.sli_flag |= LPFC_SLI_ACTIVE;
		spin_unlock_irq(&phba->hbalock);
	}

	/*
	 * Allocate a single mailbox container reused for every mailbox
	 * command issued during setup.
	 */
	mboxq = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!mboxq)
		return -ENOMEM;

	/* Issue READ_REV to get the VPD and revision data. */
	vpd_size = SLI4_PAGE_SIZE;
	vpd = kzalloc(vpd_size, GFP_KERNEL);
	if (!vpd) {
		rc = -ENOMEM;
		goto out_free_mbox;
	}

	rc = lpfc_sli4_read_rev(phba, mboxq, vpd, &vpd_size);
	if (unlikely(rc)) {
		kfree(vpd);
		goto out_free_mbox;
	}

	mqe = &mboxq->u.mqe;
	phba->sli_rev = bf_get(lpfc_mbx_rd_rev_sli_lvl, &mqe->un.read_rev);
	if (bf_get(lpfc_mbx_rd_rev_fcoe, &mqe->un.read_rev)) {
		phba->hba_flag |= HBA_FCOE_MODE;
		phba->fcp_embed_io = 0;	/* embedded IO not used in FCoE mode */
	} else {
		phba->hba_flag &= ~HBA_FCOE_MODE;
	}

	if (bf_get(lpfc_mbx_rd_rev_cee_ver, &mqe->un.read_rev) ==
		LPFC_DCBX_CEE_MODE)
		phba->hba_flag |= HBA_FIP_SUPPORT;
	else
		phba->hba_flag &= ~HBA_FIP_SUPPORT;

	phba->hba_flag &= ~HBA_FCP_IOQ_FLUSH;

	/* This driver path only supports SLI revision 4. */
	if (phba->sli_rev != LPFC_SLI_REV4) {
		lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
			"0376 READ_REV Error. SLI Level %d "
			"FCoE enabled %d\n",
			phba->sli_rev, phba->hba_flag & HBA_FCOE_MODE);
		rc = -EIO;
		kfree(vpd);
		goto out_free_mbox;
	}

	/*
	 * Continue initialization with default values even if the driver
	 * fails to read FCoE parameters (warn only).
	 */
	if (phba->hba_flag & HBA_FCOE_MODE &&
	    lpfc_sli4_read_fcoe_params(phba))
		lpfc_printf_log(phba, KERN_WARNING, LOG_MBOX | LOG_INIT,
			"2570 Failed to read FCoE parameters\n");

	/* Retrieve the physical port name; failures are non-fatal. */
	rc = lpfc_sli4_retrieve_pport_name(phba);
	if (!rc)
		lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI,
				"3080 Successful retrieving SLI4 device "
				"physical port name: %s.\n", phba->Port);

	rc = lpfc_sli4_get_ctl_attr(phba);
	if (!rc)
		lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI,
				"8351 Successful retrieving SLI4 device "
				"CTL ATTR\n");

	/*
	 * Evaluate the read rev and VPD data.  A parse failure falls back
	 * to defaults and is not fatal.
	 */
	rc = lpfc_parse_vpd(phba, vpd, vpd_size);
	if (unlikely(!rc)) {
		lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
				"0377 Error %d parsing vpd. "
				"Using defaults.\n", rc);
		rc = 0;
	}
	kfree(vpd);

	/* Save information as VPD data */
	phba->vpd.rev.biuRev = mqe->un.read_rev.first_hw_rev;
	phba->vpd.rev.smRev = mqe->un.read_rev.second_hw_rev;

	/*
	 * G7 ASIC rev 1 with if_type 6 cannot use embedded NVMe commands;
	 * quietly override the config option in that case.
	 */
	if ((bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) ==
	    LPFC_SLI_INTF_IF_TYPE_6) &&
	    (phba->vpd.rev.biuRev == LPFC_G7_ASIC_1) &&
	    (phba->vpd.rev.smRev == 0) &&
	    (phba->cfg_nvme_embed_cmd == 1))
		phba->cfg_nvme_embed_cmd = 0;

	phba->vpd.rev.endecRev = mqe->un.read_rev.third_hw_rev;
	phba->vpd.rev.fcphHigh = bf_get(lpfc_mbx_rd_rev_fcph_high,
					 &mqe->un.read_rev);
	phba->vpd.rev.fcphLow = bf_get(lpfc_mbx_rd_rev_fcph_low,
				       &mqe->un.read_rev);
	phba->vpd.rev.feaLevelHigh = bf_get(lpfc_mbx_rd_rev_ftr_lvl_high,
					    &mqe->un.read_rev);
	phba->vpd.rev.feaLevelLow = bf_get(lpfc_mbx_rd_rev_ftr_lvl_low,
					   &mqe->un.read_rev);
	phba->vpd.rev.sli1FwRev = mqe->un.read_rev.fw_id_rev;
	memcpy(phba->vpd.rev.sli1FwName, mqe->un.read_rev.fw_name, 16);
	phba->vpd.rev.sli2FwRev = mqe->un.read_rev.ulp_fw_id_rev;
	memcpy(phba->vpd.rev.sli2FwName, mqe->un.read_rev.ulp_fw_name, 16);
	phba->vpd.rev.opFwRev = mqe->un.read_rev.fw_id_rev;
	memcpy(phba->vpd.rev.opFwName, mqe->un.read_rev.fw_name, 16);
	lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI,
			"(%d):0380 READ_REV Status x%x "
			"fw_rev:%s fcphHi:%x fcphLo:%x flHi:%x flLo:%x\n",
			mboxq->vport ? mboxq->vport->vpi : 0,
			bf_get(lpfc_mqe_status, mqe),
			phba->vpd.rev.opFwName,
			phba->vpd.rev.fcphHigh, phba->vpd.rev.fcphLow,
			phba->vpd.rev.feaLevelHigh, phba->vpd.rev.feaLevelLow);

	/* Cap the LUN queue depth at max_xri/8 if the config exceeds it. */
	rc = (phba->sli4_hba.max_cfg_param.max_xri >> 3);
	if (phba->pport->cfg_lun_queue_depth > rc) {
		lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
				"3362 LUN queue depth changed from %d to %d\n",
				phba->pport->cfg_lun_queue_depth, rc);
		phba->pport->cfg_lun_queue_depth = rc;
	}

	/* if_type 0: enable Unrecoverable Error recovery if supported. */
	if (bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) ==
	    LPFC_SLI_INTF_IF_TYPE_0) {
		lpfc_set_features(phba, mboxq, LPFC_SET_UE_RECOVERY);
		rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
		if (rc == MBX_SUCCESS) {
			phba->hba_flag |= HBA_RECOVERABLE_UE;
			/* Poll error attention every second for UE. */
			phba->eratt_poll_interval = 1;
			phba->sli4_hba.ue_to_sr = bf_get(
					lpfc_mbx_set_feature_UESR,
					&mboxq->u.mqe.un.set_feature);
			phba->sli4_hba.ue_to_rp = bf_get(
					lpfc_mbx_set_feature_UERP,
					&mboxq->u.mqe.un.set_feature);
		}
	}

	if (phba->cfg_enable_mds_diags && phba->mds_diags_support) {
		/* Enable MDS diagnostics; disable support on failure. */
		lpfc_set_features(phba, mboxq, LPFC_SET_MDS_DIAGS);
		rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
		if (rc != MBX_SUCCESS)
			phba->mds_diags_support = 0;
	}

	/*
	 * Discover the port's supported feature set and negotiate the
	 * features the driver requires.
	 */
	lpfc_request_features(phba, mboxq);
	rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
	if (unlikely(rc)) {
		rc = -EIO;
		goto out_free_mbox;
	}

	/*
	 * The port must support FCP initiator mode; record a mismatch
	 * otherwise.
	 */
	if (!(bf_get(lpfc_mbx_rq_ftr_rsp_fcpi, &mqe->un.req_ftrs))) {
		lpfc_printf_log(phba, KERN_WARNING, LOG_MBOX | LOG_SLI,
				"0378 No support for fcpi mode.\n");
		ftr_rsp++;
	}

	/* Performance hints are only meaningful in FCoE mode. */
	if (phba->hba_flag & HBA_FCOE_MODE) {
		if (bf_get(lpfc_mbx_rq_ftr_rsp_perfh, &mqe->un.req_ftrs))
			phba->sli3_options |= LPFC_SLI4_PERFH_ENABLED;
		else
			phba->sli3_options &= ~LPFC_SLI4_PERFH_ENABLED;
	}

	/*
	 * If BlockGuard was requested but the port did not grant DIF
	 * support, turn it off and record the mismatch.
	 */
	if (phba->sli3_options & LPFC_SLI3_BG_ENABLED) {
		if (!(bf_get(lpfc_mbx_rq_ftr_rsp_dif, &mqe->un.req_ftrs))) {
			phba->cfg_enable_bg = 0;
			phba->sli3_options &= ~LPFC_SLI3_BG_ENABLED;
			ftr_rsp++;
		}
	}

	if (phba->max_vpi && phba->cfg_enable_npiv &&
	    !(bf_get(lpfc_mbx_rq_ftr_rsp_npiv, &mqe->un.req_ftrs)))
		ftr_rsp++;

	/* Log any feature mismatches and disable the affected features. */
	if (ftr_rsp) {
		lpfc_printf_log(phba, KERN_WARNING, LOG_MBOX | LOG_SLI,
				"0379 Feature Mismatch Data: x%08x %08x "
				"x%x x%x x%x\n", mqe->un.req_ftrs.word2,
				mqe->un.req_ftrs.word3, phba->cfg_enable_bg,
				phba->cfg_enable_npiv, phba->max_vpi);
		if (!(bf_get(lpfc_mbx_rq_ftr_rsp_dif, &mqe->un.req_ftrs)))
			phba->cfg_enable_bg = 0;
		if (!(bf_get(lpfc_mbx_rq_ftr_rsp_npiv, &mqe->un.req_ftrs)))
			phba->cfg_enable_npiv = 0;
	}

	/* These SLI3-style options are always enabled for SLI4 ports. */
	spin_lock_irq(&phba->hbalock);
	phba->sli3_options |= (LPFC_SLI3_NPIV_ENABLED | LPFC_SLI3_HBQ_ENABLED);
	spin_unlock_irq(&phba->hbalock);

	/* Allocate all resource identifiers (RPIs, VPIs, VFIs, XRIs). */
	rc = lpfc_sli4_alloc_resource_identifiers(phba);
	if (rc) {
		lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
				"2920 Failed to alloc Resource IDs "
				"rc = x%x\n", rc);
		goto out_free_mbox;
	}

	lpfc_set_host_data(phba, mboxq);

	/* Reporting the driver version to firmware is best-effort. */
	rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
	if (rc) {
		lpfc_printf_log(phba, KERN_WARNING, LOG_MBOX | LOG_SLI,
				"2134 Failed to set host os driver version %x",
				rc);
	}

	/* Read the port's service parameters. */
	rc = lpfc_read_sparam(phba, mboxq, vport->vpi);
	if (rc) {
		phba->link_state = LPFC_HBA_ERROR;
		rc = -ENOMEM;
		goto out_free_mbox;
	}

	mboxq->vport = vport;
	rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
	mp = (struct lpfc_dmabuf *)mboxq->ctx_buf;
	if (rc == MBX_SUCCESS) {
		memcpy(&vport->fc_sparam, mp->virt, sizeof(struct serv_parm));
		rc = 0;
	}

	/*
	 * The DMA buffer attached by lpfc_read_sparam() is freed here in
	 * both the success and failure cases.
	 */
	lpfc_mbuf_free(phba, mp->virt, mp->phys);
	kfree(mp);
	mboxq->ctx_buf = NULL;
	if (unlikely(rc)) {
		lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
				"0382 READ_SPARAM command failed "
				"status %d, mbxStatus x%x\n",
				rc, bf_get(lpfc_mqe_status, mqe));
		phba->link_state = LPFC_HBA_ERROR;
		rc = -EIO;
		goto out_free_mbox;
	}

	lpfc_update_vport_wwn(vport);

	/* Publish the WWNs through the FC transport class. */
	fc_host_node_name(shost) = wwn_to_u64(vport->fc_nodename.u.wwn);
	fc_host_port_name(shost) = wwn_to_u64(vport->fc_portname.u.wwn);

	/* Create all the SLI4 queues and then set them up with the port. */
	rc = lpfc_sli4_queue_create(phba);
	if (rc) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"3089 Failed to allocate queues\n");
		rc = -ENODEV;
		goto out_free_mbox;
	}

	rc = lpfc_sli4_queue_setup(phba);
	if (unlikely(rc)) {
		lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
				"0381 Error %d during queue setup.\n ", rc);
		goto out_stop_timers;
	}

	lpfc_sli4_setup(phba);
	lpfc_sli4_queue_init(phba);

	/* Update and repost the ELS XRI-SGL entries. */
	rc = lpfc_sli4_els_sgl_update(phba);
	if (unlikely(rc)) {
		lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
				"1400 Failed to update xri-sgl size and "
				"mapping: %d\n", rc);
		goto out_destroy_queue;
	}

	/* A non-negative return is the count of successfully posted SGLs. */
	rc = lpfc_sli4_repost_sgl_list(phba, &phba->sli4_hba.lpfc_els_sgl_list,
				       phba->sli4_hba.els_xri_cnt);
	if (unlikely(rc < 0)) {
		lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
				"0582 Error %d during els sgl post "
				"operation\n", rc);
		rc = -ENODEV;
		goto out_destroy_queue;
	}
	phba->sli4_hba.els_xri_cnt = rc;

	if (phba->nvmet_support) {
		/* Target mode: update and repost the NVMET SGL list. */
		rc = lpfc_sli4_nvmet_sgl_update(phba);
		if (unlikely(rc)) {
			lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
					"6308 Failed to update nvmet-sgl size "
					"and mapping: %d\n", rc);
			goto out_destroy_queue;
		}

		rc = lpfc_sli4_repost_sgl_list(
			phba,
			&phba->sli4_hba.lpfc_nvmet_sgl_list,
			phba->sli4_hba.nvmet_xri_cnt);
		if (unlikely(rc < 0)) {
			lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
					"3117 Error %d during nvmet "
					"sgl post\n", rc);
			rc = -ENODEV;
			goto out_destroy_queue;
		}
		phba->sli4_hba.nvmet_xri_cnt = rc;

		/* IOCB pool sized by config plus the NVMET XRI count. */
		cnt = phba->cfg_iocb_cnt * 1024;
		cnt += phba->sli4_hba.nvmet_xri_cnt;
	} else {
		/* Initiator mode: update and repost the IO SGL list. */
		rc = lpfc_sli4_io_sgl_update(phba);
		if (unlikely(rc)) {
			lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
					"6082 Failed to update nvme-sgl size "
					"and mapping: %d\n", rc);
			goto out_destroy_queue;
		}

		rc = lpfc_sli4_repost_io_sgl_list(phba);
		if (unlikely(rc)) {
			lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
					"6116 Error %d during nvme sgl post "
					"operation\n", rc);
			rc = -ENODEV;
			goto out_destroy_queue;
		}
		cnt = phba->cfg_iocb_cnt * 1024;
	}

	/* Initialize the IOCB lookup array once, on first setup. */
	if (!phba->sli.iocbq_lookup) {
		lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
				"2821 initialize iocb list %d total %d\n",
				phba->cfg_iocb_cnt, cnt);
		rc = lpfc_init_iocb_list(phba, cnt);
		if (rc) {
			lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
					"1413 Failed to init iocb list.\n");
			goto out_destroy_queue;
		}
	}

	if (phba->nvmet_support)
		lpfc_nvmet_create_targetport(phba);

	if (phba->nvmet_support && phba->cfg_nvmet_mrq) {
		/* Post initial buffers to all NVMET MRQ header/data pairs. */
		for (i = 0; i < phba->cfg_nvmet_mrq; i++) {
			rqbp = phba->sli4_hba.nvmet_mrq_hdr[i]->rqbp;
			INIT_LIST_HEAD(&rqbp->rqb_buffer_list);
			rqbp->rqb_alloc_buffer = lpfc_sli4_nvmet_alloc;
			rqbp->rqb_free_buffer = lpfc_sli4_nvmet_free;
			rqbp->entry_count = LPFC_NVMET_RQE_DEF_COUNT;
			rqbp->buffer_count = 0;

			lpfc_post_rq_buffer(
				phba, phba->sli4_hba.nvmet_mrq_hdr[i],
				phba->sli4_hba.nvmet_mrq_data[i],
				phba->cfg_nvmet_mrq_post, i);
		}
	}

	/* Post the RPI header region to the device. */
	rc = lpfc_sli4_post_all_rpi_hdrs(phba);
	if (unlikely(rc)) {
		lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
				"0393 Error %d during rpi post operation\n",
				rc);
		rc = -ENODEV;
		goto out_destroy_queue;
	}
	lpfc_sli4_node_prep(phba);

	if (!(phba->hba_flag & HBA_FCOE_MODE)) {
		if ((phba->nvmet_support == 0) || (phba->cfg_nvmet_mrq == 1)) {
			/*
			 * FC mode, single RQ: register the FCFI so the
			 * device performs FLOGI.
			 */
			lpfc_reg_fcfi(phba, mboxq);
			mboxq->vport = phba->pport;
			rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
			if (rc != MBX_SUCCESS)
				goto out_unset_queue;
			rc = 0;
			phba->fcf.fcfi = bf_get(lpfc_reg_fcfi_fcfi,
						&mboxq->u.mqe.un.reg_fcfi);
		} else {
			/*
			 * Multiple RQs: first register the RQ config, then
			 * register the FCFI against it.
			 */
			lpfc_reg_fcfi_mrq(phba, mboxq, 0);
			mboxq->vport = phba->pport;
			rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
			if (rc != MBX_SUCCESS)
				goto out_unset_queue;
			rc = 0;
			phba->fcf.fcfi = bf_get(lpfc_reg_fcfi_mrq_fcfi,
						&mboxq->u.mqe.un.reg_fcfi_mrq);

			/* Next, register the MRQs */
			lpfc_reg_fcfi_mrq(phba, mboxq, 1);
			mboxq->vport = phba->pport;
			rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
			if (rc != MBX_SUCCESS)
				goto out_unset_queue;
			rc = 0;
		}
		/* Check if the port is configured to be disabled. */
		lpfc_sli_read_link_ste(phba);
	}

	/*
	 * Initiator mode only: allocate the IO buffer pool and optionally
	 * the multi-XRI balancing pools.
	 */
	if (phba->nvmet_support == 0) {
		if (phba->sli4_hba.io_xri_cnt == 0) {
			len = lpfc_new_io_buf(
					      phba, phba->sli4_hba.io_xri_max);
			if (len == 0) {
				rc = -ENOMEM;
				goto out_unset_queue;
			}

			if (phba->cfg_xri_rebalancing)
				lpfc_create_multixri_pools(phba);
		}
	} else {
		phba->cfg_xri_rebalancing = 0;
	}

	/* Allow asynchronous mailbox commands from here on. */
	spin_lock_irq(&phba->hbalock);
	phba->sli.sli_flag &= ~LPFC_SLI_ASYNC_MBX_BLK;
	spin_unlock_irq(&phba->hbalock);

	/* Post receive buffers to the device. */
	lpfc_sli4_rb_setup(phba);

	/* Reset the HBA FCF states. */
	phba->fcf.fcf_flag = 0;
	phba->fcf.current_rec.flag = 0;

	/* Start the ELS watchdog timer (2 * RA_TOV). */
	mod_timer(&vport->els_tmofunc,
		  jiffies + msecs_to_jiffies(1000 * (phba->fc_ratov * 2)));

	/* Start heartbeat timer */
	mod_timer(&phba->hb_tmofunc,
		  jiffies + msecs_to_jiffies(1000 * LPFC_HB_MBOX_INTERVAL));
	phba->hb_outstanding = 0;
	phba->last_completion_time = jiffies;

	/* Start the EQ-delay auto-tuning work if enabled. */
	if (phba->cfg_auto_imax)
		queue_delayed_work(phba->wq, &phba->eq_delay_work,
				   msecs_to_jiffies(LPFC_EQ_DELAY_MSECS));

	/* Start error attention (ERATT) polling timer */
	mod_timer(&phba->eratt_poll,
		  jiffies + msecs_to_jiffies(1000 * phba->eratt_poll_interval));

	/* Enable PCIe Advanced Error Reporting if requested and supported. */
	if (phba->cfg_aer_support == 1 && !(phba->hba_flag & HBA_AER_ENABLED)) {
		rc = pci_enable_pcie_error_reporting(phba->pcidev);
		if (!rc) {
			lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
					"2829 This device supports "
					"Advanced Error Reporting (AER)\n");
			spin_lock_irq(&phba->hbalock);
			phba->hba_flag |= HBA_AER_ENABLED;
			spin_unlock_irq(&phba->hbalock);
		} else {
			lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
					"2830 This device does not support "
					"Advanced Error Reporting (AER)\n");
			phba->cfg_aer_support = 0;
		}
		rc = 0;
	}

	/*
	 * The port is ready; set the host's link state to LINK_DOWN until
	 * a link-up event is received from the firmware.
	 */
	spin_lock_irq(&phba->hbalock);
	phba->link_state = LPFC_LINK_DOWN;

	/* Initialize each configured trunked link to LINK_DOWN as well. */
	if (bf_get(lpfc_conf_trunk_port0, &phba->sli4_hba))
		phba->trunk_link.link0.state = LPFC_LINK_DOWN;
	if (bf_get(lpfc_conf_trunk_port1, &phba->sli4_hba))
		phba->trunk_link.link1.state = LPFC_LINK_DOWN;
	if (bf_get(lpfc_conf_trunk_port2, &phba->sli4_hba))
		phba->trunk_link.link2.state = LPFC_LINK_DOWN;
	if (bf_get(lpfc_conf_trunk_port3, &phba->sli4_hba))
		phba->trunk_link.link3.state = LPFC_LINK_DOWN;
	spin_unlock_irq(&phba->hbalock);

	/* Arm the CQs and then EQs on device */
	lpfc_sli4_arm_cqeq_intr(phba);

	/* Indicate device interrupt mode */
	phba->sli4_hba.intr_enable = 1;

	if (!(phba->hba_flag & HBA_FCOE_MODE) &&
	    (phba->hba_flag & LINK_DISABLED)) {
		/* Port configured link-disabled: force the link down. */
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT | LOG_SLI,
				"3103 Adapter Link is disabled.\n");
		lpfc_down_link(phba, mboxq);
		rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
		if (rc != MBX_SUCCESS) {
			lpfc_printf_log(phba, KERN_ERR, LOG_INIT | LOG_SLI,
					"3104 Adapter failed to issue "
					"DOWN_LINK mbox cmd, rc:x%x\n", rc);
			goto out_io_buff_free;
		}
	} else if (phba->cfg_suppress_link_up == LPFC_INITIALIZE_LINK) {
		/* don't perform init_link on SLI4 FC port loopback test */
		if (!(phba->link_flag & LS_LOOPBACK_MODE)) {
			rc = phba->lpfc_hba_init_link(phba, MBX_NOWAIT);
			if (rc)
				goto out_io_buff_free;
		}
	}
	mempool_free(mboxq, phba->mbox_mem_pool);
	return rc;
out_io_buff_free:
	/* Free allocated IO buffers */
	lpfc_io_free(phba);
out_unset_queue:
	/* Unset all the queues set up in this routine when error out */
	lpfc_sli4_queue_unset(phba);
out_destroy_queue:
	lpfc_free_iocb_list(phba);
	lpfc_sli4_queue_destroy(phba);
out_stop_timers:
	lpfc_stop_hba_timers(phba);
out_free_mbox:
	mempool_free(mboxq, phba->mbox_mem_pool);
	return rc;
}
7797
7798
7799
7800
7801
7802
7803
7804
7805
7806
7807
7808
7809
7810void
7811lpfc_mbox_timeout(struct timer_list *t)
7812{
7813 struct lpfc_hba *phba = from_timer(phba, t, sli.mbox_tmo);
7814 unsigned long iflag;
7815 uint32_t tmo_posted;
7816
7817 spin_lock_irqsave(&phba->pport->work_port_lock, iflag);
7818 tmo_posted = phba->pport->work_port_events & WORKER_MBOX_TMO;
7819 if (!tmo_posted)
7820 phba->pport->work_port_events |= WORKER_MBOX_TMO;
7821 spin_unlock_irqrestore(&phba->pport->work_port_lock, iflag);
7822
7823 if (!tmo_posted)
7824 lpfc_worker_wake_up(phba);
7825 return;
7826}
7827
7828
7829
7830
7831
7832
7833
7834
7835
/**
 * lpfc_sli4_mbox_completions_pending - Check for unprocessed mbox completions
 * @phba: pointer to lpfc hba data structure.
 *
 * Walks the valid (but not yet consumed) entries of the mailbox CQ,
 * starting at the current hba_index, looking for a completed,
 * non-asynchronous mailbox CQE whose interrupt may have been missed.
 *
 * Return: true if such a completion is sitting in the CQ, else false.
 */
static bool
lpfc_sli4_mbox_completions_pending(struct lpfc_hba *phba)
{

	uint32_t idx;
	struct lpfc_queue *mcq;
	struct lpfc_mcqe *mcqe;
	bool pending_completions = false;
	uint8_t qe_valid;

	/* Only meaningful on SLI-4 ports. */
	if (unlikely(!phba) || (phba->sli_rev != LPFC_SLI_REV4))
		return false;

	/* Check for completions on mailbox completion queue */

	mcq = phba->sli4_hba.mbx_cq;
	idx = mcq->hba_index;
	qe_valid = mcq->qe_valid;
	/* Walk while the entry's valid bit matches the current phase. */
	while (bf_get_le32(lpfc_cqe_valid,
	       (struct lpfc_cqe *)lpfc_sli4_qe(mcq, idx)) == qe_valid) {
		mcqe = (struct lpfc_mcqe *)(lpfc_sli4_qe(mcq, idx));
		/* A completed, non-async entry is a missed mbox completion. */
		if (bf_get_le32(lpfc_trailer_completed, mcqe) &&
		    (!bf_get_le32(lpfc_trailer_async, mcqe))) {
			pending_completions = true;
			break;
		}
		idx = (idx + 1) % mcq->entry_count;
		/* Stop after one full wrap back to the starting index. */
		if (mcq->hba_index == idx)
			break;

		/* if the index wrapped around, toggle the valid bit */
		if (phba->sli4_hba.pc_sli4_params.cqav && !idx)
			qe_valid = (qe_valid) ? 0 : 1;
	}
	return pending_completions;

}
7873
7874
7875
7876
7877
7878
7879
7880
7881
7882
7883
7884
7885static bool
7886lpfc_sli4_process_missed_mbox_completions(struct lpfc_hba *phba)
7887{
7888 struct lpfc_sli4_hba *sli4_hba = &phba->sli4_hba;
7889 uint32_t eqidx;
7890 struct lpfc_queue *fpeq = NULL;
7891 struct lpfc_queue *eq;
7892 bool mbox_pending;
7893
7894 if (unlikely(!phba) || (phba->sli_rev != LPFC_SLI_REV4))
7895 return false;
7896
7897
7898 if (sli4_hba->hdwq) {
7899 for (eqidx = 0; eqidx < phba->cfg_irq_chann; eqidx++) {
7900 eq = phba->sli4_hba.hba_eq_hdl[eqidx].eq;
7901 if (eq->queue_id == sli4_hba->mbx_cq->assoc_qid) {
7902 fpeq = eq;
7903 break;
7904 }
7905 }
7906 }
7907 if (!fpeq)
7908 return false;
7909
7910
7911
7912 sli4_hba->sli4_eq_clr_intr(fpeq);
7913
7914
7915
7916 mbox_pending = lpfc_sli4_mbox_completions_pending(phba);
7917
7918
7919
7920
7921
7922
7923
7924
7925 if (mbox_pending)
7926
7927 lpfc_sli4_process_eq(phba, fpeq);
7928 else
7929
7930 sli4_hba->sli4_write_eq_db(phba, fpeq, 0, LPFC_QUEUE_REARM);
7931
7932 return mbox_pending;
7933
7934}
7935
7936
7937
7938
7939
7940
7941
7942
7943
/**
 * lpfc_mbox_timeout_handler - Worker-thread handler for a mailbox timeout
 * @phba: pointer to lpfc hba data structure.
 *
 * First tries to recover a completion whose interrupt was missed; if the
 * timeout is genuine, it marks the SLI inactive, aborts outstanding FCP
 * rings and resets the board.
 */
void
lpfc_mbox_timeout_handler(struct lpfc_hba *phba)
{
	LPFC_MBOXQ_t *pmbox = phba->sli.mbox_active;
	MAILBOX_t *mb = NULL;

	struct lpfc_sli *psli = &phba->sli;

	/* If the completion was merely missed, process it and return. */
	if (lpfc_sli4_process_missed_mbox_completions(phba))
		return;

	if (pmbox != NULL)
		mb = &pmbox->u.mb;
	/*
	 * Re-check mbox_active under the lock: the command may have
	 * completed (and been cleared) between the snapshot above and
	 * this handler actually running.
	 */
	spin_lock_irq(&phba->hbalock);
	if (pmbox == NULL) {
		lpfc_printf_log(phba, KERN_WARNING,
				LOG_MBOX | LOG_SLI,
				"0353 Active Mailbox cleared - mailbox timeout "
				"exiting\n");
		spin_unlock_irq(&phba->hbalock);
		return;
	}

	/* Mbox cmd <mbxCommand> timeout */
	lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
			"0310 Mailbox command x%x timeout Data: x%x x%x x%p\n",
			mb->mbxCommand,
			phba->pport->port_state,
			phba->sli.sli_flag,
			phba->sli.mbox_active);
	spin_unlock_irq(&phba->hbalock);

	/*
	 * Clear the pending timeout event and take the port down: mark
	 * the link state unknown and the SLI layer inactive so no
	 * further commands are attempted on a hung mailbox interface.
	 */
	spin_lock_irq(&phba->pport->work_port_lock);
	phba->pport->work_port_events &= ~WORKER_MBOX_TMO;
	spin_unlock_irq(&phba->pport->work_port_lock);
	spin_lock_irq(&phba->hbalock);
	phba->link_state = LPFC_LINK_UNKNOWN;
	psli->sli_flag &= ~LPFC_SLI_ACTIVE;
	spin_unlock_irq(&phba->hbalock);

	lpfc_sli_abort_fcp_rings(phba);

	lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
			"0345 Resetting board due to mailbox timeout\n");

	/* Reset the HBA device */
	lpfc_reset_hba(phba);
}
8002
8003
8004
8005
8006
8007
8008
8009
8010
8011
8012
8013
8014
8015
8016
8017
8018
8019
8020
8021
8022
8023
8024
8025
8026
8027
8028
/**
 * lpfc_sli_issue_mbox_s3 - Issue an SLI-3 mailbox command to firmware
 * @phba: pointer to lpfc hba data structure.
 * @pmbox: mailbox to issue; NULL means "service the pending mbox queue".
 * @flag: MBX_NOWAIT (interrupt-driven) or MBX_POLL (busy-wait here).
 *
 * Writes the mailbox either to the host-memory mailbox (when the SLI is
 * active) or directly to the adapter's SLIM region, rings the mailbox
 * attention doorbell, and for MBX_POLL spins until the command is owned
 * by the host again or the per-command timeout expires.
 *
 * Return: MBX_SUCCESS, MBX_BUSY (another command active, queued instead),
 * the firmware mbxStatus for a polled command, or MBX_NOT_FINISHED on
 * any failure to issue.
 */
static int
lpfc_sli_issue_mbox_s3(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmbox,
		       uint32_t flag)
{
	MAILBOX_t *mbx;
	struct lpfc_sli *psli = &phba->sli;
	uint32_t status, evtctr;
	uint32_t ha_copy, hc_copy;
	int i;
	unsigned long timeout;
	unsigned long drvr_flag = 0;
	uint32_t word0, ldata;
	void __iomem *to_slim;
	int processing_queue = 0;

	spin_lock_irqsave(&phba->hbalock, drvr_flag);
	if (!pmbox) {
		/* NULL caller: clear active flag and pull from the queue. */
		phba->sli.sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;

		/* processing mbox queue from intr_handler */
		if (unlikely(psli->sli_flag & LPFC_SLI_ASYNC_MBX_BLK)) {
			spin_unlock_irqrestore(&phba->hbalock, drvr_flag);
			return MBX_SUCCESS;
		}
		processing_queue = 1;
		pmbox = lpfc_mbox_get(phba);
		if (!pmbox) {
			spin_unlock_irqrestore(&phba->hbalock, drvr_flag);
			return MBX_SUCCESS;
		}
	}

	/* Commands with a real completion handler must carry a vport. */
	if (pmbox->mbox_cmpl && pmbox->mbox_cmpl != lpfc_sli_def_mbox_cmpl &&
		pmbox->mbox_cmpl != lpfc_sli_wake_mbox_wait) {
		if(!pmbox->vport) {
			spin_unlock_irqrestore(&phba->hbalock, drvr_flag);
			lpfc_printf_log(phba, KERN_ERR,
					LOG_MBOX | LOG_VPORT,
					"1806 Mbox x%x failed. No vport\n",
					pmbox->u.mb.mbxCommand);
			dump_stack();
			goto out_not_finished;
		}
	}

	/* If the PCI channel is in offline state, do not post mbox. */
	if (unlikely(pci_channel_offline(phba->pcidev))) {
		spin_unlock_irqrestore(&phba->hbalock, drvr_flag);
		goto out_not_finished;
	}

	/* If HBA has a deferred error attention, fail the iocb. */
	if (unlikely(phba->hba_flag & DEFER_ERATT)) {
		spin_unlock_irqrestore(&phba->hbalock, drvr_flag);
		goto out_not_finished;
	}

	psli = &phba->sli;

	mbx = &pmbox->u.mb;
	status = MBX_SUCCESS;

	if (phba->link_state == LPFC_HBA_ERROR) {
		spin_unlock_irqrestore(&phba->hbalock, drvr_flag);

		/* Mbox command <mbxCommand> cannot issue */
		lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
				"(%d):0311 Mailbox command x%x cannot "
				"issue Data: x%x x%x\n",
				pmbox->vport ? pmbox->vport->vpi : 0,
				pmbox->u.mb.mbxCommand, psli->sli_flag, flag);
		goto out_not_finished;
	}

	/*
	 * Interrupt-driven issue requires the mailbox interrupt to be
	 * enabled (KILL_BOARD is exempt: it must always go through).
	 */
	if (mbx->mbxCommand != MBX_KILL_BOARD && flag & MBX_NOWAIT) {
		if (lpfc_readl(phba->HCregaddr, &hc_copy) ||
			!(hc_copy & HC_MBINT_ENA)) {
			spin_unlock_irqrestore(&phba->hbalock, drvr_flag);
			lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
				"(%d):2528 Mailbox command x%x cannot "
				"issue Data: x%x x%x\n",
				pmbox->vport ? pmbox->vport->vpi : 0,
				pmbox->u.mb.mbxCommand, psli->sli_flag, flag);
			goto out_not_finished;
		}
	}

	if (psli->sli_flag & LPFC_SLI_MBOX_ACTIVE) {
		/*
		 * Another mailbox command is already in progress.  A
		 * polled command cannot wait; a NOWAIT command is queued
		 * for later issue by the completion path.
		 */
		if (flag & MBX_POLL) {
			spin_unlock_irqrestore(&phba->hbalock, drvr_flag);

			/* Mbox command <mbxCommand> cannot issue */
			lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
					"(%d):2529 Mailbox command x%x "
					"cannot issue Data: x%x x%x\n",
					pmbox->vport ? pmbox->vport->vpi : 0,
					pmbox->u.mb.mbxCommand,
					psli->sli_flag, flag);
			goto out_not_finished;
		}

		if (!(psli->sli_flag & LPFC_SLI_ACTIVE)) {
			spin_unlock_irqrestore(&phba->hbalock, drvr_flag);
			/* Mbox command <mbxCommand> cannot issue */
			lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
					"(%d):2530 Mailbox command x%x "
					"cannot issue Data: x%x x%x\n",
					pmbox->vport ? pmbox->vport->vpi : 0,
					pmbox->u.mb.mbxCommand,
					psli->sli_flag, flag);
			goto out_not_finished;
		}

		/*
		 * Another mailbox command is still being processed;
		 * queue this command to be issued later.
		 */
		lpfc_mbox_put(phba, pmbox);

		/* Mbox cmd issue - BUSY */
		lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI,
				"(%d):0308 Mbox cmd issue - BUSY Data: "
				"x%x x%x x%x x%x\n",
				pmbox->vport ? pmbox->vport->vpi : 0xffffff,
				mbx->mbxCommand,
				phba->pport ? phba->pport->port_state : 0xff,
				psli->sli_flag, flag);

		psli->slistat.mbox_busy++;
		spin_unlock_irqrestore(&phba->hbalock, drvr_flag);

		if (pmbox->vport) {
			lpfc_debugfs_disc_trc(pmbox->vport,
				LPFC_DISC_TRC_MBOX_VPORT,
				"MBOX Bsy vport:  cmd:x%x mb:x%x x%x",
				(uint32_t)mbx->mbxCommand,
				mbx->un.varWords[0], mbx->un.varWords[1]);
		}
		else {
			lpfc_debugfs_disc_trc(phba->pport,
				LPFC_DISC_TRC_MBOX,
				"MBOX Bsy:        cmd:x%x mb:x%x x%x",
				(uint32_t)mbx->mbxCommand,
				mbx->un.varWords[0], mbx->un.varWords[1]);
		}

		return MBX_BUSY;
	}

	psli->sli_flag |= LPFC_SLI_MBOX_ACTIVE;

	/* If we are not polling, we MUST be in SLI2 mode */
	if (flag != MBX_POLL) {
		if (!(psli->sli_flag & LPFC_SLI_ACTIVE) &&
		    (mbx->mbxCommand != MBX_KILL_BOARD)) {
			psli->sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
			spin_unlock_irqrestore(&phba->hbalock, drvr_flag);

			/* Mbox command <mbxCommand> cannot issue */
			lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
					"(%d):2531 Mailbox command x%x "
					"cannot issue Data: x%x x%x\n",
					pmbox->vport ? pmbox->vport->vpi : 0,
					pmbox->u.mb.mbxCommand,
					psli->sli_flag, flag);
			goto out_not_finished;
		}
		/* timeout active mbox command */
		timeout = msecs_to_jiffies(lpfc_mbox_tmo_val(phba, pmbox) *
					   1000);
		mod_timer(&psli->mbox_tmo, jiffies + timeout);
	}

	/* Mailbox cmd <cmd> issue */
	lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI,
			"(%d):0309 Mailbox cmd x%x issue Data: x%x x%x "
			"x%x\n",
			pmbox->vport ? pmbox->vport->vpi : 0,
			mbx->mbxCommand,
			phba->pport ? phba->pport->port_state : 0xff,
			psli->sli_flag, flag);

	if (mbx->mbxCommand != MBX_HEARTBEAT) {
		if (pmbox->vport) {
			lpfc_debugfs_disc_trc(pmbox->vport,
				LPFC_DISC_TRC_MBOX_VPORT,
				"MBOX Send vport: cmd:x%x mb:x%x x%x",
				(uint32_t)mbx->mbxCommand,
				mbx->un.varWords[0], mbx->un.varWords[1]);
		}
		else {
			lpfc_debugfs_disc_trc(phba->pport,
				LPFC_DISC_TRC_MBOX,
				"MBOX Send:       cmd:x%x mb:x%x x%x",
				(uint32_t)mbx->mbxCommand,
				mbx->un.varWords[0], mbx->un.varWords[1]);
		}
	}

	psli->slistat.mbox_cmd++;
	evtctr = psli->slistat.mbox_event;

	/* next set own bit for the adapter and copy over command word */
	mbx->mbxOwner = OWN_CHIP;

	if (psli->sli_flag & LPFC_SLI_ACTIVE) {
		/*
		 * SLI active: mailbox lives in host memory.  Record the
		 * extension-data offset and copy any input extension.
		 */
		if (pmbox->in_ext_byte_len || pmbox->out_ext_byte_len) {
			*(((uint32_t *)mbx) + pmbox->mbox_offset_word)
				= (uint8_t *)phba->mbox_ext
				  - (uint8_t *)phba->mbox;
		}

		/* Copy the mailbox extension data */
		if (pmbox->in_ext_byte_len && pmbox->ctx_buf) {
			lpfc_sli_pcimem_bcopy(pmbox->ctx_buf,
					      (uint8_t *)phba->mbox_ext,
					      pmbox->in_ext_byte_len);
		}
		/* Copy command data to host SLIM area */
		lpfc_sli_pcimem_bcopy(mbx, phba->mbox, MAILBOX_CMD_SIZE);
	} else {
		/*
		 * SLI not active: mailbox must be written directly into
		 * the adapter's SLIM region.
		 */
		if (pmbox->in_ext_byte_len || pmbox->out_ext_byte_len)
			*(((uint32_t *)mbx) + pmbox->mbox_offset_word)
				= MAILBOX_HBA_EXT_OFFSET;

		/* Copy the mailbox extension data */
		if (pmbox->in_ext_byte_len && pmbox->ctx_buf)
			lpfc_memcpy_to_slim(phba->MBslimaddr +
				MAILBOX_HBA_EXT_OFFSET,
				pmbox->ctx_buf, pmbox->in_ext_byte_len);

		if (mbx->mbxCommand == MBX_CONFIG_PORT)
			/* copy command data into host mbox for cmpl */
			lpfc_sli_pcimem_bcopy(mbx, phba->mbox,
					      MAILBOX_CMD_SIZE);

		/*
		 * First copy mbox command data to HBA SLIM, skip past first
		 * word; word0 (which carries the ownership bit) goes last.
		 */
		to_slim = phba->MBslimaddr + sizeof (uint32_t);
		lpfc_memcpy_to_slim(to_slim, &mbx->un.varWords[0],
			    MAILBOX_CMD_SIZE - sizeof (uint32_t));

		/* Next copy over first word, with mbxOwner set */
		ldata = *((uint32_t *)mbx);
		to_slim = phba->MBslimaddr;
		writel(ldata, to_slim);
		readl(to_slim); /* flush */

		if (mbx->mbxCommand == MBX_CONFIG_PORT)
			/* switch over to host mailbox */
			psli->sli_flag |= LPFC_SLI_ACTIVE;
	}

	wmb();

	switch (flag) {
	case MBX_NOWAIT:
		/* Set up reference to mailbox command */
		psli->mbox_active = pmbox;
		/* Interrupt board to do it */
		writel(CA_MBATT, phba->CAregaddr);
		readl(phba->CAregaddr); /* flush */
		/* Don't wait for it to finish, just return */
		break;

	case MBX_POLL:
		/* Set up null reference to mailbox command */
		psli->mbox_active = NULL;
		/* Interrupt board to do it */
		writel(CA_MBATT, phba->CAregaddr);
		readl(phba->CAregaddr); /* flush */

		if (psli->sli_flag & LPFC_SLI_ACTIVE) {
			/* First read mbox status word */
			word0 = *((uint32_t *)phba->mbox);
			word0 = le32_to_cpu(word0);
		} else {
			/* First read mbox status word */
			if (lpfc_readl(phba->MBslimaddr, &word0)) {
				spin_unlock_irqrestore(&phba->hbalock,
						       drvr_flag);
				goto out_not_finished;
			}
		}

		/* Read the HBA Host Attention Register */
		if (lpfc_readl(phba->HAregaddr, &ha_copy)) {
			spin_unlock_irqrestore(&phba->hbalock,
						       drvr_flag);
			goto out_not_finished;
		}
		timeout = msecs_to_jiffies(lpfc_mbox_tmo_val(phba, pmbox) *
							1000) + jiffies;
		i = 0;
		/* Wait for command to complete */
		while (((word0 & OWN_CHIP) == OWN_CHIP) ||
		       (!(ha_copy & HA_MBATT) &&
			(phba->link_state > LPFC_WARM_START))) {
			if (time_after(jiffies, timeout)) {
				psli->sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
				spin_unlock_irqrestore(&phba->hbalock,
						       drvr_flag);
				goto out_not_finished;
			}

			/*
			 * Check if we took a mbox interrupt while we were
			 * polling
			 */
			if (((word0 & OWN_CHIP) != OWN_CHIP)
			    && (evtctr != psli->slistat.mbox_event))
				break;

			/* Drop the lock briefly and sleep after ~10 spins. */
			if (i++ > 10) {
				spin_unlock_irqrestore(&phba->hbalock,
						       drvr_flag);
				msleep(1);
				spin_lock_irqsave(&phba->hbalock, drvr_flag);
			}

			if (psli->sli_flag & LPFC_SLI_ACTIVE) {
				/* First copy command data */
				word0 = *((uint32_t *)phba->mbox);
				word0 = le32_to_cpu(word0);
				if (mbx->mbxCommand == MBX_CONFIG_PORT) {
					MAILBOX_t *slimmb;
					uint32_t slimword0;
					/* Check real SLIM for any errors */
					slimword0 = readl(phba->MBslimaddr);
					slimmb = (MAILBOX_t *) & slimword0;
					if (((slimword0 & OWN_CHIP) != OWN_CHIP)
					    && slimmb->mbxStatus) {
						psli->sli_flag &=
						    ~LPFC_SLI_ACTIVE;
						word0 = slimword0;
					}
				}
			} else {
				/* First copy command data */
				word0 = readl(phba->MBslimaddr);
			}
			/* Read the HBA Host Attention Register */
			if (lpfc_readl(phba->HAregaddr, &ha_copy)) {
				spin_unlock_irqrestore(&phba->hbalock,
						       drvr_flag);
				goto out_not_finished;
			}
		}

		if (psli->sli_flag & LPFC_SLI_ACTIVE) {
			/* copy results back to user */
			lpfc_sli_pcimem_bcopy(phba->mbox, mbx,
						MAILBOX_CMD_SIZE);
			/* Copy the mailbox extension data */
			if (pmbox->out_ext_byte_len && pmbox->ctx_buf) {
				lpfc_sli_pcimem_bcopy(phba->mbox_ext,
						      pmbox->ctx_buf,
						      pmbox->out_ext_byte_len);
			}
		} else {
			/* First copy command data */
			lpfc_memcpy_from_slim(mbx, phba->MBslimaddr,
						MAILBOX_CMD_SIZE);
			/* Copy the mailbox extension data */
			if (pmbox->out_ext_byte_len && pmbox->ctx_buf) {
				lpfc_memcpy_from_slim(
					pmbox->ctx_buf,
					phba->MBslimaddr +
					MAILBOX_HBA_EXT_OFFSET,
					pmbox->out_ext_byte_len);
			}
		}

		writel(HA_MBATT, phba->HAregaddr);
		readl(phba->HAregaddr); /* flush */

		psli->sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
		status = mbx->mbxStatus;
	}

	spin_unlock_irqrestore(&phba->hbalock, drvr_flag);
	return status;

out_not_finished:
	/* Queue-sourced commands get completed with an error status. */
	if (processing_queue) {
		pmbox->u.mb.mbxStatus = MBX_NOT_FINISHED;
		lpfc_mbox_cmpl_put(phba, pmbox);
	}
	return MBX_NOT_FINISHED;
}
8421
8422
8423
8424
8425
8426
8427
8428
8429
8430
8431
8432
8433
/**
 * lpfc_sli4_async_mbox_block - Block posting of asynchronous mailbox commands
 * @phba: Pointer to HBA context object.
 *
 * Sets the LPFC_SLI_ASYNC_MBX_BLK flag (under hbalock) so no new async
 * mailbox command is posted, then waits for the currently active mailbox
 * command, if any, to complete.
 *
 * Return: 0 if no command was outstanding or it completed in time,
 *         1 on timeout (the block flag is cleared again in that case).
 **/
static int
lpfc_sli4_async_mbox_block(struct lpfc_hba *phba)
{
	struct lpfc_sli *psli = &phba->sli;
	int rc = 0;
	unsigned long timeout = 0;

	/* Mark async mailbox posting as blocked */
	spin_lock_irq(&phba->hbalock);
	psli->sli_flag |= LPFC_SLI_ASYNC_MBX_BLK;

	/* If a mailbox command is active right now, compute the deadline
	 * to wait for it from the per-command mailbox timeout value.
	 */
	if (phba->sli.mbox_active)
		timeout = msecs_to_jiffies(lpfc_mbox_tmo_val(phba,
						phba->sli.mbox_active) *
						1000) + jiffies;
	spin_unlock_irq(&phba->hbalock);

	/* Recover a possibly lost mailbox completion interrupt before
	 * polling mbox_active below.
	 */
	if (timeout)
		lpfc_sli4_process_missed_mbox_completions(phba);

	/* Poll (without the lock) until the active command completes.
	 * NOTE(review): if mbox_active becomes set only after the locked
	 * section above, timeout is still 0 and time_after() fires almost
	 * immediately -- presumably the block flag prevents that window;
	 * confirm against the posting path.
	 */
	while (phba->sli.mbox_active) {
		/* Check completion every 2ms */
		msleep(2);
		if (time_after(jiffies, timeout)) {
			/* Timed out waiting for the active command */
			rc = 1;
			break;
		}
	}

	/* On timeout, undo the block so async posting can resume */
	if (rc) {
		spin_lock_irq(&phba->hbalock);
		psli->sli_flag &= ~LPFC_SLI_ASYNC_MBX_BLK;
		spin_unlock_irq(&phba->hbalock);
	}
	return rc;
}
8476
8477
8478
8479
8480
8481
8482
8483
8484
8485
8486
8487
8488static void
8489lpfc_sli4_async_mbox_unblock(struct lpfc_hba *phba)
8490{
8491 struct lpfc_sli *psli = &phba->sli;
8492
8493 spin_lock_irq(&phba->hbalock);
8494 if (!(psli->sli_flag & LPFC_SLI_ASYNC_MBX_BLK)) {
8495
8496 spin_unlock_irq(&phba->hbalock);
8497 return;
8498 }
8499
8500
8501
8502
8503
8504
8505 psli->sli_flag &= ~LPFC_SLI_ASYNC_MBX_BLK;
8506 spin_unlock_irq(&phba->hbalock);
8507
8508
8509 lpfc_worker_wake_up(phba);
8510}
8511
8512
8513
8514
8515
8516
8517
8518
8519
8520
8521
8522
8523static int
8524lpfc_sli4_wait_bmbx_ready(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
8525{
8526 uint32_t db_ready;
8527 unsigned long timeout;
8528 struct lpfc_register bmbx_reg;
8529
8530 timeout = msecs_to_jiffies(lpfc_mbox_tmo_val(phba, mboxq)
8531 * 1000) + jiffies;
8532
8533 do {
8534 bmbx_reg.word0 = readl(phba->sli4_hba.BMBXregaddr);
8535 db_ready = bf_get(lpfc_bmbx_rdy, &bmbx_reg);
8536 if (!db_ready)
8537 mdelay(2);
8538
8539 if (time_after(jiffies, timeout))
8540 return MBXERR_ERROR;
8541 } while (!db_ready);
8542
8543 return 0;
8544}
8545
8546
8547
8548
8549
8550
8551
8552
8553
8554
8555
8556
8557
8558
8559
8560
8561
/**
 * lpfc_sli4_post_sync_mbox - Post a mailbox via the SLI4 bootstrap mailbox
 * @phba: Pointer to HBA context object.
 * @mboxq: Pointer to the mailbox command to post.
 *
 * Synchronously posts @mboxq through the bootstrap mailbox (BMBX)
 * register pair and polls for completion. Marks LPFC_SLI_MBOX_ACTIVE for
 * the duration so no other mailbox command can run concurrently.
 *
 * Return: MBX_SUCCESS, or MBXERR_ERROR if another command is active, the
 * BMBX ready bit times out, or the MCQE completion status is not success.
 **/
static int
lpfc_sli4_post_sync_mbox(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
{
	int rc = MBX_SUCCESS;
	unsigned long iflag;
	uint32_t mcqe_status;
	uint32_t mbx_cmnd;
	struct lpfc_sli *psli = &phba->sli;
	struct lpfc_mqe *mb = &mboxq->u.mqe;
	struct lpfc_bmbx_create *mbox_rgn;
	struct dma_address *dma_address;

	/* Claim the mailbox; only one command may be active at a time */
	spin_lock_irqsave(&phba->hbalock, iflag);
	if (psli->sli_flag & LPFC_SLI_MBOX_ACTIVE) {
		spin_unlock_irqrestore(&phba->hbalock, iflag);
		lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
				"(%d):2532 Mailbox command x%x (x%x/x%x) "
				"cannot issue Data: x%x x%x\n",
				mboxq->vport ? mboxq->vport->vpi : 0,
				mboxq->u.mb.mbxCommand,
				lpfc_sli_config_mbox_subsys_get(phba, mboxq),
				lpfc_sli_config_mbox_opcode_get(phba, mboxq),
				psli->sli_flag, MBX_POLL);
		return MBXERR_ERROR;
	}
	/* The write to sli_flag and mbox_active is protected by hbalock */
	psli->sli_flag |= LPFC_SLI_MBOX_ACTIVE;
	phba->sli.mbox_active = mboxq;
	spin_unlock_irqrestore(&phba->hbalock, iflag);

	/* Wait for the BMBX ready bit before touching the mailbox region */
	rc = lpfc_sli4_wait_bmbx_ready(phba, mboxq);
	if (rc)
		goto exit;

	/* Copy the MQE into the bootstrap mailbox memory region after
	 * clearing it, so stale completion data is not re-read later.
	 */
	mbx_cmnd = bf_get(lpfc_mqe_command, mb);
	memset(phba->sli4_hba.bmbx.avirt, 0, sizeof(struct lpfc_bmbx_create));
	lpfc_sli4_pcimem_bcopy(mb, phba->sli4_hba.bmbx.avirt,
			       sizeof(struct lpfc_mqe));

	/* Ring the doorbell: post high address word first ... */
	dma_address = &phba->sli4_hba.bmbx.dma_address;
	writel(dma_address->addr_hi, phba->sli4_hba.BMBXregaddr);

	/* ... wait for ready again ... */
	rc = lpfc_sli4_wait_bmbx_ready(phba, mboxq);
	if (rc)
		goto exit;

	/* ... then post the low address word */
	writel(dma_address->addr_lo, phba->sli4_hba.BMBXregaddr);

	/* Wait for the command to complete (ready bit set again) */
	rc = lpfc_sli4_wait_bmbx_ready(phba, mboxq);
	if (rc)
		goto exit;

	/* Copy the completed MQE and its MCQE back out of the bootstrap
	 * mailbox region into the caller's mailbox structure.
	 */
	lpfc_sli4_pcimem_bcopy(phba->sli4_hba.bmbx.avirt, mb,
			       sizeof(struct lpfc_mqe));
	mbox_rgn = (struct lpfc_bmbx_create *) phba->sli4_hba.bmbx.avirt;
	lpfc_sli4_pcimem_bcopy(&mbox_rgn->mcqe, &mboxq->mcqe,
			       sizeof(struct lpfc_mcqe));
	mcqe_status = bf_get(lpfc_mcqe_status, &mbox_rgn->mcqe);

	/* If the MCQE reports failure but the MQE still says success,
	 * stamp the MQE status with the (range-tagged) MCQE status so
	 * the caller sees a consistent failure.
	 */
	if (mcqe_status != MB_CQE_STATUS_SUCCESS) {
		if (bf_get(lpfc_mqe_status, mb) == MBX_SUCCESS)
			bf_set(lpfc_mqe_status, mb,
			       (LPFC_MBX_ERROR_RANGE | mcqe_status));
		rc = MBXERR_ERROR;
	} else
		lpfc_sli4_swap_str(phba, mboxq);

	lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI,
			"(%d):0356 Mailbox cmd x%x (x%x/x%x) Status x%x "
			"Data: x%x x%x x%x x%x x%x x%x x%x x%x x%x x%x x%x"
			" x%x x%x CQ: x%x x%x x%x x%x\n",
			mboxq->vport ? mboxq->vport->vpi : 0, mbx_cmnd,
			lpfc_sli_config_mbox_subsys_get(phba, mboxq),
			lpfc_sli_config_mbox_opcode_get(phba, mboxq),
			bf_get(lpfc_mqe_status, mb),
			mb->un.mb_words[0], mb->un.mb_words[1],
			mb->un.mb_words[2], mb->un.mb_words[3],
			mb->un.mb_words[4], mb->un.mb_words[5],
			mb->un.mb_words[6], mb->un.mb_words[7],
			mb->un.mb_words[8], mb->un.mb_words[9],
			mb->un.mb_words[10], mb->un.mb_words[11],
			mb->un.mb_words[12], mboxq->mcqe.word0,
			mboxq->mcqe.mcqe_tag0, mboxq->mcqe.mcqe_tag1,
			mboxq->mcqe.trailer);
exit:
	/* Release the mailbox on all paths, including BMBX timeouts */
	spin_lock_irqsave(&phba->hbalock, iflag);
	psli->sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
	phba->sli.mbox_active = NULL;
	spin_unlock_irqrestore(&phba->hbalock, iflag);
	return rc;
}
8676
8677
8678
8679
8680
8681
8682
8683
8684
8685
8686
8687
8688
/**
 * lpfc_sli_issue_mbox_s4 - Issue an SLI4 mailbox command to firmware
 * @phba: Pointer to HBA context object.
 * @mboxq: Pointer to the mailbox command to issue.
 * @flag: MBX_POLL for synchronous issue, MBX_NOWAIT for asynchronous.
 *
 * MBX_POLL commands are posted synchronously via the bootstrap mailbox
 * (directly when interrupts are disabled, otherwise after blocking the
 * async mailbox queue). MBX_NOWAIT commands are enqueued for the worker
 * thread and MBX_BUSY is returned.
 *
 * Return: MBX_SUCCESS / MBX_BUSY on success paths; MBX_NOT_FINISHED,
 * MBXERR_ERROR or -EIO on failure.
 **/
static int
lpfc_sli_issue_mbox_s4(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq,
		       uint32_t flag)
{
	struct lpfc_sli *psli = &phba->sli;
	unsigned long iflags;
	int rc;

	/* dump the entry point mailbox command for debugfs idiag */
	lpfc_idiag_mbxacc_dump_issue_mbox(phba, &mboxq->u.mb);

	/* Reject the command if the device is not in a usable state */
	rc = lpfc_mbox_dev_check(phba);
	if (unlikely(rc)) {
		lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
				"(%d):2544 Mailbox command x%x (x%x/x%x) "
				"cannot issue Data: x%x x%x\n",
				mboxq->vport ? mboxq->vport->vpi : 0,
				mboxq->u.mb.mbxCommand,
				lpfc_sli_config_mbox_subsys_get(phba, mboxq),
				lpfc_sli_config_mbox_opcode_get(phba, mboxq),
				psli->sli_flag, flag);
		goto out_not_finished;
	}

	/* With interrupts disabled only synchronous (polled) issue is
	 * possible; asynchronous issue would never see a completion.
	 */
	if (!phba->sli4_hba.intr_enable) {
		if (flag == MBX_POLL)
			rc = lpfc_sli4_post_sync_mbox(phba, mboxq);
		else
			rc = -EIO;
		if (rc != MBX_SUCCESS)
			lpfc_printf_log(phba, KERN_WARNING, LOG_MBOX | LOG_SLI,
					"(%d):2541 Mailbox command x%x "
					"(x%x/x%x) failure: "
					"mqe_sta: x%x mcqe_sta: x%x/x%x "
					"Data: x%x x%x\n,",
					mboxq->vport ? mboxq->vport->vpi : 0,
					mboxq->u.mb.mbxCommand,
					lpfc_sli_config_mbox_subsys_get(phba,
					mboxq),
					lpfc_sli_config_mbox_opcode_get(phba,
					mboxq),
					bf_get(lpfc_mqe_status, &mboxq->u.mqe),
					bf_get(lpfc_mcqe_status, &mboxq->mcqe),
					bf_get(lpfc_mcqe_ext_status,
					       &mboxq->mcqe),
					psli->sli_flag, flag);
		return rc;
	} else if (flag == MBX_POLL) {
		/* Interrupts are enabled: block the async mailbox queue
		 * first, then post synchronously, then unblock.
		 */
		lpfc_printf_log(phba, KERN_WARNING, LOG_MBOX | LOG_SLI,
				"(%d):2542 Try to issue mailbox command "
				"x%x (x%x/x%x) synchronously ahead of async "
				"mailbox command queue: x%x x%x\n",
				mboxq->vport ? mboxq->vport->vpi : 0,
				mboxq->u.mb.mbxCommand,
				lpfc_sli_config_mbox_subsys_get(phba, mboxq),
				lpfc_sli_config_mbox_opcode_get(phba, mboxq),
				psli->sli_flag, flag);
		/* Try to block the asynchronous mailbox posting */
		rc = lpfc_sli4_async_mbox_block(phba);
		if (!rc) {
			/* Successfully blocked, now issue sync mbox cmd */
			rc = lpfc_sli4_post_sync_mbox(phba, mboxq);
			if (rc != MBX_SUCCESS)
				lpfc_printf_log(phba, KERN_WARNING,
					LOG_MBOX | LOG_SLI,
					"(%d):2597 Sync Mailbox command "
					"x%x (x%x/x%x) failure: "
					"mqe_sta: x%x mcqe_sta: x%x/x%x "
					"Data: x%x x%x\n,",
					mboxq->vport ? mboxq->vport->vpi : 0,
					mboxq->u.mb.mbxCommand,
					lpfc_sli_config_mbox_subsys_get(phba,
					mboxq),
					lpfc_sli_config_mbox_opcode_get(phba,
					mboxq),
					bf_get(lpfc_mqe_status, &mboxq->u.mqe),
					bf_get(lpfc_mcqe_status, &mboxq->mcqe),
					bf_get(lpfc_mcqe_ext_status,
					       &mboxq->mcqe),
					psli->sli_flag, flag);
			/* Unblock the async mailbox posting afterward */
			lpfc_sli4_async_mbox_unblock(phba);
		}
		return rc;
	}

	/* Asynchronous path: validate the command is allowed async */
	rc = lpfc_mbox_cmd_check(phba, mboxq);
	if (rc) {
		lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
				"(%d):2543 Mailbox command x%x (x%x/x%x) "
				"cannot issue Data: x%x x%x\n",
				mboxq->vport ? mboxq->vport->vpi : 0,
				mboxq->u.mb.mbxCommand,
				lpfc_sli_config_mbox_subsys_get(phba, mboxq),
				lpfc_sli_config_mbox_opcode_get(phba, mboxq),
				psli->sli_flag, flag);
		goto out_not_finished;
	}

	/* Enqueue for the worker thread and report busy to the caller */
	psli->slistat.mbox_busy++;
	spin_lock_irqsave(&phba->hbalock, iflags);
	lpfc_mbox_put(phba, mboxq);
	spin_unlock_irqrestore(&phba->hbalock, iflags);
	lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI,
			"(%d):0354 Mbox cmd issue - Enqueue Data: "
			"x%x (x%x/x%x) x%x x%x x%x\n",
			mboxq->vport ? mboxq->vport->vpi : 0xffffff,
			bf_get(lpfc_mqe_command, &mboxq->u.mqe),
			lpfc_sli_config_mbox_subsys_get(phba, mboxq),
			lpfc_sli_config_mbox_opcode_get(phba, mboxq),
			phba->pport->port_state,
			psli->sli_flag, MBX_NOWAIT);
	/* Wake up worker thread to transport mailbox command from head */
	lpfc_worker_wake_up(phba);

	return MBX_BUSY;

out_not_finished:
	return MBX_NOT_FINISHED;
}
8812
8813
8814
8815
8816
8817
8818
8819
8820
/**
 * lpfc_sli4_post_async_mbox - Post the next queued mailbox to the mbox WQ
 * @phba: Pointer to HBA context object.
 *
 * Called (by the worker thread path) to dequeue one mailbox command and
 * post it to the SLI4 mailbox work queue. Requires interrupts enabled and
 * no mailbox command currently active or blocked.
 *
 * Return: MBX_SUCCESS when a command was posted (or the queue was empty),
 * MBX_NOT_FINISHED when posting is not possible or the MQ put fails.
 **/
int
lpfc_sli4_post_async_mbox(struct lpfc_hba *phba)
{
	struct lpfc_sli *psli = &phba->sli;
	LPFC_MBOXQ_t *mboxq;
	int rc = MBX_SUCCESS;
	unsigned long iflags;
	struct lpfc_mqe *mqe;
	uint32_t mbx_cmnd;

	/* Async posting needs completion interrupts to be enabled */
	if (unlikely(!phba->sli4_hba.intr_enable))
		return MBX_NOT_FINISHED;

	/* Check whether async posting is blocked or already busy; all
	 * state transitions below are protected by hbalock.
	 */
	spin_lock_irqsave(&phba->hbalock, iflags);
	if (unlikely(psli->sli_flag & LPFC_SLI_ASYNC_MBX_BLK)) {
		spin_unlock_irqrestore(&phba->hbalock, iflags);
		return MBX_NOT_FINISHED;
	}
	if (psli->sli_flag & LPFC_SLI_MBOX_ACTIVE) {
		spin_unlock_irqrestore(&phba->hbalock, iflags);
		return MBX_NOT_FINISHED;
	}
	if (unlikely(phba->sli.mbox_active)) {
		spin_unlock_irqrestore(&phba->hbalock, iflags);
		lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
				"0384 There is pending active mailbox cmd\n");
		return MBX_NOT_FINISHED;
	}
	/* Take the mailbox command token */
	psli->sli_flag |= LPFC_SLI_MBOX_ACTIVE;

	/* Get the next mailbox command from head of queue */
	mboxq = lpfc_mbox_get(phba);

	/* If no more mailbox command waiting for post, we're done */
	if (!mboxq) {
		psli->sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
		spin_unlock_irqrestore(&phba->hbalock, iflags);
		return MBX_SUCCESS;
	}
	phba->sli.mbox_active = mboxq;
	spin_unlock_irqrestore(&phba->hbalock, iflags);

	/* Check device readiness again before actually posting */
	rc = lpfc_mbox_dev_check(phba);
	if (unlikely(rc))
		/* Driver clean routine will clean up pending mailbox */
		goto out_not_finished;

	/* Prepare the mbox command to be posted */
	mqe = &mboxq->u.mqe;
	mbx_cmnd = bf_get(lpfc_mqe_command, mqe);

	/* Start timer for the mbox_tmo and log some mailbox post messages */
	mod_timer(&psli->mbox_tmo, (jiffies +
		  msecs_to_jiffies(1000 * lpfc_mbox_tmo_val(phba, mboxq))));

	lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI,
			"(%d):0355 Mailbox cmd x%x (x%x/x%x) issue Data: "
			"x%x x%x\n",
			mboxq->vport ? mboxq->vport->vpi : 0, mbx_cmnd,
			lpfc_sli_config_mbox_subsys_get(phba, mboxq),
			lpfc_sli_config_mbox_opcode_get(phba, mboxq),
			phba->pport->port_state, psli->sli_flag);

	/* Trace the command for discovery debugging (not heartbeats) */
	if (mbx_cmnd != MBX_HEARTBEAT) {
		if (mboxq->vport) {
			lpfc_debugfs_disc_trc(mboxq->vport,
				LPFC_DISC_TRC_MBOX_VPORT,
				"MBOX Send vport: cmd:x%x mb:x%x x%x",
				mbx_cmnd, mqe->un.mb_words[0],
				mqe->un.mb_words[1]);
		} else {
			lpfc_debugfs_disc_trc(phba->pport,
				LPFC_DISC_TRC_MBOX,
				"MBOX Send: cmd:x%x mb:x%x x%x",
				mbx_cmnd, mqe->un.mb_words[0],
				mqe->un.mb_words[1]);
		}
	}
	psli->slistat.mbox_cmd++;

	/* Post the mailbox command to the port's mailbox work queue */
	rc = lpfc_sli4_mq_put(phba->sli4_hba.mbx_wq, mqe);
	if (rc != MBX_SUCCESS) {
		lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
				"(%d):2533 Mailbox command x%x (x%x/x%x) "
				"cannot issue Data: x%x x%x\n",
				mboxq->vport ? mboxq->vport->vpi : 0,
				mboxq->u.mb.mbxCommand,
				lpfc_sli_config_mbox_subsys_get(phba, mboxq),
				lpfc_sli_config_mbox_opcode_get(phba, mboxq),
				psli->sli_flag, MBX_NOWAIT);
		goto out_not_finished;
	}

	return rc;

out_not_finished:
	/* Fail the command, put it on the completion queue and release
	 * the mailbox token, all under hbalock.
	 */
	spin_lock_irqsave(&phba->hbalock, iflags);
	if (phba->sli.mbox_active) {
		mboxq->u.mb.mbxStatus = MBX_NOT_FINISHED;
		__lpfc_mbox_cmpl_put(phba, mboxq);
		/* Release the token */
		psli->sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
		phba->sli.mbox_active = NULL;
	}
	spin_unlock_irqrestore(&phba->hbalock, iflags);

	return MBX_NOT_FINISHED;
}
8934
8935
8936
8937
8938
8939
8940
8941
8942
8943
8944
8945
8946
/**
 * lpfc_sli_issue_mbox - Wrapper func for issuing mailbox command
 * @phba: Pointer to HBA context object.
 * @pmbox: Pointer to the mailbox command to issue.
 * @flag: Issue mode flag (e.g. MBX_POLL or MBX_NOWAIT).
 *
 * Dispatches through the per-device-group function pointer installed by
 * lpfc_mbox_api_table_setup() (SLI-3 or SLI-4 implementation).
 *
 * Return: whatever the device-specific issue routine returns.
 **/
int
lpfc_sli_issue_mbox(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmbox, uint32_t flag)
{
	return phba->lpfc_sli_issue_mbox(phba, pmbox, flag);
}
8952
8953
8954
8955
8956
8957
8958
8959
8960
8961
8962int
8963lpfc_mbox_api_table_setup(struct lpfc_hba *phba, uint8_t dev_grp)
8964{
8965
8966 switch (dev_grp) {
8967 case LPFC_PCI_DEV_LP:
8968 phba->lpfc_sli_issue_mbox = lpfc_sli_issue_mbox_s3;
8969 phba->lpfc_sli_handle_slow_ring_event =
8970 lpfc_sli_handle_slow_ring_event_s3;
8971 phba->lpfc_sli_hbq_to_firmware = lpfc_sli_hbq_to_firmware_s3;
8972 phba->lpfc_sli_brdrestart = lpfc_sli_brdrestart_s3;
8973 phba->lpfc_sli_brdready = lpfc_sli_brdready_s3;
8974 break;
8975 case LPFC_PCI_DEV_OC:
8976 phba->lpfc_sli_issue_mbox = lpfc_sli_issue_mbox_s4;
8977 phba->lpfc_sli_handle_slow_ring_event =
8978 lpfc_sli_handle_slow_ring_event_s4;
8979 phba->lpfc_sli_hbq_to_firmware = lpfc_sli_hbq_to_firmware_s4;
8980 phba->lpfc_sli_brdrestart = lpfc_sli_brdrestart_s4;
8981 phba->lpfc_sli_brdready = lpfc_sli_brdready_s4;
8982 break;
8983 default:
8984 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
8985 "1420 Invalid HBA PCI-device group: 0x%x\n",
8986 dev_grp);
8987 return -ENODEV;
8988 break;
8989 }
8990 return 0;
8991}
8992
8993
8994
8995
8996
8997
8998
8999
9000
9001
9002
/**
 * __lpfc_sli_ringtx_put - Add an iocb to the ring's txq
 * @phba: Pointer to HBA context object.
 * @pring: Pointer to driver SLI ring object.
 * @piocb: Pointer to the driver iocb object.
 *
 * Appends @piocb to the tail of @pring's txq list. The caller must hold
 * hbalock (enforced via lockdep below).
 **/
void
__lpfc_sli_ringtx_put(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
		      struct lpfc_iocbq *piocb)
{
	lockdep_assert_held(&phba->hbalock);
	/* Insert the caller buffer into txq */
	list_add_tail(&piocb->list, &pring->txq);
}
9011
9012
9013
9014
9015
9016
9017
9018
9019
9020
9021
9022
9023
9024
9025
9026
9027
9028
9029static struct lpfc_iocbq *
9030lpfc_sli_next_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
9031 struct lpfc_iocbq **piocb)
9032{
9033 struct lpfc_iocbq * nextiocb;
9034
9035 lockdep_assert_held(&phba->hbalock);
9036
9037 nextiocb = lpfc_sli_ringtx_get(phba, pring);
9038 if (!nextiocb) {
9039 nextiocb = *piocb;
9040 *piocb = NULL;
9041 }
9042
9043 return nextiocb;
9044}
9045
9046
9047
9048
9049
9050
9051
9052
9053
9054
9055
9056
9057
9058
9059
9060
9061
9062
9063
9064
9065
9066
9067
/**
 * __lpfc_sli_issue_iocb_s3 - Issue an iocb on an SLI-3 ring
 * @phba: Pointer to HBA context object.
 * @ring_number: SLI ring number to issue on.
 * @piocb: Pointer to the iocb to issue.
 * @flag: SLI_IOCB_RET_IOCB to return IOCB_BUSY instead of queueing
 *        when the ring is busy.
 *
 * Submits @piocb (plus any previously-queued txq iocbs) to the ring if
 * slots are available. Caller must hold hbalock.
 *
 * Return: IOCB_SUCCESS, IOCB_BUSY or IOCB_ERROR.
 **/
static int
__lpfc_sli_issue_iocb_s3(struct lpfc_hba *phba, uint32_t ring_number,
			 struct lpfc_iocbq *piocb, uint32_t flag)
{
	struct lpfc_iocbq *nextiocb;
	IOCB_t *iocb;
	struct lpfc_sli_ring *pring = &phba->sli.sli3_ring[ring_number];

	lockdep_assert_held(&phba->hbalock);

	/* A completion handler needs a vport to complete against,
	 * except for abort/close which are handled without one.
	 */
	if (piocb->iocb_cmpl && (!piocb->vport) &&
	    (piocb->iocb.ulpCommand != CMD_ABORT_XRI_CN) &&
	    (piocb->iocb.ulpCommand != CMD_CLOSE_XRI_CN)) {
		lpfc_printf_log(phba, KERN_ERR,
				LOG_SLI | LOG_VPORT,
				"1807 IOCB x%x failed. No vport\n",
				piocb->iocb.ulpCommand);
		dump_stack();
		return IOCB_ERROR;
	}

	/* If the PCI channel is offline, do not post iocbs. */
	if (unlikely(pci_channel_offline(phba->pcidev)))
		return IOCB_ERROR;

	/* If HBA has a deferred error attention, fail the iocb. */
	if (unlikely(phba->hba_flag & DEFER_ERATT))
		return IOCB_ERROR;

	/* Link states below LPFC_LINK_DOWN mean the HBA is not ready
	 * to accept any iocb at all.
	 */
	if (unlikely(phba->link_state < LPFC_LINK_DOWN))
		return IOCB_ERROR;

	/* Ring has been told to stop accepting iocb events */
	if (unlikely(pring->flag & LPFC_STOP_IOCB_EVENT))
		goto iocb_busy;

	if (unlikely(phba->link_state == LPFC_LINK_DOWN)) {
		/* Link is down: only a restricted set of commands may
		 * still be issued.
		 */
		switch (piocb->iocb.ulpCommand) {
		case CMD_GEN_REQUEST64_CR:
		case CMD_GEN_REQUEST64_CX:
			/* Allowed only for Menlo maintenance traffic */
			if (!(phba->sli.sli_flag & LPFC_MENLO_MAINT) ||
				(piocb->iocb.un.genreq64.w5.hcsw.Rctl !=
					FC_RCTL_DD_UNSOL_CMD) ||
				(piocb->iocb.un.genreq64.w5.hcsw.Type !=
					MENLO_TRANSPORT_TYPE))

				goto iocb_busy;
			break;
		case CMD_QUE_RING_BUF_CN:
		case CMD_QUE_RING_BUF64_CN:
			/* For IOCBs, like QUE_RING_BUF, that have no rsp ring
			 * completion, the driver clears iocb_cmpl so no
			 * stale completion runs later.
			 */
			if (piocb->iocb_cmpl)
				piocb->iocb_cmpl = NULL;
			/* fall through -- these are also allowed link-down */
		case CMD_CREATE_XRI_CR:
		case CMD_CLOSE_XRI_CN:
		case CMD_CLOSE_XRI_CX:
			break;
		default:
			goto iocb_busy;
		}

	/* FCP ring traffic is held off until link attention
	 * processing (LPFC_PROCESS_LA) is enabled.
	 */
	} else if (unlikely(pring->ringno == LPFC_FCP_RING &&
			    !(phba->sli.sli_flag & LPFC_PROCESS_LA))) {
		goto iocb_busy;
	}

	/* Submit queued txq iocbs (and finally piocb itself) while ring
	 * slots are available.
	 */
	while ((iocb = lpfc_sli_next_iocb_slot(phba, pring)) &&
	       (nextiocb = lpfc_sli_next_iocb(phba, pring, &piocb)))
		lpfc_sli_submit_iocb(phba, pring, iocb, nextiocb);

	if (iocb)
		lpfc_sli_update_ring(phba, pring);
	else
		lpfc_sli_update_full_ring(phba, pring);

	/* piocb was consumed by the submit loop: success */
	if (!piocb)
		return IOCB_SUCCESS;

	goto out_busy;

 iocb_busy:
	pring->stats.iocb_cmd_delay++;

 out_busy:

	/* Unless the caller wants the iocb back, park it on the txq */
	if (!(flag & SLI_IOCB_RET_IOCB)) {
		__lpfc_sli_ringtx_put(phba, pring, piocb);
		return IOCB_SUCCESS;
	}

	return IOCB_BUSY;
}
9179
9180
9181
9182
9183
9184
9185
9186
9187
9188
9189
9190
9191
9192
9193
9194
9195
9196
/**
 * lpfc_sli4_bpl2sgl - Convert an iocb's BPL or BDE into an SLI4 SGL
 * @phba: Pointer to HBA context object.
 * @piocbq: Pointer to the iocb whose buffer list is converted.
 * @sglq: Pointer to the sglq (containing the SGL memory and XRI).
 *
 * Translates the iocb's buffer pointer list (BUFF_TYPE_BLP_64) or single
 * BDE (BUFF_TYPE_BDE_64) into sli4_sge entries inside @sglq->sgl.
 *
 * Return: the sglq's sli4_xritag on success, NO_XRI when arguments are
 * missing or the buffer list cannot be resolved.
 **/
static uint16_t
lpfc_sli4_bpl2sgl(struct lpfc_hba *phba, struct lpfc_iocbq *piocbq,
		struct lpfc_sglq *sglq)
{
	uint16_t xritag = NO_XRI;
	struct ulp_bde64 *bpl = NULL;
	struct ulp_bde64 bde;
	struct sli4_sge *sgl  = NULL;
	struct lpfc_dmabuf *dmabuf;
	IOCB_t *icmd;
	int numBdes = 0;
	int i = 0;
	uint32_t offset = 0; /* accumulated offset in the sg request */
	int inbound = 0; /* number of sg reply entries inbound from firmware */

	if (!piocbq || !sglq)
		return xritag;

	sgl  = (struct sli4_sge *)sglq->sgl;
	icmd = &piocbq->iocb;
	/* BLS responses carry no buffer list; just return the XRI */
	if (icmd->ulpCommand == CMD_XMIT_BLS_RSP64_CX)
		return sglq->sli4_xritag;
	if (icmd->un.genreq64.bdl.bdeFlags == BUFF_TYPE_BLP_64) {
		numBdes = icmd->un.genreq64.bdl.bdeSize /
				sizeof(struct ulp_bde64);
		/* The addrHigh and addrLow fields within the IOCB
		 * have not been byteswapped yet so there is no
		 * need to swap them back.
		 */
		if (piocbq->context3)
			dmabuf = (struct lpfc_dmabuf *)piocbq->context3;
		else
			return xritag;

		bpl  = (struct ulp_bde64 *)dmabuf->virt;
		if (!bpl)
			return xritag;

		for (i = 0; i < numBdes; i++) {
			/* Should already be byte swapped. */
			sgl->addr_hi = bpl->addrHigh;
			sgl->addr_lo = bpl->addrLow;

			sgl->word2 = le32_to_cpu(sgl->word2);
			if ((i+1) == numBdes)
				bf_set(lpfc_sli4_sge_last, sgl, 1);
			else
				bf_set(lpfc_sli4_sge_last, sgl, 0);
			/* swap the size field back to the cpu so we
			 * can assign it to the sgl.
			 */
			bde.tus.w = le32_to_cpu(bpl->tus.w);
			sgl->sge_len = cpu_to_le32(bde.tus.f.bdeSize);
			/* The offsets in the sgl need to be accumulated
			 * separately for the request and reply lists.
			 * The request is always first, the reply follows.
			 */
			if (piocbq->iocb.ulpCommand == CMD_GEN_REQUEST64_CR) {
				/* add up the reply sg entries */
				if (bpl->tus.f.bdeFlags == BUFF_TYPE_BDE_64I)
					inbound++;
				/* first inbound? reset the offset */
				if (inbound == 1)
					offset = 0;
				bf_set(lpfc_sli4_sge_offset, sgl, offset);
				bf_set(lpfc_sli4_sge_type, sgl,
					LPFC_SGE_TYPE_DATA);
				offset += bde.tus.f.bdeSize;
			}
			sgl->word2 = cpu_to_le32(sgl->word2);
			bpl++;
			sgl++;
		}
	} else if (icmd->un.genreq64.bdl.bdeFlags == BUFF_TYPE_BDE_64) {
		/* The addrHigh and addrLow fields of the BDE have not
		 * been byteswapped yet so they need to be swapped
		 * before putting them in the sgl.
		 */
		sgl->addr_hi =
			cpu_to_le32(icmd->un.genreq64.bdl.addrHigh);
		sgl->addr_lo =
			cpu_to_le32(icmd->un.genreq64.bdl.addrLow);
		sgl->word2 = le32_to_cpu(sgl->word2);
		bf_set(lpfc_sli4_sge_last, sgl, 1);
		sgl->word2 = cpu_to_le32(sgl->word2);
		sgl->sge_len =
			cpu_to_le32(icmd->un.genreq64.bdl.bdeSize);
	}
	return sglq->sli4_xritag;
}
9287
9288
9289
9290
9291
9292
9293
9294
9295
9296
9297
9298
9299
9300
9301
9302static int
9303lpfc_sli4_iocb2wqe(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq,
9304 union lpfc_wqe128 *wqe)
9305{
9306 uint32_t xmit_len = 0, total_len = 0;
9307 uint8_t ct = 0;
9308 uint32_t fip;
9309 uint32_t abort_tag;
9310 uint8_t command_type = ELS_COMMAND_NON_FIP;
9311 uint8_t cmnd;
9312 uint16_t xritag;
9313 uint16_t abrt_iotag;
9314 struct lpfc_iocbq *abrtiocbq;
9315 struct ulp_bde64 *bpl = NULL;
9316 uint32_t els_id = LPFC_ELS_ID_DEFAULT;
9317 int numBdes, i;
9318 struct ulp_bde64 bde;
9319 struct lpfc_nodelist *ndlp;
9320 uint32_t *pcmd;
9321 uint32_t if_type;
9322
9323 fip = phba->hba_flag & HBA_FIP_SUPPORT;
9324
9325 if (iocbq->iocb_flag & LPFC_IO_FCP)
9326 command_type = FCP_COMMAND;
9327 else if (fip && (iocbq->iocb_flag & LPFC_FIP_ELS_ID_MASK))
9328 command_type = ELS_COMMAND_FIP;
9329 else
9330 command_type = ELS_COMMAND_NON_FIP;
9331
9332 if (phba->fcp_embed_io)
9333 memset(wqe, 0, sizeof(union lpfc_wqe128));
9334
9335 memcpy(wqe, &iocbq->iocb, sizeof(union lpfc_wqe));
9336 if (iocbq->iocb.ulpCommand != CMD_SEND_FRAME) {
9337
9338 wqe->generic.wqe_com.word7 = 0;
9339 wqe->generic.wqe_com.word10 = 0;
9340 }
9341
9342 abort_tag = (uint32_t) iocbq->iotag;
9343 xritag = iocbq->sli4_xritag;
9344
9345 if (iocbq->iocb.un.genreq64.bdl.bdeFlags == BUFF_TYPE_BLP_64) {
9346 numBdes = iocbq->iocb.un.genreq64.bdl.bdeSize /
9347 sizeof(struct ulp_bde64);
9348 bpl = (struct ulp_bde64 *)
9349 ((struct lpfc_dmabuf *)iocbq->context3)->virt;
9350 if (!bpl)
9351 return IOCB_ERROR;
9352
9353
9354 wqe->generic.bde.addrHigh = le32_to_cpu(bpl->addrHigh);
9355 wqe->generic.bde.addrLow = le32_to_cpu(bpl->addrLow);
9356
9357
9358
9359 wqe->generic.bde.tus.w = le32_to_cpu(bpl->tus.w);
9360 xmit_len = wqe->generic.bde.tus.f.bdeSize;
9361 total_len = 0;
9362 for (i = 0; i < numBdes; i++) {
9363 bde.tus.w = le32_to_cpu(bpl[i].tus.w);
9364 total_len += bde.tus.f.bdeSize;
9365 }
9366 } else
9367 xmit_len = iocbq->iocb.un.fcpi64.bdl.bdeSize;
9368
9369 iocbq->iocb.ulpIoTag = iocbq->iotag;
9370 cmnd = iocbq->iocb.ulpCommand;
9371
9372 switch (iocbq->iocb.ulpCommand) {
9373 case CMD_ELS_REQUEST64_CR:
9374 if (iocbq->iocb_flag & LPFC_IO_LIBDFC)
9375 ndlp = iocbq->context_un.ndlp;
9376 else
9377 ndlp = (struct lpfc_nodelist *)iocbq->context1;
9378 if (!iocbq->iocb.ulpLe) {
9379 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
9380 "2007 Only Limited Edition cmd Format"
9381 " supported 0x%x\n",
9382 iocbq->iocb.ulpCommand);
9383 return IOCB_ERROR;
9384 }
9385
9386 wqe->els_req.payload_len = xmit_len;
9387
9388 bf_set(wqe_tmo, &wqe->els_req.wqe_com,
9389 iocbq->iocb.ulpTimeout);
9390
9391 bf_set(els_req64_vf, &wqe->els_req, 0);
9392
9393 bf_set(els_req64_vfid, &wqe->els_req, 0);
9394 ct = ((iocbq->iocb.ulpCt_h << 1) | iocbq->iocb.ulpCt_l);
9395 bf_set(wqe_ctxt_tag, &wqe->els_req.wqe_com,
9396 iocbq->iocb.ulpContext);
9397 bf_set(wqe_ct, &wqe->els_req.wqe_com, ct);
9398 bf_set(wqe_pu, &wqe->els_req.wqe_com, 0);
9399
9400 if (command_type == ELS_COMMAND_FIP)
9401 els_id = ((iocbq->iocb_flag & LPFC_FIP_ELS_ID_MASK)
9402 >> LPFC_FIP_ELS_ID_SHIFT);
9403 pcmd = (uint32_t *) (((struct lpfc_dmabuf *)
9404 iocbq->context2)->virt);
9405 if_type = bf_get(lpfc_sli_intf_if_type,
9406 &phba->sli4_hba.sli_intf);
9407 if (if_type >= LPFC_SLI_INTF_IF_TYPE_2) {
9408 if (pcmd && (*pcmd == ELS_CMD_FLOGI ||
9409 *pcmd == ELS_CMD_SCR ||
9410 *pcmd == ELS_CMD_RSCN_XMT ||
9411 *pcmd == ELS_CMD_FDISC ||
9412 *pcmd == ELS_CMD_LOGO ||
9413 *pcmd == ELS_CMD_PLOGI)) {
9414 bf_set(els_req64_sp, &wqe->els_req, 1);
9415 bf_set(els_req64_sid, &wqe->els_req,
9416 iocbq->vport->fc_myDID);
9417 if ((*pcmd == ELS_CMD_FLOGI) &&
9418 !(phba->fc_topology ==
9419 LPFC_TOPOLOGY_LOOP))
9420 bf_set(els_req64_sid, &wqe->els_req, 0);
9421 bf_set(wqe_ct, &wqe->els_req.wqe_com, 1);
9422 bf_set(wqe_ctxt_tag, &wqe->els_req.wqe_com,
9423 phba->vpi_ids[iocbq->vport->vpi]);
9424 } else if (pcmd && iocbq->context1) {
9425 bf_set(wqe_ct, &wqe->els_req.wqe_com, 0);
9426 bf_set(wqe_ctxt_tag, &wqe->els_req.wqe_com,
9427 phba->sli4_hba.rpi_ids[ndlp->nlp_rpi]);
9428 }
9429 }
9430 bf_set(wqe_temp_rpi, &wqe->els_req.wqe_com,
9431 phba->sli4_hba.rpi_ids[ndlp->nlp_rpi]);
9432 bf_set(wqe_els_id, &wqe->els_req.wqe_com, els_id);
9433 bf_set(wqe_dbde, &wqe->els_req.wqe_com, 1);
9434 bf_set(wqe_iod, &wqe->els_req.wqe_com, LPFC_WQE_IOD_READ);
9435 bf_set(wqe_qosd, &wqe->els_req.wqe_com, 1);
9436 bf_set(wqe_lenloc, &wqe->els_req.wqe_com, LPFC_WQE_LENLOC_NONE);
9437 bf_set(wqe_ebde_cnt, &wqe->els_req.wqe_com, 0);
9438 wqe->els_req.max_response_payload_len = total_len - xmit_len;
9439 break;
9440 case CMD_XMIT_SEQUENCE64_CX:
9441 bf_set(wqe_ctxt_tag, &wqe->xmit_sequence.wqe_com,
9442 iocbq->iocb.un.ulpWord[3]);
9443 bf_set(wqe_rcvoxid, &wqe->xmit_sequence.wqe_com,
9444 iocbq->iocb.unsli3.rcvsli3.ox_id);
9445
9446 xmit_len = total_len;
9447 cmnd = CMD_XMIT_SEQUENCE64_CR;
9448 if (phba->link_flag & LS_LOOPBACK_MODE)
9449 bf_set(wqe_xo, &wqe->xmit_sequence.wge_ctl, 1);
9450
9451 case CMD_XMIT_SEQUENCE64_CR:
9452
9453 wqe->xmit_sequence.rsvd3 = 0;
9454
9455
9456 bf_set(wqe_pu, &wqe->xmit_sequence.wqe_com, 0);
9457 bf_set(wqe_dbde, &wqe->xmit_sequence.wqe_com, 1);
9458 bf_set(wqe_iod, &wqe->xmit_sequence.wqe_com,
9459 LPFC_WQE_IOD_WRITE);
9460 bf_set(wqe_lenloc, &wqe->xmit_sequence.wqe_com,
9461 LPFC_WQE_LENLOC_WORD12);
9462 bf_set(wqe_ebde_cnt, &wqe->xmit_sequence.wqe_com, 0);
9463 wqe->xmit_sequence.xmit_len = xmit_len;
9464 command_type = OTHER_COMMAND;
9465 break;
9466 case CMD_XMIT_BCAST64_CN:
9467
9468 wqe->xmit_bcast64.seq_payload_len = xmit_len;
9469
9470
9471
9472 bf_set(wqe_ct, &wqe->xmit_bcast64.wqe_com,
9473 ((iocbq->iocb.ulpCt_h << 1) | iocbq->iocb.ulpCt_l));
9474 bf_set(wqe_dbde, &wqe->xmit_bcast64.wqe_com, 1);
9475 bf_set(wqe_iod, &wqe->xmit_bcast64.wqe_com, LPFC_WQE_IOD_WRITE);
9476 bf_set(wqe_lenloc, &wqe->xmit_bcast64.wqe_com,
9477 LPFC_WQE_LENLOC_WORD3);
9478 bf_set(wqe_ebde_cnt, &wqe->xmit_bcast64.wqe_com, 0);
9479 break;
9480 case CMD_FCP_IWRITE64_CR:
9481 command_type = FCP_COMMAND_DATA_OUT;
9482
9483
9484 bf_set(payload_offset_len, &wqe->fcp_iwrite,
9485 xmit_len + sizeof(struct fcp_rsp));
9486 bf_set(cmd_buff_len, &wqe->fcp_iwrite,
9487 0);
9488
9489
9490 bf_set(wqe_erp, &wqe->fcp_iwrite.wqe_com,
9491 iocbq->iocb.ulpFCP2Rcvy);
9492 bf_set(wqe_lnk, &wqe->fcp_iwrite.wqe_com, iocbq->iocb.ulpXS);
9493
9494 bf_set(wqe_iod, &wqe->fcp_iwrite.wqe_com, LPFC_WQE_IOD_WRITE);
9495 bf_set(wqe_lenloc, &wqe->fcp_iwrite.wqe_com,
9496 LPFC_WQE_LENLOC_WORD4);
9497 bf_set(wqe_pu, &wqe->fcp_iwrite.wqe_com, iocbq->iocb.ulpPU);
9498 bf_set(wqe_dbde, &wqe->fcp_iwrite.wqe_com, 1);
9499 if (iocbq->iocb_flag & LPFC_IO_OAS) {
9500 bf_set(wqe_oas, &wqe->fcp_iwrite.wqe_com, 1);
9501 bf_set(wqe_ccpe, &wqe->fcp_iwrite.wqe_com, 1);
9502 if (iocbq->priority) {
9503 bf_set(wqe_ccp, &wqe->fcp_iwrite.wqe_com,
9504 (iocbq->priority << 1));
9505 } else {
9506 bf_set(wqe_ccp, &wqe->fcp_iwrite.wqe_com,
9507 (phba->cfg_XLanePriority << 1));
9508 }
9509 }
9510
9511
9512
9513 if (phba->cfg_enable_pbde)
9514 bf_set(wqe_pbde, &wqe->fcp_iwrite.wqe_com, 1);
9515 else
9516 bf_set(wqe_pbde, &wqe->fcp_iwrite.wqe_com, 0);
9517
9518 if (phba->fcp_embed_io) {
9519 struct lpfc_io_buf *lpfc_cmd;
9520 struct sli4_sge *sgl;
9521 struct fcp_cmnd *fcp_cmnd;
9522 uint32_t *ptr;
9523
9524
9525
9526 lpfc_cmd = iocbq->context1;
9527 sgl = (struct sli4_sge *)lpfc_cmd->dma_sgl;
9528 fcp_cmnd = lpfc_cmd->fcp_cmnd;
9529
9530
9531 wqe->generic.bde.tus.f.bdeFlags =
9532 BUFF_TYPE_BDE_IMMED;
9533 wqe->generic.bde.tus.f.bdeSize = sgl->sge_len;
9534 wqe->generic.bde.addrHigh = 0;
9535 wqe->generic.bde.addrLow = 88;
9536
9537 bf_set(wqe_wqes, &wqe->fcp_iwrite.wqe_com, 1);
9538 bf_set(wqe_dbde, &wqe->fcp_iwrite.wqe_com, 0);
9539
9540
9541 ptr = &wqe->words[22];
9542 memcpy(ptr, fcp_cmnd, sizeof(struct fcp_cmnd));
9543 }
9544 break;
9545 case CMD_FCP_IREAD64_CR:
9546
9547
9548 bf_set(payload_offset_len, &wqe->fcp_iread,
9549 xmit_len + sizeof(struct fcp_rsp));
9550 bf_set(cmd_buff_len, &wqe->fcp_iread,
9551 0);
9552
9553
9554 bf_set(wqe_erp, &wqe->fcp_iread.wqe_com,
9555 iocbq->iocb.ulpFCP2Rcvy);
9556 bf_set(wqe_lnk, &wqe->fcp_iread.wqe_com, iocbq->iocb.ulpXS);
9557
9558 bf_set(wqe_iod, &wqe->fcp_iread.wqe_com, LPFC_WQE_IOD_READ);
9559 bf_set(wqe_lenloc, &wqe->fcp_iread.wqe_com,
9560 LPFC_WQE_LENLOC_WORD4);
9561 bf_set(wqe_pu, &wqe->fcp_iread.wqe_com, iocbq->iocb.ulpPU);
9562 bf_set(wqe_dbde, &wqe->fcp_iread.wqe_com, 1);
9563 if (iocbq->iocb_flag & LPFC_IO_OAS) {
9564 bf_set(wqe_oas, &wqe->fcp_iread.wqe_com, 1);
9565 bf_set(wqe_ccpe, &wqe->fcp_iread.wqe_com, 1);
9566 if (iocbq->priority) {
9567 bf_set(wqe_ccp, &wqe->fcp_iread.wqe_com,
9568 (iocbq->priority << 1));
9569 } else {
9570 bf_set(wqe_ccp, &wqe->fcp_iread.wqe_com,
9571 (phba->cfg_XLanePriority << 1));
9572 }
9573 }
9574
9575
9576
9577 if (phba->cfg_enable_pbde)
9578 bf_set(wqe_pbde, &wqe->fcp_iread.wqe_com, 1);
9579 else
9580 bf_set(wqe_pbde, &wqe->fcp_iread.wqe_com, 0);
9581
9582 if (phba->fcp_embed_io) {
9583 struct lpfc_io_buf *lpfc_cmd;
9584 struct sli4_sge *sgl;
9585 struct fcp_cmnd *fcp_cmnd;
9586 uint32_t *ptr;
9587
9588
9589
9590 lpfc_cmd = iocbq->context1;
9591 sgl = (struct sli4_sge *)lpfc_cmd->dma_sgl;
9592 fcp_cmnd = lpfc_cmd->fcp_cmnd;
9593
9594
9595 wqe->generic.bde.tus.f.bdeFlags =
9596 BUFF_TYPE_BDE_IMMED;
9597 wqe->generic.bde.tus.f.bdeSize = sgl->sge_len;
9598 wqe->generic.bde.addrHigh = 0;
9599 wqe->generic.bde.addrLow = 88;
9600
9601 bf_set(wqe_wqes, &wqe->fcp_iread.wqe_com, 1);
9602 bf_set(wqe_dbde, &wqe->fcp_iread.wqe_com, 0);
9603
9604
9605 ptr = &wqe->words[22];
9606 memcpy(ptr, fcp_cmnd, sizeof(struct fcp_cmnd));
9607 }
9608 break;
9609 case CMD_FCP_ICMND64_CR:
9610
9611
9612 bf_set(payload_offset_len, &wqe->fcp_icmd,
9613 xmit_len + sizeof(struct fcp_rsp));
9614 bf_set(cmd_buff_len, &wqe->fcp_icmd,
9615 0);
9616
9617 bf_set(wqe_pu, &wqe->fcp_icmd.wqe_com, 0);
9618
9619 bf_set(wqe_dbde, &wqe->fcp_icmd.wqe_com, 1);
9620 bf_set(wqe_iod, &wqe->fcp_icmd.wqe_com, LPFC_WQE_IOD_WRITE);
9621 bf_set(wqe_qosd, &wqe->fcp_icmd.wqe_com, 1);
9622 bf_set(wqe_lenloc, &wqe->fcp_icmd.wqe_com,
9623 LPFC_WQE_LENLOC_NONE);
9624 bf_set(wqe_erp, &wqe->fcp_icmd.wqe_com,
9625 iocbq->iocb.ulpFCP2Rcvy);
9626 if (iocbq->iocb_flag & LPFC_IO_OAS) {
9627 bf_set(wqe_oas, &wqe->fcp_icmd.wqe_com, 1);
9628 bf_set(wqe_ccpe, &wqe->fcp_icmd.wqe_com, 1);
9629 if (iocbq->priority) {
9630 bf_set(wqe_ccp, &wqe->fcp_icmd.wqe_com,
9631 (iocbq->priority << 1));
9632 } else {
9633 bf_set(wqe_ccp, &wqe->fcp_icmd.wqe_com,
9634 (phba->cfg_XLanePriority << 1));
9635 }
9636 }
9637
9638
9639 if (phba->fcp_embed_io) {
9640 struct lpfc_io_buf *lpfc_cmd;
9641 struct sli4_sge *sgl;
9642 struct fcp_cmnd *fcp_cmnd;
9643 uint32_t *ptr;
9644
9645
9646
9647 lpfc_cmd = iocbq->context1;
9648 sgl = (struct sli4_sge *)lpfc_cmd->dma_sgl;
9649 fcp_cmnd = lpfc_cmd->fcp_cmnd;
9650
9651
9652 wqe->generic.bde.tus.f.bdeFlags =
9653 BUFF_TYPE_BDE_IMMED;
9654 wqe->generic.bde.tus.f.bdeSize = sgl->sge_len;
9655 wqe->generic.bde.addrHigh = 0;
9656 wqe->generic.bde.addrLow = 88;
9657
9658 bf_set(wqe_wqes, &wqe->fcp_icmd.wqe_com, 1);
9659 bf_set(wqe_dbde, &wqe->fcp_icmd.wqe_com, 0);
9660
9661
9662 ptr = &wqe->words[22];
9663 memcpy(ptr, fcp_cmnd, sizeof(struct fcp_cmnd));
9664 }
9665 break;
9666 case CMD_GEN_REQUEST64_CR:
9667
9668
9669
9670 xmit_len = 0;
9671 numBdes = iocbq->iocb.un.genreq64.bdl.bdeSize /
9672 sizeof(struct ulp_bde64);
9673 for (i = 0; i < numBdes; i++) {
9674 bde.tus.w = le32_to_cpu(bpl[i].tus.w);
9675 if (bde.tus.f.bdeFlags != BUFF_TYPE_BDE_64)
9676 break;
9677 xmit_len += bde.tus.f.bdeSize;
9678 }
9679
9680 wqe->gen_req.request_payload_len = xmit_len;
9681
9682
9683
9684 if (iocbq->iocb.ulpCt_h || iocbq->iocb.ulpCt_l) {
9685 ct = ((iocbq->iocb.ulpCt_h << 1) | iocbq->iocb.ulpCt_l);
9686 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
9687 "2015 Invalid CT %x command 0x%x\n",
9688 ct, iocbq->iocb.ulpCommand);
9689 return IOCB_ERROR;
9690 }
9691 bf_set(wqe_ct, &wqe->gen_req.wqe_com, 0);
9692 bf_set(wqe_tmo, &wqe->gen_req.wqe_com, iocbq->iocb.ulpTimeout);
9693 bf_set(wqe_pu, &wqe->gen_req.wqe_com, iocbq->iocb.ulpPU);
9694 bf_set(wqe_dbde, &wqe->gen_req.wqe_com, 1);
9695 bf_set(wqe_iod, &wqe->gen_req.wqe_com, LPFC_WQE_IOD_READ);
9696 bf_set(wqe_qosd, &wqe->gen_req.wqe_com, 1);
9697 bf_set(wqe_lenloc, &wqe->gen_req.wqe_com, LPFC_WQE_LENLOC_NONE);
9698 bf_set(wqe_ebde_cnt, &wqe->gen_req.wqe_com, 0);
9699 wqe->gen_req.max_response_payload_len = total_len - xmit_len;
9700 command_type = OTHER_COMMAND;
9701 break;
9702 case CMD_XMIT_ELS_RSP64_CX:
9703 ndlp = (struct lpfc_nodelist *)iocbq->context1;
9704
9705
9706 wqe->xmit_els_rsp.response_payload_len = xmit_len;
9707
9708 wqe->xmit_els_rsp.word4 = 0;
9709
9710 bf_set(wqe_els_did, &wqe->xmit_els_rsp.wqe_dest,
9711 iocbq->iocb.un.xseq64.xmit_els_remoteID);
9712
9713 if_type = bf_get(lpfc_sli_intf_if_type,
9714 &phba->sli4_hba.sli_intf);
9715 if (if_type >= LPFC_SLI_INTF_IF_TYPE_2) {
9716 if (iocbq->vport->fc_flag & FC_PT2PT) {
9717 bf_set(els_rsp64_sp, &wqe->xmit_els_rsp, 1);
9718 bf_set(els_rsp64_sid, &wqe->xmit_els_rsp,
9719 iocbq->vport->fc_myDID);
9720 if (iocbq->vport->fc_myDID == Fabric_DID) {
9721 bf_set(wqe_els_did,
9722 &wqe->xmit_els_rsp.wqe_dest, 0);
9723 }
9724 }
9725 }
9726 bf_set(wqe_ct, &wqe->xmit_els_rsp.wqe_com,
9727 ((iocbq->iocb.ulpCt_h << 1) | iocbq->iocb.ulpCt_l));
9728 bf_set(wqe_pu, &wqe->xmit_els_rsp.wqe_com, iocbq->iocb.ulpPU);
9729 bf_set(wqe_rcvoxid, &wqe->xmit_els_rsp.wqe_com,
9730 iocbq->iocb.unsli3.rcvsli3.ox_id);
9731 if (!iocbq->iocb.ulpCt_h && iocbq->iocb.ulpCt_l)
9732 bf_set(wqe_ctxt_tag, &wqe->xmit_els_rsp.wqe_com,
9733 phba->vpi_ids[iocbq->vport->vpi]);
9734 bf_set(wqe_dbde, &wqe->xmit_els_rsp.wqe_com, 1);
9735 bf_set(wqe_iod, &wqe->xmit_els_rsp.wqe_com, LPFC_WQE_IOD_WRITE);
9736 bf_set(wqe_qosd, &wqe->xmit_els_rsp.wqe_com, 1);
9737 bf_set(wqe_lenloc, &wqe->xmit_els_rsp.wqe_com,
9738 LPFC_WQE_LENLOC_WORD3);
9739 bf_set(wqe_ebde_cnt, &wqe->xmit_els_rsp.wqe_com, 0);
9740 bf_set(wqe_rsp_temp_rpi, &wqe->xmit_els_rsp,
9741 phba->sli4_hba.rpi_ids[ndlp->nlp_rpi]);
9742 pcmd = (uint32_t *) (((struct lpfc_dmabuf *)
9743 iocbq->context2)->virt);
9744 if (phba->fc_topology == LPFC_TOPOLOGY_LOOP) {
9745 bf_set(els_rsp64_sp, &wqe->xmit_els_rsp, 1);
9746 bf_set(els_rsp64_sid, &wqe->xmit_els_rsp,
9747 iocbq->vport->fc_myDID);
9748 bf_set(wqe_ct, &wqe->xmit_els_rsp.wqe_com, 1);
9749 bf_set(wqe_ctxt_tag, &wqe->xmit_els_rsp.wqe_com,
9750 phba->vpi_ids[phba->pport->vpi]);
9751 }
9752 command_type = OTHER_COMMAND;
9753 break;
9754 case CMD_CLOSE_XRI_CN:
9755 case CMD_ABORT_XRI_CN:
9756 case CMD_ABORT_XRI_CX:
9757
9758
9759 abrt_iotag = iocbq->iocb.un.acxri.abortContextTag;
9760 if (abrt_iotag != 0 && abrt_iotag <= phba->sli.last_iotag) {
9761 abrtiocbq = phba->sli.iocbq_lookup[abrt_iotag];
9762 fip = abrtiocbq->iocb_flag & LPFC_FIP_ELS_ID_MASK;
9763 } else
9764 fip = 0;
9765
9766 if ((iocbq->iocb.ulpCommand == CMD_CLOSE_XRI_CN) || fip)
9767
9768
9769
9770
9771
9772 bf_set(abort_cmd_ia, &wqe->abort_cmd, 1);
9773 else
9774 bf_set(abort_cmd_ia, &wqe->abort_cmd, 0);
9775 bf_set(abort_cmd_criteria, &wqe->abort_cmd, T_XRI_TAG);
9776
9777 wqe->abort_cmd.rsrvd5 = 0;
9778 bf_set(wqe_ct, &wqe->abort_cmd.wqe_com,
9779 ((iocbq->iocb.ulpCt_h << 1) | iocbq->iocb.ulpCt_l));
9780 abort_tag = iocbq->iocb.un.acxri.abortIoTag;
9781
9782
9783
9784
9785 bf_set(wqe_cmnd, &wqe->abort_cmd.wqe_com, CMD_ABORT_XRI_CX);
9786 bf_set(wqe_qosd, &wqe->abort_cmd.wqe_com, 1);
9787 bf_set(wqe_lenloc, &wqe->abort_cmd.wqe_com,
9788 LPFC_WQE_LENLOC_NONE);
9789 cmnd = CMD_ABORT_XRI_CX;
9790 command_type = OTHER_COMMAND;
9791 xritag = 0;
9792 break;
9793 case CMD_XMIT_BLS_RSP64_CX:
9794 ndlp = (struct lpfc_nodelist *)iocbq->context1;
9795
9796
9797
9798
9799 memset(wqe, 0, sizeof(union lpfc_wqe));
9800
9801 bf_set(xmit_bls_rsp64_oxid, &wqe->xmit_bls_rsp,
9802 bf_get(lpfc_abts_oxid, &iocbq->iocb.un.bls_rsp));
9803 if (bf_get(lpfc_abts_orig, &iocbq->iocb.un.bls_rsp) ==
9804 LPFC_ABTS_UNSOL_INT) {
9805
9806
9807
9808
9809 bf_set(xmit_bls_rsp64_rxid, &wqe->xmit_bls_rsp,
9810 iocbq->sli4_xritag);
9811 } else {
9812
9813
9814
9815
9816 bf_set(xmit_bls_rsp64_rxid, &wqe->xmit_bls_rsp,
9817 bf_get(lpfc_abts_rxid, &iocbq->iocb.un.bls_rsp));
9818 }
9819 bf_set(xmit_bls_rsp64_seqcnthi, &wqe->xmit_bls_rsp, 0xffff);
9820 bf_set(wqe_xmit_bls_pt, &wqe->xmit_bls_rsp.wqe_dest, 0x1);
9821
9822
9823 bf_set(wqe_els_did, &wqe->xmit_bls_rsp.wqe_dest,
9824 ndlp->nlp_DID);
9825 bf_set(xmit_bls_rsp64_temprpi, &wqe->xmit_bls_rsp,
9826 iocbq->iocb.ulpContext);
9827 bf_set(wqe_ct, &wqe->xmit_bls_rsp.wqe_com, 1);
9828 bf_set(wqe_ctxt_tag, &wqe->xmit_bls_rsp.wqe_com,
9829 phba->vpi_ids[phba->pport->vpi]);
9830 bf_set(wqe_qosd, &wqe->xmit_bls_rsp.wqe_com, 1);
9831 bf_set(wqe_lenloc, &wqe->xmit_bls_rsp.wqe_com,
9832 LPFC_WQE_LENLOC_NONE);
9833
9834 command_type = OTHER_COMMAND;
9835 if (iocbq->iocb.un.xseq64.w5.hcsw.Rctl == FC_RCTL_BA_RJT) {
9836 bf_set(xmit_bls_rsp64_rjt_vspec, &wqe->xmit_bls_rsp,
9837 bf_get(lpfc_vndr_code, &iocbq->iocb.un.bls_rsp));
9838 bf_set(xmit_bls_rsp64_rjt_expc, &wqe->xmit_bls_rsp,
9839 bf_get(lpfc_rsn_expln, &iocbq->iocb.un.bls_rsp));
9840 bf_set(xmit_bls_rsp64_rjt_rsnc, &wqe->xmit_bls_rsp,
9841 bf_get(lpfc_rsn_code, &iocbq->iocb.un.bls_rsp));
9842 }
9843
9844 break;
9845 case CMD_SEND_FRAME:
9846 bf_set(wqe_xri_tag, &wqe->generic.wqe_com, xritag);
9847 bf_set(wqe_reqtag, &wqe->generic.wqe_com, iocbq->iotag);
9848 return 0;
9849 case CMD_XRI_ABORTED_CX:
9850 case CMD_CREATE_XRI_CR:
9851 case CMD_IOCB_FCP_IBIDIR64_CR:
9852 case CMD_FCP_TSEND64_CX:
9853 case CMD_FCP_TRSP64_CX:
9854 case CMD_FCP_AUTO_TRSP_CX:
9855 default:
9856 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
9857 "2014 Invalid command 0x%x\n",
9858 iocbq->iocb.ulpCommand);
9859 return IOCB_ERROR;
9860 break;
9861 }
9862
9863 if (iocbq->iocb_flag & LPFC_IO_DIF_PASS)
9864 bf_set(wqe_dif, &wqe->generic.wqe_com, LPFC_WQE_DIF_PASSTHRU);
9865 else if (iocbq->iocb_flag & LPFC_IO_DIF_STRIP)
9866 bf_set(wqe_dif, &wqe->generic.wqe_com, LPFC_WQE_DIF_STRIP);
9867 else if (iocbq->iocb_flag & LPFC_IO_DIF_INSERT)
9868 bf_set(wqe_dif, &wqe->generic.wqe_com, LPFC_WQE_DIF_INSERT);
9869 iocbq->iocb_flag &= ~(LPFC_IO_DIF_PASS | LPFC_IO_DIF_STRIP |
9870 LPFC_IO_DIF_INSERT);
9871 bf_set(wqe_xri_tag, &wqe->generic.wqe_com, xritag);
9872 bf_set(wqe_reqtag, &wqe->generic.wqe_com, iocbq->iotag);
9873 wqe->generic.wqe_com.abort_tag = abort_tag;
9874 bf_set(wqe_cmd_type, &wqe->generic.wqe_com, command_type);
9875 bf_set(wqe_cmnd, &wqe->generic.wqe_com, cmnd);
9876 bf_set(wqe_class, &wqe->generic.wqe_com, iocbq->iocb.ulpClass);
9877 bf_set(wqe_cqid, &wqe->generic.wqe_com, LPFC_WQE_CQ_ID_DEFAULT);
9878 return 0;
9879}
9880
9881
9882
9883
9884
9885
9886
9887
9888
9889
9890
9891
9892
9893
9894
/**
 * __lpfc_sli_issue_iocb_s4 - SLI4 device lockless version of lpfc_sli_issue_iocb
 * @phba: Pointer to HBA context object.
 * @ring_number: SLI ring number (unused on SLI4; the WQ is selected from
 *               the iocb flags instead).
 * @piocb: Pointer to the command iocb to issue.
 * @flag: SLI_IOCB_RET_IOCB means "return IOCB_BUSY instead of queueing to txq".
 *
 * Issues an iocb on an SLI-4 HBA by converting it to a WQE and posting it to
 * the selected work queue. Caller must hold the WQ's pring->ring_lock (see
 * the lockdep assertion below).
 *
 * Return: 0 / IOCB_SUCCESS on success, IOCB_BUSY when the command could not
 * be accepted and @flag forbids queueing, IOCB_ERROR on failure.
 */
static int
__lpfc_sli_issue_iocb_s4(struct lpfc_hba *phba, uint32_t ring_number,
			 struct lpfc_iocbq *piocb, uint32_t flag)
{
	struct lpfc_sglq *sglq;
	union lpfc_wqe128 wqe;
	struct lpfc_queue *wq;
	struct lpfc_sli_ring *pring;

	/* FCP I/O (or anything forced onto an FCP WQ) goes to the hardware
	 * queue selected by hba_wqidx; everything else uses the ELS WQ.
	 */
	if ((piocb->iocb_flag & LPFC_IO_FCP) ||
	    (piocb->iocb_flag & LPFC_USE_FCPWQIDX)) {
		wq = phba->sli4_hba.hdwq[piocb->hba_wqidx].fcp_wq;
	} else {
		wq = phba->sli4_hba.els_wq;
	}

	/* Get corresponding ring */
	pring = wq->pring;

	/* The caller is expected to hold the ring lock of the WQ we post to */
	lockdep_assert_held(&pring->ring_lock);

	if (piocb->sli4_xritag == NO_XRI) {
		/* Abort/close commands do not consume an XRI/sglq */
		if (piocb->iocb.ulpCommand == CMD_ABORT_XRI_CN ||
		    piocb->iocb.ulpCommand == CMD_CLOSE_XRI_CN)
			sglq = NULL;
		else {
			/* Preserve ordering: if the txq is non-empty, this
			 * iocb must queue behind it rather than jump ahead.
			 */
			if (!list_empty(&pring->txq)) {
				if (!(flag & SLI_IOCB_RET_IOCB)) {
					__lpfc_sli_ringtx_put(phba,
						pring, piocb);
					return IOCB_SUCCESS;
				} else {
					return IOCB_BUSY;
				}
			} else {
				sglq = __lpfc_sli_get_els_sglq(phba, piocb);
				if (!sglq) {
					/* No sglq available: queue or busy */
					if (!(flag & SLI_IOCB_RET_IOCB)) {
						__lpfc_sli_ringtx_put(phba,
								pring,
								piocb);
						return IOCB_SUCCESS;
					} else
						return IOCB_BUSY;
				}
			}
		}
	} else if (piocb->iocb_flag & LPFC_IO_FCP)
		/* FCP commands carry their own XRI; no ELS sglq needed */
		sglq = NULL;
	else {
		/* XRI already assigned (e.g. retry): reuse the active sglq */
		sglq = __lpfc_get_active_sglq(phba, piocb->sli4_lxritag);
		if (!sglq)
			return IOCB_ERROR;
	}

	if (sglq) {
		piocb->sli4_lxritag = sglq->sli4_lxritag;
		piocb->sli4_xritag = sglq->sli4_xritag;
		/* Convert the BPL to an SGL for the hardware */
		if (NO_XRI == lpfc_sli4_bpl2sgl(phba, piocb, sglq))
			return IOCB_ERROR;
	}

	/* Translate the legacy iocb into a WQE and post it */
	if (lpfc_sli4_iocb2wqe(phba, piocb, &wqe))
		return IOCB_ERROR;

	if (lpfc_sli4_wq_put(wq, &wqe))
		return IOCB_ERROR;
	/* Track the outstanding command on the txcmplq */
	lpfc_sli_ringtxcmpl_put(phba, pring, piocb);

	return 0;
}
9976
9977
9978
9979
9980
9981
9982
9983
9984
9985
9986
9987
9988int
9989__lpfc_sli_issue_iocb(struct lpfc_hba *phba, uint32_t ring_number,
9990 struct lpfc_iocbq *piocb, uint32_t flag)
9991{
9992 return phba->__lpfc_sli_issue_iocb(phba, ring_number, piocb, flag);
9993}
9994
9995
9996
9997
9998
9999
10000
10001
10002
10003
10004int
10005lpfc_sli_api_table_setup(struct lpfc_hba *phba, uint8_t dev_grp)
10006{
10007
10008 switch (dev_grp) {
10009 case LPFC_PCI_DEV_LP:
10010 phba->__lpfc_sli_issue_iocb = __lpfc_sli_issue_iocb_s3;
10011 phba->__lpfc_sli_release_iocbq = __lpfc_sli_release_iocbq_s3;
10012 break;
10013 case LPFC_PCI_DEV_OC:
10014 phba->__lpfc_sli_issue_iocb = __lpfc_sli_issue_iocb_s4;
10015 phba->__lpfc_sli_release_iocbq = __lpfc_sli_release_iocbq_s4;
10016 break;
10017 default:
10018 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
10019 "1419 Invalid HBA PCI-device group: 0x%x\n",
10020 dev_grp);
10021 return -ENODEV;
10022 break;
10023 }
10024 phba->lpfc_get_iocb_from_iocbq = lpfc_get_iocb_from_iocbq;
10025 return 0;
10026}
10027
10028
10029
10030
10031
10032
10033
10034
10035
10036
10037
10038struct lpfc_sli_ring *
10039lpfc_sli4_calc_ring(struct lpfc_hba *phba, struct lpfc_iocbq *piocb)
10040{
10041 struct lpfc_io_buf *lpfc_cmd;
10042
10043 if (piocb->iocb_flag & (LPFC_IO_FCP | LPFC_USE_FCPWQIDX)) {
10044 if (unlikely(!phba->sli4_hba.hdwq))
10045 return NULL;
10046
10047
10048
10049
10050 if (!(piocb->iocb_flag & LPFC_USE_FCPWQIDX)) {
10051 lpfc_cmd = (struct lpfc_io_buf *)piocb->context1;
10052 piocb->hba_wqidx = lpfc_cmd->hdwq_no;
10053 }
10054 return phba->sli4_hba.hdwq[piocb->hba_wqidx].fcp_wq->pring;
10055 } else {
10056 if (unlikely(!phba->sli4_hba.els_wq))
10057 return NULL;
10058 piocb->hba_wqidx = 0;
10059 return phba->sli4_hba.els_wq->pring;
10060 }
10061}
10062
10063
10064
10065
10066
10067
10068
10069
10070
10071
10072
10073
10074
10075
10076int
10077lpfc_sli_issue_iocb(struct lpfc_hba *phba, uint32_t ring_number,
10078 struct lpfc_iocbq *piocb, uint32_t flag)
10079{
10080 struct lpfc_sli_ring *pring;
10081 unsigned long iflags;
10082 int rc;
10083
10084 if (phba->sli_rev == LPFC_SLI_REV4) {
10085 pring = lpfc_sli4_calc_ring(phba, piocb);
10086 if (unlikely(pring == NULL))
10087 return IOCB_ERROR;
10088
10089 spin_lock_irqsave(&pring->ring_lock, iflags);
10090 rc = __lpfc_sli_issue_iocb(phba, ring_number, piocb, flag);
10091 spin_unlock_irqrestore(&pring->ring_lock, iflags);
10092 } else {
10093
10094 spin_lock_irqsave(&phba->hbalock, iflags);
10095 rc = __lpfc_sli_issue_iocb(phba, ring_number, piocb, flag);
10096 spin_unlock_irqrestore(&phba->hbalock, iflags);
10097 }
10098 return rc;
10099}
10100
10101
10102
10103
10104
10105
10106
10107
10108
10109
10110
10111
10112static int
10113lpfc_extra_ring_setup( struct lpfc_hba *phba)
10114{
10115 struct lpfc_sli *psli;
10116 struct lpfc_sli_ring *pring;
10117
10118 psli = &phba->sli;
10119
10120
10121
10122
10123 pring = &psli->sli3_ring[LPFC_FCP_RING];
10124 pring->sli.sli3.numCiocb -= SLI2_IOCB_CMD_R1XTRA_ENTRIES;
10125 pring->sli.sli3.numRiocb -= SLI2_IOCB_RSP_R1XTRA_ENTRIES;
10126 pring->sli.sli3.numCiocb -= SLI2_IOCB_CMD_R3XTRA_ENTRIES;
10127 pring->sli.sli3.numRiocb -= SLI2_IOCB_RSP_R3XTRA_ENTRIES;
10128
10129
10130 pring = &psli->sli3_ring[LPFC_EXTRA_RING];
10131
10132 pring->sli.sli3.numCiocb += SLI2_IOCB_CMD_R1XTRA_ENTRIES;
10133 pring->sli.sli3.numRiocb += SLI2_IOCB_RSP_R1XTRA_ENTRIES;
10134 pring->sli.sli3.numCiocb += SLI2_IOCB_CMD_R3XTRA_ENTRIES;
10135 pring->sli.sli3.numRiocb += SLI2_IOCB_RSP_R3XTRA_ENTRIES;
10136
10137
10138 pring->iotag_max = 4096;
10139 pring->num_mask = 1;
10140 pring->prt[0].profile = 0;
10141 pring->prt[0].rctl = phba->cfg_multi_ring_rctl;
10142 pring->prt[0].type = phba->cfg_multi_ring_type;
10143 pring->prt[0].lpfc_sli_rcv_unsol_event = NULL;
10144 return 0;
10145}
10146
10147
10148
10149
10150
10151
10152
10153
10154
10155
10156
10157
10158
10159static void
10160lpfc_sli_abts_err_handler(struct lpfc_hba *phba,
10161 struct lpfc_iocbq *iocbq)
10162{
10163 struct lpfc_nodelist *ndlp = NULL;
10164 uint16_t rpi = 0, vpi = 0;
10165 struct lpfc_vport *vport = NULL;
10166
10167
10168 vpi = iocbq->iocb.un.asyncstat.sub_ctxt_tag;
10169 rpi = iocbq->iocb.ulpContext;
10170
10171 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
10172 "3092 Port generated ABTS async event "
10173 "on vpi %d rpi %d status 0x%x\n",
10174 vpi, rpi, iocbq->iocb.ulpStatus);
10175
10176 vport = lpfc_find_vport_by_vpid(phba, vpi);
10177 if (!vport)
10178 goto err_exit;
10179 ndlp = lpfc_findnode_rpi(vport, rpi);
10180 if (!ndlp || !NLP_CHK_NODE_ACT(ndlp))
10181 goto err_exit;
10182
10183 if (iocbq->iocb.ulpStatus == IOSTAT_LOCAL_REJECT)
10184 lpfc_sli_abts_recover_port(vport, ndlp);
10185 return;
10186
10187 err_exit:
10188 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
10189 "3095 Event Context not found, no "
10190 "action on vpi %d rpi %d status 0x%x, reason 0x%x\n",
10191 iocbq->iocb.ulpContext, iocbq->iocb.ulpStatus,
10192 vpi, rpi);
10193}
10194
10195
10196
10197
10198
10199
10200
10201
10202
10203
10204
10205void
10206lpfc_sli4_abts_err_handler(struct lpfc_hba *phba,
10207 struct lpfc_nodelist *ndlp,
10208 struct sli4_wcqe_xri_aborted *axri)
10209{
10210 struct lpfc_vport *vport;
10211 uint32_t ext_status = 0;
10212
10213 if (!ndlp || !NLP_CHK_NODE_ACT(ndlp)) {
10214 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
10215 "3115 Node Context not found, driver "
10216 "ignoring abts err event\n");
10217 return;
10218 }
10219
10220 vport = ndlp->vport;
10221 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
10222 "3116 Port generated FCP XRI ABORT event on "
10223 "vpi %d rpi %d xri x%x status 0x%x parameter x%x\n",
10224 ndlp->vport->vpi, phba->sli4_hba.rpi_ids[ndlp->nlp_rpi],
10225 bf_get(lpfc_wcqe_xa_xri, axri),
10226 bf_get(lpfc_wcqe_xa_status, axri),
10227 axri->parameter);
10228
10229
10230
10231
10232
10233
10234 ext_status = axri->parameter & IOERR_PARAM_MASK;
10235 if ((bf_get(lpfc_wcqe_xa_status, axri) == IOSTAT_LOCAL_REJECT) &&
10236 ((ext_status == IOERR_SEQUENCE_TIMEOUT) || (ext_status == 0)))
10237 lpfc_sli_abts_recover_port(vport, ndlp);
10238}
10239
10240
10241
10242
10243
10244
10245
10246
10247
10248
10249
10250
10251
10252
/**
 * lpfc_sli_async_event_handler - process an asynchronous event iocb
 * @phba: Pointer to HBA context object.
 * @pring: Pointer to the ring the event arrived on (used only for logging).
 * @iocbq: Pointer to the iocb carrying the async event.
 *
 * Handles temperature warn/safe events (logged and posted to the FC
 * transport as a vendor event) and ABTS status events (delegated to
 * lpfc_sli_abts_err_handler). Unknown event codes are dumped word by word.
 */
static void
lpfc_sli_async_event_handler(struct lpfc_hba * phba,
	struct lpfc_sli_ring * pring, struct lpfc_iocbq * iocbq)
{
	IOCB_t *icmd;
	uint16_t evt_code;
	struct temp_event temp_event_data;
	struct Scsi_Host *shost;
	uint32_t *iocb_w;

	icmd = &iocbq->iocb;
	evt_code = icmd->un.asyncstat.evt_code;

	switch (evt_code) {
	case ASYNC_TEMP_WARN:
	case ASYNC_TEMP_SAFE:
		/* The temperature (Celsius) is carried in ulpContext */
		temp_event_data.data = (uint32_t) icmd->ulpContext;
		temp_event_data.event_type = FC_REG_TEMPERATURE_EVENT;
		if (evt_code == ASYNC_TEMP_WARN) {
			temp_event_data.event_code = LPFC_THRESHOLD_TEMP;
			lpfc_printf_log(phba, KERN_ERR, LOG_TEMP,
				"0347 Adapter is very hot, please take "
				"corrective action. temperature : %d Celsius\n",
				(uint32_t) icmd->ulpContext);
		} else {
			temp_event_data.event_code = LPFC_NORMAL_TEMP;
			lpfc_printf_log(phba, KERN_ERR, LOG_TEMP,
				"0340 Adapter temperature is OK now. "
				"temperature : %d Celsius\n",
				(uint32_t) icmd->ulpContext);
		}

		/* Send temperature change event to applications */
		shost = lpfc_shost_from_vport(phba->pport);
		fc_host_post_vendor_event(shost, fc_get_event_number(),
			sizeof(temp_event_data), (char *) &temp_event_data,
			LPFC_NL_VENDOR_ID);
		break;
	case ASYNC_STATUS_CN:
		lpfc_sli_abts_err_handler(phba, iocbq);
		break;
	default:
		/* Unknown async event: dump all sixteen iocb words */
		iocb_w = (uint32_t *) icmd;
		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
			"0346 Ring %d handler: unexpected ASYNC_STATUS"
			" evt_code 0x%x\n"
			"W0  0x%08x W1  0x%08x W2  0x%08x W3  0x%08x\n"
			"W4  0x%08x W5  0x%08x W6  0x%08x W7  0x%08x\n"
			"W8  0x%08x W9  0x%08x W10 0x%08x W11 0x%08x\n"
			"W12 0x%08x W13 0x%08x W14 0x%08x W15 0x%08x\n",
			pring->ringno, icmd->un.asyncstat.evt_code,
			iocb_w[0], iocb_w[1], iocb_w[2], iocb_w[3],
			iocb_w[4], iocb_w[5], iocb_w[6], iocb_w[7],
			iocb_w[8], iocb_w[9], iocb_w[10], iocb_w[11],
			iocb_w[12], iocb_w[13], iocb_w[14], iocb_w[15]);

		break;
	}
}
10312
10313
10314
10315
10316
10317
10318
10319
10320
10321
10322
10323
10324
10325int
10326lpfc_sli4_setup(struct lpfc_hba *phba)
10327{
10328 struct lpfc_sli_ring *pring;
10329
10330 pring = phba->sli4_hba.els_wq->pring;
10331 pring->num_mask = LPFC_MAX_RING_MASK;
10332 pring->prt[0].profile = 0;
10333 pring->prt[0].rctl = FC_RCTL_ELS_REQ;
10334 pring->prt[0].type = FC_TYPE_ELS;
10335 pring->prt[0].lpfc_sli_rcv_unsol_event =
10336 lpfc_els_unsol_event;
10337 pring->prt[1].profile = 0;
10338 pring->prt[1].rctl = FC_RCTL_ELS_REP;
10339 pring->prt[1].type = FC_TYPE_ELS;
10340 pring->prt[1].lpfc_sli_rcv_unsol_event =
10341 lpfc_els_unsol_event;
10342 pring->prt[2].profile = 0;
10343
10344 pring->prt[2].rctl = FC_RCTL_DD_UNSOL_CTL;
10345
10346 pring->prt[2].type = FC_TYPE_CT;
10347 pring->prt[2].lpfc_sli_rcv_unsol_event =
10348 lpfc_ct_unsol_event;
10349 pring->prt[3].profile = 0;
10350
10351 pring->prt[3].rctl = FC_RCTL_DD_SOL_CTL;
10352
10353 pring->prt[3].type = FC_TYPE_CT;
10354 pring->prt[3].lpfc_sli_rcv_unsol_event =
10355 lpfc_ct_unsol_event;
10356 return 0;
10357}
10358
10359
10360
10361
10362
10363
10364
10365
10366
10367
10368
10369
/**
 * lpfc_sli_setup - SLI ring setup function for SLI-2/SLI-3 HBAs
 * @phba: Pointer to HBA context object.
 *
 * Sizes each SLI3 ring's command/response entry counts and entry sizes
 * (SLI3 vs SLI2 iocb sizes), iotag limits, and unsolicited-event masks,
 * then verifies the total fits in the SLIM and optionally configures the
 * extra ring.
 *
 * Return: always 0 (an oversized total is only logged, not failed).
 */
int
lpfc_sli_setup(struct lpfc_hba *phba)
{
	int i, totiocbsize = 0;
	struct lpfc_sli *psli = &phba->sli;
	struct lpfc_sli_ring *pring;

	psli->num_rings = MAX_SLI3_CONFIGURED_RINGS;
	psli->sli_flag = 0;

	psli->iocbq_lookup = NULL;
	psli->iocbq_lookup_len = 0;
	psli->last_iotag = 0;

	for (i = 0; i < psli->num_rings; i++) {
		pring = &psli->sli3_ring[i];
		switch (i) {
		case LPFC_FCP_RING:
			/* numCiocb and numRiocb are used in config_port */
			pring->sli.sli3.numCiocb = SLI2_IOCB_CMD_R0_ENTRIES;
			pring->sli.sli3.numRiocb = SLI2_IOCB_RSP_R0_ENTRIES;
			/* Extra-ring entries start on the FCP ring; they are
			 * moved by lpfc_extra_ring_setup() when enabled.
			 */
			pring->sli.sli3.numCiocb +=
				SLI2_IOCB_CMD_R1XTRA_ENTRIES;
			pring->sli.sli3.numRiocb +=
				SLI2_IOCB_RSP_R1XTRA_ENTRIES;
			pring->sli.sli3.numCiocb +=
				SLI2_IOCB_CMD_R3XTRA_ENTRIES;
			pring->sli.sli3.numRiocb +=
				SLI2_IOCB_RSP_R3XTRA_ENTRIES;
			pring->sli.sli3.sizeCiocb = (phba->sli_rev == 3) ?
							SLI3_IOCB_CMD_SIZE :
							SLI2_IOCB_CMD_SIZE;
			pring->sli.sli3.sizeRiocb = (phba->sli_rev == 3) ?
							SLI3_IOCB_RSP_SIZE :
							SLI2_IOCB_RSP_SIZE;
			pring->iotag_ctr = 0;
			pring->iotag_max =
			    (phba->cfg_hba_queue_depth * 2);
			pring->fast_iotag = pring->iotag_max;
			pring->num_mask = 0;
			break;
		case LPFC_EXTRA_RING:
			/* numCiocb and numRiocb are used in config_port */
			pring->sli.sli3.numCiocb = SLI2_IOCB_CMD_R1_ENTRIES;
			pring->sli.sli3.numRiocb = SLI2_IOCB_RSP_R1_ENTRIES;
			pring->sli.sli3.sizeCiocb = (phba->sli_rev == 3) ?
							SLI3_IOCB_CMD_SIZE :
							SLI2_IOCB_CMD_SIZE;
			pring->sli.sli3.sizeRiocb = (phba->sli_rev == 3) ?
							SLI3_IOCB_RSP_SIZE :
							SLI2_IOCB_RSP_SIZE;
			pring->iotag_max = phba->cfg_hba_queue_depth;
			pring->num_mask = 0;
			break;
		case LPFC_ELS_RING:
			/* numCiocb and numRiocb are used in config_port */
			pring->sli.sli3.numCiocb = SLI2_IOCB_CMD_R2_ENTRIES;
			pring->sli.sli3.numRiocb = SLI2_IOCB_RSP_R2_ENTRIES;
			pring->sli.sli3.sizeCiocb = (phba->sli_rev == 3) ?
							SLI3_IOCB_CMD_SIZE :
							SLI2_IOCB_CMD_SIZE;
			pring->sli.sli3.sizeRiocb = (phba->sli_rev == 3) ?
							SLI3_IOCB_RSP_SIZE :
							SLI2_IOCB_RSP_SIZE;
			pring->fast_iotag = 0;
			pring->iotag_ctr = 0;
			pring->iotag_max = 4096;
			pring->lpfc_sli_rcv_async_status =
				lpfc_sli_async_event_handler;
			pring->num_mask = LPFC_MAX_RING_MASK;
			/* mask 0: ELS request frames */
			pring->prt[0].profile = 0;
			pring->prt[0].rctl = FC_RCTL_ELS_REQ;
			pring->prt[0].type = FC_TYPE_ELS;
			pring->prt[0].lpfc_sli_rcv_unsol_event =
			    lpfc_els_unsol_event;
			/* mask 1: ELS reply frames */
			pring->prt[1].profile = 0;
			pring->prt[1].rctl = FC_RCTL_ELS_REP;
			pring->prt[1].type = FC_TYPE_ELS;
			pring->prt[1].lpfc_sli_rcv_unsol_event =
			    lpfc_els_unsol_event;
			pring->prt[2].profile = 0;
			/* mask 2: unsolicited CT control frames */
			pring->prt[2].rctl = FC_RCTL_DD_UNSOL_CTL;
			/* NameServer Inquiry */
			pring->prt[2].type = FC_TYPE_CT;
			pring->prt[2].lpfc_sli_rcv_unsol_event =
			    lpfc_ct_unsol_event;
			pring->prt[3].profile = 0;
			/* mask 3: solicited CT control frames */
			pring->prt[3].rctl = FC_RCTL_DD_SOL_CTL;
			/* NameServer response */
			pring->prt[3].type = FC_TYPE_CT;
			pring->prt[3].lpfc_sli_rcv_unsol_event =
			    lpfc_ct_unsol_event;
			break;
		}
		totiocbsize += (pring->sli.sli3.numCiocb *
				pring->sli.sli3.sizeCiocb) +
			(pring->sli.sli3.numRiocb * pring->sli.sli3.sizeRiocb);
	}
	if (totiocbsize > MAX_SLIM_IOCB_SIZE) {
		/* Too many cmd / rsp ring entries in SLI2 SLIM */
		printk(KERN_ERR "%d:0462 Too many cmd / rsp ring entries in "
		       "SLI2 SLIM Data: x%x x%lx\n",
		       phba->brd_no, totiocbsize,
		       (unsigned long) MAX_SLIM_IOCB_SIZE);
	}
	if (phba->cfg_multi_ring_support == 2)
		lpfc_extra_ring_setup(phba);

	return 0;
}
10482
10483
10484
10485
10486
10487
10488
10489
10490
10491
10492
10493
10494void
10495lpfc_sli4_queue_init(struct lpfc_hba *phba)
10496{
10497 struct lpfc_sli *psli;
10498 struct lpfc_sli_ring *pring;
10499 int i;
10500
10501 psli = &phba->sli;
10502 spin_lock_irq(&phba->hbalock);
10503 INIT_LIST_HEAD(&psli->mboxq);
10504 INIT_LIST_HEAD(&psli->mboxq_cmpl);
10505
10506 for (i = 0; i < phba->cfg_hdw_queue; i++) {
10507 pring = phba->sli4_hba.hdwq[i].fcp_wq->pring;
10508 pring->flag = 0;
10509 pring->ringno = LPFC_FCP_RING;
10510 pring->txcmplq_cnt = 0;
10511 INIT_LIST_HEAD(&pring->txq);
10512 INIT_LIST_HEAD(&pring->txcmplq);
10513 INIT_LIST_HEAD(&pring->iocb_continueq);
10514 spin_lock_init(&pring->ring_lock);
10515 }
10516 pring = phba->sli4_hba.els_wq->pring;
10517 pring->flag = 0;
10518 pring->ringno = LPFC_ELS_RING;
10519 pring->txcmplq_cnt = 0;
10520 INIT_LIST_HEAD(&pring->txq);
10521 INIT_LIST_HEAD(&pring->txcmplq);
10522 INIT_LIST_HEAD(&pring->iocb_continueq);
10523 spin_lock_init(&pring->ring_lock);
10524
10525 if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) {
10526 for (i = 0; i < phba->cfg_hdw_queue; i++) {
10527 pring = phba->sli4_hba.hdwq[i].nvme_wq->pring;
10528 pring->flag = 0;
10529 pring->ringno = LPFC_FCP_RING;
10530 pring->txcmplq_cnt = 0;
10531 INIT_LIST_HEAD(&pring->txq);
10532 INIT_LIST_HEAD(&pring->txcmplq);
10533 INIT_LIST_HEAD(&pring->iocb_continueq);
10534 spin_lock_init(&pring->ring_lock);
10535 }
10536 pring = phba->sli4_hba.nvmels_wq->pring;
10537 pring->flag = 0;
10538 pring->ringno = LPFC_ELS_RING;
10539 pring->txcmplq_cnt = 0;
10540 INIT_LIST_HEAD(&pring->txq);
10541 INIT_LIST_HEAD(&pring->txcmplq);
10542 INIT_LIST_HEAD(&pring->iocb_continueq);
10543 spin_lock_init(&pring->ring_lock);
10544 }
10545
10546 spin_unlock_irq(&phba->hbalock);
10547}
10548
10549
10550
10551
10552
10553
10554
10555
10556
10557
10558
10559
10560void
10561lpfc_sli_queue_init(struct lpfc_hba *phba)
10562{
10563 struct lpfc_sli *psli;
10564 struct lpfc_sli_ring *pring;
10565 int i;
10566
10567 psli = &phba->sli;
10568 spin_lock_irq(&phba->hbalock);
10569 INIT_LIST_HEAD(&psli->mboxq);
10570 INIT_LIST_HEAD(&psli->mboxq_cmpl);
10571
10572 for (i = 0; i < psli->num_rings; i++) {
10573 pring = &psli->sli3_ring[i];
10574 pring->ringno = i;
10575 pring->sli.sli3.next_cmdidx = 0;
10576 pring->sli.sli3.local_getidx = 0;
10577 pring->sli.sli3.cmdidx = 0;
10578 INIT_LIST_HEAD(&pring->iocb_continueq);
10579 INIT_LIST_HEAD(&pring->iocb_continue_saveq);
10580 INIT_LIST_HEAD(&pring->postbufq);
10581 pring->flag = 0;
10582 INIT_LIST_HEAD(&pring->txq);
10583 INIT_LIST_HEAD(&pring->txcmplq);
10584 spin_lock_init(&pring->ring_lock);
10585 }
10586 spin_unlock_irq(&phba->hbalock);
10587}
10588
10589
10590
10591
10592
10593
10594
10595
10596
10597
10598
10599
10600
10601
10602
10603
/**
 * lpfc_sli_mbox_sys_flush - Flush the mailbox command subsystem
 * @phba: Pointer to HBA context object.
 *
 * Collects every pending mailbox command (queued, active, and awaiting
 * completion processing) under hbalock, then completes each of them with
 * MBX_NOT_FINISHED status outside the lock. Bottom halves are disabled
 * around the lock section so completion handlers cannot race in softirq
 * context on this CPU.
 */
static void
lpfc_sli_mbox_sys_flush(struct lpfc_hba *phba)
{
	LIST_HEAD(completions);
	struct lpfc_sli *psli = &phba->sli;
	LPFC_MBOXQ_t *pmb;
	unsigned long iflag;

	/* Disable softirqs, including timers, while manipulating queues */
	local_bh_disable();

	/* Flush all the mailbox commands in the mbox system */
	spin_lock_irqsave(&phba->hbalock, iflag);

	/* The pending mailbox command queue */
	list_splice_init(&phba->sli.mboxq, &completions);

	/* The outstanding active mailbox command, if any */
	if (psli->mbox_active) {
		list_add_tail(&psli->mbox_active->list, &completions);
		psli->mbox_active = NULL;
		psli->sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
	}

	/* The completed mailbox command queue */
	list_splice_init(&phba->sli.mboxq_cmpl, &completions);
	spin_unlock_irqrestore(&phba->hbalock, iflag);

	/* Enable softirqs again, done with phba->hbalock */
	local_bh_enable();

	/* Return all flushed mailbox commands with MBX_NOT_FINISHED status */
	while (!list_empty(&completions)) {
		list_remove_head(&completions, pmb, LPFC_MBOXQ_t, list);
		pmb->u.mb.mbxStatus = MBX_NOT_FINISHED;
		if (pmb->mbox_cmpl)
			pmb->mbox_cmpl(phba, pmb);
	}
}
10641
10642
10643
10644
10645
10646
10647
10648
10649
10650
10651
10652
10653
10654
10655
10656
10657
10658
/**
 * lpfc_sli_host_down - Vport cleanup function
 * @phba is not a parameter; see below.
 * @vport: Pointer to virtual port object.
 *
 * Walks every ring (SLI3 rings, or every SLI4 WQ pring) and, for iocbs
 * belonging to @vport, moves txq entries onto a local list and issues
 * aborts for txcmplq entries. The collected txq iocbs are then cancelled
 * with IOSTAT_LOCAL_REJECT/IOERR_SLI_DOWN outside the locks. On SLI4 the
 * per-ring lock is taken nested inside hbalock for the txq walk.
 *
 * Return: always 1.
 */
int
lpfc_sli_host_down(struct lpfc_vport *vport)
{
	LIST_HEAD(completions);
	struct lpfc_hba *phba = vport->phba;
	struct lpfc_sli *psli = &phba->sli;
	struct lpfc_queue *qp = NULL;
	struct lpfc_sli_ring *pring;
	struct lpfc_iocbq *iocb, *next_iocb;
	int i;
	unsigned long flags = 0;
	uint16_t prev_pring_flag;

	lpfc_cleanup_discovery_resources(vport);

	spin_lock_irqsave(&phba->hbalock, flags);

	/*
	 * Error everything on the txq since these iocbs
	 * have not been given to the FW yet.
	 */
	if (phba->sli_rev != LPFC_SLI_REV4) {
		for (i = 0; i < psli->num_rings; i++) {
			pring = &psli->sli3_ring[i];
			prev_pring_flag = pring->flag;
			/* Only slow rings */
			if (pring->ringno == LPFC_ELS_RING) {
				pring->flag |= LPFC_DEFERRED_RING_EVENT;
				/* Set the lpfc data pending flag */
				set_bit(LPFC_DATA_READY, &phba->data_flags);
			}
			list_for_each_entry_safe(iocb, next_iocb,
						 &pring->txq, list) {
				if (iocb->vport != vport)
					continue;
				list_move_tail(&iocb->list, &completions);
			}
			list_for_each_entry_safe(iocb, next_iocb,
						 &pring->txcmplq, list) {
				if (iocb->vport != vport)
					continue;
				lpfc_sli_issue_abort_iotag(phba, pring, iocb);
			}
			pring->flag = prev_pring_flag;
		}
	} else {
		list_for_each_entry(qp, &phba->sli4_hba.lpfc_wq_list, wq_list) {
			pring = qp->pring;
			if (!pring)
				continue;
			if (pring == phba->sli4_hba.els_wq->pring) {
				pring->flag |= LPFC_DEFERRED_RING_EVENT;
				/* Set the lpfc data pending flag */
				set_bit(LPFC_DATA_READY, &phba->data_flags);
			}
			prev_pring_flag = pring->flag;
			/* ring_lock taken nested inside hbalock */
			spin_lock_irq(&pring->ring_lock);
			list_for_each_entry_safe(iocb, next_iocb,
						 &pring->txq, list) {
				if (iocb->vport != vport)
					continue;
				list_move_tail(&iocb->list, &completions);
			}
			spin_unlock_irq(&pring->ring_lock);
			list_for_each_entry_safe(iocb, next_iocb,
						 &pring->txcmplq, list) {
				if (iocb->vport != vport)
					continue;
				lpfc_sli_issue_abort_iotag(phba, pring, iocb);
			}
			pring->flag = prev_pring_flag;
		}
	}
	spin_unlock_irqrestore(&phba->hbalock, flags);

	/* Cancel all the IOCBs from the completions list */
	lpfc_sli_cancel_iocbs(phba, &completions, IOSTAT_LOCAL_REJECT,
			      IOERR_SLI_DOWN);
	return 1;
}
10740
10741
10742
10743
10744
10745
10746
10747
10748
10749
10750
10751
10752
10753
10754
10755
/**
 * lpfc_sli_hba_down - Resource cleanup function for the HBA
 * @phba: Pointer to HBA context object.
 *
 * Shuts down the mailbox subsystem, runs the HBA-down prep, aborts fabric
 * iocbs, flushes every ring's txq (completing the iocbs with
 * IOSTAT_LOCAL_REJECT/IOERR_SLI_DOWN), frees the driver's posted ELS
 * buffers, and stops the mailbox timer. Bottom halves are disabled around
 * the lock-holding sections.
 *
 * Return: always 1.
 */
int
lpfc_sli_hba_down(struct lpfc_hba *phba)
{
	LIST_HEAD(completions);
	struct lpfc_sli *psli = &phba->sli;
	struct lpfc_queue *qp = NULL;
	struct lpfc_sli_ring *pring;
	struct lpfc_dmabuf *buf_ptr;
	unsigned long flags = 0;
	int i;

	/* Shutdown the mailbox command sub-system */
	lpfc_sli_mbox_sys_shutdown(phba, LPFC_MBX_WAIT);

	lpfc_hba_down_prep(phba);

	/* Disable softirqs, including timers, while manipulating queues */
	local_bh_disable();

	lpfc_fabric_abort_hba(phba);

	spin_lock_irqsave(&phba->hbalock, flags);

	/*
	 * Error everything on the txq since these iocbs
	 * have not been given to the FW yet.
	 */
	if (phba->sli_rev != LPFC_SLI_REV4) {
		for (i = 0; i < psli->num_rings; i++) {
			pring = &psli->sli3_ring[i];
			/* Only slow rings */
			if (pring->ringno == LPFC_ELS_RING) {
				pring->flag |= LPFC_DEFERRED_RING_EVENT;
				/* Set the lpfc data pending flag */
				set_bit(LPFC_DATA_READY, &phba->data_flags);
			}
			list_splice_init(&pring->txq, &completions);
		}
	} else {
		list_for_each_entry(qp, &phba->sli4_hba.lpfc_wq_list, wq_list) {
			pring = qp->pring;
			if (!pring)
				continue;
			/* ring_lock taken nested inside hbalock */
			spin_lock_irq(&pring->ring_lock);
			list_splice_init(&pring->txq, &completions);
			spin_unlock_irq(&pring->ring_lock);
			if (pring == phba->sli4_hba.els_wq->pring) {
				pring->flag |= LPFC_DEFERRED_RING_EVENT;
				/* Set the lpfc data pending flag */
				set_bit(LPFC_DATA_READY, &phba->data_flags);
			}
		}
	}
	spin_unlock_irqrestore(&phba->hbalock, flags);

	/* Cancel all the IOCBs from the completions list */
	lpfc_sli_cancel_iocbs(phba, &completions, IOSTAT_LOCAL_REJECT,
			      IOERR_SLI_DOWN);

	/* Free the ELS buffers the driver still holds posted */
	spin_lock_irqsave(&phba->hbalock, flags);
	list_splice_init(&phba->elsbuf, &completions);
	phba->elsbuf_cnt = 0;
	phba->elsbuf_prev_cnt = 0;
	spin_unlock_irqrestore(&phba->hbalock, flags);

	while (!list_empty(&completions)) {
		list_remove_head(&completions, buf_ptr,
			struct lpfc_dmabuf, list);
		lpfc_mbuf_free(phba, buf_ptr->virt, buf_ptr->phys);
		kfree(buf_ptr);
	}

	/* Enable softirqs again, done with phba->hbalock */
	local_bh_enable();

	/* Return any active mbox cmds */
	del_timer_sync(&psli->mbox_tmo);

	spin_lock_irqsave(&phba->pport->work_port_lock, flags);
	phba->pport->work_port_events &= ~WORKER_MBOX_TMO;
	spin_unlock_irqrestore(&phba->pport->work_port_lock, flags);

	return 1;
}
10840
10841
10842
10843
10844
10845
10846
10847
10848
10849
10850
10851
10852
/**
 * lpfc_sli_pcimem_bcopy - Copy little-endian words to host byte order
 * @srcp: source buffer holding little-endian 32-bit words.
 * @destp: destination buffer receiving host-order 32-bit words.
 * @cnt: number of bytes to copy; processed in whole 32-bit words.
 *
 * Copies @cnt bytes one 32-bit word at a time, converting each word from
 * little-endian (PCI/adapter) order to host CPU order.  A trailing partial
 * word is still copied as a full word, so both buffers must be sized in
 * multiples of 4 bytes.
 */
void
lpfc_sli_pcimem_bcopy(void *srcp, void *destp, uint32_t cnt)
{
	uint32_t *from = srcp;
	uint32_t *to = destp;
	int remaining = (int)cnt;

	while (remaining > 0) {
		uint32_t word = *from++;

		*to++ = le32_to_cpu(word);
		remaining -= (int)sizeof(uint32_t);
	}
}
10869
10870
10871
10872
10873
10874
10875
10876
10877
10878
10879
10880
/**
 * lpfc_sli_bemem_bcopy - Copy big-endian words to host byte order
 * @srcp: source buffer holding big-endian 32-bit words.
 * @destp: destination buffer receiving host-order 32-bit words.
 * @cnt: number of bytes to copy; processed in whole 32-bit words.
 *
 * Copies @cnt bytes one 32-bit word at a time, converting each word from
 * big-endian order to host CPU order.  A trailing partial word is still
 * copied as a full word, so both buffers must be sized in multiples of
 * 4 bytes.
 */
void
lpfc_sli_bemem_bcopy(void *srcp, void *destp, uint32_t cnt)
{
	uint32_t *from = srcp;
	uint32_t *to = destp;
	int remaining = (int)cnt;

	while (remaining > 0) {
		uint32_t word = *from++;

		*to++ = be32_to_cpu(word);
		remaining -= (int)sizeof(uint32_t);
	}
}
10897
10898
10899
10900
10901
10902
10903
10904
10905
10906
10907
/**
 * lpfc_sli_ringpostbuf_put - Post a DMA buffer to a ring's post buffer queue
 * @phba: pointer to lpfc hba data structure.
 * @pring: pointer to the driver SLI ring object to post the buffer to.
 * @mp: DMA buffer to queue on @pring->postbufq.
 *
 * Adds @mp to the tail of the ring's postbufq list and increments the
 * queue count.  Both are updated under hbalock so list and counter stay
 * consistent with concurrent readers.
 *
 * Return: always 0.
 */
int
lpfc_sli_ringpostbuf_put(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
			 struct lpfc_dmabuf *mp)
{
	/* hbalock protects both the postbufq list and postbufq_cnt */
	spin_lock_irq(&phba->hbalock);
	list_add_tail(&mp->list, &pring->postbufq);
	pring->postbufq_cnt++;
	spin_unlock_irq(&phba->hbalock);
	return 0;
}
10920
10921
10922
10923
10924
10925
10926
10927
10928
10929
10930
10931
/**
 * lpfc_sli_get_buffer_tag - Allocate a new driver buffer tag
 * @phba: pointer to lpfc hba data structure.
 *
 * Increments the hba-wide buffer tag counter under hbalock and ORs in
 * QUE_BUFTAG_BIT so the value is marked as a queue-buffer tag.
 *
 * Return: the new tag value.  NOTE(review): the return statement re-reads
 * phba->buffer_tag_count after hbalock has been dropped; confirm callers
 * cannot race here in a way that returns another caller's tag.
 */
uint32_t
lpfc_sli_get_buffer_tag(struct lpfc_hba *phba)
{
	spin_lock_irq(&phba->hbalock);
	phba->buffer_tag_count++;
	/* Mark the counter value as a queue buffer tag */
	phba->buffer_tag_count |= QUE_BUFTAG_BIT;
	spin_unlock_irq(&phba->hbalock);
	return phba->buffer_tag_count;
}
10945
10946
10947
10948
10949
10950
10951
10952
10953
10954
10955
10956
10957
10958
10959
10960
/**
 * lpfc_sli_ring_taggedbuf_get - Find and remove a posted buffer by tag
 * @phba: pointer to lpfc hba data structure.
 * @pring: pointer to the driver SLI ring object to search.
 * @tag: buffer tag to look for.
 *
 * Walks @pring->postbufq under hbalock looking for a buffer whose
 * buffer_tag matches @tag.  On a match the buffer is unlinked from the
 * queue, the queue count is decremented, and the buffer is returned.
 *
 * Return: the matching DMA buffer, or NULL (with an error logged) if no
 * buffer on the queue carries @tag.
 */
struct lpfc_dmabuf *
lpfc_sli_ring_taggedbuf_get(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
			uint32_t tag)
{
	struct lpfc_dmabuf *mp, *next_mp;
	struct list_head *slp = &pring->postbufq;

	/* Search postbufq, from the beginning, looking for a match on tag */
	spin_lock_irq(&phba->hbalock);
	list_for_each_entry_safe(mp, next_mp, &pring->postbufq, list) {
		if (mp->buffer_tag == tag) {
			list_del_init(&mp->list);
			pring->postbufq_cnt--;
			spin_unlock_irq(&phba->hbalock);
			return mp;
		}
	}

	spin_unlock_irq(&phba->hbalock);
	lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
			"0402 Cannot find virtual addr for buffer tag on "
			"ring %d Data x%lx x%p x%p x%x\n",
			pring->ringno, (unsigned long) tag,
			slp->next, slp->prev, pring->postbufq_cnt);

	return NULL;
}
10988
10989
10990
10991
10992
10993
10994
10995
10996
10997
10998
10999
11000
11001
11002
11003
11004
/**
 * lpfc_sli_ringpostbuf_get - Find and remove a posted buffer by DMA address
 * @phba: pointer to lpfc hba data structure.
 * @pring: pointer to the driver SLI ring object to search.
 * @phys: DMA (bus) address of the buffer to look for.
 *
 * Walks @pring->postbufq under hbalock looking for a buffer whose mapped
 * physical address matches @phys.  On a match the buffer is unlinked from
 * the queue, the queue count is decremented, and the buffer is returned.
 *
 * Return: the matching DMA buffer, or NULL (with an error logged) if no
 * buffer on the queue maps to @phys.
 */
struct lpfc_dmabuf *
lpfc_sli_ringpostbuf_get(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
			 dma_addr_t phys)
{
	struct lpfc_dmabuf *mp, *next_mp;
	struct list_head *slp = &pring->postbufq;

	/* Search postbufq, from the beginning, looking for a match on phys */
	spin_lock_irq(&phba->hbalock);
	list_for_each_entry_safe(mp, next_mp, &pring->postbufq, list) {
		if (mp->phys == phys) {
			list_del_init(&mp->list);
			pring->postbufq_cnt--;
			spin_unlock_irq(&phba->hbalock);
			return mp;
		}
	}

	spin_unlock_irq(&phba->hbalock);
	lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
			"0410 Cannot find virtual addr for mapped buf on "
			"ring %d Data x%llx x%p x%p x%x\n",
			pring->ringno, (unsigned long long)phys,
			slp->next, slp->prev, pring->postbufq_cnt);
	return NULL;
}
11031
11032
11033
11034
11035
11036
11037
11038
11039
11040
11041
11042
/**
 * lpfc_sli_abort_els_cmpl - Completion handler for an ELS abort iocb
 * @phba: pointer to lpfc hba data structure.
 * @cmdiocb: the abort command iocb that has completed.
 * @rspiocb: the response iocb carrying the abort completion status.
 *
 * If the abort failed (non-zero ulpStatus), the tags identifying the iocb
 * that was being aborted are recovered from @cmdiocb and a warning is
 * logged with the looked-up iocb pointer.  In every case the abort command
 * iocb is released back to the iocb pool.
 */
static void
lpfc_sli_abort_els_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
			struct lpfc_iocbq *rspiocb)
{
	IOCB_t *irsp = &rspiocb->iocb;
	uint16_t abort_iotag, abort_context;
	struct lpfc_iocbq *abort_iocb = NULL;

	if (irsp->ulpStatus) {
		/* Abort failed: pull the tags of the target iocb out of the
		 * abort command so the failure can be reported.
		 */
		abort_context = cmdiocb->iocb.un.acxri.abortContextTag;
		abort_iotag = cmdiocb->iocb.un.acxri.abortIoTag;

		spin_lock_irq(&phba->hbalock);
		if (phba->sli_rev < LPFC_SLI_REV4) {
			if (irsp->ulpCommand == CMD_ABORT_XRI_CX &&
			    irsp->ulpStatus == IOSTAT_LOCAL_REJECT &&
			    irsp->un.ulpWord[4] == IOERR_ABORT_REQUESTED) {
				/* Driver-requested abort was rejected by the
				 * firmware: nothing to log, just release.
				 */
				spin_unlock_irq(&phba->hbalock);
				goto release_iocb;
			}
			if (abort_iotag != 0 &&
				abort_iotag <= phba->sli.last_iotag)
				abort_iocb =
					phba->sli.iocbq_lookup[abort_iotag];
		} else
			/* SLI4: the abort context tag carries the driver
			 * iotag of the aborted iocb.  NOTE(review): unlike
			 * the SLI3 path there is no bounds check against
			 * last_iotag here — confirm the tag is always valid.
			 */
			abort_iocb = phba->sli.iocbq_lookup[abort_context];

		lpfc_printf_log(phba, KERN_WARNING, LOG_ELS | LOG_SLI,
				"0327 Cannot abort els iocb %p "
				"with tag %x context %x, abort status %x, "
				"abort code %x\n",
				abort_iocb, abort_iotag, abort_context,
				irsp->ulpStatus, irsp->un.ulpWord[4]);

		spin_unlock_irq(&phba->hbalock);
	}
release_iocb:
	lpfc_sli_release_iocbq(phba, cmdiocb);
	return;
}
11093
11094
11095
11096
11097
11098
11099
11100
11101
11102
11103
11104
11105static void
11106lpfc_ignore_els_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
11107 struct lpfc_iocbq *rspiocb)
11108{
11109 IOCB_t *irsp = &rspiocb->iocb;
11110
11111
11112 lpfc_printf_log(phba, KERN_INFO, LOG_ELS,
11113 "0139 Ignoring ELS cmd tag x%x completion Data: "
11114 "x%x x%x x%x\n",
11115 irsp->ulpIoTag, irsp->ulpStatus,
11116 irsp->un.ulpWord[4], irsp->ulpTimeout);
11117 if (cmdiocb->iocb.ulpCommand == CMD_GEN_REQUEST64_CR)
11118 lpfc_ct_free_iocb(phba, cmdiocb);
11119 else
11120 lpfc_els_free_iocb(phba, cmdiocb);
11121 return;
11122}
11123
11124
11125
11126
11127
11128
11129
11130
11131
11132
11133
11134
11135
11136static int
11137lpfc_sli_abort_iotag_issue(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
11138 struct lpfc_iocbq *cmdiocb)
11139{
11140 struct lpfc_vport *vport = cmdiocb->vport;
11141 struct lpfc_iocbq *abtsiocbp;
11142 IOCB_t *icmd = NULL;
11143 IOCB_t *iabt = NULL;
11144 int retval;
11145 unsigned long iflags;
11146 struct lpfc_nodelist *ndlp;
11147
11148 lockdep_assert_held(&phba->hbalock);
11149
11150
11151
11152
11153
11154
11155 icmd = &cmdiocb->iocb;
11156 if (icmd->ulpCommand == CMD_ABORT_XRI_CN ||
11157 icmd->ulpCommand == CMD_CLOSE_XRI_CN ||
11158 (cmdiocb->iocb_flag & LPFC_DRIVER_ABORTED) != 0)
11159 return 0;
11160
11161
11162 abtsiocbp = __lpfc_sli_get_iocbq(phba);
11163 if (abtsiocbp == NULL)
11164 return 0;
11165
11166
11167
11168
11169 cmdiocb->iocb_flag |= LPFC_DRIVER_ABORTED;
11170
11171 iabt = &abtsiocbp->iocb;
11172 iabt->un.acxri.abortType = ABORT_TYPE_ABTS;
11173 iabt->un.acxri.abortContextTag = icmd->ulpContext;
11174 if (phba->sli_rev == LPFC_SLI_REV4) {
11175 iabt->un.acxri.abortIoTag = cmdiocb->sli4_xritag;
11176 iabt->un.acxri.abortContextTag = cmdiocb->iotag;
11177 } else {
11178 iabt->un.acxri.abortIoTag = icmd->ulpIoTag;
11179 if (pring->ringno == LPFC_ELS_RING) {
11180 ndlp = (struct lpfc_nodelist *)(cmdiocb->context1);
11181 iabt->un.acxri.abortContextTag = ndlp->nlp_rpi;
11182 }
11183 }
11184 iabt->ulpLe = 1;
11185 iabt->ulpClass = icmd->ulpClass;
11186
11187
11188 abtsiocbp->hba_wqidx = cmdiocb->hba_wqidx;
11189 if (cmdiocb->iocb_flag & LPFC_IO_FCP)
11190 abtsiocbp->iocb_flag |= LPFC_USE_FCPWQIDX;
11191 if (cmdiocb->iocb_flag & LPFC_IO_FOF)
11192 abtsiocbp->iocb_flag |= LPFC_IO_FOF;
11193
11194 if (phba->link_state >= LPFC_LINK_UP)
11195 iabt->ulpCommand = CMD_ABORT_XRI_CN;
11196 else
11197 iabt->ulpCommand = CMD_CLOSE_XRI_CN;
11198
11199 abtsiocbp->iocb_cmpl = lpfc_sli_abort_els_cmpl;
11200 abtsiocbp->vport = vport;
11201
11202 lpfc_printf_vlog(vport, KERN_INFO, LOG_SLI,
11203 "0339 Abort xri x%x, original iotag x%x, "
11204 "abort cmd iotag x%x\n",
11205 iabt->un.acxri.abortIoTag,
11206 iabt->un.acxri.abortContextTag,
11207 abtsiocbp->iotag);
11208
11209 if (phba->sli_rev == LPFC_SLI_REV4) {
11210 pring = lpfc_sli4_calc_ring(phba, abtsiocbp);
11211 if (unlikely(pring == NULL))
11212 return 0;
11213
11214 spin_lock_irqsave(&pring->ring_lock, iflags);
11215 retval = __lpfc_sli_issue_iocb(phba, pring->ringno,
11216 abtsiocbp, 0);
11217 spin_unlock_irqrestore(&pring->ring_lock, iflags);
11218 } else {
11219 retval = __lpfc_sli_issue_iocb(phba, pring->ringno,
11220 abtsiocbp, 0);
11221 }
11222
11223 if (retval)
11224 __lpfc_sli_release_iocbq(phba, abtsiocbp);
11225
11226
11227
11228
11229
11230
11231 return retval;
11232}
11233
11234
11235
11236
11237
11238
11239
11240
11241
11242
11243
11244
11245
11246
11247int
11248lpfc_sli_issue_abort_iotag(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
11249 struct lpfc_iocbq *cmdiocb)
11250{
11251 struct lpfc_vport *vport = cmdiocb->vport;
11252 int retval = IOCB_ERROR;
11253 IOCB_t *icmd = NULL;
11254
11255 lockdep_assert_held(&phba->hbalock);
11256
11257
11258
11259
11260
11261
11262 icmd = &cmdiocb->iocb;
11263 if (icmd->ulpCommand == CMD_ABORT_XRI_CN ||
11264 icmd->ulpCommand == CMD_CLOSE_XRI_CN ||
11265 (cmdiocb->iocb_flag & LPFC_DRIVER_ABORTED) != 0)
11266 return 0;
11267
11268 if (!pring) {
11269 if (cmdiocb->iocb_flag & LPFC_IO_FABRIC)
11270 cmdiocb->fabric_iocb_cmpl = lpfc_ignore_els_cmpl;
11271 else
11272 cmdiocb->iocb_cmpl = lpfc_ignore_els_cmpl;
11273 goto abort_iotag_exit;
11274 }
11275
11276
11277
11278
11279
11280 if ((vport->load_flag & FC_UNLOADING) &&
11281 (pring->ringno == LPFC_ELS_RING)) {
11282 if (cmdiocb->iocb_flag & LPFC_IO_FABRIC)
11283 cmdiocb->fabric_iocb_cmpl = lpfc_ignore_els_cmpl;
11284 else
11285 cmdiocb->iocb_cmpl = lpfc_ignore_els_cmpl;
11286 goto abort_iotag_exit;
11287 }
11288
11289
11290 retval = lpfc_sli_abort_iotag_issue(phba, pring, cmdiocb);
11291
11292abort_iotag_exit:
11293
11294
11295
11296
11297
11298 return retval;
11299}
11300
11301
11302
11303
11304
11305
11306
11307void
11308lpfc_sli_hba_iocb_abort(struct lpfc_hba *phba)
11309{
11310 struct lpfc_sli *psli = &phba->sli;
11311 struct lpfc_sli_ring *pring;
11312 struct lpfc_queue *qp = NULL;
11313 int i;
11314
11315 if (phba->sli_rev != LPFC_SLI_REV4) {
11316 for (i = 0; i < psli->num_rings; i++) {
11317 pring = &psli->sli3_ring[i];
11318 lpfc_sli_abort_iocb_ring(phba, pring);
11319 }
11320 return;
11321 }
11322 list_for_each_entry(qp, &phba->sli4_hba.lpfc_wq_list, wq_list) {
11323 pring = qp->pring;
11324 if (!pring)
11325 continue;
11326 lpfc_sli_abort_iocb_ring(phba, pring);
11327 }
11328}
11329
11330
11331
11332
11333
11334
11335
11336
11337
11338
11339
11340
11341
11342
11343
11344
11345
11346
11347
11348
11349
11350
11351
11352static int
11353lpfc_sli_validate_fcp_iocb(struct lpfc_iocbq *iocbq, struct lpfc_vport *vport,
11354 uint16_t tgt_id, uint64_t lun_id,
11355 lpfc_ctx_cmd ctx_cmd)
11356{
11357 struct lpfc_io_buf *lpfc_cmd;
11358 int rc = 1;
11359
11360 if (iocbq->vport != vport)
11361 return rc;
11362
11363 if (!(iocbq->iocb_flag & LPFC_IO_FCP) ||
11364 !(iocbq->iocb_flag & LPFC_IO_ON_TXCMPLQ))
11365 return rc;
11366
11367 lpfc_cmd = container_of(iocbq, struct lpfc_io_buf, cur_iocbq);
11368
11369 if (lpfc_cmd->pCmd == NULL)
11370 return rc;
11371
11372 switch (ctx_cmd) {
11373 case LPFC_CTX_LUN:
11374 if ((lpfc_cmd->rdata) && (lpfc_cmd->rdata->pnode) &&
11375 (lpfc_cmd->rdata->pnode->nlp_sid == tgt_id) &&
11376 (scsilun_to_int(&lpfc_cmd->fcp_cmnd->fcp_lun) == lun_id))
11377 rc = 0;
11378 break;
11379 case LPFC_CTX_TGT:
11380 if ((lpfc_cmd->rdata) && (lpfc_cmd->rdata->pnode) &&
11381 (lpfc_cmd->rdata->pnode->nlp_sid == tgt_id))
11382 rc = 0;
11383 break;
11384 case LPFC_CTX_HOST:
11385 rc = 0;
11386 break;
11387 default:
11388 printk(KERN_ERR "%s: Unknown context cmd type, value %d\n",
11389 __func__, ctx_cmd);
11390 break;
11391 }
11392
11393 return rc;
11394}
11395
11396
11397
11398
11399
11400
11401
11402
11403
11404
11405
11406
11407
11408
11409
11410
11411
11412
11413
11414
11415int
11416lpfc_sli_sum_iocb(struct lpfc_vport *vport, uint16_t tgt_id, uint64_t lun_id,
11417 lpfc_ctx_cmd ctx_cmd)
11418{
11419 struct lpfc_hba *phba = vport->phba;
11420 struct lpfc_iocbq *iocbq;
11421 int sum, i;
11422
11423 spin_lock_irq(&phba->hbalock);
11424 for (i = 1, sum = 0; i <= phba->sli.last_iotag; i++) {
11425 iocbq = phba->sli.iocbq_lookup[i];
11426
11427 if (lpfc_sli_validate_fcp_iocb (iocbq, vport, tgt_id, lun_id,
11428 ctx_cmd) == 0)
11429 sum++;
11430 }
11431 spin_unlock_irq(&phba->hbalock);
11432
11433 return sum;
11434}
11435
11436
11437
11438
11439
11440
11441
11442
11443
11444
11445
11446void
11447lpfc_sli_abort_fcp_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
11448 struct lpfc_iocbq *rspiocb)
11449{
11450 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
11451 "3096 ABORT_XRI_CN completing on rpi x%x "
11452 "original iotag x%x, abort cmd iotag x%x "
11453 "status 0x%x, reason 0x%x\n",
11454 cmdiocb->iocb.un.acxri.abortContextTag,
11455 cmdiocb->iocb.un.acxri.abortIoTag,
11456 cmdiocb->iotag, rspiocb->iocb.ulpStatus,
11457 rspiocb->iocb.un.ulpWord[4]);
11458 lpfc_sli_release_iocbq(phba, cmdiocb);
11459 return;
11460}
11461
11462
11463
11464
11465
11466
11467
11468
11469
11470
11471
11472
11473
11474
11475
11476
11477
11478
11479
11480
11481
11482
11483int
11484lpfc_sli_abort_iocb(struct lpfc_vport *vport, struct lpfc_sli_ring *pring,
11485 uint16_t tgt_id, uint64_t lun_id, lpfc_ctx_cmd abort_cmd)
11486{
11487 struct lpfc_hba *phba = vport->phba;
11488 struct lpfc_iocbq *iocbq;
11489 struct lpfc_iocbq *abtsiocb;
11490 struct lpfc_sli_ring *pring_s4;
11491 IOCB_t *cmd = NULL;
11492 int errcnt = 0, ret_val = 0;
11493 int i;
11494
11495
11496 if (phba->hba_flag & HBA_FCP_IOQ_FLUSH)
11497 return errcnt;
11498
11499 for (i = 1; i <= phba->sli.last_iotag; i++) {
11500 iocbq = phba->sli.iocbq_lookup[i];
11501
11502 if (lpfc_sli_validate_fcp_iocb(iocbq, vport, tgt_id, lun_id,
11503 abort_cmd) != 0)
11504 continue;
11505
11506
11507
11508
11509
11510 if (iocbq->iocb_flag & LPFC_DRIVER_ABORTED)
11511 continue;
11512
11513
11514 abtsiocb = lpfc_sli_get_iocbq(phba);
11515 if (abtsiocb == NULL) {
11516 errcnt++;
11517 continue;
11518 }
11519
11520
11521 iocbq->iocb_flag |= LPFC_DRIVER_ABORTED;
11522
11523 cmd = &iocbq->iocb;
11524 abtsiocb->iocb.un.acxri.abortType = ABORT_TYPE_ABTS;
11525 abtsiocb->iocb.un.acxri.abortContextTag = cmd->ulpContext;
11526 if (phba->sli_rev == LPFC_SLI_REV4)
11527 abtsiocb->iocb.un.acxri.abortIoTag = iocbq->sli4_xritag;
11528 else
11529 abtsiocb->iocb.un.acxri.abortIoTag = cmd->ulpIoTag;
11530 abtsiocb->iocb.ulpLe = 1;
11531 abtsiocb->iocb.ulpClass = cmd->ulpClass;
11532 abtsiocb->vport = vport;
11533
11534
11535 abtsiocb->hba_wqidx = iocbq->hba_wqidx;
11536 if (iocbq->iocb_flag & LPFC_IO_FCP)
11537 abtsiocb->iocb_flag |= LPFC_USE_FCPWQIDX;
11538 if (iocbq->iocb_flag & LPFC_IO_FOF)
11539 abtsiocb->iocb_flag |= LPFC_IO_FOF;
11540
11541 if (lpfc_is_link_up(phba))
11542 abtsiocb->iocb.ulpCommand = CMD_ABORT_XRI_CN;
11543 else
11544 abtsiocb->iocb.ulpCommand = CMD_CLOSE_XRI_CN;
11545
11546
11547 abtsiocb->iocb_cmpl = lpfc_sli_abort_fcp_cmpl;
11548 if (phba->sli_rev == LPFC_SLI_REV4) {
11549 pring_s4 = lpfc_sli4_calc_ring(phba, iocbq);
11550 if (!pring_s4)
11551 continue;
11552 ret_val = lpfc_sli_issue_iocb(phba, pring_s4->ringno,
11553 abtsiocb, 0);
11554 } else
11555 ret_val = lpfc_sli_issue_iocb(phba, pring->ringno,
11556 abtsiocb, 0);
11557 if (ret_val == IOCB_ERROR) {
11558 lpfc_sli_release_iocbq(phba, abtsiocb);
11559 errcnt++;
11560 continue;
11561 }
11562 }
11563
11564 return errcnt;
11565}
11566
11567
11568
11569
11570
11571
11572
11573
11574
11575
11576
11577
11578
11579
11580
11581
11582
11583
11584
11585
11586
11587
11588
/**
 * lpfc_sli_abort_taskmgmt - Abort matching FCP iocbs for a task management op
 * @vport: the virtual port whose iocbs are to be aborted.
 * @pring: SLI3 ring to issue the aborts on (for SLI4 the ring is looked up
 *         per-iocb from the hardware queue).
 * @tgt_id: SCSI target id to match (LUN and TGT contexts).
 * @lun_id: LUN to match (LUN context only).
 * @cmd: the match context passed to lpfc_sli_validate_fcp_iocb.
 *
 * Like lpfc_sli_abort_iocb, but runs entirely under hbalock with the
 * per-command buf_lock (and for SLI4 the ring_lock) nested inside, so
 * the aborted state cannot change between the check and the issue.
 * Bails out immediately when the FCP I/O queues are being flushed.
 *
 * Lock order (outer to inner): hbalock -> buf_lock -> ring_lock.
 *
 * Return: the number of abort iocbs successfully issued.
 */
int
lpfc_sli_abort_taskmgmt(struct lpfc_vport *vport, struct lpfc_sli_ring *pring,
			uint16_t tgt_id, uint64_t lun_id, lpfc_ctx_cmd cmd)
{
	struct lpfc_hba *phba = vport->phba;
	struct lpfc_io_buf *lpfc_cmd;
	struct lpfc_iocbq *abtsiocbq;
	struct lpfc_nodelist *ndlp;
	struct lpfc_iocbq *iocbq;
	IOCB_t *icmd;
	int sum, i, ret_val;
	unsigned long iflags;
	struct lpfc_sli_ring *pring_s4 = NULL;

	spin_lock_irqsave(&phba->hbalock, iflags);

	/* all I/Os are in process of being flushed */
	if (phba->hba_flag & HBA_FCP_IOQ_FLUSH) {
		spin_unlock_irqrestore(&phba->hbalock, iflags);
		return 0;
	}
	sum = 0;

	for (i = 1; i <= phba->sli.last_iotag; i++) {
		iocbq = phba->sli.iocbq_lookup[i];

		if (lpfc_sli_validate_fcp_iocb(iocbq, vport, tgt_id, lun_id,
					       cmd) != 0)
			continue;

		/* Guard the command state with its buf_lock */
		lpfc_cmd = container_of(iocbq, struct lpfc_io_buf, cur_iocbq);
		spin_lock(&lpfc_cmd->buf_lock);

		if (!lpfc_cmd->pCmd) {
			spin_unlock(&lpfc_cmd->buf_lock);
			continue;
		}

		if (phba->sli_rev == LPFC_SLI_REV4) {
			pring_s4 =
			    phba->sli4_hba.hdwq[iocbq->hba_wqidx].fcp_wq->pring;
			if (!pring_s4) {
				spin_unlock(&lpfc_cmd->buf_lock);
				continue;
			}
			/* Note: both hbalock and ring_lock must be set here */
			spin_lock(&pring_s4->ring_lock);
		}

		/*
		 * If the iocbq is already being aborted, don't take a second
		 * action, but do count it.
		 */
		if ((iocbq->iocb_flag & LPFC_DRIVER_ABORTED) ||
		    !(iocbq->iocb_flag & LPFC_IO_ON_TXCMPLQ)) {
			if (phba->sli_rev == LPFC_SLI_REV4)
				spin_unlock(&pring_s4->ring_lock);
			spin_unlock(&lpfc_cmd->buf_lock);
			continue;
		}

		/* issue ABTS for this IOCB based on iotag */
		abtsiocbq = __lpfc_sli_get_iocbq(phba);
		if (!abtsiocbq) {
			if (phba->sli_rev == LPFC_SLI_REV4)
				spin_unlock(&pring_s4->ring_lock);
			spin_unlock(&lpfc_cmd->buf_lock);
			continue;
		}

		icmd = &iocbq->iocb;
		abtsiocbq->iocb.un.acxri.abortType = ABORT_TYPE_ABTS;
		abtsiocbq->iocb.un.acxri.abortContextTag = icmd->ulpContext;
		if (phba->sli_rev == LPFC_SLI_REV4)
			abtsiocbq->iocb.un.acxri.abortIoTag =
							 iocbq->sli4_xritag;
		else
			abtsiocbq->iocb.un.acxri.abortIoTag = icmd->ulpIoTag;
		abtsiocbq->iocb.ulpLe = 1;
		abtsiocbq->iocb.ulpClass = icmd->ulpClass;
		abtsiocbq->vport = vport;

		/* ABTS WQE must go to the same WQ as the WQE to be aborted */
		abtsiocbq->hba_wqidx = iocbq->hba_wqidx;
		if (iocbq->iocb_flag & LPFC_IO_FCP)
			abtsiocbq->iocb_flag |= LPFC_USE_FCPWQIDX;
		if (iocbq->iocb_flag & LPFC_IO_FOF)
			abtsiocbq->iocb_flag |= LPFC_IO_FOF;

		ndlp = lpfc_cmd->rdata->pnode;

		/* ABORT only if the node is mapped and the link is up */
		if (lpfc_is_link_up(phba) &&
		    (ndlp && ndlp->nlp_state == NLP_STE_MAPPED_NODE))
			abtsiocbq->iocb.ulpCommand = CMD_ABORT_XRI_CN;
		else
			abtsiocbq->iocb.ulpCommand = CMD_CLOSE_XRI_CN;

		/* Setup callback routine and issue the command. */
		abtsiocbq->iocb_cmpl = lpfc_sli_abort_fcp_cmpl;

		/*
		 * Indicate the IO is being aborted by the driver and set
		 * the caller's flag into the aborted IO.
		 */
		iocbq->iocb_flag |= LPFC_DRIVER_ABORTED;

		if (phba->sli_rev == LPFC_SLI_REV4) {
			ret_val = __lpfc_sli_issue_iocb(phba, pring_s4->ringno,
							abtsiocbq, 0);
			spin_unlock(&pring_s4->ring_lock);
		} else {
			ret_val = __lpfc_sli_issue_iocb(phba, pring->ringno,
							abtsiocbq, 0);
		}

		spin_unlock(&lpfc_cmd->buf_lock);

		if (ret_val == IOCB_ERROR)
			__lpfc_sli_release_iocbq(phba, abtsiocbq);
		else
			sum++;
	}
	spin_unlock_irqrestore(&phba->hbalock, iflags);
	return sum;
}
11715
11716
11717
11718
11719
11720
11721
11722
11723
11724
11725
11726
11727
11728
11729
11730
11731
11732
/**
 * lpfc_sli_wake_iocb_wait - Completion handler for synchronous iocb waits
 * @phba: pointer to lpfc hba data structure.
 * @cmdiocbq: the command iocb that has completed.
 * @rspiocbq: the response iocb for the completed command.
 *
 * Installed by lpfc_sli_issue_iocb_wait as the completion routine.  If the
 * waiter already timed out (LPFC_IO_WAKE_TMO set), there is nobody to wake:
 * the original completion routine (saved in wait_iocb_cmpl) is invoked, or
 * the iocb is released if none was set.  Otherwise the response is copied
 * into the waiter-supplied response iocb, the exchange-busy state is
 * propagated for FCP I/O, and the waiter is woken.
 */
static void
lpfc_sli_wake_iocb_wait(struct lpfc_hba *phba,
			struct lpfc_iocbq *cmdiocbq,
			struct lpfc_iocbq *rspiocbq)
{
	wait_queue_head_t *pdone_q;
	unsigned long iflags;
	struct lpfc_io_buf *lpfc_cmd;

	/* hbalock serializes this check against the timeout path that
	 * sets LPFC_IO_WAKE_TMO in lpfc_sli_issue_iocb_wait.
	 */
	spin_lock_irqsave(&phba->hbalock, iflags);
	if (cmdiocbq->iocb_flag & LPFC_IO_WAKE_TMO) {

		/*
		 * The waiter timed out and gave up on this iocb; fall back
		 * to the original completion routine, if any, so the iocb
		 * still gets cleaned up.  Note the rspiocb is not passed
		 * through (NULL) — the original waiter context is gone.
		 */
		spin_unlock_irqrestore(&phba->hbalock, iflags);
		cmdiocbq->iocb_cmpl = cmdiocbq->wait_iocb_cmpl;
		cmdiocbq->wait_iocb_cmpl = NULL;
		if (cmdiocbq->iocb_cmpl)
			(cmdiocbq->iocb_cmpl)(phba, cmdiocbq, NULL);
		else
			lpfc_sli_release_iocbq(phba, cmdiocbq);
		return;
	}

	cmdiocbq->iocb_flag |= LPFC_IO_WAKE;
	if (cmdiocbq->context2 && rspiocbq)
		memcpy(&((struct lpfc_iocbq *)cmdiocbq->context2)->iocb,
		       &rspiocbq->iocb, sizeof(IOCB_t));

	/* Set the exchange busy flag for task management commands */
	if ((cmdiocbq->iocb_flag & LPFC_IO_FCP) &&
	    !(cmdiocbq->iocb_flag & LPFC_IO_LIBDFC)) {
		lpfc_cmd = container_of(cmdiocbq, struct lpfc_io_buf,
					cur_iocbq);
		lpfc_cmd->exch_busy = rspiocbq->iocb_flag & LPFC_EXCHANGE_BUSY;
	}

	pdone_q = cmdiocbq->context_un.wait_queue;
	if (pdone_q)
		wake_up(pdone_q);
	spin_unlock_irqrestore(&phba->hbalock, iflags);
	return;
}
11780
11781
11782
11783
11784
11785
11786
11787
11788
11789
11790
11791
11792
11793static int
11794lpfc_chk_iocb_flg(struct lpfc_hba *phba,
11795 struct lpfc_iocbq *piocbq, uint32_t flag)
11796{
11797 unsigned long iflags;
11798 int ret;
11799
11800 spin_lock_irqsave(&phba->hbalock, iflags);
11801 ret = piocbq->iocb_flag & flag;
11802 spin_unlock_irqrestore(&phba->hbalock, iflags);
11803 return ret;
11804
11805}
11806
11807
11808
11809
11810
11811
11812
11813
11814
11815
11816
11817
11818
11819
11820
11821
11822
11823
11824
11825
11826
11827
11828
11829
11830
11831
11832
11833
11834
11835
11836
11837
11838
11839
11840
11841
11842
/**
 * lpfc_sli_issue_iocb_wait - Issue an iocb and wait for its completion
 * @phba: pointer to lpfc hba data structure.
 * @ring_number: ring to issue the iocb on (SLI3; SLI4 computes its ring).
 * @piocb: the command iocb to issue.
 * @prspiocbq: optional caller-supplied iocb to receive the response; must
 *             not be passed if @piocb->context2 is already in use.
 * @timeout: wait timeout in seconds.
 *
 * Synchronous wrapper around lpfc_sli_issue_iocb: installs
 * lpfc_sli_wake_iocb_wait as the completion routine (saving any original
 * one in wait_iocb_cmpl) and sleeps on an on-stack wait queue until the
 * completion sets LPFC_IO_WAKE or the timeout expires.  On timeout,
 * LPFC_IO_WAKE_TMO is set under hbalock so a late completion knows the
 * waiter is gone.  If polling is enabled (DISABLE_FCP_RING_INT), the FCP
 * ring interrupt is temporarily enabled for the duration of the wait.
 *
 * Return: IOCB_SUCCESS on completion, IOCB_TIMEDOUT on timeout,
 * IOCB_BUSY if the ring was busy, IOCB_ERROR otherwise.
 */
int
lpfc_sli_issue_iocb_wait(struct lpfc_hba *phba,
			 uint32_t ring_number,
			 struct lpfc_iocbq *piocb,
			 struct lpfc_iocbq *prspiocbq,
			 uint32_t timeout)
{
	DECLARE_WAIT_QUEUE_HEAD_ONSTACK(done_q);
	long timeleft, timeout_req = 0;
	int retval = IOCB_SUCCESS;
	uint32_t creg_val;
	struct lpfc_iocbq *iocb;
	int txq_cnt = 0;
	int txcmplq_cnt = 0;
	struct lpfc_sli_ring *pring;
	unsigned long iflags;
	bool iocb_completed = true;

	if (phba->sli_rev >= LPFC_SLI_REV4)
		pring = lpfc_sli4_calc_ring(phba, piocb);
	else
		pring = &phba->sli.sli3_ring[ring_number];
	/*
	 * If the caller has provided a response iocbq buffer, then context2
	 * is NULL or its an error.
	 */
	if (prspiocbq) {
		if (piocb->context2)
			return IOCB_ERROR;
		piocb->context2 = prspiocbq;
	}

	/* Save the original completion routine; the wake handler restores
	 * it if the waiter times out before the iocb completes.
	 */
	piocb->wait_iocb_cmpl = piocb->iocb_cmpl;
	piocb->iocb_cmpl = lpfc_sli_wake_iocb_wait;
	piocb->context_un.wait_queue = &done_q;
	piocb->iocb_flag &= ~(LPFC_IO_WAKE | LPFC_IO_WAKE_TMO);

	if (phba->cfg_poll & DISABLE_FCP_RING_INT) {
		if (lpfc_readl(phba->HCregaddr, &creg_val))
			return IOCB_ERROR;
		creg_val |= (HC_R0INT_ENA << LPFC_FCP_RING);
		writel(creg_val, phba->HCregaddr);
		readl(phba->HCregaddr); /* flush */
	}

	retval = lpfc_sli_issue_iocb(phba, ring_number, piocb,
				     SLI_IOCB_RET_IOCB);
	if (retval == IOCB_SUCCESS) {
		timeout_req = msecs_to_jiffies(timeout * 1000);
		timeleft = wait_event_timeout(done_q,
				lpfc_chk_iocb_flg(phba, piocb, LPFC_IO_WAKE),
				timeout_req);
		spin_lock_irqsave(&phba->hbalock, iflags);
		if (!(piocb->iocb_flag & LPFC_IO_WAKE)) {

			/*
			 * IOCB timed out.  Inform the wake iocb wait
			 * completion function and wake up, and clean up
			 * is now handled there, not here.
			 */
			iocb_completed = false;
			piocb->iocb_flag |= LPFC_IO_WAKE_TMO;
		}
		spin_unlock_irqrestore(&phba->hbalock, iflags);
		if (iocb_completed) {
			lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
					"0331 IOCB wake signaled\n");
			/* Note: we are not indicating if the IOCB has a
			 * success status or not - that's left to the
			 * caller to examine.
			 */
		} else if (timeleft == 0) {
			lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
					"0338 IOCB wait timeout error - no "
					"wake response Data x%x\n", timeout);
			retval = IOCB_TIMEDOUT;
		} else {
			/* Woken without LPFC_IO_WAKE and before the timeout
			 * fully expired: treat as timeout as well.
			 */
			lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
					"0330 IOCB wake NOT set, "
					"Data x%x x%lx\n",
					timeout, (timeleft / jiffies));
			retval = IOCB_TIMEDOUT;
		}
	} else if (retval == IOCB_BUSY) {
		if (phba->cfg_log_verbose & LOG_SLI) {
			list_for_each_entry(iocb, &pring->txq, list) {
				txq_cnt++;
			}
			list_for_each_entry(iocb, &pring->txcmplq, list) {
				txcmplq_cnt++;
			}
			lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
				"2818 Max IOCBs %d txq cnt %d txcmplq cnt %d\n",
				phba->iocb_cnt, txq_cnt, txcmplq_cnt);
		}
		return retval;
	} else {
		lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
				"0332 IOCB wait issue failed, Data x%x\n",
				retval);
		retval = IOCB_ERROR;
	}

	if (phba->cfg_poll & DISABLE_FCP_RING_INT) {
		if (lpfc_readl(phba->HCregaddr, &creg_val))
			return IOCB_ERROR;
		creg_val &= ~(HC_R0INT_ENA << LPFC_FCP_RING);
		writel(creg_val, phba->HCregaddr);
		readl(phba->HCregaddr); /* flush */
	}

	if (prspiocbq)
		piocb->context2 = NULL;

	piocb->context_un.wait_queue = NULL;
	piocb->iocb_cmpl = NULL;
	return retval;
}
11962
11963
11964
11965
11966
11967
11968
11969
11970
11971
11972
11973
11974
11975
11976
11977
11978
11979
11980
11981
11982
11983
11984
11985
11986
11987
11988
/**
 * lpfc_sli_issue_mbox_wait - Issue a mailbox command and wait for completion
 * @phba: pointer to lpfc hba data structure.
 * @pmboxq: the mailbox command to issue.
 * @timeout: wait timeout in seconds.
 *
 * Issues @pmboxq with MBX_NOWAIT, then blocks on a completion that the
 * lpfc_sli_wake_mbox_wait completion routine signals.  The LPFC_MBX_WAKE
 * flag — checked under hbalock to close the race with a completion that
 * fires just as the wait times out — decides whether the command really
 * finished.  On timeout the completion routine is redirected to the
 * default handler so a late completion still cleans up.
 *
 * Return: MBX_SUCCESS if the command completed, MBX_TIMEOUT if the wait
 * expired, or the failure code from lpfc_sli_issue_mbox.
 */
int
lpfc_sli_issue_mbox_wait(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmboxq,
			 uint32_t timeout)
{
	struct completion mbox_done;
	int retval;
	unsigned long flag;

	pmboxq->mbox_flag &= ~LPFC_MBX_WAKE;
	/* setup wake call as IOCB callback */
	pmboxq->mbox_cmpl = lpfc_sli_wake_mbox_wait;

	/* setup context3 field to pass wait_queue pointer to wake function  */
	init_completion(&mbox_done);
	pmboxq->context3 = &mbox_done;
	/* now issue the command */
	retval = lpfc_sli_issue_mbox(phba, pmboxq, MBX_NOWAIT);
	if (retval == MBX_BUSY || retval == MBX_SUCCESS) {
		wait_for_completion_timeout(&mbox_done,
					    msecs_to_jiffies(timeout * 1000));

		spin_lock_irqsave(&phba->hbalock, flag);
		pmboxq->context3 = NULL;
		/*
		 * if LPFC_MBX_WAKE flag is set the mailbox is completed
		 * else do not free the resources.
		 */
		if (pmboxq->mbox_flag & LPFC_MBX_WAKE) {
			retval = MBX_SUCCESS;
		} else {
			retval = MBX_TIMEOUT;
			pmboxq->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
		}
		spin_unlock_irqrestore(&phba->hbalock, flag);
	}
	return retval;
}
12026
12027
12028
12029
12030
12031
12032
12033
12034
12035
12036
12037
12038
12039
12040
12041
/**
 * lpfc_sli_mbox_sys_shutdown - Shut down the mailbox subsystem
 * @phba: pointer to lpfc hba data structure.
 * @mbx_action: LPFC_MBX_WAIT to drain the active mailbox command first,
 *              or LPFC_MBX_NO_WAIT to flush immediately.
 *
 * Blocks further asynchronous mailbox commands, then either waits (up to
 * the command's own timeout) for any active mailbox command to finish or
 * flushes the subsystem right away.  Bottom halves are disabled around
 * the hbalock critical section.
 */
void
lpfc_sli_mbox_sys_shutdown(struct lpfc_hba *phba, int mbx_action)
{
	struct lpfc_sli *psli = &phba->sli;
	unsigned long timeout;

	if (mbx_action == LPFC_MBX_NO_WAIT) {
		/* delay 100ms for port state */
		msleep(100);
		lpfc_sli_mbox_sys_flush(phba);
		return;
	}
	timeout = msecs_to_jiffies(LPFC_MBOX_TMO * 1000) + jiffies;

	/* Disable softirqs, including timers from obtaining phba->hbalock */
	local_bh_disable();

	spin_lock_irq(&phba->hbalock);
	psli->sli_flag |= LPFC_SLI_ASYNC_MBX_BLK;

	if (psli->sli_flag & LPFC_SLI_ACTIVE) {
		/* Determine how long we might wait for the active mailbox
		 * command to be gracefully completed by firmware.
		 */
		if (phba->sli.mbox_active)
			timeout = msecs_to_jiffies(lpfc_mbox_tmo_val(phba,
						phba->sli.mbox_active) *
						1000) + jiffies;
		spin_unlock_irq(&phba->hbalock);

		/* Enable softirqs again, done with phba->hbalock */
		local_bh_enable();

		while (phba->sli.mbox_active) {
			/* Check active mailbox complete status every 2ms */
			msleep(2);
			if (time_after(jiffies, timeout))
				/* Timeout, let the mailbox flush routine
				 * handle the cleanup of the pending mailbox.
				 */
				break;
		}
	} else {
		spin_unlock_irq(&phba->hbalock);

		/* Enable softirqs again, done with phba->hbalock */
		local_bh_enable();
	}

	lpfc_sli_mbox_sys_flush(phba);
}
12093
12094
12095
12096
12097
12098
12099
12100
12101
12102
12103
12104
/**
 * lpfc_sli_eratt_read - Check for an SLI3 host error attention
 * @phba: pointer to lpfc hba data structure.
 *
 * Reads the host attention register and, if HA_ERATT is set, reads the
 * host status to classify the error.  FFER1 combined with another FFER
 * bit marks a deferred error: host interrupts are disabled and the
 * DEFER_ERATT flag is set.  In all error cases the worker thread is
 * notified via work_ha and HBA_ERATT_HANDLED is set so the attention is
 * not processed twice.  A failed register read is treated as a device
 * unplug (UNPLUG_ERR).
 *
 * Return: 1 if an error attention (or unplug) was detected, 0 otherwise.
 */
static int
lpfc_sli_eratt_read(struct lpfc_hba *phba)
{
	uint32_t ha_copy;

	/* Read chip Host Attention (HA) register */
	if (lpfc_readl(phba->HAregaddr, &ha_copy))
		goto unplug_err;

	if (ha_copy & HA_ERATT) {
		/* Read host status register to retrieve error event */
		if (lpfc_sli_read_hs(phba))
			goto unplug_err;

		/* Check if there is a deferred error condition is active */
		if ((HS_FFER1 & phba->work_hs) &&
		    ((HS_FFER2 | HS_FFER3 | HS_FFER4 | HS_FFER5 |
		      HS_FFER6 | HS_FFER7 | HS_FFER8) & phba->work_hs)) {
			phba->hba_flag |= DEFER_ERATT;
			/* Clear all interrupt enable conditions */
			writel(0, phba->HCregaddr);
			readl(phba->HCregaddr);
		}

		/* Set the driver HA work bitmap */
		phba->work_ha |= HA_ERATT;
		/* Indicate polling handles this ERATT */
		phba->hba_flag |= HBA_ERATT_HANDLED;
		return 1;
	}
	return 0;

unplug_err:
	/* Set the driver HS work bitmap */
	phba->work_hs |= UNPLUG_ERR;
	/* Set the driver HA work bitmap */
	phba->work_ha |= HA_ERATT;
	/* Indicate polling handles this ERATT */
	phba->hba_flag |= HBA_ERATT_HANDLED;
	return 1;
}
12146
12147
12148
12149
12150
12151
12152
12153
12154
12155
12156
12157
/**
 * lpfc_sli4_eratt_read - Check for an SLI4 host error attention
 * @phba: pointer to lpfc hba data structure.
 *
 * Reads the interface-type-specific error registers.  For if_type 0 the
 * unrecoverable-error status registers are compared against the error
 * masks; for if_type 2/6 the port status register is checked for the
 * error bit and the ERR1/ERR2 registers are captured.  On any detected
 * error the details are stored in work_status[], the worker thread is
 * notified via work_ha, and HBA_ERATT_HANDLED is set.  A failed register
 * read is treated as a device unplug (UNPLUG_ERR).
 *
 * Return: 1 if an error attention was detected (including an unsupported
 * interface type), 0 otherwise.
 */
static int
lpfc_sli4_eratt_read(struct lpfc_hba *phba)
{
	uint32_t uerr_sta_hi, uerr_sta_lo;
	uint32_t if_type, portsmphr;
	struct lpfc_register portstat_reg;

	/*
	 * For now, use the SLI4 device internal unrecoverable error
	 * registers for error attention. This can be changed later.
	 */
	if_type = bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf);
	switch (if_type) {
	case LPFC_SLI_INTF_IF_TYPE_0:
		if (lpfc_readl(phba->sli4_hba.u.if_type0.UERRLOregaddr,
			&uerr_sta_lo) ||
			lpfc_readl(phba->sli4_hba.u.if_type0.UERRHIregaddr,
			&uerr_sta_hi)) {
			phba->work_hs |= UNPLUG_ERR;
			phba->work_ha |= HA_ERATT;
			phba->hba_flag |= HBA_ERATT_HANDLED;
			return 1;
		}
		if ((~phba->sli4_hba.ue_mask_lo & uerr_sta_lo) ||
		    (~phba->sli4_hba.ue_mask_hi & uerr_sta_hi)) {
			lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
					"1423 HBA Unrecoverable error: "
					"uerr_lo_reg=0x%x, uerr_hi_reg=0x%x, "
					"ue_mask_lo_reg=0x%x, "
					"ue_mask_hi_reg=0x%x\n",
					uerr_sta_lo, uerr_sta_hi,
					phba->sli4_hba.ue_mask_lo,
					phba->sli4_hba.ue_mask_hi);
			phba->work_status[0] = uerr_sta_lo;
			phba->work_status[1] = uerr_sta_hi;
			phba->work_ha |= HA_ERATT;
			phba->hba_flag |= HBA_ERATT_HANDLED;
			return 1;
		}
		break;
	case LPFC_SLI_INTF_IF_TYPE_2:
	case LPFC_SLI_INTF_IF_TYPE_6:
		if (lpfc_readl(phba->sli4_hba.u.if_type2.STATUSregaddr,
			&portstat_reg.word0) ||
			lpfc_readl(phba->sli4_hba.PSMPHRregaddr,
			&portsmphr)){
			phba->work_hs |= UNPLUG_ERR;
			phba->work_ha |= HA_ERATT;
			phba->hba_flag |= HBA_ERATT_HANDLED;
			return 1;
		}
		if (bf_get(lpfc_sliport_status_err, &portstat_reg)) {
			phba->work_status[0] =
				readl(phba->sli4_hba.u.if_type2.ERR1regaddr);
			phba->work_status[1] =
				readl(phba->sli4_hba.u.if_type2.ERR2regaddr);
			lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
					"2885 Port Status Event: "
					"port status reg 0x%x, "
					"port smphr reg 0x%x, "
					"error 1=0x%x, error 2=0x%x\n",
					portstat_reg.word0,
					portsmphr,
					phba->work_status[0],
					phba->work_status[1]);
			phba->work_ha |= HA_ERATT;
			phba->hba_flag |= HBA_ERATT_HANDLED;
			return 1;
		}
		break;
	case LPFC_SLI_INTF_IF_TYPE_1:
	default:
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"2886 HBA Error Attention on unsupported "
				"if type %d.", if_type);
		return 1;
	}

	return 0;
}
12238
12239
12240
12241
12242
12243
12244
12245
12246
12247
12248
/**
 * lpfc_sli_check_eratt - Check whether a host error attention is pending
 * @phba: pointer to lpfc hba data structure.
 *
 * Front end for the per-SLI-revision error-attention readers.  Returns
 * early (no error) when error attentions are being ignored, when the
 * current attention has already been claimed (HBA_ERATT_HANDLED), when a
 * deferred error is still being processed, or when the PCI channel is
 * offline.  Otherwise dispatches to lpfc_sli_eratt_read (SLI2/3) or
 * lpfc_sli4_eratt_read (SLI4) under hbalock.
 *
 * Return: non-zero if an error attention was detected, 0 otherwise.
 */
int
lpfc_sli_check_eratt(struct lpfc_hba *phba)
{
	uint32_t ha_copy;

	/* If somebody is waiting to handle an eratt, don't process it
	 * here. The brdkill function will do this.
	 */
	if (phba->link_flag & LS_IGNORE_ERATT)
		return 0;

	/* Check if interrupt handler handles this ERATT */
	spin_lock_irq(&phba->hbalock);
	if (phba->hba_flag & HBA_ERATT_HANDLED) {
		/* Interrupt handler has handled ERATT */
		spin_unlock_irq(&phba->hbalock);
		return 0;
	}

	/*
	 * If there is deferred error attention, do not check for error
	 * attention
	 */
	if (unlikely(phba->hba_flag & DEFER_ERATT)) {
		spin_unlock_irq(&phba->hbalock);
		return 0;
	}

	/* If PCI channel is offline, don't process it */
	if (unlikely(pci_channel_offline(phba->pcidev))) {
		spin_unlock_irq(&phba->hbalock);
		return 0;
	}

	switch (phba->sli_rev) {
	case LPFC_SLI_REV2:
	case LPFC_SLI_REV3:
		/* Read chip Host Attention (HA) register */
		ha_copy = lpfc_sli_eratt_read(phba);
		break;
	case LPFC_SLI_REV4:
		/* Read device Uncoverable Error (UERR) registers */
		ha_copy = lpfc_sli4_eratt_read(phba);
		break;
	default:
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"0299 Invalid SLI revision (%d)\n",
				phba->sli_rev);
		ha_copy = 0;
		break;
	}
	spin_unlock_irq(&phba->hbalock);

	return ha_copy;
}
12304
12305
12306
12307
12308
12309
12310
12311
12312
12313
12314
/**
 * lpfc_intr_state_check - Validate device state from an interrupt handler
 * @phba: pointer to lpfc hba data structure.
 *
 * Quick sanity check run at interrupt entry: fails if the PCI channel is
 * offline or the device is undergoing initialization (link_state below
 * LPFC_LINK_DOWN).  The interrupt statistics counter is incremented only
 * after the PCI channel check, so offline spurious interrupts are not
 * counted.
 *
 * Return: 0 if the interrupt may be processed, -EIO otherwise.
 */
static inline int
lpfc_intr_state_check(struct lpfc_hba *phba)
{
	/* If the pci channel is offline, ignore all the interrupts */
	if (unlikely(pci_channel_offline(phba->pcidev)))
		return -EIO;

	/* Update device level interrupt statistics */
	phba->sli.slistat.sli_intr++;

	/* Ignore all interrupts during initialization. */
	if (unlikely(phba->link_state < LPFC_LINK_DOWN))
		return -EIO;

	return 0;
}
12331
12332
12333
12334
12335
12336
12337
12338
12339
12340
12341
12342
12343
12344
12345
12346
12347
12348
12349
12350
12351
12352
/**
 * lpfc_sli_sp_intr_handler - Slow-path interrupt handler to SLI-3 device
 * @irq: Interrupt number.
 * @dev_id: The device context pointer (struct lpfc_hba *).
 *
 * Services slow-path events: mailbox completion, link attention, error
 * attention and ELS-ring events.  Invoked directly as the MSI-X slow-path
 * vector handler; when the driver is in INTx/MSI mode it is instead called
 * from lpfc_sli_intr_handler(), which has already read and cleared the HA
 * register into phba->ha_copy (the phba->intr_type == MSIX branch below is
 * then skipped).
 *
 * Return: IRQ_HANDLED when events were serviced or a register read failed
 * (treated as device unplug), IRQ_NONE otherwise.
 */
irqreturn_t
lpfc_sli_sp_intr_handler(int irq, void *dev_id)
{
	struct lpfc_hba *phba;
	uint32_t ha_copy, hc_copy;
	uint32_t work_ha_copy;
	unsigned long status;
	unsigned long iflag;
	uint32_t control;

	MAILBOX_t *mbox, *pmbox;
	struct lpfc_vport *vport;
	struct lpfc_nodelist *ndlp;
	struct lpfc_dmabuf *mp;
	LPFC_MBOXQ_t *pmb;
	int rc;

	/*
	 * Get the driver's phba structure from the dev_id and
	 * assume the HBA is not interrupting.
	 */
	phba = (struct lpfc_hba *)dev_id;

	if (unlikely(!phba))
		return IRQ_NONE;

	/*
	 * Stuff needs to be attended to when this function is invoked as an
	 * individual interrupt handler in MSI-X multi-message interrupt mode
	 */
	if (phba->intr_type == MSIX) {
		/* Check device state for handling interrupt */
		if (lpfc_intr_state_check(phba))
			return IRQ_NONE;
		/* Need to read HA REG for slow-path events */
		spin_lock_irqsave(&phba->hbalock, iflag);
		if (lpfc_readl(phba->HAregaddr, &ha_copy))
			goto unplug_error;
		/* If somebody is waiting to handle an eratt don't process it
		 * here. The brdkill function will do this.
		 */
		if (phba->link_flag & LS_IGNORE_ERATT)
			ha_copy &= ~HA_ERATT;
		/* Check the need for handling ERATT in interrupt handler */
		if (ha_copy & HA_ERATT) {
			if (phba->hba_flag & HBA_ERATT_HANDLED)
				/* ERATT polling has handled ERATT */
				ha_copy &= ~HA_ERATT;
			else
				/* Indicate interrupt handler handles ERATT */
				phba->hba_flag |= HBA_ERATT_HANDLED;
		}

		/*
		 * If there is deferred error attention, do not check for any
		 * interrupt.
		 */
		if (unlikely(phba->hba_flag & DEFER_ERATT)) {
			spin_unlock_irqrestore(&phba->hbalock, iflag);
			return IRQ_NONE;
		}

		/* Clear up only attention source related to slow-path */
		if (lpfc_readl(phba->HCregaddr, &hc_copy))
			goto unplug_error;

		/* Mask slow-path interrupt sources while the HA bits are
		 * acknowledged, then restore the original HC value.
		 */
		writel(hc_copy & ~(HC_MBINT_ENA | HC_R2INT_ENA |
				   HC_LAINT_ENA | HC_ERINT_ENA),
		       phba->HCregaddr);
		writel((ha_copy & (HA_MBATT | HA_R2_CLR_MSK)),
		       phba->HAregaddr);
		writel(hc_copy, phba->HCregaddr);
		readl(phba->HAregaddr); /* flush */
		spin_unlock_irqrestore(&phba->hbalock, iflag);
	} else
		ha_copy = phba->ha_copy;

	work_ha_copy = ha_copy & phba->work_ha_mask;

	if (work_ha_copy) {
		if (work_ha_copy & HA_LATT) {
			if (phba->sli.sli_flag & LPFC_PROCESS_LA) {
				/*
				 * Turn off Link Attention interrupts
				 * until CLEAR_LA done
				 */
				spin_lock_irqsave(&phba->hbalock, iflag);
				phba->sli.sli_flag &= ~LPFC_PROCESS_LA;
				if (lpfc_readl(phba->HCregaddr, &control))
					goto unplug_error;
				control &= ~HC_LAINT_ENA;
				writel(control, phba->HCregaddr);
				readl(phba->HCregaddr); /* flush */
				spin_unlock_irqrestore(&phba->hbalock, iflag);
			}
			else
				work_ha_copy &= ~HA_LATT;
		}

		if (work_ha_copy & ~(HA_ERATT | HA_MBATT | HA_LATT)) {
			/*
			 * Turn off Slow Rings interrupts, LPFC_ELS_RING is
			 * the only slow ring.
			 */
			status = (work_ha_copy &
				  (HA_RXMASK << (4*LPFC_ELS_RING)));
			status >>= (4*LPFC_ELS_RING);
			if (status & HA_RXMASK) {
				spin_lock_irqsave(&phba->hbalock, iflag);
				if (lpfc_readl(phba->HCregaddr, &control))
					goto unplug_error;

				lpfc_debugfs_slow_ring_trc(phba,
				"ISR slow ring: ctl:x%x stat:x%x isrcnt:x%x",
				control, status,
				(uint32_t)phba->sli.slistat.sli_intr);

				if (control & (HC_R0INT_ENA << LPFC_ELS_RING)) {
					lpfc_debugfs_slow_ring_trc(phba,
						"ISR Disable ring:"
						"pwork:x%x hawork:x%x wait:x%x",
						phba->work_ha, work_ha_copy,
						(uint32_t)((unsigned long)
						&phba->work_waitq));

					control &=
					    ~(HC_R0INT_ENA << LPFC_ELS_RING);
					writel(control, phba->HCregaddr);
					readl(phba->HCregaddr); /* flush */
				}
				else {
					lpfc_debugfs_slow_ring_trc(phba,
						"ISR slow ring: pwork:"
						"x%x hawork:x%x wait:x%x",
						phba->work_ha, work_ha_copy,
						(uint32_t)((unsigned long)
						&phba->work_waitq));
				}
				spin_unlock_irqrestore(&phba->hbalock, iflag);
			}
		}
		spin_lock_irqsave(&phba->hbalock, iflag);
		if (work_ha_copy & HA_ERATT) {
			if (lpfc_sli_read_hs(phba))
				goto unplug_error;
			/*
			 * Check if there is a deferred error condition
			 * is active
			 */
			if ((HS_FFER1 & phba->work_hs) &&
			    ((HS_FFER2 | HS_FFER3 | HS_FFER4 | HS_FFER5 |
			      HS_FFER6 | HS_FFER7 | HS_FFER8) &
			     phba->work_hs)) {
				phba->hba_flag |= DEFER_ERATT;
				/* Clear all interrupt enable conditions */
				writel(0, phba->HCregaddr);
				readl(phba->HCregaddr);
			}
		}

		if ((work_ha_copy & HA_MBATT) && (phba->sli.mbox_active)) {
			pmb = phba->sli.mbox_active;
			pmbox = &pmb->u.mb;
			mbox = phba->mbox;
			vport = pmb->vport;

			/* First check out the status word */
			lpfc_sli_pcimem_bcopy(mbox, pmbox, sizeof(uint32_t));
			if (pmbox->mbxOwner != OWN_HOST) {
				spin_unlock_irqrestore(&phba->hbalock, iflag);
				/*
				 * Stray Mailbox Interrupt, mbxCommand <cmd>
				 * mbxStatus <status>
				 */
				lpfc_printf_log(phba, KERN_ERR, LOG_MBOX |
						LOG_SLI,
						"(%d):0304 Stray Mailbox "
						"Interrupt mbxCommand x%x "
						"mbxStatus x%x\n",
						(vport ? vport->vpi : 0),
						pmbox->mbxCommand,
						pmbox->mbxStatus);
				/* clear mailbox attention bit */
				work_ha_copy &= ~HA_MBATT;
			} else {
				phba->sli.mbox_active = NULL;
				spin_unlock_irqrestore(&phba->hbalock, iflag);
				phba->last_completion_time = jiffies;
				del_timer(&phba->sli.mbox_tmo);
				if (pmb->mbox_cmpl) {
					lpfc_sli_pcimem_bcopy(mbox, pmbox,
							MAILBOX_CMD_SIZE);
					if (pmb->out_ext_byte_len &&
					    pmb->ctx_buf)
						lpfc_sli_pcimem_bcopy(
						phba->mbox_ext,
						pmb->ctx_buf,
						pmb->out_ext_byte_len);
				}
				if (pmb->mbox_flag & LPFC_MBX_IMED_UNREG) {
					pmb->mbox_flag &= ~LPFC_MBX_IMED_UNREG;

					lpfc_debugfs_disc_trc(vport,
						LPFC_DISC_TRC_MBOX_VPORT,
						"MBOX dflt rpi: : "
						"status:x%x rpi:x%x",
						(uint32_t)pmbox->mbxStatus,
						pmbox->un.varWords[0], 0);

					if (!pmbox->mbxStatus) {
						mp = (struct lpfc_dmabuf *)
							(pmb->ctx_buf);
						ndlp = (struct lpfc_nodelist *)
							pmb->ctx_ndlp;

						/* Reg_LOGIN of dflt RPI was
						 * successful. new lets get
						 * rid of the RPI using the
						 * same mbox buffer.
						 */
						lpfc_unreg_login(phba,
							vport->vpi,
							pmbox->un.varWords[0],
							pmb);
						pmb->mbox_cmpl =
							lpfc_mbx_cmpl_dflt_rpi;
						pmb->ctx_buf = mp;
						pmb->ctx_ndlp = ndlp;
						pmb->vport = vport;
						rc = lpfc_sli_issue_mbox(phba,
								pmb,
								MBX_NOWAIT);
						if (rc != MBX_BUSY)
							lpfc_printf_log(phba,
							KERN_ERR,
							LOG_MBOX | LOG_SLI,
							"0350 rc should have"
							"been MBX_BUSY\n");
						if (rc != MBX_NOT_FINISHED)
							goto send_current_mbox;
					}
				}
				spin_lock_irqsave(
						&phba->pport->work_port_lock,
						iflag);
				phba->pport->work_port_events &=
					~WORKER_MBOX_TMO;
				spin_unlock_irqrestore(
						&phba->pport->work_port_lock,
						iflag);
				lpfc_mbox_cmpl_put(phba, pmb);
			}
		} else
			spin_unlock_irqrestore(&phba->hbalock, iflag);

		if ((work_ha_copy & HA_MBATT) &&
		    (phba->sli.mbox_active == NULL)) {
send_current_mbox:
			/* Process next mailbox command if there is one */
			do {
				rc = lpfc_sli_issue_mbox(phba, NULL,
							 MBX_NOWAIT);
			} while (rc == MBX_NOT_FINISHED);
			if (rc != MBX_SUCCESS)
				lpfc_printf_log(phba, KERN_ERR, LOG_MBOX |
						LOG_SLI, "0349 rc should be "
						"MBX_SUCCESS\n");
		}

		spin_lock_irqsave(&phba->hbalock, iflag);
		phba->work_ha |= work_ha_copy;
		spin_unlock_irqrestore(&phba->hbalock, iflag);
		lpfc_worker_wake_up(phba);
	}
	return IRQ_HANDLED;
unplug_error:
	spin_unlock_irqrestore(&phba->hbalock, iflag);
	return IRQ_HANDLED;

} /* lpfc_sli_sp_intr_handler */
12633
12634
12635
12636
12637
12638
12639
12640
12641
12642
12643
12644
12645
12646
12647
12648
12649
12650
12651
12652
/**
 * lpfc_sli_fp_intr_handler - Fast-path interrupt handler to SLI-3 device
 * @irq: Interrupt number.
 * @dev_id: The device context pointer (struct lpfc_hba *).
 *
 * Services fast-path (FCP ring and, when configured, extra ring) events.
 * Invoked directly as the MSI-X fast-path vector handler; when in INTx/MSI
 * mode it is called from lpfc_sli_intr_handler() and uses the HA value
 * already captured in phba->ha_copy.
 *
 * Return: IRQ_HANDLED when events were serviced (or on register read
 * failure), IRQ_NONE otherwise.
 */
irqreturn_t
lpfc_sli_fp_intr_handler(int irq, void *dev_id)
{
	struct lpfc_hba *phba;
	uint32_t ha_copy;
	unsigned long status;
	unsigned long iflag;
	struct lpfc_sli_ring *pring;

	/* Get the driver's phba structure from the dev_id and
	 * assume the HBA is not interrupting.
	 */
	phba = (struct lpfc_hba *) dev_id;

	if (unlikely(!phba))
		return IRQ_NONE;

	/*
	 * Stuff needs to be attended to when this function is invoked as an
	 * individual interrupt handler in MSI-X multi-message interrupt mode
	 */
	if (phba->intr_type == MSIX) {
		/* Check device state for handling interrupt */
		if (lpfc_intr_state_check(phba))
			return IRQ_NONE;
		/* Need to read HA REG for FCP ring and other ring events */
		if (lpfc_readl(phba->HAregaddr, &ha_copy))
			return IRQ_HANDLED;
		/* Clear up only attention source related to fast-path */
		spin_lock_irqsave(&phba->hbalock, iflag);
		/*
		 * If there is deferred error attention, do not check for
		 * any interrupt.
		 */
		if (unlikely(phba->hba_flag & DEFER_ERATT)) {
			spin_unlock_irqrestore(&phba->hbalock, iflag);
			return IRQ_NONE;
		}
		writel((ha_copy & (HA_R0_CLR_MSK | HA_R1_CLR_MSK)),
		       phba->HAregaddr);
		readl(phba->HAregaddr); /* flush */
		spin_unlock_irqrestore(&phba->hbalock, iflag);
	} else
		ha_copy = phba->ha_copy;

	/*
	 * Process all events on FCP ring. Take the optimized path for FCP IO.
	 */
	ha_copy &= ~(phba->work_ha_mask);

	status = (ha_copy & (HA_RXMASK << (4*LPFC_FCP_RING)));
	status >>= (4*LPFC_FCP_RING);
	pring = &phba->sli.sli3_ring[LPFC_FCP_RING];
	if (status & HA_RXMASK)
		lpfc_sli_handle_fast_ring_event(phba, pring, status);

	if (phba->cfg_multi_ring_support == 2) {
		/*
		 * Process all events on extra ring. Take the optimized path
		 * for extra ring IO.
		 */
		status = (ha_copy & (HA_RXMASK << (4*LPFC_EXTRA_RING)));
		status >>= (4*LPFC_EXTRA_RING);
		if (status & HA_RXMASK) {
			lpfc_sli_handle_fast_ring_event(phba,
					&phba->sli.sli3_ring[LPFC_EXTRA_RING],
					status);
		}
	}
	return IRQ_HANDLED;
}  /* lpfc_sli_fp_intr_handler */
12724
12725
12726
12727
12728
12729
12730
12731
12732
12733
12734
12735
12736
12737
12738
12739
12740
12741
/**
 * lpfc_sli_intr_handler - Device-level interrupt handler to SLI-3 device
 * @irq: Interrupt number.
 * @dev_id: The device context pointer (struct lpfc_hba *).
 *
 * INTx/MSI (single vector) entry point.  Reads and clears the host
 * attention register, then dispatches to the slow-path and/or fast-path
 * handlers based on which attention bits are set.
 *
 * Return: IRQ_HANDLED if either sub-handler serviced an event (or a
 * register read failed, treated as device unplug), IRQ_NONE otherwise.
 */
irqreturn_t
lpfc_sli_intr_handler(int irq, void *dev_id)
{
	struct lpfc_hba *phba;
	irqreturn_t sp_irq_rc, fp_irq_rc;
	unsigned long status1, status2;
	uint32_t hc_copy;

	/*
	 * Get the driver's phba structure from the dev_id and
	 * assume the HBA is not interrupting.
	 */
	phba = (struct lpfc_hba *) dev_id;

	if (unlikely(!phba))
		return IRQ_NONE;

	/* Check device state for handling interrupt */
	if (lpfc_intr_state_check(phba))
		return IRQ_NONE;

	spin_lock(&phba->hbalock);
	if (lpfc_readl(phba->HAregaddr, &phba->ha_copy)) {
		spin_unlock(&phba->hbalock);
		return IRQ_HANDLED;
	}

	if (unlikely(!phba->ha_copy)) {
		spin_unlock(&phba->hbalock);
		return IRQ_NONE;
	} else if (phba->ha_copy & HA_ERATT) {
		if (phba->hba_flag & HBA_ERATT_HANDLED)
			/* ERATT polling has handled ERATT */
			phba->ha_copy &= ~HA_ERATT;
		else
			/* Indicate interrupt handler handles ERATT */
			phba->hba_flag |= HBA_ERATT_HANDLED;
	}

	/*
	 * If there is deferred error attention, do not check for any interrupt.
	 */
	if (unlikely(phba->hba_flag & DEFER_ERATT)) {
		spin_unlock(&phba->hbalock);
		return IRQ_NONE;
	}

	/* Clear attention sources except link and error attentions */
	if (lpfc_readl(phba->HCregaddr, &hc_copy)) {
		spin_unlock(&phba->hbalock);
		return IRQ_HANDLED;
	}
	writel(hc_copy & ~(HC_MBINT_ENA | HC_R0INT_ENA | HC_R1INT_ENA
		| HC_R2INT_ENA | HC_LAINT_ENA | HC_ERINT_ENA),
		phba->HCregaddr);
	writel((phba->ha_copy & ~(HA_LATT | HA_ERATT)), phba->HAregaddr);
	writel(hc_copy, phba->HCregaddr);
	readl(phba->HAregaddr); /* flush */
	spin_unlock(&phba->hbalock);

	/*
	 * Invoke slow-path host attention interrupt handling as appropriate.
	 */

	/* status of events with mailbox and link attention */
	status1 = phba->ha_copy & (HA_MBATT | HA_LATT | HA_ERATT);

	/* status of events with ELS ring */
	status2 = (phba->ha_copy & (HA_RXMASK << (4*LPFC_ELS_RING)));
	status2 >>= (4*LPFC_ELS_RING);

	if (status1 || (status2 & HA_RXMASK))
		sp_irq_rc = lpfc_sli_sp_intr_handler(irq, dev_id);
	else
		sp_irq_rc = IRQ_NONE;

	/*
	 * Invoke fast-path host attention interrupt handling as appropriate.
	 */

	/* status of events with FCP ring */
	status1 = (phba->ha_copy & (HA_RXMASK << (4*LPFC_FCP_RING)));
	status1 >>= (4*LPFC_FCP_RING);

	/* status of events with extra ring */
	if (phba->cfg_multi_ring_support == 2) {
		status2 = (phba->ha_copy & (HA_RXMASK << (4*LPFC_EXTRA_RING)));
		status2 >>= (4*LPFC_EXTRA_RING);
	} else
		status2 = 0;

	if ((status1 & HA_RXMASK) || (status2 & HA_RXMASK))
		fp_irq_rc = lpfc_sli_fp_intr_handler(irq, dev_id);
	else
		fp_irq_rc = IRQ_NONE;

	/* Return device-level interrupt handling status */
	return (sp_irq_rc == IRQ_HANDLED) ? sp_irq_rc : fp_irq_rc;
}  /* lpfc_sli_intr_handler */
12841
12842
12843
12844
12845
12846
12847
12848
/**
 * lpfc_sli4_els_xri_abort_event_proc - Process ELS XRI abort events
 * @phba: Pointer to HBA context.
 *
 * Worker-thread routine that drains the slow-path ELS XRI-aborted work
 * queue, invoking the abort handler for each queued event and returning
 * the event structure to the free pool.  The hbalock is dropped around
 * the per-event processing so new events can be queued concurrently.
 */
void lpfc_sli4_els_xri_abort_event_proc(struct lpfc_hba *phba)
{
	struct lpfc_cq_event *cq_event;

	/* First, declare the els xri abort event has been handled */
	spin_lock_irq(&phba->hbalock);
	phba->hba_flag &= ~ELS_XRI_ABORT_EVENT;
	spin_unlock_irq(&phba->hbalock);
	/* Now, handle all the els xri abort events */
	while (!list_empty(&phba->sli4_hba.sp_els_xri_aborted_work_queue)) {
		/* Get the first event from the head of the event queue */
		spin_lock_irq(&phba->hbalock);
		list_remove_head(&phba->sli4_hba.sp_els_xri_aborted_work_queue,
				 cq_event, struct lpfc_cq_event, list);
		spin_unlock_irq(&phba->hbalock);
		/* Notify aborted XRI for ELS work queue */
		lpfc_sli4_els_xri_aborted(phba, &cq_event->cqe.wcqe_axri);
		/* Free the event processed back to the free pool */
		lpfc_sli4_cq_event_release(phba, cq_event);
	}
}
12870
12871
12872
12873
12874
12875
12876
12877
12878
12879
12880
12881
/**
 * lpfc_sli4_iocb_param_transfer - Transfer pIocbOut and WCQE to pIocbIn
 * @phba: Pointer to HBA context.
 * @pIocbIn: Pointer to the modified-to response IOCB.
 * @pIocbOut: Pointer to the command IOCB being completed.
 * @wcqe: Pointer to the work-queue completion entry.
 *
 * Copies the command IOCB into the response IOCB (everything past the
 * embedded iocb member) and overlays the completion status, residuals and
 * DIF (BlockGuard) error information decoded from the WCQE, so that SLI-3
 * style completion handlers can consume the result.
 */
static void
lpfc_sli4_iocb_param_transfer(struct lpfc_hba *phba,
			      struct lpfc_iocbq *pIocbIn,
			      struct lpfc_iocbq *pIocbOut,
			      struct lpfc_wcqe_complete *wcqe)
{
	int numBdes, i;
	unsigned long iflags;
	uint32_t status, max_response;
	struct lpfc_dmabuf *dmabuf;
	struct ulp_bde64 *bpl, bde;
	size_t offset = offsetof(struct lpfc_iocbq, iocb);

	memcpy((char *)pIocbIn + offset, (char *)pIocbOut + offset,
	       sizeof(struct lpfc_iocbq) - offset);
	/* Map WCQE parameters to the stored IOCB parameters */
	status = bf_get(lpfc_wcqe_c_status, wcqe);
	pIocbIn->iocb.ulpStatus = (status & LPFC_IOCB_STATUS_MASK);
	/* NOTE(review): unbraced if/else chain below — the first "else"
	 * binds to the inner if, the second to the outer if (standard C
	 * dangling-else rules); behavior is as intended but braces would
	 * make it clearer.
	 */
	if (pIocbOut->iocb_flag & LPFC_IO_FCP)
		if (pIocbIn->iocb.ulpStatus == IOSTAT_FCP_RSP_ERROR)
			/* Residual count from the device's FCP response */
			pIocbIn->iocb.un.fcpi.fcpi_parm =
					pIocbOut->iocb.un.fcpi.fcpi_parm -
					wcqe->total_data_placed;
		else
			pIocbIn->iocb.un.ulpWord[4] = wcqe->parameter;
	else {
		pIocbIn->iocb.un.ulpWord[4] = wcqe->parameter;
		/* Derive the maximum response length from the command BPL */
		switch (pIocbOut->iocb.ulpCommand) {
		case CMD_ELS_REQUEST64_CR:
			dmabuf = (struct lpfc_dmabuf *)pIocbOut->context3;
			bpl = (struct ulp_bde64 *)dmabuf->virt;
			bde.tus.w = le32_to_cpu(bpl[1].tus.w);
			max_response = bde.tus.f.bdeSize;
			break;
		case CMD_GEN_REQUEST64_CR:
			max_response = 0;
			if (!pIocbOut->context3)
				break;
			numBdes = pIocbOut->iocb.un.genreq64.bdl.bdeSize/
					sizeof(struct ulp_bde64);
			dmabuf = (struct lpfc_dmabuf *)pIocbOut->context3;
			bpl = (struct ulp_bde64 *)dmabuf->virt;
			for (i = 0; i < numBdes; i++) {
				bde.tus.w = le32_to_cpu(bpl[i].tus.w);
				if (bde.tus.f.bdeFlags != BUFF_TYPE_BDE_64)
					max_response += bde.tus.f.bdeSize;
			}
			break;
		default:
			max_response = wcqe->total_data_placed;
			break;
		}
		if (max_response < wcqe->total_data_placed)
			pIocbIn->iocb.un.genreq64.bdl.bdeSize = max_response;
		else
			pIocbIn->iocb.un.genreq64.bdl.bdeSize =
				wcqe->total_data_placed;
	}

	/* Convert BG errors for completion status */
	if (status == CQE_STATUS_DI_ERROR) {
		pIocbIn->iocb.ulpStatus = IOSTAT_LOCAL_REJECT;

		if (bf_get(lpfc_wcqe_c_bg_edir, wcqe))
			pIocbIn->iocb.un.ulpWord[4] = IOERR_RX_DMA_FAILED;
		else
			pIocbIn->iocb.un.ulpWord[4] = IOERR_TX_DMA_FAILED;

		pIocbIn->iocb.unsli3.sli3_bg.bgstat = 0;
		if (bf_get(lpfc_wcqe_c_bg_ge, wcqe)) /* Guard Check failed */
			pIocbIn->iocb.unsli3.sli3_bg.bgstat |=
				BGS_GUARD_ERR_MASK;
		if (bf_get(lpfc_wcqe_c_bg_ae, wcqe)) /* App Tag Check failed */
			pIocbIn->iocb.unsli3.sli3_bg.bgstat |=
				BGS_APPTAG_ERR_MASK;
		if (bf_get(lpfc_wcqe_c_bg_re, wcqe)) /* Ref Tag Check failed */
			pIocbIn->iocb.unsli3.sli3_bg.bgstat |=
				BGS_REFTAG_ERR_MASK;

		/* Check to see if there was any good data before the error */
		if (bf_get(lpfc_wcqe_c_bg_tdpv, wcqe)) {
			pIocbIn->iocb.unsli3.sli3_bg.bgstat |=
				BGS_HI_WATER_MARK_PRESENT_MASK;
			pIocbIn->iocb.unsli3.sli3_bg.bghm =
				wcqe->total_data_placed;
		}

		/*
		 * Set ALL the error bits to indicate we don't know what
		 * type of error it is.
		 */
		if (!pIocbIn->iocb.unsli3.sli3_bg.bgstat)
			pIocbIn->iocb.unsli3.sli3_bg.bgstat |=
				(BGS_REFTAG_ERR_MASK | BGS_APPTAG_ERR_MASK |
				BGS_GUARD_ERR_MASK);
	}

	/* Pick up HBA exchange busy condition */
	if (bf_get(lpfc_wcqe_c_xb, wcqe)) {
		spin_lock_irqsave(&phba->hbalock, iflags);
		pIocbIn->iocb_flag |= LPFC_EXCHANGE_BUSY;
		spin_unlock_irqrestore(&phba->hbalock, iflags);
	}
}
12986
12987
12988
12989
12990
12991
12992
12993
12994
12995
12996
12997
/**
 * lpfc_sli4_els_wcqe_to_rspiocbq - Build a response IOCB from an ELS WCQE
 * @phba: Pointer to HBA context.
 * @irspiocbq: Pointer to the response IOCB carrying the saved WCQE.
 *
 * Looks up the originating ELS command IOCB by the WCQE request tag,
 * puts it back on the txcmplq for normal completion processing, and
 * fills @irspiocbq with the command/completion parameters.
 *
 * Return: @irspiocbq on success; NULL when the ELS ring is unavailable
 * or no matching command IOCB is found (in which case @irspiocbq is
 * released back to the pool).
 */
static struct lpfc_iocbq *
lpfc_sli4_els_wcqe_to_rspiocbq(struct lpfc_hba *phba,
			       struct lpfc_iocbq *irspiocbq)
{
	struct lpfc_sli_ring *pring;
	struct lpfc_iocbq *cmdiocbq;
	struct lpfc_wcqe_complete *wcqe;
	unsigned long iflags;

	pring = lpfc_phba_elsring(phba);
	if (unlikely(!pring))
		return NULL;

	wcqe = &irspiocbq->cq_event.cqe.wcqe_cmpl;
	pring->stats.iocb_event++;
	/* Look up the ELS command IOCB and create pseudo response IOCB */
	cmdiocbq = lpfc_sli_iocbq_lookup_by_tag(phba, pring,
				bf_get(lpfc_wcqe_c_request_tag, wcqe));
	if (unlikely(!cmdiocbq)) {
		lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
				"0386 ELS complete with no corresponding "
				"cmdiocb: 0x%x 0x%x 0x%x 0x%x\n",
				wcqe->word0, wcqe->total_data_placed,
				wcqe->parameter, wcqe->word3);
		lpfc_sli_release_iocbq(phba, irspiocbq);
		return NULL;
	}

	spin_lock_irqsave(&pring->ring_lock, iflags);
	/* Put the iocb back on the txcmplq */
	lpfc_sli_ringtxcmpl_put(phba, pring, cmdiocbq);
	spin_unlock_irqrestore(&pring->ring_lock, iflags);

	/* Fake the irspiocbq and copy necessary response information */
	lpfc_sli4_iocb_param_transfer(phba, irspiocbq, cmdiocbq, wcqe);

	return irspiocbq;
}
13036
13037inline struct lpfc_cq_event *
13038lpfc_cq_event_setup(struct lpfc_hba *phba, void *entry, int size)
13039{
13040 struct lpfc_cq_event *cq_event;
13041
13042
13043 cq_event = lpfc_sli4_cq_event_alloc(phba);
13044 if (!cq_event) {
13045 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
13046 "0602 Failed to alloc CQ_EVENT entry\n");
13047 return NULL;
13048 }
13049
13050
13051 memcpy(&cq_event->cqe, entry, size);
13052 return cq_event;
13053}
13054
13055
13056
13057
13058
13059
13060
13061
13062
13063
13064
13065static bool
13066lpfc_sli4_sp_handle_async_event(struct lpfc_hba *phba, struct lpfc_mcqe *mcqe)
13067{
13068 struct lpfc_cq_event *cq_event;
13069 unsigned long iflags;
13070
13071 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
13072 "0392 Async Event: word0:x%x, word1:x%x, "
13073 "word2:x%x, word3:x%x\n", mcqe->word0,
13074 mcqe->mcqe_tag0, mcqe->mcqe_tag1, mcqe->trailer);
13075
13076 cq_event = lpfc_cq_event_setup(phba, mcqe, sizeof(struct lpfc_mcqe));
13077 if (!cq_event)
13078 return false;
13079 spin_lock_irqsave(&phba->hbalock, iflags);
13080 list_add_tail(&cq_event->list, &phba->sli4_hba.sp_asynce_work_queue);
13081
13082 phba->hba_flag |= ASYNC_EVENT;
13083 spin_unlock_irqrestore(&phba->hbalock, iflags);
13084
13085 return true;
13086}
13087
13088
13089
13090
13091
13092
13093
13094
13095
13096
13097
/**
 * lpfc_sli4_sp_handle_mbox_event - Handle a mailbox completion MCQE
 * @phba: Pointer to HBA context.
 * @mcqe: Pointer to the mailbox completion queue entry.
 *
 * Completes the active mailbox command: copies the response back to the
 * caller's mailbox region, folds any MCQE error into the MQE status,
 * handles the immediate default-RPI unregister case, queues the command
 * completion to the worker thread, and releases the mailbox posting
 * token so the next pending mailbox can be issued.
 *
 * Return: true when work was posted to the worker thread, false
 * otherwise.
 */
static bool
lpfc_sli4_sp_handle_mbox_event(struct lpfc_hba *phba, struct lpfc_mcqe *mcqe)
{
	uint32_t mcqe_status;
	MAILBOX_t *mbox, *pmbox;
	struct lpfc_mqe *mqe;
	struct lpfc_vport *vport;
	struct lpfc_nodelist *ndlp;
	struct lpfc_dmabuf *mp;
	unsigned long iflags;
	LPFC_MBOXQ_t *pmb;
	bool workposted = false;
	int rc;

	/* If not a mailbox complete MCQE, out by checking mailbox consume */
	if (!bf_get(lpfc_trailer_completed, mcqe))
		goto out_no_mqe_complete;

	/* Get the reference to the active mbox command */
	spin_lock_irqsave(&phba->hbalock, iflags);
	pmb = phba->sli.mbox_active;
	if (unlikely(!pmb)) {
		lpfc_printf_log(phba, KERN_ERR, LOG_MBOX,
				"1832 No pending MBOX command to handle\n");
		spin_unlock_irqrestore(&phba->hbalock, iflags);
		goto out_no_mqe_complete;
	}
	spin_unlock_irqrestore(&phba->hbalock, iflags);
	mqe = &pmb->u.mqe;
	pmbox = (MAILBOX_t *)&pmb->u.mqe;
	mbox = phba->mbox;
	vport = pmb->vport;

	/* Reset heartbeat timer */
	phba->last_completion_time = jiffies;
	del_timer(&phba->sli.mbox_tmo);

	/* Move mbox data to caller's mailbox region, do endian swapping */
	if (pmb->mbox_cmpl && mbox)
		lpfc_sli4_pcimem_bcopy(mbox, mqe, sizeof(struct lpfc_mqe));

	/*
	 * For mcqe errors, conditionally move a modified error code to
	 * the mbox so that the error will not be missed.
	 */
	mcqe_status = bf_get(lpfc_mcqe_status, mcqe);
	if (mcqe_status != MB_CQE_STATUS_SUCCESS) {
		if (bf_get(lpfc_mqe_status, mqe) == MBX_SUCCESS)
			bf_set(lpfc_mqe_status, mqe,
			       (LPFC_MBX_ERROR_RANGE | mcqe_status));
	}
	if (pmb->mbox_flag & LPFC_MBX_IMED_UNREG) {
		pmb->mbox_flag &= ~LPFC_MBX_IMED_UNREG;
		lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_MBOX_VPORT,
				      "MBOX dflt rpi: status:x%x rpi:x%x",
				      mcqe_status,
				      pmbox->un.varWords[0], 0);
		if (mcqe_status == MB_CQE_STATUS_SUCCESS) {
			mp = (struct lpfc_dmabuf *)(pmb->ctx_buf);
			ndlp = (struct lpfc_nodelist *)pmb->ctx_ndlp;
			/* Reg_LOGIN of dflt RPI was successful. Now lets get
			 * rid of the RPI using the same mbox buffer.
			 */
			lpfc_unreg_login(phba, vport->vpi,
					 pmbox->un.varWords[0], pmb);
			pmb->mbox_cmpl = lpfc_mbx_cmpl_dflt_rpi;
			pmb->ctx_buf = mp;
			pmb->ctx_ndlp = ndlp;
			pmb->vport = vport;
			rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT);
			if (rc != MBX_BUSY)
				lpfc_printf_log(phba, KERN_ERR, LOG_MBOX |
						LOG_SLI, "0385 rc should "
						"have been MBX_BUSY\n");
			if (rc != MBX_NOT_FINISHED)
				goto send_current_mbox;
		}
	}
	spin_lock_irqsave(&phba->pport->work_port_lock, iflags);
	phba->pport->work_port_events &= ~WORKER_MBOX_TMO;
	spin_unlock_irqrestore(&phba->pport->work_port_lock, iflags);

	/* There is mailbox completion work to do */
	spin_lock_irqsave(&phba->hbalock, iflags);
	__lpfc_mbox_cmpl_put(phba, pmb);
	phba->work_ha |= HA_MBATT;
	spin_unlock_irqrestore(&phba->hbalock, iflags);
	workposted = true;

send_current_mbox:
	spin_lock_irqsave(&phba->hbalock, iflags);
	/* Release the mailbox command posting token */
	phba->sli.sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
	/* Setting active mailbox pointer need to be in sync to flag clear */
	phba->sli.mbox_active = NULL;
	spin_unlock_irqrestore(&phba->hbalock, iflags);
	/* Wake up worker thread to post the next pending mailbox command */
	lpfc_worker_wake_up(phba);
out_no_mqe_complete:
	if (bf_get(lpfc_trailer_consumed, mcqe))
		lpfc_sli4_mq_release(phba->sli4_hba.mbx_wq);
	return workposted;
}
13201
13202
13203
13204
13205
13206
13207
13208
13209
13210
13211
13212
13213static bool
13214lpfc_sli4_sp_handle_mcqe(struct lpfc_hba *phba, struct lpfc_queue *cq,
13215 struct lpfc_cqe *cqe)
13216{
13217 struct lpfc_mcqe mcqe;
13218 bool workposted;
13219
13220 cq->CQ_mbox++;
13221
13222
13223 lpfc_sli4_pcimem_bcopy(cqe, &mcqe, sizeof(struct lpfc_mcqe));
13224
13225
13226 if (!bf_get(lpfc_trailer_async, &mcqe))
13227 workposted = lpfc_sli4_sp_handle_mbox_event(phba, &mcqe);
13228 else
13229 workposted = lpfc_sli4_sp_handle_async_event(phba, &mcqe);
13230 return workposted;
13231}
13232
13233
13234
13235
13236
13237
13238
13239
13240
13241
13242
/**
 * lpfc_sli4_sp_handle_els_wcqe - Handle an ELS work-queue completion entry
 * @phba: Pointer to HBA context.
 * @cq: Pointer to the completion queue the entry arrived on.
 * @wcqe: Pointer to the work-queue completion entry.
 *
 * Saves the WCQE into a freshly allocated response IOCB and queues it on
 * the slow-path event list for the worker thread to process.
 *
 * Return: true when the event was queued for the worker, false when no
 * IOCB could be allocated.
 */
static bool
lpfc_sli4_sp_handle_els_wcqe(struct lpfc_hba *phba, struct lpfc_queue *cq,
			     struct lpfc_wcqe_complete *wcqe)
{
	struct lpfc_iocbq *irspiocbq;
	unsigned long iflags;
	struct lpfc_sli_ring *pring = cq->pring;
	int txq_cnt = 0;
	int txcmplq_cnt = 0;
	int fcp_txcmplq_cnt = 0;

	/* Check for response status */
	if (unlikely(bf_get(lpfc_wcqe_c_status, wcqe))) {
		/* Log the error status */
		lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
				"0357 ELS CQE error: status=x%x: "
				"CQE: %08x %08x %08x %08x\n",
				bf_get(lpfc_wcqe_c_status, wcqe),
				wcqe->word0, wcqe->total_data_placed,
				wcqe->parameter, wcqe->word3);
	}

	/* Get an irspiocbq for later ELS response processing use */
	irspiocbq = lpfc_sli_get_iocbq(phba);
	if (!irspiocbq) {
		if (!list_empty(&pring->txq))
			txq_cnt++;
		if (!list_empty(&pring->txcmplq))
			txcmplq_cnt++;
		/* NOTE(review): fcp_txcmplq_cnt is never incremented here,
		 * so the message below always reports 0 for it.
		 */
		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
			"0387 NO IOCBQ data: txq_cnt=%d iocb_cnt=%d "
			"fcp_txcmplq_cnt=%d, els_txcmplq_cnt=%d\n",
			txq_cnt, phba->iocb_cnt,
			fcp_txcmplq_cnt,
			txcmplq_cnt);
		return false;
	}

	/* Save off the slow-path queue event for work thread to process */
	memcpy(&irspiocbq->cq_event.cqe.wcqe_cmpl, wcqe, sizeof(*wcqe));
	spin_lock_irqsave(&phba->hbalock, iflags);
	list_add_tail(&irspiocbq->cq_event.list,
		      &phba->sli4_hba.sp_queue_event);
	phba->hba_flag |= HBA_SP_QUEUE_EVT;
	spin_unlock_irqrestore(&phba->hbalock, iflags);

	return true;
}
13291
13292
13293
13294
13295
13296
13297
13298
13299
13300static void
13301lpfc_sli4_sp_handle_rel_wcqe(struct lpfc_hba *phba,
13302 struct lpfc_wcqe_release *wcqe)
13303{
13304
13305 if (unlikely(!phba->sli4_hba.els_wq))
13306 return;
13307
13308 if (bf_get(lpfc_wcqe_r_wq_id, wcqe) == phba->sli4_hba.els_wq->queue_id)
13309 lpfc_sli4_wq_release(phba->sli4_hba.els_wq,
13310 bf_get(lpfc_wcqe_r_wqe_index, wcqe));
13311 else
13312 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
13313 "2579 Slow-path wqe consume event carries "
13314 "miss-matched qid: wcqe-qid=x%x, sp-qid=x%x\n",
13315 bf_get(lpfc_wcqe_r_wqe_index, wcqe),
13316 phba->sli4_hba.els_wq->queue_id);
13317}
13318
13319
13320
13321
13322
13323
13324
13325
13326
13327
13328
/**
 * lpfc_sli4_sp_handle_abort_xri_wcqe - Handle an XRI-aborted CQ entry
 * @phba: Pointer to HBA context.
 * @cq: Pointer to the completion queue the entry arrived on.
 * @wcqe: Pointer to the XRI-aborted completion entry.
 *
 * Routes the aborted-XRI event by CQ subtype: FCP and NVME events are
 * handled inline; ELS/NVME-LS events are copied into a CQ event and
 * queued for the worker thread.
 *
 * Return: true when work was posted to the worker thread, false
 * otherwise.
 */
static bool
lpfc_sli4_sp_handle_abort_xri_wcqe(struct lpfc_hba *phba,
				   struct lpfc_queue *cq,
				   struct sli4_wcqe_xri_aborted *wcqe)
{
	bool workposted = false;
	struct lpfc_cq_event *cq_event;
	unsigned long iflags;

	switch (cq->subtype) {
	case LPFC_FCP:
		/* Handled inline; no worker involvement needed */
		lpfc_sli4_fcp_xri_aborted(phba, wcqe, cq->hdwq);
		workposted = false;
		break;
	case LPFC_NVME_LS: /* NVME LS uses ELS resources */
	case LPFC_ELS:
		cq_event = lpfc_cq_event_setup(
			phba, wcqe, sizeof(struct sli4_wcqe_xri_aborted));
		if (!cq_event)
			return false;
		cq_event->hdwq = cq->hdwq;
		spin_lock_irqsave(&phba->hbalock, iflags);
		list_add_tail(&cq_event->list,
			      &phba->sli4_hba.sp_els_xri_aborted_work_queue);
		/* Set the els xri abort event flag */
		phba->hba_flag |= ELS_XRI_ABORT_EVENT;
		spin_unlock_irqrestore(&phba->hbalock, iflags);
		workposted = true;
		break;
	case LPFC_NVME:
		/* Notify aborted XRI for NVME work queue */
		if (phba->nvmet_support)
			lpfc_sli4_nvmet_xri_aborted(phba, wcqe);
		else
			lpfc_sli4_nvme_xri_aborted(phba, wcqe, cq->hdwq);

		workposted = false;
		break;
	default:
		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
				"0603 Invalid CQ subtype %d: "
				"%08x %08x %08x %08x\n",
				cq->subtype, wcqe->word0, wcqe->parameter,
				wcqe->word2, wcqe->word3);
		workposted = false;
		break;
	}
	return workposted;
}
13378
13379#define FC_RCTL_MDS_DIAGS 0xF4
13380
13381
13382
13383
13384
13385
13386
13387
13388
13389
/**
 * lpfc_sli4_sp_handle_rcqe - Handle an unsolicited receive CQ entry
 * @phba: Pointer to HBA context.
 * @rcqe: Pointer to the receive-queue completion entry.
 *
 * Pops the received frame off the header/data receive queue pair.  MDS
 * diagnostic loopback frames are handled directly; all other frames are
 * queued on the slow-path event list for the worker thread.  On buffer
 * exhaustion the HBA_POST_RECEIVE_BUFFER flag is set so the worker
 * replenishes buffers.
 *
 * Return: true when work was posted to the worker thread, false
 * otherwise.
 */
static bool
lpfc_sli4_sp_handle_rcqe(struct lpfc_hba *phba, struct lpfc_rcqe *rcqe)
{
	bool workposted = false;
	struct fc_frame_header *fc_hdr;
	struct lpfc_queue *hrq = phba->sli4_hba.hdr_rq;
	struct lpfc_queue *drq = phba->sli4_hba.dat_rq;
	struct lpfc_nvmet_tgtport *tgtp;
	struct hbq_dmabuf *dma_buf;
	uint32_t status, rq_id;
	unsigned long iflags;

	/* sanity check on queue memory */
	if (unlikely(!hrq) || unlikely(!drq))
		return workposted;

	if (bf_get(lpfc_cqe_code, rcqe) == CQE_CODE_RECEIVE_V1)
		rq_id = bf_get(lpfc_rcqe_rq_id_v1, rcqe);
	else
		rq_id = bf_get(lpfc_rcqe_rq_id, rcqe);
	if (rq_id != hrq->queue_id)
		goto out;

	status = bf_get(lpfc_rcqe_status, rcqe);
	switch (status) {
	case FC_STATUS_RQ_BUF_LEN_EXCEEDED:
		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
				"2537 Receive Frame Truncated!!\n");
		/* fall through */
	case FC_STATUS_RQ_SUCCESS:
		spin_lock_irqsave(&phba->hbalock, iflags);
		lpfc_sli4_rq_release(hrq, drq);
		dma_buf = lpfc_sli_hbqbuf_get(&phba->hbqs[0].hbq_buffer_list);
		if (!dma_buf) {
			hrq->RQ_no_buf_found++;
			spin_unlock_irqrestore(&phba->hbalock, iflags);
			goto out;
		}
		hrq->RQ_rcv_buf++;
		hrq->RQ_buf_posted--;
		memcpy(&dma_buf->cq_event.cqe.rcqe_cmpl, rcqe, sizeof(*rcqe));

		fc_hdr = (struct fc_frame_header *)dma_buf->hbuf.virt;

		if (fc_hdr->fh_r_ctl == FC_RCTL_MDS_DIAGS ||
		    fc_hdr->fh_r_ctl == FC_RCTL_DD_UNSOL_DATA) {
			spin_unlock_irqrestore(&phba->hbalock, iflags);
			/* Handle MDS Loopback frames */
			lpfc_sli4_handle_mds_loopback(phba->pport, dma_buf);
			break;
		}

		/* save off the frame for the work thread to process */
		list_add_tail(&dma_buf->cq_event.list,
			      &phba->sli4_hba.sp_queue_event);
		/* Frame received */
		phba->hba_flag |= HBA_SP_QUEUE_EVT;
		spin_unlock_irqrestore(&phba->hbalock, iflags);
		workposted = true;
		break;
	case FC_STATUS_INSUFF_BUF_FRM_DISC:
		if (phba->nvmet_support) {
			tgtp = phba->targetport->private;
			lpfc_printf_log(phba, KERN_ERR, LOG_SLI | LOG_NVME,
					"6402 RQE Error x%x, posted %d err_cnt "
					"%d: %x %x %x\n",
					status, hrq->RQ_buf_posted,
					hrq->RQ_no_posted_buf,
					atomic_read(&tgtp->rcv_fcp_cmd_in),
					atomic_read(&tgtp->rcv_fcp_cmd_out),
					atomic_read(&tgtp->xmt_fcp_release));
		}
		/* fall through */

	case FC_STATUS_INSUFF_BUF_NEED_BUF:
		hrq->RQ_no_posted_buf++;
		/* Post more buffers if possible */
		spin_lock_irqsave(&phba->hbalock, iflags);
		phba->hba_flag |= HBA_POST_RECEIVE_BUFFER;
		spin_unlock_irqrestore(&phba->hbalock, iflags);
		workposted = true;
		break;
	}
out:
	return workposted;
}
13476
13477
13478
13479
13480
13481
13482
13483
13484
13485
13486
13487
13488static bool
13489lpfc_sli4_sp_handle_cqe(struct lpfc_hba *phba, struct lpfc_queue *cq,
13490 struct lpfc_cqe *cqe)
13491{
13492 struct lpfc_cqe cqevt;
13493 bool workposted = false;
13494
13495
13496 lpfc_sli4_pcimem_bcopy(cqe, &cqevt, sizeof(struct lpfc_cqe));
13497
13498
13499 switch (bf_get(lpfc_cqe_code, &cqevt)) {
13500 case CQE_CODE_COMPL_WQE:
13501
13502 phba->last_completion_time = jiffies;
13503 workposted = lpfc_sli4_sp_handle_els_wcqe(phba, cq,
13504 (struct lpfc_wcqe_complete *)&cqevt);
13505 break;
13506 case CQE_CODE_RELEASE_WQE:
13507
13508 lpfc_sli4_sp_handle_rel_wcqe(phba,
13509 (struct lpfc_wcqe_release *)&cqevt);
13510 break;
13511 case CQE_CODE_XRI_ABORTED:
13512
13513 phba->last_completion_time = jiffies;
13514 workposted = lpfc_sli4_sp_handle_abort_xri_wcqe(phba, cq,
13515 (struct sli4_wcqe_xri_aborted *)&cqevt);
13516 break;
13517 case CQE_CODE_RECEIVE:
13518 case CQE_CODE_RECEIVE_V1:
13519
13520 phba->last_completion_time = jiffies;
13521 workposted = lpfc_sli4_sp_handle_rcqe(phba,
13522 (struct lpfc_rcqe *)&cqevt);
13523 break;
13524 default:
13525 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
13526 "0388 Not a valid WCQE code: x%x\n",
13527 bf_get(lpfc_cqe_code, &cqevt));
13528 break;
13529 }
13530 return workposted;
13531}
13532
13533
13534
13535
13536
13537
13538
13539
13540
13541
13542
13543
13544
13545
/**
 * lpfc_sli4_sp_handle_eqe - Process a slow-path event queue entry
 * @phba: Pointer to HBA context.
 * @eqe: Pointer to the event queue entry.
 * @speq: Pointer to the slow-path event queue.
 *
 * Finds the child completion queue referenced by the EQE's resource id
 * and schedules its work item (cq->spwork) on the queue's bound CPU for
 * deferred CQ processing.
 */
static void
lpfc_sli4_sp_handle_eqe(struct lpfc_hba *phba, struct lpfc_eqe *eqe,
			struct lpfc_queue *speq)
{
	struct lpfc_queue *cq = NULL, *childq;
	uint16_t cqid;

	/* Get the reference to the corresponding CQ */
	cqid = bf_get_le32(lpfc_eqe_resource_id, eqe);

	list_for_each_entry(childq, &speq->child_list, list) {
		if (childq->queue_id == cqid) {
			cq = childq;
			break;
		}
	}
	if (unlikely(!cq)) {
		if (phba->sli.sli_flag & LPFC_SLI_ACTIVE)
			lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
					"0365 Slow-path CQ identifier "
					"(%d) does not exist\n", cqid);
		return;
	}

	/* Save EQ associated with this CQ */
	cq->assoc_qp = speq;

	if (!queue_work_on(cq->chann, phba->wq, &cq->spwork))
		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
				"0390 Cannot schedule soft IRQ "
				"for CQ eqcqid=%d, cqid=%d on CPU %d\n",
				cqid, cq->queue_id, raw_smp_processor_id());
}
13579
13580
13581
13582
13583
13584
13585
13586
13587
13588
13589
13590
13591
13592
13593
13594
13595
13596
13597
13598
13599
/**
 * __lpfc_sli4_process_cq - Process elements of a CQ
 * @phba: Pointer to HBA context.
 * @cq: Pointer to the completion queue to process.
 * @handler: Per-entry callback routine.
 * @delay: Out parameter; set non-zero when processing should be
 *         rescheduled after a delay instead of re-arming the CQ.
 *
 * Claims the CQ via cmpxchg on cq->queue_claimed so only one context
 * processes it at a time, consumes entries up to cq->max_proc_limit
 * (notifying the hardware of consumed entries every
 * cq->notify_interval), then releases the claim and writes the doorbell
 * — re-armed unless the poll threshold was hit, in which case *delay is
 * set so the caller reschedules.
 *
 * Return: true when any handler invocation posted work to the worker
 * thread.
 */
static bool
__lpfc_sli4_process_cq(struct lpfc_hba *phba, struct lpfc_queue *cq,
	bool (*handler)(struct lpfc_hba *, struct lpfc_queue *,
			struct lpfc_cqe *), unsigned long *delay)
{
	struct lpfc_cqe *cqe;
	bool workposted = false;
	int count = 0, consumed = 0;
	bool arm = true;

	/* default - no reschedule */
	*delay = 0;

	if (cmpxchg(&cq->queue_claimed, 0, 1) != 0)
		goto rearm_and_exit;

	/* Process all the entries to the CQ */
	cq->q_flag = 0;
	cqe = lpfc_sli4_cq_get(cq);
	while (cqe) {
		workposted |= handler(phba, cq, cqe);
		__lpfc_sli4_consume_cqe(phba, cq, cqe);

		consumed++;
		if (!(++count % cq->max_proc_limit))
			break;

		if (!(count % cq->notify_interval)) {
			phba->sli4_hba.sli4_write_cq_db(phba, cq, consumed,
							LPFC_QUEUE_NOARM);
			consumed = 0;
		}

		if (count == LPFC_NVMET_CQ_NOTIFY)
			cq->q_flag |= HBA_NVMET_CQ_NOTIFY;

		cqe = lpfc_sli4_cq_get(cq);
	}
	if (count >= phba->cfg_cq_poll_threshold) {
		*delay = 1;
		arm = false;
	}

	/* Track the max number of CQEs processed in 1 EQ */
	if (count > cq->CQ_max_cqe)
		cq->CQ_max_cqe = count;

	cq->assoc_qp->EQ_cqe_cnt += count;

	/* Catch the no cq entry condition */
	if (unlikely(count == 0))
		lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
				"0369 No entry from completion queue "
				"qid=%d\n", cq->queue_id);

	cq->queue_claimed = 0;

rearm_and_exit:
	phba->sli4_hba.sli4_write_cq_db(phba, cq, consumed,
			arm ? LPFC_QUEUE_REARM : LPFC_QUEUE_NOARM);

	return workposted;
}
13663
13664
13665
13666
13667
13668
13669
13670
13671
13672
13673
13674
13675
13676
13677
13678
/**
 * __lpfc_sli4_sp_process_cq - Process a slow-path completion queue
 * @cq: the slow-path completion queue to service
 *
 * Dispatches CQE processing to the handler matching the CQ type
 * (mailbox vs. work completion queue; FCP/NVME WCQs use the fast-path
 * CQE handler).  If the processing pass asked for a delay, reschedules
 * itself as delayed work; wakes the worker thread if any handler
 * posted work.
 */
static void
__lpfc_sli4_sp_process_cq(struct lpfc_queue *cq)
{
	struct lpfc_hba *phba = cq->phba;
	unsigned long delay;
	bool workposted = false;

	/* Process and rearm the CQ */
	switch (cq->type) {
	case LPFC_MCQ:
		workposted |= __lpfc_sli4_process_cq(phba, cq,
						lpfc_sli4_sp_handle_mcqe,
						&delay);
		break;
	case LPFC_WCQ:
		/* FCP/NVME subtypes take the fast-path CQE handler */
		if (cq->subtype == LPFC_FCP || cq->subtype == LPFC_NVME)
			workposted |= __lpfc_sli4_process_cq(phba, cq,
						lpfc_sli4_fp_handle_cqe,
						&delay);
		else
			workposted |= __lpfc_sli4_process_cq(phba, cq,
						lpfc_sli4_sp_handle_cqe,
						&delay);
		break;
	default:
		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
				"0370 Invalid completion queue type (%d)\n",
				cq->type);
		return;
	}

	/* Reschedule as delayed work when asked to back off */
	if (delay) {
		if (!queue_delayed_work_on(cq->chann, phba->wq,
					   &cq->sched_spwork, delay))
			lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
				"0394 Cannot schedule soft IRQ "
				"for cqid=%d on CPU %d\n",
				cq->queue_id, cq->chann);
	}

	/* wake up worker thread if there are works to be done */
	if (workposted)
		lpfc_worker_wake_up(phba);
}
13723
13724
13725
13726
13727
13728
13729
13730
13731static void
13732lpfc_sli4_sp_process_cq(struct work_struct *work)
13733{
13734 struct lpfc_queue *cq = container_of(work, struct lpfc_queue, spwork);
13735
13736 __lpfc_sli4_sp_process_cq(cq);
13737}
13738
13739
13740
13741
13742
13743
13744
13745static void
13746lpfc_sli4_dly_sp_process_cq(struct work_struct *work)
13747{
13748 struct lpfc_queue *cq = container_of(to_delayed_work(work),
13749 struct lpfc_queue, sched_spwork);
13750
13751 __lpfc_sli4_sp_process_cq(cq);
13752}
13753
13754
13755
13756
13757
13758
13759
13760
13761
13762
/**
 * lpfc_sli4_fp_handle_fcp_wcqe - Handle a fast-path work-queue completion
 * @phba: adapter that received the completion
 * @cq: completion queue the WCQE arrived on
 * @wcqe: the work completion queue entry
 *
 * Looks up the command iocb by the WCQE's request tag and invokes its
 * completion routine.  Iocbs with a wqe_cmpl handler get the raw WCQE;
 * otherwise the WCQE is transferred into an on-stack response iocb for
 * the legacy iocb_cmpl path.  On IOSTAT_LOCAL_REJECT/IOERR_NO_RESOURCES
 * the queue-depth ramp-down handler is invoked.
 */
static void
lpfc_sli4_fp_handle_fcp_wcqe(struct lpfc_hba *phba, struct lpfc_queue *cq,
			     struct lpfc_wcqe_complete *wcqe)
{
	struct lpfc_sli_ring *pring = cq->pring;
	struct lpfc_iocbq *cmdiocbq;
	struct lpfc_iocbq irspiocbq;	/* on-stack response for iocb_cmpl path */
	unsigned long iflags;

	/* Check for response status */
	if (unlikely(bf_get(lpfc_wcqe_c_status, wcqe))) {
		/* If resource errors reported from HBA, reduce queue depth */
		if (((bf_get(lpfc_wcqe_c_status, wcqe) ==
		     IOSTAT_LOCAL_REJECT)) &&
		    ((wcqe->parameter & IOERR_PARAM_MASK) ==
		     IOERR_NO_RESOURCES))
			phba->lpfc_rampdown_queue_depth(phba);

		/* Log the error status */
		lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
				"0373 FCP CQE error: status=x%x: "
				"CQE: %08x %08x %08x %08x\n",
				bf_get(lpfc_wcqe_c_status, wcqe),
				wcqe->word0, wcqe->total_data_placed,
				wcqe->parameter, wcqe->word3);
	}

	/* Look up the FCP command IOCB and create pseudo response IOCB */
	spin_lock_irqsave(&pring->ring_lock, iflags);
	pring->stats.iocb_event++;
	spin_unlock_irqrestore(&pring->ring_lock, iflags);
	cmdiocbq = lpfc_sli_iocbq_lookup_by_tag(phba, pring,
				bf_get(lpfc_wcqe_c_request_tag, wcqe));
	if (unlikely(!cmdiocbq)) {
		lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
				"0374 FCP complete with no corresponding "
				"cmdiocb: iotag (%d)\n",
				bf_get(lpfc_wcqe_c_request_tag, wcqe));
		return;
	}
#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
	cmdiocbq->isr_timestamp = cq->isr_timestamp;
#endif
	if (cmdiocbq->iocb_cmpl == NULL) {
		if (cmdiocbq->wqe_cmpl) {
			/* Clear the driver-abort flag before completing */
			if (cmdiocbq->iocb_flag & LPFC_DRIVER_ABORTED) {
				spin_lock_irqsave(&phba->hbalock, iflags);
				cmdiocbq->iocb_flag &= ~LPFC_DRIVER_ABORTED;
				spin_unlock_irqrestore(&phba->hbalock, iflags);
			}

			/* Pass the cmd_iocb and the wcqe to the upper layer */
			(cmdiocbq->wqe_cmpl)(phba, cmdiocbq, wcqe);
			return;
		}
		lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
				"0375 FCP cmdiocb not callback function "
				"iotag: (%d)\n",
				bf_get(lpfc_wcqe_c_request_tag, wcqe));
		return;
	}

	/* Fake the irspiocb and copy necessary response information */
	lpfc_sli4_iocb_param_transfer(phba, &irspiocbq, cmdiocbq, wcqe);

	if (cmdiocbq->iocb_flag & LPFC_DRIVER_ABORTED) {
		spin_lock_irqsave(&phba->hbalock, iflags);
		cmdiocbq->iocb_flag &= ~LPFC_DRIVER_ABORTED;
		spin_unlock_irqrestore(&phba->hbalock, iflags);
	}

	/* Pass the cmd_iocb and the rsp state to the upper layer */
	(cmdiocbq->iocb_cmpl)(phba, cmdiocbq, &irspiocbq);
}
13839
13840
13841
13842
13843
13844
13845
13846
13847
13848
13849static void
13850lpfc_sli4_fp_handle_rel_wcqe(struct lpfc_hba *phba, struct lpfc_queue *cq,
13851 struct lpfc_wcqe_release *wcqe)
13852{
13853 struct lpfc_queue *childwq;
13854 bool wqid_matched = false;
13855 uint16_t hba_wqid;
13856
13857
13858 hba_wqid = bf_get(lpfc_wcqe_r_wq_id, wcqe);
13859 list_for_each_entry(childwq, &cq->child_list, list) {
13860 if (childwq->queue_id == hba_wqid) {
13861 lpfc_sli4_wq_release(childwq,
13862 bf_get(lpfc_wcqe_r_wqe_index, wcqe));
13863 if (childwq->q_flag & HBA_NVMET_WQFULL)
13864 lpfc_nvmet_wqfull_process(phba, childwq);
13865 wqid_matched = true;
13866 break;
13867 }
13868 }
13869
13870 if (wqid_matched != true)
13871 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
13872 "2580 Fast-path wqe consume event carries "
13873 "miss-matched qid: wcqe-qid=x%x\n", hba_wqid);
13874}
13875
13876
13877
13878
13879
13880
13881
13882
13883
13884
13885static bool
13886lpfc_sli4_nvmet_handle_rcqe(struct lpfc_hba *phba, struct lpfc_queue *cq,
13887 struct lpfc_rcqe *rcqe)
13888{
13889 bool workposted = false;
13890 struct lpfc_queue *hrq;
13891 struct lpfc_queue *drq;
13892 struct rqb_dmabuf *dma_buf;
13893 struct fc_frame_header *fc_hdr;
13894 struct lpfc_nvmet_tgtport *tgtp;
13895 uint32_t status, rq_id;
13896 unsigned long iflags;
13897 uint32_t fctl, idx;
13898
13899 if ((phba->nvmet_support == 0) ||
13900 (phba->sli4_hba.nvmet_cqset == NULL))
13901 return workposted;
13902
13903 idx = cq->queue_id - phba->sli4_hba.nvmet_cqset[0]->queue_id;
13904 hrq = phba->sli4_hba.nvmet_mrq_hdr[idx];
13905 drq = phba->sli4_hba.nvmet_mrq_data[idx];
13906
13907
13908 if (unlikely(!hrq) || unlikely(!drq))
13909 return workposted;
13910
13911 if (bf_get(lpfc_cqe_code, rcqe) == CQE_CODE_RECEIVE_V1)
13912 rq_id = bf_get(lpfc_rcqe_rq_id_v1, rcqe);
13913 else
13914 rq_id = bf_get(lpfc_rcqe_rq_id, rcqe);
13915
13916 if ((phba->nvmet_support == 0) ||
13917 (rq_id != hrq->queue_id))
13918 return workposted;
13919
13920 status = bf_get(lpfc_rcqe_status, rcqe);
13921 switch (status) {
13922 case FC_STATUS_RQ_BUF_LEN_EXCEEDED:
13923 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
13924 "6126 Receive Frame Truncated!!\n");
13925
13926 case FC_STATUS_RQ_SUCCESS:
13927 spin_lock_irqsave(&phba->hbalock, iflags);
13928 lpfc_sli4_rq_release(hrq, drq);
13929 dma_buf = lpfc_sli_rqbuf_get(phba, hrq);
13930 if (!dma_buf) {
13931 hrq->RQ_no_buf_found++;
13932 spin_unlock_irqrestore(&phba->hbalock, iflags);
13933 goto out;
13934 }
13935 spin_unlock_irqrestore(&phba->hbalock, iflags);
13936 hrq->RQ_rcv_buf++;
13937 hrq->RQ_buf_posted--;
13938 fc_hdr = (struct fc_frame_header *)dma_buf->hbuf.virt;
13939
13940
13941 fctl = (fc_hdr->fh_f_ctl[0] << 16 |
13942 fc_hdr->fh_f_ctl[1] << 8 |
13943 fc_hdr->fh_f_ctl[2]);
13944 if (((fctl &
13945 (FC_FC_FIRST_SEQ | FC_FC_END_SEQ | FC_FC_SEQ_INIT)) !=
13946 (FC_FC_FIRST_SEQ | FC_FC_END_SEQ | FC_FC_SEQ_INIT)) ||
13947 (fc_hdr->fh_seq_cnt != 0))
13948 goto drop;
13949
13950 if (fc_hdr->fh_type == FC_TYPE_FCP) {
13951 dma_buf->bytes_recv = bf_get(lpfc_rcqe_length, rcqe);
13952 lpfc_nvmet_unsol_fcp_event(
13953 phba, idx, dma_buf, cq->isr_timestamp,
13954 cq->q_flag & HBA_NVMET_CQ_NOTIFY);
13955 return false;
13956 }
13957drop:
13958 lpfc_rq_buf_free(phba, &dma_buf->hbuf);
13959 break;
13960 case FC_STATUS_INSUFF_BUF_FRM_DISC:
13961 if (phba->nvmet_support) {
13962 tgtp = phba->targetport->private;
13963 lpfc_printf_log(phba, KERN_ERR, LOG_SLI | LOG_NVME,
13964 "6401 RQE Error x%x, posted %d err_cnt "
13965 "%d: %x %x %x\n",
13966 status, hrq->RQ_buf_posted,
13967 hrq->RQ_no_posted_buf,
13968 atomic_read(&tgtp->rcv_fcp_cmd_in),
13969 atomic_read(&tgtp->rcv_fcp_cmd_out),
13970 atomic_read(&tgtp->xmt_fcp_release));
13971 }
13972
13973
13974 case FC_STATUS_INSUFF_BUF_NEED_BUF:
13975 hrq->RQ_no_posted_buf++;
13976
13977 break;
13978 }
13979out:
13980 return workposted;
13981}
13982
13983
13984
13985
13986
13987
13988
13989
13990
13991
13992
13993
/**
 * lpfc_sli4_fp_handle_cqe - Dispatch a fast-path CQE by completion code
 * @phba: adapter that received the entry
 * @cq: completion queue the entry arrived on
 * @cqe: the completion queue entry
 *
 * Copies the CQE out of queue memory and dispatches on its code:
 * WQE completions (including NVME ERSP), WQE release events, XRI
 * aborts, and receive completions (routed to NVMET when the CQ's
 * subtype is LPFC_NVMET).
 *
 * Return: true if a handler posted work for the worker thread.
 */
static bool
lpfc_sli4_fp_handle_cqe(struct lpfc_hba *phba, struct lpfc_queue *cq,
			 struct lpfc_cqe *cqe)
{
	struct lpfc_wcqe_release wcqe;
	bool workposted = false;

	/* Copy the work queue CQE to a local structure */
	lpfc_sli4_pcimem_bcopy(cqe, &wcqe, sizeof(struct lpfc_cqe));

	/* Check and process for different type of WCQE and dispatch */
	switch (bf_get(lpfc_wcqe_c_code, &wcqe)) {
	case CQE_CODE_COMPL_WQE:
	case CQE_CODE_NVME_ERSP:
		cq->CQ_wq++;
		/* Process the WQ complete event */
		phba->last_completion_time = jiffies;
		if ((cq->subtype == LPFC_FCP) || (cq->subtype == LPFC_NVME))
			lpfc_sli4_fp_handle_fcp_wcqe(phba, cq,
				(struct lpfc_wcqe_complete *)&wcqe);
		if (cq->subtype == LPFC_NVME_LS)
			lpfc_sli4_fp_handle_fcp_wcqe(phba, cq,
				(struct lpfc_wcqe_complete *)&wcqe);
		break;
	case CQE_CODE_RELEASE_WQE:
		cq->CQ_release_wqe++;
		/* Process the WQ release event */
		lpfc_sli4_fp_handle_rel_wcqe(phba, cq,
				(struct lpfc_wcqe_release *)&wcqe);
		break;
	case CQE_CODE_XRI_ABORTED:
		cq->CQ_xri_aborted++;
		/* Process the WQ XRI abort event */
		phba->last_completion_time = jiffies;
		workposted = lpfc_sli4_sp_handle_abort_xri_wcqe(phba, cq,
				(struct sli4_wcqe_xri_aborted *)&wcqe);
		break;
	case CQE_CODE_RECEIVE_V1:
	case CQE_CODE_RECEIVE:
		phba->last_completion_time = jiffies;
		if (cq->subtype == LPFC_NVMET) {
			workposted = lpfc_sli4_nvmet_handle_rcqe(
				phba, cq, (struct lpfc_rcqe *)&wcqe);
		}
		break;
	default:
		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
				"0144 Not a valid CQE code: x%x\n",
				bf_get(lpfc_wcqe_c_code, &wcqe));
		break;
	}
	return workposted;
}
14047
14048
14049
14050
14051
14052
14053
14054
14055
14056
14057
14058
14059
/**
 * lpfc_sli4_hba_handle_eqe - Route an event queue entry to its CQ
 * @phba: adapter that raised the event
 * @eq: event queue the entry came from
 * @eqe: the event queue entry
 *
 * Validates the EQE major code, resolves the completion queue the
 * entry refers to — fast-path lookup table first, then the NVMET CQ
 * set, then the NVME LS CQ, finally falling back to the slow-path
 * handler — and schedules that CQ's irq work item on its CPU.
 */
static void
lpfc_sli4_hba_handle_eqe(struct lpfc_hba *phba, struct lpfc_queue *eq,
			 struct lpfc_eqe *eqe)
{
	struct lpfc_queue *cq = NULL;
	uint32_t qidx = eq->hdwq;
	uint16_t cqid, id;

	if (unlikely(bf_get_le32(lpfc_eqe_major_code, eqe) != 0)) {
		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
				"0366 Not a valid completion "
				"event: majorcode=x%x, minorcode=x%x\n",
				bf_get_le32(lpfc_eqe_major_code, eqe),
				bf_get_le32(lpfc_eqe_minor_code, eqe));
		return;
	}

	/* Get the reference to the corresponding CQ */
	cqid = bf_get_le32(lpfc_eqe_resource_id, eqe);

	/* First try the fast-path CQ lookup table */
	if (cqid <= phba->sli4_hba.cq_max) {
		cq = phba->sli4_hba.cq_lookup[cqid];
		if (cq)
			goto work_cq;
	}

	/* Next check for NVMET completion */
	if (phba->cfg_nvmet_mrq && phba->sli4_hba.nvmet_cqset) {
		id = phba->sli4_hba.nvmet_cqset[0]->queue_id;
		if ((cqid >= id) && (cqid < (id + phba->cfg_nvmet_mrq))) {
			/* Process NVMET unsol rcv */
			cq = phba->sli4_hba.nvmet_cqset[cqid - id];
			goto  process_cq;
		}
	}

	if (phba->sli4_hba.nvmels_cq &&
	    (cqid == phba->sli4_hba.nvmels_cq->queue_id)) {
		/* Process NVME unsol rcv */
		cq = phba->sli4_hba.nvmels_cq;
	}

	/* Otherwise this is a Slow path event */
	if (cq == NULL) {
		lpfc_sli4_sp_handle_eqe(phba, eqe,
					phba->sli4_hba.hdwq[qidx].hba_eq);
		return;
	}

process_cq:
	if (unlikely(cqid != cq->queue_id)) {
		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
				"0368 Miss-matched fast-path completion "
				"queue identifier: eqcqid=%d, fcpcqid=%d\n",
				cqid, cq->queue_id);
		return;
	}

work_cq:
#if defined(CONFIG_SCSI_LPFC_DEBUG_FS)
	if (phba->ktime_on)
		cq->isr_timestamp = ktime_get_ns();
	else
		cq->isr_timestamp = 0;
#endif
	if (!queue_work_on(cq->chann, phba->wq, &cq->irqwork))
		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
				"0363 Cannot schedule soft IRQ "
				"for CQ eqcqid=%d, cqid=%d on CPU %d\n",
				cqid, cq->queue_id, raw_smp_processor_id());
}
14132
14133
14134
14135
14136
14137
14138
14139
14140
14141
14142
14143
14144
14145
14146
14147
/**
 * __lpfc_sli4_hba_process_cq - Process a fast-path completion queue
 * @cq: the fast-path completion queue to service
 *
 * Drains @cq with the fast-path CQE handler.  If the pass asked for a
 * delay, reschedules as delayed work on the CQ's CPU; wakes the
 * worker thread if any handler posted work.
 */
static void
__lpfc_sli4_hba_process_cq(struct lpfc_queue *cq)
{
	struct lpfc_hba *phba = cq->phba;
	unsigned long delay;
	bool workposted = false;

	/* process and rearm the CQ */
	workposted |= __lpfc_sli4_process_cq(phba, cq, lpfc_sli4_fp_handle_cqe,
					     &delay);

	if (delay) {
		if (!queue_delayed_work_on(cq->chann, phba->wq,
					   &cq->sched_irqwork, delay))
			lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
				"0367 Cannot schedule soft IRQ "
				"for cqid=%d on CPU %d\n",
				cq->queue_id, cq->chann);
	}

	/* wake up worker thread if there are works to be done */
	if (workposted)
		lpfc_worker_wake_up(phba);
}
14172
14173
14174
14175
14176
14177
14178
14179
14180static void
14181lpfc_sli4_hba_process_cq(struct work_struct *work)
14182{
14183 struct lpfc_queue *cq = container_of(work, struct lpfc_queue, irqwork);
14184
14185 __lpfc_sli4_hba_process_cq(cq);
14186}
14187
14188
14189
14190
14191
14192
14193
14194static void
14195lpfc_sli4_dly_hba_process_cq(struct work_struct *work)
14196{
14197 struct lpfc_queue *cq = container_of(to_delayed_work(work),
14198 struct lpfc_queue, sched_irqwork);
14199
14200 __lpfc_sli4_hba_process_cq(cq);
14201}
14202
14203
14204
14205
14206
14207
14208
14209
14210
14211
14212
14213
14214
14215
14216
14217
14218
14219
14220
14221
14222
14223
14224
14225
14226
14227
14228
/**
 * lpfc_sli4_hba_intr_handler - HBA fast-path interrupt handler (SLI-4)
 * @irq: interrupt number
 * @dev_id: the lpfc_hba_eq_hdl for the EQ vector that fired
 *
 * Services one EQ vector: validates device state (flushing the EQ if
 * the link is being taken down), tracks per-CPU interrupt counts, may
 * raise the EQ delay to its maximum when a single vector is heavily
 * loaded, then processes the EQ's entries.
 *
 * Return: IRQ_HANDLED if the EQ produced entries (or MSI-X fired with
 * none, which is logged); IRQ_NONE otherwise.
 */
irqreturn_t
lpfc_sli4_hba_intr_handler(int irq, void *dev_id)
{
	struct lpfc_hba *phba;
	struct lpfc_hba_eq_hdl *hba_eq_hdl;
	struct lpfc_queue *fpeq;
	unsigned long iflag;
	int ecount = 0;
	int hba_eqidx;
	struct lpfc_eq_intr_info *eqi;
	uint32_t icnt;

	/* Get the driver's phba structure from the dev_id */
	hba_eq_hdl = (struct lpfc_hba_eq_hdl *)dev_id;
	phba = hba_eq_hdl->phba;
	hba_eqidx = hba_eq_hdl->idx;

	if (unlikely(!phba))
		return IRQ_NONE;
	if (unlikely(!phba->sli4_hba.hdwq))
		return IRQ_NONE;

	/* Get to the EQ struct associated with this vector */
	fpeq = phba->sli4_hba.hba_eq_hdl[hba_eqidx].eq;
	if (unlikely(!fpeq))
		return IRQ_NONE;

	/* Check device state for handling interrupt */
	if (unlikely(lpfc_intr_state_check(phba))) {
		/* Check again for link_state with lock held */
		spin_lock_irqsave(&phba->hbalock, iflag);
		if (phba->link_state < LPFC_LINK_DOWN)
			/* Flush, clear interrupt, and rearm the EQ */
			lpfc_sli4_eq_flush(phba, fpeq);
		spin_unlock_irqrestore(&phba->hbalock, iflag);
		return IRQ_NONE;
	}

	eqi = phba->sli4_hba.eq_info;
	icnt = this_cpu_inc_return(eqi->icnt);
	fpeq->last_cpu = raw_smp_processor_id();

	/* Single-vector, auto-imax: back off a hot EQ to max delay */
	if (icnt > LPFC_EQD_ISR_TRIGGER &&
	    phba->cfg_irq_chann == 1 &&
	    phba->cfg_auto_imax &&
	    fpeq->q_mode != LPFC_MAX_AUTO_EQ_DELAY &&
	    phba->sli.sli_flag & LPFC_SLI_USE_EQDR)
		lpfc_sli4_mod_hba_eq_delay(phba, fpeq, LPFC_MAX_AUTO_EQ_DELAY);

	/* process and rearm the EQ */
	ecount = lpfc_sli4_process_eq(phba, fpeq);

	if (unlikely(ecount == 0)) {
		fpeq->EQ_no_entry++;
		if (phba->intr_type == MSIX)
			/* MSI-X treated interrupt served without any work */
			lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
					"0358 MSI-X interrupt with no EQE\n");
		else
			/* Non MSI-X treated on interrupt as EQ share INT */
			return IRQ_NONE;
	}

	return IRQ_HANDLED;
}
14294
14295
14296
14297
14298
14299
14300
14301
14302
14303
14304
14305
14306
14307
14308
14309
14310
14311
14312irqreturn_t
14313lpfc_sli4_intr_handler(int irq, void *dev_id)
14314{
14315 struct lpfc_hba *phba;
14316 irqreturn_t hba_irq_rc;
14317 bool hba_handled = false;
14318 int qidx;
14319
14320
14321 phba = (struct lpfc_hba *)dev_id;
14322
14323 if (unlikely(!phba))
14324 return IRQ_NONE;
14325
14326
14327
14328
14329 for (qidx = 0; qidx < phba->cfg_irq_chann; qidx++) {
14330 hba_irq_rc = lpfc_sli4_hba_intr_handler(irq,
14331 &phba->sli4_hba.hba_eq_hdl[qidx]);
14332 if (hba_irq_rc == IRQ_HANDLED)
14333 hba_handled |= true;
14334 }
14335
14336 return (hba_handled == true) ? IRQ_HANDLED : IRQ_NONE;
14337}
14338
14339
14340
14341
14342
14343
14344
14345
14346
14347void
14348lpfc_sli4_queue_free(struct lpfc_queue *queue)
14349{
14350 struct lpfc_dmabuf *dmabuf;
14351
14352 if (!queue)
14353 return;
14354
14355 if (!list_empty(&queue->wq_list))
14356 list_del(&queue->wq_list);
14357
14358 while (!list_empty(&queue->page_list)) {
14359 list_remove_head(&queue->page_list, dmabuf, struct lpfc_dmabuf,
14360 list);
14361 dma_free_coherent(&queue->phba->pcidev->dev, queue->page_size,
14362 dmabuf->virt, dmabuf->phys);
14363 kfree(dmabuf);
14364 }
14365 if (queue->rqbp) {
14366 lpfc_free_rq_buffer(queue->phba, queue);
14367 kfree(queue->rqbp);
14368 }
14369
14370 if (!list_empty(&queue->cpu_list))
14371 list_del(&queue->cpu_list);
14372
14373 kfree(queue);
14374 return;
14375}
14376
14377
14378
14379
14380
14381
14382
14383
14384
14385
14386
14387
14388
/**
 * lpfc_sli4_queue_alloc - Allocate and initialize an SLI-4 queue structure
 * @phba: adapter the queue belongs to
 * @page_size: requested page size (used when the port does not report
 *             a supported page size)
 * @entry_size: size in bytes of one queue entry
 * @entry_count: number of entries in the queue
 * @cpu: CPU whose NUMA node the queue structure is allocated on
 *
 * Computes the number of DMA pages needed (clamped to the port's
 * wqpcnt limit), allocates the queue structure node-local to @cpu
 * with the page-pointer array appended, allocates a DMA-coherent page
 * for each, and initializes the work items used to process the queue.
 *
 * Return: pointer to the new queue, or NULL on allocation failure
 * (partially built queues are torn down via lpfc_sli4_queue_free).
 */
struct lpfc_queue *
lpfc_sli4_queue_alloc(struct lpfc_hba *phba, uint32_t page_size,
		      uint32_t entry_size, uint32_t entry_count, int cpu)
{
	struct lpfc_queue *queue;
	struct lpfc_dmabuf *dmabuf;
	uint32_t hw_page_size = phba->sli4_hba.pc_sli4_params.if_page_sz;
	uint16_t x, pgcnt;

	if (!phba->sli4_hba.pc_sli4_params.supported)
		hw_page_size = page_size;

	pgcnt = ALIGN(entry_size * entry_count, hw_page_size) / hw_page_size;

	/* If needed, Adjust page count to match the max the adapter supports */
	if (pgcnt > phba->sli4_hba.pc_sli4_params.wqpcnt)
		pgcnt = phba->sli4_hba.pc_sli4_params.wqpcnt;

	/* Allocate the queue plus its page-pointer array on @cpu's node */
	queue = kzalloc_node(sizeof(*queue) + (sizeof(void *) * pgcnt),
			     GFP_KERNEL, cpu_to_node(cpu));
	if (!queue)
		return NULL;

	INIT_LIST_HEAD(&queue->list);
	INIT_LIST_HEAD(&queue->wq_list);
	INIT_LIST_HEAD(&queue->wqfull_list);
	INIT_LIST_HEAD(&queue->page_list);
	INIT_LIST_HEAD(&queue->child_list);
	INIT_LIST_HEAD(&queue->cpu_list);

	/* Set queue parameters now.  If the system cannot provide memory
	 * resources, the free routine needs to know what was allocated.
	 */
	queue->page_count = pgcnt;
	queue->q_pgs = (void **)&queue[1];
	queue->entry_cnt_per_pg = hw_page_size / entry_size;
	queue->entry_size = entry_size;
	queue->entry_count = entry_count;
	queue->page_size = hw_page_size;
	queue->phba = phba;

	for (x = 0; x < queue->page_count; x++) {
		dmabuf = kzalloc_node(sizeof(*dmabuf), GFP_KERNEL,
				      dev_to_node(&phba->pcidev->dev));
		if (!dmabuf)
			goto out_fail;
		dmabuf->virt = dma_alloc_coherent(&phba->pcidev->dev,
						  hw_page_size, &dmabuf->phys,
						  GFP_KERNEL);
		if (!dmabuf->virt) {
			kfree(dmabuf);
			goto out_fail;
		}
		dmabuf->buffer_tag = x;
		list_add_tail(&dmabuf->list, &queue->page_list);
		/* use lpfc_sli4_qe to index a paritcular entry in this page */
		queue->q_pgs[x] = dmabuf->virt;
	}
	INIT_WORK(&queue->irqwork, lpfc_sli4_hba_process_cq);
	INIT_WORK(&queue->spwork, lpfc_sli4_sp_process_cq);
	INIT_DELAYED_WORK(&queue->sched_irqwork, lpfc_sli4_dly_hba_process_cq);
	INIT_DELAYED_WORK(&queue->sched_spwork, lpfc_sli4_dly_sp_process_cq);

	/* notify_interval will be set during q creation */

	return queue;
out_fail:
	lpfc_sli4_queue_free(queue);
	return NULL;
}
14459
14460
14461
14462
14463
14464
14465
14466
14467
14468
14469static void __iomem *
14470lpfc_dual_chute_pci_bar_map(struct lpfc_hba *phba, uint16_t pci_barset)
14471{
14472 if (!phba->pcidev)
14473 return NULL;
14474
14475 switch (pci_barset) {
14476 case WQ_PCI_BAR_0_AND_1:
14477 return phba->pci_bar0_memmap_p;
14478 case WQ_PCI_BAR_2_AND_3:
14479 return phba->pci_bar2_memmap_p;
14480 case WQ_PCI_BAR_4_AND_5:
14481 return phba->pci_bar4_memmap_p;
14482 default:
14483 break;
14484 }
14485 return NULL;
14486}
14487
14488
14489
14490
14491
14492
14493
14494
14495
14496
14497
14498
14499
14500
14501
14502
14503
14504
14505
14506
14507
14508
14509
14510
/**
 * lpfc_modify_hba_eq_delay - Modify delay multiplier on EQs
 * @phba: adapter whose EQs are modified
 * @startq: first IRQ vector index to modify
 * @numq: number of queues to modify starting at @startq
 * @usdelay: desired interrupt coalescing delay in microseconds
 *           (capped at 0xFFFF)
 *
 * When the port supports the EQ delay register (LPFC_SLI_USE_EQDR),
 * programs each EQ directly; otherwise issues a MODIFY_EQ_DELAY
 * mailbox command with the computed delay multiplier for the selected
 * EQs.  Failures are logged; the function returns no status.
 */
void
lpfc_modify_hba_eq_delay(struct lpfc_hba *phba, uint32_t startq,
			 uint32_t numq, uint32_t usdelay)
{
	struct lpfc_mbx_modify_eq_delay *eq_delay;
	LPFC_MBOXQ_t *mbox;
	struct lpfc_queue *eq;
	int cnt = 0, rc, length;
	uint32_t shdr_status, shdr_add_status;
	uint32_t dmult;
	int qidx;
	union lpfc_sli4_cfg_shdr *shdr;

	if (startq >= phba->cfg_irq_chann)
		return;

	if (usdelay > 0xFFFF) {
		lpfc_printf_log(phba, KERN_INFO, LOG_INIT | LOG_FCP | LOG_NVME,
				"6429 usdelay %d too large. Scaled down to "
				"0xFFFF.\n", usdelay);
		usdelay = 0xFFFF;
	}

	/* set values by EQ_DELAY register if supported */
	if (phba->sli.sli_flag & LPFC_SLI_USE_EQDR) {
		for (qidx = startq; qidx < phba->cfg_irq_chann; qidx++) {
			eq = phba->sli4_hba.hba_eq_hdl[qidx].eq;
			if (!eq)
				continue;

			lpfc_sli4_mod_hba_eq_delay(phba, eq, usdelay);

			if (++cnt >= numq)
				break;
		}
		return;
	}

	/* Otherwise, set values by mailbox cmd */

	mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!mbox) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT | LOG_FCP | LOG_NVME,
				"6428 Failed allocating mailbox cmd buffer."
				" EQ delay was not set.\n");
		return;
	}
	length = (sizeof(struct lpfc_mbx_modify_eq_delay) -
		  sizeof(struct lpfc_sli4_cfg_mhdr));
	lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON,
			 LPFC_MBOX_OPCODE_MODIFY_EQ_DELAY,
			 length, LPFC_SLI4_MBX_EMBED);
	eq_delay = &mbox->u.mqe.un.eq_delay;

	/* Calculate delay multiper from maximum interrupt per second */
	dmult = (usdelay * LPFC_DMULT_CONST) / LPFC_SEC_TO_USEC;
	if (dmult)
		dmult--;
	if (dmult > LPFC_DMULT_MAX)
		dmult = LPFC_DMULT_MAX;

	for (qidx = startq; qidx < phba->cfg_irq_chann; qidx++) {
		eq = phba->sli4_hba.hba_eq_hdl[qidx].eq;
		if (!eq)
			continue;
		eq->q_mode = usdelay;
		eq_delay->u.request.eq[cnt].eq_id = eq->queue_id;
		eq_delay->u.request.eq[cnt].phase = 0;
		eq_delay->u.request.eq[cnt].delay_multi = dmult;

		if (++cnt >= numq)
			break;
	}
	eq_delay->u.request.num_eq = cnt;

	mbox->vport = phba->pport;
	mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
	mbox->ctx_buf = NULL;
	mbox->ctx_ndlp = NULL;
	rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
	shdr = (union lpfc_sli4_cfg_shdr *) &eq_delay->header.cfg_shdr;
	shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
	shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
	if (shdr_status || shdr_add_status || rc) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"2512 MODIFY_EQ_DELAY mailbox failed with "
				"status x%x add_status x%x, mbx status x%x\n",
				shdr_status, shdr_add_status, rc);
	}
	mempool_free(mbox, phba->mbox_mem_pool);
	return;
}
14603
14604
14605
14606
14607
14608
14609
14610
14611
14612
14613
14614
14615
14616
14617
14618
14619
14620
14621
14622
14623
/**
 * lpfc_eq_create - Create an Event Queue on the HBA
 * @phba: adapter to create the EQ on
 * @eq: driver queue structure (pages already allocated) to bind
 * @imax: unused here for delay programming; delay multiplier is set to 0
 *
 * Issues an EQ_CREATE mailbox command describing @eq's pages and entry
 * count, then records the hardware-assigned queue id and default
 * notify/processing limits in @eq.
 *
 * Return: 0 on success, -ENODEV if @eq is NULL, -ENOMEM on mailbox
 * allocation failure, -EINVAL for an unsupported entry count below
 * 256, -ENXIO on mailbox/command failure.
 */
int
lpfc_eq_create(struct lpfc_hba *phba, struct lpfc_queue *eq, uint32_t imax)
{
	struct lpfc_mbx_eq_create *eq_create;
	LPFC_MBOXQ_t *mbox;
	int rc, length, status = 0;
	struct lpfc_dmabuf *dmabuf;
	uint32_t shdr_status, shdr_add_status;
	union lpfc_sli4_cfg_shdr *shdr;
	uint16_t dmult;
	uint32_t hw_page_size = phba->sli4_hba.pc_sli4_params.if_page_sz;

	/* sanity check on queue memory */
	if (!eq)
		return -ENODEV;
	if (!phba->sli4_hba.pc_sli4_params.supported)
		hw_page_size = SLI4_PAGE_SIZE;

	mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!mbox)
		return -ENOMEM;
	length = (sizeof(struct lpfc_mbx_eq_create) -
		  sizeof(struct lpfc_sli4_cfg_mhdr));
	lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON,
			 LPFC_MBOX_OPCODE_EQ_CREATE,
			 length, LPFC_SLI4_MBX_EMBED);
	eq_create = &mbox->u.mqe.un.eq_create;
	shdr = (union lpfc_sli4_cfg_shdr *) &eq_create->header.cfg_shdr;
	bf_set(lpfc_mbx_eq_create_num_pages, &eq_create->u.request,
	       eq->page_count);
	bf_set(lpfc_eq_context_size, &eq_create->u.request.context,
	       LPFC_EQE_SIZE);
	bf_set(lpfc_eq_context_valid, &eq_create->u.request.context, 1);

	/* Use version 2 of CREATE_EQ if eqav is set */
	if (phba->sli4_hba.pc_sli4_params.eqav) {
		bf_set(lpfc_mbox_hdr_version, &shdr->request,
		       LPFC_Q_CREATE_VERSION_2);
		bf_set(lpfc_eq_context_autovalid, &eq_create->u.request.context,
		       phba->sli4_hba.pc_sli4_params.eqav);
	}

	/* don't setup delay multiplier using EQ_CREATE */
	dmult = 0;
	bf_set(lpfc_eq_context_delay_multi, &eq_create->u.request.context,
	       dmult);
	switch (eq->entry_count) {
	default:
		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
				"0360 Unsupported EQ count. (%d)\n",
				eq->entry_count);
		if (eq->entry_count < 256) {
			status = -EINVAL;
			goto out;
		}
		/* otherwise default to smallest count (drop through) */
		/* fall through */
	case 256:
		bf_set(lpfc_eq_context_count, &eq_create->u.request.context,
		       LPFC_EQ_CNT_256);
		break;
	case 512:
		bf_set(lpfc_eq_context_count, &eq_create->u.request.context,
		       LPFC_EQ_CNT_512);
		break;
	case 1024:
		bf_set(lpfc_eq_context_count, &eq_create->u.request.context,
		       LPFC_EQ_CNT_1024);
		break;
	case 2048:
		bf_set(lpfc_eq_context_count, &eq_create->u.request.context,
		       LPFC_EQ_CNT_2048);
		break;
	case 4096:
		bf_set(lpfc_eq_context_count, &eq_create->u.request.context,
		       LPFC_EQ_CNT_4096);
		break;
	}
	list_for_each_entry(dmabuf, &eq->page_list, list) {
		memset(dmabuf->virt, 0, hw_page_size);
		eq_create->u.request.page[dmabuf->buffer_tag].addr_lo =
					putPaddrLow(dmabuf->phys);
		eq_create->u.request.page[dmabuf->buffer_tag].addr_hi =
					putPaddrHigh(dmabuf->phys);
	}
	mbox->vport = phba->pport;
	mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
	mbox->ctx_buf = NULL;
	mbox->ctx_ndlp = NULL;
	rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
	shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
	shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
	if (shdr_status || shdr_add_status || rc) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"2500 EQ_CREATE mailbox failed with "
				"status x%x add_status x%x, mbx status x%x\n",
				shdr_status, shdr_add_status, rc);
		status = -ENXIO;
	}
	eq->type = LPFC_EQ;
	eq->subtype = LPFC_NONE;
	eq->queue_id = bf_get(lpfc_mbx_eq_create_q_id, &eq_create->u.response);
	if (eq->queue_id == 0xFFFF)
		status = -ENXIO;
	eq->host_index = 0;
	eq->notify_interval = LPFC_EQ_NOTIFY_INTRVL;
	eq->max_proc_limit = LPFC_EQ_MAX_PROC_LIMIT;
out:
	mempool_free(mbox, phba->mbox_mem_pool);
	return status;
}
14734
14735
14736
14737
14738
14739
14740
14741
14742
14743
14744
14745
14746
14747
14748
14749
14750
14751
14752
14753
14754
14755
14756int
14757lpfc_cq_create(struct lpfc_hba *phba, struct lpfc_queue *cq,
14758 struct lpfc_queue *eq, uint32_t type, uint32_t subtype)
14759{
14760 struct lpfc_mbx_cq_create *cq_create;
14761 struct lpfc_dmabuf *dmabuf;
14762 LPFC_MBOXQ_t *mbox;
14763 int rc, length, status = 0;
14764 uint32_t shdr_status, shdr_add_status;
14765 union lpfc_sli4_cfg_shdr *shdr;
14766
14767
14768 if (!cq || !eq)
14769 return -ENODEV;
14770
14771 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
14772 if (!mbox)
14773 return -ENOMEM;
14774 length = (sizeof(struct lpfc_mbx_cq_create) -
14775 sizeof(struct lpfc_sli4_cfg_mhdr));
14776 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON,
14777 LPFC_MBOX_OPCODE_CQ_CREATE,
14778 length, LPFC_SLI4_MBX_EMBED);
14779 cq_create = &mbox->u.mqe.un.cq_create;
14780 shdr = (union lpfc_sli4_cfg_shdr *) &cq_create->header.cfg_shdr;
14781 bf_set(lpfc_mbx_cq_create_num_pages, &cq_create->u.request,
14782 cq->page_count);
14783 bf_set(lpfc_cq_context_event, &cq_create->u.request.context, 1);
14784 bf_set(lpfc_cq_context_valid, &cq_create->u.request.context, 1);
14785 bf_set(lpfc_mbox_hdr_version, &shdr->request,
14786 phba->sli4_hba.pc_sli4_params.cqv);
14787 if (phba->sli4_hba.pc_sli4_params.cqv == LPFC_Q_CREATE_VERSION_2) {
14788 bf_set(lpfc_mbx_cq_create_page_size, &cq_create->u.request,
14789 (cq->page_size / SLI4_PAGE_SIZE));
14790 bf_set(lpfc_cq_eq_id_2, &cq_create->u.request.context,
14791 eq->queue_id);
14792 bf_set(lpfc_cq_context_autovalid, &cq_create->u.request.context,
14793 phba->sli4_hba.pc_sli4_params.cqav);
14794 } else {
14795 bf_set(lpfc_cq_eq_id, &cq_create->u.request.context,
14796 eq->queue_id);
14797 }
14798 switch (cq->entry_count) {
14799 case 2048:
14800 case 4096:
14801 if (phba->sli4_hba.pc_sli4_params.cqv ==
14802 LPFC_Q_CREATE_VERSION_2) {
14803 cq_create->u.request.context.lpfc_cq_context_count =
14804 cq->entry_count;
14805 bf_set(lpfc_cq_context_count,
14806 &cq_create->u.request.context,
14807 LPFC_CQ_CNT_WORD7);
14808 break;
14809 }
14810
14811 default:
14812 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
14813 "0361 Unsupported CQ count: "
14814 "entry cnt %d sz %d pg cnt %d\n",
14815 cq->entry_count, cq->entry_size,
14816 cq->page_count);
14817 if (cq->entry_count < 256) {
14818 status = -EINVAL;
14819 goto out;
14820 }
14821
14822 case 256:
14823 bf_set(lpfc_cq_context_count, &cq_create->u.request.context,
14824 LPFC_CQ_CNT_256);
14825 break;
14826 case 512:
14827 bf_set(lpfc_cq_context_count, &cq_create->u.request.context,
14828 LPFC_CQ_CNT_512);
14829 break;
14830 case 1024:
14831 bf_set(lpfc_cq_context_count, &cq_create->u.request.context,
14832 LPFC_CQ_CNT_1024);
14833 break;
14834 }
14835 list_for_each_entry(dmabuf, &cq->page_list, list) {
14836 memset(dmabuf->virt, 0, cq->page_size);
14837 cq_create->u.request.page[dmabuf->buffer_tag].addr_lo =
14838 putPaddrLow(dmabuf->phys);
14839 cq_create->u.request.page[dmabuf->buffer_tag].addr_hi =
14840 putPaddrHigh(dmabuf->phys);
14841 }
14842 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
14843
14844
14845 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
14846 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
14847 if (shdr_status || shdr_add_status || rc) {
14848 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
14849 "2501 CQ_CREATE mailbox failed with "
14850 "status x%x add_status x%x, mbx status x%x\n",
14851 shdr_status, shdr_add_status, rc);
14852 status = -ENXIO;
14853 goto out;
14854 }
14855 cq->queue_id = bf_get(lpfc_mbx_cq_create_q_id, &cq_create->u.response);
14856 if (cq->queue_id == 0xFFFF) {
14857 status = -ENXIO;
14858 goto out;
14859 }
14860
14861 list_add_tail(&cq->list, &eq->child_list);
14862
14863 cq->type = type;
14864 cq->subtype = subtype;
14865 cq->queue_id = bf_get(lpfc_mbx_cq_create_q_id, &cq_create->u.response);
14866 cq->assoc_qid = eq->queue_id;
14867 cq->assoc_qp = eq;
14868 cq->host_index = 0;
14869 cq->notify_interval = LPFC_CQ_NOTIFY_INTRVL;
14870 cq->max_proc_limit = min(phba->cfg_cq_max_proc_limit, cq->entry_count);
14871
14872 if (cq->queue_id > phba->sli4_hba.cq_max)
14873 phba->sli4_hba.cq_max = cq->queue_id;
14874out:
14875 mempool_free(mbox, phba->mbox_mem_pool);
14876 return status;
14877}
14878
14879
14880
14881
14882
14883
14884
14885
14886
14887
14888
14889
14890
14891
14892
14893
14894
14895
14896
14897
14898
14899
14900
/**
 * lpfc_cq_create_set - Create a set of Completion Queues on the HBA
 * @phba: HBA structure that indicates port to create a queue on.
 * @cqp: Array of pointers to the CQs to create (one per NVMET MRQ).
 * @hdwq: Hardware queue array supplying the EQ each CQ is bound to.
 * @type: The queue type to assign to each created CQ.
 * @subtype: The functional subtype to assign to each created CQ.
 *
 * Issues a single non-embedded CQ_CREATE_SET mailbox command (the page
 * list for all queues will not fit in an embedded mailbox) to create
 * phba->cfg_nvmet_mrq completion queues in one firmware operation.  On
 * success firmware returns a base queue id; queue N in the set is
 * assigned id (base + N).
 *
 * Return: 0 on success.
 *         -ENODEV if a required queue pointer is missing.
 *         -ENOMEM on mailbox/DMA allocation failure.
 *         -EINVAL on an unsupported CQ entry count (< 256).
 *         -ENXIO if the mailbox command fails or a bogus queue id returns.
 */
int
lpfc_cq_create_set(struct lpfc_hba *phba, struct lpfc_queue **cqp,
		   struct lpfc_sli4_hdw_queue *hdwq, uint32_t type,
		   uint32_t subtype)
{
	struct lpfc_queue *cq;
	struct lpfc_queue *eq;
	struct lpfc_mbx_cq_create_set *cq_set;
	struct lpfc_dmabuf *dmabuf;
	LPFC_MBOXQ_t *mbox;
	int rc, length, alloclen, status = 0;
	int cnt, idx, numcq, page_idx = 0;
	uint32_t shdr_status, shdr_add_status;
	union lpfc_sli4_cfg_shdr *shdr;
	uint32_t hw_page_size = phba->sli4_hba.pc_sli4_params.if_page_sz;

	/* sanity check on queue memory */
	numcq = phba->cfg_nvmet_mrq;
	if (!cqp || !hdwq || !numcq)
		return -ENODEV;

	mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!mbox)
		return -ENOMEM;

	/* Command payload: fixed part plus one dma_address per page of
	 * every CQ in the set.
	 */
	length = sizeof(struct lpfc_mbx_cq_create_set);
	length += ((numcq * cqp[0]->page_count) *
		   sizeof(struct dma_address));
	alloclen = lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_FCOE,
			LPFC_MBOX_OPCODE_FCOE_CQ_CREATE_SET, length,
			LPFC_SLI4_MBX_NEMBED);
	if (alloclen < length) {
		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
				"3098 Allocated DMA memory size (%d) is "
				"less than the requested DMA memory size "
				"(%d)\n", alloclen, length);
		status = -ENOMEM;
		goto out;
	}
	cq_set = mbox->sge_array->addr[0];
	shdr = (union lpfc_sli4_cfg_shdr *)&cq_set->cfg_shdr;
	bf_set(lpfc_mbox_hdr_version, &shdr->request, 0);

	for (idx = 0; idx < numcq; idx++) {
		cq = cqp[idx];
		eq = hdwq[idx].hba_eq;
		if (!cq || !eq) {
			status = -ENOMEM;
			goto out;
		}
		if (!phba->sli4_hba.pc_sli4_params.supported)
			hw_page_size = cq->page_size;

		switch (idx) {
		case 0:
			/* The first CQ carries the set-wide parameters
			 * (page size/count, entry count, number of CQs).
			 */
			bf_set(lpfc_mbx_cq_create_set_page_size,
			       &cq_set->u.request,
			       (hw_page_size / SLI4_PAGE_SIZE));
			bf_set(lpfc_mbx_cq_create_set_num_pages,
			       &cq_set->u.request, cq->page_count);
			bf_set(lpfc_mbx_cq_create_set_evt,
			       &cq_set->u.request, 1);
			bf_set(lpfc_mbx_cq_create_set_valid,
			       &cq_set->u.request, 1);
			bf_set(lpfc_mbx_cq_create_set_cqe_size,
			       &cq_set->u.request, 0);
			bf_set(lpfc_mbx_cq_create_set_num_cq,
			       &cq_set->u.request, numcq);
			bf_set(lpfc_mbx_cq_create_set_autovalid,
			       &cq_set->u.request,
			       phba->sli4_hba.pc_sli4_params.cqav);
			switch (cq->entry_count) {
			case 2048:
			case 4096:
				/* Counts above 1024 need queue-create v2;
				 * otherwise fall back to the default case.
				 */
				if (phba->sli4_hba.pc_sli4_params.cqv ==
				    LPFC_Q_CREATE_VERSION_2) {
					bf_set(lpfc_mbx_cq_create_set_cqe_cnt,
					       &cq_set->u.request,
						cq->entry_count);
					bf_set(lpfc_mbx_cq_create_set_cqe_cnt,
					       &cq_set->u.request,
					       LPFC_CQ_CNT_WORD7);
					break;
				}
				/* fall through */
			default:
				lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
						"3118 Bad CQ count. (%d)\n",
						cq->entry_count);
				if (cq->entry_count < 256) {
					status = -EINVAL;
					goto out;
				}
				/* fall through - otherwise default to
				 * the smallest supported count (256)
				 */
			case 256:
				bf_set(lpfc_mbx_cq_create_set_cqe_cnt,
				       &cq_set->u.request, LPFC_CQ_CNT_256);
				break;
			case 512:
				bf_set(lpfc_mbx_cq_create_set_cqe_cnt,
				       &cq_set->u.request, LPFC_CQ_CNT_512);
				break;
			case 1024:
				bf_set(lpfc_mbx_cq_create_set_cqe_cnt,
				       &cq_set->u.request, LPFC_CQ_CNT_1024);
				break;
			}
			bf_set(lpfc_mbx_cq_create_set_eq_id0,
			       &cq_set->u.request, eq->queue_id);
			break;
		case 1:
			bf_set(lpfc_mbx_cq_create_set_eq_id1,
			       &cq_set->u.request, eq->queue_id);
			break;
		case 2:
			bf_set(lpfc_mbx_cq_create_set_eq_id2,
			       &cq_set->u.request, eq->queue_id);
			break;
		case 3:
			bf_set(lpfc_mbx_cq_create_set_eq_id3,
			       &cq_set->u.request, eq->queue_id);
			break;
		case 4:
			bf_set(lpfc_mbx_cq_create_set_eq_id4,
			       &cq_set->u.request, eq->queue_id);
			break;
		case 5:
			bf_set(lpfc_mbx_cq_create_set_eq_id5,
			       &cq_set->u.request, eq->queue_id);
			break;
		case 6:
			bf_set(lpfc_mbx_cq_create_set_eq_id6,
			       &cq_set->u.request, eq->queue_id);
			break;
		case 7:
			bf_set(lpfc_mbx_cq_create_set_eq_id7,
			       &cq_set->u.request, eq->queue_id);
			break;
		case 8:
			bf_set(lpfc_mbx_cq_create_set_eq_id8,
			       &cq_set->u.request, eq->queue_id);
			break;
		case 9:
			bf_set(lpfc_mbx_cq_create_set_eq_id9,
			       &cq_set->u.request, eq->queue_id);
			break;
		case 10:
			bf_set(lpfc_mbx_cq_create_set_eq_id10,
			       &cq_set->u.request, eq->queue_id);
			break;
		case 11:
			bf_set(lpfc_mbx_cq_create_set_eq_id11,
			       &cq_set->u.request, eq->queue_id);
			break;
		case 12:
			bf_set(lpfc_mbx_cq_create_set_eq_id12,
			       &cq_set->u.request, eq->queue_id);
			break;
		case 13:
			bf_set(lpfc_mbx_cq_create_set_eq_id13,
			       &cq_set->u.request, eq->queue_id);
			break;
		case 14:
			bf_set(lpfc_mbx_cq_create_set_eq_id14,
			       &cq_set->u.request, eq->queue_id);
			break;
		case 15:
			bf_set(lpfc_mbx_cq_create_set_eq_id15,
			       &cq_set->u.request, eq->queue_id);
			break;
		}

		/* link the cq onto the parent eq child list */
		list_add_tail(&cq->list, &eq->child_list);
		/* Set up completion queue's type and subtype */
		cq->type = type;
		cq->subtype = subtype;
		cq->assoc_qid = eq->queue_id;
		cq->assoc_qp = eq;
		cq->host_index = 0;
		cq->notify_interval = LPFC_CQ_NOTIFY_INTRVL;
		cq->max_proc_limit = min(phba->cfg_cq_max_proc_limit,
					 cq->entry_count);
		cq->chann = idx;

		rc = 0;
		/* Record this CQ's page addresses in the shared page array;
		 * page_idx offsets past the pages of the previous CQs.
		 */
		list_for_each_entry(dmabuf, &cq->page_list, list) {
			memset(dmabuf->virt, 0, hw_page_size);
			cnt = page_idx + dmabuf->buffer_tag;
			cq_set->u.request.page[cnt].addr_lo =
					putPaddrLow(dmabuf->phys);
			cq_set->u.request.page[cnt].addr_hi =
					putPaddrHigh(dmabuf->phys);
			rc++;
		}
		page_idx += rc;
	}

	rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);

	/* The IOCTL status is embedded in the mailbox subheader. */
	shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
	shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
	if (shdr_status || shdr_add_status || rc) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"3119 CQ_CREATE_SET mailbox failed with "
				"status x%x add_status x%x, mbx status x%x\n",
				shdr_status, shdr_add_status, rc);
		status = -ENXIO;
		goto out;
	}
	rc = bf_get(lpfc_mbx_cq_create_set_base_id, &cq_set->u.response);
	if (rc == 0xFFFF) {
		status = -ENXIO;
		goto out;
	}

	/* Firmware assigns sequential ids starting at the returned base. */
	for (idx = 0; idx < numcq; idx++) {
		cq = cqp[idx];
		cq->queue_id = rc + idx;
		if (cq->queue_id > phba->sli4_hba.cq_max)
			phba->sli4_hba.cq_max = cq->queue_id;
	}

out:
	lpfc_sli4_mbox_cmd_free(phba, mbox);
	return status;
}
15129
15130
15131
15132
15133
15134
15135
15136
15137
15138
15139
15140
15141
15142
15143
15144static void
15145lpfc_mq_create_fb_init(struct lpfc_hba *phba, struct lpfc_queue *mq,
15146 LPFC_MBOXQ_t *mbox, struct lpfc_queue *cq)
15147{
15148 struct lpfc_mbx_mq_create *mq_create;
15149 struct lpfc_dmabuf *dmabuf;
15150 int length;
15151
15152 length = (sizeof(struct lpfc_mbx_mq_create) -
15153 sizeof(struct lpfc_sli4_cfg_mhdr));
15154 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON,
15155 LPFC_MBOX_OPCODE_MQ_CREATE,
15156 length, LPFC_SLI4_MBX_EMBED);
15157 mq_create = &mbox->u.mqe.un.mq_create;
15158 bf_set(lpfc_mbx_mq_create_num_pages, &mq_create->u.request,
15159 mq->page_count);
15160 bf_set(lpfc_mq_context_cq_id, &mq_create->u.request.context,
15161 cq->queue_id);
15162 bf_set(lpfc_mq_context_valid, &mq_create->u.request.context, 1);
15163 switch (mq->entry_count) {
15164 case 16:
15165 bf_set(lpfc_mq_context_ring_size, &mq_create->u.request.context,
15166 LPFC_MQ_RING_SIZE_16);
15167 break;
15168 case 32:
15169 bf_set(lpfc_mq_context_ring_size, &mq_create->u.request.context,
15170 LPFC_MQ_RING_SIZE_32);
15171 break;
15172 case 64:
15173 bf_set(lpfc_mq_context_ring_size, &mq_create->u.request.context,
15174 LPFC_MQ_RING_SIZE_64);
15175 break;
15176 case 128:
15177 bf_set(lpfc_mq_context_ring_size, &mq_create->u.request.context,
15178 LPFC_MQ_RING_SIZE_128);
15179 break;
15180 }
15181 list_for_each_entry(dmabuf, &mq->page_list, list) {
15182 mq_create->u.request.page[dmabuf->buffer_tag].addr_lo =
15183 putPaddrLow(dmabuf->phys);
15184 mq_create->u.request.page[dmabuf->buffer_tag].addr_hi =
15185 putPaddrHigh(dmabuf->phys);
15186 }
15187}
15188
15189
15190
15191
15192
15193
15194
15195
15196
15197
15198
15199
15200
15201
15202
15203
15204
15205
15206
15207
15208
15209
/**
 * lpfc_mq_create - Create a Mailbox Queue on the HBA
 * @phba: HBA structure that indicates port to create a queue on.
 * @mq: The queue structure to use to create the mailbox queue.
 * @cq: The completion queue to associate this mailbox queue with.
 * @subtype: The queue's subtype.
 *
 * First attempts MQ_CREATE_EXT (which also registers for async link/FIP/
 * group5/FC/SLI events).  If that mailbox command fails, falls back to the
 * legacy MQ_CREATE command prepared by lpfc_mq_create_fb_init().
 *
 * Return: 0 on success.
 *         -ENODEV if @mq or @cq is NULL.
 *         -ENOMEM on mailbox allocation failure.
 *         -EINVAL on an unsupported entry count (< 16).
 *         -ENXIO if the mailbox command fails or a bogus queue id returns.
 */
int32_t
lpfc_mq_create(struct lpfc_hba *phba, struct lpfc_queue *mq,
	       struct lpfc_queue *cq, uint32_t subtype)
{
	struct lpfc_mbx_mq_create *mq_create;
	struct lpfc_mbx_mq_create_ext *mq_create_ext;
	struct lpfc_dmabuf *dmabuf;
	LPFC_MBOXQ_t *mbox;
	int rc, length, status = 0;
	uint32_t shdr_status, shdr_add_status;
	union lpfc_sli4_cfg_shdr *shdr;
	uint32_t hw_page_size = phba->sli4_hba.pc_sli4_params.if_page_sz;

	/* sanity check on queue memory */
	if (!mq || !cq)
		return -ENODEV;
	if (!phba->sli4_hba.pc_sli4_params.supported)
		hw_page_size = SLI4_PAGE_SIZE;

	mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!mbox)
		return -ENOMEM;
	length = (sizeof(struct lpfc_mbx_mq_create_ext) -
		  sizeof(struct lpfc_sli4_cfg_mhdr));
	lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON,
			 LPFC_MBOX_OPCODE_MQ_CREATE_EXT,
			 length, LPFC_SLI4_MBX_EMBED);

	mq_create_ext = &mbox->u.mqe.un.mq_create_ext;
	shdr = (union lpfc_sli4_cfg_shdr *) &mq_create_ext->header.cfg_shdr;
	bf_set(lpfc_mbx_mq_create_ext_num_pages,
	       &mq_create_ext->u.request, mq->page_count);
	/* Subscribe to all async event classes on this MQ. */
	bf_set(lpfc_mbx_mq_create_ext_async_evt_link,
	       &mq_create_ext->u.request, 1);
	bf_set(lpfc_mbx_mq_create_ext_async_evt_fip,
	       &mq_create_ext->u.request, 1);
	bf_set(lpfc_mbx_mq_create_ext_async_evt_group5,
	       &mq_create_ext->u.request, 1);
	bf_set(lpfc_mbx_mq_create_ext_async_evt_fc,
	       &mq_create_ext->u.request, 1);
	bf_set(lpfc_mbx_mq_create_ext_async_evt_sli,
	       &mq_create_ext->u.request, 1);
	bf_set(lpfc_mq_context_valid, &mq_create_ext->u.request.context, 1);
	bf_set(lpfc_mbox_hdr_version, &shdr->request,
	       phba->sli4_hba.pc_sli4_params.mqv);
	/* The CQ id lives in a different word in v1 vs v0 of the command. */
	if (phba->sli4_hba.pc_sli4_params.mqv == LPFC_Q_CREATE_VERSION_1)
		bf_set(lpfc_mbx_mq_create_ext_cq_id, &mq_create_ext->u.request,
		       cq->queue_id);
	else
		bf_set(lpfc_mq_context_cq_id, &mq_create_ext->u.request.context,
		       cq->queue_id);
	switch (mq->entry_count) {
	default:
		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
				"0362 Unsupported MQ count. (%d)\n",
				mq->entry_count);
		if (mq->entry_count < 16) {
			status = -EINVAL;
			goto out;
		}
		/* fall through - otherwise default to smallest count (16) */
	case 16:
		bf_set(lpfc_mq_context_ring_size,
		       &mq_create_ext->u.request.context,
		       LPFC_MQ_RING_SIZE_16);
		break;
	case 32:
		bf_set(lpfc_mq_context_ring_size,
		       &mq_create_ext->u.request.context,
		       LPFC_MQ_RING_SIZE_32);
		break;
	case 64:
		bf_set(lpfc_mq_context_ring_size,
		       &mq_create_ext->u.request.context,
		       LPFC_MQ_RING_SIZE_64);
		break;
	case 128:
		bf_set(lpfc_mq_context_ring_size,
		       &mq_create_ext->u.request.context,
		       LPFC_MQ_RING_SIZE_128);
		break;
	}
	list_for_each_entry(dmabuf, &mq->page_list, list) {
		memset(dmabuf->virt, 0, hw_page_size);
		mq_create_ext->u.request.page[dmabuf->buffer_tag].addr_lo =
					putPaddrLow(dmabuf->phys);
		mq_create_ext->u.request.page[dmabuf->buffer_tag].addr_hi =
					putPaddrHigh(dmabuf->phys);
	}
	rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
	mq->queue_id = bf_get(lpfc_mbx_mq_create_q_id,
			      &mq_create_ext->u.response);
	if (rc != MBX_SUCCESS) {
		/* MQ_CREATE_EXT failed: retry with the legacy command,
		 * reusing the same mailbox; shdr and queue_id are re-read
		 * from the legacy response layout.
		 */
		lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
				"2795 MQ_CREATE_EXT failed with "
				"status x%x. Failback to MQ_CREATE.\n",
				rc);
		lpfc_mq_create_fb_init(phba, mq, mbox, cq);
		mq_create = &mbox->u.mqe.un.mq_create;
		rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
		shdr = (union lpfc_sli4_cfg_shdr *) &mq_create->header.cfg_shdr;
		mq->queue_id = bf_get(lpfc_mbx_mq_create_q_id,
				      &mq_create->u.response);
	}

	/* The IOCTL status is embedded in the mailbox subheader. */
	shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
	shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
	if (shdr_status || shdr_add_status || rc) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"2502 MQ_CREATE mailbox failed with "
				"status x%x add_status x%x, mbx status x%x\n",
				shdr_status, shdr_add_status, rc);
		status = -ENXIO;
		goto out;
	}
	if (mq->queue_id == 0xFFFF) {
		status = -ENXIO;
		goto out;
	}
	mq->type = LPFC_MQ;
	mq->assoc_qid = cq->queue_id;
	mq->subtype = subtype;
	mq->host_index = 0;
	mq->hba_index = 0;

	/* link the mq onto the parent cq child list */
	list_add_tail(&mq->list, &cq->child_list);
out:
	mempool_free(mbox, phba->mbox_mem_pool);
	return status;
}
15342
15343
15344
15345
15346
15347
15348
15349
15350
15351
15352
15353
15354
15355
15356
15357
15358
15359
15360
15361
15362
15363
15364
15365int
15366lpfc_wq_create(struct lpfc_hba *phba, struct lpfc_queue *wq,
15367 struct lpfc_queue *cq, uint32_t subtype)
15368{
15369 struct lpfc_mbx_wq_create *wq_create;
15370 struct lpfc_dmabuf *dmabuf;
15371 LPFC_MBOXQ_t *mbox;
15372 int rc, length, status = 0;
15373 uint32_t shdr_status, shdr_add_status;
15374 union lpfc_sli4_cfg_shdr *shdr;
15375 uint32_t hw_page_size = phba->sli4_hba.pc_sli4_params.if_page_sz;
15376 struct dma_address *page;
15377 void __iomem *bar_memmap_p;
15378 uint32_t db_offset;
15379 uint16_t pci_barset;
15380 uint8_t dpp_barset;
15381 uint32_t dpp_offset;
15382 unsigned long pg_addr;
15383 uint8_t wq_create_version;
15384
15385
15386 if (!wq || !cq)
15387 return -ENODEV;
15388 if (!phba->sli4_hba.pc_sli4_params.supported)
15389 hw_page_size = wq->page_size;
15390
15391 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
15392 if (!mbox)
15393 return -ENOMEM;
15394 length = (sizeof(struct lpfc_mbx_wq_create) -
15395 sizeof(struct lpfc_sli4_cfg_mhdr));
15396 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_FCOE,
15397 LPFC_MBOX_OPCODE_FCOE_WQ_CREATE,
15398 length, LPFC_SLI4_MBX_EMBED);
15399 wq_create = &mbox->u.mqe.un.wq_create;
15400 shdr = (union lpfc_sli4_cfg_shdr *) &wq_create->header.cfg_shdr;
15401 bf_set(lpfc_mbx_wq_create_num_pages, &wq_create->u.request,
15402 wq->page_count);
15403 bf_set(lpfc_mbx_wq_create_cq_id, &wq_create->u.request,
15404 cq->queue_id);
15405
15406
15407 bf_set(lpfc_mbox_hdr_version, &shdr->request,
15408 phba->sli4_hba.pc_sli4_params.wqv);
15409
15410 if ((phba->sli4_hba.pc_sli4_params.wqsize & LPFC_WQ_SZ128_SUPPORT) ||
15411 (wq->page_size > SLI4_PAGE_SIZE))
15412 wq_create_version = LPFC_Q_CREATE_VERSION_1;
15413 else
15414 wq_create_version = LPFC_Q_CREATE_VERSION_0;
15415
15416
15417 if (phba->sli4_hba.pc_sli4_params.wqsize & LPFC_WQ_SZ128_SUPPORT)
15418 wq_create_version = LPFC_Q_CREATE_VERSION_1;
15419 else
15420 wq_create_version = LPFC_Q_CREATE_VERSION_0;
15421
15422 switch (wq_create_version) {
15423 case LPFC_Q_CREATE_VERSION_1:
15424 bf_set(lpfc_mbx_wq_create_wqe_count, &wq_create->u.request_1,
15425 wq->entry_count);
15426 bf_set(lpfc_mbox_hdr_version, &shdr->request,
15427 LPFC_Q_CREATE_VERSION_1);
15428
15429 switch (wq->entry_size) {
15430 default:
15431 case 64:
15432 bf_set(lpfc_mbx_wq_create_wqe_size,
15433 &wq_create->u.request_1,
15434 LPFC_WQ_WQE_SIZE_64);
15435 break;
15436 case 128:
15437 bf_set(lpfc_mbx_wq_create_wqe_size,
15438 &wq_create->u.request_1,
15439 LPFC_WQ_WQE_SIZE_128);
15440 break;
15441 }
15442
15443 bf_set(lpfc_mbx_wq_create_dpp_req, &wq_create->u.request_1, 1);
15444 bf_set(lpfc_mbx_wq_create_page_size,
15445 &wq_create->u.request_1,
15446 (wq->page_size / SLI4_PAGE_SIZE));
15447 page = wq_create->u.request_1.page;
15448 break;
15449 default:
15450 page = wq_create->u.request.page;
15451 break;
15452 }
15453
15454 list_for_each_entry(dmabuf, &wq->page_list, list) {
15455 memset(dmabuf->virt, 0, hw_page_size);
15456 page[dmabuf->buffer_tag].addr_lo = putPaddrLow(dmabuf->phys);
15457 page[dmabuf->buffer_tag].addr_hi = putPaddrHigh(dmabuf->phys);
15458 }
15459
15460 if (phba->sli4_hba.fw_func_mode & LPFC_DUA_MODE)
15461 bf_set(lpfc_mbx_wq_create_dua, &wq_create->u.request, 1);
15462
15463 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
15464
15465 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
15466 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
15467 if (shdr_status || shdr_add_status || rc) {
15468 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
15469 "2503 WQ_CREATE mailbox failed with "
15470 "status x%x add_status x%x, mbx status x%x\n",
15471 shdr_status, shdr_add_status, rc);
15472 status = -ENXIO;
15473 goto out;
15474 }
15475
15476 if (wq_create_version == LPFC_Q_CREATE_VERSION_0)
15477 wq->queue_id = bf_get(lpfc_mbx_wq_create_q_id,
15478 &wq_create->u.response);
15479 else
15480 wq->queue_id = bf_get(lpfc_mbx_wq_create_v1_q_id,
15481 &wq_create->u.response_1);
15482
15483 if (wq->queue_id == 0xFFFF) {
15484 status = -ENXIO;
15485 goto out;
15486 }
15487
15488 wq->db_format = LPFC_DB_LIST_FORMAT;
15489 if (wq_create_version == LPFC_Q_CREATE_VERSION_0) {
15490 if (phba->sli4_hba.fw_func_mode & LPFC_DUA_MODE) {
15491 wq->db_format = bf_get(lpfc_mbx_wq_create_db_format,
15492 &wq_create->u.response);
15493 if ((wq->db_format != LPFC_DB_LIST_FORMAT) &&
15494 (wq->db_format != LPFC_DB_RING_FORMAT)) {
15495 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
15496 "3265 WQ[%d] doorbell format "
15497 "not supported: x%x\n",
15498 wq->queue_id, wq->db_format);
15499 status = -EINVAL;
15500 goto out;
15501 }
15502 pci_barset = bf_get(lpfc_mbx_wq_create_bar_set,
15503 &wq_create->u.response);
15504 bar_memmap_p = lpfc_dual_chute_pci_bar_map(phba,
15505 pci_barset);
15506 if (!bar_memmap_p) {
15507 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
15508 "3263 WQ[%d] failed to memmap "
15509 "pci barset:x%x\n",
15510 wq->queue_id, pci_barset);
15511 status = -ENOMEM;
15512 goto out;
15513 }
15514 db_offset = wq_create->u.response.doorbell_offset;
15515 if ((db_offset != LPFC_ULP0_WQ_DOORBELL) &&
15516 (db_offset != LPFC_ULP1_WQ_DOORBELL)) {
15517 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
15518 "3252 WQ[%d] doorbell offset "
15519 "not supported: x%x\n",
15520 wq->queue_id, db_offset);
15521 status = -EINVAL;
15522 goto out;
15523 }
15524 wq->db_regaddr = bar_memmap_p + db_offset;
15525 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
15526 "3264 WQ[%d]: barset:x%x, offset:x%x, "
15527 "format:x%x\n", wq->queue_id,
15528 pci_barset, db_offset, wq->db_format);
15529 } else
15530 wq->db_regaddr = phba->sli4_hba.WQDBregaddr;
15531 } else {
15532
15533 wq->dpp_enable = bf_get(lpfc_mbx_wq_create_dpp_rsp,
15534 &wq_create->u.response_1);
15535 if (wq->dpp_enable) {
15536 pci_barset = bf_get(lpfc_mbx_wq_create_v1_bar_set,
15537 &wq_create->u.response_1);
15538 bar_memmap_p = lpfc_dual_chute_pci_bar_map(phba,
15539 pci_barset);
15540 if (!bar_memmap_p) {
15541 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
15542 "3267 WQ[%d] failed to memmap "
15543 "pci barset:x%x\n",
15544 wq->queue_id, pci_barset);
15545 status = -ENOMEM;
15546 goto out;
15547 }
15548 db_offset = wq_create->u.response_1.doorbell_offset;
15549 wq->db_regaddr = bar_memmap_p + db_offset;
15550 wq->dpp_id = bf_get(lpfc_mbx_wq_create_dpp_id,
15551 &wq_create->u.response_1);
15552 dpp_barset = bf_get(lpfc_mbx_wq_create_dpp_bar,
15553 &wq_create->u.response_1);
15554 bar_memmap_p = lpfc_dual_chute_pci_bar_map(phba,
15555 dpp_barset);
15556 if (!bar_memmap_p) {
15557 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
15558 "3268 WQ[%d] failed to memmap "
15559 "pci barset:x%x\n",
15560 wq->queue_id, dpp_barset);
15561 status = -ENOMEM;
15562 goto out;
15563 }
15564 dpp_offset = wq_create->u.response_1.dpp_offset;
15565 wq->dpp_regaddr = bar_memmap_p + dpp_offset;
15566 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
15567 "3271 WQ[%d]: barset:x%x, offset:x%x, "
15568 "dpp_id:x%x dpp_barset:x%x "
15569 "dpp_offset:x%x\n",
15570 wq->queue_id, pci_barset, db_offset,
15571 wq->dpp_id, dpp_barset, dpp_offset);
15572
15573
15574 pg_addr = (unsigned long)(wq->dpp_regaddr) & PAGE_MASK;
15575#ifdef CONFIG_X86
15576 rc = set_memory_wc(pg_addr, 1);
15577 if (rc) {
15578 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
15579 "3272 Cannot setup Combined "
15580 "Write on WQ[%d] - disable DPP\n",
15581 wq->queue_id);
15582 phba->cfg_enable_dpp = 0;
15583 }
15584#else
15585 phba->cfg_enable_dpp = 0;
15586#endif
15587 } else
15588 wq->db_regaddr = phba->sli4_hba.WQDBregaddr;
15589 }
15590 wq->pring = kzalloc(sizeof(struct lpfc_sli_ring), GFP_KERNEL);
15591 if (wq->pring == NULL) {
15592 status = -ENOMEM;
15593 goto out;
15594 }
15595 wq->type = LPFC_WQ;
15596 wq->assoc_qid = cq->queue_id;
15597 wq->subtype = subtype;
15598 wq->host_index = 0;
15599 wq->hba_index = 0;
15600 wq->notify_interval = LPFC_WQ_NOTIFY_INTRVL;
15601
15602
15603 list_add_tail(&wq->list, &cq->child_list);
15604out:
15605 mempool_free(mbox, phba->mbox_mem_pool);
15606 return status;
15607}
15608
15609
15610
15611
15612
15613
15614
15615
15616
15617
15618
15619
15620
15621
15622
15623
15624
15625
15626
15627
15628
15629
15630
15631
/**
 * lpfc_rq_create - Create a Receive Queue pair (header + data) on the HBA
 * @phba: HBA structure that indicates port to create a queue on.
 * @hrq: The queue structure to use for the header receive queue.
 * @drq: The queue structure to use for the data receive queue.
 * @cq: The completion queue to bind both receive queues to.
 * @subtype: The subtype of the work queue indicating its functionality.
 *
 * Issues two RQ_CREATE mailbox commands, reusing the same mailbox: the
 * first creates the header RQ, the second the data RQ.  The buffer size
 * of the data RQ depends on @subtype (NVMET vs default).  On success both
 * queues are linked onto @cq's child list.
 *
 * Note: the two RQs must have identical entry counts.
 *
 * Return: 0 on success.
 *         -ENODEV if a required queue pointer is missing.
 *         -EINVAL on mismatched or unsupported entry counts.
 *         -ENOMEM on mailbox/BAR-map allocation failure.
 *         -ENXIO if a mailbox command fails or a bogus queue id returns.
 */
int
lpfc_rq_create(struct lpfc_hba *phba, struct lpfc_queue *hrq,
	       struct lpfc_queue *drq, struct lpfc_queue *cq, uint32_t subtype)
{
	struct lpfc_mbx_rq_create *rq_create;
	struct lpfc_dmabuf *dmabuf;
	LPFC_MBOXQ_t *mbox;
	int rc, length, status = 0;
	uint32_t shdr_status, shdr_add_status;
	union lpfc_sli4_cfg_shdr *shdr;
	uint32_t hw_page_size = phba->sli4_hba.pc_sli4_params.if_page_sz;
	void __iomem *bar_memmap_p;
	uint32_t db_offset;
	uint16_t pci_barset;

	/* sanity check on queue memory */
	if (!hrq || !drq || !cq)
		return -ENODEV;
	if (!phba->sli4_hba.pc_sli4_params.supported)
		hw_page_size = SLI4_PAGE_SIZE;

	if (hrq->entry_count != drq->entry_count)
		return -EINVAL;
	mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!mbox)
		return -ENOMEM;
	length = (sizeof(struct lpfc_mbx_rq_create) -
		  sizeof(struct lpfc_sli4_cfg_mhdr));
	lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_FCOE,
			 LPFC_MBOX_OPCODE_FCOE_RQ_CREATE,
			 length, LPFC_SLI4_MBX_EMBED);
	rq_create = &mbox->u.mqe.un.rq_create;
	shdr = (union lpfc_sli4_cfg_shdr *) &rq_create->header.cfg_shdr;
	bf_set(lpfc_mbox_hdr_version, &shdr->request,
	       phba->sli4_hba.pc_sli4_params.rqv);
	/* Set up the header RQ context: v1 carries the count directly,
	 * v0 encodes the count via LPFC_RQ_RING_SIZE_* values.
	 */
	if (phba->sli4_hba.pc_sli4_params.rqv == LPFC_Q_CREATE_VERSION_1) {
		bf_set(lpfc_rq_context_rqe_count_1,
		       &rq_create->u.request.context,
		       hrq->entry_count);
		rq_create->u.request.context.buffer_size = LPFC_HDR_BUF_SIZE;
		bf_set(lpfc_rq_context_rqe_size,
		       &rq_create->u.request.context,
		       LPFC_RQE_SIZE_8);
		bf_set(lpfc_rq_context_page_size,
		       &rq_create->u.request.context,
		       LPFC_RQ_PAGE_SIZE_4096);
	} else {
		switch (hrq->entry_count) {
		default:
			lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
					"2535 Unsupported RQ count. (%d)\n",
					hrq->entry_count);
			if (hrq->entry_count < 512) {
				status = -EINVAL;
				goto out;
			}
			/* fall through - otherwise default to smallest
			 * count (512)
			 */
		case 512:
			bf_set(lpfc_rq_context_rqe_count,
			       &rq_create->u.request.context,
			       LPFC_RQ_RING_SIZE_512);
			break;
		case 1024:
			bf_set(lpfc_rq_context_rqe_count,
			       &rq_create->u.request.context,
			       LPFC_RQ_RING_SIZE_1024);
			break;
		case 2048:
			bf_set(lpfc_rq_context_rqe_count,
			       &rq_create->u.request.context,
			       LPFC_RQ_RING_SIZE_2048);
			break;
		case 4096:
			bf_set(lpfc_rq_context_rqe_count,
			       &rq_create->u.request.context,
			       LPFC_RQ_RING_SIZE_4096);
			break;
		}
		bf_set(lpfc_rq_context_buf_size, &rq_create->u.request.context,
		       LPFC_HDR_BUF_SIZE);
	}
	bf_set(lpfc_rq_context_cq_id, &rq_create->u.request.context,
	       cq->queue_id);
	bf_set(lpfc_mbx_rq_create_num_pages, &rq_create->u.request,
	       hrq->page_count);
	list_for_each_entry(dmabuf, &hrq->page_list, list) {
		memset(dmabuf->virt, 0, hw_page_size);
		rq_create->u.request.page[dmabuf->buffer_tag].addr_lo =
					putPaddrLow(dmabuf->phys);
		rq_create->u.request.page[dmabuf->buffer_tag].addr_hi =
					putPaddrHigh(dmabuf->phys);
	}
	if (phba->sli4_hba.fw_func_mode & LPFC_DUA_MODE)
		bf_set(lpfc_mbx_rq_create_dua, &rq_create->u.request, 1);

	rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
	/* The IOCTL status is embedded in the mailbox subheader. */
	shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
	shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
	if (shdr_status || shdr_add_status || rc) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"2504 RQ_CREATE mailbox failed with "
				"status x%x add_status x%x, mbx status x%x\n",
				shdr_status, shdr_add_status, rc);
		status = -ENXIO;
		goto out;
	}
	hrq->queue_id = bf_get(lpfc_mbx_rq_create_q_id, &rq_create->u.response);
	if (hrq->queue_id == 0xFFFF) {
		status = -ENXIO;
		goto out;
	}

	if (phba->sli4_hba.fw_func_mode & LPFC_DUA_MODE) {
		/* Dual-chute: resolve the doorbell via BAR/offset from the
		 * mailbox response instead of the default RQ doorbell.
		 */
		hrq->db_format = bf_get(lpfc_mbx_rq_create_db_format,
					&rq_create->u.response);
		if ((hrq->db_format != LPFC_DB_LIST_FORMAT) &&
		    (hrq->db_format != LPFC_DB_RING_FORMAT)) {
			lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
					"3262 RQ [%d] doorbell format not "
					"supported: x%x\n", hrq->queue_id,
					hrq->db_format);
			status = -EINVAL;
			goto out;
		}

		pci_barset = bf_get(lpfc_mbx_rq_create_bar_set,
				    &rq_create->u.response);
		bar_memmap_p = lpfc_dual_chute_pci_bar_map(phba, pci_barset);
		if (!bar_memmap_p) {
			lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
					"3269 RQ[%d] failed to memmap pci "
					"barset:x%x\n", hrq->queue_id,
					pci_barset);
			status = -ENOMEM;
			goto out;
		}

		db_offset = rq_create->u.response.doorbell_offset;
		if ((db_offset != LPFC_ULP0_RQ_DOORBELL) &&
		    (db_offset != LPFC_ULP1_RQ_DOORBELL)) {
			lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
					"3270 RQ[%d] doorbell offset not "
					"supported: x%x\n", hrq->queue_id,
					db_offset);
			status = -EINVAL;
			goto out;
		}
		hrq->db_regaddr = bar_memmap_p + db_offset;
		lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
				"3266 RQ[qid:%d]: barset:x%x, offset:x%x, "
				"format:x%x\n", hrq->queue_id, pci_barset,
				db_offset, hrq->db_format);
	} else {
		hrq->db_format = LPFC_DB_RING_FORMAT;
		hrq->db_regaddr = phba->sli4_hba.RQDBregaddr;
	}
	hrq->type = LPFC_HRQ;
	hrq->assoc_qid = cq->queue_id;
	hrq->subtype = subtype;
	hrq->host_index = 0;
	hrq->hba_index = 0;
	hrq->notify_interval = LPFC_RQ_NOTIFY_INTRVL;

	/* now create the data queue, reusing the same mailbox command */
	lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_FCOE,
			 LPFC_MBOX_OPCODE_FCOE_RQ_CREATE,
			 length, LPFC_SLI4_MBX_EMBED);
	bf_set(lpfc_mbox_hdr_version, &shdr->request,
	       phba->sli4_hba.pc_sli4_params.rqv);
	if (phba->sli4_hba.pc_sli4_params.rqv == LPFC_Q_CREATE_VERSION_1) {
		bf_set(lpfc_rq_context_rqe_count_1,
		       &rq_create->u.request.context, hrq->entry_count);
		/* NVMET data buffers are larger than the default */
		if (subtype == LPFC_NVMET)
			rq_create->u.request.context.buffer_size =
				LPFC_NVMET_DATA_BUF_SIZE;
		else
			rq_create->u.request.context.buffer_size =
				LPFC_DATA_BUF_SIZE;
		bf_set(lpfc_rq_context_rqe_size, &rq_create->u.request.context,
		       LPFC_RQE_SIZE_8);
		bf_set(lpfc_rq_context_page_size, &rq_create->u.request.context,
		       (PAGE_SIZE/SLI4_PAGE_SIZE));
	} else {
		switch (drq->entry_count) {
		default:
			lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
					"2536 Unsupported RQ count. (%d)\n",
					drq->entry_count);
			if (drq->entry_count < 512) {
				status = -EINVAL;
				goto out;
			}
			/* fall through - otherwise default to smallest
			 * count (512)
			 */
		case 512:
			bf_set(lpfc_rq_context_rqe_count,
			       &rq_create->u.request.context,
			       LPFC_RQ_RING_SIZE_512);
			break;
		case 1024:
			bf_set(lpfc_rq_context_rqe_count,
			       &rq_create->u.request.context,
			       LPFC_RQ_RING_SIZE_1024);
			break;
		case 2048:
			bf_set(lpfc_rq_context_rqe_count,
			       &rq_create->u.request.context,
			       LPFC_RQ_RING_SIZE_2048);
			break;
		case 4096:
			bf_set(lpfc_rq_context_rqe_count,
			       &rq_create->u.request.context,
			       LPFC_RQ_RING_SIZE_4096);
			break;
		}
		if (subtype == LPFC_NVMET)
			bf_set(lpfc_rq_context_buf_size,
			       &rq_create->u.request.context,
			       LPFC_NVMET_DATA_BUF_SIZE);
		else
			bf_set(lpfc_rq_context_buf_size,
			       &rq_create->u.request.context,
			       LPFC_DATA_BUF_SIZE);
	}
	bf_set(lpfc_rq_context_cq_id, &rq_create->u.request.context,
	       cq->queue_id);
	bf_set(lpfc_mbx_rq_create_num_pages, &rq_create->u.request,
	       drq->page_count);
	list_for_each_entry(dmabuf, &drq->page_list, list) {
		rq_create->u.request.page[dmabuf->buffer_tag].addr_lo =
					putPaddrLow(dmabuf->phys);
		rq_create->u.request.page[dmabuf->buffer_tag].addr_hi =
					putPaddrHigh(dmabuf->phys);
	}
	if (phba->sli4_hba.fw_func_mode & LPFC_DUA_MODE)
		bf_set(lpfc_mbx_rq_create_dua, &rq_create->u.request, 1);
	rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
	/* The IOCTL status is embedded in the mailbox subheader. */
	shdr = (union lpfc_sli4_cfg_shdr *) &rq_create->header.cfg_shdr;
	shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
	shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
	if (shdr_status || shdr_add_status || rc) {
		status = -ENXIO;
		goto out;
	}
	drq->queue_id = bf_get(lpfc_mbx_rq_create_q_id, &rq_create->u.response);
	if (drq->queue_id == 0xFFFF) {
		status = -ENXIO;
		goto out;
	}
	drq->type = LPFC_DRQ;
	drq->assoc_qid = cq->queue_id;
	drq->subtype = subtype;
	drq->host_index = 0;
	drq->hba_index = 0;
	drq->notify_interval = LPFC_RQ_NOTIFY_INTRVL;

	/* link the header and data RQs onto the parent cq child list */
	list_add_tail(&hrq->list, &cq->child_list);
	list_add_tail(&drq->list, &cq->child_list);

out:
	mempool_free(mbox, phba->mbox_mem_pool);
	return status;
}
15897
15898
15899
15900
15901
15902
15903
15904
15905
15906
15907
15908
15909
15910
15911
15912
15913
15914
15915
15916
15917
15918
15919
15920
15921int
15922lpfc_mrq_create(struct lpfc_hba *phba, struct lpfc_queue **hrqp,
15923 struct lpfc_queue **drqp, struct lpfc_queue **cqp,
15924 uint32_t subtype)
15925{
15926 struct lpfc_queue *hrq, *drq, *cq;
15927 struct lpfc_mbx_rq_create_v2 *rq_create;
15928 struct lpfc_dmabuf *dmabuf;
15929 LPFC_MBOXQ_t *mbox;
15930 int rc, length, alloclen, status = 0;
15931 int cnt, idx, numrq, page_idx = 0;
15932 uint32_t shdr_status, shdr_add_status;
15933 union lpfc_sli4_cfg_shdr *shdr;
15934 uint32_t hw_page_size = phba->sli4_hba.pc_sli4_params.if_page_sz;
15935
15936 numrq = phba->cfg_nvmet_mrq;
15937
15938 if (!hrqp || !drqp || !cqp || !numrq)
15939 return -ENODEV;
15940 if (!phba->sli4_hba.pc_sli4_params.supported)
15941 hw_page_size = SLI4_PAGE_SIZE;
15942
15943 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
15944 if (!mbox)
15945 return -ENOMEM;
15946
15947 length = sizeof(struct lpfc_mbx_rq_create_v2);
15948 length += ((2 * numrq * hrqp[0]->page_count) *
15949 sizeof(struct dma_address));
15950
15951 alloclen = lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_FCOE,
15952 LPFC_MBOX_OPCODE_FCOE_RQ_CREATE, length,
15953 LPFC_SLI4_MBX_NEMBED);
15954 if (alloclen < length) {
15955 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
15956 "3099 Allocated DMA memory size (%d) is "
15957 "less than the requested DMA memory size "
15958 "(%d)\n", alloclen, length);
15959 status = -ENOMEM;
15960 goto out;
15961 }
15962
15963
15964
15965 rq_create = mbox->sge_array->addr[0];
15966 shdr = (union lpfc_sli4_cfg_shdr *)&rq_create->cfg_shdr;
15967
15968 bf_set(lpfc_mbox_hdr_version, &shdr->request, LPFC_Q_CREATE_VERSION_2);
15969 cnt = 0;
15970
15971 for (idx = 0; idx < numrq; idx++) {
15972 hrq = hrqp[idx];
15973 drq = drqp[idx];
15974 cq = cqp[idx];
15975
15976
15977 if (!hrq || !drq || !cq) {
15978 status = -ENODEV;
15979 goto out;
15980 }
15981
15982 if (hrq->entry_count != drq->entry_count) {
15983 status = -EINVAL;
15984 goto out;
15985 }
15986
15987 if (idx == 0) {
15988 bf_set(lpfc_mbx_rq_create_num_pages,
15989 &rq_create->u.request,
15990 hrq->page_count);
15991 bf_set(lpfc_mbx_rq_create_rq_cnt,
15992 &rq_create->u.request, (numrq * 2));
15993 bf_set(lpfc_mbx_rq_create_dnb, &rq_create->u.request,
15994 1);
15995 bf_set(lpfc_rq_context_base_cq,
15996 &rq_create->u.request.context,
15997 cq->queue_id);
15998 bf_set(lpfc_rq_context_data_size,
15999 &rq_create->u.request.context,
16000 LPFC_NVMET_DATA_BUF_SIZE);
16001 bf_set(lpfc_rq_context_hdr_size,
16002 &rq_create->u.request.context,
16003 LPFC_HDR_BUF_SIZE);
16004 bf_set(lpfc_rq_context_rqe_count_1,
16005 &rq_create->u.request.context,
16006 hrq->entry_count);
16007 bf_set(lpfc_rq_context_rqe_size,
16008 &rq_create->u.request.context,
16009 LPFC_RQE_SIZE_8);
16010 bf_set(lpfc_rq_context_page_size,
16011 &rq_create->u.request.context,
16012 (PAGE_SIZE/SLI4_PAGE_SIZE));
16013 }
16014 rc = 0;
16015 list_for_each_entry(dmabuf, &hrq->page_list, list) {
16016 memset(dmabuf->virt, 0, hw_page_size);
16017 cnt = page_idx + dmabuf->buffer_tag;
16018 rq_create->u.request.page[cnt].addr_lo =
16019 putPaddrLow(dmabuf->phys);
16020 rq_create->u.request.page[cnt].addr_hi =
16021 putPaddrHigh(dmabuf->phys);
16022 rc++;
16023 }
16024 page_idx += rc;
16025
16026 rc = 0;
16027 list_for_each_entry(dmabuf, &drq->page_list, list) {
16028 memset(dmabuf->virt, 0, hw_page_size);
16029 cnt = page_idx + dmabuf->buffer_tag;
16030 rq_create->u.request.page[cnt].addr_lo =
16031 putPaddrLow(dmabuf->phys);
16032 rq_create->u.request.page[cnt].addr_hi =
16033 putPaddrHigh(dmabuf->phys);
16034 rc++;
16035 }
16036 page_idx += rc;
16037
16038 hrq->db_format = LPFC_DB_RING_FORMAT;
16039 hrq->db_regaddr = phba->sli4_hba.RQDBregaddr;
16040 hrq->type = LPFC_HRQ;
16041 hrq->assoc_qid = cq->queue_id;
16042 hrq->subtype = subtype;
16043 hrq->host_index = 0;
16044 hrq->hba_index = 0;
16045 hrq->notify_interval = LPFC_RQ_NOTIFY_INTRVL;
16046
16047 drq->db_format = LPFC_DB_RING_FORMAT;
16048 drq->db_regaddr = phba->sli4_hba.RQDBregaddr;
16049 drq->type = LPFC_DRQ;
16050 drq->assoc_qid = cq->queue_id;
16051 drq->subtype = subtype;
16052 drq->host_index = 0;
16053 drq->hba_index = 0;
16054 drq->notify_interval = LPFC_RQ_NOTIFY_INTRVL;
16055
16056 list_add_tail(&hrq->list, &cq->child_list);
16057 list_add_tail(&drq->list, &cq->child_list);
16058 }
16059
16060 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
16061
16062 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
16063 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
16064 if (shdr_status || shdr_add_status || rc) {
16065 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
16066 "3120 RQ_CREATE mailbox failed with "
16067 "status x%x add_status x%x, mbx status x%x\n",
16068 shdr_status, shdr_add_status, rc);
16069 status = -ENXIO;
16070 goto out;
16071 }
16072 rc = bf_get(lpfc_mbx_rq_create_q_id, &rq_create->u.response);
16073 if (rc == 0xFFFF) {
16074 status = -ENXIO;
16075 goto out;
16076 }
16077
16078
16079 for (idx = 0; idx < numrq; idx++) {
16080 hrq = hrqp[idx];
16081 hrq->queue_id = rc + (2 * idx);
16082 drq = drqp[idx];
16083 drq->queue_id = rc + (2 * idx) + 1;
16084 }
16085
16086out:
16087 lpfc_sli4_mbox_cmd_free(phba, mbox);
16088 return status;
16089}
16090
16091
16092
16093
16094
16095
16096
16097
16098
16099
16100
16101
16102
16103int
16104lpfc_eq_destroy(struct lpfc_hba *phba, struct lpfc_queue *eq)
16105{
16106 LPFC_MBOXQ_t *mbox;
16107 int rc, length, status = 0;
16108 uint32_t shdr_status, shdr_add_status;
16109 union lpfc_sli4_cfg_shdr *shdr;
16110
16111
16112 if (!eq)
16113 return -ENODEV;
16114
16115 mbox = mempool_alloc(eq->phba->mbox_mem_pool, GFP_KERNEL);
16116 if (!mbox)
16117 return -ENOMEM;
16118 length = (sizeof(struct lpfc_mbx_eq_destroy) -
16119 sizeof(struct lpfc_sli4_cfg_mhdr));
16120 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON,
16121 LPFC_MBOX_OPCODE_EQ_DESTROY,
16122 length, LPFC_SLI4_MBX_EMBED);
16123 bf_set(lpfc_mbx_eq_destroy_q_id, &mbox->u.mqe.un.eq_destroy.u.request,
16124 eq->queue_id);
16125 mbox->vport = eq->phba->pport;
16126 mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
16127
16128 rc = lpfc_sli_issue_mbox(eq->phba, mbox, MBX_POLL);
16129
16130 shdr = (union lpfc_sli4_cfg_shdr *)
16131 &mbox->u.mqe.un.eq_destroy.header.cfg_shdr;
16132 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
16133 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
16134 if (shdr_status || shdr_add_status || rc) {
16135 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
16136 "2505 EQ_DESTROY mailbox failed with "
16137 "status x%x add_status x%x, mbx status x%x\n",
16138 shdr_status, shdr_add_status, rc);
16139 status = -ENXIO;
16140 }
16141
16142
16143 list_del_init(&eq->list);
16144 mempool_free(mbox, eq->phba->mbox_mem_pool);
16145 return status;
16146}
16147
16148
16149
16150
16151
16152
16153
16154
16155
16156
16157
16158
16159
16160int
16161lpfc_cq_destroy(struct lpfc_hba *phba, struct lpfc_queue *cq)
16162{
16163 LPFC_MBOXQ_t *mbox;
16164 int rc, length, status = 0;
16165 uint32_t shdr_status, shdr_add_status;
16166 union lpfc_sli4_cfg_shdr *shdr;
16167
16168
16169 if (!cq)
16170 return -ENODEV;
16171 mbox = mempool_alloc(cq->phba->mbox_mem_pool, GFP_KERNEL);
16172 if (!mbox)
16173 return -ENOMEM;
16174 length = (sizeof(struct lpfc_mbx_cq_destroy) -
16175 sizeof(struct lpfc_sli4_cfg_mhdr));
16176 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON,
16177 LPFC_MBOX_OPCODE_CQ_DESTROY,
16178 length, LPFC_SLI4_MBX_EMBED);
16179 bf_set(lpfc_mbx_cq_destroy_q_id, &mbox->u.mqe.un.cq_destroy.u.request,
16180 cq->queue_id);
16181 mbox->vport = cq->phba->pport;
16182 mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
16183 rc = lpfc_sli_issue_mbox(cq->phba, mbox, MBX_POLL);
16184
16185 shdr = (union lpfc_sli4_cfg_shdr *)
16186 &mbox->u.mqe.un.wq_create.header.cfg_shdr;
16187 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
16188 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
16189 if (shdr_status || shdr_add_status || rc) {
16190 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
16191 "2506 CQ_DESTROY mailbox failed with "
16192 "status x%x add_status x%x, mbx status x%x\n",
16193 shdr_status, shdr_add_status, rc);
16194 status = -ENXIO;
16195 }
16196
16197 list_del_init(&cq->list);
16198 mempool_free(mbox, cq->phba->mbox_mem_pool);
16199 return status;
16200}
16201
16202
16203
16204
16205
16206
16207
16208
16209
16210
16211
16212
16213
16214int
16215lpfc_mq_destroy(struct lpfc_hba *phba, struct lpfc_queue *mq)
16216{
16217 LPFC_MBOXQ_t *mbox;
16218 int rc, length, status = 0;
16219 uint32_t shdr_status, shdr_add_status;
16220 union lpfc_sli4_cfg_shdr *shdr;
16221
16222
16223 if (!mq)
16224 return -ENODEV;
16225 mbox = mempool_alloc(mq->phba->mbox_mem_pool, GFP_KERNEL);
16226 if (!mbox)
16227 return -ENOMEM;
16228 length = (sizeof(struct lpfc_mbx_mq_destroy) -
16229 sizeof(struct lpfc_sli4_cfg_mhdr));
16230 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON,
16231 LPFC_MBOX_OPCODE_MQ_DESTROY,
16232 length, LPFC_SLI4_MBX_EMBED);
16233 bf_set(lpfc_mbx_mq_destroy_q_id, &mbox->u.mqe.un.mq_destroy.u.request,
16234 mq->queue_id);
16235 mbox->vport = mq->phba->pport;
16236 mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
16237 rc = lpfc_sli_issue_mbox(mq->phba, mbox, MBX_POLL);
16238
16239 shdr = (union lpfc_sli4_cfg_shdr *)
16240 &mbox->u.mqe.un.mq_destroy.header.cfg_shdr;
16241 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
16242 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
16243 if (shdr_status || shdr_add_status || rc) {
16244 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
16245 "2507 MQ_DESTROY mailbox failed with "
16246 "status x%x add_status x%x, mbx status x%x\n",
16247 shdr_status, shdr_add_status, rc);
16248 status = -ENXIO;
16249 }
16250
16251 list_del_init(&mq->list);
16252 mempool_free(mbox, mq->phba->mbox_mem_pool);
16253 return status;
16254}
16255
16256
16257
16258
16259
16260
16261
16262
16263
16264
16265
16266
16267
16268int
16269lpfc_wq_destroy(struct lpfc_hba *phba, struct lpfc_queue *wq)
16270{
16271 LPFC_MBOXQ_t *mbox;
16272 int rc, length, status = 0;
16273 uint32_t shdr_status, shdr_add_status;
16274 union lpfc_sli4_cfg_shdr *shdr;
16275
16276
16277 if (!wq)
16278 return -ENODEV;
16279 mbox = mempool_alloc(wq->phba->mbox_mem_pool, GFP_KERNEL);
16280 if (!mbox)
16281 return -ENOMEM;
16282 length = (sizeof(struct lpfc_mbx_wq_destroy) -
16283 sizeof(struct lpfc_sli4_cfg_mhdr));
16284 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_FCOE,
16285 LPFC_MBOX_OPCODE_FCOE_WQ_DESTROY,
16286 length, LPFC_SLI4_MBX_EMBED);
16287 bf_set(lpfc_mbx_wq_destroy_q_id, &mbox->u.mqe.un.wq_destroy.u.request,
16288 wq->queue_id);
16289 mbox->vport = wq->phba->pport;
16290 mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
16291 rc = lpfc_sli_issue_mbox(wq->phba, mbox, MBX_POLL);
16292 shdr = (union lpfc_sli4_cfg_shdr *)
16293 &mbox->u.mqe.un.wq_destroy.header.cfg_shdr;
16294 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
16295 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
16296 if (shdr_status || shdr_add_status || rc) {
16297 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
16298 "2508 WQ_DESTROY mailbox failed with "
16299 "status x%x add_status x%x, mbx status x%x\n",
16300 shdr_status, shdr_add_status, rc);
16301 status = -ENXIO;
16302 }
16303
16304 list_del_init(&wq->list);
16305 kfree(wq->pring);
16306 wq->pring = NULL;
16307 mempool_free(mbox, wq->phba->mbox_mem_pool);
16308 return status;
16309}
16310
16311
16312
16313
16314
16315
16316
16317
16318
16319
16320
16321
16322
/**
 * lpfc_rq_destroy - Destroy a header/data receive queue pair on the HBA
 * @phba: HBA structure that indicates port to destroy the queues on.
 * @hrq: The header receive queue to destroy.
 * @drq: The data receive queue to destroy.
 *
 * Issues two RQ_DESTROY mailbox commands: first for @hrq, then the same
 * embedded mailbox is reprogrammed and reissued for @drq.  On completion
 * both queues are unlinked from whatever list they are on.
 *
 * Return: 0 on success, -ENODEV on NULL arguments, -ENOMEM if no mailbox
 * memory is available, -ENXIO if either mailbox command fails.
 */
int
lpfc_rq_destroy(struct lpfc_hba *phba, struct lpfc_queue *hrq,
		struct lpfc_queue *drq)
{
	LPFC_MBOXQ_t *mbox;
	int rc, length, status = 0;
	uint32_t shdr_status, shdr_add_status;
	union lpfc_sli4_cfg_shdr *shdr;

	/* sanity check on queue memory */
	if (!hrq || !drq)
		return -ENODEV;
	mbox = mempool_alloc(hrq->phba->mbox_mem_pool, GFP_KERNEL);
	if (!mbox)
		return -ENOMEM;
	/* Payload length excludes the common SLI4 config header */
	length = (sizeof(struct lpfc_mbx_rq_destroy) -
		  sizeof(struct lpfc_sli4_cfg_mhdr));
	lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_FCOE,
			 LPFC_MBOX_OPCODE_FCOE_RQ_DESTROY,
			 length, LPFC_SLI4_MBX_EMBED);
	bf_set(lpfc_mbx_rq_destroy_q_id, &mbox->u.mqe.un.rq_destroy.u.request,
	       hrq->queue_id);
	mbox->vport = hrq->phba->pport;
	mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
	rc = lpfc_sli_issue_mbox(hrq->phba, mbox, MBX_POLL);

	/* Decode the generic SLI4 status words from the response header */
	shdr = (union lpfc_sli4_cfg_shdr *)
		&mbox->u.mqe.un.rq_destroy.header.cfg_shdr;
	shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
	shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
	if (shdr_status || shdr_add_status || rc) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"2509 RQ_DESTROY mailbox failed with "
				"status x%x add_status x%x, mbx status x%x\n",
				shdr_status, shdr_add_status, rc);
		/* On MBX_TIMEOUT the completion handler still owns and
		 * will free the mailbox, so do not free it here.
		 */
		if (rc != MBX_TIMEOUT)
			mempool_free(mbox, hrq->phba->mbox_mem_pool);
		return -ENXIO;
	}
	/* Reprogram the same mailbox to destroy the data RQ */
	bf_set(lpfc_mbx_rq_destroy_q_id, &mbox->u.mqe.un.rq_destroy.u.request,
	       drq->queue_id);
	rc = lpfc_sli_issue_mbox(drq->phba, mbox, MBX_POLL);
	shdr = (union lpfc_sli4_cfg_shdr *)
		&mbox->u.mqe.un.rq_destroy.header.cfg_shdr;
	shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
	shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
	if (shdr_status || shdr_add_status || rc) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"2510 RQ_DESTROY mailbox failed with "
				"status x%x add_status x%x, mbx status x%x\n",
				shdr_status, shdr_add_status, rc);
		status = -ENXIO;
	}
	/* Unlink both queues from whatever list they are on */
	list_del_init(&hrq->list);
	list_del_init(&drq->list);
	mempool_free(mbox, hrq->phba->mbox_mem_pool);
	return status;
}
16381
16382
16383
16384
16385
16386
16387
16388
16389
16390
16391
16392
16393
16394
16395
16396
16397
16398
16399
16400
16401
16402
16403
16404int
16405lpfc_sli4_post_sgl(struct lpfc_hba *phba,
16406 dma_addr_t pdma_phys_addr0,
16407 dma_addr_t pdma_phys_addr1,
16408 uint16_t xritag)
16409{
16410 struct lpfc_mbx_post_sgl_pages *post_sgl_pages;
16411 LPFC_MBOXQ_t *mbox;
16412 int rc;
16413 uint32_t shdr_status, shdr_add_status;
16414 uint32_t mbox_tmo;
16415 union lpfc_sli4_cfg_shdr *shdr;
16416
16417 if (xritag == NO_XRI) {
16418 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
16419 "0364 Invalid param:\n");
16420 return -EINVAL;
16421 }
16422
16423 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
16424 if (!mbox)
16425 return -ENOMEM;
16426
16427 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_FCOE,
16428 LPFC_MBOX_OPCODE_FCOE_POST_SGL_PAGES,
16429 sizeof(struct lpfc_mbx_post_sgl_pages) -
16430 sizeof(struct lpfc_sli4_cfg_mhdr), LPFC_SLI4_MBX_EMBED);
16431
16432 post_sgl_pages = (struct lpfc_mbx_post_sgl_pages *)
16433 &mbox->u.mqe.un.post_sgl_pages;
16434 bf_set(lpfc_post_sgl_pages_xri, post_sgl_pages, xritag);
16435 bf_set(lpfc_post_sgl_pages_xricnt, post_sgl_pages, 1);
16436
16437 post_sgl_pages->sgl_pg_pairs[0].sgl_pg0_addr_lo =
16438 cpu_to_le32(putPaddrLow(pdma_phys_addr0));
16439 post_sgl_pages->sgl_pg_pairs[0].sgl_pg0_addr_hi =
16440 cpu_to_le32(putPaddrHigh(pdma_phys_addr0));
16441
16442 post_sgl_pages->sgl_pg_pairs[0].sgl_pg1_addr_lo =
16443 cpu_to_le32(putPaddrLow(pdma_phys_addr1));
16444 post_sgl_pages->sgl_pg_pairs[0].sgl_pg1_addr_hi =
16445 cpu_to_le32(putPaddrHigh(pdma_phys_addr1));
16446 if (!phba->sli4_hba.intr_enable)
16447 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
16448 else {
16449 mbox_tmo = lpfc_mbox_tmo_val(phba, mbox);
16450 rc = lpfc_sli_issue_mbox_wait(phba, mbox, mbox_tmo);
16451 }
16452
16453 shdr = (union lpfc_sli4_cfg_shdr *) &post_sgl_pages->header.cfg_shdr;
16454 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
16455 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
16456 if (rc != MBX_TIMEOUT)
16457 mempool_free(mbox, phba->mbox_mem_pool);
16458 if (shdr_status || shdr_add_status || rc) {
16459 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
16460 "2511 POST_SGL mailbox failed with "
16461 "status x%x add_status x%x, mbx status x%x\n",
16462 shdr_status, shdr_add_status, rc);
16463 }
16464 return 0;
16465}
16466
16467
16468
16469
16470
16471
16472
16473
16474
16475
16476
16477
16478
16479
16480static uint16_t
16481lpfc_sli4_alloc_xri(struct lpfc_hba *phba)
16482{
16483 unsigned long xri;
16484
16485
16486
16487
16488
16489 spin_lock_irq(&phba->hbalock);
16490 xri = find_next_zero_bit(phba->sli4_hba.xri_bmask,
16491 phba->sli4_hba.max_cfg_param.max_xri, 0);
16492 if (xri >= phba->sli4_hba.max_cfg_param.max_xri) {
16493 spin_unlock_irq(&phba->hbalock);
16494 return NO_XRI;
16495 } else {
16496 set_bit(xri, phba->sli4_hba.xri_bmask);
16497 phba->sli4_hba.max_cfg_param.xri_used++;
16498 }
16499 spin_unlock_irq(&phba->hbalock);
16500 return xri;
16501}
16502
16503
16504
16505
16506
16507
16508
16509
16510static void
16511__lpfc_sli4_free_xri(struct lpfc_hba *phba, int xri)
16512{
16513 if (test_and_clear_bit(xri, phba->sli4_hba.xri_bmask)) {
16514 phba->sli4_hba.max_cfg_param.xri_used--;
16515 }
16516}
16517
16518
16519
16520
16521
16522
16523
16524
/**
 * lpfc_sli4_free_xri - Release an XRI back to the pool (locked wrapper)
 * @phba: Pointer to HBA context object.
 * @xri: The XRI to release.
 *
 * Takes the hba lock to serialize against lpfc_sli4_alloc_xri() and then
 * delegates to __lpfc_sli4_free_xri().
 */
void
lpfc_sli4_free_xri(struct lpfc_hba *phba, int xri)
{
	spin_lock_irq(&phba->hbalock);
	__lpfc_sli4_free_xri(phba, xri);
	spin_unlock_irq(&phba->hbalock);
}
16532
16533
16534
16535
16536
16537
16538
16539
16540
16541
16542
16543uint16_t
16544lpfc_sli4_next_xritag(struct lpfc_hba *phba)
16545{
16546 uint16_t xri_index;
16547
16548 xri_index = lpfc_sli4_alloc_xri(phba);
16549 if (xri_index == NO_XRI)
16550 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
16551 "2004 Failed to allocate XRI.last XRITAG is %d"
16552 " Max XRI is %d, Used XRI is %d\n",
16553 xri_index,
16554 phba->sli4_hba.max_cfg_param.max_xri,
16555 phba->sli4_hba.max_cfg_param.xri_used);
16556 return xri_index;
16557}
16558
16559
16560
16561
16562
16563
16564
16565
16566
16567
16568
16569
/**
 * lpfc_sli4_post_sgl_list - Post a block of ELS sgl pages to the port
 * @phba: Pointer to HBA context object.
 * @post_sgl_list: List of lpfc_sglq entries whose pages are to be posted.
 * @post_cnt: Number of entries on @post_sgl_list.
 *
 * Builds a non-embedded POST_SGL_PAGES mailbox command carrying one
 * sgl page pair per entry and issues it (polled when interrupts are
 * disabled, otherwise synchronous wait).  The XRI range is assumed to
 * start at the first entry's sli4_xritag; the port infers the rest
 * from @post_cnt.
 *
 * Return: 0 on success, -ENOMEM if the request exceeds one page or no
 * mailbox memory is available, -ENXIO if the mailbox command fails.
 */
static int
lpfc_sli4_post_sgl_list(struct lpfc_hba *phba,
			struct list_head *post_sgl_list,
			int post_cnt)
{
	struct lpfc_sglq *sglq_entry = NULL, *sglq_next = NULL;
	struct lpfc_mbx_post_uembed_sgl_page1 *sgl;
	struct sgl_page_pairs *sgl_pg_pairs;
	void *viraddr;
	LPFC_MBOXQ_t *mbox;
	uint32_t reqlen, alloclen, pg_pairs;
	uint32_t mbox_tmo;
	uint16_t xritag_start = 0;
	int rc = 0;
	uint32_t shdr_status, shdr_add_status;
	union lpfc_sli4_cfg_shdr *shdr;

	/* One page pair per sglq plus the config header and trailing word */
	reqlen = post_cnt * sizeof(struct sgl_page_pairs) +
		 sizeof(union lpfc_sli4_cfg_shdr) + sizeof(uint32_t);
	if (reqlen > SLI4_PAGE_SIZE) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"2559 Block sgl registration required DMA "
				"size (%d) great than a page\n", reqlen);
		return -ENOMEM;
	}

	mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!mbox)
		return -ENOMEM;

	/* Allocate DMA memory and set up the non-embedded mailbox command */
	alloclen = lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_FCOE,
			 LPFC_MBOX_OPCODE_FCOE_POST_SGL_PAGES, reqlen,
			 LPFC_SLI4_MBX_NEMBED);

	if (alloclen < reqlen) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"0285 Allocated DMA memory size (%d) is "
				"less than the requested DMA memory "
				"size (%d)\n", alloclen, reqlen);
		lpfc_sli4_mbox_cmd_free(phba, mbox);
		return -ENOMEM;
	}
	/* The payload lives in the first (only) external SGE page */
	viraddr = mbox->sge_array->addr[0];
	sgl = (struct lpfc_mbx_post_uembed_sgl_page1 *)viraddr;
	sgl_pg_pairs = &sgl->sgl_pg_pairs;

	pg_pairs = 0;
	list_for_each_entry_safe(sglq_entry, sglq_next, post_sgl_list, list) {
		/* Set up the sge entry; second page of the pair is unused */
		sgl_pg_pairs->sgl_pg0_addr_lo =
				cpu_to_le32(putPaddrLow(sglq_entry->phys));
		sgl_pg_pairs->sgl_pg0_addr_hi =
				cpu_to_le32(putPaddrHigh(sglq_entry->phys));
		sgl_pg_pairs->sgl_pg1_addr_lo =
				cpu_to_le32(putPaddrLow(0));
		sgl_pg_pairs->sgl_pg1_addr_hi =
				cpu_to_le32(putPaddrHigh(0));

		/* Track the starting XRI of the (assumed contiguous) range */
		if (pg_pairs == 0)
			xritag_start = sglq_entry->sli4_xritag;
		sgl_pg_pairs++;
		pg_pairs++;
	}

	/* Complete initialization and perform endian conversion */
	bf_set(lpfc_post_sgl_pages_xri, sgl, xritag_start);
	bf_set(lpfc_post_sgl_pages_xricnt, sgl, post_cnt);
	sgl->word0 = cpu_to_le32(sgl->word0);

	if (!phba->sli4_hba.intr_enable)
		rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
	else {
		mbox_tmo = lpfc_mbox_tmo_val(phba, mbox);
		rc = lpfc_sli_issue_mbox_wait(phba, mbox, mbox_tmo);
	}
	/* Decode the generic SLI4 status words from the response header */
	shdr = (union lpfc_sli4_cfg_shdr *) &sgl->cfg_shdr;
	shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
	shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
	/* On MBX_TIMEOUT the completion handler owns and frees the mailbox */
	if (rc != MBX_TIMEOUT)
		lpfc_sli4_mbox_cmd_free(phba, mbox);
	if (shdr_status || shdr_add_status || rc) {
		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
				"2513 POST_SGL_BLOCK mailbox command failed "
				"status x%x add_status x%x mbx status x%x\n",
				shdr_status, shdr_add_status, rc);
		rc = -ENXIO;
	}
	return rc;
}
16662
16663
16664
16665
16666
16667
16668
16669
16670
16671
16672
16673
/**
 * lpfc_sli4_post_io_sgl_block - Post a block of IO buffer sgl pages
 * @phba: Pointer to HBA context object.
 * @nblist: List of lpfc_io_buf entries whose sgl pages are to be posted.
 * @count: Number of entries on @nblist.
 *
 * Builds a non-embedded POST_SGL_PAGES mailbox command with one sgl page
 * pair per IO buffer and issues it (polled when interrupts are disabled,
 * otherwise synchronous wait).  A second page is supplied only when the
 * configured sgl buffer size exceeds one SGL page.
 *
 * Return: 0 on success, -ENOMEM if the request exceeds one page or no
 * mailbox memory is available, -ENXIO if the mailbox command fails.
 */
static int
lpfc_sli4_post_io_sgl_block(struct lpfc_hba *phba, struct list_head *nblist,
			    int count)
{
	struct lpfc_io_buf *lpfc_ncmd;
	struct lpfc_mbx_post_uembed_sgl_page1 *sgl;
	struct sgl_page_pairs *sgl_pg_pairs;
	void *viraddr;
	LPFC_MBOXQ_t *mbox;
	uint32_t reqlen, alloclen, pg_pairs;
	uint32_t mbox_tmo;
	uint16_t xritag_start = 0;
	int rc = 0;
	uint32_t shdr_status, shdr_add_status;
	dma_addr_t pdma_phys_bpl1;
	union lpfc_sli4_cfg_shdr *shdr;

	/* Calculate the requested length of the dma memory */
	reqlen = count * sizeof(struct sgl_page_pairs) +
		 sizeof(union lpfc_sli4_cfg_shdr) + sizeof(uint32_t);
	if (reqlen > SLI4_PAGE_SIZE) {
		lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
				"6118 Block sgl registration required DMA "
				"size (%d) great than a page\n", reqlen);
		return -ENOMEM;
	}
	mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!mbox) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"6119 Failed to allocate mbox cmd memory\n");
		return -ENOMEM;
	}

	/* Allocate DMA memory and set up the non-embedded mailbox command */
	alloclen = lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_FCOE,
				    LPFC_MBOX_OPCODE_FCOE_POST_SGL_PAGES,
				    reqlen, LPFC_SLI4_MBX_NEMBED);

	if (alloclen < reqlen) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"6120 Allocated DMA memory size (%d) is "
				"less than the requested DMA memory "
				"size (%d)\n", alloclen, reqlen);
		lpfc_sli4_mbox_cmd_free(phba, mbox);
		return -ENOMEM;
	}

	/* Get the first SGE entry from the non-embedded DMA memory */
	viraddr = mbox->sge_array->addr[0];

	/* Set up the SGL pages in the non-embedded DMA pages */
	sgl = (struct lpfc_mbx_post_uembed_sgl_page1 *)viraddr;
	sgl_pg_pairs = &sgl->sgl_pg_pairs;

	pg_pairs = 0;
	list_for_each_entry(lpfc_ncmd, nblist, list) {
		/* Set up the sge entry */
		sgl_pg_pairs->sgl_pg0_addr_lo =
			cpu_to_le32(putPaddrLow(lpfc_ncmd->dma_phys_sgl));
		sgl_pg_pairs->sgl_pg0_addr_hi =
			cpu_to_le32(putPaddrHigh(lpfc_ncmd->dma_phys_sgl));
		/* Second page only exists for buffers larger than one page */
		if (phba->cfg_sg_dma_buf_size > SGL_PAGE_SIZE)
			pdma_phys_bpl1 = lpfc_ncmd->dma_phys_sgl +
						SGL_PAGE_SIZE;
		else
			pdma_phys_bpl1 = 0;
		sgl_pg_pairs->sgl_pg1_addr_lo =
			cpu_to_le32(putPaddrLow(pdma_phys_bpl1));
		sgl_pg_pairs->sgl_pg1_addr_hi =
			cpu_to_le32(putPaddrHigh(pdma_phys_bpl1));
		/* Keep the first xritag on the list */
		if (pg_pairs == 0)
			xritag_start = lpfc_ncmd->cur_iocbq.sli4_xritag;
		sgl_pg_pairs++;
		pg_pairs++;
	}
	bf_set(lpfc_post_sgl_pages_xri, sgl, xritag_start);
	bf_set(lpfc_post_sgl_pages_xricnt, sgl, pg_pairs);
	/* Perform endian conversion if necessary */
	sgl->word0 = cpu_to_le32(sgl->word0);

	if (!phba->sli4_hba.intr_enable) {
		rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
	} else {
		mbox_tmo = lpfc_mbox_tmo_val(phba, mbox);
		rc = lpfc_sli_issue_mbox_wait(phba, mbox, mbox_tmo);
	}
	/* Decode the generic SLI4 status words from the response header */
	shdr = (union lpfc_sli4_cfg_shdr *)&sgl->cfg_shdr;
	shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
	shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
	/* On MBX_TIMEOUT the completion handler owns and frees the mailbox */
	if (rc != MBX_TIMEOUT)
		lpfc_sli4_mbox_cmd_free(phba, mbox);
	if (shdr_status || shdr_add_status || rc) {
		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
				"6125 POST_SGL_BLOCK mailbox command failed "
				"status x%x add_status x%x mbx status x%x\n",
				shdr_status, shdr_add_status, rc);
		rc = -ENXIO;
	}
	return rc;
}
16775
16776
16777
16778
16779
16780
16781
16782
16783
16784
16785
16786
16787
16788
16789
/**
 * lpfc_sli4_post_io_sgl_list - Post IO buffer sgls, batching contiguous XRIs
 * @phba: Pointer to HBA context object.
 * @post_nblist: List of lpfc_io_buf entries to post.
 * @sb_count: Number of entries on @post_nblist.
 *
 * Walks the list accumulating runs of buffers with contiguous XRI tags.
 * Each full run (or a run cut short by a non-contiguous XRI, or by
 * reaching LPFC_NEMBED_MBOX_SGL_CNT) is posted as one block via
 * lpfc_sli4_post_io_sgl_block(); a leftover single buffer at the end is
 * posted individually via lpfc_sli4_post_sgl().  Buffers are flagged
 * LPFC_SBUF_NOT_POSTED on failure and handed to lpfc_io_buf_replenish()
 * either way.
 *
 * Return: the number of buffers successfully posted, or -EINVAL if
 * @sb_count is not positive.
 */
int
lpfc_sli4_post_io_sgl_list(struct lpfc_hba *phba,
			   struct list_head *post_nblist, int sb_count)
{
	struct lpfc_io_buf *lpfc_ncmd, *lpfc_ncmd_next;
	int status, sgl_size;
	int post_cnt = 0, block_cnt = 0, num_posting = 0, num_posted = 0;
	dma_addr_t pdma_phys_sgl1;
	int last_xritag = NO_XRI;
	int cur_xritag;
	LIST_HEAD(prep_nblist);
	LIST_HEAD(blck_nblist);
	LIST_HEAD(nvme_nblist);

	/* sanity check */
	if (sb_count <= 0)
		return -EINVAL;

	sgl_size = phba->cfg_sg_dma_buf_size;
	list_for_each_entry_safe(lpfc_ncmd, lpfc_ncmd_next, post_nblist, list) {
		list_del_init(&lpfc_ncmd->list);
		block_cnt++;
		if ((last_xritag != NO_XRI) &&
		    (lpfc_ncmd->cur_iocbq.sli4_xritag != last_xritag + 1)) {
			/* XRI gap: close out the contiguous run collected
			 * so far and start a new run with this buffer.
			 */
			list_splice_init(&prep_nblist, &blck_nblist);
			post_cnt = block_cnt - 1;
			/* put the current buffer on the new run */
			list_add_tail(&lpfc_ncmd->list, &prep_nblist);
			block_cnt = 1;
		} else {
			/* Still contiguous; extend the current run */
			list_add_tail(&lpfc_ncmd->list, &prep_nblist);
			/* A full batch fits one non-embedded mailbox */
			if (block_cnt == LPFC_NEMBED_MBOX_SGL_CNT) {
				list_splice_init(&prep_nblist, &blck_nblist);
				post_cnt = block_cnt;
				block_cnt = 0;
			}
		}
		num_posting++;
		last_xritag = lpfc_ncmd->cur_iocbq.sli4_xritag;

		/* end of repost sgl list condition for NVME buffers */
		if (num_posting == sb_count) {
			if (post_cnt == 0) {
				/* last sgl posting block: flush the run */
				list_splice_init(&prep_nblist, &blck_nblist);
				post_cnt = block_cnt;
			} else if (block_cnt == 1) {
				/* last single sgl with non-contiguous xri */
				if (sgl_size > SGL_PAGE_SIZE)
					pdma_phys_sgl1 =
						lpfc_ncmd->dma_phys_sgl +
						SGL_PAGE_SIZE;
				else
					pdma_phys_sgl1 = 0;
				cur_xritag = lpfc_ncmd->cur_iocbq.sli4_xritag;
				status = lpfc_sli4_post_sgl(
						phba, lpfc_ncmd->dma_phys_sgl,
						pdma_phys_sgl1, cur_xritag);
				if (status) {
					/* failure, put on abort nvme list */
					lpfc_ncmd->flags |=
						LPFC_SBUF_NOT_POSTED;
				} else {
					/* success, put on NVME buffer list */
					lpfc_ncmd->flags &=
						~LPFC_SBUF_NOT_POSTED;
					lpfc_ncmd->status = IOSTAT_SUCCESS;
					num_posted++;
				}
				/* success, put on NVME buffer sgl list */
				list_add_tail(&lpfc_ncmd->list, &nvme_nblist);
			}
		}

		/* continue until a nembed page worth of sgls */
		if (post_cnt == 0)
			continue;

		/* post block of NVME buffer list sgls */
		status = lpfc_sli4_post_io_sgl_block(phba, &blck_nblist,
						     post_cnt);

		/* don't reset xirtag due to hole in xri block */
		if (block_cnt == 0)
			last_xritag = NO_XRI;

		/* reset NVME buffer post count for next round of posting */
		post_cnt = 0;

		/* put posted NVME buffer-sgl posted on NVME buffer sgl list */
		while (!list_empty(&blck_nblist)) {
			list_remove_head(&blck_nblist, lpfc_ncmd,
					 struct lpfc_io_buf, list);
			if (status) {
				/* failure, put on abort nvme list */
				lpfc_ncmd->flags |= LPFC_SBUF_NOT_POSTED;
			} else {
				/* success, put on NVME buffer list */
				lpfc_ncmd->flags &= ~LPFC_SBUF_NOT_POSTED;
				lpfc_ncmd->status = IOSTAT_SUCCESS;
				num_posted++;
			}
			list_add_tail(&lpfc_ncmd->list, &nvme_nblist);
		}
	}
	/* Push NVME buffers with sgl posted to the available list */
	lpfc_io_buf_replenish(phba, &nvme_nblist);

	return num_posted;
}
16903
16904
16905
16906
16907
16908
16909
16910
16911
16912
16913
/**
 * lpfc_fc_frame_check - Validate an unsolicited FC frame header
 * @phba: Pointer to HBA context object.
 * @fc_hdr: Pointer to the received frame's FC header.
 *
 * Accepts the frame only if both its R_CTL and TYPE fields are values
 * this driver knows how to handle.  A VFT-tagged frame is unwrapped
 * (the real header follows the VFT header) and re-checked recursively.
 *
 * Return: 0 if the frame should be processed, 1 if it should be dropped.
 */
static int
lpfc_fc_frame_check(struct lpfc_hba *phba, struct fc_frame_header *fc_hdr)
{
	/*  VFT header */
	struct fc_vft_header *fc_vft_hdr;
	uint32_t *header = (uint32_t *) fc_hdr;

	/* Accepted R_CTL values; anything else is dropped */
	switch (fc_hdr->fh_r_ctl) {
	case FC_RCTL_DD_UNCAT:
	case FC_RCTL_DD_SOL_DATA:
	case FC_RCTL_DD_UNSOL_CTL:
	case FC_RCTL_DD_SOL_CTL:
	case FC_RCTL_DD_UNSOL_DATA:
	case FC_RCTL_DD_DATA_DESC:
	case FC_RCTL_DD_UNSOL_CMD:
	case FC_RCTL_DD_CMD_STATUS:
	case FC_RCTL_ELS_REQ:
	case FC_RCTL_ELS_REP:
	case FC_RCTL_ELS4_REQ:
	case FC_RCTL_ELS4_REP:
	case FC_RCTL_BA_NOP:
	case FC_RCTL_BA_ABTS:
	case FC_RCTL_BA_RMC:
	case FC_RCTL_BA_ACC:
	case FC_RCTL_BA_RJT:
	case FC_RCTL_BA_PRMT:
	case FC_RCTL_ACK_1:
	case FC_RCTL_ACK_0:
	case FC_RCTL_P_RJT:
	case FC_RCTL_F_RJT:
	case FC_RCTL_P_BSY:
	case FC_RCTL_F_BSY:
	case FC_RCTL_F_BSYL:
	case FC_RCTL_LCR:
	case FC_RCTL_MDS_DIAGS:
	case FC_RCTL_END:
		break;
	case FC_RCTL_VFTH:
		/* Virtual Fabric tagging: the real header follows the
		 * VFT header — unwrap and validate it recursively.
		 */
		fc_vft_hdr = (struct fc_vft_header *)fc_hdr;
		fc_hdr = &((struct fc_frame_header *)fc_vft_hdr)[1];
		return lpfc_fc_frame_check(phba, fc_hdr);
	default:
		goto drop;
	}

	/* Accepted TYPE values; anything else is dropped */
	switch (fc_hdr->fh_type) {
	case FC_TYPE_BLS:
	case FC_TYPE_ELS:
	case FC_TYPE_FCP:
	case FC_TYPE_CT:
	case FC_TYPE_NVME:
		break;
	case FC_TYPE_IP:
	case FC_TYPE_ILS:
	default:
		goto drop;
	}

	lpfc_printf_log(phba, KERN_INFO, LOG_ELS,
			"2538 Received frame rctl:x%x, type:x%x, "
			"frame Data:%08x %08x %08x %08x %08x %08x %08x\n",
			fc_hdr->fh_r_ctl, fc_hdr->fh_type,
			be32_to_cpu(header[0]), be32_to_cpu(header[1]),
			be32_to_cpu(header[2]), be32_to_cpu(header[3]),
			be32_to_cpu(header[4]), be32_to_cpu(header[5]),
			be32_to_cpu(header[6]));
	return 0;
drop:
	lpfc_printf_log(phba, KERN_WARNING, LOG_ELS,
			"2539 Dropped frame rctl:x%x type:x%x\n",
			fc_hdr->fh_r_ctl, fc_hdr->fh_type);
	return 1;
}
16987
16988
16989
16990
16991
16992
16993
16994
16995
16996static uint32_t
16997lpfc_fc_hdr_get_vfi(struct fc_frame_header *fc_hdr)
16998{
16999 struct fc_vft_header *fc_vft_hdr = (struct fc_vft_header *)fc_hdr;
17000
17001 if (fc_hdr->fh_r_ctl != FC_RCTL_VFTH)
17002 return 0;
17003 return bf_get(fc_vft_hdr_vf_id, fc_vft_hdr);
17004}
17005
17006
17007
17008
17009
17010
17011
17012
17013
17014
17015
17016
17017
17018static struct lpfc_vport *
17019lpfc_fc_frame_to_vport(struct lpfc_hba *phba, struct fc_frame_header *fc_hdr,
17020 uint16_t fcfi, uint32_t did)
17021{
17022 struct lpfc_vport **vports;
17023 struct lpfc_vport *vport = NULL;
17024 int i;
17025
17026 if (did == Fabric_DID)
17027 return phba->pport;
17028 if ((phba->pport->fc_flag & FC_PT2PT) &&
17029 !(phba->link_state == LPFC_HBA_READY))
17030 return phba->pport;
17031
17032 vports = lpfc_create_vport_work_array(phba);
17033 if (vports != NULL) {
17034 for (i = 0; i <= phba->max_vpi && vports[i] != NULL; i++) {
17035 if (phba->fcf.fcfi == fcfi &&
17036 vports[i]->vfi == lpfc_fc_hdr_get_vfi(fc_hdr) &&
17037 vports[i]->fc_myDID == did) {
17038 vport = vports[i];
17039 break;
17040 }
17041 }
17042 }
17043 lpfc_destroy_vport_work_array(phba, vports);
17044 return vport;
17045}
17046
17047
17048
17049
17050
17051
17052
17053
17054
17055
17056
17057static void
17058lpfc_update_rcv_time_stamp(struct lpfc_vport *vport)
17059{
17060 struct lpfc_dmabuf *h_buf;
17061 struct hbq_dmabuf *dmabuf = NULL;
17062
17063
17064 h_buf = list_get_first(&vport->rcv_buffer_list,
17065 struct lpfc_dmabuf, list);
17066 if (!h_buf)
17067 return;
17068 dmabuf = container_of(h_buf, struct hbq_dmabuf, hbuf);
17069 vport->rcv_buffer_time_stamp = dmabuf->time_stamp;
17070}
17071
17072
17073
17074
17075
17076
17077
17078
17079
17080void
17081lpfc_cleanup_rcv_buffers(struct lpfc_vport *vport)
17082{
17083 struct lpfc_dmabuf *h_buf, *hnext;
17084 struct lpfc_dmabuf *d_buf, *dnext;
17085 struct hbq_dmabuf *dmabuf = NULL;
17086
17087
17088 list_for_each_entry_safe(h_buf, hnext, &vport->rcv_buffer_list, list) {
17089 dmabuf = container_of(h_buf, struct hbq_dmabuf, hbuf);
17090 list_del_init(&dmabuf->hbuf.list);
17091 list_for_each_entry_safe(d_buf, dnext,
17092 &dmabuf->dbuf.list, list) {
17093 list_del_init(&d_buf->list);
17094 lpfc_in_buf_free(vport->phba, d_buf);
17095 }
17096 lpfc_in_buf_free(vport->phba, &dmabuf->dbuf);
17097 }
17098}
17099
17100
17101
17102
17103
17104
17105
17106
17107
17108
17109
17110
17111
/**
 * lpfc_rcv_seq_check_edtov - Abort partial receive sequences older than E_D_TOV
 * @vport: The vport whose receive buffer list is being checked.
 *
 * If the oldest queued partial sequence has exceeded the fabric's error
 * detect timeout (E_D_TOV), walks the list from the front (oldest first)
 * freeing every sequence that has timed out, stopping at the first one
 * still within its window.  Refreshes the vport's oldest-sequence time
 * stamp when anything was freed.
 */
void
lpfc_rcv_seq_check_edtov(struct lpfc_vport *vport)
{
	struct lpfc_dmabuf *h_buf, *hnext;
	struct lpfc_dmabuf *d_buf, *dnext;
	struct hbq_dmabuf *dmabuf = NULL;
	unsigned long timeout;
	int abort_count = 0;

	/* Cheap pre-check against the cached oldest-sequence time stamp */
	timeout = (msecs_to_jiffies(vport->phba->fc_edtov) +
		   vport->rcv_buffer_time_stamp);
	if (list_empty(&vport->rcv_buffer_list) ||
	    time_before(jiffies, timeout))
		return;
	/* start with the oldest sequence on the rcv list */
	list_for_each_entry_safe(h_buf, hnext, &vport->rcv_buffer_list, list) {
		dmabuf = container_of(h_buf, struct hbq_dmabuf, hbuf);
		timeout = (msecs_to_jiffies(vport->phba->fc_edtov) +
			   dmabuf->time_stamp);
		/* List is ordered oldest-first, so stop at the first
		 * sequence that has not yet timed out.
		 */
		if (time_before(jiffies, timeout))
			break;
		abort_count++;
		list_del_init(&dmabuf->hbuf.list);
		list_for_each_entry_safe(d_buf, dnext,
					 &dmabuf->dbuf.list, list) {
			list_del_init(&d_buf->list);
			lpfc_in_buf_free(vport->phba, d_buf);
		}
		lpfc_in_buf_free(vport->phba, &dmabuf->dbuf);
	}
	if (abort_count)
		lpfc_update_rcv_time_stamp(vport);
}
17145
17146
17147
17148
17149
17150
17151
17152
17153
17154
17155
17156
17157
/**
 * lpfc_fc_frame_add - Insert a received frame into its partial sequence
 * @vport: The vport the frame was received on.
 * @dmabuf: The buffer holding the newly received frame.
 *
 * Searches the vport's receive buffer list for an existing partial
 * sequence matching the frame's SEQ_ID, OX_ID and S_ID.  A frame with
 * no match starts a new sequence.  Otherwise the frame is inserted into
 * the matched sequence's data list in SEQ_CNT order (the list is walked
 * backwards from the highest SEQ_CNT, since in-order delivery is the
 * common case).  A duplicate SEQ_CNT is rejected.
 *
 * Return: the hbq_dmabuf heading the sequence the frame was added to,
 * or NULL if the frame was a duplicate and was not linked in.
 */
static struct hbq_dmabuf *
lpfc_fc_frame_add(struct lpfc_vport *vport, struct hbq_dmabuf *dmabuf)
{
	struct fc_frame_header *new_hdr;
	struct fc_frame_header *temp_hdr;
	struct lpfc_dmabuf *d_buf;
	struct lpfc_dmabuf *h_buf;
	struct hbq_dmabuf *seq_dmabuf = NULL;
	struct hbq_dmabuf *temp_dmabuf = NULL;
	uint8_t	found = 0;

	INIT_LIST_HEAD(&dmabuf->dbuf.list);
	dmabuf->time_stamp = jiffies;
	new_hdr = (struct fc_frame_header *)dmabuf->hbuf.virt;

	/* Use the hdr_buf to find the sequence that this frame belongs to */
	list_for_each_entry(h_buf, &vport->rcv_buffer_list, list) {
		temp_hdr = (struct fc_frame_header *)h_buf->virt;
		/* Match on sequence id, originator exchange id and source */
		if ((temp_hdr->fh_seq_id != new_hdr->fh_seq_id) ||
		    (temp_hdr->fh_ox_id != new_hdr->fh_ox_id) ||
		    (memcmp(&temp_hdr->fh_s_id, &new_hdr->fh_s_id, 3)))
			continue;
		/* found a pending sequence that matches this frame */
		seq_dmabuf = container_of(h_buf, struct hbq_dmabuf, hbuf);
		break;
	}
	if (!seq_dmabuf) {
		/*
		 * This indicates first frame received for this sequence.
		 * Queue the buffer on the vport's rcv_buffer_list.
		 */
		list_add_tail(&dmabuf->hbuf.list, &vport->rcv_buffer_list);
		lpfc_update_rcv_time_stamp(vport);
		return dmabuf;
	}
	temp_hdr = seq_dmabuf->hbuf.virt;
	if (be16_to_cpu(new_hdr->fh_seq_cnt) <
		be16_to_cpu(temp_hdr->fh_seq_cnt)) {
		/* New frame precedes the current head: it becomes the new
		 * sequence header and the old head moves to its data list.
		 */
		list_del_init(&seq_dmabuf->hbuf.list);
		list_add_tail(&dmabuf->hbuf.list, &vport->rcv_buffer_list);
		list_add_tail(&dmabuf->dbuf.list, &seq_dmabuf->dbuf.list);
		lpfc_update_rcv_time_stamp(vport);
		return dmabuf;
	}
	/* move this sequence to the tail to indicate a young sequence */
	list_move_tail(&seq_dmabuf->hbuf.list, &vport->rcv_buffer_list);
	seq_dmabuf->time_stamp = jiffies;
	lpfc_update_rcv_time_stamp(vport);
	if (list_empty(&seq_dmabuf->dbuf.list)) {
		temp_hdr = dmabuf->hbuf.virt;
		list_add_tail(&dmabuf->dbuf.list, &seq_dmabuf->dbuf.list);
		return seq_dmabuf;
	}
	/* find the correct place in the sequence to insert this frame */
	d_buf = list_entry(seq_dmabuf->dbuf.list.prev, typeof(*d_buf), list);
	while (!found) {
		temp_dmabuf = container_of(d_buf, struct hbq_dmabuf, dbuf);
		temp_hdr = (struct fc_frame_header *)temp_dmabuf->hbuf.virt;
		/*
		 * If the frame's sequence count is greater than the frame on
		 * the list then insert the frame right after this frame
		 */
		if (be16_to_cpu(new_hdr->fh_seq_cnt) >
			be16_to_cpu(temp_hdr->fh_seq_cnt)) {
			list_add(&dmabuf->dbuf.list, &temp_dmabuf->dbuf.list);
			found = 1;
			break;
		}

		if (&d_buf->list == &seq_dmabuf->dbuf.list)
			break;
		d_buf = list_entry(d_buf->list.prev, typeof(*d_buf), list);
	}

	if (found)
		return seq_dmabuf;
	/* Duplicate or unplaceable frame: caller must free it */
	return NULL;
}
17236
17237
17238
17239
17240
17241
17242
17243
17244
17245
17246
17247
17248
17249
17250
17251
17252
17253static bool
17254lpfc_sli4_abort_partial_seq(struct lpfc_vport *vport,
17255 struct hbq_dmabuf *dmabuf)
17256{
17257 struct fc_frame_header *new_hdr;
17258 struct fc_frame_header *temp_hdr;
17259 struct lpfc_dmabuf *d_buf, *n_buf, *h_buf;
17260 struct hbq_dmabuf *seq_dmabuf = NULL;
17261
17262
17263 INIT_LIST_HEAD(&dmabuf->dbuf.list);
17264 INIT_LIST_HEAD(&dmabuf->hbuf.list);
17265 new_hdr = (struct fc_frame_header *)dmabuf->hbuf.virt;
17266 list_for_each_entry(h_buf, &vport->rcv_buffer_list, list) {
17267 temp_hdr = (struct fc_frame_header *)h_buf->virt;
17268 if ((temp_hdr->fh_seq_id != new_hdr->fh_seq_id) ||
17269 (temp_hdr->fh_ox_id != new_hdr->fh_ox_id) ||
17270 (memcmp(&temp_hdr->fh_s_id, &new_hdr->fh_s_id, 3)))
17271 continue;
17272
17273 seq_dmabuf = container_of(h_buf, struct hbq_dmabuf, hbuf);
17274 break;
17275 }
17276
17277
17278 if (seq_dmabuf) {
17279 list_for_each_entry_safe(d_buf, n_buf,
17280 &seq_dmabuf->dbuf.list, list) {
17281 list_del_init(&d_buf->list);
17282 lpfc_in_buf_free(vport->phba, d_buf);
17283 }
17284 return true;
17285 }
17286 return false;
17287}
17288
17289
17290
17291
17292
17293
17294
17295
17296
17297
17298
17299
17300
17301
17302
17303
17304
17305static bool
17306lpfc_sli4_abort_ulp_seq(struct lpfc_vport *vport, struct hbq_dmabuf *dmabuf)
17307{
17308 struct lpfc_hba *phba = vport->phba;
17309 int handled;
17310
17311
17312 if (phba->sli_rev < LPFC_SLI_REV4)
17313 return false;
17314
17315
17316 handled = lpfc_ct_handle_unsol_abort(phba, dmabuf);
17317 if (handled)
17318 return true;
17319
17320 return false;
17321}
17322
17323
17324
17325
17326
17327
17328
17329
17330
17331
17332
/**
 * lpfc_sli4_seq_abort_rsp_cmpl - BLS ABORT RSP sequence abort iocb completion
 * @phba: Pointer to HBA context object.
 * @cmd_iocbq: pointer to the command iocbq that was issued.
 * @rsp_iocbq: pointer to the response iocbq (may be NULL).
 *
 * Completion handler for the BLS response issued by
 * lpfc_sli4_seq_abort_rsp().  Releases the node reference attached to the
 * command iocb and frees the iocb itself, then logs any delivery failure.
 **/
static void
lpfc_sli4_seq_abort_rsp_cmpl(struct lpfc_hba *phba,
			     struct lpfc_iocbq *cmd_iocbq,
			     struct lpfc_iocbq *rsp_iocbq)
{
	struct lpfc_nodelist *ndlp;

	if (cmd_iocbq) {
		ndlp = (struct lpfc_nodelist *)cmd_iocbq->context1;
		/* Drop the reference taken when the BLS RSP was issued.
		 * NOTE(review): both lpfc_nlp_put() and lpfc_nlp_not_used()
		 * are invoked on the same node here -- confirm this pairing
		 * matches the single lpfc_nlp_get() taken in
		 * lpfc_sli4_seq_abort_rsp() and does not over-release.
		 */
		lpfc_nlp_put(ndlp);
		lpfc_nlp_not_used(ndlp);
		lpfc_sli_release_iocbq(phba, cmd_iocbq);
	}

	/* Failure means BLS ABORT RSP did not get delivered to remote node */
	if (rsp_iocbq && rsp_iocbq->iocb.ulpStatus)
		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
			"3154 BLS ABORT RSP failed, data: x%x/x%x\n",
			rsp_iocbq->iocb.ulpStatus,
			rsp_iocbq->iocb.un.ulpWord[4]);
}
17354
17355
17356
17357
17358
17359
17360
17361
17362
17363uint16_t
17364lpfc_sli4_xri_inrange(struct lpfc_hba *phba,
17365 uint16_t xri)
17366{
17367 uint16_t i;
17368
17369 for (i = 0; i < phba->sli4_hba.max_cfg_param.max_xri; i++) {
17370 if (xri == phba->sli4_hba.xri_ids[i])
17371 return i;
17372 }
17373 return NO_XRI;
17374}
17375
17376
17377
17378
17379
17380
17381
17382
17383
17384void
17385lpfc_sli4_seq_abort_rsp(struct lpfc_vport *vport,
17386 struct fc_frame_header *fc_hdr, bool aborted)
17387{
17388 struct lpfc_hba *phba = vport->phba;
17389 struct lpfc_iocbq *ctiocb = NULL;
17390 struct lpfc_nodelist *ndlp;
17391 uint16_t oxid, rxid, xri, lxri;
17392 uint32_t sid, fctl;
17393 IOCB_t *icmd;
17394 int rc;
17395
17396 if (!lpfc_is_link_up(phba))
17397 return;
17398
17399 sid = sli4_sid_from_fc_hdr(fc_hdr);
17400 oxid = be16_to_cpu(fc_hdr->fh_ox_id);
17401 rxid = be16_to_cpu(fc_hdr->fh_rx_id);
17402
17403 ndlp = lpfc_findnode_did(vport, sid);
17404 if (!ndlp) {
17405 ndlp = lpfc_nlp_init(vport, sid);
17406 if (!ndlp) {
17407 lpfc_printf_vlog(vport, KERN_WARNING, LOG_ELS,
17408 "1268 Failed to allocate ndlp for "
17409 "oxid:x%x SID:x%x\n", oxid, sid);
17410 return;
17411 }
17412
17413 lpfc_enqueue_node(vport, ndlp);
17414 } else if (!NLP_CHK_NODE_ACT(ndlp)) {
17415
17416 ndlp = lpfc_enable_node(vport, ndlp, NLP_STE_UNUSED_NODE);
17417 if (!ndlp) {
17418 lpfc_printf_vlog(vport, KERN_WARNING, LOG_ELS,
17419 "3275 Failed to active ndlp found "
17420 "for oxid:x%x SID:x%x\n", oxid, sid);
17421 return;
17422 }
17423 }
17424
17425
17426 ctiocb = lpfc_sli_get_iocbq(phba);
17427 if (!ctiocb)
17428 return;
17429
17430
17431 fctl = sli4_fctl_from_fc_hdr(fc_hdr);
17432
17433 icmd = &ctiocb->iocb;
17434 icmd->un.xseq64.bdl.bdeSize = 0;
17435 icmd->un.xseq64.bdl.ulpIoTag32 = 0;
17436 icmd->un.xseq64.w5.hcsw.Dfctl = 0;
17437 icmd->un.xseq64.w5.hcsw.Rctl = FC_RCTL_BA_ACC;
17438 icmd->un.xseq64.w5.hcsw.Type = FC_TYPE_BLS;
17439
17440
17441 icmd->ulpCommand = CMD_XMIT_BLS_RSP64_CX;
17442 icmd->ulpBdeCount = 0;
17443 icmd->ulpLe = 1;
17444 icmd->ulpClass = CLASS3;
17445 icmd->ulpContext = phba->sli4_hba.rpi_ids[ndlp->nlp_rpi];
17446 ctiocb->context1 = lpfc_nlp_get(ndlp);
17447
17448 ctiocb->iocb_cmpl = NULL;
17449 ctiocb->vport = phba->pport;
17450 ctiocb->iocb_cmpl = lpfc_sli4_seq_abort_rsp_cmpl;
17451 ctiocb->sli4_lxritag = NO_XRI;
17452 ctiocb->sli4_xritag = NO_XRI;
17453
17454 if (fctl & FC_FC_EX_CTX)
17455
17456
17457
17458 xri = oxid;
17459 else
17460 xri = rxid;
17461 lxri = lpfc_sli4_xri_inrange(phba, xri);
17462 if (lxri != NO_XRI)
17463 lpfc_set_rrq_active(phba, ndlp, lxri,
17464 (xri == oxid) ? rxid : oxid, 0);
17465
17466
17467
17468
17469
17470 if ((fctl & FC_FC_EX_CTX) &&
17471 (lxri > lpfc_sli4_get_iocb_cnt(phba))) {
17472 icmd->un.xseq64.w5.hcsw.Rctl = FC_RCTL_BA_RJT;
17473 bf_set(lpfc_vndr_code, &icmd->un.bls_rsp, 0);
17474 bf_set(lpfc_rsn_expln, &icmd->un.bls_rsp, FC_BA_RJT_INV_XID);
17475 bf_set(lpfc_rsn_code, &icmd->un.bls_rsp, FC_BA_RJT_UNABLE);
17476 }
17477
17478
17479
17480
17481
17482 if (aborted == false) {
17483 icmd->un.xseq64.w5.hcsw.Rctl = FC_RCTL_BA_RJT;
17484 bf_set(lpfc_vndr_code, &icmd->un.bls_rsp, 0);
17485 bf_set(lpfc_rsn_expln, &icmd->un.bls_rsp, FC_BA_RJT_INV_XID);
17486 bf_set(lpfc_rsn_code, &icmd->un.bls_rsp, FC_BA_RJT_UNABLE);
17487 }
17488
17489 if (fctl & FC_FC_EX_CTX) {
17490
17491
17492
17493
17494 bf_set(lpfc_abts_orig, &icmd->un.bls_rsp, LPFC_ABTS_UNSOL_RSP);
17495 } else {
17496
17497
17498
17499
17500 bf_set(lpfc_abts_orig, &icmd->un.bls_rsp, LPFC_ABTS_UNSOL_INT);
17501 }
17502 bf_set(lpfc_abts_rxid, &icmd->un.bls_rsp, rxid);
17503 bf_set(lpfc_abts_oxid, &icmd->un.bls_rsp, oxid);
17504
17505
17506 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
17507 "1200 Send BLS cmd x%x on oxid x%x Data: x%x\n",
17508 icmd->un.xseq64.w5.hcsw.Rctl, oxid, phba->link_state);
17509
17510 rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, ctiocb, 0);
17511 if (rc == IOCB_ERROR) {
17512 lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS,
17513 "2925 Failed to issue CT ABTS RSP x%x on "
17514 "xri x%x, Data x%x\n",
17515 icmd->un.xseq64.w5.hcsw.Rctl, oxid,
17516 phba->link_state);
17517 lpfc_nlp_put(ndlp);
17518 ctiocb->context1 = NULL;
17519 lpfc_sli_release_iocbq(phba, ctiocb);
17520 }
17521}
17522
17523
17524
17525
17526
17527
17528
17529
17530
17531
17532
17533
17534
17535
/**
 * lpfc_sli4_handle_unsol_abort - Handle an unsolicited ABTS frame
 * @vport: The vport the ABTS frame was received on.
 * @dmabuf: Buffer containing the ABTS frame.
 *
 * Copies the FC header (the buffer is freed below), attempts to abort the
 * targeted sequence either by dropping a partially assembled local
 * sequence or by deferring to the upper-layer protocol, and then either
 * forwards the event to the NVMe target code or answers the initiator
 * with a BLS accept/reject.
 **/
static void
lpfc_sli4_handle_unsol_abort(struct lpfc_vport *vport,
			     struct hbq_dmabuf *dmabuf)
{
	struct lpfc_hba *phba = vport->phba;
	struct fc_frame_header fc_hdr;
	uint32_t fctl;
	bool aborted;

	/* Make a copy of fc_hdr before the dmabuf being released */
	memcpy(&fc_hdr, dmabuf->hbuf.virt, sizeof(struct fc_frame_header));
	fctl = sli4_fctl_from_fc_hdr(&fc_hdr);

	if (fctl & FC_FC_EX_CTX) {
		/* ABTS from the exchange responder: nothing partially
		 * assembled on this side to clean up.
		 */
		aborted = true;
	} else {
		/* ABTS from the exchange initiator: first try to drop a
		 * partially assembled receive sequence, then let the ULP
		 * attempt the abort.
		 */
		aborted = lpfc_sli4_abort_partial_seq(vport, dmabuf);
		if (aborted == false)
			aborted = lpfc_sli4_abort_ulp_seq(vport, dmabuf);
	}
	lpfc_in_buf_free(phba, &dmabuf->dbuf);

	if (phba->nvmet_support) {
		/* NVMe target handles the unsol abort itself */
		lpfc_nvmet_rcv_unsol_abort(vport, &fc_hdr);
		return;
	}

	/* Respond with BA_ACC or BA_RJT accordingly */
	lpfc_sli4_seq_abort_rsp(vport, &fc_hdr, aborted);
}
17568
17569
17570
17571
17572
17573
17574
17575
17576
17577
17578
17579
17580
17581static int
17582lpfc_seq_complete(struct hbq_dmabuf *dmabuf)
17583{
17584 struct fc_frame_header *hdr;
17585 struct lpfc_dmabuf *d_buf;
17586 struct hbq_dmabuf *seq_dmabuf;
17587 uint32_t fctl;
17588 int seq_count = 0;
17589
17590 hdr = (struct fc_frame_header *)dmabuf->hbuf.virt;
17591
17592 if (hdr->fh_seq_cnt != seq_count)
17593 return 0;
17594 fctl = (hdr->fh_f_ctl[0] << 16 |
17595 hdr->fh_f_ctl[1] << 8 |
17596 hdr->fh_f_ctl[2]);
17597
17598 if (fctl & FC_FC_END_SEQ)
17599 return 1;
17600 list_for_each_entry(d_buf, &dmabuf->dbuf.list, list) {
17601 seq_dmabuf = container_of(d_buf, struct hbq_dmabuf, dbuf);
17602 hdr = (struct fc_frame_header *)seq_dmabuf->hbuf.virt;
17603
17604 if (++seq_count != be16_to_cpu(hdr->fh_seq_cnt))
17605 return 0;
17606 fctl = (hdr->fh_f_ctl[0] << 16 |
17607 hdr->fh_f_ctl[1] << 8 |
17608 hdr->fh_f_ctl[2]);
17609
17610 if (fctl & FC_FC_END_SEQ)
17611 return 1;
17612 }
17613 return 0;
17614}
17615
17616
17617
17618
17619
17620
17621
17622
17623
17624
17625
17626
17627
17628
/**
 * lpfc_prep_seq - Convert a complete receive sequence into a chain of iocbqs
 * @vport: The vport the sequence was received on.
 * @seq_dmabuf: Head frame of the fully assembled sequence.
 *
 * Removes the sequence from the vport's receive buffer list and packages
 * its frame buffers into unsolicited-receive iocbqs: each iocbq carries up
 * to two buffers (context2 and context3).  Additional iocbqs are chained
 * on the first iocbq's list.  If an iocbq cannot be allocated mid-walk,
 * the remaining buffers are freed and the first iocbq is marked with
 * IOSTAT_FCP_RSP_ERROR / IOERR_NO_RESOURCES.
 *
 * Return: the first iocbq of the chain, or NULL if even the first iocbq
 * could not be allocated (note: in that case the remaining frame buffers
 * are freed in the loop, but seq_dmabuf itself is not -- caller frees it).
 **/
static struct lpfc_iocbq *
lpfc_prep_seq(struct lpfc_vport *vport, struct hbq_dmabuf *seq_dmabuf)
{
	struct hbq_dmabuf *hbq_buf;
	struct lpfc_dmabuf *d_buf, *n_buf;
	struct lpfc_iocbq *first_iocbq, *iocbq;
	struct fc_frame_header *fc_hdr;
	uint32_t sid;
	uint32_t len, tot_len;
	struct ulp_bde64 *pbde;

	fc_hdr = (struct fc_frame_header *)seq_dmabuf->hbuf.virt;
	/* remove from receive buffer list */
	list_del_init(&seq_dmabuf->hbuf.list);
	lpfc_update_rcv_time_stamp(vport);
	/* get the Remote Port's SID */
	sid = sli4_sid_from_fc_hdr(fc_hdr);
	tot_len = 0;
	/* Get an iocbq struct to fill in. */
	first_iocbq = lpfc_sli_get_iocbq(vport->phba);
	if (first_iocbq) {
		/* Initialize the first IOCB. */
		first_iocbq->iocb.unsli3.rcvsli3.acc_len = 0;
		first_iocbq->iocb.ulpStatus = IOSTAT_SUCCESS;
		first_iocbq->vport = vport;

		/* Check FC Header to see what TYPE of frame we are rcv'ing */
		if (sli4_type_from_fc_hdr(fc_hdr) == FC_TYPE_ELS) {
			first_iocbq->iocb.ulpCommand = CMD_IOCB_RCV_ELS64_CX;
			first_iocbq->iocb.un.rcvels.parmRo =
				sli4_did_from_fc_hdr(fc_hdr);
			first_iocbq->iocb.ulpPU = PARM_NPIV_DID;
		} else
			first_iocbq->iocb.ulpCommand = CMD_IOCB_RCV_SEQ64_CX;
		first_iocbq->iocb.ulpContext = NO_XRI;
		first_iocbq->iocb.unsli3.rcvsli3.ox_id =
			be16_to_cpu(fc_hdr->fh_ox_id);
		/* iocbq is prepped for internal consumption.  Physical vpi. */
		first_iocbq->iocb.unsli3.rcvsli3.vpi =
			vport->phba->vpi_ids[vport->vpi];
		/* put the first buffer into the first IOCBq */
		tot_len = bf_get(lpfc_rcqe_length,
				 &seq_dmabuf->cq_event.cqe.rcqe_cmpl);

		first_iocbq->context2 = &seq_dmabuf->dbuf;
		first_iocbq->context3 = NULL;
		first_iocbq->iocb.ulpBdeCount = 1;
		/* BDE size is capped at the receive buffer size */
		if (tot_len > LPFC_DATA_BUF_SIZE)
			first_iocbq->iocb.un.cont64[0].tus.f.bdeSize =
				LPFC_DATA_BUF_SIZE;
		else
			first_iocbq->iocb.un.cont64[0].tus.f.bdeSize = tot_len;

		first_iocbq->iocb.un.rcvels.remoteID = sid;

		first_iocbq->iocb.unsli3.rcvsli3.acc_len = tot_len;
	}
	iocbq = first_iocbq;
	/*
	 * Each IOCBq can have two buffers assigned, so go through the list
	 * of buffers for this sequence and save two buffers in each IOCBq.
	 */
	list_for_each_entry_safe(d_buf, n_buf, &seq_dmabuf->dbuf.list, list) {
		if (!iocbq) {
			/* Allocation failed earlier; just free the frame */
			lpfc_in_buf_free(vport->phba, d_buf);
			continue;
		}
		if (!iocbq->context3) {
			/* Second buffer slot of the current iocbq is free */
			iocbq->context3 = d_buf;
			iocbq->iocb.ulpBdeCount++;
			/* We need to get the size out of the right CQE */
			hbq_buf = container_of(d_buf, struct hbq_dmabuf, dbuf);
			len = bf_get(lpfc_rcqe_length,
				     &hbq_buf->cq_event.cqe.rcqe_cmpl);
			pbde = (struct ulp_bde64 *)
				&iocbq->iocb.unsli3.sli3Words[4];
			if (len > LPFC_DATA_BUF_SIZE)
				pbde->tus.f.bdeSize = LPFC_DATA_BUF_SIZE;
			else
				pbde->tus.f.bdeSize = len;

			iocbq->iocb.unsli3.rcvsli3.acc_len += len;
			tot_len += len;
		} else {
			/* Current iocbq full: allocate another for the chain */
			iocbq = lpfc_sli_get_iocbq(vport->phba);
			if (!iocbq) {
				if (first_iocbq) {
					first_iocbq->iocb.ulpStatus =
						IOSTAT_FCP_RSP_ERROR;
					first_iocbq->iocb.un.ulpWord[4] =
						IOERR_NO_RESOURCES;
				}
				lpfc_in_buf_free(vport->phba, d_buf);
				continue;
			}
			/* We need to get the size out of the right CQE */
			hbq_buf = container_of(d_buf, struct hbq_dmabuf, dbuf);
			len = bf_get(lpfc_rcqe_length,
				     &hbq_buf->cq_event.cqe.rcqe_cmpl);
			iocbq->context2 = d_buf;
			iocbq->context3 = NULL;
			iocbq->iocb.ulpBdeCount = 1;
			if (len > LPFC_DATA_BUF_SIZE)
				iocbq->iocb.un.cont64[0].tus.f.bdeSize =
					LPFC_DATA_BUF_SIZE;
			else
				iocbq->iocb.un.cont64[0].tus.f.bdeSize = len;

			tot_len += len;
			iocbq->iocb.unsli3.rcvsli3.acc_len = tot_len;

			iocbq->iocb.un.rcvels.remoteID = sid;
			list_add_tail(&iocbq->list, &first_iocbq->list);
		}
	}
	return first_iocbq;
}
17746
/**
 * lpfc_sli4_send_seq_to_ulp - Deliver a complete sequence to the upper layer
 * @vport: The vport the sequence was received on.
 * @seq_dmabuf: Head frame of the fully assembled sequence.
 *
 * Packages the sequence into iocbqs via lpfc_prep_seq(), hands the chain to
 * the unsolicited iocb handler for the ELS ring, and then releases every
 * iocbq of the chain back to the pool.
 **/
static void
lpfc_sli4_send_seq_to_ulp(struct lpfc_vport *vport,
			  struct hbq_dmabuf *seq_dmabuf)
{
	struct fc_frame_header *fc_hdr;
	struct lpfc_iocbq *iocbq, *curr_iocb, *next_iocb;
	struct lpfc_hba *phba = vport->phba;

	fc_hdr = (struct fc_frame_header *)seq_dmabuf->hbuf.virt;
	iocbq = lpfc_prep_seq(vport, seq_dmabuf);
	if (!iocbq) {
		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
				"2707 Ring %d handler: Failed to allocate "
				"iocb Rctl x%x Type x%x received\n",
				LPFC_ELS_RING,
				fc_hdr->fh_r_ctl, fc_hdr->fh_type);
		return;
	}
	if (!lpfc_complete_unsol_iocb(phba,
				      phba->sli4_hba.els_wq->pring,
				      iocbq, fc_hdr->fh_r_ctl,
				      fc_hdr->fh_type))
		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
				"2540 Ring %d handler: unexpected Rctl "
				"x%x Type x%x received\n",
				LPFC_ELS_RING,
				fc_hdr->fh_r_ctl, fc_hdr->fh_type);

	/* Free iocb created in lpfc_prep_seq (chained iocbqs first) */
	list_for_each_entry_safe(curr_iocb, next_iocb,
		&iocbq->list, list) {
		list_del_init(&curr_iocb->list);
		lpfc_sli_release_iocbq(phba, curr_iocb);
	}
	lpfc_sli_release_iocbq(phba, iocbq);
}
17783
17784static void
17785lpfc_sli4_mds_loopback_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
17786 struct lpfc_iocbq *rspiocb)
17787{
17788 struct lpfc_dmabuf *pcmd = cmdiocb->context2;
17789
17790 if (pcmd && pcmd->virt)
17791 dma_pool_free(phba->lpfc_drb_pool, pcmd->virt, pcmd->phys);
17792 kfree(pcmd);
17793 lpfc_sli_release_iocbq(phba, cmdiocb);
17794 lpfc_drain_txq(phba);
17795}
17796
/**
 * lpfc_sli4_handle_mds_loopback - Echo an MDS diagnostics frame back out
 * @vport: The vport the frame was received on.
 * @dmabuf: Buffer containing the received frame and its completion entry.
 *
 * Copies the received payload into a freshly allocated DMA buffer and
 * issues a SEND_FRAME WQE to transmit it back, reusing the received FC
 * header words.  If no iocbq is available, the event is queued to the
 * worker thread for retry.  On any other failure the frame is dropped
 * and all resources are released.
 **/
static void
lpfc_sli4_handle_mds_loopback(struct lpfc_vport *vport,
			      struct hbq_dmabuf *dmabuf)
{
	struct fc_frame_header *fc_hdr;
	struct lpfc_hba *phba = vport->phba;
	struct lpfc_iocbq *iocbq = NULL;
	union  lpfc_wqe *wqe;
	struct lpfc_dmabuf *pcmd = NULL;
	uint32_t frame_len;
	int rc;
	unsigned long iflags;

	fc_hdr = (struct fc_frame_header *)dmabuf->hbuf.virt;
	frame_len = bf_get(lpfc_rcqe_length, &dmabuf->cq_event.cqe.rcqe_cmpl);

	/* Send the received frame back */
	iocbq = lpfc_sli_get_iocbq(phba);
	if (!iocbq) {
		/* Queue cq event and wakeup worker thread to process it */
		spin_lock_irqsave(&phba->hbalock, iflags);
		list_add_tail(&dmabuf->cq_event.list,
			      &phba->sli4_hba.sp_queue_event);
		phba->hba_flag |= HBA_SP_QUEUE_EVT;
		spin_unlock_irqrestore(&phba->hbalock, iflags);
		lpfc_worker_wake_up(phba);
		return;
	}

	/* Allocate buffer for command payload */
	pcmd = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
	if (pcmd)
		pcmd->virt = dma_pool_alloc(phba->lpfc_drb_pool, GFP_KERNEL,
					    &pcmd->phys);
	if (!pcmd || !pcmd->virt)
		goto exit;

	INIT_LIST_HEAD(&pcmd->list);

	/* copyin the payload.
	 * NOTE(review): frame_len comes from the RCQE and is not bounded
	 * here against the lpfc_drb_pool buffer size before the memcpy --
	 * confirm the hardware cannot report a length larger than the
	 * pool's buffer size.
	 */
	memcpy(pcmd->virt, dmabuf->dbuf.virt, frame_len);

	/* fill in BDE's for command */
	iocbq->iocb.un.xseq64.bdl.addrHigh = putPaddrHigh(pcmd->phys);
	iocbq->iocb.un.xseq64.bdl.addrLow = putPaddrLow(pcmd->phys);
	iocbq->iocb.un.xseq64.bdl.bdeFlags = BUFF_TYPE_BDE_64;
	iocbq->iocb.un.xseq64.bdl.bdeSize = frame_len;

	iocbq->context2 = pcmd;
	iocbq->vport = vport;
	iocbq->iocb_flag &= ~LPFC_FIP_ELS_ID_MASK;
	iocbq->iocb_flag |= LPFC_USE_FCPWQIDX;

	/*
	 * Setup rest of the iocb as though it were a WQE
	 * Build the SEND_FRAME WQE, echoing back the received FC header
	 * words.
	 */
	wqe = (union lpfc_wqe *)&iocbq->iocb;

	wqe->send_frame.frame_len = frame_len;
	wqe->send_frame.fc_hdr_wd0 = be32_to_cpu(*((uint32_t *)fc_hdr));
	wqe->send_frame.fc_hdr_wd1 = be32_to_cpu(*((uint32_t *)fc_hdr + 1));
	wqe->send_frame.fc_hdr_wd2 = be32_to_cpu(*((uint32_t *)fc_hdr + 2));
	wqe->send_frame.fc_hdr_wd3 = be32_to_cpu(*((uint32_t *)fc_hdr + 3));
	wqe->send_frame.fc_hdr_wd4 = be32_to_cpu(*((uint32_t *)fc_hdr + 4));
	wqe->send_frame.fc_hdr_wd5 = be32_to_cpu(*((uint32_t *)fc_hdr + 5));

	iocbq->iocb.ulpCommand = CMD_SEND_FRAME;
	iocbq->iocb.ulpLe = 1;
	iocbq->iocb_cmpl = lpfc_sli4_mds_loopback_cmpl;
	rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, iocbq, 0);
	if (rc == IOCB_ERROR)
		goto exit;

	lpfc_in_buf_free(phba, &dmabuf->dbuf);
	return;

exit:
	lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
			"2023 Unable to process MDS loopback frame\n");
	if (pcmd && pcmd->virt)
		dma_pool_free(phba->lpfc_drb_pool, pcmd->virt, pcmd->phys);
	kfree(pcmd);
	if (iocbq)
		lpfc_sli_release_iocbq(phba, iocbq);
	lpfc_in_buf_free(phba, &dmabuf->dbuf);
}
17884
17885
17886
17887
17888
17889
17890
17891
17892
17893
17894
17895
/**
 * lpfc_sli4_handle_received_buffer - Dispatch a received unsolicited frame
 * @phba: Pointer to HBA context object.
 * @dmabuf: Buffer containing the received frame and its completion entry.
 *
 * Entry point for unsolicited receive processing: routes MDS diagnostics
 * frames to the loopback handler, validates the FC header, resolves the
 * destination vport from the FCF index and DID, handles unsolicited ABTS
 * frames, and otherwise accumulates the frame into its sequence -- sending
 * the sequence up to the ULP once it is complete.  Frames that fail any
 * check are freed here.
 **/
void
lpfc_sli4_handle_received_buffer(struct lpfc_hba *phba,
				 struct hbq_dmabuf *dmabuf)
{
	struct hbq_dmabuf *seq_dmabuf;
	struct fc_frame_header *fc_hdr;
	struct lpfc_vport *vport;
	uint32_t fcfi;
	uint32_t did;

	/* Process each received buffer */
	fc_hdr = (struct fc_frame_header *)dmabuf->hbuf.virt;

	if (fc_hdr->fh_r_ctl == FC_RCTL_MDS_DIAGS ||
	    fc_hdr->fh_r_ctl == FC_RCTL_DD_UNSOL_DATA) {
		vport = phba->pport;
		/* Handle MDS Loopback frames */
		lpfc_sli4_handle_mds_loopback(vport, dmabuf);
		return;
	}

	/* check to see if this a valid type of frame */
	if (lpfc_fc_frame_check(phba, fc_hdr)) {
		lpfc_in_buf_free(phba, &dmabuf->dbuf);
		return;
	}

	/* The FCF index field layout depends on the RCQE version */
	if ((bf_get(lpfc_cqe_code,
		    &dmabuf->cq_event.cqe.rcqe_cmpl) == CQE_CODE_RECEIVE_V1))
		fcfi = bf_get(lpfc_rcqe_fcf_id_v1,
			      &dmabuf->cq_event.cqe.rcqe_cmpl);
	else
		fcfi = bf_get(lpfc_rcqe_fcf_id,
			      &dmabuf->cq_event.cqe.rcqe_cmpl);

	/* d_id this frame is directed to */
	did = sli4_did_from_fc_hdr(fc_hdr);

	vport = lpfc_fc_frame_to_vport(phba, fc_hdr, fcfi, did);
	if (!vport) {
		/* throw out the frame */
		lpfc_in_buf_free(phba, &dmabuf->dbuf);
		return;
	}

	/* vport is registered unless we rcv a FLOGI directed to Fabric_DID */
	if (!(vport->vpi_state & LPFC_VPI_REGISTERED) &&
		(did != Fabric_DID)) {
		/*
		 * Throw out the frame if we are not pt2pt.
		 * The pt2pt protocol allows for discovery frames
		 * to be received without a registered VPI.
		 */
		if (!(vport->fc_flag & FC_PT2PT) ||
			(phba->link_state == LPFC_HBA_READY)) {
			lpfc_in_buf_free(phba, &dmabuf->dbuf);
			return;
		}
	}

	/* Handle the basic abort sequence (BA_ABTS) event */
	if (fc_hdr->fh_r_ctl == FC_RCTL_BA_ABTS) {
		lpfc_sli4_handle_unsol_abort(vport, dmabuf);
		return;
	}

	/* Link this frame */
	seq_dmabuf = lpfc_fc_frame_add(vport, dmabuf);
	if (!seq_dmabuf) {
		/* unable to add frame to vport - throw it out */
		lpfc_in_buf_free(phba, &dmabuf->dbuf);
		return;
	}
	/* If not last frame in sequence continue processing frames. */
	if (!lpfc_seq_complete(seq_dmabuf))
		return;

	/* Send the complete sequence to the upper layer protocol */
	lpfc_sli4_send_seq_to_ulp(vport, seq_dmabuf);
}
17976
17977
17978
17979
17980
17981
17982
17983
17984
17985
17986
17987
17988
17989
17990
17991
17992
17993
17994
17995
17996
17997
17998int
17999lpfc_sli4_post_all_rpi_hdrs(struct lpfc_hba *phba)
18000{
18001 struct lpfc_rpi_hdr *rpi_page;
18002 uint32_t rc = 0;
18003 uint16_t lrpi = 0;
18004
18005
18006 if (!phba->sli4_hba.rpi_hdrs_in_use)
18007 goto exit;
18008 if (phba->sli4_hba.extents_in_use)
18009 return -EIO;
18010
18011 list_for_each_entry(rpi_page, &phba->sli4_hba.lpfc_rpi_hdr_list, list) {
18012
18013
18014
18015
18016
18017 if (bf_get(lpfc_rpi_rsrc_rdy, &phba->sli4_hba.sli4_flags) !=
18018 LPFC_RPI_RSRC_RDY)
18019 rpi_page->start_rpi = phba->sli4_hba.rpi_ids[lrpi];
18020
18021 rc = lpfc_sli4_post_rpi_hdr(phba, rpi_page);
18022 if (rc != MBX_SUCCESS) {
18023 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
18024 "2008 Error %d posting all rpi "
18025 "headers\n", rc);
18026 rc = -EIO;
18027 break;
18028 }
18029 }
18030
18031 exit:
18032 bf_set(lpfc_rpi_rsrc_rdy, &phba->sli4_hba.sli4_flags,
18033 LPFC_RPI_RSRC_RDY);
18034 return rc;
18035}
18036
18037
18038
18039
18040
18041
18042
18043
18044
18045
18046
18047
18048
18049
18050
18051int
18052lpfc_sli4_post_rpi_hdr(struct lpfc_hba *phba, struct lpfc_rpi_hdr *rpi_page)
18053{
18054 LPFC_MBOXQ_t *mboxq;
18055 struct lpfc_mbx_post_hdr_tmpl *hdr_tmpl;
18056 uint32_t rc = 0;
18057 uint32_t shdr_status, shdr_add_status;
18058 union lpfc_sli4_cfg_shdr *shdr;
18059
18060
18061 if (!phba->sli4_hba.rpi_hdrs_in_use)
18062 return rc;
18063 if (phba->sli4_hba.extents_in_use)
18064 return -EIO;
18065
18066
18067 mboxq = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
18068 if (!mboxq) {
18069 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
18070 "2001 Unable to allocate memory for issuing "
18071 "SLI_CONFIG_SPECIAL mailbox command\n");
18072 return -ENOMEM;
18073 }
18074
18075
18076 hdr_tmpl = &mboxq->u.mqe.un.hdr_tmpl;
18077 lpfc_sli4_config(phba, mboxq, LPFC_MBOX_SUBSYSTEM_FCOE,
18078 LPFC_MBOX_OPCODE_FCOE_POST_HDR_TEMPLATE,
18079 sizeof(struct lpfc_mbx_post_hdr_tmpl) -
18080 sizeof(struct lpfc_sli4_cfg_mhdr),
18081 LPFC_SLI4_MBX_EMBED);
18082
18083
18084
18085 bf_set(lpfc_mbx_post_hdr_tmpl_rpi_offset, hdr_tmpl,
18086 rpi_page->start_rpi);
18087 bf_set(lpfc_mbx_post_hdr_tmpl_page_cnt,
18088 hdr_tmpl, rpi_page->page_count);
18089
18090 hdr_tmpl->rpi_paddr_lo = putPaddrLow(rpi_page->dmabuf->phys);
18091 hdr_tmpl->rpi_paddr_hi = putPaddrHigh(rpi_page->dmabuf->phys);
18092 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
18093 shdr = (union lpfc_sli4_cfg_shdr *) &hdr_tmpl->header.cfg_shdr;
18094 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
18095 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
18096 if (rc != MBX_TIMEOUT)
18097 mempool_free(mboxq, phba->mbox_mem_pool);
18098 if (shdr_status || shdr_add_status || rc) {
18099 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
18100 "2514 POST_RPI_HDR mailbox failed with "
18101 "status x%x add_status x%x, mbx status x%x\n",
18102 shdr_status, shdr_add_status, rc);
18103 rc = -ENXIO;
18104 } else {
18105
18106
18107
18108
18109 spin_lock_irq(&phba->hbalock);
18110 phba->sli4_hba.next_rpi = rpi_page->next_rpi;
18111 spin_unlock_irq(&phba->hbalock);
18112 }
18113 return rc;
18114}
18115
18116
18117
18118
18119
18120
18121
18122
18123
18124
18125
18126
18127
18128
/**
 * lpfc_sli4_alloc_rpi - Release an rpi for reuse
 * @phba: pointer to lpfc hba data structure.
 *
 * Allocates the next available RPI from the driver's bitmap under the hba
 * lock, bumping the usage counters.  When the number of remaining RPIs
 * drops below the low-water mark and RPI headers are in use, a new RPI
 * header page is created and posted to grow the pool.
 *
 * Return: the allocated rpi, or LPFC_RPI_ALLOC_ERROR when exhausted.
 **/
int
lpfc_sli4_alloc_rpi(struct lpfc_hba *phba)
{
	unsigned long rpi;
	uint16_t max_rpi, rpi_limit;
	uint16_t rpi_remaining, lrpi = 0;
	struct lpfc_rpi_hdr *rpi_hdr;
	unsigned long iflag;

	/*
	 * Fetch the next logical rpi.  Because this index is logical,
	 * the driver starts at 0 each time.
	 */
	spin_lock_irqsave(&phba->hbalock, iflag);
	max_rpi = phba->sli4_hba.max_cfg_param.max_rpi;
	rpi_limit = phba->sli4_hba.next_rpi;

	rpi = find_next_zero_bit(phba->sli4_hba.rpi_bmask, rpi_limit, 0);
	if (rpi >= rpi_limit)
		rpi = LPFC_RPI_ALLOC_ERROR;
	else {
		set_bit(rpi, phba->sli4_hba.rpi_bmask);
		phba->sli4_hba.max_cfg_param.rpi_used++;
		phba->sli4_hba.rpi_count++;
	}
	lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
			"0001 rpi:%x max:%x lim:%x\n",
			(int) rpi, max_rpi, rpi_limit);

	/*
	 * Don't try to allocate more rpi header regions if the device limit
	 * has been exhausted.
	 */
	if ((rpi == LPFC_RPI_ALLOC_ERROR) &&
	    (phba->sli4_hba.rpi_count >= max_rpi)) {
		spin_unlock_irqrestore(&phba->hbalock, iflag);
		return rpi;
	}

	/*
	 * RPI header postings are not required for SLI4 ports capable of
	 * extents.
	 */
	if (!phba->sli4_hba.rpi_hdrs_in_use) {
		spin_unlock_irqrestore(&phba->hbalock, iflag);
		return rpi;
	}

	/*
	 * If the driver is running low on rpi resources, allocate another
	 * page now.  Note that the next_rpi value is used because
	 * it represents how many are actually in use whereas max_rpi notes
	 * how many are supported max by the device.
	 */
	rpi_remaining = phba->sli4_hba.next_rpi - phba->sli4_hba.rpi_count;
	spin_unlock_irqrestore(&phba->hbalock, iflag);
	if (rpi_remaining < LPFC_RPI_LOW_WATER_MARK) {
		rpi_hdr = lpfc_sli4_create_rpi_hdr(phba);
		if (!rpi_hdr) {
			lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
					"2002 Error Could not grow rpi "
					"count\n");
		} else {
			lrpi = rpi_hdr->start_rpi;
			rpi_hdr->start_rpi = phba->sli4_hba.rpi_ids[lrpi];
			lpfc_sli4_post_rpi_hdr(phba, rpi_hdr);
		}
	}

	return rpi;
}
18200
18201
18202
18203
18204
18205
18206
18207
18208static void
18209__lpfc_sli4_free_rpi(struct lpfc_hba *phba, int rpi)
18210{
18211 if (test_and_clear_bit(rpi, phba->sli4_hba.rpi_bmask)) {
18212 phba->sli4_hba.rpi_count--;
18213 phba->sli4_hba.max_cfg_param.rpi_used--;
18214 }
18215}
18216
18217
18218
18219
18220
18221
18222
18223
/**
 * lpfc_sli4_free_rpi - Release an rpi for reuse
 * @phba: pointer to lpfc hba data structure.
 * @rpi: rpi to free.
 *
 * Locked wrapper around __lpfc_sli4_free_rpi().
 **/
void
lpfc_sli4_free_rpi(struct lpfc_hba *phba, int rpi)
{
	spin_lock_irq(&phba->hbalock);
	__lpfc_sli4_free_rpi(phba, rpi);
	spin_unlock_irq(&phba->hbalock);
}
18231
18232
18233
18234
18235
18236
18237
18238
/**
 * lpfc_sli4_remove_rpis - Remove the rpi bitmask region
 * @phba: pointer to lpfc hba data structure.
 *
 * Frees the RPI allocation bitmap and id table and clears the
 * RPI-resources-ready flag.
 **/
void
lpfc_sli4_remove_rpis(struct lpfc_hba *phba)
{
	kfree(phba->sli4_hba.rpi_bmask);
	kfree(phba->sli4_hba.rpi_ids);
	bf_set(lpfc_rpi_rsrc_rdy, &phba->sli4_hba.sli4_flags, 0);
}
18246
18247
18248
18249
18250
18251
18252
18253
18254int
18255lpfc_sli4_resume_rpi(struct lpfc_nodelist *ndlp,
18256 void (*cmpl)(struct lpfc_hba *, LPFC_MBOXQ_t *), void *arg)
18257{
18258 LPFC_MBOXQ_t *mboxq;
18259 struct lpfc_hba *phba = ndlp->phba;
18260 int rc;
18261
18262
18263 mboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
18264 if (!mboxq)
18265 return -ENOMEM;
18266
18267
18268 lpfc_resume_rpi(mboxq, ndlp);
18269 if (cmpl) {
18270 mboxq->mbox_cmpl = cmpl;
18271 mboxq->ctx_buf = arg;
18272 mboxq->ctx_ndlp = ndlp;
18273 } else
18274 mboxq->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
18275 mboxq->vport = ndlp->vport;
18276 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_NOWAIT);
18277 if (rc == MBX_NOT_FINISHED) {
18278 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
18279 "2010 Resume RPI Mailbox failed "
18280 "status %d, mbxStatus x%x\n", rc,
18281 bf_get(lpfc_mqe_status, &mboxq->u.mqe));
18282 mempool_free(mboxq, phba->mbox_mem_pool);
18283 return -EIO;
18284 }
18285 return 0;
18286}
18287
18288
18289
18290
18291
18292
18293
18294
18295
18296
18297
/**
 * lpfc_sli4_init_vpi - Initialize a vpi with the port
 * @vport: Pointer to the vport for which the vpi is being initialized
 *
 * Issues an INIT_VPI mailbox command (synchronously, with the standard
 * mailbox timeout) for the vport's vpi.  On MBX_TIMEOUT the mailbox is
 * intentionally not freed because the firmware may still complete it.
 *
 * Return: 0 on success, -ENOMEM when no mailbox is available, -EIO on
 * mailbox failure.
 **/
int
lpfc_sli4_init_vpi(struct lpfc_vport *vport)
{
	LPFC_MBOXQ_t *mboxq;
	int rc = 0;
	int retval = MBX_SUCCESS;
	uint32_t mbox_tmo;
	struct lpfc_hba *phba = vport->phba;
	mboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!mboxq)
		return -ENOMEM;
	lpfc_init_vpi(phba, mboxq, vport->vpi);
	mbox_tmo = lpfc_mbox_tmo_val(phba, mboxq);
	rc = lpfc_sli_issue_mbox_wait(phba, mboxq, mbox_tmo);
	if (rc != MBX_SUCCESS) {
		lpfc_printf_vlog(vport, KERN_ERR, LOG_SLI,
				 "2022 INIT VPI Mailbox failed "
				 "status %d, mbxStatus x%x\n", rc,
				 bf_get(lpfc_mqe_status, &mboxq->u.mqe));
		retval = -EIO;
	}
	/* On timeout the mailbox is still owned by the firmware; leak it
	 * rather than free memory the hardware may still write to.
	 */
	if (rc != MBX_TIMEOUT)
		mempool_free(mboxq, vport->phba->mbox_mem_pool);

	return retval;
}
18324
18325
18326
18327
18328
18329
18330
18331
18332
18333
18334static void
18335lpfc_mbx_cmpl_add_fcf_record(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
18336{
18337 void *virt_addr;
18338 union lpfc_sli4_cfg_shdr *shdr;
18339 uint32_t shdr_status, shdr_add_status;
18340
18341 virt_addr = mboxq->sge_array->addr[0];
18342
18343 shdr = (union lpfc_sli4_cfg_shdr *) virt_addr;
18344 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
18345 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
18346
18347 if ((shdr_status || shdr_add_status) &&
18348 (shdr_status != STATUS_FCF_IN_USE))
18349 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
18350 "2558 ADD_FCF_RECORD mailbox failed with "
18351 "status x%x add_status x%x\n",
18352 shdr_status, shdr_add_status);
18353
18354 lpfc_sli4_mbox_cmd_free(phba, mboxq);
18355}
18356
18357
18358
18359
18360
18361
18362
18363
18364
18365
/**
 * lpfc_sli4_add_fcf_record - Manually add an FCF Record.
 * @phba: pointer to lpfc hba data structure.
 * @fcf_record: pointer to the initialized fcf record to add.
 *
 * Issues a non-embedded ADD_FCF mailbox command carrying the FCF index
 * followed by the full fcf record in the command's DMA region.  The
 * completion is handled asynchronously by lpfc_mbx_cmpl_add_fcf_record().
 *
 * Return: 0 when the mailbox was issued, -ENOMEM on allocation failure,
 * -EIO when the issue failed.
 **/
int
lpfc_sli4_add_fcf_record(struct lpfc_hba *phba, struct fcf_record *fcf_record)
{
	int rc = 0;
	LPFC_MBOXQ_t *mboxq;
	uint8_t *bytep;
	void *virt_addr;
	struct lpfc_mbx_sge sge;
	uint32_t alloc_len, req_len;
	uint32_t fcfindex;

	mboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!mboxq) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
			"2009 Failed to allocate mbox for ADD_FCF cmd\n");
		return -ENOMEM;
	}

	/* Payload: config header + fcf index word + fcf record */
	req_len = sizeof(struct fcf_record) + sizeof(union lpfc_sli4_cfg_shdr) +
		  sizeof(uint32_t);

	/* Allocate DMA memory and set up the non-embedded mailbox command */
	alloc_len = lpfc_sli4_config(phba, mboxq, LPFC_MBOX_SUBSYSTEM_FCOE,
				     LPFC_MBOX_OPCODE_FCOE_ADD_FCF,
				     req_len, LPFC_SLI4_MBX_NEMBED);
	if (alloc_len < req_len) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
			"2523 Allocated DMA memory size (x%x) is "
			"less than the requested DMA memory "
			"size (x%x)\n", alloc_len, req_len);
		lpfc_sli4_mbox_cmd_free(phba, mboxq);
		return -ENOMEM;
	}

	/*
	 * Get the first SGE entry from the non-embedded DMA memory.  This
	 * routine only uses a single SGE.
	 * NOTE(review): @sge is filled here but never read afterwards
	 * (virt_addr is taken from sge_array directly) -- confirm the call
	 * is still needed.
	 */
	lpfc_sli4_mbx_sge_get(mboxq, 0, &sge);
	virt_addr = mboxq->sge_array->addr[0];
	/*
	 * Configure the FCF record for FCFI 0.  This is the driver's
	 * hardcoded default and gets used in nonFIP mode.
	 */
	fcfindex = bf_get(lpfc_fcf_record_fcf_index, fcf_record);
	bytep = virt_addr + sizeof(union lpfc_sli4_cfg_shdr);
	lpfc_sli_pcimem_bcopy(&fcfindex, bytep, sizeof(uint32_t));

	/*
	 * Copy the fcf_index and the FCF Record Data.  The data starts after
	 * the FCoE header plus word10.  The data copy needs to be endian
	 * correct.
	 */
	bytep += sizeof(uint32_t);
	lpfc_sli_pcimem_bcopy(fcf_record, bytep, sizeof(struct fcf_record));
	mboxq->vport = phba->pport;
	mboxq->mbox_cmpl = lpfc_mbx_cmpl_add_fcf_record;
	rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_NOWAIT);
	if (rc == MBX_NOT_FINISHED) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
			"2515 ADD_FCF_RECORD mailbox failed with "
			"status 0x%x\n", rc);
		lpfc_sli4_mbox_cmd_free(phba, mboxq);
		rc = -EIO;
	} else
		rc = 0;

	return rc;
}
18435
18436
18437
18438
18439
18440
18441
18442
18443
18444
18445
/**
 * lpfc_sli4_build_dflt_fcf_record - Build the driver's default FCF Record.
 * @phba: pointer to lpfc hba data structure.
 * @fcf_record: pointer to the fcf record to initialize.
 * @fcf_index: FCF table entry index to assign to the record.
 *
 * Zeroes @fcf_record and fills it with the driver's default values:
 * standard FCoE MAC bytes built from the hba's FC map, default receive
 * size / FKA period / FIP priority, both FPMA and SPMA addressing modes,
 * and -- when a VLAN is configured -- the matching bit in the VLAN bitmap.
 **/
void
lpfc_sli4_build_dflt_fcf_record(struct lpfc_hba *phba,
				struct fcf_record *fcf_record,
				uint16_t fcf_index)
{
	memset(fcf_record, 0, sizeof(struct fcf_record));
	fcf_record->max_rcv_size = LPFC_FCOE_MAX_RCV_SIZE;
	fcf_record->fka_adv_period = LPFC_FCOE_FKA_ADV_PER;
	fcf_record->fip_priority = LPFC_FCOE_FIP_PRIORITY;
	/* MAC bytes 0-2 come from the fabric's FC map */
	bf_set(lpfc_fcf_record_mac_0, fcf_record, phba->fc_map[0]);
	bf_set(lpfc_fcf_record_mac_1, fcf_record, phba->fc_map[1]);
	bf_set(lpfc_fcf_record_mac_2, fcf_record, phba->fc_map[2]);
	bf_set(lpfc_fcf_record_mac_3, fcf_record, LPFC_FCOE_FCF_MAC3);
	bf_set(lpfc_fcf_record_mac_4, fcf_record, LPFC_FCOE_FCF_MAC4);
	bf_set(lpfc_fcf_record_mac_5, fcf_record, LPFC_FCOE_FCF_MAC5);
	bf_set(lpfc_fcf_record_fc_map_0, fcf_record, phba->fc_map[0]);
	bf_set(lpfc_fcf_record_fc_map_1, fcf_record, phba->fc_map[1]);
	bf_set(lpfc_fcf_record_fc_map_2, fcf_record, phba->fc_map[2]);
	bf_set(lpfc_fcf_record_fcf_valid, fcf_record, 1);
	bf_set(lpfc_fcf_record_fcf_avail, fcf_record, 1);
	bf_set(lpfc_fcf_record_fcf_index, fcf_record, fcf_index);
	bf_set(lpfc_fcf_record_mac_addr_prov, fcf_record,
		LPFC_FCF_FPMA | LPFC_FCF_SPMA);
	/* Set the VLAN bit map */
	if (phba->valid_vlan) {
		fcf_record->vlan_bitmap[phba->vlan_id / 8]
			= 1 << (phba->vlan_id % 8);
	}
}
18475
18476
18477
18478
18479
18480
18481
18482
18483
18484
18485
18486
18487
/**
 * lpfc_sli4_fcf_scan_read_fcf_rec - Read an FCF record as part of an FCF scan
 * @phba: pointer to lpfc hba data structure.
 * @fcf_index: FCF table entry offset at which to start (or continue) scanning.
 *
 * Issues a non-blocking READ_FCF_TABLE mailbox command to begin or continue
 * a round of FCF table scanning; completion is handled by
 * lpfc_mbx_cmpl_fcf_scan_read_fcf_rec().  FCF_TS_INPROG is set in hba_flag
 * while the command is outstanding and cleared again on any failure.
 *
 * Return: 0 on success, -ENOMEM on mailbox allocation failure, -EINVAL if
 * the mailbox could not be set up, -EIO if issuing the mailbox failed.
 */
int
lpfc_sli4_fcf_scan_read_fcf_rec(struct lpfc_hba *phba, uint16_t fcf_index)
{
	int rc = 0, error;
	LPFC_MBOXQ_t *mboxq;

	/* Snapshot the FCoE event tags so later async events can be
	 * correlated against the state at the start of this scan.
	 */
	phba->fcoe_eventtag_at_fcf_scan = phba->fcoe_eventtag;
	phba->fcoe_cvl_eventtag_attn = phba->fcoe_cvl_eventtag;
	mboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!mboxq) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"2000 Failed to allocate mbox for "
				"READ_FCF cmd\n");
		error = -ENOMEM;
		goto fail_fcf_scan;
	}
	/* Construct the read FCF record mailbox command */
	rc = lpfc_sli4_mbx_read_fcf_rec(phba, mboxq, fcf_index);
	if (rc) {
		error = -EINVAL;
		goto fail_fcf_scan;
	}

	mboxq->vport = phba->pport;
	mboxq->mbox_cmpl = lpfc_mbx_cmpl_fcf_scan_read_fcf_rec;

	/* Mark an FCF table-scan as in progress */
	spin_lock_irq(&phba->hbalock);
	phba->hba_flag |= FCF_TS_INPROG;
	spin_unlock_irq(&phba->hbalock);

	rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_NOWAIT);
	if (rc == MBX_NOT_FINISHED)
		error = -EIO;
	else {
		/* Reset eligible FCF count at the start of a full scan */
		if (fcf_index == LPFC_FCOE_FCF_GET_FIRST)
			phba->fcf.eligible_fcf_cnt = 0;
		error = 0;
	}
fail_fcf_scan:
	if (error) {
		if (mboxq)
			lpfc_sli4_mbox_cmd_free(phba, mboxq);
		/* FCF scan failed: clear the in-progress flag.  This is a
		 * no-op when the failure happened before the flag was set.
		 */
		spin_lock_irq(&phba->hbalock);
		phba->hba_flag &= ~FCF_TS_INPROG;
		spin_unlock_irq(&phba->hbalock);
	}
	return error;
}
18538
18539
18540
18541
18542
18543
18544
18545
18546
18547
18548
18549
18550int
18551lpfc_sli4_fcf_rr_read_fcf_rec(struct lpfc_hba *phba, uint16_t fcf_index)
18552{
18553 int rc = 0, error;
18554 LPFC_MBOXQ_t *mboxq;
18555
18556 mboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
18557 if (!mboxq) {
18558 lpfc_printf_log(phba, KERN_ERR, LOG_FIP | LOG_INIT,
18559 "2763 Failed to allocate mbox for "
18560 "READ_FCF cmd\n");
18561 error = -ENOMEM;
18562 goto fail_fcf_read;
18563 }
18564
18565 rc = lpfc_sli4_mbx_read_fcf_rec(phba, mboxq, fcf_index);
18566 if (rc) {
18567 error = -EINVAL;
18568 goto fail_fcf_read;
18569 }
18570
18571 mboxq->vport = phba->pport;
18572 mboxq->mbox_cmpl = lpfc_mbx_cmpl_fcf_rr_read_fcf_rec;
18573 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_NOWAIT);
18574 if (rc == MBX_NOT_FINISHED)
18575 error = -EIO;
18576 else
18577 error = 0;
18578
18579fail_fcf_read:
18580 if (error && mboxq)
18581 lpfc_sli4_mbox_cmd_free(phba, mboxq);
18582 return error;
18583}
18584
18585
18586
18587
18588
18589
18590
18591
18592
18593
18594
18595
18596int
18597lpfc_sli4_read_fcf_rec(struct lpfc_hba *phba, uint16_t fcf_index)
18598{
18599 int rc = 0, error;
18600 LPFC_MBOXQ_t *mboxq;
18601
18602 mboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
18603 if (!mboxq) {
18604 lpfc_printf_log(phba, KERN_ERR, LOG_FIP | LOG_INIT,
18605 "2758 Failed to allocate mbox for "
18606 "READ_FCF cmd\n");
18607 error = -ENOMEM;
18608 goto fail_fcf_read;
18609 }
18610
18611 rc = lpfc_sli4_mbx_read_fcf_rec(phba, mboxq, fcf_index);
18612 if (rc) {
18613 error = -EINVAL;
18614 goto fail_fcf_read;
18615 }
18616
18617 mboxq->vport = phba->pport;
18618 mboxq->mbox_cmpl = lpfc_mbx_cmpl_read_fcf_rec;
18619 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_NOWAIT);
18620 if (rc == MBX_NOT_FINISHED)
18621 error = -EIO;
18622 else
18623 error = 0;
18624
18625fail_fcf_read:
18626 if (error && mboxq)
18627 lpfc_sli4_mbox_cmd_free(phba, mboxq);
18628 return error;
18629}
18630
18631
18632
18633
18634
18635
18636
18637
18638
18639
18640
18641
18642
18643
/**
 * lpfc_check_next_fcf_pri_level - Populate the rr bmask for the next priority
 * @phba: pointer to lpfc hba data structure.
 *
 * Rebuilds fcf_rr_bmask from the fcf_pri_list entries that share the next
 * (first non-FLOGI-failed) priority value.  If every entry has failed
 * FLOGI, the failed flags are cleared and the bitmask is rebuilt from the
 * first priority found.
 *
 * Return: 1 if the bitmask was repopulated for a priority level, 0 when
 * there is nothing to fail over to (empty/singular list) or a bitmask
 * index could not be set.
 */
static int
lpfc_check_next_fcf_pri_level(struct lpfc_hba *phba)
{
	uint16_t next_fcf_pri;
	uint16_t last_index;
	struct lpfc_fcf_pri *fcf_pri;
	int rc;
	int ret = 0;

	last_index = find_first_bit(phba->fcf.fcf_rr_bmask,
			LPFC_SLI4_FCF_TBL_INDX_MAX);
	lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
			"3060 Last IDX %d\n", last_index);

	/* Verify the priority list has 2 or more entries */
	spin_lock_irq(&phba->hbalock);
	if (list_empty(&phba->fcf.fcf_pri_list) ||
	    list_is_singular(&phba->fcf.fcf_pri_list)) {
		spin_unlock_irq(&phba->hbalock);
		lpfc_printf_log(phba, KERN_ERR, LOG_FIP,
			"3061 Last IDX %d\n", last_index);
		return 0; /* Empty rr list */
	}
	spin_unlock_irq(&phba->hbalock);

	next_fcf_pri = 0;
	/*
	 * Clear the rr_bmask and set all of the bits that are at this
	 * priority.
	 * NOTE(review): this zeroes only sizeof(*fcf_rr_bmask) bytes (one
	 * unsigned long) — sufficient only while LPFC_SLI4_FCF_TBL_INDX_MAX
	 * fits in a single long; confirm if the table size ever grows.
	 */
	memset(phba->fcf.fcf_rr_bmask, 0,
			sizeof(*phba->fcf.fcf_rr_bmask));
	spin_lock_irq(&phba->hbalock);
	list_for_each_entry(fcf_pri, &phba->fcf.fcf_pri_list, list) {
		if (fcf_pri->fcf_rec.flag & LPFC_FCF_FLOGI_FAILED)
			continue;
		/*
		 * the 1st priority that has not FLOGI failed
		 * will be the highest.
		 */
		if (!next_fcf_pri)
			next_fcf_pri = fcf_pri->fcf_rec.priority;
		/* NOTE(review): the lock is dropped while still iterating
		 * fcf_pri_list; concurrent list mutation in this window
		 * would be unsafe — confirm callers serialize updates.
		 */
		spin_unlock_irq(&phba->hbalock);
		if (fcf_pri->fcf_rec.priority == next_fcf_pri) {
			rc = lpfc_sli4_fcf_rr_index_set(phba,
						fcf_pri->fcf_rec.fcf_index);
			if (rc)
				return 0;
		}
		spin_lock_irq(&phba->hbalock);
	}
	/*
	 * If next_fcf_pri was not set above and the list is not empty then
	 * every entry has failed FLOGI: clear the failed flags and rebuild
	 * the bitmask starting from the beginning of the list.
	 */
	if (!next_fcf_pri && !list_empty(&phba->fcf.fcf_pri_list)) {
		list_for_each_entry(fcf_pri, &phba->fcf.fcf_pri_list, list) {
			fcf_pri->fcf_rec.flag &= ~LPFC_FCF_FLOGI_FAILED;
			/*
			 * the 1st priority that has not FLOGI failed
			 * will be the highest.
			 */
			if (!next_fcf_pri)
				next_fcf_pri = fcf_pri->fcf_rec.priority;
			spin_unlock_irq(&phba->hbalock);
			if (fcf_pri->fcf_rec.priority == next_fcf_pri) {
				rc = lpfc_sli4_fcf_rr_index_set(phba,
						fcf_pri->fcf_rec.fcf_index);
				if (rc)
					return 0;
			}
			spin_lock_irq(&phba->hbalock);
		}
	} else
		ret = 1;
	spin_unlock_irq(&phba->hbalock);

	return ret;
}
18724
18725
18726
18727
18728
18729
18730
18731
18732
18733
/**
 * lpfc_sli4_fcf_rr_next_index_get - Get the next roundrobin-eligible FCF index
 * @phba: pointer to lpfc hba data structure.
 *
 * Walks fcf_rr_bmask circularly, starting just after the currently
 * registered FCF index, to find the next eligible index.  When the bitmask
 * is exhausted at the current priority, the next priority level is
 * populated via lpfc_check_next_fcf_pri_level() and the walk restarts.
 *
 * Return: the next eligible FCF index, or LPFC_FCOE_FCF_NEXT_NONE when
 * no eligible FCF remains.
 */
uint16_t
lpfc_sli4_fcf_rr_next_index_get(struct lpfc_hba *phba)
{
	uint16_t next_fcf_index;

initial_priority:
	/* Search starts from the currently registered FCF index */
	next_fcf_index = phba->fcf.current_rec.fcf_indx;

next_priority:
	/* Determine the next fcf index to check, wrapping at table end */
	next_fcf_index = (next_fcf_index + 1) % LPFC_SLI4_FCF_TBL_INDX_MAX;
	next_fcf_index = find_next_bit(phba->fcf.fcf_rr_bmask,
				       LPFC_SLI4_FCF_TBL_INDX_MAX,
				       next_fcf_index);

	/* Wrap around condition on phba->fcf.fcf_rr_bmask */
	if (next_fcf_index >= LPFC_SLI4_FCF_TBL_INDX_MAX) {
		/*
		 * No set bit at or after the start position: restart the
		 * search from bit 0 of the bitmask.
		 */
		next_fcf_index = find_next_bit(phba->fcf.fcf_rr_bmask,
					       LPFC_SLI4_FCF_TBL_INDX_MAX, 0);
	}

	/* Check roundrobin failover list empty condition */
	if (next_fcf_index >= LPFC_SLI4_FCF_TBL_INDX_MAX ||
	    next_fcf_index == phba->fcf.current_rec.fcf_indx) {
		/*
		 * The current priority level is exhausted (or only the
		 * current FCF is left).  Try to populate the bitmask from
		 * the next lower fcf_pri_list priority level and restart.
		 */
		if (lpfc_check_next_fcf_pri_level(phba))
			goto initial_priority;
		lpfc_printf_log(phba, KERN_WARNING, LOG_FIP,
				"2844 No roundrobin failover FCF available\n");

		return LPFC_FCOE_FCF_NEXT_NONE;
	}

	/* Skip an FCF whose FLOGI already failed at this priority level */
	if (next_fcf_index < LPFC_SLI4_FCF_TBL_INDX_MAX &&
	    phba->fcf.fcf_pri[next_fcf_index].fcf_rec.flag &
	    LPFC_FCF_FLOGI_FAILED) {
		/* A singular priority list offers no alternative: give up
		 * rather than loop forever.
		 */
		if (list_is_singular(&phba->fcf.fcf_pri_list))
			return LPFC_FCOE_FCF_NEXT_NONE;

		goto next_priority;
	}

	lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
			"2845 Get next roundrobin failover FCF (x%x)\n",
			next_fcf_index);

	return next_fcf_index;
}
18794
18795
18796
18797
18798
18799
18800
18801
18802
18803
18804
18805
18806
/**
 * lpfc_sli4_fcf_rr_index_set - Mark an FCF index eligible for rr failover
 * @phba: pointer to lpfc hba data structure.
 * @fcf_index: index into the roundrobin FCF failover bitmask (0 based).
 *
 * Sets the bit for @fcf_index in fcf_rr_bmask, making it a candidate for
 * roundrobin FCF failover selection.
 *
 * Return: 0 on success, -EINVAL if @fcf_index exceeds the driver's
 * bookkeeping range (LPFC_SLI4_FCF_TBL_INDX_MAX).
 */
int
lpfc_sli4_fcf_rr_index_set(struct lpfc_hba *phba, uint16_t fcf_index)
{
	/* Out of range index: log and reject */
	if (fcf_index >= LPFC_SLI4_FCF_TBL_INDX_MAX) {
		lpfc_printf_log(phba, KERN_ERR, LOG_FIP,
				"2610 FCF (x%x) reached driver's book "
				"keeping dimension:x%x\n",
				fcf_index, LPFC_SLI4_FCF_TBL_INDX_MAX);
		return -EINVAL;
	}
	/* Set the eligible FCF record index bmask */
	set_bit(fcf_index, phba->fcf.fcf_rr_bmask);

	lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
			"2790 Set FCF (x%x) to roundrobin FCF failover "
			"bmask\n", fcf_index);

	return 0;
}
18826
18827
18828
18829
18830
18831
18832
18833
18834
18835
18836void
18837lpfc_sli4_fcf_rr_index_clear(struct lpfc_hba *phba, uint16_t fcf_index)
18838{
18839 struct lpfc_fcf_pri *fcf_pri, *fcf_pri_next;
18840 if (fcf_index >= LPFC_SLI4_FCF_TBL_INDX_MAX) {
18841 lpfc_printf_log(phba, KERN_ERR, LOG_FIP,
18842 "2762 FCF (x%x) reached driver's book "
18843 "keeping dimension:x%x\n",
18844 fcf_index, LPFC_SLI4_FCF_TBL_INDX_MAX);
18845 return;
18846 }
18847
18848 spin_lock_irq(&phba->hbalock);
18849 list_for_each_entry_safe(fcf_pri, fcf_pri_next, &phba->fcf.fcf_pri_list,
18850 list) {
18851 if (fcf_pri->fcf_rec.fcf_index == fcf_index) {
18852 list_del_init(&fcf_pri->list);
18853 break;
18854 }
18855 }
18856 spin_unlock_irq(&phba->hbalock);
18857 clear_bit(fcf_index, phba->fcf.fcf_rr_bmask);
18858
18859 lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
18860 "2791 Clear FCF (x%x) from roundrobin failover "
18861 "bmask\n", fcf_index);
18862}
18863
18864
18865
18866
18867
18868
18869
18870
18871
/**
 * lpfc_mbx_cmpl_redisc_fcf_table - Completion handler for REDISCOVER_FCF
 * @phba: pointer to lpfc hba data structure.
 * @mbox: pointer to the completed mailbox command.
 *
 * Checks the subheader status of the rediscover-FCF-table mailbox command.
 * On failure, clears the pending FCF discovery state and falls back:
 * a CVL-triggered rediscovery retries physical port discovery, otherwise
 * the failure is treated like an FCF-dead event.  On success, starts the
 * FCF rediscovery quiescent-period timer.  Frees the mailbox in all cases.
 */
static void
lpfc_mbx_cmpl_redisc_fcf_table(struct lpfc_hba *phba, LPFC_MBOXQ_t *mbox)
{
	struct lpfc_mbx_redisc_fcf_tbl *redisc_fcf;
	uint32_t shdr_status, shdr_add_status;

	redisc_fcf = &mbox->u.mqe.un.redisc_fcf_tbl;

	shdr_status = bf_get(lpfc_mbox_hdr_status,
			     &redisc_fcf->header.cfg_shdr.response);
	shdr_add_status = bf_get(lpfc_mbox_hdr_add_status,
			     &redisc_fcf->header.cfg_shdr.response);
	if (shdr_status || shdr_add_status) {
		lpfc_printf_log(phba, KERN_ERR, LOG_FIP,
				"2746 Requesting for FCF rediscovery failed "
				"status x%x add_status x%x\n",
				shdr_status, shdr_add_status);
		/* NOTE(review): fcf_flag is tested here without hbalock,
		 * though it is cleared under the lock below — confirm this
		 * read is adequately serialized by the caller context.
		 */
		if (phba->fcf.fcf_flag & FCF_ACVL_DISC) {
			spin_lock_irq(&phba->hbalock);
			phba->fcf.fcf_flag &= ~FCF_ACVL_DISC;
			spin_unlock_irq(&phba->hbalock);
			/*
			 * CVL event triggered FCF rediscover request failed,
			 * last resort to re-try current registered FCF entry.
			 */
			lpfc_retry_pport_discovery(phba);
		} else {
			spin_lock_irq(&phba->hbalock);
			phba->fcf.fcf_flag &= ~FCF_DEAD_DISC;
			spin_unlock_irq(&phba->hbalock);
			/*
			 * DEAD FCF event triggered FCF rediscover request
			 * failed, last resort to fail over as a link down
			 * to FCF registration.
			 */
			lpfc_sli4_fcf_dead_failthrough(phba);
		}
	} else {
		lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
				"2775 Start FCF rediscover quiescent timer\n");
		/*
		 * Start FCF rediscovery wait timer for pending FCF
		 * before rescan FCF record table.
		 */
		lpfc_fcf_redisc_wait_start_timer(phba);
	}

	mempool_free(mbox, phba->mbox_mem_pool);
}
18921
18922
18923
18924
18925
18926
18927
18928
18929int
18930lpfc_sli4_redisc_fcf_table(struct lpfc_hba *phba)
18931{
18932 LPFC_MBOXQ_t *mbox;
18933 struct lpfc_mbx_redisc_fcf_tbl *redisc_fcf;
18934 int rc, length;
18935
18936
18937 lpfc_cancel_all_vport_retry_delay_timer(phba);
18938
18939 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
18940 if (!mbox) {
18941 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
18942 "2745 Failed to allocate mbox for "
18943 "requesting FCF rediscover.\n");
18944 return -ENOMEM;
18945 }
18946
18947 length = (sizeof(struct lpfc_mbx_redisc_fcf_tbl) -
18948 sizeof(struct lpfc_sli4_cfg_mhdr));
18949 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_FCOE,
18950 LPFC_MBOX_OPCODE_FCOE_REDISCOVER_FCF,
18951 length, LPFC_SLI4_MBX_EMBED);
18952
18953 redisc_fcf = &mbox->u.mqe.un.redisc_fcf_tbl;
18954
18955 bf_set(lpfc_mbx_redisc_fcf_count, redisc_fcf, 0);
18956
18957
18958 mbox->vport = phba->pport;
18959 mbox->mbox_cmpl = lpfc_mbx_cmpl_redisc_fcf_table;
18960 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT);
18961
18962 if (rc == MBX_NOT_FINISHED) {
18963 mempool_free(mbox, phba->mbox_mem_pool);
18964 return -EIO;
18965 }
18966 return 0;
18967}
18968
18969
18970
18971
18972
18973
18974
18975
18976void
18977lpfc_sli4_fcf_dead_failthrough(struct lpfc_hba *phba)
18978{
18979 uint32_t link_state;
18980
18981
18982
18983
18984
18985
18986 link_state = phba->link_state;
18987 lpfc_linkdown(phba);
18988 phba->link_state = link_state;
18989
18990
18991 lpfc_unregister_unused_fcf(phba);
18992}
18993
18994
18995
18996
18997
18998
18999
19000
19001
19002
/**
 * lpfc_sli_get_config_region23 - Read SLI3 port config region 23 data
 * @phba: pointer to lpfc hba data structure.
 * @rgn23_data: caller-supplied buffer of at least DMP_RGN23_SIZE bytes.
 *
 * Repeatedly issues polled DUMP mailbox commands to read config region 23
 * into @rgn23_data until the region is exhausted or the buffer is full.
 *
 * Return: number of bytes copied into @rgn23_data, 0 on any failure.
 *
 * NOTE(review): varDmp.word_cnt is used directly as a byte count here
 * (buffer clamping and offset arithmetic) — confirm the DUMP response
 * reports bytes rather than 32-bit words.
 */
static uint32_t
lpfc_sli_get_config_region23(struct lpfc_hba *phba, char *rgn23_data)
{
	LPFC_MBOXQ_t *pmb = NULL;
	MAILBOX_t *mb;
	uint32_t offset = 0;
	int rc;

	if (!rgn23_data)
		return 0;

	pmb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!pmb) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"2600 failed to allocate mailbox memory\n");
		return 0;
	}
	mb = &pmb->u.mb;

	do {
		lpfc_dump_mem(phba, pmb, offset, DMP_REGION_23);
		rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL);

		if (rc != MBX_SUCCESS) {
			lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
					"2601 failed to read config "
					"region 23, rc 0x%x Status 0x%x\n",
					rc, mb->mbxStatus);
			mb->un.varDmp.word_cnt = 0;
		}
		/*
		 * dump mem may return a zero when finished or we got a
		 * mailbox error, either way we are done.
		 */
		if (mb->un.varDmp.word_cnt == 0)
			break;
		/* Clamp to the remaining space in the caller's buffer */
		if (mb->un.varDmp.word_cnt > DMP_RGN23_SIZE - offset)
			mb->un.varDmp.word_cnt = DMP_RGN23_SIZE - offset;

		lpfc_sli_pcimem_bcopy(((uint8_t *)mb) + DMP_RSP_OFFSET,
				       rgn23_data + offset,
				       mb->un.varDmp.word_cnt);
		offset += mb->un.varDmp.word_cnt;
	} while (mb->un.varDmp.word_cnt && offset < DMP_RGN23_SIZE);

	mempool_free(pmb, phba->mbox_mem_pool);
	return offset;
}
19051
19052
19053
19054
19055
19056
19057
19058
19059
19060
/**
 * lpfc_sli4_get_config_region23 - Read SLI4 port config region 23 data
 * @phba: pointer to lpfc hba data structure.
 * @rgn23_data: caller-supplied buffer of at least DMP_RGN23_SIZE bytes.
 *
 * Issues a polled dump-config-region-23 mailbox command and copies the
 * returned data from the command's DMA buffer into @rgn23_data.
 *
 * Return: number of bytes copied, 0 on failure or when the region is
 * empty or larger than DMP_RGN23_SIZE.
 */
static uint32_t
lpfc_sli4_get_config_region23(struct lpfc_hba *phba, char *rgn23_data)
{
	LPFC_MBOXQ_t *mboxq = NULL;
	struct lpfc_dmabuf *mp = NULL;
	struct lpfc_mqe *mqe;
	uint32_t data_length = 0;
	int rc;

	if (!rgn23_data)
		return 0;

	mboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!mboxq) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"3105 failed to allocate mailbox memory\n");
		return 0;
	}

	if (lpfc_sli4_dump_cfg_rg23(phba, mboxq))
		goto out;
	mqe = &mboxq->u.mqe;
	/* DMA buffer holding the region data, set up by dump_cfg_rg23 */
	mp = (struct lpfc_dmabuf *)mboxq->ctx_buf;
	rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
	if (rc)
		goto out;
	/* mb_words[5] carries the returned byte count */
	data_length = mqe->un.mb_words[5];
	if (data_length == 0)
		goto out;
	if (data_length > DMP_RGN23_SIZE) {
		data_length = 0;
		goto out;
	}
	lpfc_sli_pcimem_bcopy((char *)mp->virt, rgn23_data, data_length);
out:
	/* Free the mailbox and, if allocated, its DMA buffer */
	mempool_free(mboxq, phba->mbox_mem_pool);
	if (mp) {
		lpfc_mbuf_free(phba, mp->virt, mp->phys);
		kfree(mp);
	}
	return data_length;
}
19103
19104
19105
19106
19107
19108
19109
19110
19111
19112void
19113lpfc_sli_read_link_ste(struct lpfc_hba *phba)
19114{
19115 uint8_t *rgn23_data = NULL;
19116 uint32_t if_type, data_size, sub_tlv_len, tlv_offset;
19117 uint32_t offset = 0;
19118
19119
19120 rgn23_data = kzalloc(DMP_RGN23_SIZE, GFP_KERNEL);
19121 if (!rgn23_data)
19122 goto out;
19123
19124 if (phba->sli_rev < LPFC_SLI_REV4)
19125 data_size = lpfc_sli_get_config_region23(phba, rgn23_data);
19126 else {
19127 if_type = bf_get(lpfc_sli_intf_if_type,
19128 &phba->sli4_hba.sli_intf);
19129 if (if_type == LPFC_SLI_INTF_IF_TYPE_0)
19130 goto out;
19131 data_size = lpfc_sli4_get_config_region23(phba, rgn23_data);
19132 }
19133
19134 if (!data_size)
19135 goto out;
19136
19137
19138 if (memcmp(&rgn23_data[offset], LPFC_REGION23_SIGNATURE, 4)) {
19139 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
19140 "2619 Config region 23 has bad signature\n");
19141 goto out;
19142 }
19143 offset += 4;
19144
19145
19146 if (rgn23_data[offset] != LPFC_REGION23_VERSION) {
19147 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
19148 "2620 Config region 23 has bad version\n");
19149 goto out;
19150 }
19151 offset += 4;
19152
19153
19154 while (offset < data_size) {
19155 if (rgn23_data[offset] == LPFC_REGION23_LAST_REC)
19156 break;
19157
19158
19159
19160
19161 if ((rgn23_data[offset] != DRIVER_SPECIFIC_TYPE) ||
19162 (rgn23_data[offset + 2] != LINUX_DRIVER_ID) ||
19163 (rgn23_data[offset + 3] != 0)) {
19164 offset += rgn23_data[offset + 1] * 4 + 4;
19165 continue;
19166 }
19167
19168
19169 sub_tlv_len = rgn23_data[offset + 1] * 4;
19170 offset += 4;
19171 tlv_offset = 0;
19172
19173
19174
19175
19176 while ((offset < data_size) &&
19177 (tlv_offset < sub_tlv_len)) {
19178 if (rgn23_data[offset] == LPFC_REGION23_LAST_REC) {
19179 offset += 4;
19180 tlv_offset += 4;
19181 break;
19182 }
19183 if (rgn23_data[offset] != PORT_STE_TYPE) {
19184 offset += rgn23_data[offset + 1] * 4 + 4;
19185 tlv_offset += rgn23_data[offset + 1] * 4 + 4;
19186 continue;
19187 }
19188
19189
19190 if (!rgn23_data[offset + 2])
19191 phba->hba_flag |= LINK_DISABLED;
19192
19193 goto out;
19194 }
19195 }
19196
19197out:
19198 kfree(rgn23_data);
19199 return;
19200}
19201
19202
19203
19204
19205
19206
19207
19208
19209
19210
19211
19212
19213
19214
19215
19216
19217
19218
19219
19220
/**
 * lpfc_wr_object - Issue a SLI4 WRITE_OBJECT mailbox command
 * @phba: pointer to lpfc hba data structure.
 * @dmabuf_list: list of DMA buffers holding the object data to write.
 * @size: total number of bytes to write from @dmabuf_list.
 * @offset: in: byte offset into the object; out: next write offset on
 * success, or the shdr additional status on failure.
 *
 * Builds an embedded WRITE_OBJECT mailbox command with up to
 * LPFC_MBX_WR_CONFIG_MAX_BDE BDEs from @dmabuf_list (each SLI4_PAGE_SIZE
 * bytes, except possibly the last) targeting object name "/".  When the
 * final chunk is written, EOF/EAS are set and the firmware's change
 * status is logged.  The command is polled when interrupts are disabled,
 * otherwise issued synchronously with a timeout.
 *
 * Return: 0 on success, -ENOMEM on mailbox allocation failure, -ENXIO
 * (with *offset set to the additional status) on mailbox failure.
 */
int
lpfc_wr_object(struct lpfc_hba *phba, struct list_head *dmabuf_list,
	       uint32_t size, uint32_t *offset)
{
	struct lpfc_mbx_wr_object *wr_object;
	LPFC_MBOXQ_t *mbox;
	int rc = 0, i = 0;
	uint32_t shdr_status, shdr_add_status, shdr_change_status;
	uint32_t mbox_tmo;
	struct lpfc_dmabuf *dmabuf;
	uint32_t written = 0;
	bool check_change_status = false;

	mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!mbox)
		return -ENOMEM;

	lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON,
			LPFC_MBOX_OPCODE_WRITE_OBJECT,
			sizeof(struct lpfc_mbx_wr_object) -
			sizeof(struct lpfc_sli4_cfg_mhdr), LPFC_SLI4_MBX_EMBED);

	wr_object = (struct lpfc_mbx_wr_object *)&mbox->u.mqe.un.wr_object;
	wr_object->u.request.write_offset = *offset;
	/* Object name is the single path component "/" */
	sprintf((uint8_t *)wr_object->u.request.object_name, "/");
	wr_object->u.request.object_name[0] =
		cpu_to_le32(wr_object->u.request.object_name[0]);
	bf_set(lpfc_wr_object_eof, &wr_object->u.request, 0);
	/* Fill BDEs from the dmabuf list, one SLI4 page per entry */
	list_for_each_entry(dmabuf, dmabuf_list, list) {
		if (i >= LPFC_MBX_WR_CONFIG_MAX_BDE || written >= size)
			break;
		wr_object->u.request.bde[i].addrLow = putPaddrLow(dmabuf->phys);
		wr_object->u.request.bde[i].addrHigh =
			putPaddrHigh(dmabuf->phys);
		if (written + SLI4_PAGE_SIZE >= size) {
			/* Last chunk: partial size, set EOF/EAS and expect
			 * a change status in the response.
			 */
			wr_object->u.request.bde[i].tus.f.bdeSize =
				(size - written);
			written += (size - written);
			bf_set(lpfc_wr_object_eof, &wr_object->u.request, 1);
			bf_set(lpfc_wr_object_eas, &wr_object->u.request, 1);
			check_change_status = true;
		} else {
			wr_object->u.request.bde[i].tus.f.bdeSize =
				SLI4_PAGE_SIZE;
			written += SLI4_PAGE_SIZE;
		}
		i++;
	}
	wr_object->u.request.bde_count = i;
	bf_set(lpfc_wr_object_write_length, &wr_object->u.request, written);
	if (!phba->sli4_hba.intr_enable)
		rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
	else {
		mbox_tmo = lpfc_mbox_tmo_val(phba, mbox);
		rc = lpfc_sli_issue_mbox_wait(phba, mbox, mbox_tmo);
	}
	/* The command status is embedded in the mailbox subheader */
	shdr_status = bf_get(lpfc_mbox_hdr_status,
			     &wr_object->header.cfg_shdr.response);
	shdr_add_status = bf_get(lpfc_mbox_hdr_add_status,
				 &wr_object->header.cfg_shdr.response);
	if (check_change_status) {
		/* Log what the firmware requires to activate the image */
		shdr_change_status = bf_get(lpfc_wr_object_change_status,
					    &wr_object->u.response);
		switch (shdr_change_status) {
		case (LPFC_CHANGE_STATUS_PHYS_DEV_RESET):
			lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
					"3198 Firmware write complete: System "
					"reboot required to instantiate\n");
			break;
		case (LPFC_CHANGE_STATUS_FW_RESET):
			lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
					"3199 Firmware write complete: Firmware"
					" reset required to instantiate\n");
			break;
		case (LPFC_CHANGE_STATUS_PORT_MIGRATION):
			lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
					"3200 Firmware write complete: Port "
					"Migration or PCI Reset required to "
					"instantiate\n");
			break;
		case (LPFC_CHANGE_STATUS_PCI_RESET):
			lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
					"3201 Firmware write complete: PCI "
					"Reset required to instantiate\n");
			break;
		default:
			break;
		}
	}
	/* On timeout the completion path still owns the mailbox */
	if (rc != MBX_TIMEOUT)
		mempool_free(mbox, phba->mbox_mem_pool);
	if (shdr_status || shdr_add_status || rc) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"3025 Write Object mailbox failed with "
				"status x%x add_status x%x, mbx status x%x\n",
				shdr_status, shdr_add_status, rc);
		rc = -ENXIO;
		*offset = shdr_add_status;
	} else
		*offset += wr_object->u.response.actual_write_length;
	return rc;
}
19324
19325
19326
19327
19328
19329
19330
19331
19332
19333
/**
 * lpfc_cleanup_pending_mbox - Clean up a vport's pending REG_LOGIN/REG_VPI
 * @vport: pointer to the vport being cleaned up.
 *
 * Removes this vport's queued MBX_REG_LOGIN64/MBX_REG_VPI mailbox
 * commands, redirects the in-flight and already-completed ones to the
 * default completion handler with LPFC_MBX_IMED_UNREG set, releases the
 * DMA buffers and node references they held, and clears
 * NLP_IGNR_REG_CMPL on the affected nodes.
 */
void
lpfc_cleanup_pending_mbox(struct lpfc_vport *vport)
{
	struct lpfc_hba *phba = vport->phba;
	LPFC_MBOXQ_t *mb, *nextmb;
	struct lpfc_dmabuf *mp;
	struct lpfc_nodelist *ndlp;
	struct lpfc_nodelist *act_mbx_ndlp = NULL;
	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
	LIST_HEAD(mbox_cmd_list);
	uint8_t restart_loop;

	/* Move this vport's queued REG_LOGIN64/REG_VPI commands to a
	 * private list for later release.
	 */
	spin_lock_irq(&phba->hbalock);
	list_for_each_entry_safe(mb, nextmb, &phba->sli.mboxq, list) {
		if (mb->vport != vport)
			continue;

		if ((mb->u.mb.mbxCommand != MBX_REG_LOGIN64) &&
			(mb->u.mb.mbxCommand != MBX_REG_VPI))
			continue;

		list_del(&mb->list);
		list_add_tail(&mb->list, &mbox_cmd_list);
	}
	/* Redirect the currently active mailbox command, if it is ours */
	mb = phba->sli.mbox_active;
	if (mb && (mb->vport == vport)) {
		if ((mb->u.mb.mbxCommand == MBX_REG_LOGIN64) ||
			(mb->u.mb.mbxCommand == MBX_REG_VPI))
			mb->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
		if (mb->u.mb.mbxCommand == MBX_REG_LOGIN64) {
			act_mbx_ndlp = (struct lpfc_nodelist *)mb->ctx_ndlp;
			/* Take a node reference for delayed release below */
			act_mbx_ndlp = lpfc_nlp_get(act_mbx_ndlp);
			/* Unregister the RPI when mailbox complete */
			mb->mbox_flag |= LPFC_MBX_IMED_UNREG;
		}
	}
	/* Redirect completed-but-unprocessed commands; the lock must be
	 * dropped to take host_lock, so restart the scan each time.
	 */
	do {
		restart_loop = 0;
		list_for_each_entry(mb, &phba->sli.mboxq_cmpl, list) {
			/*
			 * Skip commands for other vports and commands
			 * already marked for immediate unreg.
			 */
			if ((mb->vport != vport) ||
				(mb->mbox_flag & LPFC_MBX_IMED_UNREG))
				continue;

			if ((mb->u.mb.mbxCommand != MBX_REG_LOGIN64) &&
				(mb->u.mb.mbxCommand != MBX_REG_VPI))
				continue;

			mb->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
			if (mb->u.mb.mbxCommand == MBX_REG_LOGIN64) {
				ndlp = (struct lpfc_nodelist *)mb->ctx_ndlp;
				/* Unregister the RPI when mailbox complete */
				mb->mbox_flag |= LPFC_MBX_IMED_UNREG;
				restart_loop = 1;
				/* Drop hbalock before taking host_lock */
				spin_unlock_irq(&phba->hbalock);
				spin_lock(shost->host_lock);
				ndlp->nlp_flag &= ~NLP_IGNR_REG_CMPL;
				spin_unlock(shost->host_lock);
				spin_lock_irq(&phba->hbalock);
				break;
			}
		}
	} while (restart_loop);

	spin_unlock_irq(&phba->hbalock);

	/* Release the mailbox commands collected above, along with the
	 * DMA buffers and node references held by REG_LOGIN64 commands.
	 */
	while (!list_empty(&mbox_cmd_list)) {
		list_remove_head(&mbox_cmd_list, mb, LPFC_MBOXQ_t, list);
		if (mb->u.mb.mbxCommand == MBX_REG_LOGIN64) {
			mp = (struct lpfc_dmabuf *)(mb->ctx_buf);
			if (mp) {
				__lpfc_mbuf_free(phba, mp->virt, mp->phys);
				kfree(mp);
			}
			mb->ctx_buf = NULL;
			ndlp = (struct lpfc_nodelist *)mb->ctx_ndlp;
			mb->ctx_ndlp = NULL;
			if (ndlp) {
				spin_lock(shost->host_lock);
				ndlp->nlp_flag &= ~NLP_IGNR_REG_CMPL;
				spin_unlock(shost->host_lock);
				lpfc_nlp_put(ndlp);
			}
		}
		mempool_free(mb, phba->mbox_mem_pool);
	}

	/* Release the node reference taken for the active mailbox command */
	if (act_mbx_ndlp) {
		spin_lock(shost->host_lock);
		act_mbx_ndlp->nlp_flag &= ~NLP_IGNR_REG_CMPL;
		spin_unlock(shost->host_lock);
		lpfc_nlp_put(act_mbx_ndlp);
	}
}
19437
19438
19439
19440
19441
19442
19443
19444
19445
19446
19447
19448
19449uint32_t
19450lpfc_drain_txq(struct lpfc_hba *phba)
19451{
19452 LIST_HEAD(completions);
19453 struct lpfc_sli_ring *pring;
19454 struct lpfc_iocbq *piocbq = NULL;
19455 unsigned long iflags = 0;
19456 char *fail_msg = NULL;
19457 struct lpfc_sglq *sglq;
19458 union lpfc_wqe128 wqe;
19459 uint32_t txq_cnt = 0;
19460 struct lpfc_queue *wq;
19461
19462 if (phba->link_flag & LS_MDS_LOOPBACK) {
19463
19464 wq = phba->sli4_hba.hdwq[0].fcp_wq;
19465 if (unlikely(!wq))
19466 return 0;
19467 pring = wq->pring;
19468 } else {
19469 wq = phba->sli4_hba.els_wq;
19470 if (unlikely(!wq))
19471 return 0;
19472 pring = lpfc_phba_elsring(phba);
19473 }
19474
19475 if (unlikely(!pring) || list_empty(&pring->txq))
19476 return 0;
19477
19478 spin_lock_irqsave(&pring->ring_lock, iflags);
19479 list_for_each_entry(piocbq, &pring->txq, list) {
19480 txq_cnt++;
19481 }
19482
19483 if (txq_cnt > pring->txq_max)
19484 pring->txq_max = txq_cnt;
19485
19486 spin_unlock_irqrestore(&pring->ring_lock, iflags);
19487
19488 while (!list_empty(&pring->txq)) {
19489 spin_lock_irqsave(&pring->ring_lock, iflags);
19490
19491 piocbq = lpfc_sli_ringtx_get(phba, pring);
19492 if (!piocbq) {
19493 spin_unlock_irqrestore(&pring->ring_lock, iflags);
19494 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
19495 "2823 txq empty and txq_cnt is %d\n ",
19496 txq_cnt);
19497 break;
19498 }
19499 sglq = __lpfc_sli_get_els_sglq(phba, piocbq);
19500 if (!sglq) {
19501 __lpfc_sli_ringtx_put(phba, pring, piocbq);
19502 spin_unlock_irqrestore(&pring->ring_lock, iflags);
19503 break;
19504 }
19505 txq_cnt--;
19506
19507
19508
19509
19510 piocbq->sli4_lxritag = sglq->sli4_lxritag;
19511 piocbq->sli4_xritag = sglq->sli4_xritag;
19512 if (NO_XRI == lpfc_sli4_bpl2sgl(phba, piocbq, sglq))
19513 fail_msg = "to convert bpl to sgl";
19514 else if (lpfc_sli4_iocb2wqe(phba, piocbq, &wqe))
19515 fail_msg = "to convert iocb to wqe";
19516 else if (lpfc_sli4_wq_put(wq, &wqe))
19517 fail_msg = " - Wq is full";
19518 else
19519 lpfc_sli_ringtxcmpl_put(phba, pring, piocbq);
19520
19521 if (fail_msg) {
19522
19523 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
19524 "2822 IOCB failed %s iotag 0x%x "
19525 "xri 0x%x\n",
19526 fail_msg,
19527 piocbq->iotag, piocbq->sli4_xritag);
19528 list_add_tail(&piocbq->list, &completions);
19529 }
19530 spin_unlock_irqrestore(&pring->ring_lock, iflags);
19531 }
19532
19533
19534 lpfc_sli_cancel_iocbs(phba, &completions, IOSTAT_LOCAL_REJECT,
19535 IOERR_SLI_ABORTED);
19536
19537 return txq_cnt;
19538}
19539
19540
19541
19542
19543
19544
19545
19546
19547
19548
19549
19550
19551
19552
19553
19554
19555
19556
/**
 * lpfc_wqe_bpl2sgl - Convert a WQE's buffer pointer list into an SGL
 * @phba: pointer to lpfc hba data structure.
 * @pwqeq: pointer to the command WQE queue entry.
 * @sglq: pointer to the SGL queue entry whose sgl is to be filled.
 *
 * Copies each BDE from the command's buffer pointer list (context3) into
 * @sglq's scatter/gather list, handling endianness, the last-entry flag,
 * and per-command-type SGE type/offset fixups.  For a command carrying a
 * single embedded 64-bit BDE (gen_req), builds the lone SGE from the WQE
 * itself.  XMIT_BLS_RSP needs no data conversion.
 *
 * Return: the sglq's XRI tag on success, NO_XRI when @pwqeq/@sglq is
 * NULL or the BPL cannot be located.
 */
static uint16_t
lpfc_wqe_bpl2sgl(struct lpfc_hba *phba, struct lpfc_iocbq *pwqeq,
		 struct lpfc_sglq *sglq)
{
	uint16_t xritag = NO_XRI;
	struct ulp_bde64 *bpl = NULL;
	struct ulp_bde64 bde;
	struct sli4_sge *sgl = NULL;
	struct lpfc_dmabuf *dmabuf;
	union lpfc_wqe128 *wqe;
	int numBdes = 0;
	int i = 0;
	uint32_t offset = 0; /* accumulated SGE offset in the request */
	int inbound = 0; /* number of inbound (reply) entries seen */
	uint32_t cmd;

	if (!pwqeq || !sglq)
		return xritag;

	sgl = (struct sli4_sge *)sglq->sgl;
	wqe = &pwqeq->wqe;
	pwqeq->iocb.ulpIoTag = pwqeq->iotag;

	cmd = bf_get(wqe_cmnd, &wqe->generic.wqe_com);
	/* XMIT_BLS_RSP carries no data: the XRI alone suffices */
	if (cmd == CMD_XMIT_BLS_RSP64_WQE)
		return sglq->sli4_xritag;
	numBdes = pwqeq->rsvd2;
	if (numBdes) {
		/* The addrHigh/addrLow fields in the BPL have not been
		 * byteswapped yet, so they can be copied as-is.
		 */
		if (pwqeq->context3)
			dmabuf = (struct lpfc_dmabuf *)pwqeq->context3;
		else
			return xritag;

		bpl = (struct ulp_bde64 *)dmabuf->virt;
		if (!bpl)
			return xritag;

		for (i = 0; i < numBdes; i++) {
			/* Addresses are already in wire byte order */
			sgl->addr_hi = bpl->addrHigh;
			sgl->addr_lo = bpl->addrLow;

			sgl->word2 = le32_to_cpu(sgl->word2);
			if ((i+1) == numBdes)
				bf_set(lpfc_sli4_sge_last, sgl, 1);
			else
				bf_set(lpfc_sli4_sge_last, sgl, 0);

			/* Swap the size field to CPU order so it can be
			 * assigned into the sgl.
			 */
			bde.tus.w = le32_to_cpu(bpl->tus.w);
			sgl->sge_len = cpu_to_le32(bde.tus.f.bdeSize);
			/* SGE offsets accumulate separately for the
			 * request and reply lists per command type.
			 */
			switch (cmd) {
			case CMD_GEN_REQUEST64_WQE:
				/* Count inbound (reply) entries */
				if (bpl->tus.f.bdeFlags == BUFF_TYPE_BDE_64I)
					inbound++;
				/* First inbound entry restarts the offset */
				if (inbound == 1)
					offset = 0;
				bf_set(lpfc_sli4_sge_offset, sgl, offset);
				bf_set(lpfc_sli4_sge_type, sgl,
					LPFC_SGE_TYPE_DATA);
				offset += bde.tus.f.bdeSize;
				break;
			case CMD_FCP_TRSP64_WQE:
				bf_set(lpfc_sli4_sge_offset, sgl, 0);
				bf_set(lpfc_sli4_sge_type, sgl,
					LPFC_SGE_TYPE_DATA);
				break;
			case CMD_FCP_TSEND64_WQE:
			case CMD_FCP_TRECEIVE64_WQE:
				bf_set(lpfc_sli4_sge_type, sgl,
					bpl->tus.f.bdeFlags);
				/* NOTE(review): offset accumulation starts
				 * only from the 4th entry here — confirm
				 * the first three SGEs are header entries.
				 */
				if (i < 3)
					offset = 0;
				else
					offset += bde.tus.f.bdeSize;
				bf_set(lpfc_sli4_sge_offset, sgl, offset);
				break;
			}
			sgl->word2 = cpu_to_le32(sgl->word2);
			bpl++;
			sgl++;
		}
	} else if (wqe->gen_req.bde.tus.f.bdeFlags == BUFF_TYPE_BDE_64) {
		/* The embedded BDE's addrHigh/addrLow have not been
		 * byteswapped yet, so swap them before building the SGE.
		 */
		sgl->addr_hi = cpu_to_le32(wqe->gen_req.bde.addrHigh);
		sgl->addr_lo = cpu_to_le32(wqe->gen_req.bde.addrLow);
		sgl->word2 = le32_to_cpu(sgl->word2);
		bf_set(lpfc_sli4_sge_last, sgl, 1);
		sgl->word2 = cpu_to_le32(sgl->word2);
		sgl->sge_len = cpu_to_le32(wqe->gen_req.bde.tus.f.bdeSize);
	}
	return sglq->sli4_xritag;
}
19664
19665
19666
19667
19668
19669
19670
/**
 * lpfc_sli4_issue_wqe - Issue an SLI4 Work Queue Entry (WQE)
 * @phba: Pointer to HBA context object.
 * @qp: Pointer to the hardware queue to use.
 * @pwqe: Pointer to the command WQE.
 *
 * Dispatches @pwqe based on its iocb_flag: NVME LS requests go to the
 * NVME LS work queue (allocating an ELS sglq and converting the BPL to
 * an SGL first); NVME FCP commands and NVMET responses go to the
 * hardware queue's NVME work queue.  Each path posts the WQE under the
 * ring lock and tracks the request on the ring's txcmplq.
 *
 * Return: 0 on success, WQE_BUSY when no sglq is available, WQE_ERROR on
 * SGL conversion failure or an unrecognized iocb_flag, or the nonzero
 * status from lpfc_sli4_wq_put().
 */
int
lpfc_sli4_issue_wqe(struct lpfc_hba *phba, struct lpfc_sli4_hdw_queue *qp,
		    struct lpfc_iocbq *pwqe)
{
	union lpfc_wqe128 *wqe = &pwqe->wqe;
	struct lpfc_nvmet_rcv_ctx *ctxp;
	struct lpfc_queue *wq;
	struct lpfc_sglq *sglq;
	struct lpfc_sli_ring *pring;
	unsigned long iflags;
	uint32_t ret = 0;

	/* NVME_LS and NVME_LS ABTS requests. */
	if (pwqe->iocb_flag & LPFC_IO_NVME_LS) {
		pring = phba->sli4_hba.nvmels_wq->pring;
		lpfc_qp_spin_lock_irqsave(&pring->ring_lock, iflags,
					  qp, wq_access);
		sglq = __lpfc_sli_get_els_sglq(phba, pwqe);
		if (!sglq) {
			spin_unlock_irqrestore(&pring->ring_lock, iflags);
			return WQE_BUSY;
		}
		pwqe->sli4_lxritag = sglq->sli4_lxritag;
		pwqe->sli4_xritag = sglq->sli4_xritag;
		if (lpfc_wqe_bpl2sgl(phba, pwqe, sglq) == NO_XRI) {
			spin_unlock_irqrestore(&pring->ring_lock, iflags);
			return WQE_ERROR;
		}
		bf_set(wqe_xri_tag, &pwqe->wqe.xmit_bls_rsp.wqe_com,
		       pwqe->sli4_xritag);
		ret = lpfc_sli4_wq_put(phba->sli4_hba.nvmels_wq, wqe);
		if (ret) {
			spin_unlock_irqrestore(&pring->ring_lock, iflags);
			return ret;
		}
		/* Track the posted WQE until its completion arrives */
		lpfc_sli_ringtxcmpl_put(phba, pring, pwqe);
		spin_unlock_irqrestore(&pring->ring_lock, iflags);
		return 0;
	}

	/* NVME FCP command and abort requests */
	if (pwqe->iocb_flag & LPFC_IO_NVME) {
		/* Post to this hardware queue's NVME work queue */
		wq = qp->nvme_wq;
		pring = wq->pring;

		bf_set(wqe_cqid, &wqe->generic.wqe_com, qp->nvme_cq_map);

		lpfc_qp_spin_lock_irqsave(&pring->ring_lock, iflags,
					  qp, wq_access);
		ret = lpfc_sli4_wq_put(wq, wqe);
		if (ret) {
			spin_unlock_irqrestore(&pring->ring_lock, iflags);
			return ret;
		}
		lpfc_sli_ringtxcmpl_put(phba, pring, pwqe);
		spin_unlock_irqrestore(&pring->ring_lock, iflags);
		return 0;
	}

	/* NVMET requests */
	if (pwqe->iocb_flag & LPFC_IO_NVMET) {
		/* Post to this hardware queue's NVME work queue */
		wq = qp->nvme_wq;
		pring = wq->pring;

		/* The XRI comes from the receive context's buffer */
		ctxp = pwqe->context2;
		sglq = ctxp->ctxbuf->sglq;
		if (pwqe->sli4_xritag == NO_XRI) {
			pwqe->sli4_lxritag = sglq->sli4_lxritag;
			pwqe->sli4_xritag = sglq->sli4_xritag;
		}
		bf_set(wqe_xri_tag, &pwqe->wqe.xmit_bls_rsp.wqe_com,
		       pwqe->sli4_xritag);
		bf_set(wqe_cqid, &wqe->generic.wqe_com, qp->nvme_cq_map);

		lpfc_qp_spin_lock_irqsave(&pring->ring_lock, iflags,
					  qp, wq_access);
		ret = lpfc_sli4_wq_put(wq, wqe);
		if (ret) {
			spin_unlock_irqrestore(&pring->ring_lock, iflags);
			return ret;
		}
		lpfc_sli_ringtxcmpl_put(phba, pring, pwqe);
		spin_unlock_irqrestore(&pring->ring_lock, iflags);
		return 0;
	}
	return WQE_ERROR;
}
19761
19762#ifdef LPFC_MXP_STAT
19763
19764
19765
19766
19767
19768
19769
19770
19771
19772
19773
19774
19775
19776
19777void lpfc_snapshot_mxp(struct lpfc_hba *phba, u32 hwqid)
19778{
19779 struct lpfc_sli4_hdw_queue *qp;
19780 struct lpfc_multixri_pool *multixri_pool;
19781 struct lpfc_pvt_pool *pvt_pool;
19782 struct lpfc_pbl_pool *pbl_pool;
19783 u32 txcmplq_cnt;
19784
19785 qp = &phba->sli4_hba.hdwq[hwqid];
19786 multixri_pool = qp->p_multixri_pool;
19787 if (!multixri_pool)
19788 return;
19789
19790 if (multixri_pool->stat_snapshot_taken == LPFC_MXP_SNAPSHOT_TAKEN) {
19791 pvt_pool = &qp->p_multixri_pool->pvt_pool;
19792 pbl_pool = &qp->p_multixri_pool->pbl_pool;
19793 txcmplq_cnt = qp->fcp_wq->pring->txcmplq_cnt;
19794 if (qp->nvme_wq)
19795 txcmplq_cnt += qp->nvme_wq->pring->txcmplq_cnt;
19796
19797 multixri_pool->stat_pbl_count = pbl_pool->count;
19798 multixri_pool->stat_pvt_count = pvt_pool->count;
19799 multixri_pool->stat_busy_count = txcmplq_cnt;
19800 }
19801
19802 multixri_pool->stat_snapshot_taken++;
19803}
19804#endif
19805
19806
19807
19808
19809
19810
19811
19812
19813
19814void lpfc_adjust_pvt_pool_count(struct lpfc_hba *phba, u32 hwqid)
19815{
19816 struct lpfc_multixri_pool *multixri_pool;
19817 u32 io_req_count;
19818 u32 prev_io_req_count;
19819
19820 multixri_pool = phba->sli4_hba.hdwq[hwqid].p_multixri_pool;
19821 if (!multixri_pool)
19822 return;
19823 io_req_count = multixri_pool->io_req_count;
19824 prev_io_req_count = multixri_pool->prev_io_req_count;
19825
19826 if (prev_io_req_count != io_req_count) {
19827
19828 multixri_pool->prev_io_req_count = io_req_count;
19829 } else {
19830
19831
19832
19833 lpfc_move_xri_pvt_to_pbl(phba, hwqid);
19834 }
19835}
19836
19837
19838
19839
19840
19841
19842
19843
19844
19845void lpfc_adjust_high_watermark(struct lpfc_hba *phba, u32 hwqid)
19846{
19847 u32 new_watermark;
19848 u32 watermark_max;
19849 u32 watermark_min;
19850 u32 xri_limit;
19851 u32 txcmplq_cnt;
19852 u32 abts_io_bufs;
19853 struct lpfc_multixri_pool *multixri_pool;
19854 struct lpfc_sli4_hdw_queue *qp;
19855
19856 qp = &phba->sli4_hba.hdwq[hwqid];
19857 multixri_pool = qp->p_multixri_pool;
19858 if (!multixri_pool)
19859 return;
19860 xri_limit = multixri_pool->xri_limit;
19861
19862 watermark_max = xri_limit;
19863 watermark_min = xri_limit / 2;
19864
19865 txcmplq_cnt = qp->fcp_wq->pring->txcmplq_cnt;
19866 abts_io_bufs = qp->abts_scsi_io_bufs;
19867 if (qp->nvme_wq) {
19868 txcmplq_cnt += qp->nvme_wq->pring->txcmplq_cnt;
19869 abts_io_bufs += qp->abts_nvme_io_bufs;
19870 }
19871
19872 new_watermark = txcmplq_cnt + abts_io_bufs;
19873 new_watermark = min(watermark_max, new_watermark);
19874 new_watermark = max(watermark_min, new_watermark);
19875 multixri_pool->pvt_pool.high_watermark = new_watermark;
19876
19877#ifdef LPFC_MXP_STAT
19878 multixri_pool->stat_max_hwm = max(multixri_pool->stat_max_hwm,
19879 new_watermark);
19880#endif
19881}
19882
19883
19884
19885
19886
19887
19888
19889
19890
19891
19892
/**
 * lpfc_move_xri_pvt_to_pbl - Move XRIs from the private to the public pool
 * @phba: pointer to lpfc hba data structure.
 * @hwqid: id of the hardware queue whose pools are rebalanced.
 *
 * Move free IO buffers from this hdwq's private pool to its public pool,
 * keeping at most low_watermark buffers private. Called when the private
 * pool has been idle (see lpfc_adjust_pvt_pool_count).
 *
 * Lock ordering: pbl_pool->lock is taken first, then pvt_pool->lock nested
 * inside it.
 */
void lpfc_move_xri_pvt_to_pbl(struct lpfc_hba *phba, u32 hwqid)
{
	struct lpfc_pbl_pool *pbl_pool;
	struct lpfc_pvt_pool *pvt_pool;
	struct lpfc_sli4_hdw_queue *qp;
	struct lpfc_io_buf *lpfc_ncmd;
	struct lpfc_io_buf *lpfc_ncmd_next;
	unsigned long iflag;
	struct list_head tmp_list;
	u32 tmp_count;

	qp = &phba->sli4_hba.hdwq[hwqid];
	pbl_pool = &qp->p_multixri_pool->pbl_pool;
	pvt_pool = &qp->p_multixri_pool->pvt_pool;
	tmp_count = 0;

	lpfc_qp_spin_lock_irqsave(&pbl_pool->lock, iflag, qp, mv_to_pub_pool);
	lpfc_qp_spin_lock(&pvt_pool->lock, qp, mv_from_pvt_pool);

	if (pvt_pool->count > pvt_pool->low_watermark) {
		/* Step 1: move low_watermark worth of buffers from the head
		 * of pvt_pool onto tmp_list — these stay private.
		 * Step 2: splice whatever is left of pvt_pool into pbl_pool.
		 * Step 3: put tmp_list back as the new pvt_pool contents.
		 */
		INIT_LIST_HEAD(&tmp_list);
		list_for_each_entry_safe(lpfc_ncmd, lpfc_ncmd_next,
					 &pvt_pool->list, list) {
			list_move_tail(&lpfc_ncmd->list, &tmp_list);
			tmp_count++;
			if (tmp_count >= pvt_pool->low_watermark)
				break;
		}

		/* Move all bufs remaining in pvt_pool to pbl_pool. */
		list_splice_init(&pvt_pool->list, &pbl_pool->list);

		/* Move the retained bufs back to pvt_pool. */
		list_splice(&tmp_list, &pvt_pool->list);

		pbl_pool->count += (pvt_pool->count - tmp_count);
		pvt_pool->count = tmp_count;
	} else {
		/* At or below the low watermark: hand everything over. */
		list_splice_init(&pvt_pool->list, &pbl_pool->list);
		pbl_pool->count += pvt_pool->count;
		pvt_pool->count = 0;
	}

	spin_unlock(&pvt_pool->lock);
	spin_unlock_irqrestore(&pbl_pool->lock, iflag);
}
19945
19946
19947
19948
19949
19950
19951
19952
19953
19954
19955
19956
19957
19958
19959
19960
19961
/**
 * _lpfc_move_xri_pbl_to_pvt - Move XRIs from a public pool to a private pool
 * @phba: pointer to lpfc hba data structure.
 * @qp: hardware queue used for lock statistics tagging.
 * @pbl_pool: public free XRI pool to take buffers from.
 * @pvt_pool: private free XRI pool to put buffers into.
 * @count: maximum number of buffers to move.
 *
 * Try to move up to @count free IO buffers from @pbl_pool to @pvt_pool.
 * Fewer than @count may be moved if the public pool runs out first.
 *
 * Only a trylock is used on @pbl_pool so that a busy or contended pool is
 * skipped quickly, letting the caller round-robin to another hdwq's public
 * pool instead of spinning.
 *
 * Return:
 *   true  - buffers were moved (pbl_pool was lockable and non-empty)
 *   false - pbl_pool was empty or its lock could not be taken
 */
static bool
_lpfc_move_xri_pbl_to_pvt(struct lpfc_hba *phba, struct lpfc_sli4_hdw_queue *qp,
			  struct lpfc_pbl_pool *pbl_pool,
			  struct lpfc_pvt_pool *pvt_pool, u32 count)
{
	struct lpfc_io_buf *lpfc_ncmd;
	struct lpfc_io_buf *lpfc_ncmd_next;
	unsigned long iflag;
	int ret;

	ret = spin_trylock_irqsave(&pbl_pool->lock, iflag);
	if (ret) {
		if (pbl_pool->count) {
			/* Move a batch of XRIs from public to private pool.
			 * pvt_pool->lock nests inside pbl_pool->lock.
			 */
			lpfc_qp_spin_lock(&pvt_pool->lock, qp, mv_to_pvt_pool);
			list_for_each_entry_safe(lpfc_ncmd,
						 lpfc_ncmd_next,
						 &pbl_pool->list,
						 list) {
				list_move_tail(&lpfc_ncmd->list,
					       &pvt_pool->list);
				pvt_pool->count++;
				pbl_pool->count--;
				count--;
				if (count == 0)
					break;
			}

			spin_unlock(&pvt_pool->lock);
			spin_unlock_irqrestore(&pbl_pool->lock, iflag);
			return true;
		}
		spin_unlock_irqrestore(&pbl_pool->lock, iflag);
	}

	return false;
}
19999
20000
20001
20002
20003
20004
20005
20006
20007
20008
20009
20010
20011
/**
 * lpfc_move_xri_pbl_to_pvt - Refill a private pool from the public pools
 * @phba: pointer to lpfc hba data structure.
 * @hwqid: id of the hardware queue whose private pool needs buffers.
 *
 * Try the local hdwq's public pool first; if it is empty or locked, walk the
 * remaining hdwqs' public pools round-robin (starting after rrb_next_hwqid)
 * until buffers are obtained or every hdwq has been tried once.
 */
void lpfc_move_xri_pbl_to_pvt(struct lpfc_hba *phba, u32 hwqid, u32 count)
{
	struct lpfc_multixri_pool *multixri_pool;
	struct lpfc_multixri_pool *next_multixri_pool;
	struct lpfc_pvt_pool *pvt_pool;
	struct lpfc_pbl_pool *pbl_pool;
	struct lpfc_sli4_hdw_queue *qp;
	u32 next_hwqid;
	u32 hwq_count;
	int ret;

	qp = &phba->sli4_hba.hdwq[hwqid];
	multixri_pool = qp->p_multixri_pool;
	pvt_pool = &multixri_pool->pvt_pool;
	pbl_pool = &multixri_pool->pbl_pool;

	/* Check if the local public pool can satisfy the request. */
	ret = _lpfc_move_xri_pbl_to_pvt(phba, qp, pbl_pool, pvt_pool, count);
	if (ret) {
#ifdef LPFC_MXP_STAT
		multixri_pool->local_pbl_hit_count++;
#endif
		return;
	}

	hwq_count = phba->cfg_hdw_queue;

	/* Resume the round-robin where the previous call left off. */
	next_hwqid = multixri_pool->rrb_next_hwqid;

	do {
		/* Advance to the next hdwq, wrapping at hwq_count. */
		next_hwqid = (next_hwqid + 1) % hwq_count;

		next_multixri_pool =
			phba->sli4_hba.hdwq[next_hwqid].p_multixri_pool;
		pbl_pool = &next_multixri_pool->pbl_pool;

		/* Try that hdwq's public free xri pool. */
		ret = _lpfc_move_xri_pbl_to_pvt(
			phba, qp, pbl_pool, pvt_pool, count);

		/* Exit on success or once every hwqid has been checked
		 * (we are back at the starting point).
		 */
	} while (!ret && next_hwqid != multixri_pool->rrb_next_hwqid);

	/* Remember where to start next time. */
	multixri_pool->rrb_next_hwqid = next_hwqid;

	if (!ret) {
		/* stats: all public pools are empty */
		multixri_pool->pbl_empty_count++;
	}

#ifdef LPFC_MXP_STAT
	if (ret) {
		if (next_hwqid == hwqid)
			multixri_pool->local_pbl_hit_count++;
		else
			multixri_pool->other_pbl_hit_count++;
	}
#endif
}
20074
20075
20076
20077
20078
20079
20080
20081
20082
20083void lpfc_keep_pvt_pool_above_lowwm(struct lpfc_hba *phba, u32 hwqid)
20084{
20085 struct lpfc_multixri_pool *multixri_pool;
20086 struct lpfc_pvt_pool *pvt_pool;
20087
20088 multixri_pool = phba->sli4_hba.hdwq[hwqid].p_multixri_pool;
20089 pvt_pool = &multixri_pool->pvt_pool;
20090
20091 if (pvt_pool->count < pvt_pool->low_watermark)
20092 lpfc_move_xri_pbl_to_pvt(phba, hwqid, XRI_BATCH);
20093}
20094
20095
20096
20097
20098
20099
20100
20101
20102
20103
20104
20105
20106
/**
 * lpfc_release_io_buf - Return one IO buffer back to a free pool
 * @phba: pointer to lpfc hba data structure.
 * @lpfc_ncmd: IO buffer being released.
 * @qp: hardware queue the buffer belongs to.
 *
 * With XRI rebalancing on: an expedite buffer goes back to the expedite
 * pool; otherwise the buffer goes to the private or public pool of @qp,
 * chosen by comparing pool counts against the watermarks and xri_limit.
 * With XRI rebalancing off: the buffer is appended to @qp's put list.
 */
void lpfc_release_io_buf(struct lpfc_hba *phba, struct lpfc_io_buf *lpfc_ncmd,
			 struct lpfc_sli4_hdw_queue *qp)
{
	unsigned long iflag;
	struct lpfc_pbl_pool *pbl_pool;
	struct lpfc_pvt_pool *pvt_pool;
	struct lpfc_epd_pool *epd_pool;
	u32 txcmplq_cnt;
	u32 xri_owned;
	u32 xri_limit;
	u32 abts_io_bufs;

	/* Clear command and completion pointers so a stale callback cannot
	 * fire if this buffer is reused for a different protocol.
	 */
	lpfc_ncmd->nvmeCmd = NULL;
	lpfc_ncmd->cur_iocbq.wqe_cmpl = NULL;
	lpfc_ncmd->cur_iocbq.iocb_cmpl = NULL;

	if (phba->cfg_xri_rebalancing) {
		if (lpfc_ncmd->expedite) {
			/* Urgent IO buffer: return it to the expedite pool. */
			epd_pool = &phba->epd_pool;
			spin_lock_irqsave(&epd_pool->lock, iflag);
			list_add_tail(&lpfc_ncmd->list, &epd_pool->list);
			epd_pool->count++;
			spin_unlock_irqrestore(&epd_pool->lock, iflag);
			return;
		}

		/* The multixri pools may already have been torn down
		 * (presumably during offline/teardown — TODO confirm
		 * against the pool-destroy path); if so, drop the buffer
		 * rather than dereference a NULL pool.
		 */
		if (!qp->p_multixri_pool)
			return;

		pbl_pool = &qp->p_multixri_pool->pbl_pool;
		pvt_pool = &qp->p_multixri_pool->pvt_pool;

		/* Count XRIs currently busy: outstanding commands plus
		 * aborted IO buffers (FCP always, NVME when present).
		 */
		txcmplq_cnt = qp->fcp_wq->pring->txcmplq_cnt;
		abts_io_bufs = qp->abts_scsi_io_bufs;
		if (qp->nvme_wq) {
			txcmplq_cnt += qp->nvme_wq->pring->txcmplq_cnt;
			abts_io_bufs += qp->abts_nvme_io_bufs;
		}

		xri_owned = pvt_pool->count + txcmplq_cnt + abts_io_bufs;
		xri_limit = qp->p_multixri_pool->xri_limit;

#ifdef LPFC_MXP_STAT
		if (xri_owned <= xri_limit)
			qp->p_multixri_pool->below_limit_count++;
		else
			qp->p_multixri_pool->above_limit_count++;
#endif

		/* Keep the buffer private while the private pool is under
		 * its low watermark, or while we own fewer XRIs than the
		 * limit and are still under the high watermark; otherwise
		 * return it to the public pool.
		 */
		if ((pvt_pool->count < pvt_pool->low_watermark) ||
		    (xri_owned < xri_limit &&
		     pvt_pool->count < pvt_pool->high_watermark)) {
			lpfc_qp_spin_lock_irqsave(&pvt_pool->lock, iflag,
						  qp, free_pvt_pool);
			list_add_tail(&lpfc_ncmd->list,
				      &pvt_pool->list);
			pvt_pool->count++;
			spin_unlock_irqrestore(&pvt_pool->lock, iflag);
		} else {
			lpfc_qp_spin_lock_irqsave(&pbl_pool->lock, iflag,
						  qp, free_pub_pool);
			list_add_tail(&lpfc_ncmd->list,
				      &pbl_pool->list);
			pbl_pool->count++;
			spin_unlock_irqrestore(&pbl_pool->lock, iflag);
		}
	} else {
		/* Rebalancing off: buffer goes back on the put list. */
		lpfc_qp_spin_lock_irqsave(&qp->io_buf_list_put_lock, iflag,
					  qp, free_xri);
		list_add_tail(&lpfc_ncmd->list,
			      &qp->lpfc_io_buf_list_put);
		qp->put_io_bufs++;
		spin_unlock_irqrestore(&qp->io_buf_list_put_lock,
				       iflag);
	}
}
20192
20193
20194
20195
20196
20197
20198
20199
20200
20201
20202
20203
20204
20205static struct lpfc_io_buf *
20206lpfc_get_io_buf_from_private_pool(struct lpfc_hba *phba,
20207 struct lpfc_sli4_hdw_queue *qp,
20208 struct lpfc_pvt_pool *pvt_pool,
20209 struct lpfc_nodelist *ndlp)
20210{
20211 struct lpfc_io_buf *lpfc_ncmd;
20212 struct lpfc_io_buf *lpfc_ncmd_next;
20213 unsigned long iflag;
20214
20215 lpfc_qp_spin_lock_irqsave(&pvt_pool->lock, iflag, qp, alloc_pvt_pool);
20216 list_for_each_entry_safe(lpfc_ncmd, lpfc_ncmd_next,
20217 &pvt_pool->list, list) {
20218 if (lpfc_test_rrq_active(
20219 phba, ndlp, lpfc_ncmd->cur_iocbq.sli4_lxritag))
20220 continue;
20221 list_del(&lpfc_ncmd->list);
20222 pvt_pool->count--;
20223 spin_unlock_irqrestore(&pvt_pool->lock, iflag);
20224 return lpfc_ncmd;
20225 }
20226 spin_unlock_irqrestore(&pvt_pool->lock, iflag);
20227
20228 return NULL;
20229}
20230
20231
20232
20233
20234
20235
20236
20237
20238
20239
20240
20241static struct lpfc_io_buf *
20242lpfc_get_io_buf_from_expedite_pool(struct lpfc_hba *phba)
20243{
20244 struct lpfc_io_buf *lpfc_ncmd;
20245 struct lpfc_io_buf *lpfc_ncmd_next;
20246 unsigned long iflag;
20247 struct lpfc_epd_pool *epd_pool;
20248
20249 epd_pool = &phba->epd_pool;
20250 lpfc_ncmd = NULL;
20251
20252 spin_lock_irqsave(&epd_pool->lock, iflag);
20253 if (epd_pool->count > 0) {
20254 list_for_each_entry_safe(lpfc_ncmd, lpfc_ncmd_next,
20255 &epd_pool->list, list) {
20256 list_del(&lpfc_ncmd->list);
20257 epd_pool->count--;
20258 break;
20259 }
20260 }
20261 spin_unlock_irqrestore(&epd_pool->lock, iflag);
20262
20263 return lpfc_ncmd;
20264}
20265
20266
20267
20268
20269
20270
20271
20272
20273
20274
20275
20276
20277
20278
20279
20280
20281
20282
20283
20284
20285
20286
20287
20288
20289static struct lpfc_io_buf *
20290lpfc_get_io_buf_from_multixri_pools(struct lpfc_hba *phba,
20291 struct lpfc_nodelist *ndlp,
20292 int hwqid, int expedite)
20293{
20294 struct lpfc_sli4_hdw_queue *qp;
20295 struct lpfc_multixri_pool *multixri_pool;
20296 struct lpfc_pvt_pool *pvt_pool;
20297 struct lpfc_io_buf *lpfc_ncmd;
20298
20299 qp = &phba->sli4_hba.hdwq[hwqid];
20300 lpfc_ncmd = NULL;
20301 multixri_pool = qp->p_multixri_pool;
20302 pvt_pool = &multixri_pool->pvt_pool;
20303 multixri_pool->io_req_count++;
20304
20305
20306 if (pvt_pool->count == 0)
20307 lpfc_move_xri_pbl_to_pvt(phba, hwqid, XRI_BATCH);
20308
20309
20310 lpfc_ncmd = lpfc_get_io_buf_from_private_pool(phba, qp, pvt_pool, ndlp);
20311
20312 if (lpfc_ncmd) {
20313 lpfc_ncmd->hdwq = qp;
20314 lpfc_ncmd->hdwq_no = hwqid;
20315 } else if (expedite) {
20316
20317
20318
20319 lpfc_ncmd = lpfc_get_io_buf_from_expedite_pool(phba);
20320 }
20321
20322 return lpfc_ncmd;
20323}
20324
20325static inline struct lpfc_io_buf *
20326lpfc_io_buf(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp, int idx)
20327{
20328 struct lpfc_sli4_hdw_queue *qp;
20329 struct lpfc_io_buf *lpfc_cmd, *lpfc_cmd_next;
20330
20331 qp = &phba->sli4_hba.hdwq[idx];
20332 list_for_each_entry_safe(lpfc_cmd, lpfc_cmd_next,
20333 &qp->lpfc_io_buf_list_get, list) {
20334 if (lpfc_test_rrq_active(phba, ndlp,
20335 lpfc_cmd->cur_iocbq.sli4_lxritag))
20336 continue;
20337
20338 if (lpfc_cmd->flags & LPFC_SBUF_NOT_POSTED)
20339 continue;
20340
20341 list_del_init(&lpfc_cmd->list);
20342 qp->get_io_bufs--;
20343 lpfc_cmd->hdwq = qp;
20344 lpfc_cmd->hdwq_no = idx;
20345 return lpfc_cmd;
20346 }
20347 return NULL;
20348}
20349
20350
20351
20352
20353
20354
20355
20356
20357
20358
20359
20360
20361
20362
20363
20364
20365
20366
20367
/**
 * lpfc_get_io_buf - Get one IO buffer from the free pool
 * @phba: pointer to lpfc hba data structure.
 * @ndlp: node the IO is destined for, used for the RRQ check.
 * @hwqid: id of the hardware queue to allocate from.
 * @expedite: nonzero means this request is urgent.
 *
 * With XRI rebalancing on, allocate through the multiXRI pools. Otherwise
 * take from the hdwq's get list, refilling it from the put list when it
 * runs low.
 *
 * Return: an IO buffer on success, NULL on failure.
 */
struct lpfc_io_buf *lpfc_get_io_buf(struct lpfc_hba *phba,
				    struct lpfc_nodelist *ndlp,
				    u32 hwqid, int expedite)
{
	struct lpfc_sli4_hdw_queue *qp;
	unsigned long iflag;
	struct lpfc_io_buf *lpfc_cmd;

	qp = &phba->sli4_hba.hdwq[hwqid];
	lpfc_cmd = NULL;

	if (phba->cfg_xri_rebalancing)
		lpfc_cmd = lpfc_get_io_buf_from_multixri_pools(
			phba, ndlp, hwqid, expedite);
	else {
		lpfc_qp_spin_lock_irqsave(&qp->io_buf_list_get_lock, iflag,
					  qp, alloc_xri_get);
		/* Dip into the last LPFC_NVME_EXPEDITE_XRICNT buffers only
		 * when the request is marked expedite.
		 */
		if (qp->get_io_bufs > LPFC_NVME_EXPEDITE_XRICNT || expedite)
			lpfc_cmd = lpfc_io_buf(phba, ndlp, hwqid);
		if (!lpfc_cmd) {
			/* Get list exhausted (or reserved): splice the put
			 * list over, with the put lock nested inside the
			 * get lock, and retry once.
			 */
			lpfc_qp_spin_lock(&qp->io_buf_list_put_lock,
					  qp, alloc_xri_put);
			list_splice(&qp->lpfc_io_buf_list_put,
				    &qp->lpfc_io_buf_list_get);
			qp->get_io_bufs += qp->put_io_bufs;
			INIT_LIST_HEAD(&qp->lpfc_io_buf_list_put);
			qp->put_io_bufs = 0;
			spin_unlock(&qp->io_buf_list_put_lock);
			if (qp->get_io_bufs > LPFC_NVME_EXPEDITE_XRICNT ||
			    expedite)
				lpfc_cmd = lpfc_io_buf(phba, ndlp, hwqid);
		}
		spin_unlock_irqrestore(&qp->io_buf_list_get_lock, iflag);
	}

	return lpfc_cmd;
}
20405