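/*
 * SLI (Service Level Interface) support routines for the Emulex
 * LightPulse Fibre Channel (lpfc) host bus adapter driver.
 */
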
24#include <linux/blkdev.h>
25#include <linux/pci.h>
26#include <linux/interrupt.h>
27#include <linux/delay.h>
28#include <linux/slab.h>
29#include <linux/lockdep.h>
30
31#include <scsi/scsi.h>
32#include <scsi/scsi_cmnd.h>
33#include <scsi/scsi_device.h>
34#include <scsi/scsi_host.h>
35#include <scsi/scsi_transport_fc.h>
36#include <scsi/fc/fc_fs.h>
37#include <linux/aer.h>
38#ifdef CONFIG_X86
39#include <asm/set_memory.h>
40#endif
41
42#include "lpfc_hw4.h"
43#include "lpfc_hw.h"
44#include "lpfc_sli.h"
45#include "lpfc_sli4.h"
46#include "lpfc_nl.h"
47#include "lpfc_disc.h"
48#include "lpfc.h"
49#include "lpfc_scsi.h"
50#include "lpfc_nvme.h"
51#include "lpfc_crtn.h"
52#include "lpfc_logmsg.h"
53#include "lpfc_compat.h"
54#include "lpfc_debugfs.h"
55#include "lpfc_vport.h"
56#include "lpfc_version.h"
59typedef enum _lpfc_iocb_type {
60 LPFC_UNKNOWN_IOCB,
61 LPFC_UNSOL_IOCB,
62 LPFC_SOL_IOCB,
63 LPFC_ABORT_IOCB
64} lpfc_iocb_type;
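
/* Prototypes for helpers local to this file. */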
68static int lpfc_sli_issue_mbox_s4(struct lpfc_hba *, LPFC_MBOXQ_t *,
69 uint32_t);
70static int lpfc_sli4_read_rev(struct lpfc_hba *, LPFC_MBOXQ_t *,
71 uint8_t *, uint32_t *);
72static struct lpfc_iocbq *lpfc_sli4_els_wcqe_to_rspiocbq(struct lpfc_hba *,
73 struct lpfc_iocbq *);
74static void lpfc_sli4_send_seq_to_ulp(struct lpfc_vport *,
75 struct hbq_dmabuf *);
76static void lpfc_sli4_handle_mds_loopback(struct lpfc_vport *vport,
77 struct hbq_dmabuf *dmabuf);
78static bool lpfc_sli4_fp_handle_cqe(struct lpfc_hba *phba,
79 struct lpfc_queue *cq, struct lpfc_cqe *cqe);
80static int lpfc_sli4_post_sgl_list(struct lpfc_hba *, struct list_head *,
81 int);
82static void lpfc_sli4_hba_handle_eqe(struct lpfc_hba *phba,
83 struct lpfc_queue *eq,
84 struct lpfc_eqe *eqe);
85static bool lpfc_sli4_mbox_completions_pending(struct lpfc_hba *phba);
86static bool lpfc_sli4_process_missed_mbox_completions(struct lpfc_hba *phba);
87static struct lpfc_cqe *lpfc_sli4_cq_get(struct lpfc_queue *q);
88static void __lpfc_sli4_consume_cqe(struct lpfc_hba *phba,
89 struct lpfc_queue *cq,
90 struct lpfc_cqe *cqe);
91
92static IOCB_t *
93lpfc_get_iocb_from_iocbq(struct lpfc_iocbq *iocbq)
94{
95 return &iocbq->iocb;
96}
97
98#if defined(CONFIG_64BIT) && defined(__LITTLE_ENDIAN)
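
/**
 * lpfc_sli4_pcimem_bcopy - Copy a queue entry using 64-bit accesses
 * @srcp: source buffer
 * @destp: destination buffer (typically PCI memory)
 * @cnt: number of bytes to copy, a multiple of 8
 *
 * Used on 64-bit little-endian builds; other configurations fall back
 * to lpfc_sli_pcimem_bcopy().
 **/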
112static void
113lpfc_sli4_pcimem_bcopy(void *srcp, void *destp, uint32_t cnt)
114{
115 uint64_t *src = srcp;
116 uint64_t *dest = destp;
117 int i;
118
119 for (i = 0; i < (int)cnt; i += sizeof(uint64_t))
120 *dest++ = *src++;
121}
122#else
123#define lpfc_sli4_pcimem_bcopy(a, b, c) lpfc_sli_pcimem_bcopy(a, b, c)
124#endif
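
/**
 * lpfc_sli4_wq_put - Post a work queue entry to a SLI4 Work Queue
 * @q: the Work Queue to post to
 * @wqe: the work queue entry to post
 *
 * Copies @wqe into the next free WQ entry, optionally pushes it through
 * the DPP (direct packet push) window, and rings the WQ doorbell.
 *
 * Return: 0 on success, -ENOMEM if @q is NULL, -EBUSY if the queue is
 * full, -EINVAL for an unsupported doorbell format.
 **/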
138static int
139lpfc_sli4_wq_put(struct lpfc_queue *q, union lpfc_wqe128 *wqe)
140{
141 union lpfc_wqe *temp_wqe;
142 struct lpfc_register doorbell;
143 uint32_t host_index;
144 uint32_t idx;
145 uint32_t i = 0;
146 uint8_t *tmp;
147 u32 if_type;
148
149
150 if (unlikely(!q))
151 return -ENOMEM;
152 temp_wqe = lpfc_sli4_qe(q, q->host_index);
153
154
155 idx = ((q->host_index + 1) % q->entry_count);
156 if (idx == q->hba_index) {
157 q->WQ_overflow++;
158 return -EBUSY;
159 }
160 q->WQ_posted++;
161
162 if (!((q->host_index + 1) % q->notify_interval))
163 bf_set(wqe_wqec, &wqe->generic.wqe_com, 1);
164 else
165 bf_set(wqe_wqec, &wqe->generic.wqe_com, 0);
166 if (q->phba->sli3_options & LPFC_SLI4_PHWQ_ENABLED)
167 bf_set(wqe_wqid, &wqe->generic.wqe_com, q->queue_id);
168 lpfc_sli4_pcimem_bcopy(wqe, temp_wqe, q->entry_size);
169 if (q->dpp_enable && q->phba->cfg_enable_dpp) {
170
171 tmp = (uint8_t *)temp_wqe;
172#ifdef __raw_writeq
173 for (i = 0; i < q->entry_size; i += sizeof(uint64_t))
174 __raw_writeq(*((uint64_t *)(tmp + i)),
175 q->dpp_regaddr + i);
176#else
177 for (i = 0; i < q->entry_size; i += sizeof(uint32_t))
178 __raw_writel(*((uint32_t *)(tmp + i)),
179 q->dpp_regaddr + i);
180#endif
181 }
182
183 wmb();
184
185
186 host_index = q->host_index;
187
188 q->host_index = idx;
189
190
191 doorbell.word0 = 0;
192 if (q->db_format == LPFC_DB_LIST_FORMAT) {
193 if (q->dpp_enable && q->phba->cfg_enable_dpp) {
194 bf_set(lpfc_if6_wq_db_list_fm_num_posted, &doorbell, 1);
195 bf_set(lpfc_if6_wq_db_list_fm_dpp, &doorbell, 1);
196 bf_set(lpfc_if6_wq_db_list_fm_dpp_id, &doorbell,
197 q->dpp_id);
198 bf_set(lpfc_if6_wq_db_list_fm_id, &doorbell,
199 q->queue_id);
200 } else {
201 bf_set(lpfc_wq_db_list_fm_num_posted, &doorbell, 1);
202 bf_set(lpfc_wq_db_list_fm_id, &doorbell, q->queue_id);
203
204
205 if_type = bf_get(lpfc_sli_intf_if_type,
206 &q->phba->sli4_hba.sli_intf);
207 if (if_type != LPFC_SLI_INTF_IF_TYPE_6)
208 bf_set(lpfc_wq_db_list_fm_index, &doorbell,
209 host_index);
210 }
211 } else if (q->db_format == LPFC_DB_RING_FORMAT) {
212 bf_set(lpfc_wq_db_ring_fm_num_posted, &doorbell, 1);
213 bf_set(lpfc_wq_db_ring_fm_id, &doorbell, q->queue_id);
214 } else {
215 return -EINVAL;
216 }
217 writel(doorbell.word0, q->db_regaddr);
218
219 return 0;
220}
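
/**
 * lpfc_sli4_wq_release - Record WQ entries released by the HBA
 * @q: the Work Queue
 * @index: hba index reported for the released entries
 *
 * Updates the internal hba index so the released slots can be reused.
 **/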
232static void
233lpfc_sli4_wq_release(struct lpfc_queue *q, uint32_t index)
234{
235
236 if (unlikely(!q))
237 return;
238
239 q->hba_index = index;
240}
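
/**
 * lpfc_sli4_mq_put - Post a mailbox queue entry to a SLI4 Mailbox Queue
 * @q: the Mailbox Queue to post to
 * @mqe: the mailbox queue entry to post
 *
 * Return: 0 on success, -ENOMEM if the queue is NULL or full.
 **/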
254static uint32_t
255lpfc_sli4_mq_put(struct lpfc_queue *q, struct lpfc_mqe *mqe)
256{
257 struct lpfc_mqe *temp_mqe;
258 struct lpfc_register doorbell;
259
260
261 if (unlikely(!q))
262 return -ENOMEM;
263 temp_mqe = lpfc_sli4_qe(q, q->host_index);
264
265
266 if (((q->host_index + 1) % q->entry_count) == q->hba_index)
267 return -ENOMEM;
268 lpfc_sli4_pcimem_bcopy(mqe, temp_mqe, q->entry_size);
269
270 q->phba->mbox = (MAILBOX_t *)temp_mqe;
271
272
273 q->host_index = ((q->host_index + 1) % q->entry_count);
274
275
276 doorbell.word0 = 0;
277 bf_set(lpfc_mq_doorbell_num_posted, &doorbell, 1);
278 bf_set(lpfc_mq_doorbell_id, &doorbell, q->queue_id);
279 writel(doorbell.word0, q->phba->sli4_hba.MQDBregaddr);
280 return 0;
281}
293static uint32_t
294lpfc_sli4_mq_release(struct lpfc_queue *q)
295{
296
297 if (unlikely(!q))
298 return 0;
299
300
301 q->phba->mbox = NULL;
302 q->hba_index = ((q->hba_index + 1) % q->entry_count);
303 return 1;
304}
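
/**
 * lpfc_sli4_eq_get - Get the next valid Event Queue Entry, if any
 * @q: the Event Queue to read from
 *
 * Return: the next valid EQE, or NULL if the queue is empty. The entry
 * is not consumed here.
 **/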
315static struct lpfc_eqe *
316lpfc_sli4_eq_get(struct lpfc_queue *q)
317{
318 struct lpfc_eqe *eqe;
319
320
321 if (unlikely(!q))
322 return NULL;
323 eqe = lpfc_sli4_qe(q, q->host_index);
324
325
326 if (bf_get_le32(lpfc_eqe_valid, eqe) != q->qe_valid)
327 return NULL;
328
329
330
331
332
333
334
335
336
337
338 mb();
339 return eqe;
340}
347void
348lpfc_sli4_eq_clr_intr(struct lpfc_queue *q)
349{
350 struct lpfc_register doorbell;
351
352 doorbell.word0 = 0;
353 bf_set(lpfc_eqcq_doorbell_eqci, &doorbell, 1);
354 bf_set(lpfc_eqcq_doorbell_qt, &doorbell, LPFC_QUEUE_TYPE_EVENT);
355 bf_set(lpfc_eqcq_doorbell_eqid_hi, &doorbell,
356 (q->queue_id >> LPFC_EQID_HI_FIELD_SHIFT));
357 bf_set(lpfc_eqcq_doorbell_eqid_lo, &doorbell, q->queue_id);
358 writel(doorbell.word0, q->phba->sli4_hba.EQDBregaddr);
359}
366void
367lpfc_sli4_if6_eq_clr_intr(struct lpfc_queue *q)
368{
369 struct lpfc_register doorbell;
370
371 doorbell.word0 = 0;
372 bf_set(lpfc_if6_eq_doorbell_eqid, &doorbell, q->queue_id);
373 writel(doorbell.word0, q->phba->sli4_hba.EQDBregaddr);
374}
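
/**
 * lpfc_sli4_write_eq_db - Write the EQ doorbell (if_type 0/2 format)
 * @phba: adapter owning the Event Queue
 * @q: the Event Queue being serviced
 * @count: number of EQEs being released
 * @arm: true to re-arm the EQ for interrupts
 **/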
387void
388lpfc_sli4_write_eq_db(struct lpfc_hba *phba, struct lpfc_queue *q,
389 uint32_t count, bool arm)
390{
391 struct lpfc_register doorbell;
392
393
394 if (unlikely(!q || (count == 0 && !arm)))
395 return;
396
397
398 doorbell.word0 = 0;
399 if (arm) {
400 bf_set(lpfc_eqcq_doorbell_arm, &doorbell, 1);
401 bf_set(lpfc_eqcq_doorbell_eqci, &doorbell, 1);
402 }
403 bf_set(lpfc_eqcq_doorbell_num_released, &doorbell, count);
404 bf_set(lpfc_eqcq_doorbell_qt, &doorbell, LPFC_QUEUE_TYPE_EVENT);
405 bf_set(lpfc_eqcq_doorbell_eqid_hi, &doorbell,
406 (q->queue_id >> LPFC_EQID_HI_FIELD_SHIFT));
407 bf_set(lpfc_eqcq_doorbell_eqid_lo, &doorbell, q->queue_id);
408 writel(doorbell.word0, q->phba->sli4_hba.EQDBregaddr);
409
410 if ((q->phba->intr_type == INTx) && (arm == LPFC_QUEUE_REARM))
411 readl(q->phba->sli4_hba.EQDBregaddr);
412}
425void
426lpfc_sli4_if6_write_eq_db(struct lpfc_hba *phba, struct lpfc_queue *q,
427 uint32_t count, bool arm)
428{
429 struct lpfc_register doorbell;
430
431
432 if (unlikely(!q || (count == 0 && !arm)))
433 return;
434
435
436 doorbell.word0 = 0;
437 if (arm)
438 bf_set(lpfc_if6_eq_doorbell_arm, &doorbell, 1);
439 bf_set(lpfc_if6_eq_doorbell_num_released, &doorbell, count);
440 bf_set(lpfc_if6_eq_doorbell_eqid, &doorbell, q->queue_id);
441 writel(doorbell.word0, q->phba->sli4_hba.EQDBregaddr);
442
443 if ((q->phba->intr_type == INTx) && (arm == LPFC_QUEUE_REARM))
444 readl(q->phba->sli4_hba.EQDBregaddr);
445}
446
447static void
448__lpfc_sli4_consume_eqe(struct lpfc_hba *phba, struct lpfc_queue *eq,
449 struct lpfc_eqe *eqe)
450{
451 if (!phba->sli4_hba.pc_sli4_params.eqav)
452 bf_set_le32(lpfc_eqe_valid, eqe, 0);
453
454 eq->host_index = ((eq->host_index + 1) % eq->entry_count);
455
456
457 if (phba->sli4_hba.pc_sli4_params.eqav && !eq->host_index)
458 eq->qe_valid = (eq->qe_valid) ? 0 : 1;
459}
460
461static void
462lpfc_sli4_eqcq_flush(struct lpfc_hba *phba, struct lpfc_queue *eq)
463{
464 struct lpfc_eqe *eqe = NULL;
465 u32 eq_count = 0, cq_count = 0;
466 struct lpfc_cqe *cqe = NULL;
467 struct lpfc_queue *cq = NULL, *childq = NULL;
468 int cqid = 0;
469
470
471 eqe = lpfc_sli4_eq_get(eq);
472 while (eqe) {
473
474 cqid = bf_get_le32(lpfc_eqe_resource_id, eqe);
475 cq = NULL;
476
477 list_for_each_entry(childq, &eq->child_list, list) {
478 if (childq->queue_id == cqid) {
479 cq = childq;
480 break;
481 }
482 }
483
484 if (cq) {
485 cqe = lpfc_sli4_cq_get(cq);
486 while (cqe) {
487 __lpfc_sli4_consume_cqe(phba, cq, cqe);
488 cq_count++;
489 cqe = lpfc_sli4_cq_get(cq);
490 }
491
492 phba->sli4_hba.sli4_write_cq_db(phba, cq, cq_count,
493 LPFC_QUEUE_REARM);
494 cq_count = 0;
495 }
496 __lpfc_sli4_consume_eqe(phba, eq, eqe);
497 eq_count++;
498 eqe = lpfc_sli4_eq_get(eq);
499 }
500
501
502 phba->sli4_hba.sli4_write_eq_db(phba, eq, eq_count, LPFC_QUEUE_REARM);
503}
504
505static int
506lpfc_sli4_process_eq(struct lpfc_hba *phba, struct lpfc_queue *eq,
507 uint8_t rearm)
508{
509 struct lpfc_eqe *eqe;
510 int count = 0, consumed = 0;
511
512 if (cmpxchg(&eq->queue_claimed, 0, 1) != 0)
513 goto rearm_and_exit;
514
515 eqe = lpfc_sli4_eq_get(eq);
516 while (eqe) {
517 lpfc_sli4_hba_handle_eqe(phba, eq, eqe);
518 __lpfc_sli4_consume_eqe(phba, eq, eqe);
519
520 consumed++;
521 if (!(++count % eq->max_proc_limit))
522 break;
523
524 if (!(count % eq->notify_interval)) {
525 phba->sli4_hba.sli4_write_eq_db(phba, eq, consumed,
526 LPFC_QUEUE_NOARM);
527 consumed = 0;
528 }
529
530 eqe = lpfc_sli4_eq_get(eq);
531 }
532 eq->EQ_processed += count;
533
534
535 if (count > eq->EQ_max_eqe)
536 eq->EQ_max_eqe = count;
537
538 xchg(&eq->queue_claimed, 0);
539
540rearm_and_exit:
541
542 phba->sli4_hba.sli4_write_eq_db(phba, eq, consumed, rearm);
543
544 return count;
545}
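
/**
 * lpfc_sli4_cq_get - Get the next valid Completion Queue Entry, if any
 * @q: the Completion Queue to read from
 *
 * Return: the next valid CQE, or NULL if the queue is empty.
 **/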
556static struct lpfc_cqe *
557lpfc_sli4_cq_get(struct lpfc_queue *q)
558{
559 struct lpfc_cqe *cqe;
560
561
562 if (unlikely(!q))
563 return NULL;
564 cqe = lpfc_sli4_qe(q, q->host_index);
565
566
567 if (bf_get_le32(lpfc_cqe_valid, cqe) != q->qe_valid)
568 return NULL;
569
570
571
572
573
574
575
576
577
578 mb();
579 return cqe;
580}
581
582static void
583__lpfc_sli4_consume_cqe(struct lpfc_hba *phba, struct lpfc_queue *cq,
584 struct lpfc_cqe *cqe)
585{
586 if (!phba->sli4_hba.pc_sli4_params.cqav)
587 bf_set_le32(lpfc_cqe_valid, cqe, 0);
588
589 cq->host_index = ((cq->host_index + 1) % cq->entry_count);
590
591
592 if (phba->sli4_hba.pc_sli4_params.cqav && !cq->host_index)
593 cq->qe_valid = (cq->qe_valid) ? 0 : 1;
594}
607void
608lpfc_sli4_write_cq_db(struct lpfc_hba *phba, struct lpfc_queue *q,
609 uint32_t count, bool arm)
610{
611 struct lpfc_register doorbell;
612
613
614 if (unlikely(!q || (count == 0 && !arm)))
615 return;
616
617
618 doorbell.word0 = 0;
619 if (arm)
620 bf_set(lpfc_eqcq_doorbell_arm, &doorbell, 1);
621 bf_set(lpfc_eqcq_doorbell_num_released, &doorbell, count);
622 bf_set(lpfc_eqcq_doorbell_qt, &doorbell, LPFC_QUEUE_TYPE_COMPLETION);
623 bf_set(lpfc_eqcq_doorbell_cqid_hi, &doorbell,
624 (q->queue_id >> LPFC_CQID_HI_FIELD_SHIFT));
625 bf_set(lpfc_eqcq_doorbell_cqid_lo, &doorbell, q->queue_id);
626 writel(doorbell.word0, q->phba->sli4_hba.CQDBregaddr);
627}
640void
641lpfc_sli4_if6_write_cq_db(struct lpfc_hba *phba, struct lpfc_queue *q,
642 uint32_t count, bool arm)
643{
644 struct lpfc_register doorbell;
645
646
647 if (unlikely(!q || (count == 0 && !arm)))
648 return;
649
650
651 doorbell.word0 = 0;
652 if (arm)
653 bf_set(lpfc_if6_cq_doorbell_arm, &doorbell, 1);
654 bf_set(lpfc_if6_cq_doorbell_num_released, &doorbell, count);
655 bf_set(lpfc_if6_cq_doorbell_cqid, &doorbell, q->queue_id);
656 writel(doorbell.word0, q->phba->sli4_hba.CQDBregaddr);
657}
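
/**
 * lpfc_sli4_rq_put - Post a header/data Receive Queue entry pair
 * @hq: the header Receive Queue
 * @dq: the data Receive Queue
 * @hrqe: header RQE to post
 * @drqe: data RQE to post
 *
 * Both queues must be at the same put index. The RQ doorbell is rung
 * every @hq->notify_interval postings.
 *
 * Return: the put index used, or a negative errno on failure.
 **/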
671int
672lpfc_sli4_rq_put(struct lpfc_queue *hq, struct lpfc_queue *dq,
673 struct lpfc_rqe *hrqe, struct lpfc_rqe *drqe)
674{
675 struct lpfc_rqe *temp_hrqe;
676 struct lpfc_rqe *temp_drqe;
677 struct lpfc_register doorbell;
678 int hq_put_index;
679 int dq_put_index;
680
681
682 if (unlikely(!hq) || unlikely(!dq))
683 return -ENOMEM;
684 hq_put_index = hq->host_index;
685 dq_put_index = dq->host_index;
686 temp_hrqe = lpfc_sli4_qe(hq, hq_put_index);
687 temp_drqe = lpfc_sli4_qe(dq, dq_put_index);
688
689 if (hq->type != LPFC_HRQ || dq->type != LPFC_DRQ)
690 return -EINVAL;
691 if (hq_put_index != dq_put_index)
692 return -EINVAL;
693
694 if (((hq_put_index + 1) % hq->entry_count) == hq->hba_index)
695 return -EBUSY;
696 lpfc_sli4_pcimem_bcopy(hrqe, temp_hrqe, hq->entry_size);
697 lpfc_sli4_pcimem_bcopy(drqe, temp_drqe, dq->entry_size);
698
699
700 hq->host_index = ((hq_put_index + 1) % hq->entry_count);
701 dq->host_index = ((dq_put_index + 1) % dq->entry_count);
702 hq->RQ_buf_posted++;
703
704
705 if (!(hq->host_index % hq->notify_interval)) {
706 doorbell.word0 = 0;
707 if (hq->db_format == LPFC_DB_RING_FORMAT) {
708 bf_set(lpfc_rq_db_ring_fm_num_posted, &doorbell,
709 hq->notify_interval);
710 bf_set(lpfc_rq_db_ring_fm_id, &doorbell, hq->queue_id);
711 } else if (hq->db_format == LPFC_DB_LIST_FORMAT) {
712 bf_set(lpfc_rq_db_list_fm_num_posted, &doorbell,
713 hq->notify_interval);
714 bf_set(lpfc_rq_db_list_fm_index, &doorbell,
715 hq->host_index);
716 bf_set(lpfc_rq_db_list_fm_id, &doorbell, hq->queue_id);
717 } else {
718 return -EINVAL;
719 }
720 writel(doorbell.word0, hq->db_regaddr);
721 }
722 return hq_put_index;
723}
735static uint32_t
736lpfc_sli4_rq_release(struct lpfc_queue *hq, struct lpfc_queue *dq)
737{
738
739 if (unlikely(!hq) || unlikely(!dq))
740 return 0;
741
742 if ((hq->type != LPFC_HRQ) || (dq->type != LPFC_DRQ))
743 return 0;
744 hq->hba_index = ((hq->hba_index + 1) % hq->entry_count);
745 dq->hba_index = ((dq->hba_index + 1) % dq->entry_count);
746 return 1;
747}
759static inline IOCB_t *
760lpfc_cmd_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring)
761{
762 return (IOCB_t *) (((char *) pring->sli.sli3.cmdringaddr) +
763 pring->sli.sli3.cmdidx * phba->iocb_cmd_size);
764}
776static inline IOCB_t *
777lpfc_resp_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring)
778{
779 return (IOCB_t *) (((char *) pring->sli.sli3.rspringaddr) +
780 pring->sli.sli3.rspidx * phba->iocb_rsp_size);
781}
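
/**
 * __lpfc_sli_get_iocbq - Allocate an iocbq from the driver's free list
 * @phba: adapter to allocate from
 *
 * Caller must hold hbalock. Return: an iocbq, or NULL if the list is empty.
 **/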
792struct lpfc_iocbq *
793__lpfc_sli_get_iocbq(struct lpfc_hba *phba)
794{
795 struct list_head *lpfc_iocb_list = &phba->lpfc_iocb_list;
796 struct lpfc_iocbq * iocbq = NULL;
797
798 lockdep_assert_held(&phba->hbalock);
799
800 list_remove_head(lpfc_iocb_list, iocbq, struct lpfc_iocbq, list);
801 if (iocbq)
802 phba->iocb_cnt++;
803 if (phba->iocb_cnt > phba->iocb_max)
804 phba->iocb_max = phba->iocb_cnt;
805 return iocbq;
806}
820struct lpfc_sglq *
821__lpfc_clear_active_sglq(struct lpfc_hba *phba, uint16_t xritag)
822{
823 struct lpfc_sglq *sglq;
824
825 sglq = phba->sli4_hba.lpfc_sglq_active_list[xritag];
826 phba->sli4_hba.lpfc_sglq_active_list[xritag] = NULL;
827 return sglq;
828}
842struct lpfc_sglq *
843__lpfc_get_active_sglq(struct lpfc_hba *phba, uint16_t xritag)
844{
845 struct lpfc_sglq *sglq;
846
847 sglq = phba->sli4_hba.lpfc_sglq_active_list[xritag];
848 return sglq;
849}
858void
859lpfc_clr_rrq_active(struct lpfc_hba *phba,
860 uint16_t xritag,
861 struct lpfc_node_rrq *rrq)
862{
863 struct lpfc_nodelist *ndlp = NULL;
864
865 if ((rrq->vport) && NLP_CHK_NODE_ACT(rrq->ndlp))
866 ndlp = lpfc_findnode_did(rrq->vport, rrq->nlp_DID);
867
868
869
870
871
872 if ((!ndlp) && rrq->ndlp)
873 ndlp = rrq->ndlp;
874
875 if (!ndlp)
876 goto out;
877
878 if (test_and_clear_bit(xritag, ndlp->active_rrqs_xri_bitmap)) {
879 rrq->send_rrq = 0;
880 rrq->xritag = 0;
881 rrq->rrq_stop_time = 0;
882 }
883out:
884 mempool_free(rrq, phba->rrq_pool);
885}
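
/**
 * lpfc_handle_rrq_active - Service the active RRQ list from the worker thread
 * @phba: adapter to service
 *
 * Moves expired RRQs off the active list, sends RRQ ELS commands where
 * requested, and restarts the RRQ timer if entries remain.
 **/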
901void
902lpfc_handle_rrq_active(struct lpfc_hba *phba)
903{
904 struct lpfc_node_rrq *rrq;
905 struct lpfc_node_rrq *nextrrq;
906 unsigned long next_time;
907 unsigned long iflags;
908 LIST_HEAD(send_rrq);
909
910 spin_lock_irqsave(&phba->hbalock, iflags);
911 phba->hba_flag &= ~HBA_RRQ_ACTIVE;
912 next_time = jiffies + msecs_to_jiffies(1000 * (phba->fc_ratov + 1));
913 list_for_each_entry_safe(rrq, nextrrq,
914 &phba->active_rrq_list, list) {
915 if (time_after(jiffies, rrq->rrq_stop_time))
916 list_move(&rrq->list, &send_rrq);
917 else if (time_before(rrq->rrq_stop_time, next_time))
918 next_time = rrq->rrq_stop_time;
919 }
920 spin_unlock_irqrestore(&phba->hbalock, iflags);
921 if ((!list_empty(&phba->active_rrq_list)) &&
922 (!(phba->pport->load_flag & FC_UNLOADING)))
923 mod_timer(&phba->rrq_tmr, next_time);
924 list_for_each_entry_safe(rrq, nextrrq, &send_rrq, list) {
925 list_del(&rrq->list);
926 if (!rrq->send_rrq) {
927
928 lpfc_clr_rrq_active(phba, rrq->xritag, rrq);
929 } else if (lpfc_send_rrq(phba, rrq)) {
930
931
932
933 lpfc_clr_rrq_active(phba, rrq->xritag,
934 rrq);
935 }
936 }
937}
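
/**
 * lpfc_get_active_rrq - Find and remove a matching RRQ from the active list
 * @vport: vport the RRQ belongs to
 * @xri: XRI to match
 * @did: remote port DID to match
 *
 * Return: the matching RRQ, removed from the list, or NULL.
 **/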
948struct lpfc_node_rrq *
949lpfc_get_active_rrq(struct lpfc_vport *vport, uint16_t xri, uint32_t did)
950{
951 struct lpfc_hba *phba = vport->phba;
952 struct lpfc_node_rrq *rrq;
953 struct lpfc_node_rrq *nextrrq;
954 unsigned long iflags;
955
956 if (phba->sli_rev != LPFC_SLI_REV4)
957 return NULL;
958 spin_lock_irqsave(&phba->hbalock, iflags);
959 list_for_each_entry_safe(rrq, nextrrq, &phba->active_rrq_list, list) {
960 if (rrq->vport == vport && rrq->xritag == xri &&
961 rrq->nlp_DID == did){
962 list_del(&rrq->list);
963 spin_unlock_irqrestore(&phba->hbalock, iflags);
964 return rrq;
965 }
966 }
967 spin_unlock_irqrestore(&phba->hbalock, iflags);
968 return NULL;
969}
979void
980lpfc_cleanup_vports_rrqs(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
981
982{
983 struct lpfc_hba *phba = vport->phba;
984 struct lpfc_node_rrq *rrq;
985 struct lpfc_node_rrq *nextrrq;
986 unsigned long iflags;
987 LIST_HEAD(rrq_list);
988
989 if (phba->sli_rev != LPFC_SLI_REV4)
990 return;
991 if (!ndlp) {
992 lpfc_sli4_vport_delete_els_xri_aborted(vport);
993 lpfc_sli4_vport_delete_fcp_xri_aborted(vport);
994 }
995 spin_lock_irqsave(&phba->hbalock, iflags);
996 list_for_each_entry_safe(rrq, nextrrq, &phba->active_rrq_list, list)
997 if ((rrq->vport == vport) && (!ndlp || rrq->ndlp == ndlp))
998 list_move(&rrq->list, &rrq_list);
999 spin_unlock_irqrestore(&phba->hbalock, iflags);
1000
1001 list_for_each_entry_safe(rrq, nextrrq, &rrq_list, list) {
1002 list_del(&rrq->list);
1003 lpfc_clr_rrq_active(phba, rrq->xritag, rrq);
1004 }
1005}
1017int
1018lpfc_test_rrq_active(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp,
1019 uint16_t xritag)
1020{
1021 if (!ndlp)
1022 return 0;
1023 if (!ndlp->active_rrqs_xri_bitmap)
1024 return 0;
1025 if (test_bit(xritag, ndlp->active_rrqs_xri_bitmap))
1026 return 1;
1027 else
1028 return 0;
1029}
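
/**
 * lpfc_set_rrq_active - Mark an XRI RRQ-active and queue an RRQ for the node
 * @phba: adapter owning the node
 * @ndlp: node the exchange was with
 * @xritag: XRI being quarantined
 * @rxid: responder exchange id to use in the RRQ ELS
 * @send_rrq: nonzero to request that an RRQ ELS be sent
 *
 * Return: 0 on success, -EINVAL if the RRQ could not be set up.
 **/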
1046int
1047lpfc_set_rrq_active(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp,
1048 uint16_t xritag, uint16_t rxid, uint16_t send_rrq)
1049{
1050 unsigned long iflags;
1051 struct lpfc_node_rrq *rrq;
1052 int empty;
1053
1054 if (!ndlp)
1055 return -EINVAL;
1056
1057 if (!phba->cfg_enable_rrq)
1058 return -EINVAL;
1059
1060 spin_lock_irqsave(&phba->hbalock, iflags);
1061 if (phba->pport->load_flag & FC_UNLOADING) {
1062 phba->hba_flag &= ~HBA_RRQ_ACTIVE;
1063 goto out;
1064 }
1065
1066
1067
1068
1069 if (NLP_CHK_FREE_REQ(ndlp))
1070 goto out;
1071
1072 if (ndlp->vport && (ndlp->vport->load_flag & FC_UNLOADING))
1073 goto out;
1074
1075 if (!ndlp->active_rrqs_xri_bitmap)
1076 goto out;
1077
1078 if (test_and_set_bit(xritag, ndlp->active_rrqs_xri_bitmap))
1079 goto out;
1080
1081 spin_unlock_irqrestore(&phba->hbalock, iflags);
1082 rrq = mempool_alloc(phba->rrq_pool, GFP_KERNEL);
1083 if (!rrq) {
1084 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
1085 "3155 Unable to allocate RRQ xri:0x%x rxid:0x%x"
1086 " DID:0x%x Send:%d\n",
1087 xritag, rxid, ndlp->nlp_DID, send_rrq);
1088 return -EINVAL;
1089 }
1090 if (phba->cfg_enable_rrq == 1)
1091 rrq->send_rrq = send_rrq;
1092 else
1093 rrq->send_rrq = 0;
1094 rrq->xritag = xritag;
1095 rrq->rrq_stop_time = jiffies +
1096 msecs_to_jiffies(1000 * (phba->fc_ratov + 1));
1097 rrq->ndlp = ndlp;
1098 rrq->nlp_DID = ndlp->nlp_DID;
1099 rrq->vport = ndlp->vport;
1100 rrq->rxid = rxid;
1101 spin_lock_irqsave(&phba->hbalock, iflags);
1102 empty = list_empty(&phba->active_rrq_list);
1103 list_add_tail(&rrq->list, &phba->active_rrq_list);
1104 phba->hba_flag |= HBA_RRQ_ACTIVE;
1105 if (empty)
1106 lpfc_worker_wake_up(phba);
1107 spin_unlock_irqrestore(&phba->hbalock, iflags);
1108 return 0;
1109out:
1110 spin_unlock_irqrestore(&phba->hbalock, iflags);
1111 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
1112 "2921 Can't set rrq active xri:0x%x rxid:0x%x"
1113 " DID:0x%x Send:%d\n",
1114 xritag, rxid, ndlp->nlp_DID, send_rrq);
1115 return -EINVAL;
1116}
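
/**
 * __lpfc_sli_get_els_sglq - Allocate an ELS sglq, honoring active RRQs
 * @phba: adapter to allocate from
 * @piocbq: iocbq the sglq will back
 *
 * Caller must hold the ring lock. sglqs whose XRI is still RRQ-active
 * for the destination node are skipped. Return: an sglq, or NULL.
 **/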
1129static struct lpfc_sglq *
1130__lpfc_sli_get_els_sglq(struct lpfc_hba *phba, struct lpfc_iocbq *piocbq)
1131{
1132 struct list_head *lpfc_els_sgl_list = &phba->sli4_hba.lpfc_els_sgl_list;
1133 struct lpfc_sglq *sglq = NULL;
1134 struct lpfc_sglq *start_sglq = NULL;
1135 struct lpfc_io_buf *lpfc_cmd;
1136 struct lpfc_nodelist *ndlp;
1137 struct lpfc_sli_ring *pring = NULL;
1138 int found = 0;
1139
1140 if (piocbq->iocb_flag & LPFC_IO_NVME_LS)
1141 pring = phba->sli4_hba.nvmels_wq->pring;
1142 else
1143 pring = lpfc_phba_elsring(phba);
1144
1145 lockdep_assert_held(&pring->ring_lock);
1146
1147 if (piocbq->iocb_flag & LPFC_IO_FCP) {
1148 lpfc_cmd = (struct lpfc_io_buf *) piocbq->context1;
1149 ndlp = lpfc_cmd->rdata->pnode;
1150 } else if ((piocbq->iocb.ulpCommand == CMD_GEN_REQUEST64_CR) &&
1151 !(piocbq->iocb_flag & LPFC_IO_LIBDFC)) {
1152 ndlp = piocbq->context_un.ndlp;
1153 } else if (piocbq->iocb_flag & LPFC_IO_LIBDFC) {
1154 if (piocbq->iocb_flag & LPFC_IO_LOOPBACK)
1155 ndlp = NULL;
1156 else
1157 ndlp = piocbq->context_un.ndlp;
1158 } else {
1159 ndlp = piocbq->context1;
1160 }
1161
1162 spin_lock(&phba->sli4_hba.sgl_list_lock);
1163 list_remove_head(lpfc_els_sgl_list, sglq, struct lpfc_sglq, list);
1164 start_sglq = sglq;
1165 while (!found) {
1166 if (!sglq)
1167 break;
1168 if (ndlp && ndlp->active_rrqs_xri_bitmap &&
1169 test_bit(sglq->sli4_lxritag,
1170 ndlp->active_rrqs_xri_bitmap)) {
1171
1172
1173
1174 list_add_tail(&sglq->list, lpfc_els_sgl_list);
1175 sglq = NULL;
1176 list_remove_head(lpfc_els_sgl_list, sglq,
1177 struct lpfc_sglq, list);
1178 if (sglq == start_sglq) {
1179 list_add_tail(&sglq->list, lpfc_els_sgl_list);
1180 sglq = NULL;
1181 break;
1182 } else
1183 continue;
1184 }
1185 sglq->ndlp = ndlp;
1186 found = 1;
1187 phba->sli4_hba.lpfc_sglq_active_list[sglq->sli4_lxritag] = sglq;
1188 sglq->state = SGL_ALLOCATED;
1189 }
1190 spin_unlock(&phba->sli4_hba.sgl_list_lock);
1191 return sglq;
1192}
1204struct lpfc_sglq *
1205__lpfc_sli_get_nvmet_sglq(struct lpfc_hba *phba, struct lpfc_iocbq *piocbq)
1206{
1207 struct list_head *lpfc_nvmet_sgl_list;
1208 struct lpfc_sglq *sglq = NULL;
1209
1210 lpfc_nvmet_sgl_list = &phba->sli4_hba.lpfc_nvmet_sgl_list;
1211
1212 lockdep_assert_held(&phba->sli4_hba.sgl_list_lock);
1213
1214 list_remove_head(lpfc_nvmet_sgl_list, sglq, struct lpfc_sglq, list);
1215 if (!sglq)
1216 return NULL;
1217 phba->sli4_hba.lpfc_sglq_active_list[sglq->sli4_lxritag] = sglq;
1218 sglq->state = SGL_ALLOCATED;
1219 return sglq;
1220}
1231struct lpfc_iocbq *
1232lpfc_sli_get_iocbq(struct lpfc_hba *phba)
1233{
1234 struct lpfc_iocbq * iocbq = NULL;
1235 unsigned long iflags;
1236
1237 spin_lock_irqsave(&phba->hbalock, iflags);
1238 iocbq = __lpfc_sli_get_iocbq(phba);
1239 spin_unlock_irqrestore(&phba->hbalock, iflags);
1240 return iocbq;
1241}
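
/**
 * __lpfc_sli_release_iocbq_s4 - SLI4-specific release of an iocbq
 * @phba: adapter owning the iocbq
 * @iocbq: iocbq being freed
 *
 * Returns the associated sglq to the NVMET, aborted-ELS or free ELS sgl
 * list as appropriate, clears the iocbq and puts it on the free list.
 **/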
1262static void
1263__lpfc_sli_release_iocbq_s4(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq)
1264{
1265 struct lpfc_sglq *sglq;
1266 size_t start_clean = offsetof(struct lpfc_iocbq, iocb);
1267 unsigned long iflag = 0;
1268 struct lpfc_sli_ring *pring;
1269
1270 if (iocbq->sli4_xritag == NO_XRI)
1271 sglq = NULL;
1272 else
1273 sglq = __lpfc_clear_active_sglq(phba, iocbq->sli4_lxritag);
1274
1275
1276 if (sglq) {
1277 if (iocbq->iocb_flag & LPFC_IO_NVMET) {
1278 spin_lock_irqsave(&phba->sli4_hba.sgl_list_lock,
1279 iflag);
1280 sglq->state = SGL_FREED;
1281 sglq->ndlp = NULL;
1282 list_add_tail(&sglq->list,
1283 &phba->sli4_hba.lpfc_nvmet_sgl_list);
1284 spin_unlock_irqrestore(
1285 &phba->sli4_hba.sgl_list_lock, iflag);
1286 goto out;
1287 }
1288
1289 pring = phba->sli4_hba.els_wq->pring;
1290 if ((iocbq->iocb_flag & LPFC_EXCHANGE_BUSY) &&
1291 (sglq->state != SGL_XRI_ABORTED)) {
1292 spin_lock_irqsave(&phba->sli4_hba.sgl_list_lock,
1293 iflag);
1294 list_add(&sglq->list,
1295 &phba->sli4_hba.lpfc_abts_els_sgl_list);
1296 spin_unlock_irqrestore(
1297 &phba->sli4_hba.sgl_list_lock, iflag);
1298 } else {
1299 spin_lock_irqsave(&phba->sli4_hba.sgl_list_lock,
1300 iflag);
1301 sglq->state = SGL_FREED;
1302 sglq->ndlp = NULL;
1303 list_add_tail(&sglq->list,
1304 &phba->sli4_hba.lpfc_els_sgl_list);
1305 spin_unlock_irqrestore(
1306 &phba->sli4_hba.sgl_list_lock, iflag);
1307
1308
1309 if (!list_empty(&pring->txq))
1310 lpfc_worker_wake_up(phba);
1311 }
1312 }
1313
1314out:
1315
1316
1317
1318 memset((char *)iocbq + start_clean, 0, sizeof(*iocbq) - start_clean);
1319 iocbq->sli4_lxritag = NO_XRI;
1320 iocbq->sli4_xritag = NO_XRI;
1321 iocbq->iocb_flag &= ~(LPFC_IO_NVME | LPFC_IO_NVMET |
1322 LPFC_IO_NVME_LS);
1323 list_add_tail(&iocbq->list, &phba->lpfc_iocb_list);
1324}
1338static void
1339__lpfc_sli_release_iocbq_s3(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq)
1340{
1341 size_t start_clean = offsetof(struct lpfc_iocbq, iocb);
1342
1343
1344
1345
	memset((char *)iocbq + start_clean, 0, sizeof(*iocbq) - start_clean);
1347 iocbq->sli4_xritag = NO_XRI;
1348 list_add_tail(&iocbq->list, &phba->lpfc_iocb_list);
1349}
1361static void
1362__lpfc_sli_release_iocbq(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq)
1363{
1364 lockdep_assert_held(&phba->hbalock);
1365
1366 phba->__lpfc_sli_release_iocbq(phba, iocbq);
1367 phba->iocb_cnt--;
1368}
1378void
1379lpfc_sli_release_iocbq(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq)
1380{
1381 unsigned long iflags;
1382
1383
1384
1385
1386 spin_lock_irqsave(&phba->hbalock, iflags);
1387 __lpfc_sli_release_iocbq(phba, iocbq);
1388 spin_unlock_irqrestore(&phba->hbalock, iflags);
1389}
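
/**
 * lpfc_sli_cancel_iocbs - Cancel a list of pending iocbs
 * @phba: adapter owning the iocbs
 * @iocblist: list of iocbs to cancel
 * @ulpstatus: completion status to report
 * @ulpWord4: additional status word to report
 *
 * Each iocb is completed with the given status, or simply released if it
 * has no completion handler.
 **/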
1403void
1404lpfc_sli_cancel_iocbs(struct lpfc_hba *phba, struct list_head *iocblist,
1405 uint32_t ulpstatus, uint32_t ulpWord4)
1406{
1407 struct lpfc_iocbq *piocb;
1408
1409 while (!list_empty(iocblist)) {
1410 list_remove_head(iocblist, piocb, struct lpfc_iocbq, list);
1411 if (!piocb->iocb_cmpl) {
1412 if (piocb->iocb_flag & LPFC_IO_NVME)
1413 lpfc_nvme_cancel_iocb(phba, piocb);
1414 else
1415 lpfc_sli_release_iocbq(phba, piocb);
1416 } else {
1417 piocb->iocb.ulpStatus = ulpstatus;
1418 piocb->iocb.un.ulpWord[4] = ulpWord4;
1419 (piocb->iocb_cmpl) (phba, piocb, piocb);
1420 }
1421 }
1422 return;
1423}
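
/**
 * lpfc_sli_iocb_cmd_type - Classify an iocb command code
 * @iocb_cmnd: the iocb command code
 *
 * Return: whether the command is solicited, unsolicited, an abort, or
 * unknown to the driver.
 **/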
1440static lpfc_iocb_type
1441lpfc_sli_iocb_cmd_type(uint8_t iocb_cmnd)
1442{
1443 lpfc_iocb_type type = LPFC_UNKNOWN_IOCB;
1444
	if (iocb_cmnd > CMD_MAX_IOCB_CMD)
		return LPFC_UNKNOWN_IOCB;
1447
1448 switch (iocb_cmnd) {
1449 case CMD_XMIT_SEQUENCE_CR:
1450 case CMD_XMIT_SEQUENCE_CX:
1451 case CMD_XMIT_BCAST_CN:
1452 case CMD_XMIT_BCAST_CX:
1453 case CMD_ELS_REQUEST_CR:
1454 case CMD_ELS_REQUEST_CX:
1455 case CMD_CREATE_XRI_CR:
1456 case CMD_CREATE_XRI_CX:
1457 case CMD_GET_RPI_CN:
1458 case CMD_XMIT_ELS_RSP_CX:
1459 case CMD_GET_RPI_CR:
1460 case CMD_FCP_IWRITE_CR:
1461 case CMD_FCP_IWRITE_CX:
1462 case CMD_FCP_IREAD_CR:
1463 case CMD_FCP_IREAD_CX:
1464 case CMD_FCP_ICMND_CR:
1465 case CMD_FCP_ICMND_CX:
1466 case CMD_FCP_TSEND_CX:
1467 case CMD_FCP_TRSP_CX:
1468 case CMD_FCP_TRECEIVE_CX:
1469 case CMD_FCP_AUTO_TRSP_CX:
1470 case CMD_ADAPTER_MSG:
1471 case CMD_ADAPTER_DUMP:
1472 case CMD_XMIT_SEQUENCE64_CR:
1473 case CMD_XMIT_SEQUENCE64_CX:
1474 case CMD_XMIT_BCAST64_CN:
1475 case CMD_XMIT_BCAST64_CX:
1476 case CMD_ELS_REQUEST64_CR:
1477 case CMD_ELS_REQUEST64_CX:
1478 case CMD_FCP_IWRITE64_CR:
1479 case CMD_FCP_IWRITE64_CX:
1480 case CMD_FCP_IREAD64_CR:
1481 case CMD_FCP_IREAD64_CX:
1482 case CMD_FCP_ICMND64_CR:
1483 case CMD_FCP_ICMND64_CX:
1484 case CMD_FCP_TSEND64_CX:
1485 case CMD_FCP_TRSP64_CX:
1486 case CMD_FCP_TRECEIVE64_CX:
1487 case CMD_GEN_REQUEST64_CR:
1488 case CMD_GEN_REQUEST64_CX:
1489 case CMD_XMIT_ELS_RSP64_CX:
1490 case DSSCMD_IWRITE64_CR:
1491 case DSSCMD_IWRITE64_CX:
1492 case DSSCMD_IREAD64_CR:
1493 case DSSCMD_IREAD64_CX:
1494 type = LPFC_SOL_IOCB;
1495 break;
1496 case CMD_ABORT_XRI_CN:
1497 case CMD_ABORT_XRI_CX:
1498 case CMD_CLOSE_XRI_CN:
1499 case CMD_CLOSE_XRI_CX:
1500 case CMD_XRI_ABORTED_CX:
1501 case CMD_ABORT_MXRI64_CN:
1502 case CMD_XMIT_BLS_RSP64_CX:
1503 type = LPFC_ABORT_IOCB;
1504 break;
1505 case CMD_RCV_SEQUENCE_CX:
1506 case CMD_RCV_ELS_REQ_CX:
1507 case CMD_RCV_SEQUENCE64_CX:
1508 case CMD_RCV_ELS_REQ64_CX:
1509 case CMD_ASYNC_STATUS:
1510 case CMD_IOCB_RCV_SEQ64_CX:
1511 case CMD_IOCB_RCV_ELS64_CX:
1512 case CMD_IOCB_RCV_CONT64_CX:
1513 case CMD_IOCB_RET_XRI64_CX:
1514 type = LPFC_UNSOL_IOCB;
1515 break;
1516 case CMD_IOCB_XMIT_MSEQ64_CR:
1517 case CMD_IOCB_XMIT_MSEQ64_CX:
1518 case CMD_IOCB_RCV_SEQ_LIST64_CX:
1519 case CMD_IOCB_RCV_ELS_LIST64_CX:
1520 case CMD_IOCB_CLOSE_EXTENDED_CN:
1521 case CMD_IOCB_ABORT_EXTENDED_CN:
1522 case CMD_IOCB_RET_HBQE64_CN:
1523 case CMD_IOCB_FCP_IBIDIR64_CR:
1524 case CMD_IOCB_FCP_IBIDIR64_CX:
1525 case CMD_IOCB_FCP_ITASKMGT64_CX:
1526 case CMD_IOCB_LOGENTRY_CN:
1527 case CMD_IOCB_LOGENTRY_ASYNC_CN:
		printk(KERN_WARNING "%s - Unhandled SLI-3 Command x%x\n",
		       __func__, iocb_cmnd);
1530 type = LPFC_UNKNOWN_IOCB;
1531 break;
1532 default:
1533 type = LPFC_UNKNOWN_IOCB;
1534 break;
1535 }
1536
1537 return type;
1538}
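
/**
 * lpfc_sli_ring_map - Issue a CONFIG_RING mailbox command for every ring
 * @phba: adapter being initialized
 *
 * Return: 0 on success, -ENOMEM or -ENXIO on failure.
 **/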
1551static int
1552lpfc_sli_ring_map(struct lpfc_hba *phba)
1553{
1554 struct lpfc_sli *psli = &phba->sli;
1555 LPFC_MBOXQ_t *pmb;
1556 MAILBOX_t *pmbox;
1557 int i, rc, ret = 0;
1558
1559 pmb = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
1560 if (!pmb)
1561 return -ENOMEM;
1562 pmbox = &pmb->u.mb;
1563 phba->link_state = LPFC_INIT_MBX_CMDS;
1564 for (i = 0; i < psli->num_rings; i++) {
1565 lpfc_config_ring(phba, i, pmb);
1566 rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL);
1567 if (rc != MBX_SUCCESS) {
1568 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
1569 "0446 Adapter failed to init (%d), "
1570 "mbxCmd x%x CFG_RING, mbxStatus x%x, "
1571 "ring %d\n",
1572 rc, pmbox->mbxCommand,
1573 pmbox->mbxStatus, i);
1574 phba->link_state = LPFC_HBA_ERROR;
1575 ret = -ENXIO;
1576 break;
1577 }
1578 }
1579 mempool_free(pmb, phba->mbox_mem_pool);
1580 return ret;
1581}
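
/**
 * lpfc_sli_ringtxcmpl_put - Queue an issued iocb on the ring's txcmplq
 * @phba: adapter owning the ring
 * @pring: ring the iocb was issued on
 * @piocb: iocb awaiting completion
 *
 * For ELS commands (other than abort/close) the ELS timeout timer is
 * also restarted.
 **/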
1596static int
1597lpfc_sli_ringtxcmpl_put(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
1598 struct lpfc_iocbq *piocb)
1599{
1600 if (phba->sli_rev == LPFC_SLI_REV4)
1601 lockdep_assert_held(&pring->ring_lock);
1602 else
1603 lockdep_assert_held(&phba->hbalock);
1604
1605 BUG_ON(!piocb);
1606
1607 list_add_tail(&piocb->list, &pring->txcmplq);
1608 piocb->iocb_flag |= LPFC_IO_ON_TXCMPLQ;
1609 pring->txcmplq_cnt++;
1610
1611 if ((unlikely(pring->ringno == LPFC_ELS_RING)) &&
1612 (piocb->iocb.ulpCommand != CMD_ABORT_XRI_CN) &&
1613 (piocb->iocb.ulpCommand != CMD_CLOSE_XRI_CN)) {
1614 BUG_ON(!piocb->vport);
1615 if (!(piocb->vport->load_flag & FC_UNLOADING))
1616 mod_timer(&piocb->vport->els_tmofunc,
1617 jiffies +
1618 msecs_to_jiffies(1000 * (phba->fc_ratov << 1)));
1619 }
1620
1621 return 0;
1622}
1634struct lpfc_iocbq *
1635lpfc_sli_ringtx_get(struct lpfc_hba *phba, struct lpfc_sli_ring *pring)
1636{
1637 struct lpfc_iocbq *cmd_iocb;
1638
1639 lockdep_assert_held(&phba->hbalock);
1640
1641 list_remove_head((&pring->txq), cmd_iocb, struct lpfc_iocbq, list);
1642 return cmd_iocb;
1643}
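
/**
 * lpfc_sli_next_iocb_slot - Get the next free command IOCB slot in a ring
 * @phba: adapter owning the ring
 * @pring: ring to get the slot from
 *
 * Return: the slot, or NULL if the ring is full. A port get index beyond
 * the ring size is treated as a fatal adapter error.
 **/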
1659static IOCB_t *
1660lpfc_sli_next_iocb_slot (struct lpfc_hba *phba, struct lpfc_sli_ring *pring)
1661{
1662 struct lpfc_pgp *pgp = &phba->port_gp[pring->ringno];
1663 uint32_t max_cmd_idx = pring->sli.sli3.numCiocb;
1664
1665 lockdep_assert_held(&phba->hbalock);
1666
1667 if ((pring->sli.sli3.next_cmdidx == pring->sli.sli3.cmdidx) &&
1668 (++pring->sli.sli3.next_cmdidx >= max_cmd_idx))
1669 pring->sli.sli3.next_cmdidx = 0;
1670
1671 if (unlikely(pring->sli.sli3.local_getidx ==
1672 pring->sli.sli3.next_cmdidx)) {
1673
1674 pring->sli.sli3.local_getidx = le32_to_cpu(pgp->cmdGetInx);
1675
1676 if (unlikely(pring->sli.sli3.local_getidx >= max_cmd_idx)) {
1677 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
1678 "0315 Ring %d issue: portCmdGet %d "
1679 "is bigger than cmd ring %d\n",
1680 pring->ringno,
1681 pring->sli.sli3.local_getidx,
1682 max_cmd_idx);
1683
1684 phba->link_state = LPFC_HBA_ERROR;
1685
1686
1687
1688
1689 phba->work_ha |= HA_ERATT;
1690 phba->work_hs = HS_FFER3;
1691
1692 lpfc_worker_wake_up(phba);
1693
1694 return NULL;
1695 }
1696
1697 if (pring->sli.sli3.local_getidx == pring->sli.sli3.next_cmdidx)
1698 return NULL;
1699 }
1700
1701 return lpfc_cmd_iocb(phba, pring);
1702}
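
/**
 * lpfc_sli_next_iotag - Allocate an iotag and register the iocbq for lookup
 * @phba: adapter owning the iocbq
 * @iocbq: iocbq to assign the iotag to
 *
 * The lookup array is grown by LPFC_IOCBQ_LOOKUP_INCREMENT when it is
 * exhausted. Return: the new iotag, or 0 on failure.
 **/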
1716uint16_t
1717lpfc_sli_next_iotag(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq)
1718{
1719 struct lpfc_iocbq **new_arr;
1720 struct lpfc_iocbq **old_arr;
1721 size_t new_len;
1722 struct lpfc_sli *psli = &phba->sli;
1723 uint16_t iotag;
1724
1725 spin_lock_irq(&phba->hbalock);
1726 iotag = psli->last_iotag;
1727 if(++iotag < psli->iocbq_lookup_len) {
1728 psli->last_iotag = iotag;
1729 psli->iocbq_lookup[iotag] = iocbq;
1730 spin_unlock_irq(&phba->hbalock);
1731 iocbq->iotag = iotag;
1732 return iotag;
1733 } else if (psli->iocbq_lookup_len < (0xffff
1734 - LPFC_IOCBQ_LOOKUP_INCREMENT)) {
1735 new_len = psli->iocbq_lookup_len + LPFC_IOCBQ_LOOKUP_INCREMENT;
1736 spin_unlock_irq(&phba->hbalock);
1737 new_arr = kcalloc(new_len, sizeof(struct lpfc_iocbq *),
1738 GFP_KERNEL);
1739 if (new_arr) {
1740 spin_lock_irq(&phba->hbalock);
1741 old_arr = psli->iocbq_lookup;
1742 if (new_len <= psli->iocbq_lookup_len) {
1743
1744 kfree(new_arr);
1745 iotag = psli->last_iotag;
1746 if(++iotag < psli->iocbq_lookup_len) {
1747 psli->last_iotag = iotag;
1748 psli->iocbq_lookup[iotag] = iocbq;
1749 spin_unlock_irq(&phba->hbalock);
1750 iocbq->iotag = iotag;
1751 return iotag;
1752 }
1753 spin_unlock_irq(&phba->hbalock);
1754 return 0;
1755 }
1756 if (psli->iocbq_lookup)
1757 memcpy(new_arr, old_arr,
1758 ((psli->last_iotag + 1) *
1759 sizeof (struct lpfc_iocbq *)));
1760 psli->iocbq_lookup = new_arr;
1761 psli->iocbq_lookup_len = new_len;
1762 psli->last_iotag = iotag;
1763 psli->iocbq_lookup[iotag] = iocbq;
1764 spin_unlock_irq(&phba->hbalock);
1765 iocbq->iotag = iotag;
1766 kfree(old_arr);
1767 return iotag;
1768 }
1769 } else
1770 spin_unlock_irq(&phba->hbalock);
1771
1772 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
1773 "0318 Failed to allocate IOTAG.last IOTAG is %d\n",
1774 psli->last_iotag);
1775
1776 return 0;
1777}
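
/**
 * lpfc_sli_submit_iocb - Copy an iocb into a ring slot and notify the HBA
 * @phba: adapter owning the ring
 * @pring: ring the iocb is issued on
 * @iocb: ring slot to copy into
 * @nextiocb: driver iocb being issued
 **/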
1794static void
1795lpfc_sli_submit_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
1796 IOCB_t *iocb, struct lpfc_iocbq *nextiocb)
1797{
1798
1799
1800
1801 nextiocb->iocb.ulpIoTag = (nextiocb->iocb_cmpl) ? nextiocb->iotag : 0;
1802
1803
1804 if (pring->ringno == LPFC_ELS_RING) {
1805 lpfc_debugfs_slow_ring_trc(phba,
1806 "IOCB cmd ring: wd4:x%08x wd6:x%08x wd7:x%08x",
1807 *(((uint32_t *) &nextiocb->iocb) + 4),
1808 *(((uint32_t *) &nextiocb->iocb) + 6),
1809 *(((uint32_t *) &nextiocb->iocb) + 7));
1810 }
1811
1812
1813
1814
1815 lpfc_sli_pcimem_bcopy(&nextiocb->iocb, iocb, phba->iocb_cmd_size);
1816 wmb();
1817 pring->stats.iocb_cmd++;
1818
1819
1820
1821
1822
1823
1824 if (nextiocb->iocb_cmpl)
1825 lpfc_sli_ringtxcmpl_put(phba, pring, nextiocb);
1826 else
1827 __lpfc_sli_release_iocbq(phba, nextiocb);
1828
1829
1830
1831
1832
1833 pring->sli.sli3.cmdidx = pring->sli.sli3.next_cmdidx;
1834 writel(pring->sli.sli3.cmdidx, &phba->host_gp[pring->ringno].cmdPutInx);
1835}
1849static void
1850lpfc_sli_update_full_ring(struct lpfc_hba *phba, struct lpfc_sli_ring *pring)
1851{
1852 int ringno = pring->ringno;
1853
1854 pring->flag |= LPFC_CALL_RING_AVAILABLE;
1855
1856 wmb();
1857
1858
1859
1860
1861
1862 writel((CA_R0ATT|CA_R0CE_REQ) << (ringno*4), phba->CAregaddr);
1863 readl(phba->CAregaddr);
1864
1865 pring->stats.iocb_cmd_full++;
1866}
1877static void
1878lpfc_sli_update_ring(struct lpfc_hba *phba, struct lpfc_sli_ring *pring)
1879{
1880 int ringno = pring->ringno;
1881
1882
1883
1884
1885 if (!(phba->sli3_options & LPFC_SLI3_CRP_ENABLED)) {
1886 wmb();
1887 writel(CA_R0ATT << (ringno * 4), phba->CAregaddr);
1888 readl(phba->CAregaddr);
1889 }
1890}
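
/**
 * lpfc_sli_resume_iocb - Flush a ring's txq to the HBA while slots remain
 * @phba: adapter owning the ring
 * @pring: ring to resume
 **/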
1901static void
1902lpfc_sli_resume_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring)
1903{
1904 IOCB_t *iocb;
1905 struct lpfc_iocbq *nextiocb;
1906
1907 lockdep_assert_held(&phba->hbalock);
1908
1909
1910
1911
1912
1913
1914
1915
1916
1917 if (lpfc_is_link_up(phba) &&
1918 (!list_empty(&pring->txq)) &&
1919 (pring->ringno != LPFC_FCP_RING ||
1920 phba->sli.sli_flag & LPFC_PROCESS_LA)) {
1921
1922 while ((iocb = lpfc_sli_next_iocb_slot(phba, pring)) &&
1923 (nextiocb = lpfc_sli_ringtx_get(phba, pring)))
1924 lpfc_sli_submit_iocb(phba, pring, iocb, nextiocb);
1925
1926 if (iocb)
1927 lpfc_sli_update_ring(phba, pring);
1928 else
1929 lpfc_sli_update_full_ring(phba, pring);
1930 }
1931
1932 return;
1933}
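
/**
 * lpfc_sli_next_hbq_slot - Get the next free Host Buffer Queue slot
 * @phba: adapter owning the HBQ
 * @hbqno: HBQ number
 *
 * Return: the slot, or NULL if the HBQ is full or in an error state.
 **/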
1945static struct lpfc_hbq_entry *
1946lpfc_sli_next_hbq_slot(struct lpfc_hba *phba, uint32_t hbqno)
1947{
1948 struct hbq_s *hbqp = &phba->hbqs[hbqno];
1949
1950 lockdep_assert_held(&phba->hbalock);
1951
1952 if (hbqp->next_hbqPutIdx == hbqp->hbqPutIdx &&
1953 ++hbqp->next_hbqPutIdx >= hbqp->entry_count)
1954 hbqp->next_hbqPutIdx = 0;
1955
1956 if (unlikely(hbqp->local_hbqGetIdx == hbqp->next_hbqPutIdx)) {
1957 uint32_t raw_index = phba->hbq_get[hbqno];
1958 uint32_t getidx = le32_to_cpu(raw_index);
1959
1960 hbqp->local_hbqGetIdx = getidx;
1961
1962 if (unlikely(hbqp->local_hbqGetIdx >= hbqp->entry_count)) {
1963 lpfc_printf_log(phba, KERN_ERR,
1964 LOG_SLI | LOG_VPORT,
1965 "1802 HBQ %d: local_hbqGetIdx "
1966 "%u is > than hbqp->entry_count %u\n",
1967 hbqno, hbqp->local_hbqGetIdx,
1968 hbqp->entry_count);
1969
1970 phba->link_state = LPFC_HBA_ERROR;
1971 return NULL;
1972 }
1973
1974 if (hbqp->local_hbqGetIdx == hbqp->next_hbqPutIdx)
1975 return NULL;
1976 }
1977
1978 return (struct lpfc_hbq_entry *) phba->hbqs[hbqno].hbq_virt +
1979 hbqp->hbqPutIdx;
1980}
1991void
1992lpfc_sli_hbqbuf_free_all(struct lpfc_hba *phba)
1993{
1994 struct lpfc_dmabuf *dmabuf, *next_dmabuf;
1995 struct hbq_dmabuf *hbq_buf;
1996 unsigned long flags;
1997 int i, hbq_count;
1998
1999 hbq_count = lpfc_sli_hbq_count();
2000
2001 spin_lock_irqsave(&phba->hbalock, flags);
2002 for (i = 0; i < hbq_count; ++i) {
2003 list_for_each_entry_safe(dmabuf, next_dmabuf,
2004 &phba->hbqs[i].hbq_buffer_list, list) {
2005 hbq_buf = container_of(dmabuf, struct hbq_dmabuf, dbuf);
2006 list_del(&hbq_buf->dbuf.list);
2007 (phba->hbqs[i].hbq_free_buffer)(phba, hbq_buf);
2008 }
2009 phba->hbqs[i].buffer_count = 0;
2010 }
2011
2012
2013 phba->hbq_in_use = 0;
2014 spin_unlock_irqrestore(&phba->hbalock, flags);
2015}
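
/**
 * lpfc_sli_hbq_to_firmware - Post an HBQ buffer to the firmware
 * @phba: adapter owning the HBQ
 * @hbqno: HBQ number
 * @hbq_buf: buffer to post
 *
 * Dispatches to the SLI3 or SLI4 specific posting routine.
 **/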
2029static int
2030lpfc_sli_hbq_to_firmware(struct lpfc_hba *phba, uint32_t hbqno,
2031 struct hbq_dmabuf *hbq_buf)
2032{
2033 lockdep_assert_held(&phba->hbalock);
2034 return phba->lpfc_sli_hbq_to_firmware(phba, hbqno, hbq_buf);
2035}
2048static int
2049lpfc_sli_hbq_to_firmware_s3(struct lpfc_hba *phba, uint32_t hbqno,
2050 struct hbq_dmabuf *hbq_buf)
2051{
2052 struct lpfc_hbq_entry *hbqe;
2053 dma_addr_t physaddr = hbq_buf->dbuf.phys;
2054
2055 lockdep_assert_held(&phba->hbalock);
2056
2057 hbqe = lpfc_sli_next_hbq_slot(phba, hbqno);
2058 if (hbqe) {
2059 struct hbq_s *hbqp = &phba->hbqs[hbqno];
2060
2061 hbqe->bde.addrHigh = le32_to_cpu(putPaddrHigh(physaddr));
2062 hbqe->bde.addrLow = le32_to_cpu(putPaddrLow(physaddr));
2063 hbqe->bde.tus.f.bdeSize = hbq_buf->total_size;
2064 hbqe->bde.tus.f.bdeFlags = 0;
2065 hbqe->bde.tus.w = le32_to_cpu(hbqe->bde.tus.w);
2066 hbqe->buffer_tag = le32_to_cpu(hbq_buf->tag);
2067
2068 hbqp->hbqPutIdx = hbqp->next_hbqPutIdx;
2069 writel(hbqp->hbqPutIdx, phba->hbq_put + hbqno);
2070
2071 readl(phba->hbq_put + hbqno);
2072 list_add_tail(&hbq_buf->dbuf.list, &hbqp->hbq_buffer_list);
2073 return 0;
2074 } else
2075 return -ENOMEM;
2076}
2088static int
2089lpfc_sli_hbq_to_firmware_s4(struct lpfc_hba *phba, uint32_t hbqno,
2090 struct hbq_dmabuf *hbq_buf)
2091{
2092 int rc;
2093 struct lpfc_rqe hrqe;
2094 struct lpfc_rqe drqe;
2095 struct lpfc_queue *hrq;
2096 struct lpfc_queue *drq;
2097
2098 if (hbqno != LPFC_ELS_HBQ)
2099 return 1;
2100 hrq = phba->sli4_hba.hdr_rq;
2101 drq = phba->sli4_hba.dat_rq;
2102
2103 lockdep_assert_held(&phba->hbalock);
2104 hrqe.address_lo = putPaddrLow(hbq_buf->hbuf.phys);
2105 hrqe.address_hi = putPaddrHigh(hbq_buf->hbuf.phys);
2106 drqe.address_lo = putPaddrLow(hbq_buf->dbuf.phys);
2107 drqe.address_hi = putPaddrHigh(hbq_buf->dbuf.phys);
2108 rc = lpfc_sli4_rq_put(hrq, drq, &hrqe, &drqe);
2109 if (rc < 0)
2110 return rc;
2111 hbq_buf->tag = (rc | (hbqno << 16));
2112 list_add_tail(&hbq_buf->dbuf.list, &phba->hbqs[hbqno].hbq_buffer_list);
2113 return 0;
2114}
2117static struct lpfc_hbq_init lpfc_els_hbq = {
2118 .rn = 1,
2119 .entry_count = 256,
2120 .mask_count = 0,
2121 .profile = 0,
2122 .ring_mask = (1 << LPFC_ELS_RING),
2123 .buffer_count = 0,
2124 .init_count = 40,
2125 .add_count = 40,
2126};
2129struct lpfc_hbq_init *lpfc_hbq_defs[] = {
2130 &lpfc_els_hbq,
2131};
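
/**
 * lpfc_sli_hbqbuf_fill_hbqs - Allocate and post buffers to an HBQ
 * @phba: adapter to post to
 * @hbqno: HBQ number
 * @count: number of buffers requested
 *
 * Return: the number of buffers actually posted.
 **/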
2143static int
2144lpfc_sli_hbqbuf_fill_hbqs(struct lpfc_hba *phba, uint32_t hbqno, uint32_t count)
2145{
2146 uint32_t i, posted = 0;
2147 unsigned long flags;
2148 struct hbq_dmabuf *hbq_buffer;
2149 LIST_HEAD(hbq_buf_list);
2150 if (!phba->hbqs[hbqno].hbq_alloc_buffer)
2151 return 0;
2152
2153 if ((phba->hbqs[hbqno].buffer_count + count) >
2154 lpfc_hbq_defs[hbqno]->entry_count)
2155 count = lpfc_hbq_defs[hbqno]->entry_count -
2156 phba->hbqs[hbqno].buffer_count;
2157 if (!count)
2158 return 0;
2159
2160 for (i = 0; i < count; i++) {
2161 hbq_buffer = (phba->hbqs[hbqno].hbq_alloc_buffer)(phba);
2162 if (!hbq_buffer)
2163 break;
2164 list_add_tail(&hbq_buffer->dbuf.list, &hbq_buf_list);
2165 }
2166
2167 spin_lock_irqsave(&phba->hbalock, flags);
2168 if (!phba->hbq_in_use)
2169 goto err;
2170 while (!list_empty(&hbq_buf_list)) {
2171 list_remove_head(&hbq_buf_list, hbq_buffer, struct hbq_dmabuf,
2172 dbuf.list);
2173 hbq_buffer->tag = (phba->hbqs[hbqno].buffer_count |
2174 (hbqno << 16));
2175 if (!lpfc_sli_hbq_to_firmware(phba, hbqno, hbq_buffer)) {
2176 phba->hbqs[hbqno].buffer_count++;
2177 posted++;
2178 } else
2179 (phba->hbqs[hbqno].hbq_free_buffer)(phba, hbq_buffer);
2180 }
2181 spin_unlock_irqrestore(&phba->hbalock, flags);
2182 return posted;
2183err:
2184 spin_unlock_irqrestore(&phba->hbalock, flags);
2185 while (!list_empty(&hbq_buf_list)) {
2186 list_remove_head(&hbq_buf_list, hbq_buffer, struct hbq_dmabuf,
2187 dbuf.list);
2188 (phba->hbqs[hbqno].hbq_free_buffer)(phba, hbq_buffer);
2189 }
2190 return 0;
2191}
2202int
2203lpfc_sli_hbqbuf_add_hbqs(struct lpfc_hba *phba, uint32_t qno)
2204{
2205 if (phba->sli_rev == LPFC_SLI_REV4)
2206 return 0;
2207 else
2208 return lpfc_sli_hbqbuf_fill_hbqs(phba, qno,
2209 lpfc_hbq_defs[qno]->add_count);
2210}
2221static int
2222lpfc_sli_hbqbuf_init_hbqs(struct lpfc_hba *phba, uint32_t qno)
2223{
2224 if (phba->sli_rev == LPFC_SLI_REV4)
2225 return lpfc_sli_hbqbuf_fill_hbqs(phba, qno,
2226 lpfc_hbq_defs[qno]->entry_count);
2227 else
2228 return lpfc_sli_hbqbuf_fill_hbqs(phba, qno,
2229 lpfc_hbq_defs[qno]->init_count);
2230}
2240static struct hbq_dmabuf *
2241lpfc_sli_hbqbuf_get(struct list_head *rb_list)
2242{
2243 struct lpfc_dmabuf *d_buf;
2244
2245 list_remove_head(rb_list, d_buf, struct lpfc_dmabuf, list);
2246 if (!d_buf)
2247 return NULL;
2248 return container_of(d_buf, struct hbq_dmabuf, dbuf);
2249}
2259static struct rqb_dmabuf *
2260lpfc_sli_rqbuf_get(struct lpfc_hba *phba, struct lpfc_queue *hrq)
2261{
2262 struct lpfc_dmabuf *h_buf;
2263 struct lpfc_rqb *rqbp;
2264
2265 rqbp = hrq->rqbp;
2266 list_remove_head(&rqbp->rqb_buffer_list, h_buf,
2267 struct lpfc_dmabuf, list);
2268 if (!h_buf)
2269 return NULL;
2270 rqbp->buffer_count--;
2271 return container_of(h_buf, struct rqb_dmabuf, hbuf);
2272}
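
/**
 * lpfc_sli_hbqbuf_find - Find a posted HBQ buffer by its tag
 * @phba: adapter owning the HBQ
 * @tag: buffer tag (HBQ number in the upper 16 bits)
 *
 * Return: the matching buffer, or NULL if the tag is unknown.
 **/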
2283static struct hbq_dmabuf *
2284lpfc_sli_hbqbuf_find(struct lpfc_hba *phba, uint32_t tag)
2285{
2286 struct lpfc_dmabuf *d_buf;
2287 struct hbq_dmabuf *hbq_buf;
2288 uint32_t hbqno;
2289
2290 hbqno = tag >> 16;
2291 if (hbqno >= LPFC_MAX_HBQS)
2292 return NULL;
2293
2294 spin_lock_irq(&phba->hbalock);
2295 list_for_each_entry(d_buf, &phba->hbqs[hbqno].hbq_buffer_list, list) {
2296 hbq_buf = container_of(d_buf, struct hbq_dmabuf, dbuf);
2297 if (hbq_buf->tag == tag) {
2298 spin_unlock_irq(&phba->hbalock);
2299 return hbq_buf;
2300 }
2301 }
2302 spin_unlock_irq(&phba->hbalock);
2303 lpfc_printf_log(phba, KERN_ERR, LOG_SLI | LOG_VPORT,
2304 "1803 Bad hbq tag. Data: x%x x%x\n",
2305 tag, phba->hbqs[tag >> 16].buffer_count);
2306 return NULL;
2307}
2318void
2319lpfc_sli_free_hbq(struct lpfc_hba *phba, struct hbq_dmabuf *hbq_buffer)
2320{
2321 uint32_t hbqno;
2322
2323 if (hbq_buffer) {
2324 hbqno = hbq_buffer->tag >> 16;
2325 if (lpfc_sli_hbq_to_firmware(phba, hbqno, hbq_buffer))
2326 (phba->hbqs[hbqno].hbq_free_buffer)(phba, hbq_buffer);
2327 }
2328}
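
/**
 * lpfc_sli_chk_mbx_command - Validate a mailbox command code
 * @mbxCommand: mailbox command code to check
 *
 * Return: the command code if known, otherwise MBX_SHUTDOWN.
 **/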
2339static int
2340lpfc_sli_chk_mbx_command(uint8_t mbxCommand)
2341{
2342 uint8_t ret;
2343
2344 switch (mbxCommand) {
2345 case MBX_LOAD_SM:
2346 case MBX_READ_NV:
2347 case MBX_WRITE_NV:
2348 case MBX_WRITE_VPARMS:
2349 case MBX_RUN_BIU_DIAG:
2350 case MBX_INIT_LINK:
2351 case MBX_DOWN_LINK:
2352 case MBX_CONFIG_LINK:
2353 case MBX_CONFIG_RING:
2354 case MBX_RESET_RING:
2355 case MBX_READ_CONFIG:
2356 case MBX_READ_RCONFIG:
2357 case MBX_READ_SPARM:
2358 case MBX_READ_STATUS:
2359 case MBX_READ_RPI:
2360 case MBX_READ_XRI:
2361 case MBX_READ_REV:
2362 case MBX_READ_LNK_STAT:
2363 case MBX_REG_LOGIN:
2364 case MBX_UNREG_LOGIN:
2365 case MBX_CLEAR_LA:
2366 case MBX_DUMP_MEMORY:
2367 case MBX_DUMP_CONTEXT:
2368 case MBX_RUN_DIAGS:
2369 case MBX_RESTART:
2370 case MBX_UPDATE_CFG:
2371 case MBX_DOWN_LOAD:
2372 case MBX_DEL_LD_ENTRY:
2373 case MBX_RUN_PROGRAM:
2374 case MBX_SET_MASK:
2375 case MBX_SET_VARIABLE:
2376 case MBX_UNREG_D_ID:
2377 case MBX_KILL_BOARD:
2378 case MBX_CONFIG_FARP:
2379 case MBX_BEACON:
2380 case MBX_LOAD_AREA:
2381 case MBX_RUN_BIU_DIAG64:
2382 case MBX_CONFIG_PORT:
2383 case MBX_READ_SPARM64:
2384 case MBX_READ_RPI64:
2385 case MBX_REG_LOGIN64:
2386 case MBX_READ_TOPOLOGY:
2387 case MBX_WRITE_WWN:
2388 case MBX_SET_DEBUG:
2389 case MBX_LOAD_EXP_ROM:
2390 case MBX_ASYNCEVT_ENABLE:
2391 case MBX_REG_VPI:
2392 case MBX_UNREG_VPI:
2393 case MBX_HEARTBEAT:
2394 case MBX_PORT_CAPABILITIES:
2395 case MBX_PORT_IOV_CONTROL:
2396 case MBX_SLI4_CONFIG:
2397 case MBX_SLI4_REQ_FTRS:
2398 case MBX_REG_FCFI:
2399 case MBX_UNREG_FCFI:
2400 case MBX_REG_VFI:
2401 case MBX_UNREG_VFI:
2402 case MBX_INIT_VPI:
2403 case MBX_INIT_VFI:
2404 case MBX_RESUME_RPI:
2405 case MBX_READ_EVENT_LOG_STATUS:
2406 case MBX_READ_EVENT_LOG:
2407 case MBX_SECURITY_MGMT:
2408 case MBX_AUTH_PORT:
2409 case MBX_ACCESS_VDATA:
2410 ret = mbxCommand;
2411 break;
2412 default:
2413 ret = MBX_SHUTDOWN;
2414 break;
2415 }
2416 return ret;
2417}
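
/**
 * lpfc_sli_wake_mbox_wait - Completion handler that wakes a waiting issuer
 * @phba: adapter owning the mailbox
 * @pmboxq: completed mailbox command
 *
 * Wakes the thread waiting synchronously on this mailbox by completing
 * the completion stored in the mailbox context.
 **/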
2430void
2431lpfc_sli_wake_mbox_wait(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmboxq)
2432{
2433 unsigned long drvr_flag;
2434 struct completion *pmbox_done;
2435
2436
2437
2438
2439
2440 pmboxq->mbox_flag |= LPFC_MBX_WAKE;
2441 spin_lock_irqsave(&phba->hbalock, drvr_flag);
2442 pmbox_done = (struct completion *)pmboxq->context3;
2443 if (pmbox_done)
2444 complete(pmbox_done);
2445 spin_unlock_irqrestore(&phba->hbalock, drvr_flag);
2446 return;
2447}
2448
2449static void
2450__lpfc_sli_rpi_release(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
2451{
2452 unsigned long iflags;
2453
2454 if (ndlp->nlp_flag & NLP_RELEASE_RPI) {
2455 lpfc_sli4_free_rpi(vport->phba, ndlp->nlp_rpi);
2456 spin_lock_irqsave(&vport->phba->ndlp_lock, iflags);
2457 ndlp->nlp_flag &= ~NLP_RELEASE_RPI;
2458 ndlp->nlp_rpi = LPFC_RPI_ALLOC_ERROR;
2459 spin_unlock_irqrestore(&vport->phba->ndlp_lock, iflags);
2460 }
2461 ndlp->nlp_flag &= ~NLP_UNREG_INP;
2462}
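
/**
 * lpfc_sli_def_mbox_cmpl - Default mailbox completion handler
 * @phba: adapter owning the mailbox
 * @pmb: completed mailbox command
 *
 * Frees resources attached to the mailbox, handles REG_LOGIN, REG_VPI and
 * UNREG_LOGIN side effects, and frees the mailbox itself.
 **/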
2474void
2475lpfc_sli_def_mbox_cmpl(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
2476{
2477 struct lpfc_vport *vport = pmb->vport;
2478 struct lpfc_dmabuf *mp;
2479 struct lpfc_nodelist *ndlp;
2480 struct Scsi_Host *shost;
2481 uint16_t rpi, vpi;
2482 int rc;
2483
2484 mp = (struct lpfc_dmabuf *)(pmb->ctx_buf);
2485
2486 if (mp) {
2487 lpfc_mbuf_free(phba, mp->virt, mp->phys);
2488 kfree(mp);
2489 }
2490
2491
2492
2493
2494
2495 if (!(phba->pport->load_flag & FC_UNLOADING) &&
2496 pmb->u.mb.mbxCommand == MBX_REG_LOGIN64 &&
2497 !pmb->u.mb.mbxStatus) {
2498 rpi = pmb->u.mb.un.varWords[0];
2499 vpi = pmb->u.mb.un.varRegLogin.vpi;
2500 if (phba->sli_rev == LPFC_SLI_REV4)
2501 vpi -= phba->sli4_hba.max_cfg_param.vpi_base;
2502 lpfc_unreg_login(phba, vpi, rpi, pmb);
2503 pmb->vport = vport;
2504 pmb->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
2505 rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT);
2506 if (rc != MBX_NOT_FINISHED)
2507 return;
2508 }
2509
2510 if ((pmb->u.mb.mbxCommand == MBX_REG_VPI) &&
2511 !(phba->pport->load_flag & FC_UNLOADING) &&
2512 !pmb->u.mb.mbxStatus) {
2513 shost = lpfc_shost_from_vport(vport);
2514 spin_lock_irq(shost->host_lock);
2515 vport->vpi_state |= LPFC_VPI_REGISTERED;
2516 vport->fc_flag &= ~FC_VPORT_NEEDS_REG_VPI;
2517 spin_unlock_irq(shost->host_lock);
2518 }
2519
2520 if (pmb->u.mb.mbxCommand == MBX_REG_LOGIN64) {
2521 ndlp = (struct lpfc_nodelist *)pmb->ctx_ndlp;
2522 lpfc_nlp_put(ndlp);
2523 pmb->ctx_buf = NULL;
2524 pmb->ctx_ndlp = NULL;
2525 }
2526
2527 if (pmb->u.mb.mbxCommand == MBX_UNREG_LOGIN) {
2528 ndlp = (struct lpfc_nodelist *)pmb->ctx_ndlp;
2529
2530
2531 if (ndlp) {
2532 lpfc_printf_vlog(
2533 vport,
2534 KERN_INFO, LOG_MBOX | LOG_DISCOVERY,
2535 "1438 UNREG cmpl deferred mbox x%x "
2536 "on NPort x%x Data: x%x x%x %px\n",
2537 ndlp->nlp_rpi, ndlp->nlp_DID,
2538 ndlp->nlp_flag, ndlp->nlp_defer_did, ndlp);
2539
2540 if ((ndlp->nlp_flag & NLP_UNREG_INP) &&
2541 (ndlp->nlp_defer_did != NLP_EVT_NOTHING_PENDING)) {
2542 ndlp->nlp_flag &= ~NLP_UNREG_INP;
2543 ndlp->nlp_defer_did = NLP_EVT_NOTHING_PENDING;
2544 lpfc_issue_els_plogi(vport, ndlp->nlp_DID, 0);
2545 } else {
2546 __lpfc_sli_rpi_release(vport, ndlp);
2547 }
2548 if (vport->load_flag & FC_UNLOADING)
2549 lpfc_nlp_put(ndlp);
2550 pmb->ctx_ndlp = NULL;
2551 }
2552 }
2553
2554
2555 if ((pmb->u.mb.mbxCommand == MBX_INIT_LINK) &&
2556 (pmb->u.mb.mbxStatus == MBXERR_SEC_NO_PERMISSION))
2557 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
2558 "2860 SLI authentication is required "
2559 "for INIT_LINK but has not done yet\n");
2560
2561 if (bf_get(lpfc_mqe_command, &pmb->u.mqe) == MBX_SLI4_CONFIG)
2562 lpfc_sli4_mbox_cmd_free(phba, pmb);
2563 else
2564 mempool_free(pmb, phba->mbox_mem_pool);
2565}
2579void
2580lpfc_sli4_unreg_rpi_cmpl_clr(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
2581{
2582 struct lpfc_vport *vport = pmb->vport;
2583 struct lpfc_nodelist *ndlp;
2584
2585 ndlp = pmb->ctx_ndlp;
2586 if (pmb->u.mb.mbxCommand == MBX_UNREG_LOGIN) {
2587 if (phba->sli_rev == LPFC_SLI_REV4 &&
2588 (bf_get(lpfc_sli_intf_if_type,
2589 &phba->sli4_hba.sli_intf) >=
2590 LPFC_SLI_INTF_IF_TYPE_2)) {
2591 if (ndlp) {
2592 lpfc_printf_vlog(
2593 vport, KERN_INFO, LOG_MBOX | LOG_SLI,
2594 "0010 UNREG_LOGIN vpi:%x "
2595 "rpi:%x DID:%x defer x%x flg x%x "
2596 "map:%x %px\n",
2597 vport->vpi, ndlp->nlp_rpi,
2598 ndlp->nlp_DID, ndlp->nlp_defer_did,
2599 ndlp->nlp_flag,
2600 ndlp->nlp_usg_map, ndlp);
2601 ndlp->nlp_flag &= ~NLP_LOGO_ACC;
2602 lpfc_nlp_put(ndlp);
2603
2604
2605
2606
2607 if ((ndlp->nlp_flag & NLP_UNREG_INP) &&
2608 (ndlp->nlp_defer_did !=
2609 NLP_EVT_NOTHING_PENDING)) {
2610 lpfc_printf_vlog(
2611 vport, KERN_INFO, LOG_DISCOVERY,
2612 "4111 UNREG cmpl deferred "
2613 "clr x%x on "
2614 "NPort x%x Data: x%x x%px\n",
2615 ndlp->nlp_rpi, ndlp->nlp_DID,
2616 ndlp->nlp_defer_did, ndlp);
2617 ndlp->nlp_flag &= ~NLP_UNREG_INP;
2618 ndlp->nlp_defer_did =
2619 NLP_EVT_NOTHING_PENDING;
2620 lpfc_issue_els_plogi(
2621 vport, ndlp->nlp_DID, 0);
2622 } else {
2623 __lpfc_sli_rpi_release(vport, ndlp);
2624 }
2625 }
2626 }
2627 }
2628
2629 mempool_free(pmb, phba->mbox_mem_pool);
2630}
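
/**
 * lpfc_sli_handle_mb_event - Process completed mailbox commands
 * @phba: adapter to service
 *
 * Walks the mailbox completion queue, retries commands that failed for
 * lack of resources, and invokes each command's completion handler.
 **/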
2645int
2646lpfc_sli_handle_mb_event(struct lpfc_hba *phba)
2647{
2648 MAILBOX_t *pmbox;
2649 LPFC_MBOXQ_t *pmb;
2650 int rc;
2651 LIST_HEAD(cmplq);
2652
2653 phba->sli.slistat.mbox_event++;
2654
2655
2656 spin_lock_irq(&phba->hbalock);
2657 list_splice_init(&phba->sli.mboxq_cmpl, &cmplq);
2658 spin_unlock_irq(&phba->hbalock);
2659
2660
2661 do {
2662 list_remove_head(&cmplq, pmb, LPFC_MBOXQ_t, list);
2663 if (pmb == NULL)
2664 break;
2665
2666 pmbox = &pmb->u.mb;
2667
2668 if (pmbox->mbxCommand != MBX_HEARTBEAT) {
2669 if (pmb->vport) {
2670 lpfc_debugfs_disc_trc(pmb->vport,
2671 LPFC_DISC_TRC_MBOX_VPORT,
2672 "MBOX cmpl vport: cmd:x%x mb:x%x x%x",
2673 (uint32_t)pmbox->mbxCommand,
2674 pmbox->un.varWords[0],
2675 pmbox->un.varWords[1]);
2676 }
2677 else {
2678 lpfc_debugfs_disc_trc(phba->pport,
2679 LPFC_DISC_TRC_MBOX,
2680 "MBOX cmpl: cmd:x%x mb:x%x x%x",
2681 (uint32_t)pmbox->mbxCommand,
2682 pmbox->un.varWords[0],
2683 pmbox->un.varWords[1]);
2684 }
2685 }
2686
2687
2688
2689
2690 if (lpfc_sli_chk_mbx_command(pmbox->mbxCommand) ==
2691 MBX_SHUTDOWN) {
2692
2693 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
2694 "(%d):0323 Unknown Mailbox command "
2695 "x%x (x%x/x%x) Cmpl\n",
2696 pmb->vport ? pmb->vport->vpi :
2697 LPFC_VPORT_UNKNOWN,
2698 pmbox->mbxCommand,
2699 lpfc_sli_config_mbox_subsys_get(phba,
2700 pmb),
2701 lpfc_sli_config_mbox_opcode_get(phba,
2702 pmb));
2703 phba->link_state = LPFC_HBA_ERROR;
2704 phba->work_hs = HS_FFER3;
2705 lpfc_handle_eratt(phba);
2706 continue;
2707 }
2708
2709 if (pmbox->mbxStatus) {
2710 phba->sli.slistat.mbox_stat_err++;
2711 if (pmbox->mbxStatus == MBXERR_NO_RESOURCES) {
2712
2713 lpfc_printf_log(phba, KERN_INFO,
2714 LOG_MBOX | LOG_SLI,
2715 "(%d):0305 Mbox cmd cmpl "
2716 "error - RETRYing Data: x%x "
2717 "(x%x/x%x) x%x x%x x%x\n",
2718 pmb->vport ? pmb->vport->vpi :
2719 LPFC_VPORT_UNKNOWN,
2720 pmbox->mbxCommand,
2721 lpfc_sli_config_mbox_subsys_get(phba,
2722 pmb),
2723 lpfc_sli_config_mbox_opcode_get(phba,
2724 pmb),
2725 pmbox->mbxStatus,
2726 pmbox->un.varWords[0],
2727 pmb->vport ? pmb->vport->port_state :
2728 LPFC_VPORT_UNKNOWN);
2729 pmbox->mbxStatus = 0;
2730 pmbox->mbxOwner = OWN_HOST;
2731 rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT);
2732 if (rc != MBX_NOT_FINISHED)
2733 continue;
2734 }
2735 }
2736
2737
2738 lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI,
2739 "(%d):0307 Mailbox cmd x%x (x%x/x%x) Cmpl %ps "
2740 "Data: x%x x%x x%x x%x x%x x%x x%x x%x x%x "
2741 "x%x x%x x%x\n",
2742 pmb->vport ? pmb->vport->vpi : 0,
2743 pmbox->mbxCommand,
2744 lpfc_sli_config_mbox_subsys_get(phba, pmb),
2745 lpfc_sli_config_mbox_opcode_get(phba, pmb),
2746 pmb->mbox_cmpl,
2747 *((uint32_t *) pmbox),
2748 pmbox->un.varWords[0],
2749 pmbox->un.varWords[1],
2750 pmbox->un.varWords[2],
2751 pmbox->un.varWords[3],
2752 pmbox->un.varWords[4],
2753 pmbox->un.varWords[5],
2754 pmbox->un.varWords[6],
2755 pmbox->un.varWords[7],
2756 pmbox->un.varWords[8],
2757 pmbox->un.varWords[9],
2758 pmbox->un.varWords[10]);
2759
2760 if (pmb->mbox_cmpl)
2761 pmb->mbox_cmpl(phba, pmb);
2762 } while (1);
2763 return 0;
2764}
2765
2766
2767
2768
2769
2770
2771
2772
2773
2774
2775
2776
2777
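/* Reconstructed kernel-doc, describing the helper below as written. */
/**
 * lpfc_sli_get_buff - Retrieve the buffer associated with a buffer tag
 * @phba: Pointer to HBA context object.
 * @pring: Pointer to driver SLI ring object.
 * @tag: Buffer tag from the received iocb.
 *
 * If the tag has QUE_BUFTAG_BIT set, the buffer is looked up on the
 * ring's posted buffer queue; otherwise the tag identifies an HBQ
 * buffer and its dmabuf is returned. Returns NULL if no buffer with
 * the given tag can be found.
 **/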
2778static struct lpfc_dmabuf *
2779lpfc_sli_get_buff(struct lpfc_hba *phba,
2780 struct lpfc_sli_ring *pring,
2781 uint32_t tag)
2782{
2783 struct hbq_dmabuf *hbq_entry;
2784
2785 if (tag & QUE_BUFTAG_BIT)
2786 return lpfc_sli_ring_taggedbuf_get(phba, pring, tag);
2787 hbq_entry = lpfc_sli_hbqbuf_find(phba, tag);
2788 if (!hbq_entry)
2789 return NULL;
2790 return &hbq_entry->dbuf;
2791}
2792
2793
2794
2795
2796
2797
2798
2799
2800
2801
2802
2803
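/* Reconstructed kernel-doc, describing the handler below as written. */
/**
 * lpfc_nvme_unsol_ls_handler - Process an unsolicited NVME LS frame
 * @phba: Pointer to HBA context object.
 * @piocb: Pointer to the iocbq carrying the received frame.
 *
 * This routine validates the received NVME LS request, allocates an
 * asynchronous exchange context, and passes it to either the NVME
 * target or the NVME initiator LS handler. On any failure the receive
 * buffer is freed, an abort is issued for the exchange when the frame
 * allows it, and the exchange context is released.
 **/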
2804static void
2805lpfc_nvme_unsol_ls_handler(struct lpfc_hba *phba, struct lpfc_iocbq *piocb)
2806{
2807 struct lpfc_nodelist *ndlp;
2808 struct lpfc_dmabuf *d_buf;
2809 struct hbq_dmabuf *nvmebuf;
2810 struct fc_frame_header *fc_hdr;
2811 struct lpfc_async_xchg_ctx *axchg = NULL;
2812 char *failwhy = NULL;
2813 uint32_t oxid, sid, did, fctl, size;
2814 int ret = 1;
2815
2816 d_buf = piocb->context2;
2817
2818 nvmebuf = container_of(d_buf, struct hbq_dmabuf, dbuf);
2819 fc_hdr = nvmebuf->hbuf.virt;
2820 oxid = be16_to_cpu(fc_hdr->fh_ox_id);
2821 sid = sli4_sid_from_fc_hdr(fc_hdr);
2822 did = sli4_did_from_fc_hdr(fc_hdr);
2823 fctl = (fc_hdr->fh_f_ctl[0] << 16 |
2824 fc_hdr->fh_f_ctl[1] << 8 |
2825 fc_hdr->fh_f_ctl[2]);
2826 size = bf_get(lpfc_rcqe_length, &nvmebuf->cq_event.cqe.rcqe_cmpl);
2827
2828 lpfc_nvmeio_data(phba, "NVME LS RCV: xri x%x sz %d from %06x\n",
2829 oxid, size, sid);
2830
2831 if (phba->pport->load_flag & FC_UNLOADING) {
2832 failwhy = "Driver Unloading";
2833 } else if (!(phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME)) {
2834 failwhy = "NVME FC4 Disabled";
2835 } else if (!phba->nvmet_support && !phba->pport->localport) {
2836 failwhy = "No Localport";
2837 } else if (phba->nvmet_support && !phba->targetport) {
2838 failwhy = "No Targetport";
2839 } else if (unlikely(fc_hdr->fh_r_ctl != FC_RCTL_ELS4_REQ)) {
2840 failwhy = "Bad NVME LS R_CTL";
2841 } else if (unlikely((fctl & 0x00FF0000) !=
2842 (FC_FC_FIRST_SEQ | FC_FC_END_SEQ | FC_FC_SEQ_INIT))) {
2843 failwhy = "Bad NVME LS F_CTL";
2844 } else {
2845 axchg = kzalloc(sizeof(*axchg), GFP_ATOMIC);
2846 if (!axchg)
2847 failwhy = "No CTX memory";
2848 }
2849
2850 if (unlikely(failwhy)) {
2851 lpfc_printf_log(phba, KERN_ERR, LOG_NVME_DISC | LOG_NVME_IOERR,
2852 "6154 Drop NVME LS: SID %06X OXID x%X: %s\n",
2853 sid, oxid, failwhy);
2854 goto out_fail;
2855 }
2856
2857
2858 ndlp = lpfc_findnode_did(phba->pport, sid);
2859 if (!ndlp || !NLP_CHK_NODE_ACT(ndlp) ||
2860 ((ndlp->nlp_state != NLP_STE_UNMAPPED_NODE) &&
2861 (ndlp->nlp_state != NLP_STE_MAPPED_NODE))) {
2862 lpfc_printf_log(phba, KERN_ERR, LOG_NVME_DISC,
2863 "6216 NVME Unsol rcv: No ndlp: "
2864 "NPort_ID x%x oxid x%x\n",
2865 sid, oxid);
2866 goto out_fail;
2867 }
2868
2869 axchg->phba = phba;
2870 axchg->ndlp = ndlp;
2871 axchg->size = size;
2872 axchg->oxid = oxid;
2873 axchg->sid = sid;
2874 axchg->wqeq = NULL;
2875 axchg->state = LPFC_NVME_STE_LS_RCV;
2876 axchg->entry_cnt = 1;
2877 axchg->rqb_buffer = (void *)nvmebuf;
2878 axchg->hdwq = &phba->sli4_hba.hdwq[0];
2879 axchg->payload = nvmebuf->dbuf.virt;
2880 INIT_LIST_HEAD(&axchg->list);
2881
2882 if (phba->nvmet_support)
2883 ret = lpfc_nvmet_handle_lsreq(phba, axchg);
2884 else
2885 ret = lpfc_nvme_handle_lsreq(phba, axchg);
2886
2887
2888 if (!ret)
2889 return;
2890
2891 lpfc_printf_log(phba, KERN_ERR, LOG_NVME_DISC | LOG_NVME_IOERR,
2892 "6155 Drop NVME LS from DID %06X: SID %06X OXID x%X "
2893 "NVMe%s handler failed %d\n",
2894 did, sid, oxid,
2895 (phba->nvmet_support) ? "T" : "I", ret);
2896
2897out_fail:
2898
2899
2900 lpfc_in_buf_free(phba, &nvmebuf->dbuf);
2901
2902
2903 if (axchg && (fctl & FC_FC_FIRST_SEQ && !(fctl & FC_FC_EX_CTX)))
2904 ret = lpfc_nvme_unsol_ls_issue_abort(phba, axchg, sid, oxid);
2905
2906 if (ret)
2907 kfree(axchg);
2908}
2909
2910
2911
2912
2913
2914
2915
2916
2917
2918
2919
2920
2921
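/* Reconstructed kernel-doc, describing the helper below as written. */
/**
 * lpfc_complete_unsol_iocb - Dispatch an unsolicited sequence to its handler
 * @phba: Pointer to HBA context object.
 * @pring: Pointer to driver SLI ring object.
 * @saveq: Pointer to the iocbq representing the received sequence.
 * @fch_r_ctl: R_CTL of the first frame of the sequence.
 * @fch_type: Type of the first frame of the sequence.
 *
 * NVME frames are handed directly to the NVME LS handler. If the ring
 * has a profile handler registered, it receives every sequence;
 * otherwise the ring's handlers are searched for a matching
 * R_CTL/Type pair and the matching handler is invoked.
 *
 * Returns 1 if the sequence was handled, 0 otherwise.
 **/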
2922static int
2923lpfc_complete_unsol_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
2924 struct lpfc_iocbq *saveq, uint32_t fch_r_ctl,
2925 uint32_t fch_type)
2926{
2927 int i;
2928
2929 switch (fch_type) {
2930 case FC_TYPE_NVME:
2931 lpfc_nvme_unsol_ls_handler(phba, saveq);
2932 return 1;
2933 default:
2934 break;
2935 }
2936
2937
2938 if (pring->prt[0].profile) {
2939 if (pring->prt[0].lpfc_sli_rcv_unsol_event)
2940 (pring->prt[0].lpfc_sli_rcv_unsol_event) (phba, pring,
2941 saveq);
2942 return 1;
2943 }
2944
2945
2946 for (i = 0; i < pring->num_mask; i++) {
2947 if ((pring->prt[i].rctl == fch_r_ctl) &&
2948 (pring->prt[i].type == fch_type)) {
2949 if (pring->prt[i].lpfc_sli_rcv_unsol_event)
2950 (pring->prt[i].lpfc_sli_rcv_unsol_event)
2951 (phba, pring, saveq);
2952 return 1;
2953 }
2954 }
2955 return 0;
2956}
2957
2958
2959
2960
2961
2962
2963
2964
2965
2966
2967
2968
2969
2970
2971
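/* Reconstructed kernel-doc, describing the handler below as written. */
/**
 * lpfc_sli_process_unsol_iocb - Handle an unsolicited iocb
 * @phba: Pointer to HBA context object.
 * @pring: Pointer to driver SLI ring object.
 * @saveq: Pointer to the unsolicited iocb.
 *
 * This routine handles async status iocbs, frees buffers returned by
 * RET_XRI iocbs, maps HBQ buffer tags to their buffers, reassembles
 * multi-iocb sequences on the ring's continuation queue, and finally
 * hands the completed sequence to lpfc_complete_unsol_iocb() with the
 * appropriate Rctl and Type values.
 *
 * Returns 1 when the sequence has been consumed, 0 when more iocbs of
 * the sequence are still expected.
 **/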
2972static int
2973lpfc_sli_process_unsol_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
2974 struct lpfc_iocbq *saveq)
2975{
2976 IOCB_t * irsp;
2977 WORD5 * w5p;
2978 uint32_t Rctl, Type;
2979 struct lpfc_iocbq *iocbq;
2980 struct lpfc_dmabuf *dmzbuf;
2981
2982 irsp = &(saveq->iocb);
2983
2984 if (irsp->ulpCommand == CMD_ASYNC_STATUS) {
2985 if (pring->lpfc_sli_rcv_async_status)
2986 pring->lpfc_sli_rcv_async_status(phba, pring, saveq);
2987 else
2988 lpfc_printf_log(phba,
2989 KERN_WARNING,
2990 LOG_SLI,
2991 "0316 Ring %d handler: unexpected "
2992 "ASYNC_STATUS iocb received evt_code "
2993 "0x%x\n",
2994 pring->ringno,
2995 irsp->un.asyncstat.evt_code);
2996 return 1;
2997 }
2998
2999 if ((irsp->ulpCommand == CMD_IOCB_RET_XRI64_CX) &&
3000 (phba->sli3_options & LPFC_SLI3_HBQ_ENABLED)) {
3001 if (irsp->ulpBdeCount > 0) {
3002 dmzbuf = lpfc_sli_get_buff(phba, pring,
3003 irsp->un.ulpWord[3]);
3004 lpfc_in_buf_free(phba, dmzbuf);
3005 }
3006
3007 if (irsp->ulpBdeCount > 1) {
3008 dmzbuf = lpfc_sli_get_buff(phba, pring,
3009 irsp->unsli3.sli3Words[3]);
3010 lpfc_in_buf_free(phba, dmzbuf);
3011 }
3012
3013 if (irsp->ulpBdeCount > 2) {
3014 dmzbuf = lpfc_sli_get_buff(phba, pring,
3015 irsp->unsli3.sli3Words[7]);
3016 lpfc_in_buf_free(phba, dmzbuf);
3017 }
3018
3019 return 1;
3020 }
3021
3022 if (phba->sli3_options & LPFC_SLI3_HBQ_ENABLED) {
3023 if (irsp->ulpBdeCount != 0) {
3024 saveq->context2 = lpfc_sli_get_buff(phba, pring,
3025 irsp->un.ulpWord[3]);
3026 if (!saveq->context2)
3027 lpfc_printf_log(phba,
3028 KERN_ERR,
3029 LOG_SLI,
3030 "0341 Ring %d Cannot find buffer for "
3031 "an unsolicited iocb. tag 0x%x\n",
3032 pring->ringno,
3033 irsp->un.ulpWord[3]);
3034 }
3035 if (irsp->ulpBdeCount == 2) {
3036 saveq->context3 = lpfc_sli_get_buff(phba, pring,
3037 irsp->unsli3.sli3Words[7]);
3038 if (!saveq->context3)
3039 lpfc_printf_log(phba,
3040 KERN_ERR,
3041 LOG_SLI,
3042 "0342 Ring %d Cannot find buffer for an"
3043 " unsolicited iocb. tag 0x%x\n",
3044 pring->ringno,
3045 irsp->unsli3.sli3Words[7]);
3046 }
3047 list_for_each_entry(iocbq, &saveq->list, list) {
3048 irsp = &(iocbq->iocb);
3049 if (irsp->ulpBdeCount != 0) {
3050 iocbq->context2 = lpfc_sli_get_buff(phba, pring,
3051 irsp->un.ulpWord[3]);
3052 if (!iocbq->context2)
3053 lpfc_printf_log(phba,
3054 KERN_ERR,
3055 LOG_SLI,
3056 "0343 Ring %d Cannot find "
3057 "buffer for an unsolicited iocb"
3058 ". tag 0x%x\n", pring->ringno,
3059 irsp->un.ulpWord[3]);
3060 }
3061 if (irsp->ulpBdeCount == 2) {
3062 iocbq->context3 = lpfc_sli_get_buff(phba, pring,
3063 irsp->unsli3.sli3Words[7]);
3064 if (!iocbq->context3)
3065 lpfc_printf_log(phba,
3066 KERN_ERR,
3067 LOG_SLI,
3068 "0344 Ring %d Cannot find "
3069 "buffer for an unsolicited "
3070 "iocb. tag 0x%x\n",
3071 pring->ringno,
3072 irsp->unsli3.sli3Words[7]);
3073 }
3074 }
3075 }
3076 if (irsp->ulpBdeCount != 0 &&
3077 (irsp->ulpCommand == CMD_IOCB_RCV_CONT64_CX ||
3078 irsp->ulpStatus == IOSTAT_INTERMED_RSP)) {
3079 int found = 0;
3080
3081
3082 list_for_each_entry(iocbq, &pring->iocb_continue_saveq, clist) {
3083 if (iocbq->iocb.unsli3.rcvsli3.ox_id ==
3084 saveq->iocb.unsli3.rcvsli3.ox_id) {
3085 list_add_tail(&saveq->list, &iocbq->list);
3086 found = 1;
3087 break;
3088 }
3089 }
3090 if (!found)
3091 list_add_tail(&saveq->clist,
3092 &pring->iocb_continue_saveq);
3093 if (saveq->iocb.ulpStatus != IOSTAT_INTERMED_RSP) {
3094 list_del_init(&iocbq->clist);
3095 saveq = iocbq;
3096 irsp = &(saveq->iocb);
3097 } else
3098 return 0;
3099 }
3100 if ((irsp->ulpCommand == CMD_RCV_ELS_REQ64_CX) ||
3101 (irsp->ulpCommand == CMD_RCV_ELS_REQ_CX) ||
3102 (irsp->ulpCommand == CMD_IOCB_RCV_ELS64_CX)) {
3103 Rctl = FC_RCTL_ELS_REQ;
3104 Type = FC_TYPE_ELS;
3105 } else {
3106 w5p = (WORD5 *)&(saveq->iocb.un.ulpWord[5]);
3107 Rctl = w5p->hcsw.Rctl;
3108 Type = w5p->hcsw.Type;
3109
3110
3111 if ((Rctl == 0) && (pring->ringno == LPFC_ELS_RING) &&
3112 (irsp->ulpCommand == CMD_RCV_SEQUENCE64_CX ||
3113 irsp->ulpCommand == CMD_IOCB_RCV_SEQ64_CX)) {
3114 Rctl = FC_RCTL_ELS_REQ;
3115 Type = FC_TYPE_ELS;
3116 w5p->hcsw.Rctl = Rctl;
3117 w5p->hcsw.Type = Type;
3118 }
3119 }
3120
3121 if (!lpfc_complete_unsol_iocb(phba, pring, saveq, Rctl, Type))
3122 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
3123 "0313 Ring %d handler: unexpected Rctl x%x "
3124 "Type x%x received\n",
3125 pring->ringno, Rctl, Type);
3126
3127 return 1;
3128}
3129
3130
3131
3132
3133
3134
3135
3136
3137
3138
3139
3140
3141
3142
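/* Reconstructed kernel-doc, describing the lookup below as written. */
/**
 * lpfc_sli_iocbq_lookup - Look up the command iocb for a response iocb
 * @phba: Pointer to HBA context object.
 * @pring: Pointer to driver SLI ring object.
 * @prspiocb: Pointer to the response iocb.
 *
 * The command iocb is located in the iocbq_lookup array using the
 * iotag carried in the response. If it is still on the txcmplq it is
 * removed from that list and returned; otherwise NULL is returned and
 * an error is logged. The ring lock (SLI-4) or hbalock (SLI-3) is
 * held while the lookup array and txcmplq are examined.
 **/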
3143static struct lpfc_iocbq *
3144lpfc_sli_iocbq_lookup(struct lpfc_hba *phba,
3145 struct lpfc_sli_ring *pring,
3146 struct lpfc_iocbq *prspiocb)
3147{
3148 struct lpfc_iocbq *cmd_iocb = NULL;
3149 uint16_t iotag;
3150 spinlock_t *temp_lock = NULL;
3151 unsigned long iflag = 0;
3152
3153 if (phba->sli_rev == LPFC_SLI_REV4)
3154 temp_lock = &pring->ring_lock;
3155 else
3156 temp_lock = &phba->hbalock;
3157
3158 spin_lock_irqsave(temp_lock, iflag);
3159 iotag = prspiocb->iocb.ulpIoTag;
3160
3161 if (iotag != 0 && iotag <= phba->sli.last_iotag) {
3162 cmd_iocb = phba->sli.iocbq_lookup[iotag];
3163 if (cmd_iocb->iocb_flag & LPFC_IO_ON_TXCMPLQ) {
3164
3165 list_del_init(&cmd_iocb->list);
3166 cmd_iocb->iocb_flag &= ~LPFC_IO_ON_TXCMPLQ;
3167 pring->txcmplq_cnt--;
3168 spin_unlock_irqrestore(temp_lock, iflag);
3169 return cmd_iocb;
3170 }
3171 }
3172
3173 spin_unlock_irqrestore(temp_lock, iflag);
3174 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
3175 "0317 iotag x%x is out of "
3176 "range: max iotag x%x wd0 x%x\n",
3177 iotag, phba->sli.last_iotag,
3178 *(((uint32_t *) &prspiocb->iocb) + 7));
3179 return NULL;
3180}
3181
3182
3183
3184
3185
3186
3187
3188
3189
3190
3191
3192
3193
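/* Reconstructed kernel-doc, describing the lookup below as written. */
/**
 * lpfc_sli_iocbq_lookup_by_tag - Look up a command iocb by iotag
 * @phba: Pointer to HBA context object.
 * @pring: Pointer to driver SLI ring object.
 * @iotag: iotag of the command iocb.
 *
 * Same as lpfc_sli_iocbq_lookup() except the iotag is supplied
 * directly instead of being taken from a response iocb.
 **/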
3194static struct lpfc_iocbq *
3195lpfc_sli_iocbq_lookup_by_tag(struct lpfc_hba *phba,
3196 struct lpfc_sli_ring *pring, uint16_t iotag)
3197{
3198 struct lpfc_iocbq *cmd_iocb = NULL;
3199 spinlock_t *temp_lock = NULL;
3200 unsigned long iflag = 0;
3201
3202 if (phba->sli_rev == LPFC_SLI_REV4)
3203 temp_lock = &pring->ring_lock;
3204 else
3205 temp_lock = &phba->hbalock;
3206
3207 spin_lock_irqsave(temp_lock, iflag);
3208 if (iotag != 0 && iotag <= phba->sli.last_iotag) {
3209 cmd_iocb = phba->sli.iocbq_lookup[iotag];
3210 if (cmd_iocb->iocb_flag & LPFC_IO_ON_TXCMPLQ) {
3211
3212 list_del_init(&cmd_iocb->list);
3213 cmd_iocb->iocb_flag &= ~LPFC_IO_ON_TXCMPLQ;
3214 pring->txcmplq_cnt--;
3215 spin_unlock_irqrestore(temp_lock, iflag);
3216 return cmd_iocb;
3217 }
3218 }
3219
3220 spin_unlock_irqrestore(temp_lock, iflag);
3221 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
3222 "0372 iotag x%x lookup error: max iotag (x%x) "
3223 "iocb_flag x%x\n",
3224 iotag, phba->sli.last_iotag,
3225 cmd_iocb ? cmd_iocb->iocb_flag : 0xffff);
3226 return NULL;
3227}
3228
3229
3230
3231
3232
3233
3234
3235
3236
3237
3238
3239
3240
3241
3242
3243
3244
3245
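/* Reconstructed kernel-doc, describing the handler below as written. */
/**
 * lpfc_sli_process_sol_iocb - Process a solicited iocb completion
 * @phba: Pointer to HBA context object.
 * @pring: Pointer to driver SLI ring object.
 * @saveq: Pointer to the response iocb.
 *
 * The matching command iocb is looked up by iotag. For ELS commands,
 * failure events are reported and driver-aborted or exchange-busy
 * state is propagated between the command and response iocbs before
 * the command's completion handler is called. If no completion handler
 * is registered the command iocb is released. An unmatched completion
 * on a non-ELS ring is logged as unexpected.
 *
 * Returns 1.
 **/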
3246static int
3247lpfc_sli_process_sol_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
3248 struct lpfc_iocbq *saveq)
3249{
3250 struct lpfc_iocbq *cmdiocbp;
3251 int rc = 1;
3252 unsigned long iflag;
3253
3254 cmdiocbp = lpfc_sli_iocbq_lookup(phba, pring, saveq);
3255 if (cmdiocbp) {
3256 if (cmdiocbp->iocb_cmpl) {
3257
3258
3259
3260
3261 if (saveq->iocb.ulpStatus &&
3262 (pring->ringno == LPFC_ELS_RING) &&
3263 (cmdiocbp->iocb.ulpCommand ==
3264 CMD_ELS_REQUEST64_CR))
3265 lpfc_send_els_failure_event(phba,
3266 cmdiocbp, saveq);
3267
3268
3269
3270
3271
3272 if (pring->ringno == LPFC_ELS_RING) {
3273 if ((phba->sli_rev < LPFC_SLI_REV4) &&
3274 (cmdiocbp->iocb_flag &
3275 LPFC_DRIVER_ABORTED)) {
3276 spin_lock_irqsave(&phba->hbalock,
3277 iflag);
3278 cmdiocbp->iocb_flag &=
3279 ~LPFC_DRIVER_ABORTED;
3280 spin_unlock_irqrestore(&phba->hbalock,
3281 iflag);
3282 saveq->iocb.ulpStatus =
3283 IOSTAT_LOCAL_REJECT;
3284 saveq->iocb.un.ulpWord[4] =
3285 IOERR_SLI_ABORTED;
3286
3287
3288
3289
3290
3291 spin_lock_irqsave(&phba->hbalock,
3292 iflag);
3293 saveq->iocb_flag |= LPFC_DELAY_MEM_FREE;
3294 spin_unlock_irqrestore(&phba->hbalock,
3295 iflag);
3296 }
3297 if (phba->sli_rev == LPFC_SLI_REV4) {
3298 if (saveq->iocb_flag &
3299 LPFC_EXCHANGE_BUSY) {
3300
3301
3302
3303
3304
3305
3306 spin_lock_irqsave(
3307 &phba->hbalock, iflag);
3308 cmdiocbp->iocb_flag |=
3309 LPFC_EXCHANGE_BUSY;
3310 spin_unlock_irqrestore(
3311 &phba->hbalock, iflag);
3312 }
3313 if (cmdiocbp->iocb_flag &
3314 LPFC_DRIVER_ABORTED) {
3315
3316
3317
3318
3319
3320 spin_lock_irqsave(
3321 &phba->hbalock, iflag);
3322 cmdiocbp->iocb_flag &=
3323 ~LPFC_DRIVER_ABORTED;
3324 spin_unlock_irqrestore(
3325 &phba->hbalock, iflag);
3326 cmdiocbp->iocb.ulpStatus =
3327 IOSTAT_LOCAL_REJECT;
3328 cmdiocbp->iocb.un.ulpWord[4] =
3329 IOERR_ABORT_REQUESTED;
3330
3331
3332
3333
3334
3335
3336 saveq->iocb.ulpStatus =
3337 IOSTAT_LOCAL_REJECT;
3338 saveq->iocb.un.ulpWord[4] =
3339 IOERR_SLI_ABORTED;
3340 spin_lock_irqsave(
3341 &phba->hbalock, iflag);
3342 saveq->iocb_flag |=
3343 LPFC_DELAY_MEM_FREE;
3344 spin_unlock_irqrestore(
3345 &phba->hbalock, iflag);
3346 }
3347 }
3348 }
3349 (cmdiocbp->iocb_cmpl) (phba, cmdiocbp, saveq);
3350 } else
3351 lpfc_sli_release_iocbq(phba, cmdiocbp);
3352 } else {
3353
3354
3355
3356
3357
3358 if (pring->ringno != LPFC_ELS_RING) {
3359
3360
3361
3362
3363 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
3364 "0322 Ring %d handler: "
3365 "unexpected completion IoTag x%x "
3366 "Data: x%x x%x x%x x%x\n",
3367 pring->ringno,
3368 saveq->iocb.ulpIoTag,
3369 saveq->iocb.ulpStatus,
3370 saveq->iocb.un.ulpWord[4],
3371 saveq->iocb.ulpCommand,
3372 saveq->iocb.ulpContext);
3373 }
3374 }
3375
3376 return rc;
3377}
3378
3379
3380
3381
3382
3383
3384
3385
3386
3387
3388
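/* Reconstructed kernel-doc, describing the helper below as written. */
/**
 * lpfc_sli_rsp_pointers_error - Handle an out-of-range response put index
 * @phba: Pointer to HBA context object.
 * @pring: Pointer to driver SLI ring object.
 *
 * Called when the port reports a response put index larger than the
 * response ring. The HBA is marked in error state, an error attention
 * is flagged, and the worker thread is woken to handle it.
 **/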
3389static void
3390lpfc_sli_rsp_pointers_error(struct lpfc_hba *phba, struct lpfc_sli_ring *pring)
3391{
3392 struct lpfc_pgp *pgp = &phba->port_gp[pring->ringno];
3393
3394
3395
3396
3397 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
3398 "0312 Ring %d handler: portRspPut %d "
3399 "is bigger than rsp ring %d\n",
3400 pring->ringno, le32_to_cpu(pgp->rspPutInx),
3401 pring->sli.sli3.numRiocb);
3402
3403 phba->link_state = LPFC_HBA_ERROR;
3404
3405
3406
3407
3408
3409 phba->work_ha |= HA_ERATT;
3410 phba->work_hs = HS_FFER3;
3411
3412 lpfc_worker_wake_up(phba);
3413
3414 return;
3415}
3416
3417
3418
3419
3420
3421
3422
3423
3424
3425
3426
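/* Reconstructed kernel-doc, describing the timer handler below as written. */
/**
 * lpfc_poll_eratt - Error attention polling timer handler
 * @t: Pointer to the timer that fired.
 *
 * Updates the interrupts-per-second statistic and checks the HBA for
 * an error attention. If one is found the worker thread is woken up;
 * otherwise the polling timer is rearmed.
 **/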
3427void lpfc_poll_eratt(struct timer_list *t)
3428{
3429 struct lpfc_hba *phba;
3430 uint32_t eratt = 0;
3431 uint64_t sli_intr, cnt;
3432
3433 phba = from_timer(phba, t, eratt_poll);
3434
3435
3436 sli_intr = phba->sli.slistat.sli_intr;
3437
3438 if (phba->sli.slistat.sli_prev_intr > sli_intr)
3439 cnt = (((uint64_t)(-1) - phba->sli.slistat.sli_prev_intr) +
3440 sli_intr);
3441 else
3442 cnt = (sli_intr - phba->sli.slistat.sli_prev_intr);
3443
3444
3445 do_div(cnt, phba->eratt_poll_interval);
3446 phba->sli.slistat.sli_ips = cnt;
3447
3448 phba->sli.slistat.sli_prev_intr = sli_intr;
3449
3450
3451 eratt = lpfc_sli_check_eratt(phba);
3452
3453 if (eratt)
3454
3455 lpfc_worker_wake_up(phba);
3456 else
3457
3458 mod_timer(&phba->eratt_poll,
3459 jiffies +
3460 msecs_to_jiffies(1000 * phba->eratt_poll_interval));
3461 return;
3462}
3463
3464
3465
3466
3467
3468
3469
3470
3471
3472
3473
3474
3475
3476
3477
3478
3479
3480
3481
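/* Reconstructed kernel-doc, describing the handler below as written. */
/**
 * lpfc_sli_handle_fast_ring_event - Handle a fast-path (FCP) ring event
 * @phba: Pointer to HBA context object.
 * @pring: Pointer to driver SLI ring object.
 * @mask: Host attention register mask for this ring.
 *
 * Walks the response ring, copying each response iocb to a local
 * iocbq and completing the associated command iocb. Unsolicited iocbs
 * are passed to the unsolicited handler and adapter messages are
 * printed. The response get index is updated as entries are consumed,
 * and the command ring is resumed if the port reported it empty.
 *
 * Returns 1.
 **/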
3482int
3483lpfc_sli_handle_fast_ring_event(struct lpfc_hba *phba,
3484 struct lpfc_sli_ring *pring, uint32_t mask)
3485{
3486 struct lpfc_pgp *pgp = &phba->port_gp[pring->ringno];
3487 IOCB_t *irsp = NULL;
3488 IOCB_t *entry = NULL;
3489 struct lpfc_iocbq *cmdiocbq = NULL;
3490 struct lpfc_iocbq rspiocbq;
3491 uint32_t status;
3492 uint32_t portRspPut, portRspMax;
3493 int rc = 1;
3494 lpfc_iocb_type type;
3495 unsigned long iflag;
3496 uint32_t rsp_cmpl = 0;
3497
3498 spin_lock_irqsave(&phba->hbalock, iflag);
3499 pring->stats.iocb_event++;
3500
3501
3502
3503
3504
3505 portRspMax = pring->sli.sli3.numRiocb;
3506 portRspPut = le32_to_cpu(pgp->rspPutInx);
3507 if (unlikely(portRspPut >= portRspMax)) {
3508 lpfc_sli_rsp_pointers_error(phba, pring);
3509 spin_unlock_irqrestore(&phba->hbalock, iflag);
3510 return 1;
3511 }
3512 if (phba->fcp_ring_in_use) {
3513 spin_unlock_irqrestore(&phba->hbalock, iflag);
3514 return 1;
3515 } else
3516 phba->fcp_ring_in_use = 1;
3517
3518 rmb();
3519 while (pring->sli.sli3.rspidx != portRspPut) {
3520
3521
3522
3523
3524
3525 entry = lpfc_resp_iocb(phba, pring);
3526 phba->last_completion_time = jiffies;
3527
3528 if (++pring->sli.sli3.rspidx >= portRspMax)
3529 pring->sli.sli3.rspidx = 0;
3530
3531 lpfc_sli_pcimem_bcopy((uint32_t *) entry,
3532 (uint32_t *) &rspiocbq.iocb,
3533 phba->iocb_rsp_size);
3534 INIT_LIST_HEAD(&(rspiocbq.list));
3535 irsp = &rspiocbq.iocb;
3536
3537 type = lpfc_sli_iocb_cmd_type(irsp->ulpCommand & CMD_IOCB_MASK);
3538 pring->stats.iocb_rsp++;
3539 rsp_cmpl++;
3540
3541 if (unlikely(irsp->ulpStatus)) {
3542
3543
3544
3545
3546 if ((irsp->ulpStatus == IOSTAT_LOCAL_REJECT) &&
3547 ((irsp->un.ulpWord[4] & IOERR_PARAM_MASK) ==
3548 IOERR_NO_RESOURCES)) {
3549 spin_unlock_irqrestore(&phba->hbalock, iflag);
3550 phba->lpfc_rampdown_queue_depth(phba);
3551 spin_lock_irqsave(&phba->hbalock, iflag);
3552 }
3553
3554
3555 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
3556 "0336 Rsp Ring %d error: IOCB Data: "
3557 "x%x x%x x%x x%x x%x x%x x%x x%x\n",
3558 pring->ringno,
3559 irsp->un.ulpWord[0],
3560 irsp->un.ulpWord[1],
3561 irsp->un.ulpWord[2],
3562 irsp->un.ulpWord[3],
3563 irsp->un.ulpWord[4],
3564 irsp->un.ulpWord[5],
3565 *(uint32_t *)&irsp->un1,
3566 *((uint32_t *)&irsp->un1 + 1));
3567 }
3568
3569 switch (type) {
3570 case LPFC_ABORT_IOCB:
3571 case LPFC_SOL_IOCB:
3572
3573
3574
3575
3576 if (unlikely(irsp->ulpCommand == CMD_XRI_ABORTED_CX)) {
3577 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
3578 "0333 IOCB cmd 0x%x"
3579 " processed. Skipping"
3580 " completion\n",
3581 irsp->ulpCommand);
3582 break;
3583 }
3584
3585 spin_unlock_irqrestore(&phba->hbalock, iflag);
3586 cmdiocbq = lpfc_sli_iocbq_lookup(phba, pring,
3587 &rspiocbq);
3588 spin_lock_irqsave(&phba->hbalock, iflag);
3589 if (unlikely(!cmdiocbq))
3590 break;
3591 if (cmdiocbq->iocb_flag & LPFC_DRIVER_ABORTED)
3592 cmdiocbq->iocb_flag &= ~LPFC_DRIVER_ABORTED;
3593 if (cmdiocbq->iocb_cmpl) {
3594 spin_unlock_irqrestore(&phba->hbalock, iflag);
3595 (cmdiocbq->iocb_cmpl)(phba, cmdiocbq,
3596 &rspiocbq);
3597 spin_lock_irqsave(&phba->hbalock, iflag);
3598 }
3599 break;
3600 case LPFC_UNSOL_IOCB:
3601 spin_unlock_irqrestore(&phba->hbalock, iflag);
3602 lpfc_sli_process_unsol_iocb(phba, pring, &rspiocbq);
3603 spin_lock_irqsave(&phba->hbalock, iflag);
3604 break;
3605 default:
3606 if (irsp->ulpCommand == CMD_ADAPTER_MSG) {
3607 char adaptermsg[LPFC_MAX_ADPTMSG];
3608 memset(adaptermsg, 0, LPFC_MAX_ADPTMSG);
3609 memcpy(&adaptermsg[0], (uint8_t *) irsp,
3610 MAX_MSG_DATA);
3611 dev_warn(&((phba->pcidev)->dev),
3612 "lpfc%d: %s\n",
3613 phba->brd_no, adaptermsg);
3614 } else {
3615
3616 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
3617 "0334 Unknown IOCB command "
3618 "Data: x%x, x%x x%x x%x x%x\n",
3619 type, irsp->ulpCommand,
3620 irsp->ulpStatus,
3621 irsp->ulpIoTag,
3622 irsp->ulpContext);
3623 }
3624 break;
3625 }
3626
3627
3628
3629
3630
3631
3632
3633 writel(pring->sli.sli3.rspidx,
3634 &phba->host_gp[pring->ringno].rspGetInx);
3635
3636 if (pring->sli.sli3.rspidx == portRspPut)
3637 portRspPut = le32_to_cpu(pgp->rspPutInx);
3638 }
3639
3640 if ((rsp_cmpl > 0) && (mask & HA_R0RE_REQ)) {
3641 pring->stats.iocb_rsp_full++;
3642 status = ((CA_R0ATT | CA_R0RE_RSP) << (pring->ringno * 4));
3643 writel(status, phba->CAregaddr);
3644 readl(phba->CAregaddr);
3645 }
3646 if ((mask & HA_R0CE_RSP) && (pring->flag & LPFC_CALL_RING_AVAILABLE)) {
3647 pring->flag &= ~LPFC_CALL_RING_AVAILABLE;
3648 pring->stats.iocb_cmd_empty++;
3649
3650
3651 pring->sli.sli3.local_getidx = le32_to_cpu(pgp->cmdGetInx);
3652 lpfc_sli_resume_iocb(phba, pring);
3653
3654 if ((pring->lpfc_sli_cmd_available))
3655 (pring->lpfc_sli_cmd_available) (phba, pring);
3656
3657 }
3658
3659 phba->fcp_ring_in_use = 0;
3660 spin_unlock_irqrestore(&phba->hbalock, iflag);
3661 return rc;
3662}
3663
3664
3665
3666
3667
3668
3669
3670
3671
3672
3673
3674
3675
3676
3677
3678
3679
3680
3681
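/* Reconstructed kernel-doc, describing the handler below as written. */
/**
 * lpfc_sli_sp_handle_rspiocb - Handle a slow-path response iocb
 * @phba: Pointer to HBA context object.
 * @pring: Pointer to driver SLI ring object.
 * @rspiocbp: Pointer to the response iocb.
 *
 * Response iocbs are accumulated on the ring's iocb_continueq until an
 * entry with ulpLe set completes the sequence. The completed sequence
 * is dispatched by iocb type (solicited, unsolicited, abort, unknown)
 * and its iocbq resources are released unless the unsolicited handler
 * retained them.
 *
 * Returns @rspiocbp while the sequence is still incomplete, or NULL
 * once a completed sequence has been processed.
 **/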
3682static struct lpfc_iocbq *
3683lpfc_sli_sp_handle_rspiocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
3684 struct lpfc_iocbq *rspiocbp)
3685{
3686 struct lpfc_iocbq *saveq;
3687 struct lpfc_iocbq *cmdiocbp;
3688 struct lpfc_iocbq *next_iocb;
3689 IOCB_t *irsp = NULL;
3690 uint32_t free_saveq;
3691 uint8_t iocb_cmd_type;
3692 lpfc_iocb_type type;
3693 unsigned long iflag;
3694 int rc;
3695
3696 spin_lock_irqsave(&phba->hbalock, iflag);
3697
3698 list_add_tail(&rspiocbp->list, &(pring->iocb_continueq));
3699 pring->iocb_continueq_cnt++;
3700
3701
3702 irsp = &rspiocbp->iocb;
3703 if (irsp->ulpLe) {
3704
3705
3706
3707
3708 free_saveq = 1;
3709 saveq = list_get_first(&pring->iocb_continueq,
3710 struct lpfc_iocbq, list);
3711 irsp = &(saveq->iocb);
3712 list_del_init(&pring->iocb_continueq);
3713 pring->iocb_continueq_cnt = 0;
3714
3715 pring->stats.iocb_rsp++;
3716
3717
3718
3719
3720
3721 if ((irsp->ulpStatus == IOSTAT_LOCAL_REJECT) &&
3722 ((irsp->un.ulpWord[4] & IOERR_PARAM_MASK) ==
3723 IOERR_NO_RESOURCES)) {
3724 spin_unlock_irqrestore(&phba->hbalock, iflag);
3725 phba->lpfc_rampdown_queue_depth(phba);
3726 spin_lock_irqsave(&phba->hbalock, iflag);
3727 }
3728
3729 if (irsp->ulpStatus) {
3730
3731 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
3732 "0328 Rsp Ring %d error: "
3733 "IOCB Data: "
3734 "x%x x%x x%x x%x "
3735 "x%x x%x x%x x%x "
3736 "x%x x%x x%x x%x "
3737 "x%x x%x x%x x%x\n",
3738 pring->ringno,
3739 irsp->un.ulpWord[0],
3740 irsp->un.ulpWord[1],
3741 irsp->un.ulpWord[2],
3742 irsp->un.ulpWord[3],
3743 irsp->un.ulpWord[4],
3744 irsp->un.ulpWord[5],
3745 *(((uint32_t *) irsp) + 6),
3746 *(((uint32_t *) irsp) + 7),
3747 *(((uint32_t *) irsp) + 8),
3748 *(((uint32_t *) irsp) + 9),
3749 *(((uint32_t *) irsp) + 10),
3750 *(((uint32_t *) irsp) + 11),
3751 *(((uint32_t *) irsp) + 12),
3752 *(((uint32_t *) irsp) + 13),
3753 *(((uint32_t *) irsp) + 14),
3754 *(((uint32_t *) irsp) + 15));
3755 }
3756
3757
3758
3759
3760
3761
3762
3763 iocb_cmd_type = irsp->ulpCommand & CMD_IOCB_MASK;
3764 type = lpfc_sli_iocb_cmd_type(iocb_cmd_type);
3765 switch (type) {
3766 case LPFC_SOL_IOCB:
3767 spin_unlock_irqrestore(&phba->hbalock, iflag);
3768 rc = lpfc_sli_process_sol_iocb(phba, pring, saveq);
3769 spin_lock_irqsave(&phba->hbalock, iflag);
3770 break;
3771
3772 case LPFC_UNSOL_IOCB:
3773 spin_unlock_irqrestore(&phba->hbalock, iflag);
3774 rc = lpfc_sli_process_unsol_iocb(phba, pring, saveq);
3775 spin_lock_irqsave(&phba->hbalock, iflag);
3776 if (!rc)
3777 free_saveq = 0;
3778 break;
3779
3780 case LPFC_ABORT_IOCB:
3781 cmdiocbp = NULL;
3782 if (irsp->ulpCommand != CMD_XRI_ABORTED_CX) {
3783 spin_unlock_irqrestore(&phba->hbalock, iflag);
3784 cmdiocbp = lpfc_sli_iocbq_lookup(phba, pring,
3785 saveq);
3786 spin_lock_irqsave(&phba->hbalock, iflag);
3787 }
3788 if (cmdiocbp) {
3789
3790 if (cmdiocbp->iocb_cmpl) {
3791 spin_unlock_irqrestore(&phba->hbalock,
3792 iflag);
3793 (cmdiocbp->iocb_cmpl)(phba, cmdiocbp,
3794 saveq);
3795 spin_lock_irqsave(&phba->hbalock,
3796 iflag);
3797 } else
3798 __lpfc_sli_release_iocbq(phba,
3799 cmdiocbp);
3800 }
3801 break;
3802
3803 case LPFC_UNKNOWN_IOCB:
3804 if (irsp->ulpCommand == CMD_ADAPTER_MSG) {
3805 char adaptermsg[LPFC_MAX_ADPTMSG];
3806 memset(adaptermsg, 0, LPFC_MAX_ADPTMSG);
3807 memcpy(&adaptermsg[0], (uint8_t *)irsp,
3808 MAX_MSG_DATA);
3809 dev_warn(&((phba->pcidev)->dev),
3810 "lpfc%d: %s\n",
3811 phba->brd_no, adaptermsg);
3812 } else {
3813
3814 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
3815 "0335 Unknown IOCB "
3816 "command Data: x%x "
3817 "x%x x%x x%x\n",
3818 irsp->ulpCommand,
3819 irsp->ulpStatus,
3820 irsp->ulpIoTag,
3821 irsp->ulpContext);
3822 }
3823 break;
3824 }
3825
3826 if (free_saveq) {
3827 list_for_each_entry_safe(rspiocbp, next_iocb,
3828 &saveq->list, list) {
3829 list_del_init(&rspiocbp->list);
3830 __lpfc_sli_release_iocbq(phba, rspiocbp);
3831 }
3832 __lpfc_sli_release_iocbq(phba, saveq);
3833 }
3834 rspiocbp = NULL;
3835 }
3836 spin_unlock_irqrestore(&phba->hbalock, iflag);
3837 return rspiocbp;
3838}
3839
3840
3841
3842
3843
3844
3845
3846
3847
3848
3849void
3850lpfc_sli_handle_slow_ring_event(struct lpfc_hba *phba,
3851 struct lpfc_sli_ring *pring, uint32_t mask)
3852{
3853 phba->lpfc_sli_handle_slow_ring_event(phba, pring, mask);
3854}
3855
3856
3857
3858
3859
3860
3861
3862
3863
3864
3865
3866
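/* Reconstructed kernel-doc, describing the handler below as written. */
/**
 * lpfc_sli_handle_slow_ring_event_s3 - Handle SLI-3 slow-path ring events
 * @phba: Pointer to HBA context object.
 * @pring: Pointer to driver SLI ring object.
 * @mask: Host attention register mask for this ring.
 *
 * Walks the slow-path (ELS) response ring, copying each entry into a
 * newly allocated iocbq and passing it to lpfc_sli_sp_handle_rspiocb().
 * An out-of-range put index is treated as an error attention. The chip
 * attention register is updated and the command ring is resumed when
 * required by the mask bits.
 **/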
3867static void
3868lpfc_sli_handle_slow_ring_event_s3(struct lpfc_hba *phba,
3869 struct lpfc_sli_ring *pring, uint32_t mask)
3870{
3871 struct lpfc_pgp *pgp;
3872 IOCB_t *entry;
3873 IOCB_t *irsp = NULL;
3874 struct lpfc_iocbq *rspiocbp = NULL;
3875 uint32_t portRspPut, portRspMax;
3876 unsigned long iflag;
3877 uint32_t status;
3878
3879 pgp = &phba->port_gp[pring->ringno];
3880 spin_lock_irqsave(&phba->hbalock, iflag);
3881 pring->stats.iocb_event++;
3882
3883
3884
3885
3886
3887 portRspMax = pring->sli.sli3.numRiocb;
3888 portRspPut = le32_to_cpu(pgp->rspPutInx);
3889 if (portRspPut >= portRspMax) {
3890
3891
3892
3893
3894 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
3895 "0303 Ring %d handler: portRspPut %d "
3896 "is bigger than rsp ring %d\n",
3897 pring->ringno, portRspPut, portRspMax);
3898
3899 phba->link_state = LPFC_HBA_ERROR;
3900 spin_unlock_irqrestore(&phba->hbalock, iflag);
3901
3902 phba->work_hs = HS_FFER3;
3903 lpfc_handle_eratt(phba);
3904
3905 return;
3906 }
3907
3908 rmb();
3909 while (pring->sli.sli3.rspidx != portRspPut) {
3910
3911
3912
3913
3914
3915
3916
3917
3918
3919
3920
3921
3922
3923 entry = lpfc_resp_iocb(phba, pring);
3924
3925 phba->last_completion_time = jiffies;
3926 rspiocbp = __lpfc_sli_get_iocbq(phba);
3927 if (rspiocbp == NULL) {
3928 printk(KERN_ERR "%s: out of buffers! Failing "
3929 "completion.\n", __func__);
3930 break;
3931 }
3932
3933 lpfc_sli_pcimem_bcopy(entry, &rspiocbp->iocb,
3934 phba->iocb_rsp_size);
3935 irsp = &rspiocbp->iocb;
3936
3937 if (++pring->sli.sli3.rspidx >= portRspMax)
3938 pring->sli.sli3.rspidx = 0;
3939
3940 if (pring->ringno == LPFC_ELS_RING) {
3941 lpfc_debugfs_slow_ring_trc(phba,
3942 "IOCB rsp ring: wd4:x%08x wd6:x%08x wd7:x%08x",
3943 *(((uint32_t *) irsp) + 4),
3944 *(((uint32_t *) irsp) + 6),
3945 *(((uint32_t *) irsp) + 7));
3946 }
3947
3948 writel(pring->sli.sli3.rspidx,
3949 &phba->host_gp[pring->ringno].rspGetInx);
3950
3951 spin_unlock_irqrestore(&phba->hbalock, iflag);
3952
3953 rspiocbp = lpfc_sli_sp_handle_rspiocb(phba, pring, rspiocbp);
3954 spin_lock_irqsave(&phba->hbalock, iflag);
3955
3956
3957
3958
3959
3960
3961 if (pring->sli.sli3.rspidx == portRspPut) {
3962 portRspPut = le32_to_cpu(pgp->rspPutInx);
3963 }
3964 }
3965
3966 if ((rspiocbp != NULL) && (mask & HA_R0RE_REQ)) {
3967
3968 pring->stats.iocb_rsp_full++;
3969
3970 status = ((CA_R0ATT | CA_R0RE_RSP) << (pring->ringno * 4));
3971 writel(status, phba->CAregaddr);
3972 readl(phba->CAregaddr);
3973 }
3974 if ((mask & HA_R0CE_RSP) && (pring->flag & LPFC_CALL_RING_AVAILABLE)) {
3975 pring->flag &= ~LPFC_CALL_RING_AVAILABLE;
3976 pring->stats.iocb_cmd_empty++;
3977
3978
3979 pring->sli.sli3.local_getidx = le32_to_cpu(pgp->cmdGetInx);
3980 lpfc_sli_resume_iocb(phba, pring);
3981
3982 if ((pring->lpfc_sli_cmd_available))
3983 (pring->lpfc_sli_cmd_available) (phba, pring);
3984
3985 }
3986
3987 spin_unlock_irqrestore(&phba->hbalock, iflag);
3988 return;
3989}
3990
3991
3992
3993
3994
3995
3996
3997
3998
3999
4000
4001
4002
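/* Reconstructed kernel-doc, describing the handler below as written. */
/**
 * lpfc_sli_handle_slow_ring_event_s4 - Handle SLI-4 slow-path queue events
 * @phba: Pointer to HBA context object.
 * @pring: Pointer to driver SLI ring object.
 * @mask: Host attention register mask for this ring.
 *
 * Drains the sp_queue_event list, handling ELS work queue completions
 * via lpfc_sli_sp_handle_rspiocb() and received buffers via
 * lpfc_sli4_handle_received_buffer(). Processing is bounded to 64
 * events per invocation.
 **/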
4003static void
4004lpfc_sli_handle_slow_ring_event_s4(struct lpfc_hba *phba,
4005 struct lpfc_sli_ring *pring, uint32_t mask)
4006{
4007 struct lpfc_iocbq *irspiocbq;
4008 struct hbq_dmabuf *dmabuf;
4009 struct lpfc_cq_event *cq_event;
4010 unsigned long iflag;
4011 int count = 0;
4012
4013 spin_lock_irqsave(&phba->hbalock, iflag);
4014 phba->hba_flag &= ~HBA_SP_QUEUE_EVT;
4015 spin_unlock_irqrestore(&phba->hbalock, iflag);
4016 while (!list_empty(&phba->sli4_hba.sp_queue_event)) {
4017
4018 spin_lock_irqsave(&phba->hbalock, iflag);
4019 list_remove_head(&phba->sli4_hba.sp_queue_event,
4020 cq_event, struct lpfc_cq_event, list);
4021 spin_unlock_irqrestore(&phba->hbalock, iflag);
4022
4023 switch (bf_get(lpfc_wcqe_c_code, &cq_event->cqe.wcqe_cmpl)) {
4024 case CQE_CODE_COMPL_WQE:
4025 irspiocbq = container_of(cq_event, struct lpfc_iocbq,
4026 cq_event);
4027
4028 irspiocbq = lpfc_sli4_els_wcqe_to_rspiocbq(phba,
4029 irspiocbq);
4030 if (irspiocbq)
4031 lpfc_sli_sp_handle_rspiocb(phba, pring,
4032 irspiocbq);
4033 count++;
4034 break;
4035 case CQE_CODE_RECEIVE:
4036 case CQE_CODE_RECEIVE_V1:
4037 dmabuf = container_of(cq_event, struct hbq_dmabuf,
4038 cq_event);
4039 lpfc_sli4_handle_received_buffer(phba, dmabuf);
4040 count++;
4041 break;
4042 default:
4043 break;
4044 }
4045
4046
4047 if (count == 64)
4048 break;
4049 }
4050}
4051
4052
4053
4054
4055
4056
4057
4058
4059
4060
4061
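/* Reconstructed kernel-doc, describing the routine below as written. */
/**
 * lpfc_sli_abort_iocb_ring - Abort all iocbs on a ring
 * @phba: Pointer to HBA context object.
 * @pring: Pointer to driver SLI ring object.
 *
 * All iocbs pending on the txq are cancelled with IOSTAT_LOCAL_REJECT
 * and IOERR_SLI_ABORTED, and an abort is issued for every iocb on the
 * txcmplq. For the ELS ring, fabric iocbs are aborted first.
 **/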
4062void
4063lpfc_sli_abort_iocb_ring(struct lpfc_hba *phba, struct lpfc_sli_ring *pring)
4064{
4065 LIST_HEAD(completions);
4066 struct lpfc_iocbq *iocb, *next_iocb;
4067
4068 if (pring->ringno == LPFC_ELS_RING) {
4069 lpfc_fabric_abort_hba(phba);
4070 }
4071
4072
4073
4074
4075 if (phba->sli_rev >= LPFC_SLI_REV4) {
4076 spin_lock_irq(&pring->ring_lock);
4077 list_splice_init(&pring->txq, &completions);
4078 pring->txq_cnt = 0;
4079 spin_unlock_irq(&pring->ring_lock);
4080
4081 spin_lock_irq(&phba->hbalock);
4082
4083 list_for_each_entry_safe(iocb, next_iocb, &pring->txcmplq, list)
4084 lpfc_sli_issue_abort_iotag(phba, pring, iocb);
4085 spin_unlock_irq(&phba->hbalock);
4086 } else {
4087 spin_lock_irq(&phba->hbalock);
4088 list_splice_init(&pring->txq, &completions);
4089 pring->txq_cnt = 0;
4090
4091
4092 list_for_each_entry_safe(iocb, next_iocb, &pring->txcmplq, list)
4093 lpfc_sli_issue_abort_iotag(phba, pring, iocb);
4094 spin_unlock_irq(&phba->hbalock);
4095 }
4096
4097
4098 lpfc_sli_cancel_iocbs(phba, &completions, IOSTAT_LOCAL_REJECT,
4099 IOERR_SLI_ABORTED);
4100}
4101
4102
4103
4104
4105
4106
4107
4108
4109
4110
4111
4112void
4113lpfc_sli_abort_fcp_rings(struct lpfc_hba *phba)
4114{
4115 struct lpfc_sli *psli = &phba->sli;
4116 struct lpfc_sli_ring *pring;
4117 uint32_t i;
4118
4119
4120 if (phba->sli_rev >= LPFC_SLI_REV4) {
4121 for (i = 0; i < phba->cfg_hdw_queue; i++) {
4122 pring = phba->sli4_hba.hdwq[i].io_wq->pring;
4123 lpfc_sli_abort_iocb_ring(phba, pring);
4124 }
4125 } else {
4126 pring = &psli->sli3_ring[LPFC_FCP_RING];
4127 lpfc_sli_abort_iocb_ring(phba, pring);
4128 }
4129}
4130
4131
4132
4133
4134
4135
4136
4137
4138
4139
4140
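/* Reconstructed kernel-doc, describing the routine below as written. */
/**
 * lpfc_sli_flush_io_rings - Flush all outstanding IO on the IO rings
 * @phba: Pointer to HBA context object.
 *
 * Marks the HBA with HBA_IOQ_FLUSH and cancels every iocb on the txq
 * and txcmplq of each IO ring (all hardware queues on SLI-4, the FCP
 * ring on SLI-3) with IOSTAT_LOCAL_REJECT and IOERR_SLI_DOWN status.
 **/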
4141void
4142lpfc_sli_flush_io_rings(struct lpfc_hba *phba)
4143{
4144 LIST_HEAD(txq);
4145 LIST_HEAD(txcmplq);
4146 struct lpfc_sli *psli = &phba->sli;
4147 struct lpfc_sli_ring *pring;
4148 uint32_t i;
4149 struct lpfc_iocbq *piocb, *next_iocb;
4150
4151 spin_lock_irq(&phba->hbalock);
4152 if (phba->hba_flag & HBA_IOQ_FLUSH ||
4153 !phba->sli4_hba.hdwq) {
4154 spin_unlock_irq(&phba->hbalock);
4155 return;
4156 }
4157
4158 phba->hba_flag |= HBA_IOQ_FLUSH;
4159 spin_unlock_irq(&phba->hbalock);
4160
4161
4162 if (phba->sli_rev >= LPFC_SLI_REV4) {
4163 for (i = 0; i < phba->cfg_hdw_queue; i++) {
4164 pring = phba->sli4_hba.hdwq[i].io_wq->pring;
4165
4166 spin_lock_irq(&pring->ring_lock);
4167
4168 list_splice_init(&pring->txq, &txq);
4169 list_for_each_entry_safe(piocb, next_iocb,
4170 &pring->txcmplq, list)
4171 piocb->iocb_flag &= ~LPFC_IO_ON_TXCMPLQ;
4172
4173 list_splice_init(&pring->txcmplq, &txcmplq);
4174 pring->txq_cnt = 0;
4175 pring->txcmplq_cnt = 0;
4176 spin_unlock_irq(&pring->ring_lock);
4177
4178
4179 lpfc_sli_cancel_iocbs(phba, &txq,
4180 IOSTAT_LOCAL_REJECT,
4181 IOERR_SLI_DOWN);
4182
4183 lpfc_sli_cancel_iocbs(phba, &txcmplq,
4184 IOSTAT_LOCAL_REJECT,
4185 IOERR_SLI_DOWN);
4186 }
4187 } else {
4188 pring = &psli->sli3_ring[LPFC_FCP_RING];
4189
4190 spin_lock_irq(&phba->hbalock);
4191
4192 list_splice_init(&pring->txq, &txq);
4193 list_for_each_entry_safe(piocb, next_iocb,
4194 &pring->txcmplq, list)
4195 piocb->iocb_flag &= ~LPFC_IO_ON_TXCMPLQ;
4196
4197 list_splice_init(&pring->txcmplq, &txcmplq);
4198 pring->txq_cnt = 0;
4199 pring->txcmplq_cnt = 0;
4200 spin_unlock_irq(&phba->hbalock);
4201
4202
4203 lpfc_sli_cancel_iocbs(phba, &txq, IOSTAT_LOCAL_REJECT,
4204 IOERR_SLI_DOWN);
4205
4206 lpfc_sli_cancel_iocbs(phba, &txcmplq, IOSTAT_LOCAL_REJECT,
4207 IOERR_SLI_DOWN);
4208 }
4209}
4210
4211
4212
4213
4214
4215
4216
4217
4218
4219
4220
4221
4222
4223
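/* Reconstructed kernel-doc, describing the routine below as written. */
/**
 * lpfc_sli_brdready_s3 - Wait for an SLI-2/SLI-3 HBA to become ready
 * @phba: Pointer to HBA context object.
 * @mask: Bit mask of host status bits to wait for.
 *
 * Polls the host status register until the requested bits are set, an
 * error attention is raised, or the retry count is exhausted. Partway
 * through the wait the board is restarted once.
 *
 * Returns 0 when the HBA is ready, 1 on error or timeout.
 **/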
4224static int
4225lpfc_sli_brdready_s3(struct lpfc_hba *phba, uint32_t mask)
4226{
4227 uint32_t status;
4228 int i = 0;
4229 int retval = 0;
4230
4231
4232 if (lpfc_readl(phba->HSregaddr, &status))
4233 return 1;
4234
4235
4236
4237
4238
4239
4240
4241 while (((status & mask) != mask) &&
4242 !(status & HS_FFERM) &&
4243 i++ < 20) {
4244
4245 if (i <= 5)
4246 msleep(10);
4247 else if (i <= 10)
4248 msleep(500);
4249 else
4250 msleep(2500);
4251
4252 if (i == 15) {
4253
4254 phba->pport->port_state = LPFC_VPORT_UNKNOWN;
4255 lpfc_sli_brdrestart(phba);
4256 }
4257
4258 if (lpfc_readl(phba->HSregaddr, &status)) {
4259 retval = 1;
4260 break;
4261 }
4262 }
4263
4264
4265 if ((status & HS_FFERM) || (i >= 20)) {
4266 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
4267 "2751 Adapter failed to restart, "
4268 "status reg x%x, FW Data: A8 x%x AC x%x\n",
4269 status,
4270 readl(phba->MBslimaddr + 0xa8),
4271 readl(phba->MBslimaddr + 0xac));
4272 phba->link_state = LPFC_HBA_ERROR;
4273 retval = 1;
4274 }
4275
4276 return retval;
4277}
4278
4279
4280
4281
4282
4283
4284
4285
4286
4287
4288
4289
4290static int
4291lpfc_sli_brdready_s4(struct lpfc_hba *phba, uint32_t mask)
4292{
4293 uint32_t status;
4294 int retval = 0;
4295
4296
4297 status = lpfc_sli4_post_status_check(phba);
4298
4299 if (status) {
4300 phba->pport->port_state = LPFC_VPORT_UNKNOWN;
4301 lpfc_sli_brdrestart(phba);
4302 status = lpfc_sli4_post_status_check(phba);
4303 }
4304
4305
4306 if (status) {
4307 phba->link_state = LPFC_HBA_ERROR;
4308 retval = 1;
4309 } else
4310 phba->sli4_hba.intr_enable = 0;
4311
4312 return retval;
4313}
4314
4315
4316
4317
4318
4319
4320
4321
4322
4323int
4324lpfc_sli_brdready(struct lpfc_hba *phba, uint32_t mask)
4325{
4326 return phba->lpfc_sli_brdready(phba, mask);
4327}
4328
4329#define BARRIER_TEST_PATTERN (0xdeadbeef)
4330
4331
4332
4333
4334
4335
4336
4337
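/* Reconstructed kernel-doc, describing the routine below as written. */
/**
 * lpfc_reset_barrier - Issue a kill_board barrier before resetting the HBA
 * @phba: Pointer to HBA context object.
 *
 * On Helios and Thor adapters, this routine disables error attention
 * interrupts, posts a KILL_BOARD mailbox command directly to SLIM, and
 * waits for the chip to acknowledge it before the board is reset. The
 * host control register is restored before returning. The caller must
 * hold hbalock.
 **/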
4338void lpfc_reset_barrier(struct lpfc_hba *phba)
4339{
4340 uint32_t __iomem *resp_buf;
4341 uint32_t __iomem *mbox_buf;
4342 volatile uint32_t mbox;
4343 uint32_t hc_copy, ha_copy, resp_data;
4344 int i;
4345 uint8_t hdrtype;
4346
4347 lockdep_assert_held(&phba->hbalock);
4348
4349 pci_read_config_byte(phba->pcidev, PCI_HEADER_TYPE, &hdrtype);
4350 if (hdrtype != 0x80 ||
4351 (FC_JEDEC_ID(phba->vpd.rev.biuRev) != HELIOS_JEDEC_ID &&
4352 FC_JEDEC_ID(phba->vpd.rev.biuRev) != THOR_JEDEC_ID))
4353 return;
4354
4355
4356
4357
4358
4359 resp_buf = phba->MBslimaddr;
4360
4361
4362 if (lpfc_readl(phba->HCregaddr, &hc_copy))
4363 return;
4364 writel((hc_copy & ~HC_ERINT_ENA), phba->HCregaddr);
4365 readl(phba->HCregaddr);
4366 phba->link_flag |= LS_IGNORE_ERATT;
4367
4368 if (lpfc_readl(phba->HAregaddr, &ha_copy))
4369 return;
4370 if (ha_copy & HA_ERATT) {
4371
4372 writel(HA_ERATT, phba->HAregaddr);
4373 phba->pport->stopped = 1;
4374 }
4375
4376 mbox = 0;
4377 ((MAILBOX_t *)&mbox)->mbxCommand = MBX_KILL_BOARD;
4378 ((MAILBOX_t *)&mbox)->mbxOwner = OWN_CHIP;
4379
4380 writel(BARRIER_TEST_PATTERN, (resp_buf + 1));
4381 mbox_buf = phba->MBslimaddr;
4382 writel(mbox, mbox_buf);
4383
4384 for (i = 0; i < 50; i++) {
4385 if (lpfc_readl((resp_buf + 1), &resp_data))
4386 return;
4387 if (resp_data != ~(BARRIER_TEST_PATTERN))
4388 mdelay(1);
4389 else
4390 break;
4391 }
4392 resp_data = 0;
4393 if (lpfc_readl((resp_buf + 1), &resp_data))
4394 return;
4395 if (resp_data != ~(BARRIER_TEST_PATTERN)) {
4396 if (phba->sli.sli_flag & LPFC_SLI_ACTIVE ||
4397 phba->pport->stopped)
4398 goto restore_hc;
4399 else
4400 goto clear_errat;
4401 }
4402
4403 ((MAILBOX_t *)&mbox)->mbxOwner = OWN_HOST;
4404 resp_data = 0;
4405 for (i = 0; i < 500; i++) {
4406 if (lpfc_readl(resp_buf, &resp_data))
4407 return;
4408 if (resp_data != mbox)
4409 mdelay(1);
4410 else
4411 break;
4412 }
4413
4414clear_errat:
4415
4416 while (++i < 500) {
4417 if (lpfc_readl(phba->HAregaddr, &ha_copy))
4418 return;
4419 if (!(ha_copy & HA_ERATT))
4420 mdelay(1);
4421 else
4422 break;
4423 }
4424
4425 if (readl(phba->HAregaddr) & HA_ERATT) {
4426 writel(HA_ERATT, phba->HAregaddr);
4427 phba->pport->stopped = 1;
4428 }
4429
4430restore_hc:
4431 phba->link_flag &= ~LS_IGNORE_ERATT;
4432 writel(hc_copy, phba->HCregaddr);
4433 readl(phba->HCregaddr);
4434}
4435
4436
4437
4438
4439
4440
4441
4442
4443
4444
4445
4446
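/* Reconstructed kernel-doc, describing the routine below as written. */
/**
 * lpfc_sli_brdkill - Issue a kill board mailbox command
 * @phba: Pointer to HBA context object.
 *
 * Disables error attention interrupts, issues the KILL_BOARD mailbox
 * command, and waits up to three seconds for the resulting error
 * attention. The HBA is then marked in error state and pending IO is
 * cleaned up with lpfc_hba_down_post().
 *
 * Returns 0 if the error attention was seen, 1 otherwise.
 **/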
4447int
4448lpfc_sli_brdkill(struct lpfc_hba *phba)
4449{
4450 struct lpfc_sli *psli;
4451 LPFC_MBOXQ_t *pmb;
4452 uint32_t status;
4453 uint32_t ha_copy;
4454 int retval;
4455 int i = 0;
4456
4457 psli = &phba->sli;
4458
4459
4460 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
4461 "0329 Kill HBA Data: x%x x%x\n",
4462 phba->pport->port_state, psli->sli_flag);
4463
4464 pmb = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
4465 if (!pmb)
4466 return 1;
4467
4468
4469 spin_lock_irq(&phba->hbalock);
4470 if (lpfc_readl(phba->HCregaddr, &status)) {
4471 spin_unlock_irq(&phba->hbalock);
4472 mempool_free(pmb, phba->mbox_mem_pool);
4473 return 1;
4474 }
4475 status &= ~HC_ERINT_ENA;
4476 writel(status, phba->HCregaddr);
4477 readl(phba->HCregaddr);
4478 phba->link_flag |= LS_IGNORE_ERATT;
4479 spin_unlock_irq(&phba->hbalock);
4480
4481 lpfc_kill_board(phba, pmb);
4482 pmb->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
4483 retval = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT);
4484
4485 if (retval != MBX_SUCCESS) {
4486 if (retval != MBX_BUSY)
4487 mempool_free(pmb, phba->mbox_mem_pool);
4488 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
4489 "2752 KILL_BOARD command failed retval %d\n",
4490 retval);
4491 spin_lock_irq(&phba->hbalock);
4492 phba->link_flag &= ~LS_IGNORE_ERATT;
4493 spin_unlock_irq(&phba->hbalock);
4494 return 1;
4495 }
4496
4497 spin_lock_irq(&phba->hbalock);
4498 psli->sli_flag &= ~LPFC_SLI_ACTIVE;
4499 spin_unlock_irq(&phba->hbalock);
4500
4501 mempool_free(pmb, phba->mbox_mem_pool);
4502
4503
4504
4505
4506
4507
4508 if (lpfc_readl(phba->HAregaddr, &ha_copy))
4509 return 1;
4510 while ((i++ < 30) && !(ha_copy & HA_ERATT)) {
4511 mdelay(100);
4512 if (lpfc_readl(phba->HAregaddr, &ha_copy))
4513 return 1;
4514 }
4515
4516 del_timer_sync(&psli->mbox_tmo);
4517 if (ha_copy & HA_ERATT) {
4518 writel(HA_ERATT, phba->HAregaddr);
4519 phba->pport->stopped = 1;
4520 }
4521 spin_lock_irq(&phba->hbalock);
4522 psli->sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
4523 psli->mbox_active = NULL;
4524 phba->link_flag &= ~LS_IGNORE_ERATT;
4525 spin_unlock_irq(&phba->hbalock);
4526
4527 lpfc_hba_down_post(phba);
4528 phba->link_state = LPFC_HBA_ERROR;
4529
4530 return ha_copy & HA_ERATT ? 0 : 1;
4531}
4532
4533
4534
4535
4536
4537
4538
4539
4540
4541
4542
4543
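/* Reconstructed kernel-doc, describing the routine below as written. */
/**
 * lpfc_sli_brdreset - Reset an SLI-2/SLI-3 HBA
 * @phba: Pointer to HBA context object.
 *
 * Disables PCI parity and SERR reporting, toggles HC_INITFF in the
 * host control register to reset the chip, restores the PCI command
 * register, and reinitializes the driver's ring indices. The link
 * state is left at LPFC_WARM_START.
 *
 * Returns 0 on success, -EIO if PCI config space cannot be read.
 **/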
4544int
4545lpfc_sli_brdreset(struct lpfc_hba *phba)
4546{
4547 struct lpfc_sli *psli;
4548 struct lpfc_sli_ring *pring;
4549 uint16_t cfg_value;
4550 int i;
4551
4552 psli = &phba->sli;
4553
4554
4555 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
4556 "0325 Reset HBA Data: x%x x%x\n",
4557 (phba->pport) ? phba->pport->port_state : 0,
4558 psli->sli_flag);
4559
4560
4561 phba->fc_eventTag = 0;
4562 phba->link_events = 0;
4563 if (phba->pport) {
4564 phba->pport->fc_myDID = 0;
4565 phba->pport->fc_prevDID = 0;
4566 }
4567
4568
4569 if (pci_read_config_word(phba->pcidev, PCI_COMMAND, &cfg_value))
4570 return -EIO;
4571
4572 pci_write_config_word(phba->pcidev, PCI_COMMAND,
4573 (cfg_value &
4574 ~(PCI_COMMAND_PARITY | PCI_COMMAND_SERR)));
4575
4576 psli->sli_flag &= ~(LPFC_SLI_ACTIVE | LPFC_PROCESS_LA);
4577
4578
4579 writel(HC_INITFF, phba->HCregaddr);
4580 mdelay(1);
4581 readl(phba->HCregaddr);
4582 writel(0, phba->HCregaddr);
4583 readl(phba->HCregaddr);
4584
4585
4586 pci_write_config_word(phba->pcidev, PCI_COMMAND, cfg_value);
4587
4588
4589 for (i = 0; i < psli->num_rings; i++) {
4590 pring = &psli->sli3_ring[i];
4591 pring->flag = 0;
4592 pring->sli.sli3.rspidx = 0;
4593 pring->sli.sli3.next_cmdidx = 0;
4594 pring->sli.sli3.local_getidx = 0;
4595 pring->sli.sli3.cmdidx = 0;
4596 pring->missbufcnt = 0;
4597 }
4598
4599 phba->link_state = LPFC_WARM_START;
4600 return 0;
4601}
4602
4603
4604
4605
4606
4607
4608
4609
4610
4611
4612
4613int
4614lpfc_sli4_brdreset(struct lpfc_hba *phba)
4615{
4616 struct lpfc_sli *psli = &phba->sli;
4617 uint16_t cfg_value;
4618 int rc = 0;
4619
4620
4621 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
4622 "0295 Reset HBA Data: x%x x%x x%x\n",
4623 phba->pport->port_state, psli->sli_flag,
4624 phba->hba_flag);
4625
4626
4627 phba->fc_eventTag = 0;
4628 phba->link_events = 0;
4629 phba->pport->fc_myDID = 0;
4630 phba->pport->fc_prevDID = 0;
4631
4632 spin_lock_irq(&phba->hbalock);
4633 psli->sli_flag &= ~(LPFC_PROCESS_LA);
4634 phba->fcf.fcf_flag = 0;
4635 spin_unlock_irq(&phba->hbalock);
4636
4637
4638 if (phba->hba_flag & HBA_FW_DUMP_OP) {
4639 phba->hba_flag &= ~HBA_FW_DUMP_OP;
4640 return rc;
4641 }
4642
4643
4644 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
4645 "0389 Performing PCI function reset!\n");
4646
4647
4648 if (pci_read_config_word(phba->pcidev, PCI_COMMAND, &cfg_value)) {
4649 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
4650 "3205 PCI read Config failed\n");
4651 return -EIO;
4652 }
4653
4654 pci_write_config_word(phba->pcidev, PCI_COMMAND, (cfg_value &
4655 ~(PCI_COMMAND_PARITY | PCI_COMMAND_SERR)));
4656
4657
4658 rc = lpfc_pci_function_reset(phba);
4659
4660
4661 pci_write_config_word(phba->pcidev, PCI_COMMAND, cfg_value);
4662
4663 return rc;
4664}
4665
4666
4667
4668
4669
4670
4671
4672
4673
4674
4675
4676
4677
4678
4679static int
4680lpfc_sli_brdrestart_s3(struct lpfc_hba *phba)
4681{
4682 MAILBOX_t *mb;
4683 struct lpfc_sli *psli;
4684 volatile uint32_t word0;
4685 void __iomem *to_slim;
4686 uint32_t hba_aer_enabled;
4687
4688 spin_lock_irq(&phba->hbalock);
4689
4690
4691 hba_aer_enabled = phba->hba_flag & HBA_AER_ENABLED;
4692
4693 psli = &phba->sli;
4694
4695
4696 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
4697 "0337 Restart HBA Data: x%x x%x\n",
4698 (phba->pport) ? phba->pport->port_state : 0,
4699 psli->sli_flag);
4700
4701 word0 = 0;
4702 mb = (MAILBOX_t *) &word0;
4703 mb->mbxCommand = MBX_RESTART;
4704 mb->mbxHc = 1;
4705
4706 lpfc_reset_barrier(phba);
4707
4708 to_slim = phba->MBslimaddr;
4709 writel(*(uint32_t *) mb, to_slim);
4710 readl(to_slim);
4711
4712
4713 if (phba->pport && phba->pport->port_state)
4714 word0 = 1;
4715 else
4716 word0 = 0;
4717 to_slim = phba->MBslimaddr + sizeof (uint32_t);
4718 writel(*(uint32_t *) mb, to_slim);
4719 readl(to_slim);
4720
4721 lpfc_sli_brdreset(phba);
4722 if (phba->pport)
4723 phba->pport->stopped = 0;
4724 phba->link_state = LPFC_INIT_START;
4725 phba->hba_flag = 0;
4726 spin_unlock_irq(&phba->hbalock);
4727
4728 memset(&psli->lnk_stat_offsets, 0, sizeof(psli->lnk_stat_offsets));
4729 psli->stats_start = ktime_get_seconds();
4730
4731
4732 mdelay(100);
4733
4734
4735 if (hba_aer_enabled)
4736 pci_disable_pcie_error_reporting(phba->pcidev);
4737
4738 lpfc_hba_down_post(phba);
4739
4740 return 0;
4741}
4742
4743
4744
4745
4746
4747
4748
4749
4750
4751
4752static int
4753lpfc_sli_brdrestart_s4(struct lpfc_hba *phba)
4754{
4755 struct lpfc_sli *psli = &phba->sli;
4756 uint32_t hba_aer_enabled;
4757 int rc;
4758
4759
4760 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
4761 "0296 Restart HBA Data: x%x x%x\n",
4762 phba->pport->port_state, psli->sli_flag);
4763
4764
4765 hba_aer_enabled = phba->hba_flag & HBA_AER_ENABLED;
4766
4767 rc = lpfc_sli4_brdreset(phba);
4768 if (rc) {
4769 phba->link_state = LPFC_HBA_ERROR;
4770 goto hba_down_queue;
4771 }
4772
4773 spin_lock_irq(&phba->hbalock);
4774 phba->pport->stopped = 0;
4775 phba->link_state = LPFC_INIT_START;
4776 phba->hba_flag = 0;
4777 spin_unlock_irq(&phba->hbalock);
4778
4779 memset(&psli->lnk_stat_offsets, 0, sizeof(psli->lnk_stat_offsets));
4780 psli->stats_start = ktime_get_seconds();
4781
4782
4783 if (hba_aer_enabled)
4784 pci_disable_pcie_error_reporting(phba->pcidev);
4785
4786hba_down_queue:
4787 lpfc_hba_down_post(phba);
4788 lpfc_sli4_queue_destroy(phba);
4789
4790 return rc;
4791}
4792
4793
4794
4795
4796
4797
4798
4799
4800int
4801lpfc_sli_brdrestart(struct lpfc_hba *phba)
4802{
4803 return phba->lpfc_sli_brdrestart(phba);
4804}
4805
4806
4807
4808
4809
4810
4811
4812
4813
4814
4815
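/* Reconstructed kernel-doc, describing the routine below as written. */
/**
 * lpfc_sli_chipset_init - Wait for an SLI-2/SLI-3 HBA to finish POST
 * @phba: Pointer to HBA context object.
 *
 * Polls the host status register until the chip reports ready to
 * accept mailbox commands (HS_FFRDY and HS_MBRDY). The board is
 * restarted once partway through the wait. On success the host
 * control register is cleared and all attention bits are acknowledged.
 *
 * Returns 0 on success, -EIO on chipset error or register read
 * failure, -ETIMEDOUT if the chip never becomes ready.
 **/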
4816int
4817lpfc_sli_chipset_init(struct lpfc_hba *phba)
4818{
4819 uint32_t status, i = 0;
4820
4821
4822 if (lpfc_readl(phba->HSregaddr, &status))
4823 return -EIO;
4824
4825
4826 i = 0;
4827 while ((status & (HS_FFRDY | HS_MBRDY)) != (HS_FFRDY | HS_MBRDY)) {
4828
4829
4830
4831
4832
4833
4834
4835
4836
4837 if (i++ >= 200) {
4838
4839
4840 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
4841 "0436 Adapter failed to init, "
4842 "timeout, status reg x%x, "
4843 "FW Data: A8 x%x AC x%x\n", status,
4844 readl(phba->MBslimaddr + 0xa8),
4845 readl(phba->MBslimaddr + 0xac));
4846 phba->link_state = LPFC_HBA_ERROR;
4847 return -ETIMEDOUT;
4848 }
4849
4850
4851 if (status & HS_FFERM) {
4852
4853
4854
4855 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
4856 "0437 Adapter failed to init, "
4857 "chipset, status reg x%x, "
4858 "FW Data: A8 x%x AC x%x\n", status,
4859 readl(phba->MBslimaddr + 0xa8),
4860 readl(phba->MBslimaddr + 0xac));
4861 phba->link_state = LPFC_HBA_ERROR;
4862 return -EIO;
4863 }
4864
4865 if (i <= 10)
4866 msleep(10);
4867 else if (i <= 100)
4868 msleep(100);
4869 else
4870 msleep(1000);
4871
4872 if (i == 150) {
4873
4874 phba->pport->port_state = LPFC_VPORT_UNKNOWN;
4875 lpfc_sli_brdrestart(phba);
4876 }
4877
4878 if (lpfc_readl(phba->HSregaddr, &status))
4879 return -EIO;
4880 }
4881
4882
4883 if (status & HS_FFERM) {
4884
4885
4886 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
4887 "0438 Adapter failed to init, chipset, "
4888 "status reg x%x, "
4889 "FW Data: A8 x%x AC x%x\n", status,
4890 readl(phba->MBslimaddr + 0xa8),
4891 readl(phba->MBslimaddr + 0xac));
4892 phba->link_state = LPFC_HBA_ERROR;
4893 return -EIO;
4894 }
4895
4896
4897 writel(0, phba->HCregaddr);
4898 readl(phba->HCregaddr);
4899
4900
4901 writel(0xffffffff, phba->HAregaddr);
4902 readl(phba->HAregaddr);
4903 return 0;
4904}
4905
4906
4907
4908
4909
4910
4911
4912int
4913lpfc_sli_hbq_count(void)
4914{
4915 return ARRAY_SIZE(lpfc_hbq_defs);
4916}
4917
4918
4919
4920
4921
4922
4923
4924
4925static int
4926lpfc_sli_hbq_entry_count(void)
4927{
4928 int hbq_count = lpfc_sli_hbq_count();
4929 int count = 0;
4930 int i;
4931
4932 for (i = 0; i < hbq_count; ++i)
4933 count += lpfc_hbq_defs[i]->entry_count;
4934 return count;
4935}
4936
4937
4938
4939
4940
4941
4942
4943int
4944lpfc_sli_hbq_size(void)
4945{
4946 return lpfc_sli_hbq_entry_count() * sizeof(struct lpfc_hbq_entry);
4947}
4948
4949
4950
4951
4952
4953
4954
4955
4956
4957
4958static int
4959lpfc_sli_hbq_setup(struct lpfc_hba *phba)
4960{
4961 int hbq_count = lpfc_sli_hbq_count();
4962 LPFC_MBOXQ_t *pmb;
4963 MAILBOX_t *pmbox;
4964 uint32_t hbqno;
4965 uint32_t hbq_entry_index;
4966
4967
4968
4969
4970 pmb = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
4971
4972 if (!pmb)
4973 return -ENOMEM;
4974
4975 pmbox = &pmb->u.mb;
4976
4977
4978 phba->link_state = LPFC_INIT_MBX_CMDS;
4979 phba->hbq_in_use = 1;
4980
4981 hbq_entry_index = 0;
4982 for (hbqno = 0; hbqno < hbq_count; ++hbqno) {
4983 phba->hbqs[hbqno].next_hbqPutIdx = 0;
4984 phba->hbqs[hbqno].hbqPutIdx = 0;
4985 phba->hbqs[hbqno].local_hbqGetIdx = 0;
4986 phba->hbqs[hbqno].entry_count =
4987 lpfc_hbq_defs[hbqno]->entry_count;
4988 lpfc_config_hbq(phba, hbqno, lpfc_hbq_defs[hbqno],
4989 hbq_entry_index, pmb);
4990 hbq_entry_index += phba->hbqs[hbqno].entry_count;
4991
4992 if (lpfc_sli_issue_mbox(phba, pmb, MBX_POLL) != MBX_SUCCESS) {
4993
4994
4995
4996 lpfc_printf_log(phba, KERN_ERR,
4997 LOG_SLI | LOG_VPORT,
4998 "1805 Adapter failed to init. "
4999 "Data: x%x x%x x%x\n",
5000 pmbox->mbxCommand,
5001 pmbox->mbxStatus, hbqno);
5002
5003 phba->link_state = LPFC_HBA_ERROR;
5004 mempool_free(pmb, phba->mbox_mem_pool);
5005 return -ENXIO;
5006 }
5007 }
5008 phba->hbq_count = hbq_count;
5009
5010 mempool_free(pmb, phba->mbox_mem_pool);
5011
5012
5013 for (hbqno = 0; hbqno < hbq_count; ++hbqno)
5014 lpfc_sli_hbqbuf_init_hbqs(phba, hbqno);
5015 return 0;
5016}
5017
5018
5019
5020
5021
5022
5023
5024
5025
5026
5027static int
5028lpfc_sli4_rb_setup(struct lpfc_hba *phba)
5029{
5030 phba->hbq_in_use = 1;
5031
5032
5033
5034
5035
5036 if (phba->cfg_enable_mds_diags && phba->mds_diags_support)
5037 phba->hbqs[LPFC_ELS_HBQ].entry_count =
5038 lpfc_hbq_defs[LPFC_ELS_HBQ]->entry_count >> 1;
5039 else
5040 phba->hbqs[LPFC_ELS_HBQ].entry_count =
5041 lpfc_hbq_defs[LPFC_ELS_HBQ]->entry_count;
5042 phba->hbq_count = 1;
5043 lpfc_sli_hbqbuf_init_hbqs(phba, LPFC_ELS_HBQ);
5044
5045 return 0;
5046}
5047
5048
5049
5050
5051
5052
5053
5054
5055
5056
5057
5058
5059
5060
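/* Reconstructed kernel-doc, describing the routine below as written. */
/**
 * lpfc_sli_config_port - Issue the CONFIG_PORT mailbox command
 * @phba: Pointer to HBA context object.
 * @sli_mode: SLI mode requested (2 or 3).
 *
 * Restarts and initializes the chip, then issues the CONFIG_PORT
 * mailbox command, with up to two attempts overall. On success the
 * SLI-3 feature flags (NPIV, HBQ, CRP, BlockGuard) and the port group
 * pointers are set up from the mailbox response.
 *
 * Returns 0 on success, a negative error code on failure.
 **/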
5061int
5062lpfc_sli_config_port(struct lpfc_hba *phba, int sli_mode)
5063{
5064 LPFC_MBOXQ_t *pmb;
5065 uint32_t resetcount = 0, rc = 0, done = 0;
5066
5067 pmb = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
5068 if (!pmb) {
5069 phba->link_state = LPFC_HBA_ERROR;
5070 return -ENOMEM;
5071 }
5072
5073 phba->sli_rev = sli_mode;
5074 while (resetcount < 2 && !done) {
5075 spin_lock_irq(&phba->hbalock);
5076 phba->sli.sli_flag |= LPFC_SLI_MBOX_ACTIVE;
5077 spin_unlock_irq(&phba->hbalock);
5078 phba->pport->port_state = LPFC_VPORT_UNKNOWN;
5079 lpfc_sli_brdrestart(phba);
5080 rc = lpfc_sli_chipset_init(phba);
5081 if (rc)
5082 break;
5083
5084 spin_lock_irq(&phba->hbalock);
5085 phba->sli.sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
5086 spin_unlock_irq(&phba->hbalock);
5087 resetcount++;
5088
5089
5090
5091
5092
5093
5094 rc = lpfc_config_port_prep(phba);
5095 if (rc == -ERESTART) {
5096 phba->link_state = LPFC_LINK_UNKNOWN;
5097 continue;
5098 } else if (rc)
5099 break;
5100
5101 phba->link_state = LPFC_INIT_MBX_CMDS;
5102 lpfc_config_port(phba, pmb);
5103 rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL);
5104 phba->sli3_options &= ~(LPFC_SLI3_NPIV_ENABLED |
5105 LPFC_SLI3_HBQ_ENABLED |
5106 LPFC_SLI3_CRP_ENABLED |
5107 LPFC_SLI3_DSS_ENABLED);
5108 if (rc != MBX_SUCCESS) {
5109 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
5110 "0442 Adapter failed to init, mbxCmd x%x "
5111 "CONFIG_PORT, mbxStatus x%x Data: x%x\n",
5112 pmb->u.mb.mbxCommand, pmb->u.mb.mbxStatus, 0);
5113 spin_lock_irq(&phba->hbalock);
5114 phba->sli.sli_flag &= ~LPFC_SLI_ACTIVE;
5115 spin_unlock_irq(&phba->hbalock);
5116 rc = -ENXIO;
5117 } else {
5118
5119 spin_lock_irq(&phba->hbalock);
5120 phba->sli.sli_flag &= ~LPFC_SLI_ASYNC_MBX_BLK;
5121 spin_unlock_irq(&phba->hbalock);
5122 done = 1;
5123
5124 if ((pmb->u.mb.un.varCfgPort.casabt == 1) &&
5125 (pmb->u.mb.un.varCfgPort.gasabt == 0))
5126 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
5127 "3110 Port did not grant ASABT\n");
5128 }
5129 }
5130 if (!done) {
5131 rc = -EINVAL;
5132 goto do_prep_failed;
5133 }
5134 if (pmb->u.mb.un.varCfgPort.sli_mode == 3) {
5135 if (!pmb->u.mb.un.varCfgPort.cMA) {
5136 rc = -ENXIO;
5137 goto do_prep_failed;
5138 }
5139 if (phba->max_vpi && pmb->u.mb.un.varCfgPort.gmv) {
5140 phba->sli3_options |= LPFC_SLI3_NPIV_ENABLED;
5141 phba->max_vpi = pmb->u.mb.un.varCfgPort.max_vpi;
5142 phba->max_vports = (phba->max_vpi > phba->max_vports) ?
5143 phba->max_vpi : phba->max_vports;
5144
5145 } else
5146 phba->max_vpi = 0;
5147 if (pmb->u.mb.un.varCfgPort.gerbm)
5148 phba->sli3_options |= LPFC_SLI3_HBQ_ENABLED;
5149 if (pmb->u.mb.un.varCfgPort.gcrp)
5150 phba->sli3_options |= LPFC_SLI3_CRP_ENABLED;
5151
5152 phba->hbq_get = phba->mbox->us.s3_pgp.hbq_get;
5153 phba->port_gp = phba->mbox->us.s3_pgp.port;
5154
5155 if (phba->sli3_options & LPFC_SLI3_BG_ENABLED) {
5156 if (pmb->u.mb.un.varCfgPort.gbg == 0) {
5157 phba->cfg_enable_bg = 0;
5158 phba->sli3_options &= ~LPFC_SLI3_BG_ENABLED;
5159 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
5160 "0443 Adapter did not grant "
5161 "BlockGuard\n");
5162 }
5163 }
5164 } else {
5165 phba->hbq_get = NULL;
5166 phba->port_gp = phba->mbox->us.s2.port;
5167 phba->max_vpi = 0;
5168 }
5169do_prep_failed:
5170 mempool_free(pmb, phba->mbox_mem_pool);
5171 return rc;
5172}
5173
5174
5175
5176
5177
5178
5179
5180
5181
5182
5183
5184
5185
5186
5187
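/* Reconstructed kernel-doc, describing the routine below as written. */
/**
 * lpfc_sli_hba_setup - SLI initialization for SLI-2/SLI-3 HBAs
 * @phba: Pointer to HBA context object.
 *
 * Selects the SLI mode from the cfg_sli_mode parameter, configures the
 * port, optionally enables PCIe AER, sets up the iocb sizes, the ring
 * map, the VPI bitmask for SLI-3 NPIV, and the HBQs, and finishes with
 * lpfc_config_port_post(). On failure the HBA is marked in error
 * state.
 *
 * Returns 0 on success, nonzero otherwise.
 **/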
5188int
5189lpfc_sli_hba_setup(struct lpfc_hba *phba)
5190{
5191 uint32_t rc;
5192 int mode = 3, i;
5193 int longs;
5194
5195 switch (phba->cfg_sli_mode) {
5196 case 2:
5197 if (phba->cfg_enable_npiv) {
5198 lpfc_printf_log(phba, KERN_ERR, LOG_INIT | LOG_VPORT,
5199 "1824 NPIV enabled: Override sli_mode "
5200 "parameter (%d) to auto (0).\n",
5201 phba->cfg_sli_mode);
5202 break;
5203 }
5204 mode = 2;
5205 break;
5206 case 0:
5207 case 3:
5208 break;
5209 default:
5210 lpfc_printf_log(phba, KERN_ERR, LOG_INIT | LOG_VPORT,
5211 "1819 Unrecognized sli_mode parameter: %d.\n",
5212 phba->cfg_sli_mode);
5213
5214 break;
5215 }
5216 phba->fcp_embed_io = 0;
5217
5218 rc = lpfc_sli_config_port(phba, mode);
5219
5220 if (rc && phba->cfg_sli_mode == 3)
5221 lpfc_printf_log(phba, KERN_ERR, LOG_INIT | LOG_VPORT,
5222 "1820 Unable to select SLI-3. "
5223 "Not supported by adapter.\n");
5224 if (rc && mode != 2)
5225 rc = lpfc_sli_config_port(phba, 2);
5226 else if (rc && mode == 2)
5227 rc = lpfc_sli_config_port(phba, 3);
5228 if (rc)
5229 goto lpfc_sli_hba_setup_error;
5230
5231
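	/* Enable PCIe Advanced Error Reporting (AER) if it was requested */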
5232 if (phba->cfg_aer_support == 1 && !(phba->hba_flag & HBA_AER_ENABLED)) {
5233 rc = pci_enable_pcie_error_reporting(phba->pcidev);
5234 if (!rc) {
5235 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
5236 "2709 This device supports "
5237 "Advanced Error Reporting (AER)\n");
5238 spin_lock_irq(&phba->hbalock);
5239 phba->hba_flag |= HBA_AER_ENABLED;
5240 spin_unlock_irq(&phba->hbalock);
5241 } else {
5242 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
5243 "2708 This device does not support "
5244 "Advanced Error Reporting (AER): %d\n",
5245 rc);
5246 phba->cfg_aer_support = 0;
5247 }
5248 }
5249
5250 if (phba->sli_rev == 3) {
5251 phba->iocb_cmd_size = SLI3_IOCB_CMD_SIZE;
5252 phba->iocb_rsp_size = SLI3_IOCB_RSP_SIZE;
5253 } else {
5254 phba->iocb_cmd_size = SLI2_IOCB_CMD_SIZE;
5255 phba->iocb_rsp_size = SLI2_IOCB_RSP_SIZE;
5256 phba->sli3_options = 0;
5257 }
5258
5259 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
5260 "0444 Firmware in SLI %x mode. Max_vpi %d\n",
5261 phba->sli_rev, phba->max_vpi);
5262 rc = lpfc_sli_ring_map(phba);
5263
5264 if (rc)
5265 goto lpfc_sli_hba_setup_error;
5266
5267
5268 if (phba->sli_rev == LPFC_SLI_REV3) {
5269
5270
5271
5272
5273
5274 if ((phba->vpi_bmask == NULL) && (phba->vpi_ids == NULL)) {
5275 longs = (phba->max_vpi + BITS_PER_LONG) / BITS_PER_LONG;
5276 phba->vpi_bmask = kcalloc(longs,
5277 sizeof(unsigned long),
5278 GFP_KERNEL);
5279 if (!phba->vpi_bmask) {
5280 rc = -ENOMEM;
5281 goto lpfc_sli_hba_setup_error;
5282 }
5283
5284 phba->vpi_ids = kcalloc(phba->max_vpi + 1,
5285 sizeof(uint16_t),
5286 GFP_KERNEL);
5287 if (!phba->vpi_ids) {
5288 kfree(phba->vpi_bmask);
5289 rc = -ENOMEM;
5290 goto lpfc_sli_hba_setup_error;
5291 }
5292 for (i = 0; i < phba->max_vpi; i++)
5293 phba->vpi_ids[i] = i;
5294 }
5295 }
5296
5297
5298 if (phba->sli3_options & LPFC_SLI3_HBQ_ENABLED) {
5299 rc = lpfc_sli_hbq_setup(phba);
5300 if (rc)
5301 goto lpfc_sli_hba_setup_error;
5302 }
5303 spin_lock_irq(&phba->hbalock);
5304 phba->sli.sli_flag |= LPFC_PROCESS_LA;
5305 spin_unlock_irq(&phba->hbalock);
5306
5307 rc = lpfc_config_port_post(phba);
5308 if (rc)
5309 goto lpfc_sli_hba_setup_error;
5310
5311 return rc;
5312
5313lpfc_sli_hba_setup_error:
5314 phba->link_state = LPFC_HBA_ERROR;
5315 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
5316 "0445 Firmware initialization failed\n");
5317 return rc;
5318}
5327
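/**
 * lpfc_sli4_read_fcoe_params - Read FCoE parameters from config region 23
 * @phba: Pointer to HBA context object.
 *
 * Sets the default FC-Map values and then issues a DUMP mailbox command
 * for config region 23.  The returned data is handed to
 * lpfc_parse_fcoe_conf() to extract the FCoE parameters.
 *
 * Returns 0 on success, -ENOMEM on allocation failure, or -EIO if the
 * mailbox command fails or reports an oversized region.
 **/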
5328static int
5329lpfc_sli4_read_fcoe_params(struct lpfc_hba *phba)
5330{
5331 LPFC_MBOXQ_t *mboxq;
5332 struct lpfc_dmabuf *mp;
5333 struct lpfc_mqe *mqe;
5334 uint32_t data_length;
5335 int rc;
5336
5337
5338 phba->valid_vlan = 0;
5339 phba->fc_map[0] = LPFC_FCOE_FCF_MAP0;
5340 phba->fc_map[1] = LPFC_FCOE_FCF_MAP1;
5341 phba->fc_map[2] = LPFC_FCOE_FCF_MAP2;
5342
5343 mboxq = (LPFC_MBOXQ_t *)mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
5344 if (!mboxq)
5345 return -ENOMEM;
5346
5347 mqe = &mboxq->u.mqe;
5348 if (lpfc_sli4_dump_cfg_rg23(phba, mboxq)) {
5349 rc = -ENOMEM;
5350 goto out_free_mboxq;
5351 }
5352
5353 mp = (struct lpfc_dmabuf *)mboxq->ctx_buf;
5354 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
5355
5356 lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI,
5357 "(%d):2571 Mailbox cmd x%x Status x%x "
5358 "Data: x%x x%x x%x x%x x%x x%x x%x x%x x%x "
5359 "x%x x%x x%x x%x x%x x%x x%x x%x x%x "
5360 "CQ: x%x x%x x%x x%x\n",
5361 mboxq->vport ? mboxq->vport->vpi : 0,
5362 bf_get(lpfc_mqe_command, mqe),
5363 bf_get(lpfc_mqe_status, mqe),
5364 mqe->un.mb_words[0], mqe->un.mb_words[1],
5365 mqe->un.mb_words[2], mqe->un.mb_words[3],
5366 mqe->un.mb_words[4], mqe->un.mb_words[5],
5367 mqe->un.mb_words[6], mqe->un.mb_words[7],
5368 mqe->un.mb_words[8], mqe->un.mb_words[9],
5369 mqe->un.mb_words[10], mqe->un.mb_words[11],
5370 mqe->un.mb_words[12], mqe->un.mb_words[13],
5371 mqe->un.mb_words[14], mqe->un.mb_words[15],
5372 mqe->un.mb_words[16], mqe->un.mb_words[50],
5373 mboxq->mcqe.word0,
5374 mboxq->mcqe.mcqe_tag0, mboxq->mcqe.mcqe_tag1,
5375 mboxq->mcqe.trailer);
5376
5377 if (rc) {
5378 lpfc_mbuf_free(phba, mp->virt, mp->phys);
5379 kfree(mp);
5380 rc = -EIO;
5381 goto out_free_mboxq;
5382 }
5383 data_length = mqe->un.mb_words[5];
5384 if (data_length > DMP_RGN23_SIZE) {
5385 lpfc_mbuf_free(phba, mp->virt, mp->phys);
5386 kfree(mp);
5387 rc = -EIO;
5388 goto out_free_mboxq;
5389 }
5390
5391 lpfc_parse_fcoe_conf(phba, mp->virt, data_length);
5392 lpfc_mbuf_free(phba, mp->virt, mp->phys);
5393 kfree(mp);
5394 rc = 0;
5395
5396out_free_mboxq:
5397 mempool_free(mboxq, phba->mbox_mem_pool);
5398 return rc;
5399}
5415
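/**
 * lpfc_sli4_read_rev - Issue READ_REV and collect the VPD data
 * @phba: Pointer to HBA context object.
 * @mboxq: Caller-allocated mailbox command to use for READ_REV.
 * @vpd: Buffer that receives the VPD data.
 * @vpd_size: On entry the size of @vpd; on return the amount copied.
 *
 * Allocates a DMA buffer for the VPD payload, issues READ_REV in polled
 * mode, and copies the returned VPD into @vpd.  If the port reports less
 * VPD than requested, @vpd_size is reduced accordingly.
 *
 * Returns 0 on success, -ENOMEM on allocation failure, or -EIO if the
 * mailbox command fails.
 **/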
5416static int
5417lpfc_sli4_read_rev(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq,
5418 uint8_t *vpd, uint32_t *vpd_size)
5419{
5420 int rc = 0;
5421 uint32_t dma_size;
5422 struct lpfc_dmabuf *dmabuf;
5423 struct lpfc_mqe *mqe;
5424
5425 dmabuf = kzalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
5426 if (!dmabuf)
5427 return -ENOMEM;
5428
5429
5430
5431
5432
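	/*
	 * Get a DMA buffer for the VPD data returned by the READ_REV
	 * mailbox command.
	 */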
5433 dma_size = *vpd_size;
5434 dmabuf->virt = dma_alloc_coherent(&phba->pcidev->dev, dma_size,
5435 &dmabuf->phys, GFP_KERNEL);
5436 if (!dmabuf->virt) {
5437 kfree(dmabuf);
5438 return -ENOMEM;
5439 }
5440
5441
5442
5443
5444
5445
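	/*
	 * Build the READ_REV command and point it at the VPD DMA buffer.
	 * Only the low half of word1 is preserved before setting the vpd
	 * and available-length fields.
	 */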
5446 lpfc_read_rev(phba, mboxq);
5447 mqe = &mboxq->u.mqe;
5448 mqe->un.read_rev.vpd_paddr_high = putPaddrHigh(dmabuf->phys);
5449 mqe->un.read_rev.vpd_paddr_low = putPaddrLow(dmabuf->phys);
5450 mqe->un.read_rev.word1 &= 0x0000FFFF;
5451 bf_set(lpfc_mbx_rd_rev_vpd, &mqe->un.read_rev, 1);
5452 bf_set(lpfc_mbx_rd_rev_avail_len, &mqe->un.read_rev, dma_size);
5453
5454 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
5455 if (rc) {
5456 dma_free_coherent(&phba->pcidev->dev, dma_size,
5457 dmabuf->virt, dmabuf->phys);
5458 kfree(dmabuf);
5459 return -EIO;
5460 }
5461
5462
5463
5464
5465
5466
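	/* The port may return less VPD data than the buffer can hold */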
5467 if (mqe->un.read_rev.avail_vpd_len < *vpd_size)
5468 *vpd_size = mqe->un.read_rev.avail_vpd_len;
5469
5470 memcpy(vpd, dmabuf->virt, *vpd_size);
5471
5472 dma_free_coherent(&phba->pcidev->dev, dma_size,
5473 dmabuf->virt, dmabuf->phys);
5474 kfree(dmabuf);
5475 return 0;
5476}
5488
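/**
 * lpfc_sli4_get_ctl_attr - Retrieve SLI4 device controller attributes
 * @phba: Pointer to HBA context object.
 *
 * Issues the COMMON_GET_CNTL_ATTRIBUTES mailbox command and records the
 * link type, link number and BIOS version reported by the port.
 *
 * Returns 0 on success, -ENOMEM on allocation failure, or -ENXIO if the
 * mailbox command completes with an error status.
 **/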
5489static int
5490lpfc_sli4_get_ctl_attr(struct lpfc_hba *phba)
5491{
5492 LPFC_MBOXQ_t *mboxq;
5493 struct lpfc_mbx_get_cntl_attributes *mbx_cntl_attr;
5494 struct lpfc_controller_attribute *cntl_attr;
5495 void *virtaddr = NULL;
5496 uint32_t alloclen, reqlen;
5497 uint32_t shdr_status, shdr_add_status;
5498 union lpfc_sli4_cfg_shdr *shdr;
5499 int rc;
5500
5501 mboxq = (LPFC_MBOXQ_t *)mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
5502 if (!mboxq)
5503 return -ENOMEM;
5504
5505
5506 reqlen = sizeof(struct lpfc_mbx_get_cntl_attributes);
5507 alloclen = lpfc_sli4_config(phba, mboxq, LPFC_MBOX_SUBSYSTEM_COMMON,
5508 LPFC_MBOX_OPCODE_GET_CNTL_ATTRIBUTES, reqlen,
5509 LPFC_SLI4_MBX_NEMBED);
5510
5511 if (alloclen < reqlen) {
5512 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
5513 "3084 Allocated DMA memory size (%d) is "
5514 "less than the requested DMA memory size "
5515 "(%d)\n", alloclen, reqlen);
5516 rc = -ENOMEM;
5517 goto out_free_mboxq;
5518 }
5519 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
5520 virtaddr = mboxq->sge_array->addr[0];
5521 mbx_cntl_attr = (struct lpfc_mbx_get_cntl_attributes *)virtaddr;
5522 shdr = &mbx_cntl_attr->cfg_shdr;
5523 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
5524 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
5525 if (shdr_status || shdr_add_status || rc) {
5526 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
5527 "3085 Mailbox x%x (x%x/x%x) failed, "
5528 "rc:x%x, status:x%x, add_status:x%x\n",
5529 bf_get(lpfc_mqe_command, &mboxq->u.mqe),
5530 lpfc_sli_config_mbox_subsys_get(phba, mboxq),
5531 lpfc_sli_config_mbox_opcode_get(phba, mboxq),
5532 rc, shdr_status, shdr_add_status);
5533 rc = -ENXIO;
5534 goto out_free_mboxq;
5535 }
5536
5537 cntl_attr = &mbx_cntl_attr->cntl_attr;
5538 phba->sli4_hba.lnk_info.lnk_dv = LPFC_LNK_DAT_VAL;
5539 phba->sli4_hba.lnk_info.lnk_tp =
5540 bf_get(lpfc_cntl_attr_lnk_type, cntl_attr);
5541 phba->sli4_hba.lnk_info.lnk_no =
5542 bf_get(lpfc_cntl_attr_lnk_numb, cntl_attr);
5543
5544 memset(phba->BIOSVersion, 0, sizeof(phba->BIOSVersion));
5545 strlcat(phba->BIOSVersion, (char *)cntl_attr->bios_ver_str,
5546 sizeof(phba->BIOSVersion));
5547
5548 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
5549 "3086 lnk_type:%d, lnk_numb:%d, bios_ver:%s\n",
5550 phba->sli4_hba.lnk_info.lnk_tp,
5551 phba->sli4_hba.lnk_info.lnk_no,
5552 phba->BIOSVersion);
5553out_free_mboxq:
5554 if (rc != MBX_TIMEOUT) {
5555 if (bf_get(lpfc_mqe_command, &mboxq->u.mqe) == MBX_SLI4_CONFIG)
5556 lpfc_sli4_mbox_cmd_free(phba, mboxq);
5557 else
5558 mempool_free(mboxq, phba->mbox_mem_pool);
5559 }
5560 return rc;
5561}
5573
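/**
 * lpfc_sli4_retrieve_pport_name - Retrieve the SLI4 physical port name
 * @phba: Pointer to HBA context object.
 *
 * Obtains the link type and link number (via READ_CONFIG or, if that does
 * not provide them, COMMON_GET_CNTL_ATTRIBUTES) and then issues
 * COMMON_GET_PORT_NAME to read the character name of this physical port.
 * On success the name is stored in phba->Port.
 *
 * Returns 0 on success, -ENOMEM on allocation failure, or -ENXIO if a
 * mailbox command fails.
 **/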
5574static int
5575lpfc_sli4_retrieve_pport_name(struct lpfc_hba *phba)
5576{
5577 LPFC_MBOXQ_t *mboxq;
5578 struct lpfc_mbx_get_port_name *get_port_name;
5579 uint32_t shdr_status, shdr_add_status;
5580 union lpfc_sli4_cfg_shdr *shdr;
5581 char cport_name = 0;
5582 int rc;
5583
5584
5585 phba->sli4_hba.lnk_info.lnk_dv = LPFC_LNK_DAT_INVAL;
5586 phba->sli4_hba.pport_name_sta = LPFC_SLI4_PPNAME_NON;
5587
5588 mboxq = (LPFC_MBOXQ_t *)mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
5589 if (!mboxq)
5590 return -ENOMEM;
5591
5592 phba->sli4_hba.lnk_info.lnk_dv = LPFC_LNK_DAT_INVAL;
5593 lpfc_sli4_read_config(phba);
5594 if (phba->sli4_hba.lnk_info.lnk_dv == LPFC_LNK_DAT_VAL)
5595 goto retrieve_ppname;
5596
5597
5598 rc = lpfc_sli4_get_ctl_attr(phba);
5599 if (rc)
5600 goto out_free_mboxq;
5601
5602retrieve_ppname:
5603 lpfc_sli4_config(phba, mboxq, LPFC_MBOX_SUBSYSTEM_COMMON,
5604 LPFC_MBOX_OPCODE_GET_PORT_NAME,
5605 sizeof(struct lpfc_mbx_get_port_name) -
5606 sizeof(struct lpfc_sli4_cfg_mhdr),
5607 LPFC_SLI4_MBX_EMBED);
5608 get_port_name = &mboxq->u.mqe.un.get_port_name;
5609 shdr = (union lpfc_sli4_cfg_shdr *)&get_port_name->header.cfg_shdr;
5610 bf_set(lpfc_mbox_hdr_version, &shdr->request, LPFC_OPCODE_VERSION_1);
5611 bf_set(lpfc_mbx_get_port_name_lnk_type, &get_port_name->u.request,
5612 phba->sli4_hba.lnk_info.lnk_tp);
5613 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
5614 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
5615 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
5616 if (shdr_status || shdr_add_status || rc) {
5617 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
5618 "3087 Mailbox x%x (x%x/x%x) failed: "
5619 "rc:x%x, status:x%x, add_status:x%x\n",
5620 bf_get(lpfc_mqe_command, &mboxq->u.mqe),
5621 lpfc_sli_config_mbox_subsys_get(phba, mboxq),
5622 lpfc_sli_config_mbox_opcode_get(phba, mboxq),
5623 rc, shdr_status, shdr_add_status);
5624 rc = -ENXIO;
5625 goto out_free_mboxq;
5626 }
5627 switch (phba->sli4_hba.lnk_info.lnk_no) {
5628 case LPFC_LINK_NUMBER_0:
5629 cport_name = bf_get(lpfc_mbx_get_port_name_name0,
5630 &get_port_name->u.response);
5631 phba->sli4_hba.pport_name_sta = LPFC_SLI4_PPNAME_GET;
5632 break;
5633 case LPFC_LINK_NUMBER_1:
5634 cport_name = bf_get(lpfc_mbx_get_port_name_name1,
5635 &get_port_name->u.response);
5636 phba->sli4_hba.pport_name_sta = LPFC_SLI4_PPNAME_GET;
5637 break;
5638 case LPFC_LINK_NUMBER_2:
5639 cport_name = bf_get(lpfc_mbx_get_port_name_name2,
5640 &get_port_name->u.response);
5641 phba->sli4_hba.pport_name_sta = LPFC_SLI4_PPNAME_GET;
5642 break;
5643 case LPFC_LINK_NUMBER_3:
5644 cport_name = bf_get(lpfc_mbx_get_port_name_name3,
5645 &get_port_name->u.response);
5646 phba->sli4_hba.pport_name_sta = LPFC_SLI4_PPNAME_GET;
5647 break;
5648 default:
5649 break;
5650 }
5651
5652 if (phba->sli4_hba.pport_name_sta == LPFC_SLI4_PPNAME_GET) {
5653 phba->Port[0] = cport_name;
5654 phba->Port[1] = '\0';
5655 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
5656 "3091 SLI get port name: %s\n", phba->Port);
5657 }
5658
5659out_free_mboxq:
5660 if (rc != MBX_TIMEOUT) {
5661 if (bf_get(lpfc_mqe_command, &mboxq->u.mqe) == MBX_SLI4_CONFIG)
5662 lpfc_sli4_mbox_cmd_free(phba, mboxq);
5663 else
5664 mempool_free(mboxq, phba->mbox_mem_pool);
5665 }
5666 return rc;
5667}
5675
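/**
 * lpfc_sli4_arm_cqeq_intr - Arm the SLI4 completion and event queues
 * @phba: Pointer to HBA context object.
 *
 * Writes the doorbells to re-arm the mailbox, ELS and NVME LS completion
 * queues, every hardware queue's I/O completion queue, all event queues,
 * and, when NVMET is supported, the NVMET completion queue set.
 **/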
5676static void
5677lpfc_sli4_arm_cqeq_intr(struct lpfc_hba *phba)
5678{
5679 int qidx;
5680 struct lpfc_sli4_hba *sli4_hba = &phba->sli4_hba;
5681 struct lpfc_sli4_hdw_queue *qp;
5682 struct lpfc_queue *eq;
5683
5684 sli4_hba->sli4_write_cq_db(phba, sli4_hba->mbx_cq, 0, LPFC_QUEUE_REARM);
5685 sli4_hba->sli4_write_cq_db(phba, sli4_hba->els_cq, 0, LPFC_QUEUE_REARM);
5686 if (sli4_hba->nvmels_cq)
5687 sli4_hba->sli4_write_cq_db(phba, sli4_hba->nvmels_cq, 0,
5688 LPFC_QUEUE_REARM);
5689
5690 if (sli4_hba->hdwq) {
5691
5692 for (qidx = 0; qidx < phba->cfg_hdw_queue; qidx++) {
5693 qp = &sli4_hba->hdwq[qidx];
5694
5695 sli4_hba->sli4_write_cq_db(phba, qp->io_cq, 0,
5696 LPFC_QUEUE_REARM);
5697 }
5698
5699
5700 for (qidx = 0; qidx < phba->cfg_irq_chann; qidx++) {
5701 eq = sli4_hba->hba_eq_hdl[qidx].eq;
5702
5703 sli4_hba->sli4_write_eq_db(phba, eq,
5704 0, LPFC_QUEUE_REARM);
5705 }
5706 }
5707
5708 if (phba->nvmet_support) {
5709 for (qidx = 0; qidx < phba->cfg_nvmet_mrq; qidx++) {
5710 sli4_hba->sli4_write_cq_db(phba,
5711 sli4_hba->nvmet_cqset[qidx], 0,
5712 LPFC_QUEUE_REARM);
5713 }
5714 }
5715}
5728
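/**
 * lpfc_sli4_get_avail_extnt_rsrc - Get available resource extent count/size
 * @phba: Pointer to HBA context object.
 * @type: The resource extent type (RPI, VPI, XRI or VFI).
 * @extnt_count: Returned number of available extents of this type.
 * @extnt_size: Returned number of resource ids in each extent.
 *
 * Issues the GET_RSRC_EXTENT_INFO mailbox command to discover how many
 * extents of @type the port can provide and how large each extent is.
 *
 * Returns 0 on success or -ENOMEM/-EIO on failure.
 **/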
5729int
5730lpfc_sli4_get_avail_extnt_rsrc(struct lpfc_hba *phba, uint16_t type,
5731 uint16_t *extnt_count, uint16_t *extnt_size)
5732{
5733 int rc = 0;
5734 uint32_t length;
5735 uint32_t mbox_tmo;
5736 struct lpfc_mbx_get_rsrc_extent_info *rsrc_info;
5737 LPFC_MBOXQ_t *mbox;
5738
5739 mbox = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
5740 if (!mbox)
5741 return -ENOMEM;
5742
5743
5744 length = (sizeof(struct lpfc_mbx_get_rsrc_extent_info) -
5745 sizeof(struct lpfc_sli4_cfg_mhdr));
5746 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON,
5747 LPFC_MBOX_OPCODE_GET_RSRC_EXTENT_INFO,
5748 length, LPFC_SLI4_MBX_EMBED);
5749
5750
5751 rc = lpfc_sli4_mbox_rsrc_extent(phba, mbox, 0, type,
5752 LPFC_SLI4_MBX_EMBED);
5753 if (unlikely(rc)) {
5754 rc = -EIO;
5755 goto err_exit;
5756 }
5757
5758 if (!phba->sli4_hba.intr_enable)
5759 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
5760 else {
5761 mbox_tmo = lpfc_mbox_tmo_val(phba, mbox);
5762 rc = lpfc_sli_issue_mbox_wait(phba, mbox, mbox_tmo);
5763 }
5764 if (unlikely(rc)) {
5765 rc = -EIO;
5766 goto err_exit;
5767 }
5768
5769 rsrc_info = &mbox->u.mqe.un.rsrc_extent_info;
5770 if (bf_get(lpfc_mbox_hdr_status,
5771 &rsrc_info->header.cfg_shdr.response)) {
5772 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_INIT,
5773 "2930 Failed to get resource extents "
5774 "Status 0x%x Add'l Status 0x%x\n",
5775 bf_get(lpfc_mbox_hdr_status,
5776 &rsrc_info->header.cfg_shdr.response),
5777 bf_get(lpfc_mbox_hdr_add_status,
5778 &rsrc_info->header.cfg_shdr.response));
5779 rc = -EIO;
5780 goto err_exit;
5781 }
5782
5783 *extnt_count = bf_get(lpfc_mbx_get_rsrc_extent_info_cnt,
5784 &rsrc_info->u.rsp);
5785 *extnt_size = bf_get(lpfc_mbx_get_rsrc_extent_info_size,
5786 &rsrc_info->u.rsp);
5787
5788 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
5789 "3162 Retrieved extents type-%d from port: count:%d, "
5790 "size:%d\n", type, *extnt_count, *extnt_size);
5791
5792err_exit:
5793 mempool_free(mbox, phba->mbox_mem_pool);
5794 return rc;
5795}
5811
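/**
 * lpfc_sli4_chk_avail_extnt_rsrc - Check for extent resource changes
 * @phba: Pointer to HBA context object.
 * @type: The extent type to check.
 *
 * Compares the extent count and size currently reported by the port with
 * the extent blocks the driver has already allocated for @type.
 *
 * Returns 0 if the provisioning is unchanged, 1 if it differs, or -EIO if
 * the port could not be queried.
 **/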
5812static int
5813lpfc_sli4_chk_avail_extnt_rsrc(struct lpfc_hba *phba, uint16_t type)
5814{
5815 uint16_t curr_ext_cnt, rsrc_ext_cnt;
5816 uint16_t size_diff, rsrc_ext_size;
5817 int rc = 0;
5818 struct lpfc_rsrc_blks *rsrc_entry;
5819 struct list_head *rsrc_blk_list = NULL;
5820
5821 size_diff = 0;
5822 curr_ext_cnt = 0;
5823 rc = lpfc_sli4_get_avail_extnt_rsrc(phba, type,
5824 &rsrc_ext_cnt,
5825 &rsrc_ext_size);
5826 if (unlikely(rc))
5827 return -EIO;
5828
5829 switch (type) {
5830 case LPFC_RSC_TYPE_FCOE_RPI:
5831 rsrc_blk_list = &phba->sli4_hba.lpfc_rpi_blk_list;
5832 break;
5833 case LPFC_RSC_TYPE_FCOE_VPI:
5834 rsrc_blk_list = &phba->lpfc_vpi_blk_list;
5835 break;
5836 case LPFC_RSC_TYPE_FCOE_XRI:
5837 rsrc_blk_list = &phba->sli4_hba.lpfc_xri_blk_list;
5838 break;
5839 case LPFC_RSC_TYPE_FCOE_VFI:
5840 rsrc_blk_list = &phba->sli4_hba.lpfc_vfi_blk_list;
5841 break;
5842 default:
5843 break;
5844 }
5845
5846 list_for_each_entry(rsrc_entry, rsrc_blk_list, list) {
5847 curr_ext_cnt++;
5848 if (rsrc_entry->rsrc_size != rsrc_ext_size)
5849 size_diff++;
5850 }
5851
5852 if (curr_ext_cnt != rsrc_ext_cnt || size_diff != 0)
5853 rc = 1;
5854
5855 return rc;
5856}
5874
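/**
 * lpfc_sli4_cfg_post_extnts - Post resource extents to the port
 * @phba: Pointer to HBA context object.
 * @extnt_cnt: Number of extents being requested.
 * @type: The extent type being allocated.
 * @emb: Returns whether the command was built embedded or non-embedded.
 * @mbox: Caller-allocated mailbox command to build and issue.
 *
 * Sizes the ALLOC_RSRC_EXTENT request, switching to a non-embedded
 * mailbox when the extent id list does not fit in the embedded payload,
 * and then issues the command in polled or interrupt-driven mode.
 *
 * Returns 0 on success or -ENOMEM/-EIO on failure.
 **/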
5875static int
5876lpfc_sli4_cfg_post_extnts(struct lpfc_hba *phba, uint16_t extnt_cnt,
5877 uint16_t type, bool *emb, LPFC_MBOXQ_t *mbox)
5878{
5879 int rc = 0;
5880 uint32_t req_len;
5881 uint32_t emb_len;
5882 uint32_t alloc_len, mbox_tmo;
5883
5884
5885 req_len = extnt_cnt * sizeof(uint16_t);
5886
5887
5888
5889
5890
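	/*
	 * The embedded payload is the mailbox size minus the config header
	 * and the extents-specific word.
	 */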
5891 emb_len = sizeof(MAILBOX_t) - sizeof(struct mbox_header) -
5892 sizeof(uint32_t);
5893
5894
5895
5896
5897
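	/*
	 * Presume the request fits in an embedded mailbox; switch to a
	 * non-embedded mailbox if the extent id list is too large.
	 */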
5898 *emb = LPFC_SLI4_MBX_EMBED;
5899 if (req_len > emb_len) {
5900 req_len = extnt_cnt * sizeof(uint16_t) +
5901 sizeof(union lpfc_sli4_cfg_shdr) +
5902 sizeof(uint32_t);
5903 *emb = LPFC_SLI4_MBX_NEMBED;
5904 }
5905
5906 alloc_len = lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON,
5907 LPFC_MBOX_OPCODE_ALLOC_RSRC_EXTENT,
5908 req_len, *emb);
5909 if (alloc_len < req_len) {
5910 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
5911 "2982 Allocated DMA memory size (x%x) is "
5912 "less than the requested DMA memory "
5913 "size (x%x)\n", alloc_len, req_len);
5914 return -ENOMEM;
5915 }
5916 rc = lpfc_sli4_mbox_rsrc_extent(phba, mbox, extnt_cnt, type, *emb);
5917 if (unlikely(rc))
5918 return -EIO;
5919
5920 if (!phba->sli4_hba.intr_enable)
5921 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
5922 else {
5923 mbox_tmo = lpfc_mbox_tmo_val(phba, mbox);
5924 rc = lpfc_sli_issue_mbox_wait(phba, mbox, mbox_tmo);
5925 }
5926
5927 if (unlikely(rc))
5928 rc = -EIO;
5929 return rc;
5930}
5939
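/**
 * lpfc_sli4_alloc_extent - Allocate port resource extents of a given type
 * @phba: Pointer to HBA context object.
 * @type: The extent type to allocate (RPI, VPI, XRI or VFI).
 *
 * Queries the available extent count and size, posts the allocation to
 * the port, and builds the driver bookkeeping for the returned ids: a
 * bitmask for id management, an id array, and a list of extent blocks.
 *
 * Returns 0 on success or -ENOMEM/-EIO on failure.
 **/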
5940static int
5941lpfc_sli4_alloc_extent(struct lpfc_hba *phba, uint16_t type)
5942{
5943 bool emb = false;
5944 uint16_t rsrc_id_cnt, rsrc_cnt, rsrc_size;
5945 uint16_t rsrc_id, rsrc_start, j, k;
5946 uint16_t *ids;
5947 int i, rc;
5948 unsigned long longs;
5949 unsigned long *bmask;
5950 struct lpfc_rsrc_blks *rsrc_blks;
5951 LPFC_MBOXQ_t *mbox;
5952 uint32_t length;
5953 struct lpfc_id_range *id_array = NULL;
5954 void *virtaddr = NULL;
5955 struct lpfc_mbx_nembed_rsrc_extent *n_rsrc;
5956 struct lpfc_mbx_alloc_rsrc_extents *rsrc_ext;
5957 struct list_head *ext_blk_list;
5958
5959 rc = lpfc_sli4_get_avail_extnt_rsrc(phba, type,
5960 &rsrc_cnt,
5961 &rsrc_size);
5962 if (unlikely(rc))
5963 return -EIO;
5964
5965 if ((rsrc_cnt == 0) || (rsrc_size == 0)) {
5966 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_INIT,
5967 "3009 No available Resource Extents "
5968 "for resource type 0x%x: Count: 0x%x, "
5969 "Size 0x%x\n", type, rsrc_cnt,
5970 rsrc_size);
5971 return -ENOMEM;
5972 }
5973
5974 lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_INIT | LOG_SLI,
5975 "2903 Post resource extents type-0x%x: "
5976 "count:%d, size %d\n", type, rsrc_cnt, rsrc_size);
5977
5978 mbox = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
5979 if (!mbox)
5980 return -ENOMEM;
5981
5982 rc = lpfc_sli4_cfg_post_extnts(phba, rsrc_cnt, type, &emb, mbox);
5983 if (unlikely(rc)) {
5984 rc = -EIO;
5985 goto err_exit;
5986 }
5987
5988
5989
5990
5991
5992
5993
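	/*
	 * Locate the response.  For an embedded mailbox it is in the
	 * mailbox itself; otherwise it is in the first SGE of the
	 * non-embedded command.
	 */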
5994 if (emb == LPFC_SLI4_MBX_EMBED) {
5995 rsrc_ext = &mbox->u.mqe.un.alloc_rsrc_extents;
5996 id_array = &rsrc_ext->u.rsp.id[0];
5997 rsrc_cnt = bf_get(lpfc_mbx_rsrc_cnt, &rsrc_ext->u.rsp);
5998 } else {
5999 virtaddr = mbox->sge_array->addr[0];
6000 n_rsrc = (struct lpfc_mbx_nembed_rsrc_extent *) virtaddr;
6001 rsrc_cnt = bf_get(lpfc_mbx_rsrc_cnt, n_rsrc);
6002 id_array = &n_rsrc->id;
6003 }
6004
6005 longs = ((rsrc_cnt * rsrc_size) + BITS_PER_LONG - 1) / BITS_PER_LONG;
6006 rsrc_id_cnt = rsrc_cnt * rsrc_size;
6007
6008
6009
6010
6011
6012 length = sizeof(struct lpfc_rsrc_blks);
6013 switch (type) {
6014 case LPFC_RSC_TYPE_FCOE_RPI:
6015 phba->sli4_hba.rpi_bmask = kcalloc(longs,
6016 sizeof(unsigned long),
6017 GFP_KERNEL);
6018 if (unlikely(!phba->sli4_hba.rpi_bmask)) {
6019 rc = -ENOMEM;
6020 goto err_exit;
6021 }
6022 phba->sli4_hba.rpi_ids = kcalloc(rsrc_id_cnt,
6023 sizeof(uint16_t),
6024 GFP_KERNEL);
6025 if (unlikely(!phba->sli4_hba.rpi_ids)) {
6026 kfree(phba->sli4_hba.rpi_bmask);
6027 rc = -ENOMEM;
6028 goto err_exit;
6029 }
6030
6031
6032
6033
6034
6035
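		/* Track the number of RPI ids the port actually provided */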
6036 phba->sli4_hba.next_rpi = rsrc_id_cnt;
6037
6038
6039 bmask = phba->sli4_hba.rpi_bmask;
6040 ids = phba->sli4_hba.rpi_ids;
6041 ext_blk_list = &phba->sli4_hba.lpfc_rpi_blk_list;
6042 break;
6043 case LPFC_RSC_TYPE_FCOE_VPI:
6044 phba->vpi_bmask = kcalloc(longs, sizeof(unsigned long),
6045 GFP_KERNEL);
6046 if (unlikely(!phba->vpi_bmask)) {
6047 rc = -ENOMEM;
6048 goto err_exit;
6049 }
6050 phba->vpi_ids = kcalloc(rsrc_id_cnt, sizeof(uint16_t),
6051 GFP_KERNEL);
6052 if (unlikely(!phba->vpi_ids)) {
6053 kfree(phba->vpi_bmask);
6054 rc = -ENOMEM;
6055 goto err_exit;
6056 }
6057
6058
6059 bmask = phba->vpi_bmask;
6060 ids = phba->vpi_ids;
6061 ext_blk_list = &phba->lpfc_vpi_blk_list;
6062 break;
6063 case LPFC_RSC_TYPE_FCOE_XRI:
6064 phba->sli4_hba.xri_bmask = kcalloc(longs,
6065 sizeof(unsigned long),
6066 GFP_KERNEL);
6067 if (unlikely(!phba->sli4_hba.xri_bmask)) {
6068 rc = -ENOMEM;
6069 goto err_exit;
6070 }
6071 phba->sli4_hba.max_cfg_param.xri_used = 0;
6072 phba->sli4_hba.xri_ids = kcalloc(rsrc_id_cnt,
6073 sizeof(uint16_t),
6074 GFP_KERNEL);
6075 if (unlikely(!phba->sli4_hba.xri_ids)) {
6076 kfree(phba->sli4_hba.xri_bmask);
6077 rc = -ENOMEM;
6078 goto err_exit;
6079 }
6080
6081
6082 bmask = phba->sli4_hba.xri_bmask;
6083 ids = phba->sli4_hba.xri_ids;
6084 ext_blk_list = &phba->sli4_hba.lpfc_xri_blk_list;
6085 break;
6086 case LPFC_RSC_TYPE_FCOE_VFI:
6087 phba->sli4_hba.vfi_bmask = kcalloc(longs,
6088 sizeof(unsigned long),
6089 GFP_KERNEL);
6090 if (unlikely(!phba->sli4_hba.vfi_bmask)) {
6091 rc = -ENOMEM;
6092 goto err_exit;
6093 }
6094 phba->sli4_hba.vfi_ids = kcalloc(rsrc_id_cnt,
6095 sizeof(uint16_t),
6096 GFP_KERNEL);
6097 if (unlikely(!phba->sli4_hba.vfi_ids)) {
6098 kfree(phba->sli4_hba.vfi_bmask);
6099 rc = -ENOMEM;
6100 goto err_exit;
6101 }
6102
6103
6104 bmask = phba->sli4_hba.vfi_bmask;
6105 ids = phba->sli4_hba.vfi_ids;
6106 ext_blk_list = &phba->sli4_hba.lpfc_vfi_blk_list;
6107 break;
6108 default:
6109
6110 id_array = NULL;
6111 bmask = NULL;
6112 ids = NULL;
6113 ext_blk_list = NULL;
6114 goto err_exit;
6115 }
6116
6117
6118
6119
6120
6121
6122
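	/*
	 * Walk the returned extent ids.  Each response word carries two
	 * 16-bit starting ids, so the word index (k) advances every other
	 * extent.  Expand each extent into individual ids in the id array.
	 */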
6123 for (i = 0, j = 0, k = 0; i < rsrc_cnt; i++) {
6124 if ((i % 2) == 0)
6125 rsrc_id = bf_get(lpfc_mbx_rsrc_id_word4_0,
6126 &id_array[k]);
6127 else
6128 rsrc_id = bf_get(lpfc_mbx_rsrc_id_word4_1,
6129 &id_array[k]);
6130
6131 rsrc_blks = kzalloc(length, GFP_KERNEL);
6132 if (unlikely(!rsrc_blks)) {
6133 rc = -ENOMEM;
6134 kfree(bmask);
6135 kfree(ids);
6136 goto err_exit;
6137 }
6138 rsrc_blks->rsrc_start = rsrc_id;
6139 rsrc_blks->rsrc_size = rsrc_size;
6140 list_add_tail(&rsrc_blks->list, ext_blk_list);
6141 rsrc_start = rsrc_id;
6142 if ((type == LPFC_RSC_TYPE_FCOE_XRI) && (j == 0)) {
6143 phba->sli4_hba.io_xri_start = rsrc_start +
6144 lpfc_sli4_get_iocb_cnt(phba);
6145 }
6146
6147 while (rsrc_id < (rsrc_start + rsrc_size)) {
6148 ids[j] = rsrc_id;
6149 rsrc_id++;
6150 j++;
6151 }
6152
6153 if ((i % 2) == 1)
6154 k++;
6155 }
6156 err_exit:
6157 lpfc_sli4_mbox_cmd_free(phba, mbox);
6158 return rc;
6159}
6171
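/**
 * lpfc_sli4_dealloc_extent - Release all port resource extents of a type
 * @phba: Pointer to HBA context object.
 * @type: The extent type to release.
 *
 * Issues the DEALLOC_RSRC_EXTENT mailbox command for @type and, on
 * success, frees the driver's bitmask, id array and extent block list
 * for that resource.
 *
 * Returns 0 on success or -ENOMEM/-EIO on failure.
 **/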
6172static int
6173lpfc_sli4_dealloc_extent(struct lpfc_hba *phba, uint16_t type)
6174{
6175 int rc;
6176 uint32_t length, mbox_tmo = 0;
6177 LPFC_MBOXQ_t *mbox;
6178 struct lpfc_mbx_dealloc_rsrc_extents *dealloc_rsrc;
6179 struct lpfc_rsrc_blks *rsrc_blk, *rsrc_blk_next;
6180
6181 mbox = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
6182 if (!mbox)
6183 return -ENOMEM;
6184
6185
6186
6187
6188
6189
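	/*
	 * The dealloc request is sent embedded; only the resource type is
	 * needed and the command releases the extents of that type on the
	 * port.
	 */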
6190 length = (sizeof(struct lpfc_mbx_dealloc_rsrc_extents) -
6191 sizeof(struct lpfc_sli4_cfg_mhdr));
6192 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON,
6193 LPFC_MBOX_OPCODE_DEALLOC_RSRC_EXTENT,
6194 length, LPFC_SLI4_MBX_EMBED);
6195
6196
6197 rc = lpfc_sli4_mbox_rsrc_extent(phba, mbox, 0, type,
6198 LPFC_SLI4_MBX_EMBED);
6199 if (unlikely(rc)) {
6200 rc = -EIO;
6201 goto out_free_mbox;
6202 }
6203 if (!phba->sli4_hba.intr_enable)
6204 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
6205 else {
6206 mbox_tmo = lpfc_mbox_tmo_val(phba, mbox);
6207 rc = lpfc_sli_issue_mbox_wait(phba, mbox, mbox_tmo);
6208 }
6209 if (unlikely(rc)) {
6210 rc = -EIO;
6211 goto out_free_mbox;
6212 }
6213
6214 dealloc_rsrc = &mbox->u.mqe.un.dealloc_rsrc_extents;
6215 if (bf_get(lpfc_mbox_hdr_status,
6216 &dealloc_rsrc->header.cfg_shdr.response)) {
6217 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_INIT,
6218 "2919 Failed to release resource extents "
6219 "for type %d - Status 0x%x Add'l Status 0x%x. "
6220 "Resource memory not released.\n",
6221 type,
6222 bf_get(lpfc_mbox_hdr_status,
6223 &dealloc_rsrc->header.cfg_shdr.response),
6224 bf_get(lpfc_mbox_hdr_add_status,
6225 &dealloc_rsrc->header.cfg_shdr.response));
6226 rc = -EIO;
6227 goto out_free_mbox;
6228 }
6229
6230
6231 switch (type) {
6232 case LPFC_RSC_TYPE_FCOE_VPI:
6233 kfree(phba->vpi_bmask);
6234 kfree(phba->vpi_ids);
6235 bf_set(lpfc_vpi_rsrc_rdy, &phba->sli4_hba.sli4_flags, 0);
6236 list_for_each_entry_safe(rsrc_blk, rsrc_blk_next,
6237 &phba->lpfc_vpi_blk_list, list) {
6238 list_del_init(&rsrc_blk->list);
6239 kfree(rsrc_blk);
6240 }
6241 phba->sli4_hba.max_cfg_param.vpi_used = 0;
6242 break;
6243 case LPFC_RSC_TYPE_FCOE_XRI:
6244 kfree(phba->sli4_hba.xri_bmask);
6245 kfree(phba->sli4_hba.xri_ids);
6246 list_for_each_entry_safe(rsrc_blk, rsrc_blk_next,
6247 &phba->sli4_hba.lpfc_xri_blk_list, list) {
6248 list_del_init(&rsrc_blk->list);
6249 kfree(rsrc_blk);
6250 }
6251 break;
6252 case LPFC_RSC_TYPE_FCOE_VFI:
6253 kfree(phba->sli4_hba.vfi_bmask);
6254 kfree(phba->sli4_hba.vfi_ids);
6255 bf_set(lpfc_vfi_rsrc_rdy, &phba->sli4_hba.sli4_flags, 0);
6256 list_for_each_entry_safe(rsrc_blk, rsrc_blk_next,
6257 &phba->sli4_hba.lpfc_vfi_blk_list, list) {
6258 list_del_init(&rsrc_blk->list);
6259 kfree(rsrc_blk);
6260 }
6261 break;
6262 case LPFC_RSC_TYPE_FCOE_RPI:
6263
6264 list_for_each_entry_safe(rsrc_blk, rsrc_blk_next,
6265 &phba->sli4_hba.lpfc_rpi_blk_list, list) {
6266 list_del_init(&rsrc_blk->list);
6267 kfree(rsrc_blk);
6268 }
6269 break;
6270 default:
6271 break;
6272 }
6273
6274 bf_set(lpfc_idx_rsrc_rdy, &phba->sli4_hba.sli4_flags, 0);
6275
6276 out_free_mbox:
6277 mempool_free(mbox, phba->mbox_mem_pool);
6278 return rc;
6279}
6280
6281static void
6282lpfc_set_features(struct lpfc_hba *phba, LPFC_MBOXQ_t *mbox,
6283 uint32_t feature)
6284{
6285 uint32_t len;
6286
6287 len = sizeof(struct lpfc_mbx_set_feature) -
6288 sizeof(struct lpfc_sli4_cfg_mhdr);
6289 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON,
6290 LPFC_MBOX_OPCODE_SET_FEATURES, len,
6291 LPFC_SLI4_MBX_EMBED);
6292
6293 switch (feature) {
6294 case LPFC_SET_UE_RECOVERY:
6295 bf_set(lpfc_mbx_set_feature_UER,
6296 &mbox->u.mqe.un.set_feature, 1);
6297 mbox->u.mqe.un.set_feature.feature = LPFC_SET_UE_RECOVERY;
6298 mbox->u.mqe.un.set_feature.param_len = 8;
6299 break;
6300 case LPFC_SET_MDS_DIAGS:
6301 bf_set(lpfc_mbx_set_feature_mds,
6302 &mbox->u.mqe.un.set_feature, 1);
6303 bf_set(lpfc_mbx_set_feature_mds_deep_loopbk,
6304 &mbox->u.mqe.un.set_feature, 1);
6305 mbox->u.mqe.un.set_feature.feature = LPFC_SET_MDS_DIAGS;
6306 mbox->u.mqe.un.set_feature.param_len = 8;
6307 break;
6308 case LPFC_SET_DUAL_DUMP:
6309 bf_set(lpfc_mbx_set_feature_dd,
6310 &mbox->u.mqe.un.set_feature, LPFC_ENABLE_DUAL_DUMP);
6311 bf_set(lpfc_mbx_set_feature_ddquery,
6312 &mbox->u.mqe.un.set_feature, 0);
6313 mbox->u.mqe.un.set_feature.feature = LPFC_SET_DUAL_DUMP;
6314 mbox->u.mqe.un.set_feature.param_len = 4;
6315 break;
6316 }
6317
6318 return;
6319}
6327
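/**
 * lpfc_ras_stop_fwlog - Disable firmware logging to host memory
 * @phba: Pointer to HBA context object.
 *
 * Marks the RAS firmware-log state INACTIVE, writes the device control
 * register to stop the firmware from DMAing log data to the host
 * buffers, then waits briefly for the write to take effect.
 **/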
6328void
6329lpfc_ras_stop_fwlog(struct lpfc_hba *phba)
6330{
6331 struct lpfc_ras_fwlog *ras_fwlog = &phba->ras_fwlog;
6332
6333 spin_lock_irq(&phba->hbalock);
6334 ras_fwlog->state = INACTIVE;
6335 spin_unlock_irq(&phba->hbalock);
6336
6337
6338 writel(LPFC_CTL_PDEV_CTL_DDL_RAS,
6339 phba->sli4_hba.conf_regs_memmap_p + LPFC_CTL_PDEV_CTL_OFFSET);
6340
6341
6342 usleep_range(10 * 1000, 20 * 1000);
6343}
6351
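/**
 * lpfc_sli4_ras_dma_free - Free DMA buffers used for firmware logging
 * @phba: Pointer to HBA context object.
 *
 * Releases every buffer on the firmware-log buffer list as well as the
 * LWPD buffer, then marks the RAS state INACTIVE.
 **/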
6352void
6353lpfc_sli4_ras_dma_free(struct lpfc_hba *phba)
6354{
6355 struct lpfc_ras_fwlog *ras_fwlog = &phba->ras_fwlog;
6356 struct lpfc_dmabuf *dmabuf, *next;
6357
6358 if (!list_empty(&ras_fwlog->fwlog_buff_list)) {
6359 list_for_each_entry_safe(dmabuf, next,
6360 &ras_fwlog->fwlog_buff_list,
6361 list) {
6362 list_del(&dmabuf->list);
6363 dma_free_coherent(&phba->pcidev->dev,
6364 LPFC_RAS_MAX_ENTRY_SIZE,
6365 dmabuf->virt, dmabuf->phys);
6366 kfree(dmabuf);
6367 }
6368 }
6369
6370 if (ras_fwlog->lwpd.virt) {
6371 dma_free_coherent(&phba->pcidev->dev,
6372 sizeof(uint32_t) * 2,
6373 ras_fwlog->lwpd.virt,
6374 ras_fwlog->lwpd.phys);
6375 ras_fwlog->lwpd.virt = NULL;
6376 }
6377
6378 spin_lock_irq(&phba->hbalock);
6379 ras_fwlog->state = INACTIVE;
6380 spin_unlock_irq(&phba->hbalock);
6381}
6393
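/**
 * lpfc_sli4_ras_dma_alloc - Allocate DMA buffers for firmware logging
 * @phba: Pointer to HBA context object.
 * @fwlog_buff_count: Number of log buffers to allocate.
 *
 * Allocates the LWPD buffer plus @fwlog_buff_count buffers of
 * LPFC_RAS_MAX_ENTRY_SIZE bytes each and queues them on the firmware-log
 * buffer list.  On any failure all RAS DMA memory is freed again.
 *
 * Returns 0 on success or -ENOMEM on allocation failure.
 **/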
6394static int
6395lpfc_sli4_ras_dma_alloc(struct lpfc_hba *phba,
6396 uint32_t fwlog_buff_count)
6397{
6398 struct lpfc_ras_fwlog *ras_fwlog = &phba->ras_fwlog;
6399 struct lpfc_dmabuf *dmabuf;
6400 int rc = 0, i = 0;
6401
6402
6403 INIT_LIST_HEAD(&ras_fwlog->fwlog_buff_list);
6404
6405
6406 ras_fwlog->lwpd.virt = dma_alloc_coherent(&phba->pcidev->dev,
6407 sizeof(uint32_t) * 2,
6408 &ras_fwlog->lwpd.phys,
6409 GFP_KERNEL);
6410 if (!ras_fwlog->lwpd.virt) {
6411 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
6412 "6185 LWPD Memory Alloc Failed\n");
6413
6414 return -ENOMEM;
6415 }
6416
6417 ras_fwlog->fw_buffcount = fwlog_buff_count;
6418 for (i = 0; i < ras_fwlog->fw_buffcount; i++) {
6419 dmabuf = kzalloc(sizeof(struct lpfc_dmabuf),
6420 GFP_KERNEL);
6421 if (!dmabuf) {
6422 rc = -ENOMEM;
6423 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
6424 "6186 Memory Alloc failed FW logging");
6425 goto free_mem;
6426 }
6427
6428 dmabuf->virt = dma_alloc_coherent(&phba->pcidev->dev,
6429 LPFC_RAS_MAX_ENTRY_SIZE,
6430 &dmabuf->phys, GFP_KERNEL);
6431 if (!dmabuf->virt) {
6432 kfree(dmabuf);
6433 rc = -ENOMEM;
6434 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
6435 "6187 DMA Alloc Failed FW logging");
6436 goto free_mem;
6437 }
6438 dmabuf->buffer_tag = i;
6439 list_add_tail(&dmabuf->list, &ras_fwlog->fwlog_buff_list);
6440 }
6441
6442free_mem:
6443 if (rc)
6444 lpfc_sli4_ras_dma_free(phba);
6445
6446 return rc;
6447}
6455
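/**
 * lpfc_sli4_ras_mbox_cmpl - Completion handler for the RAS fwlog mailbox
 * @phba: Pointer to HBA context object.
 * @pmb: Pointer to the completed mailbox command.
 *
 * Checks the mailbox and config-header status of the firmware logging
 * mailbox command.  On success the RAS state becomes ACTIVE; on failure
 * firmware logging is marked unsupported and the RAS DMA buffers are
 * freed.
 **/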
6456static void
6457lpfc_sli4_ras_mbox_cmpl(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
6458{
6459 MAILBOX_t *mb;
6460 union lpfc_sli4_cfg_shdr *shdr;
6461 uint32_t shdr_status, shdr_add_status;
6462 struct lpfc_ras_fwlog *ras_fwlog = &phba->ras_fwlog;
6463
6464 mb = &pmb->u.mb;
6465
6466 shdr = (union lpfc_sli4_cfg_shdr *)
6467 &pmb->u.mqe.un.ras_fwlog.header.cfg_shdr;
6468 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
6469 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
6470
6471 if (mb->mbxStatus != MBX_SUCCESS || shdr_status) {
6472 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX,
6473 "6188 FW LOG mailbox "
6474 "completed with status x%x add_status x%x,"
6475 " mbx status x%x\n",
6476 shdr_status, shdr_add_status, mb->mbxStatus);
6477
6478 ras_fwlog->ras_hwsupport = false;
6479 goto disable_ras;
6480 }
6481
6482 spin_lock_irq(&phba->hbalock);
6483 ras_fwlog->state = ACTIVE;
6484 spin_unlock_irq(&phba->hbalock);
6485 mempool_free(pmb, phba->mbox_mem_pool);
6486
6487 return;
6488
6489disable_ras:
6490
6491 lpfc_sli4_ras_dma_free(phba);
6492 mempool_free(pmb, phba->mbox_mem_pool);
6493}
6503
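/**
 * lpfc_sli4_ras_fwlog_init - Initialize memory and post the fwlog mailbox
 * @phba: Pointer to HBA context object.
 * @fwlog_level: Logging verbosity requested of the firmware.
 * @fwlog_enable: Enable/disable firmware logging.
 *
 * Allocates the firmware-log DMA buffers (reusing them if already
 * present), builds the SET_DIAG_LOG_OPTION mailbox command with the
 * buffer and LWPD addresses, and issues it asynchronously with
 * lpfc_sli4_ras_mbox_cmpl() as the completion handler.
 *
 * Returns 0 on success or a negative error code on failure.
 **/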
6504int
6505lpfc_sli4_ras_fwlog_init(struct lpfc_hba *phba,
6506 uint32_t fwlog_level,
6507 uint32_t fwlog_enable)
6508{
6509 struct lpfc_ras_fwlog *ras_fwlog = &phba->ras_fwlog;
6510 struct lpfc_mbx_set_ras_fwlog *mbx_fwlog = NULL;
6511 struct lpfc_dmabuf *dmabuf;
6512 LPFC_MBOXQ_t *mbox;
6513 uint32_t len = 0, fwlog_buffsize, fwlog_entry_count;
6514 int rc = 0;
6515
6516 spin_lock_irq(&phba->hbalock);
6517 ras_fwlog->state = INACTIVE;
6518 spin_unlock_irq(&phba->hbalock);
6519
6520 fwlog_buffsize = (LPFC_RAS_MIN_BUFF_POST_SIZE *
6521 phba->cfg_ras_fwlog_buffsize);
6522 fwlog_entry_count = (fwlog_buffsize/LPFC_RAS_MAX_ENTRY_SIZE);
6523
6524
6525
6526
6527
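	/* Reuse DMA buffers already allocated by an earlier enable */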
6528 if (!ras_fwlog->lwpd.virt) {
6529 rc = lpfc_sli4_ras_dma_alloc(phba, fwlog_entry_count);
6530 if (rc) {
6531 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
6532 "6189 FW Log Memory Allocation Failed");
6533 return rc;
6534 }
6535 }
6536
6537
6538 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
6539 if (!mbox) {
6540 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
6541 "6190 RAS MBX Alloc Failed");
6542 rc = -ENOMEM;
6543 goto mem_free;
6544 }
6545
6546 ras_fwlog->fw_loglevel = fwlog_level;
6547 len = (sizeof(struct lpfc_mbx_set_ras_fwlog) -
6548 sizeof(struct lpfc_sli4_cfg_mhdr));
6549
6550 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_LOWLEVEL,
6551 LPFC_MBOX_OPCODE_SET_DIAG_LOG_OPTION,
6552 len, LPFC_SLI4_MBX_EMBED);
6553
6554 mbx_fwlog = (struct lpfc_mbx_set_ras_fwlog *)&mbox->u.mqe.un.ras_fwlog;
6555 bf_set(lpfc_fwlog_enable, &mbx_fwlog->u.request,
6556 fwlog_enable);
6557 bf_set(lpfc_fwlog_loglvl, &mbx_fwlog->u.request,
6558 ras_fwlog->fw_loglevel);
6559 bf_set(lpfc_fwlog_buffcnt, &mbx_fwlog->u.request,
6560 ras_fwlog->fw_buffcount);
6561 bf_set(lpfc_fwlog_buffsz, &mbx_fwlog->u.request,
6562 LPFC_RAS_MAX_ENTRY_SIZE/SLI4_PAGE_SIZE);
6563
6564
6565 list_for_each_entry(dmabuf, &ras_fwlog->fwlog_buff_list, list) {
6566 memset(dmabuf->virt, 0, LPFC_RAS_MAX_ENTRY_SIZE);
6567
6568 mbx_fwlog->u.request.buff_fwlog[dmabuf->buffer_tag].addr_lo =
6569 putPaddrLow(dmabuf->phys);
6570
6571 mbx_fwlog->u.request.buff_fwlog[dmabuf->buffer_tag].addr_hi =
6572 putPaddrHigh(dmabuf->phys);
6573 }
6574
6575
6576 mbx_fwlog->u.request.lwpd.addr_lo = putPaddrLow(ras_fwlog->lwpd.phys);
6577 mbx_fwlog->u.request.lwpd.addr_hi = putPaddrHigh(ras_fwlog->lwpd.phys);
6578
6579 spin_lock_irq(&phba->hbalock);
6580 ras_fwlog->state = REG_INPROGRESS;
6581 spin_unlock_irq(&phba->hbalock);
6582 mbox->vport = phba->pport;
6583 mbox->mbox_cmpl = lpfc_sli4_ras_mbox_cmpl;
6584
6585 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT);
6586
6587 if (rc == MBX_NOT_FINISHED) {
6588 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
6589 "6191 FW-Log Mailbox failed. "
6590 "status %d mbxStatus : x%x", rc,
6591 bf_get(lpfc_mqe_status, &mbox->u.mqe));
6592 mempool_free(mbox, phba->mbox_mem_pool);
6593 rc = -EIO;
6594 goto mem_free;
6595 } else
6596 rc = 0;
6597mem_free:
6598 if (rc)
6599 lpfc_sli4_ras_dma_free(phba);
6600
6601 return rc;
6602}
6609
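/**
 * lpfc_sli4_ras_setup - Check for RAS support and enable firmware logging
 * @phba: Pointer to HBA context object.
 *
 * If the adapter supports firmware logging, starts it at the configured
 * log level.
 **/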
6610void
6611lpfc_sli4_ras_setup(struct lpfc_hba *phba)
6612{
6613
6614 if (lpfc_check_fwlog_support(phba))
6615 return;
6616
6617 lpfc_sli4_ras_fwlog_init(phba, phba->cfg_ras_fwlog_level,
6618 LPFC_RAS_ENABLE_LOGGING);
6619}
6626
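/**
 * lpfc_sli4_alloc_resource_identifiers - Allocate all SLI4 resource ids
 * @phba: Pointer to HBA context object.
 *
 * Provisions the RPI, VPI, XRI and VFI identifiers for the port.  When
 * the port uses resource extents, the extents are (re)allocated through
 * the extent mailbox commands; otherwise the ids are derived from the
 * base/max values returned by READ_CONFIG and tracked with a bitmask and
 * id array per resource type.
 *
 * Returns 0 on success or a negative error code on failure.
 **/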
6627int
6628lpfc_sli4_alloc_resource_identifiers(struct lpfc_hba *phba)
6629{
6630 int i, rc, error = 0;
6631 uint16_t count, base;
6632 unsigned long longs;
6633
6634 if (!phba->sli4_hba.rpi_hdrs_in_use)
6635 phba->sli4_hba.next_rpi = phba->sli4_hba.max_cfg_param.max_rpi;
6636 if (phba->sli4_hba.extents_in_use) {
6637
6638
6639
6640
6641
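		/*
		 * The port uses resource extents.  If the ids were already
		 * provisioned, verify that the port's extent configuration
		 * has not changed before reusing them.
		 */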
6642 if (bf_get(lpfc_idx_rsrc_rdy, &phba->sli4_hba.sli4_flags) ==
6643 LPFC_IDX_RSRC_RDY) {
6644
6645
6646
6647
6648
6649 rc = lpfc_sli4_chk_avail_extnt_rsrc(phba,
6650 LPFC_RSC_TYPE_FCOE_VFI);
6651 if (rc != 0)
6652 error++;
6653 rc = lpfc_sli4_chk_avail_extnt_rsrc(phba,
6654 LPFC_RSC_TYPE_FCOE_VPI);
6655 if (rc != 0)
6656 error++;
6657 rc = lpfc_sli4_chk_avail_extnt_rsrc(phba,
6658 LPFC_RSC_TYPE_FCOE_XRI);
6659 if (rc != 0)
6660 error++;
6661 rc = lpfc_sli4_chk_avail_extnt_rsrc(phba,
6662 LPFC_RSC_TYPE_FCOE_RPI);
6663 if (rc != 0)
6664 error++;
6665
6666
6667
6668
6669
6670
6671
6672 if (error) {
6673 lpfc_printf_log(phba, KERN_INFO,
6674 LOG_MBOX | LOG_INIT,
6675 "2931 Detected extent resource "
6676 "change. Reallocating all "
6677 "extents.\n");
6678 rc = lpfc_sli4_dealloc_extent(phba,
6679 LPFC_RSC_TYPE_FCOE_VFI);
6680 rc = lpfc_sli4_dealloc_extent(phba,
6681 LPFC_RSC_TYPE_FCOE_VPI);
6682 rc = lpfc_sli4_dealloc_extent(phba,
6683 LPFC_RSC_TYPE_FCOE_XRI);
6684 rc = lpfc_sli4_dealloc_extent(phba,
6685 LPFC_RSC_TYPE_FCOE_RPI);
6686 } else
6687 return 0;
6688 }
6689
6690 rc = lpfc_sli4_alloc_extent(phba, LPFC_RSC_TYPE_FCOE_VFI);
6691 if (unlikely(rc))
6692 goto err_exit;
6693
6694 rc = lpfc_sli4_alloc_extent(phba, LPFC_RSC_TYPE_FCOE_VPI);
6695 if (unlikely(rc))
6696 goto err_exit;
6697
6698 rc = lpfc_sli4_alloc_extent(phba, LPFC_RSC_TYPE_FCOE_RPI);
6699 if (unlikely(rc))
6700 goto err_exit;
6701
6702 rc = lpfc_sli4_alloc_extent(phba, LPFC_RSC_TYPE_FCOE_XRI);
6703 if (unlikely(rc))
6704 goto err_exit;
6705 bf_set(lpfc_idx_rsrc_rdy, &phba->sli4_hba.sli4_flags,
6706 LPFC_IDX_RSRC_RDY);
6707 return rc;
6708 } else {
6709
6710
6711
6712
6713
6714
6715
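		/*
		 * The port does not use extents.  Derive the RPI, VPI, XRI
		 * and VFI ids directly from the base and max values reported
		 * by READ_CONFIG, releasing any previous provisioning first.
		 */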
6716 if (bf_get(lpfc_idx_rsrc_rdy, &phba->sli4_hba.sli4_flags) ==
6717 LPFC_IDX_RSRC_RDY) {
6718 lpfc_sli4_dealloc_resource_identifiers(phba);
6719 lpfc_sli4_remove_rpis(phba);
6720 }
6721
6722 count = phba->sli4_hba.max_cfg_param.max_rpi;
6723 if (count <= 0) {
6724 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
6725 "3279 Invalid provisioning of "
6726 "rpi:%d\n", count);
6727 rc = -EINVAL;
6728 goto err_exit;
6729 }
6730 base = phba->sli4_hba.max_cfg_param.rpi_base;
6731 longs = (count + BITS_PER_LONG - 1) / BITS_PER_LONG;
6732 phba->sli4_hba.rpi_bmask = kcalloc(longs,
6733 sizeof(unsigned long),
6734 GFP_KERNEL);
6735 if (unlikely(!phba->sli4_hba.rpi_bmask)) {
6736 rc = -ENOMEM;
6737 goto err_exit;
6738 }
6739 phba->sli4_hba.rpi_ids = kcalloc(count, sizeof(uint16_t),
6740 GFP_KERNEL);
6741 if (unlikely(!phba->sli4_hba.rpi_ids)) {
6742 rc = -ENOMEM;
6743 goto free_rpi_bmask;
6744 }
6745
6746 for (i = 0; i < count; i++)
6747 phba->sli4_hba.rpi_ids[i] = base + i;
6748
6749
6750 count = phba->sli4_hba.max_cfg_param.max_vpi;
6751 if (count <= 0) {
6752 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
6753 "3280 Invalid provisioning of "
6754 "vpi:%d\n", count);
6755 rc = -EINVAL;
6756 goto free_rpi_ids;
6757 }
6758 base = phba->sli4_hba.max_cfg_param.vpi_base;
6759 longs = (count + BITS_PER_LONG - 1) / BITS_PER_LONG;
6760 phba->vpi_bmask = kcalloc(longs, sizeof(unsigned long),
6761 GFP_KERNEL);
6762 if (unlikely(!phba->vpi_bmask)) {
6763 rc = -ENOMEM;
6764 goto free_rpi_ids;
6765 }
6766 phba->vpi_ids = kcalloc(count, sizeof(uint16_t),
6767 GFP_KERNEL);
6768 if (unlikely(!phba->vpi_ids)) {
6769 rc = -ENOMEM;
6770 goto free_vpi_bmask;
6771 }
6772
6773 for (i = 0; i < count; i++)
6774 phba->vpi_ids[i] = base + i;
6775
6776
6777 count = phba->sli4_hba.max_cfg_param.max_xri;
6778 if (count <= 0) {
6779 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
6780 "3281 Invalid provisioning of "
6781 "xri:%d\n", count);
6782 rc = -EINVAL;
6783 goto free_vpi_ids;
6784 }
6785 base = phba->sli4_hba.max_cfg_param.xri_base;
6786 longs = (count + BITS_PER_LONG - 1) / BITS_PER_LONG;
6787 phba->sli4_hba.xri_bmask = kcalloc(longs,
6788 sizeof(unsigned long),
6789 GFP_KERNEL);
6790 if (unlikely(!phba->sli4_hba.xri_bmask)) {
6791 rc = -ENOMEM;
6792 goto free_vpi_ids;
6793 }
6794 phba->sli4_hba.max_cfg_param.xri_used = 0;
6795 phba->sli4_hba.xri_ids = kcalloc(count, sizeof(uint16_t),
6796 GFP_KERNEL);
6797 if (unlikely(!phba->sli4_hba.xri_ids)) {
6798 rc = -ENOMEM;
6799 goto free_xri_bmask;
6800 }
6801
6802 for (i = 0; i < count; i++)
6803 phba->sli4_hba.xri_ids[i] = base + i;
6804
6805
6806 count = phba->sli4_hba.max_cfg_param.max_vfi;
6807 if (count <= 0) {
6808 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
6809 "3282 Invalid provisioning of "
6810 "vfi:%d\n", count);
6811 rc = -EINVAL;
6812 goto free_xri_ids;
6813 }
6814 base = phba->sli4_hba.max_cfg_param.vfi_base;
6815 longs = (count + BITS_PER_LONG - 1) / BITS_PER_LONG;
6816 phba->sli4_hba.vfi_bmask = kcalloc(longs,
6817 sizeof(unsigned long),
6818 GFP_KERNEL);
6819 if (unlikely(!phba->sli4_hba.vfi_bmask)) {
6820 rc = -ENOMEM;
6821 goto free_xri_ids;
6822 }
6823 phba->sli4_hba.vfi_ids = kcalloc(count, sizeof(uint16_t),
6824 GFP_KERNEL);
6825 if (unlikely(!phba->sli4_hba.vfi_ids)) {
6826 rc = -ENOMEM;
6827 goto free_vfi_bmask;
6828 }
6829
6830 for (i = 0; i < count; i++)
6831 phba->sli4_hba.vfi_ids[i] = base + i;
6832
6833
6834
6835
6836
6837 bf_set(lpfc_idx_rsrc_rdy, &phba->sli4_hba.sli4_flags,
6838 LPFC_IDX_RSRC_RDY);
6839 return 0;
6840 }
6841
6842 free_vfi_bmask:
6843 kfree(phba->sli4_hba.vfi_bmask);
6844 phba->sli4_hba.vfi_bmask = NULL;
6845 free_xri_ids:
6846 kfree(phba->sli4_hba.xri_ids);
6847 phba->sli4_hba.xri_ids = NULL;
6848 free_xri_bmask:
6849 kfree(phba->sli4_hba.xri_bmask);
6850 phba->sli4_hba.xri_bmask = NULL;
6851 free_vpi_ids:
6852 kfree(phba->vpi_ids);
6853 phba->vpi_ids = NULL;
6854 free_vpi_bmask:
6855 kfree(phba->vpi_bmask);
6856 phba->vpi_bmask = NULL;
6857 free_rpi_ids:
6858 kfree(phba->sli4_hba.rpi_ids);
6859 phba->sli4_hba.rpi_ids = NULL;
6860 free_rpi_bmask:
6861 kfree(phba->sli4_hba.rpi_bmask);
6862 phba->sli4_hba.rpi_bmask = NULL;
6863 err_exit:
6864 return rc;
6865}
6873
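/**
 * lpfc_sli4_dealloc_resource_identifiers - Release all SLI4 resource ids
 * @phba: Pointer to HBA context object.
 *
 * Releases the RPI, VPI, XRI and VFI resources, either by deallocating
 * the port extents or by freeing the driver's bitmasks and id arrays,
 * and clears the resource-ready flags.
 *
 * Returns 0.
 **/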
6874int
6875lpfc_sli4_dealloc_resource_identifiers(struct lpfc_hba *phba)
6876{
6877 if (phba->sli4_hba.extents_in_use) {
6878 lpfc_sli4_dealloc_extent(phba, LPFC_RSC_TYPE_FCOE_VPI);
6879 lpfc_sli4_dealloc_extent(phba, LPFC_RSC_TYPE_FCOE_RPI);
6880 lpfc_sli4_dealloc_extent(phba, LPFC_RSC_TYPE_FCOE_XRI);
6881 lpfc_sli4_dealloc_extent(phba, LPFC_RSC_TYPE_FCOE_VFI);
6882 } else {
6883 kfree(phba->vpi_bmask);
6884 phba->sli4_hba.max_cfg_param.vpi_used = 0;
6885 kfree(phba->vpi_ids);
6886 bf_set(lpfc_vpi_rsrc_rdy, &phba->sli4_hba.sli4_flags, 0);
6887 kfree(phba->sli4_hba.xri_bmask);
6888 kfree(phba->sli4_hba.xri_ids);
6889 kfree(phba->sli4_hba.vfi_bmask);
6890 kfree(phba->sli4_hba.vfi_ids);
6891 bf_set(lpfc_vfi_rsrc_rdy, &phba->sli4_hba.sli4_flags, 0);
6892 bf_set(lpfc_idx_rsrc_rdy, &phba->sli4_hba.sli4_flags, 0);
6893 }
6894
6895 return 0;
6896}
6907
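/**
 * lpfc_sli4_get_allocated_extnts - Read the allocated extents for a type
 * @phba: Pointer to HBA context object.
 * @type: The extent type to query.
 * @extnt_cnt: Returned number of extents currently allocated.
 * @extnt_size: Returned number of resource ids per extent.
 *
 * Issues the GET_ALLOC_RSRC_EXTENT mailbox command and returns the
 * allocated extent count reported by the port.  The extent size is taken
 * from the driver's existing extent block list.
 *
 * Returns 0 on success or -ENOMEM/-EIO on failure.
 **/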
6908int
6909lpfc_sli4_get_allocated_extnts(struct lpfc_hba *phba, uint16_t type,
6910 uint16_t *extnt_cnt, uint16_t *extnt_size)
6911{
6912 bool emb;
6913 int rc = 0;
6914 uint16_t curr_blks = 0;
6915 uint32_t req_len, emb_len;
6916 uint32_t alloc_len, mbox_tmo;
6917 struct list_head *blk_list_head;
6918 struct lpfc_rsrc_blks *rsrc_blk;
6919 LPFC_MBOXQ_t *mbox;
6920 void *virtaddr = NULL;
6921 struct lpfc_mbx_nembed_rsrc_extent *n_rsrc;
6922 struct lpfc_mbx_alloc_rsrc_extents *rsrc_ext;
6923 union lpfc_sli4_cfg_shdr *shdr;
6924
6925 switch (type) {
6926 case LPFC_RSC_TYPE_FCOE_VPI:
6927 blk_list_head = &phba->lpfc_vpi_blk_list;
6928 break;
6929 case LPFC_RSC_TYPE_FCOE_XRI:
6930 blk_list_head = &phba->sli4_hba.lpfc_xri_blk_list;
6931 break;
6932 case LPFC_RSC_TYPE_FCOE_VFI:
6933 blk_list_head = &phba->sli4_hba.lpfc_vfi_blk_list;
6934 break;
6935 case LPFC_RSC_TYPE_FCOE_RPI:
6936 blk_list_head = &phba->sli4_hba.lpfc_rpi_blk_list;
6937 break;
6938 default:
6939 return -EIO;
6940 }
6941
6942
6943 list_for_each_entry(rsrc_blk, blk_list_head, list) {
6944 if (curr_blks == 0) {
6945
6946
6947
6948
6949
6950
6951
6952 *extnt_size = rsrc_blk->rsrc_size;
6953 }
6954 curr_blks++;
6955 }
6956
6957
6958
6959
6960
6961 emb_len = sizeof(MAILBOX_t) - sizeof(struct mbox_header) -
6962 sizeof(uint32_t);
6963
6964
6965
6966
6967
6968 emb = LPFC_SLI4_MBX_EMBED;
6969 req_len = emb_len;
6970 if (req_len > emb_len) {
6971 req_len = curr_blks * sizeof(uint16_t) +
6972 sizeof(union lpfc_sli4_cfg_shdr) +
6973 sizeof(uint32_t);
6974 emb = LPFC_SLI4_MBX_NEMBED;
6975 }
6976
6977 mbox = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
6978 if (!mbox)
6979 return -ENOMEM;
6980 memset(mbox, 0, sizeof(LPFC_MBOXQ_t));
6981
6982 alloc_len = lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON,
6983 LPFC_MBOX_OPCODE_GET_ALLOC_RSRC_EXTENT,
6984 req_len, emb);
6985 if (alloc_len < req_len) {
6986 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
6987 "2983 Allocated DMA memory size (x%x) is "
6988 "less than the requested DMA memory "
6989 "size (x%x)\n", alloc_len, req_len);
6990 rc = -ENOMEM;
6991 goto err_exit;
6992 }
6993 rc = lpfc_sli4_mbox_rsrc_extent(phba, mbox, curr_blks, type, emb);
6994 if (unlikely(rc)) {
6995 rc = -EIO;
6996 goto err_exit;
6997 }
6998
6999 if (!phba->sli4_hba.intr_enable)
7000 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
7001 else {
7002 mbox_tmo = lpfc_mbox_tmo_val(phba, mbox);
7003 rc = lpfc_sli_issue_mbox_wait(phba, mbox, mbox_tmo);
7004 }
7005
7006 if (unlikely(rc)) {
7007 rc = -EIO;
7008 goto err_exit;
7009 }
7010
7011
7012
7013
7014
7015
7016
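	/*
	 * The response lives in the mailbox for embedded commands or in
	 * the first SGE for non-embedded commands.
	 */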
7017 if (emb == LPFC_SLI4_MBX_EMBED) {
7018 rsrc_ext = &mbox->u.mqe.un.alloc_rsrc_extents;
7019 shdr = &rsrc_ext->header.cfg_shdr;
7020 *extnt_cnt = bf_get(lpfc_mbx_rsrc_cnt, &rsrc_ext->u.rsp);
7021 } else {
7022 virtaddr = mbox->sge_array->addr[0];
7023 n_rsrc = (struct lpfc_mbx_nembed_rsrc_extent *) virtaddr;
7024 shdr = &n_rsrc->cfg_shdr;
7025 *extnt_cnt = bf_get(lpfc_mbx_rsrc_cnt, n_rsrc);
7026 }
7027
7028 if (bf_get(lpfc_mbox_hdr_status, &shdr->response)) {
7029 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_INIT,
7030 "2984 Failed to read allocated resources "
7031 "for type %d - Status 0x%x Add'l Status 0x%x.\n",
7032 type,
7033 bf_get(lpfc_mbox_hdr_status, &shdr->response),
7034 bf_get(lpfc_mbox_hdr_add_status, &shdr->response));
7035 rc = -EIO;
7036 goto err_exit;
7037 }
7038 err_exit:
7039 lpfc_sli4_mbox_cmd_free(phba, mbox);
7040 return rc;
7041}
7059
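/**
 * lpfc_sli4_repost_sgl_list - Repost a list of sgl entries to the port
 * @phba: Pointer to HBA context object.
 * @sgl_list: List of sgl entries to repost.
 * @cnt: Number of entries on @sgl_list.
 *
 * Posts the sgl entries back to the port, batching entries with
 * contiguous XRIs into block postings and falling back to single-sgl
 * postings where needed.  Entries that fail to post are freed.
 *
 * Returns the number of sgls successfully posted, or -EIO if none could
 * be posted.
 **/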
7060static int
7061lpfc_sli4_repost_sgl_list(struct lpfc_hba *phba,
7062 struct list_head *sgl_list, int cnt)
7063{
7064 struct lpfc_sglq *sglq_entry = NULL;
7065 struct lpfc_sglq *sglq_entry_next = NULL;
7066 struct lpfc_sglq *sglq_entry_first = NULL;
7067 int status, total_cnt;
7068 int post_cnt = 0, num_posted = 0, block_cnt = 0;
7069 int last_xritag = NO_XRI;
7070 LIST_HEAD(prep_sgl_list);
7071 LIST_HEAD(blck_sgl_list);
7072 LIST_HEAD(allc_sgl_list);
7073 LIST_HEAD(post_sgl_list);
7074 LIST_HEAD(free_sgl_list);
7075
7076 spin_lock_irq(&phba->hbalock);
7077 spin_lock(&phba->sli4_hba.sgl_list_lock);
7078 list_splice_init(sgl_list, &allc_sgl_list);
7079 spin_unlock(&phba->sli4_hba.sgl_list_lock);
7080 spin_unlock_irq(&phba->hbalock);
7081
7082 total_cnt = cnt;
7083 list_for_each_entry_safe(sglq_entry, sglq_entry_next,
7084 &allc_sgl_list, list) {
7085 list_del_init(&sglq_entry->list);
7086 block_cnt++;
7087 if ((last_xritag != NO_XRI) &&
7088 (sglq_entry->sli4_xritag != last_xritag + 1)) {
7089
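			/*
			 * Hole in the XRI range: post the sgls gathered so
			 * far as one block and start a new block with this
			 * entry.
			 */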
7090 list_splice_init(&prep_sgl_list, &blck_sgl_list);
7091 post_cnt = block_cnt - 1;
7092
7093 list_add_tail(&sglq_entry->list, &prep_sgl_list);
7094 block_cnt = 1;
7095 } else {
7096
7097 list_add_tail(&sglq_entry->list, &prep_sgl_list);
7098
7099 if (block_cnt == LPFC_NEMBED_MBOX_SGL_CNT) {
7100 list_splice_init(&prep_sgl_list,
7101 &blck_sgl_list);
7102 post_cnt = block_cnt;
7103 block_cnt = 0;
7104 }
7105 }
7106 num_posted++;
7107
7108
7109 last_xritag = sglq_entry->sli4_xritag;
7110
7111
7112 if (num_posted == total_cnt) {
7113 if (post_cnt == 0) {
7114 list_splice_init(&prep_sgl_list,
7115 &blck_sgl_list);
7116 post_cnt = block_cnt;
7117 } else if (block_cnt == 1) {
7118 status = lpfc_sli4_post_sgl(phba,
7119 sglq_entry->phys, 0,
7120 sglq_entry->sli4_xritag);
7121 if (!status) {
7122
7123 list_add_tail(&sglq_entry->list,
7124 &post_sgl_list);
7125 } else {
7126
7127 lpfc_printf_log(phba, KERN_WARNING,
7128 LOG_SLI,
7129 "3159 Failed to post "
7130 "sgl, xritag:x%x\n",
7131 sglq_entry->sli4_xritag);
7132 list_add_tail(&sglq_entry->list,
7133 &free_sgl_list);
7134 total_cnt--;
7135 }
7136 }
7137 }
7138
7139
7140 if (post_cnt == 0)
7141 continue;
7142
7143
7144 status = lpfc_sli4_post_sgl_list(phba, &blck_sgl_list,
7145 post_cnt);
7146
7147 if (!status) {
7148
7149 list_splice_init(&blck_sgl_list, &post_sgl_list);
7150 } else {
7151
7152 sglq_entry_first = list_first_entry(&blck_sgl_list,
7153 struct lpfc_sglq,
7154 list);
7155 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
7156 "3160 Failed to post sgl-list, "
7157 "xritag:x%x-x%x\n",
7158 sglq_entry_first->sli4_xritag,
7159 (sglq_entry_first->sli4_xritag +
7160 post_cnt - 1));
7161 list_splice_init(&blck_sgl_list, &free_sgl_list);
7162 total_cnt -= post_cnt;
7163 }
7164
7165
7166 if (block_cnt == 0)
7167 last_xritag = NO_XRI;
7168
7169
7170 post_cnt = 0;
7171 }
7172
7173
7174 lpfc_free_sgl_list(phba, &free_sgl_list);
7175
7176
7177 if (!list_empty(&post_sgl_list)) {
7178 spin_lock_irq(&phba->hbalock);
7179 spin_lock(&phba->sli4_hba.sgl_list_lock);
7180 list_splice_init(&post_sgl_list, sgl_list);
7181 spin_unlock(&phba->sli4_hba.sgl_list_lock);
7182 spin_unlock_irq(&phba->hbalock);
7183 } else {
7184 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
7185 "3161 Failure to post sgl to port.\n");
7186 return -EIO;
7187 }
7188
7189
7190 return total_cnt;
7191}
7204
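/**
 * lpfc_sli4_repost_io_sgl_list - Repost all allocated I/O buffer sgls
 * @phba: Pointer to HBA context object.
 *
 * Flushes the allocated I/O buffers onto a local list and posts their
 * sgls back to the port.
 *
 * Returns 0 on success or -EIO if no buffers could be posted.
 **/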
7205static int
7206lpfc_sli4_repost_io_sgl_list(struct lpfc_hba *phba)
7207{
7208 LIST_HEAD(post_nblist);
7209 int num_posted, rc = 0;
7210
7211
7212 lpfc_io_buf_flush(phba, &post_nblist);
7213
7214
7215 if (!list_empty(&post_nblist)) {
7216 num_posted = lpfc_sli4_post_io_sgl_list(
7217 phba, &post_nblist, phba->sli4_hba.io_xri_cnt);
7218
7219 if (num_posted == 0)
7220 rc = -EIO;
7221 }
7222 return rc;
7223}
7224
7225static void
7226lpfc_set_host_data(struct lpfc_hba *phba, LPFC_MBOXQ_t *mbox)
7227{
7228 uint32_t len;
7229
7230 len = sizeof(struct lpfc_mbx_set_host_data) -
7231 sizeof(struct lpfc_sli4_cfg_mhdr);
7232 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON,
7233 LPFC_MBOX_OPCODE_SET_HOST_DATA, len,
7234 LPFC_SLI4_MBX_EMBED);
7235
7236 mbox->u.mqe.un.set_host_data.param_id = LPFC_SET_HOST_OS_DRIVER_VERSION;
7237 mbox->u.mqe.un.set_host_data.param_len =
7238 LPFC_HOST_OS_DRIVER_VERSION_SIZE;
7239 snprintf(mbox->u.mqe.un.set_host_data.data,
7240 LPFC_HOST_OS_DRIVER_VERSION_SIZE,
7241 "Linux %s v"LPFC_DRIVER_VERSION,
7242 (phba->hba_flag & HBA_FCOE_MODE) ? "FCoE" : "FC");
7243}
7244
7245int
7246lpfc_post_rq_buffer(struct lpfc_hba *phba, struct lpfc_queue *hrq,
7247 struct lpfc_queue *drq, int count, int idx)
7248{
7249 int rc, i;
7250 struct lpfc_rqe hrqe;
7251 struct lpfc_rqe drqe;
7252 struct lpfc_rqb *rqbp;
7253 unsigned long flags;
7254 struct rqb_dmabuf *rqb_buffer;
7255 LIST_HEAD(rqb_buf_list);
7256
7257 spin_lock_irqsave(&phba->hbalock, flags);
7258 rqbp = hrq->rqbp;
7259 for (i = 0; i < count; i++) {
7260
7261 if (rqbp->buffer_count + i >= rqbp->entry_count - 1)
7262 break;
7263 rqb_buffer = rqbp->rqb_alloc_buffer(phba);
7264 if (!rqb_buffer)
7265 break;
7266 rqb_buffer->hrq = hrq;
7267 rqb_buffer->drq = drq;
7268 rqb_buffer->idx = idx;
7269 list_add_tail(&rqb_buffer->hbuf.list, &rqb_buf_list);
7270 }
7271 while (!list_empty(&rqb_buf_list)) {
7272 list_remove_head(&rqb_buf_list, rqb_buffer, struct rqb_dmabuf,
7273 hbuf.list);
7274
7275 hrqe.address_lo = putPaddrLow(rqb_buffer->hbuf.phys);
7276 hrqe.address_hi = putPaddrHigh(rqb_buffer->hbuf.phys);
7277 drqe.address_lo = putPaddrLow(rqb_buffer->dbuf.phys);
7278 drqe.address_hi = putPaddrHigh(rqb_buffer->dbuf.phys);
7279 rc = lpfc_sli4_rq_put(hrq, drq, &hrqe, &drqe);
7280 if (rc < 0) {
7281 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
7282 "6421 Cannot post to HRQ %d: %x %x %x "
7283 "DRQ %x %x\n",
7284 hrq->queue_id,
7285 hrq->host_index,
7286 hrq->hba_index,
7287 hrq->entry_count,
7288 drq->host_index,
7289 drq->hba_index);
7290 rqbp->rqb_free_buffer(phba, rqb_buffer);
7291 } else {
7292 list_add_tail(&rqb_buffer->hbuf.list,
7293 &rqbp->rqb_buffer_list);
7294 rqbp->buffer_count++;
7295 }
7296 }
7297 spin_unlock_irqrestore(&phba->hbalock, flags);
7298 return 1;
7299}
7309
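/**
 * lpfc_sli4_hba_setup - SLI4 device initialization PCI function
 * @phba: Pointer to HBA context object.
 *
 * Brings up an SLI4 port: resets the PCI function, reads the revision
 * and VPD data, negotiates features (UE recovery, MDS diagnostics, dual
 * dump), allocates the resource identifiers, and continues with the
 * remaining port bring-up.
 *
 * Returns 0 on success or a negative error code on failure.
 **/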
7310int
7311lpfc_sli4_hba_setup(struct lpfc_hba *phba)
7312{
7313 int rc, i, cnt, len, dd;
7314 LPFC_MBOXQ_t *mboxq;
7315 struct lpfc_mqe *mqe;
7316 uint8_t *vpd;
7317 uint32_t vpd_size;
7318 uint32_t ftr_rsp = 0;
7319 struct Scsi_Host *shost = lpfc_shost_from_vport(phba->pport);
7320 struct lpfc_vport *vport = phba->pport;
7321 struct lpfc_dmabuf *mp;
7322 struct lpfc_rqb *rqbp;
7323
7324
7325 rc = lpfc_pci_function_reset(phba);
7326 if (unlikely(rc))
7327 return -ENODEV;
7328
7329
7330 rc = lpfc_sli4_post_status_check(phba);
7331 if (unlikely(rc))
7332 return -ENODEV;
7333 else {
7334 spin_lock_irq(&phba->hbalock);
7335 phba->sli.sli_flag |= LPFC_SLI_ACTIVE;
7336 spin_unlock_irq(&phba->hbalock);
7337 }
7338
7339
7340
7341
7342
7343 mboxq = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
7344 if (!mboxq)
7345 return -ENOMEM;
7346
7347
7348 vpd_size = SLI4_PAGE_SIZE;
7349 vpd = kzalloc(vpd_size, GFP_KERNEL);
7350 if (!vpd) {
7351 rc = -ENOMEM;
7352 goto out_free_mbox;
7353 }
7354
7355 rc = lpfc_sli4_read_rev(phba, mboxq, vpd, &vpd_size);
7356 if (unlikely(rc)) {
7357 kfree(vpd);
7358 goto out_free_mbox;
7359 }
7360
7361 mqe = &mboxq->u.mqe;
7362 phba->sli_rev = bf_get(lpfc_mbx_rd_rev_sli_lvl, &mqe->un.read_rev);
7363 if (bf_get(lpfc_mbx_rd_rev_fcoe, &mqe->un.read_rev)) {
7364 phba->hba_flag |= HBA_FCOE_MODE;
7365 phba->fcp_embed_io = 0;
7366 } else {
7367 phba->hba_flag &= ~HBA_FCOE_MODE;
7368 }
7369
7370 if (bf_get(lpfc_mbx_rd_rev_cee_ver, &mqe->un.read_rev) ==
7371 LPFC_DCBX_CEE_MODE)
7372 phba->hba_flag |= HBA_FIP_SUPPORT;
7373 else
7374 phba->hba_flag &= ~HBA_FIP_SUPPORT;
7375
7376 phba->hba_flag &= ~HBA_IOQ_FLUSH;
7377
7378 if (phba->sli_rev != LPFC_SLI_REV4) {
7379 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
7380 "0376 READ_REV Error. SLI Level %d "
7381 "FCoE enabled %d\n",
7382 phba->sli_rev, phba->hba_flag & HBA_FCOE_MODE);
7383 rc = -EIO;
7384 kfree(vpd);
7385 goto out_free_mbox;
7386 }
7387
7388
7389
7390
7391
7392
7393 if (phba->hba_flag & HBA_FCOE_MODE &&
7394 lpfc_sli4_read_fcoe_params(phba))
7395 lpfc_printf_log(phba, KERN_WARNING, LOG_MBOX | LOG_INIT,
7396 "2570 Failed to read FCoE parameters\n");
7397
7398
7399
7400
7401
7402 rc = lpfc_sli4_retrieve_pport_name(phba);
7403 if (!rc)
7404 lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI,
7405 "3080 Successful retrieving SLI4 device "
7406 "physical port name: %s.\n", phba->Port);
7407
7408 rc = lpfc_sli4_get_ctl_attr(phba);
7409 if (!rc)
7410 lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI,
7411 "8351 Successful retrieving SLI4 device "
7412 "CTL ATTR\n");
7413
7414
7415
7416
7417
7418
7419 rc = lpfc_parse_vpd(phba, vpd, vpd_size);
7420 if (unlikely(!rc)) {
7421 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
7422 "0377 Error %d parsing vpd. "
7423 "Using defaults.\n", rc);
7424 rc = 0;
7425 }
7426 kfree(vpd);
7427
7428
7429 phba->vpd.rev.biuRev = mqe->un.read_rev.first_hw_rev;
7430 phba->vpd.rev.smRev = mqe->un.read_rev.second_hw_rev;
7431
7432
7433
7434
7435
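	/*
	 * Disable NVME command embedding on the first G7 ASIC revision
	 * (biuRev LPFC_G7_ASIC_1, smRev 0) on if_type 6 ports.
	 */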
7436 if ((bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) ==
7437 LPFC_SLI_INTF_IF_TYPE_6) &&
7438 (phba->vpd.rev.biuRev == LPFC_G7_ASIC_1) &&
7439 (phba->vpd.rev.smRev == 0) &&
7440 (phba->cfg_nvme_embed_cmd == 1))
7441 phba->cfg_nvme_embed_cmd = 0;
7442
7443 phba->vpd.rev.endecRev = mqe->un.read_rev.third_hw_rev;
7444 phba->vpd.rev.fcphHigh = bf_get(lpfc_mbx_rd_rev_fcph_high,
7445 &mqe->un.read_rev);
7446 phba->vpd.rev.fcphLow = bf_get(lpfc_mbx_rd_rev_fcph_low,
7447 &mqe->un.read_rev);
7448 phba->vpd.rev.feaLevelHigh = bf_get(lpfc_mbx_rd_rev_ftr_lvl_high,
7449 &mqe->un.read_rev);
7450 phba->vpd.rev.feaLevelLow = bf_get(lpfc_mbx_rd_rev_ftr_lvl_low,
7451 &mqe->un.read_rev);
7452 phba->vpd.rev.sli1FwRev = mqe->un.read_rev.fw_id_rev;
7453 memcpy(phba->vpd.rev.sli1FwName, mqe->un.read_rev.fw_name, 16);
7454 phba->vpd.rev.sli2FwRev = mqe->un.read_rev.ulp_fw_id_rev;
7455 memcpy(phba->vpd.rev.sli2FwName, mqe->un.read_rev.ulp_fw_name, 16);
7456 phba->vpd.rev.opFwRev = mqe->un.read_rev.fw_id_rev;
7457 memcpy(phba->vpd.rev.opFwName, mqe->un.read_rev.fw_name, 16);
7458 lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI,
7459 "(%d):0380 READ_REV Status x%x "
7460 "fw_rev:%s fcphHi:%x fcphLo:%x flHi:%x flLo:%x\n",
7461 mboxq->vport ? mboxq->vport->vpi : 0,
7462 bf_get(lpfc_mqe_status, mqe),
7463 phba->vpd.rev.opFwName,
7464 phba->vpd.rev.fcphHigh, phba->vpd.rev.fcphLow,
7465 phba->vpd.rev.feaLevelHigh, phba->vpd.rev.feaLevelLow);
7466
7467 if (bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) ==
7468 LPFC_SLI_INTF_IF_TYPE_0) {
7469 lpfc_set_features(phba, mboxq, LPFC_SET_UE_RECOVERY);
7470 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
7471 if (rc == MBX_SUCCESS) {
7472 phba->hba_flag |= HBA_RECOVERABLE_UE;
7473
7474 phba->eratt_poll_interval = 1;
7475 phba->sli4_hba.ue_to_sr = bf_get(
7476 lpfc_mbx_set_feature_UESR,
7477 &mboxq->u.mqe.un.set_feature);
7478 phba->sli4_hba.ue_to_rp = bf_get(
7479 lpfc_mbx_set_feature_UERP,
7480 &mboxq->u.mqe.un.set_feature);
7481 }
7482 }
7483
7484 if (phba->cfg_enable_mds_diags && phba->mds_diags_support) {
7485
7486 lpfc_set_features(phba, mboxq, LPFC_SET_MDS_DIAGS);
7487 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
7488 if (rc != MBX_SUCCESS)
7489 phba->mds_diags_support = 0;
7490 }
7491
7492
7493
7494
7495
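/* Ask the firmware which optional features it supports (FCP initiator
 * mode, performance hints, DIF/BlockGuard, NPIV) and reconcile them
 * with the driver configuration below.
 */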
7496 lpfc_request_features(phba, mboxq);
7497 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
7498 if (unlikely(rc)) {
7499 rc = -EIO;
7500 goto out_free_mbox;
7501 }
7502
7503
7504
7505
7506
7507 if (!(bf_get(lpfc_mbx_rq_ftr_rsp_fcpi, &mqe->un.req_ftrs))) {
7508 lpfc_printf_log(phba, KERN_WARNING, LOG_MBOX | LOG_SLI,
7509 "0378 No support for fcpi mode.\n");
7510 ftr_rsp++;
7511 }
7512
7513
7514 if (phba->hba_flag & HBA_FCOE_MODE) {
7515 if (bf_get(lpfc_mbx_rq_ftr_rsp_perfh, &mqe->un.req_ftrs))
7516 phba->sli3_options |= LPFC_SLI4_PERFH_ENABLED;
7517 else
7518 phba->sli3_options &= ~LPFC_SLI4_PERFH_ENABLED;
7519 }
7520
7521
7522
7523
7524
7525
7526 if (phba->sli3_options & LPFC_SLI3_BG_ENABLED) {
7527 if (!(bf_get(lpfc_mbx_rq_ftr_rsp_dif, &mqe->un.req_ftrs))) {
7528 phba->cfg_enable_bg = 0;
7529 phba->sli3_options &= ~LPFC_SLI3_BG_ENABLED;
7530 ftr_rsp++;
7531 }
7532 }
7533
7534 if (phba->max_vpi && phba->cfg_enable_npiv &&
7535 !(bf_get(lpfc_mbx_rq_ftr_rsp_npiv, &mqe->un.req_ftrs)))
7536 ftr_rsp++;
7537
7538 if (ftr_rsp) {
7539 lpfc_printf_log(phba, KERN_WARNING, LOG_MBOX | LOG_SLI,
7540 "0379 Feature Mismatch Data: x%08x %08x "
7541 "x%x x%x x%x\n", mqe->un.req_ftrs.word2,
7542 mqe->un.req_ftrs.word3, phba->cfg_enable_bg,
7543 phba->cfg_enable_npiv, phba->max_vpi);
7544 if (!(bf_get(lpfc_mbx_rq_ftr_rsp_dif, &mqe->un.req_ftrs)))
7545 phba->cfg_enable_bg = 0;
7546 if (!(bf_get(lpfc_mbx_rq_ftr_rsp_npiv, &mqe->un.req_ftrs)))
7547 phba->cfg_enable_npiv = 0;
7548 }
7549
7550
7551 spin_lock_irq(&phba->hbalock);
7552 phba->sli3_options |= (LPFC_SLI3_NPIV_ENABLED | LPFC_SLI3_HBQ_ENABLED);
7553 spin_unlock_irq(&phba->hbalock);
7554
7555
7556 lpfc_set_features(phba, mboxq, LPFC_SET_DUAL_DUMP);
7557 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
7558 dd = bf_get(lpfc_mbx_set_feature_dd, &mboxq->u.mqe.un.set_feature);
7559 if ((rc == MBX_SUCCESS) && (dd == LPFC_ENABLE_DUAL_DUMP))
7560 lpfc_printf_log(phba, KERN_ERR, LOG_SLI | LOG_INIT,
7561 "6448 Dual Dump is enabled\n");
7562 else
7563 lpfc_printf_log(phba, KERN_INFO, LOG_SLI | LOG_INIT,
7564 "6447 Dual Dump Mailbox x%x (x%x/x%x) failed, "
7565 "rc:x%x dd:x%x\n",
7566 bf_get(lpfc_mqe_command, &mboxq->u.mqe),
7567 lpfc_sli_config_mbox_subsys_get(
7568 phba, mboxq),
7569 lpfc_sli_config_mbox_opcode_get(
7570 phba, mboxq),
7571 rc, dd);
7572
7573
7574
7575
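/* Allocate the port resource identifiers needed for the rest of setup */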
7576 rc = lpfc_sli4_alloc_resource_identifiers(phba);
7577 if (rc) {
7578 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
7579 "2920 Failed to alloc Resource IDs "
7580 "rc = x%x\n", rc);
7581 goto out_free_mbox;
7582 }
7583
7584 lpfc_set_host_data(phba, mboxq);
7585
7586 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
7587 if (rc) {
7588 lpfc_printf_log(phba, KERN_WARNING, LOG_MBOX | LOG_SLI,
7589 "2134 Failed to set host os driver version %x",
7590 rc);
7591 }
7592
7593
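/* Read the port service parameters, copy them into the physical port's
 * fc_sparam and refresh the WWNN/WWPN from them.
 */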
7594 rc = lpfc_read_sparam(phba, mboxq, vport->vpi);
7595 if (rc) {
7596 phba->link_state = LPFC_HBA_ERROR;
7597 rc = -ENOMEM;
7598 goto out_free_mbox;
7599 }
7600
7601 mboxq->vport = vport;
7602 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
7603 mp = (struct lpfc_dmabuf *)mboxq->ctx_buf;
7604 if (rc == MBX_SUCCESS) {
7605 memcpy(&vport->fc_sparam, mp->virt, sizeof(struct serv_parm));
7606 rc = 0;
7607 }
7608
7609
7610
7611
7612
7613 lpfc_mbuf_free(phba, mp->virt, mp->phys);
7614 kfree(mp);
7615 mboxq->ctx_buf = NULL;
7616 if (unlikely(rc)) {
7617 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
7618 "0382 READ_SPARAM command failed "
7619 "status %d, mbxStatus x%x\n",
7620 rc, bf_get(lpfc_mqe_status, mqe));
7621 phba->link_state = LPFC_HBA_ERROR;
7622 rc = -EIO;
7623 goto out_free_mbox;
7624 }
7625
7626 lpfc_update_vport_wwn(vport);
7627
7628
7629 fc_host_node_name(shost) = wwn_to_u64(vport->fc_nodename.u.wwn);
7630 fc_host_port_name(shost) = wwn_to_u64(vport->fc_portname.u.wwn);
7631
7632
7633 rc = lpfc_sli4_queue_create(phba);
7634 if (rc) {
7635 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
7636 "3089 Failed to allocate queues\n");
7637 rc = -ENODEV;
7638 goto out_free_mbox;
7639 }
7640
7641 rc = lpfc_sli4_queue_setup(phba);
7642 if (unlikely(rc)) {
7643 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
7644 "0381 Error %d during queue setup.\n ", rc);
7645 goto out_stop_timers;
7646 }
7647
7648 lpfc_sli4_setup(phba);
7649 lpfc_sli4_queue_init(phba);
7650
7651
7652 rc = lpfc_sli4_els_sgl_update(phba);
7653 if (unlikely(rc)) {
7654 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
7655 "1400 Failed to update xri-sgl size and "
7656 "mapping: %d\n", rc);
7657 goto out_destroy_queue;
7658 }
7659
7660
7661 rc = lpfc_sli4_repost_sgl_list(phba, &phba->sli4_hba.lpfc_els_sgl_list,
7662 phba->sli4_hba.els_xri_cnt);
7663 if (unlikely(rc < 0)) {
7664 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
7665 "0582 Error %d during els sgl post "
7666 "operation\n", rc);
7667 rc = -ENODEV;
7668 goto out_destroy_queue;
7669 }
7670 phba->sli4_hba.els_xri_cnt = rc;
7671
7672 if (phba->nvmet_support) {
7673
7674 rc = lpfc_sli4_nvmet_sgl_update(phba);
7675 if (unlikely(rc)) {
7676 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
7677 "6308 Failed to update nvmet-sgl size "
7678 "and mapping: %d\n", rc);
7679 goto out_destroy_queue;
7680 }
7681
7682
7683 rc = lpfc_sli4_repost_sgl_list(
7684 phba,
7685 &phba->sli4_hba.lpfc_nvmet_sgl_list,
7686 phba->sli4_hba.nvmet_xri_cnt);
7687 if (unlikely(rc < 0)) {
7688 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
7689 "3117 Error %d during nvmet "
7690 "sgl post\n", rc);
7691 rc = -ENODEV;
7692 goto out_destroy_queue;
7693 }
7694 phba->sli4_hba.nvmet_xri_cnt = rc;
7695
7696
7697
7698
7699 cnt = phba->sli4_hba.nvmet_xri_cnt +
7700 phba->sli4_hba.max_cfg_param.max_xri;
7701 } else {
7702
7703 rc = lpfc_sli4_io_sgl_update(phba);
7704 if (unlikely(rc)) {
7705 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
7706 "6082 Failed to update nvme-sgl size "
7707 "and mapping: %d\n", rc);
7708 goto out_destroy_queue;
7709 }
7710
7711
7712 rc = lpfc_sli4_repost_io_sgl_list(phba);
7713 if (unlikely(rc)) {
7714 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
7715 "6116 Error %d during nvme sgl post "
7716 "operation\n", rc);
7717
7718
7719 rc = -ENODEV;
7720 goto out_destroy_queue;
7721 }
7722
7723
7724
7725 cnt = phba->sli4_hba.max_cfg_param.max_xri;
7726 }
7727
7728 if (!phba->sli.iocbq_lookup) {
7729
7730 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
7731 "2821 initialize iocb list with %d entries\n",
7732 cnt);
7733 rc = lpfc_init_iocb_list(phba, cnt);
7734 if (rc) {
7735 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
7736 "1413 Failed to init iocb list.\n");
7737 goto out_destroy_queue;
7738 }
7739 }
7740
7741 if (phba->nvmet_support)
7742 lpfc_nvmet_create_targetport(phba);
7743
7744 if (phba->nvmet_support && phba->cfg_nvmet_mrq) {
7745
7746 for (i = 0; i < phba->cfg_nvmet_mrq; i++) {
7747 rqbp = phba->sli4_hba.nvmet_mrq_hdr[i]->rqbp;
7748 INIT_LIST_HEAD(&rqbp->rqb_buffer_list);
7749 rqbp->rqb_alloc_buffer = lpfc_sli4_nvmet_alloc;
7750 rqbp->rqb_free_buffer = lpfc_sli4_nvmet_free;
7751 rqbp->entry_count = LPFC_NVMET_RQE_DEF_COUNT;
7752 rqbp->buffer_count = 0;
7753
7754 lpfc_post_rq_buffer(
7755 phba, phba->sli4_hba.nvmet_mrq_hdr[i],
7756 phba->sli4_hba.nvmet_mrq_data[i],
7757 phba->cfg_nvmet_mrq_post, i);
7758 }
7759 }
7760
7761
7762 rc = lpfc_sli4_post_all_rpi_hdrs(phba);
7763 if (unlikely(rc)) {
7764 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
7765 "0393 Error %d during rpi post operation\n",
7766 rc);
7767 rc = -ENODEV;
7768 goto out_destroy_queue;
7769 }
7770 lpfc_sli4_node_prep(phba);
7771
7772 if (!(phba->hba_flag & HBA_FCOE_MODE)) {
7773 if ((phba->nvmet_support == 0) || (phba->cfg_nvmet_mrq == 1)) {
7774
7775
7776
7777 lpfc_reg_fcfi(phba, mboxq);
7778 mboxq->vport = phba->pport;
7779 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
7780 if (rc != MBX_SUCCESS)
7781 goto out_unset_queue;
7782 rc = 0;
7783 phba->fcf.fcfi = bf_get(lpfc_reg_fcfi_fcfi,
7784 &mboxq->u.mqe.un.reg_fcfi);
7785 } else {
7786
7787
7788
7789 lpfc_reg_fcfi_mrq(phba, mboxq, 0);
7790 mboxq->vport = phba->pport;
7791 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
7792 if (rc != MBX_SUCCESS)
7793 goto out_unset_queue;
7794 rc = 0;
7795 phba->fcf.fcfi = bf_get(lpfc_reg_fcfi_mrq_fcfi,
7796 &mboxq->u.mqe.un.reg_fcfi_mrq);
7797
7798
7799 lpfc_reg_fcfi_mrq(phba, mboxq, 1);
7800 mboxq->vport = phba->pport;
7801 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
7802 if (rc != MBX_SUCCESS)
7803 goto out_unset_queue;
7804 rc = 0;
7805 }
7806
7807 lpfc_sli_read_link_ste(phba);
7808 }
7809
7810
7811
7812
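/* For initiator mode, allocate and post the host IO buffers if none
 * exist yet; NVMET targets do not use XRI rebalancing.
 */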
7813 if (phba->nvmet_support == 0) {
7814 if (phba->sli4_hba.io_xri_cnt == 0) {
7815 len = lpfc_new_io_buf(
7816 phba, phba->sli4_hba.io_xri_max);
7817 if (len == 0) {
7818 rc = -ENOMEM;
7819 goto out_unset_queue;
7820 }
7821
7822 if (phba->cfg_xri_rebalancing)
7823 lpfc_create_multixri_pools(phba);
7824 }
7825 } else {
7826 phba->cfg_xri_rebalancing = 0;
7827 }
7828
7829
7830 spin_lock_irq(&phba->hbalock);
7831 phba->sli.sli_flag &= ~LPFC_SLI_ASYNC_MBX_BLK;
7832 spin_unlock_irq(&phba->hbalock);
7833
7834
7835 lpfc_sli4_rb_setup(phba);
7836
7837
7838 phba->fcf.fcf_flag = 0;
7839 phba->fcf.current_rec.flag = 0;
7840
7841
7842 mod_timer(&vport->els_tmofunc,
7843 jiffies + msecs_to_jiffies(1000 * (phba->fc_ratov * 2)));
7844
7845
7846 mod_timer(&phba->hb_tmofunc,
7847 jiffies + msecs_to_jiffies(1000 * LPFC_HB_MBOX_INTERVAL));
7848 phba->hb_outstanding = 0;
7849 phba->last_completion_time = jiffies;
7850
7851
7852 if (phba->cfg_auto_imax)
7853 queue_delayed_work(phba->wq, &phba->eq_delay_work,
7854 msecs_to_jiffies(LPFC_EQ_DELAY_MSECS));
7855
7856
7857 mod_timer(&phba->eratt_poll,
7858 jiffies + msecs_to_jiffies(1000 * phba->eratt_poll_interval));
7859
7860
7861 if (phba->cfg_aer_support == 1 && !(phba->hba_flag & HBA_AER_ENABLED)) {
7862 rc = pci_enable_pcie_error_reporting(phba->pcidev);
7863 if (!rc) {
7864 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
7865 "2829 This device supports "
7866 "Advanced Error Reporting (AER)\n");
7867 spin_lock_irq(&phba->hbalock);
7868 phba->hba_flag |= HBA_AER_ENABLED;
7869 spin_unlock_irq(&phba->hbalock);
7870 } else {
7871 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
7872 "2830 This device does not support "
7873 "Advanced Error Reporting (AER)\n");
7874 phba->cfg_aer_support = 0;
7875 }
7876 rc = 0;
7877 }
7878
7879
7880
7881
7882
7883 spin_lock_irq(&phba->hbalock);
7884 phba->link_state = LPFC_LINK_DOWN;
7885
7886
7887 if (bf_get(lpfc_conf_trunk_port0, &phba->sli4_hba))
7888 phba->trunk_link.link0.state = LPFC_LINK_DOWN;
7889 if (bf_get(lpfc_conf_trunk_port1, &phba->sli4_hba))
7890 phba->trunk_link.link1.state = LPFC_LINK_DOWN;
7891 if (bf_get(lpfc_conf_trunk_port2, &phba->sli4_hba))
7892 phba->trunk_link.link2.state = LPFC_LINK_DOWN;
7893 if (bf_get(lpfc_conf_trunk_port3, &phba->sli4_hba))
7894 phba->trunk_link.link3.state = LPFC_LINK_DOWN;
7895 spin_unlock_irq(&phba->hbalock);
7896
7897
7898 lpfc_sli4_arm_cqeq_intr(phba);
7899
7900
7901 phba->sli4_hba.intr_enable = 1;
7902
7903 if (!(phba->hba_flag & HBA_FCOE_MODE) &&
7904 (phba->hba_flag & LINK_DISABLED)) {
7905 lpfc_printf_log(phba, KERN_ERR, LOG_INIT | LOG_SLI,
7906 "3103 Adapter Link is disabled.\n");
7907 lpfc_down_link(phba, mboxq);
7908 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
7909 if (rc != MBX_SUCCESS) {
7910 lpfc_printf_log(phba, KERN_ERR, LOG_INIT | LOG_SLI,
7911 "3104 Adapter failed to issue "
7912 "DOWN_LINK mbox cmd, rc:x%x\n", rc);
7913 goto out_io_buff_free;
7914 }
7915 } else if (phba->cfg_suppress_link_up == LPFC_INITIALIZE_LINK) {
7916
7917 if (!(phba->link_flag & LS_LOOPBACK_MODE)) {
7918 rc = phba->lpfc_hba_init_link(phba, MBX_NOWAIT);
7919 if (rc)
7920 goto out_io_buff_free;
7921 }
7922 }
7923 mempool_free(mboxq, phba->mbox_mem_pool);
7924 return rc;
7925out_io_buff_free:
7926
7927 lpfc_io_free(phba);
7928out_unset_queue:
7929
7930 lpfc_sli4_queue_unset(phba);
7931out_destroy_queue:
7932 lpfc_free_iocb_list(phba);
7933 lpfc_sli4_queue_destroy(phba);
7934out_stop_timers:
7935 lpfc_stop_hba_timers(phba);
7936out_free_mbox:
7937 mempool_free(mboxq, phba->mbox_mem_pool);
7938 return rc;
7939}
7940
7941
7942
7943
7944
7945
7946
7947
7948
7949
7950
7951
7952
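/**
 * lpfc_mbox_timeout - Timer handler for an outstanding mailbox command
 * @t: pointer to the timer that fired (embedded in phba->sli.mbox_tmo)
 *
 * Posts WORKER_MBOX_TMO to the port's work events and wakes the worker
 * thread if the event was not already posted.
 **/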
7953void
7954lpfc_mbox_timeout(struct timer_list *t)
7955{
7956 struct lpfc_hba *phba = from_timer(phba, t, sli.mbox_tmo);
7957 unsigned long iflag;
7958 uint32_t tmo_posted;
7959
7960 spin_lock_irqsave(&phba->pport->work_port_lock, iflag);
7961 tmo_posted = phba->pport->work_port_events & WORKER_MBOX_TMO;
7962 if (!tmo_posted)
7963 phba->pport->work_port_events |= WORKER_MBOX_TMO;
7964 spin_unlock_irqrestore(&phba->pport->work_port_lock, iflag);
7965
7966 if (!tmo_posted)
7967 lpfc_worker_wake_up(phba);
7968 return;
7969}
7970
7971
7972
7973
7974
7975
7976
7977
7978
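/**
 * lpfc_sli4_mbox_completions_pending - Check the mailbox CQ for completions
 * @phba: pointer to the HBA context object
 *
 * Walks the mailbox completion queue and returns true if a completed,
 * non-asynchronous mailbox CQE is found, false otherwise.  SLI4 only.
 **/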
7979static bool
7980lpfc_sli4_mbox_completions_pending(struct lpfc_hba *phba)
7981{
7982
7983 uint32_t idx;
7984 struct lpfc_queue *mcq;
7985 struct lpfc_mcqe *mcqe;
7986 bool pending_completions = false;
7987 uint8_t qe_valid;
7988
7989 if (unlikely(!phba) || (phba->sli_rev != LPFC_SLI_REV4))
7990 return false;
7991
7992
7993
7994 mcq = phba->sli4_hba.mbx_cq;
7995 idx = mcq->hba_index;
7996 qe_valid = mcq->qe_valid;
7997 while (bf_get_le32(lpfc_cqe_valid,
7998 (struct lpfc_cqe *)lpfc_sli4_qe(mcq, idx)) == qe_valid) {
7999 mcqe = (struct lpfc_mcqe *)(lpfc_sli4_qe(mcq, idx));
8000 if (bf_get_le32(lpfc_trailer_completed, mcqe) &&
8001 (!bf_get_le32(lpfc_trailer_async, mcqe))) {
8002 pending_completions = true;
8003 break;
8004 }
8005 idx = (idx + 1) % mcq->entry_count;
8006 if (mcq->hba_index == idx)
8007 break;
8008
8009
8010 if (phba->sli4_hba.pc_sli4_params.cqav && !idx)
8011 qe_valid = (qe_valid) ? 0 : 1;
8012 }
8013 return pending_completions;
8014
8015}
8016
8017
8018
8019
8020
8021
8022
8023
8024
8025
8026
8027
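/**
 * lpfc_sli4_process_missed_mbox_completions - Recover lost mailbox completions
 * @phba: pointer to the HBA context object
 *
 * Locates the EQ associated with the mailbox CQ, clears its interrupt and,
 * if a mailbox completion is pending, processes the EQ so the completion
 * gets delivered.  Returns true if a pending completion was found.
 **/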
8028static bool
8029lpfc_sli4_process_missed_mbox_completions(struct lpfc_hba *phba)
8030{
8031 struct lpfc_sli4_hba *sli4_hba = &phba->sli4_hba;
8032 uint32_t eqidx;
8033 struct lpfc_queue *fpeq = NULL;
8034 struct lpfc_queue *eq;
8035 bool mbox_pending;
8036
8037 if (unlikely(!phba) || (phba->sli_rev != LPFC_SLI_REV4))
8038 return false;
8039
8040
8041 if (sli4_hba->hdwq) {
8042 for (eqidx = 0; eqidx < phba->cfg_irq_chann; eqidx++) {
8043 eq = phba->sli4_hba.hba_eq_hdl[eqidx].eq;
8044 if (eq && eq->queue_id == sli4_hba->mbx_cq->assoc_qid) {
8045 fpeq = eq;
8046 break;
8047 }
8048 }
8049 }
8050 if (!fpeq)
8051 return false;
8052
8053
8054
8055 sli4_hba->sli4_eq_clr_intr(fpeq);
8056
8057
8058
8059 mbox_pending = lpfc_sli4_mbox_completions_pending(phba);
8060
8061
8062
8063
8064
8065
8066
8067
8068 if (mbox_pending)
8069
8070 lpfc_sli4_process_eq(phba, fpeq, LPFC_QUEUE_REARM);
8071 else
8072
8073 sli4_hba->sli4_write_eq_db(phba, fpeq, 0, LPFC_QUEUE_REARM);
8074
8075 return mbox_pending;
8076
8077}
8078
8079
8080
8081
8082
8083
8084
8085
8086
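/**
 * lpfc_mbox_timeout_handler - Worker-thread handler for a mailbox timeout
 * @phba: pointer to the HBA context object
 *
 * First tries to recover a missed completion; if none is pending, logs the
 * timed-out command, aborts the outstanding FCP rings and resets the HBA.
 **/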
8087void
8088lpfc_mbox_timeout_handler(struct lpfc_hba *phba)
8089{
8090 LPFC_MBOXQ_t *pmbox = phba->sli.mbox_active;
8091 MAILBOX_t *mb = NULL;
8092
8093 struct lpfc_sli *psli = &phba->sli;
8094
8095
8096 if (lpfc_sli4_process_missed_mbox_completions(phba))
8097 return;
8098
8099 if (pmbox != NULL)
8100 mb = &pmbox->u.mb;
8101
8102
8103
8104
8105
8106 spin_lock_irq(&phba->hbalock);
8107 if (pmbox == NULL) {
8108 lpfc_printf_log(phba, KERN_WARNING,
8109 LOG_MBOX | LOG_SLI,
8110 "0353 Active Mailbox cleared - mailbox timeout "
8111 "exiting\n");
8112 spin_unlock_irq(&phba->hbalock);
8113 return;
8114 }
8115
8116
8117 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
8118 "0310 Mailbox command x%x timeout Data: x%x x%x x%px\n",
8119 mb->mbxCommand,
8120 phba->pport->port_state,
8121 phba->sli.sli_flag,
8122 phba->sli.mbox_active);
8123 spin_unlock_irq(&phba->hbalock);
8124
8125
8126
8127
8128
8129 spin_lock_irq(&phba->pport->work_port_lock);
8130 phba->pport->work_port_events &= ~WORKER_MBOX_TMO;
8131 spin_unlock_irq(&phba->pport->work_port_lock);
8132 spin_lock_irq(&phba->hbalock);
8133 phba->link_state = LPFC_LINK_UNKNOWN;
8134 psli->sli_flag &= ~LPFC_SLI_ACTIVE;
8135 spin_unlock_irq(&phba->hbalock);
8136
8137 lpfc_sli_abort_fcp_rings(phba);
8138
8139 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
8140 "0345 Resetting board due to mailbox timeout\n");
8141
8142
8143 lpfc_reset_hba(phba);
8144}
8145
8146
8147
8148
8149
8150
8151
8152
8153
8154
8155
8156
8157
8158
8159
8160
8161
8162
8163
8164
8165
8166
8167
8168
8169
8170
8171
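/**
 * lpfc_sli_issue_mbox_s3 - Issue a mailbox command on an SLI-3 port
 * @phba: pointer to the HBA context object
 * @pmbox: mailbox command to issue, or NULL to service the pending queue
 * @flag: MBX_NOWAIT or MBX_POLL
 *
 * Copies the command into the host mailbox (or SLIM), rings the chip
 * attention doorbell and, for MBX_POLL, busy-waits for the completion.
 * Returns MBX_SUCCESS, MBX_BUSY, MBX_NOT_FINISHED, or the mailbox status
 * for polled commands.
 **/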
8172static int
8173lpfc_sli_issue_mbox_s3(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmbox,
8174 uint32_t flag)
8175{
8176 MAILBOX_t *mbx;
8177 struct lpfc_sli *psli = &phba->sli;
8178 uint32_t status, evtctr;
8179 uint32_t ha_copy, hc_copy;
8180 int i;
8181 unsigned long timeout;
8182 unsigned long drvr_flag = 0;
8183 uint32_t word0, ldata;
8184 void __iomem *to_slim;
8185 int processing_queue = 0;
8186
8187 spin_lock_irqsave(&phba->hbalock, drvr_flag);
8188 if (!pmbox) {
8189 phba->sli.sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
8190
8191 if (unlikely(psli->sli_flag & LPFC_SLI_ASYNC_MBX_BLK)) {
8192 spin_unlock_irqrestore(&phba->hbalock, drvr_flag);
8193 return MBX_SUCCESS;
8194 }
8195 processing_queue = 1;
8196 pmbox = lpfc_mbox_get(phba);
8197 if (!pmbox) {
8198 spin_unlock_irqrestore(&phba->hbalock, drvr_flag);
8199 return MBX_SUCCESS;
8200 }
8201 }
8202
8203 if (pmbox->mbox_cmpl && pmbox->mbox_cmpl != lpfc_sli_def_mbox_cmpl &&
8204 pmbox->mbox_cmpl != lpfc_sli_wake_mbox_wait) {
8205 if (!pmbox->vport) {
8206 spin_unlock_irqrestore(&phba->hbalock, drvr_flag);
8207 lpfc_printf_log(phba, KERN_ERR,
8208 LOG_MBOX | LOG_VPORT,
8209 "1806 Mbox x%x failed. No vport\n",
8210 pmbox->u.mb.mbxCommand);
8211 dump_stack();
8212 goto out_not_finished;
8213 }
8214 }
8215
8216
8217 if (unlikely(pci_channel_offline(phba->pcidev))) {
8218 spin_unlock_irqrestore(&phba->hbalock, drvr_flag);
8219 goto out_not_finished;
8220 }
8221
8222
8223 if (unlikely(phba->hba_flag & DEFER_ERATT)) {
8224 spin_unlock_irqrestore(&phba->hbalock, drvr_flag);
8225 goto out_not_finished;
8226 }
8227
8228 psli = &phba->sli;
8229
8230 mbx = &pmbox->u.mb;
8231 status = MBX_SUCCESS;
8232
8233 if (phba->link_state == LPFC_HBA_ERROR) {
8234 spin_unlock_irqrestore(&phba->hbalock, drvr_flag);
8235
8236
8237 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
8238 "(%d):0311 Mailbox command x%x cannot "
8239 "issue Data: x%x x%x\n",
8240 pmbox->vport ? pmbox->vport->vpi : 0,
8241 pmbox->u.mb.mbxCommand, psli->sli_flag, flag);
8242 goto out_not_finished;
8243 }
8244
8245 if (mbx->mbxCommand != MBX_KILL_BOARD && flag & MBX_NOWAIT) {
8246 if (lpfc_readl(phba->HCregaddr, &hc_copy) ||
8247 !(hc_copy & HC_MBINT_ENA)) {
8248 spin_unlock_irqrestore(&phba->hbalock, drvr_flag);
8249 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
8250 "(%d):2528 Mailbox command x%x cannot "
8251 "issue Data: x%x x%x\n",
8252 pmbox->vport ? pmbox->vport->vpi : 0,
8253 pmbox->u.mb.mbxCommand, psli->sli_flag, flag);
8254 goto out_not_finished;
8255 }
8256 }
8257
8258 if (psli->sli_flag & LPFC_SLI_MBOX_ACTIVE) {
8259
8260
8261
8262
8263
8264 if (flag & MBX_POLL) {
8265 spin_unlock_irqrestore(&phba->hbalock, drvr_flag);
8266
8267
8268 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
8269 "(%d):2529 Mailbox command x%x "
8270 "cannot issue Data: x%x x%x\n",
8271 pmbox->vport ? pmbox->vport->vpi : 0,
8272 pmbox->u.mb.mbxCommand,
8273 psli->sli_flag, flag);
8274 goto out_not_finished;
8275 }
8276
8277 if (!(psli->sli_flag & LPFC_SLI_ACTIVE)) {
8278 spin_unlock_irqrestore(&phba->hbalock, drvr_flag);
8279
8280 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
8281 "(%d):2530 Mailbox command x%x "
8282 "cannot issue Data: x%x x%x\n",
8283 pmbox->vport ? pmbox->vport->vpi : 0,
8284 pmbox->u.mb.mbxCommand,
8285 psli->sli_flag, flag);
8286 goto out_not_finished;
8287 }
8288
8289
8290
8291
8292 lpfc_mbox_put(phba, pmbox);
8293
8294
8295 lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI,
8296 "(%d):0308 Mbox cmd issue - BUSY Data: "
8297 "x%x x%x x%x x%x\n",
8298 pmbox->vport ? pmbox->vport->vpi : 0xffffff,
8299 mbx->mbxCommand,
8300 phba->pport ? phba->pport->port_state : 0xff,
8301 psli->sli_flag, flag);
8302
8303 psli->slistat.mbox_busy++;
8304 spin_unlock_irqrestore(&phba->hbalock, drvr_flag);
8305
8306 if (pmbox->vport) {
8307 lpfc_debugfs_disc_trc(pmbox->vport,
8308 LPFC_DISC_TRC_MBOX_VPORT,
8309 "MBOX Bsy vport: cmd:x%x mb:x%x x%x",
8310 (uint32_t)mbx->mbxCommand,
8311 mbx->un.varWords[0], mbx->un.varWords[1]);
8312 }
8313 else {
8314 lpfc_debugfs_disc_trc(phba->pport,
8315 LPFC_DISC_TRC_MBOX,
8316 "MBOX Bsy: cmd:x%x mb:x%x x%x",
8317 (uint32_t)mbx->mbxCommand,
8318 mbx->un.varWords[0], mbx->un.varWords[1]);
8319 }
8320
8321 return MBX_BUSY;
8322 }
8323
8324 psli->sli_flag |= LPFC_SLI_MBOX_ACTIVE;
8325
8326
8327 if (flag != MBX_POLL) {
8328 if (!(psli->sli_flag & LPFC_SLI_ACTIVE) &&
8329 (mbx->mbxCommand != MBX_KILL_BOARD)) {
8330 psli->sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
8331 spin_unlock_irqrestore(&phba->hbalock, drvr_flag);
8332
8333 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
8334 "(%d):2531 Mailbox command x%x "
8335 "cannot issue Data: x%x x%x\n",
8336 pmbox->vport ? pmbox->vport->vpi : 0,
8337 pmbox->u.mb.mbxCommand,
8338 psli->sli_flag, flag);
8339 goto out_not_finished;
8340 }
8341
8342 timeout = msecs_to_jiffies(lpfc_mbox_tmo_val(phba, pmbox) *
8343 1000);
8344 mod_timer(&psli->mbox_tmo, jiffies + timeout);
8345 }
8346
8347
8348 lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI,
8349 "(%d):0309 Mailbox cmd x%x issue Data: x%x x%x "
8350 "x%x\n",
8351 pmbox->vport ? pmbox->vport->vpi : 0,
8352 mbx->mbxCommand,
8353 phba->pport ? phba->pport->port_state : 0xff,
8354 psli->sli_flag, flag);
8355
8356 if (mbx->mbxCommand != MBX_HEARTBEAT) {
8357 if (pmbox->vport) {
8358 lpfc_debugfs_disc_trc(pmbox->vport,
8359 LPFC_DISC_TRC_MBOX_VPORT,
8360 "MBOX Send vport: cmd:x%x mb:x%x x%x",
8361 (uint32_t)mbx->mbxCommand,
8362 mbx->un.varWords[0], mbx->un.varWords[1]);
8363 }
8364 else {
8365 lpfc_debugfs_disc_trc(phba->pport,
8366 LPFC_DISC_TRC_MBOX,
8367 "MBOX Send: cmd:x%x mb:x%x x%x",
8368 (uint32_t)mbx->mbxCommand,
8369 mbx->un.varWords[0], mbx->un.varWords[1]);
8370 }
8371 }
8372
8373 psli->slistat.mbox_cmd++;
8374 evtctr = psli->slistat.mbox_event;
8375
8376
8377 mbx->mbxOwner = OWN_CHIP;
8378
8379 if (psli->sli_flag & LPFC_SLI_ACTIVE) {
8380
8381 if (pmbox->in_ext_byte_len || pmbox->out_ext_byte_len) {
8382 *(((uint32_t *)mbx) + pmbox->mbox_offset_word)
8383 = (uint8_t *)phba->mbox_ext
8384 - (uint8_t *)phba->mbox;
8385 }
8386
8387
8388 if (pmbox->in_ext_byte_len && pmbox->ctx_buf) {
8389 lpfc_sli_pcimem_bcopy(pmbox->ctx_buf,
8390 (uint8_t *)phba->mbox_ext,
8391 pmbox->in_ext_byte_len);
8392 }
8393
8394 lpfc_sli_pcimem_bcopy(mbx, phba->mbox, MAILBOX_CMD_SIZE);
8395 } else {
8396
8397 if (pmbox->in_ext_byte_len || pmbox->out_ext_byte_len)
8398 *(((uint32_t *)mbx) + pmbox->mbox_offset_word)
8399 = MAILBOX_HBA_EXT_OFFSET;
8400
8401
8402 if (pmbox->in_ext_byte_len && pmbox->ctx_buf)
8403 lpfc_memcpy_to_slim(phba->MBslimaddr +
8404 MAILBOX_HBA_EXT_OFFSET,
8405 pmbox->ctx_buf, pmbox->in_ext_byte_len);
8406
8407 if (mbx->mbxCommand == MBX_CONFIG_PORT)
8408
8409 lpfc_sli_pcimem_bcopy(mbx, phba->mbox,
8410 MAILBOX_CMD_SIZE);
8411
8412
8413
8414 to_slim = phba->MBslimaddr + sizeof (uint32_t);
8415 lpfc_memcpy_to_slim(to_slim, &mbx->un.varWords[0],
8416 MAILBOX_CMD_SIZE - sizeof (uint32_t));
8417
8418
8419 ldata = *((uint32_t *)mbx);
8420 to_slim = phba->MBslimaddr;
8421 writel(ldata, to_slim);
8422 readl(to_slim);
8423
8424 if (mbx->mbxCommand == MBX_CONFIG_PORT)
8425
8426 psli->sli_flag |= LPFC_SLI_ACTIVE;
8427 }
8428
8429 wmb();
8430
8431 switch (flag) {
8432 case MBX_NOWAIT:
8433
8434 psli->mbox_active = pmbox;
8435
8436 writel(CA_MBATT, phba->CAregaddr);
8437 readl(phba->CAregaddr);
8438
8439 break;
8440
8441 case MBX_POLL:
8442
8443 psli->mbox_active = NULL;
8444
8445 writel(CA_MBATT, phba->CAregaddr);
8446 readl(phba->CAregaddr);
8447
8448 if (psli->sli_flag & LPFC_SLI_ACTIVE) {
8449
8450 word0 = *((uint32_t *)phba->mbox);
8451 word0 = le32_to_cpu(word0);
8452 } else {
8453
8454 if (lpfc_readl(phba->MBslimaddr, &word0)) {
8455 spin_unlock_irqrestore(&phba->hbalock,
8456 drvr_flag);
8457 goto out_not_finished;
8458 }
8459 }
8460
8461
8462 if (lpfc_readl(phba->HAregaddr, &ha_copy)) {
8463 spin_unlock_irqrestore(&phba->hbalock,
8464 drvr_flag);
8465 goto out_not_finished;
8466 }
8467 timeout = msecs_to_jiffies(lpfc_mbox_tmo_val(phba, pmbox) *
8468 1000) + jiffies;
8469 i = 0;
8470
8471 while (((word0 & OWN_CHIP) == OWN_CHIP) ||
8472 (!(ha_copy & HA_MBATT) &&
8473 (phba->link_state > LPFC_WARM_START))) {
8474 if (time_after(jiffies, timeout)) {
8475 psli->sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
8476 spin_unlock_irqrestore(&phba->hbalock,
8477 drvr_flag);
8478 goto out_not_finished;
8479 }
8480
8481
8482
8483 if (((word0 & OWN_CHIP) != OWN_CHIP)
8484 && (evtctr != psli->slistat.mbox_event))
8485 break;
8486
8487 if (i++ > 10) {
8488 spin_unlock_irqrestore(&phba->hbalock,
8489 drvr_flag);
8490 msleep(1);
8491 spin_lock_irqsave(&phba->hbalock, drvr_flag);
8492 }
8493
8494 if (psli->sli_flag & LPFC_SLI_ACTIVE) {
8495
8496 word0 = *((uint32_t *)phba->mbox);
8497 word0 = le32_to_cpu(word0);
8498 if (mbx->mbxCommand == MBX_CONFIG_PORT) {
8499 MAILBOX_t *slimmb;
8500 uint32_t slimword0;
8501
8502 slimword0 = readl(phba->MBslimaddr);
8503 slimmb = (MAILBOX_t *) & slimword0;
8504 if (((slimword0 & OWN_CHIP) != OWN_CHIP)
8505 && slimmb->mbxStatus) {
8506 psli->sli_flag &=
8507 ~LPFC_SLI_ACTIVE;
8508 word0 = slimword0;
8509 }
8510 }
8511 } else {
8512
8513 word0 = readl(phba->MBslimaddr);
8514 }
8515
8516 if (lpfc_readl(phba->HAregaddr, &ha_copy)) {
8517 spin_unlock_irqrestore(&phba->hbalock,
8518 drvr_flag);
8519 goto out_not_finished;
8520 }
8521 }
8522
8523 if (psli->sli_flag & LPFC_SLI_ACTIVE) {
8524
8525 lpfc_sli_pcimem_bcopy(phba->mbox, mbx,
8526 MAILBOX_CMD_SIZE);
8527
8528 if (pmbox->out_ext_byte_len && pmbox->ctx_buf) {
8529 lpfc_sli_pcimem_bcopy(phba->mbox_ext,
8530 pmbox->ctx_buf,
8531 pmbox->out_ext_byte_len);
8532 }
8533 } else {
8534
8535 lpfc_memcpy_from_slim(mbx, phba->MBslimaddr,
8536 MAILBOX_CMD_SIZE);
8537
8538 if (pmbox->out_ext_byte_len && pmbox->ctx_buf) {
8539 lpfc_memcpy_from_slim(
8540 pmbox->ctx_buf,
8541 phba->MBslimaddr +
8542 MAILBOX_HBA_EXT_OFFSET,
8543 pmbox->out_ext_byte_len);
8544 }
8545 }
8546
8547 writel(HA_MBATT, phba->HAregaddr);
8548 readl(phba->HAregaddr);
8549
8550 psli->sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
8551 status = mbx->mbxStatus;
8552 }
8553
8554 spin_unlock_irqrestore(&phba->hbalock, drvr_flag);
8555 return status;
8556
8557out_not_finished:
8558 if (processing_queue) {
8559 pmbox->u.mb.mbxStatus = MBX_NOT_FINISHED;
8560 lpfc_mbox_cmpl_put(phba, pmbox);
8561 }
8562 return MBX_NOT_FINISHED;
8563}
8564
8565
8566
8567
8568
8569
8570
8571
8572
8573
8574
8575
8576
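/**
 * lpfc_sli4_async_mbox_block - Block posting of asynchronous mailbox commands
 * @phba: pointer to the HBA context object
 *
 * Sets LPFC_SLI_ASYNC_MBX_BLK and waits for any active mailbox command to
 * finish.  Returns 0 on success or 1 if the wait timed out, in which case
 * the block flag is cleared again.
 **/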
8577static int
8578lpfc_sli4_async_mbox_block(struct lpfc_hba *phba)
8579{
8580 struct lpfc_sli *psli = &phba->sli;
8581 int rc = 0;
8582 unsigned long timeout = 0;
8583
8584
8585 spin_lock_irq(&phba->hbalock);
8586 psli->sli_flag |= LPFC_SLI_ASYNC_MBX_BLK;
8587
8588
8589
8590 if (phba->sli.mbox_active)
8591 timeout = msecs_to_jiffies(lpfc_mbox_tmo_val(phba,
8592 phba->sli.mbox_active) *
8593 1000) + jiffies;
8594 spin_unlock_irq(&phba->hbalock);
8595
8596
8597 if (timeout)
8598 lpfc_sli4_process_missed_mbox_completions(phba);
8599
8600
8601 while (phba->sli.mbox_active) {
8602
8603 msleep(2);
8604 if (time_after(jiffies, timeout)) {
8605
8606 rc = 1;
8607 break;
8608 }
8609 }
8610
8611
8612 if (rc) {
8613 spin_lock_irq(&phba->hbalock);
8614 psli->sli_flag &= ~LPFC_SLI_ASYNC_MBX_BLK;
8615 spin_unlock_irq(&phba->hbalock);
8616 }
8617 return rc;
8618}
8619
8620
8621
8622
8623
8624
8625
8626
8627
8628
8629
8630
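/**
 * lpfc_sli4_async_mbox_unblock - Re-enable asynchronous mailbox posting
 * @phba: pointer to the HBA context object
 *
 * Clears LPFC_SLI_ASYNC_MBX_BLK and wakes the worker thread so queued
 * mailbox commands get posted.
 **/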
8631static void
8632lpfc_sli4_async_mbox_unblock(struct lpfc_hba *phba)
8633{
8634 struct lpfc_sli *psli = &phba->sli;
8635
8636 spin_lock_irq(&phba->hbalock);
8637 if (!(psli->sli_flag & LPFC_SLI_ASYNC_MBX_BLK)) {
8638
8639 spin_unlock_irq(&phba->hbalock);
8640 return;
8641 }
8642
8643
8644
8645
8646
8647
8648 psli->sli_flag &= ~LPFC_SLI_ASYNC_MBX_BLK;
8649 spin_unlock_irq(&phba->hbalock);
8650
8651
8652 lpfc_worker_wake_up(phba);
8653}
8654
8655
8656
8657
8658
8659
8660
8661
8662
8663
8664
8665
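/**
 * lpfc_sli4_wait_bmbx_ready - Wait for the bootstrap mailbox ready bit
 * @phba: pointer to the HBA context object
 * @mboxq: mailbox command used to derive the timeout value
 *
 * Polls the bootstrap mailbox register until the ready bit is set.
 * Returns 0 when ready or MBXERR_ERROR on timeout.
 **/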
8666static int
8667lpfc_sli4_wait_bmbx_ready(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
8668{
8669 uint32_t db_ready;
8670 unsigned long timeout;
8671 struct lpfc_register bmbx_reg;
8672
8673 timeout = msecs_to_jiffies(lpfc_mbox_tmo_val(phba, mboxq)
8674 * 1000) + jiffies;
8675
8676 do {
8677 bmbx_reg.word0 = readl(phba->sli4_hba.BMBXregaddr);
8678 db_ready = bf_get(lpfc_bmbx_rdy, &bmbx_reg);
8679 if (!db_ready)
8680 mdelay(2);
8681
8682 if (time_after(jiffies, timeout))
8683 return MBXERR_ERROR;
8684 } while (!db_ready);
8685
8686 return 0;
8687}
8688
8689
8690
8691
8692
8693
8694
8695
8696
8697
8698
8699
8700
8701
8702
8703
8704
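/**
 * lpfc_sli4_post_sync_mbox - Post a mailbox command via the bootstrap mailbox
 * @phba: pointer to the HBA context object
 * @mboxq: mailbox command to post
 *
 * Copies the MQE into the bootstrap mailbox region, writes the high and low
 * DMA addresses to the BMBX register, waits for completion and copies the
 * response MQE/MCQE back.  Returns MBX_SUCCESS or MBXERR_ERROR.
 **/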
8705static int
8706lpfc_sli4_post_sync_mbox(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
8707{
8708 int rc = MBX_SUCCESS;
8709 unsigned long iflag;
8710 uint32_t mcqe_status;
8711 uint32_t mbx_cmnd;
8712 struct lpfc_sli *psli = &phba->sli;
8713 struct lpfc_mqe *mb = &mboxq->u.mqe;
8714 struct lpfc_bmbx_create *mbox_rgn;
8715 struct dma_address *dma_address;
8716
8717
8718
8719
8720
8721 spin_lock_irqsave(&phba->hbalock, iflag);
8722 if (psli->sli_flag & LPFC_SLI_MBOX_ACTIVE) {
8723 spin_unlock_irqrestore(&phba->hbalock, iflag);
8724 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
8725 "(%d):2532 Mailbox command x%x (x%x/x%x) "
8726 "cannot issue Data: x%x x%x\n",
8727 mboxq->vport ? mboxq->vport->vpi : 0,
8728 mboxq->u.mb.mbxCommand,
8729 lpfc_sli_config_mbox_subsys_get(phba, mboxq),
8730 lpfc_sli_config_mbox_opcode_get(phba, mboxq),
8731 psli->sli_flag, MBX_POLL);
8732 return MBXERR_ERROR;
8733 }
8734
8735 psli->sli_flag |= LPFC_SLI_MBOX_ACTIVE;
8736 phba->sli.mbox_active = mboxq;
8737 spin_unlock_irqrestore(&phba->hbalock, iflag);
8738
8739
8740 rc = lpfc_sli4_wait_bmbx_ready(phba, mboxq);
8741 if (rc)
8742 goto exit;
8743
8744
8745
8746
8747
8748 mbx_cmnd = bf_get(lpfc_mqe_command, mb);
8749 memset(phba->sli4_hba.bmbx.avirt, 0, sizeof(struct lpfc_bmbx_create));
8750 lpfc_sli4_pcimem_bcopy(mb, phba->sli4_hba.bmbx.avirt,
8751 sizeof(struct lpfc_mqe));
8752
8753
8754 dma_address = &phba->sli4_hba.bmbx.dma_address;
8755 writel(dma_address->addr_hi, phba->sli4_hba.BMBXregaddr);
8756
8757
8758 rc = lpfc_sli4_wait_bmbx_ready(phba, mboxq);
8759 if (rc)
8760 goto exit;
8761
8762
8763 writel(dma_address->addr_lo, phba->sli4_hba.BMBXregaddr);
8764
8765
8766 rc = lpfc_sli4_wait_bmbx_ready(phba, mboxq);
8767 if (rc)
8768 goto exit;
8769
8770
8771
8772
8773
8774
8775 lpfc_sli4_pcimem_bcopy(phba->sli4_hba.bmbx.avirt, mb,
8776 sizeof(struct lpfc_mqe));
8777 mbox_rgn = (struct lpfc_bmbx_create *) phba->sli4_hba.bmbx.avirt;
8778 lpfc_sli4_pcimem_bcopy(&mbox_rgn->mcqe, &mboxq->mcqe,
8779 sizeof(struct lpfc_mcqe));
8780 mcqe_status = bf_get(lpfc_mcqe_status, &mbox_rgn->mcqe);
8781
8782
8783
8784
8785
8786 if (mcqe_status != MB_CQE_STATUS_SUCCESS) {
8787 if (bf_get(lpfc_mqe_status, mb) == MBX_SUCCESS)
8788 bf_set(lpfc_mqe_status, mb,
8789 (LPFC_MBX_ERROR_RANGE | mcqe_status));
8790 rc = MBXERR_ERROR;
8791 } else
8792 lpfc_sli4_swap_str(phba, mboxq);
8793
8794 lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI,
8795 "(%d):0356 Mailbox cmd x%x (x%x/x%x) Status x%x "
8796 "Data: x%x x%x x%x x%x x%x x%x x%x x%x x%x x%x x%x"
8797 " x%x x%x CQ: x%x x%x x%x x%x\n",
8798 mboxq->vport ? mboxq->vport->vpi : 0, mbx_cmnd,
8799 lpfc_sli_config_mbox_subsys_get(phba, mboxq),
8800 lpfc_sli_config_mbox_opcode_get(phba, mboxq),
8801 bf_get(lpfc_mqe_status, mb),
8802 mb->un.mb_words[0], mb->un.mb_words[1],
8803 mb->un.mb_words[2], mb->un.mb_words[3],
8804 mb->un.mb_words[4], mb->un.mb_words[5],
8805 mb->un.mb_words[6], mb->un.mb_words[7],
8806 mb->un.mb_words[8], mb->un.mb_words[9],
8807 mb->un.mb_words[10], mb->un.mb_words[11],
8808 mb->un.mb_words[12], mboxq->mcqe.word0,
8809 mboxq->mcqe.mcqe_tag0, mboxq->mcqe.mcqe_tag1,
8810 mboxq->mcqe.trailer);
8811exit:
8812
8813 spin_lock_irqsave(&phba->hbalock, iflag);
8814 psli->sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
8815 phba->sli.mbox_active = NULL;
8816 spin_unlock_irqrestore(&phba->hbalock, iflag);
8817 return rc;
8818}
8819
8820
8821
8822
8823
8824
8825
8826
8827
8828
8829
8830
8831
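/**
 * lpfc_sli_issue_mbox_s4 - Issue a mailbox command on an SLI-4 port
 * @phba: pointer to the HBA context object
 * @mboxq: mailbox command to issue
 * @flag: MBX_NOWAIT or MBX_POLL
 *
 * With interrupts disabled or for MBX_POLL, the command is posted
 * synchronously through the bootstrap mailbox; otherwise it is queued for
 * the asynchronous mailbox path.  Returns MBX_BUSY when the command was
 * queued, or the synchronous completion status.
 **/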
8832static int
8833lpfc_sli_issue_mbox_s4(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq,
8834 uint32_t flag)
8835{
8836 struct lpfc_sli *psli = &phba->sli;
8837 unsigned long iflags;
8838 int rc;
8839
8840
8841 lpfc_idiag_mbxacc_dump_issue_mbox(phba, &mboxq->u.mb);
8842
8843 rc = lpfc_mbox_dev_check(phba);
8844 if (unlikely(rc)) {
8845 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
8846 "(%d):2544 Mailbox command x%x (x%x/x%x) "
8847 "cannot issue Data: x%x x%x\n",
8848 mboxq->vport ? mboxq->vport->vpi : 0,
8849 mboxq->u.mb.mbxCommand,
8850 lpfc_sli_config_mbox_subsys_get(phba, mboxq),
8851 lpfc_sli_config_mbox_opcode_get(phba, mboxq),
8852 psli->sli_flag, flag);
8853 goto out_not_finished;
8854 }
8855
8856
8857 if (!phba->sli4_hba.intr_enable) {
8858 if (flag == MBX_POLL)
8859 rc = lpfc_sli4_post_sync_mbox(phba, mboxq);
8860 else
8861 rc = -EIO;
8862 if (rc != MBX_SUCCESS)
8863 lpfc_printf_log(phba, KERN_WARNING, LOG_MBOX | LOG_SLI,
8864 "(%d):2541 Mailbox command x%x "
8865 "(x%x/x%x) failure: "
8866 "mqe_sta: x%x mcqe_sta: x%x/x%x "
8867 "Data: x%x x%x\n,",
8868 mboxq->vport ? mboxq->vport->vpi : 0,
8869 mboxq->u.mb.mbxCommand,
8870 lpfc_sli_config_mbox_subsys_get(phba,
8871 mboxq),
8872 lpfc_sli_config_mbox_opcode_get(phba,
8873 mboxq),
8874 bf_get(lpfc_mqe_status, &mboxq->u.mqe),
8875 bf_get(lpfc_mcqe_status, &mboxq->mcqe),
8876 bf_get(lpfc_mcqe_ext_status,
8877 &mboxq->mcqe),
8878 psli->sli_flag, flag);
8879 return rc;
8880 } else if (flag == MBX_POLL) {
8881 lpfc_printf_log(phba, KERN_WARNING, LOG_MBOX | LOG_SLI,
8882 "(%d):2542 Try to issue mailbox command "
8883 "x%x (x%x/x%x) synchronously ahead of async "
8884 "mailbox command queue: x%x x%x\n",
8885 mboxq->vport ? mboxq->vport->vpi : 0,
8886 mboxq->u.mb.mbxCommand,
8887 lpfc_sli_config_mbox_subsys_get(phba, mboxq),
8888 lpfc_sli_config_mbox_opcode_get(phba, mboxq),
8889 psli->sli_flag, flag);
8890
8891 rc = lpfc_sli4_async_mbox_block(phba);
8892 if (!rc) {
8893
8894 rc = lpfc_sli4_post_sync_mbox(phba, mboxq);
8895 if (rc != MBX_SUCCESS)
8896 lpfc_printf_log(phba, KERN_WARNING,
8897 LOG_MBOX | LOG_SLI,
8898 "(%d):2597 Sync Mailbox command "
8899 "x%x (x%x/x%x) failure: "
8900 "mqe_sta: x%x mcqe_sta: x%x/x%x "
8901 "Data: x%x x%x\n,",
8902 mboxq->vport ? mboxq->vport->vpi : 0,
8903 mboxq->u.mb.mbxCommand,
8904 lpfc_sli_config_mbox_subsys_get(phba,
8905 mboxq),
8906 lpfc_sli_config_mbox_opcode_get(phba,
8907 mboxq),
8908 bf_get(lpfc_mqe_status, &mboxq->u.mqe),
8909 bf_get(lpfc_mcqe_status, &mboxq->mcqe),
8910 bf_get(lpfc_mcqe_ext_status,
8911 &mboxq->mcqe),
8912 psli->sli_flag, flag);
8913
8914 lpfc_sli4_async_mbox_unblock(phba);
8915 }
8916 return rc;
8917 }
8918
8919
8920 rc = lpfc_mbox_cmd_check(phba, mboxq);
8921 if (rc) {
8922 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
8923 "(%d):2543 Mailbox command x%x (x%x/x%x) "
8924 "cannot issue Data: x%x x%x\n",
8925 mboxq->vport ? mboxq->vport->vpi : 0,
8926 mboxq->u.mb.mbxCommand,
8927 lpfc_sli_config_mbox_subsys_get(phba, mboxq),
8928 lpfc_sli_config_mbox_opcode_get(phba, mboxq),
8929 psli->sli_flag, flag);
8930 goto out_not_finished;
8931 }
8932
8933
8934 psli->slistat.mbox_busy++;
8935 spin_lock_irqsave(&phba->hbalock, iflags);
8936 lpfc_mbox_put(phba, mboxq);
8937 spin_unlock_irqrestore(&phba->hbalock, iflags);
8938 lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI,
8939 "(%d):0354 Mbox cmd issue - Enqueue Data: "
8940 "x%x (x%x/x%x) x%x x%x x%x\n",
8941 mboxq->vport ? mboxq->vport->vpi : 0xffffff,
8942 bf_get(lpfc_mqe_command, &mboxq->u.mqe),
8943 lpfc_sli_config_mbox_subsys_get(phba, mboxq),
8944 lpfc_sli_config_mbox_opcode_get(phba, mboxq),
8945 phba->pport->port_state,
8946 psli->sli_flag, MBX_NOWAIT);
8947
8948 lpfc_worker_wake_up(phba);
8949
8950 return MBX_BUSY;
8951
8952out_not_finished:
8953 return MBX_NOT_FINISHED;
8954}
8955
8956
8957
8958
8959
8960
8961
8962
8963
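/**
 * lpfc_sli4_post_async_mbox - Post the next queued mailbox command to the MQ
 * @phba: pointer to the HBA context object
 *
 * Dequeues one queued mailbox command, marks it active, starts the mailbox
 * timer and posts it to the mailbox work queue.  Returns MBX_SUCCESS when
 * the command was posted (or there was nothing to do) and MBX_NOT_FINISHED
 * on failure.
 **/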
8964int
8965lpfc_sli4_post_async_mbox(struct lpfc_hba *phba)
8966{
8967 struct lpfc_sli *psli = &phba->sli;
8968 LPFC_MBOXQ_t *mboxq;
8969 int rc = MBX_SUCCESS;
8970 unsigned long iflags;
8971 struct lpfc_mqe *mqe;
8972 uint32_t mbx_cmnd;
8973
8974
8975 if (unlikely(!phba->sli4_hba.intr_enable))
8976 return MBX_NOT_FINISHED;
8977
8978
8979 spin_lock_irqsave(&phba->hbalock, iflags);
8980 if (unlikely(psli->sli_flag & LPFC_SLI_ASYNC_MBX_BLK)) {
8981 spin_unlock_irqrestore(&phba->hbalock, iflags);
8982 return MBX_NOT_FINISHED;
8983 }
8984 if (psli->sli_flag & LPFC_SLI_MBOX_ACTIVE) {
8985 spin_unlock_irqrestore(&phba->hbalock, iflags);
8986 return MBX_NOT_FINISHED;
8987 }
8988 if (unlikely(phba->sli.mbox_active)) {
8989 spin_unlock_irqrestore(&phba->hbalock, iflags);
8990 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
8991 "0384 There is pending active mailbox cmd\n");
8992 return MBX_NOT_FINISHED;
8993 }
8994
8995 psli->sli_flag |= LPFC_SLI_MBOX_ACTIVE;
8996
8997
8998 mboxq = lpfc_mbox_get(phba);
8999
9000
9001 if (!mboxq) {
9002 psli->sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
9003 spin_unlock_irqrestore(&phba->hbalock, iflags);
9004 return MBX_SUCCESS;
9005 }
9006 phba->sli.mbox_active = mboxq;
9007 spin_unlock_irqrestore(&phba->hbalock, iflags);
9008
9009
9010 rc = lpfc_mbox_dev_check(phba);
9011 if (unlikely(rc))
9012
9013 goto out_not_finished;
9014
9015
9016 mqe = &mboxq->u.mqe;
9017 mbx_cmnd = bf_get(lpfc_mqe_command, mqe);
9018
9019
9020 mod_timer(&psli->mbox_tmo, (jiffies +
9021 msecs_to_jiffies(1000 * lpfc_mbox_tmo_val(phba, mboxq))));
9022
9023 lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI,
9024 "(%d):0355 Mailbox cmd x%x (x%x/x%x) issue Data: "
9025 "x%x x%x\n",
9026 mboxq->vport ? mboxq->vport->vpi : 0, mbx_cmnd,
9027 lpfc_sli_config_mbox_subsys_get(phba, mboxq),
9028 lpfc_sli_config_mbox_opcode_get(phba, mboxq),
9029 phba->pport->port_state, psli->sli_flag);
9030
9031 if (mbx_cmnd != MBX_HEARTBEAT) {
9032 if (mboxq->vport) {
9033 lpfc_debugfs_disc_trc(mboxq->vport,
9034 LPFC_DISC_TRC_MBOX_VPORT,
9035 "MBOX Send vport: cmd:x%x mb:x%x x%x",
9036 mbx_cmnd, mqe->un.mb_words[0],
9037 mqe->un.mb_words[1]);
9038 } else {
9039 lpfc_debugfs_disc_trc(phba->pport,
9040 LPFC_DISC_TRC_MBOX,
9041 "MBOX Send: cmd:x%x mb:x%x x%x",
9042 mbx_cmnd, mqe->un.mb_words[0],
9043 mqe->un.mb_words[1]);
9044 }
9045 }
9046 psli->slistat.mbox_cmd++;
9047
9048
9049 rc = lpfc_sli4_mq_put(phba->sli4_hba.mbx_wq, mqe);
9050 if (rc != MBX_SUCCESS) {
9051 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
9052 "(%d):2533 Mailbox command x%x (x%x/x%x) "
9053 "cannot issue Data: x%x x%x\n",
9054 mboxq->vport ? mboxq->vport->vpi : 0,
9055 mboxq->u.mb.mbxCommand,
9056 lpfc_sli_config_mbox_subsys_get(phba, mboxq),
9057 lpfc_sli_config_mbox_opcode_get(phba, mboxq),
9058 psli->sli_flag, MBX_NOWAIT);
9059 goto out_not_finished;
9060 }
9061
9062 return rc;
9063
9064out_not_finished:
9065 spin_lock_irqsave(&phba->hbalock, iflags);
9066 if (phba->sli.mbox_active) {
9067 mboxq->u.mb.mbxStatus = MBX_NOT_FINISHED;
9068 __lpfc_mbox_cmpl_put(phba, mboxq);
9069
9070 psli->sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
9071 phba->sli.mbox_active = NULL;
9072 }
9073 spin_unlock_irqrestore(&phba->hbalock, iflags);
9074
9075 return MBX_NOT_FINISHED;
9076}
9077
9078
9079
9080
9081
9082
9083
9084
9085
9086
9087
9088
9089
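/**
 * lpfc_sli_issue_mbox - Issue a mailbox command through the API jump table
 * @phba: pointer to the HBA context object
 * @pmbox: mailbox command to issue
 * @flag: MBX_NOWAIT or MBX_POLL
 *
 * Dispatches to the SLI-3 or SLI-4 issue routine installed by
 * lpfc_mbox_api_table_setup().
 **/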
9090int
9091lpfc_sli_issue_mbox(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmbox, uint32_t flag)
9092{
9093 return phba->lpfc_sli_issue_mbox(phba, pmbox, flag);
9094}
9095
9096
9097
9098
9099
9100
9101
9102
9103
9104
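/**
 * lpfc_mbox_api_table_setup - Set up the mailbox/SLI API jump table
 * @phba: pointer to the HBA context object
 * @dev_grp: PCI device group (LPFC_PCI_DEV_LP or LPFC_PCI_DEV_OC)
 *
 * Installs the SLI-3 or SLI-4 variants of the mailbox and SLI routines.
 * Returns 0 on success or -ENODEV for an unknown device group.
 **/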
9105int
9106lpfc_mbox_api_table_setup(struct lpfc_hba *phba, uint8_t dev_grp)
9107{
9108
9109 switch (dev_grp) {
9110 case LPFC_PCI_DEV_LP:
9111 phba->lpfc_sli_issue_mbox = lpfc_sli_issue_mbox_s3;
9112 phba->lpfc_sli_handle_slow_ring_event =
9113 lpfc_sli_handle_slow_ring_event_s3;
9114 phba->lpfc_sli_hbq_to_firmware = lpfc_sli_hbq_to_firmware_s3;
9115 phba->lpfc_sli_brdrestart = lpfc_sli_brdrestart_s3;
9116 phba->lpfc_sli_brdready = lpfc_sli_brdready_s3;
9117 break;
9118 case LPFC_PCI_DEV_OC:
9119 phba->lpfc_sli_issue_mbox = lpfc_sli_issue_mbox_s4;
9120 phba->lpfc_sli_handle_slow_ring_event =
9121 lpfc_sli_handle_slow_ring_event_s4;
9122 phba->lpfc_sli_hbq_to_firmware = lpfc_sli_hbq_to_firmware_s4;
9123 phba->lpfc_sli_brdrestart = lpfc_sli_brdrestart_s4;
9124 phba->lpfc_sli_brdready = lpfc_sli_brdready_s4;
9125 break;
9126 default:
9127 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
9128 "1420 Invalid HBA PCI-device group: 0x%x\n",
9129 dev_grp);
9130 return -ENODEV;
9131 break;
9132 }
9133 return 0;
9134}
9135
9136
9137
9138
9139
9140
9141
9142
9143
9144
9145
9146
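/**
 * __lpfc_sli_ringtx_put - Add an iocb to a ring's transmit queue
 * @phba: pointer to the HBA context object
 * @pring: pointer to the driver SLI ring object
 * @piocb: iocb to queue
 *
 * Appends the iocb to the ring's txq.  The caller must hold the ring lock
 * (SLI-4) or the hbalock (SLI-3).
 **/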
9147void
9148__lpfc_sli_ringtx_put(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
9149 struct lpfc_iocbq *piocb)
9150{
9151 if (phba->sli_rev == LPFC_SLI_REV4)
9152 lockdep_assert_held(&pring->ring_lock);
9153 else
9154 lockdep_assert_held(&phba->hbalock);
9155
9156 list_add_tail(&piocb->list, &pring->txq);
9157}
9158
9159
9160
9161
9162
9163
9164
9165
9166
9167
9168
9169
9170
9171
9172
9173
9174
9175
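/**
 * lpfc_sli_next_iocb - Get the next iocb to submit to the ring
 * @phba: pointer to the HBA context object
 * @pring: pointer to the driver SLI ring object
 * @piocb: pointer to the new iocb being issued
 *
 * Returns the next iocb from the txq if one is queued, otherwise consumes
 * and returns *piocb (setting it to NULL).  Called with hbalock held.
 **/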
9176static struct lpfc_iocbq *
9177lpfc_sli_next_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
9178 struct lpfc_iocbq **piocb)
9179{
9180 struct lpfc_iocbq * nextiocb;
9181
9182 lockdep_assert_held(&phba->hbalock);
9183
9184 nextiocb = lpfc_sli_ringtx_get(phba, pring);
9185 if (!nextiocb) {
9186 nextiocb = *piocb;
9187 *piocb = NULL;
9188 }
9189
9190 return nextiocb;
9191}
9192
9193
9194
9195
9196
9197
9198
9199
9200
9201
9202
9203
9204
9205
9206
9207
9208
9209
9210
9211
9212
9213
9214
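/**
 * __lpfc_sli_issue_iocb_s3 - Issue an iocb on an SLI-3 ring
 * @phba: pointer to the HBA context object
 * @ring_number: SLI ring number
 * @piocb: iocb to issue
 * @flag: SLI_IOCB_RET_IOCB to return IOCB_BUSY instead of queueing
 *
 * Validates link and ring state, submits as many iocbs as there are free
 * ring slots, and either queues or rejects the remainder.  Returns
 * IOCB_SUCCESS, IOCB_BUSY or IOCB_ERROR.  Called with hbalock held.
 **/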
9215static int
9216__lpfc_sli_issue_iocb_s3(struct lpfc_hba *phba, uint32_t ring_number,
9217 struct lpfc_iocbq *piocb, uint32_t flag)
9218{
9219 struct lpfc_iocbq *nextiocb;
9220 IOCB_t *iocb;
9221 struct lpfc_sli_ring *pring = &phba->sli.sli3_ring[ring_number];
9222
9223 lockdep_assert_held(&phba->hbalock);
9224
9225 if (piocb->iocb_cmpl && (!piocb->vport) &&
9226 (piocb->iocb.ulpCommand != CMD_ABORT_XRI_CN) &&
9227 (piocb->iocb.ulpCommand != CMD_CLOSE_XRI_CN)) {
9228 lpfc_printf_log(phba, KERN_ERR,
9229 LOG_SLI | LOG_VPORT,
9230 "1807 IOCB x%x failed. No vport\n",
9231 piocb->iocb.ulpCommand);
9232 dump_stack();
9233 return IOCB_ERROR;
9234 }
9235
9236
9237
9238 if (unlikely(pci_channel_offline(phba->pcidev)))
9239 return IOCB_ERROR;
9240
9241
9242 if (unlikely(phba->hba_flag & DEFER_ERATT))
9243 return IOCB_ERROR;
9244
9245
9246
9247
9248 if (unlikely(phba->link_state < LPFC_LINK_DOWN))
9249 return IOCB_ERROR;
9250
9251
9252
9253
9254
9255 if (unlikely(pring->flag & LPFC_STOP_IOCB_EVENT))
9256 goto iocb_busy;
9257
9258 if (unlikely(phba->link_state == LPFC_LINK_DOWN)) {
9259
9260
9261
9262
9263 switch (piocb->iocb.ulpCommand) {
9264 case CMD_GEN_REQUEST64_CR:
9265 case CMD_GEN_REQUEST64_CX:
9266 if (!(phba->sli.sli_flag & LPFC_MENLO_MAINT) ||
9267 (piocb->iocb.un.genreq64.w5.hcsw.Rctl !=
9268 FC_RCTL_DD_UNSOL_CMD) ||
9269 (piocb->iocb.un.genreq64.w5.hcsw.Type !=
9270 MENLO_TRANSPORT_TYPE))
9271
9272 goto iocb_busy;
9273 break;
9274 case CMD_QUE_RING_BUF_CN:
9275 case CMD_QUE_RING_BUF64_CN:
9276
9277
9278
9279
9280 if (piocb->iocb_cmpl)
9281 piocb->iocb_cmpl = NULL;
9282
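/* fall through - these commands are also permitted while the link is down */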
9283 case CMD_CREATE_XRI_CR:
9284 case CMD_CLOSE_XRI_CN:
9285 case CMD_CLOSE_XRI_CX:
9286 break;
9287 default:
9288 goto iocb_busy;
9289 }
9290
9291
9292
9293
9294
9295 } else if (unlikely(pring->ringno == LPFC_FCP_RING &&
9296 !(phba->sli.sli_flag & LPFC_PROCESS_LA))) {
9297 goto iocb_busy;
9298 }
9299
9300 while ((iocb = lpfc_sli_next_iocb_slot(phba, pring)) &&
9301 (nextiocb = lpfc_sli_next_iocb(phba, pring, &piocb)))
9302 lpfc_sli_submit_iocb(phba, pring, iocb, nextiocb);
9303
9304 if (iocb)
9305 lpfc_sli_update_ring(phba, pring);
9306 else
9307 lpfc_sli_update_full_ring(phba, pring);
9308
9309 if (!piocb)
9310 return IOCB_SUCCESS;
9311
9312 goto out_busy;
9313
9314 iocb_busy:
9315 pring->stats.iocb_cmd_delay++;
9316
9317 out_busy:
9318
9319 if (!(flag & SLI_IOCB_RET_IOCB)) {
9320 __lpfc_sli_ringtx_put(phba, pring, piocb);
9321 return IOCB_SUCCESS;
9322 }
9323
9324 return IOCB_BUSY;
9325}
9326
9327
9328
9329
9330
9331
9332
9333
9334
9335
9336
9337
9338
9339
9340
9341
9342
9343
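/**
 * lpfc_sli4_bpl2sgl - Convert an iocb's BPL or BDE into the sglq's SGL
 * @phba: pointer to the HBA context object
 * @piocbq: iocb whose buffer list is converted
 * @sglq: sglq that receives the SGEs
 *
 * Copies each BDE of the command's buffer pointer list (or its single
 * embedded BDE) into the SGL, marking the last SGE and, for GEN_REQUEST
 * commands, the data offsets.  Returns the sglq's XRI tag, or NO_XRI on
 * failure.
 **/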
9344static uint16_t
9345lpfc_sli4_bpl2sgl(struct lpfc_hba *phba, struct lpfc_iocbq *piocbq,
9346 struct lpfc_sglq *sglq)
9347{
9348 uint16_t xritag = NO_XRI;
9349 struct ulp_bde64 *bpl = NULL;
9350 struct ulp_bde64 bde;
9351 struct sli4_sge *sgl = NULL;
9352 struct lpfc_dmabuf *dmabuf;
9353 IOCB_t *icmd;
9354 int numBdes = 0;
9355 int i = 0;
9356 uint32_t offset = 0;
9357 int inbound = 0;
9358
9359 if (!piocbq || !sglq)
9360 return xritag;
9361
9362 sgl = (struct sli4_sge *)sglq->sgl;
9363 icmd = &piocbq->iocb;
9364 if (icmd->ulpCommand == CMD_XMIT_BLS_RSP64_CX)
9365 return sglq->sli4_xritag;
9366 if (icmd->un.genreq64.bdl.bdeFlags == BUFF_TYPE_BLP_64) {
9367 numBdes = icmd->un.genreq64.bdl.bdeSize /
9368 sizeof(struct ulp_bde64);
9369
9370
9371
9372
9373 if (piocbq->context3)
9374 dmabuf = (struct lpfc_dmabuf *)piocbq->context3;
9375 else
9376 return xritag;
9377
9378 bpl = (struct ulp_bde64 *)dmabuf->virt;
9379 if (!bpl)
9380 return xritag;
9381
9382 for (i = 0; i < numBdes; i++) {
9383
9384 sgl->addr_hi = bpl->addrHigh;
9385 sgl->addr_lo = bpl->addrLow;
9386
9387 sgl->word2 = le32_to_cpu(sgl->word2);
9388 if ((i+1) == numBdes)
9389 bf_set(lpfc_sli4_sge_last, sgl, 1);
9390 else
9391 bf_set(lpfc_sli4_sge_last, sgl, 0);
9392
9393
9394
9395 bde.tus.w = le32_to_cpu(bpl->tus.w);
9396 sgl->sge_len = cpu_to_le32(bde.tus.f.bdeSize);
9397
9398
9399
9400
9401 if (piocbq->iocb.ulpCommand == CMD_GEN_REQUEST64_CR) {
9402
9403 if (bpl->tus.f.bdeFlags == BUFF_TYPE_BDE_64I)
9404 inbound++;
9405
9406 if (inbound == 1)
9407 offset = 0;
9408 bf_set(lpfc_sli4_sge_offset, sgl, offset);
9409 bf_set(lpfc_sli4_sge_type, sgl,
9410 LPFC_SGE_TYPE_DATA);
9411 offset += bde.tus.f.bdeSize;
9412 }
9413 sgl->word2 = cpu_to_le32(sgl->word2);
9414 bpl++;
9415 sgl++;
9416 }
9417 } else if (icmd->un.genreq64.bdl.bdeFlags == BUFF_TYPE_BDE_64) {
9418
9419
9420
9421
9422 sgl->addr_hi =
9423 cpu_to_le32(icmd->un.genreq64.bdl.addrHigh);
9424 sgl->addr_lo =
9425 cpu_to_le32(icmd->un.genreq64.bdl.addrLow);
9426 sgl->word2 = le32_to_cpu(sgl->word2);
9427 bf_set(lpfc_sli4_sge_last, sgl, 1);
9428 sgl->word2 = cpu_to_le32(sgl->word2);
9429 sgl->sge_len =
9430 cpu_to_le32(icmd->un.genreq64.bdl.bdeSize);
9431 }
9432 return sglq->sli4_xritag;
9433}
9434
9435
9436
9437
9438
9439
9440
9441
9442
9443
9444
9445
9446
9447
9448
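/**
 * lpfc_sli4_iocb2wqe - Translate an SLI-3 style iocb into an SLI-4 WQE
 * @phba: pointer to the HBA context object
 * @iocbq: iocb to translate
 * @wqe: 128-byte work queue entry to fill in
 *
 * Builds the WQE for the iocb's ulpCommand (ELS requests/responses, FCP
 * read/write/command, sequences, generic requests, ...), including the
 * embedded FCP_CMND payload when fcp_embed_io is enabled.  Returns
 * IOCB_ERROR if the command cannot be translated.
 **/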
9449static int
9450lpfc_sli4_iocb2wqe(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq,
9451 union lpfc_wqe128 *wqe)
9452{
9453 uint32_t xmit_len = 0, total_len = 0;
9454 uint8_t ct = 0;
9455 uint32_t fip;
9456 uint32_t abort_tag;
9457 uint8_t command_type = ELS_COMMAND_NON_FIP;
9458 uint8_t cmnd;
9459 uint16_t xritag;
9460 uint16_t abrt_iotag;
9461 struct lpfc_iocbq *abrtiocbq;
9462 struct ulp_bde64 *bpl = NULL;
9463 uint32_t els_id = LPFC_ELS_ID_DEFAULT;
9464 int numBdes, i;
9465 struct ulp_bde64 bde;
9466 struct lpfc_nodelist *ndlp;
9467 uint32_t *pcmd;
9468 uint32_t if_type;
9469
9470 fip = phba->hba_flag & HBA_FIP_SUPPORT;
9471
9472 if (iocbq->iocb_flag & LPFC_IO_FCP)
9473 command_type = FCP_COMMAND;
9474 else if (fip && (iocbq->iocb_flag & LPFC_FIP_ELS_ID_MASK))
9475 command_type = ELS_COMMAND_FIP;
9476 else
9477 command_type = ELS_COMMAND_NON_FIP;
9478
9479 if (phba->fcp_embed_io)
9480 memset(wqe, 0, sizeof(union lpfc_wqe128));
9481
9482 memcpy(wqe, &iocbq->iocb, sizeof(union lpfc_wqe));
9483
9484 wqe->generic.wqe_com.word7 = 0;
9485 wqe->generic.wqe_com.word10 = 0;
9486
9487 abort_tag = (uint32_t) iocbq->iotag;
9488 xritag = iocbq->sli4_xritag;
9489
9490 if (iocbq->iocb.un.genreq64.bdl.bdeFlags == BUFF_TYPE_BLP_64) {
9491 numBdes = iocbq->iocb.un.genreq64.bdl.bdeSize /
9492 sizeof(struct ulp_bde64);
9493 bpl = (struct ulp_bde64 *)
9494 ((struct lpfc_dmabuf *)iocbq->context3)->virt;
9495 if (!bpl)
9496 return IOCB_ERROR;
9497
9498
9499 wqe->generic.bde.addrHigh = le32_to_cpu(bpl->addrHigh);
9500 wqe->generic.bde.addrLow = le32_to_cpu(bpl->addrLow);
9501
9502
9503
9504 wqe->generic.bde.tus.w = le32_to_cpu(bpl->tus.w);
9505 xmit_len = wqe->generic.bde.tus.f.bdeSize;
9506 total_len = 0;
9507 for (i = 0; i < numBdes; i++) {
9508 bde.tus.w = le32_to_cpu(bpl[i].tus.w);
9509 total_len += bde.tus.f.bdeSize;
9510 }
9511 } else
9512 xmit_len = iocbq->iocb.un.fcpi64.bdl.bdeSize;
9513
9514 iocbq->iocb.ulpIoTag = iocbq->iotag;
9515 cmnd = iocbq->iocb.ulpCommand;
9516
9517 switch (iocbq->iocb.ulpCommand) {
9518 case CMD_ELS_REQUEST64_CR:
9519 if (iocbq->iocb_flag & LPFC_IO_LIBDFC)
9520 ndlp = iocbq->context_un.ndlp;
9521 else
9522 ndlp = (struct lpfc_nodelist *)iocbq->context1;
9523 if (!iocbq->iocb.ulpLe) {
9524 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
9525 "2007 Only Limited Edition cmd Format"
9526 " supported 0x%x\n",
9527 iocbq->iocb.ulpCommand);
9528 return IOCB_ERROR;
9529 }
9530
9531 wqe->els_req.payload_len = xmit_len;
9532
9533 bf_set(wqe_tmo, &wqe->els_req.wqe_com,
9534 iocbq->iocb.ulpTimeout);
9535
9536 bf_set(els_req64_vf, &wqe->els_req, 0);
9537
9538 bf_set(els_req64_vfid, &wqe->els_req, 0);
9539 ct = ((iocbq->iocb.ulpCt_h << 1) | iocbq->iocb.ulpCt_l);
9540 bf_set(wqe_ctxt_tag, &wqe->els_req.wqe_com,
9541 iocbq->iocb.ulpContext);
9542 bf_set(wqe_ct, &wqe->els_req.wqe_com, ct);
9543 bf_set(wqe_pu, &wqe->els_req.wqe_com, 0);
9544
9545 if (command_type == ELS_COMMAND_FIP)
9546 els_id = ((iocbq->iocb_flag & LPFC_FIP_ELS_ID_MASK)
9547 >> LPFC_FIP_ELS_ID_SHIFT);
9548 pcmd = (uint32_t *) (((struct lpfc_dmabuf *)
9549 iocbq->context2)->virt);
9550 if_type = bf_get(lpfc_sli_intf_if_type,
9551 &phba->sli4_hba.sli_intf);
9552 if (if_type >= LPFC_SLI_INTF_IF_TYPE_2) {
9553 if (pcmd && (*pcmd == ELS_CMD_FLOGI ||
9554 *pcmd == ELS_CMD_SCR ||
9555 *pcmd == ELS_CMD_RDF ||
9556 *pcmd == ELS_CMD_RSCN_XMT ||
9557 *pcmd == ELS_CMD_FDISC ||
9558 *pcmd == ELS_CMD_LOGO ||
9559 *pcmd == ELS_CMD_PLOGI)) {
9560 bf_set(els_req64_sp, &wqe->els_req, 1);
9561 bf_set(els_req64_sid, &wqe->els_req,
9562 iocbq->vport->fc_myDID);
9563 if ((*pcmd == ELS_CMD_FLOGI) &&
9564 !(phba->fc_topology ==
9565 LPFC_TOPOLOGY_LOOP))
9566 bf_set(els_req64_sid, &wqe->els_req, 0);
9567 bf_set(wqe_ct, &wqe->els_req.wqe_com, 1);
9568 bf_set(wqe_ctxt_tag, &wqe->els_req.wqe_com,
9569 phba->vpi_ids[iocbq->vport->vpi]);
9570 } else if (pcmd && iocbq->context1) {
9571 bf_set(wqe_ct, &wqe->els_req.wqe_com, 0);
9572 bf_set(wqe_ctxt_tag, &wqe->els_req.wqe_com,
9573 phba->sli4_hba.rpi_ids[ndlp->nlp_rpi]);
9574 }
9575 }
9576 bf_set(wqe_temp_rpi, &wqe->els_req.wqe_com,
9577 phba->sli4_hba.rpi_ids[ndlp->nlp_rpi]);
9578 bf_set(wqe_els_id, &wqe->els_req.wqe_com, els_id);
9579 bf_set(wqe_dbde, &wqe->els_req.wqe_com, 1);
9580 bf_set(wqe_iod, &wqe->els_req.wqe_com, LPFC_WQE_IOD_READ);
9581 bf_set(wqe_qosd, &wqe->els_req.wqe_com, 1);
9582 bf_set(wqe_lenloc, &wqe->els_req.wqe_com, LPFC_WQE_LENLOC_NONE);
9583 bf_set(wqe_ebde_cnt, &wqe->els_req.wqe_com, 0);
9584 wqe->els_req.max_response_payload_len = total_len - xmit_len;
9585 break;
9586 case CMD_XMIT_SEQUENCE64_CX:
9587 bf_set(wqe_ctxt_tag, &wqe->xmit_sequence.wqe_com,
9588 iocbq->iocb.un.ulpWord[3]);
9589 bf_set(wqe_rcvoxid, &wqe->xmit_sequence.wqe_com,
9590 iocbq->iocb.unsli3.rcvsli3.ox_id);
9591
9592 xmit_len = total_len;
9593 cmnd = CMD_XMIT_SEQUENCE64_CR;
9594 if (phba->link_flag & LS_LOOPBACK_MODE)
9595 bf_set(wqe_xo, &wqe->xmit_sequence.wge_ctl, 1);
9596
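/* fall through - CX shares the remaining XMIT_SEQUENCE setup with CR */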
9597 case CMD_XMIT_SEQUENCE64_CR:
9598
9599 wqe->xmit_sequence.rsvd3 = 0;
9600
9601
9602 bf_set(wqe_pu, &wqe->xmit_sequence.wqe_com, 0);
9603 bf_set(wqe_dbde, &wqe->xmit_sequence.wqe_com, 1);
9604 bf_set(wqe_iod, &wqe->xmit_sequence.wqe_com,
9605 LPFC_WQE_IOD_WRITE);
9606 bf_set(wqe_lenloc, &wqe->xmit_sequence.wqe_com,
9607 LPFC_WQE_LENLOC_WORD12);
9608 bf_set(wqe_ebde_cnt, &wqe->xmit_sequence.wqe_com, 0);
9609 wqe->xmit_sequence.xmit_len = xmit_len;
9610 command_type = OTHER_COMMAND;
9611 break;
9612 case CMD_XMIT_BCAST64_CN:
9613
9614 wqe->xmit_bcast64.seq_payload_len = xmit_len;
9615
9616
9617
9618 bf_set(wqe_ct, &wqe->xmit_bcast64.wqe_com,
9619 ((iocbq->iocb.ulpCt_h << 1) | iocbq->iocb.ulpCt_l));
9620 bf_set(wqe_dbde, &wqe->xmit_bcast64.wqe_com, 1);
9621 bf_set(wqe_iod, &wqe->xmit_bcast64.wqe_com, LPFC_WQE_IOD_WRITE);
9622 bf_set(wqe_lenloc, &wqe->xmit_bcast64.wqe_com,
9623 LPFC_WQE_LENLOC_WORD3);
9624 bf_set(wqe_ebde_cnt, &wqe->xmit_bcast64.wqe_com, 0);
9625 break;
9626 case CMD_FCP_IWRITE64_CR:
9627 command_type = FCP_COMMAND_DATA_OUT;
9628
9629
9630 bf_set(payload_offset_len, &wqe->fcp_iwrite,
9631 xmit_len + sizeof(struct fcp_rsp));
9632 bf_set(cmd_buff_len, &wqe->fcp_iwrite,
9633 0);
9634
9635
9636 bf_set(wqe_erp, &wqe->fcp_iwrite.wqe_com,
9637 iocbq->iocb.ulpFCP2Rcvy);
9638 bf_set(wqe_lnk, &wqe->fcp_iwrite.wqe_com, iocbq->iocb.ulpXS);
9639
9640 bf_set(wqe_iod, &wqe->fcp_iwrite.wqe_com, LPFC_WQE_IOD_WRITE);
9641 bf_set(wqe_lenloc, &wqe->fcp_iwrite.wqe_com,
9642 LPFC_WQE_LENLOC_WORD4);
9643 bf_set(wqe_pu, &wqe->fcp_iwrite.wqe_com, iocbq->iocb.ulpPU);
9644 bf_set(wqe_dbde, &wqe->fcp_iwrite.wqe_com, 1);
9645 if (iocbq->iocb_flag & LPFC_IO_OAS) {
9646 bf_set(wqe_oas, &wqe->fcp_iwrite.wqe_com, 1);
9647 bf_set(wqe_ccpe, &wqe->fcp_iwrite.wqe_com, 1);
9648 if (iocbq->priority) {
9649 bf_set(wqe_ccp, &wqe->fcp_iwrite.wqe_com,
9650 (iocbq->priority << 1));
9651 } else {
9652 bf_set(wqe_ccp, &wqe->fcp_iwrite.wqe_com,
9653 (phba->cfg_XLanePriority << 1));
9654 }
9655 }
9656
9657
9658
9659 if (phba->cfg_enable_pbde)
9660 bf_set(wqe_pbde, &wqe->fcp_iwrite.wqe_com, 1);
9661 else
9662 bf_set(wqe_pbde, &wqe->fcp_iwrite.wqe_com, 0);
9663
9664 if (phba->fcp_embed_io) {
9665 struct lpfc_io_buf *lpfc_cmd;
9666 struct sli4_sge *sgl;
9667 struct fcp_cmnd *fcp_cmnd;
9668 uint32_t *ptr;
9669
9670
9671
9672 lpfc_cmd = iocbq->context1;
9673 sgl = (struct sli4_sge *)lpfc_cmd->dma_sgl;
9674 fcp_cmnd = lpfc_cmd->fcp_cmnd;
9675
9676
9677 wqe->generic.bde.tus.f.bdeFlags =
9678 BUFF_TYPE_BDE_IMMED;
9679 wqe->generic.bde.tus.f.bdeSize = sgl->sge_len;
9680 wqe->generic.bde.addrHigh = 0;
9681 wqe->generic.bde.addrLow = 88;
9682
9683 bf_set(wqe_wqes, &wqe->fcp_iwrite.wqe_com, 1);
9684 bf_set(wqe_dbde, &wqe->fcp_iwrite.wqe_com, 0);
9685
9686
9687 ptr = &wqe->words[22];
9688 memcpy(ptr, fcp_cmnd, sizeof(struct fcp_cmnd));
9689 }
9690 break;
9691 case CMD_FCP_IREAD64_CR:
9692
9693
9694 bf_set(payload_offset_len, &wqe->fcp_iread,
9695 xmit_len + sizeof(struct fcp_rsp));
9696 bf_set(cmd_buff_len, &wqe->fcp_iread,
9697 0);
9698
9699
9700 bf_set(wqe_erp, &wqe->fcp_iread.wqe_com,
9701 iocbq->iocb.ulpFCP2Rcvy);
9702 bf_set(wqe_lnk, &wqe->fcp_iread.wqe_com, iocbq->iocb.ulpXS);
9703
9704 bf_set(wqe_iod, &wqe->fcp_iread.wqe_com, LPFC_WQE_IOD_READ);
9705 bf_set(wqe_lenloc, &wqe->fcp_iread.wqe_com,
9706 LPFC_WQE_LENLOC_WORD4);
9707 bf_set(wqe_pu, &wqe->fcp_iread.wqe_com, iocbq->iocb.ulpPU);
9708 bf_set(wqe_dbde, &wqe->fcp_iread.wqe_com, 1);
9709 if (iocbq->iocb_flag & LPFC_IO_OAS) {
9710 bf_set(wqe_oas, &wqe->fcp_iread.wqe_com, 1);
9711 bf_set(wqe_ccpe, &wqe->fcp_iread.wqe_com, 1);
9712 if (iocbq->priority) {
9713 bf_set(wqe_ccp, &wqe->fcp_iread.wqe_com,
9714 (iocbq->priority << 1));
9715 } else {
9716 bf_set(wqe_ccp, &wqe->fcp_iread.wqe_com,
9717 (phba->cfg_XLanePriority << 1));
9718 }
9719 }
9720
9721
9722
9723 if (phba->cfg_enable_pbde)
9724 bf_set(wqe_pbde, &wqe->fcp_iread.wqe_com, 1);
9725 else
9726 bf_set(wqe_pbde, &wqe->fcp_iread.wqe_com, 0);
9727
9728 if (phba->fcp_embed_io) {
9729 struct lpfc_io_buf *lpfc_cmd;
9730 struct sli4_sge *sgl;
9731 struct fcp_cmnd *fcp_cmnd;
9732 uint32_t *ptr;
9733
9734
9735
9736 lpfc_cmd = iocbq->context1;
9737 sgl = (struct sli4_sge *)lpfc_cmd->dma_sgl;
9738 fcp_cmnd = lpfc_cmd->fcp_cmnd;
9739
9740
9741 wqe->generic.bde.tus.f.bdeFlags =
9742 BUFF_TYPE_BDE_IMMED;
9743 wqe->generic.bde.tus.f.bdeSize = sgl->sge_len;
9744 wqe->generic.bde.addrHigh = 0;
9745 wqe->generic.bde.addrLow = 88;
9746
9747 bf_set(wqe_wqes, &wqe->fcp_iread.wqe_com, 1);
9748 bf_set(wqe_dbde, &wqe->fcp_iread.wqe_com, 0);
9749
9750
9751 ptr = &wqe->words[22];
9752 memcpy(ptr, fcp_cmnd, sizeof(struct fcp_cmnd));
9753 }
9754 break;
9755 case CMD_FCP_ICMND64_CR:
9756
9757
9758 bf_set(payload_offset_len, &wqe->fcp_icmd,
9759 xmit_len + sizeof(struct fcp_rsp));
9760 bf_set(cmd_buff_len, &wqe->fcp_icmd,
9761 0);
9762
9763 bf_set(wqe_pu, &wqe->fcp_icmd.wqe_com, 0);
9764
9765 bf_set(wqe_dbde, &wqe->fcp_icmd.wqe_com, 1);
9766 bf_set(wqe_iod, &wqe->fcp_icmd.wqe_com, LPFC_WQE_IOD_WRITE);
9767 bf_set(wqe_qosd, &wqe->fcp_icmd.wqe_com, 1);
9768 bf_set(wqe_lenloc, &wqe->fcp_icmd.wqe_com,
9769 LPFC_WQE_LENLOC_NONE);
9770 bf_set(wqe_erp, &wqe->fcp_icmd.wqe_com,
9771 iocbq->iocb.ulpFCP2Rcvy);
9772 if (iocbq->iocb_flag & LPFC_IO_OAS) {
9773 bf_set(wqe_oas, &wqe->fcp_icmd.wqe_com, 1);
9774 bf_set(wqe_ccpe, &wqe->fcp_icmd.wqe_com, 1);
9775 if (iocbq->priority) {
9776 bf_set(wqe_ccp, &wqe->fcp_icmd.wqe_com,
9777 (iocbq->priority << 1));
9778 } else {
9779 bf_set(wqe_ccp, &wqe->fcp_icmd.wqe_com,
9780 (phba->cfg_XLanePriority << 1));
9781 }
9782 }
9783
9784
9785 if (phba->fcp_embed_io) {
9786 struct lpfc_io_buf *lpfc_cmd;
9787 struct sli4_sge *sgl;
9788 struct fcp_cmnd *fcp_cmnd;
9789 uint32_t *ptr;
9790
9791
9792
9793 lpfc_cmd = iocbq->context1;
9794 sgl = (struct sli4_sge *)lpfc_cmd->dma_sgl;
9795 fcp_cmnd = lpfc_cmd->fcp_cmnd;
9796
9797
9798 wqe->generic.bde.tus.f.bdeFlags =
9799 BUFF_TYPE_BDE_IMMED;
9800 wqe->generic.bde.tus.f.bdeSize = sgl->sge_len;
9801 wqe->generic.bde.addrHigh = 0;
9802 wqe->generic.bde.addrLow = 88;
9803
9804 bf_set(wqe_wqes, &wqe->fcp_icmd.wqe_com, 1);
9805 bf_set(wqe_dbde, &wqe->fcp_icmd.wqe_com, 0);
9806
9807
9808 ptr = &wqe->words[22];
9809 memcpy(ptr, fcp_cmnd, sizeof(struct fcp_cmnd));
9810 }
9811 break;
9812 case CMD_GEN_REQUEST64_CR:
9813
9814
9815
9816 xmit_len = 0;
9817 numBdes = iocbq->iocb.un.genreq64.bdl.bdeSize /
9818 sizeof(struct ulp_bde64);
9819 for (i = 0; i < numBdes; i++) {
9820 bde.tus.w = le32_to_cpu(bpl[i].tus.w);
9821 if (bde.tus.f.bdeFlags != BUFF_TYPE_BDE_64)
9822 break;
9823 xmit_len += bde.tus.f.bdeSize;
9824 }
9825
9826 wqe->gen_req.request_payload_len = xmit_len;
9827
9828
9829
9830 if (iocbq->iocb.ulpCt_h || iocbq->iocb.ulpCt_l) {
9831 ct = ((iocbq->iocb.ulpCt_h << 1) | iocbq->iocb.ulpCt_l);
9832 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
9833 "2015 Invalid CT %x command 0x%x\n",
9834 ct, iocbq->iocb.ulpCommand);
9835 return IOCB_ERROR;
9836 }
9837 bf_set(wqe_ct, &wqe->gen_req.wqe_com, 0);
9838 bf_set(wqe_tmo, &wqe->gen_req.wqe_com, iocbq->iocb.ulpTimeout);
9839 bf_set(wqe_pu, &wqe->gen_req.wqe_com, iocbq->iocb.ulpPU);
9840 bf_set(wqe_dbde, &wqe->gen_req.wqe_com, 1);
9841 bf_set(wqe_iod, &wqe->gen_req.wqe_com, LPFC_WQE_IOD_READ);
9842 bf_set(wqe_qosd, &wqe->gen_req.wqe_com, 1);
9843 bf_set(wqe_lenloc, &wqe->gen_req.wqe_com, LPFC_WQE_LENLOC_NONE);
9844 bf_set(wqe_ebde_cnt, &wqe->gen_req.wqe_com, 0);
9845 wqe->gen_req.max_response_payload_len = total_len - xmit_len;
9846 command_type = OTHER_COMMAND;
9847 break;
9848 case CMD_XMIT_ELS_RSP64_CX:
9849 ndlp = (struct lpfc_nodelist *)iocbq->context1;
9850
9851
9852 wqe->xmit_els_rsp.response_payload_len = xmit_len;
9853
9854 wqe->xmit_els_rsp.word4 = 0;
9855
9856 bf_set(wqe_els_did, &wqe->xmit_els_rsp.wqe_dest,
9857 iocbq->iocb.un.xseq64.xmit_els_remoteID);
9858
9859 if_type = bf_get(lpfc_sli_intf_if_type,
9860 &phba->sli4_hba.sli_intf);
9861 if (if_type >= LPFC_SLI_INTF_IF_TYPE_2) {
9862 if (iocbq->vport->fc_flag & FC_PT2PT) {
9863 bf_set(els_rsp64_sp, &wqe->xmit_els_rsp, 1);
9864 bf_set(els_rsp64_sid, &wqe->xmit_els_rsp,
9865 iocbq->vport->fc_myDID);
9866 if (iocbq->vport->fc_myDID == Fabric_DID) {
9867 bf_set(wqe_els_did,
9868 &wqe->xmit_els_rsp.wqe_dest, 0);
9869 }
9870 }
9871 }
9872 bf_set(wqe_ct, &wqe->xmit_els_rsp.wqe_com,
9873 ((iocbq->iocb.ulpCt_h << 1) | iocbq->iocb.ulpCt_l));
9874 bf_set(wqe_pu, &wqe->xmit_els_rsp.wqe_com, iocbq->iocb.ulpPU);
9875 bf_set(wqe_rcvoxid, &wqe->xmit_els_rsp.wqe_com,
9876 iocbq->iocb.unsli3.rcvsli3.ox_id);
9877 if (!iocbq->iocb.ulpCt_h && iocbq->iocb.ulpCt_l)
9878 bf_set(wqe_ctxt_tag, &wqe->xmit_els_rsp.wqe_com,
9879 phba->vpi_ids[iocbq->vport->vpi]);
9880 bf_set(wqe_dbde, &wqe->xmit_els_rsp.wqe_com, 1);
9881 bf_set(wqe_iod, &wqe->xmit_els_rsp.wqe_com, LPFC_WQE_IOD_WRITE);
9882 bf_set(wqe_qosd, &wqe->xmit_els_rsp.wqe_com, 1);
9883 bf_set(wqe_lenloc, &wqe->xmit_els_rsp.wqe_com,
9884 LPFC_WQE_LENLOC_WORD3);
9885 bf_set(wqe_ebde_cnt, &wqe->xmit_els_rsp.wqe_com, 0);
9886 bf_set(wqe_rsp_temp_rpi, &wqe->xmit_els_rsp,
9887 phba->sli4_hba.rpi_ids[ndlp->nlp_rpi]);
9888 pcmd = (uint32_t *) (((struct lpfc_dmabuf *)
9889 iocbq->context2)->virt);
9890 if (phba->fc_topology == LPFC_TOPOLOGY_LOOP) {
9891 bf_set(els_rsp64_sp, &wqe->xmit_els_rsp, 1);
9892 bf_set(els_rsp64_sid, &wqe->xmit_els_rsp,
9893 iocbq->vport->fc_myDID);
9894 bf_set(wqe_ct, &wqe->xmit_els_rsp.wqe_com, 1);
9895 bf_set(wqe_ctxt_tag, &wqe->xmit_els_rsp.wqe_com,
9896 phba->vpi_ids[phba->pport->vpi]);
9897 }
9898 command_type = OTHER_COMMAND;
9899 break;
9900 case CMD_CLOSE_XRI_CN:
9901 case CMD_ABORT_XRI_CN:
9902 case CMD_ABORT_XRI_CX:
9903
9904
9905 abrt_iotag = iocbq->iocb.un.acxri.abortContextTag;
9906 if (abrt_iotag != 0 && abrt_iotag <= phba->sli.last_iotag) {
9907 abrtiocbq = phba->sli.iocbq_lookup[abrt_iotag];
9908 fip = abrtiocbq->iocb_flag & LPFC_FIP_ELS_ID_MASK;
9909 } else
9910 fip = 0;
9911
9912 if ((iocbq->iocb.ulpCommand == CMD_CLOSE_XRI_CN) || fip)
9913 /*
9914 * A close (issued when the link is down) or a FIP-owned
9915 * exchange does not need an ABTS sent on the wire, so the
9916 * interlock abort (IA) bit is set and the XRI is just released.
9917 */
9918 bf_set(abort_cmd_ia, &wqe->abort_cmd, 1);
9919 else
9920 bf_set(abort_cmd_ia, &wqe->abort_cmd, 0);
9921 bf_set(abort_cmd_criteria, &wqe->abort_cmd, T_XRI_TAG);
9922
9923 wqe->abort_cmd.rsrvd5 = 0;
9924 bf_set(wqe_ct, &wqe->abort_cmd.wqe_com,
9925 ((iocbq->iocb.ulpCt_h << 1) | iocbq->iocb.ulpCt_l));
9926 abort_tag = iocbq->iocb.un.acxri.abortIoTag;
9927
9928
9929
9930
9931 bf_set(wqe_cmnd, &wqe->abort_cmd.wqe_com, CMD_ABORT_XRI_CX);
9932 bf_set(wqe_qosd, &wqe->abort_cmd.wqe_com, 1);
9933 bf_set(wqe_lenloc, &wqe->abort_cmd.wqe_com,
9934 LPFC_WQE_LENLOC_NONE);
9935 cmnd = CMD_ABORT_XRI_CX;
9936 command_type = OTHER_COMMAND;
9937 xritag = 0;
9938 break;
9939 case CMD_XMIT_BLS_RSP64_CX:
9940 ndlp = (struct lpfc_nodelist *)iocbq->context1;
9941
9942
9943
9944
9945 memset(wqe, 0, sizeof(*wqe));
9946
9947 bf_set(xmit_bls_rsp64_oxid, &wqe->xmit_bls_rsp,
9948 bf_get(lpfc_abts_oxid, &iocbq->iocb.un.bls_rsp));
9949 if (bf_get(lpfc_abts_orig, &iocbq->iocb.un.bls_rsp) ==
9950 LPFC_ABTS_UNSOL_INT) {
9951
9952
9953
9954
9955 bf_set(xmit_bls_rsp64_rxid, &wqe->xmit_bls_rsp,
9956 iocbq->sli4_xritag);
9957 } else {
9958
9959
9960
9961
9962 bf_set(xmit_bls_rsp64_rxid, &wqe->xmit_bls_rsp,
9963 bf_get(lpfc_abts_rxid, &iocbq->iocb.un.bls_rsp));
9964 }
9965 bf_set(xmit_bls_rsp64_seqcnthi, &wqe->xmit_bls_rsp, 0xffff);
9966 bf_set(wqe_xmit_bls_pt, &wqe->xmit_bls_rsp.wqe_dest, 0x1);
9967
9968
9969 bf_set(wqe_els_did, &wqe->xmit_bls_rsp.wqe_dest,
9970 ndlp->nlp_DID);
9971 bf_set(xmit_bls_rsp64_temprpi, &wqe->xmit_bls_rsp,
9972 iocbq->iocb.ulpContext);
9973 bf_set(wqe_ct, &wqe->xmit_bls_rsp.wqe_com, 1);
9974 bf_set(wqe_ctxt_tag, &wqe->xmit_bls_rsp.wqe_com,
9975 phba->vpi_ids[phba->pport->vpi]);
9976 bf_set(wqe_qosd, &wqe->xmit_bls_rsp.wqe_com, 1);
9977 bf_set(wqe_lenloc, &wqe->xmit_bls_rsp.wqe_com,
9978 LPFC_WQE_LENLOC_NONE);
9979
9980 command_type = OTHER_COMMAND;
9981 if (iocbq->iocb.un.xseq64.w5.hcsw.Rctl == FC_RCTL_BA_RJT) {
9982 bf_set(xmit_bls_rsp64_rjt_vspec, &wqe->xmit_bls_rsp,
9983 bf_get(lpfc_vndr_code, &iocbq->iocb.un.bls_rsp));
9984 bf_set(xmit_bls_rsp64_rjt_expc, &wqe->xmit_bls_rsp,
9985 bf_get(lpfc_rsn_expln, &iocbq->iocb.un.bls_rsp));
9986 bf_set(xmit_bls_rsp64_rjt_rsnc, &wqe->xmit_bls_rsp,
9987 bf_get(lpfc_rsn_code, &iocbq->iocb.un.bls_rsp));
9988 }
9989
9990 break;
9991 case CMD_SEND_FRAME:
9992 bf_set(wqe_cmnd, &wqe->generic.wqe_com, CMD_SEND_FRAME);
9993 bf_set(wqe_sof, &wqe->generic.wqe_com, 0x2E);
9994 bf_set(wqe_eof, &wqe->generic.wqe_com, 0x41);
9995 bf_set(wqe_lenloc, &wqe->generic.wqe_com, 1);
9996 bf_set(wqe_xbl, &wqe->generic.wqe_com, 1);
9997 bf_set(wqe_dbde, &wqe->generic.wqe_com, 1);
9998 bf_set(wqe_xc, &wqe->generic.wqe_com, 1);
9999 bf_set(wqe_cmd_type, &wqe->generic.wqe_com, 0xA);
10000 bf_set(wqe_cqid, &wqe->generic.wqe_com, LPFC_WQE_CQ_ID_DEFAULT);
10001 bf_set(wqe_xri_tag, &wqe->generic.wqe_com, xritag);
10002 bf_set(wqe_reqtag, &wqe->generic.wqe_com, iocbq->iotag);
10003 return 0;
10004 case CMD_XRI_ABORTED_CX:
10005 case CMD_CREATE_XRI_CR:
10006 case CMD_IOCB_FCP_IBIDIR64_CR:
10007 case CMD_FCP_TSEND64_CX:
10008 case CMD_FCP_TRSP64_CX:
10009 case CMD_FCP_AUTO_TRSP_CX:
10010 default:
10011 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
10012 "2014 Invalid command 0x%x\n",
10013 iocbq->iocb.ulpCommand);
10014 return IOCB_ERROR;
10016 }
10017
10018 if (iocbq->iocb_flag & LPFC_IO_DIF_PASS)
10019 bf_set(wqe_dif, &wqe->generic.wqe_com, LPFC_WQE_DIF_PASSTHRU);
10020 else if (iocbq->iocb_flag & LPFC_IO_DIF_STRIP)
10021 bf_set(wqe_dif, &wqe->generic.wqe_com, LPFC_WQE_DIF_STRIP);
10022 else if (iocbq->iocb_flag & LPFC_IO_DIF_INSERT)
10023 bf_set(wqe_dif, &wqe->generic.wqe_com, LPFC_WQE_DIF_INSERT);
10024 iocbq->iocb_flag &= ~(LPFC_IO_DIF_PASS | LPFC_IO_DIF_STRIP |
10025 LPFC_IO_DIF_INSERT);
10026 bf_set(wqe_xri_tag, &wqe->generic.wqe_com, xritag);
10027 bf_set(wqe_reqtag, &wqe->generic.wqe_com, iocbq->iotag);
10028 wqe->generic.wqe_com.abort_tag = abort_tag;
10029 bf_set(wqe_cmd_type, &wqe->generic.wqe_com, command_type);
10030 bf_set(wqe_cmnd, &wqe->generic.wqe_com, cmnd);
10031 bf_set(wqe_class, &wqe->generic.wqe_com, iocbq->iocb.ulpClass);
10032 bf_set(wqe_cqid, &wqe->generic.wqe_com, LPFC_WQE_CQ_ID_DEFAULT);
10033 return 0;
10034}
10035
10036
10037
10038
10039
10040
10041
10042
10043
10044
10045
10046
10047
10048
10049
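/**
 * __lpfc_sli_issue_iocb_s4 - SLI-4 lockless version of the iocb issue routine
 * @phba: Pointer to HBA context object.
 * @ring_number: SLI ring number to issue the iocb on.
 * @piocb: Pointer to command iocb.
 * @flag: Flag indicating whether the command may be queued to the txq.
 *
 * Issues an iocb to an HBA with the SLI-4 interface spec.  The caller must
 * hold the ring_lock of the target work queue's pring (asserted below).
 * If no XRI is available, or the txq is not empty, the iocb is either queued
 * to the txq (returning IOCB_SUCCESS) or rejected with IOCB_BUSY, depending
 * on @flag.  Returns IOCB_ERROR if the WQE cannot be built or posted.
 **/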
10050static int
10051__lpfc_sli_issue_iocb_s4(struct lpfc_hba *phba, uint32_t ring_number,
10052 struct lpfc_iocbq *piocb, uint32_t flag)
10053{
10054 struct lpfc_sglq *sglq;
10055 union lpfc_wqe128 wqe;
10056 struct lpfc_queue *wq;
10057 struct lpfc_sli_ring *pring;
10058
10059
10060 if ((piocb->iocb_flag & LPFC_IO_FCP) ||
10061 (piocb->iocb_flag & LPFC_USE_FCPWQIDX)) {
10062 wq = phba->sli4_hba.hdwq[piocb->hba_wqidx].io_wq;
10063 } else {
10064 wq = phba->sli4_hba.els_wq;
10065 }
10066
10067
10068 pring = wq->pring;
10069
10070
10071
10072
10073
10074 lockdep_assert_held(&pring->ring_lock);
10075
10076 if (piocb->sli4_xritag == NO_XRI) {
10077 if (piocb->iocb.ulpCommand == CMD_ABORT_XRI_CN ||
10078 piocb->iocb.ulpCommand == CMD_CLOSE_XRI_CN)
10079 sglq = NULL;
10080 else {
10081 if (!list_empty(&pring->txq)) {
10082 if (!(flag & SLI_IOCB_RET_IOCB)) {
10083 __lpfc_sli_ringtx_put(phba,
10084 pring, piocb);
10085 return IOCB_SUCCESS;
10086 } else {
10087 return IOCB_BUSY;
10088 }
10089 } else {
10090 sglq = __lpfc_sli_get_els_sglq(phba, piocb);
10091 if (!sglq) {
10092 if (!(flag & SLI_IOCB_RET_IOCB)) {
10093 __lpfc_sli_ringtx_put(phba,
10094 pring,
10095 piocb);
10096 return IOCB_SUCCESS;
10097 } else
10098 return IOCB_BUSY;
10099 }
10100 }
10101 }
10102 } else if (piocb->iocb_flag & LPFC_IO_FCP)
10103
10104 sglq = NULL;
10105 else {
10106
10107
10108
10109
10110 sglq = __lpfc_get_active_sglq(phba, piocb->sli4_lxritag);
10111 if (!sglq)
10112 return IOCB_ERROR;
10113 }
10114
10115 if (sglq) {
10116 piocb->sli4_lxritag = sglq->sli4_lxritag;
10117 piocb->sli4_xritag = sglq->sli4_xritag;
10118 if (NO_XRI == lpfc_sli4_bpl2sgl(phba, piocb, sglq))
10119 return IOCB_ERROR;
10120 }
10121
10122 if (lpfc_sli4_iocb2wqe(phba, piocb, &wqe))
10123 return IOCB_ERROR;
10124
10125 if (lpfc_sli4_wq_put(wq, &wqe))
10126 return IOCB_ERROR;
10127 lpfc_sli_ringtxcmpl_put(phba, pring, piocb);
10128
10129 return 0;
10130}
10131
10132
10133
10134
10135
10136
10137
10138
10139
10140
10141
10142
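/**
 * __lpfc_sli_issue_iocb - Wrapper for the lockless iocb issue routine
 * @phba: Pointer to HBA context object.
 * @ring_number: SLI ring number to issue the iocb on.
 * @piocb: Pointer to command iocb.
 * @flag: Flag indicating whether the command may be queued to the txq.
 *
 * Dispatches to the SLI-3 or SLI-4 lockless issue routine through the
 * per-HBA function pointer installed by lpfc_sli_api_table_setup.
 **/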
10143int
10144__lpfc_sli_issue_iocb(struct lpfc_hba *phba, uint32_t ring_number,
10145 struct lpfc_iocbq *piocb, uint32_t flag)
10146{
10147 return phba->__lpfc_sli_issue_iocb(phba, ring_number, piocb, flag);
10148}
10149
10150
10151
10152
10153
10154
10155
10156
10157
10158
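/**
 * lpfc_sli_api_table_setup - Set up the SLI API function jump table
 * @phba: The hba struct for which this call is being executed.
 * @dev_grp: The HBA PCI-Device group number.
 *
 * Installs the SLI interface routines (iocb issue and iocbq release) that
 * match the device group in the @phba struct.
 *
 * Returns 0 on success, -ENODEV for an unknown device group.
 **/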
10159int
10160lpfc_sli_api_table_setup(struct lpfc_hba *phba, uint8_t dev_grp)
10161{
10162
10163 switch (dev_grp) {
10164 case LPFC_PCI_DEV_LP:
10165 phba->__lpfc_sli_issue_iocb = __lpfc_sli_issue_iocb_s3;
10166 phba->__lpfc_sli_release_iocbq = __lpfc_sli_release_iocbq_s3;
10167 break;
10168 case LPFC_PCI_DEV_OC:
10169 phba->__lpfc_sli_issue_iocb = __lpfc_sli_issue_iocb_s4;
10170 phba->__lpfc_sli_release_iocbq = __lpfc_sli_release_iocbq_s4;
10171 break;
10172 default:
10173 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
10174 "1419 Invalid HBA PCI-device group: 0x%x\n",
10175 dev_grp);
10176 return -ENODEV;
10178 }
10179 phba->lpfc_get_iocb_from_iocbq = lpfc_get_iocb_from_iocbq;
10180 return 0;
10181}
10182
10183
10184
10185
10186
10187
10188
10189
10190
10191
10192
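/**
 * lpfc_sli4_calc_ring - Select the ring an iocb will be posted to
 * @phba: Pointer to HBA context object.
 * @piocb: Pointer to command iocb.
 *
 * For SLI-4, returns the pring of the hardware queue chosen for an FCP
 * command (filling in piocb->hba_wqidx from the io_buf when the caller has
 * not fixed it), or the ELS work queue pring for all other commands.
 * Returns NULL if the required queues have not been set up yet.
 **/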
10193struct lpfc_sli_ring *
10194lpfc_sli4_calc_ring(struct lpfc_hba *phba, struct lpfc_iocbq *piocb)
10195{
10196 struct lpfc_io_buf *lpfc_cmd;
10197
10198 if (piocb->iocb_flag & (LPFC_IO_FCP | LPFC_USE_FCPWQIDX)) {
10199 if (unlikely(!phba->sli4_hba.hdwq))
10200 return NULL;
10201
10202
10203
10204
10205 if (!(piocb->iocb_flag & LPFC_USE_FCPWQIDX)) {
10206 lpfc_cmd = (struct lpfc_io_buf *)piocb->context1;
10207 piocb->hba_wqidx = lpfc_cmd->hdwq_no;
10208 }
10209 return phba->sli4_hba.hdwq[piocb->hba_wqidx].io_wq->pring;
10210 } else {
10211 if (unlikely(!phba->sli4_hba.els_wq))
10212 return NULL;
10213 piocb->hba_wqidx = 0;
10214 return phba->sli4_hba.els_wq->pring;
10215 }
10216}
10217
10218
10219
10220
10221
10222
10223
10224
10225
10226
10227
10228
10229
10230
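/**
 * lpfc_sli_issue_iocb - Wrapper function for __lpfc_sli_issue_iocb
 * @phba: Pointer to HBA context object.
 * @ring_number: SLI ring number to issue the iocb on.
 * @piocb: Pointer to command iocb.
 * @flag: Flag indicating whether the command may be queued to the txq.
 *
 * Takes the appropriate lock for the SLI revision (the per-ring ring_lock
 * for SLI-4, the hbalock for SLI-3) around __lpfc_sli_issue_iocb, and for
 * SLI-4 also polls the associated event queue in fast-path mode.
 **/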
10231int
10232lpfc_sli_issue_iocb(struct lpfc_hba *phba, uint32_t ring_number,
10233 struct lpfc_iocbq *piocb, uint32_t flag)
10234{
10235 struct lpfc_sli_ring *pring;
10236 struct lpfc_queue *eq;
10237 unsigned long iflags;
10238 int rc;
10239
10240 if (phba->sli_rev == LPFC_SLI_REV4) {
10241 eq = phba->sli4_hba.hdwq[piocb->hba_wqidx].hba_eq;
10242
10243 pring = lpfc_sli4_calc_ring(phba, piocb);
10244 if (unlikely(pring == NULL))
10245 return IOCB_ERROR;
10246
10247 spin_lock_irqsave(&pring->ring_lock, iflags);
10248 rc = __lpfc_sli_issue_iocb(phba, ring_number, piocb, flag);
10249 spin_unlock_irqrestore(&pring->ring_lock, iflags);
10250
10251 lpfc_sli4_poll_eq(eq, LPFC_POLL_FASTPATH);
10252 } else {
10253
10254 spin_lock_irqsave(&phba->hbalock, iflags);
10255 rc = __lpfc_sli_issue_iocb(phba, ring_number, piocb, flag);
10256 spin_unlock_irqrestore(&phba->hbalock, iflags);
10257 }
10258 return rc;
10259}
10260
10261
10262
10263
10264
10265
10266
10267
10268
10269
10270
10271
10272static int
10273lpfc_extra_ring_setup(struct lpfc_hba *phba)
10274{
10275 struct lpfc_sli *psli;
10276 struct lpfc_sli_ring *pring;
10277
10278 psli = &phba->sli;
10279
10280
10281
10282
10283 pring = &psli->sli3_ring[LPFC_FCP_RING];
10284 pring->sli.sli3.numCiocb -= SLI2_IOCB_CMD_R1XTRA_ENTRIES;
10285 pring->sli.sli3.numRiocb -= SLI2_IOCB_RSP_R1XTRA_ENTRIES;
10286 pring->sli.sli3.numCiocb -= SLI2_IOCB_CMD_R3XTRA_ENTRIES;
10287 pring->sli.sli3.numRiocb -= SLI2_IOCB_RSP_R3XTRA_ENTRIES;
10288
10289
10290 pring = &psli->sli3_ring[LPFC_EXTRA_RING];
10291
10292 pring->sli.sli3.numCiocb += SLI2_IOCB_CMD_R1XTRA_ENTRIES;
10293 pring->sli.sli3.numRiocb += SLI2_IOCB_RSP_R1XTRA_ENTRIES;
10294 pring->sli.sli3.numCiocb += SLI2_IOCB_CMD_R3XTRA_ENTRIES;
10295 pring->sli.sli3.numRiocb += SLI2_IOCB_RSP_R3XTRA_ENTRIES;
10296
10297
10298 pring->iotag_max = 4096;
10299 pring->num_mask = 1;
10300 pring->prt[0].profile = 0;
10301 pring->prt[0].rctl = phba->cfg_multi_ring_rctl;
10302 pring->prt[0].type = phba->cfg_multi_ring_type;
10303 pring->prt[0].lpfc_sli_rcv_unsol_event = NULL;
10304 return 0;
10305}
10306
10307
10308
10309
10310
10311
10312
10313
10314
10315
10316
10317
10318
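/**
 * lpfc_sli_abts_err_handler - Handle a port-generated ABTS error event
 * @phba: Pointer to HBA context object.
 * @iocbq: Pointer to the async status iocb.
 *
 * Called from the async event handler when the port reports an ABTS error.
 * The vpi/rpi carried in the iocb identify the remote port; if the vport and
 * node are still present and the status is a local reject, port recovery is
 * started on that node.  Otherwise the event is logged and ignored.
 **/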
10319static void
10320lpfc_sli_abts_err_handler(struct lpfc_hba *phba,
10321 struct lpfc_iocbq *iocbq)
10322{
10323 struct lpfc_nodelist *ndlp = NULL;
10324 uint16_t rpi = 0, vpi = 0;
10325 struct lpfc_vport *vport = NULL;
10326
10327
10328 vpi = iocbq->iocb.un.asyncstat.sub_ctxt_tag;
10329 rpi = iocbq->iocb.ulpContext;
10330
10331 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
10332 "3092 Port generated ABTS async event "
10333 "on vpi %d rpi %d status 0x%x\n",
10334 vpi, rpi, iocbq->iocb.ulpStatus);
10335
10336 vport = lpfc_find_vport_by_vpid(phba, vpi);
10337 if (!vport)
10338 goto err_exit;
10339 ndlp = lpfc_findnode_rpi(vport, rpi);
10340 if (!ndlp || !NLP_CHK_NODE_ACT(ndlp))
10341 goto err_exit;
10342
10343 if (iocbq->iocb.ulpStatus == IOSTAT_LOCAL_REJECT)
10344 lpfc_sli_abts_recover_port(vport, ndlp);
10345 return;
10346
10347 err_exit:
10348 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
10349 "3095 Event Context not found, no "
10350 "action on vpi %d rpi %d status 0x%x, reason 0x%x\n",
10351 vpi, rpi, iocbq->iocb.ulpStatus,
10352 iocbq->iocb.un.ulpWord[4]);
10353}
10354
10355
10356
10357
10358
10359
10360
10361
10362
10363
10364
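/**
 * lpfc_sli4_abts_err_handler - Handle a failed ABTS request from an SLI-4 port
 * @phba: Pointer to HBA context object.
 * @ndlp: Pointer to the node for the impacted remote port.
 * @axri: Pointer to the XRI-aborted WCQE for the failed exchange.
 *
 * Logs the FCP XRI abort event and, when the exchange failed with a local
 * reject whose extended status is a sequence timeout (or zero), starts port
 * recovery on the node.
 **/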
10365void
10366lpfc_sli4_abts_err_handler(struct lpfc_hba *phba,
10367 struct lpfc_nodelist *ndlp,
10368 struct sli4_wcqe_xri_aborted *axri)
10369{
10370 struct lpfc_vport *vport;
10371 uint32_t ext_status = 0;
10372
10373 if (!ndlp || !NLP_CHK_NODE_ACT(ndlp)) {
10374 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
10375 "3115 Node Context not found, driver "
10376 "ignoring abts err event\n");
10377 return;
10378 }
10379
10380 vport = ndlp->vport;
10381 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
10382 "3116 Port generated FCP XRI ABORT event on "
10383 "vpi %d rpi %d xri x%x status 0x%x parameter x%x\n",
10384 ndlp->vport->vpi, phba->sli4_hba.rpi_ids[ndlp->nlp_rpi],
10385 bf_get(lpfc_wcqe_xa_xri, axri),
10386 bf_get(lpfc_wcqe_xa_status, axri),
10387 axri->parameter);
10388
10389
10390
10391
10392
10393
10394 ext_status = axri->parameter & IOERR_PARAM_MASK;
10395 if ((bf_get(lpfc_wcqe_xa_status, axri) == IOSTAT_LOCAL_REJECT) &&
10396 ((ext_status == IOERR_SEQUENCE_TIMEOUT) || (ext_status == 0)))
10397 lpfc_sli_abts_recover_port(vport, ndlp);
10398}
10399
10400
10401
10402
10403
10404
10405
10406
10407
10408
10409
10410
10411
10412
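/**
 * lpfc_sli_async_event_handler - Handle an asynchronous status iocb
 * @phba: Pointer to HBA context object.
 * @pring: Pointer to driver SLI ring object.
 * @iocbq: Pointer to the async status iocb.
 *
 * Temperature warning/safe events are logged and forwarded to the FC
 * transport as vendor-specific events; ASYNC_STATUS_CN events are handed to
 * the ABTS error handler; any other event code is logged with the raw iocb
 * words.
 **/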
10413static void
10414lpfc_sli_async_event_handler(struct lpfc_hba *phba,
10415 struct lpfc_sli_ring *pring, struct lpfc_iocbq *iocbq)
10416{
10417 IOCB_t *icmd;
10418 uint16_t evt_code;
10419 struct temp_event temp_event_data;
10420 struct Scsi_Host *shost;
10421 uint32_t *iocb_w;
10422
10423 icmd = &iocbq->iocb;
10424 evt_code = icmd->un.asyncstat.evt_code;
10425
10426 switch (evt_code) {
10427 case ASYNC_TEMP_WARN:
10428 case ASYNC_TEMP_SAFE:
10429 temp_event_data.data = (uint32_t) icmd->ulpContext;
10430 temp_event_data.event_type = FC_REG_TEMPERATURE_EVENT;
10431 if (evt_code == ASYNC_TEMP_WARN) {
10432 temp_event_data.event_code = LPFC_THRESHOLD_TEMP;
10433 lpfc_printf_log(phba, KERN_ERR, LOG_TEMP,
10434 "0347 Adapter is very hot, please take "
10435 "corrective action. temperature : %d Celsius\n",
10436 (uint32_t) icmd->ulpContext);
10437 } else {
10438 temp_event_data.event_code = LPFC_NORMAL_TEMP;
10439 lpfc_printf_log(phba, KERN_ERR, LOG_TEMP,
10440 "0340 Adapter temperature is OK now. "
10441 "temperature : %d Celsius\n",
10442 (uint32_t) icmd->ulpContext);
10443 }
10444
10445
10446 shost = lpfc_shost_from_vport(phba->pport);
10447 fc_host_post_vendor_event(shost, fc_get_event_number(),
10448 sizeof(temp_event_data), (char *) &temp_event_data,
10449 LPFC_NL_VENDOR_ID);
10450 break;
10451 case ASYNC_STATUS_CN:
10452 lpfc_sli_abts_err_handler(phba, iocbq);
10453 break;
10454 default:
10455 iocb_w = (uint32_t *) icmd;
10456 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
10457 "0346 Ring %d handler: unexpected ASYNC_STATUS"
10458 " evt_code 0x%x\n"
10459 "W0 0x%08x W1 0x%08x W2 0x%08x W3 0x%08x\n"
10460 "W4 0x%08x W5 0x%08x W6 0x%08x W7 0x%08x\n"
10461 "W8 0x%08x W9 0x%08x W10 0x%08x W11 0x%08x\n"
10462 "W12 0x%08x W13 0x%08x W14 0x%08x W15 0x%08x\n",
10463 pring->ringno, icmd->un.asyncstat.evt_code,
10464 iocb_w[0], iocb_w[1], iocb_w[2], iocb_w[3],
10465 iocb_w[4], iocb_w[5], iocb_w[6], iocb_w[7],
10466 iocb_w[8], iocb_w[9], iocb_w[10], iocb_w[11],
10467 iocb_w[12], iocb_w[13], iocb_w[14], iocb_w[15]);
10468
10469 break;
10470 }
10471}
10472
10473
10474
10475
10476
10477
10478
10479
10480
10481
10482
10483
10484
10485int
10486lpfc_sli4_setup(struct lpfc_hba *phba)
10487{
10488 struct lpfc_sli_ring *pring;
10489
10490 pring = phba->sli4_hba.els_wq->pring;
10491 pring->num_mask = LPFC_MAX_RING_MASK;
10492 pring->prt[0].profile = 0;
10493 pring->prt[0].rctl = FC_RCTL_ELS_REQ;
10494 pring->prt[0].type = FC_TYPE_ELS;
10495 pring->prt[0].lpfc_sli_rcv_unsol_event =
10496 lpfc_els_unsol_event;
10497 pring->prt[1].profile = 0;
10498 pring->prt[1].rctl = FC_RCTL_ELS_REP;
10499 pring->prt[1].type = FC_TYPE_ELS;
10500 pring->prt[1].lpfc_sli_rcv_unsol_event =
10501 lpfc_els_unsol_event;
10502 pring->prt[2].profile = 0;
10503
10504 pring->prt[2].rctl = FC_RCTL_DD_UNSOL_CTL;
10505
10506 pring->prt[2].type = FC_TYPE_CT;
10507 pring->prt[2].lpfc_sli_rcv_unsol_event =
10508 lpfc_ct_unsol_event;
10509 pring->prt[3].profile = 0;
10510
10511 pring->prt[3].rctl = FC_RCTL_DD_SOL_CTL;
10512
10513 pring->prt[3].type = FC_TYPE_CT;
10514 pring->prt[3].lpfc_sli_rcv_unsol_event =
10515 lpfc_ct_unsol_event;
10516 return 0;
10517}
10518
10519
10520
10521
10522
10523
10524
10525
10526
10527
10528
10529
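/**
 * lpfc_sli_setup - SLI-3 ring setup function
 * @phba: Pointer to HBA context object.
 *
 * Sets up the FCP, extra and ELS rings: number of command and response
 * iocbs per ring, their sizes, the iotag ranges and the unsolicited event
 * masks.  Called while the driver attaches to the HBA, before the rings
 * are used.  If the configured entries exceed the SLIM size a warning is
 * printed.  Always returns 0.
 **/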
10530int
10531lpfc_sli_setup(struct lpfc_hba *phba)
10532{
10533 int i, totiocbsize = 0;
10534 struct lpfc_sli *psli = &phba->sli;
10535 struct lpfc_sli_ring *pring;
10536
10537 psli->num_rings = MAX_SLI3_CONFIGURED_RINGS;
10538 psli->sli_flag = 0;
10539
10540 psli->iocbq_lookup = NULL;
10541 psli->iocbq_lookup_len = 0;
10542 psli->last_iotag = 0;
10543
10544 for (i = 0; i < psli->num_rings; i++) {
10545 pring = &psli->sli3_ring[i];
10546 switch (i) {
10547 case LPFC_FCP_RING:
10548
10549 pring->sli.sli3.numCiocb = SLI2_IOCB_CMD_R0_ENTRIES;
10550 pring->sli.sli3.numRiocb = SLI2_IOCB_RSP_R0_ENTRIES;
10551 pring->sli.sli3.numCiocb +=
10552 SLI2_IOCB_CMD_R1XTRA_ENTRIES;
10553 pring->sli.sli3.numRiocb +=
10554 SLI2_IOCB_RSP_R1XTRA_ENTRIES;
10555 pring->sli.sli3.numCiocb +=
10556 SLI2_IOCB_CMD_R3XTRA_ENTRIES;
10557 pring->sli.sli3.numRiocb +=
10558 SLI2_IOCB_RSP_R3XTRA_ENTRIES;
10559 pring->sli.sli3.sizeCiocb = (phba->sli_rev == 3) ?
10560 SLI3_IOCB_CMD_SIZE :
10561 SLI2_IOCB_CMD_SIZE;
10562 pring->sli.sli3.sizeRiocb = (phba->sli_rev == 3) ?
10563 SLI3_IOCB_RSP_SIZE :
10564 SLI2_IOCB_RSP_SIZE;
10565 pring->iotag_ctr = 0;
10566 pring->iotag_max =
10567 (phba->cfg_hba_queue_depth * 2);
10568 pring->fast_iotag = pring->iotag_max;
10569 pring->num_mask = 0;
10570 break;
10571 case LPFC_EXTRA_RING:
10572
10573 pring->sli.sli3.numCiocb = SLI2_IOCB_CMD_R1_ENTRIES;
10574 pring->sli.sli3.numRiocb = SLI2_IOCB_RSP_R1_ENTRIES;
10575 pring->sli.sli3.sizeCiocb = (phba->sli_rev == 3) ?
10576 SLI3_IOCB_CMD_SIZE :
10577 SLI2_IOCB_CMD_SIZE;
10578 pring->sli.sli3.sizeRiocb = (phba->sli_rev == 3) ?
10579 SLI3_IOCB_RSP_SIZE :
10580 SLI2_IOCB_RSP_SIZE;
10581 pring->iotag_max = phba->cfg_hba_queue_depth;
10582 pring->num_mask = 0;
10583 break;
10584 case LPFC_ELS_RING:
10585
10586 pring->sli.sli3.numCiocb = SLI2_IOCB_CMD_R2_ENTRIES;
10587 pring->sli.sli3.numRiocb = SLI2_IOCB_RSP_R2_ENTRIES;
10588 pring->sli.sli3.sizeCiocb = (phba->sli_rev == 3) ?
10589 SLI3_IOCB_CMD_SIZE :
10590 SLI2_IOCB_CMD_SIZE;
10591 pring->sli.sli3.sizeRiocb = (phba->sli_rev == 3) ?
10592 SLI3_IOCB_RSP_SIZE :
10593 SLI2_IOCB_RSP_SIZE;
10594 pring->fast_iotag = 0;
10595 pring->iotag_ctr = 0;
10596 pring->iotag_max = 4096;
10597 pring->lpfc_sli_rcv_async_status =
10598 lpfc_sli_async_event_handler;
10599 pring->num_mask = LPFC_MAX_RING_MASK;
10600 pring->prt[0].profile = 0;
10601 pring->prt[0].rctl = FC_RCTL_ELS_REQ;
10602 pring->prt[0].type = FC_TYPE_ELS;
10603 pring->prt[0].lpfc_sli_rcv_unsol_event =
10604 lpfc_els_unsol_event;
10605 pring->prt[1].profile = 0;
10606 pring->prt[1].rctl = FC_RCTL_ELS_REP;
10607 pring->prt[1].type = FC_TYPE_ELS;
10608 pring->prt[1].lpfc_sli_rcv_unsol_event =
10609 lpfc_els_unsol_event;
10610 pring->prt[2].profile = 0;
10611
10612 pring->prt[2].rctl = FC_RCTL_DD_UNSOL_CTL;
10613
10614 pring->prt[2].type = FC_TYPE_CT;
10615 pring->prt[2].lpfc_sli_rcv_unsol_event =
10616 lpfc_ct_unsol_event;
10617 pring->prt[3].profile = 0;
10618
10619 pring->prt[3].rctl = FC_RCTL_DD_SOL_CTL;
10620
10621 pring->prt[3].type = FC_TYPE_CT;
10622 pring->prt[3].lpfc_sli_rcv_unsol_event =
10623 lpfc_ct_unsol_event;
10624 break;
10625 }
10626 totiocbsize += (pring->sli.sli3.numCiocb *
10627 pring->sli.sli3.sizeCiocb) +
10628 (pring->sli.sli3.numRiocb * pring->sli.sli3.sizeRiocb);
10629 }
10630 if (totiocbsize > MAX_SLIM_IOCB_SIZE) {
10631
10632 printk(KERN_ERR "%d:0462 Too many cmd / rsp ring entries in "
10633 "SLI2 SLIM Data: x%x x%lx\n",
10634 phba->brd_no, totiocbsize,
10635 (unsigned long) MAX_SLIM_IOCB_SIZE);
10636 }
10637 if (phba->cfg_multi_ring_support == 2)
10638 lpfc_extra_ring_setup(phba);
10639
10640 return 0;
10641}
10642
10643
10644
10645
10646
10647
10648
10649
10650
10651
10652
10653
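/**
 * lpfc_sli4_queue_init - SLI-4 queue initialization function
 * @phba: Pointer to HBA context object.
 *
 * Initializes the mailbox lists and the pring of every SLI-4 work queue
 * (one per hardware queue, the ELS work queue, and the NVME LS work queue
 * when NVME is enabled): txq/txcmplq lists, ring numbers and per-ring locks.
 * Called while the driver attaches to the HBA.
 **/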
10654void
10655lpfc_sli4_queue_init(struct lpfc_hba *phba)
10656{
10657 struct lpfc_sli *psli;
10658 struct lpfc_sli_ring *pring;
10659 int i;
10660
10661 psli = &phba->sli;
10662 spin_lock_irq(&phba->hbalock);
10663 INIT_LIST_HEAD(&psli->mboxq);
10664 INIT_LIST_HEAD(&psli->mboxq_cmpl);
10665
10666 for (i = 0; i < phba->cfg_hdw_queue; i++) {
10667 pring = phba->sli4_hba.hdwq[i].io_wq->pring;
10668 pring->flag = 0;
10669 pring->ringno = LPFC_FCP_RING;
10670 pring->txcmplq_cnt = 0;
10671 INIT_LIST_HEAD(&pring->txq);
10672 INIT_LIST_HEAD(&pring->txcmplq);
10673 INIT_LIST_HEAD(&pring->iocb_continueq);
10674 spin_lock_init(&pring->ring_lock);
10675 }
10676 pring = phba->sli4_hba.els_wq->pring;
10677 pring->flag = 0;
10678 pring->ringno = LPFC_ELS_RING;
10679 pring->txcmplq_cnt = 0;
10680 INIT_LIST_HEAD(&pring->txq);
10681 INIT_LIST_HEAD(&pring->txcmplq);
10682 INIT_LIST_HEAD(&pring->iocb_continueq);
10683 spin_lock_init(&pring->ring_lock);
10684
10685 if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) {
10686 pring = phba->sli4_hba.nvmels_wq->pring;
10687 pring->flag = 0;
10688 pring->ringno = LPFC_ELS_RING;
10689 pring->txcmplq_cnt = 0;
10690 INIT_LIST_HEAD(&pring->txq);
10691 INIT_LIST_HEAD(&pring->txcmplq);
10692 INIT_LIST_HEAD(&pring->iocb_continueq);
10693 spin_lock_init(&pring->ring_lock);
10694 }
10695
10696 spin_unlock_irq(&phba->hbalock);
10697}
10698
10699
10700
10701
10702
10703
10704
10705
10706
10707
10708
10709
10710void
10711lpfc_sli_queue_init(struct lpfc_hba *phba)
10712{
10713 struct lpfc_sli *psli;
10714 struct lpfc_sli_ring *pring;
10715 int i;
10716
10717 psli = &phba->sli;
10718 spin_lock_irq(&phba->hbalock);
10719 INIT_LIST_HEAD(&psli->mboxq);
10720 INIT_LIST_HEAD(&psli->mboxq_cmpl);
10721
10722 for (i = 0; i < psli->num_rings; i++) {
10723 pring = &psli->sli3_ring[i];
10724 pring->ringno = i;
10725 pring->sli.sli3.next_cmdidx = 0;
10726 pring->sli.sli3.local_getidx = 0;
10727 pring->sli.sli3.cmdidx = 0;
10728 INIT_LIST_HEAD(&pring->iocb_continueq);
10729 INIT_LIST_HEAD(&pring->iocb_continue_saveq);
10730 INIT_LIST_HEAD(&pring->postbufq);
10731 pring->flag = 0;
10732 INIT_LIST_HEAD(&pring->txq);
10733 INIT_LIST_HEAD(&pring->txcmplq);
10734 spin_lock_init(&pring->ring_lock);
10735 }
10736 spin_unlock_irq(&phba->hbalock);
10737}
10738
10739
10740
10741
10742
10743
10744
10745
10746
10747
10748
10749
10750
10751
10752
10753
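/**
 * lpfc_sli_mbox_sys_flush - Flush the mailbox command sub-system
 * @phba: Pointer to HBA context object.
 *
 * Unconditionally flushes mailbox commands in all three stages of the
 * mailbox sub-system: the pending queue, the currently active command, and
 * the completed queue.  Each flushed command is completed back to its owner
 * with MBX_NOT_FINISHED status.
 **/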
10754static void
10755lpfc_sli_mbox_sys_flush(struct lpfc_hba *phba)
10756{
10757 LIST_HEAD(completions);
10758 struct lpfc_sli *psli = &phba->sli;
10759 LPFC_MBOXQ_t *pmb;
10760 unsigned long iflag;
10761
10762
10763 local_bh_disable();
10764
10765
10766 spin_lock_irqsave(&phba->hbalock, iflag);
10767
10768
10769 list_splice_init(&phba->sli.mboxq, &completions);
10770
10771 if (psli->mbox_active) {
10772 list_add_tail(&psli->mbox_active->list, &completions);
10773 psli->mbox_active = NULL;
10774 psli->sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
10775 }
10776
10777 list_splice_init(&phba->sli.mboxq_cmpl, &completions);
10778 spin_unlock_irqrestore(&phba->hbalock, iflag);
10779
10780
10781 local_bh_enable();
10782
10783
10784 while (!list_empty(&completions)) {
10785 list_remove_head(&completions, pmb, LPFC_MBOXQ_t, list);
10786 pmb->u.mb.mbxStatus = MBX_NOT_FINISHED;
10787 if (pmb->mbox_cmpl)
10788 pmb->mbox_cmpl(phba, pmb);
10789 }
10790}
10791
10792
10793
10794
10795
10796
10797
10798
10799
10800
10801
10802
10803
10804
10805
10806
10807
10808
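/**
 * lpfc_sli_host_down - Vport cleanup function
 * @vport: Pointer to the virtual port object.
 *
 * Cleans up the SLI resources associated with a vport before its data
 * structures are destroyed.  Pending txq iocbs owned by the vport are
 * cancelled with IOSTAT_LOCAL_REJECT/IOERR_SLI_DOWN, and iocbs already
 * submitted to the HBA are aborted.  Always returns 1.
 **/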
10809int
10810lpfc_sli_host_down(struct lpfc_vport *vport)
10811{
10812 LIST_HEAD(completions);
10813 struct lpfc_hba *phba = vport->phba;
10814 struct lpfc_sli *psli = &phba->sli;
10815 struct lpfc_queue *qp = NULL;
10816 struct lpfc_sli_ring *pring;
10817 struct lpfc_iocbq *iocb, *next_iocb;
10818 int i;
10819 unsigned long flags = 0;
10820 uint16_t prev_pring_flag;
10821
10822 lpfc_cleanup_discovery_resources(vport);
10823
10824 spin_lock_irqsave(&phba->hbalock, flags);
10825
10826
10827
10828
10829
10830
10831 if (phba->sli_rev != LPFC_SLI_REV4) {
10832 for (i = 0; i < psli->num_rings; i++) {
10833 pring = &psli->sli3_ring[i];
10834 prev_pring_flag = pring->flag;
10835
10836 if (pring->ringno == LPFC_ELS_RING) {
10837 pring->flag |= LPFC_DEFERRED_RING_EVENT;
10838
10839 set_bit(LPFC_DATA_READY, &phba->data_flags);
10840 }
10841 list_for_each_entry_safe(iocb, next_iocb,
10842 &pring->txq, list) {
10843 if (iocb->vport != vport)
10844 continue;
10845 list_move_tail(&iocb->list, &completions);
10846 }
10847 list_for_each_entry_safe(iocb, next_iocb,
10848 &pring->txcmplq, list) {
10849 if (iocb->vport != vport)
10850 continue;
10851 lpfc_sli_issue_abort_iotag(phba, pring, iocb);
10852 }
10853 pring->flag = prev_pring_flag;
10854 }
10855 } else {
10856 list_for_each_entry(qp, &phba->sli4_hba.lpfc_wq_list, wq_list) {
10857 pring = qp->pring;
10858 if (!pring)
10859 continue;
10860 if (pring == phba->sli4_hba.els_wq->pring) {
10861 pring->flag |= LPFC_DEFERRED_RING_EVENT;
10862
10863 set_bit(LPFC_DATA_READY, &phba->data_flags);
10864 }
10865 prev_pring_flag = pring->flag;
10866 spin_lock(&pring->ring_lock);
10867 list_for_each_entry_safe(iocb, next_iocb,
10868 &pring->txq, list) {
10869 if (iocb->vport != vport)
10870 continue;
10871 list_move_tail(&iocb->list, &completions);
10872 }
10873 spin_unlock(&pring->ring_lock);
10874 list_for_each_entry_safe(iocb, next_iocb,
10875 &pring->txcmplq, list) {
10876 if (iocb->vport != vport)
10877 continue;
10878 lpfc_sli_issue_abort_iotag(phba, pring, iocb);
10879 }
10880 pring->flag = prev_pring_flag;
10881 }
10882 }
10883 spin_unlock_irqrestore(&phba->hbalock, flags);
10884
10885
10886 lpfc_sli_cancel_iocbs(phba, &completions, IOSTAT_LOCAL_REJECT,
10887 IOERR_SLI_DOWN);
10888 return 1;
10889}
10890
10891
10892
10893
10894
10895
10896
10897
10898
10899
10900
10901
10902
10903
10904
10905
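/**
 * lpfc_sli_hba_down - HBA resource cleanup function
 * @phba: Pointer to HBA context object.
 *
 * Cleans up driver resources when the HBA is brought down: shuts down and
 * flushes the mailbox sub-system, cancels all pending txq iocbs with
 * IOSTAT_LOCAL_REJECT/IOERR_SLI_DOWN, frees the posted ELS buffers, and
 * stops the mailbox timer.  Always returns 1.
 **/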
10906int
10907lpfc_sli_hba_down(struct lpfc_hba *phba)
10908{
10909 LIST_HEAD(completions);
10910 struct lpfc_sli *psli = &phba->sli;
10911 struct lpfc_queue *qp = NULL;
10912 struct lpfc_sli_ring *pring;
10913 struct lpfc_dmabuf *buf_ptr;
10914 unsigned long flags = 0;
10915 int i;
10916
10917
10918 lpfc_sli_mbox_sys_shutdown(phba, LPFC_MBX_WAIT);
10919
10920 lpfc_hba_down_prep(phba);
10921
10922
10923 local_bh_disable();
10924
10925 lpfc_fabric_abort_hba(phba);
10926
10927 spin_lock_irqsave(&phba->hbalock, flags);
10928
10929
10930
10931
10932
10933 if (phba->sli_rev != LPFC_SLI_REV4) {
10934 for (i = 0; i < psli->num_rings; i++) {
10935 pring = &psli->sli3_ring[i];
10936
10937 if (pring->ringno == LPFC_ELS_RING) {
10938 pring->flag |= LPFC_DEFERRED_RING_EVENT;
10939
10940 set_bit(LPFC_DATA_READY, &phba->data_flags);
10941 }
10942 list_splice_init(&pring->txq, &completions);
10943 }
10944 } else {
10945 list_for_each_entry(qp, &phba->sli4_hba.lpfc_wq_list, wq_list) {
10946 pring = qp->pring;
10947 if (!pring)
10948 continue;
10949 spin_lock(&pring->ring_lock);
10950 list_splice_init(&pring->txq, &completions);
10951 spin_unlock(&pring->ring_lock);
10952 if (pring == phba->sli4_hba.els_wq->pring) {
10953 pring->flag |= LPFC_DEFERRED_RING_EVENT;
10954
10955 set_bit(LPFC_DATA_READY, &phba->data_flags);
10956 }
10957 }
10958 }
10959 spin_unlock_irqrestore(&phba->hbalock, flags);
10960
10961
10962 lpfc_sli_cancel_iocbs(phba, &completions, IOSTAT_LOCAL_REJECT,
10963 IOERR_SLI_DOWN);
10964
10965 spin_lock_irqsave(&phba->hbalock, flags);
10966 list_splice_init(&phba->elsbuf, &completions);
10967 phba->elsbuf_cnt = 0;
10968 phba->elsbuf_prev_cnt = 0;
10969 spin_unlock_irqrestore(&phba->hbalock, flags);
10970
10971 while (!list_empty(&completions)) {
10972 list_remove_head(&completions, buf_ptr,
10973 struct lpfc_dmabuf, list);
10974 lpfc_mbuf_free(phba, buf_ptr->virt, buf_ptr->phys);
10975 kfree(buf_ptr);
10976 }
10977
10978
10979 local_bh_enable();
10980
10981
10982 del_timer_sync(&psli->mbox_tmo);
10983
10984 spin_lock_irqsave(&phba->pport->work_port_lock, flags);
10985 phba->pport->work_port_events &= ~WORKER_MBOX_TMO;
10986 spin_unlock_irqrestore(&phba->pport->work_port_lock, flags);
10987
10988 return 1;
10989}
10990
10991
10992
10993
10994
10995
10996
10997
10998
10999
11000
11001
11002
11003void
11004lpfc_sli_pcimem_bcopy(void *srcp, void *destp, uint32_t cnt)
11005{
11006 uint32_t *src = srcp;
11007 uint32_t *dest = destp;
11008 uint32_t ldata;
11009 int i;
11010
11011 for (i = 0; i < (int)cnt; i += sizeof (uint32_t)) {
11012 ldata = *src;
11013 ldata = le32_to_cpu(ldata);
11014 *dest = ldata;
11015 src++;
11016 dest++;
11017 }
11018}
11019
11020
11021
11022
11023
11024
11025
11026
11027
11028
11029
11030
11031void
11032lpfc_sli_bemem_bcopy(void *srcp, void *destp, uint32_t cnt)
11033{
11034 uint32_t *src = srcp;
11035 uint32_t *dest = destp;
11036 uint32_t ldata;
11037 int i;
11038
11039 for (i = 0; i < (int)cnt; i += sizeof(uint32_t)) {
11040 ldata = *src;
11041 ldata = be32_to_cpu(ldata);
11042 *dest = ldata;
11043 src++;
11044 dest++;
11045 }
11046}
11047
11048
11049
11050
11051
11052
11053
11054
11055
11056
11057
11058int
11059lpfc_sli_ringpostbuf_put(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
11060 struct lpfc_dmabuf *mp)
11061{
11062
11063
11064 spin_lock_irq(&phba->hbalock);
11065 list_add_tail(&mp->list, &pring->postbufq);
11066 pring->postbufq_cnt++;
11067 spin_unlock_irq(&phba->hbalock);
11068 return 0;
11069}
11070
11071
11072
11073
11074
11075
11076
11077
11078
11079
11080
11081
11082uint32_t
11083lpfc_sli_get_buffer_tag(struct lpfc_hba *phba)
11084{
11085 spin_lock_irq(&phba->hbalock);
11086 phba->buffer_tag_count++;
11087
11088
11089
11090
11091 phba->buffer_tag_count |= QUE_BUFTAG_BIT;
11092 spin_unlock_irq(&phba->hbalock);
11093 return phba->buffer_tag_count;
11094}
11095
11096
11097
11098
11099
11100
11101
11102
11103
11104
11105
11106
11107
11108
11109
11110
11111struct lpfc_dmabuf *
11112lpfc_sli_ring_taggedbuf_get(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
11113 uint32_t tag)
11114{
11115 struct lpfc_dmabuf *mp, *next_mp;
11116 struct list_head *slp = &pring->postbufq;
11117
11118
11119 spin_lock_irq(&phba->hbalock);
11120 list_for_each_entry_safe(mp, next_mp, &pring->postbufq, list) {
11121 if (mp->buffer_tag == tag) {
11122 list_del_init(&mp->list);
11123 pring->postbufq_cnt--;
11124 spin_unlock_irq(&phba->hbalock);
11125 return mp;
11126 }
11127 }
11128
11129 spin_unlock_irq(&phba->hbalock);
11130 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
11131 "0402 Cannot find virtual addr for buffer tag on "
11132 "ring %d Data x%lx x%px x%px x%x\n",
11133 pring->ringno, (unsigned long) tag,
11134 slp->next, slp->prev, pring->postbufq_cnt);
11135
11136 return NULL;
11137}
11138
11139
11140
11141
11142
11143
11144
11145
11146
11147
11148
11149
11150
11151
11152
11153
11154
11155struct lpfc_dmabuf *
11156lpfc_sli_ringpostbuf_get(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
11157 dma_addr_t phys)
11158{
11159 struct lpfc_dmabuf *mp, *next_mp;
11160 struct list_head *slp = &pring->postbufq;
11161
11162
11163 spin_lock_irq(&phba->hbalock);
11164 list_for_each_entry_safe(mp, next_mp, &pring->postbufq, list) {
11165 if (mp->phys == phys) {
11166 list_del_init(&mp->list);
11167 pring->postbufq_cnt--;
11168 spin_unlock_irq(&phba->hbalock);
11169 return mp;
11170 }
11171 }
11172
11173 spin_unlock_irq(&phba->hbalock);
11174 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
11175 "0410 Cannot find virtual addr for mapped buf on "
11176 "ring %d Data x%llx x%px x%px x%x\n",
11177 pring->ringno, (unsigned long long)phys,
11178 slp->next, slp->prev, pring->postbufq_cnt);
11179 return NULL;
11180}
11181
11182
11183
11184
11185
11186
11187
11188
11189
11190
11191
11192
11193static void
11194lpfc_sli_abort_els_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
11195 struct lpfc_iocbq *rspiocb)
11196{
11197 IOCB_t *irsp = &rspiocb->iocb;
11198 uint16_t abort_iotag, abort_context;
11199 struct lpfc_iocbq *abort_iocb = NULL;
11200
11201 if (irsp->ulpStatus) {
11202
11203
11204
11205
11206
11207 abort_context = cmdiocb->iocb.un.acxri.abortContextTag;
11208 abort_iotag = cmdiocb->iocb.un.acxri.abortIoTag;
11209
11210 spin_lock_irq(&phba->hbalock);
11211 if (phba->sli_rev < LPFC_SLI_REV4) {
11212 if (irsp->ulpCommand == CMD_ABORT_XRI_CX &&
11213 irsp->ulpStatus == IOSTAT_LOCAL_REJECT &&
11214 irsp->un.ulpWord[4] == IOERR_ABORT_REQUESTED) {
11215 spin_unlock_irq(&phba->hbalock);
11216 goto release_iocb;
11217 }
11218 if (abort_iotag != 0 &&
11219 abort_iotag <= phba->sli.last_iotag)
11220 abort_iocb =
11221 phba->sli.iocbq_lookup[abort_iotag];
11222 } else
11223 /*
11224 * For SLI-4 the abort context tag carries the iotag of the
11225 * iocb being aborted (see lpfc_sli_abort_iotag_issue), so it
11226 * can be used directly to look up the aborted iocb.
11227 */
11228 abort_iocb = phba->sli.iocbq_lookup[abort_context];
11229
11230 lpfc_printf_log(phba, KERN_WARNING, LOG_ELS | LOG_SLI,
11231 "0327 Cannot abort els iocb x%px "
11232 "with tag %x context %x, abort status %x, "
11233 "abort code %x\n",
11234 abort_iocb, abort_iotag, abort_context,
11235 irsp->ulpStatus, irsp->un.ulpWord[4]);
11236
11237 spin_unlock_irq(&phba->hbalock);
11238 }
11239release_iocb:
11240 lpfc_sli_release_iocbq(phba, cmdiocb);
11241 return;
11242}
11243
11244
11245
11246
11247
11248
11249
11250
11251
11252
11253
11254
11255static void
11256lpfc_ignore_els_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
11257 struct lpfc_iocbq *rspiocb)
11258{
11259 IOCB_t *irsp = &rspiocb->iocb;
11260
11261
11262 lpfc_printf_log(phba, KERN_INFO, LOG_ELS,
11263 "0139 Ignoring ELS cmd tag x%x completion Data: "
11264 "x%x x%x x%x\n",
11265 irsp->ulpIoTag, irsp->ulpStatus,
11266 irsp->un.ulpWord[4], irsp->ulpTimeout);
11267 if (cmdiocb->iocb.ulpCommand == CMD_GEN_REQUEST64_CR)
11268 lpfc_ct_free_iocb(phba, cmdiocb);
11269 else
11270 lpfc_els_free_iocb(phba, cmdiocb);
11271 return;
11272}
11273
11274
11275
11276
11277
11278
11279
11280
11281
11282
11283
11284
11285
11286
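/**
 * lpfc_sli_abort_iotag_issue - Issue an abort iocb for a command iocb
 * @phba: Pointer to HBA context object.
 * @pring: Pointer to driver SLI ring object.
 * @cmdiocb: Pointer to the driver command iocb to be aborted.
 *
 * Builds and issues an ABORT/CLOSE XRI iocb for @cmdiocb, marking the
 * command LPFC_DRIVER_ABORTED.  Called with the hbalock held by the caller
 * (see lpfc_sli_issue_abort_iotag); for SLI-4 the target ring's ring_lock
 * is taken here before issuing.  The aborted iocb completes later through
 * its own completion handler.
 **/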
11287static int
11288lpfc_sli_abort_iotag_issue(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
11289 struct lpfc_iocbq *cmdiocb)
11290{
11291 struct lpfc_vport *vport = cmdiocb->vport;
11292 struct lpfc_iocbq *abtsiocbp;
11293 IOCB_t *icmd = NULL;
11294 IOCB_t *iabt = NULL;
11295 int retval;
11296 unsigned long iflags;
11297 struct lpfc_nodelist *ndlp;
11298
11299
11300
11301
11302
11303
11304 icmd = &cmdiocb->iocb;
11305 if (icmd->ulpCommand == CMD_ABORT_XRI_CN ||
11306 icmd->ulpCommand == CMD_CLOSE_XRI_CN ||
11307 (cmdiocb->iocb_flag & LPFC_DRIVER_ABORTED) != 0)
11308 return 0;
11309
11310
11311 abtsiocbp = __lpfc_sli_get_iocbq(phba);
11312 if (abtsiocbp == NULL)
11313 return 0;
11314
11315
11316
11317
11318 cmdiocb->iocb_flag |= LPFC_DRIVER_ABORTED;
11319
11320 iabt = &abtsiocbp->iocb;
11321 iabt->un.acxri.abortType = ABORT_TYPE_ABTS;
11322 iabt->un.acxri.abortContextTag = icmd->ulpContext;
11323 if (phba->sli_rev == LPFC_SLI_REV4) {
11324 iabt->un.acxri.abortIoTag = cmdiocb->sli4_xritag;
11325 iabt->un.acxri.abortContextTag = cmdiocb->iotag;
11326 } else {
11327 iabt->un.acxri.abortIoTag = icmd->ulpIoTag;
11328 if (pring->ringno == LPFC_ELS_RING) {
11329 ndlp = (struct lpfc_nodelist *)(cmdiocb->context1);
11330 iabt->un.acxri.abortContextTag = ndlp->nlp_rpi;
11331 }
11332 }
11333 iabt->ulpLe = 1;
11334 iabt->ulpClass = icmd->ulpClass;
11335
11336
11337 abtsiocbp->hba_wqidx = cmdiocb->hba_wqidx;
11338 if (cmdiocb->iocb_flag & LPFC_IO_FCP)
11339 abtsiocbp->iocb_flag |= LPFC_USE_FCPWQIDX;
11340 if (cmdiocb->iocb_flag & LPFC_IO_FOF)
11341 abtsiocbp->iocb_flag |= LPFC_IO_FOF;
11342
11343 if (phba->link_state >= LPFC_LINK_UP)
11344 iabt->ulpCommand = CMD_ABORT_XRI_CN;
11345 else
11346 iabt->ulpCommand = CMD_CLOSE_XRI_CN;
11347
11348 abtsiocbp->iocb_cmpl = lpfc_sli_abort_els_cmpl;
11349 abtsiocbp->vport = vport;
11350
11351 lpfc_printf_vlog(vport, KERN_INFO, LOG_SLI,
11352 "0339 Abort xri x%x, original iotag x%x, "
11353 "abort cmd iotag x%x\n",
11354 iabt->un.acxri.abortIoTag,
11355 iabt->un.acxri.abortContextTag,
11356 abtsiocbp->iotag);
11357
11358 if (phba->sli_rev == LPFC_SLI_REV4) {
11359 pring = lpfc_sli4_calc_ring(phba, abtsiocbp);
11360 if (unlikely(pring == NULL))
11361 return 0;
11362
11363 spin_lock_irqsave(&pring->ring_lock, iflags);
11364 retval = __lpfc_sli_issue_iocb(phba, pring->ringno,
11365 abtsiocbp, 0);
11366 spin_unlock_irqrestore(&pring->ring_lock, iflags);
11367 } else {
11368 retval = __lpfc_sli_issue_iocb(phba, pring->ringno,
11369 abtsiocbp, 0);
11370 }
11371
11372 if (retval)
11373 __lpfc_sli_release_iocbq(phba, abtsiocbp);
11374
11375
11376
11377
11378
11379
11380 return retval;
11381}
11382
11383
11384
11385
11386
11387
11388
11389
11390
11391
11392
11393
11394
11395
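/**
 * lpfc_sli_issue_abort_iotag - Abort function for a command iocb
 * @phba: Pointer to HBA context object.
 * @pring: Pointer to driver SLI ring object.
 * @cmdiocb: Pointer to the driver command iocb to be aborted.
 *
 * Issues an abort for the provided command iocb.  When no ring is supplied,
 * or the vport is unloading and the command is on the ELS ring, no abort is
 * sent; instead the completion handler is replaced with lpfc_ignore_els_cmpl
 * so the command completes quietly.  Called with hbalock held.
 **/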
11396int
11397lpfc_sli_issue_abort_iotag(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
11398 struct lpfc_iocbq *cmdiocb)
11399{
11400 struct lpfc_vport *vport = cmdiocb->vport;
11401 int retval = IOCB_ERROR;
11402 IOCB_t *icmd = NULL;
11403
11404 lockdep_assert_held(&phba->hbalock);
11405
11406
11407
11408
11409
11410
11411 icmd = &cmdiocb->iocb;
11412 if (icmd->ulpCommand == CMD_ABORT_XRI_CN ||
11413 icmd->ulpCommand == CMD_CLOSE_XRI_CN ||
11414 (cmdiocb->iocb_flag & LPFC_DRIVER_ABORTED) != 0)
11415 return 0;
11416
11417 if (!pring) {
11418 if (cmdiocb->iocb_flag & LPFC_IO_FABRIC)
11419 cmdiocb->fabric_iocb_cmpl = lpfc_ignore_els_cmpl;
11420 else
11421 cmdiocb->iocb_cmpl = lpfc_ignore_els_cmpl;
11422 goto abort_iotag_exit;
11423 }
11424
11425
11426
11427
11428
11429 if ((vport->load_flag & FC_UNLOADING) &&
11430 (pring->ringno == LPFC_ELS_RING)) {
11431 if (cmdiocb->iocb_flag & LPFC_IO_FABRIC)
11432 cmdiocb->fabric_iocb_cmpl = lpfc_ignore_els_cmpl;
11433 else
11434 cmdiocb->iocb_cmpl = lpfc_ignore_els_cmpl;
11435 goto abort_iotag_exit;
11436 }
11437
11438
11439 retval = lpfc_sli_abort_iotag_issue(phba, pring, cmdiocb);
11440
11441abort_iotag_exit:
11442
11443
11444
11445
11446
11447 return retval;
11448}
11449
11450
11451
11452
11453
11454
11455
11456void
11457lpfc_sli_hba_iocb_abort(struct lpfc_hba *phba)
11458{
11459 struct lpfc_sli *psli = &phba->sli;
11460 struct lpfc_sli_ring *pring;
11461 struct lpfc_queue *qp = NULL;
11462 int i;
11463
11464 if (phba->sli_rev != LPFC_SLI_REV4) {
11465 for (i = 0; i < psli->num_rings; i++) {
11466 pring = &psli->sli3_ring[i];
11467 lpfc_sli_abort_iocb_ring(phba, pring);
11468 }
11469 return;
11470 }
11471 list_for_each_entry(qp, &phba->sli4_hba.lpfc_wq_list, wq_list) {
11472 pring = qp->pring;
11473 if (!pring)
11474 continue;
11475 lpfc_sli_abort_iocb_ring(phba, pring);
11476 }
11477}
11478
11479
11480
11481
11482
11483
11484
11485
11486
11487
11488
11489
11490
11491
11492
11493
11494
11495
11496
11497
11498
11499
11500
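/**
 * lpfc_sli_validate_fcp_iocb - Filter FCP iocbs by vport, target or LUN
 * @iocbq: Pointer to driver iocb object.
 * @vport: Pointer to driver virtual port object.
 * @tgt_id: SCSI ID of the target.
 * @lun_id: LUN ID of the SCSI command.
 * @ctx_cmd: LPFC_CTX_LUN/LPFC_CTX_TGT/LPFC_CTX_HOST.
 *
 * Acts as an iocb filter for routines that abort or count all FCP iocbs
 * pending on a LUN, SCSI target or SCSI host.  Returns 0 if the iocb meets
 * the filtering criteria, 1 otherwise.
 **/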
11501static int
11502lpfc_sli_validate_fcp_iocb(struct lpfc_iocbq *iocbq, struct lpfc_vport *vport,
11503 uint16_t tgt_id, uint64_t lun_id,
11504 lpfc_ctx_cmd ctx_cmd)
11505{
11506 struct lpfc_io_buf *lpfc_cmd;
11507 int rc = 1;
11508
11509 if (iocbq->vport != vport)
11510 return rc;
11511
11512 if (!(iocbq->iocb_flag & LPFC_IO_FCP) ||
11513 !(iocbq->iocb_flag & LPFC_IO_ON_TXCMPLQ))
11514 return rc;
11515
11516 lpfc_cmd = container_of(iocbq, struct lpfc_io_buf, cur_iocbq);
11517
11518 if (lpfc_cmd->pCmd == NULL)
11519 return rc;
11520
11521 switch (ctx_cmd) {
11522 case LPFC_CTX_LUN:
11523 if ((lpfc_cmd->rdata) && (lpfc_cmd->rdata->pnode) &&
11524 (lpfc_cmd->rdata->pnode->nlp_sid == tgt_id) &&
11525 (scsilun_to_int(&lpfc_cmd->fcp_cmnd->fcp_lun) == lun_id))
11526 rc = 0;
11527 break;
11528 case LPFC_CTX_TGT:
11529 if ((lpfc_cmd->rdata) && (lpfc_cmd->rdata->pnode) &&
11530 (lpfc_cmd->rdata->pnode->nlp_sid == tgt_id))
11531 rc = 0;
11532 break;
11533 case LPFC_CTX_HOST:
11534 rc = 0;
11535 break;
11536 default:
11537 printk(KERN_ERR "%s: Unknown context cmd type, value %d\n",
11538 __func__, ctx_cmd);
11539 break;
11540 }
11541
11542 return rc;
11543}
11544
11545
11546
11547
11548
11549
11550
11551
11552
11553
11554
11555
11556
11557
11558
11559
11560
11561
11562
11563
11564int
11565lpfc_sli_sum_iocb(struct lpfc_vport *vport, uint16_t tgt_id, uint64_t lun_id,
11566 lpfc_ctx_cmd ctx_cmd)
11567{
11568 struct lpfc_hba *phba = vport->phba;
11569 struct lpfc_iocbq *iocbq;
11570 int sum, i;
11571
11572 spin_lock_irq(&phba->hbalock);
11573 for (i = 1, sum = 0; i <= phba->sli.last_iotag; i++) {
11574 iocbq = phba->sli.iocbq_lookup[i];
11575
11576 if (lpfc_sli_validate_fcp_iocb(iocbq, vport, tgt_id, lun_id,
11577 ctx_cmd) == 0)
11578 sum++;
11579 }
11580 spin_unlock_irq(&phba->hbalock);
11581
11582 return sum;
11583}
11584
11585
11586
11587
11588
11589
11590
11591
11592
11593
11594
11595void
11596lpfc_sli_abort_fcp_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
11597 struct lpfc_iocbq *rspiocb)
11598{
11599 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
11600 "3096 ABORT_XRI_CN completing on rpi x%x "
11601 "original iotag x%x, abort cmd iotag x%x "
11602 "status 0x%x, reason 0x%x\n",
11603 cmdiocb->iocb.un.acxri.abortContextTag,
11604 cmdiocb->iocb.un.acxri.abortIoTag,
11605 cmdiocb->iotag, rspiocb->iocb.ulpStatus,
11606 rspiocb->iocb.un.ulpWord[4]);
11607 lpfc_sli_release_iocbq(phba, cmdiocb);
11608 return;
11609}
11610
11611
11612
11613
11614
11615
11616
11617
11618
11619
11620
11621
11622
11623
11624
11625
11626
11627
11628
11629
11630
11631
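/**
 * lpfc_sli_abort_iocb - Issue aborts for pending FCP commands
 * @vport: Pointer to the virtual port object.
 * @pring: Pointer to driver SLI ring object.
 * @tgt_id: SCSI ID of the target.
 * @lun_id: LUN ID of the SCSI command.
 * @abort_cmd: LPFC_CTX_LUN/LPFC_CTX_TGT/LPFC_CTX_HOST.
 *
 * Sends an abort command for every outstanding FCP command on the vport
 * that matches the lpfc_sli_validate_fcp_iocb filter and has not already
 * been marked as aborted.  Returns the number of iocbs it failed to abort.
 **/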
11632int
11633lpfc_sli_abort_iocb(struct lpfc_vport *vport, struct lpfc_sli_ring *pring,
11634 uint16_t tgt_id, uint64_t lun_id, lpfc_ctx_cmd abort_cmd)
11635{
11636 struct lpfc_hba *phba = vport->phba;
11637 struct lpfc_iocbq *iocbq;
11638 struct lpfc_iocbq *abtsiocb;
11639 struct lpfc_sli_ring *pring_s4;
11640 IOCB_t *cmd = NULL;
11641 int errcnt = 0, ret_val = 0;
11642 int i;
11643
11644
11645 if (phba->hba_flag & HBA_IOQ_FLUSH)
11646 return errcnt;
11647
11648 for (i = 1; i <= phba->sli.last_iotag; i++) {
11649 iocbq = phba->sli.iocbq_lookup[i];
11650
11651 if (lpfc_sli_validate_fcp_iocb(iocbq, vport, tgt_id, lun_id,
11652 abort_cmd) != 0)
11653 continue;
11654
11655
11656
11657
11658
11659 if (iocbq->iocb_flag & LPFC_DRIVER_ABORTED)
11660 continue;
11661
11662
11663 abtsiocb = lpfc_sli_get_iocbq(phba);
11664 if (abtsiocb == NULL) {
11665 errcnt++;
11666 continue;
11667 }
11668
11669
11670 iocbq->iocb_flag |= LPFC_DRIVER_ABORTED;
11671
11672 cmd = &iocbq->iocb;
11673 abtsiocb->iocb.un.acxri.abortType = ABORT_TYPE_ABTS;
11674 abtsiocb->iocb.un.acxri.abortContextTag = cmd->ulpContext;
11675 if (phba->sli_rev == LPFC_SLI_REV4)
11676 abtsiocb->iocb.un.acxri.abortIoTag = iocbq->sli4_xritag;
11677 else
11678 abtsiocb->iocb.un.acxri.abortIoTag = cmd->ulpIoTag;
11679 abtsiocb->iocb.ulpLe = 1;
11680 abtsiocb->iocb.ulpClass = cmd->ulpClass;
11681 abtsiocb->vport = vport;
11682
11683
11684 abtsiocb->hba_wqidx = iocbq->hba_wqidx;
11685 if (iocbq->iocb_flag & LPFC_IO_FCP)
11686 abtsiocb->iocb_flag |= LPFC_USE_FCPWQIDX;
11687 if (iocbq->iocb_flag & LPFC_IO_FOF)
11688 abtsiocb->iocb_flag |= LPFC_IO_FOF;
11689
11690 if (lpfc_is_link_up(phba))
11691 abtsiocb->iocb.ulpCommand = CMD_ABORT_XRI_CN;
11692 else
11693 abtsiocb->iocb.ulpCommand = CMD_CLOSE_XRI_CN;
11694
11695
11696 abtsiocb->iocb_cmpl = lpfc_sli_abort_fcp_cmpl;
11697 if (phba->sli_rev == LPFC_SLI_REV4) {
11698 pring_s4 = lpfc_sli4_calc_ring(phba, iocbq);
11699 if (!pring_s4)
11700 continue;
11701 ret_val = lpfc_sli_issue_iocb(phba, pring_s4->ringno,
11702 abtsiocb, 0);
11703 } else
11704 ret_val = lpfc_sli_issue_iocb(phba, pring->ringno,
11705 abtsiocb, 0);
11706 if (ret_val == IOCB_ERROR) {
11707 lpfc_sli_release_iocbq(phba, abtsiocb);
11708 errcnt++;
11709 continue;
11710 }
11711 }
11712
11713 return errcnt;
11714}
11715
11716
11717
11718
11719
11720
11721
11722
11723
11724
11725
11726
11727
11728
11729
11730
11731
11732
11733
11734
11735
11736
11737
11738int
11739lpfc_sli_abort_taskmgmt(struct lpfc_vport *vport, struct lpfc_sli_ring *pring,
11740 uint16_t tgt_id, uint64_t lun_id, lpfc_ctx_cmd cmd)
11741{
11742 struct lpfc_hba *phba = vport->phba;
11743 struct lpfc_io_buf *lpfc_cmd;
11744 struct lpfc_iocbq *abtsiocbq;
11745 struct lpfc_nodelist *ndlp;
11746 struct lpfc_iocbq *iocbq;
11747 IOCB_t *icmd;
11748 int sum, i, ret_val;
11749 unsigned long iflags;
11750 struct lpfc_sli_ring *pring_s4 = NULL;
11751
11752 spin_lock_irqsave(&phba->hbalock, iflags);
11753
11754
11755 if (phba->hba_flag & HBA_IOQ_FLUSH) {
11756 spin_unlock_irqrestore(&phba->hbalock, iflags);
11757 return 0;
11758 }
11759 sum = 0;
11760
11761 for (i = 1; i <= phba->sli.last_iotag; i++) {
11762 iocbq = phba->sli.iocbq_lookup[i];
11763
11764 if (lpfc_sli_validate_fcp_iocb(iocbq, vport, tgt_id, lun_id,
11765 cmd) != 0)
11766 continue;
11767
11768
11769 lpfc_cmd = container_of(iocbq, struct lpfc_io_buf, cur_iocbq);
11770 spin_lock(&lpfc_cmd->buf_lock);
11771
11772 if (!lpfc_cmd->pCmd) {
11773 spin_unlock(&lpfc_cmd->buf_lock);
11774 continue;
11775 }
11776
11777 if (phba->sli_rev == LPFC_SLI_REV4) {
11778 pring_s4 =
11779 phba->sli4_hba.hdwq[iocbq->hba_wqidx].io_wq->pring;
11780 if (!pring_s4) {
11781 spin_unlock(&lpfc_cmd->buf_lock);
11782 continue;
11783 }
11784
11785 spin_lock(&pring_s4->ring_lock);
11786 }
11787
11788
11789
11790
11791
11792 if ((iocbq->iocb_flag & LPFC_DRIVER_ABORTED) ||
11793 !(iocbq->iocb_flag & LPFC_IO_ON_TXCMPLQ)) {
11794 if (phba->sli_rev == LPFC_SLI_REV4)
11795 spin_unlock(&pring_s4->ring_lock);
11796 spin_unlock(&lpfc_cmd->buf_lock);
11797 continue;
11798 }
11799
11800
11801 abtsiocbq = __lpfc_sli_get_iocbq(phba);
11802 if (!abtsiocbq) {
11803 if (phba->sli_rev == LPFC_SLI_REV4)
11804 spin_unlock(&pring_s4->ring_lock);
11805 spin_unlock(&lpfc_cmd->buf_lock);
11806 continue;
11807 }
11808
11809 icmd = &iocbq->iocb;
11810 abtsiocbq->iocb.un.acxri.abortType = ABORT_TYPE_ABTS;
11811 abtsiocbq->iocb.un.acxri.abortContextTag = icmd->ulpContext;
11812 if (phba->sli_rev == LPFC_SLI_REV4)
11813 abtsiocbq->iocb.un.acxri.abortIoTag =
11814 iocbq->sli4_xritag;
11815 else
11816 abtsiocbq->iocb.un.acxri.abortIoTag = icmd->ulpIoTag;
11817 abtsiocbq->iocb.ulpLe = 1;
11818 abtsiocbq->iocb.ulpClass = icmd->ulpClass;
11819 abtsiocbq->vport = vport;
11820
11821
11822 abtsiocbq->hba_wqidx = iocbq->hba_wqidx;
11823 if (iocbq->iocb_flag & LPFC_IO_FCP)
11824 abtsiocbq->iocb_flag |= LPFC_USE_FCPWQIDX;
11825 if (iocbq->iocb_flag & LPFC_IO_FOF)
11826 abtsiocbq->iocb_flag |= LPFC_IO_FOF;
11827
11828 ndlp = lpfc_cmd->rdata->pnode;
11829
11830 if (lpfc_is_link_up(phba) &&
11831 (ndlp && ndlp->nlp_state == NLP_STE_MAPPED_NODE))
11832 abtsiocbq->iocb.ulpCommand = CMD_ABORT_XRI_CN;
11833 else
11834 abtsiocbq->iocb.ulpCommand = CMD_CLOSE_XRI_CN;
11835
11836
11837 abtsiocbq->iocb_cmpl = lpfc_sli_abort_fcp_cmpl;
11838
11839
11840
11841
11842
11843 iocbq->iocb_flag |= LPFC_DRIVER_ABORTED;
11844
11845 if (phba->sli_rev == LPFC_SLI_REV4) {
11846 ret_val = __lpfc_sli_issue_iocb(phba, pring_s4->ringno,
11847 abtsiocbq, 0);
11848 spin_unlock(&pring_s4->ring_lock);
11849 } else {
11850 ret_val = __lpfc_sli_issue_iocb(phba, pring->ringno,
11851 abtsiocbq, 0);
11852 }
11853
11854 spin_unlock(&lpfc_cmd->buf_lock);
11855
11856 if (ret_val == IOCB_ERROR)
11857 __lpfc_sli_release_iocbq(phba, abtsiocbq);
11858 else
11859 sum++;
11860 }
11861 spin_unlock_irqrestore(&phba->hbalock, iflags);
11862 return sum;
11863}
11864
11865
11866
11867
11868
11869
11870
11871
11872
11873
11874
11875
11876
11877
11878
11879
11880
11881
11882static void
11883lpfc_sli_wake_iocb_wait(struct lpfc_hba *phba,
11884 struct lpfc_iocbq *cmdiocbq,
11885 struct lpfc_iocbq *rspiocbq)
11886{
11887 wait_queue_head_t *pdone_q;
11888 unsigned long iflags;
11889 struct lpfc_io_buf *lpfc_cmd;
11890
11891 spin_lock_irqsave(&phba->hbalock, iflags);
11892 if (cmdiocbq->iocb_flag & LPFC_IO_WAKE_TMO) {
11893
11894
11895
11896
11897
11898
11899
11900 spin_unlock_irqrestore(&phba->hbalock, iflags);
11901 cmdiocbq->iocb_cmpl = cmdiocbq->wait_iocb_cmpl;
11902 cmdiocbq->wait_iocb_cmpl = NULL;
11903 if (cmdiocbq->iocb_cmpl)
11904 (cmdiocbq->iocb_cmpl)(phba, cmdiocbq, NULL);
11905 else
11906 lpfc_sli_release_iocbq(phba, cmdiocbq);
11907 return;
11908 }
11909
11910 cmdiocbq->iocb_flag |= LPFC_IO_WAKE;
11911 if (cmdiocbq->context2 && rspiocbq)
11912 memcpy(&((struct lpfc_iocbq *)cmdiocbq->context2)->iocb,
11913 &rspiocbq->iocb, sizeof(IOCB_t));
11914
11915
11916 if ((cmdiocbq->iocb_flag & LPFC_IO_FCP) &&
11917 !(cmdiocbq->iocb_flag & LPFC_IO_LIBDFC)) {
11918 lpfc_cmd = container_of(cmdiocbq, struct lpfc_io_buf,
11919 cur_iocbq);
11920 if (rspiocbq && (rspiocbq->iocb_flag & LPFC_EXCHANGE_BUSY))
11921 lpfc_cmd->flags |= LPFC_SBUF_XBUSY;
11922 else
11923 lpfc_cmd->flags &= ~LPFC_SBUF_XBUSY;
11924 }
11925
11926 pdone_q = cmdiocbq->context_un.wait_queue;
11927 if (pdone_q)
11928 wake_up(pdone_q);
11929 spin_unlock_irqrestore(&phba->hbalock, iflags);
11930 return;
11931}
11932
11933
11934
11935
11936
11937
11938
11939
11940
11941
11942
11943
11944
11945static int
11946lpfc_chk_iocb_flg(struct lpfc_hba *phba,
11947 struct lpfc_iocbq *piocbq, uint32_t flag)
11948{
11949 unsigned long iflags;
11950 int ret;
11951
11952 spin_lock_irqsave(&phba->hbalock, iflags);
11953 ret = piocbq->iocb_flag & flag;
11954 spin_unlock_irqrestore(&phba->hbalock, iflags);
11955 return ret;
11956
11957}
11958
11959
11960
11961
11962
11963
11964
11965
11966
11967
11968
11969
11970
11971
11972
11973
11974
11975
11976
11977
11978
11979
11980
11981
11982
11983
11984
11985
11986
11987
11988
11989
11990
11991
11992
11993
11994
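/**
 * lpfc_sli_issue_iocb_wait - Synchronous function to issue an iocb command
 * @phba: Pointer to HBA context object.
 * @ring_number: SLI ring number to issue the iocb on.
 * @piocb: Pointer to command iocb.
 * @prspiocbq: Pointer to response iocb (optional).
 * @timeout: Timeout in seconds.
 *
 * Issues the iocb and waits for it to complete, installing
 * lpfc_sli_wake_iocb_wait in place of the caller's completion handler and
 * sleeping on a wait queue.  If @prspiocbq is supplied, the response iocb
 * is copied into it on completion.  Returns IOCB_SUCCESS, IOCB_BUSY,
 * IOCB_ERROR or IOCB_TIMEDOUT; on timeout the iocb is flagged
 * LPFC_IO_WAKE_TMO so its original handler runs when the port finally
 * completes or flushes it.
 **/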
11995int
11996lpfc_sli_issue_iocb_wait(struct lpfc_hba *phba,
11997 uint32_t ring_number,
11998 struct lpfc_iocbq *piocb,
11999 struct lpfc_iocbq *prspiocbq,
12000 uint32_t timeout)
12001{
12002 DECLARE_WAIT_QUEUE_HEAD_ONSTACK(done_q);
12003 long timeleft, timeout_req = 0;
12004 int retval = IOCB_SUCCESS;
12005 uint32_t creg_val;
12006 struct lpfc_iocbq *iocb;
12007 int txq_cnt = 0;
12008 int txcmplq_cnt = 0;
12009 struct lpfc_sli_ring *pring;
12010 unsigned long iflags;
12011 bool iocb_completed = true;
12012
12013 if (phba->sli_rev >= LPFC_SLI_REV4)
12014 pring = lpfc_sli4_calc_ring(phba, piocb);
12015 else
12016 pring = &phba->sli.sli3_ring[ring_number];
12017
12018
12019
12020
12021 if (prspiocbq) {
12022 if (piocb->context2)
12023 return IOCB_ERROR;
12024 piocb->context2 = prspiocbq;
12025 }
12026
12027 piocb->wait_iocb_cmpl = piocb->iocb_cmpl;
12028 piocb->iocb_cmpl = lpfc_sli_wake_iocb_wait;
12029 piocb->context_un.wait_queue = &done_q;
12030 piocb->iocb_flag &= ~(LPFC_IO_WAKE | LPFC_IO_WAKE_TMO);
12031
12032 if (phba->cfg_poll & DISABLE_FCP_RING_INT) {
12033 if (lpfc_readl(phba->HCregaddr, &creg_val))
12034 return IOCB_ERROR;
12035 creg_val |= (HC_R0INT_ENA << LPFC_FCP_RING);
12036 writel(creg_val, phba->HCregaddr);
12037 readl(phba->HCregaddr);
12038 }
12039
12040 retval = lpfc_sli_issue_iocb(phba, ring_number, piocb,
12041 SLI_IOCB_RET_IOCB);
12042 if (retval == IOCB_SUCCESS) {
12043 timeout_req = msecs_to_jiffies(timeout * 1000);
12044 timeleft = wait_event_timeout(done_q,
12045 lpfc_chk_iocb_flg(phba, piocb, LPFC_IO_WAKE),
12046 timeout_req);
12047 spin_lock_irqsave(&phba->hbalock, iflags);
12048 if (!(piocb->iocb_flag & LPFC_IO_WAKE)) {
12049
12050
12051
12052
12053
12054
12055 iocb_completed = false;
12056 piocb->iocb_flag |= LPFC_IO_WAKE_TMO;
12057 }
12058 spin_unlock_irqrestore(&phba->hbalock, iflags);
12059 if (iocb_completed) {
12060 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
12061 "0331 IOCB wake signaled\n");
12062
12063
12064
12065
12066
12067 } else if (timeleft == 0) {
12068 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
12069 "0338 IOCB wait timeout error - no "
12070 "wake response Data x%x\n", timeout);
12071 retval = IOCB_TIMEDOUT;
12072 } else {
12073 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
12074 "0330 IOCB wake NOT set, "
12075 "Data x%x x%lx\n",
12076 timeout, (timeleft / jiffies));
12077 retval = IOCB_TIMEDOUT;
12078 }
12079 } else if (retval == IOCB_BUSY) {
12080 if (phba->cfg_log_verbose & LOG_SLI) {
12081 list_for_each_entry(iocb, &pring->txq, list) {
12082 txq_cnt++;
12083 }
12084 list_for_each_entry(iocb, &pring->txcmplq, list) {
12085 txcmplq_cnt++;
12086 }
12087 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
12088 "2818 Max IOCBs %d txq cnt %d txcmplq cnt %d\n",
12089 phba->iocb_cnt, txq_cnt, txcmplq_cnt);
12090 }
12091 return retval;
12092 } else {
12093 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
12094 "0332 IOCB wait issue failed, Data x%x\n",
12095 retval);
12096 retval = IOCB_ERROR;
12097 }
12098
12099 if (phba->cfg_poll & DISABLE_FCP_RING_INT) {
12100 if (lpfc_readl(phba->HCregaddr, &creg_val))
12101 return IOCB_ERROR;
12102 creg_val &= ~(HC_R0INT_ENA << LPFC_FCP_RING);
12103 writel(creg_val, phba->HCregaddr);
12104 readl(phba->HCregaddr);
12105 }
12106
12107 if (prspiocbq)
12108 piocb->context2 = NULL;
12109
12110 piocb->context_un.wait_queue = NULL;
12111 piocb->iocb_cmpl = NULL;
12112 return retval;
12113}
12114
12115
12116
12117
12118
12119
12120
12121
12122
12123
12124
12125
12126
12127
12128
12129
12130
12131
12132
12133
12134
12135
12136
12137
12138
12139
12140
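/**
 * lpfc_sli_issue_mbox_wait - Issue a mailbox command and wait for completion
 * @phba: Pointer to HBA context object.
 * @pmboxq: Pointer to the mailbox command to issue.
 * @timeout: Timeout in seconds to wait for the mailbox completion.
 *
 * Issues the mailbox with MBX_NOWAIT and waits on an on-stack completion
 * that lpfc_sli_wake_mbox_wait() signals from the completion handler.  On
 * timeout, the default completion handler is restored so a late firmware
 * response is still cleaned up.
 *
 * Return: MBX_SUCCESS if the command completed, MBX_TIMEOUT if the wait
 * expired, or the return code of lpfc_sli_issue_mbox() on issue failure.
 **/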
12141int
12142lpfc_sli_issue_mbox_wait(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmboxq,
12143 uint32_t timeout)
12144{
12145 struct completion mbox_done;
12146 int retval;
12147 unsigned long flag;
12148
12149 pmboxq->mbox_flag &= ~LPFC_MBX_WAKE;
12150
12151 pmboxq->mbox_cmpl = lpfc_sli_wake_mbox_wait;
12152
12153
12154 init_completion(&mbox_done);
12155 pmboxq->context3 = &mbox_done;
12156
12157 retval = lpfc_sli_issue_mbox(phba, pmboxq, MBX_NOWAIT);
12158 if (retval == MBX_BUSY || retval == MBX_SUCCESS) {
12159 wait_for_completion_timeout(&mbox_done,
12160 msecs_to_jiffies(timeout * 1000));
12161
12162 spin_lock_irqsave(&phba->hbalock, flag);
12163 pmboxq->context3 = NULL;
12164
12165
12166
12167
12168 if (pmboxq->mbox_flag & LPFC_MBX_WAKE) {
12169 retval = MBX_SUCCESS;
12170 } else {
12171 retval = MBX_TIMEOUT;
12172 pmboxq->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
12173 }
12174 spin_unlock_irqrestore(&phba->hbalock, flag);
12175 }
12176 return retval;
12177}
12178
12179
12180
12181
12182
12183
12184
12185
12186
12187
12188
12189
12190
12191
12192
12193
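/**
 * lpfc_sli_mbox_sys_shutdown - Shut down the mailbox command subsystem
 * @phba: Pointer to HBA context object.
 * @mbx_action: LPFC_MBX_WAIT or LPFC_MBX_NO_WAIT.
 *
 * Blocks further asynchronous mailbox posting by setting
 * LPFC_SLI_ASYNC_MBX_BLK.  With LPFC_MBX_WAIT the routine polls until any
 * active mailbox command finishes or its command-specific timeout expires;
 * with LPFC_MBX_NO_WAIT it only delays briefly.  In either case the
 * outstanding mailbox queue is finally flushed with
 * lpfc_sli_mbox_sys_flush().
 **/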
12194void
12195lpfc_sli_mbox_sys_shutdown(struct lpfc_hba *phba, int mbx_action)
12196{
12197 struct lpfc_sli *psli = &phba->sli;
12198 unsigned long timeout;
12199
12200 if (mbx_action == LPFC_MBX_NO_WAIT) {
12201
12202 msleep(100);
12203 lpfc_sli_mbox_sys_flush(phba);
12204 return;
12205 }
12206 timeout = msecs_to_jiffies(LPFC_MBOX_TMO * 1000) + jiffies;
12207
12208
12209 local_bh_disable();
12210
12211 spin_lock_irq(&phba->hbalock);
12212 psli->sli_flag |= LPFC_SLI_ASYNC_MBX_BLK;
12213
12214 if (psli->sli_flag & LPFC_SLI_ACTIVE) {
12215
12216
12217
12218 if (phba->sli.mbox_active)
12219 timeout = msecs_to_jiffies(lpfc_mbox_tmo_val(phba,
12220 phba->sli.mbox_active) *
12221 1000) + jiffies;
12222 spin_unlock_irq(&phba->hbalock);
12223
12224
12225 local_bh_enable();
12226
12227 while (phba->sli.mbox_active) {
12228
12229 msleep(2);
12230 if (time_after(jiffies, timeout))
12231
12232
12233
12234 break;
12235 }
12236 } else {
12237 spin_unlock_irq(&phba->hbalock);
12238
12239
12240 local_bh_enable();
12241 }
12242
12243 lpfc_sli_mbox_sys_flush(phba);
12244}
12245
12246
12247
12248
12249
12250
12251
12252
12253
12254
12255
12256
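/**
 * lpfc_sli_eratt_read - Check for SLI-3 error attention in the HA register
 * @phba: Pointer to HBA context object.
 *
 * Reads the host attention register and, if HA_ERATT is set, captures the
 * host status in phba->work_hs.  A recoverable firmware error (HS_FFER1
 * together with another FFER bit) is marked DEFER_ERATT and host
 * interrupts are disabled.  A failed register read is treated as an
 * unplug event.
 *
 * Return: 1 if an error attention was latched (work_ha/hba_flag updated),
 * 0 otherwise.
 **/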
12257static int
12258lpfc_sli_eratt_read(struct lpfc_hba *phba)
12259{
12260 uint32_t ha_copy;
12261
12262
12263 if (lpfc_readl(phba->HAregaddr, &ha_copy))
12264 goto unplug_err;
12265
12266 if (ha_copy & HA_ERATT) {
12267
12268 if (lpfc_sli_read_hs(phba))
12269 goto unplug_err;
12270
12271
12272 if ((HS_FFER1 & phba->work_hs) &&
12273 ((HS_FFER2 | HS_FFER3 | HS_FFER4 | HS_FFER5 |
12274 HS_FFER6 | HS_FFER7 | HS_FFER8) & phba->work_hs)) {
12275 phba->hba_flag |= DEFER_ERATT;
12276
12277 writel(0, phba->HCregaddr);
12278 readl(phba->HCregaddr);
12279 }
12280
12281
12282 phba->work_ha |= HA_ERATT;
12283
12284 phba->hba_flag |= HBA_ERATT_HANDLED;
12285 return 1;
12286 }
12287 return 0;
12288
12289unplug_err:
12290
12291 phba->work_hs |= UNPLUG_ERR;
12292
12293 phba->work_ha |= HA_ERATT;
12294
12295 phba->hba_flag |= HBA_ERATT_HANDLED;
12296 return 1;
12297}
12298
12299
12300
12301
12302
12303
12304
12305
12306
12307
12308
12309
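/**
 * lpfc_sli4_eratt_read - Check for SLI-4 error attention
 * @phba: Pointer to HBA context object.
 *
 * Checks the unrecoverable-error registers (if_type 0) or the port status
 * register (if_type 2/6) for an error condition, saving the error details
 * in phba->work_status[].  Register read failures are treated as an
 * unplug event; unsupported if_types are reported and also flagged as an
 * error.
 *
 * Return: 1 if an error attention was latched, 0 otherwise.
 **/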
12310static int
12311lpfc_sli4_eratt_read(struct lpfc_hba *phba)
12312{
12313 uint32_t uerr_sta_hi, uerr_sta_lo;
12314 uint32_t if_type, portsmphr;
12315 struct lpfc_register portstat_reg;
12316
12317
12318
12319
12320
12321 if_type = bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf);
12322 switch (if_type) {
12323 case LPFC_SLI_INTF_IF_TYPE_0:
12324 if (lpfc_readl(phba->sli4_hba.u.if_type0.UERRLOregaddr,
12325 &uerr_sta_lo) ||
12326 lpfc_readl(phba->sli4_hba.u.if_type0.UERRHIregaddr,
12327 &uerr_sta_hi)) {
12328 phba->work_hs |= UNPLUG_ERR;
12329 phba->work_ha |= HA_ERATT;
12330 phba->hba_flag |= HBA_ERATT_HANDLED;
12331 return 1;
12332 }
12333 if ((~phba->sli4_hba.ue_mask_lo & uerr_sta_lo) ||
12334 (~phba->sli4_hba.ue_mask_hi & uerr_sta_hi)) {
12335 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
12336 "1423 HBA Unrecoverable error: "
12337 "uerr_lo_reg=0x%x, uerr_hi_reg=0x%x, "
12338 "ue_mask_lo_reg=0x%x, "
12339 "ue_mask_hi_reg=0x%x\n",
12340 uerr_sta_lo, uerr_sta_hi,
12341 phba->sli4_hba.ue_mask_lo,
12342 phba->sli4_hba.ue_mask_hi);
12343 phba->work_status[0] = uerr_sta_lo;
12344 phba->work_status[1] = uerr_sta_hi;
12345 phba->work_ha |= HA_ERATT;
12346 phba->hba_flag |= HBA_ERATT_HANDLED;
12347 return 1;
12348 }
12349 break;
12350 case LPFC_SLI_INTF_IF_TYPE_2:
12351 case LPFC_SLI_INTF_IF_TYPE_6:
12352 if (lpfc_readl(phba->sli4_hba.u.if_type2.STATUSregaddr,
12353 &portstat_reg.word0) ||
12354 lpfc_readl(phba->sli4_hba.PSMPHRregaddr,
12355 &portsmphr)) {
12356 phba->work_hs |= UNPLUG_ERR;
12357 phba->work_ha |= HA_ERATT;
12358 phba->hba_flag |= HBA_ERATT_HANDLED;
12359 return 1;
12360 }
12361 if (bf_get(lpfc_sliport_status_err, &portstat_reg)) {
12362 phba->work_status[0] =
12363 readl(phba->sli4_hba.u.if_type2.ERR1regaddr);
12364 phba->work_status[1] =
12365 readl(phba->sli4_hba.u.if_type2.ERR2regaddr);
12366 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
12367 "2885 Port Status Event: "
12368 "port status reg 0x%x, "
12369 "port smphr reg 0x%x, "
12370 "error 1=0x%x, error 2=0x%x\n",
12371 portstat_reg.word0,
12372 portsmphr,
12373 phba->work_status[0],
12374 phba->work_status[1]);
12375 phba->work_ha |= HA_ERATT;
12376 phba->hba_flag |= HBA_ERATT_HANDLED;
12377 return 1;
12378 }
12379 break;
12380 case LPFC_SLI_INTF_IF_TYPE_1:
12381 default:
12382 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
12383 "2886 HBA Error Attention on unsupported "
12384 "if type %d.", if_type);
12385 return 1;
12386 }
12387
12388 return 0;
12389}
12390
12391
12392
12393
12394
12395
12396
12397
12398
12399
12400
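/**
 * lpfc_sli_check_eratt - Check whether a host error attention is pending
 * @phba: Pointer to HBA context object.
 *
 * Called from the driver's error-attention polling path to see whether the
 * HBA has posted an error attention that has not yet been handled by the
 * interrupt handler.  Returns early when error attentions are being
 * ignored, already handled, deferred, or when the PCI channel is offline;
 * otherwise dispatches to the SLI-3 or SLI-4 specific read routine.
 *
 * Return: non-zero if an error attention is pending, 0 otherwise.
 **/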
12401int
12402lpfc_sli_check_eratt(struct lpfc_hba *phba)
12403{
12404 uint32_t ha_copy;
12405
12406
12407
12408
12409 if (phba->link_flag & LS_IGNORE_ERATT)
12410 return 0;
12411
12412
12413 spin_lock_irq(&phba->hbalock);
12414 if (phba->hba_flag & HBA_ERATT_HANDLED) {
12415
12416 spin_unlock_irq(&phba->hbalock);
12417 return 0;
12418 }
12419
12420
12421
12422
12423
12424 if (unlikely(phba->hba_flag & DEFER_ERATT)) {
12425 spin_unlock_irq(&phba->hbalock);
12426 return 0;
12427 }
12428
12429
12430 if (unlikely(pci_channel_offline(phba->pcidev))) {
12431 spin_unlock_irq(&phba->hbalock);
12432 return 0;
12433 }
12434
12435 switch (phba->sli_rev) {
12436 case LPFC_SLI_REV2:
12437 case LPFC_SLI_REV3:
12438
12439 ha_copy = lpfc_sli_eratt_read(phba);
12440 break;
12441 case LPFC_SLI_REV4:
12442
12443 ha_copy = lpfc_sli4_eratt_read(phba);
12444 break;
12445 default:
12446 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
12447 "0299 Invalid SLI revision (%d)\n",
12448 phba->sli_rev);
12449 ha_copy = 0;
12450 break;
12451 }
12452 spin_unlock_irq(&phba->hbalock);
12453
12454 return ha_copy;
12455}
12456
12457
12458
12459
12460
12461
12462
12463
12464
12465
12466
12467static inline int
12468lpfc_intr_state_check(struct lpfc_hba *phba)
12469{
12470
12471 if (unlikely(pci_channel_offline(phba->pcidev)))
12472 return -EIO;
12473
12474
12475 phba->sli.slistat.sli_intr++;
12476
12477
12478 if (unlikely(phba->link_state < LPFC_LINK_DOWN))
12479 return -EIO;
12480
12481 return 0;
12482}
12483
12484
12485
12486
12487
12488
12489
12490
12491
12492
12493
12494
12495
12496
12497
12498
12499
12500
12501
12502
12503
12504
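/**
 * lpfc_sli_sp_intr_handler - Slow-path interrupt handler for SLI-3 devices
 * @irq: Interrupt number.
 * @dev_id: Pointer to the lpfc_hba structure (the device identity cookie).
 *
 * Services the slow-path events of an SLI-3 port: link attention, mailbox
 * completions, error attention and the ELS ring.  When invoked as the
 * MSI-X slow-path vector it reads and clears the host attention register
 * itself; when called from lpfc_sli_intr_handler() for INTx/MSI it uses
 * the value already cached in phba->ha_copy.  Work that cannot be done in
 * interrupt context is handed to the worker thread via phba->work_ha.
 *
 * Return: IRQ_HANDLED if an event was serviced, otherwise IRQ_NONE.
 **/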
12505irqreturn_t
12506lpfc_sli_sp_intr_handler(int irq, void *dev_id)
12507{
12508 struct lpfc_hba *phba;
12509 uint32_t ha_copy, hc_copy;
12510 uint32_t work_ha_copy;
12511 unsigned long status;
12512 unsigned long iflag;
12513 uint32_t control;
12514
12515 MAILBOX_t *mbox, *pmbox;
12516 struct lpfc_vport *vport;
12517 struct lpfc_nodelist *ndlp;
12518 struct lpfc_dmabuf *mp;
12519 LPFC_MBOXQ_t *pmb;
12520 int rc;
12521
12522
12523
12524
12525
12526 phba = (struct lpfc_hba *)dev_id;
12527
12528 if (unlikely(!phba))
12529 return IRQ_NONE;
12530
12531
12532
12533
12534
12535 if (phba->intr_type == MSIX) {
12536
12537 if (lpfc_intr_state_check(phba))
12538 return IRQ_NONE;
12539
12540 spin_lock_irqsave(&phba->hbalock, iflag);
12541 if (lpfc_readl(phba->HAregaddr, &ha_copy))
12542 goto unplug_error;
12543
12544
12545
12546 if (phba->link_flag & LS_IGNORE_ERATT)
12547 ha_copy &= ~HA_ERATT;
12548
12549 if (ha_copy & HA_ERATT) {
12550 if (phba->hba_flag & HBA_ERATT_HANDLED)
12551
12552 ha_copy &= ~HA_ERATT;
12553 else
12554
12555 phba->hba_flag |= HBA_ERATT_HANDLED;
12556 }
12557
12558
12559
12560
12561
12562 if (unlikely(phba->hba_flag & DEFER_ERATT)) {
12563 spin_unlock_irqrestore(&phba->hbalock, iflag);
12564 return IRQ_NONE;
12565 }
12566
12567
12568 if (lpfc_readl(phba->HCregaddr, &hc_copy))
12569 goto unplug_error;
12570
12571 writel(hc_copy & ~(HC_MBINT_ENA | HC_R2INT_ENA |
12572 HC_LAINT_ENA | HC_ERINT_ENA),
12573 phba->HCregaddr);
12574 writel((ha_copy & (HA_MBATT | HA_R2_CLR_MSK)),
12575 phba->HAregaddr);
12576 writel(hc_copy, phba->HCregaddr);
12577 readl(phba->HAregaddr);
12578 spin_unlock_irqrestore(&phba->hbalock, iflag);
12579 } else
12580 ha_copy = phba->ha_copy;
12581
12582 work_ha_copy = ha_copy & phba->work_ha_mask;
12583
12584 if (work_ha_copy) {
12585 if (work_ha_copy & HA_LATT) {
12586 if (phba->sli.sli_flag & LPFC_PROCESS_LA) {
12587
12588
12589
12590
12591 spin_lock_irqsave(&phba->hbalock, iflag);
12592 phba->sli.sli_flag &= ~LPFC_PROCESS_LA;
12593 if (lpfc_readl(phba->HCregaddr, &control))
12594 goto unplug_error;
12595 control &= ~HC_LAINT_ENA;
12596 writel(control, phba->HCregaddr);
12597 readl(phba->HCregaddr);
12598 spin_unlock_irqrestore(&phba->hbalock, iflag);
12599 } else
12601 work_ha_copy &= ~HA_LATT;
12602 }
12603
12604 if (work_ha_copy & ~(HA_ERATT | HA_MBATT | HA_LATT)) {
12605
12606
12607
12608
12609 status = (work_ha_copy &
12610 (HA_RXMASK << (4*LPFC_ELS_RING)));
12611 status >>= (4*LPFC_ELS_RING);
12612 if (status & HA_RXMASK) {
12613 spin_lock_irqsave(&phba->hbalock, iflag);
12614 if (lpfc_readl(phba->HCregaddr, &control))
12615 goto unplug_error;
12616
12617 lpfc_debugfs_slow_ring_trc(phba,
12618 "ISR slow ring: ctl:x%x stat:x%x isrcnt:x%x",
12619 control, status,
12620 (uint32_t)phba->sli.slistat.sli_intr);
12621
12622 if (control & (HC_R0INT_ENA << LPFC_ELS_RING)) {
12623 lpfc_debugfs_slow_ring_trc(phba,
12624 "ISR Disable ring:"
12625 "pwork:x%x hawork:x%x wait:x%x",
12626 phba->work_ha, work_ha_copy,
12627 (uint32_t)((unsigned long)
12628 &phba->work_waitq));
12629
12630 control &=
12631 ~(HC_R0INT_ENA << LPFC_ELS_RING);
12632 writel(control, phba->HCregaddr);
12633 readl(phba->HCregaddr);
12634 } else {
12636 lpfc_debugfs_slow_ring_trc(phba,
12637 "ISR slow ring: pwork:"
12638 "x%x hawork:x%x wait:x%x",
12639 phba->work_ha, work_ha_copy,
12640 (uint32_t)((unsigned long)
12641 &phba->work_waitq));
12642 }
12643 spin_unlock_irqrestore(&phba->hbalock, iflag);
12644 }
12645 }
12646 spin_lock_irqsave(&phba->hbalock, iflag);
12647 if (work_ha_copy & HA_ERATT) {
12648 if (lpfc_sli_read_hs(phba))
12649 goto unplug_error;
12650
12651
12652
12653
12654 if ((HS_FFER1 & phba->work_hs) &&
12655 ((HS_FFER2 | HS_FFER3 | HS_FFER4 | HS_FFER5 |
12656 HS_FFER6 | HS_FFER7 | HS_FFER8) &
12657 phba->work_hs)) {
12658 phba->hba_flag |= DEFER_ERATT;
12659
12660 writel(0, phba->HCregaddr);
12661 readl(phba->HCregaddr);
12662 }
12663 }
12664
12665 if ((work_ha_copy & HA_MBATT) && (phba->sli.mbox_active)) {
12666 pmb = phba->sli.mbox_active;
12667 pmbox = &pmb->u.mb;
12668 mbox = phba->mbox;
12669 vport = pmb->vport;
12670
12671
12672 lpfc_sli_pcimem_bcopy(mbox, pmbox, sizeof(uint32_t));
12673 if (pmbox->mbxOwner != OWN_HOST) {
12674 spin_unlock_irqrestore(&phba->hbalock, iflag);
12675
12676
12677
12678
12679 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX |
12680 LOG_SLI,
12681 "(%d):0304 Stray Mailbox "
12682 "Interrupt mbxCommand x%x "
12683 "mbxStatus x%x\n",
12684 (vport ? vport->vpi : 0),
12685 pmbox->mbxCommand,
12686 pmbox->mbxStatus);
12687
12688 work_ha_copy &= ~HA_MBATT;
12689 } else {
12690 phba->sli.mbox_active = NULL;
12691 spin_unlock_irqrestore(&phba->hbalock, iflag);
12692 phba->last_completion_time = jiffies;
12693 del_timer(&phba->sli.mbox_tmo);
12694 if (pmb->mbox_cmpl) {
12695 lpfc_sli_pcimem_bcopy(mbox, pmbox,
12696 MAILBOX_CMD_SIZE);
12697 if (pmb->out_ext_byte_len &&
12698 pmb->ctx_buf)
12699 lpfc_sli_pcimem_bcopy(
12700 phba->mbox_ext,
12701 pmb->ctx_buf,
12702 pmb->out_ext_byte_len);
12703 }
12704 if (pmb->mbox_flag & LPFC_MBX_IMED_UNREG) {
12705 pmb->mbox_flag &= ~LPFC_MBX_IMED_UNREG;
12706
12707 lpfc_debugfs_disc_trc(vport,
12708 LPFC_DISC_TRC_MBOX_VPORT,
12709 "MBOX dflt rpi: : "
12710 "status:x%x rpi:x%x",
12711 (uint32_t)pmbox->mbxStatus,
12712 pmbox->un.varWords[0], 0);
12713
12714 if (!pmbox->mbxStatus) {
12715 mp = (struct lpfc_dmabuf *)
12716 (pmb->ctx_buf);
12717 ndlp = (struct lpfc_nodelist *)
12718 pmb->ctx_ndlp;
12719
12720
12721
12722
12723
12724
12725 lpfc_unreg_login(phba,
12726 vport->vpi,
12727 pmbox->un.varWords[0],
12728 pmb);
12729 pmb->mbox_cmpl =
12730 lpfc_mbx_cmpl_dflt_rpi;
12731 pmb->ctx_buf = mp;
12732 pmb->ctx_ndlp = ndlp;
12733 pmb->vport = vport;
12734 rc = lpfc_sli_issue_mbox(phba,
12735 pmb,
12736 MBX_NOWAIT);
12737 if (rc != MBX_BUSY)
12738 lpfc_printf_log(phba,
12739 KERN_ERR,
12740 LOG_MBOX | LOG_SLI,
12741 "0350 rc should have"
12742 "been MBX_BUSY\n");
12743 if (rc != MBX_NOT_FINISHED)
12744 goto send_current_mbox;
12745 }
12746 }
12747 spin_lock_irqsave(
12748 &phba->pport->work_port_lock,
12749 iflag);
12750 phba->pport->work_port_events &=
12751 ~WORKER_MBOX_TMO;
12752 spin_unlock_irqrestore(
12753 &phba->pport->work_port_lock,
12754 iflag);
12755 lpfc_mbox_cmpl_put(phba, pmb);
12756 }
12757 } else
12758 spin_unlock_irqrestore(&phba->hbalock, iflag);
12759
12760 if ((work_ha_copy & HA_MBATT) &&
12761 (phba->sli.mbox_active == NULL)) {
12762send_current_mbox:
12763
12764 do {
12765 rc = lpfc_sli_issue_mbox(phba, NULL,
12766 MBX_NOWAIT);
12767 } while (rc == MBX_NOT_FINISHED);
12768 if (rc != MBX_SUCCESS)
12769 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX |
12770 LOG_SLI, "0349 rc should be "
12771 "MBX_SUCCESS\n");
12772 }
12773
12774 spin_lock_irqsave(&phba->hbalock, iflag);
12775 phba->work_ha |= work_ha_copy;
12776 spin_unlock_irqrestore(&phba->hbalock, iflag);
12777 lpfc_worker_wake_up(phba);
12778 }
12779 return IRQ_HANDLED;
12780unplug_error:
12781 spin_unlock_irqrestore(&phba->hbalock, iflag);
12782 return IRQ_HANDLED;
12783
12784}
12785
12786
12787
12788
12789
12790
12791
12792
12793
12794
12795
12796
12797
12798
12799
12800
12801
12802
12803
12804
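/**
 * lpfc_sli_fp_intr_handler - Fast-path interrupt handler for SLI-3 devices
 * @irq: Interrupt number.
 * @dev_id: Pointer to the lpfc_hba structure.
 *
 * Services the FCP ring (and the extra ring when multi-ring support is
 * configured) of an SLI-3 port.  As the MSI-X fast-path vector it reads
 * and clears the host attention bits for those rings; in INTx/MSI mode it
 * works from the cached phba->ha_copy.
 *
 * Return: IRQ_HANDLED, or IRQ_NONE if the device is not ready or a
 * deferred error attention is pending.
 **/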
12805irqreturn_t
12806lpfc_sli_fp_intr_handler(int irq, void *dev_id)
12807{
12808 struct lpfc_hba *phba;
12809 uint32_t ha_copy;
12810 unsigned long status;
12811 unsigned long iflag;
12812 struct lpfc_sli_ring *pring;
12813
12814
12815
12816
12817 phba = (struct lpfc_hba *) dev_id;
12818
12819 if (unlikely(!phba))
12820 return IRQ_NONE;
12821
12822
12823
12824
12825
12826 if (phba->intr_type == MSIX) {
12827
12828 if (lpfc_intr_state_check(phba))
12829 return IRQ_NONE;
12830
12831 if (lpfc_readl(phba->HAregaddr, &ha_copy))
12832 return IRQ_HANDLED;
12833
12834 spin_lock_irqsave(&phba->hbalock, iflag);
12835
12836
12837
12838
12839 if (unlikely(phba->hba_flag & DEFER_ERATT)) {
12840 spin_unlock_irqrestore(&phba->hbalock, iflag);
12841 return IRQ_NONE;
12842 }
12843 writel((ha_copy & (HA_R0_CLR_MSK | HA_R1_CLR_MSK)),
12844 phba->HAregaddr);
12845 readl(phba->HAregaddr);
12846 spin_unlock_irqrestore(&phba->hbalock, iflag);
12847 } else
12848 ha_copy = phba->ha_copy;
12849
12850
12851
12852
12853 ha_copy &= ~(phba->work_ha_mask);
12854
12855 status = (ha_copy & (HA_RXMASK << (4*LPFC_FCP_RING)));
12856 status >>= (4*LPFC_FCP_RING);
12857 pring = &phba->sli.sli3_ring[LPFC_FCP_RING];
12858 if (status & HA_RXMASK)
12859 lpfc_sli_handle_fast_ring_event(phba, pring, status);
12860
12861 if (phba->cfg_multi_ring_support == 2) {
12862
12863
12864
12865
12866 status = (ha_copy & (HA_RXMASK << (4*LPFC_EXTRA_RING)));
12867 status >>= (4*LPFC_EXTRA_RING);
12868 if (status & HA_RXMASK) {
12869 lpfc_sli_handle_fast_ring_event(phba,
12870 &phba->sli.sli3_ring[LPFC_EXTRA_RING],
12871 status);
12872 }
12873 }
12874 return IRQ_HANDLED;
12875}
12876
12877
12878
12879
12880
12881
12882
12883
12884
12885
12886
12887
12888
12889
12890
12891
12892
12893
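/**
 * lpfc_sli_intr_handler - Combined INTx/MSI interrupt handler for SLI-3
 * @irq: Interrupt number.
 * @dev_id: Pointer to the lpfc_hba structure.
 *
 * Entry point when the port runs with a single interrupt vector.  Reads
 * and clears the host attention register, caches it in phba->ha_copy, and
 * then invokes the slow-path and fast-path handlers for whichever event
 * groups are pending.
 *
 * Return: IRQ_HANDLED if either sub-handler serviced work, else IRQ_NONE.
 **/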
12894irqreturn_t
12895lpfc_sli_intr_handler(int irq, void *dev_id)
12896{
12897 struct lpfc_hba *phba;
12898 irqreturn_t sp_irq_rc, fp_irq_rc;
12899 unsigned long status1, status2;
12900 uint32_t hc_copy;
12901
12902
12903
12904
12905
12906 phba = (struct lpfc_hba *) dev_id;
12907
12908 if (unlikely(!phba))
12909 return IRQ_NONE;
12910
12911
12912 if (lpfc_intr_state_check(phba))
12913 return IRQ_NONE;
12914
12915 spin_lock(&phba->hbalock);
12916 if (lpfc_readl(phba->HAregaddr, &phba->ha_copy)) {
12917 spin_unlock(&phba->hbalock);
12918 return IRQ_HANDLED;
12919 }
12920
12921 if (unlikely(!phba->ha_copy)) {
12922 spin_unlock(&phba->hbalock);
12923 return IRQ_NONE;
12924 } else if (phba->ha_copy & HA_ERATT) {
12925 if (phba->hba_flag & HBA_ERATT_HANDLED)
12926
12927 phba->ha_copy &= ~HA_ERATT;
12928 else
12929
12930 phba->hba_flag |= HBA_ERATT_HANDLED;
12931 }
12932
12933
12934
12935
12936 if (unlikely(phba->hba_flag & DEFER_ERATT)) {
12937 spin_unlock(&phba->hbalock);
12938 return IRQ_NONE;
12939 }
12940
12941
12942 if (lpfc_readl(phba->HCregaddr, &hc_copy)) {
12943 spin_unlock(&phba->hbalock);
12944 return IRQ_HANDLED;
12945 }
12946 writel(hc_copy & ~(HC_MBINT_ENA | HC_R0INT_ENA | HC_R1INT_ENA
12947 | HC_R2INT_ENA | HC_LAINT_ENA | HC_ERINT_ENA),
12948 phba->HCregaddr);
12949 writel((phba->ha_copy & ~(HA_LATT | HA_ERATT)), phba->HAregaddr);
12950 writel(hc_copy, phba->HCregaddr);
12951 readl(phba->HAregaddr);
12952 spin_unlock(&phba->hbalock);
12953
12954
12955
12956
12957
12958
12959 status1 = phba->ha_copy & (HA_MBATT | HA_LATT | HA_ERATT);
12960
12961
12962 status2 = (phba->ha_copy & (HA_RXMASK << (4*LPFC_ELS_RING)));
12963 status2 >>= (4*LPFC_ELS_RING);
12964
12965 if (status1 || (status2 & HA_RXMASK))
12966 sp_irq_rc = lpfc_sli_sp_intr_handler(irq, dev_id);
12967 else
12968 sp_irq_rc = IRQ_NONE;
12969
12970
12971
12972
12973
12974
12975 status1 = (phba->ha_copy & (HA_RXMASK << (4*LPFC_FCP_RING)));
12976 status1 >>= (4*LPFC_FCP_RING);
12977
12978
12979 if (phba->cfg_multi_ring_support == 2) {
12980 status2 = (phba->ha_copy & (HA_RXMASK << (4*LPFC_EXTRA_RING)));
12981 status2 >>= (4*LPFC_EXTRA_RING);
12982 } else
12983 status2 = 0;
12984
12985 if ((status1 & HA_RXMASK) || (status2 & HA_RXMASK))
12986 fp_irq_rc = lpfc_sli_fp_intr_handler(irq, dev_id);
12987 else
12988 fp_irq_rc = IRQ_NONE;
12989
12990
12991 return (sp_irq_rc == IRQ_HANDLED) ? sp_irq_rc : fp_irq_rc;
12992}
12993
12994
12995
12996
12997
12998
12999
13000
13001void lpfc_sli4_els_xri_abort_event_proc(struct lpfc_hba *phba)
13002{
13003 struct lpfc_cq_event *cq_event;
13004
13005
13006 spin_lock_irq(&phba->hbalock);
13007 phba->hba_flag &= ~ELS_XRI_ABORT_EVENT;
13008 spin_unlock_irq(&phba->hbalock);
13009
13010 while (!list_empty(&phba->sli4_hba.sp_els_xri_aborted_work_queue)) {
13011
13012 spin_lock_irq(&phba->hbalock);
13013 list_remove_head(&phba->sli4_hba.sp_els_xri_aborted_work_queue,
13014 cq_event, struct lpfc_cq_event, list);
13015 spin_unlock_irq(&phba->hbalock);
13016
13017 lpfc_sli4_els_xri_aborted(phba, &cq_event->cqe.wcqe_axri);
13018
13019 lpfc_sli4_cq_event_release(phba, cq_event);
13020 }
13021}
13022
13023
13024
13025
13026
13027
13028
13029
13030
13031
13032
13033
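/**
 * lpfc_sli4_iocb_param_transfer - Build an SLI-3 style response iocb from a WCQE
 * @phba: Pointer to HBA context object.
 * @pIocbIn: Pointer to the response iocbq being constructed.
 * @pIocbOut: Pointer to the original command iocbq.
 * @wcqe: Pointer to the work-queue completion entry.
 *
 * Copies the command iocb into the response iocb and then overlays the
 * completion status, residual/parameter words, BlockGuard (DIF) error
 * details and the exchange-busy flag taken from the WCQE, so that legacy
 * SLI-3 completion handlers can consume SLI-4 completions unchanged.
 **/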
13034static void
13035lpfc_sli4_iocb_param_transfer(struct lpfc_hba *phba,
13036 struct lpfc_iocbq *pIocbIn,
13037 struct lpfc_iocbq *pIocbOut,
13038 struct lpfc_wcqe_complete *wcqe)
13039{
13040 int numBdes, i;
13041 unsigned long iflags;
13042 uint32_t status, max_response;
13043 struct lpfc_dmabuf *dmabuf;
13044 struct ulp_bde64 *bpl, bde;
13045 size_t offset = offsetof(struct lpfc_iocbq, iocb);
13046
13047 memcpy((char *)pIocbIn + offset, (char *)pIocbOut + offset,
13048 sizeof(struct lpfc_iocbq) - offset);
13049
13050 status = bf_get(lpfc_wcqe_c_status, wcqe);
13051 pIocbIn->iocb.ulpStatus = (status & LPFC_IOCB_STATUS_MASK);
13052 if (pIocbOut->iocb_flag & LPFC_IO_FCP)
13053 if (pIocbIn->iocb.ulpStatus == IOSTAT_FCP_RSP_ERROR)
13054 pIocbIn->iocb.un.fcpi.fcpi_parm =
13055 pIocbOut->iocb.un.fcpi.fcpi_parm -
13056 wcqe->total_data_placed;
13057 else
13058 pIocbIn->iocb.un.ulpWord[4] = wcqe->parameter;
13059 else {
13060 pIocbIn->iocb.un.ulpWord[4] = wcqe->parameter;
13061 switch (pIocbOut->iocb.ulpCommand) {
13062 case CMD_ELS_REQUEST64_CR:
13063 dmabuf = (struct lpfc_dmabuf *)pIocbOut->context3;
13064 bpl = (struct ulp_bde64 *)dmabuf->virt;
13065 bde.tus.w = le32_to_cpu(bpl[1].tus.w);
13066 max_response = bde.tus.f.bdeSize;
13067 break;
13068 case CMD_GEN_REQUEST64_CR:
13069 max_response = 0;
13070 if (!pIocbOut->context3)
13071 break;
13072 numBdes = pIocbOut->iocb.un.genreq64.bdl.bdeSize/
13073 sizeof(struct ulp_bde64);
13074 dmabuf = (struct lpfc_dmabuf *)pIocbOut->context3;
13075 bpl = (struct ulp_bde64 *)dmabuf->virt;
13076 for (i = 0; i < numBdes; i++) {
13077 bde.tus.w = le32_to_cpu(bpl[i].tus.w);
13078 if (bde.tus.f.bdeFlags != BUFF_TYPE_BDE_64)
13079 max_response += bde.tus.f.bdeSize;
13080 }
13081 break;
13082 default:
13083 max_response = wcqe->total_data_placed;
13084 break;
13085 }
13086 if (max_response < wcqe->total_data_placed)
13087 pIocbIn->iocb.un.genreq64.bdl.bdeSize = max_response;
13088 else
13089 pIocbIn->iocb.un.genreq64.bdl.bdeSize =
13090 wcqe->total_data_placed;
13091 }
13092
13093
13094 if (status == CQE_STATUS_DI_ERROR) {
13095 pIocbIn->iocb.ulpStatus = IOSTAT_LOCAL_REJECT;
13096
13097 if (bf_get(lpfc_wcqe_c_bg_edir, wcqe))
13098 pIocbIn->iocb.un.ulpWord[4] = IOERR_RX_DMA_FAILED;
13099 else
13100 pIocbIn->iocb.un.ulpWord[4] = IOERR_TX_DMA_FAILED;
13101
13102 pIocbIn->iocb.unsli3.sli3_bg.bgstat = 0;
13103 if (bf_get(lpfc_wcqe_c_bg_ge, wcqe))
13104 pIocbIn->iocb.unsli3.sli3_bg.bgstat |=
13105 BGS_GUARD_ERR_MASK;
13106 if (bf_get(lpfc_wcqe_c_bg_ae, wcqe))
13107 pIocbIn->iocb.unsli3.sli3_bg.bgstat |=
13108 BGS_APPTAG_ERR_MASK;
13109 if (bf_get(lpfc_wcqe_c_bg_re, wcqe))
13110 pIocbIn->iocb.unsli3.sli3_bg.bgstat |=
13111 BGS_REFTAG_ERR_MASK;
13112
13113
13114 if (bf_get(lpfc_wcqe_c_bg_tdpv, wcqe)) {
13115 pIocbIn->iocb.unsli3.sli3_bg.bgstat |=
13116 BGS_HI_WATER_MARK_PRESENT_MASK;
13117 pIocbIn->iocb.unsli3.sli3_bg.bghm =
13118 wcqe->total_data_placed;
13119 }
13120
13121
13122
13123
13124
13125 if (!pIocbIn->iocb.unsli3.sli3_bg.bgstat)
13126 pIocbIn->iocb.unsli3.sli3_bg.bgstat |=
13127 (BGS_REFTAG_ERR_MASK | BGS_APPTAG_ERR_MASK |
13128 BGS_GUARD_ERR_MASK);
13129 }
13130
13131
13132 if (bf_get(lpfc_wcqe_c_xb, wcqe)) {
13133 spin_lock_irqsave(&phba->hbalock, iflags);
13134 pIocbIn->iocb_flag |= LPFC_EXCHANGE_BUSY;
13135 spin_unlock_irqrestore(&phba->hbalock, iflags);
13136 }
13137}
13138
13139
13140
13141
13142
13143
13144
13145
13146
13147
13148
13149
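/**
 * lpfc_sli4_els_wcqe_to_rspiocbq - Match an ELS WCQE with its command iocb
 * @phba: Pointer to HBA context object.
 * @irspiocbq: Pointer to the response iocbq that carries the WCQE.
 *
 * Looks up the originating command iocb on the ELS ring by request tag,
 * puts it back on the txcmplq for normal completion processing and
 * transfers the WCQE completion parameters into @irspiocbq.
 *
 * Return: @irspiocbq on success; NULL if the ELS ring is not set up or no
 * matching command iocb is found (in the latter case @irspiocbq is
 * released back to the pool).
 **/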
13150static struct lpfc_iocbq *
13151lpfc_sli4_els_wcqe_to_rspiocbq(struct lpfc_hba *phba,
13152 struct lpfc_iocbq *irspiocbq)
13153{
13154 struct lpfc_sli_ring *pring;
13155 struct lpfc_iocbq *cmdiocbq;
13156 struct lpfc_wcqe_complete *wcqe;
13157 unsigned long iflags;
13158
13159 pring = lpfc_phba_elsring(phba);
13160 if (unlikely(!pring))
13161 return NULL;
13162
13163 wcqe = &irspiocbq->cq_event.cqe.wcqe_cmpl;
13164 pring->stats.iocb_event++;
13165
13166 cmdiocbq = lpfc_sli_iocbq_lookup_by_tag(phba, pring,
13167 bf_get(lpfc_wcqe_c_request_tag, wcqe));
13168 if (unlikely(!cmdiocbq)) {
13169 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
13170 "0386 ELS complete with no corresponding "
13171 "cmdiocb: 0x%x 0x%x 0x%x 0x%x\n",
13172 wcqe->word0, wcqe->total_data_placed,
13173 wcqe->parameter, wcqe->word3);
13174 lpfc_sli_release_iocbq(phba, irspiocbq);
13175 return NULL;
13176 }
13177
13178 spin_lock_irqsave(&pring->ring_lock, iflags);
13179
13180 lpfc_sli_ringtxcmpl_put(phba, pring, cmdiocbq);
13181 spin_unlock_irqrestore(&pring->ring_lock, iflags);
13182
13183
13184 lpfc_sli4_iocb_param_transfer(phba, irspiocbq, cmdiocbq, wcqe);
13185
13186 return irspiocbq;
13187}
13188
13189inline struct lpfc_cq_event *
13190lpfc_cq_event_setup(struct lpfc_hba *phba, void *entry, int size)
13191{
13192 struct lpfc_cq_event *cq_event;
13193
13194
13195 cq_event = lpfc_sli4_cq_event_alloc(phba);
13196 if (!cq_event) {
13197 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
13198 "0602 Failed to alloc CQ_EVENT entry\n");
13199 return NULL;
13200 }
13201
13202
13203 memcpy(&cq_event->cqe, entry, size);
13204 return cq_event;
13205}
13206
13207
13208
13209
13210
13211
13212
13213
13214
13215
13216
13217static bool
13218lpfc_sli4_sp_handle_async_event(struct lpfc_hba *phba, struct lpfc_mcqe *mcqe)
13219{
13220 struct lpfc_cq_event *cq_event;
13221 unsigned long iflags;
13222
13223 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
13224 "0392 Async Event: word0:x%x, word1:x%x, "
13225 "word2:x%x, word3:x%x\n", mcqe->word0,
13226 mcqe->mcqe_tag0, mcqe->mcqe_tag1, mcqe->trailer);
13227
13228 cq_event = lpfc_cq_event_setup(phba, mcqe, sizeof(struct lpfc_mcqe));
13229 if (!cq_event)
13230 return false;
13231 spin_lock_irqsave(&phba->hbalock, iflags);
13232 list_add_tail(&cq_event->list, &phba->sli4_hba.sp_asynce_work_queue);
13233
13234 phba->hba_flag |= ASYNC_EVENT;
13235 spin_unlock_irqrestore(&phba->hbalock, iflags);
13236
13237 return true;
13238}
13239
13240
13241
13242
13243
13244
13245
13246
13247
13248
13249
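/**
 * lpfc_sli4_sp_handle_mbox_event - Handle a mailbox completion MCQE
 * @phba: Pointer to HBA context object.
 * @mcqe: Pointer to the mailbox completion queue entry.
 *
 * Completes the active mailbox command: copies the firmware response back
 * into the mailbox command structure, folds the MCQE status into the MQE
 * status word, handles the LPFC_MBX_IMED_UNREG immediate unreg-rpi case,
 * queues the command on the completion list and wakes the worker thread.
 * Finally releases the mailbox queue entry and clears mbox_active so the
 * next queued mailbox can be issued.
 *
 * Return: true if work was posted for the worker thread, otherwise false.
 **/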
13250static bool
13251lpfc_sli4_sp_handle_mbox_event(struct lpfc_hba *phba, struct lpfc_mcqe *mcqe)
13252{
13253 uint32_t mcqe_status;
13254 MAILBOX_t *mbox, *pmbox;
13255 struct lpfc_mqe *mqe;
13256 struct lpfc_vport *vport;
13257 struct lpfc_nodelist *ndlp;
13258 struct lpfc_dmabuf *mp;
13259 unsigned long iflags;
13260 LPFC_MBOXQ_t *pmb;
13261 bool workposted = false;
13262 int rc;
13263
13264
13265 if (!bf_get(lpfc_trailer_completed, mcqe))
13266 goto out_no_mqe_complete;
13267
13268
13269 spin_lock_irqsave(&phba->hbalock, iflags);
13270 pmb = phba->sli.mbox_active;
13271 if (unlikely(!pmb)) {
13272 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX,
13273 "1832 No pending MBOX command to handle\n");
13274 spin_unlock_irqrestore(&phba->hbalock, iflags);
13275 goto out_no_mqe_complete;
13276 }
13277 spin_unlock_irqrestore(&phba->hbalock, iflags);
13278 mqe = &pmb->u.mqe;
13279 pmbox = (MAILBOX_t *)&pmb->u.mqe;
13280 mbox = phba->mbox;
13281 vport = pmb->vport;
13282
13283
13284 phba->last_completion_time = jiffies;
13285 del_timer(&phba->sli.mbox_tmo);
13286
13287
13288 if (pmb->mbox_cmpl && mbox)
13289 lpfc_sli4_pcimem_bcopy(mbox, mqe, sizeof(struct lpfc_mqe));
13290
13291
13292
13293
13294
13295 mcqe_status = bf_get(lpfc_mcqe_status, mcqe);
13296 if (mcqe_status != MB_CQE_STATUS_SUCCESS) {
13297 if (bf_get(lpfc_mqe_status, mqe) == MBX_SUCCESS)
13298 bf_set(lpfc_mqe_status, mqe,
13299 (LPFC_MBX_ERROR_RANGE | mcqe_status));
13300 }
13301 if (pmb->mbox_flag & LPFC_MBX_IMED_UNREG) {
13302 pmb->mbox_flag &= ~LPFC_MBX_IMED_UNREG;
13303 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_MBOX_VPORT,
13304 "MBOX dflt rpi: status:x%x rpi:x%x",
13305 mcqe_status,
13306 pmbox->un.varWords[0], 0);
13307 if (mcqe_status == MB_CQE_STATUS_SUCCESS) {
13308 mp = (struct lpfc_dmabuf *)(pmb->ctx_buf);
13309 ndlp = (struct lpfc_nodelist *)pmb->ctx_ndlp;
13310
13311
13312
13313 lpfc_unreg_login(phba, vport->vpi,
13314 pmbox->un.varWords[0], pmb);
13315 pmb->mbox_cmpl = lpfc_mbx_cmpl_dflt_rpi;
13316 pmb->ctx_buf = mp;
13317 pmb->ctx_ndlp = ndlp;
13318 pmb->vport = vport;
13319 rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT);
13320 if (rc != MBX_BUSY)
13321 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX |
13322 LOG_SLI, "0385 rc should "
13323 "have been MBX_BUSY\n");
13324 if (rc != MBX_NOT_FINISHED)
13325 goto send_current_mbox;
13326 }
13327 }
13328 spin_lock_irqsave(&phba->pport->work_port_lock, iflags);
13329 phba->pport->work_port_events &= ~WORKER_MBOX_TMO;
13330 spin_unlock_irqrestore(&phba->pport->work_port_lock, iflags);
13331
13332
13333 spin_lock_irqsave(&phba->hbalock, iflags);
13334 __lpfc_mbox_cmpl_put(phba, pmb);
13335 phba->work_ha |= HA_MBATT;
13336 spin_unlock_irqrestore(&phba->hbalock, iflags);
13337 workposted = true;
13338
13339send_current_mbox:
13340 spin_lock_irqsave(&phba->hbalock, iflags);
13341
13342 phba->sli.sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
13343
13344 phba->sli.mbox_active = NULL;
13345 if (bf_get(lpfc_trailer_consumed, mcqe))
13346 lpfc_sli4_mq_release(phba->sli4_hba.mbx_wq);
13347 spin_unlock_irqrestore(&phba->hbalock, iflags);
13348
13349 lpfc_worker_wake_up(phba);
13350 return workposted;
13351
13352out_no_mqe_complete:
13353 spin_lock_irqsave(&phba->hbalock, iflags);
13354 if (bf_get(lpfc_trailer_consumed, mcqe))
13355 lpfc_sli4_mq_release(phba->sli4_hba.mbx_wq);
13356 spin_unlock_irqrestore(&phba->hbalock, iflags);
13357 return false;
13358}
13359
13360
13361
13362
13363
13364
13365
13366
13367
13368
13369
13370
13371static bool
13372lpfc_sli4_sp_handle_mcqe(struct lpfc_hba *phba, struct lpfc_queue *cq,
13373 struct lpfc_cqe *cqe)
13374{
13375 struct lpfc_mcqe mcqe;
13376 bool workposted;
13377
13378 cq->CQ_mbox++;
13379
13380
13381 lpfc_sli4_pcimem_bcopy(cqe, &mcqe, sizeof(struct lpfc_mcqe));
13382
13383
13384 if (!bf_get(lpfc_trailer_async, &mcqe))
13385 workposted = lpfc_sli4_sp_handle_mbox_event(phba, &mcqe);
13386 else
13387 workposted = lpfc_sli4_sp_handle_async_event(phba, &mcqe);
13388 return workposted;
13389}
13390
13391
13392
13393
13394
13395
13396
13397
13398
13399
13400
13401static bool
13402lpfc_sli4_sp_handle_els_wcqe(struct lpfc_hba *phba, struct lpfc_queue *cq,
13403 struct lpfc_wcqe_complete *wcqe)
13404{
13405 struct lpfc_iocbq *irspiocbq;
13406 unsigned long iflags;
13407 struct lpfc_sli_ring *pring = cq->pring;
13408 int txq_cnt = 0;
13409 int txcmplq_cnt = 0;
13410
13411
13412 if (unlikely(bf_get(lpfc_wcqe_c_status, wcqe))) {
13413
13414 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
13415 "0357 ELS CQE error: status=x%x: "
13416 "CQE: %08x %08x %08x %08x\n",
13417 bf_get(lpfc_wcqe_c_status, wcqe),
13418 wcqe->word0, wcqe->total_data_placed,
13419 wcqe->parameter, wcqe->word3);
13420 }
13421
13422
13423 irspiocbq = lpfc_sli_get_iocbq(phba);
13424 if (!irspiocbq) {
13425 if (!list_empty(&pring->txq))
13426 txq_cnt++;
13427 if (!list_empty(&pring->txcmplq))
13428 txcmplq_cnt++;
13429 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
13430 "0387 NO IOCBQ data: txq_cnt=%d iocb_cnt=%d "
13431 "els_txcmplq_cnt=%d\n",
13432 txq_cnt, phba->iocb_cnt,
13433 txcmplq_cnt);
13434 return false;
13435 }
13436
13437
13438 memcpy(&irspiocbq->cq_event.cqe.wcqe_cmpl, wcqe, sizeof(*wcqe));
13439 spin_lock_irqsave(&phba->hbalock, iflags);
13440 list_add_tail(&irspiocbq->cq_event.list,
13441 &phba->sli4_hba.sp_queue_event);
13442 phba->hba_flag |= HBA_SP_QUEUE_EVT;
13443 spin_unlock_irqrestore(&phba->hbalock, iflags);
13444
13445 return true;
13446}
13447
13448
13449
13450
13451
13452
13453
13454
13455
13456static void
13457lpfc_sli4_sp_handle_rel_wcqe(struct lpfc_hba *phba,
13458 struct lpfc_wcqe_release *wcqe)
13459{
13460
13461 if (unlikely(!phba->sli4_hba.els_wq))
13462 return;
13463
13464 if (bf_get(lpfc_wcqe_r_wq_id, wcqe) == phba->sli4_hba.els_wq->queue_id)
13465 lpfc_sli4_wq_release(phba->sli4_hba.els_wq,
13466 bf_get(lpfc_wcqe_r_wqe_index, wcqe));
13467 else
13468 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
13469 "2579 Slow-path wqe consume event carries "
13470 "miss-matched qid: wcqe-qid=x%x, sp-qid=x%x\n",
13471 bf_get(lpfc_wcqe_r_wqe_index, wcqe),
13472 phba->sli4_hba.els_wq->queue_id);
13473}
13474
13475
13476
13477
13478
13479
13480
13481
13482
13483
13484
13485static bool
13486lpfc_sli4_sp_handle_abort_xri_wcqe(struct lpfc_hba *phba,
13487 struct lpfc_queue *cq,
13488 struct sli4_wcqe_xri_aborted *wcqe)
13489{
13490 bool workposted = false;
13491 struct lpfc_cq_event *cq_event;
13492 unsigned long iflags;
13493
13494 switch (cq->subtype) {
13495 case LPFC_IO:
13496 lpfc_sli4_io_xri_aborted(phba, wcqe, cq->hdwq);
13497 if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) {
13498
13499 if (phba->nvmet_support)
13500 lpfc_sli4_nvmet_xri_aborted(phba, wcqe);
13501 }
13502 workposted = false;
13503 break;
13504 case LPFC_NVME_LS:
13505 case LPFC_ELS:
13506 cq_event = lpfc_cq_event_setup(
13507 phba, wcqe, sizeof(struct sli4_wcqe_xri_aborted));
13508 if (!cq_event)
13509 return false;
13510 cq_event->hdwq = cq->hdwq;
13511 spin_lock_irqsave(&phba->hbalock, iflags);
13512 list_add_tail(&cq_event->list,
13513 &phba->sli4_hba.sp_els_xri_aborted_work_queue);
13514
13515 phba->hba_flag |= ELS_XRI_ABORT_EVENT;
13516 spin_unlock_irqrestore(&phba->hbalock, iflags);
13517 workposted = true;
13518 break;
13519 default:
13520 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
13521 "0603 Invalid CQ subtype %d: "
13522 "%08x %08x %08x %08x\n",
13523 cq->subtype, wcqe->word0, wcqe->parameter,
13524 wcqe->word2, wcqe->word3);
13525 workposted = false;
13526 break;
13527 }
13528 return workposted;
13529}
13530
13531#define FC_RCTL_MDS_DIAGS 0xF4
13532
13533
13534
13535
13536
13537
13538
13539
13540
13541
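/**
 * lpfc_sli4_sp_handle_rcqe - Handle a slow-path receive-queue completion
 * @phba: Pointer to HBA context object.
 * @rcqe: Pointer to the receive completion queue entry.
 *
 * Services unsolicited receive completions for the slow-path header/data
 * receive queue pair.  On success the posted buffer is retrieved; MDS
 * diagnostic and unsolicited-data frames are handed straight to
 * lpfc_sli4_handle_mds_loopback(), other frames are queued on
 * sp_queue_event for the worker thread.  Buffer shortages set
 * HBA_POST_RECEIVE_BUFFER so the worker thread reposts buffers.
 *
 * Return: true if work was posted to the worker thread, otherwise false.
 **/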
13542static bool
13543lpfc_sli4_sp_handle_rcqe(struct lpfc_hba *phba, struct lpfc_rcqe *rcqe)
13544{
13545 bool workposted = false;
13546 struct fc_frame_header *fc_hdr;
13547 struct lpfc_queue *hrq = phba->sli4_hba.hdr_rq;
13548 struct lpfc_queue *drq = phba->sli4_hba.dat_rq;
13549 struct lpfc_nvmet_tgtport *tgtp;
13550 struct hbq_dmabuf *dma_buf;
13551 uint32_t status, rq_id;
13552 unsigned long iflags;
13553
13554
13555 if (unlikely(!hrq) || unlikely(!drq))
13556 return workposted;
13557
13558 if (bf_get(lpfc_cqe_code, rcqe) == CQE_CODE_RECEIVE_V1)
13559 rq_id = bf_get(lpfc_rcqe_rq_id_v1, rcqe);
13560 else
13561 rq_id = bf_get(lpfc_rcqe_rq_id, rcqe);
13562 if (rq_id != hrq->queue_id)
13563 goto out;
13564
13565 status = bf_get(lpfc_rcqe_status, rcqe);
13566 switch (status) {
13567 case FC_STATUS_RQ_BUF_LEN_EXCEEDED:
13568 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
13569 "2537 Receive Frame Truncated!!\n");
13570
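		/* fall through */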
13571 case FC_STATUS_RQ_SUCCESS:
13572 spin_lock_irqsave(&phba->hbalock, iflags);
13573 lpfc_sli4_rq_release(hrq, drq);
13574 dma_buf = lpfc_sli_hbqbuf_get(&phba->hbqs[0].hbq_buffer_list);
13575 if (!dma_buf) {
13576 hrq->RQ_no_buf_found++;
13577 spin_unlock_irqrestore(&phba->hbalock, iflags);
13578 goto out;
13579 }
13580 hrq->RQ_rcv_buf++;
13581 hrq->RQ_buf_posted--;
13582 memcpy(&dma_buf->cq_event.cqe.rcqe_cmpl, rcqe, sizeof(*rcqe));
13583
13584 fc_hdr = (struct fc_frame_header *)dma_buf->hbuf.virt;
13585
13586 if (fc_hdr->fh_r_ctl == FC_RCTL_MDS_DIAGS ||
13587 fc_hdr->fh_r_ctl == FC_RCTL_DD_UNSOL_DATA) {
13588 spin_unlock_irqrestore(&phba->hbalock, iflags);
13589
13590 lpfc_sli4_handle_mds_loopback(phba->pport, dma_buf);
13591 break;
13592 }
13593
13594
13595 list_add_tail(&dma_buf->cq_event.list,
13596 &phba->sli4_hba.sp_queue_event);
13597
13598 phba->hba_flag |= HBA_SP_QUEUE_EVT;
13599 spin_unlock_irqrestore(&phba->hbalock, iflags);
13600 workposted = true;
13601 break;
13602 case FC_STATUS_INSUFF_BUF_FRM_DISC:
13603 if (phba->nvmet_support) {
13604 tgtp = phba->targetport->private;
13605 lpfc_printf_log(phba, KERN_ERR, LOG_SLI | LOG_NVME,
13606 "6402 RQE Error x%x, posted %d err_cnt "
13607 "%d: %x %x %x\n",
13608 status, hrq->RQ_buf_posted,
13609 hrq->RQ_no_posted_buf,
13610 atomic_read(&tgtp->rcv_fcp_cmd_in),
13611 atomic_read(&tgtp->rcv_fcp_cmd_out),
13612 atomic_read(&tgtp->xmt_fcp_release));
13613 }
13614
13615
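		/* fall through */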
13616 case FC_STATUS_INSUFF_BUF_NEED_BUF:
13617 hrq->RQ_no_posted_buf++;
13618
13619 spin_lock_irqsave(&phba->hbalock, iflags);
13620 phba->hba_flag |= HBA_POST_RECEIVE_BUFFER;
13621 spin_unlock_irqrestore(&phba->hbalock, iflags);
13622 workposted = true;
13623 break;
13624 }
13625out:
13626 return workposted;
13627}
13628
13629
13630
13631
13632
13633
13634
13635
13636
13637
13638
13639
13640static bool
13641lpfc_sli4_sp_handle_cqe(struct lpfc_hba *phba, struct lpfc_queue *cq,
13642 struct lpfc_cqe *cqe)
13643{
13644 struct lpfc_cqe cqevt;
13645 bool workposted = false;
13646
13647
13648 lpfc_sli4_pcimem_bcopy(cqe, &cqevt, sizeof(struct lpfc_cqe));
13649
13650
13651 switch (bf_get(lpfc_cqe_code, &cqevt)) {
13652 case CQE_CODE_COMPL_WQE:
13653
13654 phba->last_completion_time = jiffies;
13655 workposted = lpfc_sli4_sp_handle_els_wcqe(phba, cq,
13656 (struct lpfc_wcqe_complete *)&cqevt);
13657 break;
13658 case CQE_CODE_RELEASE_WQE:
13659
13660 lpfc_sli4_sp_handle_rel_wcqe(phba,
13661 (struct lpfc_wcqe_release *)&cqevt);
13662 break;
13663 case CQE_CODE_XRI_ABORTED:
13664
13665 phba->last_completion_time = jiffies;
13666 workposted = lpfc_sli4_sp_handle_abort_xri_wcqe(phba, cq,
13667 (struct sli4_wcqe_xri_aborted *)&cqevt);
13668 break;
13669 case CQE_CODE_RECEIVE:
13670 case CQE_CODE_RECEIVE_V1:
13671
13672 phba->last_completion_time = jiffies;
13673 workposted = lpfc_sli4_sp_handle_rcqe(phba,
13674 (struct lpfc_rcqe *)&cqevt);
13675 break;
13676 default:
13677 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
13678 "0388 Not a valid WCQE code: x%x\n",
13679 bf_get(lpfc_cqe_code, &cqevt));
13680 break;
13681 }
13682 return workposted;
13683}
13684
13685
13686
13687
13688
13689
13690
13691
13692
13693
13694
13695
13696
13697
13698static void
13699lpfc_sli4_sp_handle_eqe(struct lpfc_hba *phba, struct lpfc_eqe *eqe,
13700 struct lpfc_queue *speq)
13701{
13702 struct lpfc_queue *cq = NULL, *childq;
13703 uint16_t cqid;
13704
13705
13706 cqid = bf_get_le32(lpfc_eqe_resource_id, eqe);
13707
13708 list_for_each_entry(childq, &speq->child_list, list) {
13709 if (childq->queue_id == cqid) {
13710 cq = childq;
13711 break;
13712 }
13713 }
13714 if (unlikely(!cq)) {
13715 if (phba->sli.sli_flag & LPFC_SLI_ACTIVE)
13716 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
13717 "0365 Slow-path CQ identifier "
13718 "(%d) does not exist\n", cqid);
13719 return;
13720 }
13721
13722
13723 cq->assoc_qp = speq;
13724
13725 if (!queue_work_on(cq->chann, phba->wq, &cq->spwork))
13726 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
13727 "0390 Cannot schedule soft IRQ "
13728 "for CQ eqcqid=%d, cqid=%d on CPU %d\n",
13729 cqid, cq->queue_id, raw_smp_processor_id());
13730}
13731
13732
13733
13734
13735
13736
13737
13738
13739
13740
13741
13742
13743
13744
13745
13746
13747
13748
13749
13750
13751
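/**
 * __lpfc_sli4_process_cq - Process entries from a completion queue
 * @phba: Pointer to HBA context object.
 * @cq: Pointer to the completion queue to process.
 * @handler: Per-CQE handler invoked for each valid entry.
 * @delay: Output; set non-zero when processing should be rescheduled as
 *         delayed work because cfg_cq_poll_threshold was reached.
 *
 * Claims the queue (cmpxchg on queue_claimed so only one context processes
 * it at a time), consumes valid CQEs while periodically updating the CQ
 * doorbell without re-arming, and finally writes the doorbell with or
 * without re-arm depending on whether a poll delay was requested.
 *
 * Return: true if any handler requested worker-thread processing.
 **/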
13752static bool
13753__lpfc_sli4_process_cq(struct lpfc_hba *phba, struct lpfc_queue *cq,
13754 bool (*handler)(struct lpfc_hba *, struct lpfc_queue *,
13755 struct lpfc_cqe *), unsigned long *delay)
13756{
13757 struct lpfc_cqe *cqe;
13758 bool workposted = false;
13759 int count = 0, consumed = 0;
13760 bool arm = true;
13761
13762
13763 *delay = 0;
13764
13765 if (cmpxchg(&cq->queue_claimed, 0, 1) != 0)
13766 goto rearm_and_exit;
13767
13768
13769 cq->q_flag = 0;
13770 cqe = lpfc_sli4_cq_get(cq);
13771 while (cqe) {
13772 workposted |= handler(phba, cq, cqe);
13773 __lpfc_sli4_consume_cqe(phba, cq, cqe);
13774
13775 consumed++;
13776 if (!(++count % cq->max_proc_limit))
13777 break;
13778
13779 if (!(count % cq->notify_interval)) {
13780 phba->sli4_hba.sli4_write_cq_db(phba, cq, consumed,
13781 LPFC_QUEUE_NOARM);
13782 consumed = 0;
13783 cq->assoc_qp->q_flag |= HBA_EQ_DELAY_CHK;
13784 }
13785
13786 if (count == LPFC_NVMET_CQ_NOTIFY)
13787 cq->q_flag |= HBA_NVMET_CQ_NOTIFY;
13788
13789 cqe = lpfc_sli4_cq_get(cq);
13790 }
13791 if (count >= phba->cfg_cq_poll_threshold) {
13792 *delay = 1;
13793 arm = false;
13794 }
13795
13796
13797 if (count > cq->CQ_max_cqe)
13798 cq->CQ_max_cqe = count;
13799
13800 cq->assoc_qp->EQ_cqe_cnt += count;
13801
13802
13803 if (unlikely(count == 0))
13804 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
13805 "0369 No entry from completion queue "
13806 "qid=%d\n", cq->queue_id);
13807
13808 xchg(&cq->queue_claimed, 0);
13809
13810rearm_and_exit:
13811 phba->sli4_hba.sli4_write_cq_db(phba, cq, consumed,
13812 arm ? LPFC_QUEUE_REARM : LPFC_QUEUE_NOARM);
13813
13814 return workposted;
13815}
13816
13817
13818
13819
13820
13821
13822
13823
13824
13825
13826
13827
13828
13829
13830
13831
13832static void
13833__lpfc_sli4_sp_process_cq(struct lpfc_queue *cq)
13834{
13835 struct lpfc_hba *phba = cq->phba;
13836 unsigned long delay;
13837 bool workposted = false;
13838
13839
13840 switch (cq->type) {
13841 case LPFC_MCQ:
13842 workposted |= __lpfc_sli4_process_cq(phba, cq,
13843 lpfc_sli4_sp_handle_mcqe,
13844 &delay);
13845 break;
13846 case LPFC_WCQ:
13847 if (cq->subtype == LPFC_IO)
13848 workposted |= __lpfc_sli4_process_cq(phba, cq,
13849 lpfc_sli4_fp_handle_cqe,
13850 &delay);
13851 else
13852 workposted |= __lpfc_sli4_process_cq(phba, cq,
13853 lpfc_sli4_sp_handle_cqe,
13854 &delay);
13855 break;
13856 default:
13857 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
13858 "0370 Invalid completion queue type (%d)\n",
13859 cq->type);
13860 return;
13861 }
13862
13863 if (delay) {
13864 if (!queue_delayed_work_on(cq->chann, phba->wq,
13865 &cq->sched_spwork, delay))
13866 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
13867 "0394 Cannot schedule soft IRQ "
13868 "for cqid=%d on CPU %d\n",
13869 cq->queue_id, cq->chann);
13870 }
13871
13872
13873 if (workposted)
13874 lpfc_worker_wake_up(phba);
13875}
13876
13877
13878
13879
13880
13881
13882
13883
13884static void
13885lpfc_sli4_sp_process_cq(struct work_struct *work)
13886{
13887 struct lpfc_queue *cq = container_of(work, struct lpfc_queue, spwork);
13888
13889 __lpfc_sli4_sp_process_cq(cq);
13890}
13891
13892
13893
13894
13895
13896
13897
13898static void
13899lpfc_sli4_dly_sp_process_cq(struct work_struct *work)
13900{
13901 struct lpfc_queue *cq = container_of(to_delayed_work(work),
13902 struct lpfc_queue, sched_spwork);
13903
13904 __lpfc_sli4_sp_process_cq(cq);
13905}
13906
13907
13908
13909
13910
13911
13912
13913
13914
13915
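/**
 * lpfc_sli4_fp_handle_fcp_wcqe - Handle a fast-path FCP/NVME work completion
 * @phba: Pointer to HBA context object.
 * @cq: Pointer to the completion queue the WCQE came from.
 * @wcqe: Pointer to the work-queue completion entry.
 *
 * Looks up the command iocb by request tag and completes it.  Commands
 * issued through the wqe_cmpl interface are completed with the raw WCQE;
 * legacy iocb_cmpl users get an on-stack response iocb built by
 * lpfc_sli4_iocb_param_transfer().  Out-of-resource local-reject statuses
 * also trigger queue-depth ramp-down.
 **/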
13916static void
13917lpfc_sli4_fp_handle_fcp_wcqe(struct lpfc_hba *phba, struct lpfc_queue *cq,
13918 struct lpfc_wcqe_complete *wcqe)
13919{
13920 struct lpfc_sli_ring *pring = cq->pring;
13921 struct lpfc_iocbq *cmdiocbq;
13922 struct lpfc_iocbq irspiocbq;
13923 unsigned long iflags;
13924
13925
13926 if (unlikely(bf_get(lpfc_wcqe_c_status, wcqe))) {
13927
13928
13929
13930 if (((bf_get(lpfc_wcqe_c_status, wcqe) ==
13931 IOSTAT_LOCAL_REJECT)) &&
13932 ((wcqe->parameter & IOERR_PARAM_MASK) ==
13933 IOERR_NO_RESOURCES))
13934 phba->lpfc_rampdown_queue_depth(phba);
13935
13936
13937 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
13938 "0373 FCP CQE error: status=x%x: "
13939 "CQE: %08x %08x %08x %08x\n",
13940 bf_get(lpfc_wcqe_c_status, wcqe),
13941 wcqe->word0, wcqe->total_data_placed,
13942 wcqe->parameter, wcqe->word3);
13943 }
13944
13945
13946 spin_lock_irqsave(&pring->ring_lock, iflags);
13947 pring->stats.iocb_event++;
13948 spin_unlock_irqrestore(&pring->ring_lock, iflags);
13949 cmdiocbq = lpfc_sli_iocbq_lookup_by_tag(phba, pring,
13950 bf_get(lpfc_wcqe_c_request_tag, wcqe));
13951 if (unlikely(!cmdiocbq)) {
13952 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
13953 "0374 FCP complete with no corresponding "
13954 "cmdiocb: iotag (%d)\n",
13955 bf_get(lpfc_wcqe_c_request_tag, wcqe));
13956 return;
13957 }
13958#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
13959 cmdiocbq->isr_timestamp = cq->isr_timestamp;
13960#endif
13961 if (cmdiocbq->iocb_cmpl == NULL) {
13962 if (cmdiocbq->wqe_cmpl) {
13963 if (cmdiocbq->iocb_flag & LPFC_DRIVER_ABORTED) {
13964 spin_lock_irqsave(&phba->hbalock, iflags);
13965 cmdiocbq->iocb_flag &= ~LPFC_DRIVER_ABORTED;
13966 spin_unlock_irqrestore(&phba->hbalock, iflags);
13967 }
13968
13969
13970 (cmdiocbq->wqe_cmpl)(phba, cmdiocbq, wcqe);
13971 return;
13972 }
13973 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
13974 "0375 FCP cmdiocb not callback function "
13975 "iotag: (%d)\n",
13976 bf_get(lpfc_wcqe_c_request_tag, wcqe));
13977 return;
13978 }
13979
13980
13981 lpfc_sli4_iocb_param_transfer(phba, &irspiocbq, cmdiocbq, wcqe);
13982
13983 if (cmdiocbq->iocb_flag & LPFC_DRIVER_ABORTED) {
13984 spin_lock_irqsave(&phba->hbalock, iflags);
13985 cmdiocbq->iocb_flag &= ~LPFC_DRIVER_ABORTED;
13986 spin_unlock_irqrestore(&phba->hbalock, iflags);
13987 }
13988
13989
13990 (cmdiocbq->iocb_cmpl)(phba, cmdiocbq, &irspiocbq);
13991}
13992
13993
13994
13995
13996
13997
13998
13999
14000
14001
14002static void
14003lpfc_sli4_fp_handle_rel_wcqe(struct lpfc_hba *phba, struct lpfc_queue *cq,
14004 struct lpfc_wcqe_release *wcqe)
14005{
14006 struct lpfc_queue *childwq;
14007 bool wqid_matched = false;
14008 uint16_t hba_wqid;
14009
14010
14011 hba_wqid = bf_get(lpfc_wcqe_r_wq_id, wcqe);
14012 list_for_each_entry(childwq, &cq->child_list, list) {
14013 if (childwq->queue_id == hba_wqid) {
14014 lpfc_sli4_wq_release(childwq,
14015 bf_get(lpfc_wcqe_r_wqe_index, wcqe));
14016 if (childwq->q_flag & HBA_NVMET_WQFULL)
14017 lpfc_nvmet_wqfull_process(phba, childwq);
14018 wqid_matched = true;
14019 break;
14020 }
14021 }
14022
14023 if (!wqid_matched)
14024 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
14025 "2580 Fast-path wqe consume event carries "
14026 "miss-matched qid: wcqe-qid=x%x\n", hba_wqid);
14027}
14028
14029
14030
14031
14032
14033
14034
14035
14036
14037
14038static bool
14039lpfc_sli4_nvmet_handle_rcqe(struct lpfc_hba *phba, struct lpfc_queue *cq,
14040 struct lpfc_rcqe *rcqe)
14041{
14042 bool workposted = false;
14043 struct lpfc_queue *hrq;
14044 struct lpfc_queue *drq;
14045 struct rqb_dmabuf *dma_buf;
14046 struct fc_frame_header *fc_hdr;
14047 struct lpfc_nvmet_tgtport *tgtp;
14048 uint32_t status, rq_id;
14049 unsigned long iflags;
14050 uint32_t fctl, idx;
14051
14052 if ((phba->nvmet_support == 0) ||
14053 (phba->sli4_hba.nvmet_cqset == NULL))
14054 return workposted;
14055
14056 idx = cq->queue_id - phba->sli4_hba.nvmet_cqset[0]->queue_id;
14057 hrq = phba->sli4_hba.nvmet_mrq_hdr[idx];
14058 drq = phba->sli4_hba.nvmet_mrq_data[idx];
14059
14060
14061 if (unlikely(!hrq) || unlikely(!drq))
14062 return workposted;
14063
14064 if (bf_get(lpfc_cqe_code, rcqe) == CQE_CODE_RECEIVE_V1)
14065 rq_id = bf_get(lpfc_rcqe_rq_id_v1, rcqe);
14066 else
14067 rq_id = bf_get(lpfc_rcqe_rq_id, rcqe);
14068
14069 if ((phba->nvmet_support == 0) ||
14070 (rq_id != hrq->queue_id))
14071 return workposted;
14072
14073 status = bf_get(lpfc_rcqe_status, rcqe);
14074 switch (status) {
14075 case FC_STATUS_RQ_BUF_LEN_EXCEEDED:
14076 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
14077 "6126 Receive Frame Truncated!!\n");
14078
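		/* fall through */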
14079 case FC_STATUS_RQ_SUCCESS:
14080 spin_lock_irqsave(&phba->hbalock, iflags);
14081 lpfc_sli4_rq_release(hrq, drq);
14082 dma_buf = lpfc_sli_rqbuf_get(phba, hrq);
14083 if (!dma_buf) {
14084 hrq->RQ_no_buf_found++;
14085 spin_unlock_irqrestore(&phba->hbalock, iflags);
14086 goto out;
14087 }
14088 spin_unlock_irqrestore(&phba->hbalock, iflags);
14089 hrq->RQ_rcv_buf++;
14090 hrq->RQ_buf_posted--;
14091 fc_hdr = (struct fc_frame_header *)dma_buf->hbuf.virt;
14092
14093
14094 fctl = (fc_hdr->fh_f_ctl[0] << 16 |
14095 fc_hdr->fh_f_ctl[1] << 8 |
14096 fc_hdr->fh_f_ctl[2]);
14097 if (((fctl &
14098 (FC_FC_FIRST_SEQ | FC_FC_END_SEQ | FC_FC_SEQ_INIT)) !=
14099 (FC_FC_FIRST_SEQ | FC_FC_END_SEQ | FC_FC_SEQ_INIT)) ||
14100 (fc_hdr->fh_seq_cnt != 0))
14101 goto drop;
14102
14103 if (fc_hdr->fh_type == FC_TYPE_FCP) {
14104 dma_buf->bytes_recv = bf_get(lpfc_rcqe_length, rcqe);
14105 lpfc_nvmet_unsol_fcp_event(
14106 phba, idx, dma_buf, cq->isr_timestamp,
14107 cq->q_flag & HBA_NVMET_CQ_NOTIFY);
14108 return false;
14109 }
14110drop:
14111 lpfc_rq_buf_free(phba, &dma_buf->hbuf);
14112 break;
14113 case FC_STATUS_INSUFF_BUF_FRM_DISC:
14114 if (phba->nvmet_support) {
14115 tgtp = phba->targetport->private;
14116 lpfc_printf_log(phba, KERN_ERR, LOG_SLI | LOG_NVME,
14117 "6401 RQE Error x%x, posted %d err_cnt "
14118 "%d: %x %x %x\n",
14119 status, hrq->RQ_buf_posted,
14120 hrq->RQ_no_posted_buf,
14121 atomic_read(&tgtp->rcv_fcp_cmd_in),
14122 atomic_read(&tgtp->rcv_fcp_cmd_out),
14123 atomic_read(&tgtp->xmt_fcp_release));
14124 }
14125
14126
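		/* fall through */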
14127 case FC_STATUS_INSUFF_BUF_NEED_BUF:
14128 hrq->RQ_no_posted_buf++;
14129
14130 break;
14131 }
14132out:
14133 return workposted;
14134}
14135
14136
14137
14138
14139
14140
14141
14142
14143
14144
14145
14146
14147static bool
14148lpfc_sli4_fp_handle_cqe(struct lpfc_hba *phba, struct lpfc_queue *cq,
14149 struct lpfc_cqe *cqe)
14150{
14151 struct lpfc_wcqe_release wcqe;
14152 bool workposted = false;
14153
14154
14155 lpfc_sli4_pcimem_bcopy(cqe, &wcqe, sizeof(struct lpfc_cqe));
14156
14157
14158 switch (bf_get(lpfc_wcqe_c_code, &wcqe)) {
14159 case CQE_CODE_COMPL_WQE:
14160 case CQE_CODE_NVME_ERSP:
14161 cq->CQ_wq++;
14162
14163 phba->last_completion_time = jiffies;
14164 if (cq->subtype == LPFC_IO || cq->subtype == LPFC_NVME_LS)
14165 lpfc_sli4_fp_handle_fcp_wcqe(phba, cq,
14166 (struct lpfc_wcqe_complete *)&wcqe);
14167 break;
14168 case CQE_CODE_RELEASE_WQE:
14169 cq->CQ_release_wqe++;
14170
14171 lpfc_sli4_fp_handle_rel_wcqe(phba, cq,
14172 (struct lpfc_wcqe_release *)&wcqe);
14173 break;
14174 case CQE_CODE_XRI_ABORTED:
14175 cq->CQ_xri_aborted++;
14176
14177 phba->last_completion_time = jiffies;
14178 workposted = lpfc_sli4_sp_handle_abort_xri_wcqe(phba, cq,
14179 (struct sli4_wcqe_xri_aborted *)&wcqe);
14180 break;
14181 case CQE_CODE_RECEIVE_V1:
14182 case CQE_CODE_RECEIVE:
14183 phba->last_completion_time = jiffies;
14184 if (cq->subtype == LPFC_NVMET) {
14185 workposted = lpfc_sli4_nvmet_handle_rcqe(
14186 phba, cq, (struct lpfc_rcqe *)&wcqe);
14187 }
14188 break;
14189 default:
14190 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
14191 "0144 Not a valid CQE code: x%x\n",
14192 bf_get(lpfc_wcqe_c_code, &wcqe));
14193 break;
14194 }
14195 return workposted;
14196}
14197
14198
14199
14200
14201
14202
14203
14204
14205
14206
14207
14208
14209
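/**
 * lpfc_sli4_hba_handle_eqe - Dispatch a fast-path event queue entry
 * @phba: Pointer to HBA context object.
 * @eq: Pointer to the event queue the entry came from.
 * @eqe: Pointer to the event queue entry.
 *
 * Maps the CQ id carried in the EQE to a completion queue, first through
 * the cq_lookup table, then the NVMET CQ set and the NVME LS CQ, falling
 * back to the slow-path dispatcher for anything else, and schedules the
 * CQ's irqwork on its assigned CPU.
 **/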
14210static void
14211lpfc_sli4_hba_handle_eqe(struct lpfc_hba *phba, struct lpfc_queue *eq,
14212 struct lpfc_eqe *eqe)
14213{
14214 struct lpfc_queue *cq = NULL;
14215 uint32_t qidx = eq->hdwq;
14216 uint16_t cqid, id;
14217
14218 if (unlikely(bf_get_le32(lpfc_eqe_major_code, eqe) != 0)) {
14219 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
14220 "0366 Not a valid completion "
14221 "event: majorcode=x%x, minorcode=x%x\n",
14222 bf_get_le32(lpfc_eqe_major_code, eqe),
14223 bf_get_le32(lpfc_eqe_minor_code, eqe));
14224 return;
14225 }
14226
14227
14228 cqid = bf_get_le32(lpfc_eqe_resource_id, eqe);
14229
14230
14231 if (cqid <= phba->sli4_hba.cq_max) {
14232 cq = phba->sli4_hba.cq_lookup[cqid];
14233 if (cq)
14234 goto work_cq;
14235 }
14236
14237
14238 if (phba->cfg_nvmet_mrq && phba->sli4_hba.nvmet_cqset) {
14239 id = phba->sli4_hba.nvmet_cqset[0]->queue_id;
14240 if ((cqid >= id) && (cqid < (id + phba->cfg_nvmet_mrq))) {
14241
14242 cq = phba->sli4_hba.nvmet_cqset[cqid - id];
14243 goto process_cq;
14244 }
14245 }
14246
14247 if (phba->sli4_hba.nvmels_cq &&
14248 (cqid == phba->sli4_hba.nvmels_cq->queue_id)) {
14249
14250 cq = phba->sli4_hba.nvmels_cq;
14251 }
14252
14253
14254 if (cq == NULL) {
14255 lpfc_sli4_sp_handle_eqe(phba, eqe,
14256 phba->sli4_hba.hdwq[qidx].hba_eq);
14257 return;
14258 }
14259
14260process_cq:
14261 if (unlikely(cqid != cq->queue_id)) {
14262 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
14263 "0368 Miss-matched fast-path completion "
14264 "queue identifier: eqcqid=%d, fcpcqid=%d\n",
14265 cqid, cq->queue_id);
14266 return;
14267 }
14268
14269work_cq:
14270#if defined(CONFIG_SCSI_LPFC_DEBUG_FS)
14271 if (phba->ktime_on)
14272 cq->isr_timestamp = ktime_get_ns();
14273 else
14274 cq->isr_timestamp = 0;
14275#endif
14276 if (!queue_work_on(cq->chann, phba->wq, &cq->irqwork))
14277 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
14278 "0363 Cannot schedule soft IRQ "
14279 "for CQ eqcqid=%d, cqid=%d on CPU %d\n",
14280 cqid, cq->queue_id, raw_smp_processor_id());
14281}
14282
14283
14284
14285
14286
14287
14288
14289
14290
14291
14292
14293
14294
14295
14296
14297
14298static void
14299__lpfc_sli4_hba_process_cq(struct lpfc_queue *cq)
14300{
14301 struct lpfc_hba *phba = cq->phba;
14302 unsigned long delay;
14303 bool workposted = false;
14304
14305
14306 workposted |= __lpfc_sli4_process_cq(phba, cq, lpfc_sli4_fp_handle_cqe,
14307 &delay);
14308
14309 if (delay) {
14310 if (!queue_delayed_work_on(cq->chann, phba->wq,
14311 &cq->sched_irqwork, delay))
14312 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
14313 "0367 Cannot schedule soft IRQ "
14314 "for cqid=%d on CPU %d\n",
14315 cq->queue_id, cq->chann);
14316 }
14317
14318
14319 if (workposted)
14320 lpfc_worker_wake_up(phba);
14321}
14322
14323
14324
14325
14326
14327
14328
14329
14330static void
14331lpfc_sli4_hba_process_cq(struct work_struct *work)
14332{
14333 struct lpfc_queue *cq = container_of(work, struct lpfc_queue, irqwork);
14334
14335 __lpfc_sli4_hba_process_cq(cq);
14336}
14337
14338
14339
14340
14341
14342
14343
14344static void
14345lpfc_sli4_dly_hba_process_cq(struct work_struct *work)
14346{
14347 struct lpfc_queue *cq = container_of(to_delayed_work(work),
14348 struct lpfc_queue, sched_irqwork);
14349
14350 __lpfc_sli4_hba_process_cq(cq);
14351}
14352
14353
14354
14355
14356
14357
14358
14359
14360
14361
14362
14363
14364
14365
14366
14367
14368
14369
14370
14371
14372
14373
14374
14375
14376
14377
14378
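/**
 * lpfc_sli4_hba_intr_handler - Per-vector MSI-X interrupt handler for SLI-4
 * @irq: Interrupt number.
 * @dev_id: Pointer to the lpfc_hba_eq_hdl for this vector.
 *
 * Services one hardware event queue: updates the per-CPU interrupt
 * statistics, optionally switches the EQ to the maximum coalescing delay
 * when the ISR rate exceeds LPFC_EQD_ISR_TRIGGER, and processes and
 * re-arms the EQ.  If the port is not ready the EQ and its child CQs are
 * flushed instead.
 *
 * Return: IRQ_HANDLED if the EQ was processed, IRQ_NONE if the device or
 * queue is not ready (or, for non-MSI-X interrupts, when no entries were
 * found).
 **/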
14379irqreturn_t
14380lpfc_sli4_hba_intr_handler(int irq, void *dev_id)
14381{
14382 struct lpfc_hba *phba;
14383 struct lpfc_hba_eq_hdl *hba_eq_hdl;
14384 struct lpfc_queue *fpeq;
14385 unsigned long iflag;
14386 int ecount = 0;
14387 int hba_eqidx;
14388 struct lpfc_eq_intr_info *eqi;
14389
14390
14391 hba_eq_hdl = (struct lpfc_hba_eq_hdl *)dev_id;
14392 phba = hba_eq_hdl->phba;
14393 hba_eqidx = hba_eq_hdl->idx;
14394
14395 if (unlikely(!phba))
14396 return IRQ_NONE;
14397 if (unlikely(!phba->sli4_hba.hdwq))
14398 return IRQ_NONE;
14399
14400
14401 fpeq = phba->sli4_hba.hba_eq_hdl[hba_eqidx].eq;
14402 if (unlikely(!fpeq))
14403 return IRQ_NONE;
14404
14405
14406 if (unlikely(lpfc_intr_state_check(phba))) {
14407
14408 spin_lock_irqsave(&phba->hbalock, iflag);
14409 if (phba->link_state < LPFC_LINK_DOWN)
14410
14411 lpfc_sli4_eqcq_flush(phba, fpeq);
14412 spin_unlock_irqrestore(&phba->hbalock, iflag);
14413 return IRQ_NONE;
14414 }
14415
14416 eqi = this_cpu_ptr(phba->sli4_hba.eq_info);
14417 eqi->icnt++;
14418
14419 fpeq->last_cpu = raw_smp_processor_id();
14420
14421 if (eqi->icnt > LPFC_EQD_ISR_TRIGGER &&
14422 fpeq->q_flag & HBA_EQ_DELAY_CHK &&
14423 phba->cfg_auto_imax &&
14424 fpeq->q_mode != LPFC_MAX_AUTO_EQ_DELAY &&
14425 phba->sli.sli_flag & LPFC_SLI_USE_EQDR)
14426 lpfc_sli4_mod_hba_eq_delay(phba, fpeq, LPFC_MAX_AUTO_EQ_DELAY);
14427
14428
14429 ecount = lpfc_sli4_process_eq(phba, fpeq, LPFC_QUEUE_REARM);
14430
14431 if (unlikely(ecount == 0)) {
14432 fpeq->EQ_no_entry++;
14433 if (phba->intr_type == MSIX)
14434
14435 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
14436 "0358 MSI-X interrupt with no EQE\n");
14437 else
14438
14439 return IRQ_NONE;
14440 }
14441
14442 return IRQ_HANDLED;
14443}
14444
14445
14446
14447
14448
14449
14450
14451
14452
14453
14454
14455
14456
14457
14458
14459
14460
14461
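/**
 * lpfc_sli4_intr_handler - INTx/MSI interrupt handler for SLI-4 devices
 * @irq: Interrupt number.
 * @dev_id: Pointer to the lpfc_hba structure.
 *
 * With a single interrupt vector every hardware event queue may have work
 * pending, so this handler simply invokes lpfc_sli4_hba_intr_handler() for
 * each configured interrupt channel.
 *
 * Return: IRQ_HANDLED if any event queue had work, otherwise IRQ_NONE.
 **/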
14462irqreturn_t
14463lpfc_sli4_intr_handler(int irq, void *dev_id)
14464{
14465 struct lpfc_hba *phba;
14466 irqreturn_t hba_irq_rc;
14467 bool hba_handled = false;
14468 int qidx;
14469
14470
14471 phba = (struct lpfc_hba *)dev_id;
14472
14473 if (unlikely(!phba))
14474 return IRQ_NONE;
14475
14476
14477
14478
14479 for (qidx = 0; qidx < phba->cfg_irq_chann; qidx++) {
14480 hba_irq_rc = lpfc_sli4_hba_intr_handler(irq,
14481 &phba->sli4_hba.hba_eq_hdl[qidx]);
14482 if (hba_irq_rc == IRQ_HANDLED)
14483 hba_handled |= true;
14484 }
14485
14486 return (hba_handled == true) ? IRQ_HANDLED : IRQ_NONE;
14487}
14488
14489void lpfc_sli4_poll_hbtimer(struct timer_list *t)
14490{
14491 struct lpfc_hba *phba = from_timer(phba, t, cpuhp_poll_timer);
14492 struct lpfc_queue *eq;
14493 int i = 0;
14494
14495 rcu_read_lock();
14496
14497 list_for_each_entry_rcu(eq, &phba->poll_list, _poll_list)
14498 i += lpfc_sli4_poll_eq(eq, LPFC_POLL_SLOWPATH);
14499 if (!list_empty(&phba->poll_list))
14500 mod_timer(&phba->cpuhp_poll_timer,
14501 jiffies + msecs_to_jiffies(LPFC_POLL_HB));
14502
14503 rcu_read_unlock();
14504}
14505
14506inline int lpfc_sli4_poll_eq(struct lpfc_queue *eq, uint8_t path)
14507{
14508 struct lpfc_hba *phba = eq->phba;
14509 int i = 0;
14510
14511
14512
14513
14514
14515
14516
14517
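	/*
	 * Pairs with the smp_wmb() in __lpfc_sli4_switch_eqmode(): make sure
	 * the read of eq->mode below observes the mode that was published
	 * before this EQ was added to (or removed from) the poll list.
	 */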
14518 smp_rmb();
14519
14520 if (READ_ONCE(eq->mode) == LPFC_EQ_POLL)
14521
14522
14523
14524
14525
14526
14527
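		/*
		 * In poll mode the EQ is consumed without rearming the
		 * interrupt; the doorbell is rearmed again by
		 * lpfc_sli4_stop_polling() when interrupt mode resumes.
		 */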
14528 i = lpfc_sli4_process_eq(phba, eq, LPFC_QUEUE_NOARM);
14529
14530 return i;
14531}
14532
14533static inline void lpfc_sli4_add_to_poll_list(struct lpfc_queue *eq)
14534{
14535 struct lpfc_hba *phba = eq->phba;
14536
14537
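	/* Kick off the slow-path poll heartbeat if the list was empty. */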
14538 if (list_empty(&phba->poll_list))
14539 mod_timer(&phba->cpuhp_poll_timer,
14540 jiffies + msecs_to_jiffies(LPFC_POLL_HB));
14541
14542 list_add_rcu(&eq->_poll_list, &phba->poll_list);
14543 synchronize_rcu();
14544}
14545
14546static inline void lpfc_sli4_remove_from_poll_list(struct lpfc_queue *eq)
14547{
14548 struct lpfc_hba *phba = eq->phba;
14549
14550
14551
14552
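	/*
	 * Unlink under RCU and wait for in-flight slow-path pollers to drain
	 * before the heartbeat timer can safely be stopped.
	 */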
14553 list_del_rcu(&eq->_poll_list);
14554 synchronize_rcu();
14555
14556 if (list_empty(&phba->poll_list))
14557 del_timer_sync(&phba->cpuhp_poll_timer);
14558}
14559
14560void lpfc_sli4_cleanup_poll_list(struct lpfc_hba *phba)
14561{
14562 struct lpfc_queue *eq, *next;
14563
14564 list_for_each_entry_safe(eq, next, &phba->poll_list, _poll_list)
14565 list_del(&eq->_poll_list);
14566
14567 INIT_LIST_HEAD(&phba->poll_list);
14568 synchronize_rcu();
14569}
14570
14571static inline void
14572__lpfc_sli4_switch_eqmode(struct lpfc_queue *eq, uint8_t mode)
14573{
14574 if (mode == eq->mode)
14575 return;
14576
14577
14578
14579
14580
14581
14582
14583
14584
14585
14586
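	/*
	 * Publish the new mode with WRITE_ONCE() and order it before the
	 * poll-list update below; lpfc_sli4_poll_eq() reads eq->mode with
	 * READ_ONCE() after a matching smp_rmb().
	 */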
14587 WRITE_ONCE(eq->mode, mode);
14588
14589 smp_wmb();
14590
14591
14592
14593
14594
14595
14596
14597
14598
14599
14600
14601
14602
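	/*
	 * Only once the mode change is visible is the EQ added to the poll
	 * list (poll mode) or removed from it (interrupt mode).
	 */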
14603 mode ? lpfc_sli4_add_to_poll_list(eq) :
14604 lpfc_sli4_remove_from_poll_list(eq);
14605}
14606
14607void lpfc_sli4_start_polling(struct lpfc_queue *eq)
14608{
14609 __lpfc_sli4_switch_eqmode(eq, LPFC_EQ_POLL);
14610}
14611
14612void lpfc_sli4_stop_polling(struct lpfc_queue *eq)
14613{
14614 struct lpfc_hba *phba = eq->phba;
14615
14616 __lpfc_sli4_switch_eqmode(eq, LPFC_EQ_INTERRUPT);
14617
14618
14619
14620
14621
14622
14623
14624
14625 phba->sli4_hba.sli4_write_eq_db(phba, eq, 0, LPFC_QUEUE_REARM);
14626}
14627
14628
14629
14630
14631
14632
14633
14634
14635
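/**
 * lpfc_sli4_queue_free - release a queue structure and all of its memory
 * @queue: the queue structure to free
 *
 * Unlinks the queue from any list it is still on, frees every DMA-coherent
 * page on its page_list, releases the receive-queue buffer bookkeeping
 * (rqbp) if present, and finally frees the structure itself.
 **/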
14636void
14637lpfc_sli4_queue_free(struct lpfc_queue *queue)
14638{
14639 struct lpfc_dmabuf *dmabuf;
14640
14641 if (!queue)
14642 return;
14643
14644 if (!list_empty(&queue->wq_list))
14645 list_del(&queue->wq_list);
14646
14647 while (!list_empty(&queue->page_list)) {
14648 list_remove_head(&queue->page_list, dmabuf, struct lpfc_dmabuf,
14649 list);
14650 dma_free_coherent(&queue->phba->pcidev->dev, queue->page_size,
14651 dmabuf->virt, dmabuf->phys);
14652 kfree(dmabuf);
14653 }
14654 if (queue->rqbp) {
14655 lpfc_free_rq_buffer(queue->phba, queue);
14656 kfree(queue->rqbp);
14657 }
14658
14659 if (!list_empty(&queue->cpu_list))
14660 list_del(&queue->cpu_list);
14661
14662 kfree(queue);
14663 return;
14664}
14665
14666
14667
14668
14669
14670
14671
14672
14673
14674
14675
14676
14677
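/**
 * lpfc_sli4_queue_alloc - allocate and initialize a generic SLI-4 queue
 * @phba: HBA context object
 * @page_size: requested page size, used when the SLI-4 parameters page size
 *             is not supported
 * @entry_size: size in bytes of one queue entry
 * @entry_count: number of entries the queue must hold
 * @cpu: CPU whose NUMA node the allocations should favor
 *
 * Allocates the queue structure with a trailing array of per-page virtual
 * address pointers, allocates one DMA-coherent buffer per page, and
 * initializes the lists and work items used to process the queue.
 *
 * Returns the new queue on success or NULL on allocation failure.
 **/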
14678struct lpfc_queue *
14679lpfc_sli4_queue_alloc(struct lpfc_hba *phba, uint32_t page_size,
14680 uint32_t entry_size, uint32_t entry_count, int cpu)
14681{
14682 struct lpfc_queue *queue;
14683 struct lpfc_dmabuf *dmabuf;
14684 uint32_t hw_page_size = phba->sli4_hba.pc_sli4_params.if_page_sz;
14685 uint16_t x, pgcnt;
14686
14687 if (!phba->sli4_hba.pc_sli4_params.supported)
14688 hw_page_size = page_size;
14689
14690 pgcnt = ALIGN(entry_size * entry_count, hw_page_size) / hw_page_size;
14691
14692
14693 if (pgcnt > phba->sli4_hba.pc_sli4_params.wqpcnt)
14694 pgcnt = phba->sli4_hba.pc_sli4_params.wqpcnt;
14695
14696 queue = kzalloc_node(sizeof(*queue) + (sizeof(void *) * pgcnt),
14697 GFP_KERNEL, cpu_to_node(cpu));
14698 if (!queue)
14699 return NULL;
14700
14701 INIT_LIST_HEAD(&queue->list);
14702 INIT_LIST_HEAD(&queue->_poll_list);
14703 INIT_LIST_HEAD(&queue->wq_list);
14704 INIT_LIST_HEAD(&queue->wqfull_list);
14705 INIT_LIST_HEAD(&queue->page_list);
14706 INIT_LIST_HEAD(&queue->child_list);
14707 INIT_LIST_HEAD(&queue->cpu_list);
14708
14709
14710
14711
14712 queue->page_count = pgcnt;
14713 queue->q_pgs = (void **)&queue[1];
14714 queue->entry_cnt_per_pg = hw_page_size / entry_size;
14715 queue->entry_size = entry_size;
14716 queue->entry_count = entry_count;
14717 queue->page_size = hw_page_size;
14718 queue->phba = phba;
14719
14720 for (x = 0; x < queue->page_count; x++) {
14721 dmabuf = kzalloc_node(sizeof(*dmabuf), GFP_KERNEL,
14722 dev_to_node(&phba->pcidev->dev));
14723 if (!dmabuf)
14724 goto out_fail;
14725 dmabuf->virt = dma_alloc_coherent(&phba->pcidev->dev,
14726 hw_page_size, &dmabuf->phys,
14727 GFP_KERNEL);
14728 if (!dmabuf->virt) {
14729 kfree(dmabuf);
14730 goto out_fail;
14731 }
14732 dmabuf->buffer_tag = x;
14733 list_add_tail(&dmabuf->list, &queue->page_list);
14734
14735 queue->q_pgs[x] = dmabuf->virt;
14736 }
14737 INIT_WORK(&queue->irqwork, lpfc_sli4_hba_process_cq);
14738 INIT_WORK(&queue->spwork, lpfc_sli4_sp_process_cq);
14739 INIT_DELAYED_WORK(&queue->sched_irqwork, lpfc_sli4_dly_hba_process_cq);
14740 INIT_DELAYED_WORK(&queue->sched_spwork, lpfc_sli4_dly_sp_process_cq);
14741
14742
14743
14744 return queue;
14745out_fail:
14746 lpfc_sli4_queue_free(queue);
14747 return NULL;
14748}
14749
14750
14751
14752
14753
14754
14755
14756
14757
14758
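/**
 * lpfc_dual_chute_pci_bar_map - resolve a firmware BAR set to its mapping
 * @phba: HBA context object
 * @pci_barset: BAR set identifier reported by the firmware
 *
 * Returns the ioremapped base for PCI BAR 0/1, 2/3 or 4/5, or NULL when the
 * device is absent or the BAR set is not recognized.
 **/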
14759static void __iomem *
14760lpfc_dual_chute_pci_bar_map(struct lpfc_hba *phba, uint16_t pci_barset)
14761{
14762 if (!phba->pcidev)
14763 return NULL;
14764
14765 switch (pci_barset) {
14766 case WQ_PCI_BAR_0_AND_1:
14767 return phba->pci_bar0_memmap_p;
14768 case WQ_PCI_BAR_2_AND_3:
14769 return phba->pci_bar2_memmap_p;
14770 case WQ_PCI_BAR_4_AND_5:
14771 return phba->pci_bar4_memmap_p;
14772 default:
14773 break;
14774 }
14775 return NULL;
14776}
14777
14778
14779
14780
14781
14782
14783
14784
14785
14786
14787
14788
14789
14790
14791
14792
14793
14794
14795
14796
14797
14798
14799
14800
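/**
 * lpfc_modify_hba_eq_delay - change interrupt coalescing delay on HBA EQs
 * @phba: HBA context object
 * @startq: index of the first interrupt channel (EQ) to modify
 * @numq: number of EQs to modify
 * @usdelay: requested delay in microseconds, capped at 0xFFFF
 *
 * When the adapter exposes the EQ delay register, the delay is written
 * directly to each EQ; otherwise a MODIFY_EQ_DELAY mailbox command is built
 * with the delay converted to the firmware's delay multiplier and issued in
 * polling mode.
 **/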
14801void
14802lpfc_modify_hba_eq_delay(struct lpfc_hba *phba, uint32_t startq,
14803 uint32_t numq, uint32_t usdelay)
14804{
14805 struct lpfc_mbx_modify_eq_delay *eq_delay;
14806 LPFC_MBOXQ_t *mbox;
14807 struct lpfc_queue *eq;
14808 int cnt = 0, rc, length;
14809 uint32_t shdr_status, shdr_add_status;
14810 uint32_t dmult;
14811 int qidx;
14812 union lpfc_sli4_cfg_shdr *shdr;
14813
14814 if (startq >= phba->cfg_irq_chann)
14815 return;
14816
14817 if (usdelay > 0xFFFF) {
14818 lpfc_printf_log(phba, KERN_INFO, LOG_INIT | LOG_FCP | LOG_NVME,
14819 "6429 usdelay %d too large. Scaled down to "
14820 "0xFFFF.\n", usdelay);
14821 usdelay = 0xFFFF;
14822 }
14823
14824
14825 if (phba->sli.sli_flag & LPFC_SLI_USE_EQDR) {
14826 for (qidx = startq; qidx < phba->cfg_irq_chann; qidx++) {
14827 eq = phba->sli4_hba.hba_eq_hdl[qidx].eq;
14828 if (!eq)
14829 continue;
14830
14831 lpfc_sli4_mod_hba_eq_delay(phba, eq, usdelay);
14832
14833 if (++cnt >= numq)
14834 break;
14835 }
14836 return;
14837 }
14838
14839
14840
14841 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
14842 if (!mbox) {
14843 lpfc_printf_log(phba, KERN_ERR, LOG_INIT | LOG_FCP | LOG_NVME,
14844 "6428 Failed allocating mailbox cmd buffer."
14845 " EQ delay was not set.\n");
14846 return;
14847 }
14848 length = (sizeof(struct lpfc_mbx_modify_eq_delay) -
14849 sizeof(struct lpfc_sli4_cfg_mhdr));
14850 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON,
14851 LPFC_MBOX_OPCODE_MODIFY_EQ_DELAY,
14852 length, LPFC_SLI4_MBX_EMBED);
14853 eq_delay = &mbox->u.mqe.un.eq_delay;
14854
14855
14856 dmult = (usdelay * LPFC_DMULT_CONST) / LPFC_SEC_TO_USEC;
14857 if (dmult)
14858 dmult--;
14859 if (dmult > LPFC_DMULT_MAX)
14860 dmult = LPFC_DMULT_MAX;
14861
14862 for (qidx = startq; qidx < phba->cfg_irq_chann; qidx++) {
14863 eq = phba->sli4_hba.hba_eq_hdl[qidx].eq;
14864 if (!eq)
14865 continue;
14866 eq->q_mode = usdelay;
14867 eq_delay->u.request.eq[cnt].eq_id = eq->queue_id;
14868 eq_delay->u.request.eq[cnt].phase = 0;
14869 eq_delay->u.request.eq[cnt].delay_multi = dmult;
14870
14871 if (++cnt >= numq)
14872 break;
14873 }
14874 eq_delay->u.request.num_eq = cnt;
14875
14876 mbox->vport = phba->pport;
14877 mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
14878 mbox->ctx_buf = NULL;
14879 mbox->ctx_ndlp = NULL;
14880 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
14881 shdr = (union lpfc_sli4_cfg_shdr *) &eq_delay->header.cfg_shdr;
14882 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
14883 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
14884 if (shdr_status || shdr_add_status || rc) {
14885 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
14886 "2512 MODIFY_EQ_DELAY mailbox failed with "
14887 "status x%x add_status x%x, mbx status x%x\n",
14888 shdr_status, shdr_add_status, rc);
14889 }
14890 mempool_free(mbox, phba->mbox_mem_pool);
14891 return;
14892}
14893
14894
14895
14896
14897
14898
14899
14900
14901
14902
14903
14904
14905
14906
14907
14908
14909
14910
14911
14912
14913
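/**
 * lpfc_eq_create - create an Event Queue on the HBA
 * @phba: HBA context object
 * @eq: previously allocated queue structure to bind to the new EQ
 * @imax: maximum interrupt rate; coalescing itself is configured separately
 *        through lpfc_modify_hba_eq_delay()
 *
 * Issues an EQ_CREATE mailbox command describing the queue's DMA pages and
 * entry count, then records the queue id and default processing limits
 * returned by the firmware.
 *
 * Returns 0 on success, -ENOMEM on allocation failure, -EINVAL for an entry
 * count below 256, or -ENXIO on a mailbox/firmware error.
 **/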
14914int
14915lpfc_eq_create(struct lpfc_hba *phba, struct lpfc_queue *eq, uint32_t imax)
14916{
14917 struct lpfc_mbx_eq_create *eq_create;
14918 LPFC_MBOXQ_t *mbox;
14919 int rc, length, status = 0;
14920 struct lpfc_dmabuf *dmabuf;
14921 uint32_t shdr_status, shdr_add_status;
14922 union lpfc_sli4_cfg_shdr *shdr;
14923 uint16_t dmult;
14924 uint32_t hw_page_size = phba->sli4_hba.pc_sli4_params.if_page_sz;
14925
14926
14927 if (!eq)
14928 return -ENODEV;
14929 if (!phba->sli4_hba.pc_sli4_params.supported)
14930 hw_page_size = SLI4_PAGE_SIZE;
14931
14932 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
14933 if (!mbox)
14934 return -ENOMEM;
14935 length = (sizeof(struct lpfc_mbx_eq_create) -
14936 sizeof(struct lpfc_sli4_cfg_mhdr));
14937 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON,
14938 LPFC_MBOX_OPCODE_EQ_CREATE,
14939 length, LPFC_SLI4_MBX_EMBED);
14940 eq_create = &mbox->u.mqe.un.eq_create;
14941 shdr = (union lpfc_sli4_cfg_shdr *) &eq_create->header.cfg_shdr;
14942 bf_set(lpfc_mbx_eq_create_num_pages, &eq_create->u.request,
14943 eq->page_count);
14944 bf_set(lpfc_eq_context_size, &eq_create->u.request.context,
14945 LPFC_EQE_SIZE);
14946 bf_set(lpfc_eq_context_valid, &eq_create->u.request.context, 1);
14947
14948
14949 if (phba->sli4_hba.pc_sli4_params.eqav) {
14950 bf_set(lpfc_mbox_hdr_version, &shdr->request,
14951 LPFC_Q_CREATE_VERSION_2);
14952 bf_set(lpfc_eq_context_autovalid, &eq_create->u.request.context,
14953 phba->sli4_hba.pc_sli4_params.eqav);
14954 }
14955
14956
14957 dmult = 0;
14958 bf_set(lpfc_eq_context_delay_multi, &eq_create->u.request.context,
14959 dmult);
14960 switch (eq->entry_count) {
14961 default:
14962 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
14963 "0360 Unsupported EQ count. (%d)\n",
14964 eq->entry_count);
14965 if (eq->entry_count < 256) {
14966 status = -EINVAL;
14967 goto out;
14968 }
14969
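		/* otherwise fall through and use the smallest supported count */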
14970 case 256:
14971 bf_set(lpfc_eq_context_count, &eq_create->u.request.context,
14972 LPFC_EQ_CNT_256);
14973 break;
14974 case 512:
14975 bf_set(lpfc_eq_context_count, &eq_create->u.request.context,
14976 LPFC_EQ_CNT_512);
14977 break;
14978 case 1024:
14979 bf_set(lpfc_eq_context_count, &eq_create->u.request.context,
14980 LPFC_EQ_CNT_1024);
14981 break;
14982 case 2048:
14983 bf_set(lpfc_eq_context_count, &eq_create->u.request.context,
14984 LPFC_EQ_CNT_2048);
14985 break;
14986 case 4096:
14987 bf_set(lpfc_eq_context_count, &eq_create->u.request.context,
14988 LPFC_EQ_CNT_4096);
14989 break;
14990 }
14991 list_for_each_entry(dmabuf, &eq->page_list, list) {
14992 memset(dmabuf->virt, 0, hw_page_size);
14993 eq_create->u.request.page[dmabuf->buffer_tag].addr_lo =
14994 putPaddrLow(dmabuf->phys);
14995 eq_create->u.request.page[dmabuf->buffer_tag].addr_hi =
14996 putPaddrHigh(dmabuf->phys);
14997 }
14998 mbox->vport = phba->pport;
14999 mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
15000 mbox->ctx_buf = NULL;
15001 mbox->ctx_ndlp = NULL;
15002 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
15003 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
15004 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
15005 if (shdr_status || shdr_add_status || rc) {
15006 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
15007 "2500 EQ_CREATE mailbox failed with "
15008 "status x%x add_status x%x, mbx status x%x\n",
15009 shdr_status, shdr_add_status, rc);
15010 status = -ENXIO;
15011 }
15012 eq->type = LPFC_EQ;
15013 eq->subtype = LPFC_NONE;
15014 eq->queue_id = bf_get(lpfc_mbx_eq_create_q_id, &eq_create->u.response);
15015 if (eq->queue_id == 0xFFFF)
15016 status = -ENXIO;
15017 eq->host_index = 0;
15018 eq->notify_interval = LPFC_EQ_NOTIFY_INTRVL;
15019 eq->max_proc_limit = LPFC_EQ_MAX_PROC_LIMIT;
15020out:
15021 mempool_free(mbox, phba->mbox_mem_pool);
15022 return status;
15023}
15024
15025
15026
15027
15028
15029
15030
15031
15032
15033
15034
15035
15036
15037
15038
15039
15040
15041
15042
15043
15044
15045
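/**
 * lpfc_cq_create - create a Completion Queue and bind it to an EQ
 * @phba: HBA context object
 * @cq: previously allocated queue structure to bind to the new CQ
 * @eq: event queue this CQ will report to
 * @type: queue type recorded in @cq
 * @subtype: queue subtype recorded in @cq
 *
 * Issues a CQ_CREATE mailbox command with the CQ's DMA pages, entry count
 * and owning EQ, links the CQ onto the EQ's child list, and fills in the
 * queue id and processing limits from the firmware response.
 *
 * Returns 0 on success or a negative errno on failure.
 **/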
15046int
15047lpfc_cq_create(struct lpfc_hba *phba, struct lpfc_queue *cq,
15048 struct lpfc_queue *eq, uint32_t type, uint32_t subtype)
15049{
15050 struct lpfc_mbx_cq_create *cq_create;
15051 struct lpfc_dmabuf *dmabuf;
15052 LPFC_MBOXQ_t *mbox;
15053 int rc, length, status = 0;
15054 uint32_t shdr_status, shdr_add_status;
15055 union lpfc_sli4_cfg_shdr *shdr;
15056
15057
15058 if (!cq || !eq)
15059 return -ENODEV;
15060
15061 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
15062 if (!mbox)
15063 return -ENOMEM;
15064 length = (sizeof(struct lpfc_mbx_cq_create) -
15065 sizeof(struct lpfc_sli4_cfg_mhdr));
15066 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON,
15067 LPFC_MBOX_OPCODE_CQ_CREATE,
15068 length, LPFC_SLI4_MBX_EMBED);
15069 cq_create = &mbox->u.mqe.un.cq_create;
15070 shdr = (union lpfc_sli4_cfg_shdr *) &cq_create->header.cfg_shdr;
15071 bf_set(lpfc_mbx_cq_create_num_pages, &cq_create->u.request,
15072 cq->page_count);
15073 bf_set(lpfc_cq_context_event, &cq_create->u.request.context, 1);
15074 bf_set(lpfc_cq_context_valid, &cq_create->u.request.context, 1);
15075 bf_set(lpfc_mbox_hdr_version, &shdr->request,
15076 phba->sli4_hba.pc_sli4_params.cqv);
15077 if (phba->sli4_hba.pc_sli4_params.cqv == LPFC_Q_CREATE_VERSION_2) {
15078 bf_set(lpfc_mbx_cq_create_page_size, &cq_create->u.request,
15079 (cq->page_size / SLI4_PAGE_SIZE));
15080 bf_set(lpfc_cq_eq_id_2, &cq_create->u.request.context,
15081 eq->queue_id);
15082 bf_set(lpfc_cq_context_autovalid, &cq_create->u.request.context,
15083 phba->sli4_hba.pc_sli4_params.cqav);
15084 } else {
15085 bf_set(lpfc_cq_eq_id, &cq_create->u.request.context,
15086 eq->queue_id);
15087 }
15088 switch (cq->entry_count) {
15089 case 2048:
15090 case 4096:
15091 if (phba->sli4_hba.pc_sli4_params.cqv ==
15092 LPFC_Q_CREATE_VERSION_2) {
15093 cq_create->u.request.context.lpfc_cq_context_count =
15094 cq->entry_count;
15095 bf_set(lpfc_cq_context_count,
15096 &cq_create->u.request.context,
15097 LPFC_CQ_CNT_WORD7);
15098 break;
15099 }
15100
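		/* pre-version-2 firmware: fall through, count is unsupported */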
15101 default:
15102 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
15103 "0361 Unsupported CQ count: "
15104 "entry cnt %d sz %d pg cnt %d\n",
15105 cq->entry_count, cq->entry_size,
15106 cq->page_count);
15107 if (cq->entry_count < 256) {
15108 status = -EINVAL;
15109 goto out;
15110 }
15111
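		/* otherwise fall through and use the smallest supported count */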
15112 case 256:
15113 bf_set(lpfc_cq_context_count, &cq_create->u.request.context,
15114 LPFC_CQ_CNT_256);
15115 break;
15116 case 512:
15117 bf_set(lpfc_cq_context_count, &cq_create->u.request.context,
15118 LPFC_CQ_CNT_512);
15119 break;
15120 case 1024:
15121 bf_set(lpfc_cq_context_count, &cq_create->u.request.context,
15122 LPFC_CQ_CNT_1024);
15123 break;
15124 }
15125 list_for_each_entry(dmabuf, &cq->page_list, list) {
15126 memset(dmabuf->virt, 0, cq->page_size);
15127 cq_create->u.request.page[dmabuf->buffer_tag].addr_lo =
15128 putPaddrLow(dmabuf->phys);
15129 cq_create->u.request.page[dmabuf->buffer_tag].addr_hi =
15130 putPaddrHigh(dmabuf->phys);
15131 }
15132 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
15133
15134
15135 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
15136 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
15137 if (shdr_status || shdr_add_status || rc) {
15138 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
15139 "2501 CQ_CREATE mailbox failed with "
15140 "status x%x add_status x%x, mbx status x%x\n",
15141 shdr_status, shdr_add_status, rc);
15142 status = -ENXIO;
15143 goto out;
15144 }
15145 cq->queue_id = bf_get(lpfc_mbx_cq_create_q_id, &cq_create->u.response);
15146 if (cq->queue_id == 0xFFFF) {
15147 status = -ENXIO;
15148 goto out;
15149 }
15150
15151 list_add_tail(&cq->list, &eq->child_list);
15152
15153 cq->type = type;
15154 cq->subtype = subtype;
15155 cq->queue_id = bf_get(lpfc_mbx_cq_create_q_id, &cq_create->u.response);
15156 cq->assoc_qid = eq->queue_id;
15157 cq->assoc_qp = eq;
15158 cq->host_index = 0;
15159 cq->notify_interval = LPFC_CQ_NOTIFY_INTRVL;
15160 cq->max_proc_limit = min(phba->cfg_cq_max_proc_limit, cq->entry_count);
15161
15162 if (cq->queue_id > phba->sli4_hba.cq_max)
15163 phba->sli4_hba.cq_max = cq->queue_id;
15164out:
15165 mempool_free(mbox, phba->mbox_mem_pool);
15166 return status;
15167}
15168
15169
15170
15171
15172
15173
15174
15175
15176
15177
15178
15179
15180
15181
15182
15183
15184
15185
15186
15187
15188
15189
15190
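/**
 * lpfc_cq_create_set - create the set of Completion Queues for NVMET MRQs
 * @phba: HBA context object
 * @cqp: array of queue structures, one per CQ in the set
 * @hdwq: hardware queue array supplying the EQ paired with each CQ
 * @type: queue type recorded in every CQ
 * @subtype: queue subtype recorded in every CQ
 *
 * Builds one non-embedded CQ_CREATE_SET mailbox command covering
 * phba->cfg_nvmet_mrq queues, links each CQ onto its EQ's child list, and
 * assigns sequential queue ids starting from the base id returned by the
 * firmware.
 *
 * Returns 0 on success or a negative errno on failure.
 **/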
15191int
15192lpfc_cq_create_set(struct lpfc_hba *phba, struct lpfc_queue **cqp,
15193 struct lpfc_sli4_hdw_queue *hdwq, uint32_t type,
15194 uint32_t subtype)
15195{
15196 struct lpfc_queue *cq;
15197 struct lpfc_queue *eq;
15198 struct lpfc_mbx_cq_create_set *cq_set;
15199 struct lpfc_dmabuf *dmabuf;
15200 LPFC_MBOXQ_t *mbox;
15201 int rc, length, alloclen, status = 0;
15202 int cnt, idx, numcq, page_idx = 0;
15203 uint32_t shdr_status, shdr_add_status;
15204 union lpfc_sli4_cfg_shdr *shdr;
15205 uint32_t hw_page_size = phba->sli4_hba.pc_sli4_params.if_page_sz;
15206
15207
15208 numcq = phba->cfg_nvmet_mrq;
15209 if (!cqp || !hdwq || !numcq)
15210 return -ENODEV;
15211
15212 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
15213 if (!mbox)
15214 return -ENOMEM;
15215
15216 length = sizeof(struct lpfc_mbx_cq_create_set);
15217 length += ((numcq * cqp[0]->page_count) *
15218 sizeof(struct dma_address));
15219 alloclen = lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_FCOE,
15220 LPFC_MBOX_OPCODE_FCOE_CQ_CREATE_SET, length,
15221 LPFC_SLI4_MBX_NEMBED);
15222 if (alloclen < length) {
15223 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
15224 "3098 Allocated DMA memory size (%d) is "
15225 "less than the requested DMA memory size "
15226 "(%d)\n", alloclen, length);
15227 status = -ENOMEM;
15228 goto out;
15229 }
15230 cq_set = mbox->sge_array->addr[0];
15231 shdr = (union lpfc_sli4_cfg_shdr *)&cq_set->cfg_shdr;
15232 bf_set(lpfc_mbox_hdr_version, &shdr->request, 0);
15233
15234 for (idx = 0; idx < numcq; idx++) {
15235 cq = cqp[idx];
15236 eq = hdwq[idx].hba_eq;
15237 if (!cq || !eq) {
15238 status = -ENOMEM;
15239 goto out;
15240 }
15241 if (!phba->sli4_hba.pc_sli4_params.supported)
15242 hw_page_size = cq->page_size;
15243
15244 switch (idx) {
15245 case 0:
15246 bf_set(lpfc_mbx_cq_create_set_page_size,
15247 &cq_set->u.request,
15248 (hw_page_size / SLI4_PAGE_SIZE));
15249 bf_set(lpfc_mbx_cq_create_set_num_pages,
15250 &cq_set->u.request, cq->page_count);
15251 bf_set(lpfc_mbx_cq_create_set_evt,
15252 &cq_set->u.request, 1);
15253 bf_set(lpfc_mbx_cq_create_set_valid,
15254 &cq_set->u.request, 1);
15255 bf_set(lpfc_mbx_cq_create_set_cqe_size,
15256 &cq_set->u.request, 0);
15257 bf_set(lpfc_mbx_cq_create_set_num_cq,
15258 &cq_set->u.request, numcq);
15259 bf_set(lpfc_mbx_cq_create_set_autovalid,
15260 &cq_set->u.request,
15261 phba->sli4_hba.pc_sli4_params.cqav);
15262 switch (cq->entry_count) {
15263 case 2048:
15264 case 4096:
15265 if (phba->sli4_hba.pc_sli4_params.cqv ==
15266 LPFC_Q_CREATE_VERSION_2) {
15267 bf_set(lpfc_mbx_cq_create_set_cqe_cnt,
15268 &cq_set->u.request,
15269 cq->entry_count);
15270 bf_set(lpfc_mbx_cq_create_set_cqe_cnt,
15271 &cq_set->u.request,
15272 LPFC_CQ_CNT_WORD7);
15273 break;
15274 }
15275
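			/* pre-version-2 firmware: fall through, count is unsupported */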
15276 default:
15277 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
15278 "3118 Bad CQ count. (%d)\n",
15279 cq->entry_count);
15280 if (cq->entry_count < 256) {
15281 status = -EINVAL;
15282 goto out;
15283 }
15284
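			/* otherwise fall through and use the smallest supported count */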
15285 case 256:
15286 bf_set(lpfc_mbx_cq_create_set_cqe_cnt,
15287 &cq_set->u.request, LPFC_CQ_CNT_256);
15288 break;
15289 case 512:
15290 bf_set(lpfc_mbx_cq_create_set_cqe_cnt,
15291 &cq_set->u.request, LPFC_CQ_CNT_512);
15292 break;
15293 case 1024:
15294 bf_set(lpfc_mbx_cq_create_set_cqe_cnt,
15295 &cq_set->u.request, LPFC_CQ_CNT_1024);
15296 break;
15297 }
15298 bf_set(lpfc_mbx_cq_create_set_eq_id0,
15299 &cq_set->u.request, eq->queue_id);
15300 break;
15301 case 1:
15302 bf_set(lpfc_mbx_cq_create_set_eq_id1,
15303 &cq_set->u.request, eq->queue_id);
15304 break;
15305 case 2:
15306 bf_set(lpfc_mbx_cq_create_set_eq_id2,
15307 &cq_set->u.request, eq->queue_id);
15308 break;
15309 case 3:
15310 bf_set(lpfc_mbx_cq_create_set_eq_id3,
15311 &cq_set->u.request, eq->queue_id);
15312 break;
15313 case 4:
15314 bf_set(lpfc_mbx_cq_create_set_eq_id4,
15315 &cq_set->u.request, eq->queue_id);
15316 break;
15317 case 5:
15318 bf_set(lpfc_mbx_cq_create_set_eq_id5,
15319 &cq_set->u.request, eq->queue_id);
15320 break;
15321 case 6:
15322 bf_set(lpfc_mbx_cq_create_set_eq_id6,
15323 &cq_set->u.request, eq->queue_id);
15324 break;
15325 case 7:
15326 bf_set(lpfc_mbx_cq_create_set_eq_id7,
15327 &cq_set->u.request, eq->queue_id);
15328 break;
15329 case 8:
15330 bf_set(lpfc_mbx_cq_create_set_eq_id8,
15331 &cq_set->u.request, eq->queue_id);
15332 break;
15333 case 9:
15334 bf_set(lpfc_mbx_cq_create_set_eq_id9,
15335 &cq_set->u.request, eq->queue_id);
15336 break;
15337 case 10:
15338 bf_set(lpfc_mbx_cq_create_set_eq_id10,
15339 &cq_set->u.request, eq->queue_id);
15340 break;
15341 case 11:
15342 bf_set(lpfc_mbx_cq_create_set_eq_id11,
15343 &cq_set->u.request, eq->queue_id);
15344 break;
15345 case 12:
15346 bf_set(lpfc_mbx_cq_create_set_eq_id12,
15347 &cq_set->u.request, eq->queue_id);
15348 break;
15349 case 13:
15350 bf_set(lpfc_mbx_cq_create_set_eq_id13,
15351 &cq_set->u.request, eq->queue_id);
15352 break;
15353 case 14:
15354 bf_set(lpfc_mbx_cq_create_set_eq_id14,
15355 &cq_set->u.request, eq->queue_id);
15356 break;
15357 case 15:
15358 bf_set(lpfc_mbx_cq_create_set_eq_id15,
15359 &cq_set->u.request, eq->queue_id);
15360 break;
15361 }
15362
15363
15364 list_add_tail(&cq->list, &eq->child_list);
15365
15366 cq->type = type;
15367 cq->subtype = subtype;
15368 cq->assoc_qid = eq->queue_id;
15369 cq->assoc_qp = eq;
15370 cq->host_index = 0;
15371 cq->notify_interval = LPFC_CQ_NOTIFY_INTRVL;
15372 cq->max_proc_limit = min(phba->cfg_cq_max_proc_limit,
15373 cq->entry_count);
15374 cq->chann = idx;
15375
15376 rc = 0;
15377 list_for_each_entry(dmabuf, &cq->page_list, list) {
15378 memset(dmabuf->virt, 0, hw_page_size);
15379 cnt = page_idx + dmabuf->buffer_tag;
15380 cq_set->u.request.page[cnt].addr_lo =
15381 putPaddrLow(dmabuf->phys);
15382 cq_set->u.request.page[cnt].addr_hi =
15383 putPaddrHigh(dmabuf->phys);
15384 rc++;
15385 }
15386 page_idx += rc;
15387 }
15388
15389 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
15390
15391
15392 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
15393 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
15394 if (shdr_status || shdr_add_status || rc) {
15395 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
15396 "3119 CQ_CREATE_SET mailbox failed with "
15397 "status x%x add_status x%x, mbx status x%x\n",
15398 shdr_status, shdr_add_status, rc);
15399 status = -ENXIO;
15400 goto out;
15401 }
15402 rc = bf_get(lpfc_mbx_cq_create_set_base_id, &cq_set->u.response);
15403 if (rc == 0xFFFF) {
15404 status = -ENXIO;
15405 goto out;
15406 }
15407
15408 for (idx = 0; idx < numcq; idx++) {
15409 cq = cqp[idx];
15410 cq->queue_id = rc + idx;
15411 if (cq->queue_id > phba->sli4_hba.cq_max)
15412 phba->sli4_hba.cq_max = cq->queue_id;
15413 }
15414
15415out:
15416 lpfc_sli4_mbox_cmd_free(phba, mbox);
15417 return status;
15418}
15419
15420
15421
15422
15423
15424
15425
15426
15427
15428
15429
15430
15431
15432
15433
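/**
 * lpfc_mq_create_fb_init - build the fallback MQ_CREATE mailbox command
 * @phba: HBA context object
 * @mq: mailbox queue being created
 * @mbox: mailbox command buffer to populate
 * @cq: completion queue the MQ will use
 *
 * Fills in the legacy (non-extended) MQ_CREATE request.  Used by
 * lpfc_mq_create() when the firmware rejects MQ_CREATE_EXT.
 **/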
15434static void
15435lpfc_mq_create_fb_init(struct lpfc_hba *phba, struct lpfc_queue *mq,
15436 LPFC_MBOXQ_t *mbox, struct lpfc_queue *cq)
15437{
15438 struct lpfc_mbx_mq_create *mq_create;
15439 struct lpfc_dmabuf *dmabuf;
15440 int length;
15441
15442 length = (sizeof(struct lpfc_mbx_mq_create) -
15443 sizeof(struct lpfc_sli4_cfg_mhdr));
15444 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON,
15445 LPFC_MBOX_OPCODE_MQ_CREATE,
15446 length, LPFC_SLI4_MBX_EMBED);
15447 mq_create = &mbox->u.mqe.un.mq_create;
15448 bf_set(lpfc_mbx_mq_create_num_pages, &mq_create->u.request,
15449 mq->page_count);
15450 bf_set(lpfc_mq_context_cq_id, &mq_create->u.request.context,
15451 cq->queue_id);
15452 bf_set(lpfc_mq_context_valid, &mq_create->u.request.context, 1);
15453 switch (mq->entry_count) {
15454 case 16:
15455 bf_set(lpfc_mq_context_ring_size, &mq_create->u.request.context,
15456 LPFC_MQ_RING_SIZE_16);
15457 break;
15458 case 32:
15459 bf_set(lpfc_mq_context_ring_size, &mq_create->u.request.context,
15460 LPFC_MQ_RING_SIZE_32);
15461 break;
15462 case 64:
15463 bf_set(lpfc_mq_context_ring_size, &mq_create->u.request.context,
15464 LPFC_MQ_RING_SIZE_64);
15465 break;
15466 case 128:
15467 bf_set(lpfc_mq_context_ring_size, &mq_create->u.request.context,
15468 LPFC_MQ_RING_SIZE_128);
15469 break;
15470 }
15471 list_for_each_entry(dmabuf, &mq->page_list, list) {
15472 mq_create->u.request.page[dmabuf->buffer_tag].addr_lo =
15473 putPaddrLow(dmabuf->phys);
15474 mq_create->u.request.page[dmabuf->buffer_tag].addr_hi =
15475 putPaddrHigh(dmabuf->phys);
15476 }
15477}
15478
15479
15480
15481
15482
15483
15484
15485
15486
15487
15488
15489
15490
15491
15492
15493
15494
15495
15496
15497
15498
15499
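/**
 * lpfc_mq_create - create the Mailbox Queue on the HBA
 * @phba: HBA context object
 * @mq: previously allocated queue structure to bind to the new MQ
 * @cq: completion queue the MQ will use
 * @subtype: queue subtype recorded in @mq
 *
 * First issues MQ_CREATE_EXT, which also enables the asynchronous event
 * groups (link, FIP, group 5, FC and SLI); if the firmware rejects it, the
 * legacy MQ_CREATE command is retried via lpfc_mq_create_fb_init().  On
 * success the MQ is linked onto the CQ's child list.
 *
 * Returns 0 on success or a negative errno on failure.
 **/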
15500int32_t
15501lpfc_mq_create(struct lpfc_hba *phba, struct lpfc_queue *mq,
15502 struct lpfc_queue *cq, uint32_t subtype)
15503{
15504 struct lpfc_mbx_mq_create *mq_create;
15505 struct lpfc_mbx_mq_create_ext *mq_create_ext;
15506 struct lpfc_dmabuf *dmabuf;
15507 LPFC_MBOXQ_t *mbox;
15508 int rc, length, status = 0;
15509 uint32_t shdr_status, shdr_add_status;
15510 union lpfc_sli4_cfg_shdr *shdr;
15511 uint32_t hw_page_size = phba->sli4_hba.pc_sli4_params.if_page_sz;
15512
15513
15514 if (!mq || !cq)
15515 return -ENODEV;
15516 if (!phba->sli4_hba.pc_sli4_params.supported)
15517 hw_page_size = SLI4_PAGE_SIZE;
15518
15519 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
15520 if (!mbox)
15521 return -ENOMEM;
15522 length = (sizeof(struct lpfc_mbx_mq_create_ext) -
15523 sizeof(struct lpfc_sli4_cfg_mhdr));
15524 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON,
15525 LPFC_MBOX_OPCODE_MQ_CREATE_EXT,
15526 length, LPFC_SLI4_MBX_EMBED);
15527
15528 mq_create_ext = &mbox->u.mqe.un.mq_create_ext;
15529 shdr = (union lpfc_sli4_cfg_shdr *) &mq_create_ext->header.cfg_shdr;
15530 bf_set(lpfc_mbx_mq_create_ext_num_pages,
15531 &mq_create_ext->u.request, mq->page_count);
15532 bf_set(lpfc_mbx_mq_create_ext_async_evt_link,
15533 &mq_create_ext->u.request, 1);
15534 bf_set(lpfc_mbx_mq_create_ext_async_evt_fip,
15535 &mq_create_ext->u.request, 1);
15536 bf_set(lpfc_mbx_mq_create_ext_async_evt_group5,
15537 &mq_create_ext->u.request, 1);
15538 bf_set(lpfc_mbx_mq_create_ext_async_evt_fc,
15539 &mq_create_ext->u.request, 1);
15540 bf_set(lpfc_mbx_mq_create_ext_async_evt_sli,
15541 &mq_create_ext->u.request, 1);
15542 bf_set(lpfc_mq_context_valid, &mq_create_ext->u.request.context, 1);
15543 bf_set(lpfc_mbox_hdr_version, &shdr->request,
15544 phba->sli4_hba.pc_sli4_params.mqv);
15545 if (phba->sli4_hba.pc_sli4_params.mqv == LPFC_Q_CREATE_VERSION_1)
15546 bf_set(lpfc_mbx_mq_create_ext_cq_id, &mq_create_ext->u.request,
15547 cq->queue_id);
15548 else
15549 bf_set(lpfc_mq_context_cq_id, &mq_create_ext->u.request.context,
15550 cq->queue_id);
15551 switch (mq->entry_count) {
15552 default:
15553 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
15554 "0362 Unsupported MQ count. (%d)\n",
15555 mq->entry_count);
15556 if (mq->entry_count < 16) {
15557 status = -EINVAL;
15558 goto out;
15559 }
15560
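		/* otherwise fall through and use the smallest ring size */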
15561 case 16:
15562 bf_set(lpfc_mq_context_ring_size,
15563 &mq_create_ext->u.request.context,
15564 LPFC_MQ_RING_SIZE_16);
15565 break;
15566 case 32:
15567 bf_set(lpfc_mq_context_ring_size,
15568 &mq_create_ext->u.request.context,
15569 LPFC_MQ_RING_SIZE_32);
15570 break;
15571 case 64:
15572 bf_set(lpfc_mq_context_ring_size,
15573 &mq_create_ext->u.request.context,
15574 LPFC_MQ_RING_SIZE_64);
15575 break;
15576 case 128:
15577 bf_set(lpfc_mq_context_ring_size,
15578 &mq_create_ext->u.request.context,
15579 LPFC_MQ_RING_SIZE_128);
15580 break;
15581 }
15582 list_for_each_entry(dmabuf, &mq->page_list, list) {
15583 memset(dmabuf->virt, 0, hw_page_size);
15584 mq_create_ext->u.request.page[dmabuf->buffer_tag].addr_lo =
15585 putPaddrLow(dmabuf->phys);
15586 mq_create_ext->u.request.page[dmabuf->buffer_tag].addr_hi =
15587 putPaddrHigh(dmabuf->phys);
15588 }
15589 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
15590 mq->queue_id = bf_get(lpfc_mbx_mq_create_q_id,
15591 &mq_create_ext->u.response);
15592 if (rc != MBX_SUCCESS) {
15593 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
15594 "2795 MQ_CREATE_EXT failed with "
15595 "status x%x. Failback to MQ_CREATE.\n",
15596 rc);
15597 lpfc_mq_create_fb_init(phba, mq, mbox, cq);
15598 mq_create = &mbox->u.mqe.un.mq_create;
15599 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
15600 shdr = (union lpfc_sli4_cfg_shdr *) &mq_create->header.cfg_shdr;
15601 mq->queue_id = bf_get(lpfc_mbx_mq_create_q_id,
15602 &mq_create->u.response);
15603 }
15604
15605
15606 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
15607 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
15608 if (shdr_status || shdr_add_status || rc) {
15609 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
15610 "2502 MQ_CREATE mailbox failed with "
15611 "status x%x add_status x%x, mbx status x%x\n",
15612 shdr_status, shdr_add_status, rc);
15613 status = -ENXIO;
15614 goto out;
15615 }
15616 if (mq->queue_id == 0xFFFF) {
15617 status = -ENXIO;
15618 goto out;
15619 }
15620 mq->type = LPFC_MQ;
15621 mq->assoc_qid = cq->queue_id;
15622 mq->subtype = subtype;
15623 mq->host_index = 0;
15624 mq->hba_index = 0;
15625
15626
15627 list_add_tail(&mq->list, &cq->child_list);
15628out:
15629 mempool_free(mbox, phba->mbox_mem_pool);
15630 return status;
15631}
15632
15633
15634
15635
15636
15637
15638
15639
15640
15641
15642
15643
15644
15645
15646
15647
15648
15649
15650
15651
15652
15653
15654
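/**
 * lpfc_wq_create - create a Work Queue on the HBA
 * @phba: HBA context object
 * @wq: previously allocated queue structure to bind to the new WQ
 * @cq: completion queue the WQ will post completions to
 * @subtype: queue subtype recorded in @wq
 *
 * Issues a WQ_CREATE mailbox command, using the version 1 request when
 * 128-byte WQEs or large queue pages are in use, then maps the doorbell
 * register reported by the firmware.  When Direct Packet Push (DPP) is
 * granted, the DPP region is also mapped and, on x86, marked
 * write-combining.
 *
 * Returns 0 on success or a negative errno on failure.
 **/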
15655int
15656lpfc_wq_create(struct lpfc_hba *phba, struct lpfc_queue *wq,
15657 struct lpfc_queue *cq, uint32_t subtype)
15658{
15659 struct lpfc_mbx_wq_create *wq_create;
15660 struct lpfc_dmabuf *dmabuf;
15661 LPFC_MBOXQ_t *mbox;
15662 int rc, length, status = 0;
15663 uint32_t shdr_status, shdr_add_status;
15664 union lpfc_sli4_cfg_shdr *shdr;
15665 uint32_t hw_page_size = phba->sli4_hba.pc_sli4_params.if_page_sz;
15666 struct dma_address *page;
15667 void __iomem *bar_memmap_p;
15668 uint32_t db_offset;
15669 uint16_t pci_barset;
15670 uint8_t dpp_barset;
15671 uint32_t dpp_offset;
15672 unsigned long pg_addr;
15673 uint8_t wq_create_version;
15674
15675
15676 if (!wq || !cq)
15677 return -ENODEV;
15678 if (!phba->sli4_hba.pc_sli4_params.supported)
15679 hw_page_size = wq->page_size;
15680
15681 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
15682 if (!mbox)
15683 return -ENOMEM;
15684 length = (sizeof(struct lpfc_mbx_wq_create) -
15685 sizeof(struct lpfc_sli4_cfg_mhdr));
15686 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_FCOE,
15687 LPFC_MBOX_OPCODE_FCOE_WQ_CREATE,
15688 length, LPFC_SLI4_MBX_EMBED);
15689 wq_create = &mbox->u.mqe.un.wq_create;
15690 shdr = (union lpfc_sli4_cfg_shdr *) &wq_create->header.cfg_shdr;
15691 bf_set(lpfc_mbx_wq_create_num_pages, &wq_create->u.request,
15692 wq->page_count);
15693 bf_set(lpfc_mbx_wq_create_cq_id, &wq_create->u.request,
15694 cq->queue_id);
15695
15696
15697 bf_set(lpfc_mbox_hdr_version, &shdr->request,
15698 phba->sli4_hba.pc_sli4_params.wqv);
15699
15700 if ((phba->sli4_hba.pc_sli4_params.wqsize & LPFC_WQ_SZ128_SUPPORT) ||
15701 (wq->page_size > SLI4_PAGE_SIZE))
15702 wq_create_version = LPFC_Q_CREATE_VERSION_1;
15703 else
15704 wq_create_version = LPFC_Q_CREATE_VERSION_0;
15705
15711
15712 switch (wq_create_version) {
15713 case LPFC_Q_CREATE_VERSION_1:
15714 bf_set(lpfc_mbx_wq_create_wqe_count, &wq_create->u.request_1,
15715 wq->entry_count);
15716 bf_set(lpfc_mbox_hdr_version, &shdr->request,
15717 LPFC_Q_CREATE_VERSION_1);
15718
15719 switch (wq->entry_size) {
15720 default:
15721 case 64:
15722 bf_set(lpfc_mbx_wq_create_wqe_size,
15723 &wq_create->u.request_1,
15724 LPFC_WQ_WQE_SIZE_64);
15725 break;
15726 case 128:
15727 bf_set(lpfc_mbx_wq_create_wqe_size,
15728 &wq_create->u.request_1,
15729 LPFC_WQ_WQE_SIZE_128);
15730 break;
15731 }
15732
15733 bf_set(lpfc_mbx_wq_create_dpp_req, &wq_create->u.request_1, 1);
15734 bf_set(lpfc_mbx_wq_create_page_size,
15735 &wq_create->u.request_1,
15736 (wq->page_size / SLI4_PAGE_SIZE));
15737 page = wq_create->u.request_1.page;
15738 break;
15739 default:
15740 page = wq_create->u.request.page;
15741 break;
15742 }
15743
15744 list_for_each_entry(dmabuf, &wq->page_list, list) {
15745 memset(dmabuf->virt, 0, hw_page_size);
15746 page[dmabuf->buffer_tag].addr_lo = putPaddrLow(dmabuf->phys);
15747 page[dmabuf->buffer_tag].addr_hi = putPaddrHigh(dmabuf->phys);
15748 }
15749
15750 if (phba->sli4_hba.fw_func_mode & LPFC_DUA_MODE)
15751 bf_set(lpfc_mbx_wq_create_dua, &wq_create->u.request, 1);
15752
15753 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
15754
15755 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
15756 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
15757 if (shdr_status || shdr_add_status || rc) {
15758 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
15759 "2503 WQ_CREATE mailbox failed with "
15760 "status x%x add_status x%x, mbx status x%x\n",
15761 shdr_status, shdr_add_status, rc);
15762 status = -ENXIO;
15763 goto out;
15764 }
15765
15766 if (wq_create_version == LPFC_Q_CREATE_VERSION_0)
15767 wq->queue_id = bf_get(lpfc_mbx_wq_create_q_id,
15768 &wq_create->u.response);
15769 else
15770 wq->queue_id = bf_get(lpfc_mbx_wq_create_v1_q_id,
15771 &wq_create->u.response_1);
15772
15773 if (wq->queue_id == 0xFFFF) {
15774 status = -ENXIO;
15775 goto out;
15776 }
15777
15778 wq->db_format = LPFC_DB_LIST_FORMAT;
15779 if (wq_create_version == LPFC_Q_CREATE_VERSION_0) {
15780 if (phba->sli4_hba.fw_func_mode & LPFC_DUA_MODE) {
15781 wq->db_format = bf_get(lpfc_mbx_wq_create_db_format,
15782 &wq_create->u.response);
15783 if ((wq->db_format != LPFC_DB_LIST_FORMAT) &&
15784 (wq->db_format != LPFC_DB_RING_FORMAT)) {
15785 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
15786 "3265 WQ[%d] doorbell format "
15787 "not supported: x%x\n",
15788 wq->queue_id, wq->db_format);
15789 status = -EINVAL;
15790 goto out;
15791 }
15792 pci_barset = bf_get(lpfc_mbx_wq_create_bar_set,
15793 &wq_create->u.response);
15794 bar_memmap_p = lpfc_dual_chute_pci_bar_map(phba,
15795 pci_barset);
15796 if (!bar_memmap_p) {
15797 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
15798 "3263 WQ[%d] failed to memmap "
15799 "pci barset:x%x\n",
15800 wq->queue_id, pci_barset);
15801 status = -ENOMEM;
15802 goto out;
15803 }
15804 db_offset = wq_create->u.response.doorbell_offset;
15805 if ((db_offset != LPFC_ULP0_WQ_DOORBELL) &&
15806 (db_offset != LPFC_ULP1_WQ_DOORBELL)) {
15807 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
15808 "3252 WQ[%d] doorbell offset "
15809 "not supported: x%x\n",
15810 wq->queue_id, db_offset);
15811 status = -EINVAL;
15812 goto out;
15813 }
15814 wq->db_regaddr = bar_memmap_p + db_offset;
15815 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
15816 "3264 WQ[%d]: barset:x%x, offset:x%x, "
15817 "format:x%x\n", wq->queue_id,
15818 pci_barset, db_offset, wq->db_format);
15819 } else
15820 wq->db_regaddr = phba->sli4_hba.WQDBregaddr;
15821 } else {
15822
15823 wq->dpp_enable = bf_get(lpfc_mbx_wq_create_dpp_rsp,
15824 &wq_create->u.response_1);
15825 if (wq->dpp_enable) {
15826 pci_barset = bf_get(lpfc_mbx_wq_create_v1_bar_set,
15827 &wq_create->u.response_1);
15828 bar_memmap_p = lpfc_dual_chute_pci_bar_map(phba,
15829 pci_barset);
15830 if (!bar_memmap_p) {
15831 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
15832 "3267 WQ[%d] failed to memmap "
15833 "pci barset:x%x\n",
15834 wq->queue_id, pci_barset);
15835 status = -ENOMEM;
15836 goto out;
15837 }
15838 db_offset = wq_create->u.response_1.doorbell_offset;
15839 wq->db_regaddr = bar_memmap_p + db_offset;
15840 wq->dpp_id = bf_get(lpfc_mbx_wq_create_dpp_id,
15841 &wq_create->u.response_1);
15842 dpp_barset = bf_get(lpfc_mbx_wq_create_dpp_bar,
15843 &wq_create->u.response_1);
15844 bar_memmap_p = lpfc_dual_chute_pci_bar_map(phba,
15845 dpp_barset);
15846 if (!bar_memmap_p) {
15847 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
15848 "3268 WQ[%d] failed to memmap "
15849 "pci barset:x%x\n",
15850 wq->queue_id, dpp_barset);
15851 status = -ENOMEM;
15852 goto out;
15853 }
15854 dpp_offset = wq_create->u.response_1.dpp_offset;
15855 wq->dpp_regaddr = bar_memmap_p + dpp_offset;
15856 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
15857 "3271 WQ[%d]: barset:x%x, offset:x%x, "
15858 "dpp_id:x%x dpp_barset:x%x "
15859 "dpp_offset:x%x\n",
15860 wq->queue_id, pci_barset, db_offset,
15861 wq->dpp_id, dpp_barset, dpp_offset);
15862
15863
15864 pg_addr = (unsigned long)(wq->dpp_regaddr) & PAGE_MASK;
15865#ifdef CONFIG_X86
15866 rc = set_memory_wc(pg_addr, 1);
15867 if (rc) {
15868 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
15869 "3272 Cannot setup Combined "
15870 "Write on WQ[%d] - disable DPP\n",
15871 wq->queue_id);
15872 phba->cfg_enable_dpp = 0;
15873 }
15874#else
15875 phba->cfg_enable_dpp = 0;
15876#endif
15877 } else
15878 wq->db_regaddr = phba->sli4_hba.WQDBregaddr;
15879 }
15880 wq->pring = kzalloc(sizeof(struct lpfc_sli_ring), GFP_KERNEL);
15881 if (wq->pring == NULL) {
15882 status = -ENOMEM;
15883 goto out;
15884 }
15885 wq->type = LPFC_WQ;
15886 wq->assoc_qid = cq->queue_id;
15887 wq->subtype = subtype;
15888 wq->host_index = 0;
15889 wq->hba_index = 0;
15890 wq->notify_interval = LPFC_WQ_NOTIFY_INTRVL;
15891
15892
15893 list_add_tail(&wq->list, &cq->child_list);
15894out:
15895 mempool_free(mbox, phba->mbox_mem_pool);
15896 return status;
15897}
15898
15899
15900
15901
15902
15903
15904
15905
15906
15907
15908
15909
15910
15911
15912
15913
15914
15915
15916
15917
15918
15919
15920
15921
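/**
 * lpfc_rq_create - create a Header/Data Receive Queue pair on the HBA
 * @phba: HBA context object
 * @hrq: receive queue for frame headers
 * @drq: receive queue for frame data buffers
 * @cq: completion queue both RQs will use
 * @subtype: queue subtype recorded in both RQs
 *
 * Issues one RQ_CREATE mailbox command for the header queue and a second
 * for the data queue, maps the RQ doorbell when dual-ULP mode is active,
 * and links both queues onto the CQ's child list.  The two queues must be
 * sized with the same entry count.
 *
 * Returns 0 on success or a negative errno on failure.
 **/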
15922int
15923lpfc_rq_create(struct lpfc_hba *phba, struct lpfc_queue *hrq,
15924 struct lpfc_queue *drq, struct lpfc_queue *cq, uint32_t subtype)
15925{
15926 struct lpfc_mbx_rq_create *rq_create;
15927 struct lpfc_dmabuf *dmabuf;
15928 LPFC_MBOXQ_t *mbox;
15929 int rc, length, status = 0;
15930 uint32_t shdr_status, shdr_add_status;
15931 union lpfc_sli4_cfg_shdr *shdr;
15932 uint32_t hw_page_size = phba->sli4_hba.pc_sli4_params.if_page_sz;
15933 void __iomem *bar_memmap_p;
15934 uint32_t db_offset;
15935 uint16_t pci_barset;
15936
15937
15938 if (!hrq || !drq || !cq)
15939 return -ENODEV;
15940 if (!phba->sli4_hba.pc_sli4_params.supported)
15941 hw_page_size = SLI4_PAGE_SIZE;
15942
15943 if (hrq->entry_count != drq->entry_count)
15944 return -EINVAL;
15945 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
15946 if (!mbox)
15947 return -ENOMEM;
15948 length = (sizeof(struct lpfc_mbx_rq_create) -
15949 sizeof(struct lpfc_sli4_cfg_mhdr));
15950 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_FCOE,
15951 LPFC_MBOX_OPCODE_FCOE_RQ_CREATE,
15952 length, LPFC_SLI4_MBX_EMBED);
15953 rq_create = &mbox->u.mqe.un.rq_create;
15954 shdr = (union lpfc_sli4_cfg_shdr *) &rq_create->header.cfg_shdr;
15955 bf_set(lpfc_mbox_hdr_version, &shdr->request,
15956 phba->sli4_hba.pc_sli4_params.rqv);
15957 if (phba->sli4_hba.pc_sli4_params.rqv == LPFC_Q_CREATE_VERSION_1) {
15958 bf_set(lpfc_rq_context_rqe_count_1,
15959 &rq_create->u.request.context,
15960 hrq->entry_count);
15961 rq_create->u.request.context.buffer_size = LPFC_HDR_BUF_SIZE;
15962 bf_set(lpfc_rq_context_rqe_size,
15963 &rq_create->u.request.context,
15964 LPFC_RQE_SIZE_8);
15965 bf_set(lpfc_rq_context_page_size,
15966 &rq_create->u.request.context,
15967 LPFC_RQ_PAGE_SIZE_4096);
15968 } else {
15969 switch (hrq->entry_count) {
15970 default:
15971 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
15972 "2535 Unsupported RQ count. (%d)\n",
15973 hrq->entry_count);
15974 if (hrq->entry_count < 512) {
15975 status = -EINVAL;
15976 goto out;
15977 }
15978
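		/* otherwise fall through and use the smallest ring size */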
15979 case 512:
15980 bf_set(lpfc_rq_context_rqe_count,
15981 &rq_create->u.request.context,
15982 LPFC_RQ_RING_SIZE_512);
15983 break;
15984 case 1024:
15985 bf_set(lpfc_rq_context_rqe_count,
15986 &rq_create->u.request.context,
15987 LPFC_RQ_RING_SIZE_1024);
15988 break;
15989 case 2048:
15990 bf_set(lpfc_rq_context_rqe_count,
15991 &rq_create->u.request.context,
15992 LPFC_RQ_RING_SIZE_2048);
15993 break;
15994 case 4096:
15995 bf_set(lpfc_rq_context_rqe_count,
15996 &rq_create->u.request.context,
15997 LPFC_RQ_RING_SIZE_4096);
15998 break;
15999 }
16000 bf_set(lpfc_rq_context_buf_size, &rq_create->u.request.context,
16001 LPFC_HDR_BUF_SIZE);
16002 }
16003 bf_set(lpfc_rq_context_cq_id, &rq_create->u.request.context,
16004 cq->queue_id);
16005 bf_set(lpfc_mbx_rq_create_num_pages, &rq_create->u.request,
16006 hrq->page_count);
16007 list_for_each_entry(dmabuf, &hrq->page_list, list) {
16008 memset(dmabuf->virt, 0, hw_page_size);
16009 rq_create->u.request.page[dmabuf->buffer_tag].addr_lo =
16010 putPaddrLow(dmabuf->phys);
16011 rq_create->u.request.page[dmabuf->buffer_tag].addr_hi =
16012 putPaddrHigh(dmabuf->phys);
16013 }
16014 if (phba->sli4_hba.fw_func_mode & LPFC_DUA_MODE)
16015 bf_set(lpfc_mbx_rq_create_dua, &rq_create->u.request, 1);
16016
16017 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
16018
16019 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
16020 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
16021 if (shdr_status || shdr_add_status || rc) {
16022 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
16023 "2504 RQ_CREATE mailbox failed with "
16024 "status x%x add_status x%x, mbx status x%x\n",
16025 shdr_status, shdr_add_status, rc);
16026 status = -ENXIO;
16027 goto out;
16028 }
16029 hrq->queue_id = bf_get(lpfc_mbx_rq_create_q_id, &rq_create->u.response);
16030 if (hrq->queue_id == 0xFFFF) {
16031 status = -ENXIO;
16032 goto out;
16033 }
16034
16035 if (phba->sli4_hba.fw_func_mode & LPFC_DUA_MODE) {
16036 hrq->db_format = bf_get(lpfc_mbx_rq_create_db_format,
16037 &rq_create->u.response);
16038 if ((hrq->db_format != LPFC_DB_LIST_FORMAT) &&
16039 (hrq->db_format != LPFC_DB_RING_FORMAT)) {
16040 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
16041 "3262 RQ [%d] doorbell format not "
16042 "supported: x%x\n", hrq->queue_id,
16043 hrq->db_format);
16044 status = -EINVAL;
16045 goto out;
16046 }
16047
16048 pci_barset = bf_get(lpfc_mbx_rq_create_bar_set,
16049 &rq_create->u.response);
16050 bar_memmap_p = lpfc_dual_chute_pci_bar_map(phba, pci_barset);
16051 if (!bar_memmap_p) {
16052 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
16053 "3269 RQ[%d] failed to memmap pci "
16054 "barset:x%x\n", hrq->queue_id,
16055 pci_barset);
16056 status = -ENOMEM;
16057 goto out;
16058 }
16059
16060 db_offset = rq_create->u.response.doorbell_offset;
16061 if ((db_offset != LPFC_ULP0_RQ_DOORBELL) &&
16062 (db_offset != LPFC_ULP1_RQ_DOORBELL)) {
16063 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
16064 "3270 RQ[%d] doorbell offset not "
16065 "supported: x%x\n", hrq->queue_id,
16066 db_offset);
16067 status = -EINVAL;
16068 goto out;
16069 }
16070 hrq->db_regaddr = bar_memmap_p + db_offset;
16071 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
16072 "3266 RQ[qid:%d]: barset:x%x, offset:x%x, "
16073 "format:x%x\n", hrq->queue_id, pci_barset,
16074 db_offset, hrq->db_format);
16075 } else {
16076 hrq->db_format = LPFC_DB_RING_FORMAT;
16077 hrq->db_regaddr = phba->sli4_hba.RQDBregaddr;
16078 }
16079 hrq->type = LPFC_HRQ;
16080 hrq->assoc_qid = cq->queue_id;
16081 hrq->subtype = subtype;
16082 hrq->host_index = 0;
16083 hrq->hba_index = 0;
16084 hrq->notify_interval = LPFC_RQ_NOTIFY_INTRVL;
16085
16086
16087 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_FCOE,
16088 LPFC_MBOX_OPCODE_FCOE_RQ_CREATE,
16089 length, LPFC_SLI4_MBX_EMBED);
16090 bf_set(lpfc_mbox_hdr_version, &shdr->request,
16091 phba->sli4_hba.pc_sli4_params.rqv);
16092 if (phba->sli4_hba.pc_sli4_params.rqv == LPFC_Q_CREATE_VERSION_1) {
16093 bf_set(lpfc_rq_context_rqe_count_1,
16094 &rq_create->u.request.context, hrq->entry_count);
16095 if (subtype == LPFC_NVMET)
16096 rq_create->u.request.context.buffer_size =
16097 LPFC_NVMET_DATA_BUF_SIZE;
16098 else
16099 rq_create->u.request.context.buffer_size =
16100 LPFC_DATA_BUF_SIZE;
16101 bf_set(lpfc_rq_context_rqe_size, &rq_create->u.request.context,
16102 LPFC_RQE_SIZE_8);
16103 bf_set(lpfc_rq_context_page_size, &rq_create->u.request.context,
16104 (PAGE_SIZE/SLI4_PAGE_SIZE));
16105 } else {
16106 switch (drq->entry_count) {
16107 default:
16108 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
16109 "2536 Unsupported RQ count. (%d)\n",
16110 drq->entry_count);
16111 if (drq->entry_count < 512) {
16112 status = -EINVAL;
16113 goto out;
16114 }
16115
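		/* otherwise fall through and use the smallest ring size */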
16116 case 512:
16117 bf_set(lpfc_rq_context_rqe_count,
16118 &rq_create->u.request.context,
16119 LPFC_RQ_RING_SIZE_512);
16120 break;
16121 case 1024:
16122 bf_set(lpfc_rq_context_rqe_count,
16123 &rq_create->u.request.context,
16124 LPFC_RQ_RING_SIZE_1024);
16125 break;
16126 case 2048:
16127 bf_set(lpfc_rq_context_rqe_count,
16128 &rq_create->u.request.context,
16129 LPFC_RQ_RING_SIZE_2048);
16130 break;
16131 case 4096:
16132 bf_set(lpfc_rq_context_rqe_count,
16133 &rq_create->u.request.context,
16134 LPFC_RQ_RING_SIZE_4096);
16135 break;
16136 }
16137 if (subtype == LPFC_NVMET)
16138 bf_set(lpfc_rq_context_buf_size,
16139 &rq_create->u.request.context,
16140 LPFC_NVMET_DATA_BUF_SIZE);
16141 else
16142 bf_set(lpfc_rq_context_buf_size,
16143 &rq_create->u.request.context,
16144 LPFC_DATA_BUF_SIZE);
16145 }
16146 bf_set(lpfc_rq_context_cq_id, &rq_create->u.request.context,
16147 cq->queue_id);
16148 bf_set(lpfc_mbx_rq_create_num_pages, &rq_create->u.request,
16149 drq->page_count);
16150 list_for_each_entry(dmabuf, &drq->page_list, list) {
16151 rq_create->u.request.page[dmabuf->buffer_tag].addr_lo =
16152 putPaddrLow(dmabuf->phys);
16153 rq_create->u.request.page[dmabuf->buffer_tag].addr_hi =
16154 putPaddrHigh(dmabuf->phys);
16155 }
16156 if (phba->sli4_hba.fw_func_mode & LPFC_DUA_MODE)
16157 bf_set(lpfc_mbx_rq_create_dua, &rq_create->u.request, 1);
16158 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
16159
16160 shdr = (union lpfc_sli4_cfg_shdr *) &rq_create->header.cfg_shdr;
16161 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
16162 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
16163 if (shdr_status || shdr_add_status || rc) {
16164 status = -ENXIO;
16165 goto out;
16166 }
16167 drq->queue_id = bf_get(lpfc_mbx_rq_create_q_id, &rq_create->u.response);
16168 if (drq->queue_id == 0xFFFF) {
16169 status = -ENXIO;
16170 goto out;
16171 }
16172 drq->type = LPFC_DRQ;
16173 drq->assoc_qid = cq->queue_id;
16174 drq->subtype = subtype;
16175 drq->host_index = 0;
16176 drq->hba_index = 0;
16177 drq->notify_interval = LPFC_RQ_NOTIFY_INTRVL;
16178
16179
16180 list_add_tail(&hrq->list, &cq->child_list);
16181 list_add_tail(&drq->list, &cq->child_list);
16182
16183out:
16184 mempool_free(mbox, phba->mbox_mem_pool);
16185 return status;
16186}
16187
16188
16189
16190
16191
16192
16193
16194
16195
16196
16197
16198
16199
16200
16201
16202
16203
16204
16205
16206
16207
16208
16209
16210
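/**
 * lpfc_mrq_create - create the NVMET multi-receive-queue (MRQ) set
 * @phba: HBA context object
 * @hrqp: array of header receive queues
 * @drqp: array of data receive queues
 * @cqp: array of completion queues, one per header/data pair
 * @subtype: queue subtype recorded in every RQ
 *
 * Builds a single non-embedded version-2 RQ_CREATE command covering
 * phba->cfg_nvmet_mrq header/data queue pairs.  Queue ids are assigned
 * sequentially from the base id returned by the firmware: each header queue
 * takes the even offset and its data queue the following odd offset.
 *
 * Returns 0 on success or a negative errno on failure.
 **/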
16211int
16212lpfc_mrq_create(struct lpfc_hba *phba, struct lpfc_queue **hrqp,
16213 struct lpfc_queue **drqp, struct lpfc_queue **cqp,
16214 uint32_t subtype)
16215{
16216 struct lpfc_queue *hrq, *drq, *cq;
16217 struct lpfc_mbx_rq_create_v2 *rq_create;
16218 struct lpfc_dmabuf *dmabuf;
16219 LPFC_MBOXQ_t *mbox;
16220 int rc, length, alloclen, status = 0;
16221 int cnt, idx, numrq, page_idx = 0;
16222 uint32_t shdr_status, shdr_add_status;
16223 union lpfc_sli4_cfg_shdr *shdr;
16224 uint32_t hw_page_size = phba->sli4_hba.pc_sli4_params.if_page_sz;
16225
16226 numrq = phba->cfg_nvmet_mrq;
16227
16228 if (!hrqp || !drqp || !cqp || !numrq)
16229 return -ENODEV;
16230 if (!phba->sli4_hba.pc_sli4_params.supported)
16231 hw_page_size = SLI4_PAGE_SIZE;
16232
16233 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
16234 if (!mbox)
16235 return -ENOMEM;
16236
16237 length = sizeof(struct lpfc_mbx_rq_create_v2);
16238 length += ((2 * numrq * hrqp[0]->page_count) *
16239 sizeof(struct dma_address));
16240
16241 alloclen = lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_FCOE,
16242 LPFC_MBOX_OPCODE_FCOE_RQ_CREATE, length,
16243 LPFC_SLI4_MBX_NEMBED);
16244 if (alloclen < length) {
16245 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
16246 "3099 Allocated DMA memory size (%d) is "
16247 "less than the requested DMA memory size "
16248 "(%d)\n", alloclen, length);
16249 status = -ENOMEM;
16250 goto out;
16251 }
16252
16253
16254
16255 rq_create = mbox->sge_array->addr[0];
16256 shdr = (union lpfc_sli4_cfg_shdr *)&rq_create->cfg_shdr;
16257
16258 bf_set(lpfc_mbox_hdr_version, &shdr->request, LPFC_Q_CREATE_VERSION_2);
16259 cnt = 0;
16260
16261 for (idx = 0; idx < numrq; idx++) {
16262 hrq = hrqp[idx];
16263 drq = drqp[idx];
16264 cq = cqp[idx];
16265
16266
16267 if (!hrq || !drq || !cq) {
16268 status = -ENODEV;
16269 goto out;
16270 }
16271
16272 if (hrq->entry_count != drq->entry_count) {
16273 status = -EINVAL;
16274 goto out;
16275 }
16276
16277 if (idx == 0) {
16278 bf_set(lpfc_mbx_rq_create_num_pages,
16279 &rq_create->u.request,
16280 hrq->page_count);
16281 bf_set(lpfc_mbx_rq_create_rq_cnt,
16282 &rq_create->u.request, (numrq * 2));
16283 bf_set(lpfc_mbx_rq_create_dnb, &rq_create->u.request,
16284 1);
16285 bf_set(lpfc_rq_context_base_cq,
16286 &rq_create->u.request.context,
16287 cq->queue_id);
16288 bf_set(lpfc_rq_context_data_size,
16289 &rq_create->u.request.context,
16290 LPFC_NVMET_DATA_BUF_SIZE);
16291 bf_set(lpfc_rq_context_hdr_size,
16292 &rq_create->u.request.context,
16293 LPFC_HDR_BUF_SIZE);
16294 bf_set(lpfc_rq_context_rqe_count_1,
16295 &rq_create->u.request.context,
16296 hrq->entry_count);
16297 bf_set(lpfc_rq_context_rqe_size,
16298 &rq_create->u.request.context,
16299 LPFC_RQE_SIZE_8);
16300 bf_set(lpfc_rq_context_page_size,
16301 &rq_create->u.request.context,
16302 (PAGE_SIZE/SLI4_PAGE_SIZE));
16303 }
16304 rc = 0;
16305 list_for_each_entry(dmabuf, &hrq->page_list, list) {
16306 memset(dmabuf->virt, 0, hw_page_size);
16307 cnt = page_idx + dmabuf->buffer_tag;
16308 rq_create->u.request.page[cnt].addr_lo =
16309 putPaddrLow(dmabuf->phys);
16310 rq_create->u.request.page[cnt].addr_hi =
16311 putPaddrHigh(dmabuf->phys);
16312 rc++;
16313 }
16314 page_idx += rc;
16315
16316 rc = 0;
16317 list_for_each_entry(dmabuf, &drq->page_list, list) {
16318 memset(dmabuf->virt, 0, hw_page_size);
16319 cnt = page_idx + dmabuf->buffer_tag;
16320 rq_create->u.request.page[cnt].addr_lo =
16321 putPaddrLow(dmabuf->phys);
16322 rq_create->u.request.page[cnt].addr_hi =
16323 putPaddrHigh(dmabuf->phys);
16324 rc++;
16325 }
16326 page_idx += rc;
16327
16328 hrq->db_format = LPFC_DB_RING_FORMAT;
16329 hrq->db_regaddr = phba->sli4_hba.RQDBregaddr;
16330 hrq->type = LPFC_HRQ;
16331 hrq->assoc_qid = cq->queue_id;
16332 hrq->subtype = subtype;
16333 hrq->host_index = 0;
16334 hrq->hba_index = 0;
16335 hrq->notify_interval = LPFC_RQ_NOTIFY_INTRVL;
16336
16337 drq->db_format = LPFC_DB_RING_FORMAT;
16338 drq->db_regaddr = phba->sli4_hba.RQDBregaddr;
16339 drq->type = LPFC_DRQ;
16340 drq->assoc_qid = cq->queue_id;
16341 drq->subtype = subtype;
16342 drq->host_index = 0;
16343 drq->hba_index = 0;
16344 drq->notify_interval = LPFC_RQ_NOTIFY_INTRVL;
16345
16346 list_add_tail(&hrq->list, &cq->child_list);
16347 list_add_tail(&drq->list, &cq->child_list);
16348 }
16349
16350 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
16351
16352 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
16353 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
16354 if (shdr_status || shdr_add_status || rc) {
16355 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
16356 "3120 RQ_CREATE mailbox failed with "
16357 "status x%x add_status x%x, mbx status x%x\n",
16358 shdr_status, shdr_add_status, rc);
16359 status = -ENXIO;
16360 goto out;
16361 }
16362 rc = bf_get(lpfc_mbx_rq_create_q_id, &rq_create->u.response);
16363 if (rc == 0xFFFF) {
16364 status = -ENXIO;
16365 goto out;
16366 }
16367
16368
16369 for (idx = 0; idx < numrq; idx++) {
16370 hrq = hrqp[idx];
16371 hrq->queue_id = rc + (2 * idx);
16372 drq = drqp[idx];
16373 drq->queue_id = rc + (2 * idx) + 1;
16374 }
16375
16376out:
16377 lpfc_sli4_mbox_cmd_free(phba, mbox);
16378 return status;
16379}
16380
16381
16382
16383
16384
16385
16386
16387
16388
16389
16390
16391
16392
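/**
 * lpfc_eq_destroy - destroy an Event Queue on the HBA
 * @phba: HBA context object
 * @eq: event queue to destroy
 *
 * Issues an EQ_DESTROY mailbox command for @eq and unlinks the queue.
 * Returns 0 on success or a negative errno on failure.
 **/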
16393int
16394lpfc_eq_destroy(struct lpfc_hba *phba, struct lpfc_queue *eq)
16395{
16396 LPFC_MBOXQ_t *mbox;
16397 int rc, length, status = 0;
16398 uint32_t shdr_status, shdr_add_status;
16399 union lpfc_sli4_cfg_shdr *shdr;
16400
16401
16402 if (!eq)
16403 return -ENODEV;
16404
16405 mbox = mempool_alloc(eq->phba->mbox_mem_pool, GFP_KERNEL);
16406 if (!mbox)
16407 return -ENOMEM;
16408 length = (sizeof(struct lpfc_mbx_eq_destroy) -
16409 sizeof(struct lpfc_sli4_cfg_mhdr));
16410 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON,
16411 LPFC_MBOX_OPCODE_EQ_DESTROY,
16412 length, LPFC_SLI4_MBX_EMBED);
16413 bf_set(lpfc_mbx_eq_destroy_q_id, &mbox->u.mqe.un.eq_destroy.u.request,
16414 eq->queue_id);
16415 mbox->vport = eq->phba->pport;
16416 mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
16417
16418 rc = lpfc_sli_issue_mbox(eq->phba, mbox, MBX_POLL);
16419
16420 shdr = (union lpfc_sli4_cfg_shdr *)
16421 &mbox->u.mqe.un.eq_destroy.header.cfg_shdr;
16422 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
16423 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
16424 if (shdr_status || shdr_add_status || rc) {
16425 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
16426 "2505 EQ_DESTROY mailbox failed with "
16427 "status x%x add_status x%x, mbx status x%x\n",
16428 shdr_status, shdr_add_status, rc);
16429 status = -ENXIO;
16430 }
16431
16432
16433 list_del_init(&eq->list);
16434 mempool_free(mbox, eq->phba->mbox_mem_pool);
16435 return status;
16436}
16437
16438
16439
16440
16441
16442
16443
16444
16445
16446
16447
16448
16449
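/**
 * lpfc_cq_destroy - destroy a Completion Queue on the HBA
 * @phba: HBA context object
 * @cq: completion queue to destroy
 *
 * Issues a CQ_DESTROY mailbox command for @cq and unlinks the queue.
 * Returns 0 on success or a negative errno on failure.
 **/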
16450int
16451lpfc_cq_destroy(struct lpfc_hba *phba, struct lpfc_queue *cq)
16452{
16453 LPFC_MBOXQ_t *mbox;
16454 int rc, length, status = 0;
16455 uint32_t shdr_status, shdr_add_status;
16456 union lpfc_sli4_cfg_shdr *shdr;
16457
16458
16459 if (!cq)
16460 return -ENODEV;
16461 mbox = mempool_alloc(cq->phba->mbox_mem_pool, GFP_KERNEL);
16462 if (!mbox)
16463 return -ENOMEM;
16464 length = (sizeof(struct lpfc_mbx_cq_destroy) -
16465 sizeof(struct lpfc_sli4_cfg_mhdr));
16466 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON,
16467 LPFC_MBOX_OPCODE_CQ_DESTROY,
16468 length, LPFC_SLI4_MBX_EMBED);
16469 bf_set(lpfc_mbx_cq_destroy_q_id, &mbox->u.mqe.un.cq_destroy.u.request,
16470 cq->queue_id);
16471 mbox->vport = cq->phba->pport;
16472 mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
16473 rc = lpfc_sli_issue_mbox(cq->phba, mbox, MBX_POLL);
16474
16475 shdr = (union lpfc_sli4_cfg_shdr *)
16476 &mbox->u.mqe.un.wq_create.header.cfg_shdr;
16477 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
16478 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
16479 if (shdr_status || shdr_add_status || rc) {
16480 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
16481 "2506 CQ_DESTROY mailbox failed with "
16482 "status x%x add_status x%x, mbx status x%x\n",
16483 shdr_status, shdr_add_status, rc);
16484 status = -ENXIO;
16485 }
16486
16487 list_del_init(&cq->list);
16488 mempool_free(mbox, cq->phba->mbox_mem_pool);
16489 return status;
16490}
16491
16492
16493
16494
16495
16496
16497
16498
16499
16500
16501
16502
16503
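/**
 * lpfc_mq_destroy - destroy the Mailbox Queue on the HBA
 * @phba: HBA context object
 * @mq: mailbox queue to destroy
 *
 * Issues an MQ_DESTROY mailbox command for @mq and unlinks the queue.
 * Returns 0 on success or a negative errno on failure.
 **/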
16504int
16505lpfc_mq_destroy(struct lpfc_hba *phba, struct lpfc_queue *mq)
16506{
16507 LPFC_MBOXQ_t *mbox;
16508 int rc, length, status = 0;
16509 uint32_t shdr_status, shdr_add_status;
16510 union lpfc_sli4_cfg_shdr *shdr;
16511
16512
16513 if (!mq)
16514 return -ENODEV;
16515 mbox = mempool_alloc(mq->phba->mbox_mem_pool, GFP_KERNEL);
16516 if (!mbox)
16517 return -ENOMEM;
16518 length = (sizeof(struct lpfc_mbx_mq_destroy) -
16519 sizeof(struct lpfc_sli4_cfg_mhdr));
16520 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON,
16521 LPFC_MBOX_OPCODE_MQ_DESTROY,
16522 length, LPFC_SLI4_MBX_EMBED);
16523 bf_set(lpfc_mbx_mq_destroy_q_id, &mbox->u.mqe.un.mq_destroy.u.request,
16524 mq->queue_id);
16525 mbox->vport = mq->phba->pport;
16526 mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
16527 rc = lpfc_sli_issue_mbox(mq->phba, mbox, MBX_POLL);
16528
16529 shdr = (union lpfc_sli4_cfg_shdr *)
16530 &mbox->u.mqe.un.mq_destroy.header.cfg_shdr;
16531 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
16532 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
16533 if (shdr_status || shdr_add_status || rc) {
16534 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
16535 "2507 MQ_DESTROY mailbox failed with "
16536 "status x%x add_status x%x, mbx status x%x\n",
16537 shdr_status, shdr_add_status, rc);
16538 status = -ENXIO;
16539 }
16540
16541 list_del_init(&mq->list);
16542 mempool_free(mbox, mq->phba->mbox_mem_pool);
16543 return status;
16544}
16545
16546
16547
16548
16549
16550
16551
16552
16553
16554
16555
16556
16557
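/**
 * lpfc_wq_destroy - Destroy a work queue on the HBA
 * @phba: HBA structure that indicates port to destroy the queue on.
 * @wq: The work queue structure to destroy.
 *
 * Issues the FCOE WQ_DESTROY mailbox command (polled) for the queue ID held
 * in @wq, removes the queue from the driver's list and frees the associated
 * pring.
 *
 * Returns 0 on success, -ENODEV if @wq is NULL, -ENOMEM if no mailbox memory
 * is available, or -ENXIO if the mailbox command fails.
 **/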
16558int
16559lpfc_wq_destroy(struct lpfc_hba *phba, struct lpfc_queue *wq)
16560{
16561 LPFC_MBOXQ_t *mbox;
16562 int rc, length, status = 0;
16563 uint32_t shdr_status, shdr_add_status;
16564 union lpfc_sli4_cfg_shdr *shdr;
16565
16566
16567 if (!wq)
16568 return -ENODEV;
16569 mbox = mempool_alloc(wq->phba->mbox_mem_pool, GFP_KERNEL);
16570 if (!mbox)
16571 return -ENOMEM;
16572 length = (sizeof(struct lpfc_mbx_wq_destroy) -
16573 sizeof(struct lpfc_sli4_cfg_mhdr));
16574 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_FCOE,
16575 LPFC_MBOX_OPCODE_FCOE_WQ_DESTROY,
16576 length, LPFC_SLI4_MBX_EMBED);
16577 bf_set(lpfc_mbx_wq_destroy_q_id, &mbox->u.mqe.un.wq_destroy.u.request,
16578 wq->queue_id);
16579 mbox->vport = wq->phba->pport;
16580 mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
16581 rc = lpfc_sli_issue_mbox(wq->phba, mbox, MBX_POLL);
16582 shdr = (union lpfc_sli4_cfg_shdr *)
16583 &mbox->u.mqe.un.wq_destroy.header.cfg_shdr;
16584 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
16585 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
16586 if (shdr_status || shdr_add_status || rc) {
16587 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
16588 "2508 WQ_DESTROY mailbox failed with "
16589 "status x%x add_status x%x, mbx status x%x\n",
16590 shdr_status, shdr_add_status, rc);
16591 status = -ENXIO;
16592 }
16593
16594 list_del_init(&wq->list);
16595 kfree(wq->pring);
16596 wq->pring = NULL;
16597 mempool_free(mbox, wq->phba->mbox_mem_pool);
16598 return status;
16599}
16600
16601
16602
16603
16604
16605
16606
16607
16608
16609
16610
16611
16612
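/**
 * lpfc_rq_destroy - Destroy a receive queue pair on the HBA
 * @phba: HBA structure that indicates port to destroy the queues on.
 * @hrq: The header receive queue to destroy.
 * @drq: The data receive queue to destroy.
 *
 * Issues an FCOE RQ_DESTROY mailbox command for the header queue and, if
 * that succeeds, reuses the same mailbox for the data queue. Both queues are
 * removed from the driver's lists on completion.
 *
 * Returns 0 on success, -ENODEV if either queue pointer is NULL, -ENOMEM if
 * no mailbox memory is available, or -ENXIO if a mailbox command fails.
 **/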
16613int
16614lpfc_rq_destroy(struct lpfc_hba *phba, struct lpfc_queue *hrq,
16615 struct lpfc_queue *drq)
16616{
16617 LPFC_MBOXQ_t *mbox;
16618 int rc, length, status = 0;
16619 uint32_t shdr_status, shdr_add_status;
16620 union lpfc_sli4_cfg_shdr *shdr;
16621
16622
16623 if (!hrq || !drq)
16624 return -ENODEV;
16625 mbox = mempool_alloc(hrq->phba->mbox_mem_pool, GFP_KERNEL);
16626 if (!mbox)
16627 return -ENOMEM;
16628 length = (sizeof(struct lpfc_mbx_rq_destroy) -
16629 sizeof(struct lpfc_sli4_cfg_mhdr));
16630 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_FCOE,
16631 LPFC_MBOX_OPCODE_FCOE_RQ_DESTROY,
16632 length, LPFC_SLI4_MBX_EMBED);
16633 bf_set(lpfc_mbx_rq_destroy_q_id, &mbox->u.mqe.un.rq_destroy.u.request,
16634 hrq->queue_id);
16635 mbox->vport = hrq->phba->pport;
16636 mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
16637 rc = lpfc_sli_issue_mbox(hrq->phba, mbox, MBX_POLL);
16638
16639 shdr = (union lpfc_sli4_cfg_shdr *)
16640 &mbox->u.mqe.un.rq_destroy.header.cfg_shdr;
16641 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
16642 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
16643 if (shdr_status || shdr_add_status || rc) {
16644 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
16645 "2509 RQ_DESTROY mailbox failed with "
16646 "status x%x add_status x%x, mbx status x%x\n",
16647 shdr_status, shdr_add_status, rc);
16648 if (rc != MBX_TIMEOUT)
16649 mempool_free(mbox, hrq->phba->mbox_mem_pool);
16650 return -ENXIO;
16651 }
16652 bf_set(lpfc_mbx_rq_destroy_q_id, &mbox->u.mqe.un.rq_destroy.u.request,
16653 drq->queue_id);
16654 rc = lpfc_sli_issue_mbox(drq->phba, mbox, MBX_POLL);
16655 shdr = (union lpfc_sli4_cfg_shdr *)
16656 &mbox->u.mqe.un.rq_destroy.header.cfg_shdr;
16657 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
16658 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
16659 if (shdr_status || shdr_add_status || rc) {
16660 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
16661 "2510 RQ_DESTROY mailbox failed with "
16662 "status x%x add_status x%x, mbx status x%x\n",
16663 shdr_status, shdr_add_status, rc);
16664 status = -ENXIO;
16665 }
16666 list_del_init(&hrq->list);
16667 list_del_init(&drq->list);
16668 mempool_free(mbox, hrq->phba->mbox_mem_pool);
16669 return status;
16670}
16671
16672
16673
16674
16675
16676
16677
16678
16679
16680
16681
16682
16683
16684
16685
16686
16687
16688
16689
16690
16691
16692
16693
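/**
 * lpfc_sli4_post_sgl - Post a single scatter gather list page pair
 * @phba: HBA structure that indicates port to post the pages to.
 * @pdma_phys_addr0: Physical address of the first SGL page.
 * @pdma_phys_addr1: Physical address of the second SGL page, or 0.
 * @xritag: XRI the SGL pages are registered against.
 *
 * Builds an embedded FCOE POST_SGL_PAGES mailbox command for one page pair
 * and issues it, polled or blocking depending on whether interrupts are
 * enabled.
 *
 * Returns 0 on success, -EINVAL for an invalid XRI, -ENOMEM if no mailbox
 * memory is available, or -ENXIO if the mailbox command fails.
 **/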
16694int
16695lpfc_sli4_post_sgl(struct lpfc_hba *phba,
16696 dma_addr_t pdma_phys_addr0,
16697 dma_addr_t pdma_phys_addr1,
16698 uint16_t xritag)
16699{
16700 struct lpfc_mbx_post_sgl_pages *post_sgl_pages;
16701 LPFC_MBOXQ_t *mbox;
16702 int rc;
16703 uint32_t shdr_status, shdr_add_status;
16704 uint32_t mbox_tmo;
16705 union lpfc_sli4_cfg_shdr *shdr;
16706
16707 if (xritag == NO_XRI) {
16708 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
16709 "0364 Invalid param:\n");
16710 return -EINVAL;
16711 }
16712
16713 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
16714 if (!mbox)
16715 return -ENOMEM;
16716
16717 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_FCOE,
16718 LPFC_MBOX_OPCODE_FCOE_POST_SGL_PAGES,
16719 sizeof(struct lpfc_mbx_post_sgl_pages) -
16720 sizeof(struct lpfc_sli4_cfg_mhdr), LPFC_SLI4_MBX_EMBED);
16721
16722 post_sgl_pages = (struct lpfc_mbx_post_sgl_pages *)
16723 &mbox->u.mqe.un.post_sgl_pages;
16724 bf_set(lpfc_post_sgl_pages_xri, post_sgl_pages, xritag);
16725 bf_set(lpfc_post_sgl_pages_xricnt, post_sgl_pages, 1);
16726
16727 post_sgl_pages->sgl_pg_pairs[0].sgl_pg0_addr_lo =
16728 cpu_to_le32(putPaddrLow(pdma_phys_addr0));
16729 post_sgl_pages->sgl_pg_pairs[0].sgl_pg0_addr_hi =
16730 cpu_to_le32(putPaddrHigh(pdma_phys_addr0));
16731
16732 post_sgl_pages->sgl_pg_pairs[0].sgl_pg1_addr_lo =
16733 cpu_to_le32(putPaddrLow(pdma_phys_addr1));
16734 post_sgl_pages->sgl_pg_pairs[0].sgl_pg1_addr_hi =
16735 cpu_to_le32(putPaddrHigh(pdma_phys_addr1));
16736 if (!phba->sli4_hba.intr_enable)
16737 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
16738 else {
16739 mbox_tmo = lpfc_mbox_tmo_val(phba, mbox);
16740 rc = lpfc_sli_issue_mbox_wait(phba, mbox, mbox_tmo);
16741 }
16742
16743 shdr = (union lpfc_sli4_cfg_shdr *) &post_sgl_pages->header.cfg_shdr;
16744 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
16745 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
16746 if (rc != MBX_TIMEOUT)
16747 mempool_free(mbox, phba->mbox_mem_pool);
	if (shdr_status || shdr_add_status || rc) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"2511 POST_SGL mailbox failed with "
				"status x%x add_status x%x, mbx status x%x\n",
				shdr_status, shdr_add_status, rc);
		return -ENXIO;
	}
	return 0;
16755}
16756
16757
16758
16759
16760
16761
16762
16763
16764
16765
16766
16767
16768
16769
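/**
 * lpfc_sli4_alloc_xri - Allocate an available XRI from the bitmask
 * @phba: Pointer to the HBA structure.
 *
 * Finds the next zero bit in the XRI bitmask under the hbalock, marks it in
 * use and bumps the used count.
 *
 * Returns the allocated XRI, or NO_XRI when the pool is exhausted.
 **/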
16770static uint16_t
16771lpfc_sli4_alloc_xri(struct lpfc_hba *phba)
16772{
16773 unsigned long xri;
16774
16775
16776
16777
16778
16779 spin_lock_irq(&phba->hbalock);
16780 xri = find_next_zero_bit(phba->sli4_hba.xri_bmask,
16781 phba->sli4_hba.max_cfg_param.max_xri, 0);
16782 if (xri >= phba->sli4_hba.max_cfg_param.max_xri) {
16783 spin_unlock_irq(&phba->hbalock);
16784 return NO_XRI;
16785 } else {
16786 set_bit(xri, phba->sli4_hba.xri_bmask);
16787 phba->sli4_hba.max_cfg_param.xri_used++;
16788 }
16789 spin_unlock_irq(&phba->hbalock);
16790 return xri;
16791}
16792
16793
16794
16795
16796
16797
16798
16799
16800static void
16801__lpfc_sli4_free_xri(struct lpfc_hba *phba, int xri)
16802{
16803 if (test_and_clear_bit(xri, phba->sli4_hba.xri_bmask)) {
16804 phba->sli4_hba.max_cfg_param.xri_used--;
16805 }
16806}
16807
16808
16809
16810
16811
16812
16813
16814
16815void
16816lpfc_sli4_free_xri(struct lpfc_hba *phba, int xri)
16817{
16818 spin_lock_irq(&phba->hbalock);
16819 __lpfc_sli4_free_xri(phba, xri);
16820 spin_unlock_irq(&phba->hbalock);
16821}
16822
16823
16824
16825
16826
16827
16828
16829
16830
16831
16832
16833uint16_t
16834lpfc_sli4_next_xritag(struct lpfc_hba *phba)
16835{
16836 uint16_t xri_index;
16837
16838 xri_index = lpfc_sli4_alloc_xri(phba);
16839 if (xri_index == NO_XRI)
16840 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
				"2004 Failed to allocate XRI. Last XRITAG is %d"
16842 " Max XRI is %d, Used XRI is %d\n",
16843 xri_index,
16844 phba->sli4_hba.max_cfg_param.max_xri,
16845 phba->sli4_hba.max_cfg_param.xri_used);
16846 return xri_index;
16847}
16848
16849
16850
16851
16852
16853
16854
16855
16856
16857
16858
16859
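/**
 * lpfc_sli4_post_sgl_list - Post a block of ELS sgl pages to the port
 * @phba: Pointer to the HBA structure.
 * @post_sgl_list: List of sglq entries with contiguous XRIs to post.
 * @post_cnt: Number of entries on @post_sgl_list.
 *
 * Builds a non-embedded FCOE POST_SGL_PAGES mailbox command describing one
 * page pair per sglq entry (second page address is zero) and issues it,
 * polled or blocking depending on whether interrupts are enabled. The XRI of
 * the first entry is used as the starting XRI tag.
 *
 * Returns 0 on success, -ENOMEM on allocation or size failures, or -ENXIO if
 * the mailbox command fails.
 **/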
16860static int
16861lpfc_sli4_post_sgl_list(struct lpfc_hba *phba,
16862 struct list_head *post_sgl_list,
16863 int post_cnt)
16864{
16865 struct lpfc_sglq *sglq_entry = NULL, *sglq_next = NULL;
16866 struct lpfc_mbx_post_uembed_sgl_page1 *sgl;
16867 struct sgl_page_pairs *sgl_pg_pairs;
16868 void *viraddr;
16869 LPFC_MBOXQ_t *mbox;
16870 uint32_t reqlen, alloclen, pg_pairs;
16871 uint32_t mbox_tmo;
16872 uint16_t xritag_start = 0;
16873 int rc = 0;
16874 uint32_t shdr_status, shdr_add_status;
16875 union lpfc_sli4_cfg_shdr *shdr;
16876
16877 reqlen = post_cnt * sizeof(struct sgl_page_pairs) +
16878 sizeof(union lpfc_sli4_cfg_shdr) + sizeof(uint32_t);
16879 if (reqlen > SLI4_PAGE_SIZE) {
16880 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
16881 "2559 Block sgl registration required DMA "
				"size (%d) greater than a page\n", reqlen);
16883 return -ENOMEM;
16884 }
16885
16886 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
16887 if (!mbox)
16888 return -ENOMEM;
16889
16890
16891 alloclen = lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_FCOE,
16892 LPFC_MBOX_OPCODE_FCOE_POST_SGL_PAGES, reqlen,
16893 LPFC_SLI4_MBX_NEMBED);
16894
16895 if (alloclen < reqlen) {
16896 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
16897 "0285 Allocated DMA memory size (%d) is "
16898 "less than the requested DMA memory "
16899 "size (%d)\n", alloclen, reqlen);
16900 lpfc_sli4_mbox_cmd_free(phba, mbox);
16901 return -ENOMEM;
16902 }
16903
16904 viraddr = mbox->sge_array->addr[0];
16905 sgl = (struct lpfc_mbx_post_uembed_sgl_page1 *)viraddr;
16906 sgl_pg_pairs = &sgl->sgl_pg_pairs;
16907
16908 pg_pairs = 0;
16909 list_for_each_entry_safe(sglq_entry, sglq_next, post_sgl_list, list) {
16910
16911 sgl_pg_pairs->sgl_pg0_addr_lo =
16912 cpu_to_le32(putPaddrLow(sglq_entry->phys));
16913 sgl_pg_pairs->sgl_pg0_addr_hi =
16914 cpu_to_le32(putPaddrHigh(sglq_entry->phys));
16915 sgl_pg_pairs->sgl_pg1_addr_lo =
16916 cpu_to_le32(putPaddrLow(0));
16917 sgl_pg_pairs->sgl_pg1_addr_hi =
16918 cpu_to_le32(putPaddrHigh(0));
16919
16920
16921 if (pg_pairs == 0)
16922 xritag_start = sglq_entry->sli4_xritag;
16923 sgl_pg_pairs++;
16924 pg_pairs++;
16925 }
16926
16927
16928 bf_set(lpfc_post_sgl_pages_xri, sgl, xritag_start);
16929 bf_set(lpfc_post_sgl_pages_xricnt, sgl, post_cnt);
16930 sgl->word0 = cpu_to_le32(sgl->word0);
16931
16932 if (!phba->sli4_hba.intr_enable)
16933 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
16934 else {
16935 mbox_tmo = lpfc_mbox_tmo_val(phba, mbox);
16936 rc = lpfc_sli_issue_mbox_wait(phba, mbox, mbox_tmo);
16937 }
16938 shdr = (union lpfc_sli4_cfg_shdr *) &sgl->cfg_shdr;
16939 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
16940 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
16941 if (rc != MBX_TIMEOUT)
16942 lpfc_sli4_mbox_cmd_free(phba, mbox);
16943 if (shdr_status || shdr_add_status || rc) {
16944 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
16945 "2513 POST_SGL_BLOCK mailbox command failed "
16946 "status x%x add_status x%x mbx status x%x\n",
16947 shdr_status, shdr_add_status, rc);
16948 rc = -ENXIO;
16949 }
16950 return rc;
16951}
16952
16953
16954
16955
16956
16957
16958
16959
16960
16961
16962
16963
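/**
 * lpfc_sli4_post_io_sgl_block - Post a block of IO buffer sgl pages
 * @phba: Pointer to the HBA structure.
 * @nblist: List of lpfc_io_buf entries with contiguous XRIs to post.
 * @count: Number of entries on @nblist.
 *
 * Builds a non-embedded FCOE POST_SGL_PAGES mailbox command with one page
 * pair per IO buffer; a second page is described only when the configured
 * DMA buffer size exceeds one SGL page. The command is issued polled or
 * blocking depending on whether interrupts are enabled.
 *
 * Returns 0 on success, -ENOMEM on allocation or size failures, or -ENXIO if
 * the mailbox command fails.
 **/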
16964static int
16965lpfc_sli4_post_io_sgl_block(struct lpfc_hba *phba, struct list_head *nblist,
16966 int count)
16967{
16968 struct lpfc_io_buf *lpfc_ncmd;
16969 struct lpfc_mbx_post_uembed_sgl_page1 *sgl;
16970 struct sgl_page_pairs *sgl_pg_pairs;
16971 void *viraddr;
16972 LPFC_MBOXQ_t *mbox;
16973 uint32_t reqlen, alloclen, pg_pairs;
16974 uint32_t mbox_tmo;
16975 uint16_t xritag_start = 0;
16976 int rc = 0;
16977 uint32_t shdr_status, shdr_add_status;
16978 dma_addr_t pdma_phys_bpl1;
16979 union lpfc_sli4_cfg_shdr *shdr;
16980
16981
16982 reqlen = count * sizeof(struct sgl_page_pairs) +
16983 sizeof(union lpfc_sli4_cfg_shdr) + sizeof(uint32_t);
16984 if (reqlen > SLI4_PAGE_SIZE) {
16985 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
16986 "6118 Block sgl registration required DMA "
				"size (%d) greater than a page\n", reqlen);
16988 return -ENOMEM;
16989 }
16990 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
16991 if (!mbox) {
16992 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
16993 "6119 Failed to allocate mbox cmd memory\n");
16994 return -ENOMEM;
16995 }
16996
16997
16998 alloclen = lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_FCOE,
16999 LPFC_MBOX_OPCODE_FCOE_POST_SGL_PAGES,
17000 reqlen, LPFC_SLI4_MBX_NEMBED);
17001
17002 if (alloclen < reqlen) {
17003 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
17004 "6120 Allocated DMA memory size (%d) is "
17005 "less than the requested DMA memory "
17006 "size (%d)\n", alloclen, reqlen);
17007 lpfc_sli4_mbox_cmd_free(phba, mbox);
17008 return -ENOMEM;
17009 }
17010
17011
17012 viraddr = mbox->sge_array->addr[0];
17013
17014
17015 sgl = (struct lpfc_mbx_post_uembed_sgl_page1 *)viraddr;
17016 sgl_pg_pairs = &sgl->sgl_pg_pairs;
17017
17018 pg_pairs = 0;
17019 list_for_each_entry(lpfc_ncmd, nblist, list) {
17020
17021 sgl_pg_pairs->sgl_pg0_addr_lo =
17022 cpu_to_le32(putPaddrLow(lpfc_ncmd->dma_phys_sgl));
17023 sgl_pg_pairs->sgl_pg0_addr_hi =
17024 cpu_to_le32(putPaddrHigh(lpfc_ncmd->dma_phys_sgl));
17025 if (phba->cfg_sg_dma_buf_size > SGL_PAGE_SIZE)
17026 pdma_phys_bpl1 = lpfc_ncmd->dma_phys_sgl +
17027 SGL_PAGE_SIZE;
17028 else
17029 pdma_phys_bpl1 = 0;
17030 sgl_pg_pairs->sgl_pg1_addr_lo =
17031 cpu_to_le32(putPaddrLow(pdma_phys_bpl1));
17032 sgl_pg_pairs->sgl_pg1_addr_hi =
17033 cpu_to_le32(putPaddrHigh(pdma_phys_bpl1));
17034
17035 if (pg_pairs == 0)
17036 xritag_start = lpfc_ncmd->cur_iocbq.sli4_xritag;
17037 sgl_pg_pairs++;
17038 pg_pairs++;
17039 }
17040 bf_set(lpfc_post_sgl_pages_xri, sgl, xritag_start);
17041 bf_set(lpfc_post_sgl_pages_xricnt, sgl, pg_pairs);
17042
17043 sgl->word0 = cpu_to_le32(sgl->word0);
17044
17045 if (!phba->sli4_hba.intr_enable) {
17046 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
17047 } else {
17048 mbox_tmo = lpfc_mbox_tmo_val(phba, mbox);
17049 rc = lpfc_sli_issue_mbox_wait(phba, mbox, mbox_tmo);
17050 }
17051 shdr = (union lpfc_sli4_cfg_shdr *)&sgl->cfg_shdr;
17052 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
17053 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
17054 if (rc != MBX_TIMEOUT)
17055 lpfc_sli4_mbox_cmd_free(phba, mbox);
17056 if (shdr_status || shdr_add_status || rc) {
17057 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
17058 "6125 POST_SGL_BLOCK mailbox command failed "
17059 "status x%x add_status x%x mbx status x%x\n",
17060 shdr_status, shdr_add_status, rc);
17061 rc = -ENXIO;
17062 }
17063 return rc;
17064}
17065
17066
17067
17068
17069
17070
17071
17072
17073
17074
17075
17076
17077
17078
17079
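/**
 * lpfc_sli4_post_io_sgl_list - Post IO buffer sgls, blocked where possible
 * @phba: Pointer to the HBA structure.
 * @post_nblist: List of lpfc_io_buf entries to post.
 * @sb_count: Number of buffers on @post_nblist.
 *
 * Groups buffers with contiguous XRIs into blocks of up to
 * LPFC_NEMBED_MBOX_SGL_CNT and posts each block with
 * lpfc_sli4_post_io_sgl_block(); a trailing single buffer is posted with
 * lpfc_sli4_post_sgl(). Buffers that fail to post keep LPFC_SBUF_NOT_POSTED
 * set; all buffers are then handed to lpfc_io_buf_replenish().
 *
 * Returns the number of buffers successfully posted, or -EINVAL if
 * @sb_count is not positive.
 **/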
17080int
17081lpfc_sli4_post_io_sgl_list(struct lpfc_hba *phba,
17082 struct list_head *post_nblist, int sb_count)
17083{
17084 struct lpfc_io_buf *lpfc_ncmd, *lpfc_ncmd_next;
17085 int status, sgl_size;
17086 int post_cnt = 0, block_cnt = 0, num_posting = 0, num_posted = 0;
17087 dma_addr_t pdma_phys_sgl1;
17088 int last_xritag = NO_XRI;
17089 int cur_xritag;
17090 LIST_HEAD(prep_nblist);
17091 LIST_HEAD(blck_nblist);
17092 LIST_HEAD(nvme_nblist);
17093
17094
17095 if (sb_count <= 0)
17096 return -EINVAL;
17097
17098 sgl_size = phba->cfg_sg_dma_buf_size;
17099 list_for_each_entry_safe(lpfc_ncmd, lpfc_ncmd_next, post_nblist, list) {
17100 list_del_init(&lpfc_ncmd->list);
17101 block_cnt++;
17102 if ((last_xritag != NO_XRI) &&
17103 (lpfc_ncmd->cur_iocbq.sli4_xritag != last_xritag + 1)) {
17104
17105 list_splice_init(&prep_nblist, &blck_nblist);
17106 post_cnt = block_cnt - 1;
17107
17108 list_add_tail(&lpfc_ncmd->list, &prep_nblist);
17109 block_cnt = 1;
17110 } else {
17111
17112 list_add_tail(&lpfc_ncmd->list, &prep_nblist);
17113
17114 if (block_cnt == LPFC_NEMBED_MBOX_SGL_CNT) {
17115 list_splice_init(&prep_nblist, &blck_nblist);
17116 post_cnt = block_cnt;
17117 block_cnt = 0;
17118 }
17119 }
17120 num_posting++;
17121 last_xritag = lpfc_ncmd->cur_iocbq.sli4_xritag;
17122
17123
17124 if (num_posting == sb_count) {
17125 if (post_cnt == 0) {
17126
17127 list_splice_init(&prep_nblist, &blck_nblist);
17128 post_cnt = block_cnt;
17129 } else if (block_cnt == 1) {
17130
17131 if (sgl_size > SGL_PAGE_SIZE)
17132 pdma_phys_sgl1 =
17133 lpfc_ncmd->dma_phys_sgl +
17134 SGL_PAGE_SIZE;
17135 else
17136 pdma_phys_sgl1 = 0;
17137 cur_xritag = lpfc_ncmd->cur_iocbq.sli4_xritag;
17138 status = lpfc_sli4_post_sgl(
17139 phba, lpfc_ncmd->dma_phys_sgl,
17140 pdma_phys_sgl1, cur_xritag);
17141 if (status) {
17142
17143 lpfc_ncmd->flags |=
17144 LPFC_SBUF_NOT_POSTED;
17145 } else {
17146
17147 lpfc_ncmd->flags &=
17148 ~LPFC_SBUF_NOT_POSTED;
17149 lpfc_ncmd->status = IOSTAT_SUCCESS;
17150 num_posted++;
17151 }
17152
17153 list_add_tail(&lpfc_ncmd->list, &nvme_nblist);
17154 }
17155 }
17156
17157
17158 if (post_cnt == 0)
17159 continue;
17160
17161
17162 status = lpfc_sli4_post_io_sgl_block(phba, &blck_nblist,
17163 post_cnt);
17164
17165
17166 if (block_cnt == 0)
17167 last_xritag = NO_XRI;
17168
17169
17170 post_cnt = 0;
17171
17172
17173 while (!list_empty(&blck_nblist)) {
17174 list_remove_head(&blck_nblist, lpfc_ncmd,
17175 struct lpfc_io_buf, list);
17176 if (status) {
17177
17178 lpfc_ncmd->flags |= LPFC_SBUF_NOT_POSTED;
17179 } else {
17180
17181 lpfc_ncmd->flags &= ~LPFC_SBUF_NOT_POSTED;
17182 lpfc_ncmd->status = IOSTAT_SUCCESS;
17183 num_posted++;
17184 }
17185 list_add_tail(&lpfc_ncmd->list, &nvme_nblist);
17186 }
17187 }
17188
17189 lpfc_io_buf_replenish(phba, &nvme_nblist);
17190
17191 return num_posted;
17192}
17193
17194
17195
17196
17197
17198
17199
17200
17201
17202
17203
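/**
 * lpfc_fc_frame_check - Sanity check a received Fibre Channel frame header
 * @phba: Pointer to the HBA structure.
 * @fc_hdr: Pointer to the received frame header.
 *
 * Validates the R_CTL and TYPE fields of an unsolicited frame; a VFT header
 * is skipped and the encapsulated header re-checked.
 *
 * Returns 0 if the frame should be processed further, 1 if it must be
 * dropped.
 **/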
17204static int
17205lpfc_fc_frame_check(struct lpfc_hba *phba, struct fc_frame_header *fc_hdr)
17206{
17207
17208 struct fc_vft_header *fc_vft_hdr;
17209 uint32_t *header = (uint32_t *) fc_hdr;
17210
17211#define FC_RCTL_MDS_DIAGS 0xF4
17212
17213 switch (fc_hdr->fh_r_ctl) {
17214 case FC_RCTL_DD_UNCAT:
17215 case FC_RCTL_DD_SOL_DATA:
17216 case FC_RCTL_DD_UNSOL_CTL:
17217 case FC_RCTL_DD_SOL_CTL:
17218 case FC_RCTL_DD_UNSOL_DATA:
17219 case FC_RCTL_DD_DATA_DESC:
17220 case FC_RCTL_DD_UNSOL_CMD:
17221 case FC_RCTL_DD_CMD_STATUS:
17222 case FC_RCTL_ELS_REQ:
17223 case FC_RCTL_ELS_REP:
17224 case FC_RCTL_ELS4_REQ:
17225 case FC_RCTL_ELS4_REP:
17226 case FC_RCTL_BA_NOP:
17227 case FC_RCTL_BA_ABTS:
17228 case FC_RCTL_BA_RMC:
17229 case FC_RCTL_BA_ACC:
17230 case FC_RCTL_BA_RJT:
17231 case FC_RCTL_BA_PRMT:
17232 case FC_RCTL_ACK_1:
17233 case FC_RCTL_ACK_0:
17234 case FC_RCTL_P_RJT:
17235 case FC_RCTL_F_RJT:
17236 case FC_RCTL_P_BSY:
17237 case FC_RCTL_F_BSY:
17238 case FC_RCTL_F_BSYL:
17239 case FC_RCTL_LCR:
17240 case FC_RCTL_MDS_DIAGS:
17241 case FC_RCTL_END:
17242 break;
17243 case FC_RCTL_VFTH:
17244 fc_vft_hdr = (struct fc_vft_header *)fc_hdr;
17245 fc_hdr = &((struct fc_frame_header *)fc_vft_hdr)[1];
17246 return lpfc_fc_frame_check(phba, fc_hdr);
17247 default:
17248 goto drop;
17249 }
17250
17251 switch (fc_hdr->fh_type) {
17252 case FC_TYPE_BLS:
17253 case FC_TYPE_ELS:
17254 case FC_TYPE_FCP:
17255 case FC_TYPE_CT:
17256 case FC_TYPE_NVME:
17257 break;
17258 case FC_TYPE_IP:
17259 case FC_TYPE_ILS:
17260 default:
17261 goto drop;
17262 }
17263
17264 lpfc_printf_log(phba, KERN_INFO, LOG_ELS,
17265 "2538 Received frame rctl:x%x, type:x%x, "
17266 "frame Data:%08x %08x %08x %08x %08x %08x %08x\n",
17267 fc_hdr->fh_r_ctl, fc_hdr->fh_type,
17268 be32_to_cpu(header[0]), be32_to_cpu(header[1]),
17269 be32_to_cpu(header[2]), be32_to_cpu(header[3]),
17270 be32_to_cpu(header[4]), be32_to_cpu(header[5]),
17271 be32_to_cpu(header[6]));
17272 return 0;
17273drop:
17274 lpfc_printf_log(phba, KERN_WARNING, LOG_ELS,
17275 "2539 Dropped frame rctl:x%x type:x%x\n",
17276 fc_hdr->fh_r_ctl, fc_hdr->fh_type);
17277 return 1;
17278}
17279
17280
17281
17282
17283
17284
17285
17286
17287
17288static uint32_t
17289lpfc_fc_hdr_get_vfi(struct fc_frame_header *fc_hdr)
17290{
17291 struct fc_vft_header *fc_vft_hdr = (struct fc_vft_header *)fc_hdr;
17292
17293 if (fc_hdr->fh_r_ctl != FC_RCTL_VFTH)
17294 return 0;
17295 return bf_get(fc_vft_hdr_vf_id, fc_vft_hdr);
17296}
17297
17298
17299
17300
17301
17302
17303
17304
17305
17306
17307
17308
17309
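/**
 * lpfc_fc_frame_to_vport - Resolve the vport a received frame belongs to
 * @phba: Pointer to the HBA structure.
 * @fc_hdr: Pointer to the received frame header.
 * @fcfi: FCF index the frame was received on.
 * @did: Destination ID taken from the frame.
 *
 * Fabric-addressed frames, and point-to-point frames received before the
 * link is fully ready, are given to the physical port; otherwise the vport
 * list is searched for a matching FCFI, VFI and D_ID.
 *
 * Returns the matching vport, or NULL if none is found.
 **/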
17310static struct lpfc_vport *
17311lpfc_fc_frame_to_vport(struct lpfc_hba *phba, struct fc_frame_header *fc_hdr,
17312 uint16_t fcfi, uint32_t did)
17313{
17314 struct lpfc_vport **vports;
17315 struct lpfc_vport *vport = NULL;
17316 int i;
17317
17318 if (did == Fabric_DID)
17319 return phba->pport;
17320 if ((phba->pport->fc_flag & FC_PT2PT) &&
17321 !(phba->link_state == LPFC_HBA_READY))
17322 return phba->pport;
17323
17324 vports = lpfc_create_vport_work_array(phba);
17325 if (vports != NULL) {
17326 for (i = 0; i <= phba->max_vpi && vports[i] != NULL; i++) {
17327 if (phba->fcf.fcfi == fcfi &&
17328 vports[i]->vfi == lpfc_fc_hdr_get_vfi(fc_hdr) &&
17329 vports[i]->fc_myDID == did) {
17330 vport = vports[i];
17331 break;
17332 }
17333 }
17334 }
17335 lpfc_destroy_vport_work_array(phba, vports);
17336 return vport;
17337}
17338
17339
17340
17341
17342
17343
17344
17345
17346
17347
17348
17349static void
17350lpfc_update_rcv_time_stamp(struct lpfc_vport *vport)
17351{
17352 struct lpfc_dmabuf *h_buf;
17353 struct hbq_dmabuf *dmabuf = NULL;
17354
17355
17356 h_buf = list_get_first(&vport->rcv_buffer_list,
17357 struct lpfc_dmabuf, list);
17358 if (!h_buf)
17359 return;
17360 dmabuf = container_of(h_buf, struct hbq_dmabuf, hbuf);
17361 vport->rcv_buffer_time_stamp = dmabuf->time_stamp;
17362}
17363
17364
17365
17366
17367
17368
17369
17370
17371
17372void
17373lpfc_cleanup_rcv_buffers(struct lpfc_vport *vport)
17374{
17375 struct lpfc_dmabuf *h_buf, *hnext;
17376 struct lpfc_dmabuf *d_buf, *dnext;
17377 struct hbq_dmabuf *dmabuf = NULL;
17378
17379
17380 list_for_each_entry_safe(h_buf, hnext, &vport->rcv_buffer_list, list) {
17381 dmabuf = container_of(h_buf, struct hbq_dmabuf, hbuf);
17382 list_del_init(&dmabuf->hbuf.list);
17383 list_for_each_entry_safe(d_buf, dnext,
17384 &dmabuf->dbuf.list, list) {
17385 list_del_init(&d_buf->list);
17386 lpfc_in_buf_free(vport->phba, d_buf);
17387 }
17388 lpfc_in_buf_free(vport->phba, &dmabuf->dbuf);
17389 }
17390}
17391
17392
17393
17394
17395
17396
17397
17398
17399
17400
17401
17402
17403
17404void
17405lpfc_rcv_seq_check_edtov(struct lpfc_vport *vport)
17406{
17407 struct lpfc_dmabuf *h_buf, *hnext;
17408 struct lpfc_dmabuf *d_buf, *dnext;
17409 struct hbq_dmabuf *dmabuf = NULL;
17410 unsigned long timeout;
17411 int abort_count = 0;
17412
17413 timeout = (msecs_to_jiffies(vport->phba->fc_edtov) +
17414 vport->rcv_buffer_time_stamp);
17415 if (list_empty(&vport->rcv_buffer_list) ||
17416 time_before(jiffies, timeout))
17417 return;
17418
17419 list_for_each_entry_safe(h_buf, hnext, &vport->rcv_buffer_list, list) {
17420 dmabuf = container_of(h_buf, struct hbq_dmabuf, hbuf);
17421 timeout = (msecs_to_jiffies(vport->phba->fc_edtov) +
17422 dmabuf->time_stamp);
17423 if (time_before(jiffies, timeout))
17424 break;
17425 abort_count++;
17426 list_del_init(&dmabuf->hbuf.list);
17427 list_for_each_entry_safe(d_buf, dnext,
17428 &dmabuf->dbuf.list, list) {
17429 list_del_init(&d_buf->list);
17430 lpfc_in_buf_free(vport->phba, d_buf);
17431 }
17432 lpfc_in_buf_free(vport->phba, &dmabuf->dbuf);
17433 }
17434 if (abort_count)
17435 lpfc_update_rcv_time_stamp(vport);
17436}
17437
17438
17439
17440
17441
17442
17443
17444
17445
17446
17447
17448
17449
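/**
 * lpfc_fc_frame_add - Add a received frame to the vport's sequence list
 * @vport: Pointer to the vport the frame was received on.
 * @dmabuf: Buffer containing the received frame.
 *
 * Searches the vport receive buffer list for an existing partial sequence
 * with the same SEQ_ID, OX_ID and S_ID. A new sequence starts a new list
 * entry; otherwise the frame is inserted into the existing sequence in
 * SEQ_CNT order and the sequence timestamp is refreshed.
 *
 * Returns the sequence the frame was added to, or NULL if the frame could
 * not be linked into the sequence (for example, a duplicate SEQ_CNT).
 **/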
17450static struct hbq_dmabuf *
17451lpfc_fc_frame_add(struct lpfc_vport *vport, struct hbq_dmabuf *dmabuf)
17452{
17453 struct fc_frame_header *new_hdr;
17454 struct fc_frame_header *temp_hdr;
17455 struct lpfc_dmabuf *d_buf;
17456 struct lpfc_dmabuf *h_buf;
17457 struct hbq_dmabuf *seq_dmabuf = NULL;
17458 struct hbq_dmabuf *temp_dmabuf = NULL;
17459 uint8_t found = 0;
17460
17461 INIT_LIST_HEAD(&dmabuf->dbuf.list);
17462 dmabuf->time_stamp = jiffies;
17463 new_hdr = (struct fc_frame_header *)dmabuf->hbuf.virt;
17464
17465
17466 list_for_each_entry(h_buf, &vport->rcv_buffer_list, list) {
17467 temp_hdr = (struct fc_frame_header *)h_buf->virt;
17468 if ((temp_hdr->fh_seq_id != new_hdr->fh_seq_id) ||
17469 (temp_hdr->fh_ox_id != new_hdr->fh_ox_id) ||
17470 (memcmp(&temp_hdr->fh_s_id, &new_hdr->fh_s_id, 3)))
17471 continue;
17472
17473 seq_dmabuf = container_of(h_buf, struct hbq_dmabuf, hbuf);
17474 break;
17475 }
17476 if (!seq_dmabuf) {
17477
17478
17479
17480
17481 list_add_tail(&dmabuf->hbuf.list, &vport->rcv_buffer_list);
17482 lpfc_update_rcv_time_stamp(vport);
17483 return dmabuf;
17484 }
17485 temp_hdr = seq_dmabuf->hbuf.virt;
17486 if (be16_to_cpu(new_hdr->fh_seq_cnt) <
17487 be16_to_cpu(temp_hdr->fh_seq_cnt)) {
17488 list_del_init(&seq_dmabuf->hbuf.list);
17489 list_add_tail(&dmabuf->hbuf.list, &vport->rcv_buffer_list);
17490 list_add_tail(&dmabuf->dbuf.list, &seq_dmabuf->dbuf.list);
17491 lpfc_update_rcv_time_stamp(vport);
17492 return dmabuf;
17493 }
17494
17495 list_move_tail(&seq_dmabuf->hbuf.list, &vport->rcv_buffer_list);
17496 seq_dmabuf->time_stamp = jiffies;
17497 lpfc_update_rcv_time_stamp(vport);
17498 if (list_empty(&seq_dmabuf->dbuf.list)) {
17499 temp_hdr = dmabuf->hbuf.virt;
17500 list_add_tail(&dmabuf->dbuf.list, &seq_dmabuf->dbuf.list);
17501 return seq_dmabuf;
17502 }
17503
17504 d_buf = list_entry(seq_dmabuf->dbuf.list.prev, typeof(*d_buf), list);
17505 while (!found) {
17506 temp_dmabuf = container_of(d_buf, struct hbq_dmabuf, dbuf);
17507 temp_hdr = (struct fc_frame_header *)temp_dmabuf->hbuf.virt;
17508
17509
17510
17511
17512 if (be16_to_cpu(new_hdr->fh_seq_cnt) >
17513 be16_to_cpu(temp_hdr->fh_seq_cnt)) {
17514 list_add(&dmabuf->dbuf.list, &temp_dmabuf->dbuf.list);
17515 found = 1;
17516 break;
17517 }
17518
17519 if (&d_buf->list == &seq_dmabuf->dbuf.list)
17520 break;
17521 d_buf = list_entry(d_buf->list.prev, typeof(*d_buf), list);
17522 }
17523
17524 if (found)
17525 return seq_dmabuf;
17526 return NULL;
17527}
17528
17529
17530
17531
17532
17533
17534
17535
17536
17537
17538
17539
17540
17541
17542
17543
17544
17545static bool
17546lpfc_sli4_abort_partial_seq(struct lpfc_vport *vport,
17547 struct hbq_dmabuf *dmabuf)
17548{
17549 struct fc_frame_header *new_hdr;
17550 struct fc_frame_header *temp_hdr;
17551 struct lpfc_dmabuf *d_buf, *n_buf, *h_buf;
17552 struct hbq_dmabuf *seq_dmabuf = NULL;
17553
17554
17555 INIT_LIST_HEAD(&dmabuf->dbuf.list);
17556 INIT_LIST_HEAD(&dmabuf->hbuf.list);
17557 new_hdr = (struct fc_frame_header *)dmabuf->hbuf.virt;
17558 list_for_each_entry(h_buf, &vport->rcv_buffer_list, list) {
17559 temp_hdr = (struct fc_frame_header *)h_buf->virt;
17560 if ((temp_hdr->fh_seq_id != new_hdr->fh_seq_id) ||
17561 (temp_hdr->fh_ox_id != new_hdr->fh_ox_id) ||
17562 (memcmp(&temp_hdr->fh_s_id, &new_hdr->fh_s_id, 3)))
17563 continue;
17564
17565 seq_dmabuf = container_of(h_buf, struct hbq_dmabuf, hbuf);
17566 break;
17567 }
17568
17569
17570 if (seq_dmabuf) {
17571 list_for_each_entry_safe(d_buf, n_buf,
17572 &seq_dmabuf->dbuf.list, list) {
17573 list_del_init(&d_buf->list);
17574 lpfc_in_buf_free(vport->phba, d_buf);
17575 }
17576 return true;
17577 }
17578 return false;
17579}
17580
17581
17582
17583
17584
17585
17586
17587
17588
17589
17590
17591
17592
17593
17594
17595
17596
17597static bool
17598lpfc_sli4_abort_ulp_seq(struct lpfc_vport *vport, struct hbq_dmabuf *dmabuf)
17599{
17600 struct lpfc_hba *phba = vport->phba;
17601 int handled;
17602
17603
17604 if (phba->sli_rev < LPFC_SLI_REV4)
17605 return false;
17606
17607
17608 handled = lpfc_ct_handle_unsol_abort(phba, dmabuf);
17609 if (handled)
17610 return true;
17611
17612 return false;
17613}
17614
17615
17616
17617
17618
17619
17620
17621
17622
17623
17624
17625static void
17626lpfc_sli4_seq_abort_rsp_cmpl(struct lpfc_hba *phba,
17627 struct lpfc_iocbq *cmd_iocbq,
17628 struct lpfc_iocbq *rsp_iocbq)
17629{
17630 struct lpfc_nodelist *ndlp;
17631
17632 if (cmd_iocbq) {
17633 ndlp = (struct lpfc_nodelist *)cmd_iocbq->context1;
17634 lpfc_nlp_put(ndlp);
17635 lpfc_nlp_not_used(ndlp);
17636 lpfc_sli_release_iocbq(phba, cmd_iocbq);
17637 }
17638
17639
17640 if (rsp_iocbq && rsp_iocbq->iocb.ulpStatus)
17641 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
17642 "3154 BLS ABORT RSP failed, data: x%x/x%x\n",
17643 rsp_iocbq->iocb.ulpStatus,
17644 rsp_iocbq->iocb.un.ulpWord[4]);
17645}
17646
17647
17648
17649
17650
17651
17652
17653
17654
17655uint16_t
17656lpfc_sli4_xri_inrange(struct lpfc_hba *phba,
17657 uint16_t xri)
17658{
17659 uint16_t i;
17660
17661 for (i = 0; i < phba->sli4_hba.max_cfg_param.max_xri; i++) {
17662 if (xri == phba->sli4_hba.xri_ids[i])
17663 return i;
17664 }
17665 return NO_XRI;
17666}
17667
17668
17669
17670
17671
17672
17673
17674
17675
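/**
 * lpfc_sli4_seq_abort_rsp - Send a BLS response to an unsolicited ABTS
 * @vport: Pointer to the vport the ABTS was received on.
 * @fc_hdr: Header of the received ABTS frame.
 * @aborted: True if the exchange was successfully aborted locally.
 *
 * Builds and issues an XMIT_BLS_RSP64_CX iocb carrying either a BA_ACC or,
 * when the exchange could not be aborted or the XRI is out of range, a
 * BA_RJT. A node is allocated or re-enabled for the source ID if needed, and
 * the RRQ is marked active for an in-range XRI.
 **/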
17676void
17677lpfc_sli4_seq_abort_rsp(struct lpfc_vport *vport,
17678 struct fc_frame_header *fc_hdr, bool aborted)
17679{
17680 struct lpfc_hba *phba = vport->phba;
17681 struct lpfc_iocbq *ctiocb = NULL;
17682 struct lpfc_nodelist *ndlp;
17683 uint16_t oxid, rxid, xri, lxri;
17684 uint32_t sid, fctl;
17685 IOCB_t *icmd;
17686 int rc;
17687
17688 if (!lpfc_is_link_up(phba))
17689 return;
17690
17691 sid = sli4_sid_from_fc_hdr(fc_hdr);
17692 oxid = be16_to_cpu(fc_hdr->fh_ox_id);
17693 rxid = be16_to_cpu(fc_hdr->fh_rx_id);
17694
17695 ndlp = lpfc_findnode_did(vport, sid);
17696 if (!ndlp) {
17697 ndlp = lpfc_nlp_init(vport, sid);
17698 if (!ndlp) {
17699 lpfc_printf_vlog(vport, KERN_WARNING, LOG_ELS,
17700 "1268 Failed to allocate ndlp for "
17701 "oxid:x%x SID:x%x\n", oxid, sid);
17702 return;
17703 }
17704
17705 lpfc_enqueue_node(vport, ndlp);
17706 } else if (!NLP_CHK_NODE_ACT(ndlp)) {
17707
17708 ndlp = lpfc_enable_node(vport, ndlp, NLP_STE_UNUSED_NODE);
17709 if (!ndlp) {
17710 lpfc_printf_vlog(vport, KERN_WARNING, LOG_ELS,
					"3275 Failed to activate ndlp found "
17712 "for oxid:x%x SID:x%x\n", oxid, sid);
17713 return;
17714 }
17715 }
17716
17717
17718 ctiocb = lpfc_sli_get_iocbq(phba);
17719 if (!ctiocb)
17720 return;
17721
17722
17723 fctl = sli4_fctl_from_fc_hdr(fc_hdr);
17724
17725 icmd = &ctiocb->iocb;
17726 icmd->un.xseq64.bdl.bdeSize = 0;
17727 icmd->un.xseq64.bdl.ulpIoTag32 = 0;
17728 icmd->un.xseq64.w5.hcsw.Dfctl = 0;
17729 icmd->un.xseq64.w5.hcsw.Rctl = FC_RCTL_BA_ACC;
17730 icmd->un.xseq64.w5.hcsw.Type = FC_TYPE_BLS;
17731
17732
17733 icmd->ulpCommand = CMD_XMIT_BLS_RSP64_CX;
17734 icmd->ulpBdeCount = 0;
17735 icmd->ulpLe = 1;
17736 icmd->ulpClass = CLASS3;
17737 icmd->ulpContext = phba->sli4_hba.rpi_ids[ndlp->nlp_rpi];
17738 ctiocb->context1 = lpfc_nlp_get(ndlp);
17739
17740 ctiocb->vport = phba->pport;
17741 ctiocb->iocb_cmpl = lpfc_sli4_seq_abort_rsp_cmpl;
17742 ctiocb->sli4_lxritag = NO_XRI;
17743 ctiocb->sli4_xritag = NO_XRI;
17744
17745 if (fctl & FC_FC_EX_CTX)
17746
17747
17748
17749 xri = oxid;
17750 else
17751 xri = rxid;
17752 lxri = lpfc_sli4_xri_inrange(phba, xri);
17753 if (lxri != NO_XRI)
17754 lpfc_set_rrq_active(phba, ndlp, lxri,
17755 (xri == oxid) ? rxid : oxid, 0);
17756
17757
17758
17759
17760
17761 if ((fctl & FC_FC_EX_CTX) &&
17762 (lxri > lpfc_sli4_get_iocb_cnt(phba))) {
17763 icmd->un.xseq64.w5.hcsw.Rctl = FC_RCTL_BA_RJT;
17764 bf_set(lpfc_vndr_code, &icmd->un.bls_rsp, 0);
17765 bf_set(lpfc_rsn_expln, &icmd->un.bls_rsp, FC_BA_RJT_INV_XID);
17766 bf_set(lpfc_rsn_code, &icmd->un.bls_rsp, FC_BA_RJT_UNABLE);
17767 }
17768
17769
17770
17771
17772
17773 if (aborted == false) {
17774 icmd->un.xseq64.w5.hcsw.Rctl = FC_RCTL_BA_RJT;
17775 bf_set(lpfc_vndr_code, &icmd->un.bls_rsp, 0);
17776 bf_set(lpfc_rsn_expln, &icmd->un.bls_rsp, FC_BA_RJT_INV_XID);
17777 bf_set(lpfc_rsn_code, &icmd->un.bls_rsp, FC_BA_RJT_UNABLE);
17778 }
17779
17780 if (fctl & FC_FC_EX_CTX) {
17781
17782
17783
17784
17785 bf_set(lpfc_abts_orig, &icmd->un.bls_rsp, LPFC_ABTS_UNSOL_RSP);
17786 } else {
17787
17788
17789
17790
17791 bf_set(lpfc_abts_orig, &icmd->un.bls_rsp, LPFC_ABTS_UNSOL_INT);
17792 }
17793 bf_set(lpfc_abts_rxid, &icmd->un.bls_rsp, rxid);
17794 bf_set(lpfc_abts_oxid, &icmd->un.bls_rsp, oxid);
17795
17796
17797 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
17798 "1200 Send BLS cmd x%x on oxid x%x Data: x%x\n",
17799 icmd->un.xseq64.w5.hcsw.Rctl, oxid, phba->link_state);
17800
17801 rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, ctiocb, 0);
17802 if (rc == IOCB_ERROR) {
17803 lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS,
17804 "2925 Failed to issue CT ABTS RSP x%x on "
17805 "xri x%x, Data x%x\n",
17806 icmd->un.xseq64.w5.hcsw.Rctl, oxid,
17807 phba->link_state);
17808 lpfc_nlp_put(ndlp);
17809 ctiocb->context1 = NULL;
17810 lpfc_sli_release_iocbq(phba, ctiocb);
17811 }
17812}
17813
17814
17815
17816
17817
17818
17819
17820
17821
17822
17823
17824
17825
17826
17827static void
17828lpfc_sli4_handle_unsol_abort(struct lpfc_vport *vport,
17829 struct hbq_dmabuf *dmabuf)
17830{
17831 struct lpfc_hba *phba = vport->phba;
17832 struct fc_frame_header fc_hdr;
17833 uint32_t fctl;
17834 bool aborted;
17835
17836
17837 memcpy(&fc_hdr, dmabuf->hbuf.virt, sizeof(struct fc_frame_header));
17838 fctl = sli4_fctl_from_fc_hdr(&fc_hdr);
17839
17840 if (fctl & FC_FC_EX_CTX) {
17841
17842 aborted = true;
17843 } else {
17844
17845 aborted = lpfc_sli4_abort_partial_seq(vport, dmabuf);
17846 if (aborted == false)
17847 aborted = lpfc_sli4_abort_ulp_seq(vport, dmabuf);
17848 }
17849 lpfc_in_buf_free(phba, &dmabuf->dbuf);
17850
17851 if (phba->nvmet_support) {
17852 lpfc_nvmet_rcv_unsol_abort(vport, &fc_hdr);
17853 return;
17854 }
17855
17856
17857 lpfc_sli4_seq_abort_rsp(vport, &fc_hdr, aborted);
17858}
17859
17860
17861
17862
17863
17864
17865
17866
17867
17868
17869
17870
17871
17872static int
17873lpfc_seq_complete(struct hbq_dmabuf *dmabuf)
17874{
17875 struct fc_frame_header *hdr;
17876 struct lpfc_dmabuf *d_buf;
17877 struct hbq_dmabuf *seq_dmabuf;
17878 uint32_t fctl;
17879 int seq_count = 0;
17880
17881 hdr = (struct fc_frame_header *)dmabuf->hbuf.virt;
17882
17883 if (hdr->fh_seq_cnt != seq_count)
17884 return 0;
17885 fctl = (hdr->fh_f_ctl[0] << 16 |
17886 hdr->fh_f_ctl[1] << 8 |
17887 hdr->fh_f_ctl[2]);
17888
17889 if (fctl & FC_FC_END_SEQ)
17890 return 1;
17891 list_for_each_entry(d_buf, &dmabuf->dbuf.list, list) {
17892 seq_dmabuf = container_of(d_buf, struct hbq_dmabuf, dbuf);
17893 hdr = (struct fc_frame_header *)seq_dmabuf->hbuf.virt;
17894
17895 if (++seq_count != be16_to_cpu(hdr->fh_seq_cnt))
17896 return 0;
17897 fctl = (hdr->fh_f_ctl[0] << 16 |
17898 hdr->fh_f_ctl[1] << 8 |
17899 hdr->fh_f_ctl[2]);
17900
17901 if (fctl & FC_FC_END_SEQ)
17902 return 1;
17903 }
17904 return 0;
17905}
17906
17907
17908
17909
17910
17911
17912
17913
17914
17915
17916
17917
17918
17919
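/**
 * lpfc_prep_seq - Turn a completed receive sequence into an iocb chain
 * @vport: Pointer to the vport the sequence was received on.
 * @seq_dmabuf: First buffer of the completed sequence.
 *
 * Allocates iocbs and attaches up to two receive buffers to each, building a
 * chain that mimics an SLI-3 unsolicited receive so the existing ULP
 * handlers can consume it. The accumulated sequence length is carried in
 * unsli3.rcvsli3.acc_len.
 *
 * Returns the first iocb of the chain, or NULL if no iocb could be
 * allocated (in which case the buffers are freed).
 **/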
17920static struct lpfc_iocbq *
17921lpfc_prep_seq(struct lpfc_vport *vport, struct hbq_dmabuf *seq_dmabuf)
17922{
17923 struct hbq_dmabuf *hbq_buf;
17924 struct lpfc_dmabuf *d_buf, *n_buf;
17925 struct lpfc_iocbq *first_iocbq, *iocbq;
17926 struct fc_frame_header *fc_hdr;
17927 uint32_t sid;
17928 uint32_t len, tot_len;
17929 struct ulp_bde64 *pbde;
17930
17931 fc_hdr = (struct fc_frame_header *)seq_dmabuf->hbuf.virt;
17932
17933 list_del_init(&seq_dmabuf->hbuf.list);
17934 lpfc_update_rcv_time_stamp(vport);
17935
17936 sid = sli4_sid_from_fc_hdr(fc_hdr);
17937 tot_len = 0;
17938
17939 first_iocbq = lpfc_sli_get_iocbq(vport->phba);
17940 if (first_iocbq) {
17941
17942 first_iocbq->iocb.unsli3.rcvsli3.acc_len = 0;
17943 first_iocbq->iocb.ulpStatus = IOSTAT_SUCCESS;
17944 first_iocbq->vport = vport;
17945
17946
17947 if (sli4_type_from_fc_hdr(fc_hdr) == FC_TYPE_ELS) {
17948 first_iocbq->iocb.ulpCommand = CMD_IOCB_RCV_ELS64_CX;
17949 first_iocbq->iocb.un.rcvels.parmRo =
17950 sli4_did_from_fc_hdr(fc_hdr);
17951 first_iocbq->iocb.ulpPU = PARM_NPIV_DID;
17952 } else
17953 first_iocbq->iocb.ulpCommand = CMD_IOCB_RCV_SEQ64_CX;
17954 first_iocbq->iocb.ulpContext = NO_XRI;
17955 first_iocbq->iocb.unsli3.rcvsli3.ox_id =
17956 be16_to_cpu(fc_hdr->fh_ox_id);
17957
17958 first_iocbq->iocb.unsli3.rcvsli3.vpi =
17959 vport->phba->vpi_ids[vport->vpi];
17960
17961 tot_len = bf_get(lpfc_rcqe_length,
17962 &seq_dmabuf->cq_event.cqe.rcqe_cmpl);
17963
17964 first_iocbq->context2 = &seq_dmabuf->dbuf;
17965 first_iocbq->context3 = NULL;
17966 first_iocbq->iocb.ulpBdeCount = 1;
17967 if (tot_len > LPFC_DATA_BUF_SIZE)
17968 first_iocbq->iocb.un.cont64[0].tus.f.bdeSize =
17969 LPFC_DATA_BUF_SIZE;
17970 else
17971 first_iocbq->iocb.un.cont64[0].tus.f.bdeSize = tot_len;
17972
17973 first_iocbq->iocb.un.rcvels.remoteID = sid;
17974
17975 first_iocbq->iocb.unsli3.rcvsli3.acc_len = tot_len;
17976 }
17977 iocbq = first_iocbq;
17978
17979
17980
17981
17982 list_for_each_entry_safe(d_buf, n_buf, &seq_dmabuf->dbuf.list, list) {
17983 if (!iocbq) {
17984 lpfc_in_buf_free(vport->phba, d_buf);
17985 continue;
17986 }
17987 if (!iocbq->context3) {
17988 iocbq->context3 = d_buf;
17989 iocbq->iocb.ulpBdeCount++;
17990
17991 hbq_buf = container_of(d_buf, struct hbq_dmabuf, dbuf);
17992 len = bf_get(lpfc_rcqe_length,
17993 &hbq_buf->cq_event.cqe.rcqe_cmpl);
17994 pbde = (struct ulp_bde64 *)
17995 &iocbq->iocb.unsli3.sli3Words[4];
17996 if (len > LPFC_DATA_BUF_SIZE)
17997 pbde->tus.f.bdeSize = LPFC_DATA_BUF_SIZE;
17998 else
17999 pbde->tus.f.bdeSize = len;
18000
18001 iocbq->iocb.unsli3.rcvsli3.acc_len += len;
18002 tot_len += len;
18003 } else {
18004 iocbq = lpfc_sli_get_iocbq(vport->phba);
18005 if (!iocbq) {
18006 if (first_iocbq) {
18007 first_iocbq->iocb.ulpStatus =
18008 IOSTAT_FCP_RSP_ERROR;
18009 first_iocbq->iocb.un.ulpWord[4] =
18010 IOERR_NO_RESOURCES;
18011 }
18012 lpfc_in_buf_free(vport->phba, d_buf);
18013 continue;
18014 }
18015
18016 hbq_buf = container_of(d_buf, struct hbq_dmabuf, dbuf);
18017 len = bf_get(lpfc_rcqe_length,
18018 &hbq_buf->cq_event.cqe.rcqe_cmpl);
18019 iocbq->context2 = d_buf;
18020 iocbq->context3 = NULL;
18021 iocbq->iocb.ulpBdeCount = 1;
18022 if (len > LPFC_DATA_BUF_SIZE)
18023 iocbq->iocb.un.cont64[0].tus.f.bdeSize =
18024 LPFC_DATA_BUF_SIZE;
18025 else
18026 iocbq->iocb.un.cont64[0].tus.f.bdeSize = len;
18027
18028 tot_len += len;
18029 iocbq->iocb.unsli3.rcvsli3.acc_len = tot_len;
18030
18031 iocbq->iocb.un.rcvels.remoteID = sid;
18032 list_add_tail(&iocbq->list, &first_iocbq->list);
18033 }
18034 }
18035
18036 if (!first_iocbq)
18037 lpfc_in_buf_free(vport->phba, &seq_dmabuf->dbuf);
18038
18039 return first_iocbq;
18040}
18041
18042static void
18043lpfc_sli4_send_seq_to_ulp(struct lpfc_vport *vport,
18044 struct hbq_dmabuf *seq_dmabuf)
18045{
18046 struct fc_frame_header *fc_hdr;
18047 struct lpfc_iocbq *iocbq, *curr_iocb, *next_iocb;
18048 struct lpfc_hba *phba = vport->phba;
18049
18050 fc_hdr = (struct fc_frame_header *)seq_dmabuf->hbuf.virt;
18051 iocbq = lpfc_prep_seq(vport, seq_dmabuf);
18052 if (!iocbq) {
18053 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
18054 "2707 Ring %d handler: Failed to allocate "
18055 "iocb Rctl x%x Type x%x received\n",
18056 LPFC_ELS_RING,
18057 fc_hdr->fh_r_ctl, fc_hdr->fh_type);
18058 return;
18059 }
18060 if (!lpfc_complete_unsol_iocb(phba,
18061 phba->sli4_hba.els_wq->pring,
18062 iocbq, fc_hdr->fh_r_ctl,
18063 fc_hdr->fh_type))
18064 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
18065 "2540 Ring %d handler: unexpected Rctl "
18066 "x%x Type x%x received\n",
18067 LPFC_ELS_RING,
18068 fc_hdr->fh_r_ctl, fc_hdr->fh_type);
18069
18070
18071 list_for_each_entry_safe(curr_iocb, next_iocb,
18072 &iocbq->list, list) {
18073 list_del_init(&curr_iocb->list);
18074 lpfc_sli_release_iocbq(phba, curr_iocb);
18075 }
18076 lpfc_sli_release_iocbq(phba, iocbq);
18077}
18078
18079static void
18080lpfc_sli4_mds_loopback_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
18081 struct lpfc_iocbq *rspiocb)
18082{
18083 struct lpfc_dmabuf *pcmd = cmdiocb->context2;
18084
18085 if (pcmd && pcmd->virt)
18086 dma_pool_free(phba->lpfc_drb_pool, pcmd->virt, pcmd->phys);
18087 kfree(pcmd);
18088 lpfc_sli_release_iocbq(phba, cmdiocb);
18089 lpfc_drain_txq(phba);
18090}
18091
18092static void
18093lpfc_sli4_handle_mds_loopback(struct lpfc_vport *vport,
18094 struct hbq_dmabuf *dmabuf)
18095{
18096 struct fc_frame_header *fc_hdr;
18097 struct lpfc_hba *phba = vport->phba;
18098 struct lpfc_iocbq *iocbq = NULL;
18099 union lpfc_wqe *wqe;
18100 struct lpfc_dmabuf *pcmd = NULL;
18101 uint32_t frame_len;
18102 int rc;
18103 unsigned long iflags;
18104
18105 fc_hdr = (struct fc_frame_header *)dmabuf->hbuf.virt;
18106 frame_len = bf_get(lpfc_rcqe_length, &dmabuf->cq_event.cqe.rcqe_cmpl);
18107
18108
18109 iocbq = lpfc_sli_get_iocbq(phba);
18110 if (!iocbq) {
18111
18112 spin_lock_irqsave(&phba->hbalock, iflags);
18113 list_add_tail(&dmabuf->cq_event.list,
18114 &phba->sli4_hba.sp_queue_event);
18115 phba->hba_flag |= HBA_SP_QUEUE_EVT;
18116 spin_unlock_irqrestore(&phba->hbalock, iflags);
18117 lpfc_worker_wake_up(phba);
18118 return;
18119 }
18120
18121
18122 pcmd = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
18123 if (pcmd)
18124 pcmd->virt = dma_pool_alloc(phba->lpfc_drb_pool, GFP_KERNEL,
18125 &pcmd->phys);
18126 if (!pcmd || !pcmd->virt)
18127 goto exit;
18128
18129 INIT_LIST_HEAD(&pcmd->list);
18130
18131
18132 memcpy(pcmd->virt, dmabuf->dbuf.virt, frame_len);
18133
18134
18135 iocbq->iocb.un.xseq64.bdl.addrHigh = putPaddrHigh(pcmd->phys);
18136 iocbq->iocb.un.xseq64.bdl.addrLow = putPaddrLow(pcmd->phys);
18137 iocbq->iocb.un.xseq64.bdl.bdeFlags = BUFF_TYPE_BDE_64;
18138 iocbq->iocb.un.xseq64.bdl.bdeSize = frame_len;
18139
18140 iocbq->context2 = pcmd;
18141 iocbq->vport = vport;
18142 iocbq->iocb_flag &= ~LPFC_FIP_ELS_ID_MASK;
18143 iocbq->iocb_flag |= LPFC_USE_FCPWQIDX;
18144
18145
18146
18147
18148
18149 wqe = (union lpfc_wqe *)&iocbq->iocb;
18150
18151 wqe->send_frame.frame_len = frame_len;
18152 wqe->send_frame.fc_hdr_wd0 = be32_to_cpu(*((uint32_t *)fc_hdr));
18153 wqe->send_frame.fc_hdr_wd1 = be32_to_cpu(*((uint32_t *)fc_hdr + 1));
18154 wqe->send_frame.fc_hdr_wd2 = be32_to_cpu(*((uint32_t *)fc_hdr + 2));
18155 wqe->send_frame.fc_hdr_wd3 = be32_to_cpu(*((uint32_t *)fc_hdr + 3));
18156 wqe->send_frame.fc_hdr_wd4 = be32_to_cpu(*((uint32_t *)fc_hdr + 4));
18157 wqe->send_frame.fc_hdr_wd5 = be32_to_cpu(*((uint32_t *)fc_hdr + 5));
18158
18159 iocbq->iocb.ulpCommand = CMD_SEND_FRAME;
18160 iocbq->iocb.ulpLe = 1;
18161 iocbq->iocb_cmpl = lpfc_sli4_mds_loopback_cmpl;
18162 rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, iocbq, 0);
18163 if (rc == IOCB_ERROR)
18164 goto exit;
18165
18166 lpfc_in_buf_free(phba, &dmabuf->dbuf);
18167 return;
18168
18169exit:
18170 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
18171 "2023 Unable to process MDS loopback frame\n");
18172 if (pcmd && pcmd->virt)
18173 dma_pool_free(phba->lpfc_drb_pool, pcmd->virt, pcmd->phys);
18174 kfree(pcmd);
18175 if (iocbq)
18176 lpfc_sli_release_iocbq(phba, iocbq);
18177 lpfc_in_buf_free(phba, &dmabuf->dbuf);
18178}
18179
18180
18181
18182
18183
18184
18185
18186
18187
18188
18189
18190
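/**
 * lpfc_sli4_handle_received_buffer - Process an unsolicited receive buffer
 * @phba: Pointer to the HBA structure.
 * @dmabuf: Buffer containing the received frame.
 *
 * Entry point for SLI-4 unsolicited frames: MDS diagnostic loopback frames
 * are redirected, the header is sanity checked, the owning vport is
 * resolved, unsolicited ABTS frames are handled separately, and remaining
 * frames are accumulated until the sequence is complete and can be sent to
 * the upper layer protocol.
 **/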
18191void
18192lpfc_sli4_handle_received_buffer(struct lpfc_hba *phba,
18193 struct hbq_dmabuf *dmabuf)
18194{
18195 struct hbq_dmabuf *seq_dmabuf;
18196 struct fc_frame_header *fc_hdr;
18197 struct lpfc_vport *vport;
18198 uint32_t fcfi;
18199 uint32_t did;
18200
18201
18202 fc_hdr = (struct fc_frame_header *)dmabuf->hbuf.virt;
18203
18204 if (fc_hdr->fh_r_ctl == FC_RCTL_MDS_DIAGS ||
18205 fc_hdr->fh_r_ctl == FC_RCTL_DD_UNSOL_DATA) {
18206 vport = phba->pport;
18207
18208 lpfc_sli4_handle_mds_loopback(vport, dmabuf);
18209 return;
18210 }
18211
18212
18213 if (lpfc_fc_frame_check(phba, fc_hdr)) {
18214 lpfc_in_buf_free(phba, &dmabuf->dbuf);
18215 return;
18216 }
18217
18218 if ((bf_get(lpfc_cqe_code,
18219 &dmabuf->cq_event.cqe.rcqe_cmpl) == CQE_CODE_RECEIVE_V1))
18220 fcfi = bf_get(lpfc_rcqe_fcf_id_v1,
18221 &dmabuf->cq_event.cqe.rcqe_cmpl);
18222 else
18223 fcfi = bf_get(lpfc_rcqe_fcf_id,
18224 &dmabuf->cq_event.cqe.rcqe_cmpl);
18225
18226 if (fc_hdr->fh_r_ctl == 0xF4 && fc_hdr->fh_type == 0xFF) {
18227 vport = phba->pport;
18228 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
18229 "2023 MDS Loopback %d bytes\n",
18230 bf_get(lpfc_rcqe_length,
18231 &dmabuf->cq_event.cqe.rcqe_cmpl));
18232
18233 lpfc_sli4_handle_mds_loopback(vport, dmabuf);
18234 return;
18235 }
18236
18237
18238 did = sli4_did_from_fc_hdr(fc_hdr);
18239
18240 vport = lpfc_fc_frame_to_vport(phba, fc_hdr, fcfi, did);
18241 if (!vport) {
18242
18243 lpfc_in_buf_free(phba, &dmabuf->dbuf);
18244 return;
18245 }
18246
18247
18248 if (!(vport->vpi_state & LPFC_VPI_REGISTERED) &&
18249 (did != Fabric_DID)) {
18250
18251
18252
18253
18254
18255 if (!(vport->fc_flag & FC_PT2PT) ||
18256 (phba->link_state == LPFC_HBA_READY)) {
18257 lpfc_in_buf_free(phba, &dmabuf->dbuf);
18258 return;
18259 }
18260 }
18261
18262
18263 if (fc_hdr->fh_r_ctl == FC_RCTL_BA_ABTS) {
18264 lpfc_sli4_handle_unsol_abort(vport, dmabuf);
18265 return;
18266 }
18267
18268
18269 seq_dmabuf = lpfc_fc_frame_add(vport, dmabuf);
18270 if (!seq_dmabuf) {
18271
18272 lpfc_in_buf_free(phba, &dmabuf->dbuf);
18273 return;
18274 }
18275
18276 if (!lpfc_seq_complete(seq_dmabuf))
18277 return;
18278
18279
18280 lpfc_sli4_send_seq_to_ulp(vport, seq_dmabuf);
18281}
18282
18283
18284
18285
18286
18287
18288
18289
18290
18291
18292
18293
18294
18295
18296
18297
18298
18299
18300
18301
18302
18303
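/**
 * lpfc_sli4_post_all_rpi_hdrs - Post all RPI header pages to the port
 * @phba: Pointer to the HBA structure.
 *
 * Walks the driver's RPI header list and posts each page with
 * lpfc_sli4_post_rpi_hdr(). Skipped entirely when RPI headers are not in
 * use; not supported when resource extents are in use.
 *
 * Returns 0 on success, or -EIO on a posting failure or when extents are in
 * use.
 **/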
18304int
18305lpfc_sli4_post_all_rpi_hdrs(struct lpfc_hba *phba)
18306{
18307 struct lpfc_rpi_hdr *rpi_page;
18308 uint32_t rc = 0;
18309 uint16_t lrpi = 0;
18310
18311
18312 if (!phba->sli4_hba.rpi_hdrs_in_use)
18313 goto exit;
18314 if (phba->sli4_hba.extents_in_use)
18315 return -EIO;
18316
18317 list_for_each_entry(rpi_page, &phba->sli4_hba.lpfc_rpi_hdr_list, list) {
18318
18319
18320
18321
18322
18323 if (bf_get(lpfc_rpi_rsrc_rdy, &phba->sli4_hba.sli4_flags) !=
18324 LPFC_RPI_RSRC_RDY)
18325 rpi_page->start_rpi = phba->sli4_hba.rpi_ids[lrpi];
18326
18327 rc = lpfc_sli4_post_rpi_hdr(phba, rpi_page);
18328 if (rc != MBX_SUCCESS) {
18329 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
18330 "2008 Error %d posting all rpi "
18331 "headers\n", rc);
18332 rc = -EIO;
18333 break;
18334 }
18335 }
18336
18337 exit:
18338 bf_set(lpfc_rpi_rsrc_rdy, &phba->sli4_hba.sli4_flags,
18339 LPFC_RPI_RSRC_RDY);
18340 return rc;
18341}
18342
18343
18344
18345
18346
18347
18348
18349
18350
18351
18352
18353
18354
18355
18356
18357int
18358lpfc_sli4_post_rpi_hdr(struct lpfc_hba *phba, struct lpfc_rpi_hdr *rpi_page)
18359{
18360 LPFC_MBOXQ_t *mboxq;
18361 struct lpfc_mbx_post_hdr_tmpl *hdr_tmpl;
18362 uint32_t rc = 0;
18363 uint32_t shdr_status, shdr_add_status;
18364 union lpfc_sli4_cfg_shdr *shdr;
18365
18366
18367 if (!phba->sli4_hba.rpi_hdrs_in_use)
18368 return rc;
18369 if (phba->sli4_hba.extents_in_use)
18370 return -EIO;
18371
18372
18373 mboxq = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
18374 if (!mboxq) {
18375 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
18376 "2001 Unable to allocate memory for issuing "
18377 "SLI_CONFIG_SPECIAL mailbox command\n");
18378 return -ENOMEM;
18379 }
18380
18381
18382 hdr_tmpl = &mboxq->u.mqe.un.hdr_tmpl;
18383 lpfc_sli4_config(phba, mboxq, LPFC_MBOX_SUBSYSTEM_FCOE,
18384 LPFC_MBOX_OPCODE_FCOE_POST_HDR_TEMPLATE,
18385 sizeof(struct lpfc_mbx_post_hdr_tmpl) -
18386 sizeof(struct lpfc_sli4_cfg_mhdr),
18387 LPFC_SLI4_MBX_EMBED);
18388
18389
18390
18391 bf_set(lpfc_mbx_post_hdr_tmpl_rpi_offset, hdr_tmpl,
18392 rpi_page->start_rpi);
18393 bf_set(lpfc_mbx_post_hdr_tmpl_page_cnt,
18394 hdr_tmpl, rpi_page->page_count);
18395
18396 hdr_tmpl->rpi_paddr_lo = putPaddrLow(rpi_page->dmabuf->phys);
18397 hdr_tmpl->rpi_paddr_hi = putPaddrHigh(rpi_page->dmabuf->phys);
18398 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
18399 shdr = (union lpfc_sli4_cfg_shdr *) &hdr_tmpl->header.cfg_shdr;
18400 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
18401 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
18402 if (rc != MBX_TIMEOUT)
18403 mempool_free(mboxq, phba->mbox_mem_pool);
18404 if (shdr_status || shdr_add_status || rc) {
18405 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
18406 "2514 POST_RPI_HDR mailbox failed with "
18407 "status x%x add_status x%x, mbx status x%x\n",
18408 shdr_status, shdr_add_status, rc);
18409 rc = -ENXIO;
18410 } else {
18411
18412
18413
18414
18415 spin_lock_irq(&phba->hbalock);
18416 phba->sli4_hba.next_rpi = rpi_page->next_rpi;
18417 spin_unlock_irq(&phba->hbalock);
18418 }
18419 return rc;
18420}
18421
18422
18423
18424
18425
18426
18427
18428
18429
18430
18431
18432
18433
18434
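/**
 * lpfc_sli4_alloc_rpi - Allocate an available RPI
 * @phba: Pointer to the HBA structure.
 *
 * Finds and sets the next zero bit in the RPI bitmask under the hbalock.
 * When the number of remaining RPIs drops below LPFC_RPI_LOW_WATER_MARK and
 * RPI headers are in use, another RPI header page is created and posted to
 * grow the pool.
 *
 * Returns the allocated RPI, or LPFC_RPI_ALLOC_ERROR if none is available.
 **/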
18435int
18436lpfc_sli4_alloc_rpi(struct lpfc_hba *phba)
18437{
18438 unsigned long rpi;
18439 uint16_t max_rpi, rpi_limit;
18440 uint16_t rpi_remaining, lrpi = 0;
18441 struct lpfc_rpi_hdr *rpi_hdr;
18442 unsigned long iflag;
18443
18444
18445
18446
18447
18448 spin_lock_irqsave(&phba->hbalock, iflag);
18449 max_rpi = phba->sli4_hba.max_cfg_param.max_rpi;
18450 rpi_limit = phba->sli4_hba.next_rpi;
18451
18452 rpi = find_next_zero_bit(phba->sli4_hba.rpi_bmask, rpi_limit, 0);
18453 if (rpi >= rpi_limit)
18454 rpi = LPFC_RPI_ALLOC_ERROR;
18455 else {
18456 set_bit(rpi, phba->sli4_hba.rpi_bmask);
18457 phba->sli4_hba.max_cfg_param.rpi_used++;
18458 phba->sli4_hba.rpi_count++;
18459 }
18460 lpfc_printf_log(phba, KERN_INFO,
18461 LOG_NODE | LOG_DISCOVERY,
18462 "0001 Allocated rpi:x%x max:x%x lim:x%x\n",
18463 (int) rpi, max_rpi, rpi_limit);
18464
18465
18466
18467
18468
18469 if ((rpi == LPFC_RPI_ALLOC_ERROR) &&
18470 (phba->sli4_hba.rpi_count >= max_rpi)) {
18471 spin_unlock_irqrestore(&phba->hbalock, iflag);
18472 return rpi;
18473 }
18474
18475
18476
18477
18478
18479 if (!phba->sli4_hba.rpi_hdrs_in_use) {
18480 spin_unlock_irqrestore(&phba->hbalock, iflag);
18481 return rpi;
18482 }
18483
18484
18485
18486
18487
18488
18489
18490 rpi_remaining = phba->sli4_hba.next_rpi - phba->sli4_hba.rpi_count;
18491 spin_unlock_irqrestore(&phba->hbalock, iflag);
18492 if (rpi_remaining < LPFC_RPI_LOW_WATER_MARK) {
18493 rpi_hdr = lpfc_sli4_create_rpi_hdr(phba);
18494 if (!rpi_hdr) {
18495 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
					"2002 Error: could not grow rpi "
18497 "count\n");
18498 } else {
18499 lrpi = rpi_hdr->start_rpi;
18500 rpi_hdr->start_rpi = phba->sli4_hba.rpi_ids[lrpi];
18501 lpfc_sli4_post_rpi_hdr(phba, rpi_hdr);
18502 }
18503 }
18504
18505 return rpi;
18506}
18507
18508
18509
18510
18511
18512
18513
18514
18515static void
18516__lpfc_sli4_free_rpi(struct lpfc_hba *phba, int rpi)
18517{
18518
18519
18520
18521
18522 if (rpi == LPFC_RPI_ALLOC_ERROR)
18523 return;
18524
18525 if (test_and_clear_bit(rpi, phba->sli4_hba.rpi_bmask)) {
18526 phba->sli4_hba.rpi_count--;
18527 phba->sli4_hba.max_cfg_param.rpi_used--;
18528 } else {
18529 lpfc_printf_log(phba, KERN_INFO,
18530 LOG_NODE | LOG_DISCOVERY,
18531 "2016 rpi %x not inuse\n",
18532 rpi);
18533 }
18534}
18535
18536
18537
18538
18539
18540
18541
18542
18543void
18544lpfc_sli4_free_rpi(struct lpfc_hba *phba, int rpi)
18545{
18546 spin_lock_irq(&phba->hbalock);
18547 __lpfc_sli4_free_rpi(phba, rpi);
18548 spin_unlock_irq(&phba->hbalock);
18549}
18550
18551
18552
18553
18554
18555
18556
18557
18558void
18559lpfc_sli4_remove_rpis(struct lpfc_hba *phba)
18560{
18561 kfree(phba->sli4_hba.rpi_bmask);
18562 kfree(phba->sli4_hba.rpi_ids);
18563 bf_set(lpfc_rpi_rsrc_rdy, &phba->sli4_hba.sli4_flags, 0);
18564}
18565
18566
18567
18568
18569
18570
18571
18572
18573int
18574lpfc_sli4_resume_rpi(struct lpfc_nodelist *ndlp,
18575 void (*cmpl)(struct lpfc_hba *, LPFC_MBOXQ_t *), void *arg)
18576{
18577 LPFC_MBOXQ_t *mboxq;
18578 struct lpfc_hba *phba = ndlp->phba;
18579 int rc;
18580
18581
18582 mboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
18583 if (!mboxq)
18584 return -ENOMEM;
18585
18586
18587 lpfc_resume_rpi(mboxq, ndlp);
18588 if (cmpl) {
18589 mboxq->mbox_cmpl = cmpl;
18590 mboxq->ctx_buf = arg;
18591 mboxq->ctx_ndlp = ndlp;
18592 } else
18593 mboxq->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
18594 mboxq->vport = ndlp->vport;
18595 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_NOWAIT);
18596 if (rc == MBX_NOT_FINISHED) {
18597 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
18598 "2010 Resume RPI Mailbox failed "
18599 "status %d, mbxStatus x%x\n", rc,
18600 bf_get(lpfc_mqe_status, &mboxq->u.mqe));
18601 mempool_free(mboxq, phba->mbox_mem_pool);
18602 return -EIO;
18603 }
18604 return 0;
18605}
18606
18607
18608
18609
18610
18611
18612
18613
18614
18615
18616
18617int
18618lpfc_sli4_init_vpi(struct lpfc_vport *vport)
18619{
18620 LPFC_MBOXQ_t *mboxq;
18621 int rc = 0;
18622 int retval = MBX_SUCCESS;
18623 uint32_t mbox_tmo;
18624 struct lpfc_hba *phba = vport->phba;
18625 mboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
18626 if (!mboxq)
18627 return -ENOMEM;
18628 lpfc_init_vpi(phba, mboxq, vport->vpi);
18629 mbox_tmo = lpfc_mbox_tmo_val(phba, mboxq);
18630 rc = lpfc_sli_issue_mbox_wait(phba, mboxq, mbox_tmo);
18631 if (rc != MBX_SUCCESS) {
18632 lpfc_printf_vlog(vport, KERN_ERR, LOG_SLI,
18633 "2022 INIT VPI Mailbox failed "
18634 "status %d, mbxStatus x%x\n", rc,
18635 bf_get(lpfc_mqe_status, &mboxq->u.mqe));
18636 retval = -EIO;
18637 }
18638 if (rc != MBX_TIMEOUT)
18639 mempool_free(mboxq, vport->phba->mbox_mem_pool);
18640
18641 return retval;
18642}
18643
18644
18645
18646
18647
18648
18649
18650
18651
18652
18653static void
18654lpfc_mbx_cmpl_add_fcf_record(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
18655{
18656 void *virt_addr;
18657 union lpfc_sli4_cfg_shdr *shdr;
18658 uint32_t shdr_status, shdr_add_status;
18659
18660 virt_addr = mboxq->sge_array->addr[0];
18661
18662 shdr = (union lpfc_sli4_cfg_shdr *) virt_addr;
18663 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
18664 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
18665
18666 if ((shdr_status || shdr_add_status) &&
18667 (shdr_status != STATUS_FCF_IN_USE))
18668 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
18669 "2558 ADD_FCF_RECORD mailbox failed with "
18670 "status x%x add_status x%x\n",
18671 shdr_status, shdr_add_status);
18672
18673 lpfc_sli4_mbox_cmd_free(phba, mboxq);
18674}
18675
18676
18677
18678
18679
18680
18681
18682
18683
18684
18685int
18686lpfc_sli4_add_fcf_record(struct lpfc_hba *phba, struct fcf_record *fcf_record)
18687{
18688 int rc = 0;
18689 LPFC_MBOXQ_t *mboxq;
18690 uint8_t *bytep;
18691 void *virt_addr;
18692 struct lpfc_mbx_sge sge;
18693 uint32_t alloc_len, req_len;
18694 uint32_t fcfindex;
18695
18696 mboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
18697 if (!mboxq) {
18698 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
18699 "2009 Failed to allocate mbox for ADD_FCF cmd\n");
18700 return -ENOMEM;
18701 }
18702
18703 req_len = sizeof(struct fcf_record) + sizeof(union lpfc_sli4_cfg_shdr) +
18704 sizeof(uint32_t);
18705
18706
18707 alloc_len = lpfc_sli4_config(phba, mboxq, LPFC_MBOX_SUBSYSTEM_FCOE,
18708 LPFC_MBOX_OPCODE_FCOE_ADD_FCF,
18709 req_len, LPFC_SLI4_MBX_NEMBED);
18710 if (alloc_len < req_len) {
18711 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
18712 "2523 Allocated DMA memory size (x%x) is "
18713 "less than the requested DMA memory "
18714 "size (x%x)\n", alloc_len, req_len);
18715 lpfc_sli4_mbox_cmd_free(phba, mboxq);
18716 return -ENOMEM;
18717 }
18718
18719
18720
18721
18722
18723 lpfc_sli4_mbx_sge_get(mboxq, 0, &sge);
18724 virt_addr = mboxq->sge_array->addr[0];
18725
18726
18727
18728
18729 fcfindex = bf_get(lpfc_fcf_record_fcf_index, fcf_record);
18730 bytep = virt_addr + sizeof(union lpfc_sli4_cfg_shdr);
18731 lpfc_sli_pcimem_bcopy(&fcfindex, bytep, sizeof(uint32_t));
18732
18733
18734
18735
18736
18737
18738 bytep += sizeof(uint32_t);
18739 lpfc_sli_pcimem_bcopy(fcf_record, bytep, sizeof(struct fcf_record));
18740 mboxq->vport = phba->pport;
18741 mboxq->mbox_cmpl = lpfc_mbx_cmpl_add_fcf_record;
18742 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_NOWAIT);
18743 if (rc == MBX_NOT_FINISHED) {
18744 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
18745 "2515 ADD_FCF_RECORD mailbox failed with "
18746 "status 0x%x\n", rc);
18747 lpfc_sli4_mbox_cmd_free(phba, mboxq);
18748 rc = -EIO;
18749 } else
18750 rc = 0;
18751
18752 return rc;
18753}
18754
18755
18756
18757
18758
18759
18760
18761
18762
18763
18764
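/**
 * lpfc_sli4_build_dflt_fcf_record - Build a driver default FCF record
 * @phba: pointer to lpfc hba data structure.
 * @fcf_record: pointer to the FCF record to initialize.
 * @fcf_index: FCF table index to assign to the record.
 *
 * Fills in the FCF record with driver defaults: maximum receive size,
 * FKA advertisement period, FIP priority, a MAC address and FC map
 * derived from the hba's fc_map, both FPMA and SPMA addressing modes,
 * and the configured VLAN (if any).  The record is marked valid and
 * available.
 */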
18765void
18766lpfc_sli4_build_dflt_fcf_record(struct lpfc_hba *phba,
18767 struct fcf_record *fcf_record,
18768 uint16_t fcf_index)
18769{
18770 memset(fcf_record, 0, sizeof(struct fcf_record));
18771 fcf_record->max_rcv_size = LPFC_FCOE_MAX_RCV_SIZE;
18772 fcf_record->fka_adv_period = LPFC_FCOE_FKA_ADV_PER;
18773 fcf_record->fip_priority = LPFC_FCOE_FIP_PRIORITY;
18774 bf_set(lpfc_fcf_record_mac_0, fcf_record, phba->fc_map[0]);
18775 bf_set(lpfc_fcf_record_mac_1, fcf_record, phba->fc_map[1]);
18776 bf_set(lpfc_fcf_record_mac_2, fcf_record, phba->fc_map[2]);
18777 bf_set(lpfc_fcf_record_mac_3, fcf_record, LPFC_FCOE_FCF_MAC3);
18778 bf_set(lpfc_fcf_record_mac_4, fcf_record, LPFC_FCOE_FCF_MAC4);
18779 bf_set(lpfc_fcf_record_mac_5, fcf_record, LPFC_FCOE_FCF_MAC5);
18780 bf_set(lpfc_fcf_record_fc_map_0, fcf_record, phba->fc_map[0]);
18781 bf_set(lpfc_fcf_record_fc_map_1, fcf_record, phba->fc_map[1]);
18782 bf_set(lpfc_fcf_record_fc_map_2, fcf_record, phba->fc_map[2]);
18783 bf_set(lpfc_fcf_record_fcf_valid, fcf_record, 1);
18784 bf_set(lpfc_fcf_record_fcf_avail, fcf_record, 1);
18785 bf_set(lpfc_fcf_record_fcf_index, fcf_record, fcf_index);
18786 bf_set(lpfc_fcf_record_mac_addr_prov, fcf_record,
18787 LPFC_FCF_FPMA | LPFC_FCF_SPMA);
18788
18789 if (phba->valid_vlan) {
18790 fcf_record->vlan_bitmap[phba->vlan_id / 8]
18791 = 1 << (phba->vlan_id % 8);
18792 }
18793}
18794
18795
18796
18797
18798
18799
18800
18801
18802
18803
18804
18805
18806
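/**
 * lpfc_sli4_fcf_scan_read_fcf_rec - Read an FCF record as part of an FCF scan
 * @phba: pointer to lpfc hba data structure.
 * @fcf_index: index of the FCF record to read.
 *
 * Records the current FCoE event tags, marks an FCF table scan in
 * progress (FCF_TS_INPROG) and issues an asynchronous READ_FCF mailbox
 * with lpfc_mbx_cmpl_fcf_scan_read_fcf_rec() as the completion handler.
 * When the scan starts from LPFC_FCOE_FCF_GET_FIRST the eligible FCF
 * count is reset.  On failure the mailbox is freed and FCF_TS_INPROG is
 * cleared.
 *
 * Return: 0 on success, -ENOMEM, -EINVAL or -EIO on failure.
 */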
18807int
18808lpfc_sli4_fcf_scan_read_fcf_rec(struct lpfc_hba *phba, uint16_t fcf_index)
18809{
18810 int rc = 0, error;
18811 LPFC_MBOXQ_t *mboxq;
18812
18813 phba->fcoe_eventtag_at_fcf_scan = phba->fcoe_eventtag;
18814 phba->fcoe_cvl_eventtag_attn = phba->fcoe_cvl_eventtag;
18815 mboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
18816 if (!mboxq) {
18817 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
18818 "2000 Failed to allocate mbox for "
18819 "READ_FCF cmd\n");
18820 error = -ENOMEM;
18821 goto fail_fcf_scan;
18822 }
18823
18824 rc = lpfc_sli4_mbx_read_fcf_rec(phba, mboxq, fcf_index);
18825 if (rc) {
18826 error = -EINVAL;
18827 goto fail_fcf_scan;
18828 }
18829
18830 mboxq->vport = phba->pport;
18831 mboxq->mbox_cmpl = lpfc_mbx_cmpl_fcf_scan_read_fcf_rec;
18832
18833 spin_lock_irq(&phba->hbalock);
18834 phba->hba_flag |= FCF_TS_INPROG;
18835 spin_unlock_irq(&phba->hbalock);
18836
18837 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_NOWAIT);
18838 if (rc == MBX_NOT_FINISHED)
18839 error = -EIO;
18840 else {
18841
18842 if (fcf_index == LPFC_FCOE_FCF_GET_FIRST)
18843 phba->fcf.eligible_fcf_cnt = 0;
18844 error = 0;
18845 }
18846fail_fcf_scan:
18847 if (error) {
18848 if (mboxq)
18849 lpfc_sli4_mbox_cmd_free(phba, mboxq);
18850
18851 spin_lock_irq(&phba->hbalock);
18852 phba->hba_flag &= ~FCF_TS_INPROG;
18853 spin_unlock_irq(&phba->hbalock);
18854 }
18855 return error;
18856}
18857
18858
18859
18860
18861
18862
18863
18864
18865
18866
18867
18868
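/**
 * lpfc_sli4_fcf_rr_read_fcf_rec - Read an FCF record for roundrobin failover
 * @phba: pointer to lpfc hba data structure.
 * @fcf_index: index of the FCF record to read.
 *
 * Issues an asynchronous READ_FCF mailbox for the given index with
 * lpfc_mbx_cmpl_fcf_rr_read_fcf_rec() as the completion handler, used
 * when failing over to the next eligible FCF in roundrobin order.
 *
 * Return: 0 on success, -ENOMEM, -EINVAL or -EIO on failure.
 */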
18869int
18870lpfc_sli4_fcf_rr_read_fcf_rec(struct lpfc_hba *phba, uint16_t fcf_index)
18871{
18872 int rc = 0, error;
18873 LPFC_MBOXQ_t *mboxq;
18874
18875 mboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
18876 if (!mboxq) {
18877 lpfc_printf_log(phba, KERN_ERR, LOG_FIP | LOG_INIT,
18878 "2763 Failed to allocate mbox for "
18879 "READ_FCF cmd\n");
18880 error = -ENOMEM;
18881 goto fail_fcf_read;
18882 }
18883
18884 rc = lpfc_sli4_mbx_read_fcf_rec(phba, mboxq, fcf_index);
18885 if (rc) {
18886 error = -EINVAL;
18887 goto fail_fcf_read;
18888 }
18889
18890 mboxq->vport = phba->pport;
18891 mboxq->mbox_cmpl = lpfc_mbx_cmpl_fcf_rr_read_fcf_rec;
18892 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_NOWAIT);
18893 if (rc == MBX_NOT_FINISHED)
18894 error = -EIO;
18895 else
18896 error = 0;
18897
18898fail_fcf_read:
18899 if (error && mboxq)
18900 lpfc_sli4_mbox_cmd_free(phba, mboxq);
18901 return error;
18902}
18903
18904
18905
18906
18907
18908
18909
18910
18911
18912
18913
18914
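/**
 * lpfc_sli4_read_fcf_rec - Read the FCF record indexed by the FCF index
 * @phba: pointer to lpfc hba data structure.
 * @fcf_index: index of the FCF record to read.
 *
 * Issues an asynchronous READ_FCF mailbox for the given index with
 * lpfc_mbx_cmpl_read_fcf_rec() as the completion handler.
 *
 * Return: 0 on success, -ENOMEM, -EINVAL or -EIO on failure.
 */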
18915int
18916lpfc_sli4_read_fcf_rec(struct lpfc_hba *phba, uint16_t fcf_index)
18917{
18918 int rc = 0, error;
18919 LPFC_MBOXQ_t *mboxq;
18920
18921 mboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
18922 if (!mboxq) {
18923 lpfc_printf_log(phba, KERN_ERR, LOG_FIP | LOG_INIT,
18924 "2758 Failed to allocate mbox for "
18925 "READ_FCF cmd\n");
18926 error = -ENOMEM;
18927 goto fail_fcf_read;
18928 }
18929
18930 rc = lpfc_sli4_mbx_read_fcf_rec(phba, mboxq, fcf_index);
18931 if (rc) {
18932 error = -EINVAL;
18933 goto fail_fcf_read;
18934 }
18935
18936 mboxq->vport = phba->pport;
18937 mboxq->mbox_cmpl = lpfc_mbx_cmpl_read_fcf_rec;
18938 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_NOWAIT);
18939 if (rc == MBX_NOT_FINISHED)
18940 error = -EIO;
18941 else
18942 error = 0;
18943
18944fail_fcf_read:
18945 if (error && mboxq)
18946 lpfc_sli4_mbox_cmd_free(phba, mboxq);
18947 return error;
18948}
18949
18950
18951
18952
18953
18954
18955
18956
18957
18958
18959
18960
18961
18962
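/**
 * lpfc_check_next_fcf_pri_level - Load the next FCF priority level into bmask
 * @phba: pointer to lpfc hba data structure.
 *
 * Called when the roundrobin failover bitmask has been exhausted.
 * Clears the bitmask and repopulates it with the FCF indexes of the next
 * priority level from the fcf_pri_list, skipping records that have
 * failed FLOGI.  If every record has failed, the FLOGI-failed flags are
 * cleared and all records are made eligible again.
 *
 * Return: 1 if the bitmask was repopulated with not-yet-failed FCF
 * entries, 0 otherwise.
 */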
18963static int
18964lpfc_check_next_fcf_pri_level(struct lpfc_hba *phba)
18965{
18966 uint16_t next_fcf_pri;
18967 uint16_t last_index;
18968 struct lpfc_fcf_pri *fcf_pri;
18969 int rc;
18970 int ret = 0;
18971
18972 last_index = find_first_bit(phba->fcf.fcf_rr_bmask,
18973 LPFC_SLI4_FCF_TBL_INDX_MAX);
18974 lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
18975 "3060 Last IDX %d\n", last_index);
18976
18977
18978 spin_lock_irq(&phba->hbalock);
18979 if (list_empty(&phba->fcf.fcf_pri_list) ||
18980 list_is_singular(&phba->fcf.fcf_pri_list)) {
18981 spin_unlock_irq(&phba->hbalock);
18982 lpfc_printf_log(phba, KERN_ERR, LOG_FIP,
18983 "3061 Last IDX %d\n", last_index);
18984 return 0;
18985 }
18986 spin_unlock_irq(&phba->hbalock);
18987
18988 next_fcf_pri = 0;
18989
18990
18991
18992
18993 memset(phba->fcf.fcf_rr_bmask, 0,
18994 sizeof(*phba->fcf.fcf_rr_bmask));
18995 spin_lock_irq(&phba->hbalock);
18996 list_for_each_entry(fcf_pri, &phba->fcf.fcf_pri_list, list) {
18997 if (fcf_pri->fcf_rec.flag & LPFC_FCF_FLOGI_FAILED)
18998 continue;
18999
19000
19001
19002
19003 if (!next_fcf_pri)
19004 next_fcf_pri = fcf_pri->fcf_rec.priority;
19005 spin_unlock_irq(&phba->hbalock);
19006 if (fcf_pri->fcf_rec.priority == next_fcf_pri) {
19007 rc = lpfc_sli4_fcf_rr_index_set(phba,
19008 fcf_pri->fcf_rec.fcf_index);
19009 if (rc)
19010 return 0;
19011 }
19012 spin_lock_irq(&phba->hbalock);
19013 }
19014
19015
19016
19017
19018
19019 if (!next_fcf_pri && !list_empty(&phba->fcf.fcf_pri_list)) {
19020 list_for_each_entry(fcf_pri, &phba->fcf.fcf_pri_list, list) {
19021 fcf_pri->fcf_rec.flag &= ~LPFC_FCF_FLOGI_FAILED;
19022
19023
19024
19025
19026 if (!next_fcf_pri)
19027 next_fcf_pri = fcf_pri->fcf_rec.priority;
19028 spin_unlock_irq(&phba->hbalock);
19029 if (fcf_pri->fcf_rec.priority == next_fcf_pri) {
19030 rc = lpfc_sli4_fcf_rr_index_set(phba,
19031 fcf_pri->fcf_rec.fcf_index);
19032 if (rc)
19033 return 0;
19034 }
19035 spin_lock_irq(&phba->hbalock);
19036 }
19037 } else
19038 ret = 1;
19039 spin_unlock_irq(&phba->hbalock);
19040
19041 return ret;
19042}
19043
19044
19045
19046
19047
19048
19049
19050
19051
19052
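/**
 * lpfc_sli4_fcf_rr_next_index_get - Get the next eligible roundrobin FCF index
 * @phba: pointer to lpfc hba data structure.
 *
 * Searches the roundrobin failover bitmask for the next eligible FCF
 * index, starting just after the currently registered FCF and wrapping
 * around.  If the search comes back to the current FCF, the next FCF
 * priority level is loaded and the search restarts.  Indexes whose FLOGI
 * has failed are skipped.
 *
 * Return: the next FCF index, or LPFC_FCOE_FCF_NEXT_NONE when no
 * eligible FCF remains.
 */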
19053uint16_t
19054lpfc_sli4_fcf_rr_next_index_get(struct lpfc_hba *phba)
19055{
19056 uint16_t next_fcf_index;
19057
19058initial_priority:
19059
19060 next_fcf_index = phba->fcf.current_rec.fcf_indx;
19061
19062next_priority:
19063
19064 next_fcf_index = (next_fcf_index + 1) % LPFC_SLI4_FCF_TBL_INDX_MAX;
19065 next_fcf_index = find_next_bit(phba->fcf.fcf_rr_bmask,
19066 LPFC_SLI4_FCF_TBL_INDX_MAX,
19067 next_fcf_index);
19068
19069
19070 if (next_fcf_index >= LPFC_SLI4_FCF_TBL_INDX_MAX) {
19071
19072
19073
19074
19075
19076 next_fcf_index = find_next_bit(phba->fcf.fcf_rr_bmask,
19077 LPFC_SLI4_FCF_TBL_INDX_MAX, 0);
19078 }
19079
19080
19081
19082 if (next_fcf_index >= LPFC_SLI4_FCF_TBL_INDX_MAX ||
19083 next_fcf_index == phba->fcf.current_rec.fcf_indx) {
19084
19085
19086
19087
19088
19089
19090 if (lpfc_check_next_fcf_pri_level(phba))
19091 goto initial_priority;
19092 lpfc_printf_log(phba, KERN_WARNING, LOG_FIP,
19093 "2844 No roundrobin failover FCF available\n");
19094
19095 return LPFC_FCOE_FCF_NEXT_NONE;
19096 }
19097
19098 if (next_fcf_index < LPFC_SLI4_FCF_TBL_INDX_MAX &&
19099 phba->fcf.fcf_pri[next_fcf_index].fcf_rec.flag &
19100 LPFC_FCF_FLOGI_FAILED) {
19101 if (list_is_singular(&phba->fcf.fcf_pri_list))
19102 return LPFC_FCOE_FCF_NEXT_NONE;
19103
19104 goto next_priority;
19105 }
19106
19107 lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
19108 "2845 Get next roundrobin failover FCF (x%x)\n",
19109 next_fcf_index);
19110
19111 return next_fcf_index;
19112}
19113
19114
19115
19116
19117
19118
19119
19120
19121
19122
19123
19124
19125
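/**
 * lpfc_sli4_fcf_rr_index_set - Set an FCF index in the roundrobin bitmask
 * @phba: pointer to lpfc hba data structure.
 * @fcf_index: index of the FCF record to mark eligible for failover.
 *
 * Return: 0 on success, -EINVAL if the index is beyond the driver's FCF
 * table bookkeeping range.
 */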
19126int
19127lpfc_sli4_fcf_rr_index_set(struct lpfc_hba *phba, uint16_t fcf_index)
19128{
19129 if (fcf_index >= LPFC_SLI4_FCF_TBL_INDX_MAX) {
19130 lpfc_printf_log(phba, KERN_ERR, LOG_FIP,
19131 "2610 FCF (x%x) reached driver's book "
19132 "keeping dimension:x%x\n",
19133 fcf_index, LPFC_SLI4_FCF_TBL_INDX_MAX);
19134 return -EINVAL;
19135 }
19136
19137 set_bit(fcf_index, phba->fcf.fcf_rr_bmask);
19138
19139 lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
19140 "2790 Set FCF (x%x) to roundrobin FCF failover "
19141 "bmask\n", fcf_index);
19142
19143 return 0;
19144}
19145
19146
19147
19148
19149
19150
19151
19152
19153
19154
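/**
 * lpfc_sli4_fcf_rr_index_clear - Clear an FCF index from the roundrobin bitmask
 * @phba: pointer to lpfc hba data structure.
 * @fcf_index: index of the FCF record to remove from failover eligibility.
 *
 * Removes the matching entry from the FCF priority list and clears the
 * corresponding bit in the roundrobin failover bitmask.  Out-of-range
 * indexes are logged and ignored.
 */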
19155void
19156lpfc_sli4_fcf_rr_index_clear(struct lpfc_hba *phba, uint16_t fcf_index)
19157{
19158 struct lpfc_fcf_pri *fcf_pri, *fcf_pri_next;
19159 if (fcf_index >= LPFC_SLI4_FCF_TBL_INDX_MAX) {
19160 lpfc_printf_log(phba, KERN_ERR, LOG_FIP,
19161 "2762 FCF (x%x) reached driver's book "
19162 "keeping dimension:x%x\n",
19163 fcf_index, LPFC_SLI4_FCF_TBL_INDX_MAX);
19164 return;
19165 }
19166
19167 spin_lock_irq(&phba->hbalock);
19168 list_for_each_entry_safe(fcf_pri, fcf_pri_next, &phba->fcf.fcf_pri_list,
19169 list) {
19170 if (fcf_pri->fcf_rec.fcf_index == fcf_index) {
19171 list_del_init(&fcf_pri->list);
19172 break;
19173 }
19174 }
19175 spin_unlock_irq(&phba->hbalock);
19176 clear_bit(fcf_index, phba->fcf.fcf_rr_bmask);
19177
19178 lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
19179 "2791 Clear FCF (x%x) from roundrobin failover "
19180 "bmask\n", fcf_index);
19181}
19182
19183
19184
19185
19186
19187
19188
19189
19190
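/**
 * lpfc_mbx_cmpl_redisc_fcf_table - Completion handler for REDISCOVER_FCF
 * @phba: pointer to lpfc hba data structure.
 * @mbox: pointer to the completed mailbox object.
 *
 * If the rediscover request failed, either restarts port discovery (when
 * the rediscovery was triggered by a clear virtual link) or falls
 * through to FCF-dead handling.  On success, starts the FCF rediscovery
 * quiescent wait timer.  The mailbox is freed in all cases.
 */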
19191static void
19192lpfc_mbx_cmpl_redisc_fcf_table(struct lpfc_hba *phba, LPFC_MBOXQ_t *mbox)
19193{
19194 struct lpfc_mbx_redisc_fcf_tbl *redisc_fcf;
19195 uint32_t shdr_status, shdr_add_status;
19196
19197 redisc_fcf = &mbox->u.mqe.un.redisc_fcf_tbl;
19198
19199 shdr_status = bf_get(lpfc_mbox_hdr_status,
19200 &redisc_fcf->header.cfg_shdr.response);
19201 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status,
19202 &redisc_fcf->header.cfg_shdr.response);
19203 if (shdr_status || shdr_add_status) {
19204 lpfc_printf_log(phba, KERN_ERR, LOG_FIP,
19205 "2746 Requesting for FCF rediscovery failed "
19206 "status x%x add_status x%x\n",
19207 shdr_status, shdr_add_status);
19208 if (phba->fcf.fcf_flag & FCF_ACVL_DISC) {
19209 spin_lock_irq(&phba->hbalock);
19210 phba->fcf.fcf_flag &= ~FCF_ACVL_DISC;
19211 spin_unlock_irq(&phba->hbalock);
19212
19213
19214
19215
19216 lpfc_retry_pport_discovery(phba);
19217 } else {
19218 spin_lock_irq(&phba->hbalock);
19219 phba->fcf.fcf_flag &= ~FCF_DEAD_DISC;
19220 spin_unlock_irq(&phba->hbalock);
19221
19222
19223
19224
19225
19226 lpfc_sli4_fcf_dead_failthrough(phba);
19227 }
19228 } else {
19229 lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
19230 "2775 Start FCF rediscover quiescent timer\n");
19231
19232
19233
19234
19235 lpfc_fcf_redisc_wait_start_timer(phba);
19236 }
19237
19238 mempool_free(mbox, phba->mbox_mem_pool);
19239}
19240
19241
19242
19243
19244
19245
19246
19247
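/**
 * lpfc_sli4_redisc_fcf_table - Request the port to rediscover its FCF table
 * @phba: pointer to lpfc hba data structure.
 *
 * Cancels any vport retry delay timers and issues an asynchronous
 * REDISCOVER_FCF_TABLE mailbox command with
 * lpfc_mbx_cmpl_redisc_fcf_table() as the completion handler.
 *
 * Return: 0 on success, -ENOMEM or -EIO on failure.
 */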
19248int
19249lpfc_sli4_redisc_fcf_table(struct lpfc_hba *phba)
19250{
19251 LPFC_MBOXQ_t *mbox;
19252 struct lpfc_mbx_redisc_fcf_tbl *redisc_fcf;
19253 int rc, length;
19254
19255
19256 lpfc_cancel_all_vport_retry_delay_timer(phba);
19257
19258 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
19259 if (!mbox) {
19260 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
19261 "2745 Failed to allocate mbox for "
19262 "requesting FCF rediscover.\n");
19263 return -ENOMEM;
19264 }
19265
19266 length = (sizeof(struct lpfc_mbx_redisc_fcf_tbl) -
19267 sizeof(struct lpfc_sli4_cfg_mhdr));
19268 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_FCOE,
19269 LPFC_MBOX_OPCODE_FCOE_REDISCOVER_FCF,
19270 length, LPFC_SLI4_MBX_EMBED);
19271
19272 redisc_fcf = &mbox->u.mqe.un.redisc_fcf_tbl;
19273
19274 bf_set(lpfc_mbx_redisc_fcf_count, redisc_fcf, 0);
19275
19276
19277 mbox->vport = phba->pport;
19278 mbox->mbox_cmpl = lpfc_mbx_cmpl_redisc_fcf_table;
19279 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT);
19280
19281 if (rc == MBX_NOT_FINISHED) {
19282 mempool_free(mbox, phba->mbox_mem_pool);
19283 return -EIO;
19284 }
19285 return 0;
19286}
19287
19288
19289
19290
19291
19292
19293
19294
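/**
 * lpfc_sli4_fcf_dead_failthrough - Failthrough handling for an FCF dead event
 * @phba: pointer to lpfc hba data structure.
 *
 * Used when FCF rediscovery cannot be started after an FCF dead event:
 * performs link-down processing without changing the recorded link
 * state, then unregisters the FCF if it is no longer in use.
 */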
19295void
19296lpfc_sli4_fcf_dead_failthrough(struct lpfc_hba *phba)
19297{
19298 uint32_t link_state;
19299
19300
19301
19302
19303
19304
19305 link_state = phba->link_state;
19306 lpfc_linkdown(phba);
19307 phba->link_state = link_state;
19308
19309
19310 lpfc_unregister_unused_fcf(phba);
19311}
19312
19313
19314
19315
19316
19317
19318
19319
19320
19321
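/**
 * lpfc_sli_get_config_region23 - Read config region 23 on an SLI-3 port
 * @phba: pointer to lpfc hba data structure.
 * @rgn23_data: buffer of at least DMP_RGN23_SIZE bytes to receive the data.
 *
 * Issues DUMP memory mailbox commands in polled mode until all of config
 * region 23 has been read or the buffer is full.
 *
 * Return: the amount of data copied into @rgn23_data, 0 on failure.
 */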
19322static uint32_t
19323lpfc_sli_get_config_region23(struct lpfc_hba *phba, char *rgn23_data)
19324{
19325 LPFC_MBOXQ_t *pmb = NULL;
19326 MAILBOX_t *mb;
19327 uint32_t offset = 0;
19328 int rc;
19329
19330 if (!rgn23_data)
19331 return 0;
19332
19333 pmb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
19334 if (!pmb) {
19335 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
19336 "2600 failed to allocate mailbox memory\n");
19337 return 0;
19338 }
19339 mb = &pmb->u.mb;
19340
19341 do {
19342 lpfc_dump_mem(phba, pmb, offset, DMP_REGION_23);
19343 rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL);
19344
19345 if (rc != MBX_SUCCESS) {
19346 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
19347 "2601 failed to read config "
19348 "region 23, rc 0x%x Status 0x%x\n",
19349 rc, mb->mbxStatus);
19350 mb->un.varDmp.word_cnt = 0;
19351 }
19352
19353
19354
19355
19356 if (mb->un.varDmp.word_cnt == 0)
19357 break;
19358 if (mb->un.varDmp.word_cnt > DMP_RGN23_SIZE - offset)
19359 mb->un.varDmp.word_cnt = DMP_RGN23_SIZE - offset;
19360
19361 lpfc_sli_pcimem_bcopy(((uint8_t *)mb) + DMP_RSP_OFFSET,
19362 rgn23_data + offset,
19363 mb->un.varDmp.word_cnt);
19364 offset += mb->un.varDmp.word_cnt;
19365 } while (mb->un.varDmp.word_cnt && offset < DMP_RGN23_SIZE);
19366
19367 mempool_free(pmb, phba->mbox_mem_pool);
19368 return offset;
19369}
19370
19371
19372
19373
19374
19375
19376
19377
19378
19379
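/**
 * lpfc_sli4_get_config_region23 - Read config region 23 on an SLI-4 port
 * @phba: pointer to lpfc hba data structure.
 * @rgn23_data: buffer of at least DMP_RGN23_SIZE bytes to receive the data.
 *
 * Issues a DUMP mailbox command in polled mode and copies the returned
 * region 23 data from the mailbox DMA buffer into @rgn23_data.
 *
 * Return: the amount of data copied into @rgn23_data, 0 on failure.
 */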
19380static uint32_t
19381lpfc_sli4_get_config_region23(struct lpfc_hba *phba, char *rgn23_data)
19382{
19383 LPFC_MBOXQ_t *mboxq = NULL;
19384 struct lpfc_dmabuf *mp = NULL;
19385 struct lpfc_mqe *mqe;
19386 uint32_t data_length = 0;
19387 int rc;
19388
19389 if (!rgn23_data)
19390 return 0;
19391
19392 mboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
19393 if (!mboxq) {
19394 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
19395 "3105 failed to allocate mailbox memory\n");
19396 return 0;
19397 }
19398
19399 if (lpfc_sli4_dump_cfg_rg23(phba, mboxq))
19400 goto out;
19401 mqe = &mboxq->u.mqe;
19402 mp = (struct lpfc_dmabuf *)mboxq->ctx_buf;
19403 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
19404 if (rc)
19405 goto out;
19406 data_length = mqe->un.mb_words[5];
19407 if (data_length == 0)
19408 goto out;
19409 if (data_length > DMP_RGN23_SIZE) {
19410 data_length = 0;
19411 goto out;
19412 }
19413 lpfc_sli_pcimem_bcopy((char *)mp->virt, rgn23_data, data_length);
19414out:
19415 mempool_free(mboxq, phba->mbox_mem_pool);
19416 if (mp) {
19417 lpfc_mbuf_free(phba, mp->virt, mp->phys);
19418 kfree(mp);
19419 }
19420 return data_length;
19421}
19422
19423
19424
19425
19426
19427
19428
19429
19430
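/**
 * lpfc_sli_read_link_ste - Read region 23 to decide whether to disable the link
 * @phba: pointer to lpfc hba data structure.
 *
 * Reads config region 23, validates its signature and version, and walks
 * the driver-specific TLVs looking for the port state record.  If that
 * record indicates the link should stay down, LINK_DISABLED is set in
 * the hba_flag.
 */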
19431void
19432lpfc_sli_read_link_ste(struct lpfc_hba *phba)
19433{
19434 uint8_t *rgn23_data = NULL;
19435 uint32_t if_type, data_size, sub_tlv_len, tlv_offset;
19436 uint32_t offset = 0;
19437
19438
19439 rgn23_data = kzalloc(DMP_RGN23_SIZE, GFP_KERNEL);
19440 if (!rgn23_data)
19441 goto out;
19442
19443 if (phba->sli_rev < LPFC_SLI_REV4)
19444 data_size = lpfc_sli_get_config_region23(phba, rgn23_data);
19445 else {
19446 if_type = bf_get(lpfc_sli_intf_if_type,
19447 &phba->sli4_hba.sli_intf);
19448 if (if_type == LPFC_SLI_INTF_IF_TYPE_0)
19449 goto out;
19450 data_size = lpfc_sli4_get_config_region23(phba, rgn23_data);
19451 }
19452
19453 if (!data_size)
19454 goto out;
19455
19456
19457 if (memcmp(&rgn23_data[offset], LPFC_REGION23_SIGNATURE, 4)) {
19458 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
19459 "2619 Config region 23 has bad signature\n");
19460 goto out;
19461 }
19462 offset += 4;
19463
19464
19465 if (rgn23_data[offset] != LPFC_REGION23_VERSION) {
19466 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
19467 "2620 Config region 23 has bad version\n");
19468 goto out;
19469 }
19470 offset += 4;
19471
19472
19473 while (offset < data_size) {
19474 if (rgn23_data[offset] == LPFC_REGION23_LAST_REC)
19475 break;
19476
19477
19478
19479
19480 if ((rgn23_data[offset] != DRIVER_SPECIFIC_TYPE) ||
19481 (rgn23_data[offset + 2] != LINUX_DRIVER_ID) ||
19482 (rgn23_data[offset + 3] != 0)) {
19483 offset += rgn23_data[offset + 1] * 4 + 4;
19484 continue;
19485 }
19486
19487
19488 sub_tlv_len = rgn23_data[offset + 1] * 4;
19489 offset += 4;
19490 tlv_offset = 0;
19491
19492
19493
19494
19495 while ((offset < data_size) &&
19496 (tlv_offset < sub_tlv_len)) {
19497 if (rgn23_data[offset] == LPFC_REGION23_LAST_REC) {
19498 offset += 4;
19499 tlv_offset += 4;
19500 break;
19501 }
19502 if (rgn23_data[offset] != PORT_STE_TYPE) {
19503 offset += rgn23_data[offset + 1] * 4 + 4;
19504 tlv_offset += rgn23_data[offset + 1] * 4 + 4;
19505 continue;
19506 }
19507
19508
19509 if (!rgn23_data[offset + 2])
19510 phba->hba_flag |= LINK_DISABLED;
19511
19512 goto out;
19513 }
19514 }
19515
19516out:
19517 kfree(rgn23_data);
19518 return;
19519}
19520
19521
19522
19523
19524
19525
19526
19527
19528
19529
19530
19531
19532
19533
19534
19535
19536
19537
19538
19539
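/**
 * lpfc_wr_object - Write a portion of an object to the adapter
 * @phba: pointer to lpfc hba data structure.
 * @dmabuf_list: list of DMA buffers holding the data to write.
 * @size: number of bytes to write from the buffer list.
 * @offset: on entry, the object offset to write at; on success, advanced
 *          by the number of bytes actually written.
 *
 * Issues a WRITE_OBJECT mailbox command (used for firmware download),
 * setting the end-of-file and activation bits on the final chunk and
 * logging, based on the returned change status, what kind of reset is
 * required to make the new image take effect.
 *
 * Return: 0 on success, -ENOMEM on allocation failure, -ENXIO if the
 * mailbox command failed.
 */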
19540int
19541lpfc_wr_object(struct lpfc_hba *phba, struct list_head *dmabuf_list,
19542 uint32_t size, uint32_t *offset)
19543{
19544 struct lpfc_mbx_wr_object *wr_object;
19545 LPFC_MBOXQ_t *mbox;
19546 int rc = 0, i = 0;
19547 uint32_t shdr_status, shdr_add_status, shdr_change_status, shdr_csf;
19548 uint32_t mbox_tmo;
19549 struct lpfc_dmabuf *dmabuf;
19550 uint32_t written = 0;
19551 bool check_change_status = false;
19552
19553 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
19554 if (!mbox)
19555 return -ENOMEM;
19556
19557 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON,
19558 LPFC_MBOX_OPCODE_WRITE_OBJECT,
19559 sizeof(struct lpfc_mbx_wr_object) -
19560 sizeof(struct lpfc_sli4_cfg_mhdr), LPFC_SLI4_MBX_EMBED);
19561
19562 wr_object = (struct lpfc_mbx_wr_object *)&mbox->u.mqe.un.wr_object;
19563 wr_object->u.request.write_offset = *offset;
19564 sprintf((uint8_t *)wr_object->u.request.object_name, "/");
19565 wr_object->u.request.object_name[0] =
19566 cpu_to_le32(wr_object->u.request.object_name[0]);
19567 bf_set(lpfc_wr_object_eof, &wr_object->u.request, 0);
19568 list_for_each_entry(dmabuf, dmabuf_list, list) {
19569 if (i >= LPFC_MBX_WR_CONFIG_MAX_BDE || written >= size)
19570 break;
19571 wr_object->u.request.bde[i].addrLow = putPaddrLow(dmabuf->phys);
19572 wr_object->u.request.bde[i].addrHigh =
19573 putPaddrHigh(dmabuf->phys);
19574 if (written + SLI4_PAGE_SIZE >= size) {
19575 wr_object->u.request.bde[i].tus.f.bdeSize =
19576 (size - written);
19577 written += (size - written);
19578 bf_set(lpfc_wr_object_eof, &wr_object->u.request, 1);
19579 bf_set(lpfc_wr_object_eas, &wr_object->u.request, 1);
19580 check_change_status = true;
19581 } else {
19582 wr_object->u.request.bde[i].tus.f.bdeSize =
19583 SLI4_PAGE_SIZE;
19584 written += SLI4_PAGE_SIZE;
19585 }
19586 i++;
19587 }
19588 wr_object->u.request.bde_count = i;
19589 bf_set(lpfc_wr_object_write_length, &wr_object->u.request, written);
19590 if (!phba->sli4_hba.intr_enable)
19591 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
19592 else {
19593 mbox_tmo = lpfc_mbox_tmo_val(phba, mbox);
19594 rc = lpfc_sli_issue_mbox_wait(phba, mbox, mbox_tmo);
19595 }
19596
19597 shdr_status = bf_get(lpfc_mbox_hdr_status,
19598 &wr_object->header.cfg_shdr.response);
19599 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status,
19600 &wr_object->header.cfg_shdr.response);
19601 if (check_change_status) {
19602 shdr_change_status = bf_get(lpfc_wr_object_change_status,
19603 &wr_object->u.response);
19604
19605 if (shdr_change_status == LPFC_CHANGE_STATUS_FW_RESET ||
19606 shdr_change_status == LPFC_CHANGE_STATUS_PORT_MIGRATION) {
19607 shdr_csf = bf_get(lpfc_wr_object_csf,
19608 &wr_object->u.response);
19609 if (shdr_csf)
19610 shdr_change_status =
19611 LPFC_CHANGE_STATUS_PCI_RESET;
19612 }
19613
19614 switch (shdr_change_status) {
19615 case (LPFC_CHANGE_STATUS_PHYS_DEV_RESET):
19616 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
19617 "3198 Firmware write complete: System "
19618 "reboot required to instantiate\n");
19619 break;
19620 case (LPFC_CHANGE_STATUS_FW_RESET):
19621 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
19622 "3199 Firmware write complete: Firmware"
19623 " reset required to instantiate\n");
19624 break;
19625 case (LPFC_CHANGE_STATUS_PORT_MIGRATION):
19626 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
19627 "3200 Firmware write complete: Port "
19628 "Migration or PCI Reset required to "
19629 "instantiate\n");
19630 break;
19631 case (LPFC_CHANGE_STATUS_PCI_RESET):
19632 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
19633 "3201 Firmware write complete: PCI "
19634 "Reset required to instantiate\n");
19635 break;
19636 default:
19637 break;
19638 }
19639 }
19640 if (rc != MBX_TIMEOUT)
19641 mempool_free(mbox, phba->mbox_mem_pool);
19642 if (shdr_status || shdr_add_status || rc) {
19643 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
19644 "3025 Write Object mailbox failed with "
19645 "status x%x add_status x%x, mbx status x%x\n",
19646 shdr_status, shdr_add_status, rc);
19647 rc = -ENXIO;
19648 *offset = shdr_add_status;
19649 } else
19650 *offset += wr_object->u.response.actual_write_length;
19651 return rc;
19652}
19653
19654
19655
19656
19657
19658
19659
19660
19661
19662
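/**
 * lpfc_cleanup_pending_mbox - Clean up a vport's pending REG_LOGIN/REG_VPI
 * @vport: pointer to the vport being torn down.
 *
 * Removes the vport's pending REG_LOGIN64 and REG_VPI mailbox commands
 * from the mailbox queue, flags the active and already-completed ones
 * for immediate unregistration handling, frees their DMA buffers, drops
 * the node references they hold, and clears NLP_IGNR_REG_CMPL on the
 * affected nodes.
 */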
19663void
19664lpfc_cleanup_pending_mbox(struct lpfc_vport *vport)
19665{
19666 struct lpfc_hba *phba = vport->phba;
19667 LPFC_MBOXQ_t *mb, *nextmb;
19668 struct lpfc_dmabuf *mp;
19669 struct lpfc_nodelist *ndlp;
19670 struct lpfc_nodelist *act_mbx_ndlp = NULL;
19671 struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
19672 LIST_HEAD(mbox_cmd_list);
19673 uint8_t restart_loop;
19674
19675
19676 spin_lock_irq(&phba->hbalock);
19677 list_for_each_entry_safe(mb, nextmb, &phba->sli.mboxq, list) {
19678 if (mb->vport != vport)
19679 continue;
19680
19681 if ((mb->u.mb.mbxCommand != MBX_REG_LOGIN64) &&
19682 (mb->u.mb.mbxCommand != MBX_REG_VPI))
19683 continue;
19684
19685 list_del(&mb->list);
19686 list_add_tail(&mb->list, &mbox_cmd_list);
19687 }
19688
19689 mb = phba->sli.mbox_active;
19690 if (mb && (mb->vport == vport)) {
19691 if ((mb->u.mb.mbxCommand == MBX_REG_LOGIN64) ||
19692 (mb->u.mb.mbxCommand == MBX_REG_VPI))
19693 mb->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
19694 if (mb->u.mb.mbxCommand == MBX_REG_LOGIN64) {
19695 act_mbx_ndlp = (struct lpfc_nodelist *)mb->ctx_ndlp;
19696
19697 act_mbx_ndlp = lpfc_nlp_get(act_mbx_ndlp);
19698
19699 mb->mbox_flag |= LPFC_MBX_IMED_UNREG;
19700 }
19701 }
19702
19703 do {
19704 restart_loop = 0;
19705 list_for_each_entry(mb, &phba->sli.mboxq_cmpl, list) {
19706
19707
19708
19709
19710 if ((mb->vport != vport) ||
19711 (mb->mbox_flag & LPFC_MBX_IMED_UNREG))
19712 continue;
19713
19714 if ((mb->u.mb.mbxCommand != MBX_REG_LOGIN64) &&
19715 (mb->u.mb.mbxCommand != MBX_REG_VPI))
19716 continue;
19717
19718 mb->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
19719 if (mb->u.mb.mbxCommand == MBX_REG_LOGIN64) {
19720 ndlp = (struct lpfc_nodelist *)mb->ctx_ndlp;
19721
19722 mb->mbox_flag |= LPFC_MBX_IMED_UNREG;
19723 restart_loop = 1;
19724 spin_unlock_irq(&phba->hbalock);
19725 spin_lock(shost->host_lock);
19726 ndlp->nlp_flag &= ~NLP_IGNR_REG_CMPL;
19727 spin_unlock(shost->host_lock);
19728 spin_lock_irq(&phba->hbalock);
19729 break;
19730 }
19731 }
19732 } while (restart_loop);
19733
19734 spin_unlock_irq(&phba->hbalock);
19735
19736
19737 while (!list_empty(&mbox_cmd_list)) {
19738 list_remove_head(&mbox_cmd_list, mb, LPFC_MBOXQ_t, list);
19739 if (mb->u.mb.mbxCommand == MBX_REG_LOGIN64) {
19740 mp = (struct lpfc_dmabuf *)(mb->ctx_buf);
19741 if (mp) {
19742 __lpfc_mbuf_free(phba, mp->virt, mp->phys);
19743 kfree(mp);
19744 }
19745 mb->ctx_buf = NULL;
19746 ndlp = (struct lpfc_nodelist *)mb->ctx_ndlp;
19747 mb->ctx_ndlp = NULL;
19748 if (ndlp) {
19749 spin_lock(shost->host_lock);
19750 ndlp->nlp_flag &= ~NLP_IGNR_REG_CMPL;
19751 spin_unlock(shost->host_lock);
19752 lpfc_nlp_put(ndlp);
19753 }
19754 }
19755 mempool_free(mb, phba->mbox_mem_pool);
19756 }
19757
19758
19759 if (act_mbx_ndlp) {
19760 spin_lock(shost->host_lock);
19761 act_mbx_ndlp->nlp_flag &= ~NLP_IGNR_REG_CMPL;
19762 spin_unlock(shost->host_lock);
19763 lpfc_nlp_put(act_mbx_ndlp);
19764 }
19765}
19766
19767
19768
19769
19770
19771
19772
19773
19774
19775
19776
19777
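/**
 * lpfc_drain_txq - Attempt to submit deferred IOCBs held on the txq
 * @phba: pointer to lpfc hba data structure.
 *
 * Dequeues IOCBs deferred on the ELS ring txq (or the first hardware
 * queue's ring in MDS loopback mode), converts each to a WQE and posts
 * it to the corresponding work queue.  Entries that cannot be converted
 * or posted are completed with IOSTAT_LOCAL_REJECT/IOERR_SLI_ABORTED.
 * Draining stops when no ELS sglq is available.
 *
 * Return: the remaining txq entry count when draining stopped.
 */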
19778uint32_t
19779lpfc_drain_txq(struct lpfc_hba *phba)
19780{
19781 LIST_HEAD(completions);
19782 struct lpfc_sli_ring *pring;
19783 struct lpfc_iocbq *piocbq = NULL;
19784 unsigned long iflags = 0;
19785 char *fail_msg = NULL;
19786 struct lpfc_sglq *sglq;
19787 union lpfc_wqe128 wqe;
19788 uint32_t txq_cnt = 0;
19789 struct lpfc_queue *wq;
19790
19791 if (phba->link_flag & LS_MDS_LOOPBACK) {
19792
19793 wq = phba->sli4_hba.hdwq[0].io_wq;
19794 if (unlikely(!wq))
19795 return 0;
19796 pring = wq->pring;
19797 } else {
19798 wq = phba->sli4_hba.els_wq;
19799 if (unlikely(!wq))
19800 return 0;
19801 pring = lpfc_phba_elsring(phba);
19802 }
19803
19804 if (unlikely(!pring) || list_empty(&pring->txq))
19805 return 0;
19806
19807 spin_lock_irqsave(&pring->ring_lock, iflags);
19808 list_for_each_entry(piocbq, &pring->txq, list) {
19809 txq_cnt++;
19810 }
19811
19812 if (txq_cnt > pring->txq_max)
19813 pring->txq_max = txq_cnt;
19814
19815 spin_unlock_irqrestore(&pring->ring_lock, iflags);
19816
19817 while (!list_empty(&pring->txq)) {
19818 spin_lock_irqsave(&pring->ring_lock, iflags);
19819
19820 piocbq = lpfc_sli_ringtx_get(phba, pring);
19821 if (!piocbq) {
19822 spin_unlock_irqrestore(&pring->ring_lock, iflags);
19823 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
19824 "2823 txq empty and txq_cnt is %d\n ",
19825 txq_cnt);
19826 break;
19827 }
19828 sglq = __lpfc_sli_get_els_sglq(phba, piocbq);
19829 if (!sglq) {
19830 __lpfc_sli_ringtx_put(phba, pring, piocbq);
19831 spin_unlock_irqrestore(&pring->ring_lock, iflags);
19832 break;
19833 }
19834 txq_cnt--;
19835
19836
19837
19838
19839 piocbq->sli4_lxritag = sglq->sli4_lxritag;
19840 piocbq->sli4_xritag = sglq->sli4_xritag;
19841 if (NO_XRI == lpfc_sli4_bpl2sgl(phba, piocbq, sglq))
19842 fail_msg = "to convert bpl to sgl";
19843 else if (lpfc_sli4_iocb2wqe(phba, piocbq, &wqe))
19844 fail_msg = "to convert iocb to wqe";
19845 else if (lpfc_sli4_wq_put(wq, &wqe))
19846 fail_msg = " - Wq is full";
19847 else
19848 lpfc_sli_ringtxcmpl_put(phba, pring, piocbq);
19849
19850 if (fail_msg) {
19851
19852 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
19853 "2822 IOCB failed %s iotag 0x%x "
19854 "xri 0x%x\n",
19855 fail_msg,
19856 piocbq->iotag, piocbq->sli4_xritag);
19857 list_add_tail(&piocbq->list, &completions);
19858 }
19859 spin_unlock_irqrestore(&pring->ring_lock, iflags);
19860 }
19861
19862
19863 lpfc_sli_cancel_iocbs(phba, &completions, IOSTAT_LOCAL_REJECT,
19864 IOERR_SLI_ABORTED);
19865
19866 return txq_cnt;
19867}
19868
19869
19870
19871
19872
19873
19874
19875
19876
19877
19878
19879
19880
19881
19882
19883
19884
19885
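/**
 * lpfc_wqe_bpl2sgl - Convert a WQE request's BPL into the sglq's SGL
 * @phba: pointer to lpfc hba data structure.
 * @pwqeq: pointer to the WQE-based request.
 * @sglq: pointer to the sglq whose SGL is to be filled in.
 *
 * Copies each BDE of the request's buffer pointer list into the sglq's
 * scatter-gather list, marking the final SGE and setting SGE offsets and
 * types appropriate to the command (GEN_REQUEST, FCP target
 * TRSP/TSEND/TRECEIVE).  A single BDE embedded in a GEN_REQUEST WQE is
 * converted directly.  XMIT_BLS_RSP carries no payload and needs no
 * conversion.
 *
 * Return: the sglq's XRI tag on success, NO_XRI on failure.
 */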
19886static uint16_t
19887lpfc_wqe_bpl2sgl(struct lpfc_hba *phba, struct lpfc_iocbq *pwqeq,
19888 struct lpfc_sglq *sglq)
19889{
19890 uint16_t xritag = NO_XRI;
19891 struct ulp_bde64 *bpl = NULL;
19892 struct ulp_bde64 bde;
19893 struct sli4_sge *sgl = NULL;
19894 struct lpfc_dmabuf *dmabuf;
19895 union lpfc_wqe128 *wqe;
19896 int numBdes = 0;
19897 int i = 0;
19898 uint32_t offset = 0;
19899 int inbound = 0;
19900 uint32_t cmd;
19901
19902 if (!pwqeq || !sglq)
19903 return xritag;
19904
19905 sgl = (struct sli4_sge *)sglq->sgl;
19906 wqe = &pwqeq->wqe;
19907 pwqeq->iocb.ulpIoTag = pwqeq->iotag;
19908
19909 cmd = bf_get(wqe_cmnd, &wqe->generic.wqe_com);
19910 if (cmd == CMD_XMIT_BLS_RSP64_WQE)
19911 return sglq->sli4_xritag;
19912 numBdes = pwqeq->rsvd2;
19913 if (numBdes) {
19914
19915
19916
19917
19918 if (pwqeq->context3)
19919 dmabuf = (struct lpfc_dmabuf *)pwqeq->context3;
19920 else
19921 return xritag;
19922
19923 bpl = (struct ulp_bde64 *)dmabuf->virt;
19924 if (!bpl)
19925 return xritag;
19926
19927 for (i = 0; i < numBdes; i++) {
19928
19929 sgl->addr_hi = bpl->addrHigh;
19930 sgl->addr_lo = bpl->addrLow;
19931
19932 sgl->word2 = le32_to_cpu(sgl->word2);
19933 if ((i+1) == numBdes)
19934 bf_set(lpfc_sli4_sge_last, sgl, 1);
19935 else
19936 bf_set(lpfc_sli4_sge_last, sgl, 0);
19937
19938
19939
19940 bde.tus.w = le32_to_cpu(bpl->tus.w);
19941 sgl->sge_len = cpu_to_le32(bde.tus.f.bdeSize);
19942
19943
19944
19945
19946 switch (cmd) {
19947 case CMD_GEN_REQUEST64_WQE:
19948
19949 if (bpl->tus.f.bdeFlags == BUFF_TYPE_BDE_64I)
19950 inbound++;
19951
19952 if (inbound == 1)
19953 offset = 0;
19954 bf_set(lpfc_sli4_sge_offset, sgl, offset);
19955 bf_set(lpfc_sli4_sge_type, sgl,
19956 LPFC_SGE_TYPE_DATA);
19957 offset += bde.tus.f.bdeSize;
19958 break;
19959 case CMD_FCP_TRSP64_WQE:
19960 bf_set(lpfc_sli4_sge_offset, sgl, 0);
19961 bf_set(lpfc_sli4_sge_type, sgl,
19962 LPFC_SGE_TYPE_DATA);
19963 break;
19964 case CMD_FCP_TSEND64_WQE:
19965 case CMD_FCP_TRECEIVE64_WQE:
19966 bf_set(lpfc_sli4_sge_type, sgl,
19967 bpl->tus.f.bdeFlags);
19968 if (i < 3)
19969 offset = 0;
19970 else
19971 offset += bde.tus.f.bdeSize;
19972 bf_set(lpfc_sli4_sge_offset, sgl, offset);
19973 break;
19974 }
19975 sgl->word2 = cpu_to_le32(sgl->word2);
19976 bpl++;
19977 sgl++;
19978 }
19979 } else if (wqe->gen_req.bde.tus.f.bdeFlags == BUFF_TYPE_BDE_64) {
19980
19981
19982
19983
19984 sgl->addr_hi = cpu_to_le32(wqe->gen_req.bde.addrHigh);
19985 sgl->addr_lo = cpu_to_le32(wqe->gen_req.bde.addrLow);
19986 sgl->word2 = le32_to_cpu(sgl->word2);
19987 bf_set(lpfc_sli4_sge_last, sgl, 1);
19988 sgl->word2 = cpu_to_le32(sgl->word2);
19989 sgl->sge_len = cpu_to_le32(wqe->gen_req.bde.tus.f.bdeSize);
19990 }
19991 return sglq->sli4_xritag;
19992}
19993
19994
19995
19996
19997
19998
19999
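/**
 * lpfc_sli4_issue_wqe - Issue a WQE-based command on the appropriate WQ
 * @phba: pointer to lpfc hba data structure.
 * @qp: pointer to the hardware queue to use for I/O commands.
 * @pwqe: pointer to the WQE-based request to issue.
 *
 * Posts NVME LS commands to the NVME LS work queue (allocating an ELS
 * sglq and converting the BPL), and NVME/NVMET I/O commands to the
 * hardware queue's io_wq, adding the request to the ring's txcmplq and
 * kicking EQ polling in fastpath mode.
 *
 * Return: 0 on success; WQE_BUSY, WQE_ERROR or the WQ put status on
 * failure.
 */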
20000int
20001lpfc_sli4_issue_wqe(struct lpfc_hba *phba, struct lpfc_sli4_hdw_queue *qp,
20002 struct lpfc_iocbq *pwqe)
20003{
20004 union lpfc_wqe128 *wqe = &pwqe->wqe;
20005 struct lpfc_async_xchg_ctx *ctxp;
20006 struct lpfc_queue *wq;
20007 struct lpfc_sglq *sglq;
20008 struct lpfc_sli_ring *pring;
20009 unsigned long iflags;
20010 uint32_t ret = 0;
20011
20012
20013 if (pwqe->iocb_flag & LPFC_IO_NVME_LS) {
20014 pring = phba->sli4_hba.nvmels_wq->pring;
20015 lpfc_qp_spin_lock_irqsave(&pring->ring_lock, iflags,
20016 qp, wq_access);
20017 sglq = __lpfc_sli_get_els_sglq(phba, pwqe);
20018 if (!sglq) {
20019 spin_unlock_irqrestore(&pring->ring_lock, iflags);
20020 return WQE_BUSY;
20021 }
20022 pwqe->sli4_lxritag = sglq->sli4_lxritag;
20023 pwqe->sli4_xritag = sglq->sli4_xritag;
20024 if (lpfc_wqe_bpl2sgl(phba, pwqe, sglq) == NO_XRI) {
20025 spin_unlock_irqrestore(&pring->ring_lock, iflags);
20026 return WQE_ERROR;
20027 }
20028 bf_set(wqe_xri_tag, &pwqe->wqe.xmit_bls_rsp.wqe_com,
20029 pwqe->sli4_xritag);
20030 ret = lpfc_sli4_wq_put(phba->sli4_hba.nvmels_wq, wqe);
20031 if (ret) {
20032 spin_unlock_irqrestore(&pring->ring_lock, iflags);
20033 return ret;
20034 }
20035
20036 lpfc_sli_ringtxcmpl_put(phba, pring, pwqe);
20037 spin_unlock_irqrestore(&pring->ring_lock, iflags);
20038
20039 lpfc_sli4_poll_eq(qp->hba_eq, LPFC_POLL_FASTPATH);
20040 return 0;
20041 }
20042
20043
20044 if (pwqe->iocb_flag & LPFC_IO_NVME) {
20045
20046 wq = qp->io_wq;
20047 pring = wq->pring;
20048
20049 bf_set(wqe_cqid, &wqe->generic.wqe_com, qp->io_cq_map);
20050
20051 lpfc_qp_spin_lock_irqsave(&pring->ring_lock, iflags,
20052 qp, wq_access);
20053 ret = lpfc_sli4_wq_put(wq, wqe);
20054 if (ret) {
20055 spin_unlock_irqrestore(&pring->ring_lock, iflags);
20056 return ret;
20057 }
20058 lpfc_sli_ringtxcmpl_put(phba, pring, pwqe);
20059 spin_unlock_irqrestore(&pring->ring_lock, iflags);
20060
20061 lpfc_sli4_poll_eq(qp->hba_eq, LPFC_POLL_FASTPATH);
20062 return 0;
20063 }
20064
20065
20066 if (pwqe->iocb_flag & LPFC_IO_NVMET) {
20067
20068 wq = qp->io_wq;
20069 pring = wq->pring;
20070
20071 ctxp = pwqe->context2;
20072 sglq = ctxp->ctxbuf->sglq;
20073 if (pwqe->sli4_xritag == NO_XRI) {
20074 pwqe->sli4_lxritag = sglq->sli4_lxritag;
20075 pwqe->sli4_xritag = sglq->sli4_xritag;
20076 }
20077 bf_set(wqe_xri_tag, &pwqe->wqe.xmit_bls_rsp.wqe_com,
20078 pwqe->sli4_xritag);
20079 bf_set(wqe_cqid, &wqe->generic.wqe_com, qp->io_cq_map);
20080
20081 lpfc_qp_spin_lock_irqsave(&pring->ring_lock, iflags,
20082 qp, wq_access);
20083 ret = lpfc_sli4_wq_put(wq, wqe);
20084 if (ret) {
20085 spin_unlock_irqrestore(&pring->ring_lock, iflags);
20086 return ret;
20087 }
20088 lpfc_sli_ringtxcmpl_put(phba, pring, pwqe);
20089 spin_unlock_irqrestore(&pring->ring_lock, iflags);
20090
20091 lpfc_sli4_poll_eq(qp->hba_eq, LPFC_POLL_FASTPATH);
20092 return 0;
20093 }
20094 return WQE_ERROR;
20095}
20096
20097#ifdef LPFC_MXP_STAT
20098
20099
20100
20101
20102
20103
20104
20105
20106
20107
20108
20109
20110
20111
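/**
 * lpfc_snapshot_mxp - Snapshot multi-XRI pool statistics for a hardware queue
 * @phba: pointer to lpfc hba data structure.
 * @hwqid: hardware queue index.
 *
 * When the per-queue snapshot counter reaches LPFC_MXP_SNAPSHOT_TAKEN,
 * records the public pool count, private pool count and outstanding
 * (txcmplq) count for later reporting.  Compiled only when
 * LPFC_MXP_STAT is defined.
 */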
20112void lpfc_snapshot_mxp(struct lpfc_hba *phba, u32 hwqid)
20113{
20114 struct lpfc_sli4_hdw_queue *qp;
20115 struct lpfc_multixri_pool *multixri_pool;
20116 struct lpfc_pvt_pool *pvt_pool;
20117 struct lpfc_pbl_pool *pbl_pool;
20118 u32 txcmplq_cnt;
20119
20120 qp = &phba->sli4_hba.hdwq[hwqid];
20121 multixri_pool = qp->p_multixri_pool;
20122 if (!multixri_pool)
20123 return;
20124
20125 if (multixri_pool->stat_snapshot_taken == LPFC_MXP_SNAPSHOT_TAKEN) {
20126 pvt_pool = &qp->p_multixri_pool->pvt_pool;
20127 pbl_pool = &qp->p_multixri_pool->pbl_pool;
20128 txcmplq_cnt = qp->io_wq->pring->txcmplq_cnt;
20129
20130 multixri_pool->stat_pbl_count = pbl_pool->count;
20131 multixri_pool->stat_pvt_count = pvt_pool->count;
20132 multixri_pool->stat_busy_count = txcmplq_cnt;
20133 }
20134
20135 multixri_pool->stat_snapshot_taken++;
20136}
20137#endif
20138
20139
20140
20141
20142
20143
20144
20145
20146
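/**
 * lpfc_adjust_pvt_pool_count - Trim the private pool when a queue goes idle
 * @phba: pointer to lpfc hba data structure.
 * @hwqid: hardware queue index.
 *
 * If no new I/O has been requested on this hardware queue since the last
 * invocation, moves XRIs from its private pool back to the public pool;
 * otherwise simply records the current request count for the next check.
 */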
20147void lpfc_adjust_pvt_pool_count(struct lpfc_hba *phba, u32 hwqid)
20148{
20149 struct lpfc_multixri_pool *multixri_pool;
20150 u32 io_req_count;
20151 u32 prev_io_req_count;
20152
20153 multixri_pool = phba->sli4_hba.hdwq[hwqid].p_multixri_pool;
20154 if (!multixri_pool)
20155 return;
20156 io_req_count = multixri_pool->io_req_count;
20157 prev_io_req_count = multixri_pool->prev_io_req_count;
20158
20159 if (prev_io_req_count != io_req_count) {
20160
20161 multixri_pool->prev_io_req_count = io_req_count;
20162 } else {
20163
20164
20165
20166 lpfc_move_xri_pvt_to_pbl(phba, hwqid);
20167 }
20168}
20169
20170
20171
20172
20173
20174
20175
20176
20177
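/**
 * lpfc_adjust_high_watermark - Recompute the private pool high watermark
 * @phba: pointer to lpfc hba data structure.
 * @hwqid: hardware queue index.
 *
 * Sets the private pool high watermark to the number of outstanding and
 * aborted I/Os on this hardware queue, clamped between half of and the
 * full XRI limit of the queue's multi-XRI pool.
 */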
20178void lpfc_adjust_high_watermark(struct lpfc_hba *phba, u32 hwqid)
20179{
20180 u32 new_watermark;
20181 u32 watermark_max;
20182 u32 watermark_min;
20183 u32 xri_limit;
20184 u32 txcmplq_cnt;
20185 u32 abts_io_bufs;
20186 struct lpfc_multixri_pool *multixri_pool;
20187 struct lpfc_sli4_hdw_queue *qp;
20188
20189 qp = &phba->sli4_hba.hdwq[hwqid];
20190 multixri_pool = qp->p_multixri_pool;
20191 if (!multixri_pool)
20192 return;
20193 xri_limit = multixri_pool->xri_limit;
20194
20195 watermark_max = xri_limit;
20196 watermark_min = xri_limit / 2;
20197
20198 txcmplq_cnt = qp->io_wq->pring->txcmplq_cnt;
20199 abts_io_bufs = qp->abts_scsi_io_bufs;
20200 abts_io_bufs += qp->abts_nvme_io_bufs;
20201
20202 new_watermark = txcmplq_cnt + abts_io_bufs;
20203 new_watermark = min(watermark_max, new_watermark);
20204 new_watermark = max(watermark_min, new_watermark);
20205 multixri_pool->pvt_pool.high_watermark = new_watermark;
20206
20207#ifdef LPFC_MXP_STAT
20208 multixri_pool->stat_max_hwm = max(multixri_pool->stat_max_hwm,
20209 new_watermark);
20210#endif
20211}
20212
20213
20214
20215
20216
20217
20218
20219
20220
20221
20222
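/**
 * lpfc_move_xri_pvt_to_pbl - Move surplus XRIs from private to public pool
 * @phba: pointer to lpfc hba data structure.
 * @hwqid: hardware queue index.
 *
 * Moves I/O buffers from the hardware queue's private pool to its public
 * pool, keeping at most low_watermark buffers in the private pool (or
 * moving everything when the private pool is already at or below the
 * low watermark).
 */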
20223void lpfc_move_xri_pvt_to_pbl(struct lpfc_hba *phba, u32 hwqid)
20224{
20225 struct lpfc_pbl_pool *pbl_pool;
20226 struct lpfc_pvt_pool *pvt_pool;
20227 struct lpfc_sli4_hdw_queue *qp;
20228 struct lpfc_io_buf *lpfc_ncmd;
20229 struct lpfc_io_buf *lpfc_ncmd_next;
20230 unsigned long iflag;
20231 struct list_head tmp_list;
20232 u32 tmp_count;
20233
20234 qp = &phba->sli4_hba.hdwq[hwqid];
20235 pbl_pool = &qp->p_multixri_pool->pbl_pool;
20236 pvt_pool = &qp->p_multixri_pool->pvt_pool;
20237 tmp_count = 0;
20238
20239 lpfc_qp_spin_lock_irqsave(&pbl_pool->lock, iflag, qp, mv_to_pub_pool);
20240 lpfc_qp_spin_lock(&pvt_pool->lock, qp, mv_from_pvt_pool);
20241
20242 if (pvt_pool->count > pvt_pool->low_watermark) {
20243
20244
20245
20246
20247
20248 INIT_LIST_HEAD(&tmp_list);
20249 list_for_each_entry_safe(lpfc_ncmd, lpfc_ncmd_next,
20250 &pvt_pool->list, list) {
20251 list_move_tail(&lpfc_ncmd->list, &tmp_list);
20252 tmp_count++;
20253 if (tmp_count >= pvt_pool->low_watermark)
20254 break;
20255 }
20256
20257
20258 list_splice_init(&pvt_pool->list, &pbl_pool->list);
20259
20260
20261 list_splice(&tmp_list, &pvt_pool->list);
20262
20263 pbl_pool->count += (pvt_pool->count - tmp_count);
20264 pvt_pool->count = tmp_count;
20265 } else {
20266
20267 list_splice_init(&pvt_pool->list, &pbl_pool->list);
20268 pbl_pool->count += pvt_pool->count;
20269 pvt_pool->count = 0;
20270 }
20271
20272 spin_unlock(&pvt_pool->lock);
20273 spin_unlock_irqrestore(&pbl_pool->lock, iflag);
20274}
20275
20276
20277
20278
20279
20280
20281
20282
20283
20284
20285
20286
20287
20288
20289
20290
20291
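/**
 * _lpfc_move_xri_pbl_to_pvt - Try to move XRIs from one public pool to private
 * @phba: pointer to lpfc hba data structure.
 * @qp: hardware queue owning the private pool (for lock statistics).
 * @pbl_pool: public pool to take buffers from.
 * @pvt_pool: private pool to add buffers to.
 * @count: maximum number of buffers to move.
 *
 * Uses a trylock on the public pool so the caller never blocks on a
 * contended pool.
 *
 * Return: true if the lock was acquired and the public pool had buffers
 * to move, false otherwise.
 */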
20292static bool
20293_lpfc_move_xri_pbl_to_pvt(struct lpfc_hba *phba, struct lpfc_sli4_hdw_queue *qp,
20294 struct lpfc_pbl_pool *pbl_pool,
20295 struct lpfc_pvt_pool *pvt_pool, u32 count)
20296{
20297 struct lpfc_io_buf *lpfc_ncmd;
20298 struct lpfc_io_buf *lpfc_ncmd_next;
20299 unsigned long iflag;
20300 int ret;
20301
20302 ret = spin_trylock_irqsave(&pbl_pool->lock, iflag);
20303 if (ret) {
20304 if (pbl_pool->count) {
20305
20306 lpfc_qp_spin_lock(&pvt_pool->lock, qp, mv_to_pvt_pool);
20307 list_for_each_entry_safe(lpfc_ncmd,
20308 lpfc_ncmd_next,
20309 &pbl_pool->list,
20310 list) {
20311 list_move_tail(&lpfc_ncmd->list,
20312 &pvt_pool->list);
20313 pvt_pool->count++;
20314 pbl_pool->count--;
20315 count--;
20316 if (count == 0)
20317 break;
20318 }
20319
20320 spin_unlock(&pvt_pool->lock);
20321 spin_unlock_irqrestore(&pbl_pool->lock, iflag);
20322 return true;
20323 }
20324 spin_unlock_irqrestore(&pbl_pool->lock, iflag);
20325 }
20326
20327 return false;
20328}
20329
20330
20331
20332
20333
20334
20335
20336
20337
20338
20339
20340
20341
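/**
 * lpfc_move_xri_pbl_to_pvt - Refill a hardware queue's private pool
 * @phba: pointer to lpfc hba data structure.
 * @hwqid: hardware queue index whose private pool needs buffers.
 * @count: maximum number of buffers to move.
 *
 * Tries the hardware queue's own public pool first, then the public
 * pools of the other hardware queues in round-robin order, remembering
 * where to start the next round-robin search.  A fully empty search is
 * counted in pbl_empty_count.
 */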
20342void lpfc_move_xri_pbl_to_pvt(struct lpfc_hba *phba, u32 hwqid, u32 count)
20343{
20344 struct lpfc_multixri_pool *multixri_pool;
20345 struct lpfc_multixri_pool *next_multixri_pool;
20346 struct lpfc_pvt_pool *pvt_pool;
20347 struct lpfc_pbl_pool *pbl_pool;
20348 struct lpfc_sli4_hdw_queue *qp;
20349 u32 next_hwqid;
20350 u32 hwq_count;
20351 int ret;
20352
20353 qp = &phba->sli4_hba.hdwq[hwqid];
20354 multixri_pool = qp->p_multixri_pool;
20355 pvt_pool = &multixri_pool->pvt_pool;
20356 pbl_pool = &multixri_pool->pbl_pool;
20357
20358
20359 ret = _lpfc_move_xri_pbl_to_pvt(phba, qp, pbl_pool, pvt_pool, count);
20360 if (ret) {
20361#ifdef LPFC_MXP_STAT
20362 multixri_pool->local_pbl_hit_count++;
20363#endif
20364 return;
20365 }
20366
20367 hwq_count = phba->cfg_hdw_queue;
20368
20369
20370 next_hwqid = multixri_pool->rrb_next_hwqid;
20371
20372 do {
20373
20374 next_hwqid = (next_hwqid + 1) % hwq_count;
20375
20376 next_multixri_pool =
20377 phba->sli4_hba.hdwq[next_hwqid].p_multixri_pool;
20378 pbl_pool = &next_multixri_pool->pbl_pool;
20379
20380
20381 ret = _lpfc_move_xri_pbl_to_pvt(
20382 phba, qp, pbl_pool, pvt_pool, count);
20383
20384
20385 } while (!ret && next_hwqid != multixri_pool->rrb_next_hwqid);
20386
20387
20388 multixri_pool->rrb_next_hwqid = next_hwqid;
20389
20390 if (!ret) {
20391
20392 multixri_pool->pbl_empty_count++;
20393 }
20394
20395#ifdef LPFC_MXP_STAT
20396 if (ret) {
20397 if (next_hwqid == hwqid)
20398 multixri_pool->local_pbl_hit_count++;
20399 else
20400 multixri_pool->other_pbl_hit_count++;
20401 }
20402#endif
20403}
20404
20405
20406
20407
20408
20409
20410
20411
20412
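/**
 * lpfc_keep_pvt_pool_above_lowwm - Top up a private pool below its watermark
 * @phba: pointer to lpfc hba data structure.
 * @hwqid: hardware queue index.
 *
 * Moves an XRI_BATCH worth of buffers from the public pools into this
 * hardware queue's private pool when it has dropped below its low
 * watermark.
 */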
20413void lpfc_keep_pvt_pool_above_lowwm(struct lpfc_hba *phba, u32 hwqid)
20414{
20415 struct lpfc_multixri_pool *multixri_pool;
20416 struct lpfc_pvt_pool *pvt_pool;
20417
20418 multixri_pool = phba->sli4_hba.hdwq[hwqid].p_multixri_pool;
20419 pvt_pool = &multixri_pool->pvt_pool;
20420
20421 if (pvt_pool->count < pvt_pool->low_watermark)
20422 lpfc_move_xri_pbl_to_pvt(phba, hwqid, XRI_BATCH);
20423}
20424
20425
20426
20427
20428
20429
20430
20431
20432
20433
20434
20435
20436
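/**
 * lpfc_release_io_buf - Return an I/O buffer to the appropriate free pool
 * @phba: pointer to lpfc hba data structure.
 * @lpfc_ncmd: pointer to the I/O buffer being released.
 * @qp: hardware queue the buffer belongs to.
 *
 * Clears the buffer's command and completion pointers and releases any
 * extra SGL and FCP cmd/rsp buffers attached to it.  With XRI
 * rebalancing enabled the buffer goes to the expedite pool (if it was an
 * expedite buffer) or to the private or public multi-XRI pool depending
 * on the watermarks and XRI ownership; otherwise it is added to the
 * hardware queue's put list.
 */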
20437void lpfc_release_io_buf(struct lpfc_hba *phba, struct lpfc_io_buf *lpfc_ncmd,
20438 struct lpfc_sli4_hdw_queue *qp)
20439{
20440 unsigned long iflag;
20441 struct lpfc_pbl_pool *pbl_pool;
20442 struct lpfc_pvt_pool *pvt_pool;
20443 struct lpfc_epd_pool *epd_pool;
20444 u32 txcmplq_cnt;
20445 u32 xri_owned;
20446 u32 xri_limit;
20447 u32 abts_io_bufs;
20448
20449
20450 lpfc_ncmd->nvmeCmd = NULL;
20451 lpfc_ncmd->cur_iocbq.wqe_cmpl = NULL;
20452 lpfc_ncmd->cur_iocbq.iocb_cmpl = NULL;
20453
20454 if (phba->cfg_xpsgl && !phba->nvmet_support &&
20455 !list_empty(&lpfc_ncmd->dma_sgl_xtra_list))
20456 lpfc_put_sgl_per_hdwq(phba, lpfc_ncmd);
20457
20458 if (!list_empty(&lpfc_ncmd->dma_cmd_rsp_list))
20459 lpfc_put_cmd_rsp_buf_per_hdwq(phba, lpfc_ncmd);
20460
20461 if (phba->cfg_xri_rebalancing) {
20462 if (lpfc_ncmd->expedite) {
20463
20464 epd_pool = &phba->epd_pool;
20465 spin_lock_irqsave(&epd_pool->lock, iflag);
20466 list_add_tail(&lpfc_ncmd->list, &epd_pool->list);
20467 epd_pool->count++;
20468 spin_unlock_irqrestore(&epd_pool->lock, iflag);
20469 return;
20470 }
20471
20472
20473
20474
20475
20476 if (!qp->p_multixri_pool)
20477 return;
20478
20479 pbl_pool = &qp->p_multixri_pool->pbl_pool;
20480 pvt_pool = &qp->p_multixri_pool->pvt_pool;
20481
20482 txcmplq_cnt = qp->io_wq->pring->txcmplq_cnt;
20483 abts_io_bufs = qp->abts_scsi_io_bufs;
20484 abts_io_bufs += qp->abts_nvme_io_bufs;
20485
20486 xri_owned = pvt_pool->count + txcmplq_cnt + abts_io_bufs;
20487 xri_limit = qp->p_multixri_pool->xri_limit;
20488
20489#ifdef LPFC_MXP_STAT
20490 if (xri_owned <= xri_limit)
20491 qp->p_multixri_pool->below_limit_count++;
20492 else
20493 qp->p_multixri_pool->above_limit_count++;
20494#endif
20495
20496
20497
20498
20499 if ((pvt_pool->count < pvt_pool->low_watermark) ||
20500 (xri_owned < xri_limit &&
20501 pvt_pool->count < pvt_pool->high_watermark)) {
20502 lpfc_qp_spin_lock_irqsave(&pvt_pool->lock, iflag,
20503 qp, free_pvt_pool);
20504 list_add_tail(&lpfc_ncmd->list,
20505 &pvt_pool->list);
20506 pvt_pool->count++;
20507 spin_unlock_irqrestore(&pvt_pool->lock, iflag);
20508 } else {
20509 lpfc_qp_spin_lock_irqsave(&pbl_pool->lock, iflag,
20510 qp, free_pub_pool);
20511 list_add_tail(&lpfc_ncmd->list,
20512 &pbl_pool->list);
20513 pbl_pool->count++;
20514 spin_unlock_irqrestore(&pbl_pool->lock, iflag);
20515 }
20516 } else {
20517 lpfc_qp_spin_lock_irqsave(&qp->io_buf_list_put_lock, iflag,
20518 qp, free_xri);
20519 list_add_tail(&lpfc_ncmd->list,
20520 &qp->lpfc_io_buf_list_put);
20521 qp->put_io_bufs++;
20522 spin_unlock_irqrestore(&qp->io_buf_list_put_lock,
20523 iflag);
20524 }
20525}
20526
20527
20528
20529
20530
20531
20532
20533
20534
20535
20536
20537
20538
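/**
 * lpfc_get_io_buf_from_private_pool - Get an I/O buffer from a private pool
 * @phba: pointer to lpfc hba data structure.
 * @qp: hardware queue owning the private pool (for lock statistics).
 * @pvt_pool: private pool to take the buffer from.
 * @ndlp: node the I/O is destined for, used for the RRQ check.
 *
 * Return: the first buffer in the pool whose XRI is not held in an
 * active RRQ for @ndlp, or NULL if none is available.
 */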
20539static struct lpfc_io_buf *
20540lpfc_get_io_buf_from_private_pool(struct lpfc_hba *phba,
20541 struct lpfc_sli4_hdw_queue *qp,
20542 struct lpfc_pvt_pool *pvt_pool,
20543 struct lpfc_nodelist *ndlp)
20544{
20545 struct lpfc_io_buf *lpfc_ncmd;
20546 struct lpfc_io_buf *lpfc_ncmd_next;
20547 unsigned long iflag;
20548
20549 lpfc_qp_spin_lock_irqsave(&pvt_pool->lock, iflag, qp, alloc_pvt_pool);
20550 list_for_each_entry_safe(lpfc_ncmd, lpfc_ncmd_next,
20551 &pvt_pool->list, list) {
20552 if (lpfc_test_rrq_active(
20553 phba, ndlp, lpfc_ncmd->cur_iocbq.sli4_lxritag))
20554 continue;
20555 list_del(&lpfc_ncmd->list);
20556 pvt_pool->count--;
20557 spin_unlock_irqrestore(&pvt_pool->lock, iflag);
20558 return lpfc_ncmd;
20559 }
20560 spin_unlock_irqrestore(&pvt_pool->lock, iflag);
20561
20562 return NULL;
20563}
20564
20565
20566
20567
20568
20569
20570
20571
20572
20573
20574
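/**
 * lpfc_get_io_buf_from_expedite_pool - Get an I/O buffer from the expedite pool
 * @phba: pointer to lpfc hba data structure.
 *
 * Return: one buffer removed from the global expedite pool, or NULL if
 * the pool is empty.
 */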
20575static struct lpfc_io_buf *
20576lpfc_get_io_buf_from_expedite_pool(struct lpfc_hba *phba)
20577{
20578 struct lpfc_io_buf *lpfc_ncmd;
20579 struct lpfc_io_buf *lpfc_ncmd_next;
20580 unsigned long iflag;
20581 struct lpfc_epd_pool *epd_pool;
20582
20583 epd_pool = &phba->epd_pool;
20584 lpfc_ncmd = NULL;
20585
20586 spin_lock_irqsave(&epd_pool->lock, iflag);
20587 if (epd_pool->count > 0) {
20588 list_for_each_entry_safe(lpfc_ncmd, lpfc_ncmd_next,
20589 &epd_pool->list, list) {
20590 list_del(&lpfc_ncmd->list);
20591 epd_pool->count--;
20592 break;
20593 }
20594 }
20595 spin_unlock_irqrestore(&epd_pool->lock, iflag);
20596
20597 return lpfc_ncmd;
20598}
20599
20600
20601
20602
20603
20604
20605
20606
20607
20608
20609
20610
20611
20612
20613
20614
20615
20616
20617
20618
20619
20620
20621
20622
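/**
 * lpfc_get_io_buf_from_multixri_pools - Get an I/O buffer with XRI rebalancing
 * @phba: pointer to lpfc hba data structure.
 * @ndlp: node the I/O is destined for, used for the RRQ check.
 * @hwqid: hardware queue index to allocate from.
 * @expedite: nonzero for an expedite (reserved) request.
 *
 * Refills the hardware queue's private pool from the public pools when
 * it is empty, then allocates from the private pool.  Expedite requests
 * fall back to the global expedite pool when the private pool has
 * nothing usable.
 *
 * Return: pointer to the allocated buffer, or NULL if none is available.
 */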
20623static struct lpfc_io_buf *
20624lpfc_get_io_buf_from_multixri_pools(struct lpfc_hba *phba,
20625 struct lpfc_nodelist *ndlp,
20626 int hwqid, int expedite)
20627{
20628 struct lpfc_sli4_hdw_queue *qp;
20629 struct lpfc_multixri_pool *multixri_pool;
20630 struct lpfc_pvt_pool *pvt_pool;
20631 struct lpfc_io_buf *lpfc_ncmd;
20632
20633 qp = &phba->sli4_hba.hdwq[hwqid];
20634 lpfc_ncmd = NULL;
20635 multixri_pool = qp->p_multixri_pool;
20636 pvt_pool = &multixri_pool->pvt_pool;
20637 multixri_pool->io_req_count++;
20638
20639
20640 if (pvt_pool->count == 0)
20641 lpfc_move_xri_pbl_to_pvt(phba, hwqid, XRI_BATCH);
20642
20643
20644 lpfc_ncmd = lpfc_get_io_buf_from_private_pool(phba, qp, pvt_pool, ndlp);
20645
20646 if (lpfc_ncmd) {
20647 lpfc_ncmd->hdwq = qp;
20648 lpfc_ncmd->hdwq_no = hwqid;
20649 } else if (expedite) {
20650
20651
20652
20653 lpfc_ncmd = lpfc_get_io_buf_from_expedite_pool(phba);
20654 }
20655
20656 return lpfc_ncmd;
20657}
20658
20659static inline struct lpfc_io_buf *
20660lpfc_io_buf(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp, int idx)
20661{
20662 struct lpfc_sli4_hdw_queue *qp;
20663 struct lpfc_io_buf *lpfc_cmd, *lpfc_cmd_next;
20664
20665 qp = &phba->sli4_hba.hdwq[idx];
20666 list_for_each_entry_safe(lpfc_cmd, lpfc_cmd_next,
20667 &qp->lpfc_io_buf_list_get, list) {
20668 if (lpfc_test_rrq_active(phba, ndlp,
20669 lpfc_cmd->cur_iocbq.sli4_lxritag))
20670 continue;
20671
20672 if (lpfc_cmd->flags & LPFC_SBUF_NOT_POSTED)
20673 continue;
20674
20675 list_del_init(&lpfc_cmd->list);
20676 qp->get_io_bufs--;
20677 lpfc_cmd->hdwq = qp;
20678 lpfc_cmd->hdwq_no = idx;
20679 return lpfc_cmd;
20680 }
20681 return NULL;
20682}
20683
20684
20685
20686
20687
20688
20689
20690
20691
20692
20693
20694
20695
20696
20697
20698
20699
20700
20701
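/**
 * lpfc_get_io_buf - Allocate an I/O buffer for a hardware queue
 * @phba: pointer to lpfc hba data structure.
 * @ndlp: node the I/O is destined for, used for the RRQ check.
 * @hwqid: hardware queue index to allocate from.
 * @expedite: nonzero for an expedite (reserved) request.
 *
 * With XRI rebalancing enabled, allocates through the multi-XRI pools.
 * Otherwise allocates from the hardware queue's get list, refilling it
 * from the put list when needed, while keeping LPFC_NVME_EXPEDITE_XRICNT
 * buffers in reserve for expedite requests.
 *
 * Return: pointer to the allocated buffer, or NULL if none is available.
 */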
20702struct lpfc_io_buf *lpfc_get_io_buf(struct lpfc_hba *phba,
20703 struct lpfc_nodelist *ndlp,
20704 u32 hwqid, int expedite)
20705{
20706 struct lpfc_sli4_hdw_queue *qp;
20707 unsigned long iflag;
20708 struct lpfc_io_buf *lpfc_cmd;
20709
20710 qp = &phba->sli4_hba.hdwq[hwqid];
20711 lpfc_cmd = NULL;
20712
20713 if (phba->cfg_xri_rebalancing)
20714 lpfc_cmd = lpfc_get_io_buf_from_multixri_pools(
20715 phba, ndlp, hwqid, expedite);
20716 else {
20717 lpfc_qp_spin_lock_irqsave(&qp->io_buf_list_get_lock, iflag,
20718 qp, alloc_xri_get);
20719 if (qp->get_io_bufs > LPFC_NVME_EXPEDITE_XRICNT || expedite)
20720 lpfc_cmd = lpfc_io_buf(phba, ndlp, hwqid);
20721 if (!lpfc_cmd) {
20722 lpfc_qp_spin_lock(&qp->io_buf_list_put_lock,
20723 qp, alloc_xri_put);
20724 list_splice(&qp->lpfc_io_buf_list_put,
20725 &qp->lpfc_io_buf_list_get);
20726 qp->get_io_bufs += qp->put_io_bufs;
20727 INIT_LIST_HEAD(&qp->lpfc_io_buf_list_put);
20728 qp->put_io_bufs = 0;
20729 spin_unlock(&qp->io_buf_list_put_lock);
20730 if (qp->get_io_bufs > LPFC_NVME_EXPEDITE_XRICNT ||
20731 expedite)
20732 lpfc_cmd = lpfc_io_buf(phba, ndlp, hwqid);
20733 }
20734 spin_unlock_irqrestore(&qp->io_buf_list_get_lock, iflag);
20735 }
20736
20737 return lpfc_cmd;
20738}
20739
20740
20741
20742
20743
20744
20745
20746
20747
20748
20749
20750
20751
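/**
 * lpfc_get_sgl_per_hdwq - Attach an extra SGL chunk to an I/O buffer
 * @phba: pointer to lpfc hba data structure.
 * @lpfc_buf: I/O buffer that needs an additional SGL chunk.
 *
 * Takes a spare SGL chunk from the hardware queue's sgl_list, or
 * allocates a new one from the SG DMA pool on the queue's NUMA node, and
 * appends it to the buffer's dma_sgl_xtra_list.
 *
 * Return: pointer to the attached SGL chunk, or NULL on allocation
 * failure.
 */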
20752struct sli4_hybrid_sgl *
20753lpfc_get_sgl_per_hdwq(struct lpfc_hba *phba, struct lpfc_io_buf *lpfc_buf)
20754{
20755 struct sli4_hybrid_sgl *list_entry = NULL;
20756 struct sli4_hybrid_sgl *tmp = NULL;
20757 struct sli4_hybrid_sgl *allocated_sgl = NULL;
20758 struct lpfc_sli4_hdw_queue *hdwq = lpfc_buf->hdwq;
20759 struct list_head *buf_list = &hdwq->sgl_list;
20760 unsigned long iflags;
20761
20762 spin_lock_irqsave(&hdwq->hdwq_lock, iflags);
20763
20764 if (likely(!list_empty(buf_list))) {
20765
20766 list_for_each_entry_safe(list_entry, tmp,
20767 buf_list, list_node) {
20768 list_move_tail(&list_entry->list_node,
20769 &lpfc_buf->dma_sgl_xtra_list);
20770 break;
20771 }
20772 } else {
20773
20774 spin_unlock_irqrestore(&hdwq->hdwq_lock, iflags);
20775 tmp = kmalloc_node(sizeof(*tmp), GFP_ATOMIC,
20776 cpu_to_node(hdwq->io_wq->chann));
20777 if (!tmp) {
20778 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
20779 "8353 error kmalloc memory for HDWQ "
20780 "%d %s\n",
20781 lpfc_buf->hdwq_no, __func__);
20782 return NULL;
20783 }
20784
20785 tmp->dma_sgl = dma_pool_alloc(phba->lpfc_sg_dma_buf_pool,
20786 GFP_ATOMIC, &tmp->dma_phys_sgl);
20787 if (!tmp->dma_sgl) {
20788 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
20789 "8354 error pool_alloc memory for HDWQ "
20790 "%d %s\n",
20791 lpfc_buf->hdwq_no, __func__);
20792 kfree(tmp);
20793 return NULL;
20794 }
20795
20796 spin_lock_irqsave(&hdwq->hdwq_lock, iflags);
20797 list_add_tail(&tmp->list_node, &lpfc_buf->dma_sgl_xtra_list);
20798 }
20799
20800 allocated_sgl = list_last_entry(&lpfc_buf->dma_sgl_xtra_list,
20801 struct sli4_hybrid_sgl,
20802 list_node);
20803
20804 spin_unlock_irqrestore(&hdwq->hdwq_lock, iflags);
20805
20806 return allocated_sgl;
20807}
20808
20809
20810
20811
20812
20813
20814
20815
20816
20817
20818
20819
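/**
 * lpfc_put_sgl_per_hdwq - Return an I/O buffer's extra SGL chunks
 * @phba: pointer to lpfc hba data structure.
 * @lpfc_buf: I/O buffer whose extra SGL chunks are being returned.
 *
 * Moves all chunks on the buffer's dma_sgl_xtra_list back to the
 * hardware queue's sgl_list for reuse.
 *
 * Return: 0 on success, -EINVAL if the buffer had no extra SGL chunks.
 */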
20820int
20821lpfc_put_sgl_per_hdwq(struct lpfc_hba *phba, struct lpfc_io_buf *lpfc_buf)
20822{
20823 int rc = 0;
20824 struct sli4_hybrid_sgl *list_entry = NULL;
20825 struct sli4_hybrid_sgl *tmp = NULL;
20826 struct lpfc_sli4_hdw_queue *hdwq = lpfc_buf->hdwq;
20827 struct list_head *buf_list = &hdwq->sgl_list;
20828 unsigned long iflags;
20829
20830 spin_lock_irqsave(&hdwq->hdwq_lock, iflags);
20831
20832 if (likely(!list_empty(&lpfc_buf->dma_sgl_xtra_list))) {
20833 list_for_each_entry_safe(list_entry, tmp,
20834 &lpfc_buf->dma_sgl_xtra_list,
20835 list_node) {
20836 list_move_tail(&list_entry->list_node,
20837 buf_list);
20838 }
20839 } else {
20840 rc = -EINVAL;
20841 }
20842
20843 spin_unlock_irqrestore(&hdwq->hdwq_lock, iflags);
20844 return rc;
20845}
20846
20847
20848
20849
20850
20851
20852
20853
20854
20855
20856
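/**
 * lpfc_free_sgl_per_hdwq - Free a hardware queue's spare SGL chunks
 * @phba: pointer to lpfc hba data structure.
 * @hdwq: hardware queue whose sgl_list is being torn down.
 *
 * Frees every spare SGL chunk on the hardware queue's sgl_list back to
 * the SG DMA pool.
 */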
20857void
20858lpfc_free_sgl_per_hdwq(struct lpfc_hba *phba,
20859 struct lpfc_sli4_hdw_queue *hdwq)
20860{
20861 struct list_head *buf_list = &hdwq->sgl_list;
20862 struct sli4_hybrid_sgl *list_entry = NULL;
20863 struct sli4_hybrid_sgl *tmp = NULL;
20864 unsigned long iflags;
20865
20866 spin_lock_irqsave(&hdwq->hdwq_lock, iflags);
20867
20868
20869 list_for_each_entry_safe(list_entry, tmp,
20870 buf_list, list_node) {
20871 dma_pool_free(phba->lpfc_sg_dma_buf_pool,
20872 list_entry->dma_sgl,
20873 list_entry->dma_phys_sgl);
20874 list_del(&list_entry->list_node);
20875 kfree(list_entry);
20876 }
20877
20878 spin_unlock_irqrestore(&hdwq->hdwq_lock, iflags);
20879}
20880
20881
20882
20883
20884
20885
20886
20887
20888
20889
20890
20891
20892
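/**
 * lpfc_get_cmd_rsp_buf_per_hdwq - Attach an FCP cmd/rsp buffer to an I/O buffer
 * @phba: pointer to lpfc hba data structure.
 * @lpfc_buf: I/O buffer that needs an FCP command/response buffer.
 *
 * Takes a spare cmd/rsp buffer from the hardware queue's
 * cmd_rsp_buf_list, or allocates a new one from the cmd/rsp DMA pool on
 * the queue's NUMA node (the response area follows the command in the
 * same allocation), and appends it to the buffer's dma_cmd_rsp_list.
 *
 * Return: pointer to the attached cmd/rsp buffer, or NULL on allocation
 * failure.
 */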
20893struct fcp_cmd_rsp_buf *
20894lpfc_get_cmd_rsp_buf_per_hdwq(struct lpfc_hba *phba,
20895 struct lpfc_io_buf *lpfc_buf)
20896{
20897 struct fcp_cmd_rsp_buf *list_entry = NULL;
20898 struct fcp_cmd_rsp_buf *tmp = NULL;
20899 struct fcp_cmd_rsp_buf *allocated_buf = NULL;
20900 struct lpfc_sli4_hdw_queue *hdwq = lpfc_buf->hdwq;
20901 struct list_head *buf_list = &hdwq->cmd_rsp_buf_list;
20902 unsigned long iflags;
20903
20904 spin_lock_irqsave(&hdwq->hdwq_lock, iflags);
20905
20906 if (likely(!list_empty(buf_list))) {
20907
20908 list_for_each_entry_safe(list_entry, tmp,
20909 buf_list,
20910 list_node) {
20911 list_move_tail(&list_entry->list_node,
20912 &lpfc_buf->dma_cmd_rsp_list);
20913 break;
20914 }
20915 } else {
20916
20917 spin_unlock_irqrestore(&hdwq->hdwq_lock, iflags);
20918 tmp = kmalloc_node(sizeof(*tmp), GFP_ATOMIC,
20919 cpu_to_node(hdwq->io_wq->chann));
20920 if (!tmp) {
20921 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
20922 "8355 error kmalloc memory for HDWQ "
20923 "%d %s\n",
20924 lpfc_buf->hdwq_no, __func__);
20925 return NULL;
20926 }
20927
20928 tmp->fcp_cmnd = dma_pool_alloc(phba->lpfc_cmd_rsp_buf_pool,
20929 GFP_ATOMIC,
20930 &tmp->fcp_cmd_rsp_dma_handle);
20931
20932 if (!tmp->fcp_cmnd) {
20933 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
20934 "8356 error pool_alloc memory for HDWQ "
20935 "%d %s\n",
20936 lpfc_buf->hdwq_no, __func__);
20937 kfree(tmp);
20938 return NULL;
20939 }
20940
20941 tmp->fcp_rsp = (struct fcp_rsp *)((uint8_t *)tmp->fcp_cmnd +
20942 sizeof(struct fcp_cmnd));
20943
20944 spin_lock_irqsave(&hdwq->hdwq_lock, iflags);
20945 list_add_tail(&tmp->list_node, &lpfc_buf->dma_cmd_rsp_list);
20946 }
20947
20948 allocated_buf = list_last_entry(&lpfc_buf->dma_cmd_rsp_list,
20949 struct fcp_cmd_rsp_buf,
20950 list_node);
20951
20952 spin_unlock_irqrestore(&hdwq->hdwq_lock, iflags);
20953
20954 return allocated_buf;
20955}
20956
20957
20958
20959
20960
20961
20962
20963
20964
20965
20966
20967
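/**
 * lpfc_put_cmd_rsp_buf_per_hdwq - Return an I/O buffer's FCP cmd/rsp buffers
 * @phba: pointer to lpfc hba data structure.
 * @lpfc_buf: I/O buffer whose cmd/rsp buffers are being returned.
 *
 * Moves all entries on the buffer's dma_cmd_rsp_list back to the
 * hardware queue's cmd_rsp_buf_list for reuse.
 *
 * Return: 0 on success, -EINVAL if the buffer had no cmd/rsp buffers.
 */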
20968int
20969lpfc_put_cmd_rsp_buf_per_hdwq(struct lpfc_hba *phba,
20970 struct lpfc_io_buf *lpfc_buf)
20971{
20972 int rc = 0;
20973 struct fcp_cmd_rsp_buf *list_entry = NULL;
20974 struct fcp_cmd_rsp_buf *tmp = NULL;
20975 struct lpfc_sli4_hdw_queue *hdwq = lpfc_buf->hdwq;
20976 struct list_head *buf_list = &hdwq->cmd_rsp_buf_list;
20977 unsigned long iflags;
20978
20979 spin_lock_irqsave(&hdwq->hdwq_lock, iflags);
20980
20981 if (likely(!list_empty(&lpfc_buf->dma_cmd_rsp_list))) {
20982 list_for_each_entry_safe(list_entry, tmp,
20983 &lpfc_buf->dma_cmd_rsp_list,
20984 list_node) {
20985 list_move_tail(&list_entry->list_node,
20986 buf_list);
20987 }
20988 } else {
20989 rc = -EINVAL;
20990 }
20991
20992 spin_unlock_irqrestore(&hdwq->hdwq_lock, iflags);
20993 return rc;
20994}
20995
20996
20997
20998
20999
21000
21001
21002
21003
21004
21005
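/**
 * lpfc_free_cmd_rsp_buf_per_hdwq - Free a hardware queue's cmd/rsp buffers
 * @phba: pointer to lpfc hba data structure.
 * @hdwq: hardware queue whose cmd_rsp_buf_list is being torn down.
 *
 * Frees every spare FCP command/response buffer on the hardware queue's
 * cmd_rsp_buf_list back to the cmd/rsp DMA pool.
 */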
21006void
21007lpfc_free_cmd_rsp_buf_per_hdwq(struct lpfc_hba *phba,
21008 struct lpfc_sli4_hdw_queue *hdwq)
21009{
21010 struct list_head *buf_list = &hdwq->cmd_rsp_buf_list;
21011 struct fcp_cmd_rsp_buf *list_entry = NULL;
21012 struct fcp_cmd_rsp_buf *tmp = NULL;
21013 unsigned long iflags;
21014
21015 spin_lock_irqsave(&hdwq->hdwq_lock, iflags);
21016
21017
21018 list_for_each_entry_safe(list_entry, tmp,
21019 buf_list,
21020 list_node) {
21021 dma_pool_free(phba->lpfc_cmd_rsp_buf_pool,
21022 list_entry->fcp_cmnd,
21023 list_entry->fcp_cmd_rsp_dma_handle);
21024 list_del(&list_entry->list_node);
21025 kfree(list_entry);
21026 }
21027
21028 spin_unlock_irqrestore(&hdwq->hdwq_lock, iflags);
21029}
21030