#include <linux/blkdev.h>
#include <linux/pci.h>
#include <linux/interrupt.h>
#include <linux/delay.h>
#include <linux/slab.h>
#include <linux/lockdep.h>

#include <scsi/scsi.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_transport_fc.h>
#include <scsi/fc/fc_fs.h>
#include <linux/aer.h>
#include <linux/crash_dump.h>
#ifdef CONFIG_X86
#include <asm/set_memory.h>
#endif

#include "lpfc_hw4.h"
#include "lpfc_hw.h"
#include "lpfc_sli.h"
#include "lpfc_sli4.h"
#include "lpfc_nl.h"
#include "lpfc_disc.h"
#include "lpfc.h"
#include "lpfc_scsi.h"
#include "lpfc_nvme.h"
#include "lpfc_crtn.h"
#include "lpfc_logmsg.h"
#include "lpfc_compat.h"
#include "lpfc_debugfs.h"
#include "lpfc_vport.h"
#include "lpfc_version.h"

/* There are only four IOCB completion types. */
typedef enum _lpfc_iocb_type {
	LPFC_UNKNOWN_IOCB,
	LPFC_UNSOL_IOCB,
	LPFC_SOL_IOCB,
	LPFC_ABORT_IOCB
} lpfc_iocb_type;

/* Provide function prototypes local to this module. */
static int lpfc_sli_issue_mbox_s4(struct lpfc_hba *, LPFC_MBOXQ_t *,
				  uint32_t);
static int lpfc_sli4_read_rev(struct lpfc_hba *, LPFC_MBOXQ_t *,
			      uint8_t *, uint32_t *);
static struct lpfc_iocbq *lpfc_sli4_els_wcqe_to_rspiocbq(struct lpfc_hba *,
							 struct lpfc_iocbq *);
static void lpfc_sli4_send_seq_to_ulp(struct lpfc_vport *,
				      struct hbq_dmabuf *);
static void lpfc_sli4_handle_mds_loopback(struct lpfc_vport *vport,
					  struct hbq_dmabuf *dmabuf);
static bool lpfc_sli4_fp_handle_cqe(struct lpfc_hba *phba,
				    struct lpfc_queue *cq, struct lpfc_cqe *cqe);
static int lpfc_sli4_post_sgl_list(struct lpfc_hba *, struct list_head *,
				   int);
static void lpfc_sli4_hba_handle_eqe(struct lpfc_hba *phba,
				     struct lpfc_queue *eq,
				     struct lpfc_eqe *eqe);
static bool lpfc_sli4_mbox_completions_pending(struct lpfc_hba *phba);
static bool lpfc_sli4_process_missed_mbox_completions(struct lpfc_hba *phba);
static struct lpfc_cqe *lpfc_sli4_cq_get(struct lpfc_queue *q);
static void __lpfc_sli4_consume_cqe(struct lpfc_hba *phba,
				    struct lpfc_queue *cq,
				    struct lpfc_cqe *cqe);

static IOCB_t *
lpfc_get_iocb_from_iocbq(struct lpfc_iocbq *iocbq)
{
	return &iocbq->iocb;
}

#if defined(CONFIG_64BIT) && defined(__LITTLE_ENDIAN)
/**
 * lpfc_sli4_pcimem_bcopy - SLI4 memory copy function
 * @srcp: Source memory pointer.
 * @destp: Destination memory pointer.
 * @cnt: Number of bytes to copy; expected to be a multiple of
 *       sizeof(uint64_t).
 *
 * This function is used for copying data between driver memory and the
 * SLI4 memory. On 64-bit little-endian hosts no byte swapping is needed,
 * so the copy can be done a doubleword at a time. This function can be
 * called with or without lock.
 **/
static void
lpfc_sli4_pcimem_bcopy(void *srcp, void *destp, uint32_t cnt)
{
	uint64_t *src = srcp;
	uint64_t *dest = destp;
	int i;

	for (i = 0; i < (int)cnt; i += sizeof(uint64_t))
		*dest++ = *src++;
}
#else
#define lpfc_sli4_pcimem_bcopy(a, b, c) lpfc_sli_pcimem_bcopy(a, b, c)
#endif
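
/*
 * Note: on hosts that are not 64-bit little-endian, the macro above falls
 * back to lpfc_sli_pcimem_bcopy() (defined elsewhere in this driver),
 * which copies one 32-bit word at a time and performs the little-endian
 * conversion the SLI4 interface expects.
 */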

/**
 * lpfc_sli4_wq_put - Put a Work Queue Entry on a Work Queue
 * @q: The Work Queue to operate on.
 * @wqe: The work Queue Entry to put on the Work queue.
 *
 * This routine will copy the contents of @wqe to the next available entry on
 * the @q. This function will then ring the Work Queue Doorbell to signal the
 * HBA to start processing the Work Queue Entry. This function returns 0 if
 * successful. If no entries are available on @q then this function will return
 * -ENOMEM.
 * The caller is expected to hold the hbalock when calling this routine.
 **/
static int
lpfc_sli4_wq_put(struct lpfc_queue *q, union lpfc_wqe128 *wqe)
{
	union lpfc_wqe *temp_wqe;
	struct lpfc_register doorbell;
	uint32_t host_index;
	uint32_t idx;
	uint32_t i = 0;
	uint8_t *tmp;
	u32 if_type;

	/* sanity check on queue memory */
	if (unlikely(!q))
		return -ENOMEM;
	temp_wqe = lpfc_sli4_qe(q, q->host_index);

	/* If the host has not yet processed the next entry then we are done */
	idx = ((q->host_index + 1) % q->entry_count);
	if (idx == q->hba_index) {
		q->WQ_overflow++;
		return -EBUSY;
	}
	q->WQ_posted++;
	/* set consumption flag every once in a while */
	if (!((q->host_index + 1) % q->notify_interval))
		bf_set(wqe_wqec, &wqe->generic.wqe_com, 1);
	else
		bf_set(wqe_wqec, &wqe->generic.wqe_com, 0);
	if (q->phba->sli3_options & LPFC_SLI4_PHWQ_ENABLED)
		bf_set(wqe_wqid, &wqe->generic.wqe_com, q->queue_id);
	lpfc_sli4_pcimem_bcopy(wqe, temp_wqe, q->entry_size);
	if (q->dpp_enable && q->phba->cfg_enable_dpp) {
		/* write to DPP aperture taking advantage of dual-pipe MA reg */
		tmp = (uint8_t *)temp_wqe;
#ifdef __raw_writeq
		for (i = 0; i < q->entry_size; i += sizeof(uint64_t))
			__raw_writeq(*((uint64_t *)(tmp + i)),
				     q->dpp_regaddr + i);
#else
		for (i = 0; i < q->entry_size; i += sizeof(uint32_t))
			__raw_writel(*((uint32_t *)(tmp + i)),
				     q->dpp_regaddr + i);
#endif
	}
	/* ensure WQE bcopy and DPP flushed before doorbell write */
	wmb();

	/* Update the host index before invoking device */
	host_index = q->host_index;

	q->host_index = idx;

	/* Ring Doorbell */
	doorbell.word0 = 0;
	if (q->db_format == LPFC_DB_LIST_FORMAT) {
		if (q->dpp_enable && q->phba->cfg_enable_dpp) {
			bf_set(lpfc_if6_wq_db_list_fm_num_posted, &doorbell, 1);
			bf_set(lpfc_if6_wq_db_list_fm_dpp, &doorbell, 1);
			bf_set(lpfc_if6_wq_db_list_fm_dpp_id, &doorbell,
			       q->dpp_id);
			bf_set(lpfc_if6_wq_db_list_fm_id, &doorbell,
			       q->queue_id);
		} else {
			bf_set(lpfc_wq_db_list_fm_num_posted, &doorbell, 1);
			bf_set(lpfc_wq_db_list_fm_id, &doorbell, q->queue_id);

			/* Leave bits <23:16> clear for if_type 6 dpp */
			if_type = bf_get(lpfc_sli_intf_if_type,
					 &q->phba->sli4_hba.sli_intf);
			if (if_type != LPFC_SLI_INTF_IF_TYPE_6)
				bf_set(lpfc_wq_db_list_fm_index, &doorbell,
				       host_index);
		}
	} else if (q->db_format == LPFC_DB_RING_FORMAT) {
		bf_set(lpfc_wq_db_ring_fm_num_posted, &doorbell, 1);
		bf_set(lpfc_wq_db_ring_fm_id, &doorbell, q->queue_id);
	} else {
		return -EINVAL;
	}
	writel(doorbell.word0, q->db_regaddr);

	return 0;
}
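
/*
 * Note on the DPP path above: when a work queue was created with a DPP
 * (Direct Packet Push) aperture, the WQE is additionally written straight
 * into the adapter's aperture with __raw_writeq()/__raw_writel(), and the
 * doorbell carries the dpp_id. This lets the port begin work on the
 * command without first DMA-reading the entry from host memory - roughly
 * a latency optimization; the host-memory copy is still posted normally.
 */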

/**
 * lpfc_sli4_wq_release - Updates internal hba index for WQ
 * @q: The Work Queue to operate on.
 * @index: The index to advance the hba index to.
 *
 * This routine will update the HBA index of a queue to reflect consumption of
 * Work Queue Entries by the HBA. When the HBA indicates that it has consumed
 * an entry the host calls this function to update the queue's internal
 * pointers.
 **/
static void
lpfc_sli4_wq_release(struct lpfc_queue *q, uint32_t index)
{
	/* sanity check on queue memory */
	if (unlikely(!q))
		return;

	q->hba_index = index;
}

/**
 * lpfc_sli4_mq_put - Put a Mailbox Queue Entry on a Mailbox Queue
 * @q: The Mailbox Queue to operate on.
 * @mqe: The Mailbox Queue Entry to put on the Work queue.
 *
 * This routine will copy the contents of @mqe to the next available entry on
 * the @q. This function will then ring the Work Queue Doorbell to signal the
 * HBA to start processing the Work Queue Entry. This function returns 0 if
 * successful. If no entries are available on @q then this function will return
 * -ENOMEM.
 * The caller is expected to hold the hbalock when calling this routine.
 **/
static int
lpfc_sli4_mq_put(struct lpfc_queue *q, struct lpfc_mqe *mqe)
{
	struct lpfc_mqe *temp_mqe;
	struct lpfc_register doorbell;

	/* sanity check on queue memory */
	if (unlikely(!q))
		return -ENOMEM;
	temp_mqe = lpfc_sli4_qe(q, q->host_index);

	/* If the host has not yet processed the next entry then we are done */
	if (((q->host_index + 1) % q->entry_count) == q->hba_index)
		return -ENOMEM;
	lpfc_sli4_pcimem_bcopy(mqe, temp_mqe, q->entry_size);
	/* Save off the mailbox pointer for completion */
	q->phba->mbox = (MAILBOX_t *)temp_mqe;

	/* Update the host index before invoking device */
	q->host_index = ((q->host_index + 1) % q->entry_count);

	/* Ring Doorbell */
	doorbell.word0 = 0;
	bf_set(lpfc_mq_doorbell_num_posted, &doorbell, 1);
	bf_set(lpfc_mq_doorbell_id, &doorbell, q->queue_id);
	writel(doorbell.word0, q->phba->sli4_hba.MQDBregaddr);
	return 0;
}

/**
 * lpfc_sli4_mq_release - Updates internal hba index for MQ
 * @q: The Mailbox Queue to operate on.
 *
 * This routine will update the HBA index of a queue to reflect consumption of
 * a Mailbox Queue Entry by the HBA. When the HBA indicates that it has
 * consumed an entry the host calls this function to update the queue's
 * internal pointers. This routine returns the number of entries that were
 * consumed by the HBA.
 **/
static uint32_t
lpfc_sli4_mq_release(struct lpfc_queue *q)
{
	/* sanity check on queue memory */
	if (unlikely(!q))
		return 0;

	/* Clear the mailbox pointer for completion */
	q->phba->mbox = NULL;
	q->hba_index = ((q->hba_index + 1) % q->entry_count);
	return 1;
}

/**
 * lpfc_sli4_eq_get - Gets the next valid EQE from a EQ
 * @q: The Event Queue to get the first valid EQE from
 *
 * This routine will get the first valid Event Queue Entry from @q, update
 * the queue's internal hba index, and return the EQE. If no valid EQEs are in
 * the Queue (no more work to do), or the Queue is full of EQEs that have been
 * processed, but not popped back to the HBA then this routine will return
 * NULL.
 **/
static struct lpfc_eqe *
lpfc_sli4_eq_get(struct lpfc_queue *q)
{
	struct lpfc_eqe *eqe;

	/* sanity check on queue memory */
	if (unlikely(!q))
		return NULL;
	eqe = lpfc_sli4_qe(q, q->host_index);

	/* If the next EQE is not valid then we are done */
	if (bf_get_le32(lpfc_eqe_valid, eqe) != q->qe_valid)
		return NULL;

	/*
	 * insert barrier for instruction interlock : data from the hardware
	 * must have the valid bit checked before it can be copied and acted
	 * upon. Given what was seen in lpfc_sli4_cq_get() of speculative
	 * instructions allowing action on content before valid bit checked,
	 * add barrier here as well. May not be needed as "content" is a
	 * single 32-bit entity here (vs multi word structure for cq's).
	 */
	mb();
	return eqe;
}

/**
 * lpfc_sli4_eq_clr_intr - Turn off interrupts from this EQ
 * @q: The Event Queue to disable interrupts
 *
 **/
void
lpfc_sli4_eq_clr_intr(struct lpfc_queue *q)
{
	struct lpfc_register doorbell;

	doorbell.word0 = 0;
	bf_set(lpfc_eqcq_doorbell_eqci, &doorbell, 1);
	bf_set(lpfc_eqcq_doorbell_qt, &doorbell, LPFC_QUEUE_TYPE_EVENT);
	bf_set(lpfc_eqcq_doorbell_eqid_hi, &doorbell,
	       (q->queue_id >> LPFC_EQID_HI_FIELD_SHIFT));
	bf_set(lpfc_eqcq_doorbell_eqid_lo, &doorbell, q->queue_id);
	writel(doorbell.word0, q->phba->sli4_hba.EQDBregaddr);
}

/**
 * lpfc_sli4_if6_eq_clr_intr - Turn off interrupts from this EQ
 * @q: The Event Queue to disable interrupts
 *
 **/
void
lpfc_sli4_if6_eq_clr_intr(struct lpfc_queue *q)
{
	struct lpfc_register doorbell;

	doorbell.word0 = 0;
	bf_set(lpfc_if6_eq_doorbell_eqid, &doorbell, q->queue_id);
	writel(doorbell.word0, q->phba->sli4_hba.EQDBregaddr);
}

/**
 * lpfc_sli4_write_eq_db - write EQ DB for eqe's consumed or arm state
 * @phba: adapter with EQ
 * @q: The Event Queue that the host has completed processing for.
 * @count: Number of elements that have been consumed
 * @arm: Indicates whether the host wants to arm this EQ.
 *
 * This routine will notify the HBA, by ringing the doorbell, that count
 * number of EQEs have been processed. The @arm parameter indicates whether
 * the queue should be rearmed when ringing the doorbell.
 **/
void
lpfc_sli4_write_eq_db(struct lpfc_hba *phba, struct lpfc_queue *q,
		      uint32_t count, bool arm)
{
	struct lpfc_register doorbell;

	/* sanity check on queue memory */
	if (unlikely(!q || (count == 0 && !arm)))
		return;

	/* ring doorbell for number popped */
	doorbell.word0 = 0;
	if (arm) {
		bf_set(lpfc_eqcq_doorbell_arm, &doorbell, 1);
		bf_set(lpfc_eqcq_doorbell_eqci, &doorbell, 1);
	}
	bf_set(lpfc_eqcq_doorbell_num_released, &doorbell, count);
	bf_set(lpfc_eqcq_doorbell_qt, &doorbell, LPFC_QUEUE_TYPE_EVENT);
	bf_set(lpfc_eqcq_doorbell_eqid_hi, &doorbell,
	       (q->queue_id >> LPFC_EQID_HI_FIELD_SHIFT));
	bf_set(lpfc_eqcq_doorbell_eqid_lo, &doorbell, q->queue_id);
	writel(doorbell.word0, q->phba->sli4_hba.EQDBregaddr);
	/* PCI read to flush PCI pipeline on re-arming for INTx mode */
	if ((q->phba->intr_type == INTx) && (arm == LPFC_QUEUE_REARM))
		readl(q->phba->sli4_hba.EQDBregaddr);
}

/**
 * lpfc_sli4_if6_write_eq_db - write EQ DB for eqe's consumed or arm state
 * @phba: adapter with EQ
 * @q: The Event Queue that the host has completed processing for.
 * @count: Number of elements that have been consumed
 * @arm: Indicates whether the host wants to arm this EQ.
 *
 * This routine will notify the HBA, by ringing the doorbell, that count
 * number of EQEs have been processed. The @arm parameter indicates whether
 * the queue should be rearmed when ringing the doorbell.
 **/
void
lpfc_sli4_if6_write_eq_db(struct lpfc_hba *phba, struct lpfc_queue *q,
			  uint32_t count, bool arm)
{
	struct lpfc_register doorbell;

	/* sanity check on queue memory */
	if (unlikely(!q || (count == 0 && !arm)))
		return;

	/* ring doorbell for number popped */
	doorbell.word0 = 0;
	if (arm)
		bf_set(lpfc_if6_eq_doorbell_arm, &doorbell, 1);
	bf_set(lpfc_if6_eq_doorbell_num_released, &doorbell, count);
	bf_set(lpfc_if6_eq_doorbell_eqid, &doorbell, q->queue_id);
	writel(doorbell.word0, q->phba->sli4_hba.EQDBregaddr);
	/* PCI read to flush PCI pipeline on re-arming for INTx mode */
	if ((q->phba->intr_type == INTx) && (arm == LPFC_QUEUE_REARM))
		readl(q->phba->sli4_hba.EQDBregaddr);
}

static void
__lpfc_sli4_consume_eqe(struct lpfc_hba *phba, struct lpfc_queue *eq,
			struct lpfc_eqe *eqe)
{
	if (!phba->sli4_hba.pc_sli4_params.eqav)
		bf_set_le32(lpfc_eqe_valid, eqe, 0);

	eq->host_index = ((eq->host_index + 1) % eq->entry_count);

	/* if the index wrapped around, toggle the valid bit */
	if (phba->sli4_hba.pc_sli4_params.eqav && !eq->host_index)
		eq->qe_valid = (eq->qe_valid) ? 0 : 1;
}
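
/*
 * Note: two entry-tracking schemes coexist in __lpfc_sli4_consume_eqe().
 * On older ports the host clears each EQE's valid bit after consuming it.
 * On ports reporting eqav (EQ autovalid), entries are never cleared;
 * instead the expected sense of the valid bit (eq->qe_valid) is toggled
 * each time the host index wraps, so a stale entry left over from the
 * previous pass fails the check in lpfc_sli4_eq_get().
 */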

static void
lpfc_sli4_eqcq_flush(struct lpfc_hba *phba, struct lpfc_queue *eq)
{
	struct lpfc_eqe *eqe = NULL;
	u32 eq_count = 0, cq_count = 0;
	struct lpfc_cqe *cqe = NULL;
	struct lpfc_queue *cq = NULL, *childq = NULL;
	int cqid = 0;

	/* walk all the EQ entries and drop on the floor */
	eqe = lpfc_sli4_eq_get(eq);
	while (eqe) {
		/* Get the reference to the corresponding CQ */
		cqid = bf_get_le32(lpfc_eqe_resource_id, eqe);
		cq = NULL;

		list_for_each_entry(childq, &eq->child_list, list) {
			if (childq->queue_id == cqid) {
				cq = childq;
				break;
			}
		}
		/* If CQ is valid, iterate through it and drop all the CQEs */
		if (cq) {
			cqe = lpfc_sli4_cq_get(cq);
			while (cqe) {
				__lpfc_sli4_consume_cqe(phba, cq, cqe);
				cq_count++;
				cqe = lpfc_sli4_cq_get(cq);
			}
			/* Clear and re-arm the CQ */
			phba->sli4_hba.sli4_write_cq_db(phba, cq, cq_count,
							LPFC_QUEUE_REARM);
			cq_count = 0;
		}
		__lpfc_sli4_consume_eqe(phba, eq, eqe);
		eq_count++;
		eqe = lpfc_sli4_eq_get(eq);
	}

	/* Clear and re-arm the EQ */
	phba->sli4_hba.sli4_write_eq_db(phba, eq, eq_count, LPFC_QUEUE_REARM);
}

static int
lpfc_sli4_process_eq(struct lpfc_hba *phba, struct lpfc_queue *eq,
		     uint8_t rearm)
{
	struct lpfc_eqe *eqe;
	int count = 0, consumed = 0;

	if (cmpxchg(&eq->queue_claimed, 0, 1) != 0)
		goto rearm_and_exit;

	eqe = lpfc_sli4_eq_get(eq);
	while (eqe) {
		lpfc_sli4_hba_handle_eqe(phba, eq, eqe);
		__lpfc_sli4_consume_eqe(phba, eq, eqe);

		consumed++;
		if (!(++count % eq->max_proc_limit))
			break;

		if (!(count % eq->notify_interval)) {
			phba->sli4_hba.sli4_write_eq_db(phba, eq, consumed,
							LPFC_QUEUE_NOARM);
			consumed = 0;
		}

		eqe = lpfc_sli4_eq_get(eq);
	}
	eq->EQ_processed += count;

	/* Track the max number of EQEs processed in 1 intr */
	if (count > eq->EQ_max_eqe)
		eq->EQ_max_eqe = count;

	xchg(&eq->queue_claimed, 0);

rearm_and_exit:
	/* Ring the doorbell for whatever was consumed, arming as requested */
	phba->sli4_hba.sli4_write_eq_db(phba, eq, consumed, rearm);

	return count;
}
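
/*
 * Note: eq->queue_claimed is a cmpxchg()-based guard so that only one
 * context drains a given EQ at a time; a loser simply rings the doorbell
 * as asked and exits. Within a pass, max_proc_limit bounds how many EQEs
 * are handled before returning, and notify_interval batches NOARM
 * doorbell writes so the port can reclaim entries while processing
 * continues.
 */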

/**
 * lpfc_sli4_cq_get - Gets the next valid CQE from a CQ
 * @q: The Completion Queue to get the first valid CQE from
 *
 * This routine will get the first valid Completion Queue Entry from @q, update
 * the queue's internal hba index, and return the CQE. If no valid CQEs are in
 * the Queue (no more work to do), or the Queue is full of CQEs that have been
 * processed, but not popped back to the HBA then this routine will return
 * NULL.
 **/
static struct lpfc_cqe *
lpfc_sli4_cq_get(struct lpfc_queue *q)
{
	struct lpfc_cqe *cqe;

	/* sanity check on queue memory */
	if (unlikely(!q))
		return NULL;
	cqe = lpfc_sli4_qe(q, q->host_index);

	/* If the next CQE is not valid then we are done */
	if (bf_get_le32(lpfc_cqe_valid, cqe) != q->qe_valid)
		return NULL;

	/*
	 * insert barrier for instruction interlock : data from the hardware
	 * must have the valid bit checked before it can be copied and acted
	 * upon. Speculative instructions were allowing a bcopy at the start
	 * of lpfc_sli4_fp_handle_wcqe(), which is called immediately
	 * after our return, to copy data before the valid bit check above
	 * was done. As such, some of the copied data was stale. The barrier
	 * ensures the check is before any data is copied.
	 */
	mb();
	return cqe;
}

static void
__lpfc_sli4_consume_cqe(struct lpfc_hba *phba, struct lpfc_queue *cq,
			struct lpfc_cqe *cqe)
{
	if (!phba->sli4_hba.pc_sli4_params.cqav)
		bf_set_le32(lpfc_cqe_valid, cqe, 0);

	cq->host_index = ((cq->host_index + 1) % cq->entry_count);

	/* if the index wrapped around, toggle the valid bit */
	if (phba->sli4_hba.pc_sli4_params.cqav && !cq->host_index)
		cq->qe_valid = (cq->qe_valid) ? 0 : 1;
}

/**
 * lpfc_sli4_write_cq_db - write cq DB for entries consumed or arm state.
 * @phba: the adapter with the CQ
 * @q: The Completion Queue that the host has completed processing for.
 * @count: the number of elements that were consumed
 * @arm: Indicates whether the host wants to arm this CQ.
 *
 * This routine will notify the HBA, by ringing the doorbell, that the
 * CQEs have been processed. The @arm parameter specifies whether the
 * queue should be rearmed when ringing the doorbell.
 **/
void
lpfc_sli4_write_cq_db(struct lpfc_hba *phba, struct lpfc_queue *q,
		      uint32_t count, bool arm)
{
	struct lpfc_register doorbell;

	/* sanity check on queue memory */
	if (unlikely(!q || (count == 0 && !arm)))
		return;

	/* ring doorbell for number popped */
	doorbell.word0 = 0;
	if (arm)
		bf_set(lpfc_eqcq_doorbell_arm, &doorbell, 1);
	bf_set(lpfc_eqcq_doorbell_num_released, &doorbell, count);
	bf_set(lpfc_eqcq_doorbell_qt, &doorbell, LPFC_QUEUE_TYPE_COMPLETION);
	bf_set(lpfc_eqcq_doorbell_cqid_hi, &doorbell,
	       (q->queue_id >> LPFC_CQID_HI_FIELD_SHIFT));
	bf_set(lpfc_eqcq_doorbell_cqid_lo, &doorbell, q->queue_id);
	writel(doorbell.word0, q->phba->sli4_hba.CQDBregaddr);
}

/**
 * lpfc_sli4_if6_write_cq_db - write cq DB for entries consumed or arm state.
 * @phba: the adapter with the CQ
 * @q: The Completion Queue that the host has completed processing for.
 * @count: the number of elements that were consumed
 * @arm: Indicates whether the host wants to arm this CQ.
 *
 * This routine will notify the HBA, by ringing the doorbell, that the
 * CQEs have been processed. The @arm parameter specifies whether the
 * queue should be rearmed when ringing the doorbell.
 **/
void
lpfc_sli4_if6_write_cq_db(struct lpfc_hba *phba, struct lpfc_queue *q,
			  uint32_t count, bool arm)
{
	struct lpfc_register doorbell;

	/* sanity check on queue memory */
	if (unlikely(!q || (count == 0 && !arm)))
		return;

	/* ring doorbell for number popped */
	doorbell.word0 = 0;
	if (arm)
		bf_set(lpfc_if6_cq_doorbell_arm, &doorbell, 1);
	bf_set(lpfc_if6_cq_doorbell_num_released, &doorbell, count);
	bf_set(lpfc_if6_cq_doorbell_cqid, &doorbell, q->queue_id);
	writel(doorbell.word0, q->phba->sli4_hba.CQDBregaddr);
}

/**
 * lpfc_sli4_rq_put - Put a Receive Buffer Queue Entry on a Receive Queue
 * @hq: The Header Receive Queue to operate on.
 * @dq: The Data Receive Queue to operate on.
 * @hrqe: The header Receive Queue Entry to put on the header queue.
 * @drqe: The data Receive Queue Entry to put on the data queue.
 *
 * This routine will copy the contents of @hrqe and @drqe to the next
 * available entry on @hq and @dq respectively, then ring the Receive Queue
 * Doorbell once a batch of entries has been posted. This function returns
 * the put index used on success, or a negative errno on failure.
 **/
int
lpfc_sli4_rq_put(struct lpfc_queue *hq, struct lpfc_queue *dq,
		 struct lpfc_rqe *hrqe, struct lpfc_rqe *drqe)
{
	struct lpfc_rqe *temp_hrqe;
	struct lpfc_rqe *temp_drqe;
	struct lpfc_register doorbell;
	int hq_put_index;
	int dq_put_index;

	/* sanity check on queue memory */
	if (unlikely(!hq) || unlikely(!dq))
		return -ENOMEM;
	hq_put_index = hq->host_index;
	dq_put_index = dq->host_index;
	temp_hrqe = lpfc_sli4_qe(hq, hq_put_index);
	temp_drqe = lpfc_sli4_qe(dq, dq_put_index);

	if (hq->type != LPFC_HRQ || dq->type != LPFC_DRQ)
		return -EINVAL;
	if (hq_put_index != dq_put_index)
		return -EINVAL;
	/* If the host has not yet processed the next entry then we are done */
	if (((hq_put_index + 1) % hq->entry_count) == hq->hba_index)
		return -EBUSY;
	lpfc_sli4_pcimem_bcopy(hrqe, temp_hrqe, hq->entry_size);
	lpfc_sli4_pcimem_bcopy(drqe, temp_drqe, dq->entry_size);

	/* Update the host index to point to the next slot */
	hq->host_index = ((hq_put_index + 1) % hq->entry_count);
	dq->host_index = ((dq_put_index + 1) % dq->entry_count);
	hq->RQ_buf_posted++;

	/* Ring The Header Receive Queue Doorbell */
	if (!(hq->host_index % hq->notify_interval)) {
		doorbell.word0 = 0;
		if (hq->db_format == LPFC_DB_RING_FORMAT) {
			bf_set(lpfc_rq_db_ring_fm_num_posted, &doorbell,
			       hq->notify_interval);
			bf_set(lpfc_rq_db_ring_fm_id, &doorbell, hq->queue_id);
		} else if (hq->db_format == LPFC_DB_LIST_FORMAT) {
			bf_set(lpfc_rq_db_list_fm_num_posted, &doorbell,
			       hq->notify_interval);
			bf_set(lpfc_rq_db_list_fm_index, &doorbell,
			       hq->host_index);
			bf_set(lpfc_rq_db_list_fm_id, &doorbell, hq->queue_id);
		} else {
			return -EINVAL;
		}
		writel(doorbell.word0, hq->db_regaddr);
	}
	return hq_put_index;
}
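
/*
 * Note: SLI4 delivers unsolicited frames through a header/data RQ pair
 * that advances in lockstep - the header RQE points at a buffer for the
 * FC frame header and the matching data RQE at a buffer for the payload -
 * which is why lpfc_sli4_rq_put() insists the two put indexes match.
 */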

/**
 * lpfc_sli4_rq_release - Updates internal hba index for RQ
 * @hq: The Header Receive Queue to operate on.
 * @dq: The Data Receive Queue to operate on.
 *
 * This routine will update the HBA index of a queue to reflect consumption of
 * one Receive Queue Entry by the HBA. When the HBA indicates that it has
 * consumed an entry the host calls this function to update the queue's
 * internal pointers. This routine returns the number of entries that were
 * consumed by the HBA.
 **/
static uint32_t
lpfc_sli4_rq_release(struct lpfc_queue *hq, struct lpfc_queue *dq)
{
	/* sanity check on queue memory */
	if (unlikely(!hq) || unlikely(!dq))
		return 0;

	if ((hq->type != LPFC_HRQ) || (dq->type != LPFC_DRQ))
		return 0;
	hq->hba_index = ((hq->hba_index + 1) % hq->entry_count);
	dq->hba_index = ((dq->hba_index + 1) % dq->entry_count);
	return 1;
}

/**
 * lpfc_cmd_iocb - Get next command iocb entry in the ring
 * @phba: Pointer to HBA context object.
 * @pring: Pointer to driver SLI ring object.
 *
 * This function returns pointer to next command iocb entry
 * in the command ring. The caller must hold hbalock to prevent
 * other threads consuming the next command iocb.
 * SLI-2/SLI-3 provide different sized iocbs.
 **/
static inline IOCB_t *
lpfc_cmd_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring)
{
	return (IOCB_t *) (((char *) pring->sli.sli3.cmdringaddr) +
			   pring->sli.sli3.cmdidx * phba->iocb_cmd_size);
}

/**
 * lpfc_resp_iocb - Get next response iocb entry in the ring
 * @phba: Pointer to HBA context object.
 * @pring: Pointer to driver SLI ring object.
 *
 * This function returns pointer to next response iocb entry
 * in the response ring. The caller must hold hbalock to prevent
 * other threads consuming the next response iocb.
 * SLI-2/SLI-3 provide different sized iocbs.
 **/
static inline IOCB_t *
lpfc_resp_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring)
{
	return (IOCB_t *) (((char *) pring->sli.sli3.rspringaddr) +
			   pring->sli.sli3.rspidx * phba->iocb_rsp_size);
}

/**
 * __lpfc_sli_get_iocbq - Allocates an iocb object from iocb pool
 * @phba: Pointer to HBA context object.
 *
 * This function is called with hbalock held. This function
 * allocates a new driver iocb object from the iocb pool. If the
 * allocation is successful, it returns pointer to the newly
 * allocated iocb object else it returns NULL.
 **/
struct lpfc_iocbq *
__lpfc_sli_get_iocbq(struct lpfc_hba *phba)
{
	struct list_head *lpfc_iocb_list = &phba->lpfc_iocb_list;
	struct lpfc_iocbq *iocbq = NULL;

	lockdep_assert_held(&phba->hbalock);

	list_remove_head(lpfc_iocb_list, iocbq, struct lpfc_iocbq, list);
	if (iocbq)
		phba->iocb_cnt++;
	if (phba->iocb_cnt > phba->iocb_max)
		phba->iocb_max = phba->iocb_cnt;
	return iocbq;
}

/**
 * __lpfc_clear_active_sglq - Remove the active sglq for this XRI.
 * @phba: Pointer to HBA context object.
 * @xritag: XRI value.
 *
 * This function clears the sglq pointer from the array of active
 * sglq's. The xritag that is passed in is used to index into the
 * array. Before the xritag can be used it needs to be adjusted
 * by subtracting the xribase.
 *
 * Returns sglq pointer = success, NULL = Failure.
 **/
struct lpfc_sglq *
__lpfc_clear_active_sglq(struct lpfc_hba *phba, uint16_t xritag)
{
	struct lpfc_sglq *sglq;

	sglq = phba->sli4_hba.lpfc_sglq_active_list[xritag];
	phba->sli4_hba.lpfc_sglq_active_list[xritag] = NULL;
	return sglq;
}

/**
 * __lpfc_get_active_sglq - Get the active sglq for this XRI.
 * @phba: Pointer to HBA context object.
 * @xritag: XRI value.
 *
 * This function returns the sglq pointer from the array of active
 * sglq's. The xritag that is passed in is used to index into the
 * array. Before the xritag can be used it needs to be adjusted
 * by subtracting the xribase.
 *
 * Returns sglq pointer = success, NULL = Failure.
 **/
struct lpfc_sglq *
__lpfc_get_active_sglq(struct lpfc_hba *phba, uint16_t xritag)
{
	struct lpfc_sglq *sglq;

	sglq = phba->sli4_hba.lpfc_sglq_active_list[xritag];
	return sglq;
}

/**
 * lpfc_clr_rrq_active - Clears RRQ active bit in xri_bitmap.
 * @phba: Pointer to HBA context object.
 * @xritag: XRI value.
 * @rrq: Pointer to the rrq to clear.
 *
 * The lpfc_clr_rrq_active routine removes the RRQ from the active bitmap.
 **/
void
lpfc_clr_rrq_active(struct lpfc_hba *phba,
		    uint16_t xritag,
		    struct lpfc_node_rrq *rrq)
{
	struct lpfc_nodelist *ndlp = NULL;

	if ((rrq->vport) && NLP_CHK_NODE_ACT(rrq->ndlp))
		ndlp = lpfc_findnode_did(rrq->vport, rrq->nlp_DID);

	/* The target DID could have been swapped (cable swap)
	 * we should use the ndlp from the findnode if it is
	 * available.
	 */
	if ((!ndlp) && rrq->ndlp)
		ndlp = rrq->ndlp;

	if (!ndlp)
		goto out;

	if (test_and_clear_bit(xritag, ndlp->active_rrqs_xri_bitmap)) {
		rrq->send_rrq = 0;
		rrq->xritag = 0;
		rrq->rrq_stop_time = 0;
	}
out:
	mempool_free(rrq, phba->rrq_pool);
}

/**
 * lpfc_handle_rrq_active - Checks if RRQ has waited RATOV.
 * @phba: Pointer to HBA context object.
 *
 * This function is called with hbalock held. This function
 * checks if stop_time (ratov from setting rrq active) has
 * been reached, if it has and the send_rrq flag is set then
 * it will call lpfc_send_rrq. If the send_rrq flag is not set
 * then it will just call the routine to clear the rrq and
 * free the rrq resource.
 * The timer is set to the next rrq that is going to expire before
 * leaving the routine.
 **/
void
lpfc_handle_rrq_active(struct lpfc_hba *phba)
{
	struct lpfc_node_rrq *rrq;
	struct lpfc_node_rrq *nextrrq;
	unsigned long next_time;
	unsigned long iflags;
	LIST_HEAD(send_rrq);

	spin_lock_irqsave(&phba->hbalock, iflags);
	phba->hba_flag &= ~HBA_RRQ_ACTIVE;
	next_time = jiffies + msecs_to_jiffies(1000 * (phba->fc_ratov + 1));
	list_for_each_entry_safe(rrq, nextrrq,
				 &phba->active_rrq_list, list) {
		if (time_after(jiffies, rrq->rrq_stop_time))
			list_move(&rrq->list, &send_rrq);
		else if (time_before(rrq->rrq_stop_time, next_time))
			next_time = rrq->rrq_stop_time;
	}
	spin_unlock_irqrestore(&phba->hbalock, iflags);
	if ((!list_empty(&phba->active_rrq_list)) &&
	    (!(phba->pport->load_flag & FC_UNLOADING)))
		mod_timer(&phba->rrq_tmr, next_time);
	list_for_each_entry_safe(rrq, nextrrq, &send_rrq, list) {
		list_del(&rrq->list);
		if (!rrq->send_rrq) {
			/* this call will free the rrq */
			lpfc_clr_rrq_active(phba, rrq->xritag, rrq);
		} else if (lpfc_send_rrq(phba, rrq)) {
			/* if we send the rrq then the completion handler
			 * will clear the bit in the xribitmap.
			 */
			lpfc_clr_rrq_active(phba, rrq->xritag,
					    rrq);
		}
	}
}

/**
 * lpfc_get_active_rrq - Get the active RRQ for this exchange.
 * @vport: Pointer to vport context object.
 * @xri: The xri used in the exchange.
 * @did: The targets DID for this exchange.
 *
 * returns NULL = rrq not found in the phba->active_rrq_list.
 *         rrq = rrq for this xri and target.
 **/
struct lpfc_node_rrq *
lpfc_get_active_rrq(struct lpfc_vport *vport, uint16_t xri, uint32_t did)
{
	struct lpfc_hba *phba = vport->phba;
	struct lpfc_node_rrq *rrq;
	struct lpfc_node_rrq *nextrrq;
	unsigned long iflags;

	if (phba->sli_rev != LPFC_SLI_REV4)
		return NULL;
	spin_lock_irqsave(&phba->hbalock, iflags);
	list_for_each_entry_safe(rrq, nextrrq, &phba->active_rrq_list, list) {
		if (rrq->vport == vport && rrq->xritag == xri &&
		    rrq->nlp_DID == did) {
			list_del(&rrq->list);
			spin_unlock_irqrestore(&phba->hbalock, iflags);
			return rrq;
		}
	}
	spin_unlock_irqrestore(&phba->hbalock, iflags);
	return NULL;
}

/**
 * lpfc_cleanup_vports_rrqs - Remove and clear the active RRQ for this vport.
 * @vport: Pointer to vport context object.
 * @ndlp: Pointer to the lpfc_node_list structure.
 *
 * If @ndlp is NULL, remove all active RRQs for this vport from the
 * phba->active_rrq_list and clear the rrq.
 * If @ndlp is not NULL then only remove rrqs for this vport & this ndlp.
 **/
void
lpfc_cleanup_vports_rrqs(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
{
	struct lpfc_hba *phba = vport->phba;
	struct lpfc_node_rrq *rrq;
	struct lpfc_node_rrq *nextrrq;
	unsigned long iflags;
	LIST_HEAD(rrq_list);

	if (phba->sli_rev != LPFC_SLI_REV4)
		return;
	if (!ndlp) {
		lpfc_sli4_vport_delete_els_xri_aborted(vport);
		lpfc_sli4_vport_delete_fcp_xri_aborted(vport);
	}
	spin_lock_irqsave(&phba->hbalock, iflags);
	list_for_each_entry_safe(rrq, nextrrq, &phba->active_rrq_list, list)
		if ((rrq->vport == vport) && (!ndlp || rrq->ndlp == ndlp))
			list_move(&rrq->list, &rrq_list);
	spin_unlock_irqrestore(&phba->hbalock, iflags);

	list_for_each_entry_safe(rrq, nextrrq, &rrq_list, list) {
		list_del(&rrq->list);
		lpfc_clr_rrq_active(phba, rrq->xritag, rrq);
	}
}

/**
 * lpfc_test_rrq_active - Test RRQ bit in xri_bitmap.
 * @phba: Pointer to HBA context object.
 * @ndlp: Targets nodelist pointer for this exchange.
 * @xritag: the xri in the bitmap to test.
 *
 * This function returns:
 * 0 = rrq not active for this xri
 * 1 = rrq is active for this xri.
 **/
int
lpfc_test_rrq_active(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp,
		     uint16_t xritag)
{
	if (!ndlp)
		return 0;
	if (!ndlp->active_rrqs_xri_bitmap)
		return 0;
	if (test_bit(xritag, ndlp->active_rrqs_xri_bitmap))
		return 1;
	else
		return 0;
}

/**
 * lpfc_set_rrq_active - set RRQ active bit in xri_bitmap.
 * @phba: Pointer to HBA context object.
 * @ndlp: nodelist pointer for this target.
 * @xritag: xri used in this exchange.
 * @rxid: Remote Exchange ID.
 * @send_rrq: Flag used to determine if we should send rrq els cmd.
 *
 * This function takes the hbalock.
 * The active bit is always set in the active rrq xri_bitmap even
 * if there is no slot available for the other rrq information.
 *
 * returns   0 rrq activated for this xri
 *         < 0 No memory or invalid ndlp.
 **/
int
lpfc_set_rrq_active(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp,
		    uint16_t xritag, uint16_t rxid, uint16_t send_rrq)
{
	unsigned long iflags;
	struct lpfc_node_rrq *rrq;
	int empty;

	if (!ndlp)
		return -EINVAL;

	if (!phba->cfg_enable_rrq)
		return -EINVAL;

	spin_lock_irqsave(&phba->hbalock, iflags);
	if (phba->pport->load_flag & FC_UNLOADING) {
		phba->hba_flag &= ~HBA_RRQ_ACTIVE;
		goto out;
	}

	/*
	 * set the active bit even if there is no mem available.
	 */
	if (NLP_CHK_FREE_REQ(ndlp))
		goto out;

	if (ndlp->vport && (ndlp->vport->load_flag & FC_UNLOADING))
		goto out;

	if (!ndlp->active_rrqs_xri_bitmap)
		goto out;

	if (test_and_set_bit(xritag, ndlp->active_rrqs_xri_bitmap))
		goto out;

	spin_unlock_irqrestore(&phba->hbalock, iflags);
	rrq = mempool_alloc(phba->rrq_pool, GFP_ATOMIC);
	if (!rrq) {
		lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
				"3155 Unable to allocate RRQ xri:0x%x rxid:0x%x"
				" DID:0x%x Send:%d\n",
				xritag, rxid, ndlp->nlp_DID, send_rrq);
		return -EINVAL;
	}
	if (phba->cfg_enable_rrq == 1)
		rrq->send_rrq = send_rrq;
	else
		rrq->send_rrq = 0;
	rrq->xritag = xritag;
	rrq->rrq_stop_time = jiffies +
			     msecs_to_jiffies(1000 * (phba->fc_ratov + 1));
	rrq->ndlp = ndlp;
	rrq->nlp_DID = ndlp->nlp_DID;
	rrq->vport = ndlp->vport;
	rrq->rxid = rxid;
	spin_lock_irqsave(&phba->hbalock, iflags);
	empty = list_empty(&phba->active_rrq_list);
	list_add_tail(&rrq->list, &phba->active_rrq_list);
	phba->hba_flag |= HBA_RRQ_ACTIVE;
	if (empty)
		lpfc_worker_wake_up(phba);
	spin_unlock_irqrestore(&phba->hbalock, iflags);
	return 0;
out:
	spin_unlock_irqrestore(&phba->hbalock, iflags);
	lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
			"2921 Can't set rrq active xri:0x%x rxid:0x%x"
			" DID:0x%x Send:%d\n",
			xritag, rxid, ndlp->nlp_DID, send_rrq);
	return -EINVAL;
}
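
/*
 * Note: an RRQ (Reinstate Recovery Qualifier ELS, FC-LS) is how the
 * driver quarantines an XRI for roughly RATOV after an aborted exchange.
 * While the xri's bit is set in ndlp->active_rrqs_xri_bitmap, the sglq
 * allocation below refuses to pair that XRI with the same remote port
 * again, so a late frame from the old exchange cannot be mistaken for
 * the new one.
 */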

/**
 * __lpfc_sli_get_els_sglq - Allocates an iocb object from sgl pool
 * @phba: Pointer to HBA context object.
 * @piocbq: Pointer to the iocbq.
 *
 * This function is called with the ring lock held. This function
 * gets a new driver sglq object from the sglq list. If the
 * list is not empty then it is successful, it returns pointer to the newly
 * allocated sglq object else it returns NULL.
 **/
static struct lpfc_sglq *
__lpfc_sli_get_els_sglq(struct lpfc_hba *phba, struct lpfc_iocbq *piocbq)
{
	struct list_head *lpfc_els_sgl_list = &phba->sli4_hba.lpfc_els_sgl_list;
	struct lpfc_sglq *sglq = NULL;
	struct lpfc_sglq *start_sglq = NULL;
	struct lpfc_io_buf *lpfc_cmd;
	struct lpfc_nodelist *ndlp;
	struct lpfc_sli_ring *pring = NULL;
	int found = 0;

	if (piocbq->iocb_flag & LPFC_IO_NVME_LS)
		pring = phba->sli4_hba.nvmels_wq->pring;
	else
		pring = lpfc_phba_elsring(phba);

	lockdep_assert_held(&pring->ring_lock);

	if (piocbq->iocb_flag & LPFC_IO_FCP) {
		lpfc_cmd = (struct lpfc_io_buf *) piocbq->context1;
		ndlp = lpfc_cmd->rdata->pnode;
	} else if ((piocbq->iocb.ulpCommand == CMD_GEN_REQUEST64_CR) &&
		   !(piocbq->iocb_flag & LPFC_IO_LIBDFC)) {
		ndlp = piocbq->context_un.ndlp;
	} else if (piocbq->iocb_flag & LPFC_IO_LIBDFC) {
		if (piocbq->iocb_flag & LPFC_IO_LOOPBACK)
			ndlp = NULL;
		else
			ndlp = piocbq->context_un.ndlp;
	} else {
		ndlp = piocbq->context1;
	}

	spin_lock(&phba->sli4_hba.sgl_list_lock);
	list_remove_head(lpfc_els_sgl_list, sglq, struct lpfc_sglq, list);
	start_sglq = sglq;
	while (!found) {
		if (!sglq)
			break;
		if (ndlp && ndlp->active_rrqs_xri_bitmap &&
		    test_bit(sglq->sli4_lxritag,
			     ndlp->active_rrqs_xri_bitmap)) {
			/* This xri has an rrq outstanding for this DID.
			 * put it back in the list and get another xri.
			 */
			list_add_tail(&sglq->list, lpfc_els_sgl_list);
			sglq = NULL;
			list_remove_head(lpfc_els_sgl_list, sglq,
					 struct lpfc_sglq, list);
			if (sglq == start_sglq) {
				list_add_tail(&sglq->list, lpfc_els_sgl_list);
				sglq = NULL;
				break;
			} else
				continue;
		}
		sglq->ndlp = ndlp;
		found = 1;
		phba->sli4_hba.lpfc_sglq_active_list[sglq->sli4_lxritag] = sglq;
		sglq->state = SGL_ALLOCATED;
	}
	spin_unlock(&phba->sli4_hba.sgl_list_lock);
	return sglq;
}
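
/*
 * Note: the while loop above rotates through the free ELS sglq list at
 * most once - start_sglq remembers the first candidate, so seeing it a
 * second time means every free XRI is currently quarantined by an RRQ
 * for this remote port, and NULL is returned to the caller.
 */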

/**
 * __lpfc_sli_get_nvmet_sglq - Allocates an iocb object from sgl pool
 * @phba: Pointer to HBA context object.
 * @piocbq: Pointer to the iocbq.
 *
 * This function is called with the sgl_list lock held. This function
 * gets a new driver sglq object from the sglq list. If the
 * list is not empty then it is successful, it returns pointer to the newly
 * allocated sglq object else it returns NULL.
 **/
struct lpfc_sglq *
__lpfc_sli_get_nvmet_sglq(struct lpfc_hba *phba, struct lpfc_iocbq *piocbq)
{
	struct list_head *lpfc_nvmet_sgl_list;
	struct lpfc_sglq *sglq = NULL;

	lpfc_nvmet_sgl_list = &phba->sli4_hba.lpfc_nvmet_sgl_list;

	lockdep_assert_held(&phba->sli4_hba.sgl_list_lock);

	list_remove_head(lpfc_nvmet_sgl_list, sglq, struct lpfc_sglq, list);
	if (!sglq)
		return NULL;
	phba->sli4_hba.lpfc_sglq_active_list[sglq->sli4_lxritag] = sglq;
	sglq->state = SGL_ALLOCATED;
	return sglq;
}

/**
 * lpfc_sli_get_iocbq - Allocates an iocb object from iocb pool
 * @phba: Pointer to HBA context object.
 *
 * This function is called with no lock held. This function
 * allocates a new driver iocb object from the iocb pool. If the
 * allocation is successful, it returns pointer to the newly
 * allocated iocb object else it returns NULL.
 **/
struct lpfc_iocbq *
lpfc_sli_get_iocbq(struct lpfc_hba *phba)
{
	struct lpfc_iocbq *iocbq = NULL;
	unsigned long iflags;

	spin_lock_irqsave(&phba->hbalock, iflags);
	iocbq = __lpfc_sli_get_iocbq(phba);
	spin_unlock_irqrestore(&phba->hbalock, iflags);
	return iocbq;
}

/**
 * __lpfc_sli_release_iocbq_s4 - Release iocb to the iocb pool
 * @phba: Pointer to HBA context object.
 * @iocbq: Pointer to driver iocb object.
 *
 * This function is called with hbalock held to release the driver iocb
 * object to the iocb pool. The iotag in the iocb object does not change
 * for each use of the iocb object. This function clears all other fields
 * of the iocb object when it is freed.
 * The sglq structure that holds the xritag and phys and virtual mappings
 * for the scatter gather list is retrieved from the sglq entry in the
 * lpfc_sglq_active_list. If the driver is done with the xritag, the sglq
 * is put back on the free list; if the exchange is still busy with the
 * port, the sglq is instead parked on the ABTS list until the exchange
 * completes.
 **/
static void
__lpfc_sli_release_iocbq_s4(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq)
{
	struct lpfc_sglq *sglq;
	size_t start_clean = offsetof(struct lpfc_iocbq, iocb);
	unsigned long iflag = 0;
	struct lpfc_sli_ring *pring;

	if (iocbq->sli4_xritag == NO_XRI)
		sglq = NULL;
	else
		sglq = __lpfc_clear_active_sglq(phba, iocbq->sli4_lxritag);

	if (sglq) {
		if (iocbq->iocb_flag & LPFC_IO_NVMET) {
			spin_lock_irqsave(&phba->sli4_hba.sgl_list_lock,
					  iflag);
			sglq->state = SGL_FREED;
			sglq->ndlp = NULL;
			list_add_tail(&sglq->list,
				      &phba->sli4_hba.lpfc_nvmet_sgl_list);
			spin_unlock_irqrestore(
				&phba->sli4_hba.sgl_list_lock, iflag);
			goto out;
		}

		pring = phba->sli4_hba.els_wq->pring;
		if ((iocbq->iocb_flag & LPFC_EXCHANGE_BUSY) &&
		    (sglq->state != SGL_XRI_ABORTED)) {
			spin_lock_irqsave(&phba->sli4_hba.sgl_list_lock,
					  iflag);
			list_add(&sglq->list,
				 &phba->sli4_hba.lpfc_abts_els_sgl_list);
			spin_unlock_irqrestore(
				&phba->sli4_hba.sgl_list_lock, iflag);
		} else {
			spin_lock_irqsave(&phba->sli4_hba.sgl_list_lock,
					  iflag);
			sglq->state = SGL_FREED;
			sglq->ndlp = NULL;
			list_add_tail(&sglq->list,
				      &phba->sli4_hba.lpfc_els_sgl_list);
			spin_unlock_irqrestore(
				&phba->sli4_hba.sgl_list_lock, iflag);

			/* Check if TXQ queue needs to be serviced */
			if (!list_empty(&pring->txq))
				lpfc_worker_wake_up(phba);
		}
	}

out:
	/*
	 * Clean all volatile data fields, preserve iotag and node struct.
	 */
	memset((char *)iocbq + start_clean, 0, sizeof(*iocbq) - start_clean);
	iocbq->sli4_lxritag = NO_XRI;
	iocbq->sli4_xritag = NO_XRI;
	iocbq->iocb_flag &= ~(LPFC_IO_NVME | LPFC_IO_NVMET |
			      LPFC_IO_NVME_LS);
	list_add_tail(&iocbq->list, &phba->lpfc_iocb_list);
}

/**
 * __lpfc_sli_release_iocbq_s3 - Release iocb to the iocb pool
 * @phba: Pointer to HBA context object.
 * @iocbq: Pointer to driver iocb object.
 *
 * This function is called with hbalock held to release the driver iocb
 * object to the iocb pool. The iotag in the iocb object does not change
 * for each use of the iocb object. This function clears all other fields
 * of the iocb object when it is freed.
 **/
static void
__lpfc_sli_release_iocbq_s3(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq)
{
	size_t start_clean = offsetof(struct lpfc_iocbq, iocb);

	/*
	 * Clean all volatile data fields, preserve iotag and node struct.
	 */
	memset((char *)iocbq + start_clean, 0, sizeof(*iocbq) - start_clean);
	iocbq->sli4_xritag = NO_XRI;
	list_add_tail(&iocbq->list, &phba->lpfc_iocb_list);
}

/**
 * __lpfc_sli_release_iocbq - Release iocb to the iocb pool
 * @phba: Pointer to HBA context object.
 * @iocbq: Pointer to driver iocb object.
 *
 * This function is called with hbalock held to release the driver iocb
 * object to the iocb pool. It invokes the SLI revision specific release
 * routine through the function pointer set up at initialization.
 **/
static void
__lpfc_sli_release_iocbq(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq)
{
	lockdep_assert_held(&phba->hbalock);

	phba->__lpfc_sli_release_iocbq(phba, iocbq);
	phba->iocb_cnt--;
}

/**
 * lpfc_sli_release_iocbq - Release iocb to the iocb pool
 * @phba: Pointer to HBA context object.
 * @iocbq: Pointer to driver iocb object.
 *
 * This function is called with no lock held to release the iocb to
 * the iocb pool.
 **/
void
lpfc_sli_release_iocbq(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq)
{
	unsigned long iflags;

	/*
	 * Clean all volatile data fields, preserve iotag and node struct.
	 */
	spin_lock_irqsave(&phba->hbalock, iflags);
	__lpfc_sli_release_iocbq(phba, iocbq);
	spin_unlock_irqrestore(&phba->hbalock, iflags);
}

/**
 * lpfc_sli_cancel_iocbs - Cancel all iocbs from a list.
 * @phba: Pointer to HBA context object.
 * @iocblist: List of IOCBs.
 * @ulpstatus: ULP status in IOCB command field.
 * @ulpWord4: ULP word-4 in IOCB command field.
 *
 * This function is called with a list of IOCBs to cancel. It cancels the IOCB
 * on the list by invoking the complete callback function associated with the
 * IOCB with the provided @ulpstatus and @ulpWord4 set in the IOCB command
 * fields.
 **/
void
lpfc_sli_cancel_iocbs(struct lpfc_hba *phba, struct list_head *iocblist,
		      uint32_t ulpstatus, uint32_t ulpWord4)
{
	struct lpfc_iocbq *piocb;

	while (!list_empty(iocblist)) {
		list_remove_head(iocblist, piocb, struct lpfc_iocbq, list);
		if (!piocb->iocb_cmpl) {
			if (piocb->iocb_flag & LPFC_IO_NVME)
				lpfc_nvme_cancel_iocb(phba, piocb);
			else
				lpfc_sli_release_iocbq(phba, piocb);
		} else {
			piocb->iocb.ulpStatus = ulpstatus;
			piocb->iocb.un.ulpWord[4] = ulpWord4;
			(piocb->iocb_cmpl) (phba, piocb, piocb);
		}
	}
	return;
}

/**
 * lpfc_sli_iocb_cmd_type - Get the iocb type
 * @iocb_cmnd: iocb command code.
 *
 * This function is called by ring event handler function to get the iocb type.
 * This function translates the iocb command to an iocb command type used to
 * decide the final disposition of each completed IOCB.
 * The function returns
 * LPFC_UNKNOWN_IOCB if it is an unsupported iocb
 * LPFC_SOL_IOCB     if it is a solicited iocb completion
 * LPFC_ABORT_IOCB   if it is an abort iocb
 * LPFC_UNSOL_IOCB   if it is an unsolicited iocb
 *
 * The caller is not required to hold any lock.
 **/
static lpfc_iocb_type
lpfc_sli_iocb_cmd_type(uint8_t iocb_cmnd)
{
	lpfc_iocb_type type = LPFC_UNKNOWN_IOCB;

	if (iocb_cmnd > CMD_MAX_IOCB_CMD)
		return LPFC_UNKNOWN_IOCB;

	switch (iocb_cmnd) {
	case CMD_XMIT_SEQUENCE_CR:
	case CMD_XMIT_SEQUENCE_CX:
	case CMD_XMIT_BCAST_CN:
	case CMD_XMIT_BCAST_CX:
	case CMD_ELS_REQUEST_CR:
	case CMD_ELS_REQUEST_CX:
	case CMD_CREATE_XRI_CR:
	case CMD_CREATE_XRI_CX:
	case CMD_GET_RPI_CN:
	case CMD_XMIT_ELS_RSP_CX:
	case CMD_GET_RPI_CR:
	case CMD_FCP_IWRITE_CR:
	case CMD_FCP_IWRITE_CX:
	case CMD_FCP_IREAD_CR:
	case CMD_FCP_IREAD_CX:
	case CMD_FCP_ICMND_CR:
	case CMD_FCP_ICMND_CX:
	case CMD_FCP_TSEND_CX:
	case CMD_FCP_TRSP_CX:
	case CMD_FCP_TRECEIVE_CX:
	case CMD_FCP_AUTO_TRSP_CX:
	case CMD_ADAPTER_MSG:
	case CMD_ADAPTER_DUMP:
	case CMD_XMIT_SEQUENCE64_CR:
	case CMD_XMIT_SEQUENCE64_CX:
	case CMD_XMIT_BCAST64_CN:
	case CMD_XMIT_BCAST64_CX:
	case CMD_ELS_REQUEST64_CR:
	case CMD_ELS_REQUEST64_CX:
	case CMD_FCP_IWRITE64_CR:
	case CMD_FCP_IWRITE64_CX:
	case CMD_FCP_IREAD64_CR:
	case CMD_FCP_IREAD64_CX:
	case CMD_FCP_ICMND64_CR:
	case CMD_FCP_ICMND64_CX:
	case CMD_FCP_TSEND64_CX:
	case CMD_FCP_TRSP64_CX:
	case CMD_FCP_TRECEIVE64_CX:
	case CMD_GEN_REQUEST64_CR:
	case CMD_GEN_REQUEST64_CX:
	case CMD_XMIT_ELS_RSP64_CX:
	case DSSCMD_IWRITE64_CR:
	case DSSCMD_IWRITE64_CX:
	case DSSCMD_IREAD64_CR:
	case DSSCMD_IREAD64_CX:
	case CMD_SEND_FRAME:
		type = LPFC_SOL_IOCB;
		break;
	case CMD_ABORT_XRI_CN:
	case CMD_ABORT_XRI_CX:
	case CMD_CLOSE_XRI_CN:
	case CMD_CLOSE_XRI_CX:
	case CMD_XRI_ABORTED_CX:
	case CMD_ABORT_MXRI64_CN:
	case CMD_XMIT_BLS_RSP64_CX:
		type = LPFC_ABORT_IOCB;
		break;
	case CMD_RCV_SEQUENCE_CX:
	case CMD_RCV_ELS_REQ_CX:
	case CMD_RCV_SEQUENCE64_CX:
	case CMD_RCV_ELS_REQ64_CX:
	case CMD_ASYNC_STATUS:
	case CMD_IOCB_RCV_SEQ64_CX:
	case CMD_IOCB_RCV_ELS64_CX:
	case CMD_IOCB_RCV_CONT64_CX:
	case CMD_IOCB_RET_XRI64_CX:
		type = LPFC_UNSOL_IOCB;
		break;
	case CMD_IOCB_XMIT_MSEQ64_CR:
	case CMD_IOCB_XMIT_MSEQ64_CX:
	case CMD_IOCB_RCV_SEQ_LIST64_CX:
	case CMD_IOCB_RCV_ELS_LIST64_CX:
	case CMD_IOCB_CLOSE_EXTENDED_CN:
	case CMD_IOCB_ABORT_EXTENDED_CN:
	case CMD_IOCB_RET_HBQE64_CN:
	case CMD_IOCB_FCP_IBIDIR64_CR:
	case CMD_IOCB_FCP_IBIDIR64_CX:
	case CMD_IOCB_FCP_ITASKMGT64_CX:
	case CMD_IOCB_LOGENTRY_CN:
	case CMD_IOCB_LOGENTRY_ASYNC_CN:
		printk("%s - Unhandled SLI-3 Command x%x\n",
		       __func__, iocb_cmnd);
		type = LPFC_UNKNOWN_IOCB;
		break;
	default:
		type = LPFC_UNKNOWN_IOCB;
		break;
	}

	return type;
}

/**
 * lpfc_sli_ring_map - Issue config_ring mbox for all rings
 * @phba: Pointer to HBA context object.
 *
 * This function is called from SLI initialization code
 * to configure every ring of the HBA's SLI interface. The
 * caller is not required to hold any lock. This function issues
 * a config_ring mailbox command for each ring.
 * This function returns zero if successful else returns a negative
 * error code.
 **/
static int
lpfc_sli_ring_map(struct lpfc_hba *phba)
{
	struct lpfc_sli *psli = &phba->sli;
	LPFC_MBOXQ_t *pmb;
	MAILBOX_t *pmbox;
	int i, rc, ret = 0;

	pmb = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!pmb)
		return -ENOMEM;
	pmbox = &pmb->u.mb;
	phba->link_state = LPFC_INIT_MBX_CMDS;
	for (i = 0; i < psli->num_rings; i++) {
		lpfc_config_ring(phba, i, pmb);
		rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL);
		if (rc != MBX_SUCCESS) {
			lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
					"0446 Adapter failed to init (%d), "
					"mbxCmd x%x CFG_RING, mbxStatus x%x, "
					"ring %d\n",
					rc, pmbox->mbxCommand,
					pmbox->mbxStatus, i);
			phba->link_state = LPFC_HBA_ERROR;
			ret = -ENXIO;
			break;
		}
	}
	mempool_free(pmb, phba->mbox_mem_pool);
	return ret;
}

/**
 * lpfc_sli_ringtxcmpl_put - Adds new iocb to the txcmplq
 * @phba: Pointer to HBA context object.
 * @pring: Pointer to driver SLI ring object.
 * @piocb: Pointer to the driver iocb object.
 *
 * The driver calls this function with the hbalock held for SLI3
 * ports or the ring lock held for SLI4 ports. The function adds the
 * new iocb to txcmplq of the given ring. This function always returns
 * 0. If this function is called for ELS ring, this function checks if
 * there is a vport associated with the ELS command. This function also
 * starts els_tmofunc timer if this is an ELS command.
 **/
static int
lpfc_sli_ringtxcmpl_put(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
			struct lpfc_iocbq *piocb)
{
	if (phba->sli_rev == LPFC_SLI_REV4)
		lockdep_assert_held(&pring->ring_lock);
	else
		lockdep_assert_held(&phba->hbalock);

	BUG_ON(!piocb);

	list_add_tail(&piocb->list, &pring->txcmplq);
	piocb->iocb_flag |= LPFC_IO_ON_TXCMPLQ;
	pring->txcmplq_cnt++;

	if ((unlikely(pring->ringno == LPFC_ELS_RING)) &&
	    (piocb->iocb.ulpCommand != CMD_ABORT_XRI_CN) &&
	    (piocb->iocb.ulpCommand != CMD_CLOSE_XRI_CN)) {
		BUG_ON(!piocb->vport);
		if (!(piocb->vport->load_flag & FC_UNLOADING))
			mod_timer(&piocb->vport->els_tmofunc,
				  jiffies +
				  msecs_to_jiffies(1000 * (phba->fc_ratov << 1)));
	}

	return 0;
}

/**
 * lpfc_sli_ringtx_get - Get first element of the txq
 * @phba: Pointer to HBA context object.
 * @pring: Pointer to driver SLI ring object.
 *
 * This function is called with hbalock held to get next
 * iocb in txq of the given ring. If there is any iocb in
 * the txq, the function returns first iocb in the list after
 * removing the iocb from the list, else it returns NULL.
 **/
struct lpfc_iocbq *
lpfc_sli_ringtx_get(struct lpfc_hba *phba, struct lpfc_sli_ring *pring)
{
	struct lpfc_iocbq *cmd_iocb;

	lockdep_assert_held(&phba->hbalock);

	list_remove_head((&pring->txq), cmd_iocb, struct lpfc_iocbq, list);
	return cmd_iocb;
}

/**
 * lpfc_sli_next_iocb_slot - Get next iocb slot in the ring
 * @phba: Pointer to HBA context object.
 * @pring: Pointer to driver SLI ring object.
 *
 * This function is called with hbalock held and the caller must post the
 * iocb without releasing the lock. If the caller releases the lock,
 * iocb slot returned by the function is not guaranteed to be available.
 * The function returns pointer to the next available iocb slot if there
 * is available slot in the ring, else it returns NULL.
 * If the get index of the ring is ahead of the put index, the function
 * will post an error attention event to the worker thread to take the
 * HBA to offline state.
 **/
static IOCB_t *
lpfc_sli_next_iocb_slot(struct lpfc_hba *phba, struct lpfc_sli_ring *pring)
{
	struct lpfc_pgp *pgp = &phba->port_gp[pring->ringno];
	uint32_t max_cmd_idx = pring->sli.sli3.numCiocb;

	lockdep_assert_held(&phba->hbalock);

	if ((pring->sli.sli3.next_cmdidx == pring->sli.sli3.cmdidx) &&
	    (++pring->sli.sli3.next_cmdidx >= max_cmd_idx))
		pring->sli.sli3.next_cmdidx = 0;

	if (unlikely(pring->sli.sli3.local_getidx ==
		     pring->sli.sli3.next_cmdidx)) {

		pring->sli.sli3.local_getidx = le32_to_cpu(pgp->cmdGetInx);

		if (unlikely(pring->sli.sli3.local_getidx >= max_cmd_idx)) {
			lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
					"0315 Ring %d issue: portCmdGet %d "
					"is bigger than cmd ring %d\n",
					pring->ringno,
					pring->sli.sli3.local_getidx,
					max_cmd_idx);

			phba->link_state = LPFC_HBA_ERROR;
			/*
			 * All error attention handlers are posted to
			 * worker thread
			 */
			phba->work_ha |= HA_ERATT;
			phba->work_hs = HS_FFER3;

			lpfc_worker_wake_up(phba);

			return NULL;
		}

		if (pring->sli.sli3.local_getidx == pring->sli.sli3.next_cmdidx)
			return NULL;
	}

	return lpfc_cmd_iocb(phba, pring);
}

/**
 * lpfc_sli_next_iotag - Get an iotag for the iocb
 * @phba: Pointer to HBA context object.
 * @iocbq: Pointer to driver iocb object.
 *
 * This function gets an iotag for the iocb. If there is no unused iotag and
 * the iocbq_lookup_len < 0xffff, this function allocates a bigger iotag_lookup
 * array and assigns a new iotag.
 * The function returns the allocated iotag if successful, else returns zero.
 * Zero is not a valid iotag.
 * The caller is not required to hold any lock.
 **/
uint16_t
lpfc_sli_next_iotag(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq)
{
	struct lpfc_iocbq **new_arr;
	struct lpfc_iocbq **old_arr;
	size_t new_len;
	struct lpfc_sli *psli = &phba->sli;
	uint16_t iotag;

	spin_lock_irq(&phba->hbalock);
	iotag = psli->last_iotag;
	if (++iotag < psli->iocbq_lookup_len) {
		psli->last_iotag = iotag;
		psli->iocbq_lookup[iotag] = iocbq;
		spin_unlock_irq(&phba->hbalock);
		iocbq->iotag = iotag;
		return iotag;
	} else if (psli->iocbq_lookup_len < (0xffff
					     - LPFC_IOCBQ_LOOKUP_INCREMENT)) {
		new_len = psli->iocbq_lookup_len + LPFC_IOCBQ_LOOKUP_INCREMENT;
		spin_unlock_irq(&phba->hbalock);
		new_arr = kcalloc(new_len, sizeof(struct lpfc_iocbq *),
				  GFP_KERNEL);
		if (new_arr) {
			spin_lock_irq(&phba->hbalock);
			old_arr = psli->iocbq_lookup;
			if (new_len <= psli->iocbq_lookup_len) {
				/* highly unlikely case */
				kfree(new_arr);
				iotag = psli->last_iotag;
				if (++iotag < psli->iocbq_lookup_len) {
					psli->last_iotag = iotag;
					psli->iocbq_lookup[iotag] = iocbq;
					spin_unlock_irq(&phba->hbalock);
					iocbq->iotag = iotag;
					return iotag;
				}
				spin_unlock_irq(&phba->hbalock);
				return 0;
			}
			if (psli->iocbq_lookup)
				memcpy(new_arr, old_arr,
				       ((psli->last_iotag + 1) *
					sizeof(struct lpfc_iocbq *)));
			psli->iocbq_lookup = new_arr;
			psli->iocbq_lookup_len = new_len;
			psli->last_iotag = iotag;
			psli->iocbq_lookup[iotag] = iocbq;
			spin_unlock_irq(&phba->hbalock);
			iocbq->iotag = iotag;
			kfree(old_arr);
			return iotag;
		}
	} else
		spin_unlock_irq(&phba->hbalock);

	lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
			"0318 Failed to allocate IOTAG.last IOTAG is %d\n",
			psli->last_iotag);

	return 0;
}
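
/*
 * Note: the lookup array grows by LPFC_IOCBQ_LOOKUP_INCREMENT with the
 * hbalock dropped around kcalloc(), so after the lock is reacquired the
 * length is rechecked - another thread may have grown the array first,
 * in which case the fresh allocation is discarded and the now-larger
 * existing array is used instead.
 */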

/**
 * lpfc_sli_submit_iocb - Submit an iocb to the firmware
 * @phba: Pointer to HBA context object.
 * @pring: Pointer to driver SLI ring object.
 * @iocb: Pointer to iocb slot in the ring.
 * @nextiocb: Pointer to driver iocb object which need to be
 *            posted to firmware.
 *
 * This function is called with hbalock held to post a new iocb to
 * the firmware. This function copies the new iocb to ring iocb slot and
 * updates the ring pointers. It adds the new iocb to txcmplq if there is
 * a completion call back for this iocb else the function will free the
 * iocb object.
 **/
static void
lpfc_sli_submit_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
		     IOCB_t *iocb, struct lpfc_iocbq *nextiocb)
{
	/*
	 * Set up an iotag
	 */
	nextiocb->iocb.ulpIoTag = (nextiocb->iocb_cmpl) ? nextiocb->iotag : 0;


	if (pring->ringno == LPFC_ELS_RING) {
		lpfc_debugfs_slow_ring_trc(phba,
			"IOCB cmd ring:   wd4:x%08x wd6:x%08x wd7:x%08x",
			*(((uint32_t *) &nextiocb->iocb) + 4),
			*(((uint32_t *) &nextiocb->iocb) + 6),
			*(((uint32_t *) &nextiocb->iocb) + 7));
	}

	/*
	 * Issue iocb command to adapter
	 */
	lpfc_sli_pcimem_bcopy(&nextiocb->iocb, iocb, phba->iocb_cmd_size);
	wmb();
	pring->stats.iocb_cmd++;

	/*
	 * If there is no completion routine to call, we can release the
	 * IOCB buffer back right now. For IOCBs, like QUE_RING_BUF,
	 * that have no rsp ring completion, iocb_cmpl MUST be NULL.
	 */
	if (nextiocb->iocb_cmpl)
		lpfc_sli_ringtxcmpl_put(phba, pring, nextiocb);
	else
		__lpfc_sli_release_iocbq(phba, nextiocb);

	/*
	 * Let the HBA know what IOCB slot will be the next one the
	 * driver will put a command into.
	 */
	pring->sli.sli3.cmdidx = pring->sli.sli3.next_cmdidx;
	writel(pring->sli.sli3.cmdidx, &phba->host_gp[pring->ringno].cmdPutInx);
}

/**
 * lpfc_sli_update_full_ring - Update the chip attention register
 * @phba: Pointer to HBA context object.
 * @pring: Pointer to driver SLI ring object.
 *
 * The caller is not required to hold any lock for calling this function.
 * This function updates the chip attention bits for the ring to inform
 * firmware that there is pending work to be done for this ring and requests
 * an interrupt when there is space available in the ring. This function is
 * called when the driver is unable to post more iocbs to the ring due
 * to unavailability of space in the ring.
 **/
static void
lpfc_sli_update_full_ring(struct lpfc_hba *phba, struct lpfc_sli_ring *pring)
{
	int ringno = pring->ringno;

	pring->flag |= LPFC_CALL_RING_AVAILABLE;

	wmb();

	/*
	 * Set ring 'ringno' to SET R0CE_REQ in Chip Att register.
	 * The HBA will tell us when an IOCB entry is available.
	 */
	writel((CA_R0ATT|CA_R0CE_REQ) << (ringno*4), phba->CAregaddr);
	readl(phba->CAregaddr); /* flush */

	pring->stats.iocb_cmd_full++;
}

/**
 * lpfc_sli_update_ring - Update chip attention register
 * @phba: Pointer to HBA context object.
 * @pring: Pointer to driver SLI ring object.
 *
 * This function updates the chip attention register bit for the
 * given ring to inform HBA that there is more work to be done
 * in this ring. The caller is not required to hold any lock.
 **/
static void
lpfc_sli_update_ring(struct lpfc_hba *phba, struct lpfc_sli_ring *pring)
{
	int ringno = pring->ringno;

	/*
	 * Tell the HBA that there is work to do in this ring.
	 */
	if (!(phba->sli3_options & LPFC_SLI3_CRP_ENABLED)) {
		wmb();
		writel(CA_R0ATT << (ringno * 4), phba->CAregaddr);
		readl(phba->CAregaddr); /* flush */
	}
}

/**
 * lpfc_sli_resume_iocb - Process iocbs in the txq
 * @phba: Pointer to HBA context object.
 * @pring: Pointer to driver SLI ring object.
 *
 * This function is called with hbalock held to post pending iocbs
 * in the txq to the firmware. This function is called when driver
 * detects space available in the ring.
 **/
static void
lpfc_sli_resume_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring)
{
	IOCB_t *iocb;
	struct lpfc_iocbq *nextiocb;

	lockdep_assert_held(&phba->hbalock);

	/*
	 * Check to see if:
	 *  (a) there is anything on the txq to send
	 *  (b) link is up
	 *  (c) link attention events can be processed (fcp ring only)
	 *  (d) IOCB processing is not blocked by the outstanding mbox command.
	 */

	if (lpfc_is_link_up(phba) &&
	    (!list_empty(&pring->txq)) &&
	    (pring->ringno != LPFC_FCP_RING ||
	     phba->sli.sli_flag & LPFC_PROCESS_LA)) {

		while ((iocb = lpfc_sli_next_iocb_slot(phba, pring)) &&
		       (nextiocb = lpfc_sli_ringtx_get(phba, pring)))
			lpfc_sli_submit_iocb(phba, pring, iocb, nextiocb);

		if (iocb)
			lpfc_sli_update_ring(phba, pring);
		else
			lpfc_sli_update_full_ring(phba, pring);
	}

	return;
}

/**
 * lpfc_sli_next_hbq_slot - Get next hbq entry for the HBQ
 * @phba: Pointer to HBA context object.
 * @hbqno: HBQ number.
 *
 * This function is called with hbalock held to get the next
 * available slot for the given HBQ. If there is free slot
 * available for the HBQ it will return pointer to the next available
 * HBQ entry else it will return NULL.
 **/
static struct lpfc_hbq_entry *
lpfc_sli_next_hbq_slot(struct lpfc_hba *phba, uint32_t hbqno)
{
	struct hbq_s *hbqp = &phba->hbqs[hbqno];

	lockdep_assert_held(&phba->hbalock);

	if (hbqp->next_hbqPutIdx == hbqp->hbqPutIdx &&
	    ++hbqp->next_hbqPutIdx >= hbqp->entry_count)
		hbqp->next_hbqPutIdx = 0;

	if (unlikely(hbqp->local_hbqGetIdx == hbqp->next_hbqPutIdx)) {
		uint32_t raw_index = phba->hbq_get[hbqno];
		uint32_t getidx = le32_to_cpu(raw_index);

		hbqp->local_hbqGetIdx = getidx;

		if (unlikely(hbqp->local_hbqGetIdx >= hbqp->entry_count)) {
			lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
					"1802 HBQ %d: local_hbqGetIdx "
					"%u is > than hbqp->entry_count %u\n",
					hbqno, hbqp->local_hbqGetIdx,
					hbqp->entry_count);

			phba->link_state = LPFC_HBA_ERROR;
			return NULL;
		}

		if (hbqp->local_hbqGetIdx == hbqp->next_hbqPutIdx)
			return NULL;
	}

	return (struct lpfc_hbq_entry *) phba->hbqs[hbqno].hbq_virt +
			hbqp->hbqPutIdx;
}

/**
 * lpfc_sli_hbqbuf_free_all - Free all the hbq buffers
 * @phba: Pointer to HBA context object.
 *
 * This function is called with no lock held to free all the
 * hbq buffers while uninitializing the SLI interface. It also
 * frees the HBQ buffers returned by the firmware but not yet
 * processed by the upper layers.
 **/
void
lpfc_sli_hbqbuf_free_all(struct lpfc_hba *phba)
{
	struct lpfc_dmabuf *dmabuf, *next_dmabuf;
	struct hbq_dmabuf *hbq_buf;
	unsigned long flags;
	int i, hbq_count;

	hbq_count = lpfc_sli_hbq_count();
	/* Return all memory used by all HBQs */
	spin_lock_irqsave(&phba->hbalock, flags);
	for (i = 0; i < hbq_count; ++i) {
		list_for_each_entry_safe(dmabuf, next_dmabuf,
				&phba->hbqs[i].hbq_buffer_list, list) {
			hbq_buf = container_of(dmabuf, struct hbq_dmabuf, dbuf);
			list_del(&hbq_buf->dbuf.list);
			(phba->hbqs[i].hbq_free_buffer)(phba, hbq_buf);
		}
		phba->hbqs[i].buffer_count = 0;
	}

	/* Mark the HBQs not in use */
	phba->hbq_in_use = 0;
	spin_unlock_irqrestore(&phba->hbalock, flags);
}

/**
 * lpfc_sli_hbq_to_firmware - Post the hbq buffer to firmware
 * @phba: Pointer to HBA context object.
 * @hbqno: HBQ number.
 * @hbq_buf: Pointer to HBQ buffer.
 *
 * This function is called with the hbalock held to post a
 * hbq buffer to the firmware. If the function finds an empty
 * slot in the HBQ, it will post the buffer. The function will return
 * zero if it successfully posts the buffer, else it will return
 * an error.
 **/
static int
lpfc_sli_hbq_to_firmware(struct lpfc_hba *phba, uint32_t hbqno,
			 struct hbq_dmabuf *hbq_buf)
{
	lockdep_assert_held(&phba->hbalock);
	return phba->lpfc_sli_hbq_to_firmware(phba, hbqno, hbq_buf);
}

/**
 * lpfc_sli_hbq_to_firmware_s3 - Post the hbq buffer to SLI3 firmware
 * @phba: Pointer to HBA context object.
 * @hbqno: HBQ number.
 * @hbq_buf: Pointer to HBQ buffer.
 *
 * This function is called with the hbalock held to post a hbq buffer to the
 * firmware. If the function finds an empty slot in the HBQ, it will post the
 * buffer and place it on the hbq_buffer_list. The function will return zero
 * if it successfully posts the buffer, else it will return an error.
 **/
static int
lpfc_sli_hbq_to_firmware_s3(struct lpfc_hba *phba, uint32_t hbqno,
			    struct hbq_dmabuf *hbq_buf)
{
	struct lpfc_hbq_entry *hbqe;
	dma_addr_t physaddr = hbq_buf->dbuf.phys;

	lockdep_assert_held(&phba->hbalock);
	/* Get next HBQ entry slot to use */
	hbqe = lpfc_sli_next_hbq_slot(phba, hbqno);
	if (hbqe) {
		struct hbq_s *hbqp = &phba->hbqs[hbqno];

		hbqe->bde.addrHigh = le32_to_cpu(putPaddrHigh(physaddr));
		hbqe->bde.addrLow  = le32_to_cpu(putPaddrLow(physaddr));
		hbqe->bde.tus.f.bdeSize = hbq_buf->total_size;
		hbqe->bde.tus.f.bdeFlags = 0;
		hbqe->bde.tus.w = le32_to_cpu(hbqe->bde.tus.w);
		hbqe->buffer_tag = le32_to_cpu(hbq_buf->tag);
		/* Sync SLIM */
		hbqp->hbqPutIdx = hbqp->next_hbqPutIdx;
		writel(hbqp->hbqPutIdx, phba->hbq_put + hbqno);
		/* flush */
		readl(phba->hbq_put + hbqno);
		list_add_tail(&hbq_buf->dbuf.list, &hbqp->hbq_buffer_list);
		return 0;
	} else
		return -ENOMEM;
}
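
/*
 * Note: the readl() of the register just written is the usual MMIO idiom
 * for flushing posted PCI writes; it ensures the new put index has
 * reached the adapter before the buffer is accounted for on the
 * hbq_buffer_list.
 */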

/**
 * lpfc_sli_hbq_to_firmware_s4 - Post the hbq buffer to SLI4 firmware
 * @phba: Pointer to HBA context object.
 * @hbqno: HBQ number.
 * @hbq_buf: Pointer to HBQ buffer.
 *
 * This function is called with the hbalock held to post an RQE to the SLI4
 * firmware. If able to post the RQE to the RQ it will queue the hbq entry to
 * the hbq_buffer_list and return zero, otherwise it will return an error.
 **/
static int
lpfc_sli_hbq_to_firmware_s4(struct lpfc_hba *phba, uint32_t hbqno,
			    struct hbq_dmabuf *hbq_buf)
{
	int rc;
	struct lpfc_rqe hrqe;
	struct lpfc_rqe drqe;
	struct lpfc_queue *hrq;
	struct lpfc_queue *drq;

	if (hbqno != LPFC_ELS_HBQ)
		return 1;
	hrq = phba->sli4_hba.hdr_rq;
	drq = phba->sli4_hba.dat_rq;

	lockdep_assert_held(&phba->hbalock);
	hrqe.address_lo = putPaddrLow(hbq_buf->hbuf.phys);
	hrqe.address_hi = putPaddrHigh(hbq_buf->hbuf.phys);
	drqe.address_lo = putPaddrLow(hbq_buf->dbuf.phys);
	drqe.address_hi = putPaddrHigh(hbq_buf->dbuf.phys);
	rc = lpfc_sli4_rq_put(hrq, drq, &hrqe, &drqe);
	if (rc < 0)
		return rc;
	hbq_buf->tag = (rc | (hbqno << 16));
	list_add_tail(&hbq_buf->dbuf.list, &phba->hbqs[hbqno].hbq_buffer_list);
	return 0;
}

/* HBQ for ELS and CT traffic. */
static struct lpfc_hbq_init lpfc_els_hbq = {
	.rn = 1,
	.entry_count = 256,
	.mask_count = 0,
	.profile = 0,
	.ring_mask = (1 << LPFC_ELS_RING),
	.buffer_count = 0,
	.init_count = 40,
	.add_count = 40,
};

/* Array of HBQs */
struct lpfc_hbq_init *lpfc_hbq_defs[] = {
	&lpfc_els_hbq,
};

/**
 * lpfc_sli_hbqbuf_fill_hbqs - Post more hbq buffers to HBQ
 * @phba: Pointer to HBA context object.
 * @hbqno: HBQ number.
 * @count: Number of HBQ buffers to be posted.
 *
 * This function is called with no lock held to post more hbq buffers to the
 * given HBQ. The function returns the number of HBQ buffers successfully
 * posted.
 **/
static int
lpfc_sli_hbqbuf_fill_hbqs(struct lpfc_hba *phba, uint32_t hbqno, uint32_t count)
{
	uint32_t i, posted = 0;
	unsigned long flags;
	struct hbq_dmabuf *hbq_buffer;
	LIST_HEAD(hbq_buf_list);

	if (!phba->hbqs[hbqno].hbq_alloc_buffer)
		return 0;

	if ((phba->hbqs[hbqno].buffer_count + count) >
	    lpfc_hbq_defs[hbqno]->entry_count)
		count = lpfc_hbq_defs[hbqno]->entry_count -
					phba->hbqs[hbqno].buffer_count;
	if (!count)
		return 0;
	/* Allocate HBQ entries */
	for (i = 0; i < count; i++) {
		hbq_buffer = (phba->hbqs[hbqno].hbq_alloc_buffer)(phba);
		if (!hbq_buffer)
			break;
		list_add_tail(&hbq_buffer->dbuf.list, &hbq_buf_list);
	}
	/* Check whether HBQ is still in use */
	spin_lock_irqsave(&phba->hbalock, flags);
	if (!phba->hbq_in_use)
		goto err;
	while (!list_empty(&hbq_buf_list)) {
		list_remove_head(&hbq_buf_list, hbq_buffer, struct hbq_dmabuf,
				 dbuf.list);
		hbq_buffer->tag = (phba->hbqs[hbqno].buffer_count |
				   (hbqno << 16));
		if (!lpfc_sli_hbq_to_firmware(phba, hbqno, hbq_buffer)) {
			phba->hbqs[hbqno].buffer_count++;
			posted++;
		} else
			(phba->hbqs[hbqno].hbq_free_buffer)(phba, hbq_buffer);
	}
	spin_unlock_irqrestore(&phba->hbalock, flags);
	return posted;
err:
	spin_unlock_irqrestore(&phba->hbalock, flags);
	while (!list_empty(&hbq_buf_list)) {
		list_remove_head(&hbq_buf_list, hbq_buffer, struct hbq_dmabuf,
				 dbuf.list);
		(phba->hbqs[hbqno].hbq_free_buffer)(phba, hbq_buffer);
	}
	return 0;
}

/**
 * lpfc_sli_hbqbuf_add_hbqs - Post more HBQ buffers to firmware
 * @phba: Pointer to HBA context object.
 * @qno: HBQ number.
 *
 * This function posts more buffers to the HBQ. This function
 * is called with no lock held. The function returns the number of HBQ
 * entries successfully allocated.
 **/
int
lpfc_sli_hbqbuf_add_hbqs(struct lpfc_hba *phba, uint32_t qno)
{
	if (phba->sli_rev == LPFC_SLI_REV4)
		return 0;
	else
		return lpfc_sli_hbqbuf_fill_hbqs(phba, qno,
						 lpfc_hbq_defs[qno]->add_count);
}

/**
 * lpfc_sli_hbqbuf_init_hbqs - Post initial buffers to the HBQ
 * @phba: Pointer to HBA context object.
 * @qno: HBQ queue number.
 *
 * This function is called from SLI initialization code path with
 * no lock held to post initial HBQ buffers to firmware. The
 * function returns the number of hbq entries successfully allocated.
 **/
static int
lpfc_sli_hbqbuf_init_hbqs(struct lpfc_hba *phba, uint32_t qno)
{
	if (phba->sli_rev == LPFC_SLI_REV4)
		return lpfc_sli_hbqbuf_fill_hbqs(phba, qno,
					lpfc_hbq_defs[qno]->entry_count);
	else
		return lpfc_sli_hbqbuf_fill_hbqs(phba, qno,
					lpfc_hbq_defs[qno]->init_count);
}

/**
 * lpfc_sli_hbqbuf_get - Remove the first hbq off of an hbq list
 * @rb_list: pointer to the hbq buffer list to take a buffer from.
 *
 * This function removes the first hbq buffer on an hbq list and returns a
 * pointer to that buffer. If it finds no buffers on the list it returns NULL.
 **/
static struct hbq_dmabuf *
lpfc_sli_hbqbuf_get(struct list_head *rb_list)
{
	struct lpfc_dmabuf *d_buf;

	list_remove_head(rb_list, d_buf, struct lpfc_dmabuf, list);
	if (!d_buf)
		return NULL;
	return container_of(d_buf, struct hbq_dmabuf, dbuf);
}

/**
 * lpfc_sli_rqbuf_get - Remove the first dma buffer off of an RQ list
 * @phba: Pointer to HBA context object.
 * @hrq: pointer to the hrq ring.
 *
 * This function removes the first RQ buffer on an RQ buffer list and returns
 * a pointer to that buffer. If it finds no buffers on the list it returns
 * NULL.
 **/
static struct rqb_dmabuf *
lpfc_sli_rqbuf_get(struct lpfc_hba *phba, struct lpfc_queue *hrq)
{
	struct lpfc_dmabuf *h_buf;
	struct lpfc_rqb *rqbp;

	rqbp = hrq->rqbp;
	list_remove_head(&rqbp->rqb_buffer_list, h_buf,
			 struct lpfc_dmabuf, list);
	if (!h_buf)
		return NULL;
	rqbp->buffer_count--;
	return container_of(h_buf, struct rqb_dmabuf, hbuf);
}

/**
 * lpfc_sli_hbqbuf_find - Find the hbq buffer associated with a tag
 * @phba: Pointer to HBA context object.
 * @tag: Tag of the hbq buffer.
 *
 * This function searches for the hbq buffer associated with the given tag in
 * the hbq buffer list. If it finds the hbq buffer, it returns the hbq_buffer
 * otherwise it returns NULL.
 **/
static struct hbq_dmabuf *
lpfc_sli_hbqbuf_find(struct lpfc_hba *phba, uint32_t tag)
{
	struct lpfc_dmabuf *d_buf;
	struct hbq_dmabuf *hbq_buf;
	uint32_t hbqno;

	hbqno = tag >> 16;
	if (hbqno >= LPFC_MAX_HBQS)
		return NULL;

	spin_lock_irq(&phba->hbalock);
	list_for_each_entry(d_buf, &phba->hbqs[hbqno].hbq_buffer_list, list) {
		hbq_buf = container_of(d_buf, struct hbq_dmabuf, dbuf);
		if (hbq_buf->tag == tag) {
			spin_unlock_irq(&phba->hbalock);
			return hbq_buf;
		}
	}
	spin_unlock_irq(&phba->hbalock);
	lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
			"1803 Bad hbq tag. Data: x%x x%x\n",
			tag, phba->hbqs[tag >> 16].buffer_count);
	return NULL;
}

/**
 * lpfc_sli_free_hbq - Give back the hbq buffer to firmware
 * @phba: Pointer to HBA context object.
 * @hbq_buffer: Pointer to HBQ buffer.
 *
 * This function is called with hbalock held. This function is called
 * when the driver is finished with the hbq buffer; it tries to repost
 * the buffer to the firmware, and frees it if the repost fails.
 **/
void
lpfc_sli_free_hbq(struct lpfc_hba *phba, struct hbq_dmabuf *hbq_buffer)
{
	uint32_t hbqno;

	if (hbq_buffer) {
		hbqno = hbq_buffer->tag >> 16;
		if (lpfc_sli_hbq_to_firmware(phba, hbqno, hbq_buffer))
			(phba->hbqs[hbqno].hbq_free_buffer)(phba, hbq_buffer);
	}
}

/**
 * lpfc_sli_chk_mbx_command - Check if the mailbox is a legitimate mailbox
 * @mbxCommand: mailbox command code.
 *
 * This function is called by the mailbox event handler function to verify
 * that the completed mailbox command is a legitimate mailbox command. If the
 * completed mailbox is not known to the function, it will return MBX_SHUTDOWN
 * and the mailbox event handler will take the HBA offline.
 **/
static int
lpfc_sli_chk_mbx_command(uint8_t mbxCommand)
{
	uint8_t ret;

	switch (mbxCommand) {
	case MBX_LOAD_SM:
	case MBX_READ_NV:
	case MBX_WRITE_NV:
	case MBX_WRITE_VPARMS:
	case MBX_RUN_BIU_DIAG:
	case MBX_INIT_LINK:
	case MBX_DOWN_LINK:
	case MBX_CONFIG_LINK:
	case MBX_CONFIG_RING:
	case MBX_RESET_RING:
	case MBX_READ_CONFIG:
	case MBX_READ_RCONFIG:
	case MBX_READ_SPARM:
	case MBX_READ_STATUS:
	case MBX_READ_RPI:
	case MBX_READ_XRI:
	case MBX_READ_REV:
	case MBX_READ_LNK_STAT:
	case MBX_REG_LOGIN:
	case MBX_UNREG_LOGIN:
	case MBX_CLEAR_LA:
	case MBX_DUMP_MEMORY:
	case MBX_DUMP_CONTEXT:
	case MBX_RUN_DIAGS:
	case MBX_RESTART:
	case MBX_UPDATE_CFG:
	case MBX_DOWN_LOAD:
	case MBX_DEL_LD_ENTRY:
	case MBX_RUN_PROGRAM:
	case MBX_SET_MASK:
	case MBX_SET_VARIABLE:
	case MBX_UNREG_D_ID:
	case MBX_KILL_BOARD:
	case MBX_CONFIG_FARP:
	case MBX_BEACON:
	case MBX_LOAD_AREA:
	case MBX_RUN_BIU_DIAG64:
	case MBX_CONFIG_PORT:
	case MBX_READ_SPARM64:
	case MBX_READ_RPI64:
	case MBX_REG_LOGIN64:
	case MBX_READ_TOPOLOGY:
	case MBX_WRITE_WWN:
	case MBX_SET_DEBUG:
	case MBX_LOAD_EXP_ROM:
	case MBX_ASYNCEVT_ENABLE:
	case MBX_REG_VPI:
	case MBX_UNREG_VPI:
	case MBX_HEARTBEAT:
	case MBX_PORT_CAPABILITIES:
	case MBX_PORT_IOV_CONTROL:
	case MBX_SLI4_CONFIG:
	case MBX_SLI4_REQ_FTRS:
	case MBX_REG_FCFI:
	case MBX_UNREG_FCFI:
	case MBX_REG_VFI:
	case MBX_UNREG_VFI:
	case MBX_INIT_VPI:
	case MBX_INIT_VFI:
	case MBX_RESUME_RPI:
	case MBX_READ_EVENT_LOG_STATUS:
	case MBX_READ_EVENT_LOG:
	case MBX_SECURITY_MGMT:
	case MBX_AUTH_PORT:
	case MBX_ACCESS_VDATA:
		ret = mbxCommand;
		break;
	default:
		ret = MBX_SHUTDOWN;
		break;
	}
	return ret;
}
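
/*
 * Note: the switch above deliberately whitelists every mailbox opcode the
 * driver can issue; any other opcode completing is treated as evidence of
 * host/firmware disagreement, and the MBX_SHUTDOWN return causes
 * lpfc_sli_handle_mb_event() to raise an error attention and take the
 * HBA offline.
 */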

/**
 * lpfc_sli_wake_mbox_wait - lpfc_sli_issue_mbox_wait mbox completion handler
 * @phba: Pointer to HBA context object.
 * @pmboxq: Pointer to mailbox command.
 *
 * This is completion handler function for mailbox commands issued from
 * lpfc_sli_issue_mbox_wait function. This function is called by the
 * mailbox event handler function with no lock held. This function
 * will wake up thread waiting on the wait queue pointed by context3
 * of the mailbox.
 **/
void
lpfc_sli_wake_mbox_wait(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmboxq)
{
	unsigned long drvr_flag;
	struct completion *pmbox_done;

	/*
	 * If pmbox_done is empty, the driver thread gave up waiting and
	 * continued running.
	 */
	pmboxq->mbox_flag |= LPFC_MBX_WAKE;
	spin_lock_irqsave(&phba->hbalock, drvr_flag);
	pmbox_done = (struct completion *)pmboxq->context3;
	if (pmbox_done)
		complete(pmbox_done);
	spin_unlock_irqrestore(&phba->hbalock, drvr_flag);
	return;
}

static void
__lpfc_sli_rpi_release(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
{
	unsigned long iflags;

	if (ndlp->nlp_flag & NLP_RELEASE_RPI) {
		lpfc_sli4_free_rpi(vport->phba, ndlp->nlp_rpi);
		spin_lock_irqsave(&vport->phba->ndlp_lock, iflags);
		ndlp->nlp_flag &= ~NLP_RELEASE_RPI;
		ndlp->nlp_rpi = LPFC_RPI_ALLOC_ERROR;
		spin_unlock_irqrestore(&vport->phba->ndlp_lock, iflags);
	}
	ndlp->nlp_flag &= ~NLP_UNREG_INP;
}
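
/*
 * Note: NLP_RELEASE_RPI marks an rpi that should be returned to the SLI4
 * free pool once its final UNREG_LOGIN completes; this helper performs
 * that release and clears NLP_UNREG_INP so a deferred discovery action
 * on the node can proceed.
 */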

/**
 * lpfc_sli_def_mbox_cmpl - Default mailbox completion handler
 * @phba: Pointer to HBA context object.
 * @pmb: Pointer to mailbox object.
 *
 * This function is the default mailbox completion handler. It
 * frees the memory resources associated with the completed mailbox
 * command. If the completed command is a REG_LOGIN mailbox command,
 * this function will issue a UNREG_LOGIN to re-claim the rpi.
 **/
void
lpfc_sli_def_mbox_cmpl(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
{
	struct lpfc_vport *vport = pmb->vport;
	struct lpfc_dmabuf *mp;
	struct lpfc_nodelist *ndlp;
	struct Scsi_Host *shost;
	uint16_t rpi, vpi;
	int rc;

	mp = (struct lpfc_dmabuf *)(pmb->ctx_buf);

	if (mp) {
		lpfc_mbuf_free(phba, mp->virt, mp->phys);
		kfree(mp);
	}

	/*
	 * If a REG_LOGIN succeeded after node is destroyed or node
	 * is in re-discovery driver need to cleanup the RPI.
	 */
	if (!(phba->pport->load_flag & FC_UNLOADING) &&
	    pmb->u.mb.mbxCommand == MBX_REG_LOGIN64 &&
	    !pmb->u.mb.mbxStatus) {
		rpi = pmb->u.mb.un.varWords[0];
		vpi = pmb->u.mb.un.varRegLogin.vpi;
		if (phba->sli_rev == LPFC_SLI_REV4)
			vpi -= phba->sli4_hba.max_cfg_param.vpi_base;
		lpfc_unreg_login(phba, vpi, rpi, pmb);
		pmb->vport = vport;
		pmb->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
		rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT);
		if (rc != MBX_NOT_FINISHED)
			return;
	}

	if ((pmb->u.mb.mbxCommand == MBX_REG_VPI) &&
	    !(phba->pport->load_flag & FC_UNLOADING) &&
	    !pmb->u.mb.mbxStatus) {
		shost = lpfc_shost_from_vport(vport);
		spin_lock_irq(shost->host_lock);
		vport->vpi_state |= LPFC_VPI_REGISTERED;
		vport->fc_flag &= ~FC_VPORT_NEEDS_REG_VPI;
		spin_unlock_irq(shost->host_lock);
	}

	if (pmb->u.mb.mbxCommand == MBX_REG_LOGIN64) {
		ndlp = (struct lpfc_nodelist *)pmb->ctx_ndlp;
		lpfc_nlp_put(ndlp);
		pmb->ctx_buf = NULL;
		pmb->ctx_ndlp = NULL;
	}

	if (pmb->u.mb.mbxCommand == MBX_UNREG_LOGIN) {
		ndlp = (struct lpfc_nodelist *)pmb->ctx_ndlp;

		/* Check to see if there are any deferred events to process */
		if (ndlp) {
			lpfc_printf_vlog(
				vport,
				KERN_INFO, LOG_MBOX | LOG_DISCOVERY,
				"1438 UNREG cmpl deferred mbox x%x "
				"on NPort x%x Data: x%x x%x %px\n",
				ndlp->nlp_rpi, ndlp->nlp_DID,
				ndlp->nlp_flag, ndlp->nlp_defer_did, ndlp);

			if ((ndlp->nlp_flag & NLP_UNREG_INP) &&
			    (ndlp->nlp_defer_did != NLP_EVT_NOTHING_PENDING)) {
				ndlp->nlp_flag &= ~NLP_UNREG_INP;
				ndlp->nlp_defer_did = NLP_EVT_NOTHING_PENDING;
				lpfc_issue_els_plogi(vport, ndlp->nlp_DID, 0);
			} else {
				__lpfc_sli_rpi_release(vport, ndlp);
			}
			if (vport->load_flag & FC_UNLOADING)
				lpfc_nlp_put(ndlp);
			pmb->ctx_ndlp = NULL;
		}
	}

	/* Check security permission status on INIT_LINK mailbox command */
	if ((pmb->u.mb.mbxCommand == MBX_INIT_LINK) &&
	    (pmb->u.mb.mbxStatus == MBXERR_SEC_NO_PERMISSION))
		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
				"2860 SLI authentication is required "
				"for INIT_LINK but has not done yet\n");

	if (bf_get(lpfc_mqe_command, &pmb->u.mqe) == MBX_SLI4_CONFIG)
		lpfc_sli4_mbox_cmd_free(phba, pmb);
	else
		mempool_free(pmb, phba->mbox_mem_pool);
}

/**
 * lpfc_sli4_unreg_rpi_cmpl_clr - mailbox completion handler
 * @phba: Pointer to HBA context object.
 * @pmb: Pointer to mailbox object.
 *
 * This function is the unreg rpi mailbox completion handler. It
 * frees the memory resources associated with the completed mailbox
 * command. An additional reference is put on the ndlp to prevent
 * lpfc_nlp_release from freeing the rpi bit in the bitmask before
 * the unreg mailbox command completes; this routine puts the
 * reference back on the ndlp when the data structure is done being used.
 **/
void
lpfc_sli4_unreg_rpi_cmpl_clr(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
{
	struct lpfc_vport *vport = pmb->vport;
	struct lpfc_nodelist *ndlp;

	ndlp = pmb->ctx_ndlp;
	if (pmb->u.mb.mbxCommand == MBX_UNREG_LOGIN) {
		if (phba->sli_rev == LPFC_SLI_REV4 &&
		    (bf_get(lpfc_sli_intf_if_type,
			    &phba->sli4_hba.sli_intf) >=
		     LPFC_SLI_INTF_IF_TYPE_2)) {
			if (ndlp) {
				lpfc_printf_vlog(
					vport, KERN_INFO, LOG_MBOX | LOG_SLI,
					"0010 UNREG_LOGIN vpi:%x "
					"rpi:%x DID:%x defer x%x flg x%x "
					"map:%x %px\n",
					vport->vpi, ndlp->nlp_rpi,
					ndlp->nlp_DID, ndlp->nlp_defer_did,
					ndlp->nlp_flag,
					ndlp->nlp_usg_map, ndlp);
				ndlp->nlp_flag &= ~NLP_LOGO_ACC;
				lpfc_nlp_put(ndlp);

				/* Check to see if there are any deferred
				 * events to process
				 */
				if ((ndlp->nlp_flag & NLP_UNREG_INP) &&
				    (ndlp->nlp_defer_did !=
				     NLP_EVT_NOTHING_PENDING)) {
					lpfc_printf_vlog(
						vport, KERN_INFO, LOG_DISCOVERY,
						"4111 UNREG cmpl deferred "
						"clr x%x on "
						"NPort x%x Data: x%x x%px\n",
						ndlp->nlp_rpi, ndlp->nlp_DID,
						ndlp->nlp_defer_did, ndlp);
					ndlp->nlp_flag &= ~NLP_UNREG_INP;
					ndlp->nlp_defer_did =
						NLP_EVT_NOTHING_PENDING;
					lpfc_issue_els_plogi(
						vport, ndlp->nlp_DID, 0);
				} else {
					__lpfc_sli_rpi_release(vport, ndlp);
				}
			}
		}
	}

	mempool_free(pmb, phba->mbox_mem_pool);
}
2627
2628
2629
2630
2631
2632
2633
2634
2635
2636
2637
2638
2639
2640
2641int
2642lpfc_sli_handle_mb_event(struct lpfc_hba *phba)
2643{
2644 MAILBOX_t *pmbox;
2645 LPFC_MBOXQ_t *pmb;
2646 int rc;
2647 LIST_HEAD(cmplq);
2648
2649 phba->sli.slistat.mbox_event++;
2650
2651
2652 spin_lock_irq(&phba->hbalock);
2653 list_splice_init(&phba->sli.mboxq_cmpl, &cmplq);
2654 spin_unlock_irq(&phba->hbalock);
2655
2656
2657 do {
2658 list_remove_head(&cmplq, pmb, LPFC_MBOXQ_t, list);
2659 if (pmb == NULL)
2660 break;
2661
2662 pmbox = &pmb->u.mb;
2663
2664 if (pmbox->mbxCommand != MBX_HEARTBEAT) {
2665 if (pmb->vport) {
2666 lpfc_debugfs_disc_trc(pmb->vport,
2667 LPFC_DISC_TRC_MBOX_VPORT,
2668 "MBOX cmpl vport: cmd:x%x mb:x%x x%x",
2669 (uint32_t)pmbox->mbxCommand,
2670 pmbox->un.varWords[0],
2671 pmbox->un.varWords[1]);
2672 }
2673 else {
2674 lpfc_debugfs_disc_trc(phba->pport,
2675 LPFC_DISC_TRC_MBOX,
2676 "MBOX cmpl: cmd:x%x mb:x%x x%x",
2677 (uint32_t)pmbox->mbxCommand,
2678 pmbox->un.varWords[0],
2679 pmbox->un.varWords[1]);
2680 }
2681 }
2682
2683
2684 /* It is a fatal error if an unknown mailbox
2685 * command completion is received. */
2686 if (lpfc_sli_chk_mbx_command(pmbox->mbxCommand) ==
2687 MBX_SHUTDOWN) {
2688
2689 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
2690 "(%d):0323 Unknown Mailbox command "
2691 "x%x (x%x/x%x) Cmpl\n",
2692 pmb->vport ? pmb->vport->vpi :
2693 LPFC_VPORT_UNKNOWN,
2694 pmbox->mbxCommand,
2695 lpfc_sli_config_mbox_subsys_get(phba,
2696 pmb),
2697 lpfc_sli_config_mbox_opcode_get(phba,
2698 pmb));
2699 phba->link_state = LPFC_HBA_ERROR;
2700 phba->work_hs = HS_FFER3;
2701 lpfc_handle_eratt(phba);
2702 continue;
2703 }
2704
2705 if (pmbox->mbxStatus) {
2706 phba->sli.slistat.mbox_stat_err++;
2707 if (pmbox->mbxStatus == MBXERR_NO_RESOURCES) {
2708 /* Mbox cmd cmpl error - RETRYing */
2709 lpfc_printf_log(phba, KERN_INFO,
2710 LOG_MBOX | LOG_SLI,
2711 "(%d):0305 Mbox cmd cmpl "
2712 "error - RETRYing Data: x%x "
2713 "(x%x/x%x) x%x x%x x%x\n",
2714 pmb->vport ? pmb->vport->vpi :
2715 LPFC_VPORT_UNKNOWN,
2716 pmbox->mbxCommand,
2717 lpfc_sli_config_mbox_subsys_get(phba,
2718 pmb),
2719 lpfc_sli_config_mbox_opcode_get(phba,
2720 pmb),
2721 pmbox->mbxStatus,
2722 pmbox->un.varWords[0],
2723 pmb->vport ? pmb->vport->port_state :
2724 LPFC_VPORT_UNKNOWN);
2725 pmbox->mbxStatus = 0;
2726 pmbox->mbxOwner = OWN_HOST;
2727 rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT);
2728 if (rc != MBX_NOT_FINISHED)
2729 continue;
2730 }
2731 }
2732
2733 /* Log the completed mailbox command and dispatch its handler */
2734 lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI,
2735 "(%d):0307 Mailbox cmd x%x (x%x/x%x) Cmpl %ps "
2736 "Data: x%x x%x x%x x%x x%x x%x x%x x%x x%x "
2737 "x%x x%x x%x\n",
2738 pmb->vport ? pmb->vport->vpi : 0,
2739 pmbox->mbxCommand,
2740 lpfc_sli_config_mbox_subsys_get(phba, pmb),
2741 lpfc_sli_config_mbox_opcode_get(phba, pmb),
2742 pmb->mbox_cmpl,
2743 *((uint32_t *) pmbox),
2744 pmbox->un.varWords[0],
2745 pmbox->un.varWords[1],
2746 pmbox->un.varWords[2],
2747 pmbox->un.varWords[3],
2748 pmbox->un.varWords[4],
2749 pmbox->un.varWords[5],
2750 pmbox->un.varWords[6],
2751 pmbox->un.varWords[7],
2752 pmbox->un.varWords[8],
2753 pmbox->un.varWords[9],
2754 pmbox->un.varWords[10]);
2755
2756 if (pmb->mbox_cmpl)
2757 pmb->mbox_cmpl(phba, pmb);
2758 } while (1);
2759 return 0;
2760}
2761
2762
2763
2764
2765
2766
2767
2768
2769
2770
2771
2772
2773
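/**
 * lpfc_sli_get_buff - Get the buffer associated with the buffer tag
 * @phba: Pointer to HBA context object.
 * @pring: Pointer to driver SLI ring object.
 * @tag: buffer tag.
 *
 * If the QUE_BUFTAG_BIT is set in the tag, the buffer is looked up on
 * the ring's tagged-buffer list via lpfc_sli_ring_taggedbuf_get();
 * otherwise the tag identifies an HBQ buffer and the HBQ buffer list is
 * searched. Returns NULL if no matching buffer is found.
 **/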
2774static struct lpfc_dmabuf *
2775lpfc_sli_get_buff(struct lpfc_hba *phba,
2776 struct lpfc_sli_ring *pring,
2777 uint32_t tag)
2778{
2779 struct hbq_dmabuf *hbq_entry;
2780
2781 if (tag & QUE_BUFTAG_BIT)
2782 return lpfc_sli_ring_taggedbuf_get(phba, pring, tag);
2783 hbq_entry = lpfc_sli_hbqbuf_find(phba, tag);
2784 if (!hbq_entry)
2785 return NULL;
2786 return &hbq_entry->dbuf;
2787}
2788
2789
2790
2791
2792
2793
2794
2795
2796
2797
2798
2799
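/**
 * lpfc_nvme_unsol_ls_handler - Process an unsolicited NVME LS request
 * @phba: pointer to lpfc hba data structure.
 * @piocb: pointer to the iocbq struct representing the sequence starting
 *         frame.
 *
 * This routine validates the NVME LS frame and the driver state,
 * verifies there is a login with the remote port, and then calls the
 * NVME host (initiator) or target (nvmet) LS request handler as
 * appropriate. On failure the receive buffer is freed, an abort may be
 * issued for a new exchange, and the exchange context is released.
 **/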
2800static void
2801lpfc_nvme_unsol_ls_handler(struct lpfc_hba *phba, struct lpfc_iocbq *piocb)
2802{
2803 struct lpfc_nodelist *ndlp;
2804 struct lpfc_dmabuf *d_buf;
2805 struct hbq_dmabuf *nvmebuf;
2806 struct fc_frame_header *fc_hdr;
2807 struct lpfc_async_xchg_ctx *axchg = NULL;
2808 char *failwhy = NULL;
2809 uint32_t oxid, sid, did, fctl, size;
2810 int ret = 1;
2811
2812 d_buf = piocb->context2;
2813
2814 nvmebuf = container_of(d_buf, struct hbq_dmabuf, dbuf);
2815 fc_hdr = nvmebuf->hbuf.virt;
2816 oxid = be16_to_cpu(fc_hdr->fh_ox_id);
2817 sid = sli4_sid_from_fc_hdr(fc_hdr);
2818 did = sli4_did_from_fc_hdr(fc_hdr);
2819 fctl = (fc_hdr->fh_f_ctl[0] << 16 |
2820 fc_hdr->fh_f_ctl[1] << 8 |
2821 fc_hdr->fh_f_ctl[2]);
2822 size = bf_get(lpfc_rcqe_length, &nvmebuf->cq_event.cqe.rcqe_cmpl);
2823
2824 lpfc_nvmeio_data(phba, "NVME LS RCV: xri x%x sz %d from %06x\n",
2825 oxid, size, sid);
2826
2827 if (phba->pport->load_flag & FC_UNLOADING) {
2828 failwhy = "Driver Unloading";
2829 } else if (!(phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME)) {
2830 failwhy = "NVME FC4 Disabled";
2831 } else if (!phba->nvmet_support && !phba->pport->localport) {
2832 failwhy = "No Localport";
2833 } else if (phba->nvmet_support && !phba->targetport) {
2834 failwhy = "No Targetport";
2835 } else if (unlikely(fc_hdr->fh_r_ctl != FC_RCTL_ELS4_REQ)) {
2836 failwhy = "Bad NVME LS R_CTL";
2837 } else if (unlikely((fctl & 0x00FF0000) !=
2838 (FC_FC_FIRST_SEQ | FC_FC_END_SEQ | FC_FC_SEQ_INIT))) {
2839 failwhy = "Bad NVME LS F_CTL";
2840 } else {
2841 axchg = kzalloc(sizeof(*axchg), GFP_ATOMIC);
2842 if (!axchg)
2843 failwhy = "No CTX memory";
2844 }
2845
2846 if (unlikely(failwhy)) {
2847 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
2848 "6154 Drop NVME LS: SID %06X OXID x%X: %s\n",
2849 sid, oxid, failwhy);
2850 goto out_fail;
2851 }
2852
2853 /* validate the source of the LS is logged in */
2854 ndlp = lpfc_findnode_did(phba->pport, sid);
2855 if (!ndlp || !NLP_CHK_NODE_ACT(ndlp) ||
2856 ((ndlp->nlp_state != NLP_STE_UNMAPPED_NODE) &&
2857 (ndlp->nlp_state != NLP_STE_MAPPED_NODE))) {
2858 lpfc_printf_log(phba, KERN_ERR, LOG_NVME_DISC,
2859 "6216 NVME Unsol rcv: No ndlp: "
2860 "NPort_ID x%x oxid x%x\n",
2861 sid, oxid);
2862 goto out_fail;
2863 }
2864
2865 axchg->phba = phba;
2866 axchg->ndlp = ndlp;
2867 axchg->size = size;
2868 axchg->oxid = oxid;
2869 axchg->sid = sid;
2870 axchg->wqeq = NULL;
2871 axchg->state = LPFC_NVME_STE_LS_RCV;
2872 axchg->entry_cnt = 1;
2873 axchg->rqb_buffer = (void *)nvmebuf;
2874 axchg->hdwq = &phba->sli4_hba.hdwq[0];
2875 axchg->payload = nvmebuf->dbuf.virt;
2876 INIT_LIST_HEAD(&axchg->list);
2877
2878 if (phba->nvmet_support)
2879 ret = lpfc_nvmet_handle_lsreq(phba, axchg);
2880 else
2881 ret = lpfc_nvme_handle_lsreq(phba, axchg);
2882
2883
2884 if (!ret)
2885 return;
2886
2887 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
2888 "6155 Drop NVME LS from DID %06X: SID %06X OXID x%X "
2889 "NVMe%s handler failed %d\n",
2890 did, sid, oxid,
2891 (phba->nvmet_support) ? "T" : "I", ret);
2892
2893out_fail:
2894
2895 /* recycle receive buffer */
2896 lpfc_in_buf_free(phba, &nvmebuf->dbuf);
2897
2898 /* If start of new exchange, abort it */
2899 if (axchg && (fctl & FC_FC_FIRST_SEQ && !(fctl & FC_FC_EX_CTX)))
2900 ret = lpfc_nvme_unsol_ls_issue_abort(phba, axchg, sid, oxid);
2901
2902 if (ret)
2903 kfree(axchg);
2904}
2905
2906
2907
2908
2909
2910
2911
2912
2913
2914
2915
2916
2917
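/**
 * lpfc_complete_unsol_iocb - Complete an unsolicited sequence
 * @phba: Pointer to HBA context object.
 * @pring: Pointer to driver SLI ring object.
 * @saveq: Pointer to the iocbq struct representing the sequence starting
 *         frame.
 * @fch_r_ctl: the r_ctl for the first frame of the sequence.
 * @fch_type: the type for the first frame of the sequence.
 *
 * This function is called with no lock held to handle an unsolicited
 * sequence. NVME frames are routed to the NVME LS handler; otherwise
 * the ring profile handler or the handler registered for the matching
 * r_ctl/type mask is invoked. Returns 1 if a handler was called, else 0.
 **/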
2918static int
2919lpfc_complete_unsol_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
2920 struct lpfc_iocbq *saveq, uint32_t fch_r_ctl,
2921 uint32_t fch_type)
2922{
2923 int i;
2924
2925 switch (fch_type) {
2926 case FC_TYPE_NVME:
2927 lpfc_nvme_unsol_ls_handler(phba, saveq);
2928 return 1;
2929 default:
2930 break;
2931 }
2932
2933 /* A registered ring profile handles all unsolicited events */
2934 if (pring->prt[0].profile) {
2935 if (pring->prt[0].lpfc_sli_rcv_unsol_event)
2936 (pring->prt[0].lpfc_sli_rcv_unsol_event) (phba, pring,
2937 saveq);
2938 return 1;
2939 }
2940
2941 /* Search the ring masks for a matching rctl / type handler */
2942 for (i = 0; i < pring->num_mask; i++) {
2943 if ((pring->prt[i].rctl == fch_r_ctl) &&
2944 (pring->prt[i].type == fch_type)) {
2945 if (pring->prt[i].lpfc_sli_rcv_unsol_event)
2946 (pring->prt[i].lpfc_sli_rcv_unsol_event)
2947 (phba, pring, saveq);
2948 return 1;
2949 }
2950 }
2951 return 0;
2952}
2953
2954
2955
2956
2957
2958
2959
2960
2961
2962
2963
2964
2965
2966
2967
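/**
 * lpfc_sli_process_unsol_iocb - Unsolicited iocb handler
 * @phba: Pointer to HBA context object.
 * @pring: Pointer to driver SLI ring object.
 * @saveq: Pointer to the unsolicited iocb.
 *
 * This function is called with no lock held when the driver receives an
 * unsolicited iocb. It attaches the receive buffers to the iocb, gathers
 * the frames of a multi-iocb sequence on the ring's continuation queue,
 * determines the R_CTL and TYPE of the sequence, and hands the sequence
 * to lpfc_complete_unsol_iocb. The function returns 1 if the iocb can be
 * freed by the caller, else 0.
 **/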
2968static int
2969lpfc_sli_process_unsol_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
2970 struct lpfc_iocbq *saveq)
2971{
2972 IOCB_t * irsp;
2973 WORD5 * w5p;
2974 uint32_t Rctl, Type;
2975 struct lpfc_iocbq *iocbq;
2976 struct lpfc_dmabuf *dmzbuf;
2977
2978 irsp = &(saveq->iocb);
2979
2980 if (irsp->ulpCommand == CMD_ASYNC_STATUS) {
2981 if (pring->lpfc_sli_rcv_async_status)
2982 pring->lpfc_sli_rcv_async_status(phba, pring, saveq);
2983 else
2984 lpfc_printf_log(phba,
2985 KERN_WARNING,
2986 LOG_SLI,
2987 "0316 Ring %d handler: unexpected "
2988 "ASYNC_STATUS iocb received evt_code "
2989 "0x%x\n",
2990 pring->ringno,
2991 irsp->un.asyncstat.evt_code);
2992 return 1;
2993 }
2994
2995 if ((irsp->ulpCommand == CMD_IOCB_RET_XRI64_CX) &&
2996 (phba->sli3_options & LPFC_SLI3_HBQ_ENABLED)) {
2997 if (irsp->ulpBdeCount > 0) {
2998 dmzbuf = lpfc_sli_get_buff(phba, pring,
2999 irsp->un.ulpWord[3]);
3000 lpfc_in_buf_free(phba, dmzbuf);
3001 }
3002
3003 if (irsp->ulpBdeCount > 1) {
3004 dmzbuf = lpfc_sli_get_buff(phba, pring,
3005 irsp->unsli3.sli3Words[3]);
3006 lpfc_in_buf_free(phba, dmzbuf);
3007 }
3008
3009 if (irsp->ulpBdeCount > 2) {
3010 dmzbuf = lpfc_sli_get_buff(phba, pring,
3011 irsp->unsli3.sli3Words[7]);
3012 lpfc_in_buf_free(phba, dmzbuf);
3013 }
3014
3015 return 1;
3016 }
3017
3018 if (phba->sli3_options & LPFC_SLI3_HBQ_ENABLED) {
3019 if (irsp->ulpBdeCount != 0) {
3020 saveq->context2 = lpfc_sli_get_buff(phba, pring,
3021 irsp->un.ulpWord[3]);
3022 if (!saveq->context2)
3023 lpfc_printf_log(phba,
3024 KERN_ERR,
3025 LOG_SLI,
3026 "0341 Ring %d Cannot find buffer for "
3027 "an unsolicited iocb. tag 0x%x\n",
3028 pring->ringno,
3029 irsp->un.ulpWord[3]);
3030 }
3031 if (irsp->ulpBdeCount == 2) {
3032 saveq->context3 = lpfc_sli_get_buff(phba, pring,
3033 irsp->unsli3.sli3Words[7]);
3034 if (!saveq->context3)
3035 lpfc_printf_log(phba,
3036 KERN_ERR,
3037 LOG_SLI,
3038 "0342 Ring %d Cannot find buffer for an"
3039 " unsolicited iocb. tag 0x%x\n",
3040 pring->ringno,
3041 irsp->unsli3.sli3Words[7]);
3042 }
3043 list_for_each_entry(iocbq, &saveq->list, list) {
3044 irsp = &(iocbq->iocb);
3045 if (irsp->ulpBdeCount != 0) {
3046 iocbq->context2 = lpfc_sli_get_buff(phba, pring,
3047 irsp->un.ulpWord[3]);
3048 if (!iocbq->context2)
3049 lpfc_printf_log(phba,
3050 KERN_ERR,
3051 LOG_SLI,
3052 "0343 Ring %d Cannot find "
3053 "buffer for an unsolicited iocb"
3054 ". tag 0x%x\n", pring->ringno,
3055 irsp->un.ulpWord[3]);
3056 }
3057 if (irsp->ulpBdeCount == 2) {
3058 iocbq->context3 = lpfc_sli_get_buff(phba, pring,
3059 irsp->unsli3.sli3Words[7]);
3060 if (!iocbq->context3)
3061 lpfc_printf_log(phba,
3062 KERN_ERR,
3063 LOG_SLI,
3064 "0344 Ring %d Cannot find "
3065 "buffer for an unsolicited "
3066 "iocb. tag 0x%x\n",
3067 pring->ringno,
3068 irsp->unsli3.sli3Words[7]);
3069 }
3070 }
3071 }
3072 if (irsp->ulpBdeCount != 0 &&
3073 (irsp->ulpCommand == CMD_IOCB_RCV_CONT64_CX ||
3074 irsp->ulpStatus == IOSTAT_INTERMED_RSP)) {
3075 int found = 0;
3076
3077
3078 list_for_each_entry(iocbq, &pring->iocb_continue_saveq, clist) {
3079 if (iocbq->iocb.unsli3.rcvsli3.ox_id ==
3080 saveq->iocb.unsli3.rcvsli3.ox_id) {
3081 list_add_tail(&saveq->list, &iocbq->list);
3082 found = 1;
3083 break;
3084 }
3085 }
3086 if (!found)
3087 list_add_tail(&saveq->clist,
3088 &pring->iocb_continue_saveq);
3089 if (saveq->iocb.ulpStatus != IOSTAT_INTERMED_RSP) {
3090 list_del_init(&iocbq->clist);
3091 saveq = iocbq;
3092 irsp = &(saveq->iocb);
3093 } else
3094 return 0;
3095 }
3096 if ((irsp->ulpCommand == CMD_RCV_ELS_REQ64_CX) ||
3097 (irsp->ulpCommand == CMD_RCV_ELS_REQ_CX) ||
3098 (irsp->ulpCommand == CMD_IOCB_RCV_ELS64_CX)) {
3099 Rctl = FC_RCTL_ELS_REQ;
3100 Type = FC_TYPE_ELS;
3101 } else {
3102 w5p = (WORD5 *)&(saveq->iocb.un.ulpWord[5]);
3103 Rctl = w5p->hcsw.Rctl;
3104 Type = w5p->hcsw.Type;
3105
3106 /* Firmware Workaround */
3107 if ((Rctl == 0) && (pring->ringno == LPFC_ELS_RING) &&
3108 (irsp->ulpCommand == CMD_RCV_SEQUENCE64_CX ||
3109 irsp->ulpCommand == CMD_IOCB_RCV_SEQ64_CX)) {
3110 Rctl = FC_RCTL_ELS_REQ;
3111 Type = FC_TYPE_ELS;
3112 w5p->hcsw.Rctl = Rctl;
3113 w5p->hcsw.Type = Type;
3114 }
3115 }
3116
3117 if (!lpfc_complete_unsol_iocb(phba, pring, saveq, Rctl, Type))
3118 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
3119 "0313 Ring %d handler: unexpected Rctl x%x "
3120 "Type x%x received\n",
3121 pring->ringno, Rctl, Type);
3122
3123 return 1;
3124}
3125
3126
3127
3128
3129
3130
3131
3132
3133
3134
3135
3136
3137
3138
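/**
 * lpfc_sli_iocbq_lookup - Lookup command iocb for a response iocb
 * @phba: Pointer to HBA context object.
 * @pring: Pointer to driver SLI ring object.
 * @prspiocb: Pointer to response iocb object.
 *
 * This function looks up the iocb lookup table to get the command iocb
 * corresponding to the given response iocb, using the iotag of the
 * response iocb. The hbalock (SLI3) or the ring_lock (SLI4) is taken
 * internally while the command iocb is removed from the txcmplq.
 * This function returns the command iocb object if found, else NULL.
 **/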
3139static struct lpfc_iocbq *
3140lpfc_sli_iocbq_lookup(struct lpfc_hba *phba,
3141 struct lpfc_sli_ring *pring,
3142 struct lpfc_iocbq *prspiocb)
3143{
3144 struct lpfc_iocbq *cmd_iocb = NULL;
3145 uint16_t iotag;
3146 spinlock_t *temp_lock = NULL;
3147 unsigned long iflag = 0;
3148
3149 if (phba->sli_rev == LPFC_SLI_REV4)
3150 temp_lock = &pring->ring_lock;
3151 else
3152 temp_lock = &phba->hbalock;
3153
3154 spin_lock_irqsave(temp_lock, iflag);
3155 iotag = prspiocb->iocb.ulpIoTag;
3156
3157 if (iotag != 0 && iotag <= phba->sli.last_iotag) {
3158 cmd_iocb = phba->sli.iocbq_lookup[iotag];
3159 if (cmd_iocb->iocb_flag & LPFC_IO_ON_TXCMPLQ) {
3160 /* remove the command iocb from the txcmplq */
3161 list_del_init(&cmd_iocb->list);
3162 cmd_iocb->iocb_flag &= ~LPFC_IO_ON_TXCMPLQ;
3163 pring->txcmplq_cnt--;
3164 spin_unlock_irqrestore(temp_lock, iflag);
3165 return cmd_iocb;
3166 }
3167 }
3168
3169 spin_unlock_irqrestore(temp_lock, iflag);
3170 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
3171 "0317 iotag x%x is out of "
3172 "range: max iotag x%x wd0 x%x\n",
3173 iotag, phba->sli.last_iotag,
3174 *(((uint32_t *) &prspiocb->iocb) + 7));
3175 return NULL;
3176}
3177
3178
3179
3180
3181
3182
3183
3184
3185
3186
3187
3188
3189
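/**
 * lpfc_sli_iocbq_lookup_by_tag - Lookup command iocb by its iotag
 * @phba: Pointer to HBA context object.
 * @pring: Pointer to driver SLI ring object.
 * @iotag: IOCB tag.
 *
 * This function looks up the iocb lookup table to get the command iocb
 * corresponding to the given iotag. The hbalock (SLI3) or the ring_lock
 * (SLI4) is taken internally while the command iocb is removed from the
 * txcmplq. This function returns the command iocb object if found, else
 * NULL.
 **/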
3190static struct lpfc_iocbq *
3191lpfc_sli_iocbq_lookup_by_tag(struct lpfc_hba *phba,
3192 struct lpfc_sli_ring *pring, uint16_t iotag)
3193{
3194 struct lpfc_iocbq *cmd_iocb = NULL;
3195 spinlock_t *temp_lock = NULL;
3196 unsigned long iflag = 0;
3197
3198 if (phba->sli_rev == LPFC_SLI_REV4)
3199 temp_lock = &pring->ring_lock;
3200 else
3201 temp_lock = &phba->hbalock;
3202
3203 spin_lock_irqsave(temp_lock, iflag);
3204 if (iotag != 0 && iotag <= phba->sli.last_iotag) {
3205 cmd_iocb = phba->sli.iocbq_lookup[iotag];
3206 if (cmd_iocb->iocb_flag & LPFC_IO_ON_TXCMPLQ) {
3207 /* remove the command iocb from the txcmplq */
3208 list_del_init(&cmd_iocb->list);
3209 cmd_iocb->iocb_flag &= ~LPFC_IO_ON_TXCMPLQ;
3210 pring->txcmplq_cnt--;
3211 spin_unlock_irqrestore(temp_lock, iflag);
3212 return cmd_iocb;
3213 }
3214 }
3215
3216 spin_unlock_irqrestore(temp_lock, iflag);
3217 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
3218 "0372 iotag x%x lookup error: max iotag (x%x) "
3219 "iocb_flag x%x\n",
3220 iotag, phba->sli.last_iotag,
3221 cmd_iocb ? cmd_iocb->iocb_flag : 0xffff);
3222 return NULL;
3223}
3224
3225
3226
3227
3228
3229
3230
3231
3232
3233
3234
3235
3236
3237
3238
3239
3240
3241
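/**
 * lpfc_sli_process_sol_iocb - process solicited iocb completion
 * @phba: Pointer to HBA context object.
 * @pring: Pointer to driver SLI ring object.
 * @saveq: Pointer to the response iocb to be processed.
 *
 * This function is called by the ring event handler for non-fcp rings
 * with no lock held. It looks up the command iocb for the response,
 * adjusts the status of driver-aborted and exchange-busy ELS commands,
 * and then invokes the command's completion handler or releases the
 * command iocb. The function always returns 1.
 **/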
3242static int
3243lpfc_sli_process_sol_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
3244 struct lpfc_iocbq *saveq)
3245{
3246 struct lpfc_iocbq *cmdiocbp;
3247 int rc = 1;
3248 unsigned long iflag;
3249
3250 cmdiocbp = lpfc_sli_iocbq_lookup(phba, pring, saveq);
3251 if (cmdiocbp) {
3252 if (cmdiocbp->iocb_cmpl) {
3253 /*
3254 * If an ELS command failed, send an event to the
3255 * management application.
3256 */
3257 if (saveq->iocb.ulpStatus &&
3258 (pring->ringno == LPFC_ELS_RING) &&
3259 (cmdiocbp->iocb.ulpCommand ==
3260 CMD_ELS_REQUEST64_CR))
3261 lpfc_send_els_failure_event(phba,
3262 cmdiocbp, saveq);
3263
3264 /*
3265 * Post all ELS completions to the worker thread.
3266 * All others are passed to the completion callback.
3267 */
3268 if (pring->ringno == LPFC_ELS_RING) {
3269 if ((phba->sli_rev < LPFC_SLI_REV4) &&
3270 (cmdiocbp->iocb_flag &
3271 LPFC_DRIVER_ABORTED)) {
3272 spin_lock_irqsave(&phba->hbalock,
3273 iflag);
3274 cmdiocbp->iocb_flag &=
3275 ~LPFC_DRIVER_ABORTED;
3276 spin_unlock_irqrestore(&phba->hbalock,
3277 iflag);
3278 saveq->iocb.ulpStatus =
3279 IOSTAT_LOCAL_REJECT;
3280 saveq->iocb.un.ulpWord[4] =
3281 IOERR_SLI_ABORTED;
3282
3283
3284
3285
3286
3287 spin_lock_irqsave(&phba->hbalock,
3288 iflag);
3289 saveq->iocb_flag |= LPFC_DELAY_MEM_FREE;
3290 spin_unlock_irqrestore(&phba->hbalock,
3291 iflag);
3292 }
3293 if (phba->sli_rev == LPFC_SLI_REV4) {
3294 if (saveq->iocb_flag &
3295 LPFC_EXCHANGE_BUSY) {
3296
3297
3298
3299
3300
3301
3302 spin_lock_irqsave(
3303 &phba->hbalock, iflag);
3304 cmdiocbp->iocb_flag |=
3305 LPFC_EXCHANGE_BUSY;
3306 spin_unlock_irqrestore(
3307 &phba->hbalock, iflag);
3308 }
3309 if (cmdiocbp->iocb_flag &
3310 LPFC_DRIVER_ABORTED) {
3311
3312
3313
3314
3315
3316 spin_lock_irqsave(
3317 &phba->hbalock, iflag);
3318 cmdiocbp->iocb_flag &=
3319 ~LPFC_DRIVER_ABORTED;
3320 spin_unlock_irqrestore(
3321 &phba->hbalock, iflag);
3322 cmdiocbp->iocb.ulpStatus =
3323 IOSTAT_LOCAL_REJECT;
3324 cmdiocbp->iocb.un.ulpWord[4] =
3325 IOERR_ABORT_REQUESTED;
3326
3327
3328
3329
3330
3331
3332 saveq->iocb.ulpStatus =
3333 IOSTAT_LOCAL_REJECT;
3334 saveq->iocb.un.ulpWord[4] =
3335 IOERR_SLI_ABORTED;
3336 spin_lock_irqsave(
3337 &phba->hbalock, iflag);
3338 saveq->iocb_flag |=
3339 LPFC_DELAY_MEM_FREE;
3340 spin_unlock_irqrestore(
3341 &phba->hbalock, iflag);
3342 }
3343 }
3344 }
3345 (cmdiocbp->iocb_cmpl) (phba, cmdiocbp, saveq);
3346 } else
3347 lpfc_sli_release_iocbq(phba, cmdiocbp);
3348 } else {
3349 /*
3350 * Unknown initiating command based on the response iotag.
3351 * This could be the case on the ELS ring because of
3352 * lpfc_els_abort().
3353 */
3354 if (pring->ringno != LPFC_ELS_RING) {
3355
3356
3357
3358
3359 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
3360 "0322 Ring %d handler: "
3361 "unexpected completion IoTag x%x "
3362 "Data: x%x x%x x%x x%x\n",
3363 pring->ringno,
3364 saveq->iocb.ulpIoTag,
3365 saveq->iocb.ulpStatus,
3366 saveq->iocb.un.ulpWord[4],
3367 saveq->iocb.ulpCommand,
3368 saveq->iocb.ulpContext);
3369 }
3370 }
3371
3372 return rc;
3373}
3374
3375
3376
3377
3378
3379
3380
3381
3382
3383
3384
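/**
 * lpfc_sli_rsp_pointers_error - Response ring pointer error handler
 * @phba: Pointer to HBA context object.
 * @pring: Pointer to driver SLI ring object.
 *
 * This function is called from the iocb ring event handlers when the put
 * pointer of the response ring is larger than the size of the ring. This
 * is an unrecoverable HBA error; the function sets the HBA error state
 * and attention bits and wakes up the worker thread to take the HBA
 * offline.
 **/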
3385static void
3386lpfc_sli_rsp_pointers_error(struct lpfc_hba *phba, struct lpfc_sli_ring *pring)
3387{
3388 struct lpfc_pgp *pgp = &phba->port_gp[pring->ringno];
3389
3390
3391
3392
3393 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
3394 "0312 Ring %d handler: portRspPut %d "
3395 "is bigger than rsp ring %d\n",
3396 pring->ringno, le32_to_cpu(pgp->rspPutInx),
3397 pring->sli.sli3.numRiocb);
3398
3399 phba->link_state = LPFC_HBA_ERROR;
3400
3401
3402
3403
3404
3405 phba->work_ha |= HA_ERATT;
3406 phba->work_hs = HS_FFER3;
3407
3408 lpfc_worker_wake_up(phba);
3409
3410 return;
3411}
3412
3413
3414
3415
3416
3417
3418
3419
3420
3421
3422
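/**
 * lpfc_poll_eratt - Error attention polling timer timeout handler
 * @t: Context to fetch pointer to address of HBA context object from.
 *
 * This function is invoked when the error attention polling timer times
 * out. It updates the interrupts-per-second statistic and checks whether
 * there is an error attention event pending. If so, it wakes up the
 * worker thread to process it; otherwise it restarts the timer for the
 * next poll interval.
 **/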
3423void lpfc_poll_eratt(struct timer_list *t)
3424{
3425 struct lpfc_hba *phba;
3426 uint32_t eratt = 0;
3427 uint64_t sli_intr, cnt;
3428
3429 phba = from_timer(phba, t, eratt_poll);
3430
3431
3432 sli_intr = phba->sli.slistat.sli_intr;
3433
3434 if (phba->sli.slistat.sli_prev_intr > sli_intr)
3435 cnt = (((uint64_t)(-1) - phba->sli.slistat.sli_prev_intr) +
3436 sli_intr);
3437 else
3438 cnt = (sli_intr - phba->sli.slistat.sli_prev_intr);
3439
3440
3441 do_div(cnt, phba->eratt_poll_interval);
3442 phba->sli.slistat.sli_ips = cnt;
3443
3444 phba->sli.slistat.sli_prev_intr = sli_intr;
3445
3446
3447 eratt = lpfc_sli_check_eratt(phba);
3448
3449 if (eratt)
3450
3451 lpfc_worker_wake_up(phba);
3452 else
3453
3454 mod_timer(&phba->eratt_poll,
3455 jiffies +
3456 msecs_to_jiffies(1000 * phba->eratt_poll_interval));
3457 return;
3458}
3459
3460
3461
3462
3463
3464
3465
3466
3467
3468
3469
3470
3471
3472
3473
3474
3475
3476
3477
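/**
 * lpfc_sli_handle_fast_ring_event - Handle ring events on FCP ring
 * @phba: Pointer to HBA context object.
 * @pring: Pointer to driver SLI ring object.
 * @mask: Host attention register mask for this ring.
 *
 * This function is called from the interrupt context when there is a
 * ring event for the fcp ring. The caller does not hold any lock. The
 * function processes each response iocb until the ring is empty: the
 * entry is copied into a local iocb, the corresponding command iocb is
 * looked up and completed, and the host response get index is updated.
 * This function always returns 1.
 **/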
3478int
3479lpfc_sli_handle_fast_ring_event(struct lpfc_hba *phba,
3480 struct lpfc_sli_ring *pring, uint32_t mask)
3481{
3482 struct lpfc_pgp *pgp = &phba->port_gp[pring->ringno];
3483 IOCB_t *irsp = NULL;
3484 IOCB_t *entry = NULL;
3485 struct lpfc_iocbq *cmdiocbq = NULL;
3486 struct lpfc_iocbq rspiocbq;
3487 uint32_t status;
3488 uint32_t portRspPut, portRspMax;
3489 int rc = 1;
3490 lpfc_iocb_type type;
3491 unsigned long iflag;
3492 uint32_t rsp_cmpl = 0;
3493
3494 spin_lock_irqsave(&phba->hbalock, iflag);
3495 pring->stats.iocb_event++;
3496
3497 /*
3498 * The next available response entry should never exceed the maximum
3499 * entries. If it does, treat it as an adapter hardware error.
3500 */
3501 portRspMax = pring->sli.sli3.numRiocb;
3502 portRspPut = le32_to_cpu(pgp->rspPutInx);
3503 if (unlikely(portRspPut >= portRspMax)) {
3504 lpfc_sli_rsp_pointers_error(phba, pring);
3505 spin_unlock_irqrestore(&phba->hbalock, iflag);
3506 return 1;
3507 }
3508 if (phba->fcp_ring_in_use) {
3509 spin_unlock_irqrestore(&phba->hbalock, iflag);
3510 return 1;
3511 } else
3512 phba->fcp_ring_in_use = 1;
3513
3514 rmb();
3515 while (pring->sli.sli3.rspidx != portRspPut) {
3516 /*
3517 * Fetch an entry of the ring and copy it into a local data
3518 * structure. The copy involves a byte-swap since the
3519 * network byte order and pci byte orders are different.
3520 */
3521 entry = lpfc_resp_iocb(phba, pring);
3522 phba->last_completion_time = jiffies;
3523
3524 if (++pring->sli.sli3.rspidx >= portRspMax)
3525 pring->sli.sli3.rspidx = 0;
3526
3527 lpfc_sli_pcimem_bcopy((uint32_t *) entry,
3528 (uint32_t *) &rspiocbq.iocb,
3529 phba->iocb_rsp_size);
3530 INIT_LIST_HEAD(&(rspiocbq.list));
3531 irsp = &rspiocbq.iocb;
3532
3533 type = lpfc_sli_iocb_cmd_type(irsp->ulpCommand & CMD_IOCB_MASK);
3534 pring->stats.iocb_rsp++;
3535 rsp_cmpl++;
3536
3537 if (unlikely(irsp->ulpStatus)) {
3538
3539
3540
3541
3542 if ((irsp->ulpStatus == IOSTAT_LOCAL_REJECT) &&
3543 ((irsp->un.ulpWord[4] & IOERR_PARAM_MASK) ==
3544 IOERR_NO_RESOURCES)) {
3545 spin_unlock_irqrestore(&phba->hbalock, iflag);
3546 phba->lpfc_rampdown_queue_depth(phba);
3547 spin_lock_irqsave(&phba->hbalock, iflag);
3548 }
3549
3550
3551 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
3552 "0336 Rsp Ring %d error: IOCB Data: "
3553 "x%x x%x x%x x%x x%x x%x x%x x%x\n",
3554 pring->ringno,
3555 irsp->un.ulpWord[0],
3556 irsp->un.ulpWord[1],
3557 irsp->un.ulpWord[2],
3558 irsp->un.ulpWord[3],
3559 irsp->un.ulpWord[4],
3560 irsp->un.ulpWord[5],
3561 *(uint32_t *)&irsp->un1,
3562 *((uint32_t *)&irsp->un1 + 1));
3563 }
3564
3565 switch (type) {
3566 case LPFC_ABORT_IOCB:
3567 case LPFC_SOL_IOCB:
3568
3569
3570
3571
3572 if (unlikely(irsp->ulpCommand == CMD_XRI_ABORTED_CX)) {
3573 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
3574 "0333 IOCB cmd 0x%x"
3575 " processed. Skipping"
3576 " completion\n",
3577 irsp->ulpCommand);
3578 break;
3579 }
3580
3581 spin_unlock_irqrestore(&phba->hbalock, iflag);
3582 cmdiocbq = lpfc_sli_iocbq_lookup(phba, pring,
3583 &rspiocbq);
3584 spin_lock_irqsave(&phba->hbalock, iflag);
3585 if (unlikely(!cmdiocbq))
3586 break;
3587 if (cmdiocbq->iocb_flag & LPFC_DRIVER_ABORTED)
3588 cmdiocbq->iocb_flag &= ~LPFC_DRIVER_ABORTED;
3589 if (cmdiocbq->iocb_cmpl) {
3590 spin_unlock_irqrestore(&phba->hbalock, iflag);
3591 (cmdiocbq->iocb_cmpl)(phba, cmdiocbq,
3592 &rspiocbq);
3593 spin_lock_irqsave(&phba->hbalock, iflag);
3594 }
3595 break;
3596 case LPFC_UNSOL_IOCB:
3597 spin_unlock_irqrestore(&phba->hbalock, iflag);
3598 lpfc_sli_process_unsol_iocb(phba, pring, &rspiocbq);
3599 spin_lock_irqsave(&phba->hbalock, iflag);
3600 break;
3601 default:
3602 if (irsp->ulpCommand == CMD_ADAPTER_MSG) {
3603 char adaptermsg[LPFC_MAX_ADPTMSG];
3604 memset(adaptermsg, 0, LPFC_MAX_ADPTMSG);
3605 memcpy(&adaptermsg[0], (uint8_t *) irsp,
3606 MAX_MSG_DATA);
3607 dev_warn(&((phba->pcidev)->dev),
3608 "lpfc%d: %s\n",
3609 phba->brd_no, adaptermsg);
3610 } else {
3611
3612 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
3613 "0334 Unknown IOCB command "
3614 "Data: x%x, x%x x%x x%x x%x\n",
3615 type, irsp->ulpCommand,
3616 irsp->ulpStatus,
3617 irsp->ulpIoTag,
3618 irsp->ulpContext);
3619 }
3620 break;
3621 }
3622
3623
3624
3625
3626
3627
3628
3629 writel(pring->sli.sli3.rspidx,
3630 &phba->host_gp[pring->ringno].rspGetInx);
3631
3632 if (pring->sli.sli3.rspidx == portRspPut)
3633 portRspPut = le32_to_cpu(pgp->rspPutInx);
3634 }
3635
3636 if ((rsp_cmpl > 0) && (mask & HA_R0RE_REQ)) {
3637 pring->stats.iocb_rsp_full++;
3638 status = ((CA_R0ATT | CA_R0RE_RSP) << (pring->ringno * 4));
3639 writel(status, phba->CAregaddr);
3640 readl(phba->CAregaddr);
3641 }
3642 if ((mask & HA_R0CE_RSP) && (pring->flag & LPFC_CALL_RING_AVAILABLE)) {
3643 pring->flag &= ~LPFC_CALL_RING_AVAILABLE;
3644 pring->stats.iocb_cmd_empty++;
3645
3646
3647 pring->sli.sli3.local_getidx = le32_to_cpu(pgp->cmdGetInx);
3648 lpfc_sli_resume_iocb(phba, pring);
3649
3650 if ((pring->lpfc_sli_cmd_available))
3651 (pring->lpfc_sli_cmd_available) (phba, pring);
3652
3653 }
3654
3655 phba->fcp_ring_in_use = 0;
3656 spin_unlock_irqrestore(&phba->hbalock, iflag);
3657 return rc;
3658}
3659
3660
3661
3662
3663
3664
3665
3666
3667
3668
3669
3670
3671
3672
3673
3674
3675
3676
3677
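/**
 * lpfc_sli_sp_handle_rspiocb - Handle slow-path response iocb
 * @phba: Pointer to HBA context object.
 * @pring: Pointer to driver SLI ring object.
 * @rspiocbp: Pointer to driver response iocb object.
 *
 * This function is called from the worker thread when there is a
 * slow-path response iocb to process. The iocb is added to the ring's
 * continuation queue; once the last iocb of the sequence (ulpLe set)
 * arrives, the response is dispatched to the solicited, unsolicited, or
 * abort handler according to its command type, and the response iocbs
 * are released. The function returns the response iocb pointer if it has
 * not yet been consumed, else NULL.
 **/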
3678static struct lpfc_iocbq *
3679lpfc_sli_sp_handle_rspiocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
3680 struct lpfc_iocbq *rspiocbp)
3681{
3682 struct lpfc_iocbq *saveq;
3683 struct lpfc_iocbq *cmdiocbp;
3684 struct lpfc_iocbq *next_iocb;
3685 IOCB_t *irsp = NULL;
3686 uint32_t free_saveq;
3687 uint8_t iocb_cmd_type;
3688 lpfc_iocb_type type;
3689 unsigned long iflag;
3690 int rc;
3691
3692 spin_lock_irqsave(&phba->hbalock, iflag);
3693
3694 list_add_tail(&rspiocbp->list, &(pring->iocb_continueq));
3695 pring->iocb_continueq_cnt++;
3696
3697
3698 irsp = &rspiocbp->iocb;
3699 if (irsp->ulpLe) {
3700
3701
3702
3703
3704 free_saveq = 1;
3705 saveq = list_get_first(&pring->iocb_continueq,
3706 struct lpfc_iocbq, list);
3707 irsp = &(saveq->iocb);
3708 list_del_init(&pring->iocb_continueq);
3709 pring->iocb_continueq_cnt = 0;
3710
3711 pring->stats.iocb_rsp++;
3712
3713
3714
3715
3716
3717 if ((irsp->ulpStatus == IOSTAT_LOCAL_REJECT) &&
3718 ((irsp->un.ulpWord[4] & IOERR_PARAM_MASK) ==
3719 IOERR_NO_RESOURCES)) {
3720 spin_unlock_irqrestore(&phba->hbalock, iflag);
3721 phba->lpfc_rampdown_queue_depth(phba);
3722 spin_lock_irqsave(&phba->hbalock, iflag);
3723 }
3724
3725 if (irsp->ulpStatus) {
3726
3727 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
3728 "0328 Rsp Ring %d error: "
3729 "IOCB Data: "
3730 "x%x x%x x%x x%x "
3731 "x%x x%x x%x x%x "
3732 "x%x x%x x%x x%x "
3733 "x%x x%x x%x x%x\n",
3734 pring->ringno,
3735 irsp->un.ulpWord[0],
3736 irsp->un.ulpWord[1],
3737 irsp->un.ulpWord[2],
3738 irsp->un.ulpWord[3],
3739 irsp->un.ulpWord[4],
3740 irsp->un.ulpWord[5],
3741 *(((uint32_t *) irsp) + 6),
3742 *(((uint32_t *) irsp) + 7),
3743 *(((uint32_t *) irsp) + 8),
3744 *(((uint32_t *) irsp) + 9),
3745 *(((uint32_t *) irsp) + 10),
3746 *(((uint32_t *) irsp) + 11),
3747 *(((uint32_t *) irsp) + 12),
3748 *(((uint32_t *) irsp) + 13),
3749 *(((uint32_t *) irsp) + 14),
3750 *(((uint32_t *) irsp) + 15));
3751 }
3752
3753
3754
3755
3756
3757
3758
3759 iocb_cmd_type = irsp->ulpCommand & CMD_IOCB_MASK;
3760 type = lpfc_sli_iocb_cmd_type(iocb_cmd_type);
3761 switch (type) {
3762 case LPFC_SOL_IOCB:
3763 spin_unlock_irqrestore(&phba->hbalock, iflag);
3764 rc = lpfc_sli_process_sol_iocb(phba, pring, saveq);
3765 spin_lock_irqsave(&phba->hbalock, iflag);
3766 break;
3767
3768 case LPFC_UNSOL_IOCB:
3769 spin_unlock_irqrestore(&phba->hbalock, iflag);
3770 rc = lpfc_sli_process_unsol_iocb(phba, pring, saveq);
3771 spin_lock_irqsave(&phba->hbalock, iflag);
3772 if (!rc)
3773 free_saveq = 0;
3774 break;
3775
3776 case LPFC_ABORT_IOCB:
3777 cmdiocbp = NULL;
3778 if (irsp->ulpCommand != CMD_XRI_ABORTED_CX) {
3779 spin_unlock_irqrestore(&phba->hbalock, iflag);
3780 cmdiocbp = lpfc_sli_iocbq_lookup(phba, pring,
3781 saveq);
3782 spin_lock_irqsave(&phba->hbalock, iflag);
3783 }
3784 if (cmdiocbp) {
3785
3786 if (cmdiocbp->iocb_cmpl) {
3787 spin_unlock_irqrestore(&phba->hbalock,
3788 iflag);
3789 (cmdiocbp->iocb_cmpl)(phba, cmdiocbp,
3790 saveq);
3791 spin_lock_irqsave(&phba->hbalock,
3792 iflag);
3793 } else
3794 __lpfc_sli_release_iocbq(phba,
3795 cmdiocbp);
3796 }
3797 break;
3798
3799 case LPFC_UNKNOWN_IOCB:
3800 if (irsp->ulpCommand == CMD_ADAPTER_MSG) {
3801 char adaptermsg[LPFC_MAX_ADPTMSG];
3802 memset(adaptermsg, 0, LPFC_MAX_ADPTMSG);
3803 memcpy(&adaptermsg[0], (uint8_t *)irsp,
3804 MAX_MSG_DATA);
3805 dev_warn(&((phba->pcidev)->dev),
3806 "lpfc%d: %s\n",
3807 phba->brd_no, adaptermsg);
3808 } else {
3809
3810 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
3811 "0335 Unknown IOCB "
3812 "command Data: x%x "
3813 "x%x x%x x%x\n",
3814 irsp->ulpCommand,
3815 irsp->ulpStatus,
3816 irsp->ulpIoTag,
3817 irsp->ulpContext);
3818 }
3819 break;
3820 }
3821
3822 if (free_saveq) {
3823 list_for_each_entry_safe(rspiocbp, next_iocb,
3824 &saveq->list, list) {
3825 list_del_init(&rspiocbp->list);
3826 __lpfc_sli_release_iocbq(phba, rspiocbp);
3827 }
3828 __lpfc_sli_release_iocbq(phba, saveq);
3829 }
3830 rspiocbp = NULL;
3831 }
3832 spin_unlock_irqrestore(&phba->hbalock, iflag);
3833 return rspiocbp;
3834}
3835
3836
3837
3838
3839
3840
3841
3842
3843
3844
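/**
 * lpfc_sli_handle_slow_ring_event - Wrapper func for handling slow ring events
 * @phba: Pointer to HBA context object.
 * @pring: Pointer to driver SLI ring object.
 * @mask: Host attention register mask for this ring.
 *
 * This routine wraps the actual slow ring event process routine from the
 * API jump table function pointer in the lpfc_hba struct.
 **/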
3845void
3846lpfc_sli_handle_slow_ring_event(struct lpfc_hba *phba,
3847 struct lpfc_sli_ring *pring, uint32_t mask)
3848{
3849 phba->lpfc_sli_handle_slow_ring_event(phba, pring, mask);
3850}
3851
3852
3853
3854
3855
3856
3857
3858
3859
3860
3861
3862
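/**
 * lpfc_sli_handle_slow_ring_event_s3 - Handle SLI3 ring event for non-FCP rings
 * @phba: Pointer to HBA context object.
 * @pring: Pointer to driver SLI ring object.
 * @mask: Host attention register mask for this ring.
 *
 * This function is called from the worker thread when there is a ring
 * event for a non-fcp ring. The caller does not hold any lock. The
 * function copies each response entry into a newly allocated driver iocb,
 * updates the host get index, and passes the iocb to
 * lpfc_sli_sp_handle_rspiocb for processing.
 **/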
3863static void
3864lpfc_sli_handle_slow_ring_event_s3(struct lpfc_hba *phba,
3865 struct lpfc_sli_ring *pring, uint32_t mask)
3866{
3867 struct lpfc_pgp *pgp;
3868 IOCB_t *entry;
3869 IOCB_t *irsp = NULL;
3870 struct lpfc_iocbq *rspiocbp = NULL;
3871 uint32_t portRspPut, portRspMax;
3872 unsigned long iflag;
3873 uint32_t status;
3874
3875 pgp = &phba->port_gp[pring->ringno];
3876 spin_lock_irqsave(&phba->hbalock, iflag);
3877 pring->stats.iocb_event++;
3878
3879 /*
3880 * The next available response entry should never exceed the maximum
3881 * entries. If it does, treat it as an adapter hardware error.
3882 */
3883 portRspMax = pring->sli.sli3.numRiocb;
3884 portRspPut = le32_to_cpu(pgp->rspPutInx);
3885 if (portRspPut >= portRspMax) {
3886
3887
3888
3889
3890 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
3891 "0303 Ring %d handler: portRspPut %d "
3892 "is bigger than rsp ring %d\n",
3893 pring->ringno, portRspPut, portRspMax);
3894
3895 phba->link_state = LPFC_HBA_ERROR;
3896 spin_unlock_irqrestore(&phba->hbalock, iflag);
3897
3898 phba->work_hs = HS_FFER3;
3899 lpfc_handle_eratt(phba);
3900
3901 return;
3902 }
3903
3904 rmb();
3905 while (pring->sli.sli3.rspidx != portRspPut) {
3906 /*
3907 * Build a completion list and call the appropriate handler.
3908 * The process is to get the next available response iocb, get
3909 * a free iocb from the list, copy the response data into the
3910 * free iocb, insert to the continuation list, and update the
3911 * next response index to slim. This process makes response
3912 * iocb's in the ring available to DMA as fast as possible but
3913 * pays a penalty for a copy operation. Since the iocb is
3914 * only 32 bytes, this penalty is considered small relative to
3915 * the PCI reads for register values and a slim write. When
3916 * the ulpLe field is set, the entire Command has been
3917 * received.
3918 */
3919 entry = lpfc_resp_iocb(phba, pring);
3920
3921 phba->last_completion_time = jiffies;
3922 rspiocbp = __lpfc_sli_get_iocbq(phba);
3923 if (rspiocbp == NULL) {
3924 printk(KERN_ERR "%s: out of buffers! Failing "
3925 "completion.\n", __func__);
3926 break;
3927 }
3928
3929 lpfc_sli_pcimem_bcopy(entry, &rspiocbp->iocb,
3930 phba->iocb_rsp_size);
3931 irsp = &rspiocbp->iocb;
3932
3933 if (++pring->sli.sli3.rspidx >= portRspMax)
3934 pring->sli.sli3.rspidx = 0;
3935
3936 if (pring->ringno == LPFC_ELS_RING) {
3937 lpfc_debugfs_slow_ring_trc(phba,
3938 "IOCB rsp ring: wd4:x%08x wd6:x%08x wd7:x%08x",
3939 *(((uint32_t *) irsp) + 4),
3940 *(((uint32_t *) irsp) + 6),
3941 *(((uint32_t *) irsp) + 7));
3942 }
3943
3944 writel(pring->sli.sli3.rspidx,
3945 &phba->host_gp[pring->ringno].rspGetInx);
3946
3947 spin_unlock_irqrestore(&phba->hbalock, iflag);
3948
3949 rspiocbp = lpfc_sli_sp_handle_rspiocb(phba, pring, rspiocbp);
3950 spin_lock_irqsave(&phba->hbalock, iflag);
3951
3952
3953
3954
3955
3956
3957 if (pring->sli.sli3.rspidx == portRspPut) {
3958 portRspPut = le32_to_cpu(pgp->rspPutInx);
3959 }
3960 }
3961
3962 if ((rspiocbp != NULL) && (mask & HA_R0RE_REQ)) {
3963
3964 pring->stats.iocb_rsp_full++;
3965
3966 status = ((CA_R0ATT | CA_R0RE_RSP) << (pring->ringno * 4));
3967 writel(status, phba->CAregaddr);
3968 readl(phba->CAregaddr);
3969 }
3970 if ((mask & HA_R0CE_RSP) && (pring->flag & LPFC_CALL_RING_AVAILABLE)) {
3971 pring->flag &= ~LPFC_CALL_RING_AVAILABLE;
3972 pring->stats.iocb_cmd_empty++;
3973
3974
3975 pring->sli.sli3.local_getidx = le32_to_cpu(pgp->cmdGetInx);
3976 lpfc_sli_resume_iocb(phba, pring);
3977
3978 if ((pring->lpfc_sli_cmd_available))
3979 (pring->lpfc_sli_cmd_available) (phba, pring);
3980
3981 }
3982
3983 spin_unlock_irqrestore(&phba->hbalock, iflag);
3984 return;
3985}
3986
3987
3988
3989
3990
3991
3992
3993
3994
3995
3996
3997
3998
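/**
 * lpfc_sli_handle_slow_ring_event_s4 - Handle SLI4 slow-path els events
 * @phba: Pointer to HBA context object.
 * @pring: Pointer to driver SLI ring object.
 * @mask: Host attention register mask for this ring.
 *
 * This function is called from the worker thread when there is a pending
 * event on the driver's internal slow-path queue. The caller does not
 * hold any lock. The function drains the sp_queue_event list, handling
 * work-queue completions and received buffers, and processes at most 64
 * events per invocation to avoid starving other work.
 **/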
3999static void
4000lpfc_sli_handle_slow_ring_event_s4(struct lpfc_hba *phba,
4001 struct lpfc_sli_ring *pring, uint32_t mask)
4002{
4003 struct lpfc_iocbq *irspiocbq;
4004 struct hbq_dmabuf *dmabuf;
4005 struct lpfc_cq_event *cq_event;
4006 unsigned long iflag;
4007 int count = 0;
4008
4009 spin_lock_irqsave(&phba->hbalock, iflag);
4010 phba->hba_flag &= ~HBA_SP_QUEUE_EVT;
4011 spin_unlock_irqrestore(&phba->hbalock, iflag);
4012 while (!list_empty(&phba->sli4_hba.sp_queue_event)) {
4013
4014 spin_lock_irqsave(&phba->hbalock, iflag);
4015 list_remove_head(&phba->sli4_hba.sp_queue_event,
4016 cq_event, struct lpfc_cq_event, list);
4017 spin_unlock_irqrestore(&phba->hbalock, iflag);
4018
4019 switch (bf_get(lpfc_wcqe_c_code, &cq_event->cqe.wcqe_cmpl)) {
4020 case CQE_CODE_COMPL_WQE:
4021 irspiocbq = container_of(cq_event, struct lpfc_iocbq,
4022 cq_event);
4023
4024 irspiocbq = lpfc_sli4_els_wcqe_to_rspiocbq(phba,
4025 irspiocbq);
4026 if (irspiocbq)
4027 lpfc_sli_sp_handle_rspiocb(phba, pring,
4028 irspiocbq);
4029 count++;
4030 break;
4031 case CQE_CODE_RECEIVE:
4032 case CQE_CODE_RECEIVE_V1:
4033 dmabuf = container_of(cq_event, struct hbq_dmabuf,
4034 cq_event);
4035 lpfc_sli4_handle_received_buffer(phba, dmabuf);
4036 count++;
4037 break;
4038 default:
4039 break;
4040 }
4041
4042
4043 if (count == 64)
4044 break;
4045 }
4046}
4047
4048
4049
4050
4051
4052
4053
4054
4055
4056
4057
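/**
 * lpfc_sli_abort_iocb_ring - Abort all iocbs in the ring
 * @phba: Pointer to HBA context object.
 * @pring: Pointer to driver SLI ring object.
 *
 * This function aborts all iocbs in the given ring and frees all the
 * iocb objects in txq. For the ELS ring, pending fabric iocbs are
 * aborted first. An abort iocb is issued for every command in txcmplq;
 * those commands are not guaranteed to complete before this function
 * returns. The caller is not required to hold any lock.
 **/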
4058void
4059lpfc_sli_abort_iocb_ring(struct lpfc_hba *phba, struct lpfc_sli_ring *pring)
4060{
4061 LIST_HEAD(completions);
4062 struct lpfc_iocbq *iocb, *next_iocb;
4063
4064 if (pring->ringno == LPFC_ELS_RING) {
4065 lpfc_fabric_abort_hba(phba);
4066 }
4067
4068
4069
4070
4071 if (phba->sli_rev >= LPFC_SLI_REV4) {
4072 spin_lock_irq(&pring->ring_lock);
4073 list_splice_init(&pring->txq, &completions);
4074 pring->txq_cnt = 0;
4075 spin_unlock_irq(&pring->ring_lock);
4076
4077 spin_lock_irq(&phba->hbalock);
4078
4079 list_for_each_entry_safe(iocb, next_iocb, &pring->txcmplq, list)
4080 lpfc_sli_issue_abort_iotag(phba, pring, iocb);
4081 spin_unlock_irq(&phba->hbalock);
4082 } else {
4083 spin_lock_irq(&phba->hbalock);
4084 list_splice_init(&pring->txq, &completions);
4085 pring->txq_cnt = 0;
4086
4087
4088 list_for_each_entry_safe(iocb, next_iocb, &pring->txcmplq, list)
4089 lpfc_sli_issue_abort_iotag(phba, pring, iocb);
4090 spin_unlock_irq(&phba->hbalock);
4091 }
4092
4093
4094 lpfc_sli_cancel_iocbs(phba, &completions, IOSTAT_LOCAL_REJECT,
4095 IOERR_SLI_ABORTED);
4096}
4097
4098
4099
4100
4101
4102
4103
4104
4105
4106
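/**
 * lpfc_sli_abort_fcp_rings - Abort all iocbs in all FCP rings
 * @phba: Pointer to HBA context object.
 *
 * This function aborts all iocbs in the FCP rings by calling
 * lpfc_sli_abort_iocb_ring on each hardware queue ring (SLI4) or on the
 * single FCP ring (SLI3). The caller is not required to hold any lock.
 **/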
4107void
4108lpfc_sli_abort_fcp_rings(struct lpfc_hba *phba)
4109{
4110 struct lpfc_sli *psli = &phba->sli;
4111 struct lpfc_sli_ring *pring;
4112 uint32_t i;
4113
4114
4115 if (phba->sli_rev >= LPFC_SLI_REV4) {
4116 for (i = 0; i < phba->cfg_hdw_queue; i++) {
4117 pring = phba->sli4_hba.hdwq[i].io_wq->pring;
4118 lpfc_sli_abort_iocb_ring(phba, pring);
4119 }
4120 } else {
4121 pring = &psli->sli3_ring[LPFC_FCP_RING];
4122 lpfc_sli_abort_iocb_ring(phba, pring);
4123 }
4124}
4125
4126
4127
4128
4129
4130
4131
4132
4133
4134
4135
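/**
 * lpfc_sli_flush_io_rings - flush all iocbs in the IO ring
 * @phba: Pointer to HBA context object.
 *
 * This function flushes all iocbs in the IO rings and frees all the iocb
 * objects in txq and txcmplq. No abort iocbs are issued for the commands
 * in txcmplq; instead all outstanding IO is failed locally with
 * IOERR_SLI_DOWN status, so the SLI layer must already be deactivated
 * when this function is called. The HBA_IOQ_FLUSH flag guards against
 * concurrent flushes.
 **/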
4136void
4137lpfc_sli_flush_io_rings(struct lpfc_hba *phba)
4138{
4139 LIST_HEAD(txq);
4140 LIST_HEAD(txcmplq);
4141 struct lpfc_sli *psli = &phba->sli;
4142 struct lpfc_sli_ring *pring;
4143 uint32_t i;
4144 struct lpfc_iocbq *piocb, *next_iocb;
4145
4146 spin_lock_irq(&phba->hbalock);
4147 if (phba->hba_flag & HBA_IOQ_FLUSH ||
4148 !phba->sli4_hba.hdwq) {
4149 spin_unlock_irq(&phba->hbalock);
4150 return;
4151 }
4152
4153 phba->hba_flag |= HBA_IOQ_FLUSH;
4154 spin_unlock_irq(&phba->hbalock);
4155
4156
4157 if (phba->sli_rev >= LPFC_SLI_REV4) {
4158 for (i = 0; i < phba->cfg_hdw_queue; i++) {
4159 pring = phba->sli4_hba.hdwq[i].io_wq->pring;
4160
4161 spin_lock_irq(&pring->ring_lock);
4162
4163 list_splice_init(&pring->txq, &txq);
4164 list_for_each_entry_safe(piocb, next_iocb,
4165 &pring->txcmplq, list)
4166 piocb->iocb_flag &= ~LPFC_IO_ON_TXCMPLQ;
4167
4168 list_splice_init(&pring->txcmplq, &txcmplq);
4169 pring->txq_cnt = 0;
4170 pring->txcmplq_cnt = 0;
4171 spin_unlock_irq(&pring->ring_lock);
4172
4173
4174 lpfc_sli_cancel_iocbs(phba, &txq,
4175 IOSTAT_LOCAL_REJECT,
4176 IOERR_SLI_DOWN);
4177
4178 lpfc_sli_cancel_iocbs(phba, &txcmplq,
4179 IOSTAT_LOCAL_REJECT,
4180 IOERR_SLI_DOWN);
4181 }
4182 } else {
4183 pring = &psli->sli3_ring[LPFC_FCP_RING];
4184
4185 spin_lock_irq(&phba->hbalock);
4186
4187 list_splice_init(&pring->txq, &txq);
4188 list_for_each_entry_safe(piocb, next_iocb,
4189 &pring->txcmplq, list)
4190 piocb->iocb_flag &= ~LPFC_IO_ON_TXCMPLQ;
4191
4192 list_splice_init(&pring->txcmplq, &txcmplq);
4193 pring->txq_cnt = 0;
4194 pring->txcmplq_cnt = 0;
4195 spin_unlock_irq(&phba->hbalock);
4196
4197
4198 lpfc_sli_cancel_iocbs(phba, &txq, IOSTAT_LOCAL_REJECT,
4199 IOERR_SLI_DOWN);
4200
4201 lpfc_sli_cancel_iocbs(phba, &txcmplq, IOSTAT_LOCAL_REJECT,
4202 IOERR_SLI_DOWN);
4203 }
4204}
4205
4206
4207
4208
4209
4210
4211
4212
4213
4214
4215
4216
4217
4218
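/**
 * lpfc_sli_brdready_s3 - Check for sli3 host ready status
 * @phba: Pointer to HBA context object.
 * @mask: Bit mask to be checked against the status register.
 *
 * This function reads the host status register and compares the value
 * against the given mask to check whether the HBA is ready, retrying
 * with increasing delays and restarting the board partway through the
 * wait. The function returns 1 when the HBA fails to become ready or
 * reports a fatal error, otherwise zero.
 **/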
4219static int
4220lpfc_sli_brdready_s3(struct lpfc_hba *phba, uint32_t mask)
4221{
4222 uint32_t status;
4223 int i = 0;
4224 int retval = 0;
4225
4226
4227 if (lpfc_readl(phba->HSregaddr, &status))
4228 return 1;
4229
4230
4231
4232
4233
4234
4235
4236 while (((status & mask) != mask) &&
4237 !(status & HS_FFERM) &&
4238 i++ < 20) {
4239
4240 if (i <= 5)
4241 msleep(10);
4242 else if (i <= 10)
4243 msleep(500);
4244 else
4245 msleep(2500);
4246
4247 if (i == 15) {
4248
4249 phba->pport->port_state = LPFC_VPORT_UNKNOWN;
4250 lpfc_sli_brdrestart(phba);
4251 }
4252
4253 if (lpfc_readl(phba->HSregaddr, &status)) {
4254 retval = 1;
4255 break;
4256 }
4257 }
4258
4259
4260 if ((status & HS_FFERM) || (i >= 20)) {
4261 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
4262 "2751 Adapter failed to restart, "
4263 "status reg x%x, FW Data: A8 x%x AC x%x\n",
4264 status,
4265 readl(phba->MBslimaddr + 0xa8),
4266 readl(phba->MBslimaddr + 0xac));
4267 phba->link_state = LPFC_HBA_ERROR;
4268 retval = 1;
4269 }
4270
4271 return retval;
4272}
4273
4274
4275
4276
4277
4278
4279
4280
4281
4282
4283
4284
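/**
 * lpfc_sli_brdready_s4 - Check for sli4 host ready status
 * @phba: Pointer to HBA context object.
 * @mask: Bit mask to be checked (unused on SLI4).
 *
 * This function checks the SLI4 post status to determine whether the
 * port is ready. If the port is not ready, a board restart is attempted
 * once and the status re-checked. The function returns 1 when the HBA is
 * not ready; on success it marks interrupts disabled and returns zero.
 **/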
4285static int
4286lpfc_sli_brdready_s4(struct lpfc_hba *phba, uint32_t mask)
4287{
4288 uint32_t status;
4289 int retval = 0;
4290
4291
4292 status = lpfc_sli4_post_status_check(phba);
4293
4294 if (status) {
4295 phba->pport->port_state = LPFC_VPORT_UNKNOWN;
4296 lpfc_sli_brdrestart(phba);
4297 status = lpfc_sli4_post_status_check(phba);
4298 }
4299
4300
4301 if (status) {
4302 phba->link_state = LPFC_HBA_ERROR;
4303 retval = 1;
4304 } else
4305 phba->sli4_hba.intr_enable = 0;
4306
4307 return retval;
4308}
4309
4310
4311
4312
4313
4314
4315
4316
4317
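/**
 * lpfc_sli_brdready - Wrapper func for checking the hba readyness
 * @phba: Pointer to HBA context object.
 * @mask: Bit mask to be checked.
 *
 * This routine wraps the actual SLI3 or SLI4 hba readyness check routine
 * from the API jump table function pointer in the lpfc_hba struct.
 **/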
4318int
4319lpfc_sli_brdready(struct lpfc_hba *phba, uint32_t mask)
4320{
4321 return phba->lpfc_sli_brdready(phba, mask);
4322}
4323
4324#define BARRIER_TEST_PATTERN (0xdeadbeef)
4325
4326
4327
4328
4329
4330
4331
4332
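/**
 * lpfc_reset_barrier - Make HBA ready for HBA reset
 * @phba: Pointer to HBA context object.
 *
 * This function is called before resetting the HBA, with the hbalock
 * held. On the older Helios and Thor adapters it posts a KILL_BOARD
 * mailbox command directly to SLIM to ask the chip to suspend its DMA
 * activity, polls for the chip's acknowledgement, then clears any
 * resulting error attention and restores the host control register.
 **/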
4333void lpfc_reset_barrier(struct lpfc_hba *phba)
4334{
4335 uint32_t __iomem *resp_buf;
4336 uint32_t __iomem *mbox_buf;
4337 volatile uint32_t mbox;
4338 uint32_t hc_copy, ha_copy, resp_data;
4339 int i;
4340 uint8_t hdrtype;
4341
4342 lockdep_assert_held(&phba->hbalock);
4343
4344 pci_read_config_byte(phba->pcidev, PCI_HEADER_TYPE, &hdrtype);
4345 if (hdrtype != 0x80 ||
4346 (FC_JEDEC_ID(phba->vpd.rev.biuRev) != HELIOS_JEDEC_ID &&
4347 FC_JEDEC_ID(phba->vpd.rev.biuRev) != THOR_JEDEC_ID))
4348 return;
4349
4350 /*
4351 * Tell the other part of the chip to suspend temporarily all
4352 * its DMA activity.
4353 */
4354 resp_buf = phba->MBslimaddr;
4355
4356
4357 if (lpfc_readl(phba->HCregaddr, &hc_copy))
4358 return;
4359 writel((hc_copy & ~HC_ERINT_ENA), phba->HCregaddr);
4360 readl(phba->HCregaddr);
4361 phba->link_flag |= LS_IGNORE_ERATT;
4362
4363 if (lpfc_readl(phba->HAregaddr, &ha_copy))
4364 return;
4365 if (ha_copy & HA_ERATT) {
4366
4367 writel(HA_ERATT, phba->HAregaddr);
4368 phba->pport->stopped = 1;
4369 }
4370
4371 mbox = 0;
4372 ((MAILBOX_t *)&mbox)->mbxCommand = MBX_KILL_BOARD;
4373 ((MAILBOX_t *)&mbox)->mbxOwner = OWN_CHIP;
4374
4375 writel(BARRIER_TEST_PATTERN, (resp_buf + 1));
4376 mbox_buf = phba->MBslimaddr;
4377 writel(mbox, mbox_buf);
4378
4379 for (i = 0; i < 50; i++) {
4380 if (lpfc_readl((resp_buf + 1), &resp_data))
4381 return;
4382 if (resp_data != ~(BARRIER_TEST_PATTERN))
4383 mdelay(1);
4384 else
4385 break;
4386 }
4387 resp_data = 0;
4388 if (lpfc_readl((resp_buf + 1), &resp_data))
4389 return;
4390 if (resp_data != ~(BARRIER_TEST_PATTERN)) {
4391 if (phba->sli.sli_flag & LPFC_SLI_ACTIVE ||
4392 phba->pport->stopped)
4393 goto restore_hc;
4394 else
4395 goto clear_errat;
4396 }
4397
4398 ((MAILBOX_t *)&mbox)->mbxOwner = OWN_HOST;
4399 resp_data = 0;
4400 for (i = 0; i < 500; i++) {
4401 if (lpfc_readl(resp_buf, &resp_data))
4402 return;
4403 if (resp_data != mbox)
4404 mdelay(1);
4405 else
4406 break;
4407 }
4408
4409clear_errat:
4410
4411 while (++i < 500) {
4412 if (lpfc_readl(phba->HAregaddr, &ha_copy))
4413 return;
4414 if (!(ha_copy & HA_ERATT))
4415 mdelay(1);
4416 else
4417 break;
4418 }
4419
4420 if (readl(phba->HAregaddr) & HA_ERATT) {
4421 writel(HA_ERATT, phba->HAregaddr);
4422 phba->pport->stopped = 1;
4423 }
4424
4425restore_hc:
4426 phba->link_flag &= ~LS_IGNORE_ERATT;
4427 writel(hc_copy, phba->HCregaddr);
4428 readl(phba->HCregaddr);
4429}
4430
4431
4432
4433
4434
4435
4436
4437
4438
4439
4440
4441
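/**
 * lpfc_sli_brdkill - Issue a kill_board mailbox command
 * @phba: Pointer to HBA context object.
 *
 * This function issues a kill_board mailbox command to stop firmware
 * processing and waits for the resulting error attention interrupt,
 * ignoring error attention handling while the kill is in flight. The
 * caller is not required to hold any lock. Pending commands are cleaned
 * up with lpfc_hba_down_post. The function returns 0 when the error
 * attention arrives, else 1.
 **/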
4442int
4443lpfc_sli_brdkill(struct lpfc_hba *phba)
4444{
4445 struct lpfc_sli *psli;
4446 LPFC_MBOXQ_t *pmb;
4447 uint32_t status;
4448 uint32_t ha_copy;
4449 int retval;
4450 int i = 0;
4451
4452 psli = &phba->sli;
4453
4454
4455 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
4456 "0329 Kill HBA Data: x%x x%x\n",
4457 phba->pport->port_state, psli->sli_flag);
4458
4459 pmb = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
4460 if (!pmb)
4461 return 1;
4462
4463
4464 spin_lock_irq(&phba->hbalock);
4465 if (lpfc_readl(phba->HCregaddr, &status)) {
4466 spin_unlock_irq(&phba->hbalock);
4467 mempool_free(pmb, phba->mbox_mem_pool);
4468 return 1;
4469 }
4470 status &= ~HC_ERINT_ENA;
4471 writel(status, phba->HCregaddr);
4472 readl(phba->HCregaddr);
4473 phba->link_flag |= LS_IGNORE_ERATT;
4474 spin_unlock_irq(&phba->hbalock);
4475
4476 lpfc_kill_board(phba, pmb);
4477 pmb->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
4478 retval = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT);
4479
4480 if (retval != MBX_SUCCESS) {
4481 if (retval != MBX_BUSY)
4482 mempool_free(pmb, phba->mbox_mem_pool);
4483 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
4484 "2752 KILL_BOARD command failed retval %d\n",
4485 retval);
4486 spin_lock_irq(&phba->hbalock);
4487 phba->link_flag &= ~LS_IGNORE_ERATT;
4488 spin_unlock_irq(&phba->hbalock);
4489 return 1;
4490 }
4491
4492 spin_lock_irq(&phba->hbalock);
4493 psli->sli_flag &= ~LPFC_SLI_ACTIVE;
4494 spin_unlock_irq(&phba->hbalock);
4495
4496 mempool_free(pmb, phba->mbox_mem_pool);
4497
4498
4499
4500
4501
4502
4503 if (lpfc_readl(phba->HAregaddr, &ha_copy))
4504 return 1;
4505 while ((i++ < 30) && !(ha_copy & HA_ERATT)) {
4506 mdelay(100);
4507 if (lpfc_readl(phba->HAregaddr, &ha_copy))
4508 return 1;
4509 }
4510
4511 del_timer_sync(&psli->mbox_tmo);
4512 if (ha_copy & HA_ERATT) {
4513 writel(HA_ERATT, phba->HAregaddr);
4514 phba->pport->stopped = 1;
4515 }
4516 spin_lock_irq(&phba->hbalock);
4517 psli->sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
4518 psli->mbox_active = NULL;
4519 phba->link_flag &= ~LS_IGNORE_ERATT;
4520 spin_unlock_irq(&phba->hbalock);
4521
4522 lpfc_hba_down_post(phba);
4523 phba->link_state = LPFC_HBA_ERROR;
4524
4525 return ha_copy & HA_ERATT ? 0 : 1;
4526}
4527
4528
4529
4530
4531
4532
4533
4534
4535
4536
4537
4538
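/**
 * lpfc_sli_brdreset - Reset a sli-2 or sli-3 HBA
 * @phba: Pointer to HBA context object.
 *
 * This function resets the HBA by writing HC_INITFF to the host control
 * register, with PCI parity and SERR reporting disabled for the duration
 * of the reset. After the reset, all iocb ring indices are cleared. The
 * function returns 0 on success, or -EIO when the PCI command register
 * cannot be read.
 **/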
4539int
4540lpfc_sli_brdreset(struct lpfc_hba *phba)
4541{
4542 struct lpfc_sli *psli;
4543 struct lpfc_sli_ring *pring;
4544 uint16_t cfg_value;
4545 int i;
4546
4547 psli = &phba->sli;
4548
4549
4550 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
4551 "0325 Reset HBA Data: x%x x%x\n",
4552 (phba->pport) ? phba->pport->port_state : 0,
4553 psli->sli_flag);
4554
4555
4556 phba->fc_eventTag = 0;
4557 phba->link_events = 0;
4558 if (phba->pport) {
4559 phba->pport->fc_myDID = 0;
4560 phba->pport->fc_prevDID = 0;
4561 }
4562
4563
4564 if (pci_read_config_word(phba->pcidev, PCI_COMMAND, &cfg_value))
4565 return -EIO;
4566
4567 pci_write_config_word(phba->pcidev, PCI_COMMAND,
4568 (cfg_value &
4569 ~(PCI_COMMAND_PARITY | PCI_COMMAND_SERR)));
4570
4571 psli->sli_flag &= ~(LPFC_SLI_ACTIVE | LPFC_PROCESS_LA);
4572
4573
4574 writel(HC_INITFF, phba->HCregaddr);
4575 mdelay(1);
4576 readl(phba->HCregaddr);
4577 writel(0, phba->HCregaddr);
4578 readl(phba->HCregaddr);
4579
4580
4581 pci_write_config_word(phba->pcidev, PCI_COMMAND, cfg_value);
4582
4583
4584 for (i = 0; i < psli->num_rings; i++) {
4585 pring = &psli->sli3_ring[i];
4586 pring->flag = 0;
4587 pring->sli.sli3.rspidx = 0;
4588 pring->sli.sli3.next_cmdidx = 0;
4589 pring->sli.sli3.local_getidx = 0;
4590 pring->sli.sli3.cmdidx = 0;
4591 pring->missbufcnt = 0;
4592 }
4593
4594 phba->link_state = LPFC_WARM_START;
4595 return 0;
4596}
4597
4598
4599
4600
4601
4602
4603
4604
4605
4606
4607
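/**
 * lpfc_sli4_brdreset - Reset a sli-4 HBA
 * @phba: Pointer to HBA context object.
 *
 * This function resets a SLI4 HBA by performing a PCI function reset,
 * with PCI parity and SERR reporting disabled for the duration of the
 * reset. The reset is skipped when a firmware dump operation is pending.
 * The function returns 0 on success, else a negative error code.
 **/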
4608int
4609lpfc_sli4_brdreset(struct lpfc_hba *phba)
4610{
4611 struct lpfc_sli *psli = &phba->sli;
4612 uint16_t cfg_value;
4613 int rc = 0;
4614
4615
4616 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
4617 "0295 Reset HBA Data: x%x x%x x%x\n",
4618 phba->pport->port_state, psli->sli_flag,
4619 phba->hba_flag);
4620
4621
4622 phba->fc_eventTag = 0;
4623 phba->link_events = 0;
4624 phba->pport->fc_myDID = 0;
4625 phba->pport->fc_prevDID = 0;
4626
4627 spin_lock_irq(&phba->hbalock);
4628 psli->sli_flag &= ~(LPFC_PROCESS_LA);
4629 phba->fcf.fcf_flag = 0;
4630 spin_unlock_irq(&phba->hbalock);
4631
4632
4633 if (phba->hba_flag & HBA_FW_DUMP_OP) {
4634 phba->hba_flag &= ~HBA_FW_DUMP_OP;
4635 return rc;
4636 }
4637
4638
4639 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
4640 "0389 Performing PCI function reset!\n");
4641
4642
4643 if (pci_read_config_word(phba->pcidev, PCI_COMMAND, &cfg_value)) {
4644 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
4645 "3205 PCI read Config failed\n");
4646 return -EIO;
4647 }
4648
4649 pci_write_config_word(phba->pcidev, PCI_COMMAND, (cfg_value &
4650 ~(PCI_COMMAND_PARITY | PCI_COMMAND_SERR)));
4651
4652
4653 rc = lpfc_pci_function_reset(phba);
4654
4655
4656 pci_write_config_word(phba->pcidev, PCI_COMMAND, cfg_value);
4657
4658 return rc;
4659}
4660
4661
4662
4663
4664
4665
4666
4667
4668
4669
4670
4671
4672
4673
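/**
 * lpfc_sli_brdrestart_s3 - Restart a sli-3 hba
 * @phba: Pointer to HBA context object.
 *
 * This function is called in the SLI initialization code path to restart
 * the HBA. It writes a MBX_RESTART mailbox command directly into SLIM
 * (setting the skip-post word when the port was previously initialized),
 * resets the board via lpfc_sli_brdreset, and cleans up any pending IO
 * with lpfc_hba_down_post. This function always returns zero.
 **/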
4674static int
4675lpfc_sli_brdrestart_s3(struct lpfc_hba *phba)
4676{
4677 MAILBOX_t *mb;
4678 struct lpfc_sli *psli;
4679 volatile uint32_t word0;
4680 void __iomem *to_slim;
4681 uint32_t hba_aer_enabled;
4682
4683 spin_lock_irq(&phba->hbalock);
4684
4685
4686 hba_aer_enabled = phba->hba_flag & HBA_AER_ENABLED;
4687
4688 psli = &phba->sli;
4689
4690
4691 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
4692 "0337 Restart HBA Data: x%x x%x\n",
4693 (phba->pport) ? phba->pport->port_state : 0,
4694 psli->sli_flag);
4695
4696 word0 = 0;
4697 mb = (MAILBOX_t *) &word0;
4698 mb->mbxCommand = MBX_RESTART;
4699 mb->mbxHc = 1;
4700
4701 lpfc_reset_barrier(phba);
4702
4703 to_slim = phba->MBslimaddr;
4704 writel(*(uint32_t *) mb, to_slim);
4705 readl(to_slim);
4706
4707
4708 if (phba->pport && phba->pport->port_state)
4709 word0 = 1;
4710 else
4711 word0 = 0;
4712 to_slim = phba->MBslimaddr + sizeof (uint32_t);
4713 writel(*(uint32_t *) mb, to_slim);
4714 readl(to_slim);
4715
4716 lpfc_sli_brdreset(phba);
4717 if (phba->pport)
4718 phba->pport->stopped = 0;
4719 phba->link_state = LPFC_INIT_START;
4720 phba->hba_flag = 0;
4721 spin_unlock_irq(&phba->hbalock);
4722
4723 memset(&psli->lnk_stat_offsets, 0, sizeof(psli->lnk_stat_offsets));
4724 psli->stats_start = ktime_get_seconds();
4725
4726
4727 mdelay(100);
4728
4729
4730 if (hba_aer_enabled)
4731 pci_disable_pcie_error_reporting(phba->pcidev);
4732
4733 lpfc_hba_down_post(phba);
4734
4735 return 0;
4736}
4737
4738
4739
4740
4741
4742
4743
4744
4745
4746
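/**
 * lpfc_sli_brdrestart_s4 - Restart the sli-4 hba
 * @phba: Pointer to HBA context object.
 *
 * This function is called in the SLI initialization code path to restart
 * an SLI4 HBA. It resets the port via lpfc_sli4_brdreset, then cleans up
 * pending IO and destroys the SLI4 queues. The function returns zero on
 * success, else the error code from the reset.
 **/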
4747static int
4748lpfc_sli_brdrestart_s4(struct lpfc_hba *phba)
4749{
4750 struct lpfc_sli *psli = &phba->sli;
4751 uint32_t hba_aer_enabled;
4752 int rc;
4753
4754
4755 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
4756 "0296 Restart HBA Data: x%x x%x\n",
4757 phba->pport->port_state, psli->sli_flag);
4758
4759
4760 hba_aer_enabled = phba->hba_flag & HBA_AER_ENABLED;
4761
4762 rc = lpfc_sli4_brdreset(phba);
4763 if (rc) {
4764 phba->link_state = LPFC_HBA_ERROR;
4765 goto hba_down_queue;
4766 }
4767
4768 spin_lock_irq(&phba->hbalock);
4769 phba->pport->stopped = 0;
4770 phba->link_state = LPFC_INIT_START;
4771 phba->hba_flag = 0;
4772 spin_unlock_irq(&phba->hbalock);
4773
4774 memset(&psli->lnk_stat_offsets, 0, sizeof(psli->lnk_stat_offsets));
4775 psli->stats_start = ktime_get_seconds();
4776
4777
4778 if (hba_aer_enabled)
4779 pci_disable_pcie_error_reporting(phba->pcidev);
4780
4781hba_down_queue:
4782 lpfc_hba_down_post(phba);
4783 lpfc_sli4_queue_destroy(phba);
4784
4785 return rc;
4786}
4787
4788
4789
4790
4791
4792
4793
4794
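/**
 * lpfc_sli_brdrestart - Wrapper func for restarting hba
 * @phba: Pointer to HBA context object.
 *
 * This routine wraps the actual SLI3 or SLI4 hba restart routine from
 * the API jump table function pointer in the lpfc_hba struct.
 **/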
4795int
4796lpfc_sli_brdrestart(struct lpfc_hba *phba)
4797{
4798 return phba->lpfc_sli_brdrestart(phba);
4799}
4800
4801
4802
4803
4804
4805
4806
4807
4808
4809
4810
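/**
 * lpfc_sli_chipset_init - Wait for the restart of the HBA after a restart
 * @phba: Pointer to HBA context object.
 *
 * This function is called after an HBA restart to wait for the HBA to
 * become ready. It polls the host status register with increasing
 * delays, restarting the board once more partway through, until both the
 * function and mailbox ready bits are set or a fatal error is reported.
 * On success it clears the interrupt enable and attention registers.
 * Returns 0 on success, -EIO or -ETIMEDOUT on failure.
 **/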
4811int
4812lpfc_sli_chipset_init(struct lpfc_hba *phba)
4813{
4814 uint32_t status, i = 0;
4815
4816
4817 if (lpfc_readl(phba->HSregaddr, &status))
4818 return -EIO;
4819
4820
4821 i = 0;
4822 while ((status & (HS_FFRDY | HS_MBRDY)) != (HS_FFRDY | HS_MBRDY)) {
4823
4824
4825
4826
4827
4828
4829
4830
4831
4832 if (i++ >= 200) {
4833
4834
4835 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
4836 "0436 Adapter failed to init, "
4837 "timeout, status reg x%x, "
4838 "FW Data: A8 x%x AC x%x\n", status,
4839 readl(phba->MBslimaddr + 0xa8),
4840 readl(phba->MBslimaddr + 0xac));
4841 phba->link_state = LPFC_HBA_ERROR;
4842 return -ETIMEDOUT;
4843 }
4844
4845
4846 if (status & HS_FFERM) {
4847
4848
4849
4850 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
4851 "0437 Adapter failed to init, "
4852 "chipset, status reg x%x, "
4853 "FW Data: A8 x%x AC x%x\n", status,
4854 readl(phba->MBslimaddr + 0xa8),
4855 readl(phba->MBslimaddr + 0xac));
4856 phba->link_state = LPFC_HBA_ERROR;
4857 return -EIO;
4858 }
4859
4860 if (i <= 10)
4861 msleep(10);
4862 else if (i <= 100)
4863 msleep(100);
4864 else
4865 msleep(1000);
4866
4867 if (i == 150) {
4868
4869 phba->pport->port_state = LPFC_VPORT_UNKNOWN;
4870 lpfc_sli_brdrestart(phba);
4871 }
4872
4873 if (lpfc_readl(phba->HSregaddr, &status))
4874 return -EIO;
4875 }
4876
4877
4878 if (status & HS_FFERM) {
4879
4880
4881 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
4882 "0438 Adapter failed to init, chipset, "
4883 "status reg x%x, "
4884 "FW Data: A8 x%x AC x%x\n", status,
4885 readl(phba->MBslimaddr + 0xa8),
4886 readl(phba->MBslimaddr + 0xac));
4887 phba->link_state = LPFC_HBA_ERROR;
4888 return -EIO;
4889 }
4890
4891
4892 writel(0, phba->HCregaddr);
4893 readl(phba->HCregaddr);
4894
4895
4896 writel(0xffffffff, phba->HAregaddr);
4897 readl(phba->HAregaddr);
4898 return 0;
4899}
4900
4901
4902
4903
4904
4905
4906
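/**
 * lpfc_sli_hbq_count - Get the number of HBQs to be configured
 *
 * This function returns the number of HBQs to be configured.
 **/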
4907int
4908lpfc_sli_hbq_count(void)
4909{
4910 return ARRAY_SIZE(lpfc_hbq_defs);
4911}
4912
4913
4914
4915
4916
4917
4918
4919
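/**
 * lpfc_sli_hbq_entry_count - Calculate total number of hbq entries
 *
 * This function sums the number of entries in each configured HBQ and
 * returns the total entry count.
 **/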
4920static int
4921lpfc_sli_hbq_entry_count(void)
4922{
4923 int hbq_count = lpfc_sli_hbq_count();
4924 int count = 0;
4925 int i;
4926
4927 for (i = 0; i < hbq_count; ++i)
4928 count += lpfc_hbq_defs[i]->entry_count;
4929 return count;
4930}
4931
4932
4933
4934
4935
4936
4937
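/**
 * lpfc_sli_hbq_size - Calculate memory required for all hbq entries
 *
 * This function calculates the amount of memory, in bytes, required for
 * all hbq entries to be configured.
 **/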
4938int
4939lpfc_sli_hbq_size(void)
4940{
4941 return lpfc_sli_hbq_entry_count() * sizeof(struct lpfc_hbq_entry);
4942}
4943
4944
4945
4946
4947
4948
4949
4950
4951
4952
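/**
 * lpfc_sli_hbq_setup - configure and initialize HBQs
 * @phba: Pointer to HBA context object.
 *
 * This function is called during SLI initialization to configure all the
 * HBQs and post buffers to them. The caller is not required to hold any
 * locks. A CONFIG_HBQ mailbox command is issued in polled mode for each
 * HBQ; if any command fails, the adapter is put into an error state.
 * This function returns zero on success, else a negative error code.
 **/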
4953static int
4954lpfc_sli_hbq_setup(struct lpfc_hba *phba)
4955{
4956 int hbq_count = lpfc_sli_hbq_count();
4957 LPFC_MBOXQ_t *pmb;
4958 MAILBOX_t *pmbox;
4959 uint32_t hbqno;
4960 uint32_t hbq_entry_index;
4961
4962
4963
4964
4965 pmb = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
4966
4967 if (!pmb)
4968 return -ENOMEM;
4969
4970 pmbox = &pmb->u.mb;
4971
4972
4973 phba->link_state = LPFC_INIT_MBX_CMDS;
4974 phba->hbq_in_use = 1;
4975
4976 hbq_entry_index = 0;
4977 for (hbqno = 0; hbqno < hbq_count; ++hbqno) {
4978 phba->hbqs[hbqno].next_hbqPutIdx = 0;
4979 phba->hbqs[hbqno].hbqPutIdx = 0;
4980 phba->hbqs[hbqno].local_hbqGetIdx = 0;
4981 phba->hbqs[hbqno].entry_count =
4982 lpfc_hbq_defs[hbqno]->entry_count;
4983 lpfc_config_hbq(phba, hbqno, lpfc_hbq_defs[hbqno],
4984 hbq_entry_index, pmb);
4985 hbq_entry_index += phba->hbqs[hbqno].entry_count;
4986
4987 if (lpfc_sli_issue_mbox(phba, pmb, MBX_POLL) != MBX_SUCCESS) {
4988
4989
4990
4991 lpfc_printf_log(phba, KERN_ERR,
4992 LOG_SLI | LOG_VPORT,
4993 "1805 Adapter failed to init. "
4994 "Data: x%x x%x x%x\n",
4995 pmbox->mbxCommand,
4996 pmbox->mbxStatus, hbqno);
4997
4998 phba->link_state = LPFC_HBA_ERROR;
4999 mempool_free(pmb, phba->mbox_mem_pool);
5000 return -ENXIO;
5001 }
5002 }
5003 phba->hbq_count = hbq_count;
5004
5005 mempool_free(pmb, phba->mbox_mem_pool);
5006
5007
5008 for (hbqno = 0; hbqno < hbq_count; ++hbqno)
5009 lpfc_sli_hbqbuf_init_hbqs(phba, hbqno);
5010 return 0;
5011}
5012
5013
5014
5015
5016
5017
5018
5019
5020
5021
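/**
 * lpfc_sli4_rb_setup - Initialize and post RBs to HBA
 * @phba: Pointer to HBA context object.
 *
 * This function is called during SLI initialization to post receive
 * buffers to the HBA. When MDS diagnostics are enabled and supported,
 * only half of the ELS HBQ entries are used so the remainder can serve
 * the diagnostic path. The function always returns zero.
 **/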
5022static int
5023lpfc_sli4_rb_setup(struct lpfc_hba *phba)
5024{
5025 phba->hbq_in_use = 1;
5026
5027
5028
5029
5030
5031 if (phba->cfg_enable_mds_diags && phba->mds_diags_support)
5032 phba->hbqs[LPFC_ELS_HBQ].entry_count =
5033 lpfc_hbq_defs[LPFC_ELS_HBQ]->entry_count >> 1;
5034 else
5035 phba->hbqs[LPFC_ELS_HBQ].entry_count =
5036 lpfc_hbq_defs[LPFC_ELS_HBQ]->entry_count;
5037 phba->hbq_count = 1;
5038 lpfc_sli_hbqbuf_init_hbqs(phba, LPFC_ELS_HBQ);
5039
5040 return 0;
5041}
5042
5043
5044
5045
5046
5047
5048
5049
5050
5051
5052
5053
5054
5055
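/**
 * lpfc_sli_config_port - Issue config port mailbox command
 * @phba: Pointer to HBA context object.
 * @sli_mode: sli mode - 2/3
 *
 * This function is called in the init code path to issue a CONFIG_PORT
 * mailbox command. It restarts and re-initializes the chip first, and
 * retries once when the port preparation requests a restart. On a
 * successful SLI3 configuration the granted options (NPIV, HBQs, CRP,
 * BlockGuard) are recorded in sli3_options. This function returns 0 on
 * success, else a negative error code.
 **/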
5056int
5057lpfc_sli_config_port(struct lpfc_hba *phba, int sli_mode)
5058{
5059 LPFC_MBOXQ_t *pmb;
5060 uint32_t resetcount = 0, rc = 0, done = 0;
5061
5062 pmb = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
5063 if (!pmb) {
5064 phba->link_state = LPFC_HBA_ERROR;
5065 return -ENOMEM;
5066 }
5067
5068 phba->sli_rev = sli_mode;
5069 while (resetcount < 2 && !done) {
5070 spin_lock_irq(&phba->hbalock);
5071 phba->sli.sli_flag |= LPFC_SLI_MBOX_ACTIVE;
5072 spin_unlock_irq(&phba->hbalock);
5073 phba->pport->port_state = LPFC_VPORT_UNKNOWN;
5074 lpfc_sli_brdrestart(phba);
5075 rc = lpfc_sli_chipset_init(phba);
5076 if (rc)
5077 break;
5078
5079 spin_lock_irq(&phba->hbalock);
5080 phba->sli.sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
5081 spin_unlock_irq(&phba->hbalock);
5082 resetcount++;
5083
5084
5085
5086
5087
5088
5089 rc = lpfc_config_port_prep(phba);
5090 if (rc == -ERESTART) {
5091 phba->link_state = LPFC_LINK_UNKNOWN;
5092 continue;
5093 } else if (rc)
5094 break;
5095
5096 phba->link_state = LPFC_INIT_MBX_CMDS;
5097 lpfc_config_port(phba, pmb);
5098 rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL);
5099 phba->sli3_options &= ~(LPFC_SLI3_NPIV_ENABLED |
5100 LPFC_SLI3_HBQ_ENABLED |
5101 LPFC_SLI3_CRP_ENABLED |
5102 LPFC_SLI3_DSS_ENABLED);
5103 if (rc != MBX_SUCCESS) {
5104 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
5105 "0442 Adapter failed to init, mbxCmd x%x "
5106 "CONFIG_PORT, mbxStatus x%x Data: x%x\n",
5107 pmb->u.mb.mbxCommand, pmb->u.mb.mbxStatus, 0);
5108 spin_lock_irq(&phba->hbalock);
5109 phba->sli.sli_flag &= ~LPFC_SLI_ACTIVE;
5110 spin_unlock_irq(&phba->hbalock);
5111 rc = -ENXIO;
5112 } else {
5113
5114 spin_lock_irq(&phba->hbalock);
5115 phba->sli.sli_flag &= ~LPFC_SLI_ASYNC_MBX_BLK;
5116 spin_unlock_irq(&phba->hbalock);
5117 done = 1;
5118
5119 if ((pmb->u.mb.un.varCfgPort.casabt == 1) &&
5120 (pmb->u.mb.un.varCfgPort.gasabt == 0))
5121 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
5122 "3110 Port did not grant ASABT\n");
5123 }
5124 }
5125 if (!done) {
5126 rc = -EINVAL;
5127 goto do_prep_failed;
5128 }
5129 if (pmb->u.mb.un.varCfgPort.sli_mode == 3) {
5130 if (!pmb->u.mb.un.varCfgPort.cMA) {
5131 rc = -ENXIO;
5132 goto do_prep_failed;
5133 }
5134 if (phba->max_vpi && pmb->u.mb.un.varCfgPort.gmv) {
5135 phba->sli3_options |= LPFC_SLI3_NPIV_ENABLED;
5136 phba->max_vpi = pmb->u.mb.un.varCfgPort.max_vpi;
5137 phba->max_vports = (phba->max_vpi > phba->max_vports) ?
5138 phba->max_vpi : phba->max_vports;
5139
5140 } else
5141 phba->max_vpi = 0;
5142 if (pmb->u.mb.un.varCfgPort.gerbm)
5143 phba->sli3_options |= LPFC_SLI3_HBQ_ENABLED;
5144 if (pmb->u.mb.un.varCfgPort.gcrp)
5145 phba->sli3_options |= LPFC_SLI3_CRP_ENABLED;
5146
5147 phba->hbq_get = phba->mbox->us.s3_pgp.hbq_get;
5148 phba->port_gp = phba->mbox->us.s3_pgp.port;
5149
5150 if (phba->sli3_options & LPFC_SLI3_BG_ENABLED) {
5151 if (pmb->u.mb.un.varCfgPort.gbg == 0) {
5152 phba->cfg_enable_bg = 0;
5153 phba->sli3_options &= ~LPFC_SLI3_BG_ENABLED;
5154 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
5155 "0443 Adapter did not grant "
5156 "BlockGuard\n");
5157 }
5158 }
5159 } else {
5160 phba->hbq_get = NULL;
5161 phba->port_gp = phba->mbox->us.s2.port;
5162 phba->max_vpi = 0;
5163 }
5164do_prep_failed:
5165 mempool_free(pmb, phba->mbox_mem_pool);
5166 return rc;
5167}
5168
5169
5170
5171
5172
5173
5174
5175
5176
5177
5178
5179
5180
5181
5182
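/**
 * lpfc_sli_hba_setup - SLI initialization function
 * @phba: Pointer to HBA context object.
 *
 * This function is the main SLI initialization routine, called by the
 * HBA initialization, HBA reset, and error attention handling code. The
 * caller is not required to hold any lock. It selects the SLI mode,
 * enables PCIe AER when configured, maps the iocb rings, allocates the
 * SLI3 VPI resources, sets up the HBQs, and completes the port
 * configuration. This function returns 0 on success, else a negative
 * error code.
 **/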
5183int
5184lpfc_sli_hba_setup(struct lpfc_hba *phba)
5185{
5186 uint32_t rc;
5187 int mode = 3, i;
5188 int longs;
5189
5190 switch (phba->cfg_sli_mode) {
5191 case 2:
5192 if (phba->cfg_enable_npiv) {
5193 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
5194 "1824 NPIV enabled: Override sli_mode "
5195 "parameter (%d) to auto (0).\n",
5196 phba->cfg_sli_mode);
5197 break;
5198 }
5199 mode = 2;
5200 break;
5201 case 0:
5202 case 3:
5203 break;
5204 default:
5205 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
5206 "1819 Unrecognized sli_mode parameter: %d.\n",
5207 phba->cfg_sli_mode);
5208
5209 break;
5210 }
5211 phba->fcp_embed_io = 0;
5212
5213 rc = lpfc_sli_config_port(phba, mode);
5214
5215 if (rc && phba->cfg_sli_mode == 3)
5216 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
5217 "1820 Unable to select SLI-3. "
5218 "Not supported by adapter.\n");
5219 if (rc && mode != 2)
5220 rc = lpfc_sli_config_port(phba, 2);
5221 else if (rc && mode == 2)
5222 rc = lpfc_sli_config_port(phba, 3);
5223 if (rc)
5224 goto lpfc_sli_hba_setup_error;
5225
5226
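/* Enable PCIe device Advanced Error Reporting (AER) if configured. */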
5227 if (phba->cfg_aer_support == 1 && !(phba->hba_flag & HBA_AER_ENABLED)) {
5228 rc = pci_enable_pcie_error_reporting(phba->pcidev);
5229 if (!rc) {
5230 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
5231 "2709 This device supports "
5232 "Advanced Error Reporting (AER)\n");
5233 spin_lock_irq(&phba->hbalock);
5234 phba->hba_flag |= HBA_AER_ENABLED;
5235 spin_unlock_irq(&phba->hbalock);
5236 } else {
5237 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
5238 "2708 This device does not support "
5239 "Advanced Error Reporting (AER): %d\n",
5240 rc);
5241 phba->cfg_aer_support = 0;
5242 }
5243 }
5244
5245 if (phba->sli_rev == 3) {
5246 phba->iocb_cmd_size = SLI3_IOCB_CMD_SIZE;
5247 phba->iocb_rsp_size = SLI3_IOCB_RSP_SIZE;
5248 } else {
5249 phba->iocb_cmd_size = SLI2_IOCB_CMD_SIZE;
5250 phba->iocb_rsp_size = SLI2_IOCB_RSP_SIZE;
5251 phba->sli3_options = 0;
5252 }
5253
5254 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
5255 "0444 Firmware in SLI %x mode. Max_vpi %d\n",
5256 phba->sli_rev, phba->max_vpi);
5257 rc = lpfc_sli_ring_map(phba);
5258
5259 if (rc)
5260 goto lpfc_sli_hba_setup_error;
5261
5262
5263 if (phba->sli_rev == LPFC_SLI_REV3) {
5264
5265
5266
5267
5268
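/*
 * The VPI bitmask and physical id array are allocated and
 * initialized once only - at driver load.  A port reset does
 * not need to reinitialize this memory.
 */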
5269 if ((phba->vpi_bmask == NULL) && (phba->vpi_ids == NULL)) {
5270 longs = (phba->max_vpi + BITS_PER_LONG) / BITS_PER_LONG;
5271 phba->vpi_bmask = kcalloc(longs,
5272 sizeof(unsigned long),
5273 GFP_KERNEL);
5274 if (!phba->vpi_bmask) {
5275 rc = -ENOMEM;
5276 goto lpfc_sli_hba_setup_error;
5277 }
5278
5279 phba->vpi_ids = kcalloc(phba->max_vpi + 1,
5280 sizeof(uint16_t),
5281 GFP_KERNEL);
5282 if (!phba->vpi_ids) {
5283 kfree(phba->vpi_bmask);
5284 rc = -ENOMEM;
5285 goto lpfc_sli_hba_setup_error;
5286 }
5287 for (i = 0; i < phba->max_vpi; i++)
5288 phba->vpi_ids[i] = i;
5289 }
5290 }
5291
5292
5293 if (phba->sli3_options & LPFC_SLI3_HBQ_ENABLED) {
5294 rc = lpfc_sli_hbq_setup(phba);
5295 if (rc)
5296 goto lpfc_sli_hba_setup_error;
5297 }
5298 spin_lock_irq(&phba->hbalock);
5299 phba->sli.sli_flag |= LPFC_PROCESS_LA;
5300 spin_unlock_irq(&phba->hbalock);
5301
5302 rc = lpfc_config_port_post(phba);
5303 if (rc)
5304 goto lpfc_sli_hba_setup_error;
5305
5306 return rc;
5307
5308lpfc_sli_hba_setup_error:
5309 phba->link_state = LPFC_HBA_ERROR;
5310 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
5311 "0445 Firmware initialization failed\n");
5312 return rc;
5313}
5314
5315 /**
5316  * lpfc_sli4_read_fcoe_params - Read fcoe params from config region 23
5317  * @phba: Pointer to HBA context object.
5318  *
5319  * This function issues a DUMP mailbox command for config region 23 and
5320  * parses the FCoE parameters (FCF map, VLAN) found there.  Defaults are
5321  * set first so a failed read still leaves usable values.
5322  **/
5323static int
5324lpfc_sli4_read_fcoe_params(struct lpfc_hba *phba)
5325{
5326 LPFC_MBOXQ_t *mboxq;
5327 struct lpfc_dmabuf *mp;
5328 struct lpfc_mqe *mqe;
5329 uint32_t data_length;
5330 int rc;
5331
5332
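/* Reset the cached FCoE parameters to their defaults before reading
 * config region 23, so a failed read still leaves usable values.
 */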
5333 phba->valid_vlan = 0;
5334 phba->fc_map[0] = LPFC_FCOE_FCF_MAP0;
5335 phba->fc_map[1] = LPFC_FCOE_FCF_MAP1;
5336 phba->fc_map[2] = LPFC_FCOE_FCF_MAP2;
5337
5338 mboxq = (LPFC_MBOXQ_t *)mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
5339 if (!mboxq)
5340 return -ENOMEM;
5341
5342 mqe = &mboxq->u.mqe;
5343 if (lpfc_sli4_dump_cfg_rg23(phba, mboxq)) {
5344 rc = -ENOMEM;
5345 goto out_free_mboxq;
5346 }
5347
5348 mp = (struct lpfc_dmabuf *)mboxq->ctx_buf;
5349 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
5350
5351 lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI,
5352 "(%d):2571 Mailbox cmd x%x Status x%x "
5353 "Data: x%x x%x x%x x%x x%x x%x x%x x%x x%x "
5354 "x%x x%x x%x x%x x%x x%x x%x x%x x%x "
5355 "CQ: x%x x%x x%x x%x\n",
5356 mboxq->vport ? mboxq->vport->vpi : 0,
5357 bf_get(lpfc_mqe_command, mqe),
5358 bf_get(lpfc_mqe_status, mqe),
5359 mqe->un.mb_words[0], mqe->un.mb_words[1],
5360 mqe->un.mb_words[2], mqe->un.mb_words[3],
5361 mqe->un.mb_words[4], mqe->un.mb_words[5],
5362 mqe->un.mb_words[6], mqe->un.mb_words[7],
5363 mqe->un.mb_words[8], mqe->un.mb_words[9],
5364 mqe->un.mb_words[10], mqe->un.mb_words[11],
5365 mqe->un.mb_words[12], mqe->un.mb_words[13],
5366 mqe->un.mb_words[14], mqe->un.mb_words[15],
5367 mqe->un.mb_words[16], mqe->un.mb_words[50],
5368 mboxq->mcqe.word0,
5369 mboxq->mcqe.mcqe_tag0, mboxq->mcqe.mcqe_tag1,
5370 mboxq->mcqe.trailer);
5371
5372 if (rc) {
5373 lpfc_mbuf_free(phba, mp->virt, mp->phys);
5374 kfree(mp);
5375 rc = -EIO;
5376 goto out_free_mboxq;
5377 }
5378 data_length = mqe->un.mb_words[5];
5379 if (data_length > DMP_RGN23_SIZE) {
5380 lpfc_mbuf_free(phba, mp->virt, mp->phys);
5381 kfree(mp);
5382 rc = -EIO;
5383 goto out_free_mboxq;
5384 }
5385
5386 lpfc_parse_fcoe_conf(phba, mp->virt, data_length);
5387 lpfc_mbuf_free(phba, mp->virt, mp->phys);
5388 kfree(mp);
5389 rc = 0;
5390
5391out_free_mboxq:
5392 mempool_free(mboxq, phba->mbox_mem_pool);
5393 return rc;
5394}
5395
5396 /**
5397  * lpfc_sli4_read_rev - Issue a READ_REV mailbox command and collect vpd
5398  * @phba: pointer to lpfc hba data structure.
5399  * @mboxq: pointer to the LPFC_MBOXQ_t structure.
5400  * @vpd: pointer to the memory to hold the resulting port vpd data.
5401  * @vpd_size: On input, the number of bytes allocated to @vpd.
5402  *            On output, the number of data bytes in @vpd.
5403  *
5404  * This routine executes a READ_REV SLI4 mailbox command, pointing the
5405  * command at a temporary DMA buffer so the port can return its vpd,
5406  * then copies the vpd into the caller's buffer.
5407  *
5408  * Return codes
5409  *      0 - successful; -ENOMEM - allocation failure; -EIO - mailbox error
5410  **/
5411static int
5412lpfc_sli4_read_rev(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq,
5413 uint8_t *vpd, uint32_t *vpd_size)
5414{
5415 int rc = 0;
5416 uint32_t dma_size;
5417 struct lpfc_dmabuf *dmabuf;
5418 struct lpfc_mqe *mqe;
5419
5420 dmabuf = kzalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
5421 if (!dmabuf)
5422 return -ENOMEM;
5423
5424
5425
5426
5427
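/*
 * Get a DMA buffer for the vpd data resulting from the READ_REV
 * mailbox command.
 */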
5428 dma_size = *vpd_size;
5429 dmabuf->virt = dma_alloc_coherent(&phba->pcidev->dev, dma_size,
5430 &dmabuf->phys, GFP_KERNEL);
5431 if (!dmabuf->virt) {
5432 kfree(dmabuf);
5433 return -ENOMEM;
5434 }
5435
5436
5437
5438
5439
5440
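/*
 * The READ_REV mailbox command returns the vpd through the DMA
 * buffer whose physical address is loaded into the command below.
 */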
5441 lpfc_read_rev(phba, mboxq);
5442 mqe = &mboxq->u.mqe;
5443 mqe->un.read_rev.vpd_paddr_high = putPaddrHigh(dmabuf->phys);
5444 mqe->un.read_rev.vpd_paddr_low = putPaddrLow(dmabuf->phys);
5445 mqe->un.read_rev.word1 &= 0x0000FFFF;
5446 bf_set(lpfc_mbx_rd_rev_vpd, &mqe->un.read_rev, 1);
5447 bf_set(lpfc_mbx_rd_rev_avail_len, &mqe->un.read_rev, dma_size);
5448
5449 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
5450 if (rc) {
5451 dma_free_coherent(&phba->pcidev->dev, dma_size,
5452 dmabuf->virt, dmabuf->phys);
5453 kfree(dmabuf);
5454 return -EIO;
5455 }
5456
5457
5458
5459
5460
5461
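/*
 * The available vpd length cannot be bigger than the DMA buffer
 * passed to the port.  Catch the less-than case and update the
 * caller's size.
 */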
5462 if (mqe->un.read_rev.avail_vpd_len < *vpd_size)
5463 *vpd_size = mqe->un.read_rev.avail_vpd_len;
5464
5465 memcpy(vpd, dmabuf->virt, *vpd_size);
5466
5467 dma_free_coherent(&phba->pcidev->dev, dma_size,
5468 dmabuf->virt, dmabuf->phys);
5469 kfree(dmabuf);
5470 return 0;
5471}
5472
5473 /**
5474  * lpfc_sli4_get_ctl_attr - Retrieve SLI4 device controller attributes
5475  * @phba: pointer to lpfc hba data structure.
5476  *
5477  * This routine issues a COMMON_GET_CNTL_ATTRIBUTES mailbox command and
5478  * caches the link type, link number, and BIOS version reported by the
5479  * controller.
5480  *
5481  * Return codes
5482  *      0 - successful; otherwise - failed to retrieve the attributes
5483  **/
5484static int
5485lpfc_sli4_get_ctl_attr(struct lpfc_hba *phba)
5486{
5487 LPFC_MBOXQ_t *mboxq;
5488 struct lpfc_mbx_get_cntl_attributes *mbx_cntl_attr;
5489 struct lpfc_controller_attribute *cntl_attr;
5490 void *virtaddr = NULL;
5491 uint32_t alloclen, reqlen;
5492 uint32_t shdr_status, shdr_add_status;
5493 union lpfc_sli4_cfg_shdr *shdr;
5494 int rc;
5495
5496 mboxq = (LPFC_MBOXQ_t *)mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
5497 if (!mboxq)
5498 return -ENOMEM;
5499
5500
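/* Send a COMMON_GET_CNTL_ATTRIBUTES command as a non-embedded mailbox. */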
5501 reqlen = sizeof(struct lpfc_mbx_get_cntl_attributes);
5502 alloclen = lpfc_sli4_config(phba, mboxq, LPFC_MBOX_SUBSYSTEM_COMMON,
5503 LPFC_MBOX_OPCODE_GET_CNTL_ATTRIBUTES, reqlen,
5504 LPFC_SLI4_MBX_NEMBED);
5505
5506 if (alloclen < reqlen) {
5507 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
5508 "3084 Allocated DMA memory size (%d) is "
5509 "less than the requested DMA memory size "
5510 "(%d)\n", alloclen, reqlen);
5511 rc = -ENOMEM;
5512 goto out_free_mboxq;
5513 }
5514 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
5515 virtaddr = mboxq->sge_array->addr[0];
5516 mbx_cntl_attr = (struct lpfc_mbx_get_cntl_attributes *)virtaddr;
5517 shdr = &mbx_cntl_attr->cfg_shdr;
5518 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
5519 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
5520 if (shdr_status || shdr_add_status || rc) {
5521 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
5522 "3085 Mailbox x%x (x%x/x%x) failed, "
5523 "rc:x%x, status:x%x, add_status:x%x\n",
5524 bf_get(lpfc_mqe_command, &mboxq->u.mqe),
5525 lpfc_sli_config_mbox_subsys_get(phba, mboxq),
5526 lpfc_sli_config_mbox_opcode_get(phba, mboxq),
5527 rc, shdr_status, shdr_add_status);
5528 rc = -ENXIO;
5529 goto out_free_mboxq;
5530 }
5531
5532 cntl_attr = &mbx_cntl_attr->cntl_attr;
5533 phba->sli4_hba.lnk_info.lnk_dv = LPFC_LNK_DAT_VAL;
5534 phba->sli4_hba.lnk_info.lnk_tp =
5535 bf_get(lpfc_cntl_attr_lnk_type, cntl_attr);
5536 phba->sli4_hba.lnk_info.lnk_no =
5537 bf_get(lpfc_cntl_attr_lnk_numb, cntl_attr);
5538
5539 memset(phba->BIOSVersion, 0, sizeof(phba->BIOSVersion));
5540 strlcat(phba->BIOSVersion, (char *)cntl_attr->bios_ver_str,
5541 sizeof(phba->BIOSVersion));
5542
5543 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
5544 "3086 lnk_type:%d, lnk_numb:%d, bios_ver:%s\n",
5545 phba->sli4_hba.lnk_info.lnk_tp,
5546 phba->sli4_hba.lnk_info.lnk_no,
5547 phba->BIOSVersion);
5548out_free_mboxq:
5549 if (rc != MBX_TIMEOUT) {
5550 if (bf_get(lpfc_mqe_command, &mboxq->u.mqe) == MBX_SLI4_CONFIG)
5551 lpfc_sli4_mbox_cmd_free(phba, mboxq);
5552 else
5553 mempool_free(mboxq, phba->mbox_mem_pool);
5554 }
5555 return rc;
5556}
5557
5558 /**
5559  * lpfc_sli4_retrieve_pport_name - Retrieve SLI4 device physical port name
5560  * @phba: pointer to lpfc hba data structure.
5561  *
5562  * This routine retrieves the physical port name this PCI function is
5563  * attached to.  The link type and link number are obtained first (via
5564  * READ_CONFIG or COMMON_GET_CNTL_ATTRIBUTES), then used to select the
5565  * proper character from the GET_PORT_NAME response.
5566  *
5567  * Returns 0 on success; nonzero otherwise.
5568  **/
5569static int
5570lpfc_sli4_retrieve_pport_name(struct lpfc_hba *phba)
5571{
5572 LPFC_MBOXQ_t *mboxq;
5573 struct lpfc_mbx_get_port_name *get_port_name;
5574 uint32_t shdr_status, shdr_add_status;
5575 union lpfc_sli4_cfg_shdr *shdr;
5576 char cport_name = 0;
5577 int rc;
5578
5579
5580 phba->sli4_hba.lnk_info.lnk_dv = LPFC_LNK_DAT_INVAL;
5581 phba->sli4_hba.pport_name_sta = LPFC_SLI4_PPNAME_NON;
5582
5583 mboxq = (LPFC_MBOXQ_t *)mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
5584 if (!mboxq)
5585 return -ENOMEM;
5586
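/* Obtain the link type and link number via READ_CONFIG. */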
5587 phba->sli4_hba.lnk_info.lnk_dv = LPFC_LNK_DAT_INVAL;
5588 lpfc_sli4_read_config(phba);
5589 if (phba->sli4_hba.lnk_info.lnk_dv == LPFC_LNK_DAT_VAL)
5590 goto retrieve_ppname;
5591
5592
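/* Obtain the link type and link number via COMMON_GET_CNTL_ATTRIBUTES. */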
5593 rc = lpfc_sli4_get_ctl_attr(phba);
5594 if (rc)
5595 goto out_free_mboxq;
5596
5597retrieve_ppname:
5598 lpfc_sli4_config(phba, mboxq, LPFC_MBOX_SUBSYSTEM_COMMON,
5599 LPFC_MBOX_OPCODE_GET_PORT_NAME,
5600 sizeof(struct lpfc_mbx_get_port_name) -
5601 sizeof(struct lpfc_sli4_cfg_mhdr),
5602 LPFC_SLI4_MBX_EMBED);
5603 get_port_name = &mboxq->u.mqe.un.get_port_name;
5604 shdr = (union lpfc_sli4_cfg_shdr *)&get_port_name->header.cfg_shdr;
5605 bf_set(lpfc_mbox_hdr_version, &shdr->request, LPFC_OPCODE_VERSION_1);
5606 bf_set(lpfc_mbx_get_port_name_lnk_type, &get_port_name->u.request,
5607 phba->sli4_hba.lnk_info.lnk_tp);
5608 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
5609 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
5610 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
5611 if (shdr_status || shdr_add_status || rc) {
5612 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
5613 "3087 Mailbox x%x (x%x/x%x) failed: "
5614 "rc:x%x, status:x%x, add_status:x%x\n",
5615 bf_get(lpfc_mqe_command, &mboxq->u.mqe),
5616 lpfc_sli_config_mbox_subsys_get(phba, mboxq),
5617 lpfc_sli_config_mbox_opcode_get(phba, mboxq),
5618 rc, shdr_status, shdr_add_status);
5619 rc = -ENXIO;
5620 goto out_free_mboxq;
5621 }
5622 switch (phba->sli4_hba.lnk_info.lnk_no) {
5623 case LPFC_LINK_NUMBER_0:
5624 cport_name = bf_get(lpfc_mbx_get_port_name_name0,
5625 &get_port_name->u.response);
5626 phba->sli4_hba.pport_name_sta = LPFC_SLI4_PPNAME_GET;
5627 break;
5628 case LPFC_LINK_NUMBER_1:
5629 cport_name = bf_get(lpfc_mbx_get_port_name_name1,
5630 &get_port_name->u.response);
5631 phba->sli4_hba.pport_name_sta = LPFC_SLI4_PPNAME_GET;
5632 break;
5633 case LPFC_LINK_NUMBER_2:
5634 cport_name = bf_get(lpfc_mbx_get_port_name_name2,
5635 &get_port_name->u.response);
5636 phba->sli4_hba.pport_name_sta = LPFC_SLI4_PPNAME_GET;
5637 break;
5638 case LPFC_LINK_NUMBER_3:
5639 cport_name = bf_get(lpfc_mbx_get_port_name_name3,
5640 &get_port_name->u.response);
5641 phba->sli4_hba.pport_name_sta = LPFC_SLI4_PPNAME_GET;
5642 break;
5643 default:
5644 break;
5645 }
5646
5647 if (phba->sli4_hba.pport_name_sta == LPFC_SLI4_PPNAME_GET) {
5648 phba->Port[0] = cport_name;
5649 phba->Port[1] = '\0';
5650 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
5651 "3091 SLI get port name: %s\n", phba->Port);
5652 }
5653
5654out_free_mboxq:
5655 if (rc != MBX_TIMEOUT) {
5656 if (bf_get(lpfc_mqe_command, &mboxq->u.mqe) == MBX_SLI4_CONFIG)
5657 lpfc_sli4_mbox_cmd_free(phba, mboxq);
5658 else
5659 mempool_free(mboxq, phba->mbox_mem_pool);
5660 }
5661 return rc;
5662}
5663
5664 /**
5665  * lpfc_sli4_arm_cqeq_intr - Arm sli-4 device completion and event queues
5666  * @phba: pointer to lpfc hba data structure.
5667  *
5668  * This routine explicitly re-arms the SLI4 device's completion queues
5669  * and event queues so further interrupts can be generated.
5670  **/
5671static void
5672lpfc_sli4_arm_cqeq_intr(struct lpfc_hba *phba)
5673{
5674 int qidx;
5675 struct lpfc_sli4_hba *sli4_hba = &phba->sli4_hba;
5676 struct lpfc_sli4_hdw_queue *qp;
5677 struct lpfc_queue *eq;
5678
5679 sli4_hba->sli4_write_cq_db(phba, sli4_hba->mbx_cq, 0, LPFC_QUEUE_REARM);
5680 sli4_hba->sli4_write_cq_db(phba, sli4_hba->els_cq, 0, LPFC_QUEUE_REARM);
5681 if (sli4_hba->nvmels_cq)
5682 sli4_hba->sli4_write_cq_db(phba, sli4_hba->nvmels_cq, 0,
5683 LPFC_QUEUE_REARM);
5684
5685 if (sli4_hba->hdwq) {
5686
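/* Loop through the hardware queues and rearm each io completion queue. */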
5687 for (qidx = 0; qidx < phba->cfg_hdw_queue; qidx++) {
5688 qp = &sli4_hba->hdwq[qidx];
5689
5690 sli4_hba->sli4_write_cq_db(phba, qp->io_cq, 0,
5691 LPFC_QUEUE_REARM);
5692 }
5693
5694
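/* Loop through all the IRQ vectors and rearm each event queue. */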
5695 for (qidx = 0; qidx < phba->cfg_irq_chann; qidx++) {
5696 eq = sli4_hba->hba_eq_hdl[qidx].eq;
5697
5698 sli4_hba->sli4_write_eq_db(phba, eq,
5699 0, LPFC_QUEUE_REARM);
5700 }
5701 }
5702
5703 if (phba->nvmet_support) {
5704 for (qidx = 0; qidx < phba->cfg_nvmet_mrq; qidx++) {
5705 sli4_hba->sli4_write_cq_db(phba,
5706 sli4_hba->nvmet_cqset[qidx], 0,
5707 LPFC_QUEUE_REARM);
5708 }
5709 }
5710}
5711
5712 /**
5713  * lpfc_sli4_get_avail_extnt_rsrc - Get available resource extent count.
5714  * @phba: Pointer to HBA context object.
5715  * @type: The resource extent type.
5716  * @extnt_count: buffer to hold port available extent count.
5717  * @extnt_size: buffer to hold element count per extent.
5718  *
5719  * This function calls the port with a GET_RSRC_EXTENT_INFO mailbox
5720  * command and retrieves the number of available extents and their size
5721  * for the given extent type.
5722  * Returns 0 if successful; -ENOMEM or -EIO otherwise.
5723  **/
5724int
5725lpfc_sli4_get_avail_extnt_rsrc(struct lpfc_hba *phba, uint16_t type,
5726 uint16_t *extnt_count, uint16_t *extnt_size)
5727{
5728 int rc = 0;
5729 uint32_t length;
5730 uint32_t mbox_tmo;
5731 struct lpfc_mbx_get_rsrc_extent_info *rsrc_info;
5732 LPFC_MBOXQ_t *mbox;
5733
5734 mbox = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
5735 if (!mbox)
5736 return -ENOMEM;
5737
5738
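/* Find out how many extents are available for this resource type. */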
5739 length = (sizeof(struct lpfc_mbx_get_rsrc_extent_info) -
5740 sizeof(struct lpfc_sli4_cfg_mhdr));
5741 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON,
5742 LPFC_MBOX_OPCODE_GET_RSRC_EXTENT_INFO,
5743 length, LPFC_SLI4_MBX_EMBED);
5744
5745
5746 rc = lpfc_sli4_mbox_rsrc_extent(phba, mbox, 0, type,
5747 LPFC_SLI4_MBX_EMBED);
5748 if (unlikely(rc)) {
5749 rc = -EIO;
5750 goto err_exit;
5751 }
5752
5753 if (!phba->sli4_hba.intr_enable)
5754 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
5755 else {
5756 mbox_tmo = lpfc_mbox_tmo_val(phba, mbox);
5757 rc = lpfc_sli_issue_mbox_wait(phba, mbox, mbox_tmo);
5758 }
5759 if (unlikely(rc)) {
5760 rc = -EIO;
5761 goto err_exit;
5762 }
5763
5764 rsrc_info = &mbox->u.mqe.un.rsrc_extent_info;
5765 if (bf_get(lpfc_mbox_hdr_status,
5766 &rsrc_info->header.cfg_shdr.response)) {
5767 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
5768 "2930 Failed to get resource extents "
5769 "Status 0x%x Add'l Status 0x%x\n",
5770 bf_get(lpfc_mbox_hdr_status,
5771 &rsrc_info->header.cfg_shdr.response),
5772 bf_get(lpfc_mbox_hdr_add_status,
5773 &rsrc_info->header.cfg_shdr.response));
5774 rc = -EIO;
5775 goto err_exit;
5776 }
5777
5778 *extnt_count = bf_get(lpfc_mbx_get_rsrc_extent_info_cnt,
5779 &rsrc_info->u.rsp);
5780 *extnt_size = bf_get(lpfc_mbx_get_rsrc_extent_info_size,
5781 &rsrc_info->u.rsp);
5782
5783 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
5784 "3162 Retrieved extents type-%d from port: count:%d, "
5785 "size:%d\n", type, *extnt_count, *extnt_size);
5786
5787err_exit:
5788 mempool_free(mbox, phba->mbox_mem_pool);
5789 return rc;
5790}
5791
5792 /**
5793  * lpfc_sli4_chk_avail_extnt_rsrc - Check for available SLI4 resource extents.
5794  * @phba: Pointer to HBA context object.
5795  * @type: The extent type to check.
5796  *
5797  * This function reads the current available extents from the port and
5798  * checks if the extent count or extent size has changed since the last
5799  * access.  Callers use this routine post port reset to figure out if
5800  * there is a need to reallocate extent information.
5801  *
5802  * Returns:
5803  *   -EIO: a problem occurred reading the extents from the port.
5804  *   1: the extent count or size has changed.
5805  *   0: no changes.
5806  **/
5807static int
5808lpfc_sli4_chk_avail_extnt_rsrc(struct lpfc_hba *phba, uint16_t type)
5809{
5810 uint16_t curr_ext_cnt, rsrc_ext_cnt;
5811 uint16_t size_diff, rsrc_ext_size;
5812 int rc = 0;
5813 struct lpfc_rsrc_blks *rsrc_entry;
5814 struct list_head *rsrc_blk_list = NULL;
5815
5816 size_diff = 0;
5817 curr_ext_cnt = 0;
5818 rc = lpfc_sli4_get_avail_extnt_rsrc(phba, type,
5819 &rsrc_ext_cnt,
5820 &rsrc_ext_size);
5821 if (unlikely(rc))
5822 return -EIO;
5823
5824 switch (type) {
5825 case LPFC_RSC_TYPE_FCOE_RPI:
5826 rsrc_blk_list = &phba->sli4_hba.lpfc_rpi_blk_list;
5827 break;
5828 case LPFC_RSC_TYPE_FCOE_VPI:
5829 rsrc_blk_list = &phba->lpfc_vpi_blk_list;
5830 break;
5831 case LPFC_RSC_TYPE_FCOE_XRI:
5832 rsrc_blk_list = &phba->sli4_hba.lpfc_xri_blk_list;
5833 break;
5834 case LPFC_RSC_TYPE_FCOE_VFI:
5835 rsrc_blk_list = &phba->sli4_hba.lpfc_vfi_blk_list;
5836 break;
5837 default:
5838 break;
5839 }
5840
5841 list_for_each_entry(rsrc_entry, rsrc_blk_list, list) {
5842 curr_ext_cnt++;
5843 if (rsrc_entry->rsrc_size != rsrc_ext_size)
5844 size_diff++;
5845 }
5846
5847 if (curr_ext_cnt != rsrc_ext_cnt || size_diff != 0)
5848 rc = 1;
5849
5850 return rc;
5851}
5852
5853 /**
5854  * lpfc_sli4_cfg_post_extnts - Post an extents allocation request.
5855  * @phba: Pointer to HBA context object.
5856  * @extnt_cnt: number of extents to request.
5857  * @type: the extent type (rpi, xri, vfi, vpi).
5858  * @emb: buffer to hold the MBX_EMBED or MBX_NEMBED flag that was used.
5859  * @mbox: pointer to the caller's allocated mailbox structure.
5860  *
5861  * This function executes the extents allocation request.  It decides
5862  * whether the request and response fit in an embedded mailbox or need
5863  * a non-embedded (SGE based) mailbox, sizes the DMA memory to match,
5864  * and issues the ALLOC_RSRC_EXTENT command either by polling or by
5865  * waiting on a completion, depending on whether interrupts are enabled.
5866  *
5867  * Returns 0 on success, -ENOMEM if the DMA memory was too small, or
5868  * -EIO if the mailbox command failed.
5869  **/
5870static int
5871lpfc_sli4_cfg_post_extnts(struct lpfc_hba *phba, uint16_t extnt_cnt,
5872 uint16_t type, bool *emb, LPFC_MBOXQ_t *mbox)
5873{
5874 int rc = 0;
5875 uint32_t req_len;
5876 uint32_t emb_len;
5877 uint32_t alloc_len, mbox_tmo;
5878
5879
5880 req_len = extnt_cnt * sizeof(uint16_t);
5881
5882
5883
5884
5885
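/*
 * Calculate the size of an embedded mailbox.  The uint32_t
 * accounts for the extents-specific word.
 */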
5886 emb_len = sizeof(MAILBOX_t) - sizeof(struct mbox_header) -
5887 sizeof(uint32_t);
5888
5889
5890
5891
5892
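/*
 * Presume the allocation and response will fit into an embedded
 * mailbox.  If not true, reconfigure to a non-embedded mailbox.
 */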
5893 *emb = LPFC_SLI4_MBX_EMBED;
5894 if (req_len > emb_len) {
5895 req_len = extnt_cnt * sizeof(uint16_t) +
5896 sizeof(union lpfc_sli4_cfg_shdr) +
5897 sizeof(uint32_t);
5898 *emb = LPFC_SLI4_MBX_NEMBED;
5899 }
5900
5901 alloc_len = lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON,
5902 LPFC_MBOX_OPCODE_ALLOC_RSRC_EXTENT,
5903 req_len, *emb);
5904 if (alloc_len < req_len) {
5905 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
5906 "2982 Allocated DMA memory size (x%x) is "
5907 "less than the requested DMA memory "
5908 "size (x%x)\n", alloc_len, req_len);
5909 return -ENOMEM;
5910 }
5911 rc = lpfc_sli4_mbox_rsrc_extent(phba, mbox, extnt_cnt, type, *emb);
5912 if (unlikely(rc))
5913 return -EIO;
5914
5915 if (!phba->sli4_hba.intr_enable)
5916 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
5917 else {
5918 mbox_tmo = lpfc_mbox_tmo_val(phba, mbox);
5919 rc = lpfc_sli_issue_mbox_wait(phba, mbox, mbox_tmo);
5920 }
5921
5922 if (unlikely(rc))
5923 rc = -EIO;
5924 return rc;
5925}
5926
5927 /**
5928  * lpfc_sli4_alloc_extent - Allocate an SLI4 resource extent.
5929  * @phba: Pointer to HBA context object.
5930  * @type: The resource extent type to allocate.
5931  *
5932  * This function allocates the port's available extents for the given
5933  * resource type and sets up the id bitmask and id array that track them.
5934  **/
5935static int
5936lpfc_sli4_alloc_extent(struct lpfc_hba *phba, uint16_t type)
5937{
5938 bool emb = false;
5939 uint16_t rsrc_id_cnt, rsrc_cnt, rsrc_size;
5940 uint16_t rsrc_id, rsrc_start, j, k;
5941 uint16_t *ids;
5942 int i, rc;
5943 unsigned long longs;
5944 unsigned long *bmask;
5945 struct lpfc_rsrc_blks *rsrc_blks;
5946 LPFC_MBOXQ_t *mbox;
5947 uint32_t length;
5948 struct lpfc_id_range *id_array = NULL;
5949 void *virtaddr = NULL;
5950 struct lpfc_mbx_nembed_rsrc_extent *n_rsrc;
5951 struct lpfc_mbx_alloc_rsrc_extents *rsrc_ext;
5952 struct list_head *ext_blk_list;
5953
5954 rc = lpfc_sli4_get_avail_extnt_rsrc(phba, type,
5955 &rsrc_cnt,
5956 &rsrc_size);
5957 if (unlikely(rc))
5958 return -EIO;
5959
5960 if ((rsrc_cnt == 0) || (rsrc_size == 0)) {
5961 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
5962 "3009 No available Resource Extents "
5963 "for resource type 0x%x: Count: 0x%x, "
5964 "Size 0x%x\n", type, rsrc_cnt,
5965 rsrc_size);
5966 return -ENOMEM;
5967 }
5968
5969 lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_INIT | LOG_SLI,
5970 "2903 Post resource extents type-0x%x: "
5971 "count:%d, size %d\n", type, rsrc_cnt, rsrc_size);
5972
5973 mbox = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
5974 if (!mbox)
5975 return -ENOMEM;
5976
5977 rc = lpfc_sli4_cfg_post_extnts(phba, rsrc_cnt, type, &emb, mbox);
5978 if (unlikely(rc)) {
5979 rc = -EIO;
5980 goto err_exit;
5981 }
5982
5983
5984
5985
5986
5987
5988
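/*
 * Figure out where the response is located.  Then get local pointers
 * to the response data.  The port does not guarantee to respond to
 * all extent count requests, so update the local variable with the
 * allocated count from the port.
 */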
5989 if (emb == LPFC_SLI4_MBX_EMBED) {
5990 rsrc_ext = &mbox->u.mqe.un.alloc_rsrc_extents;
5991 id_array = &rsrc_ext->u.rsp.id[0];
5992 rsrc_cnt = bf_get(lpfc_mbx_rsrc_cnt, &rsrc_ext->u.rsp);
5993 } else {
5994 virtaddr = mbox->sge_array->addr[0];
5995 n_rsrc = (struct lpfc_mbx_nembed_rsrc_extent *) virtaddr;
5996 rsrc_cnt = bf_get(lpfc_mbx_rsrc_cnt, n_rsrc);
5997 id_array = &n_rsrc->id;
5998 }
5999
6000 longs = ((rsrc_cnt * rsrc_size) + BITS_PER_LONG - 1) / BITS_PER_LONG;
6001 rsrc_id_cnt = rsrc_cnt * rsrc_size;
6002
6003
6004
6005
6006
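/*
 * Based on the resource size and count, correct the base and max
 * resource values.
 */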
6007 length = sizeof(struct lpfc_rsrc_blks);
6008 switch (type) {
6009 case LPFC_RSC_TYPE_FCOE_RPI:
6010 phba->sli4_hba.rpi_bmask = kcalloc(longs,
6011 sizeof(unsigned long),
6012 GFP_KERNEL);
6013 if (unlikely(!phba->sli4_hba.rpi_bmask)) {
6014 rc = -ENOMEM;
6015 goto err_exit;
6016 }
6017 phba->sli4_hba.rpi_ids = kcalloc(rsrc_id_cnt,
6018 sizeof(uint16_t),
6019 GFP_KERNEL);
6020 if (unlikely(!phba->sli4_hba.rpi_ids)) {
6021 kfree(phba->sli4_hba.rpi_bmask);
6022 rc = -ENOMEM;
6023 goto err_exit;
6024 }
6025
6026
6027
6028
6029
6030
6031 phba->sli4_hba.next_rpi = rsrc_id_cnt;
6032
6033
6034 bmask = phba->sli4_hba.rpi_bmask;
6035 ids = phba->sli4_hba.rpi_ids;
6036 ext_blk_list = &phba->sli4_hba.lpfc_rpi_blk_list;
6037 break;
6038 case LPFC_RSC_TYPE_FCOE_VPI:
6039 phba->vpi_bmask = kcalloc(longs, sizeof(unsigned long),
6040 GFP_KERNEL);
6041 if (unlikely(!phba->vpi_bmask)) {
6042 rc = -ENOMEM;
6043 goto err_exit;
6044 }
6045 phba->vpi_ids = kcalloc(rsrc_id_cnt, sizeof(uint16_t),
6046 GFP_KERNEL);
6047 if (unlikely(!phba->vpi_ids)) {
6048 kfree(phba->vpi_bmask);
6049 rc = -ENOMEM;
6050 goto err_exit;
6051 }
6052
6053
6054 bmask = phba->vpi_bmask;
6055 ids = phba->vpi_ids;
6056 ext_blk_list = &phba->lpfc_vpi_blk_list;
6057 break;
6058 case LPFC_RSC_TYPE_FCOE_XRI:
6059 phba->sli4_hba.xri_bmask = kcalloc(longs,
6060 sizeof(unsigned long),
6061 GFP_KERNEL);
6062 if (unlikely(!phba->sli4_hba.xri_bmask)) {
6063 rc = -ENOMEM;
6064 goto err_exit;
6065 }
6066 phba->sli4_hba.max_cfg_param.xri_used = 0;
6067 phba->sli4_hba.xri_ids = kcalloc(rsrc_id_cnt,
6068 sizeof(uint16_t),
6069 GFP_KERNEL);
6070 if (unlikely(!phba->sli4_hba.xri_ids)) {
6071 kfree(phba->sli4_hba.xri_bmask);
6072 rc = -ENOMEM;
6073 goto err_exit;
6074 }
6075
6076
6077 bmask = phba->sli4_hba.xri_bmask;
6078 ids = phba->sli4_hba.xri_ids;
6079 ext_blk_list = &phba->sli4_hba.lpfc_xri_blk_list;
6080 break;
6081 case LPFC_RSC_TYPE_FCOE_VFI:
6082 phba->sli4_hba.vfi_bmask = kcalloc(longs,
6083 sizeof(unsigned long),
6084 GFP_KERNEL);
6085 if (unlikely(!phba->sli4_hba.vfi_bmask)) {
6086 rc = -ENOMEM;
6087 goto err_exit;
6088 }
6089 phba->sli4_hba.vfi_ids = kcalloc(rsrc_id_cnt,
6090 sizeof(uint16_t),
6091 GFP_KERNEL);
6092 if (unlikely(!phba->sli4_hba.vfi_ids)) {
6093 kfree(phba->sli4_hba.vfi_bmask);
6094 rc = -ENOMEM;
6095 goto err_exit;
6096 }
6097
6098
6099 bmask = phba->sli4_hba.vfi_bmask;
6100 ids = phba->sli4_hba.vfi_ids;
6101 ext_blk_list = &phba->sli4_hba.lpfc_vfi_blk_list;
6102 break;
6103 default:
6104
6105 id_array = NULL;
6106 bmask = NULL;
6107 ids = NULL;
6108 ext_blk_list = NULL;
6109 goto err_exit;
6110 }
6111
6112
6113
6114
6115
6116
6117
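/*
 * Complete initializing the extent configuration with the allocated
 * ids assigned to this function.  The bitmask serves as an index into
 * the array and manages the available ids.  Two extent ids are packed
 * into each response word, hence the parallel i/k walk below.
 */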
6118 for (i = 0, j = 0, k = 0; i < rsrc_cnt; i++) {
6119 if ((i % 2) == 0)
6120 rsrc_id = bf_get(lpfc_mbx_rsrc_id_word4_0,
6121 &id_array[k]);
6122 else
6123 rsrc_id = bf_get(lpfc_mbx_rsrc_id_word4_1,
6124 &id_array[k]);
6125
6126 rsrc_blks = kzalloc(length, GFP_KERNEL);
6127 if (unlikely(!rsrc_blks)) {
6128 rc = -ENOMEM;
6129 kfree(bmask);
6130 kfree(ids);
6131 goto err_exit;
6132 }
6133 rsrc_blks->rsrc_start = rsrc_id;
6134 rsrc_blks->rsrc_size = rsrc_size;
6135 list_add_tail(&rsrc_blks->list, ext_blk_list);
6136 rsrc_start = rsrc_id;
6137 if ((type == LPFC_RSC_TYPE_FCOE_XRI) && (j == 0)) {
6138 phba->sli4_hba.io_xri_start = rsrc_start +
6139 lpfc_sli4_get_iocb_cnt(phba);
6140 }
6141
6142 while (rsrc_id < (rsrc_start + rsrc_size)) {
6143 ids[j] = rsrc_id;
6144 rsrc_id++;
6145 j++;
6146 }
6147
6148 if ((i % 2) == 1)
6149 k++;
6150 }
6151 err_exit:
6152 lpfc_sli4_mbox_cmd_free(phba, mbox);
6153 return rc;
6154}
6155
6156 /**
6157  * lpfc_sli4_dealloc_extent - Deallocate an SLI4 resource extent.
6158  * @phba: Pointer to HBA context object.
6159  * @type: The resource extent type to release.
6160  *
6161  * This function issues a DEALLOC_RSRC_EXTENT mailbox command to return
6162  * the extents of the given type to the port, then releases the
6163  * host-side bookkeeping kept for that type.
6164  *
6165  * Returns 0 on success; -ENOMEM or -EIO otherwise.
6166  **/
6167static int
6168lpfc_sli4_dealloc_extent(struct lpfc_hba *phba, uint16_t type)
6169{
6170 int rc;
6171 uint32_t length, mbox_tmo = 0;
6172 LPFC_MBOXQ_t *mbox;
6173 struct lpfc_mbx_dealloc_rsrc_extents *dealloc_rsrc;
6174 struct lpfc_rsrc_blks *rsrc_blk, *rsrc_blk_next;
6175
6176 mbox = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
6177 if (!mbox)
6178 return -ENOMEM;
6179
6180
6181
6182
6183
6184
6185 length = (sizeof(struct lpfc_mbx_dealloc_rsrc_extents) -
6186 sizeof(struct lpfc_sli4_cfg_mhdr));
6187 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON,
6188 LPFC_MBOX_OPCODE_DEALLOC_RSRC_EXTENT,
6189 length, LPFC_SLI4_MBX_EMBED);
6190
6191
6192 rc = lpfc_sli4_mbox_rsrc_extent(phba, mbox, 0, type,
6193 LPFC_SLI4_MBX_EMBED);
6194 if (unlikely(rc)) {
6195 rc = -EIO;
6196 goto out_free_mbox;
6197 }
6198 if (!phba->sli4_hba.intr_enable)
6199 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
6200 else {
6201 mbox_tmo = lpfc_mbox_tmo_val(phba, mbox);
6202 rc = lpfc_sli_issue_mbox_wait(phba, mbox, mbox_tmo);
6203 }
6204 if (unlikely(rc)) {
6205 rc = -EIO;
6206 goto out_free_mbox;
6207 }
6208
6209 dealloc_rsrc = &mbox->u.mqe.un.dealloc_rsrc_extents;
6210 if (bf_get(lpfc_mbox_hdr_status,
6211 &dealloc_rsrc->header.cfg_shdr.response)) {
6212 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
6213 "2919 Failed to release resource extents "
6214 "for type %d - Status 0x%x Add'l Status 0x%x. "
6215 "Resource memory not released.\n",
6216 type,
6217 bf_get(lpfc_mbox_hdr_status,
6218 &dealloc_rsrc->header.cfg_shdr.response),
6219 bf_get(lpfc_mbox_hdr_add_status,
6220 &dealloc_rsrc->header.cfg_shdr.response));
6221 rc = -EIO;
6222 goto out_free_mbox;
6223 }
6224
6225
6226 switch (type) {
6227 case LPFC_RSC_TYPE_FCOE_VPI:
6228 kfree(phba->vpi_bmask);
6229 kfree(phba->vpi_ids);
6230 bf_set(lpfc_vpi_rsrc_rdy, &phba->sli4_hba.sli4_flags, 0);
6231 list_for_each_entry_safe(rsrc_blk, rsrc_blk_next,
6232 &phba->lpfc_vpi_blk_list, list) {
6233 list_del_init(&rsrc_blk->list);
6234 kfree(rsrc_blk);
6235 }
6236 phba->sli4_hba.max_cfg_param.vpi_used = 0;
6237 break;
6238 case LPFC_RSC_TYPE_FCOE_XRI:
6239 kfree(phba->sli4_hba.xri_bmask);
6240 kfree(phba->sli4_hba.xri_ids);
6241 list_for_each_entry_safe(rsrc_blk, rsrc_blk_next,
6242 &phba->sli4_hba.lpfc_xri_blk_list, list) {
6243 list_del_init(&rsrc_blk->list);
6244 kfree(rsrc_blk);
6245 }
6246 break;
6247 case LPFC_RSC_TYPE_FCOE_VFI:
6248 kfree(phba->sli4_hba.vfi_bmask);
6249 kfree(phba->sli4_hba.vfi_ids);
6250 bf_set(lpfc_vfi_rsrc_rdy, &phba->sli4_hba.sli4_flags, 0);
6251 list_for_each_entry_safe(rsrc_blk, rsrc_blk_next,
6252 &phba->sli4_hba.lpfc_vfi_blk_list, list) {
6253 list_del_init(&rsrc_blk->list);
6254 kfree(rsrc_blk);
6255 }
6256 break;
6257 case LPFC_RSC_TYPE_FCOE_RPI:
6258
6259 list_for_each_entry_safe(rsrc_blk, rsrc_blk_next,
6260 &phba->sli4_hba.lpfc_rpi_blk_list, list) {
6261 list_del_init(&rsrc_blk->list);
6262 kfree(rsrc_blk);
6263 }
6264 break;
6265 default:
6266 break;
6267 }
6268
6269 bf_set(lpfc_idx_rsrc_rdy, &phba->sli4_hba.sli4_flags, 0);
6270
6271 out_free_mbox:
6272 mempool_free(mbox, phba->mbox_mem_pool);
6273 return rc;
6274}
6275
6276static void
6277lpfc_set_features(struct lpfc_hba *phba, LPFC_MBOXQ_t *mbox,
6278 uint32_t feature)
6279{
6280 uint32_t len;
6281
6282 len = sizeof(struct lpfc_mbx_set_feature) -
6283 sizeof(struct lpfc_sli4_cfg_mhdr);
6284 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON,
6285 LPFC_MBOX_OPCODE_SET_FEATURES, len,
6286 LPFC_SLI4_MBX_EMBED);
6287
6288 switch (feature) {
6289 case LPFC_SET_UE_RECOVERY:
6290 bf_set(lpfc_mbx_set_feature_UER,
6291 &mbox->u.mqe.un.set_feature, 1);
6292 mbox->u.mqe.un.set_feature.feature = LPFC_SET_UE_RECOVERY;
6293 mbox->u.mqe.un.set_feature.param_len = 8;
6294 break;
6295 case LPFC_SET_MDS_DIAGS:
6296 bf_set(lpfc_mbx_set_feature_mds,
6297 &mbox->u.mqe.un.set_feature, 1);
6298 bf_set(lpfc_mbx_set_feature_mds_deep_loopbk,
6299 &mbox->u.mqe.un.set_feature, 1);
6300 mbox->u.mqe.un.set_feature.feature = LPFC_SET_MDS_DIAGS;
6301 mbox->u.mqe.un.set_feature.param_len = 8;
6302 break;
6303 case LPFC_SET_DUAL_DUMP:
6304 bf_set(lpfc_mbx_set_feature_dd,
6305 &mbox->u.mqe.un.set_feature, LPFC_ENABLE_DUAL_DUMP);
6306 bf_set(lpfc_mbx_set_feature_ddquery,
6307 &mbox->u.mqe.un.set_feature, 0);
6308 mbox->u.mqe.un.set_feature.feature = LPFC_SET_DUAL_DUMP;
6309 mbox->u.mqe.un.set_feature.param_len = 4;
6310 break;
6311 }
6312
6313 return;
6314}
6315
6316 /**
6317  * lpfc_ras_stop_fwlog - Disable FW logging by the adapter
6318  * @phba: Pointer to HBA context object.
6319  *
6320  * Disable FW logging into host memory on the adapter.  To
6321  * be done before reading logs from the host memory.
6322  **/
6323void
6324lpfc_ras_stop_fwlog(struct lpfc_hba *phba)
6325{
6326 struct lpfc_ras_fwlog *ras_fwlog = &phba->ras_fwlog;
6327
6328 spin_lock_irq(&phba->hbalock);
6329 ras_fwlog->state = INACTIVE;
6330 spin_unlock_irq(&phba->hbalock);
6331
6332
6333 writel(LPFC_CTL_PDEV_CTL_DDL_RAS,
6334 phba->sli4_hba.conf_regs_memmap_p + LPFC_CTL_PDEV_CTL_OFFSET);
6335
6336
6337 usleep_range(10 * 1000, 20 * 1000);
6338}
6339
6340 /**
6341  * lpfc_sli4_ras_dma_free - Free memory allocated for FW logging.
6342  * @phba: Pointer to HBA context object.
6343  *
6344  * This function frees the DMA buffers on the firmware logging buffer
6345  * list as well as the LWPD buffer, and marks the RAS state INACTIVE.
6346  **/
6347void
6348lpfc_sli4_ras_dma_free(struct lpfc_hba *phba)
6349{
6350 struct lpfc_ras_fwlog *ras_fwlog = &phba->ras_fwlog;
6351 struct lpfc_dmabuf *dmabuf, *next;
6352
6353 if (!list_empty(&ras_fwlog->fwlog_buff_list)) {
6354 list_for_each_entry_safe(dmabuf, next,
6355 &ras_fwlog->fwlog_buff_list,
6356 list) {
6357 list_del(&dmabuf->list);
6358 dma_free_coherent(&phba->pcidev->dev,
6359 LPFC_RAS_MAX_ENTRY_SIZE,
6360 dmabuf->virt, dmabuf->phys);
6361 kfree(dmabuf);
6362 }
6363 }
6364
6365 if (ras_fwlog->lwpd.virt) {
6366 dma_free_coherent(&phba->pcidev->dev,
6367 sizeof(uint32_t) * 2,
6368 ras_fwlog->lwpd.virt,
6369 ras_fwlog->lwpd.phys);
6370 ras_fwlog->lwpd.virt = NULL;
6371 }
6372
6373 spin_lock_irq(&phba->hbalock);
6374 ras_fwlog->state = INACTIVE;
6375 spin_unlock_irq(&phba->hbalock);
6376}
6377
6378 /**
6379  * lpfc_sli4_ras_dma_alloc - Allocate memory for FW logging support
6380  * @phba: Pointer to HBA context object.
6381  * @fwlog_buff_count: Count of buffers to be created.
6382  *
6383  * This routine DMA-maps the log Write Position Data (LWPD) and
6384  * allocates @fwlog_buff_count DMA buffers for the firmware to log
6385  * into, chaining them on the fwlog_buff_list.
6386  * Returns 0 on success; -ENOMEM on any allocation failure (partial
6387  * allocations are freed before returning).
6388  **/
6389static int
6390lpfc_sli4_ras_dma_alloc(struct lpfc_hba *phba,
6391 uint32_t fwlog_buff_count)
6392{
6393 struct lpfc_ras_fwlog *ras_fwlog = &phba->ras_fwlog;
6394 struct lpfc_dmabuf *dmabuf;
6395 int rc = 0, i = 0;
6396
6397
6398 INIT_LIST_HEAD(&ras_fwlog->fwlog_buff_list);
6399
6400
6401 ras_fwlog->lwpd.virt = dma_alloc_coherent(&phba->pcidev->dev,
6402 sizeof(uint32_t) * 2,
6403 &ras_fwlog->lwpd.phys,
6404 GFP_KERNEL);
6405 if (!ras_fwlog->lwpd.virt) {
6406 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
6407 "6185 LWPD Memory Alloc Failed\n");
6408
6409 return -ENOMEM;
6410 }
6411
6412 ras_fwlog->fw_buffcount = fwlog_buff_count;
6413 for (i = 0; i < ras_fwlog->fw_buffcount; i++) {
6414 dmabuf = kzalloc(sizeof(struct lpfc_dmabuf),
6415 GFP_KERNEL);
6416 if (!dmabuf) {
6417 rc = -ENOMEM;
6418 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
6419 "6186 Memory Alloc failed FW logging");
6420 goto free_mem;
6421 }
6422
6423 dmabuf->virt = dma_alloc_coherent(&phba->pcidev->dev,
6424 LPFC_RAS_MAX_ENTRY_SIZE,
6425 &dmabuf->phys, GFP_KERNEL);
6426 if (!dmabuf->virt) {
6427 kfree(dmabuf);
6428 rc = -ENOMEM;
6429 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
6430 "6187 DMA Alloc Failed FW logging");
6431 goto free_mem;
6432 }
6433 dmabuf->buffer_tag = i;
6434 list_add_tail(&dmabuf->list, &ras_fwlog->fwlog_buff_list);
6435 }
6436
6437free_mem:
6438 if (rc)
6439 lpfc_sli4_ras_dma_free(phba);
6440
6441 return rc;
6442}
6443
6444 /**
6445  * lpfc_sli4_ras_mbox_cmpl - Completion handler for RAS mailbox command
6446  * @phba: pointer to lpfc hba data structure.
6447  * @pmb: pointer to the driver internal queue element for mailbox command.
6448  *
6449  * On success, marks firmware logging ACTIVE; otherwise disables RAS.
6450  **/
6451static void
6452lpfc_sli4_ras_mbox_cmpl(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
6453{
6454 MAILBOX_t *mb;
6455 union lpfc_sli4_cfg_shdr *shdr;
6456 uint32_t shdr_status, shdr_add_status;
6457 struct lpfc_ras_fwlog *ras_fwlog = &phba->ras_fwlog;
6458
6459 mb = &pmb->u.mb;
6460
6461 shdr = (union lpfc_sli4_cfg_shdr *)
6462 &pmb->u.mqe.un.ras_fwlog.header.cfg_shdr;
6463 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
6464 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
6465
6466 if (mb->mbxStatus != MBX_SUCCESS || shdr_status) {
6467 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
6468 "6188 FW LOG mailbox "
6469 "completed with status x%x add_status x%x,"
6470 " mbx status x%x\n",
6471 shdr_status, shdr_add_status, mb->mbxStatus);
6472
6473 ras_fwlog->ras_hwsupport = false;
6474 goto disable_ras;
6475 }
6476
6477 spin_lock_irq(&phba->hbalock);
6478 ras_fwlog->state = ACTIVE;
6479 spin_unlock_irq(&phba->hbalock);
6480 mempool_free(pmb, phba->mbox_mem_pool);
6481
6482 return;
6483
6484disable_ras:
6485
6486 lpfc_sli4_ras_dma_free(phba);
6487 mempool_free(pmb, phba->mbox_mem_pool);
6488}
6489
6490 /**
6491  * lpfc_sli4_ras_fwlog_init - Initialize memory and post the RAS mailbox
6492  * @phba: pointer to lpfc hba data structure.
6493  * @fwlog_level: firmware logging verbosity level.
6494  * @fwlog_enable: enable/disable firmware logging.
6495  *
6496  * Allocates the logging buffers if needed and issues the
6497  * SET_DIAG_LOG_OPTION mailbox command to register them with the port.
6498  **/
6499int
6500lpfc_sli4_ras_fwlog_init(struct lpfc_hba *phba,
6501 uint32_t fwlog_level,
6502 uint32_t fwlog_enable)
6503{
6504 struct lpfc_ras_fwlog *ras_fwlog = &phba->ras_fwlog;
6505 struct lpfc_mbx_set_ras_fwlog *mbx_fwlog = NULL;
6506 struct lpfc_dmabuf *dmabuf;
6507 LPFC_MBOXQ_t *mbox;
6508 uint32_t len = 0, fwlog_buffsize, fwlog_entry_count;
6509 int rc = 0;
6510
6511 spin_lock_irq(&phba->hbalock);
6512 ras_fwlog->state = INACTIVE;
6513 spin_unlock_irq(&phba->hbalock);
6514
6515 fwlog_buffsize = (LPFC_RAS_MIN_BUFF_POST_SIZE *
6516 phba->cfg_ras_fwlog_buffsize);
6517 fwlog_entry_count = (fwlog_buffsize/LPFC_RAS_MAX_ENTRY_SIZE);
6518
6519
6520
6521
6522
6523 if (!ras_fwlog->lwpd.virt) {
6524 rc = lpfc_sli4_ras_dma_alloc(phba, fwlog_entry_count);
6525 if (rc) {
6526 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
6527 "6189 FW Log Memory Allocation Failed");
6528 return rc;
6529 }
6530 }
6531
6532
6533 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
6534 if (!mbox) {
6535 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
6536 "6190 RAS MBX Alloc Failed");
6537 rc = -ENOMEM;
6538 goto mem_free;
6539 }
6540
6541 ras_fwlog->fw_loglevel = fwlog_level;
6542 len = (sizeof(struct lpfc_mbx_set_ras_fwlog) -
6543 sizeof(struct lpfc_sli4_cfg_mhdr));
6544
6545 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_LOWLEVEL,
6546 LPFC_MBOX_OPCODE_SET_DIAG_LOG_OPTION,
6547 len, LPFC_SLI4_MBX_EMBED);
6548
6549 mbx_fwlog = (struct lpfc_mbx_set_ras_fwlog *)&mbox->u.mqe.un.ras_fwlog;
6550 bf_set(lpfc_fwlog_enable, &mbx_fwlog->u.request,
6551 fwlog_enable);
6552 bf_set(lpfc_fwlog_loglvl, &mbx_fwlog->u.request,
6553 ras_fwlog->fw_loglevel);
6554 bf_set(lpfc_fwlog_buffcnt, &mbx_fwlog->u.request,
6555 ras_fwlog->fw_buffcount);
6556 bf_set(lpfc_fwlog_buffsz, &mbx_fwlog->u.request,
6557 LPFC_RAS_MAX_ENTRY_SIZE/SLI4_PAGE_SIZE);
6558
6559
6560 list_for_each_entry(dmabuf, &ras_fwlog->fwlog_buff_list, list) {
6561 memset(dmabuf->virt, 0, LPFC_RAS_MAX_ENTRY_SIZE);
6562
6563 mbx_fwlog->u.request.buff_fwlog[dmabuf->buffer_tag].addr_lo =
6564 putPaddrLow(dmabuf->phys);
6565
6566 mbx_fwlog->u.request.buff_fwlog[dmabuf->buffer_tag].addr_hi =
6567 putPaddrHigh(dmabuf->phys);
6568 }
6569
6570
6571 mbx_fwlog->u.request.lwpd.addr_lo = putPaddrLow(ras_fwlog->lwpd.phys);
6572 mbx_fwlog->u.request.lwpd.addr_hi = putPaddrHigh(ras_fwlog->lwpd.phys);
6573
6574 spin_lock_irq(&phba->hbalock);
6575 ras_fwlog->state = REG_INPROGRESS;
6576 spin_unlock_irq(&phba->hbalock);
6577 mbox->vport = phba->pport;
6578 mbox->mbox_cmpl = lpfc_sli4_ras_mbox_cmpl;
6579
6580 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT);
6581
6582 if (rc == MBX_NOT_FINISHED) {
6583 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
6584 "6191 FW-Log Mailbox failed. "
6585 "status %d mbxStatus : x%x", rc,
6586 bf_get(lpfc_mqe_status, &mbox->u.mqe));
6587 mempool_free(mbox, phba->mbox_mem_pool);
6588 rc = -EIO;
6589 goto mem_free;
6590 } else
6591 rc = 0;
6592mem_free:
6593 if (rc)
6594 lpfc_sli4_ras_dma_free(phba);
6595
6596 return rc;
6597}
6598
6599 /**
6600  * lpfc_sli4_ras_setup - Check if RAS is supported and enable firmware logging
6601  * @phba: Pointer to HBA context object.
6602  *
6603  * Checks whether the adapter supports firmware logging and, if so, enables it.
6604  **/
6605void
6606lpfc_sli4_ras_setup(struct lpfc_hba *phba)
6607{
6608
6609 if (lpfc_check_fwlog_support(phba))
6610 return;
6611
6612 lpfc_sli4_ras_fwlog_init(phba, phba->cfg_ras_fwlog_level,
6613 LPFC_RAS_ENABLE_LOGGING);
6614}
6615
6616 /**
6617  * lpfc_sli4_alloc_resource_identifiers - Allocate all SLI4 resource ids.
6618  * @phba: Pointer to HBA context object.
6619  *
6620  * Allocates all SLI4 resource ids (RPI/VPI/XRI/VFI), with or without extents.
6621  **/
6622int
6623lpfc_sli4_alloc_resource_identifiers(struct lpfc_hba *phba)
6624{
6625 int i, rc, error = 0;
6626 uint16_t count, base;
6627 unsigned long longs;
6628
6629 if (!phba->sli4_hba.rpi_hdrs_in_use)
6630 phba->sli4_hba.next_rpi = phba->sli4_hba.max_cfg_param.max_rpi;
6631 if (phba->sli4_hba.extents_in_use) {
6632
6633
6634
6635
6636
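/*
 * The port supports resource extents.  If the ids were already set
 * up (post reset), just verify that the port's extent counts and
 * sizes still match what was previously allocated.
 */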
6637 if (bf_get(lpfc_idx_rsrc_rdy, &phba->sli4_hba.sli4_flags) ==
6638 LPFC_IDX_RSRC_RDY) {
6639
6640
6641
6642
6643
6644 rc = lpfc_sli4_chk_avail_extnt_rsrc(phba,
6645 LPFC_RSC_TYPE_FCOE_VFI);
6646 if (rc != 0)
6647 error++;
6648 rc = lpfc_sli4_chk_avail_extnt_rsrc(phba,
6649 LPFC_RSC_TYPE_FCOE_VPI);
6650 if (rc != 0)
6651 error++;
6652 rc = lpfc_sli4_chk_avail_extnt_rsrc(phba,
6653 LPFC_RSC_TYPE_FCOE_XRI);
6654 if (rc != 0)
6655 error++;
6656 rc = lpfc_sli4_chk_avail_extnt_rsrc(phba,
6657 LPFC_RSC_TYPE_FCOE_RPI);
6658 if (rc != 0)
6659 error++;
6660
6661
6662
6663
6664
6665
6666
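/*
 * It's possible that the number of resources provisioned to this
 * function changed after a reset.  Detect that case here and
 * reallocate all extents when it happens.
 */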
6667 if (error) {
6668 lpfc_printf_log(phba, KERN_INFO,
6669 LOG_MBOX | LOG_INIT,
6670 "2931 Detected extent resource "
6671 "change. Reallocating all "
6672 "extents.\n");
6673 rc = lpfc_sli4_dealloc_extent(phba,
6674 LPFC_RSC_TYPE_FCOE_VFI);
6675 rc = lpfc_sli4_dealloc_extent(phba,
6676 LPFC_RSC_TYPE_FCOE_VPI);
6677 rc = lpfc_sli4_dealloc_extent(phba,
6678 LPFC_RSC_TYPE_FCOE_XRI);
6679 rc = lpfc_sli4_dealloc_extent(phba,
6680 LPFC_RSC_TYPE_FCOE_RPI);
6681 } else
6682 return 0;
6683 }
6684
6685 rc = lpfc_sli4_alloc_extent(phba, LPFC_RSC_TYPE_FCOE_VFI);
6686 if (unlikely(rc))
6687 goto err_exit;
6688
6689 rc = lpfc_sli4_alloc_extent(phba, LPFC_RSC_TYPE_FCOE_VPI);
6690 if (unlikely(rc))
6691 goto err_exit;
6692
6693 rc = lpfc_sli4_alloc_extent(phba, LPFC_RSC_TYPE_FCOE_RPI);
6694 if (unlikely(rc))
6695 goto err_exit;
6696
6697 rc = lpfc_sli4_alloc_extent(phba, LPFC_RSC_TYPE_FCOE_XRI);
6698 if (unlikely(rc))
6699 goto err_exit;
6700 bf_set(lpfc_idx_rsrc_rdy, &phba->sli4_hba.sli4_flags,
6701 LPFC_IDX_RSRC_RDY);
6702 return rc;
6703 } else {
6704
6705
6706
6707
6708
6709
6710
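/*
 * The port does not support resource extents.  The XRI, VPI, VFI,
 * RPI resource ids are determined from the maximum counts and base
 * values obtained from the port with READ_CONFIG.
 */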
6711 if (bf_get(lpfc_idx_rsrc_rdy, &phba->sli4_hba.sli4_flags) ==
6712 LPFC_IDX_RSRC_RDY) {
6713 lpfc_sli4_dealloc_resource_identifiers(phba);
6714 lpfc_sli4_remove_rpis(phba);
6715 }
6716
6717 count = phba->sli4_hba.max_cfg_param.max_rpi;
6718 if (count <= 0) {
6719 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
6720 "3279 Invalid provisioning of "
6721 "rpi:%d\n", count);
6722 rc = -EINVAL;
6723 goto err_exit;
6724 }
6725 base = phba->sli4_hba.max_cfg_param.rpi_base;
6726 longs = (count + BITS_PER_LONG - 1) / BITS_PER_LONG;
6727 phba->sli4_hba.rpi_bmask = kcalloc(longs,
6728 sizeof(unsigned long),
6729 GFP_KERNEL);
6730 if (unlikely(!phba->sli4_hba.rpi_bmask)) {
6731 rc = -ENOMEM;
6732 goto err_exit;
6733 }
6734 phba->sli4_hba.rpi_ids = kcalloc(count, sizeof(uint16_t),
6735 GFP_KERNEL);
6736 if (unlikely(!phba->sli4_hba.rpi_ids)) {
6737 rc = -ENOMEM;
6738 goto free_rpi_bmask;
6739 }
6740
6741 for (i = 0; i < count; i++)
6742 phba->sli4_hba.rpi_ids[i] = base + i;
6743
6744
6745 count = phba->sli4_hba.max_cfg_param.max_vpi;
6746 if (count <= 0) {
6747 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
6748 "3280 Invalid provisioning of "
6749 "vpi:%d\n", count);
6750 rc = -EINVAL;
6751 goto free_rpi_ids;
6752 }
6753 base = phba->sli4_hba.max_cfg_param.vpi_base;
6754 longs = (count + BITS_PER_LONG - 1) / BITS_PER_LONG;
6755 phba->vpi_bmask = kcalloc(longs, sizeof(unsigned long),
6756 GFP_KERNEL);
6757 if (unlikely(!phba->vpi_bmask)) {
6758 rc = -ENOMEM;
6759 goto free_rpi_ids;
6760 }
6761 phba->vpi_ids = kcalloc(count, sizeof(uint16_t),
6762 GFP_KERNEL);
6763 if (unlikely(!phba->vpi_ids)) {
6764 rc = -ENOMEM;
6765 goto free_vpi_bmask;
6766 }
6767
6768 for (i = 0; i < count; i++)
6769 phba->vpi_ids[i] = base + i;
6770
6771
6772 count = phba->sli4_hba.max_cfg_param.max_xri;
6773 if (count <= 0) {
6774 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
6775 "3281 Invalid provisioning of "
6776 "xri:%d\n", count);
6777 rc = -EINVAL;
6778 goto free_vpi_ids;
6779 }
6780 base = phba->sli4_hba.max_cfg_param.xri_base;
6781 longs = (count + BITS_PER_LONG - 1) / BITS_PER_LONG;
6782 phba->sli4_hba.xri_bmask = kcalloc(longs,
6783 sizeof(unsigned long),
6784 GFP_KERNEL);
6785 if (unlikely(!phba->sli4_hba.xri_bmask)) {
6786 rc = -ENOMEM;
6787 goto free_vpi_ids;
6788 }
6789 phba->sli4_hba.max_cfg_param.xri_used = 0;
6790 phba->sli4_hba.xri_ids = kcalloc(count, sizeof(uint16_t),
6791 GFP_KERNEL);
6792 if (unlikely(!phba->sli4_hba.xri_ids)) {
6793 rc = -ENOMEM;
6794 goto free_xri_bmask;
6795 }
6796
6797 for (i = 0; i < count; i++)
6798 phba->sli4_hba.xri_ids[i] = base + i;
6799
6800
6801 count = phba->sli4_hba.max_cfg_param.max_vfi;
6802 if (count <= 0) {
6803 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
6804 "3282 Invalid provisioning of "
6805 "vfi:%d\n", count);
6806 rc = -EINVAL;
6807 goto free_xri_ids;
6808 }
6809 base = phba->sli4_hba.max_cfg_param.vfi_base;
6810 longs = (count + BITS_PER_LONG - 1) / BITS_PER_LONG;
6811 phba->sli4_hba.vfi_bmask = kcalloc(longs,
6812 sizeof(unsigned long),
6813 GFP_KERNEL);
6814 if (unlikely(!phba->sli4_hba.vfi_bmask)) {
6815 rc = -ENOMEM;
6816 goto free_xri_ids;
6817 }
6818 phba->sli4_hba.vfi_ids = kcalloc(count, sizeof(uint16_t),
6819 GFP_KERNEL);
6820 if (unlikely(!phba->sli4_hba.vfi_ids)) {
6821 rc = -ENOMEM;
6822 goto free_vfi_bmask;
6823 }
6824
6825 for (i = 0; i < count; i++)
6826 phba->sli4_hba.vfi_ids[i] = base + i;
6827
6828
6829
6830
6831
6832 bf_set(lpfc_idx_rsrc_rdy, &phba->sli4_hba.sli4_flags,
6833 LPFC_IDX_RSRC_RDY);
6834 return 0;
6835 }
6836
6837 free_vfi_bmask:
6838 kfree(phba->sli4_hba.vfi_bmask);
6839 phba->sli4_hba.vfi_bmask = NULL;
6840 free_xri_ids:
6841 kfree(phba->sli4_hba.xri_ids);
6842 phba->sli4_hba.xri_ids = NULL;
6843 free_xri_bmask:
6844 kfree(phba->sli4_hba.xri_bmask);
6845 phba->sli4_hba.xri_bmask = NULL;
6846 free_vpi_ids:
6847 kfree(phba->vpi_ids);
6848 phba->vpi_ids = NULL;
6849 free_vpi_bmask:
6850 kfree(phba->vpi_bmask);
6851 phba->vpi_bmask = NULL;
6852 free_rpi_ids:
6853 kfree(phba->sli4_hba.rpi_ids);
6854 phba->sli4_hba.rpi_ids = NULL;
6855 free_rpi_bmask:
6856 kfree(phba->sli4_hba.rpi_bmask);
6857 phba->sli4_hba.rpi_bmask = NULL;
6858 err_exit:
6859 return rc;
6860}
6861
6862 /**
6863  * lpfc_sli4_dealloc_resource_identifiers - Release all SLI4 resource ids.
6864  * @phba: Pointer to HBA context object.
6865  *
6866  * This function releases all SLI4 resource identifiers, returning the
6867  * extents to the port when extents are in use.
6868  **/
6869int
6870lpfc_sli4_dealloc_resource_identifiers(struct lpfc_hba *phba)
6871{
6872 if (phba->sli4_hba.extents_in_use) {
6873 lpfc_sli4_dealloc_extent(phba, LPFC_RSC_TYPE_FCOE_VPI);
6874 lpfc_sli4_dealloc_extent(phba, LPFC_RSC_TYPE_FCOE_RPI);
6875 lpfc_sli4_dealloc_extent(phba, LPFC_RSC_TYPE_FCOE_XRI);
6876 lpfc_sli4_dealloc_extent(phba, LPFC_RSC_TYPE_FCOE_VFI);
6877 } else {
6878 kfree(phba->vpi_bmask);
6879 phba->sli4_hba.max_cfg_param.vpi_used = 0;
6880 kfree(phba->vpi_ids);
6881 bf_set(lpfc_vpi_rsrc_rdy, &phba->sli4_hba.sli4_flags, 0);
6882 kfree(phba->sli4_hba.xri_bmask);
6883 kfree(phba->sli4_hba.xri_ids);
6884 kfree(phba->sli4_hba.vfi_bmask);
6885 kfree(phba->sli4_hba.vfi_ids);
6886 bf_set(lpfc_vfi_rsrc_rdy, &phba->sli4_hba.sli4_flags, 0);
6887 bf_set(lpfc_idx_rsrc_rdy, &phba->sli4_hba.sli4_flags, 0);
6888 }
6889
6890 return 0;
6891}
6892
6893 /**
6894  * lpfc_sli4_get_allocated_extnts - Read the port's allocated extents.
6895  * @phba: Pointer to HBA context object.
6896  * @type: The resource extent type.
6897  * @extnt_cnt: buffer to hold the port extent count response.
6898  * @extnt_size: buffer to hold the port extent size response.
6899  *
6900  * This function issues a GET_ALLOC_RSRC_EXTENT mailbox command to read
6901  * the extents already allocated to this function for a resource type.
6902  **/
6903int
6904lpfc_sli4_get_allocated_extnts(struct lpfc_hba *phba, uint16_t type,
6905 uint16_t *extnt_cnt, uint16_t *extnt_size)
6906{
6907 bool emb;
6908 int rc = 0;
6909 uint16_t curr_blks = 0;
6910 uint32_t req_len, emb_len;
6911 uint32_t alloc_len, mbox_tmo;
6912 struct list_head *blk_list_head;
6913 struct lpfc_rsrc_blks *rsrc_blk;
6914 LPFC_MBOXQ_t *mbox;
6915 void *virtaddr = NULL;
6916 struct lpfc_mbx_nembed_rsrc_extent *n_rsrc;
6917 struct lpfc_mbx_alloc_rsrc_extents *rsrc_ext;
6918 union lpfc_sli4_cfg_shdr *shdr;
6919
6920 switch (type) {
6921 case LPFC_RSC_TYPE_FCOE_VPI:
6922 blk_list_head = &phba->lpfc_vpi_blk_list;
6923 break;
6924 case LPFC_RSC_TYPE_FCOE_XRI:
6925 blk_list_head = &phba->sli4_hba.lpfc_xri_blk_list;
6926 break;
6927 case LPFC_RSC_TYPE_FCOE_VFI:
6928 blk_list_head = &phba->sli4_hba.lpfc_vfi_blk_list;
6929 break;
6930 case LPFC_RSC_TYPE_FCOE_RPI:
6931 blk_list_head = &phba->sli4_hba.lpfc_rpi_blk_list;
6932 break;
6933 default:
6934 return -EIO;
6935 }
6936
6937
6938 list_for_each_entry(rsrc_blk, blk_list_head, list) {
6939 if (curr_blks == 0) {
6940
6941
6942
6943
6944
6945
6946
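/*
 * The GET_ALLOCATED mailbox does not return the size, just the
 * count.  All sizes for an extent type are the same, so use the
 * size stored in the first allocated block.
 */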
6947 *extnt_size = rsrc_blk->rsrc_size;
6948 }
6949 curr_blks++;
6950 }
6951
6952
6953
6954
6955
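/*
 * Calculate the size of an embedded mailbox.  The uint32_t
 * accounts for the extents-specific word.
 */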
6956 emb_len = sizeof(MAILBOX_t) - sizeof(struct mbox_header) -
6957 sizeof(uint32_t);
6958
6959
6960
6961
6962
6963 emb = LPFC_SLI4_MBX_EMBED;
6964 req_len = emb_len;
6965 if (req_len > emb_len) {
6966 req_len = curr_blks * sizeof(uint16_t) +
6967 sizeof(union lpfc_sli4_cfg_shdr) +
6968 sizeof(uint32_t);
6969 emb = LPFC_SLI4_MBX_NEMBED;
6970 }
6971
6972 mbox = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
6973 if (!mbox)
6974 return -ENOMEM;
6975 memset(mbox, 0, sizeof(LPFC_MBOXQ_t));
6976
6977 alloc_len = lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON,
6978 LPFC_MBOX_OPCODE_GET_ALLOC_RSRC_EXTENT,
6979 req_len, emb);
6980 if (alloc_len < req_len) {
6981 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
6982 "2983 Allocated DMA memory size (x%x) is "
6983 "less than the requested DMA memory "
6984 "size (x%x)\n", alloc_len, req_len);
6985 rc = -ENOMEM;
6986 goto err_exit;
6987 }
6988 rc = lpfc_sli4_mbox_rsrc_extent(phba, mbox, curr_blks, type, emb);
6989 if (unlikely(rc)) {
6990 rc = -EIO;
6991 goto err_exit;
6992 }
6993
6994 if (!phba->sli4_hba.intr_enable)
6995 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
6996 else {
6997 mbox_tmo = lpfc_mbox_tmo_val(phba, mbox);
6998 rc = lpfc_sli_issue_mbox_wait(phba, mbox, mbox_tmo);
6999 }
7000
7001 if (unlikely(rc)) {
7002 rc = -EIO;
7003 goto err_exit;
7004 }
7005
7006
7007
7008
7009
7010
7011
7012 if (emb == LPFC_SLI4_MBX_EMBED) {
7013 rsrc_ext = &mbox->u.mqe.un.alloc_rsrc_extents;
7014 shdr = &rsrc_ext->header.cfg_shdr;
7015 *extnt_cnt = bf_get(lpfc_mbx_rsrc_cnt, &rsrc_ext->u.rsp);
7016 } else {
7017 virtaddr = mbox->sge_array->addr[0];
7018 n_rsrc = (struct lpfc_mbx_nembed_rsrc_extent *) virtaddr;
7019 shdr = &n_rsrc->cfg_shdr;
7020 *extnt_cnt = bf_get(lpfc_mbx_rsrc_cnt, n_rsrc);
7021 }
7022
7023 if (bf_get(lpfc_mbox_hdr_status, &shdr->response)) {
7024 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
7025 "2984 Failed to read allocated resources "
7026 "for type %d - Status 0x%x Add'l Status 0x%x.\n",
7027 type,
7028 bf_get(lpfc_mbox_hdr_status, &shdr->response),
7029 bf_get(lpfc_mbox_hdr_add_status, &shdr->response));
7030 rc = -EIO;
7031 goto err_exit;
7032 }
7033 err_exit:
7034 lpfc_sli4_mbox_cmd_free(phba, mbox);
7035 return rc;
7036}
7037
7038 /**
7039  * lpfc_sli4_repost_sgl_list - Repost the buffer sgl pages as a block
7040  * @phba: pointer to lpfc hba data structure.
7041  * @sgl_list: linked list of sgl buffers to post.
7042  * @cnt: number of linked list buffers.
7043  *
7044  * This routine walks the list of buffers that have been allocated and
7045  * reposts them to the port using SGL block post.  This is needed after
7046  * a pci_function_reset/warm_start or start.  It groups runs of buffers
7047  * with consecutive XRIs (up to LPFC_NEMBED_MBOX_SGL_CNT at a time) so
7048  * each run can be posted with a single non-embedded mailbox; a lone
7049  * buffer is posted individually.  Buffers that fail to post are freed
7050  * and subtracted from the running total.
7051  *
7052  * Returns the number of successfully posted buffers, or -EIO if none.
7053  **/
7054static int
7055lpfc_sli4_repost_sgl_list(struct lpfc_hba *phba,
7056 struct list_head *sgl_list, int cnt)
7057{
7058 struct lpfc_sglq *sglq_entry = NULL;
7059 struct lpfc_sglq *sglq_entry_next = NULL;
7060 struct lpfc_sglq *sglq_entry_first = NULL;
7061 int status, total_cnt;
7062 int post_cnt = 0, num_posted = 0, block_cnt = 0;
7063 int last_xritag = NO_XRI;
7064 LIST_HEAD(prep_sgl_list);
7065 LIST_HEAD(blck_sgl_list);
7066 LIST_HEAD(allc_sgl_list);
7067 LIST_HEAD(post_sgl_list);
7068 LIST_HEAD(free_sgl_list);
7069
7070 spin_lock_irq(&phba->hbalock);
7071 spin_lock(&phba->sli4_hba.sgl_list_lock);
7072 list_splice_init(sgl_list, &allc_sgl_list);
7073 spin_unlock(&phba->sli4_hba.sgl_list_lock);
7074 spin_unlock_irq(&phba->hbalock);
7075
7076 total_cnt = cnt;
7077 list_for_each_entry_safe(sglq_entry, sglq_entry_next,
7078 &allc_sgl_list, list) {
7079 list_del_init(&sglq_entry->list);
7080 block_cnt++;
7081 if ((last_xritag != NO_XRI) &&
7082 (sglq_entry->sli4_xritag != last_xritag + 1)) {
7083
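/* Hole in the xri block: close out the current posting block. */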
7084 list_splice_init(&prep_sgl_list, &blck_sgl_list);
7085 post_cnt = block_cnt - 1;
7086
7087 list_add_tail(&sglq_entry->list, &prep_sgl_list);
7088 block_cnt = 1;
7089 } else {
7090
7091 list_add_tail(&sglq_entry->list, &prep_sgl_list);
7092
7093 if (block_cnt == LPFC_NEMBED_MBOX_SGL_CNT) {
7094 list_splice_init(&prep_sgl_list,
7095 &blck_sgl_list);
7096 post_cnt = block_cnt;
7097 block_cnt = 0;
7098 }
7099 }
7100 num_posted++;
7101
7102
7103 last_xritag = sglq_entry->sli4_xritag;
7104
7105
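/* Last sgl entry: post whatever remains, singly or as a block. */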
7106 if (num_posted == total_cnt) {
7107 if (post_cnt == 0) {
7108 list_splice_init(&prep_sgl_list,
7109 &blck_sgl_list);
7110 post_cnt = block_cnt;
7111 } else if (block_cnt == 1) {
7112 status = lpfc_sli4_post_sgl(phba,
7113 sglq_entry->phys, 0,
7114 sglq_entry->sli4_xritag);
7115 if (!status) {
7116
7117 list_add_tail(&sglq_entry->list,
7118 &post_sgl_list);
7119 } else {
7120
7121 lpfc_printf_log(phba, KERN_WARNING,
7122 LOG_SLI,
7123 "3159 Failed to post "
7124 "sgl, xritag:x%x\n",
7125 sglq_entry->sli4_xritag);
7126 list_add_tail(&sglq_entry->list,
7127 &free_sgl_list);
7128 total_cnt--;
7129 }
7130 }
7131 }
7132
7133
7134 if (post_cnt == 0)
7135 continue;
7136
7137
7138 status = lpfc_sli4_post_sgl_list(phba, &blck_sgl_list,
7139 post_cnt);
7140
7141 if (!status) {
7142
7143 list_splice_init(&blck_sgl_list, &post_sgl_list);
7144 } else {
7145
7146 sglq_entry_first = list_first_entry(&blck_sgl_list,
7147 struct lpfc_sglq,
7148 list);
7149 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
7150 "3160 Failed to post sgl-list, "
7151 "xritag:x%x-x%x\n",
7152 sglq_entry_first->sli4_xritag,
7153 (sglq_entry_first->sli4_xritag +
7154 post_cnt - 1));
7155 list_splice_init(&blck_sgl_list, &free_sgl_list);
7156 total_cnt -= post_cnt;
7157 }
7158
7159
7160 if (block_cnt == 0)
7161 last_xritag = NO_XRI;
7162
7163
7164 post_cnt = 0;
7165 }
7166
7167
7168 lpfc_free_sgl_list(phba, &free_sgl_list);
7169
7170
7171 if (!list_empty(&post_sgl_list)) {
7172 spin_lock_irq(&phba->hbalock);
7173 spin_lock(&phba->sli4_hba.sgl_list_lock);
7174 list_splice_init(&post_sgl_list, sgl_list);
7175 spin_unlock(&phba->sli4_hba.sgl_list_lock);
7176 spin_unlock_irq(&phba->hbalock);
7177 } else {
7178 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
7179 "3161 Failure to post sgl to port.\n");
7180 return -EIO;
7181 }
7182
7183
7184 return total_cnt;
7185}
7186
7187 /**
7188  * lpfc_sli4_repost_io_sgl_list - Repost all the allocated IO buffer sgls
7189  * @phba: pointer to lpfc hba data structure.
7190  *
7191  * This routine flushes the allocated IO buffers onto a local list and
7192  * reposts their sgls to the port using SGL block post.  This is needed
7193  * after a pci_function_reset/warm_start or start.  If none of the
7194  * flushed buffers can be posted successfully, the repost is treated
7195  * as a failure.
7196  *
7197  * Returns: 0 = success, -EIO = failure.
7198  **/
7199static int
7200lpfc_sli4_repost_io_sgl_list(struct lpfc_hba *phba)
7201{
7202 LIST_HEAD(post_nblist);
7203 int num_posted, rc = 0;
7204
7205
7206 lpfc_io_buf_flush(phba, &post_nblist);
7207
7208
7209 if (!list_empty(&post_nblist)) {
7210 num_posted = lpfc_sli4_post_io_sgl_list(
7211 phba, &post_nblist, phba->sli4_hba.io_xri_cnt);
7212
7213 if (num_posted == 0)
7214 rc = -EIO;
7215 }
7216 return rc;
7217}
7218
7219static void
7220lpfc_set_host_data(struct lpfc_hba *phba, LPFC_MBOXQ_t *mbox)
7221{
7222 uint32_t len;
7223
7224 len = sizeof(struct lpfc_mbx_set_host_data) -
7225 sizeof(struct lpfc_sli4_cfg_mhdr);
7226 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON,
7227 LPFC_MBOX_OPCODE_SET_HOST_DATA, len,
7228 LPFC_SLI4_MBX_EMBED);
7229
7230 mbox->u.mqe.un.set_host_data.param_id = LPFC_SET_HOST_OS_DRIVER_VERSION;
7231 mbox->u.mqe.un.set_host_data.param_len =
7232 LPFC_HOST_OS_DRIVER_VERSION_SIZE;
7233 snprintf(mbox->u.mqe.un.set_host_data.data,
7234 LPFC_HOST_OS_DRIVER_VERSION_SIZE,
7235 "Linux %s v"LPFC_DRIVER_VERSION,
7236 (phba->hba_flag & HBA_FCOE_MODE) ? "FCoE" : "FC");
7237}
7238
7239int
7240lpfc_post_rq_buffer(struct lpfc_hba *phba, struct lpfc_queue *hrq,
7241 struct lpfc_queue *drq, int count, int idx)
7242{
7243 int rc, i;
7244 struct lpfc_rqe hrqe;
7245 struct lpfc_rqe drqe;
7246 struct lpfc_rqb *rqbp;
7247 unsigned long flags;
7248 struct rqb_dmabuf *rqb_buffer;
7249 LIST_HEAD(rqb_buf_list);
7250
7251 spin_lock_irqsave(&phba->hbalock, flags);
7252 rqbp = hrq->rqbp;
7253 for (i = 0; i < count; i++) {
7254
7255 if (rqbp->buffer_count + i >= rqbp->entry_count - 1)
7256 break;
7257 rqb_buffer = rqbp->rqb_alloc_buffer(phba);
7258 if (!rqb_buffer)
7259 break;
7260 rqb_buffer->hrq = hrq;
7261 rqb_buffer->drq = drq;
7262 rqb_buffer->idx = idx;
7263 list_add_tail(&rqb_buffer->hbuf.list, &rqb_buf_list);
7264 }
7265 while (!list_empty(&rqb_buf_list)) {
7266 list_remove_head(&rqb_buf_list, rqb_buffer, struct rqb_dmabuf,
7267 hbuf.list);
7268
7269 hrqe.address_lo = putPaddrLow(rqb_buffer->hbuf.phys);
7270 hrqe.address_hi = putPaddrHigh(rqb_buffer->hbuf.phys);
7271 drqe.address_lo = putPaddrLow(rqb_buffer->dbuf.phys);
7272 drqe.address_hi = putPaddrHigh(rqb_buffer->dbuf.phys);
7273 rc = lpfc_sli4_rq_put(hrq, drq, &hrqe, &drqe);
7274 if (rc < 0) {
7275 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
7276 "6421 Cannot post to HRQ %d: %x %x %x "
7277 "DRQ %x %x\n",
7278 hrq->queue_id,
7279 hrq->host_index,
7280 hrq->hba_index,
7281 hrq->entry_count,
7282 drq->host_index,
7283 drq->hba_index);
7284 rqbp->rqb_free_buffer(phba, rqb_buffer);
7285 } else {
7286 list_add_tail(&rqb_buffer->hbuf.list,
7287 &rqbp->rqb_buffer_list);
7288 rqbp->buffer_count++;
7289 }
7290 }
7291 spin_unlock_irqrestore(&phba->hbalock, flags);
7292 return 1;
7293}
7294
7295 /**
7296  * lpfc_init_idle_stat_hb - Initialize idle_stat tracking
7297  * @phba: pointer to lpfc hba data structure.
7298  *
7299  * This routine captures a per-CPU idle time baseline for every io
7300  * completion queue owned by a present CPU, selects the CQ poll mode
7301  * (LPFC_QUEUE_WORK for NVMe target mode, LPFC_IRQ_POLL otherwise),
7302  * and, when not in NVMe target mode, schedules the delayed work that
7303  * refreshes the idle statistics.
7304  **/
7305static void lpfc_init_idle_stat_hb(struct lpfc_hba *phba)
7306{
7307 int i;
7308 struct lpfc_sli4_hdw_queue *hdwq;
7309 struct lpfc_queue *cq;
7310 struct lpfc_idle_stat *idle_stat;
7311 u64 wall;
7312
7313 for_each_present_cpu(i) {
7314 hdwq = &phba->sli4_hba.hdwq[phba->sli4_hba.cpu_map[i].hdwq];
7315 cq = hdwq->io_cq;
7316
7317
7318 if (cq->chann != i)
7319 continue;
7320
7321 idle_stat = &phba->sli4_hba.idle_stat[i];
7322
7323 idle_stat->prev_idle = get_cpu_idle_time(i, &wall, 1);
7324 idle_stat->prev_wall = wall;
7325
7326 if (phba->nvmet_support)
7327 cq->poll_mode = LPFC_QUEUE_WORK;
7328 else
7329 cq->poll_mode = LPFC_IRQ_POLL;
7330 }
7331
7332 if (!phba->nvmet_support)
7333 schedule_delayed_work(&phba->idle_stat_delay_work,
7334 msecs_to_jiffies(LPFC_IDLE_STAT_DELAY));
7335}
7336
7337static void lpfc_sli4_dip(struct lpfc_hba *phba)
7338{
7339 uint32_t if_type;
7340
7341 if_type = bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf);
7342 if (if_type == LPFC_SLI_INTF_IF_TYPE_2 ||
7343 if_type == LPFC_SLI_INTF_IF_TYPE_6) {
7344 struct lpfc_register reg_data;
7345
7346 if (lpfc_readl(phba->sli4_hba.u.if_type2.STATUSregaddr,
7347 &reg_data.word0))
7348 return;
7349
7350 if (bf_get(lpfc_sliport_status_dip, &reg_data))
7351 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
7352 "2904 Firmware Dump Image Present"
7353 " on Adapter");
7354 }
7355}
7356
7357 /**
7358  * lpfc_sli4_hba_setup - SLI4 device initialization PCI function
7359  * @phba: Pointer to HBA context object.
7360  *
7361  * This function is the main SLI4 device initialization PCI function.
7362  * It is called by the HBA initialization code, the HBA reset code, and
7363  * the HBA error attention handler entry point, and brings the port
7364  * through reset, READ_REV, feature negotiation, and queue setup.
7365  **/
7366int
7367lpfc_sli4_hba_setup(struct lpfc_hba *phba)
7368{
7369 int rc, i, cnt, len, dd;
7370 LPFC_MBOXQ_t *mboxq;
7371 struct lpfc_mqe *mqe;
7372 uint8_t *vpd;
7373 uint32_t vpd_size;
7374 uint32_t ftr_rsp = 0;
7375 struct Scsi_Host *shost = lpfc_shost_from_vport(phba->pport);
7376 struct lpfc_vport *vport = phba->pport;
7377 struct lpfc_dmabuf *mp;
7378 struct lpfc_rqb *rqbp;
7379
7380
7381 rc = lpfc_pci_function_reset(phba);
7382 if (unlikely(rc))
7383 return -ENODEV;
7384
7385
7386 rc = lpfc_sli4_post_status_check(phba);
7387 if (unlikely(rc))
7388 return -ENODEV;
7389 else {
7390 spin_lock_irq(&phba->hbalock);
7391 phba->sli.sli_flag |= LPFC_SLI_ACTIVE;
7392 spin_unlock_irq(&phba->hbalock);
7393 }
7394
7395 lpfc_sli4_dip(phba);
7396
7397
7398
7399
7400
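/* Allocate a single mailbox container for initializing the port. */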
7401 mboxq = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
7402 if (!mboxq)
7403 return -ENOMEM;
7404
7405
7406 vpd_size = SLI4_PAGE_SIZE;
7407 vpd = kzalloc(vpd_size, GFP_KERNEL);
7408 if (!vpd) {
7409 rc = -ENOMEM;
7410 goto out_free_mbox;
7411 }
7412
7413 rc = lpfc_sli4_read_rev(phba, mboxq, vpd, &vpd_size);
7414 if (unlikely(rc)) {
7415 kfree(vpd);
7416 goto out_free_mbox;
7417 }
7418
7419 mqe = &mboxq->u.mqe;
7420 phba->sli_rev = bf_get(lpfc_mbx_rd_rev_sli_lvl, &mqe->un.read_rev);
7421 if (bf_get(lpfc_mbx_rd_rev_fcoe, &mqe->un.read_rev)) {
7422 phba->hba_flag |= HBA_FCOE_MODE;
7423 phba->fcp_embed_io = 0;
7424 } else {
7425 phba->hba_flag &= ~HBA_FCOE_MODE;
7426 }
7427
7428 if (bf_get(lpfc_mbx_rd_rev_cee_ver, &mqe->un.read_rev) ==
7429 LPFC_DCBX_CEE_MODE)
7430 phba->hba_flag |= HBA_FIP_SUPPORT;
7431 else
7432 phba->hba_flag &= ~HBA_FIP_SUPPORT;
7433
7434 phba->hba_flag &= ~HBA_IOQ_FLUSH;
7435
7436 if (phba->sli_rev != LPFC_SLI_REV4) {
7437 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
7438 "0376 READ_REV Error. SLI Level %d "
7439 "FCoE enabled %d\n",
7440 phba->sli_rev, phba->hba_flag & HBA_FCOE_MODE);
7441 rc = -EIO;
7442 kfree(vpd);
7443 goto out_free_mbox;
7444 }
7445
7446
7447
7448
7449
7450
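/*
 * Continue initialization with default values even if the driver
 * failed to read the FCoE param config regions; only attempt the
 * read when the board is in FCoE mode.
 */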
7451 if (phba->hba_flag & HBA_FCOE_MODE &&
7452 lpfc_sli4_read_fcoe_params(phba))
7453 lpfc_printf_log(phba, KERN_WARNING, LOG_MBOX | LOG_INIT,
7454 "2570 Failed to read FCoE parameters\n");
7455
7456
7457
7458
7459
7460 rc = lpfc_sli4_retrieve_pport_name(phba);
7461 if (!rc)
7462 lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI,
7463 "3080 Successful retrieving SLI4 device "
7464 "physical port name: %s.\n", phba->Port);
7465
7466 rc = lpfc_sli4_get_ctl_attr(phba);
7467 if (!rc)
7468 lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI,
7469 "8351 Successful retrieving SLI4 device "
7470 "CTL ATTR\n");
7471
7472
7473
7474
7475
7476
7477 rc = lpfc_parse_vpd(phba, vpd, vpd_size);
7478 if (unlikely(!rc)) {
7479 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
7480 "0377 Error %d parsing vpd. "
7481 "Using defaults.\n", rc);
7482 rc = 0;
7483 }
7484 kfree(vpd);
7485
7486
7487 phba->vpd.rev.biuRev = mqe->un.read_rev.first_hw_rev;
7488 phba->vpd.rev.smRev = mqe->un.read_rev.second_hw_rev;
7489
7490
7491
7492
7493
7494 if ((bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) ==
7495 LPFC_SLI_INTF_IF_TYPE_6) &&
7496 (phba->vpd.rev.biuRev == LPFC_G7_ASIC_1) &&
7497 (phba->vpd.rev.smRev == 0) &&
7498 (phba->cfg_nvme_embed_cmd == 1))
7499 phba->cfg_nvme_embed_cmd = 0;
7500
7501 phba->vpd.rev.endecRev = mqe->un.read_rev.third_hw_rev;
7502 phba->vpd.rev.fcphHigh = bf_get(lpfc_mbx_rd_rev_fcph_high,
7503 &mqe->un.read_rev);
7504 phba->vpd.rev.fcphLow = bf_get(lpfc_mbx_rd_rev_fcph_low,
7505 &mqe->un.read_rev);
7506 phba->vpd.rev.feaLevelHigh = bf_get(lpfc_mbx_rd_rev_ftr_lvl_high,
7507 &mqe->un.read_rev);
7508 phba->vpd.rev.feaLevelLow = bf_get(lpfc_mbx_rd_rev_ftr_lvl_low,
7509 &mqe->un.read_rev);
7510 phba->vpd.rev.sli1FwRev = mqe->un.read_rev.fw_id_rev;
7511 memcpy(phba->vpd.rev.sli1FwName, mqe->un.read_rev.fw_name, 16);
7512 phba->vpd.rev.sli2FwRev = mqe->un.read_rev.ulp_fw_id_rev;
7513 memcpy(phba->vpd.rev.sli2FwName, mqe->un.read_rev.ulp_fw_name, 16);
7514 phba->vpd.rev.opFwRev = mqe->un.read_rev.fw_id_rev;
7515 memcpy(phba->vpd.rev.opFwName, mqe->un.read_rev.fw_name, 16);
7516 lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI,
7517 "(%d):0380 READ_REV Status x%x "
7518 "fw_rev:%s fcphHi:%x fcphLo:%x flHi:%x flLo:%x\n",
7519 mboxq->vport ? mboxq->vport->vpi : 0,
7520 bf_get(lpfc_mqe_status, mqe),
7521 phba->vpd.rev.opFwName,
7522 phba->vpd.rev.fcphHigh, phba->vpd.rev.fcphLow,
7523 phba->vpd.rev.feaLevelHigh, phba->vpd.rev.feaLevelLow);
7524
7525 if (bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) ==
7526 LPFC_SLI_INTF_IF_TYPE_0) {
7527 lpfc_set_features(phba, mboxq, LPFC_SET_UE_RECOVERY);
7528 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
7529 if (rc == MBX_SUCCESS) {
7530 phba->hba_flag |= HBA_RECOVERABLE_UE;
7531
7532 phba->eratt_poll_interval = 1;
7533 phba->sli4_hba.ue_to_sr = bf_get(
7534 lpfc_mbx_set_feature_UESR,
7535 &mboxq->u.mqe.un.set_feature);
7536 phba->sli4_hba.ue_to_rp = bf_get(
7537 lpfc_mbx_set_feature_UERP,
7538 &mboxq->u.mqe.un.set_feature);
7539 }
7540 }
7541
7542 if (phba->cfg_enable_mds_diags && phba->mds_diags_support) {
7543
7544 lpfc_set_features(phba, mboxq, LPFC_SET_MDS_DIAGS);
7545 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
7546 if (rc != MBX_SUCCESS)
7547 phba->mds_diags_support = 0;
7548 }
7549
7550
7551
7552
7553
7554 lpfc_request_features(phba, mboxq);
7555 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
7556 if (unlikely(rc)) {
7557 rc = -EIO;
7558 goto out_free_mbox;
7559 }
7560
7561
7562
7563
7564
7565 if (!(bf_get(lpfc_mbx_rq_ftr_rsp_fcpi, &mqe->un.req_ftrs))) {
7566 lpfc_printf_log(phba, KERN_WARNING, LOG_MBOX | LOG_SLI,
7567 "0378 No support for fcpi mode.\n");
7568 ftr_rsp++;
7569 }
7570
7571
7572 if (phba->hba_flag & HBA_FCOE_MODE) {
7573 if (bf_get(lpfc_mbx_rq_ftr_rsp_perfh, &mqe->un.req_ftrs))
7574 phba->sli3_options |= LPFC_SLI4_PERFH_ENABLED;
7575 else
7576 phba->sli3_options &= ~LPFC_SLI4_PERFH_ENABLED;
7577 }
7578
7579
7580
7581
7582
7583
7584 if (phba->sli3_options & LPFC_SLI3_BG_ENABLED) {
7585 if (!(bf_get(lpfc_mbx_rq_ftr_rsp_dif, &mqe->un.req_ftrs))) {
7586 phba->cfg_enable_bg = 0;
7587 phba->sli3_options &= ~LPFC_SLI3_BG_ENABLED;
7588 ftr_rsp++;
7589 }
7590 }
7591
7592 if (phba->max_vpi && phba->cfg_enable_npiv &&
7593 !(bf_get(lpfc_mbx_rq_ftr_rsp_npiv, &mqe->un.req_ftrs)))
7594 ftr_rsp++;
7595
7596 if (ftr_rsp) {
7597 lpfc_printf_log(phba, KERN_WARNING, LOG_MBOX | LOG_SLI,
7598 "0379 Feature Mismatch Data: x%08x %08x "
7599 "x%x x%x x%x\n", mqe->un.req_ftrs.word2,
7600 mqe->un.req_ftrs.word3, phba->cfg_enable_bg,
7601 phba->cfg_enable_npiv, phba->max_vpi);
7602 if (!(bf_get(lpfc_mbx_rq_ftr_rsp_dif, &mqe->un.req_ftrs)))
7603 phba->cfg_enable_bg = 0;
7604 if (!(bf_get(lpfc_mbx_rq_ftr_rsp_npiv, &mqe->un.req_ftrs)))
7605 phba->cfg_enable_npiv = 0;
7606 }
7607
7608
7609 spin_lock_irq(&phba->hbalock);
7610 phba->sli3_options |= (LPFC_SLI3_NPIV_ENABLED | LPFC_SLI3_HBQ_ENABLED);
7611 spin_unlock_irq(&phba->hbalock);
7612
7613
7614 lpfc_set_features(phba, mboxq, LPFC_SET_DUAL_DUMP);
7615 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
7616 dd = bf_get(lpfc_mbx_set_feature_dd, &mboxq->u.mqe.un.set_feature);
7617 if ((rc == MBX_SUCCESS) && (dd == LPFC_ENABLE_DUAL_DUMP))
7618 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
7619 "6448 Dual Dump is enabled\n");
7620 else
7621 lpfc_printf_log(phba, KERN_INFO, LOG_SLI | LOG_INIT,
7622 "6447 Dual Dump Mailbox x%x (x%x/x%x) failed, "
7623 "rc:x%x dd:x%x\n",
7624 bf_get(lpfc_mqe_command, &mboxq->u.mqe),
7625 lpfc_sli_config_mbox_subsys_get(
7626 phba, mboxq),
7627 lpfc_sli_config_mbox_opcode_get(
7628 phba, mboxq),
7629 rc, dd);
7630
7631
7632
7633
7634 rc = lpfc_sli4_alloc_resource_identifiers(phba);
7635 if (rc) {
7636 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
7637 "2920 Failed to alloc Resource IDs "
7638 "rc = x%x\n", rc);
7639 goto out_free_mbox;
7640 }
7641
7642 lpfc_set_host_data(phba, mboxq);
7643
7644 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
7645 if (rc) {
7646 lpfc_printf_log(phba, KERN_WARNING, LOG_MBOX | LOG_SLI,
7647 "2134 Failed to set host os driver version %x",
7648 rc);
7649 }
7650
7651
7652 rc = lpfc_read_sparam(phba, mboxq, vport->vpi);
7653 if (rc) {
7654 phba->link_state = LPFC_HBA_ERROR;
7655 rc = -ENOMEM;
7656 goto out_free_mbox;
7657 }
7658
7659 mboxq->vport = vport;
7660 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
7661 mp = (struct lpfc_dmabuf *)mboxq->ctx_buf;
7662 if (rc == MBX_SUCCESS) {
7663 memcpy(&vport->fc_sparam, mp->virt, sizeof(struct serv_parm));
7664 rc = 0;
7665 }
7666
7667
7668
7669
7670
7671 lpfc_mbuf_free(phba, mp->virt, mp->phys);
7672 kfree(mp);
7673 mboxq->ctx_buf = NULL;
7674 if (unlikely(rc)) {
7675 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
7676 "0382 READ_SPARAM command failed "
7677 "status %d, mbxStatus x%x\n",
7678 rc, bf_get(lpfc_mqe_status, mqe));
7679 phba->link_state = LPFC_HBA_ERROR;
7680 rc = -EIO;
7681 goto out_free_mbox;
7682 }
7683
7684 lpfc_update_vport_wwn(vport);
7685
7686
7687 fc_host_node_name(shost) = wwn_to_u64(vport->fc_nodename.u.wwn);
7688 fc_host_port_name(shost) = wwn_to_u64(vport->fc_portname.u.wwn);
7689
7690
7691 rc = lpfc_sli4_queue_create(phba);
7692 if (rc) {
7693 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
7694 "3089 Failed to allocate queues\n");
7695 rc = -ENODEV;
7696 goto out_free_mbox;
7697 }
7698
7699 rc = lpfc_sli4_queue_setup(phba);
7700 if (unlikely(rc)) {
7701 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
7702				"0381 Error %d during queue setup.\n", rc);
7703 goto out_stop_timers;
7704 }
7705
7706 lpfc_sli4_setup(phba);
7707 lpfc_sli4_queue_init(phba);
7708
7709
7710 rc = lpfc_sli4_els_sgl_update(phba);
7711 if (unlikely(rc)) {
7712 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
7713 "1400 Failed to update xri-sgl size and "
7714 "mapping: %d\n", rc);
7715 goto out_destroy_queue;
7716 }
7717
7718
7719 rc = lpfc_sli4_repost_sgl_list(phba, &phba->sli4_hba.lpfc_els_sgl_list,
7720 phba->sli4_hba.els_xri_cnt);
7721 if (unlikely(rc < 0)) {
7722 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
7723 "0582 Error %d during els sgl post "
7724 "operation\n", rc);
7725 rc = -ENODEV;
7726 goto out_destroy_queue;
7727 }
7728 phba->sli4_hba.els_xri_cnt = rc;
7729
7730 if (phba->nvmet_support) {
7731
7732 rc = lpfc_sli4_nvmet_sgl_update(phba);
7733 if (unlikely(rc)) {
7734 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
7735 "6308 Failed to update nvmet-sgl size "
7736 "and mapping: %d\n", rc);
7737 goto out_destroy_queue;
7738 }
7739
7740
7741 rc = lpfc_sli4_repost_sgl_list(
7742 phba,
7743 &phba->sli4_hba.lpfc_nvmet_sgl_list,
7744 phba->sli4_hba.nvmet_xri_cnt);
7745 if (unlikely(rc < 0)) {
7746 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
7747 "3117 Error %d during nvmet "
7748 "sgl post\n", rc);
7749 rc = -ENODEV;
7750 goto out_destroy_queue;
7751 }
7752 phba->sli4_hba.nvmet_xri_cnt = rc;
7753
7754
7755
7756
7757 cnt = phba->sli4_hba.nvmet_xri_cnt +
7758 phba->sli4_hba.max_cfg_param.max_xri;
7759 } else {
7760
7761 rc = lpfc_sli4_io_sgl_update(phba);
7762 if (unlikely(rc)) {
7763 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
7764 "6082 Failed to update nvme-sgl size "
7765 "and mapping: %d\n", rc);
7766 goto out_destroy_queue;
7767 }
7768
7769
7770 rc = lpfc_sli4_repost_io_sgl_list(phba);
7771 if (unlikely(rc)) {
7772 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
7773 "6116 Error %d during nvme sgl post "
7774 "operation\n", rc);
7775
7776
7777 rc = -ENODEV;
7778 goto out_destroy_queue;
7779 }
7780
7781
7782
7783 cnt = phba->sli4_hba.max_cfg_param.max_xri;
7784 }
7785
7786 if (!phba->sli.iocbq_lookup) {
7787
7788 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
7789 "2821 initialize iocb list with %d entries\n",
7790 cnt);
7791 rc = lpfc_init_iocb_list(phba, cnt);
7792 if (rc) {
7793 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
7794 "1413 Failed to init iocb list.\n");
7795 goto out_destroy_queue;
7796 }
7797 }
7798
7799 if (phba->nvmet_support)
7800 lpfc_nvmet_create_targetport(phba);
7801
7802 if (phba->nvmet_support && phba->cfg_nvmet_mrq) {
7803
7804 for (i = 0; i < phba->cfg_nvmet_mrq; i++) {
7805 rqbp = phba->sli4_hba.nvmet_mrq_hdr[i]->rqbp;
7806 INIT_LIST_HEAD(&rqbp->rqb_buffer_list);
7807 rqbp->rqb_alloc_buffer = lpfc_sli4_nvmet_alloc;
7808 rqbp->rqb_free_buffer = lpfc_sli4_nvmet_free;
7809 rqbp->entry_count = LPFC_NVMET_RQE_DEF_COUNT;
7810 rqbp->buffer_count = 0;
7811
7812 lpfc_post_rq_buffer(
7813 phba, phba->sli4_hba.nvmet_mrq_hdr[i],
7814 phba->sli4_hba.nvmet_mrq_data[i],
7815 phba->cfg_nvmet_mrq_post, i);
7816 }
7817 }
7818
7819
7820 rc = lpfc_sli4_post_all_rpi_hdrs(phba);
7821 if (unlikely(rc)) {
7822 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
7823 "0393 Error %d during rpi post operation\n",
7824 rc);
7825 rc = -ENODEV;
7826 goto out_destroy_queue;
7827 }
7828 lpfc_sli4_node_prep(phba);
7829
7830 if (!(phba->hba_flag & HBA_FCOE_MODE)) {
7831 if ((phba->nvmet_support == 0) || (phba->cfg_nvmet_mrq == 1)) {
7832
7833
7834
7835 lpfc_reg_fcfi(phba, mboxq);
7836 mboxq->vport = phba->pport;
7837 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
7838 if (rc != MBX_SUCCESS)
7839 goto out_unset_queue;
7840 rc = 0;
7841 phba->fcf.fcfi = bf_get(lpfc_reg_fcfi_fcfi,
7842 &mboxq->u.mqe.un.reg_fcfi);
7843 } else {
7844
7845
7846
7847 lpfc_reg_fcfi_mrq(phba, mboxq, 0);
7848 mboxq->vport = phba->pport;
7849 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
7850 if (rc != MBX_SUCCESS)
7851 goto out_unset_queue;
7852 rc = 0;
7853 phba->fcf.fcfi = bf_get(lpfc_reg_fcfi_mrq_fcfi,
7854 &mboxq->u.mqe.un.reg_fcfi_mrq);
7855
7856
7857 lpfc_reg_fcfi_mrq(phba, mboxq, 1);
7858 mboxq->vport = phba->pport;
7859 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
7860 if (rc != MBX_SUCCESS)
7861 goto out_unset_queue;
7862 rc = 0;
7863 }
7864
7865 lpfc_sli_read_link_ste(phba);
7866 }
7867
7868
7869
7870
7871 if (phba->nvmet_support == 0) {
7872 if (phba->sli4_hba.io_xri_cnt == 0) {
7873 len = lpfc_new_io_buf(
7874 phba, phba->sli4_hba.io_xri_max);
7875 if (len == 0) {
7876 rc = -ENOMEM;
7877 goto out_unset_queue;
7878 }
7879
7880 if (phba->cfg_xri_rebalancing)
7881 lpfc_create_multixri_pools(phba);
7882 }
7883 } else {
7884 phba->cfg_xri_rebalancing = 0;
7885 }
7886
7887
7888 spin_lock_irq(&phba->hbalock);
7889 phba->sli.sli_flag &= ~LPFC_SLI_ASYNC_MBX_BLK;
7890 spin_unlock_irq(&phba->hbalock);
7891
7892
7893 lpfc_sli4_rb_setup(phba);
7894
7895
7896 phba->fcf.fcf_flag = 0;
7897 phba->fcf.current_rec.flag = 0;
7898
7899
7900 mod_timer(&vport->els_tmofunc,
7901 jiffies + msecs_to_jiffies(1000 * (phba->fc_ratov * 2)));
7902
7903
7904 mod_timer(&phba->hb_tmofunc,
7905 jiffies + msecs_to_jiffies(1000 * LPFC_HB_MBOX_INTERVAL));
7906 phba->hb_outstanding = 0;
7907 phba->last_completion_time = jiffies;
7908
7909
7910 if (phba->cfg_auto_imax)
7911 queue_delayed_work(phba->wq, &phba->eq_delay_work,
7912 msecs_to_jiffies(LPFC_EQ_DELAY_MSECS));
7913
7914
7915 lpfc_init_idle_stat_hb(phba);
7916
7917
7918 mod_timer(&phba->eratt_poll,
7919 jiffies + msecs_to_jiffies(1000 * phba->eratt_poll_interval));
7920
7921
7922 if (phba->cfg_aer_support == 1 && !(phba->hba_flag & HBA_AER_ENABLED)) {
7923 rc = pci_enable_pcie_error_reporting(phba->pcidev);
7924 if (!rc) {
7925 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
7926 "2829 This device supports "
7927 "Advanced Error Reporting (AER)\n");
7928 spin_lock_irq(&phba->hbalock);
7929 phba->hba_flag |= HBA_AER_ENABLED;
7930 spin_unlock_irq(&phba->hbalock);
7931 } else {
7932 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
7933 "2830 This device does not support "
7934 "Advanced Error Reporting (AER)\n");
7935 phba->cfg_aer_support = 0;
7936 }
7937 rc = 0;
7938 }
7939
7940
7941
7942
7943
7944 spin_lock_irq(&phba->hbalock);
7945 phba->link_state = LPFC_LINK_DOWN;
7946
7947
7948 if (bf_get(lpfc_conf_trunk_port0, &phba->sli4_hba))
7949 phba->trunk_link.link0.state = LPFC_LINK_DOWN;
7950 if (bf_get(lpfc_conf_trunk_port1, &phba->sli4_hba))
7951 phba->trunk_link.link1.state = LPFC_LINK_DOWN;
7952 if (bf_get(lpfc_conf_trunk_port2, &phba->sli4_hba))
7953 phba->trunk_link.link2.state = LPFC_LINK_DOWN;
7954 if (bf_get(lpfc_conf_trunk_port3, &phba->sli4_hba))
7955 phba->trunk_link.link3.state = LPFC_LINK_DOWN;
7956 spin_unlock_irq(&phba->hbalock);
7957
7958
7959 lpfc_sli4_arm_cqeq_intr(phba);
7960
7961
7962 phba->sli4_hba.intr_enable = 1;
7963
7964 if (!(phba->hba_flag & HBA_FCOE_MODE) &&
7965 (phba->hba_flag & LINK_DISABLED)) {
7966 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
7967 "3103 Adapter Link is disabled.\n");
7968 lpfc_down_link(phba, mboxq);
7969 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
7970 if (rc != MBX_SUCCESS) {
7971 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
7972 "3104 Adapter failed to issue "
7973 "DOWN_LINK mbox cmd, rc:x%x\n", rc);
7974 goto out_io_buff_free;
7975 }
7976 } else if (phba->cfg_suppress_link_up == LPFC_INITIALIZE_LINK) {
7977
7978 if (!(phba->link_flag & LS_LOOPBACK_MODE)) {
7979 rc = phba->lpfc_hba_init_link(phba, MBX_NOWAIT);
7980 if (rc)
7981 goto out_io_buff_free;
7982 }
7983 }
7984 mempool_free(mboxq, phba->mbox_mem_pool);
7985 return rc;
7986out_io_buff_free:
7987
7988 lpfc_io_free(phba);
7989out_unset_queue:
7990
7991 lpfc_sli4_queue_unset(phba);
7992out_destroy_queue:
7993 lpfc_free_iocb_list(phba);
7994 lpfc_sli4_queue_destroy(phba);
7995out_stop_timers:
7996 lpfc_stop_hba_timers(phba);
7997out_free_mbox:
7998 mempool_free(mboxq, phba->mbox_mem_pool);
7999 return rc;
8000}
8001
8002
8003
8004
8005
8006
8007
8008
8009
8010
8011
8012
8013
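/**
 * lpfc_mbox_timeout - Timeout call back function for mbox timer
 * @t: pointer to the timer_list that expired.
 *
 * Invoked when the mailbox timer fires. It posts a WORKER_MBOX_TMO
 * event on the physical port and, if the event was not already posted,
 * wakes up the worker thread to process the mailbox timeout.
 **/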
8014void
8015lpfc_mbox_timeout(struct timer_list *t)
8016{
8017 struct lpfc_hba *phba = from_timer(phba, t, sli.mbox_tmo);
8018 unsigned long iflag;
8019 uint32_t tmo_posted;
8020
8021 spin_lock_irqsave(&phba->pport->work_port_lock, iflag);
8022 tmo_posted = phba->pport->work_port_events & WORKER_MBOX_TMO;
8023 if (!tmo_posted)
8024 phba->pport->work_port_events |= WORKER_MBOX_TMO;
8025 spin_unlock_irqrestore(&phba->pport->work_port_lock, iflag);
8026
8027 if (!tmo_posted)
8028 lpfc_worker_wake_up(phba);
8029 return;
8030}
8031
8032
8033
8034
8035
8036
8037
8038
8039
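/**
 * lpfc_sli4_mbox_completions_pending - Check for pending mbox completions
 * @phba: Pointer to HBA context object.
 *
 * Walks the mailbox completion queue looking for a valid, completed,
 * non-asynchronous MCQE that has not yet been processed.
 *
 * Return: true if such a completion is pending, false otherwise.
 **/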
8040static bool
8041lpfc_sli4_mbox_completions_pending(struct lpfc_hba *phba)
8042{
8043
8044 uint32_t idx;
8045 struct lpfc_queue *mcq;
8046 struct lpfc_mcqe *mcqe;
8047 bool pending_completions = false;
8048 uint8_t qe_valid;
8049
8050 if (unlikely(!phba) || (phba->sli_rev != LPFC_SLI_REV4))
8051 return false;
8052
8053
8054
8055 mcq = phba->sli4_hba.mbx_cq;
8056 idx = mcq->hba_index;
8057 qe_valid = mcq->qe_valid;
8058 while (bf_get_le32(lpfc_cqe_valid,
8059 (struct lpfc_cqe *)lpfc_sli4_qe(mcq, idx)) == qe_valid) {
8060 mcqe = (struct lpfc_mcqe *)(lpfc_sli4_qe(mcq, idx));
8061 if (bf_get_le32(lpfc_trailer_completed, mcqe) &&
8062 (!bf_get_le32(lpfc_trailer_async, mcqe))) {
8063 pending_completions = true;
8064 break;
8065 }
8066 idx = (idx + 1) % mcq->entry_count;
8067 if (mcq->hba_index == idx)
8068 break;
8069
8070
8071 if (phba->sli4_hba.pc_sli4_params.cqav && !idx)
8072 qe_valid = (qe_valid) ? 0 : 1;
8073 }
8074 return pending_completions;
8075
8076}
8077
8078
8079
8080
8081
8082
8083
8084
8085
8086
8087
8088
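/**
 * lpfc_sli4_process_missed_mbox_completions - Recover missed completions
 * @phba: Pointer to HBA context object.
 *
 * Finds the EQ associated with the mailbox CQ, disables its interrupt,
 * and checks for unprocessed mailbox completions. If any are pending,
 * the EQ is processed in-line; otherwise the EQ is simply rearmed.
 *
 * Return: true if mailbox completions were processed, false otherwise.
 **/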
8089static bool
8090lpfc_sli4_process_missed_mbox_completions(struct lpfc_hba *phba)
8091{
8092 struct lpfc_sli4_hba *sli4_hba = &phba->sli4_hba;
8093 uint32_t eqidx;
8094 struct lpfc_queue *fpeq = NULL;
8095 struct lpfc_queue *eq;
8096 bool mbox_pending;
8097
8098 if (unlikely(!phba) || (phba->sli_rev != LPFC_SLI_REV4))
8099 return false;
8100
8101
8102 if (sli4_hba->hdwq) {
8103 for (eqidx = 0; eqidx < phba->cfg_irq_chann; eqidx++) {
8104 eq = phba->sli4_hba.hba_eq_hdl[eqidx].eq;
8105 if (eq && eq->queue_id == sli4_hba->mbx_cq->assoc_qid) {
8106 fpeq = eq;
8107 break;
8108 }
8109 }
8110 }
8111 if (!fpeq)
8112 return false;
8113
8114
8115
8116 sli4_hba->sli4_eq_clr_intr(fpeq);
8117
8118
8119
8120 mbox_pending = lpfc_sli4_mbox_completions_pending(phba);
8121
8122
8123
8124
8125
8126
8127
8128
8129 if (mbox_pending)
8130
8131 lpfc_sli4_process_eq(phba, fpeq, LPFC_QUEUE_REARM);
8132 else
8133
8134 sli4_hba->sli4_write_eq_db(phba, fpeq, 0, LPFC_QUEUE_REARM);
8135
8136 return mbox_pending;
8137
8138}
8139
8140
8141
8142
8143
8144
8145
8146
8147
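/**
 * lpfc_mbox_timeout_handler - Worker thread function for mailbox timeout
 * @phba: Pointer to HBA context object.
 *
 * Called by the worker thread when a mailbox command times out. After
 * ruling out a missed completion, it logs the timed-out command, marks
 * the SLI layer inactive, aborts outstanding FCP I/O, and resets the
 * HBA to recover.
 **/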
8148void
8149lpfc_mbox_timeout_handler(struct lpfc_hba *phba)
8150{
8151 LPFC_MBOXQ_t *pmbox = phba->sli.mbox_active;
8152 MAILBOX_t *mb = NULL;
8153
8154 struct lpfc_sli *psli = &phba->sli;
8155
8156
8157 if (lpfc_sli4_process_missed_mbox_completions(phba))
8158 return;
8159
8160 if (pmbox != NULL)
8161 mb = &pmbox->u.mb;
8162
8163
8164
8165
8166
8167 spin_lock_irq(&phba->hbalock);
8168 if (pmbox == NULL) {
8169 lpfc_printf_log(phba, KERN_WARNING,
8170 LOG_MBOX | LOG_SLI,
8171 "0353 Active Mailbox cleared - mailbox timeout "
8172 "exiting\n");
8173 spin_unlock_irq(&phba->hbalock);
8174 return;
8175 }
8176
8177
8178 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
8179 "0310 Mailbox command x%x timeout Data: x%x x%x x%px\n",
8180 mb->mbxCommand,
8181 phba->pport->port_state,
8182 phba->sli.sli_flag,
8183 phba->sli.mbox_active);
8184 spin_unlock_irq(&phba->hbalock);
8185
8186
8187
8188
8189
8190 spin_lock_irq(&phba->pport->work_port_lock);
8191 phba->pport->work_port_events &= ~WORKER_MBOX_TMO;
8192 spin_unlock_irq(&phba->pport->work_port_lock);
8193 spin_lock_irq(&phba->hbalock);
8194 phba->link_state = LPFC_LINK_UNKNOWN;
8195 psli->sli_flag &= ~LPFC_SLI_ACTIVE;
8196 spin_unlock_irq(&phba->hbalock);
8197
8198 lpfc_sli_abort_fcp_rings(phba);
8199
8200 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
8201 "0345 Resetting board due to mailbox timeout\n");
8202
8203
8204 lpfc_reset_hba(phba);
8205}
8206
8207
8208
8209
8210
8211
8212
8213
8214
8215
8216
8217
8218
8219
8220
8221
8222
8223
8224
8225
8226
8227
8228
8229
8230
8231
8232
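/**
 * lpfc_sli_issue_mbox_s3 - Issue an SLI3 mailbox command to firmware
 * @phba: Pointer to HBA context object.
 * @pmbox: Pointer to mailbox object, or NULL to resume the pending queue.
 * @flag: MBX_POLL for synchronous or MBX_NOWAIT for asynchronous operation.
 *
 * Writes the mailbox command to the host mailbox region (or to SLIM when
 * the SLI layer is not yet active) and rings the chip attention doorbell.
 * In MBX_POLL mode it busy-waits for ownership to return to the host and
 * copies the completed mailbox back; in MBX_NOWAIT mode completion is
 * handled by the interrupt path. Commands that cannot be issued
 * immediately are queued and MBX_BUSY is returned.
 **/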
8233static int
8234lpfc_sli_issue_mbox_s3(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmbox,
8235 uint32_t flag)
8236{
8237 MAILBOX_t *mbx;
8238 struct lpfc_sli *psli = &phba->sli;
8239 uint32_t status, evtctr;
8240 uint32_t ha_copy, hc_copy;
8241 int i;
8242 unsigned long timeout;
8243 unsigned long drvr_flag = 0;
8244 uint32_t word0, ldata;
8245 void __iomem *to_slim;
8246 int processing_queue = 0;
8247
8248 spin_lock_irqsave(&phba->hbalock, drvr_flag);
8249 if (!pmbox) {
8250 phba->sli.sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
8251
8252 if (unlikely(psli->sli_flag & LPFC_SLI_ASYNC_MBX_BLK)) {
8253 spin_unlock_irqrestore(&phba->hbalock, drvr_flag);
8254 return MBX_SUCCESS;
8255 }
8256 processing_queue = 1;
8257 pmbox = lpfc_mbox_get(phba);
8258 if (!pmbox) {
8259 spin_unlock_irqrestore(&phba->hbalock, drvr_flag);
8260 return MBX_SUCCESS;
8261 }
8262 }
8263
8264 if (pmbox->mbox_cmpl && pmbox->mbox_cmpl != lpfc_sli_def_mbox_cmpl &&
8265 pmbox->mbox_cmpl != lpfc_sli_wake_mbox_wait) {
8266		if (!pmbox->vport) {
8267 spin_unlock_irqrestore(&phba->hbalock, drvr_flag);
8268 lpfc_printf_log(phba, KERN_ERR,
8269 LOG_MBOX | LOG_VPORT,
8270 "1806 Mbox x%x failed. No vport\n",
8271 pmbox->u.mb.mbxCommand);
8272 dump_stack();
8273 goto out_not_finished;
8274 }
8275 }
8276
8277
8278 if (unlikely(pci_channel_offline(phba->pcidev))) {
8279 spin_unlock_irqrestore(&phba->hbalock, drvr_flag);
8280 goto out_not_finished;
8281 }
8282
8283
8284 if (unlikely(phba->hba_flag & DEFER_ERATT)) {
8285 spin_unlock_irqrestore(&phba->hbalock, drvr_flag);
8286 goto out_not_finished;
8287 }
8288
8289 psli = &phba->sli;
8290
8291 mbx = &pmbox->u.mb;
8292 status = MBX_SUCCESS;
8293
8294 if (phba->link_state == LPFC_HBA_ERROR) {
8295 spin_unlock_irqrestore(&phba->hbalock, drvr_flag);
8296
8297
8298 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
8299 "(%d):0311 Mailbox command x%x cannot "
8300 "issue Data: x%x x%x\n",
8301 pmbox->vport ? pmbox->vport->vpi : 0,
8302 pmbox->u.mb.mbxCommand, psli->sli_flag, flag);
8303 goto out_not_finished;
8304 }
8305
8306 if (mbx->mbxCommand != MBX_KILL_BOARD && flag & MBX_NOWAIT) {
8307 if (lpfc_readl(phba->HCregaddr, &hc_copy) ||
8308 !(hc_copy & HC_MBINT_ENA)) {
8309 spin_unlock_irqrestore(&phba->hbalock, drvr_flag);
8310 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
8311 "(%d):2528 Mailbox command x%x cannot "
8312 "issue Data: x%x x%x\n",
8313 pmbox->vport ? pmbox->vport->vpi : 0,
8314 pmbox->u.mb.mbxCommand, psli->sli_flag, flag);
8315 goto out_not_finished;
8316 }
8317 }
8318
8319 if (psli->sli_flag & LPFC_SLI_MBOX_ACTIVE) {
8320
8321
8322
8323
8324
8325 if (flag & MBX_POLL) {
8326 spin_unlock_irqrestore(&phba->hbalock, drvr_flag);
8327
8328
8329 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
8330 "(%d):2529 Mailbox command x%x "
8331 "cannot issue Data: x%x x%x\n",
8332 pmbox->vport ? pmbox->vport->vpi : 0,
8333 pmbox->u.mb.mbxCommand,
8334 psli->sli_flag, flag);
8335 goto out_not_finished;
8336 }
8337
8338 if (!(psli->sli_flag & LPFC_SLI_ACTIVE)) {
8339 spin_unlock_irqrestore(&phba->hbalock, drvr_flag);
8340
8341 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
8342 "(%d):2530 Mailbox command x%x "
8343 "cannot issue Data: x%x x%x\n",
8344 pmbox->vport ? pmbox->vport->vpi : 0,
8345 pmbox->u.mb.mbxCommand,
8346 psli->sli_flag, flag);
8347 goto out_not_finished;
8348 }
8349
8350
8351
8352
8353 lpfc_mbox_put(phba, pmbox);
8354
8355
8356 lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI,
8357 "(%d):0308 Mbox cmd issue - BUSY Data: "
8358 "x%x x%x x%x x%x\n",
8359 pmbox->vport ? pmbox->vport->vpi : 0xffffff,
8360 mbx->mbxCommand,
8361 phba->pport ? phba->pport->port_state : 0xff,
8362 psli->sli_flag, flag);
8363
8364 psli->slistat.mbox_busy++;
8365 spin_unlock_irqrestore(&phba->hbalock, drvr_flag);
8366
8367 if (pmbox->vport) {
8368 lpfc_debugfs_disc_trc(pmbox->vport,
8369 LPFC_DISC_TRC_MBOX_VPORT,
8370 "MBOX Bsy vport: cmd:x%x mb:x%x x%x",
8371 (uint32_t)mbx->mbxCommand,
8372 mbx->un.varWords[0], mbx->un.varWords[1]);
8373 }
8374 else {
8375 lpfc_debugfs_disc_trc(phba->pport,
8376 LPFC_DISC_TRC_MBOX,
8377 "MBOX Bsy: cmd:x%x mb:x%x x%x",
8378 (uint32_t)mbx->mbxCommand,
8379 mbx->un.varWords[0], mbx->un.varWords[1]);
8380 }
8381
8382 return MBX_BUSY;
8383 }
8384
8385 psli->sli_flag |= LPFC_SLI_MBOX_ACTIVE;
8386
8387
8388 if (flag != MBX_POLL) {
8389 if (!(psli->sli_flag & LPFC_SLI_ACTIVE) &&
8390 (mbx->mbxCommand != MBX_KILL_BOARD)) {
8391 psli->sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
8392 spin_unlock_irqrestore(&phba->hbalock, drvr_flag);
8393
8394 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
8395 "(%d):2531 Mailbox command x%x "
8396 "cannot issue Data: x%x x%x\n",
8397 pmbox->vport ? pmbox->vport->vpi : 0,
8398 pmbox->u.mb.mbxCommand,
8399 psli->sli_flag, flag);
8400 goto out_not_finished;
8401 }
8402
8403 timeout = msecs_to_jiffies(lpfc_mbox_tmo_val(phba, pmbox) *
8404 1000);
8405 mod_timer(&psli->mbox_tmo, jiffies + timeout);
8406 }
8407
8408
8409 lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI,
8410 "(%d):0309 Mailbox cmd x%x issue Data: x%x x%x "
8411 "x%x\n",
8412 pmbox->vport ? pmbox->vport->vpi : 0,
8413 mbx->mbxCommand,
8414 phba->pport ? phba->pport->port_state : 0xff,
8415 psli->sli_flag, flag);
8416
8417 if (mbx->mbxCommand != MBX_HEARTBEAT) {
8418 if (pmbox->vport) {
8419 lpfc_debugfs_disc_trc(pmbox->vport,
8420 LPFC_DISC_TRC_MBOX_VPORT,
8421 "MBOX Send vport: cmd:x%x mb:x%x x%x",
8422 (uint32_t)mbx->mbxCommand,
8423 mbx->un.varWords[0], mbx->un.varWords[1]);
8424 }
8425 else {
8426 lpfc_debugfs_disc_trc(phba->pport,
8427 LPFC_DISC_TRC_MBOX,
8428 "MBOX Send: cmd:x%x mb:x%x x%x",
8429 (uint32_t)mbx->mbxCommand,
8430 mbx->un.varWords[0], mbx->un.varWords[1]);
8431 }
8432 }
8433
8434 psli->slistat.mbox_cmd++;
8435 evtctr = psli->slistat.mbox_event;
8436
8437
8438 mbx->mbxOwner = OWN_CHIP;
8439
8440 if (psli->sli_flag & LPFC_SLI_ACTIVE) {
8441
8442 if (pmbox->in_ext_byte_len || pmbox->out_ext_byte_len) {
8443 *(((uint32_t *)mbx) + pmbox->mbox_offset_word)
8444 = (uint8_t *)phba->mbox_ext
8445 - (uint8_t *)phba->mbox;
8446 }
8447
8448
8449 if (pmbox->in_ext_byte_len && pmbox->ctx_buf) {
8450 lpfc_sli_pcimem_bcopy(pmbox->ctx_buf,
8451 (uint8_t *)phba->mbox_ext,
8452 pmbox->in_ext_byte_len);
8453 }
8454
8455 lpfc_sli_pcimem_bcopy(mbx, phba->mbox, MAILBOX_CMD_SIZE);
8456 } else {
8457
8458 if (pmbox->in_ext_byte_len || pmbox->out_ext_byte_len)
8459 *(((uint32_t *)mbx) + pmbox->mbox_offset_word)
8460 = MAILBOX_HBA_EXT_OFFSET;
8461
8462
8463 if (pmbox->in_ext_byte_len && pmbox->ctx_buf)
8464 lpfc_memcpy_to_slim(phba->MBslimaddr +
8465 MAILBOX_HBA_EXT_OFFSET,
8466 pmbox->ctx_buf, pmbox->in_ext_byte_len);
8467
8468 if (mbx->mbxCommand == MBX_CONFIG_PORT)
8469
8470 lpfc_sli_pcimem_bcopy(mbx, phba->mbox,
8471 MAILBOX_CMD_SIZE);
8472
8473
8474
8475 to_slim = phba->MBslimaddr + sizeof (uint32_t);
8476 lpfc_memcpy_to_slim(to_slim, &mbx->un.varWords[0],
8477 MAILBOX_CMD_SIZE - sizeof (uint32_t));
8478
8479
8480 ldata = *((uint32_t *)mbx);
8481 to_slim = phba->MBslimaddr;
8482 writel(ldata, to_slim);
8483 readl(to_slim);
8484
8485 if (mbx->mbxCommand == MBX_CONFIG_PORT)
8486
8487 psli->sli_flag |= LPFC_SLI_ACTIVE;
8488 }
8489
8490 wmb();
8491
8492 switch (flag) {
8493 case MBX_NOWAIT:
8494
8495 psli->mbox_active = pmbox;
8496
8497 writel(CA_MBATT, phba->CAregaddr);
8498 readl(phba->CAregaddr);
8499
8500 break;
8501
8502 case MBX_POLL:
8503
8504 psli->mbox_active = NULL;
8505
8506 writel(CA_MBATT, phba->CAregaddr);
8507 readl(phba->CAregaddr);
8508
8509 if (psli->sli_flag & LPFC_SLI_ACTIVE) {
8510
8511 word0 = *((uint32_t *)phba->mbox);
8512 word0 = le32_to_cpu(word0);
8513 } else {
8514
8515 if (lpfc_readl(phba->MBslimaddr, &word0)) {
8516 spin_unlock_irqrestore(&phba->hbalock,
8517 drvr_flag);
8518 goto out_not_finished;
8519 }
8520 }
8521
8522
8523 if (lpfc_readl(phba->HAregaddr, &ha_copy)) {
8524 spin_unlock_irqrestore(&phba->hbalock,
8525 drvr_flag);
8526 goto out_not_finished;
8527 }
8528 timeout = msecs_to_jiffies(lpfc_mbox_tmo_val(phba, pmbox) *
8529 1000) + jiffies;
8530 i = 0;
8531
8532 while (((word0 & OWN_CHIP) == OWN_CHIP) ||
8533 (!(ha_copy & HA_MBATT) &&
8534 (phba->link_state > LPFC_WARM_START))) {
8535 if (time_after(jiffies, timeout)) {
8536 psli->sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
8537 spin_unlock_irqrestore(&phba->hbalock,
8538 drvr_flag);
8539 goto out_not_finished;
8540 }
8541
8542
8543
8544 if (((word0 & OWN_CHIP) != OWN_CHIP)
8545 && (evtctr != psli->slistat.mbox_event))
8546 break;
8547
8548 if (i++ > 10) {
8549 spin_unlock_irqrestore(&phba->hbalock,
8550 drvr_flag);
8551 msleep(1);
8552 spin_lock_irqsave(&phba->hbalock, drvr_flag);
8553 }
8554
8555 if (psli->sli_flag & LPFC_SLI_ACTIVE) {
8556
8557 word0 = *((uint32_t *)phba->mbox);
8558 word0 = le32_to_cpu(word0);
8559 if (mbx->mbxCommand == MBX_CONFIG_PORT) {
8560 MAILBOX_t *slimmb;
8561 uint32_t slimword0;
8562
8563 slimword0 = readl(phba->MBslimaddr);
8564				slimmb = (MAILBOX_t *)&slimword0;
8565 if (((slimword0 & OWN_CHIP) != OWN_CHIP)
8566 && slimmb->mbxStatus) {
8567 psli->sli_flag &=
8568 ~LPFC_SLI_ACTIVE;
8569 word0 = slimword0;
8570 }
8571 }
8572 } else {
8573
8574 word0 = readl(phba->MBslimaddr);
8575 }
8576
8577 if (lpfc_readl(phba->HAregaddr, &ha_copy)) {
8578 spin_unlock_irqrestore(&phba->hbalock,
8579 drvr_flag);
8580 goto out_not_finished;
8581 }
8582 }
8583
8584 if (psli->sli_flag & LPFC_SLI_ACTIVE) {
8585
8586 lpfc_sli_pcimem_bcopy(phba->mbox, mbx,
8587 MAILBOX_CMD_SIZE);
8588
8589 if (pmbox->out_ext_byte_len && pmbox->ctx_buf) {
8590 lpfc_sli_pcimem_bcopy(phba->mbox_ext,
8591 pmbox->ctx_buf,
8592 pmbox->out_ext_byte_len);
8593 }
8594 } else {
8595
8596 lpfc_memcpy_from_slim(mbx, phba->MBslimaddr,
8597 MAILBOX_CMD_SIZE);
8598
8599 if (pmbox->out_ext_byte_len && pmbox->ctx_buf) {
8600 lpfc_memcpy_from_slim(
8601 pmbox->ctx_buf,
8602 phba->MBslimaddr +
8603 MAILBOX_HBA_EXT_OFFSET,
8604 pmbox->out_ext_byte_len);
8605 }
8606 }
8607
8608 writel(HA_MBATT, phba->HAregaddr);
8609 readl(phba->HAregaddr);
8610
8611 psli->sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
8612 status = mbx->mbxStatus;
8613 }
8614
8615 spin_unlock_irqrestore(&phba->hbalock, drvr_flag);
8616 return status;
8617
8618out_not_finished:
8619 if (processing_queue) {
8620 pmbox->u.mb.mbxStatus = MBX_NOT_FINISHED;
8621 lpfc_mbox_cmpl_put(phba, pmbox);
8622 }
8623 return MBX_NOT_FINISHED;
8624}
8625
8626
8627
8628
8629
8630
8631
8632
8633
8634
8635
8636
8637
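/**
 * lpfc_sli4_async_mbox_block - Block posting of async mailbox commands
 * @phba: Pointer to HBA context object.
 *
 * Marks the asynchronous mailbox path blocked and waits for any active
 * mailbox command to complete, bounded by the command's timeout value.
 *
 * Return: 0 when the port is quiesced, 1 if the wait timed out (in which
 * case the block flag is cleared again).
 **/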
8638static int
8639lpfc_sli4_async_mbox_block(struct lpfc_hba *phba)
8640{
8641 struct lpfc_sli *psli = &phba->sli;
8642 int rc = 0;
8643 unsigned long timeout = 0;
8644
8645
8646 spin_lock_irq(&phba->hbalock);
8647 psli->sli_flag |= LPFC_SLI_ASYNC_MBX_BLK;
8648
8649
8650
8651 if (phba->sli.mbox_active)
8652 timeout = msecs_to_jiffies(lpfc_mbox_tmo_val(phba,
8653 phba->sli.mbox_active) *
8654 1000) + jiffies;
8655 spin_unlock_irq(&phba->hbalock);
8656
8657
8658 if (timeout)
8659 lpfc_sli4_process_missed_mbox_completions(phba);
8660
8661
8662 while (phba->sli.mbox_active) {
8663
8664 msleep(2);
8665 if (time_after(jiffies, timeout)) {
8666
8667 rc = 1;
8668 break;
8669 }
8670 }
8671
8672
8673 if (rc) {
8674 spin_lock_irq(&phba->hbalock);
8675 psli->sli_flag &= ~LPFC_SLI_ASYNC_MBX_BLK;
8676 spin_unlock_irq(&phba->hbalock);
8677 }
8678 return rc;
8679}
8680
8681
8682
8683
8684
8685
8686
8687
8688
8689
8690
8691
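/**
 * lpfc_sli4_async_mbox_unblock - Unblock posting of async mailbox commands
 * @phba: Pointer to HBA context object.
 *
 * Clears the asynchronous mailbox block flag and wakes up the worker
 * thread so that queued mailbox commands resume being posted.
 **/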
8692static void
8693lpfc_sli4_async_mbox_unblock(struct lpfc_hba *phba)
8694{
8695 struct lpfc_sli *psli = &phba->sli;
8696
8697 spin_lock_irq(&phba->hbalock);
8698 if (!(psli->sli_flag & LPFC_SLI_ASYNC_MBX_BLK)) {
8699
8700 spin_unlock_irq(&phba->hbalock);
8701 return;
8702 }
8703
8704
8705
8706
8707
8708
8709 psli->sli_flag &= ~LPFC_SLI_ASYNC_MBX_BLK;
8710 spin_unlock_irq(&phba->hbalock);
8711
8712
8713 lpfc_worker_wake_up(phba);
8714}
8715
8716
8717
8718
8719
8720
8721
8722
8723
8724
8725
8726
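/**
 * lpfc_sli4_wait_bmbx_ready - Wait for bootstrap mailbox register ready
 * @phba: Pointer to HBA context object.
 * @mboxq: Pointer to mailbox object.
 *
 * Polls the bootstrap mailbox register until its ready bit is set or the
 * mailbox command timeout expires.
 *
 * Return: 0 when ready, MBXERR_ERROR on timeout.
 **/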
8727static int
8728lpfc_sli4_wait_bmbx_ready(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
8729{
8730 uint32_t db_ready;
8731 unsigned long timeout;
8732 struct lpfc_register bmbx_reg;
8733
8734 timeout = msecs_to_jiffies(lpfc_mbox_tmo_val(phba, mboxq)
8735 * 1000) + jiffies;
8736
8737 do {
8738 bmbx_reg.word0 = readl(phba->sli4_hba.BMBXregaddr);
8739 db_ready = bf_get(lpfc_bmbx_rdy, &bmbx_reg);
8740 if (!db_ready)
8741 mdelay(2);
8742
8743 if (time_after(jiffies, timeout))
8744 return MBXERR_ERROR;
8745 } while (!db_ready);
8746
8747 return 0;
8748}
8749
8750
8751
8752
8753
8754
8755
8756
8757
8758
8759
8760
8761
8762
8763
8764
8765
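/**
 * lpfc_sli4_post_sync_mbox - Post an SLI4 mailbox via the bootstrap mailbox
 * @phba: Pointer to HBA context object.
 * @mboxq: Pointer to mailbox object.
 *
 * Synchronously issues a mailbox command through the bootstrap mailbox
 * region: the MQE is copied into the bootstrap buffer, the high and low
 * DMA addresses are written to the register, and the completed MQE and
 * MCQE are copied back once the port signals ready. The MCQE status is
 * folded into the MQE status on failure.
 **/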
8766static int
8767lpfc_sli4_post_sync_mbox(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
8768{
8769 int rc = MBX_SUCCESS;
8770 unsigned long iflag;
8771 uint32_t mcqe_status;
8772 uint32_t mbx_cmnd;
8773 struct lpfc_sli *psli = &phba->sli;
8774 struct lpfc_mqe *mb = &mboxq->u.mqe;
8775 struct lpfc_bmbx_create *mbox_rgn;
8776 struct dma_address *dma_address;
8777
8778
8779
8780
8781
8782 spin_lock_irqsave(&phba->hbalock, iflag);
8783 if (psli->sli_flag & LPFC_SLI_MBOX_ACTIVE) {
8784 spin_unlock_irqrestore(&phba->hbalock, iflag);
8785 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
8786 "(%d):2532 Mailbox command x%x (x%x/x%x) "
8787 "cannot issue Data: x%x x%x\n",
8788 mboxq->vport ? mboxq->vport->vpi : 0,
8789 mboxq->u.mb.mbxCommand,
8790 lpfc_sli_config_mbox_subsys_get(phba, mboxq),
8791 lpfc_sli_config_mbox_opcode_get(phba, mboxq),
8792 psli->sli_flag, MBX_POLL);
8793 return MBXERR_ERROR;
8794 }
8795
8796 psli->sli_flag |= LPFC_SLI_MBOX_ACTIVE;
8797 phba->sli.mbox_active = mboxq;
8798 spin_unlock_irqrestore(&phba->hbalock, iflag);
8799
8800
8801 rc = lpfc_sli4_wait_bmbx_ready(phba, mboxq);
8802 if (rc)
8803 goto exit;
8804
8805
8806
8807
8808
8809 mbx_cmnd = bf_get(lpfc_mqe_command, mb);
8810 memset(phba->sli4_hba.bmbx.avirt, 0, sizeof(struct lpfc_bmbx_create));
8811 lpfc_sli4_pcimem_bcopy(mb, phba->sli4_hba.bmbx.avirt,
8812 sizeof(struct lpfc_mqe));
8813
8814
8815 dma_address = &phba->sli4_hba.bmbx.dma_address;
8816 writel(dma_address->addr_hi, phba->sli4_hba.BMBXregaddr);
8817
8818
8819 rc = lpfc_sli4_wait_bmbx_ready(phba, mboxq);
8820 if (rc)
8821 goto exit;
8822
8823
8824 writel(dma_address->addr_lo, phba->sli4_hba.BMBXregaddr);
8825
8826
8827 rc = lpfc_sli4_wait_bmbx_ready(phba, mboxq);
8828 if (rc)
8829 goto exit;
8830
8831
8832
8833
8834
8835
8836 lpfc_sli4_pcimem_bcopy(phba->sli4_hba.bmbx.avirt, mb,
8837 sizeof(struct lpfc_mqe));
8838 mbox_rgn = (struct lpfc_bmbx_create *) phba->sli4_hba.bmbx.avirt;
8839 lpfc_sli4_pcimem_bcopy(&mbox_rgn->mcqe, &mboxq->mcqe,
8840 sizeof(struct lpfc_mcqe));
8841 mcqe_status = bf_get(lpfc_mcqe_status, &mbox_rgn->mcqe);
8842
8843
8844
8845
8846
8847 if (mcqe_status != MB_CQE_STATUS_SUCCESS) {
8848 if (bf_get(lpfc_mqe_status, mb) == MBX_SUCCESS)
8849 bf_set(lpfc_mqe_status, mb,
8850 (LPFC_MBX_ERROR_RANGE | mcqe_status));
8851 rc = MBXERR_ERROR;
8852 } else
8853 lpfc_sli4_swap_str(phba, mboxq);
8854
8855 lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI,
8856 "(%d):0356 Mailbox cmd x%x (x%x/x%x) Status x%x "
8857 "Data: x%x x%x x%x x%x x%x x%x x%x x%x x%x x%x x%x"
8858 " x%x x%x CQ: x%x x%x x%x x%x\n",
8859 mboxq->vport ? mboxq->vport->vpi : 0, mbx_cmnd,
8860 lpfc_sli_config_mbox_subsys_get(phba, mboxq),
8861 lpfc_sli_config_mbox_opcode_get(phba, mboxq),
8862 bf_get(lpfc_mqe_status, mb),
8863 mb->un.mb_words[0], mb->un.mb_words[1],
8864 mb->un.mb_words[2], mb->un.mb_words[3],
8865 mb->un.mb_words[4], mb->un.mb_words[5],
8866 mb->un.mb_words[6], mb->un.mb_words[7],
8867 mb->un.mb_words[8], mb->un.mb_words[9],
8868 mb->un.mb_words[10], mb->un.mb_words[11],
8869 mb->un.mb_words[12], mboxq->mcqe.word0,
8870 mboxq->mcqe.mcqe_tag0, mboxq->mcqe.mcqe_tag1,
8871 mboxq->mcqe.trailer);
8872exit:
8873
8874 spin_lock_irqsave(&phba->hbalock, iflag);
8875 psli->sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
8876 phba->sli.mbox_active = NULL;
8877 spin_unlock_irqrestore(&phba->hbalock, iflag);
8878 return rc;
8879}
8880
8881
8882
8883
8884
8885
8886
8887
8888
8889
8890
8891
8892
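/**
 * lpfc_sli_issue_mbox_s4 - Issue an SLI4 mailbox command to firmware
 * @phba: Pointer to HBA context object.
 * @mboxq: Pointer to mailbox object.
 * @flag: MBX_POLL for synchronous or MBX_NOWAIT for asynchronous operation.
 *
 * Polled commands are posted through the bootstrap mailbox, blocking the
 * asynchronous mailbox path first if interrupts are enabled. Asynchronous
 * commands are validated, queued to the driver's mailbox queue, and the
 * worker thread is woken to post them to the firmware MQ.
 **/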
8893static int
8894lpfc_sli_issue_mbox_s4(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq,
8895 uint32_t flag)
8896{
8897 struct lpfc_sli *psli = &phba->sli;
8898 unsigned long iflags;
8899 int rc;
8900
8901
8902 lpfc_idiag_mbxacc_dump_issue_mbox(phba, &mboxq->u.mb);
8903
8904 rc = lpfc_mbox_dev_check(phba);
8905 if (unlikely(rc)) {
8906 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
8907 "(%d):2544 Mailbox command x%x (x%x/x%x) "
8908 "cannot issue Data: x%x x%x\n",
8909 mboxq->vport ? mboxq->vport->vpi : 0,
8910 mboxq->u.mb.mbxCommand,
8911 lpfc_sli_config_mbox_subsys_get(phba, mboxq),
8912 lpfc_sli_config_mbox_opcode_get(phba, mboxq),
8913 psli->sli_flag, flag);
8914 goto out_not_finished;
8915 }
8916
8917
8918 if (!phba->sli4_hba.intr_enable) {
8919 if (flag == MBX_POLL)
8920 rc = lpfc_sli4_post_sync_mbox(phba, mboxq);
8921 else
8922 rc = -EIO;
8923 if (rc != MBX_SUCCESS)
8924 lpfc_printf_log(phba, KERN_WARNING, LOG_MBOX | LOG_SLI,
8925 "(%d):2541 Mailbox command x%x "
8926 "(x%x/x%x) failure: "
8927 "mqe_sta: x%x mcqe_sta: x%x/x%x "
8928				"Data: x%x x%x\n",
8929 mboxq->vport ? mboxq->vport->vpi : 0,
8930 mboxq->u.mb.mbxCommand,
8931 lpfc_sli_config_mbox_subsys_get(phba,
8932 mboxq),
8933 lpfc_sli_config_mbox_opcode_get(phba,
8934 mboxq),
8935 bf_get(lpfc_mqe_status, &mboxq->u.mqe),
8936 bf_get(lpfc_mcqe_status, &mboxq->mcqe),
8937 bf_get(lpfc_mcqe_ext_status,
8938 &mboxq->mcqe),
8939 psli->sli_flag, flag);
8940 return rc;
8941 } else if (flag == MBX_POLL) {
8942 lpfc_printf_log(phba, KERN_WARNING, LOG_MBOX | LOG_SLI,
8943 "(%d):2542 Try to issue mailbox command "
8944 "x%x (x%x/x%x) synchronously ahead of async "
8945 "mailbox command queue: x%x x%x\n",
8946 mboxq->vport ? mboxq->vport->vpi : 0,
8947 mboxq->u.mb.mbxCommand,
8948 lpfc_sli_config_mbox_subsys_get(phba, mboxq),
8949 lpfc_sli_config_mbox_opcode_get(phba, mboxq),
8950 psli->sli_flag, flag);
8951
8952 rc = lpfc_sli4_async_mbox_block(phba);
8953 if (!rc) {
8954
8955 rc = lpfc_sli4_post_sync_mbox(phba, mboxq);
8956 if (rc != MBX_SUCCESS)
8957 lpfc_printf_log(phba, KERN_WARNING,
8958 LOG_MBOX | LOG_SLI,
8959 "(%d):2597 Sync Mailbox command "
8960 "x%x (x%x/x%x) failure: "
8961 "mqe_sta: x%x mcqe_sta: x%x/x%x "
8962					"Data: x%x x%x\n",
8963 mboxq->vport ? mboxq->vport->vpi : 0,
8964 mboxq->u.mb.mbxCommand,
8965 lpfc_sli_config_mbox_subsys_get(phba,
8966 mboxq),
8967 lpfc_sli_config_mbox_opcode_get(phba,
8968 mboxq),
8969 bf_get(lpfc_mqe_status, &mboxq->u.mqe),
8970 bf_get(lpfc_mcqe_status, &mboxq->mcqe),
8971 bf_get(lpfc_mcqe_ext_status,
8972 &mboxq->mcqe),
8973 psli->sli_flag, flag);
8974
8975 lpfc_sli4_async_mbox_unblock(phba);
8976 }
8977 return rc;
8978 }
8979
8980
8981 rc = lpfc_mbox_cmd_check(phba, mboxq);
8982 if (rc) {
8983 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
8984 "(%d):2543 Mailbox command x%x (x%x/x%x) "
8985 "cannot issue Data: x%x x%x\n",
8986 mboxq->vport ? mboxq->vport->vpi : 0,
8987 mboxq->u.mb.mbxCommand,
8988 lpfc_sli_config_mbox_subsys_get(phba, mboxq),
8989 lpfc_sli_config_mbox_opcode_get(phba, mboxq),
8990 psli->sli_flag, flag);
8991 goto out_not_finished;
8992 }
8993
8994
8995 psli->slistat.mbox_busy++;
8996 spin_lock_irqsave(&phba->hbalock, iflags);
8997 lpfc_mbox_put(phba, mboxq);
8998 spin_unlock_irqrestore(&phba->hbalock, iflags);
8999 lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI,
9000 "(%d):0354 Mbox cmd issue - Enqueue Data: "
9001 "x%x (x%x/x%x) x%x x%x x%x\n",
9002 mboxq->vport ? mboxq->vport->vpi : 0xffffff,
9003 bf_get(lpfc_mqe_command, &mboxq->u.mqe),
9004 lpfc_sli_config_mbox_subsys_get(phba, mboxq),
9005 lpfc_sli_config_mbox_opcode_get(phba, mboxq),
9006 phba->pport->port_state,
9007 psli->sli_flag, MBX_NOWAIT);
9008
9009 lpfc_worker_wake_up(phba);
9010
9011 return MBX_BUSY;
9012
9013out_not_finished:
9014 return MBX_NOT_FINISHED;
9015}
9016
9017
9018
9019
9020
9021
9022
9023
9024
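/**
 * lpfc_sli4_post_async_mbox - Post async mbox to the firmware mailbox queue
 * @phba: Pointer to HBA context object.
 *
 * Called by the worker thread to post the next queued mailbox command to
 * the SLI4 mailbox work queue when no other mailbox command is active and
 * the asynchronous path is not blocked. Starts the mailbox timeout timer
 * before posting the MQE.
 **/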
9025int
9026lpfc_sli4_post_async_mbox(struct lpfc_hba *phba)
9027{
9028 struct lpfc_sli *psli = &phba->sli;
9029 LPFC_MBOXQ_t *mboxq;
9030 int rc = MBX_SUCCESS;
9031 unsigned long iflags;
9032 struct lpfc_mqe *mqe;
9033 uint32_t mbx_cmnd;
9034
9035
9036 if (unlikely(!phba->sli4_hba.intr_enable))
9037 return MBX_NOT_FINISHED;
9038
9039
9040 spin_lock_irqsave(&phba->hbalock, iflags);
9041 if (unlikely(psli->sli_flag & LPFC_SLI_ASYNC_MBX_BLK)) {
9042 spin_unlock_irqrestore(&phba->hbalock, iflags);
9043 return MBX_NOT_FINISHED;
9044 }
9045 if (psli->sli_flag & LPFC_SLI_MBOX_ACTIVE) {
9046 spin_unlock_irqrestore(&phba->hbalock, iflags);
9047 return MBX_NOT_FINISHED;
9048 }
9049 if (unlikely(phba->sli.mbox_active)) {
9050 spin_unlock_irqrestore(&phba->hbalock, iflags);
9051 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
9052 "0384 There is pending active mailbox cmd\n");
9053 return MBX_NOT_FINISHED;
9054 }
9055
9056 psli->sli_flag |= LPFC_SLI_MBOX_ACTIVE;
9057
9058
9059 mboxq = lpfc_mbox_get(phba);
9060
9061
9062 if (!mboxq) {
9063 psli->sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
9064 spin_unlock_irqrestore(&phba->hbalock, iflags);
9065 return MBX_SUCCESS;
9066 }
9067 phba->sli.mbox_active = mboxq;
9068 spin_unlock_irqrestore(&phba->hbalock, iflags);
9069
9070
9071 rc = lpfc_mbox_dev_check(phba);
9072 if (unlikely(rc))
9073
9074 goto out_not_finished;
9075
9076
9077 mqe = &mboxq->u.mqe;
9078 mbx_cmnd = bf_get(lpfc_mqe_command, mqe);
9079
9080
9081 mod_timer(&psli->mbox_tmo, (jiffies +
9082 msecs_to_jiffies(1000 * lpfc_mbox_tmo_val(phba, mboxq))));
9083
9084 lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI,
9085 "(%d):0355 Mailbox cmd x%x (x%x/x%x) issue Data: "
9086 "x%x x%x\n",
9087 mboxq->vport ? mboxq->vport->vpi : 0, mbx_cmnd,
9088 lpfc_sli_config_mbox_subsys_get(phba, mboxq),
9089 lpfc_sli_config_mbox_opcode_get(phba, mboxq),
9090 phba->pport->port_state, psli->sli_flag);
9091
9092 if (mbx_cmnd != MBX_HEARTBEAT) {
9093 if (mboxq->vport) {
9094 lpfc_debugfs_disc_trc(mboxq->vport,
9095 LPFC_DISC_TRC_MBOX_VPORT,
9096 "MBOX Send vport: cmd:x%x mb:x%x x%x",
9097 mbx_cmnd, mqe->un.mb_words[0],
9098 mqe->un.mb_words[1]);
9099 } else {
9100 lpfc_debugfs_disc_trc(phba->pport,
9101 LPFC_DISC_TRC_MBOX,
9102 "MBOX Send: cmd:x%x mb:x%x x%x",
9103 mbx_cmnd, mqe->un.mb_words[0],
9104 mqe->un.mb_words[1]);
9105 }
9106 }
9107 psli->slistat.mbox_cmd++;
9108
9109
9110 rc = lpfc_sli4_mq_put(phba->sli4_hba.mbx_wq, mqe);
9111 if (rc != MBX_SUCCESS) {
9112 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
9113 "(%d):2533 Mailbox command x%x (x%x/x%x) "
9114 "cannot issue Data: x%x x%x\n",
9115 mboxq->vport ? mboxq->vport->vpi : 0,
9116 mboxq->u.mb.mbxCommand,
9117 lpfc_sli_config_mbox_subsys_get(phba, mboxq),
9118 lpfc_sli_config_mbox_opcode_get(phba, mboxq),
9119 psli->sli_flag, MBX_NOWAIT);
9120 goto out_not_finished;
9121 }
9122
9123 return rc;
9124
9125out_not_finished:
9126 spin_lock_irqsave(&phba->hbalock, iflags);
9127 if (phba->sli.mbox_active) {
9128 mboxq->u.mb.mbxStatus = MBX_NOT_FINISHED;
9129 __lpfc_mbox_cmpl_put(phba, mboxq);
9130
9131 psli->sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
9132 phba->sli.mbox_active = NULL;
9133 }
9134 spin_unlock_irqrestore(&phba->hbalock, iflags);
9135
9136 return MBX_NOT_FINISHED;
9137}
9138
9139
9140
9141
9142
9143
9144
9145
9146
9147
9148
9149
9150
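/**
 * lpfc_sli_issue_mbox - Wrapper func for issuing mailbox command
 * @phba: Pointer to HBA context object.
 * @pmbox: Pointer to mailbox object.
 * @flag: Flag indicating polling or interrupt driven operation.
 *
 * Routes the mailbox command to the SLI3 or SLI4 specific issue routine
 * selected at API table setup time.
 **/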
9151int
9152lpfc_sli_issue_mbox(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmbox, uint32_t flag)
9153{
9154 return phba->lpfc_sli_issue_mbox(phba, pmbox, flag);
9155}
9156
9157
9158
9159
9160
9161
9162
9163
9164
9165
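/**
 * lpfc_mbox_api_table_setup - Set up mbox api function jump table
 * @phba: Pointer to HBA context object.
 * @dev_grp: The HBA PCI-Device group number.
 *
 * Populates the mailbox and SLI function pointers with the SLI3 or SLI4
 * variants according to the PCI device group.
 *
 * Return: 0 on success, -ENODEV for an invalid device group.
 **/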
9166int
9167lpfc_mbox_api_table_setup(struct lpfc_hba *phba, uint8_t dev_grp)
9168{
9169
9170 switch (dev_grp) {
9171 case LPFC_PCI_DEV_LP:
9172 phba->lpfc_sli_issue_mbox = lpfc_sli_issue_mbox_s3;
9173 phba->lpfc_sli_handle_slow_ring_event =
9174 lpfc_sli_handle_slow_ring_event_s3;
9175 phba->lpfc_sli_hbq_to_firmware = lpfc_sli_hbq_to_firmware_s3;
9176 phba->lpfc_sli_brdrestart = lpfc_sli_brdrestart_s3;
9177 phba->lpfc_sli_brdready = lpfc_sli_brdready_s3;
9178 break;
9179 case LPFC_PCI_DEV_OC:
9180 phba->lpfc_sli_issue_mbox = lpfc_sli_issue_mbox_s4;
9181 phba->lpfc_sli_handle_slow_ring_event =
9182 lpfc_sli_handle_slow_ring_event_s4;
9183 phba->lpfc_sli_hbq_to_firmware = lpfc_sli_hbq_to_firmware_s4;
9184 phba->lpfc_sli_brdrestart = lpfc_sli_brdrestart_s4;
9185 phba->lpfc_sli_brdready = lpfc_sli_brdready_s4;
9186 break;
9187 default:
9188 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
9189 "1420 Invalid HBA PCI-device group: 0x%x\n",
9190 dev_grp);
9191		return -ENODEV;
9193 }
9194 return 0;
9195}
9196
9197
9198
9199
9200
9201
9202
9203
9204
9205
9206
9207
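/**
 * __lpfc_sli_ringtx_put - Add an iocb to the txq
 * @phba: Pointer to HBA context object.
 * @pring: Pointer to driver SLI ring object.
 * @piocb: Pointer to the driver iocb object.
 *
 * Adds the iocb to the ring's txq. The caller must hold the ring lock
 * (SLI4) or the hbalock (SLI3), as asserted below.
 **/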
9208void
9209__lpfc_sli_ringtx_put(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
9210 struct lpfc_iocbq *piocb)
9211{
9212 if (phba->sli_rev == LPFC_SLI_REV4)
9213 lockdep_assert_held(&pring->ring_lock);
9214 else
9215 lockdep_assert_held(&phba->hbalock);
9216
9217 list_add_tail(&piocb->list, &pring->txq);
9218}
9219
9220
9221
9222
9223
9224
9225
9226
9227
9228
9229
9230
9231
9232
9233
9234
9235
9236
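/**
 * lpfc_sli_next_iocb - Get the next iocb in the txq
 * @phba: Pointer to HBA context object.
 * @pring: Pointer to driver SLI ring object.
 * @piocb: Pointer to address of newly queued command iocb.
 *
 * Returns the next iocb from the txq if one is queued; otherwise returns
 * the new command referenced by @piocb and clears the caller's pointer.
 * Called with hbalock held.
 **/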
9237static struct lpfc_iocbq *
9238lpfc_sli_next_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
9239 struct lpfc_iocbq **piocb)
9240{
9241 struct lpfc_iocbq * nextiocb;
9242
9243 lockdep_assert_held(&phba->hbalock);
9244
9245 nextiocb = lpfc_sli_ringtx_get(phba, pring);
9246 if (!nextiocb) {
9247 nextiocb = *piocb;
9248 *piocb = NULL;
9249 }
9250
9251 return nextiocb;
9252}
9253
9254
9255
9256
9257
9258
9259
9260
9261
9262
9263
9264
9265
9266
9267
9268
9269
9270
9271
9272
9273
9274
9275
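/**
 * __lpfc_sli_issue_iocb_s3 - Issue an iocb command to an SLI3 ring
 * @phba: Pointer to HBA context object.
 * @ring_number: SLI ring number to issue the iocb on.
 * @piocb: Pointer to command iocb.
 * @flag: Flag indicating if this command can be put into txq.
 *
 * Validates the adapter and link state, then copies as many queued and
 * new iocbs as the ring allows into ring slots. While the link is down
 * only a small set of commands (aborts, ring-buffer posts, XRI
 * create/close) may be issued. If the iocb cannot be submitted and
 * SLI_IOCB_RET_IOCB is not set, it is queued to the txq instead.
 **/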
9276static int
9277__lpfc_sli_issue_iocb_s3(struct lpfc_hba *phba, uint32_t ring_number,
9278 struct lpfc_iocbq *piocb, uint32_t flag)
9279{
9280 struct lpfc_iocbq *nextiocb;
9281 IOCB_t *iocb;
9282 struct lpfc_sli_ring *pring = &phba->sli.sli3_ring[ring_number];
9283
9284 lockdep_assert_held(&phba->hbalock);
9285
9286 if (piocb->iocb_cmpl && (!piocb->vport) &&
9287 (piocb->iocb.ulpCommand != CMD_ABORT_XRI_CN) &&
9288 (piocb->iocb.ulpCommand != CMD_CLOSE_XRI_CN)) {
9289 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
9290 "1807 IOCB x%x failed. No vport\n",
9291 piocb->iocb.ulpCommand);
9292 dump_stack();
9293 return IOCB_ERROR;
9294 }
9295
9296
9297
9298 if (unlikely(pci_channel_offline(phba->pcidev)))
9299 return IOCB_ERROR;
9300
9301
9302 if (unlikely(phba->hba_flag & DEFER_ERATT))
9303 return IOCB_ERROR;
9304
9305
9306
9307
9308 if (unlikely(phba->link_state < LPFC_LINK_DOWN))
9309 return IOCB_ERROR;
9310
9311
9312
9313
9314
9315 if (unlikely(pring->flag & LPFC_STOP_IOCB_EVENT))
9316 goto iocb_busy;
9317
9318 if (unlikely(phba->link_state == LPFC_LINK_DOWN)) {
9319
9320
9321
9322
9323 switch (piocb->iocb.ulpCommand) {
9324 case CMD_GEN_REQUEST64_CR:
9325 case CMD_GEN_REQUEST64_CX:
9326 if (!(phba->sli.sli_flag & LPFC_MENLO_MAINT) ||
9327 (piocb->iocb.un.genreq64.w5.hcsw.Rctl !=
9328 FC_RCTL_DD_UNSOL_CMD) ||
9329 (piocb->iocb.un.genreq64.w5.hcsw.Type !=
9330 MENLO_TRANSPORT_TYPE))
9331
9332 goto iocb_busy;
9333 break;
9334 case CMD_QUE_RING_BUF_CN:
9335 case CMD_QUE_RING_BUF64_CN:
9336
9337
9338
9339
9340 if (piocb->iocb_cmpl)
9341 piocb->iocb_cmpl = NULL;
9342 fallthrough;
9343 case CMD_CREATE_XRI_CR:
9344 case CMD_CLOSE_XRI_CN:
9345 case CMD_CLOSE_XRI_CX:
9346 break;
9347 default:
9348 goto iocb_busy;
9349 }
9350
9351
9352
9353
9354
9355 } else if (unlikely(pring->ringno == LPFC_FCP_RING &&
9356 !(phba->sli.sli_flag & LPFC_PROCESS_LA))) {
9357 goto iocb_busy;
9358 }
9359
9360 while ((iocb = lpfc_sli_next_iocb_slot(phba, pring)) &&
9361 (nextiocb = lpfc_sli_next_iocb(phba, pring, &piocb)))
9362 lpfc_sli_submit_iocb(phba, pring, iocb, nextiocb);
9363
9364 if (iocb)
9365 lpfc_sli_update_ring(phba, pring);
9366 else
9367 lpfc_sli_update_full_ring(phba, pring);
9368
9369 if (!piocb)
9370 return IOCB_SUCCESS;
9371
9372 goto out_busy;
9373
9374 iocb_busy:
9375 pring->stats.iocb_cmd_delay++;
9376
9377 out_busy:
9378
9379 if (!(flag & SLI_IOCB_RET_IOCB)) {
9380 __lpfc_sli_ringtx_put(phba, pring, piocb);
9381 return IOCB_SUCCESS;
9382 }
9383
9384 return IOCB_BUSY;
9385}
9386
9387
9388
9389
9390
9391
9392
9393
9394
9395
9396
9397
9398
9399
9400
9401
9402
9403
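/**
 * lpfc_sli4_bpl2sgl - Convert the bpl/bde to a sgl
 * @phba: Pointer to HBA context object.
 * @piocbq: Pointer to command iocb.
 * @sglq: Pointer to the scatter gather queue object.
 *
 * Converts the buffer pointer list (or single BDE) of the iocb into the
 * SGE list of the given sglq, computing running offsets for inbound
 * GEN_REQUEST64 transfers.
 *
 * Return: the sglq's XRI tag, or NO_XRI on failure.
 **/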
9404static uint16_t
9405lpfc_sli4_bpl2sgl(struct lpfc_hba *phba, struct lpfc_iocbq *piocbq,
9406 struct lpfc_sglq *sglq)
9407{
9408 uint16_t xritag = NO_XRI;
9409 struct ulp_bde64 *bpl = NULL;
9410 struct ulp_bde64 bde;
9411 struct sli4_sge *sgl = NULL;
9412 struct lpfc_dmabuf *dmabuf;
9413 IOCB_t *icmd;
9414 int numBdes = 0;
9415 int i = 0;
9416 uint32_t offset = 0;
9417 int inbound = 0;
9418
9419 if (!piocbq || !sglq)
9420 return xritag;
9421
9422 sgl = (struct sli4_sge *)sglq->sgl;
9423 icmd = &piocbq->iocb;
9424 if (icmd->ulpCommand == CMD_XMIT_BLS_RSP64_CX)
9425 return sglq->sli4_xritag;
9426 if (icmd->un.genreq64.bdl.bdeFlags == BUFF_TYPE_BLP_64) {
9427 numBdes = icmd->un.genreq64.bdl.bdeSize /
9428 sizeof(struct ulp_bde64);
9429
9430
9431
9432
9433 if (piocbq->context3)
9434 dmabuf = (struct lpfc_dmabuf *)piocbq->context3;
9435 else
9436 return xritag;
9437
9438 bpl = (struct ulp_bde64 *)dmabuf->virt;
9439 if (!bpl)
9440 return xritag;
9441
9442 for (i = 0; i < numBdes; i++) {
9443
9444 sgl->addr_hi = bpl->addrHigh;
9445 sgl->addr_lo = bpl->addrLow;
9446
9447 sgl->word2 = le32_to_cpu(sgl->word2);
9448 if ((i+1) == numBdes)
9449 bf_set(lpfc_sli4_sge_last, sgl, 1);
9450 else
9451 bf_set(lpfc_sli4_sge_last, sgl, 0);
9452
9453
9454
9455 bde.tus.w = le32_to_cpu(bpl->tus.w);
9456 sgl->sge_len = cpu_to_le32(bde.tus.f.bdeSize);
9457
9458
9459
9460
9461 if (piocbq->iocb.ulpCommand == CMD_GEN_REQUEST64_CR) {
9462
9463 if (bpl->tus.f.bdeFlags == BUFF_TYPE_BDE_64I)
9464 inbound++;
9465
9466 if (inbound == 1)
9467 offset = 0;
9468 bf_set(lpfc_sli4_sge_offset, sgl, offset);
9469 bf_set(lpfc_sli4_sge_type, sgl,
9470 LPFC_SGE_TYPE_DATA);
9471 offset += bde.tus.f.bdeSize;
9472 }
9473 sgl->word2 = cpu_to_le32(sgl->word2);
9474 bpl++;
9475 sgl++;
9476 }
9477 } else if (icmd->un.genreq64.bdl.bdeFlags == BUFF_TYPE_BDE_64) {
9478
9479
9480
9481
9482 sgl->addr_hi =
9483 cpu_to_le32(icmd->un.genreq64.bdl.addrHigh);
9484 sgl->addr_lo =
9485 cpu_to_le32(icmd->un.genreq64.bdl.addrLow);
9486 sgl->word2 = le32_to_cpu(sgl->word2);
9487 bf_set(lpfc_sli4_sge_last, sgl, 1);
9488 sgl->word2 = cpu_to_le32(sgl->word2);
9489 sgl->sge_len =
9490 cpu_to_le32(icmd->un.genreq64.bdl.bdeSize);
9491 }
9492 return sglq->sli4_xritag;
9493}
9494
9495
9496
9497
9498
9499
9500
9501
9502
9503
9504
9505
9506
9507
9508
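/**
 * lpfc_sli4_iocb2wqe - Convert the iocb to a wqe
 * @phba: Pointer to HBA context object.
 * @iocbq: Pointer to command iocb.
 * @wqe: Pointer to the work queue entry.
 *
 * Translates an SLI3-style IOCB into an SLI4 128-byte WQE, filling in the
 * command-specific fields (ELS, XMIT_SEQUENCE, FCP read/write/command,
 * etc.) and, when fcp_embed_io is enabled, embedding the FCP command as
 * immediate data in the WQE.
 **/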
9509static int
9510lpfc_sli4_iocb2wqe(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq,
9511 union lpfc_wqe128 *wqe)
9512{
9513 uint32_t xmit_len = 0, total_len = 0;
9514 uint8_t ct = 0;
9515 uint32_t fip;
9516 uint32_t abort_tag;
9517 uint8_t command_type = ELS_COMMAND_NON_FIP;
9518 uint8_t cmnd;
9519 uint16_t xritag;
9520 uint16_t abrt_iotag;
9521 struct lpfc_iocbq *abrtiocbq;
9522 struct ulp_bde64 *bpl = NULL;
9523 uint32_t els_id = LPFC_ELS_ID_DEFAULT;
9524 int numBdes, i;
9525 struct ulp_bde64 bde;
9526 struct lpfc_nodelist *ndlp;
9527 uint32_t *pcmd;
9528 uint32_t if_type;
9529
9530 fip = phba->hba_flag & HBA_FIP_SUPPORT;
9531
9532 if (iocbq->iocb_flag & LPFC_IO_FCP)
9533 command_type = FCP_COMMAND;
9534 else if (fip && (iocbq->iocb_flag & LPFC_FIP_ELS_ID_MASK))
9535 command_type = ELS_COMMAND_FIP;
9536 else
9537 command_type = ELS_COMMAND_NON_FIP;
9538
9539 if (phba->fcp_embed_io)
9540 memset(wqe, 0, sizeof(union lpfc_wqe128));
9541
9542 memcpy(wqe, &iocbq->iocb, sizeof(union lpfc_wqe));
9543
9544 wqe->generic.wqe_com.word7 = 0;
9545 wqe->generic.wqe_com.word10 = 0;
9546
9547 abort_tag = (uint32_t) iocbq->iotag;
9548 xritag = iocbq->sli4_xritag;
9549
9550 if (iocbq->iocb.un.genreq64.bdl.bdeFlags == BUFF_TYPE_BLP_64) {
9551 numBdes = iocbq->iocb.un.genreq64.bdl.bdeSize /
9552 sizeof(struct ulp_bde64);
9553 bpl = (struct ulp_bde64 *)
9554 ((struct lpfc_dmabuf *)iocbq->context3)->virt;
9555 if (!bpl)
9556 return IOCB_ERROR;
9557
9558
9559 wqe->generic.bde.addrHigh = le32_to_cpu(bpl->addrHigh);
9560 wqe->generic.bde.addrLow = le32_to_cpu(bpl->addrLow);
9561
9562
9563
9564 wqe->generic.bde.tus.w = le32_to_cpu(bpl->tus.w);
9565 xmit_len = wqe->generic.bde.tus.f.bdeSize;
9566 total_len = 0;
9567 for (i = 0; i < numBdes; i++) {
9568 bde.tus.w = le32_to_cpu(bpl[i].tus.w);
9569 total_len += bde.tus.f.bdeSize;
9570 }
9571 } else
9572 xmit_len = iocbq->iocb.un.fcpi64.bdl.bdeSize;
9573
9574 iocbq->iocb.ulpIoTag = iocbq->iotag;
9575 cmnd = iocbq->iocb.ulpCommand;
9576
9577 switch (iocbq->iocb.ulpCommand) {
9578 case CMD_ELS_REQUEST64_CR:
9579 if (iocbq->iocb_flag & LPFC_IO_LIBDFC)
9580 ndlp = iocbq->context_un.ndlp;
9581 else
9582 ndlp = (struct lpfc_nodelist *)iocbq->context1;
9583 if (!iocbq->iocb.ulpLe) {
9584 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
9585 "2007 Only Limited Edition cmd Format"
9586 " supported 0x%x\n",
9587 iocbq->iocb.ulpCommand);
9588 return IOCB_ERROR;
9589 }
9590
9591 wqe->els_req.payload_len = xmit_len;
9592
9593 bf_set(wqe_tmo, &wqe->els_req.wqe_com,
9594 iocbq->iocb.ulpTimeout);
9595
9596 bf_set(els_req64_vf, &wqe->els_req, 0);
9597
9598 bf_set(els_req64_vfid, &wqe->els_req, 0);
9599 ct = ((iocbq->iocb.ulpCt_h << 1) | iocbq->iocb.ulpCt_l);
9600 bf_set(wqe_ctxt_tag, &wqe->els_req.wqe_com,
9601 iocbq->iocb.ulpContext);
9602 bf_set(wqe_ct, &wqe->els_req.wqe_com, ct);
9603 bf_set(wqe_pu, &wqe->els_req.wqe_com, 0);
9604
9605 if (command_type == ELS_COMMAND_FIP)
9606 els_id = ((iocbq->iocb_flag & LPFC_FIP_ELS_ID_MASK)
9607 >> LPFC_FIP_ELS_ID_SHIFT);
9608 pcmd = (uint32_t *) (((struct lpfc_dmabuf *)
9609 iocbq->context2)->virt);
9610 if_type = bf_get(lpfc_sli_intf_if_type,
9611 &phba->sli4_hba.sli_intf);
9612 if (if_type >= LPFC_SLI_INTF_IF_TYPE_2) {
9613 if (pcmd && (*pcmd == ELS_CMD_FLOGI ||
9614 *pcmd == ELS_CMD_SCR ||
9615 *pcmd == ELS_CMD_RDF ||
9616 *pcmd == ELS_CMD_RSCN_XMT ||
9617 *pcmd == ELS_CMD_FDISC ||
9618 *pcmd == ELS_CMD_LOGO ||
9619 *pcmd == ELS_CMD_PLOGI)) {
9620 bf_set(els_req64_sp, &wqe->els_req, 1);
9621 bf_set(els_req64_sid, &wqe->els_req,
9622 iocbq->vport->fc_myDID);
9623 if ((*pcmd == ELS_CMD_FLOGI) &&
9624 !(phba->fc_topology ==
9625 LPFC_TOPOLOGY_LOOP))
9626 bf_set(els_req64_sid, &wqe->els_req, 0);
9627 bf_set(wqe_ct, &wqe->els_req.wqe_com, 1);
9628 bf_set(wqe_ctxt_tag, &wqe->els_req.wqe_com,
9629 phba->vpi_ids[iocbq->vport->vpi]);
9630 } else if (pcmd && iocbq->context1) {
9631 bf_set(wqe_ct, &wqe->els_req.wqe_com, 0);
9632 bf_set(wqe_ctxt_tag, &wqe->els_req.wqe_com,
9633 phba->sli4_hba.rpi_ids[ndlp->nlp_rpi]);
9634 }
9635 }
9636 bf_set(wqe_temp_rpi, &wqe->els_req.wqe_com,
9637 phba->sli4_hba.rpi_ids[ndlp->nlp_rpi]);
9638 bf_set(wqe_els_id, &wqe->els_req.wqe_com, els_id);
9639 bf_set(wqe_dbde, &wqe->els_req.wqe_com, 1);
9640 bf_set(wqe_iod, &wqe->els_req.wqe_com, LPFC_WQE_IOD_READ);
9641 bf_set(wqe_qosd, &wqe->els_req.wqe_com, 1);
9642 bf_set(wqe_lenloc, &wqe->els_req.wqe_com, LPFC_WQE_LENLOC_NONE);
9643 bf_set(wqe_ebde_cnt, &wqe->els_req.wqe_com, 0);
9644 wqe->els_req.max_response_payload_len = total_len - xmit_len;
9645 break;
9646 case CMD_XMIT_SEQUENCE64_CX:
9647 bf_set(wqe_ctxt_tag, &wqe->xmit_sequence.wqe_com,
9648 iocbq->iocb.un.ulpWord[3]);
9649 bf_set(wqe_rcvoxid, &wqe->xmit_sequence.wqe_com,
9650 iocbq->iocb.unsli3.rcvsli3.ox_id);
9651
9652 xmit_len = total_len;
9653 cmnd = CMD_XMIT_SEQUENCE64_CR;
9654 if (phba->link_flag & LS_LOOPBACK_MODE)
9655 bf_set(wqe_xo, &wqe->xmit_sequence.wge_ctl, 1);
9656 fallthrough;
9657 case CMD_XMIT_SEQUENCE64_CR:
9658
9659 wqe->xmit_sequence.rsvd3 = 0;
9660
9661
9662 bf_set(wqe_pu, &wqe->xmit_sequence.wqe_com, 0);
9663 bf_set(wqe_dbde, &wqe->xmit_sequence.wqe_com, 1);
9664 bf_set(wqe_iod, &wqe->xmit_sequence.wqe_com,
9665 LPFC_WQE_IOD_WRITE);
9666 bf_set(wqe_lenloc, &wqe->xmit_sequence.wqe_com,
9667 LPFC_WQE_LENLOC_WORD12);
9668 bf_set(wqe_ebde_cnt, &wqe->xmit_sequence.wqe_com, 0);
9669 wqe->xmit_sequence.xmit_len = xmit_len;
9670 command_type = OTHER_COMMAND;
9671 break;
9672 case CMD_XMIT_BCAST64_CN:
9673
9674 wqe->xmit_bcast64.seq_payload_len = xmit_len;
9675
9676
9677
9678 bf_set(wqe_ct, &wqe->xmit_bcast64.wqe_com,
9679 ((iocbq->iocb.ulpCt_h << 1) | iocbq->iocb.ulpCt_l));
9680 bf_set(wqe_dbde, &wqe->xmit_bcast64.wqe_com, 1);
9681 bf_set(wqe_iod, &wqe->xmit_bcast64.wqe_com, LPFC_WQE_IOD_WRITE);
9682 bf_set(wqe_lenloc, &wqe->xmit_bcast64.wqe_com,
9683 LPFC_WQE_LENLOC_WORD3);
9684 bf_set(wqe_ebde_cnt, &wqe->xmit_bcast64.wqe_com, 0);
9685 break;
9686 case CMD_FCP_IWRITE64_CR:
9687 command_type = FCP_COMMAND_DATA_OUT;
9688
9689
9690 bf_set(payload_offset_len, &wqe->fcp_iwrite,
9691 xmit_len + sizeof(struct fcp_rsp));
9692 bf_set(cmd_buff_len, &wqe->fcp_iwrite,
9693 0);
9694
9695
9696 bf_set(wqe_erp, &wqe->fcp_iwrite.wqe_com,
9697 iocbq->iocb.ulpFCP2Rcvy);
9698 bf_set(wqe_lnk, &wqe->fcp_iwrite.wqe_com, iocbq->iocb.ulpXS);
9699
9700 bf_set(wqe_iod, &wqe->fcp_iwrite.wqe_com, LPFC_WQE_IOD_WRITE);
9701 bf_set(wqe_lenloc, &wqe->fcp_iwrite.wqe_com,
9702 LPFC_WQE_LENLOC_WORD4);
9703 bf_set(wqe_pu, &wqe->fcp_iwrite.wqe_com, iocbq->iocb.ulpPU);
9704 bf_set(wqe_dbde, &wqe->fcp_iwrite.wqe_com, 1);
9705 if (iocbq->iocb_flag & LPFC_IO_OAS) {
9706 bf_set(wqe_oas, &wqe->fcp_iwrite.wqe_com, 1);
9707 bf_set(wqe_ccpe, &wqe->fcp_iwrite.wqe_com, 1);
9708 if (iocbq->priority) {
9709 bf_set(wqe_ccp, &wqe->fcp_iwrite.wqe_com,
9710 (iocbq->priority << 1));
9711 } else {
9712 bf_set(wqe_ccp, &wqe->fcp_iwrite.wqe_com,
9713 (phba->cfg_XLanePriority << 1));
9714 }
9715 }
9716
9717
9718
9719 if (phba->cfg_enable_pbde)
9720 bf_set(wqe_pbde, &wqe->fcp_iwrite.wqe_com, 1);
9721 else
9722 bf_set(wqe_pbde, &wqe->fcp_iwrite.wqe_com, 0);
9723
9724 if (phba->fcp_embed_io) {
9725 struct lpfc_io_buf *lpfc_cmd;
9726 struct sli4_sge *sgl;
9727 struct fcp_cmnd *fcp_cmnd;
9728 uint32_t *ptr;
9729
9730
9731
9732 lpfc_cmd = iocbq->context1;
9733 sgl = (struct sli4_sge *)lpfc_cmd->dma_sgl;
9734 fcp_cmnd = lpfc_cmd->fcp_cmnd;
9735
9736
9737 wqe->generic.bde.tus.f.bdeFlags =
9738 BUFF_TYPE_BDE_IMMED;
9739 wqe->generic.bde.tus.f.bdeSize = sgl->sge_len;
9740 wqe->generic.bde.addrHigh = 0;
9741 wqe->generic.bde.addrLow = 88;
9742
9743 bf_set(wqe_wqes, &wqe->fcp_iwrite.wqe_com, 1);
9744 bf_set(wqe_dbde, &wqe->fcp_iwrite.wqe_com, 0);
9745
9746
9747 ptr = &wqe->words[22];
9748 memcpy(ptr, fcp_cmnd, sizeof(struct fcp_cmnd));
9749 }
9750 break;
9751 case CMD_FCP_IREAD64_CR:
9752
9753
9754 bf_set(payload_offset_len, &wqe->fcp_iread,
9755 xmit_len + sizeof(struct fcp_rsp));
9756 bf_set(cmd_buff_len, &wqe->fcp_iread,
9757 0);
9758
9759
9760 bf_set(wqe_erp, &wqe->fcp_iread.wqe_com,
9761 iocbq->iocb.ulpFCP2Rcvy);
9762 bf_set(wqe_lnk, &wqe->fcp_iread.wqe_com, iocbq->iocb.ulpXS);
9763
9764 bf_set(wqe_iod, &wqe->fcp_iread.wqe_com, LPFC_WQE_IOD_READ);
9765 bf_set(wqe_lenloc, &wqe->fcp_iread.wqe_com,
9766 LPFC_WQE_LENLOC_WORD4);
9767 bf_set(wqe_pu, &wqe->fcp_iread.wqe_com, iocbq->iocb.ulpPU);
9768 bf_set(wqe_dbde, &wqe->fcp_iread.wqe_com, 1);
9769 if (iocbq->iocb_flag & LPFC_IO_OAS) {
9770 bf_set(wqe_oas, &wqe->fcp_iread.wqe_com, 1);
9771 bf_set(wqe_ccpe, &wqe->fcp_iread.wqe_com, 1);
9772 if (iocbq->priority) {
9773 bf_set(wqe_ccp, &wqe->fcp_iread.wqe_com,
9774 (iocbq->priority << 1));
9775 } else {
9776 bf_set(wqe_ccp, &wqe->fcp_iread.wqe_com,
9777 (phba->cfg_XLanePriority << 1));
9778 }
9779 }
9780
9781
9782
9783 if (phba->cfg_enable_pbde)
9784 bf_set(wqe_pbde, &wqe->fcp_iread.wqe_com, 1);
9785 else
9786 bf_set(wqe_pbde, &wqe->fcp_iread.wqe_com, 0);
9787
9788 if (phba->fcp_embed_io) {
9789 struct lpfc_io_buf *lpfc_cmd;
9790 struct sli4_sge *sgl;
9791 struct fcp_cmnd *fcp_cmnd;
9792 uint32_t *ptr;
9793
9794
9795
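			/* 128 byte wqe support: embed FCP_CMND in words 22-29 */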
9796 lpfc_cmd = iocbq->context1;
9797 sgl = (struct sli4_sge *)lpfc_cmd->dma_sgl;
9798 fcp_cmnd = lpfc_cmd->fcp_cmnd;
9799
9800
9801 wqe->generic.bde.tus.f.bdeFlags =
9802 BUFF_TYPE_BDE_IMMED;
9803 wqe->generic.bde.tus.f.bdeSize = sgl->sge_len;
9804 wqe->generic.bde.addrHigh = 0;
9805 wqe->generic.bde.addrLow = 88;
9806
9807 bf_set(wqe_wqes, &wqe->fcp_iread.wqe_com, 1);
9808 bf_set(wqe_dbde, &wqe->fcp_iread.wqe_com, 0);
9809
9810
9811 ptr = &wqe->words[22];
9812 memcpy(ptr, fcp_cmnd, sizeof(struct fcp_cmnd));
9813 }
9814 break;
9815 case CMD_FCP_ICMND64_CR:
9816
9817
9818 bf_set(payload_offset_len, &wqe->fcp_icmd,
9819 xmit_len + sizeof(struct fcp_rsp));
9820 bf_set(cmd_buff_len, &wqe->fcp_icmd,
9821 0);
9822
9823 bf_set(wqe_pu, &wqe->fcp_icmd.wqe_com, 0);
9824
9825 bf_set(wqe_dbde, &wqe->fcp_icmd.wqe_com, 1);
9826 bf_set(wqe_iod, &wqe->fcp_icmd.wqe_com, LPFC_WQE_IOD_WRITE);
9827 bf_set(wqe_qosd, &wqe->fcp_icmd.wqe_com, 1);
9828 bf_set(wqe_lenloc, &wqe->fcp_icmd.wqe_com,
9829 LPFC_WQE_LENLOC_NONE);
9830 bf_set(wqe_erp, &wqe->fcp_icmd.wqe_com,
9831 iocbq->iocb.ulpFCP2Rcvy);
9832 if (iocbq->iocb_flag & LPFC_IO_OAS) {
9833 bf_set(wqe_oas, &wqe->fcp_icmd.wqe_com, 1);
9834 bf_set(wqe_ccpe, &wqe->fcp_icmd.wqe_com, 1);
9835 if (iocbq->priority) {
9836 bf_set(wqe_ccp, &wqe->fcp_icmd.wqe_com,
9837 (iocbq->priority << 1));
9838 } else {
9839 bf_set(wqe_ccp, &wqe->fcp_icmd.wqe_com,
9840 (phba->cfg_XLanePriority << 1));
9841 }
9842 }
9843
9844
9845 if (phba->fcp_embed_io) {
9846 struct lpfc_io_buf *lpfc_cmd;
9847 struct sli4_sge *sgl;
9848 struct fcp_cmnd *fcp_cmnd;
9849 uint32_t *ptr;
9850
9851
9852
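			/* 128 byte wqe support: embed FCP_CMND in words 22-29 */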
9853 lpfc_cmd = iocbq->context1;
9854 sgl = (struct sli4_sge *)lpfc_cmd->dma_sgl;
9855 fcp_cmnd = lpfc_cmd->fcp_cmnd;
9856
9857
9858 wqe->generic.bde.tus.f.bdeFlags =
9859 BUFF_TYPE_BDE_IMMED;
9860 wqe->generic.bde.tus.f.bdeSize = sgl->sge_len;
9861 wqe->generic.bde.addrHigh = 0;
9862 wqe->generic.bde.addrLow = 88;
9863
9864 bf_set(wqe_wqes, &wqe->fcp_icmd.wqe_com, 1);
9865 bf_set(wqe_dbde, &wqe->fcp_icmd.wqe_com, 0);
9866
9867
9868 ptr = &wqe->words[22];
9869 memcpy(ptr, fcp_cmnd, sizeof(struct fcp_cmnd));
9870 }
9871 break;
9872 case CMD_GEN_REQUEST64_CR:
9873
9874
9875
9876 xmit_len = 0;
9877 numBdes = iocbq->iocb.un.genreq64.bdl.bdeSize /
9878 sizeof(struct ulp_bde64);
9879 for (i = 0; i < numBdes; i++) {
9880 bde.tus.w = le32_to_cpu(bpl[i].tus.w);
9881 if (bde.tus.f.bdeFlags != BUFF_TYPE_BDE_64)
9882 break;
9883 xmit_len += bde.tus.f.bdeSize;
9884 }
9885
9886 wqe->gen_req.request_payload_len = xmit_len;
9887
9888
9889
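		/*
		 * GEN_REQUEST64 WQEs are only built with CT = 0; reject any
		 * iocb that asks for another context type.
		 */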
9890 if (iocbq->iocb.ulpCt_h || iocbq->iocb.ulpCt_l) {
9891 ct = ((iocbq->iocb.ulpCt_h << 1) | iocbq->iocb.ulpCt_l);
9892 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
9893 "2015 Invalid CT %x command 0x%x\n",
9894 ct, iocbq->iocb.ulpCommand);
9895 return IOCB_ERROR;
9896 }
9897 bf_set(wqe_ct, &wqe->gen_req.wqe_com, 0);
9898 bf_set(wqe_tmo, &wqe->gen_req.wqe_com, iocbq->iocb.ulpTimeout);
9899 bf_set(wqe_pu, &wqe->gen_req.wqe_com, iocbq->iocb.ulpPU);
9900 bf_set(wqe_dbde, &wqe->gen_req.wqe_com, 1);
9901 bf_set(wqe_iod, &wqe->gen_req.wqe_com, LPFC_WQE_IOD_READ);
9902 bf_set(wqe_qosd, &wqe->gen_req.wqe_com, 1);
9903 bf_set(wqe_lenloc, &wqe->gen_req.wqe_com, LPFC_WQE_LENLOC_NONE);
9904 bf_set(wqe_ebde_cnt, &wqe->gen_req.wqe_com, 0);
9905 wqe->gen_req.max_response_payload_len = total_len - xmit_len;
9906 command_type = OTHER_COMMAND;
9907 break;
9908 case CMD_XMIT_ELS_RSP64_CX:
9909 ndlp = (struct lpfc_nodelist *)iocbq->context1;
9910
9911
9912 wqe->xmit_els_rsp.response_payload_len = xmit_len;
9913
9914 wqe->xmit_els_rsp.word4 = 0;
9915
9916 bf_set(wqe_els_did, &wqe->xmit_els_rsp.wqe_dest,
9917 iocbq->iocb.un.xseq64.xmit_els_remoteID);
9918
9919 if_type = bf_get(lpfc_sli_intf_if_type,
9920 &phba->sli4_hba.sli_intf);
9921 if (if_type >= LPFC_SLI_INTF_IF_TYPE_2) {
9922 if (iocbq->vport->fc_flag & FC_PT2PT) {
9923 bf_set(els_rsp64_sp, &wqe->xmit_els_rsp, 1);
9924 bf_set(els_rsp64_sid, &wqe->xmit_els_rsp,
9925 iocbq->vport->fc_myDID);
9926 if (iocbq->vport->fc_myDID == Fabric_DID) {
9927 bf_set(wqe_els_did,
9928 &wqe->xmit_els_rsp.wqe_dest, 0);
9929 }
9930 }
9931 }
9932 bf_set(wqe_ct, &wqe->xmit_els_rsp.wqe_com,
9933 ((iocbq->iocb.ulpCt_h << 1) | iocbq->iocb.ulpCt_l));
9934 bf_set(wqe_pu, &wqe->xmit_els_rsp.wqe_com, iocbq->iocb.ulpPU);
9935 bf_set(wqe_rcvoxid, &wqe->xmit_els_rsp.wqe_com,
9936 iocbq->iocb.unsli3.rcvsli3.ox_id);
9937 if (!iocbq->iocb.ulpCt_h && iocbq->iocb.ulpCt_l)
9938 bf_set(wqe_ctxt_tag, &wqe->xmit_els_rsp.wqe_com,
9939 phba->vpi_ids[iocbq->vport->vpi]);
9940 bf_set(wqe_dbde, &wqe->xmit_els_rsp.wqe_com, 1);
9941 bf_set(wqe_iod, &wqe->xmit_els_rsp.wqe_com, LPFC_WQE_IOD_WRITE);
9942 bf_set(wqe_qosd, &wqe->xmit_els_rsp.wqe_com, 1);
9943 bf_set(wqe_lenloc, &wqe->xmit_els_rsp.wqe_com,
9944 LPFC_WQE_LENLOC_WORD3);
9945 bf_set(wqe_ebde_cnt, &wqe->xmit_els_rsp.wqe_com, 0);
9946 bf_set(wqe_rsp_temp_rpi, &wqe->xmit_els_rsp,
9947 phba->sli4_hba.rpi_ids[ndlp->nlp_rpi]);
9948 pcmd = (uint32_t *) (((struct lpfc_dmabuf *)
9949 iocbq->context2)->virt);
9950 if (phba->fc_topology == LPFC_TOPOLOGY_LOOP) {
9951 bf_set(els_rsp64_sp, &wqe->xmit_els_rsp, 1);
9952 bf_set(els_rsp64_sid, &wqe->xmit_els_rsp,
9953 iocbq->vport->fc_myDID);
9954 bf_set(wqe_ct, &wqe->xmit_els_rsp.wqe_com, 1);
9955 bf_set(wqe_ctxt_tag, &wqe->xmit_els_rsp.wqe_com,
9956 phba->vpi_ids[phba->pport->vpi]);
9957 }
9958 command_type = OTHER_COMMAND;
9959 break;
9960 case CMD_CLOSE_XRI_CN:
9961 case CMD_ABORT_XRI_CN:
9962 case CMD_ABORT_XRI_CX:
9963
9964
9965 abrt_iotag = iocbq->iocb.un.acxri.abortContextTag;
9966 if (abrt_iotag != 0 && abrt_iotag <= phba->sli.last_iotag) {
9967 abrtiocbq = phba->sli.iocbq_lookup[abrt_iotag];
9968 fip = abrtiocbq->iocb_flag & LPFC_FIP_ELS_ID_MASK;
		} else {
			fip = 0;
		}
9971
9972 if ((iocbq->iocb.ulpCommand == CMD_CLOSE_XRI_CN) || fip)
9973
9974
9975
9976
9977
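			/*
			 * The link is down, or the command was ELS_FIP, so
			 * the fw does not need to send an ABTS on the wire.
			 */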
9978 bf_set(abort_cmd_ia, &wqe->abort_cmd, 1);
9979 else
9980 bf_set(abort_cmd_ia, &wqe->abort_cmd, 0);
9981 bf_set(abort_cmd_criteria, &wqe->abort_cmd, T_XRI_TAG);
9982
9983 wqe->abort_cmd.rsrvd5 = 0;
9984 bf_set(wqe_ct, &wqe->abort_cmd.wqe_com,
9985 ((iocbq->iocb.ulpCt_h << 1) | iocbq->iocb.ulpCt_l));
9986 abort_tag = iocbq->iocb.un.acxri.abortIoTag;
9987
9988
9989
9990
9991 bf_set(wqe_cmnd, &wqe->abort_cmd.wqe_com, CMD_ABORT_XRI_CX);
9992 bf_set(wqe_qosd, &wqe->abort_cmd.wqe_com, 1);
9993 bf_set(wqe_lenloc, &wqe->abort_cmd.wqe_com,
9994 LPFC_WQE_LENLOC_NONE);
9995 cmnd = CMD_ABORT_XRI_CX;
9996 command_type = OTHER_COMMAND;
9997 xritag = 0;
9998 break;
9999 case CMD_XMIT_BLS_RSP64_CX:
10000 ndlp = (struct lpfc_nodelist *)iocbq->context1;
10001
10002
10003
10004
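		/*
		 * Zero the whole WQE up front; the BLS response rebuilds
		 * every field it needs from scratch.
		 */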
10005 memset(wqe, 0, sizeof(*wqe));
10006
10007 bf_set(xmit_bls_rsp64_oxid, &wqe->xmit_bls_rsp,
10008 bf_get(lpfc_abts_oxid, &iocbq->iocb.un.bls_rsp));
10009 if (bf_get(lpfc_abts_orig, &iocbq->iocb.un.bls_rsp) ==
10010 LPFC_ABTS_UNSOL_INT) {
10011
10012
10013
10014
10015 bf_set(xmit_bls_rsp64_rxid, &wqe->xmit_bls_rsp,
10016 iocbq->sli4_xritag);
10017 } else {
10018
10019
10020
10021
10022 bf_set(xmit_bls_rsp64_rxid, &wqe->xmit_bls_rsp,
10023 bf_get(lpfc_abts_rxid, &iocbq->iocb.un.bls_rsp));
10024 }
10025 bf_set(xmit_bls_rsp64_seqcnthi, &wqe->xmit_bls_rsp, 0xffff);
10026 bf_set(wqe_xmit_bls_pt, &wqe->xmit_bls_rsp.wqe_dest, 0x1);
10027
10028
10029 bf_set(wqe_els_did, &wqe->xmit_bls_rsp.wqe_dest,
10030 ndlp->nlp_DID);
10031 bf_set(xmit_bls_rsp64_temprpi, &wqe->xmit_bls_rsp,
10032 iocbq->iocb.ulpContext);
10033 bf_set(wqe_ct, &wqe->xmit_bls_rsp.wqe_com, 1);
10034 bf_set(wqe_ctxt_tag, &wqe->xmit_bls_rsp.wqe_com,
10035 phba->vpi_ids[phba->pport->vpi]);
10036 bf_set(wqe_qosd, &wqe->xmit_bls_rsp.wqe_com, 1);
10037 bf_set(wqe_lenloc, &wqe->xmit_bls_rsp.wqe_com,
10038 LPFC_WQE_LENLOC_NONE);
10039
10040 command_type = OTHER_COMMAND;
10041 if (iocbq->iocb.un.xseq64.w5.hcsw.Rctl == FC_RCTL_BA_RJT) {
10042 bf_set(xmit_bls_rsp64_rjt_vspec, &wqe->xmit_bls_rsp,
10043 bf_get(lpfc_vndr_code, &iocbq->iocb.un.bls_rsp));
10044 bf_set(xmit_bls_rsp64_rjt_expc, &wqe->xmit_bls_rsp,
10045 bf_get(lpfc_rsn_expln, &iocbq->iocb.un.bls_rsp));
10046 bf_set(xmit_bls_rsp64_rjt_rsnc, &wqe->xmit_bls_rsp,
10047 bf_get(lpfc_rsn_code, &iocbq->iocb.un.bls_rsp));
10048 }
10049
10050 break;
10051 case CMD_SEND_FRAME:
10052 bf_set(wqe_cmnd, &wqe->generic.wqe_com, CMD_SEND_FRAME);
10053 bf_set(wqe_sof, &wqe->generic.wqe_com, 0x2E);
10054 bf_set(wqe_eof, &wqe->generic.wqe_com, 0x41);
10055 bf_set(wqe_lenloc, &wqe->generic.wqe_com, 1);
10056 bf_set(wqe_xbl, &wqe->generic.wqe_com, 1);
10057 bf_set(wqe_dbde, &wqe->generic.wqe_com, 1);
10058 bf_set(wqe_xc, &wqe->generic.wqe_com, 1);
10059 bf_set(wqe_cmd_type, &wqe->generic.wqe_com, 0xA);
10060 bf_set(wqe_cqid, &wqe->generic.wqe_com, LPFC_WQE_CQ_ID_DEFAULT);
10061 bf_set(wqe_xri_tag, &wqe->generic.wqe_com, xritag);
10062 bf_set(wqe_reqtag, &wqe->generic.wqe_com, iocbq->iotag);
10063 return 0;
10064 case CMD_XRI_ABORTED_CX:
10065 case CMD_CREATE_XRI_CR:
10066 case CMD_IOCB_FCP_IBIDIR64_CR:
10067 case CMD_FCP_TSEND64_CX:
10068 case CMD_FCP_TRSP64_CX:
10069 case CMD_FCP_AUTO_TRSP_CX:
10070 default:
10071 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
10072 "2014 Invalid command 0x%x\n",
10073 iocbq->iocb.ulpCommand);
10074 return IOCB_ERROR;
10076 }
10077
10078 if (iocbq->iocb_flag & LPFC_IO_DIF_PASS)
10079 bf_set(wqe_dif, &wqe->generic.wqe_com, LPFC_WQE_DIF_PASSTHRU);
10080 else if (iocbq->iocb_flag & LPFC_IO_DIF_STRIP)
10081 bf_set(wqe_dif, &wqe->generic.wqe_com, LPFC_WQE_DIF_STRIP);
10082 else if (iocbq->iocb_flag & LPFC_IO_DIF_INSERT)
10083 bf_set(wqe_dif, &wqe->generic.wqe_com, LPFC_WQE_DIF_INSERT);
10084 iocbq->iocb_flag &= ~(LPFC_IO_DIF_PASS | LPFC_IO_DIF_STRIP |
10085 LPFC_IO_DIF_INSERT);
10086 bf_set(wqe_xri_tag, &wqe->generic.wqe_com, xritag);
10087 bf_set(wqe_reqtag, &wqe->generic.wqe_com, iocbq->iotag);
10088 wqe->generic.wqe_com.abort_tag = abort_tag;
10089 bf_set(wqe_cmd_type, &wqe->generic.wqe_com, command_type);
10090 bf_set(wqe_cmnd, &wqe->generic.wqe_com, cmnd);
10091 bf_set(wqe_class, &wqe->generic.wqe_com, iocbq->iocb.ulpClass);
10092 bf_set(wqe_cqid, &wqe->generic.wqe_com, LPFC_WQE_CQ_ID_DEFAULT);
10093 return 0;
10094}
10095
10096
10097
10098
10099
10100
10101
10102
10103
10104
10105
10106
10107
10108
10109
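/**
 * __lpfc_sli_issue_iocb_s4 - SLI4 device lockless version of lpfc_sli_issue_iocb
 * @phba: Pointer to HBA context object.
 * @ring_number: SLI ring number to issue iocb on.
 * @piocb: Pointer to command iocb.
 * @flag: Flag indicating if this command can be put into txq.
 *
 * __lpfc_sli_issue_iocb_s4 is used by other functions in the driver to issue
 * an iocb command to an HBA with the SLI-4 interface spec. The caller must
 * hold the ring_lock of the work queue the iocb maps to; the function either
 * converts the iocb to a WQE and posts it, queues it to the txq, or returns
 * an error.
 **/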
10110static int
10111__lpfc_sli_issue_iocb_s4(struct lpfc_hba *phba, uint32_t ring_number,
10112 struct lpfc_iocbq *piocb, uint32_t flag)
10113{
10114 struct lpfc_sglq *sglq;
10115 union lpfc_wqe128 wqe;
10116 struct lpfc_queue *wq;
10117 struct lpfc_sli_ring *pring;
10118
10119
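	/*
	 * FCP IO (and any iocb pinned to an FCP WQ index) goes to the
	 * selected hardware queue's io_wq; everything else goes to the
	 * ELS work queue.
	 */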
10120 if ((piocb->iocb_flag & LPFC_IO_FCP) ||
10121 (piocb->iocb_flag & LPFC_USE_FCPWQIDX)) {
10122 wq = phba->sli4_hba.hdwq[piocb->hba_wqidx].io_wq;
10123 } else {
10124 wq = phba->sli4_hba.els_wq;
10125 }
10126
10127
10128 pring = wq->pring;
10129
10130
10131
10132
10133
10134 lockdep_assert_held(&pring->ring_lock);
10135
10136 if (piocb->sli4_xritag == NO_XRI) {
10137 if (piocb->iocb.ulpCommand == CMD_ABORT_XRI_CN ||
10138 piocb->iocb.ulpCommand == CMD_CLOSE_XRI_CN)
10139 sglq = NULL;
10140 else {
10141 if (!list_empty(&pring->txq)) {
10142 if (!(flag & SLI_IOCB_RET_IOCB)) {
10143 __lpfc_sli_ringtx_put(phba,
10144 pring, piocb);
10145 return IOCB_SUCCESS;
10146 } else {
10147 return IOCB_BUSY;
10148 }
10149 } else {
10150 sglq = __lpfc_sli_get_els_sglq(phba, piocb);
10151 if (!sglq) {
10152 if (!(flag & SLI_IOCB_RET_IOCB)) {
10153 __lpfc_sli_ringtx_put(phba,
10154 pring,
10155 piocb);
10156 return IOCB_SUCCESS;
10157 } else
10158 return IOCB_BUSY;
10159 }
10160 }
10161 }
10162 } else if (piocb->iocb_flag & LPFC_IO_FCP)
10163
10164 sglq = NULL;
10165 else {
10166
10167
10168
10169
10170 sglq = __lpfc_get_active_sglq(phba, piocb->sli4_lxritag);
10171 if (!sglq)
10172 return IOCB_ERROR;
10173 }
10174
10175 if (sglq) {
10176 piocb->sli4_lxritag = sglq->sli4_lxritag;
10177 piocb->sli4_xritag = sglq->sli4_xritag;
10178 if (NO_XRI == lpfc_sli4_bpl2sgl(phba, piocb, sglq))
10179 return IOCB_ERROR;
10180 }
10181
10182 if (lpfc_sli4_iocb2wqe(phba, piocb, &wqe))
10183 return IOCB_ERROR;
10184
10185 if (lpfc_sli4_wq_put(wq, &wqe))
10186 return IOCB_ERROR;
10187 lpfc_sli_ringtxcmpl_put(phba, pring, piocb);
10188
10189 return 0;
10190}
10191
10192
10193
10194
10195
10196
10197
10198
10199
10200
10201
10202
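/**
 * __lpfc_sli_issue_iocb - Wrapper func of lockless version for issuing iocb
 *
 * This routine wraps the actual lockless version for issuing an iocb, which
 * is dispatched through the per-SLI-revision function pointer set up by
 * lpfc_sli_api_table_setup.
 *
 * Return: IOCB_SUCCESS - Success, IOCB_BUSY - Busy, IOCB_ERROR - Error
 **/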
10203int
10204__lpfc_sli_issue_iocb(struct lpfc_hba *phba, uint32_t ring_number,
10205 struct lpfc_iocbq *piocb, uint32_t flag)
10206{
10207 return phba->__lpfc_sli_issue_iocb(phba, ring_number, piocb, flag);
10208}
10209
10210
10211
10212
10213
10214
10215
10216
10217
10218
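/**
 * lpfc_sli_api_table_setup - Set up sli api function jump table
 * @phba: The hba struct for which this call is being executed.
 * @dev_grp: The HBA PCI-Device group number.
 *
 * This routine sets up the SLI interface API function jump table in the
 * @phba struct.
 *
 * Returns: 0 - success, -ENODEV - failure.
 **/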
10219int
10220lpfc_sli_api_table_setup(struct lpfc_hba *phba, uint8_t dev_grp)
10221{
10222
10223 switch (dev_grp) {
10224 case LPFC_PCI_DEV_LP:
10225 phba->__lpfc_sli_issue_iocb = __lpfc_sli_issue_iocb_s3;
10226 phba->__lpfc_sli_release_iocbq = __lpfc_sli_release_iocbq_s3;
10227 break;
10228 case LPFC_PCI_DEV_OC:
10229 phba->__lpfc_sli_issue_iocb = __lpfc_sli_issue_iocb_s4;
10230 phba->__lpfc_sli_release_iocbq = __lpfc_sli_release_iocbq_s4;
10231 break;
10232 default:
10233 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
10234 "1419 Invalid HBA PCI-device group: 0x%x\n",
10235 dev_grp);
10236 return -ENODEV;
10238 }
10239 phba->lpfc_get_iocb_from_iocbq = lpfc_get_iocb_from_iocbq;
10240 return 0;
10241}
10242
10243
10244
10245
10246
10247
10248
10249
10250
10251
10252
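/**
 * lpfc_sli4_calc_ring - Calculates which ring the command should use
 * @phba: Pointer to HBA context object.
 * @piocb: Pointer to command iocb.
 *
 * For SLI4 only, FCP IO can be deferred to one of many WQs, based on
 * hba_wqidx, so the corresponding ring must be calculated. Since aborts
 * must go on the same WQ as the command they are aborting, the command's
 * hba_wqidx is used.
 **/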
10253struct lpfc_sli_ring *
10254lpfc_sli4_calc_ring(struct lpfc_hba *phba, struct lpfc_iocbq *piocb)
10255{
10256 struct lpfc_io_buf *lpfc_cmd;
10257
10258 if (piocb->iocb_flag & (LPFC_IO_FCP | LPFC_USE_FCPWQIDX)) {
10259 if (unlikely(!phba->sli4_hba.hdwq))
10260 return NULL;
10261
10262
10263
10264
10265 if (!(piocb->iocb_flag & LPFC_USE_FCPWQIDX)) {
10266 lpfc_cmd = (struct lpfc_io_buf *)piocb->context1;
10267 piocb->hba_wqidx = lpfc_cmd->hdwq_no;
10268 }
10269 return phba->sli4_hba.hdwq[piocb->hba_wqidx].io_wq->pring;
10270 } else {
10271 if (unlikely(!phba->sli4_hba.els_wq))
10272 return NULL;
10273 piocb->hba_wqidx = 0;
10274 return phba->sli4_hba.els_wq->pring;
10275 }
10276}
10277
10278
10279
10280
10281
10282
10283
10284
10285
10286
10287
10288
10289
10290
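/**
 * lpfc_sli_issue_iocb - Wrapper function for __lpfc_sli_issue_iocb
 * @phba: Pointer to HBA context object.
 * @ring_number: Ring number to which the iocb will be issued.
 * @piocb: Pointer to command iocb.
 * @flag: Flag indicating if this command can be put into txq.
 *
 * This wrapper acquires the appropriate lock (the ring_lock for SLI-4, the
 * hbalock for SLI-3), calls __lpfc_sli_issue_iocb, and returns whatever that
 * function returns. It is used by callers that do not hold any lock.
 **/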
10291int
10292lpfc_sli_issue_iocb(struct lpfc_hba *phba, uint32_t ring_number,
10293 struct lpfc_iocbq *piocb, uint32_t flag)
10294{
10295 struct lpfc_sli_ring *pring;
10296 struct lpfc_queue *eq;
10297 unsigned long iflags;
10298 int rc;
10299
10300 if (phba->sli_rev == LPFC_SLI_REV4) {
10301 eq = phba->sli4_hba.hdwq[piocb->hba_wqidx].hba_eq;
10302
10303 pring = lpfc_sli4_calc_ring(phba, piocb);
10304 if (unlikely(pring == NULL))
10305 return IOCB_ERROR;
10306
10307 spin_lock_irqsave(&pring->ring_lock, iflags);
10308 rc = __lpfc_sli_issue_iocb(phba, ring_number, piocb, flag);
10309 spin_unlock_irqrestore(&pring->ring_lock, iflags);
10310
10311 lpfc_sli4_poll_eq(eq, LPFC_POLL_FASTPATH);
10312 } else {
10313
10314 spin_lock_irqsave(&phba->hbalock, iflags);
10315 rc = __lpfc_sli_issue_iocb(phba, ring_number, piocb, flag);
10316 spin_unlock_irqrestore(&phba->hbalock, iflags);
10317 }
10318 return rc;
10319}
10320
10321
10322
10323
10324
10325
10326
10327
10328
10329
10330
10331
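/**
 * lpfc_extra_ring_setup - Extra ring setup function
 * @phba: Pointer to HBA context object.
 *
 * This function is called while the driver attaches to the HBA to set up the
 * extra ring. The extra ring is used only when the driver needs to support
 * target mode or multi-ring functionality. It moves a share of the cmd/rsp
 * iocb entries from the FCP ring over to the extra ring. SLI-3 only.
 **/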
10332static int
lpfc_extra_ring_setup(struct lpfc_hba *phba)
10334{
10335 struct lpfc_sli *psli;
10336 struct lpfc_sli_ring *pring;
10337
10338 psli = &phba->sli;
10339
10340
10341
10342
10343 pring = &psli->sli3_ring[LPFC_FCP_RING];
10344 pring->sli.sli3.numCiocb -= SLI2_IOCB_CMD_R1XTRA_ENTRIES;
10345 pring->sli.sli3.numRiocb -= SLI2_IOCB_RSP_R1XTRA_ENTRIES;
10346 pring->sli.sli3.numCiocb -= SLI2_IOCB_CMD_R3XTRA_ENTRIES;
10347 pring->sli.sli3.numRiocb -= SLI2_IOCB_RSP_R3XTRA_ENTRIES;
10348
10349
10350 pring = &psli->sli3_ring[LPFC_EXTRA_RING];
10351
10352 pring->sli.sli3.numCiocb += SLI2_IOCB_CMD_R1XTRA_ENTRIES;
10353 pring->sli.sli3.numRiocb += SLI2_IOCB_RSP_R1XTRA_ENTRIES;
10354 pring->sli.sli3.numCiocb += SLI2_IOCB_CMD_R3XTRA_ENTRIES;
10355 pring->sli.sli3.numRiocb += SLI2_IOCB_RSP_R3XTRA_ENTRIES;
10356
10357
10358 pring->iotag_max = 4096;
10359 pring->num_mask = 1;
10360 pring->prt[0].profile = 0;
10361 pring->prt[0].rctl = phba->cfg_multi_ring_rctl;
10362 pring->prt[0].type = phba->cfg_multi_ring_type;
10363 pring->prt[0].lpfc_sli_rcv_unsol_event = NULL;
10364 return 0;
10365}
10366
10367
10368
10369
10370
10371
10372
10373
10374
10375
10376
10377
10378
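/**
 * lpfc_sli_abts_err_handler - handle a failed ABTS request from an SLI3 port.
 * @phba: Pointer to HBA context object.
 * @iocbq: Pointer to iocb object.
 *
 * The async_event handler calls this routine when it receives an
 * ASYNC_STATUS_CN event from the port. The port generates this event when an
 * Abort Sequence request to an rport fails twice in succession. The abort
 * could be originated by the driver or by the port, and could have been for
 * an ELS or FCP IO.
 **/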
10379static void
10380lpfc_sli_abts_err_handler(struct lpfc_hba *phba,
10381 struct lpfc_iocbq *iocbq)
10382{
10383 struct lpfc_nodelist *ndlp = NULL;
10384 uint16_t rpi = 0, vpi = 0;
10385 struct lpfc_vport *vport = NULL;
10386
10387
10388 vpi = iocbq->iocb.un.asyncstat.sub_ctxt_tag;
10389 rpi = iocbq->iocb.ulpContext;
10390
10391 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
10392 "3092 Port generated ABTS async event "
10393 "on vpi %d rpi %d status 0x%x\n",
10394 vpi, rpi, iocbq->iocb.ulpStatus);
10395
10396 vport = lpfc_find_vport_by_vpid(phba, vpi);
10397 if (!vport)
10398 goto err_exit;
10399 ndlp = lpfc_findnode_rpi(vport, rpi);
10400 if (!ndlp || !NLP_CHK_NODE_ACT(ndlp))
10401 goto err_exit;
10402
10403 if (iocbq->iocb.ulpStatus == IOSTAT_LOCAL_REJECT)
10404 lpfc_sli_abts_recover_port(vport, ndlp);
10405 return;
10406
10407 err_exit:
10408 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
10409 "3095 Event Context not found, no "
			"action on vpi %d rpi %d status 0x%x\n",
			vpi, rpi, iocbq->iocb.ulpStatus);
10413}
10414
10415
10416
10417
10418
10419
10420
10421
10422
10423
10424
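/**
 * lpfc_sli4_abts_err_handler - handle a failed ABTS request from an SLI4 port.
 * @phba: Pointer to HBA context object.
 * @ndlp: nodelist pointer for the impacted rport.
 * @axri: pointer to the wcqe containing the failed exchange.
 *
 * The driver calls this routine when it receives an ABORT_XRI_FCP CQE from
 * the port. The port generates this event when an abort exchange request to
 * an rport fails twice in succession with no reply. If the error is
 * recoverable (local reject with sequence timeout or no extended status),
 * port recovery is attempted on the rport.
 **/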
10425void
10426lpfc_sli4_abts_err_handler(struct lpfc_hba *phba,
10427 struct lpfc_nodelist *ndlp,
10428 struct sli4_wcqe_xri_aborted *axri)
10429{
10430 struct lpfc_vport *vport;
10431 uint32_t ext_status = 0;
10432
10433 if (!ndlp || !NLP_CHK_NODE_ACT(ndlp)) {
10434 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
10435 "3115 Node Context not found, driver "
10436 "ignoring abts err event\n");
10437 return;
10438 }
10439
10440 vport = ndlp->vport;
10441 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
10442 "3116 Port generated FCP XRI ABORT event on "
10443 "vpi %d rpi %d xri x%x status 0x%x parameter x%x\n",
10444 ndlp->vport->vpi, phba->sli4_hba.rpi_ids[ndlp->nlp_rpi],
10445 bf_get(lpfc_wcqe_xa_xri, axri),
10446 bf_get(lpfc_wcqe_xa_status, axri),
10447 axri->parameter);
10448
10449
10450
10451
10452
10453
10454 ext_status = axri->parameter & IOERR_PARAM_MASK;
10455 if ((bf_get(lpfc_wcqe_xa_status, axri) == IOSTAT_LOCAL_REJECT) &&
10456 ((ext_status == IOERR_SEQUENCE_TIMEOUT) || (ext_status == 0)))
10457 lpfc_sli_abts_recover_port(vport, ndlp);
10458}
10459
10460
10461
10462
10463
10464
10465
10466
10467
10468
10469
10470
10471
10472
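/**
 * lpfc_sli_async_event_handler - ASYNC iocb handler function
 * @phba: Pointer to HBA context object.
 * @pring: Pointer to driver SLI ring object.
 * @iocbq: Pointer to iocb object.
 *
 * This function is called by the slow ring event handler when there is an
 * ASYNC event iocb in the ring. It decodes temperature events into vendor
 * events posted to the FC transport, hands ABTS events to
 * lpfc_sli_abts_err_handler, and dumps any unrecognized event.
 **/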
10473static void
lpfc_sli_async_event_handler(struct lpfc_hba *phba,
	struct lpfc_sli_ring *pring, struct lpfc_iocbq *iocbq)
10476{
10477 IOCB_t *icmd;
10478 uint16_t evt_code;
10479 struct temp_event temp_event_data;
10480 struct Scsi_Host *shost;
10481 uint32_t *iocb_w;
10482
10483 icmd = &iocbq->iocb;
10484 evt_code = icmd->un.asyncstat.evt_code;
10485
10486 switch (evt_code) {
10487 case ASYNC_TEMP_WARN:
10488 case ASYNC_TEMP_SAFE:
10489 temp_event_data.data = (uint32_t) icmd->ulpContext;
10490 temp_event_data.event_type = FC_REG_TEMPERATURE_EVENT;
10491 if (evt_code == ASYNC_TEMP_WARN) {
10492 temp_event_data.event_code = LPFC_THRESHOLD_TEMP;
10493 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
10494 "0347 Adapter is very hot, please take "
10495 "corrective action. temperature : %d Celsius\n",
10496 (uint32_t) icmd->ulpContext);
10497 } else {
10498 temp_event_data.event_code = LPFC_NORMAL_TEMP;
10499 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
10500 "0340 Adapter temperature is OK now. "
10501 "temperature : %d Celsius\n",
10502 (uint32_t) icmd->ulpContext);
10503 }
10504
10505
10506 shost = lpfc_shost_from_vport(phba->pport);
10507 fc_host_post_vendor_event(shost, fc_get_event_number(),
10508 sizeof(temp_event_data), (char *) &temp_event_data,
10509 LPFC_NL_VENDOR_ID);
10510 break;
10511 case ASYNC_STATUS_CN:
10512 lpfc_sli_abts_err_handler(phba, iocbq);
10513 break;
10514 default:
10515 iocb_w = (uint32_t *) icmd;
10516 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
10517 "0346 Ring %d handler: unexpected ASYNC_STATUS"
10518 " evt_code 0x%x\n"
10519 "W0 0x%08x W1 0x%08x W2 0x%08x W3 0x%08x\n"
10520 "W4 0x%08x W5 0x%08x W6 0x%08x W7 0x%08x\n"
10521 "W8 0x%08x W9 0x%08x W10 0x%08x W11 0x%08x\n"
10522 "W12 0x%08x W13 0x%08x W14 0x%08x W15 0x%08x\n",
10523 pring->ringno, icmd->un.asyncstat.evt_code,
10524 iocb_w[0], iocb_w[1], iocb_w[2], iocb_w[3],
10525 iocb_w[4], iocb_w[5], iocb_w[6], iocb_w[7],
10526 iocb_w[8], iocb_w[9], iocb_w[10], iocb_w[11],
10527 iocb_w[12], iocb_w[13], iocb_w[14], iocb_w[15]);
10528
10529 break;
10530 }
10531}
10532
10533
10534
10535
10536
10537
10538
10539
10540
10541
10542
10543
10544
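/**
 * lpfc_sli4_setup - SLI ring setup function
 * @phba: Pointer to HBA context object.
 *
 * lpfc_sli4_setup sets up the rings of the SLI4 interface. It registers the
 * unsolicited receive handlers for ELS and CT traffic on the ELS work queue
 * ring.
 *
 * This function always returns 0.
 **/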
10545int
10546lpfc_sli4_setup(struct lpfc_hba *phba)
10547{
10548 struct lpfc_sli_ring *pring;
10549
10550 pring = phba->sli4_hba.els_wq->pring;
10551 pring->num_mask = LPFC_MAX_RING_MASK;
10552 pring->prt[0].profile = 0;
10553 pring->prt[0].rctl = FC_RCTL_ELS_REQ;
10554 pring->prt[0].type = FC_TYPE_ELS;
10555 pring->prt[0].lpfc_sli_rcv_unsol_event =
10556 lpfc_els_unsol_event;
10557 pring->prt[1].profile = 0;
10558 pring->prt[1].rctl = FC_RCTL_ELS_REP;
10559 pring->prt[1].type = FC_TYPE_ELS;
10560 pring->prt[1].lpfc_sli_rcv_unsol_event =
10561 lpfc_els_unsol_event;
10562 pring->prt[2].profile = 0;
10563
10564 pring->prt[2].rctl = FC_RCTL_DD_UNSOL_CTL;
10565
10566 pring->prt[2].type = FC_TYPE_CT;
10567 pring->prt[2].lpfc_sli_rcv_unsol_event =
10568 lpfc_ct_unsol_event;
10569 pring->prt[3].profile = 0;
10570
10571 pring->prt[3].rctl = FC_RCTL_DD_SOL_CTL;
10572
10573 pring->prt[3].type = FC_TYPE_CT;
10574 pring->prt[3].lpfc_sli_rcv_unsol_event =
10575 lpfc_ct_unsol_event;
10576 return 0;
10577}
10578
10579
10580
10581
10582
10583
10584
10585
10586
10587
10588
10589
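/**
 * lpfc_sli_setup - SLI ring setup function
 * @phba: Pointer to HBA context object.
 *
 * lpfc_sli_setup sets up the rings of the SLI interface with the number of
 * iocbs per ring and iotags. It is called while the driver attaches to the
 * HBA and before interrupts are enabled, so no locking is needed.
 *
 * This function always returns 0.
 **/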
10590int
10591lpfc_sli_setup(struct lpfc_hba *phba)
10592{
10593 int i, totiocbsize = 0;
10594 struct lpfc_sli *psli = &phba->sli;
10595 struct lpfc_sli_ring *pring;
10596
10597 psli->num_rings = MAX_SLI3_CONFIGURED_RINGS;
10598 psli->sli_flag = 0;
10599
10600 psli->iocbq_lookup = NULL;
10601 psli->iocbq_lookup_len = 0;
10602 psli->last_iotag = 0;
10603
10604 for (i = 0; i < psli->num_rings; i++) {
10605 pring = &psli->sli3_ring[i];
10606 switch (i) {
10607 case LPFC_FCP_RING:
10608
10609 pring->sli.sli3.numCiocb = SLI2_IOCB_CMD_R0_ENTRIES;
10610 pring->sli.sli3.numRiocb = SLI2_IOCB_RSP_R0_ENTRIES;
10611 pring->sli.sli3.numCiocb +=
10612 SLI2_IOCB_CMD_R1XTRA_ENTRIES;
10613 pring->sli.sli3.numRiocb +=
10614 SLI2_IOCB_RSP_R1XTRA_ENTRIES;
10615 pring->sli.sli3.numCiocb +=
10616 SLI2_IOCB_CMD_R3XTRA_ENTRIES;
10617 pring->sli.sli3.numRiocb +=
10618 SLI2_IOCB_RSP_R3XTRA_ENTRIES;
10619 pring->sli.sli3.sizeCiocb = (phba->sli_rev == 3) ?
10620 SLI3_IOCB_CMD_SIZE :
10621 SLI2_IOCB_CMD_SIZE;
10622 pring->sli.sli3.sizeRiocb = (phba->sli_rev == 3) ?
10623 SLI3_IOCB_RSP_SIZE :
10624 SLI2_IOCB_RSP_SIZE;
10625 pring->iotag_ctr = 0;
10626 pring->iotag_max =
10627 (phba->cfg_hba_queue_depth * 2);
10628 pring->fast_iotag = pring->iotag_max;
10629 pring->num_mask = 0;
10630 break;
10631 case LPFC_EXTRA_RING:
10632
10633 pring->sli.sli3.numCiocb = SLI2_IOCB_CMD_R1_ENTRIES;
10634 pring->sli.sli3.numRiocb = SLI2_IOCB_RSP_R1_ENTRIES;
10635 pring->sli.sli3.sizeCiocb = (phba->sli_rev == 3) ?
10636 SLI3_IOCB_CMD_SIZE :
10637 SLI2_IOCB_CMD_SIZE;
10638 pring->sli.sli3.sizeRiocb = (phba->sli_rev == 3) ?
10639 SLI3_IOCB_RSP_SIZE :
10640 SLI2_IOCB_RSP_SIZE;
10641 pring->iotag_max = phba->cfg_hba_queue_depth;
10642 pring->num_mask = 0;
10643 break;
10644 case LPFC_ELS_RING:
10645
10646 pring->sli.sli3.numCiocb = SLI2_IOCB_CMD_R2_ENTRIES;
10647 pring->sli.sli3.numRiocb = SLI2_IOCB_RSP_R2_ENTRIES;
10648 pring->sli.sli3.sizeCiocb = (phba->sli_rev == 3) ?
10649 SLI3_IOCB_CMD_SIZE :
10650 SLI2_IOCB_CMD_SIZE;
10651 pring->sli.sli3.sizeRiocb = (phba->sli_rev == 3) ?
10652 SLI3_IOCB_RSP_SIZE :
10653 SLI2_IOCB_RSP_SIZE;
10654 pring->fast_iotag = 0;
10655 pring->iotag_ctr = 0;
10656 pring->iotag_max = 4096;
10657 pring->lpfc_sli_rcv_async_status =
10658 lpfc_sli_async_event_handler;
10659 pring->num_mask = LPFC_MAX_RING_MASK;
10660 pring->prt[0].profile = 0;
10661 pring->prt[0].rctl = FC_RCTL_ELS_REQ;
10662 pring->prt[0].type = FC_TYPE_ELS;
10663 pring->prt[0].lpfc_sli_rcv_unsol_event =
10664 lpfc_els_unsol_event;
10665 pring->prt[1].profile = 0;
10666 pring->prt[1].rctl = FC_RCTL_ELS_REP;
10667 pring->prt[1].type = FC_TYPE_ELS;
10668 pring->prt[1].lpfc_sli_rcv_unsol_event =
10669 lpfc_els_unsol_event;
10670 pring->prt[2].profile = 0;
10671
10672 pring->prt[2].rctl = FC_RCTL_DD_UNSOL_CTL;
10673
10674 pring->prt[2].type = FC_TYPE_CT;
10675 pring->prt[2].lpfc_sli_rcv_unsol_event =
10676 lpfc_ct_unsol_event;
10677 pring->prt[3].profile = 0;
10678
10679 pring->prt[3].rctl = FC_RCTL_DD_SOL_CTL;
10680
10681 pring->prt[3].type = FC_TYPE_CT;
10682 pring->prt[3].lpfc_sli_rcv_unsol_event =
10683 lpfc_ct_unsol_event;
10684 break;
10685 }
10686 totiocbsize += (pring->sli.sli3.numCiocb *
10687 pring->sli.sli3.sizeCiocb) +
10688 (pring->sli.sli3.numRiocb * pring->sli.sli3.sizeRiocb);
10689 }
10690 if (totiocbsize > MAX_SLIM_IOCB_SIZE) {
10691
10692 printk(KERN_ERR "%d:0462 Too many cmd / rsp ring entries in "
10693 "SLI2 SLIM Data: x%x x%lx\n",
10694 phba->brd_no, totiocbsize,
10695 (unsigned long) MAX_SLIM_IOCB_SIZE);
10696 }
10697 if (phba->cfg_multi_ring_support == 2)
10698 lpfc_extra_ring_setup(phba);
10699
10700 return 0;
10701}
10702
10703
10704
10705
10706
10707
10708
10709
10710
10711
10712
10713
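/**
 * lpfc_sli4_queue_init - Queue initialization function
 * @phba: Pointer to HBA context object.
 *
 * lpfc_sli4_queue_init initializes the mailbox queues and the pring of every
 * hardware IO work queue, the ELS work queue, and (when NVMe is enabled) the
 * NVME LS work queue. It is called during initialization of the SLI4
 * interface of an HBA; the queues are initialized under the hbalock.
 **/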
10714void
10715lpfc_sli4_queue_init(struct lpfc_hba *phba)
10716{
10717 struct lpfc_sli *psli;
10718 struct lpfc_sli_ring *pring;
10719 int i;
10720
10721 psli = &phba->sli;
10722 spin_lock_irq(&phba->hbalock);
10723 INIT_LIST_HEAD(&psli->mboxq);
10724 INIT_LIST_HEAD(&psli->mboxq_cmpl);
10725
10726 for (i = 0; i < phba->cfg_hdw_queue; i++) {
10727 pring = phba->sli4_hba.hdwq[i].io_wq->pring;
10728 pring->flag = 0;
10729 pring->ringno = LPFC_FCP_RING;
10730 pring->txcmplq_cnt = 0;
10731 INIT_LIST_HEAD(&pring->txq);
10732 INIT_LIST_HEAD(&pring->txcmplq);
10733 INIT_LIST_HEAD(&pring->iocb_continueq);
10734 spin_lock_init(&pring->ring_lock);
10735 }
10736 pring = phba->sli4_hba.els_wq->pring;
10737 pring->flag = 0;
10738 pring->ringno = LPFC_ELS_RING;
10739 pring->txcmplq_cnt = 0;
10740 INIT_LIST_HEAD(&pring->txq);
10741 INIT_LIST_HEAD(&pring->txcmplq);
10742 INIT_LIST_HEAD(&pring->iocb_continueq);
10743 spin_lock_init(&pring->ring_lock);
10744
10745 if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) {
10746 pring = phba->sli4_hba.nvmels_wq->pring;
10747 pring->flag = 0;
10748 pring->ringno = LPFC_ELS_RING;
10749 pring->txcmplq_cnt = 0;
10750 INIT_LIST_HEAD(&pring->txq);
10751 INIT_LIST_HEAD(&pring->txcmplq);
10752 INIT_LIST_HEAD(&pring->iocb_continueq);
10753 spin_lock_init(&pring->ring_lock);
10754 }
10755
10756 spin_unlock_irq(&phba->hbalock);
10757}
10758
10759
10760
10761
10762
10763
10764
10765
10766
10767
10768
10769
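/**
 * lpfc_sli_queue_init - Queue initialization function
 * @phba: Pointer to HBA context object.
 *
 * lpfc_sli_queue_init initializes the mailbox queues and the SLI-3 rings:
 * the ring indices, the iocb queues, and the postbufq of each ring. It is
 * called during initialization of the SLI interface of an HBA.
 **/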
10770void
10771lpfc_sli_queue_init(struct lpfc_hba *phba)
10772{
10773 struct lpfc_sli *psli;
10774 struct lpfc_sli_ring *pring;
10775 int i;
10776
10777 psli = &phba->sli;
10778 spin_lock_irq(&phba->hbalock);
10779 INIT_LIST_HEAD(&psli->mboxq);
10780 INIT_LIST_HEAD(&psli->mboxq_cmpl);
10781
10782 for (i = 0; i < psli->num_rings; i++) {
10783 pring = &psli->sli3_ring[i];
10784 pring->ringno = i;
10785 pring->sli.sli3.next_cmdidx = 0;
10786 pring->sli.sli3.local_getidx = 0;
10787 pring->sli.sli3.cmdidx = 0;
10788 INIT_LIST_HEAD(&pring->iocb_continueq);
10789 INIT_LIST_HEAD(&pring->iocb_continue_saveq);
10790 INIT_LIST_HEAD(&pring->postbufq);
10791 pring->flag = 0;
10792 INIT_LIST_HEAD(&pring->txq);
10793 INIT_LIST_HEAD(&pring->txcmplq);
10794 spin_lock_init(&pring->ring_lock);
10795 }
10796 spin_unlock_irq(&phba->hbalock);
10797}
10798
10799
10800
10801
10802
10803
10804
10805
10806
10807
10808
10809
10810
10811
10812
10813
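/**
 * lpfc_sli_mbox_sys_flush - Flush mailbox command sub-system
 * @phba: Pointer to HBA context object.
 *
 * This routine unconditionally flushes all mailbox commands in the three
 * possible stages of the mailbox sub-system: the pending mailbox queue, the
 * outstanding mailbox command, and the completed mailbox queue. Each mailbox
 * command is completed with MBX_NOT_FINISHED status. It is the caller's
 * responsibility to make sure the driver is in a proper state to flush.
 **/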
10814static void
10815lpfc_sli_mbox_sys_flush(struct lpfc_hba *phba)
10816{
10817 LIST_HEAD(completions);
10818 struct lpfc_sli *psli = &phba->sli;
10819 LPFC_MBOXQ_t *pmb;
10820 unsigned long iflag;
10821
10822
10823 local_bh_disable();
10824
10825
10826 spin_lock_irqsave(&phba->hbalock, iflag);
10827
10828
10829 list_splice_init(&phba->sli.mboxq, &completions);
10830
10831 if (psli->mbox_active) {
10832 list_add_tail(&psli->mbox_active->list, &completions);
10833 psli->mbox_active = NULL;
10834 psli->sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
10835 }
10836
10837 list_splice_init(&phba->sli.mboxq_cmpl, &completions);
10838 spin_unlock_irqrestore(&phba->hbalock, iflag);
10839
10840
10841 local_bh_enable();
10842
10843
10844 while (!list_empty(&completions)) {
10845 list_remove_head(&completions, pmb, LPFC_MBOXQ_t, list);
10846 pmb->u.mb.mbxStatus = MBX_NOT_FINISHED;
10847 if (pmb->mbox_cmpl)
10848 pmb->mbox_cmpl(phba, pmb);
10849 }
10850}
10851
10852
10853
10854
10855
10856
10857
10858
10859
10860
10861
10862
10863
10864
10865
10866
10867
10868
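/**
 * lpfc_sli_host_down - Vport cleanup function
 * @vport: Pointer to virtual port object.
 *
 * lpfc_sli_host_down is called to clean up the resources associated with a
 * vport before destroying the virtual port data structures. It frees the
 * discovery resources and the txq iocbs associated with this vport, and
 * issues aborts for this vport's iocbs on the txcmplq.
 *
 * This function is called with no lock held and always returns 1.
 **/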
10869int
10870lpfc_sli_host_down(struct lpfc_vport *vport)
10871{
10872 LIST_HEAD(completions);
10873 struct lpfc_hba *phba = vport->phba;
10874 struct lpfc_sli *psli = &phba->sli;
10875 struct lpfc_queue *qp = NULL;
10876 struct lpfc_sli_ring *pring;
10877 struct lpfc_iocbq *iocb, *next_iocb;
10878 int i;
10879 unsigned long flags = 0;
10880 uint16_t prev_pring_flag;
10881
10882 lpfc_cleanup_discovery_resources(vport);
10883
10884 spin_lock_irqsave(&phba->hbalock, flags);
10885
10886
10887
10888
10889
10890
10891 if (phba->sli_rev != LPFC_SLI_REV4) {
10892 for (i = 0; i < psli->num_rings; i++) {
10893 pring = &psli->sli3_ring[i];
10894 prev_pring_flag = pring->flag;
10895
10896 if (pring->ringno == LPFC_ELS_RING) {
10897 pring->flag |= LPFC_DEFERRED_RING_EVENT;
10898
10899 set_bit(LPFC_DATA_READY, &phba->data_flags);
10900 }
10901 list_for_each_entry_safe(iocb, next_iocb,
10902 &pring->txq, list) {
10903 if (iocb->vport != vport)
10904 continue;
10905 list_move_tail(&iocb->list, &completions);
10906 }
10907 list_for_each_entry_safe(iocb, next_iocb,
10908 &pring->txcmplq, list) {
10909 if (iocb->vport != vport)
10910 continue;
10911 lpfc_sli_issue_abort_iotag(phba, pring, iocb);
10912 }
10913 pring->flag = prev_pring_flag;
10914 }
10915 } else {
10916 list_for_each_entry(qp, &phba->sli4_hba.lpfc_wq_list, wq_list) {
10917 pring = qp->pring;
10918 if (!pring)
10919 continue;
10920 if (pring == phba->sli4_hba.els_wq->pring) {
10921 pring->flag |= LPFC_DEFERRED_RING_EVENT;
10922
10923 set_bit(LPFC_DATA_READY, &phba->data_flags);
10924 }
10925 prev_pring_flag = pring->flag;
10926 spin_lock(&pring->ring_lock);
10927 list_for_each_entry_safe(iocb, next_iocb,
10928 &pring->txq, list) {
10929 if (iocb->vport != vport)
10930 continue;
10931 list_move_tail(&iocb->list, &completions);
10932 }
10933 spin_unlock(&pring->ring_lock);
10934 list_for_each_entry_safe(iocb, next_iocb,
10935 &pring->txcmplq, list) {
10936 if (iocb->vport != vport)
10937 continue;
10938 lpfc_sli_issue_abort_iotag(phba, pring, iocb);
10939 }
10940 pring->flag = prev_pring_flag;
10941 }
10942 }
10943 spin_unlock_irqrestore(&phba->hbalock, flags);
10944
10945
10946 lpfc_sli_cancel_iocbs(phba, &completions, IOSTAT_LOCAL_REJECT,
10947 IOERR_SLI_DOWN);
10948 return 1;
10949}
10950
10951
10952
10953
10954
10955
10956
10957
10958
10959
10960
10961
10962
10963
10964
10965
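/**
 * lpfc_sli_hba_down - Resource cleanup function for the HBA
 * @phba: Pointer to HBA context object.
 *
 * This function cleans up all iocbs, buffers, and mailbox commands while
 * shutting down the HBA: it flushes the mailbox sub-system, cancels all txq
 * iocbs with IOERR_SLI_DOWN, frees the posted ELS buffers, and stops the
 * mailbox timer.
 *
 * This function is called with no lock held and always returns 1.
 **/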
10966int
10967lpfc_sli_hba_down(struct lpfc_hba *phba)
10968{
10969 LIST_HEAD(completions);
10970 struct lpfc_sli *psli = &phba->sli;
10971 struct lpfc_queue *qp = NULL;
10972 struct lpfc_sli_ring *pring;
10973 struct lpfc_dmabuf *buf_ptr;
10974 unsigned long flags = 0;
10975 int i;
10976
10977
10978 lpfc_sli_mbox_sys_shutdown(phba, LPFC_MBX_WAIT);
10979
10980 lpfc_hba_down_prep(phba);
10981
10982
10983 local_bh_disable();
10984
10985 lpfc_fabric_abort_hba(phba);
10986
10987 spin_lock_irqsave(&phba->hbalock, flags);
10988
10989
10990
10991
10992
10993 if (phba->sli_rev != LPFC_SLI_REV4) {
10994 for (i = 0; i < psli->num_rings; i++) {
10995 pring = &psli->sli3_ring[i];
10996
10997 if (pring->ringno == LPFC_ELS_RING) {
10998 pring->flag |= LPFC_DEFERRED_RING_EVENT;
10999
11000 set_bit(LPFC_DATA_READY, &phba->data_flags);
11001 }
11002 list_splice_init(&pring->txq, &completions);
11003 }
11004 } else {
11005 list_for_each_entry(qp, &phba->sli4_hba.lpfc_wq_list, wq_list) {
11006 pring = qp->pring;
11007 if (!pring)
11008 continue;
11009 spin_lock(&pring->ring_lock);
11010 list_splice_init(&pring->txq, &completions);
11011 spin_unlock(&pring->ring_lock);
11012 if (pring == phba->sli4_hba.els_wq->pring) {
11013 pring->flag |= LPFC_DEFERRED_RING_EVENT;
11014
11015 set_bit(LPFC_DATA_READY, &phba->data_flags);
11016 }
11017 }
11018 }
11019 spin_unlock_irqrestore(&phba->hbalock, flags);
11020
11021
11022 lpfc_sli_cancel_iocbs(phba, &completions, IOSTAT_LOCAL_REJECT,
11023 IOERR_SLI_DOWN);
11024
11025 spin_lock_irqsave(&phba->hbalock, flags);
11026 list_splice_init(&phba->elsbuf, &completions);
11027 phba->elsbuf_cnt = 0;
11028 phba->elsbuf_prev_cnt = 0;
11029 spin_unlock_irqrestore(&phba->hbalock, flags);
11030
11031 while (!list_empty(&completions)) {
11032 list_remove_head(&completions, buf_ptr,
11033 struct lpfc_dmabuf, list);
11034 lpfc_mbuf_free(phba, buf_ptr->virt, buf_ptr->phys);
11035 kfree(buf_ptr);
11036 }
11037
11038
11039 local_bh_enable();
11040
11041
11042 del_timer_sync(&psli->mbox_tmo);
11043
11044 spin_lock_irqsave(&phba->pport->work_port_lock, flags);
11045 phba->pport->work_port_events &= ~WORKER_MBOX_TMO;
11046 spin_unlock_irqrestore(&phba->pport->work_port_lock, flags);
11047
11048 return 1;
11049}
11050
11051
11052
11053
11054
11055
11056
11057
11058
11059
11060
11061
11062
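/**
 * lpfc_sli_pcimem_bcopy - SLI memory copy function
 * @srcp: Source memory pointer.
 * @destp: Destination memory pointer.
 * @cnt: Number of bytes to copy (must be a multiple of 32-bit words).
 *
 * This function copies data between driver memory and SLI memory, converting
 * each 32-bit word from little-endian (SLI) byte order to CPU byte order.
 **/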
11063void
11064lpfc_sli_pcimem_bcopy(void *srcp, void *destp, uint32_t cnt)
11065{
11066 uint32_t *src = srcp;
11067 uint32_t *dest = destp;
11068 uint32_t ldata;
11069 int i;
11070
	for (i = 0; i < (int)cnt; i += sizeof(uint32_t)) {
11072 ldata = *src;
11073 ldata = le32_to_cpu(ldata);
11074 *dest = ldata;
11075 src++;
11076 dest++;
11077 }
11078}
11079
11080
11081
11082
11083
11084
11085
11086
11087
11088
11089
11090
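/**
 * lpfc_sli_bemem_bcopy - SLI memory copy function
 * @srcp: Source memory pointer.
 * @destp: Destination memory pointer.
 * @cnt: Number of bytes to copy (must be a multiple of 32-bit words).
 *
 * This function copies data between a big-endian source and driver memory,
 * converting each 32-bit word from big-endian to CPU byte order.
 **/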
11091void
11092lpfc_sli_bemem_bcopy(void *srcp, void *destp, uint32_t cnt)
11093{
11094 uint32_t *src = srcp;
11095 uint32_t *dest = destp;
11096 uint32_t ldata;
11097 int i;
11098
11099 for (i = 0; i < (int)cnt; i += sizeof(uint32_t)) {
11100 ldata = *src;
11101 ldata = be32_to_cpu(ldata);
11102 *dest = ldata;
11103 src++;
11104 dest++;
11105 }
11106}
11107
11108
11109
11110
11111
11112
11113
11114
11115
11116
11117
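/**
 * lpfc_sli_ringpostbuf_put - Function to add a buffer to postbufq
 * @phba: Pointer to HBA context object.
 * @pring: Pointer to driver SLI ring object.
 * @mp: Pointer to driver buffer object.
 *
 * This function adds the buffer descriptor to the ring's postbufq under the
 * hbalock and always returns 0. It is called with no lock held.
 **/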
11118int
11119lpfc_sli_ringpostbuf_put(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
11120 struct lpfc_dmabuf *mp)
11121{
11122
11123
11124 spin_lock_irq(&phba->hbalock);
11125 list_add_tail(&mp->list, &pring->postbufq);
11126 pring->postbufq_cnt++;
11127 spin_unlock_irq(&phba->hbalock);
11128 return 0;
11129}
11130
11131
11132
11133
11134
11135
11136
11137
11138
11139
11140
11141
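/**
 * lpfc_sli_get_buffer_tag - allocates a tag for a CMD_QUE_XRI64_CX buffer
 * @phba: Pointer to HBA context object.
 *
 * When HBQs are enabled, buffers are searched based on tags. This function
 * allocates a tag for a buffer posted using the CMD_QUE_XRI64_CX iocb. The
 * tag is OR-ed with QUE_BUFTAG_BIT so it cannot conflict with tags of
 * buffers posted for unsolicited events. Returns the allocated tag; called
 * with no locks held.
 **/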
11142uint32_t
11143lpfc_sli_get_buffer_tag(struct lpfc_hba *phba)
11144{
11145 spin_lock_irq(&phba->hbalock);
11146 phba->buffer_tag_count++;
11147
11148
11149
11150
11151 phba->buffer_tag_count |= QUE_BUFTAG_BIT;
11152 spin_unlock_irq(&phba->hbalock);
11153 return phba->buffer_tag_count;
11154}
11155
11156
11157
11158
11159
11160
11161
11162
11163
11164
11165
11166
11167
11168
11169
11170
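/**
 * lpfc_sli_ring_taggedbuf_get - find an HBQ buffer by its buffer tag
 * @phba: Pointer to HBA context object.
 * @pring: Pointer to driver SLI ring object.
 * @tag: Buffer tag.
 *
 * This function searches the ring's postbufq for the buffer carrying the
 * given tag. If found, the buffer is removed from the queue and returned;
 * otherwise an error is logged and NULL is returned.
 **/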
11171struct lpfc_dmabuf *
11172lpfc_sli_ring_taggedbuf_get(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
11173 uint32_t tag)
11174{
11175 struct lpfc_dmabuf *mp, *next_mp;
11176 struct list_head *slp = &pring->postbufq;
11177
11178
11179 spin_lock_irq(&phba->hbalock);
11180 list_for_each_entry_safe(mp, next_mp, &pring->postbufq, list) {
11181 if (mp->buffer_tag == tag) {
11182 list_del_init(&mp->list);
11183 pring->postbufq_cnt--;
11184 spin_unlock_irq(&phba->hbalock);
11185 return mp;
11186 }
11187 }
11188
11189 spin_unlock_irq(&phba->hbalock);
11190 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
11191 "0402 Cannot find virtual addr for buffer tag on "
11192 "ring %d Data x%lx x%px x%px x%x\n",
11193 pring->ringno, (unsigned long) tag,
11194 slp->next, slp->prev, pring->postbufq_cnt);
11195
11196 return NULL;
11197}
11198
11199
11200
11201
11202
11203
11204
11205
11206
11207
11208
11209
11210
11211
11212
11213
11214
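/**
 * lpfc_sli_ringpostbuf_get - search postbufq for an unsolicited event buffer
 * @phba: Pointer to HBA context object.
 * @pring: Pointer to driver SLI ring object.
 * @phys: DMA address of the buffer.
 *
 * This function uses the DMA address of an unsolicited event to find the
 * driver's lpfc_dmabuf object on the postbufq. The matching buffer is
 * removed from the queue and returned, or NULL if no match is found.
 **/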
11215struct lpfc_dmabuf *
11216lpfc_sli_ringpostbuf_get(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
11217 dma_addr_t phys)
11218{
11219 struct lpfc_dmabuf *mp, *next_mp;
11220 struct list_head *slp = &pring->postbufq;
11221
11222
11223 spin_lock_irq(&phba->hbalock);
11224 list_for_each_entry_safe(mp, next_mp, &pring->postbufq, list) {
11225 if (mp->phys == phys) {
11226 list_del_init(&mp->list);
11227 pring->postbufq_cnt--;
11228 spin_unlock_irq(&phba->hbalock);
11229 return mp;
11230 }
11231 }
11232
11233 spin_unlock_irq(&phba->hbalock);
11234 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
11235 "0410 Cannot find virtual addr for mapped buf on "
11236 "ring %d Data x%llx x%px x%px x%x\n",
11237 pring->ringno, (unsigned long long)phys,
11238 slp->next, slp->prev, pring->postbufq_cnt);
11239 return NULL;
11240}
11241
11242
11243
11244
11245
11246
11247
11248
11249
11250
11251
11252
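/**
 * lpfc_sli_abort_els_cmpl - Completion handler for the els abort iocbs
 * @phba: Pointer to HBA context object.
 * @cmdiocb: Pointer to driver command iocb object.
 * @rspiocb: Pointer to driver response iocb object.
 *
 * This function is the completion handler for the abort iocbs for ELS
 * commands. It is called from the ELS ring event handler with no lock held
 * and releases the abort iocb after logging the outcome.
 **/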
11253static void
11254lpfc_sli_abort_els_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
11255 struct lpfc_iocbq *rspiocb)
11256{
11257 IOCB_t *irsp = &rspiocb->iocb;
11258 uint16_t abort_iotag, abort_context;
11259 struct lpfc_iocbq *abort_iocb = NULL;
11260
11261 if (irsp->ulpStatus) {
11262
11263
11264
11265
11266
11267 abort_context = cmdiocb->iocb.un.acxri.abortContextTag;
11268 abort_iotag = cmdiocb->iocb.un.acxri.abortIoTag;
11269
11270 spin_lock_irq(&phba->hbalock);
11271 if (phba->sli_rev < LPFC_SLI_REV4) {
11272 if (irsp->ulpCommand == CMD_ABORT_XRI_CX &&
11273 irsp->ulpStatus == IOSTAT_LOCAL_REJECT &&
11274 irsp->un.ulpWord[4] == IOERR_ABORT_REQUESTED) {
11275 spin_unlock_irq(&phba->hbalock);
11276 goto release_iocb;
11277 }
11278 if (abort_iotag != 0 &&
11279 abort_iotag <= phba->sli.last_iotag)
11280 abort_iocb =
11281 phba->sli.iocbq_lookup[abort_iotag];
		} else {
			/*
			 * For SLI4, the abort context tag stored in the
			 * abort iocb is the driver iotag of the command
			 * being aborted, so look it up directly.
			 */
			abort_iocb = phba->sli.iocbq_lookup[abort_context];
		}
11289
11290 lpfc_printf_log(phba, KERN_WARNING, LOG_ELS | LOG_SLI,
11291 "0327 Cannot abort els iocb x%px "
11292 "with tag %x context %x, abort status %x, "
11293 "abort code %x\n",
11294 abort_iocb, abort_iotag, abort_context,
11295 irsp->ulpStatus, irsp->un.ulpWord[4]);
11296
11297 spin_unlock_irq(&phba->hbalock);
11298 }
11299release_iocb:
11300 lpfc_sli_release_iocbq(phba, cmdiocb);
11301 return;
11302}
11303
11304
11305
11306
11307
11308
11309
11310
11311
11312
11313
11314
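/**
 * lpfc_ignore_els_cmpl - Completion handler for aborted ELS command
 * @phba: Pointer to HBA context object.
 * @cmdiocb: Pointer to driver command iocb object.
 * @rspiocb: Pointer to driver response iocb object.
 *
 * This function is the completion handler for ELS commands that are aborted.
 * It is called from the SLI ring event handler with no lock held and frees
 * the memory resources used by the aborted command.
 **/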
11315static void
11316lpfc_ignore_els_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
11317 struct lpfc_iocbq *rspiocb)
11318{
11319 IOCB_t *irsp = &rspiocb->iocb;
11320
11321
11322 lpfc_printf_log(phba, KERN_INFO, LOG_ELS,
11323 "0139 Ignoring ELS cmd tag x%x completion Data: "
11324 "x%x x%x x%x\n",
11325 irsp->ulpIoTag, irsp->ulpStatus,
11326 irsp->un.ulpWord[4], irsp->ulpTimeout);
11327 if (cmdiocb->iocb.ulpCommand == CMD_GEN_REQUEST64_CR)
11328 lpfc_ct_free_iocb(phba, cmdiocb);
11329 else
11330 lpfc_els_free_iocb(phba, cmdiocb);
11331 return;
11332}
11333
11334
11335
11336
11337
11338
11339
11340
11341
11342
11343
11344
11345
11346
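/**
 * lpfc_sli_abort_iotag_issue - Issue abort for a command iocb
 * @phba: Pointer to HBA context object.
 * @pring: Pointer to driver SLI ring object.
 * @cmdiocb: Pointer to driver command iocb object.
 *
 * This function builds and issues an abort iocb for the provided command
 * iocb. Unless the outstanding command is itself an abort request, the abort
 * is issued unconditionally. Called with the hbalock held; returns 0 on
 * memory allocation failure or when the command iocb is an abort request.
 **/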
11347static int
11348lpfc_sli_abort_iotag_issue(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
11349 struct lpfc_iocbq *cmdiocb)
11350{
11351 struct lpfc_vport *vport = cmdiocb->vport;
11352 struct lpfc_iocbq *abtsiocbp;
11353 IOCB_t *icmd = NULL;
11354 IOCB_t *iabt = NULL;
11355 int retval;
11356 unsigned long iflags;
11357 struct lpfc_nodelist *ndlp;
11358
11359
11360
11361
11362
11363
11364 icmd = &cmdiocb->iocb;
11365 if (icmd->ulpCommand == CMD_ABORT_XRI_CN ||
11366 icmd->ulpCommand == CMD_CLOSE_XRI_CN ||
11367 (cmdiocb->iocb_flag & LPFC_DRIVER_ABORTED) != 0)
11368 return 0;
11369
11370
11371 abtsiocbp = __lpfc_sli_get_iocbq(phba);
11372 if (abtsiocbp == NULL)
11373 return 0;
11374
11375
11376
11377
11378 cmdiocb->iocb_flag |= LPFC_DRIVER_ABORTED;
11379
11380 iabt = &abtsiocbp->iocb;
11381 iabt->un.acxri.abortType = ABORT_TYPE_ABTS;
11382 iabt->un.acxri.abortContextTag = icmd->ulpContext;
11383 if (phba->sli_rev == LPFC_SLI_REV4) {
11384 iabt->un.acxri.abortIoTag = cmdiocb->sli4_xritag;
11385 iabt->un.acxri.abortContextTag = cmdiocb->iotag;
11386 } else {
11387 iabt->un.acxri.abortIoTag = icmd->ulpIoTag;
11388 if (pring->ringno == LPFC_ELS_RING) {
11389 ndlp = (struct lpfc_nodelist *)(cmdiocb->context1);
11390 iabt->un.acxri.abortContextTag = ndlp->nlp_rpi;
11391 }
11392 }
11393 iabt->ulpLe = 1;
11394 iabt->ulpClass = icmd->ulpClass;
11395
11396
11397 abtsiocbp->hba_wqidx = cmdiocb->hba_wqidx;
11398 if (cmdiocb->iocb_flag & LPFC_IO_FCP)
11399 abtsiocbp->iocb_flag |= LPFC_USE_FCPWQIDX;
11400 if (cmdiocb->iocb_flag & LPFC_IO_FOF)
11401 abtsiocbp->iocb_flag |= LPFC_IO_FOF;
11402
11403 if (phba->link_state >= LPFC_LINK_UP)
11404 iabt->ulpCommand = CMD_ABORT_XRI_CN;
11405 else
11406 iabt->ulpCommand = CMD_CLOSE_XRI_CN;
11407
11408 abtsiocbp->iocb_cmpl = lpfc_sli_abort_els_cmpl;
11409 abtsiocbp->vport = vport;
11410
11411 lpfc_printf_vlog(vport, KERN_INFO, LOG_SLI,
11412 "0339 Abort xri x%x, original iotag x%x, "
11413 "abort cmd iotag x%x\n",
11414 iabt->un.acxri.abortIoTag,
11415 iabt->un.acxri.abortContextTag,
11416 abtsiocbp->iotag);
11417
11418 if (phba->sli_rev == LPFC_SLI_REV4) {
11419 pring = lpfc_sli4_calc_ring(phba, abtsiocbp);
11420 if (unlikely(pring == NULL))
11421 return 0;
11422
11423 spin_lock_irqsave(&pring->ring_lock, iflags);
11424 retval = __lpfc_sli_issue_iocb(phba, pring->ringno,
11425 abtsiocbp, 0);
11426 spin_unlock_irqrestore(&pring->ring_lock, iflags);
11427 } else {
11428 retval = __lpfc_sli_issue_iocb(phba, pring->ringno,
11429 abtsiocbp, 0);
11430 }
11431
11432 if (retval)
11433 __lpfc_sli_release_iocbq(phba, abtsiocbp);
11434
11435
11436
11437
11438
11439
11440 return retval;
11441}
11442
11443
11444
11445
11446
11447
11448
11449
11450
11451
11452
11453
11454
11455
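/**
 * lpfc_sli_issue_abort_iotag - Abort function for a command iocb
 * @phba: Pointer to HBA context object.
 * @pring: Pointer to driver SLI ring object.
 * @cmdiocb: Pointer to driver command iocb object.
 *
 * This function issues an abort iocb for the provided command iocb. While
 * unloading, no abort is issued to commands on the ELS ring; instead their
 * completion handlers are replaced so that nothing happens when they finish.
 * Called with the hbalock held; returns 0 when the command iocb is itself an
 * abort request.
 **/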
11456int
11457lpfc_sli_issue_abort_iotag(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
11458 struct lpfc_iocbq *cmdiocb)
11459{
11460 struct lpfc_vport *vport = cmdiocb->vport;
11461 int retval = IOCB_ERROR;
11462 IOCB_t *icmd = NULL;
11463
11464 lockdep_assert_held(&phba->hbalock);
11465
11466
11467
11468
11469
11470
11471 icmd = &cmdiocb->iocb;
11472 if (icmd->ulpCommand == CMD_ABORT_XRI_CN ||
11473 icmd->ulpCommand == CMD_CLOSE_XRI_CN ||
11474 (cmdiocb->iocb_flag & LPFC_DRIVER_ABORTED) != 0)
11475 return 0;
11476
11477 if (!pring) {
11478 if (cmdiocb->iocb_flag & LPFC_IO_FABRIC)
11479 cmdiocb->fabric_iocb_cmpl = lpfc_ignore_els_cmpl;
11480 else
11481 cmdiocb->iocb_cmpl = lpfc_ignore_els_cmpl;
11482 goto abort_iotag_exit;
11483 }
11484
11485
11486
11487
11488
11489 if ((vport->load_flag & FC_UNLOADING) &&
11490 (pring->ringno == LPFC_ELS_RING)) {
11491 if (cmdiocb->iocb_flag & LPFC_IO_FABRIC)
11492 cmdiocb->fabric_iocb_cmpl = lpfc_ignore_els_cmpl;
11493 else
11494 cmdiocb->iocb_cmpl = lpfc_ignore_els_cmpl;
11495 goto abort_iotag_exit;
11496 }
11497
11498
11499 retval = lpfc_sli_abort_iotag_issue(phba, pring, cmdiocb);
11500
11501abort_iotag_exit:
11502
11503
11504
11505
11506
11507 return retval;
11508}
11509
11510
11511
11512
11513
11514
11515
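/**
 * lpfc_sli_hba_iocb_abort - Abort all iocbs to an hba.
 * @phba: pointer to lpfc HBA data structure.
 *
 * This routine aborts all pending and outstanding iocbs on every ring
 * (SLI-3) or on every work queue pring (SLI-4) of the HBA.
 **/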
11516void
11517lpfc_sli_hba_iocb_abort(struct lpfc_hba *phba)
11518{
11519 struct lpfc_sli *psli = &phba->sli;
11520 struct lpfc_sli_ring *pring;
11521 struct lpfc_queue *qp = NULL;
11522 int i;
11523
11524 if (phba->sli_rev != LPFC_SLI_REV4) {
11525 for (i = 0; i < psli->num_rings; i++) {
11526 pring = &psli->sli3_ring[i];
11527 lpfc_sli_abort_iocb_ring(phba, pring);
11528 }
11529 return;
11530 }
11531 list_for_each_entry(qp, &phba->sli4_hba.lpfc_wq_list, wq_list) {
11532 pring = qp->pring;
11533 if (!pring)
11534 continue;
11535 lpfc_sli_abort_iocb_ring(phba, pring);
11536 }
11537}
11538
11539
11540
11541
11542
11543
11544
11545
11546
11547
11548
11549
11550
11551
11552
11553
11554
11555
11556
11557
11558
11559
11560
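/**
 * lpfc_sli_validate_fcp_iocb - filter FCP iocbs by vport, target, or LUN
 * @iocbq: Pointer to driver iocb object.
 * @vport: Pointer to driver virtual port object.
 * @tgt_id: SCSI ID of the target.
 * @lun_id: LUN ID of the scsi command.
 * @ctx_cmd: LPFC_CTX_LUN/LPFC_CTX_TGT/LPFC_CTX_HOST
 *
 * This function acts as an iocb filter for functions that abort or count all
 * FCP iocbs pending on a LUN, SCSI target, or SCSI host. It returns 0 when
 * the iocb matches the filtering criteria: the LUN specified by tgt_id and
 * lun_id (LPFC_CTX_LUN), the target specified by tgt_id (LPFC_CTX_TGT), or
 * any FCP command on the vport (LPFC_CTX_HOST); it returns 1 otherwise.
 * Called with no locks held.
 **/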
11561static int
11562lpfc_sli_validate_fcp_iocb(struct lpfc_iocbq *iocbq, struct lpfc_vport *vport,
11563 uint16_t tgt_id, uint64_t lun_id,
11564 lpfc_ctx_cmd ctx_cmd)
11565{
11566 struct lpfc_io_buf *lpfc_cmd;
11567 int rc = 1;
11568
11569 if (iocbq->vport != vport)
11570 return rc;
11571
11572 if (!(iocbq->iocb_flag & LPFC_IO_FCP) ||
11573 !(iocbq->iocb_flag & LPFC_IO_ON_TXCMPLQ))
11574 return rc;
11575
11576 lpfc_cmd = container_of(iocbq, struct lpfc_io_buf, cur_iocbq);
11577
11578 if (lpfc_cmd->pCmd == NULL)
11579 return rc;
11580
11581 switch (ctx_cmd) {
11582 case LPFC_CTX_LUN:
11583 if ((lpfc_cmd->rdata) && (lpfc_cmd->rdata->pnode) &&
11584 (lpfc_cmd->rdata->pnode->nlp_sid == tgt_id) &&
11585 (scsilun_to_int(&lpfc_cmd->fcp_cmnd->fcp_lun) == lun_id))
11586 rc = 0;
11587 break;
11588 case LPFC_CTX_TGT:
11589 if ((lpfc_cmd->rdata) && (lpfc_cmd->rdata->pnode) &&
11590 (lpfc_cmd->rdata->pnode->nlp_sid == tgt_id))
11591 rc = 0;
11592 break;
11593 case LPFC_CTX_HOST:
11594 rc = 0;
11595 break;
11596 default:
11597 printk(KERN_ERR "%s: Unknown context cmd type, value %d\n",
11598 __func__, ctx_cmd);
11599 break;
11600 }
11601
11602 return rc;
11603}
11604
11605
11606
11607
11608
11609
11610
11611
11612
11613
11614
11615
11616
11617
11618
11619
11620
11621
11622
11623
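/**
 * lpfc_sli_sum_iocb - Function to count the number of FCP iocbs pending
 * @vport: Pointer to virtual port.
 * @tgt_id: SCSI ID of the target.
 * @lun_id: LUN ID of the scsi command.
 * @ctx_cmd: LPFC_CTX_LUN/LPFC_CTX_TGT/LPFC_CTX_HOST.
 *
 * This function returns the number of FCP commands pending for the vport,
 * narrowed to a LUN (LPFC_CTX_LUN) or a SCSI target (LPFC_CTX_TGT) where
 * requested. The iotag table is walked under the hbalock.
 **/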
11624int
11625lpfc_sli_sum_iocb(struct lpfc_vport *vport, uint16_t tgt_id, uint64_t lun_id,
11626 lpfc_ctx_cmd ctx_cmd)
11627{
11628 struct lpfc_hba *phba = vport->phba;
11629 struct lpfc_iocbq *iocbq;
11630 int sum, i;
11631
11632 spin_lock_irq(&phba->hbalock);
11633 for (i = 1, sum = 0; i <= phba->sli.last_iotag; i++) {
11634 iocbq = phba->sli.iocbq_lookup[i];
11635
		if (lpfc_sli_validate_fcp_iocb(iocbq, vport, tgt_id, lun_id,
					       ctx_cmd) == 0)
11638 sum++;
11639 }
11640 spin_unlock_irq(&phba->hbalock);
11641
11642 return sum;
11643}
11644
11645
11646
11647
11648
11649
11650
11651
11652
11653
11654
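/**
 * lpfc_sli_abort_fcp_cmpl - Completion handler function for aborted FCP IOCBs
 * @phba: Pointer to HBA context object.
 * @cmdiocb: Pointer to command iocb object.
 * @rspiocb: Pointer to response iocb object.
 *
 * This function is called when an aborted FCP iocb completes. It is called
 * by the ring event handler with no lock held and releases the abort iocb.
 **/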
11655void
11656lpfc_sli_abort_fcp_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
11657 struct lpfc_iocbq *rspiocb)
11658{
11659 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
11660 "3096 ABORT_XRI_CN completing on rpi x%x "
11661 "original iotag x%x, abort cmd iotag x%x "
11662 "status 0x%x, reason 0x%x\n",
11663 cmdiocb->iocb.un.acxri.abortContextTag,
11664 cmdiocb->iocb.un.acxri.abortIoTag,
11665 cmdiocb->iotag, rspiocb->iocb.ulpStatus,
11666 rspiocb->iocb.un.ulpWord[4]);
11667 lpfc_sli_release_iocbq(phba, cmdiocb);
11668 return;
11669}
11670
11671
11672
11673
11674
11675
11676
11677
11678
11679
11680
11681
11682
11683
11684
11685
11686
11687
11688
11689
11690
11691
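/**
 * lpfc_sli_abort_iocb - issue abort for all commands on a host/target/LUN
 * @vport: Pointer to virtual port.
 * @pring: Pointer to driver SLI ring object.
 * @tgt_id: SCSI ID of the target.
 * @lun_id: LUN ID of the scsi command.
 * @abort_cmd: LPFC_CTX_LUN/LPFC_CTX_TGT/LPFC_CTX_HOST.
 *
 * This function sends an abort command for every SCSI command associated
 * with the given virtual port pending on the ring that passes the
 * lpfc_sli_validate_fcp_iocb filter for the given LUN, target, or host
 * context. It returns the number of iocbs it failed to abort.
 * Called with no locks held.
 **/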
11692int
11693lpfc_sli_abort_iocb(struct lpfc_vport *vport, struct lpfc_sli_ring *pring,
11694 uint16_t tgt_id, uint64_t lun_id, lpfc_ctx_cmd abort_cmd)
11695{
11696 struct lpfc_hba *phba = vport->phba;
11697 struct lpfc_iocbq *iocbq;
11698 struct lpfc_iocbq *abtsiocb;
11699 struct lpfc_sli_ring *pring_s4;
11700 IOCB_t *cmd = NULL;
11701 int errcnt = 0, ret_val = 0;
11702 int i;
11703
11704
11705 if (phba->hba_flag & HBA_IOQ_FLUSH)
11706 return errcnt;
11707
11708 for (i = 1; i <= phba->sli.last_iotag; i++) {
11709 iocbq = phba->sli.iocbq_lookup[i];
11710
11711 if (lpfc_sli_validate_fcp_iocb(iocbq, vport, tgt_id, lun_id,
11712 abort_cmd) != 0)
11713 continue;
11714
11715
11716
11717
11718
11719 if (iocbq->iocb_flag & LPFC_DRIVER_ABORTED)
11720 continue;
11721
11722
11723 abtsiocb = lpfc_sli_get_iocbq(phba);
11724 if (abtsiocb == NULL) {
11725 errcnt++;
11726 continue;
11727 }
11728
11729
11730 iocbq->iocb_flag |= LPFC_DRIVER_ABORTED;
11731
11732 cmd = &iocbq->iocb;
11733 abtsiocb->iocb.un.acxri.abortType = ABORT_TYPE_ABTS;
11734 abtsiocb->iocb.un.acxri.abortContextTag = cmd->ulpContext;
11735 if (phba->sli_rev == LPFC_SLI_REV4)
11736 abtsiocb->iocb.un.acxri.abortIoTag = iocbq->sli4_xritag;
11737 else
11738 abtsiocb->iocb.un.acxri.abortIoTag = cmd->ulpIoTag;
11739 abtsiocb->iocb.ulpLe = 1;
11740 abtsiocb->iocb.ulpClass = cmd->ulpClass;
11741 abtsiocb->vport = vport;
11742
11743
11744 abtsiocb->hba_wqidx = iocbq->hba_wqidx;
11745 if (iocbq->iocb_flag & LPFC_IO_FCP)
11746 abtsiocb->iocb_flag |= LPFC_USE_FCPWQIDX;
11747 if (iocbq->iocb_flag & LPFC_IO_FOF)
11748 abtsiocb->iocb_flag |= LPFC_IO_FOF;
11749
11750 if (lpfc_is_link_up(phba))
11751 abtsiocb->iocb.ulpCommand = CMD_ABORT_XRI_CN;
11752 else
11753 abtsiocb->iocb.ulpCommand = CMD_CLOSE_XRI_CN;
11754
11755
11756 abtsiocb->iocb_cmpl = lpfc_sli_abort_fcp_cmpl;
11757 if (phba->sli_rev == LPFC_SLI_REV4) {
11758 pring_s4 = lpfc_sli4_calc_ring(phba, iocbq);
11759 if (!pring_s4)
11760 continue;
11761 ret_val = lpfc_sli_issue_iocb(phba, pring_s4->ringno,
11762 abtsiocb, 0);
		} else {
			ret_val = lpfc_sli_issue_iocb(phba, pring->ringno,
						      abtsiocb, 0);
		}
11766 if (ret_val == IOCB_ERROR) {
11767 lpfc_sli_release_iocbq(phba, abtsiocb);
11768 errcnt++;
11769 continue;
11770 }
11771 }
11772
11773 return errcnt;
11774}
11775
11776
11777
11778
11779
11780
11781
11782
11783
11784
11785
11786
11787
11788
11789
11790
11791
11792
11793
11794
11795
11796
11797
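/**
 * lpfc_sli_abort_taskmgmt - issue aborts for task-management initiated cleanup
 * @vport: Pointer to virtual port.
 * @pring: Pointer to driver SLI ring object.
 * @tgt_id: SCSI ID of the target.
 * @lun_id: LUN ID of the scsi command.
 * @cmd: LPFC_CTX_LUN/LPFC_CTX_TGT/LPFC_CTX_HOST.
 *
 * This function sends an abort command for every outstanding SCSI command
 * associated with the given virtual port that passes the
 * lpfc_sli_validate_fcp_iocb filter, skipping commands that are already
 * being aborted. It returns the number of aborts successfully issued.
 * The whole walk is done under the hbalock.
 **/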
11798int
11799lpfc_sli_abort_taskmgmt(struct lpfc_vport *vport, struct lpfc_sli_ring *pring,
11800 uint16_t tgt_id, uint64_t lun_id, lpfc_ctx_cmd cmd)
11801{
11802 struct lpfc_hba *phba = vport->phba;
11803 struct lpfc_io_buf *lpfc_cmd;
11804 struct lpfc_iocbq *abtsiocbq;
11805 struct lpfc_nodelist *ndlp;
11806 struct lpfc_iocbq *iocbq;
11807 IOCB_t *icmd;
11808 int sum, i, ret_val;
11809 unsigned long iflags;
11810 struct lpfc_sli_ring *pring_s4 = NULL;
11811
11812 spin_lock_irqsave(&phba->hbalock, iflags);
11813
11814
11815 if (phba->hba_flag & HBA_IOQ_FLUSH) {
11816 spin_unlock_irqrestore(&phba->hbalock, iflags);
11817 return 0;
11818 }
11819 sum = 0;
11820
11821 for (i = 1; i <= phba->sli.last_iotag; i++) {
11822 iocbq = phba->sli.iocbq_lookup[i];
11823
11824 if (lpfc_sli_validate_fcp_iocb(iocbq, vport, tgt_id, lun_id,
11825 cmd) != 0)
11826 continue;
11827
11828
11829 lpfc_cmd = container_of(iocbq, struct lpfc_io_buf, cur_iocbq);
11830 spin_lock(&lpfc_cmd->buf_lock);
11831
11832 if (!lpfc_cmd->pCmd) {
11833 spin_unlock(&lpfc_cmd->buf_lock);
11834 continue;
11835 }
11836
11837 if (phba->sli_rev == LPFC_SLI_REV4) {
11838 pring_s4 =
11839 phba->sli4_hba.hdwq[iocbq->hba_wqidx].io_wq->pring;
11840 if (!pring_s4) {
11841 spin_unlock(&lpfc_cmd->buf_lock);
11842 continue;
11843 }
11844
11845 spin_lock(&pring_s4->ring_lock);
11846 }
11847
11848
11849
11850
11851
11852 if ((iocbq->iocb_flag & LPFC_DRIVER_ABORTED) ||
11853 !(iocbq->iocb_flag & LPFC_IO_ON_TXCMPLQ)) {
11854 if (phba->sli_rev == LPFC_SLI_REV4)
11855 spin_unlock(&pring_s4->ring_lock);
11856 spin_unlock(&lpfc_cmd->buf_lock);
11857 continue;
11858 }
11859
11860
11861 abtsiocbq = __lpfc_sli_get_iocbq(phba);
11862 if (!abtsiocbq) {
11863 if (phba->sli_rev == LPFC_SLI_REV4)
11864 spin_unlock(&pring_s4->ring_lock);
11865 spin_unlock(&lpfc_cmd->buf_lock);
11866 continue;
11867 }
11868
11869 icmd = &iocbq->iocb;
11870 abtsiocbq->iocb.un.acxri.abortType = ABORT_TYPE_ABTS;
11871 abtsiocbq->iocb.un.acxri.abortContextTag = icmd->ulpContext;
11872 if (phba->sli_rev == LPFC_SLI_REV4)
11873 abtsiocbq->iocb.un.acxri.abortIoTag =
11874 iocbq->sli4_xritag;
11875 else
11876 abtsiocbq->iocb.un.acxri.abortIoTag = icmd->ulpIoTag;
11877 abtsiocbq->iocb.ulpLe = 1;
11878 abtsiocbq->iocb.ulpClass = icmd->ulpClass;
11879 abtsiocbq->vport = vport;
11880
11881
11882 abtsiocbq->hba_wqidx = iocbq->hba_wqidx;
11883 if (iocbq->iocb_flag & LPFC_IO_FCP)
11884 abtsiocbq->iocb_flag |= LPFC_USE_FCPWQIDX;
11885 if (iocbq->iocb_flag & LPFC_IO_FOF)
11886 abtsiocbq->iocb_flag |= LPFC_IO_FOF;
11887
11888 ndlp = lpfc_cmd->rdata->pnode;
11889
11890 if (lpfc_is_link_up(phba) &&
11891 (ndlp && ndlp->nlp_state == NLP_STE_MAPPED_NODE))
11892 abtsiocbq->iocb.ulpCommand = CMD_ABORT_XRI_CN;
11893 else
11894 abtsiocbq->iocb.ulpCommand = CMD_CLOSE_XRI_CN;
11895
11896
11897 abtsiocbq->iocb_cmpl = lpfc_sli_abort_fcp_cmpl;
11898
11899
11900
11901
11902
11903 iocbq->iocb_flag |= LPFC_DRIVER_ABORTED;
11904
11905 if (phba->sli_rev == LPFC_SLI_REV4) {
11906 ret_val = __lpfc_sli_issue_iocb(phba, pring_s4->ringno,
11907 abtsiocbq, 0);
11908 spin_unlock(&pring_s4->ring_lock);
11909 } else {
11910 ret_val = __lpfc_sli_issue_iocb(phba, pring->ringno,
11911 abtsiocbq, 0);
11912 }
11913
11914 spin_unlock(&lpfc_cmd->buf_lock);
11915
11916 if (ret_val == IOCB_ERROR)
11917 __lpfc_sli_release_iocbq(phba, abtsiocbq);
11918 else
11919 sum++;
11920 }
11921 spin_unlock_irqrestore(&phba->hbalock, iflags);
11922 return sum;
11923}
11924
11925
11926
11927
11928
11929
11930
11931
11932
11933
11934
11935
11936
11937
11938
11939
11940
11941
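/**
 * lpfc_sli_wake_iocb_wait - lpfc_sli_issue_iocb_wait's completion handler
 * @phba: Pointer to HBA context object.
 * @cmdiocbq: Pointer to command iocb.
 * @rspiocbq: Pointer to response iocb.
 *
 * This function is the completion handler for iocbs issued through
 * lpfc_sli_issue_iocb_wait. It copies the response iocb into the memory
 * object supplied by the waiter and wakes up the sleeping thread. If the
 * waiter has already timed out (LPFC_IO_WAKE_TMO), it instead invokes the
 * original completion handler saved in wait_iocb_cmpl, or releases the iocb.
 **/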
11942static void
11943lpfc_sli_wake_iocb_wait(struct lpfc_hba *phba,
11944 struct lpfc_iocbq *cmdiocbq,
11945 struct lpfc_iocbq *rspiocbq)
11946{
11947 wait_queue_head_t *pdone_q;
11948 unsigned long iflags;
11949 struct lpfc_io_buf *lpfc_cmd;
11950
11951 spin_lock_irqsave(&phba->hbalock, iflags);
11952 if (cmdiocbq->iocb_flag & LPFC_IO_WAKE_TMO) {
11953
11954
11955
11956
11957
11958
11959
11960 spin_unlock_irqrestore(&phba->hbalock, iflags);
11961 cmdiocbq->iocb_cmpl = cmdiocbq->wait_iocb_cmpl;
11962 cmdiocbq->wait_iocb_cmpl = NULL;
11963 if (cmdiocbq->iocb_cmpl)
11964 (cmdiocbq->iocb_cmpl)(phba, cmdiocbq, NULL);
11965 else
11966 lpfc_sli_release_iocbq(phba, cmdiocbq);
11967 return;
11968 }
11969
11970 cmdiocbq->iocb_flag |= LPFC_IO_WAKE;
11971 if (cmdiocbq->context2 && rspiocbq)
11972 memcpy(&((struct lpfc_iocbq *)cmdiocbq->context2)->iocb,
11973 &rspiocbq->iocb, sizeof(IOCB_t));
11974
11975
11976 if ((cmdiocbq->iocb_flag & LPFC_IO_FCP) &&
11977 !(cmdiocbq->iocb_flag & LPFC_IO_LIBDFC)) {
11978 lpfc_cmd = container_of(cmdiocbq, struct lpfc_io_buf,
11979 cur_iocbq);
11980 if (rspiocbq && (rspiocbq->iocb_flag & LPFC_EXCHANGE_BUSY))
11981 lpfc_cmd->flags |= LPFC_SBUF_XBUSY;
11982 else
11983 lpfc_cmd->flags &= ~LPFC_SBUF_XBUSY;
11984 }
11985
11986 pdone_q = cmdiocbq->context_un.wait_queue;
11987 if (pdone_q)
11988 wake_up(pdone_q);
11989 spin_unlock_irqrestore(&phba->hbalock, iflags);
11990 return;
11991}
11992
11993
11994
11995
11996
11997
11998
11999
12000
12001
12002
12003
12004
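/**
 * lpfc_chk_iocb_flg - Test the iocb flag under the hbalock
 * @phba: Pointer to HBA context object.
 * @piocbq: Pointer to command iocb.
 * @flag: Flag to test.
 *
 * This routine grabs the hbalock and tests whether the given flag is set in
 * iocb_flag. Returns nonzero if the flag is set, 0 otherwise.
 **/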
12005static int
12006lpfc_chk_iocb_flg(struct lpfc_hba *phba,
12007 struct lpfc_iocbq *piocbq, uint32_t flag)
12008{
12009 unsigned long iflags;
12010 int ret;
12011
12012 spin_lock_irqsave(&phba->hbalock, iflags);
12013 ret = piocbq->iocb_flag & flag;
12014 spin_unlock_irqrestore(&phba->hbalock, iflags);
12015 return ret;
12017}
12018
12019
12020
12021
12022
12023
12024
12025
12026
12027
12028
12029
12030
12031
12032
12033
12034
12035
12036
12037
12038
12039
12040
12041
12042
12043
12044
12045
12046
12047
12048
12049
12050
12051
12052
12053
12054
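/**
 * lpfc_sli_issue_iocb_wait - Synchronous function to issue iocb commands
 * @phba: Pointer to HBA context object.
 * @ring_number: Ring number.
 * @piocb: Pointer to command iocb.
 * @prspiocbq: Pointer to response iocb.
 * @timeout: Timeout in number of seconds.
 *
 * This function issues the iocb to firmware and waits for the iocb to
 * complete, for at most @timeout seconds. On a timeout, LPFC_IO_WAKE_TMO is
 * set so that the completion handler later runs the wait_iocb_cmpl callback
 * (or frees the iocb if none is set), and IOCB_TIMEDOUT is returned. When a
 * response iocb is supplied through @prspiocbq, the completed response is
 * copied into it. This function must not be called from interrupt context.
 **/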
12055int
12056lpfc_sli_issue_iocb_wait(struct lpfc_hba *phba,
12057 uint32_t ring_number,
12058 struct lpfc_iocbq *piocb,
12059 struct lpfc_iocbq *prspiocbq,
12060 uint32_t timeout)
12061{
12062 DECLARE_WAIT_QUEUE_HEAD_ONSTACK(done_q);
12063 long timeleft, timeout_req = 0;
12064 int retval = IOCB_SUCCESS;
12065 uint32_t creg_val;
12066 struct lpfc_iocbq *iocb;
12067 int txq_cnt = 0;
12068 int txcmplq_cnt = 0;
12069 struct lpfc_sli_ring *pring;
12070 unsigned long iflags;
12071 bool iocb_completed = true;
12072
12073 if (phba->sli_rev >= LPFC_SLI_REV4)
12074 pring = lpfc_sli4_calc_ring(phba, piocb);
12075 else
12076 pring = &phba->sli.sli3_ring[ring_number];
12077
12078
12079
12080
12081 if (prspiocbq) {
12082 if (piocb->context2)
12083 return IOCB_ERROR;
12084 piocb->context2 = prspiocbq;
12085 }
12086
12087 piocb->wait_iocb_cmpl = piocb->iocb_cmpl;
12088 piocb->iocb_cmpl = lpfc_sli_wake_iocb_wait;
12089 piocb->context_un.wait_queue = &done_q;
12090 piocb->iocb_flag &= ~(LPFC_IO_WAKE | LPFC_IO_WAKE_TMO);
12091
12092 if (phba->cfg_poll & DISABLE_FCP_RING_INT) {
12093 if (lpfc_readl(phba->HCregaddr, &creg_val))
12094 return IOCB_ERROR;
12095 creg_val |= (HC_R0INT_ENA << LPFC_FCP_RING);
12096 writel(creg_val, phba->HCregaddr);
12097 readl(phba->HCregaddr);
12098 }
12099
12100 retval = lpfc_sli_issue_iocb(phba, ring_number, piocb,
12101 SLI_IOCB_RET_IOCB);
12102 if (retval == IOCB_SUCCESS) {
12103 timeout_req = msecs_to_jiffies(timeout * 1000);
12104 timeleft = wait_event_timeout(done_q,
12105 lpfc_chk_iocb_flg(phba, piocb, LPFC_IO_WAKE),
12106 timeout_req);
12107 spin_lock_irqsave(&phba->hbalock, iflags);
12108 if (!(piocb->iocb_flag & LPFC_IO_WAKE)) {
			/*
			 * IOCB timed out.  Inform the wake iocb wait
			 * completion function and set local status
			 */
12115 iocb_completed = false;
12116 piocb->iocb_flag |= LPFC_IO_WAKE_TMO;
12117 }
12118 spin_unlock_irqrestore(&phba->hbalock, iflags);
12119 if (iocb_completed) {
12120 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
12121 "0331 IOCB wake signaled\n");
			/* Note: we are not indicating if the IOCB has a success
			 * status or not - that's for the caller to check.
			 * IOCB_SUCCESS means just that the command was sent and
			 * completed. Not that it completed successfully.
			 */
12127 } else if (timeleft == 0) {
12128 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
12129 "0338 IOCB wait timeout error - no "
12130 "wake response Data x%x\n", timeout);
12131 retval = IOCB_TIMEDOUT;
12132 } else {
12133 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
12134 "0330 IOCB wake NOT set, "
12135 "Data x%x x%lx\n",
12136 timeout, timeleft);
12137 retval = IOCB_TIMEDOUT;
12138 }
12139 } else if (retval == IOCB_BUSY) {
12140 if (phba->cfg_log_verbose & LOG_SLI) {
12141 list_for_each_entry(iocb, &pring->txq, list) {
12142 txq_cnt++;
12143 }
12144 list_for_each_entry(iocb, &pring->txcmplq, list) {
12145 txcmplq_cnt++;
12146 }
12147 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
12148 "2818 Max IOCBs %d txq cnt %d txcmplq cnt %d\n",
12149 phba->iocb_cnt, txq_cnt, txcmplq_cnt);
12150 }
12151 return retval;
12152 } else {
12153 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
12154 "0332 IOCB wait issue failed, Data x%x\n",
12155 retval);
12156 retval = IOCB_ERROR;
12157 }
12158
12159 if (phba->cfg_poll & DISABLE_FCP_RING_INT) {
12160 if (lpfc_readl(phba->HCregaddr, &creg_val))
12161 return IOCB_ERROR;
12162 creg_val &= ~(HC_R0INT_ENA << LPFC_FCP_RING);
12163 writel(creg_val, phba->HCregaddr);
12164 readl(phba->HCregaddr);
12165 }
12166
12167 if (prspiocbq)
12168 piocb->context2 = NULL;
12169
12170 piocb->context_un.wait_queue = NULL;
12171 piocb->iocb_cmpl = NULL;
12172 return retval;
12173}
12174
/**
 * lpfc_sli_issue_mbox_wait - Synchronous function to issue mailbox
 * @phba: Pointer to HBA context object.
 * @pmboxq: Pointer to driver mailbox object.
 * @timeout: Timeout in number of seconds.
 *
 * This function issues the mailbox to firmware and waits for the
 * mailbox command to complete. If the mailbox command is not
 * completed within @timeout seconds, it returns MBX_TIMEOUT and
 * installs the default completion handler so the late firmware
 * completion can still be reaped. Only one thread waits on a given
 * mailbox, so no lock is needed around the wait itself.
 *
 * Returns MBX_SUCCESS, MBX_TIMEOUT, MBX_BUSY or MBX_NOT_FINISHED.
 **/
12201int
12202lpfc_sli_issue_mbox_wait(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmboxq,
12203 uint32_t timeout)
12204{
12205 struct completion mbox_done;
12206 int retval;
12207 unsigned long flag;
12208
12209 pmboxq->mbox_flag &= ~LPFC_MBX_WAKE;
12210
12211 pmboxq->mbox_cmpl = lpfc_sli_wake_mbox_wait;
12212
12213
12214 init_completion(&mbox_done);
12215 pmboxq->context3 = &mbox_done;
12216
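	/* now issue the command */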
12217 retval = lpfc_sli_issue_mbox(phba, pmboxq, MBX_NOWAIT);
12218 if (retval == MBX_BUSY || retval == MBX_SUCCESS) {
12219 wait_for_completion_timeout(&mbox_done,
12220 msecs_to_jiffies(timeout * 1000));
12221
12222 spin_lock_irqsave(&phba->hbalock, flag);
12223 pmboxq->context3 = NULL;

		/*
		 * if LPFC_MBX_WAKE flag is set the mailbox is completed
		 * else do not free the resources
		 */
12228 if (pmboxq->mbox_flag & LPFC_MBX_WAKE) {
12229 retval = MBX_SUCCESS;
12230 } else {
12231 retval = MBX_TIMEOUT;
12232 pmboxq->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
12233 }
12234 spin_unlock_irqrestore(&phba->hbalock, flag);
12235 }
12236 return retval;
12237}
12238
/**
 * lpfc_sli_mbox_sys_shutdown - shutdown mailbox command sub-system
 * @phba: Pointer to HBA context.
 * @mbx_action: Mailbox shutdown options.
 *
 * This function is called to shutdown the driver's mailbox sub-system.
 * It first marks the mailbox sub-system blocked so that no new
 * asynchronous mailbox command is issued off the pending mailbox
 * command queue. With LPFC_MBX_WAIT, it then waits up to the mailbox
 * command timeout for any active mailbox command to complete
 * gracefully before flushing the queue; with LPFC_MBX_NO_WAIT, it
 * flushes after a short delay.
 **/
12255void
12256lpfc_sli_mbox_sys_shutdown(struct lpfc_hba *phba, int mbx_action)
12257{
12258 struct lpfc_sli *psli = &phba->sli;
12259 unsigned long timeout;
12260
12261 if (mbx_action == LPFC_MBX_NO_WAIT) {
12262
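		/* delay 100ms for port state */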
12263 msleep(100);
12264 lpfc_sli_mbox_sys_flush(phba);
12265 return;
12266 }
12267 timeout = msecs_to_jiffies(LPFC_MBOX_TMO * 1000) + jiffies;

	/* Disable softirqs, including timers, from obtaining phba->hbalock */
12270 local_bh_disable();
12271
12272 spin_lock_irq(&phba->hbalock);
12273 psli->sli_flag |= LPFC_SLI_ASYNC_MBX_BLK;
12274
12275 if (psli->sli_flag & LPFC_SLI_ACTIVE) {
		/* Determine how long we might wait for the active mailbox
		 * command to be gracefully completed by firmware.
		 */
12279 if (phba->sli.mbox_active)
12280 timeout = msecs_to_jiffies(lpfc_mbox_tmo_val(phba,
12281 phba->sli.mbox_active) *
12282 1000) + jiffies;
12283 spin_unlock_irq(&phba->hbalock);

		/* Enable softirqs again, done with phba->hbalock */
12286 local_bh_enable();
12287
12288 while (phba->sli.mbox_active) {
12289
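			/* Check active mailbox complete status every 2ms */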
12290 msleep(2);
12291 if (time_after(jiffies, timeout))
				/* Timeout, let the mailbox flush routine
				 * forcefully release the active mailbox
				 * command
				 */
12295 break;
12296 }
12297 } else {
12298 spin_unlock_irq(&phba->hbalock);

		/* Enable softirqs again, done with phba->hbalock */
12301 local_bh_enable();
12302 }
12303
12304 lpfc_sli_mbox_sys_flush(phba);
12305}
12306
/**
 * lpfc_sli_eratt_read - read sli-3 error attention events
 * @phba: Pointer to HBA context.
 *
 * This function is called to read the SLI3 device error attention registers
 * for possible error attention events. The caller must hold the hostlock
 * with spin_lock_irq().
 *
 * This function returns 1 when there is Error Attention in the Host Attention
 * Register and returns 0 otherwise.
 **/
12318static int
12319lpfc_sli_eratt_read(struct lpfc_hba *phba)
12320{
12321 uint32_t ha_copy;

	/* Read chip Host Attention (HA) register */
12324 if (lpfc_readl(phba->HAregaddr, &ha_copy))
12325 goto unplug_err;
12326
12327 if (ha_copy & HA_ERATT) {
12328
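		/* Read host status register to retrieve error event */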
12329 if (lpfc_sli_read_hs(phba))
12330 goto unplug_err;

		/* Check if a deferred error condition is active */
12333 if ((HS_FFER1 & phba->work_hs) &&
12334 ((HS_FFER2 | HS_FFER3 | HS_FFER4 | HS_FFER5 |
12335 HS_FFER6 | HS_FFER7 | HS_FFER8) & phba->work_hs)) {
12336 phba->hba_flag |= DEFER_ERATT;
12337
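			/* Clear all interrupt enable conditions */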
12338 writel(0, phba->HCregaddr);
12339 readl(phba->HCregaddr);
12340 }

		/* Set the driver HA work bitmap */
12343 phba->work_ha |= HA_ERATT;
12344
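		/* Indicate polling handles this ERATT */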
12345 phba->hba_flag |= HBA_ERATT_HANDLED;
12346 return 1;
12347 }
12348 return 0;
12349
12350unplug_err:
12351
12352 phba->work_hs |= UNPLUG_ERR;
12353
12354 phba->work_ha |= HA_ERATT;
12355
12356 phba->hba_flag |= HBA_ERATT_HANDLED;
12357 return 1;
12358}
12359
/**
 * lpfc_sli4_eratt_read - read sli-4 error attention events
 * @phba: Pointer to HBA context.
 *
 * This function is called to read the SLI4 device error attention registers
 * for possible error attention events. The caller must hold the hostlock
 * with spin_lock_irq().
 *
 * This function returns 1 when there is Error Attention in the Host Attention
 * Register and returns 0 otherwise.
 **/
12371static int
12372lpfc_sli4_eratt_read(struct lpfc_hba *phba)
12373{
12374 uint32_t uerr_sta_hi, uerr_sta_lo;
12375 uint32_t if_type, portsmphr;
12376 struct lpfc_register portstat_reg;
12377
	/*
	 * For now, use the SLI4 device internal unrecoverable error
	 * registers for error attention. This can be changed later.
	 */
12382 if_type = bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf);
12383 switch (if_type) {
12384 case LPFC_SLI_INTF_IF_TYPE_0:
12385 if (lpfc_readl(phba->sli4_hba.u.if_type0.UERRLOregaddr,
12386 &uerr_sta_lo) ||
12387 lpfc_readl(phba->sli4_hba.u.if_type0.UERRHIregaddr,
12388 &uerr_sta_hi)) {
12389 phba->work_hs |= UNPLUG_ERR;
12390 phba->work_ha |= HA_ERATT;
12391 phba->hba_flag |= HBA_ERATT_HANDLED;
12392 return 1;
12393 }
12394 if ((~phba->sli4_hba.ue_mask_lo & uerr_sta_lo) ||
12395 (~phba->sli4_hba.ue_mask_hi & uerr_sta_hi)) {
12396 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
12397 "1423 HBA Unrecoverable error: "
12398 "uerr_lo_reg=0x%x, uerr_hi_reg=0x%x, "
12399 "ue_mask_lo_reg=0x%x, "
12400 "ue_mask_hi_reg=0x%x\n",
12401 uerr_sta_lo, uerr_sta_hi,
12402 phba->sli4_hba.ue_mask_lo,
12403 phba->sli4_hba.ue_mask_hi);
12404 phba->work_status[0] = uerr_sta_lo;
12405 phba->work_status[1] = uerr_sta_hi;
12406 phba->work_ha |= HA_ERATT;
12407 phba->hba_flag |= HBA_ERATT_HANDLED;
12408 return 1;
12409 }
12410 break;
12411 case LPFC_SLI_INTF_IF_TYPE_2:
12412 case LPFC_SLI_INTF_IF_TYPE_6:
12413 if (lpfc_readl(phba->sli4_hba.u.if_type2.STATUSregaddr,
12414 &portstat_reg.word0) ||
12415 lpfc_readl(phba->sli4_hba.PSMPHRregaddr,
12416 &portsmphr)){
12417 phba->work_hs |= UNPLUG_ERR;
12418 phba->work_ha |= HA_ERATT;
12419 phba->hba_flag |= HBA_ERATT_HANDLED;
12420 return 1;
12421 }
12422 if (bf_get(lpfc_sliport_status_err, &portstat_reg)) {
12423 phba->work_status[0] =
12424 readl(phba->sli4_hba.u.if_type2.ERR1regaddr);
12425 phba->work_status[1] =
12426 readl(phba->sli4_hba.u.if_type2.ERR2regaddr);
12427 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
12428 "2885 Port Status Event: "
12429 "port status reg 0x%x, "
12430 "port smphr reg 0x%x, "
12431 "error 1=0x%x, error 2=0x%x\n",
12432 portstat_reg.word0,
12433 portsmphr,
12434 phba->work_status[0],
12435 phba->work_status[1]);
12436 phba->work_ha |= HA_ERATT;
12437 phba->hba_flag |= HBA_ERATT_HANDLED;
12438 return 1;
12439 }
12440 break;
12441 case LPFC_SLI_INTF_IF_TYPE_1:
12442 default:
12443 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
12444 "2886 HBA Error Attention on unsupported "
12445 "if type %d.", if_type);
12446 return 1;
12447 }
12448
12449 return 0;
12450}
12451
/**
 * lpfc_sli_check_eratt - check error attention events
 * @phba: Pointer to HBA context.
 *
 * This function is called from timer soft interrupt context to check HBA's
 * error attention register bit for error attention events.
 *
 * This function returns 1 when there is Error Attention in the Host Attention
 * Register and returns 0 otherwise.
 **/
12462int
12463lpfc_sli_check_eratt(struct lpfc_hba *phba)
12464{
12465 uint32_t ha_copy;

	/* If somebody is waiting to handle an eratt, don't process it
	 * here. The brdkill function will do this.
	 */
12470 if (phba->link_flag & LS_IGNORE_ERATT)
12471 return 0;

	/* Check if interrupt handler handles this ERATT */
12474 spin_lock_irq(&phba->hbalock);
12475 if (phba->hba_flag & HBA_ERATT_HANDLED) {
12476
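		/* Interrupt handler has handled ERATT */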
12477 spin_unlock_irq(&phba->hbalock);
12478 return 0;
12479 }
12480
	/*
	 * If there is deferred error attention, do not check for error
	 * attention
	 */
12485 if (unlikely(phba->hba_flag & DEFER_ERATT)) {
12486 spin_unlock_irq(&phba->hbalock);
12487 return 0;
12488 }

	/* If PCI channel is offline, don't process it */
12491 if (unlikely(pci_channel_offline(phba->pcidev))) {
12492 spin_unlock_irq(&phba->hbalock);
12493 return 0;
12494 }
12495
12496 switch (phba->sli_rev) {
12497 case LPFC_SLI_REV2:
12498 case LPFC_SLI_REV3:
12499
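		/* Read chip Host Attention (HA) register */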
12500 ha_copy = lpfc_sli_eratt_read(phba);
12501 break;
12502 case LPFC_SLI_REV4:
12503
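		/* Read device unrecoverable error (UERR) registers */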
12504 ha_copy = lpfc_sli4_eratt_read(phba);
12505 break;
12506 default:
12507 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
12508 "0299 Invalid SLI revision (%d)\n",
12509 phba->sli_rev);
12510 ha_copy = 0;
12511 break;
12512 }
12513 spin_unlock_irq(&phba->hbalock);
12514
12515 return ha_copy;
12516}
12517
/**
 * lpfc_intr_state_check - Check device state for interrupt handling
 * @phba: Pointer to HBA context.
 *
 * This inline routine checks whether a device or its PCI slot is in a state
 * that the interrupt should be handled.
 *
 * This function returns 0 if the device is in a state that the interrupt
 * should be handled, otherwise -EIO.
 **/
12528static inline int
12529lpfc_intr_state_check(struct lpfc_hba *phba)
12530{
12531
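	/* If the pci channel is offline, ignore all the interrupts */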
12532 if (unlikely(pci_channel_offline(phba->pcidev)))
12533 return -EIO;

	/* Update device-level interrupt statistics */
12536 phba->sli.slistat.sli_intr++;

	/* Ignore all interrupts during initialization. */
12539 if (unlikely(phba->link_state < LPFC_LINK_DOWN))
12540 return -EIO;
12541
12542 return 0;
12543}
12544
/**
 * lpfc_sli_sp_intr_handler - Slow-path interrupt handler to SLI-3 device
 * @irq: Interrupt number.
 * @dev_id: The device context pointer.
 *
 * This function is directly called from the PCI layer as an interrupt
 * service routine when device with SLI-3 interface spec is enabled with
 * MSI-X multi-message interrupt mode and there are slow-path events in
 * the HBA. However, when the device is enabled with either MSI or Pin-IRQ
 * interrupt mode, this function is called as part of the device-level
 * interrupt handler. When the PCI slot is in error recovery or the HBA
 * is undergoing initialization, the interrupt handler will not process
 * the interrupt. The link attention and ELS ring attention events are
 * handled by the worker thread. The interrupt handler signals the worker
 * thread and returns for these events. This function is called without
 * any lock held. It gets the hbalock to access and update SLI data
 * structures.
 *
 * This function returns IRQ_HANDLED when interrupt is handled else it
 * returns IRQ_NONE.
 **/
12566irqreturn_t
12567lpfc_sli_sp_intr_handler(int irq, void *dev_id)
12568{
12569 struct lpfc_hba *phba;
12570 uint32_t ha_copy, hc_copy;
12571 uint32_t work_ha_copy;
12572 unsigned long status;
12573 unsigned long iflag;
12574 uint32_t control;
12575
12576 MAILBOX_t *mbox, *pmbox;
12577 struct lpfc_vport *vport;
12578 struct lpfc_nodelist *ndlp;
12579 struct lpfc_dmabuf *mp;
12580 LPFC_MBOXQ_t *pmb;
12581 int rc;
12582
	/*
	 * Get the driver's phba structure from the dev_id and
	 * assume the HBA is not interrupting.
	 */
12587 phba = (struct lpfc_hba *)dev_id;
12588
12589 if (unlikely(!phba))
12590 return IRQ_NONE;
12591
	/*
	 * Stuff needs to be attended to when this function is invoked as an
	 * individual interrupt handler in MSI-X multi-message interrupt mode
	 */
12596 if (phba->intr_type == MSIX) {
12597
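		/* Check device state for handling interrupt */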
12598 if (lpfc_intr_state_check(phba))
12599 return IRQ_NONE;
12600
12601 spin_lock_irqsave(&phba->hbalock, iflag);
12602 if (lpfc_readl(phba->HAregaddr, &ha_copy))
12603 goto unplug_error;

		/* If somebody is waiting to handle an eratt don't process it
		 * here. The brdkill function will do this.
		 */
12607 if (phba->link_flag & LS_IGNORE_ERATT)
12608 ha_copy &= ~HA_ERATT;
12609
12610 if (ha_copy & HA_ERATT) {
12611 if (phba->hba_flag & HBA_ERATT_HANDLED)
12612
12613 ha_copy &= ~HA_ERATT;
12614 else
12615
12616 phba->hba_flag |= HBA_ERATT_HANDLED;
12617 }

		/*
		 * If there is deferred error attention, do not check for any
		 * interrupt.
		 */
12623 if (unlikely(phba->hba_flag & DEFER_ERATT)) {
12624 spin_unlock_irqrestore(&phba->hbalock, iflag);
12625 return IRQ_NONE;
12626 }

		/* Clear up only attention source related to slow-path */
12629 if (lpfc_readl(phba->HCregaddr, &hc_copy))
12630 goto unplug_error;
12631
12632 writel(hc_copy & ~(HC_MBINT_ENA | HC_R2INT_ENA |
12633 HC_LAINT_ENA | HC_ERINT_ENA),
12634 phba->HCregaddr);
12635 writel((ha_copy & (HA_MBATT | HA_R2_CLR_MSK)),
12636 phba->HAregaddr);
12637 writel(hc_copy, phba->HCregaddr);
12638 readl(phba->HAregaddr);
12639 spin_unlock_irqrestore(&phba->hbalock, iflag);
12640 } else
12641 ha_copy = phba->ha_copy;
12642
12643 work_ha_copy = ha_copy & phba->work_ha_mask;
12644
12645 if (work_ha_copy) {
12646 if (work_ha_copy & HA_LATT) {
12647 if (phba->sli.sli_flag & LPFC_PROCESS_LA) {
				/*
				 * Turn off Link Attention interrupts
				 * until CLEAR_LA done
				 */
12652 spin_lock_irqsave(&phba->hbalock, iflag);
12653 phba->sli.sli_flag &= ~LPFC_PROCESS_LA;
12654 if (lpfc_readl(phba->HCregaddr, &control))
12655 goto unplug_error;
12656 control &= ~HC_LAINT_ENA;
12657 writel(control, phba->HCregaddr);
12658 readl(phba->HCregaddr);
12659 spin_unlock_irqrestore(&phba->hbalock, iflag);
12660 }
12661 else
12662 work_ha_copy &= ~HA_LATT;
12663 }
12664
12665 if (work_ha_copy & ~(HA_ERATT | HA_MBATT | HA_LATT)) {
			/*
			 * Turn off Slow Rings interrupts, LPFC_ELS_RING is
			 * the only slow ring.
			 */
12670 status = (work_ha_copy &
12671 (HA_RXMASK << (4*LPFC_ELS_RING)));
12672 status >>= (4*LPFC_ELS_RING);
12673 if (status & HA_RXMASK) {
12674 spin_lock_irqsave(&phba->hbalock, iflag);
12675 if (lpfc_readl(phba->HCregaddr, &control))
12676 goto unplug_error;
12677
12678 lpfc_debugfs_slow_ring_trc(phba,
12679 "ISR slow ring: ctl:x%x stat:x%x isrcnt:x%x",
12680 control, status,
12681 (uint32_t)phba->sli.slistat.sli_intr);
12682
12683 if (control & (HC_R0INT_ENA << LPFC_ELS_RING)) {
12684 lpfc_debugfs_slow_ring_trc(phba,
12685 "ISR Disable ring:"
12686 "pwork:x%x hawork:x%x wait:x%x",
12687 phba->work_ha, work_ha_copy,
12688 (uint32_t)((unsigned long)
12689 &phba->work_waitq));
12690
12691 control &=
12692 ~(HC_R0INT_ENA << LPFC_ELS_RING);
12693 writel(control, phba->HCregaddr);
12694 readl(phba->HCregaddr);
12695 }
12696 else {
12697 lpfc_debugfs_slow_ring_trc(phba,
12698 "ISR slow ring: pwork:"
12699 "x%x hawork:x%x wait:x%x",
12700 phba->work_ha, work_ha_copy,
12701 (uint32_t)((unsigned long)
12702 &phba->work_waitq));
12703 }
12704 spin_unlock_irqrestore(&phba->hbalock, iflag);
12705 }
12706 }
12707 spin_lock_irqsave(&phba->hbalock, iflag);
12708 if (work_ha_copy & HA_ERATT) {
12709 if (lpfc_sli_read_hs(phba))
12710 goto unplug_error;
			/*
			 * Check if a deferred error condition is active
			 */
12715 if ((HS_FFER1 & phba->work_hs) &&
12716 ((HS_FFER2 | HS_FFER3 | HS_FFER4 | HS_FFER5 |
12717 HS_FFER6 | HS_FFER7 | HS_FFER8) &
12718 phba->work_hs)) {
12719 phba->hba_flag |= DEFER_ERATT;
12720
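				/* Clear all interrupt enable conditions */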
12721 writel(0, phba->HCregaddr);
12722 readl(phba->HCregaddr);
12723 }
12724 }
12725
12726 if ((work_ha_copy & HA_MBATT) && (phba->sli.mbox_active)) {
12727 pmb = phba->sli.mbox_active;
12728 pmbox = &pmb->u.mb;
12729 mbox = phba->mbox;
12730 vport = pmb->vport;

			/* First check out the status word */
12733 lpfc_sli_pcimem_bcopy(mbox, pmbox, sizeof(uint32_t));
12734 if (pmbox->mbxOwner != OWN_HOST) {
12735 spin_unlock_irqrestore(&phba->hbalock, iflag);
				/*
				 * Stray Mailbox Interrupt, mbxCommand <cmd>
				 * mbxStatus <status>
				 */
12740 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
12741 "(%d):0304 Stray Mailbox "
12742 "Interrupt mbxCommand x%x "
12743 "mbxStatus x%x\n",
12744 (vport ? vport->vpi : 0),
12745 pmbox->mbxCommand,
12746 pmbox->mbxStatus);
12747
12748 work_ha_copy &= ~HA_MBATT;
12749 } else {
12750 phba->sli.mbox_active = NULL;
12751 spin_unlock_irqrestore(&phba->hbalock, iflag);
12752 phba->last_completion_time = jiffies;
12753 del_timer(&phba->sli.mbox_tmo);
12754 if (pmb->mbox_cmpl) {
12755 lpfc_sli_pcimem_bcopy(mbox, pmbox,
12756 MAILBOX_CMD_SIZE);
12757 if (pmb->out_ext_byte_len &&
12758 pmb->ctx_buf)
12759 lpfc_sli_pcimem_bcopy(
12760 phba->mbox_ext,
12761 pmb->ctx_buf,
12762 pmb->out_ext_byte_len);
12763 }
12764 if (pmb->mbox_flag & LPFC_MBX_IMED_UNREG) {
12765 pmb->mbox_flag &= ~LPFC_MBX_IMED_UNREG;
12766
12767 lpfc_debugfs_disc_trc(vport,
12768 LPFC_DISC_TRC_MBOX_VPORT,
12769 "MBOX dflt rpi: : "
12770 "status:x%x rpi:x%x",
12771 (uint32_t)pmbox->mbxStatus,
12772 pmbox->un.varWords[0], 0);
12773
12774 if (!pmbox->mbxStatus) {
12775 mp = (struct lpfc_dmabuf *)
12776 (pmb->ctx_buf);
12777 ndlp = (struct lpfc_nodelist *)
12778 pmb->ctx_ndlp;
					/* Reg_LOGIN of dflt RPI was
					 * successful. Now let's get rid of
					 * the RPI using the same mbox
					 * buffer.
					 */
12785 lpfc_unreg_login(phba,
12786 vport->vpi,
12787 pmbox->un.varWords[0],
12788 pmb);
12789 pmb->mbox_cmpl =
12790 lpfc_mbx_cmpl_dflt_rpi;
12791 pmb->ctx_buf = mp;
12792 pmb->ctx_ndlp = ndlp;
12793 pmb->vport = vport;
12794 rc = lpfc_sli_issue_mbox(phba,
12795 pmb,
12796 MBX_NOWAIT);
12797 if (rc != MBX_BUSY)
12798 lpfc_printf_log(phba,
12799 KERN_ERR,
12800 LOG_TRACE_EVENT,
12801 "0350 rc should have"
12802 "been MBX_BUSY\n");
12803 if (rc != MBX_NOT_FINISHED)
12804 goto send_current_mbox;
12805 }
12806 }
12807 spin_lock_irqsave(
12808 &phba->pport->work_port_lock,
12809 iflag);
12810 phba->pport->work_port_events &=
12811 ~WORKER_MBOX_TMO;
12812 spin_unlock_irqrestore(
12813 &phba->pport->work_port_lock,
12814 iflag);
12815 lpfc_mbox_cmpl_put(phba, pmb);
12816 }
12817 } else
12818 spin_unlock_irqrestore(&phba->hbalock, iflag);
12819
12820 if ((work_ha_copy & HA_MBATT) &&
12821 (phba->sli.mbox_active == NULL)) {
12822send_current_mbox:
12823
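		/* Process next mailbox command if there is one */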
12824 do {
12825 rc = lpfc_sli_issue_mbox(phba, NULL,
12826 MBX_NOWAIT);
12827 } while (rc == MBX_NOT_FINISHED);
12828 if (rc != MBX_SUCCESS)
12829 lpfc_printf_log(phba, KERN_ERR,
12830 LOG_TRACE_EVENT,
12831 "0349 rc should be "
12832 "MBX_SUCCESS\n");
12833 }
12834
12835 spin_lock_irqsave(&phba->hbalock, iflag);
12836 phba->work_ha |= work_ha_copy;
12837 spin_unlock_irqrestore(&phba->hbalock, iflag);
12838 lpfc_worker_wake_up(phba);
12839 }
12840 return IRQ_HANDLED;
12841unplug_error:
12842 spin_unlock_irqrestore(&phba->hbalock, iflag);
12843 return IRQ_HANDLED;
12845}
12846
/**
 * lpfc_sli_fp_intr_handler - Fast-path interrupt handler to SLI-3 device
 * @irq: Interrupt number.
 * @dev_id: The device context pointer.
 *
 * This function is directly called from the PCI layer as an interrupt
 * service routine when device with SLI-3 interface spec is enabled with
 * MSI-X multi-message interrupt mode and there is a fast-path FCP IOCB
 * ring event in the HBA. However, when the device is enabled with either
 * MSI or Pin-IRQ interrupt mode, this function is called as part of the
 * device-level interrupt handler. When the PCI slot is in error recovery
 * or the HBA is undergoing initialization, the interrupt handler will not
 * process the interrupt. The SCSI FCP fast-path ring events are handled
 * in the interrupt context. This function is called without any lock
 * held. It gets the hbalock to access and update SLI data structures.
 *
 * This function returns IRQ_HANDLED when interrupt is handled else it
 * returns IRQ_NONE.
 **/
12866irqreturn_t
12867lpfc_sli_fp_intr_handler(int irq, void *dev_id)
12868{
12869 struct lpfc_hba *phba;
12870 uint32_t ha_copy;
12871 unsigned long status;
12872 unsigned long iflag;
12873 struct lpfc_sli_ring *pring;
12874
	/* Get the driver's phba structure from the dev_id */
12878 phba = (struct lpfc_hba *) dev_id;
12879
12880 if (unlikely(!phba))
12881 return IRQ_NONE;
12882
	/*
	 * Stuff needs to be attended to when this function is invoked as an
	 * individual interrupt handler in MSI-X multi-message interrupt mode
	 */
12887 if (phba->intr_type == MSIX) {
12888
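		/* Check device state for handling interrupt */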
12889 if (lpfc_intr_state_check(phba))
12890 return IRQ_NONE;
12891
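		/* Need to read HA REG for FCP ring and other ring events */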
12892 if (lpfc_readl(phba->HAregaddr, &ha_copy))
12893 return IRQ_HANDLED;
12894
12895 spin_lock_irqsave(&phba->hbalock, iflag);
		/*
		 * If there is deferred error attention, do not check for
		 * any interrupt.
		 */
12900 if (unlikely(phba->hba_flag & DEFER_ERATT)) {
12901 spin_unlock_irqrestore(&phba->hbalock, iflag);
12902 return IRQ_NONE;
12903 }
12904 writel((ha_copy & (HA_R0_CLR_MSK | HA_R1_CLR_MSK)),
12905 phba->HAregaddr);
12906 readl(phba->HAregaddr);
12907 spin_unlock_irqrestore(&phba->hbalock, iflag);
12908 } else
12909 ha_copy = phba->ha_copy;
12910
	/*
	 * Process all events on FCP ring. Take the optimized path for FCP IO.
	 */
12914 ha_copy &= ~(phba->work_ha_mask);
12915
12916 status = (ha_copy & (HA_RXMASK << (4*LPFC_FCP_RING)));
12917 status >>= (4*LPFC_FCP_RING);
12918 pring = &phba->sli.sli3_ring[LPFC_FCP_RING];
12919 if (status & HA_RXMASK)
12920 lpfc_sli_handle_fast_ring_event(phba, pring, status);
12921
12922 if (phba->cfg_multi_ring_support == 2) {
		/*
		 * Process all events on extra ring. Take the optimized path
		 * for extra ring IO.
		 */
12927 status = (ha_copy & (HA_RXMASK << (4*LPFC_EXTRA_RING)));
12928 status >>= (4*LPFC_EXTRA_RING);
12929 if (status & HA_RXMASK) {
12930 lpfc_sli_handle_fast_ring_event(phba,
12931 &phba->sli.sli3_ring[LPFC_EXTRA_RING],
12932 status);
12933 }
12934 }
12935 return IRQ_HANDLED;
12936}
12937
/**
 * lpfc_sli_intr_handler - Device-level interrupt handler to SLI-3 device
 * @irq: Interrupt number.
 * @dev_id: The device context pointer.
 *
 * This function is the HBA device-level interrupt handler to device with
 * SLI-3 interface spec, called from the PCI layer when either MSI or
 * Pin-IRQ interrupt mode is enabled and there is an event in the HBA which
 * requires driver attention. This function invokes the slow-path interrupt
 * attention handling function and fast-path interrupt attention handling
 * function in turn to process the relevant HBA attention events. This
 * function is called without any lock held. It gets the hbalock to access
 * and update SLI data structures.
 *
 * This function returns IRQ_HANDLED when interrupt is handled, else it
 * returns IRQ_NONE.
 **/
12955irqreturn_t
12956lpfc_sli_intr_handler(int irq, void *dev_id)
12957{
12958 struct lpfc_hba *phba;
12959 irqreturn_t sp_irq_rc, fp_irq_rc;
12960 unsigned long status1, status2;
12961 uint32_t hc_copy;
12962
	/*
	 * Get the driver's phba structure from the dev_id and
	 * assume the HBA is not interrupting.
	 */
12967 phba = (struct lpfc_hba *) dev_id;
12968
12969 if (unlikely(!phba))
12970 return IRQ_NONE;

	/* Check device state for handling interrupt */
12973 if (lpfc_intr_state_check(phba))
12974 return IRQ_NONE;
12975
12976 spin_lock(&phba->hbalock);
12977 if (lpfc_readl(phba->HAregaddr, &phba->ha_copy)) {
12978 spin_unlock(&phba->hbalock);
12979 return IRQ_HANDLED;
12980 }
12981
12982 if (unlikely(!phba->ha_copy)) {
12983 spin_unlock(&phba->hbalock);
12984 return IRQ_NONE;
12985 } else if (phba->ha_copy & HA_ERATT) {
12986 if (phba->hba_flag & HBA_ERATT_HANDLED)
12987
12988 phba->ha_copy &= ~HA_ERATT;
12989 else
12990
12991 phba->hba_flag |= HBA_ERATT_HANDLED;
12992 }
12993
	/*
	 * If there is deferred error attention, do not check for any
	 * interrupt.
	 */
12997 if (unlikely(phba->hba_flag & DEFER_ERATT)) {
12998 spin_unlock(&phba->hbalock);
12999 return IRQ_NONE;
13000 }

	/* Clear attention sources except link and error attentions */
13003 if (lpfc_readl(phba->HCregaddr, &hc_copy)) {
13004 spin_unlock(&phba->hbalock);
13005 return IRQ_HANDLED;
13006 }
13007 writel(hc_copy & ~(HC_MBINT_ENA | HC_R0INT_ENA | HC_R1INT_ENA
13008 | HC_R2INT_ENA | HC_LAINT_ENA | HC_ERINT_ENA),
13009 phba->HCregaddr);
13010 writel((phba->ha_copy & ~(HA_LATT | HA_ERATT)), phba->HAregaddr);
13011 writel(hc_copy, phba->HCregaddr);
13012 readl(phba->HAregaddr);
13013 spin_unlock(&phba->hbalock);
13014
	/*
	 * Invoke slow-path host attention interrupt handling as appropriate.
	 */

	/* status of events with mailbox and link attention */
13020 status1 = phba->ha_copy & (HA_MBATT | HA_LATT | HA_ERATT);

	/* status of events with ELS ring */
13023 status2 = (phba->ha_copy & (HA_RXMASK << (4*LPFC_ELS_RING)));
13024 status2 >>= (4*LPFC_ELS_RING);
13025
13026 if (status1 || (status2 & HA_RXMASK))
13027 sp_irq_rc = lpfc_sli_sp_intr_handler(irq, dev_id);
13028 else
13029 sp_irq_rc = IRQ_NONE;
13030
	/*
	 * Invoke fast-path host attention interrupt handling as appropriate.
	 */

	/* status of events with FCP ring */
13036 status1 = (phba->ha_copy & (HA_RXMASK << (4*LPFC_FCP_RING)));
13037 status1 >>= (4*LPFC_FCP_RING);

	/* status of events with extra ring */
13040 if (phba->cfg_multi_ring_support == 2) {
13041 status2 = (phba->ha_copy & (HA_RXMASK << (4*LPFC_EXTRA_RING)));
13042 status2 >>= (4*LPFC_EXTRA_RING);
13043 } else
13044 status2 = 0;
13045
13046 if ((status1 & HA_RXMASK) || (status2 & HA_RXMASK))
13047 fp_irq_rc = lpfc_sli_fp_intr_handler(irq, dev_id);
13048 else
13049 fp_irq_rc = IRQ_NONE;

	/* Return device-level interrupt handling status */
13052 return (sp_irq_rc == IRQ_HANDLED) ? sp_irq_rc : fp_irq_rc;
13053}
13054
/**
 * lpfc_sli4_els_xri_abort_event_proc - Process els xri abort event
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked by the worker thread to process all the pending
 * SLI4 els abort xri events.
 **/
13062void lpfc_sli4_els_xri_abort_event_proc(struct lpfc_hba *phba)
13063{
13064 struct lpfc_cq_event *cq_event;

	/* First, declare the els xri abort event has been handled */
13067 spin_lock_irq(&phba->hbalock);
13068 phba->hba_flag &= ~ELS_XRI_ABORT_EVENT;
13069 spin_unlock_irq(&phba->hbalock);
13070
13071 while (!list_empty(&phba->sli4_hba.sp_els_xri_aborted_work_queue)) {
13072
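		/* Get the first event from the head of the event queue */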
13073 spin_lock_irq(&phba->hbalock);
13074 list_remove_head(&phba->sli4_hba.sp_els_xri_aborted_work_queue,
13075 cq_event, struct lpfc_cq_event, list);
13076 spin_unlock_irq(&phba->hbalock);
13077
13078 lpfc_sli4_els_xri_aborted(phba, &cq_event->cqe.wcqe_axri);
13079
13080 lpfc_sli4_cq_event_release(phba, cq_event);
13081 }
13082}
13083
/**
 * lpfc_sli4_iocb_param_transfer - Transfer pIocbOut and cmpl status to pIocbIn
 * @phba: pointer to lpfc hba data structure.
 * @pIocbIn: pointer to the rspiocbq.
 * @pIocbOut: pointer to the cmdiocbq.
 * @wcqe: pointer to the complete wcqe.
 *
 * This routine transfers the fields of a command iocbq to a response iocbq
 * by copying all the IOCB fields from command iocbq and transferring the
 * completion status information from the complete wcqe.
 **/
13095static void
13096lpfc_sli4_iocb_param_transfer(struct lpfc_hba *phba,
13097 struct lpfc_iocbq *pIocbIn,
13098 struct lpfc_iocbq *pIocbOut,
13099 struct lpfc_wcqe_complete *wcqe)
13100{
13101 int numBdes, i;
13102 unsigned long iflags;
13103 uint32_t status, max_response;
13104 struct lpfc_dmabuf *dmabuf;
13105 struct ulp_bde64 *bpl, bde;
13106 size_t offset = offsetof(struct lpfc_iocbq, iocb);
13107
13108 memcpy((char *)pIocbIn + offset, (char *)pIocbOut + offset,
13109 sizeof(struct lpfc_iocbq) - offset);
13110
13111 status = bf_get(lpfc_wcqe_c_status, wcqe);
13112 pIocbIn->iocb.ulpStatus = (status & LPFC_IOCB_STATUS_MASK);
13113 if (pIocbOut->iocb_flag & LPFC_IO_FCP)
13114 if (pIocbIn->iocb.ulpStatus == IOSTAT_FCP_RSP_ERROR)
13115 pIocbIn->iocb.un.fcpi.fcpi_parm =
13116 pIocbOut->iocb.un.fcpi.fcpi_parm -
13117 wcqe->total_data_placed;
13118 else
13119 pIocbIn->iocb.un.ulpWord[4] = wcqe->parameter;
13120 else {
13121 pIocbIn->iocb.un.ulpWord[4] = wcqe->parameter;
13122 switch (pIocbOut->iocb.ulpCommand) {
13123 case CMD_ELS_REQUEST64_CR:
13124 dmabuf = (struct lpfc_dmabuf *)pIocbOut->context3;
13125 bpl = (struct ulp_bde64 *)dmabuf->virt;
13126 bde.tus.w = le32_to_cpu(bpl[1].tus.w);
13127 max_response = bde.tus.f.bdeSize;
13128 break;
13129 case CMD_GEN_REQUEST64_CR:
13130 max_response = 0;
13131 if (!pIocbOut->context3)
13132 break;
13133 numBdes = pIocbOut->iocb.un.genreq64.bdl.bdeSize/
13134 sizeof(struct ulp_bde64);
13135 dmabuf = (struct lpfc_dmabuf *)pIocbOut->context3;
13136 bpl = (struct ulp_bde64 *)dmabuf->virt;
13137 for (i = 0; i < numBdes; i++) {
13138 bde.tus.w = le32_to_cpu(bpl[i].tus.w);
13139 if (bde.tus.f.bdeFlags != BUFF_TYPE_BDE_64)
13140 max_response += bde.tus.f.bdeSize;
13141 }
13142 break;
13143 default:
13144 max_response = wcqe->total_data_placed;
13145 break;
13146 }
13147 if (max_response < wcqe->total_data_placed)
13148 pIocbIn->iocb.un.genreq64.bdl.bdeSize = max_response;
13149 else
13150 pIocbIn->iocb.un.genreq64.bdl.bdeSize =
13151 wcqe->total_data_placed;
13152 }
13153
13154
13155 if (status == CQE_STATUS_DI_ERROR) {
13156 pIocbIn->iocb.ulpStatus = IOSTAT_LOCAL_REJECT;
13157
13158 if (bf_get(lpfc_wcqe_c_bg_edir, wcqe))
13159 pIocbIn->iocb.un.ulpWord[4] = IOERR_RX_DMA_FAILED;
13160 else
13161 pIocbIn->iocb.un.ulpWord[4] = IOERR_TX_DMA_FAILED;
13162
13163 pIocbIn->iocb.unsli3.sli3_bg.bgstat = 0;
13164 if (bf_get(lpfc_wcqe_c_bg_ge, wcqe))
13165 pIocbIn->iocb.unsli3.sli3_bg.bgstat |=
13166 BGS_GUARD_ERR_MASK;
13167 if (bf_get(lpfc_wcqe_c_bg_ae, wcqe))
13168 pIocbIn->iocb.unsli3.sli3_bg.bgstat |=
13169 BGS_APPTAG_ERR_MASK;
13170 if (bf_get(lpfc_wcqe_c_bg_re, wcqe))
13171 pIocbIn->iocb.unsli3.sli3_bg.bgstat |=
13172 BGS_REFTAG_ERR_MASK;
13173
13174
13175 if (bf_get(lpfc_wcqe_c_bg_tdpv, wcqe)) {
13176 pIocbIn->iocb.unsli3.sli3_bg.bgstat |=
13177 BGS_HI_WATER_MARK_PRESENT_MASK;
13178 pIocbIn->iocb.unsli3.sli3_bg.bghm =
13179 wcqe->total_data_placed;
13180 }
13181
		/*
		 * Set ALL the error bits to indicate we don't know what
		 * type of error it is.
		 */
13186 if (!pIocbIn->iocb.unsli3.sli3_bg.bgstat)
13187 pIocbIn->iocb.unsli3.sli3_bg.bgstat |=
13188 (BGS_REFTAG_ERR_MASK | BGS_APPTAG_ERR_MASK |
13189 BGS_GUARD_ERR_MASK);
13190 }

	/* Pick up HBA exchange busy condition */
13193 if (bf_get(lpfc_wcqe_c_xb, wcqe)) {
13194 spin_lock_irqsave(&phba->hbalock, iflags);
13195 pIocbIn->iocb_flag |= LPFC_EXCHANGE_BUSY;
13196 spin_unlock_irqrestore(&phba->hbalock, iflags);
13197 }
13198}
13199
/**
 * lpfc_sli4_els_wcqe_to_rspiocbq - Get response iocbq from els wcqe
 * @phba: Pointer to HBA context object.
 * @irspiocbq: Pointer to work-queue completion queue entry.
 *
 * This routine handles an ELS work-queue completion event and constructs
 * a pseudo response ELS IOCBQ from the SLI4 ELS WCQE for the common
 * discovery engine to handle.
 *
 * Return: Pointer to the receive IOCBQ, NULL otherwise.
 **/
13211static struct lpfc_iocbq *
13212lpfc_sli4_els_wcqe_to_rspiocbq(struct lpfc_hba *phba,
13213 struct lpfc_iocbq *irspiocbq)
13214{
13215 struct lpfc_sli_ring *pring;
13216 struct lpfc_iocbq *cmdiocbq;
13217 struct lpfc_wcqe_complete *wcqe;
13218 unsigned long iflags;
13219
13220 pring = lpfc_phba_elsring(phba);
13221 if (unlikely(!pring))
13222 return NULL;
13223
13224 wcqe = &irspiocbq->cq_event.cqe.wcqe_cmpl;
13225 pring->stats.iocb_event++;
13226
13227 cmdiocbq = lpfc_sli_iocbq_lookup_by_tag(phba, pring,
13228 bf_get(lpfc_wcqe_c_request_tag, wcqe));
13229 if (unlikely(!cmdiocbq)) {
13230 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
13231 "0386 ELS complete with no corresponding "
13232 "cmdiocb: 0x%x 0x%x 0x%x 0x%x\n",
13233 wcqe->word0, wcqe->total_data_placed,
13234 wcqe->parameter, wcqe->word3);
13235 lpfc_sli_release_iocbq(phba, irspiocbq);
13236 return NULL;
13237 }
13238
13239 spin_lock_irqsave(&pring->ring_lock, iflags);
13240
13241 lpfc_sli_ringtxcmpl_put(phba, pring, cmdiocbq);
13242 spin_unlock_irqrestore(&pring->ring_lock, iflags);
13243
13244
13245 lpfc_sli4_iocb_param_transfer(phba, irspiocbq, cmdiocbq, wcqe);
13246
13247 return irspiocbq;
13248}
13249
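/**
 * lpfc_cq_event_setup - Allocate a cq_event and copy in the queue entry
 * @phba: Pointer to HBA context object.
 * @entry: Pointer to the queue entry (CQE/MCQE) to copy.
 * @size: Size of the queue entry in bytes.
 **/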
13250inline struct lpfc_cq_event *
13251lpfc_cq_event_setup(struct lpfc_hba *phba, void *entry, int size)
13252{
13253 struct lpfc_cq_event *cq_event;
13254
13255
13256 cq_event = lpfc_sli4_cq_event_alloc(phba);
13257 if (!cq_event) {
13258 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
13259 "0602 Failed to alloc CQ_EVENT entry\n");
13260 return NULL;
13261 }
13262
13263
13264 memcpy(&cq_event->cqe, entry, size);
13265 return cq_event;
13266}
13267
/**
 * lpfc_sli4_sp_handle_async_event - Handle an asynchronous event
 * @phba: Pointer to HBA context object.
 * @mcqe: Pointer to mailbox completion queue entry.
 *
 * This routine processes an asynchronous event from an SLI4 device: it
 * copies the event into a driver cq_event, queues it to the slow-path
 * async work queue and flags the worker thread.
 *
 * Return: true if work posted to worker thread, otherwise false.
 **/
13278static bool
13279lpfc_sli4_sp_handle_async_event(struct lpfc_hba *phba, struct lpfc_mcqe *mcqe)
13280{
13281 struct lpfc_cq_event *cq_event;
13282 unsigned long iflags;
13283
13284 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
13285 "0392 Async Event: word0:x%x, word1:x%x, "
13286 "word2:x%x, word3:x%x\n", mcqe->word0,
13287 mcqe->mcqe_tag0, mcqe->mcqe_tag1, mcqe->trailer);
13288
13289 cq_event = lpfc_cq_event_setup(phba, mcqe, sizeof(struct lpfc_mcqe));
13290 if (!cq_event)
13291 return false;
13292 spin_lock_irqsave(&phba->hbalock, iflags);
13293 list_add_tail(&cq_event->list, &phba->sli4_hba.sp_asynce_work_queue);
13294
13295 phba->hba_flag |= ASYNC_EVENT;
13296 spin_unlock_irqrestore(&phba->hbalock, iflags);
13297
13298 return true;
13299}
13300
/**
 * lpfc_sli4_sp_handle_mbox_event - Handle a mailbox completion event
 * @phba: Pointer to HBA context object.
 * @mcqe: Pointer to mailbox completion queue entry.
 *
 * This routine processes a mailbox completion queue entry. It wakes up
 * any waiter, handles the LPFC_MBX_IMED_UNREG case, and releases the
 * mailbox posting token so the next pending command can be issued.
 *
 * Return: true if work posted to worker thread, otherwise false.
 **/
13311static bool
13312lpfc_sli4_sp_handle_mbox_event(struct lpfc_hba *phba, struct lpfc_mcqe *mcqe)
13313{
13314 uint32_t mcqe_status;
13315 MAILBOX_t *mbox, *pmbox;
13316 struct lpfc_mqe *mqe;
13317 struct lpfc_vport *vport;
13318 struct lpfc_nodelist *ndlp;
13319 struct lpfc_dmabuf *mp;
13320 unsigned long iflags;
13321 LPFC_MBOXQ_t *pmb;
13322 bool workposted = false;
13323 int rc;
13324
13325
13326 if (!bf_get(lpfc_trailer_completed, mcqe))
13327 goto out_no_mqe_complete;
13328
13329
13330 spin_lock_irqsave(&phba->hbalock, iflags);
13331 pmb = phba->sli.mbox_active;
13332 if (unlikely(!pmb)) {
13333 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
13334 "1832 No pending MBOX command to handle\n");
13335 spin_unlock_irqrestore(&phba->hbalock, iflags);
13336 goto out_no_mqe_complete;
13337 }
13338 spin_unlock_irqrestore(&phba->hbalock, iflags);
13339 mqe = &pmb->u.mqe;
13340 pmbox = (MAILBOX_t *)&pmb->u.mqe;
13341 mbox = phba->mbox;
13342 vport = pmb->vport;
13343
13344
13345 phba->last_completion_time = jiffies;
13346 del_timer(&phba->sli.mbox_tmo);
13347
13348
13349 if (pmb->mbox_cmpl && mbox)
13350 lpfc_sli4_pcimem_bcopy(mbox, mqe, sizeof(struct lpfc_mqe));
13351
	/*
	 * For mcqe errors, conditionally move a modified error code to
	 * the mbox so that the error will not be missed.
	 */
13356 mcqe_status = bf_get(lpfc_mcqe_status, mcqe);
13357 if (mcqe_status != MB_CQE_STATUS_SUCCESS) {
13358 if (bf_get(lpfc_mqe_status, mqe) == MBX_SUCCESS)
13359 bf_set(lpfc_mqe_status, mqe,
13360 (LPFC_MBX_ERROR_RANGE | mcqe_status));
13361 }
13362 if (pmb->mbox_flag & LPFC_MBX_IMED_UNREG) {
13363 pmb->mbox_flag &= ~LPFC_MBX_IMED_UNREG;
13364 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_MBOX_VPORT,
13365 "MBOX dflt rpi: status:x%x rpi:x%x",
13366 mcqe_status,
13367 pmbox->un.varWords[0], 0);
13368 if (mcqe_status == MB_CQE_STATUS_SUCCESS) {
13369 mp = (struct lpfc_dmabuf *)(pmb->ctx_buf);
13370 ndlp = (struct lpfc_nodelist *)pmb->ctx_ndlp;
			/* Reg_LOGIN of dflt RPI was successful. Now let's get
			 * rid of the RPI using the same mbox buffer.
			 */
13374 lpfc_unreg_login(phba, vport->vpi,
13375 pmbox->un.varWords[0], pmb);
13376 pmb->mbox_cmpl = lpfc_mbx_cmpl_dflt_rpi;
13377 pmb->ctx_buf = mp;
13378 pmb->ctx_ndlp = ndlp;
13379 pmb->vport = vport;
13380 rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT);
13381 if (rc != MBX_BUSY)
13382 lpfc_printf_log(phba, KERN_ERR,
13383 LOG_TRACE_EVENT,
13384 "0385 rc should "
13385 "have been MBX_BUSY\n");
13386 if (rc != MBX_NOT_FINISHED)
13387 goto send_current_mbox;
13388 }
13389 }
13390 spin_lock_irqsave(&phba->pport->work_port_lock, iflags);
13391 phba->pport->work_port_events &= ~WORKER_MBOX_TMO;
13392 spin_unlock_irqrestore(&phba->pport->work_port_lock, iflags);
13393
13394
13395 spin_lock_irqsave(&phba->hbalock, iflags);
13396 __lpfc_mbox_cmpl_put(phba, pmb);
13397 phba->work_ha |= HA_MBATT;
13398 spin_unlock_irqrestore(&phba->hbalock, iflags);
13399 workposted = true;
13400
13401send_current_mbox:
13402 spin_lock_irqsave(&phba->hbalock, iflags);
13403
13404 phba->sli.sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
13405
13406 phba->sli.mbox_active = NULL;
13407 if (bf_get(lpfc_trailer_consumed, mcqe))
13408 lpfc_sli4_mq_release(phba->sli4_hba.mbx_wq);
13409 spin_unlock_irqrestore(&phba->hbalock, iflags);
13410
13411 lpfc_worker_wake_up(phba);
13412 return workposted;
13413
13414out_no_mqe_complete:
13415 spin_lock_irqsave(&phba->hbalock, iflags);
13416 if (bf_get(lpfc_trailer_consumed, mcqe))
13417 lpfc_sli4_mq_release(phba->sli4_hba.mbx_wq);
13418 spin_unlock_irqrestore(&phba->hbalock, iflags);
13419 return false;
13420}
13421
/**
 * lpfc_sli4_sp_handle_mcqe - Process a mailbox completion queue entry
 * @phba: Pointer to HBA context object.
 * @cq: Pointer to the completion queue.
 * @cqe: Pointer to mailbox completion queue entry.
 *
 * This routine processes a single mailbox completion queue entry and
 * dispatches it either as an asynchronous event or as a mailbox
 * command completion.
 *
 * Return: true if work posted to worker thread, otherwise false.
 **/
13434static bool
13435lpfc_sli4_sp_handle_mcqe(struct lpfc_hba *phba, struct lpfc_queue *cq,
13436 struct lpfc_cqe *cqe)
13437{
13438 struct lpfc_mcqe mcqe;
13439 bool workposted;
13440
13441 cq->CQ_mbox++;
13442
13443
13444 lpfc_sli4_pcimem_bcopy(cqe, &mcqe, sizeof(struct lpfc_mcqe));
13445
13446
13447 if (!bf_get(lpfc_trailer_async, &mcqe))
13448 workposted = lpfc_sli4_sp_handle_mbox_event(phba, &mcqe);
13449 else
13450 workposted = lpfc_sli4_sp_handle_async_event(phba, &mcqe);
13451 return workposted;
13452}
13453
/**
 * lpfc_sli4_sp_handle_els_wcqe - Handle els work-queue completion event
 * @phba: Pointer to HBA context object.
 * @cq: Pointer to the completion queue.
 * @wcqe: Pointer to work-queue completion queue entry.
 *
 * This routine handles an ELS work-queue completion event by saving the
 * WCQE into an iocbq and queuing it to the slow-path queue event list
 * for the worker thread to process.
 *
 * Return: true if work posted to worker thread, otherwise false.
 **/
13464static bool
13465lpfc_sli4_sp_handle_els_wcqe(struct lpfc_hba *phba, struct lpfc_queue *cq,
13466 struct lpfc_wcqe_complete *wcqe)
13467{
13468 struct lpfc_iocbq *irspiocbq;
13469 unsigned long iflags;
13470 struct lpfc_sli_ring *pring = cq->pring;
13471 int txq_cnt = 0;
13472 int txcmplq_cnt = 0;

	/* Check for response status */
13475 if (unlikely(bf_get(lpfc_wcqe_c_status, wcqe))) {
13476
13477 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
13478 "0357 ELS CQE error: status=x%x: "
13479 "CQE: %08x %08x %08x %08x\n",
13480 bf_get(lpfc_wcqe_c_status, wcqe),
13481 wcqe->word0, wcqe->total_data_placed,
13482 wcqe->parameter, wcqe->word3);
13483 }
13484
13485
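	/* Get an irspiocbq to carry the ELS response to the worker thread */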
13486 irspiocbq = lpfc_sli_get_iocbq(phba);
13487 if (!irspiocbq) {
13488 if (!list_empty(&pring->txq))
13489 txq_cnt++;
13490 if (!list_empty(&pring->txcmplq))
13491 txcmplq_cnt++;
13492 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
13493 "0387 NO IOCBQ data: txq_cnt=%d iocb_cnt=%d "
13494 "els_txcmplq_cnt=%d\n",
13495 txq_cnt, phba->iocb_cnt,
13496 txcmplq_cnt);
13497 return false;
13498 }

	/* Save off the slow-path queue event for work thread to process */
13501 memcpy(&irspiocbq->cq_event.cqe.wcqe_cmpl, wcqe, sizeof(*wcqe));
13502 spin_lock_irqsave(&phba->hbalock, iflags);
13503 list_add_tail(&irspiocbq->cq_event.list,
13504 &phba->sli4_hba.sp_queue_event);
13505 phba->hba_flag |= HBA_SP_QUEUE_EVT;
13506 spin_unlock_irqrestore(&phba->hbalock, iflags);
13507
13508 return true;
13509}
13510
/**
 * lpfc_sli4_sp_handle_rel_wcqe - Handle slow-path WQ entry consumed event
 * @phba: Pointer to HBA context object.
 * @wcqe: Pointer to work-queue entry consumed event.
 *
 * This routine handles a slow-path work-queue entry consumed event by
 * releasing the consumed entries back to the ELS work queue.
 **/
13519static void
13520lpfc_sli4_sp_handle_rel_wcqe(struct lpfc_hba *phba,
13521 struct lpfc_wcqe_release *wcqe)
13522{
13523
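	/* sanity check on queue memory */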
13524 if (unlikely(!phba->sli4_hba.els_wq))
13525 return;
13526
13527 if (bf_get(lpfc_wcqe_r_wq_id, wcqe) == phba->sli4_hba.els_wq->queue_id)
13528 lpfc_sli4_wq_release(phba->sli4_hba.els_wq,
13529 bf_get(lpfc_wcqe_r_wqe_index, wcqe));
13530 else
13531 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
13532 "2579 Slow-path wqe consume event carries "
13533 "miss-matched qid: wcqe-qid=x%x, sp-qid=x%x\n",
13534 bf_get(lpfc_wcqe_r_wqe_index, wcqe),
13535 phba->sli4_hba.els_wq->queue_id);
13536}
13537
/**
 * lpfc_sli4_sp_handle_abort_xri_wcqe - Process a xri abort event
 * @phba: Pointer to HBA context object.
 * @cq: Pointer to the completion queue.
 * @wcqe: Pointer to the XRI aborted work-queue completion queue entry.
 *
 * This routine processes an XRI abort event. For ELS and NVME LS queues
 * the event is queued to the worker thread; for IO queues it is handled
 * directly.
 *
 * Return: true if work posted to worker thread, otherwise false.
 **/
13548static bool
13549lpfc_sli4_sp_handle_abort_xri_wcqe(struct lpfc_hba *phba,
13550 struct lpfc_queue *cq,
13551 struct sli4_wcqe_xri_aborted *wcqe)
13552{
13553 bool workposted = false;
13554 struct lpfc_cq_event *cq_event;
13555 unsigned long iflags;
13556
13557 switch (cq->subtype) {
13558 case LPFC_IO:
13559 lpfc_sli4_io_xri_aborted(phba, wcqe, cq->hdwq);
13560 if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) {
13561
13562 if (phba->nvmet_support)
13563 lpfc_sli4_nvmet_xri_aborted(phba, wcqe);
13564 }
13565 workposted = false;
13566 break;
13567 case LPFC_NVME_LS:
13568 case LPFC_ELS:
13569 cq_event = lpfc_cq_event_setup(
13570 phba, wcqe, sizeof(struct sli4_wcqe_xri_aborted));
13571 if (!cq_event)
13572 return false;
13573 cq_event->hdwq = cq->hdwq;
13574 spin_lock_irqsave(&phba->hbalock, iflags);
13575 list_add_tail(&cq_event->list,
13576 &phba->sli4_hba.sp_els_xri_aborted_work_queue);
13577
13578 phba->hba_flag |= ELS_XRI_ABORT_EVENT;
13579 spin_unlock_irqrestore(&phba->hbalock, iflags);
13580 workposted = true;
13581 break;
13582 default:
13583 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
13584 "0603 Invalid CQ subtype %d: "
13585 "%08x %08x %08x %08x\n",
13586 cq->subtype, wcqe->word0, wcqe->parameter,
13587 wcqe->word2, wcqe->word3);
13588 workposted = false;
13589 break;
13590 }
13591 return workposted;
13592}
13593
13594#define FC_RCTL_MDS_DIAGS 0xF4
13595
/**
 * lpfc_sli4_sp_handle_rcqe - Process unsolicited rcqe
 * @phba: Pointer to HBA context object.
 * @rcqe: Pointer to receive-queue completion queue entry.
 *
 * This routine processes an unsolicited receive-queue completion: it
 * pulls the posted receive buffer and either hands it to the MDS
 * loopback path or queues it to the slow-path queue event list.
 *
 * Return: true if work posted to worker thread, otherwise false.
 **/
13605static bool
13606lpfc_sli4_sp_handle_rcqe(struct lpfc_hba *phba, struct lpfc_rcqe *rcqe)
13607{
13608 bool workposted = false;
13609 struct fc_frame_header *fc_hdr;
13610 struct lpfc_queue *hrq = phba->sli4_hba.hdr_rq;
13611 struct lpfc_queue *drq = phba->sli4_hba.dat_rq;
13612 struct lpfc_nvmet_tgtport *tgtp;
13613 struct hbq_dmabuf *dma_buf;
13614 uint32_t status, rq_id;
13615 unsigned long iflags;
13616
13617
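	/* sanity check on queue memory */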
13618 if (unlikely(!hrq) || unlikely(!drq))
13619 return workposted;
13620
13621 if (bf_get(lpfc_cqe_code, rcqe) == CQE_CODE_RECEIVE_V1)
13622 rq_id = bf_get(lpfc_rcqe_rq_id_v1, rcqe);
13623 else
13624 rq_id = bf_get(lpfc_rcqe_rq_id, rcqe);
13625 if (rq_id != hrq->queue_id)
13626 goto out;
13627
13628 status = bf_get(lpfc_rcqe_status, rcqe);
13629 switch (status) {
13630 case FC_STATUS_RQ_BUF_LEN_EXCEEDED:
13631 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
13632 "2537 Receive Frame Truncated!!\n");
13633 fallthrough;
13634 case FC_STATUS_RQ_SUCCESS:
13635 spin_lock_irqsave(&phba->hbalock, iflags);
13636 lpfc_sli4_rq_release(hrq, drq);
13637 dma_buf = lpfc_sli_hbqbuf_get(&phba->hbqs[0].hbq_buffer_list);
13638 if (!dma_buf) {
13639 hrq->RQ_no_buf_found++;
13640 spin_unlock_irqrestore(&phba->hbalock, iflags);
13641 goto out;
13642 }
13643 hrq->RQ_rcv_buf++;
13644 hrq->RQ_buf_posted--;
13645 memcpy(&dma_buf->cq_event.cqe.rcqe_cmpl, rcqe, sizeof(*rcqe));
13646
13647 fc_hdr = (struct fc_frame_header *)dma_buf->hbuf.virt;
13648
13649 if (fc_hdr->fh_r_ctl == FC_RCTL_MDS_DIAGS ||
13650 fc_hdr->fh_r_ctl == FC_RCTL_DD_UNSOL_DATA) {
13651 spin_unlock_irqrestore(&phba->hbalock, iflags);
13652
13653 if (!(phba->pport->load_flag & FC_UNLOADING))
13654 lpfc_sli4_handle_mds_loopback(phba->pport,
13655 dma_buf);
13656 else
13657 lpfc_in_buf_free(phba, &dma_buf->dbuf);
13658 break;
13659 }

		/* save off the frame for the worker thread to process */
13662 list_add_tail(&dma_buf->cq_event.list,
13663 &phba->sli4_hba.sp_queue_event);
13664
13665 phba->hba_flag |= HBA_SP_QUEUE_EVT;
13666 spin_unlock_irqrestore(&phba->hbalock, iflags);
13667 workposted = true;
13668 break;
13669 case FC_STATUS_INSUFF_BUF_FRM_DISC:
13670 if (phba->nvmet_support) {
13671 tgtp = phba->targetport->private;
13672 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
13673 "6402 RQE Error x%x, posted %d err_cnt "
13674 "%d: %x %x %x\n",
13675 status, hrq->RQ_buf_posted,
13676 hrq->RQ_no_posted_buf,
13677 atomic_read(&tgtp->rcv_fcp_cmd_in),
13678 atomic_read(&tgtp->rcv_fcp_cmd_out),
13679 atomic_read(&tgtp->xmt_fcp_release));
13680 }
13681 fallthrough;
13682
13683 case FC_STATUS_INSUFF_BUF_NEED_BUF:
13684 hrq->RQ_no_posted_buf++;
13685
13686 spin_lock_irqsave(&phba->hbalock, iflags);
13687 phba->hba_flag |= HBA_POST_RECEIVE_BUFFER;
13688 spin_unlock_irqrestore(&phba->hbalock, iflags);
13689 workposted = true;
13690 break;
13691 }
13692out:
13693 return workposted;
13694}
13695
/**
 * lpfc_sli4_sp_handle_cqe - Process a slow path completion queue entry
 * @phba: Pointer to HBA context object.
 * @cq: Pointer to the completion queue.
 * @cqe: Pointer to the completion queue entry.
 *
 * This routine processes a single slow-path completion queue entry and
 * dispatches it by CQE code: work-queue completion, work-queue release,
 * XRI abort or unsolicited receive.
 *
 * Return: true if work posted to worker thread, otherwise false.
 **/
13707static bool
13708lpfc_sli4_sp_handle_cqe(struct lpfc_hba *phba, struct lpfc_queue *cq,
13709 struct lpfc_cqe *cqe)
13710{
13711 struct lpfc_cqe cqevt;
13712 bool workposted = false;
13713
13714
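	/* Copy the work queue CQE and convert endian order if needed */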
13715 lpfc_sli4_pcimem_bcopy(cqe, &cqevt, sizeof(struct lpfc_cqe));
13716
13717
13718 switch (bf_get(lpfc_cqe_code, &cqevt)) {
13719 case CQE_CODE_COMPL_WQE:
13720
13721 phba->last_completion_time = jiffies;
13722 workposted = lpfc_sli4_sp_handle_els_wcqe(phba, cq,
13723 (struct lpfc_wcqe_complete *)&cqevt);
13724 break;
13725 case CQE_CODE_RELEASE_WQE:
13726
13727 lpfc_sli4_sp_handle_rel_wcqe(phba,
13728 (struct lpfc_wcqe_release *)&cqevt);
13729 break;
13730 case CQE_CODE_XRI_ABORTED:
13731
13732 phba->last_completion_time = jiffies;
13733 workposted = lpfc_sli4_sp_handle_abort_xri_wcqe(phba, cq,
13734 (struct sli4_wcqe_xri_aborted *)&cqevt);
13735 break;
13736 case CQE_CODE_RECEIVE:
13737 case CQE_CODE_RECEIVE_V1:
13738
13739 phba->last_completion_time = jiffies;
13740 workposted = lpfc_sli4_sp_handle_rcqe(phba,
13741 (struct lpfc_rcqe *)&cqevt);
13742 break;
13743 default:
13744 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
13745 "0388 Not a valid WCQE code: x%x\n",
13746 bf_get(lpfc_cqe_code, &cqevt));
13747 break;
13748 }
13749 return workposted;
13750}
13751
/**
 * lpfc_sli4_sp_handle_eqe - Process a slow-path event queue entry
 * @phba: Pointer to HBA context object.
 * @eqe: Pointer to fast-path event queue entry.
 * @speq: Pointer to slow-path event queue.
 *
 * This routine processes a slow-path event queue entry: it looks up the
 * child completion queue the entry points to and schedules the queue
 * work to process it.
 **/
13766static void
13767lpfc_sli4_sp_handle_eqe(struct lpfc_hba *phba, struct lpfc_eqe *eqe,
13768 struct lpfc_queue *speq)
13769{
13770 struct lpfc_queue *cq = NULL, *childq;
13771 uint16_t cqid;
13772 int ret = 0;
13773
13774
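	/* Get the reference to the corresponding CQ */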
13775 cqid = bf_get_le32(lpfc_eqe_resource_id, eqe);
13776
13777 list_for_each_entry(childq, &speq->child_list, list) {
13778 if (childq->queue_id == cqid) {
13779 cq = childq;
13780 break;
13781 }
13782 }
13783 if (unlikely(!cq)) {
13784 if (phba->sli.sli_flag & LPFC_SLI_ACTIVE)
13785 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
13786 "0365 Slow-path CQ identifier "
13787 "(%d) does not exist\n", cqid);
13788 return;
13789 }
13790
13791
13792 cq->assoc_qp = speq;
13793
13794 if (is_kdump_kernel())
13795 ret = queue_work(phba->wq, &cq->spwork);
13796 else
13797 ret = queue_work_on(cq->chann, phba->wq, &cq->spwork);
13798
13799 if (!ret)
13800 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
13801 "0390 Cannot schedule queue work "
13802 "for CQ eqcqid=%d, cqid=%d on CPU %d\n",
13803 cqid, cq->queue_id, raw_smp_processor_id());
13804}
13805
/**
 * __lpfc_sli4_process_cq - Process elements of a CQ
 * @phba: Pointer to HBA context object.
 * @cq: Pointer to CQ to be processed.
 * @handler: Routine to process each cqe.
 * @delay: Pointer to a delay to set if the handler is rescheduled.
 * @poll_mode: Polling mode we were called from.
 *
 * This routine processes completion queue entries in a CQ. While a valid
 * queue element is found, the handler is called. During processing, checks
 * are made for periodic doorbell writes to let the hardware know of
 * element consumption.
 *
 * If the max limit on cqes to process is hit, or there are no more valid
 * entries, the loop stops. If we processed a sufficient number of elements,
 * meaning there is sufficient load, rather than rearming and generating
 * another interrupt, a cq rescheduling delay will be set.
 *
 * Returns true if work was posted for the worker thread, otherwise false.
 **/
13827static bool
13828__lpfc_sli4_process_cq(struct lpfc_hba *phba, struct lpfc_queue *cq,
13829 bool (*handler)(struct lpfc_hba *, struct lpfc_queue *,
13830 struct lpfc_cqe *), unsigned long *delay,
13831 enum lpfc_poll_mode poll_mode)
13832{
13833 struct lpfc_cqe *cqe;
13834 bool workposted = false;
13835 int count = 0, consumed = 0;
13836 bool arm = true;
13837
13838
13839 *delay = 0;
13840
13841 if (cmpxchg(&cq->queue_claimed, 0, 1) != 0)
13842 goto rearm_and_exit;
13843
13844
13845 cq->q_flag = 0;
13846 cqe = lpfc_sli4_cq_get(cq);
13847 while (cqe) {
13848 workposted |= handler(phba, cq, cqe);
13849 __lpfc_sli4_consume_cqe(phba, cq, cqe);
13850
13851 consumed++;
13852 if (!(++count % cq->max_proc_limit))
13853 break;
13854
13855 if (!(count % cq->notify_interval)) {
13856 phba->sli4_hba.sli4_write_cq_db(phba, cq, consumed,
13857 LPFC_QUEUE_NOARM);
13858 consumed = 0;
13859 cq->assoc_qp->q_flag |= HBA_EQ_DELAY_CHK;
13860 }
13861
13862 if (count == LPFC_NVMET_CQ_NOTIFY)
13863 cq->q_flag |= HBA_NVMET_CQ_NOTIFY;
13864
13865 cqe = lpfc_sli4_cq_get(cq);
13866 }
13867 if (count >= phba->cfg_cq_poll_threshold) {
13868 *delay = 1;
13869 arm = false;
13870 }
13871
13872
13873 if (poll_mode == LPFC_IRQ_POLL)
13874 irq_poll_complete(&cq->iop);
13875
13876
13877 if (count > cq->CQ_max_cqe)
13878 cq->CQ_max_cqe = count;
13879
13880 cq->assoc_qp->EQ_cqe_cnt += count;
13881
13882
13883 if (unlikely(count == 0))
13884 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
13885 "0369 No entry from completion queue "
13886 "qid=%d\n", cq->queue_id);
13887
13888 xchg(&cq->queue_claimed, 0);
13889
13890rearm_and_exit:
13891 phba->sli4_hba.sli4_write_cq_db(phba, cq, consumed,
13892 arm ? LPFC_QUEUE_REARM : LPFC_QUEUE_NOARM);
13893
13894 return workposted;
13895}
13896
/**
 * __lpfc_sli4_sp_process_cq - Process a slow-path completion queue
 * @cq: Pointer to CQ to be processed.
 *
 * This routine calls the cq processing routine with a handler specific
 * to the type of queue bound to it, rescheduling the cq work if the
 * processing was not complete and waking up the worker thread if new
 * work was posted.
 **/
13912static void
13913__lpfc_sli4_sp_process_cq(struct lpfc_queue *cq)
13914{
13915 struct lpfc_hba *phba = cq->phba;
13916 unsigned long delay;
13917 bool workposted = false;
13918 int ret = 0;
13919
13920
13921 switch (cq->type) {
13922 case LPFC_MCQ:
13923 workposted |= __lpfc_sli4_process_cq(phba, cq,
13924 lpfc_sli4_sp_handle_mcqe,
13925 &delay, LPFC_QUEUE_WORK);
13926 break;
13927 case LPFC_WCQ:
13928 if (cq->subtype == LPFC_IO)
13929 workposted |= __lpfc_sli4_process_cq(phba, cq,
13930 lpfc_sli4_fp_handle_cqe,
13931 &delay, LPFC_QUEUE_WORK);
13932 else
13933 workposted |= __lpfc_sli4_process_cq(phba, cq,
13934 lpfc_sli4_sp_handle_cqe,
13935 &delay, LPFC_QUEUE_WORK);
13936 break;
13937 default:
13938 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
13939 "0370 Invalid completion queue type (%d)\n",
13940 cq->type);
13941 return;
13942 }
13943
13944 if (delay) {
13945 if (is_kdump_kernel())
13946 ret = queue_delayed_work(phba->wq, &cq->sched_spwork,
13947 delay);
13948 else
13949 ret = queue_delayed_work_on(cq->chann, phba->wq,
13950 &cq->sched_spwork, delay);
13951 if (!ret)
13952 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
13953 "0394 Cannot schedule queue work "
13954 "for cqid=%d on CPU %d\n",
13955 cq->queue_id, cq->chann);
13956 }
13957
13958
13959 if (workposted)
13960 lpfc_worker_wake_up(phba);
13961}
13962
/**
 * lpfc_sli4_sp_process_cq - slow-path work handler when started by worker
 * @work: Pointer to work element.
 *
 * Translates from the work element to the cq it relates to, then
 * processes the cq.
 **/
13970static void
13971lpfc_sli4_sp_process_cq(struct work_struct *work)
13972{
13973 struct lpfc_queue *cq = container_of(work, struct lpfc_queue, spwork);
13974
13975 __lpfc_sli4_sp_process_cq(cq);
13976}
13977
/**
 * lpfc_sli4_dly_sp_process_cq - slow-path work handler when started by timer
 * @work: Pointer to work element.
 *
 * Translates from the delayed work element to the cq it relates to,
 * then processes the cq.
 **/
13984static void
13985lpfc_sli4_dly_sp_process_cq(struct work_struct *work)
13986{
13987 struct lpfc_queue *cq = container_of(to_delayed_work(work),
13988 struct lpfc_queue, sched_spwork);
13989
13990 __lpfc_sli4_sp_process_cq(cq);
13991}
13992
/**
 * lpfc_sli4_fp_handle_fcp_wcqe - Process fast-path work queue completion entry
 * @phba: Pointer to HBA context object.
 * @cq: Pointer to associated CQ.
 * @wcqe: Pointer to work-queue completion queue entry.
 *
 * This routine processes a fast-path work queue completion entry from a
 * fast-path FCP completion queue and invokes the command's completion
 * callback.
 **/
14002static void
14003lpfc_sli4_fp_handle_fcp_wcqe(struct lpfc_hba *phba, struct lpfc_queue *cq,
14004 struct lpfc_wcqe_complete *wcqe)
14005{
14006 struct lpfc_sli_ring *pring = cq->pring;
14007 struct lpfc_iocbq *cmdiocbq;
14008 struct lpfc_iocbq irspiocbq;
14009 unsigned long iflags;
14010
14011
14012 if (unlikely(bf_get(lpfc_wcqe_c_status, wcqe))) {
14013
14014
14015
14016 if (((bf_get(lpfc_wcqe_c_status, wcqe) ==
14017 IOSTAT_LOCAL_REJECT)) &&
14018 ((wcqe->parameter & IOERR_PARAM_MASK) ==
14019 IOERR_NO_RESOURCES))
14020 phba->lpfc_rampdown_queue_depth(phba);
14021
14022
14023 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
14024 "0373 FCP CQE cmpl: status=x%x: "
14025 "CQE: %08x %08x %08x %08x\n",
14026 bf_get(lpfc_wcqe_c_status, wcqe),
14027 wcqe->word0, wcqe->total_data_placed,
14028 wcqe->parameter, wcqe->word3);
14029 }
14030
14031
14032 spin_lock_irqsave(&pring->ring_lock, iflags);
14033 pring->stats.iocb_event++;
14034 spin_unlock_irqrestore(&pring->ring_lock, iflags);
14035 cmdiocbq = lpfc_sli_iocbq_lookup_by_tag(phba, pring,
14036 bf_get(lpfc_wcqe_c_request_tag, wcqe));
14037 if (unlikely(!cmdiocbq)) {
14038 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
14039 "0374 FCP complete with no corresponding "
14040 "cmdiocb: iotag (%d)\n",
14041 bf_get(lpfc_wcqe_c_request_tag, wcqe));
14042 return;
14043 }
14044#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
14045 cmdiocbq->isr_timestamp = cq->isr_timestamp;
14046#endif
14047 if (cmdiocbq->iocb_cmpl == NULL) {
14048 if (cmdiocbq->wqe_cmpl) {
14049 if (cmdiocbq->iocb_flag & LPFC_DRIVER_ABORTED) {
14050 spin_lock_irqsave(&phba->hbalock, iflags);
14051 cmdiocbq->iocb_flag &= ~LPFC_DRIVER_ABORTED;
14052 spin_unlock_irqrestore(&phba->hbalock, iflags);
14053 }
14054
14055
14056 (cmdiocbq->wqe_cmpl)(phba, cmdiocbq, wcqe);
14057 return;
14058 }
14059 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
14060 "0375 FCP cmdiocb not callback function "
14061 "iotag: (%d)\n",
14062 bf_get(lpfc_wcqe_c_request_tag, wcqe));
14063 return;
14064 }
14065
14066
14067 lpfc_sli4_iocb_param_transfer(phba, &irspiocbq, cmdiocbq, wcqe);
14068
14069 if (cmdiocbq->iocb_flag & LPFC_DRIVER_ABORTED) {
14070 spin_lock_irqsave(&phba->hbalock, iflags);
14071 cmdiocbq->iocb_flag &= ~LPFC_DRIVER_ABORTED;
14072 spin_unlock_irqrestore(&phba->hbalock, iflags);
14073 }
14074
14075
14076 (cmdiocbq->iocb_cmpl)(phba, cmdiocbq, &irspiocbq);
14077}
14078
/**
 * lpfc_sli4_fp_handle_rel_wcqe - Handle fast-path WQ entry consumed event
 * @phba: Pointer to HBA context object.
 * @cq: Pointer to completion queue.
 * @wcqe: Pointer to work-queue entry consumed event.
 *
 * This routine handles a fast-path work-queue entry consumed event by
 * releasing the consumed entries back to the matching child work queue.
 **/
14088static void
14089lpfc_sli4_fp_handle_rel_wcqe(struct lpfc_hba *phba, struct lpfc_queue *cq,
14090 struct lpfc_wcqe_release *wcqe)
14091{
14092 struct lpfc_queue *childwq;
14093 bool wqid_matched = false;
14094 uint16_t hba_wqid;
14095
14096
14097 hba_wqid = bf_get(lpfc_wcqe_r_wq_id, wcqe);
14098 list_for_each_entry(childwq, &cq->child_list, list) {
14099 if (childwq->queue_id == hba_wqid) {
14100 lpfc_sli4_wq_release(childwq,
14101 bf_get(lpfc_wcqe_r_wqe_index, wcqe));
14102 if (childwq->q_flag & HBA_NVMET_WQFULL)
14103 lpfc_nvmet_wqfull_process(phba, childwq);
14104 wqid_matched = true;
14105 break;
14106 }
14107 }
14108
14109 if (!wqid_matched)
14110 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
14111 "2580 Fast-path wqe consume event carries "
14112 "miss-matched qid: wcqe-qid=x%x\n", hba_wqid);
14113}
14114
/**
 * lpfc_sli4_nvmet_handle_rcqe - Process a receive-queue completion queue entry
 * @phba: Pointer to HBA context object.
 * @cq: Pointer to completion queue.
 * @rcqe: Pointer to receive-queue completion queue entry.
 *
 * This routine processes a receive-queue completion queue entry for the
 * NVMET target and, on success, passes the received frame to the
 * unsolicited NVME FCP event handler.
 *
 * Return: true if work posted to worker thread, otherwise false.
 **/
14125static bool
14126lpfc_sli4_nvmet_handle_rcqe(struct lpfc_hba *phba, struct lpfc_queue *cq,
14127 struct lpfc_rcqe *rcqe)
14128{
14129 bool workposted = false;
14130 struct lpfc_queue *hrq;
14131 struct lpfc_queue *drq;
14132 struct rqb_dmabuf *dma_buf;
14133 struct fc_frame_header *fc_hdr;
14134 struct lpfc_nvmet_tgtport *tgtp;
14135 uint32_t status, rq_id;
14136 unsigned long iflags;
14137 uint32_t fctl, idx;
14138
14139 if ((phba->nvmet_support == 0) ||
14140 (phba->sli4_hba.nvmet_cqset == NULL))
14141 return workposted;
14142
14143 idx = cq->queue_id - phba->sli4_hba.nvmet_cqset[0]->queue_id;
14144 hrq = phba->sli4_hba.nvmet_mrq_hdr[idx];
14145 drq = phba->sli4_hba.nvmet_mrq_data[idx];
14146
14147
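	/* sanity check on queue memory */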
14148 if (unlikely(!hrq) || unlikely(!drq))
14149 return workposted;
14150
14151 if (bf_get(lpfc_cqe_code, rcqe) == CQE_CODE_RECEIVE_V1)
14152 rq_id = bf_get(lpfc_rcqe_rq_id_v1, rcqe);
14153 else
14154 rq_id = bf_get(lpfc_rcqe_rq_id, rcqe);
14155
14156 if ((phba->nvmet_support == 0) ||
14157 (rq_id != hrq->queue_id))
14158 return workposted;
14159
14160 status = bf_get(lpfc_rcqe_status, rcqe);
14161 switch (status) {
14162 case FC_STATUS_RQ_BUF_LEN_EXCEEDED:
14163 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
14164 "6126 Receive Frame Truncated!!\n");
14165 fallthrough;
14166 case FC_STATUS_RQ_SUCCESS:
14167 spin_lock_irqsave(&phba->hbalock, iflags);
14168 lpfc_sli4_rq_release(hrq, drq);
14169 dma_buf = lpfc_sli_rqbuf_get(phba, hrq);
14170 if (!dma_buf) {
14171 hrq->RQ_no_buf_found++;
14172 spin_unlock_irqrestore(&phba->hbalock, iflags);
14173 goto out;
14174 }
14175 spin_unlock_irqrestore(&phba->hbalock, iflags);
14176 hrq->RQ_rcv_buf++;
14177 hrq->RQ_buf_posted--;
14178 fc_hdr = (struct fc_frame_header *)dma_buf->hbuf.virt;
14179
14180
14181 fctl = (fc_hdr->fh_f_ctl[0] << 16 |
14182 fc_hdr->fh_f_ctl[1] << 8 |
14183 fc_hdr->fh_f_ctl[2]);
14184 if (((fctl &
14185 (FC_FC_FIRST_SEQ | FC_FC_END_SEQ | FC_FC_SEQ_INIT)) !=
14186 (FC_FC_FIRST_SEQ | FC_FC_END_SEQ | FC_FC_SEQ_INIT)) ||
14187 (fc_hdr->fh_seq_cnt != 0))
14188 goto drop;
14189
14190 if (fc_hdr->fh_type == FC_TYPE_FCP) {
14191 dma_buf->bytes_recv = bf_get(lpfc_rcqe_length, rcqe);
14192 lpfc_nvmet_unsol_fcp_event(
14193 phba, idx, dma_buf, cq->isr_timestamp,
14194 cq->q_flag & HBA_NVMET_CQ_NOTIFY);
14195 return false;
14196 }
14197drop:
14198 lpfc_rq_buf_free(phba, &dma_buf->hbuf);
14199 break;
14200 case FC_STATUS_INSUFF_BUF_FRM_DISC:
14201 if (phba->nvmet_support) {
14202 tgtp = phba->targetport->private;
14203 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
14204 "6401 RQE Error x%x, posted %d err_cnt "
14205 "%d: %x %x %x\n",
14206 status, hrq->RQ_buf_posted,
14207 hrq->RQ_no_posted_buf,
14208 atomic_read(&tgtp->rcv_fcp_cmd_in),
14209 atomic_read(&tgtp->rcv_fcp_cmd_out),
14210 atomic_read(&tgtp->xmt_fcp_release));
14211 }
14212 fallthrough;
14213
14214 case FC_STATUS_INSUFF_BUF_NEED_BUF:
14215 hrq->RQ_no_posted_buf++;
14216
14217 break;
14218 }
14219out:
14220 return workposted;
14221}
14222
/**
 * lpfc_sli4_fp_handle_cqe - Process fast-path work queue completion entry
 * @phba: Pointer to HBA context object.
 * @cq: Pointer to the completion queue.
 * @cqe: Pointer to fast-path completion queue entry.
 *
 * This routine processes a fast-path work queue completion entry from a
 * fast-path completion queue and dispatches it by CQE code.
 *
 * Return: true if work posted to worker thread, otherwise false.
 **/
14234static bool
14235lpfc_sli4_fp_handle_cqe(struct lpfc_hba *phba, struct lpfc_queue *cq,
14236 struct lpfc_cqe *cqe)
14237{
14238 struct lpfc_wcqe_release wcqe;
14239 bool workposted = false;
14240
14241
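	/* Copy the work queue CQE and convert endian order if needed */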
14242 lpfc_sli4_pcimem_bcopy(cqe, &wcqe, sizeof(struct lpfc_cqe));
14243
14244
14245 switch (bf_get(lpfc_wcqe_c_code, &wcqe)) {
14246 case CQE_CODE_COMPL_WQE:
14247 case CQE_CODE_NVME_ERSP:
14248 cq->CQ_wq++;
14249
14250 phba->last_completion_time = jiffies;
14251 if (cq->subtype == LPFC_IO || cq->subtype == LPFC_NVME_LS)
14252 lpfc_sli4_fp_handle_fcp_wcqe(phba, cq,
14253 (struct lpfc_wcqe_complete *)&wcqe);
14254 break;
14255 case CQE_CODE_RELEASE_WQE:
14256 cq->CQ_release_wqe++;
14257
14258 lpfc_sli4_fp_handle_rel_wcqe(phba, cq,
14259 (struct lpfc_wcqe_release *)&wcqe);
14260 break;
14261 case CQE_CODE_XRI_ABORTED:
14262 cq->CQ_xri_aborted++;
14263
14264 phba->last_completion_time = jiffies;
14265 workposted = lpfc_sli4_sp_handle_abort_xri_wcqe(phba, cq,
14266 (struct sli4_wcqe_xri_aborted *)&wcqe);
14267 break;
14268 case CQE_CODE_RECEIVE_V1:
14269 case CQE_CODE_RECEIVE:
14270 phba->last_completion_time = jiffies;
14271 if (cq->subtype == LPFC_NVMET) {
14272 workposted = lpfc_sli4_nvmet_handle_rcqe(
14273 phba, cq, (struct lpfc_rcqe *)&wcqe);
14274 }
14275 break;
14276 default:
14277 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
14278 "0144 Not a valid CQE code: x%x\n",
14279 bf_get(lpfc_wcqe_c_code, &wcqe));
14280 break;
14281 }
14282 return workposted;
14283}
14284
/**
 * lpfc_sli4_sched_cq_work - Schedules cq work
 * @phba: Pointer to HBA context object.
 * @cq: Pointer to CQ.
 * @cqid: CQ ID.
 *
 * This routine checks the poll mode of the CQ and either schedules the
 * irq_poll softirq or queues the cq work element to a workqueue on the
 * CQ's designated CPU.
 **/
14299static void lpfc_sli4_sched_cq_work(struct lpfc_hba *phba,
14300 struct lpfc_queue *cq, uint16_t cqid)
14301{
14302 int ret = 0;
14303
14304 switch (cq->poll_mode) {
14305 case LPFC_IRQ_POLL:
14306 irq_poll_sched(&cq->iop);
14307 break;
14308 case LPFC_QUEUE_WORK:
14309 default:
14310 if (is_kdump_kernel())
14311 ret = queue_work(phba->wq, &cq->irqwork);
14312 else
14313 ret = queue_work_on(cq->chann, phba->wq, &cq->irqwork);
14314 if (!ret)
14315 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
14316 "0383 Cannot schedule queue work "
14317 "for CQ eqcqid=%d, cqid=%d on CPU %d\n",
14318 cqid, cq->queue_id,
14319 raw_smp_processor_id());
14320 }
14321}
14322
/**
 * lpfc_sli4_hba_handle_eqe - Process a fast-path event queue entry
 * @phba: Pointer to HBA context object.
 * @eq: Pointer to the queue structure.
 * @eqe: Pointer to fast-path event queue entry.
 *
 * This routine processes a fast-path event queue entry: it looks up the
 * completion queue the entry points to (fast-path, NVMET or NVME LS)
 * and schedules the queue work to process it. Events that do not map
 * to a fast-path CQ are handed to the slow-path handler.
 **/
14336static void
14337lpfc_sli4_hba_handle_eqe(struct lpfc_hba *phba, struct lpfc_queue *eq,
14338 struct lpfc_eqe *eqe)
14339{
14340 struct lpfc_queue *cq = NULL;
14341 uint32_t qidx = eq->hdwq;
14342 uint16_t cqid, id;
14343
14344 if (unlikely(bf_get_le32(lpfc_eqe_major_code, eqe) != 0)) {
14345 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
14346 "0366 Not a valid completion "
14347 "event: majorcode=x%x, minorcode=x%x\n",
14348 bf_get_le32(lpfc_eqe_major_code, eqe),
14349 bf_get_le32(lpfc_eqe_minor_code, eqe));
14350 return;
14351 }
14352
14353
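	/* Get the reference to the corresponding CQ */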
14354 cqid = bf_get_le32(lpfc_eqe_resource_id, eqe);
14355
14356
14357 if (cqid <= phba->sli4_hba.cq_max) {
14358 cq = phba->sli4_hba.cq_lookup[cqid];
14359 if (cq)
14360 goto work_cq;
14361 }
14362
14363
14364 if (phba->cfg_nvmet_mrq && phba->sli4_hba.nvmet_cqset) {
14365 id = phba->sli4_hba.nvmet_cqset[0]->queue_id;
14366 if ((cqid >= id) && (cqid < (id + phba->cfg_nvmet_mrq))) {
14367
14368 cq = phba->sli4_hba.nvmet_cqset[cqid - id];
14369 goto process_cq;
14370 }
14371 }
14372
14373 if (phba->sli4_hba.nvmels_cq &&
14374 (cqid == phba->sli4_hba.nvmels_cq->queue_id)) {
14375
14376 cq = phba->sli4_hba.nvmels_cq;
14377 }
14378
14379
14380 if (cq == NULL) {
14381 lpfc_sli4_sp_handle_eqe(phba, eqe,
14382 phba->sli4_hba.hdwq[qidx].hba_eq);
14383 return;
14384 }
14385
14386process_cq:
14387 if (unlikely(cqid != cq->queue_id)) {
14388 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
14389 "0368 Miss-matched fast-path completion "
14390 "queue identifier: eqcqid=%d, fcpcqid=%d\n",
14391 cqid, cq->queue_id);
14392 return;
14393 }
14394
14395work_cq:
14396#if defined(CONFIG_SCSI_LPFC_DEBUG_FS)
14397 if (phba->ktime_on)
14398 cq->isr_timestamp = ktime_get_ns();
14399 else
14400 cq->isr_timestamp = 0;
14401#endif
14402 lpfc_sli4_sched_cq_work(phba, cq, cqid);
14403}
14404
/**
 * __lpfc_sli4_hba_process_cq - Process a fast-path completion queue
 * @cq: Pointer to CQ to be processed.
 * @poll_mode: Polling mode we were called from.
 *
 * This routine calls the cq processing routine with the handler for
 * fast-path CQEs, rescheduling the cq work if processing was not
 * complete and waking up the worker thread if new work was posted.
 **/
14421static void
14422__lpfc_sli4_hba_process_cq(struct lpfc_queue *cq,
14423 enum lpfc_poll_mode poll_mode)
14424{
14425 struct lpfc_hba *phba = cq->phba;
14426 unsigned long delay;
14427 bool workposted = false;
14428 int ret = 0;
14429
14430
14431 workposted |= __lpfc_sli4_process_cq(phba, cq, lpfc_sli4_fp_handle_cqe,
14432 &delay, poll_mode);
14433
14434 if (delay) {
14435 if (is_kdump_kernel())
14436 ret = queue_delayed_work(phba->wq, &cq->sched_irqwork,
14437 delay);
14438 else
14439 ret = queue_delayed_work_on(cq->chann, phba->wq,
14440 &cq->sched_irqwork, delay);
14441 if (!ret)
14442 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
14443 "0367 Cannot schedule queue work "
14444 "for cqid=%d on CPU %d\n",
14445 cq->queue_id, cq->chann);
14446 }
14447
14448
14449 if (workposted)
14450 lpfc_worker_wake_up(phba);
14451}
14452
/**
 * lpfc_sli4_hba_process_cq - fast-path work handler when started by worker
 * @work: Pointer to work element.
 *
 * Translates from the work element to the cq it relates to, then
 * processes the cq.
 **/
static void
lpfc_sli4_hba_process_cq(struct work_struct *work)
{
	struct lpfc_queue *cq = container_of(work, struct lpfc_queue, irqwork);

	__lpfc_sli4_hba_process_cq(cq, LPFC_QUEUE_WORK);
}

/**
 * lpfc_sli4_dly_hba_process_cq - fast-path work handler when started by timer
 * @work: pointer to work element
 *
 * Translates from the delayed work element and calls the fast-path handler.
 **/
static void
lpfc_sli4_dly_hba_process_cq(struct work_struct *work)
{
	struct lpfc_queue *cq = container_of(to_delayed_work(work),
					struct lpfc_queue, sched_irqwork);

	__lpfc_sli4_hba_process_cq(cq, LPFC_QUEUE_WORK);
}

/**
 * lpfc_sli4_hba_intr_handler - HBA interrupt handler to SLI-4 device
 * @irq: Interrupt number.
 * @dev_id: The device context pointer.
 *
 * This function is directly called from the PCI layer as an interrupt
 * service routine when the device is enabled with MSI-X multi-message
 * interrupt mode and there is a fast-path event in the HBA. However,
 * when the device is enabled with either MSI or Pin-IRQ interrupt mode,
 * this function is called as part of the device-level interrupt handler.
 * When the PCI slot is in error recovery or the HBA is undergoing
 * initialization, the interrupt handler does not process the interrupt.
 * This function is called without any lock held; it takes the hbalock
 * only to flush pending events when the link is down. Note that the
 * EQ-to-CQ mapping is one-to-one, so the EQ index equals the CQ index.
 *
 * This function returns IRQ_HANDLED when the interrupt is handled, else
 * it returns IRQ_NONE.
 **/
irqreturn_t
lpfc_sli4_hba_intr_handler(int irq, void *dev_id)
{
	struct lpfc_hba *phba;
	struct lpfc_hba_eq_hdl *hba_eq_hdl;
	struct lpfc_queue *fpeq;
	unsigned long iflag;
	int ecount = 0;
	int hba_eqidx;
	struct lpfc_eq_intr_info *eqi;

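	/* Get the driver's phba structure from the dev_id */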
	hba_eq_hdl = (struct lpfc_hba_eq_hdl *)dev_id;
	phba = hba_eq_hdl->phba;
	hba_eqidx = hba_eq_hdl->idx;

	if (unlikely(!phba))
		return IRQ_NONE;
	if (unlikely(!phba->sli4_hba.hdwq))
		return IRQ_NONE;

	/* Get to the EQ struct associated with this vector */
	fpeq = phba->sli4_hba.hba_eq_hdl[hba_eqidx].eq;
	if (unlikely(!fpeq))
		return IRQ_NONE;

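	/* Check device state for handling interrupt */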
	if (unlikely(lpfc_intr_state_check(phba))) {
		/* Check again for link_state with lock held */
		spin_lock_irqsave(&phba->hbalock, iflag);
		if (phba->link_state < LPFC_LINK_DOWN)
			/* Flush, clear interrupt, and rearm the EQ */
			lpfc_sli4_eqcq_flush(phba, fpeq);
		spin_unlock_irqrestore(&phba->hbalock, iflag);
		return IRQ_NONE;
	}

	eqi = this_cpu_ptr(phba->sli4_hba.eq_info);
	eqi->icnt++;

	fpeq->last_cpu = raw_smp_processor_id();

	if (eqi->icnt > LPFC_EQD_ISR_TRIGGER &&
	    fpeq->q_flag & HBA_EQ_DELAY_CHK &&
	    phba->cfg_auto_imax &&
	    fpeq->q_mode != LPFC_MAX_AUTO_EQ_DELAY &&
	    phba->sli.sli_flag & LPFC_SLI_USE_EQDR)
		lpfc_sli4_mod_hba_eq_delay(phba, fpeq, LPFC_MAX_AUTO_EQ_DELAY);

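	/* process and rearm the EQ */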
	ecount = lpfc_sli4_process_eq(phba, fpeq, LPFC_QUEUE_REARM);

	if (unlikely(ecount == 0)) {
		fpeq->EQ_no_entry++;
		if (phba->intr_type == MSIX)
			/* MSI-X treated interrupt served as no EQ share INT */
			lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
					"0358 MSI-X interrupt with no EQE\n");
		else
			/* Non MSI-X treated on interrupt as EQ share INT */
			return IRQ_NONE;
	}

	return IRQ_HANDLED;
}

/**
 * lpfc_sli4_intr_handler - Device-level interrupt handler for SLI-4 device
 * @irq: Interrupt number.
 * @dev_id: The device context pointer.
 *
 * This function is the device-level interrupt handler called from the PCI
 * layer when either MSI or Pin-IRQ interrupt mode is enabled and there is
 * an event in the HBA that requires driver attention. It invokes each
 * fast-path interrupt handler in turn to process the relevant HBA events.
 * This function is called without any lock held.
 *
 * This function returns IRQ_HANDLED when the interrupt is handled, else
 * it returns IRQ_NONE.
 **/
irqreturn_t
lpfc_sli4_intr_handler(int irq, void *dev_id)
{
	struct lpfc_hba *phba;
	irqreturn_t hba_irq_rc;
	bool hba_handled = false;
	int qidx;

	/* Get the driver's phba structure from the dev_id */
	phba = (struct lpfc_hba *)dev_id;

	if (unlikely(!phba))
		return IRQ_NONE;

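	/*
	 * Invoke fast-path host attention interrupt handling as appropriate.
	 */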
	for (qidx = 0; qidx < phba->cfg_irq_chann; qidx++) {
		hba_irq_rc = lpfc_sli4_hba_intr_handler(irq,
					&phba->sli4_hba.hba_eq_hdl[qidx]);
		if (hba_irq_rc == IRQ_HANDLED)
			hba_handled = true;
	}

	return hba_handled ? IRQ_HANDLED : IRQ_NONE;
}

void lpfc_sli4_poll_hbtimer(struct timer_list *t)
{
	struct lpfc_hba *phba = from_timer(phba, t, cpuhp_poll_timer);
	struct lpfc_queue *eq;
	int i = 0;

	rcu_read_lock();

	list_for_each_entry_rcu(eq, &phba->poll_list, _poll_list)
		i += lpfc_sli4_poll_eq(eq, LPFC_POLL_SLOWPATH);
	if (!list_empty(&phba->poll_list))
		mod_timer(&phba->cpuhp_poll_timer,
			  jiffies + msecs_to_jiffies(LPFC_POLL_HB));

	rcu_read_unlock();
}

inline int lpfc_sli4_poll_eq(struct lpfc_queue *eq, uint8_t path)
{
	struct lpfc_hba *phba = eq->phba;
	int i = 0;

	/*
	 * A read memory barrier pairs with the write barrier in
	 * __lpfc_sli4_switch_eqmode() so that the updated eq->mode
	 * is visible before it is tested below.
	 */
	smp_rmb();

	if (READ_ONCE(eq->mode) == LPFC_EQ_POLL)
		/*
		 * Process the EQ without rearming it; the polling
		 * timer or subsequent I/O on this EQ will pick up
		 * any completions that arrive after this pass.
		 */
		i = lpfc_sli4_process_eq(phba, eq, LPFC_QUEUE_NOARM);

	return i;
}

static inline void lpfc_sli4_add_to_poll_list(struct lpfc_queue *eq)
{
	struct lpfc_hba *phba = eq->phba;

	/* kickstart slowpath processing if needed */
	if (list_empty(&phba->poll_list))
		mod_timer(&phba->cpuhp_poll_timer,
			  jiffies + msecs_to_jiffies(LPFC_POLL_HB));

	list_add_rcu(&eq->_poll_list, &phba->poll_list);
	synchronize_rcu();
}

static inline void lpfc_sli4_remove_from_poll_list(struct lpfc_queue *eq)
{
	struct lpfc_hba *phba = eq->phba;

	/*
	 * Stop slowpath processing for this eq; kill the heartbeat
	 * timer once the poll list drains.
	 */
	list_del_rcu(&eq->_poll_list);
	synchronize_rcu();

	if (list_empty(&phba->poll_list))
		del_timer_sync(&phba->cpuhp_poll_timer);
}

void lpfc_sli4_cleanup_poll_list(struct lpfc_hba *phba)
{
	struct lpfc_queue *eq, *next;

	list_for_each_entry_safe(eq, next, &phba->poll_list, _poll_list)
		list_del(&eq->_poll_list);

	INIT_LIST_HEAD(&phba->poll_list);
	synchronize_rcu();
}

static inline void
__lpfc_sli4_switch_eqmode(struct lpfc_queue *eq, uint8_t mode)
{
	if (mode == eq->mode)
		return;
	/*
	 * Publish the new mode before touching the poll list. The
	 * write barrier pairs with the read barrier in
	 * lpfc_sli4_poll_eq() so that every CPU sees the new mode
	 * before the eq appears on (or disappears from) the list.
	 */
	WRITE_ONCE(eq->mode, mode);
	/* flush out the update */
	smp_wmb();

	/*
	 * Add this eq to the polling list and start polling. For
	 * a grace period both the interrupt handler and the poller
	 * may process the eq, which is fine: the queue_claimed
	 * synchronization handles it. Once all CPUs observe the
	 * polled state, the EQ is no longer rearmed and interrupts
	 * for it die off.
	 */
	mode ? lpfc_sli4_add_to_poll_list(eq) :
	       lpfc_sli4_remove_from_poll_list(eq);
}

void lpfc_sli4_start_polling(struct lpfc_queue *eq)
{
	__lpfc_sli4_switch_eqmode(eq, LPFC_EQ_POLL);
}

void lpfc_sli4_stop_polling(struct lpfc_queue *eq)
{
	struct lpfc_hba *phba = eq->phba;

	__lpfc_sli4_switch_eqmode(eq, LPFC_EQ_INTERRUPT);

	/*
	 * Rearm the EQ so the interrupt handler takes over again.
	 * Interrupts may not be fully re-enabled until every EQE on
	 * the queue has been processed, but the interrupt handler
	 * will eventually drain it.
	 */
	phba->sli4_hba.sli4_write_eq_db(phba, eq, 0, LPFC_QUEUE_REARM);
}

/**
 * lpfc_sli4_queue_free - free a queue structure and associated memory
 * @queue: The queue structure to free.
 *
 * This function frees a queue structure and the DMAable memory used for
 * the host resident queue. This function must be called after destroying
 * the queue on the HBA.
 **/
void
lpfc_sli4_queue_free(struct lpfc_queue *queue)
{
	struct lpfc_dmabuf *dmabuf;

	if (!queue)
		return;

	if (!list_empty(&queue->wq_list))
		list_del(&queue->wq_list);

	while (!list_empty(&queue->page_list)) {
		list_remove_head(&queue->page_list, dmabuf, struct lpfc_dmabuf,
				 list);
		dma_free_coherent(&queue->phba->pcidev->dev, queue->page_size,
				  dmabuf->virt, dmabuf->phys);
		kfree(dmabuf);
	}
	if (queue->rqbp) {
		lpfc_free_rq_buffer(queue->phba, queue);
		kfree(queue->rqbp);
	}

	if (!list_empty(&queue->cpu_list))
		list_del(&queue->cpu_list);

	kfree(queue);
}

/**
 * lpfc_sli4_queue_alloc - Allocate and initialize a queue structure
 * @phba: The HBA that this queue is being created on.
 * @page_size: The size of a queue page.
 * @entry_size: The size of each queue entry for this queue.
 * @entry_count: The number of entries that this queue will handle.
 * @cpu: The cpu that will primarily utilize this queue.
 *
 * This function allocates a queue structure and the DMAable memory used
 * for the host resident queue. This function must be called before
 * creating the queue on the HBA.
 **/
struct lpfc_queue *
lpfc_sli4_queue_alloc(struct lpfc_hba *phba, uint32_t page_size,
		      uint32_t entry_size, uint32_t entry_count, int cpu)
{
	struct lpfc_queue *queue;
	struct lpfc_dmabuf *dmabuf;
	uint32_t hw_page_size = phba->sli4_hba.pc_sli4_params.if_page_sz;
	uint16_t x, pgcnt;

	if (!phba->sli4_hba.pc_sli4_params.supported)
		hw_page_size = page_size;

	pgcnt = ALIGN(entry_size * entry_count, hw_page_size) / hw_page_size;

	/* If needed, adjust page count to match the max the adapter supports */
	if (pgcnt > phba->sli4_hba.pc_sli4_params.wqpcnt)
		pgcnt = phba->sli4_hba.pc_sli4_params.wqpcnt;

	queue = kzalloc_node(sizeof(*queue) + (sizeof(void *) * pgcnt),
			     GFP_KERNEL, cpu_to_node(cpu));
	if (!queue)
		return NULL;

	INIT_LIST_HEAD(&queue->list);
	INIT_LIST_HEAD(&queue->_poll_list);
	INIT_LIST_HEAD(&queue->wq_list);
	INIT_LIST_HEAD(&queue->wqfull_list);
	INIT_LIST_HEAD(&queue->page_list);
	INIT_LIST_HEAD(&queue->child_list);
	INIT_LIST_HEAD(&queue->cpu_list);

	/* Set queue parameters now.  If the system cannot provide memory
	 * resources, the free routine needs to know what was allocated.
	 */
	queue->page_count = pgcnt;
	queue->q_pgs = (void **)&queue[1];
	queue->entry_cnt_per_pg = hw_page_size / entry_size;
	queue->entry_size = entry_size;
	queue->entry_count = entry_count;
	queue->page_size = hw_page_size;
	queue->phba = phba;

	for (x = 0; x < queue->page_count; x++) {
		dmabuf = kzalloc_node(sizeof(*dmabuf), GFP_KERNEL,
				      dev_to_node(&phba->pcidev->dev));
		if (!dmabuf)
			goto out_fail;
		dmabuf->virt = dma_alloc_coherent(&phba->pcidev->dev,
						  hw_page_size, &dmabuf->phys,
						  GFP_KERNEL);
		if (!dmabuf->virt) {
			kfree(dmabuf);
			goto out_fail;
		}
		dmabuf->buffer_tag = x;
		list_add_tail(&dmabuf->list, &queue->page_list);
		/* use lpfc_sli4_qe to index a particular entry in this page */
		queue->q_pgs[x] = dmabuf->virt;
	}
	INIT_WORK(&queue->irqwork, lpfc_sli4_hba_process_cq);
	INIT_WORK(&queue->spwork, lpfc_sli4_sp_process_cq);
	INIT_DELAYED_WORK(&queue->sched_irqwork, lpfc_sli4_dly_hba_process_cq);
	INIT_DELAYED_WORK(&queue->sched_spwork, lpfc_sli4_dly_sp_process_cq);

	/* notify_interval will be set during q creation */

	return queue;
out_fail:
	lpfc_sli4_queue_free(queue);
	return NULL;
}

/**
 * lpfc_dual_chute_pci_bar_map - Map pci base address register to host memory
 * @phba: HBA structure that indicates port to create a queue on.
 * @pci_barset: PCI BAR set flag.
 *
 * This function maps the specified PCI BAR address to host memory, if not
 * already done, and returns the mapped address. The returned address can
 * be NULL.
 **/
static void __iomem *
lpfc_dual_chute_pci_bar_map(struct lpfc_hba *phba, uint16_t pci_barset)
{
	if (!phba->pcidev)
		return NULL;

	switch (pci_barset) {
	case WQ_PCI_BAR_0_AND_1:
		return phba->pci_bar0_memmap_p;
	case WQ_PCI_BAR_2_AND_3:
		return phba->pci_bar2_memmap_p;
	case WQ_PCI_BAR_4_AND_5:
		return phba->pci_bar4_memmap_p;
	default:
		break;
	}
	return NULL;
}

/**
 * lpfc_modify_hba_eq_delay - Modify Delay Multiplier on EQs
 * @phba: HBA structure that EQs are on.
 * @startq: The starting EQ index to modify.
 * @numq: The number of EQs (consecutive indexes) to modify.
 * @usdelay: amount of delay, in microseconds.
 *
 * This function revises the EQ delay on one or more EQs. The EQ delay
 * is set either by writing to a register (if supported by the SLI Port)
 * or by a MODIFY_EQ_DELAY mailbox command, which allows several EQs to
 * be updated at once.
 *
 * The @startq value is the first EQ index to change and @numq is how
 * many consecutive EQ indexes, starting at @startq, are to be changed.
 * The mailbox command is issued in polled mode, so this function waits
 * for it to finish before returning.
 **/
void
lpfc_modify_hba_eq_delay(struct lpfc_hba *phba, uint32_t startq,
			 uint32_t numq, uint32_t usdelay)
{
	struct lpfc_mbx_modify_eq_delay *eq_delay;
	LPFC_MBOXQ_t *mbox;
	struct lpfc_queue *eq;
	int cnt = 0, rc, length;
	uint32_t shdr_status, shdr_add_status;
	uint32_t dmult;
	int qidx;
	union lpfc_sli4_cfg_shdr *shdr;

	if (startq >= phba->cfg_irq_chann)
		return;

	if (usdelay > 0xFFFF) {
		lpfc_printf_log(phba, KERN_INFO, LOG_INIT | LOG_FCP | LOG_NVME,
				"6429 usdelay %d too large. Scaled down to "
				"0xFFFF.\n", usdelay);
		usdelay = 0xFFFF;
	}

	/* set values by EQ_DELAY register if supported */
	if (phba->sli.sli_flag & LPFC_SLI_USE_EQDR) {
		for (qidx = startq; qidx < phba->cfg_irq_chann; qidx++) {
			eq = phba->sli4_hba.hba_eq_hdl[qidx].eq;
			if (!eq)
				continue;

			lpfc_sli4_mod_hba_eq_delay(phba, eq, usdelay);

			if (++cnt >= numq)
				break;
		}
		return;
	}

	/* Otherwise, set values by mailbox cmd */

	mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!mbox) {
		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
				"6428 Failed allocating mailbox cmd buffer."
				" EQ delay was not set.\n");
		return;
	}
	length = (sizeof(struct lpfc_mbx_modify_eq_delay) -
		  sizeof(struct lpfc_sli4_cfg_mhdr));
	lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON,
			 LPFC_MBOX_OPCODE_MODIFY_EQ_DELAY,
			 length, LPFC_SLI4_MBX_EMBED);
	eq_delay = &mbox->u.mqe.un.eq_delay;

	/* Calculate delay multiplier from maximum interrupt per second */
	dmult = (usdelay * LPFC_DMULT_CONST) / LPFC_SEC_TO_USEC;
	if (dmult)
		dmult--;
	if (dmult > LPFC_DMULT_MAX)
		dmult = LPFC_DMULT_MAX;

	for (qidx = startq; qidx < phba->cfg_irq_chann; qidx++) {
		eq = phba->sli4_hba.hba_eq_hdl[qidx].eq;
		if (!eq)
			continue;
		eq->q_mode = usdelay;
		eq_delay->u.request.eq[cnt].eq_id = eq->queue_id;
		eq_delay->u.request.eq[cnt].phase = 0;
		eq_delay->u.request.eq[cnt].delay_multi = dmult;

		if (++cnt >= numq)
			break;
	}
	eq_delay->u.request.num_eq = cnt;

	mbox->vport = phba->pport;
	mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
	mbox->ctx_buf = NULL;
	mbox->ctx_ndlp = NULL;
	rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
	shdr = (union lpfc_sli4_cfg_shdr *) &eq_delay->header.cfg_shdr;
	shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
	shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
	if (shdr_status || shdr_add_status || rc) {
		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
				"2512 MODIFY_EQ_DELAY mailbox failed with "
				"status x%x add_status x%x, mbx status x%x\n",
				shdr_status, shdr_add_status, rc);
	}
	mempool_free(mbox, phba->mbox_mem_pool);
}

/**
 * lpfc_eq_create - Create an Event Queue on the HBA
 * @phba: HBA structure that indicates port to create a queue on.
 * @eq: The queue structure to use to create the event queue.
 * @imax: The maximum interrupt per second limit.
 *
 * This function creates an event queue, as detailed in @eq, on a port,
 * described by @phba, by sending an EQ_CREATE mailbox command to the HBA.
 *
 * The @eq struct is used to get the entry count and entry size that are
 * necessary to determine the number of pages to allocate and use for this
 * queue. The mailbox command is issued in polled mode, so this function
 * waits for it to finish before continuing.
 *
 * On success this function will return zero. If unable to allocate enough
 * memory this function will return -ENOMEM. If the queue create mailbox
 * command fails this function will return -ENXIO.
 **/
int
lpfc_eq_create(struct lpfc_hba *phba, struct lpfc_queue *eq, uint32_t imax)
{
	struct lpfc_mbx_eq_create *eq_create;
	LPFC_MBOXQ_t *mbox;
	int rc, length, status = 0;
	struct lpfc_dmabuf *dmabuf;
	uint32_t shdr_status, shdr_add_status;
	union lpfc_sli4_cfg_shdr *shdr;
	uint16_t dmult;
	uint32_t hw_page_size = phba->sli4_hba.pc_sli4_params.if_page_sz;

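	/* sanity check on queue memory */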
	if (!eq)
		return -ENODEV;
	if (!phba->sli4_hba.pc_sli4_params.supported)
		hw_page_size = SLI4_PAGE_SIZE;

	mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!mbox)
		return -ENOMEM;
	length = (sizeof(struct lpfc_mbx_eq_create) -
		  sizeof(struct lpfc_sli4_cfg_mhdr));
	lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON,
			 LPFC_MBOX_OPCODE_EQ_CREATE,
			 length, LPFC_SLI4_MBX_EMBED);
	eq_create = &mbox->u.mqe.un.eq_create;
	shdr = (union lpfc_sli4_cfg_shdr *) &eq_create->header.cfg_shdr;
	bf_set(lpfc_mbx_eq_create_num_pages, &eq_create->u.request,
	       eq->page_count);
	bf_set(lpfc_eq_context_size, &eq_create->u.request.context,
	       LPFC_EQE_SIZE);
	bf_set(lpfc_eq_context_valid, &eq_create->u.request.context, 1);

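	/* Use version 2 of CREATE_EQ if eqav is set */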
	if (phba->sli4_hba.pc_sli4_params.eqav) {
		bf_set(lpfc_mbox_hdr_version, &shdr->request,
		       LPFC_Q_CREATE_VERSION_2);
		bf_set(lpfc_eq_context_autovalid, &eq_create->u.request.context,
		       phba->sli4_hba.pc_sli4_params.eqav);
	}

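	/* don't setup delay multiplier using EQ_CREATE */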
	dmult = 0;
	bf_set(lpfc_eq_context_delay_multi, &eq_create->u.request.context,
	       dmult);
	switch (eq->entry_count) {
	default:
		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
				"0360 Unsupported EQ count. (%d)\n",
				eq->entry_count);
		if (eq->entry_count < 256) {
			status = -EINVAL;
			goto out;
		}
		fallthrough;	/* otherwise default to smallest count */
	case 256:
		bf_set(lpfc_eq_context_count, &eq_create->u.request.context,
		       LPFC_EQ_CNT_256);
		break;
	case 512:
		bf_set(lpfc_eq_context_count, &eq_create->u.request.context,
		       LPFC_EQ_CNT_512);
		break;
	case 1024:
		bf_set(lpfc_eq_context_count, &eq_create->u.request.context,
		       LPFC_EQ_CNT_1024);
		break;
	case 2048:
		bf_set(lpfc_eq_context_count, &eq_create->u.request.context,
		       LPFC_EQ_CNT_2048);
		break;
	case 4096:
		bf_set(lpfc_eq_context_count, &eq_create->u.request.context,
		       LPFC_EQ_CNT_4096);
		break;
	}
	list_for_each_entry(dmabuf, &eq->page_list, list) {
		memset(dmabuf->virt, 0, hw_page_size);
		eq_create->u.request.page[dmabuf->buffer_tag].addr_lo =
					putPaddrLow(dmabuf->phys);
		eq_create->u.request.page[dmabuf->buffer_tag].addr_hi =
					putPaddrHigh(dmabuf->phys);
	}
	mbox->vport = phba->pport;
	mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
	mbox->ctx_buf = NULL;
	mbox->ctx_ndlp = NULL;
	rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
	shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
	shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
	if (shdr_status || shdr_add_status || rc) {
		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
				"2500 EQ_CREATE mailbox failed with "
				"status x%x add_status x%x, mbx status x%x\n",
				shdr_status, shdr_add_status, rc);
		status = -ENXIO;
	}
	eq->type = LPFC_EQ;
	eq->subtype = LPFC_NONE;
	eq->queue_id = bf_get(lpfc_mbx_eq_create_q_id, &eq_create->u.response);
	if (eq->queue_id == 0xFFFF)
		status = -ENXIO;
	eq->host_index = 0;
	eq->notify_interval = LPFC_EQ_NOTIFY_INTRVL;
	eq->max_proc_limit = LPFC_EQ_MAX_PROC_LIMIT;
out:
	mempool_free(mbox, phba->mbox_mem_pool);
	return status;
}

static int lpfc_cq_poll_handler(struct irq_poll *iop, int budget)
{
	struct lpfc_queue *cq = container_of(iop, struct lpfc_queue, iop);

	__lpfc_sli4_hba_process_cq(cq, LPFC_IRQ_POLL);

	return 1;
}

/**
 * lpfc_cq_create - Create a Completion Queue on the HBA
 * @phba: HBA structure that indicates port to create a queue on.
 * @cq: The queue structure to use to create the completion queue.
 * @eq: The event queue to bind this completion queue to.
 * @type: The queue type to create.
 * @subtype: The queue subtype to create.
 *
 * This function creates a completion queue, as detailed in @cq, on a port,
 * described by @phba, by sending a CQ_CREATE mailbox command to the HBA.
 *
 * The @cq struct is used to get the entry count and entry size that are
 * necessary to determine the number of pages to allocate and use for this
 * queue. The @eq indicates which event queue to bind this completion queue
 * to. The mailbox command is issued in polled mode, so this function waits
 * for it to finish before continuing.
 *
 * On success this function will return zero. If unable to allocate enough
 * memory this function will return -ENOMEM. If the queue create mailbox
 * command fails this function will return -ENXIO.
 **/
int
lpfc_cq_create(struct lpfc_hba *phba, struct lpfc_queue *cq,
	       struct lpfc_queue *eq, uint32_t type, uint32_t subtype)
{
	struct lpfc_mbx_cq_create *cq_create;
	struct lpfc_dmabuf *dmabuf;
	LPFC_MBOXQ_t *mbox;
	int rc, length, status = 0;
	uint32_t shdr_status, shdr_add_status;
	union lpfc_sli4_cfg_shdr *shdr;

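	/* sanity check on queue memory */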
	if (!cq || !eq)
		return -ENODEV;

	mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!mbox)
		return -ENOMEM;
	length = (sizeof(struct lpfc_mbx_cq_create) -
		  sizeof(struct lpfc_sli4_cfg_mhdr));
	lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON,
			 LPFC_MBOX_OPCODE_CQ_CREATE,
			 length, LPFC_SLI4_MBX_EMBED);
	cq_create = &mbox->u.mqe.un.cq_create;
	shdr = (union lpfc_sli4_cfg_shdr *) &cq_create->header.cfg_shdr;
	bf_set(lpfc_mbx_cq_create_num_pages, &cq_create->u.request,
	       cq->page_count);
	bf_set(lpfc_cq_context_event, &cq_create->u.request.context, 1);
	bf_set(lpfc_cq_context_valid, &cq_create->u.request.context, 1);
	bf_set(lpfc_mbox_hdr_version, &shdr->request,
	       phba->sli4_hba.pc_sli4_params.cqv);
	if (phba->sli4_hba.pc_sli4_params.cqv == LPFC_Q_CREATE_VERSION_2) {
		bf_set(lpfc_mbx_cq_create_page_size, &cq_create->u.request,
		       (cq->page_size / SLI4_PAGE_SIZE));
		bf_set(lpfc_cq_eq_id_2, &cq_create->u.request.context,
		       eq->queue_id);
		bf_set(lpfc_cq_context_autovalid, &cq_create->u.request.context,
		       phba->sli4_hba.pc_sli4_params.cqav);
	} else {
		bf_set(lpfc_cq_eq_id, &cq_create->u.request.context,
		       eq->queue_id);
	}
	switch (cq->entry_count) {
	case 2048:
	case 4096:
		if (phba->sli4_hba.pc_sli4_params.cqv ==
		    LPFC_Q_CREATE_VERSION_2) {
			cq_create->u.request.context.lpfc_cq_context_count =
				cq->entry_count;
			bf_set(lpfc_cq_context_count,
			       &cq_create->u.request.context,
			       LPFC_CQ_CNT_WORD7);
			break;
		}
		fallthrough;
	default:
		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
				"0361 Unsupported CQ count: "
				"entry cnt %d sz %d pg cnt %d\n",
				cq->entry_count, cq->entry_size,
				cq->page_count);
		if (cq->entry_count < 256) {
			status = -EINVAL;
			goto out;
		}
		fallthrough;	/* otherwise default to smallest count */
	case 256:
		bf_set(lpfc_cq_context_count, &cq_create->u.request.context,
		       LPFC_CQ_CNT_256);
		break;
	case 512:
		bf_set(lpfc_cq_context_count, &cq_create->u.request.context,
		       LPFC_CQ_CNT_512);
		break;
	case 1024:
		bf_set(lpfc_cq_context_count, &cq_create->u.request.context,
		       LPFC_CQ_CNT_1024);
		break;
	}
	list_for_each_entry(dmabuf, &cq->page_list, list) {
		memset(dmabuf->virt, 0, cq->page_size);
		cq_create->u.request.page[dmabuf->buffer_tag].addr_lo =
					putPaddrLow(dmabuf->phys);
		cq_create->u.request.page[dmabuf->buffer_tag].addr_hi =
					putPaddrHigh(dmabuf->phys);
	}
	rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);

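	/* The IOCTL status is embedded in the mailbox subheader. */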
	shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
	shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
	if (shdr_status || shdr_add_status || rc) {
		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
				"2501 CQ_CREATE mailbox failed with "
				"status x%x add_status x%x, mbx status x%x\n",
				shdr_status, shdr_add_status, rc);
		status = -ENXIO;
		goto out;
	}
	cq->queue_id = bf_get(lpfc_mbx_cq_create_q_id, &cq_create->u.response);
	if (cq->queue_id == 0xFFFF) {
		status = -ENXIO;
		goto out;
	}
	/* link the cq onto the parent eq child list */
	list_add_tail(&cq->list, &eq->child_list);
	/* Set up completion queue's type and subtype */
	cq->type = type;
	cq->subtype = subtype;
	cq->assoc_qid = eq->queue_id;
	cq->assoc_qp = eq;
	cq->host_index = 0;
	cq->notify_interval = LPFC_CQ_NOTIFY_INTRVL;
	cq->max_proc_limit = min(phba->cfg_cq_max_proc_limit, cq->entry_count);

	if (cq->queue_id > phba->sli4_hba.cq_max)
		phba->sli4_hba.cq_max = cq->queue_id;

	irq_poll_init(&cq->iop, LPFC_IRQ_POLL_WEIGHT, lpfc_cq_poll_handler);
out:
	mempool_free(mbox, phba->mbox_mem_pool);
	return status;
}

/**
 * lpfc_cq_create_set - Create a set of Completion Queues on the HBA for MRQ
 * @phba: HBA structure that indicates port to create a queue on.
 * @cqp: The queue structure array to use to create the completion queues.
 * @hdwq: The hardware queue array with the EQs to bind the CQs to.
 * @type: The queue type to create.
 * @subtype: The queue subtype to create.
 *
 * This function creates a set of completion queues, as detailed in @cqp,
 * on a port, described by @phba, by sending a CQ_CREATE_SET mailbox
 * command to the HBA.
 *
 * The @cqp struct array is used to get the entry count and entry size
 * that are necessary to determine the number of pages to allocate and use
 * for these queues. The @hdwq array indicates which event queue each
 * completion queue is bound to. The mailbox command is issued in polled
 * mode, so this function waits for it to finish before continuing.
 *
 * On success this function will return zero. If unable to allocate enough
 * memory this function will return -ENOMEM. If the queue create mailbox
 * command fails this function will return -ENXIO.
 **/
int
lpfc_cq_create_set(struct lpfc_hba *phba, struct lpfc_queue **cqp,
		   struct lpfc_sli4_hdw_queue *hdwq, uint32_t type,
		   uint32_t subtype)
{
	struct lpfc_queue *cq;
	struct lpfc_queue *eq;
	struct lpfc_mbx_cq_create_set *cq_set;
	struct lpfc_dmabuf *dmabuf;
	LPFC_MBOXQ_t *mbox;
	int rc, length, alloclen, status = 0;
	int cnt, idx, numcq, page_idx = 0;
	uint32_t shdr_status, shdr_add_status;
	union lpfc_sli4_cfg_shdr *shdr;
	uint32_t hw_page_size = phba->sli4_hba.pc_sli4_params.if_page_sz;

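	/* sanity check on queue memory */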
	numcq = phba->cfg_nvmet_mrq;
	if (!cqp || !hdwq || !numcq)
		return -ENODEV;

	mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!mbox)
		return -ENOMEM;

	length = sizeof(struct lpfc_mbx_cq_create_set);
	length += ((numcq * cqp[0]->page_count) *
		   sizeof(struct dma_address));
	alloclen = lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_FCOE,
			LPFC_MBOX_OPCODE_FCOE_CQ_CREATE_SET, length,
			LPFC_SLI4_MBX_NEMBED);
	if (alloclen < length) {
		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
				"3098 Allocated DMA memory size (%d) is "
				"less than the requested DMA memory size "
				"(%d)\n", alloclen, length);
		status = -ENOMEM;
		goto out;
	}
	cq_set = mbox->sge_array->addr[0];
	shdr = (union lpfc_sli4_cfg_shdr *)&cq_set->cfg_shdr;
	bf_set(lpfc_mbox_hdr_version, &shdr->request, 0);

	for (idx = 0; idx < numcq; idx++) {
		cq = cqp[idx];
		eq = hdwq[idx].hba_eq;
		if (!cq || !eq) {
			status = -ENOMEM;
			goto out;
		}
		if (!phba->sli4_hba.pc_sli4_params.supported)
			hw_page_size = cq->page_size;

		switch (idx) {
		case 0:
			bf_set(lpfc_mbx_cq_create_set_page_size,
			       &cq_set->u.request,
			       (hw_page_size / SLI4_PAGE_SIZE));
			bf_set(lpfc_mbx_cq_create_set_num_pages,
			       &cq_set->u.request, cq->page_count);
			bf_set(lpfc_mbx_cq_create_set_evt,
			       &cq_set->u.request, 1);
			bf_set(lpfc_mbx_cq_create_set_valid,
			       &cq_set->u.request, 1);
			bf_set(lpfc_mbx_cq_create_set_cqe_size,
			       &cq_set->u.request, 0);
			bf_set(lpfc_mbx_cq_create_set_num_cq,
			       &cq_set->u.request, numcq);
			bf_set(lpfc_mbx_cq_create_set_autovalid,
			       &cq_set->u.request,
			       phba->sli4_hba.pc_sli4_params.cqav);
			switch (cq->entry_count) {
			case 2048:
			case 4096:
				if (phba->sli4_hba.pc_sli4_params.cqv ==
				    LPFC_Q_CREATE_VERSION_2) {
					bf_set(lpfc_mbx_cq_create_set_cqe_cnt,
					       &cq_set->u.request,
					       cq->entry_count);
					bf_set(lpfc_mbx_cq_create_set_cqe_cnt,
					       &cq_set->u.request,
					       LPFC_CQ_CNT_WORD7);
					break;
				}
				fallthrough;
			default:
				lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
						"3118 Bad CQ count. (%d)\n",
						cq->entry_count);
				if (cq->entry_count < 256) {
					status = -EINVAL;
					goto out;
				}
				fallthrough;	/* otherwise default to smallest */
			case 256:
				bf_set(lpfc_mbx_cq_create_set_cqe_cnt,
				       &cq_set->u.request, LPFC_CQ_CNT_256);
				break;
			case 512:
				bf_set(lpfc_mbx_cq_create_set_cqe_cnt,
				       &cq_set->u.request, LPFC_CQ_CNT_512);
				break;
			case 1024:
				bf_set(lpfc_mbx_cq_create_set_cqe_cnt,
				       &cq_set->u.request, LPFC_CQ_CNT_1024);
				break;
			}
			bf_set(lpfc_mbx_cq_create_set_eq_id0,
			       &cq_set->u.request, eq->queue_id);
			break;
		case 1:
			bf_set(lpfc_mbx_cq_create_set_eq_id1,
			       &cq_set->u.request, eq->queue_id);
			break;
		case 2:
			bf_set(lpfc_mbx_cq_create_set_eq_id2,
			       &cq_set->u.request, eq->queue_id);
			break;
		case 3:
			bf_set(lpfc_mbx_cq_create_set_eq_id3,
			       &cq_set->u.request, eq->queue_id);
			break;
		case 4:
			bf_set(lpfc_mbx_cq_create_set_eq_id4,
			       &cq_set->u.request, eq->queue_id);
			break;
		case 5:
			bf_set(lpfc_mbx_cq_create_set_eq_id5,
			       &cq_set->u.request, eq->queue_id);
			break;
		case 6:
			bf_set(lpfc_mbx_cq_create_set_eq_id6,
			       &cq_set->u.request, eq->queue_id);
			break;
		case 7:
			bf_set(lpfc_mbx_cq_create_set_eq_id7,
			       &cq_set->u.request, eq->queue_id);
			break;
		case 8:
			bf_set(lpfc_mbx_cq_create_set_eq_id8,
			       &cq_set->u.request, eq->queue_id);
			break;
		case 9:
			bf_set(lpfc_mbx_cq_create_set_eq_id9,
			       &cq_set->u.request, eq->queue_id);
			break;
		case 10:
			bf_set(lpfc_mbx_cq_create_set_eq_id10,
			       &cq_set->u.request, eq->queue_id);
			break;
		case 11:
			bf_set(lpfc_mbx_cq_create_set_eq_id11,
			       &cq_set->u.request, eq->queue_id);
			break;
		case 12:
			bf_set(lpfc_mbx_cq_create_set_eq_id12,
			       &cq_set->u.request, eq->queue_id);
			break;
		case 13:
			bf_set(lpfc_mbx_cq_create_set_eq_id13,
			       &cq_set->u.request, eq->queue_id);
			break;
		case 14:
			bf_set(lpfc_mbx_cq_create_set_eq_id14,
			       &cq_set->u.request, eq->queue_id);
			break;
		case 15:
			bf_set(lpfc_mbx_cq_create_set_eq_id15,
			       &cq_set->u.request, eq->queue_id);
			break;
		}

		/* link the cq onto the parent eq child list */
		list_add_tail(&cq->list, &eq->child_list);
		/* Set up completion queue's type and subtype */
		cq->type = type;
		cq->subtype = subtype;
		cq->assoc_qid = eq->queue_id;
		cq->assoc_qp = eq;
		cq->host_index = 0;
		cq->notify_interval = LPFC_CQ_NOTIFY_INTRVL;
		cq->max_proc_limit = min(phba->cfg_cq_max_proc_limit,
					 cq->entry_count);
		cq->chann = idx;

		rc = 0;
		list_for_each_entry(dmabuf, &cq->page_list, list) {
			memset(dmabuf->virt, 0, hw_page_size);
			cnt = page_idx + dmabuf->buffer_tag;
			cq_set->u.request.page[cnt].addr_lo =
					putPaddrLow(dmabuf->phys);
			cq_set->u.request.page[cnt].addr_hi =
					putPaddrHigh(dmabuf->phys);
			rc++;
		}
		page_idx += rc;
	}

	rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);

	/* The IOCTL status is embedded in the mailbox subheader. */
	shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
	shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
	if (shdr_status || shdr_add_status || rc) {
		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
				"3119 CQ_CREATE_SET mailbox failed with "
				"status x%x add_status x%x, mbx status x%x\n",
				shdr_status, shdr_add_status, rc);
		status = -ENXIO;
		goto out;
	}
	rc = bf_get(lpfc_mbx_cq_create_set_base_id, &cq_set->u.response);
	if (rc == 0xFFFF) {
		status = -ENXIO;
		goto out;
	}

	for (idx = 0; idx < numcq; idx++) {
		cq = cqp[idx];
		cq->queue_id = rc + idx;
		if (cq->queue_id > phba->sli4_hba.cq_max)
			phba->sli4_hba.cq_max = cq->queue_id;
	}

out:
	lpfc_sli4_mbox_cmd_free(phba, mbox);
	return status;
}

/**
 * lpfc_mq_create_fb_init - Send MCC_CREATE without async events registration
 * @phba: HBA structure that indicates port to create a queue on.
 * @mq: The queue structure to use to create the mailbox queue.
 * @mbox: An allocated pointer to type LPFC_MBOXQ_t
 * @cq: The completion queue to associate with this mq.
 *
 * This function provides failback (fb) functionality when the mq_create_ext
 * fails on older FW generations. Its purpose is otherwise identical to
 * mq_create_ext.
 *
 * This routine cannot fail as all attributes were previously accessed and
 * initialized in mq_create_ext.
 **/
static void
lpfc_mq_create_fb_init(struct lpfc_hba *phba, struct lpfc_queue *mq,
		       LPFC_MBOXQ_t *mbox, struct lpfc_queue *cq)
{
	struct lpfc_mbx_mq_create *mq_create;
	struct lpfc_dmabuf *dmabuf;
	int length;

	length = (sizeof(struct lpfc_mbx_mq_create) -
		  sizeof(struct lpfc_sli4_cfg_mhdr));
	lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON,
			 LPFC_MBOX_OPCODE_MQ_CREATE,
			 length, LPFC_SLI4_MBX_EMBED);
	mq_create = &mbox->u.mqe.un.mq_create;
	bf_set(lpfc_mbx_mq_create_num_pages, &mq_create->u.request,
	       mq->page_count);
	bf_set(lpfc_mq_context_cq_id, &mq_create->u.request.context,
	       cq->queue_id);
	bf_set(lpfc_mq_context_valid, &mq_create->u.request.context, 1);
	switch (mq->entry_count) {
	case 16:
		bf_set(lpfc_mq_context_ring_size, &mq_create->u.request.context,
		       LPFC_MQ_RING_SIZE_16);
		break;
	case 32:
		bf_set(lpfc_mq_context_ring_size, &mq_create->u.request.context,
		       LPFC_MQ_RING_SIZE_32);
		break;
	case 64:
		bf_set(lpfc_mq_context_ring_size, &mq_create->u.request.context,
		       LPFC_MQ_RING_SIZE_64);
		break;
	case 128:
		bf_set(lpfc_mq_context_ring_size, &mq_create->u.request.context,
		       LPFC_MQ_RING_SIZE_128);
		break;
	}
	list_for_each_entry(dmabuf, &mq->page_list, list) {
		mq_create->u.request.page[dmabuf->buffer_tag].addr_lo =
			putPaddrLow(dmabuf->phys);
		mq_create->u.request.page[dmabuf->buffer_tag].addr_hi =
			putPaddrHigh(dmabuf->phys);
	}
}

/**
 * lpfc_mq_create - Create a mailbox Queue on the HBA
 * @phba: HBA structure that indicates port to create a queue on.
 * @mq: The queue structure to use to create the mailbox queue.
 * @cq: The completion queue to associate with this mq.
 * @subtype: The queue's subtype.
 *
 * This function creates a mailbox queue, as detailed in @mq, on a port,
 * described by @phba, by sending a MQ_CREATE mailbox command to the HBA.
 *
 * The @mq struct is used to get the entry count and entry size that are
 * necessary to determine the number of pages to allocate and use for this
 * queue. The mailbox command is issued in polled mode, so this function
 * waits for it to finish before continuing.
 *
 * On success this function will return zero. If unable to allocate enough
 * memory this function will return -ENOMEM. If the queue create mailbox
 * command fails this function will return -ENXIO.
 **/
int32_t
lpfc_mq_create(struct lpfc_hba *phba, struct lpfc_queue *mq,
	       struct lpfc_queue *cq, uint32_t subtype)
{
	struct lpfc_mbx_mq_create *mq_create;
	struct lpfc_mbx_mq_create_ext *mq_create_ext;
	struct lpfc_dmabuf *dmabuf;
	LPFC_MBOXQ_t *mbox;
	int rc, length, status = 0;
	uint32_t shdr_status, shdr_add_status;
	union lpfc_sli4_cfg_shdr *shdr;
	uint32_t hw_page_size = phba->sli4_hba.pc_sli4_params.if_page_sz;

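	/* sanity check on queue memory */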
	if (!mq || !cq)
		return -ENODEV;
	if (!phba->sli4_hba.pc_sli4_params.supported)
		hw_page_size = SLI4_PAGE_SIZE;

	mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!mbox)
		return -ENOMEM;
	length = (sizeof(struct lpfc_mbx_mq_create_ext) -
		  sizeof(struct lpfc_sli4_cfg_mhdr));
	lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON,
			 LPFC_MBOX_OPCODE_MQ_CREATE_EXT,
			 length, LPFC_SLI4_MBX_EMBED);

	mq_create_ext = &mbox->u.mqe.un.mq_create_ext;
	shdr = (union lpfc_sli4_cfg_shdr *) &mq_create_ext->header.cfg_shdr;
	bf_set(lpfc_mbx_mq_create_ext_num_pages,
	       &mq_create_ext->u.request, mq->page_count);
	bf_set(lpfc_mbx_mq_create_ext_async_evt_link,
	       &mq_create_ext->u.request, 1);
	bf_set(lpfc_mbx_mq_create_ext_async_evt_fip,
	       &mq_create_ext->u.request, 1);
	bf_set(lpfc_mbx_mq_create_ext_async_evt_group5,
	       &mq_create_ext->u.request, 1);
	bf_set(lpfc_mbx_mq_create_ext_async_evt_fc,
	       &mq_create_ext->u.request, 1);
	bf_set(lpfc_mbx_mq_create_ext_async_evt_sli,
	       &mq_create_ext->u.request, 1);
	bf_set(lpfc_mq_context_valid, &mq_create_ext->u.request.context, 1);
	bf_set(lpfc_mbox_hdr_version, &shdr->request,
	       phba->sli4_hba.pc_sli4_params.mqv);
	if (phba->sli4_hba.pc_sli4_params.mqv == LPFC_Q_CREATE_VERSION_1)
		bf_set(lpfc_mbx_mq_create_ext_cq_id, &mq_create_ext->u.request,
		       cq->queue_id);
	else
		bf_set(lpfc_mq_context_cq_id, &mq_create_ext->u.request.context,
		       cq->queue_id);
	switch (mq->entry_count) {
	default:
		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
				"0362 Unsupported MQ count. (%d)\n",
				mq->entry_count);
		if (mq->entry_count < 16) {
			status = -EINVAL;
			goto out;
		}
		fallthrough;	/* otherwise default to smallest count */
	case 16:
		bf_set(lpfc_mq_context_ring_size,
		       &mq_create_ext->u.request.context,
		       LPFC_MQ_RING_SIZE_16);
		break;
	case 32:
		bf_set(lpfc_mq_context_ring_size,
		       &mq_create_ext->u.request.context,
		       LPFC_MQ_RING_SIZE_32);
		break;
	case 64:
		bf_set(lpfc_mq_context_ring_size,
		       &mq_create_ext->u.request.context,
		       LPFC_MQ_RING_SIZE_64);
		break;
	case 128:
		bf_set(lpfc_mq_context_ring_size,
		       &mq_create_ext->u.request.context,
		       LPFC_MQ_RING_SIZE_128);
		break;
	}
	list_for_each_entry(dmabuf, &mq->page_list, list) {
		memset(dmabuf->virt, 0, hw_page_size);
		mq_create_ext->u.request.page[dmabuf->buffer_tag].addr_lo =
			putPaddrLow(dmabuf->phys);
		mq_create_ext->u.request.page[dmabuf->buffer_tag].addr_hi =
			putPaddrHigh(dmabuf->phys);
	}
	rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
	mq->queue_id = bf_get(lpfc_mbx_mq_create_q_id,
			      &mq_create_ext->u.response);
	if (rc != MBX_SUCCESS) {
		lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
				"2795 MQ_CREATE_EXT failed with "
				"status x%x. Failback to MQ_CREATE.\n",
				rc);
		lpfc_mq_create_fb_init(phba, mq, mbox, cq);
		mq_create = &mbox->u.mqe.un.mq_create;
		rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
		shdr = (union lpfc_sli4_cfg_shdr *) &mq_create->header.cfg_shdr;
		mq->queue_id = bf_get(lpfc_mbx_mq_create_q_id,
				      &mq_create->u.response);
	}

	/* The IOCTL status is embedded in the mailbox subheader. */
	shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
	shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
	if (shdr_status || shdr_add_status || rc) {
		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
				"2502 MQ_CREATE mailbox failed with "
				"status x%x add_status x%x, mbx status x%x\n",
				shdr_status, shdr_add_status, rc);
		status = -ENXIO;
		goto out;
	}
	if (mq->queue_id == 0xFFFF) {
		status = -ENXIO;
		goto out;
	}
	mq->type = LPFC_MQ;
	mq->assoc_qid = cq->queue_id;
	mq->subtype = subtype;
	mq->host_index = 0;
	mq->hba_index = 0;

	/* link the mq onto the parent cq child list */
	list_add_tail(&mq->list, &cq->child_list);
out:
	mempool_free(mbox, phba->mbox_mem_pool);
	return status;
}

/**
 * lpfc_wq_create - Create a Work Queue on the HBA
 * @phba: HBA structure that indicates port to create a queue on.
 * @wq: The queue structure to use to create the work queue.
 * @cq: The completion queue to bind this work queue to.
 * @subtype: The subtype of the work queue indicating its functionality.
 *
 * This function creates a work queue, as detailed in @wq, on a port,
 * described by @phba, by sending a WQ_CREATE mailbox command to the HBA.
 *
 * The @wq struct is used to get the entry count and entry size that are
 * necessary to determine the number of pages to allocate and use for this
 * queue. The @cq indicates which completion queue to bind this work queue
 * to. The mailbox command is issued in polled mode, so this function waits
 * for it to finish before continuing.
 *
 * On success this function will return zero. If unable to allocate enough
 * memory this function will return -ENOMEM. If the queue create mailbox
 * command fails this function will return -ENXIO.
 **/
int
lpfc_wq_create(struct lpfc_hba *phba, struct lpfc_queue *wq,
	       struct lpfc_queue *cq, uint32_t subtype)
{
	struct lpfc_mbx_wq_create *wq_create;
	struct lpfc_dmabuf *dmabuf;
	LPFC_MBOXQ_t *mbox;
	int rc, length, status = 0;
	uint32_t shdr_status, shdr_add_status;
	union lpfc_sli4_cfg_shdr *shdr;
	uint32_t hw_page_size = phba->sli4_hba.pc_sli4_params.if_page_sz;
	struct dma_address *page;
	void __iomem *bar_memmap_p;
	uint32_t db_offset;
	uint16_t pci_barset;
	uint8_t dpp_barset;
	uint32_t dpp_offset;
	uint8_t wq_create_version;
#ifdef CONFIG_X86
	unsigned long pg_addr;
#endif

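	/* sanity check on queue memory */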
	if (!wq || !cq)
		return -ENODEV;
	if (!phba->sli4_hba.pc_sli4_params.supported)
		hw_page_size = wq->page_size;

	mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!mbox)
		return -ENOMEM;
	length = (sizeof(struct lpfc_mbx_wq_create) -
		  sizeof(struct lpfc_sli4_cfg_mhdr));
	lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_FCOE,
			 LPFC_MBOX_OPCODE_FCOE_WQ_CREATE,
			 length, LPFC_SLI4_MBX_EMBED);
	wq_create = &mbox->u.mqe.un.wq_create;
	shdr = (union lpfc_sli4_cfg_shdr *) &wq_create->header.cfg_shdr;
	bf_set(lpfc_mbx_wq_create_num_pages, &wq_create->u.request,
	       wq->page_count);
	bf_set(lpfc_mbx_wq_create_cq_id, &wq_create->u.request,
	       cq->queue_id);

	bf_set(lpfc_mbox_hdr_version, &shdr->request,
	       phba->sli4_hba.pc_sli4_params.wqv);

	/* Use CREATE_WQ version 1 when the port supports 128-byte WQEs
	 * or the WQ needs pages larger than the default SLI4 page size.
	 */
	if ((phba->sli4_hba.pc_sli4_params.wqsize & LPFC_WQ_SZ128_SUPPORT) ||
	    (wq->page_size > SLI4_PAGE_SIZE))
		wq_create_version = LPFC_Q_CREATE_VERSION_1;
	else
		wq_create_version = LPFC_Q_CREATE_VERSION_0;

	switch (wq_create_version) {
	case LPFC_Q_CREATE_VERSION_1:
		bf_set(lpfc_mbx_wq_create_wqe_count, &wq_create->u.request_1,
		       wq->entry_count);
		bf_set(lpfc_mbox_hdr_version, &shdr->request,
		       LPFC_Q_CREATE_VERSION_1);

		switch (wq->entry_size) {
		default:
		case 64:
			bf_set(lpfc_mbx_wq_create_wqe_size,
			       &wq_create->u.request_1,
			       LPFC_WQ_WQE_SIZE_64);
			break;
		case 128:
			bf_set(lpfc_mbx_wq_create_wqe_size,
			       &wq_create->u.request_1,
			       LPFC_WQ_WQE_SIZE_128);
			break;
		}
		/* Request DPP by default */
		bf_set(lpfc_mbx_wq_create_dpp_req, &wq_create->u.request_1, 1);
		bf_set(lpfc_mbx_wq_create_page_size,
		       &wq_create->u.request_1,
		       (wq->page_size / SLI4_PAGE_SIZE));
		page = wq_create->u.request_1.page;
		break;
	default:
		page = wq_create->u.request.page;
		break;
	}

	list_for_each_entry(dmabuf, &wq->page_list, list) {
		memset(dmabuf->virt, 0, hw_page_size);
		page[dmabuf->buffer_tag].addr_lo = putPaddrLow(dmabuf->phys);
		page[dmabuf->buffer_tag].addr_hi = putPaddrHigh(dmabuf->phys);
	}

	if (phba->sli4_hba.fw_func_mode & LPFC_DUA_MODE)
		bf_set(lpfc_mbx_wq_create_dua, &wq_create->u.request, 1);

	rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
	/* The IOCTL status is embedded in the mailbox subheader. */
	shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
	shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
	if (shdr_status || shdr_add_status || rc) {
		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
				"2503 WQ_CREATE mailbox failed with "
				"status x%x add_status x%x, mbx status x%x\n",
				shdr_status, shdr_add_status, rc);
		status = -ENXIO;
		goto out;
	}

	if (wq_create_version == LPFC_Q_CREATE_VERSION_0)
		wq->queue_id = bf_get(lpfc_mbx_wq_create_q_id,
				      &wq_create->u.response);
	else
		wq->queue_id = bf_get(lpfc_mbx_wq_create_v1_q_id,
				      &wq_create->u.response_1);

	if (wq->queue_id == 0xFFFF) {
		status = -ENXIO;
		goto out;
	}

	wq->db_format = LPFC_DB_LIST_FORMAT;
	if (wq_create_version == LPFC_Q_CREATE_VERSION_0) {
		if (phba->sli4_hba.fw_func_mode & LPFC_DUA_MODE) {
			wq->db_format = bf_get(lpfc_mbx_wq_create_db_format,
					       &wq_create->u.response);
			if ((wq->db_format != LPFC_DB_LIST_FORMAT) &&
			    (wq->db_format != LPFC_DB_RING_FORMAT)) {
				lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
						"3265 WQ[%d] doorbell format "
						"not supported: x%x\n",
						wq->queue_id, wq->db_format);
				status = -EINVAL;
				goto out;
			}
			pci_barset = bf_get(lpfc_mbx_wq_create_bar_set,
					    &wq_create->u.response);
			bar_memmap_p = lpfc_dual_chute_pci_bar_map(phba,
								   pci_barset);
			if (!bar_memmap_p) {
				lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
						"3263 WQ[%d] failed to memmap "
						"pci barset:x%x\n",
						wq->queue_id, pci_barset);
				status = -ENOMEM;
				goto out;
			}
			db_offset = wq_create->u.response.doorbell_offset;
			if ((db_offset != LPFC_ULP0_WQ_DOORBELL) &&
			    (db_offset != LPFC_ULP1_WQ_DOORBELL)) {
				lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
						"3252 WQ[%d] doorbell offset "
						"not supported: x%x\n",
						wq->queue_id, db_offset);
				status = -EINVAL;
				goto out;
			}
			wq->db_regaddr = bar_memmap_p + db_offset;
			lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
					"3264 WQ[%d]: barset:x%x, offset:x%x, "
					"format:x%x\n", wq->queue_id,
					pci_barset, db_offset, wq->db_format);
		} else
			wq->db_regaddr = phba->sli4_hba.WQDBregaddr;
	} else {
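		/* Check if DPP was honored by the firmware */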
		wq->dpp_enable = bf_get(lpfc_mbx_wq_create_dpp_rsp,
					&wq_create->u.response_1);
		if (wq->dpp_enable) {
			pci_barset = bf_get(lpfc_mbx_wq_create_v1_bar_set,
					    &wq_create->u.response_1);
			bar_memmap_p = lpfc_dual_chute_pci_bar_map(phba,
								   pci_barset);
			if (!bar_memmap_p) {
				lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
						"3267 WQ[%d] failed to memmap "
						"pci barset:x%x\n",
						wq->queue_id, pci_barset);
				status = -ENOMEM;
				goto out;
			}
			db_offset = wq_create->u.response_1.doorbell_offset;
			wq->db_regaddr = bar_memmap_p + db_offset;
			wq->dpp_id = bf_get(lpfc_mbx_wq_create_dpp_id,
					    &wq_create->u.response_1);
			dpp_barset = bf_get(lpfc_mbx_wq_create_dpp_bar,
					    &wq_create->u.response_1);
			bar_memmap_p = lpfc_dual_chute_pci_bar_map(phba,
								   dpp_barset);
			if (!bar_memmap_p) {
				lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
						"3268 WQ[%d] failed to memmap "
						"pci barset:x%x\n",
						wq->queue_id, dpp_barset);
				status = -ENOMEM;
				goto out;
			}
			dpp_offset = wq_create->u.response_1.dpp_offset;
			wq->dpp_regaddr = bar_memmap_p + dpp_offset;
			lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
					"3271 WQ[%d]: barset:x%x, offset:x%x, "
					"dpp_id:x%x dpp_barset:x%x "
					"dpp_offset:x%x\n",
					wq->queue_id, pci_barset, db_offset,
					wq->dpp_id, dpp_barset, dpp_offset);

#ifdef CONFIG_X86
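			/* Enable combined writes for DPP aperture */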
			pg_addr = (unsigned long)(wq->dpp_regaddr) & PAGE_MASK;
			rc = set_memory_wc(pg_addr, 1);
			if (rc) {
				lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
						"3272 Cannot setup Combined "
						"Write on WQ[%d] - disable DPP\n",
						wq->queue_id);
				phba->cfg_enable_dpp = 0;
			}
#else
			phba->cfg_enable_dpp = 0;
#endif
		} else
			wq->db_regaddr = phba->sli4_hba.WQDBregaddr;
	}
	wq->pring = kzalloc(sizeof(struct lpfc_sli_ring), GFP_KERNEL);
	if (wq->pring == NULL) {
		status = -ENOMEM;
		goto out;
	}
	wq->type = LPFC_WQ;
	wq->assoc_qid = cq->queue_id;
	wq->subtype = subtype;
	wq->host_index = 0;
	wq->hba_index = 0;
	wq->notify_interval = LPFC_WQ_NOTIFY_INTRVL;

	/* link the wq onto the parent cq child list */
	list_add_tail(&wq->list, &cq->child_list);
out:
	mempool_free(mbox, phba->mbox_mem_pool);
	return status;
}

/**
 * lpfc_rq_create - Create a Receive Queue on the HBA
 * @phba: HBA structure that indicates port to create a queue on.
 * @hrq: The queue structure to use to create the header receive queue.
 * @drq: The queue structure to use to create the data receive queue.
 * @cq: The completion queue to bind these receive queues to.
 * @subtype: The subtype of the receive queue indicating its functionality.
 *
 * This function creates a receive buffer queue pair, as detailed in @hrq
 * and @drq, on a port, described by @phba, by sending RQ_CREATE mailbox
 * commands to the HBA.
 *
 * The @hrq and @drq structs are used to get the entry counts and entry
 * sizes that are necessary to determine the number of pages to allocate
 * and use for these queues. The @cq indicates which completion queue to
 * bind the receive queues to. The mailbox commands are issued in polled
 * mode, so this function waits for them to finish before continuing.
 *
 * On success this function will return zero. If unable to allocate enough
 * memory this function will return -ENOMEM. If a queue create mailbox
 * command fails this function will return -ENXIO.
 **/
int
lpfc_rq_create(struct lpfc_hba *phba, struct lpfc_queue *hrq,
	       struct lpfc_queue *drq, struct lpfc_queue *cq, uint32_t subtype)
{
	struct lpfc_mbx_rq_create *rq_create;
	struct lpfc_dmabuf *dmabuf;
	LPFC_MBOXQ_t *mbox;
	int rc, length, status = 0;
	uint32_t shdr_status, shdr_add_status;
	union lpfc_sli4_cfg_shdr *shdr;
	uint32_t hw_page_size = phba->sli4_hba.pc_sli4_params.if_page_sz;
	void __iomem *bar_memmap_p;
	uint32_t db_offset;
	uint16_t pci_barset;

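	/* sanity check on queue memory */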
	if (!hrq || !drq || !cq)
		return -ENODEV;
	if (!phba->sli4_hba.pc_sli4_params.supported)
		hw_page_size = SLI4_PAGE_SIZE;

	if (hrq->entry_count != drq->entry_count)
		return -EINVAL;
	mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!mbox)
		return -ENOMEM;
	length = (sizeof(struct lpfc_mbx_rq_create) -
		  sizeof(struct lpfc_sli4_cfg_mhdr));
	lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_FCOE,
			 LPFC_MBOX_OPCODE_FCOE_RQ_CREATE,
			 length, LPFC_SLI4_MBX_EMBED);
	rq_create = &mbox->u.mqe.un.rq_create;
	shdr = (union lpfc_sli4_cfg_shdr *) &rq_create->header.cfg_shdr;
	bf_set(lpfc_mbox_hdr_version, &shdr->request,
	       phba->sli4_hba.pc_sli4_params.rqv);
	if (phba->sli4_hba.pc_sli4_params.rqv == LPFC_Q_CREATE_VERSION_1) {
		bf_set(lpfc_rq_context_rqe_count_1,
		       &rq_create->u.request.context,
		       hrq->entry_count);
		rq_create->u.request.context.buffer_size = LPFC_HDR_BUF_SIZE;
		bf_set(lpfc_rq_context_rqe_size,
		       &rq_create->u.request.context,
		       LPFC_RQE_SIZE_8);
		bf_set(lpfc_rq_context_page_size,
		       &rq_create->u.request.context,
		       LPFC_RQ_PAGE_SIZE_4096);
	} else {
		switch (hrq->entry_count) {
		default:
			lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
					"2535 Unsupported RQ count. (%d)\n",
					hrq->entry_count);
			if (hrq->entry_count < 512) {
				status = -EINVAL;
				goto out;
			}
			fallthrough;	/* otherwise default to smallest count */
		case 512:
			bf_set(lpfc_rq_context_rqe_count,
			       &rq_create->u.request.context,
			       LPFC_RQ_RING_SIZE_512);
			break;
		case 1024:
			bf_set(lpfc_rq_context_rqe_count,
			       &rq_create->u.request.context,
			       LPFC_RQ_RING_SIZE_1024);
			break;
		case 2048:
			bf_set(lpfc_rq_context_rqe_count,
			       &rq_create->u.request.context,
			       LPFC_RQ_RING_SIZE_2048);
			break;
		case 4096:
			bf_set(lpfc_rq_context_rqe_count,
			       &rq_create->u.request.context,
			       LPFC_RQ_RING_SIZE_4096);
			break;
		}
		bf_set(lpfc_rq_context_buf_size, &rq_create->u.request.context,
		       LPFC_HDR_BUF_SIZE);
	}
	bf_set(lpfc_rq_context_cq_id, &rq_create->u.request.context,
	       cq->queue_id);
	bf_set(lpfc_mbx_rq_create_num_pages, &rq_create->u.request,
	       hrq->page_count);
	list_for_each_entry(dmabuf, &hrq->page_list, list) {
		memset(dmabuf->virt, 0, hw_page_size);
		rq_create->u.request.page[dmabuf->buffer_tag].addr_lo =
					putPaddrLow(dmabuf->phys);
		rq_create->u.request.page[dmabuf->buffer_tag].addr_hi =
					putPaddrHigh(dmabuf->phys);
	}
	if (phba->sli4_hba.fw_func_mode & LPFC_DUA_MODE)
		bf_set(lpfc_mbx_rq_create_dua, &rq_create->u.request, 1);

	rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
	/* The IOCTL status is embedded in the mailbox subheader. */
	shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
	shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
	if (shdr_status || shdr_add_status || rc) {
		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
				"2504 RQ_CREATE mailbox failed with "
				"status x%x add_status x%x, mbx status x%x\n",
				shdr_status, shdr_add_status, rc);
		status = -ENXIO;
		goto out;
	}
	hrq->queue_id = bf_get(lpfc_mbx_rq_create_q_id, &rq_create->u.response);
	if (hrq->queue_id == 0xFFFF) {
		status = -ENXIO;
		goto out;
	}

	if (phba->sli4_hba.fw_func_mode & LPFC_DUA_MODE) {
		hrq->db_format = bf_get(lpfc_mbx_rq_create_db_format,
					&rq_create->u.response);
		if ((hrq->db_format != LPFC_DB_LIST_FORMAT) &&
		    (hrq->db_format != LPFC_DB_RING_FORMAT)) {
			lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
					"3262 RQ [%d] doorbell format not "
					"supported: x%x\n", hrq->queue_id,
					hrq->db_format);
			status = -EINVAL;
			goto out;
		}

		pci_barset = bf_get(lpfc_mbx_rq_create_bar_set,
				    &rq_create->u.response);
		bar_memmap_p = lpfc_dual_chute_pci_bar_map(phba, pci_barset);
		if (!bar_memmap_p) {
			lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
					"3269 RQ[%d] failed to memmap pci "
					"barset:x%x\n", hrq->queue_id,
					pci_barset);
			status = -ENOMEM;
			goto out;
		}

		db_offset = rq_create->u.response.doorbell_offset;
		if ((db_offset != LPFC_ULP0_RQ_DOORBELL) &&
		    (db_offset != LPFC_ULP1_RQ_DOORBELL)) {
			lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
					"3270 RQ[%d] doorbell offset not "
					"supported: x%x\n", hrq->queue_id,
					db_offset);
			status = -EINVAL;
			goto out;
		}
		hrq->db_regaddr = bar_memmap_p + db_offset;
		lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
				"3266 RQ[qid:%d]: barset:x%x, offset:x%x, "
				"format:x%x\n", hrq->queue_id, pci_barset,
				db_offset, hrq->db_format);
	} else {
		hrq->db_format = LPFC_DB_RING_FORMAT;
		hrq->db_regaddr = phba->sli4_hba.RQDBregaddr;
	}
	hrq->type = LPFC_HRQ;
	hrq->assoc_qid = cq->queue_id;
	hrq->subtype = subtype;
	hrq->host_index = 0;
	hrq->hba_index = 0;
	hrq->notify_interval = LPFC_RQ_NOTIFY_INTRVL;

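	/* now create the data queue */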
	lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_FCOE,
			 LPFC_MBOX_OPCODE_FCOE_RQ_CREATE,
			 length, LPFC_SLI4_MBX_EMBED);
	bf_set(lpfc_mbox_hdr_version, &shdr->request,
	       phba->sli4_hba.pc_sli4_params.rqv);
	if (phba->sli4_hba.pc_sli4_params.rqv == LPFC_Q_CREATE_VERSION_1) {
		bf_set(lpfc_rq_context_rqe_count_1,
		       &rq_create->u.request.context, hrq->entry_count);
		if (subtype == LPFC_NVMET)
			rq_create->u.request.context.buffer_size =
				LPFC_NVMET_DATA_BUF_SIZE;
		else
			rq_create->u.request.context.buffer_size =
				LPFC_DATA_BUF_SIZE;
		bf_set(lpfc_rq_context_rqe_size, &rq_create->u.request.context,
		       LPFC_RQE_SIZE_8);
		bf_set(lpfc_rq_context_page_size, &rq_create->u.request.context,
		       (PAGE_SIZE / SLI4_PAGE_SIZE));
	} else {
		switch (drq->entry_count) {
		default:
			lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
					"2536 Unsupported RQ count. (%d)\n",
					drq->entry_count);
			if (drq->entry_count < 512) {
				status = -EINVAL;
				goto out;
			}
			fallthrough;	/* otherwise default to smallest count */
		case 512:
			bf_set(lpfc_rq_context_rqe_count,
			       &rq_create->u.request.context,
			       LPFC_RQ_RING_SIZE_512);
			break;
		case 1024:
			bf_set(lpfc_rq_context_rqe_count,
			       &rq_create->u.request.context,
			       LPFC_RQ_RING_SIZE_1024);
			break;
		case 2048:
			bf_set(lpfc_rq_context_rqe_count,
			       &rq_create->u.request.context,
			       LPFC_RQ_RING_SIZE_2048);
			break;
		case 4096:
			bf_set(lpfc_rq_context_rqe_count,
			       &rq_create->u.request.context,
			       LPFC_RQ_RING_SIZE_4096);
			break;
		}
		if (subtype == LPFC_NVMET)
			bf_set(lpfc_rq_context_buf_size,
			       &rq_create->u.request.context,
			       LPFC_NVMET_DATA_BUF_SIZE);
		else
			bf_set(lpfc_rq_context_buf_size,
			       &rq_create->u.request.context,
			       LPFC_DATA_BUF_SIZE);
	}
	bf_set(lpfc_rq_context_cq_id, &rq_create->u.request.context,
	       cq->queue_id);
	bf_set(lpfc_mbx_rq_create_num_pages, &rq_create->u.request,
	       drq->page_count);
	list_for_each_entry(dmabuf, &drq->page_list, list) {
		rq_create->u.request.page[dmabuf->buffer_tag].addr_lo =
					putPaddrLow(dmabuf->phys);
		rq_create->u.request.page[dmabuf->buffer_tag].addr_hi =
					putPaddrHigh(dmabuf->phys);
	}
	if (phba->sli4_hba.fw_func_mode & LPFC_DUA_MODE)
		bf_set(lpfc_mbx_rq_create_dua, &rq_create->u.request, 1);
	rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
	/* The IOCTL status is embedded in the mailbox subheader. */
	shdr = (union lpfc_sli4_cfg_shdr *) &rq_create->header.cfg_shdr;
	shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
	shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
	if (shdr_status || shdr_add_status || rc) {
		status = -ENXIO;
		goto out;
	}
	drq->queue_id = bf_get(lpfc_mbx_rq_create_q_id, &rq_create->u.response);
	if (drq->queue_id == 0xFFFF) {
		status = -ENXIO;
		goto out;
	}
	drq->type = LPFC_DRQ;
	drq->assoc_qid = cq->queue_id;
	drq->subtype = subtype;
	drq->host_index = 0;
	drq->hba_index = 0;
	drq->notify_interval = LPFC_RQ_NOTIFY_INTRVL;

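	/* link the header and data RQs onto the parent cq child list */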
	list_add_tail(&hrq->list, &cq->child_list);
	list_add_tail(&drq->list, &cq->child_list);

out:
	mempool_free(mbox, phba->mbox_mem_pool);
	return status;
}

/**
 * lpfc_mrq_create - Create MRQ Receive Queues on the HBA
 * @phba: HBA structure that indicates port to create a queue on.
 * @hrqp: The queue structure array to use to create the header receive
 *        queues.
 * @drqp: The queue structure array to use to create the data receive
 *        queues.
 * @cqp: The completion queue array to bind these receive queues to.
 * @subtype: The subtype of the receive queues indicating their
 *           functionality.
 *
 * This function creates a set of receive buffer queue pairs, as detailed
 * in @hrqp and @drqp, on a port, described by @phba, by sending a single
 * RQ_CREATE mailbox command to the HBA.
 *
 * The @hrqp and @drqp struct arrays are used to get the entry counts and
 * entry sizes that are necessary to determine the number of pages to
 * allocate and use for these queues. The mailbox command is issued in
 * polled mode, so this function waits for it to finish before continuing.
 *
 * On success this function will return zero. If unable to allocate enough
 * memory this function will return -ENOMEM. If the queue create mailbox
 * command fails this function will return -ENXIO.
 **/
int
lpfc_mrq_create(struct lpfc_hba *phba, struct lpfc_queue **hrqp,
		struct lpfc_queue **drqp, struct lpfc_queue **cqp,
		uint32_t subtype)
{
	struct lpfc_queue *hrq, *drq, *cq;
	struct lpfc_mbx_rq_create_v2 *rq_create;
	struct lpfc_dmabuf *dmabuf;
	LPFC_MBOXQ_t *mbox;
	int rc, length, alloclen, status = 0;
	int cnt, idx, numrq, page_idx = 0;
	uint32_t shdr_status, shdr_add_status;
	union lpfc_sli4_cfg_shdr *shdr;
	uint32_t hw_page_size = phba->sli4_hba.pc_sli4_params.if_page_sz;

	numrq = phba->cfg_nvmet_mrq;
	/* sanity check on array memory */
	if (!hrqp || !drqp || !cqp || !numrq)
		return -ENODEV;
	if (!phba->sli4_hba.pc_sli4_params.supported)
		hw_page_size = SLI4_PAGE_SIZE;

	mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!mbox)
		return -ENOMEM;

	length = sizeof(struct lpfc_mbx_rq_create_v2);
	length += ((2 * numrq * hrqp[0]->page_count) *
		   sizeof(struct dma_address));

	alloclen = lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_FCOE,
				    LPFC_MBOX_OPCODE_FCOE_RQ_CREATE, length,
				    LPFC_SLI4_MBX_NEMBED);
	if (alloclen < length) {
		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
				"3099 Allocated DMA memory size (%d) is "
				"less than the requested DMA memory size "
				"(%d)\n", alloclen, length);
		status = -ENOMEM;
		goto out;
	}

	rq_create = mbox->sge_array->addr[0];
	shdr = (union lpfc_sli4_cfg_shdr *)&rq_create->cfg_shdr;

	bf_set(lpfc_mbox_hdr_version, &shdr->request, LPFC_Q_CREATE_VERSION_2);
	cnt = 0;

	for (idx = 0; idx < numrq; idx++) {
		hrq = hrqp[idx];
		drq = drqp[idx];
		cq  = cqp[idx];

		/* sanity check on queue memory */
		if (!hrq || !drq || !cq) {
			status = -ENODEV;
			goto out;
		}

		if (hrq->entry_count != drq->entry_count) {
			status = -EINVAL;
			goto out;
		}

		if (idx == 0) {
			bf_set(lpfc_mbx_rq_create_num_pages,
			       &rq_create->u.request,
			       hrq->page_count);
			bf_set(lpfc_mbx_rq_create_rq_cnt,
			       &rq_create->u.request, (numrq * 2));
			bf_set(lpfc_mbx_rq_create_dnb, &rq_create->u.request,
			       1);
			bf_set(lpfc_rq_context_base_cq,
			       &rq_create->u.request.context,
			       cq->queue_id);
			bf_set(lpfc_rq_context_data_size,
			       &rq_create->u.request.context,
			       LPFC_NVMET_DATA_BUF_SIZE);
			bf_set(lpfc_rq_context_hdr_size,
			       &rq_create->u.request.context,
			       LPFC_HDR_BUF_SIZE);
			bf_set(lpfc_rq_context_rqe_count_1,
			       &rq_create->u.request.context,
			       hrq->entry_count);
			bf_set(lpfc_rq_context_rqe_size,
			       &rq_create->u.request.context,
			       LPFC_RQE_SIZE_8);
			bf_set(lpfc_rq_context_page_size,
			       &rq_create->u.request.context,
			       (PAGE_SIZE / SLI4_PAGE_SIZE));
		}
		rc = 0;
		list_for_each_entry(dmabuf, &hrq->page_list, list) {
			memset(dmabuf->virt, 0, hw_page_size);
			cnt = page_idx + dmabuf->buffer_tag;
			rq_create->u.request.page[cnt].addr_lo =
					putPaddrLow(dmabuf->phys);
			rq_create->u.request.page[cnt].addr_hi =
					putPaddrHigh(dmabuf->phys);
			rc++;
		}
		page_idx += rc;

		rc = 0;
		list_for_each_entry(dmabuf, &drq->page_list, list) {
			memset(dmabuf->virt, 0, hw_page_size);
			cnt = page_idx + dmabuf->buffer_tag;
			rq_create->u.request.page[cnt].addr_lo =
					putPaddrLow(dmabuf->phys);
			rq_create->u.request.page[cnt].addr_hi =
					putPaddrHigh(dmabuf->phys);
			rc++;
		}
		page_idx += rc;

		hrq->db_format = LPFC_DB_RING_FORMAT;
		hrq->db_regaddr = phba->sli4_hba.RQDBregaddr;
		hrq->type = LPFC_HRQ;
		hrq->assoc_qid = cq->queue_id;
		hrq->subtype = subtype;
		hrq->host_index = 0;
		hrq->hba_index = 0;
		hrq->notify_interval = LPFC_RQ_NOTIFY_INTRVL;

		drq->db_format = LPFC_DB_RING_FORMAT;
		drq->db_regaddr = phba->sli4_hba.RQDBregaddr;
		drq->type = LPFC_DRQ;
		drq->assoc_qid = cq->queue_id;
		drq->subtype = subtype;
		drq->host_index = 0;
		drq->hba_index = 0;
		drq->notify_interval = LPFC_RQ_NOTIFY_INTRVL;

		list_add_tail(&hrq->list, &cq->child_list);
		list_add_tail(&drq->list, &cq->child_list);
	}

	rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
	/* The IOCTL status is embedded in the mailbox subheader. */
	shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
	shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
	if (shdr_status || shdr_add_status || rc) {
		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
				"3120 RQ_CREATE mailbox failed with "
				"status x%x add_status x%x, mbx status x%x\n",
				shdr_status, shdr_add_status, rc);
		status = -ENXIO;
		goto out;
	}
	rc = bf_get(lpfc_mbx_rq_create_q_id, &rq_create->u.response);
	if (rc == 0xFFFF) {
		status = -ENXIO;
		goto out;
	}

	/* Initialize all RQs with associated queue id */
	for (idx = 0; idx < numrq; idx++) {
		hrq = hrqp[idx];
		hrq->queue_id = rc + (2 * idx);
		drq = drqp[idx];
		drq->queue_id = rc + (2 * idx) + 1;
	}

out:
	lpfc_sli4_mbox_cmd_free(phba, mbox);
	return status;
}
16529
16530
16531
16532
16533
16534
16535
16536
16537
16538
16539
16540
16541
16542
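/**
 * lpfc_eq_destroy - Destroy an event Queue on the HBA
 * @phba: HBA structure that indicates port to destroy a queue on.
 * @eq: The queue structure associated with the queue to destroy.
 *
 * This function destroys a queue, as detailed in @eq, by sending an
 * EQ_DESTROY mailbox command to the HBA. The @eq struct is used to get the
 * queue ID of the queue to destroy.
 *
 * On success this function will return a zero. If the queue destroy mailbox
 * command fails this function will return -ENXIO.
 **/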
16543int
16544lpfc_eq_destroy(struct lpfc_hba *phba, struct lpfc_queue *eq)
16545{
16546 LPFC_MBOXQ_t *mbox;
16547 int rc, length, status = 0;
16548 uint32_t shdr_status, shdr_add_status;
16549 union lpfc_sli4_cfg_shdr *shdr;
16550
16551
16552 if (!eq)
16553 return -ENODEV;
16554
16555 mbox = mempool_alloc(eq->phba->mbox_mem_pool, GFP_KERNEL);
16556 if (!mbox)
16557 return -ENOMEM;
16558 length = (sizeof(struct lpfc_mbx_eq_destroy) -
16559 sizeof(struct lpfc_sli4_cfg_mhdr));
16560 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON,
16561 LPFC_MBOX_OPCODE_EQ_DESTROY,
16562 length, LPFC_SLI4_MBX_EMBED);
16563 bf_set(lpfc_mbx_eq_destroy_q_id, &mbox->u.mqe.un.eq_destroy.u.request,
16564 eq->queue_id);
16565 mbox->vport = eq->phba->pport;
16566 mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
16567
16568 rc = lpfc_sli_issue_mbox(eq->phba, mbox, MBX_POLL);
16569
16570 shdr = (union lpfc_sli4_cfg_shdr *)
16571 &mbox->u.mqe.un.eq_destroy.header.cfg_shdr;
16572 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
16573 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
16574 if (shdr_status || shdr_add_status || rc) {
16575 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
16576 "2505 EQ_DESTROY mailbox failed with "
16577 "status x%x add_status x%x, mbx status x%x\n",
16578 shdr_status, shdr_add_status, rc);
16579 status = -ENXIO;
16580 }
16581
16582
16583 list_del_init(&eq->list);
16584 mempool_free(mbox, eq->phba->mbox_mem_pool);
16585 return status;
16586}
16587
16588
16589
16590
16591
16592
16593
16594
16595
16596
16597
16598
16599
16600
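/**
 * lpfc_cq_destroy - Destroy a Completion Queue on the HBA
 * @phba: HBA structure that indicates port to destroy a queue on.
 * @cq: The queue structure associated with the queue to destroy.
 *
 * This function destroys a queue, as detailed in @cq, by sending a
 * CQ_DESTROY mailbox command to the HBA. The @cq struct is used to get the
 * queue ID of the queue to destroy.
 *
 * On success this function will return a zero. If the queue destroy mailbox
 * command fails this function will return -ENXIO.
 **/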
16601int
16602lpfc_cq_destroy(struct lpfc_hba *phba, struct lpfc_queue *cq)
16603{
16604 LPFC_MBOXQ_t *mbox;
16605 int rc, length, status = 0;
16606 uint32_t shdr_status, shdr_add_status;
16607 union lpfc_sli4_cfg_shdr *shdr;
16608
16609
16610 if (!cq)
16611 return -ENODEV;
16612 mbox = mempool_alloc(cq->phba->mbox_mem_pool, GFP_KERNEL);
16613 if (!mbox)
16614 return -ENOMEM;
16615 length = (sizeof(struct lpfc_mbx_cq_destroy) -
16616 sizeof(struct lpfc_sli4_cfg_mhdr));
16617 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON,
16618 LPFC_MBOX_OPCODE_CQ_DESTROY,
16619 length, LPFC_SLI4_MBX_EMBED);
16620 bf_set(lpfc_mbx_cq_destroy_q_id, &mbox->u.mqe.un.cq_destroy.u.request,
16621 cq->queue_id);
16622 mbox->vport = cq->phba->pport;
16623 mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
16624 rc = lpfc_sli_issue_mbox(cq->phba, mbox, MBX_POLL);
16625
	shdr = (union lpfc_sli4_cfg_shdr *)
		&mbox->u.mqe.un.cq_destroy.header.cfg_shdr;
16628 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
16629 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
16630 if (shdr_status || shdr_add_status || rc) {
16631 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
16632 "2506 CQ_DESTROY mailbox failed with "
16633 "status x%x add_status x%x, mbx status x%x\n",
16634 shdr_status, shdr_add_status, rc);
16635 status = -ENXIO;
16636 }
16637
16638 list_del_init(&cq->list);
16639 mempool_free(mbox, cq->phba->mbox_mem_pool);
16640 return status;
16641}
16642
16643
16644
16645
16646
16647
16648
16649
16650
16651
16652
16653
16654
16655
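/**
 * lpfc_mq_destroy - Destroy a Mailbox Queue on the HBA
 * @phba: HBA structure that indicates port to destroy a queue on.
 * @mq: The queue structure associated with the queue to destroy.
 *
 * This function destroys a queue, as detailed in @mq, by sending an
 * MQ_DESTROY mailbox command to the HBA. The @mq struct is used to get the
 * queue ID of the queue to destroy.
 *
 * On success this function will return a zero. If the queue destroy mailbox
 * command fails this function will return -ENXIO.
 **/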
16656int
16657lpfc_mq_destroy(struct lpfc_hba *phba, struct lpfc_queue *mq)
16658{
16659 LPFC_MBOXQ_t *mbox;
16660 int rc, length, status = 0;
16661 uint32_t shdr_status, shdr_add_status;
16662 union lpfc_sli4_cfg_shdr *shdr;
16663
16664
16665 if (!mq)
16666 return -ENODEV;
16667 mbox = mempool_alloc(mq->phba->mbox_mem_pool, GFP_KERNEL);
16668 if (!mbox)
16669 return -ENOMEM;
16670 length = (sizeof(struct lpfc_mbx_mq_destroy) -
16671 sizeof(struct lpfc_sli4_cfg_mhdr));
16672 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON,
16673 LPFC_MBOX_OPCODE_MQ_DESTROY,
16674 length, LPFC_SLI4_MBX_EMBED);
16675 bf_set(lpfc_mbx_mq_destroy_q_id, &mbox->u.mqe.un.mq_destroy.u.request,
16676 mq->queue_id);
16677 mbox->vport = mq->phba->pport;
16678 mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
16679 rc = lpfc_sli_issue_mbox(mq->phba, mbox, MBX_POLL);
16680
16681 shdr = (union lpfc_sli4_cfg_shdr *)
16682 &mbox->u.mqe.un.mq_destroy.header.cfg_shdr;
16683 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
16684 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
16685 if (shdr_status || shdr_add_status || rc) {
16686 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
16687 "2507 MQ_DESTROY mailbox failed with "
16688 "status x%x add_status x%x, mbx status x%x\n",
16689 shdr_status, shdr_add_status, rc);
16690 status = -ENXIO;
16691 }
16692
16693 list_del_init(&mq->list);
16694 mempool_free(mbox, mq->phba->mbox_mem_pool);
16695 return status;
16696}
16697
16698
16699
16700
16701
16702
16703
16704
16705
16706
16707
16708
16709
16710
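/**
 * lpfc_wq_destroy - Destroy a Work Queue on the HBA
 * @phba: HBA structure that indicates port to destroy a queue on.
 * @wq: The queue structure associated with the queue to destroy.
 *
 * This function destroys a queue, as detailed in @wq, by sending a
 * WQ_DESTROY mailbox command to the HBA. The @wq struct is used to get the
 * queue ID of the queue to destroy. The cached ring associated with the
 * work queue is also freed.
 *
 * On success this function will return a zero. If the queue destroy mailbox
 * command fails this function will return -ENXIO.
 **/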
16711int
16712lpfc_wq_destroy(struct lpfc_hba *phba, struct lpfc_queue *wq)
16713{
16714 LPFC_MBOXQ_t *mbox;
16715 int rc, length, status = 0;
16716 uint32_t shdr_status, shdr_add_status;
16717 union lpfc_sli4_cfg_shdr *shdr;
16718
16719
16720 if (!wq)
16721 return -ENODEV;
16722 mbox = mempool_alloc(wq->phba->mbox_mem_pool, GFP_KERNEL);
16723 if (!mbox)
16724 return -ENOMEM;
16725 length = (sizeof(struct lpfc_mbx_wq_destroy) -
16726 sizeof(struct lpfc_sli4_cfg_mhdr));
16727 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_FCOE,
16728 LPFC_MBOX_OPCODE_FCOE_WQ_DESTROY,
16729 length, LPFC_SLI4_MBX_EMBED);
16730 bf_set(lpfc_mbx_wq_destroy_q_id, &mbox->u.mqe.un.wq_destroy.u.request,
16731 wq->queue_id);
16732 mbox->vport = wq->phba->pport;
16733 mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
16734 rc = lpfc_sli_issue_mbox(wq->phba, mbox, MBX_POLL);
16735 shdr = (union lpfc_sli4_cfg_shdr *)
16736 &mbox->u.mqe.un.wq_destroy.header.cfg_shdr;
16737 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
16738 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
16739 if (shdr_status || shdr_add_status || rc) {
16740 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
16741 "2508 WQ_DESTROY mailbox failed with "
16742 "status x%x add_status x%x, mbx status x%x\n",
16743 shdr_status, shdr_add_status, rc);
16744 status = -ENXIO;
16745 }
16746
16747 list_del_init(&wq->list);
16748 kfree(wq->pring);
16749 wq->pring = NULL;
16750 mempool_free(mbox, wq->phba->mbox_mem_pool);
16751 return status;
16752}
16753
16754
16755
16756
16757
16758
16759
16760
16761
16762
16763
16764
16765
16766
16767
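/**
 * lpfc_rq_destroy - Destroy a Receive Queue on the HBA
 * @phba: HBA structure that indicates port to destroy a queue on.
 * @hrq: The queue structure associated with the header receive queue.
 * @drq: The queue structure associated with the data receive queue.
 *
 * This function destroys the header and data receive queue pair by sending
 * an RQ_DESTROY mailbox command, once for each queue, to the HBA.
 *
 * On success this function will return a zero. If either queue destroy
 * mailbox command fails this function will return -ENXIO.
 **/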
16768int
16769lpfc_rq_destroy(struct lpfc_hba *phba, struct lpfc_queue *hrq,
16770 struct lpfc_queue *drq)
16771{
16772 LPFC_MBOXQ_t *mbox;
16773 int rc, length, status = 0;
16774 uint32_t shdr_status, shdr_add_status;
16775 union lpfc_sli4_cfg_shdr *shdr;
16776
16777
16778 if (!hrq || !drq)
16779 return -ENODEV;
16780 mbox = mempool_alloc(hrq->phba->mbox_mem_pool, GFP_KERNEL);
16781 if (!mbox)
16782 return -ENOMEM;
16783 length = (sizeof(struct lpfc_mbx_rq_destroy) -
16784 sizeof(struct lpfc_sli4_cfg_mhdr));
16785 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_FCOE,
16786 LPFC_MBOX_OPCODE_FCOE_RQ_DESTROY,
16787 length, LPFC_SLI4_MBX_EMBED);
16788 bf_set(lpfc_mbx_rq_destroy_q_id, &mbox->u.mqe.un.rq_destroy.u.request,
16789 hrq->queue_id);
16790 mbox->vport = hrq->phba->pport;
16791 mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
16792 rc = lpfc_sli_issue_mbox(hrq->phba, mbox, MBX_POLL);
16793
16794 shdr = (union lpfc_sli4_cfg_shdr *)
16795 &mbox->u.mqe.un.rq_destroy.header.cfg_shdr;
16796 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
16797 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
16798 if (shdr_status || shdr_add_status || rc) {
16799 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
16800 "2509 RQ_DESTROY mailbox failed with "
16801 "status x%x add_status x%x, mbx status x%x\n",
16802 shdr_status, shdr_add_status, rc);
16803 if (rc != MBX_TIMEOUT)
16804 mempool_free(mbox, hrq->phba->mbox_mem_pool);
16805 return -ENXIO;
16806 }
16807 bf_set(lpfc_mbx_rq_destroy_q_id, &mbox->u.mqe.un.rq_destroy.u.request,
16808 drq->queue_id);
16809 rc = lpfc_sli_issue_mbox(drq->phba, mbox, MBX_POLL);
16810 shdr = (union lpfc_sli4_cfg_shdr *)
16811 &mbox->u.mqe.un.rq_destroy.header.cfg_shdr;
16812 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
16813 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
16814 if (shdr_status || shdr_add_status || rc) {
16815 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
16816 "2510 RQ_DESTROY mailbox failed with "
16817 "status x%x add_status x%x, mbx status x%x\n",
16818 shdr_status, shdr_add_status, rc);
16819 status = -ENXIO;
16820 }
16821 list_del_init(&hrq->list);
16822 list_del_init(&drq->list);
16823 mempool_free(mbox, hrq->phba->mbox_mem_pool);
16824 return status;
16825}
16826
16827
16828
16829
16830
16831
16832
16833
16834
16835
16836
16837
16838
16839
16840
16841
16842
16843
16844
16845
16846
16847
16848
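/**
 * lpfc_sli4_post_sgl - Post an sgl to the HBA.
 * @phba: pointer to lpfc hba data structure.
 * @pdma_phys_addr0: Physical address of the 1st SGL page.
 * @pdma_phys_addr1: Physical address of the 2nd SGL page.
 * @xritag: the xritag that ties this io to the SGL pages.
 *
 * This routine will post the sgl pages for the IO that has the xritag
 * that is in the iocbq structure. The xritag is assigned during iocbq
 * creation and persists until the io completes.
 *
 * Return codes
 *	0 - successful
 *	-EINVAL - xritag is NO_XRI
 *	-ENOMEM - could not allocate a mailbox command
 *	-ENXIO - the mailbox command failed
 **/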
16849int
16850lpfc_sli4_post_sgl(struct lpfc_hba *phba,
16851 dma_addr_t pdma_phys_addr0,
16852 dma_addr_t pdma_phys_addr1,
16853 uint16_t xritag)
16854{
16855 struct lpfc_mbx_post_sgl_pages *post_sgl_pages;
16856 LPFC_MBOXQ_t *mbox;
16857 int rc;
16858 uint32_t shdr_status, shdr_add_status;
16859 uint32_t mbox_tmo;
16860 union lpfc_sli4_cfg_shdr *shdr;
16861
16862 if (xritag == NO_XRI) {
		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
				"0364 Invalid param: xritag is NO_XRI\n");
16865 return -EINVAL;
16866 }
16867
16868 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
16869 if (!mbox)
16870 return -ENOMEM;
16871
16872 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_FCOE,
16873 LPFC_MBOX_OPCODE_FCOE_POST_SGL_PAGES,
16874 sizeof(struct lpfc_mbx_post_sgl_pages) -
16875 sizeof(struct lpfc_sli4_cfg_mhdr), LPFC_SLI4_MBX_EMBED);
16876
16877 post_sgl_pages = (struct lpfc_mbx_post_sgl_pages *)
16878 &mbox->u.mqe.un.post_sgl_pages;
16879 bf_set(lpfc_post_sgl_pages_xri, post_sgl_pages, xritag);
16880 bf_set(lpfc_post_sgl_pages_xricnt, post_sgl_pages, 1);
16881
16882 post_sgl_pages->sgl_pg_pairs[0].sgl_pg0_addr_lo =
16883 cpu_to_le32(putPaddrLow(pdma_phys_addr0));
16884 post_sgl_pages->sgl_pg_pairs[0].sgl_pg0_addr_hi =
16885 cpu_to_le32(putPaddrHigh(pdma_phys_addr0));
16886
16887 post_sgl_pages->sgl_pg_pairs[0].sgl_pg1_addr_lo =
16888 cpu_to_le32(putPaddrLow(pdma_phys_addr1));
16889 post_sgl_pages->sgl_pg_pairs[0].sgl_pg1_addr_hi =
16890 cpu_to_le32(putPaddrHigh(pdma_phys_addr1));
16891 if (!phba->sli4_hba.intr_enable)
16892 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
16893 else {
16894 mbox_tmo = lpfc_mbox_tmo_val(phba, mbox);
16895 rc = lpfc_sli_issue_mbox_wait(phba, mbox, mbox_tmo);
16896 }
16897
16898 shdr = (union lpfc_sli4_cfg_shdr *) &post_sgl_pages->header.cfg_shdr;
16899 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
16900 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
16901 if (rc != MBX_TIMEOUT)
16902 mempool_free(mbox, phba->mbox_mem_pool);
	if (shdr_status || shdr_add_status || rc) {
		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
				"2511 POST_SGL mailbox failed with "
				"status x%x add_status x%x, mbx status x%x\n",
				shdr_status, shdr_add_status, rc);
		return -ENXIO;
	}
	return 0;
16910}
16911
16912
16913
16914
16915
16916
16917
16918
16919
16920
16921
16922
16923
16924
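/**
 * lpfc_sli4_alloc_xri - Allocate an available XRI from the port's range
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine finds the first available (zero) bit in the port's XRI
 * bitmask, marks it in use and returns it as the allocated XRI. The
 * hbalock is taken to serialize access to the bitmask.
 *
 * An illustrative pairing with the free routine (a sketch, not a snippet
 * from this driver) would look like:
 *
 *	xri = lpfc_sli4_alloc_xri(phba);
 *	if (xri == NO_XRI)
 *		return -ENOMEM;
 *	...
 *	lpfc_sli4_free_xri(phba, xri);
 *
 * Returns the allocated XRI, or NO_XRI if the range is exhausted.
 **/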
16925static uint16_t
16926lpfc_sli4_alloc_xri(struct lpfc_hba *phba)
16927{
16928 unsigned long xri;
16929
16930
16931
16932
16933
16934 spin_lock_irq(&phba->hbalock);
16935 xri = find_next_zero_bit(phba->sli4_hba.xri_bmask,
16936 phba->sli4_hba.max_cfg_param.max_xri, 0);
16937 if (xri >= phba->sli4_hba.max_cfg_param.max_xri) {
16938 spin_unlock_irq(&phba->hbalock);
16939 return NO_XRI;
16940 } else {
16941 set_bit(xri, phba->sli4_hba.xri_bmask);
16942 phba->sli4_hba.max_cfg_param.xri_used++;
16943 }
16944 spin_unlock_irq(&phba->hbalock);
16945 return xri;
16946}
16947
16948
16949
16950
16951
16952
16953
16954
16955
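/**
 * __lpfc_sli4_free_xri - Release an xri for reuse.
 * @phba: pointer to lpfc hba data structure.
 * @xri: xri to release.
 *
 * This routine clears the bit for @xri in the XRI bitmask and decrements
 * the in-use count. The caller must hold the hbalock.
 **/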
16956static void
16957__lpfc_sli4_free_xri(struct lpfc_hba *phba, int xri)
16958{
16959 if (test_and_clear_bit(xri, phba->sli4_hba.xri_bmask)) {
16960 phba->sli4_hba.max_cfg_param.xri_used--;
16961 }
16962}
16963
16964
16965
16966
16967
16968
16969
16970
16971
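/**
 * lpfc_sli4_free_xri - Release an xri for reuse.
 * @phba: pointer to lpfc hba data structure.
 * @xri: xri to release.
 *
 * This routine takes the hbalock and then releases @xri back to the pool
 * of available xris maintained by the driver.
 **/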
16972void
16973lpfc_sli4_free_xri(struct lpfc_hba *phba, int xri)
16974{
16975 spin_lock_irq(&phba->hbalock);
16976 __lpfc_sli4_free_xri(phba, xri);
16977 spin_unlock_irq(&phba->hbalock);
16978}
16979
16980
16981
16982
16983
16984
16985
16986
16987
16988
16989
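/**
 * lpfc_sli4_next_xritag - Get an xritag for the io
 * @phba: Pointer to HBA context object.
 *
 * This function gets an xritag for the iocb. If there is no unused xritag
 * it returns NO_XRI and logs a warning with the current usage counts.
 * The caller is not required to hold any lock.
 **/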
16990uint16_t
16991lpfc_sli4_next_xritag(struct lpfc_hba *phba)
16992{
16993 uint16_t xri_index;
16994
16995 xri_index = lpfc_sli4_alloc_xri(phba);
16996 if (xri_index == NO_XRI)
		lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
				"2004 Failed to allocate XRI. Last XRITAG is %d,"
				" Max XRI is %d, Used XRI is %d\n",
17000 xri_index,
17001 phba->sli4_hba.max_cfg_param.max_xri,
17002 phba->sli4_hba.max_cfg_param.xri_used);
17003 return xri_index;
17004}
17005
17006
17007
17008
17009
17010
17011
17012
17013
17014
17015
17016
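/**
 * lpfc_sli4_post_sgl_list - post a block of ELS sgls to the port.
 * @phba: pointer to lpfc hba data structure.
 * @post_sgl_list: pointer to els sgl entry list.
 * @post_cnt: number of els sgl entries on the list.
 *
 * This routine is invoked to post a block of driver's sgl pages to the
 * HBA using a non-embedded mailbox command. No Lock is held. This routine
 * is only called when the driver is loading and after all IO has been
 * stopped.
 **/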
17017static int
17018lpfc_sli4_post_sgl_list(struct lpfc_hba *phba,
17019 struct list_head *post_sgl_list,
17020 int post_cnt)
17021{
17022 struct lpfc_sglq *sglq_entry = NULL, *sglq_next = NULL;
17023 struct lpfc_mbx_post_uembed_sgl_page1 *sgl;
17024 struct sgl_page_pairs *sgl_pg_pairs;
17025 void *viraddr;
17026 LPFC_MBOXQ_t *mbox;
17027 uint32_t reqlen, alloclen, pg_pairs;
17028 uint32_t mbox_tmo;
17029 uint16_t xritag_start = 0;
17030 int rc = 0;
17031 uint32_t shdr_status, shdr_add_status;
17032 union lpfc_sli4_cfg_shdr *shdr;
17033
17034 reqlen = post_cnt * sizeof(struct sgl_page_pairs) +
17035 sizeof(union lpfc_sli4_cfg_shdr) + sizeof(uint32_t);
17036 if (reqlen > SLI4_PAGE_SIZE) {
		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
				"2559 Block sgl registration required DMA "
				"size (%d) greater than a page\n", reqlen);
17040 return -ENOMEM;
17041 }
17042
17043 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
17044 if (!mbox)
17045 return -ENOMEM;
17046
17047
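	/* Allocate DMA memory and set up the non-embedded mailbox command */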
17048 alloclen = lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_FCOE,
17049 LPFC_MBOX_OPCODE_FCOE_POST_SGL_PAGES, reqlen,
17050 LPFC_SLI4_MBX_NEMBED);
17051
17052 if (alloclen < reqlen) {
17053 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
17054 "0285 Allocated DMA memory size (%d) is "
17055 "less than the requested DMA memory "
17056 "size (%d)\n", alloclen, reqlen);
17057 lpfc_sli4_mbox_cmd_free(phba, mbox);
17058 return -ENOMEM;
17059 }
17060
17061 viraddr = mbox->sge_array->addr[0];
17062 sgl = (struct lpfc_mbx_post_uembed_sgl_page1 *)viraddr;
17063 sgl_pg_pairs = &sgl->sgl_pg_pairs;
17064
17065 pg_pairs = 0;
17066 list_for_each_entry_safe(sglq_entry, sglq_next, post_sgl_list, list) {
17067
17068 sgl_pg_pairs->sgl_pg0_addr_lo =
17069 cpu_to_le32(putPaddrLow(sglq_entry->phys));
17070 sgl_pg_pairs->sgl_pg0_addr_hi =
17071 cpu_to_le32(putPaddrHigh(sglq_entry->phys));
17072 sgl_pg_pairs->sgl_pg1_addr_lo =
17073 cpu_to_le32(putPaddrLow(0));
17074 sgl_pg_pairs->sgl_pg1_addr_hi =
17075 cpu_to_le32(putPaddrHigh(0));
17076
17077
17078 if (pg_pairs == 0)
17079 xritag_start = sglq_entry->sli4_xritag;
17080 sgl_pg_pairs++;
17081 pg_pairs++;
17082 }
17083
17084
17085 bf_set(lpfc_post_sgl_pages_xri, sgl, xritag_start);
17086 bf_set(lpfc_post_sgl_pages_xricnt, sgl, post_cnt);
17087 sgl->word0 = cpu_to_le32(sgl->word0);
17088
17089 if (!phba->sli4_hba.intr_enable)
17090 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
17091 else {
17092 mbox_tmo = lpfc_mbox_tmo_val(phba, mbox);
17093 rc = lpfc_sli_issue_mbox_wait(phba, mbox, mbox_tmo);
17094 }
17095 shdr = (union lpfc_sli4_cfg_shdr *) &sgl->cfg_shdr;
17096 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
17097 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
17098 if (rc != MBX_TIMEOUT)
17099 lpfc_sli4_mbox_cmd_free(phba, mbox);
17100 if (shdr_status || shdr_add_status || rc) {
17101 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
17102 "2513 POST_SGL_BLOCK mailbox command failed "
17103 "status x%x add_status x%x mbx status x%x\n",
17104 shdr_status, shdr_add_status, rc);
17105 rc = -ENXIO;
17106 }
17107 return rc;
17108}
17109
17110
17111
17112
17113
17114
17115
17116
17117
17118
17119
17120
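/**
 * lpfc_sli4_post_io_sgl_block - post a block of IO buffer sgls to the port
 * @phba: pointer to lpfc hba data structure.
 * @nblist: pointer to the IO buffer list.
 * @count: number of IO buffers on the list.
 *
 * This routine is invoked to post a block of @count IO buffer sgl pages
 * from the buffer list @nblist to the HBA using a non-embedded mailbox
 * command. No Lock is held.
 **/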
17121static int
17122lpfc_sli4_post_io_sgl_block(struct lpfc_hba *phba, struct list_head *nblist,
17123 int count)
17124{
17125 struct lpfc_io_buf *lpfc_ncmd;
17126 struct lpfc_mbx_post_uembed_sgl_page1 *sgl;
17127 struct sgl_page_pairs *sgl_pg_pairs;
17128 void *viraddr;
17129 LPFC_MBOXQ_t *mbox;
17130 uint32_t reqlen, alloclen, pg_pairs;
17131 uint32_t mbox_tmo;
17132 uint16_t xritag_start = 0;
17133 int rc = 0;
17134 uint32_t shdr_status, shdr_add_status;
17135 dma_addr_t pdma_phys_bpl1;
17136 union lpfc_sli4_cfg_shdr *shdr;
17137
17138
17139 reqlen = count * sizeof(struct sgl_page_pairs) +
17140 sizeof(union lpfc_sli4_cfg_shdr) + sizeof(uint32_t);
17141 if (reqlen > SLI4_PAGE_SIZE) {
		lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
				"6118 Block sgl registration required DMA "
				"size (%d) greater than a page\n", reqlen);
17145 return -ENOMEM;
17146 }
17147 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
17148 if (!mbox) {
17149 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
17150 "6119 Failed to allocate mbox cmd memory\n");
17151 return -ENOMEM;
17152 }
17153
17154
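	/* Allocate DMA memory and set up the non-embedded mailbox command */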
17155 alloclen = lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_FCOE,
17156 LPFC_MBOX_OPCODE_FCOE_POST_SGL_PAGES,
17157 reqlen, LPFC_SLI4_MBX_NEMBED);
17158
17159 if (alloclen < reqlen) {
17160 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
17161 "6120 Allocated DMA memory size (%d) is "
17162 "less than the requested DMA memory "
17163 "size (%d)\n", alloclen, reqlen);
17164 lpfc_sli4_mbox_cmd_free(phba, mbox);
17165 return -ENOMEM;
17166 }
17167
17168
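	/* Get the first SGE entry from the non-embedded DMA memory */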
17169 viraddr = mbox->sge_array->addr[0];
17170
17171
17172 sgl = (struct lpfc_mbx_post_uembed_sgl_page1 *)viraddr;
17173 sgl_pg_pairs = &sgl->sgl_pg_pairs;
17174
17175 pg_pairs = 0;
17176 list_for_each_entry(lpfc_ncmd, nblist, list) {
17177
17178 sgl_pg_pairs->sgl_pg0_addr_lo =
17179 cpu_to_le32(putPaddrLow(lpfc_ncmd->dma_phys_sgl));
17180 sgl_pg_pairs->sgl_pg0_addr_hi =
17181 cpu_to_le32(putPaddrHigh(lpfc_ncmd->dma_phys_sgl));
17182 if (phba->cfg_sg_dma_buf_size > SGL_PAGE_SIZE)
17183 pdma_phys_bpl1 = lpfc_ncmd->dma_phys_sgl +
17184 SGL_PAGE_SIZE;
17185 else
17186 pdma_phys_bpl1 = 0;
17187 sgl_pg_pairs->sgl_pg1_addr_lo =
17188 cpu_to_le32(putPaddrLow(pdma_phys_bpl1));
17189 sgl_pg_pairs->sgl_pg1_addr_hi =
17190 cpu_to_le32(putPaddrHigh(pdma_phys_bpl1));
17191
17192 if (pg_pairs == 0)
17193 xritag_start = lpfc_ncmd->cur_iocbq.sli4_xritag;
17194 sgl_pg_pairs++;
17195 pg_pairs++;
17196 }
17197 bf_set(lpfc_post_sgl_pages_xri, sgl, xritag_start);
17198 bf_set(lpfc_post_sgl_pages_xricnt, sgl, pg_pairs);
17199
17200 sgl->word0 = cpu_to_le32(sgl->word0);
17201
17202 if (!phba->sli4_hba.intr_enable) {
17203 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
17204 } else {
17205 mbox_tmo = lpfc_mbox_tmo_val(phba, mbox);
17206 rc = lpfc_sli_issue_mbox_wait(phba, mbox, mbox_tmo);
17207 }
17208 shdr = (union lpfc_sli4_cfg_shdr *)&sgl->cfg_shdr;
17209 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
17210 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
17211 if (rc != MBX_TIMEOUT)
17212 lpfc_sli4_mbox_cmd_free(phba, mbox);
17213 if (shdr_status || shdr_add_status || rc) {
17214 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
17215 "6125 POST_SGL_BLOCK mailbox command failed "
17216 "status x%x add_status x%x mbx status x%x\n",
17217 shdr_status, shdr_add_status, rc);
17218 rc = -ENXIO;
17219 }
17220 return rc;
17221}
17222
17223
17224
17225
17226
17227
17228
17229
17230
17231
17232
17233
17234
17235
17236
17237
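/**
 * lpfc_sli4_post_io_sgl_list - Post blocks of IO buffer sgls from a list
 * @phba: pointer to lpfc hba data structure.
 * @post_nblist: pointer to the IO buffer list.
 * @sb_count: number of IO buffers on the list.
 *
 * This routine walks the list of IO buffers that was passed in. It attempts
 * to construct blocks of buffer sgls with contiguous xris and uses the
 * non-embedded SGL block post mailbox command to post them to the port.
 * A single buffer sgl with a non-contiguous xri, if any, is posted with
 * the embedded SGL post mailbox command instead.
 *
 * Returns: 0 on failure, otherwise the number of successfully posted
 * buffers.
 **/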
17238int
17239lpfc_sli4_post_io_sgl_list(struct lpfc_hba *phba,
17240 struct list_head *post_nblist, int sb_count)
17241{
17242 struct lpfc_io_buf *lpfc_ncmd, *lpfc_ncmd_next;
17243 int status, sgl_size;
17244 int post_cnt = 0, block_cnt = 0, num_posting = 0, num_posted = 0;
17245 dma_addr_t pdma_phys_sgl1;
17246 int last_xritag = NO_XRI;
17247 int cur_xritag;
17248 LIST_HEAD(prep_nblist);
17249 LIST_HEAD(blck_nblist);
17250 LIST_HEAD(nvme_nblist);
17251
17252
17253 if (sb_count <= 0)
17254 return -EINVAL;
17255
17256 sgl_size = phba->cfg_sg_dma_buf_size;
17257 list_for_each_entry_safe(lpfc_ncmd, lpfc_ncmd_next, post_nblist, list) {
17258 list_del_init(&lpfc_ncmd->list);
17259 block_cnt++;
17260 if ((last_xritag != NO_XRI) &&
17261 (lpfc_ncmd->cur_iocbq.sli4_xritag != last_xritag + 1)) {
17262
17263 list_splice_init(&prep_nblist, &blck_nblist);
17264 post_cnt = block_cnt - 1;
17265
17266 list_add_tail(&lpfc_ncmd->list, &prep_nblist);
17267 block_cnt = 1;
17268 } else {
17269
17270 list_add_tail(&lpfc_ncmd->list, &prep_nblist);
17271
17272 if (block_cnt == LPFC_NEMBED_MBOX_SGL_CNT) {
17273 list_splice_init(&prep_nblist, &blck_nblist);
17274 post_cnt = block_cnt;
17275 block_cnt = 0;
17276 }
17277 }
17278 num_posting++;
17279 last_xritag = lpfc_ncmd->cur_iocbq.sli4_xritag;
17280
17281
17282 if (num_posting == sb_count) {
17283 if (post_cnt == 0) {
17284
17285 list_splice_init(&prep_nblist, &blck_nblist);
17286 post_cnt = block_cnt;
17287 } else if (block_cnt == 1) {
17288
17289 if (sgl_size > SGL_PAGE_SIZE)
17290 pdma_phys_sgl1 =
17291 lpfc_ncmd->dma_phys_sgl +
17292 SGL_PAGE_SIZE;
17293 else
17294 pdma_phys_sgl1 = 0;
17295 cur_xritag = lpfc_ncmd->cur_iocbq.sli4_xritag;
17296 status = lpfc_sli4_post_sgl(
17297 phba, lpfc_ncmd->dma_phys_sgl,
17298 pdma_phys_sgl1, cur_xritag);
17299 if (status) {
17300
17301 lpfc_ncmd->flags |=
17302 LPFC_SBUF_NOT_POSTED;
17303 } else {
17304
17305 lpfc_ncmd->flags &=
17306 ~LPFC_SBUF_NOT_POSTED;
17307 lpfc_ncmd->status = IOSTAT_SUCCESS;
17308 num_posted++;
17309 }
17310
17311 list_add_tail(&lpfc_ncmd->list, &nvme_nblist);
17312 }
17313 }
17314
17315
17316 if (post_cnt == 0)
17317 continue;
17318
17319
17320 status = lpfc_sli4_post_io_sgl_block(phba, &blck_nblist,
17321 post_cnt);
17322
17323
17324 if (block_cnt == 0)
17325 last_xritag = NO_XRI;
17326
17327
17328 post_cnt = 0;
17329
17330
17331 while (!list_empty(&blck_nblist)) {
17332 list_remove_head(&blck_nblist, lpfc_ncmd,
17333 struct lpfc_io_buf, list);
17334 if (status) {
17335
17336 lpfc_ncmd->flags |= LPFC_SBUF_NOT_POSTED;
17337 } else {
17338
17339 lpfc_ncmd->flags &= ~LPFC_SBUF_NOT_POSTED;
17340 lpfc_ncmd->status = IOSTAT_SUCCESS;
17341 num_posted++;
17342 }
17343 list_add_tail(&lpfc_ncmd->list, &nvme_nblist);
17344 }
17345 }
17346
17347 lpfc_io_buf_replenish(phba, &nvme_nblist);
17348
17349 return num_posted;
17350}
17351
17352
17353
17354
17355
17356
17357
17358
17359
17360
17361
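/**
 * lpfc_fc_frame_check - Check that this frame is a valid frame to handle
 * @phba: pointer to lpfc hba data structure.
 * @fc_hdr: A pointer to the FC Header data (In Big Endian Format)
 *
 * This function checks the fields in the @fc_hdr to see if the FC frame is
 * a valid frame that we can handle. This function will return zero if the
 * frame is a valid frame or a non-zero value when the frame is not valid.
 **/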
17362static int
17363lpfc_fc_frame_check(struct lpfc_hba *phba, struct fc_frame_header *fc_hdr)
17364{
17365
17366 struct fc_vft_header *fc_vft_hdr;
17367 uint32_t *header = (uint32_t *) fc_hdr;
17368
17369#define FC_RCTL_MDS_DIAGS 0xF4
17370
17371 switch (fc_hdr->fh_r_ctl) {
17372 case FC_RCTL_DD_UNCAT:
17373 case FC_RCTL_DD_SOL_DATA:
17374 case FC_RCTL_DD_UNSOL_CTL:
17375 case FC_RCTL_DD_SOL_CTL:
17376 case FC_RCTL_DD_UNSOL_DATA:
17377 case FC_RCTL_DD_DATA_DESC:
17378 case FC_RCTL_DD_UNSOL_CMD:
17379 case FC_RCTL_DD_CMD_STATUS:
17380 case FC_RCTL_ELS_REQ:
17381 case FC_RCTL_ELS_REP:
17382 case FC_RCTL_ELS4_REQ:
17383 case FC_RCTL_ELS4_REP:
17384 case FC_RCTL_BA_NOP:
17385 case FC_RCTL_BA_ABTS:
17386 case FC_RCTL_BA_RMC:
17387 case FC_RCTL_BA_ACC:
17388 case FC_RCTL_BA_RJT:
17389 case FC_RCTL_BA_PRMT:
17390 case FC_RCTL_ACK_1:
17391 case FC_RCTL_ACK_0:
17392 case FC_RCTL_P_RJT:
17393 case FC_RCTL_F_RJT:
17394 case FC_RCTL_P_BSY:
17395 case FC_RCTL_F_BSY:
17396 case FC_RCTL_F_BSYL:
17397 case FC_RCTL_LCR:
17398 case FC_RCTL_MDS_DIAGS:
17399 case FC_RCTL_END:
17400 break;
17401 case FC_RCTL_VFTH:
17402 fc_vft_hdr = (struct fc_vft_header *)fc_hdr;
17403 fc_hdr = &((struct fc_frame_header *)fc_vft_hdr)[1];
17404 return lpfc_fc_frame_check(phba, fc_hdr);
17405 default:
17406 goto drop;
17407 }
17408
17409 switch (fc_hdr->fh_type) {
17410 case FC_TYPE_BLS:
17411 case FC_TYPE_ELS:
17412 case FC_TYPE_FCP:
17413 case FC_TYPE_CT:
17414 case FC_TYPE_NVME:
17415 break;
17416 case FC_TYPE_IP:
17417 case FC_TYPE_ILS:
17418 default:
17419 goto drop;
17420 }
17421
17422 lpfc_printf_log(phba, KERN_INFO, LOG_ELS,
17423 "2538 Received frame rctl:x%x, type:x%x, "
17424 "frame Data:%08x %08x %08x %08x %08x %08x %08x\n",
17425 fc_hdr->fh_r_ctl, fc_hdr->fh_type,
17426 be32_to_cpu(header[0]), be32_to_cpu(header[1]),
17427 be32_to_cpu(header[2]), be32_to_cpu(header[3]),
17428 be32_to_cpu(header[4]), be32_to_cpu(header[5]),
17429 be32_to_cpu(header[6]));
17430 return 0;
17431drop:
17432 lpfc_printf_log(phba, KERN_WARNING, LOG_ELS,
17433 "2539 Dropped frame rctl:x%x type:x%x\n",
17434 fc_hdr->fh_r_ctl, fc_hdr->fh_type);
17435 return 1;
17436}
17437
17438
17439
17440
17441
17442
17443
17444
17445
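/**
 * lpfc_fc_hdr_get_vfi - Get the VFI from an FC frame
 * @fc_hdr: A pointer to the FC Header data (In Big Endian Format)
 *
 * This function processes the FC header to retrieve the VFI from the VF
 * header, if one exists. It returns the VFI if one exists or 0 if no VF
 * header is present.
 **/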
17446static uint32_t
17447lpfc_fc_hdr_get_vfi(struct fc_frame_header *fc_hdr)
17448{
17449 struct fc_vft_header *fc_vft_hdr = (struct fc_vft_header *)fc_hdr;
17450
17451 if (fc_hdr->fh_r_ctl != FC_RCTL_VFTH)
17452 return 0;
17453 return bf_get(fc_vft_hdr_vf_id, fc_vft_hdr);
17454}
17455
17456
17457
17458
17459
17460
17461
17462
17463
17464
17465
17466
17467
17468
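/**
 * lpfc_fc_frame_to_vport - Finds the vport that a frame is destined to
 * @phba: Pointer to the HBA structure to search for the vport on
 * @fc_hdr: A pointer to the FC Header data (In Big Endian Format)
 * @fcfi: The FC Fabric ID that the frame came from
 * @did: Destination ID to match against
 *
 * This function searches @phba for a vport that matches the content of the
 * @fc_hdr passed in and the @fcfi. It uses the @fc_hdr to fetch the VFI and
 * compares the VFI against each vport in @phba. It returns the vport that
 * the frame is destined to, or NULL if no suitable vport is found.
 **/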
17469static struct lpfc_vport *
17470lpfc_fc_frame_to_vport(struct lpfc_hba *phba, struct fc_frame_header *fc_hdr,
17471 uint16_t fcfi, uint32_t did)
17472{
17473 struct lpfc_vport **vports;
17474 struct lpfc_vport *vport = NULL;
17475 int i;
17476
17477 if (did == Fabric_DID)
17478 return phba->pport;
17479 if ((phba->pport->fc_flag & FC_PT2PT) &&
17480 !(phba->link_state == LPFC_HBA_READY))
17481 return phba->pport;
17482
17483 vports = lpfc_create_vport_work_array(phba);
17484 if (vports != NULL) {
17485 for (i = 0; i <= phba->max_vpi && vports[i] != NULL; i++) {
17486 if (phba->fcf.fcfi == fcfi &&
17487 vports[i]->vfi == lpfc_fc_hdr_get_vfi(fc_hdr) &&
17488 vports[i]->fc_myDID == did) {
17489 vport = vports[i];
17490 break;
17491 }
17492 }
17493 }
17494 lpfc_destroy_vport_work_array(phba, vports);
17495 return vport;
17496}
17497
17498
17499
17500
17501
17502
17503
17504
17505
17506
17507
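/**
 * lpfc_update_rcv_time_stamp - Update vport rcv seq time stamp
 * @vport: The vport to work on.
 *
 * This function updates the receive sequence time stamp for this vport.
 * The receive sequence time stamp indicates the time that the last frame
 * of the sequence that has been idle for the longest amount of time was
 * received. The driver uses this time stamp to decide whether any received
 * sequences have timed out.
 **/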
17508static void
17509lpfc_update_rcv_time_stamp(struct lpfc_vport *vport)
17510{
17511 struct lpfc_dmabuf *h_buf;
17512 struct hbq_dmabuf *dmabuf = NULL;
17513
17514
17515 h_buf = list_get_first(&vport->rcv_buffer_list,
17516 struct lpfc_dmabuf, list);
17517 if (!h_buf)
17518 return;
17519 dmabuf = container_of(h_buf, struct hbq_dmabuf, hbuf);
17520 vport->rcv_buffer_time_stamp = dmabuf->time_stamp;
17521}
17522
17523
17524
17525
17526
17527
17528
17529
17530
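/**
 * lpfc_cleanup_rcv_buffers - Cleans up all outstanding receive sequences.
 * @vport: The vport that the received sequences were sent to.
 *
 * This function cleans up all outstanding received sequences. It is called
 * by the driver when a link event or user action invalidates all the
 * received sequences.
 **/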
17531void
17532lpfc_cleanup_rcv_buffers(struct lpfc_vport *vport)
17533{
17534 struct lpfc_dmabuf *h_buf, *hnext;
17535 struct lpfc_dmabuf *d_buf, *dnext;
17536 struct hbq_dmabuf *dmabuf = NULL;
17537
17538
17539 list_for_each_entry_safe(h_buf, hnext, &vport->rcv_buffer_list, list) {
17540 dmabuf = container_of(h_buf, struct hbq_dmabuf, hbuf);
17541 list_del_init(&dmabuf->hbuf.list);
17542 list_for_each_entry_safe(d_buf, dnext,
17543 &dmabuf->dbuf.list, list) {
17544 list_del_init(&d_buf->list);
17545 lpfc_in_buf_free(vport->phba, d_buf);
17546 }
17547 lpfc_in_buf_free(vport->phba, &dmabuf->dbuf);
17548 }
17549}
17550
17551
17552
17553
17554
17555
17556
17557
17558
17559
17560
17561
17562
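/**
 * lpfc_rcv_seq_check_edtov - Cleans up timed out receive sequences.
 * @vport: The vport that the received sequences were sent to.
 *
 * This function determines whether any received sequences have timed out by
 * first checking the vport's rcv_buffer time stamp. If that time stamp
 * indicates at least one received sequence has timed out, the function
 * walks the received sequences and frees all the frames of every sequence
 * whose last update is older than EDTOV.
 **/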
17563void
17564lpfc_rcv_seq_check_edtov(struct lpfc_vport *vport)
17565{
17566 struct lpfc_dmabuf *h_buf, *hnext;
17567 struct lpfc_dmabuf *d_buf, *dnext;
17568 struct hbq_dmabuf *dmabuf = NULL;
17569 unsigned long timeout;
17570 int abort_count = 0;
17571
17572 timeout = (msecs_to_jiffies(vport->phba->fc_edtov) +
17573 vport->rcv_buffer_time_stamp);
17574 if (list_empty(&vport->rcv_buffer_list) ||
17575 time_before(jiffies, timeout))
17576 return;
17577
17578 list_for_each_entry_safe(h_buf, hnext, &vport->rcv_buffer_list, list) {
17579 dmabuf = container_of(h_buf, struct hbq_dmabuf, hbuf);
17580 timeout = (msecs_to_jiffies(vport->phba->fc_edtov) +
17581 dmabuf->time_stamp);
17582 if (time_before(jiffies, timeout))
17583 break;
17584 abort_count++;
17585 list_del_init(&dmabuf->hbuf.list);
17586 list_for_each_entry_safe(d_buf, dnext,
17587 &dmabuf->dbuf.list, list) {
17588 list_del_init(&d_buf->list);
17589 lpfc_in_buf_free(vport->phba, d_buf);
17590 }
17591 lpfc_in_buf_free(vport->phba, &dmabuf->dbuf);
17592 }
17593 if (abort_count)
17594 lpfc_update_rcv_time_stamp(vport);
17595}
17596
17597
17598
17599
17600
17601
17602
17603
17604
17605
17606
17607
17608
17609
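/**
 * lpfc_fc_frame_add - Adds a frame to the vport's list of received sequences
 * @vport: The vport to work on.
 * @dmabuf: The dmabuf descriptor of the frame to add.
 *
 * This function searches through the existing incomplete sequences that
 * have been sent to this @vport. If the frame matches one of the incomplete
 * sequences then the frame is linked into that sequence, sorted by sequence
 * count. If the frame does not match an existing incomplete sequence then
 * the frame becomes the head of a new sequence.
 **/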
17610static struct hbq_dmabuf *
17611lpfc_fc_frame_add(struct lpfc_vport *vport, struct hbq_dmabuf *dmabuf)
17612{
17613 struct fc_frame_header *new_hdr;
17614 struct fc_frame_header *temp_hdr;
17615 struct lpfc_dmabuf *d_buf;
17616 struct lpfc_dmabuf *h_buf;
17617 struct hbq_dmabuf *seq_dmabuf = NULL;
17618 struct hbq_dmabuf *temp_dmabuf = NULL;
17619 uint8_t found = 0;
17620
17621 INIT_LIST_HEAD(&dmabuf->dbuf.list);
17622 dmabuf->time_stamp = jiffies;
17623 new_hdr = (struct fc_frame_header *)dmabuf->hbuf.virt;
17624
17625
17626 list_for_each_entry(h_buf, &vport->rcv_buffer_list, list) {
17627 temp_hdr = (struct fc_frame_header *)h_buf->virt;
17628 if ((temp_hdr->fh_seq_id != new_hdr->fh_seq_id) ||
17629 (temp_hdr->fh_ox_id != new_hdr->fh_ox_id) ||
17630 (memcmp(&temp_hdr->fh_s_id, &new_hdr->fh_s_id, 3)))
17631 continue;
17632
17633 seq_dmabuf = container_of(h_buf, struct hbq_dmabuf, hbuf);
17634 break;
17635 }
17636 if (!seq_dmabuf) {
17637
17638
17639
17640
17641 list_add_tail(&dmabuf->hbuf.list, &vport->rcv_buffer_list);
17642 lpfc_update_rcv_time_stamp(vport);
17643 return dmabuf;
17644 }
17645 temp_hdr = seq_dmabuf->hbuf.virt;
17646 if (be16_to_cpu(new_hdr->fh_seq_cnt) <
17647 be16_to_cpu(temp_hdr->fh_seq_cnt)) {
17648 list_del_init(&seq_dmabuf->hbuf.list);
17649 list_add_tail(&dmabuf->hbuf.list, &vport->rcv_buffer_list);
17650 list_add_tail(&dmabuf->dbuf.list, &seq_dmabuf->dbuf.list);
17651 lpfc_update_rcv_time_stamp(vport);
17652 return dmabuf;
17653 }
17654
17655 list_move_tail(&seq_dmabuf->hbuf.list, &vport->rcv_buffer_list);
17656 seq_dmabuf->time_stamp = jiffies;
17657 lpfc_update_rcv_time_stamp(vport);
17658 if (list_empty(&seq_dmabuf->dbuf.list)) {
17659 temp_hdr = dmabuf->hbuf.virt;
17660 list_add_tail(&dmabuf->dbuf.list, &seq_dmabuf->dbuf.list);
17661 return seq_dmabuf;
17662 }
17663
17664 d_buf = list_entry(seq_dmabuf->dbuf.list.prev, typeof(*d_buf), list);
17665 while (!found) {
17666 temp_dmabuf = container_of(d_buf, struct hbq_dmabuf, dbuf);
17667 temp_hdr = (struct fc_frame_header *)temp_dmabuf->hbuf.virt;
17668
17669
17670
17671
17672 if (be16_to_cpu(new_hdr->fh_seq_cnt) >
17673 be16_to_cpu(temp_hdr->fh_seq_cnt)) {
17674 list_add(&dmabuf->dbuf.list, &temp_dmabuf->dbuf.list);
17675 found = 1;
17676 break;
17677 }
17678
17679 if (&d_buf->list == &seq_dmabuf->dbuf.list)
17680 break;
17681 d_buf = list_entry(d_buf->list.prev, typeof(*d_buf), list);
17682 }
17683
17684 if (found)
17685 return seq_dmabuf;
17686 return NULL;
17687}
17688
17689
17690
17691
17692
17693
17694
17695
17696
17697
17698
17699
17700
17701
17702
17703
17704
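/**
 * lpfc_sli4_abort_partial_seq - Abort partially assembled unsol sequence
 * @vport: pointer to a virtual port
 * @dmabuf: pointer to a dmabuf that describes the FC sequence
 *
 * This function tries to abort the partially assembled sequence described
 * by the basic abort @dmabuf. It checks whether such a partially assembled
 * sequence is held by the driver and, if so, frees all the frames of that
 * sequence.
 *
 * Return
 * true  -- a matching partially assembled sequence was present and all of
 *          its frames were freed;
 * false -- no matching partially assembled sequence was present, so nothing
 *          was aborted.
 **/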
17705static bool
17706lpfc_sli4_abort_partial_seq(struct lpfc_vport *vport,
17707 struct hbq_dmabuf *dmabuf)
17708{
17709 struct fc_frame_header *new_hdr;
17710 struct fc_frame_header *temp_hdr;
17711 struct lpfc_dmabuf *d_buf, *n_buf, *h_buf;
17712 struct hbq_dmabuf *seq_dmabuf = NULL;
17713
17714
17715 INIT_LIST_HEAD(&dmabuf->dbuf.list);
17716 INIT_LIST_HEAD(&dmabuf->hbuf.list);
17717 new_hdr = (struct fc_frame_header *)dmabuf->hbuf.virt;
17718 list_for_each_entry(h_buf, &vport->rcv_buffer_list, list) {
17719 temp_hdr = (struct fc_frame_header *)h_buf->virt;
17720 if ((temp_hdr->fh_seq_id != new_hdr->fh_seq_id) ||
17721 (temp_hdr->fh_ox_id != new_hdr->fh_ox_id) ||
17722 (memcmp(&temp_hdr->fh_s_id, &new_hdr->fh_s_id, 3)))
17723 continue;
17724
17725 seq_dmabuf = container_of(h_buf, struct hbq_dmabuf, hbuf);
17726 break;
17727 }
17728
17729
17730 if (seq_dmabuf) {
17731 list_for_each_entry_safe(d_buf, n_buf,
17732 &seq_dmabuf->dbuf.list, list) {
17733 list_del_init(&d_buf->list);
17734 lpfc_in_buf_free(vport->phba, d_buf);
17735 }
17736 return true;
17737 }
17738 return false;
17739}
17740
17741
17742
17743
17744
17745
17746
17747
17748
17749
17750
17751
17752
17753
17754
17755
17756
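/**
 * lpfc_sli4_abort_ulp_seq - Abort assembled unsol sequence from ulp
 * @vport: pointer to a virtual port
 * @dmabuf: pointer to a dmabuf that describes the FC sequence
 *
 * This function tries to abort a sequence that has already been delivered
 * to the upper layer protocol, based on the information in the basic abort
 * @dmabuf. If a matching pending context exists at the upper layer, it is
 * cleaned up.
 *
 * Return
 * true  -- a matching pending context was cleaned up at the ulp;
 * false -- no matching pending context was found at the ulp.
 **/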
17757static bool
17758lpfc_sli4_abort_ulp_seq(struct lpfc_vport *vport, struct hbq_dmabuf *dmabuf)
17759{
17760 struct lpfc_hba *phba = vport->phba;
17761 int handled;
17762
17763
17764 if (phba->sli_rev < LPFC_SLI_REV4)
17765 return false;
17766
17767
17768 handled = lpfc_ct_handle_unsol_abort(phba, dmabuf);
17769 if (handled)
17770 return true;
17771
17772 return false;
17773}
17774
17775
17776
17777
17778
17779
17780
17781
17782
17783
17784
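/**
 * lpfc_sli4_seq_abort_rsp_cmpl - BLS ABORT RSP seq abort iocb complete handler
 * @phba: Pointer to HBA context object.
 * @cmd_iocbq: pointer to the command iocbq structure.
 * @rsp_iocbq: pointer to the response iocbq structure.
 *
 * This function handles the sequence abort response iocb command complete
 * event. It releases the node reference and the iocbq allocated for the
 * sequence abort accept, and logs any failure status in the response.
 **/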
17785static void
17786lpfc_sli4_seq_abort_rsp_cmpl(struct lpfc_hba *phba,
17787 struct lpfc_iocbq *cmd_iocbq,
17788 struct lpfc_iocbq *rsp_iocbq)
17789{
17790 struct lpfc_nodelist *ndlp;
17791
17792 if (cmd_iocbq) {
17793 ndlp = (struct lpfc_nodelist *)cmd_iocbq->context1;
17794 lpfc_nlp_put(ndlp);
17795 lpfc_nlp_not_used(ndlp);
17796 lpfc_sli_release_iocbq(phba, cmd_iocbq);
17797 }
17798
17799
17800 if (rsp_iocbq && rsp_iocbq->iocb.ulpStatus)
17801 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
17802 "3154 BLS ABORT RSP failed, data: x%x/x%x\n",
17803 rsp_iocbq->iocb.ulpStatus,
17804 rsp_iocbq->iocb.un.ulpWord[4]);
17805}
17806
17807
17808
17809
17810
17811
17812
17813
17814
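/**
 * lpfc_sli4_xri_inrange - check xri is in range of xris owned by driver.
 * @phba: Pointer to HBA context object.
 * @xri: xri id in transaction.
 *
 * This function validates that the xri maps to the known range of XRIs
 * allocated and used by the driver. It returns the index of the matching
 * entry, or NO_XRI if the xri is not in range.
 **/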
17815uint16_t
17816lpfc_sli4_xri_inrange(struct lpfc_hba *phba,
17817 uint16_t xri)
17818{
17819 uint16_t i;
17820
17821 for (i = 0; i < phba->sli4_hba.max_cfg_param.max_xri; i++) {
17822 if (xri == phba->sli4_hba.xri_ids[i])
17823 return i;
17824 }
17825 return NO_XRI;
17826}
17827
17828
17829
17830
17831
17832
17833
17834
17835
17836
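/**
 * lpfc_sli4_seq_abort_rsp - bls rsp to sequence abort
 * @vport: pointer to a virtual port.
 * @fc_hdr: pointer to a FC frame header.
 * @aborted: whether the partially assembled receive sequence was
 *           successfully aborted.
 *
 * This function sends a basic response (BA_ACC or BA_RJT) to a previous
 * unsolicited sequence abort event after the sequence handling has been
 * aborted.
 **/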
17837void
17838lpfc_sli4_seq_abort_rsp(struct lpfc_vport *vport,
17839 struct fc_frame_header *fc_hdr, bool aborted)
17840{
17841 struct lpfc_hba *phba = vport->phba;
17842 struct lpfc_iocbq *ctiocb = NULL;
17843 struct lpfc_nodelist *ndlp;
17844 uint16_t oxid, rxid, xri, lxri;
17845 uint32_t sid, fctl;
17846 IOCB_t *icmd;
17847 int rc;
17848
17849 if (!lpfc_is_link_up(phba))
17850 return;
17851
17852 sid = sli4_sid_from_fc_hdr(fc_hdr);
17853 oxid = be16_to_cpu(fc_hdr->fh_ox_id);
17854 rxid = be16_to_cpu(fc_hdr->fh_rx_id);
17855
17856 ndlp = lpfc_findnode_did(vport, sid);
17857 if (!ndlp) {
17858 ndlp = lpfc_nlp_init(vport, sid);
17859 if (!ndlp) {
17860 lpfc_printf_vlog(vport, KERN_WARNING, LOG_ELS,
17861 "1268 Failed to allocate ndlp for "
17862 "oxid:x%x SID:x%x\n", oxid, sid);
17863 return;
17864 }
17865
17866 lpfc_enqueue_node(vport, ndlp);
17867 } else if (!NLP_CHK_NODE_ACT(ndlp)) {
17868
17869 ndlp = lpfc_enable_node(vport, ndlp, NLP_STE_UNUSED_NODE);
17870 if (!ndlp) {
			lpfc_printf_vlog(vport, KERN_WARNING, LOG_ELS,
					 "3275 Failed to activate ndlp found "
					 "for oxid:x%x SID:x%x\n", oxid, sid);
17874 return;
17875 }
17876 }
17877
17878
17879 ctiocb = lpfc_sli_get_iocbq(phba);
17880 if (!ctiocb)
17881 return;
17882
17883
17884 fctl = sli4_fctl_from_fc_hdr(fc_hdr);
17885
17886 icmd = &ctiocb->iocb;
17887 icmd->un.xseq64.bdl.bdeSize = 0;
17888 icmd->un.xseq64.bdl.ulpIoTag32 = 0;
17889 icmd->un.xseq64.w5.hcsw.Dfctl = 0;
17890 icmd->un.xseq64.w5.hcsw.Rctl = FC_RCTL_BA_ACC;
17891 icmd->un.xseq64.w5.hcsw.Type = FC_TYPE_BLS;
17892
17893
17894 icmd->ulpCommand = CMD_XMIT_BLS_RSP64_CX;
17895 icmd->ulpBdeCount = 0;
17896 icmd->ulpLe = 1;
17897 icmd->ulpClass = CLASS3;
17898 icmd->ulpContext = phba->sli4_hba.rpi_ids[ndlp->nlp_rpi];
17899 ctiocb->context1 = lpfc_nlp_get(ndlp);
17900
17901 ctiocb->vport = phba->pport;
17902 ctiocb->iocb_cmpl = lpfc_sli4_seq_abort_rsp_cmpl;
17903 ctiocb->sli4_lxritag = NO_XRI;
17904 ctiocb->sli4_xritag = NO_XRI;
17905
17906 if (fctl & FC_FC_EX_CTX)
17907
17908
17909
17910 xri = oxid;
17911 else
17912 xri = rxid;
17913 lxri = lpfc_sli4_xri_inrange(phba, xri);
17914 if (lxri != NO_XRI)
17915 lpfc_set_rrq_active(phba, ndlp, lxri,
17916 (xri == oxid) ? rxid : oxid, 0);
17917
17918
17919
17920
17921
17922 if ((fctl & FC_FC_EX_CTX) &&
17923 (lxri > lpfc_sli4_get_iocb_cnt(phba))) {
17924 icmd->un.xseq64.w5.hcsw.Rctl = FC_RCTL_BA_RJT;
17925 bf_set(lpfc_vndr_code, &icmd->un.bls_rsp, 0);
17926 bf_set(lpfc_rsn_expln, &icmd->un.bls_rsp, FC_BA_RJT_INV_XID);
17927 bf_set(lpfc_rsn_code, &icmd->un.bls_rsp, FC_BA_RJT_UNABLE);
17928 }
17929
17930
17931
17932
17933
17934 if (aborted == false) {
17935 icmd->un.xseq64.w5.hcsw.Rctl = FC_RCTL_BA_RJT;
17936 bf_set(lpfc_vndr_code, &icmd->un.bls_rsp, 0);
17937 bf_set(lpfc_rsn_expln, &icmd->un.bls_rsp, FC_BA_RJT_INV_XID);
17938 bf_set(lpfc_rsn_code, &icmd->un.bls_rsp, FC_BA_RJT_UNABLE);
17939 }
17940
17941 if (fctl & FC_FC_EX_CTX) {
17942
17943
17944
17945
17946 bf_set(lpfc_abts_orig, &icmd->un.bls_rsp, LPFC_ABTS_UNSOL_RSP);
17947 } else {
17948
17949
17950
17951
17952 bf_set(lpfc_abts_orig, &icmd->un.bls_rsp, LPFC_ABTS_UNSOL_INT);
17953 }
17954 bf_set(lpfc_abts_rxid, &icmd->un.bls_rsp, rxid);
17955 bf_set(lpfc_abts_oxid, &icmd->un.bls_rsp, oxid);
17956
17957
17958 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
17959 "1200 Send BLS cmd x%x on oxid x%x Data: x%x\n",
17960 icmd->un.xseq64.w5.hcsw.Rctl, oxid, phba->link_state);
17961
17962 rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, ctiocb, 0);
17963 if (rc == IOCB_ERROR) {
17964 lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
17965 "2925 Failed to issue CT ABTS RSP x%x on "
17966 "xri x%x, Data x%x\n",
17967 icmd->un.xseq64.w5.hcsw.Rctl, oxid,
17968 phba->link_state);
17969 lpfc_nlp_put(ndlp);
17970 ctiocb->context1 = NULL;
17971 lpfc_sli_release_iocbq(phba, ctiocb);
17972 }
17973}
17974
17975
17976
17977
17978
17979
17980
17981
17982
17983
17984
17985
17986
17987
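/**
 * lpfc_sli4_handle_unsol_abort - Handle sli-4 unsolicited abort event
 * @vport: Pointer to the vport on which this sequence was received
 * @dmabuf: pointer to a dmabuf that describes the FC sequence
 *
 * This function handles an SLI-4 unsolicited abort event. If the
 * unsolicited receive sequence is only partially assembled by the driver,
 * it aborts the partially assembled frames for the sequence. Otherwise, if
 * the sequence has already been delivered to the upper layer protocol, the
 * pending context at the ULP is cleaned up. A BLS response is then sent
 * unless the port is running in NVMET mode, in which case the abort is
 * handed to the NVMET unsolicited abort handler.
 **/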
17988static void
17989lpfc_sli4_handle_unsol_abort(struct lpfc_vport *vport,
17990 struct hbq_dmabuf *dmabuf)
17991{
17992 struct lpfc_hba *phba = vport->phba;
17993 struct fc_frame_header fc_hdr;
17994 uint32_t fctl;
17995 bool aborted;
17996
17997
17998 memcpy(&fc_hdr, dmabuf->hbuf.virt, sizeof(struct fc_frame_header));
17999 fctl = sli4_fctl_from_fc_hdr(&fc_hdr);
18000
18001 if (fctl & FC_FC_EX_CTX) {
18002
18003 aborted = true;
18004 } else {
18005
18006 aborted = lpfc_sli4_abort_partial_seq(vport, dmabuf);
18007 if (aborted == false)
18008 aborted = lpfc_sli4_abort_ulp_seq(vport, dmabuf);
18009 }
18010 lpfc_in_buf_free(phba, &dmabuf->dbuf);
18011
18012 if (phba->nvmet_support) {
18013 lpfc_nvmet_rcv_unsol_abort(vport, &fc_hdr);
18014 return;
18015 }
18016
18017
18018 lpfc_sli4_seq_abort_rsp(vport, &fc_hdr, aborted);
18019}
18020
18021
18022
18023
18024
18025
18026
18027
18028
18029
18030
18031
18032
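/**
 * lpfc_seq_complete - Determine if a sequence is complete
 * @dmabuf: pointer to a dmabuf that describes the FC sequence
 *
 * This function checks the sequence, starting with the frame described by
 * @dmabuf, to see if all the frames associated with this sequence are
 * present. The frames associated with this sequence are linked to @dmabuf
 * using the dbuf list. The function verifies that the first frame has a
 * sequence count of zero, that the counts are consecutive, and that some
 * frame has the end-of-sequence bit set. It returns 1 when the sequence is
 * complete and 0 otherwise.
 **/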
18033static int
18034lpfc_seq_complete(struct hbq_dmabuf *dmabuf)
18035{
18036 struct fc_frame_header *hdr;
18037 struct lpfc_dmabuf *d_buf;
18038 struct hbq_dmabuf *seq_dmabuf;
18039 uint32_t fctl;
18040 int seq_count = 0;
18041
18042 hdr = (struct fc_frame_header *)dmabuf->hbuf.virt;
18043
	/* The first frame of a sequence must have a sequence count of zero */
	if (be16_to_cpu(hdr->fh_seq_cnt) != seq_count)
		return 0;
18046 fctl = (hdr->fh_f_ctl[0] << 16 |
18047 hdr->fh_f_ctl[1] << 8 |
18048 hdr->fh_f_ctl[2]);
18049
18050 if (fctl & FC_FC_END_SEQ)
18051 return 1;
18052 list_for_each_entry(d_buf, &dmabuf->dbuf.list, list) {
18053 seq_dmabuf = container_of(d_buf, struct hbq_dmabuf, dbuf);
18054 hdr = (struct fc_frame_header *)seq_dmabuf->hbuf.virt;
18055
18056 if (++seq_count != be16_to_cpu(hdr->fh_seq_cnt))
18057 return 0;
18058 fctl = (hdr->fh_f_ctl[0] << 16 |
18059 hdr->fh_f_ctl[1] << 8 |
18060 hdr->fh_f_ctl[2]);
18061
18062 if (fctl & FC_FC_END_SEQ)
18063 return 1;
18064 }
18065 return 0;
18066}
18067
18068
18069
18070
18071
18072
18073
18074
18075
18076
18077
18078
18079
18080
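/**
 * lpfc_prep_seq - Prep sequence for ULP processing
 * @vport: Pointer to the vport on which this sequence was received
 * @seq_dmabuf: pointer to a dmabuf that describes the FC sequence
 *
 * This function takes a sequence, described by a list of frames, and
 * creates a list of iocbq structures to describe the sequence. This iocbq
 * list will be used to issue to the generic unsolicited sequence handler.
 * It returns the first iocbq in the list. If the function is unable to
 * allocate an iocbq then it throws out all of the frames.
 **/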
18081static struct lpfc_iocbq *
18082lpfc_prep_seq(struct lpfc_vport *vport, struct hbq_dmabuf *seq_dmabuf)
18083{
18084 struct hbq_dmabuf *hbq_buf;
18085 struct lpfc_dmabuf *d_buf, *n_buf;
18086 struct lpfc_iocbq *first_iocbq, *iocbq;
18087 struct fc_frame_header *fc_hdr;
18088 uint32_t sid;
18089 uint32_t len, tot_len;
18090 struct ulp_bde64 *pbde;
18091
18092 fc_hdr = (struct fc_frame_header *)seq_dmabuf->hbuf.virt;
18093
18094 list_del_init(&seq_dmabuf->hbuf.list);
18095 lpfc_update_rcv_time_stamp(vport);
18096
18097 sid = sli4_sid_from_fc_hdr(fc_hdr);
18098 tot_len = 0;
18099
18100 first_iocbq = lpfc_sli_get_iocbq(vport->phba);
18101 if (first_iocbq) {
18102
18103 first_iocbq->iocb.unsli3.rcvsli3.acc_len = 0;
18104 first_iocbq->iocb.ulpStatus = IOSTAT_SUCCESS;
18105 first_iocbq->vport = vport;
18106
18107
18108 if (sli4_type_from_fc_hdr(fc_hdr) == FC_TYPE_ELS) {
18109 first_iocbq->iocb.ulpCommand = CMD_IOCB_RCV_ELS64_CX;
18110 first_iocbq->iocb.un.rcvels.parmRo =
18111 sli4_did_from_fc_hdr(fc_hdr);
18112 first_iocbq->iocb.ulpPU = PARM_NPIV_DID;
18113 } else
18114 first_iocbq->iocb.ulpCommand = CMD_IOCB_RCV_SEQ64_CX;
18115 first_iocbq->iocb.ulpContext = NO_XRI;
18116 first_iocbq->iocb.unsli3.rcvsli3.ox_id =
18117 be16_to_cpu(fc_hdr->fh_ox_id);
18118
18119 first_iocbq->iocb.unsli3.rcvsli3.vpi =
18120 vport->phba->vpi_ids[vport->vpi];
18121
18122 tot_len = bf_get(lpfc_rcqe_length,
18123 &seq_dmabuf->cq_event.cqe.rcqe_cmpl);
18124
18125 first_iocbq->context2 = &seq_dmabuf->dbuf;
18126 first_iocbq->context3 = NULL;
18127 first_iocbq->iocb.ulpBdeCount = 1;
18128 if (tot_len > LPFC_DATA_BUF_SIZE)
18129 first_iocbq->iocb.un.cont64[0].tus.f.bdeSize =
18130 LPFC_DATA_BUF_SIZE;
18131 else
18132 first_iocbq->iocb.un.cont64[0].tus.f.bdeSize = tot_len;
18133
18134 first_iocbq->iocb.un.rcvels.remoteID = sid;
18135
18136 first_iocbq->iocb.unsli3.rcvsli3.acc_len = tot_len;
18137 }
18138 iocbq = first_iocbq;
18139
18140
18141
18142
18143 list_for_each_entry_safe(d_buf, n_buf, &seq_dmabuf->dbuf.list, list) {
18144 if (!iocbq) {
18145 lpfc_in_buf_free(vport->phba, d_buf);
18146 continue;
18147 }
18148 if (!iocbq->context3) {
18149 iocbq->context3 = d_buf;
18150 iocbq->iocb.ulpBdeCount++;
18151
18152 hbq_buf = container_of(d_buf, struct hbq_dmabuf, dbuf);
18153 len = bf_get(lpfc_rcqe_length,
18154 &hbq_buf->cq_event.cqe.rcqe_cmpl);
18155 pbde = (struct ulp_bde64 *)
18156 &iocbq->iocb.unsli3.sli3Words[4];
18157 if (len > LPFC_DATA_BUF_SIZE)
18158 pbde->tus.f.bdeSize = LPFC_DATA_BUF_SIZE;
18159 else
18160 pbde->tus.f.bdeSize = len;
18161
18162 iocbq->iocb.unsli3.rcvsli3.acc_len += len;
18163 tot_len += len;
18164 } else {
18165 iocbq = lpfc_sli_get_iocbq(vport->phba);
18166 if (!iocbq) {
18167 if (first_iocbq) {
18168 first_iocbq->iocb.ulpStatus =
18169 IOSTAT_FCP_RSP_ERROR;
18170 first_iocbq->iocb.un.ulpWord[4] =
18171 IOERR_NO_RESOURCES;
18172 }
18173 lpfc_in_buf_free(vport->phba, d_buf);
18174 continue;
18175 }
18176
18177 hbq_buf = container_of(d_buf, struct hbq_dmabuf, dbuf);
18178 len = bf_get(lpfc_rcqe_length,
18179 &hbq_buf->cq_event.cqe.rcqe_cmpl);
18180 iocbq->context2 = d_buf;
18181 iocbq->context3 = NULL;
18182 iocbq->iocb.ulpBdeCount = 1;
18183 if (len > LPFC_DATA_BUF_SIZE)
18184 iocbq->iocb.un.cont64[0].tus.f.bdeSize =
18185 LPFC_DATA_BUF_SIZE;
18186 else
18187 iocbq->iocb.un.cont64[0].tus.f.bdeSize = len;
18188
18189 tot_len += len;
18190 iocbq->iocb.unsli3.rcvsli3.acc_len = tot_len;
18191
18192 iocbq->iocb.un.rcvels.remoteID = sid;
18193 list_add_tail(&iocbq->list, &first_iocbq->list);
18194 }
18195 }
18196
18197 if (!first_iocbq)
18198 lpfc_in_buf_free(vport->phba, &seq_dmabuf->dbuf);
18199
18200 return first_iocbq;
18201}
18202
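/**
 * lpfc_sli4_send_seq_to_ulp - Deliver a reassembled sequence to the ULP
 * @vport: Pointer to the vport on which the sequence was received
 * @seq_dmabuf: pointer to the dmabuf heading the completed FC sequence
 *
 * This function converts the completed sequence into a chain of iocbqs,
 * hands it to the generic unsolicited sequence handler on the ELS ring,
 * and then releases all of the iocbqs once the handler returns.
 **/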
18203static void
18204lpfc_sli4_send_seq_to_ulp(struct lpfc_vport *vport,
18205 struct hbq_dmabuf *seq_dmabuf)
18206{
18207 struct fc_frame_header *fc_hdr;
18208 struct lpfc_iocbq *iocbq, *curr_iocb, *next_iocb;
18209 struct lpfc_hba *phba = vport->phba;
18210
18211 fc_hdr = (struct fc_frame_header *)seq_dmabuf->hbuf.virt;
18212 iocbq = lpfc_prep_seq(vport, seq_dmabuf);
18213 if (!iocbq) {
18214 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
18215 "2707 Ring %d handler: Failed to allocate "
18216 "iocb Rctl x%x Type x%x received\n",
18217 LPFC_ELS_RING,
18218 fc_hdr->fh_r_ctl, fc_hdr->fh_type);
18219 return;
18220 }
18221 if (!lpfc_complete_unsol_iocb(phba,
18222 phba->sli4_hba.els_wq->pring,
18223 iocbq, fc_hdr->fh_r_ctl,
18224 fc_hdr->fh_type))
18225 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
18226 "2540 Ring %d handler: unexpected Rctl "
18227 "x%x Type x%x received\n",
18228 LPFC_ELS_RING,
18229 fc_hdr->fh_r_ctl, fc_hdr->fh_type);
18230
18231
18232 list_for_each_entry_safe(curr_iocb, next_iocb,
18233 &iocbq->list, list) {
18234 list_del_init(&curr_iocb->list);
18235 lpfc_sli_release_iocbq(phba, curr_iocb);
18236 }
18237 lpfc_sli_release_iocbq(phba, iocbq);
18238}
18239
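/**
 * lpfc_sli4_mds_loopback_cmpl - MDS loopback send-frame completion handler
 * @phba: Pointer to HBA context object.
 * @cmdiocb: pointer to the command iocbq that completed.
 * @rspiocb: pointer to the response iocbq (unused here).
 *
 * This function frees the DMA buffer and iocbq used to echo an MDS
 * diagnostic loopback frame, then drains any queued transmits.
 **/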
18240static void
18241lpfc_sli4_mds_loopback_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
18242 struct lpfc_iocbq *rspiocb)
18243{
18244 struct lpfc_dmabuf *pcmd = cmdiocb->context2;
18245
18246 if (pcmd && pcmd->virt)
18247 dma_pool_free(phba->lpfc_drb_pool, pcmd->virt, pcmd->phys);
18248 kfree(pcmd);
18249 lpfc_sli_release_iocbq(phba, cmdiocb);
18250 lpfc_drain_txq(phba);
18251}
18252
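/**
 * lpfc_sli4_handle_mds_loopback - Echo a received MDS diagnostic frame
 * @vport: pointer to the virtual port the frame was received on.
 * @dmabuf: the dmabuf that describes the received frame.
 *
 * This function copies the received frame into a newly allocated DMA
 * buffer and issues a SEND_FRAME WQE to transmit it back out the port.
 * If no iocbq is available, the event is queued for the worker thread
 * to retry later.
 **/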
18253static void
18254lpfc_sli4_handle_mds_loopback(struct lpfc_vport *vport,
18255 struct hbq_dmabuf *dmabuf)
18256{
18257 struct fc_frame_header *fc_hdr;
18258 struct lpfc_hba *phba = vport->phba;
18259 struct lpfc_iocbq *iocbq = NULL;
18260 union lpfc_wqe *wqe;
18261 struct lpfc_dmabuf *pcmd = NULL;
18262 uint32_t frame_len;
18263 int rc;
18264 unsigned long iflags;
18265
18266 fc_hdr = (struct fc_frame_header *)dmabuf->hbuf.virt;
18267 frame_len = bf_get(lpfc_rcqe_length, &dmabuf->cq_event.cqe.rcqe_cmpl);
18268
18269
18270 iocbq = lpfc_sli_get_iocbq(phba);
18271 if (!iocbq) {
18272
18273 spin_lock_irqsave(&phba->hbalock, iflags);
18274 list_add_tail(&dmabuf->cq_event.list,
18275 &phba->sli4_hba.sp_queue_event);
18276 phba->hba_flag |= HBA_SP_QUEUE_EVT;
18277 spin_unlock_irqrestore(&phba->hbalock, iflags);
18278 lpfc_worker_wake_up(phba);
18279 return;
18280 }
18281
18282
18283 pcmd = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
18284 if (pcmd)
18285 pcmd->virt = dma_pool_alloc(phba->lpfc_drb_pool, GFP_KERNEL,
18286 &pcmd->phys);
18287 if (!pcmd || !pcmd->virt)
18288 goto exit;
18289
18290 INIT_LIST_HEAD(&pcmd->list);
18291
18292
18293 memcpy(pcmd->virt, dmabuf->dbuf.virt, frame_len);
18294
18295
18296 iocbq->iocb.un.xseq64.bdl.addrHigh = putPaddrHigh(pcmd->phys);
18297 iocbq->iocb.un.xseq64.bdl.addrLow = putPaddrLow(pcmd->phys);
18298 iocbq->iocb.un.xseq64.bdl.bdeFlags = BUFF_TYPE_BDE_64;
18299 iocbq->iocb.un.xseq64.bdl.bdeSize = frame_len;
18300
18301 iocbq->context2 = pcmd;
18302 iocbq->vport = vport;
18303 iocbq->iocb_flag &= ~LPFC_FIP_ELS_ID_MASK;
18304 iocbq->iocb_flag |= LPFC_USE_FCPWQIDX;
18305
18306
18307
18308
18309
18310 wqe = (union lpfc_wqe *)&iocbq->iocb;
18311
18312 wqe->send_frame.frame_len = frame_len;
18313 wqe->send_frame.fc_hdr_wd0 = be32_to_cpu(*((uint32_t *)fc_hdr));
18314 wqe->send_frame.fc_hdr_wd1 = be32_to_cpu(*((uint32_t *)fc_hdr + 1));
18315 wqe->send_frame.fc_hdr_wd2 = be32_to_cpu(*((uint32_t *)fc_hdr + 2));
18316 wqe->send_frame.fc_hdr_wd3 = be32_to_cpu(*((uint32_t *)fc_hdr + 3));
18317 wqe->send_frame.fc_hdr_wd4 = be32_to_cpu(*((uint32_t *)fc_hdr + 4));
18318 wqe->send_frame.fc_hdr_wd5 = be32_to_cpu(*((uint32_t *)fc_hdr + 5));
18319
18320 iocbq->iocb.ulpCommand = CMD_SEND_FRAME;
18321 iocbq->iocb.ulpLe = 1;
18322 iocbq->iocb_cmpl = lpfc_sli4_mds_loopback_cmpl;
18323 rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, iocbq, 0);
18324 if (rc == IOCB_ERROR)
18325 goto exit;
18326
18327 lpfc_in_buf_free(phba, &dmabuf->dbuf);
18328 return;
18329
18330exit:
18331 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
18332 "2023 Unable to process MDS loopback frame\n");
18333 if (pcmd && pcmd->virt)
18334 dma_pool_free(phba->lpfc_drb_pool, pcmd->virt, pcmd->phys);
18335 kfree(pcmd);
18336 if (iocbq)
18337 lpfc_sli_release_iocbq(phba, iocbq);
18338 lpfc_in_buf_free(phba, &dmabuf->dbuf);
18339}
18340
18341
18342
18343
18344
18345
18346
18347
18348
18349
18350
18351
18352
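/**
 * lpfc_sli4_handle_received_buffer - Handle received buffers from firmware
 * @phba: Pointer to HBA context object.
 * @dmabuf: Pointer to a dmabuf that describes the FC sequence.
 *
 * This function is called with no lock held. It processes received buffers
 * and hands a sequence to the upper layers when the received buffer is the
 * final frame in the sequence. Frames that fail validation, cannot be
 * matched to a vport, or arrive on an unregistered vpi are dropped; BA_ABTS
 * frames are routed to the unsolicited abort handler and MDS diagnostic
 * frames to the loopback handler.
 **/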
18353void
18354lpfc_sli4_handle_received_buffer(struct lpfc_hba *phba,
18355 struct hbq_dmabuf *dmabuf)
18356{
18357 struct hbq_dmabuf *seq_dmabuf;
18358 struct fc_frame_header *fc_hdr;
18359 struct lpfc_vport *vport;
18360 uint32_t fcfi;
18361 uint32_t did;
18362
18363
18364 fc_hdr = (struct fc_frame_header *)dmabuf->hbuf.virt;
18365
18366 if (fc_hdr->fh_r_ctl == FC_RCTL_MDS_DIAGS ||
18367 fc_hdr->fh_r_ctl == FC_RCTL_DD_UNSOL_DATA) {
18368 vport = phba->pport;
18369
18370 if (!(phba->pport->load_flag & FC_UNLOADING))
18371 lpfc_sli4_handle_mds_loopback(vport, dmabuf);
18372 else
18373 lpfc_in_buf_free(phba, &dmabuf->dbuf);
18374 return;
18375 }
18376
18377
18378 if (lpfc_fc_frame_check(phba, fc_hdr)) {
18379 lpfc_in_buf_free(phba, &dmabuf->dbuf);
18380 return;
18381 }
18382
18383 if ((bf_get(lpfc_cqe_code,
18384 &dmabuf->cq_event.cqe.rcqe_cmpl) == CQE_CODE_RECEIVE_V1))
18385 fcfi = bf_get(lpfc_rcqe_fcf_id_v1,
18386 &dmabuf->cq_event.cqe.rcqe_cmpl);
18387 else
18388 fcfi = bf_get(lpfc_rcqe_fcf_id,
18389 &dmabuf->cq_event.cqe.rcqe_cmpl);
18390
	if (fc_hdr->fh_r_ctl == FC_RCTL_MDS_DIAGS &&
	    fc_hdr->fh_type == 0xFF) {
18392 vport = phba->pport;
18393 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
18394 "2023 MDS Loopback %d bytes\n",
18395 bf_get(lpfc_rcqe_length,
18396 &dmabuf->cq_event.cqe.rcqe_cmpl));
18397
18398 lpfc_sli4_handle_mds_loopback(vport, dmabuf);
18399 return;
18400 }
18401
18402
18403 did = sli4_did_from_fc_hdr(fc_hdr);
18404
18405 vport = lpfc_fc_frame_to_vport(phba, fc_hdr, fcfi, did);
18406 if (!vport) {
18407
18408 lpfc_in_buf_free(phba, &dmabuf->dbuf);
18409 return;
18410 }
18411
18412
18413 if (!(vport->vpi_state & LPFC_VPI_REGISTERED) &&
18414 (did != Fabric_DID)) {
18415
18416
18417
18418
18419
18420 if (!(vport->fc_flag & FC_PT2PT) ||
18421 (phba->link_state == LPFC_HBA_READY)) {
18422 lpfc_in_buf_free(phba, &dmabuf->dbuf);
18423 return;
18424 }
18425 }
18426
18427
18428 if (fc_hdr->fh_r_ctl == FC_RCTL_BA_ABTS) {
18429 lpfc_sli4_handle_unsol_abort(vport, dmabuf);
18430 return;
18431 }
18432
18433
18434 seq_dmabuf = lpfc_fc_frame_add(vport, dmabuf);
18435 if (!seq_dmabuf) {
18436
18437 lpfc_in_buf_free(phba, &dmabuf->dbuf);
18438 return;
18439 }
18440
18441 if (!lpfc_seq_complete(seq_dmabuf))
18442 return;
18443
18444
18445 lpfc_sli4_send_seq_to_ulp(vport, seq_dmabuf);
18446}
18447
18448
18449
18450
18451
18452
18453
18454
18455
18456
18457
18458
18459
18460
18461
18462
18463
18464
18465
18466
18467
18468
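/**
 * lpfc_sli4_post_all_rpi_hdrs - Post the rpi header memory region to the port
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked to post rpi header templates to the HBA
 * consistent with the SLI-4 interface spec. This routine does not require
 * any locks; it is expected to be used during driver load or reset
 * recovery, when operation is sequential.
 *
 * Return codes
 *	0 - successful
 *	-EIO - the mailbox failed to complete successfully. When this error
 *	occurs, the driver is not guaranteed to have any rpi regions posted
 *	to the device and must either repost the regions or treat it as a
 *	fatal error.
 **/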
18469int
18470lpfc_sli4_post_all_rpi_hdrs(struct lpfc_hba *phba)
18471{
18472 struct lpfc_rpi_hdr *rpi_page;
18473 uint32_t rc = 0;
18474 uint16_t lrpi = 0;
18475
18476
18477 if (!phba->sli4_hba.rpi_hdrs_in_use)
18478 goto exit;
18479 if (phba->sli4_hba.extents_in_use)
18480 return -EIO;
18481
18482 list_for_each_entry(rpi_page, &phba->sli4_hba.lpfc_rpi_hdr_list, list) {
18483
18484
18485
18486
18487
18488 if (bf_get(lpfc_rpi_rsrc_rdy, &phba->sli4_hba.sli4_flags) !=
18489 LPFC_RPI_RSRC_RDY)
18490 rpi_page->start_rpi = phba->sli4_hba.rpi_ids[lrpi];
18491
18492 rc = lpfc_sli4_post_rpi_hdr(phba, rpi_page);
18493 if (rc != MBX_SUCCESS) {
18494 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
18495 "2008 Error %d posting all rpi "
18496 "headers\n", rc);
18497 rc = -EIO;
18498 break;
18499 }
18500 }
18501
18502 exit:
18503 bf_set(lpfc_rpi_rsrc_rdy, &phba->sli4_hba.sli4_flags,
18504 LPFC_RPI_RSRC_RDY);
18505 return rc;
18506}
18507
18508
18509
18510
18511
18512
18513
18514
18515
18516
18517
18518
18519
18520
18521
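/**
 * lpfc_sli4_post_rpi_hdr - Post an rpi header memory region to the port
 * @phba: pointer to lpfc hba data structure.
 * @rpi_page: pointer to the rpi memory region.
 *
 * This routine is invoked to post a single rpi header to the HBA
 * consistent with the SLI-4 interface spec. This memory region maps up to
 * 64 rpi context regions.
 *
 * Return codes
 *	0 - successful
 *	-ENOMEM - no available memory
 *	-ENXIO - the mailbox failed to complete successfully
 **/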
18522int
18523lpfc_sli4_post_rpi_hdr(struct lpfc_hba *phba, struct lpfc_rpi_hdr *rpi_page)
18524{
18525 LPFC_MBOXQ_t *mboxq;
18526 struct lpfc_mbx_post_hdr_tmpl *hdr_tmpl;
18527 uint32_t rc = 0;
18528 uint32_t shdr_status, shdr_add_status;
18529 union lpfc_sli4_cfg_shdr *shdr;
18530
18531
18532 if (!phba->sli4_hba.rpi_hdrs_in_use)
18533 return rc;
18534 if (phba->sli4_hba.extents_in_use)
18535 return -EIO;
18536
18537
18538 mboxq = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
18539 if (!mboxq) {
18540 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
18541 "2001 Unable to allocate memory for issuing "
18542 "SLI_CONFIG_SPECIAL mailbox command\n");
18543 return -ENOMEM;
18544 }
18545
18546
18547 hdr_tmpl = &mboxq->u.mqe.un.hdr_tmpl;
18548 lpfc_sli4_config(phba, mboxq, LPFC_MBOX_SUBSYSTEM_FCOE,
18549 LPFC_MBOX_OPCODE_FCOE_POST_HDR_TEMPLATE,
18550 sizeof(struct lpfc_mbx_post_hdr_tmpl) -
18551 sizeof(struct lpfc_sli4_cfg_mhdr),
18552 LPFC_SLI4_MBX_EMBED);
18553
18554
18555
18556 bf_set(lpfc_mbx_post_hdr_tmpl_rpi_offset, hdr_tmpl,
18557 rpi_page->start_rpi);
18558 bf_set(lpfc_mbx_post_hdr_tmpl_page_cnt,
18559 hdr_tmpl, rpi_page->page_count);
18560
18561 hdr_tmpl->rpi_paddr_lo = putPaddrLow(rpi_page->dmabuf->phys);
18562 hdr_tmpl->rpi_paddr_hi = putPaddrHigh(rpi_page->dmabuf->phys);
18563 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
18564 shdr = (union lpfc_sli4_cfg_shdr *) &hdr_tmpl->header.cfg_shdr;
18565 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
18566 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
18567 if (rc != MBX_TIMEOUT)
18568 mempool_free(mboxq, phba->mbox_mem_pool);
18569 if (shdr_status || shdr_add_status || rc) {
18570 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
18571 "2514 POST_RPI_HDR mailbox failed with "
18572 "status x%x add_status x%x, mbx status x%x\n",
18573 shdr_status, shdr_add_status, rc);
18574 rc = -ENXIO;
18575 } else {
18576
18577
18578
18579
18580 spin_lock_irq(&phba->hbalock);
18581 phba->sli4_hba.next_rpi = rpi_page->next_rpi;
18582 spin_unlock_irq(&phba->hbalock);
18583 }
18584 return rc;
18585}
18586
18587
18588
18589
18590
18591
18592
18593
18594
18595
18596
18597
18598
18599
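/**
 * lpfc_sli4_alloc_rpi - Get an available rpi in the device's range
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine allocates an rpi from the port's rpi bitmask. If the
 * allocation succeeds and the number of remaining rpis falls below the low
 * water mark, a new rpi header page is created and posted so the port can
 * grow its rpi count on demand.
 *
 * An illustrative call pattern (a sketch of a hypothetical caller, not
 * code from this file):
 *
 *	rpi = lpfc_sli4_alloc_rpi(phba);
 *	if (rpi == LPFC_RPI_ALLOC_ERROR)
 *		return -ENOSPC;
 *	...
 *	lpfc_sli4_free_rpi(phba, rpi);
 *
 * Returns the allocated rpi, or LPFC_RPI_ALLOC_ERROR on exhaustion.
 **/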
18600int
18601lpfc_sli4_alloc_rpi(struct lpfc_hba *phba)
18602{
18603 unsigned long rpi;
18604 uint16_t max_rpi, rpi_limit;
18605 uint16_t rpi_remaining, lrpi = 0;
18606 struct lpfc_rpi_hdr *rpi_hdr;
18607 unsigned long iflag;
18608
18609
18610
18611
18612
18613 spin_lock_irqsave(&phba->hbalock, iflag);
18614 max_rpi = phba->sli4_hba.max_cfg_param.max_rpi;
18615 rpi_limit = phba->sli4_hba.next_rpi;
18616
18617 rpi = find_next_zero_bit(phba->sli4_hba.rpi_bmask, rpi_limit, 0);
18618 if (rpi >= rpi_limit)
18619 rpi = LPFC_RPI_ALLOC_ERROR;
18620 else {
18621 set_bit(rpi, phba->sli4_hba.rpi_bmask);
18622 phba->sli4_hba.max_cfg_param.rpi_used++;
18623 phba->sli4_hba.rpi_count++;
18624 }
18625 lpfc_printf_log(phba, KERN_INFO,
18626 LOG_NODE | LOG_DISCOVERY,
18627 "0001 Allocated rpi:x%x max:x%x lim:x%x\n",
18628 (int) rpi, max_rpi, rpi_limit);
18629
18630
18631
18632
18633
18634 if ((rpi == LPFC_RPI_ALLOC_ERROR) &&
18635 (phba->sli4_hba.rpi_count >= max_rpi)) {
18636 spin_unlock_irqrestore(&phba->hbalock, iflag);
18637 return rpi;
18638 }
18639
18640
18641
18642
18643
18644 if (!phba->sli4_hba.rpi_hdrs_in_use) {
18645 spin_unlock_irqrestore(&phba->hbalock, iflag);
18646 return rpi;
18647 }
18648
18649
18650
18651
18652
18653
18654
18655 rpi_remaining = phba->sli4_hba.next_rpi - phba->sli4_hba.rpi_count;
18656 spin_unlock_irqrestore(&phba->hbalock, iflag);
18657 if (rpi_remaining < LPFC_RPI_LOW_WATER_MARK) {
18658 rpi_hdr = lpfc_sli4_create_rpi_hdr(phba);
18659 if (!rpi_hdr) {
18660 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
18661 "2002 Error Could not grow rpi "
18662 "count\n");
18663 } else {
18664 lrpi = rpi_hdr->start_rpi;
18665 rpi_hdr->start_rpi = phba->sli4_hba.rpi_ids[lrpi];
18666 lpfc_sli4_post_rpi_hdr(phba, rpi_hdr);
18667 }
18668 }
18669
18670 return rpi;
18671}
18672
18673
18674
18675
18676
18677
18678
18679
18680
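/**
 * __lpfc_sli4_free_rpi - Release an rpi for reuse.
 * @phba: pointer to lpfc hba data structure.
 * @rpi: rpi to free.
 *
 * This routine is invoked to release an rpi to the pool of available rpis
 * maintained by the driver. The caller must hold the hbalock.
 **/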
18681static void
18682__lpfc_sli4_free_rpi(struct lpfc_hba *phba, int rpi)
18683{
18684
18685
18686
18687
18688 if (rpi == LPFC_RPI_ALLOC_ERROR)
18689 return;
18690
18691 if (test_and_clear_bit(rpi, phba->sli4_hba.rpi_bmask)) {
18692 phba->sli4_hba.rpi_count--;
18693 phba->sli4_hba.max_cfg_param.rpi_used--;
18694 } else {
18695 lpfc_printf_log(phba, KERN_INFO,
18696 LOG_NODE | LOG_DISCOVERY,
18697 "2016 rpi %x not inuse\n",
18698 rpi);
18699 }
18700}
18701
18702
18703
18704
18705
18706
18707
18708
18709
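/**
 * lpfc_sli4_free_rpi - Release an rpi for reuse.
 * @phba: pointer to lpfc hba data structure.
 * @rpi: rpi to free.
 *
 * This routine takes the hbalock and then releases @rpi to the pool of
 * available rpis maintained by the driver.
 **/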
18710void
18711lpfc_sli4_free_rpi(struct lpfc_hba *phba, int rpi)
18712{
18713 spin_lock_irq(&phba->hbalock);
18714 __lpfc_sli4_free_rpi(phba, rpi);
18715 spin_unlock_irq(&phba->hbalock);
18716}
18717
18718
18719
18720
18721
18722
18723
18724
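/**
 * lpfc_sli4_remove_rpis - Remove the rpi bitmask region
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked to remove the memory regions that provide rpi
 * accounting via bitmask and id arrays.
 **/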
18725void
18726lpfc_sli4_remove_rpis(struct lpfc_hba *phba)
18727{
18728 kfree(phba->sli4_hba.rpi_bmask);
18729 kfree(phba->sli4_hba.rpi_ids);
18730 bf_set(lpfc_rpi_rsrc_rdy, &phba->sli4_hba.sli4_flags, 0);
18731}
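
/**
 * lpfc_sli4_resume_rpi - Resume the rpi associated with a node
 * @ndlp: pointer to lpfc nodelist data structure.
 * @cmpl: optional completion call-back.
 * @arg: data to be passed to the completion as the context buffer.
 *
 * Issues a RESUME_RPI mailbox command for the node's rpi.
 *
 * Return: 0 on success, -ENOMEM or -EIO on failure.
 **/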
18742int
18743lpfc_sli4_resume_rpi(struct lpfc_nodelist *ndlp,
18744 void (*cmpl)(struct lpfc_hba *, LPFC_MBOXQ_t *), void *arg)
18745{
18746 LPFC_MBOXQ_t *mboxq;
18747 struct lpfc_hba *phba = ndlp->phba;
18748 int rc;
18749
18750
18751 mboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
18752 if (!mboxq)
18753 return -ENOMEM;
18754
18755
18756 lpfc_resume_rpi(mboxq, ndlp);
18757 if (cmpl) {
18758 mboxq->mbox_cmpl = cmpl;
18759 mboxq->ctx_buf = arg;
18760 mboxq->ctx_ndlp = ndlp;
18761 } else
18762 mboxq->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
18763 mboxq->vport = ndlp->vport;
18764 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_NOWAIT);
18765 if (rc == MBX_NOT_FINISHED) {
18766 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
18767 "2010 Resume RPI Mailbox failed "
18768 "status %d, mbxStatus x%x\n", rc,
18769 bf_get(lpfc_mqe_status, &mboxq->u.mqe));
18770 mempool_free(mboxq, phba->mbox_mem_pool);
18771 return -EIO;
18772 }
18773 return 0;
18774}
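
/**
 * lpfc_sli4_init_vpi - Initialize a vpi with the port
 * @vport: Pointer to the vport for which the vpi is being initialized
 *
 * This routine is invoked to activate a vpi with the port.
 *
 * Returns:
 *    0 success
 *    -Evalue otherwise
 **/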
18786int
18787lpfc_sli4_init_vpi(struct lpfc_vport *vport)
18788{
18789 LPFC_MBOXQ_t *mboxq;
18790 int rc = 0;
18791 int retval = MBX_SUCCESS;
18792 uint32_t mbox_tmo;
18793 struct lpfc_hba *phba = vport->phba;
18794 mboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
18795 if (!mboxq)
18796 return -ENOMEM;
18797 lpfc_init_vpi(phba, mboxq, vport->vpi);
18798 mbox_tmo = lpfc_mbox_tmo_val(phba, mboxq);
18799 rc = lpfc_sli_issue_mbox_wait(phba, mboxq, mbox_tmo);
18800 if (rc != MBX_SUCCESS) {
18801 lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
18802 "2022 INIT VPI Mailbox failed "
18803 "status %d, mbxStatus x%x\n", rc,
18804 bf_get(lpfc_mqe_status, &mboxq->u.mqe));
18805 retval = -EIO;
18806 }
18807 if (rc != MBX_TIMEOUT)
18808 mempool_free(mboxq, vport->phba->mbox_mem_pool);
18809
18810 return retval;
18811}
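
/**
 * lpfc_mbx_cmpl_add_fcf_record - add fcf mbox completion handler.
 * @phba: pointer to lpfc hba data structure.
 * @mboxq: Pointer to mailbox object.
 *
 * Completion handler for the ADD_FCF_RECORD non-embedded mailbox
 * command; it logs any failure status and frees the mailbox resources.
 **/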
18822static void
18823lpfc_mbx_cmpl_add_fcf_record(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
18824{
18825 void *virt_addr;
18826 union lpfc_sli4_cfg_shdr *shdr;
18827 uint32_t shdr_status, shdr_add_status;
18828
18829 virt_addr = mboxq->sge_array->addr[0];
	/* The IOCTL status is embedded in the mailbox subheader. */
18831 shdr = (union lpfc_sli4_cfg_shdr *) virt_addr;
18832 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
18833 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
18834
18835 if ((shdr_status || shdr_add_status) &&
18836 (shdr_status != STATUS_FCF_IN_USE))
18837 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
18838 "2558 ADD_FCF_RECORD mailbox failed with "
18839 "status x%x add_status x%x\n",
18840 shdr_status, shdr_add_status);
18841
18842 lpfc_sli4_mbox_cmd_free(phba, mboxq);
18843}
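
/**
 * lpfc_sli4_add_fcf_record - Manually add an FCF Record.
 * @phba: pointer to lpfc hba data structure.
 * @fcf_record: pointer to the initialized fcf record to add.
 *
 * This routine is invoked to manually add a single FCF record. The caller
 * must pass a completely initialized FCF_Record. This routine takes
 * care of the nonembedded mailbox operations.
 **/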
18854int
18855lpfc_sli4_add_fcf_record(struct lpfc_hba *phba, struct fcf_record *fcf_record)
18856{
18857 int rc = 0;
18858 LPFC_MBOXQ_t *mboxq;
18859 uint8_t *bytep;
18860 void *virt_addr;
18861 struct lpfc_mbx_sge sge;
18862 uint32_t alloc_len, req_len;
18863 uint32_t fcfindex;
18864
18865 mboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
18866 if (!mboxq) {
18867 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
18868 "2009 Failed to allocate mbox for ADD_FCF cmd\n");
18869 return -ENOMEM;
18870 }
18871
18872 req_len = sizeof(struct fcf_record) + sizeof(union lpfc_sli4_cfg_shdr) +
18873 sizeof(uint32_t);

	/* Allocate DMA memory and set up the non-embedded mailbox command */
18876 alloc_len = lpfc_sli4_config(phba, mboxq, LPFC_MBOX_SUBSYSTEM_FCOE,
18877 LPFC_MBOX_OPCODE_FCOE_ADD_FCF,
18878 req_len, LPFC_SLI4_MBX_NEMBED);
18879 if (alloc_len < req_len) {
18880 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
18881 "2523 Allocated DMA memory size (x%x) is "
18882 "less than the requested DMA memory "
18883 "size (x%x)\n", alloc_len, req_len);
18884 lpfc_sli4_mbox_cmd_free(phba, mboxq);
18885 return -ENOMEM;
18886 }

	/*
	 * Get the first SGE entry from the non-embedded DMA memory.  This
	 * routine only uses a single SGE.
	 */
18892 lpfc_sli4_mbx_sge_get(mboxq, 0, &sge);
18893 virt_addr = mboxq->sge_array->addr[0];
	/*
	 * Configure the FCF record for FCFI 0.  This is the driver's
	 * hardcoded default and gets used in nonFIP mode.
	 */
18898 fcfindex = bf_get(lpfc_fcf_record_fcf_index, fcf_record);
18899 bytep = virt_addr + sizeof(union lpfc_sli4_cfg_shdr);
18900 lpfc_sli_pcimem_bcopy(&fcfindex, bytep, sizeof(uint32_t));

	/*
	 * Copy the fcf_index and the FCF Record Data. The data starts after
	 * the FCoE header plus word10. The data copy needs to be endian
	 * correct.
	 */
18907 bytep += sizeof(uint32_t);
18908 lpfc_sli_pcimem_bcopy(fcf_record, bytep, sizeof(struct fcf_record));
18909 mboxq->vport = phba->pport;
18910 mboxq->mbox_cmpl = lpfc_mbx_cmpl_add_fcf_record;
18911 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_NOWAIT);
18912 if (rc == MBX_NOT_FINISHED) {
18913 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
18914 "2515 ADD_FCF_RECORD mailbox failed with "
18915 "status 0x%x\n", rc);
18916 lpfc_sli4_mbox_cmd_free(phba, mboxq);
18917 rc = -EIO;
18918 } else
18919 rc = 0;
18920
18921 return rc;
18922}
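
/**
 * lpfc_sli4_build_dflt_fcf_record - Build the driver's default FCF Record.
 * @phba: pointer to lpfc hba data structure.
 * @fcf_record: pointer to the fcf record to build the default data.
 * @fcf_index: FCF table entry index.
 *
 * This routine is invoked to build the driver's default FCF record. The
 * values used are hardcoded. This routine handles memory initialization.
 **/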
18934void
18935lpfc_sli4_build_dflt_fcf_record(struct lpfc_hba *phba,
18936 struct fcf_record *fcf_record,
18937 uint16_t fcf_index)
18938{
18939 memset(fcf_record, 0, sizeof(struct fcf_record));
18940 fcf_record->max_rcv_size = LPFC_FCOE_MAX_RCV_SIZE;
18941 fcf_record->fka_adv_period = LPFC_FCOE_FKA_ADV_PER;
18942 fcf_record->fip_priority = LPFC_FCOE_FIP_PRIORITY;
18943 bf_set(lpfc_fcf_record_mac_0, fcf_record, phba->fc_map[0]);
18944 bf_set(lpfc_fcf_record_mac_1, fcf_record, phba->fc_map[1]);
18945 bf_set(lpfc_fcf_record_mac_2, fcf_record, phba->fc_map[2]);
18946 bf_set(lpfc_fcf_record_mac_3, fcf_record, LPFC_FCOE_FCF_MAC3);
18947 bf_set(lpfc_fcf_record_mac_4, fcf_record, LPFC_FCOE_FCF_MAC4);
18948 bf_set(lpfc_fcf_record_mac_5, fcf_record, LPFC_FCOE_FCF_MAC5);
18949 bf_set(lpfc_fcf_record_fc_map_0, fcf_record, phba->fc_map[0]);
18950 bf_set(lpfc_fcf_record_fc_map_1, fcf_record, phba->fc_map[1]);
18951 bf_set(lpfc_fcf_record_fc_map_2, fcf_record, phba->fc_map[2]);
18952 bf_set(lpfc_fcf_record_fcf_valid, fcf_record, 1);
18953 bf_set(lpfc_fcf_record_fcf_avail, fcf_record, 1);
18954 bf_set(lpfc_fcf_record_fcf_index, fcf_record, fcf_index);
18955 bf_set(lpfc_fcf_record_mac_addr_prov, fcf_record,
18956 LPFC_FCF_FPMA | LPFC_FCF_SPMA);
	/* Set the VLAN bit map */
18958 if (phba->valid_vlan) {
18959 fcf_record->vlan_bitmap[phba->vlan_id / 8]
18960 = 1 << (phba->vlan_id % 8);
18961 }
18962}
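
/**
 * lpfc_sli4_fcf_scan_read_fcf_rec - Read hba fcf record for fcf scan.
 * @phba: pointer to lpfc hba data structure.
 * @fcf_index: FCF table entry offset.
 *
 * This routine is invoked to scan the entire FCF table by reading FCF
 * record and processing it one at a time starting from the @fcf_index
 * for initial FCF discovery or fast FCF failover rediscovery.
 *
 * Return 0 if the mailbox command is submitted successfully, non-zero
 * otherwise.
 **/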
18976int
18977lpfc_sli4_fcf_scan_read_fcf_rec(struct lpfc_hba *phba, uint16_t fcf_index)
18978{
18979 int rc = 0, error;
18980 LPFC_MBOXQ_t *mboxq;
18981
18982 phba->fcoe_eventtag_at_fcf_scan = phba->fcoe_eventtag;
18983 phba->fcoe_cvl_eventtag_attn = phba->fcoe_cvl_eventtag;
18984 mboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
18985 if (!mboxq) {
18986 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
18987 "2000 Failed to allocate mbox for "
18988 "READ_FCF cmd\n");
18989 error = -ENOMEM;
18990 goto fail_fcf_scan;
18991 }
18992
18993 rc = lpfc_sli4_mbx_read_fcf_rec(phba, mboxq, fcf_index);
18994 if (rc) {
18995 error = -EINVAL;
18996 goto fail_fcf_scan;
18997 }
18998
18999 mboxq->vport = phba->pport;
19000 mboxq->mbox_cmpl = lpfc_mbx_cmpl_fcf_scan_read_fcf_rec;
19001
19002 spin_lock_irq(&phba->hbalock);
19003 phba->hba_flag |= FCF_TS_INPROG;
19004 spin_unlock_irq(&phba->hbalock);
19005
19006 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_NOWAIT);
19007 if (rc == MBX_NOT_FINISHED)
19008 error = -EIO;
19009 else {
		/* Reset eligible FCF count for new scan */
19011 if (fcf_index == LPFC_FCOE_FCF_GET_FIRST)
19012 phba->fcf.eligible_fcf_cnt = 0;
19013 error = 0;
19014 }
19015fail_fcf_scan:
19016 if (error) {
19017 if (mboxq)
19018 lpfc_sli4_mbox_cmd_free(phba, mboxq);
19019
19020 spin_lock_irq(&phba->hbalock);
19021 phba->hba_flag &= ~FCF_TS_INPROG;
19022 spin_unlock_irq(&phba->hbalock);
19023 }
19024 return error;
19025}
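
/**
 * lpfc_sli4_fcf_rr_read_fcf_rec - Read hba fcf record for roundrobin fcf.
 * @phba: pointer to lpfc hba data structure.
 * @fcf_index: FCF table entry offset.
 *
 * This routine is invoked to read an FCF record indicated by @fcf_index
 * and to use it for FLOGI roundrobin FCF failover.
 *
 * Return 0 if the mailbox command is submitted successfully, non-zero
 * otherwise.
 **/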
19038int
19039lpfc_sli4_fcf_rr_read_fcf_rec(struct lpfc_hba *phba, uint16_t fcf_index)
19040{
19041 int rc = 0, error;
19042 LPFC_MBOXQ_t *mboxq;
19043
19044 mboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
19045 if (!mboxq) {
19046 lpfc_printf_log(phba, KERN_ERR, LOG_FIP | LOG_INIT,
19047 "2763 Failed to allocate mbox for "
19048 "READ_FCF cmd\n");
19049 error = -ENOMEM;
19050 goto fail_fcf_read;
19051 }
19052
19053 rc = lpfc_sli4_mbx_read_fcf_rec(phba, mboxq, fcf_index);
19054 if (rc) {
19055 error = -EINVAL;
19056 goto fail_fcf_read;
19057 }
19058
19059 mboxq->vport = phba->pport;
19060 mboxq->mbox_cmpl = lpfc_mbx_cmpl_fcf_rr_read_fcf_rec;
19061 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_NOWAIT);
19062 if (rc == MBX_NOT_FINISHED)
19063 error = -EIO;
19064 else
19065 error = 0;
19066
19067fail_fcf_read:
19068 if (error && mboxq)
19069 lpfc_sli4_mbox_cmd_free(phba, mboxq);
19070 return error;
19071}
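
/**
 * lpfc_sli4_read_fcf_rec - Read hba fcf record for updating the eligible bmask.
 * @phba: pointer to lpfc hba data structure.
 * @fcf_index: FCF table entry offset.
 *
 * This routine is invoked to read an FCF record indicated by @fcf_index to
 * determine whether it's eligible for the FLOGI roundrobin failover list.
 *
 * Return 0 if the mailbox command is submitted successfully, non-zero
 * otherwise.
 **/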
19084int
19085lpfc_sli4_read_fcf_rec(struct lpfc_hba *phba, uint16_t fcf_index)
19086{
19087 int rc = 0, error;
19088 LPFC_MBOXQ_t *mboxq;
19089
19090 mboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
19091 if (!mboxq) {
19092 lpfc_printf_log(phba, KERN_ERR, LOG_FIP | LOG_INIT,
19093 "2758 Failed to allocate mbox for "
19094 "READ_FCF cmd\n");
19095 error = -ENOMEM;
19096 goto fail_fcf_read;
19097 }
19098
19099 rc = lpfc_sli4_mbx_read_fcf_rec(phba, mboxq, fcf_index);
19100 if (rc) {
19101 error = -EINVAL;
19102 goto fail_fcf_read;
19103 }
19104
19105 mboxq->vport = phba->pport;
19106 mboxq->mbox_cmpl = lpfc_mbx_cmpl_read_fcf_rec;
19107 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_NOWAIT);
19108 if (rc == MBX_NOT_FINISHED)
19109 error = -EIO;
19110 else
19111 error = 0;
19112
19113fail_fcf_read:
19114 if (error && mboxq)
19115 lpfc_sli4_mbox_cmd_free(phba, mboxq);
19116 return error;
19117}
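
/**
 * lpfc_check_next_fcf_pri_level - Repopulate the rr_bmask at the next level
 * @phba: pointer to the lpfc_hba struct for this port.
 *
 * This routine is called from lpfc_sli4_fcf_rr_next_index_get when the
 * rr_bmask is empty. FCF indices are put into the rr_bmask based on their
 * priority level, from the highest priority to the lowest. The most likely
 * FCF candidate will be in the highest priority group. This routine
 * searches the fcf_pri list for the next lowest priority group and
 * repopulates the rr_bmask with only those fcf indices.
 *
 * Returns: 1 = success, 0 = failure.
 **/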
19132static int
19133lpfc_check_next_fcf_pri_level(struct lpfc_hba *phba)
19134{
19135 uint16_t next_fcf_pri;
19136 uint16_t last_index;
19137 struct lpfc_fcf_pri *fcf_pri;
19138 int rc;
19139 int ret = 0;
19140
19141 last_index = find_first_bit(phba->fcf.fcf_rr_bmask,
19142 LPFC_SLI4_FCF_TBL_INDX_MAX);
19143 lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
19144 "3060 Last IDX %d\n", last_index);
19145
19146
19147 spin_lock_irq(&phba->hbalock);
19148 if (list_empty(&phba->fcf.fcf_pri_list) ||
19149 list_is_singular(&phba->fcf.fcf_pri_list)) {
19150 spin_unlock_irq(&phba->hbalock);
19151 lpfc_printf_log(phba, KERN_ERR, LOG_FIP,
19152 "3061 Last IDX %d\n", last_index);
19153 return 0;
19154 }
19155 spin_unlock_irq(&phba->hbalock);
19156
19157 next_fcf_pri = 0;

	/*
	 * Clear the rr_bmask and set all of the bits that are at this
	 * priority.
	 */
19162 memset(phba->fcf.fcf_rr_bmask, 0,
19163 sizeof(*phba->fcf.fcf_rr_bmask));
19164 spin_lock_irq(&phba->hbalock);
19165 list_for_each_entry(fcf_pri, &phba->fcf.fcf_pri_list, list) {
19166 if (fcf_pri->fcf_rec.flag & LPFC_FCF_FLOGI_FAILED)
19167 continue;
		/*
		 * the 1st priority that has not FLOGI failed
		 * will be the highest.
		 */
19172 if (!next_fcf_pri)
19173 next_fcf_pri = fcf_pri->fcf_rec.priority;
19174 spin_unlock_irq(&phba->hbalock);
19175 if (fcf_pri->fcf_rec.priority == next_fcf_pri) {
19176 rc = lpfc_sli4_fcf_rr_index_set(phba,
19177 fcf_pri->fcf_rec.fcf_index);
19178 if (rc)
19179 return 0;
19180 }
19181 spin_lock_irq(&phba->hbalock);
19182 }

	/*
	 * If next_fcf_pri was not set above and the list is not empty then
	 * we have failed flogis on all of them. So reset flogi failed
	 * and start at the beginning.
	 */
19188 if (!next_fcf_pri && !list_empty(&phba->fcf.fcf_pri_list)) {
19189 list_for_each_entry(fcf_pri, &phba->fcf.fcf_pri_list, list) {
19190 fcf_pri->fcf_rec.flag &= ~LPFC_FCF_FLOGI_FAILED;
			/*
			 * the 1st priority that has not FLOGI failed
			 * will be the highest.
			 */
19195 if (!next_fcf_pri)
19196 next_fcf_pri = fcf_pri->fcf_rec.priority;
19197 spin_unlock_irq(&phba->hbalock);
19198 if (fcf_pri->fcf_rec.priority == next_fcf_pri) {
19199 rc = lpfc_sli4_fcf_rr_index_set(phba,
19200 fcf_pri->fcf_rec.fcf_index);
19201 if (rc)
19202 return 0;
19203 }
19204 spin_lock_irq(&phba->hbalock);
19205 }
19206 } else
19207 ret = 1;
19208 spin_unlock_irq(&phba->hbalock);
19209
19210 return ret;
19211}
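
/**
 * lpfc_sli4_fcf_rr_next_index_get - Get next eligible fcf record index
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is to get the next eligible FCF record index in a round
 * robin fashion. If the next eligible FCF record index equals to the
 * initial roundrobin FCF record index, LPFC_FCOE_FCF_NEXT_NONE (0xFFFF)
 * shall be returned, otherwise, the next eligible FCF record's index
 * shall be returned.
 **/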
19222uint16_t
19223lpfc_sli4_fcf_rr_next_index_get(struct lpfc_hba *phba)
19224{
19225 uint16_t next_fcf_index;
19226
19227initial_priority:
	/* Search starts from the next bit of currently registered FCF index */
19229 next_fcf_index = phba->fcf.current_rec.fcf_indx;
19230
19231next_priority:
	/* Determine the next fcf index to check */
19233 next_fcf_index = (next_fcf_index + 1) % LPFC_SLI4_FCF_TBL_INDX_MAX;
19234 next_fcf_index = find_next_bit(phba->fcf.fcf_rr_bmask,
19235 LPFC_SLI4_FCF_TBL_INDX_MAX,
19236 next_fcf_index);

	/* Wrap around condition on phba->fcf.fcf_rr_bmask */
19239 if (next_fcf_index >= LPFC_SLI4_FCF_TBL_INDX_MAX) {
		/*
		 * If we have wrapped then we need to clear the bits that
		 * have been tested so that we can detect when we should
		 * change the priority level.
		 */
19245 next_fcf_index = find_next_bit(phba->fcf.fcf_rr_bmask,
19246 LPFC_SLI4_FCF_TBL_INDX_MAX, 0);
19247 }
19248

	/* Check roundrobin failover list empty condition */
19251 if (next_fcf_index >= LPFC_SLI4_FCF_TBL_INDX_MAX ||
19252 next_fcf_index == phba->fcf.current_rec.fcf_indx) {
		/*
		 * If a next fcf index is not found, check whether there are
		 * lower priority level fcf's in the fcf_priority list.
		 * Set up the rr_bmask with all of the available fcf bits
		 * at that level and continue the selection process.
		 */
19259 if (lpfc_check_next_fcf_pri_level(phba))
19260 goto initial_priority;
19261 lpfc_printf_log(phba, KERN_WARNING, LOG_FIP,
19262 "2844 No roundrobin failover FCF available\n");
19263
19264 return LPFC_FCOE_FCF_NEXT_NONE;
19265 }
19266
19267 if (next_fcf_index < LPFC_SLI4_FCF_TBL_INDX_MAX &&
19268 phba->fcf.fcf_pri[next_fcf_index].fcf_rec.flag &
19269 LPFC_FCF_FLOGI_FAILED) {
19270 if (list_is_singular(&phba->fcf.fcf_pri_list))
19271 return LPFC_FCOE_FCF_NEXT_NONE;
19272
19273 goto next_priority;
19274 }
19275
19276 lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
19277 "2845 Get next roundrobin failover FCF (x%x)\n",
19278 next_fcf_index);
19279
19280 return next_fcf_index;
19281}
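
/**
 * lpfc_sli4_fcf_rr_index_set - Set bmask with eligible fcf record index
 * @phba: pointer to lpfc hba data structure.
 * @fcf_index: index into the FCF table to 'set'
 *
 * This routine sets the FCF record index in to the eligible bmask for
 * roundrobin failover search. It checks to make sure that the index
 * does not go beyond the range of the driver allocated bmask dimension
 * before setting the bit.
 *
 * Returns 0 if the index bit is successfully set, otherwise -EINVAL.
 **/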
19296int
19297lpfc_sli4_fcf_rr_index_set(struct lpfc_hba *phba, uint16_t fcf_index)
19298{
19299 if (fcf_index >= LPFC_SLI4_FCF_TBL_INDX_MAX) {
19300 lpfc_printf_log(phba, KERN_ERR, LOG_FIP,
19301 "2610 FCF (x%x) reached driver's book "
19302 "keeping dimension:x%x\n",
19303 fcf_index, LPFC_SLI4_FCF_TBL_INDX_MAX);
19304 return -EINVAL;
19305 }
19306
19307 set_bit(fcf_index, phba->fcf.fcf_rr_bmask);
19308
19309 lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
19310 "2790 Set FCF (x%x) to roundrobin FCF failover "
19311 "bmask\n", fcf_index);
19312
19313 return 0;
19314}
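
/**
 * lpfc_sli4_fcf_rr_index_clear - Clear bmask from eligible fcf record index
 * @phba: pointer to lpfc hba data structure.
 * @fcf_index: index into the FCF table to 'clear'
 *
 * This routine clears the FCF record index from the eligible bmask for
 * roundrobin failover search. It checks to make sure that the index
 * does not go beyond the range of the driver allocated bmask dimension
 * before clearing the bit.
 **/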
19326void
19327lpfc_sli4_fcf_rr_index_clear(struct lpfc_hba *phba, uint16_t fcf_index)
19328{
19329 struct lpfc_fcf_pri *fcf_pri, *fcf_pri_next;
19330 if (fcf_index >= LPFC_SLI4_FCF_TBL_INDX_MAX) {
19331 lpfc_printf_log(phba, KERN_ERR, LOG_FIP,
19332 "2762 FCF (x%x) reached driver's book "
19333 "keeping dimension:x%x\n",
19334 fcf_index, LPFC_SLI4_FCF_TBL_INDX_MAX);
19335 return;
19336 }
19337
19338 spin_lock_irq(&phba->hbalock);
19339 list_for_each_entry_safe(fcf_pri, fcf_pri_next, &phba->fcf.fcf_pri_list,
19340 list) {
19341 if (fcf_pri->fcf_rec.fcf_index == fcf_index) {
19342 list_del_init(&fcf_pri->list);
19343 break;
19344 }
19345 }
19346 spin_unlock_irq(&phba->hbalock);
19347 clear_bit(fcf_index, phba->fcf.fcf_rr_bmask);
19348
19349 lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
19350 "2791 Clear FCF (x%x) from roundrobin failover "
19351 "bmask\n", fcf_index);
19352}
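
/**
 * lpfc_mbx_cmpl_redisc_fcf_table - completion routine for rediscover FCF table
 * @phba: pointer to lpfc hba data structure.
 * @mbox: An allocated pointer to type LPFC_MBOXQ_t
 *
 * This routine is the completion routine for the rediscover FCF table mailbox
 * command. On failure it falls back to either restarting port discovery
 * (CVL case) or the FCF dead failthrough; on success it starts the FCF
 * rediscover quiescent timer.
 **/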
19363static void
19364lpfc_mbx_cmpl_redisc_fcf_table(struct lpfc_hba *phba, LPFC_MBOXQ_t *mbox)
19365{
19366 struct lpfc_mbx_redisc_fcf_tbl *redisc_fcf;
19367 uint32_t shdr_status, shdr_add_status;
19368
19369 redisc_fcf = &mbox->u.mqe.un.redisc_fcf_tbl;
19370
19371 shdr_status = bf_get(lpfc_mbox_hdr_status,
19372 &redisc_fcf->header.cfg_shdr.response);
19373 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status,
19374 &redisc_fcf->header.cfg_shdr.response);
19375 if (shdr_status || shdr_add_status) {
19376 lpfc_printf_log(phba, KERN_ERR, LOG_FIP,
19377 "2746 Requesting for FCF rediscovery failed "
19378 "status x%x add_status x%x\n",
19379 shdr_status, shdr_add_status);
19380 if (phba->fcf.fcf_flag & FCF_ACVL_DISC) {
19381 spin_lock_irq(&phba->hbalock);
19382 phba->fcf.fcf_flag &= ~FCF_ACVL_DISC;
19383 spin_unlock_irq(&phba->hbalock);
19384
19385
19386
19387
19388 lpfc_retry_pport_discovery(phba);
19389 } else {
19390 spin_lock_irq(&phba->hbalock);
19391 phba->fcf.fcf_flag &= ~FCF_DEAD_DISC;
19392 spin_unlock_irq(&phba->hbalock);
19393
19394
19395
19396
19397
19398 lpfc_sli4_fcf_dead_failthrough(phba);
19399 }
19400 } else {
19401 lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
19402 "2775 Start FCF rediscover quiescent timer\n");
19403
19404
19405
19406
19407 lpfc_fcf_redisc_wait_start_timer(phba);
19408 }
19409
19410 mempool_free(mbox, phba->mbox_mem_pool);
19411}
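
/**
 * lpfc_sli4_redisc_fcf_table - Request to rediscover entire FCF table by port.
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked to request rediscovery of the entire FCF table
 * by the port.
 **/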
19420int
19421lpfc_sli4_redisc_fcf_table(struct lpfc_hba *phba)
19422{
19423 LPFC_MBOXQ_t *mbox;
19424 struct lpfc_mbx_redisc_fcf_tbl *redisc_fcf;
19425 int rc, length;

	/* Cancel retry delay timers on all vports before FCF rediscover */
19428 lpfc_cancel_all_vport_retry_delay_timer(phba);
19429
19430 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
19431 if (!mbox) {
19432 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
19433 "2745 Failed to allocate mbox for "
19434 "requesting FCF rediscover.\n");
19435 return -ENOMEM;
19436 }
19437
19438 length = (sizeof(struct lpfc_mbx_redisc_fcf_tbl) -
19439 sizeof(struct lpfc_sli4_cfg_mhdr));
19440 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_FCOE,
19441 LPFC_MBOX_OPCODE_FCOE_REDISCOVER_FCF,
19442 length, LPFC_SLI4_MBX_EMBED);
19443
19444 redisc_fcf = &mbox->u.mqe.un.redisc_fcf_tbl;
	/* Set count to 0 for invalidating the entire FCF database */
19446 bf_set(lpfc_mbx_redisc_fcf_count, redisc_fcf, 0);

	/* Issue the mailbox command asynchronously */
19449 mbox->vport = phba->pport;
19450 mbox->mbox_cmpl = lpfc_mbx_cmpl_redisc_fcf_table;
19451 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT);
19452
19453 if (rc == MBX_NOT_FINISHED) {
19454 mempool_free(mbox, phba->mbox_mem_pool);
19455 return -EIO;
19456 }
19457 return 0;
19458}
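
/**
 * lpfc_sli4_fcf_dead_failthrough - Failthrough routine to fcf dead event
 * @phba: pointer to lpfc hba data structure.
 *
 * This function is the failover routine as a last resort to the FCF DEAD
 * event when the driver failed to perform fast FCF failover.
 **/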
19467void
19468lpfc_sli4_fcf_dead_failthrough(struct lpfc_hba *phba)
19469{
19470 uint32_t link_state;

	/*
	 * Last resort as FCF DEAD event failover will treat this as
	 * a link down, but save the link state because we don't want
	 * it to be changed to Link Down unless it is already down.
	 */
19477 link_state = phba->link_state;
19478 lpfc_linkdown(phba);
19479 phba->link_state = link_state;

	/* Unregister FCF if no devices connected to it */
19482 lpfc_unregister_unused_fcf(phba);
19483}
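
/**
 * lpfc_sli_get_config_region23 - Get sli3 port region 23 data.
 * @phba: pointer to lpfc hba data structure.
 * @rgn23_data: pointer to configure region 23 data.
 *
 * This function gets SLI3 port configure region 23 data through memory dump
 * mailbox command. When it successfully retrieves data, the size of the data
 * will be returned, otherwise, 0 will be returned.
 **/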
19494static uint32_t
19495lpfc_sli_get_config_region23(struct lpfc_hba *phba, char *rgn23_data)
19496{
19497 LPFC_MBOXQ_t *pmb = NULL;
19498 MAILBOX_t *mb;
19499 uint32_t offset = 0;
19500 int i, rc;
19501
19502 if (!rgn23_data)
19503 return 0;
19504
19505 pmb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
19506 if (!pmb) {
19507 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
19508 "2600 failed to allocate mailbox memory\n");
19509 return 0;
19510 }
19511 mb = &pmb->u.mb;
19512
19513 do {
19514 lpfc_dump_mem(phba, pmb, offset, DMP_REGION_23);
19515 rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL);
19516
19517 if (rc != MBX_SUCCESS) {
19518 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
19519 "2601 failed to read config "
19520 "region 23, rc 0x%x Status 0x%x\n",
19521 rc, mb->mbxStatus);
19522 mb->un.varDmp.word_cnt = 0;
19523 }

		/*
		 * dump mem may return a zero when finished or we got a
		 * mailbox error, either way we are done.
		 */
19528 if (mb->un.varDmp.word_cnt == 0)
19529 break;
19530
19531 i = mb->un.varDmp.word_cnt * sizeof(uint32_t);
19532 if (offset + i > DMP_RGN23_SIZE)
19533 i = DMP_RGN23_SIZE - offset;
19534 lpfc_sli_pcimem_bcopy(((uint8_t *)mb) + DMP_RSP_OFFSET,
19535 rgn23_data + offset, i);
19536 offset += i;
19537 } while (offset < DMP_RGN23_SIZE);
19538
19539 mempool_free(pmb, phba->mbox_mem_pool);
19540 return offset;
19541}
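
/**
 * lpfc_sli4_get_config_region23 - Get sli4 port region 23 data.
 * @phba: pointer to lpfc hba data structure.
 * @rgn23_data: pointer to configure region 23 data.
 *
 * This function gets SLI4 port configure region 23 data through memory dump
 * mailbox command. When it successfully retrieves data, the size of the data
 * will be returned, otherwise, 0 will be returned.
 **/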
19552static uint32_t
19553lpfc_sli4_get_config_region23(struct lpfc_hba *phba, char *rgn23_data)
19554{
19555 LPFC_MBOXQ_t *mboxq = NULL;
19556 struct lpfc_dmabuf *mp = NULL;
19557 struct lpfc_mqe *mqe;
19558 uint32_t data_length = 0;
19559 int rc;
19560
19561 if (!rgn23_data)
19562 return 0;
19563
19564 mboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
19565 if (!mboxq) {
19566 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
19567 "3105 failed to allocate mailbox memory\n");
19568 return 0;
19569 }
19570
19571 if (lpfc_sli4_dump_cfg_rg23(phba, mboxq))
19572 goto out;
19573 mqe = &mboxq->u.mqe;
19574 mp = (struct lpfc_dmabuf *)mboxq->ctx_buf;
19575 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
19576 if (rc)
19577 goto out;
19578 data_length = mqe->un.mb_words[5];
19579 if (data_length == 0)
19580 goto out;
19581 if (data_length > DMP_RGN23_SIZE) {
19582 data_length = 0;
19583 goto out;
19584 }
19585 lpfc_sli_pcimem_bcopy((char *)mp->virt, rgn23_data, data_length);
19586out:
19587 mempool_free(mboxq, phba->mbox_mem_pool);
19588 if (mp) {
19589 lpfc_mbuf_free(phba, mp->virt, mp->phys);
19590 kfree(mp);
19591 }
19592 return data_length;
19593}
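
/**
 * lpfc_sli_read_link_ste - Read region 23 to decide if link is disabled.
 * @phba: pointer to lpfc hba data structure.
 *
 * This function reads region 23 and parses the TLVs for the port state to
 * decide whether the user disabled the link. If the link is disabled by
 * TLV, LINK_DISABLED is set in hba_flag.
 **/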
19603void
19604lpfc_sli_read_link_ste(struct lpfc_hba *phba)
19605{
19606 uint8_t *rgn23_data = NULL;
19607 uint32_t if_type, data_size, sub_tlv_len, tlv_offset;
19608 uint32_t offset = 0;
19609
19610
19611 rgn23_data = kzalloc(DMP_RGN23_SIZE, GFP_KERNEL);
19612 if (!rgn23_data)
19613 goto out;
19614
19615 if (phba->sli_rev < LPFC_SLI_REV4)
19616 data_size = lpfc_sli_get_config_region23(phba, rgn23_data);
19617 else {
19618 if_type = bf_get(lpfc_sli_intf_if_type,
19619 &phba->sli4_hba.sli_intf);
19620 if (if_type == LPFC_SLI_INTF_IF_TYPE_0)
19621 goto out;
19622 data_size = lpfc_sli4_get_config_region23(phba, rgn23_data);
19623 }
19624
19625 if (!data_size)
19626 goto out;
19627

	/* Check the region signature first */
19629 if (memcmp(&rgn23_data[offset], LPFC_REGION23_SIGNATURE, 4)) {
19630 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
19631 "2619 Config region 23 has bad signature\n");
19632 goto out;
19633 }
19634 offset += 4;

	/* Check the data structure version */
19637 if (rgn23_data[offset] != LPFC_REGION23_VERSION) {
19638 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
19639 "2620 Config region 23 has bad version\n");
19640 goto out;
19641 }
19642 offset += 4;

	/* Parse TLV entries in the region */
19645 while (offset < data_size) {
19646 if (rgn23_data[offset] == LPFC_REGION23_LAST_REC)
19647 break;
		/*
		 * If the TLV is not driver specific TLV or driver id is
		 * not linux driver id, skip the record.
		 */
19652 if ((rgn23_data[offset] != DRIVER_SPECIFIC_TYPE) ||
19653 (rgn23_data[offset + 2] != LINUX_DRIVER_ID) ||
19654 (rgn23_data[offset + 3] != 0)) {
19655 offset += rgn23_data[offset + 1] * 4 + 4;
19656 continue;
19657 }

		/* Driver found a driver specific TLV in the config region */
19660 sub_tlv_len = rgn23_data[offset + 1] * 4;
19661 offset += 4;
19662 tlv_offset = 0;

		/*
		 * Search for configured port state sub-TLV.
		 */
19667 while ((offset < data_size) &&
19668 (tlv_offset < sub_tlv_len)) {
19669 if (rgn23_data[offset] == LPFC_REGION23_LAST_REC) {
19670 offset += 4;
19671 tlv_offset += 4;
19672 break;
19673 }
			if (rgn23_data[offset] != PORT_STE_TYPE) {
				/* Read the record length once, before
				 * advancing, so offset and tlv_offset
				 * move by the same amount.
				 */
				tlv_offset += rgn23_data[offset + 1] * 4 + 4;
				offset += rgn23_data[offset + 1] * 4 + 4;
				continue;
19678 }

			/* This HBA contains PORT_STE configured */
19681 if (!rgn23_data[offset + 2])
19682 phba->hba_flag |= LINK_DISABLED;
19683
19684 goto out;
19685 }
19686 }
19687
19688out:
19689 kfree(rgn23_data);
19690 return;
19691}
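
/**
 * lpfc_wr_object - write an object to the firmware
 * @phba: HBA structure that indicates port to create a queue on.
 * @dmabuf_list: list of dmabufs to write to the port.
 * @size: the total byte value of the objects to write to the port.
 * @offset: the current offset to be used to start the transfer.
 *
 * This routine will create a wr_object mailbox command to send to the port.
 * The mailbox command is constructed using the dma buffers described in
 * @dmabuf_list to create a list of BDEs, filling in as many BDEs as the
 * embedded mailbox can support. @offset indicates the starting offset of
 * the transfer and is updated to the offset after the write completes.
 * @size is used to determine the end of the object and whether the eof
 * bit should be set.
 *
 * Return 0 if successful; a negative value for error cases.
 **/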
19712int
19713lpfc_wr_object(struct lpfc_hba *phba, struct list_head *dmabuf_list,
19714 uint32_t size, uint32_t *offset)
19715{
19716 struct lpfc_mbx_wr_object *wr_object;
19717 LPFC_MBOXQ_t *mbox;
19718 int rc = 0, i = 0;
19719 uint32_t shdr_status, shdr_add_status, shdr_change_status, shdr_csf;
19720 uint32_t mbox_tmo;
19721 struct lpfc_dmabuf *dmabuf;
19722 uint32_t written = 0;
19723 bool check_change_status = false;
19724
19725 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
19726 if (!mbox)
19727 return -ENOMEM;
19728
19729 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON,
19730 LPFC_MBOX_OPCODE_WRITE_OBJECT,
19731 sizeof(struct lpfc_mbx_wr_object) -
19732 sizeof(struct lpfc_sli4_cfg_mhdr), LPFC_SLI4_MBX_EMBED);
19733
19734 wr_object = (struct lpfc_mbx_wr_object *)&mbox->u.mqe.un.wr_object;
19735 wr_object->u.request.write_offset = *offset;
19736 sprintf((uint8_t *)wr_object->u.request.object_name, "/");
19737 wr_object->u.request.object_name[0] =
19738 cpu_to_le32(wr_object->u.request.object_name[0]);
19739 bf_set(lpfc_wr_object_eof, &wr_object->u.request, 0);
19740 list_for_each_entry(dmabuf, dmabuf_list, list) {
19741 if (i >= LPFC_MBX_WR_CONFIG_MAX_BDE || written >= size)
19742 break;
19743 wr_object->u.request.bde[i].addrLow = putPaddrLow(dmabuf->phys);
19744 wr_object->u.request.bde[i].addrHigh =
19745 putPaddrHigh(dmabuf->phys);
19746 if (written + SLI4_PAGE_SIZE >= size) {
19747 wr_object->u.request.bde[i].tus.f.bdeSize =
19748 (size - written);
19749 written += (size - written);
19750 bf_set(lpfc_wr_object_eof, &wr_object->u.request, 1);
19751 bf_set(lpfc_wr_object_eas, &wr_object->u.request, 1);
19752 check_change_status = true;
19753 } else {
19754 wr_object->u.request.bde[i].tus.f.bdeSize =
19755 SLI4_PAGE_SIZE;
19756 written += SLI4_PAGE_SIZE;
19757 }
19758 i++;
19759 }
19760 wr_object->u.request.bde_count = i;
19761 bf_set(lpfc_wr_object_write_length, &wr_object->u.request, written);
19762 if (!phba->sli4_hba.intr_enable)
19763 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
19764 else {
19765 mbox_tmo = lpfc_mbox_tmo_val(phba, mbox);
19766 rc = lpfc_sli_issue_mbox_wait(phba, mbox, mbox_tmo);
19767 }
19768
19769 shdr_status = bf_get(lpfc_mbox_hdr_status,
19770 &wr_object->header.cfg_shdr.response);
19771 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status,
19772 &wr_object->header.cfg_shdr.response);
19773 if (check_change_status) {
19774 shdr_change_status = bf_get(lpfc_wr_object_change_status,
19775 &wr_object->u.response);
19776
19777 if (shdr_change_status == LPFC_CHANGE_STATUS_FW_RESET ||
19778 shdr_change_status == LPFC_CHANGE_STATUS_PORT_MIGRATION) {
19779 shdr_csf = bf_get(lpfc_wr_object_csf,
19780 &wr_object->u.response);
19781 if (shdr_csf)
19782 shdr_change_status =
19783 LPFC_CHANGE_STATUS_PCI_RESET;
19784 }
19785
19786 switch (shdr_change_status) {
19787 case (LPFC_CHANGE_STATUS_PHYS_DEV_RESET):
19788 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
19789 "3198 Firmware write complete: System "
19790 "reboot required to instantiate\n");
19791 break;
19792 case (LPFC_CHANGE_STATUS_FW_RESET):
19793 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
19794 "3199 Firmware write complete: Firmware"
19795 " reset required to instantiate\n");
19796 break;
19797 case (LPFC_CHANGE_STATUS_PORT_MIGRATION):
19798 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
19799 "3200 Firmware write complete: Port "
19800 "Migration or PCI Reset required to "
19801 "instantiate\n");
19802 break;
19803 case (LPFC_CHANGE_STATUS_PCI_RESET):
19804 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
19805 "3201 Firmware write complete: PCI "
19806 "Reset required to instantiate\n");
19807 break;
19808 default:
19809 break;
19810 }
19811 }
19812 if (rc != MBX_TIMEOUT)
19813 mempool_free(mbox, phba->mbox_mem_pool);
19814 if (shdr_status || shdr_add_status || rc) {
19815 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
19816 "3025 Write Object mailbox failed with "
19817 "status x%x add_status x%x, mbx status x%x\n",
19818 shdr_status, shdr_add_status, rc);
19819 rc = -ENXIO;
19820 *offset = shdr_add_status;
19821 } else
19822 *offset += wr_object->u.response.actual_write_length;
19823 return rc;
19824}
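
/**
 * lpfc_cleanup_pending_mbox - Free up vport discovery mailbox commands
 * @vport: pointer to vport data structure.
 *
 * This function cleans up all REG_LOGIN64 and REG_VPI mailbox commands
 * queued or in progress for the vport, flushing the queued commands and
 * releasing the node references and DMA buffers they hold.
 **/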
19835void
19836lpfc_cleanup_pending_mbox(struct lpfc_vport *vport)
19837{
19838 struct lpfc_hba *phba = vport->phba;
19839 LPFC_MBOXQ_t *mb, *nextmb;
19840 struct lpfc_dmabuf *mp;
19841 struct lpfc_nodelist *ndlp;
19842 struct lpfc_nodelist *act_mbx_ndlp = NULL;
19843 struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
19844 LIST_HEAD(mbox_cmd_list);
19845 uint8_t restart_loop;
19846
19847
19848 spin_lock_irq(&phba->hbalock);
19849 list_for_each_entry_safe(mb, nextmb, &phba->sli.mboxq, list) {
19850 if (mb->vport != vport)
19851 continue;
19852
19853 if ((mb->u.mb.mbxCommand != MBX_REG_LOGIN64) &&
19854 (mb->u.mb.mbxCommand != MBX_REG_VPI))
19855 continue;
19856
19857 list_del(&mb->list);
19858 list_add_tail(&mb->list, &mbox_cmd_list);
19859 }
19860
19861 mb = phba->sli.mbox_active;
19862 if (mb && (mb->vport == vport)) {
19863 if ((mb->u.mb.mbxCommand == MBX_REG_LOGIN64) ||
19864 (mb->u.mb.mbxCommand == MBX_REG_VPI))
19865 mb->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
19866 if (mb->u.mb.mbxCommand == MBX_REG_LOGIN64) {
19867 act_mbx_ndlp = (struct lpfc_nodelist *)mb->ctx_ndlp;
19868
19869 act_mbx_ndlp = lpfc_nlp_get(act_mbx_ndlp);
19870
19871 mb->mbox_flag |= LPFC_MBX_IMED_UNREG;
19872 }
19873 }
19874
19875 do {
19876 restart_loop = 0;
19877 list_for_each_entry(mb, &phba->sli.mboxq_cmpl, list) {
19878
19879
19880
19881
19882 if ((mb->vport != vport) ||
19883 (mb->mbox_flag & LPFC_MBX_IMED_UNREG))
19884 continue;
19885
19886 if ((mb->u.mb.mbxCommand != MBX_REG_LOGIN64) &&
19887 (mb->u.mb.mbxCommand != MBX_REG_VPI))
19888 continue;
19889
19890 mb->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
19891 if (mb->u.mb.mbxCommand == MBX_REG_LOGIN64) {
19892 ndlp = (struct lpfc_nodelist *)mb->ctx_ndlp;
19893
19894 mb->mbox_flag |= LPFC_MBX_IMED_UNREG;
19895 restart_loop = 1;
19896 spin_unlock_irq(&phba->hbalock);
19897 spin_lock(shost->host_lock);
19898 ndlp->nlp_flag &= ~NLP_IGNR_REG_CMPL;
19899 spin_unlock(shost->host_lock);
19900 spin_lock_irq(&phba->hbalock);
19901 break;
19902 }
19903 }
19904 } while (restart_loop);
19905
19906 spin_unlock_irq(&phba->hbalock);
19907
19908
19909 while (!list_empty(&mbox_cmd_list)) {
19910 list_remove_head(&mbox_cmd_list, mb, LPFC_MBOXQ_t, list);
19911 if (mb->u.mb.mbxCommand == MBX_REG_LOGIN64) {
19912 mp = (struct lpfc_dmabuf *)(mb->ctx_buf);
19913 if (mp) {
19914 __lpfc_mbuf_free(phba, mp->virt, mp->phys);
19915 kfree(mp);
19916 }
19917 mb->ctx_buf = NULL;
19918 ndlp = (struct lpfc_nodelist *)mb->ctx_ndlp;
19919 mb->ctx_ndlp = NULL;
19920 if (ndlp) {
19921 spin_lock(shost->host_lock);
19922 ndlp->nlp_flag &= ~NLP_IGNR_REG_CMPL;
19923 spin_unlock(shost->host_lock);
19924 lpfc_nlp_put(ndlp);
19925 }
19926 }
19927 mempool_free(mb, phba->mbox_mem_pool);
19928 }
19929
19930
19931 if (act_mbx_ndlp) {
19932 spin_lock(shost->host_lock);
19933 act_mbx_ndlp->nlp_flag &= ~NLP_IGNR_REG_CMPL;
19934 spin_unlock(shost->host_lock);
19935 lpfc_nlp_put(act_mbx_ndlp);
19936 }
19937}
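
/**
 * lpfc_drain_txq - Drain the txq
 * @phba: Pointer to HBA context object.
 *
 * This function attempts to submit IOCBs on the txq
 * to the adapter.  For SLI4 adapters, the txq contains
 * ELS IOCBs that have been deferred because there were
 * no available SGLs.  This congestion can occur with
 * large vport counts during node discovery.
 **/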
19950uint32_t
19951lpfc_drain_txq(struct lpfc_hba *phba)
19952{
19953 LIST_HEAD(completions);
19954 struct lpfc_sli_ring *pring;
19955 struct lpfc_iocbq *piocbq = NULL;
19956 unsigned long iflags = 0;
19957 char *fail_msg = NULL;
19958 struct lpfc_sglq *sglq;
19959 union lpfc_wqe128 wqe;
19960 uint32_t txq_cnt = 0;
19961 struct lpfc_queue *wq;
19962
19963 if (phba->link_flag & LS_MDS_LOOPBACK) {
		/* MDS WQEs are posted only to the first WQ */
19965 wq = phba->sli4_hba.hdwq[0].io_wq;
19966 if (unlikely(!wq))
19967 return 0;
19968 pring = wq->pring;
19969 } else {
19970 wq = phba->sli4_hba.els_wq;
19971 if (unlikely(!wq))
19972 return 0;
19973 pring = lpfc_phba_elsring(phba);
19974 }
19975
19976 if (unlikely(!pring) || list_empty(&pring->txq))
19977 return 0;
19978
19979 spin_lock_irqsave(&pring->ring_lock, iflags);
19980 list_for_each_entry(piocbq, &pring->txq, list) {
19981 txq_cnt++;
19982 }
19983
19984 if (txq_cnt > pring->txq_max)
19985 pring->txq_max = txq_cnt;
19986
19987 spin_unlock_irqrestore(&pring->ring_lock, iflags);
19988
19989 while (!list_empty(&pring->txq)) {
19990 spin_lock_irqsave(&pring->ring_lock, iflags);
19991
19992 piocbq = lpfc_sli_ringtx_get(phba, pring);
19993 if (!piocbq) {
19994 spin_unlock_irqrestore(&pring->ring_lock, iflags);
19995 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
19996 "2823 txq empty and txq_cnt is %d\n ",
19997 txq_cnt);
19998 break;
19999 }
20000 sglq = __lpfc_sli_get_els_sglq(phba, piocbq);
20001 if (!sglq) {
20002 __lpfc_sli_ringtx_put(phba, pring, piocbq);
20003 spin_unlock_irqrestore(&pring->ring_lock, iflags);
20004 break;
20005 }
20006 txq_cnt--;

		/*
		 * The xri and iocb resources are secured,
		 * attempt to issue the request.
		 */
20011 piocbq->sli4_lxritag = sglq->sli4_lxritag;
20012 piocbq->sli4_xritag = sglq->sli4_xritag;
20013 if (NO_XRI == lpfc_sli4_bpl2sgl(phba, piocbq, sglq))
20014 fail_msg = "to convert bpl to sgl";
20015 else if (lpfc_sli4_iocb2wqe(phba, piocbq, &wqe))
20016 fail_msg = "to convert iocb to wqe";
20017 else if (lpfc_sli4_wq_put(wq, &wqe))
20018 fail_msg = " - Wq is full";
20019 else
20020 lpfc_sli_ringtxcmpl_put(phba, pring, piocbq);
20021
20022 if (fail_msg) {
20023
20024 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
20025 "2822 IOCB failed %s iotag 0x%x "
20026 "xri 0x%x\n",
20027 fail_msg,
20028 piocbq->iotag, piocbq->sli4_xritag);
20029 list_add_tail(&piocbq->list, &completions);
20030 }
20031 spin_unlock_irqrestore(&pring->ring_lock, iflags);
20032 }
20033
20034
20035 lpfc_sli_cancel_iocbs(phba, &completions, IOSTAT_LOCAL_REJECT,
20036 IOERR_SLI_ABORTED);
20037
20038 return txq_cnt;
20039}
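
/**
 * lpfc_wqe_bpl2sgl - Convert the bpl/bde to a sgl.
 * @phba: Pointer to HBA context object.
 * @pwqeq: Pointer to command WQE.
 * @sglq: Pointer to the scatter gather queue object.
 *
 * This routine converts the bpl or bde that is in the WQE
 * to a sgl list for the sli4 hardware. The physical address
 * of the bpl/bde is converted back to a virtual address.
 * If the WQE contains a BPL then the list of BDEs is
 * converted to sli4_sges. If the WQE contains a single
 * BDE then it is converted to a single sli_sge.
 * The WQE is still in cpu endianness so the contents of
 * the bpl can be used without byte swapping.
 *
 * Returns valid XRI = Success, NO_XRI = Failure.
 **/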
20058static uint16_t
20059lpfc_wqe_bpl2sgl(struct lpfc_hba *phba, struct lpfc_iocbq *pwqeq,
20060 struct lpfc_sglq *sglq)
20061{
20062 uint16_t xritag = NO_XRI;
20063 struct ulp_bde64 *bpl = NULL;
20064 struct ulp_bde64 bde;
20065 struct sli4_sge *sgl = NULL;
20066 struct lpfc_dmabuf *dmabuf;
20067 union lpfc_wqe128 *wqe;
20068 int numBdes = 0;
20069 int i = 0;
20070 uint32_t offset = 0;
20071 int inbound = 0;
20072 uint32_t cmd;
20073
20074 if (!pwqeq || !sglq)
20075 return xritag;
20076
20077 sgl = (struct sli4_sge *)sglq->sgl;
20078 wqe = &pwqeq->wqe;
20079 pwqeq->iocb.ulpIoTag = pwqeq->iotag;
20080
20081 cmd = bf_get(wqe_cmnd, &wqe->generic.wqe_com);
20082 if (cmd == CMD_XMIT_BLS_RSP64_WQE)
20083 return sglq->sli4_xritag;
20084 numBdes = pwqeq->rsvd2;
20085 if (numBdes) {
20086
20087
20088
20089
20090 if (pwqeq->context3)
20091 dmabuf = (struct lpfc_dmabuf *)pwqeq->context3;
20092 else
20093 return xritag;
20094
20095 bpl = (struct ulp_bde64 *)dmabuf->virt;
20096 if (!bpl)
20097 return xritag;
20098
20099 for (i = 0; i < numBdes; i++) {
20100
20101 sgl->addr_hi = bpl->addrHigh;
20102 sgl->addr_lo = bpl->addrLow;
20103
20104 sgl->word2 = le32_to_cpu(sgl->word2);
20105 if ((i+1) == numBdes)
20106 bf_set(lpfc_sli4_sge_last, sgl, 1);
20107 else
20108 bf_set(lpfc_sli4_sge_last, sgl, 0);
20109
20110
20111
20112 bde.tus.w = le32_to_cpu(bpl->tus.w);
20113 sgl->sge_len = cpu_to_le32(bde.tus.f.bdeSize);
20114
20115
20116
20117
20118 switch (cmd) {
20119 case CMD_GEN_REQUEST64_WQE:
20120
20121 if (bpl->tus.f.bdeFlags == BUFF_TYPE_BDE_64I)
20122 inbound++;
20123
20124 if (inbound == 1)
20125 offset = 0;
20126 bf_set(lpfc_sli4_sge_offset, sgl, offset);
20127 bf_set(lpfc_sli4_sge_type, sgl,
20128 LPFC_SGE_TYPE_DATA);
20129 offset += bde.tus.f.bdeSize;
20130 break;
20131 case CMD_FCP_TRSP64_WQE:
20132 bf_set(lpfc_sli4_sge_offset, sgl, 0);
20133 bf_set(lpfc_sli4_sge_type, sgl,
20134 LPFC_SGE_TYPE_DATA);
20135 break;
20136 case CMD_FCP_TSEND64_WQE:
20137 case CMD_FCP_TRECEIVE64_WQE:
20138 bf_set(lpfc_sli4_sge_type, sgl,
20139 bpl->tus.f.bdeFlags);
20140 if (i < 3)
20141 offset = 0;
20142 else
20143 offset += bde.tus.f.bdeSize;
20144 bf_set(lpfc_sli4_sge_offset, sgl, offset);
20145 break;
20146 }
20147 sgl->word2 = cpu_to_le32(sgl->word2);
20148 bpl++;
20149 sgl++;
20150 }
20151 } else if (wqe->gen_req.bde.tus.f.bdeFlags == BUFF_TYPE_BDE_64) {
20152
20153
20154
20155
20156 sgl->addr_hi = cpu_to_le32(wqe->gen_req.bde.addrHigh);
20157 sgl->addr_lo = cpu_to_le32(wqe->gen_req.bde.addrLow);
20158 sgl->word2 = le32_to_cpu(sgl->word2);
20159 bf_set(lpfc_sli4_sge_last, sgl, 1);
20160 sgl->word2 = cpu_to_le32(sgl->word2);
20161 sgl->sge_len = cpu_to_le32(wqe->gen_req.bde.tus.f.bdeSize);
20162 }
20163 return sglq->sli4_xritag;
20164}
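
/**
 * lpfc_sli4_issue_wqe - Issue an SLI4 Work Queue Entry (WQE)
 * @phba: Pointer to HBA context object.
 * @qp: Pointer to HDW queue.
 * @pwqe: Pointer to command WQE.
 **/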
20172int
20173lpfc_sli4_issue_wqe(struct lpfc_hba *phba, struct lpfc_sli4_hdw_queue *qp,
20174 struct lpfc_iocbq *pwqe)
20175{
20176 union lpfc_wqe128 *wqe = &pwqe->wqe;
20177 struct lpfc_async_xchg_ctx *ctxp;
20178 struct lpfc_queue *wq;
20179 struct lpfc_sglq *sglq;
20180 struct lpfc_sli_ring *pring;
20181 unsigned long iflags;
20182 uint32_t ret = 0;
20183
	/* NVME_LS and NVME_LS ABTS requests. */
20185 if (pwqe->iocb_flag & LPFC_IO_NVME_LS) {
20186 pring = phba->sli4_hba.nvmels_wq->pring;
20187 lpfc_qp_spin_lock_irqsave(&pring->ring_lock, iflags,
20188 qp, wq_access);
20189 sglq = __lpfc_sli_get_els_sglq(phba, pwqe);
20190 if (!sglq) {
20191 spin_unlock_irqrestore(&pring->ring_lock, iflags);
20192 return WQE_BUSY;
20193 }
20194 pwqe->sli4_lxritag = sglq->sli4_lxritag;
20195 pwqe->sli4_xritag = sglq->sli4_xritag;
20196 if (lpfc_wqe_bpl2sgl(phba, pwqe, sglq) == NO_XRI) {
20197 spin_unlock_irqrestore(&pring->ring_lock, iflags);
20198 return WQE_ERROR;
20199 }
20200 bf_set(wqe_xri_tag, &pwqe->wqe.xmit_bls_rsp.wqe_com,
20201 pwqe->sli4_xritag);
20202 ret = lpfc_sli4_wq_put(phba->sli4_hba.nvmels_wq, wqe);
20203 if (ret) {
20204 spin_unlock_irqrestore(&pring->ring_lock, iflags);
20205 return ret;
20206 }
20207
20208 lpfc_sli_ringtxcmpl_put(phba, pring, pwqe);
20209 spin_unlock_irqrestore(&pring->ring_lock, iflags);
20210
20211 lpfc_sli4_poll_eq(qp->hba_eq, LPFC_POLL_FASTPATH);
20212 return 0;
20213 }

	/* NVME_FCREQ and NVME_ABTS requests */
20216 if (pwqe->iocb_flag & LPFC_IO_NVME) {
20217
20218 wq = qp->io_wq;
20219 pring = wq->pring;
20220
20221 bf_set(wqe_cqid, &wqe->generic.wqe_com, qp->io_cq_map);
20222
20223 lpfc_qp_spin_lock_irqsave(&pring->ring_lock, iflags,
20224 qp, wq_access);
20225 ret = lpfc_sli4_wq_put(wq, wqe);
20226 if (ret) {
20227 spin_unlock_irqrestore(&pring->ring_lock, iflags);
20228 return ret;
20229 }
20230 lpfc_sli_ringtxcmpl_put(phba, pring, pwqe);
20231 spin_unlock_irqrestore(&pring->ring_lock, iflags);
20232
20233 lpfc_sli4_poll_eq(qp->hba_eq, LPFC_POLL_FASTPATH);
20234 return 0;
20235 }

	/* NVMET requests */
20238 if (pwqe->iocb_flag & LPFC_IO_NVMET) {
20239
20240 wq = qp->io_wq;
20241 pring = wq->pring;
20242
20243 ctxp = pwqe->context2;
20244 sglq = ctxp->ctxbuf->sglq;
20245 if (pwqe->sli4_xritag == NO_XRI) {
20246 pwqe->sli4_lxritag = sglq->sli4_lxritag;
20247 pwqe->sli4_xritag = sglq->sli4_xritag;
20248 }
20249 bf_set(wqe_xri_tag, &pwqe->wqe.xmit_bls_rsp.wqe_com,
20250 pwqe->sli4_xritag);
20251 bf_set(wqe_cqid, &wqe->generic.wqe_com, qp->io_cq_map);
20252
20253 lpfc_qp_spin_lock_irqsave(&pring->ring_lock, iflags,
20254 qp, wq_access);
20255 ret = lpfc_sli4_wq_put(wq, wqe);
20256 if (ret) {
20257 spin_unlock_irqrestore(&pring->ring_lock, iflags);
20258 return ret;
20259 }
20260 lpfc_sli_ringtxcmpl_put(phba, pring, pwqe);
20261 spin_unlock_irqrestore(&pring->ring_lock, iflags);
20262
20263 lpfc_sli4_poll_eq(qp->hba_eq, LPFC_POLL_FASTPATH);
20264 return 0;
20265 }
20266 return WQE_ERROR;
20267}
20268
20269#ifdef LPFC_MXP_STAT
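
/**
 * lpfc_snapshot_mxp - Snapshot pbl, pvt and busy count
 * @phba: pointer to lpfc hba data structure
 * @hwqid: belong to which HWQ.
 *
 * The purpose of this routine is to take a snapshot of pbl, pvt and busy
 * count some time after a test case starts running. stat_snapshot_taken is
 * incremented each time this routine is called from the heartbeat timer;
 * when it equals LPFC_MXP_SNAPSHOT_TAKEN, a snapshot is recorded.
 **/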
20284void lpfc_snapshot_mxp(struct lpfc_hba *phba, u32 hwqid)
20285{
20286 struct lpfc_sli4_hdw_queue *qp;
20287 struct lpfc_multixri_pool *multixri_pool;
20288 struct lpfc_pvt_pool *pvt_pool;
20289 struct lpfc_pbl_pool *pbl_pool;
20290 u32 txcmplq_cnt;
20291
20292 qp = &phba->sli4_hba.hdwq[hwqid];
20293 multixri_pool = qp->p_multixri_pool;
20294 if (!multixri_pool)
20295 return;
20296
20297 if (multixri_pool->stat_snapshot_taken == LPFC_MXP_SNAPSHOT_TAKEN) {
20298 pvt_pool = &qp->p_multixri_pool->pvt_pool;
20299 pbl_pool = &qp->p_multixri_pool->pbl_pool;
20300 txcmplq_cnt = qp->io_wq->pring->txcmplq_cnt;
20301
20302 multixri_pool->stat_pbl_count = pbl_pool->count;
20303 multixri_pool->stat_pvt_count = pvt_pool->count;
20304 multixri_pool->stat_busy_count = txcmplq_cnt;
20305 }
20306
20307 multixri_pool->stat_snapshot_taken++;
20308}
20309#endif
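
/**
 * lpfc_adjust_pvt_pool_count - Adjust private pool count
 * @phba: pointer to lpfc hba data structure.
 * @hwqid: belong to which HWQ.
 *
 * This routine moves some XRIs from the private to the public pool when the
 * private pool is not busy.
 **/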
20319void lpfc_adjust_pvt_pool_count(struct lpfc_hba *phba, u32 hwqid)
20320{
20321 struct lpfc_multixri_pool *multixri_pool;
20322 u32 io_req_count;
20323 u32 prev_io_req_count;
20324
20325 multixri_pool = phba->sli4_hba.hdwq[hwqid].p_multixri_pool;
20326 if (!multixri_pool)
20327 return;
20328 io_req_count = multixri_pool->io_req_count;
20329 prev_io_req_count = multixri_pool->prev_io_req_count;
20330
20331 if (prev_io_req_count != io_req_count) {
20332
20333 multixri_pool->prev_io_req_count = io_req_count;
20334 } else {
20335
20336
20337
20338 lpfc_move_xri_pvt_to_pbl(phba, hwqid);
20339 }
20340}
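
/**
 * lpfc_adjust_high_watermark - Adjust high watermark
 * @phba: pointer to lpfc hba data structure.
 * @hwqid: belong to which HWQ.
 *
 * This routine sets the high watermark as the number of outstanding XRIs,
 * but makes sure the new value is between xri_limit/2 and xri_limit.
 **/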
20350void lpfc_adjust_high_watermark(struct lpfc_hba *phba, u32 hwqid)
20351{
20352 u32 new_watermark;
20353 u32 watermark_max;
20354 u32 watermark_min;
20355 u32 xri_limit;
20356 u32 txcmplq_cnt;
20357 u32 abts_io_bufs;
20358 struct lpfc_multixri_pool *multixri_pool;
20359 struct lpfc_sli4_hdw_queue *qp;
20360
20361 qp = &phba->sli4_hba.hdwq[hwqid];
20362 multixri_pool = qp->p_multixri_pool;
20363 if (!multixri_pool)
20364 return;
20365 xri_limit = multixri_pool->xri_limit;
20366
20367 watermark_max = xri_limit;
20368 watermark_min = xri_limit / 2;
20369
20370 txcmplq_cnt = qp->io_wq->pring->txcmplq_cnt;
20371 abts_io_bufs = qp->abts_scsi_io_bufs;
20372 abts_io_bufs += qp->abts_nvme_io_bufs;
20373
20374 new_watermark = txcmplq_cnt + abts_io_bufs;
20375 new_watermark = min(watermark_max, new_watermark);
20376 new_watermark = max(watermark_min, new_watermark);
20377 multixri_pool->pvt_pool.high_watermark = new_watermark;
20378
20379#ifdef LPFC_MXP_STAT
20380 multixri_pool->stat_max_hwm = max(multixri_pool->stat_max_hwm,
20381 new_watermark);
20382#endif
20383}
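
/**
 * lpfc_move_xri_pvt_to_pbl - Move some XRIs from private to public pool
 * @phba: pointer to lpfc hba data structure.
 * @hwqid: belong to which HWQ.
 *
 * This routine is called from the heartbeat timer when pvt_pool is idle.
 * All free XRIs are moved from private to public pool on hwqid in 2 steps:
 * the first step moves (all - low_watermark) amount of XRIs; the second
 * step moves the rest of the XRIs.
 **/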
20395void lpfc_move_xri_pvt_to_pbl(struct lpfc_hba *phba, u32 hwqid)
20396{
20397 struct lpfc_pbl_pool *pbl_pool;
20398 struct lpfc_pvt_pool *pvt_pool;
20399 struct lpfc_sli4_hdw_queue *qp;
20400 struct lpfc_io_buf *lpfc_ncmd;
20401 struct lpfc_io_buf *lpfc_ncmd_next;
20402 unsigned long iflag;
20403 struct list_head tmp_list;
20404 u32 tmp_count;
20405
20406 qp = &phba->sli4_hba.hdwq[hwqid];
20407 pbl_pool = &qp->p_multixri_pool->pbl_pool;
20408 pvt_pool = &qp->p_multixri_pool->pvt_pool;
20409 tmp_count = 0;
20410
20411 lpfc_qp_spin_lock_irqsave(&pbl_pool->lock, iflag, qp, mv_to_pub_pool);
20412 lpfc_qp_spin_lock(&pvt_pool->lock, qp, mv_from_pvt_pool);
20413
20414 if (pvt_pool->count > pvt_pool->low_watermark) {
20415
20416
20417
20418
20419
20420 INIT_LIST_HEAD(&tmp_list);
20421 list_for_each_entry_safe(lpfc_ncmd, lpfc_ncmd_next,
20422 &pvt_pool->list, list) {
20423 list_move_tail(&lpfc_ncmd->list, &tmp_list);
20424 tmp_count++;
20425 if (tmp_count >= pvt_pool->low_watermark)
20426 break;
20427 }
20428
20429
20430 list_splice_init(&pvt_pool->list, &pbl_pool->list);
20431
20432
20433 list_splice(&tmp_list, &pvt_pool->list);
20434
20435 pbl_pool->count += (pvt_pool->count - tmp_count);
20436 pvt_pool->count = tmp_count;
20437 } else {
20438
20439 list_splice_init(&pvt_pool->list, &pbl_pool->list);
20440 pbl_pool->count += pvt_pool->count;
20441 pvt_pool->count = 0;
20442 }
20443
20444 spin_unlock(&pvt_pool->lock);
20445 spin_unlock_irqrestore(&pbl_pool->lock, iflag);
20446}
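
/**
 * _lpfc_move_xri_pbl_to_pvt - Move some XRIs from public to private pool
 * @phba: pointer to lpfc hba data structure
 * @qp: pointer to HDW queue
 * @pbl_pool: specified public free XRI pool
 * @pvt_pool: specified private free XRI pool
 * @count: number of XRIs to move
 *
 * This routine tries to move some free common bufs from the specified
 * pbl_pool to the specified pvt_pool. It might move less than count XRIs
 * if there are not enough in the public pool.
 *
 * Return:
 *   true  - if XRIs are successfully moved from the specified pbl_pool to
 *           the specified pvt_pool
 *   false - if the specified pbl_pool is empty or locked by someone else
 **/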
20465static bool
20466_lpfc_move_xri_pbl_to_pvt(struct lpfc_hba *phba, struct lpfc_sli4_hdw_queue *qp,
20467 struct lpfc_pbl_pool *pbl_pool,
20468 struct lpfc_pvt_pool *pvt_pool, u32 count)
20469{
20470 struct lpfc_io_buf *lpfc_ncmd;
20471 struct lpfc_io_buf *lpfc_ncmd_next;
20472 unsigned long iflag;
20473 int ret;
20474
20475 ret = spin_trylock_irqsave(&pbl_pool->lock, iflag);
20476 if (ret) {
20477 if (pbl_pool->count) {
20478
20479 lpfc_qp_spin_lock(&pvt_pool->lock, qp, mv_to_pvt_pool);
20480 list_for_each_entry_safe(lpfc_ncmd,
20481 lpfc_ncmd_next,
20482 &pbl_pool->list,
20483 list) {
20484 list_move_tail(&lpfc_ncmd->list,
20485 &pvt_pool->list);
20486 pvt_pool->count++;
20487 pbl_pool->count--;
20488 count--;
20489 if (count == 0)
20490 break;
20491 }
20492
20493 spin_unlock(&pvt_pool->lock);
20494 spin_unlock_irqrestore(&pbl_pool->lock, iflag);
20495 return true;
20496 }
20497 spin_unlock_irqrestore(&pbl_pool->lock, iflag);
20498 }
20499
20500 return false;
20501}
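
/**
 * lpfc_move_xri_pbl_to_pvt - Move some XRIs from public to private pool
 * @phba: pointer to lpfc hba data structure.
 * @hwqid: belong to which HWQ.
 * @count: number of XRIs to move
 *
 * This routine tries to find some free common bufs in one of the public
 * pools with the round robin method. The search always starts from the
 * local hwqid, then the next HWQ found last time (rrb_next_hwqid). Once a
 * public pool is found, a batch of free common bufs is moved to the private
 * pool on hwqid. It might move less than count XRIs if there are not enough
 * in the public pool.
 **/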
20515void lpfc_move_xri_pbl_to_pvt(struct lpfc_hba *phba, u32 hwqid, u32 count)
20516{
20517 struct lpfc_multixri_pool *multixri_pool;
20518 struct lpfc_multixri_pool *next_multixri_pool;
20519 struct lpfc_pvt_pool *pvt_pool;
20520 struct lpfc_pbl_pool *pbl_pool;
20521 struct lpfc_sli4_hdw_queue *qp;
20522 u32 next_hwqid;
20523 u32 hwq_count;
20524 int ret;
20525
20526 qp = &phba->sli4_hba.hdwq[hwqid];
20527 multixri_pool = qp->p_multixri_pool;
20528 pvt_pool = &multixri_pool->pvt_pool;
20529 pbl_pool = &multixri_pool->pbl_pool;
20530
20531
20532 ret = _lpfc_move_xri_pbl_to_pvt(phba, qp, pbl_pool, pvt_pool, count);
20533 if (ret) {
20534#ifdef LPFC_MXP_STAT
20535 multixri_pool->local_pbl_hit_count++;
20536#endif
20537 return;
20538 }
20539
20540 hwq_count = phba->cfg_hdw_queue;
20541
20542
20543 next_hwqid = multixri_pool->rrb_next_hwqid;
20544
20545 do {
20546
20547 next_hwqid = (next_hwqid + 1) % hwq_count;
20548
20549 next_multixri_pool =
20550 phba->sli4_hba.hdwq[next_hwqid].p_multixri_pool;
20551 pbl_pool = &next_multixri_pool->pbl_pool;
20552
20553
20554 ret = _lpfc_move_xri_pbl_to_pvt(
20555 phba, qp, pbl_pool, pvt_pool, count);
20556
20557
20558 } while (!ret && next_hwqid != multixri_pool->rrb_next_hwqid);
20559
20560
20561 multixri_pool->rrb_next_hwqid = next_hwqid;
20562
20563 if (!ret) {
20564
20565 multixri_pool->pbl_empty_count++;
20566 }
20567
20568#ifdef LPFC_MXP_STAT
20569 if (ret) {
20570 if (next_hwqid == hwqid)
20571 multixri_pool->local_pbl_hit_count++;
20572 else
20573 multixri_pool->other_pbl_hit_count++;
20574 }
20575#endif
20576}
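
/**
 * lpfc_keep_pvt_pool_above_lowwm - Keep pvt_pool above low watermark
 * @phba: pointer to lpfc hba data structure.
 * @hwqid: belong to which HWQ.
 *
 * This routine gets a batch of XRIs from pbl_pool if pvt_pool is less than
 * the low watermark.
 **/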
20586void lpfc_keep_pvt_pool_above_lowwm(struct lpfc_hba *phba, u32 hwqid)
20587{
20588 struct lpfc_multixri_pool *multixri_pool;
20589 struct lpfc_pvt_pool *pvt_pool;
20590
20591 multixri_pool = phba->sli4_hba.hdwq[hwqid].p_multixri_pool;
20592 pvt_pool = &multixri_pool->pvt_pool;
20593
20594 if (pvt_pool->count < pvt_pool->low_watermark)
20595 lpfc_move_xri_pbl_to_pvt(phba, hwqid, XRI_BATCH);
20596}
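
/**
 * lpfc_release_io_buf - Return one IO buf back to free pool
 * @phba: pointer to lpfc hba data structure.
 * @lpfc_ncmd: IO buf to be returned.
 * @qp: belong to which HWQ.
 *
 * This routine returns one IO buf back to a free pool. If this is an urgent
 * IO, the IO buf is returned to the expedite pool. If cfg_xri_rebalancing
 * is set, the IO buf is returned to pbl_pool or pvt_pool based on the
 * watermark and xri_limit; otherwise it is returned to
 * lpfc_io_buf_list_put.
 **/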
20610void lpfc_release_io_buf(struct lpfc_hba *phba, struct lpfc_io_buf *lpfc_ncmd,
20611 struct lpfc_sli4_hdw_queue *qp)
20612{
20613 unsigned long iflag;
20614 struct lpfc_pbl_pool *pbl_pool;
20615 struct lpfc_pvt_pool *pvt_pool;
20616 struct lpfc_epd_pool *epd_pool;
20617 u32 txcmplq_cnt;
20618 u32 xri_owned;
20619 u32 xri_limit;
20620 u32 abts_io_bufs;

	/* MUST zero fields if buffer is reused by another protocol */
20623 lpfc_ncmd->nvmeCmd = NULL;
20624 lpfc_ncmd->cur_iocbq.wqe_cmpl = NULL;
20625 lpfc_ncmd->cur_iocbq.iocb_cmpl = NULL;
20626
20627 if (phba->cfg_xpsgl && !phba->nvmet_support &&
20628 !list_empty(&lpfc_ncmd->dma_sgl_xtra_list))
20629 lpfc_put_sgl_per_hdwq(phba, lpfc_ncmd);
20630
20631 if (!list_empty(&lpfc_ncmd->dma_cmd_rsp_list))
20632 lpfc_put_cmd_rsp_buf_per_hdwq(phba, lpfc_ncmd);
20633
20634 if (phba->cfg_xri_rebalancing) {
20635 if (lpfc_ncmd->expedite) {
20636
20637 epd_pool = &phba->epd_pool;
20638 spin_lock_irqsave(&epd_pool->lock, iflag);
20639 list_add_tail(&lpfc_ncmd->list, &epd_pool->list);
20640 epd_pool->count++;
20641 spin_unlock_irqrestore(&epd_pool->lock, iflag);
20642 return;
20643 }
20644
20645
20646
20647
20648
20649 if (!qp->p_multixri_pool)
20650 return;
20651
20652 pbl_pool = &qp->p_multixri_pool->pbl_pool;
20653 pvt_pool = &qp->p_multixri_pool->pvt_pool;
20654
20655 txcmplq_cnt = qp->io_wq->pring->txcmplq_cnt;
20656 abts_io_bufs = qp->abts_scsi_io_bufs;
20657 abts_io_bufs += qp->abts_nvme_io_bufs;
20658
20659 xri_owned = pvt_pool->count + txcmplq_cnt + abts_io_bufs;
20660 xri_limit = qp->p_multixri_pool->xri_limit;
20661
20662#ifdef LPFC_MXP_STAT
20663 if (xri_owned <= xri_limit)
20664 qp->p_multixri_pool->below_limit_count++;
20665 else
20666 qp->p_multixri_pool->above_limit_count++;
20667#endif
20668
20669
20670
20671
20672 if ((pvt_pool->count < pvt_pool->low_watermark) ||
20673 (xri_owned < xri_limit &&
20674 pvt_pool->count < pvt_pool->high_watermark)) {
20675 lpfc_qp_spin_lock_irqsave(&pvt_pool->lock, iflag,
20676 qp, free_pvt_pool);
20677 list_add_tail(&lpfc_ncmd->list,
20678 &pvt_pool->list);
20679 pvt_pool->count++;
20680 spin_unlock_irqrestore(&pvt_pool->lock, iflag);
20681 } else {
20682 lpfc_qp_spin_lock_irqsave(&pbl_pool->lock, iflag,
20683 qp, free_pub_pool);
20684 list_add_tail(&lpfc_ncmd->list,
20685 &pbl_pool->list);
20686 pbl_pool->count++;
20687 spin_unlock_irqrestore(&pbl_pool->lock, iflag);
20688 }
20689 } else {
20690 lpfc_qp_spin_lock_irqsave(&qp->io_buf_list_put_lock, iflag,
20691 qp, free_xri);
20692 list_add_tail(&lpfc_ncmd->list,
20693 &qp->lpfc_io_buf_list_put);
20694 qp->put_io_bufs++;
20695 spin_unlock_irqrestore(&qp->io_buf_list_put_lock,
20696 iflag);
20697 }
20698}
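
/**
 * lpfc_get_io_buf_from_private_pool - Get one free IO buf from private pool
 * @phba: pointer to lpfc hba data structure.
 * @qp: pointer to HDW queue
 * @pvt_pool: pointer to private pool data structure.
 * @ndlp: pointer to lpfc nodelist data structure.
 *
 * This routine tries to get one free IO buf from the private pool, skipping
 * XRIs that still have an active RRQ for @ndlp.
 *
 * Return:
 *   pointer to one free IO buf - if private pool is not empty
 *   NULL - if private pool is empty
 **/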
20713static struct lpfc_io_buf *
20714lpfc_get_io_buf_from_private_pool(struct lpfc_hba *phba,
20715 struct lpfc_sli4_hdw_queue *qp,
20716 struct lpfc_pvt_pool *pvt_pool,
20717 struct lpfc_nodelist *ndlp)
20718{
20719 struct lpfc_io_buf *lpfc_ncmd;
20720 struct lpfc_io_buf *lpfc_ncmd_next;
20721 unsigned long iflag;
20722
20723 lpfc_qp_spin_lock_irqsave(&pvt_pool->lock, iflag, qp, alloc_pvt_pool);
20724 list_for_each_entry_safe(lpfc_ncmd, lpfc_ncmd_next,
20725 &pvt_pool->list, list) {
20726 if (lpfc_test_rrq_active(
20727 phba, ndlp, lpfc_ncmd->cur_iocbq.sli4_lxritag))
20728 continue;
20729 list_del(&lpfc_ncmd->list);
20730 pvt_pool->count--;
20731 spin_unlock_irqrestore(&pvt_pool->lock, iflag);
20732 return lpfc_ncmd;
20733 }
20734 spin_unlock_irqrestore(&pvt_pool->lock, iflag);
20735
20736 return NULL;
20737}
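
/**
 * lpfc_get_io_buf_from_expedite_pool - Get one free IO buf from expedite pool
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine tries to get one free IO buf from the expedite pool.
 *
 * Return:
 *   pointer to one free IO buf - if expedite pool is not empty
 *   NULL - if expedite pool is empty
 **/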
20749static struct lpfc_io_buf *
20750lpfc_get_io_buf_from_expedite_pool(struct lpfc_hba *phba)
20751{
20752 struct lpfc_io_buf *lpfc_ncmd;
20753 struct lpfc_io_buf *lpfc_ncmd_next;
20754 unsigned long iflag;
20755 struct lpfc_epd_pool *epd_pool;
20756
20757 epd_pool = &phba->epd_pool;
20758 lpfc_ncmd = NULL;
20759
20760 spin_lock_irqsave(&epd_pool->lock, iflag);
20761 if (epd_pool->count > 0) {
20762 list_for_each_entry_safe(lpfc_ncmd, lpfc_ncmd_next,
20763 &epd_pool->list, list) {
20764 list_del(&lpfc_ncmd->list);
20765 epd_pool->count--;
20766 break;
20767 }
20768 }
20769 spin_unlock_irqrestore(&epd_pool->lock, iflag);
20770
20771 return lpfc_ncmd;
20772}
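
/**
 * lpfc_get_io_buf_from_multixri_pools - Get one free IO buf
 * @phba: pointer to lpfc hba data structure.
 * @ndlp: pointer to lpfc nodelist data structure.
 * @hwqid: belong to which HWQ
 * @expedite: 1 means this request is urgent.
 *
 * This routine will do the following actions and then return a pointer to
 * one free IO buf.
 *
 * 1. If the private free xri pool is empty, move some XRIs from the public
 *    to the private pool.
 * 2. Get one XRI from the private free xri pool.
 * 3. If we fail to get one from pvt_pool and this is an expedite request,
 *    get one free xri from the expedite pool.
 *
 * Note: ndlp is only used on the SCSI side for RRQ testing.
 *       The caller should pass NULL for ndlp on the NVME side.
 *
 * Return:
 *   pointer to one free IO buf - if a buffer is available
 *   NULL - if no buffer is available
 **/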
20797static struct lpfc_io_buf *
20798lpfc_get_io_buf_from_multixri_pools(struct lpfc_hba *phba,
20799 struct lpfc_nodelist *ndlp,
20800 int hwqid, int expedite)
20801{
20802 struct lpfc_sli4_hdw_queue *qp;
20803 struct lpfc_multixri_pool *multixri_pool;
20804 struct lpfc_pvt_pool *pvt_pool;
20805 struct lpfc_io_buf *lpfc_ncmd;
20806
20807 qp = &phba->sli4_hba.hdwq[hwqid];
20808 lpfc_ncmd = NULL;
20809 multixri_pool = qp->p_multixri_pool;
20810 pvt_pool = &multixri_pool->pvt_pool;
20811 multixri_pool->io_req_count++;

	/* If pvt_pool is empty, move some XRIs from public to private pool */
20814 if (pvt_pool->count == 0)
20815 lpfc_move_xri_pbl_to_pvt(phba, hwqid, XRI_BATCH);

	/* Get one XRI from private free xri pool */
20818 lpfc_ncmd = lpfc_get_io_buf_from_private_pool(phba, qp, pvt_pool, ndlp);
20819
20820 if (lpfc_ncmd) {
20821 lpfc_ncmd->hdwq = qp;
20822 lpfc_ncmd->hdwq_no = hwqid;
20823 } else if (expedite) {
		/* If we fail to get one from pvt_pool and this is an
		 * expedite request, get one free xri from expedite pool.
		 */
20827 lpfc_ncmd = lpfc_get_io_buf_from_expedite_pool(phba);
20828 }
20829
20830 return lpfc_ncmd;
20831}
20832
20833static inline struct lpfc_io_buf *
20834lpfc_io_buf(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp, int idx)
20835{
20836 struct lpfc_sli4_hdw_queue *qp;
20837 struct lpfc_io_buf *lpfc_cmd, *lpfc_cmd_next;
20838
20839 qp = &phba->sli4_hba.hdwq[idx];
20840 list_for_each_entry_safe(lpfc_cmd, lpfc_cmd_next,
20841 &qp->lpfc_io_buf_list_get, list) {
20842 if (lpfc_test_rrq_active(phba, ndlp,
20843 lpfc_cmd->cur_iocbq.sli4_lxritag))
20844 continue;
20845
20846 if (lpfc_cmd->flags & LPFC_SBUF_NOT_POSTED)
20847 continue;
20848
20849 list_del_init(&lpfc_cmd->list);
20850 qp->get_io_bufs--;
20851 lpfc_cmd->hdwq = qp;
20852 lpfc_cmd->hdwq_no = idx;
20853 return lpfc_cmd;
20854 }
20855 return NULL;
20856}
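
/**
 * lpfc_get_io_buf - Get one IO buffer from free pool
 * @phba: The HBA for which this call is being executed.
 * @ndlp: pointer to lpfc nodelist data structure.
 * @hwqid: belong to which HWQ
 * @expedite: 1 means this request is urgent.
 *
 * This routine gets one IO buffer from a free pool. If cfg_xri_rebalancing
 * is set, it removes an IO buffer from the multiXRI pools; otherwise it
 * removes an IO buffer from the head of the hdwq io_buf_list and returns it
 * to the caller.
 *
 * Note: ndlp is only used on the SCSI side for RRQ testing.
 *       The caller should pass NULL for ndlp on the NVME side.
 *
 * Return:
 *   NULL - Error
 *   Pointer to lpfc_io_buf - Success
 **/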
20876struct lpfc_io_buf *lpfc_get_io_buf(struct lpfc_hba *phba,
20877 struct lpfc_nodelist *ndlp,
20878 u32 hwqid, int expedite)
20879{
20880 struct lpfc_sli4_hdw_queue *qp;
20881 unsigned long iflag;
20882 struct lpfc_io_buf *lpfc_cmd;
20883
20884 qp = &phba->sli4_hba.hdwq[hwqid];
20885 lpfc_cmd = NULL;
20886
20887 if (phba->cfg_xri_rebalancing)
20888 lpfc_cmd = lpfc_get_io_buf_from_multixri_pools(
20889 phba, ndlp, hwqid, expedite);
20890 else {
20891 lpfc_qp_spin_lock_irqsave(&qp->io_buf_list_get_lock, iflag,
20892 qp, alloc_xri_get);
20893 if (qp->get_io_bufs > LPFC_NVME_EXPEDITE_XRICNT || expedite)
20894 lpfc_cmd = lpfc_io_buf(phba, ndlp, hwqid);
20895 if (!lpfc_cmd) {
20896 lpfc_qp_spin_lock(&qp->io_buf_list_put_lock,
20897 qp, alloc_xri_put);
20898 list_splice(&qp->lpfc_io_buf_list_put,
20899 &qp->lpfc_io_buf_list_get);
20900 qp->get_io_bufs += qp->put_io_bufs;
20901 INIT_LIST_HEAD(&qp->lpfc_io_buf_list_put);
20902 qp->put_io_bufs = 0;
20903 spin_unlock(&qp->io_buf_list_put_lock);
20904 if (qp->get_io_bufs > LPFC_NVME_EXPEDITE_XRICNT ||
20905 expedite)
20906 lpfc_cmd = lpfc_io_buf(phba, ndlp, hwqid);
20907 }
20908 spin_unlock_irqrestore(&qp->io_buf_list_get_lock, iflag);
20909 }
20910
20911 return lpfc_cmd;
20912}
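
/**
 * lpfc_get_sgl_per_hdwq - Get one SGL chunk from hdwq's pool
 * @phba: The HBA for which this call is being executed.
 * @lpfc_buf: IO buf structure to append the SGL chunk
 *
 * This routine gets one SGL chunk buffer from the hdwq's SGL chunk pool,
 * and will allocate an SGL chunk if the pool is empty.
 *
 * Return:
 *   NULL - Error
 *   Pointer to sli4_hybrid_sgl - Success
 **/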
20926struct sli4_hybrid_sgl *
20927lpfc_get_sgl_per_hdwq(struct lpfc_hba *phba, struct lpfc_io_buf *lpfc_buf)
20928{
20929 struct sli4_hybrid_sgl *list_entry = NULL;
20930 struct sli4_hybrid_sgl *tmp = NULL;
20931 struct sli4_hybrid_sgl *allocated_sgl = NULL;
20932 struct lpfc_sli4_hdw_queue *hdwq = lpfc_buf->hdwq;
20933 struct list_head *buf_list = &hdwq->sgl_list;
20934 unsigned long iflags;
20935
20936 spin_lock_irqsave(&hdwq->hdwq_lock, iflags);
20937
20938 if (likely(!list_empty(buf_list))) {
20939
20940 list_for_each_entry_safe(list_entry, tmp,
20941 buf_list, list_node) {
20942 list_move_tail(&list_entry->list_node,
20943 &lpfc_buf->dma_sgl_xtra_list);
20944 break;
20945 }
20946 } else {
20947
20948 spin_unlock_irqrestore(&hdwq->hdwq_lock, iflags);
20949 tmp = kmalloc_node(sizeof(*tmp), GFP_ATOMIC,
20950 cpu_to_node(hdwq->io_wq->chann));
20951 if (!tmp) {
20952 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
20953 "8353 error kmalloc memory for HDWQ "
20954 "%d %s\n",
20955 lpfc_buf->hdwq_no, __func__);
20956 return NULL;
20957 }
20958
20959 tmp->dma_sgl = dma_pool_alloc(phba->lpfc_sg_dma_buf_pool,
20960 GFP_ATOMIC, &tmp->dma_phys_sgl);
20961 if (!tmp->dma_sgl) {
20962 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
20963 "8354 error pool_alloc memory for HDWQ "
20964 "%d %s\n",
20965 lpfc_buf->hdwq_no, __func__);
20966 kfree(tmp);
20967 return NULL;
20968 }
20969
20970 spin_lock_irqsave(&hdwq->hdwq_lock, iflags);
20971 list_add_tail(&tmp->list_node, &lpfc_buf->dma_sgl_xtra_list);
20972 }
20973
20974 allocated_sgl = list_last_entry(&lpfc_buf->dma_sgl_xtra_list,
20975 struct sli4_hybrid_sgl,
20976 list_node);
20977
20978 spin_unlock_irqrestore(&hdwq->hdwq_lock, iflags);
20979
20980 return allocated_sgl;
20981}
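
/**
 * lpfc_put_sgl_per_hdwq - Put one SGL chunk into hdwq pool
 * @phba: The HBA for which this call is being executed.
 * @lpfc_buf: IO buf structure with the SGL chunk
 *
 * This routine puts one SGL chunk buffer back into the hdwq's SGL chunk
 * pool.
 *
 * Return:
 *   0 - Success
 *   -EINVAL - Error
 **/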
20994int
20995lpfc_put_sgl_per_hdwq(struct lpfc_hba *phba, struct lpfc_io_buf *lpfc_buf)
20996{
20997 int rc = 0;
20998 struct sli4_hybrid_sgl *list_entry = NULL;
20999 struct sli4_hybrid_sgl *tmp = NULL;
21000 struct lpfc_sli4_hdw_queue *hdwq = lpfc_buf->hdwq;
21001 struct list_head *buf_list = &hdwq->sgl_list;
21002 unsigned long iflags;
21003
21004 spin_lock_irqsave(&hdwq->hdwq_lock, iflags);
21005
21006 if (likely(!list_empty(&lpfc_buf->dma_sgl_xtra_list))) {
21007 list_for_each_entry_safe(list_entry, tmp,
21008 &lpfc_buf->dma_sgl_xtra_list,
21009 list_node) {
21010 list_move_tail(&list_entry->list_node,
21011 buf_list);
21012 }
21013 } else {
21014 rc = -EINVAL;
21015 }
21016
21017 spin_unlock_irqrestore(&hdwq->hdwq_lock, iflags);
21018 return rc;
21019}
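
/**
 * lpfc_free_sgl_per_hdwq - Free all SGL chunks of hdwq pool
 * @phba: phba object
 * @hdwq: hdwq to cleanup sgl buff resources on
 *
 * This routine frees all SGL chunks of the hdwq's SGL chunk pool.
 *
 * Return: None
 **/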
void
lpfc_free_sgl_per_hdwq(struct lpfc_hba *phba,
		       struct lpfc_sli4_hdw_queue *hdwq)
{
	struct list_head *buf_list = &hdwq->sgl_list;
	struct sli4_hybrid_sgl *list_entry = NULL;
	struct sli4_hybrid_sgl *tmp = NULL;
	unsigned long iflags;

	spin_lock_irqsave(&hdwq->hdwq_lock, iflags);

	/* Free sgl pool */
	list_for_each_entry_safe(list_entry, tmp,
				 buf_list, list_node) {
		dma_pool_free(phba->lpfc_sg_dma_buf_pool,
			      list_entry->dma_sgl,
			      list_entry->dma_phys_sgl);
		list_del(&list_entry->list_node);
		kfree(list_entry);
	}

	spin_unlock_irqrestore(&hdwq->hdwq_lock, iflags);
}

/**
 * lpfc_get_cmd_rsp_buf_per_hdwq - Get one CMD/RSP buffer from hdwq
 * @phba: The HBA for which this call is being executed.
 * @lpfc_buf: IO buf structure to attach the CMD/RSP buffer
 *
 * This routine gets one CMD/RSP buffer from hdwq's CMD/RSP pool,
 * and will allocate a CMD/RSP buffer if the pool is empty.
 *
 * Return codes:
 *   NULL - Error
 *   Pointer to fcp_cmd_rsp_buf - Success
 **/
struct fcp_cmd_rsp_buf *
lpfc_get_cmd_rsp_buf_per_hdwq(struct lpfc_hba *phba,
			      struct lpfc_io_buf *lpfc_buf)
{
	struct fcp_cmd_rsp_buf *list_entry = NULL;
	struct fcp_cmd_rsp_buf *tmp = NULL;
	struct fcp_cmd_rsp_buf *allocated_buf = NULL;
	struct lpfc_sli4_hdw_queue *hdwq = lpfc_buf->hdwq;
	struct list_head *buf_list = &hdwq->cmd_rsp_buf_list;
	unsigned long iflags;

	spin_lock_irqsave(&hdwq->hdwq_lock, iflags);

	if (likely(!list_empty(buf_list))) {
		/* break off 1 chunk from the list */
		list_for_each_entry_safe(list_entry, tmp,
					 buf_list,
					 list_node) {
			list_move_tail(&list_entry->list_node,
				       &lpfc_buf->dma_cmd_rsp_list);
			break;
		}
	} else {
		/* allocate more */
		spin_unlock_irqrestore(&hdwq->hdwq_lock, iflags);
		tmp = kmalloc_node(sizeof(*tmp), GFP_ATOMIC,
				   cpu_to_node(hdwq->io_wq->chann));
		if (!tmp) {
			lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
					"8355 error kmalloc memory for HDWQ "
					"%d %s\n",
					lpfc_buf->hdwq_no, __func__);
			return NULL;
		}

		tmp->fcp_cmnd = dma_pool_alloc(phba->lpfc_cmd_rsp_buf_pool,
					       GFP_ATOMIC,
					       &tmp->fcp_cmd_rsp_dma_handle);

		if (!tmp->fcp_cmnd) {
			lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
					"8356 error pool_alloc memory for HDWQ "
					"%d %s\n",
					lpfc_buf->hdwq_no, __func__);
			kfree(tmp);
			return NULL;
		}

		/* fcp_rsp follows fcp_cmnd in the same DMA allocation */
		tmp->fcp_rsp = (struct fcp_rsp *)((uint8_t *)tmp->fcp_cmnd +
				sizeof(struct fcp_cmnd));

		spin_lock_irqsave(&hdwq->hdwq_lock, iflags);
		list_add_tail(&tmp->list_node, &lpfc_buf->dma_cmd_rsp_list);
	}

	allocated_buf = list_last_entry(&lpfc_buf->dma_cmd_rsp_list,
					struct fcp_cmd_rsp_buf,
					list_node);

	spin_unlock_irqrestore(&hdwq->hdwq_lock, iflags);

	return allocated_buf;
}

/**
 * lpfc_put_cmd_rsp_buf_per_hdwq - Put one CMD/RSP buffer into hdwq pool
 * @phba: The HBA for which this call is being executed.
 * @lpfc_buf: IO buf structure with the CMD/RSP buf
 *
 * This routine puts one CMD/RSP buffer into executing CPU's CMD/RSP pool.
 *
 * Return codes:
 *   0 - Success
 *   -EINVAL - Error
 **/
int
lpfc_put_cmd_rsp_buf_per_hdwq(struct lpfc_hba *phba,
			      struct lpfc_io_buf *lpfc_buf)
{
	int rc = 0;
	struct fcp_cmd_rsp_buf *list_entry = NULL;
	struct fcp_cmd_rsp_buf *tmp = NULL;
	struct lpfc_sli4_hdw_queue *hdwq = lpfc_buf->hdwq;
	struct list_head *buf_list = &hdwq->cmd_rsp_buf_list;
	unsigned long iflags;

	spin_lock_irqsave(&hdwq->hdwq_lock, iflags);

	if (likely(!list_empty(&lpfc_buf->dma_cmd_rsp_list))) {
		list_for_each_entry_safe(list_entry, tmp,
					 &lpfc_buf->dma_cmd_rsp_list,
					 list_node) {
			list_move_tail(&list_entry->list_node,
				       buf_list);
		}
	} else {
		rc = -EINVAL;
	}

	spin_unlock_irqrestore(&hdwq->hdwq_lock, iflags);
	return rc;
}
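
/*
 * Illustrative sketch only -- guarded out of the build by a hypothetical
 * LPFC_POOL_USAGE_EXAMPLE switch. It shows how a caller might use a pooled
 * CMD/RSP buffer: fcp_cmnd and fcp_rsp share one DMA allocation, with
 * fcp_rsp starting immediately after fcp_cmnd (see
 * lpfc_get_cmd_rsp_buf_per_hdwq() above). The helper name is an assumption
 * made for illustration.
 */
#ifdef LPFC_POOL_USAGE_EXAMPLE
static int
lpfc_example_use_cmd_rsp_buf(struct lpfc_hba *phba,
			     struct lpfc_io_buf *lpfc_cmd)
{
	struct fcp_cmd_rsp_buf *buf;

	/* Borrow one CMD/RSP buffer from this buffer's hdwq pool. */
	buf = lpfc_get_cmd_rsp_buf_per_hdwq(phba, lpfc_cmd);
	if (!buf)
		return -ENOMEM;

	/* Zero the command payload; the response area (buf->fcp_rsp) is
	 * written back by the adapter at I/O completion.
	 */
	memset(buf->fcp_cmnd, 0, sizeof(struct fcp_cmnd));

	/* ... the caller would fill buf->fcp_cmnd and post the WQE, using
	 * buf->fcp_cmd_rsp_dma_handle as the payload bus address ...
	 */

	/* The completion path hands the buffer back to the hdwq pool. */
	return lpfc_put_cmd_rsp_buf_per_hdwq(phba, lpfc_cmd);
}
#endif /* LPFC_POOL_USAGE_EXAMPLE */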

/**
 * lpfc_free_cmd_rsp_buf_per_hdwq - Free all CMD/RSP chunks of hdwq pool
 * @phba: phba object
 * @hdwq: hdwq to cleanup cmd rsp buff resources on
 *
 * This routine frees all CMD/RSP buffers of hdwq's CMD/RSP buf pool.
 *
 * Return codes:
 *   None
 **/
void
lpfc_free_cmd_rsp_buf_per_hdwq(struct lpfc_hba *phba,
			       struct lpfc_sli4_hdw_queue *hdwq)
{
	struct list_head *buf_list = &hdwq->cmd_rsp_buf_list;
	struct fcp_cmd_rsp_buf *list_entry = NULL;
	struct fcp_cmd_rsp_buf *tmp = NULL;
	unsigned long iflags;

	spin_lock_irqsave(&hdwq->hdwq_lock, iflags);

	/* Free cmd_rsp buf pool */
	list_for_each_entry_safe(list_entry, tmp,
				 buf_list,
				 list_node) {
		dma_pool_free(phba->lpfc_cmd_rsp_buf_pool,
			      list_entry->fcp_cmnd,
			      list_entry->fcp_cmd_rsp_dma_handle);
		list_del(&list_entry->list_node);
		kfree(list_entry);
	}

	spin_unlock_irqrestore(&hdwq->hdwq_lock, iflags);
}
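
/*
 * Illustrative sketch only -- guarded out of the build by a hypothetical
 * LPFC_POOL_USAGE_EXAMPLE switch. At adapter teardown, both per-hdwq pools
 * above would be drained for every hardware queue; the loop below is an
 * assumption about the caller, modeled on how the driver iterates its hdwq
 * array elsewhere.
 */
#ifdef LPFC_POOL_USAGE_EXAMPLE
static void
lpfc_example_drain_hdwq_pools(struct lpfc_hba *phba)
{
	u32 idx;

	for (idx = 0; idx < phba->cfg_hdw_queue; idx++) {
		struct lpfc_sli4_hdw_queue *hdwq =
				&phba->sli4_hba.hdwq[idx];

		/* Release all pooled SGL chunks and CMD/RSP buffers back
		 * to their dma_pools before the pools are destroyed.
		 */
		lpfc_free_sgl_per_hdwq(phba, hdwq);
		lpfc_free_cmd_rsp_buf_per_hdwq(phba, hdwq);
	}
}
#endif /* LPFC_POOL_USAGE_EXAMPLE */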