/*
 * lpfc_sli.c - SLI (Service Level Interface) support for the Emulex LPFC
 * Fibre Channel / FCoE host bus adapter driver.
 *
 * This file implements the ring, queue, IOCB/WQE and mailbox handling that
 * sits between the upper SCSI/NVMe layers and SLI-3 / SLI-4 hardware.
 */
24#include <linux/blkdev.h>
25#include <linux/pci.h>
26#include <linux/interrupt.h>
27#include <linux/delay.h>
28#include <linux/slab.h>
29#include <linux/lockdep.h>
30
31#include <scsi/scsi.h>
32#include <scsi/scsi_cmnd.h>
33#include <scsi/scsi_device.h>
34#include <scsi/scsi_host.h>
35#include <scsi/scsi_transport_fc.h>
36#include <scsi/fc/fc_fs.h>
37#include <linux/aer.h>
38#include <linux/crash_dump.h>
39#ifdef CONFIG_X86
40#include <asm/set_memory.h>
41#endif
42
43#include "lpfc_hw4.h"
44#include "lpfc_hw.h"
45#include "lpfc_sli.h"
46#include "lpfc_sli4.h"
47#include "lpfc_nl.h"
48#include "lpfc_disc.h"
49#include "lpfc.h"
50#include "lpfc_scsi.h"
51#include "lpfc_nvme.h"
52#include "lpfc_crtn.h"
53#include "lpfc_logmsg.h"
54#include "lpfc_compat.h"
55#include "lpfc_debugfs.h"
56#include "lpfc_vport.h"
57#include "lpfc_version.h"
58
59
60typedef enum _lpfc_iocb_type {
61 LPFC_UNKNOWN_IOCB,
62 LPFC_UNSOL_IOCB,
63 LPFC_SOL_IOCB,
64 LPFC_ABORT_IOCB
65} lpfc_iocb_type;
66
67
68
69static int lpfc_sli_issue_mbox_s4(struct lpfc_hba *, LPFC_MBOXQ_t *,
70 uint32_t);
71static int lpfc_sli4_read_rev(struct lpfc_hba *, LPFC_MBOXQ_t *,
72 uint8_t *, uint32_t *);
73static struct lpfc_iocbq *lpfc_sli4_els_wcqe_to_rspiocbq(struct lpfc_hba *,
74 struct lpfc_iocbq *);
75static void lpfc_sli4_send_seq_to_ulp(struct lpfc_vport *,
76 struct hbq_dmabuf *);
77static void lpfc_sli4_handle_mds_loopback(struct lpfc_vport *vport,
78 struct hbq_dmabuf *dmabuf);
79static bool lpfc_sli4_fp_handle_cqe(struct lpfc_hba *phba,
80 struct lpfc_queue *cq, struct lpfc_cqe *cqe);
81static int lpfc_sli4_post_sgl_list(struct lpfc_hba *, struct list_head *,
82 int);
83static void lpfc_sli4_hba_handle_eqe(struct lpfc_hba *phba,
84 struct lpfc_queue *eq,
85 struct lpfc_eqe *eqe);
86static bool lpfc_sli4_mbox_completions_pending(struct lpfc_hba *phba);
87static bool lpfc_sli4_process_missed_mbox_completions(struct lpfc_hba *phba);
88static struct lpfc_cqe *lpfc_sli4_cq_get(struct lpfc_queue *q);
89static void __lpfc_sli4_consume_cqe(struct lpfc_hba *phba,
90 struct lpfc_queue *cq,
91 struct lpfc_cqe *cqe);
92
93union lpfc_wqe128 lpfc_iread_cmd_template;
94union lpfc_wqe128 lpfc_iwrite_cmd_template;
95union lpfc_wqe128 lpfc_icmnd_cmd_template;
96
97static IOCB_t *
98lpfc_get_iocb_from_iocbq(struct lpfc_iocbq *iocbq)
99{
100 return &iocbq->iocb;
101}
102
103
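/*
 * lpfc_wqe_cmd_template - Initialize the FCP IREAD/IWRITE/ICMND WQE templates.
 * The fixed WQE fields are set once here; per-I/O fields (BDE, transfer
 * length, tags) are filled in when each command is built.
 */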
104void lpfc_wqe_cmd_template(void)
105{
106 union lpfc_wqe128 *wqe;
107
108
109 wqe = &lpfc_iread_cmd_template;
110 memset(wqe, 0, sizeof(union lpfc_wqe128));

	/* Words 0-6 are set per I/O; the fields below are fixed for every IREAD */
123 bf_set(wqe_cmnd, &wqe->fcp_iread.wqe_com, CMD_FCP_IREAD64_WQE);
124 bf_set(wqe_pu, &wqe->fcp_iread.wqe_com, PARM_READ_CHECK);
125 bf_set(wqe_class, &wqe->fcp_iread.wqe_com, CLASS3);
126 bf_set(wqe_ct, &wqe->fcp_iread.wqe_com, SLI4_CT_RPI);
127
128
129
130
131
132
133 bf_set(wqe_qosd, &wqe->fcp_iread.wqe_com, 0);
134 bf_set(wqe_iod, &wqe->fcp_iread.wqe_com, LPFC_WQE_IOD_READ);
135 bf_set(wqe_lenloc, &wqe->fcp_iread.wqe_com, LPFC_WQE_LENLOC_WORD4);
136 bf_set(wqe_dbde, &wqe->fcp_iread.wqe_com, 0);
137 bf_set(wqe_wqes, &wqe->fcp_iread.wqe_com, 1);
138
139
140 bf_set(wqe_cmd_type, &wqe->fcp_iread.wqe_com, COMMAND_DATA_IN);
141 bf_set(wqe_cqid, &wqe->fcp_iread.wqe_com, LPFC_WQE_CQ_ID_DEFAULT);
142 bf_set(wqe_pbde, &wqe->fcp_iread.wqe_com, 0);
143
144
145
146
147
148
149 wqe = &lpfc_iwrite_cmd_template;
150 memset(wqe, 0, sizeof(union lpfc_wqe128));

	/* Words 0-6 are set per I/O; the fields below are fixed for every IWRITE */
163 bf_set(wqe_cmnd, &wqe->fcp_iwrite.wqe_com, CMD_FCP_IWRITE64_WQE);
164 bf_set(wqe_pu, &wqe->fcp_iwrite.wqe_com, PARM_READ_CHECK);
165 bf_set(wqe_class, &wqe->fcp_iwrite.wqe_com, CLASS3);
166 bf_set(wqe_ct, &wqe->fcp_iwrite.wqe_com, SLI4_CT_RPI);
167
168
169
170
171
172
173 bf_set(wqe_qosd, &wqe->fcp_iwrite.wqe_com, 0);
174 bf_set(wqe_iod, &wqe->fcp_iwrite.wqe_com, LPFC_WQE_IOD_WRITE);
175 bf_set(wqe_lenloc, &wqe->fcp_iwrite.wqe_com, LPFC_WQE_LENLOC_WORD4);
176 bf_set(wqe_dbde, &wqe->fcp_iwrite.wqe_com, 0);
177 bf_set(wqe_wqes, &wqe->fcp_iwrite.wqe_com, 1);
178
179
180 bf_set(wqe_cmd_type, &wqe->fcp_iwrite.wqe_com, COMMAND_DATA_OUT);
181 bf_set(wqe_cqid, &wqe->fcp_iwrite.wqe_com, LPFC_WQE_CQ_ID_DEFAULT);
182 bf_set(wqe_pbde, &wqe->fcp_iwrite.wqe_com, 0);
183
184
185
186
187
188
189 wqe = &lpfc_icmnd_cmd_template;
190 memset(wqe, 0, sizeof(union lpfc_wqe128));

	/* Words 0-6 are set per I/O; the fields below are fixed for every ICMND */
201 bf_set(wqe_cmnd, &wqe->fcp_icmd.wqe_com, CMD_FCP_ICMND64_WQE);
202 bf_set(wqe_pu, &wqe->fcp_icmd.wqe_com, 0);
203 bf_set(wqe_class, &wqe->fcp_icmd.wqe_com, CLASS3);
204 bf_set(wqe_ct, &wqe->fcp_icmd.wqe_com, SLI4_CT_RPI);
205
206
207
208
209
210
211 bf_set(wqe_qosd, &wqe->fcp_icmd.wqe_com, 1);
212 bf_set(wqe_iod, &wqe->fcp_icmd.wqe_com, LPFC_WQE_IOD_NONE);
213 bf_set(wqe_lenloc, &wqe->fcp_icmd.wqe_com, LPFC_WQE_LENLOC_NONE);
214 bf_set(wqe_dbde, &wqe->fcp_icmd.wqe_com, 0);
215 bf_set(wqe_wqes, &wqe->fcp_icmd.wqe_com, 1);
216
217
218 bf_set(wqe_cmd_type, &wqe->fcp_icmd.wqe_com, COMMAND_DATA_IN);
219 bf_set(wqe_cqid, &wqe->fcp_icmd.wqe_com, LPFC_WQE_CQ_ID_DEFAULT);
220 bf_set(wqe_pbde, &wqe->fcp_icmd.wqe_com, 0);
221
222
223}
224
225#if defined(CONFIG_64BIT) && defined(__LITTLE_ENDIAN)
226
227
228
229
230
231
232
233
234
235
236
237
238
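/*
 * lpfc_sli4_pcimem_bcopy - Copy a queue entry as 64-bit words, no byte swap.
 * Used on 64-bit little-endian builds; other configurations fall back to
 * lpfc_sli_pcimem_bcopy() via the macro in the #else branch.
 */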
239static void
240lpfc_sli4_pcimem_bcopy(void *srcp, void *destp, uint32_t cnt)
241{
242 uint64_t *src = srcp;
243 uint64_t *dest = destp;
244 int i;
245
246 for (i = 0; i < (int)cnt; i += sizeof(uint64_t))
247 *dest++ = *src++;
248}
249#else
250#define lpfc_sli4_pcimem_bcopy(a, b, c) lpfc_sli_pcimem_bcopy(a, b, c)
251#endif
252
253
254
255
256
257
258
259
260
261
262
263
264
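/*
 * lpfc_sli4_wq_put - Post a WQE to a SLI-4 work queue.
 * Copies @wqe into the next free entry of @q (also pushing it through the
 * DPP window when direct packet push is enabled), rings the WQ doorbell and
 * advances the host index.
 * Returns 0 on success, -EBUSY if the queue is full, -ENOMEM if @q is NULL,
 * -EINVAL for an unknown doorbell format.
 */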
265static int
266lpfc_sli4_wq_put(struct lpfc_queue *q, union lpfc_wqe128 *wqe)
267{
268 union lpfc_wqe *temp_wqe;
269 struct lpfc_register doorbell;
270 uint32_t host_index;
271 uint32_t idx;
272 uint32_t i = 0;
273 uint8_t *tmp;
274 u32 if_type;
275
276
277 if (unlikely(!q))
278 return -ENOMEM;
279
280 temp_wqe = lpfc_sli4_qe(q, q->host_index);
281
282
283 idx = ((q->host_index + 1) % q->entry_count);
284 if (idx == q->hba_index) {
285 q->WQ_overflow++;
286 return -EBUSY;
287 }
288 q->WQ_posted++;
289
290 if (!((q->host_index + 1) % q->notify_interval))
291 bf_set(wqe_wqec, &wqe->generic.wqe_com, 1);
292 else
293 bf_set(wqe_wqec, &wqe->generic.wqe_com, 0);
294 if (q->phba->sli3_options & LPFC_SLI4_PHWQ_ENABLED)
295 bf_set(wqe_wqid, &wqe->generic.wqe_com, q->queue_id);
296 lpfc_sli4_pcimem_bcopy(wqe, temp_wqe, q->entry_size);
297 if (q->dpp_enable && q->phba->cfg_enable_dpp) {
298
299 tmp = (uint8_t *)temp_wqe;
300#ifdef __raw_writeq
301 for (i = 0; i < q->entry_size; i += sizeof(uint64_t))
302 __raw_writeq(*((uint64_t *)(tmp + i)),
303 q->dpp_regaddr + i);
304#else
305 for (i = 0; i < q->entry_size; i += sizeof(uint32_t))
306 __raw_writel(*((uint32_t *)(tmp + i)),
307 q->dpp_regaddr + i);
308#endif
309 }
310
311 wmb();
312
313
314 host_index = q->host_index;
315
316 q->host_index = idx;
317
318
319 doorbell.word0 = 0;
320 if (q->db_format == LPFC_DB_LIST_FORMAT) {
321 if (q->dpp_enable && q->phba->cfg_enable_dpp) {
322 bf_set(lpfc_if6_wq_db_list_fm_num_posted, &doorbell, 1);
323 bf_set(lpfc_if6_wq_db_list_fm_dpp, &doorbell, 1);
324 bf_set(lpfc_if6_wq_db_list_fm_dpp_id, &doorbell,
325 q->dpp_id);
326 bf_set(lpfc_if6_wq_db_list_fm_id, &doorbell,
327 q->queue_id);
328 } else {
329 bf_set(lpfc_wq_db_list_fm_num_posted, &doorbell, 1);
330 bf_set(lpfc_wq_db_list_fm_id, &doorbell, q->queue_id);
331
332
333 if_type = bf_get(lpfc_sli_intf_if_type,
334 &q->phba->sli4_hba.sli_intf);
335 if (if_type != LPFC_SLI_INTF_IF_TYPE_6)
336 bf_set(lpfc_wq_db_list_fm_index, &doorbell,
337 host_index);
338 }
339 } else if (q->db_format == LPFC_DB_RING_FORMAT) {
340 bf_set(lpfc_wq_db_ring_fm_num_posted, &doorbell, 1);
341 bf_set(lpfc_wq_db_ring_fm_id, &doorbell, q->queue_id);
342 } else {
343 return -EINVAL;
344 }
345 writel(doorbell.word0, q->db_regaddr);
346
347 return 0;
348}
349
350
351
352
353
354
355
356
357
358
359
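/*
 * lpfc_sli4_wq_release - Record that the HBA has consumed WQ entries.
 * Updates @q->hba_index to @index so the released entries can be reused.
 */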
360static void
361lpfc_sli4_wq_release(struct lpfc_queue *q, uint32_t index)
362{
363
364 if (unlikely(!q))
365 return;
366
367 q->hba_index = index;
368}
369
370
371
372
373
374
375
376
377
378
379
380
381
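/*
 * lpfc_sli4_mq_put - Post a mailbox queue entry to a SLI-4 MQ and ring its
 * doorbell.  Returns 0 on success or -ENOMEM if @q is NULL or full.
 */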
382static uint32_t
383lpfc_sli4_mq_put(struct lpfc_queue *q, struct lpfc_mqe *mqe)
384{
385 struct lpfc_mqe *temp_mqe;
386 struct lpfc_register doorbell;
387
388
389 if (unlikely(!q))
390 return -ENOMEM;
391 temp_mqe = lpfc_sli4_qe(q, q->host_index);
392
393
394 if (((q->host_index + 1) % q->entry_count) == q->hba_index)
395 return -ENOMEM;
396 lpfc_sli4_pcimem_bcopy(mqe, temp_mqe, q->entry_size);
397
398 q->phba->mbox = (MAILBOX_t *)temp_mqe;
399
400
401 q->host_index = ((q->host_index + 1) % q->entry_count);
402
403
404 doorbell.word0 = 0;
405 bf_set(lpfc_mq_doorbell_num_posted, &doorbell, 1);
406 bf_set(lpfc_mq_doorbell_id, &doorbell, q->queue_id);
407 writel(doorbell.word0, q->phba->sli4_hba.MQDBregaddr);
408 return 0;
409}
410
411
412
413
414
415
416
417
418
419
420
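/*
 * lpfc_sli4_mq_release - Release the active mailbox queue entry.
 * Clears phba->mbox and advances hba_index; returns 1, or 0 if @q is NULL.
 */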
421static uint32_t
422lpfc_sli4_mq_release(struct lpfc_queue *q)
423{
424
425 if (unlikely(!q))
426 return 0;
427
428
429 q->phba->mbox = NULL;
430 q->hba_index = ((q->hba_index + 1) % q->entry_count);
431 return 1;
432}
433
434
435
436
437
438
439
440
441
442
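/*
 * lpfc_sli4_eq_get - Return the next valid event queue entry, or NULL.
 * The full barrier keeps the EQE contents from being read before the
 * valid bit has been checked.
 */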
443static struct lpfc_eqe *
444lpfc_sli4_eq_get(struct lpfc_queue *q)
445{
446 struct lpfc_eqe *eqe;
447
448
449 if (unlikely(!q))
450 return NULL;
451 eqe = lpfc_sli4_qe(q, q->host_index);
452
453
454 if (bf_get_le32(lpfc_eqe_valid, eqe) != q->qe_valid)
455 return NULL;
456
457
458
459
460
461
462
463
464
465
466 mb();
467 return eqe;
468}
469
470
471
472
473
474
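/*
 * lpfc_sli4_eq_clr_intr - Clear the interrupt for this EQ by writing the EQ
 * doorbell with only the EQCI (clear interrupt) bit set, without releasing
 * or rearming any entries.
 */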
475void
476lpfc_sli4_eq_clr_intr(struct lpfc_queue *q)
477{
478 struct lpfc_register doorbell;
479
480 doorbell.word0 = 0;
481 bf_set(lpfc_eqcq_doorbell_eqci, &doorbell, 1);
482 bf_set(lpfc_eqcq_doorbell_qt, &doorbell, LPFC_QUEUE_TYPE_EVENT);
483 bf_set(lpfc_eqcq_doorbell_eqid_hi, &doorbell,
484 (q->queue_id >> LPFC_EQID_HI_FIELD_SHIFT));
485 bf_set(lpfc_eqcq_doorbell_eqid_lo, &doorbell, q->queue_id);
486 writel(doorbell.word0, q->phba->sli4_hba.EQDBregaddr);
487}
488
489
490
491
492
493
494void
495lpfc_sli4_if6_eq_clr_intr(struct lpfc_queue *q)
496{
497 struct lpfc_register doorbell;
498
499 doorbell.word0 = 0;
500 bf_set(lpfc_if6_eq_doorbell_eqid, &doorbell, q->queue_id);
501 writel(doorbell.word0, q->phba->sli4_hba.EQDBregaddr);
502}
503
504
505
506
507
508
509
510
511
512
513
514
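/*
 * lpfc_sli4_write_eq_db - Release processed EQEs and optionally rearm the EQ.
 * Writes the EQ doorbell releasing @count entries; when @arm is true the EQ
 * is rearmed to generate another interrupt.  With INTx interrupts the
 * doorbell is read back to flush the write.
 */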
515void
516lpfc_sli4_write_eq_db(struct lpfc_hba *phba, struct lpfc_queue *q,
517 uint32_t count, bool arm)
518{
519 struct lpfc_register doorbell;
520
521
522 if (unlikely(!q || (count == 0 && !arm)))
523 return;
524
525
526 doorbell.word0 = 0;
527 if (arm) {
528 bf_set(lpfc_eqcq_doorbell_arm, &doorbell, 1);
529 bf_set(lpfc_eqcq_doorbell_eqci, &doorbell, 1);
530 }
531 bf_set(lpfc_eqcq_doorbell_num_released, &doorbell, count);
532 bf_set(lpfc_eqcq_doorbell_qt, &doorbell, LPFC_QUEUE_TYPE_EVENT);
533 bf_set(lpfc_eqcq_doorbell_eqid_hi, &doorbell,
534 (q->queue_id >> LPFC_EQID_HI_FIELD_SHIFT));
535 bf_set(lpfc_eqcq_doorbell_eqid_lo, &doorbell, q->queue_id);
536 writel(doorbell.word0, q->phba->sli4_hba.EQDBregaddr);
537
538 if ((q->phba->intr_type == INTx) && (arm == LPFC_QUEUE_REARM))
539 readl(q->phba->sli4_hba.EQDBregaddr);
540}
541
542
543
544
545
546
547
548
549
550
551
552
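/*
 * lpfc_sli4_if6_write_eq_db - Same as lpfc_sli4_write_eq_db() but using the
 * SLI-4 interface type 6 doorbell layout.
 */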
553void
554lpfc_sli4_if6_write_eq_db(struct lpfc_hba *phba, struct lpfc_queue *q,
555 uint32_t count, bool arm)
556{
557 struct lpfc_register doorbell;
558
559
560 if (unlikely(!q || (count == 0 && !arm)))
561 return;
562
563
564 doorbell.word0 = 0;
565 if (arm)
566 bf_set(lpfc_if6_eq_doorbell_arm, &doorbell, 1);
567 bf_set(lpfc_if6_eq_doorbell_num_released, &doorbell, count);
568 bf_set(lpfc_if6_eq_doorbell_eqid, &doorbell, q->queue_id);
569 writel(doorbell.word0, q->phba->sli4_hba.EQDBregaddr);
570
571 if ((q->phba->intr_type == INTx) && (arm == LPFC_QUEUE_REARM))
572 readl(q->phba->sli4_hba.EQDBregaddr);
573}
574
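/*
 * __lpfc_sli4_consume_eqe - Mark @eqe consumed: clear its valid bit (when the
 * EQ is not autovalid), advance host_index and flip the expected valid phase
 * when an autovalid EQ wraps.
 */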
575static void
576__lpfc_sli4_consume_eqe(struct lpfc_hba *phba, struct lpfc_queue *eq,
577 struct lpfc_eqe *eqe)
578{
579 if (!phba->sli4_hba.pc_sli4_params.eqav)
580 bf_set_le32(lpfc_eqe_valid, eqe, 0);
581
582 eq->host_index = ((eq->host_index + 1) % eq->entry_count);
583
584
585 if (phba->sli4_hba.pc_sli4_params.eqav && !eq->host_index)
586 eq->qe_valid = (eq->qe_valid) ? 0 : 1;
587}
588
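/*
 * lpfc_sli4_eqcq_flush - Drain and rearm an EQ and all of its child CQs,
 * discarding any pending entries without processing them.
 */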
589static void
590lpfc_sli4_eqcq_flush(struct lpfc_hba *phba, struct lpfc_queue *eq)
591{
592 struct lpfc_eqe *eqe = NULL;
593 u32 eq_count = 0, cq_count = 0;
594 struct lpfc_cqe *cqe = NULL;
595 struct lpfc_queue *cq = NULL, *childq = NULL;
596 int cqid = 0;
597
598
599 eqe = lpfc_sli4_eq_get(eq);
600 while (eqe) {
601
602 cqid = bf_get_le32(lpfc_eqe_resource_id, eqe);
603 cq = NULL;
604
605 list_for_each_entry(childq, &eq->child_list, list) {
606 if (childq->queue_id == cqid) {
607 cq = childq;
608 break;
609 }
610 }
611
612 if (cq) {
613 cqe = lpfc_sli4_cq_get(cq);
614 while (cqe) {
615 __lpfc_sli4_consume_cqe(phba, cq, cqe);
616 cq_count++;
617 cqe = lpfc_sli4_cq_get(cq);
618 }
619
620 phba->sli4_hba.sli4_write_cq_db(phba, cq, cq_count,
621 LPFC_QUEUE_REARM);
622 cq_count = 0;
623 }
624 __lpfc_sli4_consume_eqe(phba, eq, eqe);
625 eq_count++;
626 eqe = lpfc_sli4_eq_get(eq);
627 }
628
629
630 phba->sli4_hba.sli4_write_eq_db(phba, eq, eq_count, LPFC_QUEUE_REARM);
631}
632
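/*
 * lpfc_sli4_process_eq - Claim @eq and process up to max_proc_limit EQEs,
 * dispatching each to lpfc_sli4_hba_handle_eqe().  Consumed entries are
 * released in notify_interval batches; the final doorbell write uses @rearm.
 * Returns the number of EQEs processed.
 */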
633static int
634lpfc_sli4_process_eq(struct lpfc_hba *phba, struct lpfc_queue *eq,
635 uint8_t rearm)
636{
637 struct lpfc_eqe *eqe;
638 int count = 0, consumed = 0;
639
640 if (cmpxchg(&eq->queue_claimed, 0, 1) != 0)
641 goto rearm_and_exit;
642
643 eqe = lpfc_sli4_eq_get(eq);
644 while (eqe) {
645 lpfc_sli4_hba_handle_eqe(phba, eq, eqe);
646 __lpfc_sli4_consume_eqe(phba, eq, eqe);
647
648 consumed++;
649 if (!(++count % eq->max_proc_limit))
650 break;
651
652 if (!(count % eq->notify_interval)) {
653 phba->sli4_hba.sli4_write_eq_db(phba, eq, consumed,
654 LPFC_QUEUE_NOARM);
655 consumed = 0;
656 }
657
658 eqe = lpfc_sli4_eq_get(eq);
659 }
660 eq->EQ_processed += count;
661
662
663 if (count > eq->EQ_max_eqe)
664 eq->EQ_max_eqe = count;
665
666 xchg(&eq->queue_claimed, 0);
667
668rearm_and_exit:
669
670 phba->sli4_hba.sli4_write_eq_db(phba, eq, consumed, rearm);
671
672 return count;
673}
674
675
676
677
678
679
680
681
682
683
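/*
 * lpfc_sli4_cq_get - Return the next valid completion queue entry, or NULL.
 * As with the EQ, a full barrier orders the valid-bit check before the CQE
 * contents are read.
 */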
684static struct lpfc_cqe *
685lpfc_sli4_cq_get(struct lpfc_queue *q)
686{
687 struct lpfc_cqe *cqe;
688
689
690 if (unlikely(!q))
691 return NULL;
692 cqe = lpfc_sli4_qe(q, q->host_index);
693
694
695 if (bf_get_le32(lpfc_cqe_valid, cqe) != q->qe_valid)
696 return NULL;
697
698
699
700
701
702
703
704
705
706 mb();
707 return cqe;
708}
709
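/*
 * __lpfc_sli4_consume_cqe - CQ analogue of __lpfc_sli4_consume_eqe().
 */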
710static void
711__lpfc_sli4_consume_cqe(struct lpfc_hba *phba, struct lpfc_queue *cq,
712 struct lpfc_cqe *cqe)
713{
714 if (!phba->sli4_hba.pc_sli4_params.cqav)
715 bf_set_le32(lpfc_cqe_valid, cqe, 0);
716
717 cq->host_index = ((cq->host_index + 1) % cq->entry_count);
718
719
720 if (phba->sli4_hba.pc_sli4_params.cqav && !cq->host_index)
721 cq->qe_valid = (cq->qe_valid) ? 0 : 1;
722}
723
724
725
726
727
728
729
730
731
732
733
734
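/*
 * lpfc_sli4_write_cq_db - Release @count processed CQEs and optionally rearm
 * the completion queue by writing its doorbell.
 */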
735void
736lpfc_sli4_write_cq_db(struct lpfc_hba *phba, struct lpfc_queue *q,
737 uint32_t count, bool arm)
738{
739 struct lpfc_register doorbell;
740
741
742 if (unlikely(!q || (count == 0 && !arm)))
743 return;
744
745
746 doorbell.word0 = 0;
747 if (arm)
748 bf_set(lpfc_eqcq_doorbell_arm, &doorbell, 1);
749 bf_set(lpfc_eqcq_doorbell_num_released, &doorbell, count);
750 bf_set(lpfc_eqcq_doorbell_qt, &doorbell, LPFC_QUEUE_TYPE_COMPLETION);
751 bf_set(lpfc_eqcq_doorbell_cqid_hi, &doorbell,
752 (q->queue_id >> LPFC_CQID_HI_FIELD_SHIFT));
753 bf_set(lpfc_eqcq_doorbell_cqid_lo, &doorbell, q->queue_id);
754 writel(doorbell.word0, q->phba->sli4_hba.CQDBregaddr);
755}
756
757
758
759
760
761
762
763
764
765
766
767
768void
769lpfc_sli4_if6_write_cq_db(struct lpfc_hba *phba, struct lpfc_queue *q,
770 uint32_t count, bool arm)
771{
772 struct lpfc_register doorbell;
773
774
775 if (unlikely(!q || (count == 0 && !arm)))
776 return;
777
778
779 doorbell.word0 = 0;
780 if (arm)
781 bf_set(lpfc_if6_cq_doorbell_arm, &doorbell, 1);
782 bf_set(lpfc_if6_cq_doorbell_num_released, &doorbell, count);
783 bf_set(lpfc_if6_cq_doorbell_cqid, &doorbell, q->queue_id);
784 writel(doorbell.word0, q->phba->sli4_hba.CQDBregaddr);
785}
786
787
788
789
790
791
792
793
794
795
796
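/*
 * lpfc_sli4_rq_put - Post a header/data RQE pair to the receive queue pair.
 * @hq and @dq must advance in lockstep; the RQ doorbell is rung once every
 * notify_interval postings.  Returns the index the buffers were posted at,
 * or -EBUSY (queue full), -EINVAL (queue type/index mismatch), -ENOMEM.
 */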
797int
798lpfc_sli4_rq_put(struct lpfc_queue *hq, struct lpfc_queue *dq,
799 struct lpfc_rqe *hrqe, struct lpfc_rqe *drqe)
800{
801 struct lpfc_rqe *temp_hrqe;
802 struct lpfc_rqe *temp_drqe;
803 struct lpfc_register doorbell;
804 int hq_put_index;
805 int dq_put_index;
806
807
808 if (unlikely(!hq) || unlikely(!dq))
809 return -ENOMEM;
810 hq_put_index = hq->host_index;
811 dq_put_index = dq->host_index;
812 temp_hrqe = lpfc_sli4_qe(hq, hq_put_index);
813 temp_drqe = lpfc_sli4_qe(dq, dq_put_index);
814
815 if (hq->type != LPFC_HRQ || dq->type != LPFC_DRQ)
816 return -EINVAL;
817 if (hq_put_index != dq_put_index)
818 return -EINVAL;
819
820 if (((hq_put_index + 1) % hq->entry_count) == hq->hba_index)
821 return -EBUSY;
822 lpfc_sli4_pcimem_bcopy(hrqe, temp_hrqe, hq->entry_size);
823 lpfc_sli4_pcimem_bcopy(drqe, temp_drqe, dq->entry_size);
824
825
826 hq->host_index = ((hq_put_index + 1) % hq->entry_count);
827 dq->host_index = ((dq_put_index + 1) % dq->entry_count);
828 hq->RQ_buf_posted++;
829
830
831 if (!(hq->host_index % hq->notify_interval)) {
832 doorbell.word0 = 0;
833 if (hq->db_format == LPFC_DB_RING_FORMAT) {
834 bf_set(lpfc_rq_db_ring_fm_num_posted, &doorbell,
835 hq->notify_interval);
836 bf_set(lpfc_rq_db_ring_fm_id, &doorbell, hq->queue_id);
837 } else if (hq->db_format == LPFC_DB_LIST_FORMAT) {
838 bf_set(lpfc_rq_db_list_fm_num_posted, &doorbell,
839 hq->notify_interval);
840 bf_set(lpfc_rq_db_list_fm_index, &doorbell,
841 hq->host_index);
842 bf_set(lpfc_rq_db_list_fm_id, &doorbell, hq->queue_id);
843 } else {
844 return -EINVAL;
845 }
846 writel(doorbell.word0, hq->db_regaddr);
847 }
848 return hq_put_index;
849}
850
851
852
853
854
855
856
857
858
859
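/*
 * lpfc_sli4_rq_release - Advance the hba_index of both receive queues.
 * Returns 1 on success, 0 if either queue pointer or type is wrong.
 */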
860static uint32_t
861lpfc_sli4_rq_release(struct lpfc_queue *hq, struct lpfc_queue *dq)
862{
863
864 if (unlikely(!hq) || unlikely(!dq))
865 return 0;
866
867 if ((hq->type != LPFC_HRQ) || (dq->type != LPFC_DRQ))
868 return 0;
869 hq->hba_index = ((hq->hba_index + 1) % hq->entry_count);
870 dq->hba_index = ((dq->hba_index + 1) % dq->entry_count);
871 return 1;
872}
873
874
875
876
877
878
879
880
881
882
883
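/*
 * lpfc_cmd_iocb - Return the SLI-3 command ring entry at the current cmdidx.
 */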
884static inline IOCB_t *
885lpfc_cmd_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring)
886{
887 return (IOCB_t *) (((char *) pring->sli.sli3.cmdringaddr) +
888 pring->sli.sli3.cmdidx * phba->iocb_cmd_size);
889}
890
891
892
893
894
895
896
897
898
899
900
901static inline IOCB_t *
902lpfc_resp_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring)
903{
904 return (IOCB_t *) (((char *) pring->sli.sli3.rspringaddr) +
905 pring->sli.sli3.rspidx * phba->iocb_rsp_size);
906}
907
908
909
910
911
912
913
914
915
916
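/*
 * __lpfc_sli_get_iocbq - Take an iocbq from the free list (hbalock held).
 * Returns NULL when the free list is empty.
 */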
917struct lpfc_iocbq *
918__lpfc_sli_get_iocbq(struct lpfc_hba *phba)
919{
920 struct list_head *lpfc_iocb_list = &phba->lpfc_iocb_list;
921 struct lpfc_iocbq * iocbq = NULL;
922
923 lockdep_assert_held(&phba->hbalock);
924
925 list_remove_head(lpfc_iocb_list, iocbq, struct lpfc_iocbq, list);
926 if (iocbq)
927 phba->iocb_cnt++;
928 if (phba->iocb_cnt > phba->iocb_max)
929 phba->iocb_max = phba->iocb_cnt;
930 return iocbq;
931}
932
933
934
935
936
937
938
939
940
941
942
943
944
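/*
 * __lpfc_clear_active_sglq - Remove and return the active sglq for @xritag.
 */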
945struct lpfc_sglq *
946__lpfc_clear_active_sglq(struct lpfc_hba *phba, uint16_t xritag)
947{
948 struct lpfc_sglq *sglq;
949
950 sglq = phba->sli4_hba.lpfc_sglq_active_list[xritag];
951 phba->sli4_hba.lpfc_sglq_active_list[xritag] = NULL;
952 return sglq;
953}
954
955
956
957
958
959
960
961
962
963
964
965
966
967struct lpfc_sglq *
968__lpfc_get_active_sglq(struct lpfc_hba *phba, uint16_t xritag)
969{
970 struct lpfc_sglq *sglq;
971
972 sglq = phba->sli4_hba.lpfc_sglq_active_list[xritag];
973 return sglq;
974}
975
976
977
978
979
980
981
982
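/*
 * lpfc_clr_rrq_active - Clear @xritag from the node's active RRQ bitmap (if
 * the node still exists) and free @rrq back to the mempool.
 */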
983void
984lpfc_clr_rrq_active(struct lpfc_hba *phba,
985 uint16_t xritag,
986 struct lpfc_node_rrq *rrq)
987{
988 struct lpfc_nodelist *ndlp = NULL;
989
990
991 if (rrq->vport)
992 ndlp = lpfc_findnode_did(rrq->vport, rrq->nlp_DID);
993
994 if (!ndlp)
995 goto out;
996
997 if (test_and_clear_bit(xritag, ndlp->active_rrqs_xri_bitmap)) {
998 rrq->send_rrq = 0;
999 rrq->xritag = 0;
1000 rrq->rrq_stop_time = 0;
1001 }
1002out:
1003 mempool_free(rrq, phba->rrq_pool);
1004}
1005
1006
1007
1008
1009
1010
1011
1012
1013
1014
1015
1016
1017
1018
1019
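/*
 * lpfc_handle_rrq_active - Worker handler for the active RRQ list.
 * Expired RRQs are either transmitted (send_rrq set) or simply cleared;
 * the RRQ timer is rescheduled for the earliest remaining stop time.
 */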
1020void
1021lpfc_handle_rrq_active(struct lpfc_hba *phba)
1022{
1023 struct lpfc_node_rrq *rrq;
1024 struct lpfc_node_rrq *nextrrq;
1025 unsigned long next_time;
1026 unsigned long iflags;
1027 LIST_HEAD(send_rrq);
1028
1029 spin_lock_irqsave(&phba->hbalock, iflags);
1030 phba->hba_flag &= ~HBA_RRQ_ACTIVE;
1031 next_time = jiffies + msecs_to_jiffies(1000 * (phba->fc_ratov + 1));
1032 list_for_each_entry_safe(rrq, nextrrq,
1033 &phba->active_rrq_list, list) {
1034 if (time_after(jiffies, rrq->rrq_stop_time))
1035 list_move(&rrq->list, &send_rrq);
1036 else if (time_before(rrq->rrq_stop_time, next_time))
1037 next_time = rrq->rrq_stop_time;
1038 }
1039 spin_unlock_irqrestore(&phba->hbalock, iflags);
1040 if ((!list_empty(&phba->active_rrq_list)) &&
1041 (!(phba->pport->load_flag & FC_UNLOADING)))
1042 mod_timer(&phba->rrq_tmr, next_time);
1043 list_for_each_entry_safe(rrq, nextrrq, &send_rrq, list) {
1044 list_del(&rrq->list);
1045 if (!rrq->send_rrq) {
1046
1047 lpfc_clr_rrq_active(phba, rrq->xritag, rrq);
1048 } else if (lpfc_send_rrq(phba, rrq)) {
1049
1050
1051
1052 lpfc_clr_rrq_active(phba, rrq->xritag,
1053 rrq);
1054 }
1055 }
1056}
1057
1058
1059
1060
1061
1062
1063
1064
1065
1066
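/*
 * lpfc_get_active_rrq - Find, unlink and return the active RRQ matching
 * @vport, @xri and @did; NULL if none (or not an SLI-4 port).
 */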
1067struct lpfc_node_rrq *
1068lpfc_get_active_rrq(struct lpfc_vport *vport, uint16_t xri, uint32_t did)
1069{
1070 struct lpfc_hba *phba = vport->phba;
1071 struct lpfc_node_rrq *rrq;
1072 struct lpfc_node_rrq *nextrrq;
1073 unsigned long iflags;
1074
1075 if (phba->sli_rev != LPFC_SLI_REV4)
1076 return NULL;
1077 spin_lock_irqsave(&phba->hbalock, iflags);
1078 list_for_each_entry_safe(rrq, nextrrq, &phba->active_rrq_list, list) {
1079 if (rrq->vport == vport && rrq->xritag == xri &&
1080 rrq->nlp_DID == did){
1081 list_del(&rrq->list);
1082 spin_unlock_irqrestore(&phba->hbalock, iflags);
1083 return rrq;
1084 }
1085 }
1086 spin_unlock_irqrestore(&phba->hbalock, iflags);
1087 return NULL;
1088}
1089
1090
1091
1092
1093
1094
1095
1096
1097
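/*
 * lpfc_cleanup_vports_rrqs - Remove and clear all active RRQs for @vport,
 * or only those belonging to @ndlp when a node is supplied.
 */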
1098void
1099lpfc_cleanup_vports_rrqs(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
1101{
1102 struct lpfc_hba *phba = vport->phba;
1103 struct lpfc_node_rrq *rrq;
1104 struct lpfc_node_rrq *nextrrq;
1105 unsigned long iflags;
1106 LIST_HEAD(rrq_list);
1107
1108 if (phba->sli_rev != LPFC_SLI_REV4)
1109 return;
1110 if (!ndlp) {
1111 lpfc_sli4_vport_delete_els_xri_aborted(vport);
1112 lpfc_sli4_vport_delete_fcp_xri_aborted(vport);
1113 }
1114 spin_lock_irqsave(&phba->hbalock, iflags);
1115 list_for_each_entry_safe(rrq, nextrrq, &phba->active_rrq_list, list) {
1116 if (rrq->vport != vport)
1117 continue;
1118
1119 if (!ndlp || ndlp == lpfc_findnode_did(vport, rrq->nlp_DID))
1120 list_move(&rrq->list, &rrq_list);
1121
1122 }
1123 spin_unlock_irqrestore(&phba->hbalock, iflags);
1124
1125 list_for_each_entry_safe(rrq, nextrrq, &rrq_list, list) {
1126 list_del(&rrq->list);
1127 lpfc_clr_rrq_active(phba, rrq->xritag, rrq);
1128 }
1129}
1130
1131
1132
1133
1134
1135
1136
1137
1138
1139
1140
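/*
 * lpfc_test_rrq_active - Return 1 if @xritag is marked busy in the node's
 * active RRQ bitmap, otherwise 0.
 */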
1141int
1142lpfc_test_rrq_active(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp,
1143 uint16_t xritag)
1144{
1145 if (!ndlp)
1146 return 0;
1147 if (!ndlp->active_rrqs_xri_bitmap)
1148 return 0;
1149 if (test_bit(xritag, ndlp->active_rrqs_xri_bitmap))
1150 return 1;
1151 else
1152 return 0;
1153}
1154
1155
1156
1157
1158
1159
1160
1161
1162
1163
1164
1165
1166
1167
1168
1169
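/*
 * lpfc_set_rrq_active - Mark @xritag busy for @ndlp and queue a new RRQ on
 * the HBA's active list, waking the worker thread if the list was empty.
 * Returns 0 on success or -EINVAL if the RRQ cannot be set up.
 */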
1170int
1171lpfc_set_rrq_active(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp,
1172 uint16_t xritag, uint16_t rxid, uint16_t send_rrq)
1173{
1174 unsigned long iflags;
1175 struct lpfc_node_rrq *rrq;
1176 int empty;
1177
1178 if (!ndlp)
1179 return -EINVAL;
1180
1181 if (!phba->cfg_enable_rrq)
1182 return -EINVAL;
1183
1184 spin_lock_irqsave(&phba->hbalock, iflags);
1185 if (phba->pport->load_flag & FC_UNLOADING) {
1186 phba->hba_flag &= ~HBA_RRQ_ACTIVE;
1187 goto out;
1188 }
1189
1190 if (ndlp->vport && (ndlp->vport->load_flag & FC_UNLOADING))
1191 goto out;
1192
1193 if (!ndlp->active_rrqs_xri_bitmap)
1194 goto out;
1195
1196 if (test_and_set_bit(xritag, ndlp->active_rrqs_xri_bitmap))
1197 goto out;
1198
1199 spin_unlock_irqrestore(&phba->hbalock, iflags);
1200 rrq = mempool_alloc(phba->rrq_pool, GFP_ATOMIC);
1201 if (!rrq) {
1202 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
1203 "3155 Unable to allocate RRQ xri:0x%x rxid:0x%x"
1204 " DID:0x%x Send:%d\n",
1205 xritag, rxid, ndlp->nlp_DID, send_rrq);
1206 return -EINVAL;
1207 }
1208 if (phba->cfg_enable_rrq == 1)
1209 rrq->send_rrq = send_rrq;
1210 else
1211 rrq->send_rrq = 0;
1212 rrq->xritag = xritag;
1213 rrq->rrq_stop_time = jiffies +
1214 msecs_to_jiffies(1000 * (phba->fc_ratov + 1));
1215 rrq->nlp_DID = ndlp->nlp_DID;
1216 rrq->vport = ndlp->vport;
1217 rrq->rxid = rxid;
1218 spin_lock_irqsave(&phba->hbalock, iflags);
1219 empty = list_empty(&phba->active_rrq_list);
1220 list_add_tail(&rrq->list, &phba->active_rrq_list);
1221 phba->hba_flag |= HBA_RRQ_ACTIVE;
1222 if (empty)
1223 lpfc_worker_wake_up(phba);
1224 spin_unlock_irqrestore(&phba->hbalock, iflags);
1225 return 0;
1226out:
1227 spin_unlock_irqrestore(&phba->hbalock, iflags);
1228 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
1229 "2921 Can't set rrq active xri:0x%x rxid:0x%x"
1230 " DID:0x%x Send:%d\n",
1231 xritag, rxid, ndlp->nlp_DID, send_rrq);
1232 return -EINVAL;
1233}
1234
1235
1236
1237
1238
1239
1240
1241
1242
1243
1244
1245
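/*
 * __lpfc_sli_get_els_sglq - Allocate an ELS sglq for @piocbq (ring lock held).
 * Skips sglqs whose XRI is still blocked by an active RRQ for the target
 * node; returns NULL if no usable sglq is available.
 */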
1246static struct lpfc_sglq *
1247__lpfc_sli_get_els_sglq(struct lpfc_hba *phba, struct lpfc_iocbq *piocbq)
1248{
1249 struct list_head *lpfc_els_sgl_list = &phba->sli4_hba.lpfc_els_sgl_list;
1250 struct lpfc_sglq *sglq = NULL;
1251 struct lpfc_sglq *start_sglq = NULL;
1252 struct lpfc_io_buf *lpfc_cmd;
1253 struct lpfc_nodelist *ndlp;
1254 struct lpfc_sli_ring *pring = NULL;
1255 int found = 0;
1256
1257 if (piocbq->iocb_flag & LPFC_IO_NVME_LS)
1258 pring = phba->sli4_hba.nvmels_wq->pring;
1259 else
1260 pring = lpfc_phba_elsring(phba);
1261
1262 lockdep_assert_held(&pring->ring_lock);
1263
1264 if (piocbq->iocb_flag & LPFC_IO_FCP) {
1265 lpfc_cmd = (struct lpfc_io_buf *) piocbq->context1;
1266 ndlp = lpfc_cmd->rdata->pnode;
1267 } else if ((piocbq->iocb.ulpCommand == CMD_GEN_REQUEST64_CR) &&
1268 !(piocbq->iocb_flag & LPFC_IO_LIBDFC)) {
1269 ndlp = piocbq->context_un.ndlp;
1270 } else if (piocbq->iocb_flag & LPFC_IO_LIBDFC) {
1271 if (piocbq->iocb_flag & LPFC_IO_LOOPBACK)
1272 ndlp = NULL;
1273 else
1274 ndlp = piocbq->context_un.ndlp;
1275 } else {
1276 ndlp = piocbq->context1;
1277 }
1278
1279 spin_lock(&phba->sli4_hba.sgl_list_lock);
1280 list_remove_head(lpfc_els_sgl_list, sglq, struct lpfc_sglq, list);
1281 start_sglq = sglq;
1282 while (!found) {
1283 if (!sglq)
1284 break;
1285 if (ndlp && ndlp->active_rrqs_xri_bitmap &&
1286 test_bit(sglq->sli4_lxritag,
1287 ndlp->active_rrqs_xri_bitmap)) {
1288
1289
1290
1291 list_add_tail(&sglq->list, lpfc_els_sgl_list);
1292 sglq = NULL;
1293 list_remove_head(lpfc_els_sgl_list, sglq,
1294 struct lpfc_sglq, list);
1295 if (sglq == start_sglq) {
1296 list_add_tail(&sglq->list, lpfc_els_sgl_list);
1297 sglq = NULL;
1298 break;
1299 } else
1300 continue;
1301 }
1302 sglq->ndlp = ndlp;
1303 found = 1;
1304 phba->sli4_hba.lpfc_sglq_active_list[sglq->sli4_lxritag] = sglq;
1305 sglq->state = SGL_ALLOCATED;
1306 }
1307 spin_unlock(&phba->sli4_hba.sgl_list_lock);
1308 return sglq;
1309}
1310
1311
1312
1313
1314
1315
1316
1317
1318
1319
1320
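/*
 * __lpfc_sli_get_nvmet_sglq - Allocate an sglq from the NVMET sgl list
 * (sgl_list_lock held); returns NULL when the list is empty.
 */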
1321struct lpfc_sglq *
1322__lpfc_sli_get_nvmet_sglq(struct lpfc_hba *phba, struct lpfc_iocbq *piocbq)
1323{
1324 struct list_head *lpfc_nvmet_sgl_list;
1325 struct lpfc_sglq *sglq = NULL;
1326
1327 lpfc_nvmet_sgl_list = &phba->sli4_hba.lpfc_nvmet_sgl_list;
1328
1329 lockdep_assert_held(&phba->sli4_hba.sgl_list_lock);
1330
1331 list_remove_head(lpfc_nvmet_sgl_list, sglq, struct lpfc_sglq, list);
1332 if (!sglq)
1333 return NULL;
1334 phba->sli4_hba.lpfc_sglq_active_list[sglq->sli4_lxritag] = sglq;
1335 sglq->state = SGL_ALLOCATED;
1336 return sglq;
1337}
1338
1339
1340
1341
1342
1343
1344
1345
1346
1347
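/*
 * lpfc_sli_get_iocbq - Lock-taking wrapper around __lpfc_sli_get_iocbq().
 */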
1348struct lpfc_iocbq *
1349lpfc_sli_get_iocbq(struct lpfc_hba *phba)
1350{
1351 struct lpfc_iocbq * iocbq = NULL;
1352 unsigned long iflags;
1353
1354 spin_lock_irqsave(&phba->hbalock, iflags);
1355 iocbq = __lpfc_sli_get_iocbq(phba);
1356 spin_unlock_irqrestore(&phba->hbalock, iflags);
1357 return iocbq;
1358}
1359
1360
1361
1362
1363
1364
1365
1366
1367
1368
1369
1370
1371
1372
1373
1374
1375
1376
1377
1378
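/*
 * __lpfc_sli_release_iocbq_s4 - SLI-4 iocbq release.
 * Returns any attached sglq to the NVMET, aborted-ELS (while the exchange is
 * still busy) or free ELS sgl list, clears the iocbq fields beyond the iocb
 * and puts the iocbq back on the free list.
 */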
1379static void
1380__lpfc_sli_release_iocbq_s4(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq)
1381{
1382 struct lpfc_sglq *sglq;
1383 size_t start_clean = offsetof(struct lpfc_iocbq, iocb);
1384 unsigned long iflag = 0;
1385 struct lpfc_sli_ring *pring;
1386
1387 if (iocbq->sli4_xritag == NO_XRI)
1388 sglq = NULL;
1389 else
1390 sglq = __lpfc_clear_active_sglq(phba, iocbq->sli4_lxritag);
1391
1392
1393 if (sglq) {
1394 if (iocbq->iocb_flag & LPFC_IO_NVMET) {
1395 spin_lock_irqsave(&phba->sli4_hba.sgl_list_lock,
1396 iflag);
1397 sglq->state = SGL_FREED;
1398 sglq->ndlp = NULL;
1399 list_add_tail(&sglq->list,
1400 &phba->sli4_hba.lpfc_nvmet_sgl_list);
1401 spin_unlock_irqrestore(
1402 &phba->sli4_hba.sgl_list_lock, iflag);
1403 goto out;
1404 }
1405
1406 if ((iocbq->iocb_flag & LPFC_EXCHANGE_BUSY) &&
1407 (sglq->state != SGL_XRI_ABORTED)) {
1408 spin_lock_irqsave(&phba->sli4_hba.sgl_list_lock,
1409 iflag);
1410
1411
1412 if (sglq->ndlp && !lpfc_nlp_get(sglq->ndlp))
1413 sglq->ndlp = NULL;
1414
1415 list_add(&sglq->list,
1416 &phba->sli4_hba.lpfc_abts_els_sgl_list);
1417 spin_unlock_irqrestore(
1418 &phba->sli4_hba.sgl_list_lock, iflag);
1419 } else {
1420 spin_lock_irqsave(&phba->sli4_hba.sgl_list_lock,
1421 iflag);
1422 sglq->state = SGL_FREED;
1423 sglq->ndlp = NULL;
1424 list_add_tail(&sglq->list,
1425 &phba->sli4_hba.lpfc_els_sgl_list);
1426 spin_unlock_irqrestore(
1427 &phba->sli4_hba.sgl_list_lock, iflag);
1428 pring = lpfc_phba_elsring(phba);
1429
1430 if (pring && (!list_empty(&pring->txq)))
1431 lpfc_worker_wake_up(phba);
1432 }
1433 }
1434
1435out:
1436
1437
1438
1439 memset((char *)iocbq + start_clean, 0, sizeof(*iocbq) - start_clean);
1440 iocbq->sli4_lxritag = NO_XRI;
1441 iocbq->sli4_xritag = NO_XRI;
1442 iocbq->iocb_flag &= ~(LPFC_IO_NVME | LPFC_IO_NVMET |
1443 LPFC_IO_NVME_LS);
1444 list_add_tail(&iocbq->list, &phba->lpfc_iocb_list);
1445}
1446
1447
1448
1449
1450
1451
1452
1453
1454
1455
1456
1457
1458
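/*
 * __lpfc_sli_release_iocbq_s3 - SLI-3 iocbq release: clear the fields beyond
 * the iocb and return the iocbq to the free list.
 */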
1459static void
1460__lpfc_sli_release_iocbq_s3(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq)
1461{
1462 size_t start_clean = offsetof(struct lpfc_iocbq, iocb);
1463
1464
1465
1466
1467 memset((char*)iocbq + start_clean, 0, sizeof(*iocbq) - start_clean);
1468 iocbq->sli4_xritag = NO_XRI;
1469 list_add_tail(&iocbq->list, &phba->lpfc_iocb_list);
1470}
1471
1472
1473
1474
1475
1476
1477
1478
1479
1480
1481
1482static void
1483__lpfc_sli_release_iocbq(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq)
1484{
1485 lockdep_assert_held(&phba->hbalock);
1486
1487 phba->__lpfc_sli_release_iocbq(phba, iocbq);
1488 phba->iocb_cnt--;
1489}
1490
1491
1492
1493
1494
1495
1496
1497
1498
1499void
1500lpfc_sli_release_iocbq(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq)
1501{
1502 unsigned long iflags;
1503
1504
1505
1506
1507 spin_lock_irqsave(&phba->hbalock, iflags);
1508 __lpfc_sli_release_iocbq(phba, iocbq);
1509 spin_unlock_irqrestore(&phba->hbalock, iflags);
1510}
1511
1512
1513
1514
1515
1516
1517
1518
1519
1520
1521
1522
1523
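/*
 * lpfc_sli_cancel_iocbs - Complete every IOCB on @iocblist with the given
 * ulpstatus/ulpWord4, calling its completion handler if one is set,
 * otherwise just releasing the iocbq.
 */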
1524void
1525lpfc_sli_cancel_iocbs(struct lpfc_hba *phba, struct list_head *iocblist,
1526 uint32_t ulpstatus, uint32_t ulpWord4)
1527{
1528 struct lpfc_iocbq *piocb;
1529
1530 while (!list_empty(iocblist)) {
1531 list_remove_head(iocblist, piocb, struct lpfc_iocbq, list);
1532 if (piocb->wqe_cmpl) {
1533 if (piocb->iocb_flag & LPFC_IO_NVME)
1534 lpfc_nvme_cancel_iocb(phba, piocb,
1535 ulpstatus, ulpWord4);
1536 else
1537 lpfc_sli_release_iocbq(phba, piocb);
1538
1539 } else if (piocb->iocb_cmpl) {
1540 piocb->iocb.ulpStatus = ulpstatus;
1541 piocb->iocb.un.ulpWord[4] = ulpWord4;
1542 (piocb->iocb_cmpl) (phba, piocb, piocb);
1543 } else {
1544 lpfc_sli_release_iocbq(phba, piocb);
1545 }
1546 }
1547 return;
1548}
1549
1550
1551
1552
1553
1554
1555
1556
1557
1558
1559
1560
1561
1562
1563
1564
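/*
 * lpfc_sli_iocb_cmd_type - Classify an SLI-3 IOCB command as solicited,
 * unsolicited, abort or unknown.
 */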
1565static lpfc_iocb_type
1566lpfc_sli_iocb_cmd_type(uint8_t iocb_cmnd)
1567{
1568 lpfc_iocb_type type = LPFC_UNKNOWN_IOCB;
1569
1570 if (iocb_cmnd > CMD_MAX_IOCB_CMD)
		return LPFC_UNKNOWN_IOCB;
1572
1573 switch (iocb_cmnd) {
1574 case CMD_XMIT_SEQUENCE_CR:
1575 case CMD_XMIT_SEQUENCE_CX:
1576 case CMD_XMIT_BCAST_CN:
1577 case CMD_XMIT_BCAST_CX:
1578 case CMD_ELS_REQUEST_CR:
1579 case CMD_ELS_REQUEST_CX:
1580 case CMD_CREATE_XRI_CR:
1581 case CMD_CREATE_XRI_CX:
1582 case CMD_GET_RPI_CN:
1583 case CMD_XMIT_ELS_RSP_CX:
1584 case CMD_GET_RPI_CR:
1585 case CMD_FCP_IWRITE_CR:
1586 case CMD_FCP_IWRITE_CX:
1587 case CMD_FCP_IREAD_CR:
1588 case CMD_FCP_IREAD_CX:
1589 case CMD_FCP_ICMND_CR:
1590 case CMD_FCP_ICMND_CX:
1591 case CMD_FCP_TSEND_CX:
1592 case CMD_FCP_TRSP_CX:
1593 case CMD_FCP_TRECEIVE_CX:
1594 case CMD_FCP_AUTO_TRSP_CX:
1595 case CMD_ADAPTER_MSG:
1596 case CMD_ADAPTER_DUMP:
1597 case CMD_XMIT_SEQUENCE64_CR:
1598 case CMD_XMIT_SEQUENCE64_CX:
1599 case CMD_XMIT_BCAST64_CN:
1600 case CMD_XMIT_BCAST64_CX:
1601 case CMD_ELS_REQUEST64_CR:
1602 case CMD_ELS_REQUEST64_CX:
1603 case CMD_FCP_IWRITE64_CR:
1604 case CMD_FCP_IWRITE64_CX:
1605 case CMD_FCP_IREAD64_CR:
1606 case CMD_FCP_IREAD64_CX:
1607 case CMD_FCP_ICMND64_CR:
1608 case CMD_FCP_ICMND64_CX:
1609 case CMD_FCP_TSEND64_CX:
1610 case CMD_FCP_TRSP64_CX:
1611 case CMD_FCP_TRECEIVE64_CX:
1612 case CMD_GEN_REQUEST64_CR:
1613 case CMD_GEN_REQUEST64_CX:
1614 case CMD_XMIT_ELS_RSP64_CX:
1615 case DSSCMD_IWRITE64_CR:
1616 case DSSCMD_IWRITE64_CX:
1617 case DSSCMD_IREAD64_CR:
1618 case DSSCMD_IREAD64_CX:
1619 case CMD_SEND_FRAME:
1620 type = LPFC_SOL_IOCB;
1621 break;
1622 case CMD_ABORT_XRI_CN:
1623 case CMD_ABORT_XRI_CX:
1624 case CMD_CLOSE_XRI_CN:
1625 case CMD_CLOSE_XRI_CX:
1626 case CMD_XRI_ABORTED_CX:
1627 case CMD_ABORT_MXRI64_CN:
1628 case CMD_XMIT_BLS_RSP64_CX:
1629 type = LPFC_ABORT_IOCB;
1630 break;
1631 case CMD_RCV_SEQUENCE_CX:
1632 case CMD_RCV_ELS_REQ_CX:
1633 case CMD_RCV_SEQUENCE64_CX:
1634 case CMD_RCV_ELS_REQ64_CX:
1635 case CMD_ASYNC_STATUS:
1636 case CMD_IOCB_RCV_SEQ64_CX:
1637 case CMD_IOCB_RCV_ELS64_CX:
1638 case CMD_IOCB_RCV_CONT64_CX:
1639 case CMD_IOCB_RET_XRI64_CX:
1640 type = LPFC_UNSOL_IOCB;
1641 break;
1642 case CMD_IOCB_XMIT_MSEQ64_CR:
1643 case CMD_IOCB_XMIT_MSEQ64_CX:
1644 case CMD_IOCB_RCV_SEQ_LIST64_CX:
1645 case CMD_IOCB_RCV_ELS_LIST64_CX:
1646 case CMD_IOCB_CLOSE_EXTENDED_CN:
1647 case CMD_IOCB_ABORT_EXTENDED_CN:
1648 case CMD_IOCB_RET_HBQE64_CN:
1649 case CMD_IOCB_FCP_IBIDIR64_CR:
1650 case CMD_IOCB_FCP_IBIDIR64_CX:
1651 case CMD_IOCB_FCP_ITASKMGT64_CX:
1652 case CMD_IOCB_LOGENTRY_CN:
1653 case CMD_IOCB_LOGENTRY_ASYNC_CN:
1654 printk("%s - Unhandled SLI-3 Command x%x\n",
1655 __func__, iocb_cmnd);
1656 type = LPFC_UNKNOWN_IOCB;
1657 break;
1658 default:
1659 type = LPFC_UNKNOWN_IOCB;
1660 break;
1661 }
1662
1663 return type;
1664}
1665
1666
1667
1668
1669
1670
1671
1672
1673
1674
1675
1676
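/*
 * lpfc_sli_ring_map - Issue a CONFIG_RING mailbox command (polled) for each
 * SLI-3 ring.  On failure the HBA is marked in error and -ENXIO is returned.
 */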
1677static int
1678lpfc_sli_ring_map(struct lpfc_hba *phba)
1679{
1680 struct lpfc_sli *psli = &phba->sli;
1681 LPFC_MBOXQ_t *pmb;
1682 MAILBOX_t *pmbox;
1683 int i, rc, ret = 0;
1684
1685 pmb = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
1686 if (!pmb)
1687 return -ENOMEM;
1688 pmbox = &pmb->u.mb;
1689 phba->link_state = LPFC_INIT_MBX_CMDS;
1690 for (i = 0; i < psli->num_rings; i++) {
1691 lpfc_config_ring(phba, i, pmb);
1692 rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL);
1693 if (rc != MBX_SUCCESS) {
1694 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
1695 "0446 Adapter failed to init (%d), "
1696 "mbxCmd x%x CFG_RING, mbxStatus x%x, "
1697 "ring %d\n",
1698 rc, pmbox->mbxCommand,
1699 pmbox->mbxStatus, i);
1700 phba->link_state = LPFC_HBA_ERROR;
1701 ret = -ENXIO;
1702 break;
1703 }
1704 }
1705 mempool_free(pmb, phba->mbox_mem_pool);
1706 return ret;
1707}
1708
1709
1710
1711
1712
1713
1714
1715
1716
1717
1718
1719
1720
1721
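/*
 * lpfc_sli_ringtxcmpl_put - Queue a command iocb on the ring's txcmplq to
 * wait for its completion; ELS commands also (re)arm the vport ELS timer.
 */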
1722static int
1723lpfc_sli_ringtxcmpl_put(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
1724 struct lpfc_iocbq *piocb)
1725{
1726 if (phba->sli_rev == LPFC_SLI_REV4)
1727 lockdep_assert_held(&pring->ring_lock);
1728 else
1729 lockdep_assert_held(&phba->hbalock);
1730
1731 BUG_ON(!piocb);
1732
1733 list_add_tail(&piocb->list, &pring->txcmplq);
1734 piocb->iocb_flag |= LPFC_IO_ON_TXCMPLQ;
1735 pring->txcmplq_cnt++;
1736
1737 if ((unlikely(pring->ringno == LPFC_ELS_RING)) &&
1738 (piocb->iocb.ulpCommand != CMD_ABORT_XRI_CN) &&
1739 (piocb->iocb.ulpCommand != CMD_CLOSE_XRI_CN)) {
1740 BUG_ON(!piocb->vport);
1741 if (!(piocb->vport->load_flag & FC_UNLOADING))
1742 mod_timer(&piocb->vport->els_tmofunc,
1743 jiffies +
1744 msecs_to_jiffies(1000 * (phba->fc_ratov << 1)));
1745 }
1746
1747 return 0;
1748}
1749
1750
1751
1752
1753
1754
1755
1756
1757
1758
1759
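/*
 * lpfc_sli_ringtx_get - Remove and return the first iocb on the ring's txq
 * (NULL if empty).  Caller holds hbalock.
 */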
1760struct lpfc_iocbq *
1761lpfc_sli_ringtx_get(struct lpfc_hba *phba, struct lpfc_sli_ring *pring)
1762{
1763 struct lpfc_iocbq *cmd_iocb;
1764
1765 lockdep_assert_held(&phba->hbalock);
1766
1767 list_remove_head((&pring->txq), cmd_iocb, struct lpfc_iocbq, list);
1768 return cmd_iocb;
1769}
1770
1771
1772
1773
1774
1775
1776
1777
1778
1779
1780
1781
1782
1783
1784
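/*
 * lpfc_sli_next_iocb_slot - Return the next free SLI-3 command ring entry,
 * or NULL if the ring is full.  A bogus port cmdGetInx puts the HBA into
 * error state and wakes the worker thread.
 */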
1785static IOCB_t *
1786lpfc_sli_next_iocb_slot (struct lpfc_hba *phba, struct lpfc_sli_ring *pring)
1787{
1788 struct lpfc_pgp *pgp = &phba->port_gp[pring->ringno];
1789 uint32_t max_cmd_idx = pring->sli.sli3.numCiocb;
1790
1791 lockdep_assert_held(&phba->hbalock);
1792
1793 if ((pring->sli.sli3.next_cmdidx == pring->sli.sli3.cmdidx) &&
1794 (++pring->sli.sli3.next_cmdidx >= max_cmd_idx))
1795 pring->sli.sli3.next_cmdidx = 0;
1796
1797 if (unlikely(pring->sli.sli3.local_getidx ==
1798 pring->sli.sli3.next_cmdidx)) {
1799
1800 pring->sli.sli3.local_getidx = le32_to_cpu(pgp->cmdGetInx);
1801
1802 if (unlikely(pring->sli.sli3.local_getidx >= max_cmd_idx)) {
1803 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
1804 "0315 Ring %d issue: portCmdGet %d "
1805 "is bigger than cmd ring %d\n",
1806 pring->ringno,
1807 pring->sli.sli3.local_getidx,
1808 max_cmd_idx);
1809
1810 phba->link_state = LPFC_HBA_ERROR;
1811
1812
1813
1814
1815 phba->work_ha |= HA_ERATT;
1816 phba->work_hs = HS_FFER3;
1817
1818 lpfc_worker_wake_up(phba);
1819
1820 return NULL;
1821 }
1822
1823 if (pring->sli.sli3.local_getidx == pring->sli.sli3.next_cmdidx)
1824 return NULL;
1825 }
1826
1827 return lpfc_cmd_iocb(phba, pring);
1828}
1829
1830
1831
1832
1833
1834
1835
1836
1837
1838
1839
1840
1841
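/*
 * lpfc_sli_next_iotag - Allocate an iotag for @iocbq, growing the iocbq
 * lookup array by LPFC_IOCBQ_LOOKUP_INCREMENT when it is exhausted.
 * Returns the new iotag, or 0 on failure.
 */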
1842uint16_t
1843lpfc_sli_next_iotag(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq)
1844{
1845 struct lpfc_iocbq **new_arr;
1846 struct lpfc_iocbq **old_arr;
1847 size_t new_len;
1848 struct lpfc_sli *psli = &phba->sli;
1849 uint16_t iotag;
1850
1851 spin_lock_irq(&phba->hbalock);
1852 iotag = psli->last_iotag;
	if (++iotag < psli->iocbq_lookup_len) {
1854 psli->last_iotag = iotag;
1855 psli->iocbq_lookup[iotag] = iocbq;
1856 spin_unlock_irq(&phba->hbalock);
1857 iocbq->iotag = iotag;
1858 return iotag;
1859 } else if (psli->iocbq_lookup_len < (0xffff
1860 - LPFC_IOCBQ_LOOKUP_INCREMENT)) {
1861 new_len = psli->iocbq_lookup_len + LPFC_IOCBQ_LOOKUP_INCREMENT;
1862 spin_unlock_irq(&phba->hbalock);
1863 new_arr = kcalloc(new_len, sizeof(struct lpfc_iocbq *),
1864 GFP_KERNEL);
1865 if (new_arr) {
1866 spin_lock_irq(&phba->hbalock);
1867 old_arr = psli->iocbq_lookup;
1868 if (new_len <= psli->iocbq_lookup_len) {
1869
1870 kfree(new_arr);
1871 iotag = psli->last_iotag;
				if (++iotag < psli->iocbq_lookup_len) {
1873 psli->last_iotag = iotag;
1874 psli->iocbq_lookup[iotag] = iocbq;
1875 spin_unlock_irq(&phba->hbalock);
1876 iocbq->iotag = iotag;
1877 return iotag;
1878 }
1879 spin_unlock_irq(&phba->hbalock);
1880 return 0;
1881 }
1882 if (psli->iocbq_lookup)
1883 memcpy(new_arr, old_arr,
1884 ((psli->last_iotag + 1) *
1885 sizeof (struct lpfc_iocbq *)));
1886 psli->iocbq_lookup = new_arr;
1887 psli->iocbq_lookup_len = new_len;
1888 psli->last_iotag = iotag;
1889 psli->iocbq_lookup[iotag] = iocbq;
1890 spin_unlock_irq(&phba->hbalock);
1891 iocbq->iotag = iotag;
1892 kfree(old_arr);
1893 return iotag;
1894 }
1895 } else
1896 spin_unlock_irq(&phba->hbalock);
1897
1898 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
1899 "0318 Failed to allocate IOTAG.last IOTAG is %d\n",
1900 psli->last_iotag);
1901
1902 return 0;
1903}
1904
1905
1906
1907
1908
1909
1910
1911
1912
1913
1914
1915
1916
1917
1918
1919
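/*
 * lpfc_sli_submit_iocb - Copy @nextiocb into ring slot @iocb, track it on
 * the txcmplq when it has a completion handler (otherwise release it) and
 * update the ring's cmdPutInx so the HBA sees the new entry.
 */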
1920static void
1921lpfc_sli_submit_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
1922 IOCB_t *iocb, struct lpfc_iocbq *nextiocb)
1923{
1924
1925
1926
1927 nextiocb->iocb.ulpIoTag = (nextiocb->iocb_cmpl) ? nextiocb->iotag : 0;
1928
1929
1930 if (pring->ringno == LPFC_ELS_RING) {
1931 lpfc_debugfs_slow_ring_trc(phba,
1932 "IOCB cmd ring: wd4:x%08x wd6:x%08x wd7:x%08x",
1933 *(((uint32_t *) &nextiocb->iocb) + 4),
1934 *(((uint32_t *) &nextiocb->iocb) + 6),
1935 *(((uint32_t *) &nextiocb->iocb) + 7));
1936 }
1937
1938
1939
1940
1941 lpfc_sli_pcimem_bcopy(&nextiocb->iocb, iocb, phba->iocb_cmd_size);
1942 wmb();
1943 pring->stats.iocb_cmd++;
1944
1945
1946
1947
1948
1949
1950 if (nextiocb->iocb_cmpl)
1951 lpfc_sli_ringtxcmpl_put(phba, pring, nextiocb);
1952 else
1953 __lpfc_sli_release_iocbq(phba, nextiocb);
1954
1955
1956
1957
1958
1959 pring->sli.sli3.cmdidx = pring->sli.sli3.next_cmdidx;
1960 writel(pring->sli.sli3.cmdidx, &phba->host_gp[pring->ringno].cmdPutInx);
1961}
1962
1963
1964
1965
1966
1967
1968
1969
1970
1971
1972
1973
1974
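/*
 * lpfc_sli_update_full_ring - The ring is full: flag LPFC_CALL_RING_AVAILABLE
 * and ask the chip for a ring-available attention as entries drain.
 */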
1975static void
1976lpfc_sli_update_full_ring(struct lpfc_hba *phba, struct lpfc_sli_ring *pring)
1977{
1978 int ringno = pring->ringno;
1979
1980 pring->flag |= LPFC_CALL_RING_AVAILABLE;
1981
1982 wmb();
1983
1984
1985
1986
1987
1988 writel((CA_R0ATT|CA_R0CE_REQ) << (ringno*4), phba->CAregaddr);
1989 readl(phba->CAregaddr);
1990
1991 pring->stats.iocb_cmd_full++;
1992}
1993
1994
1995
1996
1997
1998
1999
2000
2001
2002
2003static void
2004lpfc_sli_update_ring(struct lpfc_hba *phba, struct lpfc_sli_ring *pring)
2005{
2006 int ringno = pring->ringno;
2007
2008
2009
2010
2011 if (!(phba->sli3_options & LPFC_SLI3_CRP_ENABLED)) {
2012 wmb();
2013 writel(CA_R0ATT << (ringno * 4), phba->CAregaddr);
2014 readl(phba->CAregaddr);
2015 }
2016}
2017
2018
2019
2020
2021
2022
2023
2024
2025
2026
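/*
 * lpfc_sli_resume_iocb - While the link is up and command ring entries are
 * free, move queued iocbs from the ring's txq onto the hardware ring and
 * update the chip attention register.
 */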
2027static void
2028lpfc_sli_resume_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring)
2029{
2030 IOCB_t *iocb;
2031 struct lpfc_iocbq *nextiocb;
2032
2033 lockdep_assert_held(&phba->hbalock);
2034
2035
2036
2037
2038
2039
2040
2041
2042
2043 if (lpfc_is_link_up(phba) &&
2044 (!list_empty(&pring->txq)) &&
2045 (pring->ringno != LPFC_FCP_RING ||
2046 phba->sli.sli_flag & LPFC_PROCESS_LA)) {
2047
2048 while ((iocb = lpfc_sli_next_iocb_slot(phba, pring)) &&
2049 (nextiocb = lpfc_sli_ringtx_get(phba, pring)))
2050 lpfc_sli_submit_iocb(phba, pring, iocb, nextiocb);
2051
2052 if (iocb)
2053 lpfc_sli_update_ring(phba, pring);
2054 else
2055 lpfc_sli_update_full_ring(phba, pring);
2056 }
2057
2058 return;
2059}
2060
2061
2062
2063
2064
2065
2066
2067
2068
2069
2070
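/*
 * lpfc_sli_next_hbq_slot - Return the next free host buffer queue entry for
 * @hbqno, or NULL if the HBQ is full or the port's get index is invalid.
 */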
2071static struct lpfc_hbq_entry *
2072lpfc_sli_next_hbq_slot(struct lpfc_hba *phba, uint32_t hbqno)
2073{
2074 struct hbq_s *hbqp = &phba->hbqs[hbqno];
2075
2076 lockdep_assert_held(&phba->hbalock);
2077
2078 if (hbqp->next_hbqPutIdx == hbqp->hbqPutIdx &&
2079 ++hbqp->next_hbqPutIdx >= hbqp->entry_count)
2080 hbqp->next_hbqPutIdx = 0;
2081
2082 if (unlikely(hbqp->local_hbqGetIdx == hbqp->next_hbqPutIdx)) {
2083 uint32_t raw_index = phba->hbq_get[hbqno];
2084 uint32_t getidx = le32_to_cpu(raw_index);
2085
2086 hbqp->local_hbqGetIdx = getidx;
2087
2088 if (unlikely(hbqp->local_hbqGetIdx >= hbqp->entry_count)) {
2089 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
2090 "1802 HBQ %d: local_hbqGetIdx "
2091 "%u is > than hbqp->entry_count %u\n",
2092 hbqno, hbqp->local_hbqGetIdx,
2093 hbqp->entry_count);
2094
2095 phba->link_state = LPFC_HBA_ERROR;
2096 return NULL;
2097 }
2098
2099 if (hbqp->local_hbqGetIdx == hbqp->next_hbqPutIdx)
2100 return NULL;
2101 }
2102
2103 return (struct lpfc_hbq_entry *) phba->hbqs[hbqno].hbq_virt +
2104 hbqp->hbqPutIdx;
2105}
2106
2107
2108
2109
2110
2111
2112
2113
2114
2115
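/*
 * lpfc_sli_hbqbuf_free_all - Free every buffer posted to the HBQs and mark
 * the HBQs as no longer in use.
 */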
2116void
2117lpfc_sli_hbqbuf_free_all(struct lpfc_hba *phba)
2118{
2119 struct lpfc_dmabuf *dmabuf, *next_dmabuf;
2120 struct hbq_dmabuf *hbq_buf;
2121 unsigned long flags;
2122 int i, hbq_count;
2123
2124 hbq_count = lpfc_sli_hbq_count();
2125
2126 spin_lock_irqsave(&phba->hbalock, flags);
2127 for (i = 0; i < hbq_count; ++i) {
2128 list_for_each_entry_safe(dmabuf, next_dmabuf,
2129 &phba->hbqs[i].hbq_buffer_list, list) {
2130 hbq_buf = container_of(dmabuf, struct hbq_dmabuf, dbuf);
2131 list_del(&hbq_buf->dbuf.list);
2132 (phba->hbqs[i].hbq_free_buffer)(phba, hbq_buf);
2133 }
2134 phba->hbqs[i].buffer_count = 0;
2135 }
2136
2137
2138 phba->hbq_in_use = 0;
2139 spin_unlock_irqrestore(&phba->hbalock, flags);
2140}
2141
2142
2143
2144
2145
2146
2147
2148
2149
2150
2151
2152
2153
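/*
 * lpfc_sli_hbq_to_firmware - Post an HBQ buffer to the port via the
 * SLI-revision specific handler (hbalock held).
 */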
2154static int
2155lpfc_sli_hbq_to_firmware(struct lpfc_hba *phba, uint32_t hbqno,
2156 struct hbq_dmabuf *hbq_buf)
2157{
2158 lockdep_assert_held(&phba->hbalock);
2159 return phba->lpfc_sli_hbq_to_firmware(phba, hbqno, hbq_buf);
2160}
2161
2162
2163
2164
2165
2166
2167
2168
2169
2170
2171
2172
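/*
 * lpfc_sli_hbq_to_firmware_s3 - SLI-3: fill the next HBQ entry with the
 * buffer's BDE and tag, then write the new put index to the port.
 */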
2173static int
2174lpfc_sli_hbq_to_firmware_s3(struct lpfc_hba *phba, uint32_t hbqno,
2175 struct hbq_dmabuf *hbq_buf)
2176{
2177 struct lpfc_hbq_entry *hbqe;
2178 dma_addr_t physaddr = hbq_buf->dbuf.phys;
2179
2180 lockdep_assert_held(&phba->hbalock);
2181
2182 hbqe = lpfc_sli_next_hbq_slot(phba, hbqno);
2183 if (hbqe) {
2184 struct hbq_s *hbqp = &phba->hbqs[hbqno];
2185
2186 hbqe->bde.addrHigh = le32_to_cpu(putPaddrHigh(physaddr));
2187 hbqe->bde.addrLow = le32_to_cpu(putPaddrLow(physaddr));
2188 hbqe->bde.tus.f.bdeSize = hbq_buf->total_size;
2189 hbqe->bde.tus.f.bdeFlags = 0;
2190 hbqe->bde.tus.w = le32_to_cpu(hbqe->bde.tus.w);
2191 hbqe->buffer_tag = le32_to_cpu(hbq_buf->tag);
2192
2193 hbqp->hbqPutIdx = hbqp->next_hbqPutIdx;
2194 writel(hbqp->hbqPutIdx, phba->hbq_put + hbqno);
2195
2196 readl(phba->hbq_put + hbqno);
2197 list_add_tail(&hbq_buf->dbuf.list, &hbqp->hbq_buffer_list);
2198 return 0;
2199 } else
2200 return -ENOMEM;
2201}
2202
2203
2204
2205
2206
2207
2208
2209
2210
2211
2212
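/*
 * lpfc_sli_hbq_to_firmware_s4 - SLI-4: post the buffer to the ELS header/data
 * receive queue pair and encode the RQ index and HBQ number in its tag.
 */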
2213static int
2214lpfc_sli_hbq_to_firmware_s4(struct lpfc_hba *phba, uint32_t hbqno,
2215 struct hbq_dmabuf *hbq_buf)
2216{
2217 int rc;
2218 struct lpfc_rqe hrqe;
2219 struct lpfc_rqe drqe;
2220 struct lpfc_queue *hrq;
2221 struct lpfc_queue *drq;
2222
2223 if (hbqno != LPFC_ELS_HBQ)
2224 return 1;
2225 hrq = phba->sli4_hba.hdr_rq;
2226 drq = phba->sli4_hba.dat_rq;
2227
2228 lockdep_assert_held(&phba->hbalock);
2229 hrqe.address_lo = putPaddrLow(hbq_buf->hbuf.phys);
2230 hrqe.address_hi = putPaddrHigh(hbq_buf->hbuf.phys);
2231 drqe.address_lo = putPaddrLow(hbq_buf->dbuf.phys);
2232 drqe.address_hi = putPaddrHigh(hbq_buf->dbuf.phys);
2233 rc = lpfc_sli4_rq_put(hrq, drq, &hrqe, &drqe);
2234 if (rc < 0)
2235 return rc;
2236 hbq_buf->tag = (rc | (hbqno << 16));
2237 list_add_tail(&hbq_buf->dbuf.list, &phba->hbqs[hbqno].hbq_buffer_list);
2238 return 0;
2239}
2240
2241
2242static struct lpfc_hbq_init lpfc_els_hbq = {
2243 .rn = 1,
2244 .entry_count = 256,
2245 .mask_count = 0,
2246 .profile = 0,
2247 .ring_mask = (1 << LPFC_ELS_RING),
2248 .buffer_count = 0,
2249 .init_count = 40,
2250 .add_count = 40,
2251};
2252
2253
2254struct lpfc_hbq_init *lpfc_hbq_defs[] = {
2255 &lpfc_els_hbq,
2256};
2257
2258
2259
2260
2261
2262
2263
2264
2265
2266
2267
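/*
 * lpfc_sli_hbqbuf_fill_hbqs - Allocate up to @count buffers for HBQ @hbqno
 * and post them; returns the number actually posted.
 */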
2268static int
2269lpfc_sli_hbqbuf_fill_hbqs(struct lpfc_hba *phba, uint32_t hbqno, uint32_t count)
2270{
2271 uint32_t i, posted = 0;
2272 unsigned long flags;
2273 struct hbq_dmabuf *hbq_buffer;
2274 LIST_HEAD(hbq_buf_list);
2275 if (!phba->hbqs[hbqno].hbq_alloc_buffer)
2276 return 0;
2277
2278 if ((phba->hbqs[hbqno].buffer_count + count) >
2279 lpfc_hbq_defs[hbqno]->entry_count)
2280 count = lpfc_hbq_defs[hbqno]->entry_count -
2281 phba->hbqs[hbqno].buffer_count;
2282 if (!count)
2283 return 0;
2284
2285 for (i = 0; i < count; i++) {
2286 hbq_buffer = (phba->hbqs[hbqno].hbq_alloc_buffer)(phba);
2287 if (!hbq_buffer)
2288 break;
2289 list_add_tail(&hbq_buffer->dbuf.list, &hbq_buf_list);
2290 }
2291
2292 spin_lock_irqsave(&phba->hbalock, flags);
2293 if (!phba->hbq_in_use)
2294 goto err;
2295 while (!list_empty(&hbq_buf_list)) {
2296 list_remove_head(&hbq_buf_list, hbq_buffer, struct hbq_dmabuf,
2297 dbuf.list);
2298 hbq_buffer->tag = (phba->hbqs[hbqno].buffer_count |
2299 (hbqno << 16));
2300 if (!lpfc_sli_hbq_to_firmware(phba, hbqno, hbq_buffer)) {
2301 phba->hbqs[hbqno].buffer_count++;
2302 posted++;
2303 } else
2304 (phba->hbqs[hbqno].hbq_free_buffer)(phba, hbq_buffer);
2305 }
2306 spin_unlock_irqrestore(&phba->hbalock, flags);
2307 return posted;
2308err:
2309 spin_unlock_irqrestore(&phba->hbalock, flags);
2310 while (!list_empty(&hbq_buf_list)) {
2311 list_remove_head(&hbq_buf_list, hbq_buffer, struct hbq_dmabuf,
2312 dbuf.list);
2313 (phba->hbqs[hbqno].hbq_free_buffer)(phba, hbq_buffer);
2314 }
2315 return 0;
2316}
2317
2318
2319
2320
2321
2322
2323
2324
2325
2326
2327int
2328lpfc_sli_hbqbuf_add_hbqs(struct lpfc_hba *phba, uint32_t qno)
2329{
2330 if (phba->sli_rev == LPFC_SLI_REV4)
2331 return 0;
2332 else
2333 return lpfc_sli_hbqbuf_fill_hbqs(phba, qno,
2334 lpfc_hbq_defs[qno]->add_count);
2335}
2336
2337
2338
2339
2340
2341
2342
2343
2344
2345
2346static int
2347lpfc_sli_hbqbuf_init_hbqs(struct lpfc_hba *phba, uint32_t qno)
2348{
2349 if (phba->sli_rev == LPFC_SLI_REV4)
2350 return lpfc_sli_hbqbuf_fill_hbqs(phba, qno,
2351 lpfc_hbq_defs[qno]->entry_count);
2352 else
2353 return lpfc_sli_hbqbuf_fill_hbqs(phba, qno,
2354 lpfc_hbq_defs[qno]->init_count);
2355}
2356
2357
2358
2359
2360
2361
2362
2363static struct hbq_dmabuf *
2364lpfc_sli_hbqbuf_get(struct list_head *rb_list)
2365{
2366 struct lpfc_dmabuf *d_buf;
2367
2368 list_remove_head(rb_list, d_buf, struct lpfc_dmabuf, list);
2369 if (!d_buf)
2370 return NULL;
2371 return container_of(d_buf, struct hbq_dmabuf, dbuf);
2372}
2373
2374
2375
2376
2377
2378
2379
2380
2381
2382static struct rqb_dmabuf *
2383lpfc_sli_rqbuf_get(struct lpfc_hba *phba, struct lpfc_queue *hrq)
2384{
2385 struct lpfc_dmabuf *h_buf;
2386 struct lpfc_rqb *rqbp;
2387
2388 rqbp = hrq->rqbp;
2389 list_remove_head(&rqbp->rqb_buffer_list, h_buf,
2390 struct lpfc_dmabuf, list);
2391 if (!h_buf)
2392 return NULL;
2393 rqbp->buffer_count--;
2394 return container_of(h_buf, struct rqb_dmabuf, hbuf);
2395}
2396
2397
2398
2399
2400
2401
2402
2403
2404
2405
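/*
 * lpfc_sli_hbqbuf_find - Look up the posted HBQ buffer matching @tag (HBQ
 * number in the upper 16 bits); logs an error and returns NULL if not found.
 */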
2406static struct hbq_dmabuf *
2407lpfc_sli_hbqbuf_find(struct lpfc_hba *phba, uint32_t tag)
2408{
2409 struct lpfc_dmabuf *d_buf;
2410 struct hbq_dmabuf *hbq_buf;
2411 uint32_t hbqno;
2412
2413 hbqno = tag >> 16;
2414 if (hbqno >= LPFC_MAX_HBQS)
2415 return NULL;
2416
2417 spin_lock_irq(&phba->hbalock);
2418 list_for_each_entry(d_buf, &phba->hbqs[hbqno].hbq_buffer_list, list) {
2419 hbq_buf = container_of(d_buf, struct hbq_dmabuf, dbuf);
2420 if (hbq_buf->tag == tag) {
2421 spin_unlock_irq(&phba->hbalock);
2422 return hbq_buf;
2423 }
2424 }
2425 spin_unlock_irq(&phba->hbalock);
2426 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
2427 "1803 Bad hbq tag. Data: x%x x%x\n",
2428 tag, phba->hbqs[tag >> 16].buffer_count);
2429 return NULL;
2430}
2431
2432
2433
2434
2435
2436
2437
2438
2439
2440
2441void
2442lpfc_sli_free_hbq(struct lpfc_hba *phba, struct hbq_dmabuf *hbq_buffer)
2443{
2444 uint32_t hbqno;
2445
2446 if (hbq_buffer) {
2447 hbqno = hbq_buffer->tag >> 16;
2448 if (lpfc_sli_hbq_to_firmware(phba, hbqno, hbq_buffer))
2449 (phba->hbqs[hbqno].hbq_free_buffer)(phba, hbq_buffer);
2450 }
2451}
2452
2453
2454
2455
2456
2457
2458
2459
2460
2461
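/*
 * lpfc_sli_chk_mbx_command - Return @mbxCommand if it is a known mailbox
 * command, or MBX_SHUTDOWN for unknown commands.
 */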
2462static int
2463lpfc_sli_chk_mbx_command(uint8_t mbxCommand)
2464{
2465 uint8_t ret;
2466
2467 switch (mbxCommand) {
2468 case MBX_LOAD_SM:
2469 case MBX_READ_NV:
2470 case MBX_WRITE_NV:
2471 case MBX_WRITE_VPARMS:
2472 case MBX_RUN_BIU_DIAG:
2473 case MBX_INIT_LINK:
2474 case MBX_DOWN_LINK:
2475 case MBX_CONFIG_LINK:
2476 case MBX_CONFIG_RING:
2477 case MBX_RESET_RING:
2478 case MBX_READ_CONFIG:
2479 case MBX_READ_RCONFIG:
2480 case MBX_READ_SPARM:
2481 case MBX_READ_STATUS:
2482 case MBX_READ_RPI:
2483 case MBX_READ_XRI:
2484 case MBX_READ_REV:
2485 case MBX_READ_LNK_STAT:
2486 case MBX_REG_LOGIN:
2487 case MBX_UNREG_LOGIN:
2488 case MBX_CLEAR_LA:
2489 case MBX_DUMP_MEMORY:
2490 case MBX_DUMP_CONTEXT:
2491 case MBX_RUN_DIAGS:
2492 case MBX_RESTART:
2493 case MBX_UPDATE_CFG:
2494 case MBX_DOWN_LOAD:
2495 case MBX_DEL_LD_ENTRY:
2496 case MBX_RUN_PROGRAM:
2497 case MBX_SET_MASK:
2498 case MBX_SET_VARIABLE:
2499 case MBX_UNREG_D_ID:
2500 case MBX_KILL_BOARD:
2501 case MBX_CONFIG_FARP:
2502 case MBX_BEACON:
2503 case MBX_LOAD_AREA:
2504 case MBX_RUN_BIU_DIAG64:
2505 case MBX_CONFIG_PORT:
2506 case MBX_READ_SPARM64:
2507 case MBX_READ_RPI64:
2508 case MBX_REG_LOGIN64:
2509 case MBX_READ_TOPOLOGY:
2510 case MBX_WRITE_WWN:
2511 case MBX_SET_DEBUG:
2512 case MBX_LOAD_EXP_ROM:
2513 case MBX_ASYNCEVT_ENABLE:
2514 case MBX_REG_VPI:
2515 case MBX_UNREG_VPI:
2516 case MBX_HEARTBEAT:
2517 case MBX_PORT_CAPABILITIES:
2518 case MBX_PORT_IOV_CONTROL:
2519 case MBX_SLI4_CONFIG:
2520 case MBX_SLI4_REQ_FTRS:
2521 case MBX_REG_FCFI:
2522 case MBX_UNREG_FCFI:
2523 case MBX_REG_VFI:
2524 case MBX_UNREG_VFI:
2525 case MBX_INIT_VPI:
2526 case MBX_INIT_VFI:
2527 case MBX_RESUME_RPI:
2528 case MBX_READ_EVENT_LOG_STATUS:
2529 case MBX_READ_EVENT_LOG:
2530 case MBX_SECURITY_MGMT:
2531 case MBX_AUTH_PORT:
2532 case MBX_ACCESS_VDATA:
2533 ret = mbxCommand;
2534 break;
2535 default:
2536 ret = MBX_SHUTDOWN;
2537 break;
2538 }
2539 return ret;
2540}
2541
2542
2543
2544
2545
2546
2547
2548
2549
2550
2551
2552
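/*
 * lpfc_sli_wake_mbox_wait - Completion handler for synchronously issued
 * mailbox commands: sets LPFC_MBX_WAKE and completes the waiter stored in
 * the mailbox context3 field.
 */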
2553void
2554lpfc_sli_wake_mbox_wait(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmboxq)
2555{
2556 unsigned long drvr_flag;
2557 struct completion *pmbox_done;
2558
2559
2560
2561
2562
2563 pmboxq->mbox_flag |= LPFC_MBX_WAKE;
2564 spin_lock_irqsave(&phba->hbalock, drvr_flag);
2565 pmbox_done = (struct completion *)pmboxq->context3;
2566 if (pmbox_done)
2567 complete(pmbox_done);
2568 spin_unlock_irqrestore(&phba->hbalock, drvr_flag);
2569 return;
2570}
2571
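/*
 * __lpfc_sli_rpi_release - Free the node's RPI if NLP_RELEASE_RPI is set and
 * clear the NLP_UNREG_INP flag.
 */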
2572static void
2573__lpfc_sli_rpi_release(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
2574{
2575 unsigned long iflags;
2576
2577 if (ndlp->nlp_flag & NLP_RELEASE_RPI) {
2578 lpfc_sli4_free_rpi(vport->phba, ndlp->nlp_rpi);
2579 spin_lock_irqsave(&ndlp->lock, iflags);
2580 ndlp->nlp_flag &= ~NLP_RELEASE_RPI;
2581 ndlp->nlp_rpi = LPFC_RPI_ALLOC_ERROR;
2582 spin_unlock_irqrestore(&ndlp->lock, iflags);
2583 }
2584 ndlp->nlp_flag &= ~NLP_UNREG_INP;
2585}
2586
2587
2588
2589
2590
2591
2592
2593
2594
2595
2596
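/*
 * lpfc_sli_def_mbox_cmpl - Default mailbox completion handler.
 * Frees any attached DMA buffer, immediately unregisters a REG_LOGIN64 that
 * completed after it was no longer wanted, updates VPI registration state,
 * finishes deferred UNREG_LOGIN processing, drops node references and frees
 * the mailbox object.
 */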
2597void
2598lpfc_sli_def_mbox_cmpl(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
2599{
2600 struct lpfc_vport *vport = pmb->vport;
2601 struct lpfc_dmabuf *mp;
2602 struct lpfc_nodelist *ndlp;
2603 struct Scsi_Host *shost;
2604 uint16_t rpi, vpi;
2605 int rc;
2606
2607 mp = (struct lpfc_dmabuf *)(pmb->ctx_buf);
2608
2609 if (mp) {
2610 lpfc_mbuf_free(phba, mp->virt, mp->phys);
2611 kfree(mp);
2612 }
2613
2614
2615
2616
2617
2618 if (!(phba->pport->load_flag & FC_UNLOADING) &&
2619 pmb->u.mb.mbxCommand == MBX_REG_LOGIN64 &&
2620 !pmb->u.mb.mbxStatus) {
2621 rpi = pmb->u.mb.un.varWords[0];
2622 vpi = pmb->u.mb.un.varRegLogin.vpi;
2623 if (phba->sli_rev == LPFC_SLI_REV4)
2624 vpi -= phba->sli4_hba.max_cfg_param.vpi_base;
2625 lpfc_unreg_login(phba, vpi, rpi, pmb);
2626 pmb->vport = vport;
2627 pmb->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
2628 rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT);
2629 if (rc != MBX_NOT_FINISHED)
2630 return;
2631 }
2632
2633 if ((pmb->u.mb.mbxCommand == MBX_REG_VPI) &&
2634 !(phba->pport->load_flag & FC_UNLOADING) &&
2635 !pmb->u.mb.mbxStatus) {
2636 shost = lpfc_shost_from_vport(vport);
2637 spin_lock_irq(shost->host_lock);
2638 vport->vpi_state |= LPFC_VPI_REGISTERED;
2639 vport->fc_flag &= ~FC_VPORT_NEEDS_REG_VPI;
2640 spin_unlock_irq(shost->host_lock);
2641 }
2642
2643 if (pmb->u.mb.mbxCommand == MBX_REG_LOGIN64) {
2644 ndlp = (struct lpfc_nodelist *)pmb->ctx_ndlp;
2645 lpfc_nlp_put(ndlp);
2646 pmb->ctx_buf = NULL;
2647 pmb->ctx_ndlp = NULL;
2648 }
2649
2650 if (pmb->u.mb.mbxCommand == MBX_UNREG_LOGIN) {
2651 ndlp = (struct lpfc_nodelist *)pmb->ctx_ndlp;
2652
2653
2654 if (ndlp) {
2655 lpfc_printf_vlog(
2656 vport,
2657 KERN_INFO, LOG_MBOX | LOG_DISCOVERY,
2658 "1438 UNREG cmpl deferred mbox x%x "
2659 "on NPort x%x Data: x%x x%x x%px x%x x%x\n",
2660 ndlp->nlp_rpi, ndlp->nlp_DID,
2661 ndlp->nlp_flag, ndlp->nlp_defer_did,
2662 ndlp, vport->load_flag, kref_read(&ndlp->kref));
2663
2664 if ((ndlp->nlp_flag & NLP_UNREG_INP) &&
2665 (ndlp->nlp_defer_did != NLP_EVT_NOTHING_PENDING)) {
2666 ndlp->nlp_flag &= ~NLP_UNREG_INP;
2667 ndlp->nlp_defer_did = NLP_EVT_NOTHING_PENDING;
2668 lpfc_issue_els_plogi(vport, ndlp->nlp_DID, 0);
2669 } else {
2670 __lpfc_sli_rpi_release(vport, ndlp);
2671 }
2672
2673
2674
2675
2676
2677 lpfc_nlp_put(ndlp);
2678 pmb->ctx_ndlp = NULL;
2679 }
2680 }
2681
2682
2683 if (pmb->u.mb.mbxCommand == MBX_RESUME_RPI) {
2684 ndlp = (struct lpfc_nodelist *)pmb->ctx_ndlp;
2685 lpfc_nlp_put(ndlp);
2686 }
2687
2688
2689 if ((pmb->u.mb.mbxCommand == MBX_INIT_LINK) &&
2690 (pmb->u.mb.mbxStatus == MBXERR_SEC_NO_PERMISSION))
2691 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
2692 "2860 SLI authentication is required "
2693				"for INIT_LINK but has not been done yet\n");
2694
2695 if (bf_get(lpfc_mqe_command, &pmb->u.mqe) == MBX_SLI4_CONFIG)
2696 lpfc_sli4_mbox_cmd_free(phba, pmb);
2697 else
2698 mempool_free(pmb, phba->mbox_mem_pool);
2699}
2700
2701
2702
2703
2704
2705
2706
2707
2708
2709
2710
2711
2712
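/**
 * lpfc_sli4_unreg_rpi_cmpl_clr - UNREG_LOGIN mailbox completion for SLI4
 * @phba: Pointer to HBA context object.
 * @pmb: Pointer to the completed UNREG_LOGIN mailbox command.
 *
 * On SLI4 if_type 2 and later ports, clears NLP_LOGO_ACC on the node,
 * re-issues a deferred PLOGI when one is pending, otherwise releases the
 * RPI, then drops the node reference and frees the mailbox.
 **/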
2713void
2714lpfc_sli4_unreg_rpi_cmpl_clr(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
2715{
2716 struct lpfc_vport *vport = pmb->vport;
2717 struct lpfc_nodelist *ndlp;
2718
2719 ndlp = pmb->ctx_ndlp;
2720 if (pmb->u.mb.mbxCommand == MBX_UNREG_LOGIN) {
2721 if (phba->sli_rev == LPFC_SLI_REV4 &&
2722 (bf_get(lpfc_sli_intf_if_type,
2723 &phba->sli4_hba.sli_intf) >=
2724 LPFC_SLI_INTF_IF_TYPE_2)) {
2725 if (ndlp) {
2726 lpfc_printf_vlog(
2727 vport, KERN_INFO, LOG_MBOX | LOG_SLI,
2728 "0010 UNREG_LOGIN vpi:%x "
2729 "rpi:%x DID:%x defer x%x flg x%x "
2730 "x%px\n",
2731 vport->vpi, ndlp->nlp_rpi,
2732 ndlp->nlp_DID, ndlp->nlp_defer_did,
2733 ndlp->nlp_flag,
2734 ndlp);
2735 ndlp->nlp_flag &= ~NLP_LOGO_ACC;
2736
2737
2738
2739
2740 if ((ndlp->nlp_flag & NLP_UNREG_INP) &&
2741 (ndlp->nlp_defer_did !=
2742 NLP_EVT_NOTHING_PENDING)) {
2743 lpfc_printf_vlog(
2744 vport, KERN_INFO, LOG_DISCOVERY,
2745 "4111 UNREG cmpl deferred "
2746 "clr x%x on "
2747 "NPort x%x Data: x%x x%px\n",
2748 ndlp->nlp_rpi, ndlp->nlp_DID,
2749 ndlp->nlp_defer_did, ndlp);
2750 ndlp->nlp_flag &= ~NLP_UNREG_INP;
2751 ndlp->nlp_defer_did =
2752 NLP_EVT_NOTHING_PENDING;
2753 lpfc_issue_els_plogi(
2754 vport, ndlp->nlp_DID, 0);
2755 } else {
2756 __lpfc_sli_rpi_release(vport, ndlp);
2757 }
2758 lpfc_nlp_put(ndlp);
2759 }
2760 }
2761 }
2762
2763 mempool_free(pmb, phba->mbox_mem_pool);
2764}
2765
2766
2767
2768
2769
2770
2771
2772
2773
2774
2775
2776
2777
2778
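/**
 * lpfc_sli_handle_mb_event - Process completed mailbox commands
 * @phba: Pointer to HBA context object.
 *
 * Moves the mailbox completion queue to a local list under hbalock and then
 * processes each entry: unknown commands trigger an error attention, commands
 * that failed with MBXERR_NO_RESOURCES are retried, and all others are passed
 * to their mbox_cmpl handler.
 *
 * Always returns 0.
 **/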
2779int
2780lpfc_sli_handle_mb_event(struct lpfc_hba *phba)
2781{
2782 MAILBOX_t *pmbox;
2783 LPFC_MBOXQ_t *pmb;
2784 int rc;
2785 LIST_HEAD(cmplq);
2786
2787 phba->sli.slistat.mbox_event++;
2788
2789
2790 spin_lock_irq(&phba->hbalock);
2791 list_splice_init(&phba->sli.mboxq_cmpl, &cmplq);
2792 spin_unlock_irq(&phba->hbalock);
2793
2794
2795 do {
2796 list_remove_head(&cmplq, pmb, LPFC_MBOXQ_t, list);
2797 if (pmb == NULL)
2798 break;
2799
2800 pmbox = &pmb->u.mb;
2801
2802 if (pmbox->mbxCommand != MBX_HEARTBEAT) {
2803 if (pmb->vport) {
2804 lpfc_debugfs_disc_trc(pmb->vport,
2805 LPFC_DISC_TRC_MBOX_VPORT,
2806 "MBOX cmpl vport: cmd:x%x mb:x%x x%x",
2807 (uint32_t)pmbox->mbxCommand,
2808 pmbox->un.varWords[0],
2809 pmbox->un.varWords[1]);
2810			} else {
2812 lpfc_debugfs_disc_trc(phba->pport,
2813 LPFC_DISC_TRC_MBOX,
2814 "MBOX cmpl: cmd:x%x mb:x%x x%x",
2815 (uint32_t)pmbox->mbxCommand,
2816 pmbox->un.varWords[0],
2817 pmbox->un.varWords[1]);
2818 }
2819 }
2820
2821
2822
2823
2824 if (lpfc_sli_chk_mbx_command(pmbox->mbxCommand) ==
2825 MBX_SHUTDOWN) {
2826
2827 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
2828 "(%d):0323 Unknown Mailbox command "
2829 "x%x (x%x/x%x) Cmpl\n",
2830 pmb->vport ? pmb->vport->vpi :
2831 LPFC_VPORT_UNKNOWN,
2832 pmbox->mbxCommand,
2833 lpfc_sli_config_mbox_subsys_get(phba,
2834 pmb),
2835 lpfc_sli_config_mbox_opcode_get(phba,
2836 pmb));
2837 phba->link_state = LPFC_HBA_ERROR;
2838 phba->work_hs = HS_FFER3;
2839 lpfc_handle_eratt(phba);
2840 continue;
2841 }
2842
2843 if (pmbox->mbxStatus) {
2844 phba->sli.slistat.mbox_stat_err++;
2845 if (pmbox->mbxStatus == MBXERR_NO_RESOURCES) {
2846
2847 lpfc_printf_log(phba, KERN_INFO,
2848 LOG_MBOX | LOG_SLI,
2849 "(%d):0305 Mbox cmd cmpl "
2850 "error - RETRYing Data: x%x "
2851 "(x%x/x%x) x%x x%x x%x\n",
2852 pmb->vport ? pmb->vport->vpi :
2853 LPFC_VPORT_UNKNOWN,
2854 pmbox->mbxCommand,
2855 lpfc_sli_config_mbox_subsys_get(phba,
2856 pmb),
2857 lpfc_sli_config_mbox_opcode_get(phba,
2858 pmb),
2859 pmbox->mbxStatus,
2860 pmbox->un.varWords[0],
2861 pmb->vport ? pmb->vport->port_state :
2862 LPFC_VPORT_UNKNOWN);
2863 pmbox->mbxStatus = 0;
2864 pmbox->mbxOwner = OWN_HOST;
2865 rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT);
2866 if (rc != MBX_NOT_FINISHED)
2867 continue;
2868 }
2869 }
2870
2871
2872 lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI,
2873 "(%d):0307 Mailbox cmd x%x (x%x/x%x) Cmpl %ps "
2874 "Data: x%x x%x x%x x%x x%x x%x x%x x%x x%x "
2875 "x%x x%x x%x\n",
2876 pmb->vport ? pmb->vport->vpi : 0,
2877 pmbox->mbxCommand,
2878 lpfc_sli_config_mbox_subsys_get(phba, pmb),
2879 lpfc_sli_config_mbox_opcode_get(phba, pmb),
2880 pmb->mbox_cmpl,
2881 *((uint32_t *) pmbox),
2882 pmbox->un.varWords[0],
2883 pmbox->un.varWords[1],
2884 pmbox->un.varWords[2],
2885 pmbox->un.varWords[3],
2886 pmbox->un.varWords[4],
2887 pmbox->un.varWords[5],
2888 pmbox->un.varWords[6],
2889 pmbox->un.varWords[7],
2890 pmbox->un.varWords[8],
2891 pmbox->un.varWords[9],
2892 pmbox->un.varWords[10]);
2893
2894 if (pmb->mbox_cmpl)
2895			pmb->mbox_cmpl(phba, pmb);
2896 } while (1);
2897 return 0;
2898}
2899
2900
2901
2902
2903
2904
2905
2906
2907
2908
2909
2910
2911
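/**
 * lpfc_sli_get_buff - Look up an unsolicited receive buffer by tag
 * @phba: Pointer to HBA context object.
 * @pring: Pointer to the driver SLI ring object.
 * @tag: Buffer tag from the IOCB.
 *
 * Returns the buffer posted with a QUE_BUFTAG tag from the ring's posted
 * buffer queue, or the HBQ buffer matching @tag. Returns NULL if no buffer
 * is found.
 **/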
2912static struct lpfc_dmabuf *
2913lpfc_sli_get_buff(struct lpfc_hba *phba,
2914 struct lpfc_sli_ring *pring,
2915 uint32_t tag)
2916{
2917 struct hbq_dmabuf *hbq_entry;
2918
2919 if (tag & QUE_BUFTAG_BIT)
2920 return lpfc_sli_ring_taggedbuf_get(phba, pring, tag);
2921 hbq_entry = lpfc_sli_hbqbuf_find(phba, tag);
2922 if (!hbq_entry)
2923 return NULL;
2924 return &hbq_entry->dbuf;
2925}
2926
2927
2928
2929
2930
2931
2932
2933
2934
2935
2936
2937
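/**
 * lpfc_nvme_unsol_ls_handler - Handle an unsolicited NVME LS request
 * @phba: Pointer to HBA context object.
 * @piocb: Receive IOCB carrying the LS receive buffer in context2.
 *
 * Validates the frame (driver state, NVME FC4 enablement, local/target port
 * presence, R_CTL and F_CTL), builds an lpfc_async_xchg_ctx and hands it to
 * the NVMET or NVME initiator LS handler. On any failure the receive buffer
 * is freed, an abort may be issued for the exchange, and the context is
 * released.
 **/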
2938static void
2939lpfc_nvme_unsol_ls_handler(struct lpfc_hba *phba, struct lpfc_iocbq *piocb)
2940{
2941 struct lpfc_nodelist *ndlp;
2942 struct lpfc_dmabuf *d_buf;
2943 struct hbq_dmabuf *nvmebuf;
2944 struct fc_frame_header *fc_hdr;
2945 struct lpfc_async_xchg_ctx *axchg = NULL;
2946 char *failwhy = NULL;
2947 uint32_t oxid, sid, did, fctl, size;
2948 int ret = 1;
2949
2950 d_buf = piocb->context2;
2951
2952 nvmebuf = container_of(d_buf, struct hbq_dmabuf, dbuf);
2953 fc_hdr = nvmebuf->hbuf.virt;
2954 oxid = be16_to_cpu(fc_hdr->fh_ox_id);
2955 sid = sli4_sid_from_fc_hdr(fc_hdr);
2956 did = sli4_did_from_fc_hdr(fc_hdr);
2957 fctl = (fc_hdr->fh_f_ctl[0] << 16 |
2958 fc_hdr->fh_f_ctl[1] << 8 |
2959 fc_hdr->fh_f_ctl[2]);
2960 size = bf_get(lpfc_rcqe_length, &nvmebuf->cq_event.cqe.rcqe_cmpl);
2961
2962 lpfc_nvmeio_data(phba, "NVME LS RCV: xri x%x sz %d from %06x\n",
2963 oxid, size, sid);
2964
2965 if (phba->pport->load_flag & FC_UNLOADING) {
2966 failwhy = "Driver Unloading";
2967 } else if (!(phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME)) {
2968 failwhy = "NVME FC4 Disabled";
2969 } else if (!phba->nvmet_support && !phba->pport->localport) {
2970 failwhy = "No Localport";
2971 } else if (phba->nvmet_support && !phba->targetport) {
2972 failwhy = "No Targetport";
2973 } else if (unlikely(fc_hdr->fh_r_ctl != FC_RCTL_ELS4_REQ)) {
2974 failwhy = "Bad NVME LS R_CTL";
2975 } else if (unlikely((fctl & 0x00FF0000) !=
2976 (FC_FC_FIRST_SEQ | FC_FC_END_SEQ | FC_FC_SEQ_INIT))) {
2977 failwhy = "Bad NVME LS F_CTL";
2978 } else {
2979 axchg = kzalloc(sizeof(*axchg), GFP_ATOMIC);
2980 if (!axchg)
2981 failwhy = "No CTX memory";
2982 }
2983
2984 if (unlikely(failwhy)) {
2985 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
2986 "6154 Drop NVME LS: SID %06X OXID x%X: %s\n",
2987 sid, oxid, failwhy);
2988 goto out_fail;
2989 }
2990
2991
2992 ndlp = lpfc_findnode_did(phba->pport, sid);
2993 if (!ndlp ||
2994 ((ndlp->nlp_state != NLP_STE_UNMAPPED_NODE) &&
2995 (ndlp->nlp_state != NLP_STE_MAPPED_NODE))) {
2996 lpfc_printf_log(phba, KERN_ERR, LOG_NVME_DISC,
2997 "6216 NVME Unsol rcv: No ndlp: "
2998 "NPort_ID x%x oxid x%x\n",
2999 sid, oxid);
3000 goto out_fail;
3001 }
3002
3003 axchg->phba = phba;
3004 axchg->ndlp = ndlp;
3005 axchg->size = size;
3006 axchg->oxid = oxid;
3007 axchg->sid = sid;
3008 axchg->wqeq = NULL;
3009 axchg->state = LPFC_NVME_STE_LS_RCV;
3010 axchg->entry_cnt = 1;
3011 axchg->rqb_buffer = (void *)nvmebuf;
3012 axchg->hdwq = &phba->sli4_hba.hdwq[0];
3013 axchg->payload = nvmebuf->dbuf.virt;
3014 INIT_LIST_HEAD(&axchg->list);
3015
3016 if (phba->nvmet_support) {
3017 ret = lpfc_nvmet_handle_lsreq(phba, axchg);
3018 spin_lock_irq(&ndlp->lock);
3019 if (!ret && !(ndlp->fc4_xpt_flags & NLP_XPT_HAS_HH)) {
3020 ndlp->fc4_xpt_flags |= NLP_XPT_HAS_HH;
3021 spin_unlock_irq(&ndlp->lock);
3022
3023
3024
3025
3026
3027 if (!lpfc_nlp_get(ndlp))
3028 goto out_fail;
3029
3030 lpfc_printf_log(phba, KERN_ERR, LOG_NODE,
3031 "6206 NVMET unsol ls_req ndlp x%px "
3032 "DID x%x xflags x%x refcnt %d\n",
3033 ndlp, ndlp->nlp_DID,
3034 ndlp->fc4_xpt_flags,
3035 kref_read(&ndlp->kref));
3036 } else {
3037 spin_unlock_irq(&ndlp->lock);
3038 }
3039 } else {
3040 ret = lpfc_nvme_handle_lsreq(phba, axchg);
3041 }
3042
3043
3044 if (!ret)
3045 return;
3046
3047out_fail:
3048 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
3049 "6155 Drop NVME LS from DID %06X: SID %06X OXID x%X "
3050 "NVMe%s handler failed %d\n",
3051 did, sid, oxid,
3052 (phba->nvmet_support) ? "T" : "I", ret);
3053
3054
3055 lpfc_in_buf_free(phba, &nvmebuf->dbuf);
3056
3057
3058 if (axchg && (fctl & FC_FC_FIRST_SEQ && !(fctl & FC_FC_EX_CTX)))
3059 ret = lpfc_nvme_unsol_ls_issue_abort(phba, axchg, sid, oxid);
3060
3061 if (ret)
3062 kfree(axchg);
3063}
3064
3065
3066
3067
3068
3069
3070
3071
3072
3073
3074
3075
3076
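/**
 * lpfc_complete_unsol_iocb - Route an unsolicited IOCB to its handler
 * @phba: Pointer to HBA context object.
 * @pring: Pointer to the driver SLI ring object.
 * @saveq: Pointer to the unsolicited IOCB.
 * @fch_r_ctl: R_CTL from the frame header.
 * @fch_type: TYPE from the frame header.
 *
 * NVME frames go to the NVME LS handler. Otherwise the ring's profile
 * handler, or the handler registered for the matching R_CTL/TYPE pair, is
 * invoked. Returns 1 when a handler was found, 0 otherwise.
 **/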
3077static int
3078lpfc_complete_unsol_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
3079 struct lpfc_iocbq *saveq, uint32_t fch_r_ctl,
3080 uint32_t fch_type)
3081{
3082 int i;
3083
3084 switch (fch_type) {
3085 case FC_TYPE_NVME:
3086 lpfc_nvme_unsol_ls_handler(phba, saveq);
3087 return 1;
3088 default:
3089 break;
3090 }
3091
3092
3093 if (pring->prt[0].profile) {
3094 if (pring->prt[0].lpfc_sli_rcv_unsol_event)
3095 (pring->prt[0].lpfc_sli_rcv_unsol_event) (phba, pring,
3096 saveq);
3097 return 1;
3098 }
3099
3100
3101 for (i = 0; i < pring->num_mask; i++) {
3102 if ((pring->prt[i].rctl == fch_r_ctl) &&
3103 (pring->prt[i].type == fch_type)) {
3104 if (pring->prt[i].lpfc_sli_rcv_unsol_event)
3105 (pring->prt[i].lpfc_sli_rcv_unsol_event)
3106 (phba, pring, saveq);
3107 return 1;
3108 }
3109 }
3110 return 0;
3111}
3112
3113
3114
3115
3116
3117
3118
3119
3120
3121
3122
3123
3124
3125
3126
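/**
 * lpfc_sli_process_unsol_iocb - Process an unsolicited IOCB
 * @phba: Pointer to HBA context object.
 * @pring: Pointer to the driver SLI ring object.
 * @saveq: Pointer to the received IOCB.
 *
 * Handles async status IOCBs, frees buffers returned by RET_XRI IOCBs, maps
 * HBQ buffer tags into context2/context3, stitches multi-IOCB sequences
 * together on iocb_continue_saveq, derives R_CTL/TYPE, and dispatches the
 * complete sequence through lpfc_complete_unsol_iocb().
 **/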
3127static int
3128lpfc_sli_process_unsol_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
3129 struct lpfc_iocbq *saveq)
3130{
3131	IOCB_t *irsp;
3132	WORD5 *w5p;
3133 uint32_t Rctl, Type;
3134 struct lpfc_iocbq *iocbq;
3135 struct lpfc_dmabuf *dmzbuf;
3136
3137 irsp = &(saveq->iocb);
3138
3139 if (irsp->ulpCommand == CMD_ASYNC_STATUS) {
3140 if (pring->lpfc_sli_rcv_async_status)
3141 pring->lpfc_sli_rcv_async_status(phba, pring, saveq);
3142 else
3143 lpfc_printf_log(phba,
3144 KERN_WARNING,
3145 LOG_SLI,
3146 "0316 Ring %d handler: unexpected "
3147 "ASYNC_STATUS iocb received evt_code "
3148 "0x%x\n",
3149 pring->ringno,
3150 irsp->un.asyncstat.evt_code);
3151 return 1;
3152 }
3153
3154 if ((irsp->ulpCommand == CMD_IOCB_RET_XRI64_CX) &&
3155 (phba->sli3_options & LPFC_SLI3_HBQ_ENABLED)) {
3156 if (irsp->ulpBdeCount > 0) {
3157 dmzbuf = lpfc_sli_get_buff(phba, pring,
3158 irsp->un.ulpWord[3]);
3159 lpfc_in_buf_free(phba, dmzbuf);
3160 }
3161
3162 if (irsp->ulpBdeCount > 1) {
3163 dmzbuf = lpfc_sli_get_buff(phba, pring,
3164 irsp->unsli3.sli3Words[3]);
3165 lpfc_in_buf_free(phba, dmzbuf);
3166 }
3167
3168 if (irsp->ulpBdeCount > 2) {
3169 dmzbuf = lpfc_sli_get_buff(phba, pring,
3170 irsp->unsli3.sli3Words[7]);
3171 lpfc_in_buf_free(phba, dmzbuf);
3172 }
3173
3174 return 1;
3175 }
3176
3177 if (phba->sli3_options & LPFC_SLI3_HBQ_ENABLED) {
3178 if (irsp->ulpBdeCount != 0) {
3179 saveq->context2 = lpfc_sli_get_buff(phba, pring,
3180 irsp->un.ulpWord[3]);
3181 if (!saveq->context2)
3182 lpfc_printf_log(phba,
3183 KERN_ERR,
3184 LOG_SLI,
3185 "0341 Ring %d Cannot find buffer for "
3186 "an unsolicited iocb. tag 0x%x\n",
3187 pring->ringno,
3188 irsp->un.ulpWord[3]);
3189 }
3190 if (irsp->ulpBdeCount == 2) {
3191 saveq->context3 = lpfc_sli_get_buff(phba, pring,
3192 irsp->unsli3.sli3Words[7]);
3193 if (!saveq->context3)
3194 lpfc_printf_log(phba,
3195 KERN_ERR,
3196 LOG_SLI,
3197 "0342 Ring %d Cannot find buffer for an"
3198 " unsolicited iocb. tag 0x%x\n",
3199 pring->ringno,
3200 irsp->unsli3.sli3Words[7]);
3201 }
3202 list_for_each_entry(iocbq, &saveq->list, list) {
3203 irsp = &(iocbq->iocb);
3204 if (irsp->ulpBdeCount != 0) {
3205 iocbq->context2 = lpfc_sli_get_buff(phba, pring,
3206 irsp->un.ulpWord[3]);
3207 if (!iocbq->context2)
3208 lpfc_printf_log(phba,
3209 KERN_ERR,
3210 LOG_SLI,
3211 "0343 Ring %d Cannot find "
3212 "buffer for an unsolicited iocb"
3213 ". tag 0x%x\n", pring->ringno,
3214 irsp->un.ulpWord[3]);
3215 }
3216 if (irsp->ulpBdeCount == 2) {
3217 iocbq->context3 = lpfc_sli_get_buff(phba, pring,
3218 irsp->unsli3.sli3Words[7]);
3219 if (!iocbq->context3)
3220 lpfc_printf_log(phba,
3221 KERN_ERR,
3222 LOG_SLI,
3223 "0344 Ring %d Cannot find "
3224 "buffer for an unsolicited "
3225 "iocb. tag 0x%x\n",
3226 pring->ringno,
3227 irsp->unsli3.sli3Words[7]);
3228 }
3229 }
3230 }
3231 if (irsp->ulpBdeCount != 0 &&
3232 (irsp->ulpCommand == CMD_IOCB_RCV_CONT64_CX ||
3233 irsp->ulpStatus == IOSTAT_INTERMED_RSP)) {
3234 int found = 0;
3235
3236
3237 list_for_each_entry(iocbq, &pring->iocb_continue_saveq, clist) {
3238 if (iocbq->iocb.unsli3.rcvsli3.ox_id ==
3239 saveq->iocb.unsli3.rcvsli3.ox_id) {
3240 list_add_tail(&saveq->list, &iocbq->list);
3241 found = 1;
3242 break;
3243 }
3244 }
3245 if (!found)
3246 list_add_tail(&saveq->clist,
3247 &pring->iocb_continue_saveq);
3248 if (saveq->iocb.ulpStatus != IOSTAT_INTERMED_RSP) {
3249 list_del_init(&iocbq->clist);
3250 saveq = iocbq;
3251 irsp = &(saveq->iocb);
3252 } else
3253 return 0;
3254 }
3255 if ((irsp->ulpCommand == CMD_RCV_ELS_REQ64_CX) ||
3256 (irsp->ulpCommand == CMD_RCV_ELS_REQ_CX) ||
3257 (irsp->ulpCommand == CMD_IOCB_RCV_ELS64_CX)) {
3258 Rctl = FC_RCTL_ELS_REQ;
3259 Type = FC_TYPE_ELS;
3260 } else {
3261 w5p = (WORD5 *)&(saveq->iocb.un.ulpWord[5]);
3262 Rctl = w5p->hcsw.Rctl;
3263 Type = w5p->hcsw.Type;
3264
3265
3266 if ((Rctl == 0) && (pring->ringno == LPFC_ELS_RING) &&
3267 (irsp->ulpCommand == CMD_RCV_SEQUENCE64_CX ||
3268 irsp->ulpCommand == CMD_IOCB_RCV_SEQ64_CX)) {
3269 Rctl = FC_RCTL_ELS_REQ;
3270 Type = FC_TYPE_ELS;
3271 w5p->hcsw.Rctl = Rctl;
3272 w5p->hcsw.Type = Type;
3273 }
3274 }
3275
3276 if (!lpfc_complete_unsol_iocb(phba, pring, saveq, Rctl, Type))
3277 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
3278 "0313 Ring %d handler: unexpected Rctl x%x "
3279 "Type x%x received\n",
3280 pring->ringno, Rctl, Type);
3281
3282 return 1;
3283}
3284
3285
3286
3287
3288
3289
3290
3291
3292
3293
3294
3295
3296
3297
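/**
 * lpfc_sli_iocbq_lookup - Find the command IOCB for a response IOCB
 * @phba: Pointer to HBA context object.
 * @pring: Pointer to the driver SLI ring object.
 * @prspiocb: Pointer to the response IOCB.
 *
 * Looks up the command IOCB by the response's ulpIoTag and, if it is still
 * on the txcmplq, removes it and returns it. Returns NULL (and logs an
 * error) when the iotag is out of range or the command is not outstanding.
 * Uses the ring lock on SLI4 and hbalock otherwise.
 **/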
3298static struct lpfc_iocbq *
3299lpfc_sli_iocbq_lookup(struct lpfc_hba *phba,
3300 struct lpfc_sli_ring *pring,
3301 struct lpfc_iocbq *prspiocb)
3302{
3303 struct lpfc_iocbq *cmd_iocb = NULL;
3304 uint16_t iotag;
3305 spinlock_t *temp_lock = NULL;
3306 unsigned long iflag = 0;
3307
3308 if (phba->sli_rev == LPFC_SLI_REV4)
3309 temp_lock = &pring->ring_lock;
3310 else
3311 temp_lock = &phba->hbalock;
3312
3313 spin_lock_irqsave(temp_lock, iflag);
3314 iotag = prspiocb->iocb.ulpIoTag;
3315
3316 if (iotag != 0 && iotag <= phba->sli.last_iotag) {
3317 cmd_iocb = phba->sli.iocbq_lookup[iotag];
3318 if (cmd_iocb->iocb_flag & LPFC_IO_ON_TXCMPLQ) {
3319
3320 list_del_init(&cmd_iocb->list);
3321 cmd_iocb->iocb_flag &= ~LPFC_IO_ON_TXCMPLQ;
3322 pring->txcmplq_cnt--;
3323 spin_unlock_irqrestore(temp_lock, iflag);
3324 return cmd_iocb;
3325 }
3326 }
3327
3328 spin_unlock_irqrestore(temp_lock, iflag);
3329 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
3330 "0317 iotag x%x is out of "
3331 "range: max iotag x%x wd0 x%x\n",
3332 iotag, phba->sli.last_iotag,
3333 *(((uint32_t *) &prspiocb->iocb) + 7));
3334 return NULL;
3335}
3336
3337
3338
3339
3340
3341
3342
3343
3344
3345
3346
3347
3348
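/**
 * lpfc_sli_iocbq_lookup_by_tag - Find a command IOCB by iotag
 * @phba: Pointer to HBA context object.
 * @pring: Pointer to the driver SLI ring object.
 * @iotag: IOCB tag to look up.
 *
 * Same as lpfc_sli_iocbq_lookup() except that the iotag is passed in
 * directly instead of being extracted from a response IOCB.
 **/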
3349static struct lpfc_iocbq *
3350lpfc_sli_iocbq_lookup_by_tag(struct lpfc_hba *phba,
3351 struct lpfc_sli_ring *pring, uint16_t iotag)
3352{
3353 struct lpfc_iocbq *cmd_iocb = NULL;
3354 spinlock_t *temp_lock = NULL;
3355 unsigned long iflag = 0;
3356
3357 if (phba->sli_rev == LPFC_SLI_REV4)
3358 temp_lock = &pring->ring_lock;
3359 else
3360 temp_lock = &phba->hbalock;
3361
3362 spin_lock_irqsave(temp_lock, iflag);
3363 if (iotag != 0 && iotag <= phba->sli.last_iotag) {
3364 cmd_iocb = phba->sli.iocbq_lookup[iotag];
3365 if (cmd_iocb->iocb_flag & LPFC_IO_ON_TXCMPLQ) {
3366
3367 list_del_init(&cmd_iocb->list);
3368 cmd_iocb->iocb_flag &= ~LPFC_IO_ON_TXCMPLQ;
3369 pring->txcmplq_cnt--;
3370 spin_unlock_irqrestore(temp_lock, iflag);
3371 return cmd_iocb;
3372 }
3373 }
3374
3375 spin_unlock_irqrestore(temp_lock, iflag);
3376 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
3377 "0372 iotag x%x lookup error: max iotag (x%x) "
3378 "iocb_flag x%x\n",
3379 iotag, phba->sli.last_iotag,
3380 cmd_iocb ? cmd_iocb->iocb_flag : 0xffff);
3381 return NULL;
3382}
3383
3384
3385
3386
3387
3388
3389
3390
3391
3392
3393
3394
3395
3396
3397
3398
3399
3400
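/**
 * lpfc_sli_process_sol_iocb - Process a solicited (response) IOCB
 * @phba: Pointer to HBA context object.
 * @pring: Pointer to the driver SLI ring object.
 * @saveq: Pointer to the response IOCB.
 *
 * Matches the response to its command IOCB, propagates abort and
 * exchange-busy status for ELS commands, and invokes the command's
 * iocb_cmpl handler (or releases the IOCB when no handler is set).
 * Unmatched completions on non-ELS rings are logged.
 **/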
3401static int
3402lpfc_sli_process_sol_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
3403 struct lpfc_iocbq *saveq)
3404{
3405 struct lpfc_iocbq *cmdiocbp;
3406 int rc = 1;
3407 unsigned long iflag;
3408
3409 cmdiocbp = lpfc_sli_iocbq_lookup(phba, pring, saveq);
3410 if (cmdiocbp) {
3411 if (cmdiocbp->iocb_cmpl) {
3412
3413
3414
3415
3416 if (saveq->iocb.ulpStatus &&
3417 (pring->ringno == LPFC_ELS_RING) &&
3418 (cmdiocbp->iocb.ulpCommand ==
3419 CMD_ELS_REQUEST64_CR))
3420 lpfc_send_els_failure_event(phba,
3421 cmdiocbp, saveq);
3422
3423
3424
3425
3426
3427 if (pring->ringno == LPFC_ELS_RING) {
3428 if ((phba->sli_rev < LPFC_SLI_REV4) &&
3429 (cmdiocbp->iocb_flag &
3430 LPFC_DRIVER_ABORTED)) {
3431 spin_lock_irqsave(&phba->hbalock,
3432 iflag);
3433 cmdiocbp->iocb_flag &=
3434 ~LPFC_DRIVER_ABORTED;
3435 spin_unlock_irqrestore(&phba->hbalock,
3436 iflag);
3437 saveq->iocb.ulpStatus =
3438 IOSTAT_LOCAL_REJECT;
3439 saveq->iocb.un.ulpWord[4] =
3440 IOERR_SLI_ABORTED;
3441
3442
3443
3444
3445
3446 spin_lock_irqsave(&phba->hbalock,
3447 iflag);
3448 saveq->iocb_flag |= LPFC_DELAY_MEM_FREE;
3449 spin_unlock_irqrestore(&phba->hbalock,
3450 iflag);
3451 }
3452 if (phba->sli_rev == LPFC_SLI_REV4) {
3453 if (saveq->iocb_flag &
3454 LPFC_EXCHANGE_BUSY) {
3455
3456
3457
3458
3459
3460
3461 spin_lock_irqsave(
3462 &phba->hbalock, iflag);
3463 cmdiocbp->iocb_flag |=
3464 LPFC_EXCHANGE_BUSY;
3465 spin_unlock_irqrestore(
3466 &phba->hbalock, iflag);
3467 }
3468 if (cmdiocbp->iocb_flag &
3469 LPFC_DRIVER_ABORTED) {
3470
3471
3472
3473
3474
3475 spin_lock_irqsave(
3476 &phba->hbalock, iflag);
3477 cmdiocbp->iocb_flag &=
3478 ~LPFC_DRIVER_ABORTED;
3479 spin_unlock_irqrestore(
3480 &phba->hbalock, iflag);
3481 cmdiocbp->iocb.ulpStatus =
3482 IOSTAT_LOCAL_REJECT;
3483 cmdiocbp->iocb.un.ulpWord[4] =
3484 IOERR_ABORT_REQUESTED;
3485
3486
3487
3488
3489
3490
3491 saveq->iocb.ulpStatus =
3492 IOSTAT_LOCAL_REJECT;
3493 saveq->iocb.un.ulpWord[4] =
3494 IOERR_SLI_ABORTED;
3495 spin_lock_irqsave(
3496 &phba->hbalock, iflag);
3497 saveq->iocb_flag |=
3498 LPFC_DELAY_MEM_FREE;
3499 spin_unlock_irqrestore(
3500 &phba->hbalock, iflag);
3501 }
3502 }
3503 }
3504 (cmdiocbp->iocb_cmpl) (phba, cmdiocbp, saveq);
3505 } else
3506 lpfc_sli_release_iocbq(phba, cmdiocbp);
3507 } else {
3508
3509
3510
3511
3512
3513 if (pring->ringno != LPFC_ELS_RING) {
3514
3515
3516
3517
3518 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
3519 "0322 Ring %d handler: "
3520 "unexpected completion IoTag x%x "
3521 "Data: x%x x%x x%x x%x\n",
3522 pring->ringno,
3523 saveq->iocb.ulpIoTag,
3524 saveq->iocb.ulpStatus,
3525 saveq->iocb.un.ulpWord[4],
3526 saveq->iocb.ulpCommand,
3527 saveq->iocb.ulpContext);
3528 }
3529 }
3530
3531 return rc;
3532}
3533
3534
3535
3536
3537
3538
3539
3540
3541
3542
3543
3544static void
3545lpfc_sli_rsp_pointers_error(struct lpfc_hba *phba, struct lpfc_sli_ring *pring)
3546{
3547 struct lpfc_pgp *pgp = &phba->port_gp[pring->ringno];
3548
3549
3550
3551
3552 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
3553 "0312 Ring %d handler: portRspPut %d "
3554 "is bigger than rsp ring %d\n",
3555 pring->ringno, le32_to_cpu(pgp->rspPutInx),
3556 pring->sli.sli3.numRiocb);
3557
3558 phba->link_state = LPFC_HBA_ERROR;
3559
3560
3561
3562
3563
3564 phba->work_ha |= HA_ERATT;
3565 phba->work_hs = HS_FFER3;
3566
3567 lpfc_worker_wake_up(phba);
3568
3569 return;
3570}
3571
3572
3573
3574
3575
3576
3577
3578
3579
3580
3581
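/**
 * lpfc_poll_eratt - Error attention polling timer handler
 * @t: Timer context, embedded in the HBA's eratt_poll timer.
 *
 * Updates the interrupts-per-second statistic, checks for an error
 * attention condition and wakes the worker thread when one is found,
 * otherwise restarts the polling timer.
 **/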
3582void lpfc_poll_eratt(struct timer_list *t)
3583{
3584 struct lpfc_hba *phba;
3585 uint32_t eratt = 0;
3586 uint64_t sli_intr, cnt;
3587
3588 phba = from_timer(phba, t, eratt_poll);
3589
3590
3591 sli_intr = phba->sli.slistat.sli_intr;
3592
3593 if (phba->sli.slistat.sli_prev_intr > sli_intr)
3594 cnt = (((uint64_t)(-1) - phba->sli.slistat.sli_prev_intr) +
3595 sli_intr);
3596 else
3597 cnt = (sli_intr - phba->sli.slistat.sli_prev_intr);
3598
3599
3600 do_div(cnt, phba->eratt_poll_interval);
3601 phba->sli.slistat.sli_ips = cnt;
3602
3603 phba->sli.slistat.sli_prev_intr = sli_intr;
3604
3605
3606 eratt = lpfc_sli_check_eratt(phba);
3607
3608 if (eratt)
3609
3610 lpfc_worker_wake_up(phba);
3611 else
3612
3613 mod_timer(&phba->eratt_poll,
3614 jiffies +
3615 msecs_to_jiffies(1000 * phba->eratt_poll_interval));
3616 return;
3617}
3618
3619
3620
3621
3622
3623
3624
3625
3626
3627
3628
3629
3630
3631
3632
3633
3634
3635
3636
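/**
 * lpfc_sli_handle_fast_ring_event - Service completions on the FCP ring
 * @phba: Pointer to HBA context object.
 * @pring: Pointer to the driver SLI ring object.
 * @mask: Host attention register mask for this ring.
 *
 * Walks the response ring from the driver's get index to the port's put
 * index, copies each entry into a local IOCB and completes solicited,
 * abort and unsolicited IOCBs. Bails out early if another context is
 * already servicing the ring (fcp_ring_in_use). Updates the ring get index
 * as entries are consumed and resumes command posting when the port
 * reports the command ring empty.
 **/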
3637int
3638lpfc_sli_handle_fast_ring_event(struct lpfc_hba *phba,
3639 struct lpfc_sli_ring *pring, uint32_t mask)
3640{
3641 struct lpfc_pgp *pgp = &phba->port_gp[pring->ringno];
3642 IOCB_t *irsp = NULL;
3643 IOCB_t *entry = NULL;
3644 struct lpfc_iocbq *cmdiocbq = NULL;
3645 struct lpfc_iocbq rspiocbq;
3646 uint32_t status;
3647 uint32_t portRspPut, portRspMax;
3648 int rc = 1;
3649 lpfc_iocb_type type;
3650 unsigned long iflag;
3651 uint32_t rsp_cmpl = 0;
3652
3653 spin_lock_irqsave(&phba->hbalock, iflag);
3654 pring->stats.iocb_event++;
3655
3656
3657
3658
3659
3660 portRspMax = pring->sli.sli3.numRiocb;
3661 portRspPut = le32_to_cpu(pgp->rspPutInx);
3662 if (unlikely(portRspPut >= portRspMax)) {
3663 lpfc_sli_rsp_pointers_error(phba, pring);
3664 spin_unlock_irqrestore(&phba->hbalock, iflag);
3665 return 1;
3666 }
3667 if (phba->fcp_ring_in_use) {
3668 spin_unlock_irqrestore(&phba->hbalock, iflag);
3669 return 1;
3670 } else
3671 phba->fcp_ring_in_use = 1;
3672
3673 rmb();
3674 while (pring->sli.sli3.rspidx != portRspPut) {
3675
3676
3677
3678
3679
3680 entry = lpfc_resp_iocb(phba, pring);
3681 phba->last_completion_time = jiffies;
3682
3683 if (++pring->sli.sli3.rspidx >= portRspMax)
3684 pring->sli.sli3.rspidx = 0;
3685
3686 lpfc_sli_pcimem_bcopy((uint32_t *) entry,
3687 (uint32_t *) &rspiocbq.iocb,
3688 phba->iocb_rsp_size);
3689 INIT_LIST_HEAD(&(rspiocbq.list));
3690 irsp = &rspiocbq.iocb;
3691
3692 type = lpfc_sli_iocb_cmd_type(irsp->ulpCommand & CMD_IOCB_MASK);
3693 pring->stats.iocb_rsp++;
3694 rsp_cmpl++;
3695
3696 if (unlikely(irsp->ulpStatus)) {
3697
3698
3699
3700
3701 if ((irsp->ulpStatus == IOSTAT_LOCAL_REJECT) &&
3702 ((irsp->un.ulpWord[4] & IOERR_PARAM_MASK) ==
3703 IOERR_NO_RESOURCES)) {
3704 spin_unlock_irqrestore(&phba->hbalock, iflag);
3705 phba->lpfc_rampdown_queue_depth(phba);
3706 spin_lock_irqsave(&phba->hbalock, iflag);
3707 }
3708
3709
3710 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
3711 "0336 Rsp Ring %d error: IOCB Data: "
3712 "x%x x%x x%x x%x x%x x%x x%x x%x\n",
3713 pring->ringno,
3714 irsp->un.ulpWord[0],
3715 irsp->un.ulpWord[1],
3716 irsp->un.ulpWord[2],
3717 irsp->un.ulpWord[3],
3718 irsp->un.ulpWord[4],
3719 irsp->un.ulpWord[5],
3720 *(uint32_t *)&irsp->un1,
3721 *((uint32_t *)&irsp->un1 + 1));
3722 }
3723
3724 switch (type) {
3725 case LPFC_ABORT_IOCB:
3726 case LPFC_SOL_IOCB:
3727
3728
3729
3730
3731 if (unlikely(irsp->ulpCommand == CMD_XRI_ABORTED_CX)) {
3732 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
3733 "0333 IOCB cmd 0x%x"
3734 " processed. Skipping"
3735 " completion\n",
3736 irsp->ulpCommand);
3737 break;
3738 }
3739
3740 spin_unlock_irqrestore(&phba->hbalock, iflag);
3741 cmdiocbq = lpfc_sli_iocbq_lookup(phba, pring,
3742 &rspiocbq);
3743 spin_lock_irqsave(&phba->hbalock, iflag);
3744 if (unlikely(!cmdiocbq))
3745 break;
3746 if (cmdiocbq->iocb_flag & LPFC_DRIVER_ABORTED)
3747 cmdiocbq->iocb_flag &= ~LPFC_DRIVER_ABORTED;
3748 if (cmdiocbq->iocb_cmpl) {
3749 spin_unlock_irqrestore(&phba->hbalock, iflag);
3750 (cmdiocbq->iocb_cmpl)(phba, cmdiocbq,
3751 &rspiocbq);
3752 spin_lock_irqsave(&phba->hbalock, iflag);
3753 }
3754 break;
3755 case LPFC_UNSOL_IOCB:
3756 spin_unlock_irqrestore(&phba->hbalock, iflag);
3757 lpfc_sli_process_unsol_iocb(phba, pring, &rspiocbq);
3758 spin_lock_irqsave(&phba->hbalock, iflag);
3759 break;
3760 default:
3761 if (irsp->ulpCommand == CMD_ADAPTER_MSG) {
3762 char adaptermsg[LPFC_MAX_ADPTMSG];
3763 memset(adaptermsg, 0, LPFC_MAX_ADPTMSG);
3764 memcpy(&adaptermsg[0], (uint8_t *) irsp,
3765 MAX_MSG_DATA);
3766 dev_warn(&((phba->pcidev)->dev),
3767 "lpfc%d: %s\n",
3768 phba->brd_no, adaptermsg);
3769 } else {
3770
3771 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
3772 "0334 Unknown IOCB command "
3773 "Data: x%x, x%x x%x x%x x%x\n",
3774 type, irsp->ulpCommand,
3775 irsp->ulpStatus,
3776 irsp->ulpIoTag,
3777 irsp->ulpContext);
3778 }
3779 break;
3780 }
3781
3782
3783
3784
3785
3786
3787
3788 writel(pring->sli.sli3.rspidx,
3789 &phba->host_gp[pring->ringno].rspGetInx);
3790
3791 if (pring->sli.sli3.rspidx == portRspPut)
3792 portRspPut = le32_to_cpu(pgp->rspPutInx);
3793 }
3794
3795 if ((rsp_cmpl > 0) && (mask & HA_R0RE_REQ)) {
3796 pring->stats.iocb_rsp_full++;
3797 status = ((CA_R0ATT | CA_R0RE_RSP) << (pring->ringno * 4));
3798 writel(status, phba->CAregaddr);
3799 readl(phba->CAregaddr);
3800 }
3801 if ((mask & HA_R0CE_RSP) && (pring->flag & LPFC_CALL_RING_AVAILABLE)) {
3802 pring->flag &= ~LPFC_CALL_RING_AVAILABLE;
3803 pring->stats.iocb_cmd_empty++;
3804
3805
3806 pring->sli.sli3.local_getidx = le32_to_cpu(pgp->cmdGetInx);
3807 lpfc_sli_resume_iocb(phba, pring);
3808
3809 if ((pring->lpfc_sli_cmd_available))
3810 (pring->lpfc_sli_cmd_available) (phba, pring);
3811
3812 }
3813
3814 phba->fcp_ring_in_use = 0;
3815 spin_unlock_irqrestore(&phba->hbalock, iflag);
3816 return rc;
3817}
3818
3819
3820
3821
3822
3823
3824
3825
3826
3827
3828
3829
3830
3831
3832
3833
3834
3835
3836
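/**
 * lpfc_sli_sp_handle_rspiocb - Process a slow-path response IOCB
 * @phba: Pointer to HBA context object.
 * @pring: Pointer to the driver SLI ring object.
 * @rspiocbp: Pointer to the response IOCB just copied from the ring.
 *
 * Accumulates response IOCBs on iocb_continueq until the entry with ulpLe
 * set arrives, then dispatches the whole sequence as a solicited,
 * unsolicited, abort or unknown IOCB and frees the accumulated entries.
 * Returns NULL once a complete sequence has been processed, otherwise
 * returns @rspiocbp so the caller keeps accumulating.
 **/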
3837static struct lpfc_iocbq *
3838lpfc_sli_sp_handle_rspiocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
3839 struct lpfc_iocbq *rspiocbp)
3840{
3841 struct lpfc_iocbq *saveq;
3842 struct lpfc_iocbq *cmdiocbp;
3843 struct lpfc_iocbq *next_iocb;
3844 IOCB_t *irsp = NULL;
3845 uint32_t free_saveq;
3846 uint8_t iocb_cmd_type;
3847 lpfc_iocb_type type;
3848 unsigned long iflag;
3849 int rc;
3850
3851 spin_lock_irqsave(&phba->hbalock, iflag);
3852
3853 list_add_tail(&rspiocbp->list, &(pring->iocb_continueq));
3854 pring->iocb_continueq_cnt++;
3855
3856
3857 irsp = &rspiocbp->iocb;
3858 if (irsp->ulpLe) {
3859
3860
3861
3862
3863 free_saveq = 1;
3864 saveq = list_get_first(&pring->iocb_continueq,
3865 struct lpfc_iocbq, list);
3866 irsp = &(saveq->iocb);
3867 list_del_init(&pring->iocb_continueq);
3868 pring->iocb_continueq_cnt = 0;
3869
3870 pring->stats.iocb_rsp++;
3871
3872
3873
3874
3875
3876 if ((irsp->ulpStatus == IOSTAT_LOCAL_REJECT) &&
3877 ((irsp->un.ulpWord[4] & IOERR_PARAM_MASK) ==
3878 IOERR_NO_RESOURCES)) {
3879 spin_unlock_irqrestore(&phba->hbalock, iflag);
3880 phba->lpfc_rampdown_queue_depth(phba);
3881 spin_lock_irqsave(&phba->hbalock, iflag);
3882 }
3883
3884 if (irsp->ulpStatus) {
3885
3886 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
3887 "0328 Rsp Ring %d error: "
3888 "IOCB Data: "
3889 "x%x x%x x%x x%x "
3890 "x%x x%x x%x x%x "
3891 "x%x x%x x%x x%x "
3892 "x%x x%x x%x x%x\n",
3893 pring->ringno,
3894 irsp->un.ulpWord[0],
3895 irsp->un.ulpWord[1],
3896 irsp->un.ulpWord[2],
3897 irsp->un.ulpWord[3],
3898 irsp->un.ulpWord[4],
3899 irsp->un.ulpWord[5],
3900 *(((uint32_t *) irsp) + 6),
3901 *(((uint32_t *) irsp) + 7),
3902 *(((uint32_t *) irsp) + 8),
3903 *(((uint32_t *) irsp) + 9),
3904 *(((uint32_t *) irsp) + 10),
3905 *(((uint32_t *) irsp) + 11),
3906 *(((uint32_t *) irsp) + 12),
3907 *(((uint32_t *) irsp) + 13),
3908 *(((uint32_t *) irsp) + 14),
3909 *(((uint32_t *) irsp) + 15));
3910 }
3911
3912
3913
3914
3915
3916
3917
3918 iocb_cmd_type = irsp->ulpCommand & CMD_IOCB_MASK;
3919 type = lpfc_sli_iocb_cmd_type(iocb_cmd_type);
3920 switch (type) {
3921 case LPFC_SOL_IOCB:
3922 spin_unlock_irqrestore(&phba->hbalock, iflag);
3923 rc = lpfc_sli_process_sol_iocb(phba, pring, saveq);
3924 spin_lock_irqsave(&phba->hbalock, iflag);
3925 break;
3926
3927 case LPFC_UNSOL_IOCB:
3928 spin_unlock_irqrestore(&phba->hbalock, iflag);
3929 rc = lpfc_sli_process_unsol_iocb(phba, pring, saveq);
3930 spin_lock_irqsave(&phba->hbalock, iflag);
3931 if (!rc)
3932 free_saveq = 0;
3933 break;
3934
3935 case LPFC_ABORT_IOCB:
3936 cmdiocbp = NULL;
3937 if (irsp->ulpCommand != CMD_XRI_ABORTED_CX) {
3938 spin_unlock_irqrestore(&phba->hbalock, iflag);
3939 cmdiocbp = lpfc_sli_iocbq_lookup(phba, pring,
3940 saveq);
3941 spin_lock_irqsave(&phba->hbalock, iflag);
3942 }
3943 if (cmdiocbp) {
3944
3945 if (cmdiocbp->iocb_cmpl) {
3946 spin_unlock_irqrestore(&phba->hbalock,
3947 iflag);
3948 (cmdiocbp->iocb_cmpl)(phba, cmdiocbp,
3949 saveq);
3950 spin_lock_irqsave(&phba->hbalock,
3951 iflag);
3952 } else
3953 __lpfc_sli_release_iocbq(phba,
3954 cmdiocbp);
3955 }
3956 break;
3957
3958 case LPFC_UNKNOWN_IOCB:
3959 if (irsp->ulpCommand == CMD_ADAPTER_MSG) {
3960 char adaptermsg[LPFC_MAX_ADPTMSG];
3961 memset(adaptermsg, 0, LPFC_MAX_ADPTMSG);
3962 memcpy(&adaptermsg[0], (uint8_t *)irsp,
3963 MAX_MSG_DATA);
3964 dev_warn(&((phba->pcidev)->dev),
3965 "lpfc%d: %s\n",
3966 phba->brd_no, adaptermsg);
3967 } else {
3968
3969 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
3970 "0335 Unknown IOCB "
3971 "command Data: x%x "
3972 "x%x x%x x%x\n",
3973 irsp->ulpCommand,
3974 irsp->ulpStatus,
3975 irsp->ulpIoTag,
3976 irsp->ulpContext);
3977 }
3978 break;
3979 }
3980
3981 if (free_saveq) {
3982 list_for_each_entry_safe(rspiocbp, next_iocb,
3983 &saveq->list, list) {
3984 list_del_init(&rspiocbp->list);
3985 __lpfc_sli_release_iocbq(phba, rspiocbp);
3986 }
3987 __lpfc_sli_release_iocbq(phba, saveq);
3988 }
3989 rspiocbp = NULL;
3990 }
3991 spin_unlock_irqrestore(&phba->hbalock, iflag);
3992 return rspiocbp;
3993}
3994
3995
3996
3997
3998
3999
4000
4001
4002
4003
4004void
4005lpfc_sli_handle_slow_ring_event(struct lpfc_hba *phba,
4006 struct lpfc_sli_ring *pring, uint32_t mask)
4007{
4008 phba->lpfc_sli_handle_slow_ring_event(phba, pring, mask);
4009}
4010
4011
4012
4013
4014
4015
4016
4017
4018
4019
4020
4021
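/**
 * lpfc_sli_handle_slow_ring_event_s3 - SLI3 slow-path (ELS) ring handler
 * @phba: Pointer to HBA context object.
 * @pring: Pointer to the driver SLI ring object.
 * @mask: Host attention register mask for this ring.
 *
 * Copies response entries from the ring into freshly allocated IOCB objects
 * and feeds them to lpfc_sli_sp_handle_rspiocb(). Treats an out-of-range
 * put index as an error attention, and resumes command posting when the
 * port signals the command ring empty.
 **/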
4022static void
4023lpfc_sli_handle_slow_ring_event_s3(struct lpfc_hba *phba,
4024 struct lpfc_sli_ring *pring, uint32_t mask)
4025{
4026 struct lpfc_pgp *pgp;
4027 IOCB_t *entry;
4028 IOCB_t *irsp = NULL;
4029 struct lpfc_iocbq *rspiocbp = NULL;
4030 uint32_t portRspPut, portRspMax;
4031 unsigned long iflag;
4032 uint32_t status;
4033
4034 pgp = &phba->port_gp[pring->ringno];
4035 spin_lock_irqsave(&phba->hbalock, iflag);
4036 pring->stats.iocb_event++;
4037
4038
4039
4040
4041
4042 portRspMax = pring->sli.sli3.numRiocb;
4043 portRspPut = le32_to_cpu(pgp->rspPutInx);
4044 if (portRspPut >= portRspMax) {
4045
4046
4047
4048
4049 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
4050 "0303 Ring %d handler: portRspPut %d "
4051 "is bigger than rsp ring %d\n",
4052 pring->ringno, portRspPut, portRspMax);
4053
4054 phba->link_state = LPFC_HBA_ERROR;
4055 spin_unlock_irqrestore(&phba->hbalock, iflag);
4056
4057 phba->work_hs = HS_FFER3;
4058 lpfc_handle_eratt(phba);
4059
4060 return;
4061 }
4062
4063 rmb();
4064 while (pring->sli.sli3.rspidx != portRspPut) {
4065
4066
4067
4068
4069
4070
4071
4072
4073
4074
4075
4076
4077
4078 entry = lpfc_resp_iocb(phba, pring);
4079
4080 phba->last_completion_time = jiffies;
4081 rspiocbp = __lpfc_sli_get_iocbq(phba);
4082 if (rspiocbp == NULL) {
4083 printk(KERN_ERR "%s: out of buffers! Failing "
4084 "completion.\n", __func__);
4085 break;
4086 }
4087
4088 lpfc_sli_pcimem_bcopy(entry, &rspiocbp->iocb,
4089 phba->iocb_rsp_size);
4090 irsp = &rspiocbp->iocb;
4091
4092 if (++pring->sli.sli3.rspidx >= portRspMax)
4093 pring->sli.sli3.rspidx = 0;
4094
4095 if (pring->ringno == LPFC_ELS_RING) {
4096 lpfc_debugfs_slow_ring_trc(phba,
4097 "IOCB rsp ring: wd4:x%08x wd6:x%08x wd7:x%08x",
4098 *(((uint32_t *) irsp) + 4),
4099 *(((uint32_t *) irsp) + 6),
4100 *(((uint32_t *) irsp) + 7));
4101 }
4102
4103 writel(pring->sli.sli3.rspidx,
4104 &phba->host_gp[pring->ringno].rspGetInx);
4105
4106 spin_unlock_irqrestore(&phba->hbalock, iflag);
4107
4108 rspiocbp = lpfc_sli_sp_handle_rspiocb(phba, pring, rspiocbp);
4109 spin_lock_irqsave(&phba->hbalock, iflag);
4110
4111
4112
4113
4114
4115
4116 if (pring->sli.sli3.rspidx == portRspPut) {
4117 portRspPut = le32_to_cpu(pgp->rspPutInx);
4118 }
4119 }
4120
4121 if ((rspiocbp != NULL) && (mask & HA_R0RE_REQ)) {
4122
4123 pring->stats.iocb_rsp_full++;
4124
4125 status = ((CA_R0ATT | CA_R0RE_RSP) << (pring->ringno * 4));
4126 writel(status, phba->CAregaddr);
4127 readl(phba->CAregaddr);
4128 }
4129 if ((mask & HA_R0CE_RSP) && (pring->flag & LPFC_CALL_RING_AVAILABLE)) {
4130 pring->flag &= ~LPFC_CALL_RING_AVAILABLE;
4131 pring->stats.iocb_cmd_empty++;
4132
4133
4134 pring->sli.sli3.local_getidx = le32_to_cpu(pgp->cmdGetInx);
4135 lpfc_sli_resume_iocb(phba, pring);
4136
4137 if ((pring->lpfc_sli_cmd_available))
4138 (pring->lpfc_sli_cmd_available) (phba, pring);
4139
4140 }
4141
4142 spin_unlock_irqrestore(&phba->hbalock, iflag);
4143 return;
4144}
4145
4146
4147
4148
4149
4150
4151
4152
4153
4154
4155
4156
4157
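/**
 * lpfc_sli_handle_slow_ring_event_s4 - SLI4 slow-path queue event handler
 * @phba: Pointer to HBA context object.
 * @pring: Pointer to the driver SLI ring object.
 * @mask: Not used on SLI4; kept for interface compatibility.
 *
 * Drains the sp_queue_event list, turning WCQE completions into response
 * IOCBs for lpfc_sli_sp_handle_rspiocb() and passing receive queue events
 * to lpfc_sli4_handle_received_buffer(). Processes at most 64 events per
 * invocation.
 **/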
4158static void
4159lpfc_sli_handle_slow_ring_event_s4(struct lpfc_hba *phba,
4160 struct lpfc_sli_ring *pring, uint32_t mask)
4161{
4162 struct lpfc_iocbq *irspiocbq;
4163 struct hbq_dmabuf *dmabuf;
4164 struct lpfc_cq_event *cq_event;
4165 unsigned long iflag;
4166 int count = 0;
4167
4168 spin_lock_irqsave(&phba->hbalock, iflag);
4169 phba->hba_flag &= ~HBA_SP_QUEUE_EVT;
4170 spin_unlock_irqrestore(&phba->hbalock, iflag);
4171 while (!list_empty(&phba->sli4_hba.sp_queue_event)) {
4172
4173 spin_lock_irqsave(&phba->hbalock, iflag);
4174 list_remove_head(&phba->sli4_hba.sp_queue_event,
4175 cq_event, struct lpfc_cq_event, list);
4176 spin_unlock_irqrestore(&phba->hbalock, iflag);
4177
4178 switch (bf_get(lpfc_wcqe_c_code, &cq_event->cqe.wcqe_cmpl)) {
4179 case CQE_CODE_COMPL_WQE:
4180 irspiocbq = container_of(cq_event, struct lpfc_iocbq,
4181 cq_event);
4182
4183 irspiocbq = lpfc_sli4_els_wcqe_to_rspiocbq(phba,
4184 irspiocbq);
4185 if (irspiocbq)
4186 lpfc_sli_sp_handle_rspiocb(phba, pring,
4187 irspiocbq);
4188 count++;
4189 break;
4190 case CQE_CODE_RECEIVE:
4191 case CQE_CODE_RECEIVE_V1:
4192 dmabuf = container_of(cq_event, struct hbq_dmabuf,
4193 cq_event);
4194 lpfc_sli4_handle_received_buffer(phba, dmabuf);
4195 count++;
4196 break;
4197 default:
4198 break;
4199 }
4200
4201
4202 if (count == 64)
4203 break;
4204 }
4205}
4206
4207
4208
4209
4210
4211
4212
4213
4214
4215
4216
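/**
 * lpfc_sli_abort_iocb_ring - Abort all pending and outstanding IOCBs on a ring
 * @phba: Pointer to HBA context object.
 * @pring: Pointer to the driver SLI ring object.
 *
 * Moves every IOCB still on the txq to a local list and issues an abort for
 * every IOCB on the txcmplq, then cancels the collected txq entries with
 * IOSTAT_LOCAL_REJECT/IOERR_SLI_ABORTED. For the ELS ring, fabric IOCBs are
 * aborted first.
 **/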
4217void
4218lpfc_sli_abort_iocb_ring(struct lpfc_hba *phba, struct lpfc_sli_ring *pring)
4219{
4220 LIST_HEAD(completions);
4221 struct lpfc_iocbq *iocb, *next_iocb;
4222
4223 if (pring->ringno == LPFC_ELS_RING) {
4224 lpfc_fabric_abort_hba(phba);
4225 }
4226
4227
4228
4229
4230 if (phba->sli_rev >= LPFC_SLI_REV4) {
4231 spin_lock_irq(&pring->ring_lock);
4232 list_splice_init(&pring->txq, &completions);
4233 pring->txq_cnt = 0;
4234 spin_unlock_irq(&pring->ring_lock);
4235
4236 spin_lock_irq(&phba->hbalock);
4237
4238 list_for_each_entry_safe(iocb, next_iocb, &pring->txcmplq, list)
4239 lpfc_sli_issue_abort_iotag(phba, pring, iocb, NULL);
4240 spin_unlock_irq(&phba->hbalock);
4241 } else {
4242 spin_lock_irq(&phba->hbalock);
4243 list_splice_init(&pring->txq, &completions);
4244 pring->txq_cnt = 0;
4245
4246
4247 list_for_each_entry_safe(iocb, next_iocb, &pring->txcmplq, list)
4248 lpfc_sli_issue_abort_iotag(phba, pring, iocb, NULL);
4249 spin_unlock_irq(&phba->hbalock);
4250 }
4251
4252 lpfc_issue_hb_tmo(phba);
4253
4254
4255 lpfc_sli_cancel_iocbs(phba, &completions, IOSTAT_LOCAL_REJECT,
4256 IOERR_SLI_ABORTED);
4257}
4258
4259
4260
4261
4262
4263
4264
4265
4266
4267
4268void
4269lpfc_sli_abort_fcp_rings(struct lpfc_hba *phba)
4270{
4271 struct lpfc_sli *psli = &phba->sli;
4272 struct lpfc_sli_ring *pring;
4273 uint32_t i;
4274
4275
4276 if (phba->sli_rev >= LPFC_SLI_REV4) {
4277 for (i = 0; i < phba->cfg_hdw_queue; i++) {
4278 pring = phba->sli4_hba.hdwq[i].io_wq->pring;
4279 lpfc_sli_abort_iocb_ring(phba, pring);
4280 }
4281 } else {
4282 pring = &psli->sli3_ring[LPFC_FCP_RING];
4283 lpfc_sli_abort_iocb_ring(phba, pring);
4284 }
4285}
4286
4287
4288
4289
4290
4291
4292
4293
4294
4295
4296
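/**
 * lpfc_sli_flush_io_rings - Flush all outstanding I/O on the I/O rings
 * @phba: Pointer to HBA context object.
 *
 * Sets HBA_IOQ_FLUSH (returning immediately if a flush is already in
 * progress or no hardware queues exist), detaches the txq and txcmplq of
 * every I/O ring and cancels the detached IOCBs with
 * IOSTAT_LOCAL_REJECT/IOERR_SLI_DOWN, for the case where the port will not
 * be returning completions.
 **/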
4297void
4298lpfc_sli_flush_io_rings(struct lpfc_hba *phba)
4299{
4300 LIST_HEAD(txq);
4301 LIST_HEAD(txcmplq);
4302 struct lpfc_sli *psli = &phba->sli;
4303 struct lpfc_sli_ring *pring;
4304 uint32_t i;
4305 struct lpfc_iocbq *piocb, *next_iocb;
4306
4307 spin_lock_irq(&phba->hbalock);
4308 if (phba->hba_flag & HBA_IOQ_FLUSH ||
4309 !phba->sli4_hba.hdwq) {
4310 spin_unlock_irq(&phba->hbalock);
4311 return;
4312 }
4313
4314 phba->hba_flag |= HBA_IOQ_FLUSH;
4315 spin_unlock_irq(&phba->hbalock);
4316
4317
4318 if (phba->sli_rev >= LPFC_SLI_REV4) {
4319 for (i = 0; i < phba->cfg_hdw_queue; i++) {
4320 pring = phba->sli4_hba.hdwq[i].io_wq->pring;
4321
4322 spin_lock_irq(&pring->ring_lock);
4323
4324 list_splice_init(&pring->txq, &txq);
4325 list_for_each_entry_safe(piocb, next_iocb,
4326 &pring->txcmplq, list)
4327 piocb->iocb_flag &= ~LPFC_IO_ON_TXCMPLQ;
4328
4329 list_splice_init(&pring->txcmplq, &txcmplq);
4330 pring->txq_cnt = 0;
4331 pring->txcmplq_cnt = 0;
4332 spin_unlock_irq(&pring->ring_lock);
4333
4334
4335 lpfc_sli_cancel_iocbs(phba, &txq,
4336 IOSTAT_LOCAL_REJECT,
4337 IOERR_SLI_DOWN);
4338
4339 lpfc_sli_cancel_iocbs(phba, &txcmplq,
4340 IOSTAT_LOCAL_REJECT,
4341 IOERR_SLI_DOWN);
4342 }
4343 } else {
4344 pring = &psli->sli3_ring[LPFC_FCP_RING];
4345
4346 spin_lock_irq(&phba->hbalock);
4347
4348 list_splice_init(&pring->txq, &txq);
4349 list_for_each_entry_safe(piocb, next_iocb,
4350 &pring->txcmplq, list)
4351 piocb->iocb_flag &= ~LPFC_IO_ON_TXCMPLQ;
4352
4353 list_splice_init(&pring->txcmplq, &txcmplq);
4354 pring->txq_cnt = 0;
4355 pring->txcmplq_cnt = 0;
4356 spin_unlock_irq(&phba->hbalock);
4357
4358
4359 lpfc_sli_cancel_iocbs(phba, &txq, IOSTAT_LOCAL_REJECT,
4360 IOERR_SLI_DOWN);
4361
4362 lpfc_sli_cancel_iocbs(phba, &txcmplq, IOSTAT_LOCAL_REJECT,
4363 IOERR_SLI_DOWN);
4364 }
4365}
4366
4367
4368
4369
4370
4371
4372
4373
4374
4375
4376
4377
4378
4379
4380static int
4381lpfc_sli_brdready_s3(struct lpfc_hba *phba, uint32_t mask)
4382{
4383 uint32_t status;
4384 int i = 0;
4385 int retval = 0;
4386
4387
4388 if (lpfc_readl(phba->HSregaddr, &status))
4389 return 1;
4390
4391 phba->hba_flag |= HBA_NEEDS_CFG_PORT;
4392
4393
4394
4395
4396
4397
4398
4399 while (((status & mask) != mask) &&
4400 !(status & HS_FFERM) &&
4401 i++ < 20) {
4402
4403 if (i <= 5)
4404 msleep(10);
4405 else if (i <= 10)
4406 msleep(500);
4407 else
4408 msleep(2500);
4409
4410 if (i == 15) {
4411
4412 phba->pport->port_state = LPFC_VPORT_UNKNOWN;
4413 lpfc_sli_brdrestart(phba);
4414 }
4415
4416 if (lpfc_readl(phba->HSregaddr, &status)) {
4417 retval = 1;
4418 break;
4419 }
4420 }
4421
4422
4423 if ((status & HS_FFERM) || (i >= 20)) {
4424 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
4425 "2751 Adapter failed to restart, "
4426 "status reg x%x, FW Data: A8 x%x AC x%x\n",
4427 status,
4428 readl(phba->MBslimaddr + 0xa8),
4429 readl(phba->MBslimaddr + 0xac));
4430 phba->link_state = LPFC_HBA_ERROR;
4431 retval = 1;
4432 }
4433
4434 return retval;
4435}
4436
4437
4438
4439
4440
4441
4442
4443
4444
4445
4446
4447
4448static int
4449lpfc_sli_brdready_s4(struct lpfc_hba *phba, uint32_t mask)
4450{
4451 uint32_t status;
4452 int retval = 0;
4453
4454
4455 status = lpfc_sli4_post_status_check(phba);
4456
4457 if (status) {
4458 phba->pport->port_state = LPFC_VPORT_UNKNOWN;
4459 lpfc_sli_brdrestart(phba);
4460 status = lpfc_sli4_post_status_check(phba);
4461 }
4462
4463
4464 if (status) {
4465 phba->link_state = LPFC_HBA_ERROR;
4466 retval = 1;
4467 } else
4468 phba->sli4_hba.intr_enable = 0;
4469
4470 return retval;
4471}
4472
4473
4474
4475
4476
4477
4478
4479
4480
4481int
4482lpfc_sli_brdready(struct lpfc_hba *phba, uint32_t mask)
4483{
4484 return phba->lpfc_sli_brdready(phba, mask);
4485}
4486
4487#define BARRIER_TEST_PATTERN (0xdeadbeef)
4488
4489
4490
4491
4492
4493
4494
4495
4496void lpfc_reset_barrier(struct lpfc_hba *phba)
4497{
4498 uint32_t __iomem *resp_buf;
4499 uint32_t __iomem *mbox_buf;
4500 volatile uint32_t mbox;
4501 uint32_t hc_copy, ha_copy, resp_data;
4502 int i;
4503 uint8_t hdrtype;
4504
4505 lockdep_assert_held(&phba->hbalock);
4506
4507 pci_read_config_byte(phba->pcidev, PCI_HEADER_TYPE, &hdrtype);
4508 if (hdrtype != 0x80 ||
4509 (FC_JEDEC_ID(phba->vpd.rev.biuRev) != HELIOS_JEDEC_ID &&
4510 FC_JEDEC_ID(phba->vpd.rev.biuRev) != THOR_JEDEC_ID))
4511 return;
4512
4513
4514
4515
4516
4517 resp_buf = phba->MBslimaddr;
4518
4519
4520 if (lpfc_readl(phba->HCregaddr, &hc_copy))
4521 return;
4522 writel((hc_copy & ~HC_ERINT_ENA), phba->HCregaddr);
4523 readl(phba->HCregaddr);
4524 phba->link_flag |= LS_IGNORE_ERATT;
4525
4526 if (lpfc_readl(phba->HAregaddr, &ha_copy))
4527 return;
4528 if (ha_copy & HA_ERATT) {
4529
4530 writel(HA_ERATT, phba->HAregaddr);
4531 phba->pport->stopped = 1;
4532 }
4533
4534 mbox = 0;
4535 ((MAILBOX_t *)&mbox)->mbxCommand = MBX_KILL_BOARD;
4536 ((MAILBOX_t *)&mbox)->mbxOwner = OWN_CHIP;
4537
4538 writel(BARRIER_TEST_PATTERN, (resp_buf + 1));
4539 mbox_buf = phba->MBslimaddr;
4540 writel(mbox, mbox_buf);
4541
4542 for (i = 0; i < 50; i++) {
4543 if (lpfc_readl((resp_buf + 1), &resp_data))
4544 return;
4545 if (resp_data != ~(BARRIER_TEST_PATTERN))
4546 mdelay(1);
4547 else
4548 break;
4549 }
4550 resp_data = 0;
4551 if (lpfc_readl((resp_buf + 1), &resp_data))
4552 return;
4553 if (resp_data != ~(BARRIER_TEST_PATTERN)) {
4554 if (phba->sli.sli_flag & LPFC_SLI_ACTIVE ||
4555 phba->pport->stopped)
4556 goto restore_hc;
4557 else
4558 goto clear_errat;
4559 }
4560
4561 ((MAILBOX_t *)&mbox)->mbxOwner = OWN_HOST;
4562 resp_data = 0;
4563 for (i = 0; i < 500; i++) {
4564 if (lpfc_readl(resp_buf, &resp_data))
4565 return;
4566 if (resp_data != mbox)
4567 mdelay(1);
4568 else
4569 break;
4570 }
4571
4572clear_errat:
4573
4574 while (++i < 500) {
4575 if (lpfc_readl(phba->HAregaddr, &ha_copy))
4576 return;
4577 if (!(ha_copy & HA_ERATT))
4578 mdelay(1);
4579 else
4580 break;
4581 }
4582
4583 if (readl(phba->HAregaddr) & HA_ERATT) {
4584 writel(HA_ERATT, phba->HAregaddr);
4585 phba->pport->stopped = 1;
4586 }
4587
4588restore_hc:
4589 phba->link_flag &= ~LS_IGNORE_ERATT;
4590 writel(hc_copy, phba->HCregaddr);
4591 readl(phba->HCregaddr);
4592}
4593
4594
4595
4596
4597
4598
4599
4600
4601
4602
4603
4604
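/**
 * lpfc_sli_brdkill - Issue a KILL_BOARD mailbox command to the HBA
 * @phba: Pointer to HBA context object.
 *
 * Disables error attention interrupts, sends KILL_BOARD without waiting for
 * completion, then polls up to three seconds for the resulting error
 * attention. The board is left in LPFC_HBA_ERROR state and outstanding I/O
 * is cleaned up by lpfc_hba_down_post(). Returns 0 when the error attention
 * was seen, 1 otherwise.
 **/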
4605int
4606lpfc_sli_brdkill(struct lpfc_hba *phba)
4607{
4608 struct lpfc_sli *psli;
4609 LPFC_MBOXQ_t *pmb;
4610 uint32_t status;
4611 uint32_t ha_copy;
4612 int retval;
4613 int i = 0;
4614
4615 psli = &phba->sli;
4616
4617
4618 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
4619 "0329 Kill HBA Data: x%x x%x\n",
4620 phba->pport->port_state, psli->sli_flag);
4621
4622 pmb = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
4623 if (!pmb)
4624 return 1;
4625
4626
4627 spin_lock_irq(&phba->hbalock);
4628 if (lpfc_readl(phba->HCregaddr, &status)) {
4629 spin_unlock_irq(&phba->hbalock);
4630 mempool_free(pmb, phba->mbox_mem_pool);
4631 return 1;
4632 }
4633 status &= ~HC_ERINT_ENA;
4634 writel(status, phba->HCregaddr);
4635 readl(phba->HCregaddr);
4636 phba->link_flag |= LS_IGNORE_ERATT;
4637 spin_unlock_irq(&phba->hbalock);
4638
4639 lpfc_kill_board(phba, pmb);
4640 pmb->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
4641 retval = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT);
4642
4643 if (retval != MBX_SUCCESS) {
4644 if (retval != MBX_BUSY)
4645 mempool_free(pmb, phba->mbox_mem_pool);
4646 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
4647 "2752 KILL_BOARD command failed retval %d\n",
4648 retval);
4649 spin_lock_irq(&phba->hbalock);
4650 phba->link_flag &= ~LS_IGNORE_ERATT;
4651 spin_unlock_irq(&phba->hbalock);
4652 return 1;
4653 }
4654
4655 spin_lock_irq(&phba->hbalock);
4656 psli->sli_flag &= ~LPFC_SLI_ACTIVE;
4657 spin_unlock_irq(&phba->hbalock);
4658
4659 mempool_free(pmb, phba->mbox_mem_pool);
4660
4661
4662
4663
4664
4665
4666 if (lpfc_readl(phba->HAregaddr, &ha_copy))
4667 return 1;
4668 while ((i++ < 30) && !(ha_copy & HA_ERATT)) {
4669 mdelay(100);
4670 if (lpfc_readl(phba->HAregaddr, &ha_copy))
4671 return 1;
4672 }
4673
4674 del_timer_sync(&psli->mbox_tmo);
4675 if (ha_copy & HA_ERATT) {
4676 writel(HA_ERATT, phba->HAregaddr);
4677 phba->pport->stopped = 1;
4678 }
4679 spin_lock_irq(&phba->hbalock);
4680 psli->sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
4681 psli->mbox_active = NULL;
4682 phba->link_flag &= ~LS_IGNORE_ERATT;
4683 spin_unlock_irq(&phba->hbalock);
4684
4685 lpfc_hba_down_post(phba);
4686 phba->link_state = LPFC_HBA_ERROR;
4687
4688 return ha_copy & HA_ERATT ? 0 : 1;
4689}
4690
4691
4692
4693
4694
4695
4696
4697
4698
4699
4700
4701
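/**
 * lpfc_sli_brdreset - Reset an SLI3 HBA
 * @phba: Pointer to HBA context object.
 *
 * Clears link and discovery state, masks PCI parity/SERR reporting, toggles
 * HC_INITFF in the host control register to reset the chip, restores the
 * PCI command register and reinitialises the SLI3 ring indices. Leaves the
 * HBA in LPFC_WARM_START state. Returns 0 on success or -EIO if the PCI
 * command register cannot be read.
 **/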
4702int
4703lpfc_sli_brdreset(struct lpfc_hba *phba)
4704{
4705 struct lpfc_sli *psli;
4706 struct lpfc_sli_ring *pring;
4707 uint16_t cfg_value;
4708 int i;
4709
4710 psli = &phba->sli;
4711
4712
4713 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
4714 "0325 Reset HBA Data: x%x x%x\n",
4715 (phba->pport) ? phba->pport->port_state : 0,
4716 psli->sli_flag);
4717
4718
4719 phba->fc_eventTag = 0;
4720 phba->link_events = 0;
4721 phba->hba_flag |= HBA_NEEDS_CFG_PORT;
4722 if (phba->pport) {
4723 phba->pport->fc_myDID = 0;
4724 phba->pport->fc_prevDID = 0;
4725 }
4726
4727
4728 if (pci_read_config_word(phba->pcidev, PCI_COMMAND, &cfg_value))
4729 return -EIO;
4730
4731 pci_write_config_word(phba->pcidev, PCI_COMMAND,
4732 (cfg_value &
4733 ~(PCI_COMMAND_PARITY | PCI_COMMAND_SERR)));
4734
4735 psli->sli_flag &= ~(LPFC_SLI_ACTIVE | LPFC_PROCESS_LA);
4736
4737
4738 writel(HC_INITFF, phba->HCregaddr);
4739 mdelay(1);
4740 readl(phba->HCregaddr);
4741 writel(0, phba->HCregaddr);
4742 readl(phba->HCregaddr);
4743
4744
4745 pci_write_config_word(phba->pcidev, PCI_COMMAND, cfg_value);
4746
4747
4748 for (i = 0; i < psli->num_rings; i++) {
4749 pring = &psli->sli3_ring[i];
4750 pring->flag = 0;
4751 pring->sli.sli3.rspidx = 0;
4752 pring->sli.sli3.next_cmdidx = 0;
4753 pring->sli.sli3.local_getidx = 0;
4754 pring->sli.sli3.cmdidx = 0;
4755 pring->missbufcnt = 0;
4756 }
4757
4758 phba->link_state = LPFC_WARM_START;
4759 return 0;
4760}
4761
4762
4763
4764
4765
4766
4767
4768
4769
4770
4771
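/**
 * lpfc_sli4_brdreset - Reset an SLI4 HBA
 * @phba: Pointer to HBA context object.
 *
 * Clears link, discovery and FCF state and, unless a firmware dump is in
 * progress, performs a PCI function reset with parity/SERR reporting
 * temporarily masked. Returns the result of the function reset, or -EIO if
 * the PCI command register cannot be read.
 **/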
4772int
4773lpfc_sli4_brdreset(struct lpfc_hba *phba)
4774{
4775 struct lpfc_sli *psli = &phba->sli;
4776 uint16_t cfg_value;
4777 int rc = 0;
4778
4779
4780 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
4781 "0295 Reset HBA Data: x%x x%x x%x\n",
4782 phba->pport->port_state, psli->sli_flag,
4783 phba->hba_flag);
4784
4785
4786 phba->fc_eventTag = 0;
4787 phba->link_events = 0;
4788 phba->pport->fc_myDID = 0;
4789 phba->pport->fc_prevDID = 0;
4790
4791 spin_lock_irq(&phba->hbalock);
4792 psli->sli_flag &= ~(LPFC_PROCESS_LA);
4793 phba->fcf.fcf_flag = 0;
4794 spin_unlock_irq(&phba->hbalock);
4795
4796
4797 if (phba->hba_flag & HBA_FW_DUMP_OP) {
4798 phba->hba_flag &= ~HBA_FW_DUMP_OP;
4799 return rc;
4800 }
4801
4802
4803 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
4804 "0389 Performing PCI function reset!\n");
4805
4806
4807 if (pci_read_config_word(phba->pcidev, PCI_COMMAND, &cfg_value)) {
4808 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
4809 "3205 PCI read Config failed\n");
4810 return -EIO;
4811 }
4812
4813 pci_write_config_word(phba->pcidev, PCI_COMMAND, (cfg_value &
4814 ~(PCI_COMMAND_PARITY | PCI_COMMAND_SERR)));
4815
4816
4817 rc = lpfc_pci_function_reset(phba);
4818
4819
4820 pci_write_config_word(phba->pcidev, PCI_COMMAND, cfg_value);
4821
4822 return rc;
4823}
4824
4825
4826
4827
4828
4829
4830
4831
4832
4833
4834
4835
4836
4837
4838static int
4839lpfc_sli_brdrestart_s3(struct lpfc_hba *phba)
4840{
4841 MAILBOX_t *mb;
4842 struct lpfc_sli *psli;
4843 volatile uint32_t word0;
4844 void __iomem *to_slim;
4845 uint32_t hba_aer_enabled;
4846
4847 spin_lock_irq(&phba->hbalock);
4848
4849
4850 hba_aer_enabled = phba->hba_flag & HBA_AER_ENABLED;
4851
4852 psli = &phba->sli;
4853
4854
4855 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
4856 "0337 Restart HBA Data: x%x x%x\n",
4857 (phba->pport) ? phba->pport->port_state : 0,
4858 psli->sli_flag);
4859
4860 word0 = 0;
4861 mb = (MAILBOX_t *) &word0;
4862 mb->mbxCommand = MBX_RESTART;
4863 mb->mbxHc = 1;
4864
4865 lpfc_reset_barrier(phba);
4866
4867 to_slim = phba->MBslimaddr;
4868 writel(*(uint32_t *) mb, to_slim);
4869 readl(to_slim);
4870
4871
4872 if (phba->pport && phba->pport->port_state)
4873 word0 = 1;
4874 else
4875 word0 = 0;
4876 to_slim = phba->MBslimaddr + sizeof (uint32_t);
4877 writel(*(uint32_t *) mb, to_slim);
4878 readl(to_slim);
4879
4880 lpfc_sli_brdreset(phba);
4881 if (phba->pport)
4882 phba->pport->stopped = 0;
4883 phba->link_state = LPFC_INIT_START;
4884 phba->hba_flag = 0;
4885 spin_unlock_irq(&phba->hbalock);
4886
4887 memset(&psli->lnk_stat_offsets, 0, sizeof(psli->lnk_stat_offsets));
4888 psli->stats_start = ktime_get_seconds();
4889
4890
4891 mdelay(100);
4892
4893
4894 if (hba_aer_enabled)
4895 pci_disable_pcie_error_reporting(phba->pcidev);
4896
4897 lpfc_hba_down_post(phba);
4898
4899 return 0;
4900}
4901
4902
4903
4904
4905
4906
4907
4908
4909
4910
4911static int
4912lpfc_sli_brdrestart_s4(struct lpfc_hba *phba)
4913{
4914 struct lpfc_sli *psli = &phba->sli;
4915 uint32_t hba_aer_enabled;
4916 int rc;
4917
4918
4919 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
4920 "0296 Restart HBA Data: x%x x%x\n",
4921 phba->pport->port_state, psli->sli_flag);
4922
4923
4924 hba_aer_enabled = phba->hba_flag & HBA_AER_ENABLED;
4925
4926 rc = lpfc_sli4_brdreset(phba);
4927 if (rc) {
4928 phba->link_state = LPFC_HBA_ERROR;
4929 goto hba_down_queue;
4930 }
4931
4932 spin_lock_irq(&phba->hbalock);
4933 phba->pport->stopped = 0;
4934 phba->link_state = LPFC_INIT_START;
4935 phba->hba_flag = 0;
4936 spin_unlock_irq(&phba->hbalock);
4937
4938 memset(&psli->lnk_stat_offsets, 0, sizeof(psli->lnk_stat_offsets));
4939 psli->stats_start = ktime_get_seconds();
4940
4941
4942 if (hba_aer_enabled)
4943 pci_disable_pcie_error_reporting(phba->pcidev);
4944
4945hba_down_queue:
4946 lpfc_hba_down_post(phba);
4947 lpfc_sli4_queue_destroy(phba);
4948
4949 return rc;
4950}
4951
4952
4953
4954
4955
4956
4957
4958
4959int
4960lpfc_sli_brdrestart(struct lpfc_hba *phba)
4961{
4962 return phba->lpfc_sli_brdrestart(phba);
4963}
4964
4965
4966
4967
4968
4969
4970
4971
4972
4973
4974
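/**
 * lpfc_sli_chipset_init - Wait for an SLI3 HBA to become ready after reset
 * @phba: Pointer to HBA context object.
 *
 * Polls the host status register for HS_FFRDY and HS_MBRDY with an
 * escalating delay, restarting the board once part-way through and giving
 * up after roughly two minutes. Clears interrupt enables and any pending
 * host attention bits once the chip is ready. Returns 0 on success,
 * -ETIMEDOUT on timeout or -EIO on a register read failure or firmware
 * error.
 **/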
4975int
4976lpfc_sli_chipset_init(struct lpfc_hba *phba)
4977{
4978 uint32_t status, i = 0;
4979
4980
4981 if (lpfc_readl(phba->HSregaddr, &status))
4982 return -EIO;
4983
4984
4985 i = 0;
4986 while ((status & (HS_FFRDY | HS_MBRDY)) != (HS_FFRDY | HS_MBRDY)) {
4987
4988
4989
4990
4991
4992
4993
4994
4995
4996 if (i++ >= 200) {
4997
4998
4999 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
5000 "0436 Adapter failed to init, "
5001 "timeout, status reg x%x, "
5002 "FW Data: A8 x%x AC x%x\n", status,
5003 readl(phba->MBslimaddr + 0xa8),
5004 readl(phba->MBslimaddr + 0xac));
5005 phba->link_state = LPFC_HBA_ERROR;
5006 return -ETIMEDOUT;
5007 }
5008
5009
5010 if (status & HS_FFERM) {
5011
5012
5013
5014 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
5015 "0437 Adapter failed to init, "
5016 "chipset, status reg x%x, "
5017 "FW Data: A8 x%x AC x%x\n", status,
5018 readl(phba->MBslimaddr + 0xa8),
5019 readl(phba->MBslimaddr + 0xac));
5020 phba->link_state = LPFC_HBA_ERROR;
5021 return -EIO;
5022 }
5023
5024 if (i <= 10)
5025 msleep(10);
5026 else if (i <= 100)
5027 msleep(100);
5028 else
5029 msleep(1000);
5030
5031 if (i == 150) {
5032
5033 phba->pport->port_state = LPFC_VPORT_UNKNOWN;
5034 lpfc_sli_brdrestart(phba);
5035 }
5036
5037 if (lpfc_readl(phba->HSregaddr, &status))
5038 return -EIO;
5039 }
5040
5041
5042 if (status & HS_FFERM) {
5043
5044
5045 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
5046 "0438 Adapter failed to init, chipset, "
5047 "status reg x%x, "
5048 "FW Data: A8 x%x AC x%x\n", status,
5049 readl(phba->MBslimaddr + 0xa8),
5050 readl(phba->MBslimaddr + 0xac));
5051 phba->link_state = LPFC_HBA_ERROR;
5052 return -EIO;
5053 }
5054
5055 phba->hba_flag |= HBA_NEEDS_CFG_PORT;
5056
5057
5058 writel(0, phba->HCregaddr);
5059 readl(phba->HCregaddr);
5060
5061
5062 writel(0xffffffff, phba->HAregaddr);
5063 readl(phba->HAregaddr);
5064 return 0;
5065}
5066
5067
5068
5069
5070
5071
5072
5073int
5074lpfc_sli_hbq_count(void)
5075{
5076 return ARRAY_SIZE(lpfc_hbq_defs);
5077}
5078
5079
5080
5081
5082
5083
5084
5085
5086static int
5087lpfc_sli_hbq_entry_count(void)
5088{
5089 int hbq_count = lpfc_sli_hbq_count();
5090 int count = 0;
5091 int i;
5092
5093 for (i = 0; i < hbq_count; ++i)
5094 count += lpfc_hbq_defs[i]->entry_count;
5095 return count;
5096}
5097
5098
5099
5100
5101
5102
5103
5104int
5105lpfc_sli_hbq_size(void)
5106{
5107 return lpfc_sli_hbq_entry_count() * sizeof(struct lpfc_hbq_entry);
5108}
5109
5110
5111
5112
5113
5114
5115
5116
5117
5118
5119static int
5120lpfc_sli_hbq_setup(struct lpfc_hba *phba)
5121{
5122 int hbq_count = lpfc_sli_hbq_count();
5123 LPFC_MBOXQ_t *pmb;
5124 MAILBOX_t *pmbox;
5125 uint32_t hbqno;
5126 uint32_t hbq_entry_index;
5127
5128
5129
5130
5131 pmb = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
5132
5133 if (!pmb)
5134 return -ENOMEM;
5135
5136 pmbox = &pmb->u.mb;
5137
5138
5139 phba->link_state = LPFC_INIT_MBX_CMDS;
5140 phba->hbq_in_use = 1;
5141
5142 hbq_entry_index = 0;
5143 for (hbqno = 0; hbqno < hbq_count; ++hbqno) {
5144 phba->hbqs[hbqno].next_hbqPutIdx = 0;
5145 phba->hbqs[hbqno].hbqPutIdx = 0;
5146 phba->hbqs[hbqno].local_hbqGetIdx = 0;
5147 phba->hbqs[hbqno].entry_count =
5148 lpfc_hbq_defs[hbqno]->entry_count;
5149 lpfc_config_hbq(phba, hbqno, lpfc_hbq_defs[hbqno],
5150 hbq_entry_index, pmb);
5151 hbq_entry_index += phba->hbqs[hbqno].entry_count;
5152
5153 if (lpfc_sli_issue_mbox(phba, pmb, MBX_POLL) != MBX_SUCCESS) {
5154
5155
5156
5157 lpfc_printf_log(phba, KERN_ERR,
5158 LOG_SLI | LOG_VPORT,
5159 "1805 Adapter failed to init. "
5160 "Data: x%x x%x x%x\n",
5161 pmbox->mbxCommand,
5162 pmbox->mbxStatus, hbqno);
5163
5164 phba->link_state = LPFC_HBA_ERROR;
5165 mempool_free(pmb, phba->mbox_mem_pool);
5166 return -ENXIO;
5167 }
5168 }
5169 phba->hbq_count = hbq_count;
5170
5171 mempool_free(pmb, phba->mbox_mem_pool);
5172
5173
5174 for (hbqno = 0; hbqno < hbq_count; ++hbqno)
5175 lpfc_sli_hbqbuf_init_hbqs(phba, hbqno);
5176 return 0;
5177}
5178
5179
5180
5181
5182
5183
5184
5185
5186
5187
5188static int
5189lpfc_sli4_rb_setup(struct lpfc_hba *phba)
5190{
5191 phba->hbq_in_use = 1;
5192
5193
5194
5195
5196
5197 if (phba->cfg_enable_mds_diags && phba->mds_diags_support)
5198 phba->hbqs[LPFC_ELS_HBQ].entry_count =
5199 lpfc_hbq_defs[LPFC_ELS_HBQ]->entry_count >> 1;
5200 else
5201 phba->hbqs[LPFC_ELS_HBQ].entry_count =
5202 lpfc_hbq_defs[LPFC_ELS_HBQ]->entry_count;
5203 phba->hbq_count = 1;
5204 lpfc_sli_hbqbuf_init_hbqs(phba, LPFC_ELS_HBQ);
5205
5206 return 0;
5207}
5208
5209
5210
5211
5212
5213
5214
5215
5216
5217
5218
5219
5220
5221
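/**
 * lpfc_sli_config_port - Issue the CONFIG_PORT mailbox command
 * @phba: Pointer to HBA context object.
 * @sli_mode: SLI revision to request.
 *
 * Restarts and reinitialises the chip (retrying once), then issues
 * CONFIG_PORT by polling. On success, records which SLI3 options (NPIV,
 * HBQs, command ring polling, BlockGuard) the port granted and sets up the
 * host group pointers accordingly. Returns 0 on success or a negative
 * errno on failure.
 **/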
5222int
5223lpfc_sli_config_port(struct lpfc_hba *phba, int sli_mode)
5224{
5225 LPFC_MBOXQ_t *pmb;
5226 uint32_t resetcount = 0, rc = 0, done = 0;
5227
5228 pmb = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
5229 if (!pmb) {
5230 phba->link_state = LPFC_HBA_ERROR;
5231 return -ENOMEM;
5232 }
5233
5234 phba->sli_rev = sli_mode;
5235 while (resetcount < 2 && !done) {
5236 spin_lock_irq(&phba->hbalock);
5237 phba->sli.sli_flag |= LPFC_SLI_MBOX_ACTIVE;
5238 spin_unlock_irq(&phba->hbalock);
5239 phba->pport->port_state = LPFC_VPORT_UNKNOWN;
5240 lpfc_sli_brdrestart(phba);
5241 rc = lpfc_sli_chipset_init(phba);
5242 if (rc)
5243 break;
5244
5245 spin_lock_irq(&phba->hbalock);
5246 phba->sli.sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
5247 spin_unlock_irq(&phba->hbalock);
5248 resetcount++;
5249
5250
5251
5252
5253
5254
5255 rc = lpfc_config_port_prep(phba);
5256 if (rc == -ERESTART) {
5257 phba->link_state = LPFC_LINK_UNKNOWN;
5258 continue;
5259 } else if (rc)
5260 break;
5261
5262 phba->link_state = LPFC_INIT_MBX_CMDS;
5263 lpfc_config_port(phba, pmb);
5264 rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL);
5265 phba->sli3_options &= ~(LPFC_SLI3_NPIV_ENABLED |
5266 LPFC_SLI3_HBQ_ENABLED |
5267 LPFC_SLI3_CRP_ENABLED |
5268 LPFC_SLI3_DSS_ENABLED);
5269 if (rc != MBX_SUCCESS) {
5270 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
5271 "0442 Adapter failed to init, mbxCmd x%x "
5272 "CONFIG_PORT, mbxStatus x%x Data: x%x\n",
5273 pmb->u.mb.mbxCommand, pmb->u.mb.mbxStatus, 0);
5274 spin_lock_irq(&phba->hbalock);
5275 phba->sli.sli_flag &= ~LPFC_SLI_ACTIVE;
5276 spin_unlock_irq(&phba->hbalock);
5277 rc = -ENXIO;
5278 } else {
5279
5280 spin_lock_irq(&phba->hbalock);
5281 phba->sli.sli_flag &= ~LPFC_SLI_ASYNC_MBX_BLK;
5282 spin_unlock_irq(&phba->hbalock);
5283 done = 1;
5284
5285 if ((pmb->u.mb.un.varCfgPort.casabt == 1) &&
5286 (pmb->u.mb.un.varCfgPort.gasabt == 0))
5287 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
5288 "3110 Port did not grant ASABT\n");
5289 }
5290 }
5291 if (!done) {
5292 rc = -EINVAL;
5293 goto do_prep_failed;
5294 }
5295 if (pmb->u.mb.un.varCfgPort.sli_mode == 3) {
5296 if (!pmb->u.mb.un.varCfgPort.cMA) {
5297 rc = -ENXIO;
5298 goto do_prep_failed;
5299 }
5300 if (phba->max_vpi && pmb->u.mb.un.varCfgPort.gmv) {
5301 phba->sli3_options |= LPFC_SLI3_NPIV_ENABLED;
5302 phba->max_vpi = pmb->u.mb.un.varCfgPort.max_vpi;
5303 phba->max_vports = (phba->max_vpi > phba->max_vports) ?
5304 phba->max_vpi : phba->max_vports;
5305
5306 } else
5307 phba->max_vpi = 0;
5308 if (pmb->u.mb.un.varCfgPort.gerbm)
5309 phba->sli3_options |= LPFC_SLI3_HBQ_ENABLED;
5310 if (pmb->u.mb.un.varCfgPort.gcrp)
5311 phba->sli3_options |= LPFC_SLI3_CRP_ENABLED;
5312
5313 phba->hbq_get = phba->mbox->us.s3_pgp.hbq_get;
5314 phba->port_gp = phba->mbox->us.s3_pgp.port;
5315
5316 if (phba->sli3_options & LPFC_SLI3_BG_ENABLED) {
5317 if (pmb->u.mb.un.varCfgPort.gbg == 0) {
5318 phba->cfg_enable_bg = 0;
5319 phba->sli3_options &= ~LPFC_SLI3_BG_ENABLED;
5320 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
5321 "0443 Adapter did not grant "
5322 "BlockGuard\n");
5323 }
5324 }
5325 } else {
5326 phba->hbq_get = NULL;
5327 phba->port_gp = phba->mbox->us.s2.port;
5328 phba->max_vpi = 0;
5329 }
5330do_prep_failed:
5331 mempool_free(pmb, phba->mbox_mem_pool);
5332 return rc;
5333}
5334
5335
5336
5337
5338
5339
5340
5341
5342
5343
5344
5345
5346
5347
5348
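/**
 * lpfc_sli_hba_setup - Main SLI-3 setup function
 * @phba: Pointer to HBA context object.
 *
 * This function configures the port when needed, enables PCIe AER if
 * requested, sets the IOCB command/response sizes, maps the rings,
 * allocates the VPI bitmask and id array for SLI-3, sets up HBQs when
 * granted, and runs the post-configuration steps. Returns 0 on success
 * or a negative errno on failure.
 **/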
5349int
5350lpfc_sli_hba_setup(struct lpfc_hba *phba)
5351{
5352 uint32_t rc;
5353 int i;
5354 int longs;
5355
5356
5357 if (phba->hba_flag & HBA_NEEDS_CFG_PORT) {
5358 rc = lpfc_sli_config_port(phba, LPFC_SLI_REV3);
5359 if (rc)
5360 return -EIO;
5361 phba->hba_flag &= ~HBA_NEEDS_CFG_PORT;
5362 }
5363 phba->fcp_embed_io = 0;
5364
5365
5366 if (phba->cfg_aer_support == 1 && !(phba->hba_flag & HBA_AER_ENABLED)) {
5367 rc = pci_enable_pcie_error_reporting(phba->pcidev);
5368 if (!rc) {
5369 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
5370 "2709 This device supports "
5371 "Advanced Error Reporting (AER)\n");
5372 spin_lock_irq(&phba->hbalock);
5373 phba->hba_flag |= HBA_AER_ENABLED;
5374 spin_unlock_irq(&phba->hbalock);
5375 } else {
5376 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
5377 "2708 This device does not support "
5378 "Advanced Error Reporting (AER): %d\n",
5379 rc);
5380 phba->cfg_aer_support = 0;
5381 }
5382 }
5383
5384 if (phba->sli_rev == 3) {
5385 phba->iocb_cmd_size = SLI3_IOCB_CMD_SIZE;
5386 phba->iocb_rsp_size = SLI3_IOCB_RSP_SIZE;
5387 } else {
5388 phba->iocb_cmd_size = SLI2_IOCB_CMD_SIZE;
5389 phba->iocb_rsp_size = SLI2_IOCB_RSP_SIZE;
5390 phba->sli3_options = 0;
5391 }
5392
5393 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
5394 "0444 Firmware in SLI %x mode. Max_vpi %d\n",
5395 phba->sli_rev, phba->max_vpi);
5396 rc = lpfc_sli_ring_map(phba);
5397
5398 if (rc)
5399 goto lpfc_sli_hba_setup_error;
5400
5401
5402 if (phba->sli_rev == LPFC_SLI_REV3) {
5403
5404
5405
5406
5407
5408 if ((phba->vpi_bmask == NULL) && (phba->vpi_ids == NULL)) {
5409 longs = (phba->max_vpi + BITS_PER_LONG) / BITS_PER_LONG;
5410 phba->vpi_bmask = kcalloc(longs,
5411 sizeof(unsigned long),
5412 GFP_KERNEL);
5413 if (!phba->vpi_bmask) {
5414 rc = -ENOMEM;
5415 goto lpfc_sli_hba_setup_error;
5416 }
5417
5418 phba->vpi_ids = kcalloc(phba->max_vpi + 1,
5419 sizeof(uint16_t),
5420 GFP_KERNEL);
5421 if (!phba->vpi_ids) {
5422 kfree(phba->vpi_bmask);
5423 rc = -ENOMEM;
5424 goto lpfc_sli_hba_setup_error;
5425 }
5426 for (i = 0; i < phba->max_vpi; i++)
5427 phba->vpi_ids[i] = i;
5428 }
5429 }
5430
5431
5432 if (phba->sli3_options & LPFC_SLI3_HBQ_ENABLED) {
5433 rc = lpfc_sli_hbq_setup(phba);
5434 if (rc)
5435 goto lpfc_sli_hba_setup_error;
5436 }
5437 spin_lock_irq(&phba->hbalock);
5438 phba->sli.sli_flag |= LPFC_PROCESS_LA;
5439 spin_unlock_irq(&phba->hbalock);
5440
5441 rc = lpfc_config_port_post(phba);
5442 if (rc)
5443 goto lpfc_sli_hba_setup_error;
5444
5445 return rc;
5446
5447lpfc_sli_hba_setup_error:
5448 phba->link_state = LPFC_HBA_ERROR;
5449 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
5450 "0445 Firmware initialization failed\n");
5451 return rc;
5452}
5453
5454
5455
5456
5457
5458
5459
5460
5461
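/**
 * lpfc_sli4_read_fcoe_params - Read FCoE parameters from config region 23
 * @phba: Pointer to HBA context object.
 *
 * This function dumps configuration region 23 via a polled mailbox
 * command and parses the FCoE parameters (FC map, valid VLAN) from the
 * returned data. Returns 0 on success or a negative errno on failure.
 **/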
5462static int
5463lpfc_sli4_read_fcoe_params(struct lpfc_hba *phba)
5464{
5465 LPFC_MBOXQ_t *mboxq;
5466 struct lpfc_dmabuf *mp;
5467 struct lpfc_mqe *mqe;
5468 uint32_t data_length;
5469 int rc;
5470
5471
5472 phba->valid_vlan = 0;
5473 phba->fc_map[0] = LPFC_FCOE_FCF_MAP0;
5474 phba->fc_map[1] = LPFC_FCOE_FCF_MAP1;
5475 phba->fc_map[2] = LPFC_FCOE_FCF_MAP2;
5476
5477 mboxq = (LPFC_MBOXQ_t *)mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
5478 if (!mboxq)
5479 return -ENOMEM;
5480
5481 mqe = &mboxq->u.mqe;
5482 if (lpfc_sli4_dump_cfg_rg23(phba, mboxq)) {
5483 rc = -ENOMEM;
5484 goto out_free_mboxq;
5485 }
5486
5487 mp = (struct lpfc_dmabuf *)mboxq->ctx_buf;
5488 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
5489
5490 lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI,
5491 "(%d):2571 Mailbox cmd x%x Status x%x "
5492 "Data: x%x x%x x%x x%x x%x x%x x%x x%x x%x "
5493 "x%x x%x x%x x%x x%x x%x x%x x%x x%x "
5494 "CQ: x%x x%x x%x x%x\n",
5495 mboxq->vport ? mboxq->vport->vpi : 0,
5496 bf_get(lpfc_mqe_command, mqe),
5497 bf_get(lpfc_mqe_status, mqe),
5498 mqe->un.mb_words[0], mqe->un.mb_words[1],
5499 mqe->un.mb_words[2], mqe->un.mb_words[3],
5500 mqe->un.mb_words[4], mqe->un.mb_words[5],
5501 mqe->un.mb_words[6], mqe->un.mb_words[7],
5502 mqe->un.mb_words[8], mqe->un.mb_words[9],
5503 mqe->un.mb_words[10], mqe->un.mb_words[11],
5504 mqe->un.mb_words[12], mqe->un.mb_words[13],
5505 mqe->un.mb_words[14], mqe->un.mb_words[15],
5506 mqe->un.mb_words[16], mqe->un.mb_words[50],
5507 mboxq->mcqe.word0,
5508 mboxq->mcqe.mcqe_tag0, mboxq->mcqe.mcqe_tag1,
5509 mboxq->mcqe.trailer);
5510
5511 if (rc) {
5512 lpfc_mbuf_free(phba, mp->virt, mp->phys);
5513 kfree(mp);
5514 rc = -EIO;
5515 goto out_free_mboxq;
5516 }
5517 data_length = mqe->un.mb_words[5];
5518 if (data_length > DMP_RGN23_SIZE) {
5519 lpfc_mbuf_free(phba, mp->virt, mp->phys);
5520 kfree(mp);
5521 rc = -EIO;
5522 goto out_free_mboxq;
5523 }
5524
5525 lpfc_parse_fcoe_conf(phba, mp->virt, data_length);
5526 lpfc_mbuf_free(phba, mp->virt, mp->phys);
5527 kfree(mp);
5528 rc = 0;
5529
5530out_free_mboxq:
5531 mempool_free(mboxq, phba->mbox_mem_pool);
5532 return rc;
5533}
5534
5535
5536
5537
5538
5539
5540
5541
5542
5543
5544
5545
5546
5547
5548
5549
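/**
 * lpfc_sli4_read_rev - Issue READ_REV and collect VPD data
 * @phba: Pointer to HBA context object.
 * @mboxq: Pointer to an allocated mailbox.
 * @vpd: Buffer that receives the VPD data.
 * @vpd_size: On input the size of @vpd, on output the bytes copied.
 *
 * This function issues a READ_REV mailbox command with a DMA buffer for
 * the VPD data and copies the returned VPD into @vpd, shrinking
 * @vpd_size when the port reports less data than requested. Returns 0 on
 * success, -ENOMEM or -EIO on failure.
 **/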
5550static int
5551lpfc_sli4_read_rev(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq,
5552 uint8_t *vpd, uint32_t *vpd_size)
5553{
5554 int rc = 0;
5555 uint32_t dma_size;
5556 struct lpfc_dmabuf *dmabuf;
5557 struct lpfc_mqe *mqe;
5558
5559 dmabuf = kzalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
5560 if (!dmabuf)
5561 return -ENOMEM;
5562
5563
5564
5565
5566
5567 dma_size = *vpd_size;
5568 dmabuf->virt = dma_alloc_coherent(&phba->pcidev->dev, dma_size,
5569 &dmabuf->phys, GFP_KERNEL);
5570 if (!dmabuf->virt) {
5571 kfree(dmabuf);
5572 return -ENOMEM;
5573 }
5574
5575
5576
5577
5578
5579
5580 lpfc_read_rev(phba, mboxq);
5581 mqe = &mboxq->u.mqe;
5582 mqe->un.read_rev.vpd_paddr_high = putPaddrHigh(dmabuf->phys);
5583 mqe->un.read_rev.vpd_paddr_low = putPaddrLow(dmabuf->phys);
5584 mqe->un.read_rev.word1 &= 0x0000FFFF;
5585 bf_set(lpfc_mbx_rd_rev_vpd, &mqe->un.read_rev, 1);
5586 bf_set(lpfc_mbx_rd_rev_avail_len, &mqe->un.read_rev, dma_size);
5587
5588 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
5589 if (rc) {
5590 dma_free_coherent(&phba->pcidev->dev, dma_size,
5591 dmabuf->virt, dmabuf->phys);
5592 kfree(dmabuf);
5593 return -EIO;
5594 }
5595
5596
5597
5598
5599
5600
5601 if (mqe->un.read_rev.avail_vpd_len < *vpd_size)
5602 *vpd_size = mqe->un.read_rev.avail_vpd_len;
5603
5604 memcpy(vpd, dmabuf->virt, *vpd_size);
5605
5606 dma_free_coherent(&phba->pcidev->dev, dma_size,
5607 dmabuf->virt, dmabuf->phys);
5608 kfree(dmabuf);
5609 return 0;
5610}
5611
5612
5613
5614
5615
5616
5617
5618
5619
5620
5621
5622
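/**
 * lpfc_sli4_get_ctl_attr - Retrieve SLI4 device controller attributes
 * @phba: Pointer to HBA context object.
 *
 * This function issues a GET_CNTL_ATTRIBUTES mailbox command and saves
 * the link type, link number and BIOS version reported by the port.
 * Returns 0 on success or a negative errno on failure.
 **/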
5623static int
5624lpfc_sli4_get_ctl_attr(struct lpfc_hba *phba)
5625{
5626 LPFC_MBOXQ_t *mboxq;
5627 struct lpfc_mbx_get_cntl_attributes *mbx_cntl_attr;
5628 struct lpfc_controller_attribute *cntl_attr;
5629 void *virtaddr = NULL;
5630 uint32_t alloclen, reqlen;
5631 uint32_t shdr_status, shdr_add_status;
5632 union lpfc_sli4_cfg_shdr *shdr;
5633 int rc;
5634
5635 mboxq = (LPFC_MBOXQ_t *)mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
5636 if (!mboxq)
5637 return -ENOMEM;
5638
5639
5640 reqlen = sizeof(struct lpfc_mbx_get_cntl_attributes);
5641 alloclen = lpfc_sli4_config(phba, mboxq, LPFC_MBOX_SUBSYSTEM_COMMON,
5642 LPFC_MBOX_OPCODE_GET_CNTL_ATTRIBUTES, reqlen,
5643 LPFC_SLI4_MBX_NEMBED);
5644
5645 if (alloclen < reqlen) {
5646 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
5647 "3084 Allocated DMA memory size (%d) is "
5648 "less than the requested DMA memory size "
5649 "(%d)\n", alloclen, reqlen);
5650 rc = -ENOMEM;
5651 goto out_free_mboxq;
5652 }
5653 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
5654 virtaddr = mboxq->sge_array->addr[0];
5655 mbx_cntl_attr = (struct lpfc_mbx_get_cntl_attributes *)virtaddr;
5656 shdr = &mbx_cntl_attr->cfg_shdr;
5657 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
5658 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
5659 if (shdr_status || shdr_add_status || rc) {
5660 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
5661 "3085 Mailbox x%x (x%x/x%x) failed, "
5662 "rc:x%x, status:x%x, add_status:x%x\n",
5663 bf_get(lpfc_mqe_command, &mboxq->u.mqe),
5664 lpfc_sli_config_mbox_subsys_get(phba, mboxq),
5665 lpfc_sli_config_mbox_opcode_get(phba, mboxq),
5666 rc, shdr_status, shdr_add_status);
5667 rc = -ENXIO;
5668 goto out_free_mboxq;
5669 }
5670
5671 cntl_attr = &mbx_cntl_attr->cntl_attr;
5672 phba->sli4_hba.lnk_info.lnk_dv = LPFC_LNK_DAT_VAL;
5673 phba->sli4_hba.lnk_info.lnk_tp =
5674 bf_get(lpfc_cntl_attr_lnk_type, cntl_attr);
5675 phba->sli4_hba.lnk_info.lnk_no =
5676 bf_get(lpfc_cntl_attr_lnk_numb, cntl_attr);
5677
5678 memset(phba->BIOSVersion, 0, sizeof(phba->BIOSVersion));
5679 strlcat(phba->BIOSVersion, (char *)cntl_attr->bios_ver_str,
5680 sizeof(phba->BIOSVersion));
5681
5682 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
5683 "3086 lnk_type:%d, lnk_numb:%d, bios_ver:%s\n",
5684 phba->sli4_hba.lnk_info.lnk_tp,
5685 phba->sli4_hba.lnk_info.lnk_no,
5686 phba->BIOSVersion);
5687out_free_mboxq:
5688 if (bf_get(lpfc_mqe_command, &mboxq->u.mqe) == MBX_SLI4_CONFIG)
5689 lpfc_sli4_mbox_cmd_free(phba, mboxq);
5690 else
5691 mempool_free(mboxq, phba->mbox_mem_pool);
5692 return rc;
5693}
5694
5695
5696
5697
5698
5699
5700
5701
5702
5703
5704
5705
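/**
 * lpfc_sli4_retrieve_pport_name - Retrieve the physical port name
 * @phba: Pointer to HBA context object.
 *
 * This function reads the port configuration, fetches the controller
 * attributes when the link data is not yet valid, and then issues a
 * GET_PORT_NAME mailbox command to obtain the single-character physical
 * port name, which is stored in phba->Port on success.
 **/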
5706static int
5707lpfc_sli4_retrieve_pport_name(struct lpfc_hba *phba)
5708{
5709 LPFC_MBOXQ_t *mboxq;
5710 struct lpfc_mbx_get_port_name *get_port_name;
5711 uint32_t shdr_status, shdr_add_status;
5712 union lpfc_sli4_cfg_shdr *shdr;
5713 char cport_name = 0;
5714 int rc;
5715
5716
5717 phba->sli4_hba.lnk_info.lnk_dv = LPFC_LNK_DAT_INVAL;
5718 phba->sli4_hba.pport_name_sta = LPFC_SLI4_PPNAME_NON;
5719
5720 mboxq = (LPFC_MBOXQ_t *)mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
5721 if (!mboxq)
5722 return -ENOMEM;
5723
5724 phba->sli4_hba.lnk_info.lnk_dv = LPFC_LNK_DAT_INVAL;
5725 lpfc_sli4_read_config(phba);
5726 if (phba->sli4_hba.lnk_info.lnk_dv == LPFC_LNK_DAT_VAL)
5727 goto retrieve_ppname;
5728
5729
5730 rc = lpfc_sli4_get_ctl_attr(phba);
5731 if (rc)
5732 goto out_free_mboxq;
5733
5734retrieve_ppname:
5735 lpfc_sli4_config(phba, mboxq, LPFC_MBOX_SUBSYSTEM_COMMON,
5736 LPFC_MBOX_OPCODE_GET_PORT_NAME,
5737 sizeof(struct lpfc_mbx_get_port_name) -
5738 sizeof(struct lpfc_sli4_cfg_mhdr),
5739 LPFC_SLI4_MBX_EMBED);
5740 get_port_name = &mboxq->u.mqe.un.get_port_name;
5741 shdr = (union lpfc_sli4_cfg_shdr *)&get_port_name->header.cfg_shdr;
5742 bf_set(lpfc_mbox_hdr_version, &shdr->request, LPFC_OPCODE_VERSION_1);
5743 bf_set(lpfc_mbx_get_port_name_lnk_type, &get_port_name->u.request,
5744 phba->sli4_hba.lnk_info.lnk_tp);
5745 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
5746 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
5747 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
5748 if (shdr_status || shdr_add_status || rc) {
5749 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
5750 "3087 Mailbox x%x (x%x/x%x) failed: "
5751 "rc:x%x, status:x%x, add_status:x%x\n",
5752 bf_get(lpfc_mqe_command, &mboxq->u.mqe),
5753 lpfc_sli_config_mbox_subsys_get(phba, mboxq),
5754 lpfc_sli_config_mbox_opcode_get(phba, mboxq),
5755 rc, shdr_status, shdr_add_status);
5756 rc = -ENXIO;
5757 goto out_free_mboxq;
5758 }
5759 switch (phba->sli4_hba.lnk_info.lnk_no) {
5760 case LPFC_LINK_NUMBER_0:
5761 cport_name = bf_get(lpfc_mbx_get_port_name_name0,
5762 &get_port_name->u.response);
5763 phba->sli4_hba.pport_name_sta = LPFC_SLI4_PPNAME_GET;
5764 break;
5765 case LPFC_LINK_NUMBER_1:
5766 cport_name = bf_get(lpfc_mbx_get_port_name_name1,
5767 &get_port_name->u.response);
5768 phba->sli4_hba.pport_name_sta = LPFC_SLI4_PPNAME_GET;
5769 break;
5770 case LPFC_LINK_NUMBER_2:
5771 cport_name = bf_get(lpfc_mbx_get_port_name_name2,
5772 &get_port_name->u.response);
5773 phba->sli4_hba.pport_name_sta = LPFC_SLI4_PPNAME_GET;
5774 break;
5775 case LPFC_LINK_NUMBER_3:
5776 cport_name = bf_get(lpfc_mbx_get_port_name_name3,
5777 &get_port_name->u.response);
5778 phba->sli4_hba.pport_name_sta = LPFC_SLI4_PPNAME_GET;
5779 break;
5780 default:
5781 break;
5782 }
5783
5784 if (phba->sli4_hba.pport_name_sta == LPFC_SLI4_PPNAME_GET) {
5785 phba->Port[0] = cport_name;
5786 phba->Port[1] = '\0';
5787 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
5788 "3091 SLI get port name: %s\n", phba->Port);
5789 }
5790
5791out_free_mboxq:
5792 if (bf_get(lpfc_mqe_command, &mboxq->u.mqe) == MBX_SLI4_CONFIG)
5793 lpfc_sli4_mbox_cmd_free(phba, mboxq);
5794 else
5795 mempool_free(mboxq, phba->mbox_mem_pool);
5796 return rc;
5797}
5798
5799
5800
5801
5802
5803
5804
5805
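/**
 * lpfc_sli4_arm_cqeq_intr - Arm SLI4 device completion and event queues
 * @phba: Pointer to HBA context object.
 *
 * This function re-arms the mailbox, ELS and NVME LS completion queues,
 * every hardware-queue I/O CQ and EQ, and the NVMET CQ set so that the
 * port will generate interrupts for new completions.
 **/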
5806static void
5807lpfc_sli4_arm_cqeq_intr(struct lpfc_hba *phba)
5808{
5809 int qidx;
5810 struct lpfc_sli4_hba *sli4_hba = &phba->sli4_hba;
5811 struct lpfc_sli4_hdw_queue *qp;
5812 struct lpfc_queue *eq;
5813
5814 sli4_hba->sli4_write_cq_db(phba, sli4_hba->mbx_cq, 0, LPFC_QUEUE_REARM);
5815 sli4_hba->sli4_write_cq_db(phba, sli4_hba->els_cq, 0, LPFC_QUEUE_REARM);
5816 if (sli4_hba->nvmels_cq)
5817 sli4_hba->sli4_write_cq_db(phba, sli4_hba->nvmels_cq, 0,
5818 LPFC_QUEUE_REARM);
5819
5820 if (sli4_hba->hdwq) {
5821
5822 for (qidx = 0; qidx < phba->cfg_hdw_queue; qidx++) {
5823 qp = &sli4_hba->hdwq[qidx];
5824
5825 sli4_hba->sli4_write_cq_db(phba, qp->io_cq, 0,
5826 LPFC_QUEUE_REARM);
5827 }
5828
5829
5830 for (qidx = 0; qidx < phba->cfg_irq_chann; qidx++) {
5831 eq = sli4_hba->hba_eq_hdl[qidx].eq;
5832
5833 sli4_hba->sli4_write_eq_db(phba, eq,
5834 0, LPFC_QUEUE_REARM);
5835 }
5836 }
5837
5838 if (phba->nvmet_support) {
5839 for (qidx = 0; qidx < phba->cfg_nvmet_mrq; qidx++) {
5840 sli4_hba->sli4_write_cq_db(phba,
5841 sli4_hba->nvmet_cqset[qidx], 0,
5842 LPFC_QUEUE_REARM);
5843 }
5844 }
5845}
5846
5847
5848
5849
5850
5851
5852
5853
5854
5855
5856
5857
5858
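/**
 * lpfc_sli4_get_avail_extnt_rsrc - Get available resource extent count
 * @phba: Pointer to HBA context object.
 * @type: The resource extent type.
 * @extnt_count: Buffer to hold the available extent count.
 * @extnt_size: Buffer to hold the element count per extent.
 *
 * This function issues a GET_RSRC_EXTENT_INFO mailbox command and returns
 * the number and size of the extents the port has available for @type.
 **/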
5859int
5860lpfc_sli4_get_avail_extnt_rsrc(struct lpfc_hba *phba, uint16_t type,
5861 uint16_t *extnt_count, uint16_t *extnt_size)
5862{
5863 int rc = 0;
5864 uint32_t length;
5865 uint32_t mbox_tmo;
5866 struct lpfc_mbx_get_rsrc_extent_info *rsrc_info;
5867 LPFC_MBOXQ_t *mbox;
5868
5869 mbox = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
5870 if (!mbox)
5871 return -ENOMEM;
5872
5873
5874 length = (sizeof(struct lpfc_mbx_get_rsrc_extent_info) -
5875 sizeof(struct lpfc_sli4_cfg_mhdr));
5876 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON,
5877 LPFC_MBOX_OPCODE_GET_RSRC_EXTENT_INFO,
5878 length, LPFC_SLI4_MBX_EMBED);
5879
5880
5881 rc = lpfc_sli4_mbox_rsrc_extent(phba, mbox, 0, type,
5882 LPFC_SLI4_MBX_EMBED);
5883 if (unlikely(rc)) {
5884 rc = -EIO;
5885 goto err_exit;
5886 }
5887
5888 if (!phba->sli4_hba.intr_enable)
5889 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
5890 else {
5891 mbox_tmo = lpfc_mbox_tmo_val(phba, mbox);
5892 rc = lpfc_sli_issue_mbox_wait(phba, mbox, mbox_tmo);
5893 }
5894 if (unlikely(rc)) {
5895 rc = -EIO;
5896 goto err_exit;
5897 }
5898
5899 rsrc_info = &mbox->u.mqe.un.rsrc_extent_info;
5900 if (bf_get(lpfc_mbox_hdr_status,
5901 &rsrc_info->header.cfg_shdr.response)) {
5902 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
5903 "2930 Failed to get resource extents "
5904 "Status 0x%x Add'l Status 0x%x\n",
5905 bf_get(lpfc_mbox_hdr_status,
5906 &rsrc_info->header.cfg_shdr.response),
5907 bf_get(lpfc_mbox_hdr_add_status,
5908 &rsrc_info->header.cfg_shdr.response));
5909 rc = -EIO;
5910 goto err_exit;
5911 }
5912
5913 *extnt_count = bf_get(lpfc_mbx_get_rsrc_extent_info_cnt,
5914 &rsrc_info->u.rsp);
5915 *extnt_size = bf_get(lpfc_mbx_get_rsrc_extent_info_size,
5916 &rsrc_info->u.rsp);
5917
5918 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
5919 "3162 Retrieved extents type-%d from port: count:%d, "
5920 "size:%d\n", type, *extnt_count, *extnt_size);
5921
5922err_exit:
5923 mempool_free(mbox, phba->mbox_mem_pool);
5924 return rc;
5925}
5926
5927
5928
5929
5930
5931
5932
5933
5934
5935
5936
5937
5938
5939
5940
5941
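/**
 * lpfc_sli4_chk_avail_extnt_rsrc - Check for extent resource changes
 * @phba: Pointer to HBA context object.
 * @type: The extent type to check.
 *
 * This function compares the extent count and size currently held on the
 * driver's block list with what the port reports as available. It returns
 * 0 if nothing changed, 1 if reprovisioning is required, or -EIO on
 * mailbox failure.
 **/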
5942static int
5943lpfc_sli4_chk_avail_extnt_rsrc(struct lpfc_hba *phba, uint16_t type)
5944{
5945 uint16_t curr_ext_cnt, rsrc_ext_cnt;
5946 uint16_t size_diff, rsrc_ext_size;
5947 int rc = 0;
5948 struct lpfc_rsrc_blks *rsrc_entry;
5949 struct list_head *rsrc_blk_list = NULL;
5950
5951 size_diff = 0;
5952 curr_ext_cnt = 0;
5953 rc = lpfc_sli4_get_avail_extnt_rsrc(phba, type,
5954 &rsrc_ext_cnt,
5955 &rsrc_ext_size);
5956 if (unlikely(rc))
5957 return -EIO;
5958
5959 switch (type) {
5960 case LPFC_RSC_TYPE_FCOE_RPI:
5961 rsrc_blk_list = &phba->sli4_hba.lpfc_rpi_blk_list;
5962 break;
5963 case LPFC_RSC_TYPE_FCOE_VPI:
5964 rsrc_blk_list = &phba->lpfc_vpi_blk_list;
5965 break;
5966 case LPFC_RSC_TYPE_FCOE_XRI:
5967 rsrc_blk_list = &phba->sli4_hba.lpfc_xri_blk_list;
5968 break;
5969 case LPFC_RSC_TYPE_FCOE_VFI:
5970 rsrc_blk_list = &phba->sli4_hba.lpfc_vfi_blk_list;
5971 break;
5972 default:
5973 break;
5974 }
5975
5976 list_for_each_entry(rsrc_entry, rsrc_blk_list, list) {
5977 curr_ext_cnt++;
5978 if (rsrc_entry->rsrc_size != rsrc_ext_size)
5979 size_diff++;
5980 }
5981
5982 if (curr_ext_cnt != rsrc_ext_cnt || size_diff != 0)
5983 rc = 1;
5984
5985 return rc;
5986}
5987
5988
5989
5990
5991
5992
5993
5994
5995
5996
5997
5998
5999
6000
6001
6002
6003
6004
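/**
 * lpfc_sli4_cfg_post_extnts - Post resource extent allocation to the port
 * @phba: Pointer to HBA context object.
 * @extnt_cnt: Number of extents to allocate.
 * @type: The extent type (rpi, xri, vfi, vpi).
 * @emb: Set to the embedded or non-embedded mailbox mode used.
 * @mbox: Pointer to the caller's allocated mailbox structure.
 *
 * This function builds and issues an ALLOC_RSRC_EXTENT mailbox command,
 * choosing an embedded or non-embedded payload depending on the request
 * length. Returns 0 on success or a negative errno on failure.
 **/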
6005static int
6006lpfc_sli4_cfg_post_extnts(struct lpfc_hba *phba, uint16_t extnt_cnt,
6007 uint16_t type, bool *emb, LPFC_MBOXQ_t *mbox)
6008{
6009 int rc = 0;
6010 uint32_t req_len;
6011 uint32_t emb_len;
6012 uint32_t alloc_len, mbox_tmo;
6013
6014
6015 req_len = extnt_cnt * sizeof(uint16_t);
6016
6017
6018
6019
6020
6021 emb_len = sizeof(MAILBOX_t) - sizeof(struct mbox_header) -
6022 sizeof(uint32_t);
6023
6024
6025
6026
6027
6028 *emb = LPFC_SLI4_MBX_EMBED;
6029 if (req_len > emb_len) {
6030 req_len = extnt_cnt * sizeof(uint16_t) +
6031 sizeof(union lpfc_sli4_cfg_shdr) +
6032 sizeof(uint32_t);
6033 *emb = LPFC_SLI4_MBX_NEMBED;
6034 }
6035
6036 alloc_len = lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON,
6037 LPFC_MBOX_OPCODE_ALLOC_RSRC_EXTENT,
6038 req_len, *emb);
6039 if (alloc_len < req_len) {
6040 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
6041 "2982 Allocated DMA memory size (x%x) is "
6042 "less than the requested DMA memory "
6043 "size (x%x)\n", alloc_len, req_len);
6044 return -ENOMEM;
6045 }
6046 rc = lpfc_sli4_mbox_rsrc_extent(phba, mbox, extnt_cnt, type, *emb);
6047 if (unlikely(rc))
6048 return -EIO;
6049
6050 if (!phba->sli4_hba.intr_enable)
6051 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
6052 else {
6053 mbox_tmo = lpfc_mbox_tmo_val(phba, mbox);
6054 rc = lpfc_sli_issue_mbox_wait(phba, mbox, mbox_tmo);
6055 }
6056
6057 if (unlikely(rc))
6058 rc = -EIO;
6059 return rc;
6060}
6061
6062
6063
6064
6065
6066
6067
6068
6069
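/**
 * lpfc_sli4_alloc_extent - Allocate a resource extent type
 * @phba: Pointer to HBA context object.
 * @type: The resource extent type to allocate.
 *
 * This function allocates the requested resource extents from the port,
 * builds the driver's id array and bitmask for that resource type, and
 * links each returned extent onto the corresponding block list.
 **/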
6070static int
6071lpfc_sli4_alloc_extent(struct lpfc_hba *phba, uint16_t type)
6072{
6073 bool emb = false;
6074 uint16_t rsrc_id_cnt, rsrc_cnt, rsrc_size;
6075 uint16_t rsrc_id, rsrc_start, j, k;
6076 uint16_t *ids;
6077 int i, rc;
6078 unsigned long longs;
6079 unsigned long *bmask;
6080 struct lpfc_rsrc_blks *rsrc_blks;
6081 LPFC_MBOXQ_t *mbox;
6082 uint32_t length;
6083 struct lpfc_id_range *id_array = NULL;
6084 void *virtaddr = NULL;
6085 struct lpfc_mbx_nembed_rsrc_extent *n_rsrc;
6086 struct lpfc_mbx_alloc_rsrc_extents *rsrc_ext;
6087 struct list_head *ext_blk_list;
6088
6089 rc = lpfc_sli4_get_avail_extnt_rsrc(phba, type,
6090 &rsrc_cnt,
6091 &rsrc_size);
6092 if (unlikely(rc))
6093 return -EIO;
6094
6095 if ((rsrc_cnt == 0) || (rsrc_size == 0)) {
6096 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
6097 "3009 No available Resource Extents "
6098 "for resource type 0x%x: Count: 0x%x, "
6099 "Size 0x%x\n", type, rsrc_cnt,
6100 rsrc_size);
6101 return -ENOMEM;
6102 }
6103
6104 lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_INIT | LOG_SLI,
6105 "2903 Post resource extents type-0x%x: "
6106 "count:%d, size %d\n", type, rsrc_cnt, rsrc_size);
6107
6108 mbox = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
6109 if (!mbox)
6110 return -ENOMEM;
6111
6112 rc = lpfc_sli4_cfg_post_extnts(phba, rsrc_cnt, type, &emb, mbox);
6113 if (unlikely(rc)) {
6114 rc = -EIO;
6115 goto err_exit;
6116 }
6117
6118
6119
6120
6121
6122
6123
6124 if (emb == LPFC_SLI4_MBX_EMBED) {
6125 rsrc_ext = &mbox->u.mqe.un.alloc_rsrc_extents;
6126 id_array = &rsrc_ext->u.rsp.id[0];
6127 rsrc_cnt = bf_get(lpfc_mbx_rsrc_cnt, &rsrc_ext->u.rsp);
6128 } else {
6129 virtaddr = mbox->sge_array->addr[0];
6130 n_rsrc = (struct lpfc_mbx_nembed_rsrc_extent *) virtaddr;
6131 rsrc_cnt = bf_get(lpfc_mbx_rsrc_cnt, n_rsrc);
6132 id_array = &n_rsrc->id;
6133 }
6134
6135 longs = ((rsrc_cnt * rsrc_size) + BITS_PER_LONG - 1) / BITS_PER_LONG;
6136 rsrc_id_cnt = rsrc_cnt * rsrc_size;
6137
6138
6139
6140
6141
6142 length = sizeof(struct lpfc_rsrc_blks);
6143 switch (type) {
6144 case LPFC_RSC_TYPE_FCOE_RPI:
6145 phba->sli4_hba.rpi_bmask = kcalloc(longs,
6146 sizeof(unsigned long),
6147 GFP_KERNEL);
6148 if (unlikely(!phba->sli4_hba.rpi_bmask)) {
6149 rc = -ENOMEM;
6150 goto err_exit;
6151 }
6152 phba->sli4_hba.rpi_ids = kcalloc(rsrc_id_cnt,
6153 sizeof(uint16_t),
6154 GFP_KERNEL);
6155 if (unlikely(!phba->sli4_hba.rpi_ids)) {
6156 kfree(phba->sli4_hba.rpi_bmask);
6157 rc = -ENOMEM;
6158 goto err_exit;
6159 }
6160
6161
6162
6163
6164
6165
6166 phba->sli4_hba.next_rpi = rsrc_id_cnt;
6167
6168
6169 bmask = phba->sli4_hba.rpi_bmask;
6170 ids = phba->sli4_hba.rpi_ids;
6171 ext_blk_list = &phba->sli4_hba.lpfc_rpi_blk_list;
6172 break;
6173 case LPFC_RSC_TYPE_FCOE_VPI:
6174 phba->vpi_bmask = kcalloc(longs, sizeof(unsigned long),
6175 GFP_KERNEL);
6176 if (unlikely(!phba->vpi_bmask)) {
6177 rc = -ENOMEM;
6178 goto err_exit;
6179 }
6180 phba->vpi_ids = kcalloc(rsrc_id_cnt, sizeof(uint16_t),
6181 GFP_KERNEL);
6182 if (unlikely(!phba->vpi_ids)) {
6183 kfree(phba->vpi_bmask);
6184 rc = -ENOMEM;
6185 goto err_exit;
6186 }
6187
6188
6189 bmask = phba->vpi_bmask;
6190 ids = phba->vpi_ids;
6191 ext_blk_list = &phba->lpfc_vpi_blk_list;
6192 break;
6193 case LPFC_RSC_TYPE_FCOE_XRI:
6194 phba->sli4_hba.xri_bmask = kcalloc(longs,
6195 sizeof(unsigned long),
6196 GFP_KERNEL);
6197 if (unlikely(!phba->sli4_hba.xri_bmask)) {
6198 rc = -ENOMEM;
6199 goto err_exit;
6200 }
6201 phba->sli4_hba.max_cfg_param.xri_used = 0;
6202 phba->sli4_hba.xri_ids = kcalloc(rsrc_id_cnt,
6203 sizeof(uint16_t),
6204 GFP_KERNEL);
6205 if (unlikely(!phba->sli4_hba.xri_ids)) {
6206 kfree(phba->sli4_hba.xri_bmask);
6207 rc = -ENOMEM;
6208 goto err_exit;
6209 }
6210
6211
6212 bmask = phba->sli4_hba.xri_bmask;
6213 ids = phba->sli4_hba.xri_ids;
6214 ext_blk_list = &phba->sli4_hba.lpfc_xri_blk_list;
6215 break;
6216 case LPFC_RSC_TYPE_FCOE_VFI:
6217 phba->sli4_hba.vfi_bmask = kcalloc(longs,
6218 sizeof(unsigned long),
6219 GFP_KERNEL);
6220 if (unlikely(!phba->sli4_hba.vfi_bmask)) {
6221 rc = -ENOMEM;
6222 goto err_exit;
6223 }
6224 phba->sli4_hba.vfi_ids = kcalloc(rsrc_id_cnt,
6225 sizeof(uint16_t),
6226 GFP_KERNEL);
6227 if (unlikely(!phba->sli4_hba.vfi_ids)) {
6228 kfree(phba->sli4_hba.vfi_bmask);
6229 rc = -ENOMEM;
6230 goto err_exit;
6231 }
6232
6233
6234 bmask = phba->sli4_hba.vfi_bmask;
6235 ids = phba->sli4_hba.vfi_ids;
6236 ext_blk_list = &phba->sli4_hba.lpfc_vfi_blk_list;
6237 break;
6238 default:
6239
6240 id_array = NULL;
6241 bmask = NULL;
6242 ids = NULL;
6243 ext_blk_list = NULL;
6244 goto err_exit;
6245 }
6246
6247
6248
6249
6250
6251
6252
6253 for (i = 0, j = 0, k = 0; i < rsrc_cnt; i++) {
6254 if ((i % 2) == 0)
6255 rsrc_id = bf_get(lpfc_mbx_rsrc_id_word4_0,
6256 &id_array[k]);
6257 else
6258 rsrc_id = bf_get(lpfc_mbx_rsrc_id_word4_1,
6259 &id_array[k]);
6260
6261 rsrc_blks = kzalloc(length, GFP_KERNEL);
6262 if (unlikely(!rsrc_blks)) {
6263 rc = -ENOMEM;
6264 kfree(bmask);
6265 kfree(ids);
6266 goto err_exit;
6267 }
6268 rsrc_blks->rsrc_start = rsrc_id;
6269 rsrc_blks->rsrc_size = rsrc_size;
6270 list_add_tail(&rsrc_blks->list, ext_blk_list);
6271 rsrc_start = rsrc_id;
6272 if ((type == LPFC_RSC_TYPE_FCOE_XRI) && (j == 0)) {
6273 phba->sli4_hba.io_xri_start = rsrc_start +
6274 lpfc_sli4_get_iocb_cnt(phba);
6275 }
6276
6277 while (rsrc_id < (rsrc_start + rsrc_size)) {
6278 ids[j] = rsrc_id;
6279 rsrc_id++;
6280 j++;
6281 }
6282
6283 if ((i % 2) == 1)
6284 k++;
6285 }
6286 err_exit:
6287 lpfc_sli4_mbox_cmd_free(phba, mbox);
6288 return rc;
6289}
6290
6291
6292
6293
6294
6295
6296
6297
6298
6299
6300
6301
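/**
 * lpfc_sli4_dealloc_extent - Deallocate a resource extent type
 * @phba: Pointer to HBA context object.
 * @type: The resource extent type to release.
 *
 * This function issues a DEALLOC_RSRC_EXTENT mailbox command to release
 * all extents of @type back to the port and then cleans up the driver's
 * bookkeeping for that resource type.
 **/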
6302static int
6303lpfc_sli4_dealloc_extent(struct lpfc_hba *phba, uint16_t type)
6304{
6305 int rc;
6306 uint32_t length, mbox_tmo = 0;
6307 LPFC_MBOXQ_t *mbox;
6308 struct lpfc_mbx_dealloc_rsrc_extents *dealloc_rsrc;
6309 struct lpfc_rsrc_blks *rsrc_blk, *rsrc_blk_next;
6310
6311 mbox = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
6312 if (!mbox)
6313 return -ENOMEM;
6314
6315
6316
6317
6318
6319
6320 length = (sizeof(struct lpfc_mbx_dealloc_rsrc_extents) -
6321 sizeof(struct lpfc_sli4_cfg_mhdr));
6322 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON,
6323 LPFC_MBOX_OPCODE_DEALLOC_RSRC_EXTENT,
6324 length, LPFC_SLI4_MBX_EMBED);
6325
6326
6327 rc = lpfc_sli4_mbox_rsrc_extent(phba, mbox, 0, type,
6328 LPFC_SLI4_MBX_EMBED);
6329 if (unlikely(rc)) {
6330 rc = -EIO;
6331 goto out_free_mbox;
6332 }
6333 if (!phba->sli4_hba.intr_enable)
6334 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
6335 else {
6336 mbox_tmo = lpfc_mbox_tmo_val(phba, mbox);
6337 rc = lpfc_sli_issue_mbox_wait(phba, mbox, mbox_tmo);
6338 }
6339 if (unlikely(rc)) {
6340 rc = -EIO;
6341 goto out_free_mbox;
6342 }
6343
6344 dealloc_rsrc = &mbox->u.mqe.un.dealloc_rsrc_extents;
6345 if (bf_get(lpfc_mbox_hdr_status,
6346 &dealloc_rsrc->header.cfg_shdr.response)) {
6347 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
6348 "2919 Failed to release resource extents "
6349 "for type %d - Status 0x%x Add'l Status 0x%x. "
6350 "Resource memory not released.\n",
6351 type,
6352 bf_get(lpfc_mbox_hdr_status,
6353 &dealloc_rsrc->header.cfg_shdr.response),
6354 bf_get(lpfc_mbox_hdr_add_status,
6355 &dealloc_rsrc->header.cfg_shdr.response));
6356 rc = -EIO;
6357 goto out_free_mbox;
6358 }
6359
6360
6361 switch (type) {
6362 case LPFC_RSC_TYPE_FCOE_VPI:
6363 kfree(phba->vpi_bmask);
6364 kfree(phba->vpi_ids);
6365 bf_set(lpfc_vpi_rsrc_rdy, &phba->sli4_hba.sli4_flags, 0);
6366 list_for_each_entry_safe(rsrc_blk, rsrc_blk_next,
6367 &phba->lpfc_vpi_blk_list, list) {
6368 list_del_init(&rsrc_blk->list);
6369 kfree(rsrc_blk);
6370 }
6371 phba->sli4_hba.max_cfg_param.vpi_used = 0;
6372 break;
6373 case LPFC_RSC_TYPE_FCOE_XRI:
6374 kfree(phba->sli4_hba.xri_bmask);
6375 kfree(phba->sli4_hba.xri_ids);
6376 list_for_each_entry_safe(rsrc_blk, rsrc_blk_next,
6377 &phba->sli4_hba.lpfc_xri_blk_list, list) {
6378 list_del_init(&rsrc_blk->list);
6379 kfree(rsrc_blk);
6380 }
6381 break;
6382 case LPFC_RSC_TYPE_FCOE_VFI:
6383 kfree(phba->sli4_hba.vfi_bmask);
6384 kfree(phba->sli4_hba.vfi_ids);
6385 bf_set(lpfc_vfi_rsrc_rdy, &phba->sli4_hba.sli4_flags, 0);
6386 list_for_each_entry_safe(rsrc_blk, rsrc_blk_next,
6387 &phba->sli4_hba.lpfc_vfi_blk_list, list) {
6388 list_del_init(&rsrc_blk->list);
6389 kfree(rsrc_blk);
6390 }
6391 break;
6392 case LPFC_RSC_TYPE_FCOE_RPI:
6393
6394 list_for_each_entry_safe(rsrc_blk, rsrc_blk_next,
6395 &phba->sli4_hba.lpfc_rpi_blk_list, list) {
6396 list_del_init(&rsrc_blk->list);
6397 kfree(rsrc_blk);
6398 }
6399 break;
6400 default:
6401 break;
6402 }
6403
6404 bf_set(lpfc_idx_rsrc_rdy, &phba->sli4_hba.sli4_flags, 0);
6405
6406 out_free_mbox:
6407 mempool_free(mbox, phba->mbox_mem_pool);
6408 return rc;
6409}
6410
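/**
 * lpfc_set_features - Build a SET_FEATURES mailbox command
 * @phba: Pointer to HBA context object.
 * @mbox: Pointer to the caller's allocated mailbox structure.
 * @feature: The feature to request (UE recovery, MDS diags or dual dump).
 *
 * This function formats the embedded SET_FEATURES command; the caller is
 * responsible for issuing the mailbox.
 **/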
6411static void
6412lpfc_set_features(struct lpfc_hba *phba, LPFC_MBOXQ_t *mbox,
6413 uint32_t feature)
6414{
6415 uint32_t len;
6416
6417 len = sizeof(struct lpfc_mbx_set_feature) -
6418 sizeof(struct lpfc_sli4_cfg_mhdr);
6419 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON,
6420 LPFC_MBOX_OPCODE_SET_FEATURES, len,
6421 LPFC_SLI4_MBX_EMBED);
6422
6423 switch (feature) {
6424 case LPFC_SET_UE_RECOVERY:
6425 bf_set(lpfc_mbx_set_feature_UER,
6426 &mbox->u.mqe.un.set_feature, 1);
6427 mbox->u.mqe.un.set_feature.feature = LPFC_SET_UE_RECOVERY;
6428 mbox->u.mqe.un.set_feature.param_len = 8;
6429 break;
6430 case LPFC_SET_MDS_DIAGS:
6431 bf_set(lpfc_mbx_set_feature_mds,
6432 &mbox->u.mqe.un.set_feature, 1);
6433 bf_set(lpfc_mbx_set_feature_mds_deep_loopbk,
6434 &mbox->u.mqe.un.set_feature, 1);
6435 mbox->u.mqe.un.set_feature.feature = LPFC_SET_MDS_DIAGS;
6436 mbox->u.mqe.un.set_feature.param_len = 8;
6437 break;
6438 case LPFC_SET_DUAL_DUMP:
6439 bf_set(lpfc_mbx_set_feature_dd,
6440 &mbox->u.mqe.un.set_feature, LPFC_ENABLE_DUAL_DUMP);
6441 bf_set(lpfc_mbx_set_feature_ddquery,
6442 &mbox->u.mqe.un.set_feature, 0);
6443 mbox->u.mqe.un.set_feature.feature = LPFC_SET_DUAL_DUMP;
6444 mbox->u.mqe.un.set_feature.param_len = 4;
6445 break;
6446 }
6447
6448 return;
6449}
6450
6451
6452
6453
6454
6455
6456
6457
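/**
 * lpfc_ras_stop_fwlog - Disable FW logging by the adapter
 * @phba: Pointer to HBA context object.
 *
 * This function marks the RAS firmware-log state inactive, requests the
 * port to stop logging through the physical device control register, and
 * waits briefly for in-flight DMA to complete.
 **/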
6458void
6459lpfc_ras_stop_fwlog(struct lpfc_hba *phba)
6460{
6461 struct lpfc_ras_fwlog *ras_fwlog = &phba->ras_fwlog;
6462
6463 spin_lock_irq(&phba->hbalock);
6464 ras_fwlog->state = INACTIVE;
6465 spin_unlock_irq(&phba->hbalock);
6466
6467
6468 writel(LPFC_CTL_PDEV_CTL_DDL_RAS,
6469 phba->sli4_hba.conf_regs_memmap_p + LPFC_CTL_PDEV_CTL_OFFSET);
6470
6471
6472 usleep_range(10 * 1000, 20 * 1000);
6473}
6474
6475
6476
6477
6478
6479
6480
6481
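/**
 * lpfc_sli4_ras_dma_free - Free memory allocated for FW logging
 * @phba: Pointer to HBA context object.
 *
 * This function frees the firmware-log DMA buffers and the LWPD buffer
 * and marks the RAS state inactive.
 **/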
6482void
6483lpfc_sli4_ras_dma_free(struct lpfc_hba *phba)
6484{
6485 struct lpfc_ras_fwlog *ras_fwlog = &phba->ras_fwlog;
6486 struct lpfc_dmabuf *dmabuf, *next;
6487
6488 if (!list_empty(&ras_fwlog->fwlog_buff_list)) {
6489 list_for_each_entry_safe(dmabuf, next,
6490 &ras_fwlog->fwlog_buff_list,
6491 list) {
6492 list_del(&dmabuf->list);
6493 dma_free_coherent(&phba->pcidev->dev,
6494 LPFC_RAS_MAX_ENTRY_SIZE,
6495 dmabuf->virt, dmabuf->phys);
6496 kfree(dmabuf);
6497 }
6498 }
6499
6500 if (ras_fwlog->lwpd.virt) {
6501 dma_free_coherent(&phba->pcidev->dev,
6502 sizeof(uint32_t) * 2,
6503 ras_fwlog->lwpd.virt,
6504 ras_fwlog->lwpd.phys);
6505 ras_fwlog->lwpd.virt = NULL;
6506 }
6507
6508 spin_lock_irq(&phba->hbalock);
6509 ras_fwlog->state = INACTIVE;
6510 spin_unlock_irq(&phba->hbalock);
6511}
6512
6513
6514
6515
6516
6517
6518
6519
6520
6521
6522
6523
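/**
 * lpfc_sli4_ras_dma_alloc - Allocate memory for FW logging
 * @phba: Pointer to HBA context object.
 * @fwlog_buff_count: Number of firmware-log buffers to allocate.
 *
 * This function allocates the LWPD and the requested number of DMA
 * buffers for firmware logging. On any failure everything allocated so
 * far is released and -ENOMEM is returned.
 **/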
6524static int
6525lpfc_sli4_ras_dma_alloc(struct lpfc_hba *phba,
6526 uint32_t fwlog_buff_count)
6527{
6528 struct lpfc_ras_fwlog *ras_fwlog = &phba->ras_fwlog;
6529 struct lpfc_dmabuf *dmabuf;
6530 int rc = 0, i = 0;
6531
6532
6533 INIT_LIST_HEAD(&ras_fwlog->fwlog_buff_list);
6534
6535
6536 ras_fwlog->lwpd.virt = dma_alloc_coherent(&phba->pcidev->dev,
6537 sizeof(uint32_t) * 2,
6538 &ras_fwlog->lwpd.phys,
6539 GFP_KERNEL);
6540 if (!ras_fwlog->lwpd.virt) {
6541 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
6542 "6185 LWPD Memory Alloc Failed\n");
6543
6544 return -ENOMEM;
6545 }
6546
6547 ras_fwlog->fw_buffcount = fwlog_buff_count;
6548 for (i = 0; i < ras_fwlog->fw_buffcount; i++) {
6549 dmabuf = kzalloc(sizeof(struct lpfc_dmabuf),
6550 GFP_KERNEL);
6551 if (!dmabuf) {
6552 rc = -ENOMEM;
6553 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
6554 "6186 Memory Alloc failed FW logging");
6555 goto free_mem;
6556 }
6557
6558 dmabuf->virt = dma_alloc_coherent(&phba->pcidev->dev,
6559 LPFC_RAS_MAX_ENTRY_SIZE,
6560 &dmabuf->phys, GFP_KERNEL);
6561 if (!dmabuf->virt) {
6562 kfree(dmabuf);
6563 rc = -ENOMEM;
6564 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
6565 "6187 DMA Alloc Failed FW logging");
6566 goto free_mem;
6567 }
6568 dmabuf->buffer_tag = i;
6569 list_add_tail(&dmabuf->list, &ras_fwlog->fwlog_buff_list);
6570 }
6571
6572free_mem:
6573 if (rc)
6574 lpfc_sli4_ras_dma_free(phba);
6575
6576 return rc;
6577}
6578
6579
6580
6581
6582
6583
6584
6585
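/**
 * lpfc_sli4_ras_mbox_cmpl - Completion handler for the RAS mailbox
 * @phba: Pointer to HBA context object.
 * @pmb: Pointer to the driver internal queue element for the mailbox.
 *
 * On success this handler marks firmware logging active; on failure it
 * disables RAS support and frees the logging buffers. The mailbox is
 * freed in either case.
 **/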
6586static void
6587lpfc_sli4_ras_mbox_cmpl(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
6588{
6589 MAILBOX_t *mb;
6590 union lpfc_sli4_cfg_shdr *shdr;
6591 uint32_t shdr_status, shdr_add_status;
6592 struct lpfc_ras_fwlog *ras_fwlog = &phba->ras_fwlog;
6593
6594 mb = &pmb->u.mb;
6595
6596 shdr = (union lpfc_sli4_cfg_shdr *)
6597 &pmb->u.mqe.un.ras_fwlog.header.cfg_shdr;
6598 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
6599 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
6600
6601 if (mb->mbxStatus != MBX_SUCCESS || shdr_status) {
6602 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
6603 "6188 FW LOG mailbox "
6604 "completed with status x%x add_status x%x,"
6605 " mbx status x%x\n",
6606 shdr_status, shdr_add_status, mb->mbxStatus);
6607
6608 ras_fwlog->ras_hwsupport = false;
6609 goto disable_ras;
6610 }
6611
6612 spin_lock_irq(&phba->hbalock);
6613 ras_fwlog->state = ACTIVE;
6614 spin_unlock_irq(&phba->hbalock);
6615 mempool_free(pmb, phba->mbox_mem_pool);
6616
6617 return;
6618
6619disable_ras:
6620
6621 lpfc_sli4_ras_dma_free(phba);
6622 mempool_free(pmb, phba->mbox_mem_pool);
6623}
6624
6625
6626
6627
6628
6629
6630
6631
6632
6633
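/**
 * lpfc_sli4_ras_fwlog_init - Initialize firmware logging
 * @phba: Pointer to HBA context object.
 * @fwlog_level: Firmware logging verbosity.
 * @fwlog_enable: Enable/disable firmware logging.
 *
 * This function allocates the logging buffers when they do not yet exist
 * and issues a SET_DIAG_LOG_OPTION mailbox command (non-blocking) that
 * registers the buffers and the LWPD with the port.
 **/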
6634int
6635lpfc_sli4_ras_fwlog_init(struct lpfc_hba *phba,
6636 uint32_t fwlog_level,
6637 uint32_t fwlog_enable)
6638{
6639 struct lpfc_ras_fwlog *ras_fwlog = &phba->ras_fwlog;
6640 struct lpfc_mbx_set_ras_fwlog *mbx_fwlog = NULL;
6641 struct lpfc_dmabuf *dmabuf;
6642 LPFC_MBOXQ_t *mbox;
6643 uint32_t len = 0, fwlog_buffsize, fwlog_entry_count;
6644 int rc = 0;
6645
6646 spin_lock_irq(&phba->hbalock);
6647 ras_fwlog->state = INACTIVE;
6648 spin_unlock_irq(&phba->hbalock);
6649
6650 fwlog_buffsize = (LPFC_RAS_MIN_BUFF_POST_SIZE *
6651 phba->cfg_ras_fwlog_buffsize);
6652 fwlog_entry_count = (fwlog_buffsize/LPFC_RAS_MAX_ENTRY_SIZE);
6653
6654
6655
6656
6657
6658 if (!ras_fwlog->lwpd.virt) {
6659 rc = lpfc_sli4_ras_dma_alloc(phba, fwlog_entry_count);
6660 if (rc) {
6661 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
6662 "6189 FW Log Memory Allocation Failed");
6663 return rc;
6664 }
6665 }
6666
6667
6668 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
6669 if (!mbox) {
6670 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
6671 "6190 RAS MBX Alloc Failed");
6672 rc = -ENOMEM;
6673 goto mem_free;
6674 }
6675
6676 ras_fwlog->fw_loglevel = fwlog_level;
6677 len = (sizeof(struct lpfc_mbx_set_ras_fwlog) -
6678 sizeof(struct lpfc_sli4_cfg_mhdr));
6679
6680 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_LOWLEVEL,
6681 LPFC_MBOX_OPCODE_SET_DIAG_LOG_OPTION,
6682 len, LPFC_SLI4_MBX_EMBED);
6683
6684 mbx_fwlog = (struct lpfc_mbx_set_ras_fwlog *)&mbox->u.mqe.un.ras_fwlog;
6685 bf_set(lpfc_fwlog_enable, &mbx_fwlog->u.request,
6686 fwlog_enable);
6687 bf_set(lpfc_fwlog_loglvl, &mbx_fwlog->u.request,
6688 ras_fwlog->fw_loglevel);
6689 bf_set(lpfc_fwlog_buffcnt, &mbx_fwlog->u.request,
6690 ras_fwlog->fw_buffcount);
6691 bf_set(lpfc_fwlog_buffsz, &mbx_fwlog->u.request,
6692 LPFC_RAS_MAX_ENTRY_SIZE/SLI4_PAGE_SIZE);
6693
6694
6695 list_for_each_entry(dmabuf, &ras_fwlog->fwlog_buff_list, list) {
6696 memset(dmabuf->virt, 0, LPFC_RAS_MAX_ENTRY_SIZE);
6697
6698 mbx_fwlog->u.request.buff_fwlog[dmabuf->buffer_tag].addr_lo =
6699 putPaddrLow(dmabuf->phys);
6700
6701 mbx_fwlog->u.request.buff_fwlog[dmabuf->buffer_tag].addr_hi =
6702 putPaddrHigh(dmabuf->phys);
6703 }
6704
6705
6706 mbx_fwlog->u.request.lwpd.addr_lo = putPaddrLow(ras_fwlog->lwpd.phys);
6707 mbx_fwlog->u.request.lwpd.addr_hi = putPaddrHigh(ras_fwlog->lwpd.phys);
6708
6709 spin_lock_irq(&phba->hbalock);
6710 ras_fwlog->state = REG_INPROGRESS;
6711 spin_unlock_irq(&phba->hbalock);
6712 mbox->vport = phba->pport;
6713 mbox->mbox_cmpl = lpfc_sli4_ras_mbox_cmpl;
6714
6715 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT);
6716
6717 if (rc == MBX_NOT_FINISHED) {
6718 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
6719 "6191 FW-Log Mailbox failed. "
6720 "status %d mbxStatus : x%x", rc,
6721 bf_get(lpfc_mqe_status, &mbox->u.mqe));
6722 mempool_free(mbox, phba->mbox_mem_pool);
6723 rc = -EIO;
6724 goto mem_free;
6725 } else
6726 rc = 0;
6727mem_free:
6728 if (rc)
6729 lpfc_sli4_ras_dma_free(phba);
6730
6731 return rc;
6732}
6733
6734
6735
6736
6737
6738
6739
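/**
 * lpfc_sli4_ras_setup - Check if RAS is supported and initialize it
 * @phba: Pointer to HBA context object.
 *
 * This function enables firmware logging at the configured level when
 * the adapter supports it.
 **/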
6740void
6741lpfc_sli4_ras_setup(struct lpfc_hba *phba)
6742{
6743
6744 if (lpfc_check_fwlog_support(phba))
6745 return;
6746
6747 lpfc_sli4_ras_fwlog_init(phba, phba->cfg_ras_fwlog_level,
6748 LPFC_RAS_ENABLE_LOGGING);
6749}
6750
6751
6752
6753
6754
6755
6756
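/**
 * lpfc_sli4_alloc_resource_identifiers - Allocate all SLI4 resources
 * @phba: Pointer to HBA context object.
 *
 * This function allocates the VFI, VPI, RPI and XRI resources either
 * through the extent interface, reallocating everything if the port's
 * provisioning changed, or, when extents are not in use, by building
 * contiguous id arrays and bitmasks from the READ_CONFIG limits.
 **/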
6757int
6758lpfc_sli4_alloc_resource_identifiers(struct lpfc_hba *phba)
6759{
6760 int i, rc, error = 0;
6761 uint16_t count, base;
6762 unsigned long longs;
6763
6764 if (!phba->sli4_hba.rpi_hdrs_in_use)
6765 phba->sli4_hba.next_rpi = phba->sli4_hba.max_cfg_param.max_rpi;
6766 if (phba->sli4_hba.extents_in_use) {
6767
6768
6769
6770
6771
6772 if (bf_get(lpfc_idx_rsrc_rdy, &phba->sli4_hba.sli4_flags) ==
6773 LPFC_IDX_RSRC_RDY) {
6774
6775
6776
6777
6778
6779 rc = lpfc_sli4_chk_avail_extnt_rsrc(phba,
6780 LPFC_RSC_TYPE_FCOE_VFI);
6781 if (rc != 0)
6782 error++;
6783 rc = lpfc_sli4_chk_avail_extnt_rsrc(phba,
6784 LPFC_RSC_TYPE_FCOE_VPI);
6785 if (rc != 0)
6786 error++;
6787 rc = lpfc_sli4_chk_avail_extnt_rsrc(phba,
6788 LPFC_RSC_TYPE_FCOE_XRI);
6789 if (rc != 0)
6790 error++;
6791 rc = lpfc_sli4_chk_avail_extnt_rsrc(phba,
6792 LPFC_RSC_TYPE_FCOE_RPI);
6793 if (rc != 0)
6794 error++;
6795
6796
6797
6798
6799
6800
6801
6802 if (error) {
6803 lpfc_printf_log(phba, KERN_INFO,
6804 LOG_MBOX | LOG_INIT,
6805 "2931 Detected extent resource "
6806 "change. Reallocating all "
6807 "extents.\n");
6808 rc = lpfc_sli4_dealloc_extent(phba,
6809 LPFC_RSC_TYPE_FCOE_VFI);
6810 rc = lpfc_sli4_dealloc_extent(phba,
6811 LPFC_RSC_TYPE_FCOE_VPI);
6812 rc = lpfc_sli4_dealloc_extent(phba,
6813 LPFC_RSC_TYPE_FCOE_XRI);
6814 rc = lpfc_sli4_dealloc_extent(phba,
6815 LPFC_RSC_TYPE_FCOE_RPI);
6816 } else
6817 return 0;
6818 }
6819
6820 rc = lpfc_sli4_alloc_extent(phba, LPFC_RSC_TYPE_FCOE_VFI);
6821 if (unlikely(rc))
6822 goto err_exit;
6823
6824 rc = lpfc_sli4_alloc_extent(phba, LPFC_RSC_TYPE_FCOE_VPI);
6825 if (unlikely(rc))
6826 goto err_exit;
6827
6828 rc = lpfc_sli4_alloc_extent(phba, LPFC_RSC_TYPE_FCOE_RPI);
6829 if (unlikely(rc))
6830 goto err_exit;
6831
6832 rc = lpfc_sli4_alloc_extent(phba, LPFC_RSC_TYPE_FCOE_XRI);
6833 if (unlikely(rc))
6834 goto err_exit;
6835 bf_set(lpfc_idx_rsrc_rdy, &phba->sli4_hba.sli4_flags,
6836 LPFC_IDX_RSRC_RDY);
6837 return rc;
6838 } else {
6839
6840
6841
6842
6843
6844
6845
6846 if (bf_get(lpfc_idx_rsrc_rdy, &phba->sli4_hba.sli4_flags) ==
6847 LPFC_IDX_RSRC_RDY) {
6848 lpfc_sli4_dealloc_resource_identifiers(phba);
6849 lpfc_sli4_remove_rpis(phba);
6850 }
6851
6852 count = phba->sli4_hba.max_cfg_param.max_rpi;
6853 if (count <= 0) {
6854 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
6855 "3279 Invalid provisioning of "
6856 "rpi:%d\n", count);
6857 rc = -EINVAL;
6858 goto err_exit;
6859 }
6860 base = phba->sli4_hba.max_cfg_param.rpi_base;
6861 longs = (count + BITS_PER_LONG - 1) / BITS_PER_LONG;
6862 phba->sli4_hba.rpi_bmask = kcalloc(longs,
6863 sizeof(unsigned long),
6864 GFP_KERNEL);
6865 if (unlikely(!phba->sli4_hba.rpi_bmask)) {
6866 rc = -ENOMEM;
6867 goto err_exit;
6868 }
6869 phba->sli4_hba.rpi_ids = kcalloc(count, sizeof(uint16_t),
6870 GFP_KERNEL);
6871 if (unlikely(!phba->sli4_hba.rpi_ids)) {
6872 rc = -ENOMEM;
6873 goto free_rpi_bmask;
6874 }
6875
6876 for (i = 0; i < count; i++)
6877 phba->sli4_hba.rpi_ids[i] = base + i;
6878
6879
6880 count = phba->sli4_hba.max_cfg_param.max_vpi;
6881 if (count <= 0) {
6882 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
6883 "3280 Invalid provisioning of "
6884 "vpi:%d\n", count);
6885 rc = -EINVAL;
6886 goto free_rpi_ids;
6887 }
6888 base = phba->sli4_hba.max_cfg_param.vpi_base;
6889 longs = (count + BITS_PER_LONG - 1) / BITS_PER_LONG;
6890 phba->vpi_bmask = kcalloc(longs, sizeof(unsigned long),
6891 GFP_KERNEL);
6892 if (unlikely(!phba->vpi_bmask)) {
6893 rc = -ENOMEM;
6894 goto free_rpi_ids;
6895 }
6896 phba->vpi_ids = kcalloc(count, sizeof(uint16_t),
6897 GFP_KERNEL);
6898 if (unlikely(!phba->vpi_ids)) {
6899 rc = -ENOMEM;
6900 goto free_vpi_bmask;
6901 }
6902
6903 for (i = 0; i < count; i++)
6904 phba->vpi_ids[i] = base + i;
6905
6906
6907 count = phba->sli4_hba.max_cfg_param.max_xri;
6908 if (count <= 0) {
6909 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
6910 "3281 Invalid provisioning of "
6911 "xri:%d\n", count);
6912 rc = -EINVAL;
6913 goto free_vpi_ids;
6914 }
6915 base = phba->sli4_hba.max_cfg_param.xri_base;
6916 longs = (count + BITS_PER_LONG - 1) / BITS_PER_LONG;
6917 phba->sli4_hba.xri_bmask = kcalloc(longs,
6918 sizeof(unsigned long),
6919 GFP_KERNEL);
6920 if (unlikely(!phba->sli4_hba.xri_bmask)) {
6921 rc = -ENOMEM;
6922 goto free_vpi_ids;
6923 }
6924 phba->sli4_hba.max_cfg_param.xri_used = 0;
6925 phba->sli4_hba.xri_ids = kcalloc(count, sizeof(uint16_t),
6926 GFP_KERNEL);
6927 if (unlikely(!phba->sli4_hba.xri_ids)) {
6928 rc = -ENOMEM;
6929 goto free_xri_bmask;
6930 }
6931
6932 for (i = 0; i < count; i++)
6933 phba->sli4_hba.xri_ids[i] = base + i;
6934
6935
6936 count = phba->sli4_hba.max_cfg_param.max_vfi;
6937 if (count <= 0) {
6938 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
6939 "3282 Invalid provisioning of "
6940 "vfi:%d\n", count);
6941 rc = -EINVAL;
6942 goto free_xri_ids;
6943 }
6944 base = phba->sli4_hba.max_cfg_param.vfi_base;
6945 longs = (count + BITS_PER_LONG - 1) / BITS_PER_LONG;
6946 phba->sli4_hba.vfi_bmask = kcalloc(longs,
6947 sizeof(unsigned long),
6948 GFP_KERNEL);
6949 if (unlikely(!phba->sli4_hba.vfi_bmask)) {
6950 rc = -ENOMEM;
6951 goto free_xri_ids;
6952 }
6953 phba->sli4_hba.vfi_ids = kcalloc(count, sizeof(uint16_t),
6954 GFP_KERNEL);
6955 if (unlikely(!phba->sli4_hba.vfi_ids)) {
6956 rc = -ENOMEM;
6957 goto free_vfi_bmask;
6958 }
6959
6960 for (i = 0; i < count; i++)
6961 phba->sli4_hba.vfi_ids[i] = base + i;
6962
6963
6964
6965
6966
6967 bf_set(lpfc_idx_rsrc_rdy, &phba->sli4_hba.sli4_flags,
6968 LPFC_IDX_RSRC_RDY);
6969 return 0;
6970 }
6971
6972 free_vfi_bmask:
6973 kfree(phba->sli4_hba.vfi_bmask);
6974 phba->sli4_hba.vfi_bmask = NULL;
6975 free_xri_ids:
6976 kfree(phba->sli4_hba.xri_ids);
6977 phba->sli4_hba.xri_ids = NULL;
6978 free_xri_bmask:
6979 kfree(phba->sli4_hba.xri_bmask);
6980 phba->sli4_hba.xri_bmask = NULL;
6981 free_vpi_ids:
6982 kfree(phba->vpi_ids);
6983 phba->vpi_ids = NULL;
6984 free_vpi_bmask:
6985 kfree(phba->vpi_bmask);
6986 phba->vpi_bmask = NULL;
6987 free_rpi_ids:
6988 kfree(phba->sli4_hba.rpi_ids);
6989 phba->sli4_hba.rpi_ids = NULL;
6990 free_rpi_bmask:
6991 kfree(phba->sli4_hba.rpi_bmask);
6992 phba->sli4_hba.rpi_bmask = NULL;
6993 err_exit:
6994 return rc;
6995}
6996
6997
6998
6999
7000
7001
7002
7003
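/**
 * lpfc_sli4_dealloc_resource_identifiers - Deallocate all SLI4 resources
 * @phba: Pointer to HBA context object.
 *
 * This function releases the VPI, RPI, XRI and VFI resources back to the
 * port, or frees the driver's id arrays and bitmasks when extents are
 * not in use.
 **/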
7004int
7005lpfc_sli4_dealloc_resource_identifiers(struct lpfc_hba *phba)
7006{
7007 if (phba->sli4_hba.extents_in_use) {
7008 lpfc_sli4_dealloc_extent(phba, LPFC_RSC_TYPE_FCOE_VPI);
7009 lpfc_sli4_dealloc_extent(phba, LPFC_RSC_TYPE_FCOE_RPI);
7010 lpfc_sli4_dealloc_extent(phba, LPFC_RSC_TYPE_FCOE_XRI);
7011 lpfc_sli4_dealloc_extent(phba, LPFC_RSC_TYPE_FCOE_VFI);
7012 } else {
7013 kfree(phba->vpi_bmask);
7014 phba->sli4_hba.max_cfg_param.vpi_used = 0;
7015 kfree(phba->vpi_ids);
7016 bf_set(lpfc_vpi_rsrc_rdy, &phba->sli4_hba.sli4_flags, 0);
7017 kfree(phba->sli4_hba.xri_bmask);
7018 kfree(phba->sli4_hba.xri_ids);
7019 kfree(phba->sli4_hba.vfi_bmask);
7020 kfree(phba->sli4_hba.vfi_ids);
7021 bf_set(lpfc_vfi_rsrc_rdy, &phba->sli4_hba.sli4_flags, 0);
7022 bf_set(lpfc_idx_rsrc_rdy, &phba->sli4_hba.sli4_flags, 0);
7023 }
7024
7025 return 0;
7026}
7027
7028
7029
7030
7031
7032
7033
7034
7035
7036
7037
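/**
 * lpfc_sli4_get_allocated_extnts - Get the port's allocated extents
 * @phba: Pointer to HBA context object.
 * @type: The resource extent type.
 * @extnt_cnt: Buffer to hold the allocated extent count.
 * @extnt_size: Buffer to hold the element count per extent.
 *
 * This function reports the extent size recorded on the driver's block
 * list and issues a GET_ALLOC_RSRC_EXTENT mailbox command to query the
 * port for the number of extents of @type currently allocated to this
 * function.
 **/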
7038int
7039lpfc_sli4_get_allocated_extnts(struct lpfc_hba *phba, uint16_t type,
7040 uint16_t *extnt_cnt, uint16_t *extnt_size)
7041{
7042 bool emb;
7043 int rc = 0;
7044 uint16_t curr_blks = 0;
7045 uint32_t req_len, emb_len;
7046 uint32_t alloc_len, mbox_tmo;
7047 struct list_head *blk_list_head;
7048 struct lpfc_rsrc_blks *rsrc_blk;
7049 LPFC_MBOXQ_t *mbox;
7050 void *virtaddr = NULL;
7051 struct lpfc_mbx_nembed_rsrc_extent *n_rsrc;
7052 struct lpfc_mbx_alloc_rsrc_extents *rsrc_ext;
7053 union lpfc_sli4_cfg_shdr *shdr;
7054
7055 switch (type) {
7056 case LPFC_RSC_TYPE_FCOE_VPI:
7057 blk_list_head = &phba->lpfc_vpi_blk_list;
7058 break;
7059 case LPFC_RSC_TYPE_FCOE_XRI:
7060 blk_list_head = &phba->sli4_hba.lpfc_xri_blk_list;
7061 break;
7062 case LPFC_RSC_TYPE_FCOE_VFI:
7063 blk_list_head = &phba->sli4_hba.lpfc_vfi_blk_list;
7064 break;
7065 case LPFC_RSC_TYPE_FCOE_RPI:
7066 blk_list_head = &phba->sli4_hba.lpfc_rpi_blk_list;
7067 break;
7068 default:
7069 return -EIO;
7070 }
7071
7072
7073 list_for_each_entry(rsrc_blk, blk_list_head, list) {
7074 if (curr_blks == 0) {
7075
7076
7077
7078
7079
7080
7081
7082 *extnt_size = rsrc_blk->rsrc_size;
7083 }
7084 curr_blks++;
7085 }
7086
7087
7088
7089
7090
7091 emb_len = sizeof(MAILBOX_t) - sizeof(struct mbox_header) -
7092 sizeof(uint32_t);
7093
7094
7095
7096
7097
7098 emb = LPFC_SLI4_MBX_EMBED;
7099 req_len = emb_len;
7100 if (req_len > emb_len) {
7101 req_len = curr_blks * sizeof(uint16_t) +
7102 sizeof(union lpfc_sli4_cfg_shdr) +
7103 sizeof(uint32_t);
7104 emb = LPFC_SLI4_MBX_NEMBED;
7105 }
7106
7107 mbox = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
7108 if (!mbox)
7109 return -ENOMEM;
7110 memset(mbox, 0, sizeof(LPFC_MBOXQ_t));
7111
7112 alloc_len = lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON,
7113 LPFC_MBOX_OPCODE_GET_ALLOC_RSRC_EXTENT,
7114 req_len, emb);
7115 if (alloc_len < req_len) {
7116 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
7117 "2983 Allocated DMA memory size (x%x) is "
7118 "less than the requested DMA memory "
7119 "size (x%x)\n", alloc_len, req_len);
7120 rc = -ENOMEM;
7121 goto err_exit;
7122 }
7123 rc = lpfc_sli4_mbox_rsrc_extent(phba, mbox, curr_blks, type, emb);
7124 if (unlikely(rc)) {
7125 rc = -EIO;
7126 goto err_exit;
7127 }
7128
7129 if (!phba->sli4_hba.intr_enable)
7130 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
7131 else {
7132 mbox_tmo = lpfc_mbox_tmo_val(phba, mbox);
7133 rc = lpfc_sli_issue_mbox_wait(phba, mbox, mbox_tmo);
7134 }
7135
7136 if (unlikely(rc)) {
7137 rc = -EIO;
7138 goto err_exit;
7139 }
7140
7141
7142
7143
7144
7145
7146
7147 if (emb == LPFC_SLI4_MBX_EMBED) {
7148 rsrc_ext = &mbox->u.mqe.un.alloc_rsrc_extents;
7149 shdr = &rsrc_ext->header.cfg_shdr;
7150 *extnt_cnt = bf_get(lpfc_mbx_rsrc_cnt, &rsrc_ext->u.rsp);
7151 } else {
7152 virtaddr = mbox->sge_array->addr[0];
7153 n_rsrc = (struct lpfc_mbx_nembed_rsrc_extent *) virtaddr;
7154 shdr = &n_rsrc->cfg_shdr;
7155 *extnt_cnt = bf_get(lpfc_mbx_rsrc_cnt, n_rsrc);
7156 }
7157
7158 if (bf_get(lpfc_mbox_hdr_status, &shdr->response)) {
7159 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
7160 "2984 Failed to read allocated resources "
7161 "for type %d - Status 0x%x Add'l Status 0x%x.\n",
7162 type,
7163 bf_get(lpfc_mbox_hdr_status, &shdr->response),
7164 bf_get(lpfc_mbox_hdr_add_status, &shdr->response));
7165 rc = -EIO;
7166 goto err_exit;
7167 }
7168 err_exit:
7169 lpfc_sli4_mbox_cmd_free(phba, mbox);
7170 return rc;
7171}
7172
7173
7174
7175
7176
7177
7178
7179
7180
7181
7182
7183
7184
7185
7186
7187
7188
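/**
 * lpfc_sli4_repost_sgl_list - Repost the buffer SGL pages as a block
 * @phba: Pointer to HBA context object.
 * @sgl_list: Linked list of SGL buffers to post.
 * @cnt: Number of buffers on the list.
 *
 * This function batches SGLs with contiguous XRIs and posts them to the
 * port as blocks, falling back to single-SGL posts where needed. Buffers
 * that fail to post are freed. It returns the number of buffers
 * successfully posted, or -EIO if none could be posted.
 **/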
7189static int
7190lpfc_sli4_repost_sgl_list(struct lpfc_hba *phba,
7191 struct list_head *sgl_list, int cnt)
7192{
7193 struct lpfc_sglq *sglq_entry = NULL;
7194 struct lpfc_sglq *sglq_entry_next = NULL;
7195 struct lpfc_sglq *sglq_entry_first = NULL;
7196 int status, total_cnt;
7197 int post_cnt = 0, num_posted = 0, block_cnt = 0;
7198 int last_xritag = NO_XRI;
7199 LIST_HEAD(prep_sgl_list);
7200 LIST_HEAD(blck_sgl_list);
7201 LIST_HEAD(allc_sgl_list);
7202 LIST_HEAD(post_sgl_list);
7203 LIST_HEAD(free_sgl_list);
7204
7205 spin_lock_irq(&phba->hbalock);
7206 spin_lock(&phba->sli4_hba.sgl_list_lock);
7207 list_splice_init(sgl_list, &allc_sgl_list);
7208 spin_unlock(&phba->sli4_hba.sgl_list_lock);
7209 spin_unlock_irq(&phba->hbalock);
7210
7211 total_cnt = cnt;
7212 list_for_each_entry_safe(sglq_entry, sglq_entry_next,
7213 &allc_sgl_list, list) {
7214 list_del_init(&sglq_entry->list);
7215 block_cnt++;
7216 if ((last_xritag != NO_XRI) &&
7217 (sglq_entry->sli4_xritag != last_xritag + 1)) {
7218
7219 list_splice_init(&prep_sgl_list, &blck_sgl_list);
7220 post_cnt = block_cnt - 1;
7221
7222 list_add_tail(&sglq_entry->list, &prep_sgl_list);
7223 block_cnt = 1;
7224 } else {
7225
7226 list_add_tail(&sglq_entry->list, &prep_sgl_list);
7227
7228 if (block_cnt == LPFC_NEMBED_MBOX_SGL_CNT) {
7229 list_splice_init(&prep_sgl_list,
7230 &blck_sgl_list);
7231 post_cnt = block_cnt;
7232 block_cnt = 0;
7233 }
7234 }
7235 num_posted++;
7236
7237
7238 last_xritag = sglq_entry->sli4_xritag;
7239
7240
7241 if (num_posted == total_cnt) {
7242 if (post_cnt == 0) {
7243 list_splice_init(&prep_sgl_list,
7244 &blck_sgl_list);
7245 post_cnt = block_cnt;
7246 } else if (block_cnt == 1) {
7247 status = lpfc_sli4_post_sgl(phba,
7248 sglq_entry->phys, 0,
7249 sglq_entry->sli4_xritag);
7250 if (!status) {
7251
7252 list_add_tail(&sglq_entry->list,
7253 &post_sgl_list);
7254 } else {
7255
7256 lpfc_printf_log(phba, KERN_WARNING,
7257 LOG_SLI,
7258 "3159 Failed to post "
7259 "sgl, xritag:x%x\n",
7260 sglq_entry->sli4_xritag);
7261 list_add_tail(&sglq_entry->list,
7262 &free_sgl_list);
7263 total_cnt--;
7264 }
7265 }
7266 }
7267
7268
7269 if (post_cnt == 0)
7270 continue;
7271
7272
7273 status = lpfc_sli4_post_sgl_list(phba, &blck_sgl_list,
7274 post_cnt);
7275
7276 if (!status) {
7277
7278 list_splice_init(&blck_sgl_list, &post_sgl_list);
7279 } else {
7280
7281 sglq_entry_first = list_first_entry(&blck_sgl_list,
7282 struct lpfc_sglq,
7283 list);
7284 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
7285 "3160 Failed to post sgl-list, "
7286 "xritag:x%x-x%x\n",
7287 sglq_entry_first->sli4_xritag,
7288 (sglq_entry_first->sli4_xritag +
7289 post_cnt - 1));
7290 list_splice_init(&blck_sgl_list, &free_sgl_list);
7291 total_cnt -= post_cnt;
7292 }
7293
7294
7295 if (block_cnt == 0)
7296 last_xritag = NO_XRI;
7297
7298
7299 post_cnt = 0;
7300 }
7301
7302
7303 lpfc_free_sgl_list(phba, &free_sgl_list);
7304
7305
7306 if (!list_empty(&post_sgl_list)) {
7307 spin_lock_irq(&phba->hbalock);
7308 spin_lock(&phba->sli4_hba.sgl_list_lock);
7309 list_splice_init(&post_sgl_list, sgl_list);
7310 spin_unlock(&phba->sli4_hba.sgl_list_lock);
7311 spin_unlock_irq(&phba->hbalock);
7312 } else {
7313 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
7314 "3161 Failure to post sgl to port.\n");
7315 return -EIO;
7316 }
7317
7318
7319 return total_cnt;
7320}
7321
7322
7323
7324
7325
7326
7327
7328
7329
7330
7331
7332
7333
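/**
 * lpfc_sli4_repost_io_sgl_list - Repost all allocated I/O buffer SGLs
 * @phba: Pointer to HBA context object.
 *
 * This function flushes the allocated I/O buffers onto a local list and
 * reposts their SGLs to the port. Returns 0 on success or -EIO when
 * nothing could be posted.
 **/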
7334static int
7335lpfc_sli4_repost_io_sgl_list(struct lpfc_hba *phba)
7336{
7337 LIST_HEAD(post_nblist);
7338 int num_posted, rc = 0;
7339
7340
7341 lpfc_io_buf_flush(phba, &post_nblist);
7342
7343
7344 if (!list_empty(&post_nblist)) {
7345 num_posted = lpfc_sli4_post_io_sgl_list(
7346 phba, &post_nblist, phba->sli4_hba.io_xri_cnt);
7347
7348 if (num_posted == 0)
7349 rc = -EIO;
7350 }
7351 return rc;
7352}
7353
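/**
 * lpfc_set_host_data - Build a SET_HOST_DATA mailbox command
 * @phba: Pointer to HBA context object.
 * @mbox: Pointer to the caller's allocated mailbox structure.
 *
 * This function formats an embedded SET_HOST_DATA command that reports
 * the host OS and driver version string to the port.
 **/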
7354static void
7355lpfc_set_host_data(struct lpfc_hba *phba, LPFC_MBOXQ_t *mbox)
7356{
7357 uint32_t len;
7358
7359 len = sizeof(struct lpfc_mbx_set_host_data) -
7360 sizeof(struct lpfc_sli4_cfg_mhdr);
7361 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON,
7362 LPFC_MBOX_OPCODE_SET_HOST_DATA, len,
7363 LPFC_SLI4_MBX_EMBED);
7364
7365 mbox->u.mqe.un.set_host_data.param_id = LPFC_SET_HOST_OS_DRIVER_VERSION;
7366 mbox->u.mqe.un.set_host_data.param_len =
7367 LPFC_HOST_OS_DRIVER_VERSION_SIZE;
7368 snprintf(mbox->u.mqe.un.set_host_data.data,
7369 LPFC_HOST_OS_DRIVER_VERSION_SIZE,
7370 "Linux %s v"LPFC_DRIVER_VERSION,
7371 (phba->hba_flag & HBA_FCOE_MODE) ? "FCoE" : "FC");
7372}
7373
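/**
 * lpfc_post_rq_buffer - Post receive-queue buffers to the port
 * @phba: Pointer to HBA context object.
 * @hrq: Header receive queue.
 * @drq: Data receive queue.
 * @count: Number of buffer pairs to post.
 * @idx: Receive-queue index.
 *
 * This function allocates up to @count RQ buffer pairs and posts them to
 * the header/data receive queues under the hbalock, freeing any buffer
 * the port refuses.
 **/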
7374int
7375lpfc_post_rq_buffer(struct lpfc_hba *phba, struct lpfc_queue *hrq,
7376 struct lpfc_queue *drq, int count, int idx)
7377{
7378 int rc, i;
7379 struct lpfc_rqe hrqe;
7380 struct lpfc_rqe drqe;
7381 struct lpfc_rqb *rqbp;
7382 unsigned long flags;
7383 struct rqb_dmabuf *rqb_buffer;
7384 LIST_HEAD(rqb_buf_list);
7385
7386 rqbp = hrq->rqbp;
7387 for (i = 0; i < count; i++) {
7388 spin_lock_irqsave(&phba->hbalock, flags);
7389
7390 if (rqbp->buffer_count + i >= rqbp->entry_count - 1) {
7391 spin_unlock_irqrestore(&phba->hbalock, flags);
7392 break;
7393 }
7394 spin_unlock_irqrestore(&phba->hbalock, flags);
7395
7396 rqb_buffer = rqbp->rqb_alloc_buffer(phba);
7397 if (!rqb_buffer)
7398 break;
7399 rqb_buffer->hrq = hrq;
7400 rqb_buffer->drq = drq;
7401 rqb_buffer->idx = idx;
7402 list_add_tail(&rqb_buffer->hbuf.list, &rqb_buf_list);
7403 }
7404
7405 spin_lock_irqsave(&phba->hbalock, flags);
7406 while (!list_empty(&rqb_buf_list)) {
7407 list_remove_head(&rqb_buf_list, rqb_buffer, struct rqb_dmabuf,
7408 hbuf.list);
7409
7410 hrqe.address_lo = putPaddrLow(rqb_buffer->hbuf.phys);
7411 hrqe.address_hi = putPaddrHigh(rqb_buffer->hbuf.phys);
7412 drqe.address_lo = putPaddrLow(rqb_buffer->dbuf.phys);
7413 drqe.address_hi = putPaddrHigh(rqb_buffer->dbuf.phys);
7414 rc = lpfc_sli4_rq_put(hrq, drq, &hrqe, &drqe);
7415 if (rc < 0) {
7416 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
7417 "6421 Cannot post to HRQ %d: %x %x %x "
7418 "DRQ %x %x\n",
7419 hrq->queue_id,
7420 hrq->host_index,
7421 hrq->hba_index,
7422 hrq->entry_count,
7423 drq->host_index,
7424 drq->hba_index);
7425 rqbp->rqb_free_buffer(phba, rqb_buffer);
7426 } else {
7427 list_add_tail(&rqb_buffer->hbuf.list,
7428 &rqbp->rqb_buffer_list);
7429 rqbp->buffer_count++;
7430 }
7431 }
7432 spin_unlock_irqrestore(&phba->hbalock, flags);
7433 return 1;
7434}
7435
7436
7437
7438
7439
7440
7441
7442
7443
7444
7445
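/**
 * lpfc_init_idle_stat_hb - Initialize idle-stat tracking
 * @phba: Pointer to HBA context object.
 *
 * This routine records the per-CPU idle time baseline used by the CQ
 * poll-mode heartbeat, selects the initial poll mode for each I/O CQ and
 * schedules the idle-stat delayed work when NVMET is not in use.
 **/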
7446static void lpfc_init_idle_stat_hb(struct lpfc_hba *phba)
7447{
7448 int i;
7449 struct lpfc_sli4_hdw_queue *hdwq;
7450 struct lpfc_queue *cq;
7451 struct lpfc_idle_stat *idle_stat;
7452 u64 wall;
7453
7454 for_each_present_cpu(i) {
7455 hdwq = &phba->sli4_hba.hdwq[phba->sli4_hba.cpu_map[i].hdwq];
7456 cq = hdwq->io_cq;
7457
7458
7459 if (cq->chann != i)
7460 continue;
7461
7462 idle_stat = &phba->sli4_hba.idle_stat[i];
7463
7464 idle_stat->prev_idle = get_cpu_idle_time(i, &wall, 1);
7465 idle_stat->prev_wall = wall;
7466
7467 if (phba->nvmet_support)
7468 cq->poll_mode = LPFC_QUEUE_WORK;
7469 else
7470 cq->poll_mode = LPFC_IRQ_POLL;
7471 }
7472
7473 if (!phba->nvmet_support)
7474 schedule_delayed_work(&phba->idle_stat_delay_work,
7475 msecs_to_jiffies(LPFC_IDLE_STAT_DELAY));
7476}
7477
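/**
 * lpfc_sli4_dip - Log whether a firmware dump image is present
 * @phba: pointer to lpfc hba data structure.
 *
 * For if_type 2 and 6 ports, reads the SLIPORT status register and logs a
 * message if the Dump Image Present bit is set.
 */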
7478static void lpfc_sli4_dip(struct lpfc_hba *phba)
7479{
7480 uint32_t if_type;
7481
7482 if_type = bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf);
7483 if (if_type == LPFC_SLI_INTF_IF_TYPE_2 ||
7484 if_type == LPFC_SLI_INTF_IF_TYPE_6) {
7485 struct lpfc_register reg_data;
7486
7487 if (lpfc_readl(phba->sli4_hba.u.if_type2.STATUSregaddr,
7488 			       &reg_data.word0))
7489 return;
7490
7491 		if (bf_get(lpfc_sliport_status_dip, &reg_data))
7492 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
7493 "2904 Firmware Dump Image Present"
7494 " on Adapter");
7495 }
7496}
7497
7498
7499
7500
7501
7502
7503
7504
7505
7506
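/**
 * lpfc_sli4_hba_setup - Main SLI4 HBA setup routine
 * @phba: pointer to lpfc hba data structure.
 *
 * Brings an SLI4 port to an operational state: resets the PCI function,
 * checks port status, reads the firmware revision and VPD, negotiates
 * features, allocates resource identifiers, creates and sets up the
 * queues, posts ELS/IO/NVMET SGLs and RPI headers, registers the FCFI
 * (FC mode), posts IO buffers, starts the driver timers, arms the EQ/CQ
 * interrupts and, unless the link is administratively disabled, initiates
 * link bring-up.
 *
 * Return: 0 on success, a negative errno or mailbox status code on failure.
 */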
7507int
7508lpfc_sli4_hba_setup(struct lpfc_hba *phba)
7509{
7510 int rc, i, cnt, len, dd;
7511 LPFC_MBOXQ_t *mboxq;
7512 struct lpfc_mqe *mqe;
7513 uint8_t *vpd;
7514 uint32_t vpd_size;
7515 uint32_t ftr_rsp = 0;
7516 struct Scsi_Host *shost = lpfc_shost_from_vport(phba->pport);
7517 struct lpfc_vport *vport = phba->pport;
7518 struct lpfc_dmabuf *mp;
7519 struct lpfc_rqb *rqbp;
7520
7521
7522 rc = lpfc_pci_function_reset(phba);
7523 if (unlikely(rc))
7524 return -ENODEV;
7525
7526
7527 rc = lpfc_sli4_post_status_check(phba);
7528 if (unlikely(rc))
7529 return -ENODEV;
7530 else {
7531 spin_lock_irq(&phba->hbalock);
7532 phba->sli.sli_flag |= LPFC_SLI_ACTIVE;
7533 spin_unlock_irq(&phba->hbalock);
7534 }
7535
7536 lpfc_sli4_dip(phba);
7537
7538
7539
7540
7541
7542 mboxq = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
7543 if (!mboxq)
7544 return -ENOMEM;
7545
7546
7547 vpd_size = SLI4_PAGE_SIZE;
7548 vpd = kzalloc(vpd_size, GFP_KERNEL);
7549 if (!vpd) {
7550 rc = -ENOMEM;
7551 goto out_free_mbox;
7552 }
7553
7554 rc = lpfc_sli4_read_rev(phba, mboxq, vpd, &vpd_size);
7555 if (unlikely(rc)) {
7556 kfree(vpd);
7557 goto out_free_mbox;
7558 }
7559
7560 mqe = &mboxq->u.mqe;
7561 phba->sli_rev = bf_get(lpfc_mbx_rd_rev_sli_lvl, &mqe->un.read_rev);
7562 if (bf_get(lpfc_mbx_rd_rev_fcoe, &mqe->un.read_rev)) {
7563 phba->hba_flag |= HBA_FCOE_MODE;
7564 phba->fcp_embed_io = 0;
7565 } else {
7566 phba->hba_flag &= ~HBA_FCOE_MODE;
7567 }
7568
7569 if (bf_get(lpfc_mbx_rd_rev_cee_ver, &mqe->un.read_rev) ==
7570 LPFC_DCBX_CEE_MODE)
7571 phba->hba_flag |= HBA_FIP_SUPPORT;
7572 else
7573 phba->hba_flag &= ~HBA_FIP_SUPPORT;
7574
7575 phba->hba_flag &= ~HBA_IOQ_FLUSH;
7576
7577 if (phba->sli_rev != LPFC_SLI_REV4) {
7578 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
7579 "0376 READ_REV Error. SLI Level %d "
7580 "FCoE enabled %d\n",
7581 phba->sli_rev, phba->hba_flag & HBA_FCOE_MODE);
7582 rc = -EIO;
7583 kfree(vpd);
7584 goto out_free_mbox;
7585 }
7586
7587
7588
7589
7590
7591
7592 if (phba->hba_flag & HBA_FCOE_MODE &&
7593 lpfc_sli4_read_fcoe_params(phba))
7594 lpfc_printf_log(phba, KERN_WARNING, LOG_MBOX | LOG_INIT,
7595 "2570 Failed to read FCoE parameters\n");
7596
7597
7598
7599
7600
7601 rc = lpfc_sli4_retrieve_pport_name(phba);
7602 if (!rc)
7603 lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI,
7604 "3080 Successful retrieving SLI4 device "
7605 "physical port name: %s.\n", phba->Port);
7606
7607 rc = lpfc_sli4_get_ctl_attr(phba);
7608 if (!rc)
7609 lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI,
7610 "8351 Successful retrieving SLI4 device "
7611 "CTL ATTR\n");
7612
7613
7614
7615
7616
7617
7618 rc = lpfc_parse_vpd(phba, vpd, vpd_size);
7619 if (unlikely(!rc)) {
7620 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
7621 "0377 Error %d parsing vpd. "
7622 "Using defaults.\n", rc);
7623 rc = 0;
7624 }
7625 kfree(vpd);
7626
7627
7628 phba->vpd.rev.biuRev = mqe->un.read_rev.first_hw_rev;
7629 phba->vpd.rev.smRev = mqe->un.read_rev.second_hw_rev;
7630
7631
7632
7633
7634
7635 if ((bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) ==
7636 LPFC_SLI_INTF_IF_TYPE_6) &&
7637 (phba->vpd.rev.biuRev == LPFC_G7_ASIC_1) &&
7638 (phba->vpd.rev.smRev == 0) &&
7639 (phba->cfg_nvme_embed_cmd == 1))
7640 phba->cfg_nvme_embed_cmd = 0;
7641
7642 phba->vpd.rev.endecRev = mqe->un.read_rev.third_hw_rev;
7643 phba->vpd.rev.fcphHigh = bf_get(lpfc_mbx_rd_rev_fcph_high,
7644 &mqe->un.read_rev);
7645 phba->vpd.rev.fcphLow = bf_get(lpfc_mbx_rd_rev_fcph_low,
7646 &mqe->un.read_rev);
7647 phba->vpd.rev.feaLevelHigh = bf_get(lpfc_mbx_rd_rev_ftr_lvl_high,
7648 &mqe->un.read_rev);
7649 phba->vpd.rev.feaLevelLow = bf_get(lpfc_mbx_rd_rev_ftr_lvl_low,
7650 &mqe->un.read_rev);
7651 phba->vpd.rev.sli1FwRev = mqe->un.read_rev.fw_id_rev;
7652 memcpy(phba->vpd.rev.sli1FwName, mqe->un.read_rev.fw_name, 16);
7653 phba->vpd.rev.sli2FwRev = mqe->un.read_rev.ulp_fw_id_rev;
7654 memcpy(phba->vpd.rev.sli2FwName, mqe->un.read_rev.ulp_fw_name, 16);
7655 phba->vpd.rev.opFwRev = mqe->un.read_rev.fw_id_rev;
7656 memcpy(phba->vpd.rev.opFwName, mqe->un.read_rev.fw_name, 16);
7657 lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI,
7658 "(%d):0380 READ_REV Status x%x "
7659 "fw_rev:%s fcphHi:%x fcphLo:%x flHi:%x flLo:%x\n",
7660 mboxq->vport ? mboxq->vport->vpi : 0,
7661 bf_get(lpfc_mqe_status, mqe),
7662 phba->vpd.rev.opFwName,
7663 phba->vpd.rev.fcphHigh, phba->vpd.rev.fcphLow,
7664 phba->vpd.rev.feaLevelHigh, phba->vpd.rev.feaLevelLow);
7665
7666 if (bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) ==
7667 LPFC_SLI_INTF_IF_TYPE_0) {
7668 lpfc_set_features(phba, mboxq, LPFC_SET_UE_RECOVERY);
7669 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
7670 if (rc == MBX_SUCCESS) {
7671 phba->hba_flag |= HBA_RECOVERABLE_UE;
7672
7673 phba->eratt_poll_interval = 1;
7674 phba->sli4_hba.ue_to_sr = bf_get(
7675 lpfc_mbx_set_feature_UESR,
7676 &mboxq->u.mqe.un.set_feature);
7677 phba->sli4_hba.ue_to_rp = bf_get(
7678 lpfc_mbx_set_feature_UERP,
7679 &mboxq->u.mqe.un.set_feature);
7680 }
7681 }
7682
7683 if (phba->cfg_enable_mds_diags && phba->mds_diags_support) {
7684
7685 lpfc_set_features(phba, mboxq, LPFC_SET_MDS_DIAGS);
7686 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
7687 if (rc != MBX_SUCCESS)
7688 phba->mds_diags_support = 0;
7689 }
7690
7691
7692
7693
7694
7695 lpfc_request_features(phba, mboxq);
7696 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
7697 if (unlikely(rc)) {
7698 rc = -EIO;
7699 goto out_free_mbox;
7700 }
7701
7702
7703 if (phba->cfg_vmid_app_header && !(bf_get(lpfc_mbx_rq_ftr_rsp_ashdr,
7704 &mqe->un.req_ftrs))) {
7705 bf_set(lpfc_ftr_ashdr, &phba->sli4_hba.sli4_flags, 0);
7706 phba->cfg_vmid_app_header = 0;
7707 lpfc_printf_log(phba, KERN_DEBUG, LOG_SLI,
7708 "1242 vmid feature not supported\n");
7709 }
7710
7711
7712
7713
7714
7715 if (!(bf_get(lpfc_mbx_rq_ftr_rsp_fcpi, &mqe->un.req_ftrs))) {
7716 lpfc_printf_log(phba, KERN_WARNING, LOG_MBOX | LOG_SLI,
7717 "0378 No support for fcpi mode.\n");
7718 ftr_rsp++;
7719 }
7720
7721
7722 if (phba->hba_flag & HBA_FCOE_MODE) {
7723 if (bf_get(lpfc_mbx_rq_ftr_rsp_perfh, &mqe->un.req_ftrs))
7724 phba->sli3_options |= LPFC_SLI4_PERFH_ENABLED;
7725 else
7726 phba->sli3_options &= ~LPFC_SLI4_PERFH_ENABLED;
7727 }
7728
7729
7730
7731
7732
7733
7734 if (phba->sli3_options & LPFC_SLI3_BG_ENABLED) {
7735 if (!(bf_get(lpfc_mbx_rq_ftr_rsp_dif, &mqe->un.req_ftrs))) {
7736 phba->cfg_enable_bg = 0;
7737 phba->sli3_options &= ~LPFC_SLI3_BG_ENABLED;
7738 ftr_rsp++;
7739 }
7740 }
7741
7742 if (phba->max_vpi && phba->cfg_enable_npiv &&
7743 !(bf_get(lpfc_mbx_rq_ftr_rsp_npiv, &mqe->un.req_ftrs)))
7744 ftr_rsp++;
7745
7746 if (ftr_rsp) {
7747 lpfc_printf_log(phba, KERN_WARNING, LOG_MBOX | LOG_SLI,
7748 "0379 Feature Mismatch Data: x%08x %08x "
7749 "x%x x%x x%x\n", mqe->un.req_ftrs.word2,
7750 mqe->un.req_ftrs.word3, phba->cfg_enable_bg,
7751 phba->cfg_enable_npiv, phba->max_vpi);
7752 if (!(bf_get(lpfc_mbx_rq_ftr_rsp_dif, &mqe->un.req_ftrs)))
7753 phba->cfg_enable_bg = 0;
7754 if (!(bf_get(lpfc_mbx_rq_ftr_rsp_npiv, &mqe->un.req_ftrs)))
7755 phba->cfg_enable_npiv = 0;
7756 }
7757
7758
7759 spin_lock_irq(&phba->hbalock);
7760 phba->sli3_options |= (LPFC_SLI3_NPIV_ENABLED | LPFC_SLI3_HBQ_ENABLED);
7761 spin_unlock_irq(&phba->hbalock);
7762
7763
7764 lpfc_set_features(phba, mboxq, LPFC_SET_DUAL_DUMP);
7765 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
7766 dd = bf_get(lpfc_mbx_set_feature_dd, &mboxq->u.mqe.un.set_feature);
7767 if ((rc == MBX_SUCCESS) && (dd == LPFC_ENABLE_DUAL_DUMP))
7768 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
7769 "6448 Dual Dump is enabled\n");
7770 else
7771 lpfc_printf_log(phba, KERN_INFO, LOG_SLI | LOG_INIT,
7772 "6447 Dual Dump Mailbox x%x (x%x/x%x) failed, "
7773 "rc:x%x dd:x%x\n",
7774 bf_get(lpfc_mqe_command, &mboxq->u.mqe),
7775 lpfc_sli_config_mbox_subsys_get(
7776 phba, mboxq),
7777 lpfc_sli_config_mbox_opcode_get(
7778 phba, mboxq),
7779 rc, dd);
7780
7781
7782
7783
7784 rc = lpfc_sli4_alloc_resource_identifiers(phba);
7785 if (rc) {
7786 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
7787 "2920 Failed to alloc Resource IDs "
7788 "rc = x%x\n", rc);
7789 goto out_free_mbox;
7790 }
7791
7792 lpfc_set_host_data(phba, mboxq);
7793
7794 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
7795 if (rc) {
7796 lpfc_printf_log(phba, KERN_WARNING, LOG_MBOX | LOG_SLI,
7797 "2134 Failed to set host os driver version %x",
7798 rc);
7799 }
7800
7801
7802 rc = lpfc_read_sparam(phba, mboxq, vport->vpi);
7803 if (rc) {
7804 phba->link_state = LPFC_HBA_ERROR;
7805 rc = -ENOMEM;
7806 goto out_free_mbox;
7807 }
7808
7809 mboxq->vport = vport;
7810 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
7811 mp = (struct lpfc_dmabuf *)mboxq->ctx_buf;
7812 if (rc == MBX_SUCCESS) {
7813 memcpy(&vport->fc_sparam, mp->virt, sizeof(struct serv_parm));
7814 rc = 0;
7815 }
7816
7817
7818
7819
7820
7821 lpfc_mbuf_free(phba, mp->virt, mp->phys);
7822 kfree(mp);
7823 mboxq->ctx_buf = NULL;
7824 if (unlikely(rc)) {
7825 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
7826 "0382 READ_SPARAM command failed "
7827 "status %d, mbxStatus x%x\n",
7828 rc, bf_get(lpfc_mqe_status, mqe));
7829 phba->link_state = LPFC_HBA_ERROR;
7830 rc = -EIO;
7831 goto out_free_mbox;
7832 }
7833
7834 lpfc_update_vport_wwn(vport);
7835
7836
7837 fc_host_node_name(shost) = wwn_to_u64(vport->fc_nodename.u.wwn);
7838 fc_host_port_name(shost) = wwn_to_u64(vport->fc_portname.u.wwn);
7839
7840
7841 rc = lpfc_sli4_queue_create(phba);
7842 if (rc) {
7843 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
7844 "3089 Failed to allocate queues\n");
7845 rc = -ENODEV;
7846 goto out_free_mbox;
7847 }
7848
7849 rc = lpfc_sli4_queue_setup(phba);
7850 if (unlikely(rc)) {
7851 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
7852 				"0381 Error %d during queue setup.\n", rc);
7853 goto out_stop_timers;
7854 }
7855
7856 lpfc_sli4_setup(phba);
7857 lpfc_sli4_queue_init(phba);
7858
7859
7860 rc = lpfc_sli4_els_sgl_update(phba);
7861 if (unlikely(rc)) {
7862 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
7863 "1400 Failed to update xri-sgl size and "
7864 "mapping: %d\n", rc);
7865 goto out_destroy_queue;
7866 }
7867
7868
7869 rc = lpfc_sli4_repost_sgl_list(phba, &phba->sli4_hba.lpfc_els_sgl_list,
7870 phba->sli4_hba.els_xri_cnt);
7871 if (unlikely(rc < 0)) {
7872 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
7873 "0582 Error %d during els sgl post "
7874 "operation\n", rc);
7875 rc = -ENODEV;
7876 goto out_destroy_queue;
7877 }
7878 phba->sli4_hba.els_xri_cnt = rc;
7879
7880 if (phba->nvmet_support) {
7881
7882 rc = lpfc_sli4_nvmet_sgl_update(phba);
7883 if (unlikely(rc)) {
7884 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
7885 "6308 Failed to update nvmet-sgl size "
7886 "and mapping: %d\n", rc);
7887 goto out_destroy_queue;
7888 }
7889
7890
7891 rc = lpfc_sli4_repost_sgl_list(
7892 phba,
7893 &phba->sli4_hba.lpfc_nvmet_sgl_list,
7894 phba->sli4_hba.nvmet_xri_cnt);
7895 if (unlikely(rc < 0)) {
7896 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
7897 "3117 Error %d during nvmet "
7898 "sgl post\n", rc);
7899 rc = -ENODEV;
7900 goto out_destroy_queue;
7901 }
7902 phba->sli4_hba.nvmet_xri_cnt = rc;
7903
7904
7905
7906
7907 cnt = phba->sli4_hba.nvmet_xri_cnt +
7908 phba->sli4_hba.max_cfg_param.max_xri;
7909 } else {
7910
7911 rc = lpfc_sli4_io_sgl_update(phba);
7912 if (unlikely(rc)) {
7913 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
7914 "6082 Failed to update nvme-sgl size "
7915 "and mapping: %d\n", rc);
7916 goto out_destroy_queue;
7917 }
7918
7919
7920 rc = lpfc_sli4_repost_io_sgl_list(phba);
7921 if (unlikely(rc)) {
7922 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
7923 "6116 Error %d during nvme sgl post "
7924 "operation\n", rc);
7925
7926
7927 rc = -ENODEV;
7928 goto out_destroy_queue;
7929 }
7930
7931
7932
7933 cnt = phba->sli4_hba.max_cfg_param.max_xri;
7934 }
7935
7936 if (!phba->sli.iocbq_lookup) {
7937
7938 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
7939 "2821 initialize iocb list with %d entries\n",
7940 cnt);
7941 rc = lpfc_init_iocb_list(phba, cnt);
7942 if (rc) {
7943 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
7944 "1413 Failed to init iocb list.\n");
7945 goto out_destroy_queue;
7946 }
7947 }
7948
7949 if (phba->nvmet_support)
7950 lpfc_nvmet_create_targetport(phba);
7951
7952 if (phba->nvmet_support && phba->cfg_nvmet_mrq) {
7953
7954 for (i = 0; i < phba->cfg_nvmet_mrq; i++) {
7955 rqbp = phba->sli4_hba.nvmet_mrq_hdr[i]->rqbp;
7956 INIT_LIST_HEAD(&rqbp->rqb_buffer_list);
7957 rqbp->rqb_alloc_buffer = lpfc_sli4_nvmet_alloc;
7958 rqbp->rqb_free_buffer = lpfc_sli4_nvmet_free;
7959 rqbp->entry_count = LPFC_NVMET_RQE_DEF_COUNT;
7960 rqbp->buffer_count = 0;
7961
7962 lpfc_post_rq_buffer(
7963 phba, phba->sli4_hba.nvmet_mrq_hdr[i],
7964 phba->sli4_hba.nvmet_mrq_data[i],
7965 phba->cfg_nvmet_mrq_post, i);
7966 }
7967 }
7968
7969
7970 rc = lpfc_sli4_post_all_rpi_hdrs(phba);
7971 if (unlikely(rc)) {
7972 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
7973 "0393 Error %d during rpi post operation\n",
7974 rc);
7975 rc = -ENODEV;
7976 goto out_free_iocblist;
7977 }
7978 lpfc_sli4_node_prep(phba);
7979
7980 if (!(phba->hba_flag & HBA_FCOE_MODE)) {
7981 if ((phba->nvmet_support == 0) || (phba->cfg_nvmet_mrq == 1)) {
7982
7983
7984
7985 lpfc_reg_fcfi(phba, mboxq);
7986 mboxq->vport = phba->pport;
7987 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
7988 if (rc != MBX_SUCCESS)
7989 goto out_unset_queue;
7990 rc = 0;
7991 phba->fcf.fcfi = bf_get(lpfc_reg_fcfi_fcfi,
7992 &mboxq->u.mqe.un.reg_fcfi);
7993 } else {
7994
7995
7996
7997 lpfc_reg_fcfi_mrq(phba, mboxq, 0);
7998 mboxq->vport = phba->pport;
7999 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
8000 if (rc != MBX_SUCCESS)
8001 goto out_unset_queue;
8002 rc = 0;
8003 phba->fcf.fcfi = bf_get(lpfc_reg_fcfi_mrq_fcfi,
8004 &mboxq->u.mqe.un.reg_fcfi_mrq);
8005
8006
8007 lpfc_reg_fcfi_mrq(phba, mboxq, 1);
8008 mboxq->vport = phba->pport;
8009 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
8010 if (rc != MBX_SUCCESS)
8011 goto out_unset_queue;
8012 rc = 0;
8013 }
8014
8015 lpfc_sli_read_link_ste(phba);
8016 }
8017
8018
8019
8020
8021 if (phba->nvmet_support == 0) {
8022 if (phba->sli4_hba.io_xri_cnt == 0) {
8023 len = lpfc_new_io_buf(
8024 phba, phba->sli4_hba.io_xri_max);
8025 if (len == 0) {
8026 rc = -ENOMEM;
8027 goto out_unset_queue;
8028 }
8029
8030 if (phba->cfg_xri_rebalancing)
8031 lpfc_create_multixri_pools(phba);
8032 }
8033 } else {
8034 phba->cfg_xri_rebalancing = 0;
8035 }
8036
8037
8038 spin_lock_irq(&phba->hbalock);
8039 phba->sli.sli_flag &= ~LPFC_SLI_ASYNC_MBX_BLK;
8040 spin_unlock_irq(&phba->hbalock);
8041
8042
8043 lpfc_sli4_rb_setup(phba);
8044
8045
8046 phba->fcf.fcf_flag = 0;
8047 phba->fcf.current_rec.flag = 0;
8048
8049
8050 mod_timer(&vport->els_tmofunc,
8051 jiffies + msecs_to_jiffies(1000 * (phba->fc_ratov * 2)));
8052
8053
8054 mod_timer(&phba->hb_tmofunc,
8055 jiffies + msecs_to_jiffies(1000 * LPFC_HB_MBOX_INTERVAL));
8056 phba->hba_flag &= ~(HBA_HBEAT_INP | HBA_HBEAT_TMO);
8057 phba->last_completion_time = jiffies;
8058
8059
8060 if (phba->cfg_auto_imax)
8061 queue_delayed_work(phba->wq, &phba->eq_delay_work,
8062 msecs_to_jiffies(LPFC_EQ_DELAY_MSECS));
8063
8064
8065 lpfc_init_idle_stat_hb(phba);
8066
8067
8068 mod_timer(&phba->eratt_poll,
8069 jiffies + msecs_to_jiffies(1000 * phba->eratt_poll_interval));
8070
8071
8072 if (phba->cfg_aer_support == 1 && !(phba->hba_flag & HBA_AER_ENABLED)) {
8073 rc = pci_enable_pcie_error_reporting(phba->pcidev);
8074 if (!rc) {
8075 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
8076 "2829 This device supports "
8077 "Advanced Error Reporting (AER)\n");
8078 spin_lock_irq(&phba->hbalock);
8079 phba->hba_flag |= HBA_AER_ENABLED;
8080 spin_unlock_irq(&phba->hbalock);
8081 } else {
8082 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
8083 "2830 This device does not support "
8084 "Advanced Error Reporting (AER)\n");
8085 phba->cfg_aer_support = 0;
8086 }
8087 rc = 0;
8088 }
8089
8090
8091
8092
8093
8094 spin_lock_irq(&phba->hbalock);
8095 phba->link_state = LPFC_LINK_DOWN;
8096
8097
8098 if (bf_get(lpfc_conf_trunk_port0, &phba->sli4_hba))
8099 phba->trunk_link.link0.state = LPFC_LINK_DOWN;
8100 if (bf_get(lpfc_conf_trunk_port1, &phba->sli4_hba))
8101 phba->trunk_link.link1.state = LPFC_LINK_DOWN;
8102 if (bf_get(lpfc_conf_trunk_port2, &phba->sli4_hba))
8103 phba->trunk_link.link2.state = LPFC_LINK_DOWN;
8104 if (bf_get(lpfc_conf_trunk_port3, &phba->sli4_hba))
8105 phba->trunk_link.link3.state = LPFC_LINK_DOWN;
8106 spin_unlock_irq(&phba->hbalock);
8107
8108
8109 lpfc_sli4_arm_cqeq_intr(phba);
8110
8111
8112 phba->sli4_hba.intr_enable = 1;
8113
8114 if (!(phba->hba_flag & HBA_FCOE_MODE) &&
8115 (phba->hba_flag & LINK_DISABLED)) {
8116 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
8117 "3103 Adapter Link is disabled.\n");
8118 lpfc_down_link(phba, mboxq);
8119 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
8120 if (rc != MBX_SUCCESS) {
8121 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
8122 "3104 Adapter failed to issue "
8123 "DOWN_LINK mbox cmd, rc:x%x\n", rc);
8124 goto out_io_buff_free;
8125 }
8126 } else if (phba->cfg_suppress_link_up == LPFC_INITIALIZE_LINK) {
8127
8128 if (!(phba->link_flag & LS_LOOPBACK_MODE)) {
8129 rc = phba->lpfc_hba_init_link(phba, MBX_NOWAIT);
8130 if (rc)
8131 goto out_io_buff_free;
8132 }
8133 }
8134 mempool_free(mboxq, phba->mbox_mem_pool);
8135 return rc;
8136out_io_buff_free:
8137
8138 lpfc_io_free(phba);
8139out_unset_queue:
8140
8141 lpfc_sli4_queue_unset(phba);
8142out_free_iocblist:
8143 lpfc_free_iocb_list(phba);
8144out_destroy_queue:
8145 lpfc_sli4_queue_destroy(phba);
8146out_stop_timers:
8147 lpfc_stop_hba_timers(phba);
8148out_free_mbox:
8149 mempool_free(mboxq, phba->mbox_mem_pool);
8150 return rc;
8151}
8152
8153
8154
8155
8156
8157
8158
8159
8160
8161
8162
8163
8164
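/**
 * lpfc_mbox_timeout - Mailbox command timeout timer handler
 * @t: timer context, the SLI mbox_tmo timer.
 *
 * Marks a WORKER_MBOX_TMO event on the physical port and, if the event
 * was not already posted, wakes the worker thread to handle the timeout.
 */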
8165void
8166lpfc_mbox_timeout(struct timer_list *t)
8167{
8168 struct lpfc_hba *phba = from_timer(phba, t, sli.mbox_tmo);
8169 unsigned long iflag;
8170 uint32_t tmo_posted;
8171
8172 spin_lock_irqsave(&phba->pport->work_port_lock, iflag);
8173 tmo_posted = phba->pport->work_port_events & WORKER_MBOX_TMO;
8174 if (!tmo_posted)
8175 phba->pport->work_port_events |= WORKER_MBOX_TMO;
8176 spin_unlock_irqrestore(&phba->pport->work_port_lock, iflag);
8177
8178 if (!tmo_posted)
8179 lpfc_worker_wake_up(phba);
8180 return;
8181}
8182
8183
8184
8185
8186
8187
8188
8189
8190
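/**
 * lpfc_sli4_mbox_completions_pending - Check for pending mailbox completions
 * @phba: pointer to lpfc hba data structure.
 *
 * Walks the valid entries of the mailbox completion queue looking for a
 * completed, non-asynchronous mailbox CQE that the driver has not yet
 * processed.
 *
 * Return: true if such a completion is found, otherwise false.
 */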
8191static bool
8192lpfc_sli4_mbox_completions_pending(struct lpfc_hba *phba)
8193{
8194
8195 uint32_t idx;
8196 struct lpfc_queue *mcq;
8197 struct lpfc_mcqe *mcqe;
8198 bool pending_completions = false;
8199 uint8_t qe_valid;
8200
8201 if (unlikely(!phba) || (phba->sli_rev != LPFC_SLI_REV4))
8202 return false;
8203
8204
8205
8206 mcq = phba->sli4_hba.mbx_cq;
8207 idx = mcq->hba_index;
8208 qe_valid = mcq->qe_valid;
8209 while (bf_get_le32(lpfc_cqe_valid,
8210 (struct lpfc_cqe *)lpfc_sli4_qe(mcq, idx)) == qe_valid) {
8211 mcqe = (struct lpfc_mcqe *)(lpfc_sli4_qe(mcq, idx));
8212 if (bf_get_le32(lpfc_trailer_completed, mcqe) &&
8213 (!bf_get_le32(lpfc_trailer_async, mcqe))) {
8214 pending_completions = true;
8215 break;
8216 }
8217 idx = (idx + 1) % mcq->entry_count;
8218 if (mcq->hba_index == idx)
8219 break;
8220
8221
8222 if (phba->sli4_hba.pc_sli4_params.cqav && !idx)
8223 qe_valid = (qe_valid) ? 0 : 1;
8224 }
8225 return pending_completions;
8226
8227}
8228
8229
8230
8231
8232
8233
8234
8235
8236
8237
8238
8239
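/**
 * lpfc_sli4_process_missed_mbox_completions - Recover a missed mailbox event
 * @phba: pointer to lpfc hba data structure.
 *
 * Locates the event queue associated with the mailbox completion queue,
 * clears its interrupt, and checks whether a mailbox completion is sitting
 * unprocessed on the CQ. If so, the EQ is processed (and re-armed) so the
 * completion is delivered; otherwise the EQ is simply re-armed.
 *
 * Return: true if a pending mailbox completion was found and processed.
 */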
8240static bool
8241lpfc_sli4_process_missed_mbox_completions(struct lpfc_hba *phba)
8242{
8243 struct lpfc_sli4_hba *sli4_hba = &phba->sli4_hba;
8244 uint32_t eqidx;
8245 struct lpfc_queue *fpeq = NULL;
8246 struct lpfc_queue *eq;
8247 bool mbox_pending;
8248
8249 if (unlikely(!phba) || (phba->sli_rev != LPFC_SLI_REV4))
8250 return false;
8251
8252
8253 if (sli4_hba->hdwq) {
8254 for (eqidx = 0; eqidx < phba->cfg_irq_chann; eqidx++) {
8255 eq = phba->sli4_hba.hba_eq_hdl[eqidx].eq;
8256 if (eq && eq->queue_id == sli4_hba->mbx_cq->assoc_qid) {
8257 fpeq = eq;
8258 break;
8259 }
8260 }
8261 }
8262 if (!fpeq)
8263 return false;
8264
8265
8266
8267 sli4_hba->sli4_eq_clr_intr(fpeq);
8268
8269
8270
8271 mbox_pending = lpfc_sli4_mbox_completions_pending(phba);
8272
8273
8274
8275
8276
8277
8278
8279
8280 if (mbox_pending)
8281
8282 lpfc_sli4_process_eq(phba, fpeq, LPFC_QUEUE_REARM);
8283 else
8284
8285 sli4_hba->sli4_write_eq_db(phba, fpeq, 0, LPFC_QUEUE_REARM);
8286
8287 return mbox_pending;
8288
8289}
8290
8291
8292
8293
8294
8295
8296
8297
8298
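/**
 * lpfc_mbox_timeout_handler - Worker thread handler for mailbox timeouts
 * @phba: pointer to lpfc hba data structure.
 *
 * First gives the port a chance to deliver a missed mailbox completion.
 * If the SLI layer is still active and a mailbox command remains
 * outstanding, logs the timed-out command, marks the link state unknown,
 * clears LPFC_SLI_ACTIVE and resets the HBA to recover.
 */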
8299void
8300lpfc_mbox_timeout_handler(struct lpfc_hba *phba)
8301{
8302 LPFC_MBOXQ_t *pmbox = phba->sli.mbox_active;
8303 MAILBOX_t *mb = NULL;
8304
8305 struct lpfc_sli *psli = &phba->sli;
8306
8307
8308 lpfc_sli4_process_missed_mbox_completions(phba);
8309
8310 if (!(psli->sli_flag & LPFC_SLI_ACTIVE))
8311 return;
8312
8313 if (pmbox != NULL)
8314 mb = &pmbox->u.mb;
8315
8316
8317
8318
8319
8320 spin_lock_irq(&phba->hbalock);
8321 if (pmbox == NULL) {
8322 lpfc_printf_log(phba, KERN_WARNING,
8323 LOG_MBOX | LOG_SLI,
8324 "0353 Active Mailbox cleared - mailbox timeout "
8325 "exiting\n");
8326 spin_unlock_irq(&phba->hbalock);
8327 return;
8328 }
8329
8330
8331 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
8332 "0310 Mailbox command x%x timeout Data: x%x x%x x%px\n",
8333 mb->mbxCommand,
8334 phba->pport->port_state,
8335 phba->sli.sli_flag,
8336 phba->sli.mbox_active);
8337 spin_unlock_irq(&phba->hbalock);
8338
8339
8340
8341
8342
8343 spin_lock_irq(&phba->pport->work_port_lock);
8344 phba->pport->work_port_events &= ~WORKER_MBOX_TMO;
8345 spin_unlock_irq(&phba->pport->work_port_lock);
8346 spin_lock_irq(&phba->hbalock);
8347 phba->link_state = LPFC_LINK_UNKNOWN;
8348 psli->sli_flag &= ~LPFC_SLI_ACTIVE;
8349 spin_unlock_irq(&phba->hbalock);
8350
8351 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
8352 "0345 Resetting board due to mailbox timeout\n");
8353
8354
8355 lpfc_reset_hba(phba);
8356}
8357
8358
8359
8360
8361
8362
8363
8364
8365
8366
8367
8368
8369
8370
8371
8372
8373
8374
8375
8376
8377
8378
8379
8380
8381
8382
8383
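/**
 * lpfc_sli_issue_mbox_s3 - Issue a mailbox command to an SLI-3 HBA
 * @phba: pointer to lpfc hba data structure.
 * @pmbox: mailbox command to issue, or NULL to service the pending queue.
 * @flag: MBX_POLL to busy-wait for completion or MBX_NOWAIT to return
 *        after ringing the mailbox attention doorbell.
 *
 * Copies the command into host mailbox memory (or SLIM when the port is
 * not yet active), rings the mailbox attention doorbell and either returns
 * immediately (MBX_NOWAIT) or polls ownership/attention until the command
 * completes or the mailbox timeout expires (MBX_POLL).
 *
 * Return: MBX_SUCCESS or the mailbox status for polled commands, MBX_BUSY
 * if another command is already active, or MBX_NOT_FINISHED on failure.
 */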
8384static int
8385lpfc_sli_issue_mbox_s3(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmbox,
8386 uint32_t flag)
8387{
8388 MAILBOX_t *mbx;
8389 struct lpfc_sli *psli = &phba->sli;
8390 uint32_t status, evtctr;
8391 uint32_t ha_copy, hc_copy;
8392 int i;
8393 unsigned long timeout;
8394 unsigned long drvr_flag = 0;
8395 uint32_t word0, ldata;
8396 void __iomem *to_slim;
8397 int processing_queue = 0;
8398
8399 spin_lock_irqsave(&phba->hbalock, drvr_flag);
8400 if (!pmbox) {
8401 phba->sli.sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
8402
8403 if (unlikely(psli->sli_flag & LPFC_SLI_ASYNC_MBX_BLK)) {
8404 spin_unlock_irqrestore(&phba->hbalock, drvr_flag);
8405 return MBX_SUCCESS;
8406 }
8407 processing_queue = 1;
8408 pmbox = lpfc_mbox_get(phba);
8409 if (!pmbox) {
8410 spin_unlock_irqrestore(&phba->hbalock, drvr_flag);
8411 return MBX_SUCCESS;
8412 }
8413 }
8414
8415 if (pmbox->mbox_cmpl && pmbox->mbox_cmpl != lpfc_sli_def_mbox_cmpl &&
8416 pmbox->mbox_cmpl != lpfc_sli_wake_mbox_wait) {
8417 		if (!pmbox->vport) {
8418 spin_unlock_irqrestore(&phba->hbalock, drvr_flag);
8419 lpfc_printf_log(phba, KERN_ERR,
8420 LOG_MBOX | LOG_VPORT,
8421 "1806 Mbox x%x failed. No vport\n",
8422 pmbox->u.mb.mbxCommand);
8423 dump_stack();
8424 goto out_not_finished;
8425 }
8426 }
8427
8428
8429 if (unlikely(pci_channel_offline(phba->pcidev))) {
8430 spin_unlock_irqrestore(&phba->hbalock, drvr_flag);
8431 goto out_not_finished;
8432 }
8433
8434
8435 if (unlikely(phba->hba_flag & DEFER_ERATT)) {
8436 spin_unlock_irqrestore(&phba->hbalock, drvr_flag);
8437 goto out_not_finished;
8438 }
8439
8440 psli = &phba->sli;
8441
8442 mbx = &pmbox->u.mb;
8443 status = MBX_SUCCESS;
8444
8445 if (phba->link_state == LPFC_HBA_ERROR) {
8446 spin_unlock_irqrestore(&phba->hbalock, drvr_flag);
8447
8448
8449 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
8450 "(%d):0311 Mailbox command x%x cannot "
8451 "issue Data: x%x x%x\n",
8452 pmbox->vport ? pmbox->vport->vpi : 0,
8453 pmbox->u.mb.mbxCommand, psli->sli_flag, flag);
8454 goto out_not_finished;
8455 }
8456
8457 if (mbx->mbxCommand != MBX_KILL_BOARD && flag & MBX_NOWAIT) {
8458 if (lpfc_readl(phba->HCregaddr, &hc_copy) ||
8459 !(hc_copy & HC_MBINT_ENA)) {
8460 spin_unlock_irqrestore(&phba->hbalock, drvr_flag);
8461 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
8462 "(%d):2528 Mailbox command x%x cannot "
8463 "issue Data: x%x x%x\n",
8464 pmbox->vport ? pmbox->vport->vpi : 0,
8465 pmbox->u.mb.mbxCommand, psli->sli_flag, flag);
8466 goto out_not_finished;
8467 }
8468 }
8469
8470 if (psli->sli_flag & LPFC_SLI_MBOX_ACTIVE) {
8471
8472
8473
8474
8475
8476 if (flag & MBX_POLL) {
8477 spin_unlock_irqrestore(&phba->hbalock, drvr_flag);
8478
8479
8480 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
8481 "(%d):2529 Mailbox command x%x "
8482 "cannot issue Data: x%x x%x\n",
8483 pmbox->vport ? pmbox->vport->vpi : 0,
8484 pmbox->u.mb.mbxCommand,
8485 psli->sli_flag, flag);
8486 goto out_not_finished;
8487 }
8488
8489 if (!(psli->sli_flag & LPFC_SLI_ACTIVE)) {
8490 spin_unlock_irqrestore(&phba->hbalock, drvr_flag);
8491
8492 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
8493 "(%d):2530 Mailbox command x%x "
8494 "cannot issue Data: x%x x%x\n",
8495 pmbox->vport ? pmbox->vport->vpi : 0,
8496 pmbox->u.mb.mbxCommand,
8497 psli->sli_flag, flag);
8498 goto out_not_finished;
8499 }
8500
8501
8502
8503
8504 lpfc_mbox_put(phba, pmbox);
8505
8506
8507 lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI,
8508 "(%d):0308 Mbox cmd issue - BUSY Data: "
8509 "x%x x%x x%x x%x\n",
8510 pmbox->vport ? pmbox->vport->vpi : 0xffffff,
8511 mbx->mbxCommand,
8512 phba->pport ? phba->pport->port_state : 0xff,
8513 psli->sli_flag, flag);
8514
8515 psli->slistat.mbox_busy++;
8516 spin_unlock_irqrestore(&phba->hbalock, drvr_flag);
8517
8518 if (pmbox->vport) {
8519 lpfc_debugfs_disc_trc(pmbox->vport,
8520 LPFC_DISC_TRC_MBOX_VPORT,
8521 "MBOX Bsy vport: cmd:x%x mb:x%x x%x",
8522 (uint32_t)mbx->mbxCommand,
8523 mbx->un.varWords[0], mbx->un.varWords[1]);
8524 }
8525 else {
8526 lpfc_debugfs_disc_trc(phba->pport,
8527 LPFC_DISC_TRC_MBOX,
8528 "MBOX Bsy: cmd:x%x mb:x%x x%x",
8529 (uint32_t)mbx->mbxCommand,
8530 mbx->un.varWords[0], mbx->un.varWords[1]);
8531 }
8532
8533 return MBX_BUSY;
8534 }
8535
8536 psli->sli_flag |= LPFC_SLI_MBOX_ACTIVE;
8537
8538
8539 if (flag != MBX_POLL) {
8540 if (!(psli->sli_flag & LPFC_SLI_ACTIVE) &&
8541 (mbx->mbxCommand != MBX_KILL_BOARD)) {
8542 psli->sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
8543 spin_unlock_irqrestore(&phba->hbalock, drvr_flag);
8544
8545 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
8546 "(%d):2531 Mailbox command x%x "
8547 "cannot issue Data: x%x x%x\n",
8548 pmbox->vport ? pmbox->vport->vpi : 0,
8549 pmbox->u.mb.mbxCommand,
8550 psli->sli_flag, flag);
8551 goto out_not_finished;
8552 }
8553
8554 timeout = msecs_to_jiffies(lpfc_mbox_tmo_val(phba, pmbox) *
8555 1000);
8556 mod_timer(&psli->mbox_tmo, jiffies + timeout);
8557 }
8558
8559
8560 lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI,
8561 "(%d):0309 Mailbox cmd x%x issue Data: x%x x%x "
8562 "x%x\n",
8563 pmbox->vport ? pmbox->vport->vpi : 0,
8564 mbx->mbxCommand,
8565 phba->pport ? phba->pport->port_state : 0xff,
8566 psli->sli_flag, flag);
8567
8568 if (mbx->mbxCommand != MBX_HEARTBEAT) {
8569 if (pmbox->vport) {
8570 lpfc_debugfs_disc_trc(pmbox->vport,
8571 LPFC_DISC_TRC_MBOX_VPORT,
8572 "MBOX Send vport: cmd:x%x mb:x%x x%x",
8573 (uint32_t)mbx->mbxCommand,
8574 mbx->un.varWords[0], mbx->un.varWords[1]);
8575 }
8576 else {
8577 lpfc_debugfs_disc_trc(phba->pport,
8578 LPFC_DISC_TRC_MBOX,
8579 "MBOX Send: cmd:x%x mb:x%x x%x",
8580 (uint32_t)mbx->mbxCommand,
8581 mbx->un.varWords[0], mbx->un.varWords[1]);
8582 }
8583 }
8584
8585 psli->slistat.mbox_cmd++;
8586 evtctr = psli->slistat.mbox_event;
8587
8588
8589 mbx->mbxOwner = OWN_CHIP;
8590
8591 if (psli->sli_flag & LPFC_SLI_ACTIVE) {
8592
8593 if (pmbox->in_ext_byte_len || pmbox->out_ext_byte_len) {
8594 *(((uint32_t *)mbx) + pmbox->mbox_offset_word)
8595 = (uint8_t *)phba->mbox_ext
8596 - (uint8_t *)phba->mbox;
8597 }
8598
8599
8600 if (pmbox->in_ext_byte_len && pmbox->ctx_buf) {
8601 lpfc_sli_pcimem_bcopy(pmbox->ctx_buf,
8602 (uint8_t *)phba->mbox_ext,
8603 pmbox->in_ext_byte_len);
8604 }
8605
8606 lpfc_sli_pcimem_bcopy(mbx, phba->mbox, MAILBOX_CMD_SIZE);
8607 } else {
8608
8609 if (pmbox->in_ext_byte_len || pmbox->out_ext_byte_len)
8610 *(((uint32_t *)mbx) + pmbox->mbox_offset_word)
8611 = MAILBOX_HBA_EXT_OFFSET;
8612
8613
8614 if (pmbox->in_ext_byte_len && pmbox->ctx_buf)
8615 lpfc_memcpy_to_slim(phba->MBslimaddr +
8616 MAILBOX_HBA_EXT_OFFSET,
8617 pmbox->ctx_buf, pmbox->in_ext_byte_len);
8618
8619 if (mbx->mbxCommand == MBX_CONFIG_PORT)
8620
8621 lpfc_sli_pcimem_bcopy(mbx, phba->mbox,
8622 MAILBOX_CMD_SIZE);
8623
8624
8625
8626 to_slim = phba->MBslimaddr + sizeof (uint32_t);
8627 lpfc_memcpy_to_slim(to_slim, &mbx->un.varWords[0],
8628 MAILBOX_CMD_SIZE - sizeof (uint32_t));
8629
8630
8631 ldata = *((uint32_t *)mbx);
8632 to_slim = phba->MBslimaddr;
8633 writel(ldata, to_slim);
8634 readl(to_slim);
8635
8636 if (mbx->mbxCommand == MBX_CONFIG_PORT)
8637
8638 psli->sli_flag |= LPFC_SLI_ACTIVE;
8639 }
8640
8641 wmb();
8642
8643 switch (flag) {
8644 case MBX_NOWAIT:
8645
8646 psli->mbox_active = pmbox;
8647
8648 writel(CA_MBATT, phba->CAregaddr);
8649 readl(phba->CAregaddr);
8650
8651 break;
8652
8653 case MBX_POLL:
8654
8655 psli->mbox_active = NULL;
8656
8657 writel(CA_MBATT, phba->CAregaddr);
8658 readl(phba->CAregaddr);
8659
8660 if (psli->sli_flag & LPFC_SLI_ACTIVE) {
8661
8662 word0 = *((uint32_t *)phba->mbox);
8663 word0 = le32_to_cpu(word0);
8664 } else {
8665
8666 if (lpfc_readl(phba->MBslimaddr, &word0)) {
8667 spin_unlock_irqrestore(&phba->hbalock,
8668 drvr_flag);
8669 goto out_not_finished;
8670 }
8671 }
8672
8673
8674 if (lpfc_readl(phba->HAregaddr, &ha_copy)) {
8675 spin_unlock_irqrestore(&phba->hbalock,
8676 drvr_flag);
8677 goto out_not_finished;
8678 }
8679 timeout = msecs_to_jiffies(lpfc_mbox_tmo_val(phba, pmbox) *
8680 1000) + jiffies;
8681 i = 0;
8682
8683 while (((word0 & OWN_CHIP) == OWN_CHIP) ||
8684 (!(ha_copy & HA_MBATT) &&
8685 (phba->link_state > LPFC_WARM_START))) {
8686 if (time_after(jiffies, timeout)) {
8687 psli->sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
8688 spin_unlock_irqrestore(&phba->hbalock,
8689 drvr_flag);
8690 goto out_not_finished;
8691 }
8692
8693
8694
8695 if (((word0 & OWN_CHIP) != OWN_CHIP)
8696 && (evtctr != psli->slistat.mbox_event))
8697 break;
8698
8699 if (i++ > 10) {
8700 spin_unlock_irqrestore(&phba->hbalock,
8701 drvr_flag);
8702 msleep(1);
8703 spin_lock_irqsave(&phba->hbalock, drvr_flag);
8704 }
8705
8706 if (psli->sli_flag & LPFC_SLI_ACTIVE) {
8707
8708 word0 = *((uint32_t *)phba->mbox);
8709 word0 = le32_to_cpu(word0);
8710 if (mbx->mbxCommand == MBX_CONFIG_PORT) {
8711 MAILBOX_t *slimmb;
8712 uint32_t slimword0;
8713
8714 slimword0 = readl(phba->MBslimaddr);
8715 slimmb = (MAILBOX_t *) & slimword0;
8716 if (((slimword0 & OWN_CHIP) != OWN_CHIP)
8717 && slimmb->mbxStatus) {
8718 psli->sli_flag &=
8719 ~LPFC_SLI_ACTIVE;
8720 word0 = slimword0;
8721 }
8722 }
8723 } else {
8724
8725 word0 = readl(phba->MBslimaddr);
8726 }
8727
8728 if (lpfc_readl(phba->HAregaddr, &ha_copy)) {
8729 spin_unlock_irqrestore(&phba->hbalock,
8730 drvr_flag);
8731 goto out_not_finished;
8732 }
8733 }
8734
8735 if (psli->sli_flag & LPFC_SLI_ACTIVE) {
8736
8737 lpfc_sli_pcimem_bcopy(phba->mbox, mbx,
8738 MAILBOX_CMD_SIZE);
8739
8740 if (pmbox->out_ext_byte_len && pmbox->ctx_buf) {
8741 lpfc_sli_pcimem_bcopy(phba->mbox_ext,
8742 pmbox->ctx_buf,
8743 pmbox->out_ext_byte_len);
8744 }
8745 } else {
8746
8747 lpfc_memcpy_from_slim(mbx, phba->MBslimaddr,
8748 MAILBOX_CMD_SIZE);
8749
8750 if (pmbox->out_ext_byte_len && pmbox->ctx_buf) {
8751 lpfc_memcpy_from_slim(
8752 pmbox->ctx_buf,
8753 phba->MBslimaddr +
8754 MAILBOX_HBA_EXT_OFFSET,
8755 pmbox->out_ext_byte_len);
8756 }
8757 }
8758
8759 writel(HA_MBATT, phba->HAregaddr);
8760 readl(phba->HAregaddr);
8761
8762 psli->sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
8763 status = mbx->mbxStatus;
8764 }
8765
8766 spin_unlock_irqrestore(&phba->hbalock, drvr_flag);
8767 return status;
8768
8769out_not_finished:
8770 if (processing_queue) {
8771 pmbox->u.mb.mbxStatus = MBX_NOT_FINISHED;
8772 lpfc_mbox_cmpl_put(phba, pmbox);
8773 }
8774 return MBX_NOT_FINISHED;
8775}
8776
8777
8778
8779
8780
8781
8782
8783
8784
8785
8786
8787
8788
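/**
 * lpfc_sli4_async_mbox_block - Block posting of asynchronous mailbox commands
 * @phba: pointer to lpfc hba data structure.
 *
 * Sets LPFC_SLI_ASYNC_MBX_BLK and then waits for any currently active
 * mailbox command to finish, bounded by that command's mailbox timeout.
 * If the wait times out the block flag is cleared again.
 *
 * Return: 0 when the mailbox subsystem is quiesced, 1 on timeout.
 */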
8789static int
8790lpfc_sli4_async_mbox_block(struct lpfc_hba *phba)
8791{
8792 struct lpfc_sli *psli = &phba->sli;
8793 int rc = 0;
8794 unsigned long timeout = 0;
8795
8796
8797 spin_lock_irq(&phba->hbalock);
8798 psli->sli_flag |= LPFC_SLI_ASYNC_MBX_BLK;
8799
8800
8801
8802 if (phba->sli.mbox_active)
8803 timeout = msecs_to_jiffies(lpfc_mbox_tmo_val(phba,
8804 phba->sli.mbox_active) *
8805 1000) + jiffies;
8806 spin_unlock_irq(&phba->hbalock);
8807
8808
8809 if (timeout)
8810 lpfc_sli4_process_missed_mbox_completions(phba);
8811
8812
8813 while (phba->sli.mbox_active) {
8814
8815 msleep(2);
8816 if (time_after(jiffies, timeout)) {
8817
8818 rc = 1;
8819 break;
8820 }
8821 }
8822
8823
8824 if (rc) {
8825 spin_lock_irq(&phba->hbalock);
8826 psli->sli_flag &= ~LPFC_SLI_ASYNC_MBX_BLK;
8827 spin_unlock_irq(&phba->hbalock);
8828 }
8829 return rc;
8830}
8831
8832
8833
8834
8835
8836
8837
8838
8839
8840
8841
8842
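/**
 * lpfc_sli4_async_mbox_unblock - Re-enable asynchronous mailbox posting
 * @phba: pointer to lpfc hba data structure.
 *
 * Clears LPFC_SLI_ASYNC_MBX_BLK (if it was set) and wakes the worker
 * thread so that any queued mailbox commands get posted.
 */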
8843static void
8844lpfc_sli4_async_mbox_unblock(struct lpfc_hba *phba)
8845{
8846 struct lpfc_sli *psli = &phba->sli;
8847
8848 spin_lock_irq(&phba->hbalock);
8849 if (!(psli->sli_flag & LPFC_SLI_ASYNC_MBX_BLK)) {
8850
8851 spin_unlock_irq(&phba->hbalock);
8852 return;
8853 }
8854
8855
8856
8857
8858
8859
8860 psli->sli_flag &= ~LPFC_SLI_ASYNC_MBX_BLK;
8861 spin_unlock_irq(&phba->hbalock);
8862
8863
8864 lpfc_worker_wake_up(phba);
8865}
8866
8867
8868
8869
8870
8871
8872
8873
8874
8875
8876
8877
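/**
 * lpfc_sli4_wait_bmbx_ready - Wait for the bootstrap mailbox to become ready
 * @phba: pointer to lpfc hba data structure.
 * @mboxq: mailbox command used to derive the timeout value.
 *
 * Polls the bootstrap mailbox register for the ready bit, delaying 2ms
 * between reads, until the bit is set or the mailbox timeout expires.
 *
 * Return: 0 when ready, MBXERR_ERROR on timeout.
 */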
8878static int
8879lpfc_sli4_wait_bmbx_ready(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
8880{
8881 uint32_t db_ready;
8882 unsigned long timeout;
8883 struct lpfc_register bmbx_reg;
8884
8885 timeout = msecs_to_jiffies(lpfc_mbox_tmo_val(phba, mboxq)
8886 * 1000) + jiffies;
8887
8888 do {
8889 bmbx_reg.word0 = readl(phba->sli4_hba.BMBXregaddr);
8890 db_ready = bf_get(lpfc_bmbx_rdy, &bmbx_reg);
8891 if (!db_ready)
8892 mdelay(2);
8893
8894 if (time_after(jiffies, timeout))
8895 return MBXERR_ERROR;
8896 } while (!db_ready);
8897
8898 return 0;
8899}
8900
8901
8902
8903
8904
8905
8906
8907
8908
8909
8910
8911
8912
8913
8914
8915
8916
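/**
 * lpfc_sli4_post_sync_mbox - Post a mailbox command via the bootstrap mailbox
 * @phba: pointer to lpfc hba data structure.
 * @mboxq: mailbox command to post.
 *
 * Issues a mailbox command synchronously through the SLI4 bootstrap
 * mailbox: the MQE is copied into the bootstrap mailbox memory, the high
 * and low halves of its DMA address are written to the BMBX register
 * (waiting for the ready bit around each step), and the completed MQE and
 * MCQE are copied back. A non-successful MCQE status is folded into the
 * MQE status.
 *
 * Return: MBX_SUCCESS, or MBXERR_ERROR if the port never became ready,
 * another mailbox command was already active, or the MCQE reported an
 * error.
 */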
8917static int
8918lpfc_sli4_post_sync_mbox(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
8919{
8920 int rc = MBX_SUCCESS;
8921 unsigned long iflag;
8922 uint32_t mcqe_status;
8923 uint32_t mbx_cmnd;
8924 struct lpfc_sli *psli = &phba->sli;
8925 struct lpfc_mqe *mb = &mboxq->u.mqe;
8926 struct lpfc_bmbx_create *mbox_rgn;
8927 struct dma_address *dma_address;
8928
8929
8930
8931
8932
8933 spin_lock_irqsave(&phba->hbalock, iflag);
8934 if (psli->sli_flag & LPFC_SLI_MBOX_ACTIVE) {
8935 spin_unlock_irqrestore(&phba->hbalock, iflag);
8936 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
8937 "(%d):2532 Mailbox command x%x (x%x/x%x) "
8938 "cannot issue Data: x%x x%x\n",
8939 mboxq->vport ? mboxq->vport->vpi : 0,
8940 mboxq->u.mb.mbxCommand,
8941 lpfc_sli_config_mbox_subsys_get(phba, mboxq),
8942 lpfc_sli_config_mbox_opcode_get(phba, mboxq),
8943 psli->sli_flag, MBX_POLL);
8944 return MBXERR_ERROR;
8945 }
8946
8947 psli->sli_flag |= LPFC_SLI_MBOX_ACTIVE;
8948 phba->sli.mbox_active = mboxq;
8949 spin_unlock_irqrestore(&phba->hbalock, iflag);
8950
8951
8952 rc = lpfc_sli4_wait_bmbx_ready(phba, mboxq);
8953 if (rc)
8954 goto exit;
8955
8956
8957
8958
8959
8960 mbx_cmnd = bf_get(lpfc_mqe_command, mb);
8961 memset(phba->sli4_hba.bmbx.avirt, 0, sizeof(struct lpfc_bmbx_create));
8962 lpfc_sli4_pcimem_bcopy(mb, phba->sli4_hba.bmbx.avirt,
8963 sizeof(struct lpfc_mqe));
8964
8965
8966 dma_address = &phba->sli4_hba.bmbx.dma_address;
8967 writel(dma_address->addr_hi, phba->sli4_hba.BMBXregaddr);
8968
8969
8970 rc = lpfc_sli4_wait_bmbx_ready(phba, mboxq);
8971 if (rc)
8972 goto exit;
8973
8974
8975 writel(dma_address->addr_lo, phba->sli4_hba.BMBXregaddr);
8976
8977
8978 rc = lpfc_sli4_wait_bmbx_ready(phba, mboxq);
8979 if (rc)
8980 goto exit;
8981
8982
8983
8984
8985
8986
8987 lpfc_sli4_pcimem_bcopy(phba->sli4_hba.bmbx.avirt, mb,
8988 sizeof(struct lpfc_mqe));
8989 mbox_rgn = (struct lpfc_bmbx_create *) phba->sli4_hba.bmbx.avirt;
8990 lpfc_sli4_pcimem_bcopy(&mbox_rgn->mcqe, &mboxq->mcqe,
8991 sizeof(struct lpfc_mcqe));
8992 mcqe_status = bf_get(lpfc_mcqe_status, &mbox_rgn->mcqe);
8993
8994
8995
8996
8997
8998 if (mcqe_status != MB_CQE_STATUS_SUCCESS) {
8999 if (bf_get(lpfc_mqe_status, mb) == MBX_SUCCESS)
9000 bf_set(lpfc_mqe_status, mb,
9001 (LPFC_MBX_ERROR_RANGE | mcqe_status));
9002 rc = MBXERR_ERROR;
9003 } else
9004 lpfc_sli4_swap_str(phba, mboxq);
9005
9006 lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI,
9007 "(%d):0356 Mailbox cmd x%x (x%x/x%x) Status x%x "
9008 "Data: x%x x%x x%x x%x x%x x%x x%x x%x x%x x%x x%x"
9009 " x%x x%x CQ: x%x x%x x%x x%x\n",
9010 mboxq->vport ? mboxq->vport->vpi : 0, mbx_cmnd,
9011 lpfc_sli_config_mbox_subsys_get(phba, mboxq),
9012 lpfc_sli_config_mbox_opcode_get(phba, mboxq),
9013 bf_get(lpfc_mqe_status, mb),
9014 mb->un.mb_words[0], mb->un.mb_words[1],
9015 mb->un.mb_words[2], mb->un.mb_words[3],
9016 mb->un.mb_words[4], mb->un.mb_words[5],
9017 mb->un.mb_words[6], mb->un.mb_words[7],
9018 mb->un.mb_words[8], mb->un.mb_words[9],
9019 mb->un.mb_words[10], mb->un.mb_words[11],
9020 mb->un.mb_words[12], mboxq->mcqe.word0,
9021 mboxq->mcqe.mcqe_tag0, mboxq->mcqe.mcqe_tag1,
9022 mboxq->mcqe.trailer);
9023exit:
9024
9025 spin_lock_irqsave(&phba->hbalock, iflag);
9026 psli->sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
9027 phba->sli.mbox_active = NULL;
9028 spin_unlock_irqrestore(&phba->hbalock, iflag);
9029 return rc;
9030}
9031
9032
9033
9034
9035
9036
9037
9038
9039
9040
9041
9042
9043
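/**
 * lpfc_sli_issue_mbox_s4 - Issue a mailbox command to an SLI-4 HBA
 * @phba: pointer to lpfc hba data structure.
 * @mboxq: mailbox command to issue.
 * @flag: MBX_POLL for synchronous issue or MBX_NOWAIT for asynchronous.
 *
 * With interrupts disabled, or when MBX_POLL is requested, the command is
 * posted synchronously through the bootstrap mailbox (blocking the async
 * mailbox path first in the polled case). Otherwise the command is
 * enqueued on the driver mailbox queue and the worker thread is woken to
 * post it to the mailbox work queue.
 *
 * Return: MBX_SUCCESS or an error status for synchronous issue, MBX_BUSY
 * when the command was queued, or MBX_NOT_FINISHED on failure.
 */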
9044static int
9045lpfc_sli_issue_mbox_s4(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq,
9046 uint32_t flag)
9047{
9048 struct lpfc_sli *psli = &phba->sli;
9049 unsigned long iflags;
9050 int rc;
9051
9052
9053 lpfc_idiag_mbxacc_dump_issue_mbox(phba, &mboxq->u.mb);
9054
9055 rc = lpfc_mbox_dev_check(phba);
9056 if (unlikely(rc)) {
9057 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
9058 "(%d):2544 Mailbox command x%x (x%x/x%x) "
9059 "cannot issue Data: x%x x%x\n",
9060 mboxq->vport ? mboxq->vport->vpi : 0,
9061 mboxq->u.mb.mbxCommand,
9062 lpfc_sli_config_mbox_subsys_get(phba, mboxq),
9063 lpfc_sli_config_mbox_opcode_get(phba, mboxq),
9064 psli->sli_flag, flag);
9065 goto out_not_finished;
9066 }
9067
9068
9069 if (!phba->sli4_hba.intr_enable) {
9070 if (flag == MBX_POLL)
9071 rc = lpfc_sli4_post_sync_mbox(phba, mboxq);
9072 else
9073 rc = -EIO;
9074 if (rc != MBX_SUCCESS)
9075 lpfc_printf_log(phba, KERN_WARNING, LOG_MBOX | LOG_SLI,
9076 "(%d):2541 Mailbox command x%x "
9077 "(x%x/x%x) failure: "
9078 "mqe_sta: x%x mcqe_sta: x%x/x%x "
9079 				"Data: x%x x%x\n",
9080 mboxq->vport ? mboxq->vport->vpi : 0,
9081 mboxq->u.mb.mbxCommand,
9082 lpfc_sli_config_mbox_subsys_get(phba,
9083 mboxq),
9084 lpfc_sli_config_mbox_opcode_get(phba,
9085 mboxq),
9086 bf_get(lpfc_mqe_status, &mboxq->u.mqe),
9087 bf_get(lpfc_mcqe_status, &mboxq->mcqe),
9088 bf_get(lpfc_mcqe_ext_status,
9089 &mboxq->mcqe),
9090 psli->sli_flag, flag);
9091 return rc;
9092 } else if (flag == MBX_POLL) {
9093 lpfc_printf_log(phba, KERN_WARNING, LOG_MBOX | LOG_SLI,
9094 "(%d):2542 Try to issue mailbox command "
9095 "x%x (x%x/x%x) synchronously ahead of async "
9096 "mailbox command queue: x%x x%x\n",
9097 mboxq->vport ? mboxq->vport->vpi : 0,
9098 mboxq->u.mb.mbxCommand,
9099 lpfc_sli_config_mbox_subsys_get(phba, mboxq),
9100 lpfc_sli_config_mbox_opcode_get(phba, mboxq),
9101 psli->sli_flag, flag);
9102
9103 rc = lpfc_sli4_async_mbox_block(phba);
9104 if (!rc) {
9105
9106 rc = lpfc_sli4_post_sync_mbox(phba, mboxq);
9107 if (rc != MBX_SUCCESS)
9108 lpfc_printf_log(phba, KERN_WARNING,
9109 LOG_MBOX | LOG_SLI,
9110 "(%d):2597 Sync Mailbox command "
9111 "x%x (x%x/x%x) failure: "
9112 "mqe_sta: x%x mcqe_sta: x%x/x%x "
9113 				"Data: x%x x%x\n",
9114 mboxq->vport ? mboxq->vport->vpi : 0,
9115 mboxq->u.mb.mbxCommand,
9116 lpfc_sli_config_mbox_subsys_get(phba,
9117 mboxq),
9118 lpfc_sli_config_mbox_opcode_get(phba,
9119 mboxq),
9120 bf_get(lpfc_mqe_status, &mboxq->u.mqe),
9121 bf_get(lpfc_mcqe_status, &mboxq->mcqe),
9122 bf_get(lpfc_mcqe_ext_status,
9123 &mboxq->mcqe),
9124 psli->sli_flag, flag);
9125
9126 lpfc_sli4_async_mbox_unblock(phba);
9127 }
9128 return rc;
9129 }
9130
9131
9132 rc = lpfc_mbox_cmd_check(phba, mboxq);
9133 if (rc) {
9134 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
9135 "(%d):2543 Mailbox command x%x (x%x/x%x) "
9136 "cannot issue Data: x%x x%x\n",
9137 mboxq->vport ? mboxq->vport->vpi : 0,
9138 mboxq->u.mb.mbxCommand,
9139 lpfc_sli_config_mbox_subsys_get(phba, mboxq),
9140 lpfc_sli_config_mbox_opcode_get(phba, mboxq),
9141 psli->sli_flag, flag);
9142 goto out_not_finished;
9143 }
9144
9145
9146 psli->slistat.mbox_busy++;
9147 spin_lock_irqsave(&phba->hbalock, iflags);
9148 lpfc_mbox_put(phba, mboxq);
9149 spin_unlock_irqrestore(&phba->hbalock, iflags);
9150 lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI,
9151 "(%d):0354 Mbox cmd issue - Enqueue Data: "
9152 "x%x (x%x/x%x) x%x x%x x%x\n",
9153 mboxq->vport ? mboxq->vport->vpi : 0xffffff,
9154 bf_get(lpfc_mqe_command, &mboxq->u.mqe),
9155 lpfc_sli_config_mbox_subsys_get(phba, mboxq),
9156 lpfc_sli_config_mbox_opcode_get(phba, mboxq),
9157 phba->pport->port_state,
9158 psli->sli_flag, MBX_NOWAIT);
9159
9160 lpfc_worker_wake_up(phba);
9161
9162 return MBX_BUSY;
9163
9164out_not_finished:
9165 return MBX_NOT_FINISHED;
9166}
9167
9168
9169
9170
9171
9172
9173
9174
9175
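/**
 * lpfc_sli4_post_async_mbox - Post the next queued mailbox command to the MQ
 * @phba: pointer to lpfc hba data structure.
 *
 * Called from the worker thread. If no mailbox command is active and the
 * async path is not blocked, dequeues the next mailbox command, marks it
 * active, starts the mailbox timeout timer and posts the MQE to the
 * mailbox work queue.
 *
 * Return: MBX_SUCCESS when a command was posted (or none was pending),
 * MBX_NOT_FINISHED otherwise.
 */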
9176int
9177lpfc_sli4_post_async_mbox(struct lpfc_hba *phba)
9178{
9179 struct lpfc_sli *psli = &phba->sli;
9180 LPFC_MBOXQ_t *mboxq;
9181 int rc = MBX_SUCCESS;
9182 unsigned long iflags;
9183 struct lpfc_mqe *mqe;
9184 uint32_t mbx_cmnd;
9185
9186
9187 if (unlikely(!phba->sli4_hba.intr_enable))
9188 return MBX_NOT_FINISHED;
9189
9190
9191 spin_lock_irqsave(&phba->hbalock, iflags);
9192 if (unlikely(psli->sli_flag & LPFC_SLI_ASYNC_MBX_BLK)) {
9193 spin_unlock_irqrestore(&phba->hbalock, iflags);
9194 return MBX_NOT_FINISHED;
9195 }
9196 if (psli->sli_flag & LPFC_SLI_MBOX_ACTIVE) {
9197 spin_unlock_irqrestore(&phba->hbalock, iflags);
9198 return MBX_NOT_FINISHED;
9199 }
9200 if (unlikely(phba->sli.mbox_active)) {
9201 spin_unlock_irqrestore(&phba->hbalock, iflags);
9202 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
9203 "0384 There is pending active mailbox cmd\n");
9204 return MBX_NOT_FINISHED;
9205 }
9206
9207 psli->sli_flag |= LPFC_SLI_MBOX_ACTIVE;
9208
9209
9210 mboxq = lpfc_mbox_get(phba);
9211
9212
9213 if (!mboxq) {
9214 psli->sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
9215 spin_unlock_irqrestore(&phba->hbalock, iflags);
9216 return MBX_SUCCESS;
9217 }
9218 phba->sli.mbox_active = mboxq;
9219 spin_unlock_irqrestore(&phba->hbalock, iflags);
9220
9221
9222 rc = lpfc_mbox_dev_check(phba);
9223 if (unlikely(rc))
9224
9225 goto out_not_finished;
9226
9227
9228 mqe = &mboxq->u.mqe;
9229 mbx_cmnd = bf_get(lpfc_mqe_command, mqe);
9230
9231
9232 mod_timer(&psli->mbox_tmo, (jiffies +
9233 msecs_to_jiffies(1000 * lpfc_mbox_tmo_val(phba, mboxq))));
9234
9235 lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI,
9236 "(%d):0355 Mailbox cmd x%x (x%x/x%x) issue Data: "
9237 "x%x x%x\n",
9238 mboxq->vport ? mboxq->vport->vpi : 0, mbx_cmnd,
9239 lpfc_sli_config_mbox_subsys_get(phba, mboxq),
9240 lpfc_sli_config_mbox_opcode_get(phba, mboxq),
9241 phba->pport->port_state, psli->sli_flag);
9242
9243 if (mbx_cmnd != MBX_HEARTBEAT) {
9244 if (mboxq->vport) {
9245 lpfc_debugfs_disc_trc(mboxq->vport,
9246 LPFC_DISC_TRC_MBOX_VPORT,
9247 "MBOX Send vport: cmd:x%x mb:x%x x%x",
9248 mbx_cmnd, mqe->un.mb_words[0],
9249 mqe->un.mb_words[1]);
9250 } else {
9251 lpfc_debugfs_disc_trc(phba->pport,
9252 LPFC_DISC_TRC_MBOX,
9253 "MBOX Send: cmd:x%x mb:x%x x%x",
9254 mbx_cmnd, mqe->un.mb_words[0],
9255 mqe->un.mb_words[1]);
9256 }
9257 }
9258 psli->slistat.mbox_cmd++;
9259
9260
9261 rc = lpfc_sli4_mq_put(phba->sli4_hba.mbx_wq, mqe);
9262 if (rc != MBX_SUCCESS) {
9263 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
9264 "(%d):2533 Mailbox command x%x (x%x/x%x) "
9265 "cannot issue Data: x%x x%x\n",
9266 mboxq->vport ? mboxq->vport->vpi : 0,
9267 mboxq->u.mb.mbxCommand,
9268 lpfc_sli_config_mbox_subsys_get(phba, mboxq),
9269 lpfc_sli_config_mbox_opcode_get(phba, mboxq),
9270 psli->sli_flag, MBX_NOWAIT);
9271 goto out_not_finished;
9272 }
9273
9274 return rc;
9275
9276out_not_finished:
9277 spin_lock_irqsave(&phba->hbalock, iflags);
9278 if (phba->sli.mbox_active) {
9279 mboxq->u.mb.mbxStatus = MBX_NOT_FINISHED;
9280 __lpfc_mbox_cmpl_put(phba, mboxq);
9281
9282 psli->sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
9283 phba->sli.mbox_active = NULL;
9284 }
9285 spin_unlock_irqrestore(&phba->hbalock, iflags);
9286
9287 return MBX_NOT_FINISHED;
9288}
9289
9290
9291
9292
9293
9294
9295
9296
9297
9298
9299
9300
9301
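/**
 * lpfc_sli_issue_mbox - Issue a mailbox command through the SLI API
 * @phba: pointer to lpfc hba data structure.
 * @pmbox: mailbox command to issue.
 * @flag: MBX_POLL or MBX_NOWAIT.
 *
 * Dispatches to the SLI-3 or SLI-4 specific issue routine installed by
 * lpfc_mbox_api_table_setup().
 */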
9302int
9303lpfc_sli_issue_mbox(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmbox, uint32_t flag)
9304{
9305 return phba->lpfc_sli_issue_mbox(phba, pmbox, flag);
9306}
9307
9308
9309
9310
9311
9312
9313
9314
9315
9316
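/**
 * lpfc_mbox_api_table_setup - Set up the mailbox API function jump table
 * @phba: pointer to lpfc hba data structure.
 * @dev_grp: PCI device group (LPFC_PCI_DEV_LP for SLI-3, LPFC_PCI_DEV_OC
 *           for SLI-4).
 *
 * Installs the device-group specific mailbox issue, slow-ring event, HBQ
 * post, board restart and board ready routines.
 *
 * Return: 0 on success, -ENODEV for an unrecognized device group.
 */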
9317int
9318lpfc_mbox_api_table_setup(struct lpfc_hba *phba, uint8_t dev_grp)
9319{
9320
9321 switch (dev_grp) {
9322 case LPFC_PCI_DEV_LP:
9323 phba->lpfc_sli_issue_mbox = lpfc_sli_issue_mbox_s3;
9324 phba->lpfc_sli_handle_slow_ring_event =
9325 lpfc_sli_handle_slow_ring_event_s3;
9326 phba->lpfc_sli_hbq_to_firmware = lpfc_sli_hbq_to_firmware_s3;
9327 phba->lpfc_sli_brdrestart = lpfc_sli_brdrestart_s3;
9328 phba->lpfc_sli_brdready = lpfc_sli_brdready_s3;
9329 break;
9330 case LPFC_PCI_DEV_OC:
9331 phba->lpfc_sli_issue_mbox = lpfc_sli_issue_mbox_s4;
9332 phba->lpfc_sli_handle_slow_ring_event =
9333 lpfc_sli_handle_slow_ring_event_s4;
9334 phba->lpfc_sli_hbq_to_firmware = lpfc_sli_hbq_to_firmware_s4;
9335 phba->lpfc_sli_brdrestart = lpfc_sli_brdrestart_s4;
9336 phba->lpfc_sli_brdready = lpfc_sli_brdready_s4;
9337 break;
9338 default:
9339 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
9340 "1420 Invalid HBA PCI-device group: 0x%x\n",
9341 dev_grp);
9342 return -ENODEV;
9343 }
9344 return 0;
9345}
9346
9347
9348
9349
9350
9351
9352
9353
9354
9355
9356
9357
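/**
 * __lpfc_sli_ringtx_put - Add an iocb to a ring's transmit queue
 * @phba: pointer to lpfc hba data structure.
 * @pring: SLI ring the iocb is queued on.
 * @piocb: iocb to queue.
 *
 * Appends the iocb to the ring's txq. The caller must hold the ring_lock
 * (SLI-4) or the hbalock (SLI-3); this is asserted with lockdep.
 */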
9358void
9359__lpfc_sli_ringtx_put(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
9360 struct lpfc_iocbq *piocb)
9361{
9362 if (phba->sli_rev == LPFC_SLI_REV4)
9363 lockdep_assert_held(&pring->ring_lock);
9364 else
9365 lockdep_assert_held(&phba->hbalock);
9366
9367 list_add_tail(&piocb->list, &pring->txq);
9368}
9369
9370
9371
9372
9373
9374
9375
9376
9377
9378
9379
9380
9381
9382
9383
9384
9385
9386
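/**
 * lpfc_sli_next_iocb - Get the next iocb to submit to the firmware
 * @phba: pointer to lpfc hba data structure.
 * @pring: SLI ring being serviced.
 * @piocb: in/out pointer to the newly requested iocb.
 *
 * Returns the next iocb from the ring's txq if one is pending; otherwise
 * returns *@piocb and clears it so the caller knows the new iocb was
 * consumed. Called with the hbalock held.
 */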
9387static struct lpfc_iocbq *
9388lpfc_sli_next_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
9389 struct lpfc_iocbq **piocb)
9390{
9391 struct lpfc_iocbq * nextiocb;
9392
9393 lockdep_assert_held(&phba->hbalock);
9394
9395 nextiocb = lpfc_sli_ringtx_get(phba, pring);
9396 if (!nextiocb) {
9397 nextiocb = *piocb;
9398 *piocb = NULL;
9399 }
9400
9401 return nextiocb;
9402}
9403
9404
9405
9406
9407
9408
9409
9410
9411
9412
9413
9414
9415
9416
9417
9418
9419
9420
9421
9422
9423
9424
9425
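/**
 * __lpfc_sli_issue_iocb_s3 - Issue an iocb on an SLI-3 ring (lockless)
 * @phba: pointer to lpfc hba data structure.
 * @ring_number: index of the SLI-3 ring to use.
 * @piocb: iocb to issue.
 * @flag: SLI_IOCB_RET_IOCB to return IOCB_BUSY instead of queuing when the
 *        ring cannot accept the command.
 *
 * Validates that the command may be sent in the current adapter and link
 * state, then copies as many iocbs as possible (txq backlog first, then
 * @piocb) into free ring slots and updates the ring pointers. If @piocb
 * cannot be submitted it is placed on the txq unless SLI_IOCB_RET_IOCB is
 * set. Caller must hold the hbalock.
 *
 * Return: IOCB_SUCCESS, IOCB_BUSY or IOCB_ERROR.
 */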
9426static int
9427__lpfc_sli_issue_iocb_s3(struct lpfc_hba *phba, uint32_t ring_number,
9428 struct lpfc_iocbq *piocb, uint32_t flag)
9429{
9430 struct lpfc_iocbq *nextiocb;
9431 IOCB_t *iocb;
9432 struct lpfc_sli_ring *pring = &phba->sli.sli3_ring[ring_number];
9433
9434 lockdep_assert_held(&phba->hbalock);
9435
9436 if (piocb->iocb_cmpl && (!piocb->vport) &&
9437 (piocb->iocb.ulpCommand != CMD_ABORT_XRI_CN) &&
9438 (piocb->iocb.ulpCommand != CMD_CLOSE_XRI_CN)) {
9439 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
9440 "1807 IOCB x%x failed. No vport\n",
9441 piocb->iocb.ulpCommand);
9442 dump_stack();
9443 return IOCB_ERROR;
9444 }
9445
9446
9447
9448 if (unlikely(pci_channel_offline(phba->pcidev)))
9449 return IOCB_ERROR;
9450
9451
9452 if (unlikely(phba->hba_flag & DEFER_ERATT))
9453 return IOCB_ERROR;
9454
9455
9456
9457
9458 if (unlikely(phba->link_state < LPFC_LINK_DOWN))
9459 return IOCB_ERROR;
9460
9461
9462
9463
9464
9465 if (unlikely(pring->flag & LPFC_STOP_IOCB_EVENT))
9466 goto iocb_busy;
9467
9468 if (unlikely(phba->link_state == LPFC_LINK_DOWN)) {
9469
9470
9471
9472
9473 switch (piocb->iocb.ulpCommand) {
9474 case CMD_GEN_REQUEST64_CR:
9475 case CMD_GEN_REQUEST64_CX:
9476 if (!(phba->sli.sli_flag & LPFC_MENLO_MAINT) ||
9477 (piocb->iocb.un.genreq64.w5.hcsw.Rctl !=
9478 FC_RCTL_DD_UNSOL_CMD) ||
9479 (piocb->iocb.un.genreq64.w5.hcsw.Type !=
9480 MENLO_TRANSPORT_TYPE))
9481
9482 goto iocb_busy;
9483 break;
9484 case CMD_QUE_RING_BUF_CN:
9485 case CMD_QUE_RING_BUF64_CN:
9486
9487
9488
9489
9490 if (piocb->iocb_cmpl)
9491 piocb->iocb_cmpl = NULL;
9492 fallthrough;
9493 case CMD_CREATE_XRI_CR:
9494 case CMD_CLOSE_XRI_CN:
9495 case CMD_CLOSE_XRI_CX:
9496 break;
9497 default:
9498 goto iocb_busy;
9499 }
9500
9501
9502
9503
9504
9505 } else if (unlikely(pring->ringno == LPFC_FCP_RING &&
9506 !(phba->sli.sli_flag & LPFC_PROCESS_LA))) {
9507 goto iocb_busy;
9508 }
9509
9510 while ((iocb = lpfc_sli_next_iocb_slot(phba, pring)) &&
9511 (nextiocb = lpfc_sli_next_iocb(phba, pring, &piocb)))
9512 lpfc_sli_submit_iocb(phba, pring, iocb, nextiocb);
9513
9514 if (iocb)
9515 lpfc_sli_update_ring(phba, pring);
9516 else
9517 lpfc_sli_update_full_ring(phba, pring);
9518
9519 if (!piocb)
9520 return IOCB_SUCCESS;
9521
9522 goto out_busy;
9523
9524 iocb_busy:
9525 pring->stats.iocb_cmd_delay++;
9526
9527 out_busy:
9528
9529 if (!(flag & SLI_IOCB_RET_IOCB)) {
9530 __lpfc_sli_ringtx_put(phba, pring, piocb);
9531 return IOCB_SUCCESS;
9532 }
9533
9534 return IOCB_BUSY;
9535}
9536
9537
9538
9539
9540
9541
9542
9543
9544
9545
9546
9547
9548
9549
9550
9551
9552
9553
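/**
 * lpfc_sli4_bpl2sgl - Convert an iocb's BPL or BDE into the sglq SGL
 * @phba: pointer to lpfc hba data structure.
 * @piocbq: iocb carrying a buffer pointer list or a single BDE.
 * @sglq: sglq entry whose SGL is to be filled in.
 *
 * Copies the BDEs from the iocb's buffer pointer list (or its single
 * embedded BDE) into the SGEs of @sglq, setting the last-SGE flag and,
 * for GEN_REQUEST64 commands, the data offsets.
 *
 * Return: the sglq's XRI tag, or NO_XRI if no SGL could be built.
 */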
9554static uint16_t
9555lpfc_sli4_bpl2sgl(struct lpfc_hba *phba, struct lpfc_iocbq *piocbq,
9556 struct lpfc_sglq *sglq)
9557{
9558 uint16_t xritag = NO_XRI;
9559 struct ulp_bde64 *bpl = NULL;
9560 struct ulp_bde64 bde;
9561 struct sli4_sge *sgl = NULL;
9562 struct lpfc_dmabuf *dmabuf;
9563 IOCB_t *icmd;
9564 int numBdes = 0;
9565 int i = 0;
9566 uint32_t offset = 0;
9567 int inbound = 0;
9568
9569 if (!piocbq || !sglq)
9570 return xritag;
9571
9572 sgl = (struct sli4_sge *)sglq->sgl;
9573 icmd = &piocbq->iocb;
9574 if (icmd->ulpCommand == CMD_XMIT_BLS_RSP64_CX)
9575 return sglq->sli4_xritag;
9576 if (icmd->un.genreq64.bdl.bdeFlags == BUFF_TYPE_BLP_64) {
9577 numBdes = icmd->un.genreq64.bdl.bdeSize /
9578 sizeof(struct ulp_bde64);
9579
9580
9581
9582
9583 if (piocbq->context3)
9584 dmabuf = (struct lpfc_dmabuf *)piocbq->context3;
9585 else
9586 return xritag;
9587
9588 bpl = (struct ulp_bde64 *)dmabuf->virt;
9589 if (!bpl)
9590 return xritag;
9591
9592 for (i = 0; i < numBdes; i++) {
9593
9594 sgl->addr_hi = bpl->addrHigh;
9595 sgl->addr_lo = bpl->addrLow;
9596
9597 sgl->word2 = le32_to_cpu(sgl->word2);
9598 if ((i+1) == numBdes)
9599 bf_set(lpfc_sli4_sge_last, sgl, 1);
9600 else
9601 bf_set(lpfc_sli4_sge_last, sgl, 0);
9602
9603
9604
9605 bde.tus.w = le32_to_cpu(bpl->tus.w);
9606 sgl->sge_len = cpu_to_le32(bde.tus.f.bdeSize);
9607
9608
9609
9610
9611 if (piocbq->iocb.ulpCommand == CMD_GEN_REQUEST64_CR) {
9612
9613 if (bpl->tus.f.bdeFlags == BUFF_TYPE_BDE_64I)
9614 inbound++;
9615
9616 if (inbound == 1)
9617 offset = 0;
9618 bf_set(lpfc_sli4_sge_offset, sgl, offset);
9619 bf_set(lpfc_sli4_sge_type, sgl,
9620 LPFC_SGE_TYPE_DATA);
9621 offset += bde.tus.f.bdeSize;
9622 }
9623 sgl->word2 = cpu_to_le32(sgl->word2);
9624 bpl++;
9625 sgl++;
9626 }
9627 } else if (icmd->un.genreq64.bdl.bdeFlags == BUFF_TYPE_BDE_64) {
9628
9629
9630
9631
9632 sgl->addr_hi =
9633 cpu_to_le32(icmd->un.genreq64.bdl.addrHigh);
9634 sgl->addr_lo =
9635 cpu_to_le32(icmd->un.genreq64.bdl.addrLow);
9636 sgl->word2 = le32_to_cpu(sgl->word2);
9637 bf_set(lpfc_sli4_sge_last, sgl, 1);
9638 sgl->word2 = cpu_to_le32(sgl->word2);
9639 sgl->sge_len =
9640 cpu_to_le32(icmd->un.genreq64.bdl.bdeSize);
9641 }
9642 return sglq->sli4_xritag;
9643}
9644
9645
9646
9647
9648
9649
9650
9651
9652
9653
9654
9655
9656
9657
9658
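/**
 * lpfc_sli4_iocb2wqe - Translate an SLI-3 iocb into an SLI-4 WQE
 * @phba: pointer to lpfc hba data structure.
 * @iocbq: iocb to translate.
 * @wqe: 128-byte work queue entry to fill in.
 *
 * Converts the fields of the legacy iocb (command, BDEs, context tags,
 * timeouts, class of service, etc.) into the equivalent SLI-4 work queue
 * entry layout for the command type being issued.
 *
 * Returns IOCB_ERROR if the iocb cannot be converted.
 */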
9659static int
9660lpfc_sli4_iocb2wqe(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq,
9661 union lpfc_wqe128 *wqe)
9662{
9663 uint32_t xmit_len = 0, total_len = 0;
9664 uint8_t ct = 0;
9665 uint32_t fip;
9666 uint32_t abort_tag;
9667 uint8_t command_type = ELS_COMMAND_NON_FIP;
9668 uint8_t cmnd;
9669 uint16_t xritag;
9670 uint16_t abrt_iotag;
9671 struct lpfc_iocbq *abrtiocbq;
9672 struct ulp_bde64 *bpl = NULL;
9673 uint32_t els_id = LPFC_ELS_ID_DEFAULT;
9674 int numBdes, i;
9675 struct ulp_bde64 bde;
9676 struct lpfc_nodelist *ndlp;
9677 uint32_t *pcmd;
9678 uint32_t if_type;
9679
9680 fip = phba->hba_flag & HBA_FIP_SUPPORT;
9681
9682 if (iocbq->iocb_flag & LPFC_IO_FCP)
9683 command_type = FCP_COMMAND;
9684 else if (fip && (iocbq->iocb_flag & LPFC_FIP_ELS_ID_MASK))
9685 command_type = ELS_COMMAND_FIP;
9686 else
9687 command_type = ELS_COMMAND_NON_FIP;
9688
9689 if (phba->fcp_embed_io)
9690 memset(wqe, 0, sizeof(union lpfc_wqe128));
9691
9692 memcpy(wqe, &iocbq->iocb, sizeof(union lpfc_wqe));
9693
9694 wqe->generic.wqe_com.word7 = 0;
9695 wqe->generic.wqe_com.word10 = 0;
9696
9697 abort_tag = (uint32_t) iocbq->iotag;
9698 xritag = iocbq->sli4_xritag;
9699
9700 if (iocbq->iocb.un.genreq64.bdl.bdeFlags == BUFF_TYPE_BLP_64) {
9701 numBdes = iocbq->iocb.un.genreq64.bdl.bdeSize /
9702 sizeof(struct ulp_bde64);
9703 bpl = (struct ulp_bde64 *)
9704 ((struct lpfc_dmabuf *)iocbq->context3)->virt;
9705 if (!bpl)
9706 return IOCB_ERROR;
9707
9708
9709 wqe->generic.bde.addrHigh = le32_to_cpu(bpl->addrHigh);
9710 wqe->generic.bde.addrLow = le32_to_cpu(bpl->addrLow);
9711
9712
9713
9714 wqe->generic.bde.tus.w = le32_to_cpu(bpl->tus.w);
9715 xmit_len = wqe->generic.bde.tus.f.bdeSize;
9716 total_len = 0;
9717 for (i = 0; i < numBdes; i++) {
9718 bde.tus.w = le32_to_cpu(bpl[i].tus.w);
9719 total_len += bde.tus.f.bdeSize;
9720 }
9721 } else
9722 xmit_len = iocbq->iocb.un.fcpi64.bdl.bdeSize;
9723
9724 iocbq->iocb.ulpIoTag = iocbq->iotag;
9725 cmnd = iocbq->iocb.ulpCommand;
9726
9727 switch (iocbq->iocb.ulpCommand) {
9728 case CMD_ELS_REQUEST64_CR:
9729 if (iocbq->iocb_flag & LPFC_IO_LIBDFC)
9730 ndlp = iocbq->context_un.ndlp;
9731 else
9732 ndlp = (struct lpfc_nodelist *)iocbq->context1;
9733 if (!iocbq->iocb.ulpLe) {
9734 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
9735 "2007 Only Limited Edition cmd Format"
9736 " supported 0x%x\n",
9737 iocbq->iocb.ulpCommand);
9738 return IOCB_ERROR;
9739 }
9740
9741 wqe->els_req.payload_len = xmit_len;
9742
9743 bf_set(wqe_tmo, &wqe->els_req.wqe_com,
9744 iocbq->iocb.ulpTimeout);
9745
9746 bf_set(els_req64_vf, &wqe->els_req, 0);
9747
9748 bf_set(els_req64_vfid, &wqe->els_req, 0);
9749 ct = ((iocbq->iocb.ulpCt_h << 1) | iocbq->iocb.ulpCt_l);
9750 bf_set(wqe_ctxt_tag, &wqe->els_req.wqe_com,
9751 iocbq->iocb.ulpContext);
9752 bf_set(wqe_ct, &wqe->els_req.wqe_com, ct);
9753 bf_set(wqe_pu, &wqe->els_req.wqe_com, 0);
9754
9755 if (command_type == ELS_COMMAND_FIP)
9756 els_id = ((iocbq->iocb_flag & LPFC_FIP_ELS_ID_MASK)
9757 >> LPFC_FIP_ELS_ID_SHIFT);
9758 pcmd = (uint32_t *) (((struct lpfc_dmabuf *)
9759 iocbq->context2)->virt);
9760 if_type = bf_get(lpfc_sli_intf_if_type,
9761 &phba->sli4_hba.sli_intf);
9762 if (if_type >= LPFC_SLI_INTF_IF_TYPE_2) {
9763 if (pcmd && (*pcmd == ELS_CMD_FLOGI ||
9764 *pcmd == ELS_CMD_SCR ||
9765 *pcmd == ELS_CMD_RDF ||
9766 *pcmd == ELS_CMD_RSCN_XMT ||
9767 *pcmd == ELS_CMD_FDISC ||
9768 *pcmd == ELS_CMD_LOGO ||
9769 *pcmd == ELS_CMD_QFPA ||
9770 *pcmd == ELS_CMD_UVEM ||
9771 *pcmd == ELS_CMD_PLOGI)) {
9772 bf_set(els_req64_sp, &wqe->els_req, 1);
9773 bf_set(els_req64_sid, &wqe->els_req,
9774 iocbq->vport->fc_myDID);
9775 if ((*pcmd == ELS_CMD_FLOGI) &&
9776 !(phba->fc_topology ==
9777 LPFC_TOPOLOGY_LOOP))
9778 bf_set(els_req64_sid, &wqe->els_req, 0);
9779 bf_set(wqe_ct, &wqe->els_req.wqe_com, 1);
9780 bf_set(wqe_ctxt_tag, &wqe->els_req.wqe_com,
9781 phba->vpi_ids[iocbq->vport->vpi]);
9782 } else if (pcmd && iocbq->context1) {
9783 bf_set(wqe_ct, &wqe->els_req.wqe_com, 0);
9784 bf_set(wqe_ctxt_tag, &wqe->els_req.wqe_com,
9785 phba->sli4_hba.rpi_ids[ndlp->nlp_rpi]);
9786 }
9787 }
9788 bf_set(wqe_temp_rpi, &wqe->els_req.wqe_com,
9789 phba->sli4_hba.rpi_ids[ndlp->nlp_rpi]);
9790 bf_set(wqe_els_id, &wqe->els_req.wqe_com, els_id);
9791 bf_set(wqe_dbde, &wqe->els_req.wqe_com, 1);
9792 bf_set(wqe_iod, &wqe->els_req.wqe_com, LPFC_WQE_IOD_READ);
9793 bf_set(wqe_qosd, &wqe->els_req.wqe_com, 1);
9794 bf_set(wqe_lenloc, &wqe->els_req.wqe_com, LPFC_WQE_LENLOC_NONE);
9795 bf_set(wqe_ebde_cnt, &wqe->els_req.wqe_com, 0);
9796 wqe->els_req.max_response_payload_len = total_len - xmit_len;
9797 break;
9798 case CMD_XMIT_SEQUENCE64_CX:
9799 bf_set(wqe_ctxt_tag, &wqe->xmit_sequence.wqe_com,
9800 iocbq->iocb.un.ulpWord[3]);
9801 bf_set(wqe_rcvoxid, &wqe->xmit_sequence.wqe_com,
9802 iocbq->iocb.unsli3.rcvsli3.ox_id);
9803
9804 xmit_len = total_len;
9805 cmnd = CMD_XMIT_SEQUENCE64_CR;
9806 if (phba->link_flag & LS_LOOPBACK_MODE)
9807 bf_set(wqe_xo, &wqe->xmit_sequence.wge_ctl, 1);
9808 fallthrough;
9809 case CMD_XMIT_SEQUENCE64_CR:
9810
9811 wqe->xmit_sequence.rsvd3 = 0;
9812
9813
9814 bf_set(wqe_pu, &wqe->xmit_sequence.wqe_com, 0);
9815 bf_set(wqe_dbde, &wqe->xmit_sequence.wqe_com, 1);
9816 bf_set(wqe_iod, &wqe->xmit_sequence.wqe_com,
9817 LPFC_WQE_IOD_WRITE);
9818 bf_set(wqe_lenloc, &wqe->xmit_sequence.wqe_com,
9819 LPFC_WQE_LENLOC_WORD12);
9820 bf_set(wqe_ebde_cnt, &wqe->xmit_sequence.wqe_com, 0);
9821 wqe->xmit_sequence.xmit_len = xmit_len;
9822 command_type = OTHER_COMMAND;
9823 break;
9824 case CMD_XMIT_BCAST64_CN:
9825
9826 wqe->xmit_bcast64.seq_payload_len = xmit_len;
9827
9828
9829
9830 bf_set(wqe_ct, &wqe->xmit_bcast64.wqe_com,
9831 ((iocbq->iocb.ulpCt_h << 1) | iocbq->iocb.ulpCt_l));
9832 bf_set(wqe_dbde, &wqe->xmit_bcast64.wqe_com, 1);
9833 bf_set(wqe_iod, &wqe->xmit_bcast64.wqe_com, LPFC_WQE_IOD_WRITE);
9834 bf_set(wqe_lenloc, &wqe->xmit_bcast64.wqe_com,
9835 LPFC_WQE_LENLOC_WORD3);
9836 bf_set(wqe_ebde_cnt, &wqe->xmit_bcast64.wqe_com, 0);
9837 break;
9838 case CMD_FCP_IWRITE64_CR:
9839 command_type = FCP_COMMAND_DATA_OUT;
9840
9841
9842 bf_set(payload_offset_len, &wqe->fcp_iwrite,
9843 xmit_len + sizeof(struct fcp_rsp));
9844 bf_set(cmd_buff_len, &wqe->fcp_iwrite,
9845 0);
9846
9847
9848 bf_set(wqe_erp, &wqe->fcp_iwrite.wqe_com,
9849 iocbq->iocb.ulpFCP2Rcvy);
9850 bf_set(wqe_lnk, &wqe->fcp_iwrite.wqe_com, iocbq->iocb.ulpXS);
9851
9852 bf_set(wqe_iod, &wqe->fcp_iwrite.wqe_com, LPFC_WQE_IOD_WRITE);
9853 bf_set(wqe_lenloc, &wqe->fcp_iwrite.wqe_com,
9854 LPFC_WQE_LENLOC_WORD4);
9855 bf_set(wqe_pu, &wqe->fcp_iwrite.wqe_com, iocbq->iocb.ulpPU);
9856 bf_set(wqe_dbde, &wqe->fcp_iwrite.wqe_com, 1);
9857 if (iocbq->iocb_flag & LPFC_IO_OAS) {
9858 bf_set(wqe_oas, &wqe->fcp_iwrite.wqe_com, 1);
9859 bf_set(wqe_ccpe, &wqe->fcp_iwrite.wqe_com, 1);
9860 if (iocbq->priority) {
9861 bf_set(wqe_ccp, &wqe->fcp_iwrite.wqe_com,
9862 (iocbq->priority << 1));
9863 } else {
9864 bf_set(wqe_ccp, &wqe->fcp_iwrite.wqe_com,
9865 (phba->cfg_XLanePriority << 1));
9866 }
9867 }
9868
9869
9870
9871 if (phba->cfg_enable_pbde)
9872 bf_set(wqe_pbde, &wqe->fcp_iwrite.wqe_com, 1);
9873 else
9874 bf_set(wqe_pbde, &wqe->fcp_iwrite.wqe_com, 0);
9875
9876 if (phba->fcp_embed_io) {
9877 struct lpfc_io_buf *lpfc_cmd;
9878 struct sli4_sge *sgl;
9879 struct fcp_cmnd *fcp_cmnd;
9880 uint32_t *ptr;
9881
9882
9883
9884 lpfc_cmd = iocbq->context1;
9885 sgl = (struct sli4_sge *)lpfc_cmd->dma_sgl;
9886 fcp_cmnd = lpfc_cmd->fcp_cmnd;
9887
9888
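			/*
			 * Word 0-2: describe the embedded payload with an
			 * immediate BDE; the FCP_CMND itself is copied into
			 * WQE words 22 and up below (byte offset 88).
			 */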
9889 wqe->generic.bde.tus.f.bdeFlags =
9890 BUFF_TYPE_BDE_IMMED;
9891 wqe->generic.bde.tus.f.bdeSize = sgl->sge_len;
9892 wqe->generic.bde.addrHigh = 0;
9893 wqe->generic.bde.addrLow = 88;
9894
9895 bf_set(wqe_wqes, &wqe->fcp_iwrite.wqe_com, 1);
9896 bf_set(wqe_dbde, &wqe->fcp_iwrite.wqe_com, 0);
9897
9898
9899 ptr = &wqe->words[22];
9900 memcpy(ptr, fcp_cmnd, sizeof(struct fcp_cmnd));
9901 }
9902 break;
9903 case CMD_FCP_IREAD64_CR:
9904
9905
9906 bf_set(payload_offset_len, &wqe->fcp_iread,
9907 xmit_len + sizeof(struct fcp_rsp));
9908 bf_set(cmd_buff_len, &wqe->fcp_iread,
9909 0);
9910
9911
9912 bf_set(wqe_erp, &wqe->fcp_iread.wqe_com,
9913 iocbq->iocb.ulpFCP2Rcvy);
9914 bf_set(wqe_lnk, &wqe->fcp_iread.wqe_com, iocbq->iocb.ulpXS);
9915
9916 bf_set(wqe_iod, &wqe->fcp_iread.wqe_com, LPFC_WQE_IOD_READ);
9917 bf_set(wqe_lenloc, &wqe->fcp_iread.wqe_com,
9918 LPFC_WQE_LENLOC_WORD4);
9919 bf_set(wqe_pu, &wqe->fcp_iread.wqe_com, iocbq->iocb.ulpPU);
9920 bf_set(wqe_dbde, &wqe->fcp_iread.wqe_com, 1);
9921 if (iocbq->iocb_flag & LPFC_IO_OAS) {
9922 bf_set(wqe_oas, &wqe->fcp_iread.wqe_com, 1);
9923 bf_set(wqe_ccpe, &wqe->fcp_iread.wqe_com, 1);
9924 if (iocbq->priority) {
9925 bf_set(wqe_ccp, &wqe->fcp_iread.wqe_com,
9926 (iocbq->priority << 1));
9927 } else {
9928 bf_set(wqe_ccp, &wqe->fcp_iread.wqe_com,
9929 (phba->cfg_XLanePriority << 1));
9930 }
9931 }
9932
9933
9934
9935 if (phba->cfg_enable_pbde)
9936 bf_set(wqe_pbde, &wqe->fcp_iread.wqe_com, 1);
9937 else
9938 bf_set(wqe_pbde, &wqe->fcp_iread.wqe_com, 0);
9939
9940 if (phba->fcp_embed_io) {
9941 struct lpfc_io_buf *lpfc_cmd;
9942 struct sli4_sge *sgl;
9943 struct fcp_cmnd *fcp_cmnd;
9944 uint32_t *ptr;
9945
9946
9947
9948 lpfc_cmd = iocbq->context1;
9949 sgl = (struct sli4_sge *)lpfc_cmd->dma_sgl;
9950 fcp_cmnd = lpfc_cmd->fcp_cmnd;
9951
9952
9953 wqe->generic.bde.tus.f.bdeFlags =
9954 BUFF_TYPE_BDE_IMMED;
9955 wqe->generic.bde.tus.f.bdeSize = sgl->sge_len;
9956 wqe->generic.bde.addrHigh = 0;
9957 wqe->generic.bde.addrLow = 88;
9958
9959 bf_set(wqe_wqes, &wqe->fcp_iread.wqe_com, 1);
9960 bf_set(wqe_dbde, &wqe->fcp_iread.wqe_com, 0);
9961
9962
9963 ptr = &wqe->words[22];
9964 memcpy(ptr, fcp_cmnd, sizeof(struct fcp_cmnd));
9965 }
9966 break;
9967 case CMD_FCP_ICMND64_CR:
9968
9969
9970 bf_set(payload_offset_len, &wqe->fcp_icmd,
9971 xmit_len + sizeof(struct fcp_rsp));
9972 bf_set(cmd_buff_len, &wqe->fcp_icmd,
9973 0);
9974
9975 bf_set(wqe_pu, &wqe->fcp_icmd.wqe_com, 0);
9976
9977 bf_set(wqe_dbde, &wqe->fcp_icmd.wqe_com, 1);
9978 bf_set(wqe_iod, &wqe->fcp_icmd.wqe_com, LPFC_WQE_IOD_WRITE);
9979 bf_set(wqe_qosd, &wqe->fcp_icmd.wqe_com, 1);
9980 bf_set(wqe_lenloc, &wqe->fcp_icmd.wqe_com,
9981 LPFC_WQE_LENLOC_NONE);
9982 bf_set(wqe_erp, &wqe->fcp_icmd.wqe_com,
9983 iocbq->iocb.ulpFCP2Rcvy);
9984 if (iocbq->iocb_flag & LPFC_IO_OAS) {
9985 bf_set(wqe_oas, &wqe->fcp_icmd.wqe_com, 1);
9986 bf_set(wqe_ccpe, &wqe->fcp_icmd.wqe_com, 1);
9987 if (iocbq->priority) {
9988 bf_set(wqe_ccp, &wqe->fcp_icmd.wqe_com,
9989 (iocbq->priority << 1));
9990 } else {
9991 bf_set(wqe_ccp, &wqe->fcp_icmd.wqe_com,
9992 (phba->cfg_XLanePriority << 1));
9993 }
9994 }
9995
9996
9997 if (phba->fcp_embed_io) {
9998 struct lpfc_io_buf *lpfc_cmd;
9999 struct sli4_sge *sgl;
10000 struct fcp_cmnd *fcp_cmnd;
10001 uint32_t *ptr;
10002
10003
10004
10005 lpfc_cmd = iocbq->context1;
10006 sgl = (struct sli4_sge *)lpfc_cmd->dma_sgl;
10007 fcp_cmnd = lpfc_cmd->fcp_cmnd;
10008
10009
10010 wqe->generic.bde.tus.f.bdeFlags =
10011 BUFF_TYPE_BDE_IMMED;
10012 wqe->generic.bde.tus.f.bdeSize = sgl->sge_len;
10013 wqe->generic.bde.addrHigh = 0;
10014 wqe->generic.bde.addrLow = 88;
10015
10016 bf_set(wqe_wqes, &wqe->fcp_icmd.wqe_com, 1);
10017 bf_set(wqe_dbde, &wqe->fcp_icmd.wqe_com, 0);
10018
10019
10020 ptr = &wqe->words[22];
10021 memcpy(ptr, fcp_cmnd, sizeof(struct fcp_cmnd));
10022 }
10023 break;
10024 case CMD_GEN_REQUEST64_CR:
10025
10026
10027
10028 xmit_len = 0;
10029 numBdes = iocbq->iocb.un.genreq64.bdl.bdeSize /
10030 sizeof(struct ulp_bde64);
10031 for (i = 0; i < numBdes; i++) {
10032 bde.tus.w = le32_to_cpu(bpl[i].tus.w);
10033 if (bde.tus.f.bdeFlags != BUFF_TYPE_BDE_64)
10034 break;
10035 xmit_len += bde.tus.f.bdeSize;
10036 }
10037
10038 wqe->gen_req.request_payload_len = xmit_len;
10039
10040
10041
10042 if (iocbq->iocb.ulpCt_h || iocbq->iocb.ulpCt_l) {
10043 ct = ((iocbq->iocb.ulpCt_h << 1) | iocbq->iocb.ulpCt_l);
10044 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
10045 "2015 Invalid CT %x command 0x%x\n",
10046 ct, iocbq->iocb.ulpCommand);
10047 return IOCB_ERROR;
10048 }
10049 bf_set(wqe_ct, &wqe->gen_req.wqe_com, 0);
10050 bf_set(wqe_tmo, &wqe->gen_req.wqe_com, iocbq->iocb.ulpTimeout);
10051 bf_set(wqe_pu, &wqe->gen_req.wqe_com, iocbq->iocb.ulpPU);
10052 bf_set(wqe_dbde, &wqe->gen_req.wqe_com, 1);
10053 bf_set(wqe_iod, &wqe->gen_req.wqe_com, LPFC_WQE_IOD_READ);
10054 bf_set(wqe_qosd, &wqe->gen_req.wqe_com, 1);
10055 bf_set(wqe_lenloc, &wqe->gen_req.wqe_com, LPFC_WQE_LENLOC_NONE);
10056 bf_set(wqe_ebde_cnt, &wqe->gen_req.wqe_com, 0);
10057 wqe->gen_req.max_response_payload_len = total_len - xmit_len;
10058 command_type = OTHER_COMMAND;
10059 break;
10060 case CMD_XMIT_ELS_RSP64_CX:
10061 ndlp = (struct lpfc_nodelist *)iocbq->context1;
10062
10063
10064 wqe->xmit_els_rsp.response_payload_len = xmit_len;
10065
10066 wqe->xmit_els_rsp.word4 = 0;
10067
10068 bf_set(wqe_els_did, &wqe->xmit_els_rsp.wqe_dest,
10069 iocbq->iocb.un.xseq64.xmit_els_remoteID);
10070
10071 if_type = bf_get(lpfc_sli_intf_if_type,
10072 &phba->sli4_hba.sli_intf);
10073 if (if_type >= LPFC_SLI_INTF_IF_TYPE_2) {
10074 if (iocbq->vport->fc_flag & FC_PT2PT) {
10075 bf_set(els_rsp64_sp, &wqe->xmit_els_rsp, 1);
10076 bf_set(els_rsp64_sid, &wqe->xmit_els_rsp,
10077 iocbq->vport->fc_myDID);
10078 if (iocbq->vport->fc_myDID == Fabric_DID) {
10079 bf_set(wqe_els_did,
10080 &wqe->xmit_els_rsp.wqe_dest, 0);
10081 }
10082 }
10083 }
10084 bf_set(wqe_ct, &wqe->xmit_els_rsp.wqe_com,
10085 ((iocbq->iocb.ulpCt_h << 1) | iocbq->iocb.ulpCt_l));
10086 bf_set(wqe_pu, &wqe->xmit_els_rsp.wqe_com, iocbq->iocb.ulpPU);
10087 bf_set(wqe_rcvoxid, &wqe->xmit_els_rsp.wqe_com,
10088 iocbq->iocb.unsli3.rcvsli3.ox_id);
10089 if (!iocbq->iocb.ulpCt_h && iocbq->iocb.ulpCt_l)
10090 bf_set(wqe_ctxt_tag, &wqe->xmit_els_rsp.wqe_com,
10091 phba->vpi_ids[iocbq->vport->vpi]);
10092 bf_set(wqe_dbde, &wqe->xmit_els_rsp.wqe_com, 1);
10093 bf_set(wqe_iod, &wqe->xmit_els_rsp.wqe_com, LPFC_WQE_IOD_WRITE);
10094 bf_set(wqe_qosd, &wqe->xmit_els_rsp.wqe_com, 1);
10095 bf_set(wqe_lenloc, &wqe->xmit_els_rsp.wqe_com,
10096 LPFC_WQE_LENLOC_WORD3);
10097 bf_set(wqe_ebde_cnt, &wqe->xmit_els_rsp.wqe_com, 0);
10098 bf_set(wqe_rsp_temp_rpi, &wqe->xmit_els_rsp,
10099 phba->sli4_hba.rpi_ids[ndlp->nlp_rpi]);
10100 pcmd = (uint32_t *) (((struct lpfc_dmabuf *)
10101 iocbq->context2)->virt);
10102 if (phba->fc_topology == LPFC_TOPOLOGY_LOOP) {
10103 bf_set(els_rsp64_sp, &wqe->xmit_els_rsp, 1);
10104 bf_set(els_rsp64_sid, &wqe->xmit_els_rsp,
10105 iocbq->vport->fc_myDID);
10106 bf_set(wqe_ct, &wqe->xmit_els_rsp.wqe_com, 1);
10107 bf_set(wqe_ctxt_tag, &wqe->xmit_els_rsp.wqe_com,
10108 phba->vpi_ids[phba->pport->vpi]);
10109 }
10110 command_type = OTHER_COMMAND;
10111 break;
10112 case CMD_CLOSE_XRI_CN:
10113 case CMD_ABORT_XRI_CN:
10114 case CMD_ABORT_XRI_CX:
10115
10116
10117 abrt_iotag = iocbq->iocb.un.acxri.abortContextTag;
10118 if (abrt_iotag != 0 && abrt_iotag <= phba->sli.last_iotag) {
10119 abrtiocbq = phba->sli.iocbq_lookup[abrt_iotag];
10120 fip = abrtiocbq->iocb_flag & LPFC_FIP_ELS_ID_MASK;
10121 } else
10122 fip = 0;
10123
10124 if ((iocbq->iocb.ulpCommand == CMD_CLOSE_XRI_CN) || fip)
10125
10126
10127
10128
10129
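			/*
			 * The command is a close, or the exchange was for a
			 * FIP ELS, so set the IA bit and let firmware release
			 * the exchange without sending an ABTS on the wire.
			 */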
10130 bf_set(abort_cmd_ia, &wqe->abort_cmd, 1);
10131 else
10132 bf_set(abort_cmd_ia, &wqe->abort_cmd, 0);
10133 bf_set(abort_cmd_criteria, &wqe->abort_cmd, T_XRI_TAG);
10134
10135 wqe->abort_cmd.rsrvd5 = 0;
10136 bf_set(wqe_ct, &wqe->abort_cmd.wqe_com,
10137 ((iocbq->iocb.ulpCt_h << 1) | iocbq->iocb.ulpCt_l));
10138 abort_tag = iocbq->iocb.un.acxri.abortIoTag;
10139
10140
10141
10142
10143 bf_set(wqe_cmnd, &wqe->abort_cmd.wqe_com, CMD_ABORT_XRI_CX);
10144 bf_set(wqe_qosd, &wqe->abort_cmd.wqe_com, 1);
10145 bf_set(wqe_lenloc, &wqe->abort_cmd.wqe_com,
10146 LPFC_WQE_LENLOC_NONE);
10147 cmnd = CMD_ABORT_XRI_CX;
10148 command_type = OTHER_COMMAND;
10149 xritag = 0;
10150 break;
10151 case CMD_XMIT_BLS_RSP64_CX:
10152 ndlp = (struct lpfc_nodelist *)iocbq->context1;
10153
10154
10155
10156
10157 memset(wqe, 0, sizeof(*wqe));
10158
10159 bf_set(xmit_bls_rsp64_oxid, &wqe->xmit_bls_rsp,
10160 bf_get(lpfc_abts_oxid, &iocbq->iocb.un.bls_rsp));
10161 if (bf_get(lpfc_abts_orig, &iocbq->iocb.un.bls_rsp) ==
10162 LPFC_ABTS_UNSOL_INT) {
10163
10164
10165
10166
10167 bf_set(xmit_bls_rsp64_rxid, &wqe->xmit_bls_rsp,
10168 iocbq->sli4_xritag);
10169 } else {
10170
10171
10172
10173
10174 bf_set(xmit_bls_rsp64_rxid, &wqe->xmit_bls_rsp,
10175 bf_get(lpfc_abts_rxid, &iocbq->iocb.un.bls_rsp));
10176 }
10177 bf_set(xmit_bls_rsp64_seqcnthi, &wqe->xmit_bls_rsp, 0xffff);
10178 bf_set(wqe_xmit_bls_pt, &wqe->xmit_bls_rsp.wqe_dest, 0x1);
10179
10180
10181 bf_set(wqe_els_did, &wqe->xmit_bls_rsp.wqe_dest,
10182 ndlp->nlp_DID);
10183 bf_set(xmit_bls_rsp64_temprpi, &wqe->xmit_bls_rsp,
10184 iocbq->iocb.ulpContext);
10185 bf_set(wqe_ct, &wqe->xmit_bls_rsp.wqe_com, 1);
10186 bf_set(wqe_ctxt_tag, &wqe->xmit_bls_rsp.wqe_com,
10187 phba->vpi_ids[phba->pport->vpi]);
10188 bf_set(wqe_qosd, &wqe->xmit_bls_rsp.wqe_com, 1);
10189 bf_set(wqe_lenloc, &wqe->xmit_bls_rsp.wqe_com,
10190 LPFC_WQE_LENLOC_NONE);
10191
10192 command_type = OTHER_COMMAND;
10193 if (iocbq->iocb.un.xseq64.w5.hcsw.Rctl == FC_RCTL_BA_RJT) {
10194 bf_set(xmit_bls_rsp64_rjt_vspec, &wqe->xmit_bls_rsp,
10195 bf_get(lpfc_vndr_code, &iocbq->iocb.un.bls_rsp));
10196 bf_set(xmit_bls_rsp64_rjt_expc, &wqe->xmit_bls_rsp,
10197 bf_get(lpfc_rsn_expln, &iocbq->iocb.un.bls_rsp));
10198 bf_set(xmit_bls_rsp64_rjt_rsnc, &wqe->xmit_bls_rsp,
10199 bf_get(lpfc_rsn_code, &iocbq->iocb.un.bls_rsp));
10200 }
10201
10202 break;
10203 case CMD_SEND_FRAME:
10204 bf_set(wqe_cmnd, &wqe->generic.wqe_com, CMD_SEND_FRAME);
10205 bf_set(wqe_sof, &wqe->generic.wqe_com, 0x2E);
10206 bf_set(wqe_eof, &wqe->generic.wqe_com, 0x41);
10207 bf_set(wqe_lenloc, &wqe->generic.wqe_com, 1);
10208 bf_set(wqe_xbl, &wqe->generic.wqe_com, 1);
10209 bf_set(wqe_dbde, &wqe->generic.wqe_com, 1);
10210 bf_set(wqe_xc, &wqe->generic.wqe_com, 1);
10211 bf_set(wqe_cmd_type, &wqe->generic.wqe_com, 0xA);
10212 bf_set(wqe_cqid, &wqe->generic.wqe_com, LPFC_WQE_CQ_ID_DEFAULT);
10213 bf_set(wqe_xri_tag, &wqe->generic.wqe_com, xritag);
10214 bf_set(wqe_reqtag, &wqe->generic.wqe_com, iocbq->iotag);
10215 return 0;
10216 case CMD_XRI_ABORTED_CX:
10217 case CMD_CREATE_XRI_CR:
10218 case CMD_IOCB_FCP_IBIDIR64_CR:
10219 case CMD_FCP_TSEND64_CX:
10220 case CMD_FCP_TRSP64_CX:
10221 case CMD_FCP_AUTO_TRSP_CX:
10222 default:
10223 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
10224 "2014 Invalid command 0x%x\n",
10225 iocbq->iocb.ulpCommand);
10226 return IOCB_ERROR;
10227 }
10228
10229 if (iocbq->iocb_flag & LPFC_IO_DIF_PASS)
10230 bf_set(wqe_dif, &wqe->generic.wqe_com, LPFC_WQE_DIF_PASSTHRU);
10231 else if (iocbq->iocb_flag & LPFC_IO_DIF_STRIP)
10232 bf_set(wqe_dif, &wqe->generic.wqe_com, LPFC_WQE_DIF_STRIP);
10233 else if (iocbq->iocb_flag & LPFC_IO_DIF_INSERT)
10234 bf_set(wqe_dif, &wqe->generic.wqe_com, LPFC_WQE_DIF_INSERT);
10235 iocbq->iocb_flag &= ~(LPFC_IO_DIF_PASS | LPFC_IO_DIF_STRIP |
10236 LPFC_IO_DIF_INSERT);
10237 bf_set(wqe_xri_tag, &wqe->generic.wqe_com, xritag);
10238 bf_set(wqe_reqtag, &wqe->generic.wqe_com, iocbq->iotag);
10239 wqe->generic.wqe_com.abort_tag = abort_tag;
10240 bf_set(wqe_cmd_type, &wqe->generic.wqe_com, command_type);
10241 bf_set(wqe_cmnd, &wqe->generic.wqe_com, cmnd);
10242 bf_set(wqe_class, &wqe->generic.wqe_com, iocbq->iocb.ulpClass);
10243 bf_set(wqe_cqid, &wqe->generic.wqe_com, LPFC_WQE_CQ_ID_DEFAULT);
10244 return 0;
10245}
10246
10247
10248
10249
10250
10251
10252
10253
10254
10255
10256
10257
10258
10259
10260
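/**
 * __lpfc_sli_issue_fcp_io_s3 - SLI3 device routine to issue an FCP IO
 * @phba: Pointer to HBA context object.
 * @ring_number: SLI ring number to issue the iocb on.
 * @piocb: Pointer to command iocb.
 * @flag: Flag indicating if this command can be put into txq.
 *
 * Takes the hbalock and hands the iocb to __lpfc_sli_issue_iocb_s3,
 * returning whatever status that routine returns.
 */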
10261static int
10262__lpfc_sli_issue_fcp_io_s3(struct lpfc_hba *phba, uint32_t ring_number,
10263 struct lpfc_iocbq *piocb, uint32_t flag)
10264{
10265 unsigned long iflags;
10266 int rc;
10267
10268 spin_lock_irqsave(&phba->hbalock, iflags);
10269 rc = __lpfc_sli_issue_iocb_s3(phba, ring_number, piocb, flag);
10270 spin_unlock_irqrestore(&phba->hbalock, iflags);
10271
10272 return rc;
10273}
10274
10275
10276
10277
10278
10279
10280
10281
10282
10283
10284
10285
10286
10287
10288
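/**
 * __lpfc_sli_issue_fcp_io_s4 - SLI4 device routine to issue an FCP IO
 * @phba: Pointer to HBA context object.
 * @ring_number: SLI ring number (not used on the WQE path).
 * @piocb: Pointer to command iocb carrying a pre-built WQE.
 * @flag: Flag indicating if this command can be put into txq.
 *
 * Finishes the WQE in piocb->wqe: either embeds the FCP_CMND payload
 * directly in the WQE (fcp_embed_io) or points a BDE at the first SGE,
 * applies any VMID tagging, and posts the WQE to the hardware queue via
 * lpfc_sli4_issue_wqe. Returns the status from lpfc_sli4_issue_wqe.
 */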
10289static int
10290__lpfc_sli_issue_fcp_io_s4(struct lpfc_hba *phba, uint32_t ring_number,
10291 struct lpfc_iocbq *piocb, uint32_t flag)
10292{
10293 int rc;
10294 struct lpfc_io_buf *lpfc_cmd =
10295 (struct lpfc_io_buf *)piocb->context1;
10296 union lpfc_wqe128 *wqe = &piocb->wqe;
10297 struct sli4_sge *sgl;
10298
10299
10300 sgl = (struct sli4_sge *)lpfc_cmd->dma_sgl;
10301
10302 if (phba->fcp_embed_io) {
10303 struct fcp_cmnd *fcp_cmnd;
10304 u32 *ptr;
10305
10306 fcp_cmnd = lpfc_cmd->fcp_cmnd;
10307
10308
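		/*
		 * Word 0-2: immediate BDE covering the embedded payload;
		 * the FCP_CMND is copied into WQE words 22 and up below.
		 */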
10309 wqe->generic.bde.tus.f.bdeFlags =
10310 BUFF_TYPE_BDE_IMMED;
10311 wqe->generic.bde.tus.f.bdeSize = sgl->sge_len;
10312 wqe->generic.bde.addrHigh = 0;
10313 wqe->generic.bde.addrLow = 88;
10314
10315 bf_set(wqe_wqes, &wqe->fcp_iwrite.wqe_com, 1);
10316 bf_set(wqe_dbde, &wqe->fcp_iwrite.wqe_com, 0);
10317
10318
10319 ptr = &wqe->words[22];
10320 memcpy(ptr, fcp_cmnd, sizeof(struct fcp_cmnd));
10321 } else {
10322
10323 wqe->generic.bde.tus.f.bdeFlags = BUFF_TYPE_BDE_64;
10324 wqe->generic.bde.tus.f.bdeSize = sizeof(struct fcp_cmnd);
10325 wqe->generic.bde.addrHigh = sgl->addr_hi;
10326 wqe->generic.bde.addrLow = sgl->addr_lo;
10327
10328
10329 bf_set(wqe_dbde, &wqe->generic.wqe_com, 1);
10330 bf_set(wqe_wqes, &wqe->generic.wqe_com, 0);
10331 }
10332
10333
10334 if (unlikely(piocb->iocb_flag & LPFC_IO_VMID)) {
10335 if (phba->pport->vmid_priority_tagging) {
10336 bf_set(wqe_ccpe, &wqe->fcp_iwrite.wqe_com, 1);
10337 bf_set(wqe_ccp, &wqe->fcp_iwrite.wqe_com,
10338 (piocb->vmid_tag.cs_ctl_vmid));
10339 } else {
10340 bf_set(wqe_appid, &wqe->fcp_iwrite.wqe_com, 1);
10341 bf_set(wqe_wqes, &wqe->fcp_iwrite.wqe_com, 1);
10342 wqe->words[31] = piocb->vmid_tag.app_id;
10343 }
10344 }
10345 rc = lpfc_sli4_issue_wqe(phba, lpfc_cmd->hdwq, piocb);
10346 return rc;
10347}
10348
10349
10350
10351
10352
10353
10354
10355
10356
10357
10358
10359
10360
10361
10362
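/**
 * __lpfc_sli_issue_iocb_s4 - SLI4 device lockless version of iocb issue
 * @phba: Pointer to HBA context object.
 * @ring_number: SLI ring number to issue the iocb on.
 * @piocb: Pointer to command iocb.
 * @flag: Flag indicating if this command can be put into txq.
 *
 * Converts the iocb to a WQE and posts it to the selected work queue.
 * An ELS iocb that cannot get an sglq is either queued to the txq or
 * rejected with IOCB_BUSY, depending on @flag. The caller must hold the
 * ring_lock of the target ring (see the lockdep assertion below).
 */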
10363static int
10364__lpfc_sli_issue_iocb_s4(struct lpfc_hba *phba, uint32_t ring_number,
10365 struct lpfc_iocbq *piocb, uint32_t flag)
10366{
10367 struct lpfc_sglq *sglq;
10368 union lpfc_wqe128 wqe;
10369 struct lpfc_queue *wq;
10370 struct lpfc_sli_ring *pring;
10371
10372
10373 if ((piocb->iocb_flag & LPFC_IO_FCP) ||
10374 (piocb->iocb_flag & LPFC_USE_FCPWQIDX)) {
10375 wq = phba->sli4_hba.hdwq[piocb->hba_wqidx].io_wq;
10376 } else {
10377 wq = phba->sli4_hba.els_wq;
10378 }
10379
10380
10381 pring = wq->pring;
10382
10383
10384
10385
10386
10387 lockdep_assert_held(&pring->ring_lock);
10388
10389 if (piocb->sli4_xritag == NO_XRI) {
10390 if (piocb->iocb.ulpCommand == CMD_ABORT_XRI_CN ||
10391 piocb->iocb.ulpCommand == CMD_CLOSE_XRI_CN)
10392 sglq = NULL;
10393 else {
10394 if (!list_empty(&pring->txq)) {
10395 if (!(flag & SLI_IOCB_RET_IOCB)) {
10396 __lpfc_sli_ringtx_put(phba,
10397 pring, piocb);
10398 return IOCB_SUCCESS;
10399 } else {
10400 return IOCB_BUSY;
10401 }
10402 } else {
10403 sglq = __lpfc_sli_get_els_sglq(phba, piocb);
10404 if (!sglq) {
10405 if (!(flag & SLI_IOCB_RET_IOCB)) {
10406 __lpfc_sli_ringtx_put(phba,
10407 pring,
10408 piocb);
10409 return IOCB_SUCCESS;
10410 } else
10411 return IOCB_BUSY;
10412 }
10413 }
10414 }
10415 } else if (piocb->iocb_flag & LPFC_IO_FCP) {
10416
10417 sglq = NULL;
10418 }
10419 else {
10420
10421
10422
10423
10424 sglq = __lpfc_get_active_sglq(phba, piocb->sli4_lxritag);
10425 if (!sglq)
10426 return IOCB_ERROR;
10427 }
10428
10429 if (sglq) {
10430 piocb->sli4_lxritag = sglq->sli4_lxritag;
10431 piocb->sli4_xritag = sglq->sli4_xritag;
10432 if (NO_XRI == lpfc_sli4_bpl2sgl(phba, piocb, sglq))
10433 return IOCB_ERROR;
10434 }
10435
10436 if (lpfc_sli4_iocb2wqe(phba, piocb, &wqe))
10437 return IOCB_ERROR;
10438
10439 if (lpfc_sli4_wq_put(wq, &wqe))
10440 return IOCB_ERROR;
10441 lpfc_sli_ringtxcmpl_put(phba, pring, piocb);
10442
10443 return 0;
10444}
10445
10446
10447
10448
10449
10450
10451
10452
10453
10454
10455
10456
10457
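/**
 * lpfc_sli_issue_fcp_io - Wrapper function for issuing an FCP IO
 * @phba: Pointer to HBA context object.
 * @ring_number: SLI ring number to issue the iocb on.
 * @piocb: Pointer to command iocb.
 * @flag: Flag indicating if this command can be put into txq.
 *
 * Calls the SLI3 or SLI4 specific routine installed in
 * phba->__lpfc_sli_issue_fcp_io by lpfc_sli_api_table_setup.
 */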
10458int
10459lpfc_sli_issue_fcp_io(struct lpfc_hba *phba, uint32_t ring_number,
10460 struct lpfc_iocbq *piocb, uint32_t flag)
10461{
10462 return phba->__lpfc_sli_issue_fcp_io(phba, ring_number, piocb, flag);
10463}
10464
10465
10466
10467
10468
10469
10470
10471
10472
10473
10474
10475
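/**
 * __lpfc_sli_issue_iocb - Wrapper function of lockless iocb issue
 * @phba: Pointer to HBA context object.
 * @ring_number: SLI ring number to issue the iocb on.
 * @piocb: Pointer to command iocb.
 * @flag: Flag indicating if this command can be put into txq.
 *
 * Dispatches to the SLI3 or SLI4 lockless issue routine. The caller is
 * responsible for holding the appropriate lock for the device group.
 */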
10476int
10477__lpfc_sli_issue_iocb(struct lpfc_hba *phba, uint32_t ring_number,
10478 struct lpfc_iocbq *piocb, uint32_t flag)
10479{
10480 return phba->__lpfc_sli_issue_iocb(phba, ring_number, piocb, flag);
10481}
10482
10483
10484
10485
10486
10487
10488
10489
10490
10491
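/**
 * lpfc_sli_api_table_setup - Set up the SLI API function jump table
 * @phba: The hba struct for which this call is being executed.
 * @dev_grp: The HBA PCI-Device group number.
 *
 * Installs the SLI3 or SLI4 routines for issuing and releasing iocbs.
 * Returns 0 on success, -ENODEV if the device group is not recognized.
 */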
10492int
10493lpfc_sli_api_table_setup(struct lpfc_hba *phba, uint8_t dev_grp)
10494{
10495
10496 switch (dev_grp) {
10497 case LPFC_PCI_DEV_LP:
10498 phba->__lpfc_sli_issue_iocb = __lpfc_sli_issue_iocb_s3;
10499 phba->__lpfc_sli_release_iocbq = __lpfc_sli_release_iocbq_s3;
10500 phba->__lpfc_sli_issue_fcp_io = __lpfc_sli_issue_fcp_io_s3;
10501 break;
10502 case LPFC_PCI_DEV_OC:
10503 phba->__lpfc_sli_issue_iocb = __lpfc_sli_issue_iocb_s4;
10504 phba->__lpfc_sli_release_iocbq = __lpfc_sli_release_iocbq_s4;
10505 phba->__lpfc_sli_issue_fcp_io = __lpfc_sli_issue_fcp_io_s4;
10506 break;
10507 default:
10508 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
10509 "1419 Invalid HBA PCI-device group: 0x%x\n",
10510 dev_grp);
10511 return -ENODEV;
10512 }
10513 phba->lpfc_get_iocb_from_iocbq = lpfc_get_iocb_from_iocbq;
10514 return 0;
10515}
10516
10517
10518
10519
10520
10521
10522
10523
10524
10525
10526
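/**
 * lpfc_sli4_calc_ring - Calculates which ring the iocb will use
 * @phba: Pointer to HBA context object.
 * @piocb: Pointer to command iocb.
 *
 * For FCP IO, returns the pring of the hardware queue selected for the
 * command (setting piocb->hba_wqidx from the io buffer when needed);
 * otherwise returns the ELS work queue pring. Returns NULL if the
 * required queues have not been set up yet.
 */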
10527struct lpfc_sli_ring *
10528lpfc_sli4_calc_ring(struct lpfc_hba *phba, struct lpfc_iocbq *piocb)
10529{
10530 struct lpfc_io_buf *lpfc_cmd;
10531
10532 if (piocb->iocb_flag & (LPFC_IO_FCP | LPFC_USE_FCPWQIDX)) {
10533 if (unlikely(!phba->sli4_hba.hdwq))
10534 return NULL;
10535
10536
10537
10538
10539 if (!(piocb->iocb_flag & LPFC_USE_FCPWQIDX)) {
10540 lpfc_cmd = (struct lpfc_io_buf *)piocb->context1;
10541 piocb->hba_wqidx = lpfc_cmd->hdwq_no;
10542 }
10543 return phba->sli4_hba.hdwq[piocb->hba_wqidx].io_wq->pring;
10544 } else {
10545 if (unlikely(!phba->sli4_hba.els_wq))
10546 return NULL;
10547 piocb->hba_wqidx = 0;
10548 return phba->sli4_hba.els_wq->pring;
10549 }
10550}
10551
10552
10553
10554
10555
10556
10557
10558
10559
10560
10561
10562
10563
10564
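/**
 * lpfc_sli_issue_iocb - Wrapper function for lock and issue of an iocb
 * @phba: Pointer to HBA context object.
 * @ring_number: Ring number on which the iocb is issued.
 * @piocb: Pointer to command iocb.
 * @flag: Flag indicating if this command can be put into txq.
 *
 * Acquires the appropriate lock (ring_lock for SLI4, hbalock for SLI3),
 * calls __lpfc_sli_issue_iocb, and for SLI4 kicks the fast-path EQ poll.
 */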
10565int
10566lpfc_sli_issue_iocb(struct lpfc_hba *phba, uint32_t ring_number,
10567 struct lpfc_iocbq *piocb, uint32_t flag)
10568{
10569 struct lpfc_sli_ring *pring;
10570 struct lpfc_queue *eq;
10571 unsigned long iflags;
10572 int rc;
10573
10574 if (phba->sli_rev == LPFC_SLI_REV4) {
10575 eq = phba->sli4_hba.hdwq[piocb->hba_wqidx].hba_eq;
10576
10577 pring = lpfc_sli4_calc_ring(phba, piocb);
10578 if (unlikely(pring == NULL))
10579 return IOCB_ERROR;
10580
10581 spin_lock_irqsave(&pring->ring_lock, iflags);
10582 rc = __lpfc_sli_issue_iocb(phba, ring_number, piocb, flag);
10583 spin_unlock_irqrestore(&pring->ring_lock, iflags);
10584
10585 lpfc_sli4_poll_eq(eq, LPFC_POLL_FASTPATH);
10586 } else {
10587
10588 spin_lock_irqsave(&phba->hbalock, iflags);
10589 rc = __lpfc_sli_issue_iocb(phba, ring_number, piocb, flag);
10590 spin_unlock_irqrestore(&phba->hbalock, iflags);
10591 }
10592 return rc;
10593}
10594
10595
10596
10597
10598
10599
10600
10601
10602
10603
10604
10605
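/**
 * lpfc_extra_ring_setup - Extra ring setup function
 * @phba: Pointer to HBA context object.
 *
 * Called while the driver attaches to the HBA when the extra (multi)
 * ring is configured. Moves some command and response iocb entries from
 * the FCP ring to the extra ring and sets up the unsolicited receive
 * mask from the cfg_multi_ring_rctl/type parameters. Always returns 0.
 */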
10606static int
10607 lpfc_extra_ring_setup(struct lpfc_hba *phba)
10608{
10609 struct lpfc_sli *psli;
10610 struct lpfc_sli_ring *pring;
10611
10612 psli = &phba->sli;
10613
10614
10615
10616
10617 pring = &psli->sli3_ring[LPFC_FCP_RING];
10618 pring->sli.sli3.numCiocb -= SLI2_IOCB_CMD_R1XTRA_ENTRIES;
10619 pring->sli.sli3.numRiocb -= SLI2_IOCB_RSP_R1XTRA_ENTRIES;
10620 pring->sli.sli3.numCiocb -= SLI2_IOCB_CMD_R3XTRA_ENTRIES;
10621 pring->sli.sli3.numRiocb -= SLI2_IOCB_RSP_R3XTRA_ENTRIES;
10622
10623
10624 pring = &psli->sli3_ring[LPFC_EXTRA_RING];
10625
10626 pring->sli.sli3.numCiocb += SLI2_IOCB_CMD_R1XTRA_ENTRIES;
10627 pring->sli.sli3.numRiocb += SLI2_IOCB_RSP_R1XTRA_ENTRIES;
10628 pring->sli.sli3.numCiocb += SLI2_IOCB_CMD_R3XTRA_ENTRIES;
10629 pring->sli.sli3.numRiocb += SLI2_IOCB_RSP_R3XTRA_ENTRIES;
10630
10631
10632 pring->iotag_max = 4096;
10633 pring->num_mask = 1;
10634 pring->prt[0].profile = 0;
10635 pring->prt[0].rctl = phba->cfg_multi_ring_rctl;
10636 pring->prt[0].type = phba->cfg_multi_ring_type;
10637 pring->prt[0].lpfc_sli_rcv_unsol_event = NULL;
10638 return 0;
10639}
10640
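/**
 * lpfc_sli_post_recovery_event - queue a port recovery event for a node
 * @phba: Pointer to HBA context object.
 * @ndlp: Pointer to the node to recover.
 *
 * Takes a reference on @ndlp, queues an LPFC_EVT_RECOVER_PORT work event
 * on the HBA work list (unless one is already queued) and wakes the
 * worker thread.
 */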
10641static void
10642lpfc_sli_post_recovery_event(struct lpfc_hba *phba,
10643 struct lpfc_nodelist *ndlp)
10644{
10645 unsigned long iflags;
10646 struct lpfc_work_evt *evtp = &ndlp->recovery_evt;
10647
10648 spin_lock_irqsave(&phba->hbalock, iflags);
10649 if (!list_empty(&evtp->evt_listp)) {
10650 spin_unlock_irqrestore(&phba->hbalock, iflags);
10651 return;
10652 }
10653
10654
10655 evtp->evt_arg1 = lpfc_nlp_get(ndlp);
10656 if (!evtp->evt_arg1) {
10657 spin_unlock_irqrestore(&phba->hbalock, iflags);
10658 return;
10659 }
10660 evtp->evt = LPFC_EVT_RECOVER_PORT;
10661 list_add_tail(&evtp->evt_listp, &phba->work_list);
10662 spin_unlock_irqrestore(&phba->hbalock, iflags);
10663
10664 lpfc_worker_wake_up(phba);
10665}
10666
10667
10668
10669
10670
10671
10672
10673
10674
10675
10676
10677
10678
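/**
 * lpfc_sli_abts_err_handler - handle a failed ABTS request from an SLI3 port
 * @phba: Pointer to HBA context object.
 * @iocbq: Pointer to iocb object containing the async status event.
 *
 * Looks up the vport and node identified by the vpi/rpi in the event and,
 * if the status is IOSTAT_LOCAL_REJECT, starts port recovery on that node.
 */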
10679static void
10680lpfc_sli_abts_err_handler(struct lpfc_hba *phba,
10681 struct lpfc_iocbq *iocbq)
10682{
10683 struct lpfc_nodelist *ndlp = NULL;
10684 uint16_t rpi = 0, vpi = 0;
10685 struct lpfc_vport *vport = NULL;
10686
10687
10688 vpi = iocbq->iocb.un.asyncstat.sub_ctxt_tag;
10689 rpi = iocbq->iocb.ulpContext;
10690
10691 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
10692 "3092 Port generated ABTS async event "
10693 "on vpi %d rpi %d status 0x%x\n",
10694 vpi, rpi, iocbq->iocb.ulpStatus);
10695
10696 vport = lpfc_find_vport_by_vpid(phba, vpi);
10697 if (!vport)
10698 goto err_exit;
10699 ndlp = lpfc_findnode_rpi(vport, rpi);
10700 if (!ndlp)
10701 goto err_exit;
10702
10703 if (iocbq->iocb.ulpStatus == IOSTAT_LOCAL_REJECT)
10704 lpfc_sli_abts_recover_port(vport, ndlp);
10705 return;
10706
10707 err_exit:
10708 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
10709 "3095 Event Context not found, no "
10710 "action on vpi %d rpi %d status 0x%x, reason 0x%x\n",
10711 			vpi, rpi, iocbq->iocb.ulpStatus,
10712 			iocbq->iocb.ulpContext);
10713}
10714
10715
10716
10717
10718
10719
10720
10721
10722
10723
10724
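/**
 * lpfc_sli4_abts_err_handler - handle a failed ABTS request from an SLI4 port
 * @phba: Pointer to HBA context object.
 * @ndlp: nodelist pointer for the impacted rport.
 * @axri: pointer to the WCQE for the aborted XRI.
 *
 * Logs the XRI abort event and, for a local reject with a sequence timeout
 * (or no extended status), posts a port recovery event for the node.
 */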
10725void
10726lpfc_sli4_abts_err_handler(struct lpfc_hba *phba,
10727 struct lpfc_nodelist *ndlp,
10728 struct sli4_wcqe_xri_aborted *axri)
10729{
10730 uint32_t ext_status = 0;
10731
10732 if (!ndlp) {
10733 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
10734 "3115 Node Context not found, driver "
10735 "ignoring abts err event\n");
10736 return;
10737 }
10738
10739 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
10740 "3116 Port generated FCP XRI ABORT event on "
10741 "vpi %d rpi %d xri x%x status 0x%x parameter x%x\n",
10742 ndlp->vport->vpi, phba->sli4_hba.rpi_ids[ndlp->nlp_rpi],
10743 bf_get(lpfc_wcqe_xa_xri, axri),
10744 bf_get(lpfc_wcqe_xa_status, axri),
10745 axri->parameter);
10746
10747
10748
10749
10750
10751
10752 ext_status = axri->parameter & IOERR_PARAM_MASK;
10753 if ((bf_get(lpfc_wcqe_xa_status, axri) == IOSTAT_LOCAL_REJECT) &&
10754 ((ext_status == IOERR_SEQUENCE_TIMEOUT) || (ext_status == 0)))
10755 lpfc_sli_post_recovery_event(phba, ndlp);
10756}
10757
10758
10759
10760
10761
10762
10763
10764
10765
10766
10767
10768
10769
10770
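/**
 * lpfc_sli_async_event_handler - Handle an asynchronous iocb event
 * @phba: Pointer to HBA context object.
 * @pring: Pointer to driver SLI ring object.
 * @iocbq: Pointer to iocb object carrying the asynchronous status.
 *
 * Handles temperature warning/safe events by posting an FC transport
 * vendor event, and ABTS (ASYNC_STATUS_CN) events via the ABTS error
 * handler; unknown event codes are dumped to the log.
 */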
10771static void
10772lpfc_sli_async_event_handler(struct lpfc_hba * phba,
10773 struct lpfc_sli_ring * pring, struct lpfc_iocbq * iocbq)
10774{
10775 IOCB_t *icmd;
10776 uint16_t evt_code;
10777 struct temp_event temp_event_data;
10778 struct Scsi_Host *shost;
10779 uint32_t *iocb_w;
10780
10781 icmd = &iocbq->iocb;
10782 evt_code = icmd->un.asyncstat.evt_code;
10783
10784 switch (evt_code) {
10785 case ASYNC_TEMP_WARN:
10786 case ASYNC_TEMP_SAFE:
10787 temp_event_data.data = (uint32_t) icmd->ulpContext;
10788 temp_event_data.event_type = FC_REG_TEMPERATURE_EVENT;
10789 if (evt_code == ASYNC_TEMP_WARN) {
10790 temp_event_data.event_code = LPFC_THRESHOLD_TEMP;
10791 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
10792 "0347 Adapter is very hot, please take "
10793 "corrective action. temperature : %d Celsius\n",
10794 (uint32_t) icmd->ulpContext);
10795 } else {
10796 temp_event_data.event_code = LPFC_NORMAL_TEMP;
10797 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
10798 "0340 Adapter temperature is OK now. "
10799 "temperature : %d Celsius\n",
10800 (uint32_t) icmd->ulpContext);
10801 }
10802
10803
10804 shost = lpfc_shost_from_vport(phba->pport);
10805 fc_host_post_vendor_event(shost, fc_get_event_number(),
10806 sizeof(temp_event_data), (char *) &temp_event_data,
10807 LPFC_NL_VENDOR_ID);
10808 break;
10809 case ASYNC_STATUS_CN:
10810 lpfc_sli_abts_err_handler(phba, iocbq);
10811 break;
10812 default:
10813 iocb_w = (uint32_t *) icmd;
10814 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
10815 "0346 Ring %d handler: unexpected ASYNC_STATUS"
10816 " evt_code 0x%x\n"
10817 "W0 0x%08x W1 0x%08x W2 0x%08x W3 0x%08x\n"
10818 "W4 0x%08x W5 0x%08x W6 0x%08x W7 0x%08x\n"
10819 "W8 0x%08x W9 0x%08x W10 0x%08x W11 0x%08x\n"
10820 "W12 0x%08x W13 0x%08x W14 0x%08x W15 0x%08x\n",
10821 pring->ringno, icmd->un.asyncstat.evt_code,
10822 iocb_w[0], iocb_w[1], iocb_w[2], iocb_w[3],
10823 iocb_w[4], iocb_w[5], iocb_w[6], iocb_w[7],
10824 iocb_w[8], iocb_w[9], iocb_w[10], iocb_w[11],
10825 iocb_w[12], iocb_w[13], iocb_w[14], iocb_w[15]);
10826
10827 break;
10828 }
10829}
10830
10831
10832
10833
10834
10835
10836
10837
10838
10839
10840
10841
10842
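/**
 * lpfc_sli4_setup - SLI ring setup function for an SLI4 HBA
 * @phba: Pointer to HBA context object.
 *
 * Sets up the unsolicited receive ring masks (ELS and CT frames) on the
 * ELS work queue pring. Always returns 0.
 */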
10843int
10844lpfc_sli4_setup(struct lpfc_hba *phba)
10845{
10846 struct lpfc_sli_ring *pring;
10847
10848 pring = phba->sli4_hba.els_wq->pring;
10849 pring->num_mask = LPFC_MAX_RING_MASK;
10850 pring->prt[0].profile = 0;
10851 pring->prt[0].rctl = FC_RCTL_ELS_REQ;
10852 pring->prt[0].type = FC_TYPE_ELS;
10853 pring->prt[0].lpfc_sli_rcv_unsol_event =
10854 lpfc_els_unsol_event;
10855 pring->prt[1].profile = 0;
10856 pring->prt[1].rctl = FC_RCTL_ELS_REP;
10857 pring->prt[1].type = FC_TYPE_ELS;
10858 pring->prt[1].lpfc_sli_rcv_unsol_event =
10859 lpfc_els_unsol_event;
10860 pring->prt[2].profile = 0;
10861
10862 pring->prt[2].rctl = FC_RCTL_DD_UNSOL_CTL;
10863
10864 pring->prt[2].type = FC_TYPE_CT;
10865 pring->prt[2].lpfc_sli_rcv_unsol_event =
10866 lpfc_ct_unsol_event;
10867 pring->prt[3].profile = 0;
10868
10869 pring->prt[3].rctl = FC_RCTL_DD_SOL_CTL;
10870
10871 pring->prt[3].type = FC_TYPE_CT;
10872 pring->prt[3].lpfc_sli_rcv_unsol_event =
10873 lpfc_ct_unsol_event;
10874 return 0;
10875}
10876
10877
10878
10879
10880
10881
10882
10883
10884
10885
10886
10887
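/**
 * lpfc_sli_setup - SLI ring setup function for an SLI3 HBA
 * @phba: Pointer to HBA context object.
 *
 * Initializes the command/response iocb counts and sizes for the FCP,
 * extra and ELS rings, installs the ELS/CT unsolicited event handlers,
 * and warns if the total iocb footprint exceeds the SLIM size. Also sets
 * up the extra ring when multi-ring support is configured.
 */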
10888int
10889lpfc_sli_setup(struct lpfc_hba *phba)
10890{
10891 int i, totiocbsize = 0;
10892 struct lpfc_sli *psli = &phba->sli;
10893 struct lpfc_sli_ring *pring;
10894
10895 psli->num_rings = MAX_SLI3_CONFIGURED_RINGS;
10896 psli->sli_flag = 0;
10897
10898 psli->iocbq_lookup = NULL;
10899 psli->iocbq_lookup_len = 0;
10900 psli->last_iotag = 0;
10901
10902 for (i = 0; i < psli->num_rings; i++) {
10903 pring = &psli->sli3_ring[i];
10904 switch (i) {
10905 case LPFC_FCP_RING:
10906
10907 pring->sli.sli3.numCiocb = SLI2_IOCB_CMD_R0_ENTRIES;
10908 pring->sli.sli3.numRiocb = SLI2_IOCB_RSP_R0_ENTRIES;
10909 pring->sli.sli3.numCiocb +=
10910 SLI2_IOCB_CMD_R1XTRA_ENTRIES;
10911 pring->sli.sli3.numRiocb +=
10912 SLI2_IOCB_RSP_R1XTRA_ENTRIES;
10913 pring->sli.sli3.numCiocb +=
10914 SLI2_IOCB_CMD_R3XTRA_ENTRIES;
10915 pring->sli.sli3.numRiocb +=
10916 SLI2_IOCB_RSP_R3XTRA_ENTRIES;
10917 pring->sli.sli3.sizeCiocb = (phba->sli_rev == 3) ?
10918 SLI3_IOCB_CMD_SIZE :
10919 SLI2_IOCB_CMD_SIZE;
10920 pring->sli.sli3.sizeRiocb = (phba->sli_rev == 3) ?
10921 SLI3_IOCB_RSP_SIZE :
10922 SLI2_IOCB_RSP_SIZE;
10923 pring->iotag_ctr = 0;
10924 pring->iotag_max =
10925 (phba->cfg_hba_queue_depth * 2);
10926 pring->fast_iotag = pring->iotag_max;
10927 pring->num_mask = 0;
10928 break;
10929 case LPFC_EXTRA_RING:
10930
10931 pring->sli.sli3.numCiocb = SLI2_IOCB_CMD_R1_ENTRIES;
10932 pring->sli.sli3.numRiocb = SLI2_IOCB_RSP_R1_ENTRIES;
10933 pring->sli.sli3.sizeCiocb = (phba->sli_rev == 3) ?
10934 SLI3_IOCB_CMD_SIZE :
10935 SLI2_IOCB_CMD_SIZE;
10936 pring->sli.sli3.sizeRiocb = (phba->sli_rev == 3) ?
10937 SLI3_IOCB_RSP_SIZE :
10938 SLI2_IOCB_RSP_SIZE;
10939 pring->iotag_max = phba->cfg_hba_queue_depth;
10940 pring->num_mask = 0;
10941 break;
10942 case LPFC_ELS_RING:
10943
10944 pring->sli.sli3.numCiocb = SLI2_IOCB_CMD_R2_ENTRIES;
10945 pring->sli.sli3.numRiocb = SLI2_IOCB_RSP_R2_ENTRIES;
10946 pring->sli.sli3.sizeCiocb = (phba->sli_rev == 3) ?
10947 SLI3_IOCB_CMD_SIZE :
10948 SLI2_IOCB_CMD_SIZE;
10949 pring->sli.sli3.sizeRiocb = (phba->sli_rev == 3) ?
10950 SLI3_IOCB_RSP_SIZE :
10951 SLI2_IOCB_RSP_SIZE;
10952 pring->fast_iotag = 0;
10953 pring->iotag_ctr = 0;
10954 pring->iotag_max = 4096;
10955 pring->lpfc_sli_rcv_async_status =
10956 lpfc_sli_async_event_handler;
10957 pring->num_mask = LPFC_MAX_RING_MASK;
10958 pring->prt[0].profile = 0;
10959 pring->prt[0].rctl = FC_RCTL_ELS_REQ;
10960 pring->prt[0].type = FC_TYPE_ELS;
10961 pring->prt[0].lpfc_sli_rcv_unsol_event =
10962 lpfc_els_unsol_event;
10963 pring->prt[1].profile = 0;
10964 pring->prt[1].rctl = FC_RCTL_ELS_REP;
10965 pring->prt[1].type = FC_TYPE_ELS;
10966 pring->prt[1].lpfc_sli_rcv_unsol_event =
10967 lpfc_els_unsol_event;
10968 pring->prt[2].profile = 0;
10969
10970 pring->prt[2].rctl = FC_RCTL_DD_UNSOL_CTL;
10971
10972 pring->prt[2].type = FC_TYPE_CT;
10973 pring->prt[2].lpfc_sli_rcv_unsol_event =
10974 lpfc_ct_unsol_event;
10975 pring->prt[3].profile = 0;
10976
10977 pring->prt[3].rctl = FC_RCTL_DD_SOL_CTL;
10978
10979 pring->prt[3].type = FC_TYPE_CT;
10980 pring->prt[3].lpfc_sli_rcv_unsol_event =
10981 lpfc_ct_unsol_event;
10982 break;
10983 }
10984 totiocbsize += (pring->sli.sli3.numCiocb *
10985 pring->sli.sli3.sizeCiocb) +
10986 (pring->sli.sli3.numRiocb * pring->sli.sli3.sizeRiocb);
10987 }
10988 if (totiocbsize > MAX_SLIM_IOCB_SIZE) {
10989
10990 printk(KERN_ERR "%d:0462 Too many cmd / rsp ring entries in "
10991 "SLI2 SLIM Data: x%x x%lx\n",
10992 phba->brd_no, totiocbsize,
10993 (unsigned long) MAX_SLIM_IOCB_SIZE);
10994 }
10995 if (phba->cfg_multi_ring_support == 2)
10996 lpfc_extra_ring_setup(phba);
10997
10998 return 0;
10999}
11000
11001
11002
11003
11004
11005
11006
11007
11008
11009
11010
11011
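/**
 * lpfc_sli4_queue_init - Queue initialization function for an SLI4 HBA
 * @phba: Pointer to HBA context object.
 *
 * Initializes the mailbox queues and the txq/txcmplq lists and locks of
 * every hardware queue pring, the ELS pring and, when NVME is enabled,
 * the NVME LS pring.
 */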
11012void
11013lpfc_sli4_queue_init(struct lpfc_hba *phba)
11014{
11015 struct lpfc_sli *psli;
11016 struct lpfc_sli_ring *pring;
11017 int i;
11018
11019 psli = &phba->sli;
11020 spin_lock_irq(&phba->hbalock);
11021 INIT_LIST_HEAD(&psli->mboxq);
11022 INIT_LIST_HEAD(&psli->mboxq_cmpl);
11023
11024 for (i = 0; i < phba->cfg_hdw_queue; i++) {
11025 pring = phba->sli4_hba.hdwq[i].io_wq->pring;
11026 pring->flag = 0;
11027 pring->ringno = LPFC_FCP_RING;
11028 pring->txcmplq_cnt = 0;
11029 INIT_LIST_HEAD(&pring->txq);
11030 INIT_LIST_HEAD(&pring->txcmplq);
11031 INIT_LIST_HEAD(&pring->iocb_continueq);
11032 spin_lock_init(&pring->ring_lock);
11033 }
11034 pring = phba->sli4_hba.els_wq->pring;
11035 pring->flag = 0;
11036 pring->ringno = LPFC_ELS_RING;
11037 pring->txcmplq_cnt = 0;
11038 INIT_LIST_HEAD(&pring->txq);
11039 INIT_LIST_HEAD(&pring->txcmplq);
11040 INIT_LIST_HEAD(&pring->iocb_continueq);
11041 spin_lock_init(&pring->ring_lock);
11042
11043 if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) {
11044 pring = phba->sli4_hba.nvmels_wq->pring;
11045 pring->flag = 0;
11046 pring->ringno = LPFC_ELS_RING;
11047 pring->txcmplq_cnt = 0;
11048 INIT_LIST_HEAD(&pring->txq);
11049 INIT_LIST_HEAD(&pring->txcmplq);
11050 INIT_LIST_HEAD(&pring->iocb_continueq);
11051 spin_lock_init(&pring->ring_lock);
11052 }
11053
11054 spin_unlock_irq(&phba->hbalock);
11055}
11056
11057
11058
11059
11060
11061
11062
11063
11064
11065
11066
11067
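/**
 * lpfc_sli_queue_init - Queue initialization function for an SLI3 HBA
 * @phba: Pointer to HBA context object.
 *
 * Initializes each SLI3 ring's indexes, list heads and ring lock while
 * holding the hbalock.
 */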
11068void
11069lpfc_sli_queue_init(struct lpfc_hba *phba)
11070{
11071 struct lpfc_sli *psli;
11072 struct lpfc_sli_ring *pring;
11073 int i;
11074
11075 psli = &phba->sli;
11076 spin_lock_irq(&phba->hbalock);
11077 INIT_LIST_HEAD(&psli->mboxq);
11078 INIT_LIST_HEAD(&psli->mboxq_cmpl);
11079
11080 for (i = 0; i < psli->num_rings; i++) {
11081 pring = &psli->sli3_ring[i];
11082 pring->ringno = i;
11083 pring->sli.sli3.next_cmdidx = 0;
11084 pring->sli.sli3.local_getidx = 0;
11085 pring->sli.sli3.cmdidx = 0;
11086 INIT_LIST_HEAD(&pring->iocb_continueq);
11087 INIT_LIST_HEAD(&pring->iocb_continue_saveq);
11088 INIT_LIST_HEAD(&pring->postbufq);
11089 pring->flag = 0;
11090 INIT_LIST_HEAD(&pring->txq);
11091 INIT_LIST_HEAD(&pring->txcmplq);
11092 spin_lock_init(&pring->ring_lock);
11093 }
11094 spin_unlock_irq(&phba->hbalock);
11095}
11096
11097
11098
11099
11100
11101
11102
11103
11104
11105
11106
11107
11108
11109
11110
11111
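/**
 * lpfc_sli_mbox_sys_flush - Flush the mailbox command subsystem
 * @phba: Pointer to HBA context object.
 *
 * Moves all pending, active and completed mailbox commands onto a local
 * list and completes each of them with MBX_NOT_FINISHED status. Bottom
 * halves are disabled while the hbalock is taken.
 */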
11112static void
11113lpfc_sli_mbox_sys_flush(struct lpfc_hba *phba)
11114{
11115 LIST_HEAD(completions);
11116 struct lpfc_sli *psli = &phba->sli;
11117 LPFC_MBOXQ_t *pmb;
11118 unsigned long iflag;
11119
11120
11121 local_bh_disable();
11122
11123
11124 spin_lock_irqsave(&phba->hbalock, iflag);
11125
11126
11127 list_splice_init(&phba->sli.mboxq, &completions);
11128
11129 if (psli->mbox_active) {
11130 list_add_tail(&psli->mbox_active->list, &completions);
11131 psli->mbox_active = NULL;
11132 psli->sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
11133 }
11134
11135 list_splice_init(&phba->sli.mboxq_cmpl, &completions);
11136 spin_unlock_irqrestore(&phba->hbalock, iflag);
11137
11138
11139 local_bh_enable();
11140
11141
11142 while (!list_empty(&completions)) {
11143 list_remove_head(&completions, pmb, LPFC_MBOXQ_t, list);
11144 pmb->u.mb.mbxStatus = MBX_NOT_FINISHED;
11145 if (pmb->mbox_cmpl)
11146 pmb->mbox_cmpl(phba, pmb);
11147 }
11148}
11149
11150
11151
11152
11153
11154
11155
11156
11157
11158
11159
11160
11161
11162
11163
11164
11165
11166
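/**
 * lpfc_sli_host_down - Vport cleanup function
 * @vport: Pointer to virtual port object.
 *
 * Cleans up the vport's discovery resources, moves all of its pending txq
 * iocbs to a local list, issues aborts for its outstanding txcmplq iocbs,
 * and then cancels the collected iocbs with IOSTAT_LOCAL_REJECT /
 * IOERR_SLI_DOWN. Always returns 1.
 */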
11167int
11168lpfc_sli_host_down(struct lpfc_vport *vport)
11169{
11170 LIST_HEAD(completions);
11171 struct lpfc_hba *phba = vport->phba;
11172 struct lpfc_sli *psli = &phba->sli;
11173 struct lpfc_queue *qp = NULL;
11174 struct lpfc_sli_ring *pring;
11175 struct lpfc_iocbq *iocb, *next_iocb;
11176 int i;
11177 unsigned long flags = 0;
11178 uint16_t prev_pring_flag;
11179
11180 lpfc_cleanup_discovery_resources(vport);
11181
11182 spin_lock_irqsave(&phba->hbalock, flags);
11183
11184
11185
11186
11187
11188
11189 if (phba->sli_rev != LPFC_SLI_REV4) {
11190 for (i = 0; i < psli->num_rings; i++) {
11191 pring = &psli->sli3_ring[i];
11192 prev_pring_flag = pring->flag;
11193
11194 if (pring->ringno == LPFC_ELS_RING) {
11195 pring->flag |= LPFC_DEFERRED_RING_EVENT;
11196
11197 set_bit(LPFC_DATA_READY, &phba->data_flags);
11198 }
11199 list_for_each_entry_safe(iocb, next_iocb,
11200 &pring->txq, list) {
11201 if (iocb->vport != vport)
11202 continue;
11203 list_move_tail(&iocb->list, &completions);
11204 }
11205 list_for_each_entry_safe(iocb, next_iocb,
11206 &pring->txcmplq, list) {
11207 if (iocb->vport != vport)
11208 continue;
11209 lpfc_sli_issue_abort_iotag(phba, pring, iocb,
11210 NULL);
11211 }
11212 pring->flag = prev_pring_flag;
11213 }
11214 } else {
11215 list_for_each_entry(qp, &phba->sli4_hba.lpfc_wq_list, wq_list) {
11216 pring = qp->pring;
11217 if (!pring)
11218 continue;
11219 if (pring == phba->sli4_hba.els_wq->pring) {
11220 pring->flag |= LPFC_DEFERRED_RING_EVENT;
11221
11222 set_bit(LPFC_DATA_READY, &phba->data_flags);
11223 }
11224 prev_pring_flag = pring->flag;
11225 spin_lock(&pring->ring_lock);
11226 list_for_each_entry_safe(iocb, next_iocb,
11227 &pring->txq, list) {
11228 if (iocb->vport != vport)
11229 continue;
11230 list_move_tail(&iocb->list, &completions);
11231 }
11232 spin_unlock(&pring->ring_lock);
11233 list_for_each_entry_safe(iocb, next_iocb,
11234 &pring->txcmplq, list) {
11235 if (iocb->vport != vport)
11236 continue;
11237 lpfc_sli_issue_abort_iotag(phba, pring, iocb,
11238 NULL);
11239 }
11240 pring->flag = prev_pring_flag;
11241 }
11242 }
11243 spin_unlock_irqrestore(&phba->hbalock, flags);
11244
11245
11246 lpfc_issue_hb_tmo(phba);
11247
11248
11249 lpfc_sli_cancel_iocbs(phba, &completions, IOSTAT_LOCAL_REJECT,
11250 IOERR_SLI_DOWN);
11251 return 1;
11252}
11253
11254
11255
11256
11257
11258
11259
11260
11261
11262
11263
11264
11265
11266
11267
11268
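/**
 * lpfc_sli_hba_down - Resource cleanup function for the HBA
 * @phba: Pointer to HBA context object.
 *
 * Shuts down the mailbox subsystem, cancels all pending txq iocbs on every
 * ring with IOSTAT_LOCAL_REJECT / IOERR_SLI_DOWN, frees the posted ELS
 * buffers and stops the mailbox timer. Always returns 1.
 */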
11269int
11270lpfc_sli_hba_down(struct lpfc_hba *phba)
11271{
11272 LIST_HEAD(completions);
11273 struct lpfc_sli *psli = &phba->sli;
11274 struct lpfc_queue *qp = NULL;
11275 struct lpfc_sli_ring *pring;
11276 struct lpfc_dmabuf *buf_ptr;
11277 unsigned long flags = 0;
11278 int i;
11279
11280
11281 lpfc_sli_mbox_sys_shutdown(phba, LPFC_MBX_WAIT);
11282
11283 lpfc_hba_down_prep(phba);
11284
11285
11286 local_bh_disable();
11287
11288 lpfc_fabric_abort_hba(phba);
11289
11290 spin_lock_irqsave(&phba->hbalock, flags);
11291
11292
11293
11294
11295
11296 if (phba->sli_rev != LPFC_SLI_REV4) {
11297 for (i = 0; i < psli->num_rings; i++) {
11298 pring = &psli->sli3_ring[i];
11299
11300 if (pring->ringno == LPFC_ELS_RING) {
11301 pring->flag |= LPFC_DEFERRED_RING_EVENT;
11302
11303 set_bit(LPFC_DATA_READY, &phba->data_flags);
11304 }
11305 list_splice_init(&pring->txq, &completions);
11306 }
11307 } else {
11308 list_for_each_entry(qp, &phba->sli4_hba.lpfc_wq_list, wq_list) {
11309 pring = qp->pring;
11310 if (!pring)
11311 continue;
11312 spin_lock(&pring->ring_lock);
11313 list_splice_init(&pring->txq, &completions);
11314 spin_unlock(&pring->ring_lock);
11315 if (pring == phba->sli4_hba.els_wq->pring) {
11316 pring->flag |= LPFC_DEFERRED_RING_EVENT;
11317
11318 set_bit(LPFC_DATA_READY, &phba->data_flags);
11319 }
11320 }
11321 }
11322 spin_unlock_irqrestore(&phba->hbalock, flags);
11323
11324
11325 lpfc_sli_cancel_iocbs(phba, &completions, IOSTAT_LOCAL_REJECT,
11326 IOERR_SLI_DOWN);
11327
11328 spin_lock_irqsave(&phba->hbalock, flags);
11329 list_splice_init(&phba->elsbuf, &completions);
11330 phba->elsbuf_cnt = 0;
11331 phba->elsbuf_prev_cnt = 0;
11332 spin_unlock_irqrestore(&phba->hbalock, flags);
11333
11334 while (!list_empty(&completions)) {
11335 list_remove_head(&completions, buf_ptr,
11336 struct lpfc_dmabuf, list);
11337 lpfc_mbuf_free(phba, buf_ptr->virt, buf_ptr->phys);
11338 kfree(buf_ptr);
11339 }
11340
11341
11342 local_bh_enable();
11343
11344
11345 del_timer_sync(&psli->mbox_tmo);
11346
11347 spin_lock_irqsave(&phba->pport->work_port_lock, flags);
11348 phba->pport->work_port_events &= ~WORKER_MBOX_TMO;
11349 spin_unlock_irqrestore(&phba->pport->work_port_lock, flags);
11350
11351 return 1;
11352}
11353
11354
11355
11356
11357
11358
11359
11360
11361
11362
11363
11364
11365
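/**
 * lpfc_sli_pcimem_bcopy - SLI memory copy function
 * @srcp: Source memory pointer.
 * @destp: Destination memory pointer.
 * @cnt: Number of bytes to copy (a whole number of 32-bit words).
 *
 * Copies @cnt bytes a word at a time, converting each word from little
 * endian to CPU byte order.
 */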
11366void
11367lpfc_sli_pcimem_bcopy(void *srcp, void *destp, uint32_t cnt)
11368{
11369 uint32_t *src = srcp;
11370 uint32_t *dest = destp;
11371 uint32_t ldata;
11372 int i;
11373
11374 	for (i = 0; i < (int)cnt; i += sizeof(uint32_t)) {
11375 ldata = *src;
11376 ldata = le32_to_cpu(ldata);
11377 *dest = ldata;
11378 src++;
11379 dest++;
11380 }
11381}
11382
11383
11384
11385
11386
11387
11388
11389
11390
11391
11392
11393
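/**
 * lpfc_sli_bemem_bcopy - SLI memory copy function (big endian source)
 * @srcp: Source memory pointer.
 * @destp: Destination memory pointer.
 * @cnt: Number of bytes to copy (a whole number of 32-bit words).
 *
 * Same as lpfc_sli_pcimem_bcopy, but converts each word from big endian
 * to CPU byte order.
 */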
11394void
11395lpfc_sli_bemem_bcopy(void *srcp, void *destp, uint32_t cnt)
11396{
11397 uint32_t *src = srcp;
11398 uint32_t *dest = destp;
11399 uint32_t ldata;
11400 int i;
11401
11402 for (i = 0; i < (int)cnt; i += sizeof(uint32_t)) {
11403 ldata = *src;
11404 ldata = be32_to_cpu(ldata);
11405 *dest = ldata;
11406 src++;
11407 dest++;
11408 }
11409}
11410
11411
11412
11413
11414
11415
11416
11417
11418
11419
11420
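/**
 * lpfc_sli_ringpostbuf_put - Add a buffer to the ring's postbufq
 * @phba: Pointer to HBA context object.
 * @pring: Pointer to driver SLI ring object.
 * @mp: DMA buffer being posted.
 *
 * Adds @mp to the ring's postbufq under the hbalock and bumps the
 * postbufq count. Always returns 0.
 */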
11421int
11422lpfc_sli_ringpostbuf_put(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
11423 struct lpfc_dmabuf *mp)
11424{
11425
11426
11427 spin_lock_irq(&phba->hbalock);
11428 list_add_tail(&mp->list, &pring->postbufq);
11429 pring->postbufq_cnt++;
11430 spin_unlock_irq(&phba->hbalock);
11431 return 0;
11432}
11433
11434
11435
11436
11437
11438
11439
11440
11441
11442
11443
11444
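/**
 * lpfc_sli_get_buffer_tag - Allocates a driver buffer tag
 * @phba: Pointer to HBA context object.
 *
 * Increments the HBA buffer tag counter under the hbalock and returns the
 * new tag with QUE_BUFTAG_BIT set so that driver buffer tags do not
 * collide with other tag values.
 */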
11445uint32_t
11446lpfc_sli_get_buffer_tag(struct lpfc_hba *phba)
11447{
11448 spin_lock_irq(&phba->hbalock);
11449 phba->buffer_tag_count++;
11450
11451
11452
11453
11454 phba->buffer_tag_count |= QUE_BUFTAG_BIT;
11455 spin_unlock_irq(&phba->hbalock);
11456 return phba->buffer_tag_count;
11457}
11458
11459
11460
11461
11462
11463
11464
11465
11466
11467
11468
11469
11470
11471
11472
11473
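/**
 * lpfc_sli_ring_taggedbuf_get - find a buffer on postbufq by buffer tag
 * @phba: Pointer to HBA context object.
 * @pring: Pointer to driver SLI ring object.
 * @tag: Buffer tag to look for.
 *
 * Searches the ring's postbufq for a buffer with the given tag, removes
 * it from the list and returns it. Returns NULL (and logs an error) if
 * no matching buffer is found.
 */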
11474struct lpfc_dmabuf *
11475lpfc_sli_ring_taggedbuf_get(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
11476 uint32_t tag)
11477{
11478 struct lpfc_dmabuf *mp, *next_mp;
11479 struct list_head *slp = &pring->postbufq;
11480
11481
11482 spin_lock_irq(&phba->hbalock);
11483 list_for_each_entry_safe(mp, next_mp, &pring->postbufq, list) {
11484 if (mp->buffer_tag == tag) {
11485 list_del_init(&mp->list);
11486 pring->postbufq_cnt--;
11487 spin_unlock_irq(&phba->hbalock);
11488 return mp;
11489 }
11490 }
11491
11492 spin_unlock_irq(&phba->hbalock);
11493 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
11494 "0402 Cannot find virtual addr for buffer tag on "
11495 "ring %d Data x%lx x%px x%px x%x\n",
11496 pring->ringno, (unsigned long) tag,
11497 slp->next, slp->prev, pring->postbufq_cnt);
11498
11499 return NULL;
11500}
11501
11502
11503
11504
11505
11506
11507
11508
11509
11510
11511
11512
11513
11514
11515
11516
11517
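/**
 * lpfc_sli_ringpostbuf_get - search postbufq for a buffer by DMA address
 * @phba: Pointer to HBA context object.
 * @pring: Pointer to driver SLI ring object.
 * @phys: DMA address of the buffer.
 *
 * Searches the ring's postbufq for the buffer whose physical address
 * matches @phys, unlinks it and returns it. Returns NULL (and logs an
 * error) if the buffer is not found.
 */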
11518struct lpfc_dmabuf *
11519lpfc_sli_ringpostbuf_get(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
11520 dma_addr_t phys)
11521{
11522 struct lpfc_dmabuf *mp, *next_mp;
11523 struct list_head *slp = &pring->postbufq;
11524
11525
11526 spin_lock_irq(&phba->hbalock);
11527 list_for_each_entry_safe(mp, next_mp, &pring->postbufq, list) {
11528 if (mp->phys == phys) {
11529 list_del_init(&mp->list);
11530 pring->postbufq_cnt--;
11531 spin_unlock_irq(&phba->hbalock);
11532 return mp;
11533 }
11534 }
11535
11536 spin_unlock_irq(&phba->hbalock);
11537 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
11538 "0410 Cannot find virtual addr for mapped buf on "
11539 "ring %d Data x%llx x%px x%px x%x\n",
11540 pring->ringno, (unsigned long long)phys,
11541 slp->next, slp->prev, pring->postbufq_cnt);
11542 return NULL;
11543}
11544
11545
11546
11547
11548
11549
11550
11551
11552
11553
11554
11555
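/**
 * lpfc_sli_abort_els_cmpl - Completion handler for the ELS abort iocbs
 * @phba: Pointer to HBA context object.
 * @cmdiocb: Pointer to driver command iocb object.
 * @rspiocb: Pointer to driver response iocb object.
 *
 * Logs the result of an ELS abort request and releases the abort iocb.
 */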
11556static void
11557lpfc_sli_abort_els_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
11558 struct lpfc_iocbq *rspiocb)
11559{
11560 IOCB_t *irsp = &rspiocb->iocb;
11561 uint16_t abort_iotag, abort_context;
11562 struct lpfc_iocbq *abort_iocb = NULL;
11563
11564 if (irsp->ulpStatus) {
11565
11566
11567
11568
11569
11570 abort_context = cmdiocb->iocb.un.acxri.abortContextTag;
11571 abort_iotag = cmdiocb->iocb.un.acxri.abortIoTag;
11572
11573 spin_lock_irq(&phba->hbalock);
11574 if (phba->sli_rev < LPFC_SLI_REV4) {
11575 if (irsp->ulpCommand == CMD_ABORT_XRI_CX &&
11576 irsp->ulpStatus == IOSTAT_LOCAL_REJECT &&
11577 irsp->un.ulpWord[4] == IOERR_ABORT_REQUESTED) {
11578 spin_unlock_irq(&phba->hbalock);
11579 goto release_iocb;
11580 }
11581 if (abort_iotag != 0 &&
11582 abort_iotag <= phba->sli.last_iotag)
11583 abort_iocb =
11584 phba->sli.iocbq_lookup[abort_iotag];
11585 } else
11586
11587
11588
11589
11590
11591 abort_iocb = phba->sli.iocbq_lookup[abort_context];
11592
11593 lpfc_printf_log(phba, KERN_WARNING, LOG_ELS | LOG_SLI,
11594 "0327 Cannot abort els iocb x%px "
11595 "with tag %x context %x, abort status %x, "
11596 "abort code %x\n",
11597 abort_iocb, abort_iotag, abort_context,
11598 irsp->ulpStatus, irsp->un.ulpWord[4]);
11599
11600 spin_unlock_irq(&phba->hbalock);
11601 }
11602release_iocb:
11603 lpfc_sli_release_iocbq(phba, cmdiocb);
11604 return;
11605}
11606
11607
11608
11609
11610
11611
11612
11613
11614
11615
11616
11617
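/**
 * lpfc_ignore_els_cmpl - Completion handler for an aborted ELS command
 * @phba: Pointer to HBA context object.
 * @cmdiocb: Pointer to driver command iocb object.
 * @rspiocb: Pointer to driver response iocb object.
 *
 * Drops the node reference and frees the command iocb without any further
 * processing of the response.
 */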
11618void
11619lpfc_ignore_els_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
11620 struct lpfc_iocbq *rspiocb)
11621{
11622 IOCB_t *irsp = &rspiocb->iocb;
11623
11624
11625 lpfc_printf_log(phba, KERN_INFO, LOG_ELS,
11626 "0139 Ignoring ELS cmd tag x%x completion Data: "
11627 "x%x x%x x%x\n",
11628 irsp->ulpIoTag, irsp->ulpStatus,
11629 irsp->un.ulpWord[4], irsp->ulpTimeout);
11630 lpfc_nlp_put((struct lpfc_nodelist *)cmdiocb->context1);
11631 if (cmdiocb->iocb.ulpCommand == CMD_GEN_REQUEST64_CR)
11632 lpfc_ct_free_iocb(phba, cmdiocb);
11633 else
11634 lpfc_els_free_iocb(phba, cmdiocb);
11635}
11636
11637
11638
11639
11640
11641
11642
11643
11644
11645
11646
11647
11648
11649
11650
11651
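/**
 * lpfc_sli_issue_abort_iotag - Abort function for a command iocb
 * @phba: Pointer to HBA context object.
 * @pring: Pointer to driver SLI ring object.
 * @cmdiocb: Pointer to driver command iocb object.
 * @cmpl: Completion function to use for the abort iocb.
 *
 * Builds and issues an ABORT_XRI_CN / CLOSE_XRI_CN iocb for @cmdiocb and
 * marks the command with LPFC_DRIVER_ABORTED. Returns IOCB_ABORTING if an
 * abort is already outstanding, IOCB_NORESOURCE if no iocb could be
 * allocated, or the status of the issued abort. Called with the hbalock
 * held.
 */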
11652int
11653lpfc_sli_issue_abort_iotag(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
11654 struct lpfc_iocbq *cmdiocb, void *cmpl)
11655{
11656 struct lpfc_vport *vport = cmdiocb->vport;
11657 struct lpfc_iocbq *abtsiocbp;
11658 IOCB_t *icmd = NULL;
11659 IOCB_t *iabt = NULL;
11660 int retval = IOCB_ERROR;
11661 unsigned long iflags;
11662 struct lpfc_nodelist *ndlp;
11663
11664
11665
11666
11667
11668
11669 icmd = &cmdiocb->iocb;
11670 if (icmd->ulpCommand == CMD_ABORT_XRI_CN ||
11671 icmd->ulpCommand == CMD_CLOSE_XRI_CN ||
11672 cmdiocb->iocb_flag & LPFC_DRIVER_ABORTED)
11673 return IOCB_ABORTING;
11674
11675 if (!pring) {
11676 if (cmdiocb->iocb_flag & LPFC_IO_FABRIC)
11677 cmdiocb->fabric_iocb_cmpl = lpfc_ignore_els_cmpl;
11678 else
11679 cmdiocb->iocb_cmpl = lpfc_ignore_els_cmpl;
11680 return retval;
11681 }
11682
11683
11684
11685
11686
11687 if ((vport->load_flag & FC_UNLOADING) &&
11688 pring->ringno == LPFC_ELS_RING) {
11689 if (cmdiocb->iocb_flag & LPFC_IO_FABRIC)
11690 cmdiocb->fabric_iocb_cmpl = lpfc_ignore_els_cmpl;
11691 else
11692 cmdiocb->iocb_cmpl = lpfc_ignore_els_cmpl;
11693 return retval;
11694 }
11695
11696
11697 abtsiocbp = __lpfc_sli_get_iocbq(phba);
11698 if (abtsiocbp == NULL)
11699 return IOCB_NORESOURCE;
11700
11701
11702
11703
11704 cmdiocb->iocb_flag |= LPFC_DRIVER_ABORTED;
11705
11706 iabt = &abtsiocbp->iocb;
11707 iabt->un.acxri.abortType = ABORT_TYPE_ABTS;
11708 iabt->un.acxri.abortContextTag = icmd->ulpContext;
11709 if (phba->sli_rev == LPFC_SLI_REV4) {
11710 iabt->un.acxri.abortIoTag = cmdiocb->sli4_xritag;
11711 if (pring->ringno == LPFC_ELS_RING)
11712 iabt->un.acxri.abortContextTag = cmdiocb->iotag;
11713 } else {
11714 iabt->un.acxri.abortIoTag = icmd->ulpIoTag;
11715 if (pring->ringno == LPFC_ELS_RING) {
11716 ndlp = (struct lpfc_nodelist *)(cmdiocb->context1);
11717 iabt->un.acxri.abortContextTag = ndlp->nlp_rpi;
11718 }
11719 }
11720 iabt->ulpLe = 1;
11721 iabt->ulpClass = icmd->ulpClass;
11722
11723
11724 abtsiocbp->hba_wqidx = cmdiocb->hba_wqidx;
11725 if (cmdiocb->iocb_flag & LPFC_IO_FCP) {
11726 abtsiocbp->iocb_flag |= LPFC_IO_FCP;
11727 abtsiocbp->iocb_flag |= LPFC_USE_FCPWQIDX;
11728 }
11729 if (cmdiocb->iocb_flag & LPFC_IO_FOF)
11730 abtsiocbp->iocb_flag |= LPFC_IO_FOF;
11731
11732 if (phba->link_state >= LPFC_LINK_UP)
11733 iabt->ulpCommand = CMD_ABORT_XRI_CN;
11734 else
11735 iabt->ulpCommand = CMD_CLOSE_XRI_CN;
11736
11737 if (cmpl)
11738 abtsiocbp->iocb_cmpl = cmpl;
11739 else
11740 abtsiocbp->iocb_cmpl = lpfc_sli_abort_els_cmpl;
11741 abtsiocbp->vport = vport;
11742
11743 if (phba->sli_rev == LPFC_SLI_REV4) {
11744 pring = lpfc_sli4_calc_ring(phba, abtsiocbp);
11745 if (unlikely(pring == NULL))
11746 goto abort_iotag_exit;
11747
11748 spin_lock_irqsave(&pring->ring_lock, iflags);
11749 retval = __lpfc_sli_issue_iocb(phba, pring->ringno,
11750 abtsiocbp, 0);
11751 spin_unlock_irqrestore(&pring->ring_lock, iflags);
11752 } else {
11753 retval = __lpfc_sli_issue_iocb(phba, pring->ringno,
11754 abtsiocbp, 0);
11755 }
11756
11757abort_iotag_exit:
11758
11759 lpfc_printf_vlog(vport, KERN_INFO, LOG_SLI,
11760 "0339 Abort xri x%x, original iotag x%x, "
11761 "abort cmd iotag x%x retval x%x\n",
11762 iabt->un.acxri.abortIoTag,
11763 iabt->un.acxri.abortContextTag,
11764 abtsiocbp->iotag, retval);
11765
11766 if (retval) {
11767 cmdiocb->iocb_flag &= ~LPFC_DRIVER_ABORTED;
11768 __lpfc_sli_release_iocbq(phba, abtsiocbp);
11769 }
11770
11771
11772
11773
11774
11775
11776 return retval;
11777}
11778
11779
11780
11781
11782
11783
11784
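/**
 * lpfc_sli_hba_iocb_abort - Abort all outstanding iocbs on the HBA
 * @phba: pointer to lpfc HBA data structure.
 *
 * Walks every SLI3 ring or every SLI4 work queue pring and aborts the
 * outstanding iocbs on each via lpfc_sli_abort_iocb_ring.
 */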
11785void
11786lpfc_sli_hba_iocb_abort(struct lpfc_hba *phba)
11787{
11788 struct lpfc_sli *psli = &phba->sli;
11789 struct lpfc_sli_ring *pring;
11790 struct lpfc_queue *qp = NULL;
11791 int i;
11792
11793 if (phba->sli_rev != LPFC_SLI_REV4) {
11794 for (i = 0; i < psli->num_rings; i++) {
11795 pring = &psli->sli3_ring[i];
11796 lpfc_sli_abort_iocb_ring(phba, pring);
11797 }
11798 return;
11799 }
11800 list_for_each_entry(qp, &phba->sli4_hba.lpfc_wq_list, wq_list) {
11801 pring = qp->pring;
11802 if (!pring)
11803 continue;
11804 lpfc_sli_abort_iocb_ring(phba, pring);
11805 }
11806}
11807
11808
11809
11810
11811
11812
11813
11814
11815
11816
11817
11818
11819
11820
11821
11822
11823
11824
11825
11826
11827
11828
11829
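/**
 * lpfc_sli_validate_fcp_iocb - match an FCP command against a context
 * @iocbq: Pointer to driver iocb object.
 * @vport: Pointer to driver virtual port object.
 * @tgt_id: SCSI ID of the target.
 * @lun_id: LUN ID of the scsi command.
 * @ctx_cmd: LPFC_CTX_LUN/LPFC_CTX_TGT/LPFC_CTX_HOST.
 *
 * Returns 0 if the iocb is an active FCP command on @vport that matches
 * the given context (LUN, target or host); returns 1 otherwise.
 */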
11830static int
11831lpfc_sli_validate_fcp_iocb(struct lpfc_iocbq *iocbq, struct lpfc_vport *vport,
11832 uint16_t tgt_id, uint64_t lun_id,
11833 lpfc_ctx_cmd ctx_cmd)
11834{
11835 struct lpfc_io_buf *lpfc_cmd;
11836 IOCB_t *icmd = NULL;
11837 int rc = 1;
11838
11839 if (!iocbq || iocbq->vport != vport)
11840 return rc;
11841
11842 if (!(iocbq->iocb_flag & LPFC_IO_FCP) ||
11843 !(iocbq->iocb_flag & LPFC_IO_ON_TXCMPLQ) ||
11844 iocbq->iocb_flag & LPFC_DRIVER_ABORTED)
11845 return rc;
11846
11847 icmd = &iocbq->iocb;
11848 if (icmd->ulpCommand == CMD_ABORT_XRI_CN ||
11849 icmd->ulpCommand == CMD_CLOSE_XRI_CN)
11850 return rc;
11851
11852 lpfc_cmd = container_of(iocbq, struct lpfc_io_buf, cur_iocbq);
11853
11854 if (lpfc_cmd->pCmd == NULL)
11855 return rc;
11856
11857 switch (ctx_cmd) {
11858 case LPFC_CTX_LUN:
11859 if ((lpfc_cmd->rdata) && (lpfc_cmd->rdata->pnode) &&
11860 (lpfc_cmd->rdata->pnode->nlp_sid == tgt_id) &&
11861 (scsilun_to_int(&lpfc_cmd->fcp_cmnd->fcp_lun) == lun_id))
11862 rc = 0;
11863 break;
11864 case LPFC_CTX_TGT:
11865 if ((lpfc_cmd->rdata) && (lpfc_cmd->rdata->pnode) &&
11866 (lpfc_cmd->rdata->pnode->nlp_sid == tgt_id))
11867 rc = 0;
11868 break;
11869 case LPFC_CTX_HOST:
11870 rc = 0;
11871 break;
11872 default:
11873 printk(KERN_ERR "%s: Unknown context cmd type, value %d\n",
11874 __func__, ctx_cmd);
11875 break;
11876 }
11877
11878 return rc;
11879}
11880
11881
11882
11883
11884
11885
11886
11887
11888
11889
11890
11891
11892
11893
11894
11895
11896
11897
11898
11899
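/**
 * lpfc_sli_sum_iocb - Count outstanding FCP iocbs for a context
 * @vport: Pointer to virtual port.
 * @tgt_id: SCSI ID of the target.
 * @lun_id: LUN ID of the scsi command.
 * @ctx_cmd: LPFC_CTX_LUN/LPFC_CTX_TGT/LPFC_CTX_HOST.
 *
 * Walks the iotag lookup array under the hbalock and returns the number
 * of outstanding FCP iocbs that match the given LUN, target or host
 * context.
 */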
11900int
11901lpfc_sli_sum_iocb(struct lpfc_vport *vport, uint16_t tgt_id, uint64_t lun_id,
11902 lpfc_ctx_cmd ctx_cmd)
11903{
11904 struct lpfc_hba *phba = vport->phba;
11905 struct lpfc_iocbq *iocbq;
11906 int sum, i;
11907
11908 spin_lock_irq(&phba->hbalock);
11909 for (i = 1, sum = 0; i <= phba->sli.last_iotag; i++) {
11910 iocbq = phba->sli.iocbq_lookup[i];
11911
11912 if (lpfc_sli_validate_fcp_iocb (iocbq, vport, tgt_id, lun_id,
11913 ctx_cmd) == 0)
11914 sum++;
11915 }
11916 spin_unlock_irq(&phba->hbalock);
11917
11918 return sum;
11919}
11920
11921
11922
11923
11924
11925
11926
11927
11928
11929
11930
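/**
 * lpfc_sli4_abort_fcp_cmpl - Completion handler for FCP aborts (SLI4 path)
 * @phba: Pointer to HBA context object.
 * @cmdiocb: Pointer to command iocb object.
 * @wcqe: Pointer to the completion WCQE.
 *
 * Logs the abort completion status from the WCQE and releases the iocb.
 */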
11931void
11932lpfc_sli4_abort_fcp_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
11933 struct lpfc_wcqe_complete *wcqe)
11934{
11935 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
11936 "3017 ABORT_XRI_CN completing on rpi x%x "
11937 "original iotag x%x, abort cmd iotag x%x "
11938 "status 0x%x, reason 0x%x\n",
11939 cmdiocb->iocb.un.acxri.abortContextTag,
11940 cmdiocb->iocb.un.acxri.abortIoTag,
11941 cmdiocb->iotag,
11942 (bf_get(lpfc_wcqe_c_status, wcqe)
11943 & LPFC_IOCB_STATUS_MASK),
11944 wcqe->parameter);
11945 lpfc_sli_release_iocbq(phba, cmdiocb);
11946}
11947
11948
11949
11950
11951
11952
11953
11954
11955
11956
11957
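/**
 * lpfc_sli_abort_fcp_cmpl - Completion handler for FCP aborts (SLI3 path)
 * @phba: Pointer to HBA context object.
 * @cmdiocb: Pointer to command iocb object.
 * @rspiocb: Pointer to response iocb object.
 *
 * Logs the abort completion status from the response iocb and releases
 * the abort iocb.
 */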
11958void
11959lpfc_sli_abort_fcp_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
11960 struct lpfc_iocbq *rspiocb)
11961{
11962 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
11963 "3096 ABORT_XRI_CN completing on rpi x%x "
11964 "original iotag x%x, abort cmd iotag x%x "
11965 "status 0x%x, reason 0x%x\n",
11966 cmdiocb->iocb.un.acxri.abortContextTag,
11967 cmdiocb->iocb.un.acxri.abortIoTag,
11968 cmdiocb->iotag, rspiocb->iocb.ulpStatus,
11969 rspiocb->iocb.un.ulpWord[4]);
11970 lpfc_sli_release_iocbq(phba, cmdiocb);
11971 return;
11972}
11973
11974
11975
11976
11977
11978
11979
11980
11981
11982
11983
11984
11985
11986
11987
11988
11989
11990
11991
11992
11993
11994
11995
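/**
 * lpfc_sli_abort_iocb - issue aborts for commands on a host/target/LUN
 * @vport: Pointer to virtual port.
 * @tgt_id: SCSI ID of the target.
 * @lun_id: LUN ID of the scsi command.
 * @abort_cmd: LPFC_CTX_LUN/LPFC_CTX_TGT/LPFC_CTX_HOST.
 *
 * Issues an abort iotag for every outstanding FCP iocb that matches the
 * given context, unless an I/O queue flush is already in progress.
 * Returns the number of iocbs that could not be aborted.
 */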
11996int
11997lpfc_sli_abort_iocb(struct lpfc_vport *vport, u16 tgt_id, u64 lun_id,
11998 lpfc_ctx_cmd abort_cmd)
11999{
12000 struct lpfc_hba *phba = vport->phba;
12001 struct lpfc_sli_ring *pring = NULL;
12002 struct lpfc_iocbq *iocbq;
12003 int errcnt = 0, ret_val = 0;
12004 unsigned long iflags;
12005 int i;
12006 void *fcp_cmpl = NULL;
12007
12008
12009 if (phba->hba_flag & HBA_IOQ_FLUSH)
12010 return errcnt;
12011
12012 for (i = 1; i <= phba->sli.last_iotag; i++) {
12013 iocbq = phba->sli.iocbq_lookup[i];
12014
12015 if (lpfc_sli_validate_fcp_iocb(iocbq, vport, tgt_id, lun_id,
12016 abort_cmd) != 0)
12017 continue;
12018
12019 spin_lock_irqsave(&phba->hbalock, iflags);
12020 if (phba->sli_rev == LPFC_SLI_REV3) {
12021 pring = &phba->sli.sli3_ring[LPFC_FCP_RING];
12022 fcp_cmpl = lpfc_sli_abort_fcp_cmpl;
12023 } else if (phba->sli_rev == LPFC_SLI_REV4) {
12024 pring = lpfc_sli4_calc_ring(phba, iocbq);
12025 fcp_cmpl = lpfc_sli4_abort_fcp_cmpl;
12026 }
12027 ret_val = lpfc_sli_issue_abort_iotag(phba, pring, iocbq,
12028 fcp_cmpl);
12029 spin_unlock_irqrestore(&phba->hbalock, iflags);
12030 if (ret_val != IOCB_SUCCESS)
12031 errcnt++;
12032 }
12033
12034 return errcnt;
12035}
12036
12037
12038
12039
12040
12041
12042
12043
12044
12045
12046
12047
12048
12049
12050
12051
12052
12053
12054
12055
12056
12057
12058
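/**
 * lpfc_sli_abort_taskmgmt - Abort matching FCP iocbs for task management
 * @vport: virtual port whose iocbs are to be aborted
 * @pring: SLI-3 ring used to post the abort iocbs
 * @tgt_id: SCSI target id (used with LPFC_CTX_TGT)
 * @lun_id: SCSI LUN id (used with LPFC_CTX_LUN)
 * @cmd: context selecting which iocbs to abort
 *
 * Builds and issues an ABTS (or a CLOSE when the link is down or the node
 * is not mapped) for every matching command that is still on the txcmplq
 * and not already marked as driver aborted.
 *
 * Return: number of abort iocbs successfully issued.
 */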
12059int
12060lpfc_sli_abort_taskmgmt(struct lpfc_vport *vport, struct lpfc_sli_ring *pring,
12061 uint16_t tgt_id, uint64_t lun_id, lpfc_ctx_cmd cmd)
12062{
12063 struct lpfc_hba *phba = vport->phba;
12064 struct lpfc_io_buf *lpfc_cmd;
12065 struct lpfc_iocbq *abtsiocbq;
12066 struct lpfc_nodelist *ndlp;
12067 struct lpfc_iocbq *iocbq;
12068 IOCB_t *icmd;
12069 int sum, i, ret_val;
12070 unsigned long iflags;
12071 struct lpfc_sli_ring *pring_s4 = NULL;
12072
12073 spin_lock_irqsave(&phba->hbalock, iflags);
12074
12075
12076 if (phba->hba_flag & HBA_IOQ_FLUSH) {
12077 spin_unlock_irqrestore(&phba->hbalock, iflags);
12078 return 0;
12079 }
12080 sum = 0;
12081
12082 for (i = 1; i <= phba->sli.last_iotag; i++) {
12083 iocbq = phba->sli.iocbq_lookup[i];
12084
12085 if (lpfc_sli_validate_fcp_iocb(iocbq, vport, tgt_id, lun_id,
12086 cmd) != 0)
12087 continue;
12088
12089
12090 lpfc_cmd = container_of(iocbq, struct lpfc_io_buf, cur_iocbq);
12091 spin_lock(&lpfc_cmd->buf_lock);
12092
12093 if (!lpfc_cmd->pCmd) {
12094 spin_unlock(&lpfc_cmd->buf_lock);
12095 continue;
12096 }
12097
12098 if (phba->sli_rev == LPFC_SLI_REV4) {
12099 pring_s4 =
12100 phba->sli4_hba.hdwq[iocbq->hba_wqidx].io_wq->pring;
12101 if (!pring_s4) {
12102 spin_unlock(&lpfc_cmd->buf_lock);
12103 continue;
12104 }
12105
12106 spin_lock(&pring_s4->ring_lock);
12107 }
12108
12109
12110
12111
12112
12113 if ((iocbq->iocb_flag & LPFC_DRIVER_ABORTED) ||
12114 !(iocbq->iocb_flag & LPFC_IO_ON_TXCMPLQ)) {
12115 if (phba->sli_rev == LPFC_SLI_REV4)
12116 spin_unlock(&pring_s4->ring_lock);
12117 spin_unlock(&lpfc_cmd->buf_lock);
12118 continue;
12119 }
12120
12121
12122 abtsiocbq = __lpfc_sli_get_iocbq(phba);
12123 if (!abtsiocbq) {
12124 if (phba->sli_rev == LPFC_SLI_REV4)
12125 spin_unlock(&pring_s4->ring_lock);
12126 spin_unlock(&lpfc_cmd->buf_lock);
12127 continue;
12128 }
12129
12130 icmd = &iocbq->iocb;
12131 abtsiocbq->iocb.un.acxri.abortType = ABORT_TYPE_ABTS;
12132 abtsiocbq->iocb.un.acxri.abortContextTag = icmd->ulpContext;
12133 if (phba->sli_rev == LPFC_SLI_REV4)
12134 abtsiocbq->iocb.un.acxri.abortIoTag =
12135 iocbq->sli4_xritag;
12136 else
12137 abtsiocbq->iocb.un.acxri.abortIoTag = icmd->ulpIoTag;
12138 abtsiocbq->iocb.ulpLe = 1;
12139 abtsiocbq->iocb.ulpClass = icmd->ulpClass;
12140 abtsiocbq->vport = vport;
12141
12142
12143 abtsiocbq->hba_wqidx = iocbq->hba_wqidx;
12144 if (iocbq->iocb_flag & LPFC_IO_FCP)
12145 abtsiocbq->iocb_flag |= LPFC_USE_FCPWQIDX;
12146 if (iocbq->iocb_flag & LPFC_IO_FOF)
12147 abtsiocbq->iocb_flag |= LPFC_IO_FOF;
12148
12149 ndlp = lpfc_cmd->rdata->pnode;
12150
12151 if (lpfc_is_link_up(phba) &&
12152 (ndlp && ndlp->nlp_state == NLP_STE_MAPPED_NODE))
12153 abtsiocbq->iocb.ulpCommand = CMD_ABORT_XRI_CN;
12154 else
12155 abtsiocbq->iocb.ulpCommand = CMD_CLOSE_XRI_CN;
12156
12157
12158 abtsiocbq->iocb_cmpl = lpfc_sli_abort_fcp_cmpl;
12159
12160
12161
12162
12163
12164 iocbq->iocb_flag |= LPFC_DRIVER_ABORTED;
12165
12166 if (phba->sli_rev == LPFC_SLI_REV4) {
12167 ret_val = __lpfc_sli_issue_iocb(phba, pring_s4->ringno,
12168 abtsiocbq, 0);
12169 spin_unlock(&pring_s4->ring_lock);
12170 } else {
12171 ret_val = __lpfc_sli_issue_iocb(phba, pring->ringno,
12172 abtsiocbq, 0);
12173 }
12174
12175 spin_unlock(&lpfc_cmd->buf_lock);
12176
12177 if (ret_val == IOCB_ERROR)
12178 __lpfc_sli_release_iocbq(phba, abtsiocbq);
12179 else
12180 sum++;
12181 }
12182 spin_unlock_irqrestore(&phba->hbalock, iflags);
12183 return sum;
12184}
12185
12186
12187
12188
12189
12190
12191
12192
12193
12194
12195
12196
12197
12198
12199
12200
12201
12202
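/**
 * lpfc_sli_wake_iocb_wait - Completion handler for iocbs issued through
 *	lpfc_sli_issue_iocb_wait()
 * @phba: pointer to HBA context object
 * @cmdiocbq: command iocb that completed
 * @rspiocbq: response iocb for the completion
 *
 * If the waiter has already timed out (LPFC_IO_WAKE_TMO set), the original
 * completion routine saved in wait_iocb_cmpl is invoked, or the iocb is
 * released if none was set.  Otherwise the response is copied into
 * context2, the exchange-busy state is propagated for FCP iocbs and the
 * sleeping thread is woken up.
 */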
12203static void
12204lpfc_sli_wake_iocb_wait(struct lpfc_hba *phba,
12205 struct lpfc_iocbq *cmdiocbq,
12206 struct lpfc_iocbq *rspiocbq)
12207{
12208 wait_queue_head_t *pdone_q;
12209 unsigned long iflags;
12210 struct lpfc_io_buf *lpfc_cmd;
12211
12212 spin_lock_irqsave(&phba->hbalock, iflags);
12213 if (cmdiocbq->iocb_flag & LPFC_IO_WAKE_TMO) {
12214
12215
12216
12217
12218
12219
12220
12221 spin_unlock_irqrestore(&phba->hbalock, iflags);
12222 cmdiocbq->iocb_cmpl = cmdiocbq->wait_iocb_cmpl;
12223 cmdiocbq->wait_iocb_cmpl = NULL;
12224 if (cmdiocbq->iocb_cmpl)
12225 (cmdiocbq->iocb_cmpl)(phba, cmdiocbq, NULL);
12226 else
12227 lpfc_sli_release_iocbq(phba, cmdiocbq);
12228 return;
12229 }
12230
12231 cmdiocbq->iocb_flag |= LPFC_IO_WAKE;
12232 if (cmdiocbq->context2 && rspiocbq)
12233 memcpy(&((struct lpfc_iocbq *)cmdiocbq->context2)->iocb,
12234 &rspiocbq->iocb, sizeof(IOCB_t));
12235
12236
12237 if ((cmdiocbq->iocb_flag & LPFC_IO_FCP) &&
12238 !(cmdiocbq->iocb_flag & LPFC_IO_LIBDFC)) {
12239 lpfc_cmd = container_of(cmdiocbq, struct lpfc_io_buf,
12240 cur_iocbq);
12241 if (rspiocbq && (rspiocbq->iocb_flag & LPFC_EXCHANGE_BUSY))
12242 lpfc_cmd->flags |= LPFC_SBUF_XBUSY;
12243 else
12244 lpfc_cmd->flags &= ~LPFC_SBUF_XBUSY;
12245 }
12246
12247 pdone_q = cmdiocbq->context_un.wait_queue;
12248 if (pdone_q)
12249 wake_up(pdone_q);
12250 spin_unlock_irqrestore(&phba->hbalock, iflags);
12251 return;
12252}
12253
12254
12255
12256
12257
12258
12259
12260
12261
12262
12263
12264
12265
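/**
 * lpfc_chk_iocb_flg - Test iocb flag bits under the hba lock
 * @phba: pointer to HBA context object
 * @piocbq: iocb to examine
 * @flag: flag bit(s) to test
 *
 * Used as the wait_event_timeout() condition in lpfc_sli_issue_iocb_wait();
 * takes hbalock so the flag is read consistently with the completion path.
 *
 * Return: non-zero if any of the @flag bits are set in the iocb.
 */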
12266static int
12267lpfc_chk_iocb_flg(struct lpfc_hba *phba,
12268 struct lpfc_iocbq *piocbq, uint32_t flag)
12269{
12270 unsigned long iflags;
12271 int ret;
12272
12273 spin_lock_irqsave(&phba->hbalock, iflags);
12274 ret = piocbq->iocb_flag & flag;
12275 spin_unlock_irqrestore(&phba->hbalock, iflags);
12276 return ret;
12277
12278}
12279
12280
12281
12282
12283
12284
12285
12286
12287
12288
12289
12290
12291
12292
12293
12294
12295
12296
12297
12298
12299
12300
12301
12302
12303
12304
12305
12306
12307
12308
12309
12310
12311
12312
12313
12314
12315
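/**
 * lpfc_sli_issue_iocb_wait - Issue an iocb and wait synchronously for it
 * @phba: pointer to HBA context object
 * @ring_number: ring on which to issue the iocb
 * @piocb: command iocb to issue
 * @prspiocbq: optional caller-supplied iocb to receive the response
 * @timeout: time to wait for completion, in seconds
 *
 * Temporarily hooks the iocb completion to lpfc_sli_wake_iocb_wait(),
 * issues the iocb and sleeps on a wait queue until the completion fires or
 * the timeout expires.  When ring polling is configured the FCP ring
 * interrupt is enabled for the duration of the wait.
 *
 * Return: IOCB_SUCCESS, IOCB_TIMEDOUT, IOCB_BUSY or IOCB_ERROR.
 */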
12316int
12317lpfc_sli_issue_iocb_wait(struct lpfc_hba *phba,
12318 uint32_t ring_number,
12319 struct lpfc_iocbq *piocb,
12320 struct lpfc_iocbq *prspiocbq,
12321 uint32_t timeout)
12322{
12323 DECLARE_WAIT_QUEUE_HEAD_ONSTACK(done_q);
12324 long timeleft, timeout_req = 0;
12325 int retval = IOCB_SUCCESS;
12326 uint32_t creg_val;
12327 struct lpfc_iocbq *iocb;
12328 int txq_cnt = 0;
12329 int txcmplq_cnt = 0;
12330 struct lpfc_sli_ring *pring;
12331 unsigned long iflags;
12332 bool iocb_completed = true;
12333
12334 if (phba->sli_rev >= LPFC_SLI_REV4)
12335 pring = lpfc_sli4_calc_ring(phba, piocb);
12336 else
12337 pring = &phba->sli.sli3_ring[ring_number];
12338
12339
12340
12341
12342 if (prspiocbq) {
12343 if (piocb->context2)
12344 return IOCB_ERROR;
12345 piocb->context2 = prspiocbq;
12346 }
12347
12348 piocb->wait_iocb_cmpl = piocb->iocb_cmpl;
12349 piocb->iocb_cmpl = lpfc_sli_wake_iocb_wait;
12350 piocb->context_un.wait_queue = &done_q;
12351 piocb->iocb_flag &= ~(LPFC_IO_WAKE | LPFC_IO_WAKE_TMO);
12352
12353 if (phba->cfg_poll & DISABLE_FCP_RING_INT) {
12354 if (lpfc_readl(phba->HCregaddr, &creg_val))
12355 return IOCB_ERROR;
12356 creg_val |= (HC_R0INT_ENA << LPFC_FCP_RING);
12357 writel(creg_val, phba->HCregaddr);
12358 readl(phba->HCregaddr);
12359 }
12360
12361 retval = lpfc_sli_issue_iocb(phba, ring_number, piocb,
12362 SLI_IOCB_RET_IOCB);
12363 if (retval == IOCB_SUCCESS) {
12364 timeout_req = msecs_to_jiffies(timeout * 1000);
12365 timeleft = wait_event_timeout(done_q,
12366 lpfc_chk_iocb_flg(phba, piocb, LPFC_IO_WAKE),
12367 timeout_req);
12368 spin_lock_irqsave(&phba->hbalock, iflags);
12369 if (!(piocb->iocb_flag & LPFC_IO_WAKE)) {
12370
12371
12372
12373
12374
12375
12376 iocb_completed = false;
12377 piocb->iocb_flag |= LPFC_IO_WAKE_TMO;
12378 }
12379 spin_unlock_irqrestore(&phba->hbalock, iflags);
12380 if (iocb_completed) {
12381 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
12382 "0331 IOCB wake signaled\n");
12383
12384
12385
12386
12387
12388 } else if (timeleft == 0) {
12389 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
12390 "0338 IOCB wait timeout error - no "
12391 "wake response Data x%x\n", timeout);
12392 retval = IOCB_TIMEDOUT;
12393 } else {
12394 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
12395 "0330 IOCB wake NOT set, "
12396 "Data x%x x%lx\n",
12397 timeout, (timeleft / jiffies));
12398 retval = IOCB_TIMEDOUT;
12399 }
12400 } else if (retval == IOCB_BUSY) {
12401 if (phba->cfg_log_verbose & LOG_SLI) {
12402 list_for_each_entry(iocb, &pring->txq, list) {
12403 txq_cnt++;
12404 }
12405 list_for_each_entry(iocb, &pring->txcmplq, list) {
12406 txcmplq_cnt++;
12407 }
12408 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
12409 "2818 Max IOCBs %d txq cnt %d txcmplq cnt %d\n",
12410 phba->iocb_cnt, txq_cnt, txcmplq_cnt);
12411 }
12412 return retval;
12413 } else {
12414 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
12415 "0332 IOCB wait issue failed, Data x%x\n",
12416 retval);
12417 retval = IOCB_ERROR;
12418 }
12419
12420 if (phba->cfg_poll & DISABLE_FCP_RING_INT) {
12421 if (lpfc_readl(phba->HCregaddr, &creg_val))
12422 return IOCB_ERROR;
12423 creg_val &= ~(HC_R0INT_ENA << LPFC_FCP_RING);
12424 writel(creg_val, phba->HCregaddr);
12425 readl(phba->HCregaddr);
12426 }
12427
12428 if (prspiocbq)
12429 piocb->context2 = NULL;
12430
12431 piocb->context_un.wait_queue = NULL;
12432 piocb->iocb_cmpl = NULL;
12433 return retval;
12434}
12435
12436
12437
12438
12439
12440
12441
12442
12443
12444
12445
12446
12447
12448
12449
12450
12451
12452
12453
12454
12455
12456
12457
12458
12459
12460
12461
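/**
 * lpfc_sli_issue_mbox_wait - Issue a mailbox command and wait for completion
 * @phba: pointer to HBA context object
 * @pmboxq: mailbox command to issue
 * @timeout: time to wait for completion, in seconds
 *
 * Issues the mailbox in MBX_NOWAIT mode with a completion handler that
 * signals a completion object, then waits up to @timeout seconds for it.
 * On timeout the default mailbox completion handler is installed so the
 * eventual completion is still cleaned up.
 *
 * Return: MBX_SUCCESS, MBX_TIMEOUT, or the issue-time error code.
 */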
12462int
12463lpfc_sli_issue_mbox_wait(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmboxq,
12464 uint32_t timeout)
12465{
12466 struct completion mbox_done;
12467 int retval;
12468 unsigned long flag;
12469
12470 pmboxq->mbox_flag &= ~LPFC_MBX_WAKE;
12471
12472 pmboxq->mbox_cmpl = lpfc_sli_wake_mbox_wait;
12473
12474
12475 init_completion(&mbox_done);
12476 pmboxq->context3 = &mbox_done;
12477
12478 retval = lpfc_sli_issue_mbox(phba, pmboxq, MBX_NOWAIT);
12479 if (retval == MBX_BUSY || retval == MBX_SUCCESS) {
12480 wait_for_completion_timeout(&mbox_done,
12481 msecs_to_jiffies(timeout * 1000));
12482
12483 spin_lock_irqsave(&phba->hbalock, flag);
12484 pmboxq->context3 = NULL;
12485
12486
12487
12488
12489 if (pmboxq->mbox_flag & LPFC_MBX_WAKE) {
12490 retval = MBX_SUCCESS;
12491 } else {
12492 retval = MBX_TIMEOUT;
12493 pmboxq->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
12494 }
12495 spin_unlock_irqrestore(&phba->hbalock, flag);
12496 }
12497 return retval;
12498}
12499
12500
12501
12502
12503
12504
12505
12506
12507
12508
12509
12510
12511
12512
12513
12514
12515
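/**
 * lpfc_sli_mbox_sys_shutdown - Shut down the mailbox command subsystem
 * @phba: pointer to HBA context object
 * @mbx_action: LPFC_MBX_WAIT to drain the active mailbox command,
 *	LPFC_MBX_NO_WAIT to flush immediately
 *
 * Blocks posting of new asynchronous mailbox commands and, unless
 * LPFC_MBX_NO_WAIT was requested, polls until the currently active mailbox
 * command completes or its timeout expires, then flushes the mailbox
 * queues.
 */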
12516void
12517lpfc_sli_mbox_sys_shutdown(struct lpfc_hba *phba, int mbx_action)
12518{
12519 struct lpfc_sli *psli = &phba->sli;
12520 unsigned long timeout;
12521
12522 if (mbx_action == LPFC_MBX_NO_WAIT) {
12523
12524 msleep(100);
12525 lpfc_sli_mbox_sys_flush(phba);
12526 return;
12527 }
12528 timeout = msecs_to_jiffies(LPFC_MBOX_TMO * 1000) + jiffies;
12529
12530
12531 local_bh_disable();
12532
12533 spin_lock_irq(&phba->hbalock);
12534 psli->sli_flag |= LPFC_SLI_ASYNC_MBX_BLK;
12535
12536 if (psli->sli_flag & LPFC_SLI_ACTIVE) {
12537
12538
12539
12540 if (phba->sli.mbox_active)
12541 timeout = msecs_to_jiffies(lpfc_mbox_tmo_val(phba,
12542 phba->sli.mbox_active) *
12543 1000) + jiffies;
12544 spin_unlock_irq(&phba->hbalock);
12545
12546
12547 local_bh_enable();
12548
12549 while (phba->sli.mbox_active) {
12550
12551 msleep(2);
12552 if (time_after(jiffies, timeout))
12553
12554
12555
12556 break;
12557 }
12558 } else {
12559 spin_unlock_irq(&phba->hbalock);
12560
12561
12562 local_bh_enable();
12563 }
12564
12565 lpfc_sli_mbox_sys_flush(phba);
12566}
12567
12568
12569
12570
12571
12572
12573
12574
12575
12576
12577
12578
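/**
 * lpfc_sli_eratt_read - Check the SLI-3 host attention register for errors
 * @phba: pointer to HBA context object
 *
 * Reads the host attention register and, when an error attention is
 * present, latches the host status, flags deferred errors (disabling
 * interrupts) and marks the error as handled so the worker thread can
 * process it.
 *
 * Return: 1 if an error attention (or a register read failure) was seen,
 * 0 otherwise.
 */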
12579static int
12580lpfc_sli_eratt_read(struct lpfc_hba *phba)
12581{
12582 uint32_t ha_copy;
12583
12584
12585 if (lpfc_readl(phba->HAregaddr, &ha_copy))
12586 goto unplug_err;
12587
12588 if (ha_copy & HA_ERATT) {
12589
12590 if (lpfc_sli_read_hs(phba))
12591 goto unplug_err;
12592
12593
12594 if ((HS_FFER1 & phba->work_hs) &&
12595 ((HS_FFER2 | HS_FFER3 | HS_FFER4 | HS_FFER5 |
12596 HS_FFER6 | HS_FFER7 | HS_FFER8) & phba->work_hs)) {
12597 phba->hba_flag |= DEFER_ERATT;
12598
12599 writel(0, phba->HCregaddr);
12600 readl(phba->HCregaddr);
12601 }
12602
12603
12604 phba->work_ha |= HA_ERATT;
12605
12606 phba->hba_flag |= HBA_ERATT_HANDLED;
12607 return 1;
12608 }
12609 return 0;
12610
12611unplug_err:
12612
12613 phba->work_hs |= UNPLUG_ERR;
12614
12615 phba->work_ha |= HA_ERATT;
12616
12617 phba->hba_flag |= HBA_ERATT_HANDLED;
12618 return 1;
12619}
12620
12621
12622
12623
12624
12625
12626
12627
12628
12629
12630
12631
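/**
 * lpfc_sli4_eratt_read - Check SLI-4 error registers for an error attention
 * @phba: pointer to HBA context object
 *
 * Reads the unrecoverable-error or port-status registers appropriate to
 * the SLI interface type and records any error state in the work_status
 * fields for the worker thread.
 *
 * Return: 1 if an error attention was detected (or the registers could not
 * be read), 0 otherwise.
 */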
12632static int
12633lpfc_sli4_eratt_read(struct lpfc_hba *phba)
12634{
12635 uint32_t uerr_sta_hi, uerr_sta_lo;
12636 uint32_t if_type, portsmphr;
12637 struct lpfc_register portstat_reg;
12638
12639
12640
12641
12642
12643 if_type = bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf);
12644 switch (if_type) {
12645 case LPFC_SLI_INTF_IF_TYPE_0:
12646 if (lpfc_readl(phba->sli4_hba.u.if_type0.UERRLOregaddr,
12647 &uerr_sta_lo) ||
12648 lpfc_readl(phba->sli4_hba.u.if_type0.UERRHIregaddr,
12649 &uerr_sta_hi)) {
12650 phba->work_hs |= UNPLUG_ERR;
12651 phba->work_ha |= HA_ERATT;
12652 phba->hba_flag |= HBA_ERATT_HANDLED;
12653 return 1;
12654 }
12655 if ((~phba->sli4_hba.ue_mask_lo & uerr_sta_lo) ||
12656 (~phba->sli4_hba.ue_mask_hi & uerr_sta_hi)) {
12657 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
12658 "1423 HBA Unrecoverable error: "
12659 "uerr_lo_reg=0x%x, uerr_hi_reg=0x%x, "
12660 "ue_mask_lo_reg=0x%x, "
12661 "ue_mask_hi_reg=0x%x\n",
12662 uerr_sta_lo, uerr_sta_hi,
12663 phba->sli4_hba.ue_mask_lo,
12664 phba->sli4_hba.ue_mask_hi);
12665 phba->work_status[0] = uerr_sta_lo;
12666 phba->work_status[1] = uerr_sta_hi;
12667 phba->work_ha |= HA_ERATT;
12668 phba->hba_flag |= HBA_ERATT_HANDLED;
12669 return 1;
12670 }
12671 break;
12672 case LPFC_SLI_INTF_IF_TYPE_2:
12673 case LPFC_SLI_INTF_IF_TYPE_6:
12674 if (lpfc_readl(phba->sli4_hba.u.if_type2.STATUSregaddr,
12675 &portstat_reg.word0) ||
12676 lpfc_readl(phba->sli4_hba.PSMPHRregaddr,
12677 &portsmphr)){
12678 phba->work_hs |= UNPLUG_ERR;
12679 phba->work_ha |= HA_ERATT;
12680 phba->hba_flag |= HBA_ERATT_HANDLED;
12681 return 1;
12682 }
12683 if (bf_get(lpfc_sliport_status_err, &portstat_reg)) {
12684 phba->work_status[0] =
12685 readl(phba->sli4_hba.u.if_type2.ERR1regaddr);
12686 phba->work_status[1] =
12687 readl(phba->sli4_hba.u.if_type2.ERR2regaddr);
12688 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
12689 "2885 Port Status Event: "
12690 "port status reg 0x%x, "
12691 "port smphr reg 0x%x, "
12692 "error 1=0x%x, error 2=0x%x\n",
12693 portstat_reg.word0,
12694 portsmphr,
12695 phba->work_status[0],
12696 phba->work_status[1]);
12697 phba->work_ha |= HA_ERATT;
12698 phba->hba_flag |= HBA_ERATT_HANDLED;
12699 return 1;
12700 }
12701 break;
12702 case LPFC_SLI_INTF_IF_TYPE_1:
12703 default:
12704 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
12705 "2886 HBA Error Attention on unsupported "
12706 				"if type %d.\n", if_type);
12707 return 1;
12708 }
12709
12710 return 0;
12711}
12712
12713
12714
12715
12716
12717
12718
12719
12720
12721
12722
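/**
 * lpfc_sli_check_eratt - Check whether an HBA error attention is pending
 * @phba: pointer to HBA context object
 *
 * Skips the check when error attentions are being ignored, already
 * handled, deferred, or the PCI channel is offline; otherwise dispatches
 * to the SLI revision specific read routine.
 *
 * Return: non-zero when an error attention needs processing.
 */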
12723int
12724lpfc_sli_check_eratt(struct lpfc_hba *phba)
12725{
12726 uint32_t ha_copy;
12727
12728
12729
12730
12731 if (phba->link_flag & LS_IGNORE_ERATT)
12732 return 0;
12733
12734
12735 spin_lock_irq(&phba->hbalock);
12736 if (phba->hba_flag & HBA_ERATT_HANDLED) {
12737
12738 spin_unlock_irq(&phba->hbalock);
12739 return 0;
12740 }
12741
12742
12743
12744
12745
12746 if (unlikely(phba->hba_flag & DEFER_ERATT)) {
12747 spin_unlock_irq(&phba->hbalock);
12748 return 0;
12749 }
12750
12751
12752 if (unlikely(pci_channel_offline(phba->pcidev))) {
12753 spin_unlock_irq(&phba->hbalock);
12754 return 0;
12755 }
12756
12757 switch (phba->sli_rev) {
12758 case LPFC_SLI_REV2:
12759 case LPFC_SLI_REV3:
12760
12761 ha_copy = lpfc_sli_eratt_read(phba);
12762 break;
12763 case LPFC_SLI_REV4:
12764
12765 ha_copy = lpfc_sli4_eratt_read(phba);
12766 break;
12767 default:
12768 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
12769 "0299 Invalid SLI revision (%d)\n",
12770 phba->sli_rev);
12771 ha_copy = 0;
12772 break;
12773 }
12774 spin_unlock_irq(&phba->hbalock);
12775
12776 return ha_copy;
12777}
12778
12779
12780
12781
12782
12783
12784
12785
12786
12787
12788
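/**
 * lpfc_intr_state_check - Validate HBA state from an interrupt handler
 * @phba: pointer to HBA context object
 *
 * Counts the interrupt in the SLI statistics and verifies the device is
 * in a state where the interrupt can be serviced.
 *
 * Return: 0 when servicing may proceed, -EIO if the PCI channel is offline
 * or the HBA has not yet reached at least the LINK_DOWN state.
 */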
12789static inline int
12790lpfc_intr_state_check(struct lpfc_hba *phba)
12791{
12792
12793 if (unlikely(pci_channel_offline(phba->pcidev)))
12794 return -EIO;
12795
12796
12797 phba->sli.slistat.sli_intr++;
12798
12799
12800 if (unlikely(phba->link_state < LPFC_LINK_DOWN))
12801 return -EIO;
12802
12803 return 0;
12804}
12805
12806
12807
12808
12809
12810
12811
12812
12813
12814
12815
12816
12817
12818
12819
12820
12821
12822
12823
12824
12825
12826
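/**
 * lpfc_sli_sp_intr_handler - SLI-3 slow-path interrupt handler
 * @irq: interrupt number
 * @dev_id: pointer to the lpfc_hba structure registered with the handler
 *
 * Services mailbox, link and error attentions as well as slow (ELS) ring
 * events.  When invoked as an individual MSI-X vector it reads and clears
 * the host attention register itself; otherwise it relies on the ha_copy
 * snapshot taken by lpfc_sli_intr_handler().
 *
 * Return: IRQ_HANDLED when the interrupt was serviced, IRQ_NONE otherwise.
 */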
12827irqreturn_t
12828lpfc_sli_sp_intr_handler(int irq, void *dev_id)
12829{
12830 struct lpfc_hba *phba;
12831 uint32_t ha_copy, hc_copy;
12832 uint32_t work_ha_copy;
12833 unsigned long status;
12834 unsigned long iflag;
12835 uint32_t control;
12836
12837 MAILBOX_t *mbox, *pmbox;
12838 struct lpfc_vport *vport;
12839 struct lpfc_nodelist *ndlp;
12840 struct lpfc_dmabuf *mp;
12841 LPFC_MBOXQ_t *pmb;
12842 int rc;
12843
12844
12845
12846
12847
12848 phba = (struct lpfc_hba *)dev_id;
12849
12850 if (unlikely(!phba))
12851 return IRQ_NONE;
12852
12853
12854
12855
12856
12857 if (phba->intr_type == MSIX) {
12858
12859 if (lpfc_intr_state_check(phba))
12860 return IRQ_NONE;
12861
12862 spin_lock_irqsave(&phba->hbalock, iflag);
12863 if (lpfc_readl(phba->HAregaddr, &ha_copy))
12864 goto unplug_error;
12865
12866
12867
12868 if (phba->link_flag & LS_IGNORE_ERATT)
12869 ha_copy &= ~HA_ERATT;
12870
12871 if (ha_copy & HA_ERATT) {
12872 if (phba->hba_flag & HBA_ERATT_HANDLED)
12873
12874 ha_copy &= ~HA_ERATT;
12875 else
12876
12877 phba->hba_flag |= HBA_ERATT_HANDLED;
12878 }
12879
12880
12881
12882
12883
12884 if (unlikely(phba->hba_flag & DEFER_ERATT)) {
12885 spin_unlock_irqrestore(&phba->hbalock, iflag);
12886 return IRQ_NONE;
12887 }
12888
12889
12890 if (lpfc_readl(phba->HCregaddr, &hc_copy))
12891 goto unplug_error;
12892
12893 writel(hc_copy & ~(HC_MBINT_ENA | HC_R2INT_ENA |
12894 HC_LAINT_ENA | HC_ERINT_ENA),
12895 phba->HCregaddr);
12896 writel((ha_copy & (HA_MBATT | HA_R2_CLR_MSK)),
12897 phba->HAregaddr);
12898 writel(hc_copy, phba->HCregaddr);
12899 readl(phba->HAregaddr);
12900 spin_unlock_irqrestore(&phba->hbalock, iflag);
12901 } else
12902 ha_copy = phba->ha_copy;
12903
12904 work_ha_copy = ha_copy & phba->work_ha_mask;
12905
12906 if (work_ha_copy) {
12907 if (work_ha_copy & HA_LATT) {
12908 if (phba->sli.sli_flag & LPFC_PROCESS_LA) {
12909
12910
12911
12912 		if (lpfc_sli_validate_fcp_iocb(iocbq, vport, tgt_id, lun_id,
12913 spin_lock_irqsave(&phba->hbalock, iflag);
12914 phba->sli.sli_flag &= ~LPFC_PROCESS_LA;
12915 if (lpfc_readl(phba->HCregaddr, &control))
12916 goto unplug_error;
12917 control &= ~HC_LAINT_ENA;
12918 writel(control, phba->HCregaddr);
12919 readl(phba->HCregaddr);
12920 spin_unlock_irqrestore(&phba->hbalock, iflag);
12921 }
12922 else
12923 work_ha_copy &= ~HA_LATT;
12924 }
12925
12926 if (work_ha_copy & ~(HA_ERATT | HA_MBATT | HA_LATT)) {
12927
12928
12929
12930
12931 status = (work_ha_copy &
12932 (HA_RXMASK << (4*LPFC_ELS_RING)));
12933 status >>= (4*LPFC_ELS_RING);
12934 if (status & HA_RXMASK) {
12935 spin_lock_irqsave(&phba->hbalock, iflag);
12936 if (lpfc_readl(phba->HCregaddr, &control))
12937 goto unplug_error;
12938
12939 lpfc_debugfs_slow_ring_trc(phba,
12940 "ISR slow ring: ctl:x%x stat:x%x isrcnt:x%x",
12941 control, status,
12942 (uint32_t)phba->sli.slistat.sli_intr);
12943
12944 if (control & (HC_R0INT_ENA << LPFC_ELS_RING)) {
12945 lpfc_debugfs_slow_ring_trc(phba,
12946 "ISR Disable ring:"
12947 "pwork:x%x hawork:x%x wait:x%x",
12948 phba->work_ha, work_ha_copy,
12949 (uint32_t)((unsigned long)
12950 &phba->work_waitq));
12951
12952 control &=
12953 ~(HC_R0INT_ENA << LPFC_ELS_RING);
12954 writel(control, phba->HCregaddr);
12955 readl(phba->HCregaddr);
12956 }
12957 else {
12958 lpfc_debugfs_slow_ring_trc(phba,
12959 "ISR slow ring: pwork:"
12960 "x%x hawork:x%x wait:x%x",
12961 phba->work_ha, work_ha_copy,
12962 (uint32_t)((unsigned long)
12963 &phba->work_waitq));
12964 }
12965 spin_unlock_irqrestore(&phba->hbalock, iflag);
12966 }
12967 }
12968 spin_lock_irqsave(&phba->hbalock, iflag);
12969 if (work_ha_copy & HA_ERATT) {
12970 if (lpfc_sli_read_hs(phba))
12971 goto unplug_error;
12972
12973
12974
12975
12976 if ((HS_FFER1 & phba->work_hs) &&
12977 ((HS_FFER2 | HS_FFER3 | HS_FFER4 | HS_FFER5 |
12978 HS_FFER6 | HS_FFER7 | HS_FFER8) &
12979 phba->work_hs)) {
12980 phba->hba_flag |= DEFER_ERATT;
12981
12982 writel(0, phba->HCregaddr);
12983 readl(phba->HCregaddr);
12984 }
12985 }
12986
12987 if ((work_ha_copy & HA_MBATT) && (phba->sli.mbox_active)) {
12988 pmb = phba->sli.mbox_active;
12989 pmbox = &pmb->u.mb;
12990 mbox = phba->mbox;
12991 vport = pmb->vport;
12992
12993
12994 lpfc_sli_pcimem_bcopy(mbox, pmbox, sizeof(uint32_t));
12995 if (pmbox->mbxOwner != OWN_HOST) {
12996 spin_unlock_irqrestore(&phba->hbalock, iflag);
12997
12998
12999
13000
13001 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
13002 "(%d):0304 Stray Mailbox "
13003 "Interrupt mbxCommand x%x "
13004 "mbxStatus x%x\n",
13005 (vport ? vport->vpi : 0),
13006 pmbox->mbxCommand,
13007 pmbox->mbxStatus);
13008
13009 work_ha_copy &= ~HA_MBATT;
13010 } else {
13011 phba->sli.mbox_active = NULL;
13012 spin_unlock_irqrestore(&phba->hbalock, iflag);
13013 phba->last_completion_time = jiffies;
13014 del_timer(&phba->sli.mbox_tmo);
13015 if (pmb->mbox_cmpl) {
13016 lpfc_sli_pcimem_bcopy(mbox, pmbox,
13017 MAILBOX_CMD_SIZE);
13018 if (pmb->out_ext_byte_len &&
13019 pmb->ctx_buf)
13020 lpfc_sli_pcimem_bcopy(
13021 phba->mbox_ext,
13022 pmb->ctx_buf,
13023 pmb->out_ext_byte_len);
13024 }
13025 if (pmb->mbox_flag & LPFC_MBX_IMED_UNREG) {
13026 pmb->mbox_flag &= ~LPFC_MBX_IMED_UNREG;
13027
13028 lpfc_debugfs_disc_trc(vport,
13029 LPFC_DISC_TRC_MBOX_VPORT,
13030 					"MBOX dflt rpi: "
13031 "status:x%x rpi:x%x",
13032 (uint32_t)pmbox->mbxStatus,
13033 pmbox->un.varWords[0], 0);
13034
13035 if (!pmbox->mbxStatus) {
13036 mp = (struct lpfc_dmabuf *)
13037 (pmb->ctx_buf);
13038 ndlp = (struct lpfc_nodelist *)
13039 pmb->ctx_ndlp;
13040
13041
13042
13043
13044
13045
13046 lpfc_unreg_login(phba,
13047 vport->vpi,
13048 pmbox->un.varWords[0],
13049 pmb);
13050 pmb->mbox_cmpl =
13051 lpfc_mbx_cmpl_dflt_rpi;
13052 pmb->ctx_buf = mp;
13053 pmb->ctx_ndlp = ndlp;
13054 pmb->vport = vport;
13055 rc = lpfc_sli_issue_mbox(phba,
13056 pmb,
13057 MBX_NOWAIT);
13058 if (rc != MBX_BUSY)
13059 lpfc_printf_log(phba,
13060 KERN_ERR,
13061 LOG_TRACE_EVENT,
13062 								"0350 rc should have "
13063 								"been MBX_BUSY\n");
13064 if (rc != MBX_NOT_FINISHED)
13065 goto send_current_mbox;
13066 }
13067 }
13068 spin_lock_irqsave(
13069 &phba->pport->work_port_lock,
13070 iflag);
13071 phba->pport->work_port_events &=
13072 ~WORKER_MBOX_TMO;
13073 spin_unlock_irqrestore(
13074 &phba->pport->work_port_lock,
13075 iflag);
13076
13077
13078
13079
13080 if (pmbox->mbxCommand == MBX_HEARTBEAT) {
13081
13082 phba->sli.mbox_active = NULL;
13083 phba->sli.sli_flag &=
13084 ~LPFC_SLI_MBOX_ACTIVE;
13085 if (pmb->mbox_cmpl)
13086 pmb->mbox_cmpl(phba, pmb);
13087 } else {
13088
13089 lpfc_mbox_cmpl_put(phba, pmb);
13090 }
13091 }
13092 } else
13093 spin_unlock_irqrestore(&phba->hbalock, iflag);
13094
13095 if ((work_ha_copy & HA_MBATT) &&
13096 (phba->sli.mbox_active == NULL)) {
13097send_current_mbox:
13098
13099 do {
13100 rc = lpfc_sli_issue_mbox(phba, NULL,
13101 MBX_NOWAIT);
13102 } while (rc == MBX_NOT_FINISHED);
13103 if (rc != MBX_SUCCESS)
13104 lpfc_printf_log(phba, KERN_ERR,
13105 LOG_TRACE_EVENT,
13106 "0349 rc should be "
13107 "MBX_SUCCESS\n");
13108 }
13109
13110 spin_lock_irqsave(&phba->hbalock, iflag);
13111 phba->work_ha |= work_ha_copy;
13112 spin_unlock_irqrestore(&phba->hbalock, iflag);
13113 lpfc_worker_wake_up(phba);
13114 }
13115 return IRQ_HANDLED;
13116unplug_error:
13117 spin_unlock_irqrestore(&phba->hbalock, iflag);
13118 return IRQ_HANDLED;
13119
13120}
13121
13122
13123
13124
13125
13126
13127
13128
13129
13130
13131
13132
13133
13134
13135
13136
13137
13138
13139
13140
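/**
 * lpfc_sli_fp_intr_handler - SLI-3 fast-path (FCP ring) interrupt handler
 * @irq: interrupt number
 * @dev_id: pointer to the lpfc_hba structure registered with the handler
 *
 * Handles completions on the FCP ring and, when multi-ring support is
 * configured, the extra ring.  Slow-path attentions are masked off and
 * left for the slow-path handler.
 *
 * Return: IRQ_HANDLED when the interrupt was serviced, IRQ_NONE otherwise.
 */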
13141irqreturn_t
13142lpfc_sli_fp_intr_handler(int irq, void *dev_id)
13143{
13144 struct lpfc_hba *phba;
13145 uint32_t ha_copy;
13146 unsigned long status;
13147 unsigned long iflag;
13148 struct lpfc_sli_ring *pring;
13149
13150
13151
13152
13153 phba = (struct lpfc_hba *) dev_id;
13154
13155 if (unlikely(!phba))
13156 return IRQ_NONE;
13157
13158
13159
13160
13161
13162 if (phba->intr_type == MSIX) {
13163
13164 if (lpfc_intr_state_check(phba))
13165 return IRQ_NONE;
13166
13167 if (lpfc_readl(phba->HAregaddr, &ha_copy))
13168 return IRQ_HANDLED;
13169
13170 spin_lock_irqsave(&phba->hbalock, iflag);
13171
13172
13173
13174
13175 if (unlikely(phba->hba_flag & DEFER_ERATT)) {
13176 spin_unlock_irqrestore(&phba->hbalock, iflag);
13177 return IRQ_NONE;
13178 }
13179 writel((ha_copy & (HA_R0_CLR_MSK | HA_R1_CLR_MSK)),
13180 phba->HAregaddr);
13181 readl(phba->HAregaddr);
13182 spin_unlock_irqrestore(&phba->hbalock, iflag);
13183 } else
13184 ha_copy = phba->ha_copy;
13185
13186
13187
13188
13189 ha_copy &= ~(phba->work_ha_mask);
13190
13191 status = (ha_copy & (HA_RXMASK << (4*LPFC_FCP_RING)));
13192 status >>= (4*LPFC_FCP_RING);
13193 pring = &phba->sli.sli3_ring[LPFC_FCP_RING];
13194 if (status & HA_RXMASK)
13195 lpfc_sli_handle_fast_ring_event(phba, pring, status);
13196
13197 if (phba->cfg_multi_ring_support == 2) {
13198
13199
13200
13201
13202 status = (ha_copy & (HA_RXMASK << (4*LPFC_EXTRA_RING)));
13203 status >>= (4*LPFC_EXTRA_RING);
13204 if (status & HA_RXMASK) {
13205 lpfc_sli_handle_fast_ring_event(phba,
13206 &phba->sli.sli3_ring[LPFC_EXTRA_RING],
13207 status);
13208 }
13209 }
13210 return IRQ_HANDLED;
13211}
13212
13213
13214
13215
13216
13217
13218
13219
13220
13221
13222
13223
13224
13225
13226
13227
13228
13229
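/**
 * lpfc_sli_intr_handler - SLI-3 device interrupt handler (INTx/MSI)
 * @irq: interrupt number
 * @dev_id: pointer to the lpfc_hba structure registered with the handler
 *
 * Takes a snapshot of the host attention register, masks and clears the
 * attentions being serviced, then invokes the slow-path and fast-path
 * handlers as needed.
 *
 * Return: IRQ_HANDLED if either sub-handler serviced work, IRQ_NONE
 * otherwise.
 */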
13230irqreturn_t
13231lpfc_sli_intr_handler(int irq, void *dev_id)
13232{
13233 struct lpfc_hba *phba;
13234 irqreturn_t sp_irq_rc, fp_irq_rc;
13235 unsigned long status1, status2;
13236 uint32_t hc_copy;
13237
13238
13239
13240
13241
13242 phba = (struct lpfc_hba *) dev_id;
13243
13244 if (unlikely(!phba))
13245 return IRQ_NONE;
13246
13247
13248 if (lpfc_intr_state_check(phba))
13249 return IRQ_NONE;
13250
13251 spin_lock(&phba->hbalock);
13252 if (lpfc_readl(phba->HAregaddr, &phba->ha_copy)) {
13253 spin_unlock(&phba->hbalock);
13254 return IRQ_HANDLED;
13255 }
13256
13257 if (unlikely(!phba->ha_copy)) {
13258 spin_unlock(&phba->hbalock);
13259 return IRQ_NONE;
13260 } else if (phba->ha_copy & HA_ERATT) {
13261 if (phba->hba_flag & HBA_ERATT_HANDLED)
13262
13263 phba->ha_copy &= ~HA_ERATT;
13264 else
13265
13266 phba->hba_flag |= HBA_ERATT_HANDLED;
13267 }
13268
13269
13270
13271
13272 if (unlikely(phba->hba_flag & DEFER_ERATT)) {
13273 spin_unlock(&phba->hbalock);
13274 return IRQ_NONE;
13275 }
13276
13277
13278 if (lpfc_readl(phba->HCregaddr, &hc_copy)) {
13279 spin_unlock(&phba->hbalock);
13280 return IRQ_HANDLED;
13281 }
13282 writel(hc_copy & ~(HC_MBINT_ENA | HC_R0INT_ENA | HC_R1INT_ENA
13283 | HC_R2INT_ENA | HC_LAINT_ENA | HC_ERINT_ENA),
13284 phba->HCregaddr);
13285 writel((phba->ha_copy & ~(HA_LATT | HA_ERATT)), phba->HAregaddr);
13286 writel(hc_copy, phba->HCregaddr);
13287 readl(phba->HAregaddr);
13288 spin_unlock(&phba->hbalock);
13289
13290
13291
13292
13293
13294
13295 status1 = phba->ha_copy & (HA_MBATT | HA_LATT | HA_ERATT);
13296
13297
13298 status2 = (phba->ha_copy & (HA_RXMASK << (4*LPFC_ELS_RING)));
13299 status2 >>= (4*LPFC_ELS_RING);
13300
13301 if (status1 || (status2 & HA_RXMASK))
13302 sp_irq_rc = lpfc_sli_sp_intr_handler(irq, dev_id);
13303 else
13304 sp_irq_rc = IRQ_NONE;
13305
13306
13307
13308
13309
13310
13311 status1 = (phba->ha_copy & (HA_RXMASK << (4*LPFC_FCP_RING)));
13312 status1 >>= (4*LPFC_FCP_RING);
13313
13314
13315 if (phba->cfg_multi_ring_support == 2) {
13316 status2 = (phba->ha_copy & (HA_RXMASK << (4*LPFC_EXTRA_RING)));
13317 status2 >>= (4*LPFC_EXTRA_RING);
13318 } else
13319 status2 = 0;
13320
13321 if ((status1 & HA_RXMASK) || (status2 & HA_RXMASK))
13322 fp_irq_rc = lpfc_sli_fp_intr_handler(irq, dev_id);
13323 else
13324 fp_irq_rc = IRQ_NONE;
13325
13326
13327 return (sp_irq_rc == IRQ_HANDLED) ? sp_irq_rc : fp_irq_rc;
13328}
13329
13330
13331
13332
13333
13334
13335
13336
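/**
 * lpfc_sli4_els_xri_abort_event_proc - Process queued ELS XRI abort events
 * @phba: pointer to HBA context object
 *
 * Worker-thread routine that drains the slow-path ELS XRI aborted work
 * queue, handing each event to lpfc_sli4_els_xri_aborted() and releasing
 * the cq_event afterwards.
 */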
13337void lpfc_sli4_els_xri_abort_event_proc(struct lpfc_hba *phba)
13338{
13339 struct lpfc_cq_event *cq_event;
13340 unsigned long iflags;
13341
13342
13343 spin_lock_irqsave(&phba->hbalock, iflags);
13344 phba->hba_flag &= ~ELS_XRI_ABORT_EVENT;
13345 spin_unlock_irqrestore(&phba->hbalock, iflags);
13346
13347
13348 spin_lock_irqsave(&phba->sli4_hba.els_xri_abrt_list_lock, iflags);
13349 while (!list_empty(&phba->sli4_hba.sp_els_xri_aborted_work_queue)) {
13350
13351 list_remove_head(&phba->sli4_hba.sp_els_xri_aborted_work_queue,
13352 cq_event, struct lpfc_cq_event, list);
13353 spin_unlock_irqrestore(&phba->sli4_hba.els_xri_abrt_list_lock,
13354 iflags);
13355
13356 lpfc_sli4_els_xri_aborted(phba, &cq_event->cqe.wcqe_axri);
13357
13358
13359 lpfc_sli4_cq_event_release(phba, cq_event);
13360 spin_lock_irqsave(&phba->sli4_hba.els_xri_abrt_list_lock,
13361 iflags);
13362 }
13363 spin_unlock_irqrestore(&phba->sli4_hba.els_xri_abrt_list_lock, iflags);
13364}
13365
13366
13367
13368
13369
13370
13371
13372
13373
13374
13375
13376
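/**
 * lpfc_sli4_iocb_param_transfer - Build an SLI-3 style response iocb from
 *	a work-queue completion entry
 * @phba: pointer to HBA context object
 * @pIocbIn: response iocb to fill in
 * @pIocbOut: original command iocb
 * @wcqe: work-queue completion entry reported by the hardware
 *
 * Copies the command iocb into the response iocb and translates the WCQE
 * status, residual/parameter words, BlockGuard (DIF) error state and the
 * exchange-busy flag into the legacy iocb fields used by the completion
 * handlers.
 */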
13377static void
13378lpfc_sli4_iocb_param_transfer(struct lpfc_hba *phba,
13379 struct lpfc_iocbq *pIocbIn,
13380 struct lpfc_iocbq *pIocbOut,
13381 struct lpfc_wcqe_complete *wcqe)
13382{
13383 int numBdes, i;
13384 unsigned long iflags;
13385 uint32_t status, max_response;
13386 struct lpfc_dmabuf *dmabuf;
13387 struct ulp_bde64 *bpl, bde;
13388 size_t offset = offsetof(struct lpfc_iocbq, iocb);
13389
13390 memcpy((char *)pIocbIn + offset, (char *)pIocbOut + offset,
13391 sizeof(struct lpfc_iocbq) - offset);
13392
13393 status = bf_get(lpfc_wcqe_c_status, wcqe);
13394 pIocbIn->iocb.ulpStatus = (status & LPFC_IOCB_STATUS_MASK);
13395 if (pIocbOut->iocb_flag & LPFC_IO_FCP)
13396 if (pIocbIn->iocb.ulpStatus == IOSTAT_FCP_RSP_ERROR)
13397 pIocbIn->iocb.un.fcpi.fcpi_parm =
13398 pIocbOut->iocb.un.fcpi.fcpi_parm -
13399 wcqe->total_data_placed;
13400 else
13401 pIocbIn->iocb.un.ulpWord[4] = wcqe->parameter;
13402 else {
13403 pIocbIn->iocb.un.ulpWord[4] = wcqe->parameter;
13404 switch (pIocbOut->iocb.ulpCommand) {
13405 case CMD_ELS_REQUEST64_CR:
13406 dmabuf = (struct lpfc_dmabuf *)pIocbOut->context3;
13407 bpl = (struct ulp_bde64 *)dmabuf->virt;
13408 bde.tus.w = le32_to_cpu(bpl[1].tus.w);
13409 max_response = bde.tus.f.bdeSize;
13410 break;
13411 case CMD_GEN_REQUEST64_CR:
13412 max_response = 0;
13413 if (!pIocbOut->context3)
13414 break;
13415 numBdes = pIocbOut->iocb.un.genreq64.bdl.bdeSize/
13416 sizeof(struct ulp_bde64);
13417 dmabuf = (struct lpfc_dmabuf *)pIocbOut->context3;
13418 bpl = (struct ulp_bde64 *)dmabuf->virt;
13419 for (i = 0; i < numBdes; i++) {
13420 bde.tus.w = le32_to_cpu(bpl[i].tus.w);
13421 if (bde.tus.f.bdeFlags != BUFF_TYPE_BDE_64)
13422 max_response += bde.tus.f.bdeSize;
13423 }
13424 break;
13425 default:
13426 max_response = wcqe->total_data_placed;
13427 break;
13428 }
13429 if (max_response < wcqe->total_data_placed)
13430 pIocbIn->iocb.un.genreq64.bdl.bdeSize = max_response;
13431 else
13432 pIocbIn->iocb.un.genreq64.bdl.bdeSize =
13433 wcqe->total_data_placed;
13434 }
13435
13436
13437 if (status == CQE_STATUS_DI_ERROR) {
13438 pIocbIn->iocb.ulpStatus = IOSTAT_LOCAL_REJECT;
13439
13440 if (bf_get(lpfc_wcqe_c_bg_edir, wcqe))
13441 pIocbIn->iocb.un.ulpWord[4] = IOERR_RX_DMA_FAILED;
13442 else
13443 pIocbIn->iocb.un.ulpWord[4] = IOERR_TX_DMA_FAILED;
13444
13445 pIocbIn->iocb.unsli3.sli3_bg.bgstat = 0;
13446 if (bf_get(lpfc_wcqe_c_bg_ge, wcqe))
13447 pIocbIn->iocb.unsli3.sli3_bg.bgstat |=
13448 BGS_GUARD_ERR_MASK;
13449 if (bf_get(lpfc_wcqe_c_bg_ae, wcqe))
13450 pIocbIn->iocb.unsli3.sli3_bg.bgstat |=
13451 BGS_APPTAG_ERR_MASK;
13452 if (bf_get(lpfc_wcqe_c_bg_re, wcqe))
13453 pIocbIn->iocb.unsli3.sli3_bg.bgstat |=
13454 BGS_REFTAG_ERR_MASK;
13455
13456
13457 if (bf_get(lpfc_wcqe_c_bg_tdpv, wcqe)) {
13458 pIocbIn->iocb.unsli3.sli3_bg.bgstat |=
13459 BGS_HI_WATER_MARK_PRESENT_MASK;
13460 pIocbIn->iocb.unsli3.sli3_bg.bghm =
13461 wcqe->total_data_placed;
13462 }
13463
13464
13465
13466
13467
13468 if (!pIocbIn->iocb.unsli3.sli3_bg.bgstat)
13469 pIocbIn->iocb.unsli3.sli3_bg.bgstat |=
13470 (BGS_REFTAG_ERR_MASK | BGS_APPTAG_ERR_MASK |
13471 BGS_GUARD_ERR_MASK);
13472 }
13473
13474
13475 if (bf_get(lpfc_wcqe_c_xb, wcqe)) {
13476 spin_lock_irqsave(&phba->hbalock, iflags);
13477 pIocbIn->iocb_flag |= LPFC_EXCHANGE_BUSY;
13478 spin_unlock_irqrestore(&phba->hbalock, iflags);
13479 }
13480}
13481
13482
13483
13484
13485
13486
13487
13488
13489
13490
13491
13492
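/**
 * lpfc_sli4_els_wcqe_to_rspiocbq - Convert an ELS WCQE into a response iocb
 * @phba: pointer to HBA context object
 * @irspiocbq: iocb carrying the WCQE in its cq_event
 *
 * Looks up the originating command iocb by request tag, puts it back on
 * the ELS txcmplq for normal completion processing and transfers the
 * completion parameters into @irspiocbq.
 *
 * Return: @irspiocbq on success, NULL if no matching command was found.
 */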
13493static struct lpfc_iocbq *
13494lpfc_sli4_els_wcqe_to_rspiocbq(struct lpfc_hba *phba,
13495 struct lpfc_iocbq *irspiocbq)
13496{
13497 struct lpfc_sli_ring *pring;
13498 struct lpfc_iocbq *cmdiocbq;
13499 struct lpfc_wcqe_complete *wcqe;
13500 unsigned long iflags;
13501
13502 pring = lpfc_phba_elsring(phba);
13503 if (unlikely(!pring))
13504 return NULL;
13505
13506 wcqe = &irspiocbq->cq_event.cqe.wcqe_cmpl;
13507 pring->stats.iocb_event++;
13508
13509 cmdiocbq = lpfc_sli_iocbq_lookup_by_tag(phba, pring,
13510 bf_get(lpfc_wcqe_c_request_tag, wcqe));
13511 if (unlikely(!cmdiocbq)) {
13512 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
13513 "0386 ELS complete with no corresponding "
13514 "cmdiocb: 0x%x 0x%x 0x%x 0x%x\n",
13515 wcqe->word0, wcqe->total_data_placed,
13516 wcqe->parameter, wcqe->word3);
13517 lpfc_sli_release_iocbq(phba, irspiocbq);
13518 return NULL;
13519 }
13520
13521 spin_lock_irqsave(&pring->ring_lock, iflags);
13522
13523 lpfc_sli_ringtxcmpl_put(phba, pring, cmdiocbq);
13524 spin_unlock_irqrestore(&pring->ring_lock, iflags);
13525
13526
13527 lpfc_sli4_iocb_param_transfer(phba, irspiocbq, cmdiocbq, wcqe);
13528
13529 return irspiocbq;
13530}
13531
13532inline struct lpfc_cq_event *
13533lpfc_cq_event_setup(struct lpfc_hba *phba, void *entry, int size)
13534{
13535 struct lpfc_cq_event *cq_event;
13536
13537
13538 cq_event = lpfc_sli4_cq_event_alloc(phba);
13539 if (!cq_event) {
13540 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
13541 "0602 Failed to alloc CQ_EVENT entry\n");
13542 return NULL;
13543 }
13544
13545
13546 memcpy(&cq_event->cqe, entry, size);
13547 return cq_event;
13548}
13549
13550
13551
13552
13553
13554
13555
13556
13557
13558
13559
13560static bool
13561lpfc_sli4_sp_handle_async_event(struct lpfc_hba *phba, struct lpfc_mcqe *mcqe)
13562{
13563 struct lpfc_cq_event *cq_event;
13564 unsigned long iflags;
13565
13566 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
13567 "0392 Async Event: word0:x%x, word1:x%x, "
13568 "word2:x%x, word3:x%x\n", mcqe->word0,
13569 mcqe->mcqe_tag0, mcqe->mcqe_tag1, mcqe->trailer);
13570
13571 cq_event = lpfc_cq_event_setup(phba, mcqe, sizeof(struct lpfc_mcqe));
13572 if (!cq_event)
13573 return false;
13574
13575 spin_lock_irqsave(&phba->sli4_hba.asynce_list_lock, iflags);
13576 list_add_tail(&cq_event->list, &phba->sli4_hba.sp_asynce_work_queue);
13577 spin_unlock_irqrestore(&phba->sli4_hba.asynce_list_lock, iflags);
13578
13579
13580 spin_lock_irqsave(&phba->hbalock, iflags);
13581 phba->hba_flag |= ASYNC_EVENT;
13582 spin_unlock_irqrestore(&phba->hbalock, iflags);
13583
13584 return true;
13585}
13586
13587
13588
13589
13590
13591
13592
13593
13594
13595
13596
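/**
 * lpfc_sli4_sp_handle_mbox_event - Handle a mailbox completion MCQE
 * @phba: pointer to HBA context object
 * @mcqe: mailbox completion queue entry
 *
 * Completes the active mailbox command: copies the response back, maps the
 * MCQE status into the MQE status word, performs the deferred RPI unreg
 * when LPFC_MBX_IMED_UNREG is set, and either completes the command
 * directly (MBX_HEARTBEAT) or queues it for the worker thread.
 *
 * Return: true if work was posted to the worker thread, false otherwise.
 */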
13597static bool
13598lpfc_sli4_sp_handle_mbox_event(struct lpfc_hba *phba, struct lpfc_mcqe *mcqe)
13599{
13600 uint32_t mcqe_status;
13601 MAILBOX_t *mbox, *pmbox;
13602 struct lpfc_mqe *mqe;
13603 struct lpfc_vport *vport;
13604 struct lpfc_nodelist *ndlp;
13605 struct lpfc_dmabuf *mp;
13606 unsigned long iflags;
13607 LPFC_MBOXQ_t *pmb;
13608 bool workposted = false;
13609 int rc;
13610
13611
13612 if (!bf_get(lpfc_trailer_completed, mcqe))
13613 goto out_no_mqe_complete;
13614
13615
13616 spin_lock_irqsave(&phba->hbalock, iflags);
13617 pmb = phba->sli.mbox_active;
13618 if (unlikely(!pmb)) {
13619 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
13620 "1832 No pending MBOX command to handle\n");
13621 spin_unlock_irqrestore(&phba->hbalock, iflags);
13622 goto out_no_mqe_complete;
13623 }
13624 spin_unlock_irqrestore(&phba->hbalock, iflags);
13625 mqe = &pmb->u.mqe;
13626 pmbox = (MAILBOX_t *)&pmb->u.mqe;
13627 mbox = phba->mbox;
13628 vport = pmb->vport;
13629
13630
13631 phba->last_completion_time = jiffies;
13632 del_timer(&phba->sli.mbox_tmo);
13633
13634
13635 if (pmb->mbox_cmpl && mbox)
13636 lpfc_sli4_pcimem_bcopy(mbox, mqe, sizeof(struct lpfc_mqe));
13637
13638
13639
13640
13641
13642 mcqe_status = bf_get(lpfc_mcqe_status, mcqe);
13643 if (mcqe_status != MB_CQE_STATUS_SUCCESS) {
13644 if (bf_get(lpfc_mqe_status, mqe) == MBX_SUCCESS)
13645 bf_set(lpfc_mqe_status, mqe,
13646 (LPFC_MBX_ERROR_RANGE | mcqe_status));
13647 }
13648 if (pmb->mbox_flag & LPFC_MBX_IMED_UNREG) {
13649 pmb->mbox_flag &= ~LPFC_MBX_IMED_UNREG;
13650 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_MBOX_VPORT,
13651 "MBOX dflt rpi: status:x%x rpi:x%x",
13652 mcqe_status,
13653 pmbox->un.varWords[0], 0);
13654 if (mcqe_status == MB_CQE_STATUS_SUCCESS) {
13655 mp = (struct lpfc_dmabuf *)(pmb->ctx_buf);
13656 ndlp = (struct lpfc_nodelist *)pmb->ctx_ndlp;
13657
13658
13659
13660
13661
13662
13663 spin_lock_irqsave(&ndlp->lock, iflags);
13664 ndlp->nlp_flag |= NLP_UNREG_INP;
13665 spin_unlock_irqrestore(&ndlp->lock, iflags);
13666 lpfc_unreg_login(phba, vport->vpi,
13667 pmbox->un.varWords[0], pmb);
13668 pmb->mbox_cmpl = lpfc_mbx_cmpl_dflt_rpi;
13669 pmb->ctx_buf = mp;
13670
13671
13672
13673
13674
13675
13676 pmb->ctx_ndlp = ndlp;
13677 pmb->vport = vport;
13678 rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT);
13679 if (rc != MBX_BUSY)
13680 lpfc_printf_log(phba, KERN_ERR,
13681 LOG_TRACE_EVENT,
13682 "0385 rc should "
13683 "have been MBX_BUSY\n");
13684 if (rc != MBX_NOT_FINISHED)
13685 goto send_current_mbox;
13686 }
13687 }
13688 spin_lock_irqsave(&phba->pport->work_port_lock, iflags);
13689 phba->pport->work_port_events &= ~WORKER_MBOX_TMO;
13690 spin_unlock_irqrestore(&phba->pport->work_port_lock, iflags);
13691
13692
13693 if (pmbox->mbxCommand == MBX_HEARTBEAT) {
13694 spin_lock_irqsave(&phba->hbalock, iflags);
13695
13696 phba->sli.sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
13697 phba->sli.mbox_active = NULL;
13698 if (bf_get(lpfc_trailer_consumed, mcqe))
13699 lpfc_sli4_mq_release(phba->sli4_hba.mbx_wq);
13700 spin_unlock_irqrestore(&phba->hbalock, iflags);
13701
13702
13703 lpfc_sli4_post_async_mbox(phba);
13704
13705
13706 if (pmb->mbox_cmpl)
13707 pmb->mbox_cmpl(phba, pmb);
13708 return false;
13709 }
13710
13711
13712 spin_lock_irqsave(&phba->hbalock, iflags);
13713 __lpfc_mbox_cmpl_put(phba, pmb);
13714 phba->work_ha |= HA_MBATT;
13715 spin_unlock_irqrestore(&phba->hbalock, iflags);
13716 workposted = true;
13717
13718send_current_mbox:
13719 spin_lock_irqsave(&phba->hbalock, iflags);
13720
13721 phba->sli.sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
13722
13723 phba->sli.mbox_active = NULL;
13724 if (bf_get(lpfc_trailer_consumed, mcqe))
13725 lpfc_sli4_mq_release(phba->sli4_hba.mbx_wq);
13726 spin_unlock_irqrestore(&phba->hbalock, iflags);
13727
13728 lpfc_worker_wake_up(phba);
13729 return workposted;
13730
13731out_no_mqe_complete:
13732 spin_lock_irqsave(&phba->hbalock, iflags);
13733 if (bf_get(lpfc_trailer_consumed, mcqe))
13734 lpfc_sli4_mq_release(phba->sli4_hba.mbx_wq);
13735 spin_unlock_irqrestore(&phba->hbalock, iflags);
13736 return false;
13737}
13738
13739
13740
13741
13742
13743
13744
13745
13746
13747
13748
13749
13750
13751static bool
13752lpfc_sli4_sp_handle_mcqe(struct lpfc_hba *phba, struct lpfc_queue *cq,
13753 struct lpfc_cqe *cqe)
13754{
13755 struct lpfc_mcqe mcqe;
13756 bool workposted;
13757
13758 cq->CQ_mbox++;
13759
13760
13761 lpfc_sli4_pcimem_bcopy(cqe, &mcqe, sizeof(struct lpfc_mcqe));
13762
13763
13764 if (!bf_get(lpfc_trailer_async, &mcqe))
13765 workposted = lpfc_sli4_sp_handle_mbox_event(phba, &mcqe);
13766 else
13767 workposted = lpfc_sli4_sp_handle_async_event(phba, &mcqe);
13768 return workposted;
13769}
13770
13771
13772
13773
13774
13775
13776
13777
13778
13779
13780
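/**
 * lpfc_sli4_sp_handle_els_wcqe - Handle an ELS work-queue completion entry
 * @phba: pointer to HBA context object
 * @cq: completion queue the entry arrived on
 * @wcqe: the work-queue completion entry
 *
 * Allocates a response iocb, saves the WCQE in it and queues it on the
 * slow-path event list so the worker thread can finish the ELS completion.
 *
 * Return: true if the event was queued, false if no iocb was available.
 */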
13781static bool
13782lpfc_sli4_sp_handle_els_wcqe(struct lpfc_hba *phba, struct lpfc_queue *cq,
13783 struct lpfc_wcqe_complete *wcqe)
13784{
13785 struct lpfc_iocbq *irspiocbq;
13786 unsigned long iflags;
13787 struct lpfc_sli_ring *pring = cq->pring;
13788 int txq_cnt = 0;
13789 int txcmplq_cnt = 0;
13790
13791
13792 if (unlikely(bf_get(lpfc_wcqe_c_status, wcqe))) {
13793
13794 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
13795 "0357 ELS CQE error: status=x%x: "
13796 "CQE: %08x %08x %08x %08x\n",
13797 bf_get(lpfc_wcqe_c_status, wcqe),
13798 wcqe->word0, wcqe->total_data_placed,
13799 wcqe->parameter, wcqe->word3);
13800 }
13801
13802
13803 irspiocbq = lpfc_sli_get_iocbq(phba);
13804 if (!irspiocbq) {
13805 if (!list_empty(&pring->txq))
13806 txq_cnt++;
13807 if (!list_empty(&pring->txcmplq))
13808 txcmplq_cnt++;
13809 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
13810 "0387 NO IOCBQ data: txq_cnt=%d iocb_cnt=%d "
13811 "els_txcmplq_cnt=%d\n",
13812 txq_cnt, phba->iocb_cnt,
13813 txcmplq_cnt);
13814 return false;
13815 }
13816
13817
13818 memcpy(&irspiocbq->cq_event.cqe.wcqe_cmpl, wcqe, sizeof(*wcqe));
13819 spin_lock_irqsave(&phba->hbalock, iflags);
13820 list_add_tail(&irspiocbq->cq_event.list,
13821 &phba->sli4_hba.sp_queue_event);
13822 phba->hba_flag |= HBA_SP_QUEUE_EVT;
13823 spin_unlock_irqrestore(&phba->hbalock, iflags);
13824
13825 return true;
13826}
13827
13828
13829
13830
13831
13832
13833
13834
13835
13836static void
13837lpfc_sli4_sp_handle_rel_wcqe(struct lpfc_hba *phba,
13838 struct lpfc_wcqe_release *wcqe)
13839{
13840
13841 if (unlikely(!phba->sli4_hba.els_wq))
13842 return;
13843
13844 if (bf_get(lpfc_wcqe_r_wq_id, wcqe) == phba->sli4_hba.els_wq->queue_id)
13845 lpfc_sli4_wq_release(phba->sli4_hba.els_wq,
13846 bf_get(lpfc_wcqe_r_wqe_index, wcqe));
13847 else
13848 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
13849 "2579 Slow-path wqe consume event carries "
13850 				"mismatched qid: wcqe-qid=x%x, sp-qid=x%x\n",
13851 bf_get(lpfc_wcqe_r_wqe_index, wcqe),
13852 phba->sli4_hba.els_wq->queue_id);
13853}
13854
13855
13856
13857
13858
13859
13860
13861
13862
13863
13864
13865static bool
13866lpfc_sli4_sp_handle_abort_xri_wcqe(struct lpfc_hba *phba,
13867 struct lpfc_queue *cq,
13868 struct sli4_wcqe_xri_aborted *wcqe)
13869{
13870 bool workposted = false;
13871 struct lpfc_cq_event *cq_event;
13872 unsigned long iflags;
13873
13874 switch (cq->subtype) {
13875 case LPFC_IO:
13876 lpfc_sli4_io_xri_aborted(phba, wcqe, cq->hdwq);
13877 if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) {
13878
13879 if (phba->nvmet_support)
13880 lpfc_sli4_nvmet_xri_aborted(phba, wcqe);
13881 }
13882 workposted = false;
13883 break;
13884 case LPFC_NVME_LS:
13885 case LPFC_ELS:
13886 cq_event = lpfc_cq_event_setup(phba, wcqe, sizeof(*wcqe));
13887 if (!cq_event) {
13888 workposted = false;
13889 break;
13890 }
13891 cq_event->hdwq = cq->hdwq;
13892 spin_lock_irqsave(&phba->sli4_hba.els_xri_abrt_list_lock,
13893 iflags);
13894 list_add_tail(&cq_event->list,
13895 &phba->sli4_hba.sp_els_xri_aborted_work_queue);
13896
13897 phba->hba_flag |= ELS_XRI_ABORT_EVENT;
13898 spin_unlock_irqrestore(&phba->sli4_hba.els_xri_abrt_list_lock,
13899 iflags);
13900 workposted = true;
13901 break;
13902 default:
13903 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
13904 "0603 Invalid CQ subtype %d: "
13905 "%08x %08x %08x %08x\n",
13906 cq->subtype, wcqe->word0, wcqe->parameter,
13907 wcqe->word2, wcqe->word3);
13908 workposted = false;
13909 break;
13910 }
13911 return workposted;
13912}
13913
13914#define FC_RCTL_MDS_DIAGS 0xF4
13915
13916
13917
13918
13919
13920
13921
13922
13923
13924
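/**
 * lpfc_sli4_sp_handle_rcqe - Handle a slow-path receive queue completion
 * @phba: pointer to HBA context object
 * @rcqe: the receive queue completion entry
 *
 * Pulls the received frame off the header/data receive queues.  MDS
 * diagnostic and unsolicited data frames are handled immediately; other
 * frames are queued on the slow-path event list for the worker thread.
 * Buffer exhaustion is flagged so more receive buffers get posted.
 *
 * Return: true if work was posted to the worker thread.
 */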
13925static bool
13926lpfc_sli4_sp_handle_rcqe(struct lpfc_hba *phba, struct lpfc_rcqe *rcqe)
13927{
13928 bool workposted = false;
13929 struct fc_frame_header *fc_hdr;
13930 struct lpfc_queue *hrq = phba->sli4_hba.hdr_rq;
13931 struct lpfc_queue *drq = phba->sli4_hba.dat_rq;
13932 struct lpfc_nvmet_tgtport *tgtp;
13933 struct hbq_dmabuf *dma_buf;
13934 uint32_t status, rq_id;
13935 unsigned long iflags;
13936
13937
13938 if (unlikely(!hrq) || unlikely(!drq))
13939 return workposted;
13940
13941 if (bf_get(lpfc_cqe_code, rcqe) == CQE_CODE_RECEIVE_V1)
13942 rq_id = bf_get(lpfc_rcqe_rq_id_v1, rcqe);
13943 else
13944 rq_id = bf_get(lpfc_rcqe_rq_id, rcqe);
13945 if (rq_id != hrq->queue_id)
13946 goto out;
13947
13948 status = bf_get(lpfc_rcqe_status, rcqe);
13949 switch (status) {
13950 case FC_STATUS_RQ_BUF_LEN_EXCEEDED:
13951 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
13952 "2537 Receive Frame Truncated!!\n");
13953 fallthrough;
13954 case FC_STATUS_RQ_SUCCESS:
13955 spin_lock_irqsave(&phba->hbalock, iflags);
13956 lpfc_sli4_rq_release(hrq, drq);
13957 dma_buf = lpfc_sli_hbqbuf_get(&phba->hbqs[0].hbq_buffer_list);
13958 if (!dma_buf) {
13959 hrq->RQ_no_buf_found++;
13960 spin_unlock_irqrestore(&phba->hbalock, iflags);
13961 goto out;
13962 }
13963 hrq->RQ_rcv_buf++;
13964 hrq->RQ_buf_posted--;
13965 memcpy(&dma_buf->cq_event.cqe.rcqe_cmpl, rcqe, sizeof(*rcqe));
13966
13967 fc_hdr = (struct fc_frame_header *)dma_buf->hbuf.virt;
13968
13969 if (fc_hdr->fh_r_ctl == FC_RCTL_MDS_DIAGS ||
13970 fc_hdr->fh_r_ctl == FC_RCTL_DD_UNSOL_DATA) {
13971 spin_unlock_irqrestore(&phba->hbalock, iflags);
13972
13973 if (!(phba->pport->load_flag & FC_UNLOADING))
13974 lpfc_sli4_handle_mds_loopback(phba->pport,
13975 dma_buf);
13976 else
13977 lpfc_in_buf_free(phba, &dma_buf->dbuf);
13978 break;
13979 }
13980
13981
13982 list_add_tail(&dma_buf->cq_event.list,
13983 &phba->sli4_hba.sp_queue_event);
13984
13985 phba->hba_flag |= HBA_SP_QUEUE_EVT;
13986 spin_unlock_irqrestore(&phba->hbalock, iflags);
13987 workposted = true;
13988 break;
13989 case FC_STATUS_INSUFF_BUF_FRM_DISC:
13990 if (phba->nvmet_support) {
13991 tgtp = phba->targetport->private;
13992 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
13993 "6402 RQE Error x%x, posted %d err_cnt "
13994 "%d: %x %x %x\n",
13995 status, hrq->RQ_buf_posted,
13996 hrq->RQ_no_posted_buf,
13997 atomic_read(&tgtp->rcv_fcp_cmd_in),
13998 atomic_read(&tgtp->rcv_fcp_cmd_out),
13999 atomic_read(&tgtp->xmt_fcp_release));
14000 }
14001 fallthrough;
14002
14003 case FC_STATUS_INSUFF_BUF_NEED_BUF:
14004 hrq->RQ_no_posted_buf++;
14005
14006 spin_lock_irqsave(&phba->hbalock, iflags);
14007 phba->hba_flag |= HBA_POST_RECEIVE_BUFFER;
14008 spin_unlock_irqrestore(&phba->hbalock, iflags);
14009 workposted = true;
14010 break;
14011 }
14012out:
14013 return workposted;
14014}
14015
14016
14017
14018
14019
14020
14021
14022
14023
14024
14025
14026
14027static bool
14028lpfc_sli4_sp_handle_cqe(struct lpfc_hba *phba, struct lpfc_queue *cq,
14029 struct lpfc_cqe *cqe)
14030{
14031 struct lpfc_cqe cqevt;
14032 bool workposted = false;
14033
14034
14035 lpfc_sli4_pcimem_bcopy(cqe, &cqevt, sizeof(struct lpfc_cqe));
14036
14037
14038 switch (bf_get(lpfc_cqe_code, &cqevt)) {
14039 case CQE_CODE_COMPL_WQE:
14040
14041 phba->last_completion_time = jiffies;
14042 workposted = lpfc_sli4_sp_handle_els_wcqe(phba, cq,
14043 (struct lpfc_wcqe_complete *)&cqevt);
14044 break;
14045 case CQE_CODE_RELEASE_WQE:
14046
14047 lpfc_sli4_sp_handle_rel_wcqe(phba,
14048 (struct lpfc_wcqe_release *)&cqevt);
14049 break;
14050 case CQE_CODE_XRI_ABORTED:
14051
14052 phba->last_completion_time = jiffies;
14053 workposted = lpfc_sli4_sp_handle_abort_xri_wcqe(phba, cq,
14054 (struct sli4_wcqe_xri_aborted *)&cqevt);
14055 break;
14056 case CQE_CODE_RECEIVE:
14057 case CQE_CODE_RECEIVE_V1:
14058
14059 phba->last_completion_time = jiffies;
14060 workposted = lpfc_sli4_sp_handle_rcqe(phba,
14061 (struct lpfc_rcqe *)&cqevt);
14062 break;
14063 default:
14064 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
14065 "0388 Not a valid WCQE code: x%x\n",
14066 bf_get(lpfc_cqe_code, &cqevt));
14067 break;
14068 }
14069 return workposted;
14070}
14071
14072
14073
14074
14075
14076
14077
14078
14079
14080
14081
14082
14083
14084
14085
14086static void
14087lpfc_sli4_sp_handle_eqe(struct lpfc_hba *phba, struct lpfc_eqe *eqe,
14088 struct lpfc_queue *speq)
14089{
14090 struct lpfc_queue *cq = NULL, *childq;
14091 uint16_t cqid;
14092 int ret = 0;
14093
14094
14095 cqid = bf_get_le32(lpfc_eqe_resource_id, eqe);
14096
14097 list_for_each_entry(childq, &speq->child_list, list) {
14098 if (childq->queue_id == cqid) {
14099 cq = childq;
14100 break;
14101 }
14102 }
14103 if (unlikely(!cq)) {
14104 if (phba->sli.sli_flag & LPFC_SLI_ACTIVE)
14105 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
14106 "0365 Slow-path CQ identifier "
14107 "(%d) does not exist\n", cqid);
14108 return;
14109 }
14110
14111
14112 cq->assoc_qp = speq;
14113
14114 if (is_kdump_kernel())
14115 ret = queue_work(phba->wq, &cq->spwork);
14116 else
14117 ret = queue_work_on(cq->chann, phba->wq, &cq->spwork);
14118
14119 if (!ret)
14120 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
14121 "0390 Cannot schedule queue work "
14122 "for CQ eqcqid=%d, cqid=%d on CPU %d\n",
14123 cqid, cq->queue_id, raw_smp_processor_id());
14124}
14125
14126
14127
14128
14129
14130
14131
14132
14133
14134
14135
14136
14137
14138
14139
14140
14141
14142
14143
14144
14145
14146
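/**
 * __lpfc_sli4_process_cq - Process entries on a completion queue
 * @phba: pointer to HBA context object
 * @cq: completion queue to drain
 * @handler: per-entry handler to invoke
 * @delay: set to a re-schedule delay when the poll budget was exhausted
 * @poll_mode: LPFC_QUEUE_WORK or LPFC_IRQ_POLL
 *
 * Claims the queue, consumes entries until it is empty or the processing
 * limit is reached, periodically ringing the doorbell, and finally re-arms
 * the queue (or leaves it un-armed when more work is pending and a delayed
 * re-poll was requested).
 *
 * Return: true if any handler requested worker-thread processing.
 */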
14147static bool
14148__lpfc_sli4_process_cq(struct lpfc_hba *phba, struct lpfc_queue *cq,
14149 bool (*handler)(struct lpfc_hba *, struct lpfc_queue *,
14150 struct lpfc_cqe *), unsigned long *delay,
14151 enum lpfc_poll_mode poll_mode)
14152{
14153 struct lpfc_cqe *cqe;
14154 bool workposted = false;
14155 int count = 0, consumed = 0;
14156 bool arm = true;
14157
14158
14159 *delay = 0;
14160
14161 if (cmpxchg(&cq->queue_claimed, 0, 1) != 0)
14162 goto rearm_and_exit;
14163
14164
14165 cq->q_flag = 0;
14166 cqe = lpfc_sli4_cq_get(cq);
14167 while (cqe) {
14168 workposted |= handler(phba, cq, cqe);
14169 __lpfc_sli4_consume_cqe(phba, cq, cqe);
14170
14171 consumed++;
14172 if (!(++count % cq->max_proc_limit))
14173 break;
14174
14175 if (!(count % cq->notify_interval)) {
14176 phba->sli4_hba.sli4_write_cq_db(phba, cq, consumed,
14177 LPFC_QUEUE_NOARM);
14178 consumed = 0;
14179 cq->assoc_qp->q_flag |= HBA_EQ_DELAY_CHK;
14180 }
14181
14182 if (count == LPFC_NVMET_CQ_NOTIFY)
14183 cq->q_flag |= HBA_NVMET_CQ_NOTIFY;
14184
14185 cqe = lpfc_sli4_cq_get(cq);
14186 }
14187 if (count >= phba->cfg_cq_poll_threshold) {
14188 *delay = 1;
14189 arm = false;
14190 }
14191
14192
14193 if (poll_mode == LPFC_IRQ_POLL)
14194 irq_poll_complete(&cq->iop);
14195
14196
14197 if (count > cq->CQ_max_cqe)
14198 cq->CQ_max_cqe = count;
14199
14200 cq->assoc_qp->EQ_cqe_cnt += count;
14201
14202
14203 if (unlikely(count == 0))
14204 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
14205 "0369 No entry from completion queue "
14206 "qid=%d\n", cq->queue_id);
14207
14208 xchg(&cq->queue_claimed, 0);
14209
14210rearm_and_exit:
14211 phba->sli4_hba.sli4_write_cq_db(phba, cq, consumed,
14212 arm ? LPFC_QUEUE_REARM : LPFC_QUEUE_NOARM);
14213
14214 return workposted;
14215}
14216
14217
14218
14219
14220
14221
14222
14223
14224
14225
14226
14227
14228
14229
14230
14231
14232static void
14233__lpfc_sli4_sp_process_cq(struct lpfc_queue *cq)
14234{
14235 struct lpfc_hba *phba = cq->phba;
14236 unsigned long delay;
14237 bool workposted = false;
14238 int ret = 0;
14239
14240
14241 switch (cq->type) {
14242 case LPFC_MCQ:
14243 workposted |= __lpfc_sli4_process_cq(phba, cq,
14244 lpfc_sli4_sp_handle_mcqe,
14245 &delay, LPFC_QUEUE_WORK);
14246 break;
14247 case LPFC_WCQ:
14248 if (cq->subtype == LPFC_IO)
14249 workposted |= __lpfc_sli4_process_cq(phba, cq,
14250 lpfc_sli4_fp_handle_cqe,
14251 &delay, LPFC_QUEUE_WORK);
14252 else
14253 workposted |= __lpfc_sli4_process_cq(phba, cq,
14254 lpfc_sli4_sp_handle_cqe,
14255 &delay, LPFC_QUEUE_WORK);
14256 break;
14257 default:
14258 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
14259 "0370 Invalid completion queue type (%d)\n",
14260 cq->type);
14261 return;
14262 }
14263
14264 if (delay) {
14265 if (is_kdump_kernel())
14266 ret = queue_delayed_work(phba->wq, &cq->sched_spwork,
14267 delay);
14268 else
14269 ret = queue_delayed_work_on(cq->chann, phba->wq,
14270 &cq->sched_spwork, delay);
14271 if (!ret)
14272 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
14273 "0394 Cannot schedule queue work "
14274 "for cqid=%d on CPU %d\n",
14275 cq->queue_id, cq->chann);
14276 }
14277
14278
14279 if (workposted)
14280 lpfc_worker_wake_up(phba);
14281}
14282
14283
14284
14285
14286
14287
14288
14289
14290static void
14291lpfc_sli4_sp_process_cq(struct work_struct *work)
14292{
14293 struct lpfc_queue *cq = container_of(work, struct lpfc_queue, spwork);
14294
14295 __lpfc_sli4_sp_process_cq(cq);
14296}
14297
14298
14299
14300
14301
14302
14303
14304static void
14305lpfc_sli4_dly_sp_process_cq(struct work_struct *work)
14306{
14307 struct lpfc_queue *cq = container_of(to_delayed_work(work),
14308 struct lpfc_queue, sched_spwork);
14309
14310 __lpfc_sli4_sp_process_cq(cq);
14311}
14312
14313
14314
14315
14316
14317
14318
14319
14320
14321
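/**
 * lpfc_sli4_fp_handle_fcp_wcqe - Handle a fast-path FCP work-queue completion
 * @phba: pointer to HBA context object
 * @cq: completion queue the entry arrived on
 * @wcqe: the work-queue completion entry
 *
 * Looks up the command iocb by request tag and completes it, either
 * through its wqe_cmpl routine directly or by synthesizing an SLI-3 style
 * response iocb for the legacy iocb_cmpl path.  A local-reject /
 * no-resources status triggers queue-depth ramp down first.
 */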
14322static void
14323lpfc_sli4_fp_handle_fcp_wcqe(struct lpfc_hba *phba, struct lpfc_queue *cq,
14324 struct lpfc_wcqe_complete *wcqe)
14325{
14326 struct lpfc_sli_ring *pring = cq->pring;
14327 struct lpfc_iocbq *cmdiocbq;
14328 struct lpfc_iocbq irspiocbq;
14329 unsigned long iflags;
14330
14331
14332 if (unlikely(bf_get(lpfc_wcqe_c_status, wcqe))) {
14333
14334
14335
14336 if (((bf_get(lpfc_wcqe_c_status, wcqe) ==
14337 IOSTAT_LOCAL_REJECT)) &&
14338 ((wcqe->parameter & IOERR_PARAM_MASK) ==
14339 IOERR_NO_RESOURCES))
14340 phba->lpfc_rampdown_queue_depth(phba);
14341
14342
14343 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
14344 "0373 FCP CQE cmpl: status=x%x: "
14345 "CQE: %08x %08x %08x %08x\n",
14346 bf_get(lpfc_wcqe_c_status, wcqe),
14347 wcqe->word0, wcqe->total_data_placed,
14348 wcqe->parameter, wcqe->word3);
14349 }
14350
14351
14352 spin_lock_irqsave(&pring->ring_lock, iflags);
14353 pring->stats.iocb_event++;
14354 spin_unlock_irqrestore(&pring->ring_lock, iflags);
14355 cmdiocbq = lpfc_sli_iocbq_lookup_by_tag(phba, pring,
14356 bf_get(lpfc_wcqe_c_request_tag, wcqe));
14357 if (unlikely(!cmdiocbq)) {
14358 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
14359 "0374 FCP complete with no corresponding "
14360 "cmdiocb: iotag (%d)\n",
14361 bf_get(lpfc_wcqe_c_request_tag, wcqe));
14362 return;
14363 }
14364#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
14365 cmdiocbq->isr_timestamp = cq->isr_timestamp;
14366#endif
14367 if (cmdiocbq->iocb_cmpl == NULL) {
14368 if (cmdiocbq->wqe_cmpl) {
14369
14370 if (!(cmdiocbq->iocb_flag & LPFC_IO_FCP) &&
14371 cmdiocbq->iocb_flag & LPFC_DRIVER_ABORTED) {
14372 spin_lock_irqsave(&phba->hbalock, iflags);
14373 cmdiocbq->iocb_flag &= ~LPFC_DRIVER_ABORTED;
14374 spin_unlock_irqrestore(&phba->hbalock, iflags);
14375 }
14376
14377
14378 (cmdiocbq->wqe_cmpl)(phba, cmdiocbq, wcqe);
14379 return;
14380 }
14381 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
14382 "0375 FCP cmdiocb not callback function "
14383 "iotag: (%d)\n",
14384 bf_get(lpfc_wcqe_c_request_tag, wcqe));
14385 return;
14386 }
14387
14388
14389
14390 lpfc_sli4_iocb_param_transfer(phba, &irspiocbq, cmdiocbq, wcqe);
14391
14392 if (cmdiocbq->iocb_flag & LPFC_DRIVER_ABORTED) {
14393 spin_lock_irqsave(&phba->hbalock, iflags);
14394 cmdiocbq->iocb_flag &= ~LPFC_DRIVER_ABORTED;
14395 spin_unlock_irqrestore(&phba->hbalock, iflags);
14396 }
14397
14398
14399 (cmdiocbq->iocb_cmpl)(phba, cmdiocbq, &irspiocbq);
14400}
14401
14402
14403
14404
14405
14406
14407
14408
14409
14410
14411static void
14412lpfc_sli4_fp_handle_rel_wcqe(struct lpfc_hba *phba, struct lpfc_queue *cq,
14413 struct lpfc_wcqe_release *wcqe)
14414{
14415 struct lpfc_queue *childwq;
14416 bool wqid_matched = false;
14417 uint16_t hba_wqid;
14418
14419
14420 hba_wqid = bf_get(lpfc_wcqe_r_wq_id, wcqe);
14421 list_for_each_entry(childwq, &cq->child_list, list) {
14422 if (childwq->queue_id == hba_wqid) {
14423 lpfc_sli4_wq_release(childwq,
14424 bf_get(lpfc_wcqe_r_wqe_index, wcqe));
14425 if (childwq->q_flag & HBA_NVMET_WQFULL)
14426 lpfc_nvmet_wqfull_process(phba, childwq);
14427 wqid_matched = true;
14428 break;
14429 }
14430 }
14431
14432 	if (!wqid_matched)
14433 		lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
14434 				"2580 Fast-path wqe consume event carries "
14435 				"mismatched qid: wcqe-qid=x%x\n", hba_wqid);
14436}
14437
14438
14439
14440
14441
14442
14443
14444
14445
14446
14447
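/**
 * lpfc_sli4_nvmet_handle_rcqe - Handle an NVMET receive queue completion
 * @phba: pointer to HBA context object
 * @cq: completion queue the entry arrived on
 * @rcqe: the receive queue completion entry
 *
 * Pulls the received frame off the NVMET multi-receive queues and, for
 * well-formed single-sequence FCP frames, hands it to the NVMET
 * unsolicited receive path; anything else is dropped or accounted as a
 * buffer shortage.
 *
 * Return: true if work was posted to the worker thread.
 */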
14448static bool
14449lpfc_sli4_nvmet_handle_rcqe(struct lpfc_hba *phba, struct lpfc_queue *cq,
14450 struct lpfc_rcqe *rcqe)
14451{
14452 bool workposted = false;
14453 struct lpfc_queue *hrq;
14454 struct lpfc_queue *drq;
14455 struct rqb_dmabuf *dma_buf;
14456 struct fc_frame_header *fc_hdr;
14457 struct lpfc_nvmet_tgtport *tgtp;
14458 uint32_t status, rq_id;
14459 unsigned long iflags;
14460 uint32_t fctl, idx;
14461
14462 if ((phba->nvmet_support == 0) ||
14463 (phba->sli4_hba.nvmet_cqset == NULL))
14464 return workposted;
14465
14466 idx = cq->queue_id - phba->sli4_hba.nvmet_cqset[0]->queue_id;
14467 hrq = phba->sli4_hba.nvmet_mrq_hdr[idx];
14468 drq = phba->sli4_hba.nvmet_mrq_data[idx];
14469
14470
14471 if (unlikely(!hrq) || unlikely(!drq))
14472 return workposted;
14473
14474 if (bf_get(lpfc_cqe_code, rcqe) == CQE_CODE_RECEIVE_V1)
14475 rq_id = bf_get(lpfc_rcqe_rq_id_v1, rcqe);
14476 else
14477 rq_id = bf_get(lpfc_rcqe_rq_id, rcqe);
14478
14479 if ((phba->nvmet_support == 0) ||
14480 (rq_id != hrq->queue_id))
14481 return workposted;
14482
14483 status = bf_get(lpfc_rcqe_status, rcqe);
14484 switch (status) {
14485 case FC_STATUS_RQ_BUF_LEN_EXCEEDED:
14486 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
14487 "6126 Receive Frame Truncated!!\n");
14488 fallthrough;
14489 case FC_STATUS_RQ_SUCCESS:
14490 spin_lock_irqsave(&phba->hbalock, iflags);
14491 lpfc_sli4_rq_release(hrq, drq);
14492 dma_buf = lpfc_sli_rqbuf_get(phba, hrq);
14493 if (!dma_buf) {
14494 hrq->RQ_no_buf_found++;
14495 spin_unlock_irqrestore(&phba->hbalock, iflags);
14496 goto out;
14497 }
14498 spin_unlock_irqrestore(&phba->hbalock, iflags);
14499 hrq->RQ_rcv_buf++;
14500 hrq->RQ_buf_posted--;
14501 fc_hdr = (struct fc_frame_header *)dma_buf->hbuf.virt;
14502
14503
14504 fctl = (fc_hdr->fh_f_ctl[0] << 16 |
14505 fc_hdr->fh_f_ctl[1] << 8 |
14506 fc_hdr->fh_f_ctl[2]);
14507 if (((fctl &
14508 (FC_FC_FIRST_SEQ | FC_FC_END_SEQ | FC_FC_SEQ_INIT)) !=
14509 (FC_FC_FIRST_SEQ | FC_FC_END_SEQ | FC_FC_SEQ_INIT)) ||
14510 (fc_hdr->fh_seq_cnt != 0))
14511 goto drop;
14512
14513 if (fc_hdr->fh_type == FC_TYPE_FCP) {
14514 dma_buf->bytes_recv = bf_get(lpfc_rcqe_length, rcqe);
14515 lpfc_nvmet_unsol_fcp_event(
14516 phba, idx, dma_buf, cq->isr_timestamp,
14517 cq->q_flag & HBA_NVMET_CQ_NOTIFY);
14518 return false;
14519 }
14520drop:
14521 lpfc_rq_buf_free(phba, &dma_buf->hbuf);
14522 break;
14523 case FC_STATUS_INSUFF_BUF_FRM_DISC:
14524 if (phba->nvmet_support) {
14525 tgtp = phba->targetport->private;
14526 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
14527 "6401 RQE Error x%x, posted %d err_cnt "
14528 "%d: %x %x %x\n",
14529 status, hrq->RQ_buf_posted,
14530 hrq->RQ_no_posted_buf,
14531 atomic_read(&tgtp->rcv_fcp_cmd_in),
14532 atomic_read(&tgtp->rcv_fcp_cmd_out),
14533 atomic_read(&tgtp->xmt_fcp_release));
14534 }
14535 fallthrough;
14536
14537 case FC_STATUS_INSUFF_BUF_NEED_BUF:
14538 hrq->RQ_no_posted_buf++;
14539
14540 break;
14541 }
14542out:
14543 return workposted;
14544}
14545
14546
14547
14548
14549
14550
14551
14552
14553
14554
14555
14556
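/**
 * lpfc_sli4_fp_handle_cqe - Dispatch a fast-path completion queue entry
 * @phba: Pointer to HBA context object.
 * @cq: Pointer to the completion queue.
 * @cqe: Pointer to the completion queue entry.
 *
 * Copies the entry out of queue memory and routes it by CQE code: work-queue
 * completions, WQ release events, XRI aborts and received frames (NVMET) are
 * each handed to their specific handler; unknown codes are logged.
 *
 * Returns whether additional worker-thread work was posted.
 **/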
14557static bool
14558lpfc_sli4_fp_handle_cqe(struct lpfc_hba *phba, struct lpfc_queue *cq,
14559 struct lpfc_cqe *cqe)
14560{
14561 struct lpfc_wcqe_release wcqe;
14562 bool workposted = false;
14563
14564
14565 lpfc_sli4_pcimem_bcopy(cqe, &wcqe, sizeof(struct lpfc_cqe));
14566
14567
14568 switch (bf_get(lpfc_wcqe_c_code, &wcqe)) {
14569 case CQE_CODE_COMPL_WQE:
14570 case CQE_CODE_NVME_ERSP:
14571 cq->CQ_wq++;
14572
14573 phba->last_completion_time = jiffies;
14574 if (cq->subtype == LPFC_IO || cq->subtype == LPFC_NVME_LS)
14575 lpfc_sli4_fp_handle_fcp_wcqe(phba, cq,
14576 (struct lpfc_wcqe_complete *)&wcqe);
14577 break;
14578 case CQE_CODE_RELEASE_WQE:
14579 cq->CQ_release_wqe++;
14580
14581 lpfc_sli4_fp_handle_rel_wcqe(phba, cq,
14582 (struct lpfc_wcqe_release *)&wcqe);
14583 break;
14584 case CQE_CODE_XRI_ABORTED:
14585 cq->CQ_xri_aborted++;
14586
14587 phba->last_completion_time = jiffies;
14588 workposted = lpfc_sli4_sp_handle_abort_xri_wcqe(phba, cq,
14589 (struct sli4_wcqe_xri_aborted *)&wcqe);
14590 break;
14591 case CQE_CODE_RECEIVE_V1:
14592 case CQE_CODE_RECEIVE:
14593 phba->last_completion_time = jiffies;
14594 if (cq->subtype == LPFC_NVMET) {
14595 workposted = lpfc_sli4_nvmet_handle_rcqe(
14596 phba, cq, (struct lpfc_rcqe *)&wcqe);
14597 }
14598 break;
14599 default:
14600 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
14601 "0144 Not a valid CQE code: x%x\n",
14602 bf_get(lpfc_wcqe_c_code, &wcqe));
14603 break;
14604 }
14605 return workposted;
14606}
14607
14608
14609
14610
14611
14612
14613
14614
14615
14616
14617
14618
14619
14620
14621
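/**
 * lpfc_sli4_sched_cq_work - Schedule deferred processing for a completion queue
 * @phba: Pointer to HBA context object.
 * @cq: Pointer to the completion queue needing service.
 * @cqid: CQ id reported by the event queue entry (used for logging).
 *
 * Depending on the CQ poll mode, either schedules the irq_poll handler or
 * queues the CQ's work item, pinned to the CQ's assigned CPU unless running
 * in a kdump kernel.
 **/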
14622static void lpfc_sli4_sched_cq_work(struct lpfc_hba *phba,
14623 struct lpfc_queue *cq, uint16_t cqid)
14624{
14625 int ret = 0;
14626
14627 switch (cq->poll_mode) {
14628 case LPFC_IRQ_POLL:
14629 irq_poll_sched(&cq->iop);
14630 break;
14631 case LPFC_QUEUE_WORK:
14632 default:
14633 if (is_kdump_kernel())
14634 ret = queue_work(phba->wq, &cq->irqwork);
14635 else
14636 ret = queue_work_on(cq->chann, phba->wq, &cq->irqwork);
14637 if (!ret)
14638 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
14639 "0383 Cannot schedule queue work "
14640 "for CQ eqcqid=%d, cqid=%d on CPU %d\n",
14641 cqid, cq->queue_id,
14642 raw_smp_processor_id());
14643 }
14644}
14645
14646
14647
14648
14649
14650
14651
14652
14653
14654
14655
14656
14657
14658
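/**
 * lpfc_sli4_hba_handle_eqe - Route a fast-path event queue entry
 * @phba: Pointer to HBA context object.
 * @eq: Pointer to the event queue that produced the entry.
 * @eqe: Pointer to the event queue entry.
 *
 * Extracts the completion queue id from the entry and locates the matching
 * CQ via the lookup table, the NVMET CQ set or the NVME LS CQ. Unmatched ids
 * are passed to the slow-path handler. A matching CQ is timestamped (when
 * debugfs timing is enabled) and scheduled for processing.
 **/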
14659static void
14660lpfc_sli4_hba_handle_eqe(struct lpfc_hba *phba, struct lpfc_queue *eq,
14661 struct lpfc_eqe *eqe)
14662{
14663 struct lpfc_queue *cq = NULL;
14664 uint32_t qidx = eq->hdwq;
14665 uint16_t cqid, id;
14666
14667 if (unlikely(bf_get_le32(lpfc_eqe_major_code, eqe) != 0)) {
14668 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
14669 "0366 Not a valid completion "
14670 "event: majorcode=x%x, minorcode=x%x\n",
14671 bf_get_le32(lpfc_eqe_major_code, eqe),
14672 bf_get_le32(lpfc_eqe_minor_code, eqe));
14673 return;
14674 }
14675
14676
14677 cqid = bf_get_le32(lpfc_eqe_resource_id, eqe);
14678
14679
14680 if (cqid <= phba->sli4_hba.cq_max) {
14681 cq = phba->sli4_hba.cq_lookup[cqid];
14682 if (cq)
14683 goto work_cq;
14684 }
14685
14686
14687 if (phba->cfg_nvmet_mrq && phba->sli4_hba.nvmet_cqset) {
14688 id = phba->sli4_hba.nvmet_cqset[0]->queue_id;
14689 if ((cqid >= id) && (cqid < (id + phba->cfg_nvmet_mrq))) {
14690
14691 cq = phba->sli4_hba.nvmet_cqset[cqid - id];
14692 goto process_cq;
14693 }
14694 }
14695
14696 if (phba->sli4_hba.nvmels_cq &&
14697 (cqid == phba->sli4_hba.nvmels_cq->queue_id)) {
14698
14699 cq = phba->sli4_hba.nvmels_cq;
14700 }
14701
14702
14703 if (cq == NULL) {
14704 lpfc_sli4_sp_handle_eqe(phba, eqe,
14705 phba->sli4_hba.hdwq[qidx].hba_eq);
14706 return;
14707 }
14708
14709process_cq:
14710 if (unlikely(cqid != cq->queue_id)) {
14711 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
14712 "0368 Mismatched fast-path completion "
14713 "queue identifier: eqcqid=%d, fcpcqid=%d\n",
14714 cqid, cq->queue_id);
14715 return;
14716 }
14717
14718work_cq:
14719#if defined(CONFIG_SCSI_LPFC_DEBUG_FS)
14720 if (phba->ktime_on)
14721 cq->isr_timestamp = ktime_get_ns();
14722 else
14723 cq->isr_timestamp = 0;
14724#endif
14725 lpfc_sli4_sched_cq_work(phba, cq, cqid);
14726}
14727
14728
14729
14730
14731
14732
14733
14734
14735
14736
14737
14738
14739
14740
14741
14742
14743
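/**
 * __lpfc_sli4_hba_process_cq - Service entries on a fast-path completion queue
 * @cq: Pointer to the completion queue to process.
 * @poll_mode: Polling mode the CQ is being serviced under.
 *
 * Processes CQ entries through lpfc_sli4_fp_handle_cqe(). If the processing
 * loop requests a reschedule, the CQ's delayed work item is queued with the
 * requested delay; if any handler posted work, the worker thread is woken.
 **/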
14744static void
14745__lpfc_sli4_hba_process_cq(struct lpfc_queue *cq,
14746 enum lpfc_poll_mode poll_mode)
14747{
14748 struct lpfc_hba *phba = cq->phba;
14749 unsigned long delay;
14750 bool workposted = false;
14751 int ret = 0;
14752
14753
14754 workposted |= __lpfc_sli4_process_cq(phba, cq, lpfc_sli4_fp_handle_cqe,
14755 &delay, poll_mode);
14756
14757 if (delay) {
14758 if (is_kdump_kernel())
14759 ret = queue_delayed_work(phba->wq, &cq->sched_irqwork,
14760 delay);
14761 else
14762 ret = queue_delayed_work_on(cq->chann, phba->wq,
14763 &cq->sched_irqwork, delay);
14764 if (!ret)
14765 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
14766 "0367 Cannot schedule queue work "
14767 "for cqid=%d on CPU %d\n",
14768 cq->queue_id, cq->chann);
14769 }
14770
14771
14772 if (workposted)
14773 lpfc_worker_wake_up(phba);
14774}
14775
14776
14777
14778
14779
14780
14781
14782
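/**
 * lpfc_sli4_hba_process_cq - Fast-path CQ work-queue entry point
 * @work: Pointer to the work_struct embedded in the lpfc_queue.
 *
 * Recovers the owning completion queue and services it in queue-work mode.
 **/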
14783static void
14784lpfc_sli4_hba_process_cq(struct work_struct *work)
14785{
14786 struct lpfc_queue *cq = container_of(work, struct lpfc_queue, irqwork);
14787
14788 __lpfc_sli4_hba_process_cq(cq, LPFC_QUEUE_WORK);
14789}
14790
14791
14792
14793
14794
14795
14796
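/**
 * lpfc_sli4_dly_hba_process_cq - Delayed fast-path CQ work-queue entry point
 * @work: Pointer to the delayed work_struct embedded in the lpfc_queue.
 *
 * Recovers the owning completion queue and services it in queue-work mode
 * after the requested delay has elapsed.
 **/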
14797static void
14798lpfc_sli4_dly_hba_process_cq(struct work_struct *work)
14799{
14800 struct lpfc_queue *cq = container_of(to_delayed_work(work),
14801 struct lpfc_queue, sched_irqwork);
14802
14803 __lpfc_sli4_hba_process_cq(cq, LPFC_QUEUE_WORK);
14804}
14805
14806
14807
14808
14809
14810
14811
14812
14813
14814
14815
14816
14817
14818
14819
14820
14821
14822
14823
14824
14825
14826
14827
14828
14829
14830
14831
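/**
 * lpfc_sli4_hba_intr_handler - Fast-path MSI-X interrupt handler
 * @irq: Interrupt number.
 * @dev_id: Pointer to the lpfc_hba_eq_hdl for this vector.
 *
 * Validates the adapter and event queue, flushes the EQ/CQs if the device is
 * not online, updates per-CPU interrupt statistics and, when the interrupt
 * rate crosses the trigger threshold, raises the EQ delay to its maximum.
 * The EQ is then processed and re-armed.
 *
 * Returns IRQ_NONE if the adapter or EQ is not ready, or if a non-MSI-X
 * interrupt arrives with no EQ entries; IRQ_HANDLED otherwise.
 **/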
14832irqreturn_t
14833lpfc_sli4_hba_intr_handler(int irq, void *dev_id)
14834{
14835 struct lpfc_hba *phba;
14836 struct lpfc_hba_eq_hdl *hba_eq_hdl;
14837 struct lpfc_queue *fpeq;
14838 unsigned long iflag;
14839 int ecount = 0;
14840 int hba_eqidx;
14841 struct lpfc_eq_intr_info *eqi;
14842
14843
14844 hba_eq_hdl = (struct lpfc_hba_eq_hdl *)dev_id;
14845 phba = hba_eq_hdl->phba;
14846 hba_eqidx = hba_eq_hdl->idx;
14847
14848 if (unlikely(!phba))
14849 return IRQ_NONE;
14850 if (unlikely(!phba->sli4_hba.hdwq))
14851 return IRQ_NONE;
14852
14853
14854 fpeq = phba->sli4_hba.hba_eq_hdl[hba_eqidx].eq;
14855 if (unlikely(!fpeq))
14856 return IRQ_NONE;
14857
14858
14859 if (unlikely(lpfc_intr_state_check(phba))) {
14860
14861 spin_lock_irqsave(&phba->hbalock, iflag);
14862 if (phba->link_state < LPFC_LINK_DOWN)
14863
14864 lpfc_sli4_eqcq_flush(phba, fpeq);
14865 spin_unlock_irqrestore(&phba->hbalock, iflag);
14866 return IRQ_NONE;
14867 }
14868
14869 eqi = this_cpu_ptr(phba->sli4_hba.eq_info);
14870 eqi->icnt++;
14871
14872 fpeq->last_cpu = raw_smp_processor_id();
14873
14874 if (eqi->icnt > LPFC_EQD_ISR_TRIGGER &&
14875 fpeq->q_flag & HBA_EQ_DELAY_CHK &&
14876 phba->cfg_auto_imax &&
14877 fpeq->q_mode != LPFC_MAX_AUTO_EQ_DELAY &&
14878 phba->sli.sli_flag & LPFC_SLI_USE_EQDR)
14879 lpfc_sli4_mod_hba_eq_delay(phba, fpeq, LPFC_MAX_AUTO_EQ_DELAY);
14880
14881
14882 ecount = lpfc_sli4_process_eq(phba, fpeq, LPFC_QUEUE_REARM);
14883
14884 if (unlikely(ecount == 0)) {
14885 fpeq->EQ_no_entry++;
14886 if (phba->intr_type == MSIX)
14887
14888 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
14889 "0358 MSI-X interrupt with no EQE\n");
14890 else
14891
14892 return IRQ_NONE;
14893 }
14894
14895 return IRQ_HANDLED;
14896}
14897
14898
14899
14900
14901
14902
14903
14904
14905
14906
14907
14908
14909
14910
14911
14912
14913
14914
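/**
 * lpfc_sli4_intr_handler - Device-level SLI-4 interrupt handler
 * @irq: Interrupt number.
 * @dev_id: Pointer to the HBA context object.
 *
 * Invokes the fast-path handler for every configured interrupt channel and
 * reports IRQ_HANDLED if any of them serviced an event queue.
 **/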
14915irqreturn_t
14916lpfc_sli4_intr_handler(int irq, void *dev_id)
14917{
14918 struct lpfc_hba *phba;
14919 irqreturn_t hba_irq_rc;
14920 bool hba_handled = false;
14921 int qidx;
14922
14923
14924 phba = (struct lpfc_hba *)dev_id;
14925
14926 if (unlikely(!phba))
14927 return IRQ_NONE;
14928
14929
14930
14931
14932 for (qidx = 0; qidx < phba->cfg_irq_chann; qidx++) {
14933 hba_irq_rc = lpfc_sli4_hba_intr_handler(irq,
14934 &phba->sli4_hba.hba_eq_hdl[qidx]);
14935 if (hba_irq_rc == IRQ_HANDLED)
14936 hba_handled = true;
14937 }
14938
14939 return hba_handled ? IRQ_HANDLED : IRQ_NONE;
14940}
14941
14942void lpfc_sli4_poll_hbtimer(struct timer_list *t)
14943{
14944 struct lpfc_hba *phba = from_timer(phba, t, cpuhp_poll_timer);
14945 struct lpfc_queue *eq;
14946 int i = 0;
14947
14948 rcu_read_lock();
14949
14950 list_for_each_entry_rcu(eq, &phba->poll_list, _poll_list)
14951 i += lpfc_sli4_poll_eq(eq, LPFC_POLL_SLOWPATH);
14952 if (!list_empty(&phba->poll_list))
14953 mod_timer(&phba->cpuhp_poll_timer,
14954 jiffies + msecs_to_jiffies(LPFC_POLL_HB));
14955
14956 rcu_read_unlock();
14957}
14958
14959inline int lpfc_sli4_poll_eq(struct lpfc_queue *eq, uint8_t path)
14960{
14961 struct lpfc_hba *phba = eq->phba;
14962 int i = 0;
14963
14964
14965
14966
14967
14968
14969
14970
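 /*
  * Ensure the latest eq->mode, published under the smp_wmb() in
  * __lpfc_sli4_switch_eqmode(), is observed before deciding whether
  * this EQ should be polled.
  */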
14971 smp_rmb();
14972
14973 if (READ_ONCE(eq->mode) == LPFC_EQ_POLL)
14974
14975
14976
14977
14978
14979
14980
14981 i = lpfc_sli4_process_eq(phba, eq, LPFC_QUEUE_NOARM);
14982
14983 return i;
14984}
14985
14986static inline void lpfc_sli4_add_to_poll_list(struct lpfc_queue *eq)
14987{
14988 struct lpfc_hba *phba = eq->phba;
14989
14990
14991 if (list_empty(&phba->poll_list))
14992 mod_timer(&phba->cpuhp_poll_timer,
14993 jiffies + msecs_to_jiffies(LPFC_POLL_HB));
14994
14995 list_add_rcu(&eq->_poll_list, &phba->poll_list);
14996 synchronize_rcu();
14997}
14998
14999static inline void lpfc_sli4_remove_from_poll_list(struct lpfc_queue *eq)
15000{
15001 struct lpfc_hba *phba = eq->phba;
15002
15003
15004
15005
15006 list_del_rcu(&eq->_poll_list);
15007 synchronize_rcu();
15008
15009 if (list_empty(&phba->poll_list))
15010 del_timer_sync(&phba->cpuhp_poll_timer);
15011}
15012
15013void lpfc_sli4_cleanup_poll_list(struct lpfc_hba *phba)
15014{
15015 struct lpfc_queue *eq, *next;
15016
15017 list_for_each_entry_safe(eq, next, &phba->poll_list, _poll_list)
15018 list_del(&eq->_poll_list);
15019
15020 INIT_LIST_HEAD(&phba->poll_list);
15021 synchronize_rcu();
15022}
15023
15024static inline void
15025__lpfc_sli4_switch_eqmode(struct lpfc_queue *eq, uint8_t mode)
15026{
15027 if (mode == eq->mode)
15028 return;
15029
15030
15031
15032
15033
15034
15035
15036
15037
15038
15039
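 /*
  * Publish the new mode before the poll-list update below; the
  * smp_wmb() that follows orders this store against the list change
  * and is observed via the smp_rmb() in lpfc_sli4_poll_eq().
  */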
15040 WRITE_ONCE(eq->mode, mode);
15041
15042 smp_wmb();
15043
15044
15045
15046
15047
15048
15049
15050
15051
15052
15053
15054
15055
15056 mode ? lpfc_sli4_add_to_poll_list(eq) :
15057 lpfc_sli4_remove_from_poll_list(eq);
15058}
15059
15060void lpfc_sli4_start_polling(struct lpfc_queue *eq)
15061{
15062 __lpfc_sli4_switch_eqmode(eq, LPFC_EQ_POLL);
15063}
15064
15065void lpfc_sli4_stop_polling(struct lpfc_queue *eq)
15066{
15067 struct lpfc_hba *phba = eq->phba;
15068
15069 __lpfc_sli4_switch_eqmode(eq, LPFC_EQ_INTERRUPT);
15070
15071
15072
15073
15074
15075
15076
15077
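 /*
  * While polled, the EQ was serviced with LPFC_QUEUE_NOARM; rearm the
  * EQ doorbell now that interrupt mode is restored so the hardware
  * can signal new events.
  */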
15078 phba->sli4_hba.sli4_write_eq_db(phba, eq, 0, LPFC_QUEUE_REARM);
15079}
15080
15081
15082
15083
15084
15085
15086
15087
15088
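/**
 * lpfc_sli4_queue_free - Release a queue allocated by lpfc_sli4_queue_alloc
 * @queue: Pointer to the queue structure to free; may be NULL.
 *
 * Unlinks the queue from any WQ/CPU lists, frees every DMA-coherent page on
 * its page list, releases any receive-queue buffer bookkeeping and finally
 * frees the queue structure itself.
 **/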
15089void
15090lpfc_sli4_queue_free(struct lpfc_queue *queue)
15091{
15092 struct lpfc_dmabuf *dmabuf;
15093
15094 if (!queue)
15095 return;
15096
15097 if (!list_empty(&queue->wq_list))
15098 list_del(&queue->wq_list);
15099
15100 while (!list_empty(&queue->page_list)) {
15101 list_remove_head(&queue->page_list, dmabuf, struct lpfc_dmabuf,
15102 list);
15103 dma_free_coherent(&queue->phba->pcidev->dev, queue->page_size,
15104 dmabuf->virt, dmabuf->phys);
15105 kfree(dmabuf);
15106 }
15107 if (queue->rqbp) {
15108 lpfc_free_rq_buffer(queue->phba, queue);
15109 kfree(queue->rqbp);
15110 }
15111
15112 if (!list_empty(&queue->cpu_list))
15113 list_del(&queue->cpu_list);
15114
15115 kfree(queue);
15116 return;
15117}
15118
15119
15120
15121
15122
15123
15124
15125
15126
15127
15128
15129
15130
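/**
 * lpfc_sli4_queue_alloc - Allocate and initialize an SLI-4 queue structure
 * @phba: Pointer to HBA context object.
 * @page_size: Requested queue page size.
 * @entry_size: Size of each queue entry.
 * @entry_count: Number of entries in the queue.
 * @cpu: CPU whose NUMA node the allocations should favor.
 *
 * Allocates the queue structure and enough DMA-coherent pages to hold
 * @entry_count entries (capped at the adapter's supported page count),
 * initializes the internal lists and work items and records the page layout.
 *
 * Returns the new queue on success, NULL on any allocation failure.
 **/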
15131struct lpfc_queue *
15132lpfc_sli4_queue_alloc(struct lpfc_hba *phba, uint32_t page_size,
15133 uint32_t entry_size, uint32_t entry_count, int cpu)
15134{
15135 struct lpfc_queue *queue;
15136 struct lpfc_dmabuf *dmabuf;
15137 uint32_t hw_page_size = phba->sli4_hba.pc_sli4_params.if_page_sz;
15138 uint16_t x, pgcnt;
15139
15140 if (!phba->sli4_hba.pc_sli4_params.supported)
15141 hw_page_size = page_size;
15142
15143 pgcnt = ALIGN(entry_size * entry_count, hw_page_size) / hw_page_size;
15144
15145
15146 if (pgcnt > phba->sli4_hba.pc_sli4_params.wqpcnt)
15147 pgcnt = phba->sli4_hba.pc_sli4_params.wqpcnt;
15148
15149 queue = kzalloc_node(sizeof(*queue) + (sizeof(void *) * pgcnt),
15150 GFP_KERNEL, cpu_to_node(cpu));
15151 if (!queue)
15152 return NULL;
15153
15154 INIT_LIST_HEAD(&queue->list);
15155 INIT_LIST_HEAD(&queue->_poll_list);
15156 INIT_LIST_HEAD(&queue->wq_list);
15157 INIT_LIST_HEAD(&queue->wqfull_list);
15158 INIT_LIST_HEAD(&queue->page_list);
15159 INIT_LIST_HEAD(&queue->child_list);
15160 INIT_LIST_HEAD(&queue->cpu_list);
15161
15162
15163
15164
15165 queue->page_count = pgcnt;
15166 queue->q_pgs = (void **)&queue[1];
15167 queue->entry_cnt_per_pg = hw_page_size / entry_size;
15168 queue->entry_size = entry_size;
15169 queue->entry_count = entry_count;
15170 queue->page_size = hw_page_size;
15171 queue->phba = phba;
15172
15173 for (x = 0; x < queue->page_count; x++) {
15174 dmabuf = kzalloc_node(sizeof(*dmabuf), GFP_KERNEL,
15175 dev_to_node(&phba->pcidev->dev));
15176 if (!dmabuf)
15177 goto out_fail;
15178 dmabuf->virt = dma_alloc_coherent(&phba->pcidev->dev,
15179 hw_page_size, &dmabuf->phys,
15180 GFP_KERNEL);
15181 if (!dmabuf->virt) {
15182 kfree(dmabuf);
15183 goto out_fail;
15184 }
15185 dmabuf->buffer_tag = x;
15186 list_add_tail(&dmabuf->list, &queue->page_list);
15187
15188 queue->q_pgs[x] = dmabuf->virt;
15189 }
15190 INIT_WORK(&queue->irqwork, lpfc_sli4_hba_process_cq);
15191 INIT_WORK(&queue->spwork, lpfc_sli4_sp_process_cq);
15192 INIT_DELAYED_WORK(&queue->sched_irqwork, lpfc_sli4_dly_hba_process_cq);
15193 INIT_DELAYED_WORK(&queue->sched_spwork, lpfc_sli4_dly_sp_process_cq);
15194
15195
15196
15197 return queue;
15198out_fail:
15199 lpfc_sli4_queue_free(queue);
15200 return NULL;
15201}
15202
15203
15204
15205
15206
15207
15208
15209
15210
15211
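/**
 * lpfc_dual_chute_pci_bar_map - Resolve a doorbell BAR set to its mapping
 * @phba: Pointer to HBA context object.
 * @pci_barset: BAR set identifier returned by a queue-create response.
 *
 * Returns the kernel virtual address of the already-mapped PCI BAR backing
 * the requested BAR set, or NULL if the BAR set is unknown or the PCI device
 * is not present.
 **/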
15212static void __iomem *
15213lpfc_dual_chute_pci_bar_map(struct lpfc_hba *phba, uint16_t pci_barset)
15214{
15215 if (!phba->pcidev)
15216 return NULL;
15217
15218 switch (pci_barset) {
15219 case WQ_PCI_BAR_0_AND_1:
15220 return phba->pci_bar0_memmap_p;
15221 case WQ_PCI_BAR_2_AND_3:
15222 return phba->pci_bar2_memmap_p;
15223 case WQ_PCI_BAR_4_AND_5:
15224 return phba->pci_bar4_memmap_p;
15225 default:
15226 break;
15227 }
15228 return NULL;
15229}
15230
15231
15232
15233
15234
15235
15236
15237
15238
15239
15240
15241
15242
15243
15244
15245
15246
15247
15248
15249
15250
15251
15252
15253
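/**
 * lpfc_modify_hba_eq_delay - Update the interrupt coalescing delay on EQs
 * @phba: Pointer to HBA context object.
 * @startq: First interrupt channel (EQ index) to update.
 * @numq: Number of EQs to update.
 * @usdelay: New delay in microseconds (capped at 0xFFFF).
 *
 * When the adapter supports the EQ delay register, the delay is written
 * directly to each EQ; otherwise a MODIFY_EQ_DELAY mailbox command is issued
 * with the delay converted to the hardware delay multiplier.
 **/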
15254void
15255lpfc_modify_hba_eq_delay(struct lpfc_hba *phba, uint32_t startq,
15256 uint32_t numq, uint32_t usdelay)
15257{
15258 struct lpfc_mbx_modify_eq_delay *eq_delay;
15259 LPFC_MBOXQ_t *mbox;
15260 struct lpfc_queue *eq;
15261 int cnt = 0, rc, length;
15262 uint32_t shdr_status, shdr_add_status;
15263 uint32_t dmult;
15264 int qidx;
15265 union lpfc_sli4_cfg_shdr *shdr;
15266
15267 if (startq >= phba->cfg_irq_chann)
15268 return;
15269
15270 if (usdelay > 0xFFFF) {
15271 lpfc_printf_log(phba, KERN_INFO, LOG_INIT | LOG_FCP | LOG_NVME,
15272 "6429 usdelay %d too large. Scaled down to "
15273 "0xFFFF.\n", usdelay);
15274 usdelay = 0xFFFF;
15275 }
15276
15277
15278 if (phba->sli.sli_flag & LPFC_SLI_USE_EQDR) {
15279 for (qidx = startq; qidx < phba->cfg_irq_chann; qidx++) {
15280 eq = phba->sli4_hba.hba_eq_hdl[qidx].eq;
15281 if (!eq)
15282 continue;
15283
15284 lpfc_sli4_mod_hba_eq_delay(phba, eq, usdelay);
15285
15286 if (++cnt >= numq)
15287 break;
15288 }
15289 return;
15290 }
15291
15292
15293
15294 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
15295 if (!mbox) {
15296 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
15297 "6428 Failed allocating mailbox cmd buffer."
15298 " EQ delay was not set.\n");
15299 return;
15300 }
15301 length = (sizeof(struct lpfc_mbx_modify_eq_delay) -
15302 sizeof(struct lpfc_sli4_cfg_mhdr));
15303 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON,
15304 LPFC_MBOX_OPCODE_MODIFY_EQ_DELAY,
15305 length, LPFC_SLI4_MBX_EMBED);
15306 eq_delay = &mbox->u.mqe.un.eq_delay;
15307
15308
15309 dmult = (usdelay * LPFC_DMULT_CONST) / LPFC_SEC_TO_USEC;
15310 if (dmult)
15311 dmult--;
15312 if (dmult > LPFC_DMULT_MAX)
15313 dmult = LPFC_DMULT_MAX;
15314
15315 for (qidx = startq; qidx < phba->cfg_irq_chann; qidx++) {
15316 eq = phba->sli4_hba.hba_eq_hdl[qidx].eq;
15317 if (!eq)
15318 continue;
15319 eq->q_mode = usdelay;
15320 eq_delay->u.request.eq[cnt].eq_id = eq->queue_id;
15321 eq_delay->u.request.eq[cnt].phase = 0;
15322 eq_delay->u.request.eq[cnt].delay_multi = dmult;
15323
15324 if (++cnt >= numq)
15325 break;
15326 }
15327 eq_delay->u.request.num_eq = cnt;
15328
15329 mbox->vport = phba->pport;
15330 mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
15331 mbox->ctx_buf = NULL;
15332 mbox->ctx_ndlp = NULL;
15333 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
15334 shdr = (union lpfc_sli4_cfg_shdr *) &eq_delay->header.cfg_shdr;
15335 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
15336 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
15337 if (shdr_status || shdr_add_status || rc) {
15338 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
15339 "2512 MODIFY_EQ_DELAY mailbox failed with "
15340 "status x%x add_status x%x, mbx status x%x\n",
15341 shdr_status, shdr_add_status, rc);
15342 }
15343 mempool_free(mbox, phba->mbox_mem_pool);
15344 return;
15345}
15346
15347
15348
15349
15350
15351
15352
15353
15354
15355
15356
15357
15358
15359
15360
15361
15362
15363
15364
15365
15366
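/**
 * lpfc_eq_create - Create an event queue on the adapter
 * @phba: Pointer to HBA context object.
 * @eq: Pointer to the driver event queue to instantiate.
 * @imax: Interrupt coalescing hint; not applied here (the delay multiplier
 *        is left at zero and programmed separately).
 *
 * Builds and issues an EQ_CREATE mailbox command describing the queue's
 * pages and entry count, then records the queue id and processing defaults
 * returned by the firmware.
 *
 * Returns 0 on success or a negative errno on failure.
 **/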
15367int
15368lpfc_eq_create(struct lpfc_hba *phba, struct lpfc_queue *eq, uint32_t imax)
15369{
15370 struct lpfc_mbx_eq_create *eq_create;
15371 LPFC_MBOXQ_t *mbox;
15372 int rc, length, status = 0;
15373 struct lpfc_dmabuf *dmabuf;
15374 uint32_t shdr_status, shdr_add_status;
15375 union lpfc_sli4_cfg_shdr *shdr;
15376 uint16_t dmult;
15377 uint32_t hw_page_size = phba->sli4_hba.pc_sli4_params.if_page_sz;
15378
15379
15380 if (!eq)
15381 return -ENODEV;
15382 if (!phba->sli4_hba.pc_sli4_params.supported)
15383 hw_page_size = SLI4_PAGE_SIZE;
15384
15385 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
15386 if (!mbox)
15387 return -ENOMEM;
15388 length = (sizeof(struct lpfc_mbx_eq_create) -
15389 sizeof(struct lpfc_sli4_cfg_mhdr));
15390 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON,
15391 LPFC_MBOX_OPCODE_EQ_CREATE,
15392 length, LPFC_SLI4_MBX_EMBED);
15393 eq_create = &mbox->u.mqe.un.eq_create;
15394 shdr = (union lpfc_sli4_cfg_shdr *) &eq_create->header.cfg_shdr;
15395 bf_set(lpfc_mbx_eq_create_num_pages, &eq_create->u.request,
15396 eq->page_count);
15397 bf_set(lpfc_eq_context_size, &eq_create->u.request.context,
15398 LPFC_EQE_SIZE);
15399 bf_set(lpfc_eq_context_valid, &eq_create->u.request.context, 1);
15400
15401
15402 if (phba->sli4_hba.pc_sli4_params.eqav) {
15403 bf_set(lpfc_mbox_hdr_version, &shdr->request,
15404 LPFC_Q_CREATE_VERSION_2);
15405 bf_set(lpfc_eq_context_autovalid, &eq_create->u.request.context,
15406 phba->sli4_hba.pc_sli4_params.eqav);
15407 }
15408
15409
15410 dmult = 0;
15411 bf_set(lpfc_eq_context_delay_multi, &eq_create->u.request.context,
15412 dmult);
15413 switch (eq->entry_count) {
15414 default:
15415 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
15416 "0360 Unsupported EQ count. (%d)\n",
15417 eq->entry_count);
15418 if (eq->entry_count < 256) {
15419 status = -EINVAL;
15420 goto out;
15421 }
15422 fallthrough;
15423 case 256:
15424 bf_set(lpfc_eq_context_count, &eq_create->u.request.context,
15425 LPFC_EQ_CNT_256);
15426 break;
15427 case 512:
15428 bf_set(lpfc_eq_context_count, &eq_create->u.request.context,
15429 LPFC_EQ_CNT_512);
15430 break;
15431 case 1024:
15432 bf_set(lpfc_eq_context_count, &eq_create->u.request.context,
15433 LPFC_EQ_CNT_1024);
15434 break;
15435 case 2048:
15436 bf_set(lpfc_eq_context_count, &eq_create->u.request.context,
15437 LPFC_EQ_CNT_2048);
15438 break;
15439 case 4096:
15440 bf_set(lpfc_eq_context_count, &eq_create->u.request.context,
15441 LPFC_EQ_CNT_4096);
15442 break;
15443 }
15444 list_for_each_entry(dmabuf, &eq->page_list, list) {
15445 memset(dmabuf->virt, 0, hw_page_size);
15446 eq_create->u.request.page[dmabuf->buffer_tag].addr_lo =
15447 putPaddrLow(dmabuf->phys);
15448 eq_create->u.request.page[dmabuf->buffer_tag].addr_hi =
15449 putPaddrHigh(dmabuf->phys);
15450 }
15451 mbox->vport = phba->pport;
15452 mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
15453 mbox->ctx_buf = NULL;
15454 mbox->ctx_ndlp = NULL;
15455 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
15456 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
15457 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
15458 if (shdr_status || shdr_add_status || rc) {
15459 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
15460 "2500 EQ_CREATE mailbox failed with "
15461 "status x%x add_status x%x, mbx status x%x\n",
15462 shdr_status, shdr_add_status, rc);
15463 status = -ENXIO;
15464 }
15465 eq->type = LPFC_EQ;
15466 eq->subtype = LPFC_NONE;
15467 eq->queue_id = bf_get(lpfc_mbx_eq_create_q_id, &eq_create->u.response);
15468 if (eq->queue_id == 0xFFFF)
15469 status = -ENXIO;
15470 eq->host_index = 0;
15471 eq->notify_interval = LPFC_EQ_NOTIFY_INTRVL;
15472 eq->max_proc_limit = LPFC_EQ_MAX_PROC_LIMIT;
15473out:
15474 mempool_free(mbox, phba->mbox_mem_pool);
15475 return status;
15476}
15477
15478static int lpfc_cq_poll_hdler(struct irq_poll *iop, int budget)
15479{
15480 struct lpfc_queue *cq = container_of(iop, struct lpfc_queue, iop);
15481
15482 __lpfc_sli4_hba_process_cq(cq, LPFC_IRQ_POLL);
15483
15484 return 1;
15485}
15486
15487
15488
15489
15490
15491
15492
15493
15494
15495
15496
15497
15498
15499
15500
15501
15502
15503
15504
15505
15506
15507
15508
15509
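/**
 * lpfc_cq_create - Create a completion queue and bind it to an event queue
 * @phba: Pointer to HBA context object.
 * @cq: Pointer to the driver completion queue to instantiate.
 * @eq: Pointer to the parent event queue.
 * @type: Queue type to record on success.
 * @subtype: Queue subtype to record on success.
 *
 * Builds and issues a CQ_CREATE mailbox command using the adapter's
 * supported CQ create version (version 2 also conveys the page size and
 * auto-valid setting), links the new CQ under @eq and initializes its
 * processing limits and irq_poll context.
 *
 * Returns 0 on success or a negative errno on failure.
 **/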
15510int
15511lpfc_cq_create(struct lpfc_hba *phba, struct lpfc_queue *cq,
15512 struct lpfc_queue *eq, uint32_t type, uint32_t subtype)
15513{
15514 struct lpfc_mbx_cq_create *cq_create;
15515 struct lpfc_dmabuf *dmabuf;
15516 LPFC_MBOXQ_t *mbox;
15517 int rc, length, status = 0;
15518 uint32_t shdr_status, shdr_add_status;
15519 union lpfc_sli4_cfg_shdr *shdr;
15520
15521
15522 if (!cq || !eq)
15523 return -ENODEV;
15524
15525 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
15526 if (!mbox)
15527 return -ENOMEM;
15528 length = (sizeof(struct lpfc_mbx_cq_create) -
15529 sizeof(struct lpfc_sli4_cfg_mhdr));
15530 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON,
15531 LPFC_MBOX_OPCODE_CQ_CREATE,
15532 length, LPFC_SLI4_MBX_EMBED);
15533 cq_create = &mbox->u.mqe.un.cq_create;
15534 shdr = (union lpfc_sli4_cfg_shdr *) &cq_create->header.cfg_shdr;
15535 bf_set(lpfc_mbx_cq_create_num_pages, &cq_create->u.request,
15536 cq->page_count);
15537 bf_set(lpfc_cq_context_event, &cq_create->u.request.context, 1);
15538 bf_set(lpfc_cq_context_valid, &cq_create->u.request.context, 1);
15539 bf_set(lpfc_mbox_hdr_version, &shdr->request,
15540 phba->sli4_hba.pc_sli4_params.cqv);
15541 if (phba->sli4_hba.pc_sli4_params.cqv == LPFC_Q_CREATE_VERSION_2) {
15542 bf_set(lpfc_mbx_cq_create_page_size, &cq_create->u.request,
15543 (cq->page_size / SLI4_PAGE_SIZE));
15544 bf_set(lpfc_cq_eq_id_2, &cq_create->u.request.context,
15545 eq->queue_id);
15546 bf_set(lpfc_cq_context_autovalid, &cq_create->u.request.context,
15547 phba->sli4_hba.pc_sli4_params.cqav);
15548 } else {
15549 bf_set(lpfc_cq_eq_id, &cq_create->u.request.context,
15550 eq->queue_id);
15551 }
15552 switch (cq->entry_count) {
15553 case 2048:
15554 case 4096:
15555 if (phba->sli4_hba.pc_sli4_params.cqv ==
15556 LPFC_Q_CREATE_VERSION_2) {
15557 cq_create->u.request.context.lpfc_cq_context_count =
15558 cq->entry_count;
15559 bf_set(lpfc_cq_context_count,
15560 &cq_create->u.request.context,
15561 LPFC_CQ_CNT_WORD7);
15562 break;
15563 }
15564 fallthrough;
15565 default:
15566 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
15567 "0361 Unsupported CQ count: "
15568 "entry cnt %d sz %d pg cnt %d\n",
15569 cq->entry_count, cq->entry_size,
15570 cq->page_count);
15571 if (cq->entry_count < 256) {
15572 status = -EINVAL;
15573 goto out;
15574 }
15575 fallthrough;
15576 case 256:
15577 bf_set(lpfc_cq_context_count, &cq_create->u.request.context,
15578 LPFC_CQ_CNT_256);
15579 break;
15580 case 512:
15581 bf_set(lpfc_cq_context_count, &cq_create->u.request.context,
15582 LPFC_CQ_CNT_512);
15583 break;
15584 case 1024:
15585 bf_set(lpfc_cq_context_count, &cq_create->u.request.context,
15586 LPFC_CQ_CNT_1024);
15587 break;
15588 }
15589 list_for_each_entry(dmabuf, &cq->page_list, list) {
15590 memset(dmabuf->virt, 0, cq->page_size);
15591 cq_create->u.request.page[dmabuf->buffer_tag].addr_lo =
15592 putPaddrLow(dmabuf->phys);
15593 cq_create->u.request.page[dmabuf->buffer_tag].addr_hi =
15594 putPaddrHigh(dmabuf->phys);
15595 }
15596 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
15597
15598
15599 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
15600 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
15601 if (shdr_status || shdr_add_status || rc) {
15602 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
15603 "2501 CQ_CREATE mailbox failed with "
15604 "status x%x add_status x%x, mbx status x%x\n",
15605 shdr_status, shdr_add_status, rc);
15606 status = -ENXIO;
15607 goto out;
15608 }
15609 cq->queue_id = bf_get(lpfc_mbx_cq_create_q_id, &cq_create->u.response);
15610 if (cq->queue_id == 0xFFFF) {
15611 status = -ENXIO;
15612 goto out;
15613 }
15614
15615 list_add_tail(&cq->list, &eq->child_list);
15616
15617 cq->type = type;
15618 cq->subtype = subtype;
15619 cq->queue_id = bf_get(lpfc_mbx_cq_create_q_id, &cq_create->u.response);
15620 cq->assoc_qid = eq->queue_id;
15621 cq->assoc_qp = eq;
15622 cq->host_index = 0;
15623 cq->notify_interval = LPFC_CQ_NOTIFY_INTRVL;
15624 cq->max_proc_limit = min(phba->cfg_cq_max_proc_limit, cq->entry_count);
15625
15626 if (cq->queue_id > phba->sli4_hba.cq_max)
15627 phba->sli4_hba.cq_max = cq->queue_id;
15628
15629 irq_poll_init(&cq->iop, LPFC_IRQ_POLL_WEIGHT, lpfc_cq_poll_hdler);
15630out:
15631 mempool_free(mbox, phba->mbox_mem_pool);
15632 return status;
15633}
15634
15635
15636
15637
15638
15639
15640
15641
15642
15643
15644
15645
15646
15647
15648
15649
15650
15651
15652
15653
15654
15655
15656
15657
15658
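/**
 * lpfc_cq_create_set - Create the set of NVMET completion queues
 * @phba: Pointer to HBA context object.
 * @cqp: Array of driver completion queues, one per configured NVMET MRQ.
 * @hdwq: Hardware queue array supplying the parent event queues.
 * @type: Queue type to record on each CQ.
 * @subtype: Queue subtype to record on each CQ.
 *
 * Issues a single non-embedded CQ_CREATE_SET mailbox command describing all
 * of the NVMET completion queues and their EQ bindings, then assigns the
 * firmware-returned base queue id sequentially across the set.
 *
 * Returns 0 on success or a negative errno on failure.
 **/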
15659int
15660lpfc_cq_create_set(struct lpfc_hba *phba, struct lpfc_queue **cqp,
15661 struct lpfc_sli4_hdw_queue *hdwq, uint32_t type,
15662 uint32_t subtype)
15663{
15664 struct lpfc_queue *cq;
15665 struct lpfc_queue *eq;
15666 struct lpfc_mbx_cq_create_set *cq_set;
15667 struct lpfc_dmabuf *dmabuf;
15668 LPFC_MBOXQ_t *mbox;
15669 int rc, length, alloclen, status = 0;
15670 int cnt, idx, numcq, page_idx = 0;
15671 uint32_t shdr_status, shdr_add_status;
15672 union lpfc_sli4_cfg_shdr *shdr;
15673 uint32_t hw_page_size = phba->sli4_hba.pc_sli4_params.if_page_sz;
15674
15675
15676 numcq = phba->cfg_nvmet_mrq;
15677 if (!cqp || !hdwq || !numcq)
15678 return -ENODEV;
15679
15680 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
15681 if (!mbox)
15682 return -ENOMEM;
15683
15684 length = sizeof(struct lpfc_mbx_cq_create_set);
15685 length += ((numcq * cqp[0]->page_count) *
15686 sizeof(struct dma_address));
15687 alloclen = lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_FCOE,
15688 LPFC_MBOX_OPCODE_FCOE_CQ_CREATE_SET, length,
15689 LPFC_SLI4_MBX_NEMBED);
15690 if (alloclen < length) {
15691 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
15692 "3098 Allocated DMA memory size (%d) is "
15693 "less than the requested DMA memory size "
15694 "(%d)\n", alloclen, length);
15695 status = -ENOMEM;
15696 goto out;
15697 }
15698 cq_set = mbox->sge_array->addr[0];
15699 shdr = (union lpfc_sli4_cfg_shdr *)&cq_set->cfg_shdr;
15700 bf_set(lpfc_mbox_hdr_version, &shdr->request, 0);
15701
15702 for (idx = 0; idx < numcq; idx++) {
15703 cq = cqp[idx];
15704 eq = hdwq[idx].hba_eq;
15705 if (!cq || !eq) {
15706 status = -ENOMEM;
15707 goto out;
15708 }
15709 if (!phba->sli4_hba.pc_sli4_params.supported)
15710 hw_page_size = cq->page_size;
15711
15712 switch (idx) {
15713 case 0:
15714 bf_set(lpfc_mbx_cq_create_set_page_size,
15715 &cq_set->u.request,
15716 (hw_page_size / SLI4_PAGE_SIZE));
15717 bf_set(lpfc_mbx_cq_create_set_num_pages,
15718 &cq_set->u.request, cq->page_count);
15719 bf_set(lpfc_mbx_cq_create_set_evt,
15720 &cq_set->u.request, 1);
15721 bf_set(lpfc_mbx_cq_create_set_valid,
15722 &cq_set->u.request, 1);
15723 bf_set(lpfc_mbx_cq_create_set_cqe_size,
15724 &cq_set->u.request, 0);
15725 bf_set(lpfc_mbx_cq_create_set_num_cq,
15726 &cq_set->u.request, numcq);
15727 bf_set(lpfc_mbx_cq_create_set_autovalid,
15728 &cq_set->u.request,
15729 phba->sli4_hba.pc_sli4_params.cqav);
15730 switch (cq->entry_count) {
15731 case 2048:
15732 case 4096:
15733 if (phba->sli4_hba.pc_sli4_params.cqv ==
15734 LPFC_Q_CREATE_VERSION_2) {
15735 bf_set(lpfc_mbx_cq_create_set_cqe_cnt,
15736 &cq_set->u.request,
15737 cq->entry_count);
15738 bf_set(lpfc_mbx_cq_create_set_cqe_cnt,
15739 &cq_set->u.request,
15740 LPFC_CQ_CNT_WORD7);
15741 break;
15742 }
15743 fallthrough;
15744 default:
15745 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
15746 "3118 Bad CQ count. (%d)\n",
15747 cq->entry_count);
15748 if (cq->entry_count < 256) {
15749 status = -EINVAL;
15750 goto out;
15751 }
15752 fallthrough;
15753 case 256:
15754 bf_set(lpfc_mbx_cq_create_set_cqe_cnt,
15755 &cq_set->u.request, LPFC_CQ_CNT_256);
15756 break;
15757 case 512:
15758 bf_set(lpfc_mbx_cq_create_set_cqe_cnt,
15759 &cq_set->u.request, LPFC_CQ_CNT_512);
15760 break;
15761 case 1024:
15762 bf_set(lpfc_mbx_cq_create_set_cqe_cnt,
15763 &cq_set->u.request, LPFC_CQ_CNT_1024);
15764 break;
15765 }
15766 bf_set(lpfc_mbx_cq_create_set_eq_id0,
15767 &cq_set->u.request, eq->queue_id);
15768 break;
15769 case 1:
15770 bf_set(lpfc_mbx_cq_create_set_eq_id1,
15771 &cq_set->u.request, eq->queue_id);
15772 break;
15773 case 2:
15774 bf_set(lpfc_mbx_cq_create_set_eq_id2,
15775 &cq_set->u.request, eq->queue_id);
15776 break;
15777 case 3:
15778 bf_set(lpfc_mbx_cq_create_set_eq_id3,
15779 &cq_set->u.request, eq->queue_id);
15780 break;
15781 case 4:
15782 bf_set(lpfc_mbx_cq_create_set_eq_id4,
15783 &cq_set->u.request, eq->queue_id);
15784 break;
15785 case 5:
15786 bf_set(lpfc_mbx_cq_create_set_eq_id5,
15787 &cq_set->u.request, eq->queue_id);
15788 break;
15789 case 6:
15790 bf_set(lpfc_mbx_cq_create_set_eq_id6,
15791 &cq_set->u.request, eq->queue_id);
15792 break;
15793 case 7:
15794 bf_set(lpfc_mbx_cq_create_set_eq_id7,
15795 &cq_set->u.request, eq->queue_id);
15796 break;
15797 case 8:
15798 bf_set(lpfc_mbx_cq_create_set_eq_id8,
15799 &cq_set->u.request, eq->queue_id);
15800 break;
15801 case 9:
15802 bf_set(lpfc_mbx_cq_create_set_eq_id9,
15803 &cq_set->u.request, eq->queue_id);
15804 break;
15805 case 10:
15806 bf_set(lpfc_mbx_cq_create_set_eq_id10,
15807 &cq_set->u.request, eq->queue_id);
15808 break;
15809 case 11:
15810 bf_set(lpfc_mbx_cq_create_set_eq_id11,
15811 &cq_set->u.request, eq->queue_id);
15812 break;
15813 case 12:
15814 bf_set(lpfc_mbx_cq_create_set_eq_id12,
15815 &cq_set->u.request, eq->queue_id);
15816 break;
15817 case 13:
15818 bf_set(lpfc_mbx_cq_create_set_eq_id13,
15819 &cq_set->u.request, eq->queue_id);
15820 break;
15821 case 14:
15822 bf_set(lpfc_mbx_cq_create_set_eq_id14,
15823 &cq_set->u.request, eq->queue_id);
15824 break;
15825 case 15:
15826 bf_set(lpfc_mbx_cq_create_set_eq_id15,
15827 &cq_set->u.request, eq->queue_id);
15828 break;
15829 }
15830
15831
15832 list_add_tail(&cq->list, &eq->child_list);
15833
15834 cq->type = type;
15835 cq->subtype = subtype;
15836 cq->assoc_qid = eq->queue_id;
15837 cq->assoc_qp = eq;
15838 cq->host_index = 0;
15839 cq->notify_interval = LPFC_CQ_NOTIFY_INTRVL;
15840 cq->max_proc_limit = min(phba->cfg_cq_max_proc_limit,
15841 cq->entry_count);
15842 cq->chann = idx;
15843
15844 rc = 0;
15845 list_for_each_entry(dmabuf, &cq->page_list, list) {
15846 memset(dmabuf->virt, 0, hw_page_size);
15847 cnt = page_idx + dmabuf->buffer_tag;
15848 cq_set->u.request.page[cnt].addr_lo =
15849 putPaddrLow(dmabuf->phys);
15850 cq_set->u.request.page[cnt].addr_hi =
15851 putPaddrHigh(dmabuf->phys);
15852 rc++;
15853 }
15854 page_idx += rc;
15855 }
15856
15857 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
15858
15859
15860 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
15861 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
15862 if (shdr_status || shdr_add_status || rc) {
15863 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
15864 "3119 CQ_CREATE_SET mailbox failed with "
15865 "status x%x add_status x%x, mbx status x%x\n",
15866 shdr_status, shdr_add_status, rc);
15867 status = -ENXIO;
15868 goto out;
15869 }
15870 rc = bf_get(lpfc_mbx_cq_create_set_base_id, &cq_set->u.response);
15871 if (rc == 0xFFFF) {
15872 status = -ENXIO;
15873 goto out;
15874 }
15875
15876 for (idx = 0; idx < numcq; idx++) {
15877 cq = cqp[idx];
15878 cq->queue_id = rc + idx;
15879 if (cq->queue_id > phba->sli4_hba.cq_max)
15880 phba->sli4_hba.cq_max = cq->queue_id;
15881 }
15882
15883out:
15884 lpfc_sli4_mbox_cmd_free(phba, mbox);
15885 return status;
15886}
15887
15888
15889
15890
15891
15892
15893
15894
15895
15896
15897
15898
15899
15900
15901
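/**
 * lpfc_mq_create_fb_init - Prepare a legacy MQ_CREATE mailbox command
 * @phba: Pointer to HBA context object.
 * @mq: Pointer to the driver mailbox queue.
 * @mbox: Mailbox command buffer to (re)initialize.
 * @cq: Pointer to the completion queue the MQ reports to.
 *
 * Fills @mbox with the original (non-extended) MQ_CREATE request; used as a
 * fallback when MQ_CREATE_EXT is rejected by the firmware.
 **/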
15902static void
15903lpfc_mq_create_fb_init(struct lpfc_hba *phba, struct lpfc_queue *mq,
15904 LPFC_MBOXQ_t *mbox, struct lpfc_queue *cq)
15905{
15906 struct lpfc_mbx_mq_create *mq_create;
15907 struct lpfc_dmabuf *dmabuf;
15908 int length;
15909
15910 length = (sizeof(struct lpfc_mbx_mq_create) -
15911 sizeof(struct lpfc_sli4_cfg_mhdr));
15912 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON,
15913 LPFC_MBOX_OPCODE_MQ_CREATE,
15914 length, LPFC_SLI4_MBX_EMBED);
15915 mq_create = &mbox->u.mqe.un.mq_create;
15916 bf_set(lpfc_mbx_mq_create_num_pages, &mq_create->u.request,
15917 mq->page_count);
15918 bf_set(lpfc_mq_context_cq_id, &mq_create->u.request.context,
15919 cq->queue_id);
15920 bf_set(lpfc_mq_context_valid, &mq_create->u.request.context, 1);
15921 switch (mq->entry_count) {
15922 case 16:
15923 bf_set(lpfc_mq_context_ring_size, &mq_create->u.request.context,
15924 LPFC_MQ_RING_SIZE_16);
15925 break;
15926 case 32:
15927 bf_set(lpfc_mq_context_ring_size, &mq_create->u.request.context,
15928 LPFC_MQ_RING_SIZE_32);
15929 break;
15930 case 64:
15931 bf_set(lpfc_mq_context_ring_size, &mq_create->u.request.context,
15932 LPFC_MQ_RING_SIZE_64);
15933 break;
15934 case 128:
15935 bf_set(lpfc_mq_context_ring_size, &mq_create->u.request.context,
15936 LPFC_MQ_RING_SIZE_128);
15937 break;
15938 }
15939 list_for_each_entry(dmabuf, &mq->page_list, list) {
15940 mq_create->u.request.page[dmabuf->buffer_tag].addr_lo =
15941 putPaddrLow(dmabuf->phys);
15942 mq_create->u.request.page[dmabuf->buffer_tag].addr_hi =
15943 putPaddrHigh(dmabuf->phys);
15944 }
15945}
15946
15947
15948
15949
15950
15951
15952
15953
15954
15955
15956
15957
15958
15959
15960
15961
15962
15963
15964
15965
15966
15967
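/**
 * lpfc_mq_create - Create the mailbox queue on the adapter
 * @phba: Pointer to HBA context object.
 * @mq: Pointer to the driver mailbox queue to instantiate.
 * @cq: Pointer to the completion queue the MQ reports to.
 * @subtype: Queue subtype to record on success.
 *
 * Issues an MQ_CREATE_EXT mailbox command with asynchronous event
 * subscriptions enabled; if the firmware rejects it, retries with the legacy
 * MQ_CREATE request. On success the MQ is linked under @cq.
 *
 * Returns 0 on success or a negative errno on failure.
 **/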
15968int32_t
15969lpfc_mq_create(struct lpfc_hba *phba, struct lpfc_queue *mq,
15970 struct lpfc_queue *cq, uint32_t subtype)
15971{
15972 struct lpfc_mbx_mq_create *mq_create;
15973 struct lpfc_mbx_mq_create_ext *mq_create_ext;
15974 struct lpfc_dmabuf *dmabuf;
15975 LPFC_MBOXQ_t *mbox;
15976 int rc, length, status = 0;
15977 uint32_t shdr_status, shdr_add_status;
15978 union lpfc_sli4_cfg_shdr *shdr;
15979 uint32_t hw_page_size = phba->sli4_hba.pc_sli4_params.if_page_sz;
15980
15981
15982 if (!mq || !cq)
15983 return -ENODEV;
15984 if (!phba->sli4_hba.pc_sli4_params.supported)
15985 hw_page_size = SLI4_PAGE_SIZE;
15986
15987 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
15988 if (!mbox)
15989 return -ENOMEM;
15990 length = (sizeof(struct lpfc_mbx_mq_create_ext) -
15991 sizeof(struct lpfc_sli4_cfg_mhdr));
15992 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON,
15993 LPFC_MBOX_OPCODE_MQ_CREATE_EXT,
15994 length, LPFC_SLI4_MBX_EMBED);
15995
15996 mq_create_ext = &mbox->u.mqe.un.mq_create_ext;
15997 shdr = (union lpfc_sli4_cfg_shdr *) &mq_create_ext->header.cfg_shdr;
15998 bf_set(lpfc_mbx_mq_create_ext_num_pages,
15999 &mq_create_ext->u.request, mq->page_count);
16000 bf_set(lpfc_mbx_mq_create_ext_async_evt_link,
16001 &mq_create_ext->u.request, 1);
16002 bf_set(lpfc_mbx_mq_create_ext_async_evt_fip,
16003 &mq_create_ext->u.request, 1);
16004 bf_set(lpfc_mbx_mq_create_ext_async_evt_group5,
16005 &mq_create_ext->u.request, 1);
16006 bf_set(lpfc_mbx_mq_create_ext_async_evt_fc,
16007 &mq_create_ext->u.request, 1);
16008 bf_set(lpfc_mbx_mq_create_ext_async_evt_sli,
16009 &mq_create_ext->u.request, 1);
16010 bf_set(lpfc_mq_context_valid, &mq_create_ext->u.request.context, 1);
16011 bf_set(lpfc_mbox_hdr_version, &shdr->request,
16012 phba->sli4_hba.pc_sli4_params.mqv);
16013 if (phba->sli4_hba.pc_sli4_params.mqv == LPFC_Q_CREATE_VERSION_1)
16014 bf_set(lpfc_mbx_mq_create_ext_cq_id, &mq_create_ext->u.request,
16015 cq->queue_id);
16016 else
16017 bf_set(lpfc_mq_context_cq_id, &mq_create_ext->u.request.context,
16018 cq->queue_id);
16019 switch (mq->entry_count) {
16020 default:
16021 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
16022 "0362 Unsupported MQ count. (%d)\n",
16023 mq->entry_count);
16024 if (mq->entry_count < 16) {
16025 status = -EINVAL;
16026 goto out;
16027 }
16028 fallthrough;
16029 case 16:
16030 bf_set(lpfc_mq_context_ring_size,
16031 &mq_create_ext->u.request.context,
16032 LPFC_MQ_RING_SIZE_16);
16033 break;
16034 case 32:
16035 bf_set(lpfc_mq_context_ring_size,
16036 &mq_create_ext->u.request.context,
16037 LPFC_MQ_RING_SIZE_32);
16038 break;
16039 case 64:
16040 bf_set(lpfc_mq_context_ring_size,
16041 &mq_create_ext->u.request.context,
16042 LPFC_MQ_RING_SIZE_64);
16043 break;
16044 case 128:
16045 bf_set(lpfc_mq_context_ring_size,
16046 &mq_create_ext->u.request.context,
16047 LPFC_MQ_RING_SIZE_128);
16048 break;
16049 }
16050 list_for_each_entry(dmabuf, &mq->page_list, list) {
16051 memset(dmabuf->virt, 0, hw_page_size);
16052 mq_create_ext->u.request.page[dmabuf->buffer_tag].addr_lo =
16053 putPaddrLow(dmabuf->phys);
16054 mq_create_ext->u.request.page[dmabuf->buffer_tag].addr_hi =
16055 putPaddrHigh(dmabuf->phys);
16056 }
16057 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
16058 mq->queue_id = bf_get(lpfc_mbx_mq_create_q_id,
16059 &mq_create_ext->u.response);
16060 if (rc != MBX_SUCCESS) {
16061 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
16062 "2795 MQ_CREATE_EXT failed with "
16063 "status x%x. Falling back to MQ_CREATE.\n",
16064 rc);
16065 lpfc_mq_create_fb_init(phba, mq, mbox, cq);
16066 mq_create = &mbox->u.mqe.un.mq_create;
16067 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
16068 shdr = (union lpfc_sli4_cfg_shdr *) &mq_create->header.cfg_shdr;
16069 mq->queue_id = bf_get(lpfc_mbx_mq_create_q_id,
16070 &mq_create->u.response);
16071 }
16072
16073
16074 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
16075 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
16076 if (shdr_status || shdr_add_status || rc) {
16077 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
16078 "2502 MQ_CREATE mailbox failed with "
16079 "status x%x add_status x%x, mbx status x%x\n",
16080 shdr_status, shdr_add_status, rc);
16081 status = -ENXIO;
16082 goto out;
16083 }
16084 if (mq->queue_id == 0xFFFF) {
16085 status = -ENXIO;
16086 goto out;
16087 }
16088 mq->type = LPFC_MQ;
16089 mq->assoc_qid = cq->queue_id;
16090 mq->subtype = subtype;
16091 mq->host_index = 0;
16092 mq->hba_index = 0;
16093
16094
16095 list_add_tail(&mq->list, &cq->child_list);
16096out:
16097 mempool_free(mbox, phba->mbox_mem_pool);
16098 return status;
16099}
16100
16101
16102
16103
16104
16105
16106
16107
16108
16109
16110
16111
16112
16113
16114
16115
16116
16117
16118
16119
16120
16121
16122
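/**
 * lpfc_wq_create - Create a work queue on the adapter
 * @phba: Pointer to HBA context object.
 * @wq: Pointer to the driver work queue to instantiate.
 * @cq: Pointer to the completion queue the WQ reports to.
 * @subtype: Queue subtype to record on success.
 *
 * Issues a WQ_CREATE mailbox command (version 1 when 128-byte WQEs are
 * supported or a large page size is used), resolves the doorbell register
 * reported by the firmware and, when direct packet push (DPP) is granted,
 * maps the DPP window and attempts to mark it write-combining on x86
 * (DPP is disabled if that is not possible). The WQ is then linked
 * under @cq.
 *
 * Returns 0 on success or a negative errno on failure.
 **/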
16123int
16124lpfc_wq_create(struct lpfc_hba *phba, struct lpfc_queue *wq,
16125 struct lpfc_queue *cq, uint32_t subtype)
16126{
16127 struct lpfc_mbx_wq_create *wq_create;
16128 struct lpfc_dmabuf *dmabuf;
16129 LPFC_MBOXQ_t *mbox;
16130 int rc, length, status = 0;
16131 uint32_t shdr_status, shdr_add_status;
16132 union lpfc_sli4_cfg_shdr *shdr;
16133 uint32_t hw_page_size = phba->sli4_hba.pc_sli4_params.if_page_sz;
16134 struct dma_address *page;
16135 void __iomem *bar_memmap_p;
16136 uint32_t db_offset;
16137 uint16_t pci_barset;
16138 uint8_t dpp_barset;
16139 uint32_t dpp_offset;
16140 uint8_t wq_create_version;
16141#ifdef CONFIG_X86
16142 unsigned long pg_addr;
16143#endif
16144
16145
16146 if (!wq || !cq)
16147 return -ENODEV;
16148 if (!phba->sli4_hba.pc_sli4_params.supported)
16149 hw_page_size = wq->page_size;
16150
16151 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
16152 if (!mbox)
16153 return -ENOMEM;
16154 length = (sizeof(struct lpfc_mbx_wq_create) -
16155 sizeof(struct lpfc_sli4_cfg_mhdr));
16156 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_FCOE,
16157 LPFC_MBOX_OPCODE_FCOE_WQ_CREATE,
16158 length, LPFC_SLI4_MBX_EMBED);
16159 wq_create = &mbox->u.mqe.un.wq_create;
16160 shdr = (union lpfc_sli4_cfg_shdr *) &wq_create->header.cfg_shdr;
16161 bf_set(lpfc_mbx_wq_create_num_pages, &wq_create->u.request,
16162 wq->page_count);
16163 bf_set(lpfc_mbx_wq_create_cq_id, &wq_create->u.request,
16164 cq->queue_id);
16165
16166
16167 bf_set(lpfc_mbox_hdr_version, &shdr->request,
16168 phba->sli4_hba.pc_sli4_params.wqv);
16169
16170 if ((phba->sli4_hba.pc_sli4_params.wqsize & LPFC_WQ_SZ128_SUPPORT) ||
16171 (wq->page_size > SLI4_PAGE_SIZE))
16172 wq_create_version = LPFC_Q_CREATE_VERSION_1;
16173 else
16174 wq_create_version = LPFC_Q_CREATE_VERSION_0;
16175
16176 switch (wq_create_version) {
16177 case LPFC_Q_CREATE_VERSION_1:
16178 bf_set(lpfc_mbx_wq_create_wqe_count, &wq_create->u.request_1,
16179 wq->entry_count);
16180 bf_set(lpfc_mbox_hdr_version, &shdr->request,
16181 LPFC_Q_CREATE_VERSION_1);
16182
16183 switch (wq->entry_size) {
16184 default:
16185 case 64:
16186 bf_set(lpfc_mbx_wq_create_wqe_size,
16187 &wq_create->u.request_1,
16188 LPFC_WQ_WQE_SIZE_64);
16189 break;
16190 case 128:
16191 bf_set(lpfc_mbx_wq_create_wqe_size,
16192 &wq_create->u.request_1,
16193 LPFC_WQ_WQE_SIZE_128);
16194 break;
16195 }
16196
16197 bf_set(lpfc_mbx_wq_create_dpp_req, &wq_create->u.request_1, 1);
16198 bf_set(lpfc_mbx_wq_create_page_size,
16199 &wq_create->u.request_1,
16200 (wq->page_size / SLI4_PAGE_SIZE));
16201 page = wq_create->u.request_1.page;
16202 break;
16203 default:
16204 page = wq_create->u.request.page;
16205 break;
16206 }
16207
16208 list_for_each_entry(dmabuf, &wq->page_list, list) {
16209 memset(dmabuf->virt, 0, hw_page_size);
16210 page[dmabuf->buffer_tag].addr_lo = putPaddrLow(dmabuf->phys);
16211 page[dmabuf->buffer_tag].addr_hi = putPaddrHigh(dmabuf->phys);
16212 }
16213
16214 if (phba->sli4_hba.fw_func_mode & LPFC_DUA_MODE)
16215 bf_set(lpfc_mbx_wq_create_dua, &wq_create->u.request, 1);
16216
16217 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
16218
16219 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
16220 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
16221 if (shdr_status || shdr_add_status || rc) {
16222 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
16223 "2503 WQ_CREATE mailbox failed with "
16224 "status x%x add_status x%x, mbx status x%x\n",
16225 shdr_status, shdr_add_status, rc);
16226 status = -ENXIO;
16227 goto out;
16228 }
16229
16230 if (wq_create_version == LPFC_Q_CREATE_VERSION_0)
16231 wq->queue_id = bf_get(lpfc_mbx_wq_create_q_id,
16232 &wq_create->u.response);
16233 else
16234 wq->queue_id = bf_get(lpfc_mbx_wq_create_v1_q_id,
16235 &wq_create->u.response_1);
16236
16237 if (wq->queue_id == 0xFFFF) {
16238 status = -ENXIO;
16239 goto out;
16240 }
16241
16242 wq->db_format = LPFC_DB_LIST_FORMAT;
16243 if (wq_create_version == LPFC_Q_CREATE_VERSION_0) {
16244 if (phba->sli4_hba.fw_func_mode & LPFC_DUA_MODE) {
16245 wq->db_format = bf_get(lpfc_mbx_wq_create_db_format,
16246 &wq_create->u.response);
16247 if ((wq->db_format != LPFC_DB_LIST_FORMAT) &&
16248 (wq->db_format != LPFC_DB_RING_FORMAT)) {
16249 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
16250 "3265 WQ[%d] doorbell format "
16251 "not supported: x%x\n",
16252 wq->queue_id, wq->db_format);
16253 status = -EINVAL;
16254 goto out;
16255 }
16256 pci_barset = bf_get(lpfc_mbx_wq_create_bar_set,
16257 &wq_create->u.response);
16258 bar_memmap_p = lpfc_dual_chute_pci_bar_map(phba,
16259 pci_barset);
16260 if (!bar_memmap_p) {
16261 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
16262 "3263 WQ[%d] failed to memmap "
16263 "pci barset:x%x\n",
16264 wq->queue_id, pci_barset);
16265 status = -ENOMEM;
16266 goto out;
16267 }
16268 db_offset = wq_create->u.response.doorbell_offset;
16269 if ((db_offset != LPFC_ULP0_WQ_DOORBELL) &&
16270 (db_offset != LPFC_ULP1_WQ_DOORBELL)) {
16271 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
16272 "3252 WQ[%d] doorbell offset "
16273 "not supported: x%x\n",
16274 wq->queue_id, db_offset);
16275 status = -EINVAL;
16276 goto out;
16277 }
16278 wq->db_regaddr = bar_memmap_p + db_offset;
16279 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
16280 "3264 WQ[%d]: barset:x%x, offset:x%x, "
16281 "format:x%x\n", wq->queue_id,
16282 pci_barset, db_offset, wq->db_format);
16283 } else
16284 wq->db_regaddr = phba->sli4_hba.WQDBregaddr;
16285 } else {
16286
16287 wq->dpp_enable = bf_get(lpfc_mbx_wq_create_dpp_rsp,
16288 &wq_create->u.response_1);
16289 if (wq->dpp_enable) {
16290 pci_barset = bf_get(lpfc_mbx_wq_create_v1_bar_set,
16291 &wq_create->u.response_1);
16292 bar_memmap_p = lpfc_dual_chute_pci_bar_map(phba,
16293 pci_barset);
16294 if (!bar_memmap_p) {
16295 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
16296 "3267 WQ[%d] failed to memmap "
16297 "pci barset:x%x\n",
16298 wq->queue_id, pci_barset);
16299 status = -ENOMEM;
16300 goto out;
16301 }
16302 db_offset = wq_create->u.response_1.doorbell_offset;
16303 wq->db_regaddr = bar_memmap_p + db_offset;
16304 wq->dpp_id = bf_get(lpfc_mbx_wq_create_dpp_id,
16305 &wq_create->u.response_1);
16306 dpp_barset = bf_get(lpfc_mbx_wq_create_dpp_bar,
16307 &wq_create->u.response_1);
16308 bar_memmap_p = lpfc_dual_chute_pci_bar_map(phba,
16309 dpp_barset);
16310 if (!bar_memmap_p) {
16311 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
16312 "3268 WQ[%d] failed to memmap "
16313 "pci barset:x%x\n",
16314 wq->queue_id, dpp_barset);
16315 status = -ENOMEM;
16316 goto out;
16317 }
16318 dpp_offset = wq_create->u.response_1.dpp_offset;
16319 wq->dpp_regaddr = bar_memmap_p + dpp_offset;
16320 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
16321 "3271 WQ[%d]: barset:x%x, offset:x%x, "
16322 "dpp_id:x%x dpp_barset:x%x "
16323 "dpp_offset:x%x\n",
16324 wq->queue_id, pci_barset, db_offset,
16325 wq->dpp_id, dpp_barset, dpp_offset);
16326
16327#ifdef CONFIG_X86
16328
16329 pg_addr = (unsigned long)(wq->dpp_regaddr) & PAGE_MASK;
16330 rc = set_memory_wc(pg_addr, 1);
16331 if (rc) {
16332 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
16333 "3272 Cannot setup Combined "
16334 "Write on WQ[%d] - disable DPP\n",
16335 wq->queue_id);
16336 phba->cfg_enable_dpp = 0;
16337 }
16338#else
16339 phba->cfg_enable_dpp = 0;
16340#endif
16341 } else
16342 wq->db_regaddr = phba->sli4_hba.WQDBregaddr;
16343 }
16344 wq->pring = kzalloc(sizeof(struct lpfc_sli_ring), GFP_KERNEL);
16345 if (wq->pring == NULL) {
16346 status = -ENOMEM;
16347 goto out;
16348 }
16349 wq->type = LPFC_WQ;
16350 wq->assoc_qid = cq->queue_id;
16351 wq->subtype = subtype;
16352 wq->host_index = 0;
16353 wq->hba_index = 0;
16354 wq->notify_interval = LPFC_WQ_NOTIFY_INTRVL;
16355
16356
16357 list_add_tail(&wq->list, &cq->child_list);
16358out:
16359 mempool_free(mbox, phba->mbox_mem_pool);
16360 return status;
16361}
16362
16363
16364
16365
16366
16367
16368
16369
16370
16371
16372
16373
16374
16375
16376
16377
16378
16379
16380
16381
16382
16383
16384
16385
16386
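/**
 * lpfc_rq_create - Create a header/data receive queue pair on the adapter
 * @phba: Pointer to HBA context object.
 * @hrq: Pointer to the driver header receive queue.
 * @drq: Pointer to the driver data receive queue.
 * @cq: Pointer to the completion queue the pair reports to.
 * @subtype: Queue subtype to record on success.
 *
 * Issues one RQ_CREATE mailbox command for the header queue (resolving its
 * doorbell register when dual-ULP mode is active) and a second for the data
 * queue, then links both queues under @cq. The two queues must have matching
 * entry counts.
 *
 * Returns 0 on success or a negative errno on failure.
 **/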
16387int
16388lpfc_rq_create(struct lpfc_hba *phba, struct lpfc_queue *hrq,
16389 struct lpfc_queue *drq, struct lpfc_queue *cq, uint32_t subtype)
16390{
16391 struct lpfc_mbx_rq_create *rq_create;
16392 struct lpfc_dmabuf *dmabuf;
16393 LPFC_MBOXQ_t *mbox;
16394 int rc, length, status = 0;
16395 uint32_t shdr_status, shdr_add_status;
16396 union lpfc_sli4_cfg_shdr *shdr;
16397 uint32_t hw_page_size = phba->sli4_hba.pc_sli4_params.if_page_sz;
16398 void __iomem *bar_memmap_p;
16399 uint32_t db_offset;
16400 uint16_t pci_barset;
16401
16402
16403 if (!hrq || !drq || !cq)
16404 return -ENODEV;
16405 if (!phba->sli4_hba.pc_sli4_params.supported)
16406 hw_page_size = SLI4_PAGE_SIZE;
16407
16408 if (hrq->entry_count != drq->entry_count)
16409 return -EINVAL;
16410 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
16411 if (!mbox)
16412 return -ENOMEM;
16413 length = (sizeof(struct lpfc_mbx_rq_create) -
16414 sizeof(struct lpfc_sli4_cfg_mhdr));
16415 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_FCOE,
16416 LPFC_MBOX_OPCODE_FCOE_RQ_CREATE,
16417 length, LPFC_SLI4_MBX_EMBED);
16418 rq_create = &mbox->u.mqe.un.rq_create;
16419 shdr = (union lpfc_sli4_cfg_shdr *) &rq_create->header.cfg_shdr;
16420 bf_set(lpfc_mbox_hdr_version, &shdr->request,
16421 phba->sli4_hba.pc_sli4_params.rqv);
16422 if (phba->sli4_hba.pc_sli4_params.rqv == LPFC_Q_CREATE_VERSION_1) {
16423 bf_set(lpfc_rq_context_rqe_count_1,
16424 &rq_create->u.request.context,
16425 hrq->entry_count);
16426 rq_create->u.request.context.buffer_size = LPFC_HDR_BUF_SIZE;
16427 bf_set(lpfc_rq_context_rqe_size,
16428 &rq_create->u.request.context,
16429 LPFC_RQE_SIZE_8);
16430 bf_set(lpfc_rq_context_page_size,
16431 &rq_create->u.request.context,
16432 LPFC_RQ_PAGE_SIZE_4096);
16433 } else {
16434 switch (hrq->entry_count) {
16435 default:
16436 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
16437 "2535 Unsupported RQ count. (%d)\n",
16438 hrq->entry_count);
16439 if (hrq->entry_count < 512) {
16440 status = -EINVAL;
16441 goto out;
16442 }
16443 fallthrough;
16444 case 512:
16445 bf_set(lpfc_rq_context_rqe_count,
16446 &rq_create->u.request.context,
16447 LPFC_RQ_RING_SIZE_512);
16448 break;
16449 case 1024:
16450 bf_set(lpfc_rq_context_rqe_count,
16451 &rq_create->u.request.context,
16452 LPFC_RQ_RING_SIZE_1024);
16453 break;
16454 case 2048:
16455 bf_set(lpfc_rq_context_rqe_count,
16456 &rq_create->u.request.context,
16457 LPFC_RQ_RING_SIZE_2048);
16458 break;
16459 case 4096:
16460 bf_set(lpfc_rq_context_rqe_count,
16461 &rq_create->u.request.context,
16462 LPFC_RQ_RING_SIZE_4096);
16463 break;
16464 }
16465 bf_set(lpfc_rq_context_buf_size, &rq_create->u.request.context,
16466 LPFC_HDR_BUF_SIZE);
16467 }
16468 bf_set(lpfc_rq_context_cq_id, &rq_create->u.request.context,
16469 cq->queue_id);
16470 bf_set(lpfc_mbx_rq_create_num_pages, &rq_create->u.request,
16471 hrq->page_count);
16472 list_for_each_entry(dmabuf, &hrq->page_list, list) {
16473 memset(dmabuf->virt, 0, hw_page_size);
16474 rq_create->u.request.page[dmabuf->buffer_tag].addr_lo =
16475 putPaddrLow(dmabuf->phys);
16476 rq_create->u.request.page[dmabuf->buffer_tag].addr_hi =
16477 putPaddrHigh(dmabuf->phys);
16478 }
16479 if (phba->sli4_hba.fw_func_mode & LPFC_DUA_MODE)
16480 bf_set(lpfc_mbx_rq_create_dua, &rq_create->u.request, 1);
16481
16482 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
16483
16484 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
16485 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
16486 if (shdr_status || shdr_add_status || rc) {
16487 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
16488 "2504 RQ_CREATE mailbox failed with "
16489 "status x%x add_status x%x, mbx status x%x\n",
16490 shdr_status, shdr_add_status, rc);
16491 status = -ENXIO;
16492 goto out;
16493 }
16494 hrq->queue_id = bf_get(lpfc_mbx_rq_create_q_id, &rq_create->u.response);
16495 if (hrq->queue_id == 0xFFFF) {
16496 status = -ENXIO;
16497 goto out;
16498 }
16499
16500 if (phba->sli4_hba.fw_func_mode & LPFC_DUA_MODE) {
16501 hrq->db_format = bf_get(lpfc_mbx_rq_create_db_format,
16502 &rq_create->u.response);
16503 if ((hrq->db_format != LPFC_DB_LIST_FORMAT) &&
16504 (hrq->db_format != LPFC_DB_RING_FORMAT)) {
16505 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
16506 "3262 RQ [%d] doorbell format not "
16507 "supported: x%x\n", hrq->queue_id,
16508 hrq->db_format);
16509 status = -EINVAL;
16510 goto out;
16511 }
16512
16513 pci_barset = bf_get(lpfc_mbx_rq_create_bar_set,
16514 &rq_create->u.response);
16515 bar_memmap_p = lpfc_dual_chute_pci_bar_map(phba, pci_barset);
16516 if (!bar_memmap_p) {
16517 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
16518 "3269 RQ[%d] failed to memmap pci "
16519 "barset:x%x\n", hrq->queue_id,
16520 pci_barset);
16521 status = -ENOMEM;
16522 goto out;
16523 }
16524
16525 db_offset = rq_create->u.response.doorbell_offset;
16526 if ((db_offset != LPFC_ULP0_RQ_DOORBELL) &&
16527 (db_offset != LPFC_ULP1_RQ_DOORBELL)) {
16528 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
16529 "3270 RQ[%d] doorbell offset not "
16530 "supported: x%x\n", hrq->queue_id,
16531 db_offset);
16532 status = -EINVAL;
16533 goto out;
16534 }
16535 hrq->db_regaddr = bar_memmap_p + db_offset;
16536 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
16537 "3266 RQ[qid:%d]: barset:x%x, offset:x%x, "
16538 "format:x%x\n", hrq->queue_id, pci_barset,
16539 db_offset, hrq->db_format);
16540 } else {
16541 hrq->db_format = LPFC_DB_RING_FORMAT;
16542 hrq->db_regaddr = phba->sli4_hba.RQDBregaddr;
16543 }
16544 hrq->type = LPFC_HRQ;
16545 hrq->assoc_qid = cq->queue_id;
16546 hrq->subtype = subtype;
16547 hrq->host_index = 0;
16548 hrq->hba_index = 0;
16549 hrq->notify_interval = LPFC_RQ_NOTIFY_INTRVL;
16550
16551
16552 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_FCOE,
16553 LPFC_MBOX_OPCODE_FCOE_RQ_CREATE,
16554 length, LPFC_SLI4_MBX_EMBED);
16555 bf_set(lpfc_mbox_hdr_version, &shdr->request,
16556 phba->sli4_hba.pc_sli4_params.rqv);
16557 if (phba->sli4_hba.pc_sli4_params.rqv == LPFC_Q_CREATE_VERSION_1) {
16558 bf_set(lpfc_rq_context_rqe_count_1,
16559 &rq_create->u.request.context, hrq->entry_count);
16560 if (subtype == LPFC_NVMET)
16561 rq_create->u.request.context.buffer_size =
16562 LPFC_NVMET_DATA_BUF_SIZE;
16563 else
16564 rq_create->u.request.context.buffer_size =
16565 LPFC_DATA_BUF_SIZE;
16566 bf_set(lpfc_rq_context_rqe_size, &rq_create->u.request.context,
16567 LPFC_RQE_SIZE_8);
16568 bf_set(lpfc_rq_context_page_size, &rq_create->u.request.context,
16569 (PAGE_SIZE/SLI4_PAGE_SIZE));
16570 } else {
16571 switch (drq->entry_count) {
16572 default:
16573 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
16574 "2536 Unsupported RQ count. (%d)\n",
16575 drq->entry_count);
16576 if (drq->entry_count < 512) {
16577 status = -EINVAL;
16578 goto out;
16579 }
16580 fallthrough;
16581 case 512:
16582 bf_set(lpfc_rq_context_rqe_count,
16583 &rq_create->u.request.context,
16584 LPFC_RQ_RING_SIZE_512);
16585 break;
16586 case 1024:
16587 bf_set(lpfc_rq_context_rqe_count,
16588 &rq_create->u.request.context,
16589 LPFC_RQ_RING_SIZE_1024);
16590 break;
16591 case 2048:
16592 bf_set(lpfc_rq_context_rqe_count,
16593 &rq_create->u.request.context,
16594 LPFC_RQ_RING_SIZE_2048);
16595 break;
16596 case 4096:
16597 bf_set(lpfc_rq_context_rqe_count,
16598 &rq_create->u.request.context,
16599 LPFC_RQ_RING_SIZE_4096);
16600 break;
16601 }
16602 if (subtype == LPFC_NVMET)
16603 bf_set(lpfc_rq_context_buf_size,
16604 &rq_create->u.request.context,
16605 LPFC_NVMET_DATA_BUF_SIZE);
16606 else
16607 bf_set(lpfc_rq_context_buf_size,
16608 &rq_create->u.request.context,
16609 LPFC_DATA_BUF_SIZE);
16610 }
16611 bf_set(lpfc_rq_context_cq_id, &rq_create->u.request.context,
16612 cq->queue_id);
16613 bf_set(lpfc_mbx_rq_create_num_pages, &rq_create->u.request,
16614 drq->page_count);
16615 list_for_each_entry(dmabuf, &drq->page_list, list) {
16616 rq_create->u.request.page[dmabuf->buffer_tag].addr_lo =
16617 putPaddrLow(dmabuf->phys);
16618 rq_create->u.request.page[dmabuf->buffer_tag].addr_hi =
16619 putPaddrHigh(dmabuf->phys);
16620 }
16621 if (phba->sli4_hba.fw_func_mode & LPFC_DUA_MODE)
16622 bf_set(lpfc_mbx_rq_create_dua, &rq_create->u.request, 1);
16623 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
16624
16625 shdr = (union lpfc_sli4_cfg_shdr *) &rq_create->header.cfg_shdr;
16626 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
16627 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
16628 if (shdr_status || shdr_add_status || rc) {
16629 status = -ENXIO;
16630 goto out;
16631 }
16632 drq->queue_id = bf_get(lpfc_mbx_rq_create_q_id, &rq_create->u.response);
16633 if (drq->queue_id == 0xFFFF) {
16634 status = -ENXIO;
16635 goto out;
16636 }
16637 drq->type = LPFC_DRQ;
16638 drq->assoc_qid = cq->queue_id;
16639 drq->subtype = subtype;
16640 drq->host_index = 0;
16641 drq->hba_index = 0;
16642 drq->notify_interval = LPFC_RQ_NOTIFY_INTRVL;
16643
16644
16645 list_add_tail(&hrq->list, &cq->child_list);
16646 list_add_tail(&drq->list, &cq->child_list);
16647
16648out:
16649 mempool_free(mbox, phba->mbox_mem_pool);
16650 return status;
16651}
16652
16653
16654
16655
16656
16657
16658
16659
16660
16661
16662
16663
16664
16665
16666
16667
16668
16669
16670
16671
16672
16673
16674
16675
16676
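/**
 * lpfc_mrq_create - Create the NVMET Multi-RQ (header/data) queue pairs
 * @phba: HBA structure that indicates port to create the queues on.
 * @hrqp: Array of header receive queues to set up.
 * @drqp: Array of data receive queues to set up.
 * @cqp: Array of completion queues associated with each RQ pair.
 * @subtype: Functional purpose of the queues (e.g. LPFC_NVMET).
 *
 * Builds a single non-embedded RQ_CREATE version 2 mailbox command that
 * describes every header/data RQ pair, issues it to the port, and then
 * assigns the queue ids returned by the firmware to each queue.
 *
 * Return: 0 on success; -ENODEV or -EINVAL for invalid parameters; -ENOMEM
 * if mailbox memory cannot be allocated; -ENXIO if the mailbox fails.
 **/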
16677int
16678lpfc_mrq_create(struct lpfc_hba *phba, struct lpfc_queue **hrqp,
16679 struct lpfc_queue **drqp, struct lpfc_queue **cqp,
16680 uint32_t subtype)
16681{
16682 struct lpfc_queue *hrq, *drq, *cq;
16683 struct lpfc_mbx_rq_create_v2 *rq_create;
16684 struct lpfc_dmabuf *dmabuf;
16685 LPFC_MBOXQ_t *mbox;
16686 int rc, length, alloclen, status = 0;
16687 int cnt, idx, numrq, page_idx = 0;
16688 uint32_t shdr_status, shdr_add_status;
16689 union lpfc_sli4_cfg_shdr *shdr;
16690 uint32_t hw_page_size = phba->sli4_hba.pc_sli4_params.if_page_sz;
16691
16692 numrq = phba->cfg_nvmet_mrq;
16693
16694 if (!hrqp || !drqp || !cqp || !numrq)
16695 return -ENODEV;
16696 if (!phba->sli4_hba.pc_sli4_params.supported)
16697 hw_page_size = SLI4_PAGE_SIZE;
16698
16699 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
16700 if (!mbox)
16701 return -ENOMEM;
16702
16703 length = sizeof(struct lpfc_mbx_rq_create_v2);
16704 length += ((2 * numrq * hrqp[0]->page_count) *
16705 sizeof(struct dma_address));
16706
16707 alloclen = lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_FCOE,
16708 LPFC_MBOX_OPCODE_FCOE_RQ_CREATE, length,
16709 LPFC_SLI4_MBX_NEMBED);
16710 if (alloclen < length) {
16711 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
16712 "3099 Allocated DMA memory size (%d) is "
16713 "less than the requested DMA memory size "
16714 "(%d)\n", alloclen, length);
16715 status = -ENOMEM;
16716 goto out;
16717 }
16718
16719
16720
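	/* get the first SGE entry from the non-embedded DMA memory */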
16721 rq_create = mbox->sge_array->addr[0];
16722 shdr = (union lpfc_sli4_cfg_shdr *)&rq_create->cfg_shdr;
16723
16724 bf_set(lpfc_mbox_hdr_version, &shdr->request, LPFC_Q_CREATE_VERSION_2);
16725 cnt = 0;
16726
16727 for (idx = 0; idx < numrq; idx++) {
16728 hrq = hrqp[idx];
16729 drq = drqp[idx];
16730 cq = cqp[idx];
16731
16732
16733 if (!hrq || !drq || !cq) {
16734 status = -ENODEV;
16735 goto out;
16736 }
16737
16738 if (hrq->entry_count != drq->entry_count) {
16739 status = -EINVAL;
16740 goto out;
16741 }
16742
16743 if (idx == 0) {
16744 bf_set(lpfc_mbx_rq_create_num_pages,
16745 &rq_create->u.request,
16746 hrq->page_count);
16747 bf_set(lpfc_mbx_rq_create_rq_cnt,
16748 &rq_create->u.request, (numrq * 2));
16749 bf_set(lpfc_mbx_rq_create_dnb, &rq_create->u.request,
16750 1);
16751 bf_set(lpfc_rq_context_base_cq,
16752 &rq_create->u.request.context,
16753 cq->queue_id);
16754 bf_set(lpfc_rq_context_data_size,
16755 &rq_create->u.request.context,
16756 LPFC_NVMET_DATA_BUF_SIZE);
16757 bf_set(lpfc_rq_context_hdr_size,
16758 &rq_create->u.request.context,
16759 LPFC_HDR_BUF_SIZE);
16760 bf_set(lpfc_rq_context_rqe_count_1,
16761 &rq_create->u.request.context,
16762 hrq->entry_count);
16763 bf_set(lpfc_rq_context_rqe_size,
16764 &rq_create->u.request.context,
16765 LPFC_RQE_SIZE_8);
16766 bf_set(lpfc_rq_context_page_size,
16767 &rq_create->u.request.context,
16768 (PAGE_SIZE/SLI4_PAGE_SIZE));
16769 }
16770 rc = 0;
16771 list_for_each_entry(dmabuf, &hrq->page_list, list) {
16772 memset(dmabuf->virt, 0, hw_page_size);
16773 cnt = page_idx + dmabuf->buffer_tag;
16774 rq_create->u.request.page[cnt].addr_lo =
16775 putPaddrLow(dmabuf->phys);
16776 rq_create->u.request.page[cnt].addr_hi =
16777 putPaddrHigh(dmabuf->phys);
16778 rc++;
16779 }
16780 page_idx += rc;
16781
16782 rc = 0;
16783 list_for_each_entry(dmabuf, &drq->page_list, list) {
16784 memset(dmabuf->virt, 0, hw_page_size);
16785 cnt = page_idx + dmabuf->buffer_tag;
16786 rq_create->u.request.page[cnt].addr_lo =
16787 putPaddrLow(dmabuf->phys);
16788 rq_create->u.request.page[cnt].addr_hi =
16789 putPaddrHigh(dmabuf->phys);
16790 rc++;
16791 }
16792 page_idx += rc;
16793
16794 hrq->db_format = LPFC_DB_RING_FORMAT;
16795 hrq->db_regaddr = phba->sli4_hba.RQDBregaddr;
16796 hrq->type = LPFC_HRQ;
16797 hrq->assoc_qid = cq->queue_id;
16798 hrq->subtype = subtype;
16799 hrq->host_index = 0;
16800 hrq->hba_index = 0;
16801 hrq->notify_interval = LPFC_RQ_NOTIFY_INTRVL;
16802
16803 drq->db_format = LPFC_DB_RING_FORMAT;
16804 drq->db_regaddr = phba->sli4_hba.RQDBregaddr;
16805 drq->type = LPFC_DRQ;
16806 drq->assoc_qid = cq->queue_id;
16807 drq->subtype = subtype;
16808 drq->host_index = 0;
16809 drq->hba_index = 0;
16810 drq->notify_interval = LPFC_RQ_NOTIFY_INTRVL;
16811
16812 list_add_tail(&hrq->list, &cq->child_list);
16813 list_add_tail(&drq->list, &cq->child_list);
16814 }
16815
16816 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
16817
16818 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
16819 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
16820 if (shdr_status || shdr_add_status || rc) {
16821 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
16822 "3120 RQ_CREATE mailbox failed with "
16823 "status x%x add_status x%x, mbx status x%x\n",
16824 shdr_status, shdr_add_status, rc);
16825 status = -ENXIO;
16826 goto out;
16827 }
16828 rc = bf_get(lpfc_mbx_rq_create_q_id, &rq_create->u.response);
16829 if (rc == 0xFFFF) {
16830 status = -ENXIO;
16831 goto out;
16832 }
16833
16834
16835 for (idx = 0; idx < numrq; idx++) {
16836 hrq = hrqp[idx];
16837 hrq->queue_id = rc + (2 * idx);
16838 drq = drqp[idx];
16839 drq->queue_id = rc + (2 * idx) + 1;
16840 }
16841
16842out:
16843 lpfc_sli4_mbox_cmd_free(phba, mbox);
16844 return status;
16845}
16846
16847
16848
16849
16850
16851
16852
16853
16854
16855
16856
16857
16858
16859
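/**
 * lpfc_eq_destroy - Destroy an event queue on the HBA
 * @phba: HBA structure that indicates port to destroy a queue on.
 * @eq: The queue structure associated with the queue to destroy.
 *
 * Issues an EQ_DESTROY mailbox command for @eq and, regardless of the
 * mailbox outcome, removes the queue from its parent list.
 *
 * Return: 0 on success; -ENODEV, -ENOMEM or -ENXIO on failure.
 **/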
16860int
16861lpfc_eq_destroy(struct lpfc_hba *phba, struct lpfc_queue *eq)
16862{
16863 LPFC_MBOXQ_t *mbox;
16864 int rc, length, status = 0;
16865 uint32_t shdr_status, shdr_add_status;
16866 union lpfc_sli4_cfg_shdr *shdr;
16867
16868
16869 if (!eq)
16870 return -ENODEV;
16871
16872 mbox = mempool_alloc(eq->phba->mbox_mem_pool, GFP_KERNEL);
16873 if (!mbox)
16874 return -ENOMEM;
16875 length = (sizeof(struct lpfc_mbx_eq_destroy) -
16876 sizeof(struct lpfc_sli4_cfg_mhdr));
16877 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON,
16878 LPFC_MBOX_OPCODE_EQ_DESTROY,
16879 length, LPFC_SLI4_MBX_EMBED);
16880 bf_set(lpfc_mbx_eq_destroy_q_id, &mbox->u.mqe.un.eq_destroy.u.request,
16881 eq->queue_id);
16882 mbox->vport = eq->phba->pport;
16883 mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
16884
16885 rc = lpfc_sli_issue_mbox(eq->phba, mbox, MBX_POLL);
16886
16887 shdr = (union lpfc_sli4_cfg_shdr *)
16888 &mbox->u.mqe.un.eq_destroy.header.cfg_shdr;
16889 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
16890 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
16891 if (shdr_status || shdr_add_status || rc) {
16892 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
16893 "2505 EQ_DESTROY mailbox failed with "
16894 "status x%x add_status x%x, mbx status x%x\n",
16895 shdr_status, shdr_add_status, rc);
16896 status = -ENXIO;
16897 }
16898
16899
16900 list_del_init(&eq->list);
16901 mempool_free(mbox, eq->phba->mbox_mem_pool);
16902 return status;
16903}
16904
16905
16906
16907
16908
16909
16910
16911
16912
16913
16914
16915
16916
16917
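/**
 * lpfc_cq_destroy - Destroy a completion queue on the HBA
 * @phba: HBA structure that indicates port to destroy a queue on.
 * @cq: The queue structure associated with the queue to destroy.
 *
 * Issues a CQ_DESTROY mailbox command for @cq and removes the queue from
 * its parent list.
 *
 * Return: 0 on success; -ENODEV, -ENOMEM or -ENXIO on failure.
 **/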
16918int
16919lpfc_cq_destroy(struct lpfc_hba *phba, struct lpfc_queue *cq)
16920{
16921 LPFC_MBOXQ_t *mbox;
16922 int rc, length, status = 0;
16923 uint32_t shdr_status, shdr_add_status;
16924 union lpfc_sli4_cfg_shdr *shdr;
16925
16926
16927 if (!cq)
16928 return -ENODEV;
16929 mbox = mempool_alloc(cq->phba->mbox_mem_pool, GFP_KERNEL);
16930 if (!mbox)
16931 return -ENOMEM;
16932 length = (sizeof(struct lpfc_mbx_cq_destroy) -
16933 sizeof(struct lpfc_sli4_cfg_mhdr));
16934 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON,
16935 LPFC_MBOX_OPCODE_CQ_DESTROY,
16936 length, LPFC_SLI4_MBX_EMBED);
16937 bf_set(lpfc_mbx_cq_destroy_q_id, &mbox->u.mqe.un.cq_destroy.u.request,
16938 cq->queue_id);
16939 mbox->vport = cq->phba->pport;
16940 mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
16941 rc = lpfc_sli_issue_mbox(cq->phba, mbox, MBX_POLL);
16942
16943 shdr = (union lpfc_sli4_cfg_shdr *)
16944 &mbox->u.mqe.un.wq_create.header.cfg_shdr;
16945 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
16946 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
16947 if (shdr_status || shdr_add_status || rc) {
16948 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
16949 "2506 CQ_DESTROY mailbox failed with "
16950 "status x%x add_status x%x, mbx status x%x\n",
16951 shdr_status, shdr_add_status, rc);
16952 status = -ENXIO;
16953 }
16954
16955 list_del_init(&cq->list);
16956 mempool_free(mbox, cq->phba->mbox_mem_pool);
16957 return status;
16958}
16959
16960
16961
16962
16963
16964
16965
16966
16967
16968
16969
16970
16971
16972
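/**
 * lpfc_mq_destroy - Destroy a mailbox queue on the HBA
 * @phba: HBA structure that indicates port to destroy a queue on.
 * @mq: The queue structure associated with the queue to destroy.
 *
 * Issues an MQ_DESTROY mailbox command for @mq and removes the queue from
 * its parent list.
 *
 * Return: 0 on success; -ENODEV, -ENOMEM or -ENXIO on failure.
 **/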
16973int
16974lpfc_mq_destroy(struct lpfc_hba *phba, struct lpfc_queue *mq)
16975{
16976 LPFC_MBOXQ_t *mbox;
16977 int rc, length, status = 0;
16978 uint32_t shdr_status, shdr_add_status;
16979 union lpfc_sli4_cfg_shdr *shdr;
16980
16981
16982 if (!mq)
16983 return -ENODEV;
16984 mbox = mempool_alloc(mq->phba->mbox_mem_pool, GFP_KERNEL);
16985 if (!mbox)
16986 return -ENOMEM;
16987 length = (sizeof(struct lpfc_mbx_mq_destroy) -
16988 sizeof(struct lpfc_sli4_cfg_mhdr));
16989 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON,
16990 LPFC_MBOX_OPCODE_MQ_DESTROY,
16991 length, LPFC_SLI4_MBX_EMBED);
16992 bf_set(lpfc_mbx_mq_destroy_q_id, &mbox->u.mqe.un.mq_destroy.u.request,
16993 mq->queue_id);
16994 mbox->vport = mq->phba->pport;
16995 mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
16996 rc = lpfc_sli_issue_mbox(mq->phba, mbox, MBX_POLL);
16997
16998 shdr = (union lpfc_sli4_cfg_shdr *)
16999 &mbox->u.mqe.un.mq_destroy.header.cfg_shdr;
17000 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
17001 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
17002 if (shdr_status || shdr_add_status || rc) {
17003 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
17004 "2507 MQ_DESTROY mailbox failed with "
17005 "status x%x add_status x%x, mbx status x%x\n",
17006 shdr_status, shdr_add_status, rc);
17007 status = -ENXIO;
17008 }
17009
17010 list_del_init(&mq->list);
17011 mempool_free(mbox, mq->phba->mbox_mem_pool);
17012 return status;
17013}
17014
17015
17016
17017
17018
17019
17020
17021
17022
17023
17024
17025
17026
17027
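/**
 * lpfc_wq_destroy - Destroy a work queue on the HBA
 * @phba: HBA structure that indicates port to destroy a queue on.
 * @wq: The queue structure associated with the queue to destroy.
 *
 * Issues a WQ_DESTROY mailbox command for @wq, removes the queue from its
 * parent list and frees the SLI ring associated with it.
 *
 * Return: 0 on success; -ENODEV, -ENOMEM or -ENXIO on failure.
 **/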
17028int
17029lpfc_wq_destroy(struct lpfc_hba *phba, struct lpfc_queue *wq)
17030{
17031 LPFC_MBOXQ_t *mbox;
17032 int rc, length, status = 0;
17033 uint32_t shdr_status, shdr_add_status;
17034 union lpfc_sli4_cfg_shdr *shdr;
17035
17036
17037 if (!wq)
17038 return -ENODEV;
17039 mbox = mempool_alloc(wq->phba->mbox_mem_pool, GFP_KERNEL);
17040 if (!mbox)
17041 return -ENOMEM;
17042 length = (sizeof(struct lpfc_mbx_wq_destroy) -
17043 sizeof(struct lpfc_sli4_cfg_mhdr));
17044 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_FCOE,
17045 LPFC_MBOX_OPCODE_FCOE_WQ_DESTROY,
17046 length, LPFC_SLI4_MBX_EMBED);
17047 bf_set(lpfc_mbx_wq_destroy_q_id, &mbox->u.mqe.un.wq_destroy.u.request,
17048 wq->queue_id);
17049 mbox->vport = wq->phba->pport;
17050 mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
17051 rc = lpfc_sli_issue_mbox(wq->phba, mbox, MBX_POLL);
17052 shdr = (union lpfc_sli4_cfg_shdr *)
17053 &mbox->u.mqe.un.wq_destroy.header.cfg_shdr;
17054 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
17055 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
17056 if (shdr_status || shdr_add_status || rc) {
17057 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
17058 "2508 WQ_DESTROY mailbox failed with "
17059 "status x%x add_status x%x, mbx status x%x\n",
17060 shdr_status, shdr_add_status, rc);
17061 status = -ENXIO;
17062 }
17063
17064 list_del_init(&wq->list);
17065 kfree(wq->pring);
17066 wq->pring = NULL;
17067 mempool_free(mbox, wq->phba->mbox_mem_pool);
17068 return status;
17069}
17070
17071
17072
17073
17074
17075
17076
17077
17078
17079
17080
17081
17082
17083
17084
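/**
 * lpfc_rq_destroy - Destroy a receive queue pair on the HBA
 * @phba: HBA structure that indicates port to destroy the queues on.
 * @hrq: The header receive queue to destroy.
 * @drq: The data receive queue to destroy.
 *
 * Issues RQ_DESTROY mailbox commands for the header queue and then the
 * data queue, reusing the same mailbox, and removes both queues from
 * their parent lists.
 *
 * Return: 0 on success; -ENODEV, -ENOMEM or -ENXIO on failure.
 **/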
17085int
17086lpfc_rq_destroy(struct lpfc_hba *phba, struct lpfc_queue *hrq,
17087 struct lpfc_queue *drq)
17088{
17089 LPFC_MBOXQ_t *mbox;
17090 int rc, length, status = 0;
17091 uint32_t shdr_status, shdr_add_status;
17092 union lpfc_sli4_cfg_shdr *shdr;
17093
17094
17095 if (!hrq || !drq)
17096 return -ENODEV;
17097 mbox = mempool_alloc(hrq->phba->mbox_mem_pool, GFP_KERNEL);
17098 if (!mbox)
17099 return -ENOMEM;
17100 length = (sizeof(struct lpfc_mbx_rq_destroy) -
17101 sizeof(struct lpfc_sli4_cfg_mhdr));
17102 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_FCOE,
17103 LPFC_MBOX_OPCODE_FCOE_RQ_DESTROY,
17104 length, LPFC_SLI4_MBX_EMBED);
17105 bf_set(lpfc_mbx_rq_destroy_q_id, &mbox->u.mqe.un.rq_destroy.u.request,
17106 hrq->queue_id);
17107 mbox->vport = hrq->phba->pport;
17108 mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
17109 rc = lpfc_sli_issue_mbox(hrq->phba, mbox, MBX_POLL);
17110
17111 shdr = (union lpfc_sli4_cfg_shdr *)
17112 &mbox->u.mqe.un.rq_destroy.header.cfg_shdr;
17113 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
17114 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
17115 if (shdr_status || shdr_add_status || rc) {
17116 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
17117 "2509 RQ_DESTROY mailbox failed with "
17118 "status x%x add_status x%x, mbx status x%x\n",
17119 shdr_status, shdr_add_status, rc);
17120 mempool_free(mbox, hrq->phba->mbox_mem_pool);
17121 return -ENXIO;
17122 }
17123 bf_set(lpfc_mbx_rq_destroy_q_id, &mbox->u.mqe.un.rq_destroy.u.request,
17124 drq->queue_id);
17125 rc = lpfc_sli_issue_mbox(drq->phba, mbox, MBX_POLL);
17126 shdr = (union lpfc_sli4_cfg_shdr *)
17127 &mbox->u.mqe.un.rq_destroy.header.cfg_shdr;
17128 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
17129 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
17130 if (shdr_status || shdr_add_status || rc) {
17131 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
17132 "2510 RQ_DESTROY mailbox failed with "
17133 "status x%x add_status x%x, mbx status x%x\n",
17134 shdr_status, shdr_add_status, rc);
17135 status = -ENXIO;
17136 }
17137 list_del_init(&hrq->list);
17138 list_del_init(&drq->list);
17139 mempool_free(mbox, hrq->phba->mbox_mem_pool);
17140 return status;
17141}
17142
17143
17144
17145
17146
17147
17148
17149
17150
17151
17152
17153
17154
17155
17156
17157
17158
17159
17160
17161
17162
17163
17164
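/**
 * lpfc_sli4_post_sgl - Post a single sgl page pair to the port
 * @phba: HBA structure that indicates port to post the pages to.
 * @pdma_phys_addr0: Physical address of the first SGL page.
 * @pdma_phys_addr1: Physical address of the second SGL page, or 0.
 * @xritag: XRI the SGL pages are registered against.
 *
 * Issues an embedded POST_SGL_PAGES mailbox command registering one page
 * pair for @xritag. The command is polled when interrupts are disabled.
 *
 * Return: 0 on success; -EINVAL or -ENOMEM on parameter/allocation
 * failure; -ENXIO if the mailbox command fails.
 **/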
17165int
17166lpfc_sli4_post_sgl(struct lpfc_hba *phba,
17167 dma_addr_t pdma_phys_addr0,
17168 dma_addr_t pdma_phys_addr1,
17169 uint16_t xritag)
17170{
17171 struct lpfc_mbx_post_sgl_pages *post_sgl_pages;
17172 LPFC_MBOXQ_t *mbox;
17173 int rc;
17174 uint32_t shdr_status, shdr_add_status;
17175 uint32_t mbox_tmo;
17176 union lpfc_sli4_cfg_shdr *shdr;
17177
17178 if (xritag == NO_XRI) {
17179 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
17180 "0364 Invalid param:\n");
17181 return -EINVAL;
17182 }
17183
17184 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
17185 if (!mbox)
17186 return -ENOMEM;
17187
17188 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_FCOE,
17189 LPFC_MBOX_OPCODE_FCOE_POST_SGL_PAGES,
17190 sizeof(struct lpfc_mbx_post_sgl_pages) -
17191 sizeof(struct lpfc_sli4_cfg_mhdr), LPFC_SLI4_MBX_EMBED);
17192
17193 post_sgl_pages = (struct lpfc_mbx_post_sgl_pages *)
17194 &mbox->u.mqe.un.post_sgl_pages;
17195 bf_set(lpfc_post_sgl_pages_xri, post_sgl_pages, xritag);
17196 bf_set(lpfc_post_sgl_pages_xricnt, post_sgl_pages, 1);
17197
17198 post_sgl_pages->sgl_pg_pairs[0].sgl_pg0_addr_lo =
17199 cpu_to_le32(putPaddrLow(pdma_phys_addr0));
17200 post_sgl_pages->sgl_pg_pairs[0].sgl_pg0_addr_hi =
17201 cpu_to_le32(putPaddrHigh(pdma_phys_addr0));
17202
17203 post_sgl_pages->sgl_pg_pairs[0].sgl_pg1_addr_lo =
17204 cpu_to_le32(putPaddrLow(pdma_phys_addr1));
17205 post_sgl_pages->sgl_pg_pairs[0].sgl_pg1_addr_hi =
17206 cpu_to_le32(putPaddrHigh(pdma_phys_addr1));
17207 if (!phba->sli4_hba.intr_enable)
17208 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
17209 else {
17210 mbox_tmo = lpfc_mbox_tmo_val(phba, mbox);
17211 rc = lpfc_sli_issue_mbox_wait(phba, mbox, mbox_tmo);
17212 }
17213
17214 shdr = (union lpfc_sli4_cfg_shdr *) &post_sgl_pages->header.cfg_shdr;
17215 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
17216 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
17217 if (!phba->sli4_hba.intr_enable)
17218 mempool_free(mbox, phba->mbox_mem_pool);
17219 else if (rc != MBX_TIMEOUT)
17220 mempool_free(mbox, phba->mbox_mem_pool);
17221 if (shdr_status || shdr_add_status || rc) {
17222 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
17223 "2511 POST_SGL mailbox failed with "
17224 "status x%x add_status x%x, mbx status x%x\n",
17225 shdr_status, shdr_add_status, rc);
		/* Surface the mailbox failure to the caller */
		return -ENXIO;
	}
	return 0;
17228}
17229
17230
17231
17232
17233
17234
17235
17236
17237
17238
17239
17240
17241
17242
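/**
 * lpfc_sli4_alloc_xri - Allocate an available XRI resource
 * @phba: HBA structure that indicates port to allocate the XRI from.
 *
 * Finds the first free bit in the XRI bitmask under the hba lock, marks
 * it in use and increments the used count.
 *
 * Return: the allocated XRI index, or NO_XRI if the pool is exhausted.
 **/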
17243static uint16_t
17244lpfc_sli4_alloc_xri(struct lpfc_hba *phba)
17245{
17246 unsigned long xri;
17247
17248
17249
17250
17251
17252 spin_lock_irq(&phba->hbalock);
17253 xri = find_next_zero_bit(phba->sli4_hba.xri_bmask,
17254 phba->sli4_hba.max_cfg_param.max_xri, 0);
17255 if (xri >= phba->sli4_hba.max_cfg_param.max_xri) {
17256 spin_unlock_irq(&phba->hbalock);
17257 return NO_XRI;
17258 } else {
17259 set_bit(xri, phba->sli4_hba.xri_bmask);
17260 phba->sli4_hba.max_cfg_param.xri_used++;
17261 }
17262 spin_unlock_irq(&phba->hbalock);
17263 return xri;
17264}
17265
17266
17267
17268
17269
17270
17271
17272
17273
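/**
 * __lpfc_sli4_free_xri - Release an XRI resource back to the pool
 * @phba: HBA structure that indicates port the XRI belongs to.
 * @xri: The XRI index to release.
 *
 * Clears the XRI bit and decrements the used count. The caller must hold
 * the hba lock.
 **/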
17274static void
17275__lpfc_sli4_free_xri(struct lpfc_hba *phba, int xri)
17276{
17277 if (test_and_clear_bit(xri, phba->sli4_hba.xri_bmask)) {
17278 phba->sli4_hba.max_cfg_param.xri_used--;
17279 }
17280}
17281
17282
17283
17284
17285
17286
17287
17288
17289
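/**
 * lpfc_sli4_free_xri - Release an XRI resource under the hba lock
 * @phba: HBA structure that indicates port the XRI belongs to.
 * @xri: The XRI index to release.
 *
 * Lock-taking wrapper around __lpfc_sli4_free_xri().
 **/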
17290void
17291lpfc_sli4_free_xri(struct lpfc_hba *phba, int xri)
17292{
17293 spin_lock_irq(&phba->hbalock);
17294 __lpfc_sli4_free_xri(phba, xri);
17295 spin_unlock_irq(&phba->hbalock);
17296}
17297
17298
17299
17300
17301
17302
17303
17304
17305
17306
17307
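/**
 * lpfc_sli4_next_xritag - Allocate the next available XRI tag
 * @phba: HBA structure that indicates port to allocate the XRI from.
 *
 * Allocates an XRI and logs a warning with the pool statistics when the
 * allocation fails.
 *
 * Return: the allocated XRI index, or NO_XRI on exhaustion.
 **/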
17308uint16_t
17309lpfc_sli4_next_xritag(struct lpfc_hba *phba)
17310{
17311 uint16_t xri_index;
17312
17313 xri_index = lpfc_sli4_alloc_xri(phba);
17314 if (xri_index == NO_XRI)
17315 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
				"2004 Failed to allocate XRI. Last XRITAG is %d"
17317 " Max XRI is %d, Used XRI is %d\n",
17318 xri_index,
17319 phba->sli4_hba.max_cfg_param.max_xri,
17320 phba->sli4_hba.max_cfg_param.xri_used);
17321 return xri_index;
17322}
17323
17324
17325
17326
17327
17328
17329
17330
17331
17332
17333
17334
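/**
 * lpfc_sli4_post_sgl_list - Post a block of ELS sgl entries to the port
 * @phba: HBA structure that indicates port to post the sgl list to.
 * @post_sgl_list: List of sglq entries to post.
 * @post_cnt: Number of sgl entries on the list.
 *
 * Builds a non-embedded POST_SGL_PAGES mailbox command containing one
 * page pair per sglq entry (the second page is zero) and issues it,
 * starting at the XRI of the first entry on the list.
 *
 * Return: 0 on success; -ENOMEM on allocation failure; -ENXIO if the
 * mailbox command fails.
 **/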
17335static int
17336lpfc_sli4_post_sgl_list(struct lpfc_hba *phba,
17337 struct list_head *post_sgl_list,
17338 int post_cnt)
17339{
17340 struct lpfc_sglq *sglq_entry = NULL, *sglq_next = NULL;
17341 struct lpfc_mbx_post_uembed_sgl_page1 *sgl;
17342 struct sgl_page_pairs *sgl_pg_pairs;
17343 void *viraddr;
17344 LPFC_MBOXQ_t *mbox;
17345 uint32_t reqlen, alloclen, pg_pairs;
17346 uint32_t mbox_tmo;
17347 uint16_t xritag_start = 0;
17348 int rc = 0;
17349 uint32_t shdr_status, shdr_add_status;
17350 union lpfc_sli4_cfg_shdr *shdr;
17351
17352 reqlen = post_cnt * sizeof(struct sgl_page_pairs) +
17353 sizeof(union lpfc_sli4_cfg_shdr) + sizeof(uint32_t);
17354 if (reqlen > SLI4_PAGE_SIZE) {
17355 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
17356 "2559 Block sgl registration required DMA "
				"size (%d) greater than a page\n", reqlen);
17358 return -ENOMEM;
17359 }
17360
17361 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
17362 if (!mbox)
17363 return -ENOMEM;
17364
17365
17366 alloclen = lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_FCOE,
17367 LPFC_MBOX_OPCODE_FCOE_POST_SGL_PAGES, reqlen,
17368 LPFC_SLI4_MBX_NEMBED);
17369
17370 if (alloclen < reqlen) {
17371 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
17372 "0285 Allocated DMA memory size (%d) is "
17373 "less than the requested DMA memory "
17374 "size (%d)\n", alloclen, reqlen);
17375 lpfc_sli4_mbox_cmd_free(phba, mbox);
17376 return -ENOMEM;
17377 }
17378
17379 viraddr = mbox->sge_array->addr[0];
17380 sgl = (struct lpfc_mbx_post_uembed_sgl_page1 *)viraddr;
17381 sgl_pg_pairs = &sgl->sgl_pg_pairs;
17382
17383 pg_pairs = 0;
17384 list_for_each_entry_safe(sglq_entry, sglq_next, post_sgl_list, list) {
17385
17386 sgl_pg_pairs->sgl_pg0_addr_lo =
17387 cpu_to_le32(putPaddrLow(sglq_entry->phys));
17388 sgl_pg_pairs->sgl_pg0_addr_hi =
17389 cpu_to_le32(putPaddrHigh(sglq_entry->phys));
17390 sgl_pg_pairs->sgl_pg1_addr_lo =
17391 cpu_to_le32(putPaddrLow(0));
17392 sgl_pg_pairs->sgl_pg1_addr_hi =
17393 cpu_to_le32(putPaddrHigh(0));
17394
17395
17396 if (pg_pairs == 0)
17397 xritag_start = sglq_entry->sli4_xritag;
17398 sgl_pg_pairs++;
17399 pg_pairs++;
17400 }
17401
17402
17403 bf_set(lpfc_post_sgl_pages_xri, sgl, xritag_start);
17404 bf_set(lpfc_post_sgl_pages_xricnt, sgl, post_cnt);
17405 sgl->word0 = cpu_to_le32(sgl->word0);
17406
17407 if (!phba->sli4_hba.intr_enable)
17408 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
17409 else {
17410 mbox_tmo = lpfc_mbox_tmo_val(phba, mbox);
17411 rc = lpfc_sli_issue_mbox_wait(phba, mbox, mbox_tmo);
17412 }
17413 shdr = (union lpfc_sli4_cfg_shdr *) &sgl->cfg_shdr;
17414 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
17415 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
17416 if (!phba->sli4_hba.intr_enable)
17417 lpfc_sli4_mbox_cmd_free(phba, mbox);
17418 else if (rc != MBX_TIMEOUT)
17419 lpfc_sli4_mbox_cmd_free(phba, mbox);
17420 if (shdr_status || shdr_add_status || rc) {
17421 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
17422 "2513 POST_SGL_BLOCK mailbox command failed "
17423 "status x%x add_status x%x mbx status x%x\n",
17424 shdr_status, shdr_add_status, rc);
17425 rc = -ENXIO;
17426 }
17427 return rc;
17428}
17429
17430
17431
17432
17433
17434
17435
17436
17437
17438
17439
17440
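/**
 * lpfc_sli4_post_io_sgl_block - Post a block of IO buffer sgls to the port
 * @phba: HBA structure that indicates port to post the sgl block to.
 * @nblist: List of lpfc_io_buf entries whose sgls are to be posted.
 * @count: Number of buffers on the list.
 *
 * Builds a non-embedded POST_SGL_PAGES mailbox command with one page pair
 * per buffer (a second page is supplied only when the sgl spans more than
 * one page) and issues it, starting at the XRI of the first buffer.
 *
 * Return: 0 on success; -ENOMEM on allocation failure; -ENXIO if the
 * mailbox command fails.
 **/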
17441static int
17442lpfc_sli4_post_io_sgl_block(struct lpfc_hba *phba, struct list_head *nblist,
17443 int count)
17444{
17445 struct lpfc_io_buf *lpfc_ncmd;
17446 struct lpfc_mbx_post_uembed_sgl_page1 *sgl;
17447 struct sgl_page_pairs *sgl_pg_pairs;
17448 void *viraddr;
17449 LPFC_MBOXQ_t *mbox;
17450 uint32_t reqlen, alloclen, pg_pairs;
17451 uint32_t mbox_tmo;
17452 uint16_t xritag_start = 0;
17453 int rc = 0;
17454 uint32_t shdr_status, shdr_add_status;
17455 dma_addr_t pdma_phys_bpl1;
17456 union lpfc_sli4_cfg_shdr *shdr;
17457
17458
17459 reqlen = count * sizeof(struct sgl_page_pairs) +
17460 sizeof(union lpfc_sli4_cfg_shdr) + sizeof(uint32_t);
17461 if (reqlen > SLI4_PAGE_SIZE) {
17462 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
17463 "6118 Block sgl registration required DMA "
				"size (%d) greater than a page\n", reqlen);
17465 return -ENOMEM;
17466 }
17467 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
17468 if (!mbox) {
17469 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
17470 "6119 Failed to allocate mbox cmd memory\n");
17471 return -ENOMEM;
17472 }
17473
17474
17475 alloclen = lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_FCOE,
17476 LPFC_MBOX_OPCODE_FCOE_POST_SGL_PAGES,
17477 reqlen, LPFC_SLI4_MBX_NEMBED);
17478
17479 if (alloclen < reqlen) {
17480 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
17481 "6120 Allocated DMA memory size (%d) is "
17482 "less than the requested DMA memory "
17483 "size (%d)\n", alloclen, reqlen);
17484 lpfc_sli4_mbox_cmd_free(phba, mbox);
17485 return -ENOMEM;
17486 }
17487
17488
17489 viraddr = mbox->sge_array->addr[0];
17490
17491
17492 sgl = (struct lpfc_mbx_post_uembed_sgl_page1 *)viraddr;
17493 sgl_pg_pairs = &sgl->sgl_pg_pairs;
17494
17495 pg_pairs = 0;
17496 list_for_each_entry(lpfc_ncmd, nblist, list) {
17497
17498 sgl_pg_pairs->sgl_pg0_addr_lo =
17499 cpu_to_le32(putPaddrLow(lpfc_ncmd->dma_phys_sgl));
17500 sgl_pg_pairs->sgl_pg0_addr_hi =
17501 cpu_to_le32(putPaddrHigh(lpfc_ncmd->dma_phys_sgl));
17502 if (phba->cfg_sg_dma_buf_size > SGL_PAGE_SIZE)
17503 pdma_phys_bpl1 = lpfc_ncmd->dma_phys_sgl +
17504 SGL_PAGE_SIZE;
17505 else
17506 pdma_phys_bpl1 = 0;
17507 sgl_pg_pairs->sgl_pg1_addr_lo =
17508 cpu_to_le32(putPaddrLow(pdma_phys_bpl1));
17509 sgl_pg_pairs->sgl_pg1_addr_hi =
17510 cpu_to_le32(putPaddrHigh(pdma_phys_bpl1));
17511
17512 if (pg_pairs == 0)
17513 xritag_start = lpfc_ncmd->cur_iocbq.sli4_xritag;
17514 sgl_pg_pairs++;
17515 pg_pairs++;
17516 }
17517 bf_set(lpfc_post_sgl_pages_xri, sgl, xritag_start);
17518 bf_set(lpfc_post_sgl_pages_xricnt, sgl, pg_pairs);
17519
17520 sgl->word0 = cpu_to_le32(sgl->word0);
17521
17522 if (!phba->sli4_hba.intr_enable) {
17523 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
17524 } else {
17525 mbox_tmo = lpfc_mbox_tmo_val(phba, mbox);
17526 rc = lpfc_sli_issue_mbox_wait(phba, mbox, mbox_tmo);
17527 }
17528 shdr = (union lpfc_sli4_cfg_shdr *)&sgl->cfg_shdr;
17529 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
17530 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
17531 if (!phba->sli4_hba.intr_enable)
17532 lpfc_sli4_mbox_cmd_free(phba, mbox);
17533 else if (rc != MBX_TIMEOUT)
17534 lpfc_sli4_mbox_cmd_free(phba, mbox);
17535 if (shdr_status || shdr_add_status || rc) {
17536 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
17537 "6125 POST_SGL_BLOCK mailbox command failed "
17538 "status x%x add_status x%x mbx status x%x\n",
17539 shdr_status, shdr_add_status, rc);
17540 rc = -ENXIO;
17541 }
17542 return rc;
17543}
17544
17545
17546
17547
17548
17549
17550
17551
17552
17553
17554
17555
17556
17557
17558
17559
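/**
 * lpfc_sli4_post_io_sgl_list - Post sgls for a list of IO buffers
 * @phba: HBA structure that indicates port to post the buffers to.
 * @post_nblist: List of IO buffers to post.
 * @sb_count: Number of buffers on the list.
 *
 * Walks the buffer list, batching buffers with contiguous XRIs into block
 * posts (up to LPFC_NEMBED_MBOX_SGL_CNT per mailbox) and posting a single
 * buffer individually when a run ends with only one entry. Buffers that
 * fail to post are flagged LPFC_SBUF_NOT_POSTED; all buffers are then
 * returned to the driver pools via lpfc_io_buf_replenish().
 *
 * Return: the number of buffers successfully posted, or -EINVAL if
 * @sb_count is not positive.
 **/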
17560int
17561lpfc_sli4_post_io_sgl_list(struct lpfc_hba *phba,
17562 struct list_head *post_nblist, int sb_count)
17563{
17564 struct lpfc_io_buf *lpfc_ncmd, *lpfc_ncmd_next;
17565 int status, sgl_size;
17566 int post_cnt = 0, block_cnt = 0, num_posting = 0, num_posted = 0;
17567 dma_addr_t pdma_phys_sgl1;
17568 int last_xritag = NO_XRI;
17569 int cur_xritag;
17570 LIST_HEAD(prep_nblist);
17571 LIST_HEAD(blck_nblist);
17572 LIST_HEAD(nvme_nblist);
17573
17574
17575 if (sb_count <= 0)
17576 return -EINVAL;
17577
17578 sgl_size = phba->cfg_sg_dma_buf_size;
17579 list_for_each_entry_safe(lpfc_ncmd, lpfc_ncmd_next, post_nblist, list) {
17580 list_del_init(&lpfc_ncmd->list);
17581 block_cnt++;
17582 if ((last_xritag != NO_XRI) &&
17583 (lpfc_ncmd->cur_iocbq.sli4_xritag != last_xritag + 1)) {
17584
17585 list_splice_init(&prep_nblist, &blck_nblist);
17586 post_cnt = block_cnt - 1;
17587
17588 list_add_tail(&lpfc_ncmd->list, &prep_nblist);
17589 block_cnt = 1;
17590 } else {
17591
17592 list_add_tail(&lpfc_ncmd->list, &prep_nblist);
17593
17594 if (block_cnt == LPFC_NEMBED_MBOX_SGL_CNT) {
17595 list_splice_init(&prep_nblist, &blck_nblist);
17596 post_cnt = block_cnt;
17597 block_cnt = 0;
17598 }
17599 }
17600 num_posting++;
17601 last_xritag = lpfc_ncmd->cur_iocbq.sli4_xritag;
17602
17603
17604 if (num_posting == sb_count) {
17605 if (post_cnt == 0) {
17606
17607 list_splice_init(&prep_nblist, &blck_nblist);
17608 post_cnt = block_cnt;
17609 } else if (block_cnt == 1) {
17610
17611 if (sgl_size > SGL_PAGE_SIZE)
17612 pdma_phys_sgl1 =
17613 lpfc_ncmd->dma_phys_sgl +
17614 SGL_PAGE_SIZE;
17615 else
17616 pdma_phys_sgl1 = 0;
17617 cur_xritag = lpfc_ncmd->cur_iocbq.sli4_xritag;
17618 status = lpfc_sli4_post_sgl(
17619 phba, lpfc_ncmd->dma_phys_sgl,
17620 pdma_phys_sgl1, cur_xritag);
17621 if (status) {
17622
17623 lpfc_ncmd->flags |=
17624 LPFC_SBUF_NOT_POSTED;
17625 } else {
17626
17627 lpfc_ncmd->flags &=
17628 ~LPFC_SBUF_NOT_POSTED;
17629 lpfc_ncmd->status = IOSTAT_SUCCESS;
17630 num_posted++;
17631 }
17632
17633 list_add_tail(&lpfc_ncmd->list, &nvme_nblist);
17634 }
17635 }
17636
17637
17638 if (post_cnt == 0)
17639 continue;
17640
17641
17642 status = lpfc_sli4_post_io_sgl_block(phba, &blck_nblist,
17643 post_cnt);
17644
17645
17646 if (block_cnt == 0)
17647 last_xritag = NO_XRI;
17648
17649
17650 post_cnt = 0;
17651
17652
17653 while (!list_empty(&blck_nblist)) {
17654 list_remove_head(&blck_nblist, lpfc_ncmd,
17655 struct lpfc_io_buf, list);
17656 if (status) {
17657
17658 lpfc_ncmd->flags |= LPFC_SBUF_NOT_POSTED;
17659 } else {
17660
17661 lpfc_ncmd->flags &= ~LPFC_SBUF_NOT_POSTED;
17662 lpfc_ncmd->status = IOSTAT_SUCCESS;
17663 num_posted++;
17664 }
17665 list_add_tail(&lpfc_ncmd->list, &nvme_nblist);
17666 }
17667 }
17668
17669 lpfc_io_buf_replenish(phba, &nvme_nblist);
17670
17671 return num_posted;
17672}
17673
17674
17675
17676
17677
17678
17679
17680
17681
17682
17683
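/**
 * lpfc_fc_frame_check - Validate an unsolicited receive frame header
 * @phba: Pointer to the HBA structure.
 * @fc_hdr: Pointer to the received fibre channel frame header.
 *
 * Checks the R_CTL and TYPE fields of the frame; VFT-tagged frames are
 * re-checked against the encapsulated header. Accepted frames are logged
 * at informational level, rejected frames at warning level.
 *
 * Return: 0 if the frame should be processed, 1 if it should be dropped.
 **/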
17684static int
17685lpfc_fc_frame_check(struct lpfc_hba *phba, struct fc_frame_header *fc_hdr)
17686{
17687
17688 struct fc_vft_header *fc_vft_hdr;
17689 uint32_t *header = (uint32_t *) fc_hdr;
17690
17691#define FC_RCTL_MDS_DIAGS 0xF4
17692
17693 switch (fc_hdr->fh_r_ctl) {
17694 case FC_RCTL_DD_UNCAT:
17695 case FC_RCTL_DD_SOL_DATA:
17696 case FC_RCTL_DD_UNSOL_CTL:
17697 case FC_RCTL_DD_SOL_CTL:
17698 case FC_RCTL_DD_UNSOL_DATA:
17699 case FC_RCTL_DD_DATA_DESC:
17700 case FC_RCTL_DD_UNSOL_CMD:
17701 case FC_RCTL_DD_CMD_STATUS:
17702 case FC_RCTL_ELS_REQ:
17703 case FC_RCTL_ELS_REP:
17704 case FC_RCTL_ELS4_REQ:
17705 case FC_RCTL_ELS4_REP:
17706 case FC_RCTL_BA_NOP:
17707 case FC_RCTL_BA_ABTS:
17708 case FC_RCTL_BA_RMC:
17709 case FC_RCTL_BA_ACC:
17710 case FC_RCTL_BA_RJT:
17711 case FC_RCTL_BA_PRMT:
17712 case FC_RCTL_ACK_1:
17713 case FC_RCTL_ACK_0:
17714 case FC_RCTL_P_RJT:
17715 case FC_RCTL_F_RJT:
17716 case FC_RCTL_P_BSY:
17717 case FC_RCTL_F_BSY:
17718 case FC_RCTL_F_BSYL:
17719 case FC_RCTL_LCR:
17720 case FC_RCTL_MDS_DIAGS:
17721 case FC_RCTL_END:
17722 break;
17723 case FC_RCTL_VFTH:
17724 fc_vft_hdr = (struct fc_vft_header *)fc_hdr;
17725 fc_hdr = &((struct fc_frame_header *)fc_vft_hdr)[1];
17726 return lpfc_fc_frame_check(phba, fc_hdr);
17727 default:
17728 goto drop;
17729 }
17730
17731 switch (fc_hdr->fh_type) {
17732 case FC_TYPE_BLS:
17733 case FC_TYPE_ELS:
17734 case FC_TYPE_FCP:
17735 case FC_TYPE_CT:
17736 case FC_TYPE_NVME:
17737 break;
17738 case FC_TYPE_IP:
17739 case FC_TYPE_ILS:
17740 default:
17741 goto drop;
17742 }
17743
17744 lpfc_printf_log(phba, KERN_INFO, LOG_ELS,
17745 "2538 Received frame rctl:x%x, type:x%x, "
17746 "frame Data:%08x %08x %08x %08x %08x %08x %08x\n",
17747 fc_hdr->fh_r_ctl, fc_hdr->fh_type,
17748 be32_to_cpu(header[0]), be32_to_cpu(header[1]),
17749 be32_to_cpu(header[2]), be32_to_cpu(header[3]),
17750 be32_to_cpu(header[4]), be32_to_cpu(header[5]),
17751 be32_to_cpu(header[6]));
17752 return 0;
17753drop:
17754 lpfc_printf_log(phba, KERN_WARNING, LOG_ELS,
17755 "2539 Dropped frame rctl:x%x type:x%x\n",
17756 fc_hdr->fh_r_ctl, fc_hdr->fh_type);
17757 return 1;
17758}
17759
17760
17761
17762
17763
17764
17765
17766
17767
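/**
 * lpfc_fc_hdr_get_vfi - Get the VFI from a VFT-tagged frame
 * @fc_hdr: Pointer to the received fibre channel frame header.
 *
 * Return: the virtual fabric identifier carried in the VFT header, or 0
 * if the frame is not VFT tagged.
 **/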
17768static uint32_t
17769lpfc_fc_hdr_get_vfi(struct fc_frame_header *fc_hdr)
17770{
17771 struct fc_vft_header *fc_vft_hdr = (struct fc_vft_header *)fc_hdr;
17772
17773 if (fc_hdr->fh_r_ctl != FC_RCTL_VFTH)
17774 return 0;
17775 return bf_get(fc_vft_hdr_vf_id, fc_vft_hdr);
17776}
17777
17778
17779
17780
17781
17782
17783
17784
17785
17786
17787
17788
17789
17790
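/**
 * lpfc_fc_frame_to_vport - Resolve the vport a received frame belongs to
 * @phba: Pointer to the HBA structure.
 * @fc_hdr: Pointer to the received fibre channel frame header.
 * @fcfi: The FCF index the frame was received on.
 * @did: The destination ID carried in the frame.
 *
 * Fabric-addressed frames, and frames received while a point-to-point
 * link is still coming up, are assigned to the physical port. Otherwise
 * the vport whose FCFI, VFI and DID match the frame is returned.
 *
 * Return: the matching vport, or NULL if none is found.
 **/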
17791static struct lpfc_vport *
17792lpfc_fc_frame_to_vport(struct lpfc_hba *phba, struct fc_frame_header *fc_hdr,
17793 uint16_t fcfi, uint32_t did)
17794{
17795 struct lpfc_vport **vports;
17796 struct lpfc_vport *vport = NULL;
17797 int i;
17798
17799 if (did == Fabric_DID)
17800 return phba->pport;
17801 if ((phba->pport->fc_flag & FC_PT2PT) &&
17802 !(phba->link_state == LPFC_HBA_READY))
17803 return phba->pport;
17804
17805 vports = lpfc_create_vport_work_array(phba);
17806 if (vports != NULL) {
17807 for (i = 0; i <= phba->max_vpi && vports[i] != NULL; i++) {
17808 if (phba->fcf.fcfi == fcfi &&
17809 vports[i]->vfi == lpfc_fc_hdr_get_vfi(fc_hdr) &&
17810 vports[i]->fc_myDID == did) {
17811 vport = vports[i];
17812 break;
17813 }
17814 }
17815 }
17816 lpfc_destroy_vport_work_array(phba, vports);
17817 return vport;
17818}
17819
17820
17821
17822
17823
17824
17825
17826
17827
17828
17829
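/**
 * lpfc_update_rcv_time_stamp - Refresh the vport receive buffer time stamp
 * @vport: The vport whose partially assembled sequences are tracked.
 *
 * Copies the time stamp of the oldest sequence on the receive buffer list
 * into the vport so sequence timeouts can be checked cheaply.
 **/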
17830static void
17831lpfc_update_rcv_time_stamp(struct lpfc_vport *vport)
17832{
17833 struct lpfc_dmabuf *h_buf;
17834 struct hbq_dmabuf *dmabuf = NULL;
17835
17836
17837 h_buf = list_get_first(&vport->rcv_buffer_list,
17838 struct lpfc_dmabuf, list);
17839 if (!h_buf)
17840 return;
17841 dmabuf = container_of(h_buf, struct hbq_dmabuf, hbuf);
17842 vport->rcv_buffer_time_stamp = dmabuf->time_stamp;
17843}
17844
17845
17846
17847
17848
17849
17850
17851
17852
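/**
 * lpfc_cleanup_rcv_buffers - Free all partially assembled sequences
 * @vport: The vport whose receive buffer list is to be cleaned up.
 *
 * Releases every header and data buffer queued on the vport's receive
 * buffer list back to the buffer pool.
 **/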
17853void
17854lpfc_cleanup_rcv_buffers(struct lpfc_vport *vport)
17855{
17856 struct lpfc_dmabuf *h_buf, *hnext;
17857 struct lpfc_dmabuf *d_buf, *dnext;
17858 struct hbq_dmabuf *dmabuf = NULL;
17859
17860
17861 list_for_each_entry_safe(h_buf, hnext, &vport->rcv_buffer_list, list) {
17862 dmabuf = container_of(h_buf, struct hbq_dmabuf, hbuf);
17863 list_del_init(&dmabuf->hbuf.list);
17864 list_for_each_entry_safe(d_buf, dnext,
17865 &dmabuf->dbuf.list, list) {
17866 list_del_init(&d_buf->list);
17867 lpfc_in_buf_free(vport->phba, d_buf);
17868 }
17869 lpfc_in_buf_free(vport->phba, &dmabuf->dbuf);
17870 }
17871}
17872
17873
17874
17875
17876
17877
17878
17879
17880
17881
17882
17883
17884
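/**
 * lpfc_rcv_seq_check_edtov - Time out stale partially assembled sequences
 * @vport: The vport whose receive buffer list is to be checked.
 *
 * Frees any partially assembled sequence that has been sitting on the
 * receive buffer list for longer than E_D_TOV and refreshes the vport
 * receive time stamp if anything was discarded.
 **/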
17885void
17886lpfc_rcv_seq_check_edtov(struct lpfc_vport *vport)
17887{
17888 struct lpfc_dmabuf *h_buf, *hnext;
17889 struct lpfc_dmabuf *d_buf, *dnext;
17890 struct hbq_dmabuf *dmabuf = NULL;
17891 unsigned long timeout;
17892 int abort_count = 0;
17893
17894 timeout = (msecs_to_jiffies(vport->phba->fc_edtov) +
17895 vport->rcv_buffer_time_stamp);
17896 if (list_empty(&vport->rcv_buffer_list) ||
17897 time_before(jiffies, timeout))
17898 return;
17899
17900 list_for_each_entry_safe(h_buf, hnext, &vport->rcv_buffer_list, list) {
17901 dmabuf = container_of(h_buf, struct hbq_dmabuf, hbuf);
17902 timeout = (msecs_to_jiffies(vport->phba->fc_edtov) +
17903 dmabuf->time_stamp);
17904 if (time_before(jiffies, timeout))
17905 break;
17906 abort_count++;
17907 list_del_init(&dmabuf->hbuf.list);
17908 list_for_each_entry_safe(d_buf, dnext,
17909 &dmabuf->dbuf.list, list) {
17910 list_del_init(&d_buf->list);
17911 lpfc_in_buf_free(vport->phba, d_buf);
17912 }
17913 lpfc_in_buf_free(vport->phba, &dmabuf->dbuf);
17914 }
17915 if (abort_count)
17916 lpfc_update_rcv_time_stamp(vport);
17917}
17918
17919
17920
17921
17922
17923
17924
17925
17926
17927
17928
17929
17930
17931
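/**
 * lpfc_fc_frame_add - Add a received frame to the sequence it belongs to
 * @vport: The vport the frame was received on.
 * @dmabuf: The HBQ buffer holding the received frame.
 *
 * Searches the vport's receive buffer list for a sequence with a matching
 * SEQ_ID, OX_ID and S_ID. If none exists, the frame starts a new
 * sequence. Otherwise the frame is linked into the existing sequence in
 * SEQ_CNT order and the sequence's time stamp is refreshed.
 *
 * Return: the sequence the frame was added to, or NULL if the frame could
 * not be placed (duplicate SEQ_CNT).
 **/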
17932static struct hbq_dmabuf *
17933lpfc_fc_frame_add(struct lpfc_vport *vport, struct hbq_dmabuf *dmabuf)
17934{
17935 struct fc_frame_header *new_hdr;
17936 struct fc_frame_header *temp_hdr;
17937 struct lpfc_dmabuf *d_buf;
17938 struct lpfc_dmabuf *h_buf;
17939 struct hbq_dmabuf *seq_dmabuf = NULL;
17940 struct hbq_dmabuf *temp_dmabuf = NULL;
17941 uint8_t found = 0;
17942
17943 INIT_LIST_HEAD(&dmabuf->dbuf.list);
17944 dmabuf->time_stamp = jiffies;
17945 new_hdr = (struct fc_frame_header *)dmabuf->hbuf.virt;
17946
17947
17948 list_for_each_entry(h_buf, &vport->rcv_buffer_list, list) {
17949 temp_hdr = (struct fc_frame_header *)h_buf->virt;
17950 if ((temp_hdr->fh_seq_id != new_hdr->fh_seq_id) ||
17951 (temp_hdr->fh_ox_id != new_hdr->fh_ox_id) ||
17952 (memcmp(&temp_hdr->fh_s_id, &new_hdr->fh_s_id, 3)))
17953 continue;
17954
17955 seq_dmabuf = container_of(h_buf, struct hbq_dmabuf, hbuf);
17956 break;
17957 }
17958 if (!seq_dmabuf) {
17959
17960
17961
17962
17963 list_add_tail(&dmabuf->hbuf.list, &vport->rcv_buffer_list);
17964 lpfc_update_rcv_time_stamp(vport);
17965 return dmabuf;
17966 }
17967 temp_hdr = seq_dmabuf->hbuf.virt;
17968 if (be16_to_cpu(new_hdr->fh_seq_cnt) <
17969 be16_to_cpu(temp_hdr->fh_seq_cnt)) {
17970 list_del_init(&seq_dmabuf->hbuf.list);
17971 list_add_tail(&dmabuf->hbuf.list, &vport->rcv_buffer_list);
17972 list_add_tail(&dmabuf->dbuf.list, &seq_dmabuf->dbuf.list);
17973 lpfc_update_rcv_time_stamp(vport);
17974 return dmabuf;
17975 }
17976
17977 list_move_tail(&seq_dmabuf->hbuf.list, &vport->rcv_buffer_list);
17978 seq_dmabuf->time_stamp = jiffies;
17979 lpfc_update_rcv_time_stamp(vport);
17980 if (list_empty(&seq_dmabuf->dbuf.list)) {
17981 list_add_tail(&dmabuf->dbuf.list, &seq_dmabuf->dbuf.list);
17982 return seq_dmabuf;
17983 }
17984
17985 d_buf = list_entry(seq_dmabuf->dbuf.list.prev, typeof(*d_buf), list);
17986 while (!found) {
17987 temp_dmabuf = container_of(d_buf, struct hbq_dmabuf, dbuf);
17988 temp_hdr = (struct fc_frame_header *)temp_dmabuf->hbuf.virt;
17989
17990
17991
17992
17993 if (be16_to_cpu(new_hdr->fh_seq_cnt) >
17994 be16_to_cpu(temp_hdr->fh_seq_cnt)) {
17995 list_add(&dmabuf->dbuf.list, &temp_dmabuf->dbuf.list);
17996 found = 1;
17997 break;
17998 }
17999
18000 if (&d_buf->list == &seq_dmabuf->dbuf.list)
18001 break;
18002 d_buf = list_entry(d_buf->list.prev, typeof(*d_buf), list);
18003 }
18004
18005 if (found)
18006 return seq_dmabuf;
18007 return NULL;
18008}
18009
18010
18011
18012
18013
18014
18015
18016
18017
18018
18019
18020
18021
18022
18023
18024
18025
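/**
 * lpfc_sli4_abort_partial_seq - Abort a partially assembled sequence
 * @vport: The vport the ABTS frame was received on.
 * @dmabuf: The HBQ buffer holding the received ABTS frame.
 *
 * Looks for a partially assembled sequence matching the ABTS frame's
 * SEQ_ID, OX_ID and S_ID and, if found, frees all of its buffers.
 *
 * Return: true if a matching sequence was found and aborted, else false.
 **/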
18026static bool
18027lpfc_sli4_abort_partial_seq(struct lpfc_vport *vport,
18028 struct hbq_dmabuf *dmabuf)
18029{
18030 struct fc_frame_header *new_hdr;
18031 struct fc_frame_header *temp_hdr;
18032 struct lpfc_dmabuf *d_buf, *n_buf, *h_buf;
18033 struct hbq_dmabuf *seq_dmabuf = NULL;
18034
18035
18036 INIT_LIST_HEAD(&dmabuf->dbuf.list);
18037 INIT_LIST_HEAD(&dmabuf->hbuf.list);
18038 new_hdr = (struct fc_frame_header *)dmabuf->hbuf.virt;
18039 list_for_each_entry(h_buf, &vport->rcv_buffer_list, list) {
18040 temp_hdr = (struct fc_frame_header *)h_buf->virt;
18041 if ((temp_hdr->fh_seq_id != new_hdr->fh_seq_id) ||
18042 (temp_hdr->fh_ox_id != new_hdr->fh_ox_id) ||
18043 (memcmp(&temp_hdr->fh_s_id, &new_hdr->fh_s_id, 3)))
18044 continue;
18045
18046 seq_dmabuf = container_of(h_buf, struct hbq_dmabuf, hbuf);
18047 break;
18048 }
18049
18050
18051 if (seq_dmabuf) {
18052 list_for_each_entry_safe(d_buf, n_buf,
18053 &seq_dmabuf->dbuf.list, list) {
18054 list_del_init(&d_buf->list);
18055 lpfc_in_buf_free(vport->phba, d_buf);
18056 }
18057 return true;
18058 }
18059 return false;
18060}
18061
18062
18063
18064
18065
18066
18067
18068
18069
18070
18071
18072
18073
18074
18075
18076
18077
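/**
 * lpfc_sli4_abort_ulp_seq - Let the upper layer handle an unsolicited abort
 * @vport: The vport the ABTS frame was received on.
 * @dmabuf: The HBQ buffer holding the received ABTS frame.
 *
 * Gives the CT upper layer protocol a chance to abort the exchange the
 * ABTS refers to (SLI4 ports only).
 *
 * Return: true if the ULP handled the abort, else false.
 **/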
18078static bool
18079lpfc_sli4_abort_ulp_seq(struct lpfc_vport *vport, struct hbq_dmabuf *dmabuf)
18080{
18081 struct lpfc_hba *phba = vport->phba;
18082 int handled;
18083
18084
18085 if (phba->sli_rev < LPFC_SLI_REV4)
18086 return false;
18087
18088
18089 handled = lpfc_ct_handle_unsol_abort(phba, dmabuf);
18090 if (handled)
18091 return true;
18092
18093 return false;
18094}
18095
18096
18097
18098
18099
18100
18101
18102
18103
18104
18105
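/**
 * lpfc_sli4_seq_abort_rsp_cmpl - BLS ABORT response completion handler
 * @phba: Pointer to the HBA structure.
 * @cmd_iocbq: The BLS response iocb that completed.
 * @rsp_iocbq: The response iocb returned by the firmware.
 *
 * Drops the node reference taken when the response was issued, releases
 * the command iocb, and logs any error status reported for the response.
 **/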
18106static void
18107lpfc_sli4_seq_abort_rsp_cmpl(struct lpfc_hba *phba,
18108 struct lpfc_iocbq *cmd_iocbq,
18109 struct lpfc_iocbq *rsp_iocbq)
18110{
18111 struct lpfc_nodelist *ndlp;
18112
18113 if (cmd_iocbq) {
18114 ndlp = (struct lpfc_nodelist *)cmd_iocbq->context1;
18115 lpfc_nlp_put(ndlp);
18116 lpfc_sli_release_iocbq(phba, cmd_iocbq);
18117 }
18118
18119
18120 if (rsp_iocbq && rsp_iocbq->iocb.ulpStatus)
18121 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
18122 "3154 BLS ABORT RSP failed, data: x%x/x%x\n",
18123 rsp_iocbq->iocb.ulpStatus,
18124 rsp_iocbq->iocb.un.ulpWord[4]);
18125}
18126
18127
18128
18129
18130
18131
18132
18133
18134
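/**
 * lpfc_sli4_xri_inrange - Map a physical XRI value to its driver index
 * @phba: Pointer to the HBA structure.
 * @xri: The physical XRI value to look up.
 *
 * Return: the driver index of @xri in the xri_ids table, or NO_XRI if the
 * value is not owned by this port.
 **/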
18135uint16_t
18136lpfc_sli4_xri_inrange(struct lpfc_hba *phba,
18137 uint16_t xri)
18138{
18139 uint16_t i;
18140
18141 for (i = 0; i < phba->sli4_hba.max_cfg_param.max_xri; i++) {
18142 if (xri == phba->sli4_hba.xri_ids[i])
18143 return i;
18144 }
18145 return NO_XRI;
18146}
18147
18148
18149
18150
18151
18152
18153
18154
18155
18156
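/**
 * lpfc_sli4_seq_abort_rsp - Send a BLS response to an unsolicited ABTS
 * @vport: The vport the ABTS frame was received on.
 * @fc_hdr: Header of the received ABTS frame.
 * @aborted: Whether the exchange was actually aborted by the driver.
 *
 * Builds an XMIT_BLS_RSP64_CX iocb addressed to the ABTS originator and
 * issues either a BA_ACC (exchange aborted) or a BA_RJT (exchange unknown
 * or not aborted), marking the XRI in the RRQ list when it is in range.
 **/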
18157void
18158lpfc_sli4_seq_abort_rsp(struct lpfc_vport *vport,
18159 struct fc_frame_header *fc_hdr, bool aborted)
18160{
18161 struct lpfc_hba *phba = vport->phba;
18162 struct lpfc_iocbq *ctiocb = NULL;
18163 struct lpfc_nodelist *ndlp;
18164 uint16_t oxid, rxid, xri, lxri;
18165 uint32_t sid, fctl;
18166 IOCB_t *icmd;
18167 int rc;
18168
18169 if (!lpfc_is_link_up(phba))
18170 return;
18171
18172 sid = sli4_sid_from_fc_hdr(fc_hdr);
18173 oxid = be16_to_cpu(fc_hdr->fh_ox_id);
18174 rxid = be16_to_cpu(fc_hdr->fh_rx_id);
18175
18176 ndlp = lpfc_findnode_did(vport, sid);
18177 if (!ndlp) {
18178 ndlp = lpfc_nlp_init(vport, sid);
18179 if (!ndlp) {
18180 lpfc_printf_vlog(vport, KERN_WARNING, LOG_ELS,
18181 "1268 Failed to allocate ndlp for "
18182 "oxid:x%x SID:x%x\n", oxid, sid);
18183 return;
18184 }
18185
18186 lpfc_enqueue_node(vport, ndlp);
18187 }
18188
18189
18190 ctiocb = lpfc_sli_get_iocbq(phba);
18191 if (!ctiocb)
18192 return;
18193
18194
18195 fctl = sli4_fctl_from_fc_hdr(fc_hdr);
18196
18197 icmd = &ctiocb->iocb;
18198 icmd->un.xseq64.bdl.bdeSize = 0;
18199 icmd->un.xseq64.bdl.ulpIoTag32 = 0;
18200 icmd->un.xseq64.w5.hcsw.Dfctl = 0;
18201 icmd->un.xseq64.w5.hcsw.Rctl = FC_RCTL_BA_ACC;
18202 icmd->un.xseq64.w5.hcsw.Type = FC_TYPE_BLS;
18203
18204
18205 icmd->ulpCommand = CMD_XMIT_BLS_RSP64_CX;
18206 icmd->ulpBdeCount = 0;
18207 icmd->ulpLe = 1;
18208 icmd->ulpClass = CLASS3;
18209 icmd->ulpContext = phba->sli4_hba.rpi_ids[ndlp->nlp_rpi];
18210 ctiocb->context1 = lpfc_nlp_get(ndlp);
18211 if (!ctiocb->context1) {
18212 lpfc_sli_release_iocbq(phba, ctiocb);
18213 return;
18214 }
18215
18216 ctiocb->vport = phba->pport;
18217 ctiocb->iocb_cmpl = lpfc_sli4_seq_abort_rsp_cmpl;
18218 ctiocb->sli4_lxritag = NO_XRI;
18219 ctiocb->sli4_xritag = NO_XRI;
18220
18221 if (fctl & FC_FC_EX_CTX)
18222
18223
18224
18225 xri = oxid;
18226 else
18227 xri = rxid;
18228 lxri = lpfc_sli4_xri_inrange(phba, xri);
18229 if (lxri != NO_XRI)
18230 lpfc_set_rrq_active(phba, ndlp, lxri,
18231 (xri == oxid) ? rxid : oxid, 0);
18232
18233
18234
18235
18236
18237 if ((fctl & FC_FC_EX_CTX) &&
18238 (lxri > lpfc_sli4_get_iocb_cnt(phba))) {
18239 icmd->un.xseq64.w5.hcsw.Rctl = FC_RCTL_BA_RJT;
18240 bf_set(lpfc_vndr_code, &icmd->un.bls_rsp, 0);
18241 bf_set(lpfc_rsn_expln, &icmd->un.bls_rsp, FC_BA_RJT_INV_XID);
18242 bf_set(lpfc_rsn_code, &icmd->un.bls_rsp, FC_BA_RJT_UNABLE);
18243 }
18244
18245
18246
18247
18248
18249 if (aborted == false) {
18250 icmd->un.xseq64.w5.hcsw.Rctl = FC_RCTL_BA_RJT;
18251 bf_set(lpfc_vndr_code, &icmd->un.bls_rsp, 0);
18252 bf_set(lpfc_rsn_expln, &icmd->un.bls_rsp, FC_BA_RJT_INV_XID);
18253 bf_set(lpfc_rsn_code, &icmd->un.bls_rsp, FC_BA_RJT_UNABLE);
18254 }
18255
18256 if (fctl & FC_FC_EX_CTX) {
18257
18258
18259
18260
18261 bf_set(lpfc_abts_orig, &icmd->un.bls_rsp, LPFC_ABTS_UNSOL_RSP);
18262 } else {
18263
18264
18265
18266
18267 bf_set(lpfc_abts_orig, &icmd->un.bls_rsp, LPFC_ABTS_UNSOL_INT);
18268 }
18269 bf_set(lpfc_abts_rxid, &icmd->un.bls_rsp, rxid);
18270 bf_set(lpfc_abts_oxid, &icmd->un.bls_rsp, oxid);
18271
18272
18273 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
18274 "1200 Send BLS cmd x%x on oxid x%x Data: x%x\n",
18275 icmd->un.xseq64.w5.hcsw.Rctl, oxid, phba->link_state);
18276
18277 rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, ctiocb, 0);
18278 if (rc == IOCB_ERROR) {
18279 lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
18280 "2925 Failed to issue CT ABTS RSP x%x on "
18281 "xri x%x, Data x%x\n",
18282 icmd->un.xseq64.w5.hcsw.Rctl, oxid,
18283 phba->link_state);
18284 lpfc_nlp_put(ndlp);
18285 ctiocb->context1 = NULL;
18286 lpfc_sli_release_iocbq(phba, ctiocb);
18287 }
18288}
18289
18290
18291
18292
18293
18294
18295
18296
18297
18298
18299
18300
18301
18302
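/**
 * lpfc_sli4_handle_unsol_abort - Handle an unsolicited ABTS frame
 * @vport: The vport the ABTS frame was received on.
 * @dmabuf: The HBQ buffer holding the received ABTS frame.
 *
 * For responder exchanges the abort is accepted outright; for initiator
 * exchanges the driver first tries to abort a partially assembled
 * sequence and then lets the ULP handle it. NVMET targets process the
 * abort themselves; otherwise a BLS ACC/RJT response is sent.
 **/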
18303static void
18304lpfc_sli4_handle_unsol_abort(struct lpfc_vport *vport,
18305 struct hbq_dmabuf *dmabuf)
18306{
18307 struct lpfc_hba *phba = vport->phba;
18308 struct fc_frame_header fc_hdr;
18309 uint32_t fctl;
18310 bool aborted;
18311
18312
18313 memcpy(&fc_hdr, dmabuf->hbuf.virt, sizeof(struct fc_frame_header));
18314 fctl = sli4_fctl_from_fc_hdr(&fc_hdr);
18315
18316 if (fctl & FC_FC_EX_CTX) {
18317
18318 aborted = true;
18319 } else {
18320
18321 aborted = lpfc_sli4_abort_partial_seq(vport, dmabuf);
18322 if (aborted == false)
18323 aborted = lpfc_sli4_abort_ulp_seq(vport, dmabuf);
18324 }
18325 lpfc_in_buf_free(phba, &dmabuf->dbuf);
18326
18327 if (phba->nvmet_support) {
18328 lpfc_nvmet_rcv_unsol_abort(vport, &fc_hdr);
18329 return;
18330 }
18331
18332
18333 lpfc_sli4_seq_abort_rsp(vport, &fc_hdr, aborted);
18334}
18335
18336
18337
18338
18339
18340
18341
18342
18343
18344
18345
18346
18347
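/**
 * lpfc_seq_complete - Check whether a buffered sequence is complete
 * @dmabuf: The HBQ buffer holding the first frame of the sequence.
 *
 * Verifies that the sequence starts at SEQ_CNT zero, that the queued
 * frames have consecutive SEQ_CNT values, and that one of them carries
 * the END_SEQ bit.
 *
 * Return: 1 if the sequence is complete, 0 otherwise.
 **/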
18348static int
18349lpfc_seq_complete(struct hbq_dmabuf *dmabuf)
18350{
18351 struct fc_frame_header *hdr;
18352 struct lpfc_dmabuf *d_buf;
18353 struct hbq_dmabuf *seq_dmabuf;
18354 uint32_t fctl;
18355 int seq_count = 0;
18356
18357 hdr = (struct fc_frame_header *)dmabuf->hbuf.virt;
18358
18359 if (hdr->fh_seq_cnt != seq_count)
18360 return 0;
18361 fctl = (hdr->fh_f_ctl[0] << 16 |
18362 hdr->fh_f_ctl[1] << 8 |
18363 hdr->fh_f_ctl[2]);
18364
18365 if (fctl & FC_FC_END_SEQ)
18366 return 1;
18367 list_for_each_entry(d_buf, &dmabuf->dbuf.list, list) {
18368 seq_dmabuf = container_of(d_buf, struct hbq_dmabuf, dbuf);
18369 hdr = (struct fc_frame_header *)seq_dmabuf->hbuf.virt;
18370
18371 if (++seq_count != be16_to_cpu(hdr->fh_seq_cnt))
18372 return 0;
18373 fctl = (hdr->fh_f_ctl[0] << 16 |
18374 hdr->fh_f_ctl[1] << 8 |
18375 hdr->fh_f_ctl[2]);
18376
18377 if (fctl & FC_FC_END_SEQ)
18378 return 1;
18379 }
18380 return 0;
18381}
18382
18383
18384
18385
18386
18387
18388
18389
18390
18391
18392
18393
18394
18395
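/**
 * lpfc_prep_seq - Build an iocbq chain from a completed receive sequence
 * @vport: The vport the sequence was received on.
 * @seq_dmabuf: The HBQ buffer holding the first frame of the sequence.
 *
 * Removes the sequence from the receive buffer list and converts its
 * frames into a chain of receive iocbs (two buffers per iocb) suitable
 * for handing to the upper layer, accumulating the total length as it
 * goes.
 *
 * Return: the first iocbq of the chain, or NULL if no iocb could be
 * allocated (in which case the sequence buffers are freed).
 **/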
18396static struct lpfc_iocbq *
18397lpfc_prep_seq(struct lpfc_vport *vport, struct hbq_dmabuf *seq_dmabuf)
18398{
18399 struct hbq_dmabuf *hbq_buf;
18400 struct lpfc_dmabuf *d_buf, *n_buf;
18401 struct lpfc_iocbq *first_iocbq, *iocbq;
18402 struct fc_frame_header *fc_hdr;
18403 uint32_t sid;
18404 uint32_t len, tot_len;
18405 struct ulp_bde64 *pbde;
18406
18407 fc_hdr = (struct fc_frame_header *)seq_dmabuf->hbuf.virt;
18408
18409 list_del_init(&seq_dmabuf->hbuf.list);
18410 lpfc_update_rcv_time_stamp(vport);
18411
18412 sid = sli4_sid_from_fc_hdr(fc_hdr);
18413 tot_len = 0;
18414
18415 first_iocbq = lpfc_sli_get_iocbq(vport->phba);
18416 if (first_iocbq) {
18417
18418 first_iocbq->iocb.unsli3.rcvsli3.acc_len = 0;
18419 first_iocbq->iocb.ulpStatus = IOSTAT_SUCCESS;
18420 first_iocbq->vport = vport;
18421
18422
18423 if (sli4_type_from_fc_hdr(fc_hdr) == FC_TYPE_ELS) {
18424 first_iocbq->iocb.ulpCommand = CMD_IOCB_RCV_ELS64_CX;
18425 first_iocbq->iocb.un.rcvels.parmRo =
18426 sli4_did_from_fc_hdr(fc_hdr);
18427 first_iocbq->iocb.ulpPU = PARM_NPIV_DID;
18428 } else
18429 first_iocbq->iocb.ulpCommand = CMD_IOCB_RCV_SEQ64_CX;
18430 first_iocbq->iocb.ulpContext = NO_XRI;
18431 first_iocbq->iocb.unsli3.rcvsli3.ox_id =
18432 be16_to_cpu(fc_hdr->fh_ox_id);
18433
18434 first_iocbq->iocb.unsli3.rcvsli3.vpi =
18435 vport->phba->vpi_ids[vport->vpi];
18436
18437 tot_len = bf_get(lpfc_rcqe_length,
18438 &seq_dmabuf->cq_event.cqe.rcqe_cmpl);
18439
18440 first_iocbq->context2 = &seq_dmabuf->dbuf;
18441 first_iocbq->context3 = NULL;
18442 first_iocbq->iocb.ulpBdeCount = 1;
18443 if (tot_len > LPFC_DATA_BUF_SIZE)
18444 first_iocbq->iocb.un.cont64[0].tus.f.bdeSize =
18445 LPFC_DATA_BUF_SIZE;
18446 else
18447 first_iocbq->iocb.un.cont64[0].tus.f.bdeSize = tot_len;
18448
18449 first_iocbq->iocb.un.rcvels.remoteID = sid;
18450
18451 first_iocbq->iocb.unsli3.rcvsli3.acc_len = tot_len;
18452 }
18453 iocbq = first_iocbq;
18454
18455
18456
18457
18458 list_for_each_entry_safe(d_buf, n_buf, &seq_dmabuf->dbuf.list, list) {
18459 if (!iocbq) {
18460 lpfc_in_buf_free(vport->phba, d_buf);
18461 continue;
18462 }
18463 if (!iocbq->context3) {
18464 iocbq->context3 = d_buf;
18465 iocbq->iocb.ulpBdeCount++;
18466
18467 hbq_buf = container_of(d_buf, struct hbq_dmabuf, dbuf);
18468 len = bf_get(lpfc_rcqe_length,
18469 &hbq_buf->cq_event.cqe.rcqe_cmpl);
18470 pbde = (struct ulp_bde64 *)
18471 &iocbq->iocb.unsli3.sli3Words[4];
18472 if (len > LPFC_DATA_BUF_SIZE)
18473 pbde->tus.f.bdeSize = LPFC_DATA_BUF_SIZE;
18474 else
18475 pbde->tus.f.bdeSize = len;
18476
18477 iocbq->iocb.unsli3.rcvsli3.acc_len += len;
18478 tot_len += len;
18479 } else {
18480 iocbq = lpfc_sli_get_iocbq(vport->phba);
18481 if (!iocbq) {
18482 if (first_iocbq) {
18483 first_iocbq->iocb.ulpStatus =
18484 IOSTAT_FCP_RSP_ERROR;
18485 first_iocbq->iocb.un.ulpWord[4] =
18486 IOERR_NO_RESOURCES;
18487 }
18488 lpfc_in_buf_free(vport->phba, d_buf);
18489 continue;
18490 }
18491
18492 hbq_buf = container_of(d_buf, struct hbq_dmabuf, dbuf);
18493 len = bf_get(lpfc_rcqe_length,
18494 &hbq_buf->cq_event.cqe.rcqe_cmpl);
18495 iocbq->context2 = d_buf;
18496 iocbq->context3 = NULL;
18497 iocbq->iocb.ulpBdeCount = 1;
18498 if (len > LPFC_DATA_BUF_SIZE)
18499 iocbq->iocb.un.cont64[0].tus.f.bdeSize =
18500 LPFC_DATA_BUF_SIZE;
18501 else
18502 iocbq->iocb.un.cont64[0].tus.f.bdeSize = len;
18503
18504 tot_len += len;
18505 iocbq->iocb.unsli3.rcvsli3.acc_len = tot_len;
18506
18507 iocbq->iocb.un.rcvels.remoteID = sid;
18508 list_add_tail(&iocbq->list, &first_iocbq->list);
18509 }
18510 }
18511
18512 if (!first_iocbq)
18513 lpfc_in_buf_free(vport->phba, &seq_dmabuf->dbuf);
18514
18515 return first_iocbq;
18516}
18517
18518static void
18519lpfc_sli4_send_seq_to_ulp(struct lpfc_vport *vport,
18520 struct hbq_dmabuf *seq_dmabuf)
18521{
18522 struct fc_frame_header *fc_hdr;
18523 struct lpfc_iocbq *iocbq, *curr_iocb, *next_iocb;
18524 struct lpfc_hba *phba = vport->phba;
18525
18526 fc_hdr = (struct fc_frame_header *)seq_dmabuf->hbuf.virt;
18527 iocbq = lpfc_prep_seq(vport, seq_dmabuf);
18528 if (!iocbq) {
18529 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
18530 "2707 Ring %d handler: Failed to allocate "
18531 "iocb Rctl x%x Type x%x received\n",
18532 LPFC_ELS_RING,
18533 fc_hdr->fh_r_ctl, fc_hdr->fh_type);
18534 return;
18535 }
18536 if (!lpfc_complete_unsol_iocb(phba,
18537 phba->sli4_hba.els_wq->pring,
18538 iocbq, fc_hdr->fh_r_ctl,
18539 fc_hdr->fh_type))
18540 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
18541 "2540 Ring %d handler: unexpected Rctl "
18542 "x%x Type x%x received\n",
18543 LPFC_ELS_RING,
18544 fc_hdr->fh_r_ctl, fc_hdr->fh_type);
18545
18546
18547 list_for_each_entry_safe(curr_iocb, next_iocb,
18548 &iocbq->list, list) {
18549 list_del_init(&curr_iocb->list);
18550 lpfc_sli_release_iocbq(phba, curr_iocb);
18551 }
18552 lpfc_sli_release_iocbq(phba, iocbq);
18553}
18554
18555static void
18556lpfc_sli4_mds_loopback_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
18557 struct lpfc_iocbq *rspiocb)
18558{
18559 struct lpfc_dmabuf *pcmd = cmdiocb->context2;
18560
18561 if (pcmd && pcmd->virt)
18562 dma_pool_free(phba->lpfc_drb_pool, pcmd->virt, pcmd->phys);
18563 kfree(pcmd);
18564 lpfc_sli_release_iocbq(phba, cmdiocb);
18565 lpfc_drain_txq(phba);
18566}
18567
18568static void
18569lpfc_sli4_handle_mds_loopback(struct lpfc_vport *vport,
18570 struct hbq_dmabuf *dmabuf)
18571{
18572 struct fc_frame_header *fc_hdr;
18573 struct lpfc_hba *phba = vport->phba;
18574 struct lpfc_iocbq *iocbq = NULL;
18575 union lpfc_wqe *wqe;
18576 struct lpfc_dmabuf *pcmd = NULL;
18577 uint32_t frame_len;
18578 int rc;
18579 unsigned long iflags;
18580
18581 fc_hdr = (struct fc_frame_header *)dmabuf->hbuf.virt;
18582 frame_len = bf_get(lpfc_rcqe_length, &dmabuf->cq_event.cqe.rcqe_cmpl);
18583
18584
18585 iocbq = lpfc_sli_get_iocbq(phba);
18586 if (!iocbq) {
18587
18588 spin_lock_irqsave(&phba->hbalock, iflags);
18589 list_add_tail(&dmabuf->cq_event.list,
18590 &phba->sli4_hba.sp_queue_event);
18591 phba->hba_flag |= HBA_SP_QUEUE_EVT;
18592 spin_unlock_irqrestore(&phba->hbalock, iflags);
18593 lpfc_worker_wake_up(phba);
18594 return;
18595 }
18596
18597
18598 pcmd = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
18599 if (pcmd)
18600 pcmd->virt = dma_pool_alloc(phba->lpfc_drb_pool, GFP_KERNEL,
18601 &pcmd->phys);
18602 if (!pcmd || !pcmd->virt)
18603 goto exit;
18604
18605 INIT_LIST_HEAD(&pcmd->list);
18606
18607
18608 memcpy(pcmd->virt, dmabuf->dbuf.virt, frame_len);
18609
18610
18611 iocbq->iocb.un.xseq64.bdl.addrHigh = putPaddrHigh(pcmd->phys);
18612 iocbq->iocb.un.xseq64.bdl.addrLow = putPaddrLow(pcmd->phys);
18613 iocbq->iocb.un.xseq64.bdl.bdeFlags = BUFF_TYPE_BDE_64;
18614 iocbq->iocb.un.xseq64.bdl.bdeSize = frame_len;
18615
18616 iocbq->context2 = pcmd;
18617 iocbq->vport = vport;
18618 iocbq->iocb_flag &= ~LPFC_FIP_ELS_ID_MASK;
18619 iocbq->iocb_flag |= LPFC_USE_FCPWQIDX;
18620
18621
18622
18623
18624
18625 wqe = (union lpfc_wqe *)&iocbq->iocb;
18626
18627 wqe->send_frame.frame_len = frame_len;
18628 wqe->send_frame.fc_hdr_wd0 = be32_to_cpu(*((uint32_t *)fc_hdr));
18629 wqe->send_frame.fc_hdr_wd1 = be32_to_cpu(*((uint32_t *)fc_hdr + 1));
18630 wqe->send_frame.fc_hdr_wd2 = be32_to_cpu(*((uint32_t *)fc_hdr + 2));
18631 wqe->send_frame.fc_hdr_wd3 = be32_to_cpu(*((uint32_t *)fc_hdr + 3));
18632 wqe->send_frame.fc_hdr_wd4 = be32_to_cpu(*((uint32_t *)fc_hdr + 4));
18633 wqe->send_frame.fc_hdr_wd5 = be32_to_cpu(*((uint32_t *)fc_hdr + 5));
18634
18635 iocbq->iocb.ulpCommand = CMD_SEND_FRAME;
18636 iocbq->iocb.ulpLe = 1;
18637 iocbq->iocb_cmpl = lpfc_sli4_mds_loopback_cmpl;
18638 rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, iocbq, 0);
18639 if (rc == IOCB_ERROR)
18640 goto exit;
18641
18642 lpfc_in_buf_free(phba, &dmabuf->dbuf);
18643 return;
18644
18645exit:
18646 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
18647 "2023 Unable to process MDS loopback frame\n");
18648 if (pcmd && pcmd->virt)
18649 dma_pool_free(phba->lpfc_drb_pool, pcmd->virt, pcmd->phys);
18650 kfree(pcmd);
18651 if (iocbq)
18652 lpfc_sli_release_iocbq(phba, iocbq);
18653 lpfc_in_buf_free(phba, &dmabuf->dbuf);
18654}
18655
18656
18657
18658
18659
18660
18661
18662
18663
18664
18665
18666
18667
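/**
 * lpfc_sli4_handle_received_buffer - Process an unsolicited receive buffer
 * @phba: Pointer to the HBA structure.
 * @dmabuf: The HBQ buffer holding the received frame.
 *
 * Entry point for unsolicited frames on an SLI4 port: MDS diagnostic
 * loopback frames are echoed back, invalid frames are dropped, the
 * destination vport is resolved from the FCFI/VFI/DID, unsolicited ABTS
 * frames are handled, and all other frames are accumulated until the
 * sequence completes and can be handed to the upper layer.
 **/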
18668void
18669lpfc_sli4_handle_received_buffer(struct lpfc_hba *phba,
18670 struct hbq_dmabuf *dmabuf)
18671{
18672 struct hbq_dmabuf *seq_dmabuf;
18673 struct fc_frame_header *fc_hdr;
18674 struct lpfc_vport *vport;
18675 uint32_t fcfi;
18676 uint32_t did;
18677
18678
18679 fc_hdr = (struct fc_frame_header *)dmabuf->hbuf.virt;
18680
18681 if (fc_hdr->fh_r_ctl == FC_RCTL_MDS_DIAGS ||
18682 fc_hdr->fh_r_ctl == FC_RCTL_DD_UNSOL_DATA) {
18683 vport = phba->pport;
18684
18685 if (!(phba->pport->load_flag & FC_UNLOADING))
18686 lpfc_sli4_handle_mds_loopback(vport, dmabuf);
18687 else
18688 lpfc_in_buf_free(phba, &dmabuf->dbuf);
18689 return;
18690 }
18691
18692
18693 if (lpfc_fc_frame_check(phba, fc_hdr)) {
18694 lpfc_in_buf_free(phba, &dmabuf->dbuf);
18695 return;
18696 }
18697
18698 if ((bf_get(lpfc_cqe_code,
18699 &dmabuf->cq_event.cqe.rcqe_cmpl) == CQE_CODE_RECEIVE_V1))
18700 fcfi = bf_get(lpfc_rcqe_fcf_id_v1,
18701 &dmabuf->cq_event.cqe.rcqe_cmpl);
18702 else
18703 fcfi = bf_get(lpfc_rcqe_fcf_id,
18704 &dmabuf->cq_event.cqe.rcqe_cmpl);
18705
18706 if (fc_hdr->fh_r_ctl == 0xF4 && fc_hdr->fh_type == 0xFF) {
18707 vport = phba->pport;
18708 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
18709 "2023 MDS Loopback %d bytes\n",
18710 bf_get(lpfc_rcqe_length,
18711 &dmabuf->cq_event.cqe.rcqe_cmpl));
18712
18713 lpfc_sli4_handle_mds_loopback(vport, dmabuf);
18714 return;
18715 }
18716
18717
18718 did = sli4_did_from_fc_hdr(fc_hdr);
18719
18720 vport = lpfc_fc_frame_to_vport(phba, fc_hdr, fcfi, did);
18721 if (!vport) {
18722
18723 lpfc_in_buf_free(phba, &dmabuf->dbuf);
18724 return;
18725 }
18726
18727
18728 if (!(vport->vpi_state & LPFC_VPI_REGISTERED) &&
18729 (did != Fabric_DID)) {
18730
18731
18732
18733
18734
18735 if (!(vport->fc_flag & FC_PT2PT) ||
18736 (phba->link_state == LPFC_HBA_READY)) {
18737 lpfc_in_buf_free(phba, &dmabuf->dbuf);
18738 return;
18739 }
18740 }
18741
18742
18743 if (fc_hdr->fh_r_ctl == FC_RCTL_BA_ABTS) {
18744 lpfc_sli4_handle_unsol_abort(vport, dmabuf);
18745 return;
18746 }
18747
18748
18749 seq_dmabuf = lpfc_fc_frame_add(vport, dmabuf);
18750 if (!seq_dmabuf) {
18751
18752 lpfc_in_buf_free(phba, &dmabuf->dbuf);
18753 return;
18754 }
18755
18756 if (!lpfc_seq_complete(seq_dmabuf))
18757 return;
18758
18759
18760 lpfc_sli4_send_seq_to_ulp(vport, seq_dmabuf);
18761}
18762
18763
18764
18765
18766
18767
18768
18769
18770
18771
18772
18773
18774
18775
18776
18777
18778
18779
18780
18781
18782
18783
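/**
 * lpfc_sli4_post_all_rpi_hdrs - Post all rpi header pages to the port
 * @phba: pointer to lpfc hba data structure.
 *
 * Walks the driver's list of RPI header pages and posts each one to the
 * port with lpfc_sli4_post_rpi_hdr(). Skipped when the port does not use
 * RPI headers; not supported when resource extents are in use.
 *
 * Return: 0 on success, -EIO on a posting or configuration error.
 **/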
18784int
18785lpfc_sli4_post_all_rpi_hdrs(struct lpfc_hba *phba)
18786{
18787 struct lpfc_rpi_hdr *rpi_page;
18788 uint32_t rc = 0;
18789 uint16_t lrpi = 0;
18790
18791
18792 if (!phba->sli4_hba.rpi_hdrs_in_use)
18793 goto exit;
18794 if (phba->sli4_hba.extents_in_use)
18795 return -EIO;
18796
18797 list_for_each_entry(rpi_page, &phba->sli4_hba.lpfc_rpi_hdr_list, list) {
18798
18799
18800
18801
18802
18803 if (bf_get(lpfc_rpi_rsrc_rdy, &phba->sli4_hba.sli4_flags) !=
18804 LPFC_RPI_RSRC_RDY)
18805 rpi_page->start_rpi = phba->sli4_hba.rpi_ids[lrpi];
18806
18807 rc = lpfc_sli4_post_rpi_hdr(phba, rpi_page);
18808 if (rc != MBX_SUCCESS) {
18809 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
18810 "2008 Error %d posting all rpi "
18811 "headers\n", rc);
18812 rc = -EIO;
18813 break;
18814 }
18815 }
18816
18817 exit:
18818 bf_set(lpfc_rpi_rsrc_rdy, &phba->sli4_hba.sli4_flags,
18819 LPFC_RPI_RSRC_RDY);
18820 return rc;
18821}
18822
18823
18824
18825
18826
18827
18828
18829
18830
18831
18832
18833
18834
18835
18836
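/**
 * lpfc_sli4_post_rpi_hdr - Post an rpi header memory region to the port
 * @phba: pointer to lpfc hba data structure.
 * @rpi_page: pointer to the rpi header page to post.
 *
 * Issues a polled POST_HDR_TEMPLATE mailbox command describing the DMA
 * region that holds the RPI header templates. On success the driver's
 * next_rpi bookkeeping is advanced under the hba lock.
 *
 * Return: 0 on success, -ENOMEM on allocation failure, -ENXIO on a mailbox
 * error, -EIO if resource extents are in use.
 **/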
18837int
18838lpfc_sli4_post_rpi_hdr(struct lpfc_hba *phba, struct lpfc_rpi_hdr *rpi_page)
18839{
18840 LPFC_MBOXQ_t *mboxq;
18841 struct lpfc_mbx_post_hdr_tmpl *hdr_tmpl;
18842 uint32_t rc = 0;
18843 uint32_t shdr_status, shdr_add_status;
18844 union lpfc_sli4_cfg_shdr *shdr;
18845
18846
18847 if (!phba->sli4_hba.rpi_hdrs_in_use)
18848 return rc;
18849 if (phba->sli4_hba.extents_in_use)
18850 return -EIO;
18851
18852
18853 mboxq = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
18854 if (!mboxq) {
18855 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
18856 "2001 Unable to allocate memory for issuing "
18857 "SLI_CONFIG_SPECIAL mailbox command\n");
18858 return -ENOMEM;
18859 }
18860
18861
18862 hdr_tmpl = &mboxq->u.mqe.un.hdr_tmpl;
18863 lpfc_sli4_config(phba, mboxq, LPFC_MBOX_SUBSYSTEM_FCOE,
18864 LPFC_MBOX_OPCODE_FCOE_POST_HDR_TEMPLATE,
18865 sizeof(struct lpfc_mbx_post_hdr_tmpl) -
18866 sizeof(struct lpfc_sli4_cfg_mhdr),
18867 LPFC_SLI4_MBX_EMBED);
18868
18869
18870
18871 bf_set(lpfc_mbx_post_hdr_tmpl_rpi_offset, hdr_tmpl,
18872 rpi_page->start_rpi);
18873 bf_set(lpfc_mbx_post_hdr_tmpl_page_cnt,
18874 hdr_tmpl, rpi_page->page_count);
18875
18876 hdr_tmpl->rpi_paddr_lo = putPaddrLow(rpi_page->dmabuf->phys);
18877 hdr_tmpl->rpi_paddr_hi = putPaddrHigh(rpi_page->dmabuf->phys);
18878 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
18879 shdr = (union lpfc_sli4_cfg_shdr *) &hdr_tmpl->header.cfg_shdr;
18880 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
18881 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
18882 mempool_free(mboxq, phba->mbox_mem_pool);
18883 if (shdr_status || shdr_add_status || rc) {
18884 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
18885 "2514 POST_RPI_HDR mailbox failed with "
18886 "status x%x add_status x%x, mbx status x%x\n",
18887 shdr_status, shdr_add_status, rc);
18888 rc = -ENXIO;
18889 } else {
18890
18891
18892
18893
18894 spin_lock_irq(&phba->hbalock);
18895 phba->sli4_hba.next_rpi = rpi_page->next_rpi;
18896 spin_unlock_irq(&phba->hbalock);
18897 }
18898 return rc;
18899}
18900
18901
18902
18903
18904
18905
18906
18907
18908
18909
18910
18911
18912
18913
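/**
 * lpfc_sli4_alloc_rpi - Allocate an available rpi from the port's range
 * @phba: pointer to lpfc hba data structure.
 *
 * Finds the first free bit in the RPI bitmask and marks it in use. When RPI
 * headers are in use and the number of remaining RPIs drops below the low
 * water mark, another RPI header page is created and posted so the pool can
 * keep growing.
 *
 * Return: the allocated rpi, or LPFC_RPI_ALLOC_ERROR if none is available.
 **/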
18914int
18915lpfc_sli4_alloc_rpi(struct lpfc_hba *phba)
18916{
18917 unsigned long rpi;
18918 uint16_t max_rpi, rpi_limit;
18919 uint16_t rpi_remaining, lrpi = 0;
18920 struct lpfc_rpi_hdr *rpi_hdr;
18921 unsigned long iflag;
18922
18923
18924
18925
18926
18927 spin_lock_irqsave(&phba->hbalock, iflag);
18928 max_rpi = phba->sli4_hba.max_cfg_param.max_rpi;
18929 rpi_limit = phba->sli4_hba.next_rpi;
18930
18931 rpi = find_next_zero_bit(phba->sli4_hba.rpi_bmask, rpi_limit, 0);
18932 if (rpi >= rpi_limit)
18933 rpi = LPFC_RPI_ALLOC_ERROR;
18934 else {
18935 set_bit(rpi, phba->sli4_hba.rpi_bmask);
18936 phba->sli4_hba.max_cfg_param.rpi_used++;
18937 phba->sli4_hba.rpi_count++;
18938 }
18939 lpfc_printf_log(phba, KERN_INFO,
18940 LOG_NODE | LOG_DISCOVERY,
18941 "0001 Allocated rpi:x%x max:x%x lim:x%x\n",
18942 (int) rpi, max_rpi, rpi_limit);
18943
18944
18945
18946
18947
18948 if ((rpi == LPFC_RPI_ALLOC_ERROR) &&
18949 (phba->sli4_hba.rpi_count >= max_rpi)) {
18950 spin_unlock_irqrestore(&phba->hbalock, iflag);
18951 return rpi;
18952 }
18953
18954
18955
18956
18957
18958 if (!phba->sli4_hba.rpi_hdrs_in_use) {
18959 spin_unlock_irqrestore(&phba->hbalock, iflag);
18960 return rpi;
18961 }
18962
18963
18964
18965
18966
18967
18968
18969 rpi_remaining = phba->sli4_hba.next_rpi - phba->sli4_hba.rpi_count;
18970 spin_unlock_irqrestore(&phba->hbalock, iflag);
18971 if (rpi_remaining < LPFC_RPI_LOW_WATER_MARK) {
18972 rpi_hdr = lpfc_sli4_create_rpi_hdr(phba);
18973 if (!rpi_hdr) {
18974 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
18975 "2002 Error Could not grow rpi "
18976 "count\n");
18977 } else {
18978 lrpi = rpi_hdr->start_rpi;
18979 rpi_hdr->start_rpi = phba->sli4_hba.rpi_ids[lrpi];
18980 lpfc_sli4_post_rpi_hdr(phba, rpi_hdr);
18981 }
18982 }
18983
18984 return rpi;
18985}
18986
18987
18988
18989
18990
18991
18992
18993
18994
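/**
 * __lpfc_sli4_free_rpi - Release an rpi for reuse
 * @phba: pointer to lpfc hba data structure.
 * @rpi: rpi to free.
 *
 * Clears the rpi's bit in the RPI bitmask and adjusts the usage counters.
 * The caller is expected to hold the hbalock.
 **/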
18995static void
18996__lpfc_sli4_free_rpi(struct lpfc_hba *phba, int rpi)
18997{
18998
18999
19000
19001
19002 if (rpi == LPFC_RPI_ALLOC_ERROR)
19003 return;
19004
19005 if (test_and_clear_bit(rpi, phba->sli4_hba.rpi_bmask)) {
19006 phba->sli4_hba.rpi_count--;
19007 phba->sli4_hba.max_cfg_param.rpi_used--;
19008 } else {
19009 lpfc_printf_log(phba, KERN_INFO,
19010 LOG_NODE | LOG_DISCOVERY,
19011 "2016 rpi %x not in use\n",
19012 rpi);
19013 }
19014}
19015
19016
19017
19018
19019
19020
19021
19022
19023
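/**
 * lpfc_sli4_free_rpi - Release an rpi for reuse
 * @phba: pointer to lpfc hba data structure.
 * @rpi: rpi to free.
 *
 * Lock-wrapped version of __lpfc_sli4_free_rpi().
 **/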
19024void
19025lpfc_sli4_free_rpi(struct lpfc_hba *phba, int rpi)
19026{
19027 spin_lock_irq(&phba->hbalock);
19028 __lpfc_sli4_free_rpi(phba, rpi);
19029 spin_unlock_irq(&phba->hbalock);
19030}
19031
19032
19033
19034
19035
19036
19037
19038
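/**
 * lpfc_sli4_remove_rpis - Remove the rpi bookkeeping resources
 * @phba: pointer to lpfc hba data structure.
 *
 * Frees the RPI bitmask and id array and clears the rpi-resource-ready flag.
 **/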
19039void
19040lpfc_sli4_remove_rpis(struct lpfc_hba *phba)
19041{
19042 kfree(phba->sli4_hba.rpi_bmask);
19043 kfree(phba->sli4_hba.rpi_ids);
19044 bf_set(lpfc_rpi_rsrc_rdy, &phba->sli4_hba.sli4_flags, 0);
19045}
19046
19047
19048
19049
19050
19051
19052
19053
19054
19055
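/**
 * lpfc_sli4_resume_rpi - Issue a RESUME_RPI mailbox command for a node
 * @ndlp: pointer to the lpfc nodelist data structure.
 * @cmpl: optional mailbox completion handler.
 * @arg: caller context stored in the mailbox ctx_buf when @cmpl is supplied.
 *
 * Takes a node reference and issues a RESUME_RPI mailbox command for the
 * node's rpi. On failure the node reference and mailbox are released.
 *
 * Return: 0 on success, -ENOMEM or -EIO on failure.
 **/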
19056int
19057lpfc_sli4_resume_rpi(struct lpfc_nodelist *ndlp,
19058 void (*cmpl)(struct lpfc_hba *, LPFC_MBOXQ_t *), void *arg)
19059{
19060 LPFC_MBOXQ_t *mboxq;
19061 struct lpfc_hba *phba = ndlp->phba;
19062 int rc;
19063
19064
19065 mboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
19066 if (!mboxq)
19067 return -ENOMEM;
19068
19069
19070
19071
19072
19073
19074
19075 if (!lpfc_nlp_get(ndlp)) {
19076 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
19077 "2122 %s: Failed to get nlp ref\n",
19078 __func__);
19079 mempool_free(mboxq, phba->mbox_mem_pool);
19080 return -EIO;
19081 }
19082
19083
19084 lpfc_resume_rpi(mboxq, ndlp);
19085 if (cmpl) {
19086 mboxq->mbox_cmpl = cmpl;
19087 mboxq->ctx_buf = arg;
19088 } else
19089 mboxq->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
19090 mboxq->ctx_ndlp = ndlp;
19091 mboxq->vport = ndlp->vport;
19092 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_NOWAIT);
19093 if (rc == MBX_NOT_FINISHED) {
19094 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
19095 "2010 Resume RPI Mailbox failed "
19096 "status %d, mbxStatus x%x\n", rc,
19097 bf_get(lpfc_mqe_status, &mboxq->u.mqe));
19098 lpfc_nlp_put(ndlp);
19099 mempool_free(mboxq, phba->mbox_mem_pool);
19100 return -EIO;
19101 }
19102 return 0;
19103}
19104
19105
19106
19107
19108
19109
19110
19111
19112
19113
19114
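/**
 * lpfc_sli4_init_vpi - Initialize a vpi with the port
 * @vport: pointer to the vport whose vpi is being initialized.
 *
 * Issues an INIT_VPI mailbox command synchronously for the vport's vpi.
 *
 * Return: 0 on success, -ENOMEM on allocation failure, -EIO on mailbox error.
 **/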
19115int
19116lpfc_sli4_init_vpi(struct lpfc_vport *vport)
19117{
19118 LPFC_MBOXQ_t *mboxq;
19119 int rc = 0;
19120 int retval = MBX_SUCCESS;
19121 uint32_t mbox_tmo;
19122 struct lpfc_hba *phba = vport->phba;
19123 mboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
19124 if (!mboxq)
19125 return -ENOMEM;
19126 lpfc_init_vpi(phba, mboxq, vport->vpi);
19127 mbox_tmo = lpfc_mbox_tmo_val(phba, mboxq);
19128 rc = lpfc_sli_issue_mbox_wait(phba, mboxq, mbox_tmo);
19129 if (rc != MBX_SUCCESS) {
19130 lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
19131 "2022 INIT VPI Mailbox failed "
19132 "status %d, mbxStatus x%x\n", rc,
19133 bf_get(lpfc_mqe_status, &mboxq->u.mqe));
19134 retval = -EIO;
19135 }
19136 if (rc != MBX_TIMEOUT)
19137 mempool_free(mboxq, vport->phba->mbox_mem_pool);
19138
19139 return retval;
19140}
19141
19142
19143
19144
19145
19146
19147
19148
19149
19150
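/**
 * lpfc_mbx_cmpl_add_fcf_record - Completion handler for the ADD_FCF mailbox
 * @phba: pointer to lpfc hba data structure.
 * @mboxq: pointer to the completed mailbox object.
 *
 * Checks the subheader status of the non-embedded ADD_FCF_RECORD command,
 * logs any failure other than "FCF in use", and frees the mailbox.
 **/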
19151static void
19152lpfc_mbx_cmpl_add_fcf_record(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
19153{
19154 void *virt_addr;
19155 union lpfc_sli4_cfg_shdr *shdr;
19156 uint32_t shdr_status, shdr_add_status;
19157
19158 virt_addr = mboxq->sge_array->addr[0];
19159
19160 shdr = (union lpfc_sli4_cfg_shdr *) virt_addr;
19161 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
19162 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
19163
19164 if ((shdr_status || shdr_add_status) &&
19165 (shdr_status != STATUS_FCF_IN_USE))
19166 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
19167 "2558 ADD_FCF_RECORD mailbox failed with "
19168 "status x%x add_status x%x\n",
19169 shdr_status, shdr_add_status);
19170
19171 lpfc_sli4_mbox_cmd_free(phba, mboxq);
19172}
19173
19174
19175
19176
19177
19178
19179
19180
19181
19182
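/**
 * lpfc_sli4_add_fcf_record - Add an FCF record to the port's FCF table
 * @phba: pointer to lpfc hba data structure.
 * @fcf_record: pointer to the FCF record to add.
 *
 * Builds a non-embedded ADD_FCF_RECORD mailbox command, copies the FCF
 * index and record into its SGE payload, and issues it asynchronously with
 * lpfc_mbx_cmpl_add_fcf_record() as the completion handler.
 *
 * Return: 0 on success, -ENOMEM or -EIO on failure.
 **/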
19183int
19184lpfc_sli4_add_fcf_record(struct lpfc_hba *phba, struct fcf_record *fcf_record)
19185{
19186 int rc = 0;
19187 LPFC_MBOXQ_t *mboxq;
19188 uint8_t *bytep;
19189 void *virt_addr;
19190 struct lpfc_mbx_sge sge;
19191 uint32_t alloc_len, req_len;
19192 uint32_t fcfindex;
19193
19194 mboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
19195 if (!mboxq) {
19196 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
19197 "2009 Failed to allocate mbox for ADD_FCF cmd\n");
19198 return -ENOMEM;
19199 }
19200
19201 req_len = sizeof(struct fcf_record) + sizeof(union lpfc_sli4_cfg_shdr) +
19202 sizeof(uint32_t);
19203
19204
19205 alloc_len = lpfc_sli4_config(phba, mboxq, LPFC_MBOX_SUBSYSTEM_FCOE,
19206 LPFC_MBOX_OPCODE_FCOE_ADD_FCF,
19207 req_len, LPFC_SLI4_MBX_NEMBED);
19208 if (alloc_len < req_len) {
19209 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
19210 "2523 Allocated DMA memory size (x%x) is "
19211 "less than the requested DMA memory "
19212 "size (x%x)\n", alloc_len, req_len);
19213 lpfc_sli4_mbox_cmd_free(phba, mboxq);
19214 return -ENOMEM;
19215 }
19216
19217
19218
19219
19220
19221 lpfc_sli4_mbx_sge_get(mboxq, 0, &sge);
19222 virt_addr = mboxq->sge_array->addr[0];
19223
19224
19225
19226
19227 fcfindex = bf_get(lpfc_fcf_record_fcf_index, fcf_record);
19228 bytep = virt_addr + sizeof(union lpfc_sli4_cfg_shdr);
19229 lpfc_sli_pcimem_bcopy(&fcfindex, bytep, sizeof(uint32_t));
19230
19231
19232
19233
19234
19235
19236 bytep += sizeof(uint32_t);
19237 lpfc_sli_pcimem_bcopy(fcf_record, bytep, sizeof(struct fcf_record));
19238 mboxq->vport = phba->pport;
19239 mboxq->mbox_cmpl = lpfc_mbx_cmpl_add_fcf_record;
19240 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_NOWAIT);
19241 if (rc == MBX_NOT_FINISHED) {
19242 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
19243 "2515 ADD_FCF_RECORD mailbox failed with "
19244 "status 0x%x\n", rc);
19245 lpfc_sli4_mbox_cmd_free(phba, mboxq);
19246 rc = -EIO;
19247 } else
19248 rc = 0;
19249
19250 return rc;
19251}
19252
19253
19254
19255
19256
19257
19258
19259
19260
19261
19262
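/**
 * lpfc_sli4_build_dflt_fcf_record - Build the driver's default FCF record
 * @phba: pointer to lpfc hba data structure.
 * @fcf_record: FCF record to initialize.
 * @fcf_index: FCF table index to assign to the record.
 *
 * Fills @fcf_record with the driver's default FCoE values (FC-MAP based
 * MAC address, FKA advertisement period, FIP priority, valid/available
 * bits) and, if a VLAN is configured, marks it in the record's VLAN bitmap.
 **/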
19263void
19264lpfc_sli4_build_dflt_fcf_record(struct lpfc_hba *phba,
19265 struct fcf_record *fcf_record,
19266 uint16_t fcf_index)
19267{
19268 memset(fcf_record, 0, sizeof(struct fcf_record));
19269 fcf_record->max_rcv_size = LPFC_FCOE_MAX_RCV_SIZE;
19270 fcf_record->fka_adv_period = LPFC_FCOE_FKA_ADV_PER;
19271 fcf_record->fip_priority = LPFC_FCOE_FIP_PRIORITY;
19272 bf_set(lpfc_fcf_record_mac_0, fcf_record, phba->fc_map[0]);
19273 bf_set(lpfc_fcf_record_mac_1, fcf_record, phba->fc_map[1]);
19274 bf_set(lpfc_fcf_record_mac_2, fcf_record, phba->fc_map[2]);
19275 bf_set(lpfc_fcf_record_mac_3, fcf_record, LPFC_FCOE_FCF_MAC3);
19276 bf_set(lpfc_fcf_record_mac_4, fcf_record, LPFC_FCOE_FCF_MAC4);
19277 bf_set(lpfc_fcf_record_mac_5, fcf_record, LPFC_FCOE_FCF_MAC5);
19278 bf_set(lpfc_fcf_record_fc_map_0, fcf_record, phba->fc_map[0]);
19279 bf_set(lpfc_fcf_record_fc_map_1, fcf_record, phba->fc_map[1]);
19280 bf_set(lpfc_fcf_record_fc_map_2, fcf_record, phba->fc_map[2]);
19281 bf_set(lpfc_fcf_record_fcf_valid, fcf_record, 1);
19282 bf_set(lpfc_fcf_record_fcf_avail, fcf_record, 1);
19283 bf_set(lpfc_fcf_record_fcf_index, fcf_record, fcf_index);
19284 bf_set(lpfc_fcf_record_mac_addr_prov, fcf_record,
19285 LPFC_FCF_FPMA | LPFC_FCF_SPMA);
19286
19287 if (phba->valid_vlan) {
19288 fcf_record->vlan_bitmap[phba->vlan_id / 8]
19289 = 1 << (phba->vlan_id % 8);
19290 }
19291}
19292
19293
19294
19295
19296
19297
19298
19299
19300
19301
19302
19303
19304
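/**
 * lpfc_sli4_fcf_scan_read_fcf_rec - Read an FCF record as part of an FCF scan
 * @phba: pointer to lpfc hba data structure.
 * @fcf_index: FCF table index to read.
 *
 * Issues a READ_FCF_RECORD mailbox command for the FCF table scan, marking
 * FCF_TS_INPROG while the command is outstanding. When the scan starts from
 * LPFC_FCOE_FCF_GET_FIRST the eligible FCF count is reset.
 *
 * Return: 0 on success, -ENOMEM, -EINVAL or -EIO on failure.
 **/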
19305int
19306lpfc_sli4_fcf_scan_read_fcf_rec(struct lpfc_hba *phba, uint16_t fcf_index)
19307{
19308 int rc = 0, error;
19309 LPFC_MBOXQ_t *mboxq;
19310
19311 phba->fcoe_eventtag_at_fcf_scan = phba->fcoe_eventtag;
19312 phba->fcoe_cvl_eventtag_attn = phba->fcoe_cvl_eventtag;
19313 mboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
19314 if (!mboxq) {
19315 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
19316 "2000 Failed to allocate mbox for "
19317 "READ_FCF cmd\n");
19318 error = -ENOMEM;
19319 goto fail_fcf_scan;
19320 }
19321
19322 rc = lpfc_sli4_mbx_read_fcf_rec(phba, mboxq, fcf_index);
19323 if (rc) {
19324 error = -EINVAL;
19325 goto fail_fcf_scan;
19326 }
19327
19328 mboxq->vport = phba->pport;
19329 mboxq->mbox_cmpl = lpfc_mbx_cmpl_fcf_scan_read_fcf_rec;
19330
19331 spin_lock_irq(&phba->hbalock);
19332 phba->hba_flag |= FCF_TS_INPROG;
19333 spin_unlock_irq(&phba->hbalock);
19334
19335 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_NOWAIT);
19336 if (rc == MBX_NOT_FINISHED)
19337 error = -EIO;
19338 else {
19339
19340 if (fcf_index == LPFC_FCOE_FCF_GET_FIRST)
19341 phba->fcf.eligible_fcf_cnt = 0;
19342 error = 0;
19343 }
19344fail_fcf_scan:
19345 if (error) {
19346 if (mboxq)
19347 lpfc_sli4_mbox_cmd_free(phba, mboxq);
19348
19349 spin_lock_irq(&phba->hbalock);
19350 phba->hba_flag &= ~FCF_TS_INPROG;
19351 spin_unlock_irq(&phba->hbalock);
19352 }
19353 return error;
19354}
19355
19356
19357
19358
19359
19360
19361
19362
19363
19364
19365
19366
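/**
 * lpfc_sli4_fcf_rr_read_fcf_rec - Read an FCF record for roundrobin failover
 * @phba: pointer to lpfc hba data structure.
 * @fcf_index: FCF table index to read.
 *
 * Issues a READ_FCF_RECORD mailbox command whose completion handler drives
 * FLOGI to the next eligible FCF during roundrobin FCF failover.
 *
 * Return: 0 on success, -ENOMEM, -EINVAL or -EIO on failure.
 **/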
19367int
19368lpfc_sli4_fcf_rr_read_fcf_rec(struct lpfc_hba *phba, uint16_t fcf_index)
19369{
19370 int rc = 0, error;
19371 LPFC_MBOXQ_t *mboxq;
19372
19373 mboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
19374 if (!mboxq) {
19375 lpfc_printf_log(phba, KERN_ERR, LOG_FIP | LOG_INIT,
19376 "2763 Failed to allocate mbox for "
19377 "READ_FCF cmd\n");
19378 error = -ENOMEM;
19379 goto fail_fcf_read;
19380 }
19381
19382 rc = lpfc_sli4_mbx_read_fcf_rec(phba, mboxq, fcf_index);
19383 if (rc) {
19384 error = -EINVAL;
19385 goto fail_fcf_read;
19386 }
19387
19388 mboxq->vport = phba->pport;
19389 mboxq->mbox_cmpl = lpfc_mbx_cmpl_fcf_rr_read_fcf_rec;
19390 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_NOWAIT);
19391 if (rc == MBX_NOT_FINISHED)
19392 error = -EIO;
19393 else
19394 error = 0;
19395
19396fail_fcf_read:
19397 if (error && mboxq)
19398 lpfc_sli4_mbox_cmd_free(phba, mboxq);
19399 return error;
19400}
19401
19402
19403
19404
19405
19406
19407
19408
19409
19410
19411
19412
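/**
 * lpfc_sli4_read_fcf_rec - Read a single FCF record from the port
 * @phba: pointer to lpfc hba data structure.
 * @fcf_index: FCF table index to read.
 *
 * Issues an asynchronous READ_FCF_RECORD mailbox command to update the
 * driver's copy of an individual FCF record.
 *
 * Return: 0 on success, -ENOMEM, -EINVAL or -EIO on failure.
 **/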
19413int
19414lpfc_sli4_read_fcf_rec(struct lpfc_hba *phba, uint16_t fcf_index)
19415{
19416 int rc = 0, error;
19417 LPFC_MBOXQ_t *mboxq;
19418
19419 mboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
19420 if (!mboxq) {
19421 lpfc_printf_log(phba, KERN_ERR, LOG_FIP | LOG_INIT,
19422 "2758 Failed to allocate mbox for "
19423 "READ_FCF cmd\n");
19424 error = -ENOMEM;
19425 goto fail_fcf_read;
19426 }
19427
19428 rc = lpfc_sli4_mbx_read_fcf_rec(phba, mboxq, fcf_index);
19429 if (rc) {
19430 error = -EINVAL;
19431 goto fail_fcf_read;
19432 }
19433
19434 mboxq->vport = phba->pport;
19435 mboxq->mbox_cmpl = lpfc_mbx_cmpl_read_fcf_rec;
19436 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_NOWAIT);
19437 if (rc == MBX_NOT_FINISHED)
19438 error = -EIO;
19439 else
19440 error = 0;
19441
19442fail_fcf_read:
19443 if (error && mboxq)
19444 lpfc_sli4_mbox_cmd_free(phba, mboxq);
19445 return error;
19446}
19447
19448
19449
19450
19451
19452
19453
19454
19455
19456
19457
19458
19459
19460
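/**
 * lpfc_check_next_fcf_pri_level - Rebuild the roundrobin bitmask by priority
 * @phba: pointer to lpfc hba data structure.
 *
 * Clears the roundrobin FCF failover bitmask and repopulates it with every
 * FCF at the next remaining priority level that has not failed FLOGI. If
 * all records have failed, the FLOGI-failed flags are cleared and the first
 * priority level found is used again.
 *
 * Return: 1 if the bitmask was repopulated with unfailed FCFs at the next
 * priority level, otherwise 0.
 **/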
19461static int
19462lpfc_check_next_fcf_pri_level(struct lpfc_hba *phba)
19463{
19464 uint16_t next_fcf_pri;
19465 uint16_t last_index;
19466 struct lpfc_fcf_pri *fcf_pri;
19467 int rc;
19468 int ret = 0;
19469
19470 last_index = find_first_bit(phba->fcf.fcf_rr_bmask,
19471 LPFC_SLI4_FCF_TBL_INDX_MAX);
19472 lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
19473 "3060 Last IDX %d\n", last_index);
19474
19475
19476 spin_lock_irq(&phba->hbalock);
19477 if (list_empty(&phba->fcf.fcf_pri_list) ||
19478 list_is_singular(&phba->fcf.fcf_pri_list)) {
19479 spin_unlock_irq(&phba->hbalock);
19480 lpfc_printf_log(phba, KERN_ERR, LOG_FIP,
19481 "3061 Last IDX %d\n", last_index);
19482 return 0;
19483 }
19484 spin_unlock_irq(&phba->hbalock);
19485
19486 next_fcf_pri = 0;
19487
19488
19489
19490
19491 memset(phba->fcf.fcf_rr_bmask, 0,
19492 sizeof(*phba->fcf.fcf_rr_bmask));
19493 spin_lock_irq(&phba->hbalock);
19494 list_for_each_entry(fcf_pri, &phba->fcf.fcf_pri_list, list) {
19495 if (fcf_pri->fcf_rec.flag & LPFC_FCF_FLOGI_FAILED)
19496 continue;
19497
19498
19499
19500
19501 if (!next_fcf_pri)
19502 next_fcf_pri = fcf_pri->fcf_rec.priority;
19503 spin_unlock_irq(&phba->hbalock);
19504 if (fcf_pri->fcf_rec.priority == next_fcf_pri) {
19505 rc = lpfc_sli4_fcf_rr_index_set(phba,
19506 fcf_pri->fcf_rec.fcf_index);
19507 if (rc)
19508 return 0;
19509 }
19510 spin_lock_irq(&phba->hbalock);
19511 }
19512
19513
19514
19515
19516
19517 if (!next_fcf_pri && !list_empty(&phba->fcf.fcf_pri_list)) {
19518 list_for_each_entry(fcf_pri, &phba->fcf.fcf_pri_list, list) {
19519 fcf_pri->fcf_rec.flag &= ~LPFC_FCF_FLOGI_FAILED;
19520
19521
19522
19523
19524 if (!next_fcf_pri)
19525 next_fcf_pri = fcf_pri->fcf_rec.priority;
19526 spin_unlock_irq(&phba->hbalock);
19527 if (fcf_pri->fcf_rec.priority == next_fcf_pri) {
19528 rc = lpfc_sli4_fcf_rr_index_set(phba,
19529 fcf_pri->fcf_rec.fcf_index);
19530 if (rc)
19531 return 0;
19532 }
19533 spin_lock_irq(&phba->hbalock);
19534 }
19535 } else
19536 ret = 1;
19537 spin_unlock_irq(&phba->hbalock);
19538
19539 return ret;
19540}
19541
19542
19543
19544
19545
19546
19547
19548
19549
19550
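/**
 * lpfc_sli4_fcf_rr_next_index_get - Get the next eligible roundrobin FCF index
 * @phba: pointer to lpfc hba data structure.
 *
 * Searches the roundrobin FCF failover bitmask, starting after the
 * currently registered FCF index and wrapping around, for the next eligible
 * FCF. If the search wraps back to the current FCF, the next priority level
 * is tried via lpfc_check_next_fcf_pri_level().
 *
 * Return: the next FCF index, or LPFC_FCOE_FCF_NEXT_NONE if none is
 * eligible.
 **/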
19551uint16_t
19552lpfc_sli4_fcf_rr_next_index_get(struct lpfc_hba *phba)
19553{
19554 uint16_t next_fcf_index;
19555
19556initial_priority:
19557
19558 next_fcf_index = phba->fcf.current_rec.fcf_indx;
19559
19560next_priority:
19561
19562 next_fcf_index = (next_fcf_index + 1) % LPFC_SLI4_FCF_TBL_INDX_MAX;
19563 next_fcf_index = find_next_bit(phba->fcf.fcf_rr_bmask,
19564 LPFC_SLI4_FCF_TBL_INDX_MAX,
19565 next_fcf_index);
19566
19567
19568 if (next_fcf_index >= LPFC_SLI4_FCF_TBL_INDX_MAX) {
19569
19570
19571
19572
19573
19574 next_fcf_index = find_next_bit(phba->fcf.fcf_rr_bmask,
19575 LPFC_SLI4_FCF_TBL_INDX_MAX, 0);
19576 }
19577
19578
19579
19580 if (next_fcf_index >= LPFC_SLI4_FCF_TBL_INDX_MAX ||
19581 next_fcf_index == phba->fcf.current_rec.fcf_indx) {
19582
19583
19584
19585
19586
19587
19588 if (lpfc_check_next_fcf_pri_level(phba))
19589 goto initial_priority;
19590 lpfc_printf_log(phba, KERN_WARNING, LOG_FIP,
19591 "2844 No roundrobin failover FCF available\n");
19592
19593 return LPFC_FCOE_FCF_NEXT_NONE;
19594 }
19595
19596 if (next_fcf_index < LPFC_SLI4_FCF_TBL_INDX_MAX &&
19597 phba->fcf.fcf_pri[next_fcf_index].fcf_rec.flag &
19598 LPFC_FCF_FLOGI_FAILED) {
19599 if (list_is_singular(&phba->fcf.fcf_pri_list))
19600 return LPFC_FCOE_FCF_NEXT_NONE;
19601
19602 goto next_priority;
19603 }
19604
19605 lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
19606 "2845 Get next roundrobin failover FCF (x%x)\n",
19607 next_fcf_index);
19608
19609 return next_fcf_index;
19610}
19611
19612
19613
19614
19615
19616
19617
19618
19619
19620
19621
19622
19623
19624
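/**
 * lpfc_sli4_fcf_rr_index_set - Set an FCF index in the roundrobin bitmask
 * @phba: pointer to lpfc hba data structure.
 * @fcf_index: FCF table index to set.
 *
 * Return: 0 on success, -EINVAL if @fcf_index is beyond the driver's
 * bookkeeping range.
 **/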
19625int
19626lpfc_sli4_fcf_rr_index_set(struct lpfc_hba *phba, uint16_t fcf_index)
19627{
19628 if (fcf_index >= LPFC_SLI4_FCF_TBL_INDX_MAX) {
19629 lpfc_printf_log(phba, KERN_ERR, LOG_FIP,
19630 "2610 FCF (x%x) reached driver's book "
19631 "keeping dimension:x%x\n",
19632 fcf_index, LPFC_SLI4_FCF_TBL_INDX_MAX);
19633 return -EINVAL;
19634 }
19635
19636 set_bit(fcf_index, phba->fcf.fcf_rr_bmask);
19637
19638 lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
19639 "2790 Set FCF (x%x) to roundrobin FCF failover "
19640 "bmask\n", fcf_index);
19641
19642 return 0;
19643}
19644
19645
19646
19647
19648
19649
19650
19651
19652
19653
19654
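/**
 * lpfc_sli4_fcf_rr_index_clear - Clear an FCF index from the roundrobin bitmask
 * @phba: pointer to lpfc hba data structure.
 * @fcf_index: FCF table index to clear.
 *
 * Removes the matching entry from the FCF priority list and clears the
 * index's bit in the roundrobin failover bitmask.
 **/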
19655void
19656lpfc_sli4_fcf_rr_index_clear(struct lpfc_hba *phba, uint16_t fcf_index)
19657{
19658 struct lpfc_fcf_pri *fcf_pri, *fcf_pri_next;
19659 if (fcf_index >= LPFC_SLI4_FCF_TBL_INDX_MAX) {
19660 lpfc_printf_log(phba, KERN_ERR, LOG_FIP,
19661 "2762 FCF (x%x) reached driver's book "
19662 "keeping dimension:x%x\n",
19663 fcf_index, LPFC_SLI4_FCF_TBL_INDX_MAX);
19664 return;
19665 }
19666
19667 spin_lock_irq(&phba->hbalock);
19668 list_for_each_entry_safe(fcf_pri, fcf_pri_next, &phba->fcf.fcf_pri_list,
19669 list) {
19670 if (fcf_pri->fcf_rec.fcf_index == fcf_index) {
19671 list_del_init(&fcf_pri->list);
19672 break;
19673 }
19674 }
19675 spin_unlock_irq(&phba->hbalock);
19676 clear_bit(fcf_index, phba->fcf.fcf_rr_bmask);
19677
19678 lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
19679 "2791 Clear FCF (x%x) from roundrobin failover "
19680 "bmask\n", fcf_index);
19681}
19682
19683
19684
19685
19686
19687
19688
19689
19690
19691
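/**
 * lpfc_mbx_cmpl_redisc_fcf_table - Completion of the FCF rediscover mailbox
 * @phba: pointer to lpfc hba data structure.
 * @mbox: pointer to the completed mailbox object.
 *
 * On failure, clears the appropriate rediscovery flag and either retries
 * physical port discovery (clear virtual link case) or falls through to
 * link-down handling (FCF dead case). On success, starts the FCF rediscover
 * quiescent timer.
 **/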
19692static void
19693lpfc_mbx_cmpl_redisc_fcf_table(struct lpfc_hba *phba, LPFC_MBOXQ_t *mbox)
19694{
19695 struct lpfc_mbx_redisc_fcf_tbl *redisc_fcf;
19696 uint32_t shdr_status, shdr_add_status;
19697
19698 redisc_fcf = &mbox->u.mqe.un.redisc_fcf_tbl;
19699
19700 shdr_status = bf_get(lpfc_mbox_hdr_status,
19701 &redisc_fcf->header.cfg_shdr.response);
19702 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status,
19703 &redisc_fcf->header.cfg_shdr.response);
19704 if (shdr_status || shdr_add_status) {
19705 lpfc_printf_log(phba, KERN_ERR, LOG_FIP,
19706 "2746 Requesting for FCF rediscovery failed "
19707 "status x%x add_status x%x\n",
19708 shdr_status, shdr_add_status);
19709 if (phba->fcf.fcf_flag & FCF_ACVL_DISC) {
19710 spin_lock_irq(&phba->hbalock);
19711 phba->fcf.fcf_flag &= ~FCF_ACVL_DISC;
19712 spin_unlock_irq(&phba->hbalock);
19713
19714
19715
19716
19717 lpfc_retry_pport_discovery(phba);
19718 } else {
19719 spin_lock_irq(&phba->hbalock);
19720 phba->fcf.fcf_flag &= ~FCF_DEAD_DISC;
19721 spin_unlock_irq(&phba->hbalock);
19722
19723
19724
19725
19726
19727 lpfc_sli4_fcf_dead_failthrough(phba);
19728 }
19729 } else {
19730 lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
19731 "2775 Start FCF rediscover quiescent timer\n");
19732
19733
19734
19735
19736 lpfc_fcf_redisc_wait_start_timer(phba);
19737 }
19738
19739 mempool_free(mbox, phba->mbox_mem_pool);
19740}
19741
19742
19743
19744
19745
19746
19747
19748
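/**
 * lpfc_sli4_redisc_fcf_table - Request the port to rediscover its FCF table
 * @phba: pointer to lpfc hba data structure.
 *
 * Cancels pending vport retry delay timers and issues an asynchronous
 * REDISCOVER_FCF mailbox command.
 *
 * Return: 0 on success, -ENOMEM or -EIO on failure.
 **/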
19749int
19750lpfc_sli4_redisc_fcf_table(struct lpfc_hba *phba)
19751{
19752 LPFC_MBOXQ_t *mbox;
19753 struct lpfc_mbx_redisc_fcf_tbl *redisc_fcf;
19754 int rc, length;
19755
19756
19757 lpfc_cancel_all_vport_retry_delay_timer(phba);
19758
19759 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
19760 if (!mbox) {
19761 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
19762 "2745 Failed to allocate mbox for "
19763 "requesting FCF rediscover.\n");
19764 return -ENOMEM;
19765 }
19766
19767 length = (sizeof(struct lpfc_mbx_redisc_fcf_tbl) -
19768 sizeof(struct lpfc_sli4_cfg_mhdr));
19769 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_FCOE,
19770 LPFC_MBOX_OPCODE_FCOE_REDISCOVER_FCF,
19771 length, LPFC_SLI4_MBX_EMBED);
19772
19773 redisc_fcf = &mbox->u.mqe.un.redisc_fcf_tbl;
19774
19775 bf_set(lpfc_mbx_redisc_fcf_count, redisc_fcf, 0);
19776
19777
19778 mbox->vport = phba->pport;
19779 mbox->mbox_cmpl = lpfc_mbx_cmpl_redisc_fcf_table;
19780 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT);
19781
19782 if (rc == MBX_NOT_FINISHED) {
19783 mempool_free(mbox, phba->mbox_mem_pool);
19784 return -EIO;
19785 }
19786 return 0;
19787}
19788
19789
19790
19791
19792
19793
19794
19795
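/**
 * lpfc_sli4_fcf_dead_failthrough - Handle an unrecoverable FCF DEAD event
 * @phba: pointer to lpfc hba data structure.
 *
 * Runs the link-down handling while preserving the recorded link state and
 * unregisters the FCF that is no longer in use.
 **/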
19796void
19797lpfc_sli4_fcf_dead_failthrough(struct lpfc_hba *phba)
19798{
19799 uint32_t link_state;
19800
19801
19802
19803
19804
19805
19806 link_state = phba->link_state;
19807 lpfc_linkdown(phba);
19808 phba->link_state = link_state;
19809
19810
19811 lpfc_unregister_unused_fcf(phba);
19812}
19813
19814
19815
19816
19817
19818
19819
19820
19821
19822
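/**
 * lpfc_sli_get_config_region23 - Read config region 23 on an SLI3 port
 * @phba: pointer to lpfc hba data structure.
 * @rgn23_data: buffer (at least DMP_RGN23_SIZE bytes) receiving the data.
 *
 * Repeatedly issues polled DUMP mailbox commands until the whole region has
 * been copied into @rgn23_data.
 *
 * Return: number of bytes read, or 0 on failure.
 **/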
19823static uint32_t
19824lpfc_sli_get_config_region23(struct lpfc_hba *phba, char *rgn23_data)
19825{
19826 LPFC_MBOXQ_t *pmb = NULL;
19827 MAILBOX_t *mb;
19828 uint32_t offset = 0;
19829 int rc;
19830
19831 if (!rgn23_data)
19832 return 0;
19833
19834 pmb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
19835 if (!pmb) {
19836 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
19837 "2600 failed to allocate mailbox memory\n");
19838 return 0;
19839 }
19840 mb = &pmb->u.mb;
19841
19842 do {
19843 lpfc_dump_mem(phba, pmb, offset, DMP_REGION_23);
19844 rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL);
19845
19846 if (rc != MBX_SUCCESS) {
19847 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
19848 "2601 failed to read config "
19849 "region 23, rc 0x%x Status 0x%x\n",
19850 rc, mb->mbxStatus);
19851 mb->un.varDmp.word_cnt = 0;
19852 }
19853
19854
19855
19856
19857 if (mb->un.varDmp.word_cnt == 0)
19858 break;
19859
19860 if (mb->un.varDmp.word_cnt > DMP_RGN23_SIZE - offset)
19861 mb->un.varDmp.word_cnt = DMP_RGN23_SIZE - offset;
19862
19863 lpfc_sli_pcimem_bcopy(((uint8_t *)mb) + DMP_RSP_OFFSET,
19864 rgn23_data + offset,
19865 mb->un.varDmp.word_cnt);
19866 offset += mb->un.varDmp.word_cnt;
19867 } while (mb->un.varDmp.word_cnt && offset < DMP_RGN23_SIZE);
19868
19869 mempool_free(pmb, phba->mbox_mem_pool);
19870 return offset;
19871}
19872
19873
19874
19875
19876
19877
19878
19879
19880
19881
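/**
 * lpfc_sli4_get_config_region23 - Read config region 23 on an SLI4 port
 * @phba: pointer to lpfc hba data structure.
 * @rgn23_data: buffer (at least DMP_RGN23_SIZE bytes) receiving the data.
 *
 * Issues a polled DUMP mailbox command and copies the returned region data
 * from the mailbox DMA buffer into @rgn23_data.
 *
 * Return: number of bytes read, or 0 on failure.
 **/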
19882static uint32_t
19883lpfc_sli4_get_config_region23(struct lpfc_hba *phba, char *rgn23_data)
19884{
19885 LPFC_MBOXQ_t *mboxq = NULL;
19886 struct lpfc_dmabuf *mp = NULL;
19887 struct lpfc_mqe *mqe;
19888 uint32_t data_length = 0;
19889 int rc;
19890
19891 if (!rgn23_data)
19892 return 0;
19893
19894 mboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
19895 if (!mboxq) {
19896 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
19897 "3105 failed to allocate mailbox memory\n");
19898 return 0;
19899 }
19900
19901 if (lpfc_sli4_dump_cfg_rg23(phba, mboxq))
19902 goto out;
19903 mqe = &mboxq->u.mqe;
19904 mp = (struct lpfc_dmabuf *)mboxq->ctx_buf;
19905 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
19906 if (rc)
19907 goto out;
19908 data_length = mqe->un.mb_words[5];
19909 if (data_length == 0)
19910 goto out;
19911 if (data_length > DMP_RGN23_SIZE) {
19912 data_length = 0;
19913 goto out;
19914 }
19915 lpfc_sli_pcimem_bcopy((char *)mp->virt, rgn23_data, data_length);
19916out:
19917 mempool_free(mboxq, phba->mbox_mem_pool);
19918 if (mp) {
19919 lpfc_mbuf_free(phba, mp->virt, mp->phys);
19920 kfree(mp);
19921 }
19922 return data_length;
19923}
19924
19925
19926
19927
19928
19929
19930
19931
19932
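/**
 * lpfc_sli_read_link_ste - Read the link static enable/disable state
 * @phba: pointer to lpfc hba data structure.
 *
 * Reads config region 23, validates its signature and version, and walks
 * the driver-specific TLVs looking for the port state record. If that
 * record indicates the link is statically disabled, LINK_DISABLED is set in
 * phba->hba_flag.
 **/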
19933void
19934lpfc_sli_read_link_ste(struct lpfc_hba *phba)
19935{
19936 uint8_t *rgn23_data = NULL;
19937 uint32_t if_type, data_size, sub_tlv_len, tlv_offset;
19938 uint32_t offset = 0;
19939
19940
19941 rgn23_data = kzalloc(DMP_RGN23_SIZE, GFP_KERNEL);
19942 if (!rgn23_data)
19943 goto out;
19944
19945 if (phba->sli_rev < LPFC_SLI_REV4)
19946 data_size = lpfc_sli_get_config_region23(phba, rgn23_data);
19947 else {
19948 if_type = bf_get(lpfc_sli_intf_if_type,
19949 &phba->sli4_hba.sli_intf);
19950 if (if_type == LPFC_SLI_INTF_IF_TYPE_0)
19951 goto out;
19952 data_size = lpfc_sli4_get_config_region23(phba, rgn23_data);
19953 }
19954
19955 if (!data_size)
19956 goto out;
19957
19958
19959 if (memcmp(&rgn23_data[offset], LPFC_REGION23_SIGNATURE, 4)) {
19960 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
19961 "2619 Config region 23 has bad signature\n");
19962 goto out;
19963 }
19964 offset += 4;
19965
19966
19967 if (rgn23_data[offset] != LPFC_REGION23_VERSION) {
19968 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
19969 "2620 Config region 23 has bad version\n");
19970 goto out;
19971 }
19972 offset += 4;
19973
19974
19975 while (offset < data_size) {
19976 if (rgn23_data[offset] == LPFC_REGION23_LAST_REC)
19977 break;
19978
19979
19980
19981
19982 if ((rgn23_data[offset] != DRIVER_SPECIFIC_TYPE) ||
19983 (rgn23_data[offset + 2] != LINUX_DRIVER_ID) ||
19984 (rgn23_data[offset + 3] != 0)) {
19985 offset += rgn23_data[offset + 1] * 4 + 4;
19986 continue;
19987 }
19988
19989
19990 sub_tlv_len = rgn23_data[offset + 1] * 4;
19991 offset += 4;
19992 tlv_offset = 0;
19993
19994
19995
19996
19997 while ((offset < data_size) &&
19998 (tlv_offset < sub_tlv_len)) {
19999 if (rgn23_data[offset] == LPFC_REGION23_LAST_REC) {
20000 offset += 4;
20001 tlv_offset += 4;
20002 break;
20003 }
20004 if (rgn23_data[offset] != PORT_STE_TYPE) {
20005 offset += rgn23_data[offset + 1] * 4 + 4;
20006 tlv_offset += rgn23_data[offset + 1] * 4 + 4;
20007 continue;
20008 }
20009
20010
20011 if (!rgn23_data[offset + 2])
20012 phba->hba_flag |= LINK_DISABLED;
20013
20014 goto out;
20015 }
20016 }
20017
20018out:
20019 kfree(rgn23_data);
20020 return;
20021}
20022
20023
20024
20025
20026
20027
20028
20029
20030
20031
20032
20033
20034
20035
20036
20037
20038
20039
20040
20041
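/**
 * lpfc_wr_object - Write an object to the port firmware
 * @phba: pointer to lpfc hba data structure.
 * @dmabuf_list: list of DMA buffers carrying the object data.
 * @size: total number of bytes to write.
 * @offset: in/out byte offset of this chunk within the object.
 *
 * Builds a WRITE_OBJECT mailbox command whose BDEs point at the buffers in
 * @dmabuf_list, marking EOF on the last chunk, and issues it polled or
 * waited depending on interrupt state. On success @offset is advanced by
 * the number of bytes the port reports written; on the final chunk the
 * returned change_status is logged to indicate what kind of reset is needed
 * to activate the new firmware.
 *
 * Return: 0 on success, -ENOMEM or -ENXIO on failure.
 **/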
20042int
20043lpfc_wr_object(struct lpfc_hba *phba, struct list_head *dmabuf_list,
20044 uint32_t size, uint32_t *offset)
20045{
20046 struct lpfc_mbx_wr_object *wr_object;
20047 LPFC_MBOXQ_t *mbox;
20048 int rc = 0, i = 0;
20049 uint32_t shdr_status, shdr_add_status, shdr_change_status, shdr_csf;
20050 uint32_t mbox_tmo;
20051 struct lpfc_dmabuf *dmabuf;
20052 uint32_t written = 0;
20053 bool check_change_status = false;
20054
20055 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
20056 if (!mbox)
20057 return -ENOMEM;
20058
20059 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON,
20060 LPFC_MBOX_OPCODE_WRITE_OBJECT,
20061 sizeof(struct lpfc_mbx_wr_object) -
20062 sizeof(struct lpfc_sli4_cfg_mhdr), LPFC_SLI4_MBX_EMBED);
20063
20064 wr_object = (struct lpfc_mbx_wr_object *)&mbox->u.mqe.un.wr_object;
20065 wr_object->u.request.write_offset = *offset;
20066 sprintf((uint8_t *)wr_object->u.request.object_name, "/");
20067 wr_object->u.request.object_name[0] =
20068 cpu_to_le32(wr_object->u.request.object_name[0]);
20069 bf_set(lpfc_wr_object_eof, &wr_object->u.request, 0);
20070 list_for_each_entry(dmabuf, dmabuf_list, list) {
20071 if (i >= LPFC_MBX_WR_CONFIG_MAX_BDE || written >= size)
20072 break;
20073 wr_object->u.request.bde[i].addrLow = putPaddrLow(dmabuf->phys);
20074 wr_object->u.request.bde[i].addrHigh =
20075 putPaddrHigh(dmabuf->phys);
20076 if (written + SLI4_PAGE_SIZE >= size) {
20077 wr_object->u.request.bde[i].tus.f.bdeSize =
20078 (size - written);
20079 written += (size - written);
20080 bf_set(lpfc_wr_object_eof, &wr_object->u.request, 1);
20081 bf_set(lpfc_wr_object_eas, &wr_object->u.request, 1);
20082 check_change_status = true;
20083 } else {
20084 wr_object->u.request.bde[i].tus.f.bdeSize =
20085 SLI4_PAGE_SIZE;
20086 written += SLI4_PAGE_SIZE;
20087 }
20088 i++;
20089 }
20090 wr_object->u.request.bde_count = i;
20091 bf_set(lpfc_wr_object_write_length, &wr_object->u.request, written);
20092 if (!phba->sli4_hba.intr_enable)
20093 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
20094 else {
20095 mbox_tmo = lpfc_mbox_tmo_val(phba, mbox);
20096 rc = lpfc_sli_issue_mbox_wait(phba, mbox, mbox_tmo);
20097 }
20098
20099 shdr_status = bf_get(lpfc_mbox_hdr_status,
20100 &wr_object->header.cfg_shdr.response);
20101 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status,
20102 &wr_object->header.cfg_shdr.response);
20103 if (check_change_status) {
20104 shdr_change_status = bf_get(lpfc_wr_object_change_status,
20105 &wr_object->u.response);
20106
20107 if (shdr_change_status == LPFC_CHANGE_STATUS_FW_RESET ||
20108 shdr_change_status == LPFC_CHANGE_STATUS_PORT_MIGRATION) {
20109 shdr_csf = bf_get(lpfc_wr_object_csf,
20110 &wr_object->u.response);
20111 if (shdr_csf)
20112 shdr_change_status =
20113 LPFC_CHANGE_STATUS_PCI_RESET;
20114 }
20115
20116 switch (shdr_change_status) {
20117 case (LPFC_CHANGE_STATUS_PHYS_DEV_RESET):
20118 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
20119 "3198 Firmware write complete: System "
20120 "reboot required to instantiate\n");
20121 break;
20122 case (LPFC_CHANGE_STATUS_FW_RESET):
20123 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
20124 "3199 Firmware write complete: Firmware"
20125 " reset required to instantiate\n");
20126 break;
20127 case (LPFC_CHANGE_STATUS_PORT_MIGRATION):
20128 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
20129 "3200 Firmware write complete: Port "
20130 "Migration or PCI Reset required to "
20131 "instantiate\n");
20132 break;
20133 case (LPFC_CHANGE_STATUS_PCI_RESET):
20134 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
20135 "3201 Firmware write complete: PCI "
20136 "Reset required to instantiate\n");
20137 break;
20138 default:
20139 break;
20140 }
20141 }
20142 if (!phba->sli4_hba.intr_enable)
20143 mempool_free(mbox, phba->mbox_mem_pool);
20144 else if (rc != MBX_TIMEOUT)
20145 mempool_free(mbox, phba->mbox_mem_pool);
20146 if (shdr_status || shdr_add_status || rc) {
20147 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
20148 "3025 Write Object mailbox failed with "
20149 "status x%x add_status x%x, mbx status x%x\n",
20150 shdr_status, shdr_add_status, rc);
20151 rc = -ENXIO;
20152 *offset = shdr_add_status;
20153 } else
20154 *offset += wr_object->u.response.actual_write_length;
20155 return rc;
20156}
20157
20158
20159
20160
20161
20162
20163
20164
20165
20166
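/**
 * lpfc_cleanup_pending_mbox - Free pending REG_LOGIN/REG_VPI mailboxes
 * @vport: pointer to the vport being cleaned up.
 *
 * Removes this vport's queued REG_LOGIN64 and REG_VPI mailbox commands,
 * converts in-flight and completed ones to the default completion handler
 * with immediate unreg, clears NLP_IGNR_REG_CMPL on the affected nodes, and
 * releases the associated DMA buffers, node references and mailbox memory.
 **/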
20167void
20168lpfc_cleanup_pending_mbox(struct lpfc_vport *vport)
20169{
20170 struct lpfc_hba *phba = vport->phba;
20171 LPFC_MBOXQ_t *mb, *nextmb;
20172 struct lpfc_dmabuf *mp;
20173 struct lpfc_nodelist *ndlp;
20174 struct lpfc_nodelist *act_mbx_ndlp = NULL;
20175 LIST_HEAD(mbox_cmd_list);
20176 uint8_t restart_loop;
20177
20178
20179 spin_lock_irq(&phba->hbalock);
20180 list_for_each_entry_safe(mb, nextmb, &phba->sli.mboxq, list) {
20181 if (mb->vport != vport)
20182 continue;
20183
20184 if ((mb->u.mb.mbxCommand != MBX_REG_LOGIN64) &&
20185 (mb->u.mb.mbxCommand != MBX_REG_VPI))
20186 continue;
20187
20188 list_move_tail(&mb->list, &mbox_cmd_list);
20189 }
20190
20191 mb = phba->sli.mbox_active;
20192 if (mb && (mb->vport == vport)) {
20193 if ((mb->u.mb.mbxCommand == MBX_REG_LOGIN64) ||
20194 (mb->u.mb.mbxCommand == MBX_REG_VPI))
20195 mb->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
20196 if (mb->u.mb.mbxCommand == MBX_REG_LOGIN64) {
20197 act_mbx_ndlp = (struct lpfc_nodelist *)mb->ctx_ndlp;
20198
20199 act_mbx_ndlp = lpfc_nlp_get(act_mbx_ndlp);
20200
20201 mb->mbox_flag |= LPFC_MBX_IMED_UNREG;
20202 }
20203 }
20204
20205 do {
20206 restart_loop = 0;
20207 list_for_each_entry(mb, &phba->sli.mboxq_cmpl, list) {
20208
20209
20210
20211
20212 if ((mb->vport != vport) ||
20213 (mb->mbox_flag & LPFC_MBX_IMED_UNREG))
20214 continue;
20215
20216 if ((mb->u.mb.mbxCommand != MBX_REG_LOGIN64) &&
20217 (mb->u.mb.mbxCommand != MBX_REG_VPI))
20218 continue;
20219
20220 mb->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
20221 if (mb->u.mb.mbxCommand == MBX_REG_LOGIN64) {
20222 ndlp = (struct lpfc_nodelist *)mb->ctx_ndlp;
20223
20224 mb->mbox_flag |= LPFC_MBX_IMED_UNREG;
20225 restart_loop = 1;
20226 spin_unlock_irq(&phba->hbalock);
20227 spin_lock(&ndlp->lock);
20228 ndlp->nlp_flag &= ~NLP_IGNR_REG_CMPL;
20229 spin_unlock(&ndlp->lock);
20230 spin_lock_irq(&phba->hbalock);
20231 break;
20232 }
20233 }
20234 } while (restart_loop);
20235
20236 spin_unlock_irq(&phba->hbalock);
20237
20238
20239 while (!list_empty(&mbox_cmd_list)) {
20240 list_remove_head(&mbox_cmd_list, mb, LPFC_MBOXQ_t, list);
20241 if (mb->u.mb.mbxCommand == MBX_REG_LOGIN64) {
20242 mp = (struct lpfc_dmabuf *)(mb->ctx_buf);
20243 if (mp) {
20244 __lpfc_mbuf_free(phba, mp->virt, mp->phys);
20245 kfree(mp);
20246 }
20247 mb->ctx_buf = NULL;
20248 ndlp = (struct lpfc_nodelist *)mb->ctx_ndlp;
20249 mb->ctx_ndlp = NULL;
20250 if (ndlp) {
20251 spin_lock(&ndlp->lock);
20252 ndlp->nlp_flag &= ~NLP_IGNR_REG_CMPL;
20253 spin_unlock(&ndlp->lock);
20254 lpfc_nlp_put(ndlp);
20255 }
20256 }
20257 mempool_free(mb, phba->mbox_mem_pool);
20258 }
20259
20260
20261 if (act_mbx_ndlp) {
20262 spin_lock(&act_mbx_ndlp->lock);
20263 act_mbx_ndlp->nlp_flag &= ~NLP_IGNR_REG_CMPL;
20264 spin_unlock(&act_mbx_ndlp->lock);
20265 lpfc_nlp_put(act_mbx_ndlp);
20266 }
20267}
20268
20269
20270
20271
20272
20273
20274
20275
20276
20277
20278
20279
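/**
 * lpfc_drain_txq - Attempt to submit everything queued on the ELS txq
 * @phba: pointer to lpfc hba data structure.
 *
 * Pops iocbs off the ELS txq (or the first hardware queue's txq in MDS
 * loopback mode), assigns each an ELS sglq, converts it to a WQE and posts
 * it to the work queue. Entries that cannot be converted or posted are
 * completed with IOSTAT_LOCAL_REJECT/IOERR_SLI_ABORTED.
 *
 * Return: 0 when the txq was fully drained, otherwise the number of iocbs
 * still left on the txq.
 **/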
20280uint32_t
20281lpfc_drain_txq(struct lpfc_hba *phba)
20282{
20283 LIST_HEAD(completions);
20284 struct lpfc_sli_ring *pring;
20285 struct lpfc_iocbq *piocbq = NULL;
20286 unsigned long iflags = 0;
20287 char *fail_msg = NULL;
20288 struct lpfc_sglq *sglq;
20289 union lpfc_wqe128 wqe;
20290 uint32_t txq_cnt = 0;
20291 struct lpfc_queue *wq;
20292
20293 if (phba->link_flag & LS_MDS_LOOPBACK) {
20294
20295 wq = phba->sli4_hba.hdwq[0].io_wq;
20296 if (unlikely(!wq))
20297 return 0;
20298 pring = wq->pring;
20299 } else {
20300 wq = phba->sli4_hba.els_wq;
20301 if (unlikely(!wq))
20302 return 0;
20303 pring = lpfc_phba_elsring(phba);
20304 }
20305
20306 if (unlikely(!pring) || list_empty(&pring->txq))
20307 return 0;
20308
20309 spin_lock_irqsave(&pring->ring_lock, iflags);
20310 list_for_each_entry(piocbq, &pring->txq, list) {
20311 txq_cnt++;
20312 }
20313
20314 if (txq_cnt > pring->txq_max)
20315 pring->txq_max = txq_cnt;
20316
20317 spin_unlock_irqrestore(&pring->ring_lock, iflags);
20318
20319 while (!list_empty(&pring->txq)) {
20320 spin_lock_irqsave(&pring->ring_lock, iflags);
20321
20322 piocbq = lpfc_sli_ringtx_get(phba, pring);
20323 if (!piocbq) {
20324 spin_unlock_irqrestore(&pring->ring_lock, iflags);
20325 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
20326 "2823 txq empty and txq_cnt is %d\n ",
20327 txq_cnt);
20328 break;
20329 }
20330 sglq = __lpfc_sli_get_els_sglq(phba, piocbq);
20331 if (!sglq) {
20332 __lpfc_sli_ringtx_put(phba, pring, piocbq);
20333 spin_unlock_irqrestore(&pring->ring_lock, iflags);
20334 break;
20335 }
20336 txq_cnt--;
20337
20338
20339
20340
20341 piocbq->sli4_lxritag = sglq->sli4_lxritag;
20342 piocbq->sli4_xritag = sglq->sli4_xritag;
20343 if (NO_XRI == lpfc_sli4_bpl2sgl(phba, piocbq, sglq))
20344 fail_msg = "to convert bpl to sgl";
20345 else if (lpfc_sli4_iocb2wqe(phba, piocbq, &wqe))
20346 fail_msg = "to convert iocb to wqe";
20347 else if (lpfc_sli4_wq_put(wq, &wqe))
20348 fail_msg = " - Wq is full";
20349 else
20350 lpfc_sli_ringtxcmpl_put(phba, pring, piocbq);
20351
20352 if (fail_msg) {
20353
20354 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
20355 "2822 IOCB failed %s iotag 0x%x "
20356 "xri 0x%x\n",
20357 fail_msg,
20358 piocbq->iotag, piocbq->sli4_xritag);
20359 list_add_tail(&piocbq->list, &completions);
20360 }
20361 spin_unlock_irqrestore(&pring->ring_lock, iflags);
20362 }
20363
20364
20365 lpfc_sli_cancel_iocbs(phba, &completions, IOSTAT_LOCAL_REJECT,
20366 IOERR_SLI_ABORTED);
20367
20368 return txq_cnt;
20369}
20370
20371
20372
20373
20374
20375
20376
20377
20378
20379
20380
20381
20382
20383
20384
20385
20386
20387
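/**
 * lpfc_wqe_bpl2sgl - Convert a WQE's buffer pointer list into a posted sgl
 * @phba: pointer to lpfc hba data structure.
 * @pwqeq: pointer to the wqe-based iocb carrying the command.
 * @sglq: pointer to the posted sglq whose sgl will be filled.
 *
 * Copies the buffer pointer list referenced by @pwqeq (or the single BDE
 * embedded in the WQE) into the sglq's scatter/gather list, setting the SGE
 * type, offset and last-entry bit as required by the command type.
 *
 * Return: the sglq's XRI on success, NO_XRI on failure.
 **/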
20388static uint16_t
20389lpfc_wqe_bpl2sgl(struct lpfc_hba *phba, struct lpfc_iocbq *pwqeq,
20390 struct lpfc_sglq *sglq)
20391{
20392 uint16_t xritag = NO_XRI;
20393 struct ulp_bde64 *bpl = NULL;
20394 struct ulp_bde64 bde;
20395 struct sli4_sge *sgl = NULL;
20396 struct lpfc_dmabuf *dmabuf;
20397 union lpfc_wqe128 *wqe;
20398 int numBdes = 0;
20399 int i = 0;
20400 uint32_t offset = 0;
20401 int inbound = 0;
20402 uint32_t cmd;
20403
20404 if (!pwqeq || !sglq)
20405 return xritag;
20406
20407 sgl = (struct sli4_sge *)sglq->sgl;
20408 wqe = &pwqeq->wqe;
20409 pwqeq->iocb.ulpIoTag = pwqeq->iotag;
20410
20411 cmd = bf_get(wqe_cmnd, &wqe->generic.wqe_com);
20412 if (cmd == CMD_XMIT_BLS_RSP64_WQE)
20413 return sglq->sli4_xritag;
20414 numBdes = pwqeq->rsvd2;
20415 if (numBdes) {
20416
20417
20418
20419
20420 if (pwqeq->context3)
20421 dmabuf = (struct lpfc_dmabuf *)pwqeq->context3;
20422 else
20423 return xritag;
20424
20425 bpl = (struct ulp_bde64 *)dmabuf->virt;
20426 if (!bpl)
20427 return xritag;
20428
20429 for (i = 0; i < numBdes; i++) {
20430
20431 sgl->addr_hi = bpl->addrHigh;
20432 sgl->addr_lo = bpl->addrLow;
20433
20434 sgl->word2 = le32_to_cpu(sgl->word2);
20435 if ((i+1) == numBdes)
20436 bf_set(lpfc_sli4_sge_last, sgl, 1);
20437 else
20438 bf_set(lpfc_sli4_sge_last, sgl, 0);
20439
20440
20441
20442 bde.tus.w = le32_to_cpu(bpl->tus.w);
20443 sgl->sge_len = cpu_to_le32(bde.tus.f.bdeSize);
20444
20445
20446
20447
20448 switch (cmd) {
20449 case CMD_GEN_REQUEST64_WQE:
20450
20451 if (bpl->tus.f.bdeFlags == BUFF_TYPE_BDE_64I)
20452 inbound++;
20453
20454 if (inbound == 1)
20455 offset = 0;
20456 bf_set(lpfc_sli4_sge_offset, sgl, offset);
20457 bf_set(lpfc_sli4_sge_type, sgl,
20458 LPFC_SGE_TYPE_DATA);
20459 offset += bde.tus.f.bdeSize;
20460 break;
20461 case CMD_FCP_TRSP64_WQE:
20462 bf_set(lpfc_sli4_sge_offset, sgl, 0);
20463 bf_set(lpfc_sli4_sge_type, sgl,
20464 LPFC_SGE_TYPE_DATA);
20465 break;
20466 case CMD_FCP_TSEND64_WQE:
20467 case CMD_FCP_TRECEIVE64_WQE:
20468 bf_set(lpfc_sli4_sge_type, sgl,
20469 bpl->tus.f.bdeFlags);
20470 if (i < 3)
20471 offset = 0;
20472 else
20473 offset += bde.tus.f.bdeSize;
20474 bf_set(lpfc_sli4_sge_offset, sgl, offset);
20475 break;
20476 }
20477 sgl->word2 = cpu_to_le32(sgl->word2);
20478 bpl++;
20479 sgl++;
20480 }
20481 } else if (wqe->gen_req.bde.tus.f.bdeFlags == BUFF_TYPE_BDE_64) {
20482
20483
20484
20485
20486 sgl->addr_hi = cpu_to_le32(wqe->gen_req.bde.addrHigh);
20487 sgl->addr_lo = cpu_to_le32(wqe->gen_req.bde.addrLow);
20488 sgl->word2 = le32_to_cpu(sgl->word2);
20489 bf_set(lpfc_sli4_sge_last, sgl, 1);
20490 sgl->word2 = cpu_to_le32(sgl->word2);
20491 sgl->sge_len = cpu_to_le32(wqe->gen_req.bde.tus.f.bdeSize);
20492 }
20493 return sglq->sli4_xritag;
20494}
20495
20496
20497
20498
20499
20500
20501
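/**
 * lpfc_sli4_issue_wqe - Issue a prebuilt WQE on the appropriate work queue
 * @phba: pointer to lpfc hba data structure.
 * @qp: hardware queue selected for this I/O.
 * @pwqe: pointer to the wqe-based iocb to post.
 *
 * Routes the WQE by I/O type: NVME LS commands go to the NVME LS work queue
 * after obtaining an ELS sglq, FCP/NVME I/O goes to the hardware queue's
 * io_wq, and NVMET responses reuse the XRI already bound to the exchange
 * context. On success the iocb is added to the ring's txcmplq.
 *
 * Return: 0 on success, otherwise a WQE_* error code.
 **/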
20502int
20503lpfc_sli4_issue_wqe(struct lpfc_hba *phba, struct lpfc_sli4_hdw_queue *qp,
20504 struct lpfc_iocbq *pwqe)
20505{
20506 union lpfc_wqe128 *wqe = &pwqe->wqe;
20507 struct lpfc_async_xchg_ctx *ctxp;
20508 struct lpfc_queue *wq;
20509 struct lpfc_sglq *sglq;
20510 struct lpfc_sli_ring *pring;
20511 unsigned long iflags;
20512 uint32_t ret = 0;
20513
20514
20515 if (pwqe->iocb_flag & LPFC_IO_NVME_LS) {
20516 pring = phba->sli4_hba.nvmels_wq->pring;
20517 lpfc_qp_spin_lock_irqsave(&pring->ring_lock, iflags,
20518 qp, wq_access);
20519 sglq = __lpfc_sli_get_els_sglq(phba, pwqe);
20520 if (!sglq) {
20521 spin_unlock_irqrestore(&pring->ring_lock, iflags);
20522 return WQE_BUSY;
20523 }
20524 pwqe->sli4_lxritag = sglq->sli4_lxritag;
20525 pwqe->sli4_xritag = sglq->sli4_xritag;
20526 if (lpfc_wqe_bpl2sgl(phba, pwqe, sglq) == NO_XRI) {
20527 spin_unlock_irqrestore(&pring->ring_lock, iflags);
20528 return WQE_ERROR;
20529 }
20530 bf_set(wqe_xri_tag, &pwqe->wqe.xmit_bls_rsp.wqe_com,
20531 pwqe->sli4_xritag);
20532 ret = lpfc_sli4_wq_put(phba->sli4_hba.nvmels_wq, wqe);
20533 if (ret) {
20534 spin_unlock_irqrestore(&pring->ring_lock, iflags);
20535 return ret;
20536 }
20537
20538 lpfc_sli_ringtxcmpl_put(phba, pring, pwqe);
20539 spin_unlock_irqrestore(&pring->ring_lock, iflags);
20540
20541 lpfc_sli4_poll_eq(qp->hba_eq, LPFC_POLL_FASTPATH);
20542 return 0;
20543 }
20544
20545
20546 if (pwqe->iocb_flag & LPFC_IO_NVME ||
20547 pwqe->iocb_flag & LPFC_IO_FCP) {
20548
20549 wq = qp->io_wq;
20550 pring = wq->pring;
20551
20552 bf_set(wqe_cqid, &wqe->generic.wqe_com, qp->io_cq_map);
20553
20554 lpfc_qp_spin_lock_irqsave(&pring->ring_lock, iflags,
20555 qp, wq_access);
20556 ret = lpfc_sli4_wq_put(wq, wqe);
20557 if (ret) {
20558 spin_unlock_irqrestore(&pring->ring_lock, iflags);
20559 return ret;
20560 }
20561 lpfc_sli_ringtxcmpl_put(phba, pring, pwqe);
20562 spin_unlock_irqrestore(&pring->ring_lock, iflags);
20563
20564 lpfc_sli4_poll_eq(qp->hba_eq, LPFC_POLL_FASTPATH);
20565 return 0;
20566 }
20567
20568
20569 if (pwqe->iocb_flag & LPFC_IO_NVMET) {
20570
20571 wq = qp->io_wq;
20572 pring = wq->pring;
20573
20574 ctxp = pwqe->context2;
20575 sglq = ctxp->ctxbuf->sglq;
20576 if (pwqe->sli4_xritag == NO_XRI) {
20577 pwqe->sli4_lxritag = sglq->sli4_lxritag;
20578 pwqe->sli4_xritag = sglq->sli4_xritag;
20579 }
20580 bf_set(wqe_xri_tag, &pwqe->wqe.xmit_bls_rsp.wqe_com,
20581 pwqe->sli4_xritag);
20582 bf_set(wqe_cqid, &wqe->generic.wqe_com, qp->io_cq_map);
20583
20584 lpfc_qp_spin_lock_irqsave(&pring->ring_lock, iflags,
20585 qp, wq_access);
20586 ret = lpfc_sli4_wq_put(wq, wqe);
20587 if (ret) {
20588 spin_unlock_irqrestore(&pring->ring_lock, iflags);
20589 return ret;
20590 }
20591 lpfc_sli_ringtxcmpl_put(phba, pring, pwqe);
20592 spin_unlock_irqrestore(&pring->ring_lock, iflags);
20593
20594 lpfc_sli4_poll_eq(qp->hba_eq, LPFC_POLL_FASTPATH);
20595 return 0;
20596 }
20597 return WQE_ERROR;
20598}
20599
20600
20601
20602
20603
20604
20605
20606
20607
20608
20609
20610
20611
20612
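/**
 * lpfc_sli4_issue_abort_iotag - Issue an ABORT_XRI_CX WQE for an active I/O
 * @phba: pointer to lpfc hba data structure.
 * @cmdiocb: pointer to the command iocb being aborted.
 * @cmpl: abort completion handler.
 *
 * Allocates an abort iocb, builds an ABORT_XRI_CX WQE targeting the
 * command's XRI (with the IA bit set when the link is down), marks the
 * command LPFC_DRIVER_ABORTED and issues the abort on the same hardware
 * queue. On failure the abort flag is cleared and the abort iocb released.
 *
 * Return: 0 on success, otherwise an IOCB/WQE error status.
 **/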
20613int
20614lpfc_sli4_issue_abort_iotag(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
20615 void *cmpl)
20616{
20617 struct lpfc_vport *vport = cmdiocb->vport;
20618 struct lpfc_iocbq *abtsiocb = NULL;
20619 union lpfc_wqe128 *abtswqe;
20620 struct lpfc_io_buf *lpfc_cmd;
20621 int retval = IOCB_ERROR;
20622 u16 xritag = cmdiocb->sli4_xritag;
20623
20624
20625
20626
20627
20628
20629
20630 abtsiocb = __lpfc_sli_get_iocbq(phba);
20631 if (!abtsiocb)
20632 return WQE_NORESOURCE;
20633
20634
20635 cmdiocb->iocb_flag |= LPFC_DRIVER_ABORTED;
20636
20637 abtswqe = &abtsiocb->wqe;
20638 memset(abtswqe, 0, sizeof(*abtswqe));
20639
20640 if (!lpfc_is_link_up(phba))
20641 bf_set(abort_cmd_ia, &abtswqe->abort_cmd, 1);
20642 bf_set(abort_cmd_criteria, &abtswqe->abort_cmd, T_XRI_TAG);
20643 abtswqe->abort_cmd.rsrvd5 = 0;
20644 abtswqe->abort_cmd.wqe_com.abort_tag = xritag;
20645 bf_set(wqe_reqtag, &abtswqe->abort_cmd.wqe_com, abtsiocb->iotag);
20646 bf_set(wqe_cmnd, &abtswqe->abort_cmd.wqe_com, CMD_ABORT_XRI_CX);
20647 bf_set(wqe_xri_tag, &abtswqe->generic.wqe_com, 0);
20648 bf_set(wqe_qosd, &abtswqe->abort_cmd.wqe_com, 1);
20649 bf_set(wqe_lenloc, &abtswqe->abort_cmd.wqe_com, LPFC_WQE_LENLOC_NONE);
20650 bf_set(wqe_cmd_type, &abtswqe->abort_cmd.wqe_com, OTHER_COMMAND);
20651
20652
20653 abtsiocb->hba_wqidx = cmdiocb->hba_wqidx;
20654 abtsiocb->iocb_flag |= LPFC_USE_FCPWQIDX;
20655 if (cmdiocb->iocb_flag & LPFC_IO_FCP)
20656 abtsiocb->iocb_flag |= LPFC_IO_FCP;
20657 if (cmdiocb->iocb_flag & LPFC_IO_NVME)
20658 abtsiocb->iocb_flag |= LPFC_IO_NVME;
20659 if (cmdiocb->iocb_flag & LPFC_IO_FOF)
20660 abtsiocb->iocb_flag |= LPFC_IO_FOF;
20661 abtsiocb->vport = vport;
20662 abtsiocb->wqe_cmpl = cmpl;
20663
20664 lpfc_cmd = container_of(cmdiocb, struct lpfc_io_buf, cur_iocbq);
20665 retval = lpfc_sli4_issue_wqe(phba, lpfc_cmd->hdwq, abtsiocb);
20666
20667 lpfc_printf_vlog(vport, KERN_INFO, LOG_SLI | LOG_NVME_ABTS | LOG_FCP,
20668 "0359 Abort xri x%x, original iotag x%x, "
20669 "abort cmd iotag x%x retval x%x\n",
20670 xritag, cmdiocb->iotag, abtsiocb->iotag, retval);
20671
20672 if (retval) {
20673 cmdiocb->iocb_flag &= ~LPFC_DRIVER_ABORTED;
20674 __lpfc_sli_release_iocbq(phba, abtsiocb);
20675 }
20676
20677 return retval;
20678}
20679
20680#ifdef LPFC_MXP_STAT
20681
20682
20683
20684
20685
20686
20687
20688
20689
20690
20691
20692
20693
20694
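/**
 * lpfc_snapshot_mxp - Snapshot multi-XRI pool statistics for a hardware queue
 * @phba: pointer to lpfc hba data structure.
 * @hwqid: hardware queue index.
 *
 * When the snapshot counter reaches LPFC_MXP_SNAPSHOT_TAKEN, records the
 * current public pool, private pool and txcmplq counts in the pool's
 * statistics fields. Compiled only when LPFC_MXP_STAT is defined.
 **/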
20695void lpfc_snapshot_mxp(struct lpfc_hba *phba, u32 hwqid)
20696{
20697 struct lpfc_sli4_hdw_queue *qp;
20698 struct lpfc_multixri_pool *multixri_pool;
20699 struct lpfc_pvt_pool *pvt_pool;
20700 struct lpfc_pbl_pool *pbl_pool;
20701 u32 txcmplq_cnt;
20702
20703 qp = &phba->sli4_hba.hdwq[hwqid];
20704 multixri_pool = qp->p_multixri_pool;
20705 if (!multixri_pool)
20706 return;
20707
20708 if (multixri_pool->stat_snapshot_taken == LPFC_MXP_SNAPSHOT_TAKEN) {
20709 pvt_pool = &qp->p_multixri_pool->pvt_pool;
20710 pbl_pool = &qp->p_multixri_pool->pbl_pool;
20711 txcmplq_cnt = qp->io_wq->pring->txcmplq_cnt;
20712
20713 multixri_pool->stat_pbl_count = pbl_pool->count;
20714 multixri_pool->stat_pvt_count = pvt_pool->count;
20715 multixri_pool->stat_busy_count = txcmplq_cnt;
20716 }
20717
20718 multixri_pool->stat_snapshot_taken++;
20719}
20720#endif
20721
20722
20723
20724
20725
20726
20727
20728
20729
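/**
 * lpfc_adjust_pvt_pool_count - Shrink an idle private XRI pool
 * @phba: pointer to lpfc hba data structure.
 * @hwqid: hardware queue index.
 *
 * If no new I/O has been requested on this hardware queue since the last
 * check, moves XRIs from the private pool back to the public pool;
 * otherwise simply records the current request count.
 **/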
20730void lpfc_adjust_pvt_pool_count(struct lpfc_hba *phba, u32 hwqid)
20731{
20732 struct lpfc_multixri_pool *multixri_pool;
20733 u32 io_req_count;
20734 u32 prev_io_req_count;
20735
20736 multixri_pool = phba->sli4_hba.hdwq[hwqid].p_multixri_pool;
20737 if (!multixri_pool)
20738 return;
20739 io_req_count = multixri_pool->io_req_count;
20740 prev_io_req_count = multixri_pool->prev_io_req_count;
20741
20742 if (prev_io_req_count != io_req_count) {
20743
20744 multixri_pool->prev_io_req_count = io_req_count;
20745 } else {
20746
20747
20748
20749 lpfc_move_xri_pvt_to_pbl(phba, hwqid);
20750 }
20751}
20752
20753
20754
20755
20756
20757
20758
20759
20760
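/**
 * lpfc_adjust_high_watermark - Recompute the private pool high watermark
 * @phba: pointer to lpfc hba data structure.
 * @hwqid: hardware queue index.
 *
 * Sets the private pool's high watermark to the number of currently busy
 * and aborted I/O buffers, clamped between half of the queue's XRI limit
 * and the full limit.
 **/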
20761void lpfc_adjust_high_watermark(struct lpfc_hba *phba, u32 hwqid)
20762{
20763 u32 new_watermark;
20764 u32 watermark_max;
20765 u32 watermark_min;
20766 u32 xri_limit;
20767 u32 txcmplq_cnt;
20768 u32 abts_io_bufs;
20769 struct lpfc_multixri_pool *multixri_pool;
20770 struct lpfc_sli4_hdw_queue *qp;
20771
20772 qp = &phba->sli4_hba.hdwq[hwqid];
20773 multixri_pool = qp->p_multixri_pool;
20774 if (!multixri_pool)
20775 return;
20776 xri_limit = multixri_pool->xri_limit;
20777
20778 watermark_max = xri_limit;
20779 watermark_min = xri_limit / 2;
20780
20781 txcmplq_cnt = qp->io_wq->pring->txcmplq_cnt;
20782 abts_io_bufs = qp->abts_scsi_io_bufs;
20783 abts_io_bufs += qp->abts_nvme_io_bufs;
20784
20785 new_watermark = txcmplq_cnt + abts_io_bufs;
20786 new_watermark = min(watermark_max, new_watermark);
20787 new_watermark = max(watermark_min, new_watermark);
20788 multixri_pool->pvt_pool.high_watermark = new_watermark;
20789
20790#ifdef LPFC_MXP_STAT
20791 multixri_pool->stat_max_hwm = max(multixri_pool->stat_max_hwm,
20792 new_watermark);
20793#endif
20794}
20795
20796
20797
20798
20799
20800
20801
20802
20803
20804
20805
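/**
 * lpfc_move_xri_pvt_to_pbl - Move XRIs from the private to the public pool
 * @phba: pointer to lpfc hba data structure.
 * @hwqid: hardware queue index.
 *
 * With both pool locks held, moves every private pool entry above the low
 * watermark onto the public pool list, keeping only low_watermark entries
 * private; if the private pool is already at or below the low watermark,
 * all of its entries are moved to the public pool.
 **/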
20806void lpfc_move_xri_pvt_to_pbl(struct lpfc_hba *phba, u32 hwqid)
20807{
20808 struct lpfc_pbl_pool *pbl_pool;
20809 struct lpfc_pvt_pool *pvt_pool;
20810 struct lpfc_sli4_hdw_queue *qp;
20811 struct lpfc_io_buf *lpfc_ncmd;
20812 struct lpfc_io_buf *lpfc_ncmd_next;
20813 unsigned long iflag;
20814 struct list_head tmp_list;
20815 u32 tmp_count;
20816
20817 qp = &phba->sli4_hba.hdwq[hwqid];
20818 pbl_pool = &qp->p_multixri_pool->pbl_pool;
20819 pvt_pool = &qp->p_multixri_pool->pvt_pool;
20820 tmp_count = 0;
20821
20822 lpfc_qp_spin_lock_irqsave(&pbl_pool->lock, iflag, qp, mv_to_pub_pool);
20823 lpfc_qp_spin_lock(&pvt_pool->lock, qp, mv_from_pvt_pool);
20824
20825 if (pvt_pool->count > pvt_pool->low_watermark) {
20826
20827
20828
20829
20830
20831 INIT_LIST_HEAD(&tmp_list);
20832 list_for_each_entry_safe(lpfc_ncmd, lpfc_ncmd_next,
20833 &pvt_pool->list, list) {
20834 list_move_tail(&lpfc_ncmd->list, &tmp_list);
20835 tmp_count++;
20836 if (tmp_count >= pvt_pool->low_watermark)
20837 break;
20838 }
20839
20840
20841 list_splice_init(&pvt_pool->list, &pbl_pool->list);
20842
20843
20844 list_splice(&tmp_list, &pvt_pool->list);
20845
20846 pbl_pool->count += (pvt_pool->count - tmp_count);
20847 pvt_pool->count = tmp_count;
20848 } else {
20849
20850 list_splice_init(&pvt_pool->list, &pbl_pool->list);
20851 pbl_pool->count += pvt_pool->count;
20852 pvt_pool->count = 0;
20853 }
20854
20855 spin_unlock(&pvt_pool->lock);
20856 spin_unlock_irqrestore(&pbl_pool->lock, iflag);
20857}
20858
20859
20860
20861
20862
20863
20864
20865
20866
20867
20868
20869
20870
20871
20872
20873
20874
20875
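/**
 * _lpfc_move_xri_pbl_to_pvt - Try to refill a private pool from a public pool
 * @phba: pointer to lpfc hba data structure.
 * @qp: hardware queue owning the private pool.
 * @pbl_pool: public pool to take XRIs from.
 * @pvt_pool: private pool to add XRIs to.
 * @count: maximum number of XRIs to move.
 *
 * Uses a trylock on the public pool so callers can fall back to another
 * hardware queue's pool without blocking.
 *
 * Return: true if the lock was obtained and the public pool had XRIs to
 * move, false otherwise.
 **/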
20876static bool
20877_lpfc_move_xri_pbl_to_pvt(struct lpfc_hba *phba, struct lpfc_sli4_hdw_queue *qp,
20878 struct lpfc_pbl_pool *pbl_pool,
20879 struct lpfc_pvt_pool *pvt_pool, u32 count)
20880{
20881 struct lpfc_io_buf *lpfc_ncmd;
20882 struct lpfc_io_buf *lpfc_ncmd_next;
20883 unsigned long iflag;
20884 int ret;
20885
20886 ret = spin_trylock_irqsave(&pbl_pool->lock, iflag);
20887 if (ret) {
20888 if (pbl_pool->count) {
20889
20890 lpfc_qp_spin_lock(&pvt_pool->lock, qp, mv_to_pvt_pool);
20891 list_for_each_entry_safe(lpfc_ncmd,
20892 lpfc_ncmd_next,
20893 &pbl_pool->list,
20894 list) {
20895 list_move_tail(&lpfc_ncmd->list,
20896 &pvt_pool->list);
20897 pvt_pool->count++;
20898 pbl_pool->count--;
20899 count--;
20900 if (count == 0)
20901 break;
20902 }
20903
20904 spin_unlock(&pvt_pool->lock);
20905 spin_unlock_irqrestore(&pbl_pool->lock, iflag);
20906 return true;
20907 }
20908 spin_unlock_irqrestore(&pbl_pool->lock, iflag);
20909 }
20910
20911 return false;
20912}
20913
20914
20915
20916
20917
20918
20919
20920
20921
20922
20923
20924
20925
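/**
 * lpfc_move_xri_pbl_to_pvt - Refill a private XRI pool from the public pools
 * @phba: pointer to lpfc hba data structure.
 * @hwqid: hardware queue index whose private pool needs XRIs.
 * @count: maximum number of XRIs to move.
 *
 * Tries the local hardware queue's public pool first, then round-robins
 * through the other queues' public pools until XRIs are obtained or every
 * pool has been tried. Hit/miss statistics are updated when LPFC_MXP_STAT
 * is defined.
 **/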
20926void lpfc_move_xri_pbl_to_pvt(struct lpfc_hba *phba, u32 hwqid, u32 count)
20927{
20928 struct lpfc_multixri_pool *multixri_pool;
20929 struct lpfc_multixri_pool *next_multixri_pool;
20930 struct lpfc_pvt_pool *pvt_pool;
20931 struct lpfc_pbl_pool *pbl_pool;
20932 struct lpfc_sli4_hdw_queue *qp;
20933 u32 next_hwqid;
20934 u32 hwq_count;
20935 int ret;
20936
20937 qp = &phba->sli4_hba.hdwq[hwqid];
20938 multixri_pool = qp->p_multixri_pool;
20939 pvt_pool = &multixri_pool->pvt_pool;
20940 pbl_pool = &multixri_pool->pbl_pool;
20941
20942
20943 ret = _lpfc_move_xri_pbl_to_pvt(phba, qp, pbl_pool, pvt_pool, count);
20944 if (ret) {
20945#ifdef LPFC_MXP_STAT
20946 multixri_pool->local_pbl_hit_count++;
20947#endif
20948 return;
20949 }
20950
20951 hwq_count = phba->cfg_hdw_queue;
20952
20953
20954 next_hwqid = multixri_pool->rrb_next_hwqid;
20955
20956 do {
20957
20958 next_hwqid = (next_hwqid + 1) % hwq_count;
20959
20960 next_multixri_pool =
20961 phba->sli4_hba.hdwq[next_hwqid].p_multixri_pool;
20962 pbl_pool = &next_multixri_pool->pbl_pool;
20963
20964
20965 ret = _lpfc_move_xri_pbl_to_pvt(
20966 phba, qp, pbl_pool, pvt_pool, count);
20967
20968
20969 } while (!ret && next_hwqid != multixri_pool->rrb_next_hwqid);
20970
20971
20972 multixri_pool->rrb_next_hwqid = next_hwqid;
20973
20974 if (!ret) {
20975
20976 multixri_pool->pbl_empty_count++;
20977 }
20978
20979#ifdef LPFC_MXP_STAT
20980 if (ret) {
20981 if (next_hwqid == hwqid)
20982 multixri_pool->local_pbl_hit_count++;
20983 else
20984 multixri_pool->other_pbl_hit_count++;
20985 }
20986#endif
20987}
20988
20989
20990
20991
20992
20993
20994
20995
20996
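/**
 * lpfc_keep_pvt_pool_above_lowwm - Top up a private pool below its low watermark
 * @phba: pointer to lpfc hba data structure.
 * @hwqid: hardware queue index.
 *
 * Moves an XRI_BATCH worth of XRIs from the public pools into this queue's
 * private pool when the private pool count is below its low watermark.
 **/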
20997void lpfc_keep_pvt_pool_above_lowwm(struct lpfc_hba *phba, u32 hwqid)
20998{
20999 struct lpfc_multixri_pool *multixri_pool;
21000 struct lpfc_pvt_pool *pvt_pool;
21001
21002 multixri_pool = phba->sli4_hba.hdwq[hwqid].p_multixri_pool;
21003 pvt_pool = &multixri_pool->pvt_pool;
21004
21005 if (pvt_pool->count < pvt_pool->low_watermark)
21006 lpfc_move_xri_pbl_to_pvt(phba, hwqid, XRI_BATCH);
21007}
21008
21009
21010
21011
21012
21013
21014
21015
21016
21017
21018
21019
21020
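/**
 * lpfc_release_io_buf - Return an I/O buffer to the appropriate free pool
 * @phba: pointer to lpfc hba data structure.
 * @lpfc_ncmd: I/O buffer being released.
 * @qp: hardware queue that owns the buffer.
 *
 * Clears the buffer's command state and any extra SGL or cmd/rsp buffers,
 * then returns it to the expedite pool, the private pool (when below the
 * watermarks), the public pool, or, when XRI rebalancing is disabled, the
 * hardware queue's put list.
 **/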
21021void lpfc_release_io_buf(struct lpfc_hba *phba, struct lpfc_io_buf *lpfc_ncmd,
21022 struct lpfc_sli4_hdw_queue *qp)
21023{
21024 unsigned long iflag;
21025 struct lpfc_pbl_pool *pbl_pool;
21026 struct lpfc_pvt_pool *pvt_pool;
21027 struct lpfc_epd_pool *epd_pool;
21028 u32 txcmplq_cnt;
21029 u32 xri_owned;
21030 u32 xri_limit;
21031 u32 abts_io_bufs;
21032
	/* MUST zero fields if buffer is reused by another protocol */
21034 lpfc_ncmd->nvmeCmd = NULL;
21035 lpfc_ncmd->cur_iocbq.wqe_cmpl = NULL;
21036 lpfc_ncmd->cur_iocbq.iocb_cmpl = NULL;
21037
21038 if (phba->cfg_xpsgl && !phba->nvmet_support &&
21039 !list_empty(&lpfc_ncmd->dma_sgl_xtra_list))
21040 lpfc_put_sgl_per_hdwq(phba, lpfc_ncmd);
21041
21042 if (!list_empty(&lpfc_ncmd->dma_cmd_rsp_list))
21043 lpfc_put_cmd_rsp_buf_per_hdwq(phba, lpfc_ncmd);
21044
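	/* With XRI rebalancing enabled the buffer goes back to one of the
	 * multiXRI pools (expedite, private or public); otherwise it simply
	 * returns to the hardware queue's put list below.
	 */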
21045 if (phba->cfg_xri_rebalancing) {
21046 if (lpfc_ncmd->expedite) {
			/* Return to the expedite pool */
21048 epd_pool = &phba->epd_pool;
21049 spin_lock_irqsave(&epd_pool->lock, iflag);
21050 list_add_tail(&lpfc_ncmd->list, &epd_pool->list);
21051 epd_pool->count++;
21052 spin_unlock_irqrestore(&epd_pool->lock, iflag);
21053 return;
21054 }
21055
		/* The multixri pools may already have been torn down (for
		 * example if the port went offline) by the time a late
		 * completion releases this buffer; nothing more to do then.
		 */
21060 if (!qp->p_multixri_pool)
21061 return;
21062
21063 pbl_pool = &qp->p_multixri_pool->pbl_pool;
21064 pvt_pool = &qp->p_multixri_pool->pvt_pool;
21065
21066 txcmplq_cnt = qp->io_wq->pring->txcmplq_cnt;
21067 abts_io_bufs = qp->abts_scsi_io_bufs;
21068 abts_io_bufs += qp->abts_nvme_io_bufs;
21069
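		/* xri_owned is the number of XRIs this hardware queue
		 * currently holds: free buffers in the private pool plus
		 * commands still on the txcmplq plus buffers waiting on
		 * aborts.
		 */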
21070 xri_owned = pvt_pool->count + txcmplq_cnt + abts_io_bufs;
21071 xri_limit = qp->p_multixri_pool->xri_limit;
21072
21073#ifdef LPFC_MXP_STAT
21074 if (xri_owned <= xri_limit)
21075 qp->p_multixri_pool->below_limit_count++;
21076 else
21077 qp->p_multixri_pool->above_limit_count++;
21078#endif
21079
		/* The XRI goes to either the public or the private free xri
		 * pool based on the watermarks and the xri_limit.
		 */
21083 if ((pvt_pool->count < pvt_pool->low_watermark) ||
21084 (xri_owned < xri_limit &&
21085 pvt_pool->count < pvt_pool->high_watermark)) {
21086 lpfc_qp_spin_lock_irqsave(&pvt_pool->lock, iflag,
21087 qp, free_pvt_pool);
21088 list_add_tail(&lpfc_ncmd->list,
21089 &pvt_pool->list);
21090 pvt_pool->count++;
21091 spin_unlock_irqrestore(&pvt_pool->lock, iflag);
21092 } else {
21093 lpfc_qp_spin_lock_irqsave(&pbl_pool->lock, iflag,
21094 qp, free_pub_pool);
21095 list_add_tail(&lpfc_ncmd->list,
21096 &pbl_pool->list);
21097 pbl_pool->count++;
21098 spin_unlock_irqrestore(&pbl_pool->lock, iflag);
21099 }
21100 } else {
21101 lpfc_qp_spin_lock_irqsave(&qp->io_buf_list_put_lock, iflag,
21102 qp, free_xri);
21103 list_add_tail(&lpfc_ncmd->list,
21104 &qp->lpfc_io_buf_list_put);
21105 qp->put_io_bufs++;
21106 spin_unlock_irqrestore(&qp->io_buf_list_put_lock,
21107 iflag);
21108 }
21109}
21110
/**
 * lpfc_get_io_buf_from_private_pool - Get one free IO buf from private pool
 * @phba: pointer to lpfc hba data structure
 * @qp: pointer to HDW queue
 * @pvt_pool: pointer to private pool data structure
 * @ndlp: pointer to lpfc nodelist data structure
 *
 * This routine tries to get one free IO buf from the private pool, skipping
 * any XRI that still has an active RRQ against @ndlp.
 *
 * Return:
 *   pointer to one free IO buf - if a suitable buf is found
 *   NULL - if the private pool is empty or every entry is skipped
 **/
21124static struct lpfc_io_buf *
21125lpfc_get_io_buf_from_private_pool(struct lpfc_hba *phba,
21126 struct lpfc_sli4_hdw_queue *qp,
21127 struct lpfc_pvt_pool *pvt_pool,
21128 struct lpfc_nodelist *ndlp)
21129{
21130 struct lpfc_io_buf *lpfc_ncmd;
21131 struct lpfc_io_buf *lpfc_ncmd_next;
21132 unsigned long iflag;
21133
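	/* Take the first buffer whose XRI does not have an active RRQ
	 * against this node, so a recently aborted exchange is not reused
	 * too soon.
	 */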
21134 lpfc_qp_spin_lock_irqsave(&pvt_pool->lock, iflag, qp, alloc_pvt_pool);
21135 list_for_each_entry_safe(lpfc_ncmd, lpfc_ncmd_next,
21136 &pvt_pool->list, list) {
21137 if (lpfc_test_rrq_active(
21138 phba, ndlp, lpfc_ncmd->cur_iocbq.sli4_lxritag))
21139 continue;
21140 list_del(&lpfc_ncmd->list);
21141 pvt_pool->count--;
21142 spin_unlock_irqrestore(&pvt_pool->lock, iflag);
21143 return lpfc_ncmd;
21144 }
21145 spin_unlock_irqrestore(&pvt_pool->lock, iflag);
21146
21147 return NULL;
21148}
21149
/**
 * lpfc_get_io_buf_from_expedite_pool - Get one free IO buf from expedite pool
 * @phba: pointer to lpfc hba data structure
 *
 * This routine tries to get one free IO buf from the expedite pool.
 *
 * Return:
 *   pointer to one free IO buf - if the expedite pool is not empty
 *   NULL - if the expedite pool is empty
 **/
21160static struct lpfc_io_buf *
21161lpfc_get_io_buf_from_expedite_pool(struct lpfc_hba *phba)
21162{
21163 struct lpfc_io_buf *lpfc_ncmd;
21164 struct lpfc_io_buf *lpfc_ncmd_next;
21165 unsigned long iflag;
21166 struct lpfc_epd_pool *epd_pool;
21167
21168 epd_pool = &phba->epd_pool;
21169 lpfc_ncmd = NULL;
21170
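	/* The expedite pool is a small reserve kept aside for urgent
	 * requests; just take the first entry if one is available.
	 */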
21171 spin_lock_irqsave(&epd_pool->lock, iflag);
21172 if (epd_pool->count > 0) {
21173 list_for_each_entry_safe(lpfc_ncmd, lpfc_ncmd_next,
21174 &epd_pool->list, list) {
21175 list_del(&lpfc_ncmd->list);
21176 epd_pool->count--;
21177 break;
21178 }
21179 }
21180 spin_unlock_irqrestore(&epd_pool->lock, iflag);
21181
21182 return lpfc_ncmd;
21183}
21184
/**
 * lpfc_get_io_buf_from_multixri_pools - Get one free IO buf from the pools
 * @phba: pointer to lpfc hba data structure
 * @ndlp: pointer to lpfc nodelist data structure
 * @hwqid: which HWQ to allocate from
 * @expedite: 1 means this request is urgent
 *
 * This routine does the following and then returns a pointer to one free
 * IO buf:
 *
 * 1. If the private free xri pool is empty, move a batch of XRIs from the
 *    public pools to the private pool.
 * 2. Get one XRI from the private free xri pool.
 * 3. If that fails and this is an expedite request, get one free xri from
 *    the expedite pool.
 *
 * Note: ndlp is only used on the SCSI side for RRQ testing; the caller
 *       passes NULL for ndlp on the NVME side.
 *
 * Return:
 *   pointer to one free IO buf - on success
 *   NULL - if no buf could be obtained
 **/
21208static struct lpfc_io_buf *
21209lpfc_get_io_buf_from_multixri_pools(struct lpfc_hba *phba,
21210 struct lpfc_nodelist *ndlp,
21211 int hwqid, int expedite)
21212{
21213 struct lpfc_sli4_hdw_queue *qp;
21214 struct lpfc_multixri_pool *multixri_pool;
21215 struct lpfc_pvt_pool *pvt_pool;
21216 struct lpfc_io_buf *lpfc_ncmd;
21217
21218 qp = &phba->sli4_hba.hdwq[hwqid];
21219 lpfc_ncmd = NULL;
21220 multixri_pool = qp->p_multixri_pool;
21221 pvt_pool = &multixri_pool->pvt_pool;
21222 multixri_pool->io_req_count++;
21223
	/* If pvt_pool is empty, move a batch of XRIs from the public pools */
21225 if (pvt_pool->count == 0)
21226 lpfc_move_xri_pbl_to_pvt(phba, hwqid, XRI_BATCH);
21227
	/* Get one XRI from the private free xri pool */
21229 lpfc_ncmd = lpfc_get_io_buf_from_private_pool(phba, qp, pvt_pool, ndlp);
21230
21231 if (lpfc_ncmd) {
21232 lpfc_ncmd->hdwq = qp;
21233 lpfc_ncmd->hdwq_no = hwqid;
21234 } else if (expedite) {
		/* Failed to get one from pvt_pool; since this request is
		 * marked expedite, fall back to the reserved expedite pool.
		 */
21238 lpfc_ncmd = lpfc_get_io_buf_from_expedite_pool(phba);
21239 }
21240
21241 return lpfc_ncmd;
21242}
21243
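/* Remove the first usable buffer from a hardware queue's get list, skipping
 * XRIs with an active RRQ and buffers whose SGLs were not posted to the port.
 * The caller is expected to hold the queue's io_buf_list_get_lock.
 */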
21244static inline struct lpfc_io_buf *
21245lpfc_io_buf(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp, int idx)
21246{
21247 struct lpfc_sli4_hdw_queue *qp;
21248 struct lpfc_io_buf *lpfc_cmd, *lpfc_cmd_next;
21249
21250 qp = &phba->sli4_hba.hdwq[idx];
21251 list_for_each_entry_safe(lpfc_cmd, lpfc_cmd_next,
21252 &qp->lpfc_io_buf_list_get, list) {
21253 if (lpfc_test_rrq_active(phba, ndlp,
21254 lpfc_cmd->cur_iocbq.sli4_lxritag))
21255 continue;
21256
21257 if (lpfc_cmd->flags & LPFC_SBUF_NOT_POSTED)
21258 continue;
21259
21260 list_del_init(&lpfc_cmd->list);
21261 qp->get_io_bufs--;
21262 lpfc_cmd->hdwq = qp;
21263 lpfc_cmd->hdwq_no = idx;
21264 return lpfc_cmd;
21265 }
21266 return NULL;
21267}
21268
/**
 * lpfc_get_io_buf - Get one IO buffer from free pool
 * @phba: The HBA for which this call is being executed.
 * @ndlp: pointer to lpfc nodelist data structure
 * @hwqid: which HWQ to allocate from
 * @expedite: 1 means this request is urgent
 *
 * This routine gets one IO buffer from a free pool. If cfg_xri_rebalancing
 * is set, the buffer is taken from the multiXRI pools; otherwise it is taken
 * from the head of the HWQ's lpfc_io_buf_list_get, refilling that list from
 * lpfc_io_buf_list_put when it runs low. A few buffers are held back as an
 * expedite reserve unless @expedite is set.
 *
 * Return:
 *   NULL - Error
 *   Pointer to lpfc_io_buf - Success
 **/
21287struct lpfc_io_buf *lpfc_get_io_buf(struct lpfc_hba *phba,
21288 struct lpfc_nodelist *ndlp,
21289 u32 hwqid, int expedite)
21290{
21291 struct lpfc_sli4_hdw_queue *qp;
21292 unsigned long iflag;
21293 struct lpfc_io_buf *lpfc_cmd;
21294
21295 qp = &phba->sli4_hba.hdwq[hwqid];
21296 lpfc_cmd = NULL;
21297
21298 if (phba->cfg_xri_rebalancing)
21299 lpfc_cmd = lpfc_get_io_buf_from_multixri_pools(
21300 phba, ndlp, hwqid, expedite);
21301 else {
21302 lpfc_qp_spin_lock_irqsave(&qp->io_buf_list_get_lock, iflag,
21303 qp, alloc_xri_get);
21304 if (qp->get_io_bufs > LPFC_NVME_EXPEDITE_XRICNT || expedite)
21305 lpfc_cmd = lpfc_io_buf(phba, ndlp, hwqid);
21306 if (!lpfc_cmd) {
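			/* No buffer was obtained from the get list; splice
			 * the whole put list onto it and try once more.
			 */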
21307 lpfc_qp_spin_lock(&qp->io_buf_list_put_lock,
21308 qp, alloc_xri_put);
21309 list_splice(&qp->lpfc_io_buf_list_put,
21310 &qp->lpfc_io_buf_list_get);
21311 qp->get_io_bufs += qp->put_io_bufs;
21312 INIT_LIST_HEAD(&qp->lpfc_io_buf_list_put);
21313 qp->put_io_bufs = 0;
21314 spin_unlock(&qp->io_buf_list_put_lock);
21315 if (qp->get_io_bufs > LPFC_NVME_EXPEDITE_XRICNT ||
21316 expedite)
21317 lpfc_cmd = lpfc_io_buf(phba, ndlp, hwqid);
21318 }
21319 spin_unlock_irqrestore(&qp->io_buf_list_get_lock, iflag);
21320 }
21321
21322 return lpfc_cmd;
21323}
21324
/**
 * lpfc_get_sgl_per_hdwq - Get one SGL chunk from hdwq's pool
 * @phba: The HBA for which this call is being executed.
 * @lpfc_buf: IO buf structure to append the SGL chunk to
 *
 * This routine gets one SGL chunk buffer from the hdwq's SGL chunk pool,
 * allocating a new chunk from the DMA pool if the per-hdwq list is empty,
 * and appends it to @lpfc_buf's dma_sgl_xtra_list.
 *
 * Return:
 *   NULL - Error
 *   Pointer to sli4_hybrid_sgl - Success
 **/
21337struct sli4_hybrid_sgl *
21338lpfc_get_sgl_per_hdwq(struct lpfc_hba *phba, struct lpfc_io_buf *lpfc_buf)
21339{
21340 struct sli4_hybrid_sgl *list_entry = NULL;
21341 struct sli4_hybrid_sgl *tmp = NULL;
21342 struct sli4_hybrid_sgl *allocated_sgl = NULL;
21343 struct lpfc_sli4_hdw_queue *hdwq = lpfc_buf->hdwq;
21344 struct list_head *buf_list = &hdwq->sgl_list;
21345 unsigned long iflags;
21346
21347 spin_lock_irqsave(&hdwq->hdwq_lock, iflags);
21348
21349 if (likely(!list_empty(buf_list))) {
		/* break off 1 chunk from the sgl_list */
21351 list_for_each_entry_safe(list_entry, tmp,
21352 buf_list, list_node) {
21353 list_move_tail(&list_entry->list_node,
21354 &lpfc_buf->dma_sgl_xtra_list);
21355 break;
21356 }
21357 } else {
		/* sgl_list is empty: allocate a new chunk */
21359 spin_unlock_irqrestore(&hdwq->hdwq_lock, iflags);
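		/* Allocate the new chunk with the hdwq lock dropped; the
		 * lock is retaken below before the chunk is linked onto the
		 * IO buf.
		 */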
21360 tmp = kmalloc_node(sizeof(*tmp), GFP_ATOMIC,
21361 cpu_to_node(hdwq->io_wq->chann));
21362 if (!tmp) {
21363 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
21364 "8353 error kmalloc memory for HDWQ "
21365 "%d %s\n",
21366 lpfc_buf->hdwq_no, __func__);
21367 return NULL;
21368 }
21369
21370 tmp->dma_sgl = dma_pool_alloc(phba->lpfc_sg_dma_buf_pool,
21371 GFP_ATOMIC, &tmp->dma_phys_sgl);
21372 if (!tmp->dma_sgl) {
21373 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
21374 "8354 error pool_alloc memory for HDWQ "
21375 "%d %s\n",
21376 lpfc_buf->hdwq_no, __func__);
21377 kfree(tmp);
21378 return NULL;
21379 }
21380
21381 spin_lock_irqsave(&hdwq->hdwq_lock, iflags);
21382 list_add_tail(&tmp->list_node, &lpfc_buf->dma_sgl_xtra_list);
21383 }
21384
21385 allocated_sgl = list_last_entry(&lpfc_buf->dma_sgl_xtra_list,
21386 struct sli4_hybrid_sgl,
21387 list_node);
21388
21389 spin_unlock_irqrestore(&hdwq->hdwq_lock, iflags);
21390
21391 return allocated_sgl;
21392}
21393
/**
 * lpfc_put_sgl_per_hdwq - Put SGL chunks back into hdwq's pool
 * @phba: The HBA for which this call is being executed.
 * @lpfc_buf: IO buf structure holding the SGL chunk(s)
 *
 * This routine moves all SGL chunks from @lpfc_buf's dma_sgl_xtra_list back
 * to the owning hardware queue's sgl_list.
 *
 * Return:
 *   0 - Success
 *   -EINVAL - Error (no SGL chunks were attached to @lpfc_buf)
 **/
21405int
21406lpfc_put_sgl_per_hdwq(struct lpfc_hba *phba, struct lpfc_io_buf *lpfc_buf)
21407{
21408 int rc = 0;
21409 struct sli4_hybrid_sgl *list_entry = NULL;
21410 struct sli4_hybrid_sgl *tmp = NULL;
21411 struct lpfc_sli4_hdw_queue *hdwq = lpfc_buf->hdwq;
21412 struct list_head *buf_list = &hdwq->sgl_list;
21413 unsigned long iflags;
21414
21415 spin_lock_irqsave(&hdwq->hdwq_lock, iflags);
21416
21417 if (likely(!list_empty(&lpfc_buf->dma_sgl_xtra_list))) {
21418 list_for_each_entry_safe(list_entry, tmp,
21419 &lpfc_buf->dma_sgl_xtra_list,
21420 list_node) {
21421 list_move_tail(&list_entry->list_node,
21422 buf_list);
21423 }
21424 } else {
21425 rc = -EINVAL;
21426 }
21427
21428 spin_unlock_irqrestore(&hdwq->hdwq_lock, iflags);
21429 return rc;
21430}
21431
/**
 * lpfc_free_sgl_per_hdwq - Free all SGL chunks of a hdwq pool
 * @phba: phba object
 * @hdwq: hdwq to clean up SGL chunk resources on
 *
 * This routine frees all SGL chunk buffers in the hdwq's SGL chunk pool.
 *
 * Return: None
 **/
21442void
21443lpfc_free_sgl_per_hdwq(struct lpfc_hba *phba,
21444 struct lpfc_sli4_hdw_queue *hdwq)
21445{
21446 struct list_head *buf_list = &hdwq->sgl_list;
21447 struct sli4_hybrid_sgl *list_entry = NULL;
21448 struct sli4_hybrid_sgl *tmp = NULL;
21449 unsigned long iflags;
21450
21451 spin_lock_irqsave(&hdwq->hdwq_lock, iflags);
21452
	/* Free the sgl pool */
21454 list_for_each_entry_safe(list_entry, tmp,
21455 buf_list, list_node) {
21456 dma_pool_free(phba->lpfc_sg_dma_buf_pool,
21457 list_entry->dma_sgl,
21458 list_entry->dma_phys_sgl);
21459 list_del(&list_entry->list_node);
21460 kfree(list_entry);
21461 }
21462
21463 spin_unlock_irqrestore(&hdwq->hdwq_lock, iflags);
21464}
21465
/**
 * lpfc_get_cmd_rsp_buf_per_hdwq - Get one CMD/RSP buffer from hdwq's pool
 * @phba: The HBA for which this call is being executed.
 * @lpfc_buf: IO buf structure to attach the CMD/RSP buffer to
 *
 * This routine gets one CMD/RSP buffer from the hdwq's CMD/RSP pool,
 * allocating a new buffer from the DMA pool if the per-hdwq list is empty,
 * and appends it to @lpfc_buf's dma_cmd_rsp_list.
 *
 * Return:
 *   NULL - Error
 *   Pointer to fcp_cmd_rsp_buf - Success
 **/
21478struct fcp_cmd_rsp_buf *
21479lpfc_get_cmd_rsp_buf_per_hdwq(struct lpfc_hba *phba,
21480 struct lpfc_io_buf *lpfc_buf)
21481{
21482 struct fcp_cmd_rsp_buf *list_entry = NULL;
21483 struct fcp_cmd_rsp_buf *tmp = NULL;
21484 struct fcp_cmd_rsp_buf *allocated_buf = NULL;
21485 struct lpfc_sli4_hdw_queue *hdwq = lpfc_buf->hdwq;
21486 struct list_head *buf_list = &hdwq->cmd_rsp_buf_list;
21487 unsigned long iflags;
21488
21489 spin_lock_irqsave(&hdwq->hdwq_lock, iflags);
21490
21491 if (likely(!list_empty(buf_list))) {
		/* break off 1 chunk from the list */
21493 list_for_each_entry_safe(list_entry, tmp,
21494 buf_list,
21495 list_node) {
21496 list_move_tail(&list_entry->list_node,
21497 &lpfc_buf->dma_cmd_rsp_list);
21498 break;
21499 }
21500 } else {
		/* list is empty: allocate a new CMD/RSP buffer */
21502 spin_unlock_irqrestore(&hdwq->hdwq_lock, iflags);
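		/* Allocate the new buffer with the hdwq lock dropped; the
		 * lock is retaken below before the buffer is linked onto the
		 * IO buf.
		 */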
21503 tmp = kmalloc_node(sizeof(*tmp), GFP_ATOMIC,
21504 cpu_to_node(hdwq->io_wq->chann));
21505 if (!tmp) {
21506 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
21507 "8355 error kmalloc memory for HDWQ "
21508 "%d %s\n",
21509 lpfc_buf->hdwq_no, __func__);
21510 return NULL;
21511 }
21512
21513 tmp->fcp_cmnd = dma_pool_alloc(phba->lpfc_cmd_rsp_buf_pool,
21514 GFP_ATOMIC,
21515 &tmp->fcp_cmd_rsp_dma_handle);
21516
21517 if (!tmp->fcp_cmnd) {
21518 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
21519 "8356 error pool_alloc memory for HDWQ "
21520 "%d %s\n",
21521 lpfc_buf->hdwq_no, __func__);
21522 kfree(tmp);
21523 return NULL;
21524 }
21525
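		/* The DMA buffer holds the FCP_CMND immediately followed by
		 * the FCP_RSP, so the response pointer is just an offset
		 * into the same allocation.
		 */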
21526 tmp->fcp_rsp = (struct fcp_rsp *)((uint8_t *)tmp->fcp_cmnd +
21527 sizeof(struct fcp_cmnd));
21528
21529 spin_lock_irqsave(&hdwq->hdwq_lock, iflags);
21530 list_add_tail(&tmp->list_node, &lpfc_buf->dma_cmd_rsp_list);
21531 }
21532
21533 allocated_buf = list_last_entry(&lpfc_buf->dma_cmd_rsp_list,
21534 struct fcp_cmd_rsp_buf,
21535 list_node);
21536
21537 spin_unlock_irqrestore(&hdwq->hdwq_lock, iflags);
21538
21539 return allocated_buf;
21540}
21541
/**
 * lpfc_put_cmd_rsp_buf_per_hdwq - Put CMD/RSP buffers back into hdwq's pool
 * @phba: The HBA for which this call is being executed.
 * @lpfc_buf: IO buf structure holding the CMD/RSP buffer(s)
 *
 * This routine moves all CMD/RSP buffers from @lpfc_buf's dma_cmd_rsp_list
 * back to the owning hardware queue's cmd_rsp_buf_list.
 *
 * Return:
 *   0 - Success
 *   -EINVAL - Error (no CMD/RSP buffers were attached to @lpfc_buf)
 **/
21553int
21554lpfc_put_cmd_rsp_buf_per_hdwq(struct lpfc_hba *phba,
21555 struct lpfc_io_buf *lpfc_buf)
21556{
21557 int rc = 0;
21558 struct fcp_cmd_rsp_buf *list_entry = NULL;
21559 struct fcp_cmd_rsp_buf *tmp = NULL;
21560 struct lpfc_sli4_hdw_queue *hdwq = lpfc_buf->hdwq;
21561 struct list_head *buf_list = &hdwq->cmd_rsp_buf_list;
21562 unsigned long iflags;
21563
21564 spin_lock_irqsave(&hdwq->hdwq_lock, iflags);
21565
21566 if (likely(!list_empty(&lpfc_buf->dma_cmd_rsp_list))) {
21567 list_for_each_entry_safe(list_entry, tmp,
21568 &lpfc_buf->dma_cmd_rsp_list,
21569 list_node) {
21570 list_move_tail(&list_entry->list_node,
21571 buf_list);
21572 }
21573 } else {
21574 rc = -EINVAL;
21575 }
21576
21577 spin_unlock_irqrestore(&hdwq->hdwq_lock, iflags);
21578 return rc;
21579}
21580
/**
 * lpfc_free_cmd_rsp_buf_per_hdwq - Free all CMD/RSP buffers of a hdwq pool
 * @phba: phba object
 * @hdwq: hdwq to clean up CMD/RSP buffer resources on
 *
 * This routine frees all CMD/RSP buffers in the hdwq's CMD/RSP buffer pool.
 *
 * Return: None
 **/
21591void
21592lpfc_free_cmd_rsp_buf_per_hdwq(struct lpfc_hba *phba,
21593 struct lpfc_sli4_hdw_queue *hdwq)
21594{
21595 struct list_head *buf_list = &hdwq->cmd_rsp_buf_list;
21596 struct fcp_cmd_rsp_buf *list_entry = NULL;
21597 struct fcp_cmd_rsp_buf *tmp = NULL;
21598 unsigned long iflags;
21599
21600 spin_lock_irqsave(&hdwq->hdwq_lock, iflags);
21601
	/* Free the cmd_rsp buf pool */
21603 list_for_each_entry_safe(list_entry, tmp,
21604 buf_list,
21605 list_node) {
21606 dma_pool_free(phba->lpfc_cmd_rsp_buf_pool,
21607 list_entry->fcp_cmnd,
21608 list_entry->fcp_cmd_rsp_dma_handle);
21609 list_del(&list_entry->list_node);
21610 kfree(list_entry);
21611 }
21612
21613 spin_unlock_irqrestore(&hdwq->hdwq_lock, iflags);
21614}
21615