1
2
3
4
5
6#include "qla_def.h"
7#include "qla_target.h"
8#include "qla_gbl.h"
9
10#include <linux/delay.h>
11#include <linux/slab.h>
12#include <linux/cpu.h>
13#include <linux/t10-pi.h>
14#include <scsi/scsi_tcq.h>
15#include <scsi/scsi_bsg_fc.h>
16#include <scsi/scsi_eh.h>
17#include <scsi/fc/fc_fs.h>
18#include <linux/nvme-fc-driver.h>
19
20static void qla2x00_mbx_completion(scsi_qla_host_t *, uint16_t);
21static void qla2x00_status_entry(scsi_qla_host_t *, struct rsp_que *, void *);
22static void qla2x00_status_cont_entry(struct rsp_que *, sts_cont_entry_t *);
23static int qla2x00_error_entry(scsi_qla_host_t *, struct rsp_que *,
24 sts_entry_t *);
25static void qla27xx_process_purex_fpin(struct scsi_qla_host *vha,
26 struct purex_item *item);
27static struct purex_item *qla24xx_alloc_purex_item(scsi_qla_host_t *vha,
28 uint16_t size);
29static struct purex_item *qla24xx_copy_std_pkt(struct scsi_qla_host *vha,
30 void *pkt);
31static struct purex_item *qla27xx_copy_fpin_pkt(struct scsi_qla_host *vha,
32 void **pkt, struct rsp_que **rsp);
33
34static void
35qla27xx_process_purex_fpin(struct scsi_qla_host *vha, struct purex_item *item)
36{
37 void *pkt = &item->iocb;
38 uint16_t pkt_size = item->size;
39
40 ql_dbg(ql_dbg_init + ql_dbg_verbose, vha, 0x508d,
41 "%s: Enter\n", __func__);
42
43 ql_dbg(ql_dbg_init + ql_dbg_verbose, vha, 0x508e,
44 "-------- ELS REQ -------\n");
45 ql_dump_buffer(ql_dbg_init + ql_dbg_verbose, vha, 0x508f,
46 pkt, pkt_size);
47
48 fc_host_fpin_rcv(vha->host, pkt_size, (char *)pkt);
49}
50
51const char *const port_state_str[] = {
52 "Unknown",
53 "UNCONFIGURED",
54 "DEAD",
55 "LOST",
56 "ONLINE"
57};
58
59static void
60qla24xx_process_abts(struct scsi_qla_host *vha, struct purex_item *pkt)
61{
62 struct abts_entry_24xx *abts =
63 (struct abts_entry_24xx *)&pkt->iocb;
64 struct qla_hw_data *ha = vha->hw;
65 struct els_entry_24xx *rsp_els;
66 struct abts_entry_24xx *abts_rsp;
67 dma_addr_t dma;
68 uint32_t fctl;
69 int rval;
70
71 ql_dbg(ql_dbg_init, vha, 0x0286, "%s: entered.\n", __func__);
72
73 ql_log(ql_log_warn, vha, 0x0287,
74 "Processing ABTS xchg=%#x oxid=%#x rxid=%#x seqid=%#x seqcnt=%#x\n",
75 abts->rx_xch_addr_to_abort, abts->ox_id, abts->rx_id,
76 abts->seq_id, abts->seq_cnt);
77 ql_dbg(ql_dbg_init + ql_dbg_verbose, vha, 0x0287,
78 "-------- ABTS RCV -------\n");
79 ql_dump_buffer(ql_dbg_init + ql_dbg_verbose, vha, 0x0287,
80 (uint8_t *)abts, sizeof(*abts));
81
82 rsp_els = dma_alloc_coherent(&ha->pdev->dev, sizeof(*rsp_els), &dma,
83 GFP_KERNEL);
84 if (!rsp_els) {
85 ql_log(ql_log_warn, vha, 0x0287,
86 "Failed allocate dma buffer ABTS/ELS RSP.\n");
87 return;
88 }
89
90
91 rsp_els->entry_type = ELS_IOCB_TYPE;
92 rsp_els->entry_count = 1;
93 rsp_els->nport_handle = cpu_to_le16(~0);
94 rsp_els->rx_xchg_address = abts->rx_xch_addr_to_abort;
95 rsp_els->control_flags = cpu_to_le16(EPD_RX_XCHG);
96 ql_dbg(ql_dbg_init, vha, 0x0283,
97 "Sending ELS Response to terminate exchange %#x...\n",
98 abts->rx_xch_addr_to_abort);
99 ql_dbg(ql_dbg_init + ql_dbg_verbose, vha, 0x0283,
100 "-------- ELS RSP -------\n");
101 ql_dump_buffer(ql_dbg_init + ql_dbg_verbose, vha, 0x0283,
102 (uint8_t *)rsp_els, sizeof(*rsp_els));
103 rval = qla2x00_issue_iocb(vha, rsp_els, dma, 0);
104 if (rval) {
105 ql_log(ql_log_warn, vha, 0x0288,
106 "%s: iocb failed to execute -> %x\n", __func__, rval);
107 } else if (rsp_els->comp_status) {
108 ql_log(ql_log_warn, vha, 0x0289,
109 "%s: iocb failed to complete -> completion=%#x subcode=(%#x,%#x)\n",
110 __func__, rsp_els->comp_status,
111 rsp_els->error_subcode_1, rsp_els->error_subcode_2);
112 } else {
113 ql_dbg(ql_dbg_init, vha, 0x028a,
114 "%s: abort exchange done.\n", __func__);
115 }
116
117
118 abts_rsp = (void *)rsp_els;
119 memset(abts_rsp, 0, sizeof(*abts_rsp));
120 abts_rsp->entry_type = ABTS_RSP_TYPE;
121 abts_rsp->entry_count = 1;
122 abts_rsp->nport_handle = abts->nport_handle;
123 abts_rsp->vp_idx = abts->vp_idx;
124 abts_rsp->sof_type = abts->sof_type & 0xf0;
125 abts_rsp->rx_xch_addr = abts->rx_xch_addr;
126 abts_rsp->d_id[0] = abts->s_id[0];
127 abts_rsp->d_id[1] = abts->s_id[1];
128 abts_rsp->d_id[2] = abts->s_id[2];
129 abts_rsp->r_ctl = FC_ROUTING_BLD | FC_R_CTL_BLD_BA_ACC;
130 abts_rsp->s_id[0] = abts->d_id[0];
131 abts_rsp->s_id[1] = abts->d_id[1];
132 abts_rsp->s_id[2] = abts->d_id[2];
133 abts_rsp->cs_ctl = abts->cs_ctl;
134
135 fctl = ~(abts->f_ctl[2] | 0x7F) << 16 |
136 FC_F_CTL_LAST_SEQ | FC_F_CTL_END_SEQ | FC_F_CTL_SEQ_INIT;
137 abts_rsp->f_ctl[0] = fctl >> 0 & 0xff;
138 abts_rsp->f_ctl[1] = fctl >> 8 & 0xff;
139 abts_rsp->f_ctl[2] = fctl >> 16 & 0xff;
140 abts_rsp->type = FC_TYPE_BLD;
141 abts_rsp->rx_id = abts->rx_id;
142 abts_rsp->ox_id = abts->ox_id;
143 abts_rsp->payload.ba_acc.aborted_rx_id = abts->rx_id;
144 abts_rsp->payload.ba_acc.aborted_ox_id = abts->ox_id;
145 abts_rsp->payload.ba_acc.high_seq_cnt = cpu_to_le16(~0);
146 abts_rsp->rx_xch_addr_to_abort = abts->rx_xch_addr_to_abort;
147 ql_dbg(ql_dbg_init, vha, 0x028b,
148 "Sending BA ACC response to ABTS %#x...\n",
149 abts->rx_xch_addr_to_abort);
150 ql_dbg(ql_dbg_init + ql_dbg_verbose, vha, 0x028b,
151 "-------- ELS RSP -------\n");
152 ql_dump_buffer(ql_dbg_init + ql_dbg_verbose, vha, 0x028b,
153 (uint8_t *)abts_rsp, sizeof(*abts_rsp));
154 rval = qla2x00_issue_iocb(vha, abts_rsp, dma, 0);
155 if (rval) {
156 ql_log(ql_log_warn, vha, 0x028c,
157 "%s: iocb failed to execute -> %x\n", __func__, rval);
158 } else if (abts_rsp->comp_status) {
159 ql_log(ql_log_warn, vha, 0x028d,
160 "%s: iocb failed to complete -> completion=%#x subcode=(%#x,%#x)\n",
161 __func__, abts_rsp->comp_status,
162 abts_rsp->payload.error.subcode1,
163 abts_rsp->payload.error.subcode2);
164 } else {
165 ql_dbg(ql_dbg_init, vha, 0x028ea,
166 "%s: done.\n", __func__);
167 }
168
169 dma_free_coherent(&ha->pdev->dev, sizeof(*rsp_els), rsp_els, dma);
170}
171
172
173
174
175
176
177
178
179
180
181
182
183void __qla_consume_iocb(struct scsi_qla_host *vha,
184 void **pkt, struct rsp_que **rsp)
185{
186 struct rsp_que *rsp_q = *rsp;
187 response_t *new_pkt;
188 uint16_t entry_count_remaining;
189 struct purex_entry_24xx *purex = *pkt;
190
191 entry_count_remaining = purex->entry_count;
192 while (entry_count_remaining > 0) {
193 new_pkt = rsp_q->ring_ptr;
194 *pkt = new_pkt;
195
196 rsp_q->ring_index++;
197 if (rsp_q->ring_index == rsp_q->length) {
198 rsp_q->ring_index = 0;
199 rsp_q->ring_ptr = rsp_q->ring;
200 } else {
201 rsp_q->ring_ptr++;
202 }
203
204 new_pkt->signature = RESPONSE_PROCESSED;
205
206 wmb();
207 --entry_count_remaining;
208 }
209}
210
211
212
213
214
215
216
217
218
219
220int __qla_copy_purex_to_buffer(struct scsi_qla_host *vha,
221 void **pkt, struct rsp_que **rsp, u8 *buf, u32 buf_len)
222{
223 struct purex_entry_24xx *purex = *pkt;
224 struct rsp_que *rsp_q = *rsp;
225 sts_cont_entry_t *new_pkt;
226 uint16_t no_bytes = 0, total_bytes = 0, pending_bytes = 0;
227 uint16_t buffer_copy_offset = 0;
228 uint16_t entry_count_remaining;
229 u16 tpad;
230
231 entry_count_remaining = purex->entry_count;
232 total_bytes = (le16_to_cpu(purex->frame_size) & 0x0FFF)
233 - PURX_ELS_HEADER_SIZE;
234
235
236
237
238
239 tpad = roundup(total_bytes, 4);
240
241 if (buf_len < tpad) {
242 ql_dbg(ql_dbg_async, vha, 0x5084,
243 "%s buffer is too small %d < %d\n",
244 __func__, buf_len, tpad);
245 __qla_consume_iocb(vha, pkt, rsp);
246 return -EIO;
247 }
248
249 pending_bytes = total_bytes = tpad;
250 no_bytes = (pending_bytes > sizeof(purex->els_frame_payload)) ?
251 sizeof(purex->els_frame_payload) : pending_bytes;
252
253 memcpy(buf, &purex->els_frame_payload[0], no_bytes);
254 buffer_copy_offset += no_bytes;
255 pending_bytes -= no_bytes;
256 --entry_count_remaining;
257
258 ((response_t *)purex)->signature = RESPONSE_PROCESSED;
259
260 wmb();
261
262 do {
263 while ((total_bytes > 0) && (entry_count_remaining > 0)) {
264 new_pkt = (sts_cont_entry_t *)rsp_q->ring_ptr;
265 *pkt = new_pkt;
266
267 if (new_pkt->entry_type != STATUS_CONT_TYPE) {
268 ql_log(ql_log_warn, vha, 0x507a,
269 "Unexpected IOCB type, partial data 0x%x\n",
270 buffer_copy_offset);
271 break;
272 }
273
274 rsp_q->ring_index++;
275 if (rsp_q->ring_index == rsp_q->length) {
276 rsp_q->ring_index = 0;
277 rsp_q->ring_ptr = rsp_q->ring;
278 } else {
279 rsp_q->ring_ptr++;
280 }
281 no_bytes = (pending_bytes > sizeof(new_pkt->data)) ?
282 sizeof(new_pkt->data) : pending_bytes;
283 if ((buffer_copy_offset + no_bytes) <= total_bytes) {
284 memcpy((buf + buffer_copy_offset), new_pkt->data,
285 no_bytes);
286 buffer_copy_offset += no_bytes;
287 pending_bytes -= no_bytes;
288 --entry_count_remaining;
289 } else {
290 ql_log(ql_log_warn, vha, 0x5044,
291 "Attempt to copy more that we got, optimizing..%x\n",
292 buffer_copy_offset);
293 memcpy((buf + buffer_copy_offset), new_pkt->data,
294 total_bytes - buffer_copy_offset);
295 }
296
297 ((response_t *)new_pkt)->signature = RESPONSE_PROCESSED;
298
299 wmb();
300 }
301
302 if (pending_bytes != 0 || entry_count_remaining != 0) {
303 ql_log(ql_log_fatal, vha, 0x508b,
304 "Dropping partial Data, underrun bytes = 0x%x, entry cnts 0x%x\n",
305 total_bytes, entry_count_remaining);
306 return -EIO;
307 }
308 } while (entry_count_remaining > 0);
309
310 be32_to_cpu_array((u32 *)buf, (__be32 *)buf, total_bytes >> 2);
311
312 return 0;
313}
314
315
316
317
318
319
320
321
322
323
324irqreturn_t
325qla2100_intr_handler(int irq, void *dev_id)
326{
327 scsi_qla_host_t *vha;
328 struct qla_hw_data *ha;
329 struct device_reg_2xxx __iomem *reg;
330 int status;
331 unsigned long iter;
332 uint16_t hccr;
333 uint16_t mb[8];
334 struct rsp_que *rsp;
335 unsigned long flags;
336
337 rsp = (struct rsp_que *) dev_id;
338 if (!rsp) {
339 ql_log(ql_log_info, NULL, 0x505d,
340 "%s: NULL response queue pointer.\n", __func__);
341 return (IRQ_NONE);
342 }
343
344 ha = rsp->hw;
345 reg = &ha->iobase->isp;
346 status = 0;
347
348 spin_lock_irqsave(&ha->hardware_lock, flags);
349 vha = pci_get_drvdata(ha->pdev);
350 for (iter = 50; iter--; ) {
351 hccr = rd_reg_word(®->hccr);
352 if (qla2x00_check_reg16_for_disconnect(vha, hccr))
353 break;
354 if (hccr & HCCR_RISC_PAUSE) {
355 if (pci_channel_offline(ha->pdev))
356 break;
357
358
359
360
361
362
363 wrt_reg_word(®->hccr, HCCR_RESET_RISC);
364 rd_reg_word(®->hccr);
365
366 ha->isp_ops->fw_dump(vha);
367 set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
368 break;
369 } else if ((rd_reg_word(®->istatus) & ISR_RISC_INT) == 0)
370 break;
371
372 if (rd_reg_word(®->semaphore) & BIT_0) {
373 wrt_reg_word(®->hccr, HCCR_CLR_RISC_INT);
374 rd_reg_word(®->hccr);
375
376
377 mb[0] = RD_MAILBOX_REG(ha, reg, 0);
378 if (mb[0] > 0x3fff && mb[0] < 0x8000) {
379 qla2x00_mbx_completion(vha, mb[0]);
380 status |= MBX_INTERRUPT;
381 } else if (mb[0] > 0x7fff && mb[0] < 0xc000) {
382 mb[1] = RD_MAILBOX_REG(ha, reg, 1);
383 mb[2] = RD_MAILBOX_REG(ha, reg, 2);
384 mb[3] = RD_MAILBOX_REG(ha, reg, 3);
385 qla2x00_async_event(vha, rsp, mb);
386 } else {
387
388 ql_dbg(ql_dbg_async, vha, 0x5025,
389 "Unrecognized interrupt type (%d).\n",
390 mb[0]);
391 }
392
393 wrt_reg_word(®->semaphore, 0);
394 rd_reg_word(®->semaphore);
395 } else {
396 qla2x00_process_response_queue(rsp);
397
398 wrt_reg_word(®->hccr, HCCR_CLR_RISC_INT);
399 rd_reg_word(®->hccr);
400 }
401 }
402 qla2x00_handle_mbx_completion(ha, status);
403 spin_unlock_irqrestore(&ha->hardware_lock, flags);
404
405 return (IRQ_HANDLED);
406}
407
408bool
409qla2x00_check_reg32_for_disconnect(scsi_qla_host_t *vha, uint32_t reg)
410{
411
412 if (reg == 0xffffffff && !pci_channel_offline(vha->hw->pdev)) {
413 if (!test_and_set_bit(PFLG_DISCONNECTED, &vha->pci_flags) &&
414 !test_bit(PFLG_DRIVER_REMOVING, &vha->pci_flags) &&
415 !test_bit(PFLG_DRIVER_PROBING, &vha->pci_flags)) {
416 qla_schedule_eeh_work(vha);
417 }
418 return true;
419 } else
420 return false;
421}
422
423bool
424qla2x00_check_reg16_for_disconnect(scsi_qla_host_t *vha, uint16_t reg)
425{
426 return qla2x00_check_reg32_for_disconnect(vha, 0xffff0000 | reg);
427}
428
429
430
431
432
433
434
435
436
437
438irqreturn_t
439qla2300_intr_handler(int irq, void *dev_id)
440{
441 scsi_qla_host_t *vha;
442 struct device_reg_2xxx __iomem *reg;
443 int status;
444 unsigned long iter;
445 uint32_t stat;
446 uint16_t hccr;
447 uint16_t mb[8];
448 struct rsp_que *rsp;
449 struct qla_hw_data *ha;
450 unsigned long flags;
451
452 rsp = (struct rsp_que *) dev_id;
453 if (!rsp) {
454 ql_log(ql_log_info, NULL, 0x5058,
455 "%s: NULL response queue pointer.\n", __func__);
456 return (IRQ_NONE);
457 }
458
459 ha = rsp->hw;
460 reg = &ha->iobase->isp;
461 status = 0;
462
463 spin_lock_irqsave(&ha->hardware_lock, flags);
464 vha = pci_get_drvdata(ha->pdev);
465 for (iter = 50; iter--; ) {
466 stat = rd_reg_dword(®->u.isp2300.host_status);
467 if (qla2x00_check_reg32_for_disconnect(vha, stat))
468 break;
469 if (stat & HSR_RISC_PAUSED) {
470 if (unlikely(pci_channel_offline(ha->pdev)))
471 break;
472
473 hccr = rd_reg_word(®->hccr);
474
475 if (hccr & (BIT_15 | BIT_13 | BIT_11 | BIT_8))
476 ql_log(ql_log_warn, vha, 0x5026,
477 "Parity error -- HCCR=%x, Dumping "
478 "firmware.\n", hccr);
479 else
480 ql_log(ql_log_warn, vha, 0x5027,
481 "RISC paused -- HCCR=%x, Dumping "
482 "firmware.\n", hccr);
483
484
485
486
487
488
489 wrt_reg_word(®->hccr, HCCR_RESET_RISC);
490 rd_reg_word(®->hccr);
491
492 ha->isp_ops->fw_dump(vha);
493 set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
494 break;
495 } else if ((stat & HSR_RISC_INT) == 0)
496 break;
497
498 switch (stat & 0xff) {
499 case 0x1:
500 case 0x2:
501 case 0x10:
502 case 0x11:
503 qla2x00_mbx_completion(vha, MSW(stat));
504 status |= MBX_INTERRUPT;
505
506
507 wrt_reg_word(®->semaphore, 0);
508 break;
509 case 0x12:
510 mb[0] = MSW(stat);
511 mb[1] = RD_MAILBOX_REG(ha, reg, 1);
512 mb[2] = RD_MAILBOX_REG(ha, reg, 2);
513 mb[3] = RD_MAILBOX_REG(ha, reg, 3);
514 qla2x00_async_event(vha, rsp, mb);
515 break;
516 case 0x13:
517 qla2x00_process_response_queue(rsp);
518 break;
519 case 0x15:
520 mb[0] = MBA_CMPLT_1_16BIT;
521 mb[1] = MSW(stat);
522 qla2x00_async_event(vha, rsp, mb);
523 break;
524 case 0x16:
525 mb[0] = MBA_SCSI_COMPLETION;
526 mb[1] = MSW(stat);
527 mb[2] = RD_MAILBOX_REG(ha, reg, 2);
528 qla2x00_async_event(vha, rsp, mb);
529 break;
530 default:
531 ql_dbg(ql_dbg_async, vha, 0x5028,
532 "Unrecognized interrupt type (%d).\n", stat & 0xff);
533 break;
534 }
535 wrt_reg_word(®->hccr, HCCR_CLR_RISC_INT);
536 rd_reg_word_relaxed(®->hccr);
537 }
538 qla2x00_handle_mbx_completion(ha, status);
539 spin_unlock_irqrestore(&ha->hardware_lock, flags);
540
541 return (IRQ_HANDLED);
542}
543
544
545
546
547
548
549static void
550qla2x00_mbx_completion(scsi_qla_host_t *vha, uint16_t mb0)
551{
552 uint16_t cnt;
553 uint32_t mboxes;
554 __le16 __iomem *wptr;
555 struct qla_hw_data *ha = vha->hw;
556 struct device_reg_2xxx __iomem *reg = &ha->iobase->isp;
557
558
559 WARN_ON_ONCE(ha->mbx_count > 32);
560 mboxes = (1ULL << ha->mbx_count) - 1;
561 if (!ha->mcp)
562 ql_dbg(ql_dbg_async, vha, 0x5001, "MBX pointer ERROR.\n");
563 else
564 mboxes = ha->mcp->in_mb;
565
566
567 ha->flags.mbox_int = 1;
568 ha->mailbox_out[0] = mb0;
569 mboxes >>= 1;
570 wptr = MAILBOX_REG(ha, reg, 1);
571
572 for (cnt = 1; cnt < ha->mbx_count; cnt++) {
573 if (IS_QLA2200(ha) && cnt == 8)
574 wptr = MAILBOX_REG(ha, reg, 8);
575 if ((cnt == 4 || cnt == 5) && (mboxes & BIT_0))
576 ha->mailbox_out[cnt] = qla2x00_debounce_register(wptr);
577 else if (mboxes & BIT_0)
578 ha->mailbox_out[cnt] = rd_reg_word(wptr);
579
580 wptr++;
581 mboxes >>= 1;
582 }
583}
584
585static void
586qla81xx_idc_event(scsi_qla_host_t *vha, uint16_t aen, uint16_t descr)
587{
588 static char *event[] =
589 { "Complete", "Request Notification", "Time Extension" };
590 int rval;
591 struct device_reg_24xx __iomem *reg24 = &vha->hw->iobase->isp24;
592 struct device_reg_82xx __iomem *reg82 = &vha->hw->iobase->isp82;
593 __le16 __iomem *wptr;
594 uint16_t cnt, timeout, mb[QLA_IDC_ACK_REGS];
595
596
597 if (IS_QLA81XX(vha->hw) || IS_QLA83XX(vha->hw))
598 wptr = ®24->mailbox1;
599 else if (IS_QLA8044(vha->hw))
600 wptr = ®82->mailbox_out[1];
601 else
602 return;
603
604 for (cnt = 0; cnt < QLA_IDC_ACK_REGS; cnt++, wptr++)
605 mb[cnt] = rd_reg_word(wptr);
606
607 ql_dbg(ql_dbg_async, vha, 0x5021,
608 "Inter-Driver Communication %s -- "
609 "%04x %04x %04x %04x %04x %04x %04x.\n",
610 event[aen & 0xff], mb[0], mb[1], mb[2], mb[3],
611 mb[4], mb[5], mb[6]);
612 switch (aen) {
613
614 case MBA_IDC_COMPLETE:
615 if (mb[1] >> 15) {
616 vha->hw->flags.idc_compl_status = 1;
617 if (vha->hw->notify_dcbx_comp && !vha->vp_idx)
618 complete(&vha->hw->dcbx_comp);
619 }
620 break;
621
622 case MBA_IDC_NOTIFY:
623
624 timeout = (descr >> 8) & 0xf;
625 ql_dbg(ql_dbg_async, vha, 0x5022,
626 "%lu Inter-Driver Communication %s -- ACK timeout=%d.\n",
627 vha->host_no, event[aen & 0xff], timeout);
628
629 if (!timeout)
630 return;
631 rval = qla2x00_post_idc_ack_work(vha, mb);
632 if (rval != QLA_SUCCESS)
633 ql_log(ql_log_warn, vha, 0x5023,
634 "IDC failed to post ACK.\n");
635 break;
636 case MBA_IDC_TIME_EXT:
637 vha->hw->idc_extend_tmo = descr;
638 ql_dbg(ql_dbg_async, vha, 0x5087,
639 "%lu Inter-Driver Communication %s -- "
640 "Extend timeout by=%d.\n",
641 vha->host_no, event[aen & 0xff], vha->hw->idc_extend_tmo);
642 break;
643 }
644}
645
646#define LS_UNKNOWN 2
647const char *
648qla2x00_get_link_speed_str(struct qla_hw_data *ha, uint16_t speed)
649{
650 static const char *const link_speeds[] = {
651 "1", "2", "?", "4", "8", "16", "32", "64", "10"
652 };
653#define QLA_LAST_SPEED (ARRAY_SIZE(link_speeds) - 1)
654
655 if (IS_QLA2100(ha) || IS_QLA2200(ha))
656 return link_speeds[0];
657 else if (speed == 0x13)
658 return link_speeds[QLA_LAST_SPEED];
659 else if (speed < QLA_LAST_SPEED)
660 return link_speeds[speed];
661 else
662 return link_speeds[LS_UNKNOWN];
663}
664
665static void
666qla83xx_handle_8200_aen(scsi_qla_host_t *vha, uint16_t *mb)
667{
668 struct qla_hw_data *ha = vha->hw;
669
670
671
672
673
674
675
676
677
678
679
680
681 ql_dbg(ql_dbg_async, vha, 0x506b, "AEN Code: mb[0] = 0x%x AEN reason: "
682 "mb[1] = 0x%x PH-status1: mb[2] = 0x%x PH-status1: mb[6] = 0x%x.\n",
683 mb[0], mb[1], mb[2], mb[6]);
684 ql_dbg(ql_dbg_async, vha, 0x506c, "PH-status2: mb[3] = 0x%x "
685 "PH-status2: mb[7] = 0x%x Device-State: mb[4] = 0x%x "
686 "Drv-Presence: mb[5] = 0x%x.\n", mb[3], mb[7], mb[4], mb[5]);
687
688 if (mb[1] & (IDC_PEG_HALT_STATUS_CHANGE | IDC_NIC_FW_REPORTED_FAILURE |
689 IDC_HEARTBEAT_FAILURE)) {
690 ha->flags.nic_core_hung = 1;
691 ql_log(ql_log_warn, vha, 0x5060,
692 "83XX: F/W Error Reported: Check if reset required.\n");
693
694 if (mb[1] & IDC_PEG_HALT_STATUS_CHANGE) {
695 uint32_t protocol_engine_id, fw_err_code, err_level;
696
697
698
699
700
701
702
703
704
705
706
707
708
709
710 protocol_engine_id = (mb[2] & 0xff);
711 fw_err_code = (((mb[2] & 0xff00) >> 8) |
712 ((mb[6] & 0x1fff) << 8));
713 err_level = ((mb[6] & 0xe000) >> 13);
714 ql_log(ql_log_warn, vha, 0x5061, "PegHalt Status-1 "
715 "Register: protocol_engine_id=0x%x "
716 "fw_err_code=0x%x err_level=0x%x.\n",
717 protocol_engine_id, fw_err_code, err_level);
718 ql_log(ql_log_warn, vha, 0x5062, "PegHalt Status-2 "
719 "Register: 0x%x%x.\n", mb[7], mb[3]);
720 if (err_level == ERR_LEVEL_NON_FATAL) {
721 ql_log(ql_log_warn, vha, 0x5063,
722 "Not a fatal error, f/w has recovered itself.\n");
723 } else if (err_level == ERR_LEVEL_RECOVERABLE_FATAL) {
724 ql_log(ql_log_fatal, vha, 0x5064,
725 "Recoverable Fatal error: Chip reset "
726 "required.\n");
727 qla83xx_schedule_work(vha,
728 QLA83XX_NIC_CORE_RESET);
729 } else if (err_level == ERR_LEVEL_UNRECOVERABLE_FATAL) {
730 ql_log(ql_log_fatal, vha, 0x5065,
731 "Unrecoverable Fatal error: Set FAILED "
732 "state, reboot required.\n");
733 qla83xx_schedule_work(vha,
734 QLA83XX_NIC_CORE_UNRECOVERABLE);
735 }
736 }
737
738 if (mb[1] & IDC_NIC_FW_REPORTED_FAILURE) {
739 uint16_t peg_fw_state, nw_interface_link_up;
740 uint16_t nw_interface_signal_detect, sfp_status;
741 uint16_t htbt_counter, htbt_monitor_enable;
742 uint16_t sfp_additional_info, sfp_multirate;
743 uint16_t sfp_tx_fault, link_speed, dcbx_status;
744
745
746
747
748
749
750
751
752
753
754
755
756
757
758
759
760
761
762
763
764
765
766
767
768
769
770
771
772
773
774
775
776 peg_fw_state = (mb[2] & 0x00ff);
777 nw_interface_link_up = ((mb[2] & 0x0100) >> 8);
778 nw_interface_signal_detect = ((mb[2] & 0x0200) >> 9);
779 sfp_status = ((mb[2] & 0x0c00) >> 10);
780 htbt_counter = ((mb[2] & 0x7000) >> 12);
781 htbt_monitor_enable = ((mb[2] & 0x8000) >> 15);
782 sfp_additional_info = (mb[6] & 0x0003);
783 sfp_multirate = ((mb[6] & 0x0004) >> 2);
784 sfp_tx_fault = ((mb[6] & 0x0008) >> 3);
785 link_speed = ((mb[6] & 0x0070) >> 4);
786 dcbx_status = ((mb[6] & 0x7000) >> 12);
787
788 ql_log(ql_log_warn, vha, 0x5066,
789 "Peg-to-Fc Status Register:\n"
790 "peg_fw_state=0x%x, nw_interface_link_up=0x%x, "
791 "nw_interface_signal_detect=0x%x"
792 "\nsfp_statis=0x%x.\n ", peg_fw_state,
793 nw_interface_link_up, nw_interface_signal_detect,
794 sfp_status);
795 ql_log(ql_log_warn, vha, 0x5067,
796 "htbt_counter=0x%x, htbt_monitor_enable=0x%x, "
797 "sfp_additional_info=0x%x, sfp_multirate=0x%x.\n ",
798 htbt_counter, htbt_monitor_enable,
799 sfp_additional_info, sfp_multirate);
800 ql_log(ql_log_warn, vha, 0x5068,
801 "sfp_tx_fault=0x%x, link_state=0x%x, "
802 "dcbx_status=0x%x.\n", sfp_tx_fault, link_speed,
803 dcbx_status);
804
805 qla83xx_schedule_work(vha, QLA83XX_NIC_CORE_RESET);
806 }
807
808 if (mb[1] & IDC_HEARTBEAT_FAILURE) {
809 ql_log(ql_log_warn, vha, 0x5069,
810 "Heartbeat Failure encountered, chip reset "
811 "required.\n");
812
813 qla83xx_schedule_work(vha, QLA83XX_NIC_CORE_RESET);
814 }
815 }
816
817 if (mb[1] & IDC_DEVICE_STATE_CHANGE) {
818 ql_log(ql_log_info, vha, 0x506a,
819 "IDC Device-State changed = 0x%x.\n", mb[4]);
820 if (ha->flags.nic_core_reset_owner)
821 return;
822 qla83xx_schedule_work(vha, MBA_IDC_AEN);
823 }
824}
825
826int
827qla2x00_is_a_vp_did(scsi_qla_host_t *vha, uint32_t rscn_entry)
828{
829 struct qla_hw_data *ha = vha->hw;
830 scsi_qla_host_t *vp;
831 uint32_t vp_did;
832 unsigned long flags;
833 int ret = 0;
834
835 if (!ha->num_vhosts)
836 return ret;
837
838 spin_lock_irqsave(&ha->vport_slock, flags);
839 list_for_each_entry(vp, &ha->vp_list, list) {
840 vp_did = vp->d_id.b24;
841 if (vp_did == rscn_entry) {
842 ret = 1;
843 break;
844 }
845 }
846 spin_unlock_irqrestore(&ha->vport_slock, flags);
847
848 return ret;
849}
850
851fc_port_t *
852qla2x00_find_fcport_by_loopid(scsi_qla_host_t *vha, uint16_t loop_id)
853{
854 fc_port_t *f, *tf;
855
856 f = tf = NULL;
857 list_for_each_entry_safe(f, tf, &vha->vp_fcports, list)
858 if (f->loop_id == loop_id)
859 return f;
860 return NULL;
861}
862
863fc_port_t *
864qla2x00_find_fcport_by_wwpn(scsi_qla_host_t *vha, u8 *wwpn, u8 incl_deleted)
865{
866 fc_port_t *f, *tf;
867
868 f = tf = NULL;
869 list_for_each_entry_safe(f, tf, &vha->vp_fcports, list) {
870 if (memcmp(f->port_name, wwpn, WWN_SIZE) == 0) {
871 if (incl_deleted)
872 return f;
873 else if (f->deleted == 0)
874 return f;
875 }
876 }
877 return NULL;
878}
879
880fc_port_t *
881qla2x00_find_fcport_by_nportid(scsi_qla_host_t *vha, port_id_t *id,
882 u8 incl_deleted)
883{
884 fc_port_t *f, *tf;
885
886 f = tf = NULL;
887 list_for_each_entry_safe(f, tf, &vha->vp_fcports, list) {
888 if (f->d_id.b24 == id->b24) {
889 if (incl_deleted)
890 return f;
891 else if (f->deleted == 0)
892 return f;
893 }
894 }
895 return NULL;
896}
897
898
899static void
900qla27xx_handle_8200_aen(scsi_qla_host_t *vha, uint16_t *mb)
901{
902 struct qla_hw_data *ha = vha->hw;
903 bool reset_isp_needed = false;
904
905 ql_log(ql_log_warn, vha, 0x02f0,
906 "MPI Heartbeat stop. MPI reset is%s needed. "
907 "MB0[%xh] MB1[%xh] MB2[%xh] MB3[%xh]\n",
908 mb[1] & BIT_8 ? "" : " not",
909 mb[0], mb[1], mb[2], mb[3]);
910
911 if ((mb[1] & BIT_8) == 0)
912 return;
913
914 ql_log(ql_log_warn, vha, 0x02f1,
915 "MPI Heartbeat stop. FW dump needed\n");
916
917 if (ql2xfulldump_on_mpifail) {
918 ha->isp_ops->fw_dump(vha);
919 reset_isp_needed = true;
920 }
921
922 ha->isp_ops->mpi_fw_dump(vha, 1);
923
924 if (reset_isp_needed) {
925 vha->hw->flags.fw_init_done = 0;
926 set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
927 qla2xxx_wake_dpc(vha);
928 }
929}
930
931static struct purex_item *
932qla24xx_alloc_purex_item(scsi_qla_host_t *vha, uint16_t size)
933{
934 struct purex_item *item = NULL;
935 uint8_t item_hdr_size = sizeof(*item);
936
937 if (size > QLA_DEFAULT_PAYLOAD_SIZE) {
938 item = kzalloc(item_hdr_size +
939 (size - QLA_DEFAULT_PAYLOAD_SIZE), GFP_ATOMIC);
940 } else {
941 if (atomic_inc_return(&vha->default_item.in_use) == 1) {
942 item = &vha->default_item;
943 goto initialize_purex_header;
944 } else {
945 item = kzalloc(item_hdr_size, GFP_ATOMIC);
946 }
947 }
948 if (!item) {
949 ql_log(ql_log_warn, vha, 0x5092,
950 ">> Failed allocate purex list item.\n");
951
952 return NULL;
953 }
954
955initialize_purex_header:
956 item->vha = vha;
957 item->size = size;
958 return item;
959}
960
961static void
962qla24xx_queue_purex_item(scsi_qla_host_t *vha, struct purex_item *pkt,
963 void (*process_item)(struct scsi_qla_host *vha,
964 struct purex_item *pkt))
965{
966 struct purex_list *list = &vha->purex_list;
967 ulong flags;
968
969 pkt->process_item = process_item;
970
971 spin_lock_irqsave(&list->lock, flags);
972 list_add_tail(&pkt->list, &list->head);
973 spin_unlock_irqrestore(&list->lock, flags);
974
975 set_bit(PROCESS_PUREX_IOCB, &vha->dpc_flags);
976}
977
978
979
980
981
982
983
984
985static struct purex_item
986*qla24xx_copy_std_pkt(struct scsi_qla_host *vha, void *pkt)
987{
988 struct purex_item *item;
989
990 item = qla24xx_alloc_purex_item(vha,
991 QLA_DEFAULT_PAYLOAD_SIZE);
992 if (!item)
993 return item;
994
995 memcpy(&item->iocb, pkt, sizeof(item->iocb));
996 return item;
997}
998
999
1000
1001
1002
1003
1004
1005
1006static struct purex_item *
1007qla27xx_copy_fpin_pkt(struct scsi_qla_host *vha, void **pkt,
1008 struct rsp_que **rsp)
1009{
1010 struct purex_entry_24xx *purex = *pkt;
1011 struct rsp_que *rsp_q = *rsp;
1012 sts_cont_entry_t *new_pkt;
1013 uint16_t no_bytes = 0, total_bytes = 0, pending_bytes = 0;
1014 uint16_t buffer_copy_offset = 0;
1015 uint16_t entry_count, entry_count_remaining;
1016 struct purex_item *item;
1017 void *fpin_pkt = NULL;
1018
1019 total_bytes = (le16_to_cpu(purex->frame_size) & 0x0FFF)
1020 - PURX_ELS_HEADER_SIZE;
1021 pending_bytes = total_bytes;
1022 entry_count = entry_count_remaining = purex->entry_count;
1023 no_bytes = (pending_bytes > sizeof(purex->els_frame_payload)) ?
1024 sizeof(purex->els_frame_payload) : pending_bytes;
1025 ql_log(ql_log_info, vha, 0x509a,
1026 "FPIN ELS, frame_size 0x%x, entry count %d\n",
1027 total_bytes, entry_count);
1028
1029 item = qla24xx_alloc_purex_item(vha, total_bytes);
1030 if (!item)
1031 return item;
1032
1033 fpin_pkt = &item->iocb;
1034
1035 memcpy(fpin_pkt, &purex->els_frame_payload[0], no_bytes);
1036 buffer_copy_offset += no_bytes;
1037 pending_bytes -= no_bytes;
1038 --entry_count_remaining;
1039
1040 ((response_t *)purex)->signature = RESPONSE_PROCESSED;
1041 wmb();
1042
1043 do {
1044 while ((total_bytes > 0) && (entry_count_remaining > 0)) {
1045 if (rsp_q->ring_ptr->signature == RESPONSE_PROCESSED) {
1046 ql_dbg(ql_dbg_async, vha, 0x5084,
1047 "Ran out of IOCBs, partial data 0x%x\n",
1048 buffer_copy_offset);
1049 cpu_relax();
1050 continue;
1051 }
1052
1053 new_pkt = (sts_cont_entry_t *)rsp_q->ring_ptr;
1054 *pkt = new_pkt;
1055
1056 if (new_pkt->entry_type != STATUS_CONT_TYPE) {
1057 ql_log(ql_log_warn, vha, 0x507a,
1058 "Unexpected IOCB type, partial data 0x%x\n",
1059 buffer_copy_offset);
1060 break;
1061 }
1062
1063 rsp_q->ring_index++;
1064 if (rsp_q->ring_index == rsp_q->length) {
1065 rsp_q->ring_index = 0;
1066 rsp_q->ring_ptr = rsp_q->ring;
1067 } else {
1068 rsp_q->ring_ptr++;
1069 }
1070 no_bytes = (pending_bytes > sizeof(new_pkt->data)) ?
1071 sizeof(new_pkt->data) : pending_bytes;
1072 if ((buffer_copy_offset + no_bytes) <= total_bytes) {
1073 memcpy(((uint8_t *)fpin_pkt +
1074 buffer_copy_offset), new_pkt->data,
1075 no_bytes);
1076 buffer_copy_offset += no_bytes;
1077 pending_bytes -= no_bytes;
1078 --entry_count_remaining;
1079 } else {
1080 ql_log(ql_log_warn, vha, 0x5044,
1081 "Attempt to copy more that we got, optimizing..%x\n",
1082 buffer_copy_offset);
1083 memcpy(((uint8_t *)fpin_pkt +
1084 buffer_copy_offset), new_pkt->data,
1085 total_bytes - buffer_copy_offset);
1086 }
1087
1088 ((response_t *)new_pkt)->signature = RESPONSE_PROCESSED;
1089 wmb();
1090 }
1091
1092 if (pending_bytes != 0 || entry_count_remaining != 0) {
1093 ql_log(ql_log_fatal, vha, 0x508b,
1094 "Dropping partial FPIN, underrun bytes = 0x%x, entry cnts 0x%x\n",
1095 total_bytes, entry_count_remaining);
1096 qla24xx_free_purex_item(item);
1097 return NULL;
1098 }
1099 } while (entry_count_remaining > 0);
1100 host_to_fcp_swap((uint8_t *)&item->iocb, total_bytes);
1101 return item;
1102}
1103
1104
1105
1106
1107
1108
1109
1110void
1111qla2x00_async_event(scsi_qla_host_t *vha, struct rsp_que *rsp, uint16_t *mb)
1112{
1113 uint16_t handle_cnt;
1114 uint16_t cnt, mbx;
1115 uint32_t handles[5];
1116 struct qla_hw_data *ha = vha->hw;
1117 struct device_reg_2xxx __iomem *reg = &ha->iobase->isp;
1118 struct device_reg_24xx __iomem *reg24 = &ha->iobase->isp24;
1119 struct device_reg_82xx __iomem *reg82 = &ha->iobase->isp82;
1120 uint32_t rscn_entry, host_pid;
1121 unsigned long flags;
1122 fc_port_t *fcport = NULL;
1123
1124 if (!vha->hw->flags.fw_started)
1125 return;
1126
1127
1128 handle_cnt = 0;
1129 if (IS_CNA_CAPABLE(ha))
1130 goto skip_rio;
1131 switch (mb[0]) {
1132 case MBA_SCSI_COMPLETION:
1133 handles[0] = make_handle(mb[2], mb[1]);
1134 handle_cnt = 1;
1135 break;
1136 case MBA_CMPLT_1_16BIT:
1137 handles[0] = mb[1];
1138 handle_cnt = 1;
1139 mb[0] = MBA_SCSI_COMPLETION;
1140 break;
1141 case MBA_CMPLT_2_16BIT:
1142 handles[0] = mb[1];
1143 handles[1] = mb[2];
1144 handle_cnt = 2;
1145 mb[0] = MBA_SCSI_COMPLETION;
1146 break;
1147 case MBA_CMPLT_3_16BIT:
1148 handles[0] = mb[1];
1149 handles[1] = mb[2];
1150 handles[2] = mb[3];
1151 handle_cnt = 3;
1152 mb[0] = MBA_SCSI_COMPLETION;
1153 break;
1154 case MBA_CMPLT_4_16BIT:
1155 handles[0] = mb[1];
1156 handles[1] = mb[2];
1157 handles[2] = mb[3];
1158 handles[3] = (uint32_t)RD_MAILBOX_REG(ha, reg, 6);
1159 handle_cnt = 4;
1160 mb[0] = MBA_SCSI_COMPLETION;
1161 break;
1162 case MBA_CMPLT_5_16BIT:
1163 handles[0] = mb[1];
1164 handles[1] = mb[2];
1165 handles[2] = mb[3];
1166 handles[3] = (uint32_t)RD_MAILBOX_REG(ha, reg, 6);
1167 handles[4] = (uint32_t)RD_MAILBOX_REG(ha, reg, 7);
1168 handle_cnt = 5;
1169 mb[0] = MBA_SCSI_COMPLETION;
1170 break;
1171 case MBA_CMPLT_2_32BIT:
1172 handles[0] = make_handle(mb[2], mb[1]);
1173 handles[1] = make_handle(RD_MAILBOX_REG(ha, reg, 7),
1174 RD_MAILBOX_REG(ha, reg, 6));
1175 handle_cnt = 2;
1176 mb[0] = MBA_SCSI_COMPLETION;
1177 break;
1178 default:
1179 break;
1180 }
1181skip_rio:
1182 switch (mb[0]) {
1183 case MBA_SCSI_COMPLETION:
1184 if (!vha->flags.online)
1185 break;
1186
1187 for (cnt = 0; cnt < handle_cnt; cnt++)
1188 qla2x00_process_completed_request(vha, rsp->req,
1189 handles[cnt]);
1190 break;
1191
1192 case MBA_RESET:
1193 ql_dbg(ql_dbg_async, vha, 0x5002,
1194 "Asynchronous RESET.\n");
1195
1196 set_bit(RESET_MARKER_NEEDED, &vha->dpc_flags);
1197 break;
1198
1199 case MBA_SYSTEM_ERR:
1200 mbx = 0;
1201
1202 vha->hw_err_cnt++;
1203
1204 if (IS_QLA81XX(ha) || IS_QLA83XX(ha) ||
1205 IS_QLA27XX(ha) || IS_QLA28XX(ha)) {
1206 u16 m[4];
1207
1208 m[0] = rd_reg_word(®24->mailbox4);
1209 m[1] = rd_reg_word(®24->mailbox5);
1210 m[2] = rd_reg_word(®24->mailbox6);
1211 mbx = m[3] = rd_reg_word(®24->mailbox7);
1212
1213 ql_log(ql_log_warn, vha, 0x5003,
1214 "ISP System Error - mbx1=%xh mbx2=%xh mbx3=%xh mbx4=%xh mbx5=%xh mbx6=%xh mbx7=%xh.\n",
1215 mb[1], mb[2], mb[3], m[0], m[1], m[2], m[3]);
1216 } else
1217 ql_log(ql_log_warn, vha, 0x5003,
1218 "ISP System Error - mbx1=%xh mbx2=%xh mbx3=%xh.\n ",
1219 mb[1], mb[2], mb[3]);
1220
1221 if ((IS_QLA27XX(ha) || IS_QLA28XX(ha)) &&
1222 rd_reg_word(®24->mailbox7) & BIT_8)
1223 ha->isp_ops->mpi_fw_dump(vha, 1);
1224 ha->isp_ops->fw_dump(vha);
1225 ha->flags.fw_init_done = 0;
1226 QLA_FW_STOPPED(ha);
1227
1228 if (IS_FWI2_CAPABLE(ha)) {
1229 if (mb[1] == 0 && mb[2] == 0) {
1230 ql_log(ql_log_fatal, vha, 0x5004,
1231 "Unrecoverable Hardware Error: adapter "
1232 "marked OFFLINE!\n");
1233 vha->flags.online = 0;
1234 vha->device_flags |= DFLG_DEV_FAILED;
1235 } else {
1236
1237 if ((mbx & MBX_3) && (ha->port_no == 0))
1238 set_bit(MPI_RESET_NEEDED,
1239 &vha->dpc_flags);
1240
1241 set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
1242 }
1243 } else if (mb[1] == 0) {
1244 ql_log(ql_log_fatal, vha, 0x5005,
1245 "Unrecoverable Hardware Error: adapter marked "
1246 "OFFLINE!\n");
1247 vha->flags.online = 0;
1248 vha->device_flags |= DFLG_DEV_FAILED;
1249 } else
1250 set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
1251 break;
1252
1253 case MBA_REQ_TRANSFER_ERR:
1254 ql_log(ql_log_warn, vha, 0x5006,
1255 "ISP Request Transfer Error (%x).\n", mb[1]);
1256
1257 vha->hw_err_cnt++;
1258
1259 set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
1260 break;
1261
1262 case MBA_RSP_TRANSFER_ERR:
1263 ql_log(ql_log_warn, vha, 0x5007,
1264 "ISP Response Transfer Error (%x).\n", mb[1]);
1265
1266 vha->hw_err_cnt++;
1267
1268 set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
1269 break;
1270
1271 case MBA_WAKEUP_THRES:
1272 ql_dbg(ql_dbg_async, vha, 0x5008,
1273 "Asynchronous WAKEUP_THRES (%x).\n", mb[1]);
1274 break;
1275
1276 case MBA_LOOP_INIT_ERR:
1277 ql_log(ql_log_warn, vha, 0x5090,
1278 "LOOP INIT ERROR (%x).\n", mb[1]);
1279 set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
1280 break;
1281
1282 case MBA_LIP_OCCURRED:
1283 ha->flags.lip_ae = 1;
1284
1285 ql_dbg(ql_dbg_async, vha, 0x5009,
1286 "LIP occurred (%x).\n", mb[1]);
1287
1288 if (atomic_read(&vha->loop_state) != LOOP_DOWN) {
1289 atomic_set(&vha->loop_state, LOOP_DOWN);
1290 atomic_set(&vha->loop_down_timer, LOOP_DOWN_TIME);
1291 qla2x00_mark_all_devices_lost(vha);
1292 }
1293
1294 if (vha->vp_idx) {
1295 atomic_set(&vha->vp_state, VP_FAILED);
1296 fc_vport_set_state(vha->fc_vport, FC_VPORT_FAILED);
1297 }
1298
1299 set_bit(REGISTER_FC4_NEEDED, &vha->dpc_flags);
1300 set_bit(REGISTER_FDMI_NEEDED, &vha->dpc_flags);
1301
1302 vha->flags.management_server_logged_in = 0;
1303 qla2x00_post_aen_work(vha, FCH_EVT_LIP, mb[1]);
1304 break;
1305
1306 case MBA_LOOP_UP:
1307 if (IS_QLA2100(ha) || IS_QLA2200(ha))
1308 ha->link_data_rate = PORT_SPEED_1GB;
1309 else
1310 ha->link_data_rate = mb[1];
1311
1312 ql_log(ql_log_info, vha, 0x500a,
1313 "LOOP UP detected (%s Gbps).\n",
1314 qla2x00_get_link_speed_str(ha, ha->link_data_rate));
1315
1316 if (IS_QLA83XX(ha) || IS_QLA27XX(ha) || IS_QLA28XX(ha)) {
1317 if (mb[2] & BIT_0)
1318 ql_log(ql_log_info, vha, 0x11a0,
1319 "FEC=enabled (link up).\n");
1320 }
1321
1322 vha->flags.management_server_logged_in = 0;
1323 qla2x00_post_aen_work(vha, FCH_EVT_LINKUP, ha->link_data_rate);
1324
1325 if (vha->link_down_time < vha->hw->port_down_retry_count) {
1326 vha->short_link_down_cnt++;
1327 vha->link_down_time = QLA2XX_MAX_LINK_DOWN_TIME;
1328 }
1329
1330 break;
1331
1332 case MBA_LOOP_DOWN:
1333 SAVE_TOPO(ha);
1334 ha->flags.lip_ae = 0;
1335 ha->current_topology = 0;
1336 vha->link_down_time = 0;
1337
1338 mbx = (IS_QLA81XX(ha) || IS_QLA8031(ha))
1339 ? rd_reg_word(®24->mailbox4) : 0;
1340 mbx = (IS_P3P_TYPE(ha)) ? rd_reg_word(®82->mailbox_out[4])
1341 : mbx;
1342 ql_log(ql_log_info, vha, 0x500b,
1343 "LOOP DOWN detected (%x %x %x %x).\n",
1344 mb[1], mb[2], mb[3], mbx);
1345
1346 if (atomic_read(&vha->loop_state) != LOOP_DOWN) {
1347 atomic_set(&vha->loop_state, LOOP_DOWN);
1348 atomic_set(&vha->loop_down_timer, LOOP_DOWN_TIME);
1349
1350
1351
1352
1353
1354 if (!vha->vp_idx) {
1355 if (ha->flags.fawwpn_enabled &&
1356 (ha->current_topology == ISP_CFG_F)) {
1357 void *wwpn = ha->init_cb->port_name;
1358
1359 memcpy(vha->port_name, wwpn, WWN_SIZE);
1360 fc_host_port_name(vha->host) =
1361 wwn_to_u64(vha->port_name);
1362 ql_dbg(ql_dbg_init + ql_dbg_verbose,
1363 vha, 0x00d8, "LOOP DOWN detected,"
1364 "restore WWPN %016llx\n",
1365 wwn_to_u64(vha->port_name));
1366 }
1367
1368 clear_bit(VP_CONFIG_OK, &vha->vp_flags);
1369 }
1370
1371 vha->device_flags |= DFLG_NO_CABLE;
1372 qla2x00_mark_all_devices_lost(vha);
1373 }
1374
1375 if (vha->vp_idx) {
1376 atomic_set(&vha->vp_state, VP_FAILED);
1377 fc_vport_set_state(vha->fc_vport, FC_VPORT_FAILED);
1378 }
1379
1380 vha->flags.management_server_logged_in = 0;
1381 ha->link_data_rate = PORT_SPEED_UNKNOWN;
1382 qla2x00_post_aen_work(vha, FCH_EVT_LINKDOWN, 0);
1383 break;
1384
1385 case MBA_LIP_RESET:
1386 ql_dbg(ql_dbg_async, vha, 0x500c,
1387 "LIP reset occurred (%x).\n", mb[1]);
1388
1389 if (atomic_read(&vha->loop_state) != LOOP_DOWN) {
1390 atomic_set(&vha->loop_state, LOOP_DOWN);
1391 atomic_set(&vha->loop_down_timer, LOOP_DOWN_TIME);
1392 qla2x00_mark_all_devices_lost(vha);
1393 }
1394
1395 if (vha->vp_idx) {
1396 atomic_set(&vha->vp_state, VP_FAILED);
1397 fc_vport_set_state(vha->fc_vport, FC_VPORT_FAILED);
1398 }
1399
1400 set_bit(RESET_MARKER_NEEDED, &vha->dpc_flags);
1401
1402 ha->operating_mode = LOOP;
1403 vha->flags.management_server_logged_in = 0;
1404 qla2x00_post_aen_work(vha, FCH_EVT_LIPRESET, mb[1]);
1405 break;
1406
1407
1408 case MBA_POINT_TO_POINT:
1409 ha->flags.lip_ae = 0;
1410
1411 if (IS_QLA2100(ha))
1412 break;
1413
1414 if (IS_CNA_CAPABLE(ha)) {
1415 ql_dbg(ql_dbg_async, vha, 0x500d,
1416 "DCBX Completed -- %04x %04x %04x.\n",
1417 mb[1], mb[2], mb[3]);
1418 if (ha->notify_dcbx_comp && !vha->vp_idx)
1419 complete(&ha->dcbx_comp);
1420
1421 } else
1422 ql_dbg(ql_dbg_async, vha, 0x500e,
1423 "Asynchronous P2P MODE received.\n");
1424
1425
1426
1427
1428
1429 if (atomic_read(&vha->loop_state) != LOOP_DOWN) {
1430 atomic_set(&vha->loop_state, LOOP_DOWN);
1431 if (!atomic_read(&vha->loop_down_timer))
1432 atomic_set(&vha->loop_down_timer,
1433 LOOP_DOWN_TIME);
1434 if (!N2N_TOPO(ha))
1435 qla2x00_mark_all_devices_lost(vha);
1436 }
1437
1438 if (vha->vp_idx) {
1439 atomic_set(&vha->vp_state, VP_FAILED);
1440 fc_vport_set_state(vha->fc_vport, FC_VPORT_FAILED);
1441 }
1442
1443 if (!(test_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags)))
1444 set_bit(RESET_MARKER_NEEDED, &vha->dpc_flags);
1445
1446 set_bit(REGISTER_FC4_NEEDED, &vha->dpc_flags);
1447 set_bit(REGISTER_FDMI_NEEDED, &vha->dpc_flags);
1448
1449 vha->flags.management_server_logged_in = 0;
1450 break;
1451
1452 case MBA_CHG_IN_CONNECTION:
1453 if (IS_QLA2100(ha))
1454 break;
1455
1456 ql_dbg(ql_dbg_async, vha, 0x500f,
1457 "Configuration change detected: value=%x.\n", mb[1]);
1458
1459 if (atomic_read(&vha->loop_state) != LOOP_DOWN) {
1460 atomic_set(&vha->loop_state, LOOP_DOWN);
1461 if (!atomic_read(&vha->loop_down_timer))
1462 atomic_set(&vha->loop_down_timer,
1463 LOOP_DOWN_TIME);
1464 qla2x00_mark_all_devices_lost(vha);
1465 }
1466
1467 if (vha->vp_idx) {
1468 atomic_set(&vha->vp_state, VP_FAILED);
1469 fc_vport_set_state(vha->fc_vport, FC_VPORT_FAILED);
1470 }
1471
1472 set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags);
1473 set_bit(LOCAL_LOOP_UPDATE, &vha->dpc_flags);
1474 break;
1475
1476 case MBA_PORT_UPDATE:
1477
1478
1479
1480
1481
1482
1483
1484
1485
1486
1487
1488
1489
1490
1491
1492 if (IS_QLA2XXX_MIDTYPE(ha) &&
1493 ((mb[1] == 0xffff && (mb[3] & 0xff) != 0xff) ||
1494 (mb[1] != 0xffff)) && vha->vp_idx != (mb[3] & 0xff))
1495 break;
1496
1497 if (mb[2] == 0x7) {
1498 ql_dbg(ql_dbg_async, vha, 0x5010,
1499 "Port %s %04x %04x %04x.\n",
1500 mb[1] == 0xffff ? "unavailable" : "logout",
1501 mb[1], mb[2], mb[3]);
1502
1503 if (mb[1] == 0xffff)
1504 goto global_port_update;
1505
1506 if (mb[1] == NPH_SNS_LID(ha)) {
1507 set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags);
1508 set_bit(LOCAL_LOOP_UPDATE, &vha->dpc_flags);
1509 break;
1510 }
1511
1512
1513 if (IS_FWI2_CAPABLE(ha))
1514 handle_cnt = NPH_SNS;
1515 else
1516 handle_cnt = SIMPLE_NAME_SERVER;
1517 if (mb[1] == handle_cnt) {
1518 set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags);
1519 set_bit(LOCAL_LOOP_UPDATE, &vha->dpc_flags);
1520 break;
1521 }
1522
1523
1524 fcport = qla2x00_find_fcport_by_loopid(vha, mb[1]);
1525 if (!fcport)
1526 break;
1527 if (atomic_read(&fcport->state) != FCS_ONLINE)
1528 break;
1529 ql_dbg(ql_dbg_async, vha, 0x508a,
1530 "Marking port lost loopid=%04x portid=%06x.\n",
1531 fcport->loop_id, fcport->d_id.b24);
1532 if (qla_ini_mode_enabled(vha)) {
1533 fcport->logout_on_delete = 0;
1534 qlt_schedule_sess_for_deletion(fcport);
1535 }
1536 break;
1537
1538global_port_update:
1539 if (atomic_read(&vha->loop_state) != LOOP_DOWN) {
1540 atomic_set(&vha->loop_state, LOOP_DOWN);
1541 atomic_set(&vha->loop_down_timer,
1542 LOOP_DOWN_TIME);
1543 vha->device_flags |= DFLG_NO_CABLE;
1544 qla2x00_mark_all_devices_lost(vha);
1545 }
1546
1547 if (vha->vp_idx) {
1548 atomic_set(&vha->vp_state, VP_FAILED);
1549 fc_vport_set_state(vha->fc_vport,
1550 FC_VPORT_FAILED);
1551 qla2x00_mark_all_devices_lost(vha);
1552 }
1553
1554 vha->flags.management_server_logged_in = 0;
1555 ha->link_data_rate = PORT_SPEED_UNKNOWN;
1556 break;
1557 }
1558
1559
1560
1561
1562
1563
1564 atomic_set(&vha->loop_down_timer, 0);
1565 if (atomic_read(&vha->loop_state) != LOOP_DOWN &&
1566 !ha->flags.n2n_ae &&
1567 atomic_read(&vha->loop_state) != LOOP_DEAD) {
1568 ql_dbg(ql_dbg_async, vha, 0x5011,
1569 "Asynchronous PORT UPDATE ignored %04x/%04x/%04x.\n",
1570 mb[1], mb[2], mb[3]);
1571 break;
1572 }
1573
1574 ql_dbg(ql_dbg_async, vha, 0x5012,
1575 "Port database changed %04x %04x %04x.\n",
1576 mb[1], mb[2], mb[3]);
1577
1578
1579
1580
1581 atomic_set(&vha->loop_state, LOOP_UP);
1582 vha->scan.scan_retry = 0;
1583
1584 set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags);
1585 set_bit(LOCAL_LOOP_UPDATE, &vha->dpc_flags);
1586 set_bit(VP_CONFIG_OK, &vha->vp_flags);
1587 break;
1588
1589 case MBA_RSCN_UPDATE:
1590
1591 if (vha->vp_idx && test_bit(VP_SCR_NEEDED, &vha->vp_flags))
1592 break;
1593
1594 if (ha->flags.npiv_supported && vha->vp_idx != (mb[3] & 0xff))
1595 break;
1596
1597 ql_log(ql_log_warn, vha, 0x5013,
1598 "RSCN database changed -- %04x %04x %04x.\n",
1599 mb[1], mb[2], mb[3]);
1600
1601 rscn_entry = ((mb[1] & 0xff) << 16) | mb[2];
1602 host_pid = (vha->d_id.b.domain << 16) | (vha->d_id.b.area << 8)
1603 | vha->d_id.b.al_pa;
1604 if (rscn_entry == host_pid) {
1605 ql_dbg(ql_dbg_async, vha, 0x5014,
1606 "Ignoring RSCN update to local host "
1607 "port ID (%06x).\n", host_pid);
1608 break;
1609 }
1610
1611
1612 rscn_entry = ((mb[1] & 0x3ff) << 16) | mb[2];
1613
1614
1615 if (qla2x00_is_a_vp_did(vha, rscn_entry))
1616 break;
1617
1618 atomic_set(&vha->loop_down_timer, 0);
1619 vha->flags.management_server_logged_in = 0;
1620 {
1621 struct event_arg ea;
1622
1623 memset(&ea, 0, sizeof(ea));
1624 ea.id.b24 = rscn_entry;
1625 ea.id.b.rsvd_1 = rscn_entry >> 24;
1626 qla2x00_handle_rscn(vha, &ea);
1627 qla2x00_post_aen_work(vha, FCH_EVT_RSCN, rscn_entry);
1628 }
1629 break;
1630 case MBA_CONGN_NOTI_RECV:
1631 if (!ha->flags.scm_enabled ||
1632 mb[1] != QLA_CON_PRIMITIVE_RECEIVED)
1633 break;
1634
1635 if (mb[2] == QLA_CONGESTION_ARB_WARNING) {
1636 ql_dbg(ql_dbg_async, vha, 0x509b,
1637 "Congestion Warning %04x %04x.\n", mb[1], mb[2]);
1638 } else if (mb[2] == QLA_CONGESTION_ARB_ALARM) {
1639 ql_log(ql_log_warn, vha, 0x509b,
1640 "Congestion Alarm %04x %04x.\n", mb[1], mb[2]);
1641 }
1642 break;
1643
1644 case MBA_ZIO_RESPONSE:
1645 ql_dbg(ql_dbg_async, vha, 0x5015,
1646 "[R|Z]IO update completion.\n");
1647
1648 if (IS_FWI2_CAPABLE(ha))
1649 qla24xx_process_response_queue(vha, rsp);
1650 else
1651 qla2x00_process_response_queue(rsp);
1652 break;
1653
1654 case MBA_DISCARD_RND_FRAME:
1655 ql_dbg(ql_dbg_async, vha, 0x5016,
1656 "Discard RND Frame -- %04x %04x %04x.\n",
1657 mb[1], mb[2], mb[3]);
1658 vha->interface_err_cnt++;
1659 break;
1660
1661 case MBA_TRACE_NOTIFICATION:
1662 ql_dbg(ql_dbg_async, vha, 0x5017,
1663 "Trace Notification -- %04x %04x.\n", mb[1], mb[2]);
1664 break;
1665
1666 case MBA_ISP84XX_ALERT:
1667 ql_dbg(ql_dbg_async, vha, 0x5018,
1668 "ISP84XX Alert Notification -- %04x %04x %04x.\n",
1669 mb[1], mb[2], mb[3]);
1670
1671 spin_lock_irqsave(&ha->cs84xx->access_lock, flags);
1672 switch (mb[1]) {
1673 case A84_PANIC_RECOVERY:
1674 ql_log(ql_log_info, vha, 0x5019,
1675 "Alert 84XX: panic recovery %04x %04x.\n",
1676 mb[2], mb[3]);
1677 break;
1678 case A84_OP_LOGIN_COMPLETE:
1679 ha->cs84xx->op_fw_version = mb[3] << 16 | mb[2];
1680 ql_log(ql_log_info, vha, 0x501a,
1681 "Alert 84XX: firmware version %x.\n",
1682 ha->cs84xx->op_fw_version);
1683 break;
1684 case A84_DIAG_LOGIN_COMPLETE:
1685 ha->cs84xx->diag_fw_version = mb[3] << 16 | mb[2];
1686 ql_log(ql_log_info, vha, 0x501b,
1687 "Alert 84XX: diagnostic firmware version %x.\n",
1688 ha->cs84xx->diag_fw_version);
1689 break;
1690 case A84_GOLD_LOGIN_COMPLETE:
1691 ha->cs84xx->diag_fw_version = mb[3] << 16 | mb[2];
1692 ha->cs84xx->fw_update = 1;
1693 ql_log(ql_log_info, vha, 0x501c,
1694 "Alert 84XX: gold firmware version %x.\n",
1695 ha->cs84xx->gold_fw_version);
1696 break;
1697 default:
1698 ql_log(ql_log_warn, vha, 0x501d,
1699 "Alert 84xx: Invalid Alert %04x %04x %04x.\n",
1700 mb[1], mb[2], mb[3]);
1701 }
1702 spin_unlock_irqrestore(&ha->cs84xx->access_lock, flags);
1703 break;
1704 case MBA_DCBX_START:
1705 ql_dbg(ql_dbg_async, vha, 0x501e,
1706 "DCBX Started -- %04x %04x %04x.\n",
1707 mb[1], mb[2], mb[3]);
1708 break;
1709 case MBA_DCBX_PARAM_UPDATE:
1710 ql_dbg(ql_dbg_async, vha, 0x501f,
1711 "DCBX Parameters Updated -- %04x %04x %04x.\n",
1712 mb[1], mb[2], mb[3]);
1713 break;
1714 case MBA_FCF_CONF_ERR:
1715 ql_dbg(ql_dbg_async, vha, 0x5020,
1716 "FCF Configuration Error -- %04x %04x %04x.\n",
1717 mb[1], mb[2], mb[3]);
1718 break;
1719 case MBA_IDC_NOTIFY:
1720 if (IS_QLA8031(vha->hw) || IS_QLA8044(ha)) {
1721 mb[4] = rd_reg_word(®24->mailbox4);
1722 if (((mb[2] & 0x7fff) == MBC_PORT_RESET ||
1723 (mb[2] & 0x7fff) == MBC_SET_PORT_CONFIG) &&
1724 (mb[4] & INTERNAL_LOOPBACK_MASK) != 0) {
1725 set_bit(ISP_QUIESCE_NEEDED, &vha->dpc_flags);
1726
1727
1728
1729 if (atomic_read(&vha->loop_state) == LOOP_DOWN)
1730 atomic_set(&vha->loop_down_timer,
1731 LOOP_DOWN_TIME);
1732 qla2xxx_wake_dpc(vha);
1733 }
1734 }
1735 fallthrough;
1736 case MBA_IDC_COMPLETE:
1737 if (ha->notify_lb_portup_comp && !vha->vp_idx)
1738 complete(&ha->lb_portup_comp);
1739 fallthrough;
1740 case MBA_IDC_TIME_EXT:
1741 if (IS_QLA81XX(vha->hw) || IS_QLA8031(vha->hw) ||
1742 IS_QLA8044(ha))
1743 qla81xx_idc_event(vha, mb[0], mb[1]);
1744 break;
1745
1746 case MBA_IDC_AEN:
1747 if (IS_QLA27XX(ha) || IS_QLA28XX(ha)) {
1748 vha->hw_err_cnt++;
1749 qla27xx_handle_8200_aen(vha, mb);
1750 } else if (IS_QLA83XX(ha)) {
1751 mb[4] = rd_reg_word(®24->mailbox4);
1752 mb[5] = rd_reg_word(®24->mailbox5);
1753 mb[6] = rd_reg_word(®24->mailbox6);
1754 mb[7] = rd_reg_word(®24->mailbox7);
1755 qla83xx_handle_8200_aen(vha, mb);
1756 } else {
1757 ql_dbg(ql_dbg_async, vha, 0x5052,
1758 "skip Heartbeat processing mb0-3=[0x%04x] [0x%04x] [0x%04x] [0x%04x]\n",
1759 mb[0], mb[1], mb[2], mb[3]);
1760 }
1761 break;
1762
1763 case MBA_DPORT_DIAGNOSTICS:
1764 ql_dbg(ql_dbg_async, vha, 0x5052,
1765 "D-Port Diagnostics: %04x %04x %04x %04x\n",
1766 mb[0], mb[1], mb[2], mb[3]);
1767 memcpy(vha->dport_data, mb, sizeof(vha->dport_data));
1768 if (IS_QLA83XX(ha) || IS_QLA27XX(ha) || IS_QLA28XX(ha)) {
1769 static char *results[] = {
1770 "start", "done(pass)", "done(error)", "undefined" };
1771 static char *types[] = {
1772 "none", "dynamic", "static", "other" };
1773 uint result = mb[1] >> 0 & 0x3;
1774 uint type = mb[1] >> 6 & 0x3;
1775 uint sw = mb[1] >> 15 & 0x1;
1776 ql_dbg(ql_dbg_async, vha, 0x5052,
1777 "D-Port Diagnostics: result=%s type=%s [sw=%u]\n",
1778 results[result], types[type], sw);
1779 if (result == 2) {
1780 static char *reasons[] = {
1781 "reserved", "unexpected reject",
1782 "unexpected phase", "retry exceeded",
1783 "timed out", "not supported",
1784 "user stopped" };
1785 uint reason = mb[2] >> 0 & 0xf;
1786 uint phase = mb[2] >> 12 & 0xf;
1787 ql_dbg(ql_dbg_async, vha, 0x5052,
1788 "D-Port Diagnostics: reason=%s phase=%u \n",
1789 reason < 7 ? reasons[reason] : "other",
1790 phase >> 1);
1791 }
1792 }
1793 break;
1794
1795 case MBA_TEMPERATURE_ALERT:
1796 ql_dbg(ql_dbg_async, vha, 0x505e,
1797 "TEMPERATURE ALERT: %04x %04x %04x\n", mb[1], mb[2], mb[3]);
1798 break;
1799
1800 case MBA_TRANS_INSERT:
1801 ql_dbg(ql_dbg_async, vha, 0x5091,
1802 "Transceiver Insertion: %04x\n", mb[1]);
1803 set_bit(DETECT_SFP_CHANGE, &vha->dpc_flags);
1804 break;
1805
1806 case MBA_TRANS_REMOVE:
1807 ql_dbg(ql_dbg_async, vha, 0x5091, "Transceiver Removal\n");
1808 break;
1809
1810 default:
1811 ql_dbg(ql_dbg_async, vha, 0x5057,
1812 "Unknown AEN:%04x %04x %04x %04x\n",
1813 mb[0], mb[1], mb[2], mb[3]);
1814 }
1815
1816 qlt_async_event(mb[0], vha, mb);
1817
1818 if (!vha->vp_idx && ha->num_vhosts)
1819 qla2x00_alert_all_vps(rsp, mb);
1820}
1821
1822
1823
1824
1825
1826
1827
1828void
1829qla2x00_process_completed_request(struct scsi_qla_host *vha,
1830 struct req_que *req, uint32_t index)
1831{
1832 srb_t *sp;
1833 struct qla_hw_data *ha = vha->hw;
1834
1835
1836 if (index >= req->num_outstanding_cmds) {
1837 ql_log(ql_log_warn, vha, 0x3014,
1838 "Invalid SCSI command index (%x).\n", index);
1839
1840 if (IS_P3P_TYPE(ha))
1841 set_bit(FCOE_CTX_RESET_NEEDED, &vha->dpc_flags);
1842 else
1843 set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
1844 return;
1845 }
1846
1847 sp = req->outstanding_cmds[index];
1848 if (sp) {
1849
1850 req->outstanding_cmds[index] = NULL;
1851
1852
1853 sp->done(sp, DID_OK << 16);
1854 } else {
1855 ql_log(ql_log_warn, vha, 0x3016, "Invalid SCSI SRB.\n");
1856
1857 if (IS_P3P_TYPE(ha))
1858 set_bit(FCOE_CTX_RESET_NEEDED, &vha->dpc_flags);
1859 else
1860 set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
1861 }
1862}
1863
1864srb_t *
1865qla2x00_get_sp_from_handle(scsi_qla_host_t *vha, const char *func,
1866 struct req_que *req, void *iocb)
1867{
1868 struct qla_hw_data *ha = vha->hw;
1869 sts_entry_t *pkt = iocb;
1870 srb_t *sp;
1871 uint16_t index;
1872
1873 if (pkt->handle == QLA_SKIP_HANDLE)
1874 return NULL;
1875
1876 index = LSW(pkt->handle);
1877 if (index >= req->num_outstanding_cmds) {
1878 ql_log(ql_log_warn, vha, 0x5031,
1879 "%s: Invalid command index (%x) type %8ph.\n",
1880 func, index, iocb);
1881 if (IS_P3P_TYPE(ha))
1882 set_bit(FCOE_CTX_RESET_NEEDED, &vha->dpc_flags);
1883 else
1884 set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
1885 return NULL;
1886 }
1887 sp = req->outstanding_cmds[index];
1888 if (!sp) {
1889 ql_log(ql_log_warn, vha, 0x5032,
1890 "%s: Invalid completion handle (%x) -- timed-out.\n",
1891 func, index);
1892 return NULL;
1893 }
1894 if (sp->handle != index) {
1895 ql_log(ql_log_warn, vha, 0x5033,
1896 "%s: SRB handle (%x) mismatch %x.\n", func,
1897 sp->handle, index);
1898 return NULL;
1899 }
1900
1901 req->outstanding_cmds[index] = NULL;
1902 return sp;
1903}
1904
1905static void
1906qla2x00_mbx_iocb_entry(scsi_qla_host_t *vha, struct req_que *req,
1907 struct mbx_entry *mbx)
1908{
1909 const char func[] = "MBX-IOCB";
1910 const char *type;
1911 fc_port_t *fcport;
1912 srb_t *sp;
1913 struct srb_iocb *lio;
1914 uint16_t *data;
1915 uint16_t status;
1916
1917 sp = qla2x00_get_sp_from_handle(vha, func, req, mbx);
1918 if (!sp)
1919 return;
1920
1921 lio = &sp->u.iocb_cmd;
1922 type = sp->name;
1923 fcport = sp->fcport;
1924 data = lio->u.logio.data;
1925
1926 data[0] = MBS_COMMAND_ERROR;
1927 data[1] = lio->u.logio.flags & SRB_LOGIN_RETRIED ?
1928 QLA_LOGIO_LOGIN_RETRIED : 0;
1929 if (mbx->entry_status) {
1930 ql_dbg(ql_dbg_async, vha, 0x5043,
1931 "Async-%s error entry - hdl=%x portid=%02x%02x%02x "
1932 "entry-status=%x status=%x state-flag=%x "
1933 "status-flags=%x.\n", type, sp->handle,
1934 fcport->d_id.b.domain, fcport->d_id.b.area,
1935 fcport->d_id.b.al_pa, mbx->entry_status,
1936 le16_to_cpu(mbx->status), le16_to_cpu(mbx->state_flags),
1937 le16_to_cpu(mbx->status_flags));
1938
1939 ql_dump_buffer(ql_dbg_async + ql_dbg_buffer, vha, 0x5029,
1940 mbx, sizeof(*mbx));
1941
1942 goto logio_done;
1943 }
1944
1945 status = le16_to_cpu(mbx->status);
1946 if (status == 0x30 && sp->type == SRB_LOGIN_CMD &&
1947 le16_to_cpu(mbx->mb0) == MBS_COMMAND_COMPLETE)
1948 status = 0;
1949 if (!status && le16_to_cpu(mbx->mb0) == MBS_COMMAND_COMPLETE) {
1950 ql_dbg(ql_dbg_async, vha, 0x5045,
1951 "Async-%s complete - hdl=%x portid=%02x%02x%02x mbx1=%x.\n",
1952 type, sp->handle, fcport->d_id.b.domain,
1953 fcport->d_id.b.area, fcport->d_id.b.al_pa,
1954 le16_to_cpu(mbx->mb1));
1955
1956 data[0] = MBS_COMMAND_COMPLETE;
1957 if (sp->type == SRB_LOGIN_CMD) {
1958 fcport->port_type = FCT_TARGET;
1959 if (le16_to_cpu(mbx->mb1) & BIT_0)
1960 fcport->port_type = FCT_INITIATOR;
1961 else if (le16_to_cpu(mbx->mb1) & BIT_1)
1962 fcport->flags |= FCF_FCP2_DEVICE;
1963 }
1964 goto logio_done;
1965 }
1966
1967 data[0] = le16_to_cpu(mbx->mb0);
1968 switch (data[0]) {
1969 case MBS_PORT_ID_USED:
1970 data[1] = le16_to_cpu(mbx->mb1);
1971 break;
1972 case MBS_LOOP_ID_USED:
1973 break;
1974 default:
1975 data[0] = MBS_COMMAND_ERROR;
1976 break;
1977 }
1978
1979 ql_log(ql_log_warn, vha, 0x5046,
1980 "Async-%s failed - hdl=%x portid=%02x%02x%02x status=%x "
1981 "mb0=%x mb1=%x mb2=%x mb6=%x mb7=%x.\n", type, sp->handle,
1982 fcport->d_id.b.domain, fcport->d_id.b.area, fcport->d_id.b.al_pa,
1983 status, le16_to_cpu(mbx->mb0), le16_to_cpu(mbx->mb1),
1984 le16_to_cpu(mbx->mb2), le16_to_cpu(mbx->mb6),
1985 le16_to_cpu(mbx->mb7));
1986
1987logio_done:
1988 sp->done(sp, 0);
1989}
1990
1991static void
1992qla24xx_mbx_iocb_entry(scsi_qla_host_t *vha, struct req_que *req,
1993 struct mbx_24xx_entry *pkt)
1994{
1995 const char func[] = "MBX-IOCB2";
1996 struct qla_hw_data *ha = vha->hw;
1997 srb_t *sp;
1998 struct srb_iocb *si;
1999 u16 sz, i;
2000 int res;
2001
2002 sp = qla2x00_get_sp_from_handle(vha, func, req, pkt);
2003 if (!sp)
2004 return;
2005
2006 if (sp->type == SRB_SCSI_CMD ||
2007 sp->type == SRB_NVME_CMD ||
2008 sp->type == SRB_TM_CMD) {
2009 ql_log(ql_log_warn, vha, 0x509d,
2010 "Inconsistent event entry type %d\n", sp->type);
2011 if (IS_P3P_TYPE(ha))
2012 set_bit(FCOE_CTX_RESET_NEEDED, &vha->dpc_flags);
2013 else
2014 set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
2015 return;
2016 }
2017
2018 si = &sp->u.iocb_cmd;
2019 sz = min(ARRAY_SIZE(pkt->mb), ARRAY_SIZE(sp->u.iocb_cmd.u.mbx.in_mb));
2020
2021 for (i = 0; i < sz; i++)
2022 si->u.mbx.in_mb[i] = pkt->mb[i];
2023
2024 res = (si->u.mbx.in_mb[0] & MBS_MASK);
2025
2026 sp->done(sp, res);
2027}
2028
2029static void
2030qla24xxx_nack_iocb_entry(scsi_qla_host_t *vha, struct req_que *req,
2031 struct nack_to_isp *pkt)
2032{
2033 const char func[] = "nack";
2034 srb_t *sp;
2035 int res = 0;
2036
2037 sp = qla2x00_get_sp_from_handle(vha, func, req, pkt);
2038 if (!sp)
2039 return;
2040
2041 if (pkt->u.isp2x.status != cpu_to_le16(NOTIFY_ACK_SUCCESS))
2042 res = QLA_FUNCTION_FAILED;
2043
2044 sp->done(sp, res);
2045}
2046
2047static void
2048qla2x00_ct_entry(scsi_qla_host_t *vha, struct req_que *req,
2049 sts_entry_t *pkt, int iocb_type)
2050{
2051 const char func[] = "CT_IOCB";
2052 const char *type;
2053 srb_t *sp;
2054 struct bsg_job *bsg_job;
2055 struct fc_bsg_reply *bsg_reply;
2056 uint16_t comp_status;
2057 int res = 0;
2058
2059 sp = qla2x00_get_sp_from_handle(vha, func, req, pkt);
2060 if (!sp)
2061 return;
2062
2063 switch (sp->type) {
2064 case SRB_CT_CMD:
2065 bsg_job = sp->u.bsg_job;
2066 bsg_reply = bsg_job->reply;
2067
2068 type = "ct pass-through";
2069
2070 comp_status = le16_to_cpu(pkt->comp_status);
2071
2072
2073
2074
2075
2076 bsg_reply->reply_data.ctels_reply.status = FC_CTELS_STATUS_OK;
2077 bsg_job->reply_len = sizeof(struct fc_bsg_reply);
2078
2079 if (comp_status != CS_COMPLETE) {
2080 if (comp_status == CS_DATA_UNDERRUN) {
2081 res = DID_OK << 16;
2082 bsg_reply->reply_payload_rcv_len =
2083 le16_to_cpu(pkt->rsp_info_len);
2084
2085 ql_log(ql_log_warn, vha, 0x5048,
2086 "CT pass-through-%s error comp_status=0x%x total_byte=0x%x.\n",
2087 type, comp_status,
2088 bsg_reply->reply_payload_rcv_len);
2089 } else {
2090 ql_log(ql_log_warn, vha, 0x5049,
2091 "CT pass-through-%s error comp_status=0x%x.\n",
2092 type, comp_status);
2093 res = DID_ERROR << 16;
2094 bsg_reply->reply_payload_rcv_len = 0;
2095 }
2096 ql_dump_buffer(ql_dbg_async + ql_dbg_buffer, vha, 0x5035,
2097 pkt, sizeof(*pkt));
2098 } else {
2099 res = DID_OK << 16;
2100 bsg_reply->reply_payload_rcv_len =
2101 bsg_job->reply_payload.payload_len;
2102 bsg_job->reply_len = 0;
2103 }
2104 break;
2105 case SRB_CT_PTHRU_CMD:
2106
2107
2108
2109
2110 res = qla2x00_chk_ms_status(vha, (ms_iocb_entry_t *)pkt,
2111 (struct ct_sns_rsp *)sp->u.iocb_cmd.u.ctarg.rsp,
2112 sp->name);
2113 break;
2114 }
2115
2116 sp->done(sp, res);
2117}
2118
2119static void
2120qla24xx_els_ct_entry(scsi_qla_host_t *v, struct req_que *req,
2121 struct sts_entry_24xx *pkt, int iocb_type)
2122{
2123 struct els_sts_entry_24xx *ese = (struct els_sts_entry_24xx *)pkt;
2124 const char func[] = "ELS_CT_IOCB";
2125 const char *type;
2126 srb_t *sp;
2127 struct bsg_job *bsg_job;
2128 struct fc_bsg_reply *bsg_reply;
2129 uint16_t comp_status;
2130 uint32_t fw_status[3];
2131 int res, logit = 1;
2132 struct srb_iocb *els;
2133 uint n;
2134 scsi_qla_host_t *vha;
2135 struct els_sts_entry_24xx *e = (struct els_sts_entry_24xx *)pkt;
2136
2137 sp = qla2x00_get_sp_from_handle(v, func, req, pkt);
2138 if (!sp)
2139 return;
2140 bsg_job = sp->u.bsg_job;
2141 vha = sp->vha;
2142
2143 type = NULL;
2144
2145 comp_status = fw_status[0] = le16_to_cpu(pkt->comp_status);
2146 fw_status[1] = le32_to_cpu(((struct els_sts_entry_24xx *)pkt)->error_subcode_1);
2147 fw_status[2] = le32_to_cpu(((struct els_sts_entry_24xx *)pkt)->error_subcode_2);
2148
2149 switch (sp->type) {
2150 case SRB_ELS_CMD_RPT:
2151 case SRB_ELS_CMD_HST:
2152 type = "rpt hst";
2153 break;
2154 case SRB_ELS_CMD_HST_NOLOGIN:
2155 type = "els";
2156 {
2157 struct els_entry_24xx *els = (void *)pkt;
2158 struct qla_bsg_auth_els_request *p =
2159 (struct qla_bsg_auth_els_request *)bsg_job->request;
2160
2161 ql_dbg(ql_dbg_user, vha, 0x700f,
2162 "%s %s. portid=%02x%02x%02x status %x xchg %x bsg ptr %p\n",
2163 __func__, sc_to_str(p->e.sub_cmd),
2164 e->d_id[2], e->d_id[1], e->d_id[0],
2165 comp_status, p->e.extra_rx_xchg_address, bsg_job);
2166
2167 if (!(le16_to_cpu(els->control_flags) & ECF_PAYLOAD_DESCR_MASK)) {
2168 if (sp->remap.remapped) {
2169 n = sg_copy_from_buffer(bsg_job->reply_payload.sg_list,
2170 bsg_job->reply_payload.sg_cnt,
2171 sp->remap.rsp.buf,
2172 sp->remap.rsp.len);
2173 ql_dbg(ql_dbg_user + ql_dbg_verbose, vha, 0x700e,
2174 "%s: SG copied %x of %x\n",
2175 __func__, n, sp->remap.rsp.len);
2176 } else {
2177 ql_dbg(ql_dbg_user, vha, 0x700f,
2178 "%s: NOT REMAPPED (error)...!!!\n",
2179 __func__);
2180 }
2181 }
2182 }
2183 break;
2184 case SRB_CT_CMD:
2185 type = "ct pass-through";
2186 break;
2187 case SRB_ELS_DCMD:
2188 type = "Driver ELS logo";
2189 if (iocb_type != ELS_IOCB_TYPE) {
2190 ql_dbg(ql_dbg_user, vha, 0x5047,
2191 "Completing %s: (%p) type=%d.\n",
2192 type, sp, sp->type);
2193 sp->done(sp, 0);
2194 return;
2195 }
2196 break;
2197 case SRB_CT_PTHRU_CMD:
2198
2199
2200
2201 res = qla2x00_chk_ms_status(sp->vha, (ms_iocb_entry_t *)pkt,
2202 (struct ct_sns_rsp *)sp->u.iocb_cmd.u.ctarg.rsp,
2203 sp->name);
2204 sp->done(sp, res);
2205 return;
2206 default:
2207 ql_dbg(ql_dbg_user, vha, 0x503e,
2208 "Unrecognized SRB: (%p) type=%d.\n", sp, sp->type);
2209 return;
2210 }
2211
2212 if (iocb_type == ELS_IOCB_TYPE) {
2213 els = &sp->u.iocb_cmd;
2214 els->u.els_plogi.fw_status[0] = cpu_to_le32(fw_status[0]);
2215 els->u.els_plogi.fw_status[1] = cpu_to_le32(fw_status[1]);
2216 els->u.els_plogi.fw_status[2] = cpu_to_le32(fw_status[2]);
2217 els->u.els_plogi.comp_status = cpu_to_le16(fw_status[0]);
2218 if (comp_status == CS_COMPLETE) {
2219 res = DID_OK << 16;
2220 } else {
2221 if (comp_status == CS_DATA_UNDERRUN) {
2222 res = DID_OK << 16;
2223 els->u.els_plogi.len = cpu_to_le16(le32_to_cpu(
2224 ese->total_byte_count));
2225
2226 if (sp->remap.remapped &&
2227 ((u8 *)sp->remap.rsp.buf)[0] == ELS_LS_ACC) {
2228 ql_dbg(ql_dbg_user, vha, 0x503f,
2229 "%s IOCB Done LS_ACC %02x%02x%02x -> %02x%02x%02x",
2230 __func__, e->s_id[0], e->s_id[2], e->s_id[1],
2231 e->d_id[2], e->d_id[1], e->d_id[0]);
2232 logit = 0;
2233 }
2234
2235 } else if (comp_status == CS_PORT_LOGGED_OUT) {
2236 ql_dbg(ql_dbg_disc, vha, 0x911e,
2237 "%s %d schedule session deletion\n",
2238 __func__, __LINE__);
2239
2240 els->u.els_plogi.len = 0;
2241 res = DID_IMM_RETRY << 16;
2242 qlt_schedule_sess_for_deletion(sp->fcport);
2243 } else {
2244 els->u.els_plogi.len = 0;
2245 res = DID_ERROR << 16;
2246 }
2247
2248 if (logit) {
2249 if (sp->remap.remapped &&
2250 ((u8 *)sp->remap.rsp.buf)[0] == ELS_LS_RJT) {
2251 ql_dbg(ql_dbg_user, vha, 0x503f,
2252 "%s IOCB Done LS_RJT hdl=%x comp_status=0x%x\n",
2253 type, sp->handle, comp_status);
2254
2255 ql_dbg(ql_dbg_user, vha, 0x503f,
2256 "subcode 1=0x%x subcode 2=0x%x bytes=0x%x %02x%02x%02x -> %02x%02x%02x\n",
2257 fw_status[1], fw_status[2],
2258 le32_to_cpu(((struct els_sts_entry_24xx *)
2259 pkt)->total_byte_count),
2260 e->s_id[0], e->s_id[2], e->s_id[1],
2261 e->d_id[2], e->d_id[1], e->d_id[0]);
2262 } else {
2263 ql_log(ql_log_info, vha, 0x503f,
2264 "%s IOCB Done hdl=%x comp_status=0x%x\n",
2265 type, sp->handle, comp_status);
2266 ql_log(ql_log_info, vha, 0x503f,
2267 "subcode 1=0x%x subcode 2=0x%x bytes=0x%x %02x%02x%02x -> %02x%02x%02x\n",
2268 fw_status[1], fw_status[2],
2269 le32_to_cpu(((struct els_sts_entry_24xx *)
2270 pkt)->total_byte_count),
2271 e->s_id[0], e->s_id[2], e->s_id[1],
2272 e->d_id[2], e->d_id[1], e->d_id[0]);
2273 }
2274 }
2275 }
2276 goto els_ct_done;
2277 }
2278
2279
2280
2281
2282 bsg_job = sp->u.bsg_job;
2283 bsg_reply = bsg_job->reply;
2284 bsg_reply->reply_data.ctels_reply.status = FC_CTELS_STATUS_OK;
2285 bsg_job->reply_len = sizeof(struct fc_bsg_reply) + sizeof(fw_status);
2286
2287 if (comp_status != CS_COMPLETE) {
2288 if (comp_status == CS_DATA_UNDERRUN) {
2289 res = DID_OK << 16;
2290 bsg_reply->reply_payload_rcv_len =
2291 le32_to_cpu(ese->total_byte_count);
2292
2293 ql_dbg(ql_dbg_user, vha, 0x503f,
2294 "ELS-CT pass-through-%s error hdl=%x comp_status-status=0x%x "
2295 "error subcode 1=0x%x error subcode 2=0x%x total_byte = 0x%x.\n",
2296 type, sp->handle, comp_status, fw_status[1], fw_status[2],
2297 le32_to_cpu(ese->total_byte_count));
2298 } else {
2299 ql_dbg(ql_dbg_user, vha, 0x5040,
2300 "ELS-CT pass-through-%s error hdl=%x comp_status-status=0x%x "
2301 "error subcode 1=0x%x error subcode 2=0x%x.\n",
2302 type, sp->handle, comp_status,
2303 le32_to_cpu(ese->error_subcode_1),
2304 le32_to_cpu(ese->error_subcode_2));
2305 res = DID_ERROR << 16;
2306 bsg_reply->reply_payload_rcv_len = 0;
2307 }
2308 memcpy(bsg_job->reply + sizeof(struct fc_bsg_reply),
2309 fw_status, sizeof(fw_status));
2310 ql_dump_buffer(ql_dbg_user + ql_dbg_buffer, vha, 0x5056,
2311 pkt, sizeof(*pkt));
2312 }
2313 else {
2314 res = DID_OK << 16;
2315 bsg_reply->reply_payload_rcv_len = bsg_job->reply_payload.payload_len;
2316 bsg_job->reply_len = 0;
2317 }
2318els_ct_done:
2319
2320 sp->done(sp, res);
2321}
2322
2323static void
2324qla24xx_logio_entry(scsi_qla_host_t *vha, struct req_que *req,
2325 struct logio_entry_24xx *logio)
2326{
2327 const char func[] = "LOGIO-IOCB";
2328 const char *type;
2329 fc_port_t *fcport;
2330 srb_t *sp;
2331 struct srb_iocb *lio;
2332 uint16_t *data;
2333 uint32_t iop[2];
2334 int logit = 1;
2335
2336 sp = qla2x00_get_sp_from_handle(vha, func, req, logio);
2337 if (!sp)
2338 return;
2339
2340 lio = &sp->u.iocb_cmd;
2341 type = sp->name;
2342 fcport = sp->fcport;
2343 data = lio->u.logio.data;
2344
2345 data[0] = MBS_COMMAND_ERROR;
2346 data[1] = lio->u.logio.flags & SRB_LOGIN_RETRIED ?
2347 QLA_LOGIO_LOGIN_RETRIED : 0;
2348 if (logio->entry_status) {
2349 ql_log(ql_log_warn, fcport->vha, 0x5034,
2350 "Async-%s error entry - %8phC hdl=%x"
2351 "portid=%02x%02x%02x entry-status=%x.\n",
2352 type, fcport->port_name, sp->handle, fcport->d_id.b.domain,
2353 fcport->d_id.b.area, fcport->d_id.b.al_pa,
2354 logio->entry_status);
2355 ql_dump_buffer(ql_dbg_async + ql_dbg_buffer, vha, 0x504d,
2356 logio, sizeof(*logio));
2357
2358 goto logio_done;
2359 }
2360
2361 if (le16_to_cpu(logio->comp_status) == CS_COMPLETE) {
2362 ql_dbg(ql_dbg_async, sp->vha, 0x5036,
2363 "Async-%s complete: handle=%x pid=%06x wwpn=%8phC iop0=%x\n",
2364 type, sp->handle, fcport->d_id.b24, fcport->port_name,
2365 le32_to_cpu(logio->io_parameter[0]));
2366
2367 vha->hw->exch_starvation = 0;
2368 data[0] = MBS_COMMAND_COMPLETE;
2369
2370 if (sp->type == SRB_PRLI_CMD) {
2371 lio->u.logio.iop[0] =
2372 le32_to_cpu(logio->io_parameter[0]);
2373 lio->u.logio.iop[1] =
2374 le32_to_cpu(logio->io_parameter[1]);
2375 goto logio_done;
2376 }
2377
2378 if (sp->type != SRB_LOGIN_CMD)
2379 goto logio_done;
2380
2381 lio->u.logio.iop[1] = le32_to_cpu(logio->io_parameter[5]);
2382 if (le32_to_cpu(logio->io_parameter[5]) & LIO_COMM_FEAT_FCSP)
2383 fcport->flags |= FCF_FCSP_DEVICE;
2384
2385 iop[0] = le32_to_cpu(logio->io_parameter[0]);
2386 if (iop[0] & BIT_4) {
2387 fcport->port_type = FCT_TARGET;
2388 if (iop[0] & BIT_8)
2389 fcport->flags |= FCF_FCP2_DEVICE;
2390 } else if (iop[0] & BIT_5)
2391 fcport->port_type = FCT_INITIATOR;
2392
2393 if (iop[0] & BIT_7)
2394 fcport->flags |= FCF_CONF_COMP_SUPPORTED;
2395
2396 if (logio->io_parameter[7] || logio->io_parameter[8])
2397 fcport->supported_classes |= FC_COS_CLASS2;
2398 if (logio->io_parameter[9] || logio->io_parameter[10])
2399 fcport->supported_classes |= FC_COS_CLASS3;
2400
2401 goto logio_done;
2402 }
2403
2404 iop[0] = le32_to_cpu(logio->io_parameter[0]);
2405 iop[1] = le32_to_cpu(logio->io_parameter[1]);
2406 lio->u.logio.iop[0] = iop[0];
2407 lio->u.logio.iop[1] = iop[1];
2408 switch (iop[0]) {
2409 case LSC_SCODE_PORTID_USED:
2410 data[0] = MBS_PORT_ID_USED;
2411 data[1] = LSW(iop[1]);
2412 logit = 0;
2413 break;
2414 case LSC_SCODE_NPORT_USED:
2415 data[0] = MBS_LOOP_ID_USED;
2416 logit = 0;
2417 break;
2418 case LSC_SCODE_CMD_FAILED:
2419 if (iop[1] == 0x0606) {
2420
2421
2422
2423
2424 data[0] = MBS_COMMAND_COMPLETE;
2425 goto logio_done;
2426 }
2427 data[0] = MBS_COMMAND_ERROR;
2428 break;
2429 case LSC_SCODE_NOXCB:
2430 vha->hw->exch_starvation++;
2431 if (vha->hw->exch_starvation > 5) {
2432 ql_log(ql_log_warn, vha, 0xd046,
2433 "Exchange starvation. Resetting RISC\n");
2434
2435 vha->hw->exch_starvation = 0;
2436
2437 if (IS_P3P_TYPE(vha->hw))
2438 set_bit(FCOE_CTX_RESET_NEEDED, &vha->dpc_flags);
2439 else
2440 set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
2441 qla2xxx_wake_dpc(vha);
2442 }
2443 fallthrough;
2444 default:
2445 data[0] = MBS_COMMAND_ERROR;
2446 break;
2447 }
2448
2449 if (logit)
2450 ql_log(ql_log_warn, sp->vha, 0x5037, "Async-%s failed: "
2451 "handle=%x pid=%06x wwpn=%8phC comp_status=%x iop0=%x iop1=%x\n",
2452 type, sp->handle, fcport->d_id.b24, fcport->port_name,
2453 le16_to_cpu(logio->comp_status),
2454 le32_to_cpu(logio->io_parameter[0]),
2455 le32_to_cpu(logio->io_parameter[1]));
2456 else
2457 ql_dbg(ql_dbg_disc, sp->vha, 0x5037, "Async-%s failed: "
2458 "handle=%x pid=%06x wwpn=%8phC comp_status=%x iop0=%x iop1=%x\n",
2459 type, sp->handle, fcport->d_id.b24, fcport->port_name,
2460 le16_to_cpu(logio->comp_status),
2461 le32_to_cpu(logio->io_parameter[0]),
2462 le32_to_cpu(logio->io_parameter[1]));
2463
2464logio_done:
2465 sp->done(sp, 0);
2466}
2467
2468static void
2469qla24xx_tm_iocb_entry(scsi_qla_host_t *vha, struct req_que *req, void *tsk)
2470{
2471 const char func[] = "TMF-IOCB";
2472 const char *type;
2473 fc_port_t *fcport;
2474 srb_t *sp;
2475 struct srb_iocb *iocb;
2476 struct sts_entry_24xx *sts = (struct sts_entry_24xx *)tsk;
2477 u16 comp_status;
2478
2479 sp = qla2x00_get_sp_from_handle(vha, func, req, tsk);
2480 if (!sp)
2481 return;
2482
2483 comp_status = le16_to_cpu(sts->comp_status);
2484 iocb = &sp->u.iocb_cmd;
2485 type = sp->name;
2486 fcport = sp->fcport;
2487 iocb->u.tmf.data = QLA_SUCCESS;
2488
2489 if (sts->entry_status) {
2490 ql_log(ql_log_warn, fcport->vha, 0x5038,
2491 "Async-%s error - hdl=%x entry-status(%x).\n",
2492 type, sp->handle, sts->entry_status);
2493 iocb->u.tmf.data = QLA_FUNCTION_FAILED;
2494 } else if (sts->comp_status != cpu_to_le16(CS_COMPLETE)) {
2495 ql_log(ql_log_warn, fcport->vha, 0x5039,
2496 "Async-%s error - hdl=%x completion status(%x).\n",
2497 type, sp->handle, comp_status);
2498 iocb->u.tmf.data = QLA_FUNCTION_FAILED;
2499 } else if ((le16_to_cpu(sts->scsi_status) &
2500 SS_RESPONSE_INFO_LEN_VALID)) {
2501 if (le32_to_cpu(sts->rsp_data_len) < 4) {
2502 ql_log(ql_log_warn, fcport->vha, 0x503b,
2503 "Async-%s error - hdl=%x not enough response(%d).\n",
2504 type, sp->handle, sts->rsp_data_len);
2505 } else if (sts->data[3]) {
2506 ql_log(ql_log_warn, fcport->vha, 0x503c,
2507 "Async-%s error - hdl=%x response(%x).\n",
2508 type, sp->handle, sts->data[3]);
2509 iocb->u.tmf.data = QLA_FUNCTION_FAILED;
2510 }
2511 }
2512
2513 switch (comp_status) {
2514 case CS_PORT_LOGGED_OUT:
2515 case CS_PORT_CONFIG_CHG:
2516 case CS_PORT_BUSY:
2517 case CS_INCOMPLETE:
2518 case CS_PORT_UNAVAILABLE:
2519 case CS_TIMEOUT:
2520 case CS_RESET:
2521 if (atomic_read(&fcport->state) == FCS_ONLINE) {
2522 ql_dbg(ql_dbg_disc, fcport->vha, 0x3021,
2523 "-Port to be marked lost on fcport=%02x%02x%02x, current port state= %s comp_status %x.\n",
2524 fcport->d_id.b.domain, fcport->d_id.b.area,
2525 fcport->d_id.b.al_pa,
2526 port_state_str[FCS_ONLINE],
2527 comp_status);
2528
2529 qlt_schedule_sess_for_deletion(fcport);
2530 }
2531 break;
2532
2533 default:
2534 break;
2535 }
2536
2537 if (iocb->u.tmf.data != QLA_SUCCESS)
2538 ql_dump_buffer(ql_dbg_async + ql_dbg_buffer, sp->vha, 0x5055,
2539 sts, sizeof(*sts));
2540
2541 sp->done(sp, 0);
2542}
2543
2544static void qla24xx_nvme_iocb_entry(scsi_qla_host_t *vha, struct req_que *req,
2545 void *tsk, srb_t *sp)
2546{
2547 fc_port_t *fcport;
2548 struct srb_iocb *iocb;
2549 struct sts_entry_24xx *sts = (struct sts_entry_24xx *)tsk;
2550 uint16_t state_flags;
2551 struct nvmefc_fcp_req *fd;
2552 uint16_t ret = QLA_SUCCESS;
2553 __le16 comp_status = sts->comp_status;
2554 int logit = 0;
2555
2556 iocb = &sp->u.iocb_cmd;
2557 fcport = sp->fcport;
2558 iocb->u.nvme.comp_status = comp_status;
2559 state_flags = le16_to_cpu(sts->state_flags);
2560 fd = iocb->u.nvme.desc;
2561
2562 if (unlikely(iocb->u.nvme.aen_op))
2563 atomic_dec(&sp->vha->hw->nvme_active_aen_cnt);
2564 else
2565 sp->qpair->cmd_completion_cnt++;
2566
2567 if (unlikely(comp_status != CS_COMPLETE))
2568 logit = 1;
2569
2570 fd->transferred_length = fd->payload_length -
2571 le32_to_cpu(sts->residual_len);
2572
2573
2574
2575
2576
2577
2578
2579
2580
2581 if (!(state_flags & (SF_FCP_RSP_DMA | SF_NVME_ERSP))) {
2582 iocb->u.nvme.rsp_pyld_len = 0;
2583 } else if ((state_flags & (SF_FCP_RSP_DMA | SF_NVME_ERSP)) ==
2584 (SF_FCP_RSP_DMA | SF_NVME_ERSP)) {
2585
2586 iocb->u.nvme.rsp_pyld_len = sts->nvme_rsp_pyld_len;
2587 } else if ((state_flags & SF_FCP_RSP_DMA)) {
2588
2589
2590
2591
2592 iocb->u.nvme.rsp_pyld_len = 0;
2593 fd->transferred_length = 0;
2594 ql_dbg(ql_dbg_io, fcport->vha, 0x307a,
2595 "Unexpected values in NVMe_RSP IU.\n");
2596 logit = 1;
2597 } else if (state_flags & SF_NVME_ERSP) {
2598 uint32_t *inbuf, *outbuf;
2599 uint16_t iter;
2600
2601 inbuf = (uint32_t *)&sts->nvme_ersp_data;
2602 outbuf = (uint32_t *)fd->rspaddr;
2603 iocb->u.nvme.rsp_pyld_len = sts->nvme_rsp_pyld_len;
2604 if (unlikely(le16_to_cpu(iocb->u.nvme.rsp_pyld_len) >
2605 sizeof(struct nvme_fc_ersp_iu))) {
2606 if (ql_mask_match(ql_dbg_io)) {
2607 WARN_ONCE(1, "Unexpected response payload length %u.\n",
2608 iocb->u.nvme.rsp_pyld_len);
2609 ql_log(ql_log_warn, fcport->vha, 0x5100,
2610 "Unexpected response payload length %u.\n",
2611 iocb->u.nvme.rsp_pyld_len);
2612 }
2613 iocb->u.nvme.rsp_pyld_len =
2614 cpu_to_le16(sizeof(struct nvme_fc_ersp_iu));
2615 }
2616 iter = le16_to_cpu(iocb->u.nvme.rsp_pyld_len) >> 2;
2617 for (; iter; iter--)
2618 *outbuf++ = swab32(*inbuf++);
2619 }
2620
2621 if (state_flags & SF_NVME_ERSP) {
2622 struct nvme_fc_ersp_iu *rsp_iu = fd->rspaddr;
2623 u32 tgt_xfer_len;
2624
2625 tgt_xfer_len = be32_to_cpu(rsp_iu->xfrd_len);
2626 if (fd->transferred_length != tgt_xfer_len) {
2627 ql_log(ql_log_warn, fcport->vha, 0x3079,
2628 "Dropped frame(s) detected (sent/rcvd=%u/%u).\n",
2629 tgt_xfer_len, fd->transferred_length);
2630 logit = 1;
2631 } else if (le16_to_cpu(comp_status) == CS_DATA_UNDERRUN) {
2632
2633
2634
2635
2636 logit = 0;
2637 }
2638 }
2639
2640 if (unlikely(logit))
2641 ql_log(ql_dbg_io, fcport->vha, 0x5060,
2642 "NVME-%s ERR Handling - hdl=%x status(%x) tr_len:%x resid=%x ox_id=%x\n",
2643 sp->name, sp->handle, comp_status,
2644 fd->transferred_length, le32_to_cpu(sts->residual_len),
2645 sts->ox_id);
2646
2647
2648
2649
2650
2651 switch (le16_to_cpu(comp_status)) {
2652 case CS_COMPLETE:
2653 break;
2654
2655 case CS_RESET:
2656 case CS_PORT_UNAVAILABLE:
2657 case CS_PORT_LOGGED_OUT:
2658 fcport->nvme_flag |= NVME_FLAG_RESETTING;
2659 if (atomic_read(&fcport->state) == FCS_ONLINE) {
2660 ql_dbg(ql_dbg_disc, fcport->vha, 0x3021,
2661 "Port to be marked lost on fcport=%06x, current "
2662 "port state= %s comp_status %x.\n",
2663 fcport->d_id.b24, port_state_str[FCS_ONLINE],
2664 comp_status);
2665
2666 qlt_schedule_sess_for_deletion(fcport);
2667 }
2668 fallthrough;
2669 case CS_ABORTED:
2670 case CS_PORT_BUSY:
2671 fd->transferred_length = 0;
2672 iocb->u.nvme.rsp_pyld_len = 0;
2673 ret = QLA_ABORTED;
2674 break;
2675 case CS_DATA_UNDERRUN:
2676 break;
2677 default:
2678 ret = QLA_FUNCTION_FAILED;
2679 break;
2680 }
2681 sp->done(sp, ret);
2682}
2683
2684static void qla_ctrlvp_completed(scsi_qla_host_t *vha, struct req_que *req,
2685 struct vp_ctrl_entry_24xx *vce)
2686{
2687 const char func[] = "CTRLVP-IOCB";
2688 srb_t *sp;
2689 int rval = QLA_SUCCESS;
2690
2691 sp = qla2x00_get_sp_from_handle(vha, func, req, vce);
2692 if (!sp)
2693 return;
2694
2695 if (vce->entry_status != 0) {
2696 ql_dbg(ql_dbg_vport, vha, 0x10c4,
2697 "%s: Failed to complete IOCB -- error status (%x)\n",
2698 sp->name, vce->entry_status);
2699 rval = QLA_FUNCTION_FAILED;
2700 } else if (vce->comp_status != cpu_to_le16(CS_COMPLETE)) {
2701 ql_dbg(ql_dbg_vport, vha, 0x10c5,
2702 "%s: Failed to complete IOCB -- completion status (%x) vpidx %x\n",
2703 sp->name, le16_to_cpu(vce->comp_status),
2704 le16_to_cpu(vce->vp_idx_failed));
2705 rval = QLA_FUNCTION_FAILED;
2706 } else {
2707 ql_dbg(ql_dbg_vport, vha, 0x10c6,
2708 "Done %s.\n", __func__);
2709 }
2710
2711 sp->rc = rval;
2712 sp->done(sp, rval);
2713}
2714
2715
2716static void qla2x00_process_response_entry(struct scsi_qla_host *vha,
2717 struct rsp_que *rsp,
2718 sts_entry_t *pkt)
2719{
2720 sts21_entry_t *sts21_entry;
2721 sts22_entry_t *sts22_entry;
2722 uint16_t handle_cnt;
2723 uint16_t cnt;
2724
2725 switch (pkt->entry_type) {
2726 case STATUS_TYPE:
2727 qla2x00_status_entry(vha, rsp, pkt);
2728 break;
2729 case STATUS_TYPE_21:
2730 sts21_entry = (sts21_entry_t *)pkt;
2731 handle_cnt = sts21_entry->handle_count;
2732 for (cnt = 0; cnt < handle_cnt; cnt++)
2733 qla2x00_process_completed_request(vha, rsp->req,
2734 sts21_entry->handle[cnt]);
2735 break;
2736 case STATUS_TYPE_22:
2737 sts22_entry = (sts22_entry_t *)pkt;
2738 handle_cnt = sts22_entry->handle_count;
2739 for (cnt = 0; cnt < handle_cnt; cnt++)
2740 qla2x00_process_completed_request(vha, rsp->req,
2741 sts22_entry->handle[cnt]);
2742 break;
2743 case STATUS_CONT_TYPE:
2744 qla2x00_status_cont_entry(rsp, (sts_cont_entry_t *)pkt);
2745 break;
2746 case MBX_IOCB_TYPE:
2747 qla2x00_mbx_iocb_entry(vha, rsp->req, (struct mbx_entry *)pkt);
2748 break;
2749 case CT_IOCB_TYPE:
2750 qla2x00_ct_entry(vha, rsp->req, pkt, CT_IOCB_TYPE);
2751 break;
2752 default:
2753
2754 ql_log(ql_log_warn, vha, 0x504a,
2755 "Received unknown response pkt type %x entry status=%x.\n",
2756 pkt->entry_type, pkt->entry_status);
2757 break;
2758 }
2759}
2760
2761
2762
2763
2764
2765void
2766qla2x00_process_response_queue(struct rsp_que *rsp)
2767{
2768 struct scsi_qla_host *vha;
2769 struct qla_hw_data *ha = rsp->hw;
2770 struct device_reg_2xxx __iomem *reg = &ha->iobase->isp;
2771 sts_entry_t *pkt;
2772
2773 vha = pci_get_drvdata(ha->pdev);
2774
2775 if (!vha->flags.online)
2776 return;
2777
2778 while (rsp->ring_ptr->signature != RESPONSE_PROCESSED) {
2779 pkt = (sts_entry_t *)rsp->ring_ptr;
2780
2781 rsp->ring_index++;
2782 if (rsp->ring_index == rsp->length) {
2783 rsp->ring_index = 0;
2784 rsp->ring_ptr = rsp->ring;
2785 } else {
2786 rsp->ring_ptr++;
2787 }
2788
2789 if (pkt->entry_status != 0) {
2790 qla2x00_error_entry(vha, rsp, pkt);
2791 ((response_t *)pkt)->signature = RESPONSE_PROCESSED;
2792 wmb();
2793 continue;
2794 }
2795
2796 qla2x00_process_response_entry(vha, rsp, pkt);
2797 ((response_t *)pkt)->signature = RESPONSE_PROCESSED;
2798 wmb();
2799 }
2800
2801
2802 wrt_reg_word(ISP_RSP_Q_OUT(ha, reg), rsp->ring_index);
2803}
2804
2805static inline void
2806qla2x00_handle_sense(srb_t *sp, uint8_t *sense_data, uint32_t par_sense_len,
2807 uint32_t sense_len, struct rsp_que *rsp, int res)
2808{
2809 struct scsi_qla_host *vha = sp->vha;
2810 struct scsi_cmnd *cp = GET_CMD_SP(sp);
2811 uint32_t track_sense_len;
2812
2813 if (sense_len >= SCSI_SENSE_BUFFERSIZE)
2814 sense_len = SCSI_SENSE_BUFFERSIZE;
2815
2816 SET_CMD_SENSE_LEN(sp, sense_len);
2817 SET_CMD_SENSE_PTR(sp, cp->sense_buffer);
2818 track_sense_len = sense_len;
2819
2820 if (sense_len > par_sense_len)
2821 sense_len = par_sense_len;
2822
2823 memcpy(cp->sense_buffer, sense_data, sense_len);
2824
2825 SET_CMD_SENSE_PTR(sp, cp->sense_buffer + sense_len);
2826 track_sense_len -= sense_len;
2827 SET_CMD_SENSE_LEN(sp, track_sense_len);
2828
2829 if (track_sense_len != 0) {
2830 rsp->status_srb = sp;
2831 cp->result = res;
2832 }
2833
2834 if (sense_len) {
2835 ql_dbg(ql_dbg_io + ql_dbg_buffer, vha, 0x301c,
2836 "Check condition Sense data, nexus%ld:%d:%llu cmd=%p.\n",
2837 sp->vha->host_no, cp->device->id, cp->device->lun,
2838 cp);
2839 ql_dump_buffer(ql_dbg_io + ql_dbg_buffer, vha, 0x302b,
2840 cp->sense_buffer, sense_len);
2841 }
2842}
2843
2844struct scsi_dif_tuple {
2845 __be16 guard;
2846 __be16 app_tag;
2847 __be32 ref_tag;
2848};
2849
2850
2851
2852
2853
2854
2855
2856static inline int
2857qla2x00_handle_dif_error(srb_t *sp, struct sts_entry_24xx *sts24)
2858{
2859 struct scsi_qla_host *vha = sp->vha;
2860 struct scsi_cmnd *cmd = GET_CMD_SP(sp);
2861 uint8_t *ap = &sts24->data[12];
2862 uint8_t *ep = &sts24->data[20];
2863 uint32_t e_ref_tag, a_ref_tag;
2864 uint16_t e_app_tag, a_app_tag;
2865 uint16_t e_guard, a_guard;
2866
2867
2868
2869
2870
2871 a_guard = get_unaligned_le16(ap + 2);
2872 a_app_tag = get_unaligned_le16(ap + 0);
2873 a_ref_tag = get_unaligned_le32(ap + 4);
2874 e_guard = get_unaligned_le16(ep + 2);
2875 e_app_tag = get_unaligned_le16(ep + 0);
2876 e_ref_tag = get_unaligned_le32(ep + 4);
2877
2878 ql_dbg(ql_dbg_io, vha, 0x3023,
2879 "iocb(s) %p Returned STATUS.\n", sts24);
2880
2881 ql_dbg(ql_dbg_io, vha, 0x3024,
2882 "DIF ERROR in cmd 0x%x lba 0x%llx act ref"
2883 " tag=0x%x, exp ref_tag=0x%x, act app tag=0x%x, exp app"
2884 " tag=0x%x, act guard=0x%x, exp guard=0x%x.\n",
2885 cmd->cmnd[0], (u64)scsi_get_lba(cmd), a_ref_tag, e_ref_tag,
2886 a_app_tag, e_app_tag, a_guard, e_guard);
2887
2888
2889
2890
2891
2892
2893 if (a_app_tag == be16_to_cpu(T10_PI_APP_ESCAPE) &&
2894 (scsi_get_prot_type(cmd) != SCSI_PROT_DIF_TYPE3 ||
2895 a_ref_tag == be32_to_cpu(T10_PI_REF_ESCAPE))) {
2896 uint32_t blocks_done, resid;
2897 sector_t lba_s = scsi_get_lba(cmd);
2898
2899
2900 blocks_done = e_ref_tag - (uint32_t)lba_s + 1;
2901
2902 resid = scsi_bufflen(cmd) - (blocks_done *
2903 cmd->device->sector_size);
2904
2905 scsi_set_resid(cmd, resid);
2906 cmd->result = DID_OK << 16;
2907
2908
2909 if (scsi_prot_sg_count(cmd)) {
2910 uint32_t i, j = 0, k = 0, num_ent;
2911 struct scatterlist *sg;
2912 struct t10_pi_tuple *spt;
2913
2914
2915 scsi_for_each_prot_sg(cmd, sg,
2916 scsi_prot_sg_count(cmd), i) {
2917 num_ent = sg_dma_len(sg) / 8;
2918 if (k + num_ent < blocks_done) {
2919 k += num_ent;
2920 continue;
2921 }
2922 j = blocks_done - k - 1;
2923 k = blocks_done;
2924 break;
2925 }
2926
2927 if (k != blocks_done) {
2928 ql_log(ql_log_warn, vha, 0x302f,
2929 "unexpected tag values tag:lba=%x:%llx)\n",
2930 e_ref_tag, (unsigned long long)lba_s);
2931 return 1;
2932 }
2933
2934 spt = page_address(sg_page(sg)) + sg->offset;
2935 spt += j;
2936
2937 spt->app_tag = T10_PI_APP_ESCAPE;
2938 if (scsi_get_prot_type(cmd) == SCSI_PROT_DIF_TYPE3)
2939 spt->ref_tag = T10_PI_REF_ESCAPE;
2940 }
2941
2942 return 0;
2943 }
2944
2945
2946 if (e_guard != a_guard) {
2947 scsi_build_sense(cmd, 1, ILLEGAL_REQUEST, 0x10, 0x1);
2948 set_host_byte(cmd, DID_ABORT);
2949 return 1;
2950 }
2951
2952
2953 if (e_ref_tag != a_ref_tag) {
2954 scsi_build_sense(cmd, 1, ILLEGAL_REQUEST, 0x10, 0x3);
2955 set_host_byte(cmd, DID_ABORT);
2956 return 1;
2957 }
2958
2959
2960 if (e_app_tag != a_app_tag) {
2961 scsi_build_sense(cmd, 1, ILLEGAL_REQUEST, 0x10, 0x2);
2962 set_host_byte(cmd, DID_ABORT);
2963 return 1;
2964 }
2965
2966 return 1;
2967}
2968
2969static void
2970qla25xx_process_bidir_status_iocb(scsi_qla_host_t *vha, void *pkt,
2971 struct req_que *req, uint32_t index)
2972{
2973 struct qla_hw_data *ha = vha->hw;
2974 srb_t *sp;
2975 uint16_t comp_status;
2976 uint16_t scsi_status;
2977 uint16_t thread_id;
2978 uint32_t rval = EXT_STATUS_OK;
2979 struct bsg_job *bsg_job = NULL;
2980 struct fc_bsg_request *bsg_request;
2981 struct fc_bsg_reply *bsg_reply;
2982 sts_entry_t *sts = pkt;
2983 struct sts_entry_24xx *sts24 = pkt;
2984
2985
2986 if (index >= req->num_outstanding_cmds) {
2987 ql_log(ql_log_warn, vha, 0x70af,
2988 "Invalid SCSI completion handle 0x%x.\n", index);
2989 set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
2990 return;
2991 }
2992
2993 sp = req->outstanding_cmds[index];
2994 if (!sp) {
2995 ql_log(ql_log_warn, vha, 0x70b0,
2996 "Req:%d: Invalid ISP SCSI completion handle(0x%x)\n",
2997 req->id, index);
2998
2999 set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
3000 return;
3001 }
3002
3003
3004 req->outstanding_cmds[index] = NULL;
3005 bsg_job = sp->u.bsg_job;
3006 bsg_request = bsg_job->request;
3007 bsg_reply = bsg_job->reply;
3008
3009 if (IS_FWI2_CAPABLE(ha)) {
3010 comp_status = le16_to_cpu(sts24->comp_status);
3011 scsi_status = le16_to_cpu(sts24->scsi_status) & SS_MASK;
3012 } else {
3013 comp_status = le16_to_cpu(sts->comp_status);
3014 scsi_status = le16_to_cpu(sts->scsi_status) & SS_MASK;
3015 }
3016
3017 thread_id = bsg_request->rqst_data.h_vendor.vendor_cmd[1];
3018 switch (comp_status) {
3019 case CS_COMPLETE:
3020 if (scsi_status == 0) {
3021 bsg_reply->reply_payload_rcv_len =
3022 bsg_job->reply_payload.payload_len;
3023 vha->qla_stats.input_bytes +=
3024 bsg_reply->reply_payload_rcv_len;
3025 vha->qla_stats.input_requests++;
3026 rval = EXT_STATUS_OK;
3027 }
3028 goto done;
3029
3030 case CS_DATA_OVERRUN:
3031 ql_dbg(ql_dbg_user, vha, 0x70b1,
3032 "Command completed with data overrun thread_id=%d\n",
3033 thread_id);
3034 rval = EXT_STATUS_DATA_OVERRUN;
3035 break;
3036
3037 case CS_DATA_UNDERRUN:
3038 ql_dbg(ql_dbg_user, vha, 0x70b2,
3039 "Command completed with data underrun thread_id=%d\n",
3040 thread_id);
3041 rval = EXT_STATUS_DATA_UNDERRUN;
3042 break;
3043 case CS_BIDIR_RD_OVERRUN:
3044 ql_dbg(ql_dbg_user, vha, 0x70b3,
3045 "Command completed with read data overrun thread_id=%d\n",
3046 thread_id);
3047 rval = EXT_STATUS_DATA_OVERRUN;
3048 break;
3049
3050 case CS_BIDIR_RD_WR_OVERRUN:
3051 ql_dbg(ql_dbg_user, vha, 0x70b4,
3052 "Command completed with read and write data overrun "
3053 "thread_id=%d\n", thread_id);
3054 rval = EXT_STATUS_DATA_OVERRUN;
3055 break;
3056
3057 case CS_BIDIR_RD_OVERRUN_WR_UNDERRUN:
3058 ql_dbg(ql_dbg_user, vha, 0x70b5,
3059 "Command completed with read data over and write data "
3060 "underrun thread_id=%d\n", thread_id);
3061 rval = EXT_STATUS_DATA_OVERRUN;
3062 break;
3063
3064 case CS_BIDIR_RD_UNDERRUN:
3065 ql_dbg(ql_dbg_user, vha, 0x70b6,
3066 "Command completed with read data underrun "
3067 "thread_id=%d\n", thread_id);
3068 rval = EXT_STATUS_DATA_UNDERRUN;
3069 break;
3070
3071 case CS_BIDIR_RD_UNDERRUN_WR_OVERRUN:
3072 ql_dbg(ql_dbg_user, vha, 0x70b7,
3073 "Command completed with read data under and write data "
3074 "overrun thread_id=%d\n", thread_id);
3075 rval = EXT_STATUS_DATA_UNDERRUN;
3076 break;
3077
3078 case CS_BIDIR_RD_WR_UNDERRUN:
3079 ql_dbg(ql_dbg_user, vha, 0x70b8,
3080 "Command completed with read and write data underrun "
3081 "thread_id=%d\n", thread_id);
3082 rval = EXT_STATUS_DATA_UNDERRUN;
3083 break;
3084
3085 case CS_BIDIR_DMA:
3086 ql_dbg(ql_dbg_user, vha, 0x70b9,
3087 "Command completed with data DMA error thread_id=%d\n",
3088 thread_id);
3089 rval = EXT_STATUS_DMA_ERR;
3090 break;
3091
3092 case CS_TIMEOUT:
3093 ql_dbg(ql_dbg_user, vha, 0x70ba,
3094 "Command completed with timeout thread_id=%d\n",
3095 thread_id);
3096 rval = EXT_STATUS_TIMEOUT;
3097 break;
3098 default:
3099 ql_dbg(ql_dbg_user, vha, 0x70bb,
3100 "Command completed with completion status=0x%x "
3101 "thread_id=%d\n", comp_status, thread_id);
3102 rval = EXT_STATUS_ERR;
3103 break;
3104 }
3105 bsg_reply->reply_payload_rcv_len = 0;
3106
3107done:
3108
3109 bsg_reply->reply_data.vendor_reply.vendor_rsp[0] = rval;
3110 bsg_job->reply_len = sizeof(struct fc_bsg_reply);
3111
3112
3113 sp->done(sp, DID_OK << 16);
3114
3115}
3116
3117
3118
3119
3120
3121
3122
3123static void
3124qla2x00_status_entry(scsi_qla_host_t *vha, struct rsp_que *rsp, void *pkt)
3125{
3126 srb_t *sp;
3127 fc_port_t *fcport;
3128 struct scsi_cmnd *cp;
3129 sts_entry_t *sts = pkt;
3130 struct sts_entry_24xx *sts24 = pkt;
3131 uint16_t comp_status;
3132 uint16_t scsi_status;
3133 uint16_t ox_id;
3134 uint8_t lscsi_status;
3135 int32_t resid;
3136 uint32_t sense_len, par_sense_len, rsp_info_len, resid_len,
3137 fw_resid_len;
3138 uint8_t *rsp_info, *sense_data;
3139 struct qla_hw_data *ha = vha->hw;
3140 uint32_t handle;
3141 uint16_t que;
3142 struct req_que *req;
3143 int logit = 1;
3144 int res = 0;
3145 uint16_t state_flags = 0;
3146 uint16_t sts_qual = 0;
3147
3148 if (IS_FWI2_CAPABLE(ha)) {
3149 comp_status = le16_to_cpu(sts24->comp_status);
3150 scsi_status = le16_to_cpu(sts24->scsi_status) & SS_MASK;
3151 state_flags = le16_to_cpu(sts24->state_flags);
3152 } else {
3153 comp_status = le16_to_cpu(sts->comp_status);
3154 scsi_status = le16_to_cpu(sts->scsi_status) & SS_MASK;
3155 }
3156 handle = (uint32_t) LSW(sts->handle);
3157 que = MSW(sts->handle);
3158 req = ha->req_q_map[que];
3159
3160
3161 if (req == NULL ||
3162 que >= find_first_zero_bit(ha->req_qid_map, ha->max_req_queues)) {
3163 ql_dbg(ql_dbg_io, vha, 0x3059,
3164 "Invalid status handle (0x%x): Bad req pointer. req=%p, "
3165 "que=%u.\n", sts->handle, req, que);
3166 return;
3167 }
3168
3169
3170 if (handle < req->num_outstanding_cmds) {
3171 sp = req->outstanding_cmds[handle];
3172 if (!sp) {
3173 ql_dbg(ql_dbg_io, vha, 0x3075,
3174 "%s(%ld): Already returned command for status handle (0x%x).\n",
3175 __func__, vha->host_no, sts->handle);
3176 return;
3177 }
3178 } else {
3179 ql_dbg(ql_dbg_io, vha, 0x3017,
3180 "Invalid status handle, out of range (0x%x).\n",
3181 sts->handle);
3182
3183 if (!test_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags)) {
3184 if (IS_P3P_TYPE(ha))
3185 set_bit(FCOE_CTX_RESET_NEEDED, &vha->dpc_flags);
3186 else
3187 set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
3188 qla2xxx_wake_dpc(vha);
3189 }
3190 return;
3191 }
3192 qla_put_iocbs(sp->qpair, &sp->iores);
3193
3194 if (sp->cmd_type != TYPE_SRB) {
3195 req->outstanding_cmds[handle] = NULL;
3196 ql_dbg(ql_dbg_io, vha, 0x3015,
3197 "Unknown sp->cmd_type %x %p).\n",
3198 sp->cmd_type, sp);
3199 return;
3200 }
3201
3202
3203 if (sp->type == SRB_NVME_CMD) {
3204 req->outstanding_cmds[handle] = NULL;
3205 qla24xx_nvme_iocb_entry(vha, req, pkt, sp);
3206 return;
3207 }
3208
3209 if (unlikely((state_flags & BIT_1) && (sp->type == SRB_BIDI_CMD))) {
3210 qla25xx_process_bidir_status_iocb(vha, pkt, req, handle);
3211 return;
3212 }
3213
3214
3215 if (sp->type == SRB_TM_CMD) {
3216 qla24xx_tm_iocb_entry(vha, req, pkt);
3217 return;
3218 }
3219
3220
3221 qla_chk_edif_rx_sa_delete_pending(vha, sp, sts24);
3222 sp->qpair->cmd_completion_cnt++;
3223
3224 if (comp_status == CS_COMPLETE && scsi_status == 0) {
3225 qla2x00_process_completed_request(vha, req, handle);
3226
3227 return;
3228 }
3229
3230 req->outstanding_cmds[handle] = NULL;
3231 cp = GET_CMD_SP(sp);
3232 if (cp == NULL) {
3233 ql_dbg(ql_dbg_io, vha, 0x3018,
3234 "Command already returned (0x%x/%p).\n",
3235 sts->handle, sp);
3236
3237 return;
3238 }
3239
3240 lscsi_status = scsi_status & STATUS_MASK;
3241
3242 fcport = sp->fcport;
3243
3244 ox_id = 0;
3245 sense_len = par_sense_len = rsp_info_len = resid_len =
3246 fw_resid_len = 0;
3247 if (IS_FWI2_CAPABLE(ha)) {
3248 if (scsi_status & SS_SENSE_LEN_VALID)
3249 sense_len = le32_to_cpu(sts24->sense_len);
3250 if (scsi_status & SS_RESPONSE_INFO_LEN_VALID)
3251 rsp_info_len = le32_to_cpu(sts24->rsp_data_len);
3252 if (scsi_status & (SS_RESIDUAL_UNDER | SS_RESIDUAL_OVER))
3253 resid_len = le32_to_cpu(sts24->rsp_residual_count);
3254 if (comp_status == CS_DATA_UNDERRUN)
3255 fw_resid_len = le32_to_cpu(sts24->residual_len);
3256 rsp_info = sts24->data;
3257 sense_data = sts24->data;
3258 host_to_fcp_swap(sts24->data, sizeof(sts24->data));
3259 ox_id = le16_to_cpu(sts24->ox_id);
3260 par_sense_len = sizeof(sts24->data);
3261 sts_qual = le16_to_cpu(sts24->status_qualifier);
3262 } else {
3263 if (scsi_status & SS_SENSE_LEN_VALID)
3264 sense_len = le16_to_cpu(sts->req_sense_length);
3265 if (scsi_status & SS_RESPONSE_INFO_LEN_VALID)
3266 rsp_info_len = le16_to_cpu(sts->rsp_info_len);
3267 resid_len = le32_to_cpu(sts->residual_length);
3268 rsp_info = sts->rsp_info;
3269 sense_data = sts->req_sense_data;
3270 par_sense_len = sizeof(sts->req_sense_data);
3271 }
3272
3273
3274 if (scsi_status & SS_RESPONSE_INFO_LEN_VALID) {
3275
3276 if (IS_FWI2_CAPABLE(ha)) {
3277 sense_data += rsp_info_len;
3278 par_sense_len -= rsp_info_len;
3279 }
3280 if (rsp_info_len > 3 && rsp_info[3]) {
3281 ql_dbg(ql_dbg_io, fcport->vha, 0x3019,
3282 "FCP I/O protocol failure (0x%x/0x%x).\n",
3283 rsp_info_len, rsp_info[3]);
3284
3285 res = DID_BUS_BUSY << 16;
3286 goto out;
3287 }
3288 }
3289
3290
3291 if (IS_FWI2_CAPABLE(ha) && comp_status == CS_COMPLETE &&
3292 scsi_status & SS_RESIDUAL_OVER)
3293 comp_status = CS_DATA_OVERRUN;
3294
3295
3296
3297
3298
3299 if (unlikely(lscsi_status == SAM_STAT_TASK_SET_FULL ||
3300 lscsi_status == SAM_STAT_BUSY))
3301 qla2x00_set_retry_delay_timestamp(fcport, sts_qual);
3302
3303
3304
3305
3306 switch (comp_status) {
3307 case CS_COMPLETE:
3308 case CS_QUEUE_FULL:
3309 if (scsi_status == 0) {
3310 res = DID_OK << 16;
3311 break;
3312 }
3313 if (scsi_status & (SS_RESIDUAL_UNDER | SS_RESIDUAL_OVER)) {
3314 resid = resid_len;
3315 scsi_set_resid(cp, resid);
3316
3317 if (!lscsi_status &&
3318 ((unsigned)(scsi_bufflen(cp) - resid) <
3319 cp->underflow)) {
3320 ql_dbg(ql_dbg_io, fcport->vha, 0x301a,
3321 "Mid-layer underflow detected (0x%x of 0x%x bytes).\n",
3322 resid, scsi_bufflen(cp));
3323
3324 res = DID_ERROR << 16;
3325 break;
3326 }
3327 }
3328 res = DID_OK << 16 | lscsi_status;
3329
3330 if (lscsi_status == SAM_STAT_TASK_SET_FULL) {
3331 ql_dbg(ql_dbg_io, fcport->vha, 0x301b,
3332 "QUEUE FULL detected.\n");
3333 break;
3334 }
3335 logit = 0;
3336 if (lscsi_status != SS_CHECK_CONDITION)
3337 break;
3338
3339 memset(cp->sense_buffer, 0, SCSI_SENSE_BUFFERSIZE);
3340 if (!(scsi_status & SS_SENSE_LEN_VALID))
3341 break;
3342
3343 qla2x00_handle_sense(sp, sense_data, par_sense_len, sense_len,
3344 rsp, res);
3345 break;
3346
3347 case CS_DATA_UNDERRUN:
3348
3349 resid = IS_FWI2_CAPABLE(ha) ? fw_resid_len : resid_len;
3350 scsi_set_resid(cp, resid);
3351 if (scsi_status & SS_RESIDUAL_UNDER) {
3352 if (IS_FWI2_CAPABLE(ha) && fw_resid_len != resid_len) {
3353 ql_log(ql_log_warn, fcport->vha, 0x301d,
3354 "Dropped frame(s) detected (0x%x of 0x%x bytes).\n",
3355 resid, scsi_bufflen(cp));
3356
3357 vha->interface_err_cnt++;
3358
3359 res = DID_ERROR << 16 | lscsi_status;
3360 goto check_scsi_status;
3361 }
3362
3363 if (!lscsi_status &&
3364 ((unsigned)(scsi_bufflen(cp) - resid) <
3365 cp->underflow)) {
3366 ql_dbg(ql_dbg_io, fcport->vha, 0x301e,
3367 "Mid-layer underflow detected (0x%x of 0x%x bytes).\n",
3368 resid, scsi_bufflen(cp));
3369
3370 res = DID_ERROR << 16;
3371 break;
3372 }
3373 } else if (lscsi_status != SAM_STAT_TASK_SET_FULL &&
3374 lscsi_status != SAM_STAT_BUSY) {
3375
3376
3377
3378
3379
3380 ql_log(ql_log_warn, fcport->vha, 0x301f,
3381 "Dropped frame(s) detected (0x%x of 0x%x bytes).\n",
3382 resid, scsi_bufflen(cp));
3383
3384 vha->interface_err_cnt++;
3385
3386 res = DID_ERROR << 16 | lscsi_status;
3387 goto check_scsi_status;
3388 } else {
3389 ql_dbg(ql_dbg_io, fcport->vha, 0x3030,
3390 "scsi_status: 0x%x, lscsi_status: 0x%x\n",
3391 scsi_status, lscsi_status);
3392 }
3393
3394 res = DID_OK << 16 | lscsi_status;
3395 logit = 0;
3396
3397check_scsi_status:
3398
3399
3400
3401
3402 if (lscsi_status != 0) {
3403 if (lscsi_status == SAM_STAT_TASK_SET_FULL) {
3404 ql_dbg(ql_dbg_io, fcport->vha, 0x3020,
3405 "QUEUE FULL detected.\n");
3406 logit = 1;
3407 break;
3408 }
3409 if (lscsi_status != SS_CHECK_CONDITION)
3410 break;
3411
3412 memset(cp->sense_buffer, 0, SCSI_SENSE_BUFFERSIZE);
3413 if (!(scsi_status & SS_SENSE_LEN_VALID))
3414 break;
3415
3416 qla2x00_handle_sense(sp, sense_data, par_sense_len,
3417 sense_len, rsp, res);
3418 }
3419 break;
3420
3421 case CS_PORT_LOGGED_OUT:
3422 case CS_PORT_CONFIG_CHG:
3423 case CS_PORT_BUSY:
3424 case CS_INCOMPLETE:
3425 case CS_PORT_UNAVAILABLE:
3426 case CS_TIMEOUT:
3427 case CS_RESET:
3428
3429
3430
3431
3432
3433
3434 res = DID_TRANSPORT_DISRUPTED << 16;
3435
3436 if (comp_status == CS_TIMEOUT) {
3437 if (IS_FWI2_CAPABLE(ha))
3438 break;
3439 else if ((le16_to_cpu(sts->status_flags) &
3440 SF_LOGOUT_SENT) == 0)
3441 break;
3442 }
3443
3444 if (atomic_read(&fcport->state) == FCS_ONLINE) {
3445 ql_dbg(ql_dbg_disc, fcport->vha, 0x3021,
3446 "Port to be marked lost on fcport=%02x%02x%02x, current "
3447 "port state= %s comp_status %x.\n", fcport->d_id.b.domain,
3448 fcport->d_id.b.area, fcport->d_id.b.al_pa,
3449 port_state_str[FCS_ONLINE],
3450 comp_status);
3451
3452 qlt_schedule_sess_for_deletion(fcport);
3453 }
3454
3455 break;
3456
3457 case CS_ABORTED:
3458 res = DID_RESET << 16;
3459 break;
3460
3461 case CS_DIF_ERROR:
3462 logit = qla2x00_handle_dif_error(sp, sts24);
3463 res = cp->result;
3464 break;
3465
3466 case CS_TRANSPORT:
3467 res = DID_ERROR << 16;
3468 vha->hw_err_cnt++;
3469
3470 if (!IS_PI_SPLIT_DET_CAPABLE(ha))
3471 break;
3472
3473 if (state_flags & BIT_4)
3474 scmd_printk(KERN_WARNING, cp,
3475 "Unsupported device '%s' found.\n",
3476 cp->device->vendor);
3477 break;
3478
3479 case CS_DMA:
3480 ql_log(ql_log_info, fcport->vha, 0x3022,
3481 "CS_DMA error: 0x%x-0x%x (0x%x) nexus=%ld:%d:%llu portid=%06x oxid=0x%x cdb=%10phN len=0x%x rsp_info=0x%x resid=0x%x fw_resid=0x%x sp=%p cp=%p.\n",
3482 comp_status, scsi_status, res, vha->host_no,
3483 cp->device->id, cp->device->lun, fcport->d_id.b24,
3484 ox_id, cp->cmnd, scsi_bufflen(cp), rsp_info_len,
3485 resid_len, fw_resid_len, sp, cp);
3486 ql_dump_buffer(ql_dbg_tgt + ql_dbg_verbose, vha, 0xe0ee,
3487 pkt, sizeof(*sts24));
3488 res = DID_ERROR << 16;
3489 vha->hw_err_cnt++;
3490 break;
3491 default:
3492 res = DID_ERROR << 16;
3493 break;
3494 }
3495
3496out:
3497 if (logit)
3498 ql_log(ql_dbg_io, fcport->vha, 0x3022,
3499 "FCP command status: 0x%x-0x%x (0x%x) nexus=%ld:%d:%llu portid=%02x%02x%02x oxid=0x%x cdb=%10phN len=0x%x rsp_info=0x%x resid=0x%x fw_resid=0x%x sp=%p cp=%p.\n",
3500 comp_status, scsi_status, res, vha->host_no,
3501 cp->device->id, cp->device->lun, fcport->d_id.b.domain,
3502 fcport->d_id.b.area, fcport->d_id.b.al_pa, ox_id,
3503 cp->cmnd, scsi_bufflen(cp), rsp_info_len,
3504 resid_len, fw_resid_len, sp, cp);
3505
3506 if (rsp->status_srb == NULL)
3507 sp->done(sp, res);
3508}
3509
3510
3511
3512
3513
3514
3515
3516
3517static void
3518qla2x00_status_cont_entry(struct rsp_que *rsp, sts_cont_entry_t *pkt)
3519{
3520 uint8_t sense_sz = 0;
3521 struct qla_hw_data *ha = rsp->hw;
3522 struct scsi_qla_host *vha = pci_get_drvdata(ha->pdev);
3523 srb_t *sp = rsp->status_srb;
3524 struct scsi_cmnd *cp;
3525 uint32_t sense_len;
3526 uint8_t *sense_ptr;
3527
3528 if (!sp || !GET_CMD_SENSE_LEN(sp))
3529 return;
3530
3531 sense_len = GET_CMD_SENSE_LEN(sp);
3532 sense_ptr = GET_CMD_SENSE_PTR(sp);
3533
3534 cp = GET_CMD_SP(sp);
3535 if (cp == NULL) {
3536 ql_log(ql_log_warn, vha, 0x3025,
3537 "cmd is NULL: already returned to OS (sp=%p).\n", sp);
3538
3539 rsp->status_srb = NULL;
3540 return;
3541 }
3542
3543 if (sense_len > sizeof(pkt->data))
3544 sense_sz = sizeof(pkt->data);
3545 else
3546 sense_sz = sense_len;
3547
3548
3549 if (IS_FWI2_CAPABLE(ha))
3550 host_to_fcp_swap(pkt->data, sizeof(pkt->data));
3551 memcpy(sense_ptr, pkt->data, sense_sz);
3552 ql_dump_buffer(ql_dbg_io + ql_dbg_buffer, vha, 0x302c,
3553 sense_ptr, sense_sz);
3554
3555 sense_len -= sense_sz;
3556 sense_ptr += sense_sz;
3557
3558 SET_CMD_SENSE_PTR(sp, sense_ptr);
3559 SET_CMD_SENSE_LEN(sp, sense_len);
3560
3561
3562 if (sense_len == 0) {
3563 rsp->status_srb = NULL;
3564 sp->done(sp, cp->result);
3565 }
3566}
3567
3568
3569
3570
3571
3572
3573
3574
3575static int
3576qla2x00_error_entry(scsi_qla_host_t *vha, struct rsp_que *rsp, sts_entry_t *pkt)
3577{
3578 srb_t *sp;
3579 struct qla_hw_data *ha = vha->hw;
3580 const char func[] = "ERROR-IOCB";
3581 uint16_t que = MSW(pkt->handle);
3582 struct req_que *req = NULL;
3583 int res = DID_ERROR << 16;
3584
3585 ql_dbg(ql_dbg_async, vha, 0x502a,
3586 "iocb type %xh with error status %xh, handle %xh, rspq id %d\n",
3587 pkt->entry_type, pkt->entry_status, pkt->handle, rsp->id);
3588
3589 if (que >= ha->max_req_queues || !ha->req_q_map[que])
3590 goto fatal;
3591
3592 req = ha->req_q_map[que];
3593
3594 if (pkt->entry_status & RF_BUSY)
3595 res = DID_BUS_BUSY << 16;
3596
3597 if ((pkt->handle & ~QLA_TGT_HANDLE_MASK) == QLA_TGT_SKIP_HANDLE)
3598 return 0;
3599
3600 switch (pkt->entry_type) {
3601 case NOTIFY_ACK_TYPE:
3602 case STATUS_TYPE:
3603 case STATUS_CONT_TYPE:
3604 case LOGINOUT_PORT_IOCB_TYPE:
3605 case CT_IOCB_TYPE:
3606 case ELS_IOCB_TYPE:
3607 case ABORT_IOCB_TYPE:
3608 case MBX_IOCB_TYPE:
3609 default:
3610 sp = qla2x00_get_sp_from_handle(vha, func, req, pkt);
3611 if (sp) {
3612 qla_put_iocbs(sp->qpair, &sp->iores);
3613 sp->done(sp, res);
3614 return 0;
3615 }
3616 break;
3617
3618 case SA_UPDATE_IOCB_TYPE:
3619 case ABTS_RESP_24XX:
3620 case CTIO_TYPE7:
3621 case CTIO_CRC2:
3622 return 1;
3623 }
3624fatal:
3625 ql_log(ql_log_warn, vha, 0x5030,
3626 "Error entry - invalid handle/queue (%04x).\n", que);
3627 return 0;
3628}
3629
3630
3631
3632
3633
3634
3635static void
3636qla24xx_mbx_completion(scsi_qla_host_t *vha, uint16_t mb0)
3637{
3638 uint16_t cnt;
3639 uint32_t mboxes;
3640 __le16 __iomem *wptr;
3641 struct qla_hw_data *ha = vha->hw;
3642 struct device_reg_24xx __iomem *reg = &ha->iobase->isp24;
3643
3644
3645 WARN_ON_ONCE(ha->mbx_count > 32);
3646 mboxes = (1ULL << ha->mbx_count) - 1;
3647 if (!ha->mcp)
3648 ql_dbg(ql_dbg_async, vha, 0x504e, "MBX pointer ERROR.\n");
3649 else
3650 mboxes = ha->mcp->in_mb;
3651
3652
3653 ha->flags.mbox_int = 1;
3654 ha->mailbox_out[0] = mb0;
3655 mboxes >>= 1;
3656 wptr = ®->mailbox1;
3657
3658 for (cnt = 1; cnt < ha->mbx_count; cnt++) {
3659 if (mboxes & BIT_0)
3660 ha->mailbox_out[cnt] = rd_reg_word(wptr);
3661
3662 mboxes >>= 1;
3663 wptr++;
3664 }
3665}
3666
3667static void
3668qla24xx_abort_iocb_entry(scsi_qla_host_t *vha, struct req_que *req,
3669 struct abort_entry_24xx *pkt)
3670{
3671 const char func[] = "ABT_IOCB";
3672 srb_t *sp;
3673 srb_t *orig_sp = NULL;
3674 struct srb_iocb *abt;
3675
3676 sp = qla2x00_get_sp_from_handle(vha, func, req, pkt);
3677 if (!sp)
3678 return;
3679
3680 abt = &sp->u.iocb_cmd;
3681 abt->u.abt.comp_status = pkt->comp_status;
3682 orig_sp = sp->cmd_sp;
3683
3684 if (orig_sp)
3685 qla_nvme_abort_process_comp_status(pkt, orig_sp);
3686
3687 sp->done(sp, 0);
3688}
3689
3690void qla24xx_nvme_ls4_iocb(struct scsi_qla_host *vha,
3691 struct pt_ls4_request *pkt, struct req_que *req)
3692{
3693 srb_t *sp;
3694 const char func[] = "LS4_IOCB";
3695 uint16_t comp_status;
3696
3697 sp = qla2x00_get_sp_from_handle(vha, func, req, pkt);
3698 if (!sp)
3699 return;
3700
3701 comp_status = le16_to_cpu(pkt->status);
3702 sp->done(sp, comp_status);
3703}
3704
3705
3706
3707
3708
3709
3710
3711
3712
3713static int qla_chk_cont_iocb_avail(struct scsi_qla_host *vha,
3714 struct rsp_que *rsp, response_t *pkt)
3715{
3716 int start_pkt_ring_index, end_pkt_ring_index, n_ring_index;
3717 response_t *end_pkt;
3718 int rc = 0;
3719 u32 rsp_q_in;
3720
3721 if (pkt->entry_count == 1)
3722 return rc;
3723
3724
3725 if (rsp->ring_index == 0)
3726 start_pkt_ring_index = rsp->length - 1;
3727 else
3728 start_pkt_ring_index = rsp->ring_index - 1;
3729
3730 if ((start_pkt_ring_index + pkt->entry_count) >= rsp->length)
3731 end_pkt_ring_index = start_pkt_ring_index + pkt->entry_count -
3732 rsp->length - 1;
3733 else
3734 end_pkt_ring_index = start_pkt_ring_index + pkt->entry_count - 1;
3735
3736 end_pkt = rsp->ring + end_pkt_ring_index;
3737
3738
3739 n_ring_index = end_pkt_ring_index + 1;
3740 if (n_ring_index >= rsp->length)
3741 n_ring_index = 0;
3742
3743 rsp_q_in = rsp->qpair->use_shadow_reg ? *rsp->in_ptr :
3744 rd_reg_dword(rsp->rsp_q_in);
3745
3746
3747 if ((rsp_q_in < start_pkt_ring_index && rsp_q_in < n_ring_index) ||
3748 rsp_q_in >= n_ring_index)
3749
3750 rc = 0;
3751 else
3752 rc = -EIO;
3753
3754 ql_dbg(ql_dbg_init + ql_dbg_verbose, vha, 0x5091,
3755 "%s - ring %p pkt %p end pkt %p entry count %#x rsp_q_in %d rc %d\n",
3756 __func__, rsp->ring, pkt, end_pkt, pkt->entry_count,
3757 rsp_q_in, rc);
3758
3759 return rc;
3760}
3761
3762
3763
3764
3765
3766
3767void qla24xx_process_response_queue(struct scsi_qla_host *vha,
3768 struct rsp_que *rsp)
3769{
3770 struct sts_entry_24xx *pkt;
3771 struct qla_hw_data *ha = vha->hw;
3772 struct purex_entry_24xx *purex_entry;
3773 struct purex_item *pure_item;
3774
3775 if (!ha->flags.fw_started)
3776 return;
3777
3778 if (rsp->qpair->cpuid != smp_processor_id() || !rsp->qpair->rcv_intr) {
3779 rsp->qpair->rcv_intr = 1;
3780 qla_cpu_update(rsp->qpair, smp_processor_id());
3781 }
3782
3783 while (rsp->ring_ptr->signature != RESPONSE_PROCESSED) {
3784 pkt = (struct sts_entry_24xx *)rsp->ring_ptr;
3785
3786 rsp->ring_index++;
3787 if (rsp->ring_index == rsp->length) {
3788 rsp->ring_index = 0;
3789 rsp->ring_ptr = rsp->ring;
3790 } else {
3791 rsp->ring_ptr++;
3792 }
3793
3794 if (pkt->entry_status != 0) {
3795 if (qla2x00_error_entry(vha, rsp, (sts_entry_t *) pkt))
3796 goto process_err;
3797
3798 ((response_t *)pkt)->signature = RESPONSE_PROCESSED;
3799 wmb();
3800 continue;
3801 }
3802process_err:
3803
3804 switch (pkt->entry_type) {
3805 case STATUS_TYPE:
3806 qla2x00_status_entry(vha, rsp, pkt);
3807 break;
3808 case STATUS_CONT_TYPE:
3809 qla2x00_status_cont_entry(rsp, (sts_cont_entry_t *)pkt);
3810 break;
3811 case VP_RPT_ID_IOCB_TYPE:
3812 qla24xx_report_id_acquisition(vha,
3813 (struct vp_rpt_id_entry_24xx *)pkt);
3814 break;
3815 case LOGINOUT_PORT_IOCB_TYPE:
3816 qla24xx_logio_entry(vha, rsp->req,
3817 (struct logio_entry_24xx *)pkt);
3818 break;
3819 case CT_IOCB_TYPE:
3820 qla24xx_els_ct_entry(vha, rsp->req, pkt, CT_IOCB_TYPE);
3821 break;
3822 case ELS_IOCB_TYPE:
3823 qla24xx_els_ct_entry(vha, rsp->req, pkt, ELS_IOCB_TYPE);
3824 break;
3825 case ABTS_RECV_24XX:
3826 if (qla_ini_mode_enabled(vha)) {
3827 pure_item = qla24xx_copy_std_pkt(vha, pkt);
3828 if (!pure_item)
3829 break;
3830 qla24xx_queue_purex_item(vha, pure_item,
3831 qla24xx_process_abts);
3832 break;
3833 }
3834 if (IS_QLA83XX(ha) || IS_QLA27XX(ha) ||
3835 IS_QLA28XX(ha)) {
3836
3837 qlt_handle_abts_recv(vha, rsp,
3838 (response_t *)pkt);
3839 break;
3840 } else {
3841 qlt_24xx_process_atio_queue(vha, 1);
3842 }
3843 fallthrough;
3844 case ABTS_RESP_24XX:
3845 case CTIO_TYPE7:
3846 case CTIO_CRC2:
3847 qlt_response_pkt_all_vps(vha, rsp, (response_t *)pkt);
3848 break;
3849 case PT_LS4_REQUEST:
3850 qla24xx_nvme_ls4_iocb(vha, (struct pt_ls4_request *)pkt,
3851 rsp->req);
3852 break;
3853 case NOTIFY_ACK_TYPE:
3854 if (pkt->handle == QLA_TGT_SKIP_HANDLE)
3855 qlt_response_pkt_all_vps(vha, rsp,
3856 (response_t *)pkt);
3857 else
3858 qla24xxx_nack_iocb_entry(vha, rsp->req,
3859 (struct nack_to_isp *)pkt);
3860 break;
3861 case MARKER_TYPE:
3862
3863
3864
3865 break;
3866 case ABORT_IOCB_TYPE:
3867 qla24xx_abort_iocb_entry(vha, rsp->req,
3868 (struct abort_entry_24xx *)pkt);
3869 break;
3870 case MBX_IOCB_TYPE:
3871 qla24xx_mbx_iocb_entry(vha, rsp->req,
3872 (struct mbx_24xx_entry *)pkt);
3873 break;
3874 case VP_CTRL_IOCB_TYPE:
3875 qla_ctrlvp_completed(vha, rsp->req,
3876 (struct vp_ctrl_entry_24xx *)pkt);
3877 break;
3878 case PUREX_IOCB_TYPE:
3879 purex_entry = (void *)pkt;
3880 switch (purex_entry->els_frame_payload[3]) {
3881 case ELS_RDP:
3882 pure_item = qla24xx_copy_std_pkt(vha, pkt);
3883 if (!pure_item)
3884 break;
3885 qla24xx_queue_purex_item(vha, pure_item,
3886 qla24xx_process_purex_rdp);
3887 break;
3888 case ELS_FPIN:
3889 if (!vha->hw->flags.scm_enabled) {
3890 ql_log(ql_log_warn, vha, 0x5094,
3891 "SCM not active for this port\n");
3892 break;
3893 }
3894 pure_item = qla27xx_copy_fpin_pkt(vha,
3895 (void **)&pkt, &rsp);
3896 if (!pure_item)
3897 break;
3898 qla24xx_queue_purex_item(vha, pure_item,
3899 qla27xx_process_purex_fpin);
3900 break;
3901
3902 case ELS_AUTH_ELS:
3903 if (qla_chk_cont_iocb_avail(vha, rsp, (response_t *)pkt)) {
3904 ql_dbg(ql_dbg_init, vha, 0x5091,
3905 "Defer processing ELS opcode %#x...\n",
3906 purex_entry->els_frame_payload[3]);
3907 return;
3908 }
3909 qla24xx_auth_els(vha, (void **)&pkt, &rsp);
3910 break;
3911 default:
3912 ql_log(ql_log_warn, vha, 0x509c,
3913 "Discarding ELS Request opcode 0x%x\n",
3914 purex_entry->els_frame_payload[3]);
3915 }
3916 break;
3917 case SA_UPDATE_IOCB_TYPE:
3918 qla28xx_sa_update_iocb_entry(vha, rsp->req,
3919 (struct sa_update_28xx *)pkt);
3920 break;
3921
3922 default:
3923
3924 ql_dbg(ql_dbg_async, vha, 0x5042,
3925 "Received unknown response pkt type 0x%x entry status=%x.\n",
3926 pkt->entry_type, pkt->entry_status);
3927 break;
3928 }
3929 ((response_t *)pkt)->signature = RESPONSE_PROCESSED;
3930 wmb();
3931 }
3932
3933
3934 if (IS_P3P_TYPE(ha)) {
3935 struct device_reg_82xx __iomem *reg = &ha->iobase->isp82;
3936
3937 wrt_reg_dword(®->rsp_q_out[0], rsp->ring_index);
3938 } else {
3939 wrt_reg_dword(rsp->rsp_q_out, rsp->ring_index);
3940 }
3941}
3942
3943static void
3944qla2xxx_check_risc_status(scsi_qla_host_t *vha)
3945{
3946 int rval;
3947 uint32_t cnt;
3948 struct qla_hw_data *ha = vha->hw;
3949 struct device_reg_24xx __iomem *reg = &ha->iobase->isp24;
3950
3951 if (!IS_QLA25XX(ha) && !IS_QLA81XX(ha) && !IS_QLA83XX(ha) &&
3952 !IS_QLA27XX(ha) && !IS_QLA28XX(ha))
3953 return;
3954
3955 rval = QLA_SUCCESS;
3956 wrt_reg_dword(®->iobase_addr, 0x7C00);
3957 rd_reg_dword(®->iobase_addr);
3958 wrt_reg_dword(®->iobase_window, 0x0001);
3959 for (cnt = 10000; (rd_reg_dword(®->iobase_window) & BIT_0) == 0 &&
3960 rval == QLA_SUCCESS; cnt--) {
3961 if (cnt) {
3962 wrt_reg_dword(®->iobase_window, 0x0001);
3963 udelay(10);
3964 } else
3965 rval = QLA_FUNCTION_TIMEOUT;
3966 }
3967 if (rval == QLA_SUCCESS)
3968 goto next_test;
3969
3970 rval = QLA_SUCCESS;
3971 wrt_reg_dword(®->iobase_window, 0x0003);
3972 for (cnt = 100; (rd_reg_dword(®->iobase_window) & BIT_0) == 0 &&
3973 rval == QLA_SUCCESS; cnt--) {
3974 if (cnt) {
3975 wrt_reg_dword(®->iobase_window, 0x0003);
3976 udelay(10);
3977 } else
3978 rval = QLA_FUNCTION_TIMEOUT;
3979 }
3980 if (rval != QLA_SUCCESS)
3981 goto done;
3982
3983next_test:
3984 if (rd_reg_dword(®->iobase_c8) & BIT_3)
3985 ql_log(ql_log_info, vha, 0x504c,
3986 "Additional code -- 0x55AA.\n");
3987
3988done:
3989 wrt_reg_dword(®->iobase_window, 0x0000);
3990 rd_reg_dword(®->iobase_window);
3991}
3992
3993
3994
3995
3996
3997
3998
3999
4000
4001
4002irqreturn_t
4003qla24xx_intr_handler(int irq, void *dev_id)
4004{
4005 scsi_qla_host_t *vha;
4006 struct qla_hw_data *ha;
4007 struct device_reg_24xx __iomem *reg;
4008 int status;
4009 unsigned long iter;
4010 uint32_t stat;
4011 uint32_t hccr;
4012 uint16_t mb[8];
4013 struct rsp_que *rsp;
4014 unsigned long flags;
4015 bool process_atio = false;
4016
4017 rsp = (struct rsp_que *) dev_id;
4018 if (!rsp) {
4019 ql_log(ql_log_info, NULL, 0x5059,
4020 "%s: NULL response queue pointer.\n", __func__);
4021 return IRQ_NONE;
4022 }
4023
4024 ha = rsp->hw;
4025 reg = &ha->iobase->isp24;
4026 status = 0;
4027
4028 if (unlikely(pci_channel_offline(ha->pdev)))
4029 return IRQ_HANDLED;
4030
4031 spin_lock_irqsave(&ha->hardware_lock, flags);
4032 vha = pci_get_drvdata(ha->pdev);
4033 for (iter = 50; iter--; ) {
4034 stat = rd_reg_dword(®->host_status);
4035 if (qla2x00_check_reg32_for_disconnect(vha, stat))
4036 break;
4037 if (stat & HSRX_RISC_PAUSED) {
4038 if (unlikely(pci_channel_offline(ha->pdev)))
4039 break;
4040
4041 hccr = rd_reg_dword(®->hccr);
4042
4043 ql_log(ql_log_warn, vha, 0x504b,
4044 "RISC paused -- HCCR=%x, Dumping firmware.\n",
4045 hccr);
4046
4047 qla2xxx_check_risc_status(vha);
4048
4049 ha->isp_ops->fw_dump(vha);
4050 set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
4051 break;
4052 } else if ((stat & HSRX_RISC_INT) == 0)
4053 break;
4054
4055 switch (stat & 0xff) {
4056 case INTR_ROM_MB_SUCCESS:
4057 case INTR_ROM_MB_FAILED:
4058 case INTR_MB_SUCCESS:
4059 case INTR_MB_FAILED:
4060 qla24xx_mbx_completion(vha, MSW(stat));
4061 status |= MBX_INTERRUPT;
4062
4063 break;
4064 case INTR_ASYNC_EVENT:
4065 mb[0] = MSW(stat);
4066 mb[1] = rd_reg_word(®->mailbox1);
4067 mb[2] = rd_reg_word(®->mailbox2);
4068 mb[3] = rd_reg_word(®->mailbox3);
4069 qla2x00_async_event(vha, rsp, mb);
4070 break;
4071 case INTR_RSP_QUE_UPDATE:
4072 case INTR_RSP_QUE_UPDATE_83XX:
4073 qla24xx_process_response_queue(vha, rsp);
4074 break;
4075 case INTR_ATIO_QUE_UPDATE_27XX:
4076 case INTR_ATIO_QUE_UPDATE:
4077 process_atio = true;
4078 break;
4079 case INTR_ATIO_RSP_QUE_UPDATE:
4080 process_atio = true;
4081 qla24xx_process_response_queue(vha, rsp);
4082 break;
4083 default:
4084 ql_dbg(ql_dbg_async, vha, 0x504f,
4085 "Unrecognized interrupt type (%d).\n", stat * 0xff);
4086 break;
4087 }
4088 wrt_reg_dword(®->hccr, HCCRX_CLR_RISC_INT);
4089 rd_reg_dword_relaxed(®->hccr);
4090 if (unlikely(IS_QLA83XX(ha) && (ha->pdev->revision == 1)))
4091 ndelay(3500);
4092 }
4093 qla2x00_handle_mbx_completion(ha, status);
4094 spin_unlock_irqrestore(&ha->hardware_lock, flags);
4095
4096 if (process_atio) {
4097 spin_lock_irqsave(&ha->tgt.atio_lock, flags);
4098 qlt_24xx_process_atio_queue(vha, 0);
4099 spin_unlock_irqrestore(&ha->tgt.atio_lock, flags);
4100 }
4101
4102 return IRQ_HANDLED;
4103}
4104
4105static irqreturn_t
4106qla24xx_msix_rsp_q(int irq, void *dev_id)
4107{
4108 struct qla_hw_data *ha;
4109 struct rsp_que *rsp;
4110 struct device_reg_24xx __iomem *reg;
4111 struct scsi_qla_host *vha;
4112 unsigned long flags;
4113
4114 rsp = (struct rsp_que *) dev_id;
4115 if (!rsp) {
4116 ql_log(ql_log_info, NULL, 0x505a,
4117 "%s: NULL response queue pointer.\n", __func__);
4118 return IRQ_NONE;
4119 }
4120 ha = rsp->hw;
4121 reg = &ha->iobase->isp24;
4122
4123 spin_lock_irqsave(&ha->hardware_lock, flags);
4124
4125 vha = pci_get_drvdata(ha->pdev);
4126 qla24xx_process_response_queue(vha, rsp);
4127 if (!ha->flags.disable_msix_handshake) {
4128 wrt_reg_dword(®->hccr, HCCRX_CLR_RISC_INT);
4129 rd_reg_dword_relaxed(®->hccr);
4130 }
4131 spin_unlock_irqrestore(&ha->hardware_lock, flags);
4132
4133 return IRQ_HANDLED;
4134}
4135
4136static irqreturn_t
4137qla24xx_msix_default(int irq, void *dev_id)
4138{
4139 scsi_qla_host_t *vha;
4140 struct qla_hw_data *ha;
4141 struct rsp_que *rsp;
4142 struct device_reg_24xx __iomem *reg;
4143 int status;
4144 uint32_t stat;
4145 uint32_t hccr;
4146 uint16_t mb[8];
4147 unsigned long flags;
4148 bool process_atio = false;
4149
4150 rsp = (struct rsp_que *) dev_id;
4151 if (!rsp) {
4152 ql_log(ql_log_info, NULL, 0x505c,
4153 "%s: NULL response queue pointer.\n", __func__);
4154 return IRQ_NONE;
4155 }
4156 ha = rsp->hw;
4157 reg = &ha->iobase->isp24;
4158 status = 0;
4159
4160 spin_lock_irqsave(&ha->hardware_lock, flags);
4161 vha = pci_get_drvdata(ha->pdev);
4162 do {
4163 stat = rd_reg_dword(®->host_status);
4164 if (qla2x00_check_reg32_for_disconnect(vha, stat))
4165 break;
4166 if (stat & HSRX_RISC_PAUSED) {
4167 if (unlikely(pci_channel_offline(ha->pdev)))
4168 break;
4169
4170 hccr = rd_reg_dword(®->hccr);
4171
4172 ql_log(ql_log_info, vha, 0x5050,
4173 "RISC paused -- HCCR=%x, Dumping firmware.\n",
4174 hccr);
4175
4176 qla2xxx_check_risc_status(vha);
4177 vha->hw_err_cnt++;
4178
4179 ha->isp_ops->fw_dump(vha);
4180 set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
4181 break;
4182 } else if ((stat & HSRX_RISC_INT) == 0)
4183 break;
4184
4185 switch (stat & 0xff) {
4186 case INTR_ROM_MB_SUCCESS:
4187 case INTR_ROM_MB_FAILED:
4188 case INTR_MB_SUCCESS:
4189 case INTR_MB_FAILED:
4190 qla24xx_mbx_completion(vha, MSW(stat));
4191 status |= MBX_INTERRUPT;
4192
4193 break;
4194 case INTR_ASYNC_EVENT:
4195 mb[0] = MSW(stat);
4196 mb[1] = rd_reg_word(®->mailbox1);
4197 mb[2] = rd_reg_word(®->mailbox2);
4198 mb[3] = rd_reg_word(®->mailbox3);
4199 qla2x00_async_event(vha, rsp, mb);
4200 break;
4201 case INTR_RSP_QUE_UPDATE:
4202 case INTR_RSP_QUE_UPDATE_83XX:
4203 qla24xx_process_response_queue(vha, rsp);
4204 break;
4205 case INTR_ATIO_QUE_UPDATE_27XX:
4206 case INTR_ATIO_QUE_UPDATE:
4207 process_atio = true;
4208 break;
4209 case INTR_ATIO_RSP_QUE_UPDATE:
4210 process_atio = true;
4211 qla24xx_process_response_queue(vha, rsp);
4212 break;
4213 default:
4214 ql_dbg(ql_dbg_async, vha, 0x5051,
4215 "Unrecognized interrupt type (%d).\n", stat & 0xff);
4216 break;
4217 }
4218 wrt_reg_dword(®->hccr, HCCRX_CLR_RISC_INT);
4219 } while (0);
4220 qla2x00_handle_mbx_completion(ha, status);
4221 spin_unlock_irqrestore(&ha->hardware_lock, flags);
4222
4223 if (process_atio) {
4224 spin_lock_irqsave(&ha->tgt.atio_lock, flags);
4225 qlt_24xx_process_atio_queue(vha, 0);
4226 spin_unlock_irqrestore(&ha->tgt.atio_lock, flags);
4227 }
4228
4229 return IRQ_HANDLED;
4230}
4231
4232irqreturn_t
4233qla2xxx_msix_rsp_q(int irq, void *dev_id)
4234{
4235 struct qla_hw_data *ha;
4236 struct qla_qpair *qpair;
4237
4238 qpair = dev_id;
4239 if (!qpair) {
4240 ql_log(ql_log_info, NULL, 0x505b,
4241 "%s: NULL response queue pointer.\n", __func__);
4242 return IRQ_NONE;
4243 }
4244 ha = qpair->hw;
4245
4246 queue_work_on(smp_processor_id(), ha->wq, &qpair->q_work);
4247
4248 return IRQ_HANDLED;
4249}
4250
4251irqreturn_t
4252qla2xxx_msix_rsp_q_hs(int irq, void *dev_id)
4253{
4254 struct qla_hw_data *ha;
4255 struct qla_qpair *qpair;
4256 struct device_reg_24xx __iomem *reg;
4257 unsigned long flags;
4258
4259 qpair = dev_id;
4260 if (!qpair) {
4261 ql_log(ql_log_info, NULL, 0x505b,
4262 "%s: NULL response queue pointer.\n", __func__);
4263 return IRQ_NONE;
4264 }
4265 ha = qpair->hw;
4266
4267 reg = &ha->iobase->isp24;
4268 spin_lock_irqsave(&ha->hardware_lock, flags);
4269 wrt_reg_dword(®->hccr, HCCRX_CLR_RISC_INT);
4270 spin_unlock_irqrestore(&ha->hardware_lock, flags);
4271
4272 queue_work_on(smp_processor_id(), ha->wq, &qpair->q_work);
4273
4274 return IRQ_HANDLED;
4275}
4276
4277
4278
4279struct qla_init_msix_entry {
4280 const char *name;
4281 irq_handler_t handler;
4282};
4283
4284static const struct qla_init_msix_entry msix_entries[] = {
4285 { "default", qla24xx_msix_default },
4286 { "rsp_q", qla24xx_msix_rsp_q },
4287 { "atio_q", qla83xx_msix_atio_q },
4288 { "qpair_multiq", qla2xxx_msix_rsp_q },
4289 { "qpair_multiq_hs", qla2xxx_msix_rsp_q_hs },
4290};
4291
4292static const struct qla_init_msix_entry qla82xx_msix_entries[] = {
4293 { "qla2xxx (default)", qla82xx_msix_default },
4294 { "qla2xxx (rsp_q)", qla82xx_msix_rsp_q },
4295};
4296
4297static int
4298qla24xx_enable_msix(struct qla_hw_data *ha, struct rsp_que *rsp)
4299{
4300 int i, ret;
4301 struct qla_msix_entry *qentry;
4302 scsi_qla_host_t *vha = pci_get_drvdata(ha->pdev);
4303 int min_vecs = QLA_BASE_VECTORS;
4304 struct irq_affinity desc = {
4305 .pre_vectors = QLA_BASE_VECTORS,
4306 };
4307
4308 if (QLA_TGT_MODE_ENABLED() && (ql2xenablemsix != 0) &&
4309 IS_ATIO_MSIX_CAPABLE(ha)) {
4310 desc.pre_vectors++;
4311 min_vecs++;
4312 }
4313
4314 if (USER_CTRL_IRQ(ha) || !ha->mqiobase) {
4315
4316 ret = pci_alloc_irq_vectors(ha->pdev, min_vecs,
4317 min((u16)ha->msix_count, (u16)(num_online_cpus() + min_vecs)),
4318 PCI_IRQ_MSIX);
4319 } else
4320 ret = pci_alloc_irq_vectors_affinity(ha->pdev, min_vecs,
4321 min((u16)ha->msix_count, (u16)(num_online_cpus() + min_vecs)),
4322 PCI_IRQ_MSIX | PCI_IRQ_AFFINITY,
4323 &desc);
4324
4325 if (ret < 0) {
4326 ql_log(ql_log_fatal, vha, 0x00c7,
4327 "MSI-X: Failed to enable support, "
4328 "giving up -- %d/%d.\n",
4329 ha->msix_count, ret);
4330 goto msix_out;
4331 } else if (ret < ha->msix_count) {
4332 ql_log(ql_log_info, vha, 0x00c6,
4333 "MSI-X: Using %d vectors\n", ret);
4334 ha->msix_count = ret;
4335
4336 if (ha->mqiobase && (ql2xmqsupport || ql2xnvmeenable)) {
4337 ha->max_req_queues = ha->msix_count - 1;
4338
4339
4340 if (QLA_TGT_MODE_ENABLED())
4341 ha->max_req_queues--;
4342
4343 ha->max_rsp_queues = ha->max_req_queues;
4344
4345 ha->max_qpairs = ha->max_req_queues - 1;
4346 ql_dbg_pci(ql_dbg_init, ha->pdev, 0x0190,
4347 "Adjusted Max no of queues pairs: %d.\n", ha->max_qpairs);
4348 }
4349 }
4350 vha->irq_offset = desc.pre_vectors;
4351 ha->msix_entries = kcalloc(ha->msix_count,
4352 sizeof(struct qla_msix_entry),
4353 GFP_KERNEL);
4354 if (!ha->msix_entries) {
4355 ql_log(ql_log_fatal, vha, 0x00c8,
4356 "Failed to allocate memory for ha->msix_entries.\n");
4357 ret = -ENOMEM;
4358 goto free_irqs;
4359 }
4360 ha->flags.msix_enabled = 1;
4361
4362 for (i = 0; i < ha->msix_count; i++) {
4363 qentry = &ha->msix_entries[i];
4364 qentry->vector = pci_irq_vector(ha->pdev, i);
4365 qentry->entry = i;
4366 qentry->have_irq = 0;
4367 qentry->in_use = 0;
4368 qentry->handle = NULL;
4369 }
4370
4371
4372 for (i = 0; i < QLA_BASE_VECTORS; i++) {
4373 qentry = &ha->msix_entries[i];
4374 qentry->handle = rsp;
4375 rsp->msix = qentry;
4376 scnprintf(qentry->name, sizeof(qentry->name),
4377 "qla2xxx%lu_%s", vha->host_no, msix_entries[i].name);
4378 if (IS_P3P_TYPE(ha))
4379 ret = request_irq(qentry->vector,
4380 qla82xx_msix_entries[i].handler,
4381 0, qla82xx_msix_entries[i].name, rsp);
4382 else
4383 ret = request_irq(qentry->vector,
4384 msix_entries[i].handler,
4385 0, qentry->name, rsp);
4386 if (ret)
4387 goto msix_register_fail;
4388 qentry->have_irq = 1;
4389 qentry->in_use = 1;
4390 }
4391
4392
4393
4394
4395
4396 if (QLA_TGT_MODE_ENABLED() && (ql2xenablemsix != 0) &&
4397 IS_ATIO_MSIX_CAPABLE(ha)) {
4398 qentry = &ha->msix_entries[QLA_ATIO_VECTOR];
4399 rsp->msix = qentry;
4400 qentry->handle = rsp;
4401 scnprintf(qentry->name, sizeof(qentry->name),
4402 "qla2xxx%lu_%s", vha->host_no,
4403 msix_entries[QLA_ATIO_VECTOR].name);
4404 qentry->in_use = 1;
4405 ret = request_irq(qentry->vector,
4406 msix_entries[QLA_ATIO_VECTOR].handler,
4407 0, qentry->name, rsp);
4408 qentry->have_irq = 1;
4409 }
4410
4411msix_register_fail:
4412 if (ret) {
4413 ql_log(ql_log_fatal, vha, 0x00cb,
4414 "MSI-X: unable to register handler -- %x/%d.\n",
4415 qentry->vector, ret);
4416 qla2x00_free_irqs(vha);
4417 ha->mqenable = 0;
4418 goto msix_out;
4419 }
4420
4421
4422 if (IS_QLA83XX(ha) || IS_QLA27XX(ha) || IS_QLA28XX(ha)) {
4423 if (ha->msixbase && ha->mqiobase &&
4424 (ha->max_rsp_queues > 1 || ha->max_req_queues > 1 ||
4425 ql2xmqsupport))
4426 ha->mqenable = 1;
4427 } else
4428 if (ha->mqiobase &&
4429 (ha->max_rsp_queues > 1 || ha->max_req_queues > 1 ||
4430 ql2xmqsupport))
4431 ha->mqenable = 1;
4432 ql_dbg(ql_dbg_multiq, vha, 0xc005,
4433 "mqiobase=%p, max_rsp_queues=%d, max_req_queues=%d.\n",
4434 ha->mqiobase, ha->max_rsp_queues, ha->max_req_queues);
4435 ql_dbg(ql_dbg_init, vha, 0x0055,
4436 "mqiobase=%p, max_rsp_queues=%d, max_req_queues=%d.\n",
4437 ha->mqiobase, ha->max_rsp_queues, ha->max_req_queues);
4438
4439msix_out:
4440 return ret;
4441
4442free_irqs:
4443 pci_free_irq_vectors(ha->pdev);
4444 goto msix_out;
4445}
4446
4447int
4448qla2x00_request_irqs(struct qla_hw_data *ha, struct rsp_que *rsp)
4449{
4450 int ret = QLA_FUNCTION_FAILED;
4451 device_reg_t *reg = ha->iobase;
4452 scsi_qla_host_t *vha = pci_get_drvdata(ha->pdev);
4453
4454
4455 if (ql2xenablemsix == 0 || (!IS_QLA2432(ha) && !IS_QLA2532(ha) &&
4456 !IS_QLA8432(ha) && !IS_CNA_CAPABLE(ha) && !IS_QLA2031(ha) &&
4457 !IS_QLAFX00(ha) && !IS_QLA27XX(ha) && !IS_QLA28XX(ha)))
4458 goto skip_msi;
4459
4460 if (ql2xenablemsix == 2)
4461 goto skip_msix;
4462
4463 if (ha->pdev->subsystem_vendor == PCI_VENDOR_ID_HP &&
4464 (ha->pdev->subsystem_device == 0x7040 ||
4465 ha->pdev->subsystem_device == 0x7041 ||
4466 ha->pdev->subsystem_device == 0x1705)) {
4467 ql_log(ql_log_warn, vha, 0x0034,
4468 "MSI-X: Unsupported ISP 2432 SSVID/SSDID (0x%X,0x%X).\n",
4469 ha->pdev->subsystem_vendor,
4470 ha->pdev->subsystem_device);
4471 goto skip_msi;
4472 }
4473
4474 if (IS_QLA2432(ha) && (ha->pdev->revision < QLA_MSIX_CHIP_REV_24XX)) {
4475 ql_log(ql_log_warn, vha, 0x0035,
4476 "MSI-X; Unsupported ISP2432 (0x%X, 0x%X).\n",
4477 ha->pdev->revision, QLA_MSIX_CHIP_REV_24XX);
4478 goto skip_msix;
4479 }
4480
4481 ret = qla24xx_enable_msix(ha, rsp);
4482 if (!ret) {
4483 ql_dbg(ql_dbg_init, vha, 0x0036,
4484 "MSI-X: Enabled (0x%X, 0x%X).\n",
4485 ha->chip_revision, ha->fw_attributes);
4486 goto clear_risc_ints;
4487 }
4488
4489skip_msix:
4490
4491 ql_log(ql_log_info, vha, 0x0037,
4492 "Falling back-to MSI mode -- ret=%d.\n", ret);
4493
4494 if (!IS_QLA24XX(ha) && !IS_QLA2532(ha) && !IS_QLA8432(ha) &&
4495 !IS_QLA8001(ha) && !IS_P3P_TYPE(ha) && !IS_QLAFX00(ha) &&
4496 !IS_QLA27XX(ha) && !IS_QLA28XX(ha))
4497 goto skip_msi;
4498
4499 ret = pci_alloc_irq_vectors(ha->pdev, 1, 1, PCI_IRQ_MSI);
4500 if (ret > 0) {
4501 ql_dbg(ql_dbg_init, vha, 0x0038,
4502 "MSI: Enabled.\n");
4503 ha->flags.msi_enabled = 1;
4504 } else
4505 ql_log(ql_log_warn, vha, 0x0039,
4506 "Falling back-to INTa mode -- ret=%d.\n", ret);
4507skip_msi:
4508
4509
4510 if (!ha->flags.msi_enabled && IS_QLA82XX(ha))
4511 return QLA_FUNCTION_FAILED;
4512
4513 ret = request_irq(ha->pdev->irq, ha->isp_ops->intr_handler,
4514 ha->flags.msi_enabled ? 0 : IRQF_SHARED,
4515 QLA2XXX_DRIVER_NAME, rsp);
4516 if (ret) {
4517 ql_log(ql_log_warn, vha, 0x003a,
4518 "Failed to reserve interrupt %d already in use.\n",
4519 ha->pdev->irq);
4520 goto fail;
4521 } else if (!ha->flags.msi_enabled) {
4522 ql_dbg(ql_dbg_init, vha, 0x0125,
4523 "INTa mode: Enabled.\n");
4524 ha->flags.mr_intr_valid = 1;
4525
4526 ha->max_qpairs = 0;
4527 }
4528
4529clear_risc_ints:
4530 if (IS_FWI2_CAPABLE(ha) || IS_QLAFX00(ha))
4531 goto fail;
4532
4533 spin_lock_irq(&ha->hardware_lock);
4534 wrt_reg_word(®->isp.semaphore, 0);
4535 spin_unlock_irq(&ha->hardware_lock);
4536
4537fail:
4538 return ret;
4539}
4540
4541void
4542qla2x00_free_irqs(scsi_qla_host_t *vha)
4543{
4544 struct qla_hw_data *ha = vha->hw;
4545 struct rsp_que *rsp;
4546 struct qla_msix_entry *qentry;
4547 int i;
4548
4549
4550
4551
4552
4553 if (!ha->rsp_q_map || !ha->rsp_q_map[0])
4554 goto free_irqs;
4555 rsp = ha->rsp_q_map[0];
4556
4557 if (ha->flags.msix_enabled) {
4558 for (i = 0; i < ha->msix_count; i++) {
4559 qentry = &ha->msix_entries[i];
4560 if (qentry->have_irq) {
4561 irq_set_affinity_notifier(qentry->vector, NULL);
4562 free_irq(pci_irq_vector(ha->pdev, i), qentry->handle);
4563 }
4564 }
4565 kfree(ha->msix_entries);
4566 ha->msix_entries = NULL;
4567 ha->flags.msix_enabled = 0;
4568 ql_dbg(ql_dbg_init, vha, 0x0042,
4569 "Disabled MSI-X.\n");
4570 } else {
4571 free_irq(pci_irq_vector(ha->pdev, 0), rsp);
4572 }
4573
4574free_irqs:
4575 pci_free_irq_vectors(ha->pdev);
4576}
4577
4578int qla25xx_request_irq(struct qla_hw_data *ha, struct qla_qpair *qpair,
4579 struct qla_msix_entry *msix, int vector_type)
4580{
4581 const struct qla_init_msix_entry *intr = &msix_entries[vector_type];
4582 scsi_qla_host_t *vha = pci_get_drvdata(ha->pdev);
4583 int ret;
4584
4585 scnprintf(msix->name, sizeof(msix->name),
4586 "qla2xxx%lu_qpair%d", vha->host_no, qpair->id);
4587 ret = request_irq(msix->vector, intr->handler, 0, msix->name, qpair);
4588 if (ret) {
4589 ql_log(ql_log_fatal, vha, 0x00e6,
4590 "MSI-X: Unable to register handler -- %x/%d.\n",
4591 msix->vector, ret);
4592 return ret;
4593 }
4594 msix->have_irq = 1;
4595 msix->handle = qpair;
4596 return ret;
4597}
4598