// SPDX-License-Identifier: GPL-2.0-only
/*
 * QLogic Fibre Channel HBA Driver
 * Copyright (c)  2003-2014 QLogic Corporation
 */
6#include "qla_def.h"
7#include "qla_target.h"
8#include "qla_gbl.h"
9
10#include <linux/delay.h>
11#include <linux/slab.h>
12#include <linux/cpu.h>
13#include <linux/t10-pi.h>
14#include <scsi/scsi_tcq.h>
15#include <scsi/scsi_bsg_fc.h>
16#include <scsi/scsi_eh.h>
17#include <scsi/fc/fc_fs.h>
18#include <linux/nvme-fc-driver.h>
19
20static void qla2x00_mbx_completion(scsi_qla_host_t *, uint16_t);
21static void qla2x00_status_entry(scsi_qla_host_t *, struct rsp_que *, void *);
22static void qla2x00_status_cont_entry(struct rsp_que *, sts_cont_entry_t *);
23static int qla2x00_error_entry(scsi_qla_host_t *, struct rsp_que *,
24 sts_entry_t *);
25static void qla27xx_process_purex_fpin(struct scsi_qla_host *vha,
26 struct purex_item *item);
27static struct purex_item *qla24xx_alloc_purex_item(scsi_qla_host_t *vha,
28 uint16_t size);
29static struct purex_item *qla24xx_copy_std_pkt(struct scsi_qla_host *vha,
30 void *pkt);
31static struct purex_item *qla27xx_copy_fpin_pkt(struct scsi_qla_host *vha,
32 void **pkt, struct rsp_que **rsp);
33
34static void
35qla27xx_process_purex_fpin(struct scsi_qla_host *vha, struct purex_item *item)
36{
37 void *pkt = &item->iocb;
38 uint16_t pkt_size = item->size;
39
40 ql_dbg(ql_dbg_init + ql_dbg_verbose, vha, 0x508d,
41 "%s: Enter\n", __func__);
42
43 ql_dbg(ql_dbg_init + ql_dbg_verbose, vha, 0x508e,
44 "-------- ELS REQ -------\n");
45 ql_dump_buffer(ql_dbg_init + ql_dbg_verbose, vha, 0x508f,
46 pkt, pkt_size);
47
48 fc_host_fpin_rcv(vha->host, pkt_size, (char *)pkt);
49}
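/*
 * Illustrative sketch (not part of the driver source): an FPIN PUREX IOCB is
 * normally reassembled from the response ring and then queued so that the DPC
 * thread invokes the handler above, roughly:
 *
 *	item = qla27xx_copy_fpin_pkt(vha, &pkt, &rsp);
 *	if (item)
 *		qla24xx_queue_purex_item(vha, item,
 *		    qla27xx_process_purex_fpin);
 *
 * The actual call site lives in the PUREX dispatch path; this only shows how
 * the copy helper and the queue helper fit together.
 */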
50
51const char *const port_state_str[] = {
52 "Unknown",
53 "UNCONFIGURED",
54 "DEAD",
55 "LOST",
56 "ONLINE"
57};
58
59static void
60qla24xx_process_abts(struct scsi_qla_host *vha, struct purex_item *pkt)
61{
62 struct abts_entry_24xx *abts =
63 (struct abts_entry_24xx *)&pkt->iocb;
64 struct qla_hw_data *ha = vha->hw;
65 struct els_entry_24xx *rsp_els;
66 struct abts_entry_24xx *abts_rsp;
67 dma_addr_t dma;
68 uint32_t fctl;
69 int rval;
70
71 ql_dbg(ql_dbg_init, vha, 0x0286, "%s: entered.\n", __func__);
72
73 ql_log(ql_log_warn, vha, 0x0287,
74 "Processing ABTS xchg=%#x oxid=%#x rxid=%#x seqid=%#x seqcnt=%#x\n",
75 abts->rx_xch_addr_to_abort, abts->ox_id, abts->rx_id,
76 abts->seq_id, abts->seq_cnt);
77 ql_dbg(ql_dbg_init + ql_dbg_verbose, vha, 0x0287,
78 "-------- ABTS RCV -------\n");
79 ql_dump_buffer(ql_dbg_init + ql_dbg_verbose, vha, 0x0287,
80 (uint8_t *)abts, sizeof(*abts));
81
82 rsp_els = dma_alloc_coherent(&ha->pdev->dev, sizeof(*rsp_els), &dma,
83 GFP_KERNEL);
84 if (!rsp_els) {
85 ql_log(ql_log_warn, vha, 0x0287,
86 "Failed allocate dma buffer ABTS/ELS RSP.\n");
87 return;
88 }
89
90
91 rsp_els->entry_type = ELS_IOCB_TYPE;
92 rsp_els->entry_count = 1;
93 rsp_els->nport_handle = cpu_to_le16(~0);
94 rsp_els->rx_xchg_address = abts->rx_xch_addr_to_abort;
95 rsp_els->control_flags = cpu_to_le16(EPD_RX_XCHG);
96 ql_dbg(ql_dbg_init, vha, 0x0283,
97 "Sending ELS Response to terminate exchange %#x...\n",
98 abts->rx_xch_addr_to_abort);
99 ql_dbg(ql_dbg_init + ql_dbg_verbose, vha, 0x0283,
100 "-------- ELS RSP -------\n");
101 ql_dump_buffer(ql_dbg_init + ql_dbg_verbose, vha, 0x0283,
102 (uint8_t *)rsp_els, sizeof(*rsp_els));
103 rval = qla2x00_issue_iocb(vha, rsp_els, dma, 0);
104 if (rval) {
105 ql_log(ql_log_warn, vha, 0x0288,
106 "%s: iocb failed to execute -> %x\n", __func__, rval);
107 } else if (rsp_els->comp_status) {
108 ql_log(ql_log_warn, vha, 0x0289,
109 "%s: iocb failed to complete -> completion=%#x subcode=(%#x,%#x)\n",
110 __func__, rsp_els->comp_status,
111 rsp_els->error_subcode_1, rsp_els->error_subcode_2);
112 } else {
113 ql_dbg(ql_dbg_init, vha, 0x028a,
114 "%s: abort exchange done.\n", __func__);
115 }
116
117
118 abts_rsp = (void *)rsp_els;
119 memset(abts_rsp, 0, sizeof(*abts_rsp));
120 abts_rsp->entry_type = ABTS_RSP_TYPE;
121 abts_rsp->entry_count = 1;
122 abts_rsp->nport_handle = abts->nport_handle;
123 abts_rsp->vp_idx = abts->vp_idx;
124 abts_rsp->sof_type = abts->sof_type & 0xf0;
125 abts_rsp->rx_xch_addr = abts->rx_xch_addr;
126 abts_rsp->d_id[0] = abts->s_id[0];
127 abts_rsp->d_id[1] = abts->s_id[1];
128 abts_rsp->d_id[2] = abts->s_id[2];
129 abts_rsp->r_ctl = FC_ROUTING_BLD | FC_R_CTL_BLD_BA_ACC;
130 abts_rsp->s_id[0] = abts->d_id[0];
131 abts_rsp->s_id[1] = abts->d_id[1];
132 abts_rsp->s_id[2] = abts->d_id[2];
133 abts_rsp->cs_ctl = abts->cs_ctl;
134
135 fctl = ~(abts->f_ctl[2] | 0x7F) << 16 |
136 FC_F_CTL_LAST_SEQ | FC_F_CTL_END_SEQ | FC_F_CTL_SEQ_INIT;
137 abts_rsp->f_ctl[0] = fctl >> 0 & 0xff;
138 abts_rsp->f_ctl[1] = fctl >> 8 & 0xff;
139 abts_rsp->f_ctl[2] = fctl >> 16 & 0xff;
140 abts_rsp->type = FC_TYPE_BLD;
141 abts_rsp->rx_id = abts->rx_id;
142 abts_rsp->ox_id = abts->ox_id;
143 abts_rsp->payload.ba_acc.aborted_rx_id = abts->rx_id;
144 abts_rsp->payload.ba_acc.aborted_ox_id = abts->ox_id;
145 abts_rsp->payload.ba_acc.high_seq_cnt = cpu_to_le16(~0);
146 abts_rsp->rx_xch_addr_to_abort = abts->rx_xch_addr_to_abort;
147 ql_dbg(ql_dbg_init, vha, 0x028b,
148 "Sending BA ACC response to ABTS %#x...\n",
149 abts->rx_xch_addr_to_abort);
150 ql_dbg(ql_dbg_init + ql_dbg_verbose, vha, 0x028b,
151 "-------- ELS RSP -------\n");
152 ql_dump_buffer(ql_dbg_init + ql_dbg_verbose, vha, 0x028b,
153 (uint8_t *)abts_rsp, sizeof(*abts_rsp));
154 rval = qla2x00_issue_iocb(vha, abts_rsp, dma, 0);
155 if (rval) {
156 ql_log(ql_log_warn, vha, 0x028c,
157 "%s: iocb failed to execute -> %x\n", __func__, rval);
158 } else if (abts_rsp->comp_status) {
159 ql_log(ql_log_warn, vha, 0x028d,
160 "%s: iocb failed to complete -> completion=%#x subcode=(%#x,%#x)\n",
161 __func__, abts_rsp->comp_status,
162 abts_rsp->payload.error.subcode1,
163 abts_rsp->payload.error.subcode2);
164 } else {
		ql_dbg(ql_dbg_init, vha, 0x028e,
166 "%s: done.\n", __func__);
167 }
168
169 dma_free_coherent(&ha->pdev->dev, sizeof(*rsp_els), rsp_els, dma);
170}
171
/**
 * __qla_consume_iocb - mark the remaining entries of a multi-entry IOCB as
 *	processed and advance the response queue ring past them.
 * @vha: host adapter pointer
 * @pkt: pointer to the current packet; updated to the last consumed entry
 * @rsp: response queue pointer
 */
183void __qla_consume_iocb(struct scsi_qla_host *vha,
184 void **pkt, struct rsp_que **rsp)
185{
186 struct rsp_que *rsp_q = *rsp;
187 response_t *new_pkt;
188 uint16_t entry_count_remaining;
189 struct purex_entry_24xx *purex = *pkt;
190
191 entry_count_remaining = purex->entry_count;
192 while (entry_count_remaining > 0) {
193 new_pkt = rsp_q->ring_ptr;
194 *pkt = new_pkt;
195
196 rsp_q->ring_index++;
197 if (rsp_q->ring_index == rsp_q->length) {
198 rsp_q->ring_index = 0;
199 rsp_q->ring_ptr = rsp_q->ring;
200 } else {
201 rsp_q->ring_ptr++;
202 }
203
204 new_pkt->signature = RESPONSE_PROCESSED;
205
206 wmb();
207 --entry_count_remaining;
208 }
209}
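/*
 * The ring walk above advances ring_index with an explicit wrap check.  A
 * minimal stand-alone sketch of the same arithmetic (illustrative only, the
 * helper name is hypothetical):
 *
 *	static inline uint16_t ring_next(uint16_t index, uint16_t length)
 *	{
 *		return (index + 1 == length) ? 0 : index + 1;
 *	}
 *
 * i.e. after the last slot is consumed the cursor wraps back to the start of
 * the response queue ring.
 */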
210
/**
 * __qla_copy_purex_to_buffer - copy a PUREX ELS frame, including any status
 *	continuation entries, into a flat caller-supplied buffer.
 * @vha: host adapter pointer
 * @pkt: pointer to the current packet; updated as entries are consumed
 * @rsp: response queue pointer
 * @buf: destination buffer
 * @buf_len: destination buffer length in bytes
 *
 * Returns 0 on success or -EIO on a short buffer or a truncated frame.
 */
220int __qla_copy_purex_to_buffer(struct scsi_qla_host *vha,
221 void **pkt, struct rsp_que **rsp, u8 *buf, u32 buf_len)
222{
223 struct purex_entry_24xx *purex = *pkt;
224 struct rsp_que *rsp_q = *rsp;
225 sts_cont_entry_t *new_pkt;
226 uint16_t no_bytes = 0, total_bytes = 0, pending_bytes = 0;
227 uint16_t buffer_copy_offset = 0;
228 uint16_t entry_count_remaining;
229 u16 tpad;
230
231 entry_count_remaining = purex->entry_count;
232 total_bytes = (le16_to_cpu(purex->frame_size) & 0x0FFF)
233 - PURX_ELS_HEADER_SIZE;
234
235
236
237
238
239 tpad = roundup(total_bytes, 4);
240
241 if (buf_len < tpad) {
242 ql_dbg(ql_dbg_async, vha, 0x5084,
243 "%s buffer is too small %d < %d\n",
244 __func__, buf_len, tpad);
245 __qla_consume_iocb(vha, pkt, rsp);
246 return -EIO;
247 }
248
249 pending_bytes = total_bytes = tpad;
250 no_bytes = (pending_bytes > sizeof(purex->els_frame_payload)) ?
251 sizeof(purex->els_frame_payload) : pending_bytes;
252
253 memcpy(buf, &purex->els_frame_payload[0], no_bytes);
254 buffer_copy_offset += no_bytes;
255 pending_bytes -= no_bytes;
256 --entry_count_remaining;
257
258 ((response_t *)purex)->signature = RESPONSE_PROCESSED;
259
260 wmb();
261
262 do {
263 while ((total_bytes > 0) && (entry_count_remaining > 0)) {
264 new_pkt = (sts_cont_entry_t *)rsp_q->ring_ptr;
265 *pkt = new_pkt;
266
267 if (new_pkt->entry_type != STATUS_CONT_TYPE) {
268 ql_log(ql_log_warn, vha, 0x507a,
269 "Unexpected IOCB type, partial data 0x%x\n",
270 buffer_copy_offset);
271 break;
272 }
273
274 rsp_q->ring_index++;
275 if (rsp_q->ring_index == rsp_q->length) {
276 rsp_q->ring_index = 0;
277 rsp_q->ring_ptr = rsp_q->ring;
278 } else {
279 rsp_q->ring_ptr++;
280 }
281 no_bytes = (pending_bytes > sizeof(new_pkt->data)) ?
282 sizeof(new_pkt->data) : pending_bytes;
283 if ((buffer_copy_offset + no_bytes) <= total_bytes) {
284 memcpy((buf + buffer_copy_offset), new_pkt->data,
285 no_bytes);
286 buffer_copy_offset += no_bytes;
287 pending_bytes -= no_bytes;
288 --entry_count_remaining;
289 } else {
290 ql_log(ql_log_warn, vha, 0x5044,
291 "Attempt to copy more that we got, optimizing..%x\n",
292 buffer_copy_offset);
293 memcpy((buf + buffer_copy_offset), new_pkt->data,
294 total_bytes - buffer_copy_offset);
295 }
296
297 ((response_t *)new_pkt)->signature = RESPONSE_PROCESSED;
298
299 wmb();
300 }
301
302 if (pending_bytes != 0 || entry_count_remaining != 0) {
303 ql_log(ql_log_fatal, vha, 0x508b,
304 "Dropping partial Data, underrun bytes = 0x%x, entry cnts 0x%x\n",
305 total_bytes, entry_count_remaining);
306 return -EIO;
307 }
308 } while (entry_count_remaining > 0);
309
310 be32_to_cpu_array((u32 *)buf, (__be32 *)buf, total_bytes >> 2);
311
312 return 0;
313}
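/*
 * Assumed caller shape for the helper above (illustrative, not an exact call
 * site): the consumer supplies a buffer at least as large as the frame size
 * rounded up to a 4-byte multiple and lets the helper drain the PUREX entry
 * plus any status continuation entries:
 *
 *	u8 els_buf[256];	// sized for the expected ELS payload
 *
 *	if (__qla_copy_purex_to_buffer(vha, &pkt, &rsp,
 *	    els_buf, sizeof(els_buf)) == 0) {
 *		// els_buf now holds the ELS payload in CPU byte order
 *	}
 *
 * A short buffer or a truncated frame is consumed and reported as -EIO.
 */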
314
/**
 * qla2100_intr_handler() - Process interrupts for the ISP2100 and ISP2200.
 * @irq: interrupt number
 * @dev_id: SCSI driver HA context
 *
 * Called by system whenever the host adapter generates an interrupt.
 *
 * Returns handled flag.
 */
324irqreturn_t
325qla2100_intr_handler(int irq, void *dev_id)
326{
327 scsi_qla_host_t *vha;
328 struct qla_hw_data *ha;
329 struct device_reg_2xxx __iomem *reg;
330 int status;
331 unsigned long iter;
332 uint16_t hccr;
333 uint16_t mb[8];
334 struct rsp_que *rsp;
335 unsigned long flags;
336
337 rsp = (struct rsp_que *) dev_id;
338 if (!rsp) {
339 ql_log(ql_log_info, NULL, 0x505d,
340 "%s: NULL response queue pointer.\n", __func__);
341 return (IRQ_NONE);
342 }
343
344 ha = rsp->hw;
345 reg = &ha->iobase->isp;
346 status = 0;
347
348 spin_lock_irqsave(&ha->hardware_lock, flags);
349 vha = pci_get_drvdata(ha->pdev);
350 for (iter = 50; iter--; ) {
		hccr = rd_reg_word(&reg->hccr);
352 if (qla2x00_check_reg16_for_disconnect(vha, hccr))
353 break;
354 if (hccr & HCCR_RISC_PAUSE) {
355 if (pci_channel_offline(ha->pdev))
356 break;
357
358
359
360
361
362
			wrt_reg_word(&reg->hccr, HCCR_RESET_RISC);
			rd_reg_word(&reg->hccr);
365
366 ha->isp_ops->fw_dump(vha);
367 set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
368 break;
		} else if ((rd_reg_word(&reg->istatus) & ISR_RISC_INT) == 0)
370 break;
371
		if (rd_reg_word(&reg->semaphore) & BIT_0) {
			wrt_reg_word(&reg->hccr, HCCR_CLR_RISC_INT);
			rd_reg_word(&reg->hccr);
375
376
377 mb[0] = RD_MAILBOX_REG(ha, reg, 0);
378 if (mb[0] > 0x3fff && mb[0] < 0x8000) {
379 qla2x00_mbx_completion(vha, mb[0]);
380 status |= MBX_INTERRUPT;
381 } else if (mb[0] > 0x7fff && mb[0] < 0xc000) {
382 mb[1] = RD_MAILBOX_REG(ha, reg, 1);
383 mb[2] = RD_MAILBOX_REG(ha, reg, 2);
384 mb[3] = RD_MAILBOX_REG(ha, reg, 3);
385 qla2x00_async_event(vha, rsp, mb);
386 } else {
387
388 ql_dbg(ql_dbg_async, vha, 0x5025,
389 "Unrecognized interrupt type (%d).\n",
390 mb[0]);
391 }
392
			wrt_reg_word(&reg->semaphore, 0);
			rd_reg_word(&reg->semaphore);
395 } else {
396 qla2x00_process_response_queue(rsp);
397
			wrt_reg_word(&reg->hccr, HCCR_CLR_RISC_INT);
			rd_reg_word(&reg->hccr);
400 }
401 }
402 qla2x00_handle_mbx_completion(ha, status);
403 spin_unlock_irqrestore(&ha->hardware_lock, flags);
404
405 return (IRQ_HANDLED);
406}
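/*
 * For reference, a handler like the one above is hooked up with request_irq()
 * during adapter setup.  A hedged sketch (the IRQF_SHARED flag and the rsp
 * cookie shown here are assumptions; the real wiring is done by the IRQ setup
 * code and may use MSI/MSI-X instead):
 *
 *	ret = request_irq(ha->pdev->irq, qla2100_intr_handler,
 *	    IRQF_SHARED, QLA2XXX_DRIVER_NAME, rsp);
 *	if (ret)
 *		// fail or fall back during adapter initialization
 */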
407
408bool
409qla2x00_check_reg32_for_disconnect(scsi_qla_host_t *vha, uint32_t reg)
410{
411
412 if (reg == 0xffffffff && !pci_channel_offline(vha->hw->pdev)) {
413 if (!test_and_set_bit(PFLG_DISCONNECTED, &vha->pci_flags) &&
414 !test_bit(PFLG_DRIVER_REMOVING, &vha->pci_flags) &&
415 !test_bit(PFLG_DRIVER_PROBING, &vha->pci_flags)) {
416 qla_schedule_eeh_work(vha);
417 }
418 return true;
419 } else
420 return false;
421}
422
423bool
424qla2x00_check_reg16_for_disconnect(scsi_qla_host_t *vha, uint16_t reg)
425{
426 return qla2x00_check_reg32_for_disconnect(vha, 0xffff0000 | reg);
427}
428
/**
 * qla2300_intr_handler() - Process interrupts for the ISP23xx family.
 * @irq: interrupt number
 * @dev_id: SCSI driver HA context
 *
 * Called by system whenever the host adapter generates an interrupt.
 *
 * Returns handled flag.
 */
438irqreturn_t
439qla2300_intr_handler(int irq, void *dev_id)
440{
441 scsi_qla_host_t *vha;
442 struct device_reg_2xxx __iomem *reg;
443 int status;
444 unsigned long iter;
445 uint32_t stat;
446 uint16_t hccr;
447 uint16_t mb[8];
448 struct rsp_que *rsp;
449 struct qla_hw_data *ha;
450 unsigned long flags;
451
452 rsp = (struct rsp_que *) dev_id;
453 if (!rsp) {
454 ql_log(ql_log_info, NULL, 0x5058,
455 "%s: NULL response queue pointer.\n", __func__);
456 return (IRQ_NONE);
457 }
458
459 ha = rsp->hw;
460 reg = &ha->iobase->isp;
461 status = 0;
462
463 spin_lock_irqsave(&ha->hardware_lock, flags);
464 vha = pci_get_drvdata(ha->pdev);
465 for (iter = 50; iter--; ) {
		stat = rd_reg_dword(&reg->u.isp2300.host_status);
467 if (qla2x00_check_reg32_for_disconnect(vha, stat))
468 break;
469 if (stat & HSR_RISC_PAUSED) {
470 if (unlikely(pci_channel_offline(ha->pdev)))
471 break;
472
			hccr = rd_reg_word(&reg->hccr);
474
475 if (hccr & (BIT_15 | BIT_13 | BIT_11 | BIT_8))
476 ql_log(ql_log_warn, vha, 0x5026,
477 "Parity error -- HCCR=%x, Dumping "
478 "firmware.\n", hccr);
479 else
480 ql_log(ql_log_warn, vha, 0x5027,
481 "RISC paused -- HCCR=%x, Dumping "
482 "firmware.\n", hccr);
483
484
485
486
487
488
			wrt_reg_word(&reg->hccr, HCCR_RESET_RISC);
			rd_reg_word(&reg->hccr);
491
492 ha->isp_ops->fw_dump(vha);
493 set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
494 break;
495 } else if ((stat & HSR_RISC_INT) == 0)
496 break;
497
498 switch (stat & 0xff) {
499 case 0x1:
500 case 0x2:
501 case 0x10:
502 case 0x11:
503 qla2x00_mbx_completion(vha, MSW(stat));
504 status |= MBX_INTERRUPT;
505
506
			wrt_reg_word(&reg->semaphore, 0);
508 break;
509 case 0x12:
510 mb[0] = MSW(stat);
511 mb[1] = RD_MAILBOX_REG(ha, reg, 1);
512 mb[2] = RD_MAILBOX_REG(ha, reg, 2);
513 mb[3] = RD_MAILBOX_REG(ha, reg, 3);
514 qla2x00_async_event(vha, rsp, mb);
515 break;
516 case 0x13:
517 qla2x00_process_response_queue(rsp);
518 break;
519 case 0x15:
520 mb[0] = MBA_CMPLT_1_16BIT;
521 mb[1] = MSW(stat);
522 qla2x00_async_event(vha, rsp, mb);
523 break;
524 case 0x16:
525 mb[0] = MBA_SCSI_COMPLETION;
526 mb[1] = MSW(stat);
527 mb[2] = RD_MAILBOX_REG(ha, reg, 2);
528 qla2x00_async_event(vha, rsp, mb);
529 break;
530 default:
531 ql_dbg(ql_dbg_async, vha, 0x5028,
532 "Unrecognized interrupt type (%d).\n", stat & 0xff);
533 break;
534 }
		wrt_reg_word(&reg->hccr, HCCR_CLR_RISC_INT);
		rd_reg_word_relaxed(&reg->hccr);
537 }
538 qla2x00_handle_mbx_completion(ha, status);
539 spin_unlock_irqrestore(&ha->hardware_lock, flags);
540
541 return (IRQ_HANDLED);
542}
543
/**
 * qla2x00_mbx_completion() - Process mailbox command completions.
 * @vha: SCSI driver HA context
 * @mb0: Mailbox0 register
 */
549static void
550qla2x00_mbx_completion(scsi_qla_host_t *vha, uint16_t mb0)
551{
552 uint16_t cnt;
553 uint32_t mboxes;
554 __le16 __iomem *wptr;
555 struct qla_hw_data *ha = vha->hw;
556 struct device_reg_2xxx __iomem *reg = &ha->iobase->isp;
557
558
559 WARN_ON_ONCE(ha->mbx_count > 32);
560 mboxes = (1ULL << ha->mbx_count) - 1;
561 if (!ha->mcp)
562 ql_dbg(ql_dbg_async, vha, 0x5001, "MBX pointer ERROR.\n");
563 else
564 mboxes = ha->mcp->in_mb;
565
566
567 ha->flags.mbox_int = 1;
568 ha->mailbox_out[0] = mb0;
569 mboxes >>= 1;
570 wptr = MAILBOX_REG(ha, reg, 1);
571
572 for (cnt = 1; cnt < ha->mbx_count; cnt++) {
573 if (IS_QLA2200(ha) && cnt == 8)
574 wptr = MAILBOX_REG(ha, reg, 8);
575 if ((cnt == 4 || cnt == 5) && (mboxes & BIT_0))
576 ha->mailbox_out[cnt] = qla2x00_debounce_register(wptr);
577 else if (mboxes & BIT_0)
578 ha->mailbox_out[cnt] = rd_reg_word(wptr);
579
580 wptr++;
581 mboxes >>= 1;
582 }
583}
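/*
 * Worked example of the mailbox bitmap above (illustrative values): suppose
 * the pending command requested in_mb = MBX_0|MBX_1|MBX_2 (0x0007).
 *
 *	mboxes = 0x0007;
 *	mboxes >>= 1;		// mb0 already stored -> 0x0003
 *	// cnt == 1: BIT_0 set   -> read mailbox register 1
 *	// cnt == 2: BIT_0 set   -> read mailbox register 2
 *	// cnt >= 3: BIT_0 clear -> register skipped, wptr still advances
 *
 * Registers 4 and 5, when selected, are read through
 * qla2x00_debounce_register() instead of a plain rd_reg_word().
 */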
584
585static void
586qla81xx_idc_event(scsi_qla_host_t *vha, uint16_t aen, uint16_t descr)
587{
588 static char *event[] =
589 { "Complete", "Request Notification", "Time Extension" };
590 int rval;
591 struct device_reg_24xx __iomem *reg24 = &vha->hw->iobase->isp24;
592 struct device_reg_82xx __iomem *reg82 = &vha->hw->iobase->isp82;
593 __le16 __iomem *wptr;
594 uint16_t cnt, timeout, mb[QLA_IDC_ACK_REGS];
595
596
597 if (IS_QLA81XX(vha->hw) || IS_QLA83XX(vha->hw))
		wptr = &reg24->mailbox1;
599 else if (IS_QLA8044(vha->hw))
		wptr = &reg82->mailbox_out[1];
601 else
602 return;
603
604 for (cnt = 0; cnt < QLA_IDC_ACK_REGS; cnt++, wptr++)
605 mb[cnt] = rd_reg_word(wptr);
606
607 ql_dbg(ql_dbg_async, vha, 0x5021,
608 "Inter-Driver Communication %s -- "
609 "%04x %04x %04x %04x %04x %04x %04x.\n",
610 event[aen & 0xff], mb[0], mb[1], mb[2], mb[3],
611 mb[4], mb[5], mb[6]);
612 switch (aen) {
613
614 case MBA_IDC_COMPLETE:
615 if (mb[1] >> 15) {
616 vha->hw->flags.idc_compl_status = 1;
617 if (vha->hw->notify_dcbx_comp && !vha->vp_idx)
618 complete(&vha->hw->dcbx_comp);
619 }
620 break;
621
622 case MBA_IDC_NOTIFY:
623
624 timeout = (descr >> 8) & 0xf;
625 ql_dbg(ql_dbg_async, vha, 0x5022,
626 "%lu Inter-Driver Communication %s -- ACK timeout=%d.\n",
627 vha->host_no, event[aen & 0xff], timeout);
628
629 if (!timeout)
630 return;
631 rval = qla2x00_post_idc_ack_work(vha, mb);
632 if (rval != QLA_SUCCESS)
633 ql_log(ql_log_warn, vha, 0x5023,
634 "IDC failed to post ACK.\n");
635 break;
636 case MBA_IDC_TIME_EXT:
637 vha->hw->idc_extend_tmo = descr;
638 ql_dbg(ql_dbg_async, vha, 0x5087,
639 "%lu Inter-Driver Communication %s -- "
640 "Extend timeout by=%d.\n",
641 vha->host_no, event[aen & 0xff], vha->hw->idc_extend_tmo);
642 break;
643 }
644}
645
646#define LS_UNKNOWN 2
647const char *
648qla2x00_get_link_speed_str(struct qla_hw_data *ha, uint16_t speed)
649{
650 static const char *const link_speeds[] = {
651 "1", "2", "?", "4", "8", "16", "32", "64", "10"
652 };
653#define QLA_LAST_SPEED (ARRAY_SIZE(link_speeds) - 1)
654
655 if (IS_QLA2100(ha) || IS_QLA2200(ha))
656 return link_speeds[0];
657 else if (speed == 0x13)
658 return link_speeds[QLA_LAST_SPEED];
659 else if (speed < QLA_LAST_SPEED)
660 return link_speeds[speed];
661 else
662 return link_speeds[LS_UNKNOWN];
663}
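/*
 * Example of the mapping implemented above (the raw firmware speed codes are
 * assumptions based only on the table indices):
 *
 *	qla2x00_get_link_speed_str(ha, 0)    -> "1"   Gbps
 *	qla2x00_get_link_speed_str(ha, 5)    -> "16"  Gbps
 *	qla2x00_get_link_speed_str(ha, 0x13) -> "10"  Gbps
 *	qla2x00_get_link_speed_str(ha, 0x44) -> "?"   (out of range)
 *
 * ISP2100/ISP2200 adapters always report "1" regardless of the code.
 */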
664
665static void
666qla83xx_handle_8200_aen(scsi_qla_host_t *vha, uint16_t *mb)
667{
668 struct qla_hw_data *ha = vha->hw;
669
670
671
672
673
674
675
676
677
678
679
680
681 ql_dbg(ql_dbg_async, vha, 0x506b, "AEN Code: mb[0] = 0x%x AEN reason: "
682 "mb[1] = 0x%x PH-status1: mb[2] = 0x%x PH-status1: mb[6] = 0x%x.\n",
683 mb[0], mb[1], mb[2], mb[6]);
684 ql_dbg(ql_dbg_async, vha, 0x506c, "PH-status2: mb[3] = 0x%x "
685 "PH-status2: mb[7] = 0x%x Device-State: mb[4] = 0x%x "
686 "Drv-Presence: mb[5] = 0x%x.\n", mb[3], mb[7], mb[4], mb[5]);
687
688 if (mb[1] & (IDC_PEG_HALT_STATUS_CHANGE | IDC_NIC_FW_REPORTED_FAILURE |
689 IDC_HEARTBEAT_FAILURE)) {
690 ha->flags.nic_core_hung = 1;
691 ql_log(ql_log_warn, vha, 0x5060,
692 "83XX: F/W Error Reported: Check if reset required.\n");
693
694 if (mb[1] & IDC_PEG_HALT_STATUS_CHANGE) {
695 uint32_t protocol_engine_id, fw_err_code, err_level;
696
697
698
699
700
701
702
703
704
705
706
707
708
709
710 protocol_engine_id = (mb[2] & 0xff);
711 fw_err_code = (((mb[2] & 0xff00) >> 8) |
712 ((mb[6] & 0x1fff) << 8));
713 err_level = ((mb[6] & 0xe000) >> 13);
714 ql_log(ql_log_warn, vha, 0x5061, "PegHalt Status-1 "
715 "Register: protocol_engine_id=0x%x "
716 "fw_err_code=0x%x err_level=0x%x.\n",
717 protocol_engine_id, fw_err_code, err_level);
718 ql_log(ql_log_warn, vha, 0x5062, "PegHalt Status-2 "
719 "Register: 0x%x%x.\n", mb[7], mb[3]);
720 if (err_level == ERR_LEVEL_NON_FATAL) {
721 ql_log(ql_log_warn, vha, 0x5063,
722 "Not a fatal error, f/w has recovered itself.\n");
723 } else if (err_level == ERR_LEVEL_RECOVERABLE_FATAL) {
724 ql_log(ql_log_fatal, vha, 0x5064,
725 "Recoverable Fatal error: Chip reset "
726 "required.\n");
727 qla83xx_schedule_work(vha,
728 QLA83XX_NIC_CORE_RESET);
729 } else if (err_level == ERR_LEVEL_UNRECOVERABLE_FATAL) {
730 ql_log(ql_log_fatal, vha, 0x5065,
731 "Unrecoverable Fatal error: Set FAILED "
732 "state, reboot required.\n");
733 qla83xx_schedule_work(vha,
734 QLA83XX_NIC_CORE_UNRECOVERABLE);
735 }
736 }
737
738 if (mb[1] & IDC_NIC_FW_REPORTED_FAILURE) {
739 uint16_t peg_fw_state, nw_interface_link_up;
740 uint16_t nw_interface_signal_detect, sfp_status;
741 uint16_t htbt_counter, htbt_monitor_enable;
742 uint16_t sfp_additional_info, sfp_multirate;
743 uint16_t sfp_tx_fault, link_speed, dcbx_status;
744
745
746
747
748
749
750
751
752
753
754
755
756
757
758
759
760
761
762
763
764
765
766
767
768
769
770
771
772
773
774
775
776 peg_fw_state = (mb[2] & 0x00ff);
777 nw_interface_link_up = ((mb[2] & 0x0100) >> 8);
778 nw_interface_signal_detect = ((mb[2] & 0x0200) >> 9);
779 sfp_status = ((mb[2] & 0x0c00) >> 10);
780 htbt_counter = ((mb[2] & 0x7000) >> 12);
781 htbt_monitor_enable = ((mb[2] & 0x8000) >> 15);
782 sfp_additional_info = (mb[6] & 0x0003);
783 sfp_multirate = ((mb[6] & 0x0004) >> 2);
784 sfp_tx_fault = ((mb[6] & 0x0008) >> 3);
785 link_speed = ((mb[6] & 0x0070) >> 4);
786 dcbx_status = ((mb[6] & 0x7000) >> 12);
787
788 ql_log(ql_log_warn, vha, 0x5066,
789 "Peg-to-Fc Status Register:\n"
790 "peg_fw_state=0x%x, nw_interface_link_up=0x%x, "
791 "nw_interface_signal_detect=0x%x"
792 "\nsfp_statis=0x%x.\n ", peg_fw_state,
793 nw_interface_link_up, nw_interface_signal_detect,
794 sfp_status);
795 ql_log(ql_log_warn, vha, 0x5067,
796 "htbt_counter=0x%x, htbt_monitor_enable=0x%x, "
797 "sfp_additional_info=0x%x, sfp_multirate=0x%x.\n ",
798 htbt_counter, htbt_monitor_enable,
799 sfp_additional_info, sfp_multirate);
800 ql_log(ql_log_warn, vha, 0x5068,
801 "sfp_tx_fault=0x%x, link_state=0x%x, "
802 "dcbx_status=0x%x.\n", sfp_tx_fault, link_speed,
803 dcbx_status);
804
805 qla83xx_schedule_work(vha, QLA83XX_NIC_CORE_RESET);
806 }
807
808 if (mb[1] & IDC_HEARTBEAT_FAILURE) {
809 ql_log(ql_log_warn, vha, 0x5069,
810 "Heartbeat Failure encountered, chip reset "
811 "required.\n");
812
813 qla83xx_schedule_work(vha, QLA83XX_NIC_CORE_RESET);
814 }
815 }
816
817 if (mb[1] & IDC_DEVICE_STATE_CHANGE) {
818 ql_log(ql_log_info, vha, 0x506a,
819 "IDC Device-State changed = 0x%x.\n", mb[4]);
820 if (ha->flags.nic_core_reset_owner)
821 return;
822 qla83xx_schedule_work(vha, MBA_IDC_AEN);
823 }
824}
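/*
 * Worked decode of the PegHalt Status-1 fields above (illustrative register
 * values, not captured from hardware): with mb[2] = 0xab01 and mb[6] = 0x2003,
 *
 *	protocol_engine_id = 0xab01 & 0xff                 = 0x01
 *	fw_err_code        = ((0xab01 & 0xff00) >> 8) |
 *			     ((0x2003 & 0x1fff) << 8)      = 0x03ab
 *	err_level          = (0x2003 & 0xe000) >> 13       = 1
 *
 * err_level is then compared against the ERR_LEVEL_* constants to decide
 * between no action, a NIC core reset, or marking the device unrecoverable.
 */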
825
826int
827qla2x00_is_a_vp_did(scsi_qla_host_t *vha, uint32_t rscn_entry)
828{
829 struct qla_hw_data *ha = vha->hw;
830 scsi_qla_host_t *vp;
831 uint32_t vp_did;
832 unsigned long flags;
833 int ret = 0;
834
835 if (!ha->num_vhosts)
836 return ret;
837
838 spin_lock_irqsave(&ha->vport_slock, flags);
839 list_for_each_entry(vp, &ha->vp_list, list) {
840 vp_did = vp->d_id.b24;
841 if (vp_did == rscn_entry) {
842 ret = 1;
843 break;
844 }
845 }
846 spin_unlock_irqrestore(&ha->vport_slock, flags);
847
848 return ret;
849}
850
851fc_port_t *
852qla2x00_find_fcport_by_loopid(scsi_qla_host_t *vha, uint16_t loop_id)
853{
854 fc_port_t *f, *tf;
855
856 f = tf = NULL;
857 list_for_each_entry_safe(f, tf, &vha->vp_fcports, list)
858 if (f->loop_id == loop_id)
859 return f;
860 return NULL;
861}
862
863fc_port_t *
864qla2x00_find_fcport_by_wwpn(scsi_qla_host_t *vha, u8 *wwpn, u8 incl_deleted)
865{
866 fc_port_t *f, *tf;
867
868 f = tf = NULL;
869 list_for_each_entry_safe(f, tf, &vha->vp_fcports, list) {
870 if (memcmp(f->port_name, wwpn, WWN_SIZE) == 0) {
871 if (incl_deleted)
872 return f;
873 else if (f->deleted == 0)
874 return f;
875 }
876 }
877 return NULL;
878}
879
880fc_port_t *
881qla2x00_find_fcport_by_nportid(scsi_qla_host_t *vha, port_id_t *id,
882 u8 incl_deleted)
883{
884 fc_port_t *f, *tf;
885
886 f = tf = NULL;
887 list_for_each_entry_safe(f, tf, &vha->vp_fcports, list) {
888 if (f->d_id.b24 == id->b24) {
889 if (incl_deleted)
890 return f;
891 else if (f->deleted == 0)
892 return f;
893 }
894 }
895 return NULL;
896}
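/*
 * Typical use of the three fcport lookup helpers above (illustrative; the
 * caller and its locking context are assumptions):
 *
 *	fcport = qla2x00_find_fcport_by_nportid(vha, &port_id, 0);
 *	if (!fcport)
 *		fcport = qla2x00_find_fcport_by_wwpn(vha, wwpn, 1);
 *	if (fcport)
 *		// act on the matching remote port
 *
 * Passing incl_deleted = 1 also returns sessions that are being torn down,
 * which matters while a port is logging back in.
 */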
897
898
899static void
900qla27xx_handle_8200_aen(scsi_qla_host_t *vha, uint16_t *mb)
901{
902 struct qla_hw_data *ha = vha->hw;
903 bool reset_isp_needed = false;
904
905 ql_log(ql_log_warn, vha, 0x02f0,
906 "MPI Heartbeat stop. MPI reset is%s needed. "
907 "MB0[%xh] MB1[%xh] MB2[%xh] MB3[%xh]\n",
908 mb[1] & BIT_8 ? "" : " not",
909 mb[0], mb[1], mb[2], mb[3]);
910
911 if ((mb[1] & BIT_8) == 0)
912 return;
913
914 ql_log(ql_log_warn, vha, 0x02f1,
915 "MPI Heartbeat stop. FW dump needed\n");
916
917 if (ql2xfulldump_on_mpifail) {
918 ha->isp_ops->fw_dump(vha);
919 reset_isp_needed = true;
920 }
921
922 ha->isp_ops->mpi_fw_dump(vha, 1);
923
924 if (reset_isp_needed) {
925 vha->hw->flags.fw_init_done = 0;
926 set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
927 qla2xxx_wake_dpc(vha);
928 }
929}
930
931static struct purex_item *
932qla24xx_alloc_purex_item(scsi_qla_host_t *vha, uint16_t size)
933{
934 struct purex_item *item = NULL;
935 uint8_t item_hdr_size = sizeof(*item);
936
937 if (size > QLA_DEFAULT_PAYLOAD_SIZE) {
938 item = kzalloc(item_hdr_size +
939 (size - QLA_DEFAULT_PAYLOAD_SIZE), GFP_ATOMIC);
940 } else {
941 if (atomic_inc_return(&vha->default_item.in_use) == 1) {
942 item = &vha->default_item;
943 goto initialize_purex_header;
944 } else {
945 item = kzalloc(item_hdr_size, GFP_ATOMIC);
946 }
947 }
948 if (!item) {
949 ql_log(ql_log_warn, vha, 0x5092,
950 ">> Failed allocate purex list item.\n");
951
952 return NULL;
953 }
954
955initialize_purex_header:
956 item->vha = vha;
957 item->size = size;
958 return item;
959}
960
961static void
962qla24xx_queue_purex_item(scsi_qla_host_t *vha, struct purex_item *pkt,
963 void (*process_item)(struct scsi_qla_host *vha,
964 struct purex_item *pkt))
965{
966 struct purex_list *list = &vha->purex_list;
967 ulong flags;
968
969 pkt->process_item = process_item;
970
971 spin_lock_irqsave(&list->lock, flags);
972 list_add_tail(&pkt->list, &list->head);
973 spin_unlock_irqrestore(&list->lock, flags);
974
975 set_bit(PROCESS_PUREX_IOCB, &vha->dpc_flags);
976}
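/*
 * Putting the two helpers above together -- a hedged sketch of how a
 * default-sized PUREX IOCB is handed off to the DPC thread (the handler used
 * here is just an example from this file):
 *
 *	item = qla24xx_copy_std_pkt(vha, pkt);
 *	if (item)
 *		qla24xx_queue_purex_item(vha, item, qla24xx_process_abts);
 *
 * qla24xx_alloc_purex_item() hands out vha->default_item for small payloads
 * when it is free, so the matching free path (qla24xx_free_purex_item())
 * treats that embedded item specially instead of kfree()ing it.
 */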
977
/**
 * qla24xx_copy_std_pkt - copy a single default-sized IOCB into a purex item
 *	so it can be queued for deferred processing.
 * @vha: SCSI driver HA context
 * @pkt: source IOCB from the response ring
 */
985static struct purex_item
986*qla24xx_copy_std_pkt(struct scsi_qla_host *vha, void *pkt)
987{
988 struct purex_item *item;
989
990 item = qla24xx_alloc_purex_item(vha,
991 QLA_DEFAULT_PAYLOAD_SIZE);
992 if (!item)
993 return item;
994
995 memcpy(&item->iocb, pkt, sizeof(item->iocb));
996 return item;
997}
998
/**
 * qla27xx_copy_fpin_pkt - reassemble an FPIN ELS frame that spans a PUREX
 *	IOCB plus status continuation entries into a single purex item.
 * @vha: SCSI driver HA context
 * @pkt: pointer to the current packet; updated as entries are consumed
 * @rsp: response queue pointer
 */
1006static struct purex_item *
1007qla27xx_copy_fpin_pkt(struct scsi_qla_host *vha, void **pkt,
1008 struct rsp_que **rsp)
1009{
1010 struct purex_entry_24xx *purex = *pkt;
1011 struct rsp_que *rsp_q = *rsp;
1012 sts_cont_entry_t *new_pkt;
1013 uint16_t no_bytes = 0, total_bytes = 0, pending_bytes = 0;
1014 uint16_t buffer_copy_offset = 0;
1015 uint16_t entry_count, entry_count_remaining;
1016 struct purex_item *item;
1017 void *fpin_pkt = NULL;
1018
1019 total_bytes = (le16_to_cpu(purex->frame_size) & 0x0FFF)
1020 - PURX_ELS_HEADER_SIZE;
1021 pending_bytes = total_bytes;
1022 entry_count = entry_count_remaining = purex->entry_count;
1023 no_bytes = (pending_bytes > sizeof(purex->els_frame_payload)) ?
1024 sizeof(purex->els_frame_payload) : pending_bytes;
1025 ql_log(ql_log_info, vha, 0x509a,
1026 "FPIN ELS, frame_size 0x%x, entry count %d\n",
1027 total_bytes, entry_count);
1028
1029 item = qla24xx_alloc_purex_item(vha, total_bytes);
1030 if (!item)
1031 return item;
1032
1033 fpin_pkt = &item->iocb;
1034
1035 memcpy(fpin_pkt, &purex->els_frame_payload[0], no_bytes);
1036 buffer_copy_offset += no_bytes;
1037 pending_bytes -= no_bytes;
1038 --entry_count_remaining;
1039
1040 ((response_t *)purex)->signature = RESPONSE_PROCESSED;
1041 wmb();
1042
1043 do {
1044 while ((total_bytes > 0) && (entry_count_remaining > 0)) {
1045 if (rsp_q->ring_ptr->signature == RESPONSE_PROCESSED) {
1046 ql_dbg(ql_dbg_async, vha, 0x5084,
1047 "Ran out of IOCBs, partial data 0x%x\n",
1048 buffer_copy_offset);
1049 cpu_relax();
1050 continue;
1051 }
1052
1053 new_pkt = (sts_cont_entry_t *)rsp_q->ring_ptr;
1054 *pkt = new_pkt;
1055
1056 if (new_pkt->entry_type != STATUS_CONT_TYPE) {
1057 ql_log(ql_log_warn, vha, 0x507a,
1058 "Unexpected IOCB type, partial data 0x%x\n",
1059 buffer_copy_offset);
1060 break;
1061 }
1062
1063 rsp_q->ring_index++;
1064 if (rsp_q->ring_index == rsp_q->length) {
1065 rsp_q->ring_index = 0;
1066 rsp_q->ring_ptr = rsp_q->ring;
1067 } else {
1068 rsp_q->ring_ptr++;
1069 }
1070 no_bytes = (pending_bytes > sizeof(new_pkt->data)) ?
1071 sizeof(new_pkt->data) : pending_bytes;
1072 if ((buffer_copy_offset + no_bytes) <= total_bytes) {
1073 memcpy(((uint8_t *)fpin_pkt +
1074 buffer_copy_offset), new_pkt->data,
1075 no_bytes);
1076 buffer_copy_offset += no_bytes;
1077 pending_bytes -= no_bytes;
1078 --entry_count_remaining;
1079 } else {
1080 ql_log(ql_log_warn, vha, 0x5044,
1081 "Attempt to copy more that we got, optimizing..%x\n",
1082 buffer_copy_offset);
1083 memcpy(((uint8_t *)fpin_pkt +
1084 buffer_copy_offset), new_pkt->data,
1085 total_bytes - buffer_copy_offset);
1086 }
1087
1088 ((response_t *)new_pkt)->signature = RESPONSE_PROCESSED;
1089 wmb();
1090 }
1091
1092 if (pending_bytes != 0 || entry_count_remaining != 0) {
1093 ql_log(ql_log_fatal, vha, 0x508b,
1094 "Dropping partial FPIN, underrun bytes = 0x%x, entry cnts 0x%x\n",
1095 total_bytes, entry_count_remaining);
1096 qla24xx_free_purex_item(item);
1097 return NULL;
1098 }
1099 } while (entry_count_remaining > 0);
1100 host_to_fcp_swap((uint8_t *)&item->iocb, total_bytes);
1101 return item;
1102}
1103
/**
 * qla2x00_async_event() - Process asynchronous events.
 * @vha: SCSI driver HA context
 * @rsp: response queue
 * @mb: Mailbox registers (0 - 3)
 */
1110void
1111qla2x00_async_event(scsi_qla_host_t *vha, struct rsp_que *rsp, uint16_t *mb)
1112{
1113 uint16_t handle_cnt;
1114 uint16_t cnt, mbx;
1115 uint32_t handles[5];
1116 struct qla_hw_data *ha = vha->hw;
1117 struct device_reg_2xxx __iomem *reg = &ha->iobase->isp;
1118 struct device_reg_24xx __iomem *reg24 = &ha->iobase->isp24;
1119 struct device_reg_82xx __iomem *reg82 = &ha->iobase->isp82;
1120 uint32_t rscn_entry, host_pid;
1121 unsigned long flags;
1122 fc_port_t *fcport = NULL;
1123
1124 if (!vha->hw->flags.fw_started)
1125 return;
1126
1127
1128 handle_cnt = 0;
1129 if (IS_CNA_CAPABLE(ha))
1130 goto skip_rio;
1131 switch (mb[0]) {
1132 case MBA_SCSI_COMPLETION:
1133 handles[0] = make_handle(mb[2], mb[1]);
1134 handle_cnt = 1;
1135 break;
1136 case MBA_CMPLT_1_16BIT:
1137 handles[0] = mb[1];
1138 handle_cnt = 1;
1139 mb[0] = MBA_SCSI_COMPLETION;
1140 break;
1141 case MBA_CMPLT_2_16BIT:
1142 handles[0] = mb[1];
1143 handles[1] = mb[2];
1144 handle_cnt = 2;
1145 mb[0] = MBA_SCSI_COMPLETION;
1146 break;
1147 case MBA_CMPLT_3_16BIT:
1148 handles[0] = mb[1];
1149 handles[1] = mb[2];
1150 handles[2] = mb[3];
1151 handle_cnt = 3;
1152 mb[0] = MBA_SCSI_COMPLETION;
1153 break;
1154 case MBA_CMPLT_4_16BIT:
1155 handles[0] = mb[1];
1156 handles[1] = mb[2];
1157 handles[2] = mb[3];
1158 handles[3] = (uint32_t)RD_MAILBOX_REG(ha, reg, 6);
1159 handle_cnt = 4;
1160 mb[0] = MBA_SCSI_COMPLETION;
1161 break;
1162 case MBA_CMPLT_5_16BIT:
1163 handles[0] = mb[1];
1164 handles[1] = mb[2];
1165 handles[2] = mb[3];
1166 handles[3] = (uint32_t)RD_MAILBOX_REG(ha, reg, 6);
1167 handles[4] = (uint32_t)RD_MAILBOX_REG(ha, reg, 7);
1168 handle_cnt = 5;
1169 mb[0] = MBA_SCSI_COMPLETION;
1170 break;
1171 case MBA_CMPLT_2_32BIT:
1172 handles[0] = make_handle(mb[2], mb[1]);
1173 handles[1] = make_handle(RD_MAILBOX_REG(ha, reg, 7),
1174 RD_MAILBOX_REG(ha, reg, 6));
1175 handle_cnt = 2;
1176 mb[0] = MBA_SCSI_COMPLETION;
1177 break;
1178 default:
1179 break;
1180 }
1181skip_rio:
1182 switch (mb[0]) {
1183 case MBA_SCSI_COMPLETION:
1184 if (!vha->flags.online)
1185 break;
1186
1187 for (cnt = 0; cnt < handle_cnt; cnt++)
1188 qla2x00_process_completed_request(vha, rsp->req,
1189 handles[cnt]);
1190 break;
1191
1192 case MBA_RESET:
1193 ql_dbg(ql_dbg_async, vha, 0x5002,
1194 "Asynchronous RESET.\n");
1195
1196 set_bit(RESET_MARKER_NEEDED, &vha->dpc_flags);
1197 break;
1198
1199 case MBA_SYSTEM_ERR:
1200 mbx = 0;
1201
1202 vha->hw_err_cnt++;
1203
1204 if (IS_QLA81XX(ha) || IS_QLA83XX(ha) ||
1205 IS_QLA27XX(ha) || IS_QLA28XX(ha)) {
1206 u16 m[4];
1207
			m[0] = rd_reg_word(&reg24->mailbox4);
			m[1] = rd_reg_word(&reg24->mailbox5);
			m[2] = rd_reg_word(&reg24->mailbox6);
			mbx = m[3] = rd_reg_word(&reg24->mailbox7);
1212
1213 ql_log(ql_log_warn, vha, 0x5003,
1214 "ISP System Error - mbx1=%xh mbx2=%xh mbx3=%xh mbx4=%xh mbx5=%xh mbx6=%xh mbx7=%xh.\n",
1215 mb[1], mb[2], mb[3], m[0], m[1], m[2], m[3]);
1216 } else
1217 ql_log(ql_log_warn, vha, 0x5003,
1218 "ISP System Error - mbx1=%xh mbx2=%xh mbx3=%xh.\n ",
1219 mb[1], mb[2], mb[3]);
1220
1221 if ((IS_QLA27XX(ha) || IS_QLA28XX(ha)) &&
		    rd_reg_word(&reg24->mailbox7) & BIT_8)
1223 ha->isp_ops->mpi_fw_dump(vha, 1);
1224 ha->isp_ops->fw_dump(vha);
1225 ha->flags.fw_init_done = 0;
1226 QLA_FW_STOPPED(ha);
1227
1228 if (IS_FWI2_CAPABLE(ha)) {
1229 if (mb[1] == 0 && mb[2] == 0) {
1230 ql_log(ql_log_fatal, vha, 0x5004,
1231 "Unrecoverable Hardware Error: adapter "
1232 "marked OFFLINE!\n");
1233 vha->flags.online = 0;
1234 vha->device_flags |= DFLG_DEV_FAILED;
1235 } else {
1236
1237 if ((mbx & MBX_3) && (ha->port_no == 0))
1238 set_bit(MPI_RESET_NEEDED,
1239 &vha->dpc_flags);
1240
1241 set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
1242 }
1243 } else if (mb[1] == 0) {
1244 ql_log(ql_log_fatal, vha, 0x5005,
1245 "Unrecoverable Hardware Error: adapter marked "
1246 "OFFLINE!\n");
1247 vha->flags.online = 0;
1248 vha->device_flags |= DFLG_DEV_FAILED;
1249 } else
1250 set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
1251 break;
1252
1253 case MBA_REQ_TRANSFER_ERR:
1254 ql_log(ql_log_warn, vha, 0x5006,
1255 "ISP Request Transfer Error (%x).\n", mb[1]);
1256
1257 vha->hw_err_cnt++;
1258
1259 set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
1260 break;
1261
1262 case MBA_RSP_TRANSFER_ERR:
1263 ql_log(ql_log_warn, vha, 0x5007,
1264 "ISP Response Transfer Error (%x).\n", mb[1]);
1265
1266 vha->hw_err_cnt++;
1267
1268 set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
1269 break;
1270
1271 case MBA_WAKEUP_THRES:
1272 ql_dbg(ql_dbg_async, vha, 0x5008,
1273 "Asynchronous WAKEUP_THRES (%x).\n", mb[1]);
1274 break;
1275
1276 case MBA_LOOP_INIT_ERR:
1277 ql_log(ql_log_warn, vha, 0x5090,
1278 "LOOP INIT ERROR (%x).\n", mb[1]);
1279 set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
1280 break;
1281
1282 case MBA_LIP_OCCURRED:
1283 ha->flags.lip_ae = 1;
1284
1285 ql_dbg(ql_dbg_async, vha, 0x5009,
1286 "LIP occurred (%x).\n", mb[1]);
1287
1288 if (atomic_read(&vha->loop_state) != LOOP_DOWN) {
1289 atomic_set(&vha->loop_state, LOOP_DOWN);
1290 atomic_set(&vha->loop_down_timer, LOOP_DOWN_TIME);
1291 qla2x00_mark_all_devices_lost(vha);
1292 }
1293
1294 if (vha->vp_idx) {
1295 atomic_set(&vha->vp_state, VP_FAILED);
1296 fc_vport_set_state(vha->fc_vport, FC_VPORT_FAILED);
1297 }
1298
1299 set_bit(REGISTER_FC4_NEEDED, &vha->dpc_flags);
1300 set_bit(REGISTER_FDMI_NEEDED, &vha->dpc_flags);
1301
1302 vha->flags.management_server_logged_in = 0;
1303 qla2x00_post_aen_work(vha, FCH_EVT_LIP, mb[1]);
1304 break;
1305
1306 case MBA_LOOP_UP:
1307 if (IS_QLA2100(ha) || IS_QLA2200(ha))
1308 ha->link_data_rate = PORT_SPEED_1GB;
1309 else
1310 ha->link_data_rate = mb[1];
1311
1312 ql_log(ql_log_info, vha, 0x500a,
1313 "LOOP UP detected (%s Gbps).\n",
1314 qla2x00_get_link_speed_str(ha, ha->link_data_rate));
1315
1316 if (IS_QLA83XX(ha) || IS_QLA27XX(ha) || IS_QLA28XX(ha)) {
1317 if (mb[2] & BIT_0)
1318 ql_log(ql_log_info, vha, 0x11a0,
1319 "FEC=enabled (link up).\n");
1320 }
1321
1322 vha->flags.management_server_logged_in = 0;
1323 qla2x00_post_aen_work(vha, FCH_EVT_LINKUP, ha->link_data_rate);
1324
1325 if (vha->link_down_time < vha->hw->port_down_retry_count) {
1326 vha->short_link_down_cnt++;
1327 vha->link_down_time = QLA2XX_MAX_LINK_DOWN_TIME;
1328 }
1329
1330 break;
1331
1332 case MBA_LOOP_DOWN:
1333 SAVE_TOPO(ha);
1334 ha->flags.lip_ae = 0;
1335 ha->current_topology = 0;
1336 vha->link_down_time = 0;
1337
1338 mbx = (IS_QLA81XX(ha) || IS_QLA8031(ha))
			? rd_reg_word(&reg24->mailbox4) : 0;
		mbx = (IS_P3P_TYPE(ha)) ? rd_reg_word(&reg82->mailbox_out[4])
1341 : mbx;
1342 ql_log(ql_log_info, vha, 0x500b,
1343 "LOOP DOWN detected (%x %x %x %x).\n",
1344 mb[1], mb[2], mb[3], mbx);
1345
1346 if (atomic_read(&vha->loop_state) != LOOP_DOWN) {
1347 atomic_set(&vha->loop_state, LOOP_DOWN);
1348 atomic_set(&vha->loop_down_timer, LOOP_DOWN_TIME);
1349
1350
1351
1352
1353
1354 if (!vha->vp_idx) {
1355 if (ha->flags.fawwpn_enabled &&
1356 (ha->current_topology == ISP_CFG_F)) {
1357 void *wwpn = ha->init_cb->port_name;
1358
1359 memcpy(vha->port_name, wwpn, WWN_SIZE);
1360 fc_host_port_name(vha->host) =
1361 wwn_to_u64(vha->port_name);
1362 ql_dbg(ql_dbg_init + ql_dbg_verbose,
				    vha, 0x00d8, "LOOP DOWN detected, "
				    "restore WWPN %016llx\n",
1365 wwn_to_u64(vha->port_name));
1366 }
1367
1368 clear_bit(VP_CONFIG_OK, &vha->vp_flags);
1369 }
1370
1371 vha->device_flags |= DFLG_NO_CABLE;
1372 qla2x00_mark_all_devices_lost(vha);
1373 }
1374
1375 if (vha->vp_idx) {
1376 atomic_set(&vha->vp_state, VP_FAILED);
1377 fc_vport_set_state(vha->fc_vport, FC_VPORT_FAILED);
1378 }
1379
1380 vha->flags.management_server_logged_in = 0;
1381 ha->link_data_rate = PORT_SPEED_UNKNOWN;
1382 qla2x00_post_aen_work(vha, FCH_EVT_LINKDOWN, 0);
1383 break;
1384
1385 case MBA_LIP_RESET:
1386 ql_dbg(ql_dbg_async, vha, 0x500c,
1387 "LIP reset occurred (%x).\n", mb[1]);
1388
1389 if (atomic_read(&vha->loop_state) != LOOP_DOWN) {
1390 atomic_set(&vha->loop_state, LOOP_DOWN);
1391 atomic_set(&vha->loop_down_timer, LOOP_DOWN_TIME);
1392 qla2x00_mark_all_devices_lost(vha);
1393 }
1394
1395 if (vha->vp_idx) {
1396 atomic_set(&vha->vp_state, VP_FAILED);
1397 fc_vport_set_state(vha->fc_vport, FC_VPORT_FAILED);
1398 }
1399
1400 set_bit(RESET_MARKER_NEEDED, &vha->dpc_flags);
1401
1402 ha->operating_mode = LOOP;
1403 vha->flags.management_server_logged_in = 0;
1404 qla2x00_post_aen_work(vha, FCH_EVT_LIPRESET, mb[1]);
1405 break;
1406
1407
1408 case MBA_POINT_TO_POINT:
1409 ha->flags.lip_ae = 0;
1410
1411 if (IS_QLA2100(ha))
1412 break;
1413
1414 if (IS_CNA_CAPABLE(ha)) {
1415 ql_dbg(ql_dbg_async, vha, 0x500d,
1416 "DCBX Completed -- %04x %04x %04x.\n",
1417 mb[1], mb[2], mb[3]);
1418 if (ha->notify_dcbx_comp && !vha->vp_idx)
1419 complete(&ha->dcbx_comp);
1420
1421 } else
1422 ql_dbg(ql_dbg_async, vha, 0x500e,
1423 "Asynchronous P2P MODE received.\n");
1424
1425
1426
1427
1428
1429 if (atomic_read(&vha->loop_state) != LOOP_DOWN) {
1430 atomic_set(&vha->loop_state, LOOP_DOWN);
1431 if (!atomic_read(&vha->loop_down_timer))
1432 atomic_set(&vha->loop_down_timer,
1433 LOOP_DOWN_TIME);
1434 if (!N2N_TOPO(ha))
1435 qla2x00_mark_all_devices_lost(vha);
1436 }
1437
1438 if (vha->vp_idx) {
1439 atomic_set(&vha->vp_state, VP_FAILED);
1440 fc_vport_set_state(vha->fc_vport, FC_VPORT_FAILED);
1441 }
1442
1443 if (!(test_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags)))
1444 set_bit(RESET_MARKER_NEEDED, &vha->dpc_flags);
1445
1446 set_bit(REGISTER_FC4_NEEDED, &vha->dpc_flags);
1447 set_bit(REGISTER_FDMI_NEEDED, &vha->dpc_flags);
1448
1449 vha->flags.management_server_logged_in = 0;
1450 break;
1451
1452 case MBA_CHG_IN_CONNECTION:
1453 if (IS_QLA2100(ha))
1454 break;
1455
1456 ql_dbg(ql_dbg_async, vha, 0x500f,
1457 "Configuration change detected: value=%x.\n", mb[1]);
1458
1459 if (atomic_read(&vha->loop_state) != LOOP_DOWN) {
1460 atomic_set(&vha->loop_state, LOOP_DOWN);
1461 if (!atomic_read(&vha->loop_down_timer))
1462 atomic_set(&vha->loop_down_timer,
1463 LOOP_DOWN_TIME);
1464 qla2x00_mark_all_devices_lost(vha);
1465 }
1466
1467 if (vha->vp_idx) {
1468 atomic_set(&vha->vp_state, VP_FAILED);
1469 fc_vport_set_state(vha->fc_vport, FC_VPORT_FAILED);
1470 }
1471
1472 set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags);
1473 set_bit(LOCAL_LOOP_UPDATE, &vha->dpc_flags);
1474 break;
1475
1476 case MBA_PORT_UPDATE:
		/*
		 * Handle only global and vn-port update events
		 *
		 * Relevant inputs:
		 * mb[1] = N_Port handle of changed port
		 *         OR 0xffff for global event
		 * mb[2] = New login state (7 = port logged out)
		 * mb[3] = LSB is vp_idx, 0xff = all vps
		 *
		 * Skip processing if:
		 *	Event is global, vp_idx is NOT all vps,
		 *	vp_idx does not match
		 *	Event is not global, vp_idx does not match
		 */
1492 if (IS_QLA2XXX_MIDTYPE(ha) &&
1493 ((mb[1] == 0xffff && (mb[3] & 0xff) != 0xff) ||
1494 (mb[1] != 0xffff)) && vha->vp_idx != (mb[3] & 0xff))
1495 break;
1496
1497 if (mb[2] == 0x7) {
1498 ql_dbg(ql_dbg_async, vha, 0x5010,
1499 "Port %s %04x %04x %04x.\n",
1500 mb[1] == 0xffff ? "unavailable" : "logout",
1501 mb[1], mb[2], mb[3]);
1502
1503 if (mb[1] == 0xffff)
1504 goto global_port_update;
1505
1506 if (mb[1] == NPH_SNS_LID(ha)) {
1507 set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags);
1508 set_bit(LOCAL_LOOP_UPDATE, &vha->dpc_flags);
1509 break;
1510 }
1511
1512
1513 if (IS_FWI2_CAPABLE(ha))
1514 handle_cnt = NPH_SNS;
1515 else
1516 handle_cnt = SIMPLE_NAME_SERVER;
1517 if (mb[1] == handle_cnt) {
1518 set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags);
1519 set_bit(LOCAL_LOOP_UPDATE, &vha->dpc_flags);
1520 break;
1521 }
1522
1523
1524 fcport = qla2x00_find_fcport_by_loopid(vha, mb[1]);
1525 if (!fcport)
1526 break;
1527 if (atomic_read(&fcport->state) != FCS_ONLINE)
1528 break;
1529 ql_dbg(ql_dbg_async, vha, 0x508a,
1530 "Marking port lost loopid=%04x portid=%06x.\n",
1531 fcport->loop_id, fcport->d_id.b24);
1532 if (qla_ini_mode_enabled(vha)) {
1533 fcport->logout_on_delete = 0;
1534 qlt_schedule_sess_for_deletion(fcport);
1535 }
1536 break;
1537
1538global_port_update:
1539 if (atomic_read(&vha->loop_state) != LOOP_DOWN) {
1540 atomic_set(&vha->loop_state, LOOP_DOWN);
1541 atomic_set(&vha->loop_down_timer,
1542 LOOP_DOWN_TIME);
1543 vha->device_flags |= DFLG_NO_CABLE;
1544 qla2x00_mark_all_devices_lost(vha);
1545 }
1546
1547 if (vha->vp_idx) {
1548 atomic_set(&vha->vp_state, VP_FAILED);
1549 fc_vport_set_state(vha->fc_vport,
1550 FC_VPORT_FAILED);
1551 qla2x00_mark_all_devices_lost(vha);
1552 }
1553
1554 vha->flags.management_server_logged_in = 0;
1555 ha->link_data_rate = PORT_SPEED_UNKNOWN;
1556 break;
1557 }
1558
1559
1560
1561
1562
1563
1564 atomic_set(&vha->loop_down_timer, 0);
1565 if (atomic_read(&vha->loop_state) != LOOP_DOWN &&
1566 !ha->flags.n2n_ae &&
1567 atomic_read(&vha->loop_state) != LOOP_DEAD) {
1568 ql_dbg(ql_dbg_async, vha, 0x5011,
1569 "Asynchronous PORT UPDATE ignored %04x/%04x/%04x.\n",
1570 mb[1], mb[2], mb[3]);
1571 break;
1572 }
1573
1574 ql_dbg(ql_dbg_async, vha, 0x5012,
1575 "Port database changed %04x %04x %04x.\n",
1576 mb[1], mb[2], mb[3]);
1577
1578
1579
1580
1581 atomic_set(&vha->loop_state, LOOP_UP);
1582 vha->scan.scan_retry = 0;
1583
1584 set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags);
1585 set_bit(LOCAL_LOOP_UPDATE, &vha->dpc_flags);
1586 set_bit(VP_CONFIG_OK, &vha->vp_flags);
1587 break;
1588
1589 case MBA_RSCN_UPDATE:
1590
1591 if (vha->vp_idx && test_bit(VP_SCR_NEEDED, &vha->vp_flags))
1592 break;
1593
1594 if (ha->flags.npiv_supported && vha->vp_idx != (mb[3] & 0xff))
1595 break;
1596
1597 ql_log(ql_log_warn, vha, 0x5013,
1598 "RSCN database changed -- %04x %04x %04x.\n",
1599 mb[1], mb[2], mb[3]);
1600
1601 rscn_entry = ((mb[1] & 0xff) << 16) | mb[2];
1602 host_pid = (vha->d_id.b.domain << 16) | (vha->d_id.b.area << 8)
1603 | vha->d_id.b.al_pa;
1604 if (rscn_entry == host_pid) {
1605 ql_dbg(ql_dbg_async, vha, 0x5014,
1606 "Ignoring RSCN update to local host "
1607 "port ID (%06x).\n", host_pid);
1608 break;
1609 }
1610
1611
1612 rscn_entry = ((mb[1] & 0x3ff) << 16) | mb[2];
1613
1614
1615 if (qla2x00_is_a_vp_did(vha, rscn_entry))
1616 break;
1617
1618 atomic_set(&vha->loop_down_timer, 0);
1619 vha->flags.management_server_logged_in = 0;
1620 {
1621 struct event_arg ea;
1622
1623 memset(&ea, 0, sizeof(ea));
1624 ea.id.b24 = rscn_entry;
1625 ea.id.b.rsvd_1 = rscn_entry >> 24;
1626 qla2x00_handle_rscn(vha, &ea);
1627 qla2x00_post_aen_work(vha, FCH_EVT_RSCN, rscn_entry);
1628 }
1629 break;
1630 case MBA_CONGN_NOTI_RECV:
1631 if (!ha->flags.scm_enabled ||
1632 mb[1] != QLA_CON_PRIMITIVE_RECEIVED)
1633 break;
1634
1635 if (mb[2] == QLA_CONGESTION_ARB_WARNING) {
1636 ql_dbg(ql_dbg_async, vha, 0x509b,
1637 "Congestion Warning %04x %04x.\n", mb[1], mb[2]);
1638 } else if (mb[2] == QLA_CONGESTION_ARB_ALARM) {
1639 ql_log(ql_log_warn, vha, 0x509b,
1640 "Congestion Alarm %04x %04x.\n", mb[1], mb[2]);
1641 }
1642 break;
1643
1644 case MBA_ZIO_RESPONSE:
1645 ql_dbg(ql_dbg_async, vha, 0x5015,
1646 "[R|Z]IO update completion.\n");
1647
1648 if (IS_FWI2_CAPABLE(ha))
1649 qla24xx_process_response_queue(vha, rsp);
1650 else
1651 qla2x00_process_response_queue(rsp);
1652 break;
1653
1654 case MBA_DISCARD_RND_FRAME:
1655 ql_dbg(ql_dbg_async, vha, 0x5016,
1656 "Discard RND Frame -- %04x %04x %04x.\n",
1657 mb[1], mb[2], mb[3]);
1658 vha->interface_err_cnt++;
1659 break;
1660
1661 case MBA_TRACE_NOTIFICATION:
1662 ql_dbg(ql_dbg_async, vha, 0x5017,
1663 "Trace Notification -- %04x %04x.\n", mb[1], mb[2]);
1664 break;
1665
1666 case MBA_ISP84XX_ALERT:
1667 ql_dbg(ql_dbg_async, vha, 0x5018,
1668 "ISP84XX Alert Notification -- %04x %04x %04x.\n",
1669 mb[1], mb[2], mb[3]);
1670
1671 spin_lock_irqsave(&ha->cs84xx->access_lock, flags);
1672 switch (mb[1]) {
1673 case A84_PANIC_RECOVERY:
1674 ql_log(ql_log_info, vha, 0x5019,
1675 "Alert 84XX: panic recovery %04x %04x.\n",
1676 mb[2], mb[3]);
1677 break;
1678 case A84_OP_LOGIN_COMPLETE:
1679 ha->cs84xx->op_fw_version = mb[3] << 16 | mb[2];
1680 ql_log(ql_log_info, vha, 0x501a,
1681 "Alert 84XX: firmware version %x.\n",
1682 ha->cs84xx->op_fw_version);
1683 break;
1684 case A84_DIAG_LOGIN_COMPLETE:
1685 ha->cs84xx->diag_fw_version = mb[3] << 16 | mb[2];
1686 ql_log(ql_log_info, vha, 0x501b,
1687 "Alert 84XX: diagnostic firmware version %x.\n",
1688 ha->cs84xx->diag_fw_version);
1689 break;
1690 case A84_GOLD_LOGIN_COMPLETE:
1691 ha->cs84xx->diag_fw_version = mb[3] << 16 | mb[2];
1692 ha->cs84xx->fw_update = 1;
1693 ql_log(ql_log_info, vha, 0x501c,
1694 "Alert 84XX: gold firmware version %x.\n",
1695 ha->cs84xx->gold_fw_version);
1696 break;
1697 default:
1698 ql_log(ql_log_warn, vha, 0x501d,
1699 "Alert 84xx: Invalid Alert %04x %04x %04x.\n",
1700 mb[1], mb[2], mb[3]);
1701 }
1702 spin_unlock_irqrestore(&ha->cs84xx->access_lock, flags);
1703 break;
1704 case MBA_DCBX_START:
1705 ql_dbg(ql_dbg_async, vha, 0x501e,
1706 "DCBX Started -- %04x %04x %04x.\n",
1707 mb[1], mb[2], mb[3]);
1708 break;
1709 case MBA_DCBX_PARAM_UPDATE:
1710 ql_dbg(ql_dbg_async, vha, 0x501f,
1711 "DCBX Parameters Updated -- %04x %04x %04x.\n",
1712 mb[1], mb[2], mb[3]);
1713 break;
1714 case MBA_FCF_CONF_ERR:
1715 ql_dbg(ql_dbg_async, vha, 0x5020,
1716 "FCF Configuration Error -- %04x %04x %04x.\n",
1717 mb[1], mb[2], mb[3]);
1718 break;
1719 case MBA_IDC_NOTIFY:
1720 if (IS_QLA8031(vha->hw) || IS_QLA8044(ha)) {
			mb[4] = rd_reg_word(&reg24->mailbox4);
1722 if (((mb[2] & 0x7fff) == MBC_PORT_RESET ||
1723 (mb[2] & 0x7fff) == MBC_SET_PORT_CONFIG) &&
1724 (mb[4] & INTERNAL_LOOPBACK_MASK) != 0) {
1725 set_bit(ISP_QUIESCE_NEEDED, &vha->dpc_flags);
1726
1727
1728
1729 if (atomic_read(&vha->loop_state) == LOOP_DOWN)
1730 atomic_set(&vha->loop_down_timer,
1731 LOOP_DOWN_TIME);
1732 qla2xxx_wake_dpc(vha);
1733 }
1734 }
1735 fallthrough;
1736 case MBA_IDC_COMPLETE:
1737 if (ha->notify_lb_portup_comp && !vha->vp_idx)
1738 complete(&ha->lb_portup_comp);
1739 fallthrough;
1740 case MBA_IDC_TIME_EXT:
1741 if (IS_QLA81XX(vha->hw) || IS_QLA8031(vha->hw) ||
1742 IS_QLA8044(ha))
1743 qla81xx_idc_event(vha, mb[0], mb[1]);
1744 break;
1745
1746 case MBA_IDC_AEN:
1747 if (IS_QLA27XX(ha) || IS_QLA28XX(ha)) {
1748 vha->hw_err_cnt++;
1749 qla27xx_handle_8200_aen(vha, mb);
1750 } else if (IS_QLA83XX(ha)) {
			mb[4] = rd_reg_word(&reg24->mailbox4);
			mb[5] = rd_reg_word(&reg24->mailbox5);
			mb[6] = rd_reg_word(&reg24->mailbox6);
			mb[7] = rd_reg_word(&reg24->mailbox7);
1755 qla83xx_handle_8200_aen(vha, mb);
1756 } else {
1757 ql_dbg(ql_dbg_async, vha, 0x5052,
1758 "skip Heartbeat processing mb0-3=[0x%04x] [0x%04x] [0x%04x] [0x%04x]\n",
1759 mb[0], mb[1], mb[2], mb[3]);
1760 }
1761 break;
1762
1763 case MBA_DPORT_DIAGNOSTICS:
1764 ql_dbg(ql_dbg_async, vha, 0x5052,
1765 "D-Port Diagnostics: %04x %04x %04x %04x\n",
1766 mb[0], mb[1], mb[2], mb[3]);
1767 memcpy(vha->dport_data, mb, sizeof(vha->dport_data));
1768 if (IS_QLA83XX(ha) || IS_QLA27XX(ha) || IS_QLA28XX(ha)) {
1769 static char *results[] = {
1770 "start", "done(pass)", "done(error)", "undefined" };
1771 static char *types[] = {
1772 "none", "dynamic", "static", "other" };
1773 uint result = mb[1] >> 0 & 0x3;
1774 uint type = mb[1] >> 6 & 0x3;
1775 uint sw = mb[1] >> 15 & 0x1;
1776 ql_dbg(ql_dbg_async, vha, 0x5052,
1777 "D-Port Diagnostics: result=%s type=%s [sw=%u]\n",
1778 results[result], types[type], sw);
1779 if (result == 2) {
1780 static char *reasons[] = {
1781 "reserved", "unexpected reject",
1782 "unexpected phase", "retry exceeded",
1783 "timed out", "not supported",
1784 "user stopped" };
1785 uint reason = mb[2] >> 0 & 0xf;
1786 uint phase = mb[2] >> 12 & 0xf;
1787 ql_dbg(ql_dbg_async, vha, 0x5052,
1788 "D-Port Diagnostics: reason=%s phase=%u \n",
1789 reason < 7 ? reasons[reason] : "other",
1790 phase >> 1);
1791 }
1792 }
1793 break;
1794
1795 case MBA_TEMPERATURE_ALERT:
1796 ql_dbg(ql_dbg_async, vha, 0x505e,
1797 "TEMPERATURE ALERT: %04x %04x %04x\n", mb[1], mb[2], mb[3]);
1798 break;
1799
1800 case MBA_TRANS_INSERT:
1801 ql_dbg(ql_dbg_async, vha, 0x5091,
1802 "Transceiver Insertion: %04x\n", mb[1]);
1803 set_bit(DETECT_SFP_CHANGE, &vha->dpc_flags);
1804 break;
1805
1806 case MBA_TRANS_REMOVE:
1807 ql_dbg(ql_dbg_async, vha, 0x5091, "Transceiver Removal\n");
1808 break;
1809
1810 default:
1811 ql_dbg(ql_dbg_async, vha, 0x5057,
1812 "Unknown AEN:%04x %04x %04x %04x\n",
1813 mb[0], mb[1], mb[2], mb[3]);
1814 }
1815
1816 qlt_async_event(mb[0], vha, mb);
1817
1818 if (!vha->vp_idx && ha->num_vhosts)
1819 qla2x00_alert_all_vps(rsp, mb);
1820}
1821
/**
 * qla2x00_process_completed_request() - Process a Fast Post response.
 * @vha: SCSI driver HA context
 * @req: request queue
 * @index: SRB index
 */
1828void
1829qla2x00_process_completed_request(struct scsi_qla_host *vha,
1830 struct req_que *req, uint32_t index)
1831{
1832 srb_t *sp;
1833 struct qla_hw_data *ha = vha->hw;
1834
1835
1836 if (index >= req->num_outstanding_cmds) {
1837 ql_log(ql_log_warn, vha, 0x3014,
1838 "Invalid SCSI command index (%x).\n", index);
1839
1840 if (IS_P3P_TYPE(ha))
1841 set_bit(FCOE_CTX_RESET_NEEDED, &vha->dpc_flags);
1842 else
1843 set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
1844 return;
1845 }
1846
1847 sp = req->outstanding_cmds[index];
1848 if (sp) {
1849
1850 req->outstanding_cmds[index] = NULL;
1851
1852
1853 sp->done(sp, DID_OK << 16);
1854 } else {
1855 ql_log(ql_log_warn, vha, 0x3016, "Invalid SCSI SRB.\n");
1856
1857 if (IS_P3P_TYPE(ha))
1858 set_bit(FCOE_CTX_RESET_NEEDED, &vha->dpc_flags);
1859 else
1860 set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
1861 }
1862}
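/*
 * Fast-post completion example (taken from the RIO handling earlier in this
 * file): an MBA_SCSI_COMPLETION event carries up to five handles in the
 * mailbox registers, and each one is retired without a full status IOCB:
 *
 *	for (cnt = 0; cnt < handle_cnt; cnt++)
 *		qla2x00_process_completed_request(vha, rsp->req,
 *		    handles[cnt]);
 *
 * A valid handle indexes req->outstanding_cmds[] and the srb is completed
 * with DID_OK.
 */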
1863
1864srb_t *
1865qla2x00_get_sp_from_handle(scsi_qla_host_t *vha, const char *func,
1866 struct req_que *req, void *iocb)
1867{
1868 struct qla_hw_data *ha = vha->hw;
1869 sts_entry_t *pkt = iocb;
1870 srb_t *sp;
1871 uint16_t index;
1872
1873 if (pkt->handle == QLA_SKIP_HANDLE)
1874 return NULL;
1875
1876 index = LSW(pkt->handle);
1877 if (index >= req->num_outstanding_cmds) {
1878 ql_log(ql_log_warn, vha, 0x5031,
1879 "%s: Invalid command index (%x) type %8ph.\n",
1880 func, index, iocb);
1881 if (IS_P3P_TYPE(ha))
1882 set_bit(FCOE_CTX_RESET_NEEDED, &vha->dpc_flags);
1883 else
1884 set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
1885 return NULL;
1886 }
1887 sp = req->outstanding_cmds[index];
1888 if (!sp) {
1889 ql_log(ql_log_warn, vha, 0x5032,
1890 "%s: Invalid completion handle (%x) -- timed-out.\n",
1891 func, index);
1892 return NULL;
1893 }
1894 if (sp->handle != index) {
1895 ql_log(ql_log_warn, vha, 0x5033,
1896 "%s: SRB handle (%x) mismatch %x.\n", func,
1897 sp->handle, index);
1898 return NULL;
1899 }
1900
1901 req->outstanding_cmds[index] = NULL;
1902 return sp;
1903}
1904
1905static void
1906qla2x00_mbx_iocb_entry(scsi_qla_host_t *vha, struct req_que *req,
1907 struct mbx_entry *mbx)
1908{
1909 const char func[] = "MBX-IOCB";
1910 const char *type;
1911 fc_port_t *fcport;
1912 srb_t *sp;
1913 struct srb_iocb *lio;
1914 uint16_t *data;
1915 uint16_t status;
1916
1917 sp = qla2x00_get_sp_from_handle(vha, func, req, mbx);
1918 if (!sp)
1919 return;
1920
1921 lio = &sp->u.iocb_cmd;
1922 type = sp->name;
1923 fcport = sp->fcport;
1924 data = lio->u.logio.data;
1925
1926 data[0] = MBS_COMMAND_ERROR;
1927 data[1] = lio->u.logio.flags & SRB_LOGIN_RETRIED ?
1928 QLA_LOGIO_LOGIN_RETRIED : 0;
1929 if (mbx->entry_status) {
1930 ql_dbg(ql_dbg_async, vha, 0x5043,
1931 "Async-%s error entry - hdl=%x portid=%02x%02x%02x "
1932 "entry-status=%x status=%x state-flag=%x "
1933 "status-flags=%x.\n", type, sp->handle,
1934 fcport->d_id.b.domain, fcport->d_id.b.area,
1935 fcport->d_id.b.al_pa, mbx->entry_status,
1936 le16_to_cpu(mbx->status), le16_to_cpu(mbx->state_flags),
1937 le16_to_cpu(mbx->status_flags));
1938
1939 ql_dump_buffer(ql_dbg_async + ql_dbg_buffer, vha, 0x5029,
1940 mbx, sizeof(*mbx));
1941
1942 goto logio_done;
1943 }
1944
1945 status = le16_to_cpu(mbx->status);
1946 if (status == 0x30 && sp->type == SRB_LOGIN_CMD &&
1947 le16_to_cpu(mbx->mb0) == MBS_COMMAND_COMPLETE)
1948 status = 0;
1949 if (!status && le16_to_cpu(mbx->mb0) == MBS_COMMAND_COMPLETE) {
1950 ql_dbg(ql_dbg_async, vha, 0x5045,
1951 "Async-%s complete - hdl=%x portid=%02x%02x%02x mbx1=%x.\n",
1952 type, sp->handle, fcport->d_id.b.domain,
1953 fcport->d_id.b.area, fcport->d_id.b.al_pa,
1954 le16_to_cpu(mbx->mb1));
1955
1956 data[0] = MBS_COMMAND_COMPLETE;
1957 if (sp->type == SRB_LOGIN_CMD) {
1958 fcport->port_type = FCT_TARGET;
1959 if (le16_to_cpu(mbx->mb1) & BIT_0)
1960 fcport->port_type = FCT_INITIATOR;
1961 else if (le16_to_cpu(mbx->mb1) & BIT_1)
1962 fcport->flags |= FCF_FCP2_DEVICE;
1963 }
1964 goto logio_done;
1965 }
1966
1967 data[0] = le16_to_cpu(mbx->mb0);
1968 switch (data[0]) {
1969 case MBS_PORT_ID_USED:
1970 data[1] = le16_to_cpu(mbx->mb1);
1971 break;
1972 case MBS_LOOP_ID_USED:
1973 break;
1974 default:
1975 data[0] = MBS_COMMAND_ERROR;
1976 break;
1977 }
1978
1979 ql_log(ql_log_warn, vha, 0x5046,
1980 "Async-%s failed - hdl=%x portid=%02x%02x%02x status=%x "
1981 "mb0=%x mb1=%x mb2=%x mb6=%x mb7=%x.\n", type, sp->handle,
1982 fcport->d_id.b.domain, fcport->d_id.b.area, fcport->d_id.b.al_pa,
1983 status, le16_to_cpu(mbx->mb0), le16_to_cpu(mbx->mb1),
1984 le16_to_cpu(mbx->mb2), le16_to_cpu(mbx->mb6),
1985 le16_to_cpu(mbx->mb7));
1986
1987logio_done:
1988 sp->done(sp, 0);
1989}
1990
1991static void
1992qla24xx_mbx_iocb_entry(scsi_qla_host_t *vha, struct req_que *req,
1993 struct mbx_24xx_entry *pkt)
1994{
1995 const char func[] = "MBX-IOCB2";
1996 struct qla_hw_data *ha = vha->hw;
1997 srb_t *sp;
1998 struct srb_iocb *si;
1999 u16 sz, i;
2000 int res;
2001
2002 sp = qla2x00_get_sp_from_handle(vha, func, req, pkt);
2003 if (!sp)
2004 return;
2005
2006 if (sp->type == SRB_SCSI_CMD ||
2007 sp->type == SRB_NVME_CMD ||
2008 sp->type == SRB_TM_CMD) {
2009 ql_log(ql_log_warn, vha, 0x509d,
2010 "Inconsistent event entry type %d\n", sp->type);
2011 if (IS_P3P_TYPE(ha))
2012 set_bit(FCOE_CTX_RESET_NEEDED, &vha->dpc_flags);
2013 else
2014 set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
2015 return;
2016 }
2017
2018 si = &sp->u.iocb_cmd;
2019 sz = min(ARRAY_SIZE(pkt->mb), ARRAY_SIZE(sp->u.iocb_cmd.u.mbx.in_mb));
2020
2021 for (i = 0; i < sz; i++)
2022 si->u.mbx.in_mb[i] = pkt->mb[i];
2023
2024 res = (si->u.mbx.in_mb[0] & MBS_MASK);
2025
2026 sp->done(sp, res);
2027}
2028
2029static void
2030qla24xxx_nack_iocb_entry(scsi_qla_host_t *vha, struct req_que *req,
2031 struct nack_to_isp *pkt)
2032{
2033 const char func[] = "nack";
2034 srb_t *sp;
2035 int res = 0;
2036
2037 sp = qla2x00_get_sp_from_handle(vha, func, req, pkt);
2038 if (!sp)
2039 return;
2040
2041 if (pkt->u.isp2x.status != cpu_to_le16(NOTIFY_ACK_SUCCESS))
2042 res = QLA_FUNCTION_FAILED;
2043
2044 sp->done(sp, res);
2045}
2046
2047static void
2048qla2x00_ct_entry(scsi_qla_host_t *vha, struct req_que *req,
2049 sts_entry_t *pkt, int iocb_type)
2050{
2051 const char func[] = "CT_IOCB";
2052 const char *type;
2053 srb_t *sp;
2054 struct bsg_job *bsg_job;
2055 struct fc_bsg_reply *bsg_reply;
2056 uint16_t comp_status;
2057 int res = 0;
2058
2059 sp = qla2x00_get_sp_from_handle(vha, func, req, pkt);
2060 if (!sp)
2061 return;
2062
2063 switch (sp->type) {
2064 case SRB_CT_CMD:
2065 bsg_job = sp->u.bsg_job;
2066 bsg_reply = bsg_job->reply;
2067
2068 type = "ct pass-through";
2069
2070 comp_status = le16_to_cpu(pkt->comp_status);
2071
2072
2073
2074
2075
2076 bsg_reply->reply_data.ctels_reply.status = FC_CTELS_STATUS_OK;
2077 bsg_job->reply_len = sizeof(struct fc_bsg_reply);
2078
2079 if (comp_status != CS_COMPLETE) {
2080 if (comp_status == CS_DATA_UNDERRUN) {
2081 res = DID_OK << 16;
2082 bsg_reply->reply_payload_rcv_len =
2083 le16_to_cpu(pkt->rsp_info_len);
2084
2085 ql_log(ql_log_warn, vha, 0x5048,
2086 "CT pass-through-%s error comp_status=0x%x total_byte=0x%x.\n",
2087 type, comp_status,
2088 bsg_reply->reply_payload_rcv_len);
2089 } else {
2090 ql_log(ql_log_warn, vha, 0x5049,
2091 "CT pass-through-%s error comp_status=0x%x.\n",
2092 type, comp_status);
2093 res = DID_ERROR << 16;
2094 bsg_reply->reply_payload_rcv_len = 0;
2095 }
2096 ql_dump_buffer(ql_dbg_async + ql_dbg_buffer, vha, 0x5035,
2097 pkt, sizeof(*pkt));
2098 } else {
2099 res = DID_OK << 16;
2100 bsg_reply->reply_payload_rcv_len =
2101 bsg_job->reply_payload.payload_len;
2102 bsg_job->reply_len = 0;
2103 }
2104 break;
2105 case SRB_CT_PTHRU_CMD:
2106
2107
2108
2109
2110 res = qla2x00_chk_ms_status(vha, (ms_iocb_entry_t *)pkt,
2111 (struct ct_sns_rsp *)sp->u.iocb_cmd.u.ctarg.rsp,
2112 sp->name);
2113 break;
2114 }
2115
2116 sp->done(sp, res);
2117}
2118
2119static void
2120qla24xx_els_ct_entry(scsi_qla_host_t *v, struct req_que *req,
2121 struct sts_entry_24xx *pkt, int iocb_type)
2122{
2123 struct els_sts_entry_24xx *ese = (struct els_sts_entry_24xx *)pkt;
2124 const char func[] = "ELS_CT_IOCB";
2125 const char *type;
2126 srb_t *sp;
2127 struct bsg_job *bsg_job;
2128 struct fc_bsg_reply *bsg_reply;
2129 uint16_t comp_status;
2130 uint32_t fw_status[3];
2131 int res, logit = 1;
2132 struct srb_iocb *els;
2133 uint n;
2134 scsi_qla_host_t *vha;
2135 struct els_sts_entry_24xx *e = (struct els_sts_entry_24xx *)pkt;
2136
2137 sp = qla2x00_get_sp_from_handle(v, func, req, pkt);
2138 if (!sp)
2139 return;
2140 bsg_job = sp->u.bsg_job;
2141 vha = sp->vha;
2142
2143 type = NULL;
2144
2145 comp_status = fw_status[0] = le16_to_cpu(pkt->comp_status);
2146 fw_status[1] = le32_to_cpu(((struct els_sts_entry_24xx *)pkt)->error_subcode_1);
2147 fw_status[2] = le32_to_cpu(((struct els_sts_entry_24xx *)pkt)->error_subcode_2);
2148
2149 switch (sp->type) {
2150 case SRB_ELS_CMD_RPT:
2151 case SRB_ELS_CMD_HST:
2152 type = "rpt hst";
2153 break;
2154 case SRB_ELS_CMD_HST_NOLOGIN:
2155 type = "els";
2156 {
2157 struct els_entry_24xx *els = (void *)pkt;
2158 struct qla_bsg_auth_els_request *p =
2159 (struct qla_bsg_auth_els_request *)bsg_job->request;
2160
2161 ql_dbg(ql_dbg_user, vha, 0x700f,
2162 "%s %s. portid=%02x%02x%02x status %x xchg %x bsg ptr %p\n",
2163 __func__, sc_to_str(p->e.sub_cmd),
2164 e->d_id[2], e->d_id[1], e->d_id[0],
2165 comp_status, p->e.extra_rx_xchg_address, bsg_job);
2166
2167 if (!(le16_to_cpu(els->control_flags) & ECF_PAYLOAD_DESCR_MASK)) {
2168 if (sp->remap.remapped) {
2169 n = sg_copy_from_buffer(bsg_job->reply_payload.sg_list,
2170 bsg_job->reply_payload.sg_cnt,
2171 sp->remap.rsp.buf,
2172 sp->remap.rsp.len);
2173 ql_dbg(ql_dbg_user + ql_dbg_verbose, vha, 0x700e,
2174 "%s: SG copied %x of %x\n",
2175 __func__, n, sp->remap.rsp.len);
2176 } else {
2177 ql_dbg(ql_dbg_user, vha, 0x700f,
2178 "%s: NOT REMAPPED (error)...!!!\n",
2179 __func__);
2180 }
2181 }
2182 }
2183 break;
2184 case SRB_CT_CMD:
2185 type = "ct pass-through";
2186 break;
2187 case SRB_ELS_DCMD:
2188 type = "Driver ELS logo";
2189 if (iocb_type != ELS_IOCB_TYPE) {
2190 ql_dbg(ql_dbg_user, vha, 0x5047,
2191 "Completing %s: (%p) type=%d.\n",
2192 type, sp, sp->type);
2193 sp->done(sp, 0);
2194 return;
2195 }
2196 break;
2197 case SRB_CT_PTHRU_CMD:
2198
2199
2200
2201 res = qla2x00_chk_ms_status(sp->vha, (ms_iocb_entry_t *)pkt,
2202 (struct ct_sns_rsp *)sp->u.iocb_cmd.u.ctarg.rsp,
2203 sp->name);
2204 sp->done(sp, res);
2205 return;
2206 default:
2207 ql_dbg(ql_dbg_user, vha, 0x503e,
2208 "Unrecognized SRB: (%p) type=%d.\n", sp, sp->type);
2209 return;
2210 }
2211
2212 if (iocb_type == ELS_IOCB_TYPE) {
2213 els = &sp->u.iocb_cmd;
2214 els->u.els_plogi.fw_status[0] = cpu_to_le32(fw_status[0]);
2215 els->u.els_plogi.fw_status[1] = cpu_to_le32(fw_status[1]);
2216 els->u.els_plogi.fw_status[2] = cpu_to_le32(fw_status[2]);
2217 els->u.els_plogi.comp_status = cpu_to_le16(fw_status[0]);
2218 if (comp_status == CS_COMPLETE) {
2219 res = DID_OK << 16;
2220 } else {
2221 if (comp_status == CS_DATA_UNDERRUN) {
2222 res = DID_OK << 16;
2223 els->u.els_plogi.len = cpu_to_le16(le32_to_cpu(
2224 ese->total_byte_count));
2225
2226 if (sp->remap.remapped &&
2227 ((u8 *)sp->remap.rsp.buf)[0] == ELS_LS_ACC) {
2228 ql_dbg(ql_dbg_user, vha, 0x503f,
2229 "%s IOCB Done LS_ACC %02x%02x%02x -> %02x%02x%02x",
2230 __func__, e->s_id[0], e->s_id[2], e->s_id[1],
2231 e->d_id[2], e->d_id[1], e->d_id[0]);
2232 logit = 0;
2233 }
2234
2235 } else if (comp_status == CS_PORT_LOGGED_OUT) {
2236 els->u.els_plogi.len = 0;
2237 res = DID_IMM_RETRY << 16;
2238 qlt_schedule_sess_for_deletion(sp->fcport);
2239 } else {
2240 els->u.els_plogi.len = 0;
2241 res = DID_ERROR << 16;
2242 }
2243
2244 if (logit) {
2245 if (sp->remap.remapped &&
2246 ((u8 *)sp->remap.rsp.buf)[0] == ELS_LS_RJT) {
2247 ql_dbg(ql_dbg_user, vha, 0x503f,
2248 "%s IOCB Done LS_RJT hdl=%x comp_status=0x%x\n",
2249 type, sp->handle, comp_status);
2250
2251 ql_dbg(ql_dbg_user, vha, 0x503f,
2252 "subcode 1=0x%x subcode 2=0x%x bytes=0x%x %02x%02x%02x -> %02x%02x%02x\n",
2253 fw_status[1], fw_status[2],
2254 le32_to_cpu(((struct els_sts_entry_24xx *)
2255 pkt)->total_byte_count),
2256 e->s_id[0], e->s_id[2], e->s_id[1],
2257 e->d_id[2], e->d_id[1], e->d_id[0]);
2258 } else {
2259 ql_log(ql_log_info, vha, 0x503f,
2260 "%s IOCB Done hdl=%x comp_status=0x%x\n",
2261 type, sp->handle, comp_status);
2262 ql_log(ql_log_info, vha, 0x503f,
2263 "subcode 1=0x%x subcode 2=0x%x bytes=0x%x %02x%02x%02x -> %02x%02x%02x\n",
2264 fw_status[1], fw_status[2],
2265 le32_to_cpu(((struct els_sts_entry_24xx *)
2266 pkt)->total_byte_count),
2267 e->s_id[0], e->s_id[2], e->s_id[1],
2268 e->d_id[2], e->d_id[1], e->d_id[0]);
2269 }
2270 }
2271 }
2272 goto els_ct_done;
2273 }
2274
2275
2276
2277
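 /* BSG-originated ELS/CT pass-through: build the bsg reply and append the firmware status words. */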
2278 bsg_job = sp->u.bsg_job;
2279 bsg_reply = bsg_job->reply;
2280 bsg_reply->reply_data.ctels_reply.status = FC_CTELS_STATUS_OK;
2281 bsg_job->reply_len = sizeof(struct fc_bsg_reply) + sizeof(fw_status);
2282
2283 if (comp_status != CS_COMPLETE) {
2284 if (comp_status == CS_DATA_UNDERRUN) {
2285 res = DID_OK << 16;
2286 bsg_reply->reply_payload_rcv_len =
2287 le32_to_cpu(ese->total_byte_count);
2288
2289 ql_dbg(ql_dbg_user, vha, 0x503f,
2290 "ELS-CT pass-through-%s error hdl=%x comp_status-status=0x%x "
2291 "error subcode 1=0x%x error subcode 2=0x%x total_byte = 0x%x.\n",
2292 type, sp->handle, comp_status, fw_status[1], fw_status[2],
2293 le32_to_cpu(ese->total_byte_count));
2294 } else {
2295 ql_dbg(ql_dbg_user, vha, 0x5040,
2296 "ELS-CT pass-through-%s error hdl=%x comp_status-status=0x%x "
2297 "error subcode 1=0x%x error subcode 2=0x%x.\n",
2298 type, sp->handle, comp_status,
2299 le32_to_cpu(ese->error_subcode_1),
2300 le32_to_cpu(ese->error_subcode_2));
2301 res = DID_ERROR << 16;
2302 bsg_reply->reply_payload_rcv_len = 0;
2303 }
2304 memcpy(bsg_job->reply + sizeof(struct fc_bsg_reply),
2305 fw_status, sizeof(fw_status));
2306 ql_dump_buffer(ql_dbg_user + ql_dbg_buffer, vha, 0x5056,
2307 pkt, sizeof(*pkt));
2308 } else {
2310 res = DID_OK << 16;
2311 bsg_reply->reply_payload_rcv_len = bsg_job->reply_payload.payload_len;
2312 bsg_job->reply_len = 0;
2313 }
2314els_ct_done:
2315
2316 sp->done(sp, res);
2317}
2318
2319static void
2320qla24xx_logio_entry(scsi_qla_host_t *vha, struct req_que *req,
2321 struct logio_entry_24xx *logio)
2322{
2323 const char func[] = "LOGIO-IOCB";
2324 const char *type;
2325 fc_port_t *fcport;
2326 srb_t *sp;
2327 struct srb_iocb *lio;
2328 uint16_t *data;
2329 uint32_t iop[2];
2330 int logit = 1;
2331
2332 sp = qla2x00_get_sp_from_handle(vha, func, req, logio);
2333 if (!sp)
2334 return;
2335
2336 lio = &sp->u.iocb_cmd;
2337 type = sp->name;
2338 fcport = sp->fcport;
2339 data = lio->u.logio.data;
2340
2341 data[0] = MBS_COMMAND_ERROR;
2342 data[1] = lio->u.logio.flags & SRB_LOGIN_RETRIED ?
2343 QLA_LOGIO_LOGIN_RETRIED : 0;
2344 if (logio->entry_status) {
2345 ql_log(ql_log_warn, fcport->vha, 0x5034,
2346 "Async-%s error entry - %8phC hdl=%x "
2347 "portid=%02x%02x%02x entry-status=%x.\n",
2348 type, fcport->port_name, sp->handle, fcport->d_id.b.domain,
2349 fcport->d_id.b.area, fcport->d_id.b.al_pa,
2350 logio->entry_status);
2351 ql_dump_buffer(ql_dbg_async + ql_dbg_buffer, vha, 0x504d,
2352 logio, sizeof(*logio));
2353
2354 goto logio_done;
2355 }
2356
2357 if (le16_to_cpu(logio->comp_status) == CS_COMPLETE) {
2358 ql_dbg(ql_dbg_async, sp->vha, 0x5036,
2359 "Async-%s complete: handle=%x pid=%06x wwpn=%8phC iop0=%x\n",
2360 type, sp->handle, fcport->d_id.b24, fcport->port_name,
2361 le32_to_cpu(logio->io_parameter[0]));
2362
2363 vha->hw->exch_starvation = 0;
2364 data[0] = MBS_COMMAND_COMPLETE;
2365
2366 if (sp->type == SRB_PRLI_CMD) {
2367 lio->u.logio.iop[0] =
2368 le32_to_cpu(logio->io_parameter[0]);
2369 lio->u.logio.iop[1] =
2370 le32_to_cpu(logio->io_parameter[1]);
2371 goto logio_done;
2372 }
2373
2374 if (sp->type != SRB_LOGIN_CMD)
2375 goto logio_done;
2376
2377 lio->u.logio.iop[1] = le32_to_cpu(logio->io_parameter[5]);
2378 if (le32_to_cpu(logio->io_parameter[5]) & LIO_COMM_FEAT_FCSP)
2379 fcport->flags |= FCF_FCSP_DEVICE;
2380
2381 iop[0] = le32_to_cpu(logio->io_parameter[0]);
2382 if (iop[0] & BIT_4) {
2383 fcport->port_type = FCT_TARGET;
2384 if (iop[0] & BIT_8)
2385 fcport->flags |= FCF_FCP2_DEVICE;
2386 } else if (iop[0] & BIT_5)
2387 fcport->port_type = FCT_INITIATOR;
2388
2389 if (iop[0] & BIT_7)
2390 fcport->flags |= FCF_CONF_COMP_SUPPORTED;
2391
2392 if (logio->io_parameter[7] || logio->io_parameter[8])
2393 fcport->supported_classes |= FC_COS_CLASS2;
2394 if (logio->io_parameter[9] || logio->io_parameter[10])
2395 fcport->supported_classes |= FC_COS_CLASS3;
2396
2397 goto logio_done;
2398 }
2399
2400 iop[0] = le32_to_cpu(logio->io_parameter[0]);
2401 iop[1] = le32_to_cpu(logio->io_parameter[1]);
2402 lio->u.logio.iop[0] = iop[0];
2403 lio->u.logio.iop[1] = iop[1];
2404 switch (iop[0]) {
2405 case LSC_SCODE_PORTID_USED:
2406 data[0] = MBS_PORT_ID_USED;
2407 data[1] = LSW(iop[1]);
2408 logit = 0;
2409 break;
2410 case LSC_SCODE_NPORT_USED:
2411 data[0] = MBS_LOOP_ID_USED;
2412 logit = 0;
2413 break;
2414 case LSC_SCODE_CMD_FAILED:
2415 if (iop[1] == 0x0606) {
2416
2417
2418
2419
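 /*
  * Sub-status 0x0606 is reported when the remote port has already
  * completed the PLOGI/PRLI from its side; treat the login as successful.
  */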
2420 data[0] = MBS_COMMAND_COMPLETE;
2421 goto logio_done;
2422 }
2423 data[0] = MBS_COMMAND_ERROR;
2424 break;
2425 case LSC_SCODE_NOXCB:
2426 vha->hw->exch_starvation++;
2427 if (vha->hw->exch_starvation > 5) {
2428 ql_log(ql_log_warn, vha, 0xd046,
2429 "Exchange starvation. Resetting RISC\n");
2430
2431 vha->hw->exch_starvation = 0;
2432
2433 if (IS_P3P_TYPE(vha->hw))
2434 set_bit(FCOE_CTX_RESET_NEEDED, &vha->dpc_flags);
2435 else
2436 set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
2437 qla2xxx_wake_dpc(vha);
2438 }
2439 fallthrough;
2440 default:
2441 data[0] = MBS_COMMAND_ERROR;
2442 break;
2443 }
2444
2445 if (logit)
2446 ql_log(ql_log_warn, sp->vha, 0x5037, "Async-%s failed: "
2447 "handle=%x pid=%06x wwpn=%8phC comp_status=%x iop0=%x iop1=%x\n",
2448 type, sp->handle, fcport->d_id.b24, fcport->port_name,
2449 le16_to_cpu(logio->comp_status),
2450 le32_to_cpu(logio->io_parameter[0]),
2451 le32_to_cpu(logio->io_parameter[1]));
2452 else
2453 ql_dbg(ql_dbg_disc, sp->vha, 0x5037, "Async-%s failed: "
2454 "handle=%x pid=%06x wwpn=%8phC comp_status=%x iop0=%x iop1=%x\n",
2455 type, sp->handle, fcport->d_id.b24, fcport->port_name,
2456 le16_to_cpu(logio->comp_status),
2457 le32_to_cpu(logio->io_parameter[0]),
2458 le32_to_cpu(logio->io_parameter[1]));
2459
2460logio_done:
2461 sp->done(sp, 0);
2462}
2463
2464static void
2465qla24xx_tm_iocb_entry(scsi_qla_host_t *vha, struct req_que *req, void *tsk)
2466{
2467 const char func[] = "TMF-IOCB";
2468 const char *type;
2469 fc_port_t *fcport;
2470 srb_t *sp;
2471 struct srb_iocb *iocb;
2472 struct sts_entry_24xx *sts = (struct sts_entry_24xx *)tsk;
2473 u16 comp_status;
2474
2475 sp = qla2x00_get_sp_from_handle(vha, func, req, tsk);
2476 if (!sp)
2477 return;
2478
2479 comp_status = le16_to_cpu(sts->comp_status);
2480 iocb = &sp->u.iocb_cmd;
2481 type = sp->name;
2482 fcport = sp->fcport;
2483 iocb->u.tmf.data = QLA_SUCCESS;
2484
2485 if (sts->entry_status) {
2486 ql_log(ql_log_warn, fcport->vha, 0x5038,
2487 "Async-%s error - hdl=%x entry-status(%x).\n",
2488 type, sp->handle, sts->entry_status);
2489 iocb->u.tmf.data = QLA_FUNCTION_FAILED;
2490 } else if (sts->comp_status != cpu_to_le16(CS_COMPLETE)) {
2491 ql_log(ql_log_warn, fcport->vha, 0x5039,
2492 "Async-%s error - hdl=%x completion status(%x).\n",
2493 type, sp->handle, comp_status);
2494 iocb->u.tmf.data = QLA_FUNCTION_FAILED;
2495 } else if ((le16_to_cpu(sts->scsi_status) &
2496 SS_RESPONSE_INFO_LEN_VALID)) {
2497 if (le32_to_cpu(sts->rsp_data_len) < 4) {
2498 ql_log(ql_log_warn, fcport->vha, 0x503b,
2499 "Async-%s error - hdl=%x not enough response(%d).\n",
2500 type, sp->handle, sts->rsp_data_len);
2501 } else if (sts->data[3]) {
2502 ql_log(ql_log_warn, fcport->vha, 0x503c,
2503 "Async-%s error - hdl=%x response(%x).\n",
2504 type, sp->handle, sts->data[3]);
2505 iocb->u.tmf.data = QLA_FUNCTION_FAILED;
2506 }
2507 }
2508
2509 switch (comp_status) {
2510 case CS_PORT_LOGGED_OUT:
2511 case CS_PORT_CONFIG_CHG:
2512 case CS_PORT_BUSY:
2513 case CS_INCOMPLETE:
2514 case CS_PORT_UNAVAILABLE:
2515 case CS_TIMEOUT:
2516 case CS_RESET:
2517 if (atomic_read(&fcport->state) == FCS_ONLINE) {
2518 ql_dbg(ql_dbg_disc, fcport->vha, 0x3021,
2519 "-Port to be marked lost on fcport=%02x%02x%02x, current port state= %s comp_status %x.\n",
2520 fcport->d_id.b.domain, fcport->d_id.b.area,
2521 fcport->d_id.b.al_pa,
2522 port_state_str[FCS_ONLINE],
2523 comp_status);
2524
2525 qlt_schedule_sess_for_deletion(fcport);
2526 }
2527 break;
2528
2529 default:
2530 break;
2531 }
2532
2533 if (iocb->u.tmf.data != QLA_SUCCESS)
2534 ql_dump_buffer(ql_dbg_async + ql_dbg_buffer, sp->vha, 0x5055,
2535 sts, sizeof(*sts));
2536
2537 sp->done(sp, 0);
2538}
2539
2540static void qla24xx_nvme_iocb_entry(scsi_qla_host_t *vha, struct req_que *req,
2541 void *tsk, srb_t *sp)
2542{
2543 fc_port_t *fcport;
2544 struct srb_iocb *iocb;
2545 struct sts_entry_24xx *sts = (struct sts_entry_24xx *)tsk;
2546 uint16_t state_flags;
2547 struct nvmefc_fcp_req *fd;
2548 uint16_t ret = QLA_SUCCESS;
2549 __le16 comp_status = sts->comp_status;
2550 int logit = 0;
2551
2552 iocb = &sp->u.iocb_cmd;
2553 fcport = sp->fcport;
2554 iocb->u.nvme.comp_status = comp_status;
2555 state_flags = le16_to_cpu(sts->state_flags);
2556 fd = iocb->u.nvme.desc;
2557
2558 if (unlikely(iocb->u.nvme.aen_op))
2559 atomic_dec(&sp->vha->hw->nvme_active_aen_cnt);
2560 else
2561 sp->qpair->cmd_completion_cnt++;
2562
2563 if (unlikely(comp_status != CS_COMPLETE))
2564 logit = 1;
2565
2566 fd->transferred_length = fd->payload_length -
2567 le32_to_cpu(sts->residual_len);
2568
2569
2570
2571
2572
2573
2574
2575
2576
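 /*
  * Locate the NVMe response from the state flags: with neither
  * SF_FCP_RSP_DMA nor SF_NVME_ERSP set there is no response payload;
  * with both set the response was already DMA'd to the host buffer;
  * SF_FCP_RSP_DMA alone is unexpected; SF_NVME_ERSP alone means the
  * response must be copied out of the status IOCB below.
  */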
2577 if (!(state_flags & (SF_FCP_RSP_DMA | SF_NVME_ERSP))) {
2578 iocb->u.nvme.rsp_pyld_len = 0;
2579 } else if ((state_flags & (SF_FCP_RSP_DMA | SF_NVME_ERSP)) ==
2580 (SF_FCP_RSP_DMA | SF_NVME_ERSP)) {
2581
2582 iocb->u.nvme.rsp_pyld_len = sts->nvme_rsp_pyld_len;
2583 } else if ((state_flags & SF_FCP_RSP_DMA)) {
2584
2585
2586
2587
2588 iocb->u.nvme.rsp_pyld_len = 0;
2589 fd->transferred_length = 0;
2590 ql_dbg(ql_dbg_io, fcport->vha, 0x307a,
2591 "Unexpected values in NVMe_RSP IU.\n");
2592 logit = 1;
2593 } else if (state_flags & SF_NVME_ERSP) {
2594 uint32_t *inbuf, *outbuf;
2595 uint16_t iter;
2596
2597 inbuf = (uint32_t *)&sts->nvme_ersp_data;
2598 outbuf = (uint32_t *)fd->rspaddr;
2599 iocb->u.nvme.rsp_pyld_len = sts->nvme_rsp_pyld_len;
2600 if (unlikely(le16_to_cpu(iocb->u.nvme.rsp_pyld_len) >
2601 sizeof(struct nvme_fc_ersp_iu))) {
2602 if (ql_mask_match(ql_dbg_io)) {
2603 WARN_ONCE(1, "Unexpected response payload length %u.\n",
2604 iocb->u.nvme.rsp_pyld_len);
2605 ql_log(ql_log_warn, fcport->vha, 0x5100,
2606 "Unexpected response payload length %u.\n",
2607 iocb->u.nvme.rsp_pyld_len);
2608 }
2609 iocb->u.nvme.rsp_pyld_len =
2610 cpu_to_le16(sizeof(struct nvme_fc_ersp_iu));
2611 }
2612 iter = le16_to_cpu(iocb->u.nvme.rsp_pyld_len) >> 2;
2613 for (; iter; iter--)
2614 *outbuf++ = swab32(*inbuf++);
2615 }
2616
2617 if (state_flags & SF_NVME_ERSP) {
2618 struct nvme_fc_ersp_iu *rsp_iu = fd->rspaddr;
2619 u32 tgt_xfer_len;
2620
2621 tgt_xfer_len = be32_to_cpu(rsp_iu->xfrd_len);
2622 if (fd->transferred_length != tgt_xfer_len) {
2623 ql_log(ql_log_warn, fcport->vha, 0x3079,
2624 "Dropped frame(s) detected (sent/rcvd=%u/%u).\n",
2625 tgt_xfer_len, fd->transferred_length);
2626 logit = 1;
2627 } else if (le16_to_cpu(comp_status) == CS_DATA_UNDERRUN) {
2628
2629
2630
2631
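 /* An underrun with matching transfer counts is a normal short transfer; do not log it. */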
2632 logit = 0;
2633 }
2634 }
2635
2636 if (unlikely(logit))
2637 ql_log(ql_dbg_io, fcport->vha, 0x5060,
2638 "NVME-%s ERR Handling - hdl=%x status(%x) tr_len:%x resid=%x ox_id=%x\n",
2639 sp->name, sp->handle, comp_status,
2640 fd->transferred_length, le32_to_cpu(sts->residual_len),
2641 sts->ox_id);
2642
2643
2644
2645
2646
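 /* Map the firmware completion status onto the code returned to the NVMe-FC transport. */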
2647 switch (le16_to_cpu(comp_status)) {
2648 case CS_COMPLETE:
2649 break;
2650
2651 case CS_RESET:
2652 case CS_PORT_UNAVAILABLE:
2653 case CS_PORT_LOGGED_OUT:
2654 fcport->nvme_flag |= NVME_FLAG_RESETTING;
2655 if (atomic_read(&fcport->state) == FCS_ONLINE) {
2656 ql_dbg(ql_dbg_disc, fcport->vha, 0x3021,
2657 "Port to be marked lost on fcport=%06x, current "
2658 "port state= %s comp_status %x.\n",
2659 fcport->d_id.b24, port_state_str[FCS_ONLINE],
2660 comp_status);
2661
2662 qlt_schedule_sess_for_deletion(fcport);
2663 }
2664 fallthrough;
2665 case CS_ABORTED:
2666 case CS_PORT_BUSY:
2667 fd->transferred_length = 0;
2668 iocb->u.nvme.rsp_pyld_len = 0;
2669 ret = QLA_ABORTED;
2670 break;
2671 case CS_DATA_UNDERRUN:
2672 break;
2673 default:
2674 ret = QLA_FUNCTION_FAILED;
2675 break;
2676 }
2677 sp->done(sp, ret);
2678}
2679
2680static void qla_ctrlvp_completed(scsi_qla_host_t *vha, struct req_que *req,
2681 struct vp_ctrl_entry_24xx *vce)
2682{
2683 const char func[] = "CTRLVP-IOCB";
2684 srb_t *sp;
2685 int rval = QLA_SUCCESS;
2686
2687 sp = qla2x00_get_sp_from_handle(vha, func, req, vce);
2688 if (!sp)
2689 return;
2690
2691 if (vce->entry_status != 0) {
2692 ql_dbg(ql_dbg_vport, vha, 0x10c4,
2693 "%s: Failed to complete IOCB -- error status (%x)\n",
2694 sp->name, vce->entry_status);
2695 rval = QLA_FUNCTION_FAILED;
2696 } else if (vce->comp_status != cpu_to_le16(CS_COMPLETE)) {
2697 ql_dbg(ql_dbg_vport, vha, 0x10c5,
2698 "%s: Failed to complete IOCB -- completion status (%x) vpidx %x\n",
2699 sp->name, le16_to_cpu(vce->comp_status),
2700 le16_to_cpu(vce->vp_idx_failed));
2701 rval = QLA_FUNCTION_FAILED;
2702 } else {
2703 ql_dbg(ql_dbg_vport, vha, 0x10c6,
2704 "Done %s.\n", __func__);
2705 }
2706
2707 sp->rc = rval;
2708 sp->done(sp, rval);
2709}
2710
2711
2712static void qla2x00_process_response_entry(struct scsi_qla_host *vha,
2713 struct rsp_que *rsp,
2714 sts_entry_t *pkt)
2715{
2716 sts21_entry_t *sts21_entry;
2717 sts22_entry_t *sts22_entry;
2718 uint16_t handle_cnt;
2719 uint16_t cnt;
2720
2721 switch (pkt->entry_type) {
2722 case STATUS_TYPE:
2723 qla2x00_status_entry(vha, rsp, pkt);
2724 break;
2725 case STATUS_TYPE_21:
2726 sts21_entry = (sts21_entry_t *)pkt;
2727 handle_cnt = sts21_entry->handle_count;
2728 for (cnt = 0; cnt < handle_cnt; cnt++)
2729 qla2x00_process_completed_request(vha, rsp->req,
2730 sts21_entry->handle[cnt]);
2731 break;
2732 case STATUS_TYPE_22:
2733 sts22_entry = (sts22_entry_t *)pkt;
2734 handle_cnt = sts22_entry->handle_count;
2735 for (cnt = 0; cnt < handle_cnt; cnt++)
2736 qla2x00_process_completed_request(vha, rsp->req,
2737 sts22_entry->handle[cnt]);
2738 break;
2739 case STATUS_CONT_TYPE:
2740 qla2x00_status_cont_entry(rsp, (sts_cont_entry_t *)pkt);
2741 break;
2742 case MBX_IOCB_TYPE:
2743 qla2x00_mbx_iocb_entry(vha, rsp->req, (struct mbx_entry *)pkt);
2744 break;
2745 case CT_IOCB_TYPE:
2746 qla2x00_ct_entry(vha, rsp->req, pkt, CT_IOCB_TYPE);
2747 break;
2748 default:
2749
2750 ql_log(ql_log_warn, vha, 0x504a,
2751 "Received unknown response pkt type %x entry status=%x.\n",
2752 pkt->entry_type, pkt->entry_status);
2753 break;
2754 }
2755}
2756
2757
2758
2759
2760
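/**
 * qla2x00_process_response_queue() - Process response queue entries.
 * @rsp: response queue
 */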
2761void
2762qla2x00_process_response_queue(struct rsp_que *rsp)
2763{
2764 struct scsi_qla_host *vha;
2765 struct qla_hw_data *ha = rsp->hw;
2766 struct device_reg_2xxx __iomem *reg = &ha->iobase->isp;
2767 sts_entry_t *pkt;
2768
2769 vha = pci_get_drvdata(ha->pdev);
2770
2771 if (!vha->flags.online)
2772 return;
2773
2774 while (rsp->ring_ptr->signature != RESPONSE_PROCESSED) {
2775 pkt = (sts_entry_t *)rsp->ring_ptr;
2776
2777 rsp->ring_index++;
2778 if (rsp->ring_index == rsp->length) {
2779 rsp->ring_index = 0;
2780 rsp->ring_ptr = rsp->ring;
2781 } else {
2782 rsp->ring_ptr++;
2783 }
2784
2785 if (pkt->entry_status != 0) {
2786 qla2x00_error_entry(vha, rsp, pkt);
2787 ((response_t *)pkt)->signature = RESPONSE_PROCESSED;
2788 wmb();
2789 continue;
2790 }
2791
2792 qla2x00_process_response_entry(vha, rsp, pkt);
2793 ((response_t *)pkt)->signature = RESPONSE_PROCESSED;
2794 wmb();
2795 }
2796
2797
2798 wrt_reg_word(ISP_RSP_Q_OUT(ha, reg), rsp->ring_index);
2799}
2800
2801static inline void
2802qla2x00_handle_sense(srb_t *sp, uint8_t *sense_data, uint32_t par_sense_len,
2803 uint32_t sense_len, struct rsp_que *rsp, int res)
2804{
2805 struct scsi_qla_host *vha = sp->vha;
2806 struct scsi_cmnd *cp = GET_CMD_SP(sp);
2807 uint32_t track_sense_len;
2808
2809 if (sense_len >= SCSI_SENSE_BUFFERSIZE)
2810 sense_len = SCSI_SENSE_BUFFERSIZE;
2811
2812 SET_CMD_SENSE_LEN(sp, sense_len);
2813 SET_CMD_SENSE_PTR(sp, cp->sense_buffer);
2814 track_sense_len = sense_len;
2815
2816 if (sense_len > par_sense_len)
2817 sense_len = par_sense_len;
2818
2819 memcpy(cp->sense_buffer, sense_data, sense_len);
2820
2821 SET_CMD_SENSE_PTR(sp, cp->sense_buffer + sense_len);
2822 track_sense_len -= sense_len;
2823 SET_CMD_SENSE_LEN(sp, track_sense_len);
2824
2825 if (track_sense_len != 0) {
2826 rsp->status_srb = sp;
2827 cp->result = res;
2828 }
2829
2830 if (sense_len) {
2831 ql_dbg(ql_dbg_io + ql_dbg_buffer, vha, 0x301c,
2832 "Check condition Sense data, nexus%ld:%d:%llu cmd=%p.\n",
2833 sp->vha->host_no, cp->device->id, cp->device->lun,
2834 cp);
2835 ql_dump_buffer(ql_dbg_io + ql_dbg_buffer, vha, 0x302b,
2836 cp->sense_buffer, sense_len);
2837 }
2838}
2839
2840struct scsi_dif_tuple {
2841 __be16 guard;
2842 __be16 app_tag;
2843 __be32 ref_tag;
2844};
2845
2846
2847
2848
2849
2850
2851
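/*
 * Compare the guard, application and reference tags returned by the firmware
 * with the expected values and set the command result accordingly.  Returns 1
 * when a genuine tag mismatch was found, 0 when the escape tags mark the
 * partially completed transfer as good.
 */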
2852static inline int
2853qla2x00_handle_dif_error(srb_t *sp, struct sts_entry_24xx *sts24)
2854{
2855 struct scsi_qla_host *vha = sp->vha;
2856 struct scsi_cmnd *cmd = GET_CMD_SP(sp);
2857 uint8_t *ap = &sts24->data[12];
2858 uint8_t *ep = &sts24->data[20];
2859 uint32_t e_ref_tag, a_ref_tag;
2860 uint16_t e_app_tag, a_app_tag;
2861 uint16_t e_guard, a_guard;
2862
2863
2864
2865
2866
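 /* Each tag tuple is little-endian in the IOCB: app tag at offset 0, guard at offset 2, ref tag at offset 4. */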
2867 a_guard = get_unaligned_le16(ap + 2);
2868 a_app_tag = get_unaligned_le16(ap + 0);
2869 a_ref_tag = get_unaligned_le32(ap + 4);
2870 e_guard = get_unaligned_le16(ep + 2);
2871 e_app_tag = get_unaligned_le16(ep + 0);
2872 e_ref_tag = get_unaligned_le32(ep + 4);
2873
2874 ql_dbg(ql_dbg_io, vha, 0x3023,
2875 "iocb(s) %p Returned STATUS.\n", sts24);
2876
2877 ql_dbg(ql_dbg_io, vha, 0x3024,
2878 "DIF ERROR in cmd 0x%x lba 0x%llx act ref"
2879 " tag=0x%x, exp ref_tag=0x%x, act app tag=0x%x, exp app"
2880 " tag=0x%x, act guard=0x%x, exp guard=0x%x.\n",
2881 cmd->cmnd[0], (u64)scsi_get_lba(cmd), a_ref_tag, e_ref_tag,
2882 a_app_tag, e_app_tag, a_guard, e_guard);
2883
2884
2885
2886
2887
2888
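 /*
  * Ignore the mismatch when the returned tags are the T10-PI escape
  * values: for Type 3 both the app and ref tags must be all ones, for
  * the other types only the app tag.  The blocks transferred so far
  * are good, so report a residual instead of an error.
  */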
2889 if (a_app_tag == be16_to_cpu(T10_PI_APP_ESCAPE) &&
2890 (scsi_get_prot_type(cmd) != SCSI_PROT_DIF_TYPE3 ||
2891 a_ref_tag == be32_to_cpu(T10_PI_REF_ESCAPE))) {
2892 uint32_t blocks_done, resid;
2893 sector_t lba_s = scsi_get_lba(cmd);
2894
2895
2896 blocks_done = e_ref_tag - (uint32_t)lba_s + 1;
2897
2898 resid = scsi_bufflen(cmd) - (blocks_done *
2899 cmd->device->sector_size);
2900
2901 scsi_set_resid(cmd, resid);
2902 cmd->result = DID_OK << 16;
2903
2904
2905 if (scsi_prot_sg_count(cmd)) {
2906 uint32_t i, j = 0, k = 0, num_ent;
2907 struct scatterlist *sg;
2908 struct t10_pi_tuple *spt;
2909
2910
2911 scsi_for_each_prot_sg(cmd, sg,
2912 scsi_prot_sg_count(cmd), i) {
2913 num_ent = sg_dma_len(sg) / 8;
2914 if (k + num_ent < blocks_done) {
2915 k += num_ent;
2916 continue;
2917 }
2918 j = blocks_done - k - 1;
2919 k = blocks_done;
2920 break;
2921 }
2922
2923 if (k != blocks_done) {
2924 ql_log(ql_log_warn, vha, 0x302f,
2925 "unexpected tag values tag:lba=%x:%llx\n",
2926 e_ref_tag, (unsigned long long)lba_s);
2927 return 1;
2928 }
2929
2930 spt = page_address(sg_page(sg)) + sg->offset;
2931 spt += j;
2932
2933 spt->app_tag = T10_PI_APP_ESCAPE;
2934 if (scsi_get_prot_type(cmd) == SCSI_PROT_DIF_TYPE3)
2935 spt->ref_tag = T10_PI_REF_ESCAPE;
2936 }
2937
2938 return 0;
2939 }
2940
2941
2942 if (e_guard != a_guard) {
2943 scsi_build_sense(cmd, 1, ILLEGAL_REQUEST, 0x10, 0x1);
2944 set_host_byte(cmd, DID_ABORT);
2945 return 1;
2946 }
2947
2948
2949 if (e_ref_tag != a_ref_tag) {
2950 scsi_build_sense(cmd, 1, ILLEGAL_REQUEST, 0x10, 0x3);
2951 set_host_byte(cmd, DID_ABORT);
2952 return 1;
2953 }
2954
2955
2956 if (e_app_tag != a_app_tag) {
2957 scsi_build_sense(cmd, 1, ILLEGAL_REQUEST, 0x10, 0x2);
2958 set_host_byte(cmd, DID_ABORT);
2959 return 1;
2960 }
2961
2962 return 1;
2963}
2964
2965static void
2966qla25xx_process_bidir_status_iocb(scsi_qla_host_t *vha, void *pkt,
2967 struct req_que *req, uint32_t index)
2968{
2969 struct qla_hw_data *ha = vha->hw;
2970 srb_t *sp;
2971 uint16_t comp_status;
2972 uint16_t scsi_status;
2973 uint16_t thread_id;
2974 uint32_t rval = EXT_STATUS_OK;
2975 struct bsg_job *bsg_job = NULL;
2976 struct fc_bsg_request *bsg_request;
2977 struct fc_bsg_reply *bsg_reply;
2978 sts_entry_t *sts = pkt;
2979 struct sts_entry_24xx *sts24 = pkt;
2980
2981
2982 if (index >= req->num_outstanding_cmds) {
2983 ql_log(ql_log_warn, vha, 0x70af,
2984 "Invalid SCSI completion handle 0x%x.\n", index);
2985 set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
2986 return;
2987 }
2988
2989 sp = req->outstanding_cmds[index];
2990 if (!sp) {
2991 ql_log(ql_log_warn, vha, 0x70b0,
2992 "Req:%d: Invalid ISP SCSI completion handle(0x%x)\n",
2993 req->id, index);
2994
2995 set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
2996 return;
2997 }
2998
2999
3000 req->outstanding_cmds[index] = NULL;
3001 bsg_job = sp->u.bsg_job;
3002 bsg_request = bsg_job->request;
3003 bsg_reply = bsg_job->reply;
3004
3005 if (IS_FWI2_CAPABLE(ha)) {
3006 comp_status = le16_to_cpu(sts24->comp_status);
3007 scsi_status = le16_to_cpu(sts24->scsi_status) & SS_MASK;
3008 } else {
3009 comp_status = le16_to_cpu(sts->comp_status);
3010 scsi_status = le16_to_cpu(sts->scsi_status) & SS_MASK;
3011 }
3012
3013 thread_id = bsg_request->rqst_data.h_vendor.vendor_cmd[1];
3014 switch (comp_status) {
3015 case CS_COMPLETE:
3016 if (scsi_status == 0) {
3017 bsg_reply->reply_payload_rcv_len =
3018 bsg_job->reply_payload.payload_len;
3019 vha->qla_stats.input_bytes +=
3020 bsg_reply->reply_payload_rcv_len;
3021 vha->qla_stats.input_requests++;
3022 rval = EXT_STATUS_OK;
3023 }
3024 goto done;
3025
3026 case CS_DATA_OVERRUN:
3027 ql_dbg(ql_dbg_user, vha, 0x70b1,
3028 "Command completed with data overrun thread_id=%d\n",
3029 thread_id);
3030 rval = EXT_STATUS_DATA_OVERRUN;
3031 break;
3032
3033 case CS_DATA_UNDERRUN:
3034 ql_dbg(ql_dbg_user, vha, 0x70b2,
3035 "Command completed with data underrun thread_id=%d\n",
3036 thread_id);
3037 rval = EXT_STATUS_DATA_UNDERRUN;
3038 break;
3039 case CS_BIDIR_RD_OVERRUN:
3040 ql_dbg(ql_dbg_user, vha, 0x70b3,
3041 "Command completed with read data overrun thread_id=%d\n",
3042 thread_id);
3043 rval = EXT_STATUS_DATA_OVERRUN;
3044 break;
3045
3046 case CS_BIDIR_RD_WR_OVERRUN:
3047 ql_dbg(ql_dbg_user, vha, 0x70b4,
3048 "Command completed with read and write data overrun "
3049 "thread_id=%d\n", thread_id);
3050 rval = EXT_STATUS_DATA_OVERRUN;
3051 break;
3052
3053 case CS_BIDIR_RD_OVERRUN_WR_UNDERRUN:
3054 ql_dbg(ql_dbg_user, vha, 0x70b5,
3055 "Command completed with read data over and write data "
3056 "underrun thread_id=%d\n", thread_id);
3057 rval = EXT_STATUS_DATA_OVERRUN;
3058 break;
3059
3060 case CS_BIDIR_RD_UNDERRUN:
3061 ql_dbg(ql_dbg_user, vha, 0x70b6,
3062 "Command completed with read data underrun "
3063 "thread_id=%d\n", thread_id);
3064 rval = EXT_STATUS_DATA_UNDERRUN;
3065 break;
3066
3067 case CS_BIDIR_RD_UNDERRUN_WR_OVERRUN:
3068 ql_dbg(ql_dbg_user, vha, 0x70b7,
3069 "Command completed with read data under and write data "
3070 "overrun thread_id=%d\n", thread_id);
3071 rval = EXT_STATUS_DATA_UNDERRUN;
3072 break;
3073
3074 case CS_BIDIR_RD_WR_UNDERRUN:
3075 ql_dbg(ql_dbg_user, vha, 0x70b8,
3076 "Command completed with read and write data underrun "
3077 "thread_id=%d\n", thread_id);
3078 rval = EXT_STATUS_DATA_UNDERRUN;
3079 break;
3080
3081 case CS_BIDIR_DMA:
3082 ql_dbg(ql_dbg_user, vha, 0x70b9,
3083 "Command completed with data DMA error thread_id=%d\n",
3084 thread_id);
3085 rval = EXT_STATUS_DMA_ERR;
3086 break;
3087
3088 case CS_TIMEOUT:
3089 ql_dbg(ql_dbg_user, vha, 0x70ba,
3090 "Command completed with timeout thread_id=%d\n",
3091 thread_id);
3092 rval = EXT_STATUS_TIMEOUT;
3093 break;
3094 default:
3095 ql_dbg(ql_dbg_user, vha, 0x70bb,
3096 "Command completed with completion status=0x%x "
3097 "thread_id=%d\n", comp_status, thread_id);
3098 rval = EXT_STATUS_ERR;
3099 break;
3100 }
3101 bsg_reply->reply_payload_rcv_len = 0;
3102
3103done:
3104
3105 bsg_reply->reply_data.vendor_reply.vendor_rsp[0] = rval;
3106 bsg_job->reply_len = sizeof(struct fc_bsg_reply);
3107
3108
3109 sp->done(sp, DID_OK << 16);
3110
3111}
3112
3113
3114
3115
3116
3117
3118
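/**
 * qla2x00_status_entry() - Process a Status IOCB entry.
 * @vha: SCSI driver HA context
 * @rsp: response queue
 * @pkt: Entry pointer
 */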
3119static void
3120qla2x00_status_entry(scsi_qla_host_t *vha, struct rsp_que *rsp, void *pkt)
3121{
3122 srb_t *sp;
3123 fc_port_t *fcport;
3124 struct scsi_cmnd *cp;
3125 sts_entry_t *sts = pkt;
3126 struct sts_entry_24xx *sts24 = pkt;
3127 uint16_t comp_status;
3128 uint16_t scsi_status;
3129 uint16_t ox_id;
3130 uint8_t lscsi_status;
3131 int32_t resid;
3132 uint32_t sense_len, par_sense_len, rsp_info_len, resid_len,
3133 fw_resid_len;
3134 uint8_t *rsp_info, *sense_data;
3135 struct qla_hw_data *ha = vha->hw;
3136 uint32_t handle;
3137 uint16_t que;
3138 struct req_que *req;
3139 int logit = 1;
3140 int res = 0;
3141 uint16_t state_flags = 0;
3142 uint16_t sts_qual = 0;
3143
3144 if (IS_FWI2_CAPABLE(ha)) {
3145 comp_status = le16_to_cpu(sts24->comp_status);
3146 scsi_status = le16_to_cpu(sts24->scsi_status) & SS_MASK;
3147 state_flags = le16_to_cpu(sts24->state_flags);
3148 } else {
3149 comp_status = le16_to_cpu(sts->comp_status);
3150 scsi_status = le16_to_cpu(sts->scsi_status) & SS_MASK;
3151 }
3152 handle = (uint32_t) LSW(sts->handle);
3153 que = MSW(sts->handle);
3154 req = ha->req_q_map[que];
3155
3156
3157 if (req == NULL ||
3158 que >= find_first_zero_bit(ha->req_qid_map, ha->max_req_queues)) {
3159 ql_dbg(ql_dbg_io, vha, 0x3059,
3160 "Invalid status handle (0x%x): Bad req pointer. req=%p, "
3161 "que=%u.\n", sts->handle, req, que);
3162 return;
3163 }
3164
3165
3166 if (handle < req->num_outstanding_cmds) {
3167 sp = req->outstanding_cmds[handle];
3168 if (!sp) {
3169 ql_dbg(ql_dbg_io, vha, 0x3075,
3170 "%s(%ld): Already returned command for status handle (0x%x).\n",
3171 __func__, vha->host_no, sts->handle);
3172 return;
3173 }
3174 } else {
3175 ql_dbg(ql_dbg_io, vha, 0x3017,
3176 "Invalid status handle, out of range (0x%x).\n",
3177 sts->handle);
3178
3179 if (!test_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags)) {
3180 if (IS_P3P_TYPE(ha))
3181 set_bit(FCOE_CTX_RESET_NEEDED, &vha->dpc_flags);
3182 else
3183 set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
3184 qla2xxx_wake_dpc(vha);
3185 }
3186 return;
3187 }
3188 qla_put_iocbs(sp->qpair, &sp->iores);
3189
3190 if (sp->cmd_type != TYPE_SRB) {
3191 req->outstanding_cmds[handle] = NULL;
3192 ql_dbg(ql_dbg_io, vha, 0x3015,
3193 "Unknown sp->cmd_type %x %p).\n",
3194 sp->cmd_type, sp);
3195 return;
3196 }
3197
3198
3199 if (sp->type == SRB_NVME_CMD) {
3200 req->outstanding_cmds[handle] = NULL;
3201 qla24xx_nvme_iocb_entry(vha, req, pkt, sp);
3202 return;
3203 }
3204
3205 if (unlikely((state_flags & BIT_1) && (sp->type == SRB_BIDI_CMD))) {
3206 qla25xx_process_bidir_status_iocb(vha, pkt, req, handle);
3207 return;
3208 }
3209
3210
3211 if (sp->type == SRB_TM_CMD) {
3212 qla24xx_tm_iocb_entry(vha, req, pkt);
3213 return;
3214 }
3215
3216
3217 qla_chk_edif_rx_sa_delete_pending(vha, sp, sts24);
3218 sp->qpair->cmd_completion_cnt++;
3219
3220 if (comp_status == CS_COMPLETE && scsi_status == 0) {
3221 qla2x00_process_completed_request(vha, req, handle);
3222
3223 return;
3224 }
3225
3226 req->outstanding_cmds[handle] = NULL;
3227 cp = GET_CMD_SP(sp);
3228 if (cp == NULL) {
3229 ql_dbg(ql_dbg_io, vha, 0x3018,
3230 "Command already returned (0x%x/%p).\n",
3231 sts->handle, sp);
3232
3233 return;
3234 }
3235
3236 lscsi_status = scsi_status & STATUS_MASK;
3237
3238 fcport = sp->fcport;
3239
3240 ox_id = 0;
3241 sense_len = par_sense_len = rsp_info_len = resid_len =
3242 fw_resid_len = 0;
3243 if (IS_FWI2_CAPABLE(ha)) {
3244 if (scsi_status & SS_SENSE_LEN_VALID)
3245 sense_len = le32_to_cpu(sts24->sense_len);
3246 if (scsi_status & SS_RESPONSE_INFO_LEN_VALID)
3247 rsp_info_len = le32_to_cpu(sts24->rsp_data_len);
3248 if (scsi_status & (SS_RESIDUAL_UNDER | SS_RESIDUAL_OVER))
3249 resid_len = le32_to_cpu(sts24->rsp_residual_count);
3250 if (comp_status == CS_DATA_UNDERRUN)
3251 fw_resid_len = le32_to_cpu(sts24->residual_len);
3252 rsp_info = sts24->data;
3253 sense_data = sts24->data;
3254 host_to_fcp_swap(sts24->data, sizeof(sts24->data));
3255 ox_id = le16_to_cpu(sts24->ox_id);
3256 par_sense_len = sizeof(sts24->data);
3257 sts_qual = le16_to_cpu(sts24->status_qualifier);
3258 } else {
3259 if (scsi_status & SS_SENSE_LEN_VALID)
3260 sense_len = le16_to_cpu(sts->req_sense_length);
3261 if (scsi_status & SS_RESPONSE_INFO_LEN_VALID)
3262 rsp_info_len = le16_to_cpu(sts->rsp_info_len);
3263 resid_len = le32_to_cpu(sts->residual_length);
3264 rsp_info = sts->rsp_info;
3265 sense_data = sts->req_sense_data;
3266 par_sense_len = sizeof(sts->req_sense_data);
3267 }
3268
3269
3270 if (scsi_status & SS_RESPONSE_INFO_LEN_VALID) {
3271
3272 if (IS_FWI2_CAPABLE(ha)) {
3273 sense_data += rsp_info_len;
3274 par_sense_len -= rsp_info_len;
3275 }
3276 if (rsp_info_len > 3 && rsp_info[3]) {
3277 ql_dbg(ql_dbg_io, fcport->vha, 0x3019,
3278 "FCP I/O protocol failure (0x%x/0x%x).\n",
3279 rsp_info_len, rsp_info[3]);
3280
3281 res = DID_BUS_BUSY << 16;
3282 goto out;
3283 }
3284 }
3285
3286
3287 if (IS_FWI2_CAPABLE(ha) && comp_status == CS_COMPLETE &&
3288 scsi_status & SS_RESIDUAL_OVER)
3289 comp_status = CS_DATA_OVERRUN;
3290
3291
3292
3293
3294
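 /* TASK SET FULL and BUSY carry a retry-delay qualifier; record it for this port. */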
3295 if (unlikely(lscsi_status == SAM_STAT_TASK_SET_FULL ||
3296 lscsi_status == SAM_STAT_BUSY))
3297 qla2x00_set_retry_delay_timestamp(fcport, sts_qual);
3298
3299
3300
3301
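 /* Translate the firmware completion status and SCSI status into a midlayer result. */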
3302 switch (comp_status) {
3303 case CS_COMPLETE:
3304 case CS_QUEUE_FULL:
3305 if (scsi_status == 0) {
3306 res = DID_OK << 16;
3307 break;
3308 }
3309 if (scsi_status & (SS_RESIDUAL_UNDER | SS_RESIDUAL_OVER)) {
3310 resid = resid_len;
3311 scsi_set_resid(cp, resid);
3312
3313 if (!lscsi_status &&
3314 ((unsigned)(scsi_bufflen(cp) - resid) <
3315 cp->underflow)) {
3316 ql_dbg(ql_dbg_io, fcport->vha, 0x301a,
3317 "Mid-layer underflow detected (0x%x of 0x%x bytes).\n",
3318 resid, scsi_bufflen(cp));
3319
3320 res = DID_ERROR << 16;
3321 break;
3322 }
3323 }
3324 res = DID_OK << 16 | lscsi_status;
3325
3326 if (lscsi_status == SAM_STAT_TASK_SET_FULL) {
3327 ql_dbg(ql_dbg_io, fcport->vha, 0x301b,
3328 "QUEUE FULL detected.\n");
3329 break;
3330 }
3331 logit = 0;
3332 if (lscsi_status != SS_CHECK_CONDITION)
3333 break;
3334
3335 memset(cp->sense_buffer, 0, SCSI_SENSE_BUFFERSIZE);
3336 if (!(scsi_status & SS_SENSE_LEN_VALID))
3337 break;
3338
3339 qla2x00_handle_sense(sp, sense_data, par_sense_len, sense_len,
3340 rsp, res);
3341 break;
3342
3343 case CS_DATA_UNDERRUN:
3344
3345 resid = IS_FWI2_CAPABLE(ha) ? fw_resid_len : resid_len;
3346 scsi_set_resid(cp, resid);
3347 if (scsi_status & SS_RESIDUAL_UNDER) {
3348 if (IS_FWI2_CAPABLE(ha) && fw_resid_len != resid_len) {
3349 ql_log(ql_log_warn, fcport->vha, 0x301d,
3350 "Dropped frame(s) detected (0x%x of 0x%x bytes).\n",
3351 resid, scsi_bufflen(cp));
3352
3353 vha->interface_err_cnt++;
3354
3355 res = DID_ERROR << 16 | lscsi_status;
3356 goto check_scsi_status;
3357 }
3358
3359 if (!lscsi_status &&
3360 ((unsigned)(scsi_bufflen(cp) - resid) <
3361 cp->underflow)) {
3362 ql_dbg(ql_dbg_io, fcport->vha, 0x301e,
3363 "Mid-layer underflow detected (0x%x of 0x%x bytes).\n",
3364 resid, scsi_bufflen(cp));
3365
3366 res = DID_ERROR << 16;
3367 break;
3368 }
3369 } else if (lscsi_status != SAM_STAT_TASK_SET_FULL &&
3370 lscsi_status != SAM_STAT_BUSY) {
3371
3372
3373
3374
3375
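 /* Underrun without the residual-under flag and without a TASK SET FULL/BUSY status means frames were dropped; fail the command. */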
3376 ql_log(ql_log_warn, fcport->vha, 0x301f,
3377 "Dropped frame(s) detected (0x%x of 0x%x bytes).\n",
3378 resid, scsi_bufflen(cp));
3379
3380 vha->interface_err_cnt++;
3381
3382 res = DID_ERROR << 16 | lscsi_status;
3383 goto check_scsi_status;
3384 } else {
3385 ql_dbg(ql_dbg_io, fcport->vha, 0x3030,
3386 "scsi_status: 0x%x, lscsi_status: 0x%x\n",
3387 scsi_status, lscsi_status);
3388 }
3389
3390 res = DID_OK << 16 | lscsi_status;
3391 logit = 0;
3392
3393check_scsi_status:
3394
3395
3396
3397
3398 if (lscsi_status != 0) {
3399 if (lscsi_status == SAM_STAT_TASK_SET_FULL) {
3400 ql_dbg(ql_dbg_io, fcport->vha, 0x3020,
3401 "QUEUE FULL detected.\n");
3402 logit = 1;
3403 break;
3404 }
3405 if (lscsi_status != SS_CHECK_CONDITION)
3406 break;
3407
3408 memset(cp->sense_buffer, 0, SCSI_SENSE_BUFFERSIZE);
3409 if (!(scsi_status & SS_SENSE_LEN_VALID))
3410 break;
3411
3412 qla2x00_handle_sense(sp, sense_data, par_sense_len,
3413 sense_len, rsp, res);
3414 }
3415 break;
3416
3417 case CS_PORT_LOGGED_OUT:
3418 case CS_PORT_CONFIG_CHG:
3419 case CS_PORT_BUSY:
3420 case CS_INCOMPLETE:
3421 case CS_PORT_UNAVAILABLE:
3422 case CS_TIMEOUT:
3423 case CS_RESET:
3424
3425
3426
3427
3428
3429
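 /*
  * The remote port is being recovered; return DID_TRANSPORT_DISRUPTED
  * so the midlayer requeues the command while the session is torn down
  * and re-established.
  */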
3430 res = DID_TRANSPORT_DISRUPTED << 16;
3431
3432 if (comp_status == CS_TIMEOUT) {
3433 if (IS_FWI2_CAPABLE(ha))
3434 break;
3435 else if ((le16_to_cpu(sts->status_flags) &
3436 SF_LOGOUT_SENT) == 0)
3437 break;
3438 }
3439
3440 if (atomic_read(&fcport->state) == FCS_ONLINE) {
3441 ql_dbg(ql_dbg_disc, fcport->vha, 0x3021,
3442 "Port to be marked lost on fcport=%02x%02x%02x, current "
3443 "port state= %s comp_status %x.\n", fcport->d_id.b.domain,
3444 fcport->d_id.b.area, fcport->d_id.b.al_pa,
3445 port_state_str[FCS_ONLINE],
3446 comp_status);
3447
3448 qlt_schedule_sess_for_deletion(fcport);
3449 }
3450
3451 break;
3452
3453 case CS_ABORTED:
3454 res = DID_RESET << 16;
3455 break;
3456
3457 case CS_DIF_ERROR:
3458 logit = qla2x00_handle_dif_error(sp, sts24);
3459 res = cp->result;
3460 break;
3461
3462 case CS_TRANSPORT:
3463 res = DID_ERROR << 16;
3464 vha->hw_err_cnt++;
3465
3466 if (!IS_PI_SPLIT_DET_CAPABLE(ha))
3467 break;
3468
3469 if (state_flags & BIT_4)
3470 scmd_printk(KERN_WARNING, cp,
3471 "Unsupported device '%s' found.\n",
3472 cp->device->vendor);
3473 break;
3474
3475 case CS_DMA:
3476 ql_log(ql_log_info, fcport->vha, 0x3022,
3477 "CS_DMA error: 0x%x-0x%x (0x%x) nexus=%ld:%d:%llu portid=%06x oxid=0x%x cdb=%10phN len=0x%x rsp_info=0x%x resid=0x%x fw_resid=0x%x sp=%p cp=%p.\n",
3478 comp_status, scsi_status, res, vha->host_no,
3479 cp->device->id, cp->device->lun, fcport->d_id.b24,
3480 ox_id, cp->cmnd, scsi_bufflen(cp), rsp_info_len,
3481 resid_len, fw_resid_len, sp, cp);
3482 ql_dump_buffer(ql_dbg_tgt + ql_dbg_verbose, vha, 0xe0ee,
3483 pkt, sizeof(*sts24));
3484 res = DID_ERROR << 16;
3485 vha->hw_err_cnt++;
3486 break;
3487 default:
3488 res = DID_ERROR << 16;
3489 break;
3490 }
3491
3492out:
3493 if (logit)
3494 ql_log(ql_dbg_io, fcport->vha, 0x3022,
3495 "FCP command status: 0x%x-0x%x (0x%x) nexus=%ld:%d:%llu portid=%02x%02x%02x oxid=0x%x cdb=%10phN len=0x%x rsp_info=0x%x resid=0x%x fw_resid=0x%x sp=%p cp=%p.\n",
3496 comp_status, scsi_status, res, vha->host_no,
3497 cp->device->id, cp->device->lun, fcport->d_id.b.domain,
3498 fcport->d_id.b.area, fcport->d_id.b.al_pa, ox_id,
3499 cp->cmnd, scsi_bufflen(cp), rsp_info_len,
3500 resid_len, fw_resid_len, sp, cp);
3501
3502 if (rsp->status_srb == NULL)
3503 sp->done(sp, res);
3504}
3505
3506
3507
3508
3509
3510
3511
3512
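/**
 * qla2x00_status_cont_entry() - Process a Status Continuations entry.
 * @rsp: response queue
 * @pkt: Entry pointer
 *
 * Copies the next chunk of sense data for the command tracked in
 * rsp->status_srb.
 */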
3513static void
3514qla2x00_status_cont_entry(struct rsp_que *rsp, sts_cont_entry_t *pkt)
3515{
3516 uint8_t sense_sz = 0;
3517 struct qla_hw_data *ha = rsp->hw;
3518 struct scsi_qla_host *vha = pci_get_drvdata(ha->pdev);
3519 srb_t *sp = rsp->status_srb;
3520 struct scsi_cmnd *cp;
3521 uint32_t sense_len;
3522 uint8_t *sense_ptr;
3523
3524 if (!sp || !GET_CMD_SENSE_LEN(sp))
3525 return;
3526
3527 sense_len = GET_CMD_SENSE_LEN(sp);
3528 sense_ptr = GET_CMD_SENSE_PTR(sp);
3529
3530 cp = GET_CMD_SP(sp);
3531 if (cp == NULL) {
3532 ql_log(ql_log_warn, vha, 0x3025,
3533 "cmd is NULL: already returned to OS (sp=%p).\n", sp);
3534
3535 rsp->status_srb = NULL;
3536 return;
3537 }
3538
3539 if (sense_len > sizeof(pkt->data))
3540 sense_sz = sizeof(pkt->data);
3541 else
3542 sense_sz = sense_len;
3543
3544
3545 if (IS_FWI2_CAPABLE(ha))
3546 host_to_fcp_swap(pkt->data, sizeof(pkt->data));
3547 memcpy(sense_ptr, pkt->data, sense_sz);
3548 ql_dump_buffer(ql_dbg_io + ql_dbg_buffer, vha, 0x302c,
3549 sense_ptr, sense_sz);
3550
3551 sense_len -= sense_sz;
3552 sense_ptr += sense_sz;
3553
3554 SET_CMD_SENSE_PTR(sp, sense_ptr);
3555 SET_CMD_SENSE_LEN(sp, sense_len);
3556
3557
3558 if (sense_len == 0) {
3559 rsp->status_srb = NULL;
3560 sp->done(sp, cp->result);
3561 }
3562}
3563
3564
3565
3566
3567
3568
3569
3570
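/**
 * qla2x00_error_entry() - Process an error entry.
 * @vha: SCSI driver HA context
 * @rsp: response queue
 * @pkt: Entry pointer
 *
 * Returns 1 when the caller should continue processing the packet as a
 * normal entry, 0 otherwise.
 */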
3571static int
3572qla2x00_error_entry(scsi_qla_host_t *vha, struct rsp_que *rsp, sts_entry_t *pkt)
3573{
3574 srb_t *sp;
3575 struct qla_hw_data *ha = vha->hw;
3576 const char func[] = "ERROR-IOCB";
3577 uint16_t que = MSW(pkt->handle);
3578 struct req_que *req = NULL;
3579 int res = DID_ERROR << 16;
3580
3581 ql_dbg(ql_dbg_async, vha, 0x502a,
3582 "iocb type %xh with error status %xh, handle %xh, rspq id %d\n",
3583 pkt->entry_type, pkt->entry_status, pkt->handle, rsp->id);
3584
3585 if (que >= ha->max_req_queues || !ha->req_q_map[que])
3586 goto fatal;
3587
3588 req = ha->req_q_map[que];
3589
3590 if (pkt->entry_status & RF_BUSY)
3591 res = DID_BUS_BUSY << 16;
3592
3593 if ((pkt->handle & ~QLA_TGT_HANDLE_MASK) == QLA_TGT_SKIP_HANDLE)
3594 return 0;
3595
3596 switch (pkt->entry_type) {
3597 case NOTIFY_ACK_TYPE:
3598 case STATUS_TYPE:
3599 case STATUS_CONT_TYPE:
3600 case LOGINOUT_PORT_IOCB_TYPE:
3601 case CT_IOCB_TYPE:
3602 case ELS_IOCB_TYPE:
3603 case ABORT_IOCB_TYPE:
3604 case MBX_IOCB_TYPE:
3605 default:
3606 sp = qla2x00_get_sp_from_handle(vha, func, req, pkt);
3607 if (sp) {
3608 qla_put_iocbs(sp->qpair, &sp->iores);
3609 sp->done(sp, res);
3610 return 0;
3611 }
3612 break;
3613
3614 case SA_UPDATE_IOCB_TYPE:
3615 case ABTS_RESP_24XX:
3616 case CTIO_TYPE7:
3617 case CTIO_CRC2:
3618 return 1;
3619 }
3620fatal:
3621 ql_log(ql_log_warn, vha, 0x5030,
3622 "Error entry - invalid handle/queue (%04x).\n", que);
3623 return 0;
3624}
3625
3626
3627
3628
3629
3630
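/**
 * qla24xx_mbx_completion() - Process mailbox command completions.
 * @vha: SCSI driver HA context
 * @mb0: value of mailbox register 0 taken from the host status
 */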
3631static void
3632qla24xx_mbx_completion(scsi_qla_host_t *vha, uint16_t mb0)
3633{
3634 uint16_t cnt;
3635 uint32_t mboxes;
3636 __le16 __iomem *wptr;
3637 struct qla_hw_data *ha = vha->hw;
3638 struct device_reg_24xx __iomem *reg = &ha->iobase->isp24;
3639
3640
3641 WARN_ON_ONCE(ha->mbx_count > 32);
3642 mboxes = (1ULL << ha->mbx_count) - 1;
3643 if (!ha->mcp)
3644 ql_dbg(ql_dbg_async, vha, 0x504e, "MBX pointer ERROR.\n");
3645 else
3646 mboxes = ha->mcp->in_mb;
3647
3648
3649 ha->flags.mbox_int = 1;
3650 ha->mailbox_out[0] = mb0;
3651 mboxes >>= 1;
3652 wptr = &reg->mailbox1;
3653
3654 for (cnt = 1; cnt < ha->mbx_count; cnt++) {
3655 if (mboxes & BIT_0)
3656 ha->mailbox_out[cnt] = rd_reg_word(wptr);
3657
3658 mboxes >>= 1;
3659 wptr++;
3660 }
3661}
3662
3663static void
3664qla24xx_abort_iocb_entry(scsi_qla_host_t *vha, struct req_que *req,
3665 struct abort_entry_24xx *pkt)
3666{
3667 const char func[] = "ABT_IOCB";
3668 srb_t *sp;
3669 srb_t *orig_sp = NULL;
3670 struct srb_iocb *abt;
3671
3672 sp = qla2x00_get_sp_from_handle(vha, func, req, pkt);
3673 if (!sp)
3674 return;
3675
3676 abt = &sp->u.iocb_cmd;
3677 abt->u.abt.comp_status = pkt->comp_status;
3678 orig_sp = sp->cmd_sp;
3679
3680 if (orig_sp)
3681 qla_nvme_abort_process_comp_status(pkt, orig_sp);
3682
3683 sp->done(sp, 0);
3684}
3685
3686void qla24xx_nvme_ls4_iocb(struct scsi_qla_host *vha,
3687 struct pt_ls4_request *pkt, struct req_que *req)
3688{
3689 srb_t *sp;
3690 const char func[] = "LS4_IOCB";
3691 uint16_t comp_status;
3692
3693 sp = qla2x00_get_sp_from_handle(vha, func, req, pkt);
3694 if (!sp)
3695 return;
3696
3697 comp_status = le16_to_cpu(pkt->status);
3698 sp->done(sp, comp_status);
3699}
3700
3701
3702
3703
3704
3705
3706
3707
3708
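/*
 * Check whether every continuation IOCB of a multi-entry packet has already
 * been posted by the firmware.  Returns 0 when the whole packet is available,
 * -EIO when processing should be deferred until more entries arrive.
 */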
3709static int qla_chk_cont_iocb_avail(struct scsi_qla_host *vha,
3710 struct rsp_que *rsp, response_t *pkt)
3711{
3712 int start_pkt_ring_index, end_pkt_ring_index, n_ring_index;
3713 response_t *end_pkt;
3714 int rc = 0;
3715 u32 rsp_q_in;
3716
3717 if (pkt->entry_count == 1)
3718 return rc;
3719
3720
3721 if (rsp->ring_index == 0)
3722 start_pkt_ring_index = rsp->length - 1;
3723 else
3724 start_pkt_ring_index = rsp->ring_index - 1;
3725
3726 if ((start_pkt_ring_index + pkt->entry_count) >= rsp->length)
3727 end_pkt_ring_index = start_pkt_ring_index + pkt->entry_count -
3728 rsp->length - 1;
3729 else
3730 end_pkt_ring_index = start_pkt_ring_index + pkt->entry_count - 1;
3731
3732 end_pkt = rsp->ring + end_pkt_ring_index;
3733
3734
3735 n_ring_index = end_pkt_ring_index + 1;
3736 if (n_ring_index >= rsp->length)
3737 n_ring_index = 0;
3738
3739 rsp_q_in = rsp->qpair->use_shadow_reg ? *rsp->in_ptr :
3740 rd_reg_dword(rsp->rsp_q_in);
3741
3742
3743 if ((rsp_q_in < start_pkt_ring_index && rsp_q_in < n_ring_index) ||
3744 rsp_q_in >= n_ring_index)
3745
3746 rc = 0;
3747 else
3748 rc = -EIO;
3749
3750 ql_dbg(ql_dbg_init + ql_dbg_verbose, vha, 0x5091,
3751 "%s - ring %p pkt %p end pkt %p entry count %#x rsp_q_in %d rc %d\n",
3752 __func__, rsp->ring, pkt, end_pkt, pkt->entry_count,
3753 rsp_q_in, rc);
3754
3755 return rc;
3756}
3757
3758
3759
3760
3761
3762
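/**
 * qla24xx_process_response_queue() - Process response queue entries.
 * @vha: SCSI driver HA context
 * @rsp: response queue
 */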
3763void qla24xx_process_response_queue(struct scsi_qla_host *vha,
3764 struct rsp_que *rsp)
3765{
3766 struct sts_entry_24xx *pkt;
3767 struct qla_hw_data *ha = vha->hw;
3768 struct purex_entry_24xx *purex_entry;
3769 struct purex_item *pure_item;
3770
3771 if (!ha->flags.fw_started)
3772 return;
3773
3774 if (rsp->qpair->cpuid != smp_processor_id() || !rsp->qpair->rcv_intr) {
3775 rsp->qpair->rcv_intr = 1;
3776 qla_cpu_update(rsp->qpair, smp_processor_id());
3777 }
3778
3779 while (rsp->ring_ptr->signature != RESPONSE_PROCESSED) {
3780 pkt = (struct sts_entry_24xx *)rsp->ring_ptr;
3781
3782 rsp->ring_index++;
3783 if (rsp->ring_index == rsp->length) {
3784 rsp->ring_index = 0;
3785 rsp->ring_ptr = rsp->ring;
3786 } else {
3787 rsp->ring_ptr++;
3788 }
3789
3790 if (pkt->entry_status != 0) {
3791 if (qla2x00_error_entry(vha, rsp, (sts_entry_t *) pkt))
3792 goto process_err;
3793
3794 ((response_t *)pkt)->signature = RESPONSE_PROCESSED;
3795 wmb();
3796 continue;
3797 }
3798process_err:
3799
3800 switch (pkt->entry_type) {
3801 case STATUS_TYPE:
3802 qla2x00_status_entry(vha, rsp, pkt);
3803 break;
3804 case STATUS_CONT_TYPE:
3805 qla2x00_status_cont_entry(rsp, (sts_cont_entry_t *)pkt);
3806 break;
3807 case VP_RPT_ID_IOCB_TYPE:
3808 qla24xx_report_id_acquisition(vha,
3809 (struct vp_rpt_id_entry_24xx *)pkt);
3810 break;
3811 case LOGINOUT_PORT_IOCB_TYPE:
3812 qla24xx_logio_entry(vha, rsp->req,
3813 (struct logio_entry_24xx *)pkt);
3814 break;
3815 case CT_IOCB_TYPE:
3816 qla24xx_els_ct_entry(vha, rsp->req, pkt, CT_IOCB_TYPE);
3817 break;
3818 case ELS_IOCB_TYPE:
3819 qla24xx_els_ct_entry(vha, rsp->req, pkt, ELS_IOCB_TYPE);
3820 break;
3821 case ABTS_RECV_24XX:
3822 if (qla_ini_mode_enabled(vha)) {
3823 pure_item = qla24xx_copy_std_pkt(vha, pkt);
3824 if (!pure_item)
3825 break;
3826 qla24xx_queue_purex_item(vha, pure_item,
3827 qla24xx_process_abts);
3828 break;
3829 }
3830 if (IS_QLA83XX(ha) || IS_QLA27XX(ha) ||
3831 IS_QLA28XX(ha)) {
3832
3833 qlt_handle_abts_recv(vha, rsp,
3834 (response_t *)pkt);
3835 break;
3836 } else {
3837 qlt_24xx_process_atio_queue(vha, 1);
3838 }
3839 fallthrough;
3840 case ABTS_RESP_24XX:
3841 case CTIO_TYPE7:
3842 case CTIO_CRC2:
3843 qlt_response_pkt_all_vps(vha, rsp, (response_t *)pkt);
3844 break;
3845 case PT_LS4_REQUEST:
3846 qla24xx_nvme_ls4_iocb(vha, (struct pt_ls4_request *)pkt,
3847 rsp->req);
3848 break;
3849 case NOTIFY_ACK_TYPE:
3850 if (pkt->handle == QLA_TGT_SKIP_HANDLE)
3851 qlt_response_pkt_all_vps(vha, rsp,
3852 (response_t *)pkt);
3853 else
3854 qla24xxx_nack_iocb_entry(vha, rsp->req,
3855 (struct nack_to_isp *)pkt);
3856 break;
3857 case MARKER_TYPE:
3858
3859
3860
3861 break;
3862 case ABORT_IOCB_TYPE:
3863 qla24xx_abort_iocb_entry(vha, rsp->req,
3864 (struct abort_entry_24xx *)pkt);
3865 break;
3866 case MBX_IOCB_TYPE:
3867 qla24xx_mbx_iocb_entry(vha, rsp->req,
3868 (struct mbx_24xx_entry *)pkt);
3869 break;
3870 case VP_CTRL_IOCB_TYPE:
3871 qla_ctrlvp_completed(vha, rsp->req,
3872 (struct vp_ctrl_entry_24xx *)pkt);
3873 break;
3874 case PUREX_IOCB_TYPE:
3875 purex_entry = (void *)pkt;
3876 switch (purex_entry->els_frame_payload[3]) {
3877 case ELS_RDP:
3878 pure_item = qla24xx_copy_std_pkt(vha, pkt);
3879 if (!pure_item)
3880 break;
3881 qla24xx_queue_purex_item(vha, pure_item,
3882 qla24xx_process_purex_rdp);
3883 break;
3884 case ELS_FPIN:
3885 if (!vha->hw->flags.scm_enabled) {
3886 ql_log(ql_log_warn, vha, 0x5094,
3887 "SCM not active for this port\n");
3888 break;
3889 }
3890 pure_item = qla27xx_copy_fpin_pkt(vha,
3891 (void **)&pkt, &rsp);
3892 if (!pure_item)
3893 break;
3894 qla24xx_queue_purex_item(vha, pure_item,
3895 qla27xx_process_purex_fpin);
3896 break;
3897
3898 case ELS_AUTH_ELS:
3899 if (qla_chk_cont_iocb_avail(vha, rsp, (response_t *)pkt)) {
3900 ql_dbg(ql_dbg_init, vha, 0x5091,
3901 "Defer processing ELS opcode %#x...\n",
3902 purex_entry->els_frame_payload[3]);
3903 return;
3904 }
3905 qla24xx_auth_els(vha, (void **)&pkt, &rsp);
3906 break;
3907 default:
3908 ql_log(ql_log_warn, vha, 0x509c,
3909 "Discarding ELS Request opcode 0x%x\n",
3910 purex_entry->els_frame_payload[3]);
3911 }
3912 break;
3913 case SA_UPDATE_IOCB_TYPE:
3914 qla28xx_sa_update_iocb_entry(vha, rsp->req,
3915 (struct sa_update_28xx *)pkt);
3916 break;
3917
3918 default:
3919
3920 ql_dbg(ql_dbg_async, vha, 0x5042,
3921 "Received unknown response pkt type 0x%x entry status=%x.\n",
3922 pkt->entry_type, pkt->entry_status);
3923 break;
3924 }
3925 ((response_t *)pkt)->signature = RESPONSE_PROCESSED;
3926 wmb();
3927 }
3928
3929
3930 if (IS_P3P_TYPE(ha)) {
3931 struct device_reg_82xx __iomem *reg = &ha->iobase->isp82;
3932
3933 wrt_reg_dword(&reg->rsp_q_out[0], rsp->ring_index);
3934 } else {
3935 wrt_reg_dword(rsp->rsp_q_out, rsp->ring_index);
3936 }
3937}
3938
3939static void
3940qla2xxx_check_risc_status(scsi_qla_host_t *vha)
3941{
3942 int rval;
3943 uint32_t cnt;
3944 struct qla_hw_data *ha = vha->hw;
3945 struct device_reg_24xx __iomem *reg = &ha->iobase->isp24;
3946
3947 if (!IS_QLA25XX(ha) && !IS_QLA81XX(ha) && !IS_QLA83XX(ha) &&
3948 !IS_QLA27XX(ha) && !IS_QLA28XX(ha))
3949 return;
3950
3951 rval = QLA_SUCCESS;
3952 wrt_reg_dword(&reg->iobase_addr, 0x7C00);
3953 rd_reg_dword(&reg->iobase_addr);
3954 wrt_reg_dword(&reg->iobase_window, 0x0001);
3955 for (cnt = 10000; (rd_reg_dword(&reg->iobase_window) & BIT_0) == 0 &&
3956 rval == QLA_SUCCESS; cnt--) {
3957 if (cnt) {
3958 wrt_reg_dword(&reg->iobase_window, 0x0001);
3959 udelay(10);
3960 } else
3961 rval = QLA_FUNCTION_TIMEOUT;
3962 }
3963 if (rval == QLA_SUCCESS)
3964 goto next_test;
3965
3966 rval = QLA_SUCCESS;
3967 wrt_reg_dword(&reg->iobase_window, 0x0003);
3968 for (cnt = 100; (rd_reg_dword(&reg->iobase_window) & BIT_0) == 0 &&
3969 rval == QLA_SUCCESS; cnt--) {
3970 if (cnt) {
3971 wrt_reg_dword(&reg->iobase_window, 0x0003);
3972 udelay(10);
3973 } else
3974 rval = QLA_FUNCTION_TIMEOUT;
3975 }
3976 if (rval != QLA_SUCCESS)
3977 goto done;
3978
3979next_test:
3980 if (rd_reg_dword(&reg->iobase_c8) & BIT_3)
3981 ql_log(ql_log_info, vha, 0x504c,
3982 "Additional code -- 0x55AA.\n");
3983
3984done:
3985 wrt_reg_dword(&reg->iobase_window, 0x0000);
3986 rd_reg_dword(&reg->iobase_window);
3987}
3988
3989
3990
3991
3992
3993
3994
3995
3996
3997
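/**
 * qla24xx_intr_handler() - Process interrupts for the ISP24xx adapters.
 * @irq: interrupt number
 * @dev_id: SCSI driver HA context (response queue)
 *
 * Called by the system whenever the host adapter generates an interrupt.
 *
 * Returns handled flag.
 */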
3998irqreturn_t
3999qla24xx_intr_handler(int irq, void *dev_id)
4000{
4001 scsi_qla_host_t *vha;
4002 struct qla_hw_data *ha;
4003 struct device_reg_24xx __iomem *reg;
4004 int status;
4005 unsigned long iter;
4006 uint32_t stat;
4007 uint32_t hccr;
4008 uint16_t mb[8];
4009 struct rsp_que *rsp;
4010 unsigned long flags;
4011 bool process_atio = false;
4012
4013 rsp = (struct rsp_que *) dev_id;
4014 if (!rsp) {
4015 ql_log(ql_log_info, NULL, 0x5059,
4016 "%s: NULL response queue pointer.\n", __func__);
4017 return IRQ_NONE;
4018 }
4019
4020 ha = rsp->hw;
4021 reg = &ha->iobase->isp24;
4022 status = 0;
4023
4024 if (unlikely(pci_channel_offline(ha->pdev)))
4025 return IRQ_HANDLED;
4026
4027 spin_lock_irqsave(&ha->hardware_lock, flags);
4028 vha = pci_get_drvdata(ha->pdev);
4029 for (iter = 50; iter--; ) {
4030 stat = rd_reg_dword(&reg->host_status);
4031 if (qla2x00_check_reg32_for_disconnect(vha, stat))
4032 break;
4033 if (stat & HSRX_RISC_PAUSED) {
4034 if (unlikely(pci_channel_offline(ha->pdev)))
4035 break;
4036
4037 hccr = rd_reg_dword(&reg->hccr);
4038
4039 ql_log(ql_log_warn, vha, 0x504b,
4040 "RISC paused -- HCCR=%x, Dumping firmware.\n",
4041 hccr);
4042
4043 qla2xxx_check_risc_status(vha);
4044
4045 ha->isp_ops->fw_dump(vha);
4046 set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
4047 break;
4048 } else if ((stat & HSRX_RISC_INT) == 0)
4049 break;
4050
4051 switch (stat & 0xff) {
4052 case INTR_ROM_MB_SUCCESS:
4053 case INTR_ROM_MB_FAILED:
4054 case INTR_MB_SUCCESS:
4055 case INTR_MB_FAILED:
4056 qla24xx_mbx_completion(vha, MSW(stat));
4057 status |= MBX_INTERRUPT;
4058
4059 break;
4060 case INTR_ASYNC_EVENT:
4061 mb[0] = MSW(stat);
4062 mb[1] = rd_reg_word(&reg->mailbox1);
4063 mb[2] = rd_reg_word(&reg->mailbox2);
4064 mb[3] = rd_reg_word(&reg->mailbox3);
4065 qla2x00_async_event(vha, rsp, mb);
4066 break;
4067 case INTR_RSP_QUE_UPDATE:
4068 case INTR_RSP_QUE_UPDATE_83XX:
4069 qla24xx_process_response_queue(vha, rsp);
4070 break;
4071 case INTR_ATIO_QUE_UPDATE_27XX:
4072 case INTR_ATIO_QUE_UPDATE:
4073 process_atio = true;
4074 break;
4075 case INTR_ATIO_RSP_QUE_UPDATE:
4076 process_atio = true;
4077 qla24xx_process_response_queue(vha, rsp);
4078 break;
4079 default:
4080 ql_dbg(ql_dbg_async, vha, 0x504f,
4081 "Unrecognized interrupt type (%d).\n", stat & 0xff);
4082 break;
4083 }
4084 wrt_reg_dword(&reg->hccr, HCCRX_CLR_RISC_INT);
4085 rd_reg_dword_relaxed(&reg->hccr);
4086 if (unlikely(IS_QLA83XX(ha) && (ha->pdev->revision == 1)))
4087 ndelay(3500);
4088 }
4089 qla2x00_handle_mbx_completion(ha, status);
4090 spin_unlock_irqrestore(&ha->hardware_lock, flags);
4091
4092 if (process_atio) {
4093 spin_lock_irqsave(&ha->tgt.atio_lock, flags);
4094 qlt_24xx_process_atio_queue(vha, 0);
4095 spin_unlock_irqrestore(&ha->tgt.atio_lock, flags);
4096 }
4097
4098 return IRQ_HANDLED;
4099}
4100
4101static irqreturn_t
4102qla24xx_msix_rsp_q(int irq, void *dev_id)
4103{
4104 struct qla_hw_data *ha;
4105 struct rsp_que *rsp;
4106 struct device_reg_24xx __iomem *reg;
4107 struct scsi_qla_host *vha;
4108 unsigned long flags;
4109
4110 rsp = (struct rsp_que *) dev_id;
4111 if (!rsp) {
4112 ql_log(ql_log_info, NULL, 0x505a,
4113 "%s: NULL response queue pointer.\n", __func__);
4114 return IRQ_NONE;
4115 }
4116 ha = rsp->hw;
4117 reg = &ha->iobase->isp24;
4118
4119 spin_lock_irqsave(&ha->hardware_lock, flags);
4120
4121 vha = pci_get_drvdata(ha->pdev);
4122 qla24xx_process_response_queue(vha, rsp);
4123 if (!ha->flags.disable_msix_handshake) {
4124 wrt_reg_dword(&reg->hccr, HCCRX_CLR_RISC_INT);
4125 rd_reg_dword_relaxed(&reg->hccr);
4126 }
4127 spin_unlock_irqrestore(&ha->hardware_lock, flags);
4128
4129 return IRQ_HANDLED;
4130}
4131
4132static irqreturn_t
4133qla24xx_msix_default(int irq, void *dev_id)
4134{
4135 scsi_qla_host_t *vha;
4136 struct qla_hw_data *ha;
4137 struct rsp_que *rsp;
4138 struct device_reg_24xx __iomem *reg;
4139 int status;
4140 uint32_t stat;
4141 uint32_t hccr;
4142 uint16_t mb[8];
4143 unsigned long flags;
4144 bool process_atio = false;
4145
4146 rsp = (struct rsp_que *) dev_id;
4147 if (!rsp) {
4148 ql_log(ql_log_info, NULL, 0x505c,
4149 "%s: NULL response queue pointer.\n", __func__);
4150 return IRQ_NONE;
4151 }
4152 ha = rsp->hw;
4153 reg = &ha->iobase->isp24;
4154 status = 0;
4155
4156 spin_lock_irqsave(&ha->hardware_lock, flags);
4157 vha = pci_get_drvdata(ha->pdev);
4158 do {
4159 stat = rd_reg_dword(&reg->host_status);
4160 if (qla2x00_check_reg32_for_disconnect(vha, stat))
4161 break;
4162 if (stat & HSRX_RISC_PAUSED) {
4163 if (unlikely(pci_channel_offline(ha->pdev)))
4164 break;
4165
4166 hccr = rd_reg_dword(&reg->hccr);
4167
4168 ql_log(ql_log_info, vha, 0x5050,
4169 "RISC paused -- HCCR=%x, Dumping firmware.\n",
4170 hccr);
4171
4172 qla2xxx_check_risc_status(vha);
4173 vha->hw_err_cnt++;
4174
4175 ha->isp_ops->fw_dump(vha);
4176 set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
4177 break;
4178 } else if ((stat & HSRX_RISC_INT) == 0)
4179 break;
4180
4181 switch (stat & 0xff) {
4182 case INTR_ROM_MB_SUCCESS:
4183 case INTR_ROM_MB_FAILED:
4184 case INTR_MB_SUCCESS:
4185 case INTR_MB_FAILED:
4186 qla24xx_mbx_completion(vha, MSW(stat));
4187 status |= MBX_INTERRUPT;
4188
4189 break;
4190 case INTR_ASYNC_EVENT:
4191 mb[0] = MSW(stat);
4192 mb[1] = rd_reg_word(®->mailbox1);
4193 mb[2] = rd_reg_word(®->mailbox2);
4194 mb[3] = rd_reg_word(®->mailbox3);
4195 qla2x00_async_event(vha, rsp, mb);
4196 break;
4197 case INTR_RSP_QUE_UPDATE:
4198 case INTR_RSP_QUE_UPDATE_83XX:
4199 qla24xx_process_response_queue(vha, rsp);
4200 break;
4201 case INTR_ATIO_QUE_UPDATE_27XX:
4202 case INTR_ATIO_QUE_UPDATE:
4203 process_atio = true;
4204 break;
4205 case INTR_ATIO_RSP_QUE_UPDATE:
4206 process_atio = true;
4207 qla24xx_process_response_queue(vha, rsp);
4208 break;
4209 default:
4210 ql_dbg(ql_dbg_async, vha, 0x5051,
4211 "Unrecognized interrupt type (%d).\n", stat & 0xff);
4212 break;
4213 }
4214 wrt_reg_dword(®->hccr, HCCRX_CLR_RISC_INT);
4215 } while (0);
4216 qla2x00_handle_mbx_completion(ha, status);
4217 spin_unlock_irqrestore(&ha->hardware_lock, flags);
4218
4219 if (process_atio) {
4220 spin_lock_irqsave(&ha->tgt.atio_lock, flags);
4221 qlt_24xx_process_atio_queue(vha, 0);
4222 spin_unlock_irqrestore(&ha->tgt.atio_lock, flags);
4223 }
4224
4225 return IRQ_HANDLED;
4226}
4227
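/*
 * qla2xxx_msix_rsp_q() - MSI-X handler for a queue-pair vector.
 * @irq: interrupt number (unused)
 * @dev_id: queue pair (struct qla_qpair *) bound to this vector
 *
 * Defers response processing to the queue pair's work item on the
 * local CPU's workqueue.
 */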
irqreturn_t
qla2xxx_msix_rsp_q(int irq, void *dev_id)
{
	struct qla_hw_data *ha;
	struct qla_qpair *qpair;

	qpair = dev_id;
	if (!qpair) {
		ql_log(ql_log_info, NULL, 0x505b,
		    "%s: NULL response queue pointer.\n", __func__);
		return IRQ_NONE;
	}
	ha = qpair->hw;

	queue_work_on(smp_processor_id(), ha->wq, &qpair->q_work);

	return IRQ_HANDLED;
}

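/*
 * qla2xxx_msix_rsp_q_hs() - Queue-pair vector handler with handshake.
 * @irq: interrupt number (unused)
 * @dev_id: queue pair (struct qla_qpair *) bound to this vector
 *
 * Same as qla2xxx_msix_rsp_q(), but first clears the RISC interrupt in
 * HCCR (interrupt handshake) before queueing the work item.
 */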
irqreturn_t
qla2xxx_msix_rsp_q_hs(int irq, void *dev_id)
{
	struct qla_hw_data *ha;
	struct qla_qpair *qpair;
	struct device_reg_24xx __iomem *reg;
	unsigned long flags;

	qpair = dev_id;
	if (!qpair) {
		ql_log(ql_log_info, NULL, 0x505b,
		    "%s: NULL response queue pointer.\n", __func__);
		return IRQ_NONE;
	}
	ha = qpair->hw;

	reg = &ha->iobase->isp24;
	spin_lock_irqsave(&ha->hardware_lock, flags);
	wrt_reg_dword(&reg->hccr, HCCRX_CLR_RISC_INT);
	spin_unlock_irqrestore(&ha->hardware_lock, flags);

	queue_work_on(smp_processor_id(), ha->wq, &qpair->q_work);

	return IRQ_HANDLED;
}

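/*
 * Per-vector names and handlers used when requesting MSI-X interrupts.
 * The first entries cover the default, response queue, and (when target
 * mode is enabled) ATIO vectors; the qpair entries are requested per
 * queue pair via qla25xx_request_irq().
 */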
struct qla_init_msix_entry {
	const char *name;
	irq_handler_t handler;
};

static const struct qla_init_msix_entry msix_entries[] = {
	{ "default", qla24xx_msix_default },
	{ "rsp_q", qla24xx_msix_rsp_q },
	{ "atio_q", qla83xx_msix_atio_q },
	{ "qpair_multiq", qla2xxx_msix_rsp_q },
	{ "qpair_multiq_hs", qla2xxx_msix_rsp_q_hs },
};

static const struct qla_init_msix_entry qla82xx_msix_entries[] = {
	{ "qla2xxx (default)", qla82xx_msix_default },
	{ "qla2xxx (rsp_q)", qla82xx_msix_rsp_q },
};

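/*
 * qla24xx_enable_msix() - Allocate MSI-X vectors and register handlers.
 * @ha: HA context
 * @rsp: base response queue
 *
 * Allocates up to ha->msix_count vectors (spread over the online CPUs
 * with managed affinity unless the user controls IRQ placement),
 * shrinks the request/response queue limits if fewer vectors are
 * granted, and requests the base and optional ATIO vectors.
 */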
static int
qla24xx_enable_msix(struct qla_hw_data *ha, struct rsp_que *rsp)
{
	int i, ret;
	struct qla_msix_entry *qentry;
	scsi_qla_host_t *vha = pci_get_drvdata(ha->pdev);
	int min_vecs = QLA_BASE_VECTORS;
	struct irq_affinity desc = {
		.pre_vectors = QLA_BASE_VECTORS,
	};

	if (QLA_TGT_MODE_ENABLED() && (ql2xenablemsix != 0) &&
	    IS_ATIO_MSIX_CAPABLE(ha)) {
		desc.pre_vectors++;
		min_vecs++;
	}

	if (USER_CTRL_IRQ(ha) || !ha->mqiobase) {
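		/*
		 * Allocate plain (non-managed) vectors when the user
		 * controls IRQ affinity or the MQ I/O base is absent.
		 */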
		ret = pci_alloc_irq_vectors(ha->pdev, min_vecs,
		    min((u16)ha->msix_count, (u16)(num_online_cpus() + min_vecs)),
		    PCI_IRQ_MSIX);
	} else
		ret = pci_alloc_irq_vectors_affinity(ha->pdev, min_vecs,
		    min((u16)ha->msix_count, (u16)(num_online_cpus() + min_vecs)),
		    PCI_IRQ_MSIX | PCI_IRQ_AFFINITY,
		    &desc);

	if (ret < 0) {
		ql_log(ql_log_fatal, vha, 0x00c7,
		    "MSI-X: Failed to enable support, "
		    "giving up -- %d/%d.\n",
		    ha->msix_count, ret);
		goto msix_out;
	} else if (ret < ha->msix_count) {
		ql_log(ql_log_info, vha, 0x00c6,
		    "MSI-X: Using %d vectors\n", ret);
		ha->msix_count = ret;

		if (ha->mqiobase && (ql2xmqsupport || ql2xnvmeenable)) {
			ha->max_req_queues = ha->msix_count - 1;

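			/* The ATIO queue consumes one vector in target mode. */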
			if (QLA_TGT_MODE_ENABLED())
				ha->max_req_queues--;

			ha->max_rsp_queues = ha->max_req_queues;

			ha->max_qpairs = ha->max_req_queues - 1;
			ql_dbg_pci(ql_dbg_init, ha->pdev, 0x0190,
			    "Adjusted Max no of queues pairs: %d.\n", ha->max_qpairs);
		}
	}
	vha->irq_offset = desc.pre_vectors;
	ha->msix_entries = kcalloc(ha->msix_count,
				   sizeof(struct qla_msix_entry),
				   GFP_KERNEL);
	if (!ha->msix_entries) {
		ql_log(ql_log_fatal, vha, 0x00c8,
		    "Failed to allocate memory for ha->msix_entries.\n");
		ret = -ENOMEM;
		goto free_irqs;
	}
	ha->flags.msix_enabled = 1;

	for (i = 0; i < ha->msix_count; i++) {
		qentry = &ha->msix_entries[i];
		qentry->vector = pci_irq_vector(ha->pdev, i);
		qentry->entry = i;
		qentry->have_irq = 0;
		qentry->in_use = 0;
		qentry->handle = NULL;
	}

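	/* Register the handlers for the base queue vectors. */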
	for (i = 0; i < QLA_BASE_VECTORS; i++) {
		qentry = &ha->msix_entries[i];
		qentry->handle = rsp;
		rsp->msix = qentry;
		scnprintf(qentry->name, sizeof(qentry->name),
		    "qla2xxx%lu_%s", vha->host_no, msix_entries[i].name);
		if (IS_P3P_TYPE(ha))
			ret = request_irq(qentry->vector,
			    qla82xx_msix_entries[i].handler,
			    0, qla82xx_msix_entries[i].name, rsp);
		else
			ret = request_irq(qentry->vector,
			    msix_entries[i].handler,
			    0, qentry->name, rsp);
		if (ret)
			goto msix_register_fail;
		qentry->have_irq = 1;
		qentry->in_use = 1;
	}

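	/*
	 * When target mode is enabled and the ISP supports a separate ATIO
	 * queue interrupt, also request the ATIO vector.
	 */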
	if (QLA_TGT_MODE_ENABLED() && (ql2xenablemsix != 0) &&
	    IS_ATIO_MSIX_CAPABLE(ha)) {
		qentry = &ha->msix_entries[QLA_ATIO_VECTOR];
		rsp->msix = qentry;
		qentry->handle = rsp;
		scnprintf(qentry->name, sizeof(qentry->name),
		    "qla2xxx%lu_%s", vha->host_no,
		    msix_entries[QLA_ATIO_VECTOR].name);
		qentry->in_use = 1;
		ret = request_irq(qentry->vector,
		    msix_entries[QLA_ATIO_VECTOR].handler,
		    0, qentry->name, rsp);
		qentry->have_irq = 1;
	}

msix_register_fail:
	if (ret) {
		ql_log(ql_log_fatal, vha, 0x00cb,
		    "MSI-X: unable to register handler -- %x/%d.\n",
		    qentry->vector, ret);
		qla2x00_free_irqs(vha);
		ha->mqenable = 0;
		goto msix_out;
	}

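	/* Decide whether multiqueue (mqenable) can be used on this adapter. */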
	if (IS_QLA83XX(ha) || IS_QLA27XX(ha) || IS_QLA28XX(ha)) {
		if (ha->msixbase && ha->mqiobase &&
		    (ha->max_rsp_queues > 1 || ha->max_req_queues > 1 ||
		     ql2xmqsupport))
			ha->mqenable = 1;
	} else
		if (ha->mqiobase &&
		    (ha->max_rsp_queues > 1 || ha->max_req_queues > 1 ||
		     ql2xmqsupport))
			ha->mqenable = 1;
	ql_dbg(ql_dbg_multiq, vha, 0xc005,
	    "mqiobase=%p, max_rsp_queues=%d, max_req_queues=%d.\n",
	    ha->mqiobase, ha->max_rsp_queues, ha->max_req_queues);
	ql_dbg(ql_dbg_init, vha, 0x0055,
	    "mqiobase=%p, max_rsp_queues=%d, max_req_queues=%d.\n",
	    ha->mqiobase, ha->max_rsp_queues, ha->max_req_queues);

msix_out:
	return ret;

free_irqs:
	pci_free_irq_vectors(ha->pdev);
	goto msix_out;
}

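/*
 * qla2x00_request_irqs() - Set up interrupt delivery for the adapter.
 * @ha: HA context
 * @rsp: base response queue
 *
 * Tries MSI-X first on capable ISPs, falls back to MSI, and finally to
 * shared INTx interrupts. Returns 0 on success.
 */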
int
qla2x00_request_irqs(struct qla_hw_data *ha, struct rsp_que *rsp)
{
	int ret = QLA_FUNCTION_FAILED;
	device_reg_t *reg = ha->iobase;
	scsi_qla_host_t *vha = pci_get_drvdata(ha->pdev);

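	/* If possible, enable MSI-X; otherwise fall through to MSI/INTx. */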
	if (ql2xenablemsix == 0 || (!IS_QLA2432(ha) && !IS_QLA2532(ha) &&
	    !IS_QLA8432(ha) && !IS_CNA_CAPABLE(ha) && !IS_QLA2031(ha) &&
	    !IS_QLAFX00(ha) && !IS_QLA27XX(ha) && !IS_QLA28XX(ha)))
		goto skip_msi;

	if (ql2xenablemsix == 2)
		goto skip_msix;

	if (ha->pdev->subsystem_vendor == PCI_VENDOR_ID_HP &&
	    (ha->pdev->subsystem_device == 0x7040 ||
	     ha->pdev->subsystem_device == 0x7041 ||
	     ha->pdev->subsystem_device == 0x1705)) {
		ql_log(ql_log_warn, vha, 0x0034,
		    "MSI-X: Unsupported ISP 2432 SSVID/SSDID (0x%X,0x%X).\n",
		    ha->pdev->subsystem_vendor,
		    ha->pdev->subsystem_device);
		goto skip_msi;
	}

	if (IS_QLA2432(ha) && (ha->pdev->revision < QLA_MSIX_CHIP_REV_24XX)) {
		ql_log(ql_log_warn, vha, 0x0035,
		    "MSI-X; Unsupported ISP2432 (0x%X, 0x%X).\n",
		    ha->pdev->revision, QLA_MSIX_CHIP_REV_24XX);
		goto skip_msix;
	}

	ret = qla24xx_enable_msix(ha, rsp);
	if (!ret) {
		ql_dbg(ql_dbg_init, vha, 0x0036,
		    "MSI-X: Enabled (0x%X, 0x%X).\n",
		    ha->chip_revision, ha->fw_attributes);
		goto clear_risc_ints;
	}

skip_msix:

	ql_log(ql_log_info, vha, 0x0037,
	    "Falling back-to MSI mode -- ret=%d.\n", ret);

	if (!IS_QLA24XX(ha) && !IS_QLA2532(ha) && !IS_QLA8432(ha) &&
	    !IS_QLA8001(ha) && !IS_P3P_TYPE(ha) && !IS_QLAFX00(ha) &&
	    !IS_QLA27XX(ha) && !IS_QLA28XX(ha))
		goto skip_msi;

	ret = pci_alloc_irq_vectors(ha->pdev, 1, 1, PCI_IRQ_MSI);
	if (ret > 0) {
		ql_dbg(ql_dbg_init, vha, 0x0038,
		    "MSI: Enabled.\n");
		ha->flags.msi_enabled = 1;
	} else
		ql_log(ql_log_warn, vha, 0x0039,
		    "Falling back-to INTa mode -- ret=%d.\n", ret);

skip_msi:

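	/* INTx is not supported on ISP82xx; bail out if MSI failed there. */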
	if (!ha->flags.msi_enabled && IS_QLA82XX(ha))
		return QLA_FUNCTION_FAILED;

	ret = request_irq(ha->pdev->irq, ha->isp_ops->intr_handler,
	    ha->flags.msi_enabled ? 0 : IRQF_SHARED,
	    QLA2XXX_DRIVER_NAME, rsp);
	if (ret) {
		ql_log(ql_log_warn, vha, 0x003a,
		    "Failed to reserve interrupt %d already in use.\n",
		    ha->pdev->irq);
		goto fail;
	} else if (!ha->flags.msi_enabled) {
		ql_dbg(ql_dbg_init, vha, 0x0125,
		    "INTa mode: Enabled.\n");
		ha->flags.mr_intr_valid = 1;
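		/* Without MSI-X/MSI there are no extra vectors for queue pairs. */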
		ha->max_qpairs = 0;
	}

clear_risc_ints:
	if (IS_FWI2_CAPABLE(ha) || IS_QLAFX00(ha))
		goto fail;

	spin_lock_irq(&ha->hardware_lock);
	wrt_reg_word(&reg->isp.semaphore, 0);
	spin_unlock_irq(&ha->hardware_lock);

fail:
	return ret;
}

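/*
 * qla2x00_free_irqs() - Release all interrupt vectors for the adapter.
 * @vha: host adapter context
 */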
void
qla2x00_free_irqs(scsi_qla_host_t *vha)
{
	struct qla_hw_data *ha = vha->hw;
	struct rsp_que *rsp;
	struct qla_msix_entry *qentry;
	int i;

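	/*
	 * ha->rsp_q_map may not be set up yet if this is called on a probe
	 * failure path; in that case only the PCI vectors need freeing.
	 */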
	if (!ha->rsp_q_map || !ha->rsp_q_map[0])
		goto free_irqs;
	rsp = ha->rsp_q_map[0];

	if (ha->flags.msix_enabled) {
		for (i = 0; i < ha->msix_count; i++) {
			qentry = &ha->msix_entries[i];
			if (qentry->have_irq) {
				irq_set_affinity_notifier(qentry->vector, NULL);
				free_irq(pci_irq_vector(ha->pdev, i), qentry->handle);
			}
		}
		kfree(ha->msix_entries);
		ha->msix_entries = NULL;
		ha->flags.msix_enabled = 0;
		ql_dbg(ql_dbg_init, vha, 0x0042,
		    "Disabled MSI-X.\n");
	} else {
		free_irq(pci_irq_vector(ha->pdev, 0), rsp);
	}

free_irqs:
	pci_free_irq_vectors(ha->pdev);
}

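/*
 * qla25xx_request_irq() - Request an MSI-X vector for a queue pair.
 * @ha: HA context
 * @qpair: queue pair to bind to the vector
 * @msix: MSI-X entry describing the vector
 * @vector_type: index into msix_entries[] selecting the handler
 */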
int qla25xx_request_irq(struct qla_hw_data *ha, struct qla_qpair *qpair,
	struct qla_msix_entry *msix, int vector_type)
{
	const struct qla_init_msix_entry *intr = &msix_entries[vector_type];
	scsi_qla_host_t *vha = pci_get_drvdata(ha->pdev);
	int ret;

	scnprintf(msix->name, sizeof(msix->name),
	    "qla2xxx%lu_qpair%d", vha->host_no, qpair->id);
	ret = request_irq(msix->vector, intr->handler, 0, msix->name, qpair);
	if (ret) {
		ql_log(ql_log_fatal, vha, 0x00e6,
		    "MSI-X: Unable to register handler -- %x/%d.\n",
		    msix->vector, ret);
		return ret;
	}
	msix->have_irq = 1;
	msix->handle = qpair;
	return ret;
}