1
2
3
4
5
6
7#include "qla_def.h"
8#include "qla_target.h"
9
10#include <linux/delay.h>
11#include <linux/slab.h>
12#include <linux/cpu.h>
13#include <linux/t10-pi.h>
14#include <scsi/scsi_tcq.h>
15#include <scsi/scsi_bsg_fc.h>
16#include <scsi/scsi_eh.h>
17#include <scsi/fc/fc_fs.h>
18#include <linux/nvme-fc-driver.h>
19
20static void qla2x00_mbx_completion(scsi_qla_host_t *, uint16_t);
21static void qla2x00_status_entry(scsi_qla_host_t *, struct rsp_que *, void *);
22static void qla2x00_status_cont_entry(struct rsp_que *, sts_cont_entry_t *);
23static int qla2x00_error_entry(scsi_qla_host_t *, struct rsp_que *,
24 sts_entry_t *);
25
26
27
28
29
30
31
32
33
34
35irqreturn_t
36qla2100_intr_handler(int irq, void *dev_id)
37{
38 scsi_qla_host_t *vha;
39 struct qla_hw_data *ha;
40 struct device_reg_2xxx __iomem *reg;
41 int status;
42 unsigned long iter;
43 uint16_t hccr;
44 uint16_t mb[4];
45 struct rsp_que *rsp;
46 unsigned long flags;
47
48 rsp = (struct rsp_que *) dev_id;
49 if (!rsp) {
50 ql_log(ql_log_info, NULL, 0x505d,
51 "%s: NULL response queue pointer.\n", __func__);
52 return (IRQ_NONE);
53 }
54
55 ha = rsp->hw;
56 reg = &ha->iobase->isp;
57 status = 0;
58
59 spin_lock_irqsave(&ha->hardware_lock, flags);
60 vha = pci_get_drvdata(ha->pdev);
61 for (iter = 50; iter--; ) {
62 hccr = RD_REG_WORD(®->hccr);
63 if (qla2x00_check_reg16_for_disconnect(vha, hccr))
64 break;
65 if (hccr & HCCR_RISC_PAUSE) {
66 if (pci_channel_offline(ha->pdev))
67 break;
68
69
70
71
72
73
74 WRT_REG_WORD(®->hccr, HCCR_RESET_RISC);
75 RD_REG_WORD(®->hccr);
76
77 ha->isp_ops->fw_dump(vha, 1);
78 set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
79 break;
80 } else if ((RD_REG_WORD(®->istatus) & ISR_RISC_INT) == 0)
81 break;
82
83 if (RD_REG_WORD(®->semaphore) & BIT_0) {
84 WRT_REG_WORD(®->hccr, HCCR_CLR_RISC_INT);
85 RD_REG_WORD(®->hccr);
86
87
88 mb[0] = RD_MAILBOX_REG(ha, reg, 0);
89 if (mb[0] > 0x3fff && mb[0] < 0x8000) {
90 qla2x00_mbx_completion(vha, mb[0]);
91 status |= MBX_INTERRUPT;
92 } else if (mb[0] > 0x7fff && mb[0] < 0xc000) {
93 mb[1] = RD_MAILBOX_REG(ha, reg, 1);
94 mb[2] = RD_MAILBOX_REG(ha, reg, 2);
95 mb[3] = RD_MAILBOX_REG(ha, reg, 3);
96 qla2x00_async_event(vha, rsp, mb);
97 } else {
98
99 ql_dbg(ql_dbg_async, vha, 0x5025,
100 "Unrecognized interrupt type (%d).\n",
101 mb[0]);
102 }
103
104 WRT_REG_WORD(®->semaphore, 0);
105 RD_REG_WORD(®->semaphore);
106 } else {
107 qla2x00_process_response_queue(rsp);
108
109 WRT_REG_WORD(®->hccr, HCCR_CLR_RISC_INT);
110 RD_REG_WORD(®->hccr);
111 }
112 }
113 qla2x00_handle_mbx_completion(ha, status);
114 spin_unlock_irqrestore(&ha->hardware_lock, flags);
115
116 return (IRQ_HANDLED);
117}
118
119bool
120qla2x00_check_reg32_for_disconnect(scsi_qla_host_t *vha, uint32_t reg)
121{
122
123 if (reg == 0xffffffff && !pci_channel_offline(vha->hw->pdev)) {
124 if (!test_and_set_bit(PFLG_DISCONNECTED, &vha->pci_flags) &&
125 !test_bit(PFLG_DRIVER_REMOVING, &vha->pci_flags) &&
126 !test_bit(PFLG_DRIVER_PROBING, &vha->pci_flags)) {
127
128
129
130
131
132 schedule_work(&vha->hw->board_disable);
133 }
134 return true;
135 } else
136 return false;
137}
138
139bool
140qla2x00_check_reg16_for_disconnect(scsi_qla_host_t *vha, uint16_t reg)
141{
142 return qla2x00_check_reg32_for_disconnect(vha, 0xffff0000 | reg);
143}
144
145
146
147
148
149
150
151
152
153
154irqreturn_t
155qla2300_intr_handler(int irq, void *dev_id)
156{
157 scsi_qla_host_t *vha;
158 struct device_reg_2xxx __iomem *reg;
159 int status;
160 unsigned long iter;
161 uint32_t stat;
162 uint16_t hccr;
163 uint16_t mb[4];
164 struct rsp_que *rsp;
165 struct qla_hw_data *ha;
166 unsigned long flags;
167
168 rsp = (struct rsp_que *) dev_id;
169 if (!rsp) {
170 ql_log(ql_log_info, NULL, 0x5058,
171 "%s: NULL response queue pointer.\n", __func__);
172 return (IRQ_NONE);
173 }
174
175 ha = rsp->hw;
176 reg = &ha->iobase->isp;
177 status = 0;
178
179 spin_lock_irqsave(&ha->hardware_lock, flags);
180 vha = pci_get_drvdata(ha->pdev);
181 for (iter = 50; iter--; ) {
182 stat = RD_REG_DWORD(®->u.isp2300.host_status);
183 if (qla2x00_check_reg32_for_disconnect(vha, stat))
184 break;
185 if (stat & HSR_RISC_PAUSED) {
186 if (unlikely(pci_channel_offline(ha->pdev)))
187 break;
188
189 hccr = RD_REG_WORD(®->hccr);
190
191 if (hccr & (BIT_15 | BIT_13 | BIT_11 | BIT_8))
192 ql_log(ql_log_warn, vha, 0x5026,
193 "Parity error -- HCCR=%x, Dumping "
194 "firmware.\n", hccr);
195 else
196 ql_log(ql_log_warn, vha, 0x5027,
197 "RISC paused -- HCCR=%x, Dumping "
198 "firmware.\n", hccr);
199
200
201
202
203
204
205 WRT_REG_WORD(®->hccr, HCCR_RESET_RISC);
206 RD_REG_WORD(®->hccr);
207
208 ha->isp_ops->fw_dump(vha, 1);
209 set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
210 break;
211 } else if ((stat & HSR_RISC_INT) == 0)
212 break;
213
214 switch (stat & 0xff) {
215 case 0x1:
216 case 0x2:
217 case 0x10:
218 case 0x11:
219 qla2x00_mbx_completion(vha, MSW(stat));
220 status |= MBX_INTERRUPT;
221
222
223 WRT_REG_WORD(®->semaphore, 0);
224 break;
225 case 0x12:
226 mb[0] = MSW(stat);
227 mb[1] = RD_MAILBOX_REG(ha, reg, 1);
228 mb[2] = RD_MAILBOX_REG(ha, reg, 2);
229 mb[3] = RD_MAILBOX_REG(ha, reg, 3);
230 qla2x00_async_event(vha, rsp, mb);
231 break;
232 case 0x13:
233 qla2x00_process_response_queue(rsp);
234 break;
235 case 0x15:
236 mb[0] = MBA_CMPLT_1_16BIT;
237 mb[1] = MSW(stat);
238 qla2x00_async_event(vha, rsp, mb);
239 break;
240 case 0x16:
241 mb[0] = MBA_SCSI_COMPLETION;
242 mb[1] = MSW(stat);
243 mb[2] = RD_MAILBOX_REG(ha, reg, 2);
244 qla2x00_async_event(vha, rsp, mb);
245 break;
246 default:
247 ql_dbg(ql_dbg_async, vha, 0x5028,
248 "Unrecognized interrupt type (%d).\n", stat & 0xff);
249 break;
250 }
251 WRT_REG_WORD(®->hccr, HCCR_CLR_RISC_INT);
252 RD_REG_WORD_RELAXED(®->hccr);
253 }
254 qla2x00_handle_mbx_completion(ha, status);
255 spin_unlock_irqrestore(&ha->hardware_lock, flags);
256
257 return (IRQ_HANDLED);
258}
259
260
261
262
263
264
/**
 * qla2x00_mbx_completion() - Process mailbox command completions.
 * @vha: SCSI driver HA context
 * @mb0: value of mailbox register 0 (command completion status)
 *
 * Copies the outgoing mailbox registers requested by the active mailbox
 * command (ha->mcp->in_mb bitmap) into ha->mailbox_out[] and flags the
 * mailbox interrupt for the waiter.
 */
static void
qla2x00_mbx_completion(scsi_qla_host_t *vha, uint16_t mb0)
{
	uint16_t cnt;
	uint32_t mboxes;
	uint16_t __iomem *wptr;
	struct qla_hw_data *ha = vha->hw;
	struct device_reg_2xxx __iomem *reg = &ha->iobase->isp;

	/* Default: read all mailbox registers if no command context exists. */
	WARN_ON_ONCE(ha->mbx_count > 32);
	mboxes = (1ULL << ha->mbx_count) - 1;
	if (!ha->mcp)
		ql_dbg(ql_dbg_async, vha, 0x5001, "MBX pointer ERROR.\n");
	else
		mboxes = ha->mcp->in_mb;

	/* Load return mailbox registers. */
	ha->flags.mbox_int = 1;
	ha->mailbox_out[0] = mb0;
	mboxes >>= 1;	/* mb0 already stored; bitmap now tracks mb1 onward */
	wptr = (uint16_t __iomem *)MAILBOX_REG(ha, reg, 1);

	for (cnt = 1; cnt < ha->mbx_count; cnt++) {
		/* ISP2200 maps mailboxes 8+ at a different register offset. */
		if (IS_QLA2200(ha) && cnt == 8)
			wptr = (uint16_t __iomem *)MAILBOX_REG(ha, reg, 8);
		/*
		 * Mailboxes 4 and 5 go through the debounce helper --
		 * presumably to stabilize a flaky readback; TODO confirm
		 * against qla2x00_debounce_register().
		 */
		if ((cnt == 4 || cnt == 5) && (mboxes & BIT_0))
			ha->mailbox_out[cnt] = qla2x00_debounce_register(wptr);
		else if (mboxes & BIT_0)
			ha->mailbox_out[cnt] = RD_REG_WORD(wptr);

		wptr++;
		mboxes >>= 1;
	}
}
300
301static void
302qla81xx_idc_event(scsi_qla_host_t *vha, uint16_t aen, uint16_t descr)
303{
304 static char *event[] =
305 { "Complete", "Request Notification", "Time Extension" };
306 int rval;
307 struct device_reg_24xx __iomem *reg24 = &vha->hw->iobase->isp24;
308 struct device_reg_82xx __iomem *reg82 = &vha->hw->iobase->isp82;
309 uint16_t __iomem *wptr;
310 uint16_t cnt, timeout, mb[QLA_IDC_ACK_REGS];
311
312
313 if (IS_QLA81XX(vha->hw) || IS_QLA83XX(vha->hw))
314 wptr = (uint16_t __iomem *)®24->mailbox1;
315 else if (IS_QLA8044(vha->hw))
316 wptr = (uint16_t __iomem *)®82->mailbox_out[1];
317 else
318 return;
319
320 for (cnt = 0; cnt < QLA_IDC_ACK_REGS; cnt++, wptr++)
321 mb[cnt] = RD_REG_WORD(wptr);
322
323 ql_dbg(ql_dbg_async, vha, 0x5021,
324 "Inter-Driver Communication %s -- "
325 "%04x %04x %04x %04x %04x %04x %04x.\n",
326 event[aen & 0xff], mb[0], mb[1], mb[2], mb[3],
327 mb[4], mb[5], mb[6]);
328 switch (aen) {
329
330 case MBA_IDC_COMPLETE:
331 if (mb[1] >> 15) {
332 vha->hw->flags.idc_compl_status = 1;
333 if (vha->hw->notify_dcbx_comp && !vha->vp_idx)
334 complete(&vha->hw->dcbx_comp);
335 }
336 break;
337
338 case MBA_IDC_NOTIFY:
339
340 timeout = (descr >> 8) & 0xf;
341 ql_dbg(ql_dbg_async, vha, 0x5022,
342 "%lu Inter-Driver Communication %s -- ACK timeout=%d.\n",
343 vha->host_no, event[aen & 0xff], timeout);
344
345 if (!timeout)
346 return;
347 rval = qla2x00_post_idc_ack_work(vha, mb);
348 if (rval != QLA_SUCCESS)
349 ql_log(ql_log_warn, vha, 0x5023,
350 "IDC failed to post ACK.\n");
351 break;
352 case MBA_IDC_TIME_EXT:
353 vha->hw->idc_extend_tmo = descr;
354 ql_dbg(ql_dbg_async, vha, 0x5087,
355 "%lu Inter-Driver Communication %s -- "
356 "Extend timeout by=%d.\n",
357 vha->host_no, event[aen & 0xff], vha->hw->idc_extend_tmo);
358 break;
359 }
360}
361
362#define LS_UNKNOWN 2
363const char *
364qla2x00_get_link_speed_str(struct qla_hw_data *ha, uint16_t speed)
365{
366 static const char *const link_speeds[] = {
367 "1", "2", "?", "4", "8", "16", "32", "10"
368 };
369#define QLA_LAST_SPEED 7
370
371 if (IS_QLA2100(ha) || IS_QLA2200(ha))
372 return link_speeds[0];
373 else if (speed == 0x13)
374 return link_speeds[QLA_LAST_SPEED];
375 else if (speed < QLA_LAST_SPEED)
376 return link_speeds[speed];
377 else
378 return link_speeds[LS_UNKNOWN];
379}
380
/**
 * qla83xx_handle_8200_aen() - Decode and act on an 83xx 0x8200 IDC AEN.
 * @vha: SCSI driver HA context
 * @mb: mailbox registers carrying the AEN payload
 *
 * mb[1] is the AEN reason bitmap; mb[2]/mb[6] carry PegHalt status-1,
 * mb[3]/mb[7] PegHalt status-2, mb[4] the IDC device state and mb[5] the
 * driver-presence bitmap. Depending on the reason, schedule a NIC core
 * reset, mark the adapter unrecoverable, or kick the IDC state machine.
 */
static void
qla83xx_handle_8200_aen(scsi_qla_host_t *vha, uint16_t *mb)
{
	struct qla_hw_data *ha = vha->hw;

	ql_dbg(ql_dbg_async, vha, 0x506b, "AEN Code: mb[0] = 0x%x AEN reason: "
	    "mb[1] = 0x%x PH-status1: mb[2] = 0x%x PH-status1: mb[6] = 0x%x.\n",
	    mb[0], mb[1], mb[2], mb[6]);
	ql_dbg(ql_dbg_async, vha, 0x506c, "PH-status2: mb[3] = 0x%x "
	    "PH-status2: mb[7] = 0x%x Device-State: mb[4] = 0x%x "
	    "Drv-Presence: mb[5] = 0x%x.\n", mb[3], mb[7], mb[4], mb[5]);

	if (mb[1] & (IDC_PEG_HALT_STATUS_CHANGE | IDC_NIC_FW_REPORTED_FAILURE |
				IDC_HEARTBEAT_FAILURE)) {
		ha->flags.nic_core_hung = 1;
		ql_log(ql_log_warn, vha, 0x5060,
		    "83XX: F/W Error Reported: Check if reset required.\n");

		if (mb[1] & IDC_PEG_HALT_STATUS_CHANGE) {
			uint32_t protocol_engine_id, fw_err_code, err_level;

			/*
			 * PegHalt status-1 decode:
			 *   mb[2] bits 0-7   = protocol engine id
			 *   mb[2] bits 8-15  + mb[6] bits 0-12 = fw error code
			 *   mb[6] bits 13-15 = error level
			 */
			protocol_engine_id = (mb[2] & 0xff);
			fw_err_code = (((mb[2] & 0xff00) >> 8) |
			    ((mb[6] & 0x1fff) << 8));
			err_level = ((mb[6] & 0xe000) >> 13);
			ql_log(ql_log_warn, vha, 0x5061, "PegHalt Status-1 "
			    "Register: protocol_engine_id=0x%x "
			    "fw_err_code=0x%x err_level=0x%x.\n",
			    protocol_engine_id, fw_err_code, err_level);
			ql_log(ql_log_warn, vha, 0x5062, "PegHalt Status-2 "
			    "Register: 0x%x%x.\n", mb[7], mb[3]);
			if (err_level == ERR_LEVEL_NON_FATAL) {
				/* Firmware recovered on its own. */
				ql_log(ql_log_warn, vha, 0x5063,
				    "Not a fatal error, f/w has recovered itself.\n");
			} else if (err_level == ERR_LEVEL_RECOVERABLE_FATAL) {
				ql_log(ql_log_fatal, vha, 0x5064,
				    "Recoverable Fatal error: Chip reset "
				    "required.\n");
				qla83xx_schedule_work(vha,
				    QLA83XX_NIC_CORE_RESET);
			} else if (err_level == ERR_LEVEL_UNRECOVERABLE_FATAL) {
				ql_log(ql_log_fatal, vha, 0x5065,
				    "Unrecoverable Fatal error: Set FAILED "
				    "state, reboot required.\n");
				qla83xx_schedule_work(vha,
				    QLA83XX_NIC_CORE_UNRECOVERABLE);
			}
		}

		if (mb[1] & IDC_NIC_FW_REPORTED_FAILURE) {
			uint16_t peg_fw_state, nw_interface_link_up;
			uint16_t nw_interface_signal_detect, sfp_status;
			uint16_t htbt_counter, htbt_monitor_enable;
			uint16_t sfp_additional_info, sfp_multirate;
			uint16_t sfp_tx_fault, link_speed, dcbx_status;

			/*
			 * Peg-to-FC status decode:
			 *   mb[2] bits 0-7   = peg firmware state
			 *   mb[2] bit  8     = network interface link up
			 *   mb[2] bit  9     = network interface signal detect
			 *   mb[2] bits 10-11 = SFP status
			 *   mb[2] bits 12-14 = heartbeat counter
			 *   mb[2] bit  15    = heartbeat monitor enable
			 *   mb[6] bits 0-1   = SFP additional info
			 *   mb[6] bit  2     = SFP multirate
			 *   mb[6] bit  3     = SFP tx fault
			 *   mb[6] bits 4-6   = link speed
			 *   mb[6] bits 12-14 = DCBX status
			 */
			peg_fw_state = (mb[2] & 0x00ff);
			nw_interface_link_up = ((mb[2] & 0x0100) >> 8);
			nw_interface_signal_detect = ((mb[2] & 0x0200) >> 9);
			sfp_status = ((mb[2] & 0x0c00) >> 10);
			htbt_counter = ((mb[2] & 0x7000) >> 12);
			htbt_monitor_enable = ((mb[2] & 0x8000) >> 15);
			sfp_additional_info = (mb[6] & 0x0003);
			sfp_multirate = ((mb[6] & 0x0004) >> 2);
			sfp_tx_fault = ((mb[6] & 0x0008) >> 3);
			link_speed = ((mb[6] & 0x0070) >> 4);
			dcbx_status = ((mb[6] & 0x7000) >> 12);

			ql_log(ql_log_warn, vha, 0x5066,
			    "Peg-to-Fc Status Register:\n"
			    "peg_fw_state=0x%x, nw_interface_link_up=0x%x, "
			    "nw_interface_signal_detect=0x%x"
			    "\nsfp_statis=0x%x.\n ", peg_fw_state,
			    nw_interface_link_up, nw_interface_signal_detect,
			    sfp_status);
			ql_log(ql_log_warn, vha, 0x5067,
			    "htbt_counter=0x%x, htbt_monitor_enable=0x%x, "
			    "sfp_additional_info=0x%x, sfp_multirate=0x%x.\n ",
			    htbt_counter, htbt_monitor_enable,
			    sfp_additional_info, sfp_multirate);
			ql_log(ql_log_warn, vha, 0x5068,
			    "sfp_tx_fault=0x%x, link_state=0x%x, "
			    "dcbx_status=0x%x.\n", sfp_tx_fault, link_speed,
			    dcbx_status);

			qla83xx_schedule_work(vha, QLA83XX_NIC_CORE_RESET);
		}

		if (mb[1] & IDC_HEARTBEAT_FAILURE) {
			ql_log(ql_log_warn, vha, 0x5069,
			    "Heartbeat Failure encountered, chip reset "
			    "required.\n");

			qla83xx_schedule_work(vha, QLA83XX_NIC_CORE_RESET);
		}
	}

	if (mb[1] & IDC_DEVICE_STATE_CHANGE) {
		ql_log(ql_log_info, vha, 0x506a,
		    "IDC Device-State changed = 0x%x.\n", mb[4]);
		/* The reset owner drives the state machine itself. */
		if (ha->flags.nic_core_reset_owner)
			return;
		qla83xx_schedule_work(vha, MBA_IDC_AEN);
	}
}
541
542int
543qla2x00_is_a_vp_did(scsi_qla_host_t *vha, uint32_t rscn_entry)
544{
545 struct qla_hw_data *ha = vha->hw;
546 scsi_qla_host_t *vp;
547 uint32_t vp_did;
548 unsigned long flags;
549 int ret = 0;
550
551 if (!ha->num_vhosts)
552 return ret;
553
554 spin_lock_irqsave(&ha->vport_slock, flags);
555 list_for_each_entry(vp, &ha->vp_list, list) {
556 vp_did = vp->d_id.b24;
557 if (vp_did == rscn_entry) {
558 ret = 1;
559 break;
560 }
561 }
562 spin_unlock_irqrestore(&ha->vport_slock, flags);
563
564 return ret;
565}
566
567fc_port_t *
568qla2x00_find_fcport_by_loopid(scsi_qla_host_t *vha, uint16_t loop_id)
569{
570 fc_port_t *f, *tf;
571
572 f = tf = NULL;
573 list_for_each_entry_safe(f, tf, &vha->vp_fcports, list)
574 if (f->loop_id == loop_id)
575 return f;
576 return NULL;
577}
578
579fc_port_t *
580qla2x00_find_fcport_by_wwpn(scsi_qla_host_t *vha, u8 *wwpn, u8 incl_deleted)
581{
582 fc_port_t *f, *tf;
583
584 f = tf = NULL;
585 list_for_each_entry_safe(f, tf, &vha->vp_fcports, list) {
586 if (memcmp(f->port_name, wwpn, WWN_SIZE) == 0) {
587 if (incl_deleted)
588 return f;
589 else if (f->deleted == 0)
590 return f;
591 }
592 }
593 return NULL;
594}
595
596fc_port_t *
597qla2x00_find_fcport_by_nportid(scsi_qla_host_t *vha, port_id_t *id,
598 u8 incl_deleted)
599{
600 fc_port_t *f, *tf;
601
602 f = tf = NULL;
603 list_for_each_entry_safe(f, tf, &vha->vp_fcports, list) {
604 if (f->d_id.b24 == id->b24) {
605 if (incl_deleted)
606 return f;
607 else if (f->deleted == 0)
608 return f;
609 }
610 }
611 return NULL;
612}
613
614
615
616
617
618
619
620void
621qla2x00_async_event(scsi_qla_host_t *vha, struct rsp_que *rsp, uint16_t *mb)
622{
623 uint16_t handle_cnt;
624 uint16_t cnt, mbx;
625 uint32_t handles[5];
626 struct qla_hw_data *ha = vha->hw;
627 struct device_reg_2xxx __iomem *reg = &ha->iobase->isp;
628 struct device_reg_24xx __iomem *reg24 = &ha->iobase->isp24;
629 struct device_reg_82xx __iomem *reg82 = &ha->iobase->isp82;
630 uint32_t rscn_entry, host_pid;
631 unsigned long flags;
632 fc_port_t *fcport = NULL;
633
634 if (!vha->hw->flags.fw_started)
635 return;
636
637
638 handle_cnt = 0;
639 if (IS_CNA_CAPABLE(ha))
640 goto skip_rio;
641 switch (mb[0]) {
642 case MBA_SCSI_COMPLETION:
643 handles[0] = le32_to_cpu((uint32_t)((mb[2] << 16) | mb[1]));
644 handle_cnt = 1;
645 break;
646 case MBA_CMPLT_1_16BIT:
647 handles[0] = mb[1];
648 handle_cnt = 1;
649 mb[0] = MBA_SCSI_COMPLETION;
650 break;
651 case MBA_CMPLT_2_16BIT:
652 handles[0] = mb[1];
653 handles[1] = mb[2];
654 handle_cnt = 2;
655 mb[0] = MBA_SCSI_COMPLETION;
656 break;
657 case MBA_CMPLT_3_16BIT:
658 handles[0] = mb[1];
659 handles[1] = mb[2];
660 handles[2] = mb[3];
661 handle_cnt = 3;
662 mb[0] = MBA_SCSI_COMPLETION;
663 break;
664 case MBA_CMPLT_4_16BIT:
665 handles[0] = mb[1];
666 handles[1] = mb[2];
667 handles[2] = mb[3];
668 handles[3] = (uint32_t)RD_MAILBOX_REG(ha, reg, 6);
669 handle_cnt = 4;
670 mb[0] = MBA_SCSI_COMPLETION;
671 break;
672 case MBA_CMPLT_5_16BIT:
673 handles[0] = mb[1];
674 handles[1] = mb[2];
675 handles[2] = mb[3];
676 handles[3] = (uint32_t)RD_MAILBOX_REG(ha, reg, 6);
677 handles[4] = (uint32_t)RD_MAILBOX_REG(ha, reg, 7);
678 handle_cnt = 5;
679 mb[0] = MBA_SCSI_COMPLETION;
680 break;
681 case MBA_CMPLT_2_32BIT:
682 handles[0] = le32_to_cpu((uint32_t)((mb[2] << 16) | mb[1]));
683 handles[1] = le32_to_cpu(
684 ((uint32_t)(RD_MAILBOX_REG(ha, reg, 7) << 16)) |
685 RD_MAILBOX_REG(ha, reg, 6));
686 handle_cnt = 2;
687 mb[0] = MBA_SCSI_COMPLETION;
688 break;
689 default:
690 break;
691 }
692skip_rio:
693 switch (mb[0]) {
694 case MBA_SCSI_COMPLETION:
695 if (!vha->flags.online)
696 break;
697
698 for (cnt = 0; cnt < handle_cnt; cnt++)
699 qla2x00_process_completed_request(vha, rsp->req,
700 handles[cnt]);
701 break;
702
703 case MBA_RESET:
704 ql_dbg(ql_dbg_async, vha, 0x5002,
705 "Asynchronous RESET.\n");
706
707 set_bit(RESET_MARKER_NEEDED, &vha->dpc_flags);
708 break;
709
710 case MBA_SYSTEM_ERR:
711 mbx = (IS_QLA81XX(ha) || IS_QLA83XX(ha) || IS_QLA27XX(ha)) ?
712 RD_REG_WORD(®24->mailbox7) : 0;
713 ql_log(ql_log_warn, vha, 0x5003,
714 "ISP System Error - mbx1=%xh mbx2=%xh mbx3=%xh "
715 "mbx7=%xh.\n", mb[1], mb[2], mb[3], mbx);
716
717 ha->isp_ops->fw_dump(vha, 1);
718 ha->flags.fw_init_done = 0;
719 QLA_FW_STOPPED(ha);
720
721 if (IS_FWI2_CAPABLE(ha)) {
722 if (mb[1] == 0 && mb[2] == 0) {
723 ql_log(ql_log_fatal, vha, 0x5004,
724 "Unrecoverable Hardware Error: adapter "
725 "marked OFFLINE!\n");
726 vha->flags.online = 0;
727 vha->device_flags |= DFLG_DEV_FAILED;
728 } else {
729
730 if ((mbx & MBX_3) && (ha->port_no == 0))
731 set_bit(MPI_RESET_NEEDED,
732 &vha->dpc_flags);
733
734 set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
735 }
736 } else if (mb[1] == 0) {
737 ql_log(ql_log_fatal, vha, 0x5005,
738 "Unrecoverable Hardware Error: adapter marked "
739 "OFFLINE!\n");
740 vha->flags.online = 0;
741 vha->device_flags |= DFLG_DEV_FAILED;
742 } else
743 set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
744 break;
745
746 case MBA_REQ_TRANSFER_ERR:
747 ql_log(ql_log_warn, vha, 0x5006,
748 "ISP Request Transfer Error (%x).\n", mb[1]);
749
750 set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
751 break;
752
753 case MBA_RSP_TRANSFER_ERR:
754 ql_log(ql_log_warn, vha, 0x5007,
755 "ISP Response Transfer Error (%x).\n", mb[1]);
756
757 set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
758 break;
759
760 case MBA_WAKEUP_THRES:
761 ql_dbg(ql_dbg_async, vha, 0x5008,
762 "Asynchronous WAKEUP_THRES (%x).\n", mb[1]);
763 break;
764
765 case MBA_LOOP_INIT_ERR:
766 ql_log(ql_log_warn, vha, 0x5090,
767 "LOOP INIT ERROR (%x).\n", mb[1]);
768 ha->isp_ops->fw_dump(vha, 1);
769 set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
770 break;
771
772 case MBA_LIP_OCCURRED:
773 ha->flags.lip_ae = 1;
774
775 ql_dbg(ql_dbg_async, vha, 0x5009,
776 "LIP occurred (%x).\n", mb[1]);
777
778 if (atomic_read(&vha->loop_state) != LOOP_DOWN) {
779 atomic_set(&vha->loop_state, LOOP_DOWN);
780 atomic_set(&vha->loop_down_timer, LOOP_DOWN_TIME);
781 qla2x00_mark_all_devices_lost(vha, 1);
782 }
783
784 if (vha->vp_idx) {
785 atomic_set(&vha->vp_state, VP_FAILED);
786 fc_vport_set_state(vha->fc_vport, FC_VPORT_FAILED);
787 }
788
789 set_bit(REGISTER_FC4_NEEDED, &vha->dpc_flags);
790 set_bit(REGISTER_FDMI_NEEDED, &vha->dpc_flags);
791
792 vha->flags.management_server_logged_in = 0;
793 qla2x00_post_aen_work(vha, FCH_EVT_LIP, mb[1]);
794 break;
795
796 case MBA_LOOP_UP:
797 if (IS_QLA2100(ha) || IS_QLA2200(ha))
798 ha->link_data_rate = PORT_SPEED_1GB;
799 else
800 ha->link_data_rate = mb[1];
801
802 ql_log(ql_log_info, vha, 0x500a,
803 "LOOP UP detected (%s Gbps).\n",
804 qla2x00_get_link_speed_str(ha, ha->link_data_rate));
805
806 vha->flags.management_server_logged_in = 0;
807 qla2x00_post_aen_work(vha, FCH_EVT_LINKUP, ha->link_data_rate);
808
809 if (AUTO_DETECT_SFP_SUPPORT(vha)) {
810 set_bit(DETECT_SFP_CHANGE, &vha->dpc_flags);
811 qla2xxx_wake_dpc(vha);
812 }
813 break;
814
815 case MBA_LOOP_DOWN:
816 SAVE_TOPO(ha);
817 ha->flags.lip_ae = 0;
818 ha->current_topology = 0;
819
820 mbx = (IS_QLA81XX(ha) || IS_QLA8031(ha))
821 ? RD_REG_WORD(®24->mailbox4) : 0;
822 mbx = (IS_P3P_TYPE(ha)) ? RD_REG_WORD(®82->mailbox_out[4])
823 : mbx;
824 ql_log(ql_log_info, vha, 0x500b,
825 "LOOP DOWN detected (%x %x %x %x).\n",
826 mb[1], mb[2], mb[3], mbx);
827
828 if (atomic_read(&vha->loop_state) != LOOP_DOWN) {
829 atomic_set(&vha->loop_state, LOOP_DOWN);
830 atomic_set(&vha->loop_down_timer, LOOP_DOWN_TIME);
831
832
833
834
835
836 if (!vha->vp_idx) {
837 if (ha->flags.fawwpn_enabled) {
838 void *wwpn = ha->init_cb->port_name;
839 memcpy(vha->port_name, wwpn, WWN_SIZE);
840 fc_host_port_name(vha->host) =
841 wwn_to_u64(vha->port_name);
842 ql_dbg(ql_dbg_init + ql_dbg_verbose,
843 vha, 0x00d8, "LOOP DOWN detected,"
844 "restore WWPN %016llx\n",
845 wwn_to_u64(vha->port_name));
846 }
847
848 clear_bit(VP_CONFIG_OK, &vha->vp_flags);
849 }
850
851 vha->device_flags |= DFLG_NO_CABLE;
852 qla2x00_mark_all_devices_lost(vha, 1);
853 }
854
855 if (vha->vp_idx) {
856 atomic_set(&vha->vp_state, VP_FAILED);
857 fc_vport_set_state(vha->fc_vport, FC_VPORT_FAILED);
858 }
859
860 vha->flags.management_server_logged_in = 0;
861 ha->link_data_rate = PORT_SPEED_UNKNOWN;
862 qla2x00_post_aen_work(vha, FCH_EVT_LINKDOWN, 0);
863 break;
864
865 case MBA_LIP_RESET:
866 ql_dbg(ql_dbg_async, vha, 0x500c,
867 "LIP reset occurred (%x).\n", mb[1]);
868
869 if (atomic_read(&vha->loop_state) != LOOP_DOWN) {
870 atomic_set(&vha->loop_state, LOOP_DOWN);
871 atomic_set(&vha->loop_down_timer, LOOP_DOWN_TIME);
872 qla2x00_mark_all_devices_lost(vha, 1);
873 }
874
875 if (vha->vp_idx) {
876 atomic_set(&vha->vp_state, VP_FAILED);
877 fc_vport_set_state(vha->fc_vport, FC_VPORT_FAILED);
878 }
879
880 set_bit(RESET_MARKER_NEEDED, &vha->dpc_flags);
881
882 ha->operating_mode = LOOP;
883 vha->flags.management_server_logged_in = 0;
884 qla2x00_post_aen_work(vha, FCH_EVT_LIPRESET, mb[1]);
885 break;
886
887
888 case MBA_POINT_TO_POINT:
889 ha->flags.lip_ae = 0;
890
891 if (IS_QLA2100(ha))
892 break;
893
894 if (IS_CNA_CAPABLE(ha)) {
895 ql_dbg(ql_dbg_async, vha, 0x500d,
896 "DCBX Completed -- %04x %04x %04x.\n",
897 mb[1], mb[2], mb[3]);
898 if (ha->notify_dcbx_comp && !vha->vp_idx)
899 complete(&ha->dcbx_comp);
900
901 } else
902 ql_dbg(ql_dbg_async, vha, 0x500e,
903 "Asynchronous P2P MODE received.\n");
904
905
906
907
908
909 if (atomic_read(&vha->loop_state) != LOOP_DOWN) {
910 atomic_set(&vha->loop_state, LOOP_DOWN);
911 if (!atomic_read(&vha->loop_down_timer))
912 atomic_set(&vha->loop_down_timer,
913 LOOP_DOWN_TIME);
914 if (!N2N_TOPO(ha))
915 qla2x00_mark_all_devices_lost(vha, 1);
916 }
917
918 if (vha->vp_idx) {
919 atomic_set(&vha->vp_state, VP_FAILED);
920 fc_vport_set_state(vha->fc_vport, FC_VPORT_FAILED);
921 }
922
923 if (!(test_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags)))
924 set_bit(RESET_MARKER_NEEDED, &vha->dpc_flags);
925
926 set_bit(REGISTER_FC4_NEEDED, &vha->dpc_flags);
927 set_bit(REGISTER_FDMI_NEEDED, &vha->dpc_flags);
928
929 vha->flags.management_server_logged_in = 0;
930 break;
931
932 case MBA_CHG_IN_CONNECTION:
933 if (IS_QLA2100(ha))
934 break;
935
936 ql_dbg(ql_dbg_async, vha, 0x500f,
937 "Configuration change detected: value=%x.\n", mb[1]);
938
939 if (atomic_read(&vha->loop_state) != LOOP_DOWN) {
940 atomic_set(&vha->loop_state, LOOP_DOWN);
941 if (!atomic_read(&vha->loop_down_timer))
942 atomic_set(&vha->loop_down_timer,
943 LOOP_DOWN_TIME);
944 qla2x00_mark_all_devices_lost(vha, 1);
945 }
946
947 if (vha->vp_idx) {
948 atomic_set(&vha->vp_state, VP_FAILED);
949 fc_vport_set_state(vha->fc_vport, FC_VPORT_FAILED);
950 }
951
952 set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags);
953 set_bit(LOCAL_LOOP_UPDATE, &vha->dpc_flags);
954 break;
955
956 case MBA_PORT_UPDATE:
957
958
959
960
961
962
963
964
965
966
967
968
969
970
971
972 if (IS_QLA2XXX_MIDTYPE(ha) &&
973 ((mb[1] == 0xffff && (mb[3] & 0xff) != 0xff) ||
974 (mb[1] != 0xffff)) && vha->vp_idx != (mb[3] & 0xff))
975 break;
976
977 if (mb[2] == 0x7) {
978 ql_dbg(ql_dbg_async, vha, 0x5010,
979 "Port %s %04x %04x %04x.\n",
980 mb[1] == 0xffff ? "unavailable" : "logout",
981 mb[1], mb[2], mb[3]);
982
983 if (mb[1] == 0xffff)
984 goto global_port_update;
985
986 if (mb[1] == NPH_SNS_LID(ha)) {
987 set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags);
988 set_bit(LOCAL_LOOP_UPDATE, &vha->dpc_flags);
989 break;
990 }
991
992
993 if (IS_FWI2_CAPABLE(ha))
994 handle_cnt = NPH_SNS;
995 else
996 handle_cnt = SIMPLE_NAME_SERVER;
997 if (mb[1] == handle_cnt) {
998 set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags);
999 set_bit(LOCAL_LOOP_UPDATE, &vha->dpc_flags);
1000 break;
1001 }
1002
1003
1004 fcport = qla2x00_find_fcport_by_loopid(vha, mb[1]);
1005 if (!fcport)
1006 break;
1007 if (atomic_read(&fcport->state) != FCS_ONLINE)
1008 break;
1009 ql_dbg(ql_dbg_async, vha, 0x508a,
1010 "Marking port lost loopid=%04x portid=%06x.\n",
1011 fcport->loop_id, fcport->d_id.b24);
1012 if (qla_ini_mode_enabled(vha)) {
1013 qla2x00_mark_device_lost(fcport->vha, fcport, 1, 1);
1014 fcport->logout_on_delete = 0;
1015 qlt_schedule_sess_for_deletion(fcport);
1016 }
1017 break;
1018
1019global_port_update:
1020 if (atomic_read(&vha->loop_state) != LOOP_DOWN) {
1021 atomic_set(&vha->loop_state, LOOP_DOWN);
1022 atomic_set(&vha->loop_down_timer,
1023 LOOP_DOWN_TIME);
1024 vha->device_flags |= DFLG_NO_CABLE;
1025 qla2x00_mark_all_devices_lost(vha, 1);
1026 }
1027
1028 if (vha->vp_idx) {
1029 atomic_set(&vha->vp_state, VP_FAILED);
1030 fc_vport_set_state(vha->fc_vport,
1031 FC_VPORT_FAILED);
1032 qla2x00_mark_all_devices_lost(vha, 1);
1033 }
1034
1035 vha->flags.management_server_logged_in = 0;
1036 ha->link_data_rate = PORT_SPEED_UNKNOWN;
1037 break;
1038 }
1039
1040
1041
1042
1043
1044
1045 atomic_set(&vha->loop_down_timer, 0);
1046 if (atomic_read(&vha->loop_state) != LOOP_DOWN &&
1047 !ha->flags.n2n_ae &&
1048 atomic_read(&vha->loop_state) != LOOP_DEAD) {
1049 ql_dbg(ql_dbg_async, vha, 0x5011,
1050 "Asynchronous PORT UPDATE ignored %04x/%04x/%04x.\n",
1051 mb[1], mb[2], mb[3]);
1052
1053 qlt_async_event(mb[0], vha, mb);
1054 break;
1055 }
1056
1057 ql_dbg(ql_dbg_async, vha, 0x5012,
1058 "Port database changed %04x %04x %04x.\n",
1059 mb[1], mb[2], mb[3]);
1060
1061
1062
1063
1064 atomic_set(&vha->loop_state, LOOP_UP);
1065 vha->scan.scan_retry = 0;
1066
1067 set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags);
1068 set_bit(LOCAL_LOOP_UPDATE, &vha->dpc_flags);
1069 set_bit(VP_CONFIG_OK, &vha->vp_flags);
1070
1071 qlt_async_event(mb[0], vha, mb);
1072 break;
1073
1074 case MBA_RSCN_UPDATE:
1075
1076 if (vha->vp_idx && test_bit(VP_SCR_NEEDED, &vha->vp_flags))
1077 break;
1078
1079 if (ha->flags.npiv_supported && vha->vp_idx != (mb[3] & 0xff))
1080 break;
1081
1082 ql_dbg(ql_dbg_async, vha, 0x5013,
1083 "RSCN database changed -- %04x %04x %04x.\n",
1084 mb[1], mb[2], mb[3]);
1085
1086 rscn_entry = ((mb[1] & 0xff) << 16) | mb[2];
1087 host_pid = (vha->d_id.b.domain << 16) | (vha->d_id.b.area << 8)
1088 | vha->d_id.b.al_pa;
1089 if (rscn_entry == host_pid) {
1090 ql_dbg(ql_dbg_async, vha, 0x5014,
1091 "Ignoring RSCN update to local host "
1092 "port ID (%06x).\n", host_pid);
1093 break;
1094 }
1095
1096
1097 rscn_entry = ((mb[1] & 0x3ff) << 16) | mb[2];
1098
1099
1100 if (qla2x00_is_a_vp_did(vha, rscn_entry))
1101 break;
1102
1103 atomic_set(&vha->loop_down_timer, 0);
1104 vha->flags.management_server_logged_in = 0;
1105 {
1106 struct event_arg ea;
1107
1108 memset(&ea, 0, sizeof(ea));
1109 ea.event = FCME_RSCN;
1110 ea.id.b24 = rscn_entry;
1111 ea.id.b.rsvd_1 = rscn_entry >> 24;
1112 qla2x00_fcport_event_handler(vha, &ea);
1113 qla2x00_post_aen_work(vha, FCH_EVT_RSCN, rscn_entry);
1114 }
1115 break;
1116
1117 case MBA_ZIO_RESPONSE:
1118 ql_dbg(ql_dbg_async, vha, 0x5015,
1119 "[R|Z]IO update completion.\n");
1120
1121 if (IS_FWI2_CAPABLE(ha))
1122 qla24xx_process_response_queue(vha, rsp);
1123 else
1124 qla2x00_process_response_queue(rsp);
1125 break;
1126
1127 case MBA_DISCARD_RND_FRAME:
1128 ql_dbg(ql_dbg_async, vha, 0x5016,
1129 "Discard RND Frame -- %04x %04x %04x.\n",
1130 mb[1], mb[2], mb[3]);
1131 break;
1132
1133 case MBA_TRACE_NOTIFICATION:
1134 ql_dbg(ql_dbg_async, vha, 0x5017,
1135 "Trace Notification -- %04x %04x.\n", mb[1], mb[2]);
1136 break;
1137
1138 case MBA_ISP84XX_ALERT:
1139 ql_dbg(ql_dbg_async, vha, 0x5018,
1140 "ISP84XX Alert Notification -- %04x %04x %04x.\n",
1141 mb[1], mb[2], mb[3]);
1142
1143 spin_lock_irqsave(&ha->cs84xx->access_lock, flags);
1144 switch (mb[1]) {
1145 case A84_PANIC_RECOVERY:
1146 ql_log(ql_log_info, vha, 0x5019,
1147 "Alert 84XX: panic recovery %04x %04x.\n",
1148 mb[2], mb[3]);
1149 break;
1150 case A84_OP_LOGIN_COMPLETE:
1151 ha->cs84xx->op_fw_version = mb[3] << 16 | mb[2];
1152 ql_log(ql_log_info, vha, 0x501a,
1153 "Alert 84XX: firmware version %x.\n",
1154 ha->cs84xx->op_fw_version);
1155 break;
1156 case A84_DIAG_LOGIN_COMPLETE:
1157 ha->cs84xx->diag_fw_version = mb[3] << 16 | mb[2];
1158 ql_log(ql_log_info, vha, 0x501b,
1159 "Alert 84XX: diagnostic firmware version %x.\n",
1160 ha->cs84xx->diag_fw_version);
1161 break;
1162 case A84_GOLD_LOGIN_COMPLETE:
1163 ha->cs84xx->diag_fw_version = mb[3] << 16 | mb[2];
1164 ha->cs84xx->fw_update = 1;
1165 ql_log(ql_log_info, vha, 0x501c,
1166 "Alert 84XX: gold firmware version %x.\n",
1167 ha->cs84xx->gold_fw_version);
1168 break;
1169 default:
1170 ql_log(ql_log_warn, vha, 0x501d,
1171 "Alert 84xx: Invalid Alert %04x %04x %04x.\n",
1172 mb[1], mb[2], mb[3]);
1173 }
1174 spin_unlock_irqrestore(&ha->cs84xx->access_lock, flags);
1175 break;
1176 case MBA_DCBX_START:
1177 ql_dbg(ql_dbg_async, vha, 0x501e,
1178 "DCBX Started -- %04x %04x %04x.\n",
1179 mb[1], mb[2], mb[3]);
1180 break;
1181 case MBA_DCBX_PARAM_UPDATE:
1182 ql_dbg(ql_dbg_async, vha, 0x501f,
1183 "DCBX Parameters Updated -- %04x %04x %04x.\n",
1184 mb[1], mb[2], mb[3]);
1185 break;
1186 case MBA_FCF_CONF_ERR:
1187 ql_dbg(ql_dbg_async, vha, 0x5020,
1188 "FCF Configuration Error -- %04x %04x %04x.\n",
1189 mb[1], mb[2], mb[3]);
1190 break;
1191 case MBA_IDC_NOTIFY:
1192 if (IS_QLA8031(vha->hw) || IS_QLA8044(ha)) {
1193 mb[4] = RD_REG_WORD(®24->mailbox4);
1194 if (((mb[2] & 0x7fff) == MBC_PORT_RESET ||
1195 (mb[2] & 0x7fff) == MBC_SET_PORT_CONFIG) &&
1196 (mb[4] & INTERNAL_LOOPBACK_MASK) != 0) {
1197 set_bit(ISP_QUIESCE_NEEDED, &vha->dpc_flags);
1198
1199
1200
1201 if (atomic_read(&vha->loop_state) == LOOP_DOWN)
1202 atomic_set(&vha->loop_down_timer,
1203 LOOP_DOWN_TIME);
1204 qla2xxx_wake_dpc(vha);
1205 }
1206 }
1207
1208 case MBA_IDC_COMPLETE:
1209 if (ha->notify_lb_portup_comp && !vha->vp_idx)
1210 complete(&ha->lb_portup_comp);
1211
1212 case MBA_IDC_TIME_EXT:
1213 if (IS_QLA81XX(vha->hw) || IS_QLA8031(vha->hw) ||
1214 IS_QLA8044(ha))
1215 qla81xx_idc_event(vha, mb[0], mb[1]);
1216 break;
1217
1218 case MBA_IDC_AEN:
1219 mb[4] = RD_REG_WORD(®24->mailbox4);
1220 mb[5] = RD_REG_WORD(®24->mailbox5);
1221 mb[6] = RD_REG_WORD(®24->mailbox6);
1222 mb[7] = RD_REG_WORD(®24->mailbox7);
1223 qla83xx_handle_8200_aen(vha, mb);
1224 break;
1225
1226 case MBA_DPORT_DIAGNOSTICS:
1227 ql_dbg(ql_dbg_async, vha, 0x5052,
1228 "D-Port Diagnostics: %04x result=%s\n",
1229 mb[0],
1230 mb[1] == 0 ? "start" :
1231 mb[1] == 1 ? "done (pass)" :
1232 mb[1] == 2 ? "done (error)" : "other");
1233 break;
1234
1235 case MBA_TEMPERATURE_ALERT:
1236 ql_dbg(ql_dbg_async, vha, 0x505e,
1237 "TEMPERATURE ALERT: %04x %04x %04x\n", mb[1], mb[2], mb[3]);
1238 if (mb[1] == 0x12)
1239 schedule_work(&ha->board_disable);
1240 break;
1241
1242 case MBA_TRANS_INSERT:
1243 ql_dbg(ql_dbg_async, vha, 0x5091,
1244 "Transceiver Insertion: %04x\n", mb[1]);
1245 break;
1246
1247 default:
1248 ql_dbg(ql_dbg_async, vha, 0x5057,
1249 "Unknown AEN:%04x %04x %04x %04x\n",
1250 mb[0], mb[1], mb[2], mb[3]);
1251 }
1252
1253 qlt_async_event(mb[0], vha, mb);
1254
1255 if (!vha->vp_idx && ha->num_vhosts)
1256 qla2x00_alert_all_vps(rsp, mb);
1257}
1258
1259
1260
1261
1262
1263
1264
1265void
1266qla2x00_process_completed_request(struct scsi_qla_host *vha,
1267 struct req_que *req, uint32_t index)
1268{
1269 srb_t *sp;
1270 struct qla_hw_data *ha = vha->hw;
1271
1272
1273 if (index >= req->num_outstanding_cmds) {
1274 ql_log(ql_log_warn, vha, 0x3014,
1275 "Invalid SCSI command index (%x).\n", index);
1276
1277 if (IS_P3P_TYPE(ha))
1278 set_bit(FCOE_CTX_RESET_NEEDED, &vha->dpc_flags);
1279 else
1280 set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
1281 return;
1282 }
1283
1284 sp = req->outstanding_cmds[index];
1285 if (sp) {
1286
1287 req->outstanding_cmds[index] = NULL;
1288
1289
1290 sp->done(sp, DID_OK << 16);
1291 } else {
1292 ql_log(ql_log_warn, vha, 0x3016, "Invalid SCSI SRB.\n");
1293
1294 if (IS_P3P_TYPE(ha))
1295 set_bit(FCOE_CTX_RESET_NEEDED, &vha->dpc_flags);
1296 else
1297 set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
1298 }
1299}
1300
/*
 * qla2x00_get_sp_from_handle() - Map a completed IOCB back to its SRB.
 * @vha: SCSI driver HA context
 * @func: caller name, for diagnostics
 * @req: request queue the command was issued on
 * @iocb: response-queue entry carrying the completion handle
 *
 * On success the SRB is removed from the outstanding-command table and
 * returned; the caller owns its completion.  Returns NULL when the
 * handle does not resolve to a live, matching SRB.
 */
srb_t *
qla2x00_get_sp_from_handle(scsi_qla_host_t *vha, const char *func,
    struct req_que *req, void *iocb)
{
	struct qla_hw_data *ha = vha->hw;
	sts_entry_t *pkt = iocb;
	srb_t *sp = NULL;
	uint16_t index;

	/* Low word of the IOCB handle indexes the outstanding-cmd table. */
	index = LSW(pkt->handle);
	if (index >= req->num_outstanding_cmds) {
		ql_log(ql_log_warn, vha, 0x5031,
		    "Invalid command index (%x) type %8ph.\n",
		    index, iocb);
		/* Out-of-range handle: firmware/driver state is suspect,
		 * schedule a full adapter recovery. */
		if (IS_P3P_TYPE(ha))
			set_bit(FCOE_CTX_RESET_NEEDED, &vha->dpc_flags);
		else
			set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
		goto done;
	}
	sp = req->outstanding_cmds[index];
	if (!sp) {
		/* Slot already cleared -- command likely timed out earlier. */
		ql_log(ql_log_warn, vha, 0x5032,
		    "Invalid completion handle (%x) -- timed-out.\n", index);
		return sp;
	}
	if (sp->handle != index) {
		/* Stale/mismatched handle: do not complete someone else's SRB. */
		ql_log(ql_log_warn, vha, 0x5033,
		    "SRB handle (%x) mismatch %x.\n", sp->handle, index);
		return NULL;
	}

	/* Claim the slot so the command cannot be completed twice. */
	req->outstanding_cmds[index] = NULL;

done:
	return sp;
}
1338
/*
 * qla2x00_mbx_iocb_entry() - Complete a legacy MBX IOCB (async login-style
 * command on 2x00-series adapters).  Decodes the mailbox words returned in
 * the IOCB into the SRB's logio data and always finishes via sp->done().
 */
static void
qla2x00_mbx_iocb_entry(scsi_qla_host_t *vha, struct req_que *req,
    struct mbx_entry *mbx)
{
	const char func[] = "MBX-IOCB";
	const char *type;
	fc_port_t *fcport;
	srb_t *sp;
	struct srb_iocb *lio;
	uint16_t *data;
	uint16_t status;

	sp = qla2x00_get_sp_from_handle(vha, func, req, mbx);
	if (!sp)
		return;

	lio = &sp->u.iocb_cmd;
	type = sp->name;
	fcport = sp->fcport;
	data = lio->u.logio.data;

	/* Default result: error (overwritten below on success). */
	data[0] = MBS_COMMAND_ERROR;
	data[1] = lio->u.logio.flags & SRB_LOGIN_RETRIED ?
	    QLA_LOGIO_LOGIN_RETRIED : 0;
	if (mbx->entry_status) {
		/* Malformed IOCB: log and complete with the error default. */
		ql_dbg(ql_dbg_async, vha, 0x5043,
		    "Async-%s error entry - hdl=%x portid=%02x%02x%02x "
		    "entry-status=%x status=%x state-flag=%x "
		    "status-flags=%x.\n", type, sp->handle,
		    fcport->d_id.b.domain, fcport->d_id.b.area,
		    fcport->d_id.b.al_pa, mbx->entry_status,
		    le16_to_cpu(mbx->status), le16_to_cpu(mbx->state_flags),
		    le16_to_cpu(mbx->status_flags));

		ql_dump_buffer(ql_dbg_async + ql_dbg_buffer, vha, 0x5029,
		    (uint8_t *)mbx, sizeof(*mbx));

		goto logio_done;
	}

	status = le16_to_cpu(mbx->status);
	/* For logins, a 0x30 status with a COMPLETE mb0 still counts as
	 * success -- normalize it. */
	if (status == 0x30 && sp->type == SRB_LOGIN_CMD &&
	    le16_to_cpu(mbx->mb0) == MBS_COMMAND_COMPLETE)
		status = 0;
	if (!status && le16_to_cpu(mbx->mb0) == MBS_COMMAND_COMPLETE) {
		ql_dbg(ql_dbg_async, vha, 0x5045,
		    "Async-%s complete - hdl=%x portid=%02x%02x%02x mbx1=%x.\n",
		    type, sp->handle, fcport->d_id.b.domain,
		    fcport->d_id.b.area, fcport->d_id.b.al_pa,
		    le16_to_cpu(mbx->mb1));

		data[0] = MBS_COMMAND_COMPLETE;
		if (sp->type == SRB_LOGIN_CMD) {
			/* mb1 bit 0 = remote port is an initiator,
			 * bit 1 = FCP-2 (tape-class) device. */
			fcport->port_type = FCT_TARGET;
			if (le16_to_cpu(mbx->mb1) & BIT_0)
				fcport->port_type = FCT_INITIATOR;
			else if (le16_to_cpu(mbx->mb1) & BIT_1)
				fcport->flags |= FCF_FCP2_DEVICE;
		}
		goto logio_done;
	}

	/* Failure: propagate recognized mailbox codes, fold the rest
	 * into a generic command error. */
	data[0] = le16_to_cpu(mbx->mb0);
	switch (data[0]) {
	case MBS_PORT_ID_USED:
		data[1] = le16_to_cpu(mbx->mb1);
		break;
	case MBS_LOOP_ID_USED:
		break;
	default:
		data[0] = MBS_COMMAND_ERROR;
		break;
	}

	ql_log(ql_log_warn, vha, 0x5046,
	    "Async-%s failed - hdl=%x portid=%02x%02x%02x status=%x "
	    "mb0=%x mb1=%x mb2=%x mb6=%x mb7=%x.\n", type, sp->handle,
	    fcport->d_id.b.domain, fcport->d_id.b.area, fcport->d_id.b.al_pa,
	    status, le16_to_cpu(mbx->mb0), le16_to_cpu(mbx->mb1),
	    le16_to_cpu(mbx->mb2), le16_to_cpu(mbx->mb6),
	    le16_to_cpu(mbx->mb7));

logio_done:
	sp->done(sp, 0);
}
1424
1425static void
1426qla24xx_mbx_iocb_entry(scsi_qla_host_t *vha, struct req_que *req,
1427 struct mbx_24xx_entry *pkt)
1428{
1429 const char func[] = "MBX-IOCB2";
1430 srb_t *sp;
1431 struct srb_iocb *si;
1432 u16 sz, i;
1433 int res;
1434
1435 sp = qla2x00_get_sp_from_handle(vha, func, req, pkt);
1436 if (!sp)
1437 return;
1438
1439 si = &sp->u.iocb_cmd;
1440 sz = min(ARRAY_SIZE(pkt->mb), ARRAY_SIZE(sp->u.iocb_cmd.u.mbx.in_mb));
1441
1442 for (i = 0; i < sz; i++)
1443 si->u.mbx.in_mb[i] = le16_to_cpu(pkt->mb[i]);
1444
1445 res = (si->u.mbx.in_mb[0] & MBS_MASK);
1446
1447 sp->done(sp, res);
1448}
1449
1450static void
1451qla24xxx_nack_iocb_entry(scsi_qla_host_t *vha, struct req_que *req,
1452 struct nack_to_isp *pkt)
1453{
1454 const char func[] = "nack";
1455 srb_t *sp;
1456 int res = 0;
1457
1458 sp = qla2x00_get_sp_from_handle(vha, func, req, pkt);
1459 if (!sp)
1460 return;
1461
1462 if (pkt->u.isp2x.status != cpu_to_le16(NOTIFY_ACK_SUCCESS))
1463 res = QLA_FUNCTION_FAILED;
1464
1465 sp->done(sp, res);
1466}
1467
/*
 * qla2x00_ct_entry() - Complete a CT (Common Transport) IOCB, either a
 * user bsg pass-through (SRB_CT_CMD) or a driver-internal fabric query
 * (SRB_CT_PTHRU_CMD).  Always finishes the SRB via sp->done().
 */
static void
qla2x00_ct_entry(scsi_qla_host_t *vha, struct req_que *req,
    sts_entry_t *pkt, int iocb_type)
{
	const char func[] = "CT_IOCB";
	const char *type;
	srb_t *sp;
	struct bsg_job *bsg_job;
	struct fc_bsg_reply *bsg_reply;
	uint16_t comp_status;
	int res = 0;

	sp = qla2x00_get_sp_from_handle(vha, func, req, pkt);
	if (!sp)
		return;

	switch (sp->type) {
	case SRB_CT_CMD:
		bsg_job = sp->u.bsg_job;
		bsg_reply = bsg_job->reply;

		type = "ct pass-through";

		comp_status = le16_to_cpu(pkt->comp_status);

		/*
		 * Transport-level status is reported OK up front; the
		 * SCSI-midlayer result in 'res' carries the actual outcome
		 * of the firmware completion below.
		 */
		bsg_reply->reply_data.ctels_reply.status = FC_CTELS_STATUS_OK;
		bsg_job->reply_len = sizeof(struct fc_bsg_reply);

		if (comp_status != CS_COMPLETE) {
			if (comp_status == CS_DATA_UNDERRUN) {
				/* Underrun still delivers partial data. */
				res = DID_OK << 16;
				bsg_reply->reply_payload_rcv_len =
				    le16_to_cpu(((sts_entry_t *)pkt)->rsp_info_len);

				ql_log(ql_log_warn, vha, 0x5048,
				    "CT pass-through-%s error comp_status=0x%x total_byte=0x%x.\n",
				    type, comp_status,
				    bsg_reply->reply_payload_rcv_len);
			} else {
				ql_log(ql_log_warn, vha, 0x5049,
				    "CT pass-through-%s error comp_status=0x%x.\n",
				    type, comp_status);
				res = DID_ERROR << 16;
				bsg_reply->reply_payload_rcv_len = 0;
			}
			ql_dump_buffer(ql_dbg_async + ql_dbg_buffer, vha, 0x5035,
			    (uint8_t *)pkt, sizeof(*pkt));
		} else {
			res = DID_OK << 16;
			bsg_reply->reply_payload_rcv_len =
			    bsg_job->reply_payload.payload_len;
			bsg_job->reply_len = 0;
		}
		break;
	case SRB_CT_PTHRU_CMD:
		/*
		 * Driver-issued CT pass-through: no bsg job attached,
		 * validate the management-server response directly.
		 */
		res = qla2x00_chk_ms_status(vha, (ms_iocb_entry_t *)pkt,
		    (struct ct_sns_rsp *)sp->u.iocb_cmd.u.ctarg.rsp,
		    sp->name);
		break;
	}

	sp->done(sp, res);
}
1539
/*
 * qla24xx_els_ct_entry() - Complete an ELS or CT IOCB on ISP24xx-class
 * adapters.  Handles bsg pass-through jobs, the driver-internal ELS
 * PLOGI/LOGO path, and driver CT queries; each path ends in sp->done().
 */
static void
qla24xx_els_ct_entry(scsi_qla_host_t *vha, struct req_que *req,
    struct sts_entry_24xx *pkt, int iocb_type)
{
	const char func[] = "ELS_CT_IOCB";
	const char *type;
	srb_t *sp;
	struct bsg_job *bsg_job;
	struct fc_bsg_reply *bsg_reply;
	uint16_t comp_status;
	uint32_t fw_status[3];
	int res;
	struct srb_iocb *els;

	sp = qla2x00_get_sp_from_handle(vha, func, req, pkt);
	if (!sp)
		return;

	/* Classify the SRB; some types complete right here. */
	type = NULL;
	switch (sp->type) {
	case SRB_ELS_CMD_RPT:
	case SRB_ELS_CMD_HST:
		type = "els";
		break;
	case SRB_CT_CMD:
		type = "ct pass-through";
		break;
	case SRB_ELS_DCMD:
		type = "Driver ELS logo";
		if (iocb_type != ELS_IOCB_TYPE) {
			ql_dbg(ql_dbg_user, vha, 0x5047,
			    "Completing %s: (%p) type=%d.\n",
			    type, sp, sp->type);
			sp->done(sp, 0);
			return;
		}
		break;
	case SRB_CT_PTHRU_CMD:
		/*
		 * Driver-issued CT pass-through: no bsg job attached;
		 * validate the response and finish the SRB immediately.
		 */
		res = qla2x00_chk_ms_status(sp->vha, (ms_iocb_entry_t *)pkt,
		    (struct ct_sns_rsp *)sp->u.iocb_cmd.u.ctarg.rsp,
		    sp->name);
		sp->done(sp, res);
		return;
	default:
		ql_dbg(ql_dbg_user, vha, 0x503e,
		    "Unrecognized SRB: (%p) type=%d.\n", sp, sp->type);
		return;
	}

	/* Gather firmware completion status and ELS error subcodes. */
	comp_status = fw_status[0] = le16_to_cpu(pkt->comp_status);
	fw_status[1] = le16_to_cpu(((struct els_sts_entry_24xx*)pkt)->error_subcode_1);
	fw_status[2] = le16_to_cpu(((struct els_sts_entry_24xx*)pkt)->error_subcode_2);

	if (iocb_type == ELS_IOCB_TYPE) {
		/* Driver-internal ELS (e.g. PLOGI): record the firmware
		 * status words on the SRB for the caller to inspect. */
		els = &sp->u.iocb_cmd;
		els->u.els_plogi.fw_status[0] = fw_status[0];
		els->u.els_plogi.fw_status[1] = fw_status[1];
		els->u.els_plogi.fw_status[2] = fw_status[2];
		els->u.els_plogi.comp_status = fw_status[0];
		if (comp_status == CS_COMPLETE) {
			res = DID_OK << 16;
		} else {
			if (comp_status == CS_DATA_UNDERRUN) {
				res = DID_OK << 16;
				els->u.els_plogi.len =
				    le16_to_cpu(((struct els_sts_entry_24xx *)
				    pkt)->total_byte_count);
			} else {
				els->u.els_plogi.len = 0;
				res = DID_ERROR << 16;
			}
		}
		ql_log(ql_log_info, vha, 0x503f,
		    "ELS IOCB Done -%s error hdl=%x comp_status=0x%x error subcode 1=0x%x error subcode 2=0x%x total_byte=0x%x\n",
		    type, sp->handle, comp_status, fw_status[1], fw_status[2],
		    le16_to_cpu(((struct els_sts_entry_24xx *)
		    pkt)->total_byte_count));
		goto els_ct_done;
	}

	/*
	 * bsg pass-through path: report transport status OK and append
	 * the raw firmware status words after the fc_bsg_reply so the
	 * application can decode the ELS/CT result itself.
	 */
	bsg_job = sp->u.bsg_job;
	bsg_reply = bsg_job->reply;
	bsg_reply->reply_data.ctels_reply.status = FC_CTELS_STATUS_OK;
	bsg_job->reply_len = sizeof(struct fc_bsg_reply) + sizeof(fw_status);

	if (comp_status != CS_COMPLETE) {
		if (comp_status == CS_DATA_UNDERRUN) {
			/* Underrun still delivers partial data. */
			res = DID_OK << 16;
			bsg_reply->reply_payload_rcv_len =
			    le16_to_cpu(((struct els_sts_entry_24xx *)pkt)->total_byte_count);

			ql_dbg(ql_dbg_user, vha, 0x503f,
			    "ELS-CT pass-through-%s error hdl=%x comp_status-status=0x%x "
			    "error subcode 1=0x%x error subcode 2=0x%x total_byte = 0x%x.\n",
			    type, sp->handle, comp_status, fw_status[1], fw_status[2],
			    le16_to_cpu(((struct els_sts_entry_24xx *)
				pkt)->total_byte_count));
		} else {
			ql_dbg(ql_dbg_user, vha, 0x5040,
			    "ELS-CT pass-through-%s error hdl=%x comp_status-status=0x%x "
			    "error subcode 1=0x%x error subcode 2=0x%x.\n",
			    type, sp->handle, comp_status,
			    le16_to_cpu(((struct els_sts_entry_24xx *)
				pkt)->error_subcode_1),
			    le16_to_cpu(((struct els_sts_entry_24xx *)
				    pkt)->error_subcode_2));
			res = DID_ERROR << 16;
			bsg_reply->reply_payload_rcv_len = 0;
		}
		memcpy(bsg_job->reply + sizeof(struct fc_bsg_reply),
		       fw_status, sizeof(fw_status));
		ql_dump_buffer(ql_dbg_user + ql_dbg_buffer, vha, 0x5056,
				(uint8_t *)pkt, sizeof(*pkt));
	}
	else {
		res = DID_OK << 16;
		bsg_reply->reply_payload_rcv_len = bsg_job->reply_payload.payload_len;
		bsg_job->reply_len = 0;
	}
els_ct_done:

	sp->done(sp, res);
}
1669
/*
 * qla24xx_logio_entry() - Complete an asynchronous login/logout IOCB on
 * ISP24xx-class adapters.  Translates the IOCB's completion status and
 * io_parameter words into mailbox-style codes in lio->u.logio.data and
 * always finishes the SRB via sp->done().
 */
static void
qla24xx_logio_entry(scsi_qla_host_t *vha, struct req_que *req,
    struct logio_entry_24xx *logio)
{
	const char func[] = "LOGIO-IOCB";
	const char *type;
	fc_port_t *fcport;
	srb_t *sp;
	struct srb_iocb *lio;
	uint16_t *data;
	uint32_t iop[2];

	sp = qla2x00_get_sp_from_handle(vha, func, req, logio);
	if (!sp)
		return;

	lio = &sp->u.iocb_cmd;
	type = sp->name;
	fcport = sp->fcport;
	data = lio->u.logio.data;

	/* Default result: error (overwritten below on success). */
	data[0] = MBS_COMMAND_ERROR;
	data[1] = lio->u.logio.flags & SRB_LOGIN_RETRIED ?
		QLA_LOGIO_LOGIN_RETRIED : 0;
	if (logio->entry_status) {
		/* Malformed IOCB: log and complete with the error default. */
		ql_log(ql_log_warn, fcport->vha, 0x5034,
		    "Async-%s error entry - %8phC hdl=%x"
		    "portid=%02x%02x%02x entry-status=%x.\n",
		    type, fcport->port_name, sp->handle, fcport->d_id.b.domain,
		    fcport->d_id.b.area, fcport->d_id.b.al_pa,
		    logio->entry_status);
		ql_dump_buffer(ql_dbg_async + ql_dbg_buffer, vha, 0x504d,
		    (uint8_t *)logio, sizeof(*logio));

		goto logio_done;
	}

	if (le16_to_cpu(logio->comp_status) == CS_COMPLETE) {
		ql_dbg(ql_dbg_async, fcport->vha, 0x5036,
		    "Async-%s complete - %8phC hdl=%x portid=%02x%02x%02x "
		    "iop0=%x.\n", type, fcport->port_name, sp->handle,
		    fcport->d_id.b.domain,
		    fcport->d_id.b.area, fcport->d_id.b.al_pa,
		    le32_to_cpu(logio->io_parameter[0]));

		/* Any successful login clears the exchange-starvation count. */
		vha->hw->exch_starvation = 0;
		data[0] = MBS_COMMAND_COMPLETE;
		if (sp->type != SRB_LOGIN_CMD)
			goto logio_done;

		/* Decode PLOGI io_parameter[0] role/capability bits:
		 * BIT_4 = target, BIT_8 = FCP-2 device, BIT_5 = initiator,
		 * BIT_7 = confirmed-completion supported. */
		iop[0] = le32_to_cpu(logio->io_parameter[0]);
		if (iop[0] & BIT_4) {
			fcport->port_type = FCT_TARGET;
			if (iop[0] & BIT_8)
				fcport->flags |= FCF_FCP2_DEVICE;
		} else if (iop[0] & BIT_5)
			fcport->port_type = FCT_INITIATOR;

		if (iop[0] & BIT_7)
			fcport->flags |= FCF_CONF_COMP_SUPPORTED;

		/* io_parameter[7..10] advertise supported classes of service. */
		if (logio->io_parameter[7] || logio->io_parameter[8])
			fcport->supported_classes |= FC_COS_CLASS2;
		if (logio->io_parameter[9] || logio->io_parameter[10])
			fcport->supported_classes |= FC_COS_CLASS3;

		goto logio_done;
	}

	/* Failure: map firmware LSC_SCODE_* reasons to mailbox codes. */
	iop[0] = le32_to_cpu(logio->io_parameter[0]);
	iop[1] = le32_to_cpu(logio->io_parameter[1]);
	lio->u.logio.iop[0] = iop[0];
	lio->u.logio.iop[1] = iop[1];
	switch (iop[0]) {
	case LSC_SCODE_PORTID_USED:
		data[0] = MBS_PORT_ID_USED;
		data[1] = LSW(iop[1]);
		break;
	case LSC_SCODE_NPORT_USED:
		data[0] = MBS_LOOP_ID_USED;
		break;
	case LSC_SCODE_CMD_FAILED:
		if (iop[1] == 0x0606) {
			/*
			 * Subcode 0x0606 is treated as a completed login
			 * despite the CMD_FAILED status -- presumably the
			 * PLOGI/PRLI actually finished (TODO: confirm
			 * against firmware spec).
			 */
			data[0] = MBS_COMMAND_COMPLETE;
			goto logio_done;
		}
		data[0] = MBS_COMMAND_ERROR;
		break;
	case LSC_SCODE_NOXCB:
		/* No exchange control block available: after repeated
		 * starvation, reset the RISC to reclaim exchanges. */
		vha->hw->exch_starvation++;
		if (vha->hw->exch_starvation > 5) {
			ql_log(ql_log_warn, vha, 0xd046,
			    "Exchange starvation. Resetting RISC\n");

			vha->hw->exch_starvation = 0;

			if (IS_P3P_TYPE(vha->hw))
				set_bit(FCOE_CTX_RESET_NEEDED, &vha->dpc_flags);
			else
				set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
			qla2xxx_wake_dpc(vha);
		}
		/* fall through */
	default:
		data[0] = MBS_COMMAND_ERROR;
		break;
	}

	ql_dbg(ql_dbg_async, fcport->vha, 0x5037,
	    "Async-%s failed - %8phC hdl=%x portid=%02x%02x%02x comp=%x "
	    "iop0=%x iop1=%x.\n", type, fcport->port_name,
	    sp->handle, fcport->d_id.b.domain,
	    fcport->d_id.b.area, fcport->d_id.b.al_pa,
	    le16_to_cpu(logio->comp_status),
	    le32_to_cpu(logio->io_parameter[0]),
	    le32_to_cpu(logio->io_parameter[1]));

logio_done:
	sp->done(sp, 0);
}
1794
/*
 * qla24xx_tm_iocb_entry() - Complete a task-management (TMF) IOCB.
 * Records QLA_SUCCESS or QLA_FUNCTION_FAILED in iocb->u.tmf.data after
 * checking, in order: entry status, completion status, and (when the
 * SCSI status carries response info) the FCP response code byte.
 */
static void
qla24xx_tm_iocb_entry(scsi_qla_host_t *vha, struct req_que *req, void *tsk)
{
	const char func[] = "TMF-IOCB";
	const char *type;
	fc_port_t *fcport;
	srb_t *sp;
	struct srb_iocb *iocb;
	struct sts_entry_24xx *sts = (struct sts_entry_24xx *)tsk;

	sp = qla2x00_get_sp_from_handle(vha, func, req, tsk);
	if (!sp)
		return;

	iocb = &sp->u.iocb_cmd;
	type = sp->name;
	fcport = sp->fcport;
	/* Assume success; downgraded by the checks below. */
	iocb->u.tmf.data = QLA_SUCCESS;

	if (sts->entry_status) {
		ql_log(ql_log_warn, fcport->vha, 0x5038,
		    "Async-%s error - hdl=%x entry-status(%x).\n",
		    type, sp->handle, sts->entry_status);
		iocb->u.tmf.data = QLA_FUNCTION_FAILED;
	} else if (sts->comp_status != cpu_to_le16(CS_COMPLETE)) {
		ql_log(ql_log_warn, fcport->vha, 0x5039,
		    "Async-%s error - hdl=%x completion status(%x).\n",
		    type, sp->handle, sts->comp_status);
		iocb->u.tmf.data = QLA_FUNCTION_FAILED;
	} else if ((le16_to_cpu(sts->scsi_status) &
	    SS_RESPONSE_INFO_LEN_VALID)) {
		if (le32_to_cpu(sts->rsp_data_len) < 4) {
			/* Response info too short to contain a response
			 * code -- logged but not treated as failure. */
			ql_log(ql_log_warn, fcport->vha, 0x503b,
			    "Async-%s error - hdl=%x not enough response(%d).\n",
			    type, sp->handle, sts->rsp_data_len);
		} else if (sts->data[3]) {
			/* Non-zero FCP response code => TMF rejected. */
			ql_log(ql_log_warn, fcport->vha, 0x503c,
			    "Async-%s error - hdl=%x response(%x).\n",
			    type, sp->handle, sts->data[3]);
			iocb->u.tmf.data = QLA_FUNCTION_FAILED;
		}
	}

	if (iocb->u.tmf.data != QLA_SUCCESS)
		ql_dump_buffer(ql_dbg_async + ql_dbg_buffer, vha, 0x5055,
		    (uint8_t *)sts, sizeof(*sts));

	sp->done(sp, 0);
}
1844
/*
 * qla24xx_nvme_iocb_entry() - Complete an FC-NVMe command IOCB.
 * Copies/records the NVMe response payload according to the status
 * entry's state flags, computes the transferred length, maps the
 * firmware completion status to a driver return code, and finishes
 * the SRB via sp->done().
 */
static void qla24xx_nvme_iocb_entry(scsi_qla_host_t *vha, struct req_que *req,
    void *tsk, srb_t *sp)
{
	fc_port_t *fcport;
	struct srb_iocb *iocb;
	struct sts_entry_24xx *sts = (struct sts_entry_24xx *)tsk;
	uint16_t state_flags;
	struct nvmefc_fcp_req *fd;
	uint16_t ret = QLA_SUCCESS;
	uint16_t comp_status = le16_to_cpu(sts->comp_status);

	iocb = &sp->u.iocb_cmd;
	fcport = sp->fcport;
	iocb->u.nvme.comp_status = comp_status;
	state_flags = le16_to_cpu(sts->state_flags);
	fd = iocb->u.nvme.desc;

	/* This command carried an async-event notification; drop the
	 * outstanding AEN count. */
	if (unlikely(iocb->u.nvme.aen_op))
		atomic_dec(&sp->vha->hw->nvme_active_aen_cnt);

	/*
	 * State flags tell us where the NVMe response payload is:
	 *  - neither SF_FCP_RSP_DMA nor SF_NVME_ERSP set: no response
	 *    payload was returned;
	 *  - SF_FCP_RSP_DMA set: firmware already DMA'd the response to
	 *    the host buffer, only the length needs recording;
	 *  - SF_NVME_ERSP set: the response is embedded in this status
	 *    IOCB and must be word-swapped into fd->rspaddr.
	 */
	if (!(state_flags & (SF_FCP_RSP_DMA | SF_NVME_ERSP))) {
		iocb->u.nvme.rsp_pyld_len = 0;
	} else if ((state_flags & SF_FCP_RSP_DMA)) {
		iocb->u.nvme.rsp_pyld_len = le16_to_cpu(sts->nvme_rsp_pyld_len);
	} else if (state_flags & SF_NVME_ERSP) {
		uint32_t *inbuf, *outbuf;
		uint16_t iter;

		inbuf = (uint32_t *)&sts->nvme_ersp_data;
		outbuf = (uint32_t *)fd->rspaddr;
		iocb->u.nvme.rsp_pyld_len = le16_to_cpu(sts->nvme_rsp_pyld_len);
		/* Copy the embedded response 32 bits at a time, swapping
		 * byte order of each word. */
		iter = iocb->u.nvme.rsp_pyld_len >> 2;
		for (; iter; iter--)
			*outbuf++ = swab32(*inbuf++);
	} else {
		ql_log(ql_log_warn, fcport->vha, 0x503a,
		    "NVME-%s error. Unhandled state_flags of %x\n",
		    sp->name, state_flags);
	}

	fd->transferred_length = fd->payload_length -
	    le32_to_cpu(sts->residual_len);

	if (unlikely(comp_status != CS_COMPLETE))
		ql_log(ql_log_warn, fcport->vha, 0x5060,
		   "NVME-%s ERR Handling - hdl=%x status(%x) tr_len:%x resid=%x  ox_id=%x\n",
		   sp->name, sp->handle, comp_status,
		   fd->transferred_length, le32_to_cpu(sts->residual_len),
		   sts->ox_id);

	/*
	 * Map the firmware completion status to the driver return code
	 * handed to sp->done().
	 */
	switch (comp_status) {
	case CS_COMPLETE:
		break;

	case CS_RESET:
	case CS_PORT_UNAVAILABLE:
	case CS_PORT_LOGGED_OUT:
		/* Port-loss cases also mark the rport as resetting. */
		fcport->nvme_flag |= NVME_FLAG_RESETTING;
		/* fall through */
	case CS_ABORTED:
	case CS_PORT_BUSY:
		fd->transferred_length = 0;
		iocb->u.nvme.rsp_pyld_len = 0;
		ret = QLA_ABORTED;
		break;
	case CS_DATA_UNDERRUN:
		break;
	default:
		ret = QLA_FUNCTION_FAILED;
		break;
	}
	sp->done(sp, ret);
}
1930
1931static void qla_ctrlvp_completed(scsi_qla_host_t *vha, struct req_que *req,
1932 struct vp_ctrl_entry_24xx *vce)
1933{
1934 const char func[] = "CTRLVP-IOCB";
1935 srb_t *sp;
1936 int rval = QLA_SUCCESS;
1937
1938 sp = qla2x00_get_sp_from_handle(vha, func, req, vce);
1939 if (!sp)
1940 return;
1941
1942 if (vce->entry_status != 0) {
1943 ql_dbg(ql_dbg_vport, vha, 0x10c4,
1944 "%s: Failed to complete IOCB -- error status (%x)\n",
1945 sp->name, vce->entry_status);
1946 rval = QLA_FUNCTION_FAILED;
1947 } else if (vce->comp_status != cpu_to_le16(CS_COMPLETE)) {
1948 ql_dbg(ql_dbg_vport, vha, 0x10c5,
1949 "%s: Failed to complete IOCB -- completion status (%x) vpidx %x\n",
1950 sp->name, le16_to_cpu(vce->comp_status),
1951 le16_to_cpu(vce->vp_idx_failed));
1952 rval = QLA_FUNCTION_FAILED;
1953 } else {
1954 ql_dbg(ql_dbg_vport, vha, 0x10c6,
1955 "Done %s.\n", __func__);
1956 }
1957
1958 sp->rc = rval;
1959 sp->done(sp, rval);
1960}
1961
1962
1963
1964
1965
/*
 * qla2x00_process_response_queue() - Drain the legacy (2x00-series)
 * response ring.  Dispatches each entry to its type-specific handler,
 * marks it processed, and finally publishes the new ring index to the
 * ISP.  Caller holds the hardware lock.
 */
void
qla2x00_process_response_queue(struct rsp_que *rsp)
{
	struct scsi_qla_host *vha;
	struct qla_hw_data *ha = rsp->hw;
	struct device_reg_2xxx __iomem *reg = &ha->iobase->isp;
	sts_entry_t *pkt;
	uint16_t        handle_cnt;
	uint16_t        cnt;

	vha = pci_get_drvdata(ha->pdev);

	if (!vha->flags.online)
		return;

	while (rsp->ring_ptr->signature != RESPONSE_PROCESSED) {
		pkt = (sts_entry_t *)rsp->ring_ptr;

		/* Advance to the next entry, wrapping at ring end. */
		rsp->ring_index++;
		if (rsp->ring_index == rsp->length) {
			rsp->ring_index = 0;
			rsp->ring_ptr = rsp->ring;
		} else {
			rsp->ring_ptr++;
		}

		if (pkt->entry_status != 0) {
			qla2x00_error_entry(vha, rsp, pkt);
			((response_t *)pkt)->signature = RESPONSE_PROCESSED;
			wmb();
			continue;
		}

		switch (pkt->entry_type) {
		case STATUS_TYPE:
			qla2x00_status_entry(vha, rsp, pkt);
			break;
		case STATUS_TYPE_21:
			/* Fast-path: entry carries a batch of completed
			 * command handles. */
			handle_cnt = ((sts21_entry_t *)pkt)->handle_count;
			for (cnt = 0; cnt < handle_cnt; cnt++) {
				qla2x00_process_completed_request(vha, rsp->req,
				    ((sts21_entry_t *)pkt)->handle[cnt]);
			}
			break;
		case STATUS_TYPE_22:
			handle_cnt = ((sts22_entry_t *)pkt)->handle_count;
			for (cnt = 0; cnt < handle_cnt; cnt++) {
				qla2x00_process_completed_request(vha, rsp->req,
				    ((sts22_entry_t *)pkt)->handle[cnt]);
			}
			break;
		case STATUS_CONT_TYPE:
			qla2x00_status_cont_entry(rsp, (sts_cont_entry_t *)pkt);
			break;
		case MBX_IOCB_TYPE:
			qla2x00_mbx_iocb_entry(vha, rsp->req,
			    (struct mbx_entry *)pkt);
			break;
		case CT_IOCB_TYPE:
			qla2x00_ct_entry(vha, rsp->req, pkt, CT_IOCB_TYPE);
			break;
		default:
			/* Type Not Supported. */
			ql_log(ql_log_warn, vha, 0x504a,
			    "Received unknown response pkt type %x "
			    "entry status=%x.\n",
			    pkt->entry_type, pkt->entry_status);
			break;
		}
		((response_t *)pkt)->signature = RESPONSE_PROCESSED;
		wmb();
	}

	/* Adjust ring index -- tell the ISP how far we have consumed. */
	WRT_REG_WORD(ISP_RSP_Q_OUT(ha, reg), rsp->ring_index);
}
2042
/*
 * qla2x00_handle_sense() - Copy (the first chunk of) SCSI sense data
 * into the midlayer's sense buffer.
 * @par_sense_len: bytes of sense data present in this status entry.
 * @sense_len: total sense length reported by firmware.
 *
 * If the firmware reports more sense data than this entry carries, the
 * SRB is parked on rsp->status_srb so status-continuation entries can
 * deliver the remainder.
 */
static inline void
qla2x00_handle_sense(srb_t *sp, uint8_t *sense_data, uint32_t par_sense_len,
    uint32_t sense_len, struct rsp_que *rsp, int res)
{
	struct scsi_qla_host *vha = sp->vha;
	struct scsi_cmnd *cp = GET_CMD_SP(sp);
	uint32_t track_sense_len;

	/* Never copy more than the midlayer buffer can hold. */
	if (sense_len >= SCSI_SENSE_BUFFERSIZE)
		sense_len = SCSI_SENSE_BUFFERSIZE;

	SET_CMD_SENSE_LEN(sp, sense_len);
	SET_CMD_SENSE_PTR(sp, cp->sense_buffer);
	track_sense_len = sense_len;

	/* This entry only carries par_sense_len bytes. */
	if (sense_len > par_sense_len)
		sense_len = par_sense_len;

	memcpy(cp->sense_buffer, sense_data, sense_len);

	/* Record where continuation entries should append, and how much
	 * is still outstanding. */
	SET_CMD_SENSE_PTR(sp, cp->sense_buffer + sense_len);
	track_sense_len -= sense_len;
	SET_CMD_SENSE_LEN(sp, track_sense_len);

	if (track_sense_len != 0) {
		/* More sense data follows in STATUS_CONT entries. */
		rsp->status_srb = sp;
		cp->result = res;
	}

	if (sense_len) {
		ql_dbg(ql_dbg_io + ql_dbg_buffer, vha, 0x301c,
		    "Check condition Sense data, nexus%ld:%d:%llu cmd=%p.\n",
		    sp->vha->host_no, cp->device->id, cp->device->lun,
		    cp);
		ql_dump_buffer(ql_dbg_io + ql_dbg_buffer, vha, 0x302b,
		    cp->sense_buffer, sense_len);
	}
}
2081
/* On-wire T10 DIF protection-information tuple (big-endian fields). */
struct scsi_dif_tuple {
	__be16 guard;		/* guard checksum */
	__be16 app_tag;		/* application tag */
	__be32 ref_tag;		/* reference tag */
};
2087
2088
2089
2090
2091
2092
2093
/*
 * qla2x00_handle_dif_error() - Triage a T10 DIF (protection info) error
 * reported in a 24xx status entry.
 *
 * sts24->data[12..19] holds the actual (received) tags, data[20..27]
 * the expected tags.  Escape-tag values (all-ones app tag, and for
 * type 3 also the ref tag) mark sectors that should not be checked;
 * such "errors" are converted into a successful partial completion.
 *
 * Returns 0 when the command was fixed up to DID_OK, 1 when a real
 * guard/ref/app tag mismatch was translated into a check condition
 * on the command.
 */
static inline int
qla2x00_handle_dif_error(srb_t *sp, struct sts_entry_24xx *sts24)
{
	struct scsi_qla_host *vha = sp->vha;
	struct scsi_cmnd *cmd = GET_CMD_SP(sp);
	uint8_t		*ap = &sts24->data[12];
	uint8_t		*ep = &sts24->data[20];
	uint32_t	e_ref_tag, a_ref_tag;
	uint16_t	e_app_tag, a_app_tag;
	uint16_t	e_guard, a_guard;

	/*
	 * Unpack actual ('a_') and expected ('e_') tag values from the
	 * status entry's raw byte array.
	 */
	a_guard   = le16_to_cpu(*(uint16_t *)(ap + 2));
	a_app_tag = le16_to_cpu(*(uint16_t *)(ap + 0));
	a_ref_tag = le32_to_cpu(*(uint32_t *)(ap + 4));
	e_guard   = le16_to_cpu(*(uint16_t *)(ep + 2));
	e_app_tag = le16_to_cpu(*(uint16_t *)(ep + 0));
	e_ref_tag = le32_to_cpu(*(uint32_t *)(ep + 4));

	ql_dbg(ql_dbg_io, vha, 0x3023,
	    "iocb(s) %p Returned STATUS.\n", sts24);

	ql_dbg(ql_dbg_io, vha, 0x3024,
	    "DIF ERROR in cmd 0x%x lba 0x%llx act ref"
	    " tag=0x%x, exp ref_tag=0x%x, act app tag=0x%x, exp app"
	    " tag=0x%x, act guard=0x%x, exp guard=0x%x.\n",
	    cmd->cmnd[0], (u64)scsi_get_lba(cmd), a_ref_tag, e_ref_tag,
	    a_app_tag, e_app_tag, a_guard, e_guard);

	/*
	 * Escape-tag sector: app tag is all ones (and for non-type-3
	 * protection, that alone suffices; for type 3 the ref tag must
	 * also be all ones).  Treat the blocks up to this sector as
	 * successfully transferred and patch the protection buffer so
	 * the midlayer skips verification of the failing sector.
	 */
	if ((a_app_tag == T10_PI_APP_ESCAPE) &&
	    ((scsi_get_prot_type(cmd) != SCSI_PROT_DIF_TYPE3) ||
	     (a_ref_tag == T10_PI_REF_ESCAPE))) {
		uint32_t blocks_done, resid;
		sector_t lba_s = scsi_get_lba(cmd);

		/* 2TB boundary case covered automatically with this */
		blocks_done = e_ref_tag - (uint32_t)lba_s + 1;

		resid = scsi_bufflen(cmd) - (blocks_done *
		    cmd->device->sector_size);

		scsi_set_resid(cmd, resid);
		cmd->result = DID_OK << 16;

		/* Walk the protection scatterlist to the tuple for the
		 * escape sector and stamp it with escape values. */
		if (scsi_prot_sg_count(cmd)) {
			uint32_t i, j = 0, k = 0, num_ent;
			struct scatterlist *sg;
			struct t10_pi_tuple *spt;

			/* Locate the SG element holding tuple j of the
			 * failing block (8 bytes per DIF tuple). */
			scsi_for_each_prot_sg(cmd, sg,
			    scsi_prot_sg_count(cmd), i) {
				num_ent = sg_dma_len(sg) / 8;
				if (k + num_ent < blocks_done) {
					k += num_ent;
					continue;
				}
				j = blocks_done - k - 1;
				k = blocks_done;
				break;
			}

			if (k != blocks_done) {
				ql_log(ql_log_warn, vha, 0x302f,
				    "unexpected tag values tag:lba=%x:%llx)\n",
				    e_ref_tag, (unsigned long long)lba_s);
				return 1;
			}

			spt = page_address(sg_page(sg)) + sg->offset;
			spt += j;

			/* All-ones tags tell the checker to skip this
			 * sector (endian-neutral values). */
			spt->app_tag = T10_PI_APP_ESCAPE;
			if (scsi_get_prot_type(cmd) == SCSI_PROT_DIF_TYPE3)
				spt->ref_tag = T10_PI_REF_ESCAPE;
		}

		return 0;
	}

	/* check guard: sense 0x10/0x1 = LOGICAL BLOCK GUARD CHECK FAILED */
	if (e_guard != a_guard) {
		scsi_build_sense_buffer(1, cmd->sense_buffer, ILLEGAL_REQUEST,
		    0x10, 0x1);
		set_driver_byte(cmd, DRIVER_SENSE);
		set_host_byte(cmd, DID_ABORT);
		cmd->result |= SAM_STAT_CHECK_CONDITION;
		return 1;
	}

	/* check ref tag: 0x10/0x3 = REFERENCE TAG CHECK FAILED */
	if (e_ref_tag != a_ref_tag) {
		scsi_build_sense_buffer(1, cmd->sense_buffer, ILLEGAL_REQUEST,
		    0x10, 0x3);
		set_driver_byte(cmd, DRIVER_SENSE);
		set_host_byte(cmd, DID_ABORT);
		cmd->result |= SAM_STAT_CHECK_CONDITION;
		return 1;
	}

	/* check appl tag: 0x10/0x2 = APPLICATION TAG CHECK FAILED */
	if (e_app_tag != a_app_tag) {
		scsi_build_sense_buffer(1, cmd->sense_buffer, ILLEGAL_REQUEST,
		    0x10, 0x2);
		set_driver_byte(cmd, DRIVER_SENSE);
		set_host_byte(cmd, DID_ABORT);
		cmd->result |= SAM_STAT_CHECK_CONDITION;
		return 1;
	}

	return 1;
}
2215
/*
 * qla25xx_process_bidir_status_iocb() - Complete a bidirectional
 * (loopback/echo) bsg command.  Maps the firmware completion status to
 * an EXT_STATUS_* vendor response code in the bsg reply and finishes
 * the SRB with DID_OK (the real outcome travels in the vendor reply).
 */
static void
qla25xx_process_bidir_status_iocb(scsi_qla_host_t *vha, void *pkt,
    struct req_que *req, uint32_t index)
{
	struct qla_hw_data *ha = vha->hw;
	srb_t *sp;
	uint16_t comp_status;
	uint16_t scsi_status;
	uint16_t thread_id;
	uint32_t rval = EXT_STATUS_OK;
	struct bsg_job *bsg_job = NULL;
	struct fc_bsg_request *bsg_request;
	struct fc_bsg_reply *bsg_reply;
	sts_entry_t *sts;
	struct sts_entry_24xx *sts24;
	sts = (sts_entry_t *) pkt;
	sts24 = (struct sts_entry_24xx *) pkt;

	/* Validate handle. */
	if (index >= req->num_outstanding_cmds) {
		ql_log(ql_log_warn, vha, 0x70af,
		    "Invalid SCSI completion handle 0x%x.\n", index);
		set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
		return;
	}

	sp = req->outstanding_cmds[index];
	if (!sp) {
		ql_log(ql_log_warn, vha, 0x70b0,
		    "Req:%d: Invalid ISP SCSI completion handle(0x%x)\n",
		    req->id, index);

		set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
		return;
	}

	/* Release the command slot; this SRB completes below. */
	req->outstanding_cmds[index] = NULL;
	bsg_job = sp->u.bsg_job;
	bsg_request = bsg_job->request;
	bsg_reply = bsg_job->reply;

	/* Status fields live at different offsets on FWI2 adapters. */
	if (IS_FWI2_CAPABLE(ha)) {
		comp_status = le16_to_cpu(sts24->comp_status);
		scsi_status = le16_to_cpu(sts24->scsi_status) & SS_MASK;
	} else {
		comp_status = le16_to_cpu(sts->comp_status);
		scsi_status = le16_to_cpu(sts->scsi_status) & SS_MASK;
	}

	thread_id = bsg_request->rqst_data.h_vendor.vendor_cmd[1];
	switch (comp_status) {
	case CS_COMPLETE:
		if (scsi_status == 0) {
			bsg_reply->reply_payload_rcv_len =
			    bsg_job->reply_payload.payload_len;
			vha->qla_stats.input_bytes +=
			    bsg_reply->reply_payload_rcv_len;
			vha->qla_stats.input_requests++;
			rval = EXT_STATUS_OK;
		}
		goto done;

	case CS_DATA_OVERRUN:
		ql_dbg(ql_dbg_user, vha, 0x70b1,
		    "Command completed with data overrun thread_id=%d\n",
		    thread_id);
		rval = EXT_STATUS_DATA_OVERRUN;
		break;

	case CS_DATA_UNDERRUN:
		ql_dbg(ql_dbg_user, vha, 0x70b2,
		    "Command completed with data underrun thread_id=%d\n",
		    thread_id);
		rval = EXT_STATUS_DATA_UNDERRUN;
		break;
	case CS_BIDIR_RD_OVERRUN:
		ql_dbg(ql_dbg_user, vha, 0x70b3,
		    "Command completed with read data overrun thread_id=%d\n",
		    thread_id);
		rval = EXT_STATUS_DATA_OVERRUN;
		break;

	case CS_BIDIR_RD_WR_OVERRUN:
		ql_dbg(ql_dbg_user, vha, 0x70b4,
		    "Command completed with read and write data overrun "
		    "thread_id=%d\n", thread_id);
		rval = EXT_STATUS_DATA_OVERRUN;
		break;

	case CS_BIDIR_RD_OVERRUN_WR_UNDERRUN:
		ql_dbg(ql_dbg_user, vha, 0x70b5,
		    "Command completed with read data over and write data "
		    "underrun thread_id=%d\n", thread_id);
		rval = EXT_STATUS_DATA_OVERRUN;
		break;

	case CS_BIDIR_RD_UNDERRUN:
		ql_dbg(ql_dbg_user, vha, 0x70b6,
		    "Command completed with read data underrun "
		    "thread_id=%d\n", thread_id);
		rval = EXT_STATUS_DATA_UNDERRUN;
		break;

	case CS_BIDIR_RD_UNDERRUN_WR_OVERRUN:
		ql_dbg(ql_dbg_user, vha, 0x70b7,
		    "Command completed with read data under and write data "
		    "overrun thread_id=%d\n", thread_id);
		rval = EXT_STATUS_DATA_UNDERRUN;
		break;

	case CS_BIDIR_RD_WR_UNDERRUN:
		ql_dbg(ql_dbg_user, vha, 0x70b8,
		    "Command completed with read and write data underrun "
		    "thread_id=%d\n", thread_id);
		rval = EXT_STATUS_DATA_UNDERRUN;
		break;

	case CS_BIDIR_DMA:
		ql_dbg(ql_dbg_user, vha, 0x70b9,
		    "Command completed with data DMA error thread_id=%d\n",
		    thread_id);
		rval = EXT_STATUS_DMA_ERR;
		break;

	case CS_TIMEOUT:
		ql_dbg(ql_dbg_user, vha, 0x70ba,
		    "Command completed with timeout thread_id=%d\n",
		    thread_id);
		rval = EXT_STATUS_TIMEOUT;
		break;
	default:
		ql_dbg(ql_dbg_user, vha, 0x70bb,
		    "Command completed with completion status=0x%x "
		    "thread_id=%d\n", comp_status, thread_id);
		rval = EXT_STATUS_ERR;
		break;
	}
	/* All non-CS_COMPLETE paths return no payload. */
	bsg_reply->reply_payload_rcv_len = 0;

done:
	/* Return the vendor specific reply to API */
	bsg_reply->reply_data.vendor_reply.vendor_rsp[0] = rval;
	bsg_job->reply_len = sizeof(struct fc_bsg_reply);

	/* Always DID_OK at the SCSI level; rval carries the outcome. */
	sp->done(sp, DID_OK << 16);

}
2365
2366
2367
2368
2369
2370
2371
/**
 * qla2x00_status_entry() - Process a SCSI command completion (status IOCB).
 * @vha: SCSI driver HA context
 * @rsp: response queue the status entry arrived on
 * @pkt: raw status entry; laid out as sts_entry_t on legacy ISPs and as
 *       struct sts_entry_24xx on FWI2-capable ISPs (same buffer, two views)
 *
 * Looks up the srb_t for the completion handle, routes NVMe / bidirectional /
 * task-management completions to their dedicated handlers, and otherwise
 * translates the firmware completion status and FCP SCSI status into a Linux
 * midlayer result (residuals, sense data, port-state transitions) before
 * completing the command via sp->done().
 */
static void
qla2x00_status_entry(scsi_qla_host_t *vha, struct rsp_que *rsp, void *pkt)
{
	srb_t *sp;
	fc_port_t *fcport;
	struct scsi_cmnd *cp;
	sts_entry_t *sts;
	struct sts_entry_24xx *sts24;
	uint16_t comp_status;
	uint16_t scsi_status;
	uint16_t ox_id;
	uint8_t lscsi_status;
	int32_t resid;
	uint32_t sense_len, par_sense_len, rsp_info_len, resid_len,
	    fw_resid_len;
	uint8_t *rsp_info, *sense_data;
	struct qla_hw_data *ha = vha->hw;
	uint32_t handle;
	uint16_t que;
	struct req_que *req;
	int logit = 1;		/* log an FCP summary line at "out:" unless cleared */
	int res = 0;		/* midlayer result (DID_xxx << 16 | SCSI status) */
	uint16_t state_flags = 0;
	uint16_t retry_delay = 0;

	/* Both layouts alias the same packet; pick fields per ISP family. */
	sts = (sts_entry_t *) pkt;
	sts24 = (struct sts_entry_24xx *) pkt;
	if (IS_FWI2_CAPABLE(ha)) {
		comp_status = le16_to_cpu(sts24->comp_status);
		scsi_status = le16_to_cpu(sts24->scsi_status) & SS_MASK;
		state_flags = le16_to_cpu(sts24->state_flags);
	} else {
		comp_status = le16_to_cpu(sts->comp_status);
		scsi_status = le16_to_cpu(sts->scsi_status) & SS_MASK;
	}
	/* Handle encodes the outstanding-command index (LSW) and the
	 * originating request-queue id (MSW). */
	handle = (uint32_t) LSW(sts->handle);
	que = MSW(sts->handle);
	/* NOTE(review): req_q_map[] is indexed before 'que' is range-checked
	 * below — presumably firmware never returns an out-of-range queue id;
	 * confirm map sizing covers all possible MSW values. */
	req = ha->req_q_map[que];

	/* Validate the queue id against the set of allocated request queues. */
	if (req == NULL ||
	    que >= find_first_zero_bit(ha->req_qid_map, ha->max_req_queues)) {
		ql_dbg(ql_dbg_io, vha, 0x3059,
		    "Invalid status handle (0x%x): Bad req pointer. req=%p, "
		    "que=%u.\n", sts->handle, req, que);
		return;
	}

	/* Look up the srb for this completion handle. */
	if (handle < req->num_outstanding_cmds) {
		sp = req->outstanding_cmds[handle];
		if (!sp) {
			/* Duplicate/stale completion — command already done. */
			ql_dbg(ql_dbg_io, vha, 0x3075,
			    "%s(%ld): Already returned command for status handle (0x%x).\n",
			    __func__, vha->host_no, sts->handle);
			return;
		}
	} else {
		/* Out-of-range handle indicates a confused firmware; request
		 * an adapter reset (FCoE context reset on P3P parts). */
		ql_dbg(ql_dbg_io, vha, 0x3017,
		    "Invalid status handle, out of range (0x%x).\n",
		    sts->handle);

		if (!test_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags)) {
			if (IS_P3P_TYPE(ha))
				set_bit(FCOE_CTX_RESET_NEEDED, &vha->dpc_flags);
			else
				set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
			qla2xxx_wake_dpc(vha);
		}
		return;
	}

	if (sp->cmd_type != TYPE_SRB) {
		req->outstanding_cmds[handle] = NULL;
		ql_dbg(ql_dbg_io, vha, 0x3015,
		    "Unknown sp->cmd_type %x %p).\n",
		    sp->cmd_type, sp);
		return;
	}

	/* NVMe command completion — handled by the FC-NVMe path. */
	if (sp->type == SRB_NVME_CMD) {
		req->outstanding_cmds[handle] = NULL;
		qla24xx_nvme_iocb_entry(vha, req, pkt, sp);
		return;
	}

	/* Bidirectional command completion (BIT_1 of state_flags set). */
	if (unlikely((state_flags & BIT_1) && (sp->type == SRB_BIDI_CMD))) {
		qla25xx_process_bidir_status_iocb(vha, pkt, req, handle);
		return;
	}

	/* Task-management completion. */
	if (sp->type == SRB_TM_CMD) {
		qla24xx_tm_iocb_entry(vha, req, pkt);
		return;
	}

	/* Fast path: clean completion with no SCSI status to decode. */
	if (comp_status == CS_COMPLETE && scsi_status == 0) {
		qla2x00_process_completed_request(vha, req, handle);

		return;
	}

	req->outstanding_cmds[handle] = NULL;
	cp = GET_CMD_SP(sp);
	if (cp == NULL) {
		ql_dbg(ql_dbg_io, vha, 0x3018,
		    "Command already returned (0x%x/%p).\n",
		    sts->handle, sp);

		return;
	}

	lscsi_status = scsi_status & STATUS_MASK;

	fcport = sp->fcport;

	/* Gather per-family residual/sense/response-info bookkeeping. */
	ox_id = 0;
	sense_len = par_sense_len = rsp_info_len = resid_len =
	    fw_resid_len = 0;
	if (IS_FWI2_CAPABLE(ha)) {
		if (scsi_status & SS_SENSE_LEN_VALID)
			sense_len = le32_to_cpu(sts24->sense_len);
		if (scsi_status & SS_RESPONSE_INFO_LEN_VALID)
			rsp_info_len = le32_to_cpu(sts24->rsp_data_len);
		if (scsi_status & (SS_RESIDUAL_UNDER | SS_RESIDUAL_OVER))
			resid_len = le32_to_cpu(sts24->rsp_residual_count);
		if (comp_status == CS_DATA_UNDERRUN)
			fw_resid_len = le32_to_cpu(sts24->residual_len);
		rsp_info = sts24->data;
		sense_data = sts24->data;
		host_to_fcp_swap(sts24->data, sizeof(sts24->data));
		ox_id = le16_to_cpu(sts24->ox_id);
		par_sense_len = sizeof(sts24->data);

		/* Low 14 bits: retry delay; top 2 bits: scope.
		 * NOTE(review): retry_delay is read without le16_to_cpu here,
		 * unlike the other sts24 fields — confirm intended on BE hosts. */
		if (sts24->retry_delay > 0 && sts24->retry_delay < 0xfff1) {
			retry_delay = sts24->retry_delay & 0x3fff;
			ql_dbg(ql_dbg_io, sp->vha, 0x3033,
			    "%s: scope=%#x retry_delay=%#x\n", __func__,
			    sts24->retry_delay >> 14, retry_delay);
		}
	} else {
		if (scsi_status & SS_SENSE_LEN_VALID)
			sense_len = le16_to_cpu(sts->req_sense_length);
		if (scsi_status & SS_RESPONSE_INFO_LEN_VALID)
			rsp_info_len = le16_to_cpu(sts->rsp_info_len);
		resid_len = le32_to_cpu(sts->residual_length);
		rsp_info = sts->rsp_info;
		sense_data = sts->req_sense_data;
		par_sense_len = sizeof(sts->req_sense_data);
	}

	/* Check for any FCP transport errors (FCP_RSP_INFO byte 3). */
	if (scsi_status & SS_RESPONSE_INFO_LEN_VALID) {
		/* On FWI2 parts sense data follows the response info. */
		if (IS_FWI2_CAPABLE(ha)) {
			sense_data += rsp_info_len;
			par_sense_len -= rsp_info_len;
		}
		if (rsp_info_len > 3 && rsp_info[3]) {
			ql_dbg(ql_dbg_io, fcport->vha, 0x3019,
			    "FCP I/O protocol failure (0x%x/0x%x).\n",
			    rsp_info_len, rsp_info[3]);

			res = DID_BUS_BUSY << 16;
			goto out;
		}
	}

	/* FWI2 firmware reports overruns as CS_COMPLETE + SS_RESIDUAL_OVER;
	 * normalize to CS_DATA_OVERRUN for the switch below. */
	if (IS_FWI2_CAPABLE(ha) && comp_status == CS_COMPLETE &&
	    scsi_status & SS_RESIDUAL_OVER)
		comp_status = CS_DATA_OVERRUN;

	/* Record the target-requested retry delay for BUSY/TASK SET FULL so
	 * later submissions back off. */
	if (lscsi_status == SAM_STAT_TASK_SET_FULL ||
	    lscsi_status == SAM_STAT_BUSY)
		qla2x00_set_retry_delay_timestamp(fcport, retry_delay);

	/* Map firmware completion status onto a midlayer result. */
	switch (comp_status) {
	case CS_COMPLETE:
	case CS_QUEUE_FULL:
		if (scsi_status == 0) {
			res = DID_OK << 16;
			break;
		}
		if (scsi_status & (SS_RESIDUAL_UNDER | SS_RESIDUAL_OVER)) {
			resid = resid_len;
			scsi_set_resid(cp, resid);

			/* Less data than the midlayer's minimum — error. */
			if (!lscsi_status &&
			    ((unsigned)(scsi_bufflen(cp) - resid) <
			     cp->underflow)) {
				ql_dbg(ql_dbg_io, fcport->vha, 0x301a,
				    "Mid-layer underflow detected (0x%x of 0x%x bytes).\n",
				    resid, scsi_bufflen(cp));

				res = DID_ERROR << 16;
				break;
			}
		}
		res = DID_OK << 16 | lscsi_status;

		if (lscsi_status == SAM_STAT_TASK_SET_FULL) {
			ql_dbg(ql_dbg_io, fcport->vha, 0x301b,
			    "QUEUE FULL detected.\n");
			break;
		}
		logit = 0;
		if (lscsi_status != SS_CHECK_CONDITION)
			break;

		/* CHECK CONDITION: copy sense data if the target sent any. */
		memset(cp->sense_buffer, 0, SCSI_SENSE_BUFFERSIZE);
		if (!(scsi_status & SS_SENSE_LEN_VALID))
			break;

		qla2x00_handle_sense(sp, sense_data, par_sense_len, sense_len,
		    rsp, res);
		break;

	case CS_DATA_UNDERRUN:
		/* Use the firmware's residual on FWI2 parts; it is the
		 * authoritative transfer count. */
		resid = IS_FWI2_CAPABLE(ha) ? fw_resid_len : resid_len;
		scsi_set_resid(cp, resid);
		if (scsi_status & SS_RESIDUAL_UNDER) {
			/* Firmware and target residuals disagree — frames
			 * were dropped on the wire. */
			if (IS_FWI2_CAPABLE(ha) && fw_resid_len != resid_len) {
				ql_dbg(ql_dbg_io, fcport->vha, 0x301d,
				    "Dropped frame(s) detected (0x%x of 0x%x bytes).\n",
				    resid, scsi_bufflen(cp));

				res = DID_ERROR << 16 | lscsi_status;
				goto check_scsi_status;
			}

			if (!lscsi_status &&
			    ((unsigned)(scsi_bufflen(cp) - resid) <
			    cp->underflow)) {
				ql_dbg(ql_dbg_io, fcport->vha, 0x301e,
				    "Mid-layer underflow detected (0x%x of 0x%x bytes).\n",
				    resid, scsi_bufflen(cp));

				res = DID_ERROR << 16;
				break;
			}
		} else if (lscsi_status != SAM_STAT_TASK_SET_FULL &&
			    lscsi_status != SAM_STAT_BUSY) {
			/* Underrun reported without SS_RESIDUAL_UNDER and the
			 * target is not flow-controlling us: dropped frames. */
			ql_dbg(ql_dbg_io, fcport->vha, 0x301f,
			    "Dropped frame(s) detected (0x%x of 0x%x bytes).\n",
			    resid, scsi_bufflen(cp));

			res = DID_ERROR << 16 | lscsi_status;
			goto check_scsi_status;
		} else {
			ql_dbg(ql_dbg_io, fcport->vha, 0x3030,
			    "scsi_status: 0x%x, lscsi_status: 0x%x\n",
			    scsi_status, lscsi_status);
		}

		res = DID_OK << 16 | lscsi_status;
		logit = 0;

check_scsi_status:
		/* Even on transport-level trouble, honor the SCSI status the
		 * target returned (queue full / check condition). */
		if (lscsi_status != 0) {
			if (lscsi_status == SAM_STAT_TASK_SET_FULL) {
				ql_dbg(ql_dbg_io, fcport->vha, 0x3020,
				    "QUEUE FULL detected.\n");
				logit = 1;
				break;
			}
			if (lscsi_status != SS_CHECK_CONDITION)
				break;

			memset(cp->sense_buffer, 0, SCSI_SENSE_BUFFERSIZE);
			if (!(scsi_status & SS_SENSE_LEN_VALID))
				break;

			qla2x00_handle_sense(sp, sense_data, par_sense_len,
			    sense_len, rsp, res);
		}
		break;

	case CS_PORT_LOGGED_OUT:
	case CS_PORT_CONFIG_CHG:
	case CS_PORT_BUSY:
	case CS_INCOMPLETE:
	case CS_PORT_UNAVAILABLE:
	case CS_TIMEOUT:
	case CS_RESET:
		/* Port connectivity problems: let the transport layer retry
		 * once the session is re-established. */
		res = DID_TRANSPORT_DISRUPTED << 16;

		/* Legacy ISPs: a timeout without a logout sent is transient;
		 * don't tear down the session. */
		if (comp_status == CS_TIMEOUT) {
			if (IS_FWI2_CAPABLE(ha))
				break;
			else if ((le16_to_cpu(sts->status_flags) &
			    SF_LOGOUT_SENT) == 0)
				break;
		}

		if (atomic_read(&fcport->state) == FCS_ONLINE) {
			ql_dbg(ql_dbg_disc, fcport->vha, 0x3021,
			    "Port to be marked lost on fcport=%02x%02x%02x, current "
			    "port state= %s comp_status %x.\n", fcport->d_id.b.domain,
			    fcport->d_id.b.area, fcport->d_id.b.al_pa,
			    port_state_str[atomic_read(&fcport->state)],
			    comp_status);

			qla2x00_mark_device_lost(fcport->vha, fcport, 1, 1);
			qlt_schedule_sess_for_deletion(fcport);
		}

		break;

	case CS_ABORTED:
		res = DID_RESET << 16;
		break;

	case CS_DIF_ERROR:
		/* T10-PI guard/ref/app tag mismatch. */
		logit = qla2x00_handle_dif_error(sp, sts24);
		res = cp->result;
		break;

	case CS_TRANSPORT:
		res = DID_ERROR << 16;

		if (!IS_PI_SPLIT_DET_CAPABLE(ha))
			break;

		if (state_flags & BIT_4)
			scmd_printk(KERN_WARNING, cp,
			    "Unsupported device '%s' found.\n",
			    cp->device->vendor);
		break;

	default:
		res = DID_ERROR << 16;
		break;
	}

out:
	if (logit)
		ql_dbg(ql_dbg_io, fcport->vha, 0x3022,
		    "FCP command status: 0x%x-0x%x (0x%x) nexus=%ld:%d:%llu "
		    "portid=%02x%02x%02x oxid=0x%x cdb=%10phN len=0x%x "
		    "rsp_info=0x%x resid=0x%x fw_resid=0x%x sp=%p cp=%p.\n",
		    comp_status, scsi_status, res, vha->host_no,
		    cp->device->id, cp->device->lun, fcport->d_id.b.domain,
		    fcport->d_id.b.area, fcport->d_id.b.al_pa, ox_id,
		    cp->cmnd, scsi_bufflen(cp), rsp_info_len,
		    resid_len, fw_resid_len, sp, cp);

	/* If sense data continues in STATUS_CONT entries, completion is
	 * deferred to qla2x00_status_cont_entry(). */
	if (rsp->status_srb == NULL)
		sp->done(sp, res);
}
2748
2749
2750
2751
2752
2753
2754
2755
2756static void
2757qla2x00_status_cont_entry(struct rsp_que *rsp, sts_cont_entry_t *pkt)
2758{
2759 uint8_t sense_sz = 0;
2760 struct qla_hw_data *ha = rsp->hw;
2761 struct scsi_qla_host *vha = pci_get_drvdata(ha->pdev);
2762 srb_t *sp = rsp->status_srb;
2763 struct scsi_cmnd *cp;
2764 uint32_t sense_len;
2765 uint8_t *sense_ptr;
2766
2767 if (!sp || !GET_CMD_SENSE_LEN(sp))
2768 return;
2769
2770 sense_len = GET_CMD_SENSE_LEN(sp);
2771 sense_ptr = GET_CMD_SENSE_PTR(sp);
2772
2773 cp = GET_CMD_SP(sp);
2774 if (cp == NULL) {
2775 ql_log(ql_log_warn, vha, 0x3025,
2776 "cmd is NULL: already returned to OS (sp=%p).\n", sp);
2777
2778 rsp->status_srb = NULL;
2779 return;
2780 }
2781
2782 if (sense_len > sizeof(pkt->data))
2783 sense_sz = sizeof(pkt->data);
2784 else
2785 sense_sz = sense_len;
2786
2787
2788 if (IS_FWI2_CAPABLE(ha))
2789 host_to_fcp_swap(pkt->data, sizeof(pkt->data));
2790 memcpy(sense_ptr, pkt->data, sense_sz);
2791 ql_dump_buffer(ql_dbg_io + ql_dbg_buffer, vha, 0x302c,
2792 sense_ptr, sense_sz);
2793
2794 sense_len -= sense_sz;
2795 sense_ptr += sense_sz;
2796
2797 SET_CMD_SENSE_PTR(sp, sense_ptr);
2798 SET_CMD_SENSE_LEN(sp, sense_len);
2799
2800
2801 if (sense_len == 0) {
2802 rsp->status_srb = NULL;
2803 sp->done(sp, cp->result);
2804 }
2805}
2806
2807
2808
2809
2810
2811
2812
2813
/**
 * qla2x00_error_entry() - Process an IOCB that completed with entry_status set.
 * @vha: SCSI driver HA context
 * @rsp: response queue the entry arrived on
 * @pkt: the errored entry
 *
 * Return: 0 when the entry was consumed here; 1 when it belongs to the
 * target-mode code (ABTS/CTIO types) and the caller should dispatch it
 * through the normal type switch instead.
 */
static int
qla2x00_error_entry(scsi_qla_host_t *vha, struct rsp_que *rsp, sts_entry_t *pkt)
{
	srb_t *sp;
	struct qla_hw_data *ha = vha->hw;
	const char func[] = "ERROR-IOCB";
	uint16_t que = MSW(pkt->handle);
	struct req_que *req = NULL;
	int res = DID_ERROR << 16;

	ql_dbg(ql_dbg_async, vha, 0x502a,
	    "iocb type %xh with error status %xh, handle %xh, rspq id %d\n",
	    pkt->entry_type, pkt->entry_status, pkt->handle, rsp->id);

	/* Bogus queue id — nothing to complete. */
	if (que >= ha->max_req_queues || !ha->req_q_map[que])
		goto fatal;

	req = ha->req_q_map[que];

	/* RF_BUSY means the firmware was too busy; let the midlayer retry. */
	if (pkt->entry_status & RF_BUSY)
		res = DID_BUS_BUSY << 16;

	/* Target-mode "skip" handles carry no srb to complete. */
	if ((pkt->handle & ~QLA_TGT_HANDLE_MASK) == QLA_TGT_SKIP_HANDLE)
		return 0;

	switch (pkt->entry_type) {
	case NOTIFY_ACK_TYPE:
	case STATUS_TYPE:
	case STATUS_CONT_TYPE:
	case LOGINOUT_PORT_IOCB_TYPE:
	case CT_IOCB_TYPE:
	case ELS_IOCB_TYPE:
	case ABORT_IOCB_TYPE:
	case MBX_IOCB_TYPE:
	default:
		/* Initiator-side entry: complete its srb with the error. */
		sp = qla2x00_get_sp_from_handle(vha, func, req, pkt);
		if (sp) {
			sp->done(sp, res);
			return 0;
		}
		break;

	case ABTS_RESP_24XX:
	case CTIO_TYPE7:
	case CTIO_CRC2:
		/* Target-mode entry: hand back to the caller's dispatcher. */
		return 1;
	}
fatal:
	ql_log(ql_log_warn, vha, 0x5030,
	    "Error entry - invalid handle/queue (%04x).\n", que);
	return 0;
}
2866
2867
2868
2869
2870
2871
/**
 * qla24xx_mbx_completion() - Latch mailbox registers on command completion.
 * @vha: SCSI driver HA context
 * @mb0: value of the first mailbox register (delivered in the host status)
 *
 * Copies the mailbox registers the current command expects (per mcp->in_mb)
 * into ha->mailbox_out[] and flags the mailbox interrupt.
 */
static void
qla24xx_mbx_completion(scsi_qla_host_t *vha, uint16_t mb0)
{
	uint16_t	cnt;
	uint32_t	mboxes;		/* bitmap of registers to read back */
	uint16_t __iomem *wptr;
	struct qla_hw_data *ha = vha->hw;
	struct device_reg_24xx __iomem *reg = &ha->iobase->isp24;

	/* Read all mbx registers? mboxes holds a 32-bit mask; mbx_count > 32
	 * would overflow it. With no active command (mcp == NULL) read all. */
	WARN_ON_ONCE(ha->mbx_count > 32);
	mboxes = (1ULL << ha->mbx_count) - 1;
	if (!ha->mcp)
		ql_dbg(ql_dbg_async, vha, 0x504e, "MBX pointer ERROR.\n");
	else
		mboxes = ha->mcp->in_mb;

	/* Load return mailbox registers. mb0 already arrived via host
	 * status, so start reading hardware at mailbox1. */
	ha->flags.mbox_int = 1;
	ha->mailbox_out[0] = mb0;
	mboxes >>= 1;
	wptr = (uint16_t __iomem *)&reg->mailbox1;

	for (cnt = 1; cnt < ha->mbx_count; cnt++) {
		/* Only touch registers the command asked for. */
		if (mboxes & BIT_0)
			ha->mailbox_out[cnt] = RD_REG_WORD(wptr);

		mboxes >>= 1;
		wptr++;
	}
}
2903
2904static void
2905qla24xx_abort_iocb_entry(scsi_qla_host_t *vha, struct req_que *req,
2906 struct abort_entry_24xx *pkt)
2907{
2908 const char func[] = "ABT_IOCB";
2909 srb_t *sp;
2910 struct srb_iocb *abt;
2911
2912 sp = qla2x00_get_sp_from_handle(vha, func, req, pkt);
2913 if (!sp)
2914 return;
2915
2916 abt = &sp->u.iocb_cmd;
2917 abt->u.abt.comp_status = le16_to_cpu(pkt->nport_handle);
2918 sp->done(sp, 0);
2919}
2920
2921void qla24xx_nvme_ls4_iocb(struct scsi_qla_host *vha,
2922 struct pt_ls4_request *pkt, struct req_que *req)
2923{
2924 srb_t *sp;
2925 const char func[] = "LS4_IOCB";
2926 uint16_t comp_status;
2927
2928 sp = qla2x00_get_sp_from_handle(vha, func, req, pkt);
2929 if (!sp)
2930 return;
2931
2932 comp_status = le16_to_cpu(pkt->status);
2933 sp->done(sp, comp_status);
2934}
2935
2936
2937
2938
2939
2940
/**
 * qla24xx_process_response_queue() - Drain and dispatch response-queue IOCBs.
 * @vha: SCSI driver HA context
 * @rsp: response queue to process
 *
 * Walks ring entries until one is marked RESPONSE_PROCESSED, dispatching each
 * by entry_type, then tells the ISP how far the host has consumed by writing
 * the out-pointer register.
 */
void qla24xx_process_response_queue(struct scsi_qla_host *vha,
	struct rsp_que *rsp)
{
	struct sts_entry_24xx *pkt;
	struct qla_hw_data *ha = vha->hw;

	if (!ha->flags.fw_started)
		return;

	/* Keep qpair CPU affinity in sync with where the IRQ landed. */
	if (rsp->qpair->cpuid != smp_processor_id())
		qla_cpu_update(rsp->qpair, smp_processor_id());

	while (rsp->ring_ptr->signature != RESPONSE_PROCESSED) {
		pkt = (struct sts_entry_24xx *)rsp->ring_ptr;

		/* Advance the ring pointer, wrapping at the end. */
		rsp->ring_index++;
		if (rsp->ring_index == rsp->length) {
			rsp->ring_index = 0;
			rsp->ring_ptr = rsp->ring;
		} else {
			rsp->ring_ptr++;
		}

		if (pkt->entry_status != 0) {
			/* Errored entry; a return of 1 means it is a
			 * target-mode type that still needs dispatching. */
			if (qla2x00_error_entry(vha, rsp, (sts_entry_t *) pkt))
				goto process_err;

			((response_t *)pkt)->signature = RESPONSE_PROCESSED;
			wmb();
			continue;
		}
process_err:

		switch (pkt->entry_type) {
		case STATUS_TYPE:
			qla2x00_status_entry(vha, rsp, pkt);
			break;
		case STATUS_CONT_TYPE:
			qla2x00_status_cont_entry(rsp, (sts_cont_entry_t *)pkt);
			break;
		case VP_RPT_ID_IOCB_TYPE:
			qla24xx_report_id_acquisition(vha,
			    (struct vp_rpt_id_entry_24xx *)pkt);
			break;
		case LOGINOUT_PORT_IOCB_TYPE:
			qla24xx_logio_entry(vha, rsp->req,
			    (struct logio_entry_24xx *)pkt);
			break;
		case CT_IOCB_TYPE:
			qla24xx_els_ct_entry(vha, rsp->req, pkt, CT_IOCB_TYPE);
			break;
		case ELS_IOCB_TYPE:
			qla24xx_els_ct_entry(vha, rsp->req, pkt, ELS_IOCB_TYPE);
			break;
		case ABTS_RECV_24XX:
			if (IS_QLA83XX(ha) || IS_QLA27XX(ha)) {
				/* Ensure that the ATIO queue is empty before
				 * handling the ABTS on these parts. */
				qlt_handle_abts_recv(vha, rsp,
				    (response_t *)pkt);
				break;
			} else {
				qlt_24xx_process_atio_queue(vha, 1);
			}
			/* fall through */
		case ABTS_RESP_24XX:
		case CTIO_TYPE7:
		case CTIO_CRC2:
			/* Target-mode entries: route through qlt. */
			qlt_response_pkt_all_vps(vha, rsp, (response_t *)pkt);
			break;
		case PT_LS4_REQUEST:
			qla24xx_nvme_ls4_iocb(vha, (struct pt_ls4_request *)pkt,
			    rsp->req);
			break;
		case NOTIFY_ACK_TYPE:
			if (pkt->handle == QLA_TGT_SKIP_HANDLE)
				qlt_response_pkt_all_vps(vha, rsp,
				    (response_t *)pkt);
			else
				qla24xxx_nack_iocb_entry(vha, rsp->req,
					(struct nack_to_isp *)pkt);
			break;
		case MARKER_TYPE:
			/* Markers need no processing; they only flush the
			 * request queue. */
			break;
		case ABORT_IOCB_TYPE:
			qla24xx_abort_iocb_entry(vha, rsp->req,
			    (struct abort_entry_24xx *)pkt);
			break;
		case MBX_IOCB_TYPE:
			qla24xx_mbx_iocb_entry(vha, rsp->req,
			    (struct mbx_24xx_entry *)pkt);
			break;
		case VP_CTRL_IOCB_TYPE:
			qla_ctrlvp_completed(vha, rsp->req,
			    (struct vp_ctrl_entry_24xx *)pkt);
			break;
		default:
			/* Type not supported — log and skip the entry. */
			ql_dbg(ql_dbg_async, vha, 0x5042,
			    "Received unknown response pkt type %x "
			    "entry status=%x.\n",
			    pkt->entry_type, pkt->entry_status);
			break;
		}
		((response_t *)pkt)->signature = RESPONSE_PROCESSED;
		wmb();
	}

	/* Tell the adapter how far we have consumed. */
	if (IS_P3P_TYPE(ha)) {
		struct device_reg_82xx __iomem *reg = &ha->iobase->isp82;
		WRT_REG_DWORD(&reg->rsp_q_out[0], rsp->ring_index);
	} else {
		WRT_REG_DWORD(rsp->rsp_q_out, rsp->ring_index);
	}
}
3059
/**
 * qla2xxx_check_risc_status() - Probe RISC state after a pause condition.
 * @vha: SCSI driver HA context
 *
 * Selects the 0x7C00 register window and polls iobase_window for readiness,
 * logging an additional diagnostic code when iobase_c8 BIT_3 is set. Only
 * meaningful on 25xx/81xx/83xx/27xx parts; the window is restored on exit.
 */
static void
qla2xxx_check_risc_status(scsi_qla_host_t *vha)
{
	int rval;
	uint32_t cnt;
	struct qla_hw_data *ha = vha->hw;
	struct device_reg_24xx __iomem *reg = &ha->iobase->isp24;

	if (!IS_QLA25XX(ha) && !IS_QLA81XX(ha) && !IS_QLA83XX(ha) &&
	    !IS_QLA27XX(ha))
		return;

	rval = QLA_SUCCESS;
	/* Select window 0x7C00, then request state via window value 0x0001
	 * and poll BIT_0 for acknowledgment (10 us per iteration). */
	WRT_REG_DWORD(&reg->iobase_addr, 0x7C00);
	RD_REG_DWORD(&reg->iobase_addr);
	WRT_REG_DWORD(&reg->iobase_window, 0x0001);
	for (cnt = 10000; (RD_REG_DWORD(&reg->iobase_window) & BIT_0) == 0 &&
	    rval == QLA_SUCCESS; cnt--) {
		if (cnt) {
			WRT_REG_DWORD(&reg->iobase_window, 0x0001);
			udelay(10);
		} else
			rval = QLA_FUNCTION_TIMEOUT;
	}
	if (rval == QLA_SUCCESS)
		goto next_test;

	/* First request timed out: retry with window value 0x0003. */
	rval = QLA_SUCCESS;
	WRT_REG_DWORD(&reg->iobase_window, 0x0003);
	for (cnt = 100; (RD_REG_DWORD(&reg->iobase_window) & BIT_0) == 0 &&
	    rval == QLA_SUCCESS; cnt--) {
		if (cnt) {
			WRT_REG_DWORD(&reg->iobase_window, 0x0003);
			udelay(10);
		} else
			rval = QLA_FUNCTION_TIMEOUT;
	}
	if (rval != QLA_SUCCESS)
		goto done;

next_test:
	if (RD_REG_DWORD(&reg->iobase_c8) & BIT_3)
		ql_log(ql_log_info, vha, 0x504c,
		    "Additional code -- 0x55AA.\n");

done:
	/* Restore the default register window. */
	WRT_REG_DWORD(&reg->iobase_window, 0x0000);
	RD_REG_DWORD(&reg->iobase_window);
}
3109
3110
3111
3112
3113
3114
3115
3116
3117
3118
3119irqreturn_t
3120qla24xx_intr_handler(int irq, void *dev_id)
3121{
3122 scsi_qla_host_t *vha;
3123 struct qla_hw_data *ha;
3124 struct device_reg_24xx __iomem *reg;
3125 int status;
3126 unsigned long iter;
3127 uint32_t stat;
3128 uint32_t hccr;
3129 uint16_t mb[8];
3130 struct rsp_que *rsp;
3131 unsigned long flags;
3132 bool process_atio = false;
3133
3134 rsp = (struct rsp_que *) dev_id;
3135 if (!rsp) {
3136 ql_log(ql_log_info, NULL, 0x5059,
3137 "%s: NULL response queue pointer.\n", __func__);
3138 return IRQ_NONE;
3139 }
3140
3141 ha = rsp->hw;
3142 reg = &ha->iobase->isp24;
3143 status = 0;
3144
3145 if (unlikely(pci_channel_offline(ha->pdev)))
3146 return IRQ_HANDLED;
3147
3148 spin_lock_irqsave(&ha->hardware_lock, flags);
3149 vha = pci_get_drvdata(ha->pdev);
3150 for (iter = 50; iter--; ) {
3151 stat = RD_REG_DWORD(®->host_status);
3152 if (qla2x00_check_reg32_for_disconnect(vha, stat))
3153 break;
3154 if (stat & HSRX_RISC_PAUSED) {
3155 if (unlikely(pci_channel_offline(ha->pdev)))
3156 break;
3157
3158 hccr = RD_REG_DWORD(®->hccr);
3159
3160 ql_log(ql_log_warn, vha, 0x504b,
3161 "RISC paused -- HCCR=%x, Dumping firmware.\n",
3162 hccr);
3163
3164 qla2xxx_check_risc_status(vha);
3165
3166 ha->isp_ops->fw_dump(vha, 1);
3167 set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
3168 break;
3169 } else if ((stat & HSRX_RISC_INT) == 0)
3170 break;
3171
3172 switch (stat & 0xff) {
3173 case INTR_ROM_MB_SUCCESS:
3174 case INTR_ROM_MB_FAILED:
3175 case INTR_MB_SUCCESS:
3176 case INTR_MB_FAILED:
3177 qla24xx_mbx_completion(vha, MSW(stat));
3178 status |= MBX_INTERRUPT;
3179
3180 break;
3181 case INTR_ASYNC_EVENT:
3182 mb[0] = MSW(stat);
3183 mb[1] = RD_REG_WORD(®->mailbox1);
3184 mb[2] = RD_REG_WORD(®->mailbox2);
3185 mb[3] = RD_REG_WORD(®->mailbox3);
3186 qla2x00_async_event(vha, rsp, mb);
3187 break;
3188 case INTR_RSP_QUE_UPDATE:
3189 case INTR_RSP_QUE_UPDATE_83XX:
3190 qla24xx_process_response_queue(vha, rsp);
3191 break;
3192 case INTR_ATIO_QUE_UPDATE_27XX:
3193 case INTR_ATIO_QUE_UPDATE:
3194 process_atio = true;
3195 break;
3196 case INTR_ATIO_RSP_QUE_UPDATE:
3197 process_atio = true;
3198 qla24xx_process_response_queue(vha, rsp);
3199 break;
3200 default:
3201 ql_dbg(ql_dbg_async, vha, 0x504f,
3202 "Unrecognized interrupt type (%d).\n", stat * 0xff);
3203 break;
3204 }
3205 WRT_REG_DWORD(®->hccr, HCCRX_CLR_RISC_INT);
3206 RD_REG_DWORD_RELAXED(®->hccr);
3207 if (unlikely(IS_QLA83XX(ha) && (ha->pdev->revision == 1)))
3208 ndelay(3500);
3209 }
3210 qla2x00_handle_mbx_completion(ha, status);
3211 spin_unlock_irqrestore(&ha->hardware_lock, flags);
3212
3213 if (process_atio) {
3214 spin_lock_irqsave(&ha->tgt.atio_lock, flags);
3215 qlt_24xx_process_atio_queue(vha, 0);
3216 spin_unlock_irqrestore(&ha->tgt.atio_lock, flags);
3217 }
3218
3219 return IRQ_HANDLED;
3220}
3221
3222static irqreturn_t
3223qla24xx_msix_rsp_q(int irq, void *dev_id)
3224{
3225 struct qla_hw_data *ha;
3226 struct rsp_que *rsp;
3227 struct device_reg_24xx __iomem *reg;
3228 struct scsi_qla_host *vha;
3229 unsigned long flags;
3230
3231 rsp = (struct rsp_que *) dev_id;
3232 if (!rsp) {
3233 ql_log(ql_log_info, NULL, 0x505a,
3234 "%s: NULL response queue pointer.\n", __func__);
3235 return IRQ_NONE;
3236 }
3237 ha = rsp->hw;
3238 reg = &ha->iobase->isp24;
3239
3240 spin_lock_irqsave(&ha->hardware_lock, flags);
3241
3242 vha = pci_get_drvdata(ha->pdev);
3243 qla24xx_process_response_queue(vha, rsp);
3244 if (!ha->flags.disable_msix_handshake) {
3245 WRT_REG_DWORD(®->hccr, HCCRX_CLR_RISC_INT);
3246 RD_REG_DWORD_RELAXED(®->hccr);
3247 }
3248 spin_unlock_irqrestore(&ha->hardware_lock, flags);
3249
3250 return IRQ_HANDLED;
3251}
3252
/**
 * qla24xx_msix_default() - MSI-X handler for the default (vector 0) interrupt.
 * @irq: interrupt number (unused)
 * @dev_id: pointer to the struct rsp_que registered for this vector
 *
 * Same dispatch as qla24xx_intr_handler() but services a single cause per
 * invocation (MSI-X delivers one interrupt per event, so no 50-iteration
 * polling loop is needed).
 *
 * Return: IRQ_HANDLED, or IRQ_NONE if called with a NULL dev_id.
 */
static irqreturn_t
qla24xx_msix_default(int irq, void *dev_id)
{
	scsi_qla_host_t	*vha;
	struct qla_hw_data *ha;
	struct rsp_que *rsp;
	struct device_reg_24xx __iomem *reg;
	int		status;
	uint32_t	stat;
	uint32_t	hccr;
	uint16_t	mb[8];
	unsigned long flags;
	bool process_atio = false;

	rsp = (struct rsp_que *) dev_id;
	if (!rsp) {
		ql_log(ql_log_info, NULL, 0x505c,
		    "%s: NULL response queue pointer.\n", __func__);
		return IRQ_NONE;
	}
	ha = rsp->hw;
	reg = &ha->iobase->isp24;
	status = 0;

	spin_lock_irqsave(&ha->hardware_lock, flags);
	vha = pci_get_drvdata(ha->pdev);
	do {
		stat = RD_REG_DWORD(&reg->host_status);
		if (qla2x00_check_reg32_for_disconnect(vha, stat))
			break;
		if (stat & HSRX_RISC_PAUSED) {
			if (unlikely(pci_channel_offline(ha->pdev)))
				break;

			hccr = RD_REG_DWORD(&reg->hccr);

			ql_log(ql_log_info, vha, 0x5050,
			    "RISC paused -- HCCR=%x, Dumping firmware.\n",
			    hccr);

			qla2xxx_check_risc_status(vha);

			/* Capture firmware state, then request an ISP abort. */
			ha->isp_ops->fw_dump(vha, 1);
			set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
			break;
		} else if ((stat & HSRX_RISC_INT) == 0)
			break;

		/* Low byte of host_status selects the interrupt cause. */
		switch (stat & 0xff) {
		case INTR_ROM_MB_SUCCESS:
		case INTR_ROM_MB_FAILED:
		case INTR_MB_SUCCESS:
		case INTR_MB_FAILED:
			qla24xx_mbx_completion(vha, MSW(stat));
			status |= MBX_INTERRUPT;

			break;
		case INTR_ASYNC_EVENT:
			/* Async event: mb0 rides in host_status's MSW. */
			mb[0] = MSW(stat);
			mb[1] = RD_REG_WORD(&reg->mailbox1);
			mb[2] = RD_REG_WORD(&reg->mailbox2);
			mb[3] = RD_REG_WORD(&reg->mailbox3);
			qla2x00_async_event(vha, rsp, mb);
			break;
		case INTR_RSP_QUE_UPDATE:
		case INTR_RSP_QUE_UPDATE_83XX:
			qla24xx_process_response_queue(vha, rsp);
			break;
		case INTR_ATIO_QUE_UPDATE_27XX:
		case INTR_ATIO_QUE_UPDATE:
			/* Deferred: needs atio_lock, not hardware_lock. */
			process_atio = true;
			break;
		case INTR_ATIO_RSP_QUE_UPDATE:
			process_atio = true;
			qla24xx_process_response_queue(vha, rsp);
			break;
		default:
			ql_dbg(ql_dbg_async, vha, 0x5051,
			    "Unrecognized interrupt type (%d).\n", stat & 0xff);
			break;
		}
		WRT_REG_DWORD(&reg->hccr, HCCRX_CLR_RISC_INT);
	} while (0);
	qla2x00_handle_mbx_completion(ha, status);
	spin_unlock_irqrestore(&ha->hardware_lock, flags);

	if (process_atio) {
		spin_lock_irqsave(&ha->tgt.atio_lock, flags);
		qlt_24xx_process_atio_queue(vha, 0);
		spin_unlock_irqrestore(&ha->tgt.atio_lock, flags);
	}

	return IRQ_HANDLED;
}
3347
3348irqreturn_t
3349qla2xxx_msix_rsp_q(int irq, void *dev_id)
3350{
3351 struct qla_hw_data *ha;
3352 struct qla_qpair *qpair;
3353 struct device_reg_24xx __iomem *reg;
3354 unsigned long flags;
3355
3356 qpair = dev_id;
3357 if (!qpair) {
3358 ql_log(ql_log_info, NULL, 0x505b,
3359 "%s: NULL response queue pointer.\n", __func__);
3360 return IRQ_NONE;
3361 }
3362 ha = qpair->hw;
3363
3364
3365 if (unlikely(!ha->flags.disable_msix_handshake)) {
3366 reg = &ha->iobase->isp24;
3367 spin_lock_irqsave(&ha->hardware_lock, flags);
3368 WRT_REG_DWORD(®->hccr, HCCRX_CLR_RISC_INT);
3369 spin_unlock_irqrestore(&ha->hardware_lock, flags);
3370 }
3371
3372 queue_work(ha->wq, &qpair->q_work);
3373
3374 return IRQ_HANDLED;
3375}
3376
3377
3378
/* Name/handler pair used to register an MSI-X vector. */
struct qla_init_msix_entry {
	const char *name;
	irq_handler_t handler;
};

/* Vector table for non-P3P adapters; indices match QLA_BASE_VECTORS layout
 * (0 = default, 1 = rsp_q) plus the ATIO and per-qpair vectors. */
static const struct qla_init_msix_entry msix_entries[] = {
	{ "default", qla24xx_msix_default },
	{ "rsp_q", qla24xx_msix_rsp_q },
	{ "atio_q", qla83xx_msix_atio_q },
	{ "qpair_multiq", qla2xxx_msix_rsp_q },
};

/* Vector table for ISP82xx (P3P) adapters. */
static const struct qla_init_msix_entry qla82xx_msix_entries[] = {
	{ "qla2xxx (default)", qla82xx_msix_default },
	{ "qla2xxx (rsp_q)", qla82xx_msix_rsp_q },
};
3395
/**
 * qla24xx_enable_msix() - Allocate and register MSI-X vectors.
 * @ha: HBA context
 * @rsp: base response queue to attach the base vectors to
 *
 * Allocates between min_vecs and ha->msix_count MSI-X vectors (with IRQ
 * affinity spreading unless the user controls IRQs), scales the queue counts
 * down if fewer vectors were granted, registers the base vectors and the
 * optional ATIO vector, and decides whether multiqueue can be enabled.
 *
 * Return: 0 on success, negative errno / failure code otherwise.
 */
static int
qla24xx_enable_msix(struct qla_hw_data *ha, struct rsp_que *rsp)
{
	int i, ret;
	struct qla_msix_entry *qentry;
	scsi_qla_host_t *vha = pci_get_drvdata(ha->pdev);
	int min_vecs = QLA_BASE_VECTORS;
	struct irq_affinity desc = {
		.pre_vectors = QLA_BASE_VECTORS,
	};

	/* Target mode needs one extra pre-spread vector for the ATIO queue. */
	if (QLA_TGT_MODE_ENABLED() && (ql2xenablemsix != 0) &&
	    IS_ATIO_MSIX_CAPABLE(ha)) {
		desc.pre_vectors++;
		min_vecs++;
	}

	if (USER_CTRL_IRQ(ha)) {
		/* User wants to control IRQ setting for target mode. */
		ret = pci_alloc_irq_vectors(ha->pdev, min_vecs,
		    ha->msix_count, PCI_IRQ_MSIX);
	} else
		ret = pci_alloc_irq_vectors_affinity(ha->pdev, min_vecs,
		    ha->msix_count, PCI_IRQ_MSIX | PCI_IRQ_AFFINITY,
		    &desc);

	if (ret < 0) {
		ql_log(ql_log_fatal, vha, 0x00c7,
		    "MSI-X: Failed to enable support, "
		    "giving up -- %d/%d.\n",
		    ha->msix_count, ret);
		goto msix_out;
	} else if (ret < ha->msix_count) {
		/* Fewer vectors than requested: shrink the queue budget. */
		ql_log(ql_log_warn, vha, 0x00c6,
		    "MSI-X: Failed to enable support "
		    "with %d vectors, using %d vectors.\n",
		    ha->msix_count, ret);
		ha->msix_count = ret;

		if (ha->mqiobase && (ql2xmqsupport || ql2xnvmeenable)) {
			ha->max_req_queues = ha->msix_count - 1;

			/* ATIOq needs one vector. That's 1 less QPair. */
			if (QLA_TGT_MODE_ENABLED())
				ha->max_req_queues--;

			ha->max_rsp_queues = ha->max_req_queues;

			ha->max_qpairs = ha->max_req_queues - 1;
			ql_dbg_pci(ql_dbg_init, ha->pdev, 0x0190,
			    "Adjusted Max no of queues pairs: %d.\n", ha->max_qpairs);
		}
	}
	vha->irq_offset = desc.pre_vectors;
	ha->msix_entries = kcalloc(ha->msix_count,
				   sizeof(struct qla_msix_entry),
				   GFP_KERNEL);
	if (!ha->msix_entries) {
		ql_log(ql_log_fatal, vha, 0x00c8,
		    "Failed to allocate memory for ha->msix_entries.\n");
		ret = -ENOMEM;
		goto msix_out;
	}
	ha->flags.msix_enabled = 1;

	for (i = 0; i < ha->msix_count; i++) {
		qentry = &ha->msix_entries[i];
		qentry->vector = pci_irq_vector(ha->pdev, i);
		qentry->entry = i;
		qentry->have_irq = 0;
		qentry->in_use = 0;
		qentry->handle = NULL;
	}

	/* Enable MSI-X vectors for the base queue. */
	for (i = 0; i < QLA_BASE_VECTORS; i++) {
		qentry = &ha->msix_entries[i];
		qentry->handle = rsp;
		rsp->msix = qentry;
		scnprintf(qentry->name, sizeof(qentry->name),
		    "qla2xxx%lu_%s", vha->host_no, msix_entries[i].name);
		if (IS_P3P_TYPE(ha))
			ret = request_irq(qentry->vector,
				qla82xx_msix_entries[i].handler,
				0, qla82xx_msix_entries[i].name, rsp);
		else
			ret = request_irq(qentry->vector,
				msix_entries[i].handler,
				0, qentry->name, rsp);
		if (ret)
			goto msix_register_fail;
		qentry->have_irq = 1;
		qentry->in_use = 1;
	}

	/*
	 * If target mode is enabled, also request the vector for the ATIO
	 * queue.
	 */
	if (QLA_TGT_MODE_ENABLED() && (ql2xenablemsix != 0) &&
	    IS_ATIO_MSIX_CAPABLE(ha)) {
		qentry = &ha->msix_entries[QLA_ATIO_VECTOR];
		rsp->msix = qentry;
		qentry->handle = rsp;
		scnprintf(qentry->name, sizeof(qentry->name),
		    "qla2xxx%lu_%s", vha->host_no,
		    msix_entries[QLA_ATIO_VECTOR].name);
		qentry->in_use = 1;
		ret = request_irq(qentry->vector,
			msix_entries[QLA_ATIO_VECTOR].handler,
			0, qentry->name, rsp);
		qentry->have_irq = 1;
	}

msix_register_fail:
	if (ret) {
		/* request_irq failed: free everything registered so far. */
		ql_log(ql_log_fatal, vha, 0x00cb,
		    "MSI-X: unable to register handler -- %x/%d.\n",
		    qentry->vector, ret);
		qla2x00_free_irqs(vha);
		ha->mqenable = 0;
		goto msix_out;
	}

	/* Enable MSI-X vector for response queue update for queue 0 */
	if (IS_QLA83XX(ha) || IS_QLA27XX(ha)) {
		if (ha->msixbase && ha->mqiobase &&
		    (ha->max_rsp_queues > 1 || ha->max_req_queues > 1 ||
		     ql2xmqsupport))
			ha->mqenable = 1;
	} else
		if (ha->mqiobase &&
		    (ha->max_rsp_queues > 1 || ha->max_req_queues > 1 ||
		     ql2xmqsupport))
			ha->mqenable = 1;
	ql_dbg(ql_dbg_multiq, vha, 0xc005,
	    "mqiobase=%p, max_rsp_queues=%d, max_req_queues=%d.\n",
	    ha->mqiobase, ha->max_rsp_queues, ha->max_req_queues);
	ql_dbg(ql_dbg_init, vha, 0x0055,
	    "mqiobase=%p, max_rsp_queues=%d, max_req_queues=%d.\n",
	    ha->mqiobase, ha->max_rsp_queues, ha->max_req_queues);

msix_out:
	return ret;
}
3541
/**
 * qla2x00_request_irqs() - Set up interrupt delivery for the adapter.
 * @ha: HBA context
 * @rsp: base response queue passed to the interrupt handlers
 *
 * Tries MSI-X first (where supported and not disabled by ql2xenablemsix),
 * falls back to single-vector MSI, then to legacy INTa. Known-broken
 * subsystem IDs and chip revisions are steered away from MSI-X.
 *
 * Return: 0 on success, QLA_FUNCTION_FAILED or a request_irq() error code.
 */
int
qla2x00_request_irqs(struct qla_hw_data *ha, struct rsp_que *rsp)
{
	int ret = QLA_FUNCTION_FAILED;
	device_reg_t *reg = ha->iobase;
	scsi_qla_host_t *vha = pci_get_drvdata(ha->pdev);

	/* If possible, enable MSI-X. */
	if (ql2xenablemsix == 0 || (!IS_QLA2432(ha) && !IS_QLA2532(ha) &&
	    !IS_QLA8432(ha) && !IS_CNA_CAPABLE(ha) && !IS_QLA2031(ha) &&
	    !IS_QLAFX00(ha) && !IS_QLA27XX(ha)))
		goto skip_msi;

	if (ql2xenablemsix == 2)
		goto skip_msix;

	/* HP subsystem IDs with known MSI-X problems. */
	if (ha->pdev->subsystem_vendor == PCI_VENDOR_ID_HP &&
		(ha->pdev->subsystem_device == 0x7040 ||
		ha->pdev->subsystem_device == 0x7041 ||
		ha->pdev->subsystem_device == 0x1705)) {
		ql_log(ql_log_warn, vha, 0x0034,
		    "MSI-X: Unsupported ISP 2432 SSVID/SSDID (0x%X,0x%X).\n",
			ha->pdev->subsystem_vendor,
			ha->pdev->subsystem_device);
		goto skip_msi;
	}

	/* Early ISP2432 silicon cannot do MSI-X. */
	if (IS_QLA2432(ha) && (ha->pdev->revision < QLA_MSIX_CHIP_REV_24XX)) {
		ql_log(ql_log_warn, vha, 0x0035,
		    "MSI-X; Unsupported ISP2432 (0x%X, 0x%X).\n",
		    ha->pdev->revision, QLA_MSIX_CHIP_REV_24XX);
		goto skip_msix;
	}

	ret = qla24xx_enable_msix(ha, rsp);
	if (!ret) {
		ql_dbg(ql_dbg_init, vha, 0x0036,
		    "MSI-X: Enabled (0x%X, 0x%X).\n",
		    ha->chip_revision, ha->fw_attributes);
		goto clear_risc_ints;
	}

skip_msix:

	ql_log(ql_log_info, vha, 0x0037,
	    "Falling back-to MSI mode -%d.\n", ret);

	/* Only these families support plain MSI. */
	if (!IS_QLA24XX(ha) && !IS_QLA2532(ha) && !IS_QLA8432(ha) &&
	    !IS_QLA8001(ha) && !IS_P3P_TYPE(ha) && !IS_QLAFX00(ha) &&
	    !IS_QLA27XX(ha))
		goto skip_msi;

	ret = pci_alloc_irq_vectors(ha->pdev, 1, 1, PCI_IRQ_MSI);
	if (!ret) {
		ql_dbg(ql_dbg_init, vha, 0x0038,
		    "MSI: Enabled.\n");
		ha->flags.msi_enabled = 1;
	} else
		ql_log(ql_log_warn, vha, 0x0039,
		    "Falling back-to INTa mode -- %d.\n", ret);
skip_msi:

	/* Skip INTx on ISP82xx. */
	if (!ha->flags.msi_enabled && IS_QLA82XX(ha))
		return QLA_FUNCTION_FAILED;

	ret = request_irq(ha->pdev->irq, ha->isp_ops->intr_handler,
	    ha->flags.msi_enabled ? 0 : IRQF_SHARED,
	    QLA2XXX_DRIVER_NAME, rsp);
	if (ret) {
		ql_log(ql_log_warn, vha, 0x003a,
		    "Failed to reserve interrupt %d already in use.\n",
		    ha->pdev->irq);
		goto fail;
	} else if (!ha->flags.msi_enabled) {
		ql_dbg(ql_dbg_init, vha, 0x0125,
		    "INTa mode: Enabled.\n");
		ha->flags.mr_intr_valid = 1;
	}

clear_risc_ints:
	/* FWI2/FX00 parts have no legacy semaphore register to clear. */
	if (IS_FWI2_CAPABLE(ha) || IS_QLAFX00(ha))
		goto fail;

	spin_lock_irq(&ha->hardware_lock);
	WRT_REG_WORD(&reg->isp.semaphore, 0);
	spin_unlock_irq(&ha->hardware_lock);

fail:
	return ret;
}
3633
3634void
3635qla2x00_free_irqs(scsi_qla_host_t *vha)
3636{
3637 struct qla_hw_data *ha = vha->hw;
3638 struct rsp_que *rsp;
3639 struct qla_msix_entry *qentry;
3640 int i;
3641
3642
3643
3644
3645
3646 if (!ha->rsp_q_map || !ha->rsp_q_map[0])
3647 goto free_irqs;
3648 rsp = ha->rsp_q_map[0];
3649
3650 if (ha->flags.msix_enabled) {
3651 for (i = 0; i < ha->msix_count; i++) {
3652 qentry = &ha->msix_entries[i];
3653 if (qentry->have_irq) {
3654 irq_set_affinity_notifier(qentry->vector, NULL);
3655 free_irq(pci_irq_vector(ha->pdev, i), qentry->handle);
3656 }
3657 }
3658 kfree(ha->msix_entries);
3659 ha->msix_entries = NULL;
3660 ha->flags.msix_enabled = 0;
3661 ql_dbg(ql_dbg_init, vha, 0x0042,
3662 "Disabled MSI-X.\n");
3663 } else {
3664 free_irq(pci_irq_vector(ha->pdev, 0), rsp);
3665 }
3666
3667free_irqs:
3668 pci_free_irq_vectors(ha->pdev);
3669}
3670
3671int qla25xx_request_irq(struct qla_hw_data *ha, struct qla_qpair *qpair,
3672 struct qla_msix_entry *msix, int vector_type)
3673{
3674 const struct qla_init_msix_entry *intr = &msix_entries[vector_type];
3675 scsi_qla_host_t *vha = pci_get_drvdata(ha->pdev);
3676 int ret;
3677
3678 scnprintf(msix->name, sizeof(msix->name),
3679 "qla2xxx%lu_qpair%d", vha->host_no, qpair->id);
3680 ret = request_irq(msix->vector, intr->handler, 0, msix->name, qpair);
3681 if (ret) {
3682 ql_log(ql_log_fatal, vha, 0x00e6,
3683 "MSI-X: Unable to register handler -- %x/%d.\n",
3684 msix->vector, ret);
3685 return ret;
3686 }
3687 msix->have_irq = 1;
3688 msix->handle = qpair;
3689 return ret;
3690}
3691