/*
 * QLogic Fibre Channel HBA Driver
 * Copyright (c)  2003-2014 QLogic Corporation
 *
 * See LICENSE.qla2xxx for copyright and licensing details.
 */
#include "qla_def.h"
#include "qla_target.h"

#include <linux/delay.h>
#include <linux/slab.h>
#include <linux/cpu.h>
#include <linux/t10-pi.h>
#include <scsi/scsi_tcq.h>
#include <scsi/scsi_bsg_fc.h>
#include <scsi/scsi_eh.h>

static void qla2x00_mbx_completion(scsi_qla_host_t *, uint16_t);
static void qla2x00_status_entry(scsi_qla_host_t *, struct rsp_que *, void *);
static void qla2x00_status_cont_entry(struct rsp_que *, sts_cont_entry_t *);
static int qla2x00_error_entry(scsi_qla_host_t *, struct rsp_que *,
	sts_entry_t *);
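
/**
 * qla2100_intr_handler() - Process interrupts for the ISP2100 and ISP2200.
 * @irq: interrupt number
 * @dev_id: SCSI driver HA context
 *
 * Called by system whenever the host adapter generates an interrupt.
 *
 * Returns handled flag.
 */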
irqreturn_t
qla2100_intr_handler(int irq, void *dev_id)
{
	scsi_qla_host_t *vha;
	struct qla_hw_data *ha;
	struct device_reg_2xxx __iomem *reg;
	int status;
	unsigned long iter;
	uint16_t hccr;
	uint16_t mb[4];
	struct rsp_que *rsp;
	unsigned long flags;

	rsp = (struct rsp_que *) dev_id;
	if (!rsp) {
		ql_log(ql_log_info, NULL, 0x505d,
		    "%s: NULL response queue pointer.\n", __func__);
		return (IRQ_NONE);
	}

	ha = rsp->hw;
	reg = &ha->iobase->isp;
	status = 0;

	spin_lock_irqsave(&ha->hardware_lock, flags);
	vha = pci_get_drvdata(ha->pdev);
	for (iter = 50; iter--; ) {
		hccr = RD_REG_WORD(&reg->hccr);
		if (qla2x00_check_reg16_for_disconnect(vha, hccr))
			break;
		if (hccr & HCCR_RISC_PAUSE) {
			if (pci_channel_offline(ha->pdev))
				break;

			/*
			 * Issue a "HARD" reset in order for the RISC
			 * interrupt bit to be cleared.  Schedule a big
			 * hammer to get out of the RISC PAUSED state.
			 */
			WRT_REG_WORD(&reg->hccr, HCCR_RESET_RISC);
			RD_REG_WORD(&reg->hccr);

			ha->isp_ops->fw_dump(vha, 1);
			set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
			break;
		} else if ((RD_REG_WORD(&reg->istatus) & ISR_RISC_INT) == 0)
			break;

		if (RD_REG_WORD(&reg->semaphore) & BIT_0) {
			WRT_REG_WORD(&reg->hccr, HCCR_CLR_RISC_INT);
			RD_REG_WORD(&reg->hccr);

			/* Get mailbox data. */
			mb[0] = RD_MAILBOX_REG(ha, reg, 0);
			if (mb[0] > 0x3fff && mb[0] < 0x8000) {
				qla2x00_mbx_completion(vha, mb[0]);
				status |= MBX_INTERRUPT;
			} else if (mb[0] > 0x7fff && mb[0] < 0xc000) {
				mb[1] = RD_MAILBOX_REG(ha, reg, 1);
				mb[2] = RD_MAILBOX_REG(ha, reg, 2);
				mb[3] = RD_MAILBOX_REG(ha, reg, 3);
				qla2x00_async_event(vha, rsp, mb);
			} else {
				/*EMPTY*/
				ql_dbg(ql_dbg_async, vha, 0x5025,
				    "Unrecognized interrupt type (%d).\n",
				    mb[0]);
			}
			/* Release mailbox registers. */
			WRT_REG_WORD(&reg->semaphore, 0);
			RD_REG_WORD(&reg->semaphore);
		} else {
			qla2x00_process_response_queue(rsp);

			WRT_REG_WORD(&reg->hccr, HCCR_CLR_RISC_INT);
			RD_REG_WORD(&reg->hccr);
		}
	}
	qla2x00_handle_mbx_completion(ha, status);
	spin_unlock_irqrestore(&ha->hardware_lock, flags);

	return (IRQ_HANDLED);
}

bool
qla2x00_check_reg32_for_disconnect(scsi_qla_host_t *vha, uint32_t reg)
{
	/* Check for PCI disconnection */
	if (reg == 0xffffffff && !pci_channel_offline(vha->hw->pdev)) {
		if (!test_and_set_bit(PFLG_DISCONNECTED, &vha->pci_flags) &&
		    !test_bit(PFLG_DRIVER_REMOVING, &vha->pci_flags) &&
		    !test_bit(PFLG_DRIVER_PROBING, &vha->pci_flags)) {
			/*
			 * Schedule this (only once) on the default system
			 * workqueue so that all the adapter workqueues and
			 * the DPC thread can be shutdown cleanly.
			 */
			schedule_work(&vha->hw->board_disable);
		}
		return true;
	} else
		return false;
}

bool
qla2x00_check_reg16_for_disconnect(scsi_qla_host_t *vha, uint16_t reg)
{
	return qla2x00_check_reg32_for_disconnect(vha, 0xffff0000 | reg);
}
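
/**
 * qla2300_intr_handler() - Process interrupts for the ISP23xx and ISP63xx.
 * @irq: interrupt number
 * @dev_id: SCSI driver HA context
 *
 * Called by system whenever the host adapter generates an interrupt.
 *
 * Returns handled flag.
 */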
irqreturn_t
qla2300_intr_handler(int irq, void *dev_id)
{
	scsi_qla_host_t *vha;
	struct device_reg_2xxx __iomem *reg;
	int status;
	unsigned long iter;
	uint32_t stat;
	uint16_t hccr;
	uint16_t mb[4];
	struct rsp_que *rsp;
	struct qla_hw_data *ha;
	unsigned long flags;

	rsp = (struct rsp_que *) dev_id;
	if (!rsp) {
		ql_log(ql_log_info, NULL, 0x5058,
		    "%s: NULL response queue pointer.\n", __func__);
		return (IRQ_NONE);
	}

	ha = rsp->hw;
	reg = &ha->iobase->isp;
	status = 0;

	spin_lock_irqsave(&ha->hardware_lock, flags);
	vha = pci_get_drvdata(ha->pdev);
	for (iter = 50; iter--; ) {
		stat = RD_REG_DWORD(&reg->u.isp2300.host_status);
		if (qla2x00_check_reg32_for_disconnect(vha, stat))
			break;
		if (stat & HSR_RISC_PAUSED) {
			if (unlikely(pci_channel_offline(ha->pdev)))
				break;

			hccr = RD_REG_WORD(&reg->hccr);

			if (hccr & (BIT_15 | BIT_13 | BIT_11 | BIT_8))
				ql_log(ql_log_warn, vha, 0x5026,
				    "Parity error -- HCCR=%x, Dumping "
				    "firmware.\n", hccr);
			else
				ql_log(ql_log_warn, vha, 0x5027,
				    "RISC paused -- HCCR=%x, Dumping "
				    "firmware.\n", hccr);

			/*
			 * Issue a "HARD" reset in order for the RISC
			 * interrupt bit to be cleared.  Schedule a big
			 * hammer to get out of the RISC PAUSED state.
			 */
			WRT_REG_WORD(&reg->hccr, HCCR_RESET_RISC);
			RD_REG_WORD(&reg->hccr);

			ha->isp_ops->fw_dump(vha, 1);
			set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
			break;
		} else if ((stat & HSR_RISC_INT) == 0)
			break;

		switch (stat & 0xff) {
		case 0x1:
		case 0x2:
		case 0x10:
		case 0x11:
			qla2x00_mbx_completion(vha, MSW(stat));
			status |= MBX_INTERRUPT;

			/* Release mailbox registers. */
			WRT_REG_WORD(&reg->semaphore, 0);
			break;
		case 0x12:
			mb[0] = MSW(stat);
			mb[1] = RD_MAILBOX_REG(ha, reg, 1);
			mb[2] = RD_MAILBOX_REG(ha, reg, 2);
			mb[3] = RD_MAILBOX_REG(ha, reg, 3);
			qla2x00_async_event(vha, rsp, mb);
			break;
		case 0x13:
			qla2x00_process_response_queue(rsp);
			break;
		case 0x15:
			mb[0] = MBA_CMPLT_1_16BIT;
			mb[1] = MSW(stat);
			qla2x00_async_event(vha, rsp, mb);
			break;
		case 0x16:
			mb[0] = MBA_SCSI_COMPLETION;
			mb[1] = MSW(stat);
			mb[2] = RD_MAILBOX_REG(ha, reg, 2);
			qla2x00_async_event(vha, rsp, mb);
			break;
		default:
			ql_dbg(ql_dbg_async, vha, 0x5028,
			    "Unrecognized interrupt type (%d).\n", stat & 0xff);
			break;
		}
		WRT_REG_WORD(&reg->hccr, HCCR_CLR_RISC_INT);
		RD_REG_WORD_RELAXED(&reg->hccr);
	}
	qla2x00_handle_mbx_completion(ha, status);
	spin_unlock_irqrestore(&ha->hardware_lock, flags);

	return (IRQ_HANDLED);
}
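
/**
 * qla2x00_mbx_completion() - Process mailbox command completions.
 * @vha: SCSI driver HA context
 * @mb0: Mailbox 0 contents
 */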
static void
qla2x00_mbx_completion(scsi_qla_host_t *vha, uint16_t mb0)
{
	uint16_t cnt;
	uint32_t mboxes;
	uint16_t __iomem *wptr;
	struct qla_hw_data *ha = vha->hw;
	struct device_reg_2xxx __iomem *reg = &ha->iobase->isp;

	/* Read all mbox registers? */
	mboxes = (1 << ha->mbx_count) - 1;
	if (!ha->mcp)
		ql_dbg(ql_dbg_async, vha, 0x5001, "MBX pointer ERROR.\n");
	else
		mboxes = ha->mcp->in_mb;

	/* Load return mailbox registers. */
	ha->flags.mbox_int = 1;
	ha->mailbox_out[0] = mb0;
	mboxes >>= 1;
	wptr = (uint16_t __iomem *)MAILBOX_REG(ha, reg, 1);

	for (cnt = 1; cnt < ha->mbx_count; cnt++) {
		if (IS_QLA2200(ha) && cnt == 8)
			wptr = (uint16_t __iomem *)MAILBOX_REG(ha, reg, 8);
		if ((cnt == 4 || cnt == 5) && (mboxes & BIT_0))
			ha->mailbox_out[cnt] = qla2x00_debounce_register(wptr);
		else if (mboxes & BIT_0)
			ha->mailbox_out[cnt] = RD_REG_WORD(wptr);

		wptr++;
		mboxes >>= 1;
	}
}

static void
qla81xx_idc_event(scsi_qla_host_t *vha, uint16_t aen, uint16_t descr)
{
	static char *event[] =
		{ "Complete", "Request Notification", "Time Extension" };
	int rval;
	struct device_reg_24xx __iomem *reg24 = &vha->hw->iobase->isp24;
	struct device_reg_82xx __iomem *reg82 = &vha->hw->iobase->isp82;
	uint16_t __iomem *wptr;
	uint16_t cnt, timeout, mb[QLA_IDC_ACK_REGS];

	/* Seed data -- mailbox1 -> mailbox7. */
	if (IS_QLA81XX(vha->hw) || IS_QLA83XX(vha->hw))
		wptr = (uint16_t __iomem *)&reg24->mailbox1;
	else if (IS_QLA8044(vha->hw))
		wptr = (uint16_t __iomem *)&reg82->mailbox_out[1];
	else
		return;

	for (cnt = 0; cnt < QLA_IDC_ACK_REGS; cnt++, wptr++)
		mb[cnt] = RD_REG_WORD(wptr);

	ql_dbg(ql_dbg_async, vha, 0x5021,
	    "Inter-Driver Communication %s -- "
	    "%04x %04x %04x %04x %04x %04x %04x.\n",
	    event[aen & 0xff], mb[0], mb[1], mb[2], mb[3],
	    mb[4], mb[5], mb[6]);
	switch (aen) {
	/* Handle IDC Error completion case. */
	case MBA_IDC_COMPLETE:
		if (mb[1] >> 15) {
			vha->hw->flags.idc_compl_status = 1;
			if (vha->hw->notify_dcbx_comp && !vha->vp_idx)
				complete(&vha->hw->dcbx_comp);
		}
		break;

	/* Handle IDC Request Notification. */
	case MBA_IDC_NOTIFY:
		/* Acknowledgement needed? [Notify && non-zero timeout]. */
		timeout = (descr >> 8) & 0xf;
		ql_dbg(ql_dbg_async, vha, 0x5022,
		    "%lu Inter-Driver Communication %s -- ACK timeout=%d.\n",
		    vha->host_no, event[aen & 0xff], timeout);

		if (!timeout)
			return;
		rval = qla2x00_post_idc_ack_work(vha, mb);
		if (rval != QLA_SUCCESS)
			ql_log(ql_log_warn, vha, 0x5023,
			    "IDC failed to post ACK.\n");
		break;
	case MBA_IDC_TIME_EXT:
		vha->hw->idc_extend_tmo = descr;
		ql_dbg(ql_dbg_async, vha, 0x5087,
		    "%lu Inter-Driver Communication %s -- "
		    "Extend timeout by=%d.\n",
		    vha->host_no, event[aen & 0xff], vha->hw->idc_extend_tmo);
		break;
	}
}

#define LS_UNKNOWN	2
const char *
qla2x00_get_link_speed_str(struct qla_hw_data *ha, uint16_t speed)
{
	static const char *const link_speeds[] = {
		"1", "2", "?", "4", "8", "16", "32", "10"
	};
#define	QLA_LAST_SPEED 7

	if (IS_QLA2100(ha) || IS_QLA2200(ha))
		return link_speeds[0];
	else if (speed == 0x13)
		return link_speeds[QLA_LAST_SPEED];
	else if (speed < QLA_LAST_SPEED)
		return link_speeds[speed];
	else
		return link_speeds[LS_UNKNOWN];
}

static void
qla83xx_handle_8200_aen(scsi_qla_host_t *vha, uint16_t *mb)
{
	struct qla_hw_data *ha = vha->hw;
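
	/*
	 * 8200 AEN Interpretation:
	 * mb[0] = AEN code
	 * mb[1] = AEN Reason code
	 * mb[2] = LSW of Peg-Halt Status-1 Register
	 * mb[6] = MSW of Peg-Halt Status-1 Register
	 * mb[3] = LSW of Peg-Halt Status-2 register
	 * mb[7] = MSW of Peg-Halt Status-2 register
	 * mb[4] = IDC Device-State Register value
	 * mb[5] = IDC Driver-Presence Register value
	 */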
	ql_dbg(ql_dbg_async, vha, 0x506b, "AEN Code: mb[0] = 0x%x AEN reason: "
	    "mb[1] = 0x%x PH-status1: mb[2] = 0x%x PH-status1: mb[6] = 0x%x.\n",
	    mb[0], mb[1], mb[2], mb[6]);
	ql_dbg(ql_dbg_async, vha, 0x506c, "PH-status2: mb[3] = 0x%x "
	    "PH-status2: mb[7] = 0x%x Device-State: mb[4] = 0x%x "
	    "Drv-Presence: mb[5] = 0x%x.\n", mb[3], mb[7], mb[4], mb[5]);

	if (mb[1] & (IDC_PEG_HALT_STATUS_CHANGE | IDC_NIC_FW_REPORTED_FAILURE |
	    IDC_HEARTBEAT_FAILURE)) {
		ha->flags.nic_core_hung = 1;
		ql_log(ql_log_warn, vha, 0x5060,
		    "83XX: F/W Error Reported: Check if reset required.\n");

		if (mb[1] & IDC_PEG_HALT_STATUS_CHANGE) {
			uint32_t protocol_engine_id, fw_err_code, err_level;
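
			/*
			 * Peg-Halt Status-1 Register decode
			 * (LSW = mb[2], MSW = mb[6]):
			 * Bits 0-7   = protocol-engine ID
			 * Bits 8-28  = f/w error code
			 * Bits 29-31 = error level:
			 *     0x1 = Non-Fatal error
			 *     0x2 = Recoverable Fatal error
			 *     0x4 = Unrecoverable Fatal error
			 */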
			protocol_engine_id = (mb[2] & 0xff);
			fw_err_code = (((mb[2] & 0xff00) >> 8) |
			    ((mb[6] & 0x1fff) << 8));
			err_level = ((mb[6] & 0xe000) >> 13);
			ql_log(ql_log_warn, vha, 0x5061, "PegHalt Status-1 "
			    "Register: protocol_engine_id=0x%x "
			    "fw_err_code=0x%x err_level=0x%x.\n",
			    protocol_engine_id, fw_err_code, err_level);
			ql_log(ql_log_warn, vha, 0x5062, "PegHalt Status-2 "
			    "Register: 0x%x%x.\n", mb[7], mb[3]);
			if (err_level == ERR_LEVEL_NON_FATAL) {
				ql_log(ql_log_warn, vha, 0x5063,
				    "Not a fatal error, f/w has recovered itself.\n");
			} else if (err_level == ERR_LEVEL_RECOVERABLE_FATAL) {
				ql_log(ql_log_fatal, vha, 0x5064,
				    "Recoverable Fatal error: Chip reset "
				    "required.\n");
				qla83xx_schedule_work(vha,
				    QLA83XX_NIC_CORE_RESET);
			} else if (err_level == ERR_LEVEL_UNRECOVERABLE_FATAL) {
				ql_log(ql_log_fatal, vha, 0x5065,
				    "Unrecoverable Fatal error: Set FAILED "
				    "state, reboot required.\n");
				qla83xx_schedule_work(vha,
				    QLA83XX_NIC_CORE_UNRECOVERABLE);
			}
		}

		if (mb[1] & IDC_NIC_FW_REPORTED_FAILURE) {
			uint16_t peg_fw_state, nw_interface_link_up;
			uint16_t nw_interface_signal_detect, sfp_status;
			uint16_t htbt_counter, htbt_monitor_enable;
			uint16_t sfp_additional_info, sfp_multirate;
			uint16_t sfp_tx_fault, link_speed, dcbx_status;
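
			/*
			 * Peg-to-FC Status Register decode
			 * (LSW = mb[2], MSW = mb[6]):
			 * mb[2] bits 0-7   = Peg-firmware state
			 *       bit  8     = N/W interface link-up
			 *       bit  9     = N/W interface signal detected
			 *       bits 10-11 = SFP status
			 *       bits 12-14 = heartbeat counter
			 *       bit  15    = heartbeat monitor enable
			 * mb[6] bits 0-1   = SFP additional info
			 *       bit  2     = SFP multirate
			 *       bit  3     = SFP TX fault
			 *       bits 4-6   = link speed
			 *       bits 12-14 = DCBX status
			 */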
			peg_fw_state = (mb[2] & 0x00ff);
			nw_interface_link_up = ((mb[2] & 0x0100) >> 8);
			nw_interface_signal_detect = ((mb[2] & 0x0200) >> 9);
			sfp_status = ((mb[2] & 0x0c00) >> 10);
			htbt_counter = ((mb[2] & 0x7000) >> 12);
			htbt_monitor_enable = ((mb[2] & 0x8000) >> 15);
			sfp_additional_info = (mb[6] & 0x0003);
			sfp_multirate = ((mb[6] & 0x0004) >> 2);
			sfp_tx_fault = ((mb[6] & 0x0008) >> 3);
			link_speed = ((mb[6] & 0x0070) >> 4);
			dcbx_status = ((mb[6] & 0x7000) >> 12);

			ql_log(ql_log_warn, vha, 0x5066,
			    "Peg-to-Fc Status Register:\n"
			    "peg_fw_state=0x%x, nw_interface_link_up=0x%x, "
			    "nw_interface_signal_detect=0x%x"
			    "\nsfp_status=0x%x.\n", peg_fw_state,
			    nw_interface_link_up, nw_interface_signal_detect,
			    sfp_status);
			ql_log(ql_log_warn, vha, 0x5067,
			    "htbt_counter=0x%x, htbt_monitor_enable=0x%x, "
			    "sfp_additional_info=0x%x, sfp_multirate=0x%x.\n",
			    htbt_counter, htbt_monitor_enable,
			    sfp_additional_info, sfp_multirate);
			ql_log(ql_log_warn, vha, 0x5068,
			    "sfp_tx_fault=0x%x, link_speed=0x%x, "
			    "dcbx_status=0x%x.\n", sfp_tx_fault, link_speed,
			    dcbx_status);

			qla83xx_schedule_work(vha, QLA83XX_NIC_CORE_RESET);
		}

		if (mb[1] & IDC_HEARTBEAT_FAILURE) {
			ql_log(ql_log_warn, vha, 0x5069,
			    "Heartbeat Failure encountered, chip reset "
			    "required.\n");

			qla83xx_schedule_work(vha, QLA83XX_NIC_CORE_RESET);
		}
	}

	if (mb[1] & IDC_DEVICE_STATE_CHANGE) {
		ql_log(ql_log_info, vha, 0x506a,
		    "IDC Device-State changed = 0x%x.\n", mb[4]);
		if (ha->flags.nic_core_reset_owner)
			return;
		qla83xx_schedule_work(vha, MBA_IDC_AEN);
	}
}

int
qla2x00_is_a_vp_did(scsi_qla_host_t *vha, uint32_t rscn_entry)
{
	struct qla_hw_data *ha = vha->hw;
	scsi_qla_host_t *vp;
	uint32_t vp_did;
	unsigned long flags;
	int ret = 0;

	if (!ha->num_vhosts)
		return ret;

	spin_lock_irqsave(&ha->vport_slock, flags);
	list_for_each_entry(vp, &ha->vp_list, list) {
		vp_did = vp->d_id.b24;
		if (vp_did == rscn_entry) {
			ret = 1;
			break;
		}
	}
	spin_unlock_irqrestore(&ha->vport_slock, flags);

	return ret;
}

fc_port_t *
qla2x00_find_fcport_by_loopid(scsi_qla_host_t *vha, uint16_t loop_id)
{
	fc_port_t *f, *tf;

	f = tf = NULL;
	list_for_each_entry_safe(f, tf, &vha->vp_fcports, list)
		if (f->loop_id == loop_id)
			return f;
	return NULL;
}

fc_port_t *
qla2x00_find_fcport_by_wwpn(scsi_qla_host_t *vha, u8 *wwpn, u8 incl_deleted)
{
	fc_port_t *f, *tf;

	f = tf = NULL;
	list_for_each_entry_safe(f, tf, &vha->vp_fcports, list) {
		if (memcmp(f->port_name, wwpn, WWN_SIZE) == 0) {
			if (incl_deleted)
				return f;
			else if (f->deleted == 0)
				return f;
		}
	}
	return NULL;
}

fc_port_t *
qla2x00_find_fcport_by_nportid(scsi_qla_host_t *vha, port_id_t *id,
	u8 incl_deleted)
{
	fc_port_t *f, *tf;

	f = tf = NULL;
	list_for_each_entry_safe(f, tf, &vha->vp_fcports, list) {
		if (f->d_id.b24 == id->b24) {
			if (incl_deleted)
				return f;
			else if (f->deleted == 0)
				return f;
		}
	}
	return NULL;
}
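
/**
 * qla2x00_async_event() - Process asynchronous events.
 * @vha: SCSI driver HA context
 * @rsp: response queue
 * @mb: Mailbox registers (0 - 3)
 */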
void
qla2x00_async_event(scsi_qla_host_t *vha, struct rsp_que *rsp, uint16_t *mb)
{
	uint16_t handle_cnt;
	uint16_t cnt, mbx;
	uint32_t handles[5];
	struct qla_hw_data *ha = vha->hw;
	struct device_reg_2xxx __iomem *reg = &ha->iobase->isp;
	struct device_reg_24xx __iomem *reg24 = &ha->iobase->isp24;
	struct device_reg_82xx __iomem *reg82 = &ha->iobase->isp82;
	uint32_t rscn_entry, host_pid;
	unsigned long flags;
	fc_port_t *fcport = NULL;

	/* Setup to process RIO completion. */
	handle_cnt = 0;
	if (IS_CNA_CAPABLE(ha))
		goto skip_rio;
	switch (mb[0]) {
	case MBA_SCSI_COMPLETION:
		handles[0] = le32_to_cpu((uint32_t)((mb[2] << 16) | mb[1]));
		handle_cnt = 1;
		break;
	case MBA_CMPLT_1_16BIT:
		handles[0] = mb[1];
		handle_cnt = 1;
		mb[0] = MBA_SCSI_COMPLETION;
		break;
	case MBA_CMPLT_2_16BIT:
		handles[0] = mb[1];
		handles[1] = mb[2];
		handle_cnt = 2;
		mb[0] = MBA_SCSI_COMPLETION;
		break;
	case MBA_CMPLT_3_16BIT:
		handles[0] = mb[1];
		handles[1] = mb[2];
		handles[2] = mb[3];
		handle_cnt = 3;
		mb[0] = MBA_SCSI_COMPLETION;
		break;
	case MBA_CMPLT_4_16BIT:
		handles[0] = mb[1];
		handles[1] = mb[2];
		handles[2] = mb[3];
		handles[3] = (uint32_t)RD_MAILBOX_REG(ha, reg, 6);
		handle_cnt = 4;
		mb[0] = MBA_SCSI_COMPLETION;
		break;
	case MBA_CMPLT_5_16BIT:
		handles[0] = mb[1];
		handles[1] = mb[2];
		handles[2] = mb[3];
		handles[3] = (uint32_t)RD_MAILBOX_REG(ha, reg, 6);
		handles[4] = (uint32_t)RD_MAILBOX_REG(ha, reg, 7);
		handle_cnt = 5;
		mb[0] = MBA_SCSI_COMPLETION;
		break;
	case MBA_CMPLT_2_32BIT:
		handles[0] = le32_to_cpu((uint32_t)((mb[2] << 16) | mb[1]));
		handles[1] = le32_to_cpu(
		    ((uint32_t)(RD_MAILBOX_REG(ha, reg, 7) << 16)) |
		    RD_MAILBOX_REG(ha, reg, 6));
		handle_cnt = 2;
		mb[0] = MBA_SCSI_COMPLETION;
		break;
	default:
		break;
	}
skip_rio:
	switch (mb[0]) {
	case MBA_SCSI_COMPLETION:	/* Fast Post */
		if (!vha->flags.online)
			break;

		for (cnt = 0; cnt < handle_cnt; cnt++)
			qla2x00_process_completed_request(vha, rsp->req,
				handles[cnt]);
		break;

	case MBA_RESET:			/* Reset */
		ql_dbg(ql_dbg_async, vha, 0x5002,
		    "Asynchronous RESET.\n");

		set_bit(RESET_MARKER_NEEDED, &vha->dpc_flags);
		break;

	case MBA_SYSTEM_ERR:		/* System Error */
		mbx = (IS_QLA81XX(ha) || IS_QLA83XX(ha) || IS_QLA27XX(ha)) ?
			RD_REG_WORD(&reg24->mailbox7) : 0;
		ql_log(ql_log_warn, vha, 0x5003,
		    "ISP System Error - mbx1=%xh mbx2=%xh mbx3=%xh "
		    "mbx7=%xh.\n", mb[1], mb[2], mb[3], mbx);

		ha->isp_ops->fw_dump(vha, 1);
		ha->flags.fw_init_done = 0;
		QLA_FW_STOPPED(ha);

		if (IS_FWI2_CAPABLE(ha)) {
			if (mb[1] == 0 && mb[2] == 0) {
				ql_log(ql_log_fatal, vha, 0x5004,
				    "Unrecoverable Hardware Error: adapter "
				    "marked OFFLINE!\n");
				vha->flags.online = 0;
				vha->device_flags |= DFLG_DEV_FAILED;
			} else {
				/* Check to see if MPI timeout occurred */
				if ((mbx & MBX_3) && (ha->port_no == 0))
					set_bit(MPI_RESET_NEEDED,
					    &vha->dpc_flags);

				set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
			}
		} else if (mb[1] == 0) {
			ql_log(ql_log_fatal, vha, 0x5005,
			    "Unrecoverable Hardware Error: adapter marked "
			    "OFFLINE!\n");
			vha->flags.online = 0;
			vha->device_flags |= DFLG_DEV_FAILED;
		} else
			set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
		break;

	case MBA_REQ_TRANSFER_ERR:	/* Request Transfer Error */
		ql_log(ql_log_warn, vha, 0x5006,
		    "ISP Request Transfer Error (%x).\n", mb[1]);

		set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
		break;

	case MBA_RSP_TRANSFER_ERR:	/* Response Transfer Error */
		ql_log(ql_log_warn, vha, 0x5007,
		    "ISP Response Transfer Error (%x).\n", mb[1]);

		set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
		break;

	case MBA_WAKEUP_THRES:		/* Request Queue Wake-up */
		ql_dbg(ql_dbg_async, vha, 0x5008,
		    "Asynchronous WAKEUP_THRES (%x).\n", mb[1]);
		break;

	case MBA_LOOP_INIT_ERR:
		ql_log(ql_log_warn, vha, 0x5090,
		    "LOOP INIT ERROR (%x).\n", mb[1]);
		ha->isp_ops->fw_dump(vha, 1);
		set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
		break;

	case MBA_LIP_OCCURRED:		/* Loop Initialization Procedure */
		ha->flags.lip_ae = 1;
		ha->flags.n2n_ae = 0;

		ql_dbg(ql_dbg_async, vha, 0x5009,
		    "LIP occurred (%x).\n", mb[1]);

		if (atomic_read(&vha->loop_state) != LOOP_DOWN) {
			atomic_set(&vha->loop_state, LOOP_DOWN);
			atomic_set(&vha->loop_down_timer, LOOP_DOWN_TIME);
			qla2x00_mark_all_devices_lost(vha, 1);
		}

		if (vha->vp_idx) {
			atomic_set(&vha->vp_state, VP_FAILED);
			fc_vport_set_state(vha->fc_vport, FC_VPORT_FAILED);
		}

		set_bit(REGISTER_FC4_NEEDED, &vha->dpc_flags);
		set_bit(REGISTER_FDMI_NEEDED, &vha->dpc_flags);

		vha->flags.management_server_logged_in = 0;
		qla2x00_post_aen_work(vha, FCH_EVT_LIP, mb[1]);
		break;

	case MBA_LOOP_UP:		/* Loop Up Event */
		if (IS_QLA2100(ha) || IS_QLA2200(ha))
			ha->link_data_rate = PORT_SPEED_1GB;
		else
			ha->link_data_rate = mb[1];

		ql_log(ql_log_info, vha, 0x500a,
		    "LOOP UP detected (%s Gbps).\n",
		    qla2x00_get_link_speed_str(ha, ha->link_data_rate));

		vha->flags.management_server_logged_in = 0;
		qla2x00_post_aen_work(vha, FCH_EVT_LINKUP, ha->link_data_rate);
		break;

	case MBA_LOOP_DOWN:		/* Loop Down Event */
		ha->flags.n2n_ae = 0;
		ha->flags.lip_ae = 0;
		ha->current_topology = 0;

		mbx = (IS_QLA81XX(ha) || IS_QLA8031(ha))
			? RD_REG_WORD(&reg24->mailbox4) : 0;
		mbx = (IS_P3P_TYPE(ha)) ? RD_REG_WORD(&reg82->mailbox_out[4])
			: mbx;
		ql_log(ql_log_info, vha, 0x500b,
		    "LOOP DOWN detected (%x %x %x %x).\n",
		    mb[1], mb[2], mb[3], mbx);

		if (atomic_read(&vha->loop_state) != LOOP_DOWN) {
			atomic_set(&vha->loop_state, LOOP_DOWN);
			atomic_set(&vha->loop_down_timer, LOOP_DOWN_TIME);
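			/*
			 * In case of loop down, restore WWPN from
			 * NVRAM in case of FA-WWPN capable ISP.
			 * Restore for Physical Port only.
			 */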
			if (!vha->vp_idx) {
				if (ha->flags.fawwpn_enabled) {
					void *wwpn = ha->init_cb->port_name;
					memcpy(vha->port_name, wwpn, WWN_SIZE);
					fc_host_port_name(vha->host) =
					    wwn_to_u64(vha->port_name);
					ql_dbg(ql_dbg_init + ql_dbg_verbose,
					    vha, 0x00d8, "LOOP DOWN detected, "
					    "restore WWPN %016llx\n",
					    wwn_to_u64(vha->port_name));
				}

				clear_bit(VP_CONFIG_OK, &vha->vp_flags);
			}

			vha->device_flags |= DFLG_NO_CABLE;
			qla2x00_mark_all_devices_lost(vha, 1);
		}

		if (vha->vp_idx) {
			atomic_set(&vha->vp_state, VP_FAILED);
			fc_vport_set_state(vha->fc_vport, FC_VPORT_FAILED);
		}

		vha->flags.management_server_logged_in = 0;
		ha->link_data_rate = PORT_SPEED_UNKNOWN;
		qla2x00_post_aen_work(vha, FCH_EVT_LINKDOWN, 0);
		break;

	case MBA_LIP_RESET:		/* LIP reset occurred */
		ql_dbg(ql_dbg_async, vha, 0x500c,
		    "LIP reset occurred (%x).\n", mb[1]);

		if (atomic_read(&vha->loop_state) != LOOP_DOWN) {
			atomic_set(&vha->loop_state, LOOP_DOWN);
			atomic_set(&vha->loop_down_timer, LOOP_DOWN_TIME);
			qla2x00_mark_all_devices_lost(vha, 1);
		}

		if (vha->vp_idx) {
			atomic_set(&vha->vp_state, VP_FAILED);
			fc_vport_set_state(vha->fc_vport, FC_VPORT_FAILED);
		}

		set_bit(RESET_MARKER_NEEDED, &vha->dpc_flags);

		ha->operating_mode = LOOP;
		vha->flags.management_server_logged_in = 0;
		qla2x00_post_aen_work(vha, FCH_EVT_LIPRESET, mb[1]);
		break;

	/* case MBA_DCBX_COMPLETE: */
	case MBA_POINT_TO_POINT:	/* Point-to-point */
		ha->flags.lip_ae = 0;
		ha->flags.n2n_ae = 1;

		if (IS_QLA2100(ha))
			break;

		if (IS_CNA_CAPABLE(ha)) {
			ql_dbg(ql_dbg_async, vha, 0x500d,
			    "DCBX Completed -- %04x %04x %04x.\n",
			    mb[1], mb[2], mb[3]);
			if (ha->notify_dcbx_comp && !vha->vp_idx)
				complete(&ha->dcbx_comp);

		} else
			ql_dbg(ql_dbg_async, vha, 0x500e,
			    "Asynchronous P2P MODE received.\n");
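
		/*
		 * Until there's a transition from loop down to loop up, treat
		 * this as loop down only.
		 */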
		if (atomic_read(&vha->loop_state) != LOOP_DOWN) {
			atomic_set(&vha->loop_state, LOOP_DOWN);
			if (!atomic_read(&vha->loop_down_timer))
				atomic_set(&vha->loop_down_timer,
				    LOOP_DOWN_TIME);
			qla2x00_mark_all_devices_lost(vha, 1);
		}

		if (vha->vp_idx) {
			atomic_set(&vha->vp_state, VP_FAILED);
			fc_vport_set_state(vha->fc_vport, FC_VPORT_FAILED);
		}

		if (!(test_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags)))
			set_bit(RESET_MARKER_NEEDED, &vha->dpc_flags);

		set_bit(REGISTER_FC4_NEEDED, &vha->dpc_flags);
		set_bit(REGISTER_FDMI_NEEDED, &vha->dpc_flags);

		ha->flags.gpsc_supported = 1;
		vha->flags.management_server_logged_in = 0;
		break;

	case MBA_CHG_IN_CONNECTION:	/* Change in connection mode */
		if (IS_QLA2100(ha))
			break;

		ql_dbg(ql_dbg_async, vha, 0x500f,
		    "Configuration change detected: value=%x.\n", mb[1]);

		if (atomic_read(&vha->loop_state) != LOOP_DOWN) {
			atomic_set(&vha->loop_state, LOOP_DOWN);
			if (!atomic_read(&vha->loop_down_timer))
				atomic_set(&vha->loop_down_timer,
				    LOOP_DOWN_TIME);
			qla2x00_mark_all_devices_lost(vha, 1);
		}

		if (vha->vp_idx) {
			atomic_set(&vha->vp_state, VP_FAILED);
			fc_vport_set_state(vha->fc_vport, FC_VPORT_FAILED);
		}

		set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags);
		set_bit(LOCAL_LOOP_UPDATE, &vha->dpc_flags);
		break;

	case MBA_PORT_UPDATE:		/* Port database update */
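		/*
		 * Handle only global and vn-port update events
		 *
		 * Relevant inputs:
		 * mb[1] = N_Port handle of changed port
		 *         OR 0xffff for global event
		 * mb[2] = New login state
		 *         7 = Port logged out
		 * mb[3] = LSB is vp_idx, 0xff = all vps
		 *
		 * Skip processing if:
		 *       Event is global, vp_idx is NOT all vps,
		 *           vp_idx does not match
		 *       Event is not global, vp_idx does not match
		 */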
		if (IS_QLA2XXX_MIDTYPE(ha) &&
		    ((mb[1] == 0xffff && (mb[3] & 0xff) != 0xff) ||
		    (mb[1] != 0xffff)) && vha->vp_idx != (mb[3] & 0xff))
			break;

		if (mb[2] == 0x7) {
			ql_dbg(ql_dbg_async, vha, 0x5010,
			    "Port %s %04x %04x %04x.\n",
			    mb[1] == 0xffff ? "unavailable" : "logout",
			    mb[1], mb[2], mb[3]);

			if (mb[1] == 0xffff)
				goto global_port_update;

			if (mb[1] == NPH_SNS_LID(ha)) {
				set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags);
				set_bit(LOCAL_LOOP_UPDATE, &vha->dpc_flags);
				break;
			}

			/* use handle_cnt for loop id/nport handle */
			if (IS_FWI2_CAPABLE(ha))
				handle_cnt = NPH_SNS;
			else
				handle_cnt = SIMPLE_NAME_SERVER;
			if (mb[1] == handle_cnt) {
				set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags);
				set_bit(LOCAL_LOOP_UPDATE, &vha->dpc_flags);
				break;
			}

			fcport = qla2x00_find_fcport_by_loopid(vha, mb[1]);
			if (!fcport)
				break;
			if (atomic_read(&fcport->state) != FCS_ONLINE)
				break;
			ql_dbg(ql_dbg_async, vha, 0x508a,
			    "Marking port lost loopid=%04x portid=%06x.\n",
			    fcport->loop_id, fcport->d_id.b24);
			if (qla_ini_mode_enabled(vha)) {
				qla2x00_mark_device_lost(fcport->vha, fcport, 1, 1);
				fcport->logout_on_delete = 0;
				qlt_schedule_sess_for_deletion_lock(fcport);
			}
			break;

global_port_update:
			if (atomic_read(&vha->loop_state) != LOOP_DOWN) {
				atomic_set(&vha->loop_state, LOOP_DOWN);
				atomic_set(&vha->loop_down_timer,
				    LOOP_DOWN_TIME);
				vha->device_flags |= DFLG_NO_CABLE;
				qla2x00_mark_all_devices_lost(vha, 1);
			}

			if (vha->vp_idx) {
				atomic_set(&vha->vp_state, VP_FAILED);
				fc_vport_set_state(vha->fc_vport,
				    FC_VPORT_FAILED);
				qla2x00_mark_all_devices_lost(vha, 1);
			}

			vha->flags.management_server_logged_in = 0;
			ha->link_data_rate = PORT_SPEED_UNKNOWN;
			break;
		}
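
		/*
		 * If PORT UPDATE is global (received LIP_OCCURRED/LIP_RESET
		 * event etc. earlier indicating loop is down) then process
		 * it.  Otherwise ignore it and wait for RSCN to come in.
		 */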
		atomic_set(&vha->loop_down_timer, 0);
		if (atomic_read(&vha->loop_state) != LOOP_DOWN &&
		    atomic_read(&vha->loop_state) != LOOP_DEAD) {
			ql_dbg(ql_dbg_async, vha, 0x5011,
			    "Asynchronous PORT UPDATE ignored %04x/%04x/%04x.\n",
			    mb[1], mb[2], mb[3]);

			qlt_async_event(mb[0], vha, mb);
			break;
		}

		ql_dbg(ql_dbg_async, vha, 0x5012,
		    "Port database changed %04x %04x %04x.\n",
		    mb[1], mb[2], mb[3]);

		/*
		 * Mark all devices as missing so we will login again.
		 */
		atomic_set(&vha->loop_state, LOOP_UP);

		qla2x00_mark_all_devices_lost(vha, 1);

		set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags);
		set_bit(LOCAL_LOOP_UPDATE, &vha->dpc_flags);
		set_bit(VP_CONFIG_OK, &vha->vp_flags);

		qlt_async_event(mb[0], vha, mb);
		break;

	case MBA_RSCN_UPDATE:		/* State Change Registration */
		/* Check if the Vport has issued a SCR */
		if (vha->vp_idx && test_bit(VP_SCR_NEEDED, &vha->vp_flags))
			break;

		/* Only handle SCNs for our Vport index. */
		if (ha->flags.npiv_supported && vha->vp_idx != (mb[3] & 0xff))
			break;

		ql_dbg(ql_dbg_async, vha, 0x5013,
		    "RSCN database changed -- %04x %04x %04x.\n",
		    mb[1], mb[2], mb[3]);

		rscn_entry = ((mb[1] & 0xff) << 16) | mb[2];
		host_pid = (vha->d_id.b.domain << 16) | (vha->d_id.b.area << 8)
			| vha->d_id.b.al_pa;
		if (rscn_entry == host_pid) {
			ql_dbg(ql_dbg_async, vha, 0x5014,
			    "Ignoring RSCN update to local host "
			    "port ID (%06x).\n", host_pid);
			break;
		}

		/* Ignore reserved bits from RSCN-payload. */
		rscn_entry = ((mb[1] & 0x3ff) << 16) | mb[2];

		/* Skip RSCNs for virtual ports on the same physical port */
		if (qla2x00_is_a_vp_did(vha, rscn_entry))
			break;

		atomic_set(&vha->loop_down_timer, 0);
		vha->flags.management_server_logged_in = 0;
		{
			struct event_arg ea;

			memset(&ea, 0, sizeof(ea));
			ea.event = FCME_RSCN;
			ea.id.b24 = rscn_entry;
			ea.id.b.rsvd_1 = rscn_entry >> 24;
			qla2x00_fcport_event_handler(vha, &ea);
			qla2x00_post_aen_work(vha, FCH_EVT_RSCN, rscn_entry);
		}
		break;

	/* case MBA_RIO_RESPONSE: */
	case MBA_ZIO_RESPONSE:
		ql_dbg(ql_dbg_async, vha, 0x5015,
		    "[R|Z]IO update completion.\n");

		if (IS_FWI2_CAPABLE(ha))
			qla24xx_process_response_queue(vha, rsp);
		else
			qla2x00_process_response_queue(rsp);
		break;

	case MBA_DISCARD_RND_FRAME:
		ql_dbg(ql_dbg_async, vha, 0x5016,
		    "Discard RND Frame -- %04x %04x %04x.\n",
		    mb[1], mb[2], mb[3]);
		break;

	case MBA_TRACE_NOTIFICATION:
		ql_dbg(ql_dbg_async, vha, 0x5017,
		    "Trace Notification -- %04x %04x.\n", mb[1], mb[2]);
		break;

	case MBA_ISP84XX_ALERT:
		ql_dbg(ql_dbg_async, vha, 0x5018,
		    "ISP84XX Alert Notification -- %04x %04x %04x.\n",
		    mb[1], mb[2], mb[3]);

		spin_lock_irqsave(&ha->cs84xx->access_lock, flags);
		switch (mb[1]) {
		case A84_PANIC_RECOVERY:
			ql_log(ql_log_info, vha, 0x5019,
			    "Alert 84XX: panic recovery %04x %04x.\n",
			    mb[2], mb[3]);
			break;
		case A84_OP_LOGIN_COMPLETE:
			ha->cs84xx->op_fw_version = mb[3] << 16 | mb[2];
			ql_log(ql_log_info, vha, 0x501a,
			    "Alert 84XX: firmware version %x.\n",
			    ha->cs84xx->op_fw_version);
			break;
		case A84_DIAG_LOGIN_COMPLETE:
			ha->cs84xx->diag_fw_version = mb[3] << 16 | mb[2];
			ql_log(ql_log_info, vha, 0x501b,
			    "Alert 84XX: diagnostic firmware version %x.\n",
			    ha->cs84xx->diag_fw_version);
			break;
		case A84_GOLD_LOGIN_COMPLETE:
			ha->cs84xx->diag_fw_version = mb[3] << 16 | mb[2];
			ha->cs84xx->fw_update = 1;
			ql_log(ql_log_info, vha, 0x501c,
			    "Alert 84XX: gold firmware version %x.\n",
			    ha->cs84xx->gold_fw_version);
			break;
		default:
			ql_log(ql_log_warn, vha, 0x501d,
			    "Alert 84xx: Invalid Alert %04x %04x %04x.\n",
			    mb[1], mb[2], mb[3]);
		}
		spin_unlock_irqrestore(&ha->cs84xx->access_lock, flags);
		break;
	case MBA_DCBX_START:
		ql_dbg(ql_dbg_async, vha, 0x501e,
		    "DCBX Started -- %04x %04x %04x.\n",
		    mb[1], mb[2], mb[3]);
		break;
	case MBA_DCBX_PARAM_UPDATE:
		ql_dbg(ql_dbg_async, vha, 0x501f,
		    "DCBX Parameters Updated -- %04x %04x %04x.\n",
		    mb[1], mb[2], mb[3]);
		break;
	case MBA_FCF_CONF_ERR:
		ql_dbg(ql_dbg_async, vha, 0x5020,
		    "FCF Configuration Error -- %04x %04x %04x.\n",
		    mb[1], mb[2], mb[3]);
		break;
	case MBA_IDC_NOTIFY:
		if (IS_QLA8031(vha->hw) || IS_QLA8044(ha)) {
			mb[4] = RD_REG_WORD(&reg24->mailbox4);
			if (((mb[2] & 0x7fff) == MBC_PORT_RESET ||
			    (mb[2] & 0x7fff) == MBC_SET_PORT_CONFIG) &&
			    (mb[4] & INTERNAL_LOOPBACK_MASK) != 0) {
				set_bit(ISP_QUIESCE_NEEDED, &vha->dpc_flags);
				/*
				 * Extend loop down timer since port is active.
				 */
				if (atomic_read(&vha->loop_state) == LOOP_DOWN)
					atomic_set(&vha->loop_down_timer,
					    LOOP_DOWN_TIME);
				qla2xxx_wake_dpc(vha);
			}
		}
		/* fall through */
	case MBA_IDC_COMPLETE:
		if (ha->notify_lb_portup_comp && !vha->vp_idx)
			complete(&ha->lb_portup_comp);
		/* fall through */
	case MBA_IDC_TIME_EXT:
		if (IS_QLA81XX(vha->hw) || IS_QLA8031(vha->hw) ||
		    IS_QLA8044(ha))
			qla81xx_idc_event(vha, mb[0], mb[1]);
		break;

	case MBA_IDC_AEN:
		mb[4] = RD_REG_WORD(&reg24->mailbox4);
		mb[5] = RD_REG_WORD(&reg24->mailbox5);
		mb[6] = RD_REG_WORD(&reg24->mailbox6);
		mb[7] = RD_REG_WORD(&reg24->mailbox7);
		qla83xx_handle_8200_aen(vha, mb);
		break;

	case MBA_DPORT_DIAGNOSTICS:
		ql_dbg(ql_dbg_async, vha, 0x5052,
		    "D-Port Diagnostics: %04x result=%s\n",
		    mb[0],
		    mb[1] == 0 ? "start" :
		    mb[1] == 1 ? "done (pass)" :
		    mb[1] == 2 ? "done (error)" : "other");
		break;

	case MBA_TEMPERATURE_ALERT:
		ql_dbg(ql_dbg_async, vha, 0x505e,
		    "TEMPERATURE ALERT: %04x %04x %04x\n", mb[1], mb[2], mb[3]);
		if (mb[1] == 0x12)
			schedule_work(&ha->board_disable);
		break;

	default:
		ql_dbg(ql_dbg_async, vha, 0x5057,
		    "Unknown AEN:%04x %04x %04x %04x\n",
		    mb[0], mb[1], mb[2], mb[3]);
	}

	qlt_async_event(mb[0], vha, mb);

	if (!vha->vp_idx && ha->num_vhosts)
		qla2x00_alert_all_vps(rsp, mb);
}
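
/**
 * qla2x00_process_completed_request() - Process a Fast Post response.
 * @vha: SCSI driver HA context
 * @req: request queue
 * @index: SRB index
 */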
void
qla2x00_process_completed_request(struct scsi_qla_host *vha,
    struct req_que *req, uint32_t index)
{
	srb_t *sp;
	struct qla_hw_data *ha = vha->hw;

	/* Validate handle. */
	if (index >= req->num_outstanding_cmds) {
		ql_log(ql_log_warn, vha, 0x3014,
		    "Invalid SCSI command index (%x).\n", index);

		if (IS_P3P_TYPE(ha))
			set_bit(FCOE_CTX_RESET_NEEDED, &vha->dpc_flags);
		else
			set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
		return;
	}

	sp = req->outstanding_cmds[index];
	if (sp) {
		/* Free outstanding command slot. */
		req->outstanding_cmds[index] = NULL;

		/* Save ISP completion status */
		sp->done(sp, DID_OK << 16);
	} else {
		ql_log(ql_log_warn, vha, 0x3016, "Invalid SCSI SRB.\n");

		if (IS_P3P_TYPE(ha))
			set_bit(FCOE_CTX_RESET_NEEDED, &vha->dpc_flags);
		else
			set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
	}
}

srb_t *
qla2x00_get_sp_from_handle(scsi_qla_host_t *vha, const char *func,
    struct req_que *req, void *iocb)
{
	struct qla_hw_data *ha = vha->hw;
	sts_entry_t *pkt = iocb;
	srb_t *sp = NULL;
	uint16_t index;

	index = LSW(pkt->handle);
	if (index >= req->num_outstanding_cmds) {
		ql_log(ql_log_warn, vha, 0x5031,
		    "Invalid command index (%x) type %8ph.\n",
		    index, iocb);
		if (IS_P3P_TYPE(ha))
			set_bit(FCOE_CTX_RESET_NEEDED, &vha->dpc_flags);
		else
			set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
		goto done;
	}
	sp = req->outstanding_cmds[index];
	if (!sp) {
		ql_log(ql_log_warn, vha, 0x5032,
		    "Invalid completion handle (%x) -- timed-out.\n", index);
		return sp;
	}
	if (sp->handle != index) {
		ql_log(ql_log_warn, vha, 0x5033,
		    "SRB handle (%x) mismatch %x.\n", sp->handle, index);
		return NULL;
	}

	req->outstanding_cmds[index] = NULL;

done:
	return sp;
}

static void
qla2x00_mbx_iocb_entry(scsi_qla_host_t *vha, struct req_que *req,
    struct mbx_entry *mbx)
{
	const char func[] = "MBX-IOCB";
	const char *type;
	fc_port_t *fcport;
	srb_t *sp;
	struct srb_iocb *lio;
	uint16_t *data;
	uint16_t status;

	sp = qla2x00_get_sp_from_handle(vha, func, req, mbx);
	if (!sp)
		return;

	lio = &sp->u.iocb_cmd;
	type = sp->name;
	fcport = sp->fcport;
	data = lio->u.logio.data;

	data[0] = MBS_COMMAND_ERROR;
	data[1] = lio->u.logio.flags & SRB_LOGIN_RETRIED ?
	    QLA_LOGIO_LOGIN_RETRIED : 0;
	if (mbx->entry_status) {
		ql_dbg(ql_dbg_async, vha, 0x5043,
		    "Async-%s error entry - hdl=%x portid=%02x%02x%02x "
		    "entry-status=%x status=%x state-flag=%x "
		    "status-flags=%x.\n", type, sp->handle,
		    fcport->d_id.b.domain, fcport->d_id.b.area,
		    fcport->d_id.b.al_pa, mbx->entry_status,
		    le16_to_cpu(mbx->status), le16_to_cpu(mbx->state_flags),
		    le16_to_cpu(mbx->status_flags));

		ql_dump_buffer(ql_dbg_async + ql_dbg_buffer, vha, 0x5029,
		    (uint8_t *)mbx, sizeof(*mbx));

		goto logio_done;
	}

	status = le16_to_cpu(mbx->status);
	if (status == 0x30 && sp->type == SRB_LOGIN_CMD &&
	    le16_to_cpu(mbx->mb0) == MBS_COMMAND_COMPLETE)
		status = 0;
	if (!status && le16_to_cpu(mbx->mb0) == MBS_COMMAND_COMPLETE) {
		ql_dbg(ql_dbg_async, vha, 0x5045,
		    "Async-%s complete - hdl=%x portid=%02x%02x%02x mbx1=%x.\n",
		    type, sp->handle, fcport->d_id.b.domain,
		    fcport->d_id.b.area, fcport->d_id.b.al_pa,
		    le16_to_cpu(mbx->mb1));

		data[0] = MBS_COMMAND_COMPLETE;
		if (sp->type == SRB_LOGIN_CMD) {
			fcport->port_type = FCT_TARGET;
			if (le16_to_cpu(mbx->mb1) & BIT_0)
				fcport->port_type = FCT_INITIATOR;
			else if (le16_to_cpu(mbx->mb1) & BIT_1)
				fcport->flags |= FCF_FCP2_DEVICE;
		}
		goto logio_done;
	}

	data[0] = le16_to_cpu(mbx->mb0);
	switch (data[0]) {
	case MBS_PORT_ID_USED:
		data[1] = le16_to_cpu(mbx->mb1);
		break;
	case MBS_LOOP_ID_USED:
		break;
	default:
		data[0] = MBS_COMMAND_ERROR;
		break;
	}

	ql_log(ql_log_warn, vha, 0x5046,
	    "Async-%s failed - hdl=%x portid=%02x%02x%02x status=%x "
	    "mb0=%x mb1=%x mb2=%x mb6=%x mb7=%x.\n", type, sp->handle,
	    fcport->d_id.b.domain, fcport->d_id.b.area, fcport->d_id.b.al_pa,
	    status, le16_to_cpu(mbx->mb0), le16_to_cpu(mbx->mb1),
	    le16_to_cpu(mbx->mb2), le16_to_cpu(mbx->mb6),
	    le16_to_cpu(mbx->mb7));

logio_done:
	sp->done(sp, 0);
}

static void
qla24xx_mbx_iocb_entry(scsi_qla_host_t *vha, struct req_que *req,
    struct mbx_24xx_entry *pkt)
{
	const char func[] = "MBX-IOCB2";
	srb_t *sp;
	struct srb_iocb *si;
	u16 sz, i;
	int res;

	sp = qla2x00_get_sp_from_handle(vha, func, req, pkt);
	if (!sp)
		return;

	si = &sp->u.iocb_cmd;
	sz = min(ARRAY_SIZE(pkt->mb), ARRAY_SIZE(sp->u.iocb_cmd.u.mbx.in_mb));

	for (i = 0; i < sz; i++)
		si->u.mbx.in_mb[i] = le16_to_cpu(pkt->mb[i]);

	res = (si->u.mbx.in_mb[0] & MBS_MASK);

	sp->done(sp, res);
}

static void
qla24xxx_nack_iocb_entry(scsi_qla_host_t *vha, struct req_que *req,
    struct nack_to_isp *pkt)
{
	const char func[] = "nack";
	srb_t *sp;
	int res = 0;

	sp = qla2x00_get_sp_from_handle(vha, func, req, pkt);
	if (!sp)
		return;

	if (pkt->u.isp2x.status != cpu_to_le16(NOTIFY_ACK_SUCCESS))
		res = QLA_FUNCTION_FAILED;

	sp->done(sp, res);
}

static void
qla2x00_ct_entry(scsi_qla_host_t *vha, struct req_que *req,
    sts_entry_t *pkt, int iocb_type)
{
	const char func[] = "CT_IOCB";
	const char *type;
	srb_t *sp;
	struct bsg_job *bsg_job;
	struct fc_bsg_reply *bsg_reply;
	uint16_t comp_status;
	int res = 0;

	sp = qla2x00_get_sp_from_handle(vha, func, req, pkt);
	if (!sp)
		return;

	switch (sp->type) {
	case SRB_CT_CMD:
		bsg_job = sp->u.bsg_job;
		bsg_reply = bsg_job->reply;

		type = "ct pass-through";

		comp_status = le16_to_cpu(pkt->comp_status);

		/*
		 * return FC_CTELS_STATUS_OK and leave the decoding of the
		 * ELS/CT fc payload to the caller
		 */
		bsg_reply->reply_data.ctels_reply.status = FC_CTELS_STATUS_OK;
		bsg_job->reply_len = sizeof(struct fc_bsg_reply);

		if (comp_status != CS_COMPLETE) {
			if (comp_status == CS_DATA_UNDERRUN) {
				res = DID_OK << 16;
				bsg_reply->reply_payload_rcv_len =
				    le16_to_cpu(((sts_entry_t *)pkt)->rsp_info_len);

				ql_log(ql_log_warn, vha, 0x5048,
				    "CT pass-through-%s error comp_status=0x%x total_byte=0x%x.\n",
				    type, comp_status,
				    bsg_reply->reply_payload_rcv_len);
			} else {
				ql_log(ql_log_warn, vha, 0x5049,
				    "CT pass-through-%s error comp_status=0x%x.\n",
				    type, comp_status);
				res = DID_ERROR << 16;
				bsg_reply->reply_payload_rcv_len = 0;
			}
			ql_dump_buffer(ql_dbg_async + ql_dbg_buffer, vha, 0x5035,
			    (uint8_t *)pkt, sizeof(*pkt));
		} else {
			res = DID_OK << 16;
			bsg_reply->reply_payload_rcv_len =
			    bsg_job->reply_payload.payload_len;
			bsg_job->reply_len = 0;
		}
		break;
	case SRB_CT_PTHRU_CMD:
		/*
		 * borrowing sts_entry_24xx.comp_status.
		 * same location as ct_entry_24xx.comp_status
		 */
		res = qla2x00_chk_ms_status(vha, (ms_iocb_entry_t *)pkt,
		    (struct ct_sns_rsp *)sp->u.iocb_cmd.u.ctarg.rsp,
		    sp->name);
		break;
	}

	sp->done(sp, res);
}

static void
qla24xx_els_ct_entry(scsi_qla_host_t *vha, struct req_que *req,
    struct sts_entry_24xx *pkt, int iocb_type)
{
	const char func[] = "ELS_CT_IOCB";
	const char *type;
	srb_t *sp;
	struct bsg_job *bsg_job;
	struct fc_bsg_reply *bsg_reply;
	uint16_t comp_status;
	uint32_t fw_status[3];
	uint8_t *fw_sts_ptr;
	int res;

	sp = qla2x00_get_sp_from_handle(vha, func, req, pkt);
	if (!sp)
		return;
	bsg_job = sp->u.bsg_job;
	bsg_reply = bsg_job->reply;

	type = NULL;
	switch (sp->type) {
	case SRB_ELS_CMD_RPT:
	case SRB_ELS_CMD_HST:
		type = "els";
		break;
	case SRB_CT_CMD:
		type = "ct pass-through";
		break;
	case SRB_ELS_DCMD:
		type = "Driver ELS logo";
		ql_dbg(ql_dbg_user, vha, 0x5047,
		    "Completing %s: (%p) type=%d.\n", type, sp, sp->type);
		sp->done(sp, 0);
		return;
	case SRB_CT_PTHRU_CMD:
		/*
		 * borrowing sts_entry_24xx.comp_status.
		 * same location as ct_entry_24xx.comp_status
		 */
		res = qla2x00_chk_ms_status(vha, (ms_iocb_entry_t *)pkt,
		    (struct ct_sns_rsp *)sp->u.iocb_cmd.u.ctarg.rsp,
		    sp->name);
		sp->done(sp, res);
		return;
	default:
		ql_dbg(ql_dbg_user, vha, 0x503e,
		    "Unrecognized SRB: (%p) type=%d.\n", sp, sp->type);
		return;
	}

	comp_status = fw_status[0] = le16_to_cpu(pkt->comp_status);
	fw_status[1] = le16_to_cpu(((struct els_sts_entry_24xx *)pkt)->error_subcode_1);
	fw_status[2] = le16_to_cpu(((struct els_sts_entry_24xx *)pkt)->error_subcode_2);

	/*
	 * return FC_CTELS_STATUS_OK and leave the decoding of the
	 * ELS/CT fc payload to the caller
	 */
	bsg_reply->reply_data.ctels_reply.status = FC_CTELS_STATUS_OK;
	bsg_job->reply_len = sizeof(struct fc_bsg_reply) + sizeof(fw_status);

	if (comp_status != CS_COMPLETE) {
		if (comp_status == CS_DATA_UNDERRUN) {
			res = DID_OK << 16;
			bsg_reply->reply_payload_rcv_len =
			    le16_to_cpu(((struct els_sts_entry_24xx *)pkt)->total_byte_count);

			ql_dbg(ql_dbg_user, vha, 0x503f,
			    "ELS-CT pass-through-%s error hdl=%x comp_status-status=0x%x "
			    "error subcode 1=0x%x error subcode 2=0x%x total_byte = 0x%x.\n",
			    type, sp->handle, comp_status, fw_status[1], fw_status[2],
			    le16_to_cpu(((struct els_sts_entry_24xx *)
				pkt)->total_byte_count));
		} else {
			ql_dbg(ql_dbg_user, vha, 0x5040,
			    "ELS-CT pass-through-%s error hdl=%x comp_status-status=0x%x "
			    "error subcode 1=0x%x error subcode 2=0x%x.\n",
			    type, sp->handle, comp_status,
			    le16_to_cpu(((struct els_sts_entry_24xx *)
				pkt)->error_subcode_1),
			    le16_to_cpu(((struct els_sts_entry_24xx *)
				pkt)->error_subcode_2));
			res = DID_ERROR << 16;
			bsg_reply->reply_payload_rcv_len = 0;
		}
		/* Both branches copy the firmware status after the reply. */
		fw_sts_ptr = ((uint8_t *)scsi_req(bsg_job->req)->sense) +
		    sizeof(struct fc_bsg_reply);
		memcpy(fw_sts_ptr, fw_status, sizeof(fw_status));
		ql_dump_buffer(ql_dbg_user + ql_dbg_buffer, vha, 0x5056,
		    (uint8_t *)pkt, sizeof(*pkt));
	} else {
		res = DID_OK << 16;
		bsg_reply->reply_payload_rcv_len = bsg_job->reply_payload.payload_len;
		bsg_job->reply_len = 0;
	}

	sp->done(sp, res);
}

static void
qla24xx_logio_entry(scsi_qla_host_t *vha, struct req_que *req,
    struct logio_entry_24xx *logio)
{
	const char func[] = "LOGIO-IOCB";
	const char *type;
	fc_port_t *fcport;
	srb_t *sp;
	struct srb_iocb *lio;
	uint16_t *data;
	uint32_t iop[2];

	sp = qla2x00_get_sp_from_handle(vha, func, req, logio);
	if (!sp)
		return;

	lio = &sp->u.iocb_cmd;
	type = sp->name;
	fcport = sp->fcport;
	data = lio->u.logio.data;

	data[0] = MBS_COMMAND_ERROR;
	data[1] = lio->u.logio.flags & SRB_LOGIN_RETRIED ?
		QLA_LOGIO_LOGIN_RETRIED : 0;
	if (logio->entry_status) {
		ql_log(ql_log_warn, fcport->vha, 0x5034,
		    "Async-%s error entry - %8phC hdl=%x "
		    "portid=%02x%02x%02x entry-status=%x.\n",
		    type, fcport->port_name, sp->handle, fcport->d_id.b.domain,
		    fcport->d_id.b.area, fcport->d_id.b.al_pa,
		    logio->entry_status);
		ql_dump_buffer(ql_dbg_async + ql_dbg_buffer, vha, 0x504d,
		    (uint8_t *)logio, sizeof(*logio));

		goto logio_done;
	}

	if (le16_to_cpu(logio->comp_status) == CS_COMPLETE) {
		ql_dbg(ql_dbg_async, fcport->vha, 0x5036,
		    "Async-%s complete - %8phC hdl=%x portid=%02x%02x%02x "
		    "iop0=%x.\n", type, fcport->port_name, sp->handle,
		    fcport->d_id.b.domain,
		    fcport->d_id.b.area, fcport->d_id.b.al_pa,
		    le32_to_cpu(logio->io_parameter[0]));

		vha->hw->exch_starvation = 0;
		data[0] = MBS_COMMAND_COMPLETE;
		if (sp->type != SRB_LOGIN_CMD)
			goto logio_done;

		iop[0] = le32_to_cpu(logio->io_parameter[0]);
		if (iop[0] & BIT_4) {
			fcport->port_type = FCT_TARGET;
			if (iop[0] & BIT_8)
				fcport->flags |= FCF_FCP2_DEVICE;
		} else if (iop[0] & BIT_5)
			fcport->port_type = FCT_INITIATOR;

		if (iop[0] & BIT_7)
			fcport->flags |= FCF_CONF_COMP_SUPPORTED;

		if (logio->io_parameter[7] || logio->io_parameter[8])
			fcport->supported_classes |= FC_COS_CLASS2;
		if (logio->io_parameter[9] || logio->io_parameter[10])
			fcport->supported_classes |= FC_COS_CLASS3;

		goto logio_done;
	}

	iop[0] = le32_to_cpu(logio->io_parameter[0]);
	iop[1] = le32_to_cpu(logio->io_parameter[1]);
	lio->u.logio.iop[0] = iop[0];
	lio->u.logio.iop[1] = iop[1];
	switch (iop[0]) {
	case LSC_SCODE_PORTID_USED:
		data[0] = MBS_PORT_ID_USED;
		data[1] = LSW(iop[1]);
		break;
	case LSC_SCODE_NPORT_USED:
		data[0] = MBS_LOOP_ID_USED;
		break;
	case LSC_SCODE_CMD_FAILED:
		if (iop[1] == 0x0606) {
			/*
			 * PLOGI/PRLI Completed. We must have Recv PLOGI/PRLI,
			 * Target side acked.
			 */
			data[0] = MBS_COMMAND_COMPLETE;
			goto logio_done;
		}
		data[0] = MBS_COMMAND_ERROR;
		break;
	case LSC_SCODE_NOXCB:
		vha->hw->exch_starvation++;
		if (vha->hw->exch_starvation > 5) {
			ql_log(ql_log_warn, vha, 0xd046,
			    "Exchange starvation. Resetting RISC\n");

			vha->hw->exch_starvation = 0;

			if (IS_P3P_TYPE(vha->hw))
				set_bit(FCOE_CTX_RESET_NEEDED, &vha->dpc_flags);
			else
				set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
			qla2xxx_wake_dpc(vha);
		}
		/* fall through */
	default:
		data[0] = MBS_COMMAND_ERROR;
		break;
	}

	ql_dbg(ql_dbg_async, fcport->vha, 0x5037,
	    "Async-%s failed - %8phC hdl=%x portid=%02x%02x%02x comp=%x "
	    "iop0=%x iop1=%x.\n", type, fcport->port_name,
	    sp->handle, fcport->d_id.b.domain,
	    fcport->d_id.b.area, fcport->d_id.b.al_pa,
	    le16_to_cpu(logio->comp_status),
	    le32_to_cpu(logio->io_parameter[0]),
	    le32_to_cpu(logio->io_parameter[1]));

logio_done:
	sp->done(sp, 0);
}

static void
qla24xx_tm_iocb_entry(scsi_qla_host_t *vha, struct req_que *req, void *tsk)
{
	const char func[] = "TMF-IOCB";
	const char *type;
	fc_port_t *fcport;
	srb_t *sp;
	struct srb_iocb *iocb;
	struct sts_entry_24xx *sts = (struct sts_entry_24xx *)tsk;

	sp = qla2x00_get_sp_from_handle(vha, func, req, tsk);
	if (!sp)
		return;

	iocb = &sp->u.iocb_cmd;
	type = sp->name;
	fcport = sp->fcport;
	iocb->u.tmf.data = QLA_SUCCESS;

	if (sts->entry_status) {
		ql_log(ql_log_warn, fcport->vha, 0x5038,
		    "Async-%s error - hdl=%x entry-status(%x).\n",
		    type, sp->handle, sts->entry_status);
		iocb->u.tmf.data = QLA_FUNCTION_FAILED;
	} else if (sts->comp_status != cpu_to_le16(CS_COMPLETE)) {
		ql_log(ql_log_warn, fcport->vha, 0x5039,
		    "Async-%s error - hdl=%x completion status(%x).\n",
		    type, sp->handle, sts->comp_status);
		iocb->u.tmf.data = QLA_FUNCTION_FAILED;
	} else if ((le16_to_cpu(sts->scsi_status) &
	    SS_RESPONSE_INFO_LEN_VALID)) {
		if (le32_to_cpu(sts->rsp_data_len) < 4) {
			ql_log(ql_log_warn, fcport->vha, 0x503b,
			    "Async-%s error - hdl=%x not enough response(%d).\n",
			    type, sp->handle, sts->rsp_data_len);
		} else if (sts->data[3]) {
			ql_log(ql_log_warn, fcport->vha, 0x503c,
			    "Async-%s error - hdl=%x response(%x).\n",
			    type, sp->handle, sts->data[3]);
			iocb->u.tmf.data = QLA_FUNCTION_FAILED;
		}
	}

	if (iocb->u.tmf.data != QLA_SUCCESS)
		ql_dump_buffer(ql_dbg_async + ql_dbg_buffer, vha, 0x5055,
		    (uint8_t *)sts, sizeof(*sts));

	sp->done(sp, 0);
}

static void
qla24xx_nvme_iocb_entry(scsi_qla_host_t *vha, struct req_que *req, void *tsk)
{
	const char func[] = "NVME-IOCB";
	fc_port_t *fcport;
	srb_t *sp;
	struct srb_iocb *iocb;
	struct sts_entry_24xx *sts = (struct sts_entry_24xx *)tsk;
	uint16_t state_flags;
	struct nvmefc_fcp_req *fd;
	uint16_t ret = 0;
	struct srb_iocb *nvme;

	sp = qla2x00_get_sp_from_handle(vha, func, req, tsk);
	if (!sp)
		return;

	iocb = &sp->u.iocb_cmd;
	fcport = sp->fcport;
	iocb->u.nvme.comp_status = le16_to_cpu(sts->comp_status);
	state_flags = le16_to_cpu(sts->state_flags);
	fd = iocb->u.nvme.desc;
	nvme = &sp->u.iocb_cmd;

	if (unlikely(nvme->u.nvme.aen_op))
		atomic_dec(&sp->vha->nvme_active_aen_cnt);
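
	/*
	 * State flags: Bit 6 and 0.
	 * If 0 is set, we don't care about 6.
	 * both cases resp was dma'd to host buffer
	 * if both are 0, that is good path case.
	 * if six is set and 0 is clear, we need to
	 * copy resp data from status iocb to resp buffer.
	 */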
	if (!(state_flags & (SF_FCP_RSP_DMA | SF_NVME_ERSP))) {
		iocb->u.nvme.rsp_pyld_len = 0;
	} else if ((state_flags & SF_FCP_RSP_DMA)) {
		iocb->u.nvme.rsp_pyld_len = le16_to_cpu(sts->nvme_rsp_pyld_len);
	} else if (state_flags & SF_NVME_ERSP) {
		uint32_t *inbuf, *outbuf;
		uint16_t iter;

		inbuf = (uint32_t *)&sts->nvme_ersp_data;
		outbuf = (uint32_t *)fd->rspaddr;
		iocb->u.nvme.rsp_pyld_len = le16_to_cpu(sts->nvme_rsp_pyld_len);
		iter = iocb->u.nvme.rsp_pyld_len >> 2;
		for (; iter; iter--)
			*outbuf++ = swab32(*inbuf++);
	} else {
		ql_log(ql_log_warn, fcport->vha, 0x503a,
		    "NVME-%s error. Unhandled state_flags of %x\n",
		    sp->name, state_flags);
	}

	fd->transferred_length = fd->payload_length -
	    le32_to_cpu(sts->residual_len);

	if (sts->entry_status) {
		ql_log(ql_log_warn, fcport->vha, 0x5038,
		    "NVME-%s error - hdl=%x entry-status(%x).\n",
		    sp->name, sp->handle, sts->entry_status);
		ret = QLA_FUNCTION_FAILED;
	} else if (sts->comp_status != cpu_to_le16(CS_COMPLETE)) {
		ql_log(ql_log_warn, fcport->vha, 0x5039,
		    "NVME-%s error - hdl=%x completion status(%x) resid=%x ox_id=%x\n",
		    sp->name, sp->handle, sts->comp_status,
		    le32_to_cpu(sts->residual_len), sts->ox_id);
		ret = QLA_FUNCTION_FAILED;
	}
	sp->done(sp, ret);
}
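
/**
 * qla2x00_process_response_queue() - Process response queue entries.
 * @rsp: response queue
 */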
void
qla2x00_process_response_queue(struct rsp_que *rsp)
{
	struct scsi_qla_host *vha;
	struct qla_hw_data *ha = rsp->hw;
	struct device_reg_2xxx __iomem *reg = &ha->iobase->isp;
	sts_entry_t *pkt;
	uint16_t handle_cnt;
	uint16_t cnt;

	vha = pci_get_drvdata(ha->pdev);

	if (!vha->flags.online)
		return;

	while (rsp->ring_ptr->signature != RESPONSE_PROCESSED) {
		pkt = (sts_entry_t *)rsp->ring_ptr;

		rsp->ring_index++;
		if (rsp->ring_index == rsp->length) {
			rsp->ring_index = 0;
			rsp->ring_ptr = rsp->ring;
		} else {
			rsp->ring_ptr++;
		}

		if (pkt->entry_status != 0) {
			qla2x00_error_entry(vha, rsp, pkt);
			((response_t *)pkt)->signature = RESPONSE_PROCESSED;
			wmb();
			continue;
		}

		switch (pkt->entry_type) {
		case STATUS_TYPE:
			qla2x00_status_entry(vha, rsp, pkt);
			break;
		case STATUS_TYPE_21:
			handle_cnt = ((sts21_entry_t *)pkt)->handle_count;
			for (cnt = 0; cnt < handle_cnt; cnt++) {
				qla2x00_process_completed_request(vha, rsp->req,
				    ((sts21_entry_t *)pkt)->handle[cnt]);
			}
			break;
		case STATUS_TYPE_22:
			handle_cnt = ((sts22_entry_t *)pkt)->handle_count;
			for (cnt = 0; cnt < handle_cnt; cnt++) {
				qla2x00_process_completed_request(vha, rsp->req,
				    ((sts22_entry_t *)pkt)->handle[cnt]);
			}
			break;
		case STATUS_CONT_TYPE:
			qla2x00_status_cont_entry(rsp, (sts_cont_entry_t *)pkt);
			break;
		case MBX_IOCB_TYPE:
			qla2x00_mbx_iocb_entry(vha, rsp->req,
			    (struct mbx_entry *)pkt);
			break;
		case CT_IOCB_TYPE:
			qla2x00_ct_entry(vha, rsp->req, pkt, CT_IOCB_TYPE);
			break;
		default:
			/* Type Not Supported. */
			ql_log(ql_log_warn, vha, 0x504a,
			    "Received unknown response pkt type %x "
			    "entry status=%x.\n",
			    pkt->entry_type, pkt->entry_status);
			break;
		}
		((response_t *)pkt)->signature = RESPONSE_PROCESSED;
		wmb();
	}

	/* Adjust ring index */
	WRT_REG_WORD(ISP_RSP_Q_OUT(ha, reg), rsp->ring_index);
}

static inline void
qla2x00_handle_sense(srb_t *sp, uint8_t *sense_data, uint32_t par_sense_len,
    uint32_t sense_len, struct rsp_que *rsp, int res)
{
	struct scsi_qla_host *vha = sp->vha;
	struct scsi_cmnd *cp = GET_CMD_SP(sp);
	uint32_t track_sense_len;

	if (sense_len >= SCSI_SENSE_BUFFERSIZE)
		sense_len = SCSI_SENSE_BUFFERSIZE;

	SET_CMD_SENSE_LEN(sp, sense_len);
	SET_CMD_SENSE_PTR(sp, cp->sense_buffer);
	track_sense_len = sense_len;

	if (sense_len > par_sense_len)
		sense_len = par_sense_len;

	memcpy(cp->sense_buffer, sense_data, sense_len);

	SET_CMD_SENSE_PTR(sp, cp->sense_buffer + sense_len);
	track_sense_len -= sense_len;
	SET_CMD_SENSE_LEN(sp, track_sense_len);

	if (track_sense_len != 0) {
		rsp->status_srb = sp;
		cp->result = res;
	}

	if (sense_len) {
		ql_dbg(ql_dbg_io + ql_dbg_buffer, vha, 0x301c,
		    "Check condition Sense data, nexus%ld:%d:%llu cmd=%p.\n",
		    sp->vha->host_no, cp->device->id, cp->device->lun,
		    cp);
		ql_dump_buffer(ql_dbg_io + ql_dbg_buffer, vha, 0x302b,
		    cp->sense_buffer, sense_len);
	}
}

struct scsi_dif_tuple {
	__be16 guard;	/* Checksum */
	__be16 app_tag;	/* APPL identifier */
	__be32 ref_tag;	/* Target LBA or indirect LBA */
};
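
/*
 * Checks the guard or meta-data for the type of error
 * detected by the HBA. In case of errors, set the ASC/ASCQ
 * fields in the sense buffer with ILLEGAL_REQUEST
 * to indicate to the kernel that the HBA detected error.
 */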
static inline int
qla2x00_handle_dif_error(srb_t *sp, struct sts_entry_24xx *sts24)
{
	struct scsi_qla_host *vha = sp->vha;
	struct scsi_cmnd *cmd = GET_CMD_SP(sp);
	uint8_t *ap = &sts24->data[12];
	uint8_t *ep = &sts24->data[20];
	uint32_t e_ref_tag, a_ref_tag;
	uint16_t e_app_tag, a_app_tag;
	uint16_t e_guard, a_guard;

	/*
	 * swab32 of the "data" field in the beginning of qla2x00_status_entry()
	 * would make guard field appear at offset 2
	 */
	a_guard   = le16_to_cpu(*(uint16_t *)(ap + 2));
	a_app_tag = le16_to_cpu(*(uint16_t *)(ap + 0));
	a_ref_tag = le32_to_cpu(*(uint32_t *)(ap + 4));
	e_guard   = le16_to_cpu(*(uint16_t *)(ep + 2));
	e_app_tag = le16_to_cpu(*(uint16_t *)(ep + 0));
	e_ref_tag = le32_to_cpu(*(uint32_t *)(ep + 4));

	ql_dbg(ql_dbg_io, vha, 0x3023,
	    "iocb(s) %p Returned STATUS.\n", sts24);

	ql_dbg(ql_dbg_io, vha, 0x3024,
	    "DIF ERROR in cmd 0x%x lba 0x%llx act ref"
	    " tag=0x%x, exp ref_tag=0x%x, act app tag=0x%x, exp app"
	    " tag=0x%x, act guard=0x%x, exp guard=0x%x.\n",
	    cmd->cmnd[0], (u64)scsi_get_lba(cmd), a_ref_tag, e_ref_tag,
	    a_app_tag, e_app_tag, a_guard, e_guard);
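
	/*
	 * Ignore sector if:
	 * For type     3: ref & app tag is all 'f's
	 * For type 0,1,2: app tag is all 'f's
	 */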
	if ((a_app_tag == T10_PI_APP_ESCAPE) &&
	    ((scsi_get_prot_type(cmd) != SCSI_PROT_DIF_TYPE3) ||
	    (a_ref_tag == T10_PI_REF_ESCAPE))) {
		uint32_t blocks_done, resid;
		sector_t lba_s = scsi_get_lba(cmd);

		/* 2TB boundary case covered automatically with this */
		blocks_done = e_ref_tag - (uint32_t)lba_s + 1;

		resid = scsi_bufflen(cmd) - (blocks_done *
		    cmd->device->sector_size);

		scsi_set_resid(cmd, resid);
		cmd->result = DID_OK << 16;

		/* Update protection tag */
		if (scsi_prot_sg_count(cmd)) {
			uint32_t i, j = 0, k = 0, num_ent;
			struct scatterlist *sg;
			struct t10_pi_tuple *spt;

			/* Patch the corresponding protection tags */
			scsi_for_each_prot_sg(cmd, sg,
			    scsi_prot_sg_count(cmd), i) {
				num_ent = sg_dma_len(sg) / 8;
				if (k + num_ent < blocks_done) {
					k += num_ent;
					continue;
				}
				j = blocks_done - k - 1;
				k = blocks_done;
				break;
			}

			if (k != blocks_done) {
				ql_log(ql_log_warn, vha, 0x302f,
				    "unexpected tag values tag:lba=%x:%llx\n",
				    e_ref_tag, (unsigned long long)lba_s);
				return 1;
			}

			spt = page_address(sg_page(sg)) + sg->offset;
			spt += j;

			spt->app_tag = T10_PI_APP_ESCAPE;
			if (scsi_get_prot_type(cmd) == SCSI_PROT_DIF_TYPE3)
				spt->ref_tag = T10_PI_REF_ESCAPE;
		}

		return 0;
	}

	/* check guard */
	if (e_guard != a_guard) {
		scsi_build_sense_buffer(1, cmd->sense_buffer, ILLEGAL_REQUEST,
		    0x10, 0x1);
		set_driver_byte(cmd, DRIVER_SENSE);
		set_host_byte(cmd, DID_ABORT);
		cmd->result |= SAM_STAT_CHECK_CONDITION << 1;
		return 1;
	}

	/* check ref tag */
	if (e_ref_tag != a_ref_tag) {
		scsi_build_sense_buffer(1, cmd->sense_buffer, ILLEGAL_REQUEST,
		    0x10, 0x3);
		set_driver_byte(cmd, DRIVER_SENSE);
		set_host_byte(cmd, DID_ABORT);
		cmd->result |= SAM_STAT_CHECK_CONDITION << 1;
		return 1;
	}

	/* check appl tag */
	if (e_app_tag != a_app_tag) {
		scsi_build_sense_buffer(1, cmd->sense_buffer, ILLEGAL_REQUEST,
		    0x10, 0x2);
		set_driver_byte(cmd, DRIVER_SENSE);
		set_host_byte(cmd, DID_ABORT);
		cmd->result |= SAM_STAT_CHECK_CONDITION << 1;
		return 1;
	}

	return 1;
}
2127
static void
qla25xx_process_bidir_status_iocb(scsi_qla_host_t *vha, void *pkt,
    struct req_que *req, uint32_t index)
{
	struct qla_hw_data *ha = vha->hw;
	srb_t *sp;
	uint16_t comp_status;
	uint16_t scsi_status;
	uint16_t thread_id;
	uint32_t rval = EXT_STATUS_OK;
	struct bsg_job *bsg_job = NULL;
	struct fc_bsg_request *bsg_request;
	struct fc_bsg_reply *bsg_reply;
	sts_entry_t *sts;
	struct sts_entry_24xx *sts24;
	sts = (sts_entry_t *) pkt;
	sts24 = (struct sts_entry_24xx *) pkt;

	/* Validate handle. */
	if (index >= req->num_outstanding_cmds) {
		ql_log(ql_log_warn, vha, 0x70af,
		    "Invalid SCSI completion handle 0x%x.\n", index);
		set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
		return;
	}

	sp = req->outstanding_cmds[index];
	if (!sp) {
		ql_log(ql_log_warn, vha, 0x70b0,
		    "Req:%d: Invalid ISP SCSI completion handle(0x%x)\n",
		    req->id, index);

		set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
		return;
	}

	/* Release the command slot before completing it. */
	req->outstanding_cmds[index] = NULL;
	bsg_job = sp->u.bsg_job;
	bsg_request = bsg_job->request;
	bsg_reply = bsg_job->reply;

	if (IS_FWI2_CAPABLE(ha)) {
		comp_status = le16_to_cpu(sts24->comp_status);
		scsi_status = le16_to_cpu(sts24->scsi_status) & SS_MASK;
	} else {
		comp_status = le16_to_cpu(sts->comp_status);
		scsi_status = le16_to_cpu(sts->scsi_status) & SS_MASK;
	}

	thread_id = bsg_request->rqst_data.h_vendor.vendor_cmd[1];
	switch (comp_status) {
	case CS_COMPLETE:
		if (scsi_status == 0) {
			bsg_reply->reply_payload_rcv_len =
			    bsg_job->reply_payload.payload_len;
			vha->qla_stats.input_bytes +=
			    bsg_reply->reply_payload_rcv_len;
			vha->qla_stats.input_requests++;
			rval = EXT_STATUS_OK;
		}
		goto done;

	case CS_DATA_OVERRUN:
		ql_dbg(ql_dbg_user, vha, 0x70b1,
		    "Command completed with data overrun thread_id=%d\n",
		    thread_id);
		rval = EXT_STATUS_DATA_OVERRUN;
		break;

	case CS_DATA_UNDERRUN:
		ql_dbg(ql_dbg_user, vha, 0x70b2,
		    "Command completed with data underrun thread_id=%d\n",
		    thread_id);
		rval = EXT_STATUS_DATA_UNDERRUN;
		break;
	case CS_BIDIR_RD_OVERRUN:
		ql_dbg(ql_dbg_user, vha, 0x70b3,
		    "Command completed with read data overrun thread_id=%d\n",
		    thread_id);
		rval = EXT_STATUS_DATA_OVERRUN;
		break;

	case CS_BIDIR_RD_WR_OVERRUN:
		ql_dbg(ql_dbg_user, vha, 0x70b4,
		    "Command completed with read and write data overrun "
		    "thread_id=%d\n", thread_id);
		rval = EXT_STATUS_DATA_OVERRUN;
		break;

	case CS_BIDIR_RD_OVERRUN_WR_UNDERRUN:
		ql_dbg(ql_dbg_user, vha, 0x70b5,
		    "Command completed with read data over and write data "
		    "underrun thread_id=%d\n", thread_id);
		rval = EXT_STATUS_DATA_OVERRUN;
		break;

	case CS_BIDIR_RD_UNDERRUN:
		ql_dbg(ql_dbg_user, vha, 0x70b6,
		    "Command completed with read data underrun "
		    "thread_id=%d\n", thread_id);
		rval = EXT_STATUS_DATA_UNDERRUN;
		break;

	case CS_BIDIR_RD_UNDERRUN_WR_OVERRUN:
		ql_dbg(ql_dbg_user, vha, 0x70b7,
		    "Command completed with read data under and write data "
		    "overrun thread_id=%d\n", thread_id);
		rval = EXT_STATUS_DATA_UNDERRUN;
		break;

	case CS_BIDIR_RD_WR_UNDERRUN:
		ql_dbg(ql_dbg_user, vha, 0x70b8,
		    "Command completed with read and write data underrun "
		    "thread_id=%d\n", thread_id);
		rval = EXT_STATUS_DATA_UNDERRUN;
		break;

	case CS_BIDIR_DMA:
		ql_dbg(ql_dbg_user, vha, 0x70b9,
		    "Command completed with data DMA error thread_id=%d\n",
		    thread_id);
		rval = EXT_STATUS_DMA_ERR;
		break;

	case CS_TIMEOUT:
		ql_dbg(ql_dbg_user, vha, 0x70ba,
		    "Command completed with timeout thread_id=%d\n",
		    thread_id);
		rval = EXT_STATUS_TIMEOUT;
		break;
	default:
		ql_dbg(ql_dbg_user, vha, 0x70bb,
		    "Command completed with completion status=0x%x "
		    "thread_id=%d\n", comp_status, thread_id);
		rval = EXT_STATUS_ERR;
		break;
	}
	bsg_reply->reply_payload_rcv_len = 0;

done:
	/* Return the vendor specific reply to API */
	bsg_reply->reply_data.vendor_reply.vendor_rsp[0] = rval;
	bsg_job->reply_len = sizeof(struct fc_bsg_reply);

	/* Always return DID_OK; bsg sends the vendor specific response in
	 * this case only. The host byte lives in bits 16-23 of the result,
	 * hence the shift by 16. */
	sp->done(sp, DID_OK << 16);

}

/**
 * qla2x00_status_entry() - Process a Status IOCB entry.
 * @vha: SCSI driver HA context
 * @rsp: response queue
 * @pkt: Entry pointer
 */
static void
qla2x00_status_entry(scsi_qla_host_t *vha, struct rsp_que *rsp, void *pkt)
{
	srb_t *sp;
	fc_port_t *fcport;
	struct scsi_cmnd *cp;
	sts_entry_t *sts;
	struct sts_entry_24xx *sts24;
	uint16_t comp_status;
	uint16_t scsi_status;
	uint16_t ox_id;
	uint8_t lscsi_status;
	int32_t resid;
	uint32_t sense_len, par_sense_len, rsp_info_len, resid_len,
	    fw_resid_len;
	uint8_t *rsp_info, *sense_data;
	struct qla_hw_data *ha = vha->hw;
	uint32_t handle;
	uint16_t que;
	struct req_que *req;
	int logit = 1;
	int res = 0;
	uint16_t state_flags = 0;
	uint16_t retry_delay = 0;
	uint8_t no_logout = 0;

	sts = (sts_entry_t *) pkt;
	sts24 = (struct sts_entry_24xx *) pkt;
	if (IS_FWI2_CAPABLE(ha)) {
		comp_status = le16_to_cpu(sts24->comp_status);
		scsi_status = le16_to_cpu(sts24->scsi_status) & SS_MASK;
		state_flags = le16_to_cpu(sts24->state_flags);
	} else {
		comp_status = le16_to_cpu(sts->comp_status);
		scsi_status = le16_to_cpu(sts->scsi_status) & SS_MASK;
	}
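	/* The request-queue index rides in the MSW of the IOCB handle. */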
	handle = (uint32_t) LSW(sts->handle);
	que = MSW(sts->handle);
	req = ha->req_q_map[que];

	/* Check for invalid queue pointer */
	if (req == NULL ||
	    que >= find_first_zero_bit(ha->req_qid_map, ha->max_req_queues)) {
		ql_dbg(ql_dbg_io, vha, 0x3059,
		    "Invalid status handle (0x%x): Bad req pointer. req=%p, "
		    "que=%u.\n", sts->handle, req, que);
		return;
	}

	/* Validate handle. */
	if (handle < req->num_outstanding_cmds) {
		sp = req->outstanding_cmds[handle];
		if (!sp) {
			ql_dbg(ql_dbg_io, vha, 0x3075,
			    "%s(%ld): Already returned command for status handle (0x%x).\n",
			    __func__, vha->host_no, sts->handle);
			return;
		}
	} else {
		ql_dbg(ql_dbg_io, vha, 0x3017,
		    "Invalid status handle, out of range (0x%x).\n",
		    sts->handle);

		if (!test_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags)) {
			if (IS_P3P_TYPE(ha))
				set_bit(FCOE_CTX_RESET_NEEDED, &vha->dpc_flags);
			else
				set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
			qla2xxx_wake_dpc(vha);
		}
		return;
	}

	if (sp->cmd_type != TYPE_SRB) {
		req->outstanding_cmds[handle] = NULL;
		ql_dbg(ql_dbg_io, vha, 0x3015,
		    "Unknown sp->cmd_type %x %p).\n",
		    sp->cmd_type, sp);
		return;
	}

	/* NVME completion. */
	if (sp->type == SRB_NVME_CMD) {
		qla24xx_nvme_iocb_entry(vha, req, pkt);
		return;
	}

	if (unlikely((state_flags & BIT_1) && (sp->type == SRB_BIDI_CMD))) {
		qla25xx_process_bidir_status_iocb(vha, pkt, req, handle);
		return;
	}

	/* Task Management completion. */
	if (sp->type == SRB_TM_CMD) {
		qla24xx_tm_iocb_entry(vha, req, pkt);
		return;
	}

	/* Fast path completion. */
	if (comp_status == CS_COMPLETE && scsi_status == 0) {
		qla2x00_process_completed_request(vha, req, handle);

		return;
	}

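	/*
	 * Slow path: pull the command off the outstanding list and derive
	 * a detailed Linux status from the firmware completion status.
	 */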
	req->outstanding_cmds[handle] = NULL;
	cp = GET_CMD_SP(sp);
	if (cp == NULL) {
		ql_dbg(ql_dbg_io, vha, 0x3018,
		    "Command already returned (0x%x/%p).\n",
		    sts->handle, sp);

		return;
	}

	lscsi_status = scsi_status & STATUS_MASK;

	fcport = sp->fcport;

	ox_id = 0;
	sense_len = par_sense_len = rsp_info_len = resid_len =
	    fw_resid_len = 0;
	if (IS_FWI2_CAPABLE(ha)) {
		if (scsi_status & SS_SENSE_LEN_VALID)
			sense_len = le32_to_cpu(sts24->sense_len);
		if (scsi_status & SS_RESPONSE_INFO_LEN_VALID)
			rsp_info_len = le32_to_cpu(sts24->rsp_data_len);
		if (scsi_status & (SS_RESIDUAL_UNDER | SS_RESIDUAL_OVER))
			resid_len = le32_to_cpu(sts24->rsp_residual_count);
		if (comp_status == CS_DATA_UNDERRUN)
			fw_resid_len = le32_to_cpu(sts24->residual_len);
		rsp_info = sts24->data;
		sense_data = sts24->data;
		host_to_fcp_swap(sts24->data, sizeof(sts24->data));
		ox_id = le16_to_cpu(sts24->ox_id);
		par_sense_len = sizeof(sts24->data);

		if (sts24->retry_delay > 0 && sts24->retry_delay < 0xfff1)
			retry_delay = sts24->retry_delay;
	} else {
		if (scsi_status & SS_SENSE_LEN_VALID)
			sense_len = le16_to_cpu(sts->req_sense_length);
		if (scsi_status & SS_RESPONSE_INFO_LEN_VALID)
			rsp_info_len = le16_to_cpu(sts->rsp_info_len);
		resid_len = le32_to_cpu(sts->residual_length);
		rsp_info = sts->rsp_info;
		sense_data = sts->req_sense_data;
		par_sense_len = sizeof(sts->req_sense_data);
	}

	/* Check for any FCP transport errors. */
	if (scsi_status & SS_RESPONSE_INFO_LEN_VALID) {
		/* The response info precedes the sense data on FWI2 HBAs. */
		if (IS_FWI2_CAPABLE(ha)) {
			sense_data += rsp_info_len;
			par_sense_len -= rsp_info_len;
		}
		if (rsp_info_len > 3 && rsp_info[3]) {
			ql_dbg(ql_dbg_io, fcport->vha, 0x3019,
			    "FCP I/O protocol failure (0x%x/0x%x).\n",
			    rsp_info_len, rsp_info[3]);

			res = DID_BUS_BUSY << 16;
			goto out;
		}
	}

	/* Check for overrun. */
	if (IS_FWI2_CAPABLE(ha) && comp_status == CS_COMPLETE &&
	    scsi_status & SS_RESIDUAL_OVER)
		comp_status = CS_DATA_OVERRUN;

	/*
	 * Check retry_delay_timer value if we receive a busy or
	 * queue full.
	 */
	if (lscsi_status == SAM_STAT_TASK_SET_FULL ||
	    lscsi_status == SAM_STAT_BUSY)
		qla2x00_set_retry_delay_timestamp(fcport, retry_delay);

	/*
	 * Based on Host and scsi status generate status code for Linux
	 */
	switch (comp_status) {
	case CS_COMPLETE:
	case CS_QUEUE_FULL:
		if (scsi_status == 0) {
			res = DID_OK << 16;
			break;
		}
		if (scsi_status & (SS_RESIDUAL_UNDER | SS_RESIDUAL_OVER)) {
			resid = resid_len;
			scsi_set_resid(cp, resid);

			if (!lscsi_status &&
			    ((unsigned)(scsi_bufflen(cp) - resid) <
			     cp->underflow)) {
				ql_dbg(ql_dbg_io, fcport->vha, 0x301a,
				    "Mid-layer underflow detected (0x%x of 0x%x bytes).\n",
				    resid, scsi_bufflen(cp));

				res = DID_ERROR << 16;
				break;
			}
		}
		res = DID_OK << 16 | lscsi_status;

		if (lscsi_status == SAM_STAT_TASK_SET_FULL) {
			ql_dbg(ql_dbg_io, fcport->vha, 0x301b,
			    "QUEUE FULL detected.\n");
			break;
		}
		logit = 0;
		if (lscsi_status != SS_CHECK_CONDITION)
			break;

		memset(cp->sense_buffer, 0, SCSI_SENSE_BUFFERSIZE);
		if (!(scsi_status & SS_SENSE_LEN_VALID))
			break;

		qla2x00_handle_sense(sp, sense_data, par_sense_len, sense_len,
		    rsp, res);
		break;

	case CS_DATA_UNDERRUN:
		/* Use F/W calculated residual length. */
		resid = IS_FWI2_CAPABLE(ha) ? fw_resid_len : resid_len;
		scsi_set_resid(cp, resid);
		if (scsi_status & SS_RESIDUAL_UNDER) {
			if (IS_FWI2_CAPABLE(ha) && fw_resid_len != resid_len) {
				ql_dbg(ql_dbg_io, fcport->vha, 0x301d,
				    "Dropped frame(s) detected (0x%x of 0x%x bytes).\n",
				    resid, scsi_bufflen(cp));

				res = DID_ERROR << 16 | lscsi_status;
				goto check_scsi_status;
			}

			if (!lscsi_status &&
			    ((unsigned)(scsi_bufflen(cp) - resid) <
			     cp->underflow)) {
				ql_dbg(ql_dbg_io, fcport->vha, 0x301e,
				    "Mid-layer underflow detected (0x%x of 0x%x bytes).\n",
				    resid, scsi_bufflen(cp));

				res = DID_ERROR << 16;
				break;
			}
		} else if (lscsi_status != SAM_STAT_TASK_SET_FULL &&
		    lscsi_status != SAM_STAT_BUSY) {
			/*
			 * scsi status of task set and busy are considered to be
			 * task not completed.
			 */
			ql_dbg(ql_dbg_io, fcport->vha, 0x301f,
			    "Dropped frame(s) detected (0x%x of 0x%x bytes).\n",
			    resid, scsi_bufflen(cp));

			res = DID_ERROR << 16 | lscsi_status;
			goto check_scsi_status;
		} else {
			ql_dbg(ql_dbg_io, fcport->vha, 0x3030,
			    "scsi_status: 0x%x, lscsi_status: 0x%x\n",
			    scsi_status, lscsi_status);
		}

		res = DID_OK << 16 | lscsi_status;
		logit = 0;

check_scsi_status:
		/*
		 * Check to see if SCSI Status is non zero. If so report SCSI
		 * Status.
		 */
		if (lscsi_status != 0) {
			if (lscsi_status == SAM_STAT_TASK_SET_FULL) {
				ql_dbg(ql_dbg_io, fcport->vha, 0x3020,
				    "QUEUE FULL detected.\n");
				logit = 1;
				break;
			}
			if (lscsi_status != SS_CHECK_CONDITION)
				break;

			memset(cp->sense_buffer, 0, SCSI_SENSE_BUFFERSIZE);
			if (!(scsi_status & SS_SENSE_LEN_VALID))
				break;

			qla2x00_handle_sense(sp, sense_data, par_sense_len,
			    sense_len, rsp, res);
		}
		break;

	case CS_PORT_LOGGED_OUT:
		no_logout = 1;
		/* fall through */
	case CS_PORT_CONFIG_CHG:
	case CS_PORT_BUSY:
	case CS_INCOMPLETE:
	case CS_PORT_UNAVAILABLE:
	case CS_TIMEOUT:
	case CS_RESET:

		/*
		 * We are going to have the fc class block the rport
		 * while we try to recover so instruct the mid layer
		 * to requeue until the class decides how to handle this.
		 */
		res = DID_TRANSPORT_DISRUPTED << 16;

		if (comp_status == CS_TIMEOUT) {
			if (IS_FWI2_CAPABLE(ha))
				break;
			else if ((le16_to_cpu(sts->status_flags) &
			    SF_LOGOUT_SENT) == 0)
				break;
		}

		if (atomic_read(&fcport->state) == FCS_ONLINE) {
			ql_dbg(ql_dbg_disc, fcport->vha, 0x3021,
			    "Port to be marked lost on fcport=%02x%02x%02x, current "
			    "port state= %s comp_status %x.\n", fcport->d_id.b.domain,
			    fcport->d_id.b.area, fcport->d_id.b.al_pa,
			    port_state_str[atomic_read(&fcport->state)],
			    comp_status);

			if (no_logout)
				fcport->logout_on_delete = 0;

			qla2x00_mark_device_lost(fcport->vha, fcport, 1, 1);
			qlt_schedule_sess_for_deletion_lock(fcport);
		}

		break;

	case CS_ABORTED:
		res = DID_RESET << 16;
		break;

	case CS_DIF_ERROR:
		logit = qla2x00_handle_dif_error(sp, sts24);
		res = cp->result;
		break;

	case CS_TRANSPORT:
		res = DID_ERROR << 16;

		if (!IS_PI_SPLIT_DET_CAPABLE(ha))
			break;

		if (state_flags & BIT_4)
			scmd_printk(KERN_WARNING, cp,
			    "Unsupported device '%s' found.\n",
			    cp->device->vendor);
		break;

	default:
		res = DID_ERROR << 16;
		break;
	}

out:
	if (logit)
		ql_dbg(ql_dbg_io, fcport->vha, 0x3022,
		    "FCP command status: 0x%x-0x%x (0x%x) nexus=%ld:%d:%llu "
		    "portid=%02x%02x%02x oxid=0x%x cdb=%10phN len=0x%x "
		    "rsp_info=0x%x resid=0x%x fw_resid=0x%x sp=%p cp=%p.\n",
		    comp_status, scsi_status, res, vha->host_no,
		    cp->device->id, cp->device->lun, fcport->d_id.b.domain,
		    fcport->d_id.b.area, fcport->d_id.b.al_pa, ox_id,
		    cp->cmnd, scsi_bufflen(cp), rsp_info_len,
		    resid_len, fw_resid_len, sp, cp);

	if (rsp->status_srb == NULL)
		sp->done(sp, res);
}

/**
 * qla2x00_status_cont_entry() - Process a Status Continuations entry.
 * @rsp: response queue
 * @pkt: Entry pointer
 *
 * Extended sense data.
 */
static void
qla2x00_status_cont_entry(struct rsp_que *rsp, sts_cont_entry_t *pkt)
{
	uint8_t sense_sz = 0;
	struct qla_hw_data *ha = rsp->hw;
	struct scsi_qla_host *vha = pci_get_drvdata(ha->pdev);
	srb_t *sp = rsp->status_srb;
	struct scsi_cmnd *cp;
	uint32_t sense_len;
	uint8_t *sense_ptr;

	if (!sp || !GET_CMD_SENSE_LEN(sp))
		return;

	sense_len = GET_CMD_SENSE_LEN(sp);
	sense_ptr = GET_CMD_SENSE_PTR(sp);

	cp = GET_CMD_SP(sp);
	if (cp == NULL) {
		ql_log(ql_log_warn, vha, 0x3025,
		    "cmd is NULL: already returned to OS (sp=%p).\n", sp);

		rsp->status_srb = NULL;
		return;
	}

	if (sense_len > sizeof(pkt->data))
		sense_sz = sizeof(pkt->data);
	else
		sense_sz = sense_len;

	/* Move sense data. */
	if (IS_FWI2_CAPABLE(ha))
		host_to_fcp_swap(pkt->data, sizeof(pkt->data));
	memcpy(sense_ptr, pkt->data, sense_sz);
	ql_dump_buffer(ql_dbg_io + ql_dbg_buffer, vha, 0x302c,
	    sense_ptr, sense_sz);

	sense_len -= sense_sz;
	sense_ptr += sense_sz;

	SET_CMD_SENSE_PTR(sp, sense_ptr);
	SET_CMD_SENSE_LEN(sp, sense_len);

	/* Place command on done queue. */
	if (sense_len == 0) {
		rsp->status_srb = NULL;
		sp->done(sp, cp->result);
	}
}

/**
 * qla2x00_error_entry() - Process an error entry.
 * @vha: SCSI driver HA context
 * @rsp: response queue
 * @pkt: Entry pointer
 *
 * Returns 1 to drive further analysis of the IOCB, 0 otherwise.
 */
static int
qla2x00_error_entry(scsi_qla_host_t *vha, struct rsp_que *rsp, sts_entry_t *pkt)
{
	srb_t *sp;
	struct qla_hw_data *ha = vha->hw;
	const char func[] = "ERROR-IOCB";
	uint16_t que = MSW(pkt->handle);
	struct req_que *req = NULL;
	int res = DID_ERROR << 16;

	ql_dbg(ql_dbg_async, vha, 0x502a,
	    "iocb type %xh with error status %xh, handle %xh, rspq id %d\n",
	    pkt->entry_type, pkt->entry_status, pkt->handle, rsp->id);

	if (que >= ha->max_req_queues || !ha->req_q_map[que])
		goto fatal;

	req = ha->req_q_map[que];

	if (pkt->entry_status & RF_BUSY)
		res = DID_BUS_BUSY << 16;

	if ((pkt->handle & ~QLA_TGT_HANDLE_MASK) == QLA_TGT_SKIP_HANDLE)
		return 0;

	switch (pkt->entry_type) {
	case NOTIFY_ACK_TYPE:
	case STATUS_TYPE:
	case STATUS_CONT_TYPE:
	case LOGINOUT_PORT_IOCB_TYPE:
	case CT_IOCB_TYPE:
	case ELS_IOCB_TYPE:
	case ABORT_IOCB_TYPE:
	case MBX_IOCB_TYPE:
		sp = qla2x00_get_sp_from_handle(vha, func, req, pkt);
		if (sp) {
			sp->done(sp, res);
			return 0;
		}
		break;

	case ABTS_RESP_24XX:
	case CTIO_TYPE7:
	case CTIO_CRC2:
	default:
		return 1;
	}
fatal:
	ql_log(ql_log_warn, vha, 0x5030,
	    "Error entry - invalid handle/queue (%04x).\n", que);
	return 0;
}

/**
 * qla24xx_mbx_completion() - Process mailbox command completions.
 * @vha: SCSI driver HA context
 * @mb0: Mailbox0 register
 */
static void
qla24xx_mbx_completion(scsi_qla_host_t *vha, uint16_t mb0)
{
	uint16_t cnt;
	uint32_t mboxes;
	uint16_t __iomem *wptr;
	struct qla_hw_data *ha = vha->hw;
	struct device_reg_24xx __iomem *reg = &ha->iobase->isp24;

	/* Read all mbox registers? */
	mboxes = (1 << ha->mbx_count) - 1;
	if (!ha->mcp)
		ql_dbg(ql_dbg_async, vha, 0x504e, "MBX pointer ERROR.\n");
	else
		mboxes = ha->mcp->in_mb;

	/* Load return mailbox registers. */
	ha->flags.mbox_int = 1;
	ha->mailbox_out[0] = mb0;
	mboxes >>= 1;
	wptr = (uint16_t __iomem *)&reg->mailbox1;

	for (cnt = 1; cnt < ha->mbx_count; cnt++) {
		if (mboxes & BIT_0)
			ha->mailbox_out[cnt] = RD_REG_WORD(wptr);

		mboxes >>= 1;
		wptr++;
	}
}

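/* Completion of an earlier Abort IOCB: complete the abort SRB so its
 * issuer can proceed. */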
static void
qla24xx_abort_iocb_entry(scsi_qla_host_t *vha, struct req_que *req,
    struct abort_entry_24xx *pkt)
{
	const char func[] = "ABT_IOCB";
	srb_t *sp;
	struct srb_iocb *abt;

	sp = qla2x00_get_sp_from_handle(vha, func, req, pkt);
	if (!sp)
		return;

	abt = &sp->u.iocb_cmd;
	/* Firmware returns the completion status in the nport_handle field. */
	abt->u.abt.comp_status = le16_to_cpu(pkt->nport_handle);
	sp->done(sp, 0);
}

void qla24xx_nvme_ls4_iocb(scsi_qla_host_t *vha, struct pt_ls4_request *pkt,
    struct req_que *req)
{
	srb_t *sp;
	const char func[] = "LS4_IOCB";
	uint16_t comp_status;

	sp = qla2x00_get_sp_from_handle(vha, func, req, pkt);
	if (!sp)
		return;

	comp_status = le16_to_cpu(pkt->status);
	sp->done(sp, comp_status);
}

/**
 * qla24xx_process_response_queue() - Process response queue entries.
 * @vha: SCSI driver HA context
 * @rsp: response queue
 */
void qla24xx_process_response_queue(struct scsi_qla_host *vha,
	struct rsp_que *rsp)
{
	struct sts_entry_24xx *pkt;
	struct qla_hw_data *ha = vha->hw;

	if (!ha->flags.fw_started)
		return;

	if (rsp->qpair->cpuid != smp_processor_id())
		qla_cpu_update(rsp->qpair, smp_processor_id());

	while (rsp->ring_ptr->signature != RESPONSE_PROCESSED) {
		pkt = (struct sts_entry_24xx *)rsp->ring_ptr;

		rsp->ring_index++;
		if (rsp->ring_index == rsp->length) {
			rsp->ring_index = 0;
			rsp->ring_ptr = rsp->ring;
		} else {
			rsp->ring_ptr++;
		}

		if (pkt->entry_status != 0) {
			if (qla2x00_error_entry(vha, rsp, (sts_entry_t *) pkt))
				goto process_err;

			((response_t *)pkt)->signature = RESPONSE_PROCESSED;
			wmb();
			continue;
		}
process_err:

		switch (pkt->entry_type) {
		case STATUS_TYPE:
			qla2x00_status_entry(vha, rsp, pkt);
			break;
		case STATUS_CONT_TYPE:
			qla2x00_status_cont_entry(rsp, (sts_cont_entry_t *)pkt);
			break;
		case VP_RPT_ID_IOCB_TYPE:
			qla24xx_report_id_acquisition(vha,
			    (struct vp_rpt_id_entry_24xx *)pkt);
			break;
		case LOGINOUT_PORT_IOCB_TYPE:
			qla24xx_logio_entry(vha, rsp->req,
			    (struct logio_entry_24xx *)pkt);
			break;
		case CT_IOCB_TYPE:
			qla24xx_els_ct_entry(vha, rsp->req, pkt, CT_IOCB_TYPE);
			break;
		case ELS_IOCB_TYPE:
			qla24xx_els_ct_entry(vha, rsp->req, pkt, ELS_IOCB_TYPE);
			break;
		case ABTS_RECV_24XX:
			if (IS_QLA83XX(ha) || IS_QLA27XX(ha)) {
				/* ensure that the ATIO queue is empty */
				qlt_handle_abts_recv(vha, rsp,
				    (response_t *)pkt);
				break;
			} else {
				qlt_24xx_process_atio_queue(vha, 1);
			}
			/* fall through */
		case ABTS_RESP_24XX:
		case CTIO_TYPE7:
		case CTIO_CRC2:
			qlt_response_pkt_all_vps(vha, rsp, (response_t *)pkt);
			break;
		case PT_LS4_REQUEST:
			qla24xx_nvme_ls4_iocb(vha, (struct pt_ls4_request *)pkt,
			    rsp->req);
			break;
		case NOTIFY_ACK_TYPE:
			if (pkt->handle == QLA_TGT_SKIP_HANDLE)
				qlt_response_pkt_all_vps(vha, rsp,
				    (response_t *)pkt);
			else
				qla24xxx_nack_iocb_entry(vha, rsp->req,
				    (struct nack_to_isp *)pkt);
			break;
		case MARKER_TYPE:
			/* Do nothing in this case, this check is to prevent it
			 * from falling into default case
			 */
			break;
		case ABORT_IOCB_TYPE:
			qla24xx_abort_iocb_entry(vha, rsp->req,
			    (struct abort_entry_24xx *)pkt);
			break;
		case MBX_IOCB_TYPE:
			qla24xx_mbx_iocb_entry(vha, rsp->req,
			    (struct mbx_24xx_entry *)pkt);
			break;
		default:
			/* Type Not Supported. */
			ql_dbg(ql_dbg_async, vha, 0x5042,
			    "Received unknown response pkt type %x "
			    "entry status=%x.\n",
			    pkt->entry_type, pkt->entry_status);
			break;
		}
		((response_t *)pkt)->signature = RESPONSE_PROCESSED;
		wmb();
	}

	/* Adjust ring index */
	if (IS_P3P_TYPE(ha)) {
		struct device_reg_82xx __iomem *reg = &ha->iobase->isp82;
		WRT_REG_DWORD(&reg->rsp_q_out[0], rsp->ring_index);
	} else {
		WRT_REG_DWORD(rsp->rsp_q_out, rsp->ring_index);
	}
}

static void
qla2xxx_check_risc_status(scsi_qla_host_t *vha)
{
	int rval;
	uint32_t cnt;
	struct qla_hw_data *ha = vha->hw;
	struct device_reg_24xx __iomem *reg = &ha->iobase->isp24;

	if (!IS_QLA25XX(ha) && !IS_QLA81XX(ha) && !IS_QLA83XX(ha) &&
	    !IS_QLA27XX(ha))
		return;

	rval = QLA_SUCCESS;
	WRT_REG_DWORD(&reg->iobase_addr, 0x7C00);
	RD_REG_DWORD(&reg->iobase_addr);
	WRT_REG_DWORD(&reg->iobase_window, 0x0001);
	for (cnt = 10000; (RD_REG_DWORD(&reg->iobase_window) & BIT_0) == 0 &&
	    rval == QLA_SUCCESS; cnt--) {
		if (cnt) {
			WRT_REG_DWORD(&reg->iobase_window, 0x0001);
			udelay(10);
		} else
			rval = QLA_FUNCTION_TIMEOUT;
	}
	if (rval == QLA_SUCCESS)
		goto next_test;

	rval = QLA_SUCCESS;
	WRT_REG_DWORD(&reg->iobase_window, 0x0003);
	for (cnt = 100; (RD_REG_DWORD(&reg->iobase_window) & BIT_0) == 0 &&
	    rval == QLA_SUCCESS; cnt--) {
		if (cnt) {
			WRT_REG_DWORD(&reg->iobase_window, 0x0003);
			udelay(10);
		} else
			rval = QLA_FUNCTION_TIMEOUT;
	}
	if (rval != QLA_SUCCESS)
		goto done;

next_test:
	if (RD_REG_DWORD(&reg->iobase_c8) & BIT_3)
		ql_log(ql_log_info, vha, 0x504c,
		    "Additional code -- 0x55AA.\n");

done:
	WRT_REG_DWORD(&reg->iobase_window, 0x0000);
	RD_REG_DWORD(&reg->iobase_window);
}

/**
 * qla24xx_intr_handler() - Process interrupts for the ISP24xx.
 * @irq: interrupt number
 * @dev_id: SCSI driver HA context
 *
 * Called by system whenever the host adapter generates an interrupt.
 *
 * Returns handled flag.
 */
irqreturn_t
qla24xx_intr_handler(int irq, void *dev_id)
{
	scsi_qla_host_t *vha;
	struct qla_hw_data *ha;
	struct device_reg_24xx __iomem *reg;
	int status;
	unsigned long iter;
	uint32_t stat;
	uint32_t hccr;
	uint16_t mb[8];
	struct rsp_que *rsp;
	unsigned long flags;

	rsp = (struct rsp_que *) dev_id;
	if (!rsp) {
		ql_log(ql_log_info, NULL, 0x5059,
		    "%s: NULL response queue pointer.\n", __func__);
		return IRQ_NONE;
	}

	ha = rsp->hw;
	reg = &ha->iobase->isp24;
	status = 0;

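	/* Nothing to do if the PCI channel has gone away. */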
	if (unlikely(pci_channel_offline(ha->pdev)))
		return IRQ_HANDLED;

	spin_lock_irqsave(&ha->hardware_lock, flags);
	vha = pci_get_drvdata(ha->pdev);
	for (iter = 50; iter--; ) {
		stat = RD_REG_DWORD(&reg->host_status);
		if (qla2x00_check_reg32_for_disconnect(vha, stat))
			break;
		if (stat & HSRX_RISC_PAUSED) {
			if (unlikely(pci_channel_offline(ha->pdev)))
				break;

			hccr = RD_REG_DWORD(&reg->hccr);

			ql_log(ql_log_warn, vha, 0x504b,
			    "RISC paused -- HCCR=%x, Dumping firmware.\n",
			    hccr);

			qla2xxx_check_risc_status(vha);

			ha->isp_ops->fw_dump(vha, 1);
			set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
			break;
		} else if ((stat & HSRX_RISC_INT) == 0)
			break;

		switch (stat & 0xff) {
		case INTR_ROM_MB_SUCCESS:
		case INTR_ROM_MB_FAILED:
		case INTR_MB_SUCCESS:
		case INTR_MB_FAILED:
			qla24xx_mbx_completion(vha, MSW(stat));
			status |= MBX_INTERRUPT;

			break;
		case INTR_ASYNC_EVENT:
			mb[0] = MSW(stat);
			mb[1] = RD_REG_WORD(&reg->mailbox1);
			mb[2] = RD_REG_WORD(&reg->mailbox2);
			mb[3] = RD_REG_WORD(&reg->mailbox3);
			qla2x00_async_event(vha, rsp, mb);
			break;
		case INTR_RSP_QUE_UPDATE:
		case INTR_RSP_QUE_UPDATE_83XX:
			qla24xx_process_response_queue(vha, rsp);
			break;
		case INTR_ATIO_QUE_UPDATE:{
			unsigned long flags2;
			spin_lock_irqsave(&ha->tgt.atio_lock, flags2);
			qlt_24xx_process_atio_queue(vha, 1);
			spin_unlock_irqrestore(&ha->tgt.atio_lock, flags2);
			break;
		}
		case INTR_ATIO_RSP_QUE_UPDATE: {
			unsigned long flags2;
			spin_lock_irqsave(&ha->tgt.atio_lock, flags2);
			qlt_24xx_process_atio_queue(vha, 1);
			spin_unlock_irqrestore(&ha->tgt.atio_lock, flags2);

			qla24xx_process_response_queue(vha, rsp);
			break;
		}
		default:
			ql_dbg(ql_dbg_async, vha, 0x504f,
			    "Unrecognized interrupt type (%d).\n", stat & 0xff);
			break;
		}
		WRT_REG_DWORD(&reg->hccr, HCCRX_CLR_RISC_INT);
		RD_REG_DWORD_RELAXED(&reg->hccr);
		if (unlikely(IS_QLA83XX(ha) && (ha->pdev->revision == 1)))
			ndelay(3500);
	}
	qla2x00_handle_mbx_completion(ha, status);
	spin_unlock_irqrestore(&ha->hardware_lock, flags);

	return IRQ_HANDLED;
}

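/* MSI-X vector for response-queue updates: drain one pass of the ring. */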
static irqreturn_t
qla24xx_msix_rsp_q(int irq, void *dev_id)
{
	struct qla_hw_data *ha;
	struct rsp_que *rsp;
	struct device_reg_24xx __iomem *reg;
	struct scsi_qla_host *vha;
	unsigned long flags;
	uint32_t stat = 0;

	rsp = (struct rsp_que *) dev_id;
	if (!rsp) {
		ql_log(ql_log_info, NULL, 0x505a,
		    "%s: NULL response queue pointer.\n", __func__);
		return IRQ_NONE;
	}
	ha = rsp->hw;
	reg = &ha->iobase->isp24;

	spin_lock_irqsave(&ha->hardware_lock, flags);

	vha = pci_get_drvdata(ha->pdev);
	/*
	 * Use the host_status register to check for PCI disconnection
	 * before we process the response queue.
	 */
	stat = RD_REG_DWORD(&reg->host_status);
	if (qla2x00_check_reg32_for_disconnect(vha, stat))
		goto out;
	qla24xx_process_response_queue(vha, rsp);
	if (!ha->flags.disable_msix_handshake) {
		WRT_REG_DWORD(&reg->hccr, HCCRX_CLR_RISC_INT);
		RD_REG_DWORD_RELAXED(&reg->hccr);
	}
out:
	spin_unlock_irqrestore(&ha->hardware_lock, flags);

	return IRQ_HANDLED;
}

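/*
 * Default MSI-X vector: handles mailbox completions, async events, and
 * response/ATIO queue updates not routed to a dedicated vector.
 */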
static irqreturn_t
qla24xx_msix_default(int irq, void *dev_id)
{
	scsi_qla_host_t *vha;
	struct qla_hw_data *ha;
	struct rsp_que *rsp;
	struct device_reg_24xx __iomem *reg;
	int status;
	uint32_t stat;
	uint32_t hccr;
	uint16_t mb[8];
	unsigned long flags;

	rsp = (struct rsp_que *) dev_id;
	if (!rsp) {
		ql_log(ql_log_info, NULL, 0x505c,
		    "%s: NULL response queue pointer.\n", __func__);
		return IRQ_NONE;
	}
	ha = rsp->hw;
	reg = &ha->iobase->isp24;
	status = 0;

	spin_lock_irqsave(&ha->hardware_lock, flags);
	vha = pci_get_drvdata(ha->pdev);
	do {
		stat = RD_REG_DWORD(&reg->host_status);
		if (qla2x00_check_reg32_for_disconnect(vha, stat))
			break;
		if (stat & HSRX_RISC_PAUSED) {
			if (unlikely(pci_channel_offline(ha->pdev)))
				break;

			hccr = RD_REG_DWORD(&reg->hccr);

			ql_log(ql_log_info, vha, 0x5050,
			    "RISC paused -- HCCR=%x, Dumping firmware.\n",
			    hccr);

			qla2xxx_check_risc_status(vha);

			ha->isp_ops->fw_dump(vha, 1);
			set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
			break;
		} else if ((stat & HSRX_RISC_INT) == 0)
			break;

		switch (stat & 0xff) {
		case INTR_ROM_MB_SUCCESS:
		case INTR_ROM_MB_FAILED:
		case INTR_MB_SUCCESS:
		case INTR_MB_FAILED:
			qla24xx_mbx_completion(vha, MSW(stat));
			status |= MBX_INTERRUPT;

			break;
		case INTR_ASYNC_EVENT:
			mb[0] = MSW(stat);
			mb[1] = RD_REG_WORD(&reg->mailbox1);
			mb[2] = RD_REG_WORD(&reg->mailbox2);
			mb[3] = RD_REG_WORD(&reg->mailbox3);
			qla2x00_async_event(vha, rsp, mb);
			break;
		case INTR_RSP_QUE_UPDATE:
		case INTR_RSP_QUE_UPDATE_83XX:
			qla24xx_process_response_queue(vha, rsp);
			break;
		case INTR_ATIO_QUE_UPDATE:{
			unsigned long flags2;
			spin_lock_irqsave(&ha->tgt.atio_lock, flags2);
			qlt_24xx_process_atio_queue(vha, 1);
			spin_unlock_irqrestore(&ha->tgt.atio_lock, flags2);
			break;
		}
		case INTR_ATIO_RSP_QUE_UPDATE: {
			unsigned long flags2;
			spin_lock_irqsave(&ha->tgt.atio_lock, flags2);
			qlt_24xx_process_atio_queue(vha, 1);
			spin_unlock_irqrestore(&ha->tgt.atio_lock, flags2);

			qla24xx_process_response_queue(vha, rsp);
			break;
		}
		default:
			ql_dbg(ql_dbg_async, vha, 0x5051,
			    "Unrecognized interrupt type (%d).\n", stat & 0xff);
			break;
		}
		WRT_REG_DWORD(&reg->hccr, HCCRX_CLR_RISC_INT);
	} while (0);
	qla2x00_handle_mbx_completion(ha, status);
	spin_unlock_irqrestore(&ha->hardware_lock, flags);

	return IRQ_HANDLED;
}

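/*
 * Per-queue-pair MSI-X vector: acknowledge the interrupt and defer the
 * actual ring processing to the queue pair's work item.
 */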
irqreturn_t
qla2xxx_msix_rsp_q(int irq, void *dev_id)
{
	struct qla_hw_data *ha;
	struct qla_qpair *qpair;
	struct device_reg_24xx __iomem *reg;
	unsigned long flags;

	qpair = dev_id;
	if (!qpair) {
		ql_log(ql_log_info, NULL, 0x505b,
		    "%s: NULL response queue pointer.\n", __func__);
		return IRQ_NONE;
	}
	ha = qpair->hw;

	/* Clear the interrupt, if enabled, for this response queue */
	if (unlikely(!ha->flags.disable_msix_handshake)) {
		reg = &ha->iobase->isp24;
		spin_lock_irqsave(&ha->hardware_lock, flags);
		WRT_REG_DWORD(&reg->hccr, HCCRX_CLR_RISC_INT);
		spin_unlock_irqrestore(&ha->hardware_lock, flags);
	}

	queue_work(ha->wq, &qpair->q_work);

	return IRQ_HANDLED;
}

/* Interrupt handling helpers. */

struct qla_init_msix_entry {
	const char *name;
	irq_handler_t handler;
};

static const struct qla_init_msix_entry msix_entries[] = {
	{ "default", qla24xx_msix_default },
	{ "rsp_q", qla24xx_msix_rsp_q },
	{ "atio_q", qla83xx_msix_atio_q },
	{ "qpair_multiq", qla2xxx_msix_rsp_q },
};
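/* The table above is indexed by MSI-X vector number; QLA_ATIO_VECTOR
 * selects the "atio_q" slot when target mode is enabled. */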

static const struct qla_init_msix_entry qla82xx_msix_entries[] = {
	{ "qla2xxx (default)", qla82xx_msix_default },
	{ "qla2xxx (rsp_q)", qla82xx_msix_rsp_q },
};

static int
qla24xx_enable_msix(struct qla_hw_data *ha, struct rsp_que *rsp)
{
	int i, ret;
	struct qla_msix_entry *qentry;
	scsi_qla_host_t *vha = pci_get_drvdata(ha->pdev);
	int min_vecs = QLA_BASE_VECTORS;
	struct irq_affinity desc = {
		.pre_vectors = QLA_BASE_VECTORS,
	};

	if (QLA_TGT_MODE_ENABLED() && IS_ATIO_MSIX_CAPABLE(ha)) {
		desc.pre_vectors++;
		min_vecs++;
	}

	if (USER_CTRL_IRQ(ha)) {
		/* user wants to control IRQ setting for target mode */
		ret = pci_alloc_irq_vectors(ha->pdev, min_vecs,
		    ha->msix_count, PCI_IRQ_MSIX);
	} else
		ret = pci_alloc_irq_vectors_affinity(ha->pdev, min_vecs,
		    ha->msix_count, PCI_IRQ_MSIX | PCI_IRQ_AFFINITY,
		    &desc);

	if (ret < 0) {
		ql_log(ql_log_fatal, vha, 0x00c7,
		    "MSI-X: Failed to enable support, "
		    "giving up -- %d/%d.\n",
		    ha->msix_count, ret);
		goto msix_out;
	} else if (ret < ha->msix_count) {
		ql_log(ql_log_warn, vha, 0x00c6,
		    "MSI-X: Failed to enable support "
		    "with %d vectors, using %d vectors.\n",
		    ha->msix_count, ret);
		ha->msix_count = ret;
		/* Recalculate queue values */
		if (ha->mqiobase && ql2xmqsupport) {
			ha->max_req_queues = ha->msix_count - 1;

			/* ATIOQ needs 1 vector. That's 1 less QPair */
			if (QLA_TGT_MODE_ENABLED())
				ha->max_req_queues--;

			ha->max_rsp_queues = ha->max_req_queues;

			ha->max_qpairs = ha->max_req_queues - 1;
			ql_dbg_pci(ql_dbg_init, ha->pdev, 0x0190,
			    "Adjusted Max no of queues pairs: %d.\n", ha->max_qpairs);
		}
	}
	ha->msix_entries = kzalloc(sizeof(struct qla_msix_entry) *
	    ha->msix_count, GFP_KERNEL);
	if (!ha->msix_entries) {
		ql_log(ql_log_fatal, vha, 0x00c8,
		    "Failed to allocate memory for ha->msix_entries.\n");
		ret = -ENOMEM;
		goto msix_out;
	}
	ha->flags.msix_enabled = 1;

	for (i = 0; i < ha->msix_count; i++) {
		qentry = &ha->msix_entries[i];
		qentry->vector = pci_irq_vector(ha->pdev, i);
		qentry->entry = i;
		qentry->have_irq = 0;
		qentry->in_use = 0;
		qentry->handle = NULL;
	}

	/* Enable MSI-X vectors for the base queue */
	for (i = 0; i < QLA_BASE_VECTORS; i++) {
		qentry = &ha->msix_entries[i];
		qentry->handle = rsp;
		rsp->msix = qentry;
		scnprintf(qentry->name, sizeof(qentry->name),
		    "qla2xxx%lu_%s", vha->host_no, msix_entries[i].name);
		if (IS_P3P_TYPE(ha))
			ret = request_irq(qentry->vector,
			    qla82xx_msix_entries[i].handler,
			    0, qla82xx_msix_entries[i].name, rsp);
		else
			ret = request_irq(qentry->vector,
			    msix_entries[i].handler,
			    0, qentry->name, rsp);
		if (ret)
			goto msix_register_fail;
		qentry->have_irq = 1;
		qentry->in_use = 1;
	}

	/*
	 * If target mode is enabled, also request the vector for the ATIO
	 * queue.
	 */
	if (QLA_TGT_MODE_ENABLED() && IS_ATIO_MSIX_CAPABLE(ha)) {
		qentry = &ha->msix_entries[QLA_ATIO_VECTOR];
		rsp->msix = qentry;
		qentry->handle = rsp;
		scnprintf(qentry->name, sizeof(qentry->name),
		    "qla2xxx%lu_%s", vha->host_no,
		    msix_entries[QLA_ATIO_VECTOR].name);
		qentry->in_use = 1;
		ret = request_irq(qentry->vector,
		    msix_entries[QLA_ATIO_VECTOR].handler,
		    0, qentry->name, rsp);
		qentry->have_irq = 1;
	}

msix_register_fail:
	if (ret) {
		ql_log(ql_log_fatal, vha, 0x00cb,
		    "MSI-X: unable to register handler -- %x/%d.\n",
		    qentry->vector, ret);
		qla2x00_free_irqs(vha);
		ha->mqenable = 0;
		goto msix_out;
	}

	/* Enable MSI-X vector for response queue update for queue 0 */
	if (IS_QLA25XX(ha) || IS_QLA83XX(ha) || IS_QLA27XX(ha)) {
		if (ha->msixbase && ha->mqiobase &&
		    (ha->max_rsp_queues > 1 || ha->max_req_queues > 1 ||
		     ql2xmqsupport))
			ha->mqenable = 1;
	} else
		if (ha->mqiobase &&
		    (ha->max_rsp_queues > 1 || ha->max_req_queues > 1 ||
		     ql2xmqsupport))
			ha->mqenable = 1;
	ql_dbg(ql_dbg_multiq, vha, 0xc005,
	    "mqiobase=%p, max_rsp_queues=%d, max_req_queues=%d.\n",
	    ha->mqiobase, ha->max_rsp_queues, ha->max_req_queues);
	ql_dbg(ql_dbg_init, vha, 0x0055,
	    "mqiobase=%p, max_rsp_queues=%d, max_req_queues=%d.\n",
	    ha->mqiobase, ha->max_rsp_queues, ha->max_req_queues);

msix_out:
	return ret;
}

int
qla2x00_request_irqs(struct qla_hw_data *ha, struct rsp_que *rsp)
{
	int ret = QLA_FUNCTION_FAILED;
	device_reg_t *reg = ha->iobase;
	scsi_qla_host_t *vha = pci_get_drvdata(ha->pdev);

	/* If possible, enable MSI-X. */
	if (!IS_QLA2432(ha) && !IS_QLA2532(ha) && !IS_QLA8432(ha) &&
	    !IS_CNA_CAPABLE(ha) && !IS_QLA2031(ha) && !IS_QLAFX00(ha) &&
	    !IS_QLA27XX(ha))
		goto skip_msi;

	if (ha->pdev->subsystem_vendor == PCI_VENDOR_ID_HP &&
	    (ha->pdev->subsystem_device == 0x7040 ||
	     ha->pdev->subsystem_device == 0x7041 ||
	     ha->pdev->subsystem_device == 0x1705)) {
		ql_log(ql_log_warn, vha, 0x0034,
		    "MSI-X: Unsupported ISP 2432 SSVID/SSDID (0x%X,0x%X).\n",
		    ha->pdev->subsystem_vendor,
		    ha->pdev->subsystem_device);
		goto skip_msi;
	}

	if (IS_QLA2432(ha) && (ha->pdev->revision < QLA_MSIX_CHIP_REV_24XX)) {
		ql_log(ql_log_warn, vha, 0x0035,
		    "MSI-X: Unsupported ISP2432 (0x%X, 0x%X).\n",
		    ha->pdev->revision, QLA_MSIX_CHIP_REV_24XX);
		goto skip_msix;
	}

	ret = qla24xx_enable_msix(ha, rsp);
	if (!ret) {
		ql_dbg(ql_dbg_init, vha, 0x0036,
		    "MSI-X: Enabled (0x%X, 0x%X).\n",
		    ha->chip_revision, ha->fw_attributes);
		goto clear_risc_ints;
	}

skip_msix:

	ql_log(ql_log_info, vha, 0x0037,
	    "Falling back to MSI mode -- %d.\n", ret);

	if (!IS_QLA24XX(ha) && !IS_QLA2532(ha) && !IS_QLA8432(ha) &&
	    !IS_QLA8001(ha) && !IS_P3P_TYPE(ha) && !IS_QLAFX00(ha) &&
	    !IS_QLA27XX(ha))
		goto skip_msi;

	ret = pci_alloc_irq_vectors(ha->pdev, 1, 1, PCI_IRQ_MSI);
	if (!ret) {
		ql_dbg(ql_dbg_init, vha, 0x0038,
		    "MSI: Enabled.\n");
		ha->flags.msi_enabled = 1;
	} else
		ql_log(ql_log_warn, vha, 0x0039,
		    "Falling back to INTa mode -- %d.\n", ret);
skip_msi:

	/* Skip INTx on ISP82xx. */
	if (!ha->flags.msi_enabled && IS_QLA82XX(ha))
		return QLA_FUNCTION_FAILED;

	ret = request_irq(ha->pdev->irq, ha->isp_ops->intr_handler,
	    ha->flags.msi_enabled ? 0 : IRQF_SHARED,
	    QLA2XXX_DRIVER_NAME, rsp);
	if (ret) {
		ql_log(ql_log_warn, vha, 0x003a,
		    "Failed to reserve interrupt %d; already in use.\n",
		    ha->pdev->irq);
		goto fail;
	} else if (!ha->flags.msi_enabled) {
		ql_dbg(ql_dbg_init, vha, 0x0125,
		    "INTa mode: Enabled.\n");
		ha->flags.mr_intr_valid = 1;
	}

clear_risc_ints:
	if (IS_FWI2_CAPABLE(ha) || IS_QLAFX00(ha))
		goto fail;

	spin_lock_irq(&ha->hardware_lock);
	WRT_REG_WORD(&reg->isp.semaphore, 0);
	spin_unlock_irq(&ha->hardware_lock);

fail:
	return ret;
}

void
qla2x00_free_irqs(scsi_qla_host_t *vha)
{
	struct qla_hw_data *ha = vha->hw;
	struct rsp_que *rsp;
	struct qla_msix_entry *qentry;
	int i;

	/*
	 * We need to check that ha->rsp_q_map is valid in case we are called
	 * from a probe failure context that can occur after we've allocated
	 * irq resources but before the response queues are set up.
	 */
	if (!ha->rsp_q_map || !ha->rsp_q_map[0])
		goto free_irqs;
	rsp = ha->rsp_q_map[0];

	if (ha->flags.msix_enabled) {
		for (i = 0; i < ha->msix_count; i++) {
			qentry = &ha->msix_entries[i];
			if (qentry->have_irq) {
				irq_set_affinity_notifier(qentry->vector, NULL);
				free_irq(pci_irq_vector(ha->pdev, i), qentry->handle);
			}
		}
		kfree(ha->msix_entries);
		ha->msix_entries = NULL;
		ha->flags.msix_enabled = 0;
		ql_dbg(ql_dbg_init, vha, 0x0042,
		    "Disabled MSI-X.\n");
	} else {
		free_irq(pci_irq_vector(ha->pdev, 0), rsp);
	}

free_irqs:
	pci_free_irq_vectors(ha->pdev);
}

int qla25xx_request_irq(struct qla_hw_data *ha, struct qla_qpair *qpair,
	struct qla_msix_entry *msix, int vector_type)
{
	const struct qla_init_msix_entry *intr = &msix_entries[vector_type];
	scsi_qla_host_t *vha = pci_get_drvdata(ha->pdev);
	int ret;

	scnprintf(msix->name, sizeof(msix->name),
	    "qla2xxx%lu_qpair%d", vha->host_no, qpair->id);
	ret = request_irq(msix->vector, intr->handler, 0, msix->name, qpair);
	if (ret) {
		ql_log(ql_log_fatal, vha, 0x00e6,
		    "MSI-X: Unable to register handler -- %x/%d.\n",
		    msix->vector, ret);
		return ret;
	}
	msix->have_irq = 1;
	msix->handle = qpair;
	return ret;
}
