1
2
3
4
5
6
7#include "qla_def.h"
8#include "qla_target.h"
9
10#include <linux/delay.h>
11#include <linux/slab.h>
12#include <scsi/scsi_tcq.h>
13#include <scsi/scsi_bsg_fc.h>
14#include <scsi/scsi_eh.h>
15
16static void qla2x00_mbx_completion(scsi_qla_host_t *, uint16_t);
17static void qla2x00_status_entry(scsi_qla_host_t *, struct rsp_que *, void *);
18static void qla2x00_status_cont_entry(struct rsp_que *, sts_cont_entry_t *);
19static void qla2x00_error_entry(scsi_qla_host_t *, struct rsp_que *,
20 sts_entry_t *);
21
22
23
24
25
26
27
28
29
30
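/**
 * qla2100_intr_handler() - Process interrupts for the ISP2100 and ISP2200.
 * @irq: interrupt vector (unused here)
 * @dev_id: response queue registered for this vector
 *
 * Called whenever the host adapter generates an interrupt.
 *
 * Returns IRQ_HANDLED, or IRQ_NONE when no response queue was supplied.
 */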
31irqreturn_t
32qla2100_intr_handler(int irq, void *dev_id)
33{
34 scsi_qla_host_t *vha;
35 struct qla_hw_data *ha;
36 struct device_reg_2xxx __iomem *reg;
37 int status;
38 unsigned long iter;
39 uint16_t hccr;
40 uint16_t mb[4];
41 struct rsp_que *rsp;
42 unsigned long flags;
43
44 rsp = (struct rsp_que *) dev_id;
45 if (!rsp) {
46 ql_log(ql_log_info, NULL, 0x505d,
47 "%s: NULL response queue pointer.\n", __func__);
48 return (IRQ_NONE);
49 }
50
51 ha = rsp->hw;
52 reg = &ha->iobase->isp;
53 status = 0;
54
55 spin_lock_irqsave(&ha->hardware_lock, flags);
56 vha = pci_get_drvdata(ha->pdev);
57 for (iter = 50; iter--; ) {
58 hccr = RD_REG_WORD(&reg->hccr);
59 if (hccr & HCCR_RISC_PAUSE) {
60 if (pci_channel_offline(ha->pdev))
61 break;
62
63
64
65
66
67
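			/*
			 * Issue a "HARD" reset in order for the RISC
			 * interrupt bit to be cleared.  Schedule a big
			 * hammer to get out of the RISC PAUSED state.
			 */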
68 WRT_REG_WORD(&reg->hccr, HCCR_RESET_RISC);
69 RD_REG_WORD(&reg->hccr);
70
71 ha->isp_ops->fw_dump(vha, 1);
72 set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
73 break;
74 } else if ((RD_REG_WORD(&reg->istatus) & ISR_RISC_INT) == 0)
75 break;
76
77 if (RD_REG_WORD(&reg->semaphore) & BIT_0) {
78 WRT_REG_WORD(&reg->hccr, HCCR_CLR_RISC_INT);
79 RD_REG_WORD(&reg->hccr);
80
81
82 mb[0] = RD_MAILBOX_REG(ha, reg, 0);
83 if (mb[0] > 0x3fff && mb[0] < 0x8000) {
84 qla2x00_mbx_completion(vha, mb[0]);
85 status |= MBX_INTERRUPT;
86 } else if (mb[0] > 0x7fff && mb[0] < 0xc000) {
87 mb[1] = RD_MAILBOX_REG(ha, reg, 1);
88 mb[2] = RD_MAILBOX_REG(ha, reg, 2);
89 mb[3] = RD_MAILBOX_REG(ha, reg, 3);
90 qla2x00_async_event(vha, rsp, mb);
91 } else {
92
93 ql_dbg(ql_dbg_async, vha, 0x5025,
94 "Unrecognized interrupt type (%d).\n",
95 mb[0]);
96 }
97
98 WRT_REG_WORD(&reg->semaphore, 0);
99 RD_REG_WORD(&reg->semaphore);
100 } else {
101 qla2x00_process_response_queue(rsp);
102
103 WRT_REG_WORD(&reg->hccr, HCCR_CLR_RISC_INT);
104 RD_REG_WORD(&reg->hccr);
105 }
106 }
107 qla2x00_handle_mbx_completion(ha, status);
108 spin_unlock_irqrestore(&ha->hardware_lock, flags);
109
110 return (IRQ_HANDLED);
111}
112
113
114
115
116
117
118
119
120
121
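/**
 * qla2300_intr_handler() - Process interrupts for ISP23xx-class adapters.
 * @irq: interrupt vector (unused here)
 * @dev_id: response queue registered for this vector
 *
 * Called whenever the host adapter generates an interrupt.
 *
 * Returns IRQ_HANDLED, or IRQ_NONE when no response queue was supplied.
 */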
122irqreturn_t
123qla2300_intr_handler(int irq, void *dev_id)
124{
125 scsi_qla_host_t *vha;
126 struct device_reg_2xxx __iomem *reg;
127 int status;
128 unsigned long iter;
129 uint32_t stat;
130 uint16_t hccr;
131 uint16_t mb[4];
132 struct rsp_que *rsp;
133 struct qla_hw_data *ha;
134 unsigned long flags;
135
136 rsp = (struct rsp_que *) dev_id;
137 if (!rsp) {
138 ql_log(ql_log_info, NULL, 0x5058,
139 "%s: NULL response queue pointer.\n", __func__);
140 return (IRQ_NONE);
141 }
142
143 ha = rsp->hw;
144 reg = &ha->iobase->isp;
145 status = 0;
146
147 spin_lock_irqsave(&ha->hardware_lock, flags);
148 vha = pci_get_drvdata(ha->pdev);
149 for (iter = 50; iter--; ) {
150 stat = RD_REG_DWORD(&reg->u.isp2300.host_status);
151 if (stat & HSR_RISC_PAUSED) {
152 if (unlikely(pci_channel_offline(ha->pdev)))
153 break;
154
155 hccr = RD_REG_WORD(&reg->hccr);
156 if (hccr & (BIT_15 | BIT_13 | BIT_11 | BIT_8))
157 ql_log(ql_log_warn, vha, 0x5026,
158 "Parity error -- HCCR=%x, Dumping "
159 "firmware.\n", hccr);
160 else
161 ql_log(ql_log_warn, vha, 0x5027,
162 "RISC paused -- HCCR=%x, Dumping "
163 "firmware.\n", hccr);
164
165
166
167
168
169
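			/*
			 * Issue a "HARD" reset in order for the RISC
			 * interrupt bit to be cleared.  Schedule a big
			 * hammer to get out of the RISC PAUSED state.
			 */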
170 WRT_REG_WORD(&reg->hccr, HCCR_RESET_RISC);
171 RD_REG_WORD(&reg->hccr);
172
173 ha->isp_ops->fw_dump(vha, 1);
174 set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
175 break;
176 } else if ((stat & HSR_RISC_INT) == 0)
177 break;
178
179 switch (stat & 0xff) {
180 case 0x1:
181 case 0x2:
182 case 0x10:
183 case 0x11:
184 qla2x00_mbx_completion(vha, MSW(stat));
185 status |= MBX_INTERRUPT;
186
187
188 WRT_REG_WORD(&reg->semaphore, 0);
189 break;
190 case 0x12:
191 mb[0] = MSW(stat);
192 mb[1] = RD_MAILBOX_REG(ha, reg, 1);
193 mb[2] = RD_MAILBOX_REG(ha, reg, 2);
194 mb[3] = RD_MAILBOX_REG(ha, reg, 3);
195 qla2x00_async_event(vha, rsp, mb);
196 break;
197 case 0x13:
198 qla2x00_process_response_queue(rsp);
199 break;
200 case 0x15:
201 mb[0] = MBA_CMPLT_1_16BIT;
202 mb[1] = MSW(stat);
203 qla2x00_async_event(vha, rsp, mb);
204 break;
205 case 0x16:
206 mb[0] = MBA_SCSI_COMPLETION;
207 mb[1] = MSW(stat);
208 mb[2] = RD_MAILBOX_REG(ha, reg, 2);
209 qla2x00_async_event(vha, rsp, mb);
210 break;
211 default:
212 ql_dbg(ql_dbg_async, vha, 0x5028,
213 "Unrecognized interrupt type (%d).\n", stat & 0xff);
214 break;
215 }
216 WRT_REG_WORD(&reg->hccr, HCCR_CLR_RISC_INT);
217 RD_REG_WORD_RELAXED(&reg->hccr);
218 }
219 qla2x00_handle_mbx_completion(ha, status);
220 spin_unlock_irqrestore(&ha->hardware_lock, flags);
221
222 return (IRQ_HANDLED);
223}
224
225
226
227
228
229
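/**
 * qla2x00_mbx_completion() - Process mailbox command completions.
 * @vha: SCSI driver HA context
 * @mb0: value of mailbox register 0 at interrupt time
 *
 * Copies the outgoing mailbox registers requested by the active mailbox
 * command (ha->mcp->in_mb) into ha->mailbox_out[] and flags the completion.
 */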
230static void
231qla2x00_mbx_completion(scsi_qla_host_t *vha, uint16_t mb0)
232{
233 uint16_t cnt;
234 uint32_t mboxes;
235 uint16_t __iomem *wptr;
236 struct qla_hw_data *ha = vha->hw;
237 struct device_reg_2xxx __iomem *reg = &ha->iobase->isp;
238
239
240 mboxes = (1 << ha->mbx_count) - 1;
241 if (!ha->mcp)
242 ql_dbg(ql_dbg_async, vha, 0x5001, "MBX pointer ERROR.\n");
243 else
244 mboxes = ha->mcp->in_mb;
245
246
247 ha->flags.mbox_int = 1;
248 ha->mailbox_out[0] = mb0;
249 mboxes >>= 1;
250 wptr = (uint16_t __iomem *)MAILBOX_REG(ha, reg, 1);
251
252 for (cnt = 1; cnt < ha->mbx_count; cnt++) {
253 if (IS_QLA2200(ha) && cnt == 8)
254 wptr = (uint16_t __iomem *)MAILBOX_REG(ha, reg, 8);
255 if ((cnt == 4 || cnt == 5) && (mboxes & BIT_0))
256 ha->mailbox_out[cnt] = qla2x00_debounce_register(wptr);
257 else if (mboxes & BIT_0)
258 ha->mailbox_out[cnt] = RD_REG_WORD(wptr);
259
260 wptr++;
261 mboxes >>= 1;
262 }
263}
264
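/*
 * qla81xx_idc_event() - Handle an Inter-Driver Communication AEN: snapshot
 * the IDC mailbox registers, signal DCBX completion when requested, and for
 * an IDC notification with a non-zero timeout post an IDC ACK work item.
 */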
265static void
266qla81xx_idc_event(scsi_qla_host_t *vha, uint16_t aen, uint16_t descr)
267{
268 static char *event[] =
269 { "Complete", "Request Notification", "Time Extension" };
270 int rval;
271 struct device_reg_24xx __iomem *reg24 = &vha->hw->iobase->isp24;
272 uint16_t __iomem *wptr;
273 uint16_t cnt, timeout, mb[QLA_IDC_ACK_REGS];
274
275
276 wptr = (uint16_t __iomem *)&reg24->mailbox1;
277 for (cnt = 0; cnt < QLA_IDC_ACK_REGS; cnt++, wptr++)
278 mb[cnt] = RD_REG_WORD(wptr);
279
280 ql_dbg(ql_dbg_async, vha, 0x5021,
281 "Inter-Driver Communication %s -- "
282 "%04x %04x %04x %04x %04x %04x %04x.\n",
283 event[aen & 0xff], mb[0], mb[1], mb[2], mb[3],
284 mb[4], mb[5], mb[6]);
285 if ((aen == MBA_IDC_COMPLETE && mb[1] >> 15)) {
286 vha->hw->flags.idc_compl_status = 1;
287 if (vha->hw->notify_dcbx_comp)
288 complete(&vha->hw->dcbx_comp);
289 }
290
291
292 timeout = (descr >> 8) & 0xf;
293 if (aen != MBA_IDC_NOTIFY || !timeout)
294 return;
295
296 ql_dbg(ql_dbg_async, vha, 0x5022,
297 "%lu Inter-Driver Communication %s -- ACK timeout=%d.\n",
298 vha->host_no, event[aen & 0xff], timeout);
299
300 rval = qla2x00_post_idc_ack_work(vha, mb);
301 if (rval != QLA_SUCCESS)
302 ql_log(ql_log_warn, vha, 0x5023,
303 "IDC failed to post ACK.\n");
304}
305
306#define LS_UNKNOWN 2
307const char *
308qla2x00_get_link_speed_str(struct qla_hw_data *ha, uint16_t speed)
309{
310 static const char * const link_speeds[] = {
311 "1", "2", "?", "4", "8", "16", "10"
312 };
313
314 if (IS_QLA2100(ha) || IS_QLA2200(ha))
315 return link_speeds[0];
316 else if (speed == 0x13)
317 return link_speeds[6];
318 else if (speed < 6)
319 return link_speeds[speed];
320 else
321 return link_speeds[LS_UNKNOWN];
322}
323
324static void
325qla83xx_handle_8200_aen(scsi_qla_host_t *vha, uint16_t *mb)
326{
327 struct qla_hw_data *ha = vha->hw;
328
329
330
331
332
333
334
335
336
337
338
339
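	/*
	 * 8200 AEN interpretation (matching the debug output below):
	 * mb[0] = AEN code, mb[1] = AEN reason code,
	 * mb[2]/mb[6] = LSW/MSW of Peg-Halt Status-1 register,
	 * mb[3]/mb[7] = LSW/MSW of Peg-Halt Status-2 register,
	 * mb[4] = IDC Device-State, mb[5] = IDC Driver-Presence.
	 */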
340 ql_dbg(ql_dbg_async, vha, 0x506b, "AEN Code: mb[0] = 0x%x AEN reason: "
341 "mb[1] = 0x%x PH-status1: mb[2] = 0x%x PH-status1: mb[6] = 0x%x.\n",
342 mb[0], mb[1], mb[2], mb[6]);
343 ql_dbg(ql_dbg_async, vha, 0x506c, "PH-status2: mb[3] = 0x%x "
344 "PH-status2: mb[7] = 0x%x Device-State: mb[4] = 0x%x "
345 "Drv-Presence: mb[5] = 0x%x.\n", mb[3], mb[7], mb[4], mb[5]);
346
347 if (mb[1] & (IDC_PEG_HALT_STATUS_CHANGE | IDC_NIC_FW_REPORTED_FAILURE |
348 IDC_HEARTBEAT_FAILURE)) {
349 ha->flags.nic_core_hung = 1;
350 ql_log(ql_log_warn, vha, 0x5060,
351 "83XX: F/W Error Reported: Check if reset required.\n");
352
353 if (mb[1] & IDC_PEG_HALT_STATUS_CHANGE) {
354 uint32_t protocol_engine_id, fw_err_code, err_level;
355
356
357
358
359
360
361
362
363
364
365
366
367
368
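			/*
			 * Peg-Halt Status-1 decode: bits 7:0 of mb[2] carry
			 * the protocol engine ID, bits 15:8 of mb[2] together
			 * with bits 12:0 of mb[6] form the firmware error
			 * code, and bits 15:13 of mb[6] give the error level.
			 */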
369 protocol_engine_id = (mb[2] & 0xff);
370 fw_err_code = (((mb[2] & 0xff00) >> 8) |
371 ((mb[6] & 0x1fff) << 8));
372 err_level = ((mb[6] & 0xe000) >> 13);
373 ql_log(ql_log_warn, vha, 0x5061, "PegHalt Status-1 "
374 "Register: protocol_engine_id=0x%x "
375 "fw_err_code=0x%x err_level=0x%x.\n",
376 protocol_engine_id, fw_err_code, err_level);
377 ql_log(ql_log_warn, vha, 0x5062, "PegHalt Status-2 "
378 "Register: 0x%x%x.\n", mb[7], mb[3]);
379 if (err_level == ERR_LEVEL_NON_FATAL) {
380 ql_log(ql_log_warn, vha, 0x5063,
381 "Not a fatal error, f/w has recovered "
382 "itself.\n");
383 } else if (err_level == ERR_LEVEL_RECOVERABLE_FATAL) {
384 ql_log(ql_log_fatal, vha, 0x5064,
385 "Recoverable Fatal error: Chip reset "
386 "required.\n");
387 qla83xx_schedule_work(vha,
388 QLA83XX_NIC_CORE_RESET);
389 } else if (err_level == ERR_LEVEL_UNRECOVERABLE_FATAL) {
390 ql_log(ql_log_fatal, vha, 0x5065,
391 "Unrecoverable Fatal error: Set FAILED "
392 "state, reboot required.\n");
393 qla83xx_schedule_work(vha,
394 QLA83XX_NIC_CORE_UNRECOVERABLE);
395 }
396 }
397
398 if (mb[1] & IDC_NIC_FW_REPORTED_FAILURE) {
399 uint16_t peg_fw_state, nw_interface_link_up;
400 uint16_t nw_interface_signal_detect, sfp_status;
401 uint16_t htbt_counter, htbt_monitor_enable;
402 uint16_t sfp_additonal_info, sfp_multirate;
403 uint16_t sfp_tx_fault, link_speed, dcbx_status;
404
405
406
407
408
409
410
411
412
413
414
415
416
417
418
419
420
421
422
423
424
425
426
427
428
429
430
431
432
433
434
435
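			/*
			 * Decode the Peg-to-FC status registers (mb[2] and
			 * mb[6]) into the individual fields logged below.
			 */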
436 peg_fw_state = (mb[2] & 0x00ff);
437 nw_interface_link_up = ((mb[2] & 0x0100) >> 8);
438 nw_interface_signal_detect = ((mb[2] & 0x0200) >> 9);
439 sfp_status = ((mb[2] & 0x0c00) >> 10);
440 htbt_counter = ((mb[2] & 0x7000) >> 12);
441 htbt_monitor_enable = ((mb[2] & 0x8000) >> 15);
442 sfp_additonal_info = (mb[6] & 0x0003);
443 sfp_multirate = ((mb[6] & 0x0004) >> 2);
444 sfp_tx_fault = ((mb[6] & 0x0008) >> 3);
445 link_speed = ((mb[6] & 0x0070) >> 4);
446 dcbx_status = ((mb[6] & 0x7000) >> 12);
447
448 ql_log(ql_log_warn, vha, 0x5066,
449 "Peg-to-Fc Status Register:\n"
450 "peg_fw_state=0x%x, nw_interface_link_up=0x%x, "
451 "nw_interface_signal_detect=0x%x"
452 "\nsfp_status=0x%x.\n", peg_fw_state,
453 nw_interface_link_up, nw_interface_signal_detect,
454 sfp_status);
455 ql_log(ql_log_warn, vha, 0x5067,
456 "htbt_counter=0x%x, htbt_monitor_enable=0x%x, "
457 "sfp_additonal_info=0x%x, sfp_multirate=0x%x.\n ",
458 htbt_counter, htbt_monitor_enable,
459 sfp_additonal_info, sfp_multirate);
460 ql_log(ql_log_warn, vha, 0x5068,
461 "sfp_tx_fault=0x%x, link_speed=0x%x, "
462 "dcbx_status=0x%x.\n", sfp_tx_fault, link_speed,
463 dcbx_status);
464
465 qla83xx_schedule_work(vha, QLA83XX_NIC_CORE_RESET);
466 }
467
468 if (mb[1] & IDC_HEARTBEAT_FAILURE) {
469 ql_log(ql_log_warn, vha, 0x5069,
470 "Heartbeat Failure encountered, chip reset "
471 "required.\n");
472
473 qla83xx_schedule_work(vha, QLA83XX_NIC_CORE_RESET);
474 }
475 }
476
477 if (mb[1] & IDC_DEVICE_STATE_CHANGE) {
478 ql_log(ql_log_info, vha, 0x506a,
479 "IDC Device-State changed = 0x%x.\n", mb[4]);
480 if (ha->flags.nic_core_reset_owner)
481 return;
482 qla83xx_schedule_work(vha, MBA_IDC_AEN);
483 }
484}
485
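/*
 * qla2x00_is_a_vp_did() - Return 1 if @rscn_entry matches the port ID (D_ID)
 * of any virtual port on this adapter, 0 otherwise.
 */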
486int
487qla2x00_is_a_vp_did(scsi_qla_host_t *vha, uint32_t rscn_entry)
488{
489 struct qla_hw_data *ha = vha->hw;
490 scsi_qla_host_t *vp;
491 uint32_t vp_did;
492 unsigned long flags;
493 int ret = 0;
494
495 if (!ha->num_vhosts)
496 return ret;
497
498 spin_lock_irqsave(&ha->vport_slock, flags);
499 list_for_each_entry(vp, &ha->vp_list, list) {
500 vp_did = vp->d_id.b24;
501 if (vp_did == rscn_entry) {
502 ret = 1;
503 break;
504 }
505 }
506 spin_unlock_irqrestore(&ha->vport_slock, flags);
507
508 return ret;
509}
510
511
512
513
514
515
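/**
 * qla2x00_async_event() - Process asynchronous events.
 * @vha: SCSI driver HA context
 * @rsp: response queue the event arrived on
 * @mb: mailbox registers (0 - 3)
 */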
516void
517qla2x00_async_event(scsi_qla_host_t *vha, struct rsp_que *rsp, uint16_t *mb)
518{
519 uint16_t handle_cnt;
520 uint16_t cnt, mbx;
521 uint32_t handles[5];
522 struct qla_hw_data *ha = vha->hw;
523 struct device_reg_2xxx __iomem *reg = &ha->iobase->isp;
524 struct device_reg_24xx __iomem *reg24 = &ha->iobase->isp24;
525 struct device_reg_82xx __iomem *reg82 = &ha->iobase->isp82;
526 uint32_t rscn_entry, host_pid;
527 unsigned long flags;
528
529
530 handle_cnt = 0;
531 if (IS_CNA_CAPABLE(ha))
532 goto skip_rio;
533 switch (mb[0]) {
534 case MBA_SCSI_COMPLETION:
535 handles[0] = le32_to_cpu((uint32_t)((mb[2] << 16) | mb[1]));
536 handle_cnt = 1;
537 break;
538 case MBA_CMPLT_1_16BIT:
539 handles[0] = mb[1];
540 handle_cnt = 1;
541 mb[0] = MBA_SCSI_COMPLETION;
542 break;
543 case MBA_CMPLT_2_16BIT:
544 handles[0] = mb[1];
545 handles[1] = mb[2];
546 handle_cnt = 2;
547 mb[0] = MBA_SCSI_COMPLETION;
548 break;
549 case MBA_CMPLT_3_16BIT:
550 handles[0] = mb[1];
551 handles[1] = mb[2];
552 handles[2] = mb[3];
553 handle_cnt = 3;
554 mb[0] = MBA_SCSI_COMPLETION;
555 break;
556 case MBA_CMPLT_4_16BIT:
557 handles[0] = mb[1];
558 handles[1] = mb[2];
559 handles[2] = mb[3];
560 handles[3] = (uint32_t)RD_MAILBOX_REG(ha, reg, 6);
561 handle_cnt = 4;
562 mb[0] = MBA_SCSI_COMPLETION;
563 break;
564 case MBA_CMPLT_5_16BIT:
565 handles[0] = mb[1];
566 handles[1] = mb[2];
567 handles[2] = mb[3];
568 handles[3] = (uint32_t)RD_MAILBOX_REG(ha, reg, 6);
569 handles[4] = (uint32_t)RD_MAILBOX_REG(ha, reg, 7);
570 handle_cnt = 5;
571 mb[0] = MBA_SCSI_COMPLETION;
572 break;
573 case MBA_CMPLT_2_32BIT:
574 handles[0] = le32_to_cpu((uint32_t)((mb[2] << 16) | mb[1]));
575 handles[1] = le32_to_cpu(
576 ((uint32_t)(RD_MAILBOX_REG(ha, reg, 7) << 16)) |
577 RD_MAILBOX_REG(ha, reg, 6));
578 handle_cnt = 2;
579 mb[0] = MBA_SCSI_COMPLETION;
580 break;
581 default:
582 break;
583 }
584skip_rio:
585 switch (mb[0]) {
586 case MBA_SCSI_COMPLETION:
587 if (!vha->flags.online)
588 break;
589
590 for (cnt = 0; cnt < handle_cnt; cnt++)
591 qla2x00_process_completed_request(vha, rsp->req,
592 handles[cnt]);
593 break;
594
595 case MBA_RESET:
596 ql_dbg(ql_dbg_async, vha, 0x5002,
597 "Asynchronous RESET.\n");
598
599 set_bit(RESET_MARKER_NEEDED, &vha->dpc_flags);
600 break;
601
602 case MBA_SYSTEM_ERR:
603 mbx = (IS_QLA81XX(ha) || IS_QLA83XX(ha)) ?
604 RD_REG_WORD(&reg24->mailbox7) : 0;
605 ql_log(ql_log_warn, vha, 0x5003,
606 "ISP System Error - mbx1=%xh mbx2=%xh mbx3=%xh "
607 "mbx7=%xh.\n", mb[1], mb[2], mb[3], mbx);
608
609 ha->isp_ops->fw_dump(vha, 1);
610
611 if (IS_FWI2_CAPABLE(ha)) {
612 if (mb[1] == 0 && mb[2] == 0) {
613 ql_log(ql_log_fatal, vha, 0x5004,
614 "Unrecoverable Hardware Error: adapter "
615 "marked OFFLINE!\n");
616 vha->flags.online = 0;
617 vha->device_flags |= DFLG_DEV_FAILED;
618 } else {
619
620 if ((mbx & MBX_3) && (ha->flags.port0))
621 set_bit(MPI_RESET_NEEDED,
622 &vha->dpc_flags);
623
624 set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
625 }
626 } else if (mb[1] == 0) {
627 ql_log(ql_log_fatal, vha, 0x5005,
628 "Unrecoverable Hardware Error: adapter marked "
629 "OFFLINE!\n");
630 vha->flags.online = 0;
631 vha->device_flags |= DFLG_DEV_FAILED;
632 } else
633 set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
634 break;
635
636 case MBA_REQ_TRANSFER_ERR:
637 ql_log(ql_log_warn, vha, 0x5006,
638 "ISP Request Transfer Error (%x).\n", mb[1]);
639
640 set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
641 break;
642
643 case MBA_RSP_TRANSFER_ERR:
644 ql_log(ql_log_warn, vha, 0x5007,
645 "ISP Response Transfer Error.\n");
646
647 set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
648 break;
649
650 case MBA_WAKEUP_THRES:
651 ql_dbg(ql_dbg_async, vha, 0x5008,
652 "Asynchronous WAKEUP_THRES.\n");
653
654 break;
655 case MBA_LIP_OCCURRED:
656 ql_dbg(ql_dbg_async, vha, 0x5009,
657 "LIP occurred (%x).\n", mb[1]);
658
659 if (atomic_read(&vha->loop_state) != LOOP_DOWN) {
660 atomic_set(&vha->loop_state, LOOP_DOWN);
661 atomic_set(&vha->loop_down_timer, LOOP_DOWN_TIME);
662 qla2x00_mark_all_devices_lost(vha, 1);
663 }
664
665 if (vha->vp_idx) {
666 atomic_set(&vha->vp_state, VP_FAILED);
667 fc_vport_set_state(vha->fc_vport, FC_VPORT_FAILED);
668 }
669
670 set_bit(REGISTER_FC4_NEEDED, &vha->dpc_flags);
671 set_bit(REGISTER_FDMI_NEEDED, &vha->dpc_flags);
672
673 vha->flags.management_server_logged_in = 0;
674 qla2x00_post_aen_work(vha, FCH_EVT_LIP, mb[1]);
675 break;
676
677 case MBA_LOOP_UP:
678 if (IS_QLA2100(ha) || IS_QLA2200(ha))
679 ha->link_data_rate = PORT_SPEED_1GB;
680 else
681 ha->link_data_rate = mb[1];
682
683 ql_dbg(ql_dbg_async, vha, 0x500a,
684 "LOOP UP detected (%s Gbps).\n",
685 qla2x00_get_link_speed_str(ha, ha->link_data_rate));
686
687 vha->flags.management_server_logged_in = 0;
688 qla2x00_post_aen_work(vha, FCH_EVT_LINKUP, ha->link_data_rate);
689 break;
690
691 case MBA_LOOP_DOWN:
692 mbx = (IS_QLA81XX(ha) || IS_QLA8031(ha))
693 ? RD_REG_WORD(&reg24->mailbox4) : 0;
694 mbx = IS_QLA82XX(ha) ? RD_REG_WORD(&reg82->mailbox_out[4]) : mbx;
695 ql_dbg(ql_dbg_async, vha, 0x500b,
696 "LOOP DOWN detected (%x %x %x %x).\n",
697 mb[1], mb[2], mb[3], mbx);
698
699 if (atomic_read(&vha->loop_state) != LOOP_DOWN) {
700 atomic_set(&vha->loop_state, LOOP_DOWN);
701 atomic_set(&vha->loop_down_timer, LOOP_DOWN_TIME);
702 vha->device_flags |= DFLG_NO_CABLE;
703 qla2x00_mark_all_devices_lost(vha, 1);
704 }
705
706 if (vha->vp_idx) {
707 atomic_set(&vha->vp_state, VP_FAILED);
708 fc_vport_set_state(vha->fc_vport, FC_VPORT_FAILED);
709 }
710
711 vha->flags.management_server_logged_in = 0;
712 ha->link_data_rate = PORT_SPEED_UNKNOWN;
713 qla2x00_post_aen_work(vha, FCH_EVT_LINKDOWN, 0);
714 break;
715
716 case MBA_LIP_RESET:
717 ql_dbg(ql_dbg_async, vha, 0x500c,
718 "LIP reset occurred (%x).\n", mb[1]);
719
720 if (atomic_read(&vha->loop_state) != LOOP_DOWN) {
721 atomic_set(&vha->loop_state, LOOP_DOWN);
722 atomic_set(&vha->loop_down_timer, LOOP_DOWN_TIME);
723 qla2x00_mark_all_devices_lost(vha, 1);
724 }
725
726 if (vha->vp_idx) {
727 atomic_set(&vha->vp_state, VP_FAILED);
728 fc_vport_set_state(vha->fc_vport, FC_VPORT_FAILED);
729 }
730
731 set_bit(RESET_MARKER_NEEDED, &vha->dpc_flags);
732
733 ha->operating_mode = LOOP;
734 vha->flags.management_server_logged_in = 0;
735 qla2x00_post_aen_work(vha, FCH_EVT_LIPRESET, mb[1]);
736 break;
737
738
739 case MBA_POINT_TO_POINT:
740 if (IS_QLA2100(ha))
741 break;
742
743 if (IS_QLA81XX(ha) || IS_QLA82XX(ha) || IS_QLA8031(ha)) {
744 ql_dbg(ql_dbg_async, vha, 0x500d,
745 "DCBX Completed -- %04x %04x %04x.\n",
746 mb[1], mb[2], mb[3]);
747 if (ha->notify_dcbx_comp)
748 complete(&ha->dcbx_comp);
749
750 } else
751 ql_dbg(ql_dbg_async, vha, 0x500e,
752 "Asynchronous P2P MODE received.\n");
753
754
755
756
757
758 if (atomic_read(&vha->loop_state) != LOOP_DOWN) {
759 atomic_set(&vha->loop_state, LOOP_DOWN);
760 if (!atomic_read(&vha->loop_down_timer))
761 atomic_set(&vha->loop_down_timer,
762 LOOP_DOWN_TIME);
763 qla2x00_mark_all_devices_lost(vha, 1);
764 }
765
766 if (vha->vp_idx) {
767 atomic_set(&vha->vp_state, VP_FAILED);
768 fc_vport_set_state(vha->fc_vport, FC_VPORT_FAILED);
769 }
770
771 if (!(test_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags)))
772 set_bit(RESET_MARKER_NEEDED, &vha->dpc_flags);
773
774 set_bit(REGISTER_FC4_NEEDED, &vha->dpc_flags);
775 set_bit(REGISTER_FDMI_NEEDED, &vha->dpc_flags);
776
777 ha->flags.gpsc_supported = 1;
778 vha->flags.management_server_logged_in = 0;
779 break;
780
781 case MBA_CHG_IN_CONNECTION:
782 if (IS_QLA2100(ha))
783 break;
784
785 ql_dbg(ql_dbg_async, vha, 0x500f,
786 "Configuration change detected: value=%x.\n", mb[1]);
787
788 if (atomic_read(&vha->loop_state) != LOOP_DOWN) {
789 atomic_set(&vha->loop_state, LOOP_DOWN);
790 if (!atomic_read(&vha->loop_down_timer))
791 atomic_set(&vha->loop_down_timer,
792 LOOP_DOWN_TIME);
793 qla2x00_mark_all_devices_lost(vha, 1);
794 }
795
796 if (vha->vp_idx) {
797 atomic_set(&vha->vp_state, VP_FAILED);
798 fc_vport_set_state(vha->fc_vport, FC_VPORT_FAILED);
799 }
800
801 set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags);
802 set_bit(LOCAL_LOOP_UPDATE, &vha->dpc_flags);
803 break;
804
805 case MBA_PORT_UPDATE:
806
807
808
809
810
811
812
813
814
815
816
817
818
819
820
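		/*
		 * The low byte of mb[3] carries the target vp_idx (0xff means
		 * all vports).  On multi-port ISPs, skip events that are not
		 * addressed to this vport unless the event is global and
		 * targets all vports.
		 */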
821 if (IS_QLA2XXX_MIDTYPE(ha) &&
822 ((mb[1] == 0xffff && (mb[3] & 0xff) != 0xff) ||
823 (mb[1] != 0xffff)) && vha->vp_idx != (mb[3] & 0xff))
824 break;
825
826
827 if (mb[1] == 0xffff && mb[2] == 0x7) {
828 ql_dbg(ql_dbg_async, vha, 0x5010,
829 "Port unavailable %04x %04x %04x.\n",
830 mb[1], mb[2], mb[3]);
831 ql_log(ql_log_warn, vha, 0x505e,
832 "Link is offline.\n");
833
834 if (atomic_read(&vha->loop_state) != LOOP_DOWN) {
835 atomic_set(&vha->loop_state, LOOP_DOWN);
836 atomic_set(&vha->loop_down_timer,
837 LOOP_DOWN_TIME);
838 vha->device_flags |= DFLG_NO_CABLE;
839 qla2x00_mark_all_devices_lost(vha, 1);
840 }
841
842 if (vha->vp_idx) {
843 atomic_set(&vha->vp_state, VP_FAILED);
844 fc_vport_set_state(vha->fc_vport,
845 FC_VPORT_FAILED);
846 qla2x00_mark_all_devices_lost(vha, 1);
847 }
848
849 vha->flags.management_server_logged_in = 0;
850 ha->link_data_rate = PORT_SPEED_UNKNOWN;
851 break;
852 }
853
854
855
856
857
858
859 atomic_set(&vha->loop_down_timer, 0);
860 if (mb[1] != 0xffff || (mb[2] != 0x6 && mb[2] != 0x4)) {
861 ql_dbg(ql_dbg_async, vha, 0x5011,
862 "Asynchronous PORT UPDATE ignored %04x/%04x/%04x.\n",
863 mb[1], mb[2], mb[3]);
864
865 qlt_async_event(mb[0], vha, mb);
866 break;
867 }
868
869 ql_dbg(ql_dbg_async, vha, 0x5012,
870 "Port database changed %04x %04x %04x.\n",
871 mb[1], mb[2], mb[3]);
872 ql_log(ql_log_warn, vha, 0x505f,
873 "Link is operational (%s Gbps).\n",
874 qla2x00_get_link_speed_str(ha, ha->link_data_rate));
875
876
877
878
879 atomic_set(&vha->loop_state, LOOP_UP);
880
881 qla2x00_mark_all_devices_lost(vha, 1);
882
883 if (vha->vp_idx == 0 && !qla_ini_mode_enabled(vha))
884 set_bit(SCR_PENDING, &vha->dpc_flags);
885
886 set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags);
887 set_bit(LOCAL_LOOP_UPDATE, &vha->dpc_flags);
888
889 qlt_async_event(mb[0], vha, mb);
890 break;
891
892 case MBA_RSCN_UPDATE:
893
894 if (vha->vp_idx && test_bit(VP_SCR_NEEDED, &vha->vp_flags))
895 break;
896
897 if (ha->flags.npiv_supported && vha->vp_idx != (mb[3] & 0xff))
898 break;
899
900 ql_dbg(ql_dbg_async, vha, 0x5013,
901 "RSCN database changed -- %04x %04x %04x.\n",
902 mb[1], mb[2], mb[3]);
903
904 rscn_entry = ((mb[1] & 0xff) << 16) | mb[2];
905 host_pid = (vha->d_id.b.domain << 16) | (vha->d_id.b.area << 8)
906 | vha->d_id.b.al_pa;
907 if (rscn_entry == host_pid) {
908 ql_dbg(ql_dbg_async, vha, 0x5014,
909 "Ignoring RSCN update to local host "
910 "port ID (%06x).\n", host_pid);
911 break;
912 }
913
914
915 rscn_entry = ((mb[1] & 0x3ff) << 16) | mb[2];
916
917
918 if (qla2x00_is_a_vp_did(vha, rscn_entry))
919 break;
920
921 atomic_set(&vha->loop_down_timer, 0);
922 vha->flags.management_server_logged_in = 0;
923
924 set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags);
925 set_bit(RSCN_UPDATE, &vha->dpc_flags);
926 qla2x00_post_aen_work(vha, FCH_EVT_RSCN, rscn_entry);
927 break;
928
929
930 case MBA_ZIO_RESPONSE:
931 ql_dbg(ql_dbg_async, vha, 0x5015,
932 "[R|Z]IO update completion.\n");
933
934 if (IS_FWI2_CAPABLE(ha))
935 qla24xx_process_response_queue(vha, rsp);
936 else
937 qla2x00_process_response_queue(rsp);
938 break;
939
940 case MBA_DISCARD_RND_FRAME:
941 ql_dbg(ql_dbg_async, vha, 0x5016,
942 "Discard RND Frame -- %04x %04x %04x.\n",
943 mb[1], mb[2], mb[3]);
944 break;
945
946 case MBA_TRACE_NOTIFICATION:
947 ql_dbg(ql_dbg_async, vha, 0x5017,
948 "Trace Notification -- %04x %04x.\n", mb[1], mb[2]);
949 break;
950
951 case MBA_ISP84XX_ALERT:
952 ql_dbg(ql_dbg_async, vha, 0x5018,
953 "ISP84XX Alert Notification -- %04x %04x %04x.\n",
954 mb[1], mb[2], mb[3]);
955
956 spin_lock_irqsave(&ha->cs84xx->access_lock, flags);
957 switch (mb[1]) {
958 case A84_PANIC_RECOVERY:
959 ql_log(ql_log_info, vha, 0x5019,
960 "Alert 84XX: panic recovery %04x %04x.\n",
961 mb[2], mb[3]);
962 break;
963 case A84_OP_LOGIN_COMPLETE:
964 ha->cs84xx->op_fw_version = mb[3] << 16 | mb[2];
965 ql_log(ql_log_info, vha, 0x501a,
966 "Alert 84XX: firmware version %x.\n",
967 ha->cs84xx->op_fw_version);
968 break;
969 case A84_DIAG_LOGIN_COMPLETE:
970 ha->cs84xx->diag_fw_version = mb[3] << 16 | mb[2];
971 ql_log(ql_log_info, vha, 0x501b,
972 "Alert 84XX: diagnostic firmware version %x.\n",
973 ha->cs84xx->diag_fw_version);
974 break;
975 case A84_GOLD_LOGIN_COMPLETE:
976 ha->cs84xx->gold_fw_version = mb[3] << 16 | mb[2];
977 ha->cs84xx->fw_update = 1;
978 ql_log(ql_log_info, vha, 0x501c,
979 "Alert 84XX: gold firmware version %x.\n",
980 ha->cs84xx->gold_fw_version);
981 break;
982 default:
983 ql_log(ql_log_warn, vha, 0x501d,
984 "Alert 84xx: Invalid Alert %04x %04x %04x.\n",
985 mb[1], mb[2], mb[3]);
986 }
987 spin_unlock_irqrestore(&ha->cs84xx->access_lock, flags);
988 break;
989 case MBA_DCBX_START:
990 ql_dbg(ql_dbg_async, vha, 0x501e,
991 "DCBX Started -- %04x %04x %04x.\n",
992 mb[1], mb[2], mb[3]);
993 break;
994 case MBA_DCBX_PARAM_UPDATE:
995 ql_dbg(ql_dbg_async, vha, 0x501f,
996 "DCBX Parameters Updated -- %04x %04x %04x.\n",
997 mb[1], mb[2], mb[3]);
998 break;
999 case MBA_FCF_CONF_ERR:
1000 ql_dbg(ql_dbg_async, vha, 0x5020,
1001 "FCF Configuration Error -- %04x %04x %04x.\n",
1002 mb[1], mb[2], mb[3]);
1003 break;
1004 case MBA_IDC_NOTIFY:
1005 if (IS_QLA8031(vha->hw)) {
1006 mb[4] = RD_REG_WORD(&reg24->mailbox4);
1007 if (((mb[2] & 0x7fff) == MBC_PORT_RESET ||
1008 (mb[2] & 0x7fff) == MBC_SET_PORT_CONFIG) &&
1009 (mb[4] & INTERNAL_LOOPBACK_MASK) != 0) {
1010 set_bit(ISP_QUIESCE_NEEDED, &vha->dpc_flags);
1011
1012
1013
1014 if (atomic_read(&vha->loop_state) == LOOP_DOWN)
1015 atomic_set(&vha->loop_down_timer,
1016 LOOP_DOWN_TIME);
1017 qla2xxx_wake_dpc(vha);
1018 }
1019 }
1020 case MBA_IDC_COMPLETE:
1021 if (ha->notify_lb_portup_comp)
1022 complete(&ha->lb_portup_comp);
1023
1024 case MBA_IDC_TIME_EXT:
1025 if (IS_QLA81XX(vha->hw) || IS_QLA8031(vha->hw))
1026 qla81xx_idc_event(vha, mb[0], mb[1]);
1027 break;
1028
1029 case MBA_IDC_AEN:
1030 mb[4] = RD_REG_WORD(&reg24->mailbox4);
1031 mb[5] = RD_REG_WORD(&reg24->mailbox5);
1032 mb[6] = RD_REG_WORD(&reg24->mailbox6);
1033 mb[7] = RD_REG_WORD(&reg24->mailbox7);
1034 qla83xx_handle_8200_aen(vha, mb);
1035 break;
1036
1037 default:
1038 ql_dbg(ql_dbg_async, vha, 0x5057,
1039 "Unknown AEN:%04x %04x %04x %04x\n",
1040 mb[0], mb[1], mb[2], mb[3]);
1041 }
1042
1043 qlt_async_event(mb[0], vha, mb);
1044
1045 if (!vha->vp_idx && ha->num_vhosts)
1046 qla2x00_alert_all_vps(rsp, mb);
1047}
1048
1049
1050
1051
1052
1053
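/**
 * qla2x00_process_completed_request() - Process a Fast Post response.
 * @vha: SCSI driver HA context
 * @req: request queue the command was issued on
 * @index: SRB index
 */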
1054void
1055qla2x00_process_completed_request(struct scsi_qla_host *vha,
1056 struct req_que *req, uint32_t index)
1057{
1058 srb_t *sp;
1059 struct qla_hw_data *ha = vha->hw;
1060
1061
1062 if (index >= req->num_outstanding_cmds) {
1063 ql_log(ql_log_warn, vha, 0x3014,
1064 "Invalid SCSI command index (%x).\n", index);
1065
1066 if (IS_QLA82XX(ha))
1067 set_bit(FCOE_CTX_RESET_NEEDED, &vha->dpc_flags);
1068 else
1069 set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
1070 return;
1071 }
1072
1073 sp = req->outstanding_cmds[index];
1074 if (sp) {
1075
1076 req->outstanding_cmds[index] = NULL;
1077
1078
1079 sp->done(ha, sp, DID_OK << 16);
1080 } else {
1081 ql_log(ql_log_warn, vha, 0x3016, "Invalid SCSI SRB.\n");
1082
1083 if (IS_QLA82XX(ha))
1084 set_bit(FCOE_CTX_RESET_NEEDED, &vha->dpc_flags);
1085 else
1086 set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
1087 }
1088}
1089
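/*
 * qla2x00_get_sp_from_handle() - Look up the srb matching the handle in
 * @iocb.  On success the outstanding-command slot is cleared and the srb
 * returned; otherwise NULL is returned (and an ISP abort is scheduled when
 * the handle is out of range).
 */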
1090srb_t *
1091qla2x00_get_sp_from_handle(scsi_qla_host_t *vha, const char *func,
1092 struct req_que *req, void *iocb)
1093{
1094 struct qla_hw_data *ha = vha->hw;
1095 sts_entry_t *pkt = iocb;
1096 srb_t *sp = NULL;
1097 uint16_t index;
1098
1099 index = LSW(pkt->handle);
1100 if (index >= req->num_outstanding_cmds) {
1101 ql_log(ql_log_warn, vha, 0x5031,
1102 "Invalid command index (%x).\n", index);
1103 if (IS_QLA82XX(ha))
1104 set_bit(FCOE_CTX_RESET_NEEDED, &vha->dpc_flags);
1105 else
1106 set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
1107 goto done;
1108 }
1109 sp = req->outstanding_cmds[index];
1110 if (!sp) {
1111 ql_log(ql_log_warn, vha, 0x5032,
1112 "Invalid completion handle (%x) -- timed-out.\n", index);
1113 return sp;
1114 }
1115 if (sp->handle != index) {
1116 ql_log(ql_log_warn, vha, 0x5033,
1117 "SRB handle (%x) mismatch %x.\n", sp->handle, index);
1118 return NULL;
1119 }
1120
1121 req->outstanding_cmds[index] = NULL;
1122
1123done:
1124 return sp;
1125}
1126
1127static void
1128qla2x00_mbx_iocb_entry(scsi_qla_host_t *vha, struct req_que *req,
1129 struct mbx_entry *mbx)
1130{
1131 const char func[] = "MBX-IOCB";
1132 const char *type;
1133 fc_port_t *fcport;
1134 srb_t *sp;
1135 struct srb_iocb *lio;
1136 uint16_t *data;
1137 uint16_t status;
1138
1139 sp = qla2x00_get_sp_from_handle(vha, func, req, mbx);
1140 if (!sp)
1141 return;
1142
1143 lio = &sp->u.iocb_cmd;
1144 type = sp->name;
1145 fcport = sp->fcport;
1146 data = lio->u.logio.data;
1147
1148 data[0] = MBS_COMMAND_ERROR;
1149 data[1] = lio->u.logio.flags & SRB_LOGIN_RETRIED ?
1150 QLA_LOGIO_LOGIN_RETRIED : 0;
1151 if (mbx->entry_status) {
1152 ql_dbg(ql_dbg_async, vha, 0x5043,
1153 "Async-%s error entry - hdl=%x portid=%02x%02x%02x "
1154 "entry-status=%x status=%x state-flag=%x "
1155 "status-flags=%x.\n", type, sp->handle,
1156 fcport->d_id.b.domain, fcport->d_id.b.area,
1157 fcport->d_id.b.al_pa, mbx->entry_status,
1158 le16_to_cpu(mbx->status), le16_to_cpu(mbx->state_flags),
1159 le16_to_cpu(mbx->status_flags));
1160
1161 ql_dump_buffer(ql_dbg_async + ql_dbg_buffer, vha, 0x5029,
1162 (uint8_t *)mbx, sizeof(*mbx));
1163
1164 goto logio_done;
1165 }
1166
1167 status = le16_to_cpu(mbx->status);
1168 if (status == 0x30 && sp->type == SRB_LOGIN_CMD &&
1169 le16_to_cpu(mbx->mb0) == MBS_COMMAND_COMPLETE)
1170 status = 0;
1171 if (!status && le16_to_cpu(mbx->mb0) == MBS_COMMAND_COMPLETE) {
1172 ql_dbg(ql_dbg_async, vha, 0x5045,
1173 "Async-%s complete - hdl=%x portid=%02x%02x%02x mbx1=%x.\n",
1174 type, sp->handle, fcport->d_id.b.domain,
1175 fcport->d_id.b.area, fcport->d_id.b.al_pa,
1176 le16_to_cpu(mbx->mb1));
1177
1178 data[0] = MBS_COMMAND_COMPLETE;
1179 if (sp->type == SRB_LOGIN_CMD) {
1180 fcport->port_type = FCT_TARGET;
1181 if (le16_to_cpu(mbx->mb1) & BIT_0)
1182 fcport->port_type = FCT_INITIATOR;
1183 else if (le16_to_cpu(mbx->mb1) & BIT_1)
1184 fcport->flags |= FCF_FCP2_DEVICE;
1185 }
1186 goto logio_done;
1187 }
1188
1189 data[0] = le16_to_cpu(mbx->mb0);
1190 switch (data[0]) {
1191 case MBS_PORT_ID_USED:
1192 data[1] = le16_to_cpu(mbx->mb1);
1193 break;
1194 case MBS_LOOP_ID_USED:
1195 break;
1196 default:
1197 data[0] = MBS_COMMAND_ERROR;
1198 break;
1199 }
1200
1201 ql_log(ql_log_warn, vha, 0x5046,
1202 "Async-%s failed - hdl=%x portid=%02x%02x%02x status=%x "
1203 "mb0=%x mb1=%x mb2=%x mb6=%x mb7=%x.\n", type, sp->handle,
1204 fcport->d_id.b.domain, fcport->d_id.b.area, fcport->d_id.b.al_pa,
1205 status, le16_to_cpu(mbx->mb0), le16_to_cpu(mbx->mb1),
1206 le16_to_cpu(mbx->mb2), le16_to_cpu(mbx->mb6),
1207 le16_to_cpu(mbx->mb7));
1208
1209logio_done:
1210 sp->done(vha, sp, 0);
1211}
1212
1213static void
1214qla2x00_ct_entry(scsi_qla_host_t *vha, struct req_que *req,
1215 sts_entry_t *pkt, int iocb_type)
1216{
1217 const char func[] = "CT_IOCB";
1218 const char *type;
1219 srb_t *sp;
1220 struct fc_bsg_job *bsg_job;
1221 uint16_t comp_status;
1222 int res;
1223
1224 sp = qla2x00_get_sp_from_handle(vha, func, req, pkt);
1225 if (!sp)
1226 return;
1227
1228 bsg_job = sp->u.bsg_job;
1229
1230 type = "ct pass-through";
1231
1232 comp_status = le16_to_cpu(pkt->comp_status);
1233
1234
1235
1236
1237 bsg_job->reply->reply_data.ctels_reply.status = FC_CTELS_STATUS_OK;
1238 bsg_job->reply_len = sizeof(struct fc_bsg_reply);
1239
1240 if (comp_status != CS_COMPLETE) {
1241 if (comp_status == CS_DATA_UNDERRUN) {
1242 res = DID_OK << 16;
1243 bsg_job->reply->reply_payload_rcv_len =
1244 le16_to_cpu(((sts_entry_t *)pkt)->rsp_info_len);
1245
1246 ql_log(ql_log_warn, vha, 0x5048,
1247 "CT pass-through-%s error "
1248 "comp_status-status=0x%x total_byte = 0x%x.\n",
1249 type, comp_status,
1250 bsg_job->reply->reply_payload_rcv_len);
1251 } else {
1252 ql_log(ql_log_warn, vha, 0x5049,
1253 "CT pass-through-%s error "
1254 "comp_status-status=0x%x.\n", type, comp_status);
1255 res = DID_ERROR << 16;
1256 bsg_job->reply->reply_payload_rcv_len = 0;
1257 }
1258 ql_dump_buffer(ql_dbg_async + ql_dbg_buffer, vha, 0x5035,
1259 (uint8_t *)pkt, sizeof(*pkt));
1260 } else {
1261 res = DID_OK << 16;
1262 bsg_job->reply->reply_payload_rcv_len =
1263 bsg_job->reply_payload.payload_len;
1264 bsg_job->reply_len = 0;
1265 }
1266
1267 sp->done(vha, sp, res);
1268}
1269
1270static void
1271qla24xx_els_ct_entry(scsi_qla_host_t *vha, struct req_que *req,
1272 struct sts_entry_24xx *pkt, int iocb_type)
1273{
1274 const char func[] = "ELS_CT_IOCB";
1275 const char *type;
1276 srb_t *sp;
1277 struct fc_bsg_job *bsg_job;
1278 uint16_t comp_status;
1279 uint32_t fw_status[3];
1280 uint8_t* fw_sts_ptr;
1281 int res;
1282
1283 sp = qla2x00_get_sp_from_handle(vha, func, req, pkt);
1284 if (!sp)
1285 return;
1286 bsg_job = sp->u.bsg_job;
1287
1288 type = NULL;
1289 switch (sp->type) {
1290 case SRB_ELS_CMD_RPT:
1291 case SRB_ELS_CMD_HST:
1292 type = "els";
1293 break;
1294 case SRB_CT_CMD:
1295 type = "ct pass-through";
1296 break;
1297 default:
1298 ql_dbg(ql_dbg_user, vha, 0x503e,
1299 "Unrecognized SRB: (%p) type=%d.\n", sp, sp->type);
1300 return;
1301 }
1302
1303 comp_status = fw_status[0] = le16_to_cpu(pkt->comp_status);
1304 fw_status[1] = le16_to_cpu(((struct els_sts_entry_24xx*)pkt)->error_subcode_1);
1305 fw_status[2] = le16_to_cpu(((struct els_sts_entry_24xx*)pkt)->error_subcode_2);
1306
1307
1308
1309
1310 bsg_job->reply->reply_data.ctels_reply.status = FC_CTELS_STATUS_OK;
1311 bsg_job->reply_len = sizeof(struct fc_bsg_reply) + sizeof(fw_status);
1312
1313 if (comp_status != CS_COMPLETE) {
1314 if (comp_status == CS_DATA_UNDERRUN) {
1315 res = DID_OK << 16;
1316 bsg_job->reply->reply_payload_rcv_len =
1317 le16_to_cpu(((struct els_sts_entry_24xx *)pkt)->total_byte_count);
1318
1319 ql_dbg(ql_dbg_user, vha, 0x503f,
1320 "ELS-CT pass-through-%s error hdl=%x comp_status-status=0x%x "
1321 "error subcode 1=0x%x error subcode 2=0x%x total_byte = 0x%x.\n",
1322 type, sp->handle, comp_status, fw_status[1], fw_status[2],
1323 le16_to_cpu(((struct els_sts_entry_24xx *)
1324 pkt)->total_byte_count));
1325 fw_sts_ptr = ((uint8_t*)bsg_job->req->sense) + sizeof(struct fc_bsg_reply);
1326 memcpy( fw_sts_ptr, fw_status, sizeof(fw_status));
1327 }
1328 else {
1329 ql_dbg(ql_dbg_user, vha, 0x5040,
1330 "ELS-CT pass-through-%s error hdl=%x comp_status-status=0x%x "
1331 "error subcode 1=0x%x error subcode 2=0x%x.\n",
1332 type, sp->handle, comp_status,
1333 le16_to_cpu(((struct els_sts_entry_24xx *)
1334 pkt)->error_subcode_1),
1335 le16_to_cpu(((struct els_sts_entry_24xx *)
1336 pkt)->error_subcode_2));
1337 res = DID_ERROR << 16;
1338 bsg_job->reply->reply_payload_rcv_len = 0;
1339 fw_sts_ptr = ((uint8_t*)bsg_job->req->sense) + sizeof(struct fc_bsg_reply);
1340 memcpy( fw_sts_ptr, fw_status, sizeof(fw_status));
1341 }
1342 ql_dump_buffer(ql_dbg_user + ql_dbg_buffer, vha, 0x5056,
1343 (uint8_t *)pkt, sizeof(*pkt));
1344 }
1345 else {
1346 res = DID_OK << 16;
1347 bsg_job->reply->reply_payload_rcv_len = bsg_job->reply_payload.payload_len;
1348 bsg_job->reply_len = 0;
1349 }
1350
1351 sp->done(vha, sp, res);
1352}
1353
1354static void
1355qla24xx_logio_entry(scsi_qla_host_t *vha, struct req_que *req,
1356 struct logio_entry_24xx *logio)
1357{
1358 const char func[] = "LOGIO-IOCB";
1359 const char *type;
1360 fc_port_t *fcport;
1361 srb_t *sp;
1362 struct srb_iocb *lio;
1363 uint16_t *data;
1364 uint32_t iop[2];
1365
1366 sp = qla2x00_get_sp_from_handle(vha, func, req, logio);
1367 if (!sp)
1368 return;
1369
1370 lio = &sp->u.iocb_cmd;
1371 type = sp->name;
1372 fcport = sp->fcport;
1373 data = lio->u.logio.data;
1374
1375 data[0] = MBS_COMMAND_ERROR;
1376 data[1] = lio->u.logio.flags & SRB_LOGIN_RETRIED ?
1377 QLA_LOGIO_LOGIN_RETRIED : 0;
1378 if (logio->entry_status) {
1379 ql_log(ql_log_warn, fcport->vha, 0x5034,
1380 "Async-%s error entry - hdl=%x"
1381 "portid=%02x%02x%02x entry-status=%x.\n",
1382 type, sp->handle, fcport->d_id.b.domain,
1383 fcport->d_id.b.area, fcport->d_id.b.al_pa,
1384 logio->entry_status);
1385 ql_dump_buffer(ql_dbg_async + ql_dbg_buffer, vha, 0x504d,
1386 (uint8_t *)logio, sizeof(*logio));
1387
1388 goto logio_done;
1389 }
1390
1391 if (le16_to_cpu(logio->comp_status) == CS_COMPLETE) {
1392 ql_dbg(ql_dbg_async, fcport->vha, 0x5036,
1393 "Async-%s complete - hdl=%x portid=%02x%02x%02x "
1394 "iop0=%x.\n", type, sp->handle, fcport->d_id.b.domain,
1395 fcport->d_id.b.area, fcport->d_id.b.al_pa,
1396 le32_to_cpu(logio->io_parameter[0]));
1397
1398 data[0] = MBS_COMMAND_COMPLETE;
1399 if (sp->type != SRB_LOGIN_CMD)
1400 goto logio_done;
1401
1402 iop[0] = le32_to_cpu(logio->io_parameter[0]);
1403 if (iop[0] & BIT_4) {
1404 fcport->port_type = FCT_TARGET;
1405 if (iop[0] & BIT_8)
1406 fcport->flags |= FCF_FCP2_DEVICE;
1407 } else if (iop[0] & BIT_5)
1408 fcport->port_type = FCT_INITIATOR;
1409
1410 if (iop[0] & BIT_7)
1411 fcport->flags |= FCF_CONF_COMP_SUPPORTED;
1412
1413 if (logio->io_parameter[7] || logio->io_parameter[8])
1414 fcport->supported_classes |= FC_COS_CLASS2;
1415 if (logio->io_parameter[9] || logio->io_parameter[10])
1416 fcport->supported_classes |= FC_COS_CLASS3;
1417
1418 goto logio_done;
1419 }
1420
1421 iop[0] = le32_to_cpu(logio->io_parameter[0]);
1422 iop[1] = le32_to_cpu(logio->io_parameter[1]);
1423 switch (iop[0]) {
1424 case LSC_SCODE_PORTID_USED:
1425 data[0] = MBS_PORT_ID_USED;
1426 data[1] = LSW(iop[1]);
1427 break;
1428 case LSC_SCODE_NPORT_USED:
1429 data[0] = MBS_LOOP_ID_USED;
1430 break;
1431 default:
1432 data[0] = MBS_COMMAND_ERROR;
1433 break;
1434 }
1435
1436 ql_dbg(ql_dbg_async, fcport->vha, 0x5037,
1437 "Async-%s failed - hdl=%x portid=%02x%02x%02x comp=%x "
1438 "iop0=%x iop1=%x.\n", type, sp->handle, fcport->d_id.b.domain,
1439 fcport->d_id.b.area, fcport->d_id.b.al_pa,
1440 le16_to_cpu(logio->comp_status),
1441 le32_to_cpu(logio->io_parameter[0]),
1442 le32_to_cpu(logio->io_parameter[1]));
1443
1444logio_done:
1445 sp->done(vha, sp, 0);
1446}
1447
1448static void
1449qla24xx_tm_iocb_entry(scsi_qla_host_t *vha, struct req_que *req,
1450 struct tsk_mgmt_entry *tsk)
1451{
1452 const char func[] = "TMF-IOCB";
1453 const char *type;
1454 fc_port_t *fcport;
1455 srb_t *sp;
1456 struct srb_iocb *iocb;
1457 struct sts_entry_24xx *sts = (struct sts_entry_24xx *)tsk;
1458 int error = 1;
1459
1460 sp = qla2x00_get_sp_from_handle(vha, func, req, tsk);
1461 if (!sp)
1462 return;
1463
1464 iocb = &sp->u.iocb_cmd;
1465 type = sp->name;
1466 fcport = sp->fcport;
1467
1468 if (sts->entry_status) {
1469 ql_log(ql_log_warn, fcport->vha, 0x5038,
1470 "Async-%s error - hdl=%x entry-status(%x).\n",
1471 type, sp->handle, sts->entry_status);
1472 } else if (sts->comp_status != __constant_cpu_to_le16(CS_COMPLETE)) {
1473 ql_log(ql_log_warn, fcport->vha, 0x5039,
1474 "Async-%s error - hdl=%x completion status(%x).\n",
1475 type, sp->handle, sts->comp_status);
1476 } else if (!(le16_to_cpu(sts->scsi_status) &
1477 SS_RESPONSE_INFO_LEN_VALID)) {
1478 ql_log(ql_log_warn, fcport->vha, 0x503a,
1479 "Async-%s error - hdl=%x no response info(%x).\n",
1480 type, sp->handle, sts->scsi_status);
1481 } else if (le32_to_cpu(sts->rsp_data_len) < 4) {
1482 ql_log(ql_log_warn, fcport->vha, 0x503b,
1483 "Async-%s error - hdl=%x not enough response(%d).\n",
1484 type, sp->handle, sts->rsp_data_len);
1485 } else if (sts->data[3]) {
1486 ql_log(ql_log_warn, fcport->vha, 0x503c,
1487 "Async-%s error - hdl=%x response(%x).\n",
1488 type, sp->handle, sts->data[3]);
1489 } else {
1490 error = 0;
1491 }
1492
1493 if (error) {
1494 iocb->u.tmf.data = error;
1495 ql_dump_buffer(ql_dbg_async + ql_dbg_buffer, vha, 0x5055,
1496 (uint8_t *)sts, sizeof(*sts));
1497 }
1498
1499 sp->done(vha, sp, 0);
1500}
1501
1502
1503
1504
1505
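/**
 * qla2x00_process_response_queue() - Process response queue entries.
 * @rsp: response queue to drain
 */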
1506void
1507qla2x00_process_response_queue(struct rsp_que *rsp)
1508{
1509 struct scsi_qla_host *vha;
1510 struct qla_hw_data *ha = rsp->hw;
1511 struct device_reg_2xxx __iomem *reg = &ha->iobase->isp;
1512 sts_entry_t *pkt;
1513 uint16_t handle_cnt;
1514 uint16_t cnt;
1515
1516 vha = pci_get_drvdata(ha->pdev);
1517
1518 if (!vha->flags.online)
1519 return;
1520
1521 while (rsp->ring_ptr->signature != RESPONSE_PROCESSED) {
1522 pkt = (sts_entry_t *)rsp->ring_ptr;
1523
1524 rsp->ring_index++;
1525 if (rsp->ring_index == rsp->length) {
1526 rsp->ring_index = 0;
1527 rsp->ring_ptr = rsp->ring;
1528 } else {
1529 rsp->ring_ptr++;
1530 }
1531
1532 if (pkt->entry_status != 0) {
1533 qla2x00_error_entry(vha, rsp, pkt);
1534 ((response_t *)pkt)->signature = RESPONSE_PROCESSED;
1535 wmb();
1536 continue;
1537 }
1538
1539 switch (pkt->entry_type) {
1540 case STATUS_TYPE:
1541 qla2x00_status_entry(vha, rsp, pkt);
1542 break;
1543 case STATUS_TYPE_21:
1544 handle_cnt = ((sts21_entry_t *)pkt)->handle_count;
1545 for (cnt = 0; cnt < handle_cnt; cnt++) {
1546 qla2x00_process_completed_request(vha, rsp->req,
1547 ((sts21_entry_t *)pkt)->handle[cnt]);
1548 }
1549 break;
1550 case STATUS_TYPE_22:
1551 handle_cnt = ((sts22_entry_t *)pkt)->handle_count;
1552 for (cnt = 0; cnt < handle_cnt; cnt++) {
1553 qla2x00_process_completed_request(vha, rsp->req,
1554 ((sts22_entry_t *)pkt)->handle[cnt]);
1555 }
1556 break;
1557 case STATUS_CONT_TYPE:
1558 qla2x00_status_cont_entry(rsp, (sts_cont_entry_t *)pkt);
1559 break;
1560 case MBX_IOCB_TYPE:
1561 qla2x00_mbx_iocb_entry(vha, rsp->req,
1562 (struct mbx_entry *)pkt);
1563 break;
1564 case CT_IOCB_TYPE:
1565 qla2x00_ct_entry(vha, rsp->req, pkt, CT_IOCB_TYPE);
1566 break;
1567 default:
1568
1569 ql_log(ql_log_warn, vha, 0x504a,
1570 "Received unknown response pkt type %x "
1571 "entry status=%x.\n",
1572 pkt->entry_type, pkt->entry_status);
1573 break;
1574 }
1575 ((response_t *)pkt)->signature = RESPONSE_PROCESSED;
1576 wmb();
1577 }
1578
1579
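	/* Adjust ring index. */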
1580 WRT_REG_WORD(ISP_RSP_Q_OUT(ha, reg), rsp->ring_index);
1581}
1582
1583static inline void
1584qla2x00_handle_sense(srb_t *sp, uint8_t *sense_data, uint32_t par_sense_len,
1585 uint32_t sense_len, struct rsp_que *rsp, int res)
1586{
1587 struct scsi_qla_host *vha = sp->fcport->vha;
1588 struct scsi_cmnd *cp = GET_CMD_SP(sp);
1589 uint32_t track_sense_len;
1590
1591 if (sense_len >= SCSI_SENSE_BUFFERSIZE)
1592 sense_len = SCSI_SENSE_BUFFERSIZE;
1593
1594 SET_CMD_SENSE_LEN(sp, sense_len);
1595 SET_CMD_SENSE_PTR(sp, cp->sense_buffer);
1596 track_sense_len = sense_len;
1597
1598 if (sense_len > par_sense_len)
1599 sense_len = par_sense_len;
1600
1601 memcpy(cp->sense_buffer, sense_data, sense_len);
1602
1603 SET_CMD_SENSE_PTR(sp, cp->sense_buffer + sense_len);
1604 track_sense_len -= sense_len;
1605 SET_CMD_SENSE_LEN(sp, track_sense_len);
1606
1607 if (track_sense_len != 0) {
1608 rsp->status_srb = sp;
1609 cp->result = res;
1610 }
1611
1612 if (sense_len) {
1613 ql_dbg(ql_dbg_io + ql_dbg_buffer, vha, 0x301c,
1614 "Check condition Sense data, nexus%ld:%d:%d cmd=%p.\n",
1615 sp->fcport->vha->host_no, cp->device->id, cp->device->lun,
1616 cp);
1617 ql_dump_buffer(ql_dbg_io + ql_dbg_buffer, vha, 0x302b,
1618 cp->sense_buffer, sense_len);
1619 }
1620}
1621
1622struct scsi_dif_tuple {
1623 __be16 guard;
1624 __be16 app_tag;
1625 __be32 ref_tag;
1626};
1627
1628
1629
1630
1631
1632
1633
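/*
 * qla2x00_handle_dif_error() - Check the guard, reference and application
 * tags reported in the status IOCB; sectors the target never wrote are
 * completed as a successful short transfer, while genuine DIF errors get
 * sense data built and the command aborted.
 */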
1634static inline int
1635qla2x00_handle_dif_error(srb_t *sp, struct sts_entry_24xx *sts24)
1636{
1637 struct scsi_qla_host *vha = sp->fcport->vha;
1638 struct scsi_cmnd *cmd = GET_CMD_SP(sp);
1639 uint8_t *ap = &sts24->data[12];
1640 uint8_t *ep = &sts24->data[20];
1641 uint32_t e_ref_tag, a_ref_tag;
1642 uint16_t e_app_tag, a_app_tag;
1643 uint16_t e_guard, a_guard;
1644
1645
1646
1647
1648
1649 a_guard = le16_to_cpu(*(uint16_t *)(ap + 2));
1650 a_app_tag = le16_to_cpu(*(uint16_t *)(ap + 0));
1651 a_ref_tag = le32_to_cpu(*(uint32_t *)(ap + 4));
1652 e_guard = le16_to_cpu(*(uint16_t *)(ep + 2));
1653 e_app_tag = le16_to_cpu(*(uint16_t *)(ep + 0));
1654 e_ref_tag = le32_to_cpu(*(uint32_t *)(ep + 4));
1655
1656 ql_dbg(ql_dbg_io, vha, 0x3023,
1657 "iocb(s) %p Returned STATUS.\n", sts24);
1658
1659 ql_dbg(ql_dbg_io, vha, 0x3024,
1660 "DIF ERROR in cmd 0x%x lba 0x%llx act ref"
1661 " tag=0x%x, exp ref_tag=0x%x, act app tag=0x%x, exp app"
1662 " tag=0x%x, act guard=0x%x, exp guard=0x%x.\n",
1663 cmd->cmnd[0], (u64)scsi_get_lba(cmd), a_ref_tag, e_ref_tag,
1664 a_app_tag, e_app_tag, a_guard, e_guard);
1665
1666
1667
1668
1669
1670
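	/*
	 * Ignore sectors the target never wrote: for protection types 0/1/2
	 * an application tag of all 0xf's marks the sector, for type 3 both
	 * the application tag and reference tag must be all 0xf's.
	 */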
1671 if ((a_app_tag == 0xffff) &&
1672 ((scsi_get_prot_type(cmd) != SCSI_PROT_DIF_TYPE3) ||
1673 (a_ref_tag == 0xffffffff))) {
1674 uint32_t blocks_done, resid;
1675 sector_t lba_s = scsi_get_lba(cmd);
1676
1677
1678 blocks_done = e_ref_tag - (uint32_t)lba_s + 1;
1679
1680 resid = scsi_bufflen(cmd) - (blocks_done *
1681 cmd->device->sector_size);
1682
1683 scsi_set_resid(cmd, resid);
1684 cmd->result = DID_OK << 16;
1685
1686
1687 if (scsi_prot_sg_count(cmd)) {
1688 uint32_t i, j = 0, k = 0, num_ent;
1689 struct scatterlist *sg;
1690 struct sd_dif_tuple *spt;
1691
1692
1693 scsi_for_each_prot_sg(cmd, sg,
1694 scsi_prot_sg_count(cmd), i) {
1695 num_ent = sg_dma_len(sg) / 8;
1696 if (k + num_ent < blocks_done) {
1697 k += num_ent;
1698 continue;
1699 }
1700 j = blocks_done - k - 1;
1701 k = blocks_done;
1702 break;
1703 }
1704
1705 if (k != blocks_done) {
1706 ql_log(ql_log_warn, vha, 0x302f,
1707 "unexpected tag values tag:lba=%x:%llx)\n",
1708 e_ref_tag, (unsigned long long)lba_s);
1709 return 1;
1710 }
1711
1712 spt = page_address(sg_page(sg)) + sg->offset;
1713 spt += j;
1714
1715 spt->app_tag = 0xffff;
1716 if (scsi_get_prot_type(cmd) == SCSI_PROT_DIF_TYPE3)
1717 spt->ref_tag = 0xffffffff;
1718 }
1719
1720 return 0;
1721 }
1722
1723
1724 if (e_guard != a_guard) {
1725 scsi_build_sense_buffer(1, cmd->sense_buffer, ILLEGAL_REQUEST,
1726 0x10, 0x1);
1727 set_driver_byte(cmd, DRIVER_SENSE);
1728 set_host_byte(cmd, DID_ABORT);
1729 cmd->result |= SAM_STAT_CHECK_CONDITION << 1;
1730 return 1;
1731 }
1732
1733
1734 if (e_ref_tag != a_ref_tag) {
1735 scsi_build_sense_buffer(1, cmd->sense_buffer, ILLEGAL_REQUEST,
1736 0x10, 0x3);
1737 set_driver_byte(cmd, DRIVER_SENSE);
1738 set_host_byte(cmd, DID_ABORT);
1739 cmd->result |= SAM_STAT_CHECK_CONDITION << 1;
1740 return 1;
1741 }
1742
1743
1744 if (e_app_tag != a_app_tag) {
1745 scsi_build_sense_buffer(1, cmd->sense_buffer, ILLEGAL_REQUEST,
1746 0x10, 0x2);
1747 set_driver_byte(cmd, DRIVER_SENSE);
1748 set_host_byte(cmd, DID_ABORT);
1749 cmd->result |= SAM_STAT_CHECK_CONDITION << 1;
1750 return 1;
1751 }
1752
1753 return 1;
1754}
1755
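/*
 * qla25xx_process_bidir_status_iocb() - Complete a bidirectional (bsg)
 * pass-through command: translate the firmware completion status into an
 * EXT_STATUS code in the vendor-specific reply and finish the srb.
 */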
1756static void
1757qla25xx_process_bidir_status_iocb(scsi_qla_host_t *vha, void *pkt,
1758 struct req_que *req, uint32_t index)
1759{
1760 struct qla_hw_data *ha = vha->hw;
1761 srb_t *sp;
1762 uint16_t comp_status;
1763 uint16_t scsi_status;
1764 uint16_t thread_id;
1765 uint32_t rval = EXT_STATUS_OK;
1766 struct fc_bsg_job *bsg_job = NULL;
1767 sts_entry_t *sts;
1768 struct sts_entry_24xx *sts24;
1769 sts = (sts_entry_t *) pkt;
1770 sts24 = (struct sts_entry_24xx *) pkt;
1771
1772
1773 if (index >= req->num_outstanding_cmds) {
1774 ql_log(ql_log_warn, vha, 0x70af,
1775 "Invalid SCSI completion handle 0x%x.\n", index);
1776 set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
1777 return;
1778 }
1779
1780 sp = req->outstanding_cmds[index];
1781 if (sp) {
1782
1783 req->outstanding_cmds[index] = NULL;
1784 bsg_job = sp->u.bsg_job;
1785 } else {
1786 ql_log(ql_log_warn, vha, 0x70b0,
1787 "Req:%d: Invalid ISP SCSI completion handle(0x%x)\n",
1788 req->id, index);
1789
1790 set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
1791 return;
1792 }
1793
1794 if (IS_FWI2_CAPABLE(ha)) {
1795 comp_status = le16_to_cpu(sts24->comp_status);
1796 scsi_status = le16_to_cpu(sts24->scsi_status) & SS_MASK;
1797 } else {
1798 comp_status = le16_to_cpu(sts->comp_status);
1799 scsi_status = le16_to_cpu(sts->scsi_status) & SS_MASK;
1800 }
1801
1802 thread_id = bsg_job->request->rqst_data.h_vendor.vendor_cmd[1];
1803 switch (comp_status) {
1804 case CS_COMPLETE:
1805 if (scsi_status == 0) {
1806 bsg_job->reply->reply_payload_rcv_len =
1807 bsg_job->reply_payload.payload_len;
1808 rval = EXT_STATUS_OK;
1809 }
1810 goto done;
1811
1812 case CS_DATA_OVERRUN:
1813 ql_dbg(ql_dbg_user, vha, 0x70b1,
1814 "Command completed with data overrun thread_id=%d\n",
1815 thread_id);
1816 rval = EXT_STATUS_DATA_OVERRUN;
1817 break;
1818
1819 case CS_DATA_UNDERRUN:
1820 ql_dbg(ql_dbg_user, vha, 0x70b2,
1821 "Command completed with data underrun thread_id=%d\n",
1822 thread_id);
1823 rval = EXT_STATUS_DATA_UNDERRUN;
1824 break;
1825 case CS_BIDIR_RD_OVERRUN:
1826 ql_dbg(ql_dbg_user, vha, 0x70b3,
1827 "Command completed with read data overrun thread_id=%d\n",
1828 thread_id);
1829 rval = EXT_STATUS_DATA_OVERRUN;
1830 break;
1831
1832 case CS_BIDIR_RD_WR_OVERRUN:
1833 ql_dbg(ql_dbg_user, vha, 0x70b4,
1834 "Command completed with read and write data overrun "
1835 "thread_id=%d\n", thread_id);
1836 rval = EXT_STATUS_DATA_OVERRUN;
1837 break;
1838
1839 case CS_BIDIR_RD_OVERRUN_WR_UNDERRUN:
1840 ql_dbg(ql_dbg_user, vha, 0x70b5,
1841 "Command completed with read data overrun and write data "
1842 "underrun thread_id=%d\n", thread_id);
1843 rval = EXT_STATUS_DATA_OVERRUN;
1844 break;
1845
1846 case CS_BIDIR_RD_UNDERRUN:
1847 ql_dbg(ql_dbg_user, vha, 0x70b6,
1848 "Command completed with read data underrun "
1849 "thread_id=%d\n", thread_id);
1850 rval = EXT_STATUS_DATA_UNDERRUN;
1851 break;
1852
1853 case CS_BIDIR_RD_UNDERRUN_WR_OVERRUN:
1854 ql_dbg(ql_dbg_user, vha, 0x70b7,
1855 "Command completed with read data underrun and write data "
1856 "overrun thread_id=%d\n", thread_id);
1857 rval = EXT_STATUS_DATA_UNDERRUN;
1858 break;
1859
1860 case CS_BIDIR_RD_WR_UNDERRUN:
1861 ql_dbg(ql_dbg_user, vha, 0x70b8,
1862 "Command completed with read and write data underrun "
1863 "thread_id=%d\n", thread_id);
1864 rval = EXT_STATUS_DATA_UNDERRUN;
1865 break;
1866
1867 case CS_BIDIR_DMA:
1868 ql_dbg(ql_dbg_user, vha, 0x70b9,
1869 "Command completed with data DMA error thread_id=%d\n",
1870 thread_id);
1871 rval = EXT_STATUS_DMA_ERR;
1872 break;
1873
1874 case CS_TIMEOUT:
1875 ql_dbg(ql_dbg_user, vha, 0x70ba,
1876 "Command completed with timeout thread_id=%d\n",
1877 thread_id);
1878 rval = EXT_STATUS_TIMEOUT;
1879 break;
1880 default:
1881 ql_dbg(ql_dbg_user, vha, 0x70bb,
1882 "Command completed with completion status=0x%x "
1883 "thread_id=%d\n", comp_status, thread_id);
1884 rval = EXT_STATUS_ERR;
1885 break;
1886 }
1887 bsg_job->reply->reply_payload_rcv_len = 0;
1888
1889done:
1890
1891 bsg_job->reply->reply_data.vendor_reply.vendor_rsp[0] = rval;
1892 bsg_job->reply_len = sizeof(struct fc_bsg_reply);
1893
1894
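	/*
	 * Always complete with DID_OK; the detailed status is carried back to
	 * the application in the vendor-specific reply set above.
	 */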
1895 sp->done(vha, sp, (DID_OK << 16));
1896
1897}
1898
1899
1900
1901
1902
1903
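/**
 * qla2x00_status_entry() - Process a Status IOCB entry.
 * @vha: SCSI driver HA context
 * @rsp: response queue the entry arrived on
 * @pkt: entry pointer
 */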
1904static void
1905qla2x00_status_entry(scsi_qla_host_t *vha, struct rsp_que *rsp, void *pkt)
1906{
1907 srb_t *sp;
1908 fc_port_t *fcport;
1909 struct scsi_cmnd *cp;
1910 sts_entry_t *sts;
1911 struct sts_entry_24xx *sts24;
1912 uint16_t comp_status;
1913 uint16_t scsi_status;
1914 uint16_t ox_id;
1915 uint8_t lscsi_status;
1916 int32_t resid;
1917 uint32_t sense_len, par_sense_len, rsp_info_len, resid_len,
1918 fw_resid_len;
1919 uint8_t *rsp_info, *sense_data;
1920 struct qla_hw_data *ha = vha->hw;
1921 uint32_t handle;
1922 uint16_t que;
1923 struct req_que *req;
1924 int logit = 1;
1925 int res = 0;
1926 uint16_t state_flags = 0;
1927
1928 sts = (sts_entry_t *) pkt;
1929 sts24 = (struct sts_entry_24xx *) pkt;
1930 if (IS_FWI2_CAPABLE(ha)) {
1931 comp_status = le16_to_cpu(sts24->comp_status);
1932 scsi_status = le16_to_cpu(sts24->scsi_status) & SS_MASK;
1933 state_flags = le16_to_cpu(sts24->state_flags);
1934 } else {
1935 comp_status = le16_to_cpu(sts->comp_status);
1936 scsi_status = le16_to_cpu(sts->scsi_status) & SS_MASK;
1937 }
1938 handle = (uint32_t) LSW(sts->handle);
1939 que = MSW(sts->handle);
1940 req = ha->req_q_map[que];
1941
1942
1943 if (handle < req->num_outstanding_cmds)
1944 sp = req->outstanding_cmds[handle];
1945 else
1946 sp = NULL;
1947
1948 if (sp == NULL) {
1949 ql_dbg(ql_dbg_io, vha, 0x3017,
1950 "Invalid status handle (0x%x).\n", sts->handle);
1951
1952 if (IS_QLA82XX(ha))
1953 set_bit(FCOE_CTX_RESET_NEEDED, &vha->dpc_flags);
1954 else
1955 set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
1956 qla2xxx_wake_dpc(vha);
1957 return;
1958 }
1959
1960 if (unlikely((state_flags & BIT_1) && (sp->type == SRB_BIDI_CMD))) {
1961 qla25xx_process_bidir_status_iocb(vha, pkt, req, handle);
1962 return;
1963 }
1964
1965
1966 if (comp_status == CS_COMPLETE && scsi_status == 0) {
1967 qla2x00_do_host_ramp_up(vha);
1968 qla2x00_process_completed_request(vha, req, handle);
1969
1970 return;
1971 }
1972
1973 req->outstanding_cmds[handle] = NULL;
1974 cp = GET_CMD_SP(sp);
1975 if (cp == NULL) {
1976 ql_dbg(ql_dbg_io, vha, 0x3018,
1977 "Command already returned (0x%x/%p).\n",
1978 sts->handle, sp);
1979
1980 return;
1981 }
1982
1983 lscsi_status = scsi_status & STATUS_MASK;
1984
1985 fcport = sp->fcport;
1986
1987 ox_id = 0;
1988 sense_len = par_sense_len = rsp_info_len = resid_len =
1989 fw_resid_len = 0;
1990 if (IS_FWI2_CAPABLE(ha)) {
1991 if (scsi_status & SS_SENSE_LEN_VALID)
1992 sense_len = le32_to_cpu(sts24->sense_len);
1993 if (scsi_status & SS_RESPONSE_INFO_LEN_VALID)
1994 rsp_info_len = le32_to_cpu(sts24->rsp_data_len);
1995 if (scsi_status & (SS_RESIDUAL_UNDER | SS_RESIDUAL_OVER))
1996 resid_len = le32_to_cpu(sts24->rsp_residual_count);
1997 if (comp_status == CS_DATA_UNDERRUN)
1998 fw_resid_len = le32_to_cpu(sts24->residual_len);
1999 rsp_info = sts24->data;
2000 sense_data = sts24->data;
2001 host_to_fcp_swap(sts24->data, sizeof(sts24->data));
2002 ox_id = le16_to_cpu(sts24->ox_id);
2003 par_sense_len = sizeof(sts24->data);
2004 } else {
2005 if (scsi_status & SS_SENSE_LEN_VALID)
2006 sense_len = le16_to_cpu(sts->req_sense_length);
2007 if (scsi_status & SS_RESPONSE_INFO_LEN_VALID)
2008 rsp_info_len = le16_to_cpu(sts->rsp_info_len);
2009 resid_len = le32_to_cpu(sts->residual_length);
2010 rsp_info = sts->rsp_info;
2011 sense_data = sts->req_sense_data;
2012 par_sense_len = sizeof(sts->req_sense_data);
2013 }
2014
2015
2016 if (scsi_status & SS_RESPONSE_INFO_LEN_VALID) {
2017
2018 if (IS_FWI2_CAPABLE(ha)) {
2019 sense_data += rsp_info_len;
2020 par_sense_len -= rsp_info_len;
2021 }
2022 if (rsp_info_len > 3 && rsp_info[3]) {
2023 ql_dbg(ql_dbg_io, fcport->vha, 0x3019,
2024 "FCP I/O protocol failure (0x%x/0x%x).\n",
2025 rsp_info_len, rsp_info[3]);
2026
2027 res = DID_BUS_BUSY << 16;
2028 goto out;
2029 }
2030 }
2031
2032
2033 if (IS_FWI2_CAPABLE(ha) && comp_status == CS_COMPLETE &&
2034 scsi_status & SS_RESIDUAL_OVER)
2035 comp_status = CS_DATA_OVERRUN;
2036
2037
2038
2039
2040 switch (comp_status) {
2041 case CS_COMPLETE:
2042 case CS_QUEUE_FULL:
2043 if (scsi_status == 0) {
2044 res = DID_OK << 16;
2045 break;
2046 }
2047 if (scsi_status & (SS_RESIDUAL_UNDER | SS_RESIDUAL_OVER)) {
2048 resid = resid_len;
2049 scsi_set_resid(cp, resid);
2050
2051 if (!lscsi_status &&
2052 ((unsigned)(scsi_bufflen(cp) - resid) <
2053 cp->underflow)) {
2054 ql_dbg(ql_dbg_io, fcport->vha, 0x301a,
2055 "Mid-layer underflow "
2056 "detected (0x%x of 0x%x bytes).\n",
2057 resid, scsi_bufflen(cp));
2058
2059 res = DID_ERROR << 16;
2060 break;
2061 }
2062 }
2063 res = DID_OK << 16 | lscsi_status;
2064
2065 if (lscsi_status == SAM_STAT_TASK_SET_FULL) {
2066 ql_dbg(ql_dbg_io, fcport->vha, 0x301b,
2067 "QUEUE FULL detected.\n");
2068 break;
2069 }
2070 logit = 0;
2071 if (lscsi_status != SS_CHECK_CONDITION)
2072 break;
2073
2074 memset(cp->sense_buffer, 0, SCSI_SENSE_BUFFERSIZE);
2075 if (!(scsi_status & SS_SENSE_LEN_VALID))
2076 break;
2077
2078 qla2x00_handle_sense(sp, sense_data, par_sense_len, sense_len,
2079 rsp, res);
2080 break;
2081
2082 case CS_DATA_UNDERRUN:
2083
2084 resid = IS_FWI2_CAPABLE(ha) ? fw_resid_len : resid_len;
2085 scsi_set_resid(cp, resid);
2086 if (scsi_status & SS_RESIDUAL_UNDER) {
2087 if (IS_FWI2_CAPABLE(ha) && fw_resid_len != resid_len) {
2088 ql_dbg(ql_dbg_io, fcport->vha, 0x301d,
2089 "Dropped frame(s) detected "
2090 "(0x%x of 0x%x bytes).\n",
2091 resid, scsi_bufflen(cp));
2092
2093 res = DID_ERROR << 16 | lscsi_status;
2094 goto check_scsi_status;
2095 }
2096
2097 if (!lscsi_status &&
2098 ((unsigned)(scsi_bufflen(cp) - resid) <
2099 cp->underflow)) {
2100 ql_dbg(ql_dbg_io, fcport->vha, 0x301e,
2101 "Mid-layer underflow "
2102 "detected (0x%x of 0x%x bytes).\n",
2103 resid, scsi_bufflen(cp));
2104
2105 res = DID_ERROR << 16;
2106 break;
2107 }
2108 } else if (lscsi_status != SAM_STAT_TASK_SET_FULL &&
2109 lscsi_status != SAM_STAT_BUSY) {
			/*
			 * The firmware reported an underrun but the SCSI status
			 * does not flag a residual; unless the target returned
			 * TASK SET FULL or BUSY, treat this as dropped frames.
			 */

2115 ql_dbg(ql_dbg_io, fcport->vha, 0x301f,
2116 "Dropped frame(s) detected (0x%x "
2117 "of 0x%x bytes).\n", resid,
2118 scsi_bufflen(cp));
2119
2120 res = DID_ERROR << 16 | lscsi_status;
2121 goto check_scsi_status;
2122 } else {
2123 ql_dbg(ql_dbg_io, fcport->vha, 0x3030,
2124 "scsi_status: 0x%x, lscsi_status: 0x%x\n",
2125 scsi_status, lscsi_status);
2126 }
2127
2128 res = DID_OK << 16 | lscsi_status;
2129 logit = 0;
2130
2131check_scsi_status:
		/*
		 * Check to see if the SCSI status is non-zero; if so, report
		 * it back to the midlayer.
		 */
2136 if (lscsi_status != 0) {
2137 if (lscsi_status == SAM_STAT_TASK_SET_FULL) {
2138 ql_dbg(ql_dbg_io, fcport->vha, 0x3020,
2139 "QUEUE FULL detected.\n");
2140 logit = 1;
2141 break;
2142 }
2143 if (lscsi_status != SS_CHECK_CONDITION)
2144 break;
2145
2146 memset(cp->sense_buffer, 0, SCSI_SENSE_BUFFERSIZE);
2147 if (!(scsi_status & SS_SENSE_LEN_VALID))
2148 break;
2149
2150 qla2x00_handle_sense(sp, sense_data, par_sense_len,
2151 sense_len, rsp, res);
2152 }
2153 break;
2154
2155 case CS_PORT_LOGGED_OUT:
2156 case CS_PORT_CONFIG_CHG:
2157 case CS_PORT_BUSY:
2158 case CS_INCOMPLETE:
2159 case CS_PORT_UNAVAILABLE:
2160 case CS_TIMEOUT:
2161 case CS_RESET:
		/*
		 * The FC class will block the rport while we try to recover,
		 * so tell the midlayer to requeue the command until the
		 * transport class decides how to handle it.
		 */

2168 res = DID_TRANSPORT_DISRUPTED << 16;
2169
2170 if (comp_status == CS_TIMEOUT) {
2171 if (IS_FWI2_CAPABLE(ha))
2172 break;
2173 else if ((le16_to_cpu(sts->status_flags) &
2174 SF_LOGOUT_SENT) == 0)
2175 break;
2176 }
2177
2178 ql_dbg(ql_dbg_io, fcport->vha, 0x3021,
2179 "Port down status: port-state=0x%x.\n",
2180 atomic_read(&fcport->state));
2181
2182 if (atomic_read(&fcport->state) == FCS_ONLINE)
2183 qla2x00_mark_device_lost(fcport->vha, fcport, 1, 1);
2184 break;
2185
2186 case CS_ABORTED:
2187 res = DID_RESET << 16;
2188 break;
2189
2190 case CS_DIF_ERROR:
2191 logit = qla2x00_handle_dif_error(sp, sts24);
2192 res = cp->result;
2193 break;
2194
2195 case CS_TRANSPORT:
2196 res = DID_ERROR << 16;
2197
2198 if (!IS_PI_SPLIT_DET_CAPABLE(ha))
2199 break;
2200
2201 if (state_flags & BIT_4)
2202 scmd_printk(KERN_WARNING, cp,
2203 "Unsupported device '%s' found.\n",
2204 cp->device->vendor);
2205 break;
2206
2207 default:
2208 res = DID_ERROR << 16;
2209 break;
2210 }
2211
2212out:
2213 if (logit)
2214 ql_dbg(ql_dbg_io, fcport->vha, 0x3022,
2215 "FCP command status: 0x%x-0x%x (0x%x) "
2216 "nexus=%ld:%d:%d portid=%02x%02x%02x oxid=0x%x "
2217 "cdb=%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x len=0x%x "
2218 "rsp_info=0x%x resid=0x%x fw_resid=0x%x.\n",
2219 comp_status, scsi_status, res, vha->host_no,
2220 cp->device->id, cp->device->lun, fcport->d_id.b.domain,
2221 fcport->d_id.b.area, fcport->d_id.b.al_pa, ox_id,
2222 cp->cmnd[0], cp->cmnd[1], cp->cmnd[2], cp->cmnd[3],
2223 cp->cmnd[4], cp->cmnd[5], cp->cmnd[6], cp->cmnd[7],
2224 cp->cmnd[8], cp->cmnd[9], scsi_bufflen(cp), rsp_info_len,
2225 resid_len, fw_resid_len);
2226
2227 if (!res)
2228 qla2x00_do_host_ramp_up(vha);
2229
2230 if (rsp->status_srb == NULL)
2231 sp->done(ha, sp, res);
2232}
2233
2234
2235
2236
2237
2238
2239
2240
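/**
 * qla2x00_status_cont_entry() - Process a Status Continuations entry.
 * @rsp: response queue
 * @pkt: Entry pointer
 *
 * Copies the next chunk of extended sense data for the command tracked in
 * rsp->status_srb and completes the command once all sense bytes have been
 * gathered.
 */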
2241static void
2242qla2x00_status_cont_entry(struct rsp_que *rsp, sts_cont_entry_t *pkt)
2243{
2244 uint8_t sense_sz = 0;
2245 struct qla_hw_data *ha = rsp->hw;
2246 struct scsi_qla_host *vha = pci_get_drvdata(ha->pdev);
2247 srb_t *sp = rsp->status_srb;
2248 struct scsi_cmnd *cp;
2249 uint32_t sense_len;
2250 uint8_t *sense_ptr;
2251
2252 if (!sp || !GET_CMD_SENSE_LEN(sp))
2253 return;
2254
2255 sense_len = GET_CMD_SENSE_LEN(sp);
2256 sense_ptr = GET_CMD_SENSE_PTR(sp);
2257
2258 cp = GET_CMD_SP(sp);
2259 if (cp == NULL) {
2260 ql_log(ql_log_warn, vha, 0x3025,
2261 "cmd is NULL: already returned to OS (sp=%p).\n", sp);
2262
2263 rsp->status_srb = NULL;
2264 return;
2265 }
2266
2267 if (sense_len > sizeof(pkt->data))
2268 sense_sz = sizeof(pkt->data);
2269 else
2270 sense_sz = sense_len;
2271
	/* Move sense data. */
2273 if (IS_FWI2_CAPABLE(ha))
2274 host_to_fcp_swap(pkt->data, sizeof(pkt->data));
2275 memcpy(sense_ptr, pkt->data, sense_sz);
2276 ql_dump_buffer(ql_dbg_io + ql_dbg_buffer, vha, 0x302c,
2277 sense_ptr, sense_sz);
2278
2279 sense_len -= sense_sz;
2280 sense_ptr += sense_sz;
2281
2282 SET_CMD_SENSE_PTR(sp, sense_ptr);
2283 SET_CMD_SENSE_LEN(sp, sense_len);
2284
	/* All sense data received -- place the command on the done queue. */
2286 if (sense_len == 0) {
2287 rsp->status_srb = NULL;
2288 sp->done(ha, sp, cp->result);
2289 }
2290}
2291
2292
2293
2294
2295
2296
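/**
 * qla2x00_error_entry() - Process an error entry.
 * @vha: SCSI driver HA context
 * @rsp: response queue
 * @pkt: Entry pointer
 *
 * The originating request queue is carried in the upper word of the IOCB
 * handle; if the handle or queue cannot be resolved, an ISP reset is
 * scheduled.
 */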
2297static void
2298qla2x00_error_entry(scsi_qla_host_t *vha, struct rsp_que *rsp, sts_entry_t *pkt)
2299{
2300 srb_t *sp;
2301 struct qla_hw_data *ha = vha->hw;
2302 const char func[] = "ERROR-IOCB";
2303 uint16_t que = MSW(pkt->handle);
2304 struct req_que *req = NULL;
2305 int res = DID_ERROR << 16;
2306
2307 ql_dbg(ql_dbg_async, vha, 0x502a,
2308 "type of error status in response: 0x%x\n", pkt->entry_status);
2309
2310 if (que >= ha->max_req_queues || !ha->req_q_map[que])
2311 goto fatal;
2312
2313 req = ha->req_q_map[que];
2314
2315 if (pkt->entry_status & RF_BUSY)
2316 res = DID_BUS_BUSY << 16;
2317
2318 sp = qla2x00_get_sp_from_handle(vha, func, req, pkt);
2319 if (sp) {
2320 sp->done(ha, sp, res);
2321 return;
2322 }
2323fatal:
2324 ql_log(ql_log_warn, vha, 0x5030,
2325 "Error entry - invalid handle/queue.\n");
2326
2327 if (IS_QLA82XX(ha))
2328 set_bit(FCOE_CTX_RESET_NEEDED, &vha->dpc_flags);
2329 else
2330 set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
2331 qla2xxx_wake_dpc(vha);
2332}
2333
2334
2335
2336
2337
2338
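/**
 * qla24xx_mbx_completion() - Process mailbox command completions.
 * @vha: SCSI driver HA context
 * @mb0: value of mailbox register 0
 */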
2339static void
2340qla24xx_mbx_completion(scsi_qla_host_t *vha, uint16_t mb0)
2341{
2342 uint16_t cnt;
2343 uint32_t mboxes;
2344 uint16_t __iomem *wptr;
2345 struct qla_hw_data *ha = vha->hw;
2346 struct device_reg_24xx __iomem *reg = &ha->iobase->isp24;
2347
	/* Determine which outgoing mailbox registers are expected back. */
2349 mboxes = (1 << ha->mbx_count) - 1;
2350 if (!ha->mcp)
2351 ql_dbg(ql_dbg_async, vha, 0x504e, "MBX pointer ERROR.\n");
2352 else
2353 mboxes = ha->mcp->in_mb;
2354
	/* Load return mailbox registers. */
2356 ha->flags.mbox_int = 1;
2357 ha->mailbox_out[0] = mb0;
2358 mboxes >>= 1;
	wptr = (uint16_t __iomem *)&reg->mailbox1;
2360
2361 for (cnt = 1; cnt < ha->mbx_count; cnt++) {
2362 if (mboxes & BIT_0)
2363 ha->mailbox_out[cnt] = RD_REG_WORD(wptr);
2364
2365 mboxes >>= 1;
2366 wptr++;
2367 }
2368}
2369
2370
2371
2372
2373
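/**
 * qla24xx_process_response_queue() - Process response queue entries.
 * @vha: SCSI driver HA context
 * @rsp: response queue to drain
 */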
2374void qla24xx_process_response_queue(struct scsi_qla_host *vha,
2375 struct rsp_que *rsp)
2376{
2377 struct sts_entry_24xx *pkt;
2378 struct qla_hw_data *ha = vha->hw;
2379
2380 if (!vha->flags.online)
2381 return;
2382
2383 while (rsp->ring_ptr->signature != RESPONSE_PROCESSED) {
2384 pkt = (struct sts_entry_24xx *)rsp->ring_ptr;
2385
2386 rsp->ring_index++;
2387 if (rsp->ring_index == rsp->length) {
2388 rsp->ring_index = 0;
2389 rsp->ring_ptr = rsp->ring;
2390 } else {
2391 rsp->ring_ptr++;
2392 }
2393
2394 if (pkt->entry_status != 0) {
2395 qla2x00_error_entry(vha, rsp, (sts_entry_t *) pkt);
2396
2397 (void)qlt_24xx_process_response_error(vha, pkt);
2398
2399 ((response_t *)pkt)->signature = RESPONSE_PROCESSED;
2400 wmb();
2401 continue;
2402 }
2403
2404 switch (pkt->entry_type) {
2405 case STATUS_TYPE:
2406 qla2x00_status_entry(vha, rsp, pkt);
2407 break;
2408 case STATUS_CONT_TYPE:
2409 qla2x00_status_cont_entry(rsp, (sts_cont_entry_t *)pkt);
2410 break;
2411 case VP_RPT_ID_IOCB_TYPE:
2412 qla24xx_report_id_acquisition(vha,
2413 (struct vp_rpt_id_entry_24xx *)pkt);
2414 break;
2415 case LOGINOUT_PORT_IOCB_TYPE:
2416 qla24xx_logio_entry(vha, rsp->req,
2417 (struct logio_entry_24xx *)pkt);
2418 break;
2419 case TSK_MGMT_IOCB_TYPE:
2420 qla24xx_tm_iocb_entry(vha, rsp->req,
2421 (struct tsk_mgmt_entry *)pkt);
2422 break;
2423 case CT_IOCB_TYPE:
2424 qla24xx_els_ct_entry(vha, rsp->req, pkt, CT_IOCB_TYPE);
2425 break;
2426 case ELS_IOCB_TYPE:
2427 qla24xx_els_ct_entry(vha, rsp->req, pkt, ELS_IOCB_TYPE);
2428 break;
2429 case ABTS_RECV_24XX:
			/* Ensure that the ATIO queue is empty. */
			qlt_24xx_process_atio_queue(vha);
			/* Drop through to the common target-mode handling. */
2432 case ABTS_RESP_24XX:
2433 case CTIO_TYPE7:
2434 case NOTIFY_ACK_TYPE:
2435 qlt_response_pkt_all_vps(vha, (response_t *)pkt);
2436 break;
2437 case MARKER_TYPE:
			/*
			 * Nothing to do for markers; this case only keeps them
			 * out of the unknown-type path below.
			 */
2441 break;
2442 default:
			/* Type not supported. */
2444 ql_dbg(ql_dbg_async, vha, 0x5042,
2445 "Received unknown response pkt type %x "
2446 "entry status=%x.\n",
2447 pkt->entry_type, pkt->entry_status);
2448 break;
2449 }
2450 ((response_t *)pkt)->signature = RESPONSE_PROCESSED;
2451 wmb();
2452 }
2453
	/* Adjust ring index. */
2455 if (IS_QLA82XX(ha)) {
2456 struct device_reg_82xx __iomem *reg = &ha->iobase->isp82;
		WRT_REG_DWORD(&reg->rsp_q_out[0], rsp->ring_index);
2458 } else
2459 WRT_REG_DWORD(rsp->rsp_q_out, rsp->ring_index);
2460}
2461
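/*
 * When the RISC reports itself paused, peek through the register window on
 * ISP25xx/81xx/83xx parts and log any additional diagnostic state before the
 * firmware dump is taken.
 */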
2462static void
2463qla2xxx_check_risc_status(scsi_qla_host_t *vha)
2464{
2465 int rval;
2466 uint32_t cnt;
2467 struct qla_hw_data *ha = vha->hw;
2468 struct device_reg_24xx __iomem *reg = &ha->iobase->isp24;
2469
2470 if (!IS_QLA25XX(ha) && !IS_QLA81XX(ha) && !IS_QLA83XX(ha))
2471 return;
2472
2473 rval = QLA_SUCCESS;
	WRT_REG_DWORD(&reg->iobase_addr, 0x7C00);
	RD_REG_DWORD(&reg->iobase_addr);
	WRT_REG_DWORD(&reg->iobase_window, 0x0001);
	for (cnt = 10000; (RD_REG_DWORD(&reg->iobase_window) & BIT_0) == 0 &&
	    rval == QLA_SUCCESS; cnt--) {
		if (cnt) {
			WRT_REG_DWORD(&reg->iobase_window, 0x0001);
2481 udelay(10);
2482 } else
2483 rval = QLA_FUNCTION_TIMEOUT;
2484 }
2485 if (rval == QLA_SUCCESS)
2486 goto next_test;
2487
	WRT_REG_DWORD(&reg->iobase_window, 0x0003);
	for (cnt = 100; (RD_REG_DWORD(&reg->iobase_window) & BIT_0) == 0 &&
	    rval == QLA_SUCCESS; cnt--) {
		if (cnt) {
			WRT_REG_DWORD(&reg->iobase_window, 0x0003);
2493 udelay(10);
2494 } else
2495 rval = QLA_FUNCTION_TIMEOUT;
2496 }
2497 if (rval != QLA_SUCCESS)
2498 goto done;
2499
2500next_test:
	if (RD_REG_DWORD(&reg->iobase_c8) & BIT_3)
2502 ql_log(ql_log_info, vha, 0x504c,
2503 "Additional code -- 0x55AA.\n");
2504
2505done:
	WRT_REG_DWORD(&reg->iobase_window, 0x0000);
	RD_REG_DWORD(&reg->iobase_window);
2508}
2509
2510
2511
2512
2513
2514
2515
2516
2517
2518
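/**
 * qla24xx_intr_handler() - Process interrupts for ISP24xx and later adapters.
 * @irq: interrupt number
 * @dev_id: response queue pointer
 *
 * Called by the system whenever the host adapter generates an interrupt.
 *
 * Returns handled flag.
 */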
2519irqreturn_t
2520qla24xx_intr_handler(int irq, void *dev_id)
2521{
2522 scsi_qla_host_t *vha;
2523 struct qla_hw_data *ha;
2524 struct device_reg_24xx __iomem *reg;
2525 int status;
2526 unsigned long iter;
2527 uint32_t stat;
2528 uint32_t hccr;
2529 uint16_t mb[8];
2530 struct rsp_que *rsp;
2531 unsigned long flags;
2532
2533 rsp = (struct rsp_que *) dev_id;
2534 if (!rsp) {
2535 ql_log(ql_log_info, NULL, 0x5059,
2536 "%s: NULL response queue pointer.\n", __func__);
2537 return IRQ_NONE;
2538 }
2539
2540 ha = rsp->hw;
2541 reg = &ha->iobase->isp24;
2542 status = 0;
2543
2544 if (unlikely(pci_channel_offline(ha->pdev)))
2545 return IRQ_HANDLED;
2546
2547 spin_lock_irqsave(&ha->hardware_lock, flags);
2548 vha = pci_get_drvdata(ha->pdev);
2549 for (iter = 50; iter--; ) {
		stat = RD_REG_DWORD(&reg->host_status);
		if (stat & HSRX_RISC_PAUSED) {
			if (unlikely(pci_channel_offline(ha->pdev)))
				break;

			hccr = RD_REG_DWORD(&reg->hccr);
2556
2557 ql_log(ql_log_warn, vha, 0x504b,
2558 "RISC paused -- HCCR=%x, Dumping firmware.\n",
2559 hccr);
2560
2561 qla2xxx_check_risc_status(vha);
2562
2563 ha->isp_ops->fw_dump(vha, 1);
2564 set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
2565 break;
2566 } else if ((stat & HSRX_RISC_INT) == 0)
2567 break;
2568
2569 switch (stat & 0xff) {
2570 case INTR_ROM_MB_SUCCESS:
2571 case INTR_ROM_MB_FAILED:
2572 case INTR_MB_SUCCESS:
2573 case INTR_MB_FAILED:
2574 qla24xx_mbx_completion(vha, MSW(stat));
2575 status |= MBX_INTERRUPT;
2576
2577 break;
2578 case INTR_ASYNC_EVENT:
2579 mb[0] = MSW(stat);
			mb[1] = RD_REG_WORD(&reg->mailbox1);
			mb[2] = RD_REG_WORD(&reg->mailbox2);
			mb[3] = RD_REG_WORD(&reg->mailbox3);
2583 qla2x00_async_event(vha, rsp, mb);
2584 break;
2585 case INTR_RSP_QUE_UPDATE:
2586 case INTR_RSP_QUE_UPDATE_83XX:
2587 qla24xx_process_response_queue(vha, rsp);
2588 break;
2589 case INTR_ATIO_QUE_UPDATE:
2590 qlt_24xx_process_atio_queue(vha);
2591 break;
2592 case INTR_ATIO_RSP_QUE_UPDATE:
2593 qlt_24xx_process_atio_queue(vha);
2594 qla24xx_process_response_queue(vha, rsp);
2595 break;
2596 default:
2597 ql_dbg(ql_dbg_async, vha, 0x504f,
			    "Unrecognized interrupt type (%d).\n", stat & 0xff);
2599 break;
2600 }
		WRT_REG_DWORD(&reg->hccr, HCCRX_CLR_RISC_INT);
		RD_REG_DWORD_RELAXED(&reg->hccr);
2603 if (unlikely(IS_QLA83XX(ha) && (ha->pdev->revision == 1)))
2604 ndelay(3500);
2605 }
2606 qla2x00_handle_mbx_completion(ha, status);
2607 spin_unlock_irqrestore(&ha->hardware_lock, flags);
2608
2609 return IRQ_HANDLED;
2610}
2611
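/* MSI-X handler for the response-queue-update vector. */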
2612static irqreturn_t
2613qla24xx_msix_rsp_q(int irq, void *dev_id)
2614{
2615 struct qla_hw_data *ha;
2616 struct rsp_que *rsp;
2617 struct device_reg_24xx __iomem *reg;
2618 struct scsi_qla_host *vha;
2619 unsigned long flags;
2620
2621 rsp = (struct rsp_que *) dev_id;
2622 if (!rsp) {
2623 ql_log(ql_log_info, NULL, 0x505a,
2624 "%s: NULL response queue pointer.\n", __func__);
2625 return IRQ_NONE;
2626 }
2627 ha = rsp->hw;
2628 reg = &ha->iobase->isp24;
2629
2630 spin_lock_irqsave(&ha->hardware_lock, flags);
2631
2632 vha = pci_get_drvdata(ha->pdev);
2633 qla24xx_process_response_queue(vha, rsp);
2634 if (!ha->flags.disable_msix_handshake) {
		WRT_REG_DWORD(&reg->hccr, HCCRX_CLR_RISC_INT);
		RD_REG_DWORD_RELAXED(&reg->hccr);
2637 }
2638 spin_unlock_irqrestore(&ha->hardware_lock, flags);
2639
2640 return IRQ_HANDLED;
2641}
2642
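/*
 * MSI-X handler for additional (multiqueue) response queues; the actual
 * processing is deferred to the per-queue work item.
 */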
2643static irqreturn_t
2644qla25xx_msix_rsp_q(int irq, void *dev_id)
2645{
2646 struct qla_hw_data *ha;
2647 struct rsp_que *rsp;
2648 struct device_reg_24xx __iomem *reg;
2649 unsigned long flags;
2650
2651 rsp = (struct rsp_que *) dev_id;
2652 if (!rsp) {
2653 ql_log(ql_log_info, NULL, 0x505b,
2654 "%s: NULL response queue pointer.\n", __func__);
2655 return IRQ_NONE;
2656 }
2657 ha = rsp->hw;
2658
	/* Clear the interrupt, if enabled, for this response queue. */
2660 if (!ha->flags.disable_msix_handshake) {
2661 reg = &ha->iobase->isp24;
2662 spin_lock_irqsave(&ha->hardware_lock, flags);
		WRT_REG_DWORD(&reg->hccr, HCCRX_CLR_RISC_INT);
		RD_REG_DWORD_RELAXED(&reg->hccr);
2665 spin_unlock_irqrestore(&ha->hardware_lock, flags);
2666 }
2667 queue_work_on((int) (rsp->id - 1), ha->wq, &rsp->q_work);
2668
2669 return IRQ_HANDLED;
2670}
2671
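/*
 * Default MSI-X vector: mailbox completions, asynchronous events and base
 * response/ATIO queue updates.
 */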
2672static irqreturn_t
2673qla24xx_msix_default(int irq, void *dev_id)
2674{
2675 scsi_qla_host_t *vha;
2676 struct qla_hw_data *ha;
2677 struct rsp_que *rsp;
2678 struct device_reg_24xx __iomem *reg;
2679 int status;
2680 uint32_t stat;
2681 uint32_t hccr;
2682 uint16_t mb[8];
2683 unsigned long flags;
2684
2685 rsp = (struct rsp_que *) dev_id;
2686 if (!rsp) {
2687 ql_log(ql_log_info, NULL, 0x505c,
2688 "%s: NULL response queue pointer.\n", __func__);
2689 return IRQ_NONE;
2690 }
2691 ha = rsp->hw;
2692 reg = &ha->iobase->isp24;
2693 status = 0;
2694
2695 spin_lock_irqsave(&ha->hardware_lock, flags);
2696 vha = pci_get_drvdata(ha->pdev);
2697 do {
		stat = RD_REG_DWORD(&reg->host_status);
		if (stat & HSRX_RISC_PAUSED) {
			if (unlikely(pci_channel_offline(ha->pdev)))
				break;

			hccr = RD_REG_DWORD(&reg->hccr);
2704
2705 ql_log(ql_log_info, vha, 0x5050,
2706 "RISC paused -- HCCR=%x, Dumping firmware.\n",
2707 hccr);
2708
2709 qla2xxx_check_risc_status(vha);
2710
2711 ha->isp_ops->fw_dump(vha, 1);
2712 set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
2713 break;
2714 } else if ((stat & HSRX_RISC_INT) == 0)
2715 break;
2716
2717 switch (stat & 0xff) {
2718 case INTR_ROM_MB_SUCCESS:
2719 case INTR_ROM_MB_FAILED:
2720 case INTR_MB_SUCCESS:
2721 case INTR_MB_FAILED:
2722 qla24xx_mbx_completion(vha, MSW(stat));
2723 status |= MBX_INTERRUPT;
2724
2725 break;
2726 case INTR_ASYNC_EVENT:
2727 mb[0] = MSW(stat);
			mb[1] = RD_REG_WORD(&reg->mailbox1);
			mb[2] = RD_REG_WORD(&reg->mailbox2);
			mb[3] = RD_REG_WORD(&reg->mailbox3);
2731 qla2x00_async_event(vha, rsp, mb);
2732 break;
2733 case INTR_RSP_QUE_UPDATE:
2734 case INTR_RSP_QUE_UPDATE_83XX:
2735 qla24xx_process_response_queue(vha, rsp);
2736 break;
2737 case INTR_ATIO_QUE_UPDATE:
2738 qlt_24xx_process_atio_queue(vha);
2739 break;
2740 case INTR_ATIO_RSP_QUE_UPDATE:
2741 qlt_24xx_process_atio_queue(vha);
2742 qla24xx_process_response_queue(vha, rsp);
2743 break;
2744 default:
2745 ql_dbg(ql_dbg_async, vha, 0x5051,
2746 "Unrecognized interrupt type (%d).\n", stat & 0xff);
2747 break;
2748 }
		WRT_REG_DWORD(&reg->hccr, HCCRX_CLR_RISC_INT);
2750 } while (0);
2751 qla2x00_handle_mbx_completion(ha, status);
2752 spin_unlock_irqrestore(&ha->hardware_lock, flags);
2753
2754 return IRQ_HANDLED;
2755}
2756
2757
2758
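/* Name/handler pairs used when wiring up the MSI-X vectors below. */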
2759struct qla_init_msix_entry {
2760 const char *name;
2761 irq_handler_t handler;
2762};
2763
2764static struct qla_init_msix_entry msix_entries[3] = {
2765 { "qla2xxx (default)", qla24xx_msix_default },
2766 { "qla2xxx (rsp_q)", qla24xx_msix_rsp_q },
2767 { "qla2xxx (multiq)", qla25xx_msix_rsp_q },
2768};
2769
2770static struct qla_init_msix_entry qla82xx_msix_entries[2] = {
2771 { "qla2xxx (default)", qla82xx_msix_default },
2772 { "qla2xxx (rsp_q)", qla82xx_msix_rsp_q },
2773};
2774
2775static struct qla_init_msix_entry qla83xx_msix_entries[3] = {
2776 { "qla2xxx (default)", qla24xx_msix_default },
2777 { "qla2xxx (rsp_q)", qla24xx_msix_rsp_q },
2778 { "qla2xxx (atio_q)", qla83xx_msix_atio_q },
2779};
2780
2781static void
2782qla24xx_disable_msix(struct qla_hw_data *ha)
2783{
2784 int i;
2785 struct qla_msix_entry *qentry;
2786 scsi_qla_host_t *vha = pci_get_drvdata(ha->pdev);
2787
2788 for (i = 0; i < ha->msix_count; i++) {
2789 qentry = &ha->msix_entries[i];
2790 if (qentry->have_irq)
2791 free_irq(qentry->vector, qentry->rsp);
2792 }
2793 pci_disable_msix(ha->pdev);
2794 kfree(ha->msix_entries);
2795 ha->msix_entries = NULL;
2796 ha->flags.msix_enabled = 0;
2797 ql_dbg(ql_dbg_init, vha, 0x0042,
	    "Disabled MSI-X.\n");
2799}
2800
2801static int
2802qla24xx_enable_msix(struct qla_hw_data *ha, struct rsp_que *rsp)
2803{
2804#define MIN_MSIX_COUNT 2
2805 int i, ret;
2806 struct msix_entry *entries;
2807 struct qla_msix_entry *qentry;
2808 scsi_qla_host_t *vha = pci_get_drvdata(ha->pdev);
2809
2810 entries = kzalloc(sizeof(struct msix_entry) * ha->msix_count,
2811 GFP_KERNEL);
2812 if (!entries) {
2813 ql_log(ql_log_warn, vha, 0x00bc,
2814 "Failed to allocate memory for msix_entry.\n");
2815 return -ENOMEM;
2816 }
2817
2818 for (i = 0; i < ha->msix_count; i++)
2819 entries[i].entry = i;
2820
2821 ret = pci_enable_msix(ha->pdev, entries, ha->msix_count);
2822 if (ret) {
2823 if (ret < MIN_MSIX_COUNT)
2824 goto msix_failed;
2825
2826 ql_log(ql_log_warn, vha, 0x00c6,
2827 "MSI-X: Failed to enable support "
		    "-- %d/%d. Retrying with %d vectors.\n",
2829 ha->msix_count, ret, ret);
2830 ha->msix_count = ret;
2831 ret = pci_enable_msix(ha->pdev, entries, ha->msix_count);
2832 if (ret) {
2833msix_failed:
2834 ql_log(ql_log_fatal, vha, 0x00c7,
2835 "MSI-X: Failed to enable support, "
2836 "giving up -- %d/%d.\n",
2837 ha->msix_count, ret);
2838 goto msix_out;
2839 }
2840 ha->max_rsp_queues = ha->msix_count - 1;
2841 }
2842 ha->msix_entries = kzalloc(sizeof(struct qla_msix_entry) *
2843 ha->msix_count, GFP_KERNEL);
2844 if (!ha->msix_entries) {
2845 ql_log(ql_log_fatal, vha, 0x00c8,
2846 "Failed to allocate memory for ha->msix_entries.\n");
2847 ret = -ENOMEM;
2848 goto msix_out;
2849 }
2850 ha->flags.msix_enabled = 1;
2851
2852 for (i = 0; i < ha->msix_count; i++) {
2853 qentry = &ha->msix_entries[i];
2854 qentry->vector = entries[i].vector;
2855 qentry->entry = entries[i].entry;
2856 qentry->have_irq = 0;
2857 qentry->rsp = NULL;
2858 }
2859
	/* Register an interrupt handler for each MSI-X vector. */
2861 for (i = 0; i < ha->msix_count; i++) {
2862 qentry = &ha->msix_entries[i];
2863 if (QLA_TGT_MODE_ENABLED() && IS_ATIO_MSIX_CAPABLE(ha)) {
2864 ret = request_irq(qentry->vector,
2865 qla83xx_msix_entries[i].handler,
2866 0, qla83xx_msix_entries[i].name, rsp);
2867 } else if (IS_QLA82XX(ha)) {
2868 ret = request_irq(qentry->vector,
2869 qla82xx_msix_entries[i].handler,
2870 0, qla82xx_msix_entries[i].name, rsp);
2871 } else {
2872 ret = request_irq(qentry->vector,
2873 msix_entries[i].handler,
2874 0, msix_entries[i].name, rsp);
2875 }
2876 if (ret) {
2877 ql_log(ql_log_fatal, vha, 0x00cb,
2878 "MSI-X: unable to register handler -- %x/%d.\n",
2879 qentry->vector, ret);
2880 qla24xx_disable_msix(ha);
2881 ha->mqenable = 0;
2882 goto msix_out;
2883 }
2884 qentry->have_irq = 1;
2885 qentry->rsp = rsp;
2886 rsp->msix = qentry;
2887 }
2888
	/* Enable multiple response queues if the hardware supports them. */
2890 if (IS_QLA83XX(ha)) {
2891 if (ha->msixbase && ha->mqiobase &&
2892 (ha->max_rsp_queues > 1 || ha->max_req_queues > 1))
2893 ha->mqenable = 1;
2894 } else
2895 if (ha->mqiobase
2896 && (ha->max_rsp_queues > 1 || ha->max_req_queues > 1))
2897 ha->mqenable = 1;
2898 ql_dbg(ql_dbg_multiq, vha, 0xc005,
2899 "mqiobase=%p, max_rsp_queues=%d, max_req_queues=%d.\n",
2900 ha->mqiobase, ha->max_rsp_queues, ha->max_req_queues);
2901 ql_dbg(ql_dbg_init, vha, 0x0055,
2902 "mqiobase=%p, max_rsp_queues=%d, max_req_queues=%d.\n",
2903 ha->mqiobase, ha->max_rsp_queues, ha->max_req_queues);
2904
2905msix_out:
2906 kfree(entries);
2907 return ret;
2908}
2909
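/*
 * Claim interrupt resources for the adapter: prefer MSI-X, fall back to MSI
 * and finally to legacy INTx, subject to the chip and subsystem quirks
 * handled below.
 */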
2910int
2911qla2x00_request_irqs(struct qla_hw_data *ha, struct rsp_que *rsp)
2912{
2913 int ret;
2914 device_reg_t __iomem *reg = ha->iobase;
2915 scsi_qla_host_t *vha = pci_get_drvdata(ha->pdev);
2916
	/* If possible, enable MSI-X. */
2918 if (!IS_QLA2432(ha) && !IS_QLA2532(ha) && !IS_QLA8432(ha) &&
2919 !IS_CNA_CAPABLE(ha) && !IS_QLA2031(ha) && !IS_QLAFX00(ha))
2920 goto skip_msi;
2921
2922 if (ha->pdev->subsystem_vendor == PCI_VENDOR_ID_HP &&
2923 (ha->pdev->subsystem_device == 0x7040 ||
2924 ha->pdev->subsystem_device == 0x7041 ||
2925 ha->pdev->subsystem_device == 0x1705)) {
2926 ql_log(ql_log_warn, vha, 0x0034,
2927 "MSI-X: Unsupported ISP 2432 SSVID/SSDID (0x%X,0x%X).\n",
2928 ha->pdev->subsystem_vendor,
2929 ha->pdev->subsystem_device);
2930 goto skip_msi;
2931 }
2932
2933 if (IS_QLA2432(ha) && (ha->pdev->revision < QLA_MSIX_CHIP_REV_24XX)) {
2934 ql_log(ql_log_warn, vha, 0x0035,
		    "MSI-X: Unsupported ISP2432 (0x%X, 0x%X).\n",
2936 ha->pdev->revision, QLA_MSIX_CHIP_REV_24XX);
2937 goto skip_msix;
2938 }
2939
2940 ret = qla24xx_enable_msix(ha, rsp);
2941 if (!ret) {
2942 ql_dbg(ql_dbg_init, vha, 0x0036,
2943 "MSI-X: Enabled (0x%X, 0x%X).\n",
2944 ha->chip_revision, ha->fw_attributes);
2945 goto clear_risc_ints;
2946 }
2947 ql_log(ql_log_info, vha, 0x0037,
	    "MSI-X: Falling back to MSI mode -- %d.\n", ret);
2949skip_msix:
2950
2951 if (!IS_QLA24XX(ha) && !IS_QLA2532(ha) && !IS_QLA8432(ha) &&
2952 !IS_QLA8001(ha) && !IS_QLA82XX(ha) && !IS_QLAFX00(ha))
2953 goto skip_msi;
2954
2955 ret = pci_enable_msi(ha->pdev);
2956 if (!ret) {
2957 ql_dbg(ql_dbg_init, vha, 0x0038,
2958 "MSI: Enabled.\n");
2959 ha->flags.msi_enabled = 1;
2960 } else
2961 ql_log(ql_log_warn, vha, 0x0039,
		    "MSI: Falling back to INTa mode -- %d.\n", ret);
2963
	/* INTx is not supported on ISP82xx; fail if MSI could not be enabled. */
2965 if (!ha->flags.msi_enabled && IS_QLA82XX(ha))
2966 return QLA_FUNCTION_FAILED;
2967
2968skip_msi:
2969
2970 ret = request_irq(ha->pdev->irq, ha->isp_ops->intr_handler,
2971 ha->flags.msi_enabled ? 0 : IRQF_SHARED,
2972 QLA2XXX_DRIVER_NAME, rsp);
2973 if (ret) {
2974 ql_log(ql_log_warn, vha, 0x003a,
		    "Failed to reserve interrupt %d; already in use.\n",
2976 ha->pdev->irq);
2977 goto fail;
2978 } else if (!ha->flags.msi_enabled) {
2979 ql_dbg(ql_dbg_init, vha, 0x0125,
2980 "INTa mode: Enabled.\n");
2981 ha->flags.mr_intr_valid = 1;
2982 }
2983
2984clear_risc_ints:
2985
2986 spin_lock_irq(&ha->hardware_lock);
2987 if (!IS_FWI2_CAPABLE(ha))
		WRT_REG_WORD(&reg->isp.semaphore, 0);
2989 spin_unlock_irq(&ha->hardware_lock);
2990
2991fail:
2992 return ret;
2993}
2994
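/* Release whichever interrupt mode (MSI-X, MSI or INTx) was set up. */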
2995void
2996qla2x00_free_irqs(scsi_qla_host_t *vha)
2997{
2998 struct qla_hw_data *ha = vha->hw;
2999 struct rsp_que *rsp;
3000
	/*
	 * The response queue map may not be set up yet if we are called
	 * from a probe failure path, so check it before dereferencing.
	 */
3005 if (!ha->rsp_q_map || !ha->rsp_q_map[0])
3006 return;
3007 rsp = ha->rsp_q_map[0];
3008
3009 if (ha->flags.msix_enabled)
3010 qla24xx_disable_msix(ha);
3011 else if (ha->flags.msi_enabled) {
3012 free_irq(ha->pdev->irq, rsp);
3013 pci_disable_msi(ha->pdev);
3014 } else
3015 free_irq(ha->pdev->irq, rsp);
3016}
3017
3018
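/* Hook up the "multiq" MSI-X vector for an additional response queue. */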
3019int qla25xx_request_irq(struct rsp_que *rsp)
3020{
3021 struct qla_hw_data *ha = rsp->hw;
3022 struct qla_init_msix_entry *intr = &msix_entries[2];
3023 struct qla_msix_entry *msix = rsp->msix;
3024 scsi_qla_host_t *vha = pci_get_drvdata(ha->pdev);
3025 int ret;
3026
3027 ret = request_irq(msix->vector, intr->handler, 0, intr->name, rsp);
3028 if (ret) {
3029 ql_log(ql_log_fatal, vha, 0x00e6,
3030 "MSI-X: Unable to register handler -- %x/%d.\n",
3031 msix->vector, ret);
3032 return ret;
3033 }
3034 msix->have_irq = 1;
3035 msix->rsp = rsp;
3036 return ret;
3037}
3038