1
2
3
4
5
6
7#include "qla_def.h"
8#include "qla_target.h"
9
10#include <linux/delay.h>
11#include <linux/slab.h>
12#include <scsi/scsi_tcq.h>
13#include <scsi/scsi_bsg_fc.h>
14#include <scsi/scsi_eh.h>
15
16static void qla2x00_mbx_completion(scsi_qla_host_t *, uint16_t);
17static void qla2x00_status_entry(scsi_qla_host_t *, struct rsp_que *, void *);
18static void qla2x00_status_cont_entry(struct rsp_que *, sts_cont_entry_t *);
19static void qla2x00_error_entry(scsi_qla_host_t *, struct rsp_que *,
20 sts_entry_t *);
21
22
23
24
25
26
27
28
29
30
31irqreturn_t
32qla2100_intr_handler(int irq, void *dev_id)
33{
34 scsi_qla_host_t *vha;
35 struct qla_hw_data *ha;
36 struct device_reg_2xxx __iomem *reg;
37 int status;
38 unsigned long iter;
39 uint16_t hccr;
40 uint16_t mb[4];
41 struct rsp_que *rsp;
42 unsigned long flags;
43
44 rsp = (struct rsp_que *) dev_id;
45 if (!rsp) {
46 ql_log(ql_log_info, NULL, 0x505d,
47 "%s: NULL response queue pointer.\n", __func__);
48 return (IRQ_NONE);
49 }
50
51 ha = rsp->hw;
52 reg = &ha->iobase->isp;
53 status = 0;
54
55 spin_lock_irqsave(&ha->hardware_lock, flags);
56 vha = pci_get_drvdata(ha->pdev);
57 for (iter = 50; iter--; ) {
58 hccr = RD_REG_WORD(®->hccr);
59 if (hccr & HCCR_RISC_PAUSE) {
60 if (pci_channel_offline(ha->pdev))
61 break;
62
63
64
65
66
67
68 WRT_REG_WORD(®->hccr, HCCR_RESET_RISC);
69 RD_REG_WORD(®->hccr);
70
71 ha->isp_ops->fw_dump(vha, 1);
72 set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
73 break;
74 } else if ((RD_REG_WORD(®->istatus) & ISR_RISC_INT) == 0)
75 break;
76
77 if (RD_REG_WORD(®->semaphore) & BIT_0) {
78 WRT_REG_WORD(®->hccr, HCCR_CLR_RISC_INT);
79 RD_REG_WORD(®->hccr);
80
81
82 mb[0] = RD_MAILBOX_REG(ha, reg, 0);
83 if (mb[0] > 0x3fff && mb[0] < 0x8000) {
84 qla2x00_mbx_completion(vha, mb[0]);
85 status |= MBX_INTERRUPT;
86 } else if (mb[0] > 0x7fff && mb[0] < 0xc000) {
87 mb[1] = RD_MAILBOX_REG(ha, reg, 1);
88 mb[2] = RD_MAILBOX_REG(ha, reg, 2);
89 mb[3] = RD_MAILBOX_REG(ha, reg, 3);
90 qla2x00_async_event(vha, rsp, mb);
91 } else {
92
93 ql_dbg(ql_dbg_async, vha, 0x5025,
94 "Unrecognized interrupt type (%d).\n",
95 mb[0]);
96 }
97
98 WRT_REG_WORD(®->semaphore, 0);
99 RD_REG_WORD(®->semaphore);
100 } else {
101 qla2x00_process_response_queue(rsp);
102
103 WRT_REG_WORD(®->hccr, HCCR_CLR_RISC_INT);
104 RD_REG_WORD(®->hccr);
105 }
106 }
107 qla2x00_handle_mbx_completion(ha, status);
108 spin_unlock_irqrestore(&ha->hardware_lock, flags);
109
110 return (IRQ_HANDLED);
111}
112
113
114
115
116
117
118
119
120
121
122irqreturn_t
123qla2300_intr_handler(int irq, void *dev_id)
124{
125 scsi_qla_host_t *vha;
126 struct device_reg_2xxx __iomem *reg;
127 int status;
128 unsigned long iter;
129 uint32_t stat;
130 uint16_t hccr;
131 uint16_t mb[4];
132 struct rsp_que *rsp;
133 struct qla_hw_data *ha;
134 unsigned long flags;
135
136 rsp = (struct rsp_que *) dev_id;
137 if (!rsp) {
138 ql_log(ql_log_info, NULL, 0x5058,
139 "%s: NULL response queue pointer.\n", __func__);
140 return (IRQ_NONE);
141 }
142
143 ha = rsp->hw;
144 reg = &ha->iobase->isp;
145 status = 0;
146
147 spin_lock_irqsave(&ha->hardware_lock, flags);
148 vha = pci_get_drvdata(ha->pdev);
149 for (iter = 50; iter--; ) {
150 stat = RD_REG_DWORD(®->u.isp2300.host_status);
151 if (stat & HSR_RISC_PAUSED) {
152 if (unlikely(pci_channel_offline(ha->pdev)))
153 break;
154
155 hccr = RD_REG_WORD(®->hccr);
156 if (hccr & (BIT_15 | BIT_13 | BIT_11 | BIT_8))
157 ql_log(ql_log_warn, vha, 0x5026,
158 "Parity error -- HCCR=%x, Dumping "
159 "firmware.\n", hccr);
160 else
161 ql_log(ql_log_warn, vha, 0x5027,
162 "RISC paused -- HCCR=%x, Dumping "
163 "firmware.\n", hccr);
164
165
166
167
168
169
170 WRT_REG_WORD(®->hccr, HCCR_RESET_RISC);
171 RD_REG_WORD(®->hccr);
172
173 ha->isp_ops->fw_dump(vha, 1);
174 set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
175 break;
176 } else if ((stat & HSR_RISC_INT) == 0)
177 break;
178
179 switch (stat & 0xff) {
180 case 0x1:
181 case 0x2:
182 case 0x10:
183 case 0x11:
184 qla2x00_mbx_completion(vha, MSW(stat));
185 status |= MBX_INTERRUPT;
186
187
188 WRT_REG_WORD(®->semaphore, 0);
189 break;
190 case 0x12:
191 mb[0] = MSW(stat);
192 mb[1] = RD_MAILBOX_REG(ha, reg, 1);
193 mb[2] = RD_MAILBOX_REG(ha, reg, 2);
194 mb[3] = RD_MAILBOX_REG(ha, reg, 3);
195 qla2x00_async_event(vha, rsp, mb);
196 break;
197 case 0x13:
198 qla2x00_process_response_queue(rsp);
199 break;
200 case 0x15:
201 mb[0] = MBA_CMPLT_1_16BIT;
202 mb[1] = MSW(stat);
203 qla2x00_async_event(vha, rsp, mb);
204 break;
205 case 0x16:
206 mb[0] = MBA_SCSI_COMPLETION;
207 mb[1] = MSW(stat);
208 mb[2] = RD_MAILBOX_REG(ha, reg, 2);
209 qla2x00_async_event(vha, rsp, mb);
210 break;
211 default:
212 ql_dbg(ql_dbg_async, vha, 0x5028,
213 "Unrecognized interrupt type (%d).\n", stat & 0xff);
214 break;
215 }
216 WRT_REG_WORD(®->hccr, HCCR_CLR_RISC_INT);
217 RD_REG_WORD_RELAXED(®->hccr);
218 }
219 qla2x00_handle_mbx_completion(ha, status);
220 spin_unlock_irqrestore(&ha->hardware_lock, flags);
221
222 return (IRQ_HANDLED);
223}
224
225
226
227
228
229
/**
 * qla2x00_mbx_completion() - Process a mailbox command completion.
 * @vha: SCSI driver HA context
 * @mb0: value of outbound mailbox register 0 (completion status)
 *
 * Copies the outbound mailbox registers the issuing command asked for
 * (bitmap in ha->mcp->in_mb; all registers when no command context is
 * available) into ha->mailbox_out[] and sets ha->flags.mbox_int so the
 * waiting mailbox-command path can complete.
 */
static void
qla2x00_mbx_completion(scsi_qla_host_t *vha, uint16_t mb0)
{
	uint16_t	cnt;
	uint32_t	mboxes;
	uint16_t __iomem *wptr;
	struct qla_hw_data *ha = vha->hw;
	struct device_reg_2xxx __iomem *reg = &ha->iobase->isp;

	/* Default: read every mailbox register if no command context. */
	mboxes = (1 << ha->mbx_count) - 1;
	if (!ha->mcp)
		ql_dbg(ql_dbg_async, vha, 0x5001, "MBX pointer ERROR.\n");
	else
		mboxes = ha->mcp->in_mb;

	/* Load return mailbox registers. */
	ha->flags.mbox_int = 1;
	ha->mailbox_out[0] = mb0;
	mboxes >>= 1;	/* bit for mb0 consumed above */
	wptr = (uint16_t __iomem *)MAILBOX_REG(ha, reg, 1);

	for (cnt = 1; cnt < ha->mbx_count; cnt++) {
		/* ISP2200 maps mailbox 8 and above at a different offset. */
		if (IS_QLA2200(ha) && cnt == 8)
			wptr = (uint16_t __iomem *)MAILBOX_REG(ha, reg, 8);
		/*
		 * NOTE(review): registers 4 and 5 are read via the debounce
		 * helper -- presumably they can read transiently on this
		 * hardware; confirm against the ISP programming guide.
		 */
		if ((cnt == 4 || cnt == 5) && (mboxes & BIT_0))
			ha->mailbox_out[cnt] = qla2x00_debounce_register(wptr);
		else if (mboxes & BIT_0)
			ha->mailbox_out[cnt] = RD_REG_WORD(wptr);

		wptr++;
		mboxes >>= 1;
	}
}
264
265static void
266qla81xx_idc_event(scsi_qla_host_t *vha, uint16_t aen, uint16_t descr)
267{
268 static char *event[] =
269 { "Complete", "Request Notification", "Time Extension" };
270 int rval;
271 struct device_reg_24xx __iomem *reg24 = &vha->hw->iobase->isp24;
272 uint16_t __iomem *wptr;
273 uint16_t cnt, timeout, mb[QLA_IDC_ACK_REGS];
274
275
276 wptr = (uint16_t __iomem *)®24->mailbox1;
277 for (cnt = 0; cnt < QLA_IDC_ACK_REGS; cnt++, wptr++)
278 mb[cnt] = RD_REG_WORD(wptr);
279
280 ql_dbg(ql_dbg_async, vha, 0x5021,
281 "Inter-Driver Communication %s -- "
282 "%04x %04x %04x %04x %04x %04x %04x.\n",
283 event[aen & 0xff], mb[0], mb[1], mb[2], mb[3],
284 mb[4], mb[5], mb[6]);
285 switch (aen) {
286
287 case MBA_IDC_COMPLETE:
288 if (mb[1] >> 15) {
289 vha->hw->flags.idc_compl_status = 1;
290 if (vha->hw->notify_dcbx_comp)
291 complete(&vha->hw->dcbx_comp);
292 }
293 break;
294
295 case MBA_IDC_NOTIFY:
296
297 timeout = (descr >> 8) & 0xf;
298 ql_dbg(ql_dbg_async, vha, 0x5022,
299 "%lu Inter-Driver Communication %s -- ACK timeout=%d.\n",
300 vha->host_no, event[aen & 0xff], timeout);
301
302 if (!timeout)
303 return;
304 rval = qla2x00_post_idc_ack_work(vha, mb);
305 if (rval != QLA_SUCCESS)
306 ql_log(ql_log_warn, vha, 0x5023,
307 "IDC failed to post ACK.\n");
308 break;
309 case MBA_IDC_TIME_EXT:
310 vha->hw->idc_extend_tmo = descr;
311 ql_dbg(ql_dbg_async, vha, 0x5087,
312 "%lu Inter-Driver Communication %s -- "
313 "Extend timeout by=%d.\n",
314 vha->host_no, event[aen & 0xff], vha->hw->idc_extend_tmo);
315 break;
316 }
317}
318
319#define LS_UNKNOWN 2
320const char *
321qla2x00_get_link_speed_str(struct qla_hw_data *ha, uint16_t speed)
322{
323 static const char * const link_speeds[] = {
324 "1", "2", "?", "4", "8", "16", "10"
325 };
326
327 if (IS_QLA2100(ha) || IS_QLA2200(ha))
328 return link_speeds[0];
329 else if (speed == 0x13)
330 return link_speeds[6];
331 else if (speed < 6)
332 return link_speeds[speed];
333 else
334 return link_speeds[LS_UNKNOWN];
335}
336
337static void
338qla83xx_handle_8200_aen(scsi_qla_host_t *vha, uint16_t *mb)
339{
340 struct qla_hw_data *ha = vha->hw;
341
342
343
344
345
346
347
348
349
350
351
352
353 ql_dbg(ql_dbg_async, vha, 0x506b, "AEN Code: mb[0] = 0x%x AEN reason: "
354 "mb[1] = 0x%x PH-status1: mb[2] = 0x%x PH-status1: mb[6] = 0x%x.\n",
355 mb[0], mb[1], mb[2], mb[6]);
356 ql_dbg(ql_dbg_async, vha, 0x506c, "PH-status2: mb[3] = 0x%x "
357 "PH-status2: mb[7] = 0x%x Device-State: mb[4] = 0x%x "
358 "Drv-Presence: mb[5] = 0x%x.\n", mb[3], mb[7], mb[4], mb[5]);
359
360 if (mb[1] & (IDC_PEG_HALT_STATUS_CHANGE | IDC_NIC_FW_REPORTED_FAILURE |
361 IDC_HEARTBEAT_FAILURE)) {
362 ha->flags.nic_core_hung = 1;
363 ql_log(ql_log_warn, vha, 0x5060,
364 "83XX: F/W Error Reported: Check if reset required.\n");
365
366 if (mb[1] & IDC_PEG_HALT_STATUS_CHANGE) {
367 uint32_t protocol_engine_id, fw_err_code, err_level;
368
369
370
371
372
373
374
375
376
377
378
379
380
381
382 protocol_engine_id = (mb[2] & 0xff);
383 fw_err_code = (((mb[2] & 0xff00) >> 8) |
384 ((mb[6] & 0x1fff) << 8));
385 err_level = ((mb[6] & 0xe000) >> 13);
386 ql_log(ql_log_warn, vha, 0x5061, "PegHalt Status-1 "
387 "Register: protocol_engine_id=0x%x "
388 "fw_err_code=0x%x err_level=0x%x.\n",
389 protocol_engine_id, fw_err_code, err_level);
390 ql_log(ql_log_warn, vha, 0x5062, "PegHalt Status-2 "
391 "Register: 0x%x%x.\n", mb[7], mb[3]);
392 if (err_level == ERR_LEVEL_NON_FATAL) {
393 ql_log(ql_log_warn, vha, 0x5063,
394 "Not a fatal error, f/w has recovered "
395 "iteself.\n");
396 } else if (err_level == ERR_LEVEL_RECOVERABLE_FATAL) {
397 ql_log(ql_log_fatal, vha, 0x5064,
398 "Recoverable Fatal error: Chip reset "
399 "required.\n");
400 qla83xx_schedule_work(vha,
401 QLA83XX_NIC_CORE_RESET);
402 } else if (err_level == ERR_LEVEL_UNRECOVERABLE_FATAL) {
403 ql_log(ql_log_fatal, vha, 0x5065,
404 "Unrecoverable Fatal error: Set FAILED "
405 "state, reboot required.\n");
406 qla83xx_schedule_work(vha,
407 QLA83XX_NIC_CORE_UNRECOVERABLE);
408 }
409 }
410
411 if (mb[1] & IDC_NIC_FW_REPORTED_FAILURE) {
412 uint16_t peg_fw_state, nw_interface_link_up;
413 uint16_t nw_interface_signal_detect, sfp_status;
414 uint16_t htbt_counter, htbt_monitor_enable;
415 uint16_t sfp_additonal_info, sfp_multirate;
416 uint16_t sfp_tx_fault, link_speed, dcbx_status;
417
418
419
420
421
422
423
424
425
426
427
428
429
430
431
432
433
434
435
436
437
438
439
440
441
442
443
444
445
446
447
448
449 peg_fw_state = (mb[2] & 0x00ff);
450 nw_interface_link_up = ((mb[2] & 0x0100) >> 8);
451 nw_interface_signal_detect = ((mb[2] & 0x0200) >> 9);
452 sfp_status = ((mb[2] & 0x0c00) >> 10);
453 htbt_counter = ((mb[2] & 0x7000) >> 12);
454 htbt_monitor_enable = ((mb[2] & 0x8000) >> 15);
455 sfp_additonal_info = (mb[6] & 0x0003);
456 sfp_multirate = ((mb[6] & 0x0004) >> 2);
457 sfp_tx_fault = ((mb[6] & 0x0008) >> 3);
458 link_speed = ((mb[6] & 0x0070) >> 4);
459 dcbx_status = ((mb[6] & 0x7000) >> 12);
460
461 ql_log(ql_log_warn, vha, 0x5066,
462 "Peg-to-Fc Status Register:\n"
463 "peg_fw_state=0x%x, nw_interface_link_up=0x%x, "
464 "nw_interface_signal_detect=0x%x"
465 "\nsfp_statis=0x%x.\n ", peg_fw_state,
466 nw_interface_link_up, nw_interface_signal_detect,
467 sfp_status);
468 ql_log(ql_log_warn, vha, 0x5067,
469 "htbt_counter=0x%x, htbt_monitor_enable=0x%x, "
470 "sfp_additonal_info=0x%x, sfp_multirate=0x%x.\n ",
471 htbt_counter, htbt_monitor_enable,
472 sfp_additonal_info, sfp_multirate);
473 ql_log(ql_log_warn, vha, 0x5068,
474 "sfp_tx_fault=0x%x, link_state=0x%x, "
475 "dcbx_status=0x%x.\n", sfp_tx_fault, link_speed,
476 dcbx_status);
477
478 qla83xx_schedule_work(vha, QLA83XX_NIC_CORE_RESET);
479 }
480
481 if (mb[1] & IDC_HEARTBEAT_FAILURE) {
482 ql_log(ql_log_warn, vha, 0x5069,
483 "Heartbeat Failure encountered, chip reset "
484 "required.\n");
485
486 qla83xx_schedule_work(vha, QLA83XX_NIC_CORE_RESET);
487 }
488 }
489
490 if (mb[1] & IDC_DEVICE_STATE_CHANGE) {
491 ql_log(ql_log_info, vha, 0x506a,
492 "IDC Device-State changed = 0x%x.\n", mb[4]);
493 if (ha->flags.nic_core_reset_owner)
494 return;
495 qla83xx_schedule_work(vha, MBA_IDC_AEN);
496 }
497}
498
499int
500qla2x00_is_a_vp_did(scsi_qla_host_t *vha, uint32_t rscn_entry)
501{
502 struct qla_hw_data *ha = vha->hw;
503 scsi_qla_host_t *vp;
504 uint32_t vp_did;
505 unsigned long flags;
506 int ret = 0;
507
508 if (!ha->num_vhosts)
509 return ret;
510
511 spin_lock_irqsave(&ha->vport_slock, flags);
512 list_for_each_entry(vp, &ha->vp_list, list) {
513 vp_did = vp->d_id.b24;
514 if (vp_did == rscn_entry) {
515 ret = 1;
516 break;
517 }
518 }
519 spin_unlock_irqrestore(&ha->vport_slock, flags);
520
521 return ret;
522}
523
524
525
526
527
528
529void
530qla2x00_async_event(scsi_qla_host_t *vha, struct rsp_que *rsp, uint16_t *mb)
531{
532 uint16_t handle_cnt;
533 uint16_t cnt, mbx;
534 uint32_t handles[5];
535 struct qla_hw_data *ha = vha->hw;
536 struct device_reg_2xxx __iomem *reg = &ha->iobase->isp;
537 struct device_reg_24xx __iomem *reg24 = &ha->iobase->isp24;
538 struct device_reg_82xx __iomem *reg82 = &ha->iobase->isp82;
539 uint32_t rscn_entry, host_pid;
540 unsigned long flags;
541
542
543 handle_cnt = 0;
544 if (IS_CNA_CAPABLE(ha))
545 goto skip_rio;
546 switch (mb[0]) {
547 case MBA_SCSI_COMPLETION:
548 handles[0] = le32_to_cpu((uint32_t)((mb[2] << 16) | mb[1]));
549 handle_cnt = 1;
550 break;
551 case MBA_CMPLT_1_16BIT:
552 handles[0] = mb[1];
553 handle_cnt = 1;
554 mb[0] = MBA_SCSI_COMPLETION;
555 break;
556 case MBA_CMPLT_2_16BIT:
557 handles[0] = mb[1];
558 handles[1] = mb[2];
559 handle_cnt = 2;
560 mb[0] = MBA_SCSI_COMPLETION;
561 break;
562 case MBA_CMPLT_3_16BIT:
563 handles[0] = mb[1];
564 handles[1] = mb[2];
565 handles[2] = mb[3];
566 handle_cnt = 3;
567 mb[0] = MBA_SCSI_COMPLETION;
568 break;
569 case MBA_CMPLT_4_16BIT:
570 handles[0] = mb[1];
571 handles[1] = mb[2];
572 handles[2] = mb[3];
573 handles[3] = (uint32_t)RD_MAILBOX_REG(ha, reg, 6);
574 handle_cnt = 4;
575 mb[0] = MBA_SCSI_COMPLETION;
576 break;
577 case MBA_CMPLT_5_16BIT:
578 handles[0] = mb[1];
579 handles[1] = mb[2];
580 handles[2] = mb[3];
581 handles[3] = (uint32_t)RD_MAILBOX_REG(ha, reg, 6);
582 handles[4] = (uint32_t)RD_MAILBOX_REG(ha, reg, 7);
583 handle_cnt = 5;
584 mb[0] = MBA_SCSI_COMPLETION;
585 break;
586 case MBA_CMPLT_2_32BIT:
587 handles[0] = le32_to_cpu((uint32_t)((mb[2] << 16) | mb[1]));
588 handles[1] = le32_to_cpu(
589 ((uint32_t)(RD_MAILBOX_REG(ha, reg, 7) << 16)) |
590 RD_MAILBOX_REG(ha, reg, 6));
591 handle_cnt = 2;
592 mb[0] = MBA_SCSI_COMPLETION;
593 break;
594 default:
595 break;
596 }
597skip_rio:
598 switch (mb[0]) {
599 case MBA_SCSI_COMPLETION:
600 if (!vha->flags.online)
601 break;
602
603 for (cnt = 0; cnt < handle_cnt; cnt++)
604 qla2x00_process_completed_request(vha, rsp->req,
605 handles[cnt]);
606 break;
607
608 case MBA_RESET:
609 ql_dbg(ql_dbg_async, vha, 0x5002,
610 "Asynchronous RESET.\n");
611
612 set_bit(RESET_MARKER_NEEDED, &vha->dpc_flags);
613 break;
614
615 case MBA_SYSTEM_ERR:
616 mbx = (IS_QLA81XX(ha) || IS_QLA83XX(ha)) ?
617 RD_REG_WORD(®24->mailbox7) : 0;
618 ql_log(ql_log_warn, vha, 0x5003,
619 "ISP System Error - mbx1=%xh mbx2=%xh mbx3=%xh "
620 "mbx7=%xh.\n", mb[1], mb[2], mb[3], mbx);
621
622 ha->isp_ops->fw_dump(vha, 1);
623
624 if (IS_FWI2_CAPABLE(ha)) {
625 if (mb[1] == 0 && mb[2] == 0) {
626 ql_log(ql_log_fatal, vha, 0x5004,
627 "Unrecoverable Hardware Error: adapter "
628 "marked OFFLINE!\n");
629 vha->flags.online = 0;
630 vha->device_flags |= DFLG_DEV_FAILED;
631 } else {
632
633 if ((mbx & MBX_3) && (ha->flags.port0))
634 set_bit(MPI_RESET_NEEDED,
635 &vha->dpc_flags);
636
637 set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
638 }
639 } else if (mb[1] == 0) {
640 ql_log(ql_log_fatal, vha, 0x5005,
641 "Unrecoverable Hardware Error: adapter marked "
642 "OFFLINE!\n");
643 vha->flags.online = 0;
644 vha->device_flags |= DFLG_DEV_FAILED;
645 } else
646 set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
647 break;
648
649 case MBA_REQ_TRANSFER_ERR:
650 ql_log(ql_log_warn, vha, 0x5006,
651 "ISP Request Transfer Error (%x).\n", mb[1]);
652
653 set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
654 break;
655
656 case MBA_RSP_TRANSFER_ERR:
657 ql_log(ql_log_warn, vha, 0x5007,
658 "ISP Response Transfer Error.\n");
659
660 set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
661 break;
662
663 case MBA_WAKEUP_THRES:
664 ql_dbg(ql_dbg_async, vha, 0x5008,
665 "Asynchronous WAKEUP_THRES.\n");
666
667 break;
668 case MBA_LIP_OCCURRED:
669 ql_dbg(ql_dbg_async, vha, 0x5009,
670 "LIP occurred (%x).\n", mb[1]);
671
672 if (atomic_read(&vha->loop_state) != LOOP_DOWN) {
673 atomic_set(&vha->loop_state, LOOP_DOWN);
674 atomic_set(&vha->loop_down_timer, LOOP_DOWN_TIME);
675 qla2x00_mark_all_devices_lost(vha, 1);
676 }
677
678 if (vha->vp_idx) {
679 atomic_set(&vha->vp_state, VP_FAILED);
680 fc_vport_set_state(vha->fc_vport, FC_VPORT_FAILED);
681 }
682
683 set_bit(REGISTER_FC4_NEEDED, &vha->dpc_flags);
684 set_bit(REGISTER_FDMI_NEEDED, &vha->dpc_flags);
685
686 vha->flags.management_server_logged_in = 0;
687 qla2x00_post_aen_work(vha, FCH_EVT_LIP, mb[1]);
688 break;
689
690 case MBA_LOOP_UP:
691 if (IS_QLA2100(ha) || IS_QLA2200(ha))
692 ha->link_data_rate = PORT_SPEED_1GB;
693 else
694 ha->link_data_rate = mb[1];
695
696 ql_dbg(ql_dbg_async, vha, 0x500a,
697 "LOOP UP detected (%s Gbps).\n",
698 qla2x00_get_link_speed_str(ha, ha->link_data_rate));
699
700 vha->flags.management_server_logged_in = 0;
701 qla2x00_post_aen_work(vha, FCH_EVT_LINKUP, ha->link_data_rate);
702 break;
703
704 case MBA_LOOP_DOWN:
705 mbx = (IS_QLA81XX(ha) || IS_QLA8031(ha))
706 ? RD_REG_WORD(®24->mailbox4) : 0;
707 mbx = (IS_P3P_TYPE(ha)) ? RD_REG_WORD(®82->mailbox_out[4])
708 : mbx;
709 ql_dbg(ql_dbg_async, vha, 0x500b,
710 "LOOP DOWN detected (%x %x %x %x).\n",
711 mb[1], mb[2], mb[3], mbx);
712
713 if (atomic_read(&vha->loop_state) != LOOP_DOWN) {
714 atomic_set(&vha->loop_state, LOOP_DOWN);
715 atomic_set(&vha->loop_down_timer, LOOP_DOWN_TIME);
716 vha->device_flags |= DFLG_NO_CABLE;
717 qla2x00_mark_all_devices_lost(vha, 1);
718 }
719
720 if (vha->vp_idx) {
721 atomic_set(&vha->vp_state, VP_FAILED);
722 fc_vport_set_state(vha->fc_vport, FC_VPORT_FAILED);
723 }
724
725 vha->flags.management_server_logged_in = 0;
726 ha->link_data_rate = PORT_SPEED_UNKNOWN;
727 qla2x00_post_aen_work(vha, FCH_EVT_LINKDOWN, 0);
728 break;
729
730 case MBA_LIP_RESET:
731 ql_dbg(ql_dbg_async, vha, 0x500c,
732 "LIP reset occurred (%x).\n", mb[1]);
733
734 if (atomic_read(&vha->loop_state) != LOOP_DOWN) {
735 atomic_set(&vha->loop_state, LOOP_DOWN);
736 atomic_set(&vha->loop_down_timer, LOOP_DOWN_TIME);
737 qla2x00_mark_all_devices_lost(vha, 1);
738 }
739
740 if (vha->vp_idx) {
741 atomic_set(&vha->vp_state, VP_FAILED);
742 fc_vport_set_state(vha->fc_vport, FC_VPORT_FAILED);
743 }
744
745 set_bit(RESET_MARKER_NEEDED, &vha->dpc_flags);
746
747 ha->operating_mode = LOOP;
748 vha->flags.management_server_logged_in = 0;
749 qla2x00_post_aen_work(vha, FCH_EVT_LIPRESET, mb[1]);
750 break;
751
752
753 case MBA_POINT_TO_POINT:
754 if (IS_QLA2100(ha))
755 break;
756
757 if (IS_CNA_CAPABLE(ha)) {
758 ql_dbg(ql_dbg_async, vha, 0x500d,
759 "DCBX Completed -- %04x %04x %04x.\n",
760 mb[1], mb[2], mb[3]);
761 if (ha->notify_dcbx_comp)
762 complete(&ha->dcbx_comp);
763
764 } else
765 ql_dbg(ql_dbg_async, vha, 0x500e,
766 "Asynchronous P2P MODE received.\n");
767
768
769
770
771
772 if (atomic_read(&vha->loop_state) != LOOP_DOWN) {
773 atomic_set(&vha->loop_state, LOOP_DOWN);
774 if (!atomic_read(&vha->loop_down_timer))
775 atomic_set(&vha->loop_down_timer,
776 LOOP_DOWN_TIME);
777 qla2x00_mark_all_devices_lost(vha, 1);
778 }
779
780 if (vha->vp_idx) {
781 atomic_set(&vha->vp_state, VP_FAILED);
782 fc_vport_set_state(vha->fc_vport, FC_VPORT_FAILED);
783 }
784
785 if (!(test_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags)))
786 set_bit(RESET_MARKER_NEEDED, &vha->dpc_flags);
787
788 set_bit(REGISTER_FC4_NEEDED, &vha->dpc_flags);
789 set_bit(REGISTER_FDMI_NEEDED, &vha->dpc_flags);
790
791 ha->flags.gpsc_supported = 1;
792 vha->flags.management_server_logged_in = 0;
793 break;
794
795 case MBA_CHG_IN_CONNECTION:
796 if (IS_QLA2100(ha))
797 break;
798
799 ql_dbg(ql_dbg_async, vha, 0x500f,
800 "Configuration change detected: value=%x.\n", mb[1]);
801
802 if (atomic_read(&vha->loop_state) != LOOP_DOWN) {
803 atomic_set(&vha->loop_state, LOOP_DOWN);
804 if (!atomic_read(&vha->loop_down_timer))
805 atomic_set(&vha->loop_down_timer,
806 LOOP_DOWN_TIME);
807 qla2x00_mark_all_devices_lost(vha, 1);
808 }
809
810 if (vha->vp_idx) {
811 atomic_set(&vha->vp_state, VP_FAILED);
812 fc_vport_set_state(vha->fc_vport, FC_VPORT_FAILED);
813 }
814
815 set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags);
816 set_bit(LOCAL_LOOP_UPDATE, &vha->dpc_flags);
817 break;
818
819 case MBA_PORT_UPDATE:
820
821
822
823
824
825
826
827
828
829
830
831
832
833
834
835 if (IS_QLA2XXX_MIDTYPE(ha) &&
836 ((mb[1] == 0xffff && (mb[3] & 0xff) != 0xff) ||
837 (mb[1] != 0xffff)) && vha->vp_idx != (mb[3] & 0xff))
838 break;
839
840
841 if (mb[1] == 0xffff && mb[2] == 0x7) {
842 ql_dbg(ql_dbg_async, vha, 0x5010,
843 "Port unavailable %04x %04x %04x.\n",
844 mb[1], mb[2], mb[3]);
845 ql_log(ql_log_warn, vha, 0x505e,
846 "Link is offline.\n");
847
848 if (atomic_read(&vha->loop_state) != LOOP_DOWN) {
849 atomic_set(&vha->loop_state, LOOP_DOWN);
850 atomic_set(&vha->loop_down_timer,
851 LOOP_DOWN_TIME);
852 vha->device_flags |= DFLG_NO_CABLE;
853 qla2x00_mark_all_devices_lost(vha, 1);
854 }
855
856 if (vha->vp_idx) {
857 atomic_set(&vha->vp_state, VP_FAILED);
858 fc_vport_set_state(vha->fc_vport,
859 FC_VPORT_FAILED);
860 qla2x00_mark_all_devices_lost(vha, 1);
861 }
862
863 vha->flags.management_server_logged_in = 0;
864 ha->link_data_rate = PORT_SPEED_UNKNOWN;
865 break;
866 }
867
868
869
870
871
872
873 atomic_set(&vha->loop_down_timer, 0);
874 if (mb[1] != 0xffff || (mb[2] != 0x6 && mb[2] != 0x4)) {
875 ql_dbg(ql_dbg_async, vha, 0x5011,
876 "Asynchronous PORT UPDATE ignored %04x/%04x/%04x.\n",
877 mb[1], mb[2], mb[3]);
878
879 qlt_async_event(mb[0], vha, mb);
880 break;
881 }
882
883 ql_dbg(ql_dbg_async, vha, 0x5012,
884 "Port database changed %04x %04x %04x.\n",
885 mb[1], mb[2], mb[3]);
886 ql_log(ql_log_warn, vha, 0x505f,
887 "Link is operational (%s Gbps).\n",
888 qla2x00_get_link_speed_str(ha, ha->link_data_rate));
889
890
891
892
893 atomic_set(&vha->loop_state, LOOP_UP);
894
895 qla2x00_mark_all_devices_lost(vha, 1);
896
897 if (vha->vp_idx == 0 && !qla_ini_mode_enabled(vha))
898 set_bit(SCR_PENDING, &vha->dpc_flags);
899
900 set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags);
901 set_bit(LOCAL_LOOP_UPDATE, &vha->dpc_flags);
902
903 qlt_async_event(mb[0], vha, mb);
904 break;
905
906 case MBA_RSCN_UPDATE:
907
908 if (vha->vp_idx && test_bit(VP_SCR_NEEDED, &vha->vp_flags))
909 break;
910
911 if (ha->flags.npiv_supported && vha->vp_idx != (mb[3] & 0xff))
912 break;
913
914 ql_dbg(ql_dbg_async, vha, 0x5013,
915 "RSCN database changed -- %04x %04x %04x.\n",
916 mb[1], mb[2], mb[3]);
917
918 rscn_entry = ((mb[1] & 0xff) << 16) | mb[2];
919 host_pid = (vha->d_id.b.domain << 16) | (vha->d_id.b.area << 8)
920 | vha->d_id.b.al_pa;
921 if (rscn_entry == host_pid) {
922 ql_dbg(ql_dbg_async, vha, 0x5014,
923 "Ignoring RSCN update to local host "
924 "port ID (%06x).\n", host_pid);
925 break;
926 }
927
928
929 rscn_entry = ((mb[1] & 0x3ff) << 16) | mb[2];
930
931
932 if (qla2x00_is_a_vp_did(vha, rscn_entry))
933 break;
934
935 atomic_set(&vha->loop_down_timer, 0);
936 vha->flags.management_server_logged_in = 0;
937
938 set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags);
939 set_bit(RSCN_UPDATE, &vha->dpc_flags);
940 qla2x00_post_aen_work(vha, FCH_EVT_RSCN, rscn_entry);
941 break;
942
943
944 case MBA_ZIO_RESPONSE:
945 ql_dbg(ql_dbg_async, vha, 0x5015,
946 "[R|Z]IO update completion.\n");
947
948 if (IS_FWI2_CAPABLE(ha))
949 qla24xx_process_response_queue(vha, rsp);
950 else
951 qla2x00_process_response_queue(rsp);
952 break;
953
954 case MBA_DISCARD_RND_FRAME:
955 ql_dbg(ql_dbg_async, vha, 0x5016,
956 "Discard RND Frame -- %04x %04x %04x.\n",
957 mb[1], mb[2], mb[3]);
958 break;
959
960 case MBA_TRACE_NOTIFICATION:
961 ql_dbg(ql_dbg_async, vha, 0x5017,
962 "Trace Notification -- %04x %04x.\n", mb[1], mb[2]);
963 break;
964
965 case MBA_ISP84XX_ALERT:
966 ql_dbg(ql_dbg_async, vha, 0x5018,
967 "ISP84XX Alert Notification -- %04x %04x %04x.\n",
968 mb[1], mb[2], mb[3]);
969
970 spin_lock_irqsave(&ha->cs84xx->access_lock, flags);
971 switch (mb[1]) {
972 case A84_PANIC_RECOVERY:
973 ql_log(ql_log_info, vha, 0x5019,
974 "Alert 84XX: panic recovery %04x %04x.\n",
975 mb[2], mb[3]);
976 break;
977 case A84_OP_LOGIN_COMPLETE:
978 ha->cs84xx->op_fw_version = mb[3] << 16 | mb[2];
979 ql_log(ql_log_info, vha, 0x501a,
980 "Alert 84XX: firmware version %x.\n",
981 ha->cs84xx->op_fw_version);
982 break;
983 case A84_DIAG_LOGIN_COMPLETE:
984 ha->cs84xx->diag_fw_version = mb[3] << 16 | mb[2];
985 ql_log(ql_log_info, vha, 0x501b,
986 "Alert 84XX: diagnostic firmware version %x.\n",
987 ha->cs84xx->diag_fw_version);
988 break;
989 case A84_GOLD_LOGIN_COMPLETE:
990 ha->cs84xx->diag_fw_version = mb[3] << 16 | mb[2];
991 ha->cs84xx->fw_update = 1;
992 ql_log(ql_log_info, vha, 0x501c,
993 "Alert 84XX: gold firmware version %x.\n",
994 ha->cs84xx->gold_fw_version);
995 break;
996 default:
997 ql_log(ql_log_warn, vha, 0x501d,
998 "Alert 84xx: Invalid Alert %04x %04x %04x.\n",
999 mb[1], mb[2], mb[3]);
1000 }
1001 spin_unlock_irqrestore(&ha->cs84xx->access_lock, flags);
1002 break;
1003 case MBA_DCBX_START:
1004 ql_dbg(ql_dbg_async, vha, 0x501e,
1005 "DCBX Started -- %04x %04x %04x.\n",
1006 mb[1], mb[2], mb[3]);
1007 break;
1008 case MBA_DCBX_PARAM_UPDATE:
1009 ql_dbg(ql_dbg_async, vha, 0x501f,
1010 "DCBX Parameters Updated -- %04x %04x %04x.\n",
1011 mb[1], mb[2], mb[3]);
1012 break;
1013 case MBA_FCF_CONF_ERR:
1014 ql_dbg(ql_dbg_async, vha, 0x5020,
1015 "FCF Configuration Error -- %04x %04x %04x.\n",
1016 mb[1], mb[2], mb[3]);
1017 break;
1018 case MBA_IDC_NOTIFY:
1019 if (IS_QLA8031(vha->hw) || IS_QLA8044(ha)) {
1020 mb[4] = RD_REG_WORD(®24->mailbox4);
1021 if (((mb[2] & 0x7fff) == MBC_PORT_RESET ||
1022 (mb[2] & 0x7fff) == MBC_SET_PORT_CONFIG) &&
1023 (mb[4] & INTERNAL_LOOPBACK_MASK) != 0) {
1024 set_bit(ISP_QUIESCE_NEEDED, &vha->dpc_flags);
1025
1026
1027
1028 if (atomic_read(&vha->loop_state) == LOOP_DOWN)
1029 atomic_set(&vha->loop_down_timer,
1030 LOOP_DOWN_TIME);
1031 qla2xxx_wake_dpc(vha);
1032 }
1033 }
1034 case MBA_IDC_COMPLETE:
1035 if (ha->notify_lb_portup_comp)
1036 complete(&ha->lb_portup_comp);
1037
1038 case MBA_IDC_TIME_EXT:
1039 if (IS_QLA81XX(vha->hw) || IS_QLA8031(vha->hw) ||
1040 IS_QLA8044(ha))
1041 qla81xx_idc_event(vha, mb[0], mb[1]);
1042 break;
1043
1044 case MBA_IDC_AEN:
1045 mb[4] = RD_REG_WORD(®24->mailbox4);
1046 mb[5] = RD_REG_WORD(®24->mailbox5);
1047 mb[6] = RD_REG_WORD(®24->mailbox6);
1048 mb[7] = RD_REG_WORD(®24->mailbox7);
1049 qla83xx_handle_8200_aen(vha, mb);
1050 break;
1051
1052 default:
1053 ql_dbg(ql_dbg_async, vha, 0x5057,
1054 "Unknown AEN:%04x %04x %04x %04x\n",
1055 mb[0], mb[1], mb[2], mb[3]);
1056 }
1057
1058 qlt_async_event(mb[0], vha, mb);
1059
1060 if (!vha->vp_idx && ha->num_vhosts)
1061 qla2x00_alert_all_vps(rsp, mb);
1062}
1063
1064
1065
1066
1067
1068
1069void
1070qla2x00_process_completed_request(struct scsi_qla_host *vha,
1071 struct req_que *req, uint32_t index)
1072{
1073 srb_t *sp;
1074 struct qla_hw_data *ha = vha->hw;
1075
1076
1077 if (index >= req->num_outstanding_cmds) {
1078 ql_log(ql_log_warn, vha, 0x3014,
1079 "Invalid SCSI command index (%x).\n", index);
1080
1081 if (IS_P3P_TYPE(ha))
1082 set_bit(FCOE_CTX_RESET_NEEDED, &vha->dpc_flags);
1083 else
1084 set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
1085 return;
1086 }
1087
1088 sp = req->outstanding_cmds[index];
1089 if (sp) {
1090
1091 req->outstanding_cmds[index] = NULL;
1092
1093
1094 sp->done(ha, sp, DID_OK << 16);
1095 } else {
1096 ql_log(ql_log_warn, vha, 0x3016, "Invalid SCSI SRB.\n");
1097
1098 if (IS_P3P_TYPE(ha))
1099 set_bit(FCOE_CTX_RESET_NEEDED, &vha->dpc_flags);
1100 else
1101 set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
1102 }
1103}
1104
1105srb_t *
1106qla2x00_get_sp_from_handle(scsi_qla_host_t *vha, const char *func,
1107 struct req_que *req, void *iocb)
1108{
1109 struct qla_hw_data *ha = vha->hw;
1110 sts_entry_t *pkt = iocb;
1111 srb_t *sp = NULL;
1112 uint16_t index;
1113
1114 index = LSW(pkt->handle);
1115 if (index >= req->num_outstanding_cmds) {
1116 ql_log(ql_log_warn, vha, 0x5031,
1117 "Invalid command index (%x).\n", index);
1118 if (IS_P3P_TYPE(ha))
1119 set_bit(FCOE_CTX_RESET_NEEDED, &vha->dpc_flags);
1120 else
1121 set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
1122 goto done;
1123 }
1124 sp = req->outstanding_cmds[index];
1125 if (!sp) {
1126 ql_log(ql_log_warn, vha, 0x5032,
1127 "Invalid completion handle (%x) -- timed-out.\n", index);
1128 return sp;
1129 }
1130 if (sp->handle != index) {
1131 ql_log(ql_log_warn, vha, 0x5033,
1132 "SRB handle (%x) mismatch %x.\n", sp->handle, index);
1133 return NULL;
1134 }
1135
1136 req->outstanding_cmds[index] = NULL;
1137
1138done:
1139 return sp;
1140}
1141
/**
 * qla2x00_mbx_iocb_entry() - Process an MBX IOCB completion (legacy logio).
 * @vha: SCSI driver HA context
 * @req: request queue the IOCB handle belongs to
 * @mbx: the MBX IOCB entry read from the response queue
 *
 * Translates the entry's status and mailbox words into the srb's logio
 * result words (data[0] = completion code, data[1] = extra info) and
 * completes the srb.  For login commands, the fcport's type/flags are
 * refined from mb1 on success.
 */
static void
qla2x00_mbx_iocb_entry(scsi_qla_host_t *vha, struct req_que *req,
    struct mbx_entry *mbx)
{
	const char func[] = "MBX-IOCB";
	const char *type;
	fc_port_t *fcport;
	srb_t *sp;
	struct srb_iocb *lio;
	uint16_t *data;
	uint16_t status;

	sp = qla2x00_get_sp_from_handle(vha, func, req, mbx);
	if (!sp)
		return;

	lio = &sp->u.iocb_cmd;
	type = sp->name;
	fcport = sp->fcport;
	data = lio->u.logio.data;

	/* Default to error; overwritten below on success. */
	data[0] = MBS_COMMAND_ERROR;
	data[1] = lio->u.logio.flags & SRB_LOGIN_RETRIED ?
	    QLA_LOGIO_LOGIN_RETRIED : 0;
	if (mbx->entry_status) {
		ql_dbg(ql_dbg_async, vha, 0x5043,
		    "Async-%s error entry - hdl=%x portid=%02x%02x%02x "
		    "entry-status=%x status=%x state-flag=%x "
		    "status-flags=%x.\n", type, sp->handle,
		    fcport->d_id.b.domain, fcport->d_id.b.area,
		    fcport->d_id.b.al_pa, mbx->entry_status,
		    le16_to_cpu(mbx->status), le16_to_cpu(mbx->state_flags),
		    le16_to_cpu(mbx->status_flags));

		ql_dump_buffer(ql_dbg_async + ql_dbg_buffer, vha, 0x5029,
		    (uint8_t *)mbx, sizeof(*mbx));

		goto logio_done;
	}

	status = le16_to_cpu(mbx->status);
	/*
	 * NOTE(review): status 0x30 on a login with mb0=COMPLETE is
	 * treated as success -- presumably a firmware quirk; confirm
	 * against the firmware interface specification.
	 */
	if (status == 0x30 && sp->type == SRB_LOGIN_CMD &&
	    le16_to_cpu(mbx->mb0) == MBS_COMMAND_COMPLETE)
		status = 0;
	if (!status && le16_to_cpu(mbx->mb0) == MBS_COMMAND_COMPLETE) {
		ql_dbg(ql_dbg_async, vha, 0x5045,
		    "Async-%s complete - hdl=%x portid=%02x%02x%02x mbx1=%x.\n",
		    type, sp->handle, fcport->d_id.b.domain,
		    fcport->d_id.b.area, fcport->d_id.b.al_pa,
		    le16_to_cpu(mbx->mb1));

		data[0] = MBS_COMMAND_COMPLETE;
		if (sp->type == SRB_LOGIN_CMD) {
			/* mb1 BIT_0: initiator port; BIT_1: FCP-2 device. */
			fcport->port_type = FCT_TARGET;
			if (le16_to_cpu(mbx->mb1) & BIT_0)
				fcport->port_type = FCT_INITIATOR;
			else if (le16_to_cpu(mbx->mb1) & BIT_1)
				fcport->flags |= FCF_FCP2_DEVICE;
		}
		goto logio_done;
	}

	/* Failure: map mb0 to a logio completion code. */
	data[0] = le16_to_cpu(mbx->mb0);
	switch (data[0]) {
	case MBS_PORT_ID_USED:
		data[1] = le16_to_cpu(mbx->mb1);
		break;
	case MBS_LOOP_ID_USED:
		break;
	default:
		data[0] = MBS_COMMAND_ERROR;
		break;
	}

	ql_log(ql_log_warn, vha, 0x5046,
	    "Async-%s failed - hdl=%x portid=%02x%02x%02x status=%x "
	    "mb0=%x mb1=%x mb2=%x mb6=%x mb7=%x.\n", type, sp->handle,
	    fcport->d_id.b.domain, fcport->d_id.b.area, fcport->d_id.b.al_pa,
	    status, le16_to_cpu(mbx->mb0), le16_to_cpu(mbx->mb1),
	    le16_to_cpu(mbx->mb2), le16_to_cpu(mbx->mb6),
	    le16_to_cpu(mbx->mb7));

logio_done:
	sp->done(vha, sp, 0);
}
1227
/**
 * qla2x00_ct_entry() - Process a CT pass-through IOCB completion (legacy).
 * @vha: SCSI driver HA context
 * @req: request queue the IOCB handle belongs to
 * @pkt: the status entry read from the response queue
 * @iocb_type: IOCB type tag (unused here)
 *
 * Fills in the bsg job's reply (CT/ELS status, received payload length)
 * from the entry's completion status and completes the srb with a
 * SCSI-style result code.
 */
static void
qla2x00_ct_entry(scsi_qla_host_t *vha, struct req_que *req,
    sts_entry_t *pkt, int iocb_type)
{
	const char func[] = "CT_IOCB";
	const char *type;
	srb_t *sp;
	struct fc_bsg_job *bsg_job;
	uint16_t comp_status;
	int res;

	sp = qla2x00_get_sp_from_handle(vha, func, req, pkt);
	if (!sp)
		return;

	bsg_job = sp->u.bsg_job;

	type = "ct pass-through";

	comp_status = le16_to_cpu(pkt->comp_status);

	/*
	 * return FC_CTELS_STATUS_OK and leave the decoding of the ELS/CT
	 * command to the transport layer.
	 */
	bsg_job->reply->reply_data.ctels_reply.status = FC_CTELS_STATUS_OK;
	bsg_job->reply_len = sizeof(struct fc_bsg_reply);

	if (comp_status != CS_COMPLETE) {
		if (comp_status == CS_DATA_UNDERRUN) {
			/* Underrun still delivers data -- report DID_OK. */
			res = DID_OK << 16;
			bsg_job->reply->reply_payload_rcv_len =
			    le16_to_cpu(((sts_entry_t *)pkt)->rsp_info_len);

			ql_log(ql_log_warn, vha, 0x5048,
			    "CT pass-through-%s error "
			    "comp_status-status=0x%x total_byte = 0x%x.\n",
			    type, comp_status,
			    bsg_job->reply->reply_payload_rcv_len);
		} else {
			ql_log(ql_log_warn, vha, 0x5049,
			    "CT pass-through-%s error "
			    "comp_status-status=0x%x.\n", type, comp_status);
			res = DID_ERROR << 16;
			bsg_job->reply->reply_payload_rcv_len = 0;
		}
		ql_dump_buffer(ql_dbg_async + ql_dbg_buffer, vha, 0x5035,
		    (uint8_t *)pkt, sizeof(*pkt));
	} else {
		res = DID_OK << 16;
		bsg_job->reply->reply_payload_rcv_len =
		    bsg_job->reply_payload.payload_len;
		bsg_job->reply_len = 0;
	}

	sp->done(vha, sp, res);
}
1284
1285static void
1286qla24xx_els_ct_entry(scsi_qla_host_t *vha, struct req_que *req,
1287 struct sts_entry_24xx *pkt, int iocb_type)
1288{
1289 const char func[] = "ELS_CT_IOCB";
1290 const char *type;
1291 srb_t *sp;
1292 struct fc_bsg_job *bsg_job;
1293 uint16_t comp_status;
1294 uint32_t fw_status[3];
1295 uint8_t* fw_sts_ptr;
1296 int res;
1297
1298 sp = qla2x00_get_sp_from_handle(vha, func, req, pkt);
1299 if (!sp)
1300 return;
1301 bsg_job = sp->u.bsg_job;
1302
1303 type = NULL;
1304 switch (sp->type) {
1305 case SRB_ELS_CMD_RPT:
1306 case SRB_ELS_CMD_HST:
1307 type = "els";
1308 break;
1309 case SRB_CT_CMD:
1310 type = "ct pass-through";
1311 break;
1312 default:
1313 ql_dbg(ql_dbg_user, vha, 0x503e,
1314 "Unrecognized SRB: (%p) type=%d.\n", sp, sp->type);
1315 return;
1316 }
1317
1318 comp_status = fw_status[0] = le16_to_cpu(pkt->comp_status);
1319 fw_status[1] = le16_to_cpu(((struct els_sts_entry_24xx*)pkt)->error_subcode_1);
1320 fw_status[2] = le16_to_cpu(((struct els_sts_entry_24xx*)pkt)->error_subcode_2);
1321
1322
1323
1324
1325 bsg_job->reply->reply_data.ctels_reply.status = FC_CTELS_STATUS_OK;
1326 bsg_job->reply_len = sizeof(struct fc_bsg_reply) + sizeof(fw_status);
1327
1328 if (comp_status != CS_COMPLETE) {
1329 if (comp_status == CS_DATA_UNDERRUN) {
1330 res = DID_OK << 16;
1331 bsg_job->reply->reply_payload_rcv_len =
1332 le16_to_cpu(((struct els_sts_entry_24xx *)pkt)->total_byte_count);
1333
1334 ql_dbg(ql_dbg_user, vha, 0x503f,
1335 "ELS-CT pass-through-%s error hdl=%x comp_status-status=0x%x "
1336 "error subcode 1=0x%x error subcode 2=0x%x total_byte = 0x%x.\n",
1337 type, sp->handle, comp_status, fw_status[1], fw_status[2],
1338 le16_to_cpu(((struct els_sts_entry_24xx *)
1339 pkt)->total_byte_count));
1340 fw_sts_ptr = ((uint8_t*)bsg_job->req->sense) + sizeof(struct fc_bsg_reply);
1341 memcpy( fw_sts_ptr, fw_status, sizeof(fw_status));
1342 }
1343 else {
1344 ql_dbg(ql_dbg_user, vha, 0x5040,
1345 "ELS-CT pass-through-%s error hdl=%x comp_status-status=0x%x "
1346 "error subcode 1=0x%x error subcode 2=0x%x.\n",
1347 type, sp->handle, comp_status,
1348 le16_to_cpu(((struct els_sts_entry_24xx *)
1349 pkt)->error_subcode_1),
1350 le16_to_cpu(((struct els_sts_entry_24xx *)
1351 pkt)->error_subcode_2));
1352 res = DID_ERROR << 16;
1353 bsg_job->reply->reply_payload_rcv_len = 0;
1354 fw_sts_ptr = ((uint8_t*)bsg_job->req->sense) + sizeof(struct fc_bsg_reply);
1355 memcpy( fw_sts_ptr, fw_status, sizeof(fw_status));
1356 }
1357 ql_dump_buffer(ql_dbg_user + ql_dbg_buffer, vha, 0x5056,
1358 (uint8_t *)pkt, sizeof(*pkt));
1359 }
1360 else {
1361 res = DID_OK << 16;
1362 bsg_job->reply->reply_payload_rcv_len = bsg_job->reply_payload.payload_len;
1363 bsg_job->reply_len = 0;
1364 }
1365
1366 sp->done(vha, sp, res);
1367}
1368
1369static void
1370qla24xx_logio_entry(scsi_qla_host_t *vha, struct req_que *req,
1371 struct logio_entry_24xx *logio)
1372{
1373 const char func[] = "LOGIO-IOCB";
1374 const char *type;
1375 fc_port_t *fcport;
1376 srb_t *sp;
1377 struct srb_iocb *lio;
1378 uint16_t *data;
1379 uint32_t iop[2];
1380
1381 sp = qla2x00_get_sp_from_handle(vha, func, req, logio);
1382 if (!sp)
1383 return;
1384
1385 lio = &sp->u.iocb_cmd;
1386 type = sp->name;
1387 fcport = sp->fcport;
1388 data = lio->u.logio.data;
1389
1390 data[0] = MBS_COMMAND_ERROR;
1391 data[1] = lio->u.logio.flags & SRB_LOGIN_RETRIED ?
1392 QLA_LOGIO_LOGIN_RETRIED : 0;
1393 if (logio->entry_status) {
1394 ql_log(ql_log_warn, fcport->vha, 0x5034,
1395 "Async-%s error entry - hdl=%x"
1396 "portid=%02x%02x%02x entry-status=%x.\n",
1397 type, sp->handle, fcport->d_id.b.domain,
1398 fcport->d_id.b.area, fcport->d_id.b.al_pa,
1399 logio->entry_status);
1400 ql_dump_buffer(ql_dbg_async + ql_dbg_buffer, vha, 0x504d,
1401 (uint8_t *)logio, sizeof(*logio));
1402
1403 goto logio_done;
1404 }
1405
1406 if (le16_to_cpu(logio->comp_status) == CS_COMPLETE) {
1407 ql_dbg(ql_dbg_async, fcport->vha, 0x5036,
1408 "Async-%s complete - hdl=%x portid=%02x%02x%02x "
1409 "iop0=%x.\n", type, sp->handle, fcport->d_id.b.domain,
1410 fcport->d_id.b.area, fcport->d_id.b.al_pa,
1411 le32_to_cpu(logio->io_parameter[0]));
1412
1413 data[0] = MBS_COMMAND_COMPLETE;
1414 if (sp->type != SRB_LOGIN_CMD)
1415 goto logio_done;
1416
1417 iop[0] = le32_to_cpu(logio->io_parameter[0]);
1418 if (iop[0] & BIT_4) {
1419 fcport->port_type = FCT_TARGET;
1420 if (iop[0] & BIT_8)
1421 fcport->flags |= FCF_FCP2_DEVICE;
1422 } else if (iop[0] & BIT_5)
1423 fcport->port_type = FCT_INITIATOR;
1424
1425 if (iop[0] & BIT_7)
1426 fcport->flags |= FCF_CONF_COMP_SUPPORTED;
1427
1428 if (logio->io_parameter[7] || logio->io_parameter[8])
1429 fcport->supported_classes |= FC_COS_CLASS2;
1430 if (logio->io_parameter[9] || logio->io_parameter[10])
1431 fcport->supported_classes |= FC_COS_CLASS3;
1432
1433 goto logio_done;
1434 }
1435
1436 iop[0] = le32_to_cpu(logio->io_parameter[0]);
1437 iop[1] = le32_to_cpu(logio->io_parameter[1]);
1438 switch (iop[0]) {
1439 case LSC_SCODE_PORTID_USED:
1440 data[0] = MBS_PORT_ID_USED;
1441 data[1] = LSW(iop[1]);
1442 break;
1443 case LSC_SCODE_NPORT_USED:
1444 data[0] = MBS_LOOP_ID_USED;
1445 break;
1446 default:
1447 data[0] = MBS_COMMAND_ERROR;
1448 break;
1449 }
1450
1451 ql_dbg(ql_dbg_async, fcport->vha, 0x5037,
1452 "Async-%s failed - hdl=%x portid=%02x%02x%02x comp=%x "
1453 "iop0=%x iop1=%x.\n", type, sp->handle, fcport->d_id.b.domain,
1454 fcport->d_id.b.area, fcport->d_id.b.al_pa,
1455 le16_to_cpu(logio->comp_status),
1456 le32_to_cpu(logio->io_parameter[0]),
1457 le32_to_cpu(logio->io_parameter[1]));
1458
1459logio_done:
1460 sp->done(vha, sp, 0);
1461}
1462
1463static void
1464qla24xx_tm_iocb_entry(scsi_qla_host_t *vha, struct req_que *req,
1465 struct tsk_mgmt_entry *tsk)
1466{
1467 const char func[] = "TMF-IOCB";
1468 const char *type;
1469 fc_port_t *fcport;
1470 srb_t *sp;
1471 struct srb_iocb *iocb;
1472 struct sts_entry_24xx *sts = (struct sts_entry_24xx *)tsk;
1473 int error = 1;
1474
1475 sp = qla2x00_get_sp_from_handle(vha, func, req, tsk);
1476 if (!sp)
1477 return;
1478
1479 iocb = &sp->u.iocb_cmd;
1480 type = sp->name;
1481 fcport = sp->fcport;
1482
1483 if (sts->entry_status) {
1484 ql_log(ql_log_warn, fcport->vha, 0x5038,
1485 "Async-%s error - hdl=%x entry-status(%x).\n",
1486 type, sp->handle, sts->entry_status);
1487 } else if (sts->comp_status != __constant_cpu_to_le16(CS_COMPLETE)) {
1488 ql_log(ql_log_warn, fcport->vha, 0x5039,
1489 "Async-%s error - hdl=%x completion status(%x).\n",
1490 type, sp->handle, sts->comp_status);
1491 } else if (!(le16_to_cpu(sts->scsi_status) &
1492 SS_RESPONSE_INFO_LEN_VALID)) {
1493 ql_log(ql_log_warn, fcport->vha, 0x503a,
1494 "Async-%s error - hdl=%x no response info(%x).\n",
1495 type, sp->handle, sts->scsi_status);
1496 } else if (le32_to_cpu(sts->rsp_data_len) < 4) {
1497 ql_log(ql_log_warn, fcport->vha, 0x503b,
1498 "Async-%s error - hdl=%x not enough response(%d).\n",
1499 type, sp->handle, sts->rsp_data_len);
1500 } else if (sts->data[3]) {
1501 ql_log(ql_log_warn, fcport->vha, 0x503c,
1502 "Async-%s error - hdl=%x response(%x).\n",
1503 type, sp->handle, sts->data[3]);
1504 } else {
1505 error = 0;
1506 }
1507
1508 if (error) {
1509 iocb->u.tmf.data = error;
1510 ql_dump_buffer(ql_dbg_async + ql_dbg_buffer, vha, 0x5055,
1511 (uint8_t *)sts, sizeof(*sts));
1512 }
1513
1514 sp->done(vha, sp, 0);
1515}
1516
1517
1518
1519
1520
/**
 * qla2x00_process_response_queue() - Drain a legacy ISP2x00 response ring.
 * @rsp: response queue to process
 *
 * Walks ring entries until one still carrying the RESPONSE_PROCESSED
 * signature is reached, dispatching each IOCB by its entry type, then
 * publishes the new ring index to the ISP response-queue out-pointer.
 * Caller is expected to hold the hardware lock (interrupt path).
 */
void
qla2x00_process_response_queue(struct rsp_que *rsp)
{
	struct scsi_qla_host *vha;
	struct qla_hw_data *ha = rsp->hw;
	struct device_reg_2xxx __iomem *reg = &ha->iobase->isp;
	sts_entry_t *pkt;
	uint16_t handle_cnt;
	uint16_t cnt;

	vha = pci_get_drvdata(ha->pdev);

	if (!vha->flags.online)
		return;

	while (rsp->ring_ptr->signature != RESPONSE_PROCESSED) {
		pkt = (sts_entry_t *)rsp->ring_ptr;

		/* Advance the ring pointer, wrapping at the end. */
		rsp->ring_index++;
		if (rsp->ring_index == rsp->length) {
			rsp->ring_index = 0;
			rsp->ring_ptr = rsp->ring;
		} else {
			rsp->ring_ptr++;
		}

		if (pkt->entry_status != 0) {
			/* Firmware flagged the entry itself as bad. */
			qla2x00_error_entry(vha, rsp, pkt);
			((response_t *)pkt)->signature = RESPONSE_PROCESSED;
			wmb();
			continue;
		}

		switch (pkt->entry_type) {
		case STATUS_TYPE:
			qla2x00_status_entry(vha, rsp, pkt);
			break;
		case STATUS_TYPE_21:
			/* Type-21 entries batch several completed handles. */
			handle_cnt = ((sts21_entry_t *)pkt)->handle_count;
			for (cnt = 0; cnt < handle_cnt; cnt++) {
				qla2x00_process_completed_request(vha, rsp->req,
				    ((sts21_entry_t *)pkt)->handle[cnt]);
			}
			break;
		case STATUS_TYPE_22:
			/* Type-22 entries batch several completed handles. */
			handle_cnt = ((sts22_entry_t *)pkt)->handle_count;
			for (cnt = 0; cnt < handle_cnt; cnt++) {
				qla2x00_process_completed_request(vha, rsp->req,
				    ((sts22_entry_t *)pkt)->handle[cnt]);
			}
			break;
		case STATUS_CONT_TYPE:
			/* Continuation of sense data for rsp->status_srb. */
			qla2x00_status_cont_entry(rsp, (sts_cont_entry_t *)pkt);
			break;
		case MBX_IOCB_TYPE:
			qla2x00_mbx_iocb_entry(vha, rsp->req,
			    (struct mbx_entry *)pkt);
			break;
		case CT_IOCB_TYPE:
			qla2x00_ct_entry(vha, rsp->req, pkt, CT_IOCB_TYPE);
			break;
		default:
			/* Type Not Supported. */
			ql_log(ql_log_warn, vha, 0x504a,
			    "Received unknown response pkt type %x "
			    "entry status=%x.\n",
			    pkt->entry_type, pkt->entry_status);
			break;
		}
		((response_t *)pkt)->signature = RESPONSE_PROCESSED;
		wmb();
	}

	/* Adjust ring index: tell the ISP how far we have consumed. */
	WRT_REG_WORD(ISP_RSP_Q_OUT(ha, reg), rsp->ring_index);
}
1597
1598static inline void
1599qla2x00_handle_sense(srb_t *sp, uint8_t *sense_data, uint32_t par_sense_len,
1600 uint32_t sense_len, struct rsp_que *rsp, int res)
1601{
1602 struct scsi_qla_host *vha = sp->fcport->vha;
1603 struct scsi_cmnd *cp = GET_CMD_SP(sp);
1604 uint32_t track_sense_len;
1605
1606 if (sense_len >= SCSI_SENSE_BUFFERSIZE)
1607 sense_len = SCSI_SENSE_BUFFERSIZE;
1608
1609 SET_CMD_SENSE_LEN(sp, sense_len);
1610 SET_CMD_SENSE_PTR(sp, cp->sense_buffer);
1611 track_sense_len = sense_len;
1612
1613 if (sense_len > par_sense_len)
1614 sense_len = par_sense_len;
1615
1616 memcpy(cp->sense_buffer, sense_data, sense_len);
1617
1618 SET_CMD_SENSE_PTR(sp, cp->sense_buffer + sense_len);
1619 track_sense_len -= sense_len;
1620 SET_CMD_SENSE_LEN(sp, track_sense_len);
1621
1622 if (track_sense_len != 0) {
1623 rsp->status_srb = sp;
1624 cp->result = res;
1625 }
1626
1627 if (sense_len) {
1628 ql_dbg(ql_dbg_io + ql_dbg_buffer, vha, 0x301c,
1629 "Check condition Sense data, nexus%ld:%d:%d cmd=%p.\n",
1630 sp->fcport->vha->host_no, cp->device->id, cp->device->lun,
1631 cp);
1632 ql_dump_buffer(ql_dbg_io + ql_dbg_buffer, vha, 0x302b,
1633 cp->sense_buffer, sense_len);
1634 }
1635}
1636
/*
 * T10 protection-information tuple as carried on the wire; all fields
 * are big-endian (__be types). Layout matches the 8-byte PI interval
 * compared in qla2x00_handle_dif_error() below.
 */
struct scsi_dif_tuple {
	__be16 guard;		/* guard tag (data block checksum) */
	__be16 app_tag;		/* application tag */
	__be32 ref_tag;		/* reference tag */
};
1642
1643
1644
1645
1646
1647
1648
/*
 * Handle a CS_DIF_ERROR completion: compare the actual vs expected
 * protection-information tuples reported in the status IOCB and either
 * (a) absorb the "escape tag" special case by fixing up residual/PI and
 * returning 0 (no error), or (b) build CHECK CONDITION sense identifying
 * which tag mismatched and return 1 (log the event).
 */
static inline int
qla2x00_handle_dif_error(srb_t *sp, struct sts_entry_24xx *sts24)
{
	struct scsi_qla_host *vha = sp->fcport->vha;
	struct scsi_cmnd *cmd = GET_CMD_SP(sp);
	uint8_t *ap = &sts24->data[12];		/* actual PI tuple */
	uint8_t *ep = &sts24->data[20];		/* expected PI tuple */
	uint32_t e_ref_tag, a_ref_tag;
	uint16_t e_app_tag, a_app_tag;
	uint16_t e_guard, a_guard;

	/*
	 * Unpack both 8-byte tuples: app tag at offset 0, guard at 2,
	 * ref tag at 4 (fields arrive little-endian in the IOCB data).
	 */
	a_guard = le16_to_cpu(*(uint16_t *)(ap + 2));
	a_app_tag = le16_to_cpu(*(uint16_t *)(ap + 0));
	a_ref_tag = le32_to_cpu(*(uint32_t *)(ap + 4));
	e_guard = le16_to_cpu(*(uint16_t *)(ep + 2));
	e_app_tag = le16_to_cpu(*(uint16_t *)(ep + 0));
	e_ref_tag = le32_to_cpu(*(uint32_t *)(ep + 4));

	ql_dbg(ql_dbg_io, vha, 0x3023,
	    "iocb(s) %p Returned STATUS.\n", sts24);

	ql_dbg(ql_dbg_io, vha, 0x3024,
	    "DIF ERROR in cmd 0x%x lba 0x%llx act ref"
	    " tag=0x%x, exp ref_tag=0x%x, act app tag=0x%x, exp app"
	    " tag=0x%x, act guard=0x%x, exp guard=0x%x.\n",
	    cmd->cmnd[0], (u64)scsi_get_lba(cmd), a_ref_tag, e_ref_tag,
	    a_app_tag, e_app_tag, a_guard, e_guard);

	/*
	 * All-ones app tag (plus all-ones ref tag for non-Type-3) is the
	 * PI "escape" value: the block carries no valid protection info,
	 * so this is not a data-integrity failure. Treat the transfer as
	 * short and patch things up instead of failing the command.
	 */
	if ((a_app_tag == 0xffff) &&
	    ((scsi_get_prot_type(cmd) != SCSI_PROT_DIF_TYPE3) ||
	    (a_ref_tag == 0xffffffff))) {
		uint32_t blocks_done, resid;
		sector_t lba_s = scsi_get_lba(cmd);

		/*
		 * Expected ref tags count up from the starting LBA, so
		 * the expected tag at the fault identifies how many
		 * blocks completed before the escape block.
		 */
		blocks_done = e_ref_tag - (uint32_t)lba_s + 1;

		resid = scsi_bufflen(cmd) - (blocks_done *
		    cmd->device->sector_size);

		scsi_set_resid(cmd, resid);
		cmd->result = DID_OK << 16;

		/*
		 * Walk the protection scatterlist to the tuple of the
		 * last completed block and rewrite it with the escape
		 * tags so the midlayer's verification passes.
		 * Assumes 8-byte PI intervals (sg_dma_len / 8).
		 */
		if (scsi_prot_sg_count(cmd)) {
			uint32_t i, j = 0, k = 0, num_ent;
			struct scatterlist *sg;
			struct sd_dif_tuple *spt;

			/* Locate the sg element holding tuple blocks_done-1. */
			scsi_for_each_prot_sg(cmd, sg,
			    scsi_prot_sg_count(cmd), i) {
				num_ent = sg_dma_len(sg) / 8;
				if (k + num_ent < blocks_done) {
					k += num_ent;
					continue;
				}
				j = blocks_done - k - 1;
				k = blocks_done;
				break;
			}

			if (k != blocks_done) {
				ql_log(ql_log_warn, vha, 0x302f,
				    "unexpected tag values tag:lba=%x:%llx)\n",
				    e_ref_tag, (unsigned long long)lba_s);
				return 1;
			}

			spt = page_address(sg_page(sg)) + sg->offset;
			spt += j;

			/* Write the escape values into the in-memory PI. */
			spt->app_tag = 0xffff;
			if (scsi_get_prot_type(cmd) == SCSI_PROT_DIF_TYPE3)
				spt->ref_tag = 0xffffffff;
		}

		return 0;
	}

	/*
	 * Genuine mismatch: report CHECK CONDITION with ILLEGAL REQUEST,
	 * ASC 0x10 and an ASCQ identifying the failing tag (1=guard,
	 * 2=app, 3=ref). Guard errors take precedence, then ref, then app.
	 *
	 * NOTE(review): SAM_STAT_CHECK_CONDITION is already the SAM status
	 * value; the extra "<< 1" below looks suspicious -- confirm against
	 * the midlayer's status-byte conventions before changing.
	 */
	if (e_guard != a_guard) {
		scsi_build_sense_buffer(1, cmd->sense_buffer, ILLEGAL_REQUEST,
		    0x10, 0x1);
		set_driver_byte(cmd, DRIVER_SENSE);
		set_host_byte(cmd, DID_ABORT);
		cmd->result |= SAM_STAT_CHECK_CONDITION << 1;
		return 1;
	}

	/* Reference tag mismatch. */
	if (e_ref_tag != a_ref_tag) {
		scsi_build_sense_buffer(1, cmd->sense_buffer, ILLEGAL_REQUEST,
		    0x10, 0x3);
		set_driver_byte(cmd, DRIVER_SENSE);
		set_host_byte(cmd, DID_ABORT);
		cmd->result |= SAM_STAT_CHECK_CONDITION << 1;
		return 1;
	}

	/* Application tag mismatch. */
	if (e_app_tag != a_app_tag) {
		scsi_build_sense_buffer(1, cmd->sense_buffer, ILLEGAL_REQUEST,
		    0x10, 0x2);
		set_driver_byte(cmd, DRIVER_SENSE);
		set_host_byte(cmd, DID_ABORT);
		cmd->result |= SAM_STAT_CHECK_CONDITION << 1;
		return 1;
	}

	/* No individual tag mismatched; still report the DIF error. */
	return 1;
}
1770
1771static void
1772qla25xx_process_bidir_status_iocb(scsi_qla_host_t *vha, void *pkt,
1773 struct req_que *req, uint32_t index)
1774{
1775 struct qla_hw_data *ha = vha->hw;
1776 srb_t *sp;
1777 uint16_t comp_status;
1778 uint16_t scsi_status;
1779 uint16_t thread_id;
1780 uint32_t rval = EXT_STATUS_OK;
1781 struct fc_bsg_job *bsg_job = NULL;
1782 sts_entry_t *sts;
1783 struct sts_entry_24xx *sts24;
1784 sts = (sts_entry_t *) pkt;
1785 sts24 = (struct sts_entry_24xx *) pkt;
1786
1787
1788 if (index >= req->num_outstanding_cmds) {
1789 ql_log(ql_log_warn, vha, 0x70af,
1790 "Invalid SCSI completion handle 0x%x.\n", index);
1791 set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
1792 return;
1793 }
1794
1795 sp = req->outstanding_cmds[index];
1796 if (sp) {
1797
1798 req->outstanding_cmds[index] = NULL;
1799 bsg_job = sp->u.bsg_job;
1800 } else {
1801 ql_log(ql_log_warn, vha, 0x70b0,
1802 "Req:%d: Invalid ISP SCSI completion handle(0x%x)\n",
1803 req->id, index);
1804
1805 set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
1806 return;
1807 }
1808
1809 if (IS_FWI2_CAPABLE(ha)) {
1810 comp_status = le16_to_cpu(sts24->comp_status);
1811 scsi_status = le16_to_cpu(sts24->scsi_status) & SS_MASK;
1812 } else {
1813 comp_status = le16_to_cpu(sts->comp_status);
1814 scsi_status = le16_to_cpu(sts->scsi_status) & SS_MASK;
1815 }
1816
1817 thread_id = bsg_job->request->rqst_data.h_vendor.vendor_cmd[1];
1818 switch (comp_status) {
1819 case CS_COMPLETE:
1820 if (scsi_status == 0) {
1821 bsg_job->reply->reply_payload_rcv_len =
1822 bsg_job->reply_payload.payload_len;
1823 vha->qla_stats.input_bytes +=
1824 bsg_job->reply->reply_payload_rcv_len;
1825 vha->qla_stats.input_requests++;
1826 rval = EXT_STATUS_OK;
1827 }
1828 goto done;
1829
1830 case CS_DATA_OVERRUN:
1831 ql_dbg(ql_dbg_user, vha, 0x70b1,
1832 "Command completed with date overrun thread_id=%d\n",
1833 thread_id);
1834 rval = EXT_STATUS_DATA_OVERRUN;
1835 break;
1836
1837 case CS_DATA_UNDERRUN:
1838 ql_dbg(ql_dbg_user, vha, 0x70b2,
1839 "Command completed with date underrun thread_id=%d\n",
1840 thread_id);
1841 rval = EXT_STATUS_DATA_UNDERRUN;
1842 break;
1843 case CS_BIDIR_RD_OVERRUN:
1844 ql_dbg(ql_dbg_user, vha, 0x70b3,
1845 "Command completed with read data overrun thread_id=%d\n",
1846 thread_id);
1847 rval = EXT_STATUS_DATA_OVERRUN;
1848 break;
1849
1850 case CS_BIDIR_RD_WR_OVERRUN:
1851 ql_dbg(ql_dbg_user, vha, 0x70b4,
1852 "Command completed with read and write data overrun "
1853 "thread_id=%d\n", thread_id);
1854 rval = EXT_STATUS_DATA_OVERRUN;
1855 break;
1856
1857 case CS_BIDIR_RD_OVERRUN_WR_UNDERRUN:
1858 ql_dbg(ql_dbg_user, vha, 0x70b5,
1859 "Command completed with read data over and write data "
1860 "underrun thread_id=%d\n", thread_id);
1861 rval = EXT_STATUS_DATA_OVERRUN;
1862 break;
1863
1864 case CS_BIDIR_RD_UNDERRUN:
1865 ql_dbg(ql_dbg_user, vha, 0x70b6,
1866 "Command completed with read data data underrun "
1867 "thread_id=%d\n", thread_id);
1868 rval = EXT_STATUS_DATA_UNDERRUN;
1869 break;
1870
1871 case CS_BIDIR_RD_UNDERRUN_WR_OVERRUN:
1872 ql_dbg(ql_dbg_user, vha, 0x70b7,
1873 "Command completed with read data under and write data "
1874 "overrun thread_id=%d\n", thread_id);
1875 rval = EXT_STATUS_DATA_UNDERRUN;
1876 break;
1877
1878 case CS_BIDIR_RD_WR_UNDERRUN:
1879 ql_dbg(ql_dbg_user, vha, 0x70b8,
1880 "Command completed with read and write data underrun "
1881 "thread_id=%d\n", thread_id);
1882 rval = EXT_STATUS_DATA_UNDERRUN;
1883 break;
1884
1885 case CS_BIDIR_DMA:
1886 ql_dbg(ql_dbg_user, vha, 0x70b9,
1887 "Command completed with data DMA error thread_id=%d\n",
1888 thread_id);
1889 rval = EXT_STATUS_DMA_ERR;
1890 break;
1891
1892 case CS_TIMEOUT:
1893 ql_dbg(ql_dbg_user, vha, 0x70ba,
1894 "Command completed with timeout thread_id=%d\n",
1895 thread_id);
1896 rval = EXT_STATUS_TIMEOUT;
1897 break;
1898 default:
1899 ql_dbg(ql_dbg_user, vha, 0x70bb,
1900 "Command completed with completion status=0x%x "
1901 "thread_id=%d\n", comp_status, thread_id);
1902 rval = EXT_STATUS_ERR;
1903 break;
1904 }
1905 bsg_job->reply->reply_payload_rcv_len = 0;
1906
1907done:
1908
1909 bsg_job->reply->reply_data.vendor_reply.vendor_rsp[0] = rval;
1910 bsg_job->reply_len = sizeof(struct fc_bsg_reply);
1911
1912
1913 sp->done(vha, sp, (DID_OK << 6));
1914
1915}
1916
1917
1918
1919
1920
1921
/**
 * qla2x00_status_entry() - Process a Status IOCB entry.
 * @vha: SCSI driver HA context
 * @rsp: response queue the entry arrived on
 * @pkt: status entry (legacy sts_entry_t or FWI2 sts_entry_24xx layout)
 *
 * Translates the firmware completion status / SCSI status pair into a
 * midlayer result for the owning command and completes it, unless sense
 * continuation entries are still expected (rsp->status_srb left set).
 */
static void
qla2x00_status_entry(scsi_qla_host_t *vha, struct rsp_que *rsp, void *pkt)
{
	srb_t *sp;
	fc_port_t *fcport;
	struct scsi_cmnd *cp;
	sts_entry_t *sts;
	struct sts_entry_24xx *sts24;
	uint16_t comp_status;
	uint16_t scsi_status;
	uint16_t ox_id;
	uint8_t lscsi_status;
	int32_t resid;
	uint32_t sense_len, par_sense_len, rsp_info_len, resid_len,
	    fw_resid_len;
	uint8_t *rsp_info, *sense_data;
	struct qla_hw_data *ha = vha->hw;
	uint32_t handle;
	uint16_t que;
	struct req_que *req;
	int logit = 1;		/* log the completion unless cleared below */
	int res = 0;
	uint16_t state_flags = 0;

	/* Both layouts overlay the same packet; pick fields per ISP type. */
	sts = (sts_entry_t *) pkt;
	sts24 = (struct sts_entry_24xx *) pkt;
	if (IS_FWI2_CAPABLE(ha)) {
		comp_status = le16_to_cpu(sts24->comp_status);
		scsi_status = le16_to_cpu(sts24->scsi_status) & SS_MASK;
		state_flags = le16_to_cpu(sts24->state_flags);
	} else {
		comp_status = le16_to_cpu(sts->comp_status);
		scsi_status = le16_to_cpu(sts->scsi_status) & SS_MASK;
	}
	/* Handle encodes the command slot (LSW) and request queue (MSW). */
	handle = (uint32_t) LSW(sts->handle);
	que = MSW(sts->handle);
	req = ha->req_q_map[que];

	/* Validate the queue portion of the handle. */
	if (req == NULL ||
	    que >= find_first_zero_bit(ha->req_qid_map, ha->max_req_queues)) {
		ql_dbg(ql_dbg_io, vha, 0x3059,
		    "Invalid status handle (0x%x): Bad req pointer. req=%p, "
		    "que=%u.\n", sts->handle, req, que);
		return;
	}

	/* Validate handle against the outstanding-command table. */
	if (handle < req->num_outstanding_cmds)
		sp = req->outstanding_cmds[handle];
	else
		sp = NULL;

	if (sp == NULL) {
		ql_dbg(ql_dbg_io, vha, 0x3017,
		    "Invalid status handle (0x%x).\n", sts->handle);

		/* Stale/bogus handle: force a chip reset to resync. */
		if (IS_P3P_TYPE(ha))
			set_bit(FCOE_CTX_RESET_NEEDED, &vha->dpc_flags);
		else
			set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
		qla2xxx_wake_dpc(vha);
		return;
	}

	/* Bidirectional BSG commands take their own completion path. */
	if (unlikely((state_flags & BIT_1) && (sp->type == SRB_BIDI_CMD))) {
		qla25xx_process_bidir_status_iocb(vha, pkt, req, handle);
		return;
	}

	/* Fast path completion: clean status, no extra processing needed. */
	if (comp_status == CS_COMPLETE && scsi_status == 0) {
		qla2x00_do_host_ramp_up(vha);
		qla2x00_process_completed_request(vha, req, handle);

		return;
	}

	req->outstanding_cmds[handle] = NULL;
	cp = GET_CMD_SP(sp);
	if (cp == NULL) {
		ql_dbg(ql_dbg_io, vha, 0x3018,
		    "Command already returned (0x%x/%p).\n",
		    sts->handle, sp);

		return;
	}

	lscsi_status = scsi_status & STATUS_MASK;

	fcport = sp->fcport;

	/* Gather the status/residual/sense fields per ISP generation. */
	ox_id = 0;
	sense_len = par_sense_len = rsp_info_len = resid_len =
	    fw_resid_len = 0;
	if (IS_FWI2_CAPABLE(ha)) {
		if (scsi_status & SS_SENSE_LEN_VALID)
			sense_len = le32_to_cpu(sts24->sense_len);
		if (scsi_status & SS_RESPONSE_INFO_LEN_VALID)
			rsp_info_len = le32_to_cpu(sts24->rsp_data_len);
		if (scsi_status & (SS_RESIDUAL_UNDER | SS_RESIDUAL_OVER))
			resid_len = le32_to_cpu(sts24->rsp_residual_count);
		if (comp_status == CS_DATA_UNDERRUN)
			fw_resid_len = le32_to_cpu(sts24->residual_len);
		rsp_info = sts24->data;
		sense_data = sts24->data;
		host_to_fcp_swap(sts24->data, sizeof(sts24->data));
		ox_id = le16_to_cpu(sts24->ox_id);
		par_sense_len = sizeof(sts24->data);
	} else {
		if (scsi_status & SS_SENSE_LEN_VALID)
			sense_len = le16_to_cpu(sts->req_sense_length);
		if (scsi_status & SS_RESPONSE_INFO_LEN_VALID)
			rsp_info_len = le16_to_cpu(sts->rsp_info_len);
		resid_len = le32_to_cpu(sts->residual_length);
		rsp_info = sts->rsp_info;
		sense_data = sts->req_sense_data;
		par_sense_len = sizeof(sts->req_sense_data);
	}

	/* Check for any FCP transport errors in the response info. */
	if (scsi_status & SS_RESPONSE_INFO_LEN_VALID) {
		/* On FWI2 the sense data follows the FCP response info. */
		if (IS_FWI2_CAPABLE(ha)) {
			sense_data += rsp_info_len;
			par_sense_len -= rsp_info_len;
		}
		if (rsp_info_len > 3 && rsp_info[3]) {
			ql_dbg(ql_dbg_io, fcport->vha, 0x3019,
			    "FCP I/O protocol failure (0x%x/0x%x).\n",
			    rsp_info_len, rsp_info[3]);

			res = DID_BUS_BUSY << 16;
			goto out;
		}
	}

	/* FWI2 reports overruns via the residual-over bit, not comp status. */
	if (IS_FWI2_CAPABLE(ha) && comp_status == CS_COMPLETE &&
	    scsi_status & SS_RESIDUAL_OVER)
		comp_status = CS_DATA_OVERRUN;

	/*
	 * Based on the host and SCSI status, generate a result code for
	 * the Linux midlayer.
	 */
	switch (comp_status) {
	case CS_COMPLETE:
	case CS_QUEUE_FULL:
		if (scsi_status == 0) {
			res = DID_OK << 16;
			break;
		}
		if (scsi_status & (SS_RESIDUAL_UNDER | SS_RESIDUAL_OVER)) {
			resid = resid_len;
			scsi_set_resid(cp, resid);

			/* Short transfer below the command's underflow. */
			if (!lscsi_status &&
			    ((unsigned)(scsi_bufflen(cp) - resid) <
			     cp->underflow)) {
				ql_dbg(ql_dbg_io, fcport->vha, 0x301a,
				    "Mid-layer underflow "
				    "detected (0x%x of 0x%x bytes).\n",
				    resid, scsi_bufflen(cp));

				res = DID_ERROR << 16;
				break;
			}
		}
		res = DID_OK << 16 | lscsi_status;

		if (lscsi_status == SAM_STAT_TASK_SET_FULL) {
			ql_dbg(ql_dbg_io, fcport->vha, 0x301b,
			    "QUEUE FULL detected.\n");
			break;
		}
		logit = 0;
		if (lscsi_status != SS_CHECK_CONDITION)
			break;

		/* CHECK CONDITION: capture the sense bytes. */
		memset(cp->sense_buffer, 0, SCSI_SENSE_BUFFERSIZE);
		if (!(scsi_status & SS_SENSE_LEN_VALID))
			break;

		qla2x00_handle_sense(sp, sense_data, par_sense_len, sense_len,
		    rsp, res);
		break;

	case CS_DATA_UNDERRUN:
		/* Use firmware-reported residual length when available. */
		resid = IS_FWI2_CAPABLE(ha) ? fw_resid_len : resid_len;
		scsi_set_resid(cp, resid);
		if (scsi_status & SS_RESIDUAL_UNDER) {
			/* Firmware and IOCB residuals should agree. */
			if (IS_FWI2_CAPABLE(ha) && fw_resid_len != resid_len) {
				ql_dbg(ql_dbg_io, fcport->vha, 0x301d,
				    "Dropped frame(s) detected "
				    "(0x%x of 0x%x bytes).\n",
				    resid, scsi_bufflen(cp));

				res = DID_ERROR << 16 | lscsi_status;
				goto check_scsi_status;
			}

			if (!lscsi_status &&
			    ((unsigned)(scsi_bufflen(cp) - resid) <
			    cp->underflow)) {
				ql_dbg(ql_dbg_io, fcport->vha, 0x301e,
				    "Mid-layer underflow "
				    "detected (0x%x of 0x%x bytes).\n",
				    resid, scsi_bufflen(cp));

				res = DID_ERROR << 16;
				break;
			}
		} else if (lscsi_status != SAM_STAT_TASK_SET_FULL &&
			    lscsi_status != SAM_STAT_BUSY) {
			/*
			 * Underrun without the residual-under bit and no
			 * queue-full/busy status: frames were dropped on
			 * the wire, so fail the command.
			 */
			ql_dbg(ql_dbg_io, fcport->vha, 0x301f,
			    "Dropped frame(s) detected (0x%x "
			    "of 0x%x bytes).\n", resid,
			    scsi_bufflen(cp));

			res = DID_ERROR << 16 | lscsi_status;
			goto check_scsi_status;
		} else {
			ql_dbg(ql_dbg_io, fcport->vha, 0x3030,
			    "scsi_status: 0x%x, lscsi_status: 0x%x\n",
			    scsi_status, lscsi_status);
		}

		res = DID_OK << 16 | lscsi_status;
		logit = 0;

check_scsi_status:
		/*
		 * A non-zero SCSI status still needs local handling
		 * (queue full, or check condition with sense data).
		 */
		if (lscsi_status != 0) {
			if (lscsi_status == SAM_STAT_TASK_SET_FULL) {
				ql_dbg(ql_dbg_io, fcport->vha, 0x3020,
				    "QUEUE FULL detected.\n");
				logit = 1;
				break;
			}
			if (lscsi_status != SS_CHECK_CONDITION)
				break;

			memset(cp->sense_buffer, 0, SCSI_SENSE_BUFFERSIZE);
			if (!(scsi_status & SS_SENSE_LEN_VALID))
				break;

			qla2x00_handle_sense(sp, sense_data, par_sense_len,
			    sense_len, rsp, res);
		}
		break;

	case CS_PORT_LOGGED_OUT:
	case CS_PORT_CONFIG_CHG:
	case CS_PORT_BUSY:
	case CS_INCOMPLETE:
	case CS_PORT_UNAVAILABLE:
	case CS_TIMEOUT:
	case CS_RESET:
		/*
		 * Transport-level disruption: let the midlayer retry via
		 * DID_TRANSPORT_DISRUPTED, and mark the remote port lost
		 * (unless a legacy-ISP timeout without logout -- see below).
		 */
		res = DID_TRANSPORT_DISRUPTED << 16;

		if (comp_status == CS_TIMEOUT) {
			if (IS_FWI2_CAPABLE(ha))
				break;
			else if ((le16_to_cpu(sts->status_flags) &
			    SF_LOGOUT_SENT) == 0)
				break;
		}

		ql_dbg(ql_dbg_io, fcport->vha, 0x3021,
		    "Port to be marked lost on fcport=%02x%02x%02x, current "
		    "port state= %s.\n", fcport->d_id.b.domain,
		    fcport->d_id.b.area, fcport->d_id.b.al_pa,
		    port_state_str[atomic_read(&fcport->state)]);

		if (atomic_read(&fcport->state) == FCS_ONLINE)
			qla2x00_mark_device_lost(fcport->vha, fcport, 1, 1);
		break;

	case CS_ABORTED:
		res = DID_RESET << 16;
		break;

	case CS_DIF_ERROR:
		/* T10 DIF mismatch: may be absorbed or turned into sense. */
		logit = qla2x00_handle_dif_error(sp, sts24);
		res = cp->result;
		break;

	case CS_TRANSPORT:
		res = DID_ERROR << 16;

		/* Extra diagnostics only for PI split-detect capable HBAs. */
		if (!IS_PI_SPLIT_DET_CAPABLE(ha))
			break;

		if (state_flags & BIT_4)
			scmd_printk(KERN_WARNING, cp,
			    "Unsupported device '%s' found.\n",
			    cp->device->vendor);
		break;

	default:
		res = DID_ERROR << 16;
		break;
	}

out:
	if (logit)
		ql_dbg(ql_dbg_io, fcport->vha, 0x3022,
		    "FCP command status: 0x%x-0x%x (0x%x) nexus=%ld:%d:%d "
		    "portid=%02x%02x%02x oxid=0x%x cdb=%10phN len=0x%x "
		    "rsp_info=0x%x resid=0x%x fw_resid=0x%x.\n",
		    comp_status, scsi_status, res, vha->host_no,
		    cp->device->id, cp->device->lun, fcport->d_id.b.domain,
		    fcport->d_id.b.area, fcport->d_id.b.al_pa, ox_id,
		    cp->cmnd, scsi_bufflen(cp), rsp_info_len,
		    resid_len, fw_resid_len);

	if (!res)
		qla2x00_do_host_ramp_up(vha);

	/* Defer completion while sense continuation entries are pending. */
	if (rsp->status_srb == NULL)
		sp->done(ha, sp, res);
}
2259
2260
2261
2262
2263
2264
2265
2266
2267static void
2268qla2x00_status_cont_entry(struct rsp_que *rsp, sts_cont_entry_t *pkt)
2269{
2270 uint8_t sense_sz = 0;
2271 struct qla_hw_data *ha = rsp->hw;
2272 struct scsi_qla_host *vha = pci_get_drvdata(ha->pdev);
2273 srb_t *sp = rsp->status_srb;
2274 struct scsi_cmnd *cp;
2275 uint32_t sense_len;
2276 uint8_t *sense_ptr;
2277
2278 if (!sp || !GET_CMD_SENSE_LEN(sp))
2279 return;
2280
2281 sense_len = GET_CMD_SENSE_LEN(sp);
2282 sense_ptr = GET_CMD_SENSE_PTR(sp);
2283
2284 cp = GET_CMD_SP(sp);
2285 if (cp == NULL) {
2286 ql_log(ql_log_warn, vha, 0x3025,
2287 "cmd is NULL: already returned to OS (sp=%p).\n", sp);
2288
2289 rsp->status_srb = NULL;
2290 return;
2291 }
2292
2293 if (sense_len > sizeof(pkt->data))
2294 sense_sz = sizeof(pkt->data);
2295 else
2296 sense_sz = sense_len;
2297
2298
2299 if (IS_FWI2_CAPABLE(ha))
2300 host_to_fcp_swap(pkt->data, sizeof(pkt->data));
2301 memcpy(sense_ptr, pkt->data, sense_sz);
2302 ql_dump_buffer(ql_dbg_io + ql_dbg_buffer, vha, 0x302c,
2303 sense_ptr, sense_sz);
2304
2305 sense_len -= sense_sz;
2306 sense_ptr += sense_sz;
2307
2308 SET_CMD_SENSE_PTR(sp, sense_ptr);
2309 SET_CMD_SENSE_LEN(sp, sense_len);
2310
2311
2312 if (sense_len == 0) {
2313 rsp->status_srb = NULL;
2314 sp->done(ha, sp, cp->result);
2315 }
2316}
2317
2318
2319
2320
2321
2322
2323static void
2324qla2x00_error_entry(scsi_qla_host_t *vha, struct rsp_que *rsp, sts_entry_t *pkt)
2325{
2326 srb_t *sp;
2327 struct qla_hw_data *ha = vha->hw;
2328 const char func[] = "ERROR-IOCB";
2329 uint16_t que = MSW(pkt->handle);
2330 struct req_que *req = NULL;
2331 int res = DID_ERROR << 16;
2332
2333 ql_dbg(ql_dbg_async, vha, 0x502a,
2334 "type of error status in response: 0x%x\n", pkt->entry_status);
2335
2336 if (que >= ha->max_req_queues || !ha->req_q_map[que])
2337 goto fatal;
2338
2339 req = ha->req_q_map[que];
2340
2341 if (pkt->entry_status & RF_BUSY)
2342 res = DID_BUS_BUSY << 16;
2343
2344 sp = qla2x00_get_sp_from_handle(vha, func, req, pkt);
2345 if (sp) {
2346 sp->done(ha, sp, res);
2347 return;
2348 }
2349fatal:
2350 ql_log(ql_log_warn, vha, 0x5030,
2351 "Error entry - invalid handle/queue.\n");
2352
2353 if (IS_P3P_TYPE(ha))
2354 set_bit(FCOE_CTX_RESET_NEEDED, &vha->dpc_flags);
2355 else
2356 set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
2357 qla2xxx_wake_dpc(vha);
2358}
2359
2360
2361
2362
2363
2364
2365static void
2366qla24xx_mbx_completion(scsi_qla_host_t *vha, uint16_t mb0)
2367{
2368 uint16_t cnt;
2369 uint32_t mboxes;
2370 uint16_t __iomem *wptr;
2371 struct qla_hw_data *ha = vha->hw;
2372 struct device_reg_24xx __iomem *reg = &ha->iobase->isp24;
2373
2374
2375 mboxes = (1 << ha->mbx_count) - 1;
2376 if (!ha->mcp)
2377 ql_dbg(ql_dbg_async, vha, 0x504e, "MBX pointer ERROR.\n");
2378 else
2379 mboxes = ha->mcp->in_mb;
2380
2381
2382 ha->flags.mbox_int = 1;
2383 ha->mailbox_out[0] = mb0;
2384 mboxes >>= 1;
2385 wptr = (uint16_t __iomem *)®->mailbox1;
2386
2387 for (cnt = 1; cnt < ha->mbx_count; cnt++) {
2388 if (mboxes & BIT_0)
2389 ha->mailbox_out[cnt] = RD_REG_WORD(wptr);
2390
2391 mboxes >>= 1;
2392 wptr++;
2393 }
2394}
2395
2396
2397
2398
2399
2400void qla24xx_process_response_queue(struct scsi_qla_host *vha,
2401 struct rsp_que *rsp)
2402{
2403 struct sts_entry_24xx *pkt;
2404 struct qla_hw_data *ha = vha->hw;
2405
2406 if (!vha->flags.online)
2407 return;
2408
2409 while (rsp->ring_ptr->signature != RESPONSE_PROCESSED) {
2410 pkt = (struct sts_entry_24xx *)rsp->ring_ptr;
2411
2412 rsp->ring_index++;
2413 if (rsp->ring_index == rsp->length) {
2414 rsp->ring_index = 0;
2415 rsp->ring_ptr = rsp->ring;
2416 } else {
2417 rsp->ring_ptr++;
2418 }
2419
2420 if (pkt->entry_status != 0) {
2421 qla2x00_error_entry(vha, rsp, (sts_entry_t *) pkt);
2422
2423 (void)qlt_24xx_process_response_error(vha, pkt);
2424
2425 ((response_t *)pkt)->signature = RESPONSE_PROCESSED;
2426 wmb();
2427 continue;
2428 }
2429
2430 switch (pkt->entry_type) {
2431 case STATUS_TYPE:
2432 qla2x00_status_entry(vha, rsp, pkt);
2433 break;
2434 case STATUS_CONT_TYPE:
2435 qla2x00_status_cont_entry(rsp, (sts_cont_entry_t *)pkt);
2436 break;
2437 case VP_RPT_ID_IOCB_TYPE:
2438 qla24xx_report_id_acquisition(vha,
2439 (struct vp_rpt_id_entry_24xx *)pkt);
2440 break;
2441 case LOGINOUT_PORT_IOCB_TYPE:
2442 qla24xx_logio_entry(vha, rsp->req,
2443 (struct logio_entry_24xx *)pkt);
2444 break;
2445 case TSK_MGMT_IOCB_TYPE:
2446 qla24xx_tm_iocb_entry(vha, rsp->req,
2447 (struct tsk_mgmt_entry *)pkt);
2448 break;
2449 case CT_IOCB_TYPE:
2450 qla24xx_els_ct_entry(vha, rsp->req, pkt, CT_IOCB_TYPE);
2451 break;
2452 case ELS_IOCB_TYPE:
2453 qla24xx_els_ct_entry(vha, rsp->req, pkt, ELS_IOCB_TYPE);
2454 break;
2455 case ABTS_RECV_24XX:
2456
2457 qlt_24xx_process_atio_queue(vha);
2458 case ABTS_RESP_24XX:
2459 case CTIO_TYPE7:
2460 case NOTIFY_ACK_TYPE:
2461 qlt_response_pkt_all_vps(vha, (response_t *)pkt);
2462 break;
2463 case MARKER_TYPE:
2464
2465
2466
2467 break;
2468 default:
2469
2470 ql_dbg(ql_dbg_async, vha, 0x5042,
2471 "Received unknown response pkt type %x "
2472 "entry status=%x.\n",
2473 pkt->entry_type, pkt->entry_status);
2474 break;
2475 }
2476 ((response_t *)pkt)->signature = RESPONSE_PROCESSED;
2477 wmb();
2478 }
2479
2480
2481 if (IS_P3P_TYPE(ha)) {
2482 struct device_reg_82xx __iomem *reg = &ha->iobase->isp82;
2483 WRT_REG_DWORD(®->rsp_q_out[0], rsp->ring_index);
2484 } else
2485 WRT_REG_DWORD(rsp->rsp_q_out, rsp->ring_index);
2486}
2487
2488static void
2489qla2xxx_check_risc_status(scsi_qla_host_t *vha)
2490{
2491 int rval;
2492 uint32_t cnt;
2493 struct qla_hw_data *ha = vha->hw;
2494 struct device_reg_24xx __iomem *reg = &ha->iobase->isp24;
2495
2496 if (!IS_QLA25XX(ha) && !IS_QLA81XX(ha) && !IS_QLA83XX(ha))
2497 return;
2498
2499 rval = QLA_SUCCESS;
2500 WRT_REG_DWORD(®->iobase_addr, 0x7C00);
2501 RD_REG_DWORD(®->iobase_addr);
2502 WRT_REG_DWORD(®->iobase_window, 0x0001);
2503 for (cnt = 10000; (RD_REG_DWORD(®->iobase_window) & BIT_0) == 0 &&
2504 rval == QLA_SUCCESS; cnt--) {
2505 if (cnt) {
2506 WRT_REG_DWORD(®->iobase_window, 0x0001);
2507 udelay(10);
2508 } else
2509 rval = QLA_FUNCTION_TIMEOUT;
2510 }
2511 if (rval == QLA_SUCCESS)
2512 goto next_test;
2513
2514 rval = QLA_SUCCESS;
2515 WRT_REG_DWORD(®->iobase_window, 0x0003);
2516 for (cnt = 100; (RD_REG_DWORD(®->iobase_window) & BIT_0) == 0 &&
2517 rval == QLA_SUCCESS; cnt--) {
2518 if (cnt) {
2519 WRT_REG_DWORD(®->iobase_window, 0x0003);
2520 udelay(10);
2521 } else
2522 rval = QLA_FUNCTION_TIMEOUT;
2523 }
2524 if (rval != QLA_SUCCESS)
2525 goto done;
2526
2527next_test:
2528 if (RD_REG_DWORD(®->iobase_c8) & BIT_3)
2529 ql_log(ql_log_info, vha, 0x504c,
2530 "Additional code -- 0x55AA.\n");
2531
2532done:
2533 WRT_REG_DWORD(®->iobase_window, 0x0000);
2534 RD_REG_DWORD(®->iobase_window);
2535}
2536
2537
2538
2539
2540
2541
2542
2543
2544
2545
2546irqreturn_t
2547qla24xx_intr_handler(int irq, void *dev_id)
2548{
2549 scsi_qla_host_t *vha;
2550 struct qla_hw_data *ha;
2551 struct device_reg_24xx __iomem *reg;
2552 int status;
2553 unsigned long iter;
2554 uint32_t stat;
2555 uint32_t hccr;
2556 uint16_t mb[8];
2557 struct rsp_que *rsp;
2558 unsigned long flags;
2559
2560 rsp = (struct rsp_que *) dev_id;
2561 if (!rsp) {
2562 ql_log(ql_log_info, NULL, 0x5059,
2563 "%s: NULL response queue pointer.\n", __func__);
2564 return IRQ_NONE;
2565 }
2566
2567 ha = rsp->hw;
2568 reg = &ha->iobase->isp24;
2569 status = 0;
2570
2571 if (unlikely(pci_channel_offline(ha->pdev)))
2572 return IRQ_HANDLED;
2573
2574 spin_lock_irqsave(&ha->hardware_lock, flags);
2575 vha = pci_get_drvdata(ha->pdev);
2576 for (iter = 50; iter--; ) {
2577 stat = RD_REG_DWORD(®->host_status);
2578 if (stat & HSRX_RISC_PAUSED) {
2579 if (unlikely(pci_channel_offline(ha->pdev)))
2580 break;
2581
2582 hccr = RD_REG_DWORD(®->hccr);
2583
2584 ql_log(ql_log_warn, vha, 0x504b,
2585 "RISC paused -- HCCR=%x, Dumping firmware.\n",
2586 hccr);
2587
2588 qla2xxx_check_risc_status(vha);
2589
2590 ha->isp_ops->fw_dump(vha, 1);
2591 set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
2592 break;
2593 } else if ((stat & HSRX_RISC_INT) == 0)
2594 break;
2595
2596 switch (stat & 0xff) {
2597 case INTR_ROM_MB_SUCCESS:
2598 case INTR_ROM_MB_FAILED:
2599 case INTR_MB_SUCCESS:
2600 case INTR_MB_FAILED:
2601 qla24xx_mbx_completion(vha, MSW(stat));
2602 status |= MBX_INTERRUPT;
2603
2604 break;
2605 case INTR_ASYNC_EVENT:
2606 mb[0] = MSW(stat);
2607 mb[1] = RD_REG_WORD(®->mailbox1);
2608 mb[2] = RD_REG_WORD(®->mailbox2);
2609 mb[3] = RD_REG_WORD(®->mailbox3);
2610 qla2x00_async_event(vha, rsp, mb);
2611 break;
2612 case INTR_RSP_QUE_UPDATE:
2613 case INTR_RSP_QUE_UPDATE_83XX:
2614 qla24xx_process_response_queue(vha, rsp);
2615 break;
2616 case INTR_ATIO_QUE_UPDATE:
2617 qlt_24xx_process_atio_queue(vha);
2618 break;
2619 case INTR_ATIO_RSP_QUE_UPDATE:
2620 qlt_24xx_process_atio_queue(vha);
2621 qla24xx_process_response_queue(vha, rsp);
2622 break;
2623 default:
2624 ql_dbg(ql_dbg_async, vha, 0x504f,
2625 "Unrecognized interrupt type (%d).\n", stat * 0xff);
2626 break;
2627 }
2628 WRT_REG_DWORD(®->hccr, HCCRX_CLR_RISC_INT);
2629 RD_REG_DWORD_RELAXED(®->hccr);
2630 if (unlikely(IS_QLA83XX(ha) && (ha->pdev->revision == 1)))
2631 ndelay(3500);
2632 }
2633 qla2x00_handle_mbx_completion(ha, status);
2634 spin_unlock_irqrestore(&ha->hardware_lock, flags);
2635
2636 return IRQ_HANDLED;
2637}
2638
2639static irqreturn_t
2640qla24xx_msix_rsp_q(int irq, void *dev_id)
2641{
2642 struct qla_hw_data *ha;
2643 struct rsp_que *rsp;
2644 struct device_reg_24xx __iomem *reg;
2645 struct scsi_qla_host *vha;
2646 unsigned long flags;
2647
2648 rsp = (struct rsp_que *) dev_id;
2649 if (!rsp) {
2650 ql_log(ql_log_info, NULL, 0x505a,
2651 "%s: NULL response queue pointer.\n", __func__);
2652 return IRQ_NONE;
2653 }
2654 ha = rsp->hw;
2655 reg = &ha->iobase->isp24;
2656
2657 spin_lock_irqsave(&ha->hardware_lock, flags);
2658
2659 vha = pci_get_drvdata(ha->pdev);
2660 qla24xx_process_response_queue(vha, rsp);
2661 if (!ha->flags.disable_msix_handshake) {
2662 WRT_REG_DWORD(®->hccr, HCCRX_CLR_RISC_INT);
2663 RD_REG_DWORD_RELAXED(®->hccr);
2664 }
2665 spin_unlock_irqrestore(&ha->hardware_lock, flags);
2666
2667 return IRQ_HANDLED;
2668}
2669
2670static irqreturn_t
2671qla25xx_msix_rsp_q(int irq, void *dev_id)
2672{
2673 struct qla_hw_data *ha;
2674 struct rsp_que *rsp;
2675 struct device_reg_24xx __iomem *reg;
2676 unsigned long flags;
2677
2678 rsp = (struct rsp_que *) dev_id;
2679 if (!rsp) {
2680 ql_log(ql_log_info, NULL, 0x505b,
2681 "%s: NULL response queue pointer.\n", __func__);
2682 return IRQ_NONE;
2683 }
2684 ha = rsp->hw;
2685
2686
2687 if (!ha->flags.disable_msix_handshake) {
2688 reg = &ha->iobase->isp24;
2689 spin_lock_irqsave(&ha->hardware_lock, flags);
2690 WRT_REG_DWORD(®->hccr, HCCRX_CLR_RISC_INT);
2691 RD_REG_DWORD_RELAXED(®->hccr);
2692 spin_unlock_irqrestore(&ha->hardware_lock, flags);
2693 }
2694 queue_work_on((int) (rsp->id - 1), ha->wq, &rsp->q_work);
2695
2696 return IRQ_HANDLED;
2697}
2698
2699static irqreturn_t
2700qla24xx_msix_default(int irq, void *dev_id)
2701{
2702 scsi_qla_host_t *vha;
2703 struct qla_hw_data *ha;
2704 struct rsp_que *rsp;
2705 struct device_reg_24xx __iomem *reg;
2706 int status;
2707 uint32_t stat;
2708 uint32_t hccr;
2709 uint16_t mb[8];
2710 unsigned long flags;
2711
2712 rsp = (struct rsp_que *) dev_id;
2713 if (!rsp) {
2714 ql_log(ql_log_info, NULL, 0x505c,
2715 "%s: NULL response queue pointer.\n", __func__);
2716 return IRQ_NONE;
2717 }
2718 ha = rsp->hw;
2719 reg = &ha->iobase->isp24;
2720 status = 0;
2721
2722 spin_lock_irqsave(&ha->hardware_lock, flags);
2723 vha = pci_get_drvdata(ha->pdev);
2724 do {
2725 stat = RD_REG_DWORD(®->host_status);
2726 if (stat & HSRX_RISC_PAUSED) {
2727 if (unlikely(pci_channel_offline(ha->pdev)))
2728 break;
2729
2730 hccr = RD_REG_DWORD(®->hccr);
2731
2732 ql_log(ql_log_info, vha, 0x5050,
2733 "RISC paused -- HCCR=%x, Dumping firmware.\n",
2734 hccr);
2735
2736 qla2xxx_check_risc_status(vha);
2737
2738 ha->isp_ops->fw_dump(vha, 1);
2739 set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
2740 break;
2741 } else if ((stat & HSRX_RISC_INT) == 0)
2742 break;
2743
2744 switch (stat & 0xff) {
2745 case INTR_ROM_MB_SUCCESS:
2746 case INTR_ROM_MB_FAILED:
2747 case INTR_MB_SUCCESS:
2748 case INTR_MB_FAILED:
2749 qla24xx_mbx_completion(vha, MSW(stat));
2750 status |= MBX_INTERRUPT;
2751
2752 break;
2753 case INTR_ASYNC_EVENT:
2754 mb[0] = MSW(stat);
2755 mb[1] = RD_REG_WORD(®->mailbox1);
2756 mb[2] = RD_REG_WORD(®->mailbox2);
2757 mb[3] = RD_REG_WORD(®->mailbox3);
2758 qla2x00_async_event(vha, rsp, mb);
2759 break;
2760 case INTR_RSP_QUE_UPDATE:
2761 case INTR_RSP_QUE_UPDATE_83XX:
2762 qla24xx_process_response_queue(vha, rsp);
2763 break;
2764 case INTR_ATIO_QUE_UPDATE:
2765 qlt_24xx_process_atio_queue(vha);
2766 break;
2767 case INTR_ATIO_RSP_QUE_UPDATE:
2768 qlt_24xx_process_atio_queue(vha);
2769 qla24xx_process_response_queue(vha, rsp);
2770 break;
2771 default:
2772 ql_dbg(ql_dbg_async, vha, 0x5051,
2773 "Unrecognized interrupt type (%d).\n", stat & 0xff);
2774 break;
2775 }
2776 WRT_REG_DWORD(®->hccr, HCCRX_CLR_RISC_INT);
2777 } while (0);
2778 qla2x00_handle_mbx_completion(ha, status);
2779 spin_unlock_irqrestore(&ha->hardware_lock, flags);
2780
2781 return IRQ_HANDLED;
2782}
2783
2784
2785
/* Pairing of an MSI-X vector's human-readable name with its handler. */
struct qla_init_msix_entry {
	const char *name;
	irq_handler_t handler;
};

/* Vector layout for ISP24xx/25xx: default, rsp_q, and multiqueue rsp_q. */
static struct qla_init_msix_entry msix_entries[3] = {
	{ "qla2xxx (default)", qla24xx_msix_default },
	{ "qla2xxx (rsp_q)", qla24xx_msix_rsp_q },
	{ "qla2xxx (multiq)", qla25xx_msix_rsp_q },
};

/* Vector layout for ISP82xx (P3P) parts. */
static struct qla_init_msix_entry qla82xx_msix_entries[2] = {
	{ "qla2xxx (default)", qla82xx_msix_default },
	{ "qla2xxx (rsp_q)", qla82xx_msix_rsp_q },
};

/* Vector layout for 83xx-class parts when a target-mode ATIO vector is used. */
static struct qla_init_msix_entry qla83xx_msix_entries[3] = {
	{ "qla2xxx (default)", qla24xx_msix_default },
	{ "qla2xxx (rsp_q)", qla24xx_msix_rsp_q },
	{ "qla2xxx (atio_q)", qla83xx_msix_atio_q },
};
2807
2808static void
2809qla24xx_disable_msix(struct qla_hw_data *ha)
2810{
2811 int i;
2812 struct qla_msix_entry *qentry;
2813 scsi_qla_host_t *vha = pci_get_drvdata(ha->pdev);
2814
2815 for (i = 0; i < ha->msix_count; i++) {
2816 qentry = &ha->msix_entries[i];
2817 if (qentry->have_irq)
2818 free_irq(qentry->vector, qentry->rsp);
2819 }
2820 pci_disable_msix(ha->pdev);
2821 kfree(ha->msix_entries);
2822 ha->msix_entries = NULL;
2823 ha->flags.msix_enabled = 0;
2824 ql_dbg(ql_dbg_init, vha, 0x0042,
2825 "Disabled the MSI.\n");
2826}
2827
/*
 * qla24xx_enable_msix() - Allocate and register MSI-X vectors.
 * @ha: HA context
 * @rsp: default response queue the vectors are initially bound to
 *
 * Requests ha->msix_count vectors from PCI (retrying once with however
 * many the platform offered, as long as that is at least MIN_MSIX_COUNT),
 * then registers a handler per vector from the chip-appropriate table:
 * qla83xx_msix_entries for target-capable 83xx, qla82xx_msix_entries for
 * P3P parts, msix_entries otherwise.  On success may enable multiqueue
 * (ha->mqenable).  Returns 0 on success or a negative errno.
 */
static int
qla24xx_enable_msix(struct qla_hw_data *ha, struct rsp_que *rsp)
{
#define MIN_MSIX_COUNT	2
	int i, ret;
	struct msix_entry *entries;
	struct qla_msix_entry *qentry;
	scsi_qla_host_t *vha = pci_get_drvdata(ha->pdev);

	entries = kzalloc(sizeof(struct msix_entry) * ha->msix_count,
			GFP_KERNEL);
	if (!entries) {
		ql_log(ql_log_warn, vha, 0x00bc,
		    "Failed to allocate memory for msix_entry.\n");
		return -ENOMEM;
	}

	for (i = 0; i < ha->msix_count; i++)
		entries[i].entry = i;

	/*
	 * pci_enable_msix() returns 0 on success, a positive number of
	 * available vectors on partial failure, or a negative errno.
	 */
	ret = pci_enable_msix(ha->pdev, entries, ha->msix_count);
	if (ret) {
		/* Too few vectors available (or hard error) -- give up.
		 * NOTE(review): this goto jumps into the nested if-block
		 * below, sharing its failure logging.
		 */
		if (ret < MIN_MSIX_COUNT)
			goto msix_failed;

		ql_log(ql_log_warn, vha, 0x00c6,
		    "MSI-X: Failed to enable support "
		    "-- %d/%d\n Retry with %d vectors.\n",
		    ha->msix_count, ret, ret);
		/* Retry with the reduced vector count the platform offered. */
		ha->msix_count = ret;
		ret = pci_enable_msix(ha->pdev, entries, ha->msix_count);
		if (ret) {
msix_failed:
			ql_log(ql_log_fatal, vha, 0x00c7,
			    "MSI-X: Failed to enable support, "
			    "giving up -- %d/%d.\n",
			    ha->msix_count, ret);
			goto msix_out;
		}
		/* One vector is reserved for the default queue. */
		ha->max_rsp_queues = ha->msix_count - 1;
	}
	ha->msix_entries = kzalloc(sizeof(struct qla_msix_entry) *
				ha->msix_count, GFP_KERNEL);
	if (!ha->msix_entries) {
		ql_log(ql_log_fatal, vha, 0x00c8,
		    "Failed to allocate memory for ha->msix_entries.\n");
		ret = -ENOMEM;
		goto msix_out;
	}
	ha->flags.msix_enabled = 1;

	/* Record the vector assignments before requesting any IRQs. */
	for (i = 0; i < ha->msix_count; i++) {
		qentry = &ha->msix_entries[i];
		qentry->vector = entries[i].vector;
		qentry->entry = entries[i].entry;
		qentry->have_irq = 0;
		qentry->rsp = NULL;
	}

	/* Enable MSI-X vectors for the base queue */
	for (i = 0; i < ha->msix_count; i++) {
		qentry = &ha->msix_entries[i];
		if (QLA_TGT_MODE_ENABLED() && IS_ATIO_MSIX_CAPABLE(ha)) {
			ret = request_irq(qentry->vector,
				qla83xx_msix_entries[i].handler,
				0, qla83xx_msix_entries[i].name, rsp);
		} else if (IS_P3P_TYPE(ha)) {
			ret = request_irq(qentry->vector,
				qla82xx_msix_entries[i].handler,
				0, qla82xx_msix_entries[i].name, rsp);
		} else {
			ret = request_irq(qentry->vector,
				msix_entries[i].handler,
				0, msix_entries[i].name, rsp);
		}
		if (ret) {
			ql_log(ql_log_fatal, vha, 0x00cb,
			    "MSI-X: unable to register handler -- %x/%d.\n",
			    qentry->vector, ret);
			/* Roll back any vectors registered so far. */
			qla24xx_disable_msix(ha);
			ha->mqenable = 0;
			goto msix_out;
		}
		qentry->have_irq = 1;
		qentry->rsp = rsp;
		rsp->msix = qentry;
	}

	/* Enable MSI-X vector for response queue update for queue 0 */
	if (IS_QLA83XX(ha)) {
		if (ha->msixbase && ha->mqiobase &&
		    (ha->max_rsp_queues > 1 || ha->max_req_queues > 1))
			ha->mqenable = 1;
	} else
		if (ha->mqiobase
		    && (ha->max_rsp_queues > 1 || ha->max_req_queues > 1))
			ha->mqenable = 1;
	ql_dbg(ql_dbg_multiq, vha, 0xc005,
	    "mqiobase=%p, max_rsp_queues=%d, max_req_queues=%d.\n",
	    ha->mqiobase, ha->max_rsp_queues, ha->max_req_queues);
	ql_dbg(ql_dbg_init, vha, 0x0055,
	    "mqiobase=%p, max_rsp_queues=%d, max_req_queues=%d.\n",
	    ha->mqiobase, ha->max_rsp_queues, ha->max_req_queues);

msix_out:
	kfree(entries);
	return ret;
}
2936
2937int
2938qla2x00_request_irqs(struct qla_hw_data *ha, struct rsp_que *rsp)
2939{
2940 int ret;
2941 device_reg_t __iomem *reg = ha->iobase;
2942 scsi_qla_host_t *vha = pci_get_drvdata(ha->pdev);
2943
2944
2945 if (!IS_QLA2432(ha) && !IS_QLA2532(ha) && !IS_QLA8432(ha) &&
2946 !IS_CNA_CAPABLE(ha) && !IS_QLA2031(ha) && !IS_QLAFX00(ha))
2947 goto skip_msi;
2948
2949 if (ha->pdev->subsystem_vendor == PCI_VENDOR_ID_HP &&
2950 (ha->pdev->subsystem_device == 0x7040 ||
2951 ha->pdev->subsystem_device == 0x7041 ||
2952 ha->pdev->subsystem_device == 0x1705)) {
2953 ql_log(ql_log_warn, vha, 0x0034,
2954 "MSI-X: Unsupported ISP 2432 SSVID/SSDID (0x%X,0x%X).\n",
2955 ha->pdev->subsystem_vendor,
2956 ha->pdev->subsystem_device);
2957 goto skip_msi;
2958 }
2959
2960 if (IS_QLA2432(ha) && (ha->pdev->revision < QLA_MSIX_CHIP_REV_24XX)) {
2961 ql_log(ql_log_warn, vha, 0x0035,
2962 "MSI-X; Unsupported ISP2432 (0x%X, 0x%X).\n",
2963 ha->pdev->revision, QLA_MSIX_CHIP_REV_24XX);
2964 goto skip_msix;
2965 }
2966
2967 ret = qla24xx_enable_msix(ha, rsp);
2968 if (!ret) {
2969 ql_dbg(ql_dbg_init, vha, 0x0036,
2970 "MSI-X: Enabled (0x%X, 0x%X).\n",
2971 ha->chip_revision, ha->fw_attributes);
2972 goto clear_risc_ints;
2973 }
2974 ql_log(ql_log_info, vha, 0x0037,
2975 "MSI-X Falling back-to MSI mode -%d.\n", ret);
2976skip_msix:
2977
2978 if (!IS_QLA24XX(ha) && !IS_QLA2532(ha) && !IS_QLA8432(ha) &&
2979 !IS_QLA8001(ha) && !IS_P3P_TYPE(ha) && !IS_QLAFX00(ha))
2980 goto skip_msi;
2981
2982 ret = pci_enable_msi(ha->pdev);
2983 if (!ret) {
2984 ql_dbg(ql_dbg_init, vha, 0x0038,
2985 "MSI: Enabled.\n");
2986 ha->flags.msi_enabled = 1;
2987 } else
2988 ql_log(ql_log_warn, vha, 0x0039,
2989 "MSI-X; Falling back-to INTa mode -- %d.\n", ret);
2990
2991
2992 if (!ha->flags.msi_enabled && IS_QLA82XX(ha))
2993 return QLA_FUNCTION_FAILED;
2994
2995skip_msi:
2996
2997 ret = request_irq(ha->pdev->irq, ha->isp_ops->intr_handler,
2998 ha->flags.msi_enabled ? 0 : IRQF_SHARED,
2999 QLA2XXX_DRIVER_NAME, rsp);
3000 if (ret) {
3001 ql_log(ql_log_warn, vha, 0x003a,
3002 "Failed to reserve interrupt %d already in use.\n",
3003 ha->pdev->irq);
3004 goto fail;
3005 } else if (!ha->flags.msi_enabled) {
3006 ql_dbg(ql_dbg_init, vha, 0x0125,
3007 "INTa mode: Enabled.\n");
3008 ha->flags.mr_intr_valid = 1;
3009 }
3010
3011clear_risc_ints:
3012
3013 spin_lock_irq(&ha->hardware_lock);
3014 if (!IS_FWI2_CAPABLE(ha))
3015 WRT_REG_WORD(®->isp.semaphore, 0);
3016 spin_unlock_irq(&ha->hardware_lock);
3017
3018fail:
3019 return ret;
3020}
3021
3022void
3023qla2x00_free_irqs(scsi_qla_host_t *vha)
3024{
3025 struct qla_hw_data *ha = vha->hw;
3026 struct rsp_que *rsp;
3027
3028
3029
3030
3031
3032 if (!ha->rsp_q_map || !ha->rsp_q_map[0])
3033 return;
3034 rsp = ha->rsp_q_map[0];
3035
3036 if (ha->flags.msix_enabled)
3037 qla24xx_disable_msix(ha);
3038 else if (ha->flags.msi_enabled) {
3039 free_irq(ha->pdev->irq, rsp);
3040 pci_disable_msi(ha->pdev);
3041 } else
3042 free_irq(ha->pdev->irq, rsp);
3043}
3044
3045
3046int qla25xx_request_irq(struct rsp_que *rsp)
3047{
3048 struct qla_hw_data *ha = rsp->hw;
3049 struct qla_init_msix_entry *intr = &msix_entries[2];
3050 struct qla_msix_entry *msix = rsp->msix;
3051 scsi_qla_host_t *vha = pci_get_drvdata(ha->pdev);
3052 int ret;
3053
3054 ret = request_irq(msix->vector, intr->handler, 0, intr->name, rsp);
3055 if (ret) {
3056 ql_log(ql_log_fatal, vha, 0x00e6,
3057 "MSI-X: Unable to register handler -- %x/%d.\n",
3058 msix->vector, ret);
3059 return ret;
3060 }
3061 msix->have_irq = 1;
3062 msix->rsp = rsp;
3063 return ret;
3064}
3065