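/*
 * QLogic Fibre Channel HBA Driver -- interrupt service routines:
 * mailbox completions, asynchronous events, and response queue
 * processing.
 */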
#include "qla_def.h"
#include "qla_target.h"

#include <linux/delay.h>
#include <linux/slab.h>
#include <scsi/scsi_tcq.h>
#include <scsi/scsi_bsg_fc.h>
#include <scsi/scsi_eh.h>

static void qla2x00_mbx_completion(scsi_qla_host_t *, uint16_t);
static void qla2x00_status_entry(scsi_qla_host_t *, struct rsp_que *, void *);
static void qla2x00_status_cont_entry(struct rsp_que *, sts_cont_entry_t *);
static void qla2x00_error_entry(scsi_qla_host_t *, struct rsp_que *,
	sts_entry_t *);
static void qla_irq_affinity_notify(struct irq_affinity_notify *,
	const cpumask_t *);
static void qla_irq_affinity_release(struct kref *);
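/**
 * qla2100_intr_handler() - Process interrupts for the ISP2100 and ISP2200.
 * @irq: interrupt number
 * @dev_id: SCSI driver HA context (response queue)
 *
 * Called by the system whenever the host adapter generates an interrupt.
 *
 * Returns handled flag.
 */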
irqreturn_t
qla2100_intr_handler(int irq, void *dev_id)
{
	scsi_qla_host_t *vha;
	struct qla_hw_data *ha;
	struct device_reg_2xxx __iomem *reg;
	int status;
	unsigned long iter;
	uint16_t hccr;
	uint16_t mb[4];
	struct rsp_que *rsp;
	unsigned long flags;

	rsp = (struct rsp_que *) dev_id;
	if (!rsp) {
		ql_log(ql_log_info, NULL, 0x505d,
		    "%s: NULL response queue pointer.\n", __func__);
		return (IRQ_NONE);
	}

	ha = rsp->hw;
	reg = &ha->iobase->isp;
	status = 0;

	spin_lock_irqsave(&ha->hardware_lock, flags);
	vha = pci_get_drvdata(ha->pdev);
	for (iter = 50; iter--; ) {
		hccr = RD_REG_WORD(&reg->hccr);
		if (qla2x00_check_reg16_for_disconnect(vha, hccr))
			break;
		if (hccr & HCCR_RISC_PAUSE) {
			if (pci_channel_offline(ha->pdev))
				break;

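			/*
			 * Issue a "HARD" reset in order for the RISC
			 * interrupt bit to be cleared.  Schedule a big
			 * hammer to get out of the RISC PAUSED state.
			 */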
			WRT_REG_WORD(&reg->hccr, HCCR_RESET_RISC);
			RD_REG_WORD(&reg->hccr);

			ha->isp_ops->fw_dump(vha, 1);
			set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
			break;
		} else if ((RD_REG_WORD(&reg->istatus) & ISR_RISC_INT) == 0)
			break;

		if (RD_REG_WORD(&reg->semaphore) & BIT_0) {
			WRT_REG_WORD(&reg->hccr, HCCR_CLR_RISC_INT);
			RD_REG_WORD(&reg->hccr);

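			/* Get mailbox data. */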
			mb[0] = RD_MAILBOX_REG(ha, reg, 0);
			if (mb[0] > 0x3fff && mb[0] < 0x8000) {
				qla2x00_mbx_completion(vha, mb[0]);
				status |= MBX_INTERRUPT;
			} else if (mb[0] > 0x7fff && mb[0] < 0xc000) {
				mb[1] = RD_MAILBOX_REG(ha, reg, 1);
				mb[2] = RD_MAILBOX_REG(ha, reg, 2);
				mb[3] = RD_MAILBOX_REG(ha, reg, 3);
				qla2x00_async_event(vha, rsp, mb);
			} else {
				ql_dbg(ql_dbg_async, vha, 0x5025,
				    "Unrecognized interrupt type (%d).\n",
				    mb[0]);
			}

			WRT_REG_WORD(&reg->semaphore, 0);
			RD_REG_WORD(&reg->semaphore);
		} else {
			qla2x00_process_response_queue(rsp);

			WRT_REG_WORD(&reg->hccr, HCCR_CLR_RISC_INT);
			RD_REG_WORD(&reg->hccr);
		}
	}
	qla2x00_handle_mbx_completion(ha, status);
	spin_unlock_irqrestore(&ha->hardware_lock, flags);

	return (IRQ_HANDLED);
}

bool
qla2x00_check_reg32_for_disconnect(scsi_qla_host_t *vha, uint32_t reg)
{
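	/* Check for PCI disconnection. */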
	if (reg == 0xffffffff && !pci_channel_offline(vha->hw->pdev)) {
		if (!test_and_set_bit(PFLG_DISCONNECTED, &vha->pci_flags) &&
		    !test_bit(PFLG_DRIVER_REMOVING, &vha->pci_flags) &&
		    !test_bit(PFLG_DRIVER_PROBING, &vha->pci_flags)) {
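			/*
			 * Schedule this (only once) on the default system
			 * workqueue so that all the adapter workqueues and
			 * the DPC thread can be shutdown cleanly.
			 */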
			schedule_work(&vha->hw->board_disable);
		}
		return true;
	} else
		return false;
}

bool
qla2x00_check_reg16_for_disconnect(scsi_qla_host_t *vha, uint16_t reg)
{
	return qla2x00_check_reg32_for_disconnect(vha, 0xffff0000 | reg);
}

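/**
 * qla2300_intr_handler() - Process interrupts for the ISP23xx and ISP63xx.
 * @irq: interrupt number
 * @dev_id: SCSI driver HA context (response queue)
 *
 * Called by the system whenever the host adapter generates an interrupt.
 *
 * Returns handled flag.
 */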
irqreturn_t
qla2300_intr_handler(int irq, void *dev_id)
{
	scsi_qla_host_t *vha;
	struct device_reg_2xxx __iomem *reg;
	int status;
	unsigned long iter;
	uint32_t stat;
	uint16_t hccr;
	uint16_t mb[4];
	struct rsp_que *rsp;
	struct qla_hw_data *ha;
	unsigned long flags;

	rsp = (struct rsp_que *) dev_id;
	if (!rsp) {
		ql_log(ql_log_info, NULL, 0x5058,
		    "%s: NULL response queue pointer.\n", __func__);
		return (IRQ_NONE);
	}

	ha = rsp->hw;
	reg = &ha->iobase->isp;
	status = 0;

	spin_lock_irqsave(&ha->hardware_lock, flags);
	vha = pci_get_drvdata(ha->pdev);
	for (iter = 50; iter--; ) {
		stat = RD_REG_DWORD(&reg->u.isp2300.host_status);
		if (qla2x00_check_reg32_for_disconnect(vha, stat))
			break;
		if (stat & HSR_RISC_PAUSED) {
			if (unlikely(pci_channel_offline(ha->pdev)))
				break;

			hccr = RD_REG_WORD(&reg->hccr);

			if (hccr & (BIT_15 | BIT_13 | BIT_11 | BIT_8))
				ql_log(ql_log_warn, vha, 0x5026,
				    "Parity error -- HCCR=%x, Dumping "
				    "firmware.\n", hccr);
			else
				ql_log(ql_log_warn, vha, 0x5027,
				    "RISC paused -- HCCR=%x, Dumping "
				    "firmware.\n", hccr);

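			/*
			 * Issue a "HARD" reset in order for the RISC
			 * interrupt bit to be cleared.  Schedule a big
			 * hammer to get out of the RISC PAUSED state.
			 */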
			WRT_REG_WORD(&reg->hccr, HCCR_RESET_RISC);
			RD_REG_WORD(&reg->hccr);

			ha->isp_ops->fw_dump(vha, 1);
			set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
			break;
		} else if ((stat & HSR_RISC_INT) == 0)
			break;

		switch (stat & 0xff) {
		case 0x1:
		case 0x2:
		case 0x10:
		case 0x11:
			qla2x00_mbx_completion(vha, MSW(stat));
			status |= MBX_INTERRUPT;

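			/* Release mailbox registers. */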
			WRT_REG_WORD(&reg->semaphore, 0);
			break;
		case 0x12:
			mb[0] = MSW(stat);
			mb[1] = RD_MAILBOX_REG(ha, reg, 1);
			mb[2] = RD_MAILBOX_REG(ha, reg, 2);
			mb[3] = RD_MAILBOX_REG(ha, reg, 3);
			qla2x00_async_event(vha, rsp, mb);
			break;
		case 0x13:
			qla2x00_process_response_queue(rsp);
			break;
		case 0x15:
			mb[0] = MBA_CMPLT_1_16BIT;
			mb[1] = MSW(stat);
			qla2x00_async_event(vha, rsp, mb);
			break;
		case 0x16:
			mb[0] = MBA_SCSI_COMPLETION;
			mb[1] = MSW(stat);
			mb[2] = RD_MAILBOX_REG(ha, reg, 2);
			qla2x00_async_event(vha, rsp, mb);
			break;
		default:
			ql_dbg(ql_dbg_async, vha, 0x5028,
			    "Unrecognized interrupt type (%d).\n", stat & 0xff);
			break;
		}
		WRT_REG_WORD(&reg->hccr, HCCR_CLR_RISC_INT);
		RD_REG_WORD_RELAXED(&reg->hccr);
	}
	qla2x00_handle_mbx_completion(ha, status);
	spin_unlock_irqrestore(&ha->hardware_lock, flags);

	return (IRQ_HANDLED);
}

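/**
 * qla2x00_mbx_completion() - Process mailbox command completions.
 * @vha: SCSI driver HA context
 * @mb0: Mailbox 0 contents
 */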
static void
qla2x00_mbx_completion(scsi_qla_host_t *vha, uint16_t mb0)
{
	uint16_t cnt;
	uint32_t mboxes;
	uint16_t __iomem *wptr;
	struct qla_hw_data *ha = vha->hw;
	struct device_reg_2xxx __iomem *reg = &ha->iobase->isp;

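	/* Read all mbox registers? */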
	mboxes = (1 << ha->mbx_count) - 1;
	if (!ha->mcp)
		ql_dbg(ql_dbg_async, vha, 0x5001, "MBX pointer ERROR.\n");
	else
		mboxes = ha->mcp->in_mb;

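	/* Load return mailbox registers. */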
	ha->flags.mbox_int = 1;
	ha->mailbox_out[0] = mb0;
	mboxes >>= 1;
	wptr = (uint16_t __iomem *)MAILBOX_REG(ha, reg, 1);

	for (cnt = 1; cnt < ha->mbx_count; cnt++) {
		if (IS_QLA2200(ha) && cnt == 8)
			wptr = (uint16_t __iomem *)MAILBOX_REG(ha, reg, 8);
		if ((cnt == 4 || cnt == 5) && (mboxes & BIT_0))
			ha->mailbox_out[cnt] = qla2x00_debounce_register(wptr);
		else if (mboxes & BIT_0)
			ha->mailbox_out[cnt] = RD_REG_WORD(wptr);

		wptr++;
		mboxes >>= 1;
	}
}

static void
qla81xx_idc_event(scsi_qla_host_t *vha, uint16_t aen, uint16_t descr)
{
	static char *event[] =
		{ "Complete", "Request Notification", "Time Extension" };
	int rval;
	struct device_reg_24xx __iomem *reg24 = &vha->hw->iobase->isp24;
	struct device_reg_82xx __iomem *reg82 = &vha->hw->iobase->isp82;
	uint16_t __iomem *wptr;
	uint16_t cnt, timeout, mb[QLA_IDC_ACK_REGS];

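	/* Seed data -- mailbox1 -> mailbox7. */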
	if (IS_QLA81XX(vha->hw) || IS_QLA83XX(vha->hw))
		wptr = (uint16_t __iomem *)&reg24->mailbox1;
	else if (IS_QLA8044(vha->hw))
		wptr = (uint16_t __iomem *)&reg82->mailbox_out[1];
	else
		return;

	for (cnt = 0; cnt < QLA_IDC_ACK_REGS; cnt++, wptr++)
		mb[cnt] = RD_REG_WORD(wptr);

	ql_dbg(ql_dbg_async, vha, 0x5021,
	    "Inter-Driver Communication %s -- "
	    "%04x %04x %04x %04x %04x %04x %04x.\n",
	    event[aen & 0xff], mb[0], mb[1], mb[2], mb[3],
	    mb[4], mb[5], mb[6]);
	switch (aen) {
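	/* Handle IDC Error completion case. */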
	case MBA_IDC_COMPLETE:
		if (mb[1] >> 15) {
			vha->hw->flags.idc_compl_status = 1;
			if (vha->hw->notify_dcbx_comp && !vha->vp_idx)
				complete(&vha->hw->dcbx_comp);
		}
		break;

	case MBA_IDC_NOTIFY:
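		/* Acknowledgement needed? [Notify && non-zero timeout] */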
		timeout = (descr >> 8) & 0xf;
		ql_dbg(ql_dbg_async, vha, 0x5022,
		    "%lu Inter-Driver Communication %s -- ACK timeout=%d.\n",
		    vha->host_no, event[aen & 0xff], timeout);

		if (!timeout)
			return;
		rval = qla2x00_post_idc_ack_work(vha, mb);
		if (rval != QLA_SUCCESS)
			ql_log(ql_log_warn, vha, 0x5023,
			    "IDC failed to post ACK.\n");
		break;
	case MBA_IDC_TIME_EXT:
		vha->hw->idc_extend_tmo = descr;
		ql_dbg(ql_dbg_async, vha, 0x5087,
		    "%lu Inter-Driver Communication %s -- "
		    "Extend timeout by=%d.\n",
		    vha->host_no, event[aen & 0xff], vha->hw->idc_extend_tmo);
		break;
	}
}

#define LS_UNKNOWN	2
const char *
qla2x00_get_link_speed_str(struct qla_hw_data *ha, uint16_t speed)
{
	static const char *const link_speeds[] = {
		"1", "2", "?", "4", "8", "16", "32", "10"
	};
#define	QLA_LAST_SPEED	7

	if (IS_QLA2100(ha) || IS_QLA2200(ha))
		return link_speeds[0];
	else if (speed == 0x13)
		return link_speeds[QLA_LAST_SPEED];
	else if (speed < QLA_LAST_SPEED)
		return link_speeds[speed];
	else
		return link_speeds[LS_UNKNOWN];
}

static void
qla83xx_handle_8200_aen(scsi_qla_host_t *vha, uint16_t *mb)
{
	struct qla_hw_data *ha = vha->hw;

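	/*
	 * 8200 AEN interpretation:
	 * mb[0] = AEN code
	 * mb[1] = AEN reason code
	 * mb[2] = LSW of Peg-Halt Status-1 register
	 * mb[6] = MSW of Peg-Halt Status-1 register
	 * mb[3] = LSW of Peg-Halt Status-2 register
	 * mb[7] = MSW of Peg-Halt Status-2 register
	 * mb[4] = IDC Device-State register value
	 * mb[5] = IDC Driver-Presence register value
	 */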
	ql_dbg(ql_dbg_async, vha, 0x506b, "AEN Code: mb[0] = 0x%x AEN reason: "
	    "mb[1] = 0x%x PH-status1: mb[2] = 0x%x PH-status1: mb[6] = 0x%x.\n",
	    mb[0], mb[1], mb[2], mb[6]);
	ql_dbg(ql_dbg_async, vha, 0x506c, "PH-status2: mb[3] = 0x%x "
	    "PH-status2: mb[7] = 0x%x Device-State: mb[4] = 0x%x "
	    "Drv-Presence: mb[5] = 0x%x.\n", mb[3], mb[7], mb[4], mb[5]);

	if (mb[1] & (IDC_PEG_HALT_STATUS_CHANGE | IDC_NIC_FW_REPORTED_FAILURE |
	    IDC_HEARTBEAT_FAILURE)) {
		ha->flags.nic_core_hung = 1;
		ql_log(ql_log_warn, vha, 0x5060,
		    "83XX: F/W Error Reported: Check if reset required.\n");

		if (mb[1] & IDC_PEG_HALT_STATUS_CHANGE) {
			uint32_t protocol_engine_id, fw_err_code, err_level;

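			/*
			 * Peg-Halt Status-1 register
			 * (mb[2] = LSW, mb[6] = MSW):
			 * Bits  0-7  = protocol-engine ID
			 * Bits  8-28 = firmware error code
			 * Bits 29-31 = error level:
			 *   0x1 = Non-Fatal error
			 *   0x2 = Recoverable Fatal error
			 *   0x4 = UnRecoverable Fatal error
			 */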
			protocol_engine_id = (mb[2] & 0xff);
			fw_err_code = (((mb[2] & 0xff00) >> 8) |
			    ((mb[6] & 0x1fff) << 8));
			err_level = ((mb[6] & 0xe000) >> 13);
			ql_log(ql_log_warn, vha, 0x5061, "PegHalt Status-1 "
			    "Register: protocol_engine_id=0x%x "
			    "fw_err_code=0x%x err_level=0x%x.\n",
			    protocol_engine_id, fw_err_code, err_level);
			ql_log(ql_log_warn, vha, 0x5062, "PegHalt Status-2 "
			    "Register: 0x%x%x.\n", mb[7], mb[3]);
			if (err_level == ERR_LEVEL_NON_FATAL) {
				ql_log(ql_log_warn, vha, 0x5063,
				    "Not a fatal error, f/w has recovered "
				    "itself.\n");
			} else if (err_level == ERR_LEVEL_RECOVERABLE_FATAL) {
				ql_log(ql_log_fatal, vha, 0x5064,
				    "Recoverable Fatal error: Chip reset "
				    "required.\n");
				qla83xx_schedule_work(vha,
				    QLA83XX_NIC_CORE_RESET);
			} else if (err_level == ERR_LEVEL_UNRECOVERABLE_FATAL) {
				ql_log(ql_log_fatal, vha, 0x5065,
				    "Unrecoverable Fatal error: Set FAILED "
				    "state, reboot required.\n");
				qla83xx_schedule_work(vha,
				    QLA83XX_NIC_CORE_UNRECOVERABLE);
			}
		}

		if (mb[1] & IDC_NIC_FW_REPORTED_FAILURE) {
			uint16_t peg_fw_state, nw_interface_link_up;
			uint16_t nw_interface_signal_detect, sfp_status;
			uint16_t htbt_counter, htbt_monitor_enable;
			uint16_t sfp_additional_info, sfp_multirate;
			uint16_t sfp_tx_fault, link_speed, dcbx_status;

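			/*
			 * Peg-to-FC port status register
			 * (mb[2] = LSW, mb[6] = MSW) decode:
			 * mb[2] bits  0-7  = Peg-firmware state
			 *       bit   8    = nw interface link up
			 *       bit   9    = nw interface signal detect
			 *       bits 10-11 = SFP status
			 *       bits 12-14 = heartbeat counter
			 *       bit  15    = heartbeat monitor enable
			 * mb[6] bits  0-1  = SFP additional info
			 *       bit   2    = SFP multirate
			 *       bit   3    = SFP tx fault
			 *       bits  4-6  = link speed
			 *       bits 12-14 = DCBX status
			 */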
			peg_fw_state = (mb[2] & 0x00ff);
			nw_interface_link_up = ((mb[2] & 0x0100) >> 8);
			nw_interface_signal_detect = ((mb[2] & 0x0200) >> 9);
			sfp_status = ((mb[2] & 0x0c00) >> 10);
			htbt_counter = ((mb[2] & 0x7000) >> 12);
			htbt_monitor_enable = ((mb[2] & 0x8000) >> 15);
			sfp_additional_info = (mb[6] & 0x0003);
			sfp_multirate = ((mb[6] & 0x0004) >> 2);
			sfp_tx_fault = ((mb[6] & 0x0008) >> 3);
			link_speed = ((mb[6] & 0x0070) >> 4);
			dcbx_status = ((mb[6] & 0x7000) >> 12);

			ql_log(ql_log_warn, vha, 0x5066,
			    "Peg-to-Fc Status Register:\n"
			    "peg_fw_state=0x%x, nw_interface_link_up=0x%x, "
			    "nw_interface_signal_detect=0x%x, "
			    "sfp_status=0x%x.\n", peg_fw_state,
			    nw_interface_link_up, nw_interface_signal_detect,
			    sfp_status);
			ql_log(ql_log_warn, vha, 0x5067,
			    "htbt_counter=0x%x, htbt_monitor_enable=0x%x, "
			    "sfp_additional_info=0x%x, sfp_multirate=0x%x.\n",
			    htbt_counter, htbt_monitor_enable,
			    sfp_additional_info, sfp_multirate);
			ql_log(ql_log_warn, vha, 0x5068,
			    "sfp_tx_fault=0x%x, link_speed=0x%x, "
			    "dcbx_status=0x%x.\n", sfp_tx_fault, link_speed,
			    dcbx_status);

			qla83xx_schedule_work(vha, QLA83XX_NIC_CORE_RESET);
		}

		if (mb[1] & IDC_HEARTBEAT_FAILURE) {
			ql_log(ql_log_warn, vha, 0x5069,
			    "Heartbeat Failure encountered, chip reset "
			    "required.\n");

			qla83xx_schedule_work(vha, QLA83XX_NIC_CORE_RESET);
		}
	}

	if (mb[1] & IDC_DEVICE_STATE_CHANGE) {
		ql_log(ql_log_info, vha, 0x506a,
		    "IDC Device-State changed = 0x%x.\n", mb[4]);
		if (ha->flags.nic_core_reset_owner)
			return;
		qla83xx_schedule_work(vha, MBA_IDC_AEN);
	}
}

int
qla2x00_is_a_vp_did(scsi_qla_host_t *vha, uint32_t rscn_entry)
{
	struct qla_hw_data *ha = vha->hw;
	scsi_qla_host_t *vp;
	uint32_t vp_did;
	unsigned long flags;
	int ret = 0;

	if (!ha->num_vhosts)
		return ret;

	spin_lock_irqsave(&ha->vport_slock, flags);
	list_for_each_entry(vp, &ha->vp_list, list) {
		vp_did = vp->d_id.b24;
		if (vp_did == rscn_entry) {
			ret = 1;
			break;
		}
	}
	spin_unlock_irqrestore(&ha->vport_slock, flags);

	return ret;
}

static inline fc_port_t *
qla2x00_find_fcport_by_loopid(scsi_qla_host_t *vha, uint16_t loop_id)
{
	fc_port_t *fcport;

	list_for_each_entry(fcport, &vha->vp_fcports, list)
		if (fcport->loop_id == loop_id)
			return fcport;
	return NULL;
}

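/**
 * qla2x00_async_event() - Process asynchronous events.
 * @vha: SCSI driver HA context
 * @rsp: response queue
 * @mb: Mailbox registers (0 - 3)
 */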
void
qla2x00_async_event(scsi_qla_host_t *vha, struct rsp_que *rsp, uint16_t *mb)
{
	uint16_t handle_cnt;
	uint16_t cnt, mbx;
	uint32_t handles[5];
	struct qla_hw_data *ha = vha->hw;
	struct device_reg_2xxx __iomem *reg = &ha->iobase->isp;
	struct device_reg_24xx __iomem *reg24 = &ha->iobase->isp24;
	struct device_reg_82xx __iomem *reg82 = &ha->iobase->isp82;
	uint32_t rscn_entry, host_pid;
	unsigned long flags;
	fc_port_t *fcport = NULL;

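	/* Setup to process RIO completion. */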
	handle_cnt = 0;
	if (IS_CNA_CAPABLE(ha))
		goto skip_rio;
	switch (mb[0]) {
	case MBA_SCSI_COMPLETION:
		handles[0] = le32_to_cpu((uint32_t)((mb[2] << 16) | mb[1]));
		handle_cnt = 1;
		break;
	case MBA_CMPLT_1_16BIT:
		handles[0] = mb[1];
		handle_cnt = 1;
		mb[0] = MBA_SCSI_COMPLETION;
		break;
	case MBA_CMPLT_2_16BIT:
		handles[0] = mb[1];
		handles[1] = mb[2];
		handle_cnt = 2;
		mb[0] = MBA_SCSI_COMPLETION;
		break;
	case MBA_CMPLT_3_16BIT:
		handles[0] = mb[1];
		handles[1] = mb[2];
		handles[2] = mb[3];
		handle_cnt = 3;
		mb[0] = MBA_SCSI_COMPLETION;
		break;
	case MBA_CMPLT_4_16BIT:
		handles[0] = mb[1];
		handles[1] = mb[2];
		handles[2] = mb[3];
		handles[3] = (uint32_t)RD_MAILBOX_REG(ha, reg, 6);
		handle_cnt = 4;
		mb[0] = MBA_SCSI_COMPLETION;
		break;
	case MBA_CMPLT_5_16BIT:
		handles[0] = mb[1];
		handles[1] = mb[2];
		handles[2] = mb[3];
		handles[3] = (uint32_t)RD_MAILBOX_REG(ha, reg, 6);
		handles[4] = (uint32_t)RD_MAILBOX_REG(ha, reg, 7);
		handle_cnt = 5;
		mb[0] = MBA_SCSI_COMPLETION;
		break;
	case MBA_CMPLT_2_32BIT:
		handles[0] = le32_to_cpu((uint32_t)((mb[2] << 16) | mb[1]));
		handles[1] = le32_to_cpu(
		    ((uint32_t)(RD_MAILBOX_REG(ha, reg, 7) << 16)) |
		    RD_MAILBOX_REG(ha, reg, 6));
		handle_cnt = 2;
		mb[0] = MBA_SCSI_COMPLETION;
		break;
	default:
		break;
	}
skip_rio:
	switch (mb[0]) {
	case MBA_SCSI_COMPLETION:
		if (!vha->flags.online)
			break;

		for (cnt = 0; cnt < handle_cnt; cnt++)
			qla2x00_process_completed_request(vha, rsp->req,
			    handles[cnt]);
		break;

	case MBA_RESET:
		ql_dbg(ql_dbg_async, vha, 0x5002,
		    "Asynchronous RESET.\n");

		set_bit(RESET_MARKER_NEEDED, &vha->dpc_flags);
		break;

	case MBA_SYSTEM_ERR:
		mbx = (IS_QLA81XX(ha) || IS_QLA83XX(ha) || IS_QLA27XX(ha)) ?
			RD_REG_WORD(&reg24->mailbox7) : 0;
		ql_log(ql_log_warn, vha, 0x5003,
		    "ISP System Error - mbx1=%xh mbx2=%xh mbx3=%xh "
		    "mbx7=%xh.\n", mb[1], mb[2], mb[3], mbx);

		ha->isp_ops->fw_dump(vha, 1);

		if (IS_FWI2_CAPABLE(ha)) {
			if (mb[1] == 0 && mb[2] == 0) {
				ql_log(ql_log_fatal, vha, 0x5004,
				    "Unrecoverable Hardware Error: adapter "
				    "marked OFFLINE!\n");
				vha->flags.online = 0;
				vha->device_flags |= DFLG_DEV_FAILED;
			} else {
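				/* Check to see if MPI timeout occurred */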
				if ((mbx & MBX_3) && (ha->port_no == 0))
					set_bit(MPI_RESET_NEEDED,
					    &vha->dpc_flags);

				set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
			}
		} else if (mb[1] == 0) {
			ql_log(ql_log_fatal, vha, 0x5005,
			    "Unrecoverable Hardware Error: adapter marked "
			    "OFFLINE!\n");
			vha->flags.online = 0;
			vha->device_flags |= DFLG_DEV_FAILED;
		} else
			set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
		break;

	case MBA_REQ_TRANSFER_ERR:
		ql_log(ql_log_warn, vha, 0x5006,
		    "ISP Request Transfer Error (%x).\n", mb[1]);

		set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
		break;

	case MBA_RSP_TRANSFER_ERR:
		ql_log(ql_log_warn, vha, 0x5007,
		    "ISP Response Transfer Error (%x).\n", mb[1]);

		set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
		break;

	case MBA_WAKEUP_THRES:
		ql_dbg(ql_dbg_async, vha, 0x5008,
		    "Asynchronous WAKEUP_THRES (%x).\n", mb[1]);
		break;

	case MBA_LOOP_INIT_ERR:
		ql_log(ql_log_warn, vha, 0x5090,
		    "LOOP INIT ERROR (%x).\n", mb[1]);
		ha->isp_ops->fw_dump(vha, 1);
		set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
		break;

	case MBA_LIP_OCCURRED:
		ql_dbg(ql_dbg_async, vha, 0x5009,
		    "LIP occurred (%x).\n", mb[1]);

		if (atomic_read(&vha->loop_state) != LOOP_DOWN) {
			atomic_set(&vha->loop_state, LOOP_DOWN);
			atomic_set(&vha->loop_down_timer, LOOP_DOWN_TIME);
			qla2x00_mark_all_devices_lost(vha, 1);
		}

		if (vha->vp_idx) {
			atomic_set(&vha->vp_state, VP_FAILED);
			fc_vport_set_state(vha->fc_vport, FC_VPORT_FAILED);
		}

		set_bit(REGISTER_FC4_NEEDED, &vha->dpc_flags);
		set_bit(REGISTER_FDMI_NEEDED, &vha->dpc_flags);

		vha->flags.management_server_logged_in = 0;
		qla2x00_post_aen_work(vha, FCH_EVT_LIP, mb[1]);
		break;

	case MBA_LOOP_UP:
		if (IS_QLA2100(ha) || IS_QLA2200(ha))
			ha->link_data_rate = PORT_SPEED_1GB;
		else
			ha->link_data_rate = mb[1];

		ql_log(ql_log_info, vha, 0x500a,
		    "LOOP UP detected (%s Gbps).\n",
		    qla2x00_get_link_speed_str(ha, ha->link_data_rate));

		vha->flags.management_server_logged_in = 0;
		qla2x00_post_aen_work(vha, FCH_EVT_LINKUP, ha->link_data_rate);
		break;

	case MBA_LOOP_DOWN:
		mbx = (IS_QLA81XX(ha) || IS_QLA8031(ha))
			? RD_REG_WORD(&reg24->mailbox4) : 0;
		mbx = (IS_P3P_TYPE(ha)) ? RD_REG_WORD(&reg82->mailbox_out[4])
			: mbx;
		ql_log(ql_log_info, vha, 0x500b,
		    "LOOP DOWN detected (%x %x %x %x).\n",
		    mb[1], mb[2], mb[3], mbx);

		if (atomic_read(&vha->loop_state) != LOOP_DOWN) {
			atomic_set(&vha->loop_state, LOOP_DOWN);
			atomic_set(&vha->loop_down_timer, LOOP_DOWN_TIME);
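			/*
			 * In case of loop down, restore WWPN from
			 * NVRAM in case of FA-WWPN capable ISP.
			 * Restore for Physical Port only.
			 */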
			if (!vha->vp_idx) {
				if (ha->flags.fawwpn_enabled) {
					void *wwpn = ha->init_cb->port_name;
					memcpy(vha->port_name, wwpn, WWN_SIZE);
					fc_host_port_name(vha->host) =
					    wwn_to_u64(vha->port_name);
					ql_dbg(ql_dbg_init + ql_dbg_verbose,
					    vha, 0x0144, "LOOP DOWN detected, "
					    "restore WWPN %016llx\n",
					    wwn_to_u64(vha->port_name));
				}

				clear_bit(VP_CONFIG_OK, &vha->vp_flags);
			}

			vha->device_flags |= DFLG_NO_CABLE;
			qla2x00_mark_all_devices_lost(vha, 1);
		}

		if (vha->vp_idx) {
			atomic_set(&vha->vp_state, VP_FAILED);
			fc_vport_set_state(vha->fc_vport, FC_VPORT_FAILED);
		}

		vha->flags.management_server_logged_in = 0;
		ha->link_data_rate = PORT_SPEED_UNKNOWN;
		qla2x00_post_aen_work(vha, FCH_EVT_LINKDOWN, 0);
		break;

	case MBA_LIP_RESET:
		ql_dbg(ql_dbg_async, vha, 0x500c,
		    "LIP reset occurred (%x).\n", mb[1]);

		if (atomic_read(&vha->loop_state) != LOOP_DOWN) {
			atomic_set(&vha->loop_state, LOOP_DOWN);
			atomic_set(&vha->loop_down_timer, LOOP_DOWN_TIME);
			qla2x00_mark_all_devices_lost(vha, 1);
		}

		if (vha->vp_idx) {
			atomic_set(&vha->vp_state, VP_FAILED);
			fc_vport_set_state(vha->fc_vport, FC_VPORT_FAILED);
		}

		set_bit(RESET_MARKER_NEEDED, &vha->dpc_flags);

		ha->operating_mode = LOOP;
		vha->flags.management_server_logged_in = 0;
		qla2x00_post_aen_work(vha, FCH_EVT_LIPRESET, mb[1]);
		break;

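	/* case MBA_DCBX_COMPLETE: */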
	case MBA_POINT_TO_POINT:
		if (IS_QLA2100(ha))
			break;

		if (IS_CNA_CAPABLE(ha)) {
			ql_dbg(ql_dbg_async, vha, 0x500d,
			    "DCBX Completed -- %04x %04x %04x.\n",
			    mb[1], mb[2], mb[3]);
			if (ha->notify_dcbx_comp && !vha->vp_idx)
				complete(&ha->dcbx_comp);

		} else
			ql_dbg(ql_dbg_async, vha, 0x500e,
			    "Asynchronous P2P MODE received.\n");

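		/*
		 * Until there's a transition from loop down to loop up, treat
		 * this as loop down only.
		 */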
		if (atomic_read(&vha->loop_state) != LOOP_DOWN) {
			atomic_set(&vha->loop_state, LOOP_DOWN);
			if (!atomic_read(&vha->loop_down_timer))
				atomic_set(&vha->loop_down_timer,
				    LOOP_DOWN_TIME);
			qla2x00_mark_all_devices_lost(vha, 1);
		}

		if (vha->vp_idx) {
			atomic_set(&vha->vp_state, VP_FAILED);
			fc_vport_set_state(vha->fc_vport, FC_VPORT_FAILED);
		}

		if (!(test_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags)))
			set_bit(RESET_MARKER_NEEDED, &vha->dpc_flags);

		set_bit(REGISTER_FC4_NEEDED, &vha->dpc_flags);
		set_bit(REGISTER_FDMI_NEEDED, &vha->dpc_flags);

		ha->flags.gpsc_supported = 1;
		vha->flags.management_server_logged_in = 0;
		break;

	case MBA_CHG_IN_CONNECTION:
		if (IS_QLA2100(ha))
			break;

		ql_dbg(ql_dbg_async, vha, 0x500f,
		    "Configuration change detected: value=%x.\n", mb[1]);

		if (atomic_read(&vha->loop_state) != LOOP_DOWN) {
			atomic_set(&vha->loop_state, LOOP_DOWN);
			if (!atomic_read(&vha->loop_down_timer))
				atomic_set(&vha->loop_down_timer,
				    LOOP_DOWN_TIME);
			qla2x00_mark_all_devices_lost(vha, 1);
		}

		if (vha->vp_idx) {
			atomic_set(&vha->vp_state, VP_FAILED);
			fc_vport_set_state(vha->fc_vport, FC_VPORT_FAILED);
		}

		set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags);
		set_bit(LOCAL_LOOP_UPDATE, &vha->dpc_flags);
		break;

	case MBA_PORT_UPDATE:
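		/*
		 * Handle only global and vn-port update events.
		 *
		 * Relevant inputs:
		 * mb[1] = N_Port handle of changed port
		 *         OR 0xffff for global event
		 * mb[2] = New login state
		 *         7 = Port logged out
		 * mb[3] = LSB is vp_idx, 0xff = all vps
		 *
		 * Skip processing if:
		 *       Event is global, vp_idx is NOT all vps,
		 *       vp_idx does not match
		 *       Event is not global, vp_idx does not match
		 */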
		if (IS_QLA2XXX_MIDTYPE(ha) &&
		    ((mb[1] == 0xffff && (mb[3] & 0xff) != 0xff) ||
		     (mb[1] != 0xffff)) && vha->vp_idx != (mb[3] & 0xff))
			break;

		if (mb[2] == 0x7) {
			ql_dbg(ql_dbg_async, vha, 0x5010,
			    "Port %s %04x %04x %04x.\n",
			    mb[1] == 0xffff ? "unavailable" : "logout",
			    mb[1], mb[2], mb[3]);

			if (mb[1] == 0xffff)
				goto global_port_update;

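			/* Port logout */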
			fcport = qla2x00_find_fcport_by_loopid(vha, mb[1]);
			if (!fcport)
				break;
			if (atomic_read(&fcport->state) != FCS_ONLINE)
				break;
			ql_dbg(ql_dbg_async, vha, 0x508a,
			    "Marking port lost loopid=%04x portid=%06x.\n",
			    fcport->loop_id, fcport->d_id.b24);
			qla2x00_mark_device_lost(fcport->vha, fcport, 1, 1);
			break;

global_port_update:
			if (atomic_read(&vha->loop_state) != LOOP_DOWN) {
				atomic_set(&vha->loop_state, LOOP_DOWN);
				atomic_set(&vha->loop_down_timer,
				    LOOP_DOWN_TIME);
				vha->device_flags |= DFLG_NO_CABLE;
				qla2x00_mark_all_devices_lost(vha, 1);
			}

			if (vha->vp_idx) {
				atomic_set(&vha->vp_state, VP_FAILED);
				fc_vport_set_state(vha->fc_vport,
				    FC_VPORT_FAILED);
				qla2x00_mark_all_devices_lost(vha, 1);
			}

			vha->flags.management_server_logged_in = 0;
			ha->link_data_rate = PORT_SPEED_UNKNOWN;
			break;
		}

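		/*
		 * If PORT UPDATE is global (received LIP_OCCURRED/LIP_RESET
		 * event etc. earlier indicating loop is down) then process
		 * it. Otherwise ignore it and wait for RSCN to come in.
		 */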
		atomic_set(&vha->loop_down_timer, 0);
		if (atomic_read(&vha->loop_state) != LOOP_DOWN &&
		    atomic_read(&vha->loop_state) != LOOP_DEAD) {
			ql_dbg(ql_dbg_async, vha, 0x5011,
			    "Asynchronous PORT UPDATE ignored %04x/%04x/%04x.\n",
			    mb[1], mb[2], mb[3]);

			qlt_async_event(mb[0], vha, mb);
			break;
		}

		ql_dbg(ql_dbg_async, vha, 0x5012,
		    "Port database changed %04x %04x %04x.\n",
		    mb[1], mb[2], mb[3]);

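		/*
		 * Mark all devices as missing so we will login again.
		 */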
		atomic_set(&vha->loop_state, LOOP_UP);

		qla2x00_mark_all_devices_lost(vha, 1);

		if (vha->vp_idx == 0 && !qla_ini_mode_enabled(vha))
			set_bit(SCR_PENDING, &vha->dpc_flags);

		set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags);
		set_bit(LOCAL_LOOP_UPDATE, &vha->dpc_flags);
		set_bit(VP_CONFIG_OK, &vha->vp_flags);

		qlt_async_event(mb[0], vha, mb);
		break;

	case MBA_RSCN_UPDATE:
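		/* Check if the Vport has issued a SCR */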
		if (vha->vp_idx && test_bit(VP_SCR_NEEDED, &vha->vp_flags))
			break;
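		/* Only handle SCNs for our Vports index. */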
		if (ha->flags.npiv_supported && vha->vp_idx != (mb[3] & 0xff))
			break;

		ql_dbg(ql_dbg_async, vha, 0x5013,
		    "RSCN database changed -- %04x %04x %04x.\n",
		    mb[1], mb[2], mb[3]);

		rscn_entry = ((mb[1] & 0xff) << 16) | mb[2];
		host_pid = (vha->d_id.b.domain << 16) | (vha->d_id.b.area << 8)
		    | vha->d_id.b.al_pa;
		if (rscn_entry == host_pid) {
			ql_dbg(ql_dbg_async, vha, 0x5014,
			    "Ignoring RSCN update to local host "
			    "port ID (%06x).\n", host_pid);
			break;
		}

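		/* Ignore reserved bits from RSCN-payload. */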
		rscn_entry = ((mb[1] & 0x3ff) << 16) | mb[2];

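		/* Skip RSCNs for virtual ports on the same physical port */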
		if (qla2x00_is_a_vp_did(vha, rscn_entry))
			break;

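		/*
		 * Search for the rport related to this RSCN entry and mark it
		 * as lost.
		 */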
		list_for_each_entry(fcport, &vha->vp_fcports, list) {
			if (atomic_read(&fcport->state) != FCS_ONLINE)
				continue;
			if (fcport->d_id.b24 == rscn_entry) {
				qla2x00_mark_device_lost(vha, fcport, 0, 0);
				break;
			}
		}

		atomic_set(&vha->loop_down_timer, 0);
		vha->flags.management_server_logged_in = 0;

		set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags);
		set_bit(RSCN_UPDATE, &vha->dpc_flags);
		qla2x00_post_aen_work(vha, FCH_EVT_RSCN, rscn_entry);
		break;

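	/* case MBA_RIO_RESPONSE: */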
	case MBA_ZIO_RESPONSE:
		ql_dbg(ql_dbg_async, vha, 0x5015,
		    "[R|Z]IO update completion.\n");

		if (IS_FWI2_CAPABLE(ha))
			qla24xx_process_response_queue(vha, rsp);
		else
			qla2x00_process_response_queue(rsp);
		break;

	case MBA_DISCARD_RND_FRAME:
		ql_dbg(ql_dbg_async, vha, 0x5016,
		    "Discard RND Frame -- %04x %04x %04x.\n",
		    mb[1], mb[2], mb[3]);
		break;

	case MBA_TRACE_NOTIFICATION:
		ql_dbg(ql_dbg_async, vha, 0x5017,
		    "Trace Notification -- %04x %04x.\n", mb[1], mb[2]);
		break;

	case MBA_ISP84XX_ALERT:
		ql_dbg(ql_dbg_async, vha, 0x5018,
		    "ISP84XX Alert Notification -- %04x %04x %04x.\n",
		    mb[1], mb[2], mb[3]);

		spin_lock_irqsave(&ha->cs84xx->access_lock, flags);
		switch (mb[1]) {
		case A84_PANIC_RECOVERY:
			ql_log(ql_log_info, vha, 0x5019,
			    "Alert 84XX: panic recovery %04x %04x.\n",
			    mb[2], mb[3]);
			break;
		case A84_OP_LOGIN_COMPLETE:
			ha->cs84xx->op_fw_version = mb[3] << 16 | mb[2];
			ql_log(ql_log_info, vha, 0x501a,
			    "Alert 84XX: firmware version %x.\n",
			    ha->cs84xx->op_fw_version);
			break;
		case A84_DIAG_LOGIN_COMPLETE:
			ha->cs84xx->diag_fw_version = mb[3] << 16 | mb[2];
			ql_log(ql_log_info, vha, 0x501b,
			    "Alert 84XX: diagnostic firmware version %x.\n",
			    ha->cs84xx->diag_fw_version);
			break;
		case A84_GOLD_LOGIN_COMPLETE:
			ha->cs84xx->gold_fw_version = mb[3] << 16 | mb[2];
			ha->cs84xx->fw_update = 1;
			ql_log(ql_log_info, vha, 0x501c,
			    "Alert 84XX: gold firmware version %x.\n",
			    ha->cs84xx->gold_fw_version);
			break;
		default:
			ql_log(ql_log_warn, vha, 0x501d,
			    "Alert 84xx: Invalid Alert %04x %04x %04x.\n",
			    mb[1], mb[2], mb[3]);
		}
		spin_unlock_irqrestore(&ha->cs84xx->access_lock, flags);
		break;
	case MBA_DCBX_START:
		ql_dbg(ql_dbg_async, vha, 0x501e,
		    "DCBX Started -- %04x %04x %04x.\n",
		    mb[1], mb[2], mb[3]);
		break;
	case MBA_DCBX_PARAM_UPDATE:
		ql_dbg(ql_dbg_async, vha, 0x501f,
		    "DCBX Parameters Updated -- %04x %04x %04x.\n",
		    mb[1], mb[2], mb[3]);
		break;
	case MBA_FCF_CONF_ERR:
		ql_dbg(ql_dbg_async, vha, 0x5020,
		    "FCF Configuration Error -- %04x %04x %04x.\n",
		    mb[1], mb[2], mb[3]);
		break;
	case MBA_IDC_NOTIFY:
		if (IS_QLA8031(vha->hw) || IS_QLA8044(ha)) {
			mb[4] = RD_REG_WORD(&reg24->mailbox4);
			if (((mb[2] & 0x7fff) == MBC_PORT_RESET ||
			    (mb[2] & 0x7fff) == MBC_SET_PORT_CONFIG) &&
			    (mb[4] & INTERNAL_LOOPBACK_MASK) != 0) {
				set_bit(ISP_QUIESCE_NEEDED, &vha->dpc_flags);
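				/*
				 * Extend loop down timer since port is
				 * active.
				 */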
				if (atomic_read(&vha->loop_state) == LOOP_DOWN)
					atomic_set(&vha->loop_down_timer,
					    LOOP_DOWN_TIME);
				qla2xxx_wake_dpc(vha);
			}
		}
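		/* fall through */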
	case MBA_IDC_COMPLETE:
		if (ha->notify_lb_portup_comp && !vha->vp_idx)
			complete(&ha->lb_portup_comp);
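		/* fall through */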
	case MBA_IDC_TIME_EXT:
		if (IS_QLA81XX(vha->hw) || IS_QLA8031(vha->hw) ||
		    IS_QLA8044(ha))
			qla81xx_idc_event(vha, mb[0], mb[1]);
		break;

	case MBA_IDC_AEN:
		mb[4] = RD_REG_WORD(&reg24->mailbox4);
		mb[5] = RD_REG_WORD(&reg24->mailbox5);
		mb[6] = RD_REG_WORD(&reg24->mailbox6);
		mb[7] = RD_REG_WORD(&reg24->mailbox7);
		qla83xx_handle_8200_aen(vha, mb);
		break;

	case MBA_DPORT_DIAGNOSTICS:
		ql_dbg(ql_dbg_async, vha, 0x5052,
		    "D-Port Diagnostics: %04x result=%s\n",
		    mb[0],
		    mb[1] == 0 ? "start" :
		    mb[1] == 1 ? "done (pass)" :
		    mb[1] == 2 ? "done (error)" : "other");
		break;

	case MBA_TEMPERATURE_ALERT:
		ql_dbg(ql_dbg_async, vha, 0x505e,
		    "TEMPERATURE ALERT: %04x %04x %04x\n", mb[1], mb[2], mb[3]);
		if (mb[1] == 0x12)
			schedule_work(&ha->board_disable);
		break;

	default:
		ql_dbg(ql_dbg_async, vha, 0x5057,
		    "Unknown AEN:%04x %04x %04x %04x\n",
		    mb[0], mb[1], mb[2], mb[3]);
	}

	qlt_async_event(mb[0], vha, mb);

	if (!vha->vp_idx && ha->num_vhosts)
		qla2x00_alert_all_vps(rsp, mb);
}

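/**
 * qla2x00_process_completed_request() - Process a Fast Post response.
 * @vha: SCSI driver HA context
 * @req: request queue
 * @index: SRB index
 */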
void
qla2x00_process_completed_request(struct scsi_qla_host *vha,
	struct req_que *req, uint32_t index)
{
	srb_t *sp;
	struct qla_hw_data *ha = vha->hw;

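	/* Validate handle. */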
	if (index >= req->num_outstanding_cmds) {
		ql_log(ql_log_warn, vha, 0x3014,
		    "Invalid SCSI command index (%x).\n", index);

		if (IS_P3P_TYPE(ha))
			set_bit(FCOE_CTX_RESET_NEEDED, &vha->dpc_flags);
		else
			set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
		return;
	}

	sp = req->outstanding_cmds[index];
	if (sp) {
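		/* Free outstanding command slot. */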
		req->outstanding_cmds[index] = NULL;

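		/* Save ISP completion status. */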
		sp->done(ha, sp, DID_OK << 16);
	} else {
		ql_log(ql_log_warn, vha, 0x3016, "Invalid SCSI SRB.\n");

		if (IS_P3P_TYPE(ha))
			set_bit(FCOE_CTX_RESET_NEEDED, &vha->dpc_flags);
		else
			set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
	}
}

srb_t *
qla2x00_get_sp_from_handle(scsi_qla_host_t *vha, const char *func,
	struct req_que *req, void *iocb)
{
	struct qla_hw_data *ha = vha->hw;
	sts_entry_t *pkt = iocb;
	srb_t *sp = NULL;
	uint16_t index;

	index = LSW(pkt->handle);
	if (index >= req->num_outstanding_cmds) {
		ql_log(ql_log_warn, vha, 0x5031,
		    "Invalid command index (%x).\n", index);
		if (IS_P3P_TYPE(ha))
			set_bit(FCOE_CTX_RESET_NEEDED, &vha->dpc_flags);
		else
			set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
		goto done;
	}
	sp = req->outstanding_cmds[index];
	if (!sp) {
		ql_log(ql_log_warn, vha, 0x5032,
		    "Invalid completion handle (%x) -- timed-out.\n", index);
		return sp;
	}
	if (sp->handle != index) {
		ql_log(ql_log_warn, vha, 0x5033,
		    "SRB handle (%x) mismatch %x.\n", sp->handle, index);
		return NULL;
	}

	req->outstanding_cmds[index] = NULL;

done:
	return sp;
}

static void
qla2x00_mbx_iocb_entry(scsi_qla_host_t *vha, struct req_que *req,
    struct mbx_entry *mbx)
{
	const char func[] = "MBX-IOCB";
	const char *type;
	fc_port_t *fcport;
	srb_t *sp;
	struct srb_iocb *lio;
	uint16_t *data;
	uint16_t status;

	sp = qla2x00_get_sp_from_handle(vha, func, req, mbx);
	if (!sp)
		return;

	lio = &sp->u.iocb_cmd;
	type = sp->name;
	fcport = sp->fcport;
	data = lio->u.logio.data;

	data[0] = MBS_COMMAND_ERROR;
	data[1] = lio->u.logio.flags & SRB_LOGIN_RETRIED ?
	    QLA_LOGIO_LOGIN_RETRIED : 0;
	if (mbx->entry_status) {
		ql_dbg(ql_dbg_async, vha, 0x5043,
		    "Async-%s error entry - hdl=%x portid=%02x%02x%02x "
		    "entry-status=%x status=%x state-flag=%x "
		    "status-flags=%x.\n", type, sp->handle,
		    fcport->d_id.b.domain, fcport->d_id.b.area,
		    fcport->d_id.b.al_pa, mbx->entry_status,
		    le16_to_cpu(mbx->status), le16_to_cpu(mbx->state_flags),
		    le16_to_cpu(mbx->status_flags));

		ql_dump_buffer(ql_dbg_async + ql_dbg_buffer, vha, 0x5029,
		    (uint8_t *)mbx, sizeof(*mbx));

		goto logio_done;
	}

	status = le16_to_cpu(mbx->status);
	if (status == 0x30 && sp->type == SRB_LOGIN_CMD &&
	    le16_to_cpu(mbx->mb0) == MBS_COMMAND_COMPLETE)
		status = 0;
	if (!status && le16_to_cpu(mbx->mb0) == MBS_COMMAND_COMPLETE) {
		ql_dbg(ql_dbg_async, vha, 0x5045,
		    "Async-%s complete - hdl=%x portid=%02x%02x%02x mbx1=%x.\n",
		    type, sp->handle, fcport->d_id.b.domain,
		    fcport->d_id.b.area, fcport->d_id.b.al_pa,
		    le16_to_cpu(mbx->mb1));

		data[0] = MBS_COMMAND_COMPLETE;
		if (sp->type == SRB_LOGIN_CMD) {
			fcport->port_type = FCT_TARGET;
			if (le16_to_cpu(mbx->mb1) & BIT_0)
				fcport->port_type = FCT_INITIATOR;
			else if (le16_to_cpu(mbx->mb1) & BIT_1)
				fcport->flags |= FCF_FCP2_DEVICE;
		}
		goto logio_done;
	}

	data[0] = le16_to_cpu(mbx->mb0);
	switch (data[0]) {
	case MBS_PORT_ID_USED:
		data[1] = le16_to_cpu(mbx->mb1);
		break;
	case MBS_LOOP_ID_USED:
		break;
	default:
		data[0] = MBS_COMMAND_ERROR;
		break;
	}

	ql_log(ql_log_warn, vha, 0x5046,
	    "Async-%s failed - hdl=%x portid=%02x%02x%02x status=%x "
	    "mb0=%x mb1=%x mb2=%x mb6=%x mb7=%x.\n", type, sp->handle,
	    fcport->d_id.b.domain, fcport->d_id.b.area, fcport->d_id.b.al_pa,
	    status, le16_to_cpu(mbx->mb0), le16_to_cpu(mbx->mb1),
	    le16_to_cpu(mbx->mb2), le16_to_cpu(mbx->mb6),
	    le16_to_cpu(mbx->mb7));

logio_done:
	sp->done(vha, sp, 0);
}

static void
qla2x00_ct_entry(scsi_qla_host_t *vha, struct req_que *req,
    sts_entry_t *pkt, int iocb_type)
{
	const char func[] = "CT_IOCB";
	const char *type;
	srb_t *sp;
	struct fc_bsg_job *bsg_job;
	uint16_t comp_status;
	int res;

	sp = qla2x00_get_sp_from_handle(vha, func, req, pkt);
	if (!sp)
		return;

	bsg_job = sp->u.bsg_job;

	type = "ct pass-through";

	comp_status = le16_to_cpu(pkt->comp_status);

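	/*
	 * return FC_CTELS_STATUS_OK and leave the decoding of the ELS/CT
	 * fc payload to the caller
	 */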
	bsg_job->reply->reply_data.ctels_reply.status = FC_CTELS_STATUS_OK;
	bsg_job->reply_len = sizeof(struct fc_bsg_reply);

	if (comp_status != CS_COMPLETE) {
		if (comp_status == CS_DATA_UNDERRUN) {
			res = DID_OK << 16;
			bsg_job->reply->reply_payload_rcv_len =
			    le16_to_cpu(((sts_entry_t *)pkt)->rsp_info_len);

			ql_log(ql_log_warn, vha, 0x5048,
			    "CT pass-through-%s error "
			    "comp_status-status=0x%x total_byte = 0x%x.\n",
			    type, comp_status,
			    bsg_job->reply->reply_payload_rcv_len);
		} else {
			ql_log(ql_log_warn, vha, 0x5049,
			    "CT pass-through-%s error "
			    "comp_status-status=0x%x.\n", type, comp_status);
			res = DID_ERROR << 16;
			bsg_job->reply->reply_payload_rcv_len = 0;
		}
		ql_dump_buffer(ql_dbg_async + ql_dbg_buffer, vha, 0x5035,
		    (uint8_t *)pkt, sizeof(*pkt));
	} else {
		res = DID_OK << 16;
		bsg_job->reply->reply_payload_rcv_len =
		    bsg_job->reply_payload.payload_len;
		bsg_job->reply_len = 0;
	}

	sp->done(vha, sp, res);
}

static void
qla24xx_els_ct_entry(scsi_qla_host_t *vha, struct req_que *req,
    struct sts_entry_24xx *pkt, int iocb_type)
{
	const char func[] = "ELS_CT_IOCB";
	const char *type;
	srb_t *sp;
	struct fc_bsg_job *bsg_job;
	uint16_t comp_status;
	uint32_t fw_status[3];
	uint8_t *fw_sts_ptr;
	int res;

	sp = qla2x00_get_sp_from_handle(vha, func, req, pkt);
	if (!sp)
		return;
	bsg_job = sp->u.bsg_job;

	type = NULL;
	switch (sp->type) {
	case SRB_ELS_CMD_RPT:
	case SRB_ELS_CMD_HST:
		type = "els";
		break;
	case SRB_CT_CMD:
		type = "ct pass-through";
		break;
	case SRB_ELS_DCMD:
		type = "Driver ELS logo";
		ql_dbg(ql_dbg_user, vha, 0x5047,
		    "Completing %s: (%p) type=%d.\n", type, sp, sp->type);
		sp->done(vha, sp, 0);
		return;
	default:
		ql_dbg(ql_dbg_user, vha, 0x503e,
		    "Unrecognized SRB: (%p) type=%d.\n", sp, sp->type);
		return;
	}

	comp_status = fw_status[0] = le16_to_cpu(pkt->comp_status);
	fw_status[1] = le16_to_cpu(((struct els_sts_entry_24xx *)pkt)->error_subcode_1);
	fw_status[2] = le16_to_cpu(((struct els_sts_entry_24xx *)pkt)->error_subcode_2);

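	/*
	 * return FC_CTELS_STATUS_OK and leave the decoding of the ELS/CT
	 * fc payload to the caller
	 */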
	bsg_job->reply->reply_data.ctels_reply.status = FC_CTELS_STATUS_OK;
	bsg_job->reply_len = sizeof(struct fc_bsg_reply) + sizeof(fw_status);

	if (comp_status != CS_COMPLETE) {
		if (comp_status == CS_DATA_UNDERRUN) {
			res = DID_OK << 16;
			bsg_job->reply->reply_payload_rcv_len =
			    le16_to_cpu(((struct els_sts_entry_24xx *)pkt)->total_byte_count);

			ql_dbg(ql_dbg_user, vha, 0x503f,
			    "ELS-CT pass-through-%s error hdl=%x comp_status-status=0x%x "
			    "error subcode 1=0x%x error subcode 2=0x%x total_byte = 0x%x.\n",
			    type, sp->handle, comp_status, fw_status[1], fw_status[2],
			    le16_to_cpu(((struct els_sts_entry_24xx *)
				pkt)->total_byte_count));
			fw_sts_ptr = ((uint8_t *)bsg_job->req->sense) +
			    sizeof(struct fc_bsg_reply);
			memcpy(fw_sts_ptr, fw_status, sizeof(fw_status));
		} else {
			ql_dbg(ql_dbg_user, vha, 0x5040,
			    "ELS-CT pass-through-%s error hdl=%x comp_status-status=0x%x "
			    "error subcode 1=0x%x error subcode 2=0x%x.\n",
			    type, sp->handle, comp_status,
			    le16_to_cpu(((struct els_sts_entry_24xx *)
				pkt)->error_subcode_1),
			    le16_to_cpu(((struct els_sts_entry_24xx *)
				pkt)->error_subcode_2));
			res = DID_ERROR << 16;
			bsg_job->reply->reply_payload_rcv_len = 0;
			fw_sts_ptr = ((uint8_t *)bsg_job->req->sense) +
			    sizeof(struct fc_bsg_reply);
			memcpy(fw_sts_ptr, fw_status, sizeof(fw_status));
		}
		ql_dump_buffer(ql_dbg_user + ql_dbg_buffer, vha, 0x5056,
		    (uint8_t *)pkt, sizeof(*pkt));
	} else {
		res = DID_OK << 16;
		bsg_job->reply->reply_payload_rcv_len =
		    bsg_job->reply_payload.payload_len;
		bsg_job->reply_len = 0;
	}

	sp->done(vha, sp, res);
}

static void
qla24xx_logio_entry(scsi_qla_host_t *vha, struct req_que *req,
    struct logio_entry_24xx *logio)
{
	const char func[] = "LOGIO-IOCB";
	const char *type;
	fc_port_t *fcport;
	srb_t *sp;
	struct srb_iocb *lio;
	uint16_t *data;
	uint32_t iop[2];

	sp = qla2x00_get_sp_from_handle(vha, func, req, logio);
	if (!sp)
		return;

	lio = &sp->u.iocb_cmd;
	type = sp->name;
	fcport = sp->fcport;
	data = lio->u.logio.data;

	data[0] = MBS_COMMAND_ERROR;
	data[1] = lio->u.logio.flags & SRB_LOGIN_RETRIED ?
	    QLA_LOGIO_LOGIN_RETRIED : 0;
	if (logio->entry_status) {
		ql_log(ql_log_warn, fcport->vha, 0x5034,
		    "Async-%s error entry - hdl=%x "
		    "portid=%02x%02x%02x entry-status=%x.\n",
		    type, sp->handle, fcport->d_id.b.domain,
		    fcport->d_id.b.area, fcport->d_id.b.al_pa,
		    logio->entry_status);
		ql_dump_buffer(ql_dbg_async + ql_dbg_buffer, vha, 0x504d,
		    (uint8_t *)logio, sizeof(*logio));

		goto logio_done;
	}

	if (le16_to_cpu(logio->comp_status) == CS_COMPLETE) {
		ql_dbg(ql_dbg_async, fcport->vha, 0x5036,
		    "Async-%s complete - hdl=%x portid=%02x%02x%02x "
		    "iop0=%x.\n", type, sp->handle, fcport->d_id.b.domain,
		    fcport->d_id.b.area, fcport->d_id.b.al_pa,
		    le32_to_cpu(logio->io_parameter[0]));

		data[0] = MBS_COMMAND_COMPLETE;
		if (sp->type != SRB_LOGIN_CMD)
			goto logio_done;

		iop[0] = le32_to_cpu(logio->io_parameter[0]);
		if (iop[0] & BIT_4) {
			fcport->port_type = FCT_TARGET;
			if (iop[0] & BIT_8)
				fcport->flags |= FCF_FCP2_DEVICE;
		} else if (iop[0] & BIT_5)
			fcport->port_type = FCT_INITIATOR;

		if (iop[0] & BIT_7)
			fcport->flags |= FCF_CONF_COMP_SUPPORTED;

		if (logio->io_parameter[7] || logio->io_parameter[8])
			fcport->supported_classes |= FC_COS_CLASS2;
		if (logio->io_parameter[9] || logio->io_parameter[10])
			fcport->supported_classes |= FC_COS_CLASS3;

		goto logio_done;
	}

	iop[0] = le32_to_cpu(logio->io_parameter[0]);
	iop[1] = le32_to_cpu(logio->io_parameter[1]);
	switch (iop[0]) {
	case LSC_SCODE_PORTID_USED:
		data[0] = MBS_PORT_ID_USED;
		data[1] = LSW(iop[1]);
		break;
	case LSC_SCODE_NPORT_USED:
		data[0] = MBS_LOOP_ID_USED;
		break;
	default:
		data[0] = MBS_COMMAND_ERROR;
		break;
	}

	ql_dbg(ql_dbg_async, fcport->vha, 0x5037,
	    "Async-%s failed - hdl=%x portid=%02x%02x%02x comp=%x "
	    "iop0=%x iop1=%x.\n", type, sp->handle, fcport->d_id.b.domain,
	    fcport->d_id.b.area, fcport->d_id.b.al_pa,
	    le16_to_cpu(logio->comp_status),
	    le32_to_cpu(logio->io_parameter[0]),
	    le32_to_cpu(logio->io_parameter[1]));

logio_done:
	sp->done(vha, sp, 0);
}

static void
qla24xx_tm_iocb_entry(scsi_qla_host_t *vha, struct req_que *req, void *tsk)
{
	const char func[] = "TMF-IOCB";
	const char *type;
	fc_port_t *fcport;
	srb_t *sp;
	struct srb_iocb *iocb;
	struct sts_entry_24xx *sts = (struct sts_entry_24xx *)tsk;

	sp = qla2x00_get_sp_from_handle(vha, func, req, tsk);
	if (!sp)
		return;

	iocb = &sp->u.iocb_cmd;
	type = sp->name;
	fcport = sp->fcport;
	iocb->u.tmf.data = QLA_SUCCESS;

	if (sts->entry_status) {
		ql_log(ql_log_warn, fcport->vha, 0x5038,
		    "Async-%s error - hdl=%x entry-status(%x).\n",
		    type, sp->handle, sts->entry_status);
		iocb->u.tmf.data = QLA_FUNCTION_FAILED;
	} else if (sts->comp_status != cpu_to_le16(CS_COMPLETE)) {
		ql_log(ql_log_warn, fcport->vha, 0x5039,
		    "Async-%s error - hdl=%x completion status(%x).\n",
		    type, sp->handle, sts->comp_status);
		iocb->u.tmf.data = QLA_FUNCTION_FAILED;
	} else if ((le16_to_cpu(sts->scsi_status) &
	    SS_RESPONSE_INFO_LEN_VALID)) {
		if (le32_to_cpu(sts->rsp_data_len) < 4) {
			ql_log(ql_log_warn, fcport->vha, 0x503b,
			    "Async-%s error - hdl=%x not enough response(%d).\n",
			    type, sp->handle, sts->rsp_data_len);
		} else if (sts->data[3]) {
			ql_log(ql_log_warn, fcport->vha, 0x503c,
			    "Async-%s error - hdl=%x response(%x).\n",
			    type, sp->handle, sts->data[3]);
			iocb->u.tmf.data = QLA_FUNCTION_FAILED;
		}
	}

	if (iocb->u.tmf.data != QLA_SUCCESS)
		ql_dump_buffer(ql_dbg_async + ql_dbg_buffer, vha, 0x5055,
		    (uint8_t *)sts, sizeof(*sts));

	sp->done(vha, sp, 0);
}

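/**
 * qla2x00_process_response_queue() - Process response queue entries.
 * @rsp: response queue
 */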
void
qla2x00_process_response_queue(struct rsp_que *rsp)
{
	struct scsi_qla_host *vha;
	struct qla_hw_data *ha = rsp->hw;
	struct device_reg_2xxx __iomem *reg = &ha->iobase->isp;
	sts_entry_t *pkt;
	uint16_t handle_cnt;
	uint16_t cnt;

	vha = pci_get_drvdata(ha->pdev);

	if (!vha->flags.online)
		return;

	while (rsp->ring_ptr->signature != RESPONSE_PROCESSED) {
		pkt = (sts_entry_t *)rsp->ring_ptr;

		rsp->ring_index++;
		if (rsp->ring_index == rsp->length) {
			rsp->ring_index = 0;
			rsp->ring_ptr = rsp->ring;
		} else {
			rsp->ring_ptr++;
		}

		if (pkt->entry_status != 0) {
			qla2x00_error_entry(vha, rsp, pkt);
			((response_t *)pkt)->signature = RESPONSE_PROCESSED;
			wmb();
			continue;
		}

		switch (pkt->entry_type) {
		case STATUS_TYPE:
			qla2x00_status_entry(vha, rsp, pkt);
			break;
		case STATUS_TYPE_21:
			handle_cnt = ((sts21_entry_t *)pkt)->handle_count;
			for (cnt = 0; cnt < handle_cnt; cnt++) {
				qla2x00_process_completed_request(vha, rsp->req,
				    ((sts21_entry_t *)pkt)->handle[cnt]);
			}
			break;
		case STATUS_TYPE_22:
			handle_cnt = ((sts22_entry_t *)pkt)->handle_count;
			for (cnt = 0; cnt < handle_cnt; cnt++) {
				qla2x00_process_completed_request(vha, rsp->req,
				    ((sts22_entry_t *)pkt)->handle[cnt]);
			}
			break;
		case STATUS_CONT_TYPE:
			qla2x00_status_cont_entry(rsp, (sts_cont_entry_t *)pkt);
			break;
		case MBX_IOCB_TYPE:
			qla2x00_mbx_iocb_entry(vha, rsp->req,
			    (struct mbx_entry *)pkt);
			break;
		case CT_IOCB_TYPE:
			qla2x00_ct_entry(vha, rsp->req, pkt, CT_IOCB_TYPE);
			break;
		default:
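			/* Type Not Supported. */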
			ql_log(ql_log_warn, vha, 0x504a,
			    "Received unknown response pkt type %x "
			    "entry status=%x.\n",
			    pkt->entry_type, pkt->entry_status);
			break;
		}
		((response_t *)pkt)->signature = RESPONSE_PROCESSED;
		wmb();
	}

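	/* Adjust ring index. */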
	WRT_REG_WORD(ISP_RSP_Q_OUT(ha, reg), rsp->ring_index);
}

static inline void
qla2x00_handle_sense(srb_t *sp, uint8_t *sense_data, uint32_t par_sense_len,
    uint32_t sense_len, struct rsp_que *rsp, int res)
{
	struct scsi_qla_host *vha = sp->fcport->vha;
	struct scsi_cmnd *cp = GET_CMD_SP(sp);
	uint32_t track_sense_len;

	if (sense_len >= SCSI_SENSE_BUFFERSIZE)
		sense_len = SCSI_SENSE_BUFFERSIZE;

	SET_CMD_SENSE_LEN(sp, sense_len);
	SET_CMD_SENSE_PTR(sp, cp->sense_buffer);
	track_sense_len = sense_len;

	if (sense_len > par_sense_len)
		sense_len = par_sense_len;

	memcpy(cp->sense_buffer, sense_data, sense_len);

	SET_CMD_SENSE_PTR(sp, cp->sense_buffer + sense_len);
	track_sense_len -= sense_len;
	SET_CMD_SENSE_LEN(sp, track_sense_len);

	if (track_sense_len != 0) {
		rsp->status_srb = sp;
		cp->result = res;
	}

	if (sense_len) {
		ql_dbg(ql_dbg_io + ql_dbg_buffer, vha, 0x301c,
		    "Check condition Sense data, nexus%ld:%d:%llu cmd=%p.\n",
		    sp->fcport->vha->host_no, cp->device->id, cp->device->lun,
		    cp);
		ql_dump_buffer(ql_dbg_io + ql_dbg_buffer, vha, 0x302b,
		    cp->sense_buffer, sense_len);
	}
}

struct scsi_dif_tuple {
	__be16 guard;
	__be16 app_tag;
	__be32 ref_tag;
};

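/*
 * Checks the guard or meta-data for the type of error
 * detected by the HBA. In case of errors, the ASC/ASCQ fields in the
 * sense buffer are set with ILLEGAL_REQUEST to indicate to the kernel
 * that the HBA detected the error.
 */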
static inline int
qla2x00_handle_dif_error(srb_t *sp, struct sts_entry_24xx *sts24)
{
	struct scsi_qla_host *vha = sp->fcport->vha;
	struct scsi_cmnd *cmd = GET_CMD_SP(sp);
	uint8_t *ap = &sts24->data[12];
	uint8_t *ep = &sts24->data[20];
	uint32_t e_ref_tag, a_ref_tag;
	uint16_t e_app_tag, a_app_tag;
	uint16_t e_guard, a_guard;

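	/*
	 * swab32 of the "data" field in the beginning of qla2x00_status_entry()
	 * would make guard field appear at offset 2
	 */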
	a_guard = le16_to_cpu(*(uint16_t *)(ap + 2));
	a_app_tag = le16_to_cpu(*(uint16_t *)(ap + 0));
	a_ref_tag = le32_to_cpu(*(uint32_t *)(ap + 4));
	e_guard = le16_to_cpu(*(uint16_t *)(ep + 2));
	e_app_tag = le16_to_cpu(*(uint16_t *)(ep + 0));
	e_ref_tag = le32_to_cpu(*(uint32_t *)(ep + 4));

	ql_dbg(ql_dbg_io, vha, 0x3023,
	    "iocb(s) %p Returned STATUS.\n", sts24);

	ql_dbg(ql_dbg_io, vha, 0x3024,
	    "DIF ERROR in cmd 0x%x lba 0x%llx act ref"
	    " tag=0x%x, exp ref_tag=0x%x, act app tag=0x%x, exp app"
	    " tag=0x%x, act guard=0x%x, exp guard=0x%x.\n",
	    cmd->cmnd[0], (u64)scsi_get_lba(cmd), a_ref_tag, e_ref_tag,
	    a_app_tag, e_app_tag, a_guard, e_guard);

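	/*
	 * Ignore sector if:
	 * For type     3: ref & app tag is all 'f's
	 * For type 0,1,2: app tag is all 'f's
	 */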
	if ((a_app_tag == 0xffff) &&
	    ((scsi_get_prot_type(cmd) != SCSI_PROT_DIF_TYPE3) ||
	     (a_ref_tag == 0xffffffff))) {
		uint32_t blocks_done, resid;
		sector_t lba_s = scsi_get_lba(cmd);

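		/* 2TB boundary case covered automatically with this */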
		blocks_done = e_ref_tag - (uint32_t)lba_s + 1;

		resid = scsi_bufflen(cmd) - (blocks_done *
		    cmd->device->sector_size);

		scsi_set_resid(cmd, resid);
		cmd->result = DID_OK << 16;

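		/* Update protection tag */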
		if (scsi_prot_sg_count(cmd)) {
			uint32_t i, j = 0, k = 0, num_ent;
			struct scatterlist *sg;
			struct t10_pi_tuple *spt;

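			/* Patch the corresponding protection tags */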
			scsi_for_each_prot_sg(cmd, sg,
			    scsi_prot_sg_count(cmd), i) {
				num_ent = sg_dma_len(sg) / 8;
				if (k + num_ent < blocks_done) {
					k += num_ent;
					continue;
				}
				j = blocks_done - k - 1;
				k = blocks_done;
				break;
			}

			if (k != blocks_done) {
				ql_log(ql_log_warn, vha, 0x302f,
				    "unexpected tag values tag:lba=%x:%llx)\n",
				    e_ref_tag, (unsigned long long)lba_s);
				return 1;
			}

			spt = page_address(sg_page(sg)) + sg->offset;
			spt += j;

			spt->app_tag = 0xffff;
			if (scsi_get_prot_type(cmd) == SCSI_PROT_DIF_TYPE3)
				spt->ref_tag = 0xffffffff;
		}

		return 0;
	}

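	/* check guard */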
	if (e_guard != a_guard) {
		scsi_build_sense_buffer(1, cmd->sense_buffer, ILLEGAL_REQUEST,
		    0x10, 0x1);
		set_driver_byte(cmd, DRIVER_SENSE);
		set_host_byte(cmd, DID_ABORT);
		cmd->result |= SAM_STAT_CHECK_CONDITION << 1;
		return 1;
	}

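	/* check ref tag */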
	if (e_ref_tag != a_ref_tag) {
		scsi_build_sense_buffer(1, cmd->sense_buffer, ILLEGAL_REQUEST,
		    0x10, 0x3);
		set_driver_byte(cmd, DRIVER_SENSE);
		set_host_byte(cmd, DID_ABORT);
		cmd->result |= SAM_STAT_CHECK_CONDITION << 1;
		return 1;
	}

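	/* check appl tag */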
	if (e_app_tag != a_app_tag) {
		scsi_build_sense_buffer(1, cmd->sense_buffer, ILLEGAL_REQUEST,
		    0x10, 0x2);
		set_driver_byte(cmd, DRIVER_SENSE);
		set_host_byte(cmd, DID_ABORT);
		cmd->result |= SAM_STAT_CHECK_CONDITION << 1;
		return 1;
	}

	return 1;
}

static void
qla25xx_process_bidir_status_iocb(scsi_qla_host_t *vha, void *pkt,
    struct req_que *req, uint32_t index)
{
	struct qla_hw_data *ha = vha->hw;
	srb_t *sp;
	uint16_t comp_status;
	uint16_t scsi_status;
	uint16_t thread_id;
	uint32_t rval = EXT_STATUS_OK;
	struct fc_bsg_job *bsg_job = NULL;
	sts_entry_t *sts;
	struct sts_entry_24xx *sts24;

	sts = (sts_entry_t *) pkt;
	sts24 = (struct sts_entry_24xx *) pkt;

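	/* Validate handle. */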
	if (index >= req->num_outstanding_cmds) {
		ql_log(ql_log_warn, vha, 0x70af,
		    "Invalid SCSI completion handle 0x%x.\n", index);
		set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
		return;
	}

	sp = req->outstanding_cmds[index];
	if (sp) {
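		/* Free outstanding command slot. */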
		req->outstanding_cmds[index] = NULL;
		bsg_job = sp->u.bsg_job;
	} else {
		ql_log(ql_log_warn, vha, 0x70b0,
		    "Req:%d: Invalid ISP SCSI completion handle(0x%x)\n",
		    req->id, index);

		set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
		return;
	}

	if (IS_FWI2_CAPABLE(ha)) {
		comp_status = le16_to_cpu(sts24->comp_status);
		scsi_status = le16_to_cpu(sts24->scsi_status) & SS_MASK;
	} else {
		comp_status = le16_to_cpu(sts->comp_status);
		scsi_status = le16_to_cpu(sts->scsi_status) & SS_MASK;
	}

	thread_id = bsg_job->request->rqst_data.h_vendor.vendor_cmd[1];
	switch (comp_status) {
	case CS_COMPLETE:
		if (scsi_status == 0) {
			bsg_job->reply->reply_payload_rcv_len =
			    bsg_job->reply_payload.payload_len;
			vha->qla_stats.input_bytes +=
			    bsg_job->reply->reply_payload_rcv_len;
			vha->qla_stats.input_requests++;
			rval = EXT_STATUS_OK;
		}
		goto done;

	case CS_DATA_OVERRUN:
		ql_dbg(ql_dbg_user, vha, 0x70b1,
		    "Command completed with data overrun thread_id=%d\n",
		    thread_id);
		rval = EXT_STATUS_DATA_OVERRUN;
		break;

	case CS_DATA_UNDERRUN:
		ql_dbg(ql_dbg_user, vha, 0x70b2,
		    "Command completed with data underrun thread_id=%d\n",
		    thread_id);
		rval = EXT_STATUS_DATA_UNDERRUN;
		break;
	case CS_BIDIR_RD_OVERRUN:
		ql_dbg(ql_dbg_user, vha, 0x70b3,
		    "Command completed with read data overrun thread_id=%d\n",
		    thread_id);
		rval = EXT_STATUS_DATA_OVERRUN;
		break;

	case CS_BIDIR_RD_WR_OVERRUN:
		ql_dbg(ql_dbg_user, vha, 0x70b4,
		    "Command completed with read and write data overrun "
		    "thread_id=%d\n", thread_id);
		rval = EXT_STATUS_DATA_OVERRUN;
		break;

	case CS_BIDIR_RD_OVERRUN_WR_UNDERRUN:
		ql_dbg(ql_dbg_user, vha, 0x70b5,
		    "Command completed with read data over and write data "
		    "underrun thread_id=%d\n", thread_id);
		rval = EXT_STATUS_DATA_OVERRUN;
		break;

	case CS_BIDIR_RD_UNDERRUN:
		ql_dbg(ql_dbg_user, vha, 0x70b6,
		    "Command completed with read data underrun "
		    "thread_id=%d\n", thread_id);
		rval = EXT_STATUS_DATA_UNDERRUN;
		break;

	case CS_BIDIR_RD_UNDERRUN_WR_OVERRUN:
		ql_dbg(ql_dbg_user, vha, 0x70b7,
		    "Command completed with read data under and write data "
		    "overrun thread_id=%d\n", thread_id);
		rval = EXT_STATUS_DATA_UNDERRUN;
		break;

	case CS_BIDIR_RD_WR_UNDERRUN:
		ql_dbg(ql_dbg_user, vha, 0x70b8,
		    "Command completed with read and write data underrun "
		    "thread_id=%d\n", thread_id);
		rval = EXT_STATUS_DATA_UNDERRUN;
		break;

	case CS_BIDIR_DMA:
		ql_dbg(ql_dbg_user, vha, 0x70b9,
		    "Command completed with data DMA error thread_id=%d\n",
		    thread_id);
		rval = EXT_STATUS_DMA_ERR;
		break;

	case CS_TIMEOUT:
		ql_dbg(ql_dbg_user, vha, 0x70ba,
		    "Command completed with timeout thread_id=%d\n",
		    thread_id);
		rval = EXT_STATUS_TIMEOUT;
		break;
	default:
		ql_dbg(ql_dbg_user, vha, 0x70bb,
		    "Command completed with completion status=0x%x "
		    "thread_id=%d\n", comp_status, thread_id);
		rval = EXT_STATUS_ERR;
		break;
	}
	bsg_job->reply->reply_payload_rcv_len = 0;

done:
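	/* Return the vendor specific reply to API */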
	bsg_job->reply->reply_data.vendor_reply.vendor_rsp[0] = rval;
	bsg_job->reply_len = sizeof(struct fc_bsg_reply);

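	/*
	 * Always return DID_OK, bsg will send the vendor specific response
	 * in this case only.
	 */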
	sp->done(vha, sp, (DID_OK << 16));
}

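/**
 * qla2x00_status_entry() - Process a Status IOCB entry.
 * @vha: SCSI driver HA context
 * @rsp: response queue
 * @pkt: Entry pointer
 */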
static void
qla2x00_status_entry(scsi_qla_host_t *vha, struct rsp_que *rsp, void *pkt)
{
	srb_t *sp;
	fc_port_t *fcport;
	struct scsi_cmnd *cp;
	sts_entry_t *sts;
	struct sts_entry_24xx *sts24;
	uint16_t comp_status;
	uint16_t scsi_status;
	uint16_t ox_id;
	uint8_t lscsi_status;
	int32_t resid;
	uint32_t sense_len, par_sense_len, rsp_info_len, resid_len,
	    fw_resid_len;
	uint8_t *rsp_info, *sense_data;
	struct qla_hw_data *ha = vha->hw;
	uint32_t handle;
	uint16_t que;
	struct req_que *req;
	int logit = 1;
	int res = 0;
	uint16_t state_flags = 0;
	uint16_t retry_delay = 0;

	sts = (sts_entry_t *) pkt;
	sts24 = (struct sts_entry_24xx *) pkt;
	if (IS_FWI2_CAPABLE(ha)) {
		comp_status = le16_to_cpu(sts24->comp_status);
		scsi_status = le16_to_cpu(sts24->scsi_status) & SS_MASK;
		state_flags = le16_to_cpu(sts24->state_flags);
	} else {
		comp_status = le16_to_cpu(sts->comp_status);
		scsi_status = le16_to_cpu(sts->scsi_status) & SS_MASK;
	}
	handle = (uint32_t) LSW(sts->handle);
	que = MSW(sts->handle);
	req = ha->req_q_map[que];

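	/* Check for invalid queue pointer. */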
	if (req == NULL ||
	    que >= find_first_zero_bit(ha->req_qid_map, ha->max_req_queues)) {
		ql_dbg(ql_dbg_io, vha, 0x3059,
		    "Invalid status handle (0x%x): Bad req pointer. req=%p, "
		    "que=%u.\n", sts->handle, req, que);
		return;
	}

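	/* Validate handle. */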
2097 if (handle < req->num_outstanding_cmds) {
2098 sp = req->outstanding_cmds[handle];
2099 if (!sp) {
2100 ql_dbg(ql_dbg_io, vha, 0x3075,
2101 "%s(%ld): Already returned command for status handle (0x%x).\n",
2102 __func__, vha->host_no, sts->handle);
2103 return;
2104 }
2105 } else {
2106 ql_dbg(ql_dbg_io, vha, 0x3017,
2107 "Invalid status handle, out of range (0x%x).\n",
2108 sts->handle);
2109
2110 if (!test_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags)) {
2111 if (IS_P3P_TYPE(ha))
2112 set_bit(FCOE_CTX_RESET_NEEDED, &vha->dpc_flags);
2113 else
2114 set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
2115 qla2xxx_wake_dpc(vha);
2116 }
2117 return;
2118 }
2119
2120 if (unlikely((state_flags & BIT_1) && (sp->type == SRB_BIDI_CMD))) {
2121 qla25xx_process_bidir_status_iocb(vha, pkt, req, handle);
2122 return;
2123 }
2124
2125
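 /* Task Management completion. */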
2126 if (sp->type == SRB_TM_CMD) {
2127 qla24xx_tm_iocb_entry(vha, req, pkt);
2128 return;
2129 }
2130
2131
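 /* Fast path completion. */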
2132 if (comp_status == CS_COMPLETE && scsi_status == 0) {
2133 qla2x00_process_completed_request(vha, req, handle);
2134
2135 return;
2136 }
2137
2138 req->outstanding_cmds[handle] = NULL;
2139 cp = GET_CMD_SP(sp);
2140 if (cp == NULL) {
2141 ql_dbg(ql_dbg_io, vha, 0x3018,
2142 "Command already returned (0x%x/%p).\n",
2143 sts->handle, sp);
2144
2145 return;
2146 }
2147
2148 lscsi_status = scsi_status & STATUS_MASK;
2149
2150 fcport = sp->fcport;
2151
2152 ox_id = 0;
2153 sense_len = par_sense_len = rsp_info_len = resid_len =
2154 fw_resid_len = 0;
2155 if (IS_FWI2_CAPABLE(ha)) {
2156 if (scsi_status & SS_SENSE_LEN_VALID)
2157 sense_len = le32_to_cpu(sts24->sense_len);
2158 if (scsi_status & SS_RESPONSE_INFO_LEN_VALID)
2159 rsp_info_len = le32_to_cpu(sts24->rsp_data_len);
2160 if (scsi_status & (SS_RESIDUAL_UNDER | SS_RESIDUAL_OVER))
2161 resid_len = le32_to_cpu(sts24->rsp_residual_count);
2162 if (comp_status == CS_DATA_UNDERRUN)
2163 fw_resid_len = le32_to_cpu(sts24->residual_len);
2164 rsp_info = sts24->data;
2165 sense_data = sts24->data;
2166 host_to_fcp_swap(sts24->data, sizeof(sts24->data));
2167 ox_id = le16_to_cpu(sts24->ox_id);
2168 par_sense_len = sizeof(sts24->data);
2169
2170 if (sts24->retry_delay > 0 && sts24->retry_delay < 0xfff1)
2171 retry_delay = sts24->retry_delay;
2172 } else {
2173 if (scsi_status & SS_SENSE_LEN_VALID)
2174 sense_len = le16_to_cpu(sts->req_sense_length);
2175 if (scsi_status & SS_RESPONSE_INFO_LEN_VALID)
2176 rsp_info_len = le16_to_cpu(sts->rsp_info_len);
2177 resid_len = le32_to_cpu(sts->residual_length);
2178 rsp_info = sts->rsp_info;
2179 sense_data = sts->req_sense_data;
2180 par_sense_len = sizeof(sts->req_sense_data);
2181 }
2182
2183
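 /* Check for any FCP transport errors. */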
2184 if (scsi_status & SS_RESPONSE_INFO_LEN_VALID) {
2185
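 /* Sense data lies beyond any FCP RESPONSE data. */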
2186 if (IS_FWI2_CAPABLE(ha)) {
2187 sense_data += rsp_info_len;
2188 par_sense_len -= rsp_info_len;
2189 }
2190 if (rsp_info_len > 3 && rsp_info[3]) {
2191 ql_dbg(ql_dbg_io, fcport->vha, 0x3019,
2192 "FCP I/O protocol failure (0x%x/0x%x).\n",
2193 rsp_info_len, rsp_info[3]);
2194
2195 res = DID_BUS_BUSY << 16;
2196 goto out;
2197 }
2198 }
2199
2200
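 /* Check for overrun. */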
2201 if (IS_FWI2_CAPABLE(ha) && comp_status == CS_COMPLETE &&
2202 scsi_status & SS_RESIDUAL_OVER)
2203 comp_status = CS_DATA_OVERRUN;
2204
2205
2206
2207
2208
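 /*
 * Check the retry_delay_timer value if we receive a busy or
 * queue full.
 */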
2209 if (lscsi_status == SAM_STAT_TASK_SET_FULL ||
2210 lscsi_status == SAM_STAT_BUSY)
2211 qla2x00_set_retry_delay_timestamp(fcport, retry_delay);
2212
2213
2214
2215
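 /*
 * Based on host and SCSI status, generate a status code for Linux.
 */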
2216 switch (comp_status) {
2217 case CS_COMPLETE:
2218 case CS_QUEUE_FULL:
2219 if (scsi_status == 0) {
2220 res = DID_OK << 16;
2221 break;
2222 }
2223 if (scsi_status & (SS_RESIDUAL_UNDER | SS_RESIDUAL_OVER)) {
2224 resid = resid_len;
2225 scsi_set_resid(cp, resid);
2226
2227 if (!lscsi_status &&
2228 ((unsigned)(scsi_bufflen(cp) - resid) <
2229 cp->underflow)) {
2230 ql_dbg(ql_dbg_io, fcport->vha, 0x301a,
2231 "Mid-layer underflow "
2232 "detected (0x%x of 0x%x bytes).\n",
2233 resid, scsi_bufflen(cp));
2234
2235 res = DID_ERROR << 16;
2236 break;
2237 }
2238 }
2239 res = DID_OK << 16 | lscsi_status;
2240
2241 if (lscsi_status == SAM_STAT_TASK_SET_FULL) {
2242 ql_dbg(ql_dbg_io, fcport->vha, 0x301b,
2243 "QUEUE FULL detected.\n");
2244 break;
2245 }
2246 logit = 0;
2247 if (lscsi_status != SS_CHECK_CONDITION)
2248 break;
2249
2250 memset(cp->sense_buffer, 0, SCSI_SENSE_BUFFERSIZE);
2251 if (!(scsi_status & SS_SENSE_LEN_VALID))
2252 break;
2253
2254 qla2x00_handle_sense(sp, sense_data, par_sense_len, sense_len,
2255 rsp, res);
2256 break;
2257
2258 case CS_DATA_UNDERRUN:
2259
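 /* Use the firmware-calculated residual length. */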
2260 resid = IS_FWI2_CAPABLE(ha) ? fw_resid_len : resid_len;
2261 scsi_set_resid(cp, resid);
2262 if (scsi_status & SS_RESIDUAL_UNDER) {
2263 if (IS_FWI2_CAPABLE(ha) && fw_resid_len != resid_len) {
2264 ql_dbg(ql_dbg_io, fcport->vha, 0x301d,
2265 "Dropped frame(s) detected "
2266 "(0x%x of 0x%x bytes).\n",
2267 resid, scsi_bufflen(cp));
2268
2269 res = DID_ERROR << 16 | lscsi_status;
2270 goto check_scsi_status;
2271 }
2272
2273 if (!lscsi_status &&
2274 ((unsigned)(scsi_bufflen(cp) - resid) <
2275 cp->underflow)) {
2276 ql_dbg(ql_dbg_io, fcport->vha, 0x301e,
2277 "Mid-layer underflow "
2278 "detected (0x%x of 0x%x bytes).\n",
2279 resid, scsi_bufflen(cp));
2280
2281 res = DID_ERROR << 16;
2282 break;
2283 }
2284 } else if (lscsi_status != SAM_STAT_TASK_SET_FULL &&
2285 lscsi_status != SAM_STAT_BUSY) {
2286
2287
2288
2289
2290
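 /*
 * SCSI statuses of task set full and busy are considered
 * to be task not completed.
 */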
2291 ql_dbg(ql_dbg_io, fcport->vha, 0x301f,
2292 "Dropped frame(s) detected (0x%x "
2293 "of 0x%x bytes).\n", resid,
2294 scsi_bufflen(cp));
2295
2296 res = DID_ERROR << 16 | lscsi_status;
2297 goto check_scsi_status;
2298 } else {
2299 ql_dbg(ql_dbg_io, fcport->vha, 0x3030,
2300 "scsi_status: 0x%x, lscsi_status: 0x%x\n",
2301 scsi_status, lscsi_status);
2302 }
2303
2304 res = DID_OK << 16 | lscsi_status;
2305 logit = 0;
2306
2307check_scsi_status:
2308
2309
2310
2311
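 /*
 * Check to see if the SCSI status is non-zero; if so, report
 * the SCSI status.
 */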
2312 if (lscsi_status != 0) {
2313 if (lscsi_status == SAM_STAT_TASK_SET_FULL) {
2314 ql_dbg(ql_dbg_io, fcport->vha, 0x3020,
2315 "QUEUE FULL detected.\n");
2316 logit = 1;
2317 break;
2318 }
2319 if (lscsi_status != SS_CHECK_CONDITION)
2320 break;
2321
2322 memset(cp->sense_buffer, 0, SCSI_SENSE_BUFFERSIZE);
2323 if (!(scsi_status & SS_SENSE_LEN_VALID))
2324 break;
2325
2326 qla2x00_handle_sense(sp, sense_data, par_sense_len,
2327 sense_len, rsp, res);
2328 }
2329 break;
2330
2331 case CS_PORT_LOGGED_OUT:
2332 case CS_PORT_CONFIG_CHG:
2333 case CS_PORT_BUSY:
2334 case CS_INCOMPLETE:
2335 case CS_PORT_UNAVAILABLE:
2336 case CS_TIMEOUT:
2337 case CS_RESET:
2338
2339
2340
2341
2342
2343
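 /*
 * We are going to have the fc class block the rport while we
 * try to recover, so instruct the mid layer to requeue until
 * the class decides how to handle this.
 */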
2344 res = DID_TRANSPORT_DISRUPTED << 16;
2345
2346 if (comp_status == CS_TIMEOUT) {
2347 if (IS_FWI2_CAPABLE(ha))
2348 break;
2349 else if ((le16_to_cpu(sts->status_flags) &
2350 SF_LOGOUT_SENT) == 0)
2351 break;
2352 }
2353
2354 ql_dbg(ql_dbg_io, fcport->vha, 0x3021,
2355 "Port to be marked lost on fcport=%02x%02x%02x, current "
2356 "port state=%s.\n", fcport->d_id.b.domain,
2357 fcport->d_id.b.area, fcport->d_id.b.al_pa,
2358 port_state_str[atomic_read(&fcport->state)]);
2359
2360 if (atomic_read(&fcport->state) == FCS_ONLINE)
2361 qla2x00_mark_device_lost(fcport->vha, fcport, 1, 1);
2362 break;
2363
2364 case CS_ABORTED:
2365 res = DID_RESET << 16;
2366 break;
2367
2368 case CS_DIF_ERROR:
2369 logit = qla2x00_handle_dif_error(sp, sts24);
2370 res = cp->result;
2371 break;
2372
2373 case CS_TRANSPORT:
2374 res = DID_ERROR << 16;
2375
2376 if (!IS_PI_SPLIT_DET_CAPABLE(ha))
2377 break;
2378
2379 if (state_flags & BIT_4)
2380 scmd_printk(KERN_WARNING, cp,
2381 "Unsupported device '%s' found.\n",
2382 cp->device->vendor);
2383 break;
2384
2385 default:
2386 res = DID_ERROR << 16;
2387 break;
2388 }
2389
2390out:
2391 if (logit)
2392 ql_dbg(ql_dbg_io, fcport->vha, 0x3022,
2393 "FCP command status: 0x%x-0x%x (0x%x) nexus=%ld:%d:%llu "
2394 "portid=%02x%02x%02x oxid=0x%x cdb=%10phN len=0x%x "
2395 "rsp_info=0x%x resid=0x%x fw_resid=0x%x sp=%p cp=%p.\n",
2396 comp_status, scsi_status, res, vha->host_no,
2397 cp->device->id, cp->device->lun, fcport->d_id.b.domain,
2398 fcport->d_id.b.area, fcport->d_id.b.al_pa, ox_id,
2399 cp->cmnd, scsi_bufflen(cp), rsp_info_len,
2400 resid_len, fw_resid_len, sp, cp);
2401
2402 if (rsp->status_srb == NULL)
2403 sp->done(ha, sp, res);
2404}
2405
2406
2407
2408
2409
2410
2411
2412
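/**
 * qla2x00_status_cont_entry() - Process a Status Continuations entry.
 * @rsp: response queue
 * @pkt: Entry pointer
 *
 * Extended sense data.
 */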
2413static void
2414qla2x00_status_cont_entry(struct rsp_que *rsp, sts_cont_entry_t *pkt)
2415{
2416 uint8_t sense_sz = 0;
2417 struct qla_hw_data *ha = rsp->hw;
2418 struct scsi_qla_host *vha = pci_get_drvdata(ha->pdev);
2419 srb_t *sp = rsp->status_srb;
2420 struct scsi_cmnd *cp;
2421 uint32_t sense_len;
2422 uint8_t *sense_ptr;
2423
2424 if (!sp || !GET_CMD_SENSE_LEN(sp))
2425 return;
2426
2427 sense_len = GET_CMD_SENSE_LEN(sp);
2428 sense_ptr = GET_CMD_SENSE_PTR(sp);
2429
2430 cp = GET_CMD_SP(sp);
2431 if (cp == NULL) {
2432 ql_log(ql_log_warn, vha, 0x3025,
2433 "cmd is NULL: already returned to OS (sp=%p).\n", sp);
2434
2435 rsp->status_srb = NULL;
2436 return;
2437 }
2438
2439 if (sense_len > sizeof(pkt->data))
2440 sense_sz = sizeof(pkt->data);
2441 else
2442 sense_sz = sense_len;
2443
2444
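 /* Move sense data. */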
2445 if (IS_FWI2_CAPABLE(ha))
2446 host_to_fcp_swap(pkt->data, sizeof(pkt->data));
2447 memcpy(sense_ptr, pkt->data, sense_sz);
2448 ql_dump_buffer(ql_dbg_io + ql_dbg_buffer, vha, 0x302c,
2449 sense_ptr, sense_sz);
2450
2451 sense_len -= sense_sz;
2452 sense_ptr += sense_sz;
2453
2454 SET_CMD_SENSE_PTR(sp, sense_ptr);
2455 SET_CMD_SENSE_LEN(sp, sense_len);
2456
2457
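 /* Place the command on the done queue once all sense data is copied. */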
2458 if (sense_len == 0) {
2459 rsp->status_srb = NULL;
2460 sp->done(ha, sp, cp->result);
2461 }
2462}
2463
2464
2465
2466
2467
2468
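/**
 * qla2x00_error_entry() - Process an error entry.
 * @vha: SCSI driver HA context
 * @rsp: response queue
 * @pkt: Entry pointer
 */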
2469static void
2470qla2x00_error_entry(scsi_qla_host_t *vha, struct rsp_que *rsp, sts_entry_t *pkt)
2471{
2472 srb_t *sp;
2473 struct qla_hw_data *ha = vha->hw;
2474 const char func[] = "ERROR-IOCB";
2475 uint16_t que = MSW(pkt->handle);
2476 struct req_que *req = NULL;
2477 int res = DID_ERROR << 16;
2478
2479 ql_dbg(ql_dbg_async, vha, 0x502a,
2480 "Type of error status in response: 0x%x.\n", pkt->entry_status);
2481
2482 if (que >= ha->max_req_queues || !ha->req_q_map[que])
2483 goto fatal;
2484
2485 req = ha->req_q_map[que];
2486
2487 if (pkt->entry_status & RF_BUSY)
2488 res = DID_BUS_BUSY << 16;
2489
2490 sp = qla2x00_get_sp_from_handle(vha, func, req, pkt);
2491 if (sp) {
2492 sp->done(ha, sp, res);
2493 return;
2494 }
2495fatal:
2496 ql_log(ql_log_warn, vha, 0x5030,
2497 "Error entry - invalid handle/queue (%04x).\n", que);
2498}
2499
2500
2501
2502
2503
2504
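/**
 * qla24xx_mbx_completion() - Process mailbox command completions.
 * @vha: SCSI driver HA context
 * @mb0: Mailbox0 register
 */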
2505static void
2506qla24xx_mbx_completion(scsi_qla_host_t *vha, uint16_t mb0)
2507{
2508 uint16_t cnt;
2509 uint32_t mboxes;
2510 uint16_t __iomem *wptr;
2511 struct qla_hw_data *ha = vha->hw;
2512 struct device_reg_24xx __iomem *reg = &ha->iobase->isp24;
2513
2514
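 /* Default to all mailbox registers; narrow to mcp->in_mb when available. */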
2515 mboxes = (1 << ha->mbx_count) - 1;
2516 if (!ha->mcp)
2517 ql_dbg(ql_dbg_async, vha, 0x504e, "MBX pointer ERROR.\n");
2518 else
2519 mboxes = ha->mcp->in_mb;
2520
2521
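 /* Load return mailbox registers. */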
2522 ha->flags.mbox_int = 1;
2523 ha->mailbox_out[0] = mb0;
2524 mboxes >>= 1;
2525 wptr = (uint16_t __iomem *)&reg->mailbox1;
2526
2527 for (cnt = 1; cnt < ha->mbx_count; cnt++) {
2528 if (mboxes & BIT_0)
2529 ha->mailbox_out[cnt] = RD_REG_WORD(wptr);
2530
2531 mboxes >>= 1;
2532 wptr++;
2533 }
2534}
2535
2536static void
2537qla24xx_abort_iocb_entry(scsi_qla_host_t *vha, struct req_que *req,
2538 struct abort_entry_24xx *pkt)
2539{
2540 const char func[] = "ABT_IOCB";
2541 srb_t *sp;
2542 struct srb_iocb *abt;
2543
2544 sp = qla2x00_get_sp_from_handle(vha, func, req, pkt);
2545 if (!sp)
2546 return;
2547
2548 abt = &sp->u.iocb_cmd;
2549 abt->u.abt.comp_status = le32_to_cpu(pkt->nport_handle);
2550 sp->done(vha, sp, 0);
2551}
2552
2553
2554
2555
2556
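/**
 * qla24xx_process_response_queue() - Process response queue entries.
 * @vha: SCSI driver HA context
 * @rsp: response queue
 */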
2557void qla24xx_process_response_queue(struct scsi_qla_host *vha,
2558 struct rsp_que *rsp)
2559{
2560 struct sts_entry_24xx *pkt;
2561 struct qla_hw_data *ha = vha->hw;
2562
2563 if (!vha->flags.online)
2564 return;
2565
2566 if (rsp->msix && rsp->msix->cpuid != smp_processor_id()) {
2567
2568
2569
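 /*
 * The kernel has not notified us of the IRQ's CPU change,
 * so record the new CPU here.
 */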
2570 rsp->msix->cpuid = smp_processor_id();
2571 ha->tgt.rspq_vector_cpuid = rsp->msix->cpuid;
2572 }
2573
2574 while (rsp->ring_ptr->signature != RESPONSE_PROCESSED) {
2575 pkt = (struct sts_entry_24xx *)rsp->ring_ptr;
2576
2577 rsp->ring_index++;
2578 if (rsp->ring_index == rsp->length) {
2579 rsp->ring_index = 0;
2580 rsp->ring_ptr = rsp->ring;
2581 } else {
2582 rsp->ring_ptr++;
2583 }
2584
2585 if (pkt->entry_status != 0) {
2586 qla2x00_error_entry(vha, rsp, (sts_entry_t *) pkt);
2587
2588 if (qlt_24xx_process_response_error(vha, pkt))
2589 goto process_err;
2590
2591 ((response_t *)pkt)->signature = RESPONSE_PROCESSED;
2592 wmb();
2593 continue;
2594 }
2595process_err:
2596
2597 switch (pkt->entry_type) {
2598 case STATUS_TYPE:
2599 qla2x00_status_entry(vha, rsp, pkt);
2600 break;
2601 case STATUS_CONT_TYPE:
2602 qla2x00_status_cont_entry(rsp, (sts_cont_entry_t *)pkt);
2603 break;
2604 case VP_RPT_ID_IOCB_TYPE:
2605 qla24xx_report_id_acquisition(vha,
2606 (struct vp_rpt_id_entry_24xx *)pkt);
2607 break;
2608 case LOGINOUT_PORT_IOCB_TYPE:
2609 qla24xx_logio_entry(vha, rsp->req,
2610 (struct logio_entry_24xx *)pkt);
2611 break;
2612 case CT_IOCB_TYPE:
2613 qla24xx_els_ct_entry(vha, rsp->req, pkt, CT_IOCB_TYPE);
2614 break;
2615 case ELS_IOCB_TYPE:
2616 qla24xx_els_ct_entry(vha, rsp->req, pkt, ELS_IOCB_TYPE);
2617 break;
2618 case ABTS_RECV_24XX:
2619 if (IS_QLA83XX(ha) || IS_QLA27XX(ha)) {
2620
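 /* Ensure that the ATIO queue is empty. */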
2621 qlt_handle_abts_recv(vha, (response_t *)pkt);
2622 break;
2623 } else {
2624
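 /* No break: drop through to the target-mode response cases below. */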
2625 qlt_24xx_process_atio_queue(vha, 1);
2626 }
2627 case ABTS_RESP_24XX:
2628 case CTIO_TYPE7:
2629 case NOTIFY_ACK_TYPE:
2630 case CTIO_CRC2:
2631 qlt_response_pkt_all_vps(vha, (response_t *)pkt);
2632 break;
2633 case MARKER_TYPE:
2634
2635
2636
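 /*
 * Do nothing in this case; this check prevents markers from
 * falling into the default case.
 */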
2637 break;
2638 case ABORT_IOCB_TYPE:
2639 qla24xx_abort_iocb_entry(vha, rsp->req,
2640 (struct abort_entry_24xx *)pkt);
2641 break;
2642 default:
2643
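 /* Type not supported. */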
2644 ql_dbg(ql_dbg_async, vha, 0x5042,
2645 "Received unknown response pkt type %x "
2646 "entry status=%x.\n",
2647 pkt->entry_type, pkt->entry_status);
2648 break;
2649 }
2650 ((response_t *)pkt)->signature = RESPONSE_PROCESSED;
2651 wmb();
2652 }
2653
2654
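 /* Adjust ring index. */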
2655 if (IS_P3P_TYPE(ha)) {
2656 struct device_reg_82xx __iomem *reg = &ha->iobase->isp82;
2657 WRT_REG_DWORD(&reg->rsp_q_out[0], rsp->ring_index);
2658 } else
2659 WRT_REG_DWORD(rsp->rsp_q_out, rsp->ring_index);
2660}
2661
2662static void
2663qla2xxx_check_risc_status(scsi_qla_host_t *vha)
2664{
2665 int rval;
2666 uint32_t cnt;
2667 struct qla_hw_data *ha = vha->hw;
2668 struct device_reg_24xx __iomem *reg = &ha->iobase->isp24;
2669
2670 if (!IS_QLA25XX(ha) && !IS_QLA81XX(ha) && !IS_QLA83XX(ha) &&
2671 !IS_QLA27XX(ha))
2672 return;
2673
2674 rval = QLA_SUCCESS;
2675 WRT_REG_DWORD(&reg->iobase_addr, 0x7C00);
2676 RD_REG_DWORD(&reg->iobase_addr);
2677 WRT_REG_DWORD(&reg->iobase_window, 0x0001);
2678 for (cnt = 10000; (RD_REG_DWORD(&reg->iobase_window) & BIT_0) == 0 &&
2679 rval == QLA_SUCCESS; cnt--) {
2680 if (cnt) {
2681 WRT_REG_DWORD(&reg->iobase_window, 0x0001);
2682 udelay(10);
2683 } else
2684 rval = QLA_FUNCTION_TIMEOUT;
2685 }
2686 if (rval == QLA_SUCCESS)
2687 goto next_test;
2688
2689 rval = QLA_SUCCESS;
2690 WRT_REG_DWORD(&reg->iobase_window, 0x0003);
2691 for (cnt = 100; (RD_REG_DWORD(&reg->iobase_window) & BIT_0) == 0 &&
2692 rval == QLA_SUCCESS; cnt--) {
2693 if (cnt) {
2694 WRT_REG_DWORD(&reg->iobase_window, 0x0003);
2695 udelay(10);
2696 } else
2697 rval = QLA_FUNCTION_TIMEOUT;
2698 }
2699 if (rval != QLA_SUCCESS)
2700 goto done;
2701
2702next_test:
2703 if (RD_REG_DWORD(&reg->iobase_c8) & BIT_3)
2704 ql_log(ql_log_info, vha, 0x504c,
2705 "Additional code -- 0x55AA.\n");
2706
2707done:
2708 WRT_REG_DWORD(&reg->iobase_window, 0x0000);
2709 RD_REG_DWORD(&reg->iobase_window);
2710}
2711
2712
2713
2714
2715
2716
2717
2718
2719
2720
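/**
 * qla24xx_intr_handler() - Process interrupts for the ISP23xx and ISP24xx.
 * @irq: interrupt number
 * @dev_id: SCSI driver HA context
 *
 * Called by the system whenever the host adapter generates an interrupt.
 *
 * Returns handled flag.
 */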
2721irqreturn_t
2722qla24xx_intr_handler(int irq, void *dev_id)
2723{
2724 scsi_qla_host_t *vha;
2725 struct qla_hw_data *ha;
2726 struct device_reg_24xx __iomem *reg;
2727 int status;
2728 unsigned long iter;
2729 uint32_t stat;
2730 uint32_t hccr;
2731 uint16_t mb[8];
2732 struct rsp_que *rsp;
2733 unsigned long flags;
2734
2735 rsp = (struct rsp_que *) dev_id;
2736 if (!rsp) {
2737 ql_log(ql_log_info, NULL, 0x5059,
2738 "%s: NULL response queue pointer.\n", __func__);
2739 return IRQ_NONE;
2740 }
2741
2742 ha = rsp->hw;
2743 reg = &ha->iobase->isp24;
2744 status = 0;
2745
2746 if (unlikely(pci_channel_offline(ha->pdev)))
2747 return IRQ_HANDLED;
2748
2749 spin_lock_irqsave(&ha->hardware_lock, flags);
2750 vha = pci_get_drvdata(ha->pdev);
2751 for (iter = 50; iter--; ) {
2752 stat = RD_REG_DWORD(&reg->host_status);
2753 if (qla2x00_check_reg32_for_disconnect(vha, stat))
2754 break;
2755 if (stat & HSRX_RISC_PAUSED) {
2756 if (unlikely(pci_channel_offline(ha->pdev)))
2757 break;
2758
2759 hccr = RD_REG_DWORD(&reg->hccr);
2760
2761 ql_log(ql_log_warn, vha, 0x504b,
2762 "RISC paused -- HCCR=%x, Dumping firmware.\n",
2763 hccr);
2764
2765 qla2xxx_check_risc_status(vha);
2766
2767 ha->isp_ops->fw_dump(vha, 1);
2768 set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
2769 break;
2770 } else if ((stat & HSRX_RISC_INT) == 0)
2771 break;
2772
2773 switch (stat & 0xff) {
2774 case INTR_ROM_MB_SUCCESS:
2775 case INTR_ROM_MB_FAILED:
2776 case INTR_MB_SUCCESS:
2777 case INTR_MB_FAILED:
2778 qla24xx_mbx_completion(vha, MSW(stat));
2779 status |= MBX_INTERRUPT;
2780
2781 break;
2782 case INTR_ASYNC_EVENT:
2783 mb[0] = MSW(stat);
2784 mb[1] = RD_REG_WORD(&reg->mailbox1);
2785 mb[2] = RD_REG_WORD(&reg->mailbox2);
2786 mb[3] = RD_REG_WORD(&reg->mailbox3);
2787 qla2x00_async_event(vha, rsp, mb);
2788 break;
2789 case INTR_RSP_QUE_UPDATE:
2790 case INTR_RSP_QUE_UPDATE_83XX:
2791 qla24xx_process_response_queue(vha, rsp);
2792 break;
2793 case INTR_ATIO_QUE_UPDATE: {
2794 unsigned long flags2;
2795 spin_lock_irqsave(&ha->tgt.atio_lock, flags2);
2796 qlt_24xx_process_atio_queue(vha, 1);
2797 spin_unlock_irqrestore(&ha->tgt.atio_lock, flags2);
2798 break;
2799 }
2800 case INTR_ATIO_RSP_QUE_UPDATE: {
2801 unsigned long flags2;
2802 spin_lock_irqsave(&ha->tgt.atio_lock, flags2);
2803 qlt_24xx_process_atio_queue(vha, 1);
2804 spin_unlock_irqrestore(&ha->tgt.atio_lock, flags2);
2805
2806 qla24xx_process_response_queue(vha, rsp);
2807 break;
2808 }
2809 default:
2810 ql_dbg(ql_dbg_async, vha, 0x504f,
2811 "Unrecognized interrupt type (%d).\n", stat & 0xff);
2812 break;
2813 }
2814 WRT_REG_DWORD(&reg->hccr, HCCRX_CLR_RISC_INT);
2815 RD_REG_DWORD_RELAXED(&reg->hccr);
2816 if (unlikely(IS_QLA83XX(ha) && (ha->pdev->revision == 1)))
2817 ndelay(3500);
2818 }
2819 qla2x00_handle_mbx_completion(ha, status);
2820 spin_unlock_irqrestore(&ha->hardware_lock, flags);
2821
2822 return IRQ_HANDLED;
2823}
2824
2825static irqreturn_t
2826qla24xx_msix_rsp_q(int irq, void *dev_id)
2827{
2828 struct qla_hw_data *ha;
2829 struct rsp_que *rsp;
2830 struct device_reg_24xx __iomem *reg;
2831 struct scsi_qla_host *vha;
2832 unsigned long flags;
2833 uint32_t stat = 0;
2834
2835 rsp = (struct rsp_que *) dev_id;
2836 if (!rsp) {
2837 ql_log(ql_log_info, NULL, 0x505a,
2838 "%s: NULL response queue pointer.\n", __func__);
2839 return IRQ_NONE;
2840 }
2841 ha = rsp->hw;
2842 reg = &ha->iobase->isp24;
2843
2844 spin_lock_irqsave(&ha->hardware_lock, flags);
2845
2846 vha = pci_get_drvdata(ha->pdev);
2847
2848
2849
2850
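 /*
 * Use the host_status register to check for PCI disconnection
 * before we process the response queue.
 */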
2851 stat = RD_REG_DWORD(&reg->host_status);
2852 if (qla2x00_check_reg32_for_disconnect(vha, stat))
2853 goto out;
2854 qla24xx_process_response_queue(vha, rsp);
2855 if (!ha->flags.disable_msix_handshake) {
2856 WRT_REG_DWORD(&reg->hccr, HCCRX_CLR_RISC_INT);
2857 RD_REG_DWORD_RELAXED(&reg->hccr);
2858 }
2859out:
2860 spin_unlock_irqrestore(&ha->hardware_lock, flags);
2861
2862 return IRQ_HANDLED;
2863}
2864
2865static irqreturn_t
2866qla25xx_msix_rsp_q(int irq, void *dev_id)
2867{
2868 struct qla_hw_data *ha;
2869 scsi_qla_host_t *vha;
2870 struct rsp_que *rsp;
2871 struct device_reg_24xx __iomem *reg;
2872 unsigned long flags;
2873 uint32_t hccr = 0;
2874
2875 rsp = (struct rsp_que *) dev_id;
2876 if (!rsp) {
2877 ql_log(ql_log_info, NULL, 0x505b,
2878 "%s: NULL response queue pointer.\n", __func__);
2879 return IRQ_NONE;
2880 }
2881 ha = rsp->hw;
2882 vha = pci_get_drvdata(ha->pdev);
2883
2884
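 /* Clear the interrupt, if enabled, for this response queue. */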
2885 if (!ha->flags.disable_msix_handshake) {
2886 reg = &ha->iobase->isp24;
2887 spin_lock_irqsave(&ha->hardware_lock, flags);
2888 WRT_REG_DWORD(&reg->hccr, HCCRX_CLR_RISC_INT);
2889 hccr = RD_REG_DWORD_RELAXED(&reg->hccr);
2890 spin_unlock_irqrestore(&ha->hardware_lock, flags);
2891 }
2892 if (qla2x00_check_reg32_for_disconnect(vha, hccr))
2893 goto out;
2894 queue_work_on((int) (rsp->id - 1), ha->wq, &rsp->q_work);
2895
2896out:
2897 return IRQ_HANDLED;
2898}
2899
2900static irqreturn_t
2901qla24xx_msix_default(int irq, void *dev_id)
2902{
2903 scsi_qla_host_t *vha;
2904 struct qla_hw_data *ha;
2905 struct rsp_que *rsp;
2906 struct device_reg_24xx __iomem *reg;
2907 int status;
2908 uint32_t stat;
2909 uint32_t hccr;
2910 uint16_t mb[8];
2911 unsigned long flags;
2912
2913 rsp = (struct rsp_que *) dev_id;
2914 if (!rsp) {
2915 ql_log(ql_log_info, NULL, 0x505c,
2916 "%s: NULL response queue pointer.\n", __func__);
2917 return IRQ_NONE;
2918 }
2919 ha = rsp->hw;
2920 reg = &ha->iobase->isp24;
2921 status = 0;
2922
2923 spin_lock_irqsave(&ha->hardware_lock, flags);
2924 vha = pci_get_drvdata(ha->pdev);
2925 do {
2926 stat = RD_REG_DWORD(&reg->host_status);
2927 if (qla2x00_check_reg32_for_disconnect(vha, stat))
2928 break;
2929 if (stat & HSRX_RISC_PAUSED) {
2930 if (unlikely(pci_channel_offline(ha->pdev)))
2931 break;
2932
2933 hccr = RD_REG_DWORD(&reg->hccr);
2934
2935 ql_log(ql_log_info, vha, 0x5050,
2936 "RISC paused -- HCCR=%x, Dumping firmware.\n",
2937 hccr);
2938
2939 qla2xxx_check_risc_status(vha);
2940
2941 ha->isp_ops->fw_dump(vha, 1);
2942 set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
2943 break;
2944 } else if ((stat & HSRX_RISC_INT) == 0)
2945 break;
2946
2947 switch (stat & 0xff) {
2948 case INTR_ROM_MB_SUCCESS:
2949 case INTR_ROM_MB_FAILED:
2950 case INTR_MB_SUCCESS:
2951 case INTR_MB_FAILED:
2952 qla24xx_mbx_completion(vha, MSW(stat));
2953 status |= MBX_INTERRUPT;
2954
2955 break;
2956 case INTR_ASYNC_EVENT:
2957 mb[0] = MSW(stat);
2958 mb[1] = RD_REG_WORD(&reg->mailbox1);
2959 mb[2] = RD_REG_WORD(&reg->mailbox2);
2960 mb[3] = RD_REG_WORD(&reg->mailbox3);
2961 qla2x00_async_event(vha, rsp, mb);
2962 break;
2963 case INTR_RSP_QUE_UPDATE:
2964 case INTR_RSP_QUE_UPDATE_83XX:
2965 qla24xx_process_response_queue(vha, rsp);
2966 break;
2967 case INTR_ATIO_QUE_UPDATE: {
2968 unsigned long flags2;
2969 spin_lock_irqsave(&ha->tgt.atio_lock, flags2);
2970 qlt_24xx_process_atio_queue(vha, 1);
2971 spin_unlock_irqrestore(&ha->tgt.atio_lock, flags2);
2972 break;
2973 }
2974 case INTR_ATIO_RSP_QUE_UPDATE: {
2975 unsigned long flags2;
2976 spin_lock_irqsave(&ha->tgt.atio_lock, flags2);
2977 qlt_24xx_process_atio_queue(vha, 1);
2978 spin_unlock_irqrestore(&ha->tgt.atio_lock, flags2);
2979
2980 qla24xx_process_response_queue(vha, rsp);
2981 break;
2982 }
2983 default:
2984 ql_dbg(ql_dbg_async, vha, 0x5051,
2985 "Unrecognized interrupt type (%d).\n", stat & 0xff);
2986 break;
2987 }
2988 WRT_REG_DWORD(&reg->hccr, HCCRX_CLR_RISC_INT);
2989 } while (0);
2990 qla2x00_handle_mbx_completion(ha, status);
2991 spin_unlock_irqrestore(&ha->hardware_lock, flags);
2992
2993 return IRQ_HANDLED;
2994}
2995
2996
2997
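/* Interrupt handling helpers. */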
2998struct qla_init_msix_entry {
2999 const char *name;
3000 irq_handler_t handler;
3001};
3002
3003static struct qla_init_msix_entry msix_entries[3] = {
3004 { "qla2xxx (default)", qla24xx_msix_default },
3005 { "qla2xxx (rsp_q)", qla24xx_msix_rsp_q },
3006 { "qla2xxx (multiq)", qla25xx_msix_rsp_q },
3007};
3008
3009static struct qla_init_msix_entry qla82xx_msix_entries[2] = {
3010 { "qla2xxx (default)", qla82xx_msix_default },
3011 { "qla2xxx (rsp_q)", qla82xx_msix_rsp_q },
3012};
3013
3014static struct qla_init_msix_entry qla83xx_msix_entries[3] = {
3015 { "qla2xxx (default)", qla24xx_msix_default },
3016 { "qla2xxx (rsp_q)", qla24xx_msix_rsp_q },
3017 { "qla2xxx (atio_q)", qla83xx_msix_atio_q },
3018};
3019
3020static void
3021qla24xx_disable_msix(struct qla_hw_data *ha)
3022{
3023 int i;
3024 struct qla_msix_entry *qentry;
3025 scsi_qla_host_t *vha = pci_get_drvdata(ha->pdev);
3026
3027 for (i = 0; i < ha->msix_count; i++) {
3028 qentry = &ha->msix_entries[i];
3029 if (qentry->have_irq) {
3030
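 /* Unregister the IRQ CPU-affinity notification. */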
3031 irq_set_affinity_notifier(qentry->vector, NULL);
3032 free_irq(qentry->vector, qentry->rsp);
3033 }
3034 }
3035 pci_disable_msix(ha->pdev);
3036 kfree(ha->msix_entries);
3037 ha->msix_entries = NULL;
3038 ha->flags.msix_enabled = 0;
3039 ql_dbg(ql_dbg_init, vha, 0x0042,
3040 "Disabled MSI-X.\n");
3041}
3042
3043static int
3044qla24xx_enable_msix(struct qla_hw_data *ha, struct rsp_que *rsp)
3045{
3046#define MIN_MSIX_COUNT 2
3047#define ATIO_VECTOR 2
3048 int i, ret;
3049 struct msix_entry *entries;
3050 struct qla_msix_entry *qentry;
3051 scsi_qla_host_t *vha = pci_get_drvdata(ha->pdev);
3052
3053 entries = kzalloc(sizeof(struct msix_entry) * ha->msix_count,
3054 GFP_KERNEL);
3055 if (!entries) {
3056 ql_log(ql_log_warn, vha, 0x00bc,
3057 "Failed to allocate memory for msix_entry.\n");
3058 return -ENOMEM;
3059 }
3060
3061 for (i = 0; i < ha->msix_count; i++)
3062 entries[i].entry = i;
3063
3064 ret = pci_enable_msix_range(ha->pdev,
3065 entries, MIN_MSIX_COUNT, ha->msix_count);
3066 if (ret < 0) {
3067 ql_log(ql_log_fatal, vha, 0x00c7,
3068 "MSI-X: Failed to enable support, "
3069 "giving up -- %d/%d.\n",
3070 ha->msix_count, ret);
3071 goto msix_out;
3072 } else if (ret < ha->msix_count) {
3073 ql_log(ql_log_warn, vha, 0x00c6,
3074 "MSI-X: Failed to enable support "
3075 "-- %d/%d. Retrying with %d vectors.\n",
3076 ha->msix_count, ret, ret);
3077 ha->msix_count = ret;
3078 ha->max_rsp_queues = ha->msix_count - 1;
3079 }
3080 ha->msix_entries = kzalloc(sizeof(struct qla_msix_entry) *
3081 ha->msix_count, GFP_KERNEL);
3082 if (!ha->msix_entries) {
3083 ql_log(ql_log_fatal, vha, 0x00c8,
3084 "Failed to allocate memory for ha->msix_entries.\n");
3085 ret = -ENOMEM;
3086 goto msix_out;
3087 }
3088 ha->flags.msix_enabled = 1;
3089
3090 for (i = 0; i < ha->msix_count; i++) {
3091 qentry = &ha->msix_entries[i];
3092 qentry->vector = entries[i].vector;
3093 qentry->entry = entries[i].entry;
3094 qentry->have_irq = 0;
3095 qentry->rsp = NULL;
3096 qentry->irq_notify.notify = qla_irq_affinity_notify;
3097 qentry->irq_notify.release = qla_irq_affinity_release;
3098 qentry->cpuid = -1;
3099 }
3100
3101
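 /* Enable MSI-X vectors for the base queue. */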
3102 for (i = 0; i < 2; i++) {
3103 qentry = &ha->msix_entries[i];
3104 qentry->rsp = rsp;
3105 rsp->msix = qentry;
3106 if (IS_P3P_TYPE(ha))
3107 ret = request_irq(qentry->vector,
3108 qla82xx_msix_entries[i].handler,
3109 0, qla82xx_msix_entries[i].name, rsp);
3110 else
3111 ret = request_irq(qentry->vector,
3112 msix_entries[i].handler,
3113 0, msix_entries[i].name, rsp);
3114 if (ret)
3115 goto msix_register_fail;
3116 qentry->have_irq = 1;
3117
3118
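 /* Register for CPU-affinity notification. */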
3119 irq_set_affinity_notifier(qentry->vector, &qentry->irq_notify);
3120
3121
3122
3123
3124
3125
3126
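 /*
 * Schedule work (i.e. trigger a notification) to read the CPU
 * mask for this specific IRQ. kref_get() is required because
 * irq_affinity_notify() will do a kref_put().
 */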
3127 kref_get(&qentry->irq_notify.kref);
3128 schedule_work(&qentry->irq_notify.work);
3129 }
3130
3131
3132
3133
3134
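 /*
 * If target mode is enabled, also request the vector for the
 * ATIO queue.
 */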
3135 if (QLA_TGT_MODE_ENABLED() && IS_ATIO_MSIX_CAPABLE(ha)) {
3136 qentry = &ha->msix_entries[ATIO_VECTOR];
3137 qentry->rsp = rsp;
3138 rsp->msix = qentry;
3139 ret = request_irq(qentry->vector,
3140 qla83xx_msix_entries[ATIO_VECTOR].handler,
3141 0, qla83xx_msix_entries[ATIO_VECTOR].name, rsp);
3142 if (!ret)
 qentry->have_irq = 1;
3143 }
3144
3145msix_register_fail:
3146 if (ret) {
3147 ql_log(ql_log_fatal, vha, 0x00cb,
3148 "MSI-X: Unable to register handler -- %x/%d.\n",
3149 qentry->vector, ret);
3150 qla24xx_disable_msix(ha);
3151 ha->mqenable = 0;
3152 goto msix_out;
3153 }
3154
3155
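 /* Enable the MSI-X vector for response-queue updates on queue 0. */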
3156 if (IS_QLA83XX(ha) || IS_QLA27XX(ha)) {
3157 if (ha->msixbase && ha->mqiobase &&
3158 (ha->max_rsp_queues > 1 || ha->max_req_queues > 1))
3159 ha->mqenable = 1;
3160 } else
3161 if (ha->mqiobase
3162 && (ha->max_rsp_queues > 1 || ha->max_req_queues > 1))
3163 ha->mqenable = 1;
3164 ql_dbg(ql_dbg_multiq, vha, 0xc005,
3165 "mqiobase=%p, max_rsp_queues=%d, max_req_queues=%d.\n",
3166 ha->mqiobase, ha->max_rsp_queues, ha->max_req_queues);
3167 ql_dbg(ql_dbg_init, vha, 0x0055,
3168 "mqiobase=%p, max_rsp_queues=%d, max_req_queues=%d.\n",
3169 ha->mqiobase, ha->max_rsp_queues, ha->max_req_queues);
3170
3171msix_out:
3172 kfree(entries);
3173 return ret;
3174}
3175
3176int
3177qla2x00_request_irqs(struct qla_hw_data *ha, struct rsp_que *rsp)
3178{
3179 int ret = QLA_FUNCTION_FAILED;
3180 device_reg_t *reg = ha->iobase;
3181 scsi_qla_host_t *vha = pci_get_drvdata(ha->pdev);
3182
3183
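 /* If possible, enable MSI-X. */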
3184 if (!IS_QLA2432(ha) && !IS_QLA2532(ha) && !IS_QLA8432(ha) &&
3185 !IS_CNA_CAPABLE(ha) && !IS_QLA2031(ha) && !IS_QLAFX00(ha) &&
3186 !IS_QLA27XX(ha))
3187 goto skip_msi;
3188
3189 if (ha->pdev->subsystem_vendor == PCI_VENDOR_ID_HP &&
3190 (ha->pdev->subsystem_device == 0x7040 ||
3191 ha->pdev->subsystem_device == 0x7041 ||
3192 ha->pdev->subsystem_device == 0x1705)) {
3193 ql_log(ql_log_warn, vha, 0x0034,
3194 "MSI-X: Unsupported ISP 2432 SSVID/SSDID (0x%X,0x%X).\n",
3195 ha->pdev->subsystem_vendor,
3196 ha->pdev->subsystem_device);
3197 goto skip_msi;
3198 }
3199
3200 if (IS_QLA2432(ha) && (ha->pdev->revision < QLA_MSIX_CHIP_REV_24XX)) {
3201 ql_log(ql_log_warn, vha, 0x0035,
3202 "MSI-X; Unsupported ISP2432 (0x%X, 0x%X).\n",
3203 ha->pdev->revision, QLA_MSIX_CHIP_REV_24XX);
3204 goto skip_msix;
3205 }
3206
3207 ret = qla24xx_enable_msix(ha, rsp);
3208 if (!ret) {
3209 ql_dbg(ql_dbg_init, vha, 0x0036,
3210 "MSI-X: Enabled (0x%X, 0x%X).\n",
3211 ha->chip_revision, ha->fw_attributes);
3212 goto clear_risc_ints;
3213 }
3214
3215skip_msix:
3216
3217 ql_log(ql_log_info, vha, 0x0037,
3218 "Falling back to MSI mode -- %d.\n", ret);
3219
3220 if (!IS_QLA24XX(ha) && !IS_QLA2532(ha) && !IS_QLA8432(ha) &&
3221 !IS_QLA8001(ha) && !IS_P3P_TYPE(ha) && !IS_QLAFX00(ha) &&
3222 !IS_QLA27XX(ha))
3223 goto skip_msi;
3224
3225 ret = pci_enable_msi(ha->pdev);
3226 if (!ret) {
3227 ql_dbg(ql_dbg_init, vha, 0x0038,
3228 "MSI: Enabled.\n");
3229 ha->flags.msi_enabled = 1;
3230 } else
3231 ql_log(ql_log_warn, vha, 0x0039,
3232 "Falling back to INTa mode -- %d.\n", ret);
3233skip_msi:
3234
3235
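 /* Skip INTx on ISP82xx. */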
3236 if (!ha->flags.msi_enabled && IS_QLA82XX(ha))
3237 return QLA_FUNCTION_FAILED;
3238
3239 ret = request_irq(ha->pdev->irq, ha->isp_ops->intr_handler,
3240 ha->flags.msi_enabled ? 0 : IRQF_SHARED,
3241 QLA2XXX_DRIVER_NAME, rsp);
3242 if (ret) {
3243 ql_log(ql_log_warn, vha, 0x003a,
3244 "Failed to reserve interrupt %d; already in use.\n",
3245 ha->pdev->irq);
3246 goto fail;
3247 } else if (!ha->flags.msi_enabled) {
3248 ql_dbg(ql_dbg_init, vha, 0x0125,
3249 "INTa mode: Enabled.\n");
3250 ha->flags.mr_intr_valid = 1;
3251 }
3252
3253clear_risc_ints:
3254 if (IS_FWI2_CAPABLE(ha) || IS_QLAFX00(ha))
3255 goto fail;
3256
3257 spin_lock_irq(&ha->hardware_lock);
3258 WRT_REG_WORD(&reg->isp.semaphore, 0);
3259 spin_unlock_irq(&ha->hardware_lock);
3260
3261fail:
3262 return ret;
3263}
3264
3265void
3266qla2x00_free_irqs(scsi_qla_host_t *vha)
3267{
3268 struct qla_hw_data *ha = vha->hw;
3269 struct rsp_que *rsp;
3270
3271
3272
3273
3274
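 /*
 * Check that ha->rsp_q_map is valid in case we are called from a
 * probe-failure context, before the response queues are set up.
 */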
3275 if (!ha->rsp_q_map || !ha->rsp_q_map[0])
3276 return;
3277 rsp = ha->rsp_q_map[0];
3278
3279 if (ha->flags.msix_enabled)
3280 qla24xx_disable_msix(ha);
3281 else if (ha->flags.msi_enabled) {
3282 free_irq(ha->pdev->irq, rsp);
3283 pci_disable_msi(ha->pdev);
3284 } else
3285 free_irq(ha->pdev->irq, rsp);
3286}
3287
3288
3289int qla25xx_request_irq(struct rsp_que *rsp)
3290{
3291 struct qla_hw_data *ha = rsp->hw;
3292 struct qla_init_msix_entry *intr = &msix_entries[2];
3293 struct qla_msix_entry *msix = rsp->msix;
3294 scsi_qla_host_t *vha = pci_get_drvdata(ha->pdev);
3295 int ret;
3296
3297 ret = request_irq(msix->vector, intr->handler, 0, intr->name, rsp);
3298 if (ret) {
3299 ql_log(ql_log_fatal, vha, 0x00e6,
3300 "MSI-X: Unable to register handler -- %x/%d.\n",
3301 msix->vector, ret);
3302 return ret;
3303 }
3304 msix->have_irq = 1;
3305 msix->rsp = rsp;
3306 return ret;
3307}
3308
3309
3310
3311static void qla_irq_affinity_notify(struct irq_affinity_notify *notify,
3312 const cpumask_t *mask)
3313{
3314 struct qla_msix_entry *e =
3315 container_of(notify, struct qla_msix_entry, irq_notify);
3316 struct qla_hw_data *ha;
3317 struct scsi_qla_host *base_vha;
3318
3319
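 /* Users are expected to set the affinity mask to a single CPU. */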
3320 e->cpuid = cpumask_first(mask);
3321
3322 ha = e->rsp->hw;
3323 base_vha = pci_get_drvdata(ha->pdev);
3324
3325 ql_dbg(ql_dbg_init, base_vha, 0xffff,
3326 "%s: host%ld: vector %d cpu %d\n", __func__,
3327 base_vha->host_no, e->vector, e->cpuid);
3328
3329 if (e->have_irq) {
3330 if ((IS_QLA83XX(ha) || IS_QLA27XX(ha)) &&
3331 (e->entry == QLA83XX_RSPQ_MSIX_ENTRY_NUMBER)) {
3332 ha->tgt.rspq_vector_cpuid = e->cpuid;
3333 ql_dbg(ql_dbg_init, base_vha, 0xffff,
3334 "%s: host%ld: rspq vector %d cpu %d runtime change\n",
3335 __func__, base_vha->host_no, e->vector, e->cpuid);
3336 }
3337 }
3338}
3339
3340static void qla_irq_affinity_release(struct kref *ref)
3341{
3342 struct irq_affinity_notify *notify =
3343 container_of(ref, struct irq_affinity_notify, kref);
3344 struct qla_msix_entry *e =
3345 container_of(notify, struct qla_msix_entry, irq_notify);
3346 struct scsi_qla_host *base_vha = pci_get_drvdata(e->rsp->hw->pdev);
3347
3348 ql_dbg(ql_dbg_init, base_vha, 0xffff,
3349 "%s: host%ld: vector %d cpu %d\n", __func__,
3350 base_vha->host_no, e->vector, e->cpuid);
3351}
3352