#include "qla_def.h"

#include <linux/blkdev.h>
#include <linux/delay.h>

#include <scsi/scsi_tcq.h>

static void qla25xx_set_que(srb_t *, struct rsp_que **);
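/**
 * qla2x00_get_cmd_direction() - Map a SCSI command's data direction to the
 * CF_* control flags and update the adapter's byte counters.
 * @sp: SCSI request block.
 *
 * Returns CF_WRITE, CF_READ, or 0 for commands that move no data.
 */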
static inline uint16_t
qla2x00_get_cmd_direction(srb_t *sp)
{
	uint16_t cflags;
	struct scsi_cmnd *cmd = GET_CMD_SP(sp);

	cflags = 0;

	/* Set transfer direction. */
	if (cmd->sc_data_direction == DMA_TO_DEVICE) {
		cflags = CF_WRITE;
		sp->fcport->vha->hw->qla_stats.output_bytes +=
		    scsi_bufflen(cmd);
	} else if (cmd->sc_data_direction == DMA_FROM_DEVICE) {
		cflags = CF_READ;
		sp->fcport->vha->hw->qla_stats.input_bytes +=
		    scsi_bufflen(cmd);
	}
	return (cflags);
}

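/**
 * qla2x00_calc_iocbs_32() - Calculate the number of request entries needed
 * for a command with 32-bit data segment descriptors.
 * @dsds: number of data segment descriptors.
 *
 * The command IOCB holds three DSDs; each continuation IOCB holds seven.
 */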
uint16_t
qla2x00_calc_iocbs_32(uint16_t dsds)
{
	uint16_t iocbs;

	iocbs = 1;
	if (dsds > 3) {
		iocbs += (dsds - 3) / 7;
		if ((dsds - 3) % 7)
			iocbs++;
	}
	return (iocbs);
}

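/**
 * qla2x00_calc_iocbs_64() - Calculate the number of request entries needed
 * for a command with 64-bit data segment descriptors.
 * @dsds: number of data segment descriptors.
 *
 * The command IOCB holds two DSDs; each continuation IOCB holds five.
 */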
uint16_t
qla2x00_calc_iocbs_64(uint16_t dsds)
{
	uint16_t iocbs;

	iocbs = 1;
	if (dsds > 2) {
		iocbs += (dsds - 2) / 5;
		if ((dsds - 2) % 5)
			iocbs++;
	}
	return (iocbs);
}

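/**
 * qla2x00_prep_cont_type0_iocb() - Initialize a Continuation Type 0 IOCB on
 * the request ring.
 * @vha: host adapter.
 *
 * Returns a pointer to the continuation packet.
 */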
static inline cont_entry_t *
qla2x00_prep_cont_type0_iocb(struct scsi_qla_host *vha)
{
	cont_entry_t *cont_pkt;
	struct req_que *req = vha->req;

	/* Adjust ring index. */
	req->ring_index++;
	if (req->ring_index == req->length) {
		req->ring_index = 0;
		req->ring_ptr = req->ring;
	} else {
		req->ring_ptr++;
	}

	cont_pkt = (cont_entry_t *)req->ring_ptr;

	/* Load packet defaults. */
	*((uint32_t *)(&cont_pkt->entry_type)) =
	    __constant_cpu_to_le32(CONTINUE_TYPE);

	return (cont_pkt);
}

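/**
 * qla2x00_prep_cont_type1_iocb() - Initialize a Continuation Type 1 IOCB
 * (64-bit addressing) on the given request queue.
 * @vha: host adapter.
 * @req: request queue to advance.
 *
 * Returns a pointer to the continuation packet.
 */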
static inline cont_a64_entry_t *
qla2x00_prep_cont_type1_iocb(scsi_qla_host_t *vha, struct req_que *req)
{
	cont_a64_entry_t *cont_pkt;

	/* Adjust ring index. */
	req->ring_index++;
	if (req->ring_index == req->length) {
		req->ring_index = 0;
		req->ring_ptr = req->ring;
	} else {
		req->ring_ptr++;
	}

	cont_pkt = (cont_a64_entry_t *)req->ring_ptr;

	/* Load packet defaults. */
	*((uint32_t *)(&cont_pkt->entry_type)) =
	    __constant_cpu_to_le32(CONTINUE_A64_TYPE);

	return (cont_pkt);
}

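/**
 * qla24xx_configure_prot_mode() - Translate the command's protection
 * operation into firmware DIF protection options.
 * @sp: SCSI request block.
 * @fw_prot_opts: returned PO_MODE_DIF_* option mask.
 *
 * Returns the number of protection scatter/gather entries, or 0 when the
 * guard type is not CRC and protection handling is skipped.
 */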
static inline int
qla24xx_configure_prot_mode(srb_t *sp, uint16_t *fw_prot_opts)
{
	struct scsi_cmnd *cmd = GET_CMD_SP(sp);
	uint8_t guard = scsi_host_get_guard(cmd->device->host);

	/* We only support T10 DIF right now */
	if (guard != SHOST_DIX_GUARD_CRC) {
		ql_dbg(ql_dbg_io, sp->fcport->vha, 0x3007,
		    "Unsupported guard: %d for cmd=%p.\n", guard, cmd);
		return 0;
	}

	*fw_prot_opts = 0;

	/* Translate SCSI opcode to a protection opcode */
	switch (scsi_get_prot_op(cmd)) {
	case SCSI_PROT_READ_STRIP:
		*fw_prot_opts |= PO_MODE_DIF_REMOVE;
		break;
	case SCSI_PROT_WRITE_INSERT:
		*fw_prot_opts |= PO_MODE_DIF_INSERT;
		break;
	case SCSI_PROT_READ_INSERT:
		*fw_prot_opts |= PO_MODE_DIF_INSERT;
		break;
	case SCSI_PROT_WRITE_STRIP:
		*fw_prot_opts |= PO_MODE_DIF_REMOVE;
		break;
	case SCSI_PROT_READ_PASS:
		*fw_prot_opts |= PO_MODE_DIF_PASS;
		break;
	case SCSI_PROT_WRITE_PASS:
		*fw_prot_opts |= PO_MODE_DIF_PASS;
		break;
	default:	/* Normal Request */
		*fw_prot_opts |= PO_MODE_DIF_PASS;
		break;
	}

	return scsi_prot_sg_count(cmd);
}

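/**
 * qla2x00_build_scsi_iocbs_32() - Build IOCB command capable of 32-bit
 * addressing data segments.
 * @sp: SCSI request block.
 * @cmd_pkt: Command Type 2 IOCB to populate.
 * @tot_dsds: total number of data segments to transfer.
 */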
void qla2x00_build_scsi_iocbs_32(srb_t *sp, cmd_entry_t *cmd_pkt,
    uint16_t tot_dsds)
{
	uint16_t avail_dsds;
	uint32_t *cur_dsd;
	scsi_qla_host_t *vha;
	struct scsi_cmnd *cmd;
	struct scatterlist *sg;
	int i;

	cmd = GET_CMD_SP(sp);

	/* Update entry type to indicate Command Type 2 IOCB */
	*((uint32_t *)(&cmd_pkt->entry_type)) =
	    __constant_cpu_to_le32(COMMAND_TYPE);

	/* No data transfer */
	if (!scsi_bufflen(cmd) || cmd->sc_data_direction == DMA_NONE) {
		cmd_pkt->byte_count = __constant_cpu_to_le32(0);
		return;
	}

	vha = sp->fcport->vha;
	cmd_pkt->control_flags |= cpu_to_le16(qla2x00_get_cmd_direction(sp));

	/* Three DSDs are available in the Command Type 2 IOCB */
	avail_dsds = 3;
	cur_dsd = (uint32_t *)&cmd_pkt->dseg_0_address;

	/* Load data segments */
	scsi_for_each_sg(cmd, sg, tot_dsds, i) {
		cont_entry_t *cont_pkt;

		/* Allocate additional continuation packets? */
		if (avail_dsds == 0) {
			/*
			 * Seven DSDs are available in the Continuation
			 * Type 0 IOCB.
			 */
			cont_pkt = qla2x00_prep_cont_type0_iocb(vha);
			cur_dsd = (uint32_t *)&cont_pkt->dseg_0_address;
			avail_dsds = 7;
		}

		*cur_dsd++ = cpu_to_le32(sg_dma_address(sg));
		*cur_dsd++ = cpu_to_le32(sg_dma_len(sg));
		avail_dsds--;
	}
}

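/**
 * qla2x00_build_scsi_iocbs_64() - Build IOCB command capable of 64-bit
 * addressing data segments.
 * @sp: SCSI request block.
 * @cmd_pkt: Command Type 3 IOCB to populate.
 * @tot_dsds: total number of data segments to transfer.
 */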
void qla2x00_build_scsi_iocbs_64(srb_t *sp, cmd_entry_t *cmd_pkt,
    uint16_t tot_dsds)
{
	uint16_t avail_dsds;
	uint32_t *cur_dsd;
	scsi_qla_host_t *vha;
	struct scsi_cmnd *cmd;
	struct scatterlist *sg;
	int i;

	cmd = GET_CMD_SP(sp);

	/* Update entry type to indicate Command Type 3 IOCB */
	*((uint32_t *)(&cmd_pkt->entry_type)) =
	    __constant_cpu_to_le32(COMMAND_A64_TYPE);

	/* No data transfer */
	if (!scsi_bufflen(cmd) || cmd->sc_data_direction == DMA_NONE) {
		cmd_pkt->byte_count = __constant_cpu_to_le32(0);
		return;
	}

	vha = sp->fcport->vha;
	cmd_pkt->control_flags |= cpu_to_le16(qla2x00_get_cmd_direction(sp));

	/* Two DSDs are available in the Command Type 3 IOCB */
	avail_dsds = 2;
	cur_dsd = (uint32_t *)&cmd_pkt->dseg_0_address;

	/* Load data segments */
	scsi_for_each_sg(cmd, sg, tot_dsds, i) {
		dma_addr_t sle_dma;
		cont_a64_entry_t *cont_pkt;

		/* Allocate additional continuation packets? */
		if (avail_dsds == 0) {
			/*
			 * Five DSDs are available in the Continuation
			 * Type 1 IOCB.
			 */
			cont_pkt = qla2x00_prep_cont_type1_iocb(vha, vha->req);
			cur_dsd = (uint32_t *)cont_pkt->dseg_0_address;
			avail_dsds = 5;
		}

		sle_dma = sg_dma_address(sg);
		*cur_dsd++ = cpu_to_le32(LSD(sle_dma));
		*cur_dsd++ = cpu_to_le32(MSD(sle_dma));
		*cur_dsd++ = cpu_to_le32(sg_dma_len(sg));
		avail_dsds--;
	}
}

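/**
 * qla2x00_start_scsi() - Send a SCSI command to the ISP.
 * @sp: command to send to the ISP.
 *
 * Returns non-zero if a failure occurred, else zero.
 */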
int
qla2x00_start_scsi(srb_t *sp)
{
	int ret, nseg;
	unsigned long flags;
	scsi_qla_host_t *vha;
	struct scsi_cmnd *cmd;
	uint32_t *clr_ptr;
	uint32_t index;
	uint32_t handle;
	cmd_entry_t *cmd_pkt;
	uint16_t cnt;
	uint16_t req_cnt;
	uint16_t tot_dsds;
	struct device_reg_2xxx __iomem *reg;
	struct qla_hw_data *ha;
	struct req_que *req;
	struct rsp_que *rsp;
	char tag[2];

	/* Setup device pointers. */
	ret = 0;
	vha = sp->fcport->vha;
	ha = vha->hw;
	reg = &ha->iobase->isp;
	cmd = GET_CMD_SP(sp);
	req = ha->req_q_map[0];
	rsp = ha->rsp_q_map[0];
	/* So we know we haven't pci_map'ed anything yet */
	tot_dsds = 0;

	/* Send marker if required */
	if (vha->marker_needed != 0) {
		if (qla2x00_marker(vha, req, rsp, 0, 0, MK_SYNC_ALL) !=
		    QLA_SUCCESS) {
			return (QLA_FUNCTION_FAILED);
		}
		vha->marker_needed = 0;
	}

	/* Acquire ring specific lock */
	spin_lock_irqsave(&ha->hardware_lock, flags);

	/* Check for room in outstanding command list. */
	handle = req->current_outstanding_cmd;
	for (index = 1; index < MAX_OUTSTANDING_COMMANDS; index++) {
		handle++;
		if (handle == MAX_OUTSTANDING_COMMANDS)
			handle = 1;
		if (!req->outstanding_cmds[handle])
			break;
	}
	if (index == MAX_OUTSTANDING_COMMANDS)
		goto queuing_error;

	/* Map the sg table so we have an accurate count of sg entries needed */
	if (scsi_sg_count(cmd)) {
		nseg = dma_map_sg(&ha->pdev->dev, scsi_sglist(cmd),
		    scsi_sg_count(cmd), cmd->sc_data_direction);
		if (unlikely(!nseg))
			goto queuing_error;
	} else
		nseg = 0;

	tot_dsds = nseg;

	/* Calculate the number of request entries needed. */
	req_cnt = ha->isp_ops->calc_req_entries(tot_dsds);
	if (req->cnt < (req_cnt + 2)) {
		cnt = RD_REG_WORD_RELAXED(ISP_REQ_Q_OUT(ha, reg));
		if (req->ring_index < cnt)
			req->cnt = cnt - req->ring_index;
		else
			req->cnt = req->length -
			    (req->ring_index - cnt);
	}
	if (req->cnt < (req_cnt + 2))
		goto queuing_error;

	/* Build command packet */
	req->current_outstanding_cmd = handle;
	req->outstanding_cmds[handle] = sp;
	sp->handle = handle;
	cmd->host_scribble = (unsigned char *)(unsigned long)handle;
	req->cnt -= req_cnt;

	cmd_pkt = (cmd_entry_t *)req->ring_ptr;
	cmd_pkt->handle = handle;
	/* Zero out remaining portion of packet. */
	clr_ptr = (uint32_t *)cmd_pkt + 2;
	memset(clr_ptr, 0, REQUEST_ENTRY_SIZE - 8);
	cmd_pkt->dseg_count = cpu_to_le16(tot_dsds);

	/* Set target ID and LUN number. */
	SET_TARGET_ID(ha, cmd_pkt->target, sp->fcport->loop_id);
	cmd_pkt->lun = cpu_to_le16(cmd->device->lun);

	/* Update tagged queuing modifier */
	if (scsi_populate_tag_msg(cmd, tag)) {
		switch (tag[0]) {
		case HEAD_OF_QUEUE_TAG:
			cmd_pkt->control_flags =
			    __constant_cpu_to_le16(CF_HEAD_TAG);
			break;
		case ORDERED_QUEUE_TAG:
			cmd_pkt->control_flags =
			    __constant_cpu_to_le16(CF_ORDERED_TAG);
			break;
		default:
			cmd_pkt->control_flags =
			    __constant_cpu_to_le16(CF_SIMPLE_TAG);
			break;
		}
	}

	/* Load SCSI command packet. */
	memcpy(cmd_pkt->scsi_cdb, cmd->cmnd, cmd->cmd_len);
	cmd_pkt->byte_count = cpu_to_le32((uint32_t)scsi_bufflen(cmd));

	/* Build IOCB segments */
	ha->isp_ops->build_iocbs(sp, cmd_pkt, tot_dsds);

	/* Set total data segment count. */
	cmd_pkt->entry_count = (uint8_t)req_cnt;
	wmb();

	/* Adjust ring index. */
	req->ring_index++;
	if (req->ring_index == req->length) {
		req->ring_index = 0;
		req->ring_ptr = req->ring;
	} else
		req->ring_ptr++;

	sp->flags |= SRB_DMA_VALID;

	/* Set chip new ring index. */
	WRT_REG_WORD(ISP_REQ_Q_IN(ha, reg), req->ring_index);
	RD_REG_WORD_RELAXED(ISP_REQ_Q_IN(ha, reg));	/* PCI Posting. */

	/* Manage unprocessed RIO/ZIO responses. */
	if (vha->flags.process_response_queue &&
	    rsp->ring_ptr->signature != RESPONSE_PROCESSED)
		qla2x00_process_response_queue(rsp);

	spin_unlock_irqrestore(&ha->hardware_lock, flags);
	return (QLA_SUCCESS);

queuing_error:
	if (tot_dsds)
		scsi_dma_unmap(cmd);

	spin_unlock_irqrestore(&ha->hardware_lock, flags);

	return (QLA_FUNCTION_FAILED);
}

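/**
 * qla2x00_start_iocbs() - Notify the hardware that new request entries are
 * on the request queue, using whichever doorbell the ISP generation expects.
 * @vha: host adapter.
 * @req: request queue with new entries.
 */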
static void
qla2x00_start_iocbs(struct scsi_qla_host *vha, struct req_que *req)
{
	struct qla_hw_data *ha = vha->hw;
	device_reg_t __iomem *reg = ISP_QUE_REG(ha, req->id);

	if (IS_QLA82XX(ha)) {
		qla82xx_start_iocbs(vha);
	} else {
		/* Adjust ring index. */
		req->ring_index++;
		if (req->ring_index == req->length) {
			req->ring_index = 0;
			req->ring_ptr = req->ring;
		} else
			req->ring_ptr++;

		/* Set chip new ring index. */
		if (ha->mqenable || IS_QLA83XX(ha)) {
			WRT_REG_DWORD(req->req_q_in, req->ring_index);
			RD_REG_DWORD_RELAXED(&ha->iobase->isp24.hccr);
		} else if (IS_FWI2_CAPABLE(ha)) {
			WRT_REG_DWORD(&reg->isp24.req_q_in, req->ring_index);
			RD_REG_DWORD_RELAXED(&reg->isp24.req_q_in);
		} else {
			WRT_REG_WORD(ISP_REQ_Q_IN(ha, &reg->isp),
				req->ring_index);
			RD_REG_WORD_RELAXED(ISP_REQ_Q_IN(ha, &reg->isp));
		}
	}
}

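/**
 * __qla2x00_marker() - Send a Marker IOCB to the ISP; the caller must hold
 * the hardware_lock.
 * @vha: host adapter.
 * @req: request queue.
 * @rsp: response queue.
 * @loop_id: target N_Port handle the marker applies to.
 * @lun: LUN the marker applies to.
 * @type: marker modifier (MK_SYNC_ID_LUN, MK_SYNC_ID, or MK_SYNC_ALL).
 *
 * Returns non-zero if a failure occurred, else zero.
 */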
static int
__qla2x00_marker(struct scsi_qla_host *vha, struct req_que *req,
    struct rsp_que *rsp, uint16_t loop_id,
    uint16_t lun, uint8_t type)
{
	mrk_entry_t *mrk;
	struct mrk_entry_24xx *mrk24;
	struct qla_hw_data *ha = vha->hw;
	scsi_qla_host_t *base_vha = pci_get_drvdata(ha->pdev);

	mrk24 = NULL;
	req = ha->req_q_map[0];
	mrk = (mrk_entry_t *)qla2x00_alloc_iocbs(vha, 0);
	if (mrk == NULL) {
		ql_log(ql_log_warn, base_vha, 0x3026,
		    "Failed to allocate Marker IOCB.\n");

		return (QLA_FUNCTION_FAILED);
	}

	mrk->entry_type = MARKER_TYPE;
	mrk->modifier = type;
	if (type != MK_SYNC_ALL) {
		if (IS_FWI2_CAPABLE(ha)) {
			mrk24 = (struct mrk_entry_24xx *) mrk;
			mrk24->nport_handle = cpu_to_le16(loop_id);
			mrk24->lun[1] = LSB(lun);
			mrk24->lun[2] = MSB(lun);
			host_to_fcp_swap(mrk24->lun, sizeof(mrk24->lun));
			mrk24->vp_index = vha->vp_idx;
			mrk24->handle = MAKE_HANDLE(req->id, mrk24->handle);
		} else {
			SET_TARGET_ID(ha, mrk->target, loop_id);
			mrk->lun = cpu_to_le16(lun);
		}
	}
	wmb();

	qla2x00_start_iocbs(vha, req);

	return (QLA_SUCCESS);
}

int
qla2x00_marker(struct scsi_qla_host *vha, struct req_que *req,
    struct rsp_que *rsp, uint16_t loop_id, uint16_t lun,
    uint8_t type)
{
	int ret;
	unsigned long flags = 0;

	spin_lock_irqsave(&vha->hw->hardware_lock, flags);
	ret = __qla2x00_marker(vha, req, rsp, loop_id, lun, type);
	spin_unlock_irqrestore(&vha->hw->hardware_lock, flags);

	return (ret);
}

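/**
 * qla24xx_calc_iocbs() - Calculate the number of request entries needed for
 * a Command Type 7 IOCB.
 * @vha: host adapter.
 * @dsds: number of data segment descriptors.
 *
 * The command IOCB holds one DSD; each Continuation Type 1 IOCB holds five.
 */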
inline uint16_t
qla24xx_calc_iocbs(scsi_qla_host_t *vha, uint16_t dsds)
{
	uint16_t iocbs;

	iocbs = 1;
	if (dsds > 1) {
		iocbs += (dsds - 1) / 5;
		if ((dsds - 1) % 5)
			iocbs++;
	}
	return iocbs;
}

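/**
 * qla24xx_build_scsi_type_6_iocbs() - Build a Command Type 6 IOCB, chaining
 * the data segments through DSD lists drawn from the global pool.
 * @sp: SCSI request block.
 * @cmd_pkt: command packet to populate.
 * @tot_dsds: total number of data segments to transfer.
 *
 * Returns 0 on success.
 */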
static inline int
qla24xx_build_scsi_type_6_iocbs(srb_t *sp, struct cmd_type_6 *cmd_pkt,
	uint16_t tot_dsds)
{
	uint32_t *cur_dsd = NULL;
	scsi_qla_host_t *vha;
	struct qla_hw_data *ha;
	struct scsi_cmnd *cmd;
	struct scatterlist *cur_seg;
	uint32_t *dsd_seg;
	void *next_dsd;
	uint8_t avail_dsds;
	uint8_t first_iocb = 1;
	uint32_t dsd_list_len;
	struct dsd_dma *dsd_ptr;
	struct ct6_dsd *ctx;

	cmd = GET_CMD_SP(sp);

	/* Update entry type to indicate Command Type 6 IOCB */
	*((uint32_t *)(&cmd_pkt->entry_type)) =
	    __constant_cpu_to_le32(COMMAND_TYPE_6);

	/* No data transfer */
	if (!scsi_bufflen(cmd) || cmd->sc_data_direction == DMA_NONE) {
		cmd_pkt->byte_count = __constant_cpu_to_le32(0);
		return 0;
	}

	vha = sp->fcport->vha;
	ha = vha->hw;

	/* Set transfer direction */
	if (cmd->sc_data_direction == DMA_TO_DEVICE) {
		cmd_pkt->control_flags =
		    __constant_cpu_to_le16(CF_WRITE_DATA);
		ha->qla_stats.output_bytes += scsi_bufflen(cmd);
	} else if (cmd->sc_data_direction == DMA_FROM_DEVICE) {
		cmd_pkt->control_flags =
		    __constant_cpu_to_le16(CF_READ_DATA);
		ha->qla_stats.input_bytes += scsi_bufflen(cmd);
	}

	cur_seg = scsi_sglist(cmd);
	ctx = GET_CMD_CTX_SP(sp);

	while (tot_dsds) {
		avail_dsds = (tot_dsds > QLA_DSDS_PER_IOCB) ?
		    QLA_DSDS_PER_IOCB : tot_dsds;
		tot_dsds -= avail_dsds;
		dsd_list_len = (avail_dsds + 1) * QLA_DSD_SIZE;

		dsd_ptr = list_first_entry(&ha->gbl_dsd_list,
		    struct dsd_dma, list);
		next_dsd = dsd_ptr->dsd_addr;
		list_del(&dsd_ptr->list);
		ha->gbl_dsd_avail--;
		list_add_tail(&dsd_ptr->list, &ctx->dsd_list);
		ctx->dsd_use_cnt++;
		ha->gbl_dsd_inuse++;

		if (first_iocb) {
			first_iocb = 0;
			dsd_seg = (uint32_t *)&cmd_pkt->fcp_data_dseg_address;
			*dsd_seg++ = cpu_to_le32(LSD(dsd_ptr->dsd_list_dma));
			*dsd_seg++ = cpu_to_le32(MSD(dsd_ptr->dsd_list_dma));
			cmd_pkt->fcp_data_dseg_len = cpu_to_le32(dsd_list_len);
		} else {
			*cur_dsd++ = cpu_to_le32(LSD(dsd_ptr->dsd_list_dma));
			*cur_dsd++ = cpu_to_le32(MSD(dsd_ptr->dsd_list_dma));
			*cur_dsd++ = cpu_to_le32(dsd_list_len);
		}
		cur_dsd = (uint32_t *)next_dsd;
		while (avail_dsds) {
			dma_addr_t sle_dma;

			sle_dma = sg_dma_address(cur_seg);
			*cur_dsd++ = cpu_to_le32(LSD(sle_dma));
			*cur_dsd++ = cpu_to_le32(MSD(sle_dma));
			*cur_dsd++ = cpu_to_le32(sg_dma_len(cur_seg));
			cur_seg = sg_next(cur_seg);
			avail_dsds--;
		}
	}

	/* Null termination */
	*cur_dsd++ = 0;
	*cur_dsd++ = 0;
	*cur_dsd++ = 0;
	cmd_pkt->control_flags |= CF_DATA_SEG_DESCR_ENABLE;
	return 0;
}

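/**
 * qla24xx_calc_dsd_lists() - Calculate the number of DSD lists required to
 * describe a scatter/gather table.
 * @dsds: number of data segment descriptors.
 */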
inline uint16_t
qla24xx_calc_dsd_lists(uint16_t dsds)
{
	uint16_t dsd_lists = 0;

	dsd_lists = (dsds/QLA_DSDS_PER_IOCB);
	if (dsds % QLA_DSDS_PER_IOCB)
		dsd_lists++;
	return dsd_lists;
}

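/**
 * qla24xx_build_scsi_iocbs() - Build a Command Type 7 IOCB capable of 64-bit
 * addressing data segments.
 * @sp: SCSI request block.
 * @cmd_pkt: command packet to populate.
 * @tot_dsds: total number of data segments to transfer.
 */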
inline void
qla24xx_build_scsi_iocbs(srb_t *sp, struct cmd_type_7 *cmd_pkt,
    uint16_t tot_dsds)
{
	uint16_t avail_dsds;
	uint32_t *cur_dsd;
	scsi_qla_host_t *vha;
	struct scsi_cmnd *cmd;
	struct scatterlist *sg;
	int i;
	struct req_que *req;

	cmd = GET_CMD_SP(sp);

	/* Update entry type to indicate Command Type 7 IOCB */
	*((uint32_t *)(&cmd_pkt->entry_type)) =
	    __constant_cpu_to_le32(COMMAND_TYPE_7);

	/* No data transfer */
	if (!scsi_bufflen(cmd) || cmd->sc_data_direction == DMA_NONE) {
		cmd_pkt->byte_count = __constant_cpu_to_le32(0);
		return;
	}

	vha = sp->fcport->vha;
	req = vha->req;

	/* Set transfer direction */
	if (cmd->sc_data_direction == DMA_TO_DEVICE) {
		cmd_pkt->task_mgmt_flags =
		    __constant_cpu_to_le16(TMF_WRITE_DATA);
		sp->fcport->vha->hw->qla_stats.output_bytes +=
		    scsi_bufflen(cmd);
	} else if (cmd->sc_data_direction == DMA_FROM_DEVICE) {
		cmd_pkt->task_mgmt_flags =
		    __constant_cpu_to_le16(TMF_READ_DATA);
		sp->fcport->vha->hw->qla_stats.input_bytes +=
		    scsi_bufflen(cmd);
	}

	/* One DSD is available in the Command Type 7 IOCB */
	avail_dsds = 1;
	cur_dsd = (uint32_t *)&cmd_pkt->dseg_0_address;

	/* Load data segments */
	scsi_for_each_sg(cmd, sg, tot_dsds, i) {
		dma_addr_t sle_dma;
		cont_a64_entry_t *cont_pkt;

		/* Allocate additional continuation packets? */
		if (avail_dsds == 0) {
			/*
			 * Five DSDs are available in the Continuation
			 * Type 1 IOCB.
			 */
			cont_pkt = qla2x00_prep_cont_type1_iocb(vha, vha->req);
			cur_dsd = (uint32_t *)cont_pkt->dseg_0_address;
			avail_dsds = 5;
		}

		sle_dma = sg_dma_address(sg);
		*cur_dsd++ = cpu_to_le32(LSD(sle_dma));
		*cur_dsd++ = cpu_to_le32(MSD(sle_dma));
		*cur_dsd++ = cpu_to_le32(sg_dma_len(sg));
		avail_dsds--;
	}
}

struct fw_dif_context {
	uint32_t ref_tag;
	uint16_t app_tag;
	uint8_t ref_tag_mask[4];	/* Validation/Replacement Mask */
	uint8_t app_tag_mask[2];	/* Validation/Replacement Mask */
};

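/**
 * qla24xx_set_t10dif_tags() - Fill in the T10-DIF reference/application tags
 * and their validation masks based on the command's DIF protection type.
 * @sp: SCSI request block.
 * @pkt: firmware DIF context to fill.
 * @protcnt: number of protection scatter/gather entries (used for logging).
 */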
static inline void
qla24xx_set_t10dif_tags(srb_t *sp, struct fw_dif_context *pkt,
    unsigned int protcnt)
{
	struct scsi_cmnd *cmd = GET_CMD_SP(sp);
	scsi_qla_host_t *vha = shost_priv(cmd->device->host);

	switch (scsi_get_prot_type(cmd)) {
	case SCSI_PROT_DIF_TYPE0:
		/*
		 * Type 0: ref tag tracks the LBA; only check it when HBA
		 * error checking is enabled.
		 */
		pkt->ref_tag = cpu_to_le32((uint32_t)
		    (0xffffffff & scsi_get_lba(cmd)));

		if (!qla2x00_hba_err_chk_enabled(sp))
			break;

		pkt->ref_tag_mask[0] = 0xff;
		pkt->ref_tag_mask[1] = 0xff;
		pkt->ref_tag_mask[2] = 0xff;
		pkt->ref_tag_mask[3] = 0xff;
		break;

	/*
	 * For TYPE 2 protection: 16 bit app tag is ignored; 32 bit ref tag
	 * matches the LBA in the CDB.
	 */
	case SCSI_PROT_DIF_TYPE2:
		pkt->app_tag = __constant_cpu_to_le16(0);
		pkt->app_tag_mask[0] = 0x0;
		pkt->app_tag_mask[1] = 0x0;

		pkt->ref_tag = cpu_to_le32((uint32_t)
		    (0xffffffff & scsi_get_lba(cmd)));

		if (!qla2x00_hba_err_chk_enabled(sp))
			break;

		/* enable ALL bytes of the ref tag */
		pkt->ref_tag_mask[0] = 0xff;
		pkt->ref_tag_mask[1] = 0xff;
		pkt->ref_tag_mask[2] = 0xff;
		pkt->ref_tag_mask[3] = 0xff;
		break;

	/* For Type 3 protection: 16 bit GUARD only */
	case SCSI_PROT_DIF_TYPE3:
		pkt->ref_tag_mask[0] = pkt->ref_tag_mask[1] =
			pkt->ref_tag_mask[2] = pkt->ref_tag_mask[3] =
								0x00;
		break;

	/*
	 * For TYPE 1 protection: 16 bit GUARD tag, 32 bit REF tag, and
	 * 16 bit app tag.
	 */
	case SCSI_PROT_DIF_TYPE1:
		pkt->ref_tag = cpu_to_le32((uint32_t)
		    (0xffffffff & scsi_get_lba(cmd)));
		pkt->app_tag = __constant_cpu_to_le16(0);
		pkt->app_tag_mask[0] = 0x0;
		pkt->app_tag_mask[1] = 0x0;

		if (!qla2x00_hba_err_chk_enabled(sp))
			break;

		/* enable ALL bytes of the ref tag */
		pkt->ref_tag_mask[0] = 0xff;
		pkt->ref_tag_mask[1] = 0xff;
		pkt->ref_tag_mask[2] = 0xff;
		pkt->ref_tag_mask[3] = 0xff;
		break;
	}

	ql_dbg(ql_dbg_io, vha, 0x3009,
	    "Setting protection Tags: (BIG) ref tag = 0x%x, app tag = 0x%x, "
	    "prot SG count %d, cmd lba 0x%x, prot_type=%u cmd=%p.\n",
	    pkt->ref_tag, pkt->app_tag, protcnt, (int)scsi_get_lba(cmd),
	    scsi_get_prot_type(cmd), cmd);
}

struct qla2_sgx {
	dma_addr_t	dma_addr;	/* OUT */
	uint32_t	dma_len;	/* OUT */

	uint32_t	tot_bytes;	/* IN */
	struct scatterlist *cur_sg;	/* IN */

	/* for book keeping, bzero on initial invocation */
	uint32_t	bytes_consumed;
	uint32_t	num_bytes;
	uint32_t	tot_partial;

	/* for debugging */
	uint32_t	num_sg;
	srb_t		*sp;
};

static int
qla24xx_get_one_block_sg(uint32_t blk_sz, struct qla2_sgx *sgx,
	uint32_t *partial)
{
	struct scatterlist *sg;
	uint32_t cumulative_partial, sg_len;
	dma_addr_t sg_dma_addr;

	if (sgx->num_bytes == sgx->tot_bytes)
		return 0;

	sg = sgx->cur_sg;
	cumulative_partial = sgx->tot_partial;

	sg_dma_addr = sg_dma_address(sg);
	sg_len = sg_dma_len(sg);

	sgx->dma_addr = sg_dma_addr + sgx->bytes_consumed;

	if ((cumulative_partial + (sg_len - sgx->bytes_consumed)) >= blk_sz) {
		sgx->dma_len = (blk_sz - cumulative_partial);
		sgx->tot_partial = 0;
		sgx->num_bytes += blk_sz;
		*partial = 0;
	} else {
		sgx->dma_len = sg_len - sgx->bytes_consumed;
		sgx->tot_partial += sgx->dma_len;
		*partial = 1;
	}

	sgx->bytes_consumed += sgx->dma_len;

	if (sg_len == sgx->bytes_consumed) {
		sg = sg_next(sg);
		sgx->num_sg++;
		sgx->cur_sg = sg;
		sgx->bytes_consumed = 0;
	}

	return 1;
}

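/**
 * qla24xx_walk_and_build_sglist_no_difb() - Build the data DSD list for a
 * DIF command without bundling, splitting data segments on protection
 * interval boundaries and interleaving the 8-byte DIF tuples.
 * @ha: hardware data.
 * @sp: SCSI request block.
 * @dsd: first DSD to fill.
 * @tot_dsds: total number of data segments.
 *
 * Returns 0 on success, 1 on allocation failure.
 */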
static int
qla24xx_walk_and_build_sglist_no_difb(struct qla_hw_data *ha, srb_t *sp,
	uint32_t *dsd, uint16_t tot_dsds)
{
	void *next_dsd;
	uint8_t avail_dsds = 0;
	uint32_t dsd_list_len;
	struct dsd_dma *dsd_ptr;
	struct scatterlist *sg_prot;
	uint32_t *cur_dsd = dsd;
	uint16_t used_dsds = tot_dsds;

	uint32_t prot_int;
	uint32_t partial;
	struct qla2_sgx sgx;
	dma_addr_t sle_dma;
	uint32_t sle_dma_len, tot_prot_dma_len = 0;
	struct scsi_cmnd *cmd = GET_CMD_SP(sp);

	prot_int = cmd->device->sector_size;

	memset(&sgx, 0, sizeof(struct qla2_sgx));
	sgx.tot_bytes = scsi_bufflen(cmd);
	sgx.cur_sg = scsi_sglist(cmd);
	sgx.sp = sp;

	sg_prot = scsi_prot_sglist(cmd);

	while (qla24xx_get_one_block_sg(prot_int, &sgx, &partial)) {

		sle_dma = sgx.dma_addr;
		sle_dma_len = sgx.dma_len;
alloc_and_fill:
		/* Allocate additional continuation packets? */
		if (avail_dsds == 0) {
			avail_dsds = (used_dsds > QLA_DSDS_PER_IOCB) ?
					QLA_DSDS_PER_IOCB : used_dsds;
			dsd_list_len = (avail_dsds + 1) * 12;
			used_dsds -= avail_dsds;

			/* allocate tracking DS */
			dsd_ptr = kzalloc(sizeof(struct dsd_dma), GFP_ATOMIC);
			if (!dsd_ptr)
				return 1;

			/* allocate new list */
			dsd_ptr->dsd_addr = next_dsd =
			    dma_pool_alloc(ha->dl_dma_pool, GFP_ATOMIC,
				&dsd_ptr->dsd_list_dma);

			if (!next_dsd) {
				/*
				 * Need to cleanup only this dsd_ptr, rest
				 * will be done by sp_free_dma()
				 */
				kfree(dsd_ptr);
				return 1;
			}

			list_add_tail(&dsd_ptr->list,
			    &((struct crc_context *)sp->u.scmd.ctx)->dsd_list);

			sp->flags |= SRB_CRC_CTX_DSD_VALID;

			/* add new list to cmd iocb or last list */
			*cur_dsd++ = cpu_to_le32(LSD(dsd_ptr->dsd_list_dma));
			*cur_dsd++ = cpu_to_le32(MSD(dsd_ptr->dsd_list_dma));
			*cur_dsd++ = dsd_list_len;
			cur_dsd = (uint32_t *)next_dsd;
		}
		*cur_dsd++ = cpu_to_le32(LSD(sle_dma));
		*cur_dsd++ = cpu_to_le32(MSD(sle_dma));
		*cur_dsd++ = cpu_to_le32(sle_dma_len);
		avail_dsds--;

		if (partial == 0) {
			/* Got a full protection interval */
			sle_dma = sg_dma_address(sg_prot) + tot_prot_dma_len;
			sle_dma_len = 8;

			tot_prot_dma_len += sle_dma_len;
			if (tot_prot_dma_len == sg_dma_len(sg_prot)) {
				tot_prot_dma_len = 0;
				sg_prot = sg_next(sg_prot);
			}

			partial = 1; /* So as to not re-enter this block */
			goto alloc_and_fill;
		}
	}
	/* Null termination */
	*cur_dsd++ = 0;
	*cur_dsd++ = 0;
	*cur_dsd++ = 0;
	return 0;
}

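/**
 * qla24xx_walk_and_build_sglist() - Build the chained DSD list for the
 * command's data scatter/gather table.
 * @ha: hardware data.
 * @sp: SCSI request block.
 * @dsd: first DSD to fill.
 * @tot_dsds: total number of data segments.
 *
 * Returns 0 on success, 1 on allocation failure.
 */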
static int
qla24xx_walk_and_build_sglist(struct qla_hw_data *ha, srb_t *sp, uint32_t *dsd,
	uint16_t tot_dsds)
{
	void *next_dsd;
	uint8_t avail_dsds = 0;
	uint32_t dsd_list_len;
	struct dsd_dma *dsd_ptr;
	struct scatterlist *sg;
	uint32_t *cur_dsd = dsd;
	int i;
	uint16_t used_dsds = tot_dsds;
	struct scsi_cmnd *cmd = GET_CMD_SP(sp);
	scsi_qla_host_t *vha = shost_priv(cmd->device->host);

	uint8_t *cp;

	scsi_for_each_sg(cmd, sg, tot_dsds, i) {
		dma_addr_t sle_dma;

		/* Allocate additional continuation packets? */
		if (avail_dsds == 0) {
			avail_dsds = (used_dsds > QLA_DSDS_PER_IOCB) ?
					QLA_DSDS_PER_IOCB : used_dsds;
			dsd_list_len = (avail_dsds + 1) * 12;
			used_dsds -= avail_dsds;

			/* allocate tracking DS */
			dsd_ptr = kzalloc(sizeof(struct dsd_dma), GFP_ATOMIC);
			if (!dsd_ptr)
				return 1;

			/* allocate new list */
			dsd_ptr->dsd_addr = next_dsd =
			    dma_pool_alloc(ha->dl_dma_pool, GFP_ATOMIC,
				&dsd_ptr->dsd_list_dma);

			if (!next_dsd) {
				/*
				 * Need to cleanup only this dsd_ptr, rest
				 * will be done by sp_free_dma()
				 */
				kfree(dsd_ptr);
				return 1;
			}

			list_add_tail(&dsd_ptr->list,
			    &((struct crc_context *)sp->u.scmd.ctx)->dsd_list);

			sp->flags |= SRB_CRC_CTX_DSD_VALID;

			/* add new list to cmd iocb or last list */
			*cur_dsd++ = cpu_to_le32(LSD(dsd_ptr->dsd_list_dma));
			*cur_dsd++ = cpu_to_le32(MSD(dsd_ptr->dsd_list_dma));
			*cur_dsd++ = dsd_list_len;
			cur_dsd = (uint32_t *)next_dsd;
		}
		sle_dma = sg_dma_address(sg);
		ql_dbg(ql_dbg_io, vha, 0x300a,
		    "sg entry %d - addr=0x%x 0x%x, len=%d for cmd=%p.\n",
		    i, LSD(sle_dma), MSD(sle_dma), sg_dma_len(sg), cmd);
		*cur_dsd++ = cpu_to_le32(LSD(sle_dma));
		*cur_dsd++ = cpu_to_le32(MSD(sle_dma));
		*cur_dsd++ = cpu_to_le32(sg_dma_len(sg));
		avail_dsds--;

		if (scsi_get_prot_op(cmd) == SCSI_PROT_WRITE_PASS) {
			cp = page_address(sg_page(sg)) + sg->offset;
			ql_dbg(ql_dbg_io, vha, 0x300b,
			    "User data buffer=%p for cmd=%p.\n", cp, cmd);
		}
	}
	/* Null termination */
	*cur_dsd++ = 0;
	*cur_dsd++ = 0;
	*cur_dsd++ = 0;
	return 0;
}

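/**
 * qla24xx_walk_and_build_prot_sglist() - Build the chained DSD list for the
 * command's protection (DIF) scatter/gather table.
 * @ha: hardware data.
 * @sp: SCSI request block.
 * @dsd: first DSD to fill.
 * @tot_dsds: total number of protection segments.
 *
 * Returns 0 on success, 1 on allocation failure.
 */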
static int
qla24xx_walk_and_build_prot_sglist(struct qla_hw_data *ha, srb_t *sp,
	uint32_t *dsd,
	uint16_t tot_dsds)
{
	void *next_dsd;
	uint8_t avail_dsds = 0;
	uint32_t dsd_list_len;
	struct dsd_dma *dsd_ptr;
	struct scatterlist *sg;
	int i;
	struct scsi_cmnd *cmd;
	uint32_t *cur_dsd = dsd;
	uint16_t used_dsds = tot_dsds;
	scsi_qla_host_t *vha = pci_get_drvdata(ha->pdev);
	uint8_t *cp;

	cmd = GET_CMD_SP(sp);
	scsi_for_each_prot_sg(cmd, sg, tot_dsds, i) {
		dma_addr_t sle_dma;

		/* Allocate additional continuation packets? */
		if (avail_dsds == 0) {
			avail_dsds = (used_dsds > QLA_DSDS_PER_IOCB) ?
						QLA_DSDS_PER_IOCB : used_dsds;
			dsd_list_len = (avail_dsds + 1) * 12;
			used_dsds -= avail_dsds;

			/* allocate tracking DS */
			dsd_ptr = kzalloc(sizeof(struct dsd_dma), GFP_ATOMIC);
			if (!dsd_ptr)
				return 1;

			/* allocate new list */
			dsd_ptr->dsd_addr = next_dsd =
			    dma_pool_alloc(ha->dl_dma_pool, GFP_ATOMIC,
				&dsd_ptr->dsd_list_dma);

			if (!next_dsd) {
				/*
				 * Need to cleanup only this dsd_ptr, rest
				 * will be done by sp_free_dma()
				 */
				kfree(dsd_ptr);
				return 1;
			}

			list_add_tail(&dsd_ptr->list,
			    &((struct crc_context *)sp->u.scmd.ctx)->dsd_list);

			sp->flags |= SRB_CRC_CTX_DSD_VALID;

			/* add new list to cmd iocb or last list */
			*cur_dsd++ = cpu_to_le32(LSD(dsd_ptr->dsd_list_dma));
			*cur_dsd++ = cpu_to_le32(MSD(dsd_ptr->dsd_list_dma));
			*cur_dsd++ = dsd_list_len;
			cur_dsd = (uint32_t *)next_dsd;
		}
		sle_dma = sg_dma_address(sg);
		if (scsi_get_prot_op(cmd) == SCSI_PROT_WRITE_PASS) {
			ql_dbg(ql_dbg_io, vha, 0x3027,
			    "%s(): %p, sg_entry %d - "
			    "addr=0x%x 0x%x, len=%d.\n",
			    __func__, cur_dsd, i,
			    LSD(sle_dma), MSD(sle_dma), sg_dma_len(sg));
		}
		*cur_dsd++ = cpu_to_le32(LSD(sle_dma));
		*cur_dsd++ = cpu_to_le32(MSD(sle_dma));
		*cur_dsd++ = cpu_to_le32(sg_dma_len(sg));

		if (scsi_get_prot_op(cmd) == SCSI_PROT_WRITE_PASS) {
			cp = page_address(sg_page(sg)) + sg->offset;
			ql_dbg(ql_dbg_io, vha, 0x3028,
			    "%s(): Protection Data buffer = %p.\n", __func__,
			    cp);
		}
		avail_dsds--;
	}
	/* Null termination */
	*cur_dsd++ = 0;
	*cur_dsd++ = 0;
	*cur_dsd++ = 0;
	return 0;
}

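/**
 * qla24xx_build_scsi_crc_2_iocbs() - Build IOCB command capable of 64-bit
 * addressing data segments with DIF/CRC protection.
 * @sp: SCSI request block.
 * @cmd_pkt: Command Type CRC_2 IOCB to populate.
 * @tot_dsds: total number of segments (data plus protection).
 * @tot_prot_dsds: number of protection segments.
 * @fw_prot_opts: firmware protection options (PO_* flags).
 *
 * Returns QLA_SUCCESS on success, else QLA_FUNCTION_FAILED.
 */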
static inline int
qla24xx_build_scsi_crc_2_iocbs(srb_t *sp, struct cmd_type_crc_2 *cmd_pkt,
    uint16_t tot_dsds, uint16_t tot_prot_dsds, uint16_t fw_prot_opts)
{
	uint32_t *cur_dsd, *fcp_dl;
	scsi_qla_host_t *vha;
	struct scsi_cmnd *cmd;
	struct scatterlist *cur_seg;
	int sgc;
	uint32_t total_bytes = 0;
	uint32_t data_bytes;
	uint32_t dif_bytes;
	uint8_t bundling = 1;
	uint16_t blk_size;
	uint8_t *clr_ptr;
	struct crc_context *crc_ctx_pkt = NULL;
	struct qla_hw_data *ha;
	uint8_t additional_fcpcdb_len;
	uint16_t fcp_cmnd_len;
	struct fcp_cmnd *fcp_cmnd;
	dma_addr_t crc_ctx_dma;
	char tag[2];

	cmd = GET_CMD_SP(sp);

	sgc = 0;
	/* Update entry type to indicate Command Type CRC_2 IOCB */
	*((uint32_t *)(&cmd_pkt->entry_type)) =
	    __constant_cpu_to_le32(COMMAND_TYPE_CRC_2);

	vha = sp->fcport->vha;
	ha = vha->hw;

	/* No data transfer */
	data_bytes = scsi_bufflen(cmd);
	if (!data_bytes || cmd->sc_data_direction == DMA_NONE) {
		cmd_pkt->byte_count = __constant_cpu_to_le32(0);
		return QLA_SUCCESS;
	}

	cmd_pkt->vp_index = sp->fcport->vp_idx;

	/* Set transfer direction */
	if (cmd->sc_data_direction == DMA_TO_DEVICE) {
		cmd_pkt->control_flags =
		    __constant_cpu_to_le16(CF_WRITE_DATA);
	} else if (cmd->sc_data_direction == DMA_FROM_DEVICE) {
		cmd_pkt->control_flags =
		    __constant_cpu_to_le16(CF_READ_DATA);
	}

	if ((scsi_get_prot_op(cmd) == SCSI_PROT_READ_INSERT) ||
	    (scsi_get_prot_op(cmd) == SCSI_PROT_WRITE_STRIP) ||
	    (scsi_get_prot_op(cmd) == SCSI_PROT_READ_STRIP) ||
	    (scsi_get_prot_op(cmd) == SCSI_PROT_WRITE_INSERT))
		bundling = 0;

	/* Allocate CRC context from global pool */
	crc_ctx_pkt = sp->u.scmd.ctx =
	    dma_pool_alloc(ha->dl_dma_pool, GFP_ATOMIC, &crc_ctx_dma);

	if (!crc_ctx_pkt)
		goto crc_queuing_error;

	/* Zero out CTX area. */
	clr_ptr = (uint8_t *)crc_ctx_pkt;
	memset(clr_ptr, 0, sizeof(*crc_ctx_pkt));

	crc_ctx_pkt->crc_ctx_dma = crc_ctx_dma;

	sp->flags |= SRB_CRC_CTX_DMA_VALID;

	/* Set handle */
	crc_ctx_pkt->handle = cmd_pkt->handle;

	INIT_LIST_HEAD(&crc_ctx_pkt->dsd_list);

	qla24xx_set_t10dif_tags(sp, (struct fw_dif_context *)
	    &crc_ctx_pkt->ref_tag, tot_prot_dsds);

	cmd_pkt->crc_context_address[0] = cpu_to_le32(LSD(crc_ctx_dma));
	cmd_pkt->crc_context_address[1] = cpu_to_le32(MSD(crc_ctx_dma));
	cmd_pkt->crc_context_len = CRC_CONTEXT_LEN_FW;

	/* Determine SCSI command length -- align to 4 byte boundary */
	if (cmd->cmd_len > 16) {
		additional_fcpcdb_len = cmd->cmd_len - 16;
		if ((cmd->cmd_len % 4) != 0) {
			/* SCSI cmd > 16 bytes must be multiple of 4 */
			goto crc_queuing_error;
		}
		fcp_cmnd_len = 12 + cmd->cmd_len + 4;
	} else {
		additional_fcpcdb_len = 0;
		fcp_cmnd_len = 12 + 16 + 4;
	}

	fcp_cmnd = &crc_ctx_pkt->fcp_cmnd;

	fcp_cmnd->additional_cdb_len = additional_fcpcdb_len;
	if (cmd->sc_data_direction == DMA_TO_DEVICE)
		fcp_cmnd->additional_cdb_len |= 1;
	else if (cmd->sc_data_direction == DMA_FROM_DEVICE)
		fcp_cmnd->additional_cdb_len |= 2;

	int_to_scsilun(cmd->device->lun, &fcp_cmnd->lun);
	memcpy(fcp_cmnd->cdb, cmd->cmnd, cmd->cmd_len);
	cmd_pkt->fcp_cmnd_dseg_len = cpu_to_le16(fcp_cmnd_len);
	cmd_pkt->fcp_cmnd_dseg_address[0] = cpu_to_le32(
	    LSD(crc_ctx_dma + CRC_CONTEXT_FCPCMND_OFF));
	cmd_pkt->fcp_cmnd_dseg_address[1] = cpu_to_le32(
	    MSD(crc_ctx_dma + CRC_CONTEXT_FCPCMND_OFF));
	fcp_cmnd->task_management = 0;

	/*
	 * Update tagged queuing modifier if using command tag queuing
	 */
	if (scsi_populate_tag_msg(cmd, tag)) {
		switch (tag[0]) {
		case HEAD_OF_QUEUE_TAG:
			fcp_cmnd->task_attribute = TSK_HEAD_OF_QUEUE;
			break;
		case ORDERED_QUEUE_TAG:
			fcp_cmnd->task_attribute = TSK_ORDERED;
			break;
		default:
			fcp_cmnd->task_attribute = 0;
			break;
		}
	} else {
		fcp_cmnd->task_attribute = 0;
	}

	cmd_pkt->fcp_rsp_dseg_len = 0; /* Let response come in status iocb */

	/* Compute dif len and adjust data len to include protection */
	dif_bytes = 0;
	blk_size = cmd->device->sector_size;
	dif_bytes = (data_bytes / blk_size) * 8;

	switch (scsi_get_prot_op(GET_CMD_SP(sp))) {
	case SCSI_PROT_READ_INSERT:
	case SCSI_PROT_WRITE_STRIP:
		total_bytes = data_bytes;
		data_bytes += dif_bytes;
		break;

	case SCSI_PROT_READ_STRIP:
	case SCSI_PROT_WRITE_INSERT:
	case SCSI_PROT_READ_PASS:
	case SCSI_PROT_WRITE_PASS:
		total_bytes = data_bytes + dif_bytes;
		break;
	default:
		BUG();
	}

	if (!qla2x00_hba_err_chk_enabled(sp))
		fw_prot_opts |= 0x10; /* Disable Guard tag checking */

	if (!bundling) {
		cur_dsd = (uint32_t *) &crc_ctx_pkt->u.nobundling.data_address;
	} else {
		/*
		 * Configure Bundling if we need to fetch interleaving
		 * protection PCI accesses
		 */
		fw_prot_opts |= PO_ENABLE_DIF_BUNDLING;
		crc_ctx_pkt->u.bundling.dif_byte_count = cpu_to_le32(dif_bytes);
		crc_ctx_pkt->u.bundling.dseg_count = cpu_to_le16(tot_dsds -
		    tot_prot_dsds);
		cur_dsd = (uint32_t *) &crc_ctx_pkt->u.bundling.data_address;
	}

	/* Finish the common fields of CRC pkt */
	crc_ctx_pkt->blk_size = cpu_to_le16(blk_size);
	crc_ctx_pkt->prot_opts = cpu_to_le16(fw_prot_opts);
	crc_ctx_pkt->byte_count = cpu_to_le32(data_bytes);
	crc_ctx_pkt->guard_seed = __constant_cpu_to_le16(0);
	/* Fibre channel byte count */
	cmd_pkt->byte_count = cpu_to_le32(total_bytes);
	fcp_dl = (uint32_t *)(crc_ctx_pkt->fcp_cmnd.cdb + 16 +
	    additional_fcpcdb_len);
	*fcp_dl = htonl(total_bytes);

	if (!data_bytes || cmd->sc_data_direction == DMA_NONE) {
		cmd_pkt->byte_count = __constant_cpu_to_le32(0);
		return QLA_SUCCESS;
	}
	/* Walks data segments */

	cmd_pkt->control_flags |=
	    __constant_cpu_to_le16(CF_DATA_SEG_DESCR_ENABLE);

	if (!bundling && tot_prot_dsds) {
		if (qla24xx_walk_and_build_sglist_no_difb(ha, sp,
		    cur_dsd, tot_dsds))
			goto crc_queuing_error;
	} else if (qla24xx_walk_and_build_sglist(ha, sp, cur_dsd,
	    (tot_dsds - tot_prot_dsds)))
		goto crc_queuing_error;

	if (bundling && tot_prot_dsds) {
		/* Walks dif segments */
		cur_seg = scsi_prot_sglist(cmd);
		cmd_pkt->control_flags |=
			__constant_cpu_to_le16(CF_DIF_SEG_DESCR_ENABLE);
		cur_dsd = (uint32_t *) &crc_ctx_pkt->u.bundling.dif_address;
		if (qla24xx_walk_and_build_prot_sglist(ha, sp, cur_dsd,
		    tot_prot_dsds))
			goto crc_queuing_error;
	}
	return QLA_SUCCESS;

crc_queuing_error:
	/* Cleanup will be performed by the caller */

	return QLA_FUNCTION_FAILED;
}

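/**
 * qla24xx_start_scsi() - Send a SCSI command to the ISP.
 * @sp: command to send to the ISP.
 *
 * Returns non-zero if a failure occurred, else zero.
 */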
int
qla24xx_start_scsi(srb_t *sp)
{
	int ret, nseg;
	unsigned long flags;
	uint32_t *clr_ptr;
	uint32_t index;
	uint32_t handle;
	struct cmd_type_7 *cmd_pkt;
	uint16_t cnt;
	uint16_t req_cnt;
	uint16_t tot_dsds;
	struct req_que *req = NULL;
	struct rsp_que *rsp = NULL;
	struct scsi_cmnd *cmd = GET_CMD_SP(sp);
	struct scsi_qla_host *vha = sp->fcport->vha;
	struct qla_hw_data *ha = vha->hw;
	char tag[2];

	/* Setup device pointers. */
	ret = 0;

	qla25xx_set_que(sp, &rsp);
	req = vha->req;

	/* So we know we haven't pci_map'ed anything yet */
	tot_dsds = 0;

	/* Send marker if required */
	if (vha->marker_needed != 0) {
		if (qla2x00_marker(vha, req, rsp, 0, 0, MK_SYNC_ALL) !=
		    QLA_SUCCESS)
			return QLA_FUNCTION_FAILED;
		vha->marker_needed = 0;
	}

	/* Acquire ring specific lock */
	spin_lock_irqsave(&ha->hardware_lock, flags);

	/* Check for room in outstanding command list. */
	handle = req->current_outstanding_cmd;
	for (index = 1; index < MAX_OUTSTANDING_COMMANDS; index++) {
		handle++;
		if (handle == MAX_OUTSTANDING_COMMANDS)
			handle = 1;
		if (!req->outstanding_cmds[handle])
			break;
	}
	if (index == MAX_OUTSTANDING_COMMANDS) {
		goto queuing_error;
	}

	/* Map the sg table so we have an accurate count of sg entries needed */
	if (scsi_sg_count(cmd)) {
		nseg = dma_map_sg(&ha->pdev->dev, scsi_sglist(cmd),
		    scsi_sg_count(cmd), cmd->sc_data_direction);
		if (unlikely(!nseg))
			goto queuing_error;
	} else
		nseg = 0;

	tot_dsds = nseg;
	req_cnt = qla24xx_calc_iocbs(vha, tot_dsds);
	if (req->cnt < (req_cnt + 2)) {
		cnt = RD_REG_DWORD_RELAXED(req->req_q_out);

		if (req->ring_index < cnt)
			req->cnt = cnt - req->ring_index;
		else
			req->cnt = req->length -
				(req->ring_index - cnt);
	}
	if (req->cnt < (req_cnt + 2))
		goto queuing_error;

	/* Build command packet. */
	req->current_outstanding_cmd = handle;
	req->outstanding_cmds[handle] = sp;
	sp->handle = handle;
	cmd->host_scribble = (unsigned char *)(unsigned long)handle;
	req->cnt -= req_cnt;

	cmd_pkt = (struct cmd_type_7 *)req->ring_ptr;
	cmd_pkt->handle = MAKE_HANDLE(req->id, handle);

	/* Zero out remaining portion of packet. */
	/*    tagged queuing modifier -- default is TSK_SIMPLE (0).    */
	clr_ptr = (uint32_t *)cmd_pkt + 2;
	memset(clr_ptr, 0, REQUEST_ENTRY_SIZE - 8);
	cmd_pkt->dseg_count = cpu_to_le16(tot_dsds);

	/* Set NPORT-ID and LUN number. */
	cmd_pkt->nport_handle = cpu_to_le16(sp->fcport->loop_id);
	cmd_pkt->port_id[0] = sp->fcport->d_id.b.al_pa;
	cmd_pkt->port_id[1] = sp->fcport->d_id.b.area;
	cmd_pkt->port_id[2] = sp->fcport->d_id.b.domain;
	cmd_pkt->vp_index = sp->fcport->vp_idx;

	int_to_scsilun(cmd->device->lun, &cmd_pkt->lun);
	host_to_fcp_swap((uint8_t *)&cmd_pkt->lun, sizeof(cmd_pkt->lun));

	/* Update tagged queuing modifier -- default is TSK_SIMPLE (0). */
	if (scsi_populate_tag_msg(cmd, tag)) {
		switch (tag[0]) {
		case HEAD_OF_QUEUE_TAG:
			cmd_pkt->task = TSK_HEAD_OF_QUEUE;
			break;
		case ORDERED_QUEUE_TAG:
			cmd_pkt->task = TSK_ORDERED;
			break;
		}
	}

	/* Load SCSI command packet. */
	memcpy(cmd_pkt->fcp_cdb, cmd->cmnd, cmd->cmd_len);
	host_to_fcp_swap(cmd_pkt->fcp_cdb, sizeof(cmd_pkt->fcp_cdb));

	cmd_pkt->byte_count = cpu_to_le32((uint32_t)scsi_bufflen(cmd));

	/* Build IOCB segments */
	qla24xx_build_scsi_iocbs(sp, cmd_pkt, tot_dsds);

	/* Set total data segment count. */
	cmd_pkt->entry_count = (uint8_t)req_cnt;
	/* Specify response queue number where completion should happen */
	cmd_pkt->entry_status = (uint8_t) rsp->id;
	wmb();
	/* Adjust ring index. */
	req->ring_index++;
	if (req->ring_index == req->length) {
		req->ring_index = 0;
		req->ring_ptr = req->ring;
	} else
		req->ring_ptr++;

	sp->flags |= SRB_DMA_VALID;

	/* Set chip new ring index. */
	WRT_REG_DWORD(req->req_q_in, req->ring_index);
	RD_REG_DWORD_RELAXED(&ha->iobase->isp24.hccr);

	/* Manage unprocessed RIO/ZIO responses. */
	if (vha->flags.process_response_queue &&
		rsp->ring_ptr->signature != RESPONSE_PROCESSED)
		qla24xx_process_response_queue(vha, rsp);

	spin_unlock_irqrestore(&ha->hardware_lock, flags);
	return QLA_SUCCESS;

queuing_error:
	if (tot_dsds)
		scsi_dma_unmap(cmd);

	spin_unlock_irqrestore(&ha->hardware_lock, flags);

	return QLA_FUNCTION_FAILED;
}

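/**
 * qla24xx_dif_start_scsi() - Send a SCSI command with DIF protection to the
 * ISP.
 * @sp: command to send to the ISP.
 *
 * Returns non-zero if a failure occurred, else zero.
 */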
int
qla24xx_dif_start_scsi(srb_t *sp)
{
	int nseg;
	unsigned long flags;
	uint32_t *clr_ptr;
	uint32_t index;
	uint32_t handle;
	uint16_t cnt;
	uint16_t req_cnt = 0;
	uint16_t tot_dsds;
	uint16_t tot_prot_dsds;
	uint16_t fw_prot_opts = 0;
	struct req_que *req = NULL;
	struct rsp_que *rsp = NULL;
	struct scsi_cmnd *cmd = GET_CMD_SP(sp);
	struct scsi_qla_host *vha = sp->fcport->vha;
	struct qla_hw_data *ha = vha->hw;
	struct cmd_type_crc_2 *cmd_pkt;
	uint32_t status = 0;

#define QDSS_GOT_Q_SPACE	BIT_0

	/* Only process protection or >16 cdb in this routine */
	if (scsi_get_prot_op(cmd) == SCSI_PROT_NORMAL) {
		if (cmd->cmd_len <= 16)
			return qla24xx_start_scsi(sp);
	}

	/* Setup device pointers. */

	qla25xx_set_que(sp, &rsp);
	req = vha->req;

	/* So we know we haven't pci_map'ed anything yet */
	tot_dsds = 0;

	/* Send marker if required */
	if (vha->marker_needed != 0) {
		if (qla2x00_marker(vha, req, rsp, 0, 0, MK_SYNC_ALL) !=
		    QLA_SUCCESS)
			return QLA_FUNCTION_FAILED;
		vha->marker_needed = 0;
	}

	/* Acquire ring specific lock */
	spin_lock_irqsave(&ha->hardware_lock, flags);

	/* Check for room in outstanding command list. */
	handle = req->current_outstanding_cmd;
	for (index = 1; index < MAX_OUTSTANDING_COMMANDS; index++) {
		handle++;
		if (handle == MAX_OUTSTANDING_COMMANDS)
			handle = 1;
		if (!req->outstanding_cmds[handle])
			break;
	}

	if (index == MAX_OUTSTANDING_COMMANDS)
		goto queuing_error;

	/* Compute number of required data segments */
	/* Map the sg table so we have an accurate count of sg entries needed */
	if (scsi_sg_count(cmd)) {
		nseg = dma_map_sg(&ha->pdev->dev, scsi_sglist(cmd),
		    scsi_sg_count(cmd), cmd->sc_data_direction);
		if (unlikely(!nseg))
			goto queuing_error;
		else
			sp->flags |= SRB_DMA_VALID;

		if ((scsi_get_prot_op(cmd) == SCSI_PROT_READ_INSERT) ||
		    (scsi_get_prot_op(cmd) == SCSI_PROT_WRITE_STRIP)) {
			struct qla2_sgx sgx;
			uint32_t partial;

			memset(&sgx, 0, sizeof(struct qla2_sgx));
			sgx.tot_bytes = scsi_bufflen(cmd);
			sgx.cur_sg = scsi_sglist(cmd);
			sgx.sp = sp;

			nseg = 0;
			while (qla24xx_get_one_block_sg(
			    cmd->device->sector_size, &sgx, &partial))
				nseg++;
		}
	} else
		nseg = 0;

	/* number of required data segments */
	tot_dsds = nseg;

	/* Compute number of required protection segments */
	if (qla24xx_configure_prot_mode(sp, &fw_prot_opts)) {
		nseg = dma_map_sg(&ha->pdev->dev, scsi_prot_sglist(cmd),
		    scsi_prot_sg_count(cmd), cmd->sc_data_direction);
		if (unlikely(!nseg))
			goto queuing_error;
		else
			sp->flags |= SRB_CRC_PROT_DMA_VALID;

		if ((scsi_get_prot_op(cmd) == SCSI_PROT_READ_INSERT) ||
		    (scsi_get_prot_op(cmd) == SCSI_PROT_WRITE_STRIP)) {
			nseg = scsi_bufflen(cmd) / cmd->device->sector_size;
		}
	} else {
		nseg = 0;
	}

	req_cnt = 1;
	/* Total Data and protection sg segment(s) */
	tot_prot_dsds = nseg;
	tot_dsds += nseg;
	if (req->cnt < (req_cnt + 2)) {
		cnt = RD_REG_DWORD_RELAXED(req->req_q_out);

		if (req->ring_index < cnt)
			req->cnt = cnt - req->ring_index;
		else
			req->cnt = req->length -
				(req->ring_index - cnt);
	}

	if (req->cnt < (req_cnt + 2))
		goto queuing_error;

	status |= QDSS_GOT_Q_SPACE;

	/* Build header part of command packet (excluding the OPCODE). */
	req->current_outstanding_cmd = handle;
	req->outstanding_cmds[handle] = sp;
	sp->handle = handle;
	cmd->host_scribble = (unsigned char *)(unsigned long)handle;
	req->cnt -= req_cnt;

	/* Fill-in common area */
	cmd_pkt = (struct cmd_type_crc_2 *)req->ring_ptr;
	cmd_pkt->handle = MAKE_HANDLE(req->id, handle);

	clr_ptr = (uint32_t *)cmd_pkt + 2;
	memset(clr_ptr, 0, REQUEST_ENTRY_SIZE - 8);

	/* Set NPORT-ID and LUN number. */
	cmd_pkt->nport_handle = cpu_to_le16(sp->fcport->loop_id);
	cmd_pkt->port_id[0] = sp->fcport->d_id.b.al_pa;
	cmd_pkt->port_id[1] = sp->fcport->d_id.b.area;
	cmd_pkt->port_id[2] = sp->fcport->d_id.b.domain;

	int_to_scsilun(cmd->device->lun, &cmd_pkt->lun);
	host_to_fcp_swap((uint8_t *)&cmd_pkt->lun, sizeof(cmd_pkt->lun));

	/* Total Data and protection segment(s) */
	cmd_pkt->dseg_count = cpu_to_le16(tot_dsds);

	/* Build IOCB segments and adjust for data protection segments */
	if (qla24xx_build_scsi_crc_2_iocbs(sp, (struct cmd_type_crc_2 *)
	    req->ring_ptr, tot_dsds, tot_prot_dsds, fw_prot_opts) !=
		QLA_SUCCESS)
		goto queuing_error;

	cmd_pkt->entry_count = (uint8_t)req_cnt;
	/* Specify response queue number where completion should happen */
	cmd_pkt->entry_status = (uint8_t) rsp->id;
	cmd_pkt->timeout = __constant_cpu_to_le16(0);
	wmb();

	/* Adjust ring index. */
	req->ring_index++;
	if (req->ring_index == req->length) {
		req->ring_index = 0;
		req->ring_ptr = req->ring;
	} else
		req->ring_ptr++;

	/* Set chip new ring index. */
	WRT_REG_DWORD(req->req_q_in, req->ring_index);
	RD_REG_DWORD_RELAXED(&ha->iobase->isp24.hccr);

	/* Manage unprocessed RIO/ZIO responses. */
	if (vha->flags.process_response_queue &&
	    rsp->ring_ptr->signature != RESPONSE_PROCESSED)
		qla24xx_process_response_queue(vha, rsp);

	spin_unlock_irqrestore(&ha->hardware_lock, flags);

	return QLA_SUCCESS;

queuing_error:
	if (status & QDSS_GOT_Q_SPACE) {
		req->outstanding_cmds[handle] = NULL;
		req->cnt += req_cnt;
	}
	/* Cleanup will be performed by the caller (queuecommand) */

	spin_unlock_irqrestore(&ha->hardware_lock, flags);
	return QLA_FUNCTION_FAILED;
}

static void qla25xx_set_que(srb_t *sp, struct rsp_que **rsp)
{
	struct scsi_cmnd *cmd = GET_CMD_SP(sp);
	struct qla_hw_data *ha = sp->fcport->vha->hw;
	int affinity = cmd->request->cpu;

	if (ha->flags.cpu_affinity_enabled && affinity >= 0 &&
		affinity < ha->max_rsp_queues - 1)
		*rsp = ha->rsp_q_map[affinity + 1];
	else
		*rsp = ha->rsp_q_map[0];
}

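/**
 * qla2x00_alloc_iocbs() - Allocate request-ring space for an IOCB and, when
 * @sp is supplied, reserve an outstanding-command slot for it.
 * @vha: host adapter.
 * @sp: SRB the IOCB belongs to, or NULL for a bare ring entry.
 *
 * Returns a zeroed request packet, or NULL when no ring space is available.
 */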
void *
qla2x00_alloc_iocbs(scsi_qla_host_t *vha, srb_t *sp)
{
	struct qla_hw_data *ha = vha->hw;
	struct req_que *req = ha->req_q_map[0];
	device_reg_t __iomem *reg = ISP_QUE_REG(ha, req->id);
	uint32_t index, handle;
	request_t *pkt;
	uint16_t cnt, req_cnt;

	pkt = NULL;
	req_cnt = 1;
	handle = 0;

	if (!sp)
		goto skip_cmd_array;

	/* Check for room in outstanding command list. */
	handle = req->current_outstanding_cmd;
	for (index = 1; index < MAX_OUTSTANDING_COMMANDS; index++) {
		handle++;
		if (handle == MAX_OUTSTANDING_COMMANDS)
			handle = 1;
		if (!req->outstanding_cmds[handle])
			break;
	}
	if (index == MAX_OUTSTANDING_COMMANDS) {
		ql_log(ql_log_warn, vha, 0x700b,
		    "No room on outstanding cmd array.\n");
		goto queuing_error;
	}

	/* Prep command array. */
	req->current_outstanding_cmd = handle;
	req->outstanding_cmds[handle] = sp;
	sp->handle = handle;

	/* Adjust entry-counts as needed. */
	if (sp->type != SRB_SCSI_CMD)
		req_cnt = sp->iocbs;

skip_cmd_array:
	/* Check for room on request queue. */
	if (req->cnt < req_cnt) {
		if (ha->mqenable || IS_QLA83XX(ha))
			cnt = RD_REG_DWORD(&reg->isp25mq.req_q_out);
		else if (IS_QLA82XX(ha))
			cnt = RD_REG_DWORD(&reg->isp82.req_q_out);
		else if (IS_FWI2_CAPABLE(ha))
			cnt = RD_REG_DWORD(&reg->isp24.req_q_out);
		else
			cnt = qla2x00_debounce_register(
			    ISP_REQ_Q_OUT(ha, &reg->isp));

		if (req->ring_index < cnt)
			req->cnt = cnt - req->ring_index;
		else
			req->cnt = req->length -
			    (req->ring_index - cnt);
	}
	if (req->cnt < req_cnt)
		goto queuing_error;

	/* Prep packet */
	req->cnt -= req_cnt;
	pkt = req->ring_ptr;
	memset(pkt, 0, REQUEST_ENTRY_SIZE);
	pkt->entry_count = req_cnt;
	pkt->handle = handle;

queuing_error:
	return pkt;
}

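/**
 * qla24xx_login_iocb() - Build a PLOGI Login/Logout Port IOCB (FWI2).
 * @sp: SRB carrying the login parameters.
 * @logio: IOCB to populate.
 */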
static void
qla24xx_login_iocb(srb_t *sp, struct logio_entry_24xx *logio)
{
	struct srb_iocb *lio = &sp->u.iocb_cmd;

	logio->entry_type = LOGINOUT_PORT_IOCB_TYPE;
	logio->control_flags = cpu_to_le16(LCF_COMMAND_PLOGI);
	if (lio->u.logio.flags & SRB_LOGIN_COND_PLOGI)
		logio->control_flags |= cpu_to_le16(LCF_COND_PLOGI);
	if (lio->u.logio.flags & SRB_LOGIN_SKIP_PRLI)
		logio->control_flags |= cpu_to_le16(LCF_SKIP_PRLI);
	logio->nport_handle = cpu_to_le16(sp->fcport->loop_id);
	logio->port_id[0] = sp->fcport->d_id.b.al_pa;
	logio->port_id[1] = sp->fcport->d_id.b.area;
	logio->port_id[2] = sp->fcport->d_id.b.domain;
	logio->vp_index = sp->fcport->vp_idx;
}

static void
qla2x00_login_iocb(srb_t *sp, struct mbx_entry *mbx)
{
	struct qla_hw_data *ha = sp->fcport->vha->hw;
	struct srb_iocb *lio = &sp->u.iocb_cmd;
	uint16_t opts;

	mbx->entry_type = MBX_IOCB_TYPE;
	SET_TARGET_ID(ha, mbx->loop_id, sp->fcport->loop_id);
	mbx->mb0 = cpu_to_le16(MBC_LOGIN_FABRIC_PORT);
	opts = lio->u.logio.flags & SRB_LOGIN_COND_PLOGI ? BIT_0 : 0;
	opts |= lio->u.logio.flags & SRB_LOGIN_SKIP_PRLI ? BIT_1 : 0;
	if (HAS_EXTENDED_IDS(ha)) {
		mbx->mb1 = cpu_to_le16(sp->fcport->loop_id);
		mbx->mb10 = cpu_to_le16(opts);
	} else {
		mbx->mb1 = cpu_to_le16((sp->fcport->loop_id << 8) | opts);
	}
	mbx->mb2 = cpu_to_le16(sp->fcport->d_id.b.domain);
	mbx->mb3 = cpu_to_le16(sp->fcport->d_id.b.area << 8 |
	    sp->fcport->d_id.b.al_pa);
	mbx->mb9 = cpu_to_le16(sp->fcport->vp_idx);
}

static void
qla24xx_logout_iocb(srb_t *sp, struct logio_entry_24xx *logio)
{
	logio->entry_type = LOGINOUT_PORT_IOCB_TYPE;
	logio->control_flags =
	    cpu_to_le16(LCF_COMMAND_LOGO|LCF_IMPL_LOGO);
	logio->nport_handle = cpu_to_le16(sp->fcport->loop_id);
	logio->port_id[0] = sp->fcport->d_id.b.al_pa;
	logio->port_id[1] = sp->fcport->d_id.b.area;
	logio->port_id[2] = sp->fcport->d_id.b.domain;
	logio->vp_index = sp->fcport->vp_idx;
}

static void
qla2x00_logout_iocb(srb_t *sp, struct mbx_entry *mbx)
{
	struct qla_hw_data *ha = sp->fcport->vha->hw;

	mbx->entry_type = MBX_IOCB_TYPE;
	SET_TARGET_ID(ha, mbx->loop_id, sp->fcport->loop_id);
	mbx->mb0 = cpu_to_le16(MBC_LOGOUT_FABRIC_PORT);
	mbx->mb1 = HAS_EXTENDED_IDS(ha) ?
	    cpu_to_le16(sp->fcport->loop_id):
	    cpu_to_le16(sp->fcport->loop_id << 8);
	mbx->mb2 = cpu_to_le16(sp->fcport->d_id.b.domain);
	mbx->mb3 = cpu_to_le16(sp->fcport->d_id.b.area << 8 |
	    sp->fcport->d_id.b.al_pa);
	mbx->mb9 = cpu_to_le16(sp->fcport->vp_idx);
}

static void
qla24xx_adisc_iocb(srb_t *sp, struct logio_entry_24xx *logio)
{
	logio->entry_type = LOGINOUT_PORT_IOCB_TYPE;
	logio->control_flags = cpu_to_le16(LCF_COMMAND_ADISC);
	logio->nport_handle = cpu_to_le16(sp->fcport->loop_id);
	logio->vp_index = sp->fcport->vp_idx;
}

static void
qla2x00_adisc_iocb(srb_t *sp, struct mbx_entry *mbx)
{
	struct qla_hw_data *ha = sp->fcport->vha->hw;

	mbx->entry_type = MBX_IOCB_TYPE;
	SET_TARGET_ID(ha, mbx->loop_id, sp->fcport->loop_id);
	mbx->mb0 = cpu_to_le16(MBC_GET_PORT_DATABASE);
	if (HAS_EXTENDED_IDS(ha)) {
		mbx->mb1 = cpu_to_le16(sp->fcport->loop_id);
		mbx->mb10 = cpu_to_le16(BIT_0);
	} else {
		mbx->mb1 = cpu_to_le16((sp->fcport->loop_id << 8) | BIT_0);
	}
	mbx->mb2 = cpu_to_le16(MSW(ha->async_pd_dma));
	mbx->mb3 = cpu_to_le16(LSW(ha->async_pd_dma));
	mbx->mb6 = cpu_to_le16(MSW(MSD(ha->async_pd_dma)));
	mbx->mb7 = cpu_to_le16(LSW(MSD(ha->async_pd_dma)));
	mbx->mb9 = cpu_to_le16(sp->fcport->vp_idx);
}

static void
qla24xx_tm_iocb(srb_t *sp, struct tsk_mgmt_entry *tsk)
{
	uint32_t flags;
	unsigned int lun;
	struct fc_port *fcport = sp->fcport;
	scsi_qla_host_t *vha = fcport->vha;
	struct qla_hw_data *ha = vha->hw;
	struct srb_iocb *iocb = &sp->u.iocb_cmd;
	struct req_que *req = vha->req;

	flags = iocb->u.tmf.flags;
	lun = iocb->u.tmf.lun;

	tsk->entry_type = TSK_MGMT_IOCB_TYPE;
	tsk->entry_count = 1;
	tsk->handle = MAKE_HANDLE(req->id, tsk->handle);
	tsk->nport_handle = cpu_to_le16(fcport->loop_id);
	tsk->timeout = cpu_to_le16(ha->r_a_tov / 10 * 2);
	tsk->control_flags = cpu_to_le32(flags);
	tsk->port_id[0] = fcport->d_id.b.al_pa;
	tsk->port_id[1] = fcport->d_id.b.area;
	tsk->port_id[2] = fcport->d_id.b.domain;
	tsk->vp_index = fcport->vp_idx;

	if (flags == TCF_LUN_RESET) {
		int_to_scsilun(lun, &tsk->lun);
		host_to_fcp_swap((uint8_t *)&tsk->lun,
		    sizeof(tsk->lun));
	}
}

static void
qla24xx_els_iocb(srb_t *sp, struct els_entry_24xx *els_iocb)
{
	struct fc_bsg_job *bsg_job = sp->u.bsg_job;

	els_iocb->entry_type = ELS_IOCB_TYPE;
	els_iocb->entry_count = 1;
	els_iocb->sys_define = 0;
	els_iocb->entry_status = 0;
	els_iocb->handle = sp->handle;
	els_iocb->nport_handle = cpu_to_le16(sp->fcport->loop_id);
	els_iocb->tx_dsd_count = __constant_cpu_to_le16(bsg_job->request_payload.sg_cnt);
	els_iocb->vp_index = sp->fcport->vp_idx;
	els_iocb->sof_type = EST_SOFI3;
	els_iocb->rx_dsd_count = __constant_cpu_to_le16(bsg_job->reply_payload.sg_cnt);

	els_iocb->opcode =
	    sp->type == SRB_ELS_CMD_RPT ?
	    bsg_job->request->rqst_data.r_els.els_code :
	    bsg_job->request->rqst_data.h_els.command_code;
	els_iocb->port_id[0] = sp->fcport->d_id.b.al_pa;
	els_iocb->port_id[1] = sp->fcport->d_id.b.area;
	els_iocb->port_id[2] = sp->fcport->d_id.b.domain;
	els_iocb->control_flags = 0;
	els_iocb->rx_byte_count =
	    cpu_to_le32(bsg_job->reply_payload.payload_len);
	els_iocb->tx_byte_count =
	    cpu_to_le32(bsg_job->request_payload.payload_len);

	els_iocb->tx_address[0] = cpu_to_le32(LSD(sg_dma_address
	    (bsg_job->request_payload.sg_list)));
	els_iocb->tx_address[1] = cpu_to_le32(MSD(sg_dma_address
	    (bsg_job->request_payload.sg_list)));
	els_iocb->tx_len = cpu_to_le32(sg_dma_len
	    (bsg_job->request_payload.sg_list));

	els_iocb->rx_address[0] = cpu_to_le32(LSD(sg_dma_address
	    (bsg_job->reply_payload.sg_list)));
	els_iocb->rx_address[1] = cpu_to_le32(MSD(sg_dma_address
	    (bsg_job->reply_payload.sg_list)));
	els_iocb->rx_len = cpu_to_le32(sg_dma_len
	    (bsg_job->reply_payload.sg_list));
}

static void
qla2x00_ct_iocb(srb_t *sp, ms_iocb_entry_t *ct_iocb)
{
	uint16_t avail_dsds;
	uint32_t *cur_dsd;
	struct scatterlist *sg;
	int index;
	uint16_t tot_dsds;
	scsi_qla_host_t *vha = sp->fcport->vha;
	struct qla_hw_data *ha = vha->hw;
	struct fc_bsg_job *bsg_job = sp->u.bsg_job;
	int loop_iteration = 0;
	int cont_iocb_prsnt = 0;
	int entry_count = 1;

	memset(ct_iocb, 0, sizeof(ms_iocb_entry_t));
	ct_iocb->entry_type = CT_IOCB_TYPE;
	ct_iocb->entry_status = 0;
	ct_iocb->handle1 = sp->handle;
	SET_TARGET_ID(ha, ct_iocb->loop_id, sp->fcport->loop_id);
	ct_iocb->status = __constant_cpu_to_le16(0);
	ct_iocb->control_flags = __constant_cpu_to_le16(0);
	ct_iocb->timeout = 0;
	ct_iocb->cmd_dsd_count =
	    __constant_cpu_to_le16(bsg_job->request_payload.sg_cnt);
	ct_iocb->total_dsd_count =
	    __constant_cpu_to_le16(bsg_job->request_payload.sg_cnt + 1);
	ct_iocb->req_bytecount =
	    cpu_to_le32(bsg_job->request_payload.payload_len);
	ct_iocb->rsp_bytecount =
	    cpu_to_le32(bsg_job->reply_payload.payload_len);

	ct_iocb->dseg_req_address[0] = cpu_to_le32(LSD(sg_dma_address
	    (bsg_job->request_payload.sg_list)));
	ct_iocb->dseg_req_address[1] = cpu_to_le32(MSD(sg_dma_address
	    (bsg_job->request_payload.sg_list)));
	ct_iocb->dseg_req_length = ct_iocb->req_bytecount;

	ct_iocb->dseg_rsp_address[0] = cpu_to_le32(LSD(sg_dma_address
	    (bsg_job->reply_payload.sg_list)));
	ct_iocb->dseg_rsp_address[1] = cpu_to_le32(MSD(sg_dma_address
	    (bsg_job->reply_payload.sg_list)));
	ct_iocb->dseg_rsp_length = ct_iocb->rsp_bytecount;

	avail_dsds = 1;
	cur_dsd = (uint32_t *)ct_iocb->dseg_rsp_address;
	index = 0;
	tot_dsds = bsg_job->reply_payload.sg_cnt;

	for_each_sg(bsg_job->reply_payload.sg_list, sg, tot_dsds, index) {
		dma_addr_t sle_dma;
		cont_a64_entry_t *cont_pkt;

		/* Allocate additional continuation packets? */
		if (avail_dsds == 0) {
			/*
			 * Five DSDs are available in the Continuation
			 * Type 1 IOCB.
			 */
			cont_pkt = qla2x00_prep_cont_type1_iocb(vha,
			    vha->hw->req_q_map[0]);
			cur_dsd = (uint32_t *) cont_pkt->dseg_0_address;
			avail_dsds = 5;
			cont_iocb_prsnt = 1;
			entry_count++;
		}

		sle_dma = sg_dma_address(sg);
		*cur_dsd++ = cpu_to_le32(LSD(sle_dma));
		*cur_dsd++ = cpu_to_le32(MSD(sle_dma));
		*cur_dsd++ = cpu_to_le32(sg_dma_len(sg));
		loop_iteration++;
		avail_dsds--;
	}
	ct_iocb->entry_count = entry_count;
}

static void
qla24xx_ct_iocb(srb_t *sp, struct ct_entry_24xx *ct_iocb)
{
	uint16_t avail_dsds;
	uint32_t *cur_dsd;
	struct scatterlist *sg;
	int index;
	uint16_t tot_dsds;
	scsi_qla_host_t *vha = sp->fcport->vha;
	struct qla_hw_data *ha = vha->hw;
	struct fc_bsg_job *bsg_job = sp->u.bsg_job;
	int loop_iteration = 0;
	int cont_iocb_prsnt = 0;
	int entry_count = 1;

	ct_iocb->entry_type = CT_IOCB_TYPE;
	ct_iocb->entry_status = 0;
	ct_iocb->sys_define = 0;
	ct_iocb->handle = sp->handle;

	ct_iocb->nport_handle = cpu_to_le16(sp->fcport->loop_id);
	ct_iocb->vp_index = sp->fcport->vp_idx;
	ct_iocb->comp_status = __constant_cpu_to_le16(0);

	ct_iocb->cmd_dsd_count =
	    __constant_cpu_to_le16(bsg_job->request_payload.sg_cnt);
	ct_iocb->timeout = 0;
	ct_iocb->rsp_dsd_count =
	    __constant_cpu_to_le16(bsg_job->reply_payload.sg_cnt);
	ct_iocb->rsp_byte_count =
	    cpu_to_le32(bsg_job->reply_payload.payload_len);
	ct_iocb->cmd_byte_count =
	    cpu_to_le32(bsg_job->request_payload.payload_len);
	ct_iocb->dseg_0_address[0] = cpu_to_le32(LSD(sg_dma_address
	    (bsg_job->request_payload.sg_list)));
	ct_iocb->dseg_0_address[1] = cpu_to_le32(MSD(sg_dma_address
	    (bsg_job->request_payload.sg_list)));
	ct_iocb->dseg_0_len = cpu_to_le32(sg_dma_len
	    (bsg_job->request_payload.sg_list));

	avail_dsds = 1;
	cur_dsd = (uint32_t *)ct_iocb->dseg_1_address;
	index = 0;
	tot_dsds = bsg_job->reply_payload.sg_cnt;

	for_each_sg(bsg_job->reply_payload.sg_list, sg, tot_dsds, index) {
		dma_addr_t sle_dma;
		cont_a64_entry_t *cont_pkt;

		/* Allocate additional continuation packets? */
		if (avail_dsds == 0) {
			/*
			 * Five DSDs are available in the Continuation
			 * Type 1 IOCB.
			 */
			cont_pkt = qla2x00_prep_cont_type1_iocb(vha,
			    ha->req_q_map[0]);
			cur_dsd = (uint32_t *) cont_pkt->dseg_0_address;
			avail_dsds = 5;
			cont_iocb_prsnt = 1;
			entry_count++;
		}

		sle_dma = sg_dma_address(sg);
		*cur_dsd++ = cpu_to_le32(LSD(sle_dma));
		*cur_dsd++ = cpu_to_le32(MSD(sle_dma));
		*cur_dsd++ = cpu_to_le32(sg_dma_len(sg));
		loop_iteration++;
		avail_dsds--;
	}
	ct_iocb->entry_count = entry_count;
}

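/**
 * qla82xx_start_scsi() - Send a SCSI command to the ISP (82xx).
 * @sp: command to send to the ISP.
 *
 * Returns non-zero if a failure occurred, else zero.
 */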
2221int
2222qla82xx_start_scsi(srb_t *sp)
2223{
2224 int ret, nseg;
2225 unsigned long flags;
2226 struct scsi_cmnd *cmd;
2227 uint32_t *clr_ptr;
2228 uint32_t index;
2229 uint32_t handle;
2230 uint16_t cnt;
2231 uint16_t req_cnt;
2232 uint16_t tot_dsds;
2233 struct device_reg_82xx __iomem *reg;
2234 uint32_t dbval;
2235 uint32_t *fcp_dl;
2236 uint8_t additional_cdb_len;
2237 struct ct6_dsd *ctx;
2238 struct scsi_qla_host *vha = sp->fcport->vha;
2239 struct qla_hw_data *ha = vha->hw;
2240 struct req_que *req = NULL;
2241 struct rsp_que *rsp = NULL;
2242 char tag[2];
2243
2244
2245 ret = 0;
2246 reg = &ha->iobase->isp82;
2247 cmd = GET_CMD_SP(sp);
2248 req = vha->req;
2249 rsp = ha->rsp_q_map[0];
2250
2251
2252 tot_dsds = 0;
2253
2254 dbval = 0x04 | (ha->portnum << 5);
2255
2256
2257 if (vha->marker_needed != 0) {
2258 if (qla2x00_marker(vha, req,
2259 rsp, 0, 0, MK_SYNC_ALL) != QLA_SUCCESS) {
2260 ql_log(ql_log_warn, vha, 0x300c,
2261 "qla2x00_marker failed for cmd=%p.\n", cmd);
2262 return QLA_FUNCTION_FAILED;
2263 }
2264 vha->marker_needed = 0;
2265 }
2266
2267
2268 spin_lock_irqsave(&ha->hardware_lock, flags);
2269
2270
	handle = req->current_outstanding_cmd;
	for (index = 1; index < MAX_OUTSTANDING_COMMANDS; index++) {
		handle++;
		if (handle == MAX_OUTSTANDING_COMMANDS)
			handle = 1;
		if (!req->outstanding_cmds[handle])
			break;
	}
	if (index == MAX_OUTSTANDING_COMMANDS)
		goto queuing_error;

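	/* Map the sg table so we have an accurate count of sg entries needed */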
	if (scsi_sg_count(cmd)) {
		nseg = dma_map_sg(&ha->pdev->dev, scsi_sglist(cmd),
		    scsi_sg_count(cmd), cmd->sc_data_direction);
		if (unlikely(!nseg))
			goto queuing_error;
	} else
		nseg = 0;

	tot_dsds = nseg;

	if (tot_dsds > ql2xshiftctondsd) {
		struct cmd_type_6 *cmd_pkt;
		uint16_t more_dsd_lists = 0;
		struct dsd_dma *dsd_ptr;
		uint16_t i;

		more_dsd_lists = qla24xx_calc_dsd_lists(tot_dsds);
		if ((more_dsd_lists + ha->gbl_dsd_inuse) >= NUM_DSD_CHAIN) {
			ql_dbg(ql_dbg_io, vha, 0x300d,
			    "Num of DSD list %d is more than %d for cmd=%p.\n",
			    more_dsd_lists + ha->gbl_dsd_inuse, NUM_DSD_CHAIN,
			    cmd);
			goto queuing_error;
		}

		if (more_dsd_lists <= ha->gbl_dsd_avail)
			goto sufficient_dsds;
		else
			more_dsd_lists -= ha->gbl_dsd_avail;

		for (i = 0; i < more_dsd_lists; i++) {
			dsd_ptr = kzalloc(sizeof(struct dsd_dma), GFP_ATOMIC);
			if (!dsd_ptr) {
				ql_log(ql_log_fatal, vha, 0x300e,
				    "Failed to allocate memory for dsd_dma "
				    "for cmd=%p.\n", cmd);
				goto queuing_error;
			}

			dsd_ptr->dsd_addr = dma_pool_alloc(ha->dl_dma_pool,
			    GFP_ATOMIC, &dsd_ptr->dsd_list_dma);
			if (!dsd_ptr->dsd_addr) {
				kfree(dsd_ptr);
				ql_log(ql_log_fatal, vha, 0x300f,
				    "Failed to allocate memory for dsd_addr "
				    "for cmd=%p.\n", cmd);
				goto queuing_error;
			}
			list_add_tail(&dsd_ptr->list, &ha->gbl_dsd_list);
			ha->gbl_dsd_avail++;
		}

sufficient_dsds:
		req_cnt = 1;

		if (req->cnt < (req_cnt + 2)) {
			cnt = (uint16_t)RD_REG_DWORD_RELAXED(
			    &reg->req_q_out[0]);
			if (req->ring_index < cnt)
				req->cnt = cnt - req->ring_index;
			else
				req->cnt = req->length -
				    (req->ring_index - cnt);
		}

		if (req->cnt < (req_cnt + 2))
			goto queuing_error;

		ctx = sp->u.scmd.ctx =
		    mempool_alloc(ha->ctx_mempool, GFP_ATOMIC);
		if (!ctx) {
			ql_log(ql_log_fatal, vha, 0x3010,
			    "Failed to allocate ctx for cmd=%p.\n", cmd);
			goto queuing_error;
		}

		memset(ctx, 0, sizeof(struct ct6_dsd));
		ctx->fcp_cmnd = dma_pool_alloc(ha->fcp_cmnd_dma_pool,
		    GFP_ATOMIC, &ctx->fcp_cmnd_dma);
		if (!ctx->fcp_cmnd) {
			ql_log(ql_log_fatal, vha, 0x3011,
			    "Failed to allocate fcp_cmnd for cmd=%p.\n", cmd);
			goto queuing_error;
		}

		/* Initialize the DSD list and dma handle */
		INIT_LIST_HEAD(&ctx->dsd_list);
		ctx->dsd_use_cnt = 0;

		if (cmd->cmd_len > 16) {
			additional_cdb_len = cmd->cmd_len - 16;
			if ((cmd->cmd_len % 4) != 0) {
				/*
				 * SCSI command bigger than 16 bytes must be
				 * multiple of 4
				 */
				ql_log(ql_log_warn, vha, 0x3012,
				    "scsi cmd len %d not multiple of 4 "
				    "for cmd=%p.\n", cmd->cmd_len, cmd);
				goto queuing_error_fcp_cmnd;
			}
			ctx->fcp_cmnd_len = 12 + cmd->cmd_len + 4;
		} else {
			additional_cdb_len = 0;
			ctx->fcp_cmnd_len = 12 + 16 + 4;
		}

		cmd_pkt = (struct cmd_type_6 *)req->ring_ptr;
		cmd_pkt->handle = MAKE_HANDLE(req->id, handle);

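		/* Zero out remaining portion of packet. */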
		clr_ptr = (uint32_t *)cmd_pkt + 2;
		memset(clr_ptr, 0, REQUEST_ENTRY_SIZE - 8);
		cmd_pkt->dseg_count = cpu_to_le16(tot_dsds);

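		/* Set NPORT-ID and LUN number */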
		cmd_pkt->nport_handle = cpu_to_le16(sp->fcport->loop_id);
		cmd_pkt->port_id[0] = sp->fcport->d_id.b.al_pa;
		cmd_pkt->port_id[1] = sp->fcport->d_id.b.area;
		cmd_pkt->port_id[2] = sp->fcport->d_id.b.domain;
		cmd_pkt->vp_index = sp->fcport->vp_idx;

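		/* Build IOCB segments */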
		if (qla24xx_build_scsi_type_6_iocbs(sp, cmd_pkt, tot_dsds))
			goto queuing_error_fcp_cmnd;

		int_to_scsilun(cmd->device->lun, &cmd_pkt->lun);
		host_to_fcp_swap((uint8_t *)&cmd_pkt->lun, sizeof(cmd_pkt->lun));

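		/* build FCP_CMND IU */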
		memset(ctx->fcp_cmnd, 0, sizeof(struct fcp_cmnd));
		int_to_scsilun(cmd->device->lun, &ctx->fcp_cmnd->lun);
		ctx->fcp_cmnd->additional_cdb_len = additional_cdb_len;

		if (cmd->sc_data_direction == DMA_TO_DEVICE)
			ctx->fcp_cmnd->additional_cdb_len |= 1;
		else if (cmd->sc_data_direction == DMA_FROM_DEVICE)
			ctx->fcp_cmnd->additional_cdb_len |= 2;

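		/*
		 * Update tagged queuing modifier -- default is TSK_SIMPLE (0).
		 */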
		if (scsi_populate_tag_msg(cmd, tag)) {
			switch (tag[0]) {
			case HEAD_OF_QUEUE_TAG:
				ctx->fcp_cmnd->task_attribute =
				    TSK_HEAD_OF_QUEUE;
				break;
			case ORDERED_QUEUE_TAG:
				ctx->fcp_cmnd->task_attribute =
				    TSK_ORDERED;
				break;
			}
		}

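		/* Populate the FCP_PRIO. */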
		if (ha->flags.fcp_prio_enabled)
			ctx->fcp_cmnd->task_attribute |=
			    sp->fcport->fcp_prio << 3;

		memcpy(ctx->fcp_cmnd->cdb, cmd->cmnd, cmd->cmd_len);

		fcp_dl = (uint32_t *)(ctx->fcp_cmnd->cdb + 16 +
		    additional_cdb_len);
		*fcp_dl = htonl((uint32_t)scsi_bufflen(cmd));

		cmd_pkt->fcp_cmnd_dseg_len = cpu_to_le16(ctx->fcp_cmnd_len);
		cmd_pkt->fcp_cmnd_dseg_address[0] =
		    cpu_to_le32(LSD(ctx->fcp_cmnd_dma));
		cmd_pkt->fcp_cmnd_dseg_address[1] =
		    cpu_to_le32(MSD(ctx->fcp_cmnd_dma));

		sp->flags |= SRB_FCP_CMND_DMA_VALID;
		cmd_pkt->byte_count = cpu_to_le32((uint32_t)scsi_bufflen(cmd));

		cmd_pkt->entry_count = (uint8_t)req_cnt;

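		/* Specify response queue number where
		 * completion should happen
		 */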
		cmd_pkt->entry_status = (uint8_t) rsp->id;
	} else {
		struct cmd_type_7 *cmd_pkt;
		req_cnt = qla24xx_calc_iocbs(vha, tot_dsds);
		if (req->cnt < (req_cnt + 2)) {
			cnt = (uint16_t)RD_REG_DWORD_RELAXED(
			    &reg->req_q_out[0]);
			if (req->ring_index < cnt)
				req->cnt = cnt - req->ring_index;
			else
				req->cnt = req->length -
				    (req->ring_index - cnt);
		}
		if (req->cnt < (req_cnt + 2))
			goto queuing_error;

		cmd_pkt = (struct cmd_type_7 *)req->ring_ptr;
		cmd_pkt->handle = MAKE_HANDLE(req->id, handle);

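		/* Zero out remaining portion of packet. */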
		clr_ptr = (uint32_t *)cmd_pkt + 2;
		memset(clr_ptr, 0, REQUEST_ENTRY_SIZE - 8);
		cmd_pkt->dseg_count = cpu_to_le16(tot_dsds);

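		/* Set NPORT-ID and LUN number */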
		cmd_pkt->nport_handle = cpu_to_le16(sp->fcport->loop_id);
		cmd_pkt->port_id[0] = sp->fcport->d_id.b.al_pa;
		cmd_pkt->port_id[1] = sp->fcport->d_id.b.area;
		cmd_pkt->port_id[2] = sp->fcport->d_id.b.domain;
		cmd_pkt->vp_index = sp->fcport->vp_idx;

		int_to_scsilun(cmd->device->lun, &cmd_pkt->lun);
		host_to_fcp_swap((uint8_t *)&cmd_pkt->lun,
		    sizeof(cmd_pkt->lun));

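		/*
		 * Update tagged queuing modifier -- default is TSK_SIMPLE (0).
		 */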
		if (scsi_populate_tag_msg(cmd, tag)) {
			switch (tag[0]) {
			case HEAD_OF_QUEUE_TAG:
				cmd_pkt->task = TSK_HEAD_OF_QUEUE;
				break;
			case ORDERED_QUEUE_TAG:
				cmd_pkt->task = TSK_ORDERED;
				break;
			}
		}

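		/* Populate the FCP_PRIO. */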
		if (ha->flags.fcp_prio_enabled)
			cmd_pkt->task |= sp->fcport->fcp_prio << 3;

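		/* Load SCSI command packet. */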
		memcpy(cmd_pkt->fcp_cdb, cmd->cmnd, cmd->cmd_len);
		host_to_fcp_swap(cmd_pkt->fcp_cdb, sizeof(cmd_pkt->fcp_cdb));

		cmd_pkt->byte_count = cpu_to_le32((uint32_t)scsi_bufflen(cmd));

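		/* Build IOCB segments */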
		qla24xx_build_scsi_iocbs(sp, cmd_pkt, tot_dsds);

		/* Set total data segment count. */
		cmd_pkt->entry_count = (uint8_t)req_cnt;

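		/* Specify response queue number where
		 * completion should happen
		 */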
		cmd_pkt->entry_status = (uint8_t) rsp->id;
	}

	req->current_outstanding_cmd = handle;
	req->outstanding_cmds[handle] = sp;
	sp->handle = handle;
	cmd->host_scribble = (unsigned char *)(unsigned long)handle;
	req->cnt -= req_cnt;
	wmb();

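	/* Adjust ring index. */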
	req->ring_index++;
	if (req->ring_index == req->length) {
		req->ring_index = 0;
		req->ring_ptr = req->ring;
	} else
		req->ring_ptr++;

	sp->flags |= SRB_DMA_VALID;

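	/* write, read and verify logic */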
	dbval = dbval | (req->id << 8) | (req->ring_index << 16);
	if (ql2xdbwr)
		qla82xx_wr_32(ha, ha->nxdb_wr_ptr, dbval);
	else {
		WRT_REG_DWORD(
		    (unsigned long __iomem *)ha->nxdb_wr_ptr,
		    dbval);
		wmb();
		while (RD_REG_DWORD(ha->nxdb_rd_ptr) != dbval) {
			WRT_REG_DWORD(
			    (unsigned long __iomem *)ha->nxdb_wr_ptr,
			    dbval);
			wmb();
		}
	}

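	/* Manage unprocessed RIO/ZIO commands in response queue. */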
	if (vha->flags.process_response_queue &&
	    rsp->ring_ptr->signature != RESPONSE_PROCESSED)
		qla24xx_process_response_queue(vha, rsp);

	spin_unlock_irqrestore(&ha->hardware_lock, flags);
	return QLA_SUCCESS;

queuing_error_fcp_cmnd:
	dma_pool_free(ha->fcp_cmnd_dma_pool, ctx->fcp_cmnd, ctx->fcp_cmnd_dma);
queuing_error:
	if (tot_dsds)
		scsi_dma_unmap(cmd);

	if (sp->u.scmd.ctx) {
		mempool_free(sp->u.scmd.ctx, ha->ctx_mempool);
		sp->u.scmd.ctx = NULL;
	}
	spin_unlock_irqrestore(&ha->hardware_lock, flags);

	return QLA_FUNCTION_FAILED;
}

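/**
 * qla2x00_start_sp() - Build and issue an IOCB for the given SRB.
 * @sp: SRB to send to the ISP
 *
 * Returns QLA_SUCCESS on success, else QLA_FUNCTION_FAILED.
 */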
int
qla2x00_start_sp(srb_t *sp)
{
	int rval;
	struct qla_hw_data *ha = sp->fcport->vha->hw;
	void *pkt;
	unsigned long flags;

	rval = QLA_FUNCTION_FAILED;
	spin_lock_irqsave(&ha->hardware_lock, flags);
	pkt = qla2x00_alloc_iocbs(sp->fcport->vha, sp);
	if (!pkt) {
		ql_log(ql_log_warn, sp->fcport->vha, 0x700c,
		    "qla2x00_alloc_iocbs failed.\n");
		goto done;
	}

	rval = QLA_SUCCESS;
	switch (sp->type) {
	case SRB_LOGIN_CMD:
		IS_FWI2_CAPABLE(ha) ?
		    qla24xx_login_iocb(sp, pkt) :
		    qla2x00_login_iocb(sp, pkt);
		break;
	case SRB_LOGOUT_CMD:
		IS_FWI2_CAPABLE(ha) ?
		    qla24xx_logout_iocb(sp, pkt) :
		    qla2x00_logout_iocb(sp, pkt);
		break;
	case SRB_ELS_CMD_RPT:
	case SRB_ELS_CMD_HST:
		qla24xx_els_iocb(sp, pkt);
		break;
	case SRB_CT_CMD:
		IS_FWI2_CAPABLE(ha) ?
		    qla24xx_ct_iocb(sp, pkt) :
		    qla2x00_ct_iocb(sp, pkt);
		break;
	case SRB_ADISC_CMD:
		IS_FWI2_CAPABLE(ha) ?
		    qla24xx_adisc_iocb(sp, pkt) :
		    qla2x00_adisc_iocb(sp, pkt);
		break;
	case SRB_TM_CMD:
		qla24xx_tm_iocb(sp, pkt);
		break;
	default:
		break;
	}

	wmb();
	qla2x00_start_iocbs(sp->fcport->vha, ha->req_q_map[0]);
done:
	spin_unlock_irqrestore(&ha->hardware_lock, flags);
	return rval;
}