#include <linux/pci.h>
#include <linux/slab.h>
#include <linux/interrupt.h>
#include <linux/delay.h>
#include <asm/unaligned.h>

#include <scsi/scsi.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_eh.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_tcq.h>
#include <scsi/scsi_transport_fc.h>

#include "lpfc_version.h"
#include "lpfc_hw4.h"
#include "lpfc_hw.h"
#include "lpfc_sli.h"
#include "lpfc_sli4.h"
#include "lpfc_nl.h"
#include "lpfc_disc.h"
#include "lpfc_scsi.h"
#include "lpfc.h"
#include "lpfc_logmsg.h"
#include "lpfc_crtn.h"
#include "lpfc_vport.h"

#define LPFC_RESET_WAIT  2
#define LPFC_ABORT_WAIT  2

int _dump_buf_done;

static char *dif_op_str[] = {
	"SCSI_PROT_NORMAL",
	"SCSI_PROT_READ_INSERT",
	"SCSI_PROT_WRITE_STRIP",
	"SCSI_PROT_READ_STRIP",
	"SCSI_PROT_WRITE_INSERT",
	"SCSI_PROT_READ_PASS",
	"SCSI_PROT_WRITE_PASS",
};
static void
lpfc_release_scsi_buf_s4(struct lpfc_hba *phba, struct lpfc_scsi_buf *psb);
static void
lpfc_release_scsi_buf_s3(struct lpfc_hba *phba, struct lpfc_scsi_buf *psb);
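
/**
 * lpfc_debug_save_data - Save the data scatterlist of a cmd to the debug buf
 * @phba: Pointer to HBA object.
 * @cmnd: Pointer to the SCSI command whose data is to be saved.
 *
 * Copies every data scatterlist segment of @cmnd into the global
 * _dump_buf_data buffer for BlockGuard debugging.
 **/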
static void
lpfc_debug_save_data(struct lpfc_hba *phba, struct scsi_cmnd *cmnd)
{
	void *src, *dst;
	struct scatterlist *sgde = scsi_sglist(cmnd);

	if (!_dump_buf_data) {
		lpfc_printf_log(phba, KERN_ERR, LOG_BG,
			"9050 BLKGRD: ERROR %s _dump_buf_data is NULL\n",
				__func__);
		return;
	}

	if (!sgde) {
		lpfc_printf_log(phba, KERN_ERR, LOG_BG,
			"9051 BLKGRD: ERROR: data scatterlist is null\n");
		return;
	}

	dst = (void *) _dump_buf_data;
	while (sgde) {
		src = sg_virt(sgde);
		memcpy(dst, src, sgde->length);
		dst += sgde->length;
		sgde = sg_next(sgde);
	}
}
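
/**
 * lpfc_debug_save_dif - Save the DIF scatterlist of a cmd to the debug buf
 * @phba: Pointer to HBA object.
 * @cmnd: Pointer to the SCSI command whose protection data is to be saved.
 *
 * Copies every protection scatterlist segment of @cmnd into the global
 * _dump_buf_dif buffer for BlockGuard debugging.
 **/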
static void
lpfc_debug_save_dif(struct lpfc_hba *phba, struct scsi_cmnd *cmnd)
{
	void *src, *dst;
	struct scatterlist *sgde = scsi_prot_sglist(cmnd);

	if (!_dump_buf_dif) {
		lpfc_printf_log(phba, KERN_ERR, LOG_BG,
			"9052 BLKGRD: ERROR %s _dump_buf_dif is NULL\n",
				__func__);
		return;
	}

	if (!sgde) {
		lpfc_printf_log(phba, KERN_ERR, LOG_BG,
			"9053 BLKGRD: ERROR: prot scatterlist is null\n");
		return;
	}

	dst = _dump_buf_dif;
	while (sgde) {
		src = sg_virt(sgde);
		memcpy(dst, src, sgde->length);
		dst += sgde->length;
		sgde = sg_next(sgde);
	}
}
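
/**
 * lpfc_sli4_set_rsp_sgl_last - Set the last bit in the response sge
 * @phba: Pointer to HBA object.
 * @lpfc_cmd: lpfc scsi command object pointer.
 *
 * This function sets the last bit in the second sge entry (the one
 * mapping the FCP response) of the given command's SGL.
 **/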
static void
lpfc_sli4_set_rsp_sgl_last(struct lpfc_hba *phba,
			   struct lpfc_scsi_buf *lpfc_cmd)
{
	struct sli4_sge *sgl = (struct sli4_sge *)lpfc_cmd->fcp_bpl;
	if (sgl) {
		sgl += 1;
		sgl->word2 = le32_to_cpu(sgl->word2);
		bf_set(lpfc_sli4_sge_last, sgl, 1);
		sgl->word2 = cpu_to_le32(sgl->word2);
	}
}
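
/**
 * lpfc_update_stats - Update statistical data for the command completion
 * @phba: Pointer to HBA object.
 * @lpfc_cmd: lpfc scsi command object pointer.
 *
 * This function is called when there is a command completion and it
 * updates the per-node latency bucket statistics for the completion.
 **/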
static void
lpfc_update_stats(struct lpfc_hba *phba, struct lpfc_scsi_buf *lpfc_cmd)
{
	struct lpfc_rport_data *rdata = lpfc_cmd->rdata;
	struct lpfc_nodelist *pnode = rdata->pnode;
	struct scsi_cmnd *cmd = lpfc_cmd->pCmd;
	unsigned long flags;
	struct Scsi_Host *shost = cmd->device->host;
	struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
	unsigned long latency;
	int i;

	if (cmd->result)
		return;

	latency = jiffies_to_msecs((long)jiffies - (long)lpfc_cmd->start_time);

	spin_lock_irqsave(shost->host_lock, flags);
	if (!vport->stat_data_enabled ||
		vport->stat_data_blocked ||
		!pnode ||
		!pnode->lat_data ||
		(phba->bucket_type == LPFC_NO_BUCKET)) {
		spin_unlock_irqrestore(shost->host_lock, flags);
		return;
	}

	if (phba->bucket_type == LPFC_LINEAR_BUCKET) {
		i = (latency + phba->bucket_step - 1 - phba->bucket_base)/
			phba->bucket_step;

		if (i < 0)
			i = 0;
		else if (i >= LPFC_MAX_BUCKET_COUNT)
			i = LPFC_MAX_BUCKET_COUNT - 1;
	} else {
		for (i = 0; i < LPFC_MAX_BUCKET_COUNT-1; i++)
			if (latency <= (phba->bucket_base +
				((1<<i)*phba->bucket_step)))
				break;
	}

	pnode->lat_data[i].cmd_count++;
	spin_unlock_irqrestore(shost->host_lock, flags);
}
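
/**
 * lpfc_send_sdev_queuedepth_change_event - Posts a queuedepth change event
 * @phba: Pointer to HBA context object.
 * @vport: Pointer to vport object.
 * @ndlp: Pointer to FC node associated with the target.
 * @lun: Lun number of the scsi device.
 * @old_val: Old value of the queue depth.
 * @new_val: New value of the queue depth.
 *
 * This function sends an event to the mgmt application indicating
 * there is a change in the scsi device queue depth.
 **/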
static void
lpfc_send_sdev_queuedepth_change_event(struct lpfc_hba *phba,
		struct lpfc_vport *vport,
		struct lpfc_nodelist *ndlp,
		uint32_t lun,
		uint32_t old_val,
		uint32_t new_val)
{
	struct lpfc_fast_path_event *fast_path_evt;
	unsigned long flags;

	fast_path_evt = lpfc_alloc_fast_evt(phba);
	if (!fast_path_evt)
		return;

	fast_path_evt->un.queue_depth_evt.scsi_event.event_type =
		FC_REG_SCSI_EVENT;
	fast_path_evt->un.queue_depth_evt.scsi_event.subcategory =
		LPFC_EVENT_VARQUEDEPTH;

	/* Report all luns with change in queue depth */
	fast_path_evt->un.queue_depth_evt.scsi_event.lun = lun;
	if (ndlp && NLP_CHK_NODE_ACT(ndlp)) {
		memcpy(&fast_path_evt->un.queue_depth_evt.scsi_event.wwpn,
			&ndlp->nlp_portname, sizeof(struct lpfc_name));
		memcpy(&fast_path_evt->un.queue_depth_evt.scsi_event.wwnn,
			&ndlp->nlp_nodename, sizeof(struct lpfc_name));
	}

	fast_path_evt->un.queue_depth_evt.oldval = old_val;
	fast_path_evt->un.queue_depth_evt.newval = new_val;
	fast_path_evt->vport = vport;

	fast_path_evt->work_evt.evt = LPFC_EVT_FASTPATH_MGMT_EVT;
	spin_lock_irqsave(&phba->hbalock, flags);
	list_add_tail(&fast_path_evt->work_evt.evt_listp, &phba->work_list);
	spin_unlock_irqrestore(&phba->hbalock, flags);
	lpfc_worker_wake_up(phba);

	return;
}
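
/**
 * lpfc_change_queue_depth - Alter scsi device queue depth
 * @sdev: Pointer to the scsi device on which to change the queue depth.
 * @qdepth: New queue depth to set the sdev to.
 * @reason: The reason for the queue depth change.
 *
 * This function is called by the midlayer and the LLD to alter the queue
 * depth for a scsi device. It sets the queue depth to the new value and
 * posts an event notifying the application of the change.
 *
 * Returns the new queue depth.
 **/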
int
lpfc_change_queue_depth(struct scsi_device *sdev, int qdepth, int reason)
{
	struct lpfc_vport *vport = (struct lpfc_vport *) sdev->host->hostdata;
	struct lpfc_hba   *phba = vport->phba;
	struct lpfc_rport_data *rdata;
	unsigned long new_queue_depth, old_queue_depth;

	old_queue_depth = sdev->queue_depth;
	scsi_adjust_queue_depth(sdev, scsi_get_tag_type(sdev), qdepth);
	new_queue_depth = sdev->queue_depth;
	rdata = sdev->hostdata;
	if (rdata)
		lpfc_send_sdev_queuedepth_change_event(phba, vport,
						       rdata->pnode, sdev->lun,
						       old_queue_depth,
						       new_queue_depth);
	return sdev->queue_depth;
}
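
/**
 * lpfc_rampdown_queue_depth - Post RAMP_DOWN_QUEUE event to worker thread
 * @phba: The Hba for which this call is being executed.
 *
 * This routine is called when there is a resource error in the driver or
 * firmware. It posts a WORKER_RAMP_DOWN_QUEUE event for @phba, at most
 * once per QUEUE_RAMP_DOWN_INTERVAL.
 **/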
void
lpfc_rampdown_queue_depth(struct lpfc_hba *phba)
{
	unsigned long flags;
	uint32_t evt_posted;

	spin_lock_irqsave(&phba->hbalock, flags);
	atomic_inc(&phba->num_rsrc_err);
	phba->last_rsrc_error_time = jiffies;

	if ((phba->last_ramp_down_time + QUEUE_RAMP_DOWN_INTERVAL) > jiffies) {
		spin_unlock_irqrestore(&phba->hbalock, flags);
		return;
	}

	phba->last_ramp_down_time = jiffies;

	spin_unlock_irqrestore(&phba->hbalock, flags);

	spin_lock_irqsave(&phba->pport->work_port_lock, flags);
	evt_posted = phba->pport->work_port_events & WORKER_RAMP_DOWN_QUEUE;
	if (!evt_posted)
		phba->pport->work_port_events |= WORKER_RAMP_DOWN_QUEUE;
	spin_unlock_irqrestore(&phba->pport->work_port_lock, flags);

	if (!evt_posted)
		lpfc_worker_wake_up(phba);
	return;
}
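
/**
 * lpfc_rampup_queue_depth - Post RAMP_UP_QUEUE event for worker thread
 * @vport: The virtual port for which this call is being executed.
 * @queue_depth: The current queue depth.
 *
 * Posts a WORKER_RAMP_UP_QUEUE event when the configured LUN queue depth
 * is greater than the current queue depth and no ramp-up or resource
 * error has occurred within QUEUE_RAMP_UP_INTERVAL.
 **/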
static inline void
lpfc_rampup_queue_depth(struct lpfc_vport *vport,
			uint32_t queue_depth)
{
	unsigned long flags;
	struct lpfc_hba *phba = vport->phba;
	uint32_t evt_posted;
	atomic_inc(&phba->num_cmd_success);

	if (vport->cfg_lun_queue_depth <= queue_depth)
		return;
	spin_lock_irqsave(&phba->hbalock, flags);
	if (time_before(jiffies,
			phba->last_ramp_up_time + QUEUE_RAMP_UP_INTERVAL) ||
	    time_before(jiffies,
			phba->last_rsrc_error_time + QUEUE_RAMP_UP_INTERVAL)) {
		spin_unlock_irqrestore(&phba->hbalock, flags);
		return;
	}
	phba->last_ramp_up_time = jiffies;
	spin_unlock_irqrestore(&phba->hbalock, flags);

	spin_lock_irqsave(&phba->pport->work_port_lock, flags);
	evt_posted = phba->pport->work_port_events & WORKER_RAMP_UP_QUEUE;
	if (!evt_posted)
		phba->pport->work_port_events |= WORKER_RAMP_UP_QUEUE;
	spin_unlock_irqrestore(&phba->pport->work_port_lock, flags);

	if (!evt_posted)
		lpfc_worker_wake_up(phba);
	return;
}
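
/**
 * lpfc_ramp_down_queue_handler - WORKER_RAMP_DOWN_QUEUE event handler
 * @phba: The Hba for which this call is being executed.
 *
 * This routine is called by the worker thread to process the
 * WORKER_RAMP_DOWN_QUEUE event. It reduces the queue depth of every scsi
 * device on each vport in proportion to the number of resource errors seen.
 **/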
void
lpfc_ramp_down_queue_handler(struct lpfc_hba *phba)
{
	struct lpfc_vport **vports;
	struct Scsi_Host  *shost;
	struct scsi_device *sdev;
	unsigned long new_queue_depth;
	unsigned long num_rsrc_err, num_cmd_success;
	int i;

	num_rsrc_err = atomic_read(&phba->num_rsrc_err);
	num_cmd_success = atomic_read(&phba->num_cmd_success);

	vports = lpfc_create_vport_work_array(phba);
	if (vports != NULL)
		for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) {
			shost = lpfc_shost_from_vport(vports[i]);
			shost_for_each_device(sdev, shost) {
				new_queue_depth =
					sdev->queue_depth * num_rsrc_err /
					(num_rsrc_err + num_cmd_success);
				if (!new_queue_depth)
					new_queue_depth = sdev->queue_depth - 1;
				else
					new_queue_depth = sdev->queue_depth -
								new_queue_depth;
				lpfc_change_queue_depth(sdev, new_queue_depth,
							SCSI_QDEPTH_DEFAULT);
			}
		}
	lpfc_destroy_vport_work_array(phba, vports);
	atomic_set(&phba->num_rsrc_err, 0);
	atomic_set(&phba->num_cmd_success, 0);
}
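
/**
 * lpfc_ramp_up_queue_handler - WORKER_RAMP_UP_QUEUE event handler
 * @phba: The Hba for which this call is being executed.
 *
 * This routine is called by the worker thread to process the
 * WORKER_RAMP_UP_QUEUE event. It bumps the queue depth of every scsi
 * device on each vport that is still below the configured LUN queue depth.
 **/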
void
lpfc_ramp_up_queue_handler(struct lpfc_hba *phba)
{
	struct lpfc_vport **vports;
	struct Scsi_Host  *shost;
	struct scsi_device *sdev;
	int i;

	vports = lpfc_create_vport_work_array(phba);
	if (vports != NULL)
		for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) {
			shost = lpfc_shost_from_vport(vports[i]);
			shost_for_each_device(sdev, shost) {
				if (vports[i]->cfg_lun_queue_depth <=
				    sdev->queue_depth)
					continue;
				lpfc_change_queue_depth(sdev,
							sdev->queue_depth+1,
							SCSI_QDEPTH_RAMP_UP);
			}
		}
	lpfc_destroy_vport_work_array(phba, vports);
	atomic_set(&phba->num_rsrc_err, 0);
	atomic_set(&phba->num_cmd_success, 0);
}
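
/**
 * lpfc_scsi_dev_block - set all scsi hosts to block state
 * @phba: Pointer to HBA context object.
 *
 * This function walks the vport list and sets each SCSI host to block state
 * by invoking fc_remote_port_delete() for every device.
 **/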
void
lpfc_scsi_dev_block(struct lpfc_hba *phba)
{
	struct lpfc_vport **vports;
	struct Scsi_Host  *shost;
	struct scsi_device *sdev;
	struct fc_rport *rport;
	int i;

	vports = lpfc_create_vport_work_array(phba);
	if (vports != NULL)
		for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) {
			shost = lpfc_shost_from_vport(vports[i]);
			shost_for_each_device(sdev, shost) {
				rport = starget_to_rport(scsi_target(sdev));
				fc_remote_port_delete(rport);
			}
		}
	lpfc_destroy_vport_work_array(phba, vports);
}
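
/**
 * lpfc_new_scsi_buf_s3 - Scsi buffer allocator for scsi3 hba
 * @vport: The virtual port for which this call is being executed.
 * @num_to_alloc: The requested number of buffers to allocate.
 *
 * This routine allocates scsi buffers for a device with SLI-3 interface
 * spec. Each scsi buffer contains all the information needed to initiate
 * a SCSI I/O: the DMAable region holds the FCP CMND, FCP RSP and the BPL,
 * and the embedded IOCB is initialized with all data known at this point.
 *
 * Return codes:
 *   int - number of scsi buffers that were allocated
 *   (0 = failure, less than num_to_alloc is a partial failure)
 **/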
static int
lpfc_new_scsi_buf_s3(struct lpfc_vport *vport, int num_to_alloc)
{
	struct lpfc_hba *phba = vport->phba;
	struct lpfc_scsi_buf *psb;
	struct ulp_bde64 *bpl;
	IOCB_t *iocb;
	dma_addr_t pdma_phys_fcp_cmd;
	dma_addr_t pdma_phys_fcp_rsp;
	dma_addr_t pdma_phys_bpl;
	uint16_t iotag;
	int bcnt;

	for (bcnt = 0; bcnt < num_to_alloc; bcnt++) {
		psb = kzalloc(sizeof(struct lpfc_scsi_buf), GFP_KERNEL);
		if (!psb)
			break;

		/*
		 * Get memory from the pci pool to map the virt space to pci
		 * bus space for an I/O.  The DMA buffer includes space for the
		 * struct fcp_cmnd, struct fcp_rsp and the number of bde's
		 * necessary to support the sg_tablesize.
		 */
		psb->data = pci_pool_alloc(phba->lpfc_scsi_dma_buf_pool,
					GFP_KERNEL, &psb->dma_handle);
		if (!psb->data) {
			kfree(psb);
			break;
		}

		/* Initialize virtual ptrs to dma_buf region. */
		memset(psb->data, 0, phba->cfg_sg_dma_buf_size);

		/* Allocate iotag for psb->cur_iocbq. */
		iotag = lpfc_sli_next_iotag(phba, &psb->cur_iocbq);
		if (iotag == 0) {
			pci_pool_free(phba->lpfc_scsi_dma_buf_pool,
					psb->data, psb->dma_handle);
			kfree(psb);
			break;
		}
		psb->cur_iocbq.iocb_flag |= LPFC_IO_FCP;

		psb->fcp_cmnd = psb->data;
		psb->fcp_rsp = psb->data + sizeof(struct fcp_cmnd);
		psb->fcp_bpl = psb->data + sizeof(struct fcp_cmnd) +
			sizeof(struct fcp_rsp);

		/* Initialize local short-hand pointers. */
		bpl = psb->fcp_bpl;
		pdma_phys_fcp_cmd = psb->dma_handle;
		pdma_phys_fcp_rsp = psb->dma_handle + sizeof(struct fcp_cmnd);
		pdma_phys_bpl = psb->dma_handle + sizeof(struct fcp_cmnd) +
			sizeof(struct fcp_rsp);

		/*
		 * The first two bdes are the FCP_CMD and FCP_RSP. The balance
		 * are sg list bdes.  Initialize the first two and leave the
		 * rest for queuecommand.
		 */
		bpl[0].addrHigh = le32_to_cpu(putPaddrHigh(pdma_phys_fcp_cmd));
		bpl[0].addrLow = le32_to_cpu(putPaddrLow(pdma_phys_fcp_cmd));
		bpl[0].tus.f.bdeSize = sizeof(struct fcp_cmnd);
		bpl[0].tus.f.bdeFlags = BUFF_TYPE_BDE_64;
		bpl[0].tus.w = le32_to_cpu(bpl[0].tus.w);

		/* Setup the physical region for the FCP RSP */
		bpl[1].addrHigh = le32_to_cpu(putPaddrHigh(pdma_phys_fcp_rsp));
		bpl[1].addrLow = le32_to_cpu(putPaddrLow(pdma_phys_fcp_rsp));
		bpl[1].tus.f.bdeSize = sizeof(struct fcp_rsp);
		bpl[1].tus.f.bdeFlags = BUFF_TYPE_BDE_64;
		bpl[1].tus.w = le32_to_cpu(bpl[1].tus.w);

		/*
		 * Since the IOCB for the FCP I/O is built into this
		 * lpfc_scsi_buf, initialize it with all known data now.
		 */
		iocb = &psb->cur_iocbq.iocb;
		iocb->un.fcpi64.bdl.ulpIoTag32 = 0;
		if ((phba->sli_rev == 3) &&
				!(phba->sli3_options & LPFC_SLI3_BG_ENABLED)) {
			/* fill in immediate fcp command BDE */
			iocb->un.fcpi64.bdl.bdeFlags = BUFF_TYPE_BDE_IMMED;
			iocb->un.fcpi64.bdl.bdeSize = sizeof(struct fcp_cmnd);
			iocb->un.fcpi64.bdl.addrLow = offsetof(IOCB_t,
					unsli3.fcp_ext.icd);
			iocb->un.fcpi64.bdl.addrHigh = 0;
			iocb->ulpBdeCount = 0;
			iocb->ulpLe = 0;
			/* fill in response BDE */
			iocb->unsli3.fcp_ext.rbde.tus.f.bdeFlags =
							BUFF_TYPE_BDE_64;
			iocb->unsli3.fcp_ext.rbde.tus.f.bdeSize =
				sizeof(struct fcp_rsp);
			iocb->unsli3.fcp_ext.rbde.addrLow =
				putPaddrLow(pdma_phys_fcp_rsp);
			iocb->unsli3.fcp_ext.rbde.addrHigh =
				putPaddrHigh(pdma_phys_fcp_rsp);
		} else {
			iocb->un.fcpi64.bdl.bdeFlags = BUFF_TYPE_BLP_64;
			iocb->un.fcpi64.bdl.bdeSize =
					(2 * sizeof(struct ulp_bde64));
			iocb->un.fcpi64.bdl.addrLow =
					putPaddrLow(pdma_phys_bpl);
			iocb->un.fcpi64.bdl.addrHigh =
					putPaddrHigh(pdma_phys_bpl);
			iocb->ulpBdeCount = 1;
			iocb->ulpLe = 1;
		}
		iocb->ulpClass = CLASS3;
		psb->status = IOSTAT_SUCCESS;
		/* Put it back into the SCSI buffer list */
		psb->cur_iocbq.context1 = psb;
		lpfc_release_scsi_buf_s3(phba, psb);
	}

	return bcnt;
}
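
/**
 * lpfc_sli4_fcp_xri_aborted - Fast-path process of fcp xri abort
 * @phba: pointer to lpfc hba data structure.
 * @axri: pointer to the fcp xri abort wcqe structure.
 *
 * This routine is invoked by the worker thread to process a SLI4 fast-path
 * FCP aborted xri.
 **/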
void
lpfc_sli4_fcp_xri_aborted(struct lpfc_hba *phba,
			  struct sli4_wcqe_xri_aborted *axri)
{
	uint16_t xri = bf_get(lpfc_wcqe_xa_xri, axri);
	uint16_t rxid = bf_get(lpfc_wcqe_xa_remote_xid, axri);
	struct lpfc_scsi_buf *psb, *next_psb;
	unsigned long iflag = 0;
	struct lpfc_iocbq *iocbq;
	int i;
	struct lpfc_nodelist *ndlp;
	int rrq_empty = 0;
	struct lpfc_sli_ring *pring = &phba->sli.ring[LPFC_ELS_RING];

	spin_lock_irqsave(&phba->hbalock, iflag);
	spin_lock(&phba->sli4_hba.abts_scsi_buf_list_lock);
	list_for_each_entry_safe(psb, next_psb,
		&phba->sli4_hba.lpfc_abts_scsi_buf_list, list) {
		if (psb->cur_iocbq.sli4_xritag == xri) {
			list_del(&psb->list);
			psb->exch_busy = 0;
			psb->status = IOSTAT_SUCCESS;
			spin_unlock(
				&phba->sli4_hba.abts_scsi_buf_list_lock);
			ndlp = psb->rdata->pnode;
			rrq_empty = list_empty(&phba->active_rrq_list);
			spin_unlock_irqrestore(&phba->hbalock, iflag);
			if (ndlp)
				lpfc_set_rrq_active(phba, ndlp, xri, rxid, 1);
			lpfc_release_scsi_buf_s4(phba, psb);
			if (rrq_empty)
				lpfc_worker_wake_up(phba);
			return;
		}
	}
	spin_unlock(&phba->sli4_hba.abts_scsi_buf_list_lock);
	for (i = 1; i <= phba->sli.last_iotag; i++) {
		iocbq = phba->sli.iocbq_lookup[i];

		if (!(iocbq->iocb_flag & LPFC_IO_FCP) ||
			(iocbq->iocb_flag & LPFC_IO_LIBDFC))
			continue;
		if (iocbq->sli4_xritag != xri)
			continue;
		psb = container_of(iocbq, struct lpfc_scsi_buf, cur_iocbq);
		psb->exch_busy = 0;
		spin_unlock_irqrestore(&phba->hbalock, iflag);
		if (pring->txq_cnt)
			lpfc_worker_wake_up(phba);
		return;
	}
	spin_unlock_irqrestore(&phba->hbalock, iflag);
}
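
/**
 * lpfc_sli4_repost_scsi_sgl_list - Repost the scsi buffer sgl pages as block
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine walks the list of allocated scsi buffers and reposts their
 * SGLs to the HBA using SGL block post. This is needed after a pci function
 * reset or firmware restart. Buffers whose post fails are parked on the
 * aborted list via exch_busy.
 *
 * Returns: 0 = success, non-zero = number of failed buffers.
 **/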
int
lpfc_sli4_repost_scsi_sgl_list(struct lpfc_hba *phba)
{
	struct lpfc_scsi_buf *psb;
	int index, status, bcnt = 0, rcnt = 0, rc = 0;
	LIST_HEAD(sblist);

	for (index = 0; index < phba->sli4_hba.scsi_xri_cnt; index++) {
		psb = phba->sli4_hba.lpfc_scsi_psb_array[index];
		if (psb) {
			/* Remove from SCSI buffer list */
			list_del(&psb->list);
			/* Add it to a local SCSI buffer list */
			list_add_tail(&psb->list, &sblist);
			if (++rcnt == LPFC_NEMBED_MBOX_SGL_CNT) {
				bcnt = rcnt;
				rcnt = 0;
			}
		} else
			/* A hole present in the XRI array, need to skip */
			bcnt = rcnt;

		if (index == phba->sli4_hba.scsi_xri_cnt - 1)
			/* End of XRI array for SCSI buffer, complete */
			bcnt = rcnt;

		/* Continue until collecting up to a nembed page worth of sgls */
		if (bcnt == 0)
			continue;
		/* Now, post the SCSI buffer list sgls as a block */
		status = lpfc_sli4_post_scsi_sgl_block(phba, &sblist, bcnt);
		/* Reset SCSI buffer count for next round of posting */
		bcnt = 0;
		while (!list_empty(&sblist)) {
			list_remove_head(&sblist, psb, struct lpfc_scsi_buf,
					 list);
			if (status) {
				/* Put this back on the abort scsi list */
				psb->exch_busy = 1;
				rc++;
			} else {
				psb->exch_busy = 0;
				psb->status = IOSTAT_SUCCESS;
			}
			/* Put it back into the SCSI buffer list */
			lpfc_release_scsi_buf_s4(phba, psb);
		}
	}
	return rc;
}
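
/**
 * lpfc_new_scsi_buf_s4 - Scsi buffer allocator for scsi4 hba
 * @vport: The virtual port for which this call is being executed.
 * @num_to_alloc: The requested number of buffers to allocate.
 *
 * This routine allocates scsi buffers for a device with SLI-4 interface
 * spec. Each scsi buffer contains all the information needed to initiate
 * a SCSI I/O; the SGL holds the FCP CMND and FCP RSP entries and each
 * buffer is assigned an XRI. Buffers with sequential XRIs are posted to
 * the firmware as a block; a non-sequential XRI is posted individually.
 *
 * Return codes:
 *   int - number of scsi buffers that were allocated
 *   (0 = failure, less than num_to_alloc is a partial failure)
 **/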
static int
lpfc_new_scsi_buf_s4(struct lpfc_vport *vport, int num_to_alloc)
{
	struct lpfc_hba *phba = vport->phba;
	struct lpfc_scsi_buf *psb;
	struct sli4_sge *sgl;
	IOCB_t *iocb;
	dma_addr_t pdma_phys_fcp_cmd;
	dma_addr_t pdma_phys_fcp_rsp;
	dma_addr_t pdma_phys_bpl, pdma_phys_bpl1;
	uint16_t iotag, last_xritag = NO_XRI;
	int status = 0, index;
	int bcnt;
	int non_sequential_xri = 0;
	LIST_HEAD(sblist);

	for (bcnt = 0; bcnt < num_to_alloc; bcnt++) {
		psb = kzalloc(sizeof(struct lpfc_scsi_buf), GFP_KERNEL);
		if (!psb)
			break;

		/*
		 * Get memory from the pci pool to map the virt space to pci
		 * bus space for an I/O.  The DMA buffer includes space for the
		 * struct fcp_cmnd, struct fcp_rsp and the number of bde's
		 * necessary to support the sg_tablesize.
		 */
		psb->data = pci_pool_alloc(phba->lpfc_scsi_dma_buf_pool,
						GFP_KERNEL, &psb->dma_handle);
		if (!psb->data) {
			kfree(psb);
			break;
		}

		/* Initialize virtual ptrs to dma_buf region. */
		memset(psb->data, 0, phba->cfg_sg_dma_buf_size);

		/* Allocate iotag for psb->cur_iocbq. */
		iotag = lpfc_sli_next_iotag(phba, &psb->cur_iocbq);
		if (iotag == 0) {
			pci_pool_free(phba->lpfc_scsi_dma_buf_pool,
				psb->data, psb->dma_handle);
			kfree(psb);
			break;
		}

		psb->cur_iocbq.sli4_xritag = lpfc_sli4_next_xritag(phba);
		if (psb->cur_iocbq.sli4_xritag == NO_XRI) {
			pci_pool_free(phba->lpfc_scsi_dma_buf_pool,
			      psb->data, psb->dma_handle);
			kfree(psb);
			break;
		}
		if (last_xritag != NO_XRI
			&& psb->cur_iocbq.sli4_xritag != (last_xritag+1)) {
			non_sequential_xri = 1;
		} else
			list_add_tail(&psb->list, &sblist);
		last_xritag = psb->cur_iocbq.sli4_xritag;

		index = phba->sli4_hba.scsi_xri_cnt++;
		psb->cur_iocbq.iocb_flag |= LPFC_IO_FCP;

		psb->fcp_bpl = psb->data;
		psb->fcp_cmnd = (psb->data + phba->cfg_sg_dma_buf_size)
			- (sizeof(struct fcp_cmnd) + sizeof(struct fcp_rsp));
		psb->fcp_rsp = (struct fcp_rsp *)((uint8_t *)psb->fcp_cmnd +
					sizeof(struct fcp_cmnd));

		/* Initialize local short-hand pointers. */
		sgl = (struct sli4_sge *)psb->fcp_bpl;
		pdma_phys_bpl = psb->dma_handle;
		pdma_phys_fcp_cmd =
			(psb->dma_handle + phba->cfg_sg_dma_buf_size)
			- (sizeof(struct fcp_cmnd) + sizeof(struct fcp_rsp));
		pdma_phys_fcp_rsp = pdma_phys_fcp_cmd + sizeof(struct fcp_cmnd);

		/*
		 * The first two sges are the FCP_CMD and FCP_RSP.  The balance
		 * are sg list sges.  Initialize the first two and leave the
		 * rest for queuecommand.
		 */
		sgl->addr_hi = cpu_to_le32(putPaddrHigh(pdma_phys_fcp_cmd));
		sgl->addr_lo = cpu_to_le32(putPaddrLow(pdma_phys_fcp_cmd));
		bf_set(lpfc_sli4_sge_last, sgl, 0);
		sgl->word2 = cpu_to_le32(sgl->word2);
		sgl->sge_len = cpu_to_le32(sizeof(struct fcp_cmnd));
		sgl++;

		/* Setup the physical region for the FCP RSP */
		sgl->addr_hi = cpu_to_le32(putPaddrHigh(pdma_phys_fcp_rsp));
		sgl->addr_lo = cpu_to_le32(putPaddrLow(pdma_phys_fcp_rsp));
		bf_set(lpfc_sli4_sge_last, sgl, 1);
		sgl->word2 = cpu_to_le32(sgl->word2);
		sgl->sge_len = cpu_to_le32(sizeof(struct fcp_rsp));

		/*
		 * Since the IOCB for the FCP I/O is built into this
		 * lpfc_scsi_buf, initialize it with all known data now.
		 */
		iocb = &psb->cur_iocbq.iocb;
		iocb->un.fcpi64.bdl.ulpIoTag32 = 0;
		iocb->un.fcpi64.bdl.bdeFlags = BUFF_TYPE_BDE_64;
		/*
		 * The BPL here actually points at the first SGE; only the
		 * fcp_cmnd BDE size is filled in, the rest is completed
		 * at queuecommand time.
		 */
		iocb->un.fcpi64.bdl.bdeSize = sizeof(struct fcp_cmnd);
		iocb->un.fcpi64.bdl.addrLow = putPaddrLow(pdma_phys_fcp_cmd);
		iocb->un.fcpi64.bdl.addrHigh = putPaddrHigh(pdma_phys_fcp_cmd);
		iocb->ulpBdeCount = 1;
		iocb->ulpLe = 1;
		iocb->ulpClass = CLASS3;
		psb->cur_iocbq.context1 = psb;
		if (phba->cfg_sg_dma_buf_size > SGL_PAGE_SIZE)
			pdma_phys_bpl1 = pdma_phys_bpl + SGL_PAGE_SIZE;
		else
			pdma_phys_bpl1 = 0;
		psb->dma_phys_bpl = pdma_phys_bpl;
		phba->sli4_hba.lpfc_scsi_psb_array[index] = psb;
		if (non_sequential_xri) {
			status = lpfc_sli4_post_sgl(phba, pdma_phys_bpl,
						pdma_phys_bpl1,
						psb->cur_iocbq.sli4_xritag);
			if (status) {
				/* Put this back on the abort scsi list */
				psb->exch_busy = 1;
			} else {
				psb->exch_busy = 0;
				psb->status = IOSTAT_SUCCESS;
			}
			/* Put it back into the SCSI buffer list */
			lpfc_release_scsi_buf_s4(phba, psb);
			break;
		}
	}
	if (bcnt) {
		status = lpfc_sli4_post_scsi_sgl_block(phba, &sblist, bcnt);

		while (!list_empty(&sblist)) {
			list_remove_head(&sblist, psb, struct lpfc_scsi_buf,
				 list);
			if (status) {
				/* Put this back on the abort scsi list */
				psb->exch_busy = 1;
			} else {
				psb->exch_busy = 0;
				psb->status = IOSTAT_SUCCESS;
			}
			/* Put it back into the SCSI buffer list */
			lpfc_release_scsi_buf_s4(phba, psb);
		}
	}

	return bcnt + non_sequential_xri;
}
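
/**
 * lpfc_new_scsi_buf - Wrapper function for scsi buffer allocator
 * @vport: The virtual port for which this call is being executed.
 * @num_to_alloc: The requested number of buffers to allocate.
 *
 * This routine wraps the actual SCSI buffer allocator function pointer
 * from the lpfc_hba struct.
 *
 * Return codes:
 *   int - number of scsi buffers that were allocated.
 **/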
static inline int
lpfc_new_scsi_buf(struct lpfc_vport *vport, int num_to_alloc)
{
	return vport->phba->lpfc_new_scsi_buf(vport, num_to_alloc);
}
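
/**
 * lpfc_get_scsi_buf_s3 - Get a scsi buffer from lpfc_scsi_buf_list of the HBA
 * @phba: The HBA for which this call is being executed.
 * @ndlp: pointer to a node-list data structure.
 *
 * This routine removes a scsi buffer from the head of @phba
 * lpfc_scsi_buf_list and returns it to the caller.
 *
 * Return codes:
 *   NULL - Error
 *   Pointer to lpfc_scsi_buf - Success
 **/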
static struct lpfc_scsi_buf*
lpfc_get_scsi_buf_s3(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp)
{
	struct lpfc_scsi_buf *lpfc_cmd = NULL;
	struct list_head *scsi_buf_list = &phba->lpfc_scsi_buf_list;
	unsigned long iflag = 0;

	spin_lock_irqsave(&phba->scsi_buf_list_lock, iflag);
	list_remove_head(scsi_buf_list, lpfc_cmd, struct lpfc_scsi_buf, list);
	if (lpfc_cmd) {
		lpfc_cmd->seg_cnt = 0;
		lpfc_cmd->nonsg_phys = 0;
		lpfc_cmd->prot_seg_cnt = 0;
	}
	spin_unlock_irqrestore(&phba->scsi_buf_list_lock, iflag);
	return lpfc_cmd;
}
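
/**
 * lpfc_get_scsi_buf_s4 - Get a scsi buffer from lpfc_scsi_buf_list of the HBA
 * @phba: The HBA for which this call is being executed.
 * @ndlp: pointer to a node-list data structure.
 *
 * This routine removes a scsi buffer from the head of @phba
 * lpfc_scsi_buf_list, skipping any buffer whose XRI is still on the
 * remote port's active RRQ list.
 *
 * Return codes:
 *   NULL - Error
 *   Pointer to lpfc_scsi_buf - Success
 **/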
static struct lpfc_scsi_buf*
lpfc_get_scsi_buf_s4(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp)
{
	struct lpfc_scsi_buf *lpfc_cmd = NULL;
	struct lpfc_scsi_buf *start_lpfc_cmd = NULL;
	struct list_head *scsi_buf_list = &phba->lpfc_scsi_buf_list;
	unsigned long iflag = 0;
	int found = 0;

	spin_lock_irqsave(&phba->scsi_buf_list_lock, iflag);
	list_remove_head(scsi_buf_list, lpfc_cmd, struct lpfc_scsi_buf, list);
	spin_unlock_irqrestore(&phba->scsi_buf_list_lock, iflag);
	/* Remember the first candidate so a full cycle can be detected */
	start_lpfc_cmd = lpfc_cmd;
	while (!found && lpfc_cmd) {
		if (lpfc_test_rrq_active(phba, ndlp,
					 lpfc_cmd->cur_iocbq.sli4_xritag)) {
			lpfc_release_scsi_buf_s4(phba, lpfc_cmd);
			spin_lock_irqsave(&phba->scsi_buf_list_lock, iflag);
			list_remove_head(scsi_buf_list, lpfc_cmd,
					 struct lpfc_scsi_buf, list);
			spin_unlock_irqrestore(&phba->scsi_buf_list_lock,
						 iflag);
			if (lpfc_cmd == start_lpfc_cmd) {
				lpfc_cmd = NULL;
				break;
			} else
				continue;
		}
		found = 1;
		lpfc_cmd->seg_cnt = 0;
		lpfc_cmd->nonsg_phys = 0;
		lpfc_cmd->prot_seg_cnt = 0;
	}
	return lpfc_cmd;
}
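
/**
 * lpfc_get_scsi_buf - Get a scsi buffer from lpfc_scsi_buf_list of the HBA
 * @phba: The HBA for which this call is being executed.
 * @ndlp: pointer to a node-list data structure.
 *
 * This routine wraps the actual get-buffer function pointer from the
 * lpfc_hba struct.
 **/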
static struct lpfc_scsi_buf*
lpfc_get_scsi_buf(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp)
{
	return phba->lpfc_get_scsi_buf(phba, ndlp);
}
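
/**
 * lpfc_release_scsi_buf_s3 - Return a scsi buffer back to hba scsi buf list
 * @phba: The Hba for which this call is being executed.
 * @psb: The scsi buffer which is being released.
 *
 * This routine releases @psb by adding it to the tail of the @phba
 * lpfc_scsi_buf_list list.
 **/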
static void
lpfc_release_scsi_buf_s3(struct lpfc_hba *phba, struct lpfc_scsi_buf *psb)
{
	unsigned long iflag = 0;

	spin_lock_irqsave(&phba->scsi_buf_list_lock, iflag);
	psb->pCmd = NULL;
	list_add_tail(&psb->list, &phba->lpfc_scsi_buf_list);
	spin_unlock_irqrestore(&phba->scsi_buf_list_lock, iflag);
}
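
/**
 * lpfc_release_scsi_buf_s4 - Return a scsi buffer back to hba scsi buf list
 * @phba: The Hba for which this call is being executed.
 * @psb: The scsi buffer which is being released.
 *
 * This routine releases @psb by adding it to the tail of the @phba
 * lpfc_scsi_buf_list list. For SLI-4, XRIs are tied to the scsi buffer,
 * so a buffer whose exchange is still busy is parked on the aborted list
 * until the exchange completes.
 **/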
static void
lpfc_release_scsi_buf_s4(struct lpfc_hba *phba, struct lpfc_scsi_buf *psb)
{
	unsigned long iflag = 0;

	if (psb->exch_busy) {
		spin_lock_irqsave(&phba->sli4_hba.abts_scsi_buf_list_lock,
					iflag);
		psb->pCmd = NULL;
		list_add_tail(&psb->list,
			&phba->sli4_hba.lpfc_abts_scsi_buf_list);
		spin_unlock_irqrestore(&phba->sli4_hba.abts_scsi_buf_list_lock,
					iflag);
	} else {
		spin_lock_irqsave(&phba->scsi_buf_list_lock, iflag);
		psb->pCmd = NULL;
		list_add_tail(&psb->list, &phba->lpfc_scsi_buf_list);
		spin_unlock_irqrestore(&phba->scsi_buf_list_lock, iflag);
	}
}
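
/**
 * lpfc_release_scsi_buf - Return a scsi buffer back to hba scsi buf list
 * @phba: The Hba for which this call is being executed.
 * @psb: The scsi buffer which is being released.
 *
 * This routine wraps the actual release-buffer function pointer from the
 * lpfc_hba struct.
 **/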
static void
lpfc_release_scsi_buf(struct lpfc_hba *phba, struct lpfc_scsi_buf *psb)
{
	phba->lpfc_release_scsi_buf(phba, psb);
}
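
/**
 * lpfc_scsi_prep_dma_buf_s3 - DMA mapping for scsi buffer to SLI3 IF spec
 * @phba: The Hba for which this call is being executed.
 * @lpfc_cmd: The scsi buffer which is going to be mapped.
 *
 * This routine does the pci dma mapping of the scatter-gather list of the
 * scsi cmnd field of @lpfc_cmd for a device with SLI-3 interface spec. It
 * scans through the sg elements and formats the bde entries, and
 * initializes all IOCB fields which depend on the scsi command request
 * buffer.
 *
 * Return codes:
 *   1 - Error
 *   0 - Success
 **/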
static int
lpfc_scsi_prep_dma_buf_s3(struct lpfc_hba *phba, struct lpfc_scsi_buf *lpfc_cmd)
{
	struct scsi_cmnd *scsi_cmnd = lpfc_cmd->pCmd;
	struct scatterlist *sgel = NULL;
	struct fcp_cmnd *fcp_cmnd = lpfc_cmd->fcp_cmnd;
	struct ulp_bde64 *bpl = lpfc_cmd->fcp_bpl;
	struct lpfc_iocbq *iocbq = &lpfc_cmd->cur_iocbq;
	IOCB_t *iocb_cmd = &lpfc_cmd->cur_iocbq.iocb;
	struct ulp_bde64 *data_bde = iocb_cmd->unsli3.fcp_ext.dbde;
	dma_addr_t physaddr;
	uint32_t num_bde = 0;
	int nseg, datadir = scsi_cmnd->sc_data_direction;

	/*
	 * There are three possibilities here - use scatter-gather segment, use
	 * the single mapping, or neither.  Start the lpfc command prep by
	 * bumping the bpl beyond the fcp_cmnd and fcp_rsp regions to the first
	 * data bde entry.
	 */
	bpl += 2;
	if (scsi_sg_count(scsi_cmnd)) {
		/*
		 * The driver stores the segment count returned from pci_map_sg
		 * because this a count of dma-mappings used to map the use_sg
		 * pages.  They are not guaranteed to be the same for those
		 * architectures that implement an IOMMU.
		 */

		nseg = dma_map_sg(&phba->pcidev->dev, scsi_sglist(scsi_cmnd),
				  scsi_sg_count(scsi_cmnd), datadir);
		if (unlikely(!nseg))
			return 1;

		lpfc_cmd->seg_cnt = nseg;
		if (lpfc_cmd->seg_cnt > phba->cfg_sg_seg_cnt) {
			lpfc_printf_log(phba, KERN_ERR, LOG_BG,
				"9064 BLKGRD: %s: Too many sg segments from "
				"dma_map_sg.  Config %d, seg_cnt %d\n",
				__func__, phba->cfg_sg_seg_cnt,
				lpfc_cmd->seg_cnt);
			scsi_dma_unmap(scsi_cmnd);
			return 1;
		}

		/*
		 * The driver established a maximum scatter-gather segment count
		 * during probe that limits the number of sg elements in any
		 * single scsi command.  Just run through the seg_cnt and format
		 * the bde's.
		 * When using SLI-3 the driver will try to fit all the BDEs into
		 * the IOCB.  If it can't then the BDEs get added to a BPL as it
		 * does for SLI-2 mode.
		 */
		scsi_for_each_sg(scsi_cmnd, sgel, nseg, num_bde) {
			physaddr = sg_dma_address(sgel);
			if (phba->sli_rev == 3 &&
			    !(phba->sli3_options & LPFC_SLI3_BG_ENABLED) &&
			    !(iocbq->iocb_flag & DSS_SECURITY_OP) &&
			    nseg <= LPFC_EXT_DATA_BDE_COUNT) {
				data_bde->tus.f.bdeFlags = BUFF_TYPE_BDE_64;
				data_bde->tus.f.bdeSize = sg_dma_len(sgel);
				data_bde->addrLow = putPaddrLow(physaddr);
				data_bde->addrHigh = putPaddrHigh(physaddr);
				data_bde++;
			} else {
				bpl->tus.f.bdeFlags = BUFF_TYPE_BDE_64;
				bpl->tus.f.bdeSize = sg_dma_len(sgel);
				bpl->tus.w = le32_to_cpu(bpl->tus.w);
				bpl->addrLow =
					le32_to_cpu(putPaddrLow(physaddr));
				bpl->addrHigh =
					le32_to_cpu(putPaddrHigh(physaddr));
				bpl++;
			}
		}
	}

	/*
	 * Finish initializing those IOCB fields that are dependent on the
	 * scsi_cmnd request_buffer.  Note that for SLI-2 the bdeSize is
	 * explicitly reinitialized and for SLI-3 the extended bde count is
	 * explicitly reinitialized since all iocb memory resources are reused.
	 */
	if (phba->sli_rev == 3 &&
	    !(phba->sli3_options & LPFC_SLI3_BG_ENABLED) &&
	    !(iocbq->iocb_flag & DSS_SECURITY_OP)) {
		if (num_bde > LPFC_EXT_DATA_BDE_COUNT) {
			/*
			 * The extended IOCB format can only hold a limited
			 * number of BDEs for data.  Promote the data bde's
			 * to a BPL that follows the fcp_cmnd, fcp_rsp and
			 * the first two BDEs in the DMA buffer.
			 */
			physaddr = lpfc_cmd->dma_handle;
			data_bde->tus.f.bdeFlags = BUFF_TYPE_BLP_64;
			data_bde->tus.f.bdeSize = (num_bde *
						   sizeof(struct ulp_bde64));
			physaddr += (sizeof(struct fcp_cmnd) +
				     sizeof(struct fcp_rsp) +
				     (2 * sizeof(struct ulp_bde64)));
			data_bde->addrHigh = putPaddrHigh(physaddr);
			data_bde->addrLow = putPaddrLow(physaddr);
			/* ebde count includes the response bde and data bpl */
			iocb_cmd->unsli3.fcp_ext.ebde_count = 2;
		} else {
			/* ebde count includes the response bde and data bdes */
			iocb_cmd->unsli3.fcp_ext.ebde_count = (num_bde + 1);
		}
	} else {
		iocb_cmd->un.fcpi64.bdl.bdeSize =
			((num_bde + 2) * sizeof(struct ulp_bde64));
		iocb_cmd->unsli3.fcp_ext.ebde_count = (num_bde + 1);
	}
	fcp_cmnd->fcpDl = cpu_to_be32(scsi_bufflen(scsi_cmnd));

	/*
	 * Due to difference in data length between DIF/non-DIF paths,
	 * we need to set word 4 of IOCB here
	 */
	iocb_cmd->un.fcpi.fcpi_parm = scsi_bufflen(scsi_cmnd);
	return 0;
}
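
/**
 * lpfc_sc_to_bg_opcodes - Determine the BlockGuard opcodes to be used
 * @phba: The Hba for which this call is being executed.
 * @sc: The SCSI command to work on.
 * @txop: (out) BlockGuard operation for transmitted blocks.
 * @rxop: (out) BlockGuard operation for received blocks.
 *
 * Computes the BlockGuard opcodes from the host's guard type and the
 * command's protection operation.
 *
 * Returns: zero on success; 1 for an unsupported op/guard combination.
 **/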
static int
lpfc_sc_to_bg_opcodes(struct lpfc_hba *phba, struct scsi_cmnd *sc,
		uint8_t *txop, uint8_t *rxop)
{
	uint8_t guard_type = scsi_host_get_guard(sc->device->host);
	uint8_t ret = 0;

	if (guard_type == SHOST_DIX_GUARD_IP) {
		switch (scsi_get_prot_op(sc)) {
		case SCSI_PROT_READ_INSERT:
		case SCSI_PROT_WRITE_STRIP:
			*txop = BG_OP_IN_CSUM_OUT_NODIF;
			*rxop = BG_OP_IN_NODIF_OUT_CSUM;
			break;

		case SCSI_PROT_READ_STRIP:
		case SCSI_PROT_WRITE_INSERT:
			*txop = BG_OP_IN_NODIF_OUT_CRC;
			*rxop = BG_OP_IN_CRC_OUT_NODIF;
			break;

		case SCSI_PROT_READ_PASS:
		case SCSI_PROT_WRITE_PASS:
			*txop = BG_OP_IN_CSUM_OUT_CRC;
			*rxop = BG_OP_IN_CRC_OUT_CSUM;
			break;

		case SCSI_PROT_NORMAL:
		default:
			lpfc_printf_log(phba, KERN_ERR, LOG_BG,
				"9063 BLKGRD: Bad op/guard:%d/%d combination\n",
					scsi_get_prot_op(sc), guard_type);
			ret = 1;
			break;

		}
	} else if (guard_type == SHOST_DIX_GUARD_CRC) {
		switch (scsi_get_prot_op(sc)) {
		case SCSI_PROT_READ_STRIP:
		case SCSI_PROT_WRITE_INSERT:
			*txop = BG_OP_IN_NODIF_OUT_CRC;
			*rxop = BG_OP_IN_CRC_OUT_NODIF;
			break;

		case SCSI_PROT_READ_PASS:
		case SCSI_PROT_WRITE_PASS:
			*txop = BG_OP_IN_CRC_OUT_CRC;
			*rxop = BG_OP_IN_CRC_OUT_CRC;
			break;

		case SCSI_PROT_READ_INSERT:
		case SCSI_PROT_WRITE_STRIP:
		case SCSI_PROT_NORMAL:
		default:
			lpfc_printf_log(phba, KERN_ERR, LOG_BG,
				"9075 BLKGRD: Bad op/guard:%d/%d combination\n",
					scsi_get_prot_op(sc), guard_type);
			ret = 1;
			break;
		}
	} else {
		/* unsupported format */
		BUG();
	}

	return ret;
}
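
/* T10 DIF tuple layout as it appears in the protection scatterlist */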
struct scsi_dif_tuple {
	__be16 guard_tag;	/* Checksum */
	__be16 app_tag;		/* Opaque storage */
	__be32 ref_tag;		/* Target LBA or indirect LBA */
};
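
/* Return the logical block size, in bytes, of the device behind @sc */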
static inline unsigned
lpfc_cmd_blksize(struct scsi_cmnd *sc)
{
	return sc->device->sector_size;
}
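
/**
 * lpfc_get_cmd_dif_parms - Extract DIF parameters from SCSI command
 * @sc: in: SCSI command
 * @apptagmask: out: app tag mask
 * @apptagval: out: app tag value
 * @reftag: out: ref tag (reference tag)
 *
 * Description:
 *   Extract DIF parameters from the command if possible.  Otherwise,
 *   use default parameters.
 **/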
static inline void
lpfc_get_cmd_dif_parms(struct scsi_cmnd *sc, uint16_t *apptagmask,
		uint16_t *apptagval, uint32_t *reftag)
{
	struct scsi_dif_tuple *spt;
	unsigned char op = scsi_get_prot_op(sc);
	unsigned int protcnt = scsi_prot_sg_count(sc);
	static int cnt;

	if (protcnt && (op == SCSI_PROT_WRITE_STRIP ||
	    op == SCSI_PROT_WRITE_PASS)) {

		cnt++;
		spt = page_address(sg_page(scsi_prot_sglist(sc))) +
			scsi_prot_sglist(sc)[0].offset;
		*apptagmask = 0;
		*apptagval = 0;
		*reftag = be32_to_cpu(spt->ref_tag);

	} else {
		/* SBC defines ref tag to be lower 32bits of LBA */
		*reftag = (uint32_t) (0xffffffff & scsi_get_lba(sc));
		*apptagmask = 0;
		*apptagval = 0;
	}
}
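
/**
 * lpfc_bg_setup_bpl - Setup BlockGuard BPL with no protection data
 * @phba: The Hba for which this call is being executed.
 * @sc: pointer to scsi command we're working on
 * @bpl: pointer to buffer list for protection groups
 * @datasegcnt: number of segments of data that have been dma mapped
 *
 * This function sets up the BPL buffer list for protection groups of type
 * LPFC_PG_TYPE_NO_DIF, where the HBA inserts or strips the DIF data.
 * The resulting list looks like:
 *   PDE5 | PDE6 | one or more data BDEs
 *
 * Returns the number of BDEs added to the BPL.
 **/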
static int
lpfc_bg_setup_bpl(struct lpfc_hba *phba, struct scsi_cmnd *sc,
		struct ulp_bde64 *bpl, int datasegcnt)
{
	struct scatterlist *sgde = NULL; /* s/g data entry */
	struct lpfc_pde5 *pde5 = NULL;
	struct lpfc_pde6 *pde6 = NULL;
	dma_addr_t physaddr;
	int i = 0, num_bde = 0, status;
	int datadir = sc->sc_data_direction;
	unsigned blksize;
	uint32_t reftag;
	uint16_t apptagmask, apptagval;
	uint8_t txop, rxop;

	status  = lpfc_sc_to_bg_opcodes(phba, sc, &txop, &rxop);
	if (status)
		goto out;

	/* extract some info from the scsi command for pde */
	blksize = lpfc_cmd_blksize(sc);
	lpfc_get_cmd_dif_parms(sc, &apptagmask, &apptagval, &reftag);

	/* setup PDE5 with what we have */
	pde5 = (struct lpfc_pde5 *) bpl;
	memset(pde5, 0, sizeof(struct lpfc_pde5));
	bf_set(pde5_type, pde5, LPFC_PDE5_DESCRIPTOR);
	pde5->reftag = reftag;

	/* Endianness conversion if necessary for PDE5 */
	pde5->word0 = cpu_to_le32(pde5->word0);
	pde5->reftag = cpu_to_le32(pde5->reftag);

	/* advance bpl and increment bde count */
	num_bde++;
	bpl++;
	pde6 = (struct lpfc_pde6 *) bpl;

	/* setup PDE6 with the rest of the info */
	memset(pde6, 0, sizeof(struct lpfc_pde6));
	bf_set(pde6_type, pde6, LPFC_PDE6_DESCRIPTOR);
	bf_set(pde6_optx, pde6, txop);
	bf_set(pde6_oprx, pde6, rxop);
	if (datadir == DMA_FROM_DEVICE) {
		bf_set(pde6_ce, pde6, 1);
		bf_set(pde6_re, pde6, 1);
		bf_set(pde6_ae, pde6, 1);
	}
	bf_set(pde6_ai, pde6, 1);
	bf_set(pde6_apptagval, pde6, apptagval);

	/* Endianness conversion if necessary for PDE6 */
	pde6->word0 = cpu_to_le32(pde6->word0);
	pde6->word1 = cpu_to_le32(pde6->word1);
	pde6->word2 = cpu_to_le32(pde6->word2);

	/* advance bpl and increment bde count */
	num_bde++;
	bpl++;

	/* assumption: caller has already run dma_map_sg on command data */
	scsi_for_each_sg(sc, sgde, datasegcnt, i) {
		physaddr = sg_dma_address(sgde);
		bpl->addrLow = le32_to_cpu(putPaddrLow(physaddr));
		bpl->addrHigh = le32_to_cpu(putPaddrHigh(physaddr));
		bpl->tus.f.bdeSize = sg_dma_len(sgde);
		if (datadir == DMA_TO_DEVICE)
			bpl->tus.f.bdeFlags = BUFF_TYPE_BDE_64;
		else
			bpl->tus.f.bdeFlags = BUFF_TYPE_BDE_64I;
		bpl->tus.w = le32_to_cpu(bpl->tus.w);
		bpl++;
		num_bde++;
	}

out:
	return num_bde;
}
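
/**
 * lpfc_bg_setup_bpl_prot - Setup BlockGuard BPL with protection data
 * @phba: The Hba for which this call is being executed.
 * @sc: pointer to scsi command we're working on
 * @bpl: pointer to buffer list for protection groups
 * @datacnt: number of segments of data that have been dma mapped
 * @protcnt: number of segments of protection data that have been dma mapped
 *
 * This function sets up the BPL buffer list for protection groups of type
 * LPFC_PG_TYPE_DIF_BUF, where protection buffers are passed separately to
 * the driver. For each protection group the list looks like:
 *   PDE5 | PDE6 | protection BDE | one or more data BDEs
 * Data BDEs are split across protection group boundaries when needed.
 *
 * Returns the number of BDEs added to the BPL.
 **/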
static int
lpfc_bg_setup_bpl_prot(struct lpfc_hba *phba, struct scsi_cmnd *sc,
		struct ulp_bde64 *bpl, int datacnt, int protcnt)
{
	struct scatterlist *sgde = NULL; /* s/g data entry */
	struct scatterlist *sgpe = NULL; /* s/g prot entry */
	struct lpfc_pde5 *pde5 = NULL;
	struct lpfc_pde6 *pde6 = NULL;
	struct ulp_bde64 *prot_bde = NULL;
	dma_addr_t dataphysaddr, protphysaddr;
	unsigned short curr_data = 0, curr_prot = 0;
	unsigned int split_offset, protgroup_len;
	unsigned int protgrp_blks, protgrp_bytes;
	unsigned int remainder, subtotal;
	int status;
	int datadir = sc->sc_data_direction;
	unsigned char pgdone = 0, alldone = 0;
	unsigned blksize;
	uint32_t reftag;
	uint16_t apptagmask, apptagval;
	uint8_t txop, rxop;
	int num_bde = 0;

	sgpe = scsi_prot_sglist(sc);
	sgde = scsi_sglist(sc);

	if (!sgpe || !sgde) {
		lpfc_printf_log(phba, KERN_ERR, LOG_FCP,
			"9020 Invalid s/g entry: data=0x%p prot=0x%p\n",
				sgpe, sgde);
		return 0;
	}

	status = lpfc_sc_to_bg_opcodes(phba, sc, &txop, &rxop);
	if (status)
		goto out;

	/* extract some info from the scsi command */
	blksize = lpfc_cmd_blksize(sc);
	lpfc_get_cmd_dif_parms(sc, &apptagmask, &apptagval, &reftag);

	split_offset = 0;
	do {
		/* setup PDE5 with what we have */
		pde5 = (struct lpfc_pde5 *) bpl;
		memset(pde5, 0, sizeof(struct lpfc_pde5));
		bf_set(pde5_type, pde5, LPFC_PDE5_DESCRIPTOR);
		pde5->reftag = reftag;

		/* Endianness conversion if necessary for PDE5 */
		pde5->word0 = cpu_to_le32(pde5->word0);
		pde5->reftag = cpu_to_le32(pde5->reftag);

		/* advance bpl and increment bde count */
		num_bde++;
		bpl++;
		pde6 = (struct lpfc_pde6 *) bpl;

		/* setup PDE6 with the rest of the info */
		memset(pde6, 0, sizeof(struct lpfc_pde6));
		bf_set(pde6_type, pde6, LPFC_PDE6_DESCRIPTOR);
		bf_set(pde6_optx, pde6, txop);
		bf_set(pde6_oprx, pde6, rxop);
		bf_set(pde6_ce, pde6, 1);
		bf_set(pde6_re, pde6, 1);
		bf_set(pde6_ae, pde6, 1);
		bf_set(pde6_ai, pde6, 1);
		bf_set(pde6_apptagval, pde6, apptagval);

		/* Endianness conversion if necessary for PDE6 */
		pde6->word0 = cpu_to_le32(pde6->word0);
		pde6->word1 = cpu_to_le32(pde6->word1);
		pde6->word2 = cpu_to_le32(pde6->word2);

		/* advance bpl and increment bde count */
		num_bde++;
		bpl++;

		/* setup the first BDE that points to protection buffer */
		prot_bde = (struct ulp_bde64 *) bpl;
		protphysaddr = sg_dma_address(sgpe);
		prot_bde->addrHigh = le32_to_cpu(putPaddrHigh(protphysaddr));
		prot_bde->addrLow = le32_to_cpu(putPaddrLow(protphysaddr));
		protgroup_len = sg_dma_len(sgpe);

		/* must be integer multiple of the DIF block length */
		BUG_ON(protgroup_len % 8);

		protgrp_blks = protgroup_len / 8;
		protgrp_bytes = protgrp_blks * blksize;

		prot_bde->tus.f.bdeSize = protgroup_len;
		prot_bde->tus.f.bdeFlags = LPFC_PDE7_DESCRIPTOR;
		prot_bde->tus.w = le32_to_cpu(prot_bde->tus.w);

		curr_prot++;
		num_bde++;

		/* setup BDE's for data blocks associated with DIF data */
		pgdone = 0;
		subtotal = 0; /* total bytes processed for current prot grp */
		while (!pgdone) {
			if (!sgde) {
				lpfc_printf_log(phba, KERN_ERR, LOG_BG,
					"9065 BLKGRD:%s Invalid data segment\n",
						__func__);
				return 0;
			}
			bpl++;
			dataphysaddr = sg_dma_address(sgde) + split_offset;
			bpl->addrLow = le32_to_cpu(putPaddrLow(dataphysaddr));
			bpl->addrHigh = le32_to_cpu(putPaddrHigh(dataphysaddr));

			remainder = sg_dma_len(sgde) - split_offset;

			if ((subtotal + remainder) <= protgrp_bytes) {
				/* we can use this whole buffer */
				bpl->tus.f.bdeSize = remainder;
				split_offset = 0;

				if ((subtotal + remainder) == protgrp_bytes)
					pgdone = 1;
			} else {
				/* must split this buffer with next prot grp */
				bpl->tus.f.bdeSize = protgrp_bytes - subtotal;
				split_offset += bpl->tus.f.bdeSize;
			}

			subtotal += bpl->tus.f.bdeSize;

			if (datadir == DMA_TO_DEVICE)
				bpl->tus.f.bdeFlags = BUFF_TYPE_BDE_64;
			else
				bpl->tus.f.bdeFlags = BUFF_TYPE_BDE_64I;
			bpl->tus.w = le32_to_cpu(bpl->tus.w);

			num_bde++;
			curr_data++;

			if (split_offset)
				break;

			/* Move to the next s/g segment if possible */
			sgde = sg_next(sgde);
		}

		/* are we done ? */
		if (curr_prot == protcnt) {
			alldone = 1;
		} else if (curr_prot < protcnt) {
			/* advance to next prot group */
			sgpe = sg_next(sgpe);
			bpl++;

			/* update the reference tag */
			reftag += protgrp_blks;
		} else {
			/* if we're here, we have a bug */
			lpfc_printf_log(phba, KERN_ERR, LOG_BG,
				"9054 BLKGRD: bug in %s\n", __func__);
		}

	} while (!alldone);

out:

	return num_bde;
}
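
/**
 * lpfc_prot_group_type - Get protection group type of a SCSI command
 * @phba: The Hba for which this call is being executed.
 * @sc: pointer to scsi command we're working on
 *
 * Given a SCSI command that supports DIF, determine the composition of the
 * protection groups involved in setting up buffer lists.
 *
 * Returns: LPFC_PG_TYPE_NO_DIF, LPFC_PG_TYPE_DIF_BUF or
 * LPFC_PG_TYPE_INVALID.
 **/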
static int
lpfc_prot_group_type(struct lpfc_hba *phba, struct scsi_cmnd *sc)
{
	int ret = LPFC_PG_TYPE_INVALID;
	unsigned char op = scsi_get_prot_op(sc);

	switch (op) {
	case SCSI_PROT_READ_STRIP:
	case SCSI_PROT_WRITE_INSERT:
		ret = LPFC_PG_TYPE_NO_DIF;
		break;
	case SCSI_PROT_READ_INSERT:
	case SCSI_PROT_WRITE_STRIP:
	case SCSI_PROT_READ_PASS:
	case SCSI_PROT_WRITE_PASS:
		ret = LPFC_PG_TYPE_DIF_BUF;
		break;
	default:
		lpfc_printf_log(phba, KERN_ERR, LOG_FCP,
				"9021 Unsupported protection op:%d\n", op);
		break;
	}

	return ret;
}
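
/**
 * lpfc_bg_scsi_prep_dma_buf - DMA mapping for scsi buffer with BlockGuard
 * @phba: The Hba for which this call is being executed.
 * @lpfc_cmd: The scsi buffer which is going to be prep'ed.
 *
 * This is the DIF-aware version of lpfc_scsi_prep_dma_buf. It prepares an
 * FCP command for DMA and adds the BlockGuard profile data (PDEs) to the
 * BPL.
 *
 * Return codes:
 *   1 - Error
 *   0 - Success
 **/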
static int
lpfc_bg_scsi_prep_dma_buf(struct lpfc_hba *phba,
		struct lpfc_scsi_buf *lpfc_cmd)
{
	struct scsi_cmnd *scsi_cmnd = lpfc_cmd->pCmd;
	struct fcp_cmnd *fcp_cmnd = lpfc_cmd->fcp_cmnd;
	struct ulp_bde64 *bpl = lpfc_cmd->fcp_bpl;
	IOCB_t *iocb_cmd = &lpfc_cmd->cur_iocbq.iocb;
	uint32_t num_bde = 0;
	int datasegcnt, protsegcnt, datadir = scsi_cmnd->sc_data_direction;
	int prot_group_type = 0;
	int diflen, fcpdl;
	unsigned blksize;

	/*
	 * Start the lpfc command prep by bumping the bpl beyond the fcp_cmnd
	 * and fcp_rsp regions to the first data bde entry.
	 */
	bpl += 2;
	if (scsi_sg_count(scsi_cmnd)) {
		/*
		 * The driver stores the segment count returned from pci_map_sg
		 * because this a count of dma-mappings used to map the use_sg
		 * pages.  They are not guaranteed to be the same for those
		 * architectures that implement an IOMMU.
		 */
		datasegcnt = dma_map_sg(&phba->pcidev->dev,
					scsi_sglist(scsi_cmnd),
					scsi_sg_count(scsi_cmnd), datadir);
		if (unlikely(!datasegcnt))
			return 1;

		lpfc_cmd->seg_cnt = datasegcnt;
		if (lpfc_cmd->seg_cnt > phba->cfg_sg_seg_cnt) {
			lpfc_printf_log(phba, KERN_ERR, LOG_BG,
					"9067 BLKGRD: %s: Too many sg segments"
					" from dma_map_sg.  Config %d, seg_cnt"
					" %d\n",
					__func__, phba->cfg_sg_seg_cnt,
					lpfc_cmd->seg_cnt);
			scsi_dma_unmap(scsi_cmnd);
			return 1;
		}

		prot_group_type = lpfc_prot_group_type(phba, scsi_cmnd);

		switch (prot_group_type) {
		case LPFC_PG_TYPE_NO_DIF:
			num_bde = lpfc_bg_setup_bpl(phba, scsi_cmnd, bpl,
					datasegcnt);
			/* we should have 2 or more entries in buffer list */
			if (num_bde < 2)
				goto err;
			break;
		case LPFC_PG_TYPE_DIF_BUF:{
			/*
			 * This type indicates that protection buffers are
			 * passed to the driver, so that needs to be prepared
			 * for DMA
			 */
			protsegcnt = dma_map_sg(&phba->pcidev->dev,
					scsi_prot_sglist(scsi_cmnd),
					scsi_prot_sg_count(scsi_cmnd), datadir);
			if (unlikely(!protsegcnt)) {
				scsi_dma_unmap(scsi_cmnd);
				return 1;
			}

			lpfc_cmd->prot_seg_cnt = protsegcnt;
			if (lpfc_cmd->prot_seg_cnt
			    > phba->cfg_prot_sg_seg_cnt) {
				lpfc_printf_log(phba, KERN_ERR, LOG_BG,
					"9068 BLKGRD: %s: Too many prot sg "
					"segments from dma_map_sg.  Config %d,"
					"prot_seg_cnt %d\n", __func__,
					phba->cfg_prot_sg_seg_cnt,
					lpfc_cmd->prot_seg_cnt);
				dma_unmap_sg(&phba->pcidev->dev,
					     scsi_prot_sglist(scsi_cmnd),
					     scsi_prot_sg_count(scsi_cmnd),
					     datadir);
				scsi_dma_unmap(scsi_cmnd);
				return 1;
			}

			num_bde = lpfc_bg_setup_bpl_prot(phba, scsi_cmnd, bpl,
					datasegcnt, protsegcnt);
			/* we should have 3 or more entries in buffer list */
			if (num_bde < 3)
				goto err;
			break;
		}
		case LPFC_PG_TYPE_INVALID:
		default:
			lpfc_printf_log(phba, KERN_ERR, LOG_FCP,
					"9022 Unexpected protection group %i\n",
					prot_group_type);
			return 1;
		}
	}

	/*
	 * Finish initializing those IOCB fields that are dependent on the
	 * scsi_cmnd request_buffer.  Note that the bdeSize is explicitly
	 * reinitialized since all iocb memory resources are used multiple
	 * times for scsi commands.
	 */
	iocb_cmd->un.fcpi64.bdl.bdeSize = (2 * sizeof(struct ulp_bde64));
	iocb_cmd->un.fcpi64.bdl.bdeSize += (num_bde * sizeof(struct ulp_bde64));
	iocb_cmd->ulpBdeCount = 1;
	iocb_cmd->ulpLe = 1;

	fcpdl = scsi_bufflen(scsi_cmnd);

	if (scsi_get_prot_type(scsi_cmnd) == SCSI_PROT_DIF_TYPE1) {
		/*
		 * We are in DIF Type 1 mode.
		 * Every data block has an 8 byte DIF (trailer)
		 * attached to it.  Must adjust the FCP data length.
		 */
		blksize = lpfc_cmd_blksize(scsi_cmnd);
		diflen = (fcpdl / blksize) * 8;
		fcpdl += diflen;
	}
	fcp_cmnd->fcpDl = cpu_to_be32(fcpdl);

	/*
	 * Due to difference in data length between DIF/non-DIF paths,
	 * we need to set word 4 of IOCB here
	 */
	iocb_cmd->un.fcpi.fcpi_parm = fcpdl;

	return 0;
err:
	lpfc_printf_log(phba, KERN_ERR, LOG_FCP,
			"9023 Could not setup all needed BDE's "
			"prot_group_type=%d, num_bde=%d\n",
			prot_group_type, num_bde);
	return 1;
}
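
/**
 * lpfc_parse_bg_err - Determine success or failure of BG ops in an IO
 * @phba: The Hba for which this call is being executed.
 * @lpfc_cmd: The scsi cmnd with the BlockGuard profile.
 * @pIocbOut: The response IOCB carrying the BlockGuard status.
 *
 * Examines the BlockGuard error status returned by the HBA and, when a
 * guard, reference or application tag error is reported, builds the
 * corresponding sense data on the SCSI command.
 *
 * Returns:
 *   0 - No error found
 *   1 - BlockGuard error found
 *  -1 - Internal error (bad profile, ...)
 **/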
static int
lpfc_parse_bg_err(struct lpfc_hba *phba, struct lpfc_scsi_buf *lpfc_cmd,
			struct lpfc_iocbq *pIocbOut)
{
	struct scsi_cmnd *cmd = lpfc_cmd->pCmd;
	struct sli3_bg_fields *bgf = &pIocbOut->iocb.unsli3.sli3_bg;
	int ret = 0;
	uint32_t bghm = bgf->bghm;
	uint32_t bgstat = bgf->bgstat;
	uint64_t failing_sector = 0;

	lpfc_printf_log(phba, KERN_ERR, LOG_BG, "9069 BLKGRD: BG ERROR in cmd"
			" 0x%x lba 0x%llx blk cnt 0x%x "
			"bgstat=0x%x bghm=0x%x\n",
			cmd->cmnd[0], (unsigned long long)scsi_get_lba(cmd),
			blk_rq_sectors(cmd->request), bgstat, bghm);

	spin_lock(&_dump_buf_lock);
	if (!_dump_buf_done) {
		lpfc_printf_log(phba, KERN_ERR, LOG_BG, "9070 BLKGRD: Saving"
			" Data for %u blocks to debugfs\n",
				(cmd->cmnd[7] << 8 | cmd->cmnd[8]));
		lpfc_debug_save_data(phba, cmd);

		/* If we have a prot sgl, save the DIF buffer */
		if (lpfc_prot_group_type(phba, cmd) ==
				LPFC_PG_TYPE_DIF_BUF) {
			lpfc_printf_log(phba, KERN_ERR, LOG_BG, "9071 BLKGRD: "
				"Saving DIF for %u blocks to debugfs\n",
				(cmd->cmnd[7] << 8 | cmd->cmnd[8]));
			lpfc_debug_save_dif(phba, cmd);
		}

		_dump_buf_done = 1;
	}
	spin_unlock(&_dump_buf_lock);

	if (lpfc_bgs_get_invalid_prof(bgstat)) {
		cmd->result = ScsiResult(DID_ERROR, 0);
		lpfc_printf_log(phba, KERN_ERR, LOG_BG, "9072 BLKGRD: Invalid"
			" BlockGuard profile. bgstat:0x%x\n",
			bgstat);
		ret = (-1);
		goto out;
	}

	if (lpfc_bgs_get_uninit_dif_block(bgstat)) {
		cmd->result = ScsiResult(DID_ERROR, 0);
		lpfc_printf_log(phba, KERN_ERR, LOG_BG, "9073 BLKGRD: "
			"Invalid BlockGuard DIF Block. bgstat:0x%x\n",
			bgstat);
		ret = (-1);
		goto out;
	}

	if (lpfc_bgs_get_guard_err(bgstat)) {
		ret = 1;

		scsi_build_sense_buffer(1, cmd->sense_buffer, ILLEGAL_REQUEST,
				0x10, 0x1);
		cmd->result = DRIVER_SENSE << 24
			| ScsiResult(DID_ABORT, SAM_STAT_CHECK_CONDITION);
		phba->bg_guard_err_cnt++;
		lpfc_printf_log(phba, KERN_ERR, LOG_BG,
			"9055 BLKGRD: guard_tag error\n");
	}

	if (lpfc_bgs_get_reftag_err(bgstat)) {
		ret = 1;

		scsi_build_sense_buffer(1, cmd->sense_buffer, ILLEGAL_REQUEST,
				0x10, 0x3);
		cmd->result = DRIVER_SENSE << 24
			| ScsiResult(DID_ABORT, SAM_STAT_CHECK_CONDITION);

		phba->bg_reftag_err_cnt++;
		lpfc_printf_log(phba, KERN_ERR, LOG_BG,
			"9056 BLKGRD: ref_tag error\n");
	}

	if (lpfc_bgs_get_apptag_err(bgstat)) {
		ret = 1;

		scsi_build_sense_buffer(1, cmd->sense_buffer, ILLEGAL_REQUEST,
				0x10, 0x2);
		cmd->result = DRIVER_SENSE << 24
			| ScsiResult(DID_ABORT, SAM_STAT_CHECK_CONDITION);

		phba->bg_apptag_err_cnt++;
		lpfc_printf_log(phba, KERN_ERR, LOG_BG,
			"9061 BLKGRD: app_tag error\n");
	}

	if (lpfc_bgs_get_hi_water_mark_present(bgstat)) {
		/*
		 * setup sense data descriptor 0 per SPC-4 as an
		 * information field, and put the failing LBA in it
		 */
		cmd->sense_buffer[8] = 0;     /* Information */
		cmd->sense_buffer[9] = 0xa;   /* Add. length */
		bghm /= cmd->device->sector_size;

		failing_sector = scsi_get_lba(cmd);
		failing_sector += bghm;

		put_unaligned_be64(failing_sector, &cmd->sense_buffer[10]);
	}

	if (!ret) {
		/* No error was reported - problem in FW? */
		cmd->result = ScsiResult(DID_ERROR, 0);
		lpfc_printf_log(phba, KERN_ERR, LOG_BG,
			"9057 BLKGRD: no errors reported!\n");
	}

out:
	return ret;
}
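
/**
 * lpfc_scsi_prep_dma_buf_s4 - DMA mapping for scsi buffer to SLI4 IF spec
 * @phba: The Hba for which this call is being executed.
 * @lpfc_cmd: The scsi buffer which is going to be mapped.
 *
 * This routine does the pci dma mapping of the scatter-gather list of the
 * scsi cmnd field of @lpfc_cmd for a device with SLI-4 interface spec. It
 * scans through the sg elements and formats the sge entries, and
 * initializes all IOCB fields which depend on the scsi command request
 * buffer.
 *
 * Return codes:
 *   1 - Error
 *   0 - Success
 **/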
static int
lpfc_scsi_prep_dma_buf_s4(struct lpfc_hba *phba, struct lpfc_scsi_buf *lpfc_cmd)
{
	struct scsi_cmnd *scsi_cmnd = lpfc_cmd->pCmd;
	struct scatterlist *sgel = NULL;
	struct fcp_cmnd *fcp_cmnd = lpfc_cmd->fcp_cmnd;
	struct sli4_sge *sgl = (struct sli4_sge *)lpfc_cmd->fcp_bpl;
	IOCB_t *iocb_cmd = &lpfc_cmd->cur_iocbq.iocb;
	dma_addr_t physaddr;
	uint32_t num_bde = 0;
	uint32_t dma_len;
	uint32_t dma_offset = 0;
	int nseg;

	/*
	 * There are three possibilities here - use scatter-gather segment, use
	 * the single mapping, or neither.  Start the lpfc command prep by
	 * bumping the sgl beyond the fcp_cmnd and fcp_rsp regions to the first
	 * data sge entry.
	 */
	if (scsi_sg_count(scsi_cmnd)) {
		/*
		 * The driver stores the segment count returned from pci_map_sg
		 * because this a count of dma-mappings used to map the use_sg
		 * pages.  They are not guaranteed to be the same for those
		 * architectures that implement an IOMMU.
		 */

		nseg = scsi_dma_map(scsi_cmnd);
		if (unlikely(!nseg))
			return 1;
		sgl += 1;
		/* clear the last flag in the fcp_rsp map entry */
		sgl->word2 = le32_to_cpu(sgl->word2);
		bf_set(lpfc_sli4_sge_last, sgl, 0);
		sgl->word2 = cpu_to_le32(sgl->word2);
		sgl += 1;

		lpfc_cmd->seg_cnt = nseg;
		if (lpfc_cmd->seg_cnt > phba->cfg_sg_seg_cnt) {
			lpfc_printf_log(phba, KERN_ERR, LOG_BG, "9074 BLKGRD:"
				" %s: Too many sg segments from "
				"dma_map_sg.  Config %d, seg_cnt %d\n",
				__func__, phba->cfg_sg_seg_cnt,
				lpfc_cmd->seg_cnt);
			scsi_dma_unmap(scsi_cmnd);
			return 1;
		}

		/*
		 * The driver established a maximum scatter-gather segment count
		 * during probe that limits the number of sg elements in any
		 * single scsi command.  Just run through the seg_cnt and format
		 * the sge's.
		 * When using SLI-3 the driver will try to fit all the BDEs into
		 * the IOCB.  If it can't then the BDEs get added to a BPL as it
		 * does for SLI-2 mode.
		 */
		scsi_for_each_sg(scsi_cmnd, sgel, nseg, num_bde) {
			physaddr = sg_dma_address(sgel);
			dma_len = sg_dma_len(sgel);
			sgl->addr_lo = cpu_to_le32(putPaddrLow(physaddr));
			sgl->addr_hi = cpu_to_le32(putPaddrHigh(physaddr));
			if ((num_bde + 1) == nseg)
				bf_set(lpfc_sli4_sge_last, sgl, 1);
			else
				bf_set(lpfc_sli4_sge_last, sgl, 0);
			bf_set(lpfc_sli4_sge_offset, sgl, dma_offset);
			sgl->word2 = cpu_to_le32(sgl->word2);
			sgl->sge_len = cpu_to_le32(dma_len);
			dma_offset += dma_len;
			sgl++;
		}
	} else {
		sgl += 1;
		/* set the last flag in the fcp_rsp map entry */
		sgl->word2 = le32_to_cpu(sgl->word2);
		bf_set(lpfc_sli4_sge_last, sgl, 1);
		sgl->word2 = cpu_to_le32(sgl->word2);
	}

	/*
	 * Finish initializing those IOCB fields that are dependent on the
	 * scsi_cmnd request_buffer.  Note that for SLI-2 the bdeSize is
	 * explicitly reinitialized and for SLI-3 the extended bde count is
	 * explicitly reinitialized since all iocb memory resources are reused.
	 */
	fcp_cmnd->fcpDl = cpu_to_be32(scsi_bufflen(scsi_cmnd));

	/*
	 * Due to difference in data length between DIF/non-DIF paths,
	 * we need to set word 4 of IOCB here
	 */
	iocb_cmd->un.fcpi.fcpi_parm = scsi_bufflen(scsi_cmnd);
	return 0;
}
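
/**
 * lpfc_scsi_prep_dma_buf - Wrapper function for DMA mapping of scsi buffer
 * @phba: The Hba for which this call is being executed.
 * @lpfc_cmd: The scsi buffer which is going to be mapped.
 *
 * This routine wraps the actual DMA mapping function pointer from the
 * lpfc_hba struct.
 *
 * Return codes:
 *   1 - Error
 *   0 - Success
 **/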
static inline int
lpfc_scsi_prep_dma_buf(struct lpfc_hba *phba, struct lpfc_scsi_buf *lpfc_cmd)
{
	return phba->lpfc_scsi_prep_dma_buf(phba, lpfc_cmd);
}
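
/**
 * lpfc_send_scsi_error_event - Posts an event when there is SCSI error
 * @phba: Pointer to hba context object.
 * @vport: Pointer to vport object.
 * @lpfc_cmd: Pointer to lpfc scsi command which reported the error.
 * @rsp_iocb: Pointer to response iocb object which reported error.
 *
 * This function posts an event when there is a SCSI command reporting
 * error from the scsi device.
 **/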
static void
lpfc_send_scsi_error_event(struct lpfc_hba *phba, struct lpfc_vport *vport,
		struct lpfc_scsi_buf *lpfc_cmd, struct lpfc_iocbq *rsp_iocb) {
	struct scsi_cmnd *cmnd = lpfc_cmd->pCmd;
	struct fcp_rsp *fcprsp = lpfc_cmd->fcp_rsp;
	uint32_t resp_info = fcprsp->rspStatus2;
	uint32_t scsi_status = fcprsp->rspStatus3;
	uint32_t fcpi_parm = rsp_iocb->iocb.un.fcpi.fcpi_parm;
	struct lpfc_fast_path_event *fast_path_evt = NULL;
	struct lpfc_nodelist *pnode = lpfc_cmd->rdata->pnode;
	unsigned long flags;

	if (!pnode || !NLP_CHK_NODE_ACT(pnode))
		return;

	/* If there is queuefull or busy condition send a scsi event */
	if ((cmnd->result == SAM_STAT_TASK_SET_FULL) ||
		(cmnd->result == SAM_STAT_BUSY)) {
		fast_path_evt = lpfc_alloc_fast_evt(phba);
		if (!fast_path_evt)
			return;
		fast_path_evt->un.scsi_evt.event_type =
			FC_REG_SCSI_EVENT;
		fast_path_evt->un.scsi_evt.subcategory =
		(cmnd->result == SAM_STAT_TASK_SET_FULL) ?
		LPFC_EVENT_QFULL : LPFC_EVENT_DEVBSY;
		fast_path_evt->un.scsi_evt.lun = cmnd->device->lun;
		memcpy(&fast_path_evt->un.scsi_evt.wwpn,
			&pnode->nlp_portname, sizeof(struct lpfc_name));
		memcpy(&fast_path_evt->un.scsi_evt.wwnn,
			&pnode->nlp_nodename, sizeof(struct lpfc_name));
	} else if ((resp_info & SNS_LEN_VALID) && fcprsp->rspSnsLen &&
		((cmnd->cmnd[0] == READ_10) || (cmnd->cmnd[0] == WRITE_10))) {
		fast_path_evt = lpfc_alloc_fast_evt(phba);
		if (!fast_path_evt)
			return;
		fast_path_evt->un.check_cond_evt.scsi_event.event_type =
			FC_REG_SCSI_EVENT;
		fast_path_evt->un.check_cond_evt.scsi_event.subcategory =
			LPFC_EVENT_CHECK_COND;
		fast_path_evt->un.check_cond_evt.scsi_event.lun =
			cmnd->device->lun;
		memcpy(&fast_path_evt->un.check_cond_evt.scsi_event.wwpn,
			&pnode->nlp_portname, sizeof(struct lpfc_name));
		memcpy(&fast_path_evt->un.check_cond_evt.scsi_event.wwnn,
			&pnode->nlp_nodename, sizeof(struct lpfc_name));
		fast_path_evt->un.check_cond_evt.sense_key =
			cmnd->sense_buffer[2] & 0xf;
		fast_path_evt->un.check_cond_evt.asc = cmnd->sense_buffer[12];
		fast_path_evt->un.check_cond_evt.ascq = cmnd->sense_buffer[13];
	} else if ((cmnd->sc_data_direction == DMA_FROM_DEVICE) &&
		     fcpi_parm &&
		     ((be32_to_cpu(fcprsp->rspResId) != fcpi_parm) ||
			((scsi_status == SAM_STAT_GOOD) &&
			!(resp_info & (RESID_UNDER | RESID_OVER))))) {
		/*
		 * The device returned fewer bytes than the adapter
		 * transferred; report a read check error as a fabric event.
		 */
		fast_path_evt = lpfc_alloc_fast_evt(phba);
		if (!fast_path_evt)
			return;
		fast_path_evt->un.read_check_error.header.event_type =
			FC_REG_FABRIC_EVENT;
		fast_path_evt->un.read_check_error.header.subcategory =
			LPFC_EVENT_FCPRDCHKERR;
		memcpy(&fast_path_evt->un.read_check_error.header.wwpn,
			&pnode->nlp_portname, sizeof(struct lpfc_name));
		memcpy(&fast_path_evt->un.read_check_error.header.wwnn,
			&pnode->nlp_nodename, sizeof(struct lpfc_name));
		fast_path_evt->un.read_check_error.lun = cmnd->device->lun;
		fast_path_evt->un.read_check_error.opcode = cmnd->cmnd[0];
		fast_path_evt->un.read_check_error.fcpiparam =
			fcpi_parm;
	} else
		return;

	fast_path_evt->vport = vport;
	spin_lock_irqsave(&phba->hbalock, flags);
	list_add_tail(&fast_path_evt->work_evt.evt_listp, &phba->work_list);
	spin_unlock_irqrestore(&phba->hbalock, flags);
	lpfc_worker_wake_up(phba);
	return;
}
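
/**
 * lpfc_scsi_unprep_dma_buf - Un-map DMA mapping of SG-list for dev
 * @phba: The HBA for which this call is being executed.
 * @psb: The scsi buffer which is going to be un-mapped.
 *
 * This routine does the DMA un-mapping of the scatter gather list of the
 * scsi command field of @psb.
 **/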
static void
lpfc_scsi_unprep_dma_buf(struct lpfc_hba *phba, struct lpfc_scsi_buf *psb)
{
	/*
	 * There are only two special cases to consider.  (1) the scsi command
	 * requested scatter-gather usage or (2) the scsi command allocated
	 * a request buffer, but did not request use_sg.  There is a third
	 * case, but it does not require resource deallocation.
	 */
	if (psb->seg_cnt > 0)
		scsi_dma_unmap(psb->pCmd);
	if (psb->prot_seg_cnt > 0)
		dma_unmap_sg(&phba->pcidev->dev, scsi_prot_sglist(psb->pCmd),
				scsi_prot_sg_count(psb->pCmd),
				psb->pCmd->sc_data_direction);
}
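
/**
 * lpfc_handle_fcp_err - FCP response handler
 * @vport: The virtual port for which this call is being executed.
 * @lpfc_cmd: Pointer to lpfc_scsi_buf data structure.
 * @rsp_iocb: The response IOCB which contains the FCP error.
 *
 * This routine is called to process a response IOCB with status field
 * IOSTAT_FCP_RSP_ERROR. It sets the result field of the scsi command
 * based upon the SCSI and FCP error information.
 **/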
static void
lpfc_handle_fcp_err(struct lpfc_vport *vport, struct lpfc_scsi_buf *lpfc_cmd,
		    struct lpfc_iocbq *rsp_iocb)
{
	struct scsi_cmnd *cmnd = lpfc_cmd->pCmd;
	struct fcp_cmnd *fcpcmd = lpfc_cmd->fcp_cmnd;
	struct fcp_rsp *fcprsp = lpfc_cmd->fcp_rsp;
	uint32_t fcpi_parm = rsp_iocb->iocb.un.fcpi.fcpi_parm;
	uint32_t resp_info = fcprsp->rspStatus2;
	uint32_t scsi_status = fcprsp->rspStatus3;
	uint32_t *lp;
	uint32_t host_status = DID_OK;
	uint32_t rsplen = 0;
	uint32_t logit = LOG_FCP | LOG_FCP_ERROR;

	/*
	 *  If this is a task management command, there is no
	 *  scsi packet associated with this lpfc_cmd.  The driver
	 *  consumes it.
	 */
	if (fcpcmd->fcpCntl2) {
		scsi_status = 0;
		goto out;
	}

	if (resp_info & RSP_LEN_VALID) {
		rsplen = be32_to_cpu(fcprsp->rspRspLen);
		if (rsplen != 0 && rsplen != 4 && rsplen != 8) {
			lpfc_printf_vlog(vport, KERN_ERR, LOG_FCP,
				 "2719 Invalid response length: "
				 "tgt x%x lun x%x cmnd x%x rsplen x%x\n",
				 cmnd->device->id,
				 cmnd->device->lun, cmnd->cmnd[0],
				 rsplen);
			host_status = DID_ERROR;
			goto out;
		}
		if (fcprsp->rspInfo3 != RSP_NO_FAILURE) {
			lpfc_printf_vlog(vport, KERN_ERR, LOG_FCP,
				 "2757 Protocol failure detected during "
				 "processing of FCP I/O op: "
				 "tgt x%x lun x%x cmnd x%x rspInfo3 x%x\n",
				 cmnd->device->id,
				 cmnd->device->lun, cmnd->cmnd[0],
				 fcprsp->rspInfo3);
			host_status = DID_ERROR;
			goto out;
		}
	}

	if ((resp_info & SNS_LEN_VALID) && fcprsp->rspSnsLen) {
		uint32_t snslen = be32_to_cpu(fcprsp->rspSnsLen);
		if (snslen > SCSI_SENSE_BUFFERSIZE)
			snslen = SCSI_SENSE_BUFFERSIZE;

		if (resp_info & RSP_LEN_VALID)
			rsplen = be32_to_cpu(fcprsp->rspRspLen);
		memcpy(cmnd->sense_buffer, &fcprsp->rspInfo0 + rsplen, snslen);
	}
	lp = (uint32_t *)cmnd->sense_buffer;

	if (!scsi_status && (resp_info & RESID_UNDER))
		logit = LOG_FCP;

	lpfc_printf_vlog(vport, KERN_WARNING, logit,
			 "9024 FCP command x%x failed: x%x SNS x%x x%x "
			 "Data: x%x x%x x%x x%x x%x\n",
			 cmnd->cmnd[0], scsi_status,
			 be32_to_cpu(*lp), be32_to_cpu(*(lp + 3)), resp_info,
			 be32_to_cpu(fcprsp->rspResId),
			 be32_to_cpu(fcprsp->rspSnsLen),
			 be32_to_cpu(fcprsp->rspRspLen),
			 fcprsp->rspInfo3);

	scsi_set_resid(cmnd, 0);
	if (resp_info & RESID_UNDER) {
		scsi_set_resid(cmnd, be32_to_cpu(fcprsp->rspResId));

		lpfc_printf_vlog(vport, KERN_INFO, LOG_FCP,
				 "9025 FCP Read Underrun, expected %d, "
				 "residual %d Data: x%x x%x x%x\n",
				 be32_to_cpu(fcpcmd->fcpDl),
				 scsi_get_resid(cmnd), fcpi_parm, cmnd->cmnd[0],
				 cmnd->underflow);

		/*
		 * If there is an under run, check if the under run reported by
		 * the storage array is the same as the under run reported by
		 * the HBA.  If they differ, there is a dropped frame.
		 */
		if ((cmnd->sc_data_direction == DMA_FROM_DEVICE) &&
			fcpi_parm &&
			(scsi_get_resid(cmnd) != fcpi_parm)) {
			lpfc_printf_vlog(vport, KERN_WARNING,
					 LOG_FCP | LOG_FCP_ERROR,
					 "9026 FCP Read Check Error "
					 "and Underrun Data: x%x x%x x%x x%x\n",
					 be32_to_cpu(fcpcmd->fcpDl),
					 scsi_get_resid(cmnd), fcpi_parm,
					 cmnd->cmnd[0]);
			scsi_set_resid(cmnd, scsi_bufflen(cmnd));
			host_status = DID_ERROR;
		}
		/*
		 * The cmnd->underflow is the minimum number of bytes that must
		 * be transferred for this command.  Provided a sense condition
		 * is not present, make sure the actual amount transferred is
		 * at least the underflow value or fail.
		 */
		if (!(resp_info & SNS_LEN_VALID) &&
		    (scsi_status == SAM_STAT_GOOD) &&
		    (scsi_bufflen(cmnd) - scsi_get_resid(cmnd)
		     < cmnd->underflow)) {
			lpfc_printf_vlog(vport, KERN_INFO, LOG_FCP,
					 "9027 FCP command x%x residual "
					 "underrun converted to error "
					 "Data: x%x x%x x%x\n",
					 cmnd->cmnd[0], scsi_bufflen(cmnd),
					 scsi_get_resid(cmnd), cmnd->underflow);
			host_status = DID_ERROR;
		}
	} else if (resp_info & RESID_OVER) {
		lpfc_printf_vlog(vport, KERN_WARNING, LOG_FCP,
				 "9028 FCP command x%x residual overrun error. "
				 "Data: x%x x%x\n", cmnd->cmnd[0],
				 scsi_bufflen(cmnd), scsi_get_resid(cmnd));
		host_status = DID_ERROR;

	/*
	 * Check SLI validation that all the transfer was actually done
	 * (fcpi_parm should be zero).  Apply check only to reads.
	 */
	} else if (fcpi_parm && (cmnd->sc_data_direction == DMA_FROM_DEVICE)) {
		lpfc_printf_vlog(vport, KERN_WARNING, LOG_FCP | LOG_FCP_ERROR,
				 "9029 FCP Read Check Error Data: "
				 "x%x x%x x%x x%x x%x\n",
				 be32_to_cpu(fcpcmd->fcpDl),
				 be32_to_cpu(fcprsp->rspResId),
				 fcpi_parm, cmnd->cmnd[0], scsi_status);
		switch (scsi_status) {
		case SAM_STAT_GOOD:
		case SAM_STAT_CHECK_CONDITION:
			/*
			 * The fabric dropped a data frame.  Fail any
			 * successful command in which we detected dropped
			 * frames; a status of good or some check conditions
			 * could be considered a successful command.
			 */
			host_status = DID_ERROR;
			break;
		}
		scsi_set_resid(cmnd, scsi_bufflen(cmnd));
	}

 out:
	cmnd->result = ScsiResult(host_status, scsi_status);
	lpfc_send_scsi_error_event(vport->phba, vport, lpfc_cmd, rsp_iocb);
}
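
/**
 * lpfc_scsi_cmd_iocb_cmpl - Scsi cmnd IOCB completion routine
 * @phba: The Hba for which this call is being executed.
 * @pIocbIn: The command IOCBQ for the scsi cmnd.
 * @pIocbOut: The response IOCBQ for the scsi cmnd.
 *
 * This routine assigns the scsi command result by looking at the response
 * IOCB status field. It also handles QUEUE FULL conditions by ramping
 * down the device queue depth.
 **/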
static void
lpfc_scsi_cmd_iocb_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *pIocbIn,
			struct lpfc_iocbq *pIocbOut)
{
	struct lpfc_scsi_buf *lpfc_cmd =
		(struct lpfc_scsi_buf *) pIocbIn->context1;
	struct lpfc_vport *vport = pIocbIn->vport;
	struct lpfc_rport_data *rdata = lpfc_cmd->rdata;
	struct lpfc_nodelist *pnode = rdata->pnode;
	struct scsi_cmnd *cmd;
	int result;
	struct scsi_device *tmp_sdev;
	int depth;
	unsigned long flags;
	struct lpfc_fast_path_event *fast_path_evt;
	struct Scsi_Host *shost;
	uint32_t queue_depth, scsi_id;

	/* Sanity check on return of outstanding command */
	if (!(lpfc_cmd->pCmd))
		return;
	cmd = lpfc_cmd->pCmd;
	shost = cmd->device->host;

	lpfc_cmd->result = pIocbOut->iocb.un.ulpWord[4];
	lpfc_cmd->status = pIocbOut->iocb.ulpStatus;
	/* pick up SLI4 exchange busy status from HBA */
	lpfc_cmd->exch_busy = pIocbOut->iocb_flag & LPFC_EXCHANGE_BUSY;

	if (pnode && NLP_CHK_NODE_ACT(pnode))
		atomic_dec(&pnode->cmd_pending);

	if (lpfc_cmd->status) {
		if (lpfc_cmd->status == IOSTAT_LOCAL_REJECT &&
		    (lpfc_cmd->result & IOERR_DRVR_MASK))
			lpfc_cmd->status = IOSTAT_DRIVER_REJECT;
		else if (lpfc_cmd->status >= IOSTAT_CNT)
			lpfc_cmd->status = IOSTAT_DEFAULT;

		lpfc_printf_vlog(vport, KERN_WARNING, LOG_FCP,
				 "9030 FCP cmd x%x failed <%d/%d> "
				 "status: x%x result: x%x Data: x%x x%x\n",
				 cmd->cmnd[0],
				 cmd->device ? cmd->device->id : 0xffff,
				 cmd->device ? cmd->device->lun : 0xffff,
				 lpfc_cmd->status, lpfc_cmd->result,
				 pIocbOut->iocb.ulpContext,
				 lpfc_cmd->cur_iocbq.iocb.ulpIoTag);

		switch (lpfc_cmd->status) {
		case IOSTAT_FCP_RSP_ERROR:
			/* Call FCP RSP handler to determine result */
			lpfc_handle_fcp_err(vport, lpfc_cmd, pIocbOut);
			break;
		case IOSTAT_NPORT_BSY:
		case IOSTAT_FABRIC_BSY:
			cmd->result = ScsiResult(DID_TRANSPORT_DISRUPTED, 0);
			fast_path_evt = lpfc_alloc_fast_evt(phba);
			if (!fast_path_evt)
				break;
			fast_path_evt->un.fabric_evt.event_type =
				FC_REG_FABRIC_EVENT;
			fast_path_evt->un.fabric_evt.subcategory =
				(lpfc_cmd->status == IOSTAT_NPORT_BSY) ?
				LPFC_EVENT_PORT_BUSY : LPFC_EVENT_FABRIC_BUSY;
			if (pnode && NLP_CHK_NODE_ACT(pnode)) {
				memcpy(&fast_path_evt->un.fabric_evt.wwpn,
				       &pnode->nlp_portname,
				       sizeof(struct lpfc_name));
				memcpy(&fast_path_evt->un.fabric_evt.wwnn,
				       &pnode->nlp_nodename,
				       sizeof(struct lpfc_name));
			}
			fast_path_evt->vport = vport;
			fast_path_evt->work_evt.evt =
				LPFC_EVT_FASTPATH_MGMT_EVT;
			spin_lock_irqsave(&phba->hbalock, flags);
			list_add_tail(&fast_path_evt->work_evt.evt_listp,
				      &phba->work_list);
			spin_unlock_irqrestore(&phba->hbalock, flags);
			lpfc_worker_wake_up(phba);
			break;
		case IOSTAT_LOCAL_REJECT:
			if (lpfc_cmd->result == IOERR_INVALID_RPI ||
			    lpfc_cmd->result == IOERR_NO_RESOURCES ||
			    lpfc_cmd->result == IOERR_ABORT_REQUESTED ||
			    lpfc_cmd->result == IOERR_SLER_CMD_RCV_FAILURE) {
				cmd->result = ScsiResult(DID_REQUEUE, 0);
				break;
			}

			if ((lpfc_cmd->result == IOERR_RX_DMA_FAILED ||
			     lpfc_cmd->result == IOERR_TX_DMA_FAILED) &&
			    pIocbOut->iocb.unsli3.sli3_bg.bgstat) {
				if (scsi_get_prot_op(cmd) != SCSI_PROT_NORMAL) {
					/*
					 * This is a response for a BG enabled
					 * cmd. Parse BG error
					 */
					lpfc_parse_bg_err(phba, lpfc_cmd,
							  pIocbOut);
					break;
				} else {
					lpfc_printf_vlog(vport, KERN_WARNING,
							 LOG_BG,
							 "9031 non-zero BGSTAT "
							 "on unprotected cmd\n");
				}
			}

		/* else: fall through */
		default:
			cmd->result = ScsiResult(DID_ERROR, 0);
			break;
		}

		if (!pnode || !NLP_CHK_NODE_ACT(pnode)
		    || (pnode->nlp_state != NLP_STE_MAPPED_NODE))
			cmd->result = ScsiResult(DID_TRANSPORT_DISRUPTED,
						 SAM_STAT_BUSY);
	} else {
		cmd->result = ScsiResult(DID_OK, 0);
	}

	if (cmd->result || lpfc_cmd->fcp_rsp->rspSnsLen) {
		uint32_t *lp = (uint32_t *)cmd->sense_buffer;

		lpfc_printf_vlog(vport, KERN_INFO, LOG_FCP,
				 "0710 Iodone <%d/%d> cmd %p, error "
				 "x%x SNS x%x x%x Data: x%x x%x\n",
				 cmd->device->id, cmd->device->lun, cmd,
				 cmd->result, *lp, *(lp + 3), cmd->retries,
				 scsi_get_resid(cmd));
	}

	lpfc_update_stats(phba, lpfc_cmd);
	result = cmd->result;
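	/*
	 * If this command completed more slowly than cfg_max_scsicmpl_time
	 * allows, clamp the target queue depth down to the number of
	 * commands currently outstanding; otherwise ramp it back up slowly.
	 */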
	if (vport->cfg_max_scsicmpl_time &&
	    time_after(jiffies, lpfc_cmd->start_time +
		       msecs_to_jiffies(vport->cfg_max_scsicmpl_time))) {
		spin_lock_irqsave(shost->host_lock, flags);
		if (pnode && NLP_CHK_NODE_ACT(pnode)) {
			if (pnode->cmd_qdepth >
			    atomic_read(&pnode->cmd_pending) &&
			    (atomic_read(&pnode->cmd_pending) >
			     LPFC_MIN_TGT_QDEPTH) &&
			    ((cmd->cmnd[0] == READ_10) ||
			     (cmd->cmnd[0] == WRITE_10)))
				pnode->cmd_qdepth =
					atomic_read(&pnode->cmd_pending);

			pnode->last_change_time = jiffies;
		}
		spin_unlock_irqrestore(shost->host_lock, flags);
	} else if (pnode && NLP_CHK_NODE_ACT(pnode)) {
		if ((pnode->cmd_qdepth < vport->cfg_tgt_queue_depth) &&
		    time_after(jiffies, pnode->last_change_time +
			       msecs_to_jiffies(LPFC_TGTQ_INTERVAL))) {
			spin_lock_irqsave(shost->host_lock, flags);
			depth = pnode->cmd_qdepth * LPFC_TGTQ_RAMPUP_PCENT
				/ 100;
			depth = depth ? depth : 1;
			pnode->cmd_qdepth += depth;
			if (pnode->cmd_qdepth > vport->cfg_tgt_queue_depth)
				pnode->cmd_qdepth = vport->cfg_tgt_queue_depth;
			pnode->last_change_time = jiffies;
			spin_unlock_irqrestore(shost->host_lock, flags);
		}
	}

	lpfc_scsi_unprep_dma_buf(phba, lpfc_cmd);

	/* The sdev is not guaranteed to be valid post scsi_done upcall. */
	queue_depth = cmd->device->queue_depth;
	scsi_id = cmd->device->id;
	cmd->scsi_done(cmd);

	if (phba->cfg_poll & ENABLE_FCP_RING_POLLING) {
		/*
		 * If there is a thread waiting for command completion
		 * wake up the thread.
		 */
		spin_lock_irqsave(shost->host_lock, flags);
		lpfc_cmd->pCmd = NULL;
		if (lpfc_cmd->waitq)
			wake_up(lpfc_cmd->waitq);
		spin_unlock_irqrestore(shost->host_lock, flags);
		lpfc_release_scsi_buf(phba, lpfc_cmd);
		return;
	}

	if (!result)
		lpfc_rampup_queue_depth(vport, queue_depth);

	/*
	 * Check for queue full.  If the lun is reporting queue full, then
	 * back off the lun queue depth to prevent target overloads.
	 */
	if (result == SAM_STAT_TASK_SET_FULL && pnode &&
	    NLP_CHK_NODE_ACT(pnode)) {
		shost_for_each_device(tmp_sdev, shost) {
			if (tmp_sdev->id != scsi_id)
				continue;
			depth = scsi_track_queue_full(tmp_sdev,
						      tmp_sdev->queue_depth-1);
			if (depth <= 0)
				continue;
			lpfc_printf_vlog(vport, KERN_WARNING, LOG_FCP,
					 "0711 detected queue full - lun queue "
					 "depth adjusted to %d.\n", depth);
			lpfc_send_sdev_queuedepth_change_event(phba, vport,
							       pnode,
							       tmp_sdev->lun,
							       depth+1, depth);
		}
	}

	/*
	 * If there is a thread waiting for command completion
	 * wake up the thread.
	 */
	spin_lock_irqsave(shost->host_lock, flags);
	lpfc_cmd->pCmd = NULL;
	if (lpfc_cmd->waitq)
		wake_up(lpfc_cmd->waitq);
	spin_unlock_irqrestore(shost->host_lock, flags);

	lpfc_release_scsi_buf(phba, lpfc_cmd);
}

/**
 * lpfc_fcpcmd_to_iocb - copy the fcp_cmnd data to the IOCB
 * @data: A pointer to the immediate command data portion of the IOCB.
 * @fcp_cmnd: The FCP Command that is provided by the SCSI layer.
 *
 * The routine copies the entire FCP command from @fcp_cmnd to @data while
 * byte swapping the data to big endian.
 **/
static void
lpfc_fcpcmd_to_iocb(uint8_t *data, struct fcp_cmnd *fcp_cmnd)
{
	int i, j;
	for (i = 0, j = 0; i < sizeof(struct fcp_cmnd);
	     i += sizeof(uint32_t), j++) {
		((uint32_t *)data)[j] = cpu_to_be32(((uint32_t *)fcp_cmnd)[j]);
	}
}

/**
 * lpfc_scsi_prep_cmnd - Convert a scsi cmnd to an FCP information unit
 * @vport: The virtual port for which this call is being executed.
 * @lpfc_cmd: The scsi command that needs to be sent.
 * @pnode: Pointer to lpfc_nodelist.
 *
 * This routine initializes the fcp_cmnd and iocb data structures from the
 * scsi command so that the request can be transferred to the device.
 **/
static void
lpfc_scsi_prep_cmnd(struct lpfc_vport *vport, struct lpfc_scsi_buf *lpfc_cmd,
		    struct lpfc_nodelist *pnode)
{
	struct lpfc_hba *phba = vport->phba;
	struct scsi_cmnd *scsi_cmnd = lpfc_cmd->pCmd;
	struct fcp_cmnd *fcp_cmnd = lpfc_cmd->fcp_cmnd;
	IOCB_t *iocb_cmd = &lpfc_cmd->cur_iocbq.iocb;
	struct lpfc_iocbq *piocbq = &(lpfc_cmd->cur_iocbq);
	int datadir = scsi_cmnd->sc_data_direction;
	char tag[2];

	if (!pnode || !NLP_CHK_NODE_ACT(pnode))
		return;

	lpfc_cmd->fcp_rsp->rspSnsLen = 0;
	/* clear task management bits */
	lpfc_cmd->fcp_cmnd->fcpCntl2 = 0;

	int_to_scsilun(lpfc_cmd->pCmd->device->lun,
		       &lpfc_cmd->fcp_cmnd->fcp_lun);

	memcpy(&fcp_cmnd->fcpCdb[0], scsi_cmnd->cmnd, 16);

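	/*
	 * Map the block layer's queue tag message (if any) to the FCP
	 * task attribute carried in fcpCntl1: head-of-queue, ordered,
	 * or simple. Untagged commands get a task attribute of zero.
	 */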
	if (scsi_populate_tag_msg(scsi_cmnd, tag)) {
		switch (tag[0]) {
		case HEAD_OF_QUEUE_TAG:
			fcp_cmnd->fcpCntl1 = HEAD_OF_Q;
			break;
		case ORDERED_QUEUE_TAG:
			fcp_cmnd->fcpCntl1 = ORDERED_Q;
			break;
		default:
			fcp_cmnd->fcpCntl1 = SIMPLE_Q;
			break;
		}
	} else
		fcp_cmnd->fcpCntl1 = 0;

	/*
	 * There are three possibilities here - use scatter-gather segment, use
	 * the single mapping, or neither.  Start the lpfc command prep by
	 * bumping the bpl beyond the fcp_cmnd and fcp_rsp regions to the first
	 * data bde entry.
	 */
	if (scsi_sg_count(scsi_cmnd)) {
		if (datadir == DMA_TO_DEVICE) {
			iocb_cmd->ulpCommand = CMD_FCP_IWRITE64_CR;
			if (phba->sli_rev < LPFC_SLI_REV4) {
				iocb_cmd->un.fcpi.fcpi_parm = 0;
				iocb_cmd->ulpPU = 0;
			} else
				iocb_cmd->ulpPU = PARM_READ_CHECK;
			fcp_cmnd->fcpCntl3 = WRITE_DATA;
			phba->fc4OutputRequests++;
		} else {
			iocb_cmd->ulpCommand = CMD_FCP_IREAD64_CR;
			iocb_cmd->ulpPU = PARM_READ_CHECK;
			fcp_cmnd->fcpCntl3 = READ_DATA;
			phba->fc4InputRequests++;
		}
	} else {
		iocb_cmd->ulpCommand = CMD_FCP_ICMND64_CR;
		iocb_cmd->un.fcpi.fcpi_parm = 0;
		iocb_cmd->ulpPU = 0;
		fcp_cmnd->fcpCntl3 = 0;
		phba->fc4ControlRequests++;
	}
	if (phba->sli_rev == 3 &&
	    !(phba->sli3_options & LPFC_SLI3_BG_ENABLED))
		lpfc_fcpcmd_to_iocb(iocb_cmd->unsli3.fcp_ext.icd, fcp_cmnd);
	/*
	 * Finish initializing those IOCB fields that are independent
	 * of the scsi_cmnd request_buffer
	 */
	piocbq->iocb.ulpContext = pnode->nlp_rpi;
	if (pnode->nlp_fcp_info & NLP_FCP_2_DEVICE)
		piocbq->iocb.ulpFCP2Rcvy = 1;
	else
		piocbq->iocb.ulpFCP2Rcvy = 0;

	piocbq->iocb.ulpClass = (pnode->nlp_fcp_info & 0x0f);
	piocbq->context1 = lpfc_cmd;
	piocbq->iocb_cmpl = lpfc_scsi_cmd_iocb_cmpl;
	piocbq->iocb.ulpTimeout = lpfc_cmd->timeout;
	piocbq->vport = vport;
}

/**
 * lpfc_scsi_prep_task_mgmt_cmd - Convert a SCSI TM cmd to an FCP info unit
 * @vport: The virtual port for which this call is being executed.
 * @lpfc_cmd: Pointer to lpfc_scsi_buf data structure.
 * @lun: Logical unit number.
 * @task_mgmt_cmd: SCSI task management command.
 *
 * This routine creates the FCP information unit corresponding to
 * @task_mgmt_cmd.
 *
 * Return codes:
 *   0 - Error
 *   1 - Success
 **/
static int
lpfc_scsi_prep_task_mgmt_cmd(struct lpfc_vport *vport,
			     struct lpfc_scsi_buf *lpfc_cmd,
			     unsigned int lun,
			     uint8_t task_mgmt_cmd)
{
	struct lpfc_iocbq *piocbq;
	IOCB_t *piocb;
	struct fcp_cmnd *fcp_cmnd;
	struct lpfc_rport_data *rdata = lpfc_cmd->rdata;
	struct lpfc_nodelist *ndlp = rdata->pnode;

	if (!ndlp || !NLP_CHK_NODE_ACT(ndlp) ||
	    ndlp->nlp_state != NLP_STE_MAPPED_NODE)
		return 0;

	piocbq = &(lpfc_cmd->cur_iocbq);
	piocbq->vport = vport;

	piocb = &piocbq->iocb;

	fcp_cmnd = lpfc_cmd->fcp_cmnd;
	/* Clear out any old data in the FCP command area */
	memset(fcp_cmnd, 0, sizeof(struct fcp_cmnd));
	int_to_scsilun(lun, &fcp_cmnd->fcp_lun);
	fcp_cmnd->fcpCntl2 = task_mgmt_cmd;
	if (vport->phba->sli_rev == 3 &&
	    !(vport->phba->sli3_options & LPFC_SLI3_BG_ENABLED))
		lpfc_fcpcmd_to_iocb(piocb->unsli3.fcp_ext.icd, fcp_cmnd);
	piocb->ulpCommand = CMD_FCP_ICMND64_CR;
	piocb->ulpContext = ndlp->nlp_rpi;
	if (ndlp->nlp_fcp_info & NLP_FCP_2_DEVICE) {
		piocb->ulpFCP2Rcvy = 1;
	}
	piocb->ulpClass = (ndlp->nlp_fcp_info & 0x0f);

	/* ulpTimeout is only one byte */
	if (lpfc_cmd->timeout > 0xff) {
		/*
		 * Do not timeout the command at the firmware level.
		 * The driver will provide the timeout mechanism.
		 */
		piocb->ulpTimeout = 0;
	} else
		piocb->ulpTimeout = lpfc_cmd->timeout;

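	/* On SLI-4, mark the SGE for the FCP response as the last in the list */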
	if (vport->phba->sli_rev == LPFC_SLI_REV4)
		lpfc_sli4_set_rsp_sgl_last(vport->phba, lpfc_cmd);

	return 1;
}

/**
 * lpfc_scsi_api_table_setup - Set up the scsi api function jump table
 * @phba: The hba struct for which this call is being executed.
 * @dev_grp: The HBA PCI-Device group number.
 *
 * This routine sets up the SCSI interface API function jump table in the
 * @phba struct.
 * Returns: 0 - success, -ENODEV - failure.
 **/
int
lpfc_scsi_api_table_setup(struct lpfc_hba *phba, uint8_t dev_grp)
{

	phba->lpfc_scsi_unprep_dma_buf = lpfc_scsi_unprep_dma_buf;
	phba->lpfc_scsi_prep_cmnd = lpfc_scsi_prep_cmnd;

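	/*
	 * LPFC_PCI_DEV_LP HBAs use the SLI-3 (_s3) buffer routines;
	 * LPFC_PCI_DEV_OC HBAs use the SLI-4 (_s4) variants.
	 */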
	switch (dev_grp) {
	case LPFC_PCI_DEV_LP:
		phba->lpfc_new_scsi_buf = lpfc_new_scsi_buf_s3;
		phba->lpfc_scsi_prep_dma_buf = lpfc_scsi_prep_dma_buf_s3;
		phba->lpfc_release_scsi_buf = lpfc_release_scsi_buf_s3;
		phba->lpfc_get_scsi_buf = lpfc_get_scsi_buf_s3;
		break;
	case LPFC_PCI_DEV_OC:
		phba->lpfc_new_scsi_buf = lpfc_new_scsi_buf_s4;
		phba->lpfc_scsi_prep_dma_buf = lpfc_scsi_prep_dma_buf_s4;
		phba->lpfc_release_scsi_buf = lpfc_release_scsi_buf_s4;
		phba->lpfc_get_scsi_buf = lpfc_get_scsi_buf_s4;
		break;
	default:
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"1418 Invalid HBA PCI-device group: 0x%x\n",
				dev_grp);
		return -ENODEV;
	}
	phba->lpfc_rampdown_queue_depth = lpfc_rampdown_queue_depth;
	phba->lpfc_scsi_cmd_iocb_cmpl = lpfc_scsi_cmd_iocb_cmpl;
	return 0;
}

/**
 * lpfc_tskmgmt_def_cmpl - IOCB completion routine for task management command
 * @phba: The hba for which this call is being executed.
 * @cmdiocbq: Pointer to lpfc_iocbq data structure.
 * @rspiocbq: Pointer to lpfc_iocbq data structure.
 *
 * This routine is the IOCB completion routine for device reset and target
 * reset. It releases the scsi buffer associated with the command.
 **/
static void
lpfc_tskmgmt_def_cmpl(struct lpfc_hba *phba,
		      struct lpfc_iocbq *cmdiocbq,
		      struct lpfc_iocbq *rspiocbq)
{
	struct lpfc_scsi_buf *lpfc_cmd =
		(struct lpfc_scsi_buf *) cmdiocbq->context1;
	if (lpfc_cmd)
		lpfc_release_scsi_buf(phba, lpfc_cmd);
	return;
}

/**
 * lpfc_info - Info entry point of the scsi_host_template data structure
 * @host: The scsi host for which this call is being executed.
 *
 * This routine provides module information about the hba.
 *
 * Return code:
 *   Pointer to char - Success.
 **/
const char *
lpfc_info(struct Scsi_Host *host)
{
	struct lpfc_vport *vport = (struct lpfc_vport *) host->hostdata;
	struct lpfc_hba *phba = vport->phba;
	int len;
	static char lpfcinfobuf[384];

	memset(lpfcinfobuf, 0, 384);
	if (phba && phba->pcidev) {
		strncpy(lpfcinfobuf, phba->ModelDesc, 256);
		len = strlen(lpfcinfobuf);
		snprintf(lpfcinfobuf + len,
			 384-len,
			 " on PCI bus %02x device %02x irq %d",
			 phba->pcidev->bus->number,
			 phba->pcidev->devfn,
			 phba->pcidev->irq);
		len = strlen(lpfcinfobuf);
		if (phba->Port[0]) {
			snprintf(lpfcinfobuf + len,
				 384-len,
				 " port %s",
				 phba->Port);
		}
		len = strlen(lpfcinfobuf);
		if (phba->sli4_hba.link_state.logical_speed) {
			snprintf(lpfcinfobuf + len,
				 384-len,
				 " Logical Link Speed: %d Mbps",
				 phba->sli4_hba.link_state.logical_speed * 10);
		}
	}
	return lpfcinfobuf;
}

/**
 * lpfc_poll_rearm_timer - Routine to modify the fcp_poll timer of the hba
 * @phba: The hba for which this call is being executed.
 *
 * This routine rearms the fcp_poll_timer of @phba to fire cfg_poll_tmo
 * milliseconds from now.
 **/
static __inline__ void lpfc_poll_rearm_timer(struct lpfc_hba * phba)
{
	unsigned long poll_tmo_expires =
		(jiffies + msecs_to_jiffies(phba->cfg_poll_tmo));

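	/* Only rearm the timer while commands remain active on the FCP ring */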
	if (phba->sli.ring[LPFC_FCP_RING].txcmplq_cnt)
		mod_timer(&phba->fcp_poll_timer,
			  poll_tmo_expires);
}

/**
 * lpfc_poll_start_timer - Routine to start the fcp_poll_timer of the HBA
 * @phba: The hba for which this call is being executed.
 *
 * This routine starts the fcp_poll_timer of @phba.
 **/
void lpfc_poll_start_timer(struct lpfc_hba * phba)
{
	lpfc_poll_rearm_timer(phba);
}

/**
 * lpfc_poll_timeout - Restart the polling timer
 * @ptr: Map to lpfc_hba data structure pointer.
 *
 * This routine services the fcp ring and restarts the fcp_poll timer when
 * FCP ring polling is enabled and the FCP ring interrupt is disabled.
 **/
void lpfc_poll_timeout(unsigned long ptr)
{
	struct lpfc_hba *phba = (struct lpfc_hba *) ptr;

	if (phba->cfg_poll & ENABLE_FCP_RING_POLLING) {
		lpfc_sli_handle_fast_ring_event(phba,
			&phba->sli.ring[LPFC_FCP_RING], HA_R0RE_REQ);

		if (phba->cfg_poll & DISABLE_FCP_RING_INT)
			lpfc_poll_rearm_timer(phba);
	}
}

/**
 * lpfc_queuecommand_lck - scsi_host_template queuecommand entry point
 * @cmnd: Pointer to scsi_cmnd data structure.
 * @done: Pointer to the done routine.
 *
 * The scsi midlayer calls this routine (through lpfc_queuecommand) to submit
 * a scsi command. This routine prepares an IOCB from the scsi command and
 * issues it to the HBA.
 *
 * Return value :
 *   0 - Success
 *   SCSI_MLQUEUE_HOST_BUSY - Block all devices served by this host
 *                            temporarily.
 **/
static int
lpfc_queuecommand_lck(struct scsi_cmnd *cmnd, void (*done) (struct scsi_cmnd *))
{
	struct Scsi_Host *shost = cmnd->device->host;
	struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
	struct lpfc_hba *phba = vport->phba;
	struct lpfc_rport_data *rdata = cmnd->device->hostdata;
	struct lpfc_nodelist *ndlp;
	struct lpfc_scsi_buf *lpfc_cmd;
	struct fc_rport *rport = starget_to_rport(scsi_target(cmnd->device));
	int err;

	err = fc_remote_port_chkready(rport);
	if (err) {
		cmnd->result = err;
		goto out_fail_command;
	}
	ndlp = rdata->pnode;

	if (!(phba->sli3_options & LPFC_SLI3_BG_ENABLED) &&
	    scsi_get_prot_op(cmnd) != SCSI_PROT_NORMAL) {

		lpfc_printf_log(phba, KERN_ERR, LOG_BG,
				"9058 BLKGRD: ERROR: rcvd protected cmd:%02x"
				" op:%02x str=%s without registering for"
				" BlockGuard - Rejecting command\n",
				cmnd->cmnd[0], scsi_get_prot_op(cmnd),
				dif_op_str[scsi_get_prot_op(cmnd)]);
		goto out_fail_command;
	}

	/*
	 * Catch race where our node has transitioned, but the
	 * transport is still transitioning.
	 */
	if (!ndlp || !NLP_CHK_NODE_ACT(ndlp)) {
		cmnd->result = ScsiResult(DID_TRANSPORT_DISRUPTED, 0);
		goto out_fail_command;
	}
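	/*
	 * Per-target throttle: if this node already has cmd_qdepth commands
	 * in flight, ask the midlayer to retry later.
	 */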
	if (atomic_read(&ndlp->cmd_pending) >= ndlp->cmd_qdepth)
		goto out_host_busy;

	lpfc_cmd = lpfc_get_scsi_buf(phba, ndlp);
	if (lpfc_cmd == NULL) {
		lpfc_rampdown_queue_depth(phba);

		lpfc_printf_vlog(vport, KERN_INFO, LOG_FCP,
				 "0707 driver's buffer pool is empty, "
				 "IO busied\n");
		goto out_host_busy;
	}

	/*
	 * Store the midlayer's command structure for the completion phase
	 * and complete the command initialization.
	 */
	lpfc_cmd->pCmd = cmnd;
	lpfc_cmd->rdata = rdata;
	lpfc_cmd->timeout = 0;
	lpfc_cmd->start_time = jiffies;
	cmnd->host_scribble = (unsigned char *)lpfc_cmd;
	cmnd->scsi_done = done;

	if (scsi_get_prot_op(cmnd) != SCSI_PROT_NORMAL) {
		if (vport->phba->cfg_enable_bg) {
			lpfc_printf_vlog(vport, KERN_WARNING, LOG_BG,
				"9033 BLKGRD: rcvd protected cmd:%02x op:%02x "
				"str=%s\n",
				cmnd->cmnd[0], scsi_get_prot_op(cmnd),
				dif_op_str[scsi_get_prot_op(cmnd)]);
			lpfc_printf_vlog(vport, KERN_WARNING, LOG_BG,
				"9034 BLKGRD: CDB: %02x %02x %02x %02x %02x "
				"%02x %02x %02x %02x %02x\n",
				cmnd->cmnd[0], cmnd->cmnd[1], cmnd->cmnd[2],
				cmnd->cmnd[3], cmnd->cmnd[4], cmnd->cmnd[5],
				cmnd->cmnd[6], cmnd->cmnd[7], cmnd->cmnd[8],
				cmnd->cmnd[9]);
			if (cmnd->cmnd[0] == READ_10)
				lpfc_printf_vlog(vport, KERN_WARNING, LOG_BG,
					"9035 BLKGRD: READ @ sector %llu, "
					"count %u\n",
					(unsigned long long)scsi_get_lba(cmnd),
					blk_rq_sectors(cmnd->request));
			else if (cmnd->cmnd[0] == WRITE_10)
				lpfc_printf_vlog(vport, KERN_WARNING, LOG_BG,
					"9036 BLKGRD: WRITE @ sector %llu, "
					"count %u cmd=%p\n",
					(unsigned long long)scsi_get_lba(cmnd),
					blk_rq_sectors(cmnd->request),
					cmnd);
		}

		err = lpfc_bg_scsi_prep_dma_buf(phba, lpfc_cmd);
	} else {
		if (vport->phba->cfg_enable_bg) {
			lpfc_printf_vlog(vport, KERN_WARNING, LOG_BG,
					 "9038 BLKGRD: rcvd unprotected cmd:"
					 "%02x op:%02x str=%s\n",
					 cmnd->cmnd[0], scsi_get_prot_op(cmnd),
					 dif_op_str[scsi_get_prot_op(cmnd)]);
			lpfc_printf_vlog(vport, KERN_WARNING, LOG_BG,
					 "9039 BLKGRD: CDB: %02x %02x %02x "
					 "%02x %02x %02x %02x %02x %02x %02x\n",
					 cmnd->cmnd[0], cmnd->cmnd[1],
					 cmnd->cmnd[2], cmnd->cmnd[3],
					 cmnd->cmnd[4], cmnd->cmnd[5],
					 cmnd->cmnd[6], cmnd->cmnd[7],
					 cmnd->cmnd[8], cmnd->cmnd[9]);
			if (cmnd->cmnd[0] == READ_10)
				lpfc_printf_vlog(vport, KERN_WARNING, LOG_BG,
					"9040 dbg: READ @ sector %llu, "
					"count %u\n",
					(unsigned long long)scsi_get_lba(cmnd),
					blk_rq_sectors(cmnd->request));
			else if (cmnd->cmnd[0] == WRITE_10)
				lpfc_printf_vlog(vport, KERN_WARNING, LOG_BG,
					"9041 dbg: WRITE @ sector %llu, "
					"count %u cmd=%p\n",
					(unsigned long long)scsi_get_lba(cmnd),
					blk_rq_sectors(cmnd->request), cmnd);
			else
				lpfc_printf_vlog(vport, KERN_WARNING, LOG_BG,
					"9042 dbg: parser not implemented\n");
		}
		err = lpfc_scsi_prep_dma_buf(phba, lpfc_cmd);
	}

	if (err)
		goto out_host_busy_free_buf;

	lpfc_scsi_prep_cmnd(vport, lpfc_cmd, ndlp);

	atomic_inc(&ndlp->cmd_pending);
	err = lpfc_sli_issue_iocb(phba, LPFC_FCP_RING,
				  &lpfc_cmd->cur_iocbq, SLI_IOCB_RET_IOCB);
	if (err) {
		atomic_dec(&ndlp->cmd_pending);
		goto out_host_busy_free_buf;
	}
	if (phba->cfg_poll & ENABLE_FCP_RING_POLLING) {
		spin_unlock(shost->host_lock);
		lpfc_sli_handle_fast_ring_event(phba,
			&phba->sli.ring[LPFC_FCP_RING], HA_R0RE_REQ);

		spin_lock(shost->host_lock);
		if (phba->cfg_poll & DISABLE_FCP_RING_INT)
			lpfc_poll_rearm_timer(phba);
	}

	return 0;

 out_host_busy_free_buf:
	lpfc_scsi_unprep_dma_buf(phba, lpfc_cmd);
	lpfc_release_scsi_buf(phba, lpfc_cmd);
 out_host_busy:
	return SCSI_MLQUEUE_HOST_BUSY;

 out_fail_command:
	done(cmnd);
	return 0;
}

static DEF_SCSI_QCMD(lpfc_queuecommand)

/**
 * lpfc_abort_handler - scsi_host_template eh_abort_handler entry point
 * @cmnd: Pointer to scsi_cmnd data structure.
 *
 * This routine aborts @cmnd pending in the base driver.
 *
 * Return code :
 *   0x2003 - Error
 *   0x2002 - Success
 **/
static int
lpfc_abort_handler(struct scsi_cmnd *cmnd)
{
	struct Scsi_Host *shost = cmnd->device->host;
	struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
	struct lpfc_hba *phba = vport->phba;
	struct lpfc_iocbq *iocb;
	struct lpfc_iocbq *abtsiocb;
	struct lpfc_scsi_buf *lpfc_cmd;
	IOCB_t *cmd, *icmd;
	int ret = SUCCESS;
	DECLARE_WAIT_QUEUE_HEAD_ONSTACK(waitq);

	ret = fc_block_scsi_eh(cmnd);
	if (ret)
		return ret;
	lpfc_cmd = (struct lpfc_scsi_buf *)cmnd->host_scribble;
	if (!lpfc_cmd) {
		lpfc_printf_vlog(vport, KERN_WARNING, LOG_FCP,
			 "2873 SCSI Layer I/O Abort Request IO CMPL Status "
			 "x%x ID %d "
			 "LUN %d snum %#lx\n", ret, cmnd->device->id,
			 cmnd->device->lun, cmnd->serial_number);
		return SUCCESS;
	}

	/*
	 * If pCmd field of the corresponding lpfc_scsi_buf structure
	 * points to a different SCSI command, then the driver has
	 * already completed this command, but the midlayer did not
	 * see the completion before the eh fired. Just return
	 * SUCCESS.
	 */
	iocb = &lpfc_cmd->cur_iocbq;
	if (lpfc_cmd->pCmd != cmnd)
		goto out;

	BUG_ON(iocb->context1 != lpfc_cmd);

	abtsiocb = lpfc_sli_get_iocbq(phba);
	if (abtsiocb == NULL) {
		ret = FAILED;
		goto out;
	}

	/*
	 * The scsi command cannot be in the txq and it is in flight because
	 * the pCmd is still pointing at the SCSI command we have to abort.
	 * There is no need to search the txcmplq. Just send an abort to the
	 * FW.
	 */
	cmd = &iocb->iocb;
	icmd = &abtsiocb->iocb;
	icmd->un.acxri.abortType = ABORT_TYPE_ABTS;
	icmd->un.acxri.abortContextTag = cmd->ulpContext;
	if (phba->sli_rev == LPFC_SLI_REV4)
		icmd->un.acxri.abortIoTag = iocb->sli4_xritag;
	else
		icmd->un.acxri.abortIoTag = cmd->ulpIoTag;

	icmd->ulpLe = 1;
	icmd->ulpClass = cmd->ulpClass;

	/* ABTS WQE must go to the same WQ as the WQE to be aborted */
	abtsiocb->fcp_wqidx = iocb->fcp_wqidx;
	abtsiocb->iocb_flag |= LPFC_USE_FCPWQIDX;

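	/*
	 * Send a real ABTS only while the link is up; if the link is down,
	 * just close the exchange locally on the HBA.
	 */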
	if (lpfc_is_link_up(phba))
		icmd->ulpCommand = CMD_ABORT_XRI_CN;
	else
		icmd->ulpCommand = CMD_CLOSE_XRI_CN;

	abtsiocb->iocb_cmpl = lpfc_sli_abort_fcp_cmpl;
	abtsiocb->vport = vport;
	if (lpfc_sli_issue_iocb(phba, LPFC_FCP_RING, abtsiocb, 0) ==
	    IOCB_ERROR) {
		lpfc_sli_release_iocbq(phba, abtsiocb);
		ret = FAILED;
		goto out;
	}

	if (phba->cfg_poll & DISABLE_FCP_RING_INT)
		lpfc_sli_handle_fast_ring_event(phba,
			&phba->sli.ring[LPFC_FCP_RING], HA_R0RE_REQ);

	lpfc_cmd->waitq = &waitq;
	/* Wait for abort to complete */
	wait_event_timeout(waitq,
			   (lpfc_cmd->pCmd != cmnd),
			   (2*vport->cfg_devloss_tmo*HZ));

	spin_lock_irq(shost->host_lock);
	lpfc_cmd->waitq = NULL;
	spin_unlock_irq(shost->host_lock);

	if (lpfc_cmd->pCmd == cmnd) {
		ret = FAILED;
		lpfc_printf_vlog(vport, KERN_ERR, LOG_FCP,
				 "0748 abort handler timed out waiting "
				 "for abort to complete: ret %#x, ID %d, "
				 "LUN %d, snum %#lx\n",
				 ret, cmnd->device->id, cmnd->device->lun,
				 cmnd->serial_number);
	}

 out:
	lpfc_printf_vlog(vport, KERN_WARNING, LOG_FCP,
			 "0749 SCSI Layer I/O Abort Request Status x%x ID %d "
			 "LUN %d snum %#lx\n", ret, cmnd->device->id,
			 cmnd->device->lun, cmnd->serial_number);
	return ret;
}

static char *
lpfc_taskmgmt_name(uint8_t task_mgmt_cmd)
{
	switch (task_mgmt_cmd) {
	case FCP_ABORT_TASK_SET:
		return "ABORT_TASK_SET";
	case FCP_CLEAR_TASK_SET:
		return "FCP_CLEAR_TASK_SET";
	case FCP_BUS_RESET:
		return "FCP_BUS_RESET";
	case FCP_LUN_RESET:
		return "FCP_LUN_RESET";
	case FCP_TARGET_RESET:
		return "FCP_TARGET_RESET";
	case FCP_CLEAR_ACA:
		return "FCP_CLEAR_ACA";
	case FCP_TERMINATE_TASK:
		return "FCP_TERMINATE_TASK";
	default:
		return "unknown";
	}
}

/**
 * lpfc_send_taskmgmt - Generic SCSI Task Mgmt Handler
 * @vport: The virtual port for which this call is being executed.
 * @rdata: Pointer to the remote port's local data.
 * @tgt_id: Target ID of the remote device.
 * @lun_id: Lun number for the TMF.
 * @task_mgmt_cmd: Type of TMF to send.
 *
 * This routine builds and sends a TMF (SCSI Task Mgmt Function) to
 * a remote port.
 *
 * Return Code:
 *   0x2003 - Error (FAILED)
 *   0x2002 - Success (SUCCESS)
 **/
static int
lpfc_send_taskmgmt(struct lpfc_vport *vport, struct lpfc_rport_data *rdata,
		   unsigned tgt_id, unsigned int lun_id,
		   uint8_t task_mgmt_cmd)
{
	struct lpfc_hba *phba = vport->phba;
	struct lpfc_scsi_buf *lpfc_cmd;
	struct lpfc_iocbq *iocbq;
	struct lpfc_iocbq *iocbqrsp;
	struct lpfc_nodelist *pnode = rdata->pnode;
	int ret;
	int status;

	if (!pnode || !NLP_CHK_NODE_ACT(pnode))
		return FAILED;

	lpfc_cmd = lpfc_get_scsi_buf(phba, rdata->pnode);
	if (lpfc_cmd == NULL)
		return FAILED;
	lpfc_cmd->timeout = 60;
	lpfc_cmd->rdata = rdata;

	status = lpfc_scsi_prep_task_mgmt_cmd(vport, lpfc_cmd, lun_id,
					      task_mgmt_cmd);
	if (!status) {
		lpfc_release_scsi_buf(phba, lpfc_cmd);
		return FAILED;
	}

	iocbq = &lpfc_cmd->cur_iocbq;
	iocbqrsp = lpfc_sli_get_iocbq(phba);
	if (iocbqrsp == NULL) {
		lpfc_release_scsi_buf(phba, lpfc_cmd);
		return FAILED;
	}

	lpfc_printf_vlog(vport, KERN_INFO, LOG_FCP,
			 "0702 Issue %s to TGT %d LUN %d "
			 "rpi x%x nlp_flag x%x\n",
			 lpfc_taskmgmt_name(task_mgmt_cmd), tgt_id, lun_id,
			 pnode->nlp_rpi, pnode->nlp_flag);

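	/* Issue the TMF synchronously and wait for the response IOCB */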
	status = lpfc_sli_issue_iocb_wait(phba, LPFC_FCP_RING,
					  iocbq, iocbqrsp, lpfc_cmd->timeout);
	if (status != IOCB_SUCCESS) {
		if (status == IOCB_TIMEDOUT) {
			iocbq->iocb_cmpl = lpfc_tskmgmt_def_cmpl;
			ret = TIMEOUT_ERROR;
		} else
			ret = FAILED;
		lpfc_cmd->status = IOSTAT_DRIVER_REJECT;
		lpfc_printf_vlog(vport, KERN_ERR, LOG_FCP,
			 "0727 TMF %s to TGT %d LUN %d failed (%d, %d)\n",
			 lpfc_taskmgmt_name(task_mgmt_cmd),
			 tgt_id, lun_id, iocbqrsp->iocb.ulpStatus,
			 iocbqrsp->iocb.un.ulpWord[4]);
	} else if (status == IOCB_BUSY)
		ret = FAILED;
	else
		ret = SUCCESS;

	lpfc_sli_release_iocbq(phba, iocbqrsp);

	if (ret != TIMEOUT_ERROR)
		lpfc_release_scsi_buf(phba, lpfc_cmd);

	return ret;
}

/**
 * lpfc_chk_tgt_mapped - Check whether the scsi target is mapped
 * @vport: The virtual port to check on.
 * @cmnd: Pointer to scsi_cmnd data structure.
 *
 * This routine delays until the scsi target (aka rport) for the
 * command exists (is present and logged in) or we declare it non-existent.
 *
 * Return code :
 *  0x2003 - Error (FAILED)
 *  0x2002 - Success (SUCCESS)
 **/
static int
lpfc_chk_tgt_mapped(struct lpfc_vport *vport, struct scsi_cmnd *cmnd)
{
	struct lpfc_rport_data *rdata = cmnd->device->hostdata;
	struct lpfc_nodelist *pnode;
	unsigned long later;

	if (!rdata) {
		lpfc_printf_vlog(vport, KERN_INFO, LOG_FCP,
			"0797 Tgt Map rport failure: rdata x%p\n", rdata);
		return FAILED;
	}
	pnode = rdata->pnode;
	/*
	 * If target is not in a MAPPED state, delay until
	 * target is rediscovered or devloss timeout expires.
	 */
	later = msecs_to_jiffies(2 * vport->cfg_devloss_tmo * 1000) + jiffies;
	while (time_after(later, jiffies)) {
		if (!pnode || !NLP_CHK_NODE_ACT(pnode))
			return FAILED;
		if (pnode->nlp_state == NLP_STE_MAPPED_NODE)
			return SUCCESS;
		schedule_timeout_uninterruptible(msecs_to_jiffies(500));
		rdata = cmnd->device->hostdata;
		if (!rdata)
			return FAILED;
		pnode = rdata->pnode;
	}
	if (!pnode || !NLP_CHK_NODE_ACT(pnode) ||
	    (pnode->nlp_state != NLP_STE_MAPPED_NODE))
		return FAILED;
	return SUCCESS;
}

/**
 * lpfc_reset_flush_io_context - Flush I/O contexts after a reset
 * @vport: The virtual port (scsi_host) for the flush context.
 * @tgt_id: If aborting by target context - specifies the target id.
 * @lun_id: If aborting by lun context - specifies the lun id.
 * @context: Specifies the context level to flush at.
 *
 * After a reset condition via TMF, we need to flush orphaned i/o
 * contexts from the adapter. This routine aborts any contexts
 * outstanding, then waits for their completions. The wait is
 * bounded by devloss_tmo though.
 *
 * Return code :
 *  0x2003 - Error (FAILED)
 *  0x2002 - Success (SUCCESS)
 **/
static int
lpfc_reset_flush_io_context(struct lpfc_vport *vport, uint16_t tgt_id,
			    uint64_t lun_id, lpfc_ctx_cmd context)
{
	struct lpfc_hba *phba = vport->phba;
	unsigned long later;
	int cnt;

	cnt = lpfc_sli_sum_iocb(vport, tgt_id, lun_id, context);
	if (cnt)
		lpfc_sli_abort_iocb(vport, &phba->sli.ring[phba->sli.fcp_ring],
				    tgt_id, lun_id, context);
	later = msecs_to_jiffies(2 * vport->cfg_devloss_tmo * 1000) + jiffies;
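	/* Poll every 20 ms, for up to 2 * devloss_tmo, for the I/O to drain */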
	while (time_after(later, jiffies) && cnt) {
		schedule_timeout_uninterruptible(msecs_to_jiffies(20));
		cnt = lpfc_sli_sum_iocb(vport, tgt_id, lun_id, context);
	}
	if (cnt) {
		lpfc_printf_vlog(vport, KERN_ERR, LOG_FCP,
			"0724 I/O flush failure for context %s : cnt x%x\n",
			((context == LPFC_CTX_LUN) ? "LUN" :
			 ((context == LPFC_CTX_TGT) ? "TGT" :
			  ((context == LPFC_CTX_HOST) ? "HOST" : "Unknown"))),
			cnt);
		return FAILED;
	}
	return SUCCESS;
}

/**
 * lpfc_device_reset_handler - scsi_host_template eh_device_reset entry point
 * @cmnd: Pointer to scsi_cmnd data structure.
 *
 * This routine does a device reset by sending a LUN_RESET task management
 * command.
 *
 * Return code :
 *  0x2003 - Error (FAILED)
 *  0x2002 - Success (SUCCESS)
 **/
static int
lpfc_device_reset_handler(struct scsi_cmnd *cmnd)
{
	struct Scsi_Host *shost = cmnd->device->host;
	struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
	struct lpfc_rport_data *rdata = cmnd->device->hostdata;
	struct lpfc_nodelist *pnode;
	unsigned tgt_id = cmnd->device->id;
	unsigned int lun_id = cmnd->device->lun;
	struct lpfc_scsi_event_header scsi_event;
	int status;

	if (!rdata) {
		lpfc_printf_vlog(vport, KERN_ERR, LOG_FCP,
			"0798 Device Reset rport failure: rdata x%p\n", rdata);
		return FAILED;
	}
	pnode = rdata->pnode;
	status = fc_block_scsi_eh(cmnd);
	if (status)
		return status;

	status = lpfc_chk_tgt_mapped(vport, cmnd);
	if (status == FAILED) {
		lpfc_printf_vlog(vport, KERN_ERR, LOG_FCP,
			"0721 Device Reset rport failure: rdata x%p\n", rdata);
		return FAILED;
	}

	scsi_event.event_type = FC_REG_SCSI_EVENT;
	scsi_event.subcategory = LPFC_EVENT_LUNRESET;
	scsi_event.lun = lun_id;
	memcpy(scsi_event.wwpn, &pnode->nlp_portname, sizeof(struct lpfc_name));
	memcpy(scsi_event.wwnn, &pnode->nlp_nodename, sizeof(struct lpfc_name));

	fc_host_post_vendor_event(shost, fc_get_event_number(),
		sizeof(scsi_event), (char *)&scsi_event, LPFC_NL_VENDOR_ID);

	status = lpfc_send_taskmgmt(vport, rdata, tgt_id, lun_id,
				    FCP_LUN_RESET);

	lpfc_printf_vlog(vport, KERN_ERR, LOG_FCP,
			 "0713 SCSI layer issued Device Reset (%d, %d) "
			 "return x%x\n", tgt_id, lun_id, status);

	/*
	 * We have to clean up i/o as : they may be orphaned by the TMF;
	 * or if the TMF failed, they may be in an indeterminate state.
	 * So, continue on.
	 * We will report success if all the i/o aborts successfully.
	 */
	status = lpfc_reset_flush_io_context(vport, tgt_id, lun_id,
					     LPFC_CTX_LUN);
	return status;
}

/**
 * lpfc_target_reset_handler - scsi_host_template eh_target_reset entry point
 * @cmnd: Pointer to scsi_cmnd data structure.
 *
 * This routine does a target reset by sending a TARGET_RESET task management
 * command.
 *
 * Return code :
 *  0x2003 - Error (FAILED)
 *  0x2002 - Success (SUCCESS)
 **/
static int
lpfc_target_reset_handler(struct scsi_cmnd *cmnd)
{
	struct Scsi_Host *shost = cmnd->device->host;
	struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
	struct lpfc_rport_data *rdata = cmnd->device->hostdata;
	struct lpfc_nodelist *pnode;
	unsigned tgt_id = cmnd->device->id;
	unsigned int lun_id = cmnd->device->lun;
	struct lpfc_scsi_event_header scsi_event;
	int status;

	if (!rdata) {
		lpfc_printf_vlog(vport, KERN_ERR, LOG_FCP,
			"0799 Target Reset rport failure: rdata x%p\n", rdata);
		return FAILED;
	}
	pnode = rdata->pnode;
	status = fc_block_scsi_eh(cmnd);
	if (status)
		return status;

	status = lpfc_chk_tgt_mapped(vport, cmnd);
	if (status == FAILED) {
		lpfc_printf_vlog(vport, KERN_ERR, LOG_FCP,
			"0722 Target Reset rport failure: rdata x%p\n", rdata);
		return FAILED;
	}

	scsi_event.event_type = FC_REG_SCSI_EVENT;
	scsi_event.subcategory = LPFC_EVENT_TGTRESET;
	scsi_event.lun = 0;
	memcpy(scsi_event.wwpn, &pnode->nlp_portname, sizeof(struct lpfc_name));
	memcpy(scsi_event.wwnn, &pnode->nlp_nodename, sizeof(struct lpfc_name));

	fc_host_post_vendor_event(shost, fc_get_event_number(),
		sizeof(scsi_event), (char *)&scsi_event, LPFC_NL_VENDOR_ID);

	status = lpfc_send_taskmgmt(vport, rdata, tgt_id, lun_id,
				    FCP_TARGET_RESET);

	lpfc_printf_vlog(vport, KERN_ERR, LOG_FCP,
			 "0723 SCSI layer issued Target Reset (%d, %d) "
			 "return x%x\n", tgt_id, lun_id, status);

	/*
	 * We have to clean up i/o as : they may be orphaned by the TMF;
	 * or if the TMF failed, they may be in an indeterminate state.
	 * So, continue on.
	 * We will report success if all the i/o aborts successfully.
	 */
	status = lpfc_reset_flush_io_context(vport, tgt_id, lun_id,
					     LPFC_CTX_TGT);
	return status;
}

/**
 * lpfc_bus_reset_handler - scsi_host_template eh_bus_reset_handler entry point
 * @cmnd: Pointer to scsi_cmnd data structure.
 *
 * This routine does a target reset to all targets on @cmnd->device->host.
 * This emulates Parallel SCSI Bus Reset Semantics.
 *
 * Return code :
 *  0x2003 - Error (FAILED)
 *  0x2002 - Success (SUCCESS)
 **/
static int
lpfc_bus_reset_handler(struct scsi_cmnd *cmnd)
{
	struct Scsi_Host *shost = cmnd->device->host;
	struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
	struct lpfc_nodelist *ndlp = NULL;
	struct lpfc_scsi_event_header scsi_event;
	int match;
	int ret = SUCCESS, status, i;

	scsi_event.event_type = FC_REG_SCSI_EVENT;
	scsi_event.subcategory = LPFC_EVENT_BUSRESET;
	scsi_event.lun = 0;
	memcpy(scsi_event.wwpn, &vport->fc_portname, sizeof(struct lpfc_name));
	memcpy(scsi_event.wwnn, &vport->fc_nodename, sizeof(struct lpfc_name));

	fc_host_post_vendor_event(shost, fc_get_event_number(),
		sizeof(scsi_event), (char *)&scsi_event, LPFC_NL_VENDOR_ID);

	ret = fc_block_scsi_eh(cmnd);
	if (ret)
		return ret;

	/*
	 * Since the driver manages a single bus device, reset all
	 * targets known to the driver.  Should any target reset
	 * fail, this routine returns failure to the midlayer.
	 */
	for (i = 0; i < LPFC_MAX_TARGET; i++) {
		/* Search for mapped node by target ID */
		match = 0;
		spin_lock_irq(shost->host_lock);
		list_for_each_entry(ndlp, &vport->fc_nodes, nlp_listp) {
			if (!NLP_CHK_NODE_ACT(ndlp))
				continue;
			if (ndlp->nlp_state == NLP_STE_MAPPED_NODE &&
			    ndlp->nlp_sid == i &&
			    ndlp->rport) {
				match = 1;
				break;
			}
		}
		spin_unlock_irq(shost->host_lock);
		if (!match)
			continue;

		status = lpfc_send_taskmgmt(vport, ndlp->rport->dd_data,
					    i, 0, FCP_TARGET_RESET);

		if (status != SUCCESS) {
			lpfc_printf_vlog(vport, KERN_ERR, LOG_FCP,
					 "0700 Bus Reset on target %d failed\n",
					 i);
			ret = FAILED;
		}
	}

	/*
	 * We have to clean up i/o as : they may be orphaned by the TMFs
	 * above; or if any of the TMFs failed, they may be in an
	 * indeterminate state.
	 * We will report success if all the i/o aborts successfully.
	 */
	status = lpfc_reset_flush_io_context(vport, 0, 0, LPFC_CTX_HOST);
	if (status != SUCCESS)
		ret = FAILED;

	lpfc_printf_vlog(vport, KERN_ERR, LOG_FCP,
			 "0714 SCSI layer issued Bus Reset Data: x%x\n", ret);
	return ret;
}

/**
 * lpfc_slave_alloc - scsi_host_template slave_alloc entry point
 * @sdev: Pointer to scsi_device.
 *
 * This routine populates the cmds_per_lun count + 2 scsi_bufs into this
 * host's globally available list of scsi buffers. It also makes sure that
 * no more scsi buffers are allocated than the HBA limit conveyed to the
 * midlayer. This list of scsi bufs exists for the lifetime of the driver.
 *
 * Return codes:
 *   non-0 - Error
 *   0 - Success
 **/
static int
lpfc_slave_alloc(struct scsi_device *sdev)
{
	struct lpfc_vport *vport = (struct lpfc_vport *) sdev->host->hostdata;
	struct lpfc_hba *phba = vport->phba;
	struct fc_rport *rport = starget_to_rport(scsi_target(sdev));
	uint32_t total = 0;
	uint32_t num_to_alloc = 0;
	int num_allocated = 0;
	uint32_t sdev_cnt;

	if (!rport || fc_remote_port_chkready(rport))
		return -ENXIO;

	sdev->hostdata = rport->dd_data;
	sdev_cnt = atomic_inc_return(&phba->sdev_cnt);

	/*
	 * Populate the cmds_per_lun count of scsi_bufs into this host's
	 * globally available list of scsi buffers.  Don't allocate more
	 * than the HBA limit conveyed to the midlayer via the host
	 * structure.  The formula accounts for the lun_queue_depth +
	 * error handlers + 1 extra.
	 */
	total = phba->total_scsi_bufs;
	num_to_alloc = vport->cfg_lun_queue_depth + 2;

	/* If the already-allocated buffers suffice, do nothing */
	if ((sdev_cnt * (vport->cfg_lun_queue_depth + 2)) < total)
		return 0;

	/* Allow some exchanges to be available always to complete discovery */
	if (total >= phba->cfg_hba_queue_depth - LPFC_DISC_IOCB_BUFF_COUNT) {
		lpfc_printf_vlog(vport, KERN_WARNING, LOG_FCP,
				 "0704 At limitation of %d preallocated "
				 "command buffers\n", total);
		return 0;

	} else if (total + num_to_alloc >
		   phba->cfg_hba_queue_depth - LPFC_DISC_IOCB_BUFF_COUNT) {
		lpfc_printf_vlog(vport, KERN_WARNING, LOG_FCP,
				 "0705 Allocation request of %d "
				 "command buffers will exceed max of %d. "
				 "Reducing allocation request to %d.\n",
				 num_to_alloc, phba->cfg_hba_queue_depth,
				 (phba->cfg_hba_queue_depth - total));
		num_to_alloc = phba->cfg_hba_queue_depth - total;
	}
	num_allocated = lpfc_new_scsi_buf(vport, num_to_alloc);
	if (num_to_alloc != num_allocated) {
		lpfc_printf_vlog(vport, KERN_WARNING, LOG_FCP,
				 "0708 Allocation request of %d "
				 "command buffers did not succeed. "
				 "Allocated %d buffers.\n",
				 num_to_alloc, num_allocated);
	}
	if (num_allocated > 0)
		phba->total_scsi_bufs += num_allocated;
	return 0;
}

/**
 * lpfc_slave_configure - scsi_host_template slave_configure entry point
 * @sdev: Pointer to scsi_device.
 *
 * This routine configures the following items:
 *   - Tag command queuing support for @sdev, if supported.
 *   - SLI polling of the fcp ring, if the ENABLE_FCP_RING_POLLING flag is
 *     set.
 *
 * Return codes:
 *   0 - Success
 **/
static int
lpfc_slave_configure(struct scsi_device *sdev)
{
	struct lpfc_vport *vport = (struct lpfc_vport *) sdev->host->hostdata;
	struct lpfc_hba *phba = vport->phba;

	if (sdev->tagged_supported)
		scsi_activate_tcq(sdev, vport->cfg_lun_queue_depth);
	else
		scsi_deactivate_tcq(sdev, vport->cfg_lun_queue_depth);

	if (phba->cfg_poll & ENABLE_FCP_RING_POLLING) {
		lpfc_sli_handle_fast_ring_event(phba,
			&phba->sli.ring[LPFC_FCP_RING], HA_R0RE_REQ);
		if (phba->cfg_poll & DISABLE_FCP_RING_INT)
			lpfc_poll_rearm_timer(phba);
	}

	return 0;
}

/**
 * lpfc_slave_destroy - slave_destroy entry point of SHT data structure
 * @sdev: Pointer to scsi_device.
 *
 * This routine sets @sdev hostdata to NULL.
 **/
static void
lpfc_slave_destroy(struct scsi_device *sdev)
{
	struct lpfc_vport *vport = (struct lpfc_vport *) sdev->host->hostdata;
	struct lpfc_hba *phba = vport->phba;
	atomic_dec(&phba->sdev_cnt);
	sdev->hostdata = NULL;
	return;
}

struct scsi_host_template lpfc_template = {
	.module			= THIS_MODULE,
	.name			= LPFC_DRIVER_NAME,
	.info			= lpfc_info,
	.queuecommand		= lpfc_queuecommand,
	.eh_abort_handler	= lpfc_abort_handler,
	.eh_device_reset_handler = lpfc_device_reset_handler,
	.eh_target_reset_handler = lpfc_target_reset_handler,
	.eh_bus_reset_handler	= lpfc_bus_reset_handler,
	.slave_alloc		= lpfc_slave_alloc,
	.slave_configure	= lpfc_slave_configure,
	.slave_destroy		= lpfc_slave_destroy,
	.scan_finished		= lpfc_scan_finished,
	.this_id		= -1,
	.sg_tablesize		= LPFC_DEFAULT_SG_SEG_CNT,
	.cmd_per_lun		= LPFC_CMD_PER_LUN,
	.use_clustering		= ENABLE_CLUSTERING,
	.shost_attrs		= lpfc_hba_attrs,
	.max_sectors		= 0xFFFF,
	.vendor_id		= LPFC_NL_VENDOR_ID,
	.change_queue_depth	= lpfc_change_queue_depth,
};

struct scsi_host_template lpfc_vport_template = {
	.module			= THIS_MODULE,
	.name			= LPFC_DRIVER_NAME,
	.info			= lpfc_info,
	.queuecommand		= lpfc_queuecommand,
	.eh_abort_handler	= lpfc_abort_handler,
	.eh_device_reset_handler = lpfc_device_reset_handler,
	.eh_target_reset_handler = lpfc_target_reset_handler,
	.eh_bus_reset_handler	= lpfc_bus_reset_handler,
	.slave_alloc		= lpfc_slave_alloc,
	.slave_configure	= lpfc_slave_configure,
	.slave_destroy		= lpfc_slave_destroy,
	.scan_finished		= lpfc_scan_finished,
	.this_id		= -1,
	.sg_tablesize		= LPFC_DEFAULT_SG_SEG_CNT,
	.cmd_per_lun		= LPFC_CMD_PER_LUN,
	.use_clustering		= ENABLE_CLUSTERING,
	.shost_attrs		= lpfc_vport_attrs,
	.max_sectors		= 0xFFFF,
	.change_queue_depth	= lpfc_change_queue_depth,
};