1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23#include <linux/pci.h>
24#include <linux/slab.h>
25#include <linux/interrupt.h>
26#include <linux/export.h>
27#include <linux/delay.h>
28#include <asm/unaligned.h>
29#include <linux/t10-pi.h>
30#include <linux/crc-t10dif.h>
31#include <net/checksum.h>
32
33#include <scsi/scsi.h>
34#include <scsi/scsi_device.h>
35#include <scsi/scsi_eh.h>
36#include <scsi/scsi_host.h>
37#include <scsi/scsi_tcq.h>
38#include <scsi/scsi_transport_fc.h>
39
40#include "lpfc_version.h"
41#include "lpfc_hw4.h"
42#include "lpfc_hw.h"
43#include "lpfc_sli.h"
44#include "lpfc_sli4.h"
45#include "lpfc_nl.h"
46#include "lpfc_disc.h"
47#include "lpfc.h"
48#include "lpfc_scsi.h"
49#include "lpfc_logmsg.h"
50#include "lpfc_crtn.h"
51#include "lpfc_vport.h"
52
53#define LPFC_RESET_WAIT 2
54#define LPFC_ABORT_WAIT 2
55
/* Non-zero once the one-shot BlockGuard debug capture has completed. */
int _dump_buf_done = 1;
57
/* Human-readable names for the SCSI_PROT_* DIF operations, indexed by
 * the value returned from scsi_get_prot_op().
 */
static char *dif_op_str[] = {
	"PROT_NORMAL",
	"PROT_READ_INSERT",
	"PROT_WRITE_STRIP",
	"PROT_READ_STRIP",
	"PROT_WRITE_INSERT",
	"PROT_READ_PASS",
	"PROT_WRITE_PASS",
};
67
/*
 * T10 DIF protection information: the 8-byte tuple carried with each
 * logical block. All fields are big-endian on the wire.
 */
struct scsi_dif_tuple {
	__be16 guard_tag;	/* checksum/CRC over the data block */
	__be16 app_tag;		/* opaque, application-defined tag */
	__be32 ref_tag;		/* reference tag (commonly low 32 bits of the LBA) */
};
73
74static struct lpfc_rport_data *
75lpfc_rport_data_from_scsi_device(struct scsi_device *sdev)
76{
77 struct lpfc_vport *vport = (struct lpfc_vport *)sdev->host->hostdata;
78
79 if (vport->phba->cfg_fof)
80 return ((struct lpfc_device_data *)sdev->hostdata)->rport_data;
81 else
82 return (struct lpfc_rport_data *)sdev->hostdata;
83}
84
/* Forward declarations for routines referenced before their definitions. */
static void
lpfc_release_scsi_buf_s4(struct lpfc_hba *phba, struct lpfc_io_buf *psb);
static void
lpfc_release_scsi_buf_s3(struct lpfc_hba *phba, struct lpfc_io_buf *psb);
static int
lpfc_prot_group_type(struct lpfc_hba *phba, struct scsi_cmnd *sc);
91
/**
 * lpfc_debug_save_data - Linearize a command's data s/g list into _dump_buf_data
 * @phba: HBA used only for error logging.
 * @cmnd: SCSI command whose data buffer is captured.
 *
 * BlockGuard debug aid: copies every segment of the command's data
 * scatter/gather list, back to back, into the global _dump_buf_data buffer
 * so the payload can be inspected after a guard error. Logs and returns
 * early when the dump buffer or the scatterlist is missing.
 *
 * NOTE(review): no bounds check against the size of _dump_buf_data is
 * visible here - presumably the buffer is allocated large enough for the
 * maximum I/O size; confirm at the allocation site.
 */
static void
lpfc_debug_save_data(struct lpfc_hba *phba, struct scsi_cmnd *cmnd)
{
	void *src, *dst;
	struct scatterlist *sgde = scsi_sglist(cmnd);

	if (!_dump_buf_data) {
		lpfc_printf_log(phba, KERN_ERR, LOG_BG,
			"9050 BLKGRD: ERROR %s _dump_buf_data is NULL\n",
			__func__);
		return;
	}

	if (!sgde) {
		lpfc_printf_log(phba, KERN_ERR, LOG_BG,
			"9051 BLKGRD: ERROR: data scatterlist is null\n");
		return;
	}

	dst = (void *) _dump_buf_data;
	while (sgde) {
		src = sg_virt(sgde);
		memcpy(dst, src, sgde->length);
		dst += sgde->length;	/* append each segment contiguously */
		sgde = sg_next(sgde);
	}
}
120
121static void
122lpfc_debug_save_dif(struct lpfc_hba *phba, struct scsi_cmnd *cmnd)
123{
124 void *src, *dst;
125 struct scatterlist *sgde = scsi_prot_sglist(cmnd);
126
127 if (!_dump_buf_dif) {
128 lpfc_printf_log(phba, KERN_ERR, LOG_BG,
129 "9052 BLKGRD: ERROR %s _dump_buf_data is NULL\n",
130 __func__);
131 return;
132 }
133
134 if (!sgde) {
135 lpfc_printf_log(phba, KERN_ERR, LOG_BG,
136 "9053 BLKGRD: ERROR: prot scatterlist is null\n");
137 return;
138 }
139
140 dst = _dump_buf_dif;
141 while (sgde) {
142 src = sg_virt(sgde);
143 memcpy(dst, src, sgde->length);
144 dst += sgde->length;
145 sgde = sg_next(sgde);
146 }
147}
148
149static inline unsigned
150lpfc_cmd_blksize(struct scsi_cmnd *sc)
151{
152 return sc->device->sector_size;
153}
154
/* Flags selecting which protection check lpfc_cmd_protect() reports on. */
#define LPFC_CHECK_PROTECT_GUARD 1
#define LPFC_CHECK_PROTECT_REF 2
/*
 * lpfc_cmd_protect - Report whether a protection check is enabled for @sc
 * @sc: the SCSI command (currently unused).
 * @flag: LPFC_CHECK_PROTECT_GUARD or LPFC_CHECK_PROTECT_REF (currently unused).
 *
 * Stub: always reports the check as enabled, regardless of @flag.
 */
static inline unsigned
lpfc_cmd_protect(struct scsi_cmnd *sc, int flag)
{
	return 1;
}
162
163static inline unsigned
164lpfc_cmd_guard_csum(struct scsi_cmnd *sc)
165{
166 if (lpfc_prot_group_type(NULL, sc) == LPFC_PG_TYPE_NO_DIF)
167 return 0;
168 if (scsi_host_get_guard(sc->device->host) == SHOST_DIX_GUARD_IP)
169 return 1;
170 return 0;
171}
172
173
174
175
176
177
178
179
180
181static void
182lpfc_sli4_set_rsp_sgl_last(struct lpfc_hba *phba,
183 struct lpfc_io_buf *lpfc_cmd)
184{
185 struct sli4_sge *sgl = (struct sli4_sge *)lpfc_cmd->dma_sgl;
186 if (sgl) {
187 sgl += 1;
188 sgl->word2 = le32_to_cpu(sgl->word2);
189 bf_set(lpfc_sli4_sge_last, sgl, 1);
190 sgl->word2 = cpu_to_le32(sgl->word2);
191 }
192}
193
194
195
196
197
198
199
200
201
/**
 * lpfc_update_stats - Accumulate command latency into the node's histogram
 * @phba: HBA that completed the command.
 * @lpfc_cmd: driver I/O buffer for the completed FCP command.
 *
 * Adds one count to the latency bucket matching the command's round-trip
 * time (jiffies since lpfc_cmd->start_time, in milliseconds). Skipped when
 * statistics are disabled or blocked for the vport, or when the command
 * completed with a non-zero result.
 *
 * Bucket selection:
 *  - LPFC_LINEAR_BUCKET: fixed-width buckets of bucket_step ms starting at
 *    bucket_base, clamped to [0, LPFC_MAX_BUCKET_COUNT - 1];
 *  - otherwise: the first bucket i satisfying
 *    latency <= bucket_base + (1 << i) * bucket_step.
 *
 * The node's lat_data[] array is updated under the SCSI host lock.
 */
static void
lpfc_update_stats(struct lpfc_hba *phba, struct lpfc_io_buf *lpfc_cmd)
{
	struct lpfc_rport_data *rdata;
	struct lpfc_nodelist *pnode;
	struct scsi_cmnd *cmd = lpfc_cmd->pCmd;
	unsigned long flags;
	struct Scsi_Host *shost = cmd->device->host;
	struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
	unsigned long latency;
	int i;

	if (!vport->stat_data_enabled ||
	    vport->stat_data_blocked ||
	    (cmd->result))
		return;

	latency = jiffies_to_msecs((long)jiffies - (long)lpfc_cmd->start_time);
	rdata = lpfc_cmd->rdata;
	pnode = rdata->pnode;

	spin_lock_irqsave(shost->host_lock, flags);
	if (!pnode ||
	    !pnode->lat_data ||
	    (phba->bucket_type == LPFC_NO_BUCKET)) {
		spin_unlock_irqrestore(shost->host_lock, flags);
		return;
	}

	if (phba->bucket_type == LPFC_LINEAR_BUCKET) {
		i = (latency + phba->bucket_step - 1 - phba->bucket_base)/
			phba->bucket_step;
		/* clamp to a valid bucket index */
		if (i < 0)
			i = 0;
		else if (i >= LPFC_MAX_BUCKET_COUNT)
			i = LPFC_MAX_BUCKET_COUNT - 1;
	} else {
		/* power-of-two bucket widths */
		for (i = 0; i < LPFC_MAX_BUCKET_COUNT-1; i++)
			if (latency <= (phba->bucket_base +
				((1<<i)*phba->bucket_step)))
				break;
	}

	pnode->lat_data[i].cmd_count++;
	spin_unlock_irqrestore(shost->host_lock, flags);
}
249
250
251
252
253
254
255
256
257
258
259
260
/**
 * lpfc_rampdown_queue_depth - Post a queue-depth ramp-down worker event
 * @phba: HBA that hit a resource error.
 *
 * Called when the HBA runs out of resources for an I/O. Records the error
 * (num_rsrc_err, last_rsrc_error_time) and, rate-limited to at most one
 * event per QUEUE_RAMP_DOWN_INTERVAL, posts WORKER_RAMP_DOWN_QUEUE so the
 * worker thread will lower the SCSI device queue depths.
 */
void
lpfc_rampdown_queue_depth(struct lpfc_hba *phba)
{
	unsigned long flags;
	uint32_t evt_posted;
	unsigned long expires;

	spin_lock_irqsave(&phba->hbalock, flags);
	atomic_inc(&phba->num_rsrc_err);
	phba->last_rsrc_error_time = jiffies;

	/* rate-limit: only one ramp-down per interval */
	expires = phba->last_ramp_down_time + QUEUE_RAMP_DOWN_INTERVAL;
	if (time_after(expires, jiffies)) {
		spin_unlock_irqrestore(&phba->hbalock, flags);
		return;
	}

	phba->last_ramp_down_time = jiffies;

	spin_unlock_irqrestore(&phba->hbalock, flags);

	/* post the event only if it is not already pending */
	spin_lock_irqsave(&phba->pport->work_port_lock, flags);
	evt_posted = phba->pport->work_port_events & WORKER_RAMP_DOWN_QUEUE;
	if (!evt_posted)
		phba->pport->work_port_events |= WORKER_RAMP_DOWN_QUEUE;
	spin_unlock_irqrestore(&phba->pport->work_port_lock, flags);

	if (!evt_posted)
		lpfc_worker_wake_up(phba);
	return;
}
292
293
294
295
296
297
298
299
300
/**
 * lpfc_ramp_down_queue_handler - Worker-thread queue-depth ramp down
 * @phba: HBA whose devices are throttled.
 *
 * Runs in the worker thread after lpfc_rampdown_queue_depth() posts
 * WORKER_RAMP_DOWN_QUEUE. For every SCSI device on every vport, lowers the
 * queue depth in proportion to the recent resource-error rate:
 * new depth = depth - depth * err / (err + success), with a minimum
 * reduction of one. Both counters are reset when done.
 */
void
lpfc_ramp_down_queue_handler(struct lpfc_hba *phba)
{
	struct lpfc_vport **vports;
	struct Scsi_Host *shost;
	struct scsi_device *sdev;
	unsigned long new_queue_depth;
	unsigned long num_rsrc_err, num_cmd_success;
	int i;

	num_rsrc_err = atomic_read(&phba->num_rsrc_err);
	num_cmd_success = atomic_read(&phba->num_cmd_success);

	/*
	 * Nothing to do if no resource errors were recorded since the last
	 * run (the event may have raced with a previous handler pass).
	 */
	if (num_rsrc_err == 0)
		return;

	vports = lpfc_create_vport_work_array(phba);
	if (vports != NULL)
		for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) {
			shost = lpfc_shost_from_vport(vports[i]);
			shost_for_each_device(sdev, shost) {
				/* proportional reduction, at least one */
				new_queue_depth =
					sdev->queue_depth * num_rsrc_err /
					(num_rsrc_err + num_cmd_success);
				if (!new_queue_depth)
					new_queue_depth = sdev->queue_depth - 1;
				else
					new_queue_depth = sdev->queue_depth -
						new_queue_depth;
				scsi_change_queue_depth(sdev, new_queue_depth);
			}
		}
	lpfc_destroy_vport_work_array(phba, vports);
	atomic_set(&phba->num_rsrc_err, 0);
	atomic_set(&phba->num_cmd_success, 0);
}
342
343
344
345
346
347
348
349
350
/**
 * lpfc_scsi_dev_block - Delete the FC remote port of every SCSI device
 * @phba: HBA being blocked.
 *
 * Walks all vports and all SCSI devices attached to each vport's host and
 * calls fc_remote_port_delete() on the corresponding remote port, causing
 * the FC transport to block further I/O to those targets.
 */
void
lpfc_scsi_dev_block(struct lpfc_hba *phba)
{
	struct lpfc_vport **vports;
	struct Scsi_Host *shost;
	struct scsi_device *sdev;
	struct fc_rport *rport;
	int i;

	vports = lpfc_create_vport_work_array(phba);
	if (vports != NULL)
		for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) {
			shost = lpfc_shost_from_vport(vports[i]);
			shost_for_each_device(sdev, shost) {
				rport = starget_to_rport(scsi_target(sdev));
				fc_remote_port_delete(rport);
			}
		}
	lpfc_destroy_vport_work_array(phba, vports);
}
371
372
373
374
375
376
377
378
379
380
381
382
383
384
385
386
387
/**
 * lpfc_new_scsi_buf_s3 - Allocate SLI-3 SCSI I/O buffers for a vport
 * @vport: vport to allocate the buffers for.
 * @num_to_alloc: requested number of buffers.
 *
 * Allocates up to @num_to_alloc lpfc_io_buf structures for SLI-3 HBAs.
 * Each buffer gets one DMA chunk from the sg DMA pool laid out as:
 * fcp_cmnd | fcp_rsp | BPL (buffer pointer list), plus an iotag. The
 * first two BPL entries are pre-initialized to point at the FCP command
 * and response; the IOCB's BDL is set up either for the SLI-3 extended
 * (embedded) format when BlockGuard is disabled, or as a BDE-64 BLP
 * pointing at the BPL. Each completed buffer is put on the free list via
 * lpfc_release_scsi_buf_s3().
 *
 * Return: the number of buffers successfully allocated (may be fewer than
 * requested on allocation or iotag failure).
 */
static int
lpfc_new_scsi_buf_s3(struct lpfc_vport *vport, int num_to_alloc)
{
	struct lpfc_hba *phba = vport->phba;
	struct lpfc_io_buf *psb;
	struct ulp_bde64 *bpl;
	IOCB_t *iocb;
	dma_addr_t pdma_phys_fcp_cmd;
	dma_addr_t pdma_phys_fcp_rsp;
	dma_addr_t pdma_phys_sgl;
	uint16_t iotag;
	int bcnt, bpl_size;

	/* space left in the DMA chunk for the BPL after cmnd + rsp */
	bpl_size = phba->cfg_sg_dma_buf_size -
		(sizeof(struct fcp_cmnd) + sizeof(struct fcp_rsp));

	lpfc_printf_vlog(vport, KERN_INFO, LOG_FCP,
			 "9067 ALLOC %d scsi_bufs: %d (%d + %d + %d)\n",
			 num_to_alloc, phba->cfg_sg_dma_buf_size,
			 (int)sizeof(struct fcp_cmnd),
			 (int)sizeof(struct fcp_rsp), bpl_size);

	for (bcnt = 0; bcnt < num_to_alloc; bcnt++) {
		psb = kzalloc(sizeof(struct lpfc_io_buf), GFP_KERNEL);
		if (!psb)
			break;

		/*
		 * Get one DMA chunk big enough for the FCP command, the FCP
		 * response and the BPL used for the data BDEs.
		 */
		psb->data = dma_pool_zalloc(phba->lpfc_sg_dma_buf_pool,
					GFP_KERNEL, &psb->dma_handle);
		if (!psb->data) {
			kfree(psb);
			break;
		}

		/* Allocate an iotag for psb->cur_iocbq; 0 means exhausted. */
		iotag = lpfc_sli_next_iotag(phba, &psb->cur_iocbq);
		if (iotag == 0) {
			dma_pool_free(phba->lpfc_sg_dma_buf_pool,
				      psb->data, psb->dma_handle);
			kfree(psb);
			break;
		}
		psb->cur_iocbq.iocb_flag |= LPFC_IO_FCP;

		/* carve the single DMA chunk into cmnd / rsp / BPL */
		psb->fcp_cmnd = psb->data;
		psb->fcp_rsp = psb->data + sizeof(struct fcp_cmnd);
		psb->dma_sgl = psb->data + sizeof(struct fcp_cmnd) +
			sizeof(struct fcp_rsp);

		/* matching physical addresses for the three regions */
		bpl = (struct ulp_bde64 *)psb->dma_sgl;
		pdma_phys_fcp_cmd = psb->dma_handle;
		pdma_phys_fcp_rsp = psb->dma_handle + sizeof(struct fcp_cmnd);
		pdma_phys_sgl = psb->dma_handle + sizeof(struct fcp_cmnd) +
			sizeof(struct fcp_rsp);

		/*
		 * The first two BPL entries are the FCP command and FCP
		 * response; the rest are data BDEs filled in per-I/O by
		 * queuecommand.
		 */
		bpl[0].addrHigh = le32_to_cpu(putPaddrHigh(pdma_phys_fcp_cmd));
		bpl[0].addrLow = le32_to_cpu(putPaddrLow(pdma_phys_fcp_cmd));
		bpl[0].tus.f.bdeSize = sizeof(struct fcp_cmnd);
		bpl[0].tus.f.bdeFlags = BUFF_TYPE_BDE_64;
		bpl[0].tus.w = le32_to_cpu(bpl[0].tus.w);

		/* BPL entry 1: the FCP response */
		bpl[1].addrHigh = le32_to_cpu(putPaddrHigh(pdma_phys_fcp_rsp));
		bpl[1].addrLow = le32_to_cpu(putPaddrLow(pdma_phys_fcp_rsp));
		bpl[1].tus.f.bdeSize = sizeof(struct fcp_rsp);
		bpl[1].tus.f.bdeFlags = BUFF_TYPE_BDE_64;
		bpl[1].tus.w = le32_to_cpu(bpl[1].tus.w);

		/*
		 * The IOCB lives inside the lpfc_io_buf, so initialize all
		 * the fields that are known now.
		 */
		iocb = &psb->cur_iocbq.iocb;
		iocb->un.fcpi64.bdl.ulpIoTag32 = 0;
		if ((phba->sli_rev == 3) &&
				!(phba->sli3_options & LPFC_SLI3_BG_ENABLED)) {
			/*
			 * SLI-3 without BlockGuard: embed the FCP command
			 * immediately in the IOCB and use the extended
			 * response BDE.
			 */
			iocb->un.fcpi64.bdl.bdeFlags = BUFF_TYPE_BDE_IMMED;
			iocb->un.fcpi64.bdl.bdeSize = sizeof(struct fcp_cmnd);
			iocb->un.fcpi64.bdl.addrLow = offsetof(IOCB_t,
					unsli3.fcp_ext.icd);
			iocb->un.fcpi64.bdl.addrHigh = 0;
			iocb->ulpBdeCount = 0;
			iocb->ulpLe = 0;
			/* extended response BDE */
			iocb->unsli3.fcp_ext.rbde.tus.f.bdeFlags =
							BUFF_TYPE_BDE_64;
			iocb->unsli3.fcp_ext.rbde.tus.f.bdeSize =
					sizeof(struct fcp_rsp);
			iocb->unsli3.fcp_ext.rbde.addrLow =
					putPaddrLow(pdma_phys_fcp_rsp);
			iocb->unsli3.fcp_ext.rbde.addrHigh =
					putPaddrHigh(pdma_phys_fcp_rsp);
		} else {
			/* point the BDL at the two-entry BPL */
			iocb->un.fcpi64.bdl.bdeFlags = BUFF_TYPE_BLP_64;
			iocb->un.fcpi64.bdl.bdeSize =
					(2 * sizeof(struct ulp_bde64));
			iocb->un.fcpi64.bdl.addrLow =
					putPaddrLow(pdma_phys_sgl);
			iocb->un.fcpi64.bdl.addrHigh =
					putPaddrHigh(pdma_phys_sgl);
			iocb->ulpBdeCount = 1;
			iocb->ulpLe = 1;
		}
		iocb->ulpClass = CLASS3;
		psb->status = IOSTAT_SUCCESS;
		/* put the buffer onto the free list */
		psb->cur_iocbq.context1  = psb;
		spin_lock_init(&psb->buf_lock);
		lpfc_release_scsi_buf_s3(phba, psb);

	}

	return bcnt;
}
516
517
518
519
520
521
522
523
/**
 * lpfc_sli4_vport_delete_fcp_xri_aborted - Drop rport refs on aborted bufs
 * @vport: vport being torn down.
 *
 * Walks every hardware queue's aborted-SCSI-buffer list and clears the
 * rdata pointer of each buffer whose node belongs to @vport, so those
 * buffers no longer reference the departing vport's remote-port data.
 * Runs with hbalock held plus each queue's abts_scsi_buf_list_lock.
 */
void
lpfc_sli4_vport_delete_fcp_xri_aborted(struct lpfc_vport *vport)
{
	struct lpfc_hba *phba = vport->phba;
	struct lpfc_io_buf *psb, *next_psb;
	struct lpfc_sli4_hdw_queue *qp;
	unsigned long iflag = 0;
	int idx;

	if (!(vport->cfg_enable_fc4_type & LPFC_ENABLE_FCP))
		return;

	spin_lock_irqsave(&phba->hbalock, iflag);
	for (idx = 0; idx < phba->cfg_hdw_queue; idx++) {
		qp = &phba->sli4_hba.hdwq[idx];

		spin_lock(&qp->abts_scsi_buf_list_lock);
		list_for_each_entry_safe(psb, next_psb,
					 &qp->lpfc_abts_scsi_buf_list, list) {
			if (psb->rdata && psb->rdata->pnode &&
			    psb->rdata->pnode->vport == vport)
				psb->rdata = NULL;
		}
		spin_unlock(&qp->abts_scsi_buf_list_lock);
	}
	spin_unlock_irqrestore(&phba->hbalock, iflag);
}
551
552
553
554
555
556
557
558
559
/**
 * lpfc_sli4_fcp_xri_aborted - Handle an XRI_ABORTED completion for FCP
 * @phba: HBA the event arrived on.
 * @axri: the WCQE describing the aborted exchange.
 * @idx: hardware queue index the XRI belongs to.
 *
 * Invoked when the firmware reports that an exchange abort completed.
 * If the XRI matches a buffer on the hardware queue's aborted list, the
 * buffer is removed, marked no longer exchange-busy, an RRQ is started
 * against the buffer's node (if any) and the buffer is returned to the
 * free pool. Otherwise the iotag table is scanned for a matching active
 * FCP iocb and only its exch_busy flag is cleared. Wakes the worker
 * thread when the RRQ list was previously empty or the ELS txq has
 * pending work.
 */
void
lpfc_sli4_fcp_xri_aborted(struct lpfc_hba *phba,
			  struct sli4_wcqe_xri_aborted *axri, int idx)
{
	uint16_t xri = bf_get(lpfc_wcqe_xa_xri, axri);
	uint16_t rxid = bf_get(lpfc_wcqe_xa_remote_xid, axri);
	struct lpfc_io_buf *psb, *next_psb;
	struct lpfc_sli4_hdw_queue *qp;
	unsigned long iflag = 0;
	struct lpfc_iocbq *iocbq;
	int i;
	struct lpfc_nodelist *ndlp;
	int rrq_empty = 0;
	struct lpfc_sli_ring *pring = phba->sli4_hba.els_wq->pring;

	if (!(phba->cfg_enable_fc4_type & LPFC_ENABLE_FCP))
		return;

	qp = &phba->sli4_hba.hdwq[idx];
	spin_lock_irqsave(&phba->hbalock, iflag);
	spin_lock(&qp->abts_scsi_buf_list_lock);
	list_for_each_entry_safe(psb, next_psb,
		&qp->lpfc_abts_scsi_buf_list, list) {
		if (psb->cur_iocbq.sli4_xritag == xri) {
			/* found it: take it off the aborted list */
			list_del(&psb->list);
			qp->abts_scsi_io_bufs--;
			psb->exch_busy = 0;
			psb->status = IOSTAT_SUCCESS;
			spin_unlock(
				&qp->abts_scsi_buf_list_lock);
			if (psb->rdata && psb->rdata->pnode)
				ndlp = psb->rdata->pnode;
			else
				ndlp = NULL;

			rrq_empty = list_empty(&phba->active_rrq_list);
			spin_unlock_irqrestore(&phba->hbalock, iflag);
			if (ndlp) {
				lpfc_set_rrq_active(phba, ndlp,
					psb->cur_iocbq.sli4_lxritag, rxid, 1);
				lpfc_sli4_abts_err_handler(phba, ndlp, axri);
			}
			lpfc_release_scsi_buf_s4(phba, psb);
			if (rrq_empty)
				lpfc_worker_wake_up(phba);
			return;
		}
	}
	spin_unlock(&qp->abts_scsi_buf_list_lock);
	/* not on an aborted list: look for a matching active FCP iocb */
	for (i = 1; i <= phba->sli.last_iotag; i++) {
		iocbq = phba->sli.iocbq_lookup[i];

		if (!(iocbq->iocb_flag & LPFC_IO_FCP) ||
		    (iocbq->iocb_flag & LPFC_IO_LIBDFC))
			continue;
		if (iocbq->sli4_xritag != xri)
			continue;
		psb = container_of(iocbq, struct lpfc_io_buf, cur_iocbq);
		psb->exch_busy = 0;
		spin_unlock_irqrestore(&phba->hbalock, iflag);
		if (!list_empty(&pring->txq))
			lpfc_worker_wake_up(phba);
		return;

	}
	spin_unlock_irqrestore(&phba->hbalock, iflag);
}
627
628
629
630
631
632
633
634
635
636
637
638
/**
 * lpfc_get_scsi_buf_s3 - Get an SLI-3 SCSI buffer from the free lists
 * @phba: HBA to allocate from.
 * @ndlp: node the command is destined for.
 * @cmnd: the SCSI command (unused on SLI-3).
 *
 * Removes a buffer from the "get" list; when that is empty the "put"
 * list is spliced over and retried (the two-list scheme keeps lock
 * contention between allocation and release paths low). If the node is
 * under queue-depth tracking, its pending count is bumped and
 * LPFC_SBUF_BUMP_QDEPTH is recorded so the release path can undo it.
 *
 * Return: pointer to an lpfc_io_buf, or NULL if none are available.
 */
static struct lpfc_io_buf *
lpfc_get_scsi_buf_s3(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp,
		     struct scsi_cmnd *cmnd)
{
	struct lpfc_io_buf *lpfc_cmd = NULL;
	struct list_head *scsi_buf_list_get = &phba->lpfc_scsi_buf_list_get;
	unsigned long iflag = 0;

	spin_lock_irqsave(&phba->scsi_buf_list_get_lock, iflag);
	list_remove_head(scsi_buf_list_get, lpfc_cmd, struct lpfc_io_buf,
			 list);
	if (!lpfc_cmd) {
		/* refill the get list from the put list and retry once */
		spin_lock(&phba->scsi_buf_list_put_lock);
		list_splice(&phba->lpfc_scsi_buf_list_put,
			    &phba->lpfc_scsi_buf_list_get);
		INIT_LIST_HEAD(&phba->lpfc_scsi_buf_list_put);
		list_remove_head(scsi_buf_list_get, lpfc_cmd,
				 struct lpfc_io_buf, list);
		spin_unlock(&phba->scsi_buf_list_put_lock);
	}
	spin_unlock_irqrestore(&phba->scsi_buf_list_get_lock, iflag);

	if (lpfc_ndlp_check_qdepth(phba, ndlp) && lpfc_cmd) {
		atomic_inc(&ndlp->cmd_pending);
		lpfc_cmd->flags |= LPFC_SBUF_BUMP_QDEPTH;
	}
	return  lpfc_cmd;
}
667
668
669
670
671
672
673
674
675
676
677
/**
 * lpfc_get_scsi_buf_s4 - Get an SLI-4 SCSI buffer from a hardware queue
 * @phba: HBA to allocate from.
 * @ndlp: node the command is destined for.
 * @cmnd: the SCSI command (used to pick the hardware queue, may be NULL).
 *
 * Selects a hardware queue either from the request's block-mq hw queue
 * (LPFC_FCP_SCHED_BY_HDWQ) or from the current CPU's mapping, then
 * allocates an io_buf from that queue's pool. On success the buffer's
 * per-command state is reset and its first two SGEs plus the IOCB BDE
 * are initialized to address the FCP command and response, which live
 * after the SGL area in the buffer's DMA region.
 *
 * Return: pointer to an lpfc_io_buf, or NULL when the queue is empty.
 */
static struct lpfc_io_buf *
lpfc_get_scsi_buf_s4(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp,
		     struct scsi_cmnd *cmnd)
{
	struct lpfc_io_buf *lpfc_cmd;
	struct lpfc_sli4_hdw_queue *qp;
	struct sli4_sge *sgl;
	IOCB_t *iocb;
	dma_addr_t pdma_phys_fcp_rsp;
	dma_addr_t pdma_phys_fcp_cmd;
	uint32_t sgl_size, cpu, idx;
	int tag;

	cpu = raw_smp_processor_id();
	if (cmnd && phba->cfg_fcp_io_sched == LPFC_FCP_SCHED_BY_HDWQ) {
		/* steer to the hw queue block-mq chose for this request */
		tag = blk_mq_unique_tag(cmnd->request);
		idx = blk_mq_unique_tag_to_hwq(tag);
	} else {
		idx = phba->sli4_hba.cpu_map[cpu].hdwq;
	}

	lpfc_cmd = lpfc_get_io_buf(phba, ndlp, idx,
				   !phba->cfg_xri_rebalancing);
	if (!lpfc_cmd) {
		qp = &phba->sli4_hba.hdwq[idx];
		qp->empty_io_bufs++;	/* track starvation per queue */
		return NULL;
	}

	sgl_size = phba->cfg_sg_dma_buf_size -
		(sizeof(struct fcp_cmnd) + sizeof(struct fcp_rsp));

	/*
	 * Reset all per-command fields; the buffer may carry stale state
	 * from its previous I/O.
	 */
	lpfc_cmd->cur_iocbq.iocb_flag = LPFC_IO_FCP;
	lpfc_cmd->prot_seg_cnt = 0;
	lpfc_cmd->seg_cnt = 0;
	lpfc_cmd->timeout = 0;
	lpfc_cmd->flags = 0;
	lpfc_cmd->start_time = jiffies;
	lpfc_cmd->waitq = NULL;
	lpfc_cmd->cpu = cpu;
#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
	lpfc_cmd->prot_data_type = 0;
#endif
	/* FCP command and response follow the SGL area in the DMA chunk */
	lpfc_cmd->fcp_cmnd = (lpfc_cmd->data + sgl_size);
	lpfc_cmd->fcp_rsp = (struct fcp_rsp *)((uint8_t *)lpfc_cmd->fcp_cmnd +
				sizeof(struct fcp_cmnd));

	/*
	 * The first two SGEs are the FCP command and FCP response; the
	 * data SGEs are filled in per-I/O by queuecommand.
	 */
	sgl = (struct sli4_sge *)lpfc_cmd->dma_sgl;
	pdma_phys_fcp_cmd = (lpfc_cmd->dma_handle + sgl_size);
	sgl->addr_hi = cpu_to_le32(putPaddrHigh(pdma_phys_fcp_cmd));
	sgl->addr_lo = cpu_to_le32(putPaddrLow(pdma_phys_fcp_cmd));
	sgl->word2 = le32_to_cpu(sgl->word2);
	bf_set(lpfc_sli4_sge_last, sgl, 0);
	sgl->word2 = cpu_to_le32(sgl->word2);
	sgl->sge_len = cpu_to_le32(sizeof(struct fcp_cmnd));
	sgl++;

	/* second SGE: the FCP response, marked last for now */
	pdma_phys_fcp_rsp = pdma_phys_fcp_cmd + sizeof(struct fcp_cmnd);
	sgl->addr_hi = cpu_to_le32(putPaddrHigh(pdma_phys_fcp_rsp));
	sgl->addr_lo = cpu_to_le32(putPaddrLow(pdma_phys_fcp_rsp));
	sgl->word2 = le32_to_cpu(sgl->word2);
	bf_set(lpfc_sli4_sge_last, sgl, 1);
	sgl->word2 = cpu_to_le32(sgl->word2);
	sgl->sge_len = cpu_to_le32(sizeof(struct fcp_rsp));

	/*
	 * The IOCB lives inside the lpfc_io_buf; initialize the fields
	 * that are known now.
	 */
	iocb = &lpfc_cmd->cur_iocbq.iocb;
	iocb->un.fcpi64.bdl.ulpIoTag32 = 0;
	iocb->un.fcpi64.bdl.bdeFlags = BUFF_TYPE_BDE_64;

	/* point the BDE at the FCP command */
	iocb->un.fcpi64.bdl.bdeSize = sizeof(struct fcp_cmnd);
	iocb->un.fcpi64.bdl.addrLow = putPaddrLow(pdma_phys_fcp_cmd);
	iocb->un.fcpi64.bdl.addrHigh = putPaddrHigh(pdma_phys_fcp_cmd);
	iocb->ulpBdeCount = 1;
	iocb->ulpLe = 1;
	iocb->ulpClass = CLASS3;

	if (lpfc_ndlp_check_qdepth(phba, ndlp)) {
		atomic_inc(&ndlp->cmd_pending);
		lpfc_cmd->flags |= LPFC_SBUF_BUMP_QDEPTH;
	}
	return  lpfc_cmd;
}
776
777
778
779
780
781
782
783
784
785
786
787static struct lpfc_io_buf*
788lpfc_get_scsi_buf(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp,
789 struct scsi_cmnd *cmnd)
790{
791 return phba->lpfc_get_scsi_buf(phba, ndlp, cmnd);
792}
793
794
795
796
797
798
799
800
801
/**
 * lpfc_release_scsi_buf_s3 - Return an SLI-3 SCSI buffer to the put list
 * @phba: HBA owning the buffer.
 * @psb: buffer being released.
 *
 * Clears the segment counts and command pointer, restores the iocb flag
 * to plain LPFC_IO_FCP and appends the buffer to lpfc_scsi_buf_list_put
 * under scsi_buf_list_put_lock (the allocation path splices this list
 * back onto the get list when the get list empties).
 */
static void
lpfc_release_scsi_buf_s3(struct lpfc_hba *phba, struct lpfc_io_buf *psb)
{
	unsigned long iflag = 0;

	psb->seg_cnt = 0;
	psb->prot_seg_cnt = 0;

	spin_lock_irqsave(&phba->scsi_buf_list_put_lock, iflag);
	psb->pCmd = NULL;
	psb->cur_iocbq.iocb_flag = LPFC_IO_FCP;
	list_add_tail(&psb->list, &phba->lpfc_scsi_buf_list_put);
	spin_unlock_irqrestore(&phba->scsi_buf_list_put_lock, iflag);
}
816
817
818
819
820
821
822
823
824
825
826
/**
 * lpfc_release_scsi_buf_s4 - Return an SLI-4 SCSI buffer to its pool
 * @phba: HBA owning the buffer.
 * @psb: buffer being released.
 *
 * If the exchange is still busy in the firmware (psb->exch_busy), the
 * buffer is parked on the hardware queue's aborted list until the
 * XRI_ABORTED event arrives; otherwise it is handed straight back to
 * the common io_buf pool.
 */
static void
lpfc_release_scsi_buf_s4(struct lpfc_hba *phba, struct lpfc_io_buf *psb)
{
	struct lpfc_sli4_hdw_queue *qp;
	unsigned long iflag = 0;

	psb->seg_cnt = 0;
	psb->prot_seg_cnt = 0;

	qp = psb->hdwq;
	if (psb->exch_busy) {
		/* firmware still owns the XRI: park on the aborted list */
		spin_lock_irqsave(&qp->abts_scsi_buf_list_lock, iflag);
		psb->pCmd = NULL;
		list_add_tail(&psb->list, &qp->lpfc_abts_scsi_buf_list);
		qp->abts_scsi_io_bufs++;
		spin_unlock_irqrestore(&qp->abts_scsi_buf_list_lock, iflag);
	} else {
		lpfc_release_io_buf(phba, (struct lpfc_io_buf *)psb, qp);
	}
}
847
848
849
850
851
852
853
854
855
856static void
857lpfc_release_scsi_buf(struct lpfc_hba *phba, struct lpfc_io_buf *psb)
858{
859 if ((psb->flags & LPFC_SBUF_BUMP_QDEPTH) && psb->ndlp)
860 atomic_dec(&psb->ndlp->cmd_pending);
861
862 psb->flags &= ~LPFC_SBUF_BUMP_QDEPTH;
863 phba->lpfc_release_scsi_buf(phba, psb);
864}
865
866
867
868
869
870
871
872
873
874
875
876
877
878
879
/**
 * lpfc_scsi_prep_dma_buf_s3 - DMA-map a SCSI command's data for SLI-3
 * @phba: HBA the command will be issued on.
 * @lpfc_cmd: driver I/O buffer holding the command.
 *
 * Maps the command's scatter/gather list for DMA and builds the BDEs
 * describing the data: either into the BPL (after the two entries
 * reserved for the FCP command/response) or, on SLI-3 without BlockGuard
 * and with a small enough s/g count, directly into the IOCB's extended
 * data BDEs. When the count exceeds the extended-BDE capacity, the first
 * extended BDE is turned into a BLP pointing back at the BPL. Finally
 * sets the IOCB's bdeSize/ebde_count and the fcpDl/fcpi_parm totals.
 *
 * Return: 0 on success, 1 when dma_map_sg() fails or the segment count
 * exceeds the configured maximum (caller can retry later).
 */
static int
lpfc_scsi_prep_dma_buf_s3(struct lpfc_hba *phba, struct lpfc_io_buf *lpfc_cmd)
{
	struct scsi_cmnd *scsi_cmnd = lpfc_cmd->pCmd;
	struct scatterlist *sgel = NULL;
	struct fcp_cmnd *fcp_cmnd = lpfc_cmd->fcp_cmnd;
	struct ulp_bde64 *bpl = (struct ulp_bde64 *)lpfc_cmd->dma_sgl;
	struct lpfc_iocbq *iocbq = &lpfc_cmd->cur_iocbq;
	IOCB_t *iocb_cmd = &lpfc_cmd->cur_iocbq.iocb;
	struct ulp_bde64 *data_bde = iocb_cmd->unsli3.fcp_ext.dbde;
	dma_addr_t physaddr;
	uint32_t num_bde = 0;
	int nseg, datadir = scsi_cmnd->sc_data_direction;

	/*
	 * Skip past the two BPL entries reserved for the FCP command and
	 * response; data BDEs start after them.
	 */
	bpl += 2;
	if (scsi_sg_count(scsi_cmnd)) {
		/*
		 * Use the mapped segment count, not scsi_sg_count(): with
		 * an IOMMU the number of DMA mappings can differ from the
		 * number of s/g entries.
		 */
		nseg = dma_map_sg(&phba->pcidev->dev, scsi_sglist(scsi_cmnd),
				  scsi_sg_count(scsi_cmnd), datadir);
		if (unlikely(!nseg))
			return 1;

		lpfc_cmd->seg_cnt = nseg;
		if (lpfc_cmd->seg_cnt > phba->cfg_sg_seg_cnt) {
			lpfc_printf_log(phba, KERN_ERR, LOG_BG,
				"9064 BLKGRD: %s: Too many sg segments from "
				"dma_map_sg. Config %d, seg_cnt %d\n",
				__func__, phba->cfg_sg_seg_cnt,
				lpfc_cmd->seg_cnt);
			lpfc_cmd->seg_cnt = 0;
			scsi_dma_unmap(scsi_cmnd);
			return 1;
		}

		/*
		 * Format one BDE per mapped segment. On SLI-3 without
		 * BlockGuard, small I/Os fit entirely in the IOCB's
		 * extended data BDEs; everything else goes into the BPL.
		 */
		scsi_for_each_sg(scsi_cmnd, sgel, nseg, num_bde) {
			physaddr = sg_dma_address(sgel);
			if (phba->sli_rev == 3 &&
			    !(phba->sli3_options & LPFC_SLI3_BG_ENABLED) &&
			    !(iocbq->iocb_flag & DSS_SECURITY_OP) &&
			    nseg <= LPFC_EXT_DATA_BDE_COUNT) {
				data_bde->tus.f.bdeFlags = BUFF_TYPE_BDE_64;
				data_bde->tus.f.bdeSize = sg_dma_len(sgel);
				data_bde->addrLow = putPaddrLow(physaddr);
				data_bde->addrHigh = putPaddrHigh(physaddr);
				data_bde++;
			} else {
				bpl->tus.f.bdeFlags = BUFF_TYPE_BDE_64;
				bpl->tus.f.bdeSize = sg_dma_len(sgel);
				bpl->tus.w = le32_to_cpu(bpl->tus.w);
				bpl->addrLow =
					le32_to_cpu(putPaddrLow(physaddr));
				bpl->addrHigh =
					le32_to_cpu(putPaddrHigh(physaddr));
				bpl++;
			}
		}
	}

	/*
	 * Finish the IOCB fields that depend on the data mapping. These
	 * are reinitialized every time since iocb memory is reused.
	 */
	if (phba->sli_rev == 3 &&
	    !(phba->sli3_options & LPFC_SLI3_BG_ENABLED) &&
	    !(iocbq->iocb_flag & DSS_SECURITY_OP)) {
		if (num_bde > LPFC_EXT_DATA_BDE_COUNT) {
			/*
			 * Too many BDEs for the extended IOCB format: the
			 * first extended data BDE becomes a BLP pointing
			 * at the BPL built above.
			 */
			physaddr = lpfc_cmd->dma_handle;
			data_bde->tus.f.bdeFlags = BUFF_TYPE_BLP_64;
			data_bde->tus.f.bdeSize = (num_bde *
						   sizeof(struct ulp_bde64));
			physaddr += (sizeof(struct fcp_cmnd) +
				     sizeof(struct fcp_rsp) +
				     (2 * sizeof(struct ulp_bde64)));
			data_bde->addrHigh = putPaddrHigh(physaddr);
			data_bde->addrLow = putPaddrLow(physaddr);
			/* ebde count: the response bde plus the data BPL */
			iocb_cmd->unsli3.fcp_ext.ebde_count = 2;
		} else {
			/* ebde count: the response bde plus the data bdes */
			iocb_cmd->unsli3.fcp_ext.ebde_count = (num_bde + 1);
		}
	} else {
		iocb_cmd->un.fcpi64.bdl.bdeSize =
			((num_bde + 2) * sizeof(struct ulp_bde64));
		iocb_cmd->unsli3.fcp_ext.ebde_count = (num_bde + 1);
	}
	fcp_cmnd->fcpDl = cpu_to_be32(scsi_bufflen(scsi_cmnd));

	/*
	 * Word 4 (fcpi_parm) carries the transfer length; set it here
	 * because DIF and non-DIF paths compute different data lengths.
	 */
	iocb_cmd->un.fcpi.fcpi_parm = scsi_bufflen(scsi_cmnd);
	return 0;
}
1003
#ifdef CONFIG_SCSI_LPFC_DEBUG_FS

/* Return flags from lpfc_bg_err_inject(): how the injected BlockGuard
 * error was (or should be) surfaced.
 */
/* error injected on the initiator side */
#define BG_ERR_INIT 0x1
/* error injected so the target should detect it */
#define BG_ERR_TGT 0x2
/* caller must swap the guard operation (CRC <-> CSUM) to realize
 * the injection
 */
#define BG_ERR_SWAP 0x10
/*
 * caller must (dis/en)able protection checking so the injected error
 * actually surfaces
 */
#define BG_ERR_CHECK 0x20
1017
1018
1019
1020
1021
1022
1023
1024
1025
1026
1027
/**
 * lpfc_bg_err_inject - Inject a BlockGuard error into a SCSI command
 * @phba: HBA carrying the debugfs-configured injection state.
 * @sc: SCSI command to corrupt.
 * @reftag: in/out reference tag to corrupt, or NULL to skip reftag injection.
 * @apptag: in/out application tag to corrupt, or NULL to skip apptag injection.
 * @new_guard: non-zero to allow guard-tag error injection.
 *
 * Debugfs-driven DIF error injection. The injection is gated by optional
 * LBA, NPortID and WWPN filters configured through phba->lpfc_injerr_*.
 * Depending on the protection op, the error is injected either by
 * corrupting the protection data in memory (the old value is saved in
 * the command's lpfc_io_buf so it can be restored on completion) or by
 * corrupting the tag values that will be placed on the wire.
 * The per-type injection counters are decremented and all filters are
 * cleared when a counter reaches zero.
 *
 * Return: a mask of BG_ERR_* flags describing how the error was injected,
 * or 0 when no injection applied.
 */
static int
lpfc_bg_err_inject(struct lpfc_hba *phba, struct scsi_cmnd *sc,
		uint32_t *reftag, uint16_t *apptag, uint32_t new_guard)
{
	struct scatterlist *sgpe;
	struct lpfc_io_buf *lpfc_cmd = NULL;
	struct scsi_dif_tuple *src = NULL;
	struct lpfc_nodelist *ndlp;
	struct lpfc_rport_data *rdata;
	uint32_t op = scsi_get_prot_op(sc);
	uint32_t blksize;
	uint32_t numblks;
	sector_t lba;
	int rc = 0;
	int blockoff = 0;

	if (op == SCSI_PROT_NORMAL)
		return 0;

	sgpe = scsi_prot_sglist(sc);
	lba = scsi_get_lba(sc);

	/* First check if the LBA filter matches this command */
	if (phba->lpfc_injerr_lba != LPFC_INJERR_LBA_OFF) {
		blksize = lpfc_cmd_blksize(sc);
		numblks = (scsi_bufflen(sc) + blksize - 1) / blksize;

		/* the target LBA must fall inside this I/O's range */
		if ((phba->lpfc_injerr_lba < lba) ||
			(phba->lpfc_injerr_lba >= (lba + numblks)))
			return 0;
		if (sgpe) {
			/* offset of the target block within the first
			 * protection segment, clamped to the segment */
			blockoff = phba->lpfc_injerr_lba - lba;
			numblks = sg_dma_len(sgpe) /
				sizeof(struct scsi_dif_tuple);
			if (numblks < blockoff)
				blockoff = numblks;
		}
	}

	/* Next check the NPortID / WWPN filters, if configured */
	rdata = lpfc_rport_data_from_scsi_device(sc->device);
	if (rdata && rdata->pnode) {
		ndlp = rdata->pnode;

		/* Make sure we have the right NPortID if one is specified */
		if (phba->lpfc_injerr_nportid &&
			(phba->lpfc_injerr_nportid != ndlp->nlp_DID))
			return 0;

		/*
		 * Make sure we have the right WWPN if one is specified;
		 * wwn[0] is non-zero in a valid (filter-enabled) WWPN.
		 */
		if (phba->lpfc_injerr_wwpn.u.wwn[0] &&
			(memcmp(&ndlp->nlp_portname, &phba->lpfc_injerr_wwpn,
				sizeof(struct lpfc_name)) != 0))
			return 0;
	}

	/* Point at the in-memory DIF tuple for the targeted block */
	if (sgpe) {
		src = (struct scsi_dif_tuple *)sg_virt(sgpe);
		src += blockoff;
		lpfc_cmd = (struct lpfc_io_buf *)sc->host_scribble;
	}

	/* Should we change the Reference Tag */
	if (reftag) {
		if (phba->lpfc_injerr_wref_cnt) {
			switch (op) {
			case SCSI_PROT_WRITE_PASS:
				if (src) {
					/*
					 * WRITE_PASS with protection data:
					 * corrupt the in-memory ref_tag so
					 * the error travels on the wire and
					 * is detected by the target. With
					 * blockoff != 0 the error lands in
					 * the middle of the I/O.
					 */
					lpfc_printf_log(phba, KERN_ERR, LOG_BG,
					"9076 BLKGRD: Injecting reftag error: "
					"write lba x%lx + x%x oldrefTag x%x\n",
					(unsigned long)lba, blockoff,
					be32_to_cpu(src->ref_tag));

					/*
					 * Save the old ref_tag so it can be
					 * restored when the I/O completes.
					 */
					if (lpfc_cmd) {
						lpfc_cmd->prot_data_type =
							LPFC_INJERR_REFTAG;
						lpfc_cmd->prot_data_segment =
							src;
						lpfc_cmd->prot_data =
							src->ref_tag;
					}
					src->ref_tag = cpu_to_be32(0xDEADBEEF);
					phba->lpfc_injerr_wref_cnt--;
					if (phba->lpfc_injerr_wref_cnt == 0) {
						/* last shot: clear filters */
						phba->lpfc_injerr_nportid = 0;
						phba->lpfc_injerr_lba =
							LPFC_INJERR_LBA_OFF;
						memset(&phba->lpfc_injerr_wwpn,
						  0, sizeof(struct lpfc_name));
					}
					rc = BG_ERR_TGT | BG_ERR_CHECK;

					break;
				}
				/* fall through - no protection data (!src) */
			case SCSI_PROT_WRITE_INSERT:
				/*
				 * WRITE_INSERT: the HBA generates the DIF;
				 * force 0xDEADBEEF as the on-wire reftag so
				 * the target detects the mismatch.
				 */
				*reftag = 0xDEADBEEF;
				phba->lpfc_injerr_wref_cnt--;
				if (phba->lpfc_injerr_wref_cnt == 0) {
					phba->lpfc_injerr_nportid = 0;
					phba->lpfc_injerr_lba =
						LPFC_INJERR_LBA_OFF;
					memset(&phba->lpfc_injerr_wwpn,
						0, sizeof(struct lpfc_name));
				}
				rc = BG_ERR_TGT | BG_ERR_CHECK;

				lpfc_printf_log(phba, KERN_ERR, LOG_BG,
					"9078 BLKGRD: Injecting reftag error: "
					"write lba x%lx\n", (unsigned long)lba);
				break;
			case SCSI_PROT_WRITE_STRIP:
				/*
				 * WRITE_STRIP: DIF is stripped before the
				 * wire, so the initiator-side check must
				 * catch the bad reftag.
				 */
				*reftag = 0xDEADBEEF;
				phba->lpfc_injerr_wref_cnt--;
				if (phba->lpfc_injerr_wref_cnt == 0) {
					phba->lpfc_injerr_nportid = 0;
					phba->lpfc_injerr_lba =
						LPFC_INJERR_LBA_OFF;
					memset(&phba->lpfc_injerr_wwpn,
						0, sizeof(struct lpfc_name));
				}
				rc = BG_ERR_INIT;

				lpfc_printf_log(phba, KERN_ERR, LOG_BG,
					"9077 BLKGRD: Injecting reftag error: "
					"write lba x%lx\n", (unsigned long)lba);
				break;
			}
		}
		if (phba->lpfc_injerr_rref_cnt) {
			switch (op) {
			case SCSI_PROT_READ_INSERT:
			case SCSI_PROT_READ_STRIP:
			case SCSI_PROT_READ_PASS:
				/*
				 * Reads: corrupt the expected reftag so the
				 * initiator-side check fires.
				 */
				*reftag = 0xDEADBEEF;
				phba->lpfc_injerr_rref_cnt--;
				if (phba->lpfc_injerr_rref_cnt == 0) {
					phba->lpfc_injerr_nportid = 0;
					phba->lpfc_injerr_lba =
						LPFC_INJERR_LBA_OFF;
					memset(&phba->lpfc_injerr_wwpn,
						0, sizeof(struct lpfc_name));
				}
				rc = BG_ERR_INIT;

				lpfc_printf_log(phba, KERN_ERR, LOG_BG,
					"9079 BLKGRD: Injecting reftag error: "
					"read lba x%lx\n", (unsigned long)lba);
				break;
			}
		}
	}

	/* Should we change the Application Tag */
	if (apptag) {
		if (phba->lpfc_injerr_wapp_cnt) {
			switch (op) {
			case SCSI_PROT_WRITE_PASS:
				if (src) {
					/*
					 * WRITE_PASS with protection data:
					 * corrupt the in-memory app_tag so
					 * the error travels on the wire and
					 * is detected by the target. With
					 * blockoff != 0 the error lands in
					 * the middle of the I/O.
					 */
					lpfc_printf_log(phba, KERN_ERR, LOG_BG,
					"9080 BLKGRD: Injecting apptag error: "
					"write lba x%lx + x%x oldappTag x%x\n",
					(unsigned long)lba, blockoff,
					be16_to_cpu(src->app_tag));

					/*
					 * Save the old app_tag so it can be
					 * restored when the I/O completes.
					 */
					if (lpfc_cmd) {
						lpfc_cmd->prot_data_type =
							LPFC_INJERR_APPTAG;
						lpfc_cmd->prot_data_segment =
							src;
						lpfc_cmd->prot_data =
							src->app_tag;
					}
					src->app_tag = cpu_to_be16(0xDEAD);
					phba->lpfc_injerr_wapp_cnt--;
					if (phba->lpfc_injerr_wapp_cnt == 0) {
						phba->lpfc_injerr_nportid = 0;
						phba->lpfc_injerr_lba =
							LPFC_INJERR_LBA_OFF;
						memset(&phba->lpfc_injerr_wwpn,
						  0, sizeof(struct lpfc_name));
					}
					rc = BG_ERR_TGT | BG_ERR_CHECK;
					break;
				}
				/* fall through - no protection data (!src) */
			case SCSI_PROT_WRITE_INSERT:
				/*
				 * WRITE_INSERT: force 0xDEAD as the on-wire
				 * apptag so the target detects the mismatch.
				 */
				*apptag = 0xDEAD;
				phba->lpfc_injerr_wapp_cnt--;
				if (phba->lpfc_injerr_wapp_cnt == 0) {
					phba->lpfc_injerr_nportid = 0;
					phba->lpfc_injerr_lba =
						LPFC_INJERR_LBA_OFF;
					memset(&phba->lpfc_injerr_wwpn,
						0, sizeof(struct lpfc_name));
				}
				rc = BG_ERR_TGT | BG_ERR_CHECK;

				lpfc_printf_log(phba, KERN_ERR, LOG_BG,
					"0813 BLKGRD: Injecting apptag error: "
					"write lba x%lx\n", (unsigned long)lba);
				break;
			case SCSI_PROT_WRITE_STRIP:
				/*
				 * WRITE_STRIP: the initiator-side check must
				 * catch the bad apptag before stripping.
				 */
				*apptag = 0xDEAD;
				phba->lpfc_injerr_wapp_cnt--;
				if (phba->lpfc_injerr_wapp_cnt == 0) {
					phba->lpfc_injerr_nportid = 0;
					phba->lpfc_injerr_lba =
						LPFC_INJERR_LBA_OFF;
					memset(&phba->lpfc_injerr_wwpn,
						0, sizeof(struct lpfc_name));
				}
				rc = BG_ERR_INIT;

				lpfc_printf_log(phba, KERN_ERR, LOG_BG,
					"0812 BLKGRD: Injecting apptag error: "
					"write lba x%lx\n", (unsigned long)lba);
				break;
			}
		}
		if (phba->lpfc_injerr_rapp_cnt) {
			switch (op) {
			case SCSI_PROT_READ_INSERT:
			case SCSI_PROT_READ_STRIP:
			case SCSI_PROT_READ_PASS:
				/*
				 * Reads: corrupt the expected apptag so the
				 * initiator-side check fires.
				 */
				*apptag = 0xDEAD;
				phba->lpfc_injerr_rapp_cnt--;
				if (phba->lpfc_injerr_rapp_cnt == 0) {
					phba->lpfc_injerr_nportid = 0;
					phba->lpfc_injerr_lba =
						LPFC_INJERR_LBA_OFF;
					memset(&phba->lpfc_injerr_wwpn,
						0, sizeof(struct lpfc_name));
				}
				rc = BG_ERR_INIT;

				lpfc_printf_log(phba, KERN_ERR, LOG_BG,
					"0814 BLKGRD: Injecting apptag error: "
					"read lba x%lx\n", (unsigned long)lba);
				break;
			}
		}
	}


	/* Should we change the Guard Tag */
	if (new_guard) {
		if (phba->lpfc_injerr_wgrd_cnt) {
			switch (op) {
			case SCSI_PROT_WRITE_PASS:
				rc = BG_ERR_CHECK;
				/* fall through - also swap like INSERT */

			case SCSI_PROT_WRITE_INSERT:
				/*
				 * Ask the caller to swap the guard op so a
				 * wrong guard goes on the wire; the target
				 * should detect it.
				 */
				phba->lpfc_injerr_wgrd_cnt--;
				if (phba->lpfc_injerr_wgrd_cnt == 0) {
					phba->lpfc_injerr_nportid = 0;
					phba->lpfc_injerr_lba =
						LPFC_INJERR_LBA_OFF;
					memset(&phba->lpfc_injerr_wwpn,
						0, sizeof(struct lpfc_name));
				}

				rc |= BG_ERR_TGT | BG_ERR_SWAP;
				/* rc preserves BG_ERR_CHECK for WRITE_PASS */

				lpfc_printf_log(phba, KERN_ERR, LOG_BG,
					"0817 BLKGRD: Injecting guard error: "
					"write lba x%lx\n", (unsigned long)lba);
				break;
			case SCSI_PROT_WRITE_STRIP:
				/*
				 * Swap the guard op so the initiator-side
				 * check fails before stripping.
				 */
				phba->lpfc_injerr_wgrd_cnt--;
				if (phba->lpfc_injerr_wgrd_cnt == 0) {
					phba->lpfc_injerr_nportid = 0;
					phba->lpfc_injerr_lba =
						LPFC_INJERR_LBA_OFF;
					memset(&phba->lpfc_injerr_wwpn,
						0, sizeof(struct lpfc_name));
				}

				rc = BG_ERR_INIT | BG_ERR_SWAP;

				lpfc_printf_log(phba, KERN_ERR, LOG_BG,
					"0816 BLKGRD: Injecting guard error: "
					"write lba x%lx\n", (unsigned long)lba);
				break;
			}
		}
		if (phba->lpfc_injerr_rgrd_cnt) {
			switch (op) {
			case SCSI_PROT_READ_INSERT:
			case SCSI_PROT_READ_STRIP:
			case SCSI_PROT_READ_PASS:
				/*
				 * Reads: swap the guard op so the
				 * initiator-side check fails.
				 */
				phba->lpfc_injerr_rgrd_cnt--;
				if (phba->lpfc_injerr_rgrd_cnt == 0) {
					phba->lpfc_injerr_nportid = 0;
					phba->lpfc_injerr_lba =
						LPFC_INJERR_LBA_OFF;
					memset(&phba->lpfc_injerr_wwpn,
						0, sizeof(struct lpfc_name));
				}

				rc = BG_ERR_INIT | BG_ERR_SWAP;

				lpfc_printf_log(phba, KERN_ERR, LOG_BG,
					"0818 BLKGRD: Injecting guard error: "
					"read lba x%lx\n", (unsigned long)lba);
			}
		}
	}

	return rc;
}
1420#endif
1421
1422
1423
1424
1425
1426
1427
1428
1429
1430
1431
1432
1433static int
1434lpfc_sc_to_bg_opcodes(struct lpfc_hba *phba, struct scsi_cmnd *sc,
1435 uint8_t *txop, uint8_t *rxop)
1436{
1437 uint8_t ret = 0;
1438
1439 if (lpfc_cmd_guard_csum(sc)) {
1440 switch (scsi_get_prot_op(sc)) {
1441 case SCSI_PROT_READ_INSERT:
1442 case SCSI_PROT_WRITE_STRIP:
1443 *rxop = BG_OP_IN_NODIF_OUT_CSUM;
1444 *txop = BG_OP_IN_CSUM_OUT_NODIF;
1445 break;
1446
1447 case SCSI_PROT_READ_STRIP:
1448 case SCSI_PROT_WRITE_INSERT:
1449 *rxop = BG_OP_IN_CRC_OUT_NODIF;
1450 *txop = BG_OP_IN_NODIF_OUT_CRC;
1451 break;
1452
1453 case SCSI_PROT_READ_PASS:
1454 case SCSI_PROT_WRITE_PASS:
1455 *rxop = BG_OP_IN_CRC_OUT_CSUM;
1456 *txop = BG_OP_IN_CSUM_OUT_CRC;
1457 break;
1458
1459 case SCSI_PROT_NORMAL:
1460 default:
1461 lpfc_printf_log(phba, KERN_ERR, LOG_BG,
1462 "9063 BLKGRD: Bad op/guard:%d/IP combination\n",
1463 scsi_get_prot_op(sc));
1464 ret = 1;
1465 break;
1466
1467 }
1468 } else {
1469 switch (scsi_get_prot_op(sc)) {
1470 case SCSI_PROT_READ_STRIP:
1471 case SCSI_PROT_WRITE_INSERT:
1472 *rxop = BG_OP_IN_CRC_OUT_NODIF;
1473 *txop = BG_OP_IN_NODIF_OUT_CRC;
1474 break;
1475
1476 case SCSI_PROT_READ_PASS:
1477 case SCSI_PROT_WRITE_PASS:
1478 *rxop = BG_OP_IN_CRC_OUT_CRC;
1479 *txop = BG_OP_IN_CRC_OUT_CRC;
1480 break;
1481
1482 case SCSI_PROT_READ_INSERT:
1483 case SCSI_PROT_WRITE_STRIP:
1484 *rxop = BG_OP_IN_NODIF_OUT_CRC;
1485 *txop = BG_OP_IN_CRC_OUT_NODIF;
1486 break;
1487
1488 case SCSI_PROT_NORMAL:
1489 default:
1490 lpfc_printf_log(phba, KERN_ERR, LOG_BG,
1491 "9075 BLKGRD: Bad op/guard:%d/CRC combination\n",
1492 scsi_get_prot_op(sc));
1493 ret = 1;
1494 break;
1495 }
1496 }
1497
1498 return ret;
1499}
1500
1501#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
1502
1503
1504
1505
1506
1507
1508
1509
1510
1511
1512
1513static int
1514lpfc_bg_err_opcodes(struct lpfc_hba *phba, struct scsi_cmnd *sc,
1515 uint8_t *txop, uint8_t *rxop)
1516{
1517 uint8_t ret = 0;
1518
1519 if (lpfc_cmd_guard_csum(sc)) {
1520 switch (scsi_get_prot_op(sc)) {
1521 case SCSI_PROT_READ_INSERT:
1522 case SCSI_PROT_WRITE_STRIP:
1523 *rxop = BG_OP_IN_NODIF_OUT_CRC;
1524 *txop = BG_OP_IN_CRC_OUT_NODIF;
1525 break;
1526
1527 case SCSI_PROT_READ_STRIP:
1528 case SCSI_PROT_WRITE_INSERT:
1529 *rxop = BG_OP_IN_CSUM_OUT_NODIF;
1530 *txop = BG_OP_IN_NODIF_OUT_CSUM;
1531 break;
1532
1533 case SCSI_PROT_READ_PASS:
1534 case SCSI_PROT_WRITE_PASS:
1535 *rxop = BG_OP_IN_CSUM_OUT_CRC;
1536 *txop = BG_OP_IN_CRC_OUT_CSUM;
1537 break;
1538
1539 case SCSI_PROT_NORMAL:
1540 default:
1541 break;
1542
1543 }
1544 } else {
1545 switch (scsi_get_prot_op(sc)) {
1546 case SCSI_PROT_READ_STRIP:
1547 case SCSI_PROT_WRITE_INSERT:
1548 *rxop = BG_OP_IN_CSUM_OUT_NODIF;
1549 *txop = BG_OP_IN_NODIF_OUT_CSUM;
1550 break;
1551
1552 case SCSI_PROT_READ_PASS:
1553 case SCSI_PROT_WRITE_PASS:
1554 *rxop = BG_OP_IN_CSUM_OUT_CSUM;
1555 *txop = BG_OP_IN_CSUM_OUT_CSUM;
1556 break;
1557
1558 case SCSI_PROT_READ_INSERT:
1559 case SCSI_PROT_WRITE_STRIP:
1560 *rxop = BG_OP_IN_NODIF_OUT_CSUM;
1561 *txop = BG_OP_IN_CSUM_OUT_NODIF;
1562 break;
1563
1564 case SCSI_PROT_NORMAL:
1565 default:
1566 break;
1567 }
1568 }
1569
1570 return ret;
1571}
1572#endif
1573
1574
1575
1576
1577
1578
1579
1580
1581
1582
1583
1584
1585
1586
1587
1588
1589
1590
1591
1592
1593
1594
1595
1596
1597
1598
1599
1600
1601
1602
1603
1604
/*
 * lpfc_bg_setup_bpl - Set up BlockGuard BPL with no protection data
 * @phba: The Hba for which this call is being executed.
 * @sc: pointer to scsi command we're working on.
 * @bpl: pointer to buffer list for protection groups.
 * @datasegcnt: number of segments of data that have been dma mapped.
 *
 * Builds a PDE5 + PDE6 descriptor pair followed by one BDE per mapped
 * data segment (layout: PDE5 | PDE6 | BDE | BDE | ...).
 *
 * Returns the number of BDEs written, or 0 if lpfc_sc_to_bg_opcodes
 * rejects the op/guard combination.
 */
static int
lpfc_bg_setup_bpl(struct lpfc_hba *phba, struct scsi_cmnd *sc,
		struct ulp_bde64 *bpl, int datasegcnt)
{
	struct scatterlist *sgde = NULL; /* s/g data entry */
	struct lpfc_pde5 *pde5 = NULL;
	struct lpfc_pde6 *pde6 = NULL;
	dma_addr_t physaddr;
	int i = 0, num_bde = 0, status;
	int datadir = sc->sc_data_direction;
#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
	uint32_t rc;
#endif
	uint32_t checking = 1;
	uint32_t reftag;
	uint8_t txop, rxop;

	status = lpfc_sc_to_bg_opcodes(phba, sc, &txop, &rxop);
	if (status)
		goto out;

	/* extract some info from the scsi command for pde */
	reftag = (uint32_t)scsi_get_lba(sc); /* Truncate LBA */

#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
	/* Optionally perturb reftag/opcodes to inject BG errors for test */
	rc = lpfc_bg_err_inject(phba, sc, &reftag, NULL, 1);
	if (rc) {
		if (rc & BG_ERR_SWAP)
			lpfc_bg_err_opcodes(phba, sc, &txop, &rxop);
		if (rc & BG_ERR_CHECK)
			checking = 0;
	}
#endif

	/* setup PDE5 with what we have */
	pde5 = (struct lpfc_pde5 *) bpl;
	memset(pde5, 0, sizeof(struct lpfc_pde5));
	bf_set(pde5_type, pde5, LPFC_PDE5_DESCRIPTOR);

	/* Endianness conversion if necessary for PDE5 */
	pde5->word0 = cpu_to_le32(pde5->word0);
	pde5->reftag = cpu_to_le32(reftag);

	/* advance bpl and increment bde count */
	num_bde++;
	bpl++;
	pde6 = (struct lpfc_pde6 *) bpl;

	/* setup PDE6 with the rest of the info */
	memset(pde6, 0, sizeof(struct lpfc_pde6));
	bf_set(pde6_type, pde6, LPFC_PDE6_DESCRIPTOR);
	bf_set(pde6_optx, pde6, txop);
	bf_set(pde6_oprx, pde6, rxop);

	/*
	 * Guard/ref checking is only enabled for reads (DMA_FROM_DEVICE);
	 * for writes the ce/re bits are left 0 from the memset above.
	 */
	if (datadir == DMA_FROM_DEVICE) {
		if (lpfc_cmd_protect(sc, LPFC_CHECK_PROTECT_GUARD))
			bf_set(pde6_ce, pde6, checking);
		else
			bf_set(pde6_ce, pde6, 0);

		if (lpfc_cmd_protect(sc, LPFC_CHECK_PROTECT_REF))
			bf_set(pde6_re, pde6, checking);
		else
			bf_set(pde6_re, pde6, 0);
	}
	bf_set(pde6_ai, pde6, 1);	/* auto-increment ref tag */
	bf_set(pde6_ae, pde6, 0);
	bf_set(pde6_apptagval, pde6, 0);

	/* Endianness conversion if necessary for PDE6 */
	pde6->word0 = cpu_to_le32(pde6->word0);
	pde6->word1 = cpu_to_le32(pde6->word1);
	pde6->word2 = cpu_to_le32(pde6->word2);

	/* advance bpl and increment bde count */
	num_bde++;
	bpl++;

	/* assumption: caller has already run dma_map_sg on command data */
	scsi_for_each_sg(sc, sgde, datasegcnt, i) {
		physaddr = sg_dma_address(sgde);
		bpl->addrLow = le32_to_cpu(putPaddrLow(physaddr));
		bpl->addrHigh = le32_to_cpu(putPaddrHigh(physaddr));
		bpl->tus.f.bdeSize = sg_dma_len(sgde);
		if (datadir == DMA_TO_DEVICE)
			bpl->tus.f.bdeFlags = BUFF_TYPE_BDE_64;
		else
			bpl->tus.f.bdeFlags = BUFF_TYPE_BDE_64I;
		bpl->tus.w = le32_to_cpu(bpl->tus.w);
		bpl++;
		num_bde++;
	}

out:
	return num_bde;
}
1705
1706
1707
1708
1709
1710
1711
1712
1713
1714
1715
1716
1717
1718
1719
1720
1721
1722
1723
1724
1725
1726
1727
1728
1729
1730
1731
1732
1733
1734
1735
1736
1737
1738
1739
1740
1741
1742
1743
1744
/*
 * lpfc_bg_setup_bpl_prot - Set up BlockGuard BPL with protection data
 * @phba: The Hba for which this call is being executed.
 * @sc: pointer to scsi command we're working on.
 * @bpl: pointer to buffer list for protection groups.
 * @datacnt: number of segments of data that have been dma mapped.
 * @protcnt: number of segments of protection data that have been dma mapped.
 *
 * Builds one protection group per DIF segment, each laid out as
 * PDE5 | PDE6 | PDE7 | data BDE | data BDE | ... , splitting DIF
 * segments that would cross a 4K boundary and splitting data buffers
 * across protection groups as needed.
 *
 * Returns the number of BDEs written, 0 on error, or a value larger
 * than phba->cfg_total_seg_cnt when space is exhausted (caller treats
 * that as an error).
 */
static int
lpfc_bg_setup_bpl_prot(struct lpfc_hba *phba, struct scsi_cmnd *sc,
		struct ulp_bde64 *bpl, int datacnt, int protcnt)
{
	struct scatterlist *sgde = NULL; /* s/g data entry */
	struct scatterlist *sgpe = NULL; /* s/g prot entry */
	struct lpfc_pde5 *pde5 = NULL;
	struct lpfc_pde6 *pde6 = NULL;
	struct lpfc_pde7 *pde7 = NULL;
	dma_addr_t dataphysaddr, protphysaddr;
	unsigned short curr_data = 0, curr_prot = 0;
	unsigned int split_offset;
	unsigned int protgroup_len, protgroup_offset = 0, protgroup_remainder;
	unsigned int protgrp_blks, protgrp_bytes;
	unsigned int remainder, subtotal;
	int status;
	int datadir = sc->sc_data_direction;
	unsigned char pgdone = 0, alldone = 0;
	unsigned blksize;
#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
	uint32_t rc;
#endif
	uint32_t checking = 1;
	uint32_t reftag;
	uint8_t txop, rxop;
	int num_bde = 0;

	sgpe = scsi_prot_sglist(sc);
	sgde = scsi_sglist(sc);

	if (!sgpe || !sgde) {
		lpfc_printf_log(phba, KERN_ERR, LOG_FCP,
				"9020 Invalid s/g entry: data=0x%p prot=0x%p\n",
				sgpe, sgde);
		return 0;
	}

	status = lpfc_sc_to_bg_opcodes(phba, sc, &txop, &rxop);
	if (status)
		goto out;

	/* extract some info from the scsi command */
	blksize = lpfc_cmd_blksize(sc);
	reftag = (uint32_t)scsi_get_lba(sc); /* Truncate LBA */

#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
	/* Optionally perturb reftag/opcodes to inject BG errors for test */
	rc = lpfc_bg_err_inject(phba, sc, &reftag, NULL, 1);
	if (rc) {
		if (rc & BG_ERR_SWAP)
			lpfc_bg_err_opcodes(phba, sc, &txop, &rxop);
		if (rc & BG_ERR_CHECK)
			checking = 0;
	}
#endif

	split_offset = 0;
	do {
		/* Check to see if we ran out of space */
		if (num_bde >= (phba->cfg_total_seg_cnt - 2))
			return num_bde + 3;

		/* setup PDE5 with what we have */
		pde5 = (struct lpfc_pde5 *) bpl;
		memset(pde5, 0, sizeof(struct lpfc_pde5));
		bf_set(pde5_type, pde5, LPFC_PDE5_DESCRIPTOR);

		/* Endianness conversion if necessary for PDE5 */
		pde5->word0 = cpu_to_le32(pde5->word0);
		pde5->reftag = cpu_to_le32(reftag);

		/* advance bpl and increment bde count */
		num_bde++;
		bpl++;
		pde6 = (struct lpfc_pde6 *) bpl;

		/* setup PDE6 with the rest of the info */
		memset(pde6, 0, sizeof(struct lpfc_pde6));
		bf_set(pde6_type, pde6, LPFC_PDE6_DESCRIPTOR);
		bf_set(pde6_optx, pde6, txop);
		bf_set(pde6_oprx, pde6, rxop);

		if (lpfc_cmd_protect(sc, LPFC_CHECK_PROTECT_GUARD))
			bf_set(pde6_ce, pde6, checking);
		else
			bf_set(pde6_ce, pde6, 0);

		if (lpfc_cmd_protect(sc, LPFC_CHECK_PROTECT_REF))
			bf_set(pde6_re, pde6, checking);
		else
			bf_set(pde6_re, pde6, 0);

		bf_set(pde6_ai, pde6, 1);	/* auto-increment ref tag */
		bf_set(pde6_ae, pde6, 0);
		bf_set(pde6_apptagval, pde6, 0);

		/* Endianness conversion if necessary for PDE6 */
		pde6->word0 = cpu_to_le32(pde6->word0);
		pde6->word1 = cpu_to_le32(pde6->word1);
		pde6->word2 = cpu_to_le32(pde6->word2);

		/* advance bpl and increment bde count */
		num_bde++;
		bpl++;

		/* setup the first BDE that points to protection buffer */
		protphysaddr = sg_dma_address(sgpe) + protgroup_offset;
		protgroup_len = sg_dma_len(sgpe) - protgroup_offset;

		/* must be integer multiple of the DIF tuple size (8 bytes) */
		BUG_ON(protgroup_len % 8);

		pde7 = (struct lpfc_pde7 *) bpl;
		memset(pde7, 0, sizeof(struct lpfc_pde7));
		bf_set(pde7_type, pde7, LPFC_PDE7_DESCRIPTOR);

		pde7->addrHigh = le32_to_cpu(putPaddrHigh(protphysaddr));
		pde7->addrLow = le32_to_cpu(putPaddrLow(protphysaddr));

		protgrp_blks = protgroup_len / 8;
		protgrp_bytes = protgrp_blks * blksize;

		/* check if this PDE7 crosses a 4K boundary; if so split it */
		if ((pde7->addrLow & 0xfff) + protgroup_len > 0x1000) {
			protgroup_remainder = 0x1000 - (pde7->addrLow & 0xfff);
			protgroup_offset += protgroup_remainder;
			protgrp_blks = protgroup_remainder / 8;
			protgrp_bytes = protgrp_blks * blksize;
		} else {
			/* entire prot segment consumed; move to the next */
			protgroup_offset = 0;
			curr_prot++;
		}

		num_bde++;

		/* setup BDE's for data blocks associated with this DIF data */
		pgdone = 0;
		subtotal = 0; /* total bytes processed for current prot grp */
		while (!pgdone) {
			/* Check to see if we ran out of space */
			if (num_bde >= phba->cfg_total_seg_cnt)
				return num_bde + 1;

			if (!sgde) {
				lpfc_printf_log(phba, KERN_ERR, LOG_BG,
					"9065 BLKGRD:%s Invalid data segment\n",
						__func__);
				return 0;
			}
			bpl++;
			dataphysaddr = sg_dma_address(sgde) + split_offset;
			bpl->addrLow = le32_to_cpu(putPaddrLow(dataphysaddr));
			bpl->addrHigh = le32_to_cpu(putPaddrHigh(dataphysaddr));

			remainder = sg_dma_len(sgde) - split_offset;

			if ((subtotal + remainder) <= protgrp_bytes) {
				/* we can use this whole buffer */
				bpl->tus.f.bdeSize = remainder;
				split_offset = 0;

				if ((subtotal + remainder) == protgrp_bytes)
					pgdone = 1;
			} else {
				/* must split this buffer with next prot grp */
				bpl->tus.f.bdeSize = protgrp_bytes - subtotal;
				split_offset += bpl->tus.f.bdeSize;
			}

			subtotal += bpl->tus.f.bdeSize;

			if (datadir == DMA_TO_DEVICE)
				bpl->tus.f.bdeFlags = BUFF_TYPE_BDE_64;
			else
				bpl->tus.f.bdeFlags = BUFF_TYPE_BDE_64I;
			bpl->tus.w = le32_to_cpu(bpl->tus.w);

			num_bde++;
			curr_data++;

			/* data buffer split across prot groups; keep sgde */
			if (split_offset)
				break;

			/* Move to the next s/g segment if possible */
			sgde = sg_next(sgde);

		}

		if (protgroup_offset) {
			/* prot segment was split; update reftag and go again */
			reftag += protgrp_blks;
			bpl++;
			continue;
		}

		/* are we done ? */
		if (curr_prot == protcnt) {
			alldone = 1;
		} else if (curr_prot < protcnt) {
			/* advance to next prot buffer */
			sgpe = sg_next(sgpe);
			bpl++;

			/* update the reference tag */
			reftag += protgrp_blks;
		} else {
			/* if we're here, we have a bug */
			lpfc_printf_log(phba, KERN_ERR, LOG_BG,
				"9054 BLKGRD: bug in %s\n", __func__);
		}

	} while (!alldone);
out:

	return num_bde;
}
1960
1961
1962
1963
1964
1965
1966
1967
1968
1969
1970
1971
1972
1973
1974
1975
1976
1977
1978
1979
1980
1981
1982
1983
1984
1985
1986
1987
1988
1989
/*
 * lpfc_bg_setup_sgl - Set up BlockGuard SGL with no protection data (SLI-4)
 * @phba: The Hba for which this call is being executed.
 * @sc: pointer to scsi command we're working on.
 * @sgl: pointer to buffer list for protection groups.
 * @datasegcnt: number of segments of data that have been dma mapped.
 *
 * Builds one DISEED SGE followed by one data SGE per mapped segment
 * (layout: DISEED | SGE | SGE | ...).  The last data SGE gets the
 * "last" bit set.
 *
 * Returns the number of SGEs written, or 0 if lpfc_sc_to_bg_opcodes
 * rejects the op/guard combination.
 */
static int
lpfc_bg_setup_sgl(struct lpfc_hba *phba, struct scsi_cmnd *sc,
		struct sli4_sge *sgl, int datasegcnt)
{
	struct scatterlist *sgde = NULL; /* s/g data entry */
	struct sli4_sge_diseed *diseed = NULL;
	dma_addr_t physaddr;
	int i = 0, num_sge = 0, status;
	uint32_t reftag;
	uint8_t txop, rxop;
#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
	uint32_t rc;
#endif
	uint32_t checking = 1;
	uint32_t dma_len;
	uint32_t dma_offset = 0;

	status = lpfc_sc_to_bg_opcodes(phba, sc, &txop, &rxop);
	if (status)
		goto out;

	/* extract some info from the scsi command for diseed */
	reftag = (uint32_t)scsi_get_lba(sc); /* Truncate LBA */

#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
	/* Optionally perturb reftag/opcodes to inject BG errors for test */
	rc = lpfc_bg_err_inject(phba, sc, &reftag, NULL, 1);
	if (rc) {
		if (rc & BG_ERR_SWAP)
			lpfc_bg_err_opcodes(phba, sc, &txop, &rxop);
		if (rc & BG_ERR_CHECK)
			checking = 0;
	}
#endif

	/* setup DISEED with what we have */
	diseed = (struct sli4_sge_diseed *) sgl;
	memset(diseed, 0, sizeof(struct sli4_sge_diseed));
	bf_set(lpfc_sli4_sge_type, sgl, LPFC_SGE_TYPE_DISEED);

	/* Endianness conversion if necessary */
	diseed->ref_tag = cpu_to_le32(reftag);
	diseed->ref_tag_tran = diseed->ref_tag;

	/*
	 * Guard/ref checking is only enabled for reads (DMA_FROM_DEVICE);
	 * for writes the ce/re bits are left 0 from the memset above.
	 */
	if (sc->sc_data_direction == DMA_FROM_DEVICE) {
		if (lpfc_cmd_protect(sc, LPFC_CHECK_PROTECT_GUARD))
			bf_set(lpfc_sli4_sge_dif_ce, diseed, checking);
		else
			bf_set(lpfc_sli4_sge_dif_ce, diseed, 0);

		if (lpfc_cmd_protect(sc, LPFC_CHECK_PROTECT_REF))
			bf_set(lpfc_sli4_sge_dif_re, diseed, checking);
		else
			bf_set(lpfc_sli4_sge_dif_re, diseed, 0);
	}

	/* setup DISEED with the rest of the info */
	bf_set(lpfc_sli4_sge_dif_optx, diseed, txop);
	bf_set(lpfc_sli4_sge_dif_oprx, diseed, rxop);

	bf_set(lpfc_sli4_sge_dif_ai, diseed, 1);	/* auto-increment ref tag */
	bf_set(lpfc_sli4_sge_dif_me, diseed, 0);

	/* Endianness conversion if necessary for DISEED */
	diseed->word2 = cpu_to_le32(diseed->word2);
	diseed->word3 = cpu_to_le32(diseed->word3);

	/* advance sgl and increment sge count */
	num_sge++;
	sgl++;

	/* assumption: caller has already run dma_map_sg on command data */
	scsi_for_each_sg(sc, sgde, datasegcnt, i) {
		physaddr = sg_dma_address(sgde);
		dma_len = sg_dma_len(sgde);
		sgl->addr_lo = cpu_to_le32(putPaddrLow(physaddr));
		sgl->addr_hi = cpu_to_le32(putPaddrHigh(physaddr));
		if ((i + 1) == datasegcnt)
			bf_set(lpfc_sli4_sge_last, sgl, 1);
		else
			bf_set(lpfc_sli4_sge_last, sgl, 0);
		bf_set(lpfc_sli4_sge_offset, sgl, dma_offset);
		bf_set(lpfc_sli4_sge_type, sgl, LPFC_SGE_TYPE_DATA);

		sgl->sge_len = cpu_to_le32(dma_len);
		dma_offset += dma_len;

		sgl++;
		num_sge++;
	}

out:
	return num_sge;
}
2087
2088
2089
2090
2091
2092
2093
2094
2095
2096
2097
2098
2099
2100
2101
2102
2103
2104
2105
2106
2107
2108
2109
2110
2111
2112
2113
2114
2115
2116
2117
2118
2119
2120
2121
2122
2123
2124
/*
 * lpfc_bg_setup_sgl_prot - Set up BlockGuard SGL with protection data (SLI-4)
 * @phba: The Hba for which this call is being executed.
 * @sc: pointer to scsi command we're working on.
 * @sgl: pointer to buffer list for protection groups.
 * @datacnt: number of segments of data that have been dma mapped.
 * @protcnt: number of segments of protection data that have been dma mapped.
 *
 * Builds one protection group per DIF segment, each laid out as
 * DISEED | DIF SGE | data SGE | data SGE | ... , splitting DIF segments
 * that would cross a 4K boundary and splitting data buffers across
 * protection groups as needed.  The final data SGE gets the "last" bit.
 *
 * Returns the number of SGEs written, 0 on error, or a value larger
 * than phba->cfg_total_seg_cnt when space is exhausted (caller treats
 * that as an error).
 */
static int
lpfc_bg_setup_sgl_prot(struct lpfc_hba *phba, struct scsi_cmnd *sc,
		struct sli4_sge *sgl, int datacnt, int protcnt)
{
	struct scatterlist *sgde = NULL; /* s/g data entry */
	struct scatterlist *sgpe = NULL; /* s/g prot entry */
	struct sli4_sge_diseed *diseed = NULL;
	dma_addr_t dataphysaddr, protphysaddr;
	unsigned short curr_data = 0, curr_prot = 0;
	unsigned int split_offset;
	unsigned int protgroup_len, protgroup_offset = 0, protgroup_remainder;
	unsigned int protgrp_blks, protgrp_bytes;
	unsigned int remainder, subtotal;
	int status;
	unsigned char pgdone = 0, alldone = 0;
	unsigned blksize;
	uint32_t reftag;
	uint8_t txop, rxop;
	uint32_t dma_len;
#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
	uint32_t rc;
#endif
	uint32_t checking = 1;
	uint32_t dma_offset = 0;
	int num_sge = 0;

	sgpe = scsi_prot_sglist(sc);
	sgde = scsi_sglist(sc);

	if (!sgpe || !sgde) {
		lpfc_printf_log(phba, KERN_ERR, LOG_FCP,
				"9082 Invalid s/g entry: data=0x%p prot=0x%p\n",
				sgpe, sgde);
		return 0;
	}

	status = lpfc_sc_to_bg_opcodes(phba, sc, &txop, &rxop);
	if (status)
		goto out;

	/* extract some info from the scsi command */
	blksize = lpfc_cmd_blksize(sc);
	reftag = (uint32_t)scsi_get_lba(sc); /* Truncate LBA */

#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
	/* Optionally perturb reftag/opcodes to inject BG errors for test */
	rc = lpfc_bg_err_inject(phba, sc, &reftag, NULL, 1);
	if (rc) {
		if (rc & BG_ERR_SWAP)
			lpfc_bg_err_opcodes(phba, sc, &txop, &rxop);
		if (rc & BG_ERR_CHECK)
			checking = 0;
	}
#endif

	split_offset = 0;
	do {
		/* Check to see if we ran out of space */
		if (num_sge >= (phba->cfg_total_seg_cnt - 2))
			return num_sge + 3;

		/* setup DISEED with what we have */
		diseed = (struct sli4_sge_diseed *) sgl;
		memset(diseed, 0, sizeof(struct sli4_sge_diseed));
		bf_set(lpfc_sli4_sge_type, sgl, LPFC_SGE_TYPE_DISEED);

		/* Endianness conversion if necessary */
		diseed->ref_tag = cpu_to_le32(reftag);
		diseed->ref_tag_tran = diseed->ref_tag;

		if (lpfc_cmd_protect(sc, LPFC_CHECK_PROTECT_GUARD)) {
			bf_set(lpfc_sli4_sge_dif_ce, diseed, checking);

		} else {
			bf_set(lpfc_sli4_sge_dif_ce, diseed, 0);
			/*
			 * Guard checking is disabled.  Switch an
			 * IN_CRC_OUT_CRC pass-through to raw mode so the
			 * guard tag the host supplies goes on the wire
			 * untouched rather than being regenerated.
			 * NOTE(review): hardware-regeneration rationale
			 * inferred from this override — confirm against
			 * the SLI-4 DISEED documentation.
			 */
			if (txop == BG_OP_IN_CRC_OUT_CRC) {
				txop = BG_OP_RAW_MODE;
				rxop = BG_OP_RAW_MODE;
			}
		}

		/* ref-tag checking per command protection flags */
		if (lpfc_cmd_protect(sc, LPFC_CHECK_PROTECT_REF))
			bf_set(lpfc_sli4_sge_dif_re, diseed, checking);
		else
			bf_set(lpfc_sli4_sge_dif_re, diseed, 0);

		/* setup DISEED with the rest of the info */
		bf_set(lpfc_sli4_sge_dif_optx, diseed, txop);
		bf_set(lpfc_sli4_sge_dif_oprx, diseed, rxop);

		bf_set(lpfc_sli4_sge_dif_ai, diseed, 1);	/* auto-inc ref tag */
		bf_set(lpfc_sli4_sge_dif_me, diseed, 0);

		/* Endianness conversion if necessary for DISEED */
		diseed->word2 = cpu_to_le32(diseed->word2);
		diseed->word3 = cpu_to_le32(diseed->word3);

		/* advance sgl and increment sge count */
		num_sge++;
		sgl++;

		/* setup the first SGE that points to protection buffer */
		protphysaddr = sg_dma_address(sgpe) + protgroup_offset;
		protgroup_len = sg_dma_len(sgpe) - protgroup_offset;

		/* must be integer multiple of the DIF tuple size (8 bytes) */
		BUG_ON(protgroup_len % 8);

		/* Now setup DIF SGE */
		sgl->word2 = 0;
		bf_set(lpfc_sli4_sge_type, sgl, LPFC_SGE_TYPE_DIF);
		sgl->addr_hi = le32_to_cpu(putPaddrHigh(protphysaddr));
		sgl->addr_lo = le32_to_cpu(putPaddrLow(protphysaddr));
		sgl->word2 = cpu_to_le32(sgl->word2);

		protgrp_blks = protgroup_len / 8;
		protgrp_bytes = protgrp_blks * blksize;

		/* check if DIF SGE crosses a 4K boundary; if so split it */
		if ((sgl->addr_lo & 0xfff) + protgroup_len > 0x1000) {
			protgroup_remainder = 0x1000 - (sgl->addr_lo & 0xfff);
			protgroup_offset += protgroup_remainder;
			protgrp_blks = protgroup_remainder / 8;
			protgrp_bytes = protgrp_blks * blksize;
		} else {
			/* entire prot segment consumed; move to the next */
			protgroup_offset = 0;
			curr_prot++;
		}

		num_sge++;

		/* setup SGE's for data blocks associated with this DIF data */
		pgdone = 0;
		subtotal = 0; /* total bytes processed for current prot grp */
		while (!pgdone) {
			/* Check to see if we ran out of space */
			if (num_sge >= phba->cfg_total_seg_cnt)
				return num_sge + 1;

			if (!sgde) {
				lpfc_printf_log(phba, KERN_ERR, LOG_BG,
					"9086 BLKGRD:%s Invalid data segment\n",
						__func__);
				return 0;
			}
			sgl++;
			dataphysaddr = sg_dma_address(sgde) + split_offset;

			remainder = sg_dma_len(sgde) - split_offset;

			if ((subtotal + remainder) <= protgrp_bytes) {
				/* we can use this whole buffer */
				dma_len = remainder;
				split_offset = 0;

				if ((subtotal + remainder) == protgrp_bytes)
					pgdone = 1;
			} else {
				/* must split this buffer with next prot grp */
				dma_len = protgrp_bytes - subtotal;
				split_offset += dma_len;
			}

			subtotal += dma_len;

			sgl->addr_lo = cpu_to_le32(putPaddrLow(dataphysaddr));
			sgl->addr_hi = cpu_to_le32(putPaddrHigh(dataphysaddr));
			bf_set(lpfc_sli4_sge_last, sgl, 0);
			bf_set(lpfc_sli4_sge_offset, sgl, dma_offset);
			bf_set(lpfc_sli4_sge_type, sgl, LPFC_SGE_TYPE_DATA);

			sgl->sge_len = cpu_to_le32(dma_len);
			dma_offset += dma_len;

			num_sge++;
			curr_data++;

			/* data buffer split across prot groups; keep sgde */
			if (split_offset)
				break;

			/* Move to the next s/g segment if possible */
			sgde = sg_next(sgde);
		}

		if (protgroup_offset) {
			/* prot segment was split; update reftag and go again */
			reftag += protgrp_blks;
			sgl++;
			continue;
		}

		/* are we done ? */
		if (curr_prot == protcnt) {
			bf_set(lpfc_sli4_sge_last, sgl, 1);
			alldone = 1;
		} else if (curr_prot < protcnt) {
			/* advance to next prot buffer */
			sgpe = sg_next(sgpe);
			sgl++;

			/* update the reference tag */
			reftag += protgrp_blks;
		} else {
			/* if we're here, we have a bug */
			lpfc_printf_log(phba, KERN_ERR, LOG_BG,
				"9085 BLKGRD: bug in %s\n", __func__);
		}

	} while (!alldone);

out:

	return num_sge;
}
2346
2347
2348
2349
2350
2351
2352
2353
2354
2355
2356
2357
2358static int
2359lpfc_prot_group_type(struct lpfc_hba *phba, struct scsi_cmnd *sc)
2360{
2361 int ret = LPFC_PG_TYPE_INVALID;
2362 unsigned char op = scsi_get_prot_op(sc);
2363
2364 switch (op) {
2365 case SCSI_PROT_READ_STRIP:
2366 case SCSI_PROT_WRITE_INSERT:
2367 ret = LPFC_PG_TYPE_NO_DIF;
2368 break;
2369 case SCSI_PROT_READ_INSERT:
2370 case SCSI_PROT_WRITE_STRIP:
2371 case SCSI_PROT_READ_PASS:
2372 case SCSI_PROT_WRITE_PASS:
2373 ret = LPFC_PG_TYPE_DIF_BUF;
2374 break;
2375 default:
2376 if (phba)
2377 lpfc_printf_log(phba, KERN_ERR, LOG_FCP,
2378 "9021 Unsupported protection op:%d\n",
2379 op);
2380 break;
2381 }
2382 return ret;
2383}
2384
2385
2386
2387
2388
2389
2390
2391
2392
2393
2394
2395static int
2396lpfc_bg_scsi_adjust_dl(struct lpfc_hba *phba,
2397 struct lpfc_io_buf *lpfc_cmd)
2398{
2399 struct scsi_cmnd *sc = lpfc_cmd->pCmd;
2400 int fcpdl;
2401
2402 fcpdl = scsi_bufflen(sc);
2403
2404
2405 if (sc->sc_data_direction == DMA_FROM_DEVICE) {
2406
2407 if (scsi_get_prot_op(sc) == SCSI_PROT_READ_INSERT)
2408 return fcpdl;
2409
2410 } else {
2411
2412 if (scsi_get_prot_op(sc) == SCSI_PROT_WRITE_STRIP)
2413 return fcpdl;
2414 }
2415
2416
2417
2418
2419
2420
2421 fcpdl += (fcpdl / lpfc_cmd_blksize(sc)) * 8;
2422
2423 return fcpdl;
2424}
2425
2426
2427
2428
2429
2430
2431
2432
2433
2434
2435static int
2436lpfc_bg_scsi_prep_dma_buf_s3(struct lpfc_hba *phba,
2437 struct lpfc_io_buf *lpfc_cmd)
2438{
2439 struct scsi_cmnd *scsi_cmnd = lpfc_cmd->pCmd;
2440 struct fcp_cmnd *fcp_cmnd = lpfc_cmd->fcp_cmnd;
2441 struct ulp_bde64 *bpl = (struct ulp_bde64 *)lpfc_cmd->dma_sgl;
2442 IOCB_t *iocb_cmd = &lpfc_cmd->cur_iocbq.iocb;
2443 uint32_t num_bde = 0;
2444 int datasegcnt, protsegcnt, datadir = scsi_cmnd->sc_data_direction;
2445 int prot_group_type = 0;
2446 int fcpdl;
2447 struct lpfc_vport *vport = phba->pport;
2448
2449
2450
2451
2452
2453 bpl += 2;
2454 if (scsi_sg_count(scsi_cmnd)) {
2455
2456
2457
2458
2459
2460
2461 datasegcnt = dma_map_sg(&phba->pcidev->dev,
2462 scsi_sglist(scsi_cmnd),
2463 scsi_sg_count(scsi_cmnd), datadir);
2464 if (unlikely(!datasegcnt))
2465 return 1;
2466
2467 lpfc_cmd->seg_cnt = datasegcnt;
2468
2469
2470 if (lpfc_cmd->seg_cnt > phba->cfg_sg_seg_cnt)
2471 goto err;
2472
2473 prot_group_type = lpfc_prot_group_type(phba, scsi_cmnd);
2474
2475 switch (prot_group_type) {
2476 case LPFC_PG_TYPE_NO_DIF:
2477
2478
2479 if ((lpfc_cmd->seg_cnt + 2) > phba->cfg_total_seg_cnt)
2480 goto err;
2481
2482 num_bde = lpfc_bg_setup_bpl(phba, scsi_cmnd, bpl,
2483 datasegcnt);
2484
2485 if (num_bde < 2)
2486 goto err;
2487 break;
2488
2489 case LPFC_PG_TYPE_DIF_BUF:
2490
2491
2492
2493
2494
2495 protsegcnt = dma_map_sg(&phba->pcidev->dev,
2496 scsi_prot_sglist(scsi_cmnd),
2497 scsi_prot_sg_count(scsi_cmnd), datadir);
2498 if (unlikely(!protsegcnt)) {
2499 scsi_dma_unmap(scsi_cmnd);
2500 return 1;
2501 }
2502
2503 lpfc_cmd->prot_seg_cnt = protsegcnt;
2504
2505
2506
2507
2508
2509 if ((lpfc_cmd->prot_seg_cnt * 4) >
2510 (phba->cfg_total_seg_cnt - 2))
2511 goto err;
2512
2513 num_bde = lpfc_bg_setup_bpl_prot(phba, scsi_cmnd, bpl,
2514 datasegcnt, protsegcnt);
2515
2516 if ((num_bde < 3) ||
2517 (num_bde > phba->cfg_total_seg_cnt))
2518 goto err;
2519 break;
2520
2521 case LPFC_PG_TYPE_INVALID:
2522 default:
2523 scsi_dma_unmap(scsi_cmnd);
2524 lpfc_cmd->seg_cnt = 0;
2525
2526 lpfc_printf_log(phba, KERN_ERR, LOG_FCP,
2527 "9022 Unexpected protection group %i\n",
2528 prot_group_type);
2529 return 1;
2530 }
2531 }
2532
2533
2534
2535
2536
2537
2538
2539 iocb_cmd->un.fcpi64.bdl.bdeSize = (2 * sizeof(struct ulp_bde64));
2540 iocb_cmd->un.fcpi64.bdl.bdeSize += (num_bde * sizeof(struct ulp_bde64));
2541 iocb_cmd->ulpBdeCount = 1;
2542 iocb_cmd->ulpLe = 1;
2543
2544 fcpdl = lpfc_bg_scsi_adjust_dl(phba, lpfc_cmd);
2545 fcp_cmnd->fcpDl = be32_to_cpu(fcpdl);
2546
2547
2548
2549
2550
2551 iocb_cmd->un.fcpi.fcpi_parm = fcpdl;
2552
2553
2554
2555
2556
2557 if (iocb_cmd->un.fcpi.fcpi_XRdy &&
2558 (fcpdl < vport->cfg_first_burst_size))
2559 iocb_cmd->un.fcpi.fcpi_XRdy = fcpdl;
2560
2561 return 0;
2562err:
2563 if (lpfc_cmd->seg_cnt)
2564 scsi_dma_unmap(scsi_cmnd);
2565 if (lpfc_cmd->prot_seg_cnt)
2566 dma_unmap_sg(&phba->pcidev->dev, scsi_prot_sglist(scsi_cmnd),
2567 scsi_prot_sg_count(scsi_cmnd),
2568 scsi_cmnd->sc_data_direction);
2569
2570 lpfc_printf_log(phba, KERN_ERR, LOG_FCP,
2571 "9023 Cannot setup S/G List for HBA"
2572 "IO segs %d/%d BPL %d SCSI %d: %d %d\n",
2573 lpfc_cmd->seg_cnt, lpfc_cmd->prot_seg_cnt,
2574 phba->cfg_total_seg_cnt, phba->cfg_sg_seg_cnt,
2575 prot_group_type, num_bde);
2576
2577 lpfc_cmd->seg_cnt = 0;
2578 lpfc_cmd->prot_seg_cnt = 0;
2579 return 1;
2580}
2581
2582
2583
2584
2585
2586
2587static uint16_t
2588lpfc_bg_crc(uint8_t *data, int count)
2589{
2590 uint16_t crc = 0;
2591 uint16_t x;
2592
2593 crc = crc_t10dif(data, count);
2594 x = cpu_to_be16(crc);
2595 return x;
2596}
2597
2598
2599
2600
2601
2602
2603static uint16_t
2604lpfc_bg_csum(uint8_t *data, int count)
2605{
2606 uint16_t ret;
2607
2608 ret = ip_compute_csum(data, count);
2609 return ret;
2610}
2611
2612
2613
2614
2615
/*
 * lpfc_calc_bg_err - Software re-check of T10-DIF protection data
 * @phba: The Hba for which this call is being executed.
 * @lpfc_cmd: The scsi buffer whose protection data is re-verified.
 *
 * Walks the command's protection s/g list in parallel with its data
 * s/g list, recomputing guard tags and checking ref/app tags to
 * classify a BlockGuard failure.  err_type starts as
 * BGS_GUARD_ERR_MASK, so if no specific mismatch is found (including
 * the early-out for ops with no host-visible protection data) a
 * guard-tag error is reported.  On any classification this builds the
 * matching sense data (asc 0x10, ascq 1/2/3), sets cmd->result to
 * CHECK CONDITION with DID_ABORT, bumps the per-type phba counter and
 * logs the mismatch.
 */
static void
lpfc_calc_bg_err(struct lpfc_hba *phba, struct lpfc_io_buf *lpfc_cmd)
{
	struct scatterlist *sgpe; /* s/g prot entry */
	struct scatterlist *sgde; /* s/g data entry */
	struct scsi_cmnd *cmd = lpfc_cmd->pCmd;
	struct scsi_dif_tuple *src = NULL;
	uint8_t *data_src = NULL;
	uint16_t guard_tag;
	uint16_t start_app_tag, app_tag;
	uint32_t start_ref_tag, ref_tag;
	int prot, protsegcnt;
	int err_type, len, data_len;
	int chk_ref, chk_app, chk_guard;
	uint16_t sum;
	unsigned blksize;

	/* default classification if no specific mismatch is found */
	err_type = BGS_GUARD_ERR_MASK;
	sum = 0;
	guard_tag = 0;

	/* First check to see if there is protection data to examine */
	prot = scsi_get_prot_op(cmd);
	if ((prot == SCSI_PROT_READ_STRIP) ||
	    (prot == SCSI_PROT_WRITE_INSERT) ||
	    (prot == SCSI_PROT_NORMAL))
		goto out;

	/* ref tag is always checked; app tag checking is not implemented */
	chk_ref = 1;
	chk_app = 0;
	chk_guard = 0;

	/* Setup a ptr to the protection data provided by the SCSI host */
	sgpe = scsi_prot_sglist(cmd);
	protsegcnt = lpfc_cmd->prot_seg_cnt;

	if (sgpe && protsegcnt) {

		/*
		 * Only verify guard tags while the data segment length is
		 * a whole number of blocks; otherwise data and protection
		 * tuples cannot be paired up reliably.
		 */
		sgde = scsi_sglist(cmd);
		blksize = lpfc_cmd_blksize(cmd);
		data_src = (uint8_t *)sg_virt(sgde);
		data_len = sgde->length;
		if ((data_len & (blksize - 1)) == 0)
			chk_guard = 1;

		src = (struct scsi_dif_tuple *)sg_virt(sgpe);
		start_ref_tag = (uint32_t)scsi_get_lba(cmd); /* Truncate LBA */
		/* app tag kept in raw (wire) byte order; compared raw below */
		start_app_tag = src->app_tag;
		len = sgpe->length;
		while (src && protsegcnt) {
			while (len) {

				/*
				 * Escape values (all-ones tags) mark tuples
				 * that must not be checked; skip them but
				 * keep the ref tag sequence advancing.
				 */
				if ((src->ref_tag == T10_PI_REF_ESCAPE) ||
				    (src->app_tag == T10_PI_APP_ESCAPE)) {
					start_ref_tag++;
					goto skipit;
				}

				/* First Guard Tag checking */
				if (chk_guard) {
					guard_tag = src->guard_tag;
					if (lpfc_cmd_guard_csum(cmd))
						sum = lpfc_bg_csum(data_src,
								   blksize);
					else
						sum = lpfc_bg_crc(data_src,
								  blksize);
					if ((guard_tag != sum)) {
						err_type = BGS_GUARD_ERR_MASK;
						goto out;
					}
				}

				/* Reference Tag checking */
				ref_tag = be32_to_cpu(src->ref_tag);
				if (chk_ref && (ref_tag != start_ref_tag)) {
					err_type = BGS_REFTAG_ERR_MASK;
					goto out;
				}
				start_ref_tag++;

				/* App Tag checking (chk_app is 0 today) */
				app_tag = src->app_tag;
				if (chk_app && (app_tag != start_app_tag)) {
					err_type = BGS_APPTAG_ERR_MASK;
					goto out;
				}
skipit:
				len -= sizeof(struct scsi_dif_tuple);
				if (len < 0)
					len = 0;
				src++;

				data_src += blksize;
				data_len -= blksize;

				/*
				 * At the end of a data segment, advance to
				 * the next one and re-evaluate whether guard
				 * checking can continue (length multiple of
				 * blksize).
				 */
				if (chk_guard && (data_len == 0)) {
					chk_guard = 0;
					sgde = sg_next(sgde);
					if (!sgde)
						goto out;

					data_src = (uint8_t *)sg_virt(sgde);
					data_len = sgde->length;
					if ((data_len & (blksize - 1)) == 0)
						chk_guard = 1;
				}
			}

			/* Go to the next protection data segment */
			sgpe = sg_next(sgpe);
			if (sgpe) {
				src = (struct scsi_dif_tuple *)sg_virt(sgpe);
				len = sgpe->length;
			} else {
				src = NULL;
			}
			protsegcnt--;
		}
	}
out:
	if (err_type == BGS_GUARD_ERR_MASK) {
		scsi_build_sense_buffer(1, cmd->sense_buffer, ILLEGAL_REQUEST,
					0x10, 0x1);
		cmd->result = DRIVER_SENSE << 24 | DID_ABORT << 16 |
			      SAM_STAT_CHECK_CONDITION;
		phba->bg_guard_err_cnt++;
		lpfc_printf_log(phba, KERN_WARNING, LOG_FCP | LOG_BG,
				"9069 BLKGRD: LBA %lx grd_tag error %x != %x\n",
				(unsigned long)scsi_get_lba(cmd),
				sum, guard_tag);

	} else if (err_type == BGS_REFTAG_ERR_MASK) {
		scsi_build_sense_buffer(1, cmd->sense_buffer, ILLEGAL_REQUEST,
					0x10, 0x3);
		cmd->result = DRIVER_SENSE << 24 | DID_ABORT << 16 |
			      SAM_STAT_CHECK_CONDITION;

		phba->bg_reftag_err_cnt++;
		lpfc_printf_log(phba, KERN_WARNING, LOG_FCP | LOG_BG,
				"9066 BLKGRD: LBA %lx ref_tag error %x != %x\n",
				(unsigned long)scsi_get_lba(cmd),
				ref_tag, start_ref_tag);

	} else if (err_type == BGS_APPTAG_ERR_MASK) {
		scsi_build_sense_buffer(1, cmd->sense_buffer, ILLEGAL_REQUEST,
					0x10, 0x2);
		cmd->result = DRIVER_SENSE << 24 | DID_ABORT << 16 |
			      SAM_STAT_CHECK_CONDITION;

		phba->bg_apptag_err_cnt++;
		lpfc_printf_log(phba, KERN_WARNING, LOG_FCP | LOG_BG,
				"9041 BLKGRD: LBA %lx app_tag error %x != %x\n",
				(unsigned long)scsi_get_lba(cmd),
				app_tag, start_app_tag);
	}
}
2787
2788
2789
2790
2791
2792
2793
2794
2795
2796
2797
2798
2799
2800
2801static int
2802lpfc_parse_bg_err(struct lpfc_hba *phba, struct lpfc_io_buf *lpfc_cmd,
2803 struct lpfc_iocbq *pIocbOut)
2804{
2805 struct scsi_cmnd *cmd = lpfc_cmd->pCmd;
2806 struct sli3_bg_fields *bgf = &pIocbOut->iocb.unsli3.sli3_bg;
2807 int ret = 0;
2808 uint32_t bghm = bgf->bghm;
2809 uint32_t bgstat = bgf->bgstat;
2810 uint64_t failing_sector = 0;
2811
2812 spin_lock(&_dump_buf_lock);
2813 if (!_dump_buf_done) {
2814 lpfc_printf_log(phba, KERN_ERR, LOG_BG, "9070 BLKGRD: Saving"
2815 " Data for %u blocks to debugfs\n",
2816 (cmd->cmnd[7] << 8 | cmd->cmnd[8]));
2817 lpfc_debug_save_data(phba, cmd);
2818
2819
2820 if (lpfc_prot_group_type(phba, cmd) ==
2821 LPFC_PG_TYPE_DIF_BUF) {
2822 lpfc_printf_log(phba, KERN_ERR, LOG_BG, "9071 BLKGRD: "
2823 "Saving DIF for %u blocks to debugfs\n",
2824 (cmd->cmnd[7] << 8 | cmd->cmnd[8]));
2825 lpfc_debug_save_dif(phba, cmd);
2826 }
2827
2828 _dump_buf_done = 1;
2829 }
2830 spin_unlock(&_dump_buf_lock);
2831
2832 if (lpfc_bgs_get_invalid_prof(bgstat)) {
2833 cmd->result = DID_ERROR << 16;
2834 lpfc_printf_log(phba, KERN_WARNING, LOG_FCP | LOG_BG,
2835 "9072 BLKGRD: Invalid BG Profile in cmd"
2836 " 0x%x lba 0x%llx blk cnt 0x%x "
2837 "bgstat=x%x bghm=x%x\n", cmd->cmnd[0],
2838 (unsigned long long)scsi_get_lba(cmd),
2839 blk_rq_sectors(cmd->request), bgstat, bghm);
2840 ret = (-1);
2841 goto out;
2842 }
2843
2844 if (lpfc_bgs_get_uninit_dif_block(bgstat)) {
2845 cmd->result = DID_ERROR << 16;
2846 lpfc_printf_log(phba, KERN_WARNING, LOG_FCP | LOG_BG,
2847 "9073 BLKGRD: Invalid BG PDIF Block in cmd"
2848 " 0x%x lba 0x%llx blk cnt 0x%x "
2849 "bgstat=x%x bghm=x%x\n", cmd->cmnd[0],
2850 (unsigned long long)scsi_get_lba(cmd),
2851 blk_rq_sectors(cmd->request), bgstat, bghm);
2852 ret = (-1);
2853 goto out;
2854 }
2855
2856 if (lpfc_bgs_get_guard_err(bgstat)) {
2857 ret = 1;
2858
2859 scsi_build_sense_buffer(1, cmd->sense_buffer, ILLEGAL_REQUEST,
2860 0x10, 0x1);
2861 cmd->result = DRIVER_SENSE << 24 | DID_ABORT << 16 |
2862 SAM_STAT_CHECK_CONDITION;
2863 phba->bg_guard_err_cnt++;
2864 lpfc_printf_log(phba, KERN_WARNING, LOG_FCP | LOG_BG,
2865 "9055 BLKGRD: Guard Tag error in cmd"
2866 " 0x%x lba 0x%llx blk cnt 0x%x "
2867 "bgstat=x%x bghm=x%x\n", cmd->cmnd[0],
2868 (unsigned long long)scsi_get_lba(cmd),
2869 blk_rq_sectors(cmd->request), bgstat, bghm);
2870 }
2871
2872 if (lpfc_bgs_get_reftag_err(bgstat)) {
2873 ret = 1;
2874
2875 scsi_build_sense_buffer(1, cmd->sense_buffer, ILLEGAL_REQUEST,
2876 0x10, 0x3);
2877 cmd->result = DRIVER_SENSE << 24 | DID_ABORT << 16 |
2878 SAM_STAT_CHECK_CONDITION;
2879
2880 phba->bg_reftag_err_cnt++;
2881 lpfc_printf_log(phba, KERN_WARNING, LOG_FCP | LOG_BG,
2882 "9056 BLKGRD: Ref Tag error in cmd"
2883 " 0x%x lba 0x%llx blk cnt 0x%x "
2884 "bgstat=x%x bghm=x%x\n", cmd->cmnd[0],
2885 (unsigned long long)scsi_get_lba(cmd),
2886 blk_rq_sectors(cmd->request), bgstat, bghm);
2887 }
2888
2889 if (lpfc_bgs_get_apptag_err(bgstat)) {
2890 ret = 1;
2891
2892 scsi_build_sense_buffer(1, cmd->sense_buffer, ILLEGAL_REQUEST,
2893 0x10, 0x2);
2894 cmd->result = DRIVER_SENSE << 24 | DID_ABORT << 16 |
2895 SAM_STAT_CHECK_CONDITION;
2896
2897 phba->bg_apptag_err_cnt++;
2898 lpfc_printf_log(phba, KERN_WARNING, LOG_FCP | LOG_BG,
2899 "9061 BLKGRD: App Tag error in cmd"
2900 " 0x%x lba 0x%llx blk cnt 0x%x "
2901 "bgstat=x%x bghm=x%x\n", cmd->cmnd[0],
2902 (unsigned long long)scsi_get_lba(cmd),
2903 blk_rq_sectors(cmd->request), bgstat, bghm);
2904 }
2905
2906 if (lpfc_bgs_get_hi_water_mark_present(bgstat)) {
2907
2908
2909
2910
2911
2912
2913 cmd->sense_buffer[7] = 0xc;
2914 cmd->sense_buffer[8] = 0;
2915 cmd->sense_buffer[9] = 0xa;
2916 cmd->sense_buffer[10] = 0x80;
2917
2918
2919 switch (scsi_get_prot_op(cmd)) {
2920 case SCSI_PROT_READ_INSERT:
2921 case SCSI_PROT_WRITE_STRIP:
2922 bghm /= cmd->device->sector_size;
2923 break;
2924 case SCSI_PROT_READ_STRIP:
2925 case SCSI_PROT_WRITE_INSERT:
2926 case SCSI_PROT_READ_PASS:
2927 case SCSI_PROT_WRITE_PASS:
2928 bghm /= (cmd->device->sector_size +
2929 sizeof(struct scsi_dif_tuple));
2930 break;
2931 }
2932
2933 failing_sector = scsi_get_lba(cmd);
2934 failing_sector += bghm;
2935
2936
2937 put_unaligned_be64(failing_sector, &cmd->sense_buffer[12]);
2938 }
2939
2940 if (!ret) {
2941
2942 lpfc_printf_log(phba, KERN_WARNING, LOG_FCP | LOG_BG,
2943 "9057 BLKGRD: Unknown error in cmd"
2944 " 0x%x lba 0x%llx blk cnt 0x%x "
2945 "bgstat=x%x bghm=x%x\n", cmd->cmnd[0],
2946 (unsigned long long)scsi_get_lba(cmd),
2947 blk_rq_sectors(cmd->request), bgstat, bghm);
2948
2949
2950 lpfc_calc_bg_err(phba, lpfc_cmd);
2951 }
2952out:
2953 return ret;
2954}
2955
2956
2957
2958
2959
2960
2961
2962
2963
2964
2965
2966
2967
/**
 * lpfc_scsi_prep_dma_buf_s4 - DMA-map a SCSI command for an SLI-4 interface HBA
 * @phba: HBA on which the I/O will be issued.
 * @lpfc_cmd: driver I/O buffer wrapping the SCSI command to map.
 *
 * Maps the command's scatter/gather list (if any) for DMA and converts each
 * mapped segment into an SLI-4 SGE in @lpfc_cmd's SGL.  The first two SGEs
 * of the SGL are pre-built for the FCP_CMND and FCP_RSP payloads, so data
 * SGEs are written starting at the third entry.
 *
 * Return: 0 on success, 1 on failure (DMA mapping failed or the mapped
 * segment count exceeds the configured phba->cfg_sg_seg_cnt limit).
 */
static int
lpfc_scsi_prep_dma_buf_s4(struct lpfc_hba *phba, struct lpfc_io_buf *lpfc_cmd)
{
	struct scsi_cmnd *scsi_cmnd = lpfc_cmd->pCmd;
	struct scatterlist *sgel = NULL;
	struct fcp_cmnd *fcp_cmnd = lpfc_cmd->fcp_cmnd;
	struct sli4_sge *sgl = (struct sli4_sge *)lpfc_cmd->dma_sgl;
	struct sli4_sge *first_data_sgl;
	IOCB_t *iocb_cmd = &lpfc_cmd->cur_iocbq.iocb;
	dma_addr_t physaddr;
	uint32_t num_bde = 0;
	uint32_t dma_len;
	uint32_t dma_offset = 0;
	int nseg;
	struct ulp_bde64 *bde;

	/*
	 * There are two cases here: the command carries a scatter/gather
	 * list that must be mapped, or it carries no data at all.  In both
	 * cases the SGL starts with the FCP_CMND and FCP_RSP entries, so
	 * advance past them before touching data SGEs.
	 */
	if (scsi_sg_count(scsi_cmnd)) {
		/*
		 * scsi_dma_map() returns the number of DMA mappings actually
		 * used; with an IOMMU this may differ from the sg count the
		 * midlayer supplied.
		 */
		nseg = scsi_dma_map(scsi_cmnd);
		if (unlikely(nseg <= 0))
			return 1;
		sgl += 1;
		/* clear the last flag in the fcp_rsp map entry */
		sgl->word2 = le32_to_cpu(sgl->word2);
		bf_set(lpfc_sli4_sge_last, sgl, 0);
		sgl->word2 = cpu_to_le32(sgl->word2);
		sgl += 1;
		first_data_sgl = sgl;
		lpfc_cmd->seg_cnt = nseg;
		if (lpfc_cmd->seg_cnt > phba->cfg_sg_seg_cnt) {
			lpfc_printf_log(phba, KERN_ERR, LOG_BG, "9074 BLKGRD:"
				" %s: Too many sg segments from "
				"dma_map_sg. Config %d, seg_cnt %d\n",
				__func__, phba->cfg_sg_seg_cnt,
				lpfc_cmd->seg_cnt);
			/* Undo the partial setup before failing the I/O */
			lpfc_cmd->seg_cnt = 0;
			scsi_dma_unmap(scsi_cmnd);
			return 1;
		}

		/*
		 * Walk the mapped segments and format one data SGE per
		 * segment.  The running dma_offset is recorded in each SGE,
		 * and the "last" bit is set only on the final entry.
		 */
		scsi_for_each_sg(scsi_cmnd, sgel, nseg, num_bde) {
			physaddr = sg_dma_address(sgel);
			dma_len = sg_dma_len(sgel);
			sgl->addr_lo = cpu_to_le32(putPaddrLow(physaddr));
			sgl->addr_hi = cpu_to_le32(putPaddrHigh(physaddr));
			sgl->word2 = le32_to_cpu(sgl->word2);
			if ((num_bde + 1) == nseg)
				bf_set(lpfc_sli4_sge_last, sgl, 1);
			else
				bf_set(lpfc_sli4_sge_last, sgl, 0);
			bf_set(lpfc_sli4_sge_offset, sgl, dma_offset);
			bf_set(lpfc_sli4_sge_type, sgl, LPFC_SGE_TYPE_DATA);
			sgl->word2 = cpu_to_le32(sgl->word2);
			sgl->sge_len = cpu_to_le32(dma_len);
			dma_offset += dma_len;
			sgl++;
		}

		/*
		 * Optionally mirror the first data SGE into the IOCB as a
		 * Payload BDE (words 13-15) when performance hints or PBDE
		 * are enabled.
		 */
		if ((phba->sli3_options & LPFC_SLI4_PERFH_ENABLED) ||
		    phba->cfg_enable_pbde) {
			bde = (struct ulp_bde64 *)
				&(iocb_cmd->unsli3.sli3Words[5]);
			bde->addrLow = first_data_sgl->addr_lo;
			bde->addrHigh = first_data_sgl->addr_hi;
			bde->tus.f.bdeSize =
					le32_to_cpu(first_data_sgl->sge_len);
			bde->tus.f.bdeFlags = BUFF_TYPE_BDE_64;
			bde->tus.w = cpu_to_le32(bde->tus.w);
		}
	} else {
		sgl += 1;
		/* no data: the fcp_rsp entry is the last SGE */
		sgl->word2 = le32_to_cpu(sgl->word2);
		bf_set(lpfc_sli4_sge_last, sgl, 1);
		sgl->word2 = cpu_to_le32(sgl->word2);

		if ((phba->sli3_options & LPFC_SLI4_PERFH_ENABLED) ||
		    phba->cfg_enable_pbde) {
			bde = (struct ulp_bde64 *)
				&(iocb_cmd->unsli3.sli3Words[5]);
			memset(bde, 0, (sizeof(uint32_t) * 3));
		}
	}

	/*
	 * Finish initializing those fields that depend on the transfer
	 * length.  fcpDl is a big-endian wire field.
	 */
	fcp_cmnd->fcpDl = cpu_to_be32(scsi_bufflen(scsi_cmnd));

	/*
	 * Due to difference in data length between DIF/non-DIF paths,
	 * we need to set word 4 of the IOCB here.
	 */
	iocb_cmd->un.fcpi.fcpi_parm = scsi_bufflen(scsi_cmnd);

	/*
	 * If the OAS driver feature is enabled and the lun is enabled for
	 * OAS, set the OAS iocb related flags and the I/O priority.
	 */
	if ((phba->cfg_fof) && ((struct lpfc_device_data *)
		scsi_cmnd->device->hostdata)->oas_enabled) {
		lpfc_cmd->cur_iocbq.iocb_flag |= (LPFC_IO_OAS | LPFC_IO_FOF);
		lpfc_cmd->cur_iocbq.priority = ((struct lpfc_device_data *)
			scsi_cmnd->device->hostdata)->priority;
	}

	return 0;
}
3104
3105
3106
3107
3108
3109
3110
3111
3112
3113
3114static int
3115lpfc_bg_scsi_prep_dma_buf_s4(struct lpfc_hba *phba,
3116 struct lpfc_io_buf *lpfc_cmd)
3117{
3118 struct scsi_cmnd *scsi_cmnd = lpfc_cmd->pCmd;
3119 struct fcp_cmnd *fcp_cmnd = lpfc_cmd->fcp_cmnd;
3120 struct sli4_sge *sgl = (struct sli4_sge *)(lpfc_cmd->dma_sgl);
3121 IOCB_t *iocb_cmd = &lpfc_cmd->cur_iocbq.iocb;
3122 uint32_t num_sge = 0;
3123 int datasegcnt, protsegcnt, datadir = scsi_cmnd->sc_data_direction;
3124 int prot_group_type = 0;
3125 int fcpdl;
3126 struct lpfc_vport *vport = phba->pport;
3127
3128
3129
3130
3131
3132 if (scsi_sg_count(scsi_cmnd)) {
3133
3134
3135
3136
3137
3138
3139 datasegcnt = dma_map_sg(&phba->pcidev->dev,
3140 scsi_sglist(scsi_cmnd),
3141 scsi_sg_count(scsi_cmnd), datadir);
3142 if (unlikely(!datasegcnt))
3143 return 1;
3144
3145 sgl += 1;
3146
3147 sgl->word2 = le32_to_cpu(sgl->word2);
3148 bf_set(lpfc_sli4_sge_last, sgl, 0);
3149 sgl->word2 = cpu_to_le32(sgl->word2);
3150
3151 sgl += 1;
3152 lpfc_cmd->seg_cnt = datasegcnt;
3153
3154
3155 if (lpfc_cmd->seg_cnt > phba->cfg_sg_seg_cnt)
3156 goto err;
3157
3158 prot_group_type = lpfc_prot_group_type(phba, scsi_cmnd);
3159
3160 switch (prot_group_type) {
3161 case LPFC_PG_TYPE_NO_DIF:
3162
3163 if ((lpfc_cmd->seg_cnt + 1) > phba->cfg_total_seg_cnt)
3164 goto err;
3165
3166 num_sge = lpfc_bg_setup_sgl(phba, scsi_cmnd, sgl,
3167 datasegcnt);
3168
3169
3170 if (num_sge < 2)
3171 goto err;
3172 break;
3173
3174 case LPFC_PG_TYPE_DIF_BUF:
3175
3176
3177
3178
3179
3180 protsegcnt = dma_map_sg(&phba->pcidev->dev,
3181 scsi_prot_sglist(scsi_cmnd),
3182 scsi_prot_sg_count(scsi_cmnd), datadir);
3183 if (unlikely(!protsegcnt)) {
3184 scsi_dma_unmap(scsi_cmnd);
3185 return 1;
3186 }
3187
3188 lpfc_cmd->prot_seg_cnt = protsegcnt;
3189
3190
3191
3192
3193 if ((lpfc_cmd->prot_seg_cnt * 3) >
3194 (phba->cfg_total_seg_cnt - 2))
3195 goto err;
3196
3197 num_sge = lpfc_bg_setup_sgl_prot(phba, scsi_cmnd, sgl,
3198 datasegcnt, protsegcnt);
3199
3200
3201 if ((num_sge < 3) ||
3202 (num_sge > phba->cfg_total_seg_cnt))
3203 goto err;
3204 break;
3205
3206 case LPFC_PG_TYPE_INVALID:
3207 default:
3208 scsi_dma_unmap(scsi_cmnd);
3209 lpfc_cmd->seg_cnt = 0;
3210
3211 lpfc_printf_log(phba, KERN_ERR, LOG_FCP,
3212 "9083 Unexpected protection group %i\n",
3213 prot_group_type);
3214 return 1;
3215 }
3216 }
3217
3218 switch (scsi_get_prot_op(scsi_cmnd)) {
3219 case SCSI_PROT_WRITE_STRIP:
3220 case SCSI_PROT_READ_STRIP:
3221 lpfc_cmd->cur_iocbq.iocb_flag |= LPFC_IO_DIF_STRIP;
3222 break;
3223 case SCSI_PROT_WRITE_INSERT:
3224 case SCSI_PROT_READ_INSERT:
3225 lpfc_cmd->cur_iocbq.iocb_flag |= LPFC_IO_DIF_INSERT;
3226 break;
3227 case SCSI_PROT_WRITE_PASS:
3228 case SCSI_PROT_READ_PASS:
3229 lpfc_cmd->cur_iocbq.iocb_flag |= LPFC_IO_DIF_PASS;
3230 break;
3231 }
3232
3233 fcpdl = lpfc_bg_scsi_adjust_dl(phba, lpfc_cmd);
3234 fcp_cmnd->fcpDl = be32_to_cpu(fcpdl);
3235
3236
3237
3238
3239
3240 iocb_cmd->un.fcpi.fcpi_parm = fcpdl;
3241
3242
3243
3244
3245
3246 if (iocb_cmd->un.fcpi.fcpi_XRdy &&
3247 (fcpdl < vport->cfg_first_burst_size))
3248 iocb_cmd->un.fcpi.fcpi_XRdy = fcpdl;
3249
3250
3251
3252
3253
3254 if ((phba->cfg_fof) && ((struct lpfc_device_data *)
3255 scsi_cmnd->device->hostdata)->oas_enabled)
3256 lpfc_cmd->cur_iocbq.iocb_flag |= (LPFC_IO_OAS | LPFC_IO_FOF);
3257
3258 return 0;
3259err:
3260 if (lpfc_cmd->seg_cnt)
3261 scsi_dma_unmap(scsi_cmnd);
3262 if (lpfc_cmd->prot_seg_cnt)
3263 dma_unmap_sg(&phba->pcidev->dev, scsi_prot_sglist(scsi_cmnd),
3264 scsi_prot_sg_count(scsi_cmnd),
3265 scsi_cmnd->sc_data_direction);
3266
3267 lpfc_printf_log(phba, KERN_ERR, LOG_FCP,
3268 "9084 Cannot setup S/G List for HBA"
3269 "IO segs %d/%d SGL %d SCSI %d: %d %d\n",
3270 lpfc_cmd->seg_cnt, lpfc_cmd->prot_seg_cnt,
3271 phba->cfg_total_seg_cnt, phba->cfg_sg_seg_cnt,
3272 prot_group_type, num_sge);
3273
3274 lpfc_cmd->seg_cnt = 0;
3275 lpfc_cmd->prot_seg_cnt = 0;
3276 return 1;
3277}
3278
3279
3280
3281
3282
3283
3284
3285
3286
3287
3288
3289
3290
/**
 * lpfc_scsi_prep_dma_buf - Dispatch to the SLI-rev specific DMA-prep routine
 * @phba: HBA on which the I/O will be issued.
 * @lpfc_cmd: driver I/O buffer wrapping the SCSI command to map.
 *
 * Calls through the jump-table entry installed by lpfc_scsi_api_table_setup()
 * (lpfc_scsi_prep_dma_buf_s3 or lpfc_scsi_prep_dma_buf_s4).
 *
 * Return: 0 on success, 1 on failure (as returned by the routed routine).
 */
static inline int
lpfc_scsi_prep_dma_buf(struct lpfc_hba *phba, struct lpfc_io_buf *lpfc_cmd)
{
	return phba->lpfc_scsi_prep_dma_buf(phba, lpfc_cmd);
}
3296
3297
3298
3299
3300
3301
3302
3303
3304
3305
3306
3307
3308
3309
/**
 * lpfc_bg_scsi_prep_dma_buf - Dispatch to the SLI-rev specific block-guard
 * DMA-prep routine
 * @phba: HBA on which the I/O will be issued.
 * @lpfc_cmd: driver I/O buffer wrapping the protected SCSI command.
 *
 * Calls through the jump-table entry installed by lpfc_scsi_api_table_setup()
 * (lpfc_bg_scsi_prep_dma_buf_s3 or lpfc_bg_scsi_prep_dma_buf_s4).
 *
 * Return: 0 on success, 1 on failure (as returned by the routed routine).
 */
static inline int
lpfc_bg_scsi_prep_dma_buf(struct lpfc_hba *phba, struct lpfc_io_buf *lpfc_cmd)
{
	return phba->lpfc_bg_scsi_prep_dma_buf(phba, lpfc_cmd);
}
3315
3316
3317
3318
3319
3320
3321
3322
3323
3324
3325
3326static void
3327lpfc_send_scsi_error_event(struct lpfc_hba *phba, struct lpfc_vport *vport,
3328 struct lpfc_io_buf *lpfc_cmd, struct lpfc_iocbq *rsp_iocb) {
3329 struct scsi_cmnd *cmnd = lpfc_cmd->pCmd;
3330 struct fcp_rsp *fcprsp = lpfc_cmd->fcp_rsp;
3331 uint32_t resp_info = fcprsp->rspStatus2;
3332 uint32_t scsi_status = fcprsp->rspStatus3;
3333 uint32_t fcpi_parm = rsp_iocb->iocb.un.fcpi.fcpi_parm;
3334 struct lpfc_fast_path_event *fast_path_evt = NULL;
3335 struct lpfc_nodelist *pnode = lpfc_cmd->rdata->pnode;
3336 unsigned long flags;
3337
3338 if (!pnode || !NLP_CHK_NODE_ACT(pnode))
3339 return;
3340
3341
3342 if ((cmnd->result == SAM_STAT_TASK_SET_FULL) ||
3343 (cmnd->result == SAM_STAT_BUSY)) {
3344 fast_path_evt = lpfc_alloc_fast_evt(phba);
3345 if (!fast_path_evt)
3346 return;
3347 fast_path_evt->un.scsi_evt.event_type =
3348 FC_REG_SCSI_EVENT;
3349 fast_path_evt->un.scsi_evt.subcategory =
3350 (cmnd->result == SAM_STAT_TASK_SET_FULL) ?
3351 LPFC_EVENT_QFULL : LPFC_EVENT_DEVBSY;
3352 fast_path_evt->un.scsi_evt.lun = cmnd->device->lun;
3353 memcpy(&fast_path_evt->un.scsi_evt.wwpn,
3354 &pnode->nlp_portname, sizeof(struct lpfc_name));
3355 memcpy(&fast_path_evt->un.scsi_evt.wwnn,
3356 &pnode->nlp_nodename, sizeof(struct lpfc_name));
3357 } else if ((resp_info & SNS_LEN_VALID) && fcprsp->rspSnsLen &&
3358 ((cmnd->cmnd[0] == READ_10) || (cmnd->cmnd[0] == WRITE_10))) {
3359 fast_path_evt = lpfc_alloc_fast_evt(phba);
3360 if (!fast_path_evt)
3361 return;
3362 fast_path_evt->un.check_cond_evt.scsi_event.event_type =
3363 FC_REG_SCSI_EVENT;
3364 fast_path_evt->un.check_cond_evt.scsi_event.subcategory =
3365 LPFC_EVENT_CHECK_COND;
3366 fast_path_evt->un.check_cond_evt.scsi_event.lun =
3367 cmnd->device->lun;
3368 memcpy(&fast_path_evt->un.check_cond_evt.scsi_event.wwpn,
3369 &pnode->nlp_portname, sizeof(struct lpfc_name));
3370 memcpy(&fast_path_evt->un.check_cond_evt.scsi_event.wwnn,
3371 &pnode->nlp_nodename, sizeof(struct lpfc_name));
3372 fast_path_evt->un.check_cond_evt.sense_key =
3373 cmnd->sense_buffer[2] & 0xf;
3374 fast_path_evt->un.check_cond_evt.asc = cmnd->sense_buffer[12];
3375 fast_path_evt->un.check_cond_evt.ascq = cmnd->sense_buffer[13];
3376 } else if ((cmnd->sc_data_direction == DMA_FROM_DEVICE) &&
3377 fcpi_parm &&
3378 ((be32_to_cpu(fcprsp->rspResId) != fcpi_parm) ||
3379 ((scsi_status == SAM_STAT_GOOD) &&
3380 !(resp_info & (RESID_UNDER | RESID_OVER))))) {
3381
3382
3383
3384
3385 fast_path_evt = lpfc_alloc_fast_evt(phba);
3386 if (!fast_path_evt)
3387 return;
3388 fast_path_evt->un.read_check_error.header.event_type =
3389 FC_REG_FABRIC_EVENT;
3390 fast_path_evt->un.read_check_error.header.subcategory =
3391 LPFC_EVENT_FCPRDCHKERR;
3392 memcpy(&fast_path_evt->un.read_check_error.header.wwpn,
3393 &pnode->nlp_portname, sizeof(struct lpfc_name));
3394 memcpy(&fast_path_evt->un.read_check_error.header.wwnn,
3395 &pnode->nlp_nodename, sizeof(struct lpfc_name));
3396 fast_path_evt->un.read_check_error.lun = cmnd->device->lun;
3397 fast_path_evt->un.read_check_error.opcode = cmnd->cmnd[0];
3398 fast_path_evt->un.read_check_error.fcpiparam =
3399 fcpi_parm;
3400 } else
3401 return;
3402
3403 fast_path_evt->vport = vport;
3404 spin_lock_irqsave(&phba->hbalock, flags);
3405 list_add_tail(&fast_path_evt->work_evt.evt_listp, &phba->work_list);
3406 spin_unlock_irqrestore(&phba->hbalock, flags);
3407 lpfc_worker_wake_up(phba);
3408 return;
3409}
3410
3411
3412
3413
3414
3415
3416
3417
3418
/**
 * lpfc_scsi_unprep_dma_buf - Un-map the DMA mappings of a SCSI buffer
 * @phba: HBA the buffer was mapped for.
 * @psb: driver I/O buffer whose mappings are being released.
 *
 * Reverses the work of the prep_dma_buf routines: unmaps the data
 * scatter/gather list if one was mapped (seg_cnt > 0) and the protection
 * scatter/gather list if one was mapped (prot_seg_cnt > 0).  A buffer with
 * neither count set needs no deallocation.
 */
static void
lpfc_scsi_unprep_dma_buf(struct lpfc_hba *phba, struct lpfc_io_buf *psb)
{
	/*
	 * seg_cnt / prot_seg_cnt are only non-zero when the corresponding
	 * scsi_dma_map() / dma_map_sg() succeeded in the prep path, so each
	 * unmap is guarded independently.
	 */
	if (psb->seg_cnt > 0)
		scsi_dma_unmap(psb->pCmd);
	if (psb->prot_seg_cnt > 0)
		dma_unmap_sg(&phba->pcidev->dev, scsi_prot_sglist(psb->pCmd),
				scsi_prot_sg_count(psb->pCmd),
				psb->pCmd->sc_data_direction);
}
3435
3436
3437
3438
3439
3440
3441
3442
3443
3444
3445
/**
 * lpfc_handle_fcp_err - Translate an FCP response error into a SCSI result
 * @vport: virtual port the I/O belongs to.
 * @lpfc_cmd: completed driver I/O buffer (holds fcp_cmnd/fcp_rsp).
 * @rsp_iocb: response IOCB; un.fcpi.fcpi_parm carries the HBA's observed
 *            transfer count.
 *
 * Parses the FCP_RSP payload (response length, sense data, residuals) and
 * the HBA transfer count, then sets cmnd->result to the derived host and
 * SCSI status, and finally posts any error event via
 * lpfc_send_scsi_error_event().
 */
static void
lpfc_handle_fcp_err(struct lpfc_vport *vport, struct lpfc_io_buf *lpfc_cmd,
		    struct lpfc_iocbq *rsp_iocb)
{
	struct lpfc_hba *phba = vport->phba;
	struct scsi_cmnd *cmnd = lpfc_cmd->pCmd;
	struct fcp_cmnd *fcpcmd = lpfc_cmd->fcp_cmnd;
	struct fcp_rsp *fcprsp = lpfc_cmd->fcp_rsp;
	uint32_t fcpi_parm = rsp_iocb->iocb.un.fcpi.fcpi_parm;
	uint32_t resp_info = fcprsp->rspStatus2;
	uint32_t scsi_status = fcprsp->rspStatus3;
	uint32_t *lp;
	uint32_t host_status = DID_OK;
	uint32_t rsplen = 0;
	uint32_t fcpDl;
	uint32_t logit = LOG_FCP | LOG_FCP_ERROR;

	/*
	 * fcpCntl2 non-zero means this was a task management command; it
	 * carries no SCSI packet, so report a clean status and bail out.
	 */
	if (fcpcmd->fcpCntl2) {
		scsi_status = 0;
		goto out;
	}

	if (resp_info & RSP_LEN_VALID) {
		rsplen = be32_to_cpu(fcprsp->rspRspLen);
		/* Only 0, 4 and 8 are legal FCP_RSP_LEN values */
		if (rsplen != 0 && rsplen != 4 && rsplen != 8) {
			lpfc_printf_vlog(vport, KERN_ERR, LOG_FCP,
				 "2719 Invalid response length: "
				 "tgt x%x lun x%llx cmnd x%x rsplen x%x\n",
				 cmnd->device->id,
				 cmnd->device->lun, cmnd->cmnd[0],
				 rsplen);
			host_status = DID_ERROR;
			goto out;
		}
		if (fcprsp->rspInfo3 != RSP_NO_FAILURE) {
			lpfc_printf_vlog(vport, KERN_ERR, LOG_FCP,
				 "2757 Protocol failure detected during "
				 "processing of FCP I/O op: "
				 "tgt x%x lun x%llx cmnd x%x rspInfo3 x%x\n",
				 cmnd->device->id,
				 cmnd->device->lun, cmnd->cmnd[0],
				 fcprsp->rspInfo3);
			host_status = DID_ERROR;
			goto out;
		}
	}

	/* Copy sense data (capped to the midlayer buffer) if present */
	if ((resp_info & SNS_LEN_VALID) && fcprsp->rspSnsLen) {
		uint32_t snslen = be32_to_cpu(fcprsp->rspSnsLen);
		if (snslen > SCSI_SENSE_BUFFERSIZE)
			snslen = SCSI_SENSE_BUFFERSIZE;

		/* sense data follows the response info field, if any */
		if (resp_info & RSP_LEN_VALID)
		  rsplen = be32_to_cpu(fcprsp->rspRspLen);
		memcpy(cmnd->sense_buffer, &fcprsp->rspInfo0 + rsplen, snslen);
	}
	lp = (uint32_t *)cmnd->sense_buffer;

	/* special handling for under run conditions */
	if (!scsi_status && (resp_info & RESID_UNDER)) {
		/* don't log under runs if fcp set - we want to log them
		 * at the FCP_ERROR/FCP_UNDER level instead
		 */
		if (vport->cfg_log_verbose & LOG_FCP)
			logit = LOG_FCP_ERROR;
		/* unless operator says so */
		if (vport->cfg_log_verbose & LOG_FCP_UNDER)
			logit = LOG_FCP_UNDER;
	}

	lpfc_printf_vlog(vport, KERN_WARNING, logit,
			 "9024 FCP command x%x failed: x%x SNS x%x x%x "
			 "Data: x%x x%x x%x x%x x%x\n",
			 cmnd->cmnd[0], scsi_status,
			 be32_to_cpu(*lp), be32_to_cpu(*(lp + 3)), resp_info,
			 be32_to_cpu(fcprsp->rspResId),
			 be32_to_cpu(fcprsp->rspSnsLen),
			 be32_to_cpu(fcprsp->rspRspLen),
			 fcprsp->rspInfo3);

	scsi_set_resid(cmnd, 0);
	fcpDl = be32_to_cpu(fcpcmd->fcpDl);
	if (resp_info & RESID_UNDER) {
		scsi_set_resid(cmnd, be32_to_cpu(fcprsp->rspResId));

		lpfc_printf_vlog(vport, KERN_INFO, LOG_FCP_UNDER,
				 "9025 FCP Underrun, expected %d, "
				 "residual %d Data: x%x x%x x%x\n",
				 fcpDl,
				 scsi_get_resid(cmnd), fcpi_parm, cmnd->cmnd[0],
				 cmnd->underflow);

		/*
		 * If there is an under run, check if under run reported by
		 * the storage array is the same as the under run reported by
		 * the HBA.  If they differ, frames were dropped.
		 */
		if (fcpi_parm && (scsi_get_resid(cmnd) != fcpi_parm)) {
			lpfc_printf_vlog(vport, KERN_WARNING,
					 LOG_FCP | LOG_FCP_ERROR,
					 "9026 FCP Read Check Error "
					 "and Underrun Data: x%x x%x x%x x%x\n",
					 fcpDl,
					 scsi_get_resid(cmnd), fcpi_parm,
					 cmnd->cmnd[0]);
			scsi_set_resid(cmnd, scsi_bufflen(cmnd));
			host_status = DID_ERROR;
		}

		/*
		 * The cmnd->underflow is the minimum number of bytes that must
		 * be transferred for this command.  Provided a sense condition
		 * is not present, make sure the actual amount transferred is
		 * at least the underflow value or fail.
		 */
		if (!(resp_info & SNS_LEN_VALID) &&
		    (scsi_status == SAM_STAT_GOOD) &&
		    (scsi_bufflen(cmnd) - scsi_get_resid(cmnd)
		     < cmnd->underflow)) {
			lpfc_printf_vlog(vport, KERN_INFO, LOG_FCP,
					 "9027 FCP command x%x residual "
					 "underrun converted to error "
					 "Data: x%x x%x x%x\n",
					 cmnd->cmnd[0], scsi_bufflen(cmnd),
					 scsi_get_resid(cmnd), cmnd->underflow);
			host_status = DID_ERROR;
		}
	} else if (resp_info & RESID_OVER) {
		lpfc_printf_vlog(vport, KERN_WARNING, LOG_FCP,
				 "9028 FCP command x%x residual overrun error. "
				 "Data: x%x x%x\n", cmnd->cmnd[0],
				 scsi_bufflen(cmnd), scsi_get_resid(cmnd));
		host_status = DID_ERROR;

	} else if (fcpi_parm) {
		/*
		 * No residual reported, but the HBA's observed transfer count
		 * (fcpi_parm) is non-zero: possible dropped frames on a read
		 * or write.
		 */
		lpfc_printf_vlog(vport, KERN_WARNING, LOG_FCP | LOG_FCP_ERROR,
				 "9029 FCP %s Check Error xri x%x Data: "
				 "x%x x%x x%x x%x x%x\n",
				 ((cmnd->sc_data_direction == DMA_FROM_DEVICE) ?
				 "Read" : "Write"),
				 ((phba->sli_rev == LPFC_SLI_REV4) ?
				 lpfc_cmd->cur_iocbq.sli4_xritag :
				 rsp_iocb->iocb.ulpContext),
				 fcpDl, be32_to_cpu(fcprsp->rspResId),
				 fcpi_parm, cmnd->cmnd[0], scsi_status);

		/*
		 * A transfer count larger than the requested data length
		 * cannot be a real transfer; ignore it rather than failing
		 * the I/O.
		 */
		if (fcpi_parm > fcpDl)
			goto out;

		switch (scsi_status) {
		case SAM_STAT_GOOD:
		case SAM_STAT_CHECK_CONDITION:
			/*
			 * A transfer-count mismatch with an otherwise clean
			 * SCSI status is treated as a host error so the
			 * midlayer will retry the command.
			 */
			host_status = DID_ERROR;
			break;
		}
		scsi_set_resid(cmnd, scsi_bufflen(cmnd));
	}

 out:
	cmnd->result = host_status << 16 | scsi_status;
	lpfc_send_scsi_error_event(vport->phba, vport, lpfc_cmd, rsp_iocb);
}
3624
3625
3626
3627
3628
3629
3630
3631
3632
3633
3634
/**
 * lpfc_scsi_cmd_iocb_cmpl - Completion handler for FCP command IOCBs
 * @phba: HBA the IOCB completed on.
 * @pIocbIn: the original command IOCB; context1 holds the driver I/O buffer.
 * @pIocbOut: the response IOCB from the firmware.
 *
 * Translates the firmware completion status into a SCSI midlayer result,
 * queues fast-path events for busy conditions, handles block-guard errors,
 * updates statistics and queue-depth tracking, unmaps DMA, completes the
 * command via scsi_done, wakes any waiting abort thread, and releases the
 * driver I/O buffer.
 */
static void
lpfc_scsi_cmd_iocb_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *pIocbIn,
			struct lpfc_iocbq *pIocbOut)
{
	struct lpfc_io_buf *lpfc_cmd =
		(struct lpfc_io_buf *) pIocbIn->context1;
	struct lpfc_vport *vport = pIocbIn->vport;
	struct lpfc_rport_data *rdata = lpfc_cmd->rdata;
	struct lpfc_nodelist *pnode = rdata->pnode;
	struct scsi_cmnd *cmd;
	unsigned long flags;
	struct lpfc_fast_path_event *fast_path_evt;
	struct Scsi_Host *shost;
	int idx;
	uint32_t logit = LOG_FCP;
#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
	int cpu;
#endif

	/* Guard against the abort handler running at the same time */
	spin_lock(&lpfc_cmd->buf_lock);

	/* Sanity check on return of outstanding command */
	cmd = lpfc_cmd->pCmd;
	if (!cmd) {
		lpfc_printf_vlog(vport, KERN_ERR, LOG_INIT,
				 "2621 IO completion: Not an active IO\n");
		spin_unlock(&lpfc_cmd->buf_lock);
		return;
	}

	idx = lpfc_cmd->cur_iocbq.hba_wqidx;
	if (phba->sli4_hba.hdwq)
		phba->sli4_hba.hdwq[idx].scsi_cstat.io_cmpls++;

#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
	/* Per-CPU completion accounting for the debugfs cpucheck facility */
	if (phba->cpucheck_on & LPFC_CHECK_SCSI_IO) {
		cpu = raw_smp_processor_id();
		if (cpu < LPFC_CHECK_CPU_CNT && phba->sli4_hba.hdwq)
			phba->sli4_hba.hdwq[idx].cpucheck_cmpl_io[cpu]++;
	}
#endif
	shost = cmd->device->host;

	lpfc_cmd->result = (pIocbOut->iocb.un.ulpWord[4] & IOERR_PARAM_MASK);
	lpfc_cmd->status = pIocbOut->iocb.ulpStatus;
	/* pick up SLI4 exchange busy status from the response IOCB */
	lpfc_cmd->exch_busy = pIocbOut->iocb_flag & LPFC_EXCHANGE_BUSY;

#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
	if (lpfc_cmd->prot_data_type) {
		struct scsi_dif_tuple *src = NULL;

		src =  (struct scsi_dif_tuple *)lpfc_cmd->prot_data_segment;
		/*
		 * Restore the protection-data field that was overwritten for
		 * error injection, then clear the injection bookkeeping.
		 */
		switch (lpfc_cmd->prot_data_type) {
		case LPFC_INJERR_REFTAG:
			src->ref_tag =
				lpfc_cmd->prot_data;
			break;
		case LPFC_INJERR_APPTAG:
			src->app_tag =
				(uint16_t)lpfc_cmd->prot_data;
			break;
		case LPFC_INJERR_GUARD:
			src->guard_tag =
				(uint16_t)lpfc_cmd->prot_data;
			break;
		default:
			break;
		}

		lpfc_cmd->prot_data = 0;
		lpfc_cmd->prot_data_type = 0;
		lpfc_cmd->prot_data_segment = NULL;
	}
#endif

	if (lpfc_cmd->status) {
		/* Normalize the firmware status to a known IOSTAT value */
		if (lpfc_cmd->status == IOSTAT_LOCAL_REJECT &&
		    (lpfc_cmd->result & IOERR_DRVR_MASK))
			lpfc_cmd->status = IOSTAT_DRIVER_REJECT;
		else if (lpfc_cmd->status >= IOSTAT_CNT)
			lpfc_cmd->status = IOSTAT_DEFAULT;
		/* Suppress logging of benign underruns unless requested */
		if (lpfc_cmd->status == IOSTAT_FCP_RSP_ERROR &&
		    !lpfc_cmd->fcp_rsp->rspStatus3 &&
		    (lpfc_cmd->fcp_rsp->rspStatus2 & RESID_UNDER) &&
		    !(vport->cfg_log_verbose & LOG_FCP_UNDER))
			logit = 0;
		else
			logit = LOG_FCP | LOG_FCP_UNDER;
		lpfc_printf_vlog(vport, KERN_WARNING, logit,
			 "9030 FCP cmd x%x failed <%d/%lld> "
			 "status: x%x result: x%x "
			 "sid: x%x did: x%x oxid: x%x "
			 "Data: x%x x%x\n",
			 cmd->cmnd[0],
			 cmd->device ? cmd->device->id : 0xffff,
			 cmd->device ? cmd->device->lun : 0xffff,
			 lpfc_cmd->status, lpfc_cmd->result,
			 vport->fc_myDID,
			 (pnode) ? pnode->nlp_DID : 0,
			 phba->sli_rev == LPFC_SLI_REV4 ?
			     lpfc_cmd->cur_iocbq.sli4_xritag : 0xffff,
			 pIocbOut->iocb.ulpContext,
			 lpfc_cmd->cur_iocbq.iocb.ulpIoTag);

		switch (lpfc_cmd->status) {
		case IOSTAT_FCP_RSP_ERROR:
			/* Call FCP RSP handler to determine result */
			lpfc_handle_fcp_err(vport, lpfc_cmd, pIocbOut);
			break;
		case IOSTAT_NPORT_BSY:
		case IOSTAT_FABRIC_BSY:
			cmd->result = DID_TRANSPORT_DISRUPTED << 16;
			/* Post a port/fabric busy event for management apps */
			fast_path_evt = lpfc_alloc_fast_evt(phba);
			if (!fast_path_evt)
				break;
			fast_path_evt->un.fabric_evt.event_type =
				FC_REG_FABRIC_EVENT;
			fast_path_evt->un.fabric_evt.subcategory =
				(lpfc_cmd->status == IOSTAT_NPORT_BSY) ?
				LPFC_EVENT_PORT_BUSY : LPFC_EVENT_FABRIC_BUSY;
			if (pnode && NLP_CHK_NODE_ACT(pnode)) {
				memcpy(&fast_path_evt->un.fabric_evt.wwpn,
					&pnode->nlp_portname,
					sizeof(struct lpfc_name));
				memcpy(&fast_path_evt->un.fabric_evt.wwnn,
					&pnode->nlp_nodename,
					sizeof(struct lpfc_name));
			}
			fast_path_evt->vport = vport;
			fast_path_evt->work_evt.evt =
				LPFC_EVT_FASTPATH_MGMT_EVT;
			spin_lock_irqsave(&phba->hbalock, flags);
			list_add_tail(&fast_path_evt->work_evt.evt_listp,
				&phba->work_list);
			spin_unlock_irqrestore(&phba->hbalock, flags);
			lpfc_worker_wake_up(phba);
			break;
		case IOSTAT_LOCAL_REJECT:
		case IOSTAT_REMOTE_STOP:
			/* Security errors mean the rport is unreachable */
			if (lpfc_cmd->result == IOERR_ELXSEC_KEY_UNWRAP_ERROR ||
			    lpfc_cmd->result ==
					IOERR_ELXSEC_KEY_UNWRAP_COMPARE_ERROR ||
			    lpfc_cmd->result == IOERR_ELXSEC_CRYPTO_ERROR ||
			    lpfc_cmd->result ==
					IOERR_ELXSEC_CRYPTO_COMPARE_ERROR) {
				cmd->result = DID_NO_CONNECT << 16;
				break;
			}
			/* Transient errors: let the midlayer requeue */
			if (lpfc_cmd->result == IOERR_INVALID_RPI ||
			    lpfc_cmd->result == IOERR_NO_RESOURCES ||
			    lpfc_cmd->result == IOERR_ABORT_REQUESTED ||
			    lpfc_cmd->result == IOERR_SLER_CMD_RCV_FAILURE) {
				cmd->result = DID_REQUEUE << 16;
				break;
			}
			if ((lpfc_cmd->result == IOERR_RX_DMA_FAILED ||
			     lpfc_cmd->result == IOERR_TX_DMA_FAILED) &&
			     pIocbOut->iocb.unsli3.sli3_bg.bgstat) {
				if (scsi_get_prot_op(cmd) != SCSI_PROT_NORMAL) {
					/*
					 * The command was protected; decode
					 * the block-guard status to produce
					 * the proper SCSI result/sense.
					 */
					lpfc_parse_bg_err(phba, lpfc_cmd,
							pIocbOut);
					break;
				} else {
					lpfc_printf_vlog(vport, KERN_WARNING,
							LOG_BG,
							"9031 non-zero BGSTAT "
							"on unprotected cmd\n");
				}
			}
			if ((lpfc_cmd->status == IOSTAT_REMOTE_STOP)
				&& (phba->sli_rev == LPFC_SLI_REV4)
				&& (pnode && NLP_CHK_NODE_ACT(pnode))) {
				/*
				 * The target stopped this exchange; mark the
				 * XRI as requiring reuse qualification via
				 * the RRQ mechanism.
				 */
				lpfc_set_rrq_active(phba, pnode,
					lpfc_cmd->cur_iocbq.sli4_lxritag,
					0, 0);
			}
			/* fall through - remaining rejects become DID_ERROR */
		default:
			cmd->result = DID_ERROR << 16;
			break;
		}

		/* Any failure against a non-mapped node is reported as a
		 * transport disruption with BUSY status.
		 */
		if (!pnode || !NLP_CHK_NODE_ACT(pnode)
		    || (pnode->nlp_state != NLP_STE_MAPPED_NODE))
			cmd->result = DID_TRANSPORT_DISRUPTED << 16 |
				      SAM_STAT_BUSY;
	} else
		cmd->result = DID_OK << 16;

	if (cmd->result || lpfc_cmd->fcp_rsp->rspSnsLen) {
		uint32_t *lp = (uint32_t *)cmd->sense_buffer;

		lpfc_printf_vlog(vport, KERN_INFO, LOG_FCP,
				 "0710 Iodone <%d/%llu> cmd %p, error "
				 "x%x SNS x%x x%x Data: x%x x%x\n",
				 cmd->device->id, cmd->device->lun, cmd,
				 cmd->result, *lp, *(lp + 3), cmd->retries,
				 scsi_get_resid(cmd));
	}

	lpfc_update_stats(phba, lpfc_cmd);
	/*
	 * If the command took longer than cfg_max_scsicmpl_time, shrink the
	 * node's queue depth down to the current number of pending commands
	 * (READ_10/WRITE_10 only, never below LPFC_MIN_TGT_QDEPTH).
	 */
	if (vport->cfg_max_scsicmpl_time &&
	    time_after(jiffies, lpfc_cmd->start_time +
		msecs_to_jiffies(vport->cfg_max_scsicmpl_time))) {
		spin_lock_irqsave(shost->host_lock, flags);
		if (pnode && NLP_CHK_NODE_ACT(pnode)) {
			if (pnode->cmd_qdepth >
				atomic_read(&pnode->cmd_pending) &&
				(atomic_read(&pnode->cmd_pending) >
				LPFC_MIN_TGT_QDEPTH) &&
				((cmd->cmnd[0] == READ_10) ||
				(cmd->cmnd[0] == WRITE_10)))
				pnode->cmd_qdepth =
					atomic_read(&pnode->cmd_pending);

			pnode->last_change_time = jiffies;
		}
		spin_unlock_irqrestore(shost->host_lock, flags);
	}
	lpfc_scsi_unprep_dma_buf(phba, lpfc_cmd);

	/* Detach the SCSI command before completing it to the midlayer */
	lpfc_cmd->pCmd = NULL;
	spin_unlock(&lpfc_cmd->buf_lock);

	/* The sdev is not guaranteed valid after scsi_done returns */
	cmd->scsi_done(cmd);

	/*
	 * If an abort thread is waiting for this command's completion,
	 * clear the aborted flag and wake it up.
	 */
	spin_lock(&lpfc_cmd->buf_lock);
	lpfc_cmd->cur_iocbq.iocb_flag &= ~LPFC_DRIVER_ABORTED;
	if (lpfc_cmd->waitq)
		wake_up(lpfc_cmd->waitq);
	spin_unlock(&lpfc_cmd->buf_lock);

	lpfc_release_scsi_buf(phba, lpfc_cmd);
}
3888
3889
3890
3891
3892
3893
3894
3895
3896
3897static void
3898lpfc_fcpcmd_to_iocb(uint8_t *data, struct fcp_cmnd *fcp_cmnd)
3899{
3900 int i, j;
3901 for (i = 0, j = 0; i < sizeof(struct fcp_cmnd);
3902 i += sizeof(uint32_t), j++) {
3903 ((uint32_t *)data)[j] = cpu_to_be32(((uint32_t *)fcp_cmnd)[j]);
3904 }
3905}
3906
3907
3908
3909
3910
3911
3912
3913
3914
3915
/**
 * lpfc_scsi_prep_cmnd - Build the FCP_CMND and IOCB for a SCSI command
 * @vport: virtual port issuing the command.
 * @lpfc_cmd: driver I/O buffer wrapping the SCSI command.
 * @pnode: remote node (rport) the command targets.
 *
 * Fills in the FCP_CMND payload (LUN, CDB, task attributes, direction bits),
 * selects the IOCB command type based on data direction, applies first-burst
 * limits for writes, and finishes the IOCB fields (RPI context, class, FCP-2
 * recovery, completion routine, timeout).  Returns early without touching
 * anything if @pnode is NULL or inactive.
 */
static void
lpfc_scsi_prep_cmnd(struct lpfc_vport *vport, struct lpfc_io_buf *lpfc_cmd,
		    struct lpfc_nodelist *pnode)
{
	struct lpfc_hba *phba = vport->phba;
	struct scsi_cmnd *scsi_cmnd = lpfc_cmd->pCmd;
	struct fcp_cmnd *fcp_cmnd = lpfc_cmd->fcp_cmnd;
	IOCB_t *iocb_cmd = &lpfc_cmd->cur_iocbq.iocb;
	struct lpfc_iocbq *piocbq = &(lpfc_cmd->cur_iocbq);
	struct lpfc_sli4_hdw_queue *hdwq = NULL;
	int datadir = scsi_cmnd->sc_data_direction;
	int idx;
	uint8_t *ptr;
	bool sli4;
	uint32_t fcpdl;

	if (!pnode || !NLP_CHK_NODE_ACT(pnode))
		return;

	lpfc_cmd->fcp_rsp->rspSnsLen = 0;
	/* clear task management bits */
	lpfc_cmd->fcp_cmnd->fcpCntl2 = 0;

	int_to_scsilun(lpfc_cmd->pCmd->device->lun,
			&lpfc_cmd->fcp_cmnd->fcp_lun);

	/* Copy the CDB and zero-pad the remainder of the CDB field */
	ptr = &fcp_cmnd->fcpCdb[0];
	memcpy(ptr, scsi_cmnd->cmnd, scsi_cmnd->cmd_len);
	if (scsi_cmnd->cmd_len < LPFC_FCP_CDB_LEN) {
		ptr += scsi_cmnd->cmd_len;
		memset(ptr, 0, (LPFC_FCP_CDB_LEN - scsi_cmnd->cmd_len));
	}

	fcp_cmnd->fcpCntl1 = SIMPLE_Q;

	sli4 = (phba->sli_rev == LPFC_SLI_REV4);
	piocbq->iocb.un.fcpi.fcpi_XRdy = 0;
	idx = lpfc_cmd->hdwq_no;
	if (phba->sli4_hba.hdwq)
		hdwq = &phba->sli4_hba.hdwq[idx];

	/*
	 * Pick the IOCB command based on whether the command carries data
	 * and in which direction; commands with no data use the plain
	 * FCP ICMND form.
	 */
	if (scsi_sg_count(scsi_cmnd)) {
		if (datadir == DMA_TO_DEVICE) {
			iocb_cmd->ulpCommand = CMD_FCP_IWRITE64_CR;
			iocb_cmd->ulpPU = PARM_READ_CHECK;
			/*
			 * Allow a first-burst transfer (XRdy) up to the
			 * configured limit when the node supports it.
			 */
			if (vport->cfg_first_burst_size &&
			    (pnode->nlp_flag & NLP_FIRSTBURST)) {
				fcpdl = scsi_bufflen(scsi_cmnd);
				if (fcpdl < vport->cfg_first_burst_size)
					piocbq->iocb.un.fcpi.fcpi_XRdy = fcpdl;
				else
					piocbq->iocb.un.fcpi.fcpi_XRdy =
						vport->cfg_first_burst_size;
			}
			fcp_cmnd->fcpCntl3 = WRITE_DATA;
			if (hdwq)
				hdwq->scsi_cstat.output_requests++;
		} else {
			iocb_cmd->ulpCommand = CMD_FCP_IREAD64_CR;
			iocb_cmd->ulpPU = PARM_READ_CHECK;
			fcp_cmnd->fcpCntl3 = READ_DATA;
			if (hdwq)
				hdwq->scsi_cstat.input_requests++;
		}
	} else {
		iocb_cmd->ulpCommand = CMD_FCP_ICMND64_CR;
		iocb_cmd->un.fcpi.fcpi_parm = 0;
		iocb_cmd->ulpPU = 0;
		fcp_cmnd->fcpCntl3 = 0;
		if (hdwq)
			hdwq->scsi_cstat.control_requests++;
	}
	/* SLI-3 without block guard embeds the FCP_CMND in the IOCB itself */
	if (phba->sli_rev == 3 &&
	    !(phba->sli3_options & LPFC_SLI3_BG_ENABLED))
		lpfc_fcpcmd_to_iocb(iocb_cmd->unsli3.fcp_ext.icd, fcp_cmnd);
	/*
	 * Finish initializing those IOCB fields that are independent of the
	 * request buffer: RPI context, FCP-2 recovery, class, completion.
	 */
	piocbq->iocb.ulpContext = pnode->nlp_rpi;
	if (sli4)
		piocbq->iocb.ulpContext =
			phba->sli4_hba.rpi_ids[pnode->nlp_rpi];
	if (pnode->nlp_fcp_info & NLP_FCP_2_DEVICE)
		piocbq->iocb.ulpFCP2Rcvy = 1;
	else
		piocbq->iocb.ulpFCP2Rcvy = 0;

	piocbq->iocb.ulpClass = (pnode->nlp_fcp_info & 0x0f);
	piocbq->context1 = lpfc_cmd;
	piocbq->iocb_cmpl = lpfc_scsi_cmd_iocb_cmpl;
	piocbq->iocb.ulpTimeout = lpfc_cmd->timeout;
	piocbq->vport = vport;
}
4016
4017
4018
4019
4020
4021
4022
4023
4024
4025
4026
4027
4028
4029
4030
/**
 * lpfc_scsi_prep_task_mgmt_cmd - Build an FCP task management information unit
 * @vport: virtual port issuing the task management request.
 * @lpfc_cmd: driver I/O buffer to build the request in.
 * @lun: logical unit number the request targets.
 * @task_mgmt_cmd: task management function code (placed in fcpCntl2).
 *
 * Builds the FCP_CMND and IOCB for a task management command against the
 * node referenced by @lpfc_cmd->rdata.
 *
 * Return: 1 on success, 0 if the remote node is absent, inactive, or not in
 * the MAPPED state.
 */
static int
lpfc_scsi_prep_task_mgmt_cmd(struct lpfc_vport *vport,
			     struct lpfc_io_buf *lpfc_cmd,
			     uint64_t lun,
			     uint8_t task_mgmt_cmd)
{
	struct lpfc_iocbq *piocbq;
	IOCB_t *piocb;
	struct fcp_cmnd *fcp_cmnd;
	struct lpfc_rport_data *rdata = lpfc_cmd->rdata;
	struct lpfc_nodelist *ndlp = rdata->pnode;

	if (!ndlp || !NLP_CHK_NODE_ACT(ndlp) ||
	    ndlp->nlp_state != NLP_STE_MAPPED_NODE)
		return 0;

	piocbq = &(lpfc_cmd->cur_iocbq);
	piocbq->vport = vport;

	piocb = &piocbq->iocb;

	fcp_cmnd = lpfc_cmd->fcp_cmnd;
	/* Clear out any old data in the FCP command area */
	memset(fcp_cmnd, 0, sizeof(struct fcp_cmnd));
	int_to_scsilun(lun, &fcp_cmnd->fcp_lun);
	fcp_cmnd->fcpCntl2 = task_mgmt_cmd;
	/* SLI-3 without block guard embeds the FCP_CMND in the IOCB itself */
	if (vport->phba->sli_rev == 3 &&
	    !(vport->phba->sli3_options & LPFC_SLI3_BG_ENABLED))
		lpfc_fcpcmd_to_iocb(piocb->unsli3.fcp_ext.icd, fcp_cmnd);
	piocb->ulpCommand = CMD_FCP_ICMND64_CR;
	piocb->ulpContext = ndlp->nlp_rpi;
	if (vport->phba->sli_rev == LPFC_SLI_REV4) {
		piocb->ulpContext =
		  vport->phba->sli4_hba.rpi_ids[ndlp->nlp_rpi];
	}
	piocb->ulpFCP2Rcvy = (ndlp->nlp_fcp_info & NLP_FCP_2_DEVICE) ? 1 : 0;
	piocb->ulpClass = (ndlp->nlp_fcp_info & 0x0f);
	piocb->ulpPU = 0;
	piocb->un.fcpi.fcpi_parm = 0;

	/* ulpTimeout is only one byte */
	if (lpfc_cmd->timeout > 0xff) {
		/*
		 * Do not timeout the command at the firmware level.
		 * The driver will provide the timeout mechanism.
		 */
		piocb->ulpTimeout = 0;
	} else
		piocb->ulpTimeout = lpfc_cmd->timeout;

	if (vport->phba->sli_rev == LPFC_SLI_REV4)
		lpfc_sli4_set_rsp_sgl_last(vport->phba, lpfc_cmd);

	return 1;
}
4086
4087
4088
4089
4090
4091
4092
4093
4094
4095
4096int
4097lpfc_scsi_api_table_setup(struct lpfc_hba *phba, uint8_t dev_grp)
4098{
4099
4100 phba->lpfc_scsi_unprep_dma_buf = lpfc_scsi_unprep_dma_buf;
4101 phba->lpfc_scsi_prep_cmnd = lpfc_scsi_prep_cmnd;
4102
4103 switch (dev_grp) {
4104 case LPFC_PCI_DEV_LP:
4105 phba->lpfc_scsi_prep_dma_buf = lpfc_scsi_prep_dma_buf_s3;
4106 phba->lpfc_bg_scsi_prep_dma_buf = lpfc_bg_scsi_prep_dma_buf_s3;
4107 phba->lpfc_release_scsi_buf = lpfc_release_scsi_buf_s3;
4108 phba->lpfc_get_scsi_buf = lpfc_get_scsi_buf_s3;
4109 break;
4110 case LPFC_PCI_DEV_OC:
4111 phba->lpfc_scsi_prep_dma_buf = lpfc_scsi_prep_dma_buf_s4;
4112 phba->lpfc_bg_scsi_prep_dma_buf = lpfc_bg_scsi_prep_dma_buf_s4;
4113 phba->lpfc_release_scsi_buf = lpfc_release_scsi_buf_s4;
4114 phba->lpfc_get_scsi_buf = lpfc_get_scsi_buf_s4;
4115 break;
4116 default:
4117 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
4118 "1418 Invalid HBA PCI-device group: 0x%x\n",
4119 dev_grp);
4120 return -ENODEV;
4121 break;
4122 }
4123 phba->lpfc_rampdown_queue_depth = lpfc_rampdown_queue_depth;
4124 phba->lpfc_scsi_cmd_iocb_cmpl = lpfc_scsi_cmd_iocb_cmpl;
4125 return 0;
4126}
4127
4128
4129
4130
4131
4132
4133
4134
4135
4136
4137static void
4138lpfc_tskmgmt_def_cmpl(struct lpfc_hba *phba,
4139 struct lpfc_iocbq *cmdiocbq,
4140 struct lpfc_iocbq *rspiocbq)
4141{
4142 struct lpfc_io_buf *lpfc_cmd =
4143 (struct lpfc_io_buf *) cmdiocbq->context1;
4144 if (lpfc_cmd)
4145 lpfc_release_scsi_buf(phba, lpfc_cmd);
4146 return;
4147}
4148
4149
4150
4151
4152
4153
4154
4155
4156
4157
4158
4159
4160
4161
4162
4163int
4164lpfc_check_pci_resettable(const struct lpfc_hba *phba)
4165{
4166 const struct pci_dev *pdev = phba->pcidev;
4167 struct pci_dev *ptr = NULL;
4168 u8 counter = 0;
4169
4170
4171 list_for_each_entry(ptr, &pdev->bus->devices, bus_list) {
4172
4173 if (ptr->vendor != PCI_VENDOR_ID_EMULEX) {
4174 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
4175 "8346 Non-Emulex vendor found: "
4176 "0x%04x\n", ptr->vendor);
4177 return -EBADSLT;
4178 }
4179
4180
4181 switch (ptr->device) {
4182 case PCI_DEVICE_ID_LANCER_FC:
4183 case PCI_DEVICE_ID_LANCER_G6_FC:
4184 case PCI_DEVICE_ID_LANCER_G7_FC:
4185 break;
4186 default:
4187 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
4188 "8347 Invalid device found: "
4189 "0x%04x\n", ptr->device);
4190 return -EBADSLT;
4191 }
4192
4193
4194
4195
4196 if (ptr->devfn == 0) {
4197 if (++counter > 1) {
4198 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
4199 "8348 More than one device on "
4200 "secondary bus found\n");
4201 return -EBADSLT;
4202 }
4203 }
4204 }
4205
4206 return 0;
4207}
4208
4209
4210
4211
4212
4213
4214
4215
4216
4217
4218const char *
4219lpfc_info(struct Scsi_Host *host)
4220{
4221 struct lpfc_vport *vport = (struct lpfc_vport *) host->hostdata;
4222 struct lpfc_hba *phba = vport->phba;
4223 int link_speed = 0;
4224 static char lpfcinfobuf[384];
4225 char tmp[384] = {0};
4226
4227 memset(lpfcinfobuf, 0, sizeof(lpfcinfobuf));
4228 if (phba && phba->pcidev){
4229
4230 scnprintf(tmp, sizeof(tmp), phba->ModelDesc);
4231 if (strlcat(lpfcinfobuf, tmp, sizeof(lpfcinfobuf)) >=
4232 sizeof(lpfcinfobuf))
4233 goto buffer_done;
4234
4235
4236 scnprintf(tmp, sizeof(tmp),
4237 " on PCI bus %02x device %02x irq %d",
4238 phba->pcidev->bus->number, phba->pcidev->devfn,
4239 phba->pcidev->irq);
4240 if (strlcat(lpfcinfobuf, tmp, sizeof(lpfcinfobuf)) >=
4241 sizeof(lpfcinfobuf))
4242 goto buffer_done;
4243
4244
4245 if (phba->Port[0]) {
4246 scnprintf(tmp, sizeof(tmp), " port %s", phba->Port);
4247 if (strlcat(lpfcinfobuf, tmp, sizeof(lpfcinfobuf)) >=
4248 sizeof(lpfcinfobuf))
4249 goto buffer_done;
4250 }
4251
4252
4253 link_speed = lpfc_sli_port_speed_get(phba);
4254 if (link_speed != 0) {
4255 scnprintf(tmp, sizeof(tmp),
4256 " Logical Link Speed: %d Mbps", link_speed);
4257 if (strlcat(lpfcinfobuf, tmp, sizeof(lpfcinfobuf)) >=
4258 sizeof(lpfcinfobuf))
4259 goto buffer_done;
4260 }
4261
4262
4263 if (!lpfc_check_pci_resettable(phba)) {
4264 scnprintf(tmp, sizeof(tmp), " PCI resettable");
4265 strlcat(lpfcinfobuf, tmp, sizeof(lpfcinfobuf));
4266 }
4267 }
4268
4269buffer_done:
4270 return lpfcinfobuf;
4271}
4272
4273
4274
4275
4276
4277
4278
4279
4280static __inline__ void lpfc_poll_rearm_timer(struct lpfc_hba * phba)
4281{
4282 unsigned long poll_tmo_expires =
4283 (jiffies + msecs_to_jiffies(phba->cfg_poll_tmo));
4284
4285 if (!list_empty(&phba->sli.sli3_ring[LPFC_FCP_RING].txcmplq))
4286 mod_timer(&phba->fcp_poll_timer,
4287 poll_tmo_expires);
4288}
4289
4290
4291
4292
4293
4294
4295
/**
 * lpfc_poll_start_timer - Routine to start fcp_poll timer of hba
 * @phba: The Hba for which this call is being executed.
 *
 * Thin wrapper: starts FCP ring polling by (re)arming the poll timer.
 **/
void lpfc_poll_start_timer(struct lpfc_hba * phba)
{
	lpfc_poll_rearm_timer(phba);
}
4300
4301
4302
4303
4304
4305
4306
4307
4308
4309void lpfc_poll_timeout(struct timer_list *t)
4310{
4311 struct lpfc_hba *phba = from_timer(phba, t, fcp_poll_timer);
4312
4313 if (phba->cfg_poll & ENABLE_FCP_RING_POLLING) {
4314 lpfc_sli_handle_fast_ring_event(phba,
4315 &phba->sli.sli3_ring[LPFC_FCP_RING], HA_R0RE_REQ);
4316
4317 if (phba->cfg_poll & DISABLE_FCP_RING_INT)
4318 lpfc_poll_rearm_timer(phba);
4319 }
4320}
4321
4322
4323
4324
4325
4326
4327
4328
4329
4330
4331
4332
4333
4334
/**
 * lpfc_queuecommand - scsi_host_template queuecommand entry point
 * @shost: Pointer to the scsi host.
 * @cmnd: Pointer to scsi_cmnd data structure.
 *
 * The SCSI midlayer calls this routine to submit @cmnd to the driver.
 * Validates the remote port/node, allocates a driver IO buffer, maps the
 * command for DMA (with BlockGuard prep for protected IO), and issues the
 * IOCB to the FCP ring.
 *
 * Return value:
 *   0 - Success
 *   SCSI_MLQUEUE_HOST_BUSY - driver resource shortage, retry later
 *   SCSI_MLQUEUE_TARGET_BUSY - target-side backpressure, retry later
 **/
static int
lpfc_queuecommand(struct Scsi_Host *shost, struct scsi_cmnd *cmnd)
{
	struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
	struct lpfc_hba *phba = vport->phba;
	struct lpfc_rport_data *rdata;
	struct lpfc_nodelist *ndlp;
	struct lpfc_io_buf *lpfc_cmd;
	struct fc_rport *rport = starget_to_rport(scsi_target(cmnd->device));
	int err, idx;
#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
	int cpu;
#endif

	rdata = lpfc_rport_data_from_scsi_device(cmnd->device);

	/* sanity check on references to the rport / its driver data */
	if (unlikely(!rdata) || unlikely(!rport))
		goto out_fail_command;

	err = fc_remote_port_chkready(rport);
	if (err) {
		cmnd->result = err;
		goto out_fail_command;
	}
	ndlp = rdata->pnode;

	/* Protected IO is only legal when BlockGuard was registered */
	if ((scsi_get_prot_op(cmnd) != SCSI_PROT_NORMAL) &&
	    (!(phba->sli3_options & LPFC_SLI3_BG_ENABLED))) {

		lpfc_printf_log(phba, KERN_ERR, LOG_BG,
				"9058 BLKGRD: ERROR: rcvd protected cmd:%02x"
				" op:%02x str=%s without registering for"
				" BlockGuard - Rejecting command\n",
				cmnd->cmnd[0], scsi_get_prot_op(cmnd),
				dif_op_str[scsi_get_prot_op(cmnd)]);
		goto out_fail_command;
	}

	/*
	 * Catch race where our node has transitioned, but the
	 * transport is still transitioning.
	 */
	if (!ndlp || !NLP_CHK_NODE_ACT(ndlp))
		goto out_tgt_busy;
	/* Per-node queue depth throttle */
	if (lpfc_ndlp_check_qdepth(phba, ndlp)) {
		if (atomic_read(&ndlp->cmd_pending) >= ndlp->cmd_qdepth) {
			lpfc_printf_vlog(vport, KERN_INFO, LOG_FCP_ERROR,
					 "3377 Target Queue Full, scsi Id:%d "
					 "Qdepth:%d Pending command:%d"
					 " WWNN:%02x:%02x:%02x:%02x:"
					 "%02x:%02x:%02x:%02x, "
					 " WWPN:%02x:%02x:%02x:%02x:"
					 "%02x:%02x:%02x:%02x",
					 ndlp->nlp_sid, ndlp->cmd_qdepth,
					 atomic_read(&ndlp->cmd_pending),
					 ndlp->nlp_nodename.u.wwn[0],
					 ndlp->nlp_nodename.u.wwn[1],
					 ndlp->nlp_nodename.u.wwn[2],
					 ndlp->nlp_nodename.u.wwn[3],
					 ndlp->nlp_nodename.u.wwn[4],
					 ndlp->nlp_nodename.u.wwn[5],
					 ndlp->nlp_nodename.u.wwn[6],
					 ndlp->nlp_nodename.u.wwn[7],
					 ndlp->nlp_portname.u.wwn[0],
					 ndlp->nlp_portname.u.wwn[1],
					 ndlp->nlp_portname.u.wwn[2],
					 ndlp->nlp_portname.u.wwn[3],
					 ndlp->nlp_portname.u.wwn[4],
					 ndlp->nlp_portname.u.wwn[5],
					 ndlp->nlp_portname.u.wwn[6],
					 ndlp->nlp_portname.u.wwn[7]);
			goto out_tgt_busy;
		}
	}

	lpfc_cmd = lpfc_get_scsi_buf(phba, ndlp, cmnd);
	if (lpfc_cmd == NULL) {
		lpfc_rampdown_queue_depth(phba);

		lpfc_printf_vlog(vport, KERN_INFO, LOG_FCP_ERROR,
				 "0707 driver's buffer pool is empty, "
				 "IO busied\n");
		goto out_host_busy;
	}

	/*
	 * Store the midlayer's command structure for the completion phase
	 * and complete the command initialization.
	 */
	lpfc_cmd->pCmd = cmnd;
	lpfc_cmd->rdata = rdata;
	lpfc_cmd->ndlp = ndlp;
	cmnd->host_scribble = (unsigned char *)lpfc_cmd;

	if (scsi_get_prot_op(cmnd) != SCSI_PROT_NORMAL) {
		if (vport->phba->cfg_enable_bg) {
			lpfc_printf_vlog(vport,
					 KERN_INFO, LOG_SCSI_CMD,
					 "9033 BLKGRD: rcvd %s cmd:x%x "
					 "sector x%llx cnt %u pt %x\n",
					 dif_op_str[scsi_get_prot_op(cmnd)],
					 cmnd->cmnd[0],
					 (unsigned long long)scsi_get_lba(cmnd),
					 blk_rq_sectors(cmnd->request),
					 (cmnd->cmnd[1]>>5));
		}
		err = lpfc_bg_scsi_prep_dma_buf(phba, lpfc_cmd);
	} else {
		if (vport->phba->cfg_enable_bg) {
			lpfc_printf_vlog(vport,
					 KERN_INFO, LOG_SCSI_CMD,
					 "9038 BLKGRD: rcvd PROT_NORMAL cmd: "
					 "x%x sector x%llx cnt %u pt %x\n",
					 cmnd->cmnd[0],
					 (unsigned long long)scsi_get_lba(cmnd),
					 blk_rq_sectors(cmnd->request),
					 (cmnd->cmnd[1]>>5));
		}
		err = lpfc_scsi_prep_dma_buf(phba, lpfc_cmd);
	}

	if (err)
		goto out_host_busy_free_buf;

	lpfc_scsi_prep_cmnd(vport, lpfc_cmd, ndlp);

#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
	/* Per-CPU submit accounting for debugfs */
	if (phba->cpucheck_on & LPFC_CHECK_SCSI_IO) {
		cpu = raw_smp_processor_id();
		if (cpu < LPFC_CHECK_CPU_CNT) {
			struct lpfc_sli4_hdw_queue *hdwq =
					&phba->sli4_hba.hdwq[lpfc_cmd->hdwq_no];
			hdwq->cpucheck_xmt_io[cpu]++;
		}
	}
#endif
	err = lpfc_sli_issue_iocb(phba, LPFC_FCP_RING,
				  &lpfc_cmd->cur_iocbq, SLI_IOCB_RET_IOCB);
	if (err) {
		lpfc_printf_vlog(vport, KERN_INFO, LOG_FCP,
				 "3376 FCP could not issue IOCB err %x"
				 "FCP cmd x%x <%d/%llu> "
				 "sid: x%x did: x%x oxid: x%x "
				 "Data: x%x x%x x%x x%x\n",
				 err, cmnd->cmnd[0],
				 cmnd->device ? cmnd->device->id : 0xffff,
				 cmnd->device ? cmnd->device->lun : (u64) -1,
				 vport->fc_myDID, ndlp->nlp_DID,
				 phba->sli_rev == LPFC_SLI_REV4 ?
				 lpfc_cmd->cur_iocbq.sli4_xritag : 0xffff,
				 lpfc_cmd->cur_iocbq.iocb.ulpContext,
				 lpfc_cmd->cur_iocbq.iocb.ulpIoTag,
				 lpfc_cmd->cur_iocbq.iocb.ulpTimeout,
				 (uint32_t)
				 (cmnd->request->timeout / 1000));

		goto out_host_busy_free_buf;
	}
	/* In polled mode, service the ring immediately and re-arm */
	if (phba->cfg_poll & ENABLE_FCP_RING_POLLING) {
		lpfc_sli_handle_fast_ring_event(phba,
			&phba->sli.sli3_ring[LPFC_FCP_RING], HA_R0RE_REQ);

		if (phba->cfg_poll & DISABLE_FCP_RING_INT)
			lpfc_poll_rearm_timer(phba);
	}

	if (phba->cfg_xri_rebalancing)
		lpfc_keep_pvt_pool_above_lowwm(phba, lpfc_cmd->hdwq_no);

	return 0;

 out_host_busy_free_buf:
	/* Undo the prep work and the per-hdwq stat bump before release */
	idx = lpfc_cmd->hdwq_no;
	lpfc_scsi_unprep_dma_buf(phba, lpfc_cmd);
	if (phba->sli4_hba.hdwq) {
		switch (lpfc_cmd->fcp_cmnd->fcpCntl3) {
		case WRITE_DATA:
			phba->sli4_hba.hdwq[idx].scsi_cstat.output_requests--;
			break;
		case READ_DATA:
			phba->sli4_hba.hdwq[idx].scsi_cstat.input_requests--;
			break;
		default:
			phba->sli4_hba.hdwq[idx].scsi_cstat.control_requests--;
		}
	}
	lpfc_release_scsi_buf(phba, lpfc_cmd);
 out_host_busy:
	return SCSI_MLQUEUE_HOST_BUSY;

 out_tgt_busy:
	return SCSI_MLQUEUE_TARGET_BUSY;

 out_fail_command:
	cmnd->scsi_done(cmnd);
	return 0;
}
4533
4534
4535
4536
4537
4538
4539
4540
4541
4542
4543
4544
/**
 * lpfc_abort_handler - scsi_host_template eh_abort_handler entry point
 * @cmnd: Pointer to scsi_cmnd data structure to be aborted.
 *
 * Issues an ABTS (or CLOSE when the link is down) for the IOCB that carries
 * @cmnd, then waits for the aborted IO to complete.
 *
 * Lock ordering is phba->hbalock -> lpfc_cmd->buf_lock -> pring->ring_lock
 * (ring_lock SLI-4 only); the unwind labels release them in reverse order.
 *
 * Return value:
 *   SUCCESS - the command completed (or was already gone)
 *   FAILED  - the abort could not be issued or did not complete in time
 **/
static int
lpfc_abort_handler(struct scsi_cmnd *cmnd)
{
	struct Scsi_Host *shost = cmnd->device->host;
	struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
	struct lpfc_hba *phba = vport->phba;
	struct lpfc_iocbq *iocb;
	struct lpfc_iocbq *abtsiocb;
	struct lpfc_io_buf *lpfc_cmd;
	IOCB_t *cmd, *icmd;
	int ret = SUCCESS, status = 0;
	struct lpfc_sli_ring *pring_s4 = NULL;
	int ret_val;
	unsigned long flags;
	DECLARE_WAIT_QUEUE_HEAD_ONSTACK(waitq);

	status = fc_block_scsi_eh(cmnd);
	if (status != 0 && status != SUCCESS)
		return status;

	lpfc_cmd = (struct lpfc_io_buf *)cmnd->host_scribble;
	if (!lpfc_cmd)
		return ret;

	spin_lock_irqsave(&phba->hbalock, flags);

	/* Driver is flushing all FCP IO - nothing left to abort */
	if (phba->hba_flag & HBA_FCP_IOQ_FLUSH) {
		lpfc_printf_vlog(vport, KERN_WARNING, LOG_FCP,
			"3168 SCSI Layer abort requested I/O has been "
			"flushed by LLD.\n");
		ret = FAILED;
		goto out_unlock;
	}

	/* Guard against the IO completing while we set up the abort */
	spin_lock(&lpfc_cmd->buf_lock);

	if (!lpfc_cmd->pCmd) {
		/* Command already completed - report SUCCESS */
		lpfc_printf_vlog(vport, KERN_WARNING, LOG_FCP,
			 "2873 SCSI Layer I/O Abort Request IO CMPL Status "
			 "x%x ID %d LUN %llu\n",
			 SUCCESS, cmnd->device->id, cmnd->device->lun);
		goto out_unlock_buf;
	}

	iocb = &lpfc_cmd->cur_iocbq;
	if (phba->sli_rev == LPFC_SLI_REV4) {
		pring_s4 = phba->sli4_hba.hdwq[iocb->hba_wqidx].fcp_wq->pring;
		if (!pring_s4) {
			ret = FAILED;
			goto out_unlock_buf;
		}
		spin_lock(&pring_s4->ring_lock);
	}

	/* the command is not in flight anymore */
	if (!(iocb->iocb_flag & LPFC_IO_ON_TXCMPLQ)) {
		lpfc_printf_vlog(vport, KERN_WARNING, LOG_FCP,
			"3169 SCSI Layer abort requested I/O has been "
			"cancelled by LLD.\n");
		ret = FAILED;
		goto out_unlock_ring;
	}

	/*
	 * If pCmd field of the corresponding lpfc_io_buf structure
	 * points to a different SCSI command, then the driver has
	 * already completed this command, and it is not necessary to
	 * abort the IOCB.
	 */
	if (lpfc_cmd->pCmd != cmnd) {
		lpfc_printf_vlog(vport, KERN_WARNING, LOG_FCP,
			"3170 SCSI Layer abort requested I/O has been "
			"completed by LLD.\n");
		goto out_unlock_ring;
	}

	BUG_ON(iocb->context1 != lpfc_cmd);

	/* abort issued in recovery is still in progress */
	if (iocb->iocb_flag & LPFC_DRIVER_ABORTED) {
		lpfc_printf_vlog(vport, KERN_WARNING, LOG_FCP,
			 "3389 SCSI Layer I/O Abort Request is pending\n");
		if (phba->sli_rev == LPFC_SLI_REV4)
			spin_unlock(&pring_s4->ring_lock);
		spin_unlock(&lpfc_cmd->buf_lock);
		spin_unlock_irqrestore(&phba->hbalock, flags);
		goto wait_for_cmpl;
	}

	abtsiocb = __lpfc_sli_get_iocbq(phba);
	if (abtsiocb == NULL) {
		ret = FAILED;
		goto out_unlock_ring;
	}

	/* Indicate the IO is being aborted by the driver. */
	iocb->iocb_flag |= LPFC_DRIVER_ABORTED;

	/*
	 * The scsi command can not be in txq and it is in flight because the
	 * pCmd is still pointig at the SCSI command we have to abort. There
	 * is no need to search the txcmplq. Just send an abort to the FW.
	 */
	cmd = &iocb->iocb;
	icmd = &abtsiocb->iocb;
	icmd->un.acxri.abortType = ABORT_TYPE_ABTS;
	icmd->un.acxri.abortContextTag = cmd->ulpContext;
	if (phba->sli_rev == LPFC_SLI_REV4)
		icmd->un.acxri.abortIoTag = iocb->sli4_xritag;
	else
		icmd->un.acxri.abortIoTag = cmd->ulpIoTag;

	icmd->ulpLe = 1;
	icmd->ulpClass = cmd->ulpClass;

	/* ABTS WQE must go to the same WQ as the WQE to be aborted */
	abtsiocb->hba_wqidx = iocb->hba_wqidx;
	abtsiocb->iocb_flag |= LPFC_USE_FCPWQIDX;
	if (iocb->iocb_flag & LPFC_IO_FOF)
		abtsiocb->iocb_flag |= LPFC_IO_FOF;

	if (lpfc_is_link_up(phba))
		icmd->ulpCommand = CMD_ABORT_XRI_CN;
	else
		icmd->ulpCommand = CMD_CLOSE_XRI_CN;

	abtsiocb->iocb_cmpl = lpfc_sli_abort_fcp_cmpl;
	abtsiocb->vport = vport;
	lpfc_cmd->waitq = &waitq;
	if (phba->sli_rev == LPFC_SLI_REV4) {
		/* note: both hbalock and ring_lock must be set here */
		ret_val = __lpfc_sli_issue_iocb(phba, pring_s4->ringno,
						abtsiocb, 0);
		spin_unlock(&pring_s4->ring_lock);
	} else {
		ret_val = __lpfc_sli_issue_iocb(phba, LPFC_FCP_RING,
						abtsiocb, 0);
	}

	spin_unlock_irqrestore(&phba->hbalock, flags);

	if (ret_val == IOCB_ERROR) {
		/* Indicate the IO is not being aborted by the driver. */
		iocb->iocb_flag &= ~LPFC_DRIVER_ABORTED;
		lpfc_cmd->waitq = NULL;
		spin_unlock(&lpfc_cmd->buf_lock);
		lpfc_sli_release_iocbq(phba, abtsiocb);
		ret = FAILED;
		goto out;
	}

	spin_unlock(&lpfc_cmd->buf_lock);

	if (phba->cfg_poll & DISABLE_FCP_RING_INT)
		lpfc_sli_handle_fast_ring_event(phba,
			&phba->sli.sli3_ring[LPFC_FCP_RING], HA_R0RE_REQ);

wait_for_cmpl:
	/* Wait for abort to complete (2x devloss timeout) */
	wait_event_timeout(waitq,
			  (lpfc_cmd->pCmd != cmnd),
			   msecs_to_jiffies(2*vport->cfg_devloss_tmo*1000));

	spin_lock(&lpfc_cmd->buf_lock);

	if (lpfc_cmd->pCmd == cmnd) {
		ret = FAILED;
		lpfc_printf_vlog(vport, KERN_ERR, LOG_FCP,
				 "0748 abort handler timed out waiting "
				 "for aborting I/O (xri:x%x) to complete: "
				 "ret %#x, ID %d, LUN %llu\n",
				 iocb->sli4_xritag, ret,
				 cmnd->device->id, cmnd->device->lun);
	}

	lpfc_cmd->waitq = NULL;

	spin_unlock(&lpfc_cmd->buf_lock);
	goto out;

out_unlock_ring:
	if (phba->sli_rev == LPFC_SLI_REV4)
		spin_unlock(&pring_s4->ring_lock);
out_unlock_buf:
	spin_unlock(&lpfc_cmd->buf_lock);
out_unlock:
	spin_unlock_irqrestore(&phba->hbalock, flags);
out:
	lpfc_printf_vlog(vport, KERN_WARNING, LOG_FCP,
			 "0749 SCSI Layer I/O Abort Request Status x%x ID %d "
			 "LUN %llu\n", ret, cmnd->device->id,
			 cmnd->device->lun);
	return ret;
}
4739
4740static char *
4741lpfc_taskmgmt_name(uint8_t task_mgmt_cmd)
4742{
4743 switch (task_mgmt_cmd) {
4744 case FCP_ABORT_TASK_SET:
4745 return "ABORT_TASK_SET";
4746 case FCP_CLEAR_TASK_SET:
4747 return "FCP_CLEAR_TASK_SET";
4748 case FCP_BUS_RESET:
4749 return "FCP_BUS_RESET";
4750 case FCP_LUN_RESET:
4751 return "FCP_LUN_RESET";
4752 case FCP_TARGET_RESET:
4753 return "FCP_TARGET_RESET";
4754 case FCP_CLEAR_ACA:
4755 return "FCP_CLEAR_ACA";
4756 case FCP_TERMINATE_TASK:
4757 return "FCP_TERMINATE_TASK";
4758 default:
4759 return "unknown";
4760 }
4761}
4762
4763
4764
4765
4766
4767
4768
4769
4770
4771
4772
4773
4774
4775static int
4776lpfc_check_fcp_rsp(struct lpfc_vport *vport, struct lpfc_io_buf *lpfc_cmd)
4777{
4778 struct fcp_rsp *fcprsp = lpfc_cmd->fcp_rsp;
4779 uint32_t rsp_info;
4780 uint32_t rsp_len;
4781 uint8_t rsp_info_code;
4782 int ret = FAILED;
4783
4784
4785 if (fcprsp == NULL)
4786 lpfc_printf_vlog(vport, KERN_INFO, LOG_FCP,
4787 "0703 fcp_rsp is missing\n");
4788 else {
4789 rsp_info = fcprsp->rspStatus2;
4790 rsp_len = be32_to_cpu(fcprsp->rspRspLen);
4791 rsp_info_code = fcprsp->rspInfo3;
4792
4793
4794 lpfc_printf_vlog(vport, KERN_INFO,
4795 LOG_FCP,
4796 "0706 fcp_rsp valid 0x%x,"
4797 " rsp len=%d code 0x%x\n",
4798 rsp_info,
4799 rsp_len, rsp_info_code);
4800
4801
4802
4803
4804
4805 if ((fcprsp->rspStatus2 & RSP_LEN_VALID) &&
4806 ((rsp_len == 8) || (rsp_len == 4))) {
4807 switch (rsp_info_code) {
4808 case RSP_NO_FAILURE:
4809 lpfc_printf_vlog(vport, KERN_INFO, LOG_FCP,
4810 "0715 Task Mgmt No Failure\n");
4811 ret = SUCCESS;
4812 break;
4813 case RSP_TM_NOT_SUPPORTED:
4814 lpfc_printf_vlog(vport, KERN_INFO, LOG_FCP,
4815 "0716 Task Mgmt Target "
4816 "reject\n");
4817 break;
4818 case RSP_TM_NOT_COMPLETED:
4819 lpfc_printf_vlog(vport, KERN_INFO, LOG_FCP,
4820 "0717 Task Mgmt Target "
4821 "failed TM\n");
4822 break;
4823 case RSP_TM_INVALID_LU:
4824 lpfc_printf_vlog(vport, KERN_INFO, LOG_FCP,
4825 "0718 Task Mgmt to invalid "
4826 "LUN\n");
4827 break;
4828 }
4829 }
4830 }
4831 return ret;
4832}
4833
4834
4835
4836
4837
4838
4839
4840
4841
4842
4843
4844
4845
4846
4847
4848
4849
/**
 * lpfc_send_taskmgmt - Generic SCSI Task Mgmt Handler
 * @vport: The virtual port for which this call is being executed.
 * @cmnd: Pointer to scsi_cmnd data structure.
 * @tgt_id: Target ID of remote device.
 * @lun_id: Lun number for the TMF.
 * @task_mgmt_cmd: type of TMF to send (FCP_LUN_RESET, FCP_TARGET_RESET, ...).
 *
 * Builds and synchronously issues a task management FCP command IOCB to
 * the node behind @cmnd's device, then interprets the completion status.
 *
 * Ownership note: the lpfc_io_buf is released here except on IOCB_TIMEDOUT,
 * where the completion routine (lpfc_tskmgmt_def_cmpl) will release it when
 * the IOCB finally completes.
 *
 * Return value:
 *   SUCCESS, FAILED, or TIMEOUT_ERROR.
 **/
static int
lpfc_send_taskmgmt(struct lpfc_vport *vport, struct scsi_cmnd *cmnd,
		   unsigned int tgt_id, uint64_t lun_id,
		   uint8_t task_mgmt_cmd)
{
	struct lpfc_hba *phba = vport->phba;
	struct lpfc_io_buf *lpfc_cmd;
	struct lpfc_iocbq *iocbq;
	struct lpfc_iocbq *iocbqrsp;
	struct lpfc_rport_data *rdata;
	struct lpfc_nodelist *pnode;
	int ret;
	int status;

	rdata = lpfc_rport_data_from_scsi_device(cmnd->device);
	if (!rdata || !rdata->pnode || !NLP_CHK_NODE_ACT(rdata->pnode))
		return FAILED;
	pnode = rdata->pnode;

	lpfc_cmd = lpfc_get_scsi_buf(phba, pnode, NULL);
	if (lpfc_cmd == NULL)
		return FAILED;
	lpfc_cmd->timeout = phba->cfg_task_mgmt_tmo;
	lpfc_cmd->rdata = rdata;
	lpfc_cmd->pCmd = cmnd;
	lpfc_cmd->ndlp = pnode;

	status = lpfc_scsi_prep_task_mgmt_cmd(vport, lpfc_cmd, lun_id,
					   task_mgmt_cmd);
	if (!status) {
		lpfc_release_scsi_buf(phba, lpfc_cmd);
		return FAILED;
	}

	iocbq = &lpfc_cmd->cur_iocbq;
	iocbqrsp = lpfc_sli_get_iocbq(phba);
	if (iocbqrsp == NULL) {
		lpfc_release_scsi_buf(phba, lpfc_cmd);
		return FAILED;
	}
	iocbq->iocb_cmpl = lpfc_tskmgmt_def_cmpl;

	lpfc_printf_vlog(vport, KERN_INFO, LOG_FCP,
			 "0702 Issue %s to TGT %d LUN %llu "
			 "rpi x%x nlp_flag x%x Data: x%x x%x\n",
			 lpfc_taskmgmt_name(task_mgmt_cmd), tgt_id, lun_id,
			 pnode->nlp_rpi, pnode->nlp_flag, iocbq->sli4_xritag,
			 iocbq->iocb_flag);

	status = lpfc_sli_issue_iocb_wait(phba, LPFC_FCP_RING,
					  iocbq, iocbqrsp, lpfc_cmd->timeout);
	if ((status != IOCB_SUCCESS) ||
	    (iocbqrsp->iocb.ulpStatus != IOSTAT_SUCCESS)) {
		if (status != IOCB_SUCCESS ||
		    iocbqrsp->iocb.ulpStatus != IOSTAT_FCP_RSP_ERROR)
			lpfc_printf_vlog(vport, KERN_ERR, LOG_FCP,
					 "0727 TMF %s to TGT %d LUN %llu "
					 "failed (%d, %d) iocb_flag x%x\n",
					 lpfc_taskmgmt_name(task_mgmt_cmd),
					 tgt_id, lun_id,
					 iocbqrsp->iocb.ulpStatus,
					 iocbqrsp->iocb.un.ulpWord[4],
					 iocbq->iocb_flag);
		/* if ulpStatus != IOCB_SUCCESS, then status == IOCB_SUCCESS */
		if (status == IOCB_SUCCESS) {
			if (iocbqrsp->iocb.ulpStatus == IOSTAT_FCP_RSP_ERROR)
				/* Something in the FCP_RSP was invalid.
				 * Check conditions */
				ret = lpfc_check_fcp_rsp(vport, lpfc_cmd);
			else
				ret = FAILED;
		} else if (status == IOCB_TIMEDOUT) {
			ret = TIMEOUT_ERROR;
		} else {
			ret = FAILED;
		}
	} else
		ret = SUCCESS;

	lpfc_sli_release_iocbq(phba, iocbqrsp);

	/* On timeout, the completion routine frees the buffer later */
	if (ret != TIMEOUT_ERROR)
		lpfc_release_scsi_buf(phba, lpfc_cmd);

	return ret;
}
4936
4937
4938
4939
4940
4941
4942
4943
4944
4945
4946
4947
4948
4949static int
4950lpfc_chk_tgt_mapped(struct lpfc_vport *vport, struct scsi_cmnd *cmnd)
4951{
4952 struct lpfc_rport_data *rdata;
4953 struct lpfc_nodelist *pnode;
4954 unsigned long later;
4955
4956 rdata = lpfc_rport_data_from_scsi_device(cmnd->device);
4957 if (!rdata) {
4958 lpfc_printf_vlog(vport, KERN_INFO, LOG_FCP,
4959 "0797 Tgt Map rport failure: rdata x%p\n", rdata);
4960 return FAILED;
4961 }
4962 pnode = rdata->pnode;
4963
4964
4965
4966
4967 later = msecs_to_jiffies(2 * vport->cfg_devloss_tmo * 1000) + jiffies;
4968 while (time_after(later, jiffies)) {
4969 if (!pnode || !NLP_CHK_NODE_ACT(pnode))
4970 return FAILED;
4971 if (pnode->nlp_state == NLP_STE_MAPPED_NODE)
4972 return SUCCESS;
4973 schedule_timeout_uninterruptible(msecs_to_jiffies(500));
4974 rdata = lpfc_rport_data_from_scsi_device(cmnd->device);
4975 if (!rdata)
4976 return FAILED;
4977 pnode = rdata->pnode;
4978 }
4979 if (!pnode || !NLP_CHK_NODE_ACT(pnode) ||
4980 (pnode->nlp_state != NLP_STE_MAPPED_NODE))
4981 return FAILED;
4982 return SUCCESS;
4983}
4984
4985
4986
4987
4988
4989
4990
4991
4992
4993
4994
4995
4996
4997
4998
4999
5000
5001static int
5002lpfc_reset_flush_io_context(struct lpfc_vport *vport, uint16_t tgt_id,
5003 uint64_t lun_id, lpfc_ctx_cmd context)
5004{
5005 struct lpfc_hba *phba = vport->phba;
5006 unsigned long later;
5007 int cnt;
5008
5009 cnt = lpfc_sli_sum_iocb(vport, tgt_id, lun_id, context);
5010 if (cnt)
5011 lpfc_sli_abort_taskmgmt(vport,
5012 &phba->sli.sli3_ring[LPFC_FCP_RING],
5013 tgt_id, lun_id, context);
5014 later = msecs_to_jiffies(2 * vport->cfg_devloss_tmo * 1000) + jiffies;
5015 while (time_after(later, jiffies) && cnt) {
5016 schedule_timeout_uninterruptible(msecs_to_jiffies(20));
5017 cnt = lpfc_sli_sum_iocb(vport, tgt_id, lun_id, context);
5018 }
5019 if (cnt) {
5020 lpfc_printf_vlog(vport, KERN_ERR, LOG_FCP,
5021 "0724 I/O flush failure for context %s : cnt x%x\n",
5022 ((context == LPFC_CTX_LUN) ? "LUN" :
5023 ((context == LPFC_CTX_TGT) ? "TGT" :
5024 ((context == LPFC_CTX_HOST) ? "HOST" : "Unknown"))),
5025 cnt);
5026 return FAILED;
5027 }
5028 return SUCCESS;
5029}
5030
5031
5032
5033
5034
5035
5036
5037
5038
5039
5040
5041
5042static int
5043lpfc_device_reset_handler(struct scsi_cmnd *cmnd)
5044{
5045 struct Scsi_Host *shost = cmnd->device->host;
5046 struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
5047 struct lpfc_rport_data *rdata;
5048 struct lpfc_nodelist *pnode;
5049 unsigned tgt_id = cmnd->device->id;
5050 uint64_t lun_id = cmnd->device->lun;
5051 struct lpfc_scsi_event_header scsi_event;
5052 int status;
5053
5054 rdata = lpfc_rport_data_from_scsi_device(cmnd->device);
5055 if (!rdata || !rdata->pnode) {
5056 lpfc_printf_vlog(vport, KERN_ERR, LOG_FCP,
5057 "0798 Device Reset rdata failure: rdata x%p\n",
5058 rdata);
5059 return FAILED;
5060 }
5061 pnode = rdata->pnode;
5062 status = fc_block_scsi_eh(cmnd);
5063 if (status != 0 && status != SUCCESS)
5064 return status;
5065
5066 status = lpfc_chk_tgt_mapped(vport, cmnd);
5067 if (status == FAILED) {
5068 lpfc_printf_vlog(vport, KERN_ERR, LOG_FCP,
5069 "0721 Device Reset rport failure: rdata x%p\n", rdata);
5070 return FAILED;
5071 }
5072
5073 scsi_event.event_type = FC_REG_SCSI_EVENT;
5074 scsi_event.subcategory = LPFC_EVENT_LUNRESET;
5075 scsi_event.lun = lun_id;
5076 memcpy(scsi_event.wwpn, &pnode->nlp_portname, sizeof(struct lpfc_name));
5077 memcpy(scsi_event.wwnn, &pnode->nlp_nodename, sizeof(struct lpfc_name));
5078
5079 fc_host_post_vendor_event(shost, fc_get_event_number(),
5080 sizeof(scsi_event), (char *)&scsi_event, LPFC_NL_VENDOR_ID);
5081
5082 status = lpfc_send_taskmgmt(vport, cmnd, tgt_id, lun_id,
5083 FCP_LUN_RESET);
5084
5085 lpfc_printf_vlog(vport, KERN_ERR, LOG_FCP,
5086 "0713 SCSI layer issued Device Reset (%d, %llu) "
5087 "return x%x\n", tgt_id, lun_id, status);
5088
5089
5090
5091
5092
5093
5094
5095 if (status == SUCCESS)
5096 status = lpfc_reset_flush_io_context(vport, tgt_id, lun_id,
5097 LPFC_CTX_LUN);
5098
5099 return status;
5100}
5101
5102
5103
5104
5105
5106
5107
5108
5109
5110
5111
5112
/**
 * lpfc_target_reset_handler - scsi_host_template eh_target_reset entry point
 * @cmnd: Pointer to scsi_cmnd data structure.
 *
 * Sends an FCP_TARGET_RESET TMF for @cmnd's target and, on success, flushes
 * any IO still outstanding against that target. If the target never reaches
 * MAPPED state, clears ADISC/FCP-2 recovery flags on the node, flushes the
 * target's IO, and returns FAST_IO_FAIL so the midlayer fails IO quickly.
 *
 * Return value:
 *   SUCCESS, FAILED, FAST_IO_FAIL, or the status from fc_block_scsi_eh().
 **/
static int
lpfc_target_reset_handler(struct scsi_cmnd *cmnd)
{
	struct Scsi_Host *shost = cmnd->device->host;
	struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
	struct lpfc_rport_data *rdata;
	struct lpfc_nodelist *pnode;
	unsigned tgt_id = cmnd->device->id;
	uint64_t lun_id = cmnd->device->lun;
	struct lpfc_scsi_event_header scsi_event;
	int status;

	rdata = lpfc_rport_data_from_scsi_device(cmnd->device);
	if (!rdata || !rdata->pnode) {
		lpfc_printf_vlog(vport, KERN_ERR, LOG_FCP,
				 "0799 Target Reset rdata failure: rdata x%p\n",
				 rdata);
		return FAILED;
	}
	pnode = rdata->pnode;
	status = fc_block_scsi_eh(cmnd);
	if (status != 0 && status != SUCCESS)
		return status;

	status = lpfc_chk_tgt_mapped(vport, cmnd);
	if (status == FAILED) {
		lpfc_printf_vlog(vport, KERN_ERR, LOG_FCP,
			"0722 Target Reset rport failure: rdata x%p\n", rdata);
		if (pnode) {
			/* Drop recovery flags under host_lock so discovery
			 * does not retry ADISC / FCP-2 recovery on this node.
			 */
			spin_lock_irq(shost->host_lock);
			pnode->nlp_flag &= ~NLP_NPR_ADISC;
			pnode->nlp_fcp_info &= ~NLP_FCP_2_DEVICE;
			spin_unlock_irq(shost->host_lock);
		}
		lpfc_reset_flush_io_context(vport, tgt_id, lun_id,
					  LPFC_CTX_TGT);
		return FAST_IO_FAIL;
	}

	/* Post a target-reset vendor event for upper-layer listeners */
	scsi_event.event_type = FC_REG_SCSI_EVENT;
	scsi_event.subcategory = LPFC_EVENT_TGTRESET;
	scsi_event.lun = 0;
	memcpy(scsi_event.wwpn, &pnode->nlp_portname, sizeof(struct lpfc_name));
	memcpy(scsi_event.wwnn, &pnode->nlp_nodename, sizeof(struct lpfc_name));

	fc_host_post_vendor_event(shost, fc_get_event_number(),
		sizeof(scsi_event), (char *)&scsi_event, LPFC_NL_VENDOR_ID);

	status = lpfc_send_taskmgmt(vport, cmnd, tgt_id, lun_id,
					FCP_TARGET_RESET);

	lpfc_printf_vlog(vport, KERN_ERR, LOG_FCP,
			 "0723 SCSI layer issued Target Reset (%d, %llu) "
			 "return x%x\n", tgt_id, lun_id, status);

	/*
	 * We have to clean up i/o as : they may be orphaned by the TMF;
	 * or if the TMF failed, they may be in an indeterminate state.
	 * So, continue on.
	 * We will report success if all the i/o aborts successfully.
	 */
	if (status == SUCCESS)
		status = lpfc_reset_flush_io_context(vport, tgt_id, lun_id,
					  LPFC_CTX_TGT);
	return status;
}
5179
5180
5181
5182
5183
5184
5185
5186
5187
5188
5189
5190
/**
 * lpfc_bus_reset_handler - scsi_host_template eh_bus_reset_handler entry point
 * @cmnd: Pointer to scsi_cmnd data structure.
 *
 * Iterates over every possible target ID, sending an FCP_TARGET_RESET TMF
 * to each mapped FCP target node on this vport, then flushes all remaining
 * outstanding IO for the whole host context.
 *
 * Return value:
 *   SUCCESS or FAILED (or the status from fc_block_scsi_eh()).
 **/
static int
lpfc_bus_reset_handler(struct scsi_cmnd *cmnd)
{
	struct Scsi_Host *shost = cmnd->device->host;
	struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
	struct lpfc_nodelist *ndlp = NULL;
	struct lpfc_scsi_event_header scsi_event;
	int match;
	int ret = SUCCESS, status, i;

	/* Post a bus-reset vendor event for upper-layer listeners */
	scsi_event.event_type = FC_REG_SCSI_EVENT;
	scsi_event.subcategory = LPFC_EVENT_BUSRESET;
	scsi_event.lun = 0;
	memcpy(scsi_event.wwpn, &vport->fc_portname, sizeof(struct lpfc_name));
	memcpy(scsi_event.wwnn, &vport->fc_nodename, sizeof(struct lpfc_name));

	fc_host_post_vendor_event(shost, fc_get_event_number(),
		sizeof(scsi_event), (char *)&scsi_event, LPFC_NL_VENDOR_ID);

	status = fc_block_scsi_eh(cmnd);
	if (status != 0 && status != SUCCESS)
		return status;

	/*
	 * Since the driver manages a single bus device, reset all
	 * targets known to the driver.  Should any target reset
	 * fail, this routine returns failure to the midlayer.
	 */
	for (i = 0; i < LPFC_MAX_TARGET; i++) {
		/* Search for mapped node by target ID (under host_lock) */
		match = 0;
		spin_lock_irq(shost->host_lock);
		list_for_each_entry(ndlp, &vport->fc_nodes, nlp_listp) {
			if (!NLP_CHK_NODE_ACT(ndlp))
				continue;
			/* Optionally skip FCP-2 (tape) devices */
			if (vport->phba->cfg_fcp2_no_tgt_reset &&
			    (ndlp->nlp_fcp_info & NLP_FCP_2_DEVICE))
				continue;
			if (ndlp->nlp_state == NLP_STE_MAPPED_NODE &&
			    ndlp->nlp_sid == i &&
			    ndlp->rport &&
			    ndlp->nlp_type & NLP_FCP_TARGET) {
				match = 1;
				break;
			}
		}
		spin_unlock_irq(shost->host_lock);
		if (!match)
			continue;

		status = lpfc_send_taskmgmt(vport, cmnd,
					i, 0, FCP_TARGET_RESET);

		if (status != SUCCESS) {
			lpfc_printf_vlog(vport, KERN_ERR, LOG_FCP,
					 "0700 Bus Reset on target %d failed\n",
					 i);
			ret = FAILED;
		}
	}
	/*
	 * We have to clean up i/o as : they may be orphaned by the TMFs
	 * above; or if any of the TMFs failed, they may be in an
	 * indeterminate state.
	 * We will report success if all the i/o aborts successfully.
	 */
	status = lpfc_reset_flush_io_context(vport, 0, 0, LPFC_CTX_HOST);
	if (status != SUCCESS)
		ret = FAILED;

	lpfc_printf_vlog(vport, KERN_ERR, LOG_FCP,
			 "0714 SCSI layer issued Bus Reset Data: x%x\n", ret);
	return ret;
}
5266
5267
5268
5269
5270
5271
5272
5273
5274
5275
5276
5277
5278
5279
5280
5281
5282
5283static int
5284lpfc_host_reset_handler(struct scsi_cmnd *cmnd)
5285{
5286 struct Scsi_Host *shost = cmnd->device->host;
5287 struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
5288 struct lpfc_hba *phba = vport->phba;
5289 int rc, ret = SUCCESS;
5290
5291 lpfc_printf_vlog(vport, KERN_ERR, LOG_FCP,
5292 "3172 SCSI layer issued Host Reset Data:\n");
5293
5294 lpfc_offline_prep(phba, LPFC_MBX_WAIT);
5295 lpfc_offline(phba);
5296 rc = lpfc_sli_brdrestart(phba);
5297 if (rc)
5298 ret = FAILED;
5299 rc = lpfc_online(phba);
5300 if (rc)
5301 ret = FAILED;
5302 lpfc_unblock_mgmt_io(phba);
5303
5304 if (ret == FAILED) {
5305 lpfc_printf_vlog(vport, KERN_ERR, LOG_FCP,
5306 "3323 Failed host reset, bring it offline\n");
5307 lpfc_sli4_offline_eratt(phba);
5308 }
5309 return ret;
5310}
5311
5312
5313
5314
5315
5316
5317
5318
5319
5320
5321
5322
5323
5324
/**
 * lpfc_slave_alloc - scsi_host_template slave_alloc entry point
 * @sdev: Pointer to scsi_device.
 *
 * Attaches driver per-LUN data to @sdev->hostdata. With OAS (cfg_fof)
 * enabled, looks up or creates an lpfc_device_data entry keyed by
 * (vport wwpn, target wwpn, lun); otherwise points hostdata at the rport's
 * dd_data. For SLI-3 only, also grows the preallocated scsi buffer pool to
 * cover the new LUN's queue depth.
 *
 * Return value:
 *   0 on success, -ENXIO if the rport is not ready, -ENOMEM on allocation
 *   failure.
 **/
static int
lpfc_slave_alloc(struct scsi_device *sdev)
{
	struct lpfc_vport *vport = (struct lpfc_vport *) sdev->host->hostdata;
	struct lpfc_hba *phba = vport->phba;
	struct fc_rport *rport = starget_to_rport(scsi_target(sdev));
	uint32_t total = 0;
	uint32_t num_to_alloc = 0;
	int num_allocated = 0;
	uint32_t sdev_cnt;
	struct lpfc_device_data *device_data;
	unsigned long flags;
	struct lpfc_name target_wwpn;

	if (!rport || fc_remote_port_chkready(rport))
		return -ENXIO;

	if (phba->cfg_fof) {
		/*
		 * Check to see if the device data structure for the lun
		 * exists. If not, create one.
		 *
		 * NOTE(review): devicelock is dropped while the entry is
		 * created and reacquired before list_add_tail; a concurrent
		 * slave_alloc for the same lun could presumably race here —
		 * confirm callers serialize this path.
		 */
		u64_to_wwn(rport->port_name, target_wwpn.u.wwn);
		spin_lock_irqsave(&phba->devicelock, flags);
		device_data = __lpfc_get_device_data(phba,
						     &phba->luns,
						     &vport->fc_portname,
						     &target_wwpn,
						     sdev->lun);
		if (!device_data) {
			spin_unlock_irqrestore(&phba->devicelock, flags);
			device_data = lpfc_create_device_data(phba,
							&vport->fc_portname,
							&target_wwpn,
							sdev->lun,
							phba->cfg_XLanePriority,
							true);
			if (!device_data)
				return -ENOMEM;
			spin_lock_irqsave(&phba->devicelock, flags);
			list_add_tail(&device_data->listentry, &phba->luns);
		}
		device_data->rport_data = rport->dd_data;
		device_data->available = true;
		spin_unlock_irqrestore(&phba->devicelock, flags);
		sdev->hostdata = device_data;
	} else {
		sdev->hostdata = rport->dd_data;
	}
	sdev_cnt = atomic_inc_return(&phba->sdev_cnt);

	/* For SLI4, all IO buffers are pre-allocated */
	if (phba->sli_rev == LPFC_SLI_REV4)
		return 0;

	/*
	 * SLI-3 only: populate the scsi buffer pool.
	 * Each LUN contributes cfg_lun_queue_depth + 2 buffers (the +2
	 * covers task management commands); allocation is capped by
	 * cfg_hba_queue_depth less a reserve for discovery IOCBs.
	 */
	total = phba->total_scsi_bufs;
	num_to_alloc = vport->cfg_lun_queue_depth + 2;

	/* If allocated buffers are enough, just return */
	if ((sdev_cnt * (vport->cfg_lun_queue_depth + 2)) < total)
		return 0;

	/* Clamp the request so the pool never exceeds the HBA queue depth */
	if (total >= phba->cfg_hba_queue_depth - LPFC_DISC_IOCB_BUFF_COUNT ) {
		lpfc_printf_vlog(vport, KERN_WARNING, LOG_FCP,
				 "0704 At limitation of %d preallocated "
				 "command buffers\n", total);
		return 0;
	/* NOTE(review): the reduced num_to_alloc below ignores the
	 * LPFC_DISC_IOCB_BUFF_COUNT reserve used in the bound checks —
	 * confirm whether the discovery reserve should also apply here.
	 */
	} else if (total + num_to_alloc >
		phba->cfg_hba_queue_depth - LPFC_DISC_IOCB_BUFF_COUNT ) {
		lpfc_printf_vlog(vport, KERN_WARNING, LOG_FCP,
				 "0705 Allocation request of %d "
				 "command buffers will exceed max of %d. "
				 "Reducing allocation request to %d.\n",
				 num_to_alloc, phba->cfg_hba_queue_depth,
				 (phba->cfg_hba_queue_depth - total));
		num_to_alloc = phba->cfg_hba_queue_depth - total;
	}
	num_allocated = lpfc_new_scsi_buf_s3(vport, num_to_alloc);
	if (num_to_alloc != num_allocated) {
			lpfc_printf_vlog(vport, KERN_ERR, LOG_FCP,
					 "0708 Allocation request of %d "
					 "command buffers did not succeed. "
					 "Allocated %d buffers.\n",
					 num_to_alloc, num_allocated);
	}
	if (num_allocated > 0)
		phba->total_scsi_bufs += num_allocated;
	return 0;
}
5427
5428
5429
5430
5431
5432
5433
5434
5435
5436
5437
5438
5439static int
5440lpfc_slave_configure(struct scsi_device *sdev)
5441{
5442 struct lpfc_vport *vport = (struct lpfc_vport *) sdev->host->hostdata;
5443 struct lpfc_hba *phba = vport->phba;
5444
5445 scsi_change_queue_depth(sdev, vport->cfg_lun_queue_depth);
5446
5447 if (phba->cfg_poll & ENABLE_FCP_RING_POLLING) {
5448 lpfc_sli_handle_fast_ring_event(phba,
5449 &phba->sli.sli3_ring[LPFC_FCP_RING], HA_R0RE_REQ);
5450 if (phba->cfg_poll & DISABLE_FCP_RING_INT)
5451 lpfc_poll_rearm_timer(phba);
5452 }
5453
5454 return 0;
5455}
5456
5457
5458
5459
5460
5461
5462
5463static void
5464lpfc_slave_destroy(struct scsi_device *sdev)
5465{
5466 struct lpfc_vport *vport = (struct lpfc_vport *) sdev->host->hostdata;
5467 struct lpfc_hba *phba = vport->phba;
5468 unsigned long flags;
5469 struct lpfc_device_data *device_data = sdev->hostdata;
5470
5471 atomic_dec(&phba->sdev_cnt);
5472 if ((phba->cfg_fof) && (device_data)) {
5473 spin_lock_irqsave(&phba->devicelock, flags);
5474 device_data->available = false;
5475 if (!device_data->oas_enabled)
5476 lpfc_delete_device_data(phba, device_data);
5477 spin_unlock_irqrestore(&phba->devicelock, flags);
5478 }
5479 sdev->hostdata = NULL;
5480 return;
5481}
5482
5483
5484
5485
5486
5487
5488
5489
5490
5491
5492
5493
5494
5495
5496
5497
5498
5499
5500
5501struct lpfc_device_data*
5502lpfc_create_device_data(struct lpfc_hba *phba, struct lpfc_name *vport_wwpn,
5503 struct lpfc_name *target_wwpn, uint64_t lun,
5504 uint32_t pri, bool atomic_create)
5505{
5506
5507 struct lpfc_device_data *lun_info;
5508 int memory_flags;
5509
5510 if (unlikely(!phba) || !vport_wwpn || !target_wwpn ||
5511 !(phba->cfg_fof))
5512 return NULL;
5513
5514
5515
5516 if (atomic_create)
5517 memory_flags = GFP_ATOMIC;
5518 else
5519 memory_flags = GFP_KERNEL;
5520 lun_info = mempool_alloc(phba->device_data_mem_pool, memory_flags);
5521 if (!lun_info)
5522 return NULL;
5523 INIT_LIST_HEAD(&lun_info->listentry);
5524 lun_info->rport_data = NULL;
5525 memcpy(&lun_info->device_id.vport_wwpn, vport_wwpn,
5526 sizeof(struct lpfc_name));
5527 memcpy(&lun_info->device_id.target_wwpn, target_wwpn,
5528 sizeof(struct lpfc_name));
5529 lun_info->device_id.lun = lun;
5530 lun_info->oas_enabled = false;
5531 lun_info->priority = pri;
5532 lun_info->available = false;
5533 return lun_info;
5534}
5535
5536
5537
5538
5539
5540
5541
5542
5543
5544void
5545lpfc_delete_device_data(struct lpfc_hba *phba,
5546 struct lpfc_device_data *lun_info)
5547{
5548
5549 if (unlikely(!phba) || !lun_info ||
5550 !(phba->cfg_fof))
5551 return;
5552
5553 if (!list_empty(&lun_info->listentry))
5554 list_del(&lun_info->listentry);
5555 mempool_free(lun_info, phba->device_data_mem_pool);
5556 return;
5557}
5558
5559
5560
5561
5562
5563
5564
5565
5566
5567
5568
5569
5570
5571
5572
5573
5574
5575struct lpfc_device_data*
5576__lpfc_get_device_data(struct lpfc_hba *phba, struct list_head *list,
5577 struct lpfc_name *vport_wwpn,
5578 struct lpfc_name *target_wwpn, uint64_t lun)
5579{
5580
5581 struct lpfc_device_data *lun_info;
5582
5583 if (unlikely(!phba) || !list || !vport_wwpn || !target_wwpn ||
5584 !phba->cfg_fof)
5585 return NULL;
5586
5587
5588
5589 list_for_each_entry(lun_info, list, listentry) {
5590 if ((memcmp(&lun_info->device_id.vport_wwpn, vport_wwpn,
5591 sizeof(struct lpfc_name)) == 0) &&
5592 (memcmp(&lun_info->device_id.target_wwpn, target_wwpn,
5593 sizeof(struct lpfc_name)) == 0) &&
5594 (lun_info->device_id.lun == lun))
5595 return lun_info;
5596 }
5597
5598 return NULL;
5599}
5600
5601
5602
5603
5604
5605
5606
5607
5608
5609
5610
5611
5612
5613
5614
5615
5616
5617
5618
5619
5620
5621
5622
5623
5624
5625
5626
5627bool
5628lpfc_find_next_oas_lun(struct lpfc_hba *phba, struct lpfc_name *vport_wwpn,
5629 struct lpfc_name *target_wwpn, uint64_t *starting_lun,
5630 struct lpfc_name *found_vport_wwpn,
5631 struct lpfc_name *found_target_wwpn,
5632 uint64_t *found_lun,
5633 uint32_t *found_lun_status,
5634 uint32_t *found_lun_pri)
5635{
5636
5637 unsigned long flags;
5638 struct lpfc_device_data *lun_info;
5639 struct lpfc_device_id *device_id;
5640 uint64_t lun;
5641 bool found = false;
5642
5643 if (unlikely(!phba) || !vport_wwpn || !target_wwpn ||
5644 !starting_lun || !found_vport_wwpn ||
5645 !found_target_wwpn || !found_lun || !found_lun_status ||
5646 (*starting_lun == NO_MORE_OAS_LUN) ||
5647 !phba->cfg_fof)
5648 return false;
5649
5650 lun = *starting_lun;
5651 *found_lun = NO_MORE_OAS_LUN;
5652 *starting_lun = NO_MORE_OAS_LUN;
5653
5654
5655
5656 spin_lock_irqsave(&phba->devicelock, flags);
5657 list_for_each_entry(lun_info, &phba->luns, listentry) {
5658 if (((wwn_to_u64(vport_wwpn->u.wwn) == 0) ||
5659 (memcmp(&lun_info->device_id.vport_wwpn, vport_wwpn,
5660 sizeof(struct lpfc_name)) == 0)) &&
5661 ((wwn_to_u64(target_wwpn->u.wwn) == 0) ||
5662 (memcmp(&lun_info->device_id.target_wwpn, target_wwpn,
5663 sizeof(struct lpfc_name)) == 0)) &&
5664 (lun_info->oas_enabled)) {
5665 device_id = &lun_info->device_id;
5666 if ((!found) &&
5667 ((lun == FIND_FIRST_OAS_LUN) ||
5668 (device_id->lun == lun))) {
5669 *found_lun = device_id->lun;
5670 memcpy(found_vport_wwpn,
5671 &device_id->vport_wwpn,
5672 sizeof(struct lpfc_name));
5673 memcpy(found_target_wwpn,
5674 &device_id->target_wwpn,
5675 sizeof(struct lpfc_name));
5676 if (lun_info->available)
5677 *found_lun_status =
5678 OAS_LUN_STATUS_EXISTS;
5679 else
5680 *found_lun_status = 0;
5681 *found_lun_pri = lun_info->priority;
5682 if (phba->cfg_oas_flags & OAS_FIND_ANY_VPORT)
5683 memset(vport_wwpn, 0x0,
5684 sizeof(struct lpfc_name));
5685 if (phba->cfg_oas_flags & OAS_FIND_ANY_TARGET)
5686 memset(target_wwpn, 0x0,
5687 sizeof(struct lpfc_name));
5688 found = true;
5689 } else if (found) {
5690 *starting_lun = device_id->lun;
5691 memcpy(vport_wwpn, &device_id->vport_wwpn,
5692 sizeof(struct lpfc_name));
5693 memcpy(target_wwpn, &device_id->target_wwpn,
5694 sizeof(struct lpfc_name));
5695 break;
5696 }
5697 }
5698 }
5699 spin_unlock_irqrestore(&phba->devicelock, flags);
5700 return found;
5701}
5702
5703
5704
5705
5706
5707
5708
5709
5710
5711
5712
5713
5714
5715
5716
5717
5718
5719
5720
5721
5722
5723bool
5724lpfc_enable_oas_lun(struct lpfc_hba *phba, struct lpfc_name *vport_wwpn,
5725 struct lpfc_name *target_wwpn, uint64_t lun, uint8_t pri)
5726{
5727
5728 struct lpfc_device_data *lun_info;
5729 unsigned long flags;
5730
5731 if (unlikely(!phba) || !vport_wwpn || !target_wwpn ||
5732 !phba->cfg_fof)
5733 return false;
5734
5735 spin_lock_irqsave(&phba->devicelock, flags);
5736
5737
5738 lun_info = __lpfc_get_device_data(phba, &phba->luns, vport_wwpn,
5739 target_wwpn, lun);
5740 if (lun_info) {
5741 if (!lun_info->oas_enabled)
5742 lun_info->oas_enabled = true;
5743 lun_info->priority = pri;
5744 spin_unlock_irqrestore(&phba->devicelock, flags);
5745 return true;
5746 }
5747
5748
5749 lun_info = lpfc_create_device_data(phba, vport_wwpn, target_wwpn, lun,
5750 pri, true);
5751 if (lun_info) {
5752 lun_info->oas_enabled = true;
5753 lun_info->priority = pri;
5754 lun_info->available = false;
5755 list_add_tail(&lun_info->listentry, &phba->luns);
5756 spin_unlock_irqrestore(&phba->devicelock, flags);
5757 return true;
5758 }
5759 spin_unlock_irqrestore(&phba->devicelock, flags);
5760 return false;
5761}
5762
5763
5764
5765
5766
5767
5768
5769
5770
5771
5772
5773
5774
5775
5776
5777
5778
5779
5780
5781
5782bool
5783lpfc_disable_oas_lun(struct lpfc_hba *phba, struct lpfc_name *vport_wwpn,
5784 struct lpfc_name *target_wwpn, uint64_t lun, uint8_t pri)
5785{
5786
5787 struct lpfc_device_data *lun_info;
5788 unsigned long flags;
5789
5790 if (unlikely(!phba) || !vport_wwpn || !target_wwpn ||
5791 !phba->cfg_fof)
5792 return false;
5793
5794 spin_lock_irqsave(&phba->devicelock, flags);
5795
5796
5797 lun_info = __lpfc_get_device_data(phba,
5798 &phba->luns, vport_wwpn,
5799 target_wwpn, lun);
5800 if (lun_info) {
5801 lun_info->oas_enabled = false;
5802 lun_info->priority = pri;
5803 if (!lun_info->available)
5804 lpfc_delete_device_data(phba, lun_info);
5805 spin_unlock_irqrestore(&phba->devicelock, flags);
5806 return true;
5807 }
5808
5809 spin_unlock_irqrestore(&phba->devicelock, flags);
5810 return false;
5811}
5812
/* queuecommand stub for hosts that do not accept FCP commands
 * (NVME-only template): always report the host as busy.
 */
static int
lpfc_no_command(struct Scsi_Host *shost, struct scsi_cmnd *cmnd)
{
	return SCSI_MLQUEUE_HOST_BUSY;
}
5818
/* Error-handler stub for the NVME-only template: every SCSI EH
 * callback (abort/reset) fails unconditionally.
 */
static int
lpfc_no_handler(struct scsi_cmnd *cmnd)
{
	return FAILED;
}
5824
/* slave_alloc/slave_configure stub for the NVME-only template:
 * refuse SCSI device attachment with -ENODEV.
 */
static int
lpfc_no_slave(struct scsi_device *sdev)
{
	return -ENODEV;
}
5830
/*
 * SCSI host template used when FCP is not enabled on the port
 * (NVME-only operation): command/EH/slave entry points are the
 * lpfc_no_* stubs so no SCSI traffic can be issued, and
 * sg_tablesize/cmd_per_lun are minimal.
 */
struct scsi_host_template lpfc_template_nvme = {
	.module			= THIS_MODULE,
	.name			= LPFC_DRIVER_NAME,
	.proc_name		= LPFC_DRIVER_NAME,
	.info			= lpfc_info,
	.queuecommand		= lpfc_no_command,
	.eh_abort_handler	= lpfc_no_handler,
	.eh_device_reset_handler = lpfc_no_handler,
	.eh_target_reset_handler = lpfc_no_handler,
	.eh_bus_reset_handler	= lpfc_no_handler,
	.eh_host_reset_handler  = lpfc_no_handler,
	.slave_alloc		= lpfc_no_slave,
	.slave_configure	= lpfc_no_slave,
	.scan_finished		= lpfc_scan_finished,
	.this_id		= -1,
	.sg_tablesize		= 1,
	.cmd_per_lun		= 1,
	.shost_attrs		= lpfc_hba_attrs,
	.max_sectors		= 0xFFFF,
	.vendor_id		= LPFC_NL_VENDOR_ID,
	.track_queue_depth	= 0,
};
5853
/*
 * SCSI host template for physical ports without a host-reset handler:
 * identical to lpfc_template except .eh_host_reset_handler is absent.
 */
struct scsi_host_template lpfc_template_no_hr = {
	.module			= THIS_MODULE,
	.name			= LPFC_DRIVER_NAME,
	.proc_name		= LPFC_DRIVER_NAME,
	.info			= lpfc_info,
	.queuecommand		= lpfc_queuecommand,
	.eh_timed_out		= fc_eh_timed_out,
	.eh_abort_handler	= lpfc_abort_handler,
	.eh_device_reset_handler = lpfc_device_reset_handler,
	.eh_target_reset_handler = lpfc_target_reset_handler,
	.eh_bus_reset_handler	= lpfc_bus_reset_handler,
	.slave_alloc		= lpfc_slave_alloc,
	.slave_configure	= lpfc_slave_configure,
	.slave_destroy		= lpfc_slave_destroy,
	.scan_finished		= lpfc_scan_finished,
	.this_id		= -1,
	.sg_tablesize		= LPFC_DEFAULT_SG_SEG_CNT,
	.cmd_per_lun		= LPFC_CMD_PER_LUN,
	.shost_attrs		= lpfc_hba_attrs,
	.max_sectors		= 0xFFFF,
	.vendor_id		= LPFC_NL_VENDOR_ID,
	.change_queue_depth	= scsi_change_queue_depth,
	.track_queue_depth	= 1,
};
5878
/*
 * Default SCSI host template for physical ports: full FCP entry points
 * including the complete set of error handlers (abort through host
 * reset) and the lpfc_slave_* device lifecycle callbacks.
 */
struct scsi_host_template lpfc_template = {
	.module			= THIS_MODULE,
	.name			= LPFC_DRIVER_NAME,
	.proc_name		= LPFC_DRIVER_NAME,
	.info			= lpfc_info,
	.queuecommand		= lpfc_queuecommand,
	.eh_timed_out		= fc_eh_timed_out,
	.eh_abort_handler	= lpfc_abort_handler,
	.eh_device_reset_handler = lpfc_device_reset_handler,
	.eh_target_reset_handler = lpfc_target_reset_handler,
	.eh_bus_reset_handler	= lpfc_bus_reset_handler,
	.eh_host_reset_handler  = lpfc_host_reset_handler,
	.slave_alloc		= lpfc_slave_alloc,
	.slave_configure	= lpfc_slave_configure,
	.slave_destroy		= lpfc_slave_destroy,
	.scan_finished		= lpfc_scan_finished,
	.this_id		= -1,
	.sg_tablesize		= LPFC_DEFAULT_SG_SEG_CNT,
	.cmd_per_lun		= LPFC_CMD_PER_LUN,
	.shost_attrs		= lpfc_hba_attrs,
	.max_sectors		= 0xFFFF,
	.vendor_id		= LPFC_NL_VENDOR_ID,
	.change_queue_depth	= scsi_change_queue_depth,
	.track_queue_depth	= 1,
};
5904
/*
 * SCSI host template for NPIV vports: like lpfc_template but without
 * bus/host reset handlers and with vport-specific sysfs attributes
 * (lpfc_vport_attrs); no vendor_id is exported for vports.
 */
struct scsi_host_template lpfc_vport_template = {
	.module			= THIS_MODULE,
	.name			= LPFC_DRIVER_NAME,
	.proc_name		= LPFC_DRIVER_NAME,
	.info			= lpfc_info,
	.queuecommand		= lpfc_queuecommand,
	.eh_timed_out		= fc_eh_timed_out,
	.eh_abort_handler	= lpfc_abort_handler,
	.eh_device_reset_handler = lpfc_device_reset_handler,
	.eh_target_reset_handler = lpfc_target_reset_handler,
	.slave_alloc		= lpfc_slave_alloc,
	.slave_configure	= lpfc_slave_configure,
	.slave_destroy		= lpfc_slave_destroy,
	.scan_finished		= lpfc_scan_finished,
	.this_id		= -1,
	.sg_tablesize		= LPFC_DEFAULT_SG_SEG_CNT,
	.cmd_per_lun		= LPFC_CMD_PER_LUN,
	.shost_attrs		= lpfc_vport_attrs,
	.max_sectors		= 0xFFFF,
	.change_queue_depth	= scsi_change_queue_depth,
	.track_queue_depth	= 1,
};
5927