1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21#include <linux/pci.h>
22#include <linux/slab.h>
23#include <linux/interrupt.h>
24#include <linux/export.h>
25#include <linux/delay.h>
26#include <asm/unaligned.h>
27#include <linux/crc-t10dif.h>
28#include <net/checksum.h>
29
30#include <scsi/scsi.h>
31#include <scsi/scsi_device.h>
32#include <scsi/scsi_eh.h>
33#include <scsi/scsi_host.h>
34#include <scsi/scsi_tcq.h>
35#include <scsi/scsi_transport_fc.h>
36
37#include "lpfc_version.h"
38#include "lpfc_hw4.h"
39#include "lpfc_hw.h"
40#include "lpfc_sli.h"
41#include "lpfc_sli4.h"
42#include "lpfc_nl.h"
43#include "lpfc_disc.h"
44#include "lpfc.h"
45#include "lpfc_scsi.h"
46#include "lpfc_logmsg.h"
47#include "lpfc_crtn.h"
48#include "lpfc_vport.h"
49
50#define LPFC_RESET_WAIT 2
51#define LPFC_ABORT_WAIT 2
52
/* Non-zero once the BlockGuard debug buffers have been consumed.
 * NOTE(review): readers/writers are outside this chunk — presumably the
 * debugfs dump path; confirm before relying on the semantics.
 */
int _dump_buf_done = 1;
54
/* Printable names for the scsi_get_prot_op() protection operation codes,
 * used in BlockGuard log messages; order mirrors the SCSI_PROT_* values.
 */
static char *dif_op_str[] = {
	"PROT_NORMAL",
	"PROT_READ_INSERT",
	"PROT_WRITE_STRIP",
	"PROT_READ_STRIP",
	"PROT_WRITE_INSERT",
	"PROT_READ_PASS",
	"PROT_WRITE_PASS",
};
64
/*
 * T10 DIF (Data Integrity Field) protection tuple that accompanies each
 * logical block.  All fields are big-endian on the wire.
 */
struct scsi_dif_tuple {
	__be16 guard_tag;	/* checksum/CRC over the data block */
	__be16 app_tag;		/* application-defined tag */
	__be32 ref_tag;		/* reference tag (typically low 32 bits of LBA) */
};
70
71static struct lpfc_rport_data *
72lpfc_rport_data_from_scsi_device(struct scsi_device *sdev)
73{
74 struct lpfc_vport *vport = (struct lpfc_vport *)sdev->host->hostdata;
75
76 if (vport->phba->cfg_fof)
77 return ((struct lpfc_device_data *)sdev->hostdata)->rport_data;
78 else
79 return (struct lpfc_rport_data *)sdev->hostdata;
80}
81
/* Forward declarations for helpers defined later in this file. */
static void
lpfc_release_scsi_buf_s4(struct lpfc_hba *phba, struct lpfc_scsi_buf *psb);
static void
lpfc_release_scsi_buf_s3(struct lpfc_hba *phba, struct lpfc_scsi_buf *psb);
static int
lpfc_prot_group_type(struct lpfc_hba *phba, struct scsi_cmnd *sc);
88
89static void
90lpfc_debug_save_data(struct lpfc_hba *phba, struct scsi_cmnd *cmnd)
91{
92 void *src, *dst;
93 struct scatterlist *sgde = scsi_sglist(cmnd);
94
95 if (!_dump_buf_data) {
96 lpfc_printf_log(phba, KERN_ERR, LOG_BG,
97 "9050 BLKGRD: ERROR %s _dump_buf_data is NULL\n",
98 __func__);
99 return;
100 }
101
102
103 if (!sgde) {
104 lpfc_printf_log(phba, KERN_ERR, LOG_BG,
105 "9051 BLKGRD: ERROR: data scatterlist is null\n");
106 return;
107 }
108
109 dst = (void *) _dump_buf_data;
110 while (sgde) {
111 src = sg_virt(sgde);
112 memcpy(dst, src, sgde->length);
113 dst += sgde->length;
114 sgde = sg_next(sgde);
115 }
116}
117
118static void
119lpfc_debug_save_dif(struct lpfc_hba *phba, struct scsi_cmnd *cmnd)
120{
121 void *src, *dst;
122 struct scatterlist *sgde = scsi_prot_sglist(cmnd);
123
124 if (!_dump_buf_dif) {
125 lpfc_printf_log(phba, KERN_ERR, LOG_BG,
126 "9052 BLKGRD: ERROR %s _dump_buf_data is NULL\n",
127 __func__);
128 return;
129 }
130
131 if (!sgde) {
132 lpfc_printf_log(phba, KERN_ERR, LOG_BG,
133 "9053 BLKGRD: ERROR: prot scatterlist is null\n");
134 return;
135 }
136
137 dst = _dump_buf_dif;
138 while (sgde) {
139 src = sg_virt(sgde);
140 memcpy(dst, src, sgde->length);
141 dst += sgde->length;
142 sgde = sg_next(sgde);
143 }
144}
145
146static inline unsigned
147lpfc_cmd_blksize(struct scsi_cmnd *sc)
148{
149 return sc->device->sector_size;
150}
151
/* Selector flags for lpfc_cmd_protect(): which DIF field to interrogate. */
#define LPFC_CHECK_PROTECT_GUARD 1
#define LPFC_CHECK_PROTECT_REF 2
/*
 * Stub: protection checking is unconditionally enabled — both the guard
 * and reference tag queries return true regardless of @flag.
 */
static inline unsigned
lpfc_cmd_protect(struct scsi_cmnd *sc, int flag)
{
	return 1;
}
159
/*
 * Return 1 when the command carries protection data and the host's DIX
 * guard type is IP checksum; 0 when no DIF protection applies or the
 * guard is not IP checksum.
 *
 * NOTE(review): lpfc_prot_group_type() is invoked with a NULL phba here —
 * confirm that helper never dereferences phba on this call path.
 */
static inline unsigned
lpfc_cmd_guard_csum(struct scsi_cmnd *sc)
{
	if (lpfc_prot_group_type(NULL, sc) == LPFC_PG_TYPE_NO_DIF)
		return 0;
	if (scsi_host_get_guard(sc->device->host) == SHOST_DIX_GUARD_IP)
		return 1;
	return 0;
}
169
170
171
172
173
174
175
176
177
178static void
179lpfc_sli4_set_rsp_sgl_last(struct lpfc_hba *phba,
180 struct lpfc_scsi_buf *lpfc_cmd)
181{
182 struct sli4_sge *sgl = (struct sli4_sge *)lpfc_cmd->fcp_bpl;
183 if (sgl) {
184 sgl += 1;
185 sgl->word2 = le32_to_cpu(sgl->word2);
186 bf_set(lpfc_sli4_sge_last, sgl, 1);
187 sgl->word2 = cpu_to_le32(sgl->word2);
188 }
189}
190
191
192
193
194
195
196
197
198
/**
 * lpfc_update_stats - Update per-node latency statistics for a completed I/O
 * @phba: HBA instance.
 * @lpfc_cmd: driver SCSI buffer whose command just completed.
 *
 * Buckets the command's round-trip latency (in ms) into the node's
 * histogram.  Only successful commands (cmd->result == 0) are counted,
 * and only when statistics gathering is enabled on the vport and a
 * bucket scheme is configured.
 */
static void
lpfc_update_stats(struct lpfc_hba *phba, struct lpfc_scsi_buf *lpfc_cmd)
{
	struct lpfc_rport_data *rdata = lpfc_cmd->rdata;
	struct lpfc_nodelist *pnode = rdata->pnode;
	struct scsi_cmnd *cmd = lpfc_cmd->pCmd;
	unsigned long flags;
	struct Scsi_Host *shost = cmd->device->host;
	struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
	unsigned long latency;
	int i;

	/* Failed commands are not counted. */
	if (cmd->result)
		return;

	latency = jiffies_to_msecs((long)jiffies - (long)lpfc_cmd->start_time);

	/* host_lock protects the vport stat flags and the node histogram. */
	spin_lock_irqsave(shost->host_lock, flags);
	if (!vport->stat_data_enabled ||
		vport->stat_data_blocked ||
		!pnode ||
		!pnode->lat_data ||
		(phba->bucket_type == LPFC_NO_BUCKET)) {
		spin_unlock_irqrestore(shost->host_lock, flags);
		return;
	}

	if (phba->bucket_type == LPFC_LINEAR_BUCKET) {
		/* Linear scheme: equal-width buckets, clamped into range. */
		i = (latency + phba->bucket_step - 1 - phba->bucket_base)/
			phba->bucket_step;

		if (i < 0)
			i = 0;
		else if (i >= LPFC_MAX_BUCKET_COUNT)
			i = LPFC_MAX_BUCKET_COUNT - 1;
	} else {
		/* Power-of-two scheme: bucket i covers latencies up to
		 * base + 2^i * step; falls through to the last bucket
		 * when nothing matches.
		 */
		for (i = 0; i < LPFC_MAX_BUCKET_COUNT-1; i++)
			if (latency <= (phba->bucket_base +
				((1<<i)*phba->bucket_step)))
				break;
	}

	pnode->lat_data[i].cmd_count++;
	spin_unlock_irqrestore(shost->host_lock, flags);
}
244
245
246
247
248
249
250
251
252
253
254
255
256
257static void
258lpfc_send_sdev_queuedepth_change_event(struct lpfc_hba *phba,
259 struct lpfc_vport *vport,
260 struct lpfc_nodelist *ndlp,
261 uint64_t lun,
262 uint32_t old_val,
263 uint32_t new_val)
264{
265 struct lpfc_fast_path_event *fast_path_evt;
266 unsigned long flags;
267
268 fast_path_evt = lpfc_alloc_fast_evt(phba);
269 if (!fast_path_evt)
270 return;
271
272 fast_path_evt->un.queue_depth_evt.scsi_event.event_type =
273 FC_REG_SCSI_EVENT;
274 fast_path_evt->un.queue_depth_evt.scsi_event.subcategory =
275 LPFC_EVENT_VARQUEDEPTH;
276
277
278 fast_path_evt->un.queue_depth_evt.scsi_event.lun = lun;
279 if (ndlp && NLP_CHK_NODE_ACT(ndlp)) {
280 memcpy(&fast_path_evt->un.queue_depth_evt.scsi_event.wwpn,
281 &ndlp->nlp_portname, sizeof(struct lpfc_name));
282 memcpy(&fast_path_evt->un.queue_depth_evt.scsi_event.wwnn,
283 &ndlp->nlp_nodename, sizeof(struct lpfc_name));
284 }
285
286 fast_path_evt->un.queue_depth_evt.oldval = old_val;
287 fast_path_evt->un.queue_depth_evt.newval = new_val;
288 fast_path_evt->vport = vport;
289
290 fast_path_evt->work_evt.evt = LPFC_EVT_FASTPATH_MGMT_EVT;
291 spin_lock_irqsave(&phba->hbalock, flags);
292 list_add_tail(&fast_path_evt->work_evt.evt_listp, &phba->work_list);
293 spin_unlock_irqrestore(&phba->hbalock, flags);
294 lpfc_worker_wake_up(phba);
295
296 return;
297}
298
299
300
301
302
303
304
305
306
307
308
/**
 * lpfc_change_queue_depth - SCSI host template change_queue_depth callback
 * @sdev: SCSI device being adjusted.
 * @qdepth: requested queue depth.
 * @reason: SCSI_QDEPTH_* reason code from the midlayer.
 *
 * Applies the new depth and emits a queue-depth-change management event
 * via the worker thread.
 *
 * Return: the resulting queue depth, or -EOPNOTSUPP for unknown reasons.
 */
static int
lpfc_change_queue_depth(struct scsi_device *sdev, int qdepth, int reason)
{
	struct lpfc_vport *vport = (struct lpfc_vport *) sdev->host->hostdata;
	struct lpfc_hba *phba = vport->phba;
	struct lpfc_rport_data *rdata;
	unsigned long new_queue_depth, old_queue_depth;

	old_queue_depth = sdev->queue_depth;

	switch (reason) {
	case SCSI_QDEPTH_DEFAULT:
		/* fall through - default and ramp-up both just apply qdepth */
	case SCSI_QDEPTH_RAMP_UP:
		scsi_adjust_queue_depth(sdev, scsi_get_tag_type(sdev), qdepth);
		break;
	case SCSI_QDEPTH_QFULL:
		/* 0 from scsi_track_queue_full(): depth unchanged, done. */
		if (scsi_track_queue_full(sdev, qdepth) == 0)
			return sdev->queue_depth;

		lpfc_printf_vlog(vport, KERN_WARNING, LOG_FCP,
				 "0711 detected queue full - lun queue "
				 "depth adjusted to %d.\n", sdev->queue_depth);
		break;
	default:
		return -EOPNOTSUPP;
	}

	/* Report the change to user space via the fast-path event queue. */
	new_queue_depth = sdev->queue_depth;
	rdata = lpfc_rport_data_from_scsi_device(sdev);
	if (rdata)
		lpfc_send_sdev_queuedepth_change_event(phba, vport,
						       rdata->pnode, sdev->lun,
						       old_queue_depth,
						       new_queue_depth);
	return sdev->queue_depth;
}
346
347
348
349
350
351
352static int
353lpfc_change_queue_type(struct scsi_device *sdev, int tag_type)
354{
355 if (sdev->tagged_supported) {
356 scsi_set_tag_type(sdev, tag_type);
357 if (tag_type)
358 scsi_activate_tcq(sdev, sdev->queue_depth);
359 else
360 scsi_deactivate_tcq(sdev, sdev->queue_depth);
361 } else
362 tag_type = 0;
363
364 return tag_type;
365}
366
367
368
369
370
371
372
373
374
375
376
377
/**
 * lpfc_rampdown_queue_depth - Record a resource error, schedule a ramp-down
 * @phba: HBA instance.
 *
 * Counts the out-of-resource event and, at most once per
 * QUEUE_RAMP_DOWN_INTERVAL, posts a WORKER_RAMP_DOWN_QUEUE event so the
 * worker thread will reduce device queue depths.
 */
void
lpfc_rampdown_queue_depth(struct lpfc_hba *phba)
{
	unsigned long flags;
	uint32_t evt_posted;
	unsigned long expires;

	spin_lock_irqsave(&phba->hbalock, flags);
	atomic_inc(&phba->num_rsrc_err);
	phba->last_rsrc_error_time = jiffies;

	/* Rate-limit: ramp down at most once per interval. */
	expires = phba->last_ramp_down_time + QUEUE_RAMP_DOWN_INTERVAL;
	if (time_after(expires, jiffies)) {
		spin_unlock_irqrestore(&phba->hbalock, flags);
		return;
	}

	phba->last_ramp_down_time = jiffies;

	spin_unlock_irqrestore(&phba->hbalock, flags);

	/* Post the worker event only if it is not already pending. */
	spin_lock_irqsave(&phba->pport->work_port_lock, flags);
	evt_posted = phba->pport->work_port_events & WORKER_RAMP_DOWN_QUEUE;
	if (!evt_posted)
		phba->pport->work_port_events |= WORKER_RAMP_DOWN_QUEUE;
	spin_unlock_irqrestore(&phba->pport->work_port_lock, flags);

	if (!evt_posted)
		lpfc_worker_wake_up(phba);
	return;
}
409
410
411
412
413
414
415
416
417
/**
 * lpfc_ramp_down_queue_handler - Worker-thread queue-depth ramp-down
 * @phba: HBA instance.
 *
 * Lowers every scsi_device's queue depth in proportion to the ratio of
 * resource errors to successful commands since the last ramp-down, then
 * zeroes both counters.
 */
void
lpfc_ramp_down_queue_handler(struct lpfc_hba *phba)
{
	struct lpfc_vport **vports;
	struct Scsi_Host *shost;
	struct scsi_device *sdev;
	unsigned long new_queue_depth;
	unsigned long num_rsrc_err, num_cmd_success;
	int i;

	num_rsrc_err = atomic_read(&phba->num_rsrc_err);
	num_cmd_success = atomic_read(&phba->num_cmd_success);

	/* Nothing to do without resource errors; this also guarantees the
	 * divisor below is non-zero.
	 */
	if (num_rsrc_err == 0)
		return;

	vports = lpfc_create_vport_work_array(phba);
	if (vports != NULL)
		for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) {
			shost = lpfc_shost_from_vport(vports[i]);
			shost_for_each_device(sdev, shost) {
				/* Reduce depth by the error fraction, always
				 * dropping by at least one.
				 */
				new_queue_depth =
					sdev->queue_depth * num_rsrc_err /
					(num_rsrc_err + num_cmd_success);
				if (!new_queue_depth)
					new_queue_depth = sdev->queue_depth - 1;
				else
					new_queue_depth = sdev->queue_depth -
						new_queue_depth;
				lpfc_change_queue_depth(sdev, new_queue_depth,
							SCSI_QDEPTH_DEFAULT);
			}
		}
	lpfc_destroy_vport_work_array(phba, vports);
	atomic_set(&phba->num_rsrc_err, 0);
	atomic_set(&phba->num_cmd_success, 0);
}
460
461
462
463
464
465
466
467
468
469void
470lpfc_scsi_dev_block(struct lpfc_hba *phba)
471{
472 struct lpfc_vport **vports;
473 struct Scsi_Host *shost;
474 struct scsi_device *sdev;
475 struct fc_rport *rport;
476 int i;
477
478 vports = lpfc_create_vport_work_array(phba);
479 if (vports != NULL)
480 for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) {
481 shost = lpfc_shost_from_vport(vports[i]);
482 shost_for_each_device(sdev, shost) {
483 rport = starget_to_rport(scsi_target(sdev));
484 fc_remote_port_delete(rport);
485 }
486 }
487 lpfc_destroy_vport_work_array(phba, vports);
488}
489
490
491
492
493
494
495
496
497
498
499
500
501
502
503
504
505
/**
 * lpfc_new_scsi_buf_s3 - Allocate SLI-3 SCSI buffers
 * @vport: virtual port the buffers are allocated for.
 * @num_to_alloc: requested number of buffers.
 *
 * Allocates up to @num_to_alloc lpfc_scsi_buf structures, each backed by
 * one DMA-coherent region carved into an FCP command, an FCP response
 * and a BPL (buffer pointer list).  The invariant part of each buffer's
 * IOCB is pre-built and the buffer released to the free list.
 *
 * Return: the number of buffers actually allocated.
 */
static int
lpfc_new_scsi_buf_s3(struct lpfc_vport *vport, int num_to_alloc)
{
	struct lpfc_hba *phba = vport->phba;
	struct lpfc_scsi_buf *psb;
	struct ulp_bde64 *bpl;
	IOCB_t *iocb;
	dma_addr_t pdma_phys_fcp_cmd;
	dma_addr_t pdma_phys_fcp_rsp;
	dma_addr_t pdma_phys_bpl;
	uint16_t iotag;
	int bcnt, bpl_size;

	/* Space remaining for the BPL after cmd + rsp are carved off. */
	bpl_size = phba->cfg_sg_dma_buf_size -
		(sizeof(struct fcp_cmnd) + sizeof(struct fcp_rsp));

	lpfc_printf_vlog(vport, KERN_INFO, LOG_FCP,
			 "9067 ALLOC %d scsi_bufs: %d (%d + %d + %d)\n",
			 num_to_alloc, phba->cfg_sg_dma_buf_size,
			 (int)sizeof(struct fcp_cmnd),
			 (int)sizeof(struct fcp_rsp), bpl_size);

	for (bcnt = 0; bcnt < num_to_alloc; bcnt++) {
		psb = kzalloc(sizeof(struct lpfc_scsi_buf), GFP_KERNEL);
		if (!psb)
			break;

		/* One coherent chunk holds cmd + rsp + BPL. */
		psb->data = pci_pool_alloc(phba->lpfc_scsi_dma_buf_pool,
					   GFP_KERNEL, &psb->dma_handle);
		if (!psb->data) {
			kfree(psb);
			break;
		}

		memset(psb->data, 0, phba->cfg_sg_dma_buf_size);

		/* Allocate an iotag; 0 means the iotag space is exhausted. */
		iotag = lpfc_sli_next_iotag(phba, &psb->cur_iocbq);
		if (iotag == 0) {
			pci_pool_free(phba->lpfc_scsi_dma_buf_pool,
				      psb->data, psb->dma_handle);
			kfree(psb);
			break;
		}
		psb->cur_iocbq.iocb_flag |= LPFC_IO_FCP;

		/* Carve the DMA chunk: [fcp_cmnd][fcp_rsp][bpl ...]. */
		psb->fcp_cmnd = psb->data;
		psb->fcp_rsp = psb->data + sizeof(struct fcp_cmnd);
		psb->fcp_bpl = psb->data + sizeof(struct fcp_cmnd) +
			sizeof(struct fcp_rsp);

		/* Matching DMA addresses for the three regions. */
		bpl = psb->fcp_bpl;
		pdma_phys_fcp_cmd = psb->dma_handle;
		pdma_phys_fcp_rsp = psb->dma_handle + sizeof(struct fcp_cmnd);
		pdma_phys_bpl = psb->dma_handle + sizeof(struct fcp_cmnd) +
			sizeof(struct fcp_rsp);

		/* BPL entry 0: the FCP command. */
		bpl[0].addrHigh = le32_to_cpu(putPaddrHigh(pdma_phys_fcp_cmd));
		bpl[0].addrLow = le32_to_cpu(putPaddrLow(pdma_phys_fcp_cmd));
		bpl[0].tus.f.bdeSize = sizeof(struct fcp_cmnd);
		bpl[0].tus.f.bdeFlags = BUFF_TYPE_BDE_64;
		bpl[0].tus.w = le32_to_cpu(bpl[0].tus.w);

		/* BPL entry 1: the FCP response. */
		bpl[1].addrHigh = le32_to_cpu(putPaddrHigh(pdma_phys_fcp_rsp));
		bpl[1].addrLow = le32_to_cpu(putPaddrLow(pdma_phys_fcp_rsp));
		bpl[1].tus.f.bdeSize = sizeof(struct fcp_rsp);
		bpl[1].tus.f.bdeFlags = BUFF_TYPE_BDE_64;
		bpl[1].tus.w = le32_to_cpu(bpl[1].tus.w);

		/* Pre-build the invariant part of the IOCB. */
		iocb = &psb->cur_iocbq.iocb;
		iocb->un.fcpi64.bdl.ulpIoTag32 = 0;
		if ((phba->sli_rev == 3) &&
				!(phba->sli3_options & LPFC_SLI3_BG_ENABLED)) {
			/* SLI-3 without BlockGuard: the command is embedded
			 * in the IOCB and the response uses the extended BDE.
			 */
			iocb->un.fcpi64.bdl.bdeFlags = BUFF_TYPE_BDE_IMMED;
			iocb->un.fcpi64.bdl.bdeSize = sizeof(struct fcp_cmnd);
			iocb->un.fcpi64.bdl.addrLow = offsetof(IOCB_t,
					unsli3.fcp_ext.icd);
			iocb->un.fcpi64.bdl.addrHigh = 0;
			iocb->ulpBdeCount = 0;
			iocb->ulpLe = 0;

			iocb->unsli3.fcp_ext.rbde.tus.f.bdeFlags =
					BUFF_TYPE_BDE_64;
			iocb->unsli3.fcp_ext.rbde.tus.f.bdeSize =
					sizeof(struct fcp_rsp);
			iocb->unsli3.fcp_ext.rbde.addrLow =
					putPaddrLow(pdma_phys_fcp_rsp);
			iocb->unsli3.fcp_ext.rbde.addrHigh =
					putPaddrHigh(pdma_phys_fcp_rsp);
		} else {
			/* Otherwise point the IOCB at the two-entry BPL. */
			iocb->un.fcpi64.bdl.bdeFlags = BUFF_TYPE_BLP_64;
			iocb->un.fcpi64.bdl.bdeSize =
					(2 * sizeof(struct ulp_bde64));
			iocb->un.fcpi64.bdl.addrLow =
					putPaddrLow(pdma_phys_bpl);
			iocb->un.fcpi64.bdl.addrHigh =
					putPaddrHigh(pdma_phys_bpl);
			iocb->ulpBdeCount = 1;
			iocb->ulpLe = 1;
		}
		iocb->ulpClass = CLASS3;
		psb->status = IOSTAT_SUCCESS;

		/* Put the finished buffer on the free list. */
		psb->cur_iocbq.context1 = psb;
		lpfc_release_scsi_buf_s3(phba, psb);

	}

	return bcnt;
}
635
636
637
638
639
640
641
642
643void
644lpfc_sli4_vport_delete_fcp_xri_aborted(struct lpfc_vport *vport)
645{
646 struct lpfc_hba *phba = vport->phba;
647 struct lpfc_scsi_buf *psb, *next_psb;
648 unsigned long iflag = 0;
649
650 spin_lock_irqsave(&phba->hbalock, iflag);
651 spin_lock(&phba->sli4_hba.abts_scsi_buf_list_lock);
652 list_for_each_entry_safe(psb, next_psb,
653 &phba->sli4_hba.lpfc_abts_scsi_buf_list, list) {
654 if (psb->rdata && psb->rdata->pnode
655 && psb->rdata->pnode->vport == vport)
656 psb->rdata = NULL;
657 }
658 spin_unlock(&phba->sli4_hba.abts_scsi_buf_list_lock);
659 spin_unlock_irqrestore(&phba->hbalock, iflag);
660}
661
662
663
664
665
666
667
668
669
/**
 * lpfc_sli4_fcp_xri_aborted - Handle an XRI-aborted completion for FCP I/O
 * @phba: HBA instance.
 * @axri: XRI-aborted work-queue completion entry.
 *
 * Finds the SCSI buffer whose exchange (XRI) the port reports aborted.
 * If the buffer is on the aborted list it is removed, an RRQ is set for
 * its node, abort error handling runs and the buffer is released.
 * Otherwise the active iotag table is scanned to clear the exchange-busy
 * flag on a still-in-flight FCP buffer.
 */
void
lpfc_sli4_fcp_xri_aborted(struct lpfc_hba *phba,
			  struct sli4_wcqe_xri_aborted *axri)
{
	uint16_t xri = bf_get(lpfc_wcqe_xa_xri, axri);
	uint16_t rxid = bf_get(lpfc_wcqe_xa_remote_xid, axri);
	struct lpfc_scsi_buf *psb, *next_psb;
	unsigned long iflag = 0;
	struct lpfc_iocbq *iocbq;
	int i;
	struct lpfc_nodelist *ndlp;
	int rrq_empty = 0;
	struct lpfc_sli_ring *pring = &phba->sli.ring[LPFC_ELS_RING];

	spin_lock_irqsave(&phba->hbalock, iflag);
	spin_lock(&phba->sli4_hba.abts_scsi_buf_list_lock);
	list_for_each_entry_safe(psb, next_psb,
		&phba->sli4_hba.lpfc_abts_scsi_buf_list, list) {
		if (psb->cur_iocbq.sli4_xritag == xri) {
			list_del(&psb->list);
			psb->exch_busy = 0;
			psb->status = IOSTAT_SUCCESS;
			/* Inner lock can be dropped now; hbalock stays held
			 * for the RRQ-list check below.
			 */
			spin_unlock(
				&phba->sli4_hba.abts_scsi_buf_list_lock);
			if (psb->rdata && psb->rdata->pnode)
				ndlp = psb->rdata->pnode;
			else
				ndlp = NULL;

			rrq_empty = list_empty(&phba->active_rrq_list);
			spin_unlock_irqrestore(&phba->hbalock, iflag);
			if (ndlp) {
				/* Block reuse of this XRI for the node until
				 * the RRQ completes, then run abort error
				 * handling.
				 */
				lpfc_set_rrq_active(phba, ndlp,
					psb->cur_iocbq.sli4_lxritag, rxid, 1);
				lpfc_sli4_abts_err_handler(phba, ndlp, axri);
			}
			lpfc_release_scsi_buf_s4(phba, psb);
			if (rrq_empty)
				lpfc_worker_wake_up(phba);
			return;
		}
	}
	spin_unlock(&phba->sli4_hba.abts_scsi_buf_list_lock);
	/* Not on the aborted list: scan active iotags for a matching
	 * in-flight FCP command and clear its exchange-busy state.
	 */
	for (i = 1; i <= phba->sli.last_iotag; i++) {
		iocbq = phba->sli.iocbq_lookup[i];

		if (!(iocbq->iocb_flag & LPFC_IO_FCP) ||
			(iocbq->iocb_flag & LPFC_IO_LIBDFC))
			continue;
		if (iocbq->sli4_xritag != xri)
			continue;
		psb = container_of(iocbq, struct lpfc_scsi_buf, cur_iocbq);
		psb->exch_busy = 0;
		spin_unlock_irqrestore(&phba->hbalock, iflag);
		if (!list_empty(&pring->txq))
			lpfc_worker_wake_up(phba);
		return;

	}
	spin_unlock_irqrestore(&phba->hbalock, iflag);
}
731
732
733
734
735
736
737
738
739
740
741
742
743
744
745
/**
 * lpfc_sli4_post_scsi_sgl_list - Post a list of SCSI buffer SGLs to the port
 * @phba: HBA instance.
 * @post_sblist: list of lpfc_scsi_buf whose SGLs are to be posted.
 * @sb_count: number of buffers on @post_sblist.
 *
 * Posts SGLs to the SLI4 port in blocks of XRI-contiguous buffers.  A
 * break in XRI continuity, or hitting the non-embedded mailbox SGL
 * limit, flushes the accumulated block; a single final straggler is
 * posted individually.  Buffers whose post failed are marked exch_busy,
 * and every buffer is released back to the driver before returning.
 *
 * Return: number of SGLs successfully posted, or -EINVAL for a bad count.
 */
static int
lpfc_sli4_post_scsi_sgl_list(struct lpfc_hba *phba,
		struct list_head *post_sblist, int sb_count)
{
	struct lpfc_scsi_buf *psb, *psb_next;
	int status, sgl_size;
	int post_cnt = 0, block_cnt = 0, num_posting = 0, num_posted = 0;
	dma_addr_t pdma_phys_bpl1;
	int last_xritag = NO_XRI;
	LIST_HEAD(prep_sblist);
	LIST_HEAD(blck_sblist);
	LIST_HEAD(scsi_sblist);

	/* sanity check */
	if (sb_count <= 0)
		return -EINVAL;

	sgl_size = phba->cfg_sg_dma_buf_size -
		(sizeof(struct fcp_cmnd) + sizeof(struct fcp_rsp));

	list_for_each_entry_safe(psb, psb_next, post_sblist, list) {
		list_del_init(&psb->list);
		block_cnt++;
		if ((last_xritag != NO_XRI) &&
			(psb->cur_iocbq.sli4_xritag != last_xritag + 1)) {
			/* XRI gap: flush what we have, start a new block. */
			list_splice_init(&prep_sblist, &blck_sblist);
			post_cnt = block_cnt - 1;

			list_add_tail(&psb->list, &prep_sblist);
			block_cnt = 1;
		} else {
			list_add_tail(&psb->list, &prep_sblist);
			/* Flush when the mailbox SGL capacity is reached. */
			if (block_cnt == LPFC_NEMBED_MBOX_SGL_CNT) {
				list_splice_init(&prep_sblist, &blck_sblist);
				post_cnt = block_cnt;
				block_cnt = 0;
			}
		}
		num_posting++;
		last_xritag = psb->cur_iocbq.sli4_xritag;

		/* Last buffer: flush the remainder, or post a lone leftover
		 * SGL individually.
		 */
		if (num_posting == sb_count) {
			if (post_cnt == 0) {
				list_splice_init(&prep_sblist, &blck_sblist);
				post_cnt = block_cnt;
			} else if (block_cnt == 1) {
				/* SGLs larger than one page use a second
				 * physical page.
				 */
				if (sgl_size > SGL_PAGE_SIZE)
					pdma_phys_bpl1 = psb->dma_phys_bpl +
						SGL_PAGE_SIZE;
				else
					pdma_phys_bpl1 = 0;
				status = lpfc_sli4_post_sgl(phba,
						psb->dma_phys_bpl,
						pdma_phys_bpl1,
						psb->cur_iocbq.sli4_xritag);
				if (status) {
					/* Keep off the free path until the
					 * port releases the exchange.
					 */
					psb->exch_busy = 1;
				} else {
					psb->exch_busy = 0;
					psb->status = IOSTAT_SUCCESS;
					num_posted++;
				}
				/* success, put on SCSI buffer sgl list */
				list_add_tail(&psb->list, &scsi_sblist);
			}
		}

		/* Nothing accumulated to block-post on this iteration. */
		if (post_cnt == 0)
			continue;

		/* Post the accumulated block of SGLs in one mailbox. */
		status = lpfc_sli4_post_scsi_sgl_block(phba, &blck_sblist,
						       post_cnt);

		/* A completed block resets XRI continuity tracking. */
		if (block_cnt == 0)
			last_xritag = NO_XRI;

		/* reset SCSI buffer post count for next round of posting */
		post_cnt = 0;

		/* Transfer the block's buffers out, recording post status. */
		while (!list_empty(&blck_sblist)) {
			list_remove_head(&blck_sblist, psb,
					 struct lpfc_scsi_buf, list);
			if (status) {
				/* failure, put on abort scsi list */
				psb->exch_busy = 1;
			} else {
				/* success, put on SCSI buffer list */
				psb->exch_busy = 0;
				psb->status = IOSTAT_SUCCESS;
				num_posted++;
			}
			list_add_tail(&psb->list, &scsi_sblist);
		}
	}

	/* Release every buffer back to the appropriate free list. */
	while (!list_empty(&scsi_sblist)) {
		list_remove_head(&scsi_sblist, psb,
				 struct lpfc_scsi_buf, list);
		lpfc_release_scsi_buf_s4(phba, psb);
	}
	return num_posted;
}
860
861
862
863
864
865
866
867
868
869
870
871
872
873int
874lpfc_sli4_repost_scsi_sgl_list(struct lpfc_hba *phba)
875{
876 LIST_HEAD(post_sblist);
877 int num_posted, rc = 0;
878
879
880 spin_lock_irq(&phba->scsi_buf_list_get_lock);
881 spin_lock(&phba->scsi_buf_list_put_lock);
882 list_splice_init(&phba->lpfc_scsi_buf_list_get, &post_sblist);
883 list_splice(&phba->lpfc_scsi_buf_list_put, &post_sblist);
884 spin_unlock(&phba->scsi_buf_list_put_lock);
885 spin_unlock_irq(&phba->scsi_buf_list_get_lock);
886
887
888 if (!list_empty(&post_sblist)) {
889 num_posted = lpfc_sli4_post_scsi_sgl_list(phba, &post_sblist,
890 phba->sli4_hba.scsi_xri_cnt);
891
892 if (num_posted == 0)
893 rc = -EIO;
894 }
895 return rc;
896}
897
898
899
900
901
902
903
904
905
906
907
908
909
910
911
/**
 * lpfc_new_scsi_buf_s4 - Allocate SLI-4 SCSI buffers
 * @vport: virtual port the buffers are allocated for.
 * @num_to_alloc: requested number of buffers.
 *
 * Allocates up to @num_to_alloc lpfc_scsi_buf structures, each backed by
 * one DMA-coherent region carved into an SGL, an FCP command and an FCP
 * response.  Each buffer gets an XRI and an iotag, the invariant IOCB
 * fields are pre-built, and the whole batch of SGLs is then posted to
 * the port.
 *
 * Return: number of buffers successfully allocated AND posted.
 */
static int
lpfc_new_scsi_buf_s4(struct lpfc_vport *vport, int num_to_alloc)
{
	struct lpfc_hba *phba = vport->phba;
	struct lpfc_scsi_buf *psb;
	struct sli4_sge *sgl;
	IOCB_t *iocb;
	dma_addr_t pdma_phys_fcp_cmd;
	dma_addr_t pdma_phys_fcp_rsp;
	dma_addr_t pdma_phys_bpl;
	uint16_t iotag, lxri = 0;
	int bcnt, num_posted, sgl_size;
	LIST_HEAD(prep_sblist);
	LIST_HEAD(post_sblist);
	LIST_HEAD(scsi_sblist);

	/* Space reserved for the SGL before cmd + rsp. */
	sgl_size = phba->cfg_sg_dma_buf_size -
		(sizeof(struct fcp_cmnd) + sizeof(struct fcp_rsp));

	lpfc_printf_vlog(vport, KERN_INFO, LOG_FCP,
			 "9068 ALLOC %d scsi_bufs: %d (%d + %d + %d)\n",
			 num_to_alloc, phba->cfg_sg_dma_buf_size, sgl_size,
			 (int)sizeof(struct fcp_cmnd),
			 (int)sizeof(struct fcp_rsp));

	for (bcnt = 0; bcnt < num_to_alloc; bcnt++) {
		psb = kzalloc(sizeof(struct lpfc_scsi_buf), GFP_KERNEL);
		if (!psb)
			break;

		/* One coherent chunk per buffer: [SGL][fcp_cmnd][fcp_rsp]. */
		psb->data = pci_pool_alloc(phba->lpfc_scsi_dma_buf_pool,
					   GFP_KERNEL, &psb->dma_handle);
		if (!psb->data) {
			kfree(psb);
			break;
		}
		memset(psb->data, 0, phba->cfg_sg_dma_buf_size);

		/* With BlockGuard enabled, reject pool memory that is not
		 * aligned to an SLI4 page boundary.
		 */
		if (phba->cfg_enable_bg && (((unsigned long)(psb->data) &
			(unsigned long)(SLI4_PAGE_SIZE - 1)) != 0)) {
			pci_pool_free(phba->lpfc_scsi_dma_buf_pool,
				      psb->data, psb->dma_handle);
			kfree(psb);
			break;
		}

		/* Reserve an XRI for this buffer's exchange. */
		lxri = lpfc_sli4_next_xritag(phba);
		if (lxri == NO_XRI) {
			pci_pool_free(phba->lpfc_scsi_dma_buf_pool,
				      psb->data, psb->dma_handle);
			kfree(psb);
			break;
		}

		/* Allocate an iotag; 0 means the iotag space is exhausted. */
		iotag = lpfc_sli_next_iotag(phba, &psb->cur_iocbq);
		if (iotag == 0) {
			pci_pool_free(phba->lpfc_scsi_dma_buf_pool,
				      psb->data, psb->dma_handle);
			kfree(psb);
			lpfc_printf_log(phba, KERN_ERR, LOG_FCP,
					"3368 Failed to allocated IOTAG for"
					" XRI:0x%x\n", lxri);
			lpfc_sli4_free_xri(phba, lxri);
			break;
		}
		psb->cur_iocbq.sli4_lxritag = lxri;
		psb->cur_iocbq.sli4_xritag = phba->sli4_hba.xri_ids[lxri];
		psb->cur_iocbq.iocb_flag |= LPFC_IO_FCP;
		/* Carve the chunk: SGL first, then FCP cmd and rsp. */
		psb->fcp_bpl = psb->data;
		psb->fcp_cmnd = (psb->data + sgl_size);
		psb->fcp_rsp = (struct fcp_rsp *)((uint8_t *)psb->fcp_cmnd +
					sizeof(struct fcp_cmnd));

		/* Matching DMA addresses for the three regions. */
		sgl = (struct sli4_sge *)psb->fcp_bpl;
		pdma_phys_bpl = psb->dma_handle;
		pdma_phys_fcp_cmd = (psb->dma_handle + sgl_size);
		pdma_phys_fcp_rsp = pdma_phys_fcp_cmd + sizeof(struct fcp_cmnd);

		/* SGE 0: the FCP command (not the last entry). */
		sgl->addr_hi = cpu_to_le32(putPaddrHigh(pdma_phys_fcp_cmd));
		sgl->addr_lo = cpu_to_le32(putPaddrLow(pdma_phys_fcp_cmd));
		sgl->word2 = le32_to_cpu(sgl->word2);
		bf_set(lpfc_sli4_sge_last, sgl, 0);
		sgl->word2 = cpu_to_le32(sgl->word2);
		sgl->sge_len = cpu_to_le32(sizeof(struct fcp_cmnd));
		sgl++;

		/* SGE 1: the FCP response, marked last. */
		sgl->addr_hi = cpu_to_le32(putPaddrHigh(pdma_phys_fcp_rsp));
		sgl->addr_lo = cpu_to_le32(putPaddrLow(pdma_phys_fcp_rsp));
		sgl->word2 = le32_to_cpu(sgl->word2);
		bf_set(lpfc_sli4_sge_last, sgl, 1);
		sgl->word2 = cpu_to_le32(sgl->word2);
		sgl->sge_len = cpu_to_le32(sizeof(struct fcp_rsp));

		/* Pre-build the invariant part of the IOCB. */
		iocb = &psb->cur_iocbq.iocb;
		iocb->un.fcpi64.bdl.ulpIoTag32 = 0;
		iocb->un.fcpi64.bdl.bdeFlags = BUFF_TYPE_BDE_64;

		/* Point the BDL at the embedded FCP command. */
		iocb->un.fcpi64.bdl.bdeSize = sizeof(struct fcp_cmnd);
		iocb->un.fcpi64.bdl.addrLow = putPaddrLow(pdma_phys_fcp_cmd);
		iocb->un.fcpi64.bdl.addrHigh = putPaddrHigh(pdma_phys_fcp_cmd);
		iocb->ulpBdeCount = 1;
		iocb->ulpLe = 1;
		iocb->ulpClass = CLASS3;
		psb->cur_iocbq.context1 = psb;
		psb->dma_phys_bpl = pdma_phys_bpl;

		/* Queue for batched SGL posting below. */
		list_add_tail(&psb->list, &post_sblist);
		spin_lock_irq(&phba->scsi_buf_list_get_lock);
		phba->sli4_hba.scsi_xri_cnt++;
		spin_unlock_irq(&phba->scsi_buf_list_get_lock);
	}
	lpfc_printf_log(phba, KERN_INFO, LOG_BG,
			"3021 Allocate %d out of %d requested new SCSI "
			"buffers\n", bcnt, num_to_alloc);

	/* Post the accumulated SGLs to the port in one pass. */
	if (!list_empty(&post_sblist))
		num_posted = lpfc_sli4_post_scsi_sgl_list(phba,
							  &post_sblist, bcnt);
	else
		num_posted = 0;

	return num_posted;
}
1062
1063
1064
1065
1066
1067
1068
1069
1070
1071
1072
1073
1074
/**
 * lpfc_new_scsi_buf - Allocate SCSI buffers via the SLI-rev specific routine
 * @vport: virtual port to allocate for.
 * @num_to_alloc: requested count.
 *
 * Dispatches to the s3 or s4 allocator installed in the HBA.
 *
 * Return: number of buffers actually made available.
 */
static inline int
lpfc_new_scsi_buf(struct lpfc_vport *vport, int num_to_alloc)
{
	return vport->phba->lpfc_new_scsi_buf(vport, num_to_alloc);
}
1080
1081
1082
1083
1084
1085
1086
1087
1088
1089
1090
1091
/**
 * lpfc_get_scsi_buf_s3 - Remove a SCSI buffer from the SLI-3 free lists
 * @phba: HBA instance.
 * @ndlp: remote node (not consulted on SLI-3).
 *
 * Pops a buffer from the get list; if that is empty, the put list is
 * migrated onto the get list under both locks and one more attempt is
 * made.
 *
 * Return: the buffer, or NULL when none are available.
 */
static struct lpfc_scsi_buf*
lpfc_get_scsi_buf_s3(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp)
{
	struct lpfc_scsi_buf * lpfc_cmd = NULL;
	struct list_head *scsi_buf_list_get = &phba->lpfc_scsi_buf_list_get;
	unsigned long iflag = 0;

	spin_lock_irqsave(&phba->scsi_buf_list_get_lock, iflag);
	list_remove_head(scsi_buf_list_get, lpfc_cmd, struct lpfc_scsi_buf,
			 list);
	if (!lpfc_cmd) {
		/* Get list empty: refill it from the put list. */
		spin_lock(&phba->scsi_buf_list_put_lock);
		list_splice(&phba->lpfc_scsi_buf_list_put,
			    &phba->lpfc_scsi_buf_list_get);
		INIT_LIST_HEAD(&phba->lpfc_scsi_buf_list_put);
		list_remove_head(scsi_buf_list_get, lpfc_cmd,
				 struct lpfc_scsi_buf, list);
		spin_unlock(&phba->scsi_buf_list_put_lock);
	}
	spin_unlock_irqrestore(&phba->scsi_buf_list_get_lock, iflag);
	return lpfc_cmd;
}
1114
1115
1116
1117
1118
1119
1120
1121
1122
1123
1124
/**
 * lpfc_get_scsi_buf_s4 - Get an SLI-4 SCSI buffer not blocked by an RRQ
 * @phba: HBA instance.
 * @ndlp: remote node the I/O will target.
 *
 * Scans the get list for a buffer whose XRI is not RRQ-blocked for
 * @ndlp; if none qualifies, the put list is migrated onto the get list
 * and the scan repeated once.
 *
 * Return: a usable buffer, or NULL.
 */
static struct lpfc_scsi_buf*
lpfc_get_scsi_buf_s4(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp)
{
	struct lpfc_scsi_buf *lpfc_cmd, *lpfc_cmd_next;
	unsigned long iflag = 0;
	int found = 0;

	spin_lock_irqsave(&phba->scsi_buf_list_get_lock, iflag);
	list_for_each_entry_safe(lpfc_cmd, lpfc_cmd_next,
				 &phba->lpfc_scsi_buf_list_get, list) {
		/* Skip buffers whose XRI is quarantined for this node. */
		if (lpfc_test_rrq_active(phba, ndlp,
					 lpfc_cmd->cur_iocbq.sli4_lxritag))
			continue;
		list_del(&lpfc_cmd->list);
		found = 1;
		break;
	}
	if (!found) {
		/* Refill the get list from the put list, then rescan. */
		spin_lock(&phba->scsi_buf_list_put_lock);
		list_splice(&phba->lpfc_scsi_buf_list_put,
			    &phba->lpfc_scsi_buf_list_get);
		INIT_LIST_HEAD(&phba->lpfc_scsi_buf_list_put);
		spin_unlock(&phba->scsi_buf_list_put_lock);
		list_for_each_entry_safe(lpfc_cmd, lpfc_cmd_next,
					 &phba->lpfc_scsi_buf_list_get, list) {
			if (lpfc_test_rrq_active(
				phba, ndlp, lpfc_cmd->cur_iocbq.sli4_lxritag))
				continue;
			list_del(&lpfc_cmd->list);
			found = 1;
			break;
		}
	}
	spin_unlock_irqrestore(&phba->scsi_buf_list_get_lock, iflag);
	if (!found)
		return NULL;
	return lpfc_cmd;
}
1163
1164
1165
1166
1167
1168
1169
1170
1171
1172
1173
/**
 * lpfc_get_scsi_buf - Get a SCSI buffer via the SLI-rev specific routine
 * @phba: HBA instance.
 * @ndlp: remote node the I/O will target.
 *
 * Return: a free SCSI buffer, or NULL when none are available.
 */
static struct lpfc_scsi_buf*
lpfc_get_scsi_buf(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp)
{
	return phba->lpfc_get_scsi_buf(phba, ndlp);
}
1179
1180
1181
1182
1183
1184
1185
1186
1187
1188static void
1189lpfc_release_scsi_buf_s3(struct lpfc_hba *phba, struct lpfc_scsi_buf *psb)
1190{
1191 unsigned long iflag = 0;
1192
1193 psb->seg_cnt = 0;
1194 psb->nonsg_phys = 0;
1195 psb->prot_seg_cnt = 0;
1196
1197 spin_lock_irqsave(&phba->scsi_buf_list_put_lock, iflag);
1198 psb->pCmd = NULL;
1199 psb->cur_iocbq.iocb_flag = LPFC_IO_FCP;
1200 list_add_tail(&psb->list, &phba->lpfc_scsi_buf_list_put);
1201 spin_unlock_irqrestore(&phba->scsi_buf_list_put_lock, iflag);
1202}
1203
1204
1205
1206
1207
1208
1209
1210
1211
1212
1213
/**
 * lpfc_release_scsi_buf_s4 - Return an SLI-4 SCSI buffer to the driver
 * @phba: HBA instance.
 * @psb: buffer being released.
 *
 * If the buffer's exchange is still busy on the wire it goes onto the
 * aborted-buffer list — lpfc_sli4_fcp_xri_aborted() will release it when
 * the port reports the XRI aborted.  Otherwise it goes straight back to
 * the put free list.
 */
static void
lpfc_release_scsi_buf_s4(struct lpfc_hba *phba, struct lpfc_scsi_buf *psb)
{
	unsigned long iflag = 0;

	psb->seg_cnt = 0;
	psb->nonsg_phys = 0;
	psb->prot_seg_cnt = 0;

	if (psb->exch_busy) {
		/* Exchange not yet released by the port: quarantine. */
		spin_lock_irqsave(&phba->sli4_hba.abts_scsi_buf_list_lock,
				  iflag);
		psb->pCmd = NULL;
		list_add_tail(&psb->list,
			      &phba->sli4_hba.lpfc_abts_scsi_buf_list);
		spin_unlock_irqrestore(&phba->sli4_hba.abts_scsi_buf_list_lock,
				       iflag);
	} else {
		psb->pCmd = NULL;
		psb->cur_iocbq.iocb_flag = LPFC_IO_FCP;
		spin_lock_irqsave(&phba->scsi_buf_list_put_lock, iflag);
		list_add_tail(&psb->list, &phba->lpfc_scsi_buf_list_put);
		spin_unlock_irqrestore(&phba->scsi_buf_list_put_lock, iflag);
	}
}
1239
1240
1241
1242
1243
1244
1245
1246
1247
/**
 * lpfc_release_scsi_buf - Release a SCSI buffer via the SLI-rev routine
 * @phba: HBA instance.
 * @psb: buffer being released.
 */
static void
lpfc_release_scsi_buf(struct lpfc_hba *phba, struct lpfc_scsi_buf *psb)
{

	phba->lpfc_release_scsi_buf(phba, psb);
}
1254
1255
1256
1257
1258
1259
1260
1261
1262
1263
1264
1265
1266
1267
1268
/**
 * lpfc_scsi_prep_dma_buf_s3 - DMA-map a SCSI command for SLI-3 transport
 * @phba: HBA instance.
 * @lpfc_cmd: driver SCSI buffer wrapping the command.
 *
 * Maps the command's scatter-gather list for DMA and fills the buffer's
 * BPL (or, where supported, the IOCB's embedded data BDEs) with one BDE
 * per mapped segment, then records the transfer length in both the FCP
 * command and the IOCB.
 *
 * Return: 0 on success, 1 if mapping fails or exceeds the segment limit.
 */
static int
lpfc_scsi_prep_dma_buf_s3(struct lpfc_hba *phba, struct lpfc_scsi_buf *lpfc_cmd)
{
	struct scsi_cmnd *scsi_cmnd = lpfc_cmd->pCmd;
	struct scatterlist *sgel = NULL;
	struct fcp_cmnd *fcp_cmnd = lpfc_cmd->fcp_cmnd;
	struct ulp_bde64 *bpl = lpfc_cmd->fcp_bpl;
	struct lpfc_iocbq *iocbq = &lpfc_cmd->cur_iocbq;
	IOCB_t *iocb_cmd = &lpfc_cmd->cur_iocbq.iocb;
	struct ulp_bde64 *data_bde = iocb_cmd->unsli3.fcp_ext.dbde;
	dma_addr_t physaddr;
	uint32_t num_bde = 0;
	int nseg, datadir = scsi_cmnd->sc_data_direction;

	/* BPL entries 0 and 1 hold the FCP cmd and rsp (filled at buffer
	 * allocation time); data BDEs start at entry 2.
	 */
	bpl += 2;
	if (scsi_sg_count(scsi_cmnd)) {
		/* Map the whole scatter-gather list in one call. */
		nseg = dma_map_sg(&phba->pcidev->dev, scsi_sglist(scsi_cmnd),
				  scsi_sg_count(scsi_cmnd), datadir);
		if (unlikely(!nseg))
			return 1;

		lpfc_cmd->seg_cnt = nseg;
		if (lpfc_cmd->seg_cnt > phba->cfg_sg_seg_cnt) {
			lpfc_printf_log(phba, KERN_ERR, LOG_BG,
				"9064 BLKGRD: %s: Too many sg segments from "
				"dma_map_sg. Config %d, seg_cnt %d\n",
				__func__, phba->cfg_sg_seg_cnt,
				lpfc_cmd->seg_cnt);
			lpfc_cmd->seg_cnt = 0;
			scsi_dma_unmap(scsi_cmnd);
			return 1;
		}

		/* Build one 64-bit BDE per segment.  On SLI-3 without
		 * BlockGuard (and not a DSS command), small I/Os fit in the
		 * IOCB's embedded data BDEs; everything else goes in the BPL.
		 */
		scsi_for_each_sg(scsi_cmnd, sgel, nseg, num_bde) {
			physaddr = sg_dma_address(sgel);
			if (phba->sli_rev == 3 &&
			    !(phba->sli3_options & LPFC_SLI3_BG_ENABLED) &&
			    !(iocbq->iocb_flag & DSS_SECURITY_OP) &&
			    nseg <= LPFC_EXT_DATA_BDE_COUNT) {
				data_bde->tus.f.bdeFlags = BUFF_TYPE_BDE_64;
				data_bde->tus.f.bdeSize = sg_dma_len(sgel);
				data_bde->addrLow = putPaddrLow(physaddr);
				data_bde->addrHigh = putPaddrHigh(physaddr);
				data_bde++;
			} else {
				bpl->tus.f.bdeFlags = BUFF_TYPE_BDE_64;
				bpl->tus.f.bdeSize = sg_dma_len(sgel);
				bpl->tus.w = le32_to_cpu(bpl->tus.w);
				bpl->addrLow =
					le32_to_cpu(putPaddrLow(physaddr));
				bpl->addrHigh =
					le32_to_cpu(putPaddrHigh(physaddr));
				bpl++;
			}
		}
	}

	/* Finish the IOCB: either count the embedded BDEs or point the
	 * BDL at the BPL (the size includes the cmd and rsp entries).
	 */
	if (phba->sli_rev == 3 &&
	    !(phba->sli3_options & LPFC_SLI3_BG_ENABLED) &&
	    !(iocbq->iocb_flag & DSS_SECURITY_OP)) {
		if (num_bde > LPFC_EXT_DATA_BDE_COUNT) {
			/* Too many segments for the embedded BDEs: the third
			 * embedded BDE becomes a BLP pointing at the BPL's
			 * data entries (which follow cmd + rsp + 2 BDEs).
			 */
			physaddr = lpfc_cmd->dma_handle;
			data_bde->tus.f.bdeFlags = BUFF_TYPE_BLP_64;
			data_bde->tus.f.bdeSize = (num_bde *
						   sizeof(struct ulp_bde64));
			physaddr += (sizeof(struct fcp_cmnd) +
				     sizeof(struct fcp_rsp) +
				     (2 * sizeof(struct ulp_bde64)));
			data_bde->addrHigh = putPaddrHigh(physaddr);
			data_bde->addrLow = putPaddrLow(physaddr);
			/* ebde count includes the response bde and data bpl */
			iocb_cmd->unsli3.fcp_ext.ebde_count = 2;
		} else {
			/* ebde count includes the response bde and data bdes */
			iocb_cmd->unsli3.fcp_ext.ebde_count = (num_bde + 1);
		}
	} else {
		iocb_cmd->un.fcpi64.bdl.bdeSize =
			((num_bde + 2) * sizeof(struct ulp_bde64));
		iocb_cmd->unsli3.fcp_ext.ebde_count = (num_bde + 1);
	}
	fcp_cmnd->fcpDl = cpu_to_be32(scsi_bufflen(scsi_cmnd));

	/* Due to difference in data length between DIF/non-DIF paths,
	 * we need to set word 4 of the IOCB here.
	 */
	iocb_cmd->un.fcpi.fcpi_parm = scsi_bufflen(scsi_cmnd);
	return 0;
}
1392
1393#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
1394
1395
1396#define BG_ERR_INIT 0x1
1397
1398#define BG_ERR_TGT 0x2
1399
1400#define BG_ERR_SWAP 0x10
1401
1402#define BG_ERR_CHECK 0x20
1403
1404
1405
1406
1407
1408
1409
1410
1411
1412
1413
/**
 * lpfc_bg_err_inject - Determine if we should inject an error
 * @phba: The HBA for which this call is being executed.
 * @sc: The SCSI command to examine.
 * @reftag: (in/out) BlockGuard reference tag for transmitted data; may be
 *          corrupted in place to inject a reftag error. NULL to skip.
 * @apptag: (in/out) BlockGuard application tag for transmitted data; may be
 *          corrupted in place to inject an apptag error. NULL to skip.
 * @new_guard: (in) nonzero enables guard-tag error injection.
 *
 * Debugfs-driven error injection (CONFIG_SCSI_LPFC_DEBUG_FS only).  The
 * injection trigger state lives in @phba (lpfc_injerr_* fields) and is set
 * via debugfs; this routine consumes one injection count per hit and clears
 * the trigger (nportid/lba/wwpn) when the count reaches zero.
 *
 * Returns a BG_ERR_* bit mask telling the caller how the error was injected
 * (BG_ERR_INIT: corrupt data going to the wire, BG_ERR_TGT: error should be
 * detected by the target, BG_ERR_SWAP: swap CRC->CSUM opcodes,
 * BG_ERR_CHECK: disable checking so the error travels), or 0 if the request
 * was ignored.
 **/
static int
lpfc_bg_err_inject(struct lpfc_hba *phba, struct scsi_cmnd *sc,
		uint32_t *reftag, uint16_t *apptag, uint32_t new_guard)
{
	struct scatterlist *sgpe;
	struct scatterlist *sgde;
	struct lpfc_scsi_buf *lpfc_cmd = NULL;
	struct scsi_dif_tuple *src = NULL;
	struct lpfc_nodelist *ndlp;
	struct lpfc_rport_data *rdata;
	uint32_t op = scsi_get_prot_op(sc);
	uint32_t blksize;
	uint32_t numblks;
	sector_t lba;
	int rc = 0;
	int blockoff = 0;

	if (op == SCSI_PROT_NORMAL)
		return 0;

	sgpe = scsi_prot_sglist(sc);
	sgde = scsi_sglist(sc);
	lba = scsi_get_lba(sc);

	/* First check if we need to match the LBA */
	if (phba->lpfc_injerr_lba != LPFC_INJERR_LBA_OFF) {
		blksize = lpfc_cmd_blksize(sc);
		numblks = (scsi_bufflen(sc) + blksize - 1) / blksize;

		/* Make sure we have the right LBA if one is specified */
		if ((phba->lpfc_injerr_lba < lba) ||
			(phba->lpfc_injerr_lba >= (lba + numblks)))
			return 0;
		if (sgpe) {
			/* Offset (in tuples) into the first prot segment,
			 * clamped to the number of tuples it holds.
			 */
			blockoff = phba->lpfc_injerr_lba - lba;
			numblks = sg_dma_len(sgpe) /
				sizeof(struct scsi_dif_tuple);
			if (numblks < blockoff)
				blockoff = numblks;
		}
	}

	/* Next check if we need to match the remote NPortID or WWPN */
	rdata = lpfc_rport_data_from_scsi_device(sc->device);
	if (rdata && rdata->pnode) {
		ndlp = rdata->pnode;

		/* Make sure we have the right NPortID if one is specified */
		if (phba->lpfc_injerr_nportid &&
			(phba->lpfc_injerr_nportid != ndlp->nlp_DID))
			return 0;

		/*
		 * Make sure we have the right WWPN if one is specified.
		 * wwn[0] should be a non-zero NAA in a good WWPN.
		 */
		if (phba->lpfc_injerr_wwpn.u.wwn[0] &&
			(memcmp(&ndlp->nlp_portname, &phba->lpfc_injerr_wwpn,
				sizeof(struct lpfc_name)) != 0))
			return 0;
	}

	/* Setup a ptr to the protection data if the SCSI host provides it */
	if (sgpe) {
		src = (struct scsi_dif_tuple *)sg_virt(sgpe);
		src += blockoff;
		lpfc_cmd = (struct lpfc_scsi_buf *)sc->host_scribble;
	}

	/* Should we change the Reference Tag */
	if (reftag) {
		if (phba->lpfc_injerr_wref_cnt) {
			switch (op) {
			case SCSI_PROT_WRITE_PASS:
				if (src) {
					/*
					 * For WRITE_PASS, force the error
					 * to be sent on the wire. It should
					 * be detected by the Target.
					 * If blockoff != 0 error will be
					 * inserted in middle of the IO.
					 */
					lpfc_printf_log(phba, KERN_ERR, LOG_BG,
					"9076 BLKGRD: Injecting reftag error: "
					"write lba x%lx + x%x oldrefTag x%x\n",
					(unsigned long)lba, blockoff,
					be32_to_cpu(src->ref_tag));

					/*
					 * Save the old ref_tag so we can
					 * restore it on completion.
					 */
					if (lpfc_cmd) {
						lpfc_cmd->prot_data_type =
							LPFC_INJERR_REFTAG;
						lpfc_cmd->prot_data_segment =
							src;
						lpfc_cmd->prot_data =
							src->ref_tag;
					}
					src->ref_tag = cpu_to_be32(0xDEADBEEF);
					phba->lpfc_injerr_wref_cnt--;
					if (phba->lpfc_injerr_wref_cnt == 0) {
						phba->lpfc_injerr_nportid = 0;
						phba->lpfc_injerr_lba =
							LPFC_INJERR_LBA_OFF;
						memset(&phba->lpfc_injerr_wwpn,
						  0, sizeof(struct lpfc_name));
					}
					rc = BG_ERR_TGT | BG_ERR_CHECK;

					break;
				}
				/* fall through: no prot data, treat as INSERT */
			case SCSI_PROT_WRITE_INSERT:
				/*
				 * For WRITE_INSERT, force the error
				 * to be sent on the wire. It should be
				 * detected by the Target.
				 */
				/* DEADBEEF will be the reftag on the wire */
				*reftag = 0xDEADBEEF;
				phba->lpfc_injerr_wref_cnt--;
				if (phba->lpfc_injerr_wref_cnt == 0) {
					phba->lpfc_injerr_nportid = 0;
					phba->lpfc_injerr_lba =
						LPFC_INJERR_LBA_OFF;
					memset(&phba->lpfc_injerr_wwpn,
						0, sizeof(struct lpfc_name));
				}
				rc = BG_ERR_TGT | BG_ERR_CHECK;

				lpfc_printf_log(phba, KERN_ERR, LOG_BG,
					"9078 BLKGRD: Injecting reftag error: "
					"write lba x%lx\n", (unsigned long)lba);
				break;
			case SCSI_PROT_WRITE_STRIP:
				/*
				 * For WRITE_STRIP, force the error on the
				 * data being copied from SLI-Host to
				 * SLI-Port.
				 */
				*reftag = 0xDEADBEEF;
				phba->lpfc_injerr_wref_cnt--;
				if (phba->lpfc_injerr_wref_cnt == 0) {
					phba->lpfc_injerr_nportid = 0;
					phba->lpfc_injerr_lba =
						LPFC_INJERR_LBA_OFF;
					memset(&phba->lpfc_injerr_wwpn,
						0, sizeof(struct lpfc_name));
				}
				rc = BG_ERR_INIT;

				lpfc_printf_log(phba, KERN_ERR, LOG_BG,
					"9077 BLKGRD: Injecting reftag error: "
					"write lba x%lx\n", (unsigned long)lba);
				break;
			}
		}
		if (phba->lpfc_injerr_rref_cnt) {
			switch (op) {
			case SCSI_PROT_READ_INSERT:
			case SCSI_PROT_READ_STRIP:
			case SCSI_PROT_READ_PASS:
				/*
				 * For READ ops, force the error on data
				 * being read off the wire. It should force
				 * an IO error to the driver.
				 */
				*reftag = 0xDEADBEEF;
				phba->lpfc_injerr_rref_cnt--;
				if (phba->lpfc_injerr_rref_cnt == 0) {
					phba->lpfc_injerr_nportid = 0;
					phba->lpfc_injerr_lba =
						LPFC_INJERR_LBA_OFF;
					memset(&phba->lpfc_injerr_wwpn,
						0, sizeof(struct lpfc_name));
				}
				rc = BG_ERR_INIT;

				lpfc_printf_log(phba, KERN_ERR, LOG_BG,
					"9079 BLKGRD: Injecting reftag error: "
					"read lba x%lx\n", (unsigned long)lba);
				break;
			}
		}
	}

	/* Should we change the Application Tag */
	if (apptag) {
		if (phba->lpfc_injerr_wapp_cnt) {
			switch (op) {
			case SCSI_PROT_WRITE_PASS:
				if (src) {
					/*
					 * For WRITE_PASS, force the error
					 * to be sent on the wire. It should
					 * be detected by the Target.
					 * If blockoff != 0 error will be
					 * inserted in middle of the IO.
					 */
					lpfc_printf_log(phba, KERN_ERR, LOG_BG,
					"9080 BLKGRD: Injecting apptag error: "
					"write lba x%lx + x%x oldappTag x%x\n",
					(unsigned long)lba, blockoff,
					be16_to_cpu(src->app_tag));

					/*
					 * Save the old app_tag so we can
					 * restore it on completion.
					 */
					if (lpfc_cmd) {
						lpfc_cmd->prot_data_type =
							LPFC_INJERR_APPTAG;
						lpfc_cmd->prot_data_segment =
							src;
						lpfc_cmd->prot_data =
							src->app_tag;
					}
					src->app_tag = cpu_to_be16(0xDEAD);
					phba->lpfc_injerr_wapp_cnt--;
					if (phba->lpfc_injerr_wapp_cnt == 0) {
						phba->lpfc_injerr_nportid = 0;
						phba->lpfc_injerr_lba =
							LPFC_INJERR_LBA_OFF;
						memset(&phba->lpfc_injerr_wwpn,
						  0, sizeof(struct lpfc_name));
					}
					rc = BG_ERR_TGT | BG_ERR_CHECK;
					break;
				}
				/* fall through: no prot data, treat as INSERT */
			case SCSI_PROT_WRITE_INSERT:
				/*
				 * For WRITE_INSERT, force the error
				 * to be sent on the wire. It should be
				 * detected by the Target.
				 */
				/* DEAD will be the apptag on the wire */
				*apptag = 0xDEAD;
				phba->lpfc_injerr_wapp_cnt--;
				if (phba->lpfc_injerr_wapp_cnt == 0) {
					phba->lpfc_injerr_nportid = 0;
					phba->lpfc_injerr_lba =
						LPFC_INJERR_LBA_OFF;
					memset(&phba->lpfc_injerr_wwpn,
						0, sizeof(struct lpfc_name));
				}
				rc = BG_ERR_TGT | BG_ERR_CHECK;

				lpfc_printf_log(phba, KERN_ERR, LOG_BG,
					"0813 BLKGRD: Injecting apptag error: "
					"write lba x%lx\n", (unsigned long)lba);
				break;
			case SCSI_PROT_WRITE_STRIP:
				/*
				 * For WRITE_STRIP, force the error on the
				 * data being copied from SLI-Host to
				 * SLI-Port.
				 */
				*apptag = 0xDEAD;
				phba->lpfc_injerr_wapp_cnt--;
				if (phba->lpfc_injerr_wapp_cnt == 0) {
					phba->lpfc_injerr_nportid = 0;
					phba->lpfc_injerr_lba =
						LPFC_INJERR_LBA_OFF;
					memset(&phba->lpfc_injerr_wwpn,
						0, sizeof(struct lpfc_name));
				}
				rc = BG_ERR_INIT;

				lpfc_printf_log(phba, KERN_ERR, LOG_BG,
					"0812 BLKGRD: Injecting apptag error: "
					"write lba x%lx\n", (unsigned long)lba);
				break;
			}
		}
		if (phba->lpfc_injerr_rapp_cnt) {
			switch (op) {
			case SCSI_PROT_READ_INSERT:
			case SCSI_PROT_READ_STRIP:
			case SCSI_PROT_READ_PASS:
				/*
				 * For READ ops, force the error on data
				 * being read off the wire. It should force
				 * an IO error to the driver.
				 */
				*apptag = 0xDEAD;
				phba->lpfc_injerr_rapp_cnt--;
				if (phba->lpfc_injerr_rapp_cnt == 0) {
					phba->lpfc_injerr_nportid = 0;
					phba->lpfc_injerr_lba =
						LPFC_INJERR_LBA_OFF;
					memset(&phba->lpfc_injerr_wwpn,
						0, sizeof(struct lpfc_name));
				}
				rc = BG_ERR_INIT;

				lpfc_printf_log(phba, KERN_ERR, LOG_BG,
					"0814 BLKGRD: Injecting apptag error: "
					"read lba x%lx\n", (unsigned long)lba);
				break;
			}
		}
	}


	/* Should we change the Guard Tag */
	if (new_guard) {
		if (phba->lpfc_injerr_wgrd_cnt) {
			switch (op) {
			case SCSI_PROT_WRITE_PASS:
				rc = BG_ERR_CHECK;
				/* fall through: also apply the INSERT logic */

			case SCSI_PROT_WRITE_INSERT:
				/*
				 * For WRITE_INSERT, force the
				 * error to be sent on the wire. It should be
				 * detected by the Target.
				 */
				phba->lpfc_injerr_wgrd_cnt--;
				if (phba->lpfc_injerr_wgrd_cnt == 0) {
					phba->lpfc_injerr_nportid = 0;
					phba->lpfc_injerr_lba =
						LPFC_INJERR_LBA_OFF;
					memset(&phba->lpfc_injerr_wwpn,
						0, sizeof(struct lpfc_name));
				}

				rc |= BG_ERR_TGT | BG_ERR_SWAP;
				/* Signals the caller to swap CRC->CSUM */

				lpfc_printf_log(phba, KERN_ERR, LOG_BG,
					"0817 BLKGRD: Injecting guard error: "
					"write lba x%lx\n", (unsigned long)lba);
				break;
			case SCSI_PROT_WRITE_STRIP:
				/*
				 * For WRITE_STRIP, force the error on the
				 * data being copied from SLI-Host to
				 * SLI-Port.
				 */
				phba->lpfc_injerr_wgrd_cnt--;
				if (phba->lpfc_injerr_wgrd_cnt == 0) {
					phba->lpfc_injerr_nportid = 0;
					phba->lpfc_injerr_lba =
						LPFC_INJERR_LBA_OFF;
					memset(&phba->lpfc_injerr_wwpn,
						0, sizeof(struct lpfc_name));
				}

				rc = BG_ERR_INIT | BG_ERR_SWAP;
				/* Signals the caller to swap CRC->CSUM */

				lpfc_printf_log(phba, KERN_ERR, LOG_BG,
					"0816 BLKGRD: Injecting guard error: "
					"write lba x%lx\n", (unsigned long)lba);
				break;
			}
		}
		if (phba->lpfc_injerr_rgrd_cnt) {
			switch (op) {
			case SCSI_PROT_READ_INSERT:
			case SCSI_PROT_READ_STRIP:
			case SCSI_PROT_READ_PASS:
				/*
				 * For READ ops, force the error on data
				 * being read off the wire. It should force
				 * an IO error to the driver.
				 */
				phba->lpfc_injerr_rgrd_cnt--;
				if (phba->lpfc_injerr_rgrd_cnt == 0) {
					phba->lpfc_injerr_nportid = 0;
					phba->lpfc_injerr_lba =
						LPFC_INJERR_LBA_OFF;
					memset(&phba->lpfc_injerr_wwpn,
						0, sizeof(struct lpfc_name));
				}

				rc = BG_ERR_INIT | BG_ERR_SWAP;
				/* Signals the caller to swap CRC->CSUM */

				lpfc_printf_log(phba, KERN_ERR, LOG_BG,
					"0818 BLKGRD: Injecting guard error: "
					"read lba x%lx\n", (unsigned long)lba);
			}
		}
	}

	return rc;
}
1808#endif
1809
1810
1811
1812
1813
1814
1815
1816
1817
1818
1819
1820
1821static int
1822lpfc_sc_to_bg_opcodes(struct lpfc_hba *phba, struct scsi_cmnd *sc,
1823 uint8_t *txop, uint8_t *rxop)
1824{
1825 uint8_t ret = 0;
1826
1827 if (lpfc_cmd_guard_csum(sc)) {
1828 switch (scsi_get_prot_op(sc)) {
1829 case SCSI_PROT_READ_INSERT:
1830 case SCSI_PROT_WRITE_STRIP:
1831 *rxop = BG_OP_IN_NODIF_OUT_CSUM;
1832 *txop = BG_OP_IN_CSUM_OUT_NODIF;
1833 break;
1834
1835 case SCSI_PROT_READ_STRIP:
1836 case SCSI_PROT_WRITE_INSERT:
1837 *rxop = BG_OP_IN_CRC_OUT_NODIF;
1838 *txop = BG_OP_IN_NODIF_OUT_CRC;
1839 break;
1840
1841 case SCSI_PROT_READ_PASS:
1842 case SCSI_PROT_WRITE_PASS:
1843 *rxop = BG_OP_IN_CRC_OUT_CSUM;
1844 *txop = BG_OP_IN_CSUM_OUT_CRC;
1845 break;
1846
1847 case SCSI_PROT_NORMAL:
1848 default:
1849 lpfc_printf_log(phba, KERN_ERR, LOG_BG,
1850 "9063 BLKGRD: Bad op/guard:%d/IP combination\n",
1851 scsi_get_prot_op(sc));
1852 ret = 1;
1853 break;
1854
1855 }
1856 } else {
1857 switch (scsi_get_prot_op(sc)) {
1858 case SCSI_PROT_READ_STRIP:
1859 case SCSI_PROT_WRITE_INSERT:
1860 *rxop = BG_OP_IN_CRC_OUT_NODIF;
1861 *txop = BG_OP_IN_NODIF_OUT_CRC;
1862 break;
1863
1864 case SCSI_PROT_READ_PASS:
1865 case SCSI_PROT_WRITE_PASS:
1866 *rxop = BG_OP_IN_CRC_OUT_CRC;
1867 *txop = BG_OP_IN_CRC_OUT_CRC;
1868 break;
1869
1870 case SCSI_PROT_READ_INSERT:
1871 case SCSI_PROT_WRITE_STRIP:
1872 *rxop = BG_OP_IN_NODIF_OUT_CRC;
1873 *txop = BG_OP_IN_CRC_OUT_NODIF;
1874 break;
1875
1876 case SCSI_PROT_NORMAL:
1877 default:
1878 lpfc_printf_log(phba, KERN_ERR, LOG_BG,
1879 "9075 BLKGRD: Bad op/guard:%d/CRC combination\n",
1880 scsi_get_prot_op(sc));
1881 ret = 1;
1882 break;
1883 }
1884 }
1885
1886 return ret;
1887}
1888
1889#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
1890
1891
1892
1893
1894
1895
1896
1897
1898
1899
1900
1901static int
1902lpfc_bg_err_opcodes(struct lpfc_hba *phba, struct scsi_cmnd *sc,
1903 uint8_t *txop, uint8_t *rxop)
1904{
1905 uint8_t ret = 0;
1906
1907 if (lpfc_cmd_guard_csum(sc)) {
1908 switch (scsi_get_prot_op(sc)) {
1909 case SCSI_PROT_READ_INSERT:
1910 case SCSI_PROT_WRITE_STRIP:
1911 *rxop = BG_OP_IN_NODIF_OUT_CRC;
1912 *txop = BG_OP_IN_CRC_OUT_NODIF;
1913 break;
1914
1915 case SCSI_PROT_READ_STRIP:
1916 case SCSI_PROT_WRITE_INSERT:
1917 *rxop = BG_OP_IN_CSUM_OUT_NODIF;
1918 *txop = BG_OP_IN_NODIF_OUT_CSUM;
1919 break;
1920
1921 case SCSI_PROT_READ_PASS:
1922 case SCSI_PROT_WRITE_PASS:
1923 *rxop = BG_OP_IN_CSUM_OUT_CRC;
1924 *txop = BG_OP_IN_CRC_OUT_CSUM;
1925 break;
1926
1927 case SCSI_PROT_NORMAL:
1928 default:
1929 break;
1930
1931 }
1932 } else {
1933 switch (scsi_get_prot_op(sc)) {
1934 case SCSI_PROT_READ_STRIP:
1935 case SCSI_PROT_WRITE_INSERT:
1936 *rxop = BG_OP_IN_CSUM_OUT_NODIF;
1937 *txop = BG_OP_IN_NODIF_OUT_CSUM;
1938 break;
1939
1940 case SCSI_PROT_READ_PASS:
1941 case SCSI_PROT_WRITE_PASS:
1942 *rxop = BG_OP_IN_CSUM_OUT_CSUM;
1943 *txop = BG_OP_IN_CSUM_OUT_CSUM;
1944 break;
1945
1946 case SCSI_PROT_READ_INSERT:
1947 case SCSI_PROT_WRITE_STRIP:
1948 *rxop = BG_OP_IN_NODIF_OUT_CSUM;
1949 *txop = BG_OP_IN_CSUM_OUT_NODIF;
1950 break;
1951
1952 case SCSI_PROT_NORMAL:
1953 default:
1954 break;
1955 }
1956 }
1957
1958 return ret;
1959}
1960#endif
1961
1962
1963
1964
1965
1966
1967
1968
1969
1970
1971
1972
1973
1974
1975
1976
1977
1978
1979
1980
1981
1982
1983
1984
1985
1986
1987
1988
1989
1990
1991
1992
/**
 * lpfc_bg_setup_bpl - Setup BlockGuard BPL with no protection data
 * @phba: The HBA for which this call is being executed.
 * @sc: pointer to scsi command we're working on.
 * @bpl: pointer to buffer list for protection groups.
 * @datasegcnt: number of segments of data that have been dma mapped.
 *
 * Builds a SLI-3 BPL for protection group type LPFC_PG_TYPE_NO_DIF:
 * a PDE5 (reftag seed), a PDE6 (opcodes / check enables), then one
 * 64-bit BDE per mapped data segment.
 *
 * Returns the number of BDEs added to the BPL (0 if the opcode lookup
 * failed).
 **/
static int
lpfc_bg_setup_bpl(struct lpfc_hba *phba, struct scsi_cmnd *sc,
		struct ulp_bde64 *bpl, int datasegcnt)
{
	struct scatterlist *sgde = NULL; /* s/g data entry */
	struct lpfc_pde5 *pde5 = NULL;
	struct lpfc_pde6 *pde6 = NULL;
	dma_addr_t physaddr;
	int i = 0, num_bde = 0, status;
	int datadir = sc->sc_data_direction;
#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
	uint32_t rc;
#endif
	uint32_t checking = 1;
	uint32_t reftag;
	unsigned blksize;
	uint8_t txop, rxop;

	status = lpfc_sc_to_bg_opcodes(phba, sc, &txop, &rxop);
	if (status)
		goto out;

	/* extract some info from the scsi command for pde */
	blksize = lpfc_cmd_blksize(sc); /* NOTE(review): unused here — confirm */
	reftag = (uint32_t)scsi_get_lba(sc); /* Truncate LBA */

#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
	/* Debugfs error injection may corrupt reftag or swap opcodes */
	rc = lpfc_bg_err_inject(phba, sc, &reftag, NULL, 1);
	if (rc) {
		if (rc & BG_ERR_SWAP)
			lpfc_bg_err_opcodes(phba, sc, &txop, &rxop);
		if (rc & BG_ERR_CHECK)
			checking = 0;
	}
#endif

	/* setup PDE5 with what we have */
	pde5 = (struct lpfc_pde5 *) bpl;
	memset(pde5, 0, sizeof(struct lpfc_pde5));
	bf_set(pde5_type, pde5, LPFC_PDE5_DESCRIPTOR);

	/* Endianness conversion if necessary for PDE5 */
	pde5->word0 = cpu_to_le32(pde5->word0);
	pde5->reftag = cpu_to_le32(reftag);

	/* advance bpl and increment bde count */
	num_bde++;
	bpl++;
	pde6 = (struct lpfc_pde6 *) bpl;

	/* setup PDE6 with the rest of the info */
	memset(pde6, 0, sizeof(struct lpfc_pde6));
	bf_set(pde6_type, pde6, LPFC_PDE6_DESCRIPTOR);
	bf_set(pde6_optx, pde6, txop);
	bf_set(pde6_oprx, pde6, rxop);

	/*
	 * Checking is only enabled on READs; on WRITEs the protection
	 * data is generated by the port, not checked.
	 */
	if (datadir == DMA_FROM_DEVICE) {
		if (lpfc_cmd_protect(sc, LPFC_CHECK_PROTECT_GUARD))
			bf_set(pde6_ce, pde6, checking);
		else
			bf_set(pde6_ce, pde6, 0);

		if (lpfc_cmd_protect(sc, LPFC_CHECK_PROTECT_REF))
			bf_set(pde6_re, pde6, checking);
		else
			bf_set(pde6_re, pde6, 0);
	}
	bf_set(pde6_ai, pde6, 1);	/* auto-increment reftag */
	bf_set(pde6_ae, pde6, 0);
	bf_set(pde6_apptagval, pde6, 0);

	/* Endianness conversion if necessary for PDE6 */
	pde6->word0 = cpu_to_le32(pde6->word0);
	pde6->word1 = cpu_to_le32(pde6->word1);
	pde6->word2 = cpu_to_le32(pde6->word2);

	/* advance bpl and increment bde count */
	num_bde++;
	bpl++;

	/* assumption: caller has already run dma_map_sg on command data */
	scsi_for_each_sg(sc, sgde, datasegcnt, i) {
		physaddr = sg_dma_address(sgde);
		bpl->addrLow = le32_to_cpu(putPaddrLow(physaddr));
		bpl->addrHigh = le32_to_cpu(putPaddrHigh(physaddr));
		bpl->tus.f.bdeSize = sg_dma_len(sgde);
		if (datadir == DMA_TO_DEVICE)
			bpl->tus.f.bdeFlags = BUFF_TYPE_BDE_64;
		else
			bpl->tus.f.bdeFlags = BUFF_TYPE_BDE_64I;
		bpl->tus.w = le32_to_cpu(bpl->tus.w);
		bpl++;
		num_bde++;
	}

out:
	return num_bde;
}
2095
2096
2097
2098
2099
2100
2101
2102
2103
2104
2105
2106
2107
2108
2109
2110
2111
2112
2113
2114
2115
2116
2117
2118
2119
2120
2121
2122
2123
2124
2125
2126
2127
2128
2129
2130
2131
2132
2133
2134
/**
 * lpfc_bg_setup_bpl_prot - Setup BlockGuard BPL with protection data
 * @phba: The HBA for which this call is being executed.
 * @sc: pointer to scsi command we're working on.
 * @bpl: pointer to buffer list for protection groups.
 * @datacnt: number of segments of data that have been dma mapped.
 * @protcnt: number of segments of protection data that have been dma mapped.
 *
 * Builds a SLI-3 BPL for protection group type LPFC_PG_TYPE_DIF_BUF:
 * for every protection group, a PDE5 (reftag seed), a PDE6 (opcodes /
 * check enables), a PDE7 pointing at the protection tuples, then data
 * BDEs covering protgrp_bytes of the data s/g list.  Data segments may
 * be split across protection groups (split_offset) and a protection
 * segment that crosses a 4K boundary is consumed in two passes
 * (protgroup_offset / protgroup_remainder).
 *
 * Returns the number of BDEs added to the BPL; when the caller's
 * cfg_total_seg_cnt budget would be exceeded, a value past the limit is
 * returned so the caller fails the setup; 0 on invalid s/g input.
 **/
static int
lpfc_bg_setup_bpl_prot(struct lpfc_hba *phba, struct scsi_cmnd *sc,
		struct ulp_bde64 *bpl, int datacnt, int protcnt)
{
	struct scatterlist *sgde = NULL; /* s/g data entry */
	struct scatterlist *sgpe = NULL; /* s/g prot entry */
	struct lpfc_pde5 *pde5 = NULL;
	struct lpfc_pde6 *pde6 = NULL;
	struct lpfc_pde7 *pde7 = NULL;
	dma_addr_t dataphysaddr, protphysaddr;
	unsigned short curr_data = 0, curr_prot = 0;
	unsigned int split_offset;
	unsigned int protgroup_len, protgroup_offset = 0, protgroup_remainder;
	unsigned int protgrp_blks, protgrp_bytes;
	unsigned int remainder, subtotal;
	int status;
	int datadir = sc->sc_data_direction;
	unsigned char pgdone = 0, alldone = 0;
	unsigned blksize;
#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
	uint32_t rc;
#endif
	uint32_t checking = 1;
	uint32_t reftag;
	uint8_t txop, rxop;
	int num_bde = 0;

	sgpe = scsi_prot_sglist(sc);
	sgde = scsi_sglist(sc);

	if (!sgpe || !sgde) {
		lpfc_printf_log(phba, KERN_ERR, LOG_FCP,
				"9020 Invalid s/g entry: data=0x%p prot=0x%p\n",
				sgpe, sgde);
		return 0;
	}

	status = lpfc_sc_to_bg_opcodes(phba, sc, &txop, &rxop);
	if (status)
		goto out;

	/* extract some info from the scsi command */
	blksize = lpfc_cmd_blksize(sc);
	reftag = (uint32_t)scsi_get_lba(sc); /* Truncate LBA */

#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
	/* Debugfs error injection may corrupt reftag or swap opcodes */
	rc = lpfc_bg_err_inject(phba, sc, &reftag, NULL, 1);
	if (rc) {
		if (rc & BG_ERR_SWAP)
			lpfc_bg_err_opcodes(phba, sc, &txop, &rxop);
		if (rc & BG_ERR_CHECK)
			checking = 0;
	}
#endif

	split_offset = 0;
	do {
		/* Check to see if we ran out of space */
		if (num_bde >= (phba->cfg_total_seg_cnt - 2))
			return num_bde + 3;

		/* setup PDE5 with what we have */
		pde5 = (struct lpfc_pde5 *) bpl;
		memset(pde5, 0, sizeof(struct lpfc_pde5));
		bf_set(pde5_type, pde5, LPFC_PDE5_DESCRIPTOR);

		/* Endianness conversion if necessary for PDE5 */
		pde5->word0 = cpu_to_le32(pde5->word0);
		pde5->reftag = cpu_to_le32(reftag);

		/* advance bpl and increment bde count */
		num_bde++;
		bpl++;
		pde6 = (struct lpfc_pde6 *) bpl;

		/* setup PDE6 with the rest of the info */
		memset(pde6, 0, sizeof(struct lpfc_pde6));
		bf_set(pde6_type, pde6, LPFC_PDE6_DESCRIPTOR);
		bf_set(pde6_optx, pde6, txop);
		bf_set(pde6_oprx, pde6, rxop);

		if (lpfc_cmd_protect(sc, LPFC_CHECK_PROTECT_GUARD))
			bf_set(pde6_ce, pde6, checking);
		else
			bf_set(pde6_ce, pde6, 0);

		if (lpfc_cmd_protect(sc, LPFC_CHECK_PROTECT_REF))
			bf_set(pde6_re, pde6, checking);
		else
			bf_set(pde6_re, pde6, 0);

		bf_set(pde6_ai, pde6, 1);	/* auto-increment reftag */
		bf_set(pde6_ae, pde6, 0);
		bf_set(pde6_apptagval, pde6, 0);

		/* Endianness conversion if necessary for PDE6 */
		pde6->word0 = cpu_to_le32(pde6->word0);
		pde6->word1 = cpu_to_le32(pde6->word1);
		pde6->word2 = cpu_to_le32(pde6->word2);

		/* advance bpl and increment bde count */
		num_bde++;
		bpl++;

		/* setup the first BDE that points to protection buffer */
		protphysaddr = sg_dma_address(sgpe) + protgroup_offset;
		protgroup_len = sg_dma_len(sgpe) - protgroup_offset;

		/* must be integer multiple of the DIF block length (8B) */
		BUG_ON(protgroup_len % 8);

		pde7 = (struct lpfc_pde7 *) bpl;
		memset(pde7, 0, sizeof(struct lpfc_pde7));
		bf_set(pde7_type, pde7, LPFC_PDE7_DESCRIPTOR);

		pde7->addrHigh = le32_to_cpu(putPaddrHigh(protphysaddr));
		pde7->addrLow = le32_to_cpu(putPaddrLow(protphysaddr));

		protgrp_blks = protgroup_len / 8;
		protgrp_bytes = protgrp_blks * blksize;

		/* check if this pde is crossing the 4K boundary; if so split */
		if ((pde7->addrLow & 0xfff) + protgroup_len > 0x1000) {
			protgroup_remainder = 0x1000 - (pde7->addrLow & 0xfff);
			protgroup_offset += protgroup_remainder;
			protgrp_blks = protgroup_remainder / 8;
			protgrp_bytes = protgrp_blks * blksize;
		} else {
			/* this prot segment fully consumed */
			protgroup_offset = 0;
			curr_prot++;
		}

		num_bde++;

		/* setup BDEs for the data blocks associated with this group */
		pgdone = 0;
		subtotal = 0;
		while (!pgdone) {
			/* Check to see if we ran out of space */
			if (num_bde >= phba->cfg_total_seg_cnt)
				return num_bde + 1;

			if (!sgde) {
				lpfc_printf_log(phba, KERN_ERR, LOG_BG,
					"9065 BLKGRD:%s Invalid data segment\n",
						__func__);
				return 0;
			}
			bpl++;
			dataphysaddr = sg_dma_address(sgde) + split_offset;
			bpl->addrLow = le32_to_cpu(putPaddrLow(dataphysaddr));
			bpl->addrHigh = le32_to_cpu(putPaddrHigh(dataphysaddr));

			remainder = sg_dma_len(sgde) - split_offset;

			if ((subtotal + remainder) <= protgrp_bytes) {
				/* we can use this whole segment */
				bpl->tus.f.bdeSize = remainder;
				split_offset = 0;

				if ((subtotal + remainder) == protgrp_bytes)
					pgdone = 1;
			} else {
				/* must split this segment with next prot grp */
				bpl->tus.f.bdeSize = protgrp_bytes - subtotal;
				split_offset += bpl->tus.f.bdeSize;
			}

			subtotal += bpl->tus.f.bdeSize;

			if (datadir == DMA_TO_DEVICE)
				bpl->tus.f.bdeFlags = BUFF_TYPE_BDE_64;
			else
				bpl->tus.f.bdeFlags = BUFF_TYPE_BDE_64I;
			bpl->tus.w = le32_to_cpu(bpl->tus.w);

			num_bde++;
			curr_data++;

			if (split_offset)
				break;

			/* Move to the next s/g segment if possible */
			sgde = sg_next(sgde);

		}

		if (protgroup_offset) {
			/* update the reference tag */
			reftag += protgrp_blks;
			bpl++;
			continue;
		}

		/* are we done ? */
		if (curr_prot == protcnt) {
			alldone = 1;
		} else if (curr_prot < protcnt) {
			/* advance to next prot buffer */
			sgpe = sg_next(sgpe);
			bpl++;

			/* update the reference tag */
			reftag += protgrp_blks;
		} else {
			/* if we're here, we have a bug */
			lpfc_printf_log(phba, KERN_ERR, LOG_BG,
				"9054 BLKGRD: bug in %s\n", __func__);
		}

	} while (!alldone);
out:

	return num_bde;
}
2350
2351
2352
2353
2354
2355
2356
2357
2358
2359
2360
2361
2362
2363
2364
2365
2366
2367
2368
2369
2370
2371
2372
2373
2374
2375
2376
2377
2378
2379
/**
 * lpfc_bg_setup_sgl - Setup BlockGuard SGL with no protection data
 * @phba: The HBA for which this call is being executed.
 * @sc: pointer to scsi command we're working on.
 * @sgl: pointer to buffer list for protection groups.
 * @datasegcnt: number of segments of data that have been dma mapped.
 *
 * SLI-4 counterpart of lpfc_bg_setup_bpl(): builds an SGL for protection
 * group type LPFC_PG_TYPE_NO_DIF — one DISEED seed entry followed by one
 * data SGE per mapped data segment.
 *
 * Returns the number of SGEs added to the SGL (0 if the opcode lookup
 * failed).
 **/
static int
lpfc_bg_setup_sgl(struct lpfc_hba *phba, struct scsi_cmnd *sc,
		struct sli4_sge *sgl, int datasegcnt)
{
	struct scatterlist *sgde = NULL; /* s/g data entry */
	struct sli4_sge_diseed *diseed = NULL;
	dma_addr_t physaddr;
	int i = 0, num_sge = 0, status;
	uint32_t reftag;
	unsigned blksize;
	uint8_t txop, rxop;
#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
	uint32_t rc;
#endif
	uint32_t checking = 1;
	uint32_t dma_len;
	uint32_t dma_offset = 0;

	status = lpfc_sc_to_bg_opcodes(phba, sc, &txop, &rxop);
	if (status)
		goto out;

	/* extract some info from the scsi command for diseed */
	blksize = lpfc_cmd_blksize(sc); /* NOTE(review): unused here — confirm */
	reftag = (uint32_t)scsi_get_lba(sc); /* Truncate LBA */

#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
	/* Debugfs error injection may corrupt reftag or swap opcodes */
	rc = lpfc_bg_err_inject(phba, sc, &reftag, NULL, 1);
	if (rc) {
		if (rc & BG_ERR_SWAP)
			lpfc_bg_err_opcodes(phba, sc, &txop, &rxop);
		if (rc & BG_ERR_CHECK)
			checking = 0;
	}
#endif

	/* setup DISEED with what we have */
	diseed = (struct sli4_sge_diseed *) sgl;
	memset(diseed, 0, sizeof(struct sli4_sge_diseed));
	bf_set(lpfc_sli4_sge_type, sgl, LPFC_SGE_TYPE_DISEED);

	/* Endianness conversion if necessary */
	diseed->ref_tag = cpu_to_le32(reftag);
	diseed->ref_tag_tran = diseed->ref_tag;

	/*
	 * Checking is only enabled on READs; on WRITEs the protection
	 * data is generated by the port, not checked.
	 */
	if (sc->sc_data_direction == DMA_FROM_DEVICE) {
		if (lpfc_cmd_protect(sc, LPFC_CHECK_PROTECT_GUARD))
			bf_set(lpfc_sli4_sge_dif_ce, diseed, checking);
		else
			bf_set(lpfc_sli4_sge_dif_ce, diseed, 0);

		if (lpfc_cmd_protect(sc, LPFC_CHECK_PROTECT_REF))
			bf_set(lpfc_sli4_sge_dif_re, diseed, checking);
		else
			bf_set(lpfc_sli4_sge_dif_re, diseed, 0);
	}

	/* setup DISEED with the rest of the info */
	bf_set(lpfc_sli4_sge_dif_optx, diseed, txop);
	bf_set(lpfc_sli4_sge_dif_oprx, diseed, rxop);

	bf_set(lpfc_sli4_sge_dif_ai, diseed, 1);	/* auto-increment reftag */
	bf_set(lpfc_sli4_sge_dif_me, diseed, 0);

	/* Endianness conversion if necessary for DISEED */
	diseed->word2 = cpu_to_le32(diseed->word2);
	diseed->word3 = cpu_to_le32(diseed->word3);

	/* advance sgl and increment sge count */
	num_sge++;
	sgl++;

	/* assumption: caller has already run dma_map_sg on command data */
	scsi_for_each_sg(sc, sgde, datasegcnt, i) {
		physaddr = sg_dma_address(sgde);
		dma_len = sg_dma_len(sgde);
		sgl->addr_lo = cpu_to_le32(putPaddrLow(physaddr));
		sgl->addr_hi = cpu_to_le32(putPaddrHigh(physaddr));
		if ((i + 1) == datasegcnt)
			bf_set(lpfc_sli4_sge_last, sgl, 1);
		else
			bf_set(lpfc_sli4_sge_last, sgl, 0);
		bf_set(lpfc_sli4_sge_offset, sgl, dma_offset);
		bf_set(lpfc_sli4_sge_type, sgl, LPFC_SGE_TYPE_DATA);

		sgl->sge_len = cpu_to_le32(dma_len);
		dma_offset += dma_len;

		sgl++;
		num_sge++;
	}

out:
	return num_sge;
}
2479
2480
2481
2482
2483
2484
2485
2486
2487
2488
2489
2490
2491
2492
2493
2494
2495
2496
2497
2498
2499
2500
2501
2502
2503
2504
2505
2506
2507
2508
2509
2510
2511
2512
2513
2514
2515
2516
/**
 * lpfc_bg_setup_sgl_prot - Setup BlockGuard SGL with protection data
 * @phba: The HBA for which this call is being executed.
 * @sc: pointer to scsi command we're working on.
 * @sgl: pointer to buffer list for protection groups.
 * @datacnt: number of segments of data that have been dma mapped.
 * @protcnt: number of segments of protection data that have been dma mapped.
 *
 * SLI-4 counterpart of lpfc_bg_setup_bpl_prot(): for every protection
 * group, a DISEED seed SGE, a DIF SGE pointing at the protection tuples,
 * then data SGEs covering protgrp_bytes of the data s/g list.  Data
 * segments may be split across protection groups (split_offset) and a
 * protection segment crossing a 4K boundary is consumed in two passes
 * (protgroup_offset / protgroup_remainder).
 *
 * Returns the number of SGEs added to the SGL; when the caller's
 * cfg_total_seg_cnt budget would be exceeded, a value past the limit is
 * returned so the caller fails the setup; 0 on invalid s/g input.
 **/
static int
lpfc_bg_setup_sgl_prot(struct lpfc_hba *phba, struct scsi_cmnd *sc,
		struct sli4_sge *sgl, int datacnt, int protcnt)
{
	struct scatterlist *sgde = NULL; /* s/g data entry */
	struct scatterlist *sgpe = NULL; /* s/g prot entry */
	struct sli4_sge_diseed *diseed = NULL;
	dma_addr_t dataphysaddr, protphysaddr;
	unsigned short curr_data = 0, curr_prot = 0;
	unsigned int split_offset;
	unsigned int protgroup_len, protgroup_offset = 0, protgroup_remainder;
	unsigned int protgrp_blks, protgrp_bytes;
	unsigned int remainder, subtotal;
	int status;
	unsigned char pgdone = 0, alldone = 0;
	unsigned blksize;
	uint32_t reftag;
	uint8_t txop, rxop;
	uint32_t dma_len;
#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
	uint32_t rc;
#endif
	uint32_t checking = 1;
	uint32_t dma_offset = 0;
	int num_sge = 0;

	sgpe = scsi_prot_sglist(sc);
	sgde = scsi_sglist(sc);

	if (!sgpe || !sgde) {
		lpfc_printf_log(phba, KERN_ERR, LOG_FCP,
				"9082 Invalid s/g entry: data=0x%p prot=0x%p\n",
				sgpe, sgde);
		return 0;
	}

	status = lpfc_sc_to_bg_opcodes(phba, sc, &txop, &rxop);
	if (status)
		goto out;

	/* extract some info from the scsi command */
	blksize = lpfc_cmd_blksize(sc);
	reftag = (uint32_t)scsi_get_lba(sc); /* Truncate LBA */

#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
	/* Debugfs error injection may corrupt reftag or swap opcodes */
	rc = lpfc_bg_err_inject(phba, sc, &reftag, NULL, 1);
	if (rc) {
		if (rc & BG_ERR_SWAP)
			lpfc_bg_err_opcodes(phba, sc, &txop, &rxop);
		if (rc & BG_ERR_CHECK)
			checking = 0;
	}
#endif

	split_offset = 0;
	do {
		/* Check to see if we ran out of space */
		if (num_sge >= (phba->cfg_total_seg_cnt - 2))
			return num_sge + 3;

		/* setup DISEED with what we have */
		diseed = (struct sli4_sge_diseed *) sgl;
		memset(diseed, 0, sizeof(struct sli4_sge_diseed));
		bf_set(lpfc_sli4_sge_type, sgl, LPFC_SGE_TYPE_DISEED);

		/* Endianness conversion if necessary */
		diseed->ref_tag = cpu_to_le32(reftag);
		diseed->ref_tag_tran = diseed->ref_tag;

		if (lpfc_cmd_protect(sc, LPFC_CHECK_PROTECT_GUARD)) {
			bf_set(lpfc_sli4_sge_dif_ce, diseed, checking);

		} else {
			bf_set(lpfc_sli4_sge_dif_ce, diseed, 0);
			/*
			 * When guard checking is disabled and both tx and
			 * rx would compute a CRC, switch to raw mode so
			 * the port moves the data without guarding at all.
			 */
			if (txop == BG_OP_IN_CRC_OUT_CRC) {
				txop = BG_OP_RAW_MODE;
				rxop = BG_OP_RAW_MODE;
			}
		}


		if (lpfc_cmd_protect(sc, LPFC_CHECK_PROTECT_REF))
			bf_set(lpfc_sli4_sge_dif_re, diseed, checking);
		else
			bf_set(lpfc_sli4_sge_dif_re, diseed, 0);

		/* setup DISEED with the rest of the info */
		bf_set(lpfc_sli4_sge_dif_optx, diseed, txop);
		bf_set(lpfc_sli4_sge_dif_oprx, diseed, rxop);

		bf_set(lpfc_sli4_sge_dif_ai, diseed, 1); /* auto-incr reftag */
		bf_set(lpfc_sli4_sge_dif_me, diseed, 0);

		/* Endianness conversion if necessary for DISEED */
		diseed->word2 = cpu_to_le32(diseed->word2);
		diseed->word3 = cpu_to_le32(diseed->word3);

		/* advance sgl and increment sge count */
		num_sge++;
		sgl++;

		/* setup the first BDE that points to protection buffer */
		protphysaddr = sg_dma_address(sgpe) + protgroup_offset;
		protgroup_len = sg_dma_len(sgpe) - protgroup_offset;

		/* must be integer multiple of the DIF block length (8B) */
		BUG_ON(protgroup_len % 8);

		/* Now setup DIF SGE */
		sgl->word2 = 0;
		bf_set(lpfc_sli4_sge_type, sgl, LPFC_SGE_TYPE_DIF);
		sgl->addr_hi = le32_to_cpu(putPaddrHigh(protphysaddr));
		sgl->addr_lo = le32_to_cpu(putPaddrLow(protphysaddr));
		sgl->word2 = cpu_to_le32(sgl->word2);

		protgrp_blks = protgroup_len / 8;
		protgrp_bytes = protgrp_blks * blksize;

		/* check if DIF SGE is crossing the 4K boundary; if so split */
		if ((sgl->addr_lo & 0xfff) + protgroup_len > 0x1000) {
			protgroup_remainder = 0x1000 - (sgl->addr_lo & 0xfff);
			protgroup_offset += protgroup_remainder;
			protgrp_blks = protgroup_remainder / 8;
			protgrp_bytes = protgrp_blks * blksize;
		} else {
			/* this prot segment fully consumed */
			protgroup_offset = 0;
			curr_prot++;
		}

		num_sge++;

		/* setup SGEs for the data blocks associated with this group */
		pgdone = 0;
		subtotal = 0;
		while (!pgdone) {
			/* Check to see if we ran out of space */
			if (num_sge >= phba->cfg_total_seg_cnt)
				return num_sge + 1;

			if (!sgde) {
				lpfc_printf_log(phba, KERN_ERR, LOG_BG,
					"9086 BLKGRD:%s Invalid data segment\n",
						__func__);
				return 0;
			}
			sgl++;
			dataphysaddr = sg_dma_address(sgde) + split_offset;

			remainder = sg_dma_len(sgde) - split_offset;

			if ((subtotal + remainder) <= protgrp_bytes) {
				/* we can use this whole segment */
				dma_len = remainder;
				split_offset = 0;

				if ((subtotal + remainder) == protgrp_bytes)
					pgdone = 1;
			} else {
				/* must split this segment with next prot grp */
				dma_len = protgrp_bytes - subtotal;
				split_offset += dma_len;
			}

			subtotal += dma_len;

			sgl->addr_lo = cpu_to_le32(putPaddrLow(dataphysaddr));
			sgl->addr_hi = cpu_to_le32(putPaddrHigh(dataphysaddr));
			bf_set(lpfc_sli4_sge_last, sgl, 0);
			bf_set(lpfc_sli4_sge_offset, sgl, dma_offset);
			bf_set(lpfc_sli4_sge_type, sgl, LPFC_SGE_TYPE_DATA);

			sgl->sge_len = cpu_to_le32(dma_len);
			dma_offset += dma_len;

			num_sge++;
			curr_data++;

			if (split_offset)
				break;

			/* Move to the next s/g segment if possible */
			sgde = sg_next(sgde);
		}

		if (protgroup_offset) {
			/* update the reference tag */
			reftag += protgrp_blks;
			sgl++;
			continue;
		}

		/* are we done ? */
		if (curr_prot == protcnt) {
			bf_set(lpfc_sli4_sge_last, sgl, 1);
			alldone = 1;
		} else if (curr_prot < protcnt) {
			/* advance to next prot buffer */
			sgpe = sg_next(sgpe);
			sgl++;

			/* update the reference tag */
			reftag += protgrp_blks;
		} else {
			/* if we're here, we have a bug */
			lpfc_printf_log(phba, KERN_ERR, LOG_BG,
				"9085 BLKGRD: bug in %s\n", __func__);
		}

	} while (!alldone);

out:

	return num_sge;
}
2738
2739
2740
2741
2742
2743
2744
2745
2746
2747
2748
2749
2750static int
2751lpfc_prot_group_type(struct lpfc_hba *phba, struct scsi_cmnd *sc)
2752{
2753 int ret = LPFC_PG_TYPE_INVALID;
2754 unsigned char op = scsi_get_prot_op(sc);
2755
2756 switch (op) {
2757 case SCSI_PROT_READ_STRIP:
2758 case SCSI_PROT_WRITE_INSERT:
2759 ret = LPFC_PG_TYPE_NO_DIF;
2760 break;
2761 case SCSI_PROT_READ_INSERT:
2762 case SCSI_PROT_WRITE_STRIP:
2763 case SCSI_PROT_READ_PASS:
2764 case SCSI_PROT_WRITE_PASS:
2765 ret = LPFC_PG_TYPE_DIF_BUF;
2766 break;
2767 default:
2768 if (phba)
2769 lpfc_printf_log(phba, KERN_ERR, LOG_FCP,
2770 "9021 Unsupported protection op:%d\n",
2771 op);
2772 break;
2773 }
2774 return ret;
2775}
2776
2777
2778
2779
2780
2781
2782
2783
2784
2785
2786
2787static int
2788lpfc_bg_scsi_adjust_dl(struct lpfc_hba *phba,
2789 struct lpfc_scsi_buf *lpfc_cmd)
2790{
2791 struct scsi_cmnd *sc = lpfc_cmd->pCmd;
2792 int fcpdl;
2793
2794 fcpdl = scsi_bufflen(sc);
2795
2796
2797 if (sc->sc_data_direction == DMA_FROM_DEVICE) {
2798
2799 if (scsi_get_prot_op(sc) == SCSI_PROT_READ_INSERT)
2800 return fcpdl;
2801
2802 } else {
2803
2804 if (scsi_get_prot_op(sc) == SCSI_PROT_WRITE_STRIP)
2805 return fcpdl;
2806 }
2807
2808
2809
2810
2811
2812
2813 fcpdl += (fcpdl / lpfc_cmd_blksize(sc)) * 8;
2814
2815 return fcpdl;
2816}
2817
2818
2819
2820
2821
2822
2823
2824
2825
2826
/**
 * lpfc_bg_scsi_prep_dma_buf_s3 - DMA mapping for a BlockGuard scsi buffer
 * to the SLI3 interface spec.
 * @phba: The Hba for which this call is being executed.
 * @lpfc_cmd: The scsi buffer which is going to be mapped.
 *
 * Does the pci dma mapping of the scatter-gather list of @lpfc_cmd->pCmd
 * and builds the buffer pointer list (BPL), including the protection-data
 * BDEs when the command carries a separate T10-DIF buffer.  On success it
 * also fills in the IOCB bdeSize/fcpDl fields from the adjusted data length.
 *
 * Return codes:
 *	1 - Error
 *	0 - Success
 */
static int
lpfc_bg_scsi_prep_dma_buf_s3(struct lpfc_hba *phba,
		struct lpfc_scsi_buf *lpfc_cmd)
{
	struct scsi_cmnd *scsi_cmnd = lpfc_cmd->pCmd;
	struct fcp_cmnd *fcp_cmnd = lpfc_cmd->fcp_cmnd;
	struct ulp_bde64 *bpl = lpfc_cmd->fcp_bpl;
	IOCB_t *iocb_cmd = &lpfc_cmd->cur_iocbq.iocb;
	uint32_t num_bde = 0;
	int datasegcnt, protsegcnt, datadir = scsi_cmnd->sc_data_direction;
	int prot_group_type = 0;
	int fcpdl;

	/*
	 * Start the lpfc command prep by bumping the bpl beyond the
	 * fcp_cmnd/fcp_rsp regions to the first data bde entry.
	 */
	bpl += 2;
	if (scsi_sg_count(scsi_cmnd)) {
		/*
		 * The driver stores the segment count returned from dma_map_sg
		 * because this is a count of dma-mappings used to map the
		 * use_sg pages.  They are not guaranteed to be the same for
		 * those architectures that implement an IOMMU.
		 */
		datasegcnt = dma_map_sg(&phba->pcidev->dev,
					scsi_sglist(scsi_cmnd),
					scsi_sg_count(scsi_cmnd), datadir);
		if (unlikely(!datasegcnt))
			return 1;

		lpfc_cmd->seg_cnt = datasegcnt;

		/* First check if data segment count from SCSI Layer is good */
		if (lpfc_cmd->seg_cnt > phba->cfg_sg_seg_cnt)
			goto err;

		prot_group_type = lpfc_prot_group_type(phba, scsi_cmnd);

		switch (prot_group_type) {
		case LPFC_PG_TYPE_NO_DIF:

			/* Account for the two protection descriptor entries */
			if ((lpfc_cmd->seg_cnt + 2) > phba->cfg_total_seg_cnt)
				goto err;

			num_bde = lpfc_bg_setup_bpl(phba, scsi_cmnd, bpl,
					datasegcnt);

			/* we must have 2 or more entries in the buffer list */
			if (num_bde < 2)
				goto err;
			break;

		case LPFC_PG_TYPE_DIF_BUF:
			/*
			 * Protection buffers are passed separately by the
			 * SCSI layer, so they need their own dma mapping
			 * before the combined BPL can be built.
			 */
			protsegcnt = dma_map_sg(&phba->pcidev->dev,
					scsi_prot_sglist(scsi_cmnd),
					scsi_prot_sg_count(scsi_cmnd), datadir);
			if (unlikely(!protsegcnt)) {
				scsi_dma_unmap(scsi_cmnd);
				return 1;
			}

			lpfc_cmd->prot_seg_cnt = protsegcnt;

			/*
			 * Worst case: every protection segment can consume
			 * up to 4 BPL entries, and 2 entries are reserved
			 * for the command/response descriptors.
			 */
			if ((lpfc_cmd->prot_seg_cnt * 4) >
			    (phba->cfg_total_seg_cnt - 2))
				goto err;

			num_bde = lpfc_bg_setup_bpl_prot(phba, scsi_cmnd, bpl,
					datasegcnt, protsegcnt);

			/* we must have 3 or more entries in the buffer list */
			if ((num_bde < 3) ||
			    (num_bde > phba->cfg_total_seg_cnt))
				goto err;
			break;

		case LPFC_PG_TYPE_INVALID:
		default:
			scsi_dma_unmap(scsi_cmnd);
			lpfc_cmd->seg_cnt = 0;

			lpfc_printf_log(phba, KERN_ERR, LOG_FCP,
					"9022 Unexpected protection group %i\n",
					prot_group_type);
			return 1;
		}
	}

	/*
	 * Finish initializing those IOCB fields that are dependent on the
	 * scsi_cmnd request_buffer.  bdeSize is explicitly reinitialized
	 * since iocb memory resources are reused across commands.
	 */
	iocb_cmd->un.fcpi64.bdl.bdeSize = (2 * sizeof(struct ulp_bde64));
	iocb_cmd->un.fcpi64.bdl.bdeSize += (num_bde * sizeof(struct ulp_bde64));
	iocb_cmd->ulpBdeCount = 1;
	iocb_cmd->ulpLe = 1;

	fcpdl = lpfc_bg_scsi_adjust_dl(phba, lpfc_cmd);
	fcp_cmnd->fcpDl = be32_to_cpu(fcpdl);

	/*
	 * Due to difference in data length between DIF/non-DIF paths,
	 * we need to set word 4 of the IOCB here.
	 */
	iocb_cmd->un.fcpi.fcpi_parm = fcpdl;

	return 0;
err:
	if (lpfc_cmd->seg_cnt)
		scsi_dma_unmap(scsi_cmnd);
	if (lpfc_cmd->prot_seg_cnt)
		dma_unmap_sg(&phba->pcidev->dev, scsi_prot_sglist(scsi_cmnd),
			     scsi_prot_sg_count(scsi_cmnd),
			     scsi_cmnd->sc_data_direction);

	lpfc_printf_log(phba, KERN_ERR, LOG_FCP,
			"9023 Cannot setup S/G List for HBA"
			"IO segs %d/%d BPL %d SCSI %d: %d %d\n",
			lpfc_cmd->seg_cnt, lpfc_cmd->prot_seg_cnt,
			phba->cfg_total_seg_cnt, phba->cfg_sg_seg_cnt,
			prot_group_type, num_bde);

	lpfc_cmd->seg_cnt = 0;
	lpfc_cmd->prot_seg_cnt = 0;
	return 1;
}
2964
2965
2966
2967
2968
2969
2970static uint16_t
2971lpfc_bg_crc(uint8_t *data, int count)
2972{
2973 uint16_t crc = 0;
2974 uint16_t x;
2975
2976 crc = crc_t10dif(data, count);
2977 x = cpu_to_be16(crc);
2978 return x;
2979}
2980
2981
2982
2983
2984
2985
2986static uint16_t
2987lpfc_bg_csum(uint8_t *data, int count)
2988{
2989 uint16_t ret;
2990
2991 ret = ip_compute_csum(data, count);
2992 return ret;
2993}
2994
2995
2996
2997
2998
/*
 * lpfc_calc_bg_err - Re-verify BlockGuard protection data in software.
 * @phba: The Hba for which this call is being executed.
 * @lpfc_cmd: The scsi buffer whose command reported a DMA failure.
 *
 * Invoked when the hardware flagged a DIF-related DMA error but the exact
 * failing check is unknown.  Walks the protection scatter list block by
 * block, recomputing the guard tag over the data blocks and comparing
 * ref/app tags, to classify the failure as a guard, reference-tag or
 * application-tag error, and builds the matching CHECK CONDITION sense
 * data in @lpfc_cmd->pCmd.
 */
static void
lpfc_calc_bg_err(struct lpfc_hba *phba, struct lpfc_scsi_buf *lpfc_cmd)
{
	struct scatterlist *sgpe; /* s/g prot entry */
	struct scatterlist *sgde; /* s/g data entry */
	struct scsi_cmnd *cmd = lpfc_cmd->pCmd;
	struct scsi_dif_tuple *src = NULL;
	uint8_t *data_src = NULL;
	uint16_t guard_tag, guard_type;
	uint16_t start_app_tag, app_tag;
	uint32_t start_ref_tag, ref_tag;
	int prot, protsegcnt;
	int err_type, len, data_len;
	int chk_ref, chk_app, chk_guard;
	uint16_t sum;
	unsigned blksize;

	/* Default to a guard error if nothing below narrows it down */
	err_type = BGS_GUARD_ERR_MASK;
	sum = 0;
	guard_tag = 0;

	/* No protection data to examine for strip/insert/normal ops */
	prot = scsi_get_prot_op(cmd);
	if ((prot == SCSI_PROT_READ_STRIP) ||
	    (prot == SCSI_PROT_WRITE_INSERT) ||
	    (prot == SCSI_PROT_NORMAL))
		goto out;

	/* Only ref tag (always) and guard tag (conditionally) are checked */
	chk_ref = 1;
	chk_app = 0;
	chk_guard = 0;

	/* Protection data provided by the SCSI midlayer */
	sgpe = scsi_prot_sglist(cmd);
	protsegcnt = lpfc_cmd->prot_seg_cnt;

	if (sgpe && protsegcnt) {

		/*
		 * Guard tag verification is only attempted while the data
		 * segment length is a multiple of the logical block size.
		 */
		sgde = scsi_sglist(cmd);
		blksize = lpfc_cmd_blksize(cmd);
		data_src = (uint8_t *)sg_virt(sgde);
		data_len = sgde->length;
		if ((data_len & (blksize - 1)) == 0)
			chk_guard = 1;
		/* NOTE(review): guard_type is captured but not consulted
		 * here; the csum-vs-crc choice comes from
		 * lpfc_cmd_guard_csum() below — confirm intentional. */
		guard_type = scsi_host_get_guard(cmd->device->host);

		src = (struct scsi_dif_tuple *)sg_virt(sgpe);
		start_ref_tag = (uint32_t)scsi_get_lba(cmd); /* truncated LBA */
		start_app_tag = src->app_tag;
		len = sgpe->length;
		while (src && protsegcnt) {
			while (len) {

				/*
				 * An all-ones ref or app tag marks a block
				 * whose protection data is not to be checked
				 * (escape value); skip it but keep counting.
				 */
				if ((src->ref_tag == 0xffffffff) ||
				    (src->app_tag == 0xffff)) {
					start_ref_tag++;
					goto skipit;
				}

				/* Guard tag checking */
				if (chk_guard) {
					guard_tag = src->guard_tag;
					if (lpfc_cmd_guard_csum(cmd))
						sum = lpfc_bg_csum(data_src,
								   blksize);
					else
						sum = lpfc_bg_crc(data_src,
								  blksize);
					if ((guard_tag != sum)) {
						err_type = BGS_GUARD_ERR_MASK;
						goto out;
					}
				}

				/* Reference tag checking */
				ref_tag = be32_to_cpu(src->ref_tag);
				if (chk_ref && (ref_tag != start_ref_tag)) {
					err_type = BGS_REFTAG_ERR_MASK;
					goto out;
				}
				start_ref_tag++;

				/* App tag checking (currently disabled) */
				app_tag = src->app_tag;
				if (chk_app && (app_tag != start_app_tag)) {
					err_type = BGS_APPTAG_ERR_MASK;
					goto out;
				}
skipit:
				len -= sizeof(struct scsi_dif_tuple);
				if (len < 0)
					len = 0;
				src++;

				data_src += blksize;
				data_len -= blksize;

				/*
				 * At the end of a data segment, advance to
				 * the next one.  Guard checking stays off
				 * unless the new segment is again a whole
				 * number of blocks.
				 */
				if (chk_guard && (data_len == 0)) {
					chk_guard = 0;
					sgde = sg_next(sgde);
					if (!sgde)
						goto out;

					data_src = (uint8_t *)sg_virt(sgde);
					data_len = sgde->length;
					if ((data_len & (blksize - 1)) == 0)
						chk_guard = 1;
				}
			}

			/* Goto the next protection data segment */
			sgpe = sg_next(sgpe);
			if (sgpe) {
				src = (struct scsi_dif_tuple *)sg_virt(sgpe);
				len = sgpe->length;
			} else {
				src = NULL;
			}
			protsegcnt--;
		}
	}
out:
	/* Translate the detected error into CHECK CONDITION sense data */
	if (err_type == BGS_GUARD_ERR_MASK) {
		scsi_build_sense_buffer(1, cmd->sense_buffer, ILLEGAL_REQUEST,
					0x10, 0x1);
		cmd->result = DRIVER_SENSE << 24
			| ScsiResult(DID_ABORT, SAM_STAT_CHECK_CONDITION);
		phba->bg_guard_err_cnt++;
		lpfc_printf_log(phba, KERN_WARNING, LOG_FCP | LOG_BG,
				"9069 BLKGRD: LBA %lx grd_tag error %x != %x\n",
				(unsigned long)scsi_get_lba(cmd),
				sum, guard_tag);

	} else if (err_type == BGS_REFTAG_ERR_MASK) {
		scsi_build_sense_buffer(1, cmd->sense_buffer, ILLEGAL_REQUEST,
					0x10, 0x3);
		cmd->result = DRIVER_SENSE << 24
			| ScsiResult(DID_ABORT, SAM_STAT_CHECK_CONDITION);

		phba->bg_reftag_err_cnt++;
		lpfc_printf_log(phba, KERN_WARNING, LOG_FCP | LOG_BG,
				"9066 BLKGRD: LBA %lx ref_tag error %x != %x\n",
				(unsigned long)scsi_get_lba(cmd),
				ref_tag, start_ref_tag);

	} else if (err_type == BGS_APPTAG_ERR_MASK) {
		scsi_build_sense_buffer(1, cmd->sense_buffer, ILLEGAL_REQUEST,
					0x10, 0x2);
		cmd->result = DRIVER_SENSE << 24
			| ScsiResult(DID_ABORT, SAM_STAT_CHECK_CONDITION);

		phba->bg_apptag_err_cnt++;
		lpfc_printf_log(phba, KERN_WARNING, LOG_FCP | LOG_BG,
				"9041 BLKGRD: LBA %lx app_tag error %x != %x\n",
				(unsigned long)scsi_get_lba(cmd),
				app_tag, start_app_tag);
	}
}
3171
3172
3173
3174
3175
3176
3177
3178
3179
3180
3181
3182
3183
3184
/*
 * lpfc_parse_bg_err - Parse the BlockGuard error status reported by the HBA
 * @phba: The Hba for which this call is being executed.
 * @lpfc_cmd: The scsi command which was returned with a BG error.
 * @pIocbOut: The response IOCB carrying the sli3_bg status words.
 *
 * Decodes bgstat/bghm from the response IOCB, builds CHECK CONDITION
 * sense data for guard/ref/app tag failures, bumps the per-hba error
 * counters, and (when present) converts the high-water mark into the
 * failing sector reported in the sense information field.
 *
 * Return value:
 *	-1 - Internal error (bad profile, unrecognized bg error)
 *	 1 - Failure checking condition detected and set in cmd->result
 *	 0 - No error detected here; a software re-check is performed
 */
static int
lpfc_parse_bg_err(struct lpfc_hba *phba, struct lpfc_scsi_buf *lpfc_cmd,
		struct lpfc_iocbq *pIocbOut)
{
	struct scsi_cmnd *cmd = lpfc_cmd->pCmd;
	struct sli3_bg_fields *bgf = &pIocbOut->iocb.unsli3.sli3_bg;
	int ret = 0;
	uint32_t bghm = bgf->bghm;
	uint32_t bgstat = bgf->bgstat;
	uint64_t failing_sector = 0;

	/* One-shot debugfs capture of the failing data/DIF buffers */
	spin_lock(&_dump_buf_lock);
	if (!_dump_buf_done) {
		lpfc_printf_log(phba, KERN_ERR, LOG_BG, "9070 BLKGRD: Saving"
			" Data for %u blocks to debugfs\n",
				(cmd->cmnd[7] << 8 | cmd->cmnd[8]));
		lpfc_debug_save_data(phba, cmd);

		/* If we have a prot sgl, save the DIF buffer */
		if (lpfc_prot_group_type(phba, cmd) ==
				LPFC_PG_TYPE_DIF_BUF) {
			lpfc_printf_log(phba, KERN_ERR, LOG_BG, "9071 BLKGRD: "
				"Saving DIF for %u blocks to debugfs\n",
				(cmd->cmnd[7] << 8 | cmd->cmnd[8]));
			lpfc_debug_save_dif(phba, cmd);
		}

		_dump_buf_done = 1;
	}
	spin_unlock(&_dump_buf_lock);

	if (lpfc_bgs_get_invalid_prof(bgstat)) {
		cmd->result = ScsiResult(DID_ERROR, 0);
		lpfc_printf_log(phba, KERN_WARNING, LOG_FCP | LOG_BG,
				"9072 BLKGRD: Invalid BG Profile in cmd"
				" 0x%x lba 0x%llx blk cnt 0x%x "
				"bgstat=x%x bghm=x%x\n", cmd->cmnd[0],
				(unsigned long long)scsi_get_lba(cmd),
				blk_rq_sectors(cmd->request), bgstat, bghm);
		ret = (-1);
		goto out;
	}

	if (lpfc_bgs_get_uninit_dif_block(bgstat)) {
		cmd->result = ScsiResult(DID_ERROR, 0);
		lpfc_printf_log(phba, KERN_WARNING, LOG_FCP | LOG_BG,
				"9073 BLKGRD: Invalid BG PDIF Block in cmd"
				" 0x%x lba 0x%llx blk cnt 0x%x "
				"bgstat=x%x bghm=x%x\n", cmd->cmnd[0],
				(unsigned long long)scsi_get_lba(cmd),
				blk_rq_sectors(cmd->request), bgstat, bghm);
		ret = (-1);
		goto out;
	}

	if (lpfc_bgs_get_guard_err(bgstat)) {
		ret = 1;

		/* ASC/ASCQ 0x10/0x1: LOGICAL BLOCK GUARD CHECK FAILED */
		scsi_build_sense_buffer(1, cmd->sense_buffer, ILLEGAL_REQUEST,
				0x10, 0x1);
		cmd->result = DRIVER_SENSE << 24
			| ScsiResult(DID_ABORT, SAM_STAT_CHECK_CONDITION);
		phba->bg_guard_err_cnt++;
		lpfc_printf_log(phba, KERN_WARNING, LOG_FCP | LOG_BG,
				"9055 BLKGRD: Guard Tag error in cmd"
				" 0x%x lba 0x%llx blk cnt 0x%x "
				"bgstat=x%x bghm=x%x\n", cmd->cmnd[0],
				(unsigned long long)scsi_get_lba(cmd),
				blk_rq_sectors(cmd->request), bgstat, bghm);
	}

	if (lpfc_bgs_get_reftag_err(bgstat)) {
		ret = 1;

		/* ASC/ASCQ 0x10/0x3: LOGICAL BLOCK REF TAG CHECK FAILED */
		scsi_build_sense_buffer(1, cmd->sense_buffer, ILLEGAL_REQUEST,
				0x10, 0x3);
		cmd->result = DRIVER_SENSE << 24
			| ScsiResult(DID_ABORT, SAM_STAT_CHECK_CONDITION);

		phba->bg_reftag_err_cnt++;
		lpfc_printf_log(phba, KERN_WARNING, LOG_FCP | LOG_BG,
				"9056 BLKGRD: Ref Tag error in cmd"
				" 0x%x lba 0x%llx blk cnt 0x%x "
				"bgstat=x%x bghm=x%x\n", cmd->cmnd[0],
				(unsigned long long)scsi_get_lba(cmd),
				blk_rq_sectors(cmd->request), bgstat, bghm);
	}

	if (lpfc_bgs_get_apptag_err(bgstat)) {
		ret = 1;

		/* ASC/ASCQ 0x10/0x2: LOGICAL BLOCK APP TAG CHECK FAILED */
		scsi_build_sense_buffer(1, cmd->sense_buffer, ILLEGAL_REQUEST,
				0x10, 0x2);
		cmd->result = DRIVER_SENSE << 24
			| ScsiResult(DID_ABORT, SAM_STAT_CHECK_CONDITION);

		phba->bg_apptag_err_cnt++;
		lpfc_printf_log(phba, KERN_WARNING, LOG_FCP | LOG_BG,
				"9061 BLKGRD: App Tag error in cmd"
				" 0x%x lba 0x%llx blk cnt 0x%x "
				"bgstat=x%x bghm=x%x\n", cmd->cmnd[0],
				(unsigned long long)scsi_get_lba(cmd),
				blk_rq_sectors(cmd->request), bgstat, bghm);
	}

	if (lpfc_bgs_get_hi_water_mark_present(bgstat)) {
		/*
		 * Setup the information sense-descriptor format so the
		 * failing sector can be reported: descriptor type 0,
		 * length 0xa, VALID bit set, followed by the 8-byte
		 * big-endian failing LBA at offset 12.
		 */
		cmd->sense_buffer[7] = 0xc;   /* Additional sense length */
		cmd->sense_buffer[8] = 0;     /* Information descriptor type */
		cmd->sense_buffer[9] = 0xa;   /* Additional descriptor length */
		cmd->sense_buffer[10] = 0x80; /* Validity bit */

		/* bghm is a byte count: convert it to a sector offset */
		switch (scsi_get_prot_op(cmd)) {
		case SCSI_PROT_READ_INSERT:
		case SCSI_PROT_WRITE_STRIP:
			/* No DIF tuples on the wire for these ops */
			bghm /= cmd->device->sector_size;
			break;
		case SCSI_PROT_READ_STRIP:
		case SCSI_PROT_WRITE_INSERT:
		case SCSI_PROT_READ_PASS:
		case SCSI_PROT_WRITE_PASS:
			/* Each block carries an 8-byte DIF tuple */
			bghm /= (cmd->device->sector_size +
				sizeof(struct scsi_dif_tuple));
			break;
		}

		failing_sector = scsi_get_lba(cmd);
		failing_sector += bghm;

		/* Descriptor Information */
		put_unaligned_be64(failing_sector, &cmd->sense_buffer[12]);
	}

	if (!ret) {
		/* No error was reported - problem in FW? */
		lpfc_printf_log(phba, KERN_WARNING, LOG_FCP | LOG_BG,
				"9057 BLKGRD: Unknown error in cmd"
				" 0x%x lba 0x%llx blk cnt 0x%x "
				"bgstat=x%x bghm=x%x\n", cmd->cmnd[0],
				(unsigned long long)scsi_get_lba(cmd),
				blk_rq_sectors(cmd->request), bgstat, bghm);

		/* Calculate what type of error it was */
		lpfc_calc_bg_err(phba, lpfc_cmd);
	}
out:
	return ret;
}
3339
3340
3341
3342
3343
3344
3345
3346
3347
3348
3349
3350
3351
/**
 * lpfc_scsi_prep_dma_buf_s4 - DMA mapping for scsi buffer to SLI4 IF spec
 * @phba: The Hba for which this call is being executed.
 * @lpfc_cmd: The scsi buffer which is going to be mapped.
 *
 * Does the pci dma mapping for the scatter-gather list of @lpfc_cmd->pCmd
 * and builds the SLI4 SGE chain following the fixed fcp_cmnd/fcp_rsp SGEs.
 * The fcpDl/fcpi_parm fields are set from the full SCSI buffer length.
 *
 * Return codes:
 *	1 - Error
 *	0 - Success
 */
static int
lpfc_scsi_prep_dma_buf_s4(struct lpfc_hba *phba, struct lpfc_scsi_buf *lpfc_cmd)
{
	struct scsi_cmnd *scsi_cmnd = lpfc_cmd->pCmd;
	struct scatterlist *sgel = NULL;
	struct fcp_cmnd *fcp_cmnd = lpfc_cmd->fcp_cmnd;
	struct sli4_sge *sgl = (struct sli4_sge *)lpfc_cmd->fcp_bpl;
	struct sli4_sge *first_data_sgl;
	IOCB_t *iocb_cmd = &lpfc_cmd->cur_iocbq.iocb;
	dma_addr_t physaddr;
	uint32_t num_bde = 0;
	uint32_t dma_len;
	uint32_t dma_offset = 0;
	int nseg;
	struct ulp_bde64 *bde;

	/*
	 * There are three possibilities here - use scatter-gather segment,
	 * use the single mapping, or neither.  Start the lpfc command prep
	 * by bumping the sgl beyond the fcp_cmnd and fcp_rsp regions to the
	 * first data sge entry.
	 */
	if (scsi_sg_count(scsi_cmnd)) {
		/*
		 * The driver stores the segment count returned from dma_map_sg
		 * because this is a count of dma-mappings used to map the
		 * use_sg pages.  They are not guaranteed to be the same for
		 * those architectures that implement an IOMMU.
		 */
		nseg = scsi_dma_map(scsi_cmnd);
		if (unlikely(!nseg))
			return 1;
		sgl += 1;
		/* clear the last flag in the fcp_rsp map entry */
		sgl->word2 = le32_to_cpu(sgl->word2);
		bf_set(lpfc_sli4_sge_last, sgl, 0);
		sgl->word2 = cpu_to_le32(sgl->word2);
		sgl += 1;
		first_data_sgl = sgl;
		lpfc_cmd->seg_cnt = nseg;
		if (lpfc_cmd->seg_cnt > phba->cfg_sg_seg_cnt) {
			lpfc_printf_log(phba, KERN_ERR, LOG_BG, "9074 BLKGRD:"
				" %s: Too many sg segments from "
				"dma_map_sg.  Config %d, seg_cnt %d\n",
				__func__, phba->cfg_sg_seg_cnt,
			       lpfc_cmd->seg_cnt);
			lpfc_cmd->seg_cnt = 0;
			scsi_dma_unmap(scsi_cmnd);
			return 1;
		}

		/*
		 * The driver established a maximum scatter-gather segment
		 * count during probe that limits the number of sg elements
		 * in any single scsi command.  Just run through the seg_cnt
		 * and format the sge's.  When using SLI-3 the driver will
		 * try to fit all the BDEs into the IOCB and LEB's needed.
		 *
		 * NOTE: The sgl entries within the SGE are in little-endian
		 * format, so word2 must be byte-swapped around the bit-field
		 * updates.
		 */
		scsi_for_each_sg(scsi_cmnd, sgel, nseg, num_bde) {
			physaddr = sg_dma_address(sgel);
			dma_len = sg_dma_len(sgel);
			sgl->addr_lo = cpu_to_le32(putPaddrLow(physaddr));
			sgl->addr_hi = cpu_to_le32(putPaddrHigh(physaddr));
			sgl->word2 = le32_to_cpu(sgl->word2);
			if ((num_bde + 1) == nseg)
				bf_set(lpfc_sli4_sge_last, sgl, 1);
			else
				bf_set(lpfc_sli4_sge_last, sgl, 0);
			bf_set(lpfc_sli4_sge_offset, sgl, dma_offset);
			bf_set(lpfc_sli4_sge_type, sgl, LPFC_SGE_TYPE_DATA);
			sgl->word2 = cpu_to_le32(sgl->word2);
			sgl->sge_len = cpu_to_le32(dma_len);
			dma_offset += dma_len;
			sgl++;
		}
		/* setup the performance hint (first data BDE) if enabled */
		if (phba->sli3_options & LPFC_SLI4_PERFH_ENABLED) {
			bde = (struct ulp_bde64 *)
					&(iocb_cmd->unsli3.sli3Words[5]);
			bde->addrLow = first_data_sgl->addr_lo;
			bde->addrHigh = first_data_sgl->addr_hi;
			bde->tus.f.bdeSize =
					le32_to_cpu(first_data_sgl->sge_len);
			bde->tus.f.bdeFlags = BUFF_TYPE_BDE_64;
			bde->tus.w = cpu_to_le32(bde->tus.w);
		}
	} else {
		sgl += 1;
		/* no data: set the last flag in the fcp_rsp map entry */
		sgl->word2 = le32_to_cpu(sgl->word2);
		bf_set(lpfc_sli4_sge_last, sgl, 1);
		sgl->word2 = cpu_to_le32(sgl->word2);
	}

	/*
	 * Finish initializing those IOCB fields that are dependent on the
	 * scsi_cmnd request_buffer.  Note that for SLI-2 the bdeSize is
	 * explicitly reinitialized.  All iocb memory resources are reused.
	 */
	fcp_cmnd->fcpDl = cpu_to_be32(scsi_bufflen(scsi_cmnd));

	/*
	 * Due to difference in data length between DIF/non-DIF paths,
	 * we need to set word 4 of the IOCB here.
	 */
	iocb_cmd->un.fcpi.fcpi_parm = scsi_bufflen(scsi_cmnd);

	/*
	 * If the OAS driver feature is enabled and the lun is enabled for
	 * OAS, set the oas iocb related flags.
	 */
	if ((phba->cfg_fof) && ((struct lpfc_device_data *)
		scsi_cmnd->device->hostdata)->oas_enabled)
		lpfc_cmd->cur_iocbq.iocb_flag |= (LPFC_IO_OAS | LPFC_IO_FOF);
	return 0;
}
3472
3473
3474
3475
3476
3477
3478
3479
3480
3481
/**
 * lpfc_bg_scsi_prep_dma_buf_s4 - DMA mapping for a BlockGuard scsi buffer
 * to the SLI4 interface spec.
 * @phba: The Hba for which this call is being executed.
 * @lpfc_cmd: The scsi buffer which is going to be mapped.
 *
 * Does the pci dma mapping for the scatter-gather list of @lpfc_cmd->pCmd
 * and builds the SGE chain including the DIF protection SGEs, then tags
 * the iocb with the STRIP/INSERT/PASS flag matching the protection op.
 *
 * Return codes:
 *	1 - Error
 *	0 - Success
 */
static int
lpfc_bg_scsi_prep_dma_buf_s4(struct lpfc_hba *phba,
		struct lpfc_scsi_buf *lpfc_cmd)
{
	struct scsi_cmnd *scsi_cmnd = lpfc_cmd->pCmd;
	struct fcp_cmnd *fcp_cmnd = lpfc_cmd->fcp_cmnd;
	struct sli4_sge *sgl = (struct sli4_sge *)(lpfc_cmd->fcp_bpl);
	IOCB_t *iocb_cmd = &lpfc_cmd->cur_iocbq.iocb;
	uint32_t num_sge = 0;
	int datasegcnt, protsegcnt, datadir = scsi_cmnd->sc_data_direction;
	int prot_group_type = 0;
	int fcpdl;

	/*
	 * Start the lpfc command prep by bumping the sgl beyond the
	 * fcp_cmnd/fcp_rsp regions to the first data sge entry.
	 */
	if (scsi_sg_count(scsi_cmnd)) {
		/*
		 * The driver stores the segment count returned from dma_map_sg
		 * because this is a count of dma-mappings used to map the
		 * use_sg pages.  They are not guaranteed to be the same for
		 * those architectures that implement an IOMMU.
		 */
		datasegcnt = dma_map_sg(&phba->pcidev->dev,
					scsi_sglist(scsi_cmnd),
					scsi_sg_count(scsi_cmnd), datadir);
		if (unlikely(!datasegcnt))
			return 1;

		sgl += 1;
		/* clear the last flag in the fcp_rsp map entry */
		sgl->word2 = le32_to_cpu(sgl->word2);
		bf_set(lpfc_sli4_sge_last, sgl, 0);
		sgl->word2 = cpu_to_le32(sgl->word2);

		sgl += 1;
		lpfc_cmd->seg_cnt = datasegcnt;

		/* First check if data segment count from SCSI Layer is good */
		if (lpfc_cmd->seg_cnt > phba->cfg_sg_seg_cnt)
			goto err;

		prot_group_type = lpfc_prot_group_type(phba, scsi_cmnd);

		switch (prot_group_type) {
		case LPFC_PG_TYPE_NO_DIF:
			/* Here we need to add a DISEED to the count */
			if ((lpfc_cmd->seg_cnt + 1) > phba->cfg_total_seg_cnt)
				goto err;

			num_sge = lpfc_bg_setup_sgl(phba, scsi_cmnd, sgl,
					datasegcnt);

			/* we should have 2 or more entries in buffer list */
			if (num_sge < 2)
				goto err;
			break;

		case LPFC_PG_TYPE_DIF_BUF:
			/*
			 * Protection buffers are passed separately by the
			 * SCSI layer, so they need their own dma mapping
			 * before the combined SGL can be built.
			 */
			protsegcnt = dma_map_sg(&phba->pcidev->dev,
					scsi_prot_sglist(scsi_cmnd),
					scsi_prot_sg_count(scsi_cmnd), datadir);
			if (unlikely(!protsegcnt)) {
				scsi_dma_unmap(scsi_cmnd);
				return 1;
			}

			lpfc_cmd->prot_seg_cnt = protsegcnt;
			/*
			 * There is a minimun of 3 SGEs used for every
			 * protection data segment.
			 */
			if ((lpfc_cmd->prot_seg_cnt * 3) >
			    (phba->cfg_total_seg_cnt - 2))
				goto err;

			num_sge = lpfc_bg_setup_sgl_prot(phba, scsi_cmnd, sgl,
					datasegcnt, protsegcnt);

			/* we should have 3 or more entries in buffer list */
			if ((num_sge < 3) ||
			    (num_sge > phba->cfg_total_seg_cnt))
				goto err;
			break;

		case LPFC_PG_TYPE_INVALID:
		default:
			scsi_dma_unmap(scsi_cmnd);
			lpfc_cmd->seg_cnt = 0;

			lpfc_printf_log(phba, KERN_ERR, LOG_FCP,
					"9083 Unexpected protection group %i\n",
					prot_group_type);
			return 1;
		}
	}

	/* Record the protection operation in the iocb flags */
	switch (scsi_get_prot_op(scsi_cmnd)) {
	case SCSI_PROT_WRITE_STRIP:
	case SCSI_PROT_READ_STRIP:
		lpfc_cmd->cur_iocbq.iocb_flag |= LPFC_IO_DIF_STRIP;
		break;
	case SCSI_PROT_WRITE_INSERT:
	case SCSI_PROT_READ_INSERT:
		lpfc_cmd->cur_iocbq.iocb_flag |= LPFC_IO_DIF_INSERT;
		break;
	case SCSI_PROT_WRITE_PASS:
	case SCSI_PROT_READ_PASS:
		lpfc_cmd->cur_iocbq.iocb_flag |= LPFC_IO_DIF_PASS;
		break;
	}

	fcpdl = lpfc_bg_scsi_adjust_dl(phba, lpfc_cmd);
	fcp_cmnd->fcpDl = be32_to_cpu(fcpdl);

	/*
	 * Due to difference in data length between DIF/non-DIF paths,
	 * we need to set word 4 of the IOCB here.
	 */
	iocb_cmd->un.fcpi.fcpi_parm = fcpdl;

	/*
	 * If the OAS driver feature is enabled and the lun is enabled for
	 * OAS, set the oas iocb related flags.
	 */
	if ((phba->cfg_fof) && ((struct lpfc_device_data *)
		scsi_cmnd->device->hostdata)->oas_enabled)
		lpfc_cmd->cur_iocbq.iocb_flag |= (LPFC_IO_OAS | LPFC_IO_FOF);

	return 0;
err:
	if (lpfc_cmd->seg_cnt)
		scsi_dma_unmap(scsi_cmnd);
	if (lpfc_cmd->prot_seg_cnt)
		dma_unmap_sg(&phba->pcidev->dev, scsi_prot_sglist(scsi_cmnd),
			     scsi_prot_sg_count(scsi_cmnd),
			     scsi_cmnd->sc_data_direction);

	lpfc_printf_log(phba, KERN_ERR, LOG_FCP,
			"9084 Cannot setup S/G List for HBA"
			"IO segs %d/%d SGL %d SCSI %d: %d %d\n",
			lpfc_cmd->seg_cnt, lpfc_cmd->prot_seg_cnt,
			phba->cfg_total_seg_cnt, phba->cfg_sg_seg_cnt,
			prot_group_type, num_sge);

	lpfc_cmd->seg_cnt = 0;
	lpfc_cmd->prot_seg_cnt = 0;
	return 1;
}
3637
3638
3639
3640
3641
3642
3643
3644
3645
3646
3647
3648
3649
3650static inline int
3651lpfc_scsi_prep_dma_buf(struct lpfc_hba *phba, struct lpfc_scsi_buf *lpfc_cmd)
3652{
3653 return phba->lpfc_scsi_prep_dma_buf(phba, lpfc_cmd);
3654}
3655
3656
3657
3658
3659
3660
3661
3662
3663
3664
3665
3666
3667
3668
3669static inline int
3670lpfc_bg_scsi_prep_dma_buf(struct lpfc_hba *phba, struct lpfc_scsi_buf *lpfc_cmd)
3671{
3672 return phba->lpfc_bg_scsi_prep_dma_buf(phba, lpfc_cmd);
3673}
3674
3675
3676
3677
3678
3679
3680
3681
3682
3683
3684
3685static void
3686lpfc_send_scsi_error_event(struct lpfc_hba *phba, struct lpfc_vport *vport,
3687 struct lpfc_scsi_buf *lpfc_cmd, struct lpfc_iocbq *rsp_iocb) {
3688 struct scsi_cmnd *cmnd = lpfc_cmd->pCmd;
3689 struct fcp_rsp *fcprsp = lpfc_cmd->fcp_rsp;
3690 uint32_t resp_info = fcprsp->rspStatus2;
3691 uint32_t scsi_status = fcprsp->rspStatus3;
3692 uint32_t fcpi_parm = rsp_iocb->iocb.un.fcpi.fcpi_parm;
3693 struct lpfc_fast_path_event *fast_path_evt = NULL;
3694 struct lpfc_nodelist *pnode = lpfc_cmd->rdata->pnode;
3695 unsigned long flags;
3696
3697 if (!pnode || !NLP_CHK_NODE_ACT(pnode))
3698 return;
3699
3700
3701 if ((cmnd->result == SAM_STAT_TASK_SET_FULL) ||
3702 (cmnd->result == SAM_STAT_BUSY)) {
3703 fast_path_evt = lpfc_alloc_fast_evt(phba);
3704 if (!fast_path_evt)
3705 return;
3706 fast_path_evt->un.scsi_evt.event_type =
3707 FC_REG_SCSI_EVENT;
3708 fast_path_evt->un.scsi_evt.subcategory =
3709 (cmnd->result == SAM_STAT_TASK_SET_FULL) ?
3710 LPFC_EVENT_QFULL : LPFC_EVENT_DEVBSY;
3711 fast_path_evt->un.scsi_evt.lun = cmnd->device->lun;
3712 memcpy(&fast_path_evt->un.scsi_evt.wwpn,
3713 &pnode->nlp_portname, sizeof(struct lpfc_name));
3714 memcpy(&fast_path_evt->un.scsi_evt.wwnn,
3715 &pnode->nlp_nodename, sizeof(struct lpfc_name));
3716 } else if ((resp_info & SNS_LEN_VALID) && fcprsp->rspSnsLen &&
3717 ((cmnd->cmnd[0] == READ_10) || (cmnd->cmnd[0] == WRITE_10))) {
3718 fast_path_evt = lpfc_alloc_fast_evt(phba);
3719 if (!fast_path_evt)
3720 return;
3721 fast_path_evt->un.check_cond_evt.scsi_event.event_type =
3722 FC_REG_SCSI_EVENT;
3723 fast_path_evt->un.check_cond_evt.scsi_event.subcategory =
3724 LPFC_EVENT_CHECK_COND;
3725 fast_path_evt->un.check_cond_evt.scsi_event.lun =
3726 cmnd->device->lun;
3727 memcpy(&fast_path_evt->un.check_cond_evt.scsi_event.wwpn,
3728 &pnode->nlp_portname, sizeof(struct lpfc_name));
3729 memcpy(&fast_path_evt->un.check_cond_evt.scsi_event.wwnn,
3730 &pnode->nlp_nodename, sizeof(struct lpfc_name));
3731 fast_path_evt->un.check_cond_evt.sense_key =
3732 cmnd->sense_buffer[2] & 0xf;
3733 fast_path_evt->un.check_cond_evt.asc = cmnd->sense_buffer[12];
3734 fast_path_evt->un.check_cond_evt.ascq = cmnd->sense_buffer[13];
3735 } else if ((cmnd->sc_data_direction == DMA_FROM_DEVICE) &&
3736 fcpi_parm &&
3737 ((be32_to_cpu(fcprsp->rspResId) != fcpi_parm) ||
3738 ((scsi_status == SAM_STAT_GOOD) &&
3739 !(resp_info & (RESID_UNDER | RESID_OVER))))) {
3740
3741
3742
3743
3744 fast_path_evt = lpfc_alloc_fast_evt(phba);
3745 if (!fast_path_evt)
3746 return;
3747 fast_path_evt->un.read_check_error.header.event_type =
3748 FC_REG_FABRIC_EVENT;
3749 fast_path_evt->un.read_check_error.header.subcategory =
3750 LPFC_EVENT_FCPRDCHKERR;
3751 memcpy(&fast_path_evt->un.read_check_error.header.wwpn,
3752 &pnode->nlp_portname, sizeof(struct lpfc_name));
3753 memcpy(&fast_path_evt->un.read_check_error.header.wwnn,
3754 &pnode->nlp_nodename, sizeof(struct lpfc_name));
3755 fast_path_evt->un.read_check_error.lun = cmnd->device->lun;
3756 fast_path_evt->un.read_check_error.opcode = cmnd->cmnd[0];
3757 fast_path_evt->un.read_check_error.fcpiparam =
3758 fcpi_parm;
3759 } else
3760 return;
3761
3762 fast_path_evt->vport = vport;
3763 spin_lock_irqsave(&phba->hbalock, flags);
3764 list_add_tail(&fast_path_evt->work_evt.evt_listp, &phba->work_list);
3765 spin_unlock_irqrestore(&phba->hbalock, flags);
3766 lpfc_worker_wake_up(phba);
3767 return;
3768}
3769
3770
3771
3772
3773
3774
3775
3776
3777
3778static void
3779lpfc_scsi_unprep_dma_buf(struct lpfc_hba *phba, struct lpfc_scsi_buf *psb)
3780{
3781
3782
3783
3784
3785
3786
3787 if (psb->seg_cnt > 0)
3788 scsi_dma_unmap(psb->pCmd);
3789 if (psb->prot_seg_cnt > 0)
3790 dma_unmap_sg(&phba->pcidev->dev, scsi_prot_sglist(psb->pCmd),
3791 scsi_prot_sg_count(psb->pCmd),
3792 psb->pCmd->sc_data_direction);
3793}
3794
3795
3796
3797
3798
3799
3800
3801
3802
3803
3804
/**
 * lpfc_handle_fcp_err - FCP response handler
 * @vport: The virtual port for which this call is being executed.
 * @lpfc_cmd: Pointer to lpfc_scsi_buf data structure.
 * @rsp_iocb: The response IOCB which contains FCP error.
 *
 * Translates the FCP response fields (response length, sense data,
 * residual under/over-run, firmware transfer count) into the midlayer
 * host status and residual, then posts an error event if applicable.
 */
static void
lpfc_handle_fcp_err(struct lpfc_vport *vport, struct lpfc_scsi_buf *lpfc_cmd,
		    struct lpfc_iocbq *rsp_iocb)
{
	struct scsi_cmnd *cmnd = lpfc_cmd->pCmd;
	struct fcp_cmnd *fcpcmd = lpfc_cmd->fcp_cmnd;
	struct fcp_rsp *fcprsp = lpfc_cmd->fcp_rsp;
	uint32_t fcpi_parm = rsp_iocb->iocb.un.fcpi.fcpi_parm;
	uint32_t resp_info = fcprsp->rspStatus2;
	uint32_t scsi_status = fcprsp->rspStatus3;
	uint32_t *lp;
	uint32_t host_status = DID_OK;
	uint32_t rsplen = 0;
	uint32_t logit = LOG_FCP | LOG_FCP_ERROR;

	/*
	 * If this is a task management command, there is no response data
	 * to examine: the scsi_status reflects the task management status,
	 * not a SCSI command status.
	 */
	if (fcpcmd->fcpCntl2) {
		scsi_status = 0;
		goto out;
	}

	if (resp_info & RSP_LEN_VALID) {
		rsplen = be32_to_cpu(fcprsp->rspRspLen);
		/* FCP spec allows only 0, 4 or 8 byte response lengths */
		if (rsplen != 0 && rsplen != 4 && rsplen != 8) {
			lpfc_printf_vlog(vport, KERN_ERR, LOG_FCP,
				 "2719 Invalid response length: "
				 "tgt x%x lun x%llx cmnd x%x rsplen x%x\n",
				 cmnd->device->id,
				 cmnd->device->lun, cmnd->cmnd[0],
				 rsplen);
			host_status = DID_ERROR;
			goto out;
		}
		if (fcprsp->rspInfo3 != RSP_NO_FAILURE) {
			lpfc_printf_vlog(vport, KERN_ERR, LOG_FCP,
				 "2757 Protocol failure detected during "
				 "processing of FCP I/O op: "
				 "tgt x%x lun x%llx cmnd x%x rspInfo3 x%x\n",
				 cmnd->device->id,
				 cmnd->device->lun, cmnd->cmnd[0],
				 fcprsp->rspInfo3);
			host_status = DID_ERROR;
			goto out;
		}
	}

	/* Copy sense data (which follows the response info) to the midlayer */
	if ((resp_info & SNS_LEN_VALID) && fcprsp->rspSnsLen) {
		uint32_t snslen = be32_to_cpu(fcprsp->rspSnsLen);
		if (snslen > SCSI_SENSE_BUFFERSIZE)
			snslen = SCSI_SENSE_BUFFERSIZE;

		if (resp_info & RSP_LEN_VALID)
		  rsplen = be32_to_cpu(fcprsp->rspRspLen);
		memcpy(cmnd->sense_buffer, &fcprsp->rspInfo0 + rsplen, snslen);
	}
	lp = (uint32_t *)cmnd->sense_buffer;

	/* special handling for under run conditions */
	if (!scsi_status && (resp_info & RESID_UNDER)) {
		/* don't log under runs if fcp set... */
		if (vport->cfg_log_verbose & LOG_FCP)
			logit = LOG_FCP_ERROR;
		/* unless operator says so */
		if (vport->cfg_log_verbose & LOG_FCP_UNDER)
			logit = LOG_FCP_UNDER;
	}

	lpfc_printf_vlog(vport, KERN_WARNING, logit,
			 "9024 FCP command x%x failed: x%x SNS x%x x%x "
			 "Data: x%x x%x x%x x%x x%x\n",
			 cmnd->cmnd[0], scsi_status,
			 be32_to_cpu(*lp), be32_to_cpu(*(lp + 3)), resp_info,
			 be32_to_cpu(fcprsp->rspResId),
			 be32_to_cpu(fcprsp->rspSnsLen),
			 be32_to_cpu(fcprsp->rspRspLen),
			 fcprsp->rspInfo3);

	scsi_set_resid(cmnd, 0);
	if (resp_info & RESID_UNDER) {
		scsi_set_resid(cmnd, be32_to_cpu(fcprsp->rspResId));

		lpfc_printf_vlog(vport, KERN_INFO, LOG_FCP_UNDER,
				 "9025 FCP Read Underrun, expected %d, "
				 "residual %d Data: x%x x%x x%x\n",
				 be32_to_cpu(fcpcmd->fcpDl),
				 scsi_get_resid(cmnd), fcpi_parm, cmnd->cmnd[0],
				 cmnd->underflow);

		/*
		 * If there is an under run, check if under run reported by
		 * storage array is same as the under run reported by HBA.
		 * If this is not same, there is a dropped frame.
		 */
		if ((cmnd->sc_data_direction == DMA_FROM_DEVICE) &&
			fcpi_parm &&
			(scsi_get_resid(cmnd) != fcpi_parm)) {
			lpfc_printf_vlog(vport, KERN_WARNING,
					 LOG_FCP | LOG_FCP_ERROR,
					 "9026 FCP Read Check Error "
					 "and Underrun Data: x%x x%x x%x x%x\n",
					 be32_to_cpu(fcpcmd->fcpDl),
					 scsi_get_resid(cmnd), fcpi_parm,
					 cmnd->cmnd[0]);
			scsi_set_resid(cmnd, scsi_bufflen(cmnd));
			host_status = DID_ERROR;
		}
		/*
		 * The cmnd->underflow is the minimum number of bytes that must
		 * be transferred for this command.  Provided a sense condition
		 * is not present, make sure the actual amount transferred is
		 * at least the underflow value or fail the command.
		 */
		if (!(resp_info & SNS_LEN_VALID) &&
		    (scsi_status == SAM_STAT_GOOD) &&
		    (scsi_bufflen(cmnd) - scsi_get_resid(cmnd)
		     < cmnd->underflow)) {
			lpfc_printf_vlog(vport, KERN_INFO, LOG_FCP,
					 "9027 FCP command x%x residual "
					 "underrun converted to error "
					 "Data: x%x x%x x%x\n",
					 cmnd->cmnd[0], scsi_bufflen(cmnd),
					 scsi_get_resid(cmnd), cmnd->underflow);
			host_status = DID_ERROR;
		}
	} else if (resp_info & RESID_OVER) {
		lpfc_printf_vlog(vport, KERN_WARNING, LOG_FCP,
				 "9028 FCP command x%x residual overrun error. "
				 "Data: x%x x%x\n", cmnd->cmnd[0],
				 scsi_bufflen(cmnd), scsi_get_resid(cmnd));
		host_status = DID_ERROR;

	/*
	 * Check SLI validation that all the transfer was actually done
	 * (fcpi_parm should be zero for a read).  Apply check only to reads.
	 */
	} else if (fcpi_parm && (cmnd->sc_data_direction == DMA_FROM_DEVICE)) {
		lpfc_printf_vlog(vport, KERN_WARNING, LOG_FCP | LOG_FCP_ERROR,
				 "9029 FCP Read Check Error Data: "
				 "x%x x%x x%x x%x x%x\n",
				 be32_to_cpu(fcpcmd->fcpDl),
				 be32_to_cpu(fcprsp->rspResId),
				 fcpi_parm, cmnd->cmnd[0], scsi_status);
		switch (scsi_status) {
		case SAM_STAT_GOOD:
		case SAM_STAT_CHECK_CONDITION:
			/*
			 * Fabric drops frames: if status is good or
			 * check condition, fail the entire transfer;
			 * the residual is discarded.
			 */
			host_status = DID_ERROR;
			break;
		}
		scsi_set_resid(cmnd, scsi_bufflen(cmnd));
	}

 out:
	cmnd->result = ScsiResult(host_status, scsi_status);
	lpfc_send_scsi_error_event(vport->phba, vport, lpfc_cmd, rsp_iocb);
}
3970
3971
3972
3973
3974
3975
3976
3977
3978
3979
3980
3981static void
3982lpfc_scsi_cmd_iocb_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *pIocbIn,
3983 struct lpfc_iocbq *pIocbOut)
3984{
3985 struct lpfc_scsi_buf *lpfc_cmd =
3986 (struct lpfc_scsi_buf *) pIocbIn->context1;
3987 struct lpfc_vport *vport = pIocbIn->vport;
3988 struct lpfc_rport_data *rdata = lpfc_cmd->rdata;
3989 struct lpfc_nodelist *pnode = rdata->pnode;
3990 struct scsi_cmnd *cmd;
3991 int result;
3992 int depth;
3993 unsigned long flags;
3994 struct lpfc_fast_path_event *fast_path_evt;
3995 struct Scsi_Host *shost;
3996 uint32_t queue_depth, scsi_id;
3997 uint32_t logit = LOG_FCP;
3998
3999
4000 if (!(lpfc_cmd->pCmd))
4001 return;
4002 cmd = lpfc_cmd->pCmd;
4003 shost = cmd->device->host;
4004
4005 lpfc_cmd->result = (pIocbOut->iocb.un.ulpWord[4] & IOERR_PARAM_MASK);
4006 lpfc_cmd->status = pIocbOut->iocb.ulpStatus;
4007
4008 lpfc_cmd->exch_busy = pIocbOut->iocb_flag & LPFC_EXCHANGE_BUSY;
4009
4010#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
4011 if (lpfc_cmd->prot_data_type) {
4012 struct scsi_dif_tuple *src = NULL;
4013
4014 src = (struct scsi_dif_tuple *)lpfc_cmd->prot_data_segment;
4015
4016
4017
4018
4019 switch (lpfc_cmd->prot_data_type) {
4020 case LPFC_INJERR_REFTAG:
4021 src->ref_tag =
4022 lpfc_cmd->prot_data;
4023 break;
4024 case LPFC_INJERR_APPTAG:
4025 src->app_tag =
4026 (uint16_t)lpfc_cmd->prot_data;
4027 break;
4028 case LPFC_INJERR_GUARD:
4029 src->guard_tag =
4030 (uint16_t)lpfc_cmd->prot_data;
4031 break;
4032 default:
4033 break;
4034 }
4035
4036 lpfc_cmd->prot_data = 0;
4037 lpfc_cmd->prot_data_type = 0;
4038 lpfc_cmd->prot_data_segment = NULL;
4039 }
4040#endif
4041 if (pnode && NLP_CHK_NODE_ACT(pnode))
4042 atomic_dec(&pnode->cmd_pending);
4043
4044 if (lpfc_cmd->status) {
4045 if (lpfc_cmd->status == IOSTAT_LOCAL_REJECT &&
4046 (lpfc_cmd->result & IOERR_DRVR_MASK))
4047 lpfc_cmd->status = IOSTAT_DRIVER_REJECT;
4048 else if (lpfc_cmd->status >= IOSTAT_CNT)
4049 lpfc_cmd->status = IOSTAT_DEFAULT;
4050 if (lpfc_cmd->status == IOSTAT_FCP_RSP_ERROR &&
4051 !lpfc_cmd->fcp_rsp->rspStatus3 &&
4052 (lpfc_cmd->fcp_rsp->rspStatus2 & RESID_UNDER) &&
4053 !(vport->cfg_log_verbose & LOG_FCP_UNDER))
4054 logit = 0;
4055 else
4056 logit = LOG_FCP | LOG_FCP_UNDER;
4057 lpfc_printf_vlog(vport, KERN_WARNING, logit,
4058 "9030 FCP cmd x%x failed <%d/%lld> "
4059 "status: x%x result: x%x "
4060 "sid: x%x did: x%x oxid: x%x "
4061 "Data: x%x x%x\n",
4062 cmd->cmnd[0],
4063 cmd->device ? cmd->device->id : 0xffff,
4064 cmd->device ? cmd->device->lun : 0xffff,
4065 lpfc_cmd->status, lpfc_cmd->result,
4066 vport->fc_myDID,
4067 (pnode) ? pnode->nlp_DID : 0,
4068 phba->sli_rev == LPFC_SLI_REV4 ?
4069 lpfc_cmd->cur_iocbq.sli4_xritag : 0xffff,
4070 pIocbOut->iocb.ulpContext,
4071 lpfc_cmd->cur_iocbq.iocb.ulpIoTag);
4072
4073 switch (lpfc_cmd->status) {
4074 case IOSTAT_FCP_RSP_ERROR:
4075
4076 lpfc_handle_fcp_err(vport, lpfc_cmd, pIocbOut);
4077 break;
4078 case IOSTAT_NPORT_BSY:
4079 case IOSTAT_FABRIC_BSY:
4080 cmd->result = ScsiResult(DID_TRANSPORT_DISRUPTED, 0);
4081 fast_path_evt = lpfc_alloc_fast_evt(phba);
4082 if (!fast_path_evt)
4083 break;
4084 fast_path_evt->un.fabric_evt.event_type =
4085 FC_REG_FABRIC_EVENT;
4086 fast_path_evt->un.fabric_evt.subcategory =
4087 (lpfc_cmd->status == IOSTAT_NPORT_BSY) ?
4088 LPFC_EVENT_PORT_BUSY : LPFC_EVENT_FABRIC_BUSY;
4089 if (pnode && NLP_CHK_NODE_ACT(pnode)) {
4090 memcpy(&fast_path_evt->un.fabric_evt.wwpn,
4091 &pnode->nlp_portname,
4092 sizeof(struct lpfc_name));
4093 memcpy(&fast_path_evt->un.fabric_evt.wwnn,
4094 &pnode->nlp_nodename,
4095 sizeof(struct lpfc_name));
4096 }
4097 fast_path_evt->vport = vport;
4098 fast_path_evt->work_evt.evt =
4099 LPFC_EVT_FASTPATH_MGMT_EVT;
4100 spin_lock_irqsave(&phba->hbalock, flags);
4101 list_add_tail(&fast_path_evt->work_evt.evt_listp,
4102 &phba->work_list);
4103 spin_unlock_irqrestore(&phba->hbalock, flags);
4104 lpfc_worker_wake_up(phba);
4105 break;
4106 case IOSTAT_LOCAL_REJECT:
4107 case IOSTAT_REMOTE_STOP:
4108 if (lpfc_cmd->result == IOERR_ELXSEC_KEY_UNWRAP_ERROR ||
4109 lpfc_cmd->result ==
4110 IOERR_ELXSEC_KEY_UNWRAP_COMPARE_ERROR ||
4111 lpfc_cmd->result == IOERR_ELXSEC_CRYPTO_ERROR ||
4112 lpfc_cmd->result ==
4113 IOERR_ELXSEC_CRYPTO_COMPARE_ERROR) {
4114 cmd->result = ScsiResult(DID_NO_CONNECT, 0);
4115 break;
4116 }
4117 if (lpfc_cmd->result == IOERR_INVALID_RPI ||
4118 lpfc_cmd->result == IOERR_NO_RESOURCES ||
4119 lpfc_cmd->result == IOERR_ABORT_REQUESTED ||
4120 lpfc_cmd->result == IOERR_SLER_CMD_RCV_FAILURE) {
4121 cmd->result = ScsiResult(DID_REQUEUE, 0);
4122 break;
4123 }
4124 if ((lpfc_cmd->result == IOERR_RX_DMA_FAILED ||
4125 lpfc_cmd->result == IOERR_TX_DMA_FAILED) &&
4126 pIocbOut->iocb.unsli3.sli3_bg.bgstat) {
4127 if (scsi_get_prot_op(cmd) != SCSI_PROT_NORMAL) {
4128
4129
4130
4131
4132 lpfc_parse_bg_err(phba, lpfc_cmd,
4133 pIocbOut);
4134 break;
4135 } else {
4136 lpfc_printf_vlog(vport, KERN_WARNING,
4137 LOG_BG,
4138 "9031 non-zero BGSTAT "
4139 "on unprotected cmd\n");
4140 }
4141 }
4142 if ((lpfc_cmd->status == IOSTAT_REMOTE_STOP)
4143 && (phba->sli_rev == LPFC_SLI_REV4)
4144 && (pnode && NLP_CHK_NODE_ACT(pnode))) {
4145
4146
4147
4148
4149 lpfc_set_rrq_active(phba, pnode,
4150 lpfc_cmd->cur_iocbq.sli4_lxritag,
4151 0, 0);
4152 }
4153
4154 default:
4155 cmd->result = ScsiResult(DID_ERROR, 0);
4156 break;
4157 }
4158
4159 if (!pnode || !NLP_CHK_NODE_ACT(pnode)
4160 || (pnode->nlp_state != NLP_STE_MAPPED_NODE))
4161 cmd->result = ScsiResult(DID_TRANSPORT_DISRUPTED,
4162 SAM_STAT_BUSY);
4163 } else
4164 cmd->result = ScsiResult(DID_OK, 0);
4165
4166 if (cmd->result || lpfc_cmd->fcp_rsp->rspSnsLen) {
4167 uint32_t *lp = (uint32_t *)cmd->sense_buffer;
4168
4169 lpfc_printf_vlog(vport, KERN_INFO, LOG_FCP,
4170 "0710 Iodone <%d/%llu> cmd %p, error "
4171 "x%x SNS x%x x%x Data: x%x x%x\n",
4172 cmd->device->id, cmd->device->lun, cmd,
4173 cmd->result, *lp, *(lp + 3), cmd->retries,
4174 scsi_get_resid(cmd));
4175 }
4176
4177 lpfc_update_stats(phba, lpfc_cmd);
4178 result = cmd->result;
4179 if (vport->cfg_max_scsicmpl_time &&
4180 time_after(jiffies, lpfc_cmd->start_time +
4181 msecs_to_jiffies(vport->cfg_max_scsicmpl_time))) {
4182 spin_lock_irqsave(shost->host_lock, flags);
4183 if (pnode && NLP_CHK_NODE_ACT(pnode)) {
4184 if (pnode->cmd_qdepth >
4185 atomic_read(&pnode->cmd_pending) &&
4186 (atomic_read(&pnode->cmd_pending) >
4187 LPFC_MIN_TGT_QDEPTH) &&
4188 ((cmd->cmnd[0] == READ_10) ||
4189 (cmd->cmnd[0] == WRITE_10)))
4190 pnode->cmd_qdepth =
4191 atomic_read(&pnode->cmd_pending);
4192
4193 pnode->last_change_time = jiffies;
4194 }
4195 spin_unlock_irqrestore(shost->host_lock, flags);
4196 } else if (pnode && NLP_CHK_NODE_ACT(pnode)) {
4197 if ((pnode->cmd_qdepth < vport->cfg_tgt_queue_depth) &&
4198 time_after(jiffies, pnode->last_change_time +
4199 msecs_to_jiffies(LPFC_TGTQ_INTERVAL))) {
4200 spin_lock_irqsave(shost->host_lock, flags);
4201 depth = pnode->cmd_qdepth * LPFC_TGTQ_RAMPUP_PCENT
4202 / 100;
4203 depth = depth ? depth : 1;
4204 pnode->cmd_qdepth += depth;
4205 if (pnode->cmd_qdepth > vport->cfg_tgt_queue_depth)
4206 pnode->cmd_qdepth = vport->cfg_tgt_queue_depth;
4207 pnode->last_change_time = jiffies;
4208 spin_unlock_irqrestore(shost->host_lock, flags);
4209 }
4210 }
4211
4212 lpfc_scsi_unprep_dma_buf(phba, lpfc_cmd);
4213
4214
4215 queue_depth = cmd->device->queue_depth;
4216 scsi_id = cmd->device->id;
4217 cmd->scsi_done(cmd);
4218
4219 if (phba->cfg_poll & ENABLE_FCP_RING_POLLING) {
4220 spin_lock_irqsave(&phba->hbalock, flags);
4221 lpfc_cmd->pCmd = NULL;
4222 spin_unlock_irqrestore(&phba->hbalock, flags);
4223
4224
4225
4226
4227
4228 spin_lock_irqsave(shost->host_lock, flags);
4229 if (lpfc_cmd->waitq)
4230 wake_up(lpfc_cmd->waitq);
4231 spin_unlock_irqrestore(shost->host_lock, flags);
4232 lpfc_release_scsi_buf(phba, lpfc_cmd);
4233 return;
4234 }
4235
4236 spin_lock_irqsave(&phba->hbalock, flags);
4237 lpfc_cmd->pCmd = NULL;
4238 spin_unlock_irqrestore(&phba->hbalock, flags);
4239
4240
4241
4242
4243
4244 spin_lock_irqsave(shost->host_lock, flags);
4245 if (lpfc_cmd->waitq)
4246 wake_up(lpfc_cmd->waitq);
4247 spin_unlock_irqrestore(shost->host_lock, flags);
4248
4249 lpfc_release_scsi_buf(phba, lpfc_cmd);
4250}
4251
4252
4253
4254
4255
4256
4257
4258
4259
4260static void
4261lpfc_fcpcmd_to_iocb(uint8_t *data, struct fcp_cmnd *fcp_cmnd)
4262{
4263 int i, j;
4264 for (i = 0, j = 0; i < sizeof(struct fcp_cmnd);
4265 i += sizeof(uint32_t), j++) {
4266 ((uint32_t *)data)[j] = cpu_to_be32(((uint32_t *)fcp_cmnd)[j]);
4267 }
4268}
4269
4270
4271
4272
4273
4274
4275
4276
4277
4278
/**
 * lpfc_scsi_prep_cmnd - Wrap a scsi_cmnd into an FCP_CMND and driver IOCB
 * @vport: The virtual port for which this call is being executed.
 * @lpfc_cmd: The driver scsi buffer holding the mid-layer command.
 * @pnode: Pointer to the remote port's lpfc_nodelist entry.
 *
 * Fills in the FCP_CMND payload (LUN, CDB, task attribute, data-direction
 * control byte) and the IOCB fields (command code, first-burst XFER_RDY,
 * RPI context, class, timeout), then arms lpfc_scsi_cmd_iocb_cmpl as the
 * completion routine.  Silently does nothing if @pnode is missing or not
 * an active node.
 */
static void
lpfc_scsi_prep_cmnd(struct lpfc_vport *vport, struct lpfc_scsi_buf *lpfc_cmd,
		    struct lpfc_nodelist *pnode)
{
	struct lpfc_hba *phba = vport->phba;
	struct scsi_cmnd *scsi_cmnd = lpfc_cmd->pCmd;
	struct fcp_cmnd *fcp_cmnd = lpfc_cmd->fcp_cmnd;
	IOCB_t *iocb_cmd = &lpfc_cmd->cur_iocbq.iocb;
	struct lpfc_iocbq *piocbq = &(lpfc_cmd->cur_iocbq);
	int datadir = scsi_cmnd->sc_data_direction;
	char tag[2];
	uint8_t *ptr;
	bool sli4;
	uint32_t fcpdl;

	if (!pnode || !NLP_CHK_NODE_ACT(pnode))
		return;

	lpfc_cmd->fcp_rsp->rspSnsLen = 0;
	/* clear task management bits */
	lpfc_cmd->fcp_cmnd->fcpCntl2 = 0;

	int_to_scsilun(lpfc_cmd->pCmd->device->lun,
			&lpfc_cmd->fcp_cmnd->fcp_lun);

	/* Copy the CDB and zero-pad the remainder of the fixed-size field */
	ptr = &fcp_cmnd->fcpCdb[0];
	memcpy(ptr, scsi_cmnd->cmnd, scsi_cmnd->cmd_len);
	if (scsi_cmnd->cmd_len < LPFC_FCP_CDB_LEN) {
		ptr += scsi_cmnd->cmd_len;
		memset(ptr, 0, (LPFC_FCP_CDB_LEN - scsi_cmnd->cmd_len));
	}

	/* Translate the block-layer tag message into an FCP task attribute */
	if (scsi_populate_tag_msg(scsi_cmnd, tag)) {
		switch (tag[0]) {
		case HEAD_OF_QUEUE_TAG:
			fcp_cmnd->fcpCntl1 = HEAD_OF_Q;
			break;
		case ORDERED_QUEUE_TAG:
			fcp_cmnd->fcpCntl1 = ORDERED_Q;
			break;
		default:
			fcp_cmnd->fcpCntl1 = SIMPLE_Q;
			break;
		}
	} else
		fcp_cmnd->fcpCntl1 = SIMPLE_Q;

	sli4 = (phba->sli_rev == LPFC_SLI_REV4);
	piocbq->iocb.un.fcpi.fcpi_XRdy = 0;

	/*
	 * Pick the IOCB command by transfer type: scatter-gather write,
	 * scatter-gather read, or no data transfer at all.
	 */
	if (scsi_sg_count(scsi_cmnd)) {
		if (datadir == DMA_TO_DEVICE) {
			iocb_cmd->ulpCommand = CMD_FCP_IWRITE64_CR;
			iocb_cmd->ulpPU = PARM_READ_CHECK;
			if (vport->cfg_first_burst_size &&
			    (pnode->nlp_flag & NLP_FIRSTBURST)) {
				/* Cap the XFER_RDY amount at the configured
				 * first-burst size.
				 */
				fcpdl = scsi_bufflen(scsi_cmnd);
				if (fcpdl < vport->cfg_first_burst_size)
					piocbq->iocb.un.fcpi.fcpi_XRdy = fcpdl;
				else
					piocbq->iocb.un.fcpi.fcpi_XRdy =
						vport->cfg_first_burst_size;
			}
			fcp_cmnd->fcpCntl3 = WRITE_DATA;
			phba->fc4OutputRequests++;
		} else {
			iocb_cmd->ulpCommand = CMD_FCP_IREAD64_CR;
			iocb_cmd->ulpPU = PARM_READ_CHECK;
			fcp_cmnd->fcpCntl3 = READ_DATA;
			phba->fc4InputRequests++;
		}
	} else {
		/* No data to move: plain command IOCB */
		iocb_cmd->ulpCommand = CMD_FCP_ICMND64_CR;
		iocb_cmd->un.fcpi.fcpi_parm = 0;
		iocb_cmd->ulpPU = 0;
		fcp_cmnd->fcpCntl3 = 0;
		phba->fc4ControlRequests++;
	}
	/* SLI-3 without BlockGuard carries the FCP_CMND inside the IOCB;
	 * copy it now that every fcp_cmnd field above is final.
	 */
	if (phba->sli_rev == 3 &&
	    !(phba->sli3_options & LPFC_SLI3_BG_ENABLED))
		lpfc_fcpcmd_to_iocb(iocb_cmd->unsli3.fcp_ext.icd, fcp_cmnd);
	/*
	 * Finish initializing those IOCB fields that are independent
	 * of the scsi_cmnd request buffer.
	 */
	piocbq->iocb.ulpContext = pnode->nlp_rpi;
	if (sli4)
		piocbq->iocb.ulpContext =
			phba->sli4_hba.rpi_ids[pnode->nlp_rpi];
	if (pnode->nlp_fcp_info & NLP_FCP_2_DEVICE)
		piocbq->iocb.ulpFCP2Rcvy = 1;
	else
		piocbq->iocb.ulpFCP2Rcvy = 0;

	piocbq->iocb.ulpClass = (pnode->nlp_fcp_info & 0x0f);
	piocbq->context1 = lpfc_cmd;
	piocbq->iocb_cmpl = lpfc_scsi_cmd_iocb_cmpl;
	piocbq->iocb.ulpTimeout = lpfc_cmd->timeout;
	piocbq->vport = vport;
}
4385
4386
4387
4388
4389
4390
4391
4392
4393
4394
4395
4396
4397
4398
4399
/**
 * lpfc_scsi_prep_task_mgmt_cmd - Build an FCP task-management IOCB
 * @vport: The virtual port for which this call is being executed.
 * @lpfc_cmd: The driver scsi buffer to prepare.
 * @lun: LUN the task-management command is aimed at.
 * @task_mgmt_cmd: FCP task-management code (e.g. FCP_LUN_RESET).
 *
 * Prepares @lpfc_cmd's FCP_CMND and IOCB to carry @task_mgmt_cmd.
 *
 * Return: 1 on success, 0 when the target node is missing, inactive,
 * or not in MAPPED state.
 */
static int
lpfc_scsi_prep_task_mgmt_cmd(struct lpfc_vport *vport,
			struct lpfc_scsi_buf *lpfc_cmd,
			uint64_t lun,
			uint8_t task_mgmt_cmd)
{
	struct lpfc_iocbq *piocbq;
	IOCB_t *piocb;
	struct fcp_cmnd *fcp_cmnd;
	struct lpfc_rport_data *rdata = lpfc_cmd->rdata;
	struct lpfc_nodelist *ndlp = rdata->pnode;

	if (!ndlp || !NLP_CHK_NODE_ACT(ndlp) ||
	    ndlp->nlp_state != NLP_STE_MAPPED_NODE)
		return 0;

	piocbq = &(lpfc_cmd->cur_iocbq);
	piocbq->vport = vport;

	piocb = &piocbq->iocb;

	fcp_cmnd = lpfc_cmd->fcp_cmnd;
	/* Clear out any old command payload before building the TMF */
	memset(fcp_cmnd, 0, sizeof(struct fcp_cmnd));
	int_to_scsilun(lun, &fcp_cmnd->fcp_lun);
	fcp_cmnd->fcpCntl2 = task_mgmt_cmd;
	/* SLI-3 without BlockGuard embeds the FCP_CMND in the IOCB */
	if (vport->phba->sli_rev == 3 &&
	    !(vport->phba->sli3_options & LPFC_SLI3_BG_ENABLED))
		lpfc_fcpcmd_to_iocb(piocb->unsli3.fcp_ext.icd, fcp_cmnd);
	piocb->ulpCommand = CMD_FCP_ICMND64_CR;
	piocb->ulpContext = ndlp->nlp_rpi;
	if (vport->phba->sli_rev == LPFC_SLI_REV4) {
		/* SLI-4 uses the mapped RPI id as the context tag */
		piocb->ulpContext =
		  vport->phba->sli4_hba.rpi_ids[ndlp->nlp_rpi];
	}
	piocb->ulpFCP2Rcvy = (ndlp->nlp_fcp_info & NLP_FCP_2_DEVICE) ? 1 : 0;
	piocb->ulpClass = (ndlp->nlp_fcp_info & 0x0f);
	piocb->ulpPU = 0;
	piocb->un.fcpi.fcpi_parm = 0;

	/* ulpTimeout is only one byte */
	if (lpfc_cmd->timeout > 0xff) {
		/*
		 * Do not timeout the command at the firmware level;
		 * the driver provides the timeout mechanism instead.
		 */
		piocb->ulpTimeout = 0;
	} else
		piocb->ulpTimeout = lpfc_cmd->timeout;

	if (vport->phba->sli_rev == LPFC_SLI_REV4)
		lpfc_sli4_set_rsp_sgl_last(vport->phba, lpfc_cmd);

	return 1;
}
4455
4456
4457
4458
4459
4460
4461
4462
4463
4464
4465int
4466lpfc_scsi_api_table_setup(struct lpfc_hba *phba, uint8_t dev_grp)
4467{
4468
4469 phba->lpfc_scsi_unprep_dma_buf = lpfc_scsi_unprep_dma_buf;
4470 phba->lpfc_scsi_prep_cmnd = lpfc_scsi_prep_cmnd;
4471
4472 switch (dev_grp) {
4473 case LPFC_PCI_DEV_LP:
4474 phba->lpfc_new_scsi_buf = lpfc_new_scsi_buf_s3;
4475 phba->lpfc_scsi_prep_dma_buf = lpfc_scsi_prep_dma_buf_s3;
4476 phba->lpfc_bg_scsi_prep_dma_buf = lpfc_bg_scsi_prep_dma_buf_s3;
4477 phba->lpfc_release_scsi_buf = lpfc_release_scsi_buf_s3;
4478 phba->lpfc_get_scsi_buf = lpfc_get_scsi_buf_s3;
4479 break;
4480 case LPFC_PCI_DEV_OC:
4481 phba->lpfc_new_scsi_buf = lpfc_new_scsi_buf_s4;
4482 phba->lpfc_scsi_prep_dma_buf = lpfc_scsi_prep_dma_buf_s4;
4483 phba->lpfc_bg_scsi_prep_dma_buf = lpfc_bg_scsi_prep_dma_buf_s4;
4484 phba->lpfc_release_scsi_buf = lpfc_release_scsi_buf_s4;
4485 phba->lpfc_get_scsi_buf = lpfc_get_scsi_buf_s4;
4486 break;
4487 default:
4488 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
4489 "1418 Invalid HBA PCI-device group: 0x%x\n",
4490 dev_grp);
4491 return -ENODEV;
4492 break;
4493 }
4494 phba->lpfc_rampdown_queue_depth = lpfc_rampdown_queue_depth;
4495 phba->lpfc_scsi_cmd_iocb_cmpl = lpfc_scsi_cmd_iocb_cmpl;
4496 return 0;
4497}
4498
4499
4500
4501
4502
4503
4504
4505
4506
4507
4508static void
4509lpfc_tskmgmt_def_cmpl(struct lpfc_hba *phba,
4510 struct lpfc_iocbq *cmdiocbq,
4511 struct lpfc_iocbq *rspiocbq)
4512{
4513 struct lpfc_scsi_buf *lpfc_cmd =
4514 (struct lpfc_scsi_buf *) cmdiocbq->context1;
4515 if (lpfc_cmd)
4516 lpfc_release_scsi_buf(phba, lpfc_cmd);
4517 return;
4518}
4519
4520
4521
4522
4523
4524
4525
4526
4527
4528
4529const char *
4530lpfc_info(struct Scsi_Host *host)
4531{
4532 struct lpfc_vport *vport = (struct lpfc_vport *) host->hostdata;
4533 struct lpfc_hba *phba = vport->phba;
4534 int len, link_speed = 0;
4535 static char lpfcinfobuf[384];
4536
4537 memset(lpfcinfobuf,0,384);
4538 if (phba && phba->pcidev){
4539 strncpy(lpfcinfobuf, phba->ModelDesc, 256);
4540 len = strlen(lpfcinfobuf);
4541 snprintf(lpfcinfobuf + len,
4542 384-len,
4543 " on PCI bus %02x device %02x irq %d",
4544 phba->pcidev->bus->number,
4545 phba->pcidev->devfn,
4546 phba->pcidev->irq);
4547 len = strlen(lpfcinfobuf);
4548 if (phba->Port[0]) {
4549 snprintf(lpfcinfobuf + len,
4550 384-len,
4551 " port %s",
4552 phba->Port);
4553 }
4554 len = strlen(lpfcinfobuf);
4555 if (phba->sli_rev <= LPFC_SLI_REV3) {
4556 link_speed = lpfc_sli_port_speed_get(phba);
4557 } else {
4558 if (phba->sli4_hba.link_state.logical_speed)
4559 link_speed =
4560 phba->sli4_hba.link_state.logical_speed;
4561 else
4562 link_speed = phba->sli4_hba.link_state.speed;
4563 }
4564 if (link_speed != 0)
4565 snprintf(lpfcinfobuf + len, 384-len,
4566 " Logical Link Speed: %d Mbps", link_speed);
4567 }
4568 return lpfcinfobuf;
4569}
4570
4571
4572
4573
4574
4575
4576
4577
4578static __inline__ void lpfc_poll_rearm_timer(struct lpfc_hba * phba)
4579{
4580 unsigned long poll_tmo_expires =
4581 (jiffies + msecs_to_jiffies(phba->cfg_poll_tmo));
4582
4583 if (!list_empty(&phba->sli.ring[LPFC_FCP_RING].txcmplq))
4584 mod_timer(&phba->fcp_poll_timer,
4585 poll_tmo_expires);
4586}
4587
4588
4589
4590
4591
4592
4593
/**
 * lpfc_poll_start_timer - Start the FCP ring poll timer
 * @phba: The Hba for which this call is being executed.
 *
 * Thin wrapper that (re)arms the fcp_poll_timer via
 * lpfc_poll_rearm_timer().
 */
void lpfc_poll_start_timer(struct lpfc_hba * phba)
{
	lpfc_poll_rearm_timer(phba);
}
4598
4599
4600
4601
4602
4603
4604
4605
4606
4607void lpfc_poll_timeout(unsigned long ptr)
4608{
4609 struct lpfc_hba *phba = (struct lpfc_hba *) ptr;
4610
4611 if (phba->cfg_poll & ENABLE_FCP_RING_POLLING) {
4612 lpfc_sli_handle_fast_ring_event(phba,
4613 &phba->sli.ring[LPFC_FCP_RING], HA_R0RE_REQ);
4614
4615 if (phba->cfg_poll & DISABLE_FCP_RING_INT)
4616 lpfc_poll_rearm_timer(phba);
4617 }
4618}
4619
4620
4621
4622
4623
4624
4625
4626
4627
4628
4629
4630
4631
4632
/**
 * lpfc_queuecommand - scsi_host_template queuecommand entry point
 * @shost: SCSI host the command is queued to.
 * @cmnd: Pointer to scsi_cmnd data structure.
 *
 * Maps the mid-layer command onto a driver scsi buffer, prepares its DMA
 * scatter list and IOCB, and posts it to the FCP ring.
 *
 * Return: 0 when the command was accepted (or completed with an error via
 * scsi_done), SCSI_MLQUEUE_HOST_BUSY or SCSI_MLQUEUE_TARGET_BUSY to ask
 * the mid-layer to retry later.
 */
static int
lpfc_queuecommand(struct Scsi_Host *shost, struct scsi_cmnd *cmnd)
{
	struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
	struct lpfc_hba *phba = vport->phba;
	struct lpfc_rport_data *rdata;
	struct lpfc_nodelist *ndlp;
	struct lpfc_scsi_buf *lpfc_cmd;
	struct fc_rport *rport = starget_to_rport(scsi_target(cmnd->device));
	int err;

	rdata = lpfc_rport_data_from_scsi_device(cmnd->device);
	err = fc_remote_port_chkready(rport);
	if (err) {
		cmnd->result = err;
		goto out_fail_command;
	}
	ndlp = rdata->pnode;

	/* Reject protected I/O when BlockGuard was never enabled */
	if ((scsi_get_prot_op(cmnd) != SCSI_PROT_NORMAL) &&
		(!(phba->sli3_options & LPFC_SLI3_BG_ENABLED))) {

		lpfc_printf_log(phba, KERN_ERR, LOG_BG,
				"9058 BLKGRD: ERROR: rcvd protected cmd:%02x"
				" op:%02x str=%s without registering for"
				" BlockGuard - Rejecting command\n",
				cmnd->cmnd[0], scsi_get_prot_op(cmnd),
				dif_op_str[scsi_get_prot_op(cmnd)]);
		/* NOTE(review): this path reaches out_fail_command without
		 * assigning cmnd->result (err is 0 here) — confirm whether
		 * a DID_ERROR result should be set before completion.
		 */
		goto out_fail_command;
	}

	/*
	 * Node missing or inactive, or its outstanding-command count has
	 * reached the per-node queue depth: ask the mid-layer to retry.
	 */
	if (!ndlp || !NLP_CHK_NODE_ACT(ndlp))
		goto out_tgt_busy;
	if (atomic_read(&ndlp->cmd_pending) >= ndlp->cmd_qdepth)
		goto out_tgt_busy;

	lpfc_cmd = lpfc_get_scsi_buf(phba, ndlp);
	if (lpfc_cmd == NULL) {
		/* Buffer pool exhausted: ramp the queue depth down */
		lpfc_rampdown_queue_depth(phba);

		lpfc_printf_vlog(vport, KERN_INFO, LOG_FCP,
				 "0707 driver's buffer pool is empty, "
				 "IO busied\n");
		goto out_host_busy;
	}

	/*
	 * Bind the mid-layer command to the driver buffer for the
	 * completion phase.  timeout = 0 leaves firmware-level timing
	 * disabled here (see lpfc_scsi_prep_cmnd's ulpTimeout handling).
	 */
	lpfc_cmd->pCmd = cmnd;
	lpfc_cmd->rdata = rdata;
	lpfc_cmd->timeout = 0;
	lpfc_cmd->start_time = jiffies;
	cmnd->host_scribble = (unsigned char *)lpfc_cmd;

	if (scsi_get_prot_op(cmnd) != SCSI_PROT_NORMAL) {
		if (vport->phba->cfg_enable_bg) {
			lpfc_printf_vlog(vport,
					 KERN_INFO, LOG_SCSI_CMD,
					 "9033 BLKGRD: rcvd %s cmd:x%x "
					 "sector x%llx cnt %u pt %x\n",
					 dif_op_str[scsi_get_prot_op(cmnd)],
					 cmnd->cmnd[0],
					 (unsigned long long)scsi_get_lba(cmnd),
					 blk_rq_sectors(cmnd->request),
					 (cmnd->cmnd[1]>>5));
		}
		/* Protected command: build BlockGuard DMA descriptors */
		err = lpfc_bg_scsi_prep_dma_buf(phba, lpfc_cmd);
	} else {
		if (vport->phba->cfg_enable_bg) {
			lpfc_printf_vlog(vport,
					 KERN_INFO, LOG_SCSI_CMD,
					 "9038 BLKGRD: rcvd PROT_NORMAL cmd: "
					 "x%x sector x%llx cnt %u pt %x\n",
					 cmnd->cmnd[0],
					 (unsigned long long)scsi_get_lba(cmnd),
					 blk_rq_sectors(cmnd->request),
					 (cmnd->cmnd[1]>>5));
		}
		err = lpfc_scsi_prep_dma_buf(phba, lpfc_cmd);
	}

	if (err)
		goto out_host_busy_free_buf;

	lpfc_scsi_prep_cmnd(vport, lpfc_cmd, ndlp);

	/* cmd_pending is decremented on issue failure and at completion */
	atomic_inc(&ndlp->cmd_pending);
	err = lpfc_sli_issue_iocb(phba, LPFC_FCP_RING,
				  &lpfc_cmd->cur_iocbq, SLI_IOCB_RET_IOCB);
	if (err) {
		atomic_dec(&ndlp->cmd_pending);
		lpfc_printf_vlog(vport, KERN_INFO, LOG_FCP,
				 "3376 FCP could not issue IOCB err %x"
				 "FCP cmd x%x <%d/%llu> "
				 "sid: x%x did: x%x oxid: x%x "
				 "Data: x%x x%x x%x x%x\n",
				 err, cmnd->cmnd[0],
				 cmnd->device ? cmnd->device->id : 0xffff,
				 cmnd->device ? cmnd->device->lun : (u64) -1,
				 vport->fc_myDID, ndlp->nlp_DID,
				 phba->sli_rev == LPFC_SLI_REV4 ?
				 lpfc_cmd->cur_iocbq.sli4_xritag : 0xffff,
				 lpfc_cmd->cur_iocbq.iocb.ulpContext,
				 lpfc_cmd->cur_iocbq.iocb.ulpIoTag,
				 lpfc_cmd->cur_iocbq.iocb.ulpTimeout,
				 (uint32_t)
				 (cmnd->request->timeout / 1000));

		/* Unwind the DMA mapping and buffer before busying the host */
		goto out_host_busy_free_buf;
	}
	if (phba->cfg_poll & ENABLE_FCP_RING_POLLING) {
		lpfc_sli_handle_fast_ring_event(phba,
			&phba->sli.ring[LPFC_FCP_RING], HA_R0RE_REQ);

		if (phba->cfg_poll & DISABLE_FCP_RING_INT)
			lpfc_poll_rearm_timer(phba);
	}

	return 0;

 out_host_busy_free_buf:
	lpfc_scsi_unprep_dma_buf(phba, lpfc_cmd);
	lpfc_release_scsi_buf(phba, lpfc_cmd);
 out_host_busy:
	return SCSI_MLQUEUE_HOST_BUSY;

 out_tgt_busy:
	return SCSI_MLQUEUE_TARGET_BUSY;

 out_fail_command:
	cmnd->scsi_done(cmnd);
	return 0;
}
4773
4774
4775
4776
4777
4778
4779
4780
4781
4782
4783
4784
/**
 * lpfc_abort_handler - scsi_host_template eh_abort_handler entry point
 * @cmnd: Pointer to the scsi_cmnd to be aborted.
 *
 * Locates the driver buffer backing @cmnd, issues an ABTS (or CLOSE when
 * the link is down) to the firmware, and waits for the original command
 * to complete.  The hbalock is held across the state checks and abort
 * issue to keep the command from completing underneath us.
 *
 * Return: SUCCESS or FAILED (or the fc_block_scsi_eh() status).
 */
static int
lpfc_abort_handler(struct scsi_cmnd *cmnd)
{
	struct Scsi_Host *shost = cmnd->device->host;
	struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
	struct lpfc_hba *phba = vport->phba;
	struct lpfc_iocbq *iocb;
	struct lpfc_iocbq *abtsiocb;
	struct lpfc_scsi_buf *lpfc_cmd;
	IOCB_t *cmd, *icmd;
	int ret = SUCCESS, status = 0;
	struct lpfc_sli_ring *pring_s4;
	int ring_number, ret_val;
	unsigned long flags, iflags;
	DECLARE_WAIT_QUEUE_HEAD_ONSTACK(waitq);

	status = fc_block_scsi_eh(cmnd);
	if (status != 0 && status != SUCCESS)
		return status;

	spin_lock_irqsave(&phba->hbalock, flags);
	/* driver-queued commands are in the process of being flushed */
	if (phba->hba_flag & HBA_FCP_IOQ_FLUSH) {
		spin_unlock_irqrestore(&phba->hbalock, flags);
		lpfc_printf_vlog(vport, KERN_WARNING, LOG_FCP,
			"3168 SCSI Layer abort requested I/O has been "
			"flushed by LLD.\n");
		return FAILED;
	}

	lpfc_cmd = (struct lpfc_scsi_buf *)cmnd->host_scribble;
	if (!lpfc_cmd || !lpfc_cmd->pCmd) {
		/* Command already completed; nothing left to abort */
		spin_unlock_irqrestore(&phba->hbalock, flags);
		lpfc_printf_vlog(vport, KERN_WARNING, LOG_FCP,
			 "2873 SCSI Layer I/O Abort Request IO CMPL Status "
			 "x%x ID %d LUN %llu\n",
			 SUCCESS, cmnd->device->id, cmnd->device->lun);
		return SUCCESS;
	}

	iocb = &lpfc_cmd->cur_iocbq;
	/* the command is already in the process of being cancelled */
	if (!(iocb->iocb_flag & LPFC_IO_ON_TXCMPLQ)) {
		spin_unlock_irqrestore(&phba->hbalock, flags);
		lpfc_printf_vlog(vport, KERN_WARNING, LOG_FCP,
			"3169 SCSI Layer abort requested I/O has been "
			"cancelled by LLD.\n");
		return FAILED;
	}

	/*
	 * If pCmd points at a different SCSI command, the driver already
	 * completed this command but the mid-layer did not see it before
	 * the eh fired; just report success.
	 */
	if (lpfc_cmd->pCmd != cmnd) {
		lpfc_printf_vlog(vport, KERN_WARNING, LOG_FCP,
			"3170 SCSI Layer abort requested I/O has been "
			"completed by LLD.\n");
		goto out_unlock;
	}

	BUG_ON(iocb->context1 != lpfc_cmd);

	/* an abort issued earlier is still in progress; just wait on it */
	if (iocb->iocb_flag & LPFC_DRIVER_ABORTED) {
		lpfc_printf_vlog(vport, KERN_WARNING, LOG_FCP,
			 "3389 SCSI Layer I/O Abort Request is pending\n");
		spin_unlock_irqrestore(&phba->hbalock, flags);
		goto wait_for_cmpl;
	}

	abtsiocb = __lpfc_sli_get_iocbq(phba);
	if (abtsiocb == NULL) {
		ret = FAILED;
		goto out_unlock;
	}

	/* Mark the I/O as being aborted by the driver */
	iocb->iocb_flag |= LPFC_DRIVER_ABORTED;

	/*
	 * The command is in flight (pCmd still points at it and it is on
	 * the txcmplq), so build an abort IOCB referencing its context
	 * tag / XRI and send it to the firmware.
	 */
	cmd = &iocb->iocb;
	icmd = &abtsiocb->iocb;
	icmd->un.acxri.abortType = ABORT_TYPE_ABTS;
	icmd->un.acxri.abortContextTag = cmd->ulpContext;
	if (phba->sli_rev == LPFC_SLI_REV4)
		icmd->un.acxri.abortIoTag = iocb->sli4_xritag;
	else
		icmd->un.acxri.abortIoTag = cmd->ulpIoTag;

	icmd->ulpLe = 1;
	icmd->ulpClass = cmd->ulpClass;

	/* ABTS WQE must go to the same WQ as the WQE to be aborted */
	abtsiocb->fcp_wqidx = iocb->fcp_wqidx;
	abtsiocb->iocb_flag |= LPFC_USE_FCPWQIDX;
	if (iocb->iocb_flag & LPFC_IO_FOF)
		abtsiocb->iocb_flag |= LPFC_IO_FOF;

	/* ABTS on a live link; CLOSE when the link is down */
	if (lpfc_is_link_up(phba))
		icmd->ulpCommand = CMD_ABORT_XRI_CN;
	else
		icmd->ulpCommand = CMD_CLOSE_XRI_CN;

	abtsiocb->iocb_cmpl = lpfc_sli_abort_fcp_cmpl;
	abtsiocb->vport = vport;
	if (phba->sli_rev == LPFC_SLI_REV4) {
		ring_number = MAX_SLI3_CONFIGURED_RINGS + iocb->fcp_wqidx;
		pring_s4 = &phba->sli.ring[ring_number];
		/* SLI-4 issue path also needs the per-ring lock */
		spin_lock_irqsave(&pring_s4->ring_lock, iflags);
		ret_val = __lpfc_sli_issue_iocb(phba, pring_s4->ringno,
						abtsiocb, 0);
		spin_unlock_irqrestore(&pring_s4->ring_lock, iflags);
	} else {
		ret_val = __lpfc_sli_issue_iocb(phba, LPFC_FCP_RING,
						abtsiocb, 0);
	}
	/* the hbalock is no longer needed past this point */
	spin_unlock_irqrestore(&phba->hbalock, flags);


	if (ret_val == IOCB_ERROR) {
		lpfc_sli_release_iocbq(phba, abtsiocb);
		ret = FAILED;
		goto out;
	}

	if (phba->cfg_poll & DISABLE_FCP_RING_INT)
		lpfc_sli_handle_fast_ring_event(phba,
			&phba->sli.ring[LPFC_FCP_RING], HA_R0RE_REQ);

wait_for_cmpl:
	lpfc_cmd->waitq = &waitq;
	/* Wait (up to 2 x devloss_tmo) for the aborted I/O to complete */
	wait_event_timeout(waitq,
			  (lpfc_cmd->pCmd != cmnd),
			   msecs_to_jiffies(2*vport->cfg_devloss_tmo*1000));

	spin_lock_irqsave(shost->host_lock, flags);
	lpfc_cmd->waitq = NULL;
	spin_unlock_irqrestore(shost->host_lock, flags);

	if (lpfc_cmd->pCmd == cmnd) {
		/* Original command still outstanding: the abort timed out */
		ret = FAILED;
		lpfc_printf_vlog(vport, KERN_ERR, LOG_FCP,
				 "0748 abort handler timed out waiting "
				 "for abortng I/O (xri:x%x) to complete: "
				 "ret %#x, ID %d, LUN %llu\n",
				 iocb->sli4_xritag, ret,
				 cmnd->device->id, cmnd->device->lun);
	}
	goto out;

out_unlock:
	spin_unlock_irqrestore(&phba->hbalock, flags);
out:
	lpfc_printf_vlog(vport, KERN_WARNING, LOG_FCP,
			 "0749 SCSI Layer I/O Abort Request Status x%x ID %d "
			 "LUN %llu\n", ret, cmnd->device->id,
			 cmnd->device->lun);
	return ret;
}
4954
4955static char *
4956lpfc_taskmgmt_name(uint8_t task_mgmt_cmd)
4957{
4958 switch (task_mgmt_cmd) {
4959 case FCP_ABORT_TASK_SET:
4960 return "ABORT_TASK_SET";
4961 case FCP_CLEAR_TASK_SET:
4962 return "FCP_CLEAR_TASK_SET";
4963 case FCP_BUS_RESET:
4964 return "FCP_BUS_RESET";
4965 case FCP_LUN_RESET:
4966 return "FCP_LUN_RESET";
4967 case FCP_TARGET_RESET:
4968 return "FCP_TARGET_RESET";
4969 case FCP_CLEAR_ACA:
4970 return "FCP_CLEAR_ACA";
4971 case FCP_TERMINATE_TASK:
4972 return "FCP_TERMINATE_TASK";
4973 default:
4974 return "unknown";
4975 }
4976}
4977
4978
4979
4980
4981
4982
4983
4984
4985
4986
4987
4988
4989
4990static int
4991lpfc_check_fcp_rsp(struct lpfc_vport *vport, struct lpfc_scsi_buf *lpfc_cmd)
4992{
4993 struct fcp_rsp *fcprsp = lpfc_cmd->fcp_rsp;
4994 uint32_t rsp_info;
4995 uint32_t rsp_len;
4996 uint8_t rsp_info_code;
4997 int ret = FAILED;
4998
4999
5000 if (fcprsp == NULL)
5001 lpfc_printf_vlog(vport, KERN_INFO, LOG_FCP,
5002 "0703 fcp_rsp is missing\n");
5003 else {
5004 rsp_info = fcprsp->rspStatus2;
5005 rsp_len = be32_to_cpu(fcprsp->rspRspLen);
5006 rsp_info_code = fcprsp->rspInfo3;
5007
5008
5009 lpfc_printf_vlog(vport, KERN_INFO,
5010 LOG_FCP,
5011 "0706 fcp_rsp valid 0x%x,"
5012 " rsp len=%d code 0x%x\n",
5013 rsp_info,
5014 rsp_len, rsp_info_code);
5015
5016 if ((fcprsp->rspStatus2&RSP_LEN_VALID) && (rsp_len == 8)) {
5017 switch (rsp_info_code) {
5018 case RSP_NO_FAILURE:
5019 lpfc_printf_vlog(vport, KERN_INFO, LOG_FCP,
5020 "0715 Task Mgmt No Failure\n");
5021 ret = SUCCESS;
5022 break;
5023 case RSP_TM_NOT_SUPPORTED:
5024 lpfc_printf_vlog(vport, KERN_INFO, LOG_FCP,
5025 "0716 Task Mgmt Target "
5026 "reject\n");
5027 break;
5028 case RSP_TM_NOT_COMPLETED:
5029 lpfc_printf_vlog(vport, KERN_INFO, LOG_FCP,
5030 "0717 Task Mgmt Target "
5031 "failed TM\n");
5032 break;
5033 case RSP_TM_INVALID_LU:
5034 lpfc_printf_vlog(vport, KERN_INFO, LOG_FCP,
5035 "0718 Task Mgmt to invalid "
5036 "LUN\n");
5037 break;
5038 }
5039 }
5040 }
5041 return ret;
5042}
5043
5044
5045
5046
5047
5048
5049
5050
5051
5052
5053
5054
5055
5056
5057
5058
5059
/**
 * lpfc_send_taskmgmt - Issue an FCP task-management command synchronously
 * @vport: The virtual port for which this call is being executed.
 * @rdata: Remote port data holding the target node.
 * @tgt_id: Target id of the remote device (used for logging).
 * @lun_id: Lun the TMF is addressed to.
 * @task_mgmt_cmd: FCP task-management code to send.
 *
 * Builds the TMF IOCB, issues it with lpfc_sli_issue_iocb_wait(), and
 * interprets the completion.  On IOCB_TIMEDOUT the scsi buffer is
 * deliberately NOT released here: lpfc_tskmgmt_def_cmpl frees it when
 * the IOCB eventually completes.
 *
 * Return: SUCCESS, FAILED, or TIMEOUT_ERROR.
 */
static int
lpfc_send_taskmgmt(struct lpfc_vport *vport, struct lpfc_rport_data *rdata,
		    unsigned tgt_id, uint64_t lun_id,
		    uint8_t task_mgmt_cmd)
{
	struct lpfc_hba *phba = vport->phba;
	struct lpfc_scsi_buf *lpfc_cmd;
	struct lpfc_iocbq *iocbq;
	struct lpfc_iocbq *iocbqrsp;
	struct lpfc_nodelist *pnode = rdata->pnode;
	int ret;
	int status;

	if (!pnode || !NLP_CHK_NODE_ACT(pnode))
		return FAILED;

	lpfc_cmd = lpfc_get_scsi_buf(phba, rdata->pnode);
	if (lpfc_cmd == NULL)
		return FAILED;
	lpfc_cmd->timeout = phba->cfg_task_mgmt_tmo;
	lpfc_cmd->rdata = rdata;

	status = lpfc_scsi_prep_task_mgmt_cmd(vport, lpfc_cmd, lun_id,
					   task_mgmt_cmd);
	if (!status) {
		lpfc_release_scsi_buf(phba, lpfc_cmd);
		return FAILED;
	}

	iocbq = &lpfc_cmd->cur_iocbq;
	iocbqrsp = lpfc_sli_get_iocbq(phba);
	if (iocbqrsp == NULL) {
		lpfc_release_scsi_buf(phba, lpfc_cmd);
		return FAILED;
	}
	iocbq->iocb_cmpl = lpfc_tskmgmt_def_cmpl;

	lpfc_printf_vlog(vport, KERN_INFO, LOG_FCP,
			 "0702 Issue %s to TGT %d LUN %llu "
			 "rpi x%x nlp_flag x%x Data: x%x x%x\n",
			 lpfc_taskmgmt_name(task_mgmt_cmd), tgt_id, lun_id,
			 pnode->nlp_rpi, pnode->nlp_flag, iocbq->sli4_xritag,
			 iocbq->iocb_flag);

	status = lpfc_sli_issue_iocb_wait(phba, LPFC_FCP_RING,
					  iocbq, iocbqrsp, lpfc_cmd->timeout);
	if ((status != IOCB_SUCCESS) ||
	    (iocbqrsp->iocb.ulpStatus != IOSTAT_SUCCESS)) {
		lpfc_printf_vlog(vport, KERN_ERR, LOG_FCP,
			 "0727 TMF %s to TGT %d LUN %llu failed (%d, %d) "
			 "iocb_flag x%x\n",
			 lpfc_taskmgmt_name(task_mgmt_cmd),
			 tgt_id, lun_id, iocbqrsp->iocb.ulpStatus,
			 iocbqrsp->iocb.un.ulpWord[4],
			 iocbq->iocb_flag);
		/* Either the issue itself failed, or it succeeded but the
		 * firmware reported a non-success ulpStatus.
		 */
		if (status == IOCB_SUCCESS) {
			if (iocbqrsp->iocb.ulpStatus == IOSTAT_FCP_RSP_ERROR)
				/* FCP_RSP carries the TMF outcome; decode it */
				ret = lpfc_check_fcp_rsp(vport, lpfc_cmd);
			else
				ret = FAILED;
		} else if (status == IOCB_TIMEDOUT) {
			ret = TIMEOUT_ERROR;
		} else {
			ret = FAILED;
		}
		lpfc_cmd->status = IOSTAT_DRIVER_REJECT;
	} else
		ret = SUCCESS;

	lpfc_sli_release_iocbq(phba, iocbqrsp);

	/* On timeout the buffer is owned by the default completion handler */
	if (ret != TIMEOUT_ERROR)
		lpfc_release_scsi_buf(phba, lpfc_cmd);

	return ret;
}
5139
5140
5141
5142
5143
5144
5145
5146
5147
5148
5149
5150
5151
5152static int
5153lpfc_chk_tgt_mapped(struct lpfc_vport *vport, struct scsi_cmnd *cmnd)
5154{
5155 struct lpfc_rport_data *rdata;
5156 struct lpfc_nodelist *pnode;
5157 unsigned long later;
5158
5159 rdata = lpfc_rport_data_from_scsi_device(cmnd->device);
5160 if (!rdata) {
5161 lpfc_printf_vlog(vport, KERN_INFO, LOG_FCP,
5162 "0797 Tgt Map rport failure: rdata x%p\n", rdata);
5163 return FAILED;
5164 }
5165 pnode = rdata->pnode;
5166
5167
5168
5169
5170 later = msecs_to_jiffies(2 * vport->cfg_devloss_tmo * 1000) + jiffies;
5171 while (time_after(later, jiffies)) {
5172 if (!pnode || !NLP_CHK_NODE_ACT(pnode))
5173 return FAILED;
5174 if (pnode->nlp_state == NLP_STE_MAPPED_NODE)
5175 return SUCCESS;
5176 schedule_timeout_uninterruptible(msecs_to_jiffies(500));
5177 rdata = lpfc_rport_data_from_scsi_device(cmnd->device);
5178 if (!rdata)
5179 return FAILED;
5180 pnode = rdata->pnode;
5181 }
5182 if (!pnode || !NLP_CHK_NODE_ACT(pnode) ||
5183 (pnode->nlp_state != NLP_STE_MAPPED_NODE))
5184 return FAILED;
5185 return SUCCESS;
5186}
5187
5188
5189
5190
5191
5192
5193
5194
5195
5196
5197
5198
5199
5200
5201
5202
5203
5204static int
5205lpfc_reset_flush_io_context(struct lpfc_vport *vport, uint16_t tgt_id,
5206 uint64_t lun_id, lpfc_ctx_cmd context)
5207{
5208 struct lpfc_hba *phba = vport->phba;
5209 unsigned long later;
5210 int cnt;
5211
5212 cnt = lpfc_sli_sum_iocb(vport, tgt_id, lun_id, context);
5213 if (cnt)
5214 lpfc_sli_abort_taskmgmt(vport,
5215 &phba->sli.ring[phba->sli.fcp_ring],
5216 tgt_id, lun_id, context);
5217 later = msecs_to_jiffies(2 * vport->cfg_devloss_tmo * 1000) + jiffies;
5218 while (time_after(later, jiffies) && cnt) {
5219 schedule_timeout_uninterruptible(msecs_to_jiffies(20));
5220 cnt = lpfc_sli_sum_iocb(vport, tgt_id, lun_id, context);
5221 }
5222 if (cnt) {
5223 lpfc_printf_vlog(vport, KERN_ERR, LOG_FCP,
5224 "0724 I/O flush failure for context %s : cnt x%x\n",
5225 ((context == LPFC_CTX_LUN) ? "LUN" :
5226 ((context == LPFC_CTX_TGT) ? "TGT" :
5227 ((context == LPFC_CTX_HOST) ? "HOST" : "Unknown"))),
5228 cnt);
5229 return FAILED;
5230 }
5231 return SUCCESS;
5232}
5233
5234
5235
5236
5237
5238
5239
5240
5241
5242
5243
5244
/**
 * lpfc_device_reset_handler - scsi_host_template eh_device_reset_handler
 * @cmnd: Pointer to scsi_cmnd identifying the LUN to reset.
 *
 * Waits for the target node to be mapped, posts an LPFC_EVENT_LUNRESET
 * vendor event, issues FCP_LUN_RESET, and (on success) flushes the LUN's
 * outstanding I/O context.
 *
 * Return: SUCCESS or FAILED (or the fc_block_scsi_eh() status).
 */
static int
lpfc_device_reset_handler(struct scsi_cmnd *cmnd)
{
	struct Scsi_Host *shost = cmnd->device->host;
	struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
	struct lpfc_rport_data *rdata;
	struct lpfc_nodelist *pnode;
	unsigned tgt_id = cmnd->device->id;
	uint64_t lun_id = cmnd->device->lun;
	struct lpfc_scsi_event_header scsi_event;
	int status;

	rdata = lpfc_rport_data_from_scsi_device(cmnd->device);
	if (!rdata) {
		lpfc_printf_vlog(vport, KERN_ERR, LOG_FCP,
			"0798 Device Reset rport failure: rdata x%p\n", rdata);
		return FAILED;
	}
	pnode = rdata->pnode;
	status = fc_block_scsi_eh(cmnd);
	if (status != 0 && status != SUCCESS)
		return status;

	status = lpfc_chk_tgt_mapped(vport, cmnd);
	if (status == FAILED) {
		lpfc_printf_vlog(vport, KERN_ERR, LOG_FCP,
			"0721 Device Reset rport failure: rdata x%p\n", rdata);
		return FAILED;
	}

	/* NOTE(review): pnode was sampled before lpfc_chk_tgt_mapped()
	 * polled; verify it cannot still be NULL here, since the memcpys
	 * below dereference it unconditionally.
	 */
	scsi_event.event_type = FC_REG_SCSI_EVENT;
	scsi_event.subcategory = LPFC_EVENT_LUNRESET;
	scsi_event.lun = lun_id;
	memcpy(scsi_event.wwpn, &pnode->nlp_portname, sizeof(struct lpfc_name));
	memcpy(scsi_event.wwnn, &pnode->nlp_nodename, sizeof(struct lpfc_name));

	fc_host_post_vendor_event(shost, fc_get_event_number(),
		sizeof(scsi_event), (char *)&scsi_event, LPFC_NL_VENDOR_ID);

	status = lpfc_send_taskmgmt(vport, rdata, tgt_id, lun_id,
						FCP_LUN_RESET);

	lpfc_printf_vlog(vport, KERN_ERR, LOG_FCP,
			 "0713 SCSI layer issued Device Reset (%d, %llu) "
			 "return x%x\n", tgt_id, lun_id, status);

	/*
	 * I/O may have been orphaned by the TMF (or left in an
	 * indeterminate state if it failed), so flush the LUN context and
	 * report success only when the flush succeeds too.
	 */
	if (status == SUCCESS)
		status = lpfc_reset_flush_io_context(vport, tgt_id, lun_id,
						LPFC_CTX_LUN);

	return status;
}
5303
5304
5305
5306
5307
5308
5309
5310
5311
5312
5313
5314
5315static int
5316lpfc_target_reset_handler(struct scsi_cmnd *cmnd)
5317{
5318 struct Scsi_Host *shost = cmnd->device->host;
5319 struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
5320 struct lpfc_rport_data *rdata;
5321 struct lpfc_nodelist *pnode;
5322 unsigned tgt_id = cmnd->device->id;
5323 uint64_t lun_id = cmnd->device->lun;
5324 struct lpfc_scsi_event_header scsi_event;
5325 int status;
5326
5327 rdata = lpfc_rport_data_from_scsi_device(cmnd->device);
5328 if (!rdata) {
5329 lpfc_printf_vlog(vport, KERN_ERR, LOG_FCP,
5330 "0799 Target Reset rport failure: rdata x%p\n", rdata);
5331 return FAILED;
5332 }
5333 pnode = rdata->pnode;
5334 status = fc_block_scsi_eh(cmnd);
5335 if (status != 0 && status != SUCCESS)
5336 return status;
5337
5338 status = lpfc_chk_tgt_mapped(vport, cmnd);
5339 if (status == FAILED) {
5340 lpfc_printf_vlog(vport, KERN_ERR, LOG_FCP,
5341 "0722 Target Reset rport failure: rdata x%p\n", rdata);
5342 spin_lock_irq(shost->host_lock);
5343 pnode->nlp_flag &= ~NLP_NPR_ADISC;
5344 pnode->nlp_fcp_info &= ~NLP_FCP_2_DEVICE;
5345 spin_unlock_irq(shost->host_lock);
5346 lpfc_reset_flush_io_context(vport, tgt_id, lun_id,
5347 LPFC_CTX_TGT);
5348 return FAST_IO_FAIL;
5349 }
5350
5351 scsi_event.event_type = FC_REG_SCSI_EVENT;
5352 scsi_event.subcategory = LPFC_EVENT_TGTRESET;
5353 scsi_event.lun = 0;
5354 memcpy(scsi_event.wwpn, &pnode->nlp_portname, sizeof(struct lpfc_name));
5355 memcpy(scsi_event.wwnn, &pnode->nlp_nodename, sizeof(struct lpfc_name));
5356
5357 fc_host_post_vendor_event(shost, fc_get_event_number(),
5358 sizeof(scsi_event), (char *)&scsi_event, LPFC_NL_VENDOR_ID);
5359
5360 status = lpfc_send_taskmgmt(vport, rdata, tgt_id, lun_id,
5361 FCP_TARGET_RESET);
5362
5363 lpfc_printf_vlog(vport, KERN_ERR, LOG_FCP,
5364 "0723 SCSI layer issued Target Reset (%d, %llu) "
5365 "return x%x\n", tgt_id, lun_id, status);
5366
5367
5368
5369
5370
5371
5372
5373 if (status == SUCCESS)
5374 status = lpfc_reset_flush_io_context(vport, tgt_id, lun_id,
5375 LPFC_CTX_TGT);
5376 return status;
5377}
5378
5379
5380
5381
5382
5383
5384
5385
5386
5387
5388
5389
/**
 * lpfc_bus_reset_handler - scsi_host_template eh_bus_reset_handler entry
 * @cmnd: Pointer to scsi_cmnd that triggered the bus reset.
 *
 * Posts an LPFC_EVENT_BUSRESET vendor event, then walks every possible
 * target id, issuing FCP_TARGET_RESET to each mapped node with an rport
 * (skipping FCP-2 devices when cfg_fcp2_no_tgt_reset is set), and
 * finally flushes all outstanding I/O on the host context.
 *
 * Return: SUCCESS, or FAILED if any target reset or the final flush
 * failed (or the fc_block_scsi_eh() status).
 */
static int
lpfc_bus_reset_handler(struct scsi_cmnd *cmnd)
{
	struct Scsi_Host *shost = cmnd->device->host;
	struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
	struct lpfc_nodelist *ndlp = NULL;
	struct lpfc_scsi_event_header scsi_event;
	int match;
	int ret = SUCCESS, status, i;

	scsi_event.event_type = FC_REG_SCSI_EVENT;
	scsi_event.subcategory = LPFC_EVENT_BUSRESET;
	scsi_event.lun = 0;
	memcpy(scsi_event.wwpn, &vport->fc_portname, sizeof(struct lpfc_name));
	memcpy(scsi_event.wwnn, &vport->fc_nodename, sizeof(struct lpfc_name));

	fc_host_post_vendor_event(shost, fc_get_event_number(),
		sizeof(scsi_event), (char *)&scsi_event, LPFC_NL_VENDOR_ID);

	status = fc_block_scsi_eh(cmnd);
	if (status != 0 && status != SUCCESS)
		return status;

	/*
	 * Reset each mapped target in turn.  The node list is only
	 * scanned under the host lock; the TMF itself is issued after
	 * the lock is dropped.
	 */
	for (i = 0; i < LPFC_MAX_TARGET; i++) {
		/* Search for a mapped node with this target id */
		match = 0;
		spin_lock_irq(shost->host_lock);
		list_for_each_entry(ndlp, &vport->fc_nodes, nlp_listp) {
			if (!NLP_CHK_NODE_ACT(ndlp))
				continue;
			/* optionally skip FCP-2 (tape-class) devices */
			if (vport->phba->cfg_fcp2_no_tgt_reset &&
			    (ndlp->nlp_fcp_info & NLP_FCP_2_DEVICE))
				continue;
			if (ndlp->nlp_state == NLP_STE_MAPPED_NODE &&
			    ndlp->nlp_sid == i &&
			    ndlp->rport) {
				match = 1;
				break;
			}
		}
		spin_unlock_irq(shost->host_lock);
		if (!match)
			continue;

		status = lpfc_send_taskmgmt(vport, ndlp->rport->dd_data,
					i, 0, FCP_TARGET_RESET);

		if (status != SUCCESS) {
			lpfc_printf_vlog(vport, KERN_ERR, LOG_FCP,
					 "0700 Bus Reset on target %d failed\n",
					 i);
			ret = FAILED;
		}
	}

	/*
	 * I/O may have been orphaned by the resets (or left in an
	 * indeterminate state), so flush the whole host context and
	 * report success only when the flush succeeds too.
	 */
	status = lpfc_reset_flush_io_context(vport, 0, 0, LPFC_CTX_HOST);
	if (status != SUCCESS)
		ret = FAILED;

	lpfc_printf_vlog(vport, KERN_ERR, LOG_FCP,
			 "0714 SCSI layer issued Bus Reset Data: x%x\n", ret);
	return ret;
}
5464
5465
5466
5467
5468
5469
5470
5471
5472
5473
5474
5475
5476
5477
5478
5479
5480
5481static int
5482lpfc_host_reset_handler(struct scsi_cmnd *cmnd)
5483{
5484 struct Scsi_Host *shost = cmnd->device->host;
5485 struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
5486 struct lpfc_hba *phba = vport->phba;
5487 int rc, ret = SUCCESS;
5488
5489 lpfc_printf_vlog(vport, KERN_ERR, LOG_FCP,
5490 "3172 SCSI layer issued Host Reset Data:\n");
5491
5492 lpfc_offline_prep(phba, LPFC_MBX_WAIT);
5493 lpfc_offline(phba);
5494 rc = lpfc_sli_brdrestart(phba);
5495 if (rc)
5496 ret = FAILED;
5497 rc = lpfc_online(phba);
5498 if (rc)
5499 ret = FAILED;
5500 lpfc_unblock_mgmt_io(phba);
5501
5502 if (ret == FAILED) {
5503 lpfc_printf_vlog(vport, KERN_ERR, LOG_FCP,
5504 "3323 Failed host reset, bring it offline\n");
5505 lpfc_sli4_offline_eratt(phba);
5506 }
5507 return ret;
5508}
5509
5510
5511
5512
5513
5514
5515
5516
5517
5518
5519
5520
5521
5522
5523static int
5524lpfc_slave_alloc(struct scsi_device *sdev)
5525{
5526 struct lpfc_vport *vport = (struct lpfc_vport *) sdev->host->hostdata;
5527 struct lpfc_hba *phba = vport->phba;
5528 struct fc_rport *rport = starget_to_rport(scsi_target(sdev));
5529 uint32_t total = 0;
5530 uint32_t num_to_alloc = 0;
5531 int num_allocated = 0;
5532 uint32_t sdev_cnt;
5533 struct lpfc_device_data *device_data;
5534 unsigned long flags;
5535 struct lpfc_name target_wwpn;
5536
5537 if (!rport || fc_remote_port_chkready(rport))
5538 return -ENXIO;
5539
5540 if (phba->cfg_fof) {
5541
5542
5543
5544
5545
5546
5547 u64_to_wwn(rport->port_name, target_wwpn.u.wwn);
5548 spin_lock_irqsave(&phba->devicelock, flags);
5549 device_data = __lpfc_get_device_data(phba,
5550 &phba->luns,
5551 &vport->fc_portname,
5552 &target_wwpn,
5553 sdev->lun);
5554 if (!device_data) {
5555 spin_unlock_irqrestore(&phba->devicelock, flags);
5556 device_data = lpfc_create_device_data(phba,
5557 &vport->fc_portname,
5558 &target_wwpn,
5559 sdev->lun, true);
5560 if (!device_data)
5561 return -ENOMEM;
5562 spin_lock_irqsave(&phba->devicelock, flags);
5563 list_add_tail(&device_data->listentry, &phba->luns);
5564 }
5565 device_data->rport_data = rport->dd_data;
5566 device_data->available = true;
5567 spin_unlock_irqrestore(&phba->devicelock, flags);
5568 sdev->hostdata = device_data;
5569 } else {
5570 sdev->hostdata = rport->dd_data;
5571 }
5572 sdev_cnt = atomic_inc_return(&phba->sdev_cnt);
5573
5574
5575
5576
5577
5578
5579
5580
5581 total = phba->total_scsi_bufs;
5582 num_to_alloc = vport->cfg_lun_queue_depth + 2;
5583
5584
5585 if ((sdev_cnt * (vport->cfg_lun_queue_depth + 2)) < total)
5586 return 0;
5587
5588
5589 if (total >= phba->cfg_hba_queue_depth - LPFC_DISC_IOCB_BUFF_COUNT ) {
5590 lpfc_printf_vlog(vport, KERN_WARNING, LOG_FCP,
5591 "0704 At limitation of %d preallocated "
5592 "command buffers\n", total);
5593 return 0;
5594
5595 } else if (total + num_to_alloc >
5596 phba->cfg_hba_queue_depth - LPFC_DISC_IOCB_BUFF_COUNT ) {
5597 lpfc_printf_vlog(vport, KERN_WARNING, LOG_FCP,
5598 "0705 Allocation request of %d "
5599 "command buffers will exceed max of %d. "
5600 "Reducing allocation request to %d.\n",
5601 num_to_alloc, phba->cfg_hba_queue_depth,
5602 (phba->cfg_hba_queue_depth - total));
5603 num_to_alloc = phba->cfg_hba_queue_depth - total;
5604 }
5605 num_allocated = lpfc_new_scsi_buf(vport, num_to_alloc);
5606 if (num_to_alloc != num_allocated) {
5607 lpfc_printf_vlog(vport, KERN_ERR, LOG_FCP,
5608 "0708 Allocation request of %d "
5609 "command buffers did not succeed. "
5610 "Allocated %d buffers.\n",
5611 num_to_alloc, num_allocated);
5612 }
5613 if (num_allocated > 0)
5614 phba->total_scsi_bufs += num_allocated;
5615 return 0;
5616}
5617
5618
5619
5620
5621
5622
5623
5624
5625
5626
5627
5628
5629static int
5630lpfc_slave_configure(struct scsi_device *sdev)
5631{
5632 struct lpfc_vport *vport = (struct lpfc_vport *) sdev->host->hostdata;
5633 struct lpfc_hba *phba = vport->phba;
5634
5635 if (sdev->tagged_supported)
5636 scsi_activate_tcq(sdev, vport->cfg_lun_queue_depth);
5637 else
5638 scsi_deactivate_tcq(sdev, vport->cfg_lun_queue_depth);
5639
5640 if (phba->cfg_poll & ENABLE_FCP_RING_POLLING) {
5641 lpfc_sli_handle_fast_ring_event(phba,
5642 &phba->sli.ring[LPFC_FCP_RING], HA_R0RE_REQ);
5643 if (phba->cfg_poll & DISABLE_FCP_RING_INT)
5644 lpfc_poll_rearm_timer(phba);
5645 }
5646
5647 return 0;
5648}
5649
5650
5651
5652
5653
5654
5655
5656static void
5657lpfc_slave_destroy(struct scsi_device *sdev)
5658{
5659 struct lpfc_vport *vport = (struct lpfc_vport *) sdev->host->hostdata;
5660 struct lpfc_hba *phba = vport->phba;
5661 unsigned long flags;
5662 struct lpfc_device_data *device_data = sdev->hostdata;
5663
5664 atomic_dec(&phba->sdev_cnt);
5665 if ((phba->cfg_fof) && (device_data)) {
5666 spin_lock_irqsave(&phba->devicelock, flags);
5667 device_data->available = false;
5668 if (!device_data->oas_enabled)
5669 lpfc_delete_device_data(phba, device_data);
5670 spin_unlock_irqrestore(&phba->devicelock, flags);
5671 }
5672 sdev->hostdata = NULL;
5673 return;
5674}
5675
5676
5677
5678
5679
5680
5681
5682
5683
5684
5685
5686
5687
5688
5689
5690
5691
5692
5693
5694struct lpfc_device_data*
5695lpfc_create_device_data(struct lpfc_hba *phba, struct lpfc_name *vport_wwpn,
5696 struct lpfc_name *target_wwpn, uint64_t lun,
5697 bool atomic_create)
5698{
5699
5700 struct lpfc_device_data *lun_info;
5701 int memory_flags;
5702
5703 if (unlikely(!phba) || !vport_wwpn || !target_wwpn ||
5704 !(phba->cfg_fof))
5705 return NULL;
5706
5707
5708
5709 if (atomic_create)
5710 memory_flags = GFP_ATOMIC;
5711 else
5712 memory_flags = GFP_KERNEL;
5713 lun_info = mempool_alloc(phba->device_data_mem_pool, memory_flags);
5714 if (!lun_info)
5715 return NULL;
5716 INIT_LIST_HEAD(&lun_info->listentry);
5717 lun_info->rport_data = NULL;
5718 memcpy(&lun_info->device_id.vport_wwpn, vport_wwpn,
5719 sizeof(struct lpfc_name));
5720 memcpy(&lun_info->device_id.target_wwpn, target_wwpn,
5721 sizeof(struct lpfc_name));
5722 lun_info->device_id.lun = lun;
5723 lun_info->oas_enabled = false;
5724 lun_info->available = false;
5725 return lun_info;
5726}
5727
5728
5729
5730
5731
5732
5733
5734
5735
5736void
5737lpfc_delete_device_data(struct lpfc_hba *phba,
5738 struct lpfc_device_data *lun_info)
5739{
5740
5741 if (unlikely(!phba) || !lun_info ||
5742 !(phba->cfg_fof))
5743 return;
5744
5745 if (!list_empty(&lun_info->listentry))
5746 list_del(&lun_info->listentry);
5747 mempool_free(lun_info, phba->device_data_mem_pool);
5748 return;
5749}
5750
5751
5752
5753
5754
5755
5756
5757
5758
5759
5760
5761
5762
5763
5764
5765
5766
5767struct lpfc_device_data*
5768__lpfc_get_device_data(struct lpfc_hba *phba, struct list_head *list,
5769 struct lpfc_name *vport_wwpn,
5770 struct lpfc_name *target_wwpn, uint64_t lun)
5771{
5772
5773 struct lpfc_device_data *lun_info;
5774
5775 if (unlikely(!phba) || !list || !vport_wwpn || !target_wwpn ||
5776 !phba->cfg_fof)
5777 return NULL;
5778
5779
5780
5781 list_for_each_entry(lun_info, list, listentry) {
5782 if ((memcmp(&lun_info->device_id.vport_wwpn, vport_wwpn,
5783 sizeof(struct lpfc_name)) == 0) &&
5784 (memcmp(&lun_info->device_id.target_wwpn, target_wwpn,
5785 sizeof(struct lpfc_name)) == 0) &&
5786 (lun_info->device_id.lun == lun))
5787 return lun_info;
5788 }
5789
5790 return NULL;
5791}
5792
5793
5794
5795
5796
5797
5798
5799
5800
5801
5802
5803
5804
5805
5806
5807
5808
5809
5810
5811
5812
5813
5814
5815
5816
5817
5818
/**
 * lpfc_find_next_oas_lun - find an OAS-enabled lun and the one after it
 * @phba: Pointer to host bus adapter structure.
 * @vport_wwpn: vport wwpn to match; a wwn of 0 matches any vport.
 * @target_wwpn: target wwpn to match; a wwn of 0 matches any target.
 * @starting_lun: in: lun to find (FIND_FIRST_OAS_LUN for the first
 *                match); out: the next matching lun for a subsequent
 *                call, or NO_MORE_OAS_LUN when no more exist.
 * @found_vport_wwpn: out: vport wwpn of the found lun.
 * @found_target_wwpn: out: target wwpn of the found lun.
 * @found_lun: out: the found lun, or NO_MORE_OAS_LUN.
 * @found_lun_status: out: OAS_LUN_STATUS_EXISTS when the found lun is
 *                    currently available, else 0.
 *
 * Walks phba->luns under devicelock for OAS-enabled entries matching
 * the (possibly wildcarded) wwpns.  The first match of @starting_lun
 * fills the found_* outputs; the walk then continues so the NEXT match
 * can be written back into @starting_lun/@vport_wwpn/@target_wwpn,
 * making repeated calls iterate the whole set.  When the cfg_oas_flags
 * wildcard bits are set, the corresponding in/out wwpn is zeroed after
 * a match so the wildcard persists across calls.
 *
 * Return: true when a lun was found, false otherwise (including bad
 * arguments, OAS disabled, or @starting_lun already NO_MORE_OAS_LUN).
 */
bool
lpfc_find_next_oas_lun(struct lpfc_hba *phba, struct lpfc_name *vport_wwpn,
		       struct lpfc_name *target_wwpn, uint64_t *starting_lun,
		       struct lpfc_name *found_vport_wwpn,
		       struct lpfc_name *found_target_wwpn,
		       uint64_t *found_lun,
		       uint32_t *found_lun_status)
{

	unsigned long flags;
	struct lpfc_device_data *lun_info;
	struct lpfc_device_id *device_id;
	uint64_t lun;
	bool found = false;

	if (unlikely(!phba) || !vport_wwpn || !target_wwpn ||
	    !starting_lun || !found_vport_wwpn ||
	    !found_target_wwpn || !found_lun || !found_lun_status ||
	    (*starting_lun == NO_MORE_OAS_LUN) ||
	    !phba->cfg_fof)
		return false;

	lun = *starting_lun;
	/* Default the outputs to "nothing found / nothing next". */
	*found_lun = NO_MORE_OAS_LUN;
	*starting_lun = NO_MORE_OAS_LUN;

	/* Search for lun or the lun after that. */
	spin_lock_irqsave(&phba->devicelock, flags);
	list_for_each_entry(lun_info, &phba->luns, listentry) {
		/* A zero wwn in either key acts as a wildcard. */
		if (((wwn_to_u64(vport_wwpn->u.wwn) == 0) ||
		     (memcmp(&lun_info->device_id.vport_wwpn, vport_wwpn,
			    sizeof(struct lpfc_name)) == 0)) &&
		    ((wwn_to_u64(target_wwpn->u.wwn) == 0) ||
		     (memcmp(&lun_info->device_id.target_wwpn, target_wwpn,
			    sizeof(struct lpfc_name)) == 0)) &&
		    (lun_info->oas_enabled)) {
			device_id = &lun_info->device_id;
			if ((!found) &&
			    ((lun == FIND_FIRST_OAS_LUN) ||
			     (device_id->lun == lun))) {
				/* First match: report this lun. */
				*found_lun = device_id->lun;
				memcpy(found_vport_wwpn,
				       &device_id->vport_wwpn,
				       sizeof(struct lpfc_name));
				memcpy(found_target_wwpn,
				       &device_id->target_wwpn,
				       sizeof(struct lpfc_name));
				if (lun_info->available)
					*found_lun_status =
						OAS_LUN_STATUS_EXISTS;
				else
					*found_lun_status = 0;
				if (phba->cfg_oas_flags & OAS_FIND_ANY_VPORT)
					memset(vport_wwpn, 0x0,
					       sizeof(struct lpfc_name));
				if (phba->cfg_oas_flags & OAS_FIND_ANY_TARGET)
					memset(target_wwpn, 0x0,
					       sizeof(struct lpfc_name));
				found = true;
			} else if (found) {
				/* Second match: seed the next iteration. */
				*starting_lun = device_id->lun;
				memcpy(vport_wwpn, &device_id->vport_wwpn,
				       sizeof(struct lpfc_name));
				memcpy(target_wwpn, &device_id->target_wwpn,
				       sizeof(struct lpfc_name));
				break;
			}
		}
	}
	spin_unlock_irqrestore(&phba->devicelock, flags);
	return found;
}
5892
5893
5894
5895
5896
5897
5898
5899
5900
5901
5902
5903
5904
5905
5906
5907
5908
5909
5910
5911
5912
5913bool
5914lpfc_enable_oas_lun(struct lpfc_hba *phba, struct lpfc_name *vport_wwpn,
5915 struct lpfc_name *target_wwpn, uint64_t lun)
5916{
5917
5918 struct lpfc_device_data *lun_info;
5919 unsigned long flags;
5920
5921 if (unlikely(!phba) || !vport_wwpn || !target_wwpn ||
5922 !phba->cfg_fof)
5923 return false;
5924
5925 spin_lock_irqsave(&phba->devicelock, flags);
5926
5927
5928 lun_info = __lpfc_get_device_data(phba, &phba->luns, vport_wwpn,
5929 target_wwpn, lun);
5930 if (lun_info) {
5931 if (!lun_info->oas_enabled)
5932 lun_info->oas_enabled = true;
5933 spin_unlock_irqrestore(&phba->devicelock, flags);
5934 return true;
5935 }
5936
5937
5938 lun_info = lpfc_create_device_data(phba, vport_wwpn, target_wwpn, lun,
5939 false);
5940 if (lun_info) {
5941 lun_info->oas_enabled = true;
5942 lun_info->available = false;
5943 list_add_tail(&lun_info->listentry, &phba->luns);
5944 spin_unlock_irqrestore(&phba->devicelock, flags);
5945 return true;
5946 }
5947 spin_unlock_irqrestore(&phba->devicelock, flags);
5948 return false;
5949}
5950
5951
5952
5953
5954
5955
5956
5957
5958
5959
5960
5961
5962
5963
5964
5965
5966
5967
5968
5969
5970bool
5971lpfc_disable_oas_lun(struct lpfc_hba *phba, struct lpfc_name *vport_wwpn,
5972 struct lpfc_name *target_wwpn, uint64_t lun)
5973{
5974
5975 struct lpfc_device_data *lun_info;
5976 unsigned long flags;
5977
5978 if (unlikely(!phba) || !vport_wwpn || !target_wwpn ||
5979 !phba->cfg_fof)
5980 return false;
5981
5982 spin_lock_irqsave(&phba->devicelock, flags);
5983
5984
5985 lun_info = __lpfc_get_device_data(phba,
5986 &phba->luns, vport_wwpn,
5987 target_wwpn, lun);
5988 if (lun_info) {
5989 lun_info->oas_enabled = false;
5990 if (!lun_info->available)
5991 lpfc_delete_device_data(phba, lun_info);
5992 spin_unlock_irqrestore(&phba->devicelock, flags);
5993 return true;
5994 }
5995
5996 spin_unlock_irqrestore(&phba->devicelock, flags);
5997 return false;
5998}
5999
/*
 * SCSI host template for physical-port hosts.  Compared with
 * lpfc_vport_template below, this one additionally registers an
 * eh_host_reset_handler, a vendor_id, and the pport attribute set
 * (lpfc_hba_attrs instead of lpfc_vport_attrs).
 */
struct scsi_host_template lpfc_template = {
	.module = THIS_MODULE,
	.name = LPFC_DRIVER_NAME,
	.info = lpfc_info,
	.queuecommand = lpfc_queuecommand,
	.eh_abort_handler = lpfc_abort_handler,
	.eh_device_reset_handler = lpfc_device_reset_handler,
	.eh_target_reset_handler = lpfc_target_reset_handler,
	.eh_bus_reset_handler = lpfc_bus_reset_handler,
	.eh_host_reset_handler = lpfc_host_reset_handler,
	.slave_alloc = lpfc_slave_alloc,
	.slave_configure = lpfc_slave_configure,
	.slave_destroy = lpfc_slave_destroy,
	.scan_finished = lpfc_scan_finished,
	.this_id = -1,
	.sg_tablesize = LPFC_DEFAULT_SG_SEG_CNT,
	.cmd_per_lun = LPFC_CMD_PER_LUN,
	.use_clustering = ENABLE_CLUSTERING,
	.shost_attrs = lpfc_hba_attrs,
	.max_sectors = 0xFFFF,
	.vendor_id = LPFC_NL_VENDOR_ID,
	.change_queue_depth = lpfc_change_queue_depth,
	.change_queue_type = lpfc_change_queue_type,
};
6024
/*
 * SCSI host template for NPIV vports.  Identical hooks to lpfc_template
 * except that it omits eh_host_reset_handler and vendor_id and uses the
 * vport attribute set (lpfc_vport_attrs).
 */
struct scsi_host_template lpfc_vport_template = {
	.module = THIS_MODULE,
	.name = LPFC_DRIVER_NAME,
	.info = lpfc_info,
	.queuecommand = lpfc_queuecommand,
	.eh_abort_handler = lpfc_abort_handler,
	.eh_device_reset_handler = lpfc_device_reset_handler,
	.eh_target_reset_handler = lpfc_target_reset_handler,
	.eh_bus_reset_handler = lpfc_bus_reset_handler,
	.slave_alloc = lpfc_slave_alloc,
	.slave_configure = lpfc_slave_configure,
	.slave_destroy = lpfc_slave_destroy,
	.scan_finished = lpfc_scan_finished,
	.this_id = -1,
	.sg_tablesize = LPFC_DEFAULT_SG_SEG_CNT,
	.cmd_per_lun = LPFC_CMD_PER_LUN,
	.use_clustering = ENABLE_CLUSTERING,
	.shost_attrs = lpfc_vport_attrs,
	.max_sectors = 0xFFFF,
	.change_queue_depth = lpfc_change_queue_depth,
	.change_queue_type = lpfc_change_queue_type,
};
6047