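/*
 * QLogic FCoE Offload Driver: I/O path.
 *
 * Command pool management, firmware task context setup, SCSI command
 * posting, completion handling, and ABTS/cleanup error recovery.
 */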
#include <linux/spinlock.h>
#include <linux/vmalloc.h>
#include "qedf.h"
#include <scsi/scsi_tcq.h>

void qedf_cmd_timer_set(struct qedf_ctx *qedf, struct qedf_ioreq *io_req,
	unsigned int timer_msec)
{
	queue_delayed_work(qedf->timer_work_queue, &io_req->timeout_work,
	    msecs_to_jiffies(timer_msec));
}

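/*
 * Delayed-work handler that runs when an ABTS, ELS, or sequence-cleanup
 * request outlives the timer armed via qedf_cmd_timer_set().
 */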
static void qedf_cmd_timeout(struct work_struct *work)
{
	struct qedf_ioreq *io_req =
	    container_of(work, struct qedf_ioreq, timeout_work.work);
	struct qedf_ctx *qedf;
	struct qedf_rport *fcport;
	u8 op = 0;

	if (io_req == NULL) {
		QEDF_INFO(NULL, QEDF_LOG_IO, "io_req is NULL.\n");
		return;
	}

	fcport = io_req->fcport;
	if (fcport == NULL) {
		QEDF_INFO(NULL, QEDF_LOG_IO, "fcport is NULL.\n");
		return;
	}

	qedf = fcport->qedf;

	switch (io_req->cmd_type) {
	case QEDF_ABTS:
		if (qedf == NULL) {
			QEDF_INFO(NULL, QEDF_LOG_IO,
			    "qedf is NULL for ABTS xid=0x%x.\n",
			    io_req->xid);
			return;
		}

		QEDF_ERR(&(qedf->dbg_ctx), "ABTS timeout, xid=0x%x.\n",
		    io_req->xid);

		qedf_initiate_cleanup(io_req, true);
		complete(&io_req->abts_done);
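
		/*
		 * Drop the reference taken when the ABTS was issued; the
		 * normal ABTS completion path will not run now that the
		 * task has been cleaned up.
		 */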
		kref_put(&io_req->refcount, qedf_release_cmd);

		clear_bit(QEDF_CMD_IN_ABORT, &io_req->flags);

		qedf_restart_rport(fcport);
		break;
	case QEDF_ELS:
		if (!qedf) {
			QEDF_INFO(NULL, QEDF_LOG_IO,
			    "qedf is NULL for ELS xid=0x%x.\n",
			    io_req->xid);
			return;
		}

		clear_bit(QEDF_CMD_OUTSTANDING, &io_req->flags);

		kref_get(&io_req->refcount);
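
		/*
		 * Don't attempt an ABTS or cleanup for a timed-out ELS;
		 * just invoke the callback and free the resources of the
		 * original request here.
		 */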
		QEDF_ERR(&(qedf->dbg_ctx), "ELS timeout, xid=0x%x.\n",
		    io_req->xid);
		io_req->event = QEDF_IOREQ_EV_ELS_TMO;

		if (io_req->cb_func && io_req->cb_arg) {
			op = io_req->cb_arg->op;
			io_req->cb_func(io_req->cb_arg);
			io_req->cb_arg = NULL;
		}
		qedf_initiate_cleanup(io_req, true);
		kref_put(&io_req->refcount, qedf_release_cmd);
		break;
	case QEDF_SEQ_CLEANUP:
		QEDF_ERR(&(qedf->dbg_ctx), "Sequence cleanup timeout, "
		    "xid=0x%x.\n", io_req->xid);
		qedf_initiate_cleanup(io_req, true);
		io_req->event = QEDF_IOREQ_EV_ELS_TMO;
		qedf_process_seq_cleanup_compl(qedf, NULL, io_req);
		break;
	default:
		break;
	}
}

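/*
 * Tear down everything qedf_cmd_mgr_alloc() built: the per-command BD
 * tables, the io_bdt pool, and the sense buffers and task parameter
 * structures attached to each command.
 */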
void qedf_cmd_mgr_free(struct qedf_cmd_mgr *cmgr)
{
	struct io_bdt *bdt_info;
	struct qedf_ctx *qedf = cmgr->qedf;
	size_t bd_tbl_sz;
	u16 min_xid = 0;
	u16 max_xid = (FCOE_PARAMS_NUM_TASKS - 1);
	int num_ios;
	int i;
	struct qedf_ioreq *io_req;

	num_ios = max_xid - min_xid + 1;

	if (!cmgr->io_bdt_pool)
		goto free_cmd_pool;

	bd_tbl_sz = QEDF_MAX_BDS_PER_CMD * sizeof(struct scsi_sge);
	for (i = 0; i < num_ios; i++) {
		bdt_info = cmgr->io_bdt_pool[i];
		if (bdt_info->bd_tbl) {
			dma_free_coherent(&qedf->pdev->dev, bd_tbl_sz,
			    bdt_info->bd_tbl, bdt_info->bd_tbl_dma);
			bdt_info->bd_tbl = NULL;
		}
	}

	for (i = 0; i < num_ios; i++) {
		kfree(cmgr->io_bdt_pool[i]);
		cmgr->io_bdt_pool[i] = NULL;
	}

	kfree(cmgr->io_bdt_pool);
	cmgr->io_bdt_pool = NULL;

free_cmd_pool:

	for (i = 0; i < num_ios; i++) {
		io_req = &cmgr->cmds[i];
		kfree(io_req->sgl_task_params);
		kfree(io_req->task_params);

		if (io_req->sense_buffer)
			dma_free_coherent(&qedf->pdev->dev,
			    QEDF_SCSI_SENSE_BUFFERSIZE, io_req->sense_buffer,
			    io_req->sense_buffer_dma);
		cancel_delayed_work_sync(&io_req->rrq_work);
	}

	vfree(cmgr);
}

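/*
 * Delayed worker that sends an RRQ to reclaim the exchange once R_A_TOV
 * has elapsed after a successful ABTS.
 */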
static void qedf_handle_rrq(struct work_struct *work)
{
	struct qedf_ioreq *io_req =
	    container_of(work, struct qedf_ioreq, rrq_work.work);

	atomic_set(&io_req->state, QEDFC_CMD_ST_RRQ_ACTIVE);
	qedf_send_rrq(io_req);
}

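/*
 * Allocate the command manager: one qedf_ioreq per xid in
 * [0, FCOE_PARAMS_NUM_TASKS), each with a DMA sense buffer and the task
 * parameter structures needed to build firmware task contexts.
 */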
struct qedf_cmd_mgr *qedf_cmd_mgr_alloc(struct qedf_ctx *qedf)
{
	struct qedf_cmd_mgr *cmgr;
	struct io_bdt *bdt_info;
	struct qedf_ioreq *io_req;
	u16 xid;
	int i;
	int num_ios;
	u16 min_xid = 0;
	u16 max_xid = (FCOE_PARAMS_NUM_TASKS - 1);

	if (!qedf->num_queues) {
		QEDF_ERR(&(qedf->dbg_ctx), "num_queues is not set.\n");
		return NULL;
	}

	if (max_xid <= min_xid || max_xid == FC_XID_UNKNOWN) {
		QEDF_WARN(&(qedf->dbg_ctx), "Invalid min_xid 0x%x and "
		    "max_xid 0x%x.\n", min_xid, max_xid);
		return NULL;
	}

	QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_DISC, "min xid 0x%x, max xid "
	    "0x%x.\n", min_xid, max_xid);

	num_ios = max_xid - min_xid + 1;

	cmgr = vzalloc(sizeof(struct qedf_cmd_mgr));
	if (!cmgr) {
		QEDF_WARN(&(qedf->dbg_ctx), "Failed to alloc cmd mgr.\n");
		return NULL;
	}

	cmgr->qedf = qedf;
	spin_lock_init(&cmgr->lock);

	xid = 0;

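	/*
	 * Initialize each io_req up front: timeout and RRQ work items, the
	 * xid, and the per-command DMA/heap allocations.
	 */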
	for (i = 0; i < num_ios; i++) {
		io_req = &cmgr->cmds[i];
		INIT_DELAYED_WORK(&io_req->timeout_work, qedf_cmd_timeout);

		io_req->xid = xid++;

		INIT_DELAYED_WORK(&io_req->rrq_work, qedf_handle_rrq);

		io_req->sense_buffer = dma_alloc_coherent(&qedf->pdev->dev,
		    QEDF_SCSI_SENSE_BUFFERSIZE, &io_req->sense_buffer_dma,
		    GFP_KERNEL);
		if (!io_req->sense_buffer)
			goto mem_err;

		io_req->task_params = kzalloc(sizeof(*io_req->task_params),
					      GFP_KERNEL);
		if (!io_req->task_params) {
			QEDF_ERR(&(qedf->dbg_ctx),
				 "Failed to allocate task_params for xid=0x%x\n",
				 i);
			goto mem_err;
		}

		io_req->sgl_task_params = kzalloc(
		    sizeof(struct scsi_sgl_task_params), GFP_KERNEL);
		if (!io_req->sgl_task_params) {
			QEDF_ERR(&(qedf->dbg_ctx),
				 "Failed to allocate sgl_task_params for xid=0x%x\n",
				 i);
			goto mem_err;
		}
	}

	cmgr->io_bdt_pool = kmalloc_array(num_ios, sizeof(struct io_bdt *),
					  GFP_KERNEL);
	if (!cmgr->io_bdt_pool) {
		QEDF_WARN(&(qedf->dbg_ctx), "Failed to alloc io_bdt_pool.\n");
		goto mem_err;
	}

	for (i = 0; i < num_ios; i++) {
		cmgr->io_bdt_pool[i] = kmalloc(sizeof(struct io_bdt),
					       GFP_KERNEL);
		if (!cmgr->io_bdt_pool[i]) {
			QEDF_WARN(&(qedf->dbg_ctx),
				  "Failed to alloc io_bdt_pool[%d].\n", i);
			goto mem_err;
		}
	}

	for (i = 0; i < num_ios; i++) {
		bdt_info = cmgr->io_bdt_pool[i];
		bdt_info->bd_tbl = dma_alloc_coherent(&qedf->pdev->dev,
		    QEDF_MAX_BDS_PER_CMD * sizeof(struct scsi_sge),
		    &bdt_info->bd_tbl_dma, GFP_KERNEL);
		if (!bdt_info->bd_tbl) {
			QEDF_WARN(&(qedf->dbg_ctx),
				  "Failed to alloc bdt_tbl[%d].\n", i);
			goto mem_err;
		}
	}
	atomic_set(&cmgr->free_list_cnt, num_ios);
	QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_IO,
		  "cmgr->free_list_cnt=%d.\n",
		  atomic_read(&cmgr->free_list_cnt));

	return cmgr;

mem_err:
	qedf_cmd_mgr_free(cmgr);
	return NULL;
}

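/*
 * Allocate a free command from the pool for this fcport, enforcing the
 * per-connection and global outstanding-task limits. Returns NULL when no
 * command is available.
 */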
struct qedf_ioreq *qedf_alloc_cmd(struct qedf_rport *fcport, u8 cmd_type)
{
	struct qedf_ctx *qedf = fcport->qedf;
	struct qedf_cmd_mgr *cmd_mgr = qedf->cmd_mgr;
	struct qedf_ioreq *io_req = NULL;
	struct io_bdt *bd_tbl;
	u16 xid;
	uint32_t free_sqes;
	int i;
	unsigned long flags;

	free_sqes = atomic_read(&fcport->free_sqes);

	if (!free_sqes) {
		QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_IO,
		    "Returning NULL, free_sqes=%d.\n",
		    free_sqes);
		goto out_failed;
	}

	if ((atomic_read(&fcport->num_active_ios) >=
	    NUM_RW_TASKS_PER_CONNECTION)) {
		QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_IO,
		    "Returning NULL, num_active_ios=%d.\n",
		    atomic_read(&fcport->num_active_ios));
		goto out_failed;
	}

	if (atomic_read(&cmd_mgr->free_list_cnt) <= GBL_RSVD_TASKS) {
		QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_IO,
		    "Returning NULL, free_list_cnt=%d.\n",
		    atomic_read(&cmd_mgr->free_list_cnt));
		goto out_failed;
	}

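	/*
	 * Round-robin scan of the pool, under cmd_mgr->lock, for an entry
	 * that is not currently allocated.
	 */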
	spin_lock_irqsave(&cmd_mgr->lock, flags);
	for (i = 0; i < FCOE_PARAMS_NUM_TASKS; i++) {
		io_req = &cmd_mgr->cmds[cmd_mgr->idx];
		cmd_mgr->idx++;
		if (cmd_mgr->idx == FCOE_PARAMS_NUM_TASKS)
			cmd_mgr->idx = 0;

		if (!io_req->alloc)
			break;
	}

	if (i == FCOE_PARAMS_NUM_TASKS) {
		spin_unlock_irqrestore(&cmd_mgr->lock, flags);
		goto out_failed;
	}

	if (test_bit(QEDF_CMD_DIRTY, &io_req->flags))
		QEDF_ERR(&qedf->dbg_ctx,
			 "io_req found to be dirty ox_id = 0x%x.\n",
			 io_req->xid);

	io_req->flags = 0;
	io_req->alloc = 1;
	spin_unlock_irqrestore(&cmd_mgr->lock, flags);

	atomic_inc(&fcport->num_active_ios);
	atomic_dec(&fcport->free_sqes);
	xid = io_req->xid;
	atomic_dec(&cmd_mgr->free_list_cnt);

	io_req->cmd_mgr = cmd_mgr;
	io_req->fcport = fcport;

	io_req->sc_cmd = NULL;
	io_req->lun = -1;

	kref_init(&io_req->refcount);
	atomic_set(&io_req->state, QEDFC_CMD_ST_IO_ACTIVE);

	bd_tbl = io_req->bd_tbl = cmd_mgr->io_bdt_pool[xid];
	if (bd_tbl == NULL) {
		QEDF_ERR(&(qedf->dbg_ctx), "bd_tbl is NULL, xid=%x.\n", xid);
		kref_put(&io_req->refcount, qedf_release_cmd);
		goto out_failed;
	}
	bd_tbl->io_req = io_req;
	io_req->cmd_type = cmd_type;
	io_req->tm_flags = 0;

	io_req->rx_buf_off = 0;
	io_req->tx_buf_off = 0;
	io_req->rx_id = 0xffff;

	return io_req;

out_failed:
	qedf->alloc_failures++;
	return NULL;
}

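/* Free the DMA buffers backing a middle-path (ELS/TMF) request. */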
static void qedf_free_mp_resc(struct qedf_ioreq *io_req)
{
	struct qedf_mp_req *mp_req = &(io_req->mp_req);
	struct qedf_ctx *qedf = io_req->fcport->qedf;
	uint64_t sz = sizeof(struct scsi_sge);

	if (mp_req->mp_req_bd) {
		dma_free_coherent(&qedf->pdev->dev, sz,
		    mp_req->mp_req_bd, mp_req->mp_req_bd_dma);
		mp_req->mp_req_bd = NULL;
	}
	if (mp_req->mp_resp_bd) {
		dma_free_coherent(&qedf->pdev->dev, sz,
		    mp_req->mp_resp_bd, mp_req->mp_resp_bd_dma);
		mp_req->mp_resp_bd = NULL;
	}
	if (mp_req->req_buf) {
		dma_free_coherent(&qedf->pdev->dev, QEDF_PAGE_SIZE,
		    mp_req->req_buf, mp_req->req_buf_dma);
		mp_req->req_buf = NULL;
	}
	if (mp_req->resp_buf) {
		dma_free_coherent(&qedf->pdev->dev, QEDF_PAGE_SIZE,
		    mp_req->resp_buf, mp_req->resp_buf_dma);
		mp_req->resp_buf = NULL;
	}
}

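/*
 * kref release callback: invoked when the last reference to an io_req is
 * dropped; returns the command to the pool.
 */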
void qedf_release_cmd(struct kref *ref)
{
	struct qedf_ioreq *io_req =
	    container_of(ref, struct qedf_ioreq, refcount);
	struct qedf_cmd_mgr *cmd_mgr = io_req->cmd_mgr;
	struct qedf_rport *fcport = io_req->fcport;
	unsigned long flags;

	if (io_req->cmd_type == QEDF_SCSI_CMD)
		WARN_ON(io_req->sc_cmd);

	if (io_req->cmd_type == QEDF_ELS ||
	    io_req->cmd_type == QEDF_TASK_MGMT_CMD)
		qedf_free_mp_resc(io_req);

	atomic_inc(&cmd_mgr->free_list_cnt);
	atomic_dec(&fcport->num_active_ios);
	atomic_set(&io_req->state, QEDF_CMD_ST_INACTIVE);
	if (atomic_read(&fcport->num_active_ios) < 0)
		QEDF_WARN(&(fcport->qedf->dbg_ctx), "active_ios < 0.\n");

	io_req->task_retry_identifier++;
	io_req->fcport = NULL;

	clear_bit(QEDF_CMD_DIRTY, &io_req->flags);
	io_req->cpu = 0;
	spin_lock_irqsave(&cmd_mgr->lock, flags);
	io_req->fcport = NULL;
	io_req->alloc = 0;
	spin_unlock_irqrestore(&cmd_mgr->lock, flags);
}

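/*
 * DMA-map the scatterlist of a SCSI command and fill the hardware buffer
 * descriptor table, classifying the request as a fast or slow SGE based on
 * element count, direction, and the size of the middle elements.
 */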
static int qedf_map_sg(struct qedf_ioreq *io_req)
{
	struct scsi_cmnd *sc = io_req->sc_cmd;
	struct Scsi_Host *host = sc->device->host;
	struct fc_lport *lport = shost_priv(host);
	struct qedf_ctx *qedf = lport_priv(lport);
	struct scsi_sge *bd = io_req->bd_tbl->bd_tbl;
	struct scatterlist *sg;
	int byte_count = 0;
	int sg_count = 0;
	int bd_count = 0;
	u32 sg_len;
	u64 addr, end_addr;
	int i = 0;

	sg_count = dma_map_sg(&qedf->pdev->dev, scsi_sglist(sc),
	    scsi_sg_count(sc), sc->sc_data_direction);
	sg = scsi_sglist(sc);

	io_req->sge_type = QEDF_IOREQ_UNKNOWN_SGE;

	if (sg_count <= 8 || io_req->io_req_flags == QEDF_READ)
		io_req->sge_type = QEDF_IOREQ_FAST_SGE;

	scsi_for_each_sg(sc, sg, sg_count, i) {
		sg_len = (u32)sg_dma_len(sg);
		addr = (u64)sg_dma_address(sg);
		end_addr = (u64)(addr + sg_len);

		if (io_req->sge_type == QEDF_IOREQ_UNKNOWN_SGE && (i) &&
		    (i != (sg_count - 1)) && sg_len < QEDF_PAGE_SIZE)
			io_req->sge_type = QEDF_IOREQ_SLOW_SGE;

		bd[bd_count].sge_addr.lo = cpu_to_le32(U64_LO(addr));
		bd[bd_count].sge_addr.hi = cpu_to_le32(U64_HI(addr));
		bd[bd_count].sge_len = cpu_to_le32(sg_len);

		bd_count++;
		byte_count += sg_len;
	}

	if (io_req->sge_type == QEDF_IOREQ_UNKNOWN_SGE)
		io_req->sge_type = QEDF_IOREQ_FAST_SGE;

	if (byte_count != scsi_bufflen(sc))
		QEDF_ERR(&(qedf->dbg_ctx), "byte_count = %d != "
		    "scsi_bufflen = %d, task_id = 0x%x.\n", byte_count,
		    scsi_bufflen(sc), io_req->xid);

	return bd_count;
}

static int qedf_build_bd_list_from_sg(struct qedf_ioreq *io_req)
{
	struct scsi_cmnd *sc = io_req->sc_cmd;
	struct scsi_sge *bd = io_req->bd_tbl->bd_tbl;
	int bd_count;

	if (scsi_sg_count(sc)) {
		bd_count = qedf_map_sg(io_req);
		if (bd_count == 0)
			return -ENOMEM;
	} else {
		bd_count = 0;
		bd[0].sge_addr.lo = bd[0].sge_addr.hi = 0;
		bd[0].sge_len = 0;
	}
	io_req->bd_tbl->bd_valid = bd_count;

	return 0;
}

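/* Build the FCP_CMND payload for a SCSI or task management request. */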
static void qedf_build_fcp_cmnd(struct qedf_ioreq *io_req,
	struct fcp_cmnd *fcp_cmnd)
{
	struct scsi_cmnd *sc_cmd = io_req->sc_cmd;

	memset(fcp_cmnd, 0, FCP_CMND_LEN);

	int_to_scsilun(sc_cmd->device->lun,
	    (struct scsi_lun *)&fcp_cmnd->fc_lun);

	fcp_cmnd->fc_pri_ta = 0;
	fcp_cmnd->fc_tm_flags = io_req->tm_flags;
	fcp_cmnd->fc_flags = io_req->io_req_flags;
	fcp_cmnd->fc_cmdref = 0;

	if (io_req->cmd_type == QEDF_TASK_MGMT_CMD) {
		fcp_cmnd->fc_flags |= FCP_CFL_RDDATA;
	} else {
		if (sc_cmd->sc_data_direction == DMA_TO_DEVICE)
			fcp_cmnd->fc_flags |= FCP_CFL_WRDATA;
		else if (sc_cmd->sc_data_direction == DMA_FROM_DEVICE)
			fcp_cmnd->fc_flags |= FCP_CFL_RDDATA;
	}

	fcp_cmnd->fc_pri_ta = FCP_PTA_SIMPLE;

	if (io_req->cmd_type != QEDF_TASK_MGMT_CMD)
		memcpy(fcp_cmnd->fc_cdb, sc_cmd->cmnd, sc_cmd->cmd_len);

	fcp_cmnd->fc_dl = htonl(io_req->data_xfer_len);
}

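/*
 * Populate the firmware task context and task parameters for a SCSI
 * read/write (or TMF) request before it is posted to the send queue.
 */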
static void qedf_init_task(struct qedf_rport *fcport, struct fc_lport *lport,
	struct qedf_ioreq *io_req, struct e4_fcoe_task_context *task_ctx,
	struct fcoe_wqe *sqe)
{
	enum fcoe_task_type task_type;
	struct scsi_cmnd *sc_cmd = io_req->sc_cmd;
	struct io_bdt *bd_tbl = io_req->bd_tbl;
	u8 fcp_cmnd[32];
	u32 tmp_fcp_cmnd[8];
	int bd_count = 0;
	struct qedf_ctx *qedf = fcport->qedf;
	uint16_t cq_idx = smp_processor_id() % qedf->num_queues;
	struct regpair sense_data_buffer_phys_addr;
	u32 tx_io_size = 0;
	u32 rx_io_size = 0;
	int i, cnt;

	io_req->task = task_ctx;
	memset(task_ctx, 0, sizeof(struct e4_fcoe_task_context));
	memset(io_req->task_params, 0, sizeof(struct fcoe_task_params));
	memset(io_req->sgl_task_params, 0, sizeof(struct scsi_sgl_task_params));

	if (io_req->cmd_type == QEDF_TASK_MGMT_CMD) {
		task_type = FCOE_TASK_TYPE_READ_INITIATOR;
	} else {
		if (sc_cmd->sc_data_direction == DMA_TO_DEVICE) {
			task_type = FCOE_TASK_TYPE_WRITE_INITIATOR;
			tx_io_size = io_req->data_xfer_len;
		} else {
			task_type = FCOE_TASK_TYPE_READ_INITIATOR;
			rx_io_size = io_req->data_xfer_len;
		}
	}

	io_req->task_params->context = task_ctx;
	io_req->task_params->sqe = sqe;
	io_req->task_params->task_type = task_type;
	io_req->task_params->tx_io_size = tx_io_size;
	io_req->task_params->rx_io_size = rx_io_size;
	io_req->task_params->conn_cid = fcport->fw_cid;
	io_req->task_params->itid = io_req->xid;
	io_req->task_params->cq_rss_number = cq_idx;
	io_req->task_params->is_tape_device = fcport->dev_type;

	if (io_req->cmd_type != QEDF_TASK_MGMT_CMD) {
		bd_count = bd_tbl->bd_valid;
		io_req->sgl_task_params->sgl = bd_tbl->bd_tbl;
		io_req->sgl_task_params->sgl_phys_addr.lo =
		    U64_LO(bd_tbl->bd_tbl_dma);
		io_req->sgl_task_params->sgl_phys_addr.hi =
		    U64_HI(bd_tbl->bd_tbl_dma);
		io_req->sgl_task_params->num_sges = bd_count;
		io_req->sgl_task_params->total_buffer_size =
		    scsi_bufflen(io_req->sc_cmd);
		if (io_req->sge_type == QEDF_IOREQ_SLOW_SGE)
			io_req->sgl_task_params->small_mid_sge = 1;
		else
			io_req->sgl_task_params->small_mid_sge = 0;
	}

	sense_data_buffer_phys_addr.lo = U64_LO(io_req->sense_buffer_dma);
	sense_data_buffer_phys_addr.hi = U64_HI(io_req->sense_buffer_dma);

	qedf_build_fcp_cmnd(io_req, (struct fcp_cmnd *)tmp_fcp_cmnd);

	cnt = sizeof(struct fcp_cmnd) / sizeof(u32);
	for (i = 0; i < cnt; i++)
		tmp_fcp_cmnd[i] = cpu_to_be32(tmp_fcp_cmnd[i]);
	memcpy(fcp_cmnd, tmp_fcp_cmnd, sizeof(struct fcp_cmnd));

	init_initiator_rw_fcoe_task(io_req->task_params,
				    io_req->sgl_task_params,
				    sense_data_buffer_phys_addr,
				    io_req->task_retry_identifier, fcp_cmnd);

	if (io_req->sge_type == QEDF_IOREQ_SLOW_SGE)
		qedf->slow_sge_ios++;
	else
		qedf->fast_sge_ios++;
}

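/*
 * Initialize the firmware task context for a middle-path request (ELS or
 * task management), including the FC header fields and the single-SGE
 * request and response buffers.
 */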
void qedf_init_mp_task(struct qedf_ioreq *io_req,
	struct e4_fcoe_task_context *task_ctx, struct fcoe_wqe *sqe)
{
	struct qedf_mp_req *mp_req = &(io_req->mp_req);
	struct qedf_rport *fcport = io_req->fcport;
	struct qedf_ctx *qedf = io_req->fcport->qedf;
	struct fc_frame_header *fc_hdr;
	struct fcoe_tx_mid_path_params task_fc_hdr;
	struct scsi_sgl_task_params tx_sgl_task_params;
	struct scsi_sgl_task_params rx_sgl_task_params;

	QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_DISC,
		  "Initializing MP task for cmd_type=%d\n",
		  io_req->cmd_type);

	qedf->control_requests++;

	memset(&tx_sgl_task_params, 0, sizeof(struct scsi_sgl_task_params));
	memset(&rx_sgl_task_params, 0, sizeof(struct scsi_sgl_task_params));
	memset(task_ctx, 0, sizeof(struct e4_fcoe_task_context));
	memset(&task_fc_hdr, 0, sizeof(struct fcoe_tx_mid_path_params));

	io_req->task = task_ctx;

	io_req->task_params->context = task_ctx;
	io_req->task_params->sqe = sqe;
	io_req->task_params->task_type = FCOE_TASK_TYPE_MIDPATH;
	io_req->task_params->tx_io_size = io_req->data_xfer_len;

	io_req->task_params->rx_io_size = PAGE_SIZE;
	io_req->task_params->conn_cid = fcport->fw_cid;
	io_req->task_params->itid = io_req->xid;

	io_req->task_params->cq_rss_number = 0;
	io_req->task_params->is_tape_device = fcport->dev_type;

	fc_hdr = &(mp_req->req_fc_hdr);

	fc_hdr->fh_ox_id = io_req->xid;
	fc_hdr->fh_rx_id = htons(0xffff);

	task_fc_hdr.parameter = fc_hdr->fh_parm_offset;
	task_fc_hdr.r_ctl = fc_hdr->fh_r_ctl;
	task_fc_hdr.type = fc_hdr->fh_type;
	task_fc_hdr.cs_ctl = fc_hdr->fh_cs_ctl;
	task_fc_hdr.df_ctl = fc_hdr->fh_df_ctl;
	task_fc_hdr.rx_id = fc_hdr->fh_rx_id;
	task_fc_hdr.ox_id = fc_hdr->fh_ox_id;

	tx_sgl_task_params.sgl = mp_req->mp_req_bd;
	tx_sgl_task_params.sgl_phys_addr.lo = U64_LO(mp_req->mp_req_bd_dma);
	tx_sgl_task_params.sgl_phys_addr.hi = U64_HI(mp_req->mp_req_bd_dma);
	tx_sgl_task_params.num_sges = 1;

	tx_sgl_task_params.total_buffer_size = io_req->data_xfer_len;
	tx_sgl_task_params.small_mid_sge = 0;

	rx_sgl_task_params.sgl = mp_req->mp_resp_bd;
	rx_sgl_task_params.sgl_phys_addr.lo = U64_LO(mp_req->mp_resp_bd_dma);
	rx_sgl_task_params.sgl_phys_addr.hi = U64_HI(mp_req->mp_resp_bd_dma);
	rx_sgl_task_params.num_sges = 1;

	rx_sgl_task_params.total_buffer_size = PAGE_SIZE;
	rx_sgl_task_params.small_mid_sge = 0;

	init_initiator_midpath_unsolicited_fcoe_task(io_req->task_params,
						     &task_fc_hdr,
						     &tx_sgl_task_params,
						     &rx_sgl_task_params, 0);
}

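/*
 * Return the current SQ producer index and advance both the driver and
 * firmware producer indices. Callers are expected to hold
 * fcport->rport_lock.
 */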
u16 qedf_get_sqe_idx(struct qedf_rport *fcport)
{
	uint16_t total_sqe = (fcport->sq_mem_size)/(sizeof(struct fcoe_wqe));
	u16 rval;

	rval = fcport->sq_prod_idx;

	fcport->sq_prod_idx++;
	fcport->fw_sq_prod_idx++;
	if (fcport->sq_prod_idx == total_sqe)
		fcport->sq_prod_idx = 0;

	return rval;
}

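/*
 * Notify the hardware of new SQ entries. The barriers are intended to make
 * the SQ writes globally visible before the doorbell register write.
 */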
void qedf_ring_doorbell(struct qedf_rport *fcport)
{
	struct fcoe_db_data dbell = { 0 };

	dbell.agg_flags = 0;

	dbell.params |= DB_DEST_XCM << FCOE_DB_DATA_DEST_SHIFT;
	dbell.params |= DB_AGG_CMD_SET << FCOE_DB_DATA_AGG_CMD_SHIFT;
	dbell.params |= DQ_XCM_FCOE_SQ_PROD_CMD <<
	    FCOE_DB_DATA_AGG_VAL_SEL_SHIFT;

	dbell.sq_prod = fcport->fw_sq_prod_idx;

	wmb();
	barrier();
	writel(*(u32 *)&dbell, fcport->p_doorbell);

	wmb();
}

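/* Record a request or response event in the debugfs I/O trace ring. */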
static void qedf_trace_io(struct qedf_rport *fcport, struct qedf_ioreq *io_req,
	int8_t direction)
{
	struct qedf_ctx *qedf = fcport->qedf;
	struct qedf_io_log *io_log;
	struct scsi_cmnd *sc_cmd = io_req->sc_cmd;
	unsigned long flags;
	uint8_t op;

	spin_lock_irqsave(&qedf->io_trace_lock, flags);

	io_log = &qedf->io_trace_buf[qedf->io_trace_idx];
	io_log->direction = direction;
	io_log->task_id = io_req->xid;
	io_log->port_id = fcport->rdata->ids.port_id;
	io_log->lun = sc_cmd->device->lun;
	io_log->op = op = sc_cmd->cmnd[0];
	io_log->lba[0] = sc_cmd->cmnd[2];
	io_log->lba[1] = sc_cmd->cmnd[3];
	io_log->lba[2] = sc_cmd->cmnd[4];
	io_log->lba[3] = sc_cmd->cmnd[5];
	io_log->bufflen = scsi_bufflen(sc_cmd);
	io_log->sg_count = scsi_sg_count(sc_cmd);
	io_log->result = sc_cmd->result;
	io_log->jiffies = jiffies;
	io_log->refcount = kref_read(&io_req->refcount);

	if (direction == QEDF_IO_TRACE_REQ) {
		io_log->req_cpu = io_req->cpu;
		io_log->int_cpu = 0;
		io_log->rsp_cpu = 0;
	} else if (direction == QEDF_IO_TRACE_RSP) {
		io_log->req_cpu = io_req->cpu;
		io_log->int_cpu = io_req->int_cpu;
		io_log->rsp_cpu = smp_processor_id();
	}

	io_log->sge_type = io_req->sge_type;

	qedf->io_trace_idx++;
	if (qedf->io_trace_idx == QEDF_IO_TRACE_SIZE)
		qedf->io_trace_idx = 0;

	spin_unlock_irqrestore(&qedf->io_trace_lock, flags);
}

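/*
 * Map the command's SG list, initialize the firmware task context, and
 * post the request to the send queue. Called with fcport->rport_lock held.
 */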
int qedf_post_io_req(struct qedf_rport *fcport, struct qedf_ioreq *io_req)
{
	struct scsi_cmnd *sc_cmd = io_req->sc_cmd;
	struct Scsi_Host *host = sc_cmd->device->host;
	struct fc_lport *lport = shost_priv(host);
	struct qedf_ctx *qedf = lport_priv(lport);
	struct e4_fcoe_task_context *task_ctx;
	u16 xid;
	enum fcoe_task_type req_type = 0;
	struct fcoe_wqe *sqe;
	u16 sqe_idx;

	io_req->data_xfer_len = scsi_bufflen(sc_cmd);
	sc_cmd->SCp.ptr = (char *)io_req;
	io_req->sge_type = QEDF_IOREQ_FAST_SGE;

	io_req->cpu = smp_processor_id();

	if (sc_cmd->sc_data_direction == DMA_FROM_DEVICE) {
		req_type = FCOE_TASK_TYPE_READ_INITIATOR;
		io_req->io_req_flags = QEDF_READ;
		qedf->input_requests++;
	} else if (sc_cmd->sc_data_direction == DMA_TO_DEVICE) {
		req_type = FCOE_TASK_TYPE_WRITE_INITIATOR;
		io_req->io_req_flags = QEDF_WRITE;
		qedf->output_requests++;
	} else {
		io_req->io_req_flags = 0;
		qedf->control_requests++;
	}

	xid = io_req->xid;

	if (qedf_build_bd_list_from_sg(io_req)) {
		QEDF_ERR(&(qedf->dbg_ctx), "BD list creation failed.\n");
		io_req->sc_cmd = NULL;
		kref_put(&io_req->refcount, qedf_release_cmd);
		return -EAGAIN;
	}

	if (!test_bit(QEDF_RPORT_SESSION_READY, &fcport->flags) ||
	    test_bit(QEDF_RPORT_UPLOADING_CONNECTION, &fcport->flags)) {
		QEDF_ERR(&(qedf->dbg_ctx), "Session not offloaded yet.\n");
		io_req->sc_cmd = NULL;
		kref_put(&io_req->refcount, qedf_release_cmd);
		return -EINVAL;
	}

	io_req->lun = (int)sc_cmd->device->lun;

	sqe_idx = qedf_get_sqe_idx(fcport);
	sqe = &fcport->sq[sqe_idx];
	memset(sqe, 0, sizeof(struct fcoe_wqe));

	task_ctx = qedf_get_task_mem(&qedf->tasks, xid);
	if (!task_ctx) {
		QEDF_WARN(&(qedf->dbg_ctx), "task_ctx is NULL, xid=%d.\n",
			  xid);
		io_req->sc_cmd = NULL;
		kref_put(&io_req->refcount, qedf_release_cmd);
		return -EINVAL;
	}

	qedf_init_task(fcport, lport, io_req, task_ctx, sqe);

	qedf_ring_doorbell(fcport);

	set_bit(QEDF_CMD_OUTSTANDING, &io_req->flags);

	if (qedf_io_tracing && io_req->sc_cmd)
		qedf_trace_io(fcport, io_req, QEDF_IO_TRACE_REQ);

	return 0;
}

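/*
 * scsi_host_template queuecommand handler: validates link, rport, and
 * resource state, allocates a command, and hands it to qedf_post_io_req().
 */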
int
qedf_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *sc_cmd)
{
	struct fc_lport *lport = shost_priv(host);
	struct qedf_ctx *qedf = lport_priv(lport);
	struct fc_rport *rport = starget_to_rport(scsi_target(sc_cmd->device));
	struct fc_rport_libfc_priv *rp = rport->dd_data;
	struct qedf_rport *fcport;
	struct qedf_ioreq *io_req;
	int rc = 0;
	int rval;
	unsigned long flags = 0;
	int num_sgs = 0;

	num_sgs = scsi_sg_count(sc_cmd);
	if (scsi_sg_count(sc_cmd) > QEDF_MAX_BDS_PER_CMD) {
		QEDF_ERR(&qedf->dbg_ctx,
			 "Number of SG elements %d exceeds the hardware limit of %d.\n",
			 num_sgs, QEDF_MAX_BDS_PER_CMD);
		sc_cmd->result = DID_ERROR << 16;
		sc_cmd->scsi_done(sc_cmd);
		return 0;
	}

	if (test_bit(QEDF_UNLOADING, &qedf->flags) ||
	    test_bit(QEDF_DBG_STOP_IO, &qedf->flags)) {
		sc_cmd->result = DID_NO_CONNECT << 16;
		sc_cmd->scsi_done(sc_cmd);
		return 0;
	}

	if (!qedf->pdev->msix_enabled) {
		QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_IO,
		    "Completing sc_cmd=%p DID_NO_CONNECT as MSI-X is not enabled.\n",
		    sc_cmd);
		sc_cmd->result = DID_NO_CONNECT << 16;
		sc_cmd->scsi_done(sc_cmd);
		return 0;
	}

	rval = fc_remote_port_chkready(rport);
	if (rval) {
		sc_cmd->result = rval;
		sc_cmd->scsi_done(sc_cmd);
		return 0;
	}

	if (test_bit(QEDF_DRAIN_ACTIVE, &qedf->flags)) {
		rc = SCSI_MLQUEUE_HOST_BUSY;
		goto exit_qcmd;
	}

	if (lport->state != LPORT_ST_READY ||
	    atomic_read(&qedf->link_state) != QEDF_LINK_UP) {
		rc = SCSI_MLQUEUE_HOST_BUSY;
		goto exit_qcmd;
	}

	fcport = (struct qedf_rport *)&rp[1];

	if (!test_bit(QEDF_RPORT_SESSION_READY, &fcport->flags) ||
	    test_bit(QEDF_RPORT_UPLOADING_CONNECTION, &fcport->flags)) {
		rc = SCSI_MLQUEUE_TARGET_BUSY;
		goto exit_qcmd;
	}

	atomic_inc(&fcport->ios_to_queue);

	if (fcport->retry_delay_timestamp) {
		if (time_after(jiffies, fcport->retry_delay_timestamp)) {
			fcport->retry_delay_timestamp = 0;
		} else {
			rc = SCSI_MLQUEUE_TARGET_BUSY;
			atomic_dec(&fcport->ios_to_queue);
			goto exit_qcmd;
		}
	}

	io_req = qedf_alloc_cmd(fcport, QEDF_SCSI_CMD);
	if (!io_req) {
		rc = SCSI_MLQUEUE_HOST_BUSY;
		atomic_dec(&fcport->ios_to_queue);
		goto exit_qcmd;
	}

	io_req->sc_cmd = sc_cmd;

	spin_lock_irqsave(&fcport->rport_lock, flags);
	if (qedf_post_io_req(fcport, io_req)) {
		QEDF_WARN(&(qedf->dbg_ctx), "Unable to post io_req\n");
		atomic_inc(&fcport->free_sqes);
		rc = SCSI_MLQUEUE_HOST_BUSY;
	}
	spin_unlock_irqrestore(&fcport->rport_lock, flags);
	atomic_dec(&fcport->ios_to_queue);

exit_qcmd:
	return rc;
}

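/*
 * Decode the FCP_RSP payload from a completion CQE: residual count, SCSI
 * status, optional response code, and sense data.
 */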
static void qedf_parse_fcp_rsp(struct qedf_ioreq *io_req,
	struct fcoe_cqe_rsp_info *fcp_rsp)
{
	struct scsi_cmnd *sc_cmd = io_req->sc_cmd;
	struct qedf_ctx *qedf = io_req->fcport->qedf;
	u8 rsp_flags = fcp_rsp->rsp_flags.flags;
	int fcp_sns_len = 0;
	int fcp_rsp_len = 0;
	uint8_t *rsp_info, *sense_data;

	io_req->fcp_status = FC_GOOD;
	io_req->fcp_resid = 0;
	if (rsp_flags & (FCOE_FCP_RSP_FLAGS_FCP_RESID_OVER |
	    FCOE_FCP_RSP_FLAGS_FCP_RESID_UNDER))
		io_req->fcp_resid = fcp_rsp->fcp_resid;

	io_req->scsi_comp_flags = rsp_flags;
	CMD_SCSI_STATUS(sc_cmd) = io_req->cdb_status =
	    fcp_rsp->scsi_status_code;

	if (rsp_flags &
	    FCOE_FCP_RSP_FLAGS_FCP_RSP_LEN_VALID)
		fcp_rsp_len = fcp_rsp->fcp_rsp_len;

	if (rsp_flags &
	    FCOE_FCP_RSP_FLAGS_FCP_SNS_LEN_VALID)
		fcp_sns_len = fcp_rsp->fcp_sns_len;

	io_req->fcp_rsp_len = fcp_rsp_len;
	io_req->fcp_sns_len = fcp_sns_len;
	rsp_info = sense_data = io_req->sense_buffer;

	if ((fcp_rsp_len == 4) || (fcp_rsp_len == 8)) {
		io_req->fcp_rsp_code = rsp_info[3];
		QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_IO,
		    "fcp_rsp_code = %d\n", io_req->fcp_rsp_code);
		sense_data += fcp_rsp_len;
	}

	if (fcp_sns_len > SCSI_SENSE_BUFFERSIZE) {
		QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_IO,
		    "Truncating sense buffer\n");
		fcp_sns_len = SCSI_SENSE_BUFFERSIZE;
	}

	if (sc_cmd->sense_buffer) {
		memset(sc_cmd->sense_buffer, 0, SCSI_SENSE_BUFFERSIZE);
		if (fcp_sns_len)
			memcpy(sc_cmd->sense_buffer, sense_data,
			    fcp_sns_len);
	}
}

static void qedf_unmap_sg_list(struct qedf_ctx *qedf, struct qedf_ioreq *io_req)
{
	struct scsi_cmnd *sc = io_req->sc_cmd;

	if (io_req->bd_tbl->bd_valid && sc && scsi_sg_count(sc)) {
		dma_unmap_sg(&qedf->pdev->dev, scsi_sglist(sc),
		    scsi_sg_count(sc), sc->sc_data_direction);
		io_req->bd_tbl->bd_valid = 0;
	}
}

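/*
 * Process a SCSI command completion CQE: validate command state, parse the
 * FCP_RSP, translate the result for the midlayer, and complete the
 * scsi_cmnd.
 */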
void qedf_scsi_completion(struct qedf_ctx *qedf, struct fcoe_cqe *cqe,
	struct qedf_ioreq *io_req)
{
	u16 xid;
	struct e4_fcoe_task_context *task_ctx;
	struct scsi_cmnd *sc_cmd;
	struct fcoe_cqe_rsp_info *fcp_rsp;
	struct qedf_rport *fcport;
	int refcount;
	u16 scope, qualifier = 0;
	u8 fw_residual_flag = 0;

	if (!io_req)
		return;
	if (!cqe)
		return;

	if (!test_bit(QEDF_CMD_OUTSTANDING, &io_req->flags) ||
	    test_bit(QEDF_CMD_IN_CLEANUP, &io_req->flags) ||
	    test_bit(QEDF_CMD_IN_ABORT, &io_req->flags)) {
		QEDF_ERR(&qedf->dbg_ctx,
			 "io_req xid=0x%x already in cleanup or abort processing or already completed.\n",
			 io_req->xid);
		return;
	}

	xid = io_req->xid;
	task_ctx = qedf_get_task_mem(&qedf->tasks, xid);
	sc_cmd = io_req->sc_cmd;
	fcp_rsp = &cqe->cqe_info.rsp_info;

	if (!sc_cmd) {
		QEDF_WARN(&(qedf->dbg_ctx), "sc_cmd is NULL!\n");
		return;
	}

	if (!sc_cmd->SCp.ptr) {
		QEDF_WARN(&(qedf->dbg_ctx), "SCp.ptr is NULL, returned in "
		    "another context.\n");
		return;
	}

	if (!sc_cmd->device) {
		QEDF_ERR(&qedf->dbg_ctx,
			 "Device for sc_cmd %p is NULL.\n", sc_cmd);
		return;
	}

	if (!sc_cmd->request) {
		QEDF_WARN(&(qedf->dbg_ctx), "sc_cmd->request is NULL, "
		    "sc_cmd=%p.\n", sc_cmd);
		return;
	}

	if (!sc_cmd->request->q) {
		QEDF_WARN(&(qedf->dbg_ctx), "request->q is NULL so request "
		    "is not valid, sc_cmd=%p.\n", sc_cmd);
		return;
	}

	fcport = io_req->fcport;

	if (test_bit(QEDF_RPORT_IN_TARGET_RESET, &fcport->flags) ||
	    (test_bit(QEDF_RPORT_IN_LUN_RESET, &fcport->flags) &&
	     sc_cmd->device->lun == (u64)fcport->lun_reset_lun)) {
		QEDF_INFO(&qedf->dbg_ctx, QEDF_LOG_IO,
			  "Dropping good completion xid=0x%x as fcport is flushing.\n",
			  io_req->xid);
		return;
	}

	qedf_parse_fcp_rsp(io_req, fcp_rsp);

	qedf_unmap_sg_list(qedf, io_req);

	if (io_req->fcp_rsp_len > 3 && io_req->fcp_rsp_code) {
		QEDF_ERR(&(qedf->dbg_ctx),
		    "FCP I/O protocol failure xid=0x%x fcp_rsp_len=%d "
		    "fcp_rsp_code=%d.\n", io_req->xid, io_req->fcp_rsp_len,
		    io_req->fcp_rsp_code);
		sc_cmd->result = DID_BUS_BUSY << 16;
		goto out;
	}

	fw_residual_flag = GET_FIELD(cqe->cqe_info.rsp_info.fw_error_flags,
	    FCOE_CQE_RSP_INFO_FW_UNDERRUN);
	if (fw_residual_flag) {
		QEDF_ERR(&qedf->dbg_ctx,
			 "Firmware detected underrun: xid=0x%x fcp_rsp.flags=0x%02x fcp_resid=%d fw_residual=0x%x lba=%02x%02x%02x%02x.\n",
			 io_req->xid, fcp_rsp->rsp_flags.flags,
			 io_req->fcp_resid,
			 cqe->cqe_info.rsp_info.fw_residual, sc_cmd->cmnd[2],
			 sc_cmd->cmnd[3], sc_cmd->cmnd[4], sc_cmd->cmnd[5]);

		if (io_req->cdb_status == 0)
			sc_cmd->result = (DID_ERROR << 16) | io_req->cdb_status;
		else
			sc_cmd->result = (DID_OK << 16) | io_req->cdb_status;

		scsi_set_resid(sc_cmd, scsi_bufflen(sc_cmd));
		goto out;
	}

	switch (io_req->fcp_status) {
	case FC_GOOD:
		if (io_req->cdb_status == 0) {
			sc_cmd->result = DID_OK << 16;
		} else {
			refcount = kref_read(&io_req->refcount);
			QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_IO,
			    "%d:0:%d:%lld xid=0x%0x op=0x%02x "
			    "lba=%02x%02x%02x%02x cdb_status=%d "
			    "fcp_resid=0x%x refcount=%d.\n",
			    qedf->lport->host->host_no, sc_cmd->device->id,
			    sc_cmd->device->lun, io_req->xid,
			    sc_cmd->cmnd[0], sc_cmd->cmnd[2], sc_cmd->cmnd[3],
			    sc_cmd->cmnd[4], sc_cmd->cmnd[5],
			    io_req->cdb_status, io_req->fcp_resid,
			    refcount);
			sc_cmd->result = (DID_OK << 16) | io_req->cdb_status;

			if (io_req->cdb_status == SAM_STAT_TASK_SET_FULL ||
			    io_req->cdb_status == SAM_STAT_BUSY) {
				scope = fcp_rsp->retry_delay_timer & 0xC000;
				qualifier = fcp_rsp->retry_delay_timer & 0x3FFF;

				if (qedf_retry_delay &&
				    scope > 0 && qualifier > 0 &&
				    qualifier <= 0x3FEF) {
					if (qualifier > QEDF_RETRY_DELAY_MAX)
						qualifier =
						    QEDF_RETRY_DELAY_MAX;
					fcport->retry_delay_timestamp =
					    jiffies + (qualifier * HZ / 10);
				}

				if (io_req->cdb_status ==
				    SAM_STAT_TASK_SET_FULL)
					qedf->task_set_fulls++;
				else
					qedf->busy++;
			}
		}
		if (io_req->fcp_resid)
			scsi_set_resid(sc_cmd, io_req->fcp_resid);
		break;
	default:
		QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_IO, "fcp_status=%d.\n",
		    io_req->fcp_status);
		break;
	}

out:
	if (qedf_io_tracing)
		qedf_trace_io(fcport, io_req, QEDF_IO_TRACE_RSP);

	clear_bit(QEDF_CMD_OUTSTANDING, &io_req->flags);

	io_req->sc_cmd = NULL;
	sc_cmd->SCp.ptr = NULL;
	sc_cmd->scsi_done(sc_cmd);
	kref_put(&io_req->refcount, qedf_release_cmd);
}

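/*
 * Complete an sc_cmd back to the SCSI midlayer with the given host-byte
 * result; used by the error and abort paths. The pointer validation guards
 * against completing a command twice or from a stale context.
 */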
void qedf_scsi_done(struct qedf_ctx *qedf, struct qedf_ioreq *io_req,
	int result)
{
	u16 xid;
	struct scsi_cmnd *sc_cmd;
	int refcount;

	if (!io_req)
		return;

	if (test_and_set_bit(QEDF_CMD_ERR_SCSI_DONE, &io_req->flags)) {
		QEDF_INFO(&qedf->dbg_ctx, QEDF_LOG_IO,
			  "io_req:%p scsi_done handling already done\n",
			  io_req);
		return;
	}

	clear_bit(QEDF_CMD_OUTSTANDING, &io_req->flags);

	xid = io_req->xid;
	sc_cmd = io_req->sc_cmd;

	if (!sc_cmd) {
		QEDF_WARN(&(qedf->dbg_ctx), "sc_cmd is NULL!\n");
		return;
	}

	if (!virt_addr_valid(sc_cmd)) {
		QEDF_ERR(&qedf->dbg_ctx, "sc_cmd=%p is not valid.", sc_cmd);
		goto bad_scsi_ptr;
	}

	if (!sc_cmd->SCp.ptr) {
		QEDF_WARN(&(qedf->dbg_ctx), "SCp.ptr is NULL, returned in "
		    "another context.\n");
		return;
	}

	if (!sc_cmd->device) {
		QEDF_ERR(&qedf->dbg_ctx, "Device for sc_cmd %p is NULL.\n",
			 sc_cmd);
		goto bad_scsi_ptr;
	}

	if (!virt_addr_valid(sc_cmd->device)) {
		QEDF_ERR(&qedf->dbg_ctx,
			 "Device pointer for sc_cmd %p is bad.\n", sc_cmd);
		goto bad_scsi_ptr;
	}

	if (!sc_cmd->sense_buffer) {
		QEDF_ERR(&qedf->dbg_ctx,
			 "sc_cmd->sense_buffer for sc_cmd %p is NULL.\n",
			 sc_cmd);
		goto bad_scsi_ptr;
	}

	if (!virt_addr_valid(sc_cmd->sense_buffer)) {
		QEDF_ERR(&qedf->dbg_ctx,
			 "sc_cmd->sense_buffer for sc_cmd %p is bad.\n",
			 sc_cmd);
		goto bad_scsi_ptr;
	}

	if (!sc_cmd->scsi_done) {
		QEDF_ERR(&qedf->dbg_ctx,
			 "sc_cmd->scsi_done for sc_cmd %p is NULL.\n",
			 sc_cmd);
		goto bad_scsi_ptr;
	}

	qedf_unmap_sg_list(qedf, io_req);

	sc_cmd->result = result << 16;
	refcount = kref_read(&io_req->refcount);
	QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_IO, "%d:0:%d:%lld: Completing "
	    "sc_cmd=%p result=0x%08x op=0x%02x lba=0x%02x%02x%02x%02x, "
	    "allowed=%d retries=%d refcount=%d.\n",
	    qedf->lport->host->host_no, sc_cmd->device->id,
	    sc_cmd->device->lun, sc_cmd, sc_cmd->result, sc_cmd->cmnd[0],
	    sc_cmd->cmnd[2], sc_cmd->cmnd[3], sc_cmd->cmnd[4],
	    sc_cmd->cmnd[5], sc_cmd->allowed, sc_cmd->retries,
	    refcount);

	scsi_set_resid(sc_cmd, scsi_bufflen(sc_cmd));

	if (qedf_io_tracing)
		qedf_trace_io(io_req->fcport, io_req, QEDF_IO_TRACE_RSP);

	io_req->sc_cmd = NULL;
	sc_cmd->SCp.ptr = NULL;
	sc_cmd->scsi_done(sc_cmd);
	kref_put(&io_req->refcount, qedf_release_cmd);
	return;

bad_scsi_ptr:
	io_req->sc_cmd = NULL;
	kref_put(&io_req->refcount, qedf_release_cmd);
}

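/*
 * Handle a warning CQE from the firmware. For tape devices a REC is sent
 * to attempt sequence-level recovery; otherwise the exchange is aborted.
 */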
void qedf_process_warning_compl(struct qedf_ctx *qedf, struct fcoe_cqe *cqe,
	struct qedf_ioreq *io_req)
{
	int rval, i;
	struct qedf_rport *fcport = io_req->fcport;
	u64 err_warn_bit_map;
	u8 err_warn = 0xff;

	if (!cqe)
		return;

	QEDF_ERR(&(io_req->fcport->qedf->dbg_ctx), "Warning CQE, "
	    "xid=0x%x\n", io_req->xid);
	QEDF_ERR(&(io_req->fcport->qedf->dbg_ctx),
	    "err_warn_bitmap=%08x:%08x\n",
	    le32_to_cpu(cqe->cqe_info.err_info.err_warn_bitmap_hi),
	    le32_to_cpu(cqe->cqe_info.err_info.err_warn_bitmap_lo));
	QEDF_ERR(&(io_req->fcport->qedf->dbg_ctx), "tx_buff_off=%08x, "
	    "rx_buff_off=%08x, rx_id=%04x\n",
	    le32_to_cpu(cqe->cqe_info.err_info.tx_buf_off),
	    le32_to_cpu(cqe->cqe_info.err_info.rx_buf_off),
	    le32_to_cpu(cqe->cqe_info.err_info.rx_id));

	err_warn_bit_map = (u64)
	    ((u64)cqe->cqe_info.err_info.err_warn_bitmap_hi << 32) |
	    (u64)cqe->cqe_info.err_info.err_warn_bitmap_lo;
	for (i = 0; i < 64; i++) {
		if (err_warn_bit_map & (u64)((u64)1 << i)) {
			err_warn = i;
			break;
		}
	}

	if (fcport->dev_type == QEDF_RPORT_TYPE_TAPE) {
		if (err_warn ==
		    FCOE_WARNING_CODE_REC_TOV_TIMER_EXPIRATION) {
			QEDF_ERR(&(qedf->dbg_ctx), "REC timer expired.\n");
			if (!test_bit(QEDF_CMD_SRR_SENT, &io_req->flags)) {
				io_req->rx_buf_off =
				    cqe->cqe_info.err_info.rx_buf_off;
				io_req->tx_buf_off =
				    cqe->cqe_info.err_info.tx_buf_off;
				io_req->rx_id = cqe->cqe_info.err_info.rx_id;
				rval = qedf_send_rec(io_req);
				if (rval)
					goto send_abort;
			}
			return;
		}
	}

send_abort:
	init_completion(&io_req->abts_done);
	rval = qedf_initiate_abts(io_req, true);
	if (rval)
		QEDF_ERR(&(qedf->dbg_ctx), "Failed to queue ABTS.\n");
}

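/*
 * Handle an error-detect CQE: log the firmware error details and abort the
 * affected exchange, or stop all I/O if qedf->stop_io_on_error is set.
 */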
void qedf_process_error_detect(struct qedf_ctx *qedf, struct fcoe_cqe *cqe,
	struct qedf_ioreq *io_req)
{
	int rval;

	if (!cqe)
		return;

	QEDF_ERR(&(io_req->fcport->qedf->dbg_ctx), "Error detection CQE, "
	    "xid=0x%x\n", io_req->xid);
	QEDF_ERR(&(io_req->fcport->qedf->dbg_ctx),
	    "err_warn_bitmap=%08x:%08x\n",
	    le32_to_cpu(cqe->cqe_info.err_info.err_warn_bitmap_hi),
	    le32_to_cpu(cqe->cqe_info.err_info.err_warn_bitmap_lo));
	QEDF_ERR(&(io_req->fcport->qedf->dbg_ctx), "tx_buff_off=%08x, "
	    "rx_buff_off=%08x, rx_id=%04x\n",
	    le32_to_cpu(cqe->cqe_info.err_info.tx_buf_off),
	    le32_to_cpu(cqe->cqe_info.err_info.rx_buf_off),
	    le32_to_cpu(cqe->cqe_info.err_info.rx_id));

	if (qedf->stop_io_on_error) {
		qedf_stop_all_io(qedf);
		return;
	}

	init_completion(&io_req->abts_done);
	rval = qedf_initiate_abts(io_req, true);
	if (rval)
		QEDF_ERR(&(qedf->dbg_ctx), "Failed to queue ABTS.\n");
}

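/*
 * Complete an outstanding ELS request that is being flushed: cancel its
 * timeout, invoke its callback, and drop the reference held for the
 * exchange.
 */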
static void qedf_flush_els_req(struct qedf_ctx *qedf,
	struct qedf_ioreq *els_req)
{
	QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_IO,
	    "Flushing ELS request xid=0x%x refcount=%d.\n", els_req->xid,
	    kref_read(&els_req->refcount));

	els_req->event = QEDF_IOREQ_EV_ELS_FLUSH;

	cancel_delayed_work_sync(&els_req->timeout_work);

	if (els_req->cb_func && els_req->cb_arg) {
		els_req->cb_func(els_req->cb_arg);
		els_req->cb_arg = NULL;
	}

	kref_put(&els_req->refcount, qedf_release_cmd);
}

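/*
 * Flush all active I/O for an rport, optionally restricted to a single LUN
 * (lun == -1 flushes everything). Outstanding SCSI commands are cleaned
 * up, pending ELS and ABTS requests are completed, and when the connection
 * is being uploaded the function waits for num_active_ios to drain.
 */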
void qedf_flush_active_ios(struct qedf_rport *fcport, int lun)
{
	struct qedf_ioreq *io_req;
	struct qedf_ctx *qedf;
	struct qedf_cmd_mgr *cmd_mgr;
	int i, rc;
	unsigned long flags;
	int flush_cnt = 0;
	int wait_cnt = 100;
	int refcount = 0;

	if (!fcport)
		return;

	if (!test_bit(QEDF_RPORT_SESSION_READY, &fcport->flags)) {
		QEDF_ERR(NULL, "fcport is no longer offloaded.\n");
		return;
	}

	qedf = fcport->qedf;

	if (!qedf) {
		QEDF_ERR(NULL, "qedf is NULL.\n");
		return;
	}

	if (test_bit(QEDF_RPORT_UPLOADING_CONNECTION, &fcport->flags) &&
	    (lun == -1)) {
		while (atomic_read(&fcport->ios_to_queue)) {
			QEDF_INFO(&qedf->dbg_ctx, QEDF_LOG_IO,
				  "Waiting for %d I/Os to be queued\n",
				  atomic_read(&fcport->ios_to_queue));
			if (wait_cnt == 0) {
				QEDF_ERR(NULL,
					 "%d I/O requests could not be queued\n",
					 atomic_read(&fcport->ios_to_queue));
			}
			msleep(20);
			wait_cnt--;
		}
	}

	cmd_mgr = qedf->cmd_mgr;

	QEDF_INFO(&qedf->dbg_ctx, QEDF_LOG_IO,
		  "Flush active i/o's num=0x%x fcport=0x%p port_id=0x%06x scsi_id=%d.\n",
		  atomic_read(&fcport->num_active_ios), fcport,
		  fcport->rdata->ids.port_id, fcport->rport->scsi_target_id);
	QEDF_INFO(&qedf->dbg_ctx, QEDF_LOG_IO, "Locking flush mutex.\n");

	mutex_lock(&qedf->flush_mutex);
	if (lun == -1) {
		set_bit(QEDF_RPORT_IN_TARGET_RESET, &fcport->flags);
	} else {
		set_bit(QEDF_RPORT_IN_LUN_RESET, &fcport->flags);
		fcport->lun_reset_lun = lun;
	}

	for (i = 0; i < FCOE_PARAMS_NUM_TASKS; i++) {
		io_req = &cmd_mgr->cmds[i];

		if (!io_req)
			continue;
		if (!io_req->fcport)
			continue;

		spin_lock_irqsave(&cmd_mgr->lock, flags);

		if (io_req->alloc) {
			if (!test_bit(QEDF_CMD_OUTSTANDING, &io_req->flags)) {
				if (io_req->cmd_type == QEDF_SCSI_CMD)
					QEDF_ERR(&qedf->dbg_ctx,
						 "Allocated but not queued, xid=0x%x\n",
						 io_req->xid);
			}
			spin_unlock_irqrestore(&cmd_mgr->lock, flags);
		} else {
			spin_unlock_irqrestore(&cmd_mgr->lock, flags);
			continue;
		}

		if (io_req->fcport != fcport)
			continue;

		if (!test_bit(QEDF_CMD_OUTSTANDING, &io_req->flags)) {
			refcount = kref_read(&io_req->refcount);
			QEDF_INFO(&qedf->dbg_ctx, QEDF_LOG_IO,
				  "Not outstanding, xid=0x%x, cmd_type=%d refcount=%d.\n",
				  io_req->xid, io_req->cmd_type, refcount);
			if (atomic_read(&io_req->state) ==
			    QEDFC_CMD_ST_RRQ_WAIT) {
				if (cancel_delayed_work_sync
				    (&io_req->rrq_work)) {
					QEDF_INFO(&qedf->dbg_ctx, QEDF_LOG_IO,
						  "Putting reference for pending RRQ work xid=0x%x.\n",
						  io_req->xid);
					kref_put(&io_req->refcount,
						 qedf_release_cmd);
				}
			}
			continue;
		}

		if (io_req->cmd_type == QEDF_ELS &&
		    lun == -1) {
			rc = kref_get_unless_zero(&io_req->refcount);
			if (!rc) {
				QEDF_ERR(&(qedf->dbg_ctx),
				    "Could not get kref for ELS io_req=0x%p xid=0x%x.\n",
				    io_req, io_req->xid);
				continue;
			}
			flush_cnt++;
			qedf_flush_els_req(qedf, io_req);
			goto free_cmd;
		}

		if (io_req->cmd_type == QEDF_ABTS) {
			rc = kref_get_unless_zero(&io_req->refcount);
			if (!rc) {
				QEDF_ERR(&(qedf->dbg_ctx),
				    "Could not get kref for abort io_req=0x%p xid=0x%x.\n",
				    io_req, io_req->xid);
				continue;
			}
			if (lun != -1 && io_req->lun != lun)
				goto free_cmd;

			QEDF_INFO(&qedf->dbg_ctx, QEDF_LOG_IO,
				  "Flushing abort xid=0x%x.\n", io_req->xid);

			if (cancel_delayed_work_sync(&io_req->rrq_work)) {
				QEDF_INFO(&qedf->dbg_ctx, QEDF_LOG_IO,
					  "Putting ref for cancelled RRQ work xid=0x%x.\n",
					  io_req->xid);
				kref_put(&io_req->refcount, qedf_release_cmd);
			}

			if (cancel_delayed_work_sync(&io_req->timeout_work)) {
				QEDF_INFO(&qedf->dbg_ctx, QEDF_LOG_IO,
					  "Putting ref for cancelled tmo work xid=0x%x.\n",
					  io_req->xid);
				qedf_initiate_cleanup(io_req, true);
				complete(&io_req->abts_done);
				clear_bit(QEDF_CMD_IN_ABORT, &io_req->flags);
				kref_put(&io_req->refcount, qedf_release_cmd);
			}
			flush_cnt++;
			goto free_cmd;
		}

		if (!io_req->sc_cmd)
			continue;
		if (!io_req->sc_cmd->device) {
			QEDF_INFO(&qedf->dbg_ctx, QEDF_LOG_IO,
				  "Device backpointer NULL for sc_cmd=%p.\n",
				  io_req->sc_cmd);
			io_req->sc_cmd = NULL;
			qedf_initiate_cleanup(io_req, false);
			kref_put(&io_req->refcount, qedf_release_cmd);
			continue;
		}
		if (lun > -1) {
			if (io_req->lun != lun)
				continue;
		}

		rc = kref_get_unless_zero(&io_req->refcount);
		if (!rc) {
			QEDF_ERR(&(qedf->dbg_ctx), "Could not get kref for "
			    "io_req=0x%p xid=0x%x\n", io_req, io_req->xid);
			continue;
		}

		QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_IO,
		    "Cleanup xid=0x%x.\n", io_req->xid);
		flush_cnt++;

		qedf_initiate_cleanup(io_req, true);

free_cmd:
		kref_put(&io_req->refcount, qedf_release_cmd);
	}

	wait_cnt = 60;
	QEDF_INFO(&qedf->dbg_ctx, QEDF_LOG_IO,
		  "Flushed 0x%x I/Os, active=0x%x.\n",
		  flush_cnt, atomic_read(&fcport->num_active_ios));

	if (test_bit(QEDF_RPORT_UPLOADING_CONNECTION, &fcport->flags) &&
	    (lun == -1)) {
		while (atomic_read(&fcport->num_active_ios)) {
			QEDF_INFO(&qedf->dbg_ctx, QEDF_LOG_IO,
				  "Flushed 0x%x I/Os, active=0x%x cnt=%d.\n",
				  flush_cnt,
				  atomic_read(&fcport->num_active_ios),
				  wait_cnt);
			if (wait_cnt == 0) {
				QEDF_ERR(&qedf->dbg_ctx,
					 "Flushed %d I/Os, active=%d.\n",
					 flush_cnt,
					 atomic_read(&fcport->num_active_ios));
				for (i = 0; i < FCOE_PARAMS_NUM_TASKS; i++) {
					io_req = &cmd_mgr->cmds[i];
					if (io_req->fcport &&
					    io_req->fcport == fcport) {
						refcount =
						    kref_read(&io_req->refcount);
						set_bit(QEDF_CMD_DIRTY,
							&io_req->flags);
						QEDF_ERR(&qedf->dbg_ctx,
							 "Outstanding io_req=%p xid=0x%x flags=0x%lx, sc_cmd=%p refcount=%d cmd_type=%d.\n",
							 io_req, io_req->xid,
							 io_req->flags,
							 io_req->sc_cmd,
							 refcount,
							 io_req->cmd_type);
					}
				}
				WARN_ON(1);
				break;
			}
			msleep(500);
			wait_cnt--;
		}
	}

	clear_bit(QEDF_RPORT_IN_LUN_RESET, &fcport->flags);
	clear_bit(QEDF_RPORT_IN_TARGET_RESET, &fcport->flags);
	QEDF_INFO(&qedf->dbg_ctx, QEDF_LOG_IO, "Unlocking flush mutex.\n");
	mutex_unlock(&qedf->flush_mutex);
}

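/*
 * Initiate an ABTS for an outstanding exchange. Takes an extra reference
 * on the io_req that is dropped when the ABTS completes or times out.
 * Returns nonzero if the abort could not be queued.
 */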
int qedf_initiate_abts(struct qedf_ioreq *io_req, bool return_scsi_cmd_on_abts)
{
	struct fc_lport *lport;
	struct qedf_rport *fcport = io_req->fcport;
	struct fc_rport_priv *rdata;
	struct qedf_ctx *qedf;
	u16 xid;
	u32 r_a_tov = 0;
	int rc = 0;
	unsigned long flags;
	struct fcoe_wqe *sqe;
	u16 sqe_idx;
	int refcount = 0;

	if (!test_bit(QEDF_RPORT_SESSION_READY, &fcport->flags)) {
		QEDF_ERR(NULL, "tgt not offloaded\n");
		rc = 1;
		goto out;
	}

	qedf = fcport->qedf;
	rdata = fcport->rdata;

	if (!rdata || !kref_get_unless_zero(&rdata->kref)) {
		QEDF_ERR(&qedf->dbg_ctx, "stale rport\n");
		rc = 1;
		goto out;
	}

	r_a_tov = rdata->r_a_tov;
	lport = qedf->lport;

	if (lport->state != LPORT_ST_READY || !(lport->link_up)) {
		QEDF_ERR(&(qedf->dbg_ctx), "link is not ready\n");
		rc = 1;
		goto drop_rdata_kref;
	}

	if (atomic_read(&qedf->link_down_tmo_valid) > 0) {
		QEDF_ERR(&(qedf->dbg_ctx), "link_down_tmo active.\n");
		rc = 1;
		goto drop_rdata_kref;
	}

	if (!atomic_read(&fcport->free_sqes)) {
		QEDF_ERR(&(qedf->dbg_ctx), "No SQ entries available\n");
		rc = 1;
		goto drop_rdata_kref;
	}

	if (test_bit(QEDF_RPORT_UPLOADING_CONNECTION, &fcport->flags)) {
		QEDF_ERR(&qedf->dbg_ctx, "fcport is uploading.\n");
		rc = 1;
		goto drop_rdata_kref;
	}

	if (!test_bit(QEDF_CMD_OUTSTANDING, &io_req->flags) ||
	    test_bit(QEDF_CMD_IN_CLEANUP, &io_req->flags) ||
	    test_bit(QEDF_CMD_IN_ABORT, &io_req->flags)) {
		QEDF_ERR(&qedf->dbg_ctx,
			 "io_req xid=0x%x sc_cmd=%p already in cleanup or abort processing or already completed.\n",
			 io_req->xid, io_req->sc_cmd);
		rc = 1;
		goto drop_rdata_kref;
	}

	kref_get(&io_req->refcount);

	xid = io_req->xid;
	qedf->control_requests++;
	qedf->packet_aborts++;

	io_req->cmd_type = QEDF_ABTS;
	io_req->return_scsi_cmd_on_abts = return_scsi_cmd_on_abts;

	set_bit(QEDF_CMD_IN_ABORT, &io_req->flags);
	refcount = kref_read(&io_req->refcount);
	QEDF_INFO(&qedf->dbg_ctx, QEDF_LOG_SCSI_TM,
		  "ABTS io_req xid = 0x%x refcount=%d\n",
		  xid, refcount);

	qedf_cmd_timer_set(qedf, io_req, QEDF_ABORT_TIMEOUT);

	spin_lock_irqsave(&fcport->rport_lock, flags);

	sqe_idx = qedf_get_sqe_idx(fcport);
	sqe = &fcport->sq[sqe_idx];
	memset(sqe, 0, sizeof(struct fcoe_wqe));
	io_req->task_params->sqe = sqe;

	init_initiator_abort_fcoe_task(io_req->task_params);
	qedf_ring_doorbell(fcport);

	spin_unlock_irqrestore(&fcport->rport_lock, flags);

drop_rdata_kref:
	kref_put(&rdata->kref, fc_rport_destroy);
out:
	return rc;
}

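/*
 * Handle the BA_ACC or BA_RJT completion for an ABTS. On BA_ACC, an RRQ is
 * scheduled after R_A_TOV before the exchange may be reused.
 */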
void qedf_process_abts_compl(struct qedf_ctx *qedf, struct fcoe_cqe *cqe,
	struct qedf_ioreq *io_req)
{
	uint32_t r_ctl;
	uint16_t xid;
	int rc;
	struct qedf_rport *fcport = io_req->fcport;

	QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_SCSI_TM, "Entered with xid = "
	    "0x%x cmd_type = %d\n", io_req->xid, io_req->cmd_type);

	xid = io_req->xid;
	r_ctl = cqe->cqe_info.abts_info.r_ctl;

	if (!fcport) {
		QEDF_INFO(&qedf->dbg_ctx, QEDF_LOG_IO,
			  "Dropping ABTS completion xid=0x%x as fcport is NULL",
			  io_req->xid);
		return;
	}

	if (test_bit(QEDF_RPORT_IN_TARGET_RESET, &fcport->flags) ||
	    test_bit(QEDF_RPORT_IN_LUN_RESET, &fcport->flags)) {
		QEDF_INFO(&qedf->dbg_ctx, QEDF_LOG_IO,
			  "Dropping ABTS completion xid=0x%x as fcport is flushing",
			  io_req->xid);
		return;
	}

	if (!cancel_delayed_work(&io_req->timeout_work)) {
		QEDF_ERR(&qedf->dbg_ctx,
			 "Wasn't able to cancel abts timeout work.\n");
	}

	switch (r_ctl) {
	case FC_RCTL_BA_ACC:
		QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_SCSI_TM,
		    "ABTS response - ACC Send RRQ after R_A_TOV\n");
		io_req->event = QEDF_IOREQ_EV_ABORT_SUCCESS;
		rc = kref_get_unless_zero(&io_req->refcount);
		if (!rc) {
			QEDF_INFO(&qedf->dbg_ctx, QEDF_LOG_SCSI_TM,
				  "kref is already zero so ABTS was already completed or flushed xid=0x%x.\n",
				  io_req->xid);
			return;
		}
		queue_delayed_work(qedf->dpc_wq, &io_req->rrq_work,
		    msecs_to_jiffies(qedf->lport->r_a_tov));
		atomic_set(&io_req->state, QEDFC_CMD_ST_RRQ_WAIT);
		break;

	case FC_RCTL_BA_RJT:
		QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_SCSI_TM,
		    "ABTS response - RJT\n");
		io_req->event = QEDF_IOREQ_EV_ABORT_FAILED;
		break;
	default:
		QEDF_ERR(&(qedf->dbg_ctx), "Unknown ABTS response\n");
		break;
	}

	clear_bit(QEDF_CMD_IN_ABORT, &io_req->flags);

	if (io_req->sc_cmd) {
		if (io_req->return_scsi_cmd_on_abts)
			qedf_scsi_done(qedf, io_req, DID_ERROR);
	}

	complete(&io_req->abts_done);

	kref_put(&io_req->refcount, qedf_release_cmd);
}

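/*
 * Allocate the DMA buffers and single-entry SGLs used by a middle-path
 * (ELS or task management) request and its response.
 */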
int qedf_init_mp_req(struct qedf_ioreq *io_req)
{
	struct qedf_mp_req *mp_req;
	struct scsi_sge *mp_req_bd;
	struct scsi_sge *mp_resp_bd;
	struct qedf_ctx *qedf = io_req->fcport->qedf;
	dma_addr_t addr;
	uint64_t sz;

	QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_MP_REQ, "Entered.\n");

	mp_req = (struct qedf_mp_req *)&(io_req->mp_req);
	memset(mp_req, 0, sizeof(struct qedf_mp_req));

	if (io_req->cmd_type != QEDF_ELS) {
		mp_req->req_len = sizeof(struct fcp_cmnd);
		io_req->data_xfer_len = mp_req->req_len;
	} else
		mp_req->req_len = io_req->data_xfer_len;

	mp_req->req_buf = dma_alloc_coherent(&qedf->pdev->dev, QEDF_PAGE_SIZE,
	    &mp_req->req_buf_dma, GFP_KERNEL);
	if (!mp_req->req_buf) {
		QEDF_ERR(&(qedf->dbg_ctx), "Unable to alloc MP req buffer\n");
		qedf_free_mp_resc(io_req);
		return -ENOMEM;
	}

	mp_req->resp_buf = dma_alloc_coherent(&qedf->pdev->dev,
	    QEDF_PAGE_SIZE, &mp_req->resp_buf_dma, GFP_KERNEL);
	if (!mp_req->resp_buf) {
		QEDF_ERR(&(qedf->dbg_ctx), "Unable to alloc TM resp "
		    "buffer\n");
		qedf_free_mp_resc(io_req);
		return -ENOMEM;
	}

	sz = sizeof(struct scsi_sge);
	mp_req->mp_req_bd = dma_alloc_coherent(&qedf->pdev->dev, sz,
	    &mp_req->mp_req_bd_dma, GFP_KERNEL);
	if (!mp_req->mp_req_bd) {
		QEDF_ERR(&(qedf->dbg_ctx), "Unable to alloc MP req bd\n");
		qedf_free_mp_resc(io_req);
		return -ENOMEM;
	}

	mp_req->mp_resp_bd = dma_alloc_coherent(&qedf->pdev->dev, sz,
	    &mp_req->mp_resp_bd_dma, GFP_KERNEL);
	if (!mp_req->mp_resp_bd) {
		QEDF_ERR(&(qedf->dbg_ctx), "Unable to alloc MP resp bd\n");
		qedf_free_mp_resc(io_req);
		return -ENOMEM;
	}

	addr = mp_req->req_buf_dma;
	mp_req_bd = mp_req->mp_req_bd;
	mp_req_bd->sge_addr.lo = U64_LO(addr);
	mp_req_bd->sge_addr.hi = U64_HI(addr);
	mp_req_bd->sge_len = QEDF_PAGE_SIZE;

	mp_resp_bd = mp_req->mp_resp_bd;
	addr = mp_req->resp_buf_dma;
	mp_resp_bd->sge_addr.lo = U64_LO(addr);
	mp_resp_bd->sge_addr.hi = U64_HI(addr);
	mp_resp_bd->sge_len = QEDF_PAGE_SIZE;

	return 0;
}

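/*
 * Ask the management firmware to drain pending I/O when a cleanup task
 * does not complete; the 100ms sleep is presumed sufficient for the drain
 * to finish.
 */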
static void qedf_drain_request(struct qedf_ctx *qedf)
{
	if (test_bit(QEDF_DRAIN_ACTIVE, &qedf->flags)) {
		QEDF_ERR(&(qedf->dbg_ctx), "MCP drain already active.\n");
		return;
	}

	set_bit(QEDF_DRAIN_ACTIVE, &qedf->flags);

	qed_ops->common->drain(qedf->cdev);

	msleep(100);

	clear_bit(QEDF_DRAIN_ACTIVE, &qedf->flags);
}

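/*
 * Issue a firmware cleanup task for an exchange and wait up to
 * QEDF_CLEANUP_TIMEOUT seconds for its completion. Used to retire an
 * exchange without a full ABTS handshake.
 */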
2093int qedf_initiate_cleanup(struct qedf_ioreq *io_req,
2094 bool return_scsi_cmd_on_abts)
2095{
2096 struct qedf_rport *fcport;
2097 struct qedf_ctx *qedf;
2098 uint16_t xid;
2099 struct e4_fcoe_task_context *task;
2100 int tmo = 0;
2101 int rc = SUCCESS;
2102 unsigned long flags;
2103 struct fcoe_wqe *sqe;
2104 u16 sqe_idx;
2105 int refcount = 0;
2106
2107 fcport = io_req->fcport;
2108 if (!fcport) {
2109 QEDF_ERR(NULL, "fcport is NULL.\n");
2110 return SUCCESS;
2111 }
2112
2113
2114 if (!test_bit(QEDF_RPORT_SESSION_READY, &fcport->flags)) {
2115 QEDF_ERR(NULL, "tgt not offloaded\n");
2116 rc = 1;
2117 return SUCCESS;
2118 }
2119
2120 qedf = fcport->qedf;
2121 if (!qedf) {
2122 QEDF_ERR(NULL, "qedf is NULL.\n");
2123 return SUCCESS;
2124 }
2125
2126 if (!test_bit(QEDF_CMD_OUTSTANDING, &io_req->flags) ||
2127 test_and_set_bit(QEDF_CMD_IN_CLEANUP, &io_req->flags)) {
2128 QEDF_ERR(&(qedf->dbg_ctx), "io_req xid=0x%x already in "
2129 "cleanup processing or already completed.\n",
2130 io_req->xid);
2131 return SUCCESS;
2132 }
2133 set_bit(QEDF_CMD_IN_CLEANUP, &io_req->flags);

	/* Ensure room on SQ */
	if (!atomic_read(&fcport->free_sqes)) {
		QEDF_ERR(&(qedf->dbg_ctx), "No SQ entries available\n");
		/* Drop the cleanup ownership taken above */
		clear_bit(QEDF_CMD_IN_CLEANUP, &io_req->flags);
		return FAILED;
	}

	if (io_req->cmd_type == QEDF_CLEANUP) {
		QEDF_ERR(&qedf->dbg_ctx,
			 "io_req=0x%x is already a cleanup command cmd_type=%d.\n",
			 io_req->xid, io_req->cmd_type);
		clear_bit(QEDF_CMD_IN_CLEANUP, &io_req->flags);
		return SUCCESS;
	}

	refcount = kref_read(&io_req->refcount);

	QEDF_INFO(&qedf->dbg_ctx, QEDF_LOG_IO,
		  "Entered xid=0x%x sc_cmd=%p cmd_type=%d flags=0x%lx refcount=%d fcport=%p port_id=0x%06x\n",
		  io_req->xid, io_req->sc_cmd, io_req->cmd_type, io_req->flags,
		  refcount, fcport, fcport->rdata->ids.port_id);

	/* Re-type the exchange as a cleanup command */
	xid = io_req->xid;
	io_req->cmd_type = QEDF_CLEANUP;
	io_req->return_scsi_cmd_on_abts = return_scsi_cmd_on_abts;

	task = qedf_get_task_mem(&qedf->tasks, xid);

	init_completion(&io_req->cleanup_done);

	spin_lock_irqsave(&fcport->rport_lock, flags);

	sqe_idx = qedf_get_sqe_idx(fcport);
	sqe = &fcport->sq[sqe_idx];
	memset(sqe, 0, sizeof(struct fcoe_wqe));
	io_req->task_params->sqe = sqe;

	init_initiator_cleanup_fcoe_task(io_req->task_params);
	qedf_ring_doorbell(fcport);

	spin_unlock_irqrestore(&fcport->rport_lock, flags);

	tmo = wait_for_completion_timeout(&io_req->cleanup_done,
	    QEDF_CLEANUP_TIMEOUT * HZ);

	if (!tmo) {
		rc = FAILED;
		QEDF_ERR(&(qedf->dbg_ctx), "Cleanup command timeout, xid=%x.\n",
			 io_req->xid);
		clear_bit(QEDF_CMD_IN_CLEANUP, &io_req->flags);

		QEDF_ERR(&(qedf->dbg_ctx), "Issuing MCP drain request.\n");
		qedf_drain_request(qedf);
	}

	/*
	 * If this was a task management command, wake up qedf_execute_tmf(),
	 * which is waiting on tm_done and will drop its reference.
	 */
	if (io_req->tm_flags == FCP_TMF_LUN_RESET ||
	    io_req->tm_flags == FCP_TMF_TGT_RESET) {
		clear_bit(QEDF_CMD_OUTSTANDING, &io_req->flags);
		io_req->sc_cmd = NULL;
		complete(&io_req->tm_done);
	}

	if (io_req->sc_cmd && io_req->return_scsi_cmd_on_abts)
		qedf_scsi_done(qedf, io_req, DID_ERROR);

	if (rc == SUCCESS)
		io_req->event = QEDF_IOREQ_EV_CLEANUP_SUCCESS;
	else
		io_req->event = QEDF_IOREQ_EV_CLEANUP_FAILED;

	return rc;
}

void qedf_process_cleanup_compl(struct qedf_ctx *qedf, struct fcoe_cqe *cqe,
	struct qedf_ioreq *io_req)
{
	QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_IO, "Entered xid = 0x%x\n",
		  io_req->xid);

	clear_bit(QEDF_CMD_IN_CLEANUP, &io_req->flags);

	/* Complete so we can finish cleaning up the I/O */
	complete(&io_req->cleanup_done);
}
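
/*
 * Issue a task management command (LUN or target reset) for fcport and
 * wait up to QEDF_TM_TIMEOUT seconds for the firmware to complete it.
 */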
static int qedf_execute_tmf(struct qedf_rport *fcport, struct scsi_cmnd *sc_cmd,
	uint8_t tm_flags)
{
	struct qedf_ioreq *io_req;
	struct e4_fcoe_task_context *task;
	struct qedf_ctx *qedf = fcport->qedf;
	struct fc_lport *lport = qedf->lport;
	int rc = 0;
	uint16_t xid;
	int tmo = 0;
	int lun = 0;
	unsigned long flags;
	struct fcoe_wqe *sqe;
	u16 sqe_idx;

	if (!sc_cmd) {
		QEDF_ERR(&(qedf->dbg_ctx), "invalid arg\n");
		return FAILED;
	}

	lun = (int)sc_cmd->device->lun;
	if (!test_bit(QEDF_RPORT_SESSION_READY, &fcport->flags)) {
		QEDF_ERR(&(qedf->dbg_ctx), "fcport not offloaded\n");
		rc = FAILED;
		goto no_flush;
	}

	io_req = qedf_alloc_cmd(fcport, QEDF_TASK_MGMT_CMD);
	if (!io_req) {
		QEDF_ERR(&(qedf->dbg_ctx), "Failed TMF\n");
		rc = -EAGAIN;
		goto no_flush;
	}

	if (tm_flags == FCP_TMF_LUN_RESET)
		qedf->lun_resets++;
	else if (tm_flags == FCP_TMF_TGT_RESET)
		qedf->target_resets++;

	/* Initialize rest of io_req fields */
	io_req->sc_cmd = sc_cmd;
	io_req->fcport = fcport;
	io_req->cmd_type = QEDF_TASK_MGMT_CMD;

	/* Record which cpu this request is associated with */
	io_req->cpu = smp_processor_id();

	/* Set TM flags */
	io_req->io_req_flags = QEDF_READ;
	io_req->data_xfer_len = 0;
	io_req->tm_flags = tm_flags;

	/* Default is to return a SCSI command when an error occurs */
	io_req->return_scsi_cmd_on_abts = false;

	/* Obtain exchange id */
	xid = io_req->xid;

	QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_SCSI_TM, "TMF io_req xid = 0x%x\n",
		  xid);

	/* Initialize task context for this IO request */
	task = qedf_get_task_mem(&qedf->tasks, xid);

	init_completion(&io_req->tm_done);

	spin_lock_irqsave(&fcport->rport_lock, flags);

	sqe_idx = qedf_get_sqe_idx(fcport);
	sqe = &fcport->sq[sqe_idx];
	memset(sqe, 0, sizeof(struct fcoe_wqe));

	qedf_init_task(fcport, lport, io_req, task, sqe);
	qedf_ring_doorbell(fcport);

	spin_unlock_irqrestore(&fcport->rport_lock, flags);

	set_bit(QEDF_CMD_OUTSTANDING, &io_req->flags);
	tmo = wait_for_completion_timeout(&io_req->tm_done,
	    QEDF_TM_TIMEOUT * HZ);

	if (!tmo) {
		rc = FAILED;
		QEDF_ERR(&(qedf->dbg_ctx), "wait for tm_cmpl timeout!\n");
		/*
		 * Clear the outstanding bit and drop the SCSI command pointer
		 * so a late completion cannot touch the midlayer command.
		 */
		clear_bit(QEDF_CMD_OUTSTANDING, &io_req->flags);
		io_req->sc_cmd = NULL;
	} else {
		/* Check TMF response code in the FCP response */
		if (io_req->fcp_rsp_code == 0)
			rc = SUCCESS;
		else
			rc = FAILED;
	}

	/*
	 * Double check that the fcport has not started uploading before
	 * flushing the remaining active I/Os.
	 */
	if (test_bit(QEDF_RPORT_UPLOADING_CONNECTION, &fcport->flags)) {
		QEDF_ERR(&qedf->dbg_ctx,
			 "fcport is uploading, not executing flush.\n");
		goto no_flush;
	}

	kref_put(&io_req->refcount, qedf_release_cmd);

	/* Flush just the LUN for a LUN reset, the whole target otherwise */
	if (tm_flags == FCP_TMF_LUN_RESET)
		qedf_flush_active_ios(fcport, lun);
	else
		qedf_flush_active_ios(fcport, -1);

no_flush:
	if (rc != SUCCESS) {
		QEDF_ERR(&(qedf->dbg_ctx), "task mgmt command failed...\n");
		rc = FAILED;
	} else {
		QEDF_ERR(&(qedf->dbg_ctx), "task mgmt command success...\n");
		rc = SUCCESS;
	}
	return rc;
}
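
/*
 * SCSI error-handler entry point for LUN and target resets. Validates the
 * rport and driver state before handing off to qedf_execute_tmf().
 */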
int qedf_initiate_tmf(struct scsi_cmnd *sc_cmd, u8 tm_flags)
{
	struct fc_rport *rport = starget_to_rport(scsi_target(sc_cmd->device));
	struct fc_rport_libfc_priv *rp = rport->dd_data;
	struct qedf_rport *fcport = (struct qedf_rport *)&rp[1];
	struct qedf_ctx *qedf;
	struct fc_lport *lport = shost_priv(sc_cmd->device->host);
	int rc = SUCCESS;
	int rval;
	struct qedf_ioreq *io_req = NULL;
	int ref_cnt = 0;
	struct fc_rport_priv *rdata = fcport->rdata;

	QEDF_ERR(NULL,
		 "tm_flags 0x%x sc_cmd %p op = 0x%02x target_id = 0x%x lun=%d\n",
		 tm_flags, sc_cmd, sc_cmd->cmnd[0], rport->scsi_target_id,
		 (int)sc_cmd->device->lun);

	/* Hold a reference on rdata so it cannot be freed while in use here */
	if (!rdata || !kref_get_unless_zero(&rdata->kref)) {
		QEDF_ERR(NULL, "stale rport\n");
		return FAILED;
	}

	QEDF_ERR(NULL, "portid=%06x tm_flags =%s\n", rdata->ids.port_id,
		 (tm_flags == FCP_TMF_TGT_RESET) ? "TARGET RESET" :
		 "LUN RESET");

	if (sc_cmd->SCp.ptr) {
		io_req = (struct qedf_ioreq *)sc_cmd->SCp.ptr;
		ref_cnt = kref_read(&io_req->refcount);
		QEDF_ERR(NULL,
			 "orig io_req = %p xid = 0x%x ref_cnt = %d.\n",
			 io_req, io_req->xid, ref_cnt);
	}

	rval = fc_remote_port_chkready(rport);
	if (rval) {
		QEDF_ERR(NULL, "device_reset rport not ready\n");
		rc = FAILED;
		goto tmf_err;
	}

	rc = fc_block_scsi_eh(sc_cmd);
	if (rc)
		goto tmf_err;

	qedf = fcport->qedf;
	if (!qedf) {
		QEDF_ERR(NULL, "qedf is NULL.\n");
		rc = FAILED;
		goto tmf_err;
	}

	if (test_bit(QEDF_RPORT_UPLOADING_CONNECTION, &fcport->flags)) {
		if (!fcport->rdata)
			QEDF_ERR(&qedf->dbg_ctx, "fcport %p is uploading.\n",
				 fcport);
		else
			QEDF_ERR(&qedf->dbg_ctx,
				 "fcport %p port_id=%06x is uploading.\n",
				 fcport, fcport->rdata->ids.port_id);
		rc = SUCCESS;
		goto tmf_err;
	}

	if (test_bit(QEDF_UNLOADING, &qedf->flags) ||
	    test_bit(QEDF_DBG_STOP_IO, &qedf->flags)) {
		rc = SUCCESS;
		goto tmf_err;
	}

	if (lport->state != LPORT_ST_READY || !(lport->link_up)) {
		QEDF_ERR(&(qedf->dbg_ctx), "link is not ready\n");
		rc = FAILED;
		goto tmf_err;
	}

	rc = qedf_execute_tmf(fcport, sc_cmd, tm_flags);

tmf_err:
	kref_put(&rdata->kref, fc_rport_destroy);
	return rc;
}
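
/* TMF completion: parse the FCP response and wake up qedf_execute_tmf(). */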
void qedf_process_tmf_compl(struct qedf_ctx *qedf, struct fcoe_cqe *cqe,
	struct qedf_ioreq *io_req)
{
	struct fcoe_cqe_rsp_info *fcp_rsp;

	clear_bit(QEDF_CMD_OUTSTANDING, &io_req->flags);

	fcp_rsp = &cqe->cqe_info.rsp_info;
	qedf_parse_fcp_rsp(io_req, fcp_rsp);

	io_req->sc_cmd = NULL;
	complete(&io_req->tm_done);
}
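
/*
 * Handle an unsolicited frame CQE: copy the frame out of the BDQ buffer,
 * queue it to libfc in process context, and return the buffer to the
 * firmware by bumping the BDQ producer index.
 */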
void qedf_process_unsol_compl(struct qedf_ctx *qedf, uint16_t que_idx,
	struct fcoe_cqe *cqe)
{
	unsigned long flags;
	uint16_t tmp;
	uint16_t pktlen = cqe->cqe_info.unsolic_info.pkt_len;
	u32 payload_len, crc;
	struct fc_frame_header *fh;
	struct fc_frame *fp;
	struct qedf_io_work *io_work;
	u32 bdq_idx;
	void *bdq_addr;
	struct scsi_bd *p_bd_info;

	p_bd_info = &cqe->cqe_info.unsolic_info.bd_info;
	QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_UNSOL,
		  "address.hi=%x, address.lo=%x, opaque_data.hi=%x, opaque_data.lo=%x, bdq_prod_idx=%u, len=%u\n",
		  le32_to_cpu(p_bd_info->address.hi),
		  le32_to_cpu(p_bd_info->address.lo),
		  le32_to_cpu(p_bd_info->opaque.fcoe_opaque.hi),
		  le32_to_cpu(p_bd_info->opaque.fcoe_opaque.lo),
		  qedf->bdq_prod_idx, pktlen);

	bdq_idx = le32_to_cpu(p_bd_info->opaque.fcoe_opaque.lo);
	if (bdq_idx >= QEDF_BDQ_SIZE) {
		QEDF_ERR(&(qedf->dbg_ctx), "bdq_idx is out of range %d.\n",
			 bdq_idx);
		goto increment_prod;
	}

	bdq_addr = qedf->bdq[bdq_idx].buf_addr;
	if (!bdq_addr) {
		QEDF_ERR(&(qedf->dbg_ctx),
			 "bdq_addr is NULL, dropping unsolicited packet.\n");
		goto increment_prod;
	}

	if (qedf_dump_frames) {
		QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_UNSOL,
			  "BDQ frame is at addr=%p.\n", bdq_addr);
		print_hex_dump(KERN_WARNING, "bdq ", DUMP_PREFIX_OFFSET, 16, 1,
			       (void *)bdq_addr, pktlen, false);
	}

	/* Allocate frame */
	payload_len = pktlen - sizeof(struct fc_frame_header);
	fp = fc_frame_alloc(qedf->lport, payload_len);
	if (!fp) {
		QEDF_ERR(&(qedf->dbg_ctx), "Could not allocate fp.\n");
		goto increment_prod;
	}

	/* Copy data from BDQ buffer into fc_frame struct */
	fh = (struct fc_frame_header *)fc_frame_header_get(fp);
	memcpy(fh, (void *)bdq_addr, pktlen);

	/* Initialize the frame so libfc sees it as a valid frame */
	crc = fcoe_fc_crc(fp);
	fc_frame_init(fp);
	fr_dev(fp) = qedf->lport;
	fr_sof(fp) = FC_SOF_I3;
	fr_eof(fp) = FC_EOF_T;
	fr_crc(fp) = cpu_to_le32(~crc);

	/*
	 * We need to return the frame back up to libfc in a non-atomic
	 * context
	 */
	io_work = mempool_alloc(qedf->io_mempool, GFP_ATOMIC);
	if (!io_work) {
		QEDF_WARN(&(qedf->dbg_ctx),
			  "Could not allocate work for I/O completion.\n");
		fc_frame_free(fp);
		goto increment_prod;
	}
	memset(io_work, 0, sizeof(struct qedf_io_work));

	INIT_WORK(&io_work->work, qedf_fp_io_handler);

	/* Copy contents of CQE for deferred processing */
	memcpy(&io_work->cqe, cqe, sizeof(struct fcoe_cqe));

	io_work->qedf = qedf;
	io_work->fp = fp;

	queue_work_on(smp_processor_id(), qedf_io_wq, &io_work->work);
increment_prod:
	spin_lock_irqsave(&qedf->hba_lock, flags);

	/* Increment producer to let f/w know we've handled the frame */
	qedf->bdq_prod_idx++;

	/* Producer index is reset to 0 when it reaches 0xffff */
	if (qedf->bdq_prod_idx == 0xffff)
		qedf->bdq_prod_idx = 0;

	/* Read back after each write to ensure the update is posted */
	writew(qedf->bdq_prod_idx, qedf->bdq_primary_prod);
	tmp = readw(qedf->bdq_primary_prod);
	writew(qedf->bdq_prod_idx, qedf->bdq_secondary_prod);
	tmp = readw(qedf->bdq_secondary_prod);

	spin_unlock_irqrestore(&qedf->hba_lock, flags);
}