7#include "efct_driver.h"
8#include "efct_hw.h"
9#include "efct_unsol.h"
10
11struct efct_hw_link_stat_cb_arg {
12 void (*cb)(int status, u32 num_counters,
13 struct efct_hw_link_stat_counts *counters, void *arg);
14 void *arg;
15};
16
17struct efct_hw_host_stat_cb_arg {
18 void (*cb)(int status, u32 num_counters,
19 struct efct_hw_host_stat_counts *counters, void *arg);
20 void *arg;
21};
22
23struct efct_hw_fw_wr_cb_arg {
24 void (*cb)(int status, u32 bytes_written, u32 change_status, void *arg);
25 void *arg;
26};
27
28struct efct_mbox_rqst_ctx {
29 int (*callback)(struct efc *efc, int status, u8 *mqe, void *arg);
30 void *arg;
31};
32
33static int
34efct_hw_link_event_init(struct efct_hw *hw)
35{
36 hw->link.status = SLI4_LINK_STATUS_MAX;
37 hw->link.topology = SLI4_LINK_TOPO_NONE;
38 hw->link.medium = SLI4_LINK_MEDIUM_MAX;
39 hw->link.speed = 0;
40 hw->link.loop_map = NULL;
41 hw->link.fc_id = U32_MAX;
42
43 return 0;
44}
45
46static int
47efct_hw_read_max_dump_size(struct efct_hw *hw)
48{
49 u8 buf[SLI4_BMBX_SIZE];
50 struct efct *efct = hw->os;
51 int rc = 0;
52 struct sli4_rsp_cmn_set_dump_location *rsp;
53
54
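	/* Only PCI function 0 queries the firmware dump size. */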
55 if (PCI_FUNC(efct->pci->devfn) != 0)
56 return rc;
57
58 if (sli_cmd_common_set_dump_location(&hw->sli, buf, 1, 0, NULL, 0))
59 return -EIO;
60
61 rsp = (struct sli4_rsp_cmn_set_dump_location *)
62 (buf + offsetof(struct sli4_cmd_sli_config, payload.embed));
63
64 rc = efct_hw_command(hw, buf, EFCT_CMD_POLL, NULL, NULL);
65 if (rc != 0) {
66 efc_log_debug(hw->os, "set dump location cmd failed\n");
67 return rc;
68 }
69
70 hw->dump_size =
71 le32_to_cpu(rsp->buffer_length_dword) & SLI4_CMN_SET_DUMP_BUFFER_LEN;
72
73 efc_log_debug(hw->os, "Dump size %x\n", hw->dump_size);
74
75 return rc;
76}
77
78static int
79__efct_read_topology_cb(struct efct_hw *hw, int status, u8 *mqe, void *arg)
80{
81 struct sli4_cmd_read_topology *read_topo =
82 (struct sli4_cmd_read_topology *)mqe;
83 u8 speed;
84 struct efc_domain_record drec = {0};
85 struct efct *efct = hw->os;
86
87 if (status || le16_to_cpu(read_topo->hdr.status)) {
88 efc_log_debug(hw->os, "bad status cqe=%#x mqe=%#x\n", status,
89 le16_to_cpu(read_topo->hdr.status));
90 return -EIO;
91 }
92
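	/* Map the READ_TOPOLOGY attention type onto the HW link status. */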
93 switch (le32_to_cpu(read_topo->dw2_attentype) &
94 SLI4_READTOPO_ATTEN_TYPE) {
95 case SLI4_READ_TOPOLOGY_LINK_UP:
96 hw->link.status = SLI4_LINK_STATUS_UP;
97 break;
98 case SLI4_READ_TOPOLOGY_LINK_DOWN:
99 hw->link.status = SLI4_LINK_STATUS_DOWN;
100 break;
101 case SLI4_READ_TOPOLOGY_LINK_NO_ALPA:
102 hw->link.status = SLI4_LINK_STATUS_NO_ALPA;
103 break;
104 default:
105 hw->link.status = SLI4_LINK_STATUS_MAX;
106 break;
107 }
108
109 switch (read_topo->topology) {
110 case SLI4_READ_TOPO_NON_FC_AL:
111 hw->link.topology = SLI4_LINK_TOPO_NON_FC_AL;
112 break;
113 case SLI4_READ_TOPO_FC_AL:
114 hw->link.topology = SLI4_LINK_TOPO_FC_AL;
115 if (hw->link.status == SLI4_LINK_STATUS_UP)
116 hw->link.loop_map = hw->loop_map.virt;
117 hw->link.fc_id = read_topo->acquired_al_pa;
118 break;
119 default:
120 hw->link.topology = SLI4_LINK_TOPO_MAX;
121 break;
122 }
123
124 hw->link.medium = SLI4_LINK_MEDIUM_FC;
125
126 speed = (le32_to_cpu(read_topo->currlink_state) &
127 SLI4_READTOPO_LINKSTATE_SPEED) >> 8;
128 switch (speed) {
129 case SLI4_READ_TOPOLOGY_SPEED_1G:
130 hw->link.speed = 1 * 1000;
131 break;
132 case SLI4_READ_TOPOLOGY_SPEED_2G:
133 hw->link.speed = 2 * 1000;
134 break;
135 case SLI4_READ_TOPOLOGY_SPEED_4G:
136 hw->link.speed = 4 * 1000;
137 break;
138 case SLI4_READ_TOPOLOGY_SPEED_8G:
139 hw->link.speed = 8 * 1000;
140 break;
141 case SLI4_READ_TOPOLOGY_SPEED_16G:
142 hw->link.speed = 16 * 1000;
143 break;
144 case SLI4_READ_TOPOLOGY_SPEED_32G:
145 hw->link.speed = 32 * 1000;
146 break;
147 case SLI4_READ_TOPOLOGY_SPEED_64G:
148 hw->link.speed = 64 * 1000;
149 break;
150 case SLI4_READ_TOPOLOGY_SPEED_128G:
151 hw->link.speed = 128 * 1000;
152 break;
153 }
154
155 drec.speed = hw->link.speed;
156 drec.fc_id = hw->link.fc_id;
157 drec.is_nport = true;
158 efc_domain_cb(efct->efcport, EFC_HW_DOMAIN_FOUND, &drec);
159
160 return 0;
161}
162
163static int
164efct_hw_cb_link(void *ctx, void *e)
165{
166 struct efct_hw *hw = ctx;
167 struct sli4_link_event *event = e;
168 struct efc_domain *d = NULL;
169 int rc = 0;
170 struct efct *efct = hw->os;
171
172 efct_hw_link_event_init(hw);
173
174 switch (event->status) {
175 case SLI4_LINK_STATUS_UP:
176
177 hw->link = *event;
178 efct->efcport->link_status = EFC_LINK_STATUS_UP;
179
180 if (event->topology == SLI4_LINK_TOPO_NON_FC_AL) {
181 struct efc_domain_record drec = {0};
182
183 efc_log_info(hw->os, "Link Up, NPORT, speed is %d\n",
184 event->speed);
185 drec.speed = event->speed;
186 drec.fc_id = event->fc_id;
187 drec.is_nport = true;
188 efc_domain_cb(efct->efcport, EFC_HW_DOMAIN_FOUND,
189 &drec);
190 } else if (event->topology == SLI4_LINK_TOPO_FC_AL) {
191 u8 buf[SLI4_BMBX_SIZE];
192
193 efc_log_info(hw->os, "Link Up, LOOP, speed is %d\n",
194 event->speed);
195
196 if (!sli_cmd_read_topology(&hw->sli, buf,
197 &hw->loop_map)) {
198 rc = efct_hw_command(hw, buf, EFCT_CMD_NOWAIT,
199 __efct_read_topology_cb, NULL);
200 }
201
202 if (rc)
203 efc_log_debug(hw->os, "READ_TOPOLOGY failed\n");
204 } else {
205 efc_log_info(hw->os, "%s(%#x), speed is %d\n",
206 "Link Up, unsupported topology ",
207 event->topology, event->speed);
208 }
209 break;
210 case SLI4_LINK_STATUS_DOWN:
211 efc_log_info(hw->os, "Link down\n");
212
213 hw->link.status = event->status;
214 efct->efcport->link_status = EFC_LINK_STATUS_DOWN;
215
216 d = efct->efcport->domain;
217 if (d)
218 efc_domain_cb(efct->efcport, EFC_HW_DOMAIN_LOST, d);
219 break;
220 default:
221 efc_log_debug(hw->os, "unhandled link status %#x\n",
222 event->status);
223 break;
224 }
225
226 return 0;
227}
228
229int
230efct_hw_setup(struct efct_hw *hw, void *os, struct pci_dev *pdev)
231{
232 u32 i, max_sgl, cpus;
233
234 if (hw->hw_setup_called)
235 return 0;
236
237
238
239
240
241
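	/*
	 * efct_hw_init() relies on NULL pointers to indicate that a structure
	 * still needs to be allocated, so start from a zeroed efct_hw.
	 */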
242 memset(hw, 0, sizeof(struct efct_hw));
243
244 hw->hw_setup_called = true;
245
246 hw->os = os;
247
248 mutex_init(&hw->bmbx_lock);
249 spin_lock_init(&hw->cmd_lock);
250 INIT_LIST_HEAD(&hw->cmd_head);
251 INIT_LIST_HEAD(&hw->cmd_pending);
252 hw->cmd_head_count = 0;
253
254
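	/* Pool of mailbox command contexts used for EFCT_CMD_NOWAIT requests. */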
255 hw->cmd_ctx_pool = mempool_create_kmalloc_pool(EFCT_CMD_CTX_POOL_SZ,
256 sizeof(struct efct_command_ctx));
257 if (!hw->cmd_ctx_pool) {
258 efc_log_err(hw->os, "failed to allocate mailbox buffer pool\n");
259 return -EIO;
260 }
261
262
263 hw->mbox_rqst_pool = mempool_create_kmalloc_pool(EFCT_CMD_CTX_POOL_SZ,
264 sizeof(struct efct_mbox_rqst_ctx));
265 if (!hw->mbox_rqst_pool) {
266 efc_log_err(hw->os, "failed to allocate mbox request pool\n");
267 return -EIO;
268 }
269
270 spin_lock_init(&hw->io_lock);
271 INIT_LIST_HEAD(&hw->io_inuse);
272 INIT_LIST_HEAD(&hw->io_free);
273 INIT_LIST_HEAD(&hw->io_wait_free);
274
275 atomic_set(&hw->io_alloc_failed_count, 0);
276
277 hw->config.speed = SLI4_LINK_SPEED_AUTO_16_8_4;
278 if (sli_setup(&hw->sli, hw->os, pdev, ((struct efct *)os)->reg)) {
279 efc_log_err(hw->os, "SLI setup failed\n");
280 return -EIO;
281 }
282
283 efct_hw_link_event_init(hw);
284
285 sli_callback(&hw->sli, SLI4_CB_LINK, efct_hw_cb_link, hw);
286
287
288
289
290 for (i = 0; i < ARRAY_SIZE(hw->num_qentries); i++)
291 hw->num_qentries[i] = hw->sli.qinfo.max_qentries[i];
292
293
294
295
296
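	/* Cap the WQ depth at half of the CQ depth. */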
297 hw->num_qentries[SLI4_QTYPE_WQ] = hw->num_qentries[SLI4_QTYPE_CQ] / 2;
298
299
300
301
302
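	/*
	 * Set the default RQ buffer size and derive the IO count from the
	 * number of XRIs the port exposes.
	 */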
303 hw->config.rq_default_buffer_size = EFCT_HW_RQ_SIZE_PAYLOAD;
304 hw->config.n_io = hw->sli.ext[SLI4_RSRC_XRI].size;
305
306 cpus = num_possible_cpus();
307 hw->config.n_eq = cpus > EFCT_HW_MAX_NUM_EQ ? EFCT_HW_MAX_NUM_EQ : cpus;
308
309 max_sgl = sli_get_max_sgl(&hw->sli) - SLI4_SGE_MAX_RESERVED;
310 max_sgl = (max_sgl > EFCT_FC_MAX_SGL) ? EFCT_FC_MAX_SGL : max_sgl;
311 hw->config.n_sgl = max_sgl;
312
313 (void)efct_hw_read_max_dump_size(hw);
314
315 return 0;
316}
317
318static void
319efct_logfcfi(struct efct_hw *hw, u32 j, u32 i, u32 id)
320{
321 efc_log_info(hw->os,
322 "REG_FCFI: filter[%d] %08X -> RQ[%d] id=%d\n",
323 j, hw->config.filter_def[j], i, id);
324}
325
326static inline void
327efct_hw_init_free_io(struct efct_hw_io *io)
328{
329
330
331
332
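	/* Reset the per-IO callbacks and state before the IO is reused. */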
333 io->done = NULL;
334 io->abort_done = NULL;
335 io->status_saved = false;
336 io->abort_in_progress = false;
337 io->type = 0xFFFF;
338 io->wq = NULL;
339}
340
341static bool efct_hw_iotype_is_originator(u16 io_type)
342{
343 switch (io_type) {
344 case EFCT_HW_FC_CT:
345 case EFCT_HW_ELS_REQ:
346 return true;
347 default:
348 return false;
349 }
350}
351
352static void
353efct_hw_io_restore_sgl(struct efct_hw *hw, struct efct_hw_io *io)
354{
355
356 io->sgl = &io->def_sgl;
357 io->sgl_count = io->def_sgl_count;
358}
359
360static void
361efct_hw_wq_process_io(void *arg, u8 *cqe, int status)
362{
363 struct efct_hw_io *io = arg;
364 struct efct_hw *hw = io->hw;
365 struct sli4_fc_wcqe *wcqe = (void *)cqe;
366 u32 len = 0;
367 u32 ext = 0;
368
369
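	/* The exchange is no longer busy once the XB bit is clear in the WCQE. */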
370 if (io->xbusy && (wcqe->flags & SLI4_WCQE_XB) == 0)
371 io->xbusy = false;
372
373
374 switch (io->type) {
375 case EFCT_HW_BLS_ACC:
376 case EFCT_HW_BLS_RJT:
377 break;
378 case EFCT_HW_ELS_REQ:
379 sli_fc_els_did(&hw->sli, cqe, &ext);
380 len = sli_fc_response_length(&hw->sli, cqe);
381 break;
382 case EFCT_HW_ELS_RSP:
383 case EFCT_HW_FC_CT_RSP:
384 break;
385 case EFCT_HW_FC_CT:
386 len = sli_fc_response_length(&hw->sli, cqe);
387 break;
388 case EFCT_HW_IO_TARGET_WRITE:
389 len = sli_fc_io_length(&hw->sli, cqe);
390 break;
391 case EFCT_HW_IO_TARGET_READ:
392 len = sli_fc_io_length(&hw->sli, cqe);
393 break;
394 case EFCT_HW_IO_TARGET_RSP:
395 break;
396 case EFCT_HW_IO_DNRX_REQUEUE:
397
398
399 break;
400 default:
401 efc_log_err(hw->os, "unhandled io type %#x for XRI 0x%x\n",
402 io->type, io->indicator);
403 break;
404 }
405 if (status) {
406 ext = sli_fc_ext_status(&hw->sli, cqe);
407
408
409
410
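		/*
		 * For an originator IO that still owns the exchange (XB set),
		 * issue an internal abort so the XRI is cleaned up before the
		 * completion is delivered.
		 */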
411 if (efct_hw_iotype_is_originator(io->type) &&
412 wcqe->flags & SLI4_WCQE_XB) {
413 int rc;
414
415 efc_log_debug(hw->os, "aborting xri=%#x tag=%#x\n",
416 io->indicator, io->reqtag);
417
418
419
420
421
422
423 rc = efct_hw_io_abort(hw, io, false, NULL, NULL);
424 if (rc == 0) {
425
426
427
428
429 io->status_saved = true;
430 io->saved_status = status;
431 io->saved_ext = ext;
432 io->saved_len = len;
433 goto exit_efct_hw_wq_process_io;
434 } else if (rc == -EINPROGRESS) {
435
436
437
438
439
440 efc_log_debug(hw->os, "%s%#x tag=%#x\n",
441 "abort in progress xri=",
442 io->indicator, io->reqtag);
443
444 } else {
445
446
447
448 efc_log_debug(hw->os, "%s%#x tag=%#x rc=%d\n",
449 "Failed to abort xri=",
450 io->indicator, io->reqtag, rc);
451 }
452 }
453 }
454
455 if (io->done) {
456 efct_hw_done_t done = io->done;
457
458 io->done = NULL;
459
460 if (io->status_saved) {
461
462 status = io->saved_status;
463 len = io->saved_len;
464 ext = io->saved_ext;
465 io->status_saved = false;
466 }
467
468
469 efct_hw_io_restore_sgl(hw, io);
470 done(io, len, status, ext, io->arg);
471 }
472
473exit_efct_hw_wq_process_io:
474 return;
475}
476
477static int
478efct_hw_setup_io(struct efct_hw *hw)
479{
480 u32 i = 0;
481 struct efct_hw_io *io = NULL;
482 uintptr_t xfer_virt = 0;
483 uintptr_t xfer_phys = 0;
484 u32 index;
485 bool new_alloc = true;
486 struct efc_dma *dma;
487 struct efct *efct = hw->os;
488
489 if (!hw->io) {
490 hw->io = kmalloc_array(hw->config.n_io, sizeof(io), GFP_KERNEL);
491 if (!hw->io)
492 return -ENOMEM;
493
494 memset(hw->io, 0, hw->config.n_io * sizeof(io));
495
496 for (i = 0; i < hw->config.n_io; i++) {
497 hw->io[i] = kzalloc(sizeof(*io), GFP_KERNEL);
498 if (!hw->io[i])
499 goto error;
500 }
501
502
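		/* Allocate one contiguous block of WQE buffers, one WQE per IO. */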
503 hw->wqe_buffs = kzalloc((hw->config.n_io * hw->sli.wqe_size),
504 GFP_KERNEL);
		if (!hw->wqe_buffs)
			goto error;
509
510 } else {
511
512 new_alloc = false;
513 }
514
515 if (new_alloc) {
516 dma = &hw->xfer_rdy;
517 dma->size = sizeof(struct fcp_txrdy) * hw->config.n_io;
518 dma->virt = dma_alloc_coherent(&efct->pci->dev,
519 dma->size, &dma->phys, GFP_DMA);
520 if (!dma->virt)
521 return -ENOMEM;
522 }
523 xfer_virt = (uintptr_t)hw->xfer_rdy.virt;
524 xfer_phys = hw->xfer_rdy.phys;
525
526
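	/* Initialize each IO: WQE buffer, request tag, XRI and default SGL. */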
527 for (i = 0; i < hw->config.n_io; i++) {
528 struct hw_wq_callback *wqcb;
529
530 io = hw->io[i];
531
532
533 io->hw = hw;
534
535
536 io->wqe.wqebuf = &hw->wqe_buffs[i * hw->sli.wqe_size];
537
538
539 wqcb = efct_hw_reqtag_alloc(hw, efct_hw_wq_process_io, io);
540 if (!wqcb) {
541 efc_log_err(hw->os, "can't allocate request tag\n");
542 return -ENOSPC;
543 }
544 io->reqtag = wqcb->instance_index;
545
546
547 efct_hw_init_free_io(io);
548
549
550 io->xbusy = 0;
551
552 if (sli_resource_alloc(&hw->sli, SLI4_RSRC_XRI,
553 &io->indicator, &index)) {
554 efc_log_err(hw->os,
555 "sli_resource_alloc failed @ %d\n", i);
556 return -ENOMEM;
557 }
558
559 if (new_alloc) {
560 dma = &io->def_sgl;
561 dma->size = hw->config.n_sgl *
562 sizeof(struct sli4_sge);
563 dma->virt = dma_alloc_coherent(&efct->pci->dev,
564 dma->size, &dma->phys,
565 GFP_DMA);
566 if (!dma->virt) {
567 efc_log_err(hw->os, "dma_alloc fail %d\n", i);
568 memset(&io->def_sgl, 0,
569 sizeof(struct efc_dma));
570 return -ENOMEM;
571 }
572 }
573 io->def_sgl_count = hw->config.n_sgl;
574 io->sgl = &io->def_sgl;
575 io->sgl_count = io->def_sgl_count;
576
577 if (hw->xfer_rdy.size) {
578 io->xfer_rdy.virt = (void *)xfer_virt;
579 io->xfer_rdy.phys = xfer_phys;
580 io->xfer_rdy.size = sizeof(struct fcp_txrdy);
581
582 xfer_virt += sizeof(struct fcp_txrdy);
583 xfer_phys += sizeof(struct fcp_txrdy);
584 }
585 }
586
587 return 0;
588error:
589 for (i = 0; i < hw->config.n_io && hw->io[i]; i++) {
590 kfree(hw->io[i]);
591 hw->io[i] = NULL;
592 }
593
594 kfree(hw->io);
595 hw->io = NULL;
596
597 return -ENOMEM;
598}
599
600static int
601efct_hw_init_prereg_io(struct efct_hw *hw)
602{
603 u32 i, idx = 0;
604 struct efct_hw_io *io = NULL;
605 u8 cmd[SLI4_BMBX_SIZE];
606 int rc = 0;
607 u32 n_rem;
608 u32 n = 0;
609 u32 sgls_per_request = 256;
610 struct efc_dma **sgls = NULL;
611 struct efc_dma req;
612 struct efct *efct = hw->os;
613
614 sgls = kmalloc_array(sgls_per_request, sizeof(*sgls), GFP_KERNEL);
615 if (!sgls)
616 return -ENOMEM;
617
618 memset(&req, 0, sizeof(struct efc_dma));
619 req.size = 32 + sgls_per_request * 16;
620 req.virt = dma_alloc_coherent(&efct->pci->dev, req.size, &req.phys,
621 GFP_DMA);
622 if (!req.virt) {
623 kfree(sgls);
624 return -ENOMEM;
625 }
626
627 for (n_rem = hw->config.n_io; n_rem; n_rem -= n) {
628
629
630
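		/*
		 * Post SGLs in batches of contiguous XRIs; a batch ends early
		 * if the next XRI is not contiguous with the previous one.
		 */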
631 u32 min = (sgls_per_request < n_rem) ? sgls_per_request : n_rem;
632
633 for (n = 0; n < min; n++) {
634
635 if (n > 0) {
636 if (hw->io[idx + n]->indicator !=
637 hw->io[idx + n - 1]->indicator + 1)
638 break;
639 }
640
641 sgls[n] = hw->io[idx + n]->sgl;
642 }
643
644 if (sli_cmd_post_sgl_pages(&hw->sli, cmd,
645 hw->io[idx]->indicator, n, sgls, NULL, &req)) {
646 rc = -EIO;
647 break;
648 }
649
650 rc = efct_hw_command(hw, cmd, EFCT_CMD_POLL, NULL, NULL);
651 if (rc) {
652 efc_log_err(hw->os, "SGL post failed, rc=%d\n", rc);
653 break;
654 }
655
656
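		/* Posted successfully: move these IOs to the free list. */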
657 for (i = 0; i < n; i++, idx++) {
658 io = hw->io[idx];
659 io->state = EFCT_HW_IO_STATE_FREE;
660 INIT_LIST_HEAD(&io->list_entry);
661 list_add_tail(&io->list_entry, &hw->io_free);
662 }
663 }
664
665 dma_free_coherent(&efct->pci->dev, req.size, req.virt, req.phys);
666 memset(&req, 0, sizeof(struct efc_dma));
667 kfree(sgls);
668
669 return rc;
670}
671
672static int
673efct_hw_init_io(struct efct_hw *hw)
674{
675 u32 i, idx = 0;
676 bool prereg = false;
677 struct efct_hw_io *io = NULL;
678 int rc = 0;
679
680 prereg = hw->sli.params.sgl_pre_registered;
681
682 if (prereg)
683 return efct_hw_init_prereg_io(hw);
684
685 for (i = 0; i < hw->config.n_io; i++, idx++) {
686 io = hw->io[idx];
687 io->state = EFCT_HW_IO_STATE_FREE;
688 INIT_LIST_HEAD(&io->list_entry);
689 list_add_tail(&io->list_entry, &hw->io_free);
690 }
691
692 return rc;
693}
694
695static int
696efct_hw_config_set_fdt_xfer_hint(struct efct_hw *hw, u32 fdt_xfer_hint)
697{
698 int rc = 0;
699 u8 buf[SLI4_BMBX_SIZE];
700 struct sli4_rqst_cmn_set_features_set_fdt_xfer_hint param;
701
	memset(&param, 0, sizeof(param));
703 param.fdt_xfer_hint = cpu_to_le32(fdt_xfer_hint);
704
705 sli_cmd_common_set_features(&hw->sli, buf,
		SLI4_SET_FEATURES_SET_FTD_XFER_HINT, sizeof(param), &param);
707
708 rc = efct_hw_command(hw, buf, EFCT_CMD_POLL, NULL, NULL);
709 if (rc)
710 efc_log_warn(hw->os, "set FDT hint %d failed: %d\n",
711 fdt_xfer_hint, rc);
712 else
713 efc_log_info(hw->os, "Set FTD transfer hint to %d\n",
714 le32_to_cpu(param.fdt_xfer_hint));
715
716 return rc;
717}
718
719static int
720efct_hw_config_rq(struct efct_hw *hw)
721{
722 u32 min_rq_count, i, rc;
723 struct sli4_cmd_rq_cfg rq_cfg[SLI4_CMD_REG_FCFI_NUM_RQ_CFG];
724 u8 buf[SLI4_BMBX_SIZE];
725
726 efc_log_info(hw->os, "using REG_FCFI standard\n");
727
728
729
730
731
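	/* Set the filter match/mask values from the configured filter_def words. */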
732 for (i = 0; i < SLI4_CMD_REG_FCFI_NUM_RQ_CFG; i++) {
733 rq_cfg[i].rq_id = cpu_to_le16(0xffff);
734 rq_cfg[i].r_ctl_mask = (u8)hw->config.filter_def[i];
735 rq_cfg[i].r_ctl_match = (u8)(hw->config.filter_def[i] >> 8);
736 rq_cfg[i].type_mask = (u8)(hw->config.filter_def[i] >> 16);
737 rq_cfg[i].type_match = (u8)(hw->config.filter_def[i] >> 24);
738 }
739
740
741
742
743
744
745 min_rq_count = (hw->hw_rq_count < SLI4_CMD_REG_FCFI_NUM_RQ_CFG) ?
746 hw->hw_rq_count : SLI4_CMD_REG_FCFI_NUM_RQ_CFG;
747 for (i = 0; i < min_rq_count; i++) {
748 struct hw_rq *rq = hw->hw_rq[i];
749 u32 j;
750
751 for (j = 0; j < SLI4_CMD_REG_FCFI_NUM_RQ_CFG; j++) {
752 u32 mask = (rq->filter_mask != 0) ?
753 rq->filter_mask : 1;
754
755 if (!(mask & (1U << j)))
756 continue;
757
758 rq_cfg[i].rq_id = cpu_to_le16(rq->hdr->id);
759 efct_logfcfi(hw, j, i, rq->hdr->id);
760 }
761 }
762
763 rc = -EIO;
764 if (!sli_cmd_reg_fcfi(&hw->sli, buf, 0, rq_cfg))
765 rc = efct_hw_command(hw, buf, EFCT_CMD_POLL, NULL, NULL);
766
767 if (rc != 0) {
768 efc_log_err(hw->os, "FCFI registration failed\n");
769 return rc;
770 }
771 hw->fcf_indicator =
772 le16_to_cpu(((struct sli4_cmd_reg_fcfi *)buf)->fcfi);
773
774 return rc;
775}
776
777static int
778efct_hw_config_mrq(struct efct_hw *hw, u8 mode, u16 fcf_index)
779{
780 u8 buf[SLI4_BMBX_SIZE], mrq_bitmask = 0;
781 struct hw_rq *rq;
782 struct sli4_cmd_reg_fcfi_mrq *rsp = NULL;
783 struct sli4_cmd_rq_cfg rq_filter[SLI4_CMD_REG_FCFI_MRQ_NUM_RQ_CFG];
784 u32 rc, i;
785
786 if (mode == SLI4_CMD_REG_FCFI_SET_FCFI_MODE)
787 goto issue_cmd;
788
789
790 for (i = 0; i < SLI4_CMD_REG_FCFI_NUM_RQ_CFG; i++) {
791 rq_filter[i].rq_id = cpu_to_le16(0xffff);
792 rq_filter[i].type_mask = (u8)hw->config.filter_def[i];
793 rq_filter[i].type_match = (u8)(hw->config.filter_def[i] >> 8);
794 rq_filter[i].r_ctl_mask = (u8)(hw->config.filter_def[i] >> 16);
795 rq_filter[i].r_ctl_match = (u8)(hw->config.filter_def[i] >> 24);
796 }
797
798 rq = hw->hw_rq[0];
799 rq_filter[0].rq_id = cpu_to_le16(rq->hdr->id);
800 rq_filter[1].rq_id = cpu_to_le16(rq->hdr->id);
801
802 mrq_bitmask = 0x2;
803issue_cmd:
804 efc_log_debug(hw->os, "Issue reg_fcfi_mrq count:%d policy:%d mode:%d\n",
805 hw->hw_rq_count, hw->config.rq_selection_policy, mode);
806
807 rc = sli_cmd_reg_fcfi_mrq(&hw->sli, buf, mode, fcf_index,
808 hw->config.rq_selection_policy, mrq_bitmask,
809 hw->hw_mrq_count, rq_filter);
810 if (rc) {
811 efc_log_err(hw->os, "sli_cmd_reg_fcfi_mrq() failed\n");
812 return -EIO;
813 }
814
815 rc = efct_hw_command(hw, buf, EFCT_CMD_POLL, NULL, NULL);
816
817 rsp = (struct sli4_cmd_reg_fcfi_mrq *)buf;
818
819 if ((rc) || (le16_to_cpu(rsp->hdr.status))) {
820 efc_log_err(hw->os, "FCFI MRQ reg failed. cmd=%x status=%x\n",
821 rsp->hdr.command, le16_to_cpu(rsp->hdr.status));
822 return -EIO;
823 }
824
825 if (mode == SLI4_CMD_REG_FCFI_SET_FCFI_MODE)
826 hw->fcf_indicator = le16_to_cpu(rsp->fcfi);
827
828 return 0;
829}
830
831static void
832efct_hw_queue_hash_add(struct efct_queue_hash *hash,
833 u16 id, u16 index)
834{
835 u32 hash_index = id & (EFCT_HW_Q_HASH_SIZE - 1);
836
837
838
839
840
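	/*
	 * Linear-probe for a free slot; the hash table is larger than the
	 * number of queues, so the search always terminates.
	 */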
841 while (hash[hash_index].in_use)
842 hash_index = (hash_index + 1) & (EFCT_HW_Q_HASH_SIZE - 1);
843
844
845 hash[hash_index].id = id;
846 hash[hash_index].in_use = true;
847 hash[hash_index].index = index;
848}
849
850static int
851efct_hw_config_sli_port_health_check(struct efct_hw *hw, u8 query, u8 enable)
852{
853 int rc = 0;
854 u8 buf[SLI4_BMBX_SIZE];
855 struct sli4_rqst_cmn_set_features_health_check param;
856 u32 health_check_flag = 0;
857
	memset(&param, 0, sizeof(param));
859
860 if (enable)
861 health_check_flag |= SLI4_RQ_HEALTH_CHECK_ENABLE;
862
863 if (query)
864 health_check_flag |= SLI4_RQ_HEALTH_CHECK_QUERY;
865
866 param.health_check_dword = cpu_to_le32(health_check_flag);
867
868
869 sli_cmd_common_set_features(&hw->sli, buf,
		SLI4_SET_FEATURES_SLI_PORT_HEALTH_CHECK, sizeof(param), &param);
871
872 rc = efct_hw_command(hw, buf, EFCT_CMD_POLL, NULL, NULL);
873 if (rc)
874 efc_log_err(hw->os, "efct_hw_command returns %d\n", rc);
875 else
876 efc_log_debug(hw->os, "SLI Port Health Check is enabled\n");
877
878 return rc;
879}
880
881int
882efct_hw_init(struct efct_hw *hw)
883{
884 int rc;
885 u32 i = 0;
886 int rem_count;
887 unsigned long flags = 0;
888 struct efct_hw_io *temp;
889 struct efc_dma *dma;
890
891
892
893
894
895
896
897
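	/*
	 * Make sure the command lists are empty. At start-of-day they should
	 * be; anything left over from a previous run is an error.
	 */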
898 spin_lock_irqsave(&hw->cmd_lock, flags);
899 if (!list_empty(&hw->cmd_head)) {
900 spin_unlock_irqrestore(&hw->cmd_lock, flags);
901 efc_log_err(hw->os, "command found on cmd list\n");
902 return -EIO;
903 }
904 if (!list_empty(&hw->cmd_pending)) {
905 spin_unlock_irqrestore(&hw->cmd_lock, flags);
906 efc_log_err(hw->os, "command found on pending list\n");
907 return -EIO;
908 }
909 spin_unlock_irqrestore(&hw->cmd_lock, flags);
910
911
912 efct_hw_rx_free(hw);
913
914
915
916
917
918
919
920
921
922
923
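	/* Drain any IOs left on the lists by a previous initialization. */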
924 rem_count = 0;
925 while ((!list_empty(&hw->io_wait_free))) {
926 rem_count++;
927 temp = list_first_entry(&hw->io_wait_free, struct efct_hw_io,
928 list_entry);
929 list_del_init(&temp->list_entry);
930 }
931 if (rem_count > 0)
932 efc_log_debug(hw->os, "rmvd %d items from io_wait_free list\n",
933 rem_count);
934
935 rem_count = 0;
936 while ((!list_empty(&hw->io_inuse))) {
937 rem_count++;
938 temp = list_first_entry(&hw->io_inuse, struct efct_hw_io,
939 list_entry);
940 list_del_init(&temp->list_entry);
941 }
942 if (rem_count > 0)
943 efc_log_debug(hw->os, "rmvd %d items from io_inuse list\n",
944 rem_count);
945
946 rem_count = 0;
947 while ((!list_empty(&hw->io_free))) {
948 rem_count++;
949 temp = list_first_entry(&hw->io_free, struct efct_hw_io,
950 list_entry);
951 list_del_init(&temp->list_entry);
952 }
953 if (rem_count > 0)
954 efc_log_debug(hw->os, "rmvd %d items from io_free list\n",
955 rem_count);
956
957
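	/* With only one RQ configured, turn off the multi-RQ posting feature. */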
958 if (hw->config.n_rq == 1)
959 hw->sli.features &= (~SLI4_REQFEAT_MRQP);
960
961 if (sli_init(&hw->sli)) {
962 efc_log_err(hw->os, "SLI failed to initialize\n");
963 return -EIO;
964 }
965
966 if (hw->sliport_healthcheck) {
967 rc = efct_hw_config_sli_port_health_check(hw, 0, 1);
968 if (rc != 0) {
969 efc_log_err(hw->os, "Enable port Health check fail\n");
970 return rc;
971 }
972 }
973
974
975
976
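	/* The FDT transfer hint is only applied to IF_TYPE 2 ports. */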
977 if (hw->sli.if_type == SLI4_INTF_IF_TYPE_2) {
978
979
980
981
982
983 efct_hw_config_set_fdt_xfer_hint(hw, EFCT_HW_FDT_XFER_HINT);
984 }
985
986
987 memset(hw->cq_hash, 0, sizeof(hw->cq_hash));
988 efc_log_debug(hw->os, "Max CQs %d, hash size = %d\n",
989 EFCT_HW_MAX_NUM_CQ, EFCT_HW_Q_HASH_SIZE);
990
991 memset(hw->rq_hash, 0, sizeof(hw->rq_hash));
992 efc_log_debug(hw->os, "Max RQs %d, hash size = %d\n",
993 EFCT_HW_MAX_NUM_RQ, EFCT_HW_Q_HASH_SIZE);
994
995 memset(hw->wq_hash, 0, sizeof(hw->wq_hash));
996 efc_log_debug(hw->os, "Max WQs %d, hash size = %d\n",
997 EFCT_HW_MAX_NUM_WQ, EFCT_HW_Q_HASH_SIZE);
998
999 rc = efct_hw_init_queues(hw);
1000 if (rc)
1001 return rc;
1002
1003 rc = efct_hw_map_wq_cpu(hw);
1004 if (rc)
1005 return rc;
1006
1007
1008 rc = efct_hw_rx_allocate(hw);
1009 if (rc) {
1010 efc_log_err(hw->os, "rx_allocate failed\n");
1011 return rc;
1012 }
1013
1014 rc = efct_hw_rx_post(hw);
1015 if (rc) {
1016 efc_log_err(hw->os, "WARNING - error posting RQ buffers\n");
1017 return rc;
1018 }
1019
1020 if (hw->config.n_eq == 1) {
1021 rc = efct_hw_config_rq(hw);
1022 if (rc) {
1023 efc_log_err(hw->os, "config rq failed %d\n", rc);
1024 return rc;
1025 }
1026 } else {
1027 rc = efct_hw_config_mrq(hw, SLI4_CMD_REG_FCFI_SET_FCFI_MODE, 0);
1028 if (rc != 0) {
1029 efc_log_err(hw->os, "REG_FCFI_MRQ FCFI reg failed\n");
1030 return rc;
1031 }
1032
1033 rc = efct_hw_config_mrq(hw, SLI4_CMD_REG_FCFI_SET_MRQ_MODE, 0);
1034 if (rc != 0) {
1035 efc_log_err(hw->os, "REG_FCFI_MRQ MRQ reg failed\n");
1036 return rc;
1037 }
1038 }
1039
1040
1041
1042
1043
1044
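	/*
	 * Allocate the WQ request tag pool, used to map WQ completions back
	 * to their callbacks.
	 */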
1045 hw->wq_reqtag_pool = efct_hw_reqtag_pool_alloc(hw);
1046 if (!hw->wq_reqtag_pool) {
1047 efc_log_err(hw->os, "efct_hw_reqtag_pool_alloc failed\n");
1048 return -ENOMEM;
1049 }
1050
1051 rc = efct_hw_setup_io(hw);
1052 if (rc) {
1053 efc_log_err(hw->os, "IO allocation failure\n");
1054 return rc;
1055 }
1056
1057 rc = efct_hw_init_io(hw);
1058 if (rc) {
1059 efc_log_err(hw->os, "IO initialization failure\n");
1060 return rc;
1061 }
1062
1063 dma = &hw->loop_map;
1064 dma->size = SLI4_MIN_LOOP_MAP_BYTES;
1065 dma->virt = dma_alloc_coherent(&hw->os->pci->dev, dma->size, &dma->phys,
1066 GFP_DMA);
1067 if (!dma->virt)
1068 return -EIO;
1069
1070
1071
1072
1073
1074 for (i = 0; i < hw->eq_count; i++)
1075 sli_queue_arm(&hw->sli, &hw->eq[i], true);
1076
1077
1078
1079
1080 for (i = 0; i < hw->rq_count; i++)
1081 efct_hw_queue_hash_add(hw->rq_hash, hw->rq[i].id, i);
1082
1083
1084
1085
1086 for (i = 0; i < hw->wq_count; i++)
1087 efct_hw_queue_hash_add(hw->wq_hash, hw->wq[i].id, i);
1088
1089
1090
1091
1092 for (i = 0; i < hw->cq_count; i++) {
1093 efct_hw_queue_hash_add(hw->cq_hash, hw->cq[i].id, i);
1094 sli_queue_arm(&hw->sli, &hw->cq[i], true);
1095 }
1096
1097
1098 for (i = 0; i < hw->hw_rq_count; i++) {
1099 struct hw_rq *rq = hw->hw_rq[i];
1100
1101 hw->cq[rq->cq->instance].proc_limit = hw->config.n_io / 2;
1102 }
1103
1104
1105 hw->state = EFCT_HW_STATE_ACTIVE;
1106
1107
1108
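	/* Reserve a HW IO on WQ 0 for SEND_FRAME requests. */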
1109 hw->hw_wq[0]->send_frame_io = efct_hw_io_alloc(hw);
1110 if (!hw->hw_wq[0]->send_frame_io)
1111 efc_log_err(hw->os, "alloc for send_frame_io failed\n");
1112
1113
1114 atomic_set(&hw->send_frame_seq_id, 0);
1115
1116 return 0;
1117}
1118
1119int
1120efct_hw_parse_filter(struct efct_hw *hw, void *value)
1121{
	int rc = 0;
	char *buf, *p, *token;
	u32 idx = 0;

	for (idx = 0; idx < ARRAY_SIZE(hw->config.filter_def); idx++)
		hw->config.filter_def[idx] = 0;

	buf = kstrdup(value, GFP_KERNEL);
	if (!buf || !*buf) {
		efc_log_err(hw->os, "filter string is empty or alloc failed\n");
		kfree(buf);
		return -ENOMEM;
	}

	/* strsep() advances p, so keep the original allocation for kfree() */
	p = buf;
	idx = 0;
	while ((token = strsep(&p, ",")) && *token) {
		if (kstrtou32(token, 0, &hw->config.filter_def[idx++]))
			efc_log_err(hw->os, "kstrtou32 failed\n");

		if (!p || !*p)
			break;

		if (idx == ARRAY_SIZE(hw->config.filter_def))
			break;
	}
	kfree(buf);

	return rc;
1150}
1151
1152u64
1153efct_get_wwnn(struct efct_hw *hw)
1154{
1155 struct sli4 *sli = &hw->sli;
1156 u8 p[8];
1157
1158 memcpy(p, sli->wwnn, sizeof(p));
1159 return get_unaligned_be64(p);
1160}
1161
1162u64
1163efct_get_wwpn(struct efct_hw *hw)
1164{
1165 struct sli4 *sli = &hw->sli;
1166 u8 p[8];
1167
1168 memcpy(p, sli->wwpn, sizeof(p));
1169 return get_unaligned_be64(p);
1170}
1171
1172static struct efc_hw_rq_buffer *
1173efct_hw_rx_buffer_alloc(struct efct_hw *hw, u32 rqindex, u32 count,
1174 u32 size)
1175{
1176 struct efct *efct = hw->os;
1177 struct efc_hw_rq_buffer *rq_buf = NULL;
1178 struct efc_hw_rq_buffer *prq;
1179 u32 i;
1180
1181 if (!count)
1182 return NULL;
1183
1184 rq_buf = kmalloc_array(count, sizeof(*rq_buf), GFP_KERNEL);
1185 if (!rq_buf)
1186 return NULL;
1187 memset(rq_buf, 0, sizeof(*rq_buf) * count);
1188
	for (i = 0, prq = rq_buf; i < count; i++, prq++) {
1190 prq->rqindex = rqindex;
1191 prq->dma.size = size;
1192 prq->dma.virt = dma_alloc_coherent(&efct->pci->dev,
1193 prq->dma.size,
1194 &prq->dma.phys,
1195 GFP_DMA);
1196 if (!prq->dma.virt) {
1197 efc_log_err(hw->os, "DMA allocation failed\n");
1198 kfree(rq_buf);
1199 return NULL;
1200 }
1201 }
1202 return rq_buf;
1203}
1204
1205static void
1206efct_hw_rx_buffer_free(struct efct_hw *hw,
1207 struct efc_hw_rq_buffer *rq_buf,
1208 u32 count)
1209{
1210 struct efct *efct = hw->os;
1211 u32 i;
1212 struct efc_hw_rq_buffer *prq;
1213
1214 if (rq_buf) {
1215 for (i = 0, prq = rq_buf; i < count; i++, prq++) {
1216 dma_free_coherent(&efct->pci->dev,
1217 prq->dma.size, prq->dma.virt,
1218 prq->dma.phys);
1219 memset(&prq->dma, 0, sizeof(struct efc_dma));
1220 }
1221
1222 kfree(rq_buf);
1223 }
1224}
1225
1226int
1227efct_hw_rx_allocate(struct efct_hw *hw)
1228{
1229 struct efct *efct = hw->os;
1230 u32 i;
1231 int rc = 0;
1232 u32 rqindex = 0;
1233 u32 hdr_size = EFCT_HW_RQ_SIZE_HDR;
1234 u32 payload_size = hw->config.rq_default_buffer_size;
1235
1236 rqindex = 0;
1237
1238 for (i = 0; i < hw->hw_rq_count; i++) {
1239 struct hw_rq *rq = hw->hw_rq[i];
1240
1241
1242 rq->hdr_buf = efct_hw_rx_buffer_alloc(hw, rqindex,
1243 rq->entry_count,
1244 hdr_size);
1245 if (!rq->hdr_buf) {
1246 efc_log_err(efct, "rx_buffer_alloc hdr_buf failed\n");
1247 rc = -EIO;
1248 break;
1249 }
1250
1251 efc_log_debug(hw->os,
1252 "rq[%2d] rq_id %02d header %4d by %4d bytes\n",
1253 i, rq->hdr->id, rq->entry_count, hdr_size);
1254
1255 rqindex++;
1256
1257
1258 rq->payload_buf = efct_hw_rx_buffer_alloc(hw, rqindex,
1259 rq->entry_count,
1260 payload_size);
1261 if (!rq->payload_buf) {
1262 efc_log_err(efct, "rx_buffer_alloc fb_buf failed\n");
1263 rc = -EIO;
1264 break;
1265 }
1266 efc_log_debug(hw->os,
1267 "rq[%2d] rq_id %02d default %4d by %4d bytes\n",
1268 i, rq->data->id, rq->entry_count, payload_size);
1269 rqindex++;
1270 }
1271
1272 return rc ? -EIO : 0;
1273}
1274
1275int
1276efct_hw_rx_post(struct efct_hw *hw)
1277{
1278 u32 i;
1279 u32 idx;
1280 u32 rq_idx;
1281 int rc = 0;
1282
1283 if (!hw->seq_pool) {
1284 u32 count = 0;
1285
1286 for (i = 0; i < hw->hw_rq_count; i++)
1287 count += hw->hw_rq[i]->entry_count;
1288
1289 hw->seq_pool = kmalloc_array(count,
1290 sizeof(struct efc_hw_sequence), GFP_KERNEL);
1291 if (!hw->seq_pool)
1292 return -ENOMEM;
1293 }
1294
1295
1296
1297
1298
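	/*
	 * Walk the RQ pairs and hand a header/payload buffer pair to the
	 * hardware for each entry; one entry per RQ is left unposted.
	 */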
1299 for (rq_idx = 0, idx = 0; rq_idx < hw->hw_rq_count; rq_idx++) {
1300 struct hw_rq *rq = hw->hw_rq[rq_idx];
1301
1302 for (i = 0; i < rq->entry_count - 1; i++) {
1303 struct efc_hw_sequence *seq;
1304
1305 seq = hw->seq_pool + idx;
1306 idx++;
1307 seq->header = &rq->hdr_buf[i];
1308 seq->payload = &rq->payload_buf[i];
1309 rc = efct_hw_sequence_free(hw, seq);
1310 if (rc)
1311 break;
1312 }
1313 if (rc)
1314 break;
1315 }
1316
	if (rc && hw->seq_pool) {
		kfree(hw->seq_pool);
		hw->seq_pool = NULL;
	}
1319
1320 return rc;
1321}
1322
1323void
1324efct_hw_rx_free(struct efct_hw *hw)
1325{
1326 u32 i;
1327
1328
1329 for (i = 0; i < hw->hw_rq_count; i++) {
1330 struct hw_rq *rq = hw->hw_rq[i];
1331
1332 if (rq) {
1333 efct_hw_rx_buffer_free(hw, rq->hdr_buf,
1334 rq->entry_count);
1335 rq->hdr_buf = NULL;
1336 efct_hw_rx_buffer_free(hw, rq->payload_buf,
1337 rq->entry_count);
1338 rq->payload_buf = NULL;
1339 }
1340 }
1341}
1342
1343static int
1344efct_hw_cmd_submit_pending(struct efct_hw *hw)
1345{
1346 int rc = 0;
1347
1348
1349
1350
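	/* Submit queued commands while the MQ has room (one slot is kept free). */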
1351 while (hw->cmd_head_count < (EFCT_HW_MQ_DEPTH - 1) &&
1352 !list_empty(&hw->cmd_pending)) {
1353 struct efct_command_ctx *ctx;
1354
1355 ctx = list_first_entry(&hw->cmd_pending,
1356 struct efct_command_ctx, list_entry);
1357 if (!ctx)
1358 break;
1359
1360 list_del_init(&ctx->list_entry);
1361
1362 list_add_tail(&ctx->list_entry, &hw->cmd_head);
1363 hw->cmd_head_count++;
1364 if (sli_mq_write(&hw->sli, hw->mq, ctx->buf) < 0) {
1365 efc_log_debug(hw->os,
1366 "sli_queue_write failed: %d\n", rc);
1367 rc = -EIO;
1368 break;
1369 }
1370 }
1371 return rc;
1372}
1373
1374int
1375efct_hw_command(struct efct_hw *hw, u8 *cmd, u32 opts, void *cb, void *arg)
1376{
1377 int rc = -EIO;
1378 unsigned long flags = 0;
1379 void *bmbx = NULL;
1380
1381
1382
1383
1384
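	/* Refuse to issue the command if the chip is in an error state. */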
1385 if (sli_fw_error_status(&hw->sli) > 0) {
1386 efc_log_crit(hw->os, "Chip in an error state - reset needed\n");
1387 efc_log_crit(hw->os, "status=%#x error1=%#x error2=%#x\n",
1388 sli_reg_read_status(&hw->sli),
1389 sli_reg_read_err1(&hw->sli),
1390 sli_reg_read_err2(&hw->sli));
1391
1392 return -EIO;
1393 }
1394
1395
1396
1397
1398
1399
1400
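	/*
	 * EFCT_CMD_POLL runs the command through the bootstrap mailbox and
	 * waits for it; EFCT_CMD_NOWAIT queues it on the MQ and completes it
	 * through the optional callback.
	 */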
1401 if (opts == EFCT_CMD_POLL) {
1402 mutex_lock(&hw->bmbx_lock);
1403 bmbx = hw->sli.bmbx.virt;
1404
1405 memset(bmbx, 0, SLI4_BMBX_SIZE);
1406 memcpy(bmbx, cmd, SLI4_BMBX_SIZE);
1407
1408 if (sli_bmbx_command(&hw->sli) == 0) {
1409 rc = 0;
1410 memcpy(cmd, bmbx, SLI4_BMBX_SIZE);
1411 }
1412 mutex_unlock(&hw->bmbx_lock);
1413 } else if (opts == EFCT_CMD_NOWAIT) {
1414 struct efct_command_ctx *ctx = NULL;
1415
1416 if (hw->state != EFCT_HW_STATE_ACTIVE) {
1417 efc_log_err(hw->os, "Can't send command, HW state=%d\n",
1418 hw->state);
1419 return -EIO;
1420 }
1421
1422 ctx = mempool_alloc(hw->cmd_ctx_pool, GFP_ATOMIC);
1423 if (!ctx)
1424 return -ENOSPC;
1425
1426 memset(ctx, 0, sizeof(struct efct_command_ctx));
1427
1428 if (cb) {
1429 ctx->cb = cb;
1430 ctx->arg = arg;
1431 }
1432
1433 memcpy(ctx->buf, cmd, SLI4_BMBX_SIZE);
1434 ctx->ctx = hw;
1435
1436 spin_lock_irqsave(&hw->cmd_lock, flags);
1437
1438
1439 INIT_LIST_HEAD(&ctx->list_entry);
1440 list_add_tail(&ctx->list_entry, &hw->cmd_pending);
1441
1442
1443 rc = efct_hw_cmd_submit_pending(hw);
1444
1445 spin_unlock_irqrestore(&hw->cmd_lock, flags);
1446 }
1447
1448 return rc;
1449}
1450
1451static int
1452efct_hw_command_process(struct efct_hw *hw, int status, u8 *mqe,
1453 size_t size)
1454{
1455 struct efct_command_ctx *ctx = NULL;
1456 unsigned long flags = 0;
1457
1458 spin_lock_irqsave(&hw->cmd_lock, flags);
1459 if (!list_empty(&hw->cmd_head)) {
1460 ctx = list_first_entry(&hw->cmd_head,
1461 struct efct_command_ctx, list_entry);
1462 list_del_init(&ctx->list_entry);
1463 }
1464 if (!ctx) {
1465 efc_log_err(hw->os, "no command context\n");
1466 spin_unlock_irqrestore(&hw->cmd_lock, flags);
1467 return -EIO;
1468 }
1469
1470 hw->cmd_head_count--;
1471
1472
1473 efct_hw_cmd_submit_pending(hw);
1474
1475 spin_unlock_irqrestore(&hw->cmd_lock, flags);
1476
1477 if (ctx->cb) {
1478 memcpy(ctx->buf, mqe, size);
1479 ctx->cb(hw, status, ctx->buf, ctx->arg);
1480 }
1481
1482 mempool_free(ctx, hw->cmd_ctx_pool);
1483
1484 return 0;
1485}
1486
1487static int
1488efct_hw_mq_process(struct efct_hw *hw,
1489 int status, struct sli4_queue *mq)
1490{
1491 u8 mqe[SLI4_BMBX_SIZE];
1492 int rc;
1493
1494 rc = sli_mq_read(&hw->sli, mq, mqe);
1495 if (!rc)
1496 rc = efct_hw_command_process(hw, status, mqe, mq->size);
1497
1498 return rc;
1499}
1500
1501static int
1502efct_hw_command_cancel(struct efct_hw *hw)
1503{
1504 unsigned long flags = 0;
1505 int rc = 0;
1506
1507 spin_lock_irqsave(&hw->cmd_lock, flags);
1508
1509
1510
1511
1512
1513
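	/*
	 * Fail every command still on the active list; completing each one
	 * also pulls the next pending command onto the list, so both lists
	 * drain.
	 */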
1514 while (!list_empty(&hw->cmd_head)) {
1515 u8 mqe[SLI4_BMBX_SIZE] = { 0 };
1516 struct efct_command_ctx *ctx;
1517
1518 ctx = list_first_entry(&hw->cmd_head,
1519 struct efct_command_ctx, list_entry);
1520
1521 efc_log_debug(hw->os, "hung command %08x\n",
1522 !ctx ? U32_MAX : *((u32 *)ctx->buf));
1523 spin_unlock_irqrestore(&hw->cmd_lock, flags);
1524 rc = efct_hw_command_process(hw, -1, mqe, SLI4_BMBX_SIZE);
1525 spin_lock_irqsave(&hw->cmd_lock, flags);
1526 }
1527
1528 spin_unlock_irqrestore(&hw->cmd_lock, flags);
1529
1530 return rc;
1531}
1532
1533static void
1534efct_mbox_rsp_cb(struct efct_hw *hw, int status, u8 *mqe, void *arg)
1535{
1536 struct efct_mbox_rqst_ctx *ctx = arg;
1537
1538 if (ctx) {
1539 if (ctx->callback)
1540 (*ctx->callback)(hw->os->efcport, status, mqe,
1541 ctx->arg);
1542
1543 mempool_free(ctx, hw->mbox_rqst_pool);
1544 }
1545}
1546
1547int
1548efct_issue_mbox_rqst(void *base, void *cmd, void *cb, void *arg)
1549{
1550 struct efct_mbox_rqst_ctx *ctx;
1551 struct efct *efct = base;
1552 struct efct_hw *hw = &efct->hw;
1553 int rc;
1554
1555
1556
1557
1558
1559
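	/*
	 * The request context must outlive this call because the mailbox
	 * command may be queued and executed later, so it comes from a
	 * mempool rather than the stack.
	 */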
1560 ctx = mempool_alloc(hw->mbox_rqst_pool, GFP_ATOMIC);
1561 if (!ctx)
1562 return -EIO;
1563
1564 ctx->callback = cb;
1565 ctx->arg = arg;
1566
1567 rc = efct_hw_command(hw, cmd, EFCT_CMD_NOWAIT, efct_mbox_rsp_cb, ctx);
1568 if (rc) {
1569 efc_log_err(efct, "issue mbox rqst failure rc:%d\n", rc);
1570 mempool_free(ctx, hw->mbox_rqst_pool);
1571 return -EIO;
1572 }
1573
1574 return 0;
1575}
1576
1577static inline struct efct_hw_io *
1578_efct_hw_io_alloc(struct efct_hw *hw)
1579{
1580 struct efct_hw_io *io = NULL;
1581
1582 if (!list_empty(&hw->io_free)) {
1583 io = list_first_entry(&hw->io_free, struct efct_hw_io,
1584 list_entry);
1585 list_del(&io->list_entry);
1586 }
1587 if (io) {
1588 INIT_LIST_HEAD(&io->list_entry);
1589 list_add_tail(&io->list_entry, &hw->io_inuse);
1590 io->state = EFCT_HW_IO_STATE_INUSE;
1591 io->abort_reqtag = U32_MAX;
1592 io->wq = hw->wq_cpu_array[raw_smp_processor_id()];
1593 if (!io->wq) {
1594 efc_log_err(hw->os, "WQ not assigned for cpu:%d\n",
1595 raw_smp_processor_id());
1596 io->wq = hw->hw_wq[0];
1597 }
1598 kref_init(&io->ref);
1599 io->release = efct_hw_io_free_internal;
1600 } else {
1601 atomic_add(1, &hw->io_alloc_failed_count);
1602 }
1603
1604 return io;
1605}
1606
1607struct efct_hw_io *
1608efct_hw_io_alloc(struct efct_hw *hw)
1609{
1610 struct efct_hw_io *io = NULL;
1611 unsigned long flags = 0;
1612
1613 spin_lock_irqsave(&hw->io_lock, flags);
1614 io = _efct_hw_io_alloc(hw);
1615 spin_unlock_irqrestore(&hw->io_lock, flags);
1616
1617 return io;
1618}
1619
1620static void
1621efct_hw_io_free_move_correct_list(struct efct_hw *hw,
1622 struct efct_hw_io *io)
1623{
1624
1625
1626
1627
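	/*
	 * An IO whose exchange is still busy must wait for the XRI_ABORTED
	 * CQE before it can be handed out again.
	 */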
1628 if (io->xbusy) {
1629
1630
1631
1632
1633 INIT_LIST_HEAD(&io->list_entry);
1634 list_add_tail(&io->list_entry, &hw->io_wait_free);
1635 io->state = EFCT_HW_IO_STATE_WAIT_FREE;
1636 } else {
1637
1638 INIT_LIST_HEAD(&io->list_entry);
1639 list_add_tail(&io->list_entry, &hw->io_free);
1640 io->state = EFCT_HW_IO_STATE_FREE;
1641 }
1642}
1643
1644static inline void
1645efct_hw_io_free_common(struct efct_hw *hw, struct efct_hw_io *io)
1646{
1647
1648 efct_hw_init_free_io(io);
1649
1650
1651 efct_hw_io_restore_sgl(hw, io);
1652}
1653
1654void
1655efct_hw_io_free_internal(struct kref *arg)
1656{
1657 unsigned long flags = 0;
1658 struct efct_hw_io *io = container_of(arg, struct efct_hw_io, ref);
1659 struct efct_hw *hw = io->hw;
1660
1661
1662 efct_hw_io_free_common(hw, io);
1663
1664 spin_lock_irqsave(&hw->io_lock, flags);
1665
1666 if (!list_empty(&io->list_entry) && !list_empty(&hw->io_inuse)) {
1667 list_del_init(&io->list_entry);
1668 efct_hw_io_free_move_correct_list(hw, io);
1669 }
1670 spin_unlock_irqrestore(&hw->io_lock, flags);
1671}
1672
1673int
1674efct_hw_io_free(struct efct_hw *hw, struct efct_hw_io *io)
1675{
1676 return kref_put(&io->ref, io->release);
1677}
1678
1679struct efct_hw_io *
1680efct_hw_io_lookup(struct efct_hw *hw, u32 xri)
1681{
1682 u32 ioindex;
1683
1684 ioindex = xri - hw->sli.ext[SLI4_RSRC_XRI].base[0];
1685 return hw->io[ioindex];
1686}
1687
1688int
1689efct_hw_io_init_sges(struct efct_hw *hw, struct efct_hw_io *io,
1690 enum efct_hw_io_type type)
1691{
1692 struct sli4_sge *data = NULL;
1693 u32 i = 0;
1694 u32 skips = 0;
1695 u32 sge_flags = 0;
1696
1697 if (!io) {
1698 efc_log_err(hw->os, "bad parameter hw=%p io=%p\n", hw, io);
1699 return -EIO;
1700 }
1701
1702
1703 io->sgl = &io->def_sgl;
1704 io->sgl_count = io->def_sgl_count;
1705 io->first_data_sge = 0;
1706
1707 memset(io->sgl->virt, 0, 2 * sizeof(struct sli4_sge));
1708 io->n_sge = 0;
1709 io->sge_offset = 0;
1710
1711 io->type = type;
1712
1713 data = io->sgl->virt;
1714
1715
1716
1717
1718
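	/*
	 * The reserved SGEs (XFER_RDY and skips) at the head of the list
	 * depend on the IO type.
	 */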
1719 switch (type) {
1720 case EFCT_HW_IO_TARGET_WRITE:
1721
1722
1723 sge_flags = le32_to_cpu(data->dw2_flags);
1724 sge_flags &= (~SLI4_SGE_TYPE_MASK);
1725 sge_flags |= (SLI4_SGE_TYPE_DATA << SLI4_SGE_TYPE_SHIFT);
1726 data->buffer_address_high =
1727 cpu_to_le32(upper_32_bits(io->xfer_rdy.phys));
1728 data->buffer_address_low =
1729 cpu_to_le32(lower_32_bits(io->xfer_rdy.phys));
1730 data->buffer_length = cpu_to_le32(io->xfer_rdy.size);
1731 data->dw2_flags = cpu_to_le32(sge_flags);
1732 data++;
1733
1734 skips = EFCT_TARGET_WRITE_SKIPS;
1735
1736 io->n_sge = 1;
1737 break;
1738 case EFCT_HW_IO_TARGET_READ:
1739
1740
1741
1742 skips = EFCT_TARGET_READ_SKIPS;
1743 break;
1744 case EFCT_HW_IO_TARGET_RSP:
1745
1746
1747
1748 break;
1749 default:
1750 efc_log_err(hw->os, "unsupported IO type %#x\n", type);
1751 return -EIO;
1752 }
1753
1754
1755
1756
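	/* Write the required number of SKIP SGEs. */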
1757 for (i = 0; i < skips; i++) {
1758 sge_flags = le32_to_cpu(data->dw2_flags);
1759 sge_flags &= (~SLI4_SGE_TYPE_MASK);
1760 sge_flags |= (SLI4_SGE_TYPE_SKIP << SLI4_SGE_TYPE_SHIFT);
1761 data->dw2_flags = cpu_to_le32(sge_flags);
1762 data++;
1763 }
1764
1765 io->n_sge += skips;
1766
1767
1768
1769
1770 sge_flags = le32_to_cpu(data->dw2_flags);
1771 sge_flags |= SLI4_SGE_LAST;
1772 data->dw2_flags = cpu_to_le32(sge_flags);
1773
1774 return 0;
1775}
1776
1777int
1778efct_hw_io_add_sge(struct efct_hw *hw, struct efct_hw_io *io,
1779 uintptr_t addr, u32 length)
1780{
1781 struct sli4_sge *data = NULL;
1782 u32 sge_flags = 0;
1783
1784 if (!io || !addr || !length) {
1785 efc_log_err(hw->os,
1786 "bad parameter hw=%p io=%p addr=%lx length=%u\n",
1787 hw, io, addr, length);
1788 return -EIO;
1789 }
1790
1791 if (length > hw->sli.sge_supported_length) {
1792 efc_log_err(hw->os,
1793 "length of SGE %d bigger than allowed %d\n",
1794 length, hw->sli.sge_supported_length);
1795 return -EIO;
1796 }
1797
1798 data = io->sgl->virt;
1799 data += io->n_sge;
1800
1801 sge_flags = le32_to_cpu(data->dw2_flags);
1802 sge_flags &= ~SLI4_SGE_TYPE_MASK;
1803 sge_flags |= SLI4_SGE_TYPE_DATA << SLI4_SGE_TYPE_SHIFT;
1804 sge_flags &= ~SLI4_SGE_DATA_OFFSET_MASK;
1805 sge_flags |= SLI4_SGE_DATA_OFFSET_MASK & io->sge_offset;
1806
1807 data->buffer_address_high = cpu_to_le32(upper_32_bits(addr));
1808 data->buffer_address_low = cpu_to_le32(lower_32_bits(addr));
1809 data->buffer_length = cpu_to_le32(length);
1810
1811
1812
1813
1814
1815
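	/*
	 * Mark this SGE as the last one; if there is an earlier SGE, its
	 * LAST flag is cleared below.
	 */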
1816 sge_flags |= SLI4_SGE_LAST;
1817 data->dw2_flags = cpu_to_le32(sge_flags);
1818
1819 if (io->n_sge) {
1820 sge_flags = le32_to_cpu(data[-1].dw2_flags);
1821 sge_flags &= ~SLI4_SGE_LAST;
1822 data[-1].dw2_flags = cpu_to_le32(sge_flags);
1823 }
1824
1825
1826 if (io->first_data_sge == 0)
1827 io->first_data_sge = io->n_sge;
1828
1829 io->sge_offset += length;
1830 io->n_sge++;
1831
1832 return 0;
1833}
1834
1835void
1836efct_hw_io_abort_all(struct efct_hw *hw)
1837{
1838 struct efct_hw_io *io_to_abort = NULL;
1839 struct efct_hw_io *next_io = NULL;
1840
1841 list_for_each_entry_safe(io_to_abort, next_io,
1842 &hw->io_inuse, list_entry) {
1843 efct_hw_io_abort(hw, io_to_abort, true, NULL, NULL);
1844 }
1845}
1846
1847static void
1848efct_hw_wq_process_abort(void *arg, u8 *cqe, int status)
1849{
1850 struct efct_hw_io *io = arg;
1851 struct efct_hw *hw = io->hw;
1852 u32 ext = 0;
1853 u32 len = 0;
1854 struct hw_wq_callback *wqcb;
1855
1856
1857
1858
1859
1860
1861
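	/*
	 * If the abort failed because the XRI was no longer active
	 * (LOCAL_REJECT/NO_XRI) and the original completion was deferred,
	 * deliver it now with the saved status.
	 */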
1862 ext = sli_fc_ext_status(&hw->sli, cqe);
1863 if (status == SLI4_FC_WCQE_STATUS_LOCAL_REJECT &&
1864 ext == SLI4_FC_LOCAL_REJECT_NO_XRI && io->done) {
1865 efct_hw_done_t done = io->done;
1866
1867 io->done = NULL;
1868
1869
1870
1871
1872
1873
1874
1875 status = io->saved_status;
1876 len = io->saved_len;
1877 ext = io->saved_ext;
1878 io->status_saved = false;
1879 done(io, len, status, ext, io->arg);
1880 }
1881
1882 if (io->abort_done) {
1883 efct_hw_done_t done = io->abort_done;
1884
1885 io->abort_done = NULL;
1886 done(io, len, status, ext, io->abort_arg);
1887 }
1888
1889
1890 io->abort_in_progress = false;
1891
1892
1893 if (io->abort_reqtag == U32_MAX) {
1894 efc_log_err(hw->os, "HW IO already freed\n");
1895 return;
1896 }
1897
1898 wqcb = efct_hw_reqtag_get_instance(hw, io->abort_reqtag);
1899 efct_hw_reqtag_free(hw, wqcb);
1900
1901
1902
1903
1904
1905 (void)efct_hw_io_free(hw, io);
1906}
1907
1908static void
1909efct_hw_fill_abort_wqe(struct efct_hw *hw, struct efct_hw_wqe *wqe)
1910{
1911 struct sli4_abort_wqe *abort = (void *)wqe->wqebuf;
1912
1913 memset(abort, 0, hw->sli.wqe_size);
1914
1915 abort->criteria = SLI4_ABORT_CRITERIA_XRI_TAG;
1916 abort->ia_ir_byte |= wqe->send_abts ? 0 : 1;
1917
1918
1919 abort->ia_ir_byte |= SLI4_ABRT_WQE_IR;
1920
1921 abort->t_tag = cpu_to_le32(wqe->id);
1922 abort->command = SLI4_WQE_ABORT;
1923 abort->request_tag = cpu_to_le16(wqe->abort_reqtag);
1924
1925 abort->dw10w0_flags = cpu_to_le16(SLI4_ABRT_WQE_QOSD);
1926
1927 abort->cq_id = cpu_to_le16(SLI4_CQ_DEFAULT);
1928}
1929
1930int
1931efct_hw_io_abort(struct efct_hw *hw, struct efct_hw_io *io_to_abort,
1932 bool send_abts, void *cb, void *arg)
1933{
1934 struct hw_wq_callback *wqcb;
1935 unsigned long flags = 0;
1936
1937 if (!io_to_abort) {
1938 efc_log_err(hw->os, "bad parameter hw=%p io=%p\n",
1939 hw, io_to_abort);
1940 return -EIO;
1941 }
1942
1943 if (hw->state != EFCT_HW_STATE_ACTIVE) {
1944 efc_log_err(hw->os, "cannot send IO abort, HW state=%d\n",
1945 hw->state);
1946 return -EIO;
1947 }
1948
1949
1950 if (kref_get_unless_zero(&io_to_abort->ref) == 0) {
1951
1952 efc_log_debug(hw->os,
1953 "io not active xri=0x%x tag=0x%x\n",
1954 io_to_abort->indicator, io_to_abort->reqtag);
1955 return -ENOENT;
1956 }
1957
1958
1959 if (!io_to_abort->wq) {
1960 efc_log_debug(hw->os, "io_to_abort xri=0x%x not active on WQ\n",
1961 io_to_abort->indicator);
1962
1963 kref_put(&io_to_abort->ref, io_to_abort->release);
1964 return -ENOENT;
1965 }
1966
1967
1968
1969
1970
1971 if (cmpxchg(&io_to_abort->abort_in_progress, false, true)) {
1972
1973 kref_put(&io_to_abort->ref, io_to_abort->release);
1974 efc_log_debug(hw->os,
1975 "io already being aborted xri=0x%x tag=0x%x\n",
1976 io_to_abort->indicator, io_to_abort->reqtag);
1977 return -EINPROGRESS;
1978 }
1979
1980
1981
1982
1983
1984
1985
1986
1987
1988
1989
1990
1991
1992
1993
1994 io_to_abort->abort_done = cb;
1995 io_to_abort->abort_arg = arg;
1996
1997
1998 wqcb = efct_hw_reqtag_alloc(hw, efct_hw_wq_process_abort, io_to_abort);
	if (!wqcb) {
		efc_log_err(hw->os, "can't allocate request tag\n");
		io_to_abort->abort_in_progress = false;
		kref_put(&io_to_abort->ref, io_to_abort->release);
		return -ENOSPC;
	}
2003
2004 io_to_abort->abort_reqtag = wqcb->instance_index;
2005 io_to_abort->wqe.send_abts = send_abts;
2006 io_to_abort->wqe.id = io_to_abort->indicator;
2007 io_to_abort->wqe.abort_reqtag = io_to_abort->abort_reqtag;
2008
2009
2010
2011
2012
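	/*
	 * If the original WQE is still queued on the WQ pending list, just
	 * flag it; the abort WQE is built and submitted when that entry is
	 * processed.
	 */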
2013 if (io_to_abort->wq) {
2014 spin_lock_irqsave(&io_to_abort->wq->queue->lock, flags);
2015 if (io_to_abort->wqe.list_entry.next) {
2016 io_to_abort->wqe.abort_wqe_submit_needed = true;
2017 spin_unlock_irqrestore(&io_to_abort->wq->queue->lock,
2018 flags);
2019 return 0;
2020 }
2021 spin_unlock_irqrestore(&io_to_abort->wq->queue->lock, flags);
2022 }
2023
2024 efct_hw_fill_abort_wqe(hw, &io_to_abort->wqe);
2025
2026
2027
2028
2029
2030 if (efct_hw_wq_write(io_to_abort->wq, &io_to_abort->wqe)) {
2031 io_to_abort->abort_in_progress = false;
2032
2033 kref_put(&io_to_abort->ref, io_to_abort->release);
2034 return -EIO;
2035 }
2036
2037 return 0;
2038}
2039
2040void
2041efct_hw_reqtag_pool_free(struct efct_hw *hw)
2042{
2043 u32 i;
2044 struct reqtag_pool *reqtag_pool = hw->wq_reqtag_pool;
2045 struct hw_wq_callback *wqcb = NULL;
2046
2047 if (reqtag_pool) {
2048 for (i = 0; i < U16_MAX; i++) {
2049 wqcb = reqtag_pool->tags[i];
2050 if (!wqcb)
2051 continue;
2052
2053 kfree(wqcb);
2054 }
2055 kfree(reqtag_pool);
2056 hw->wq_reqtag_pool = NULL;
2057 }
2058}
2059
2060struct reqtag_pool *
2061efct_hw_reqtag_pool_alloc(struct efct_hw *hw)
2062{
2063 u32 i = 0;
2064 struct reqtag_pool *reqtag_pool;
2065 struct hw_wq_callback *wqcb;
2066
2067 reqtag_pool = kzalloc(sizeof(*reqtag_pool), GFP_KERNEL);
2068 if (!reqtag_pool)
2069 return NULL;
2070
2071 INIT_LIST_HEAD(&reqtag_pool->freelist);
2072
2073 spin_lock_init(&reqtag_pool->lock);
2074 for (i = 0; i < U16_MAX; i++) {
2075 wqcb = kmalloc(sizeof(*wqcb), GFP_KERNEL);
2076 if (!wqcb)
2077 break;
2078
2079 reqtag_pool->tags[i] = wqcb;
2080 wqcb->instance_index = i;
2081 wqcb->callback = NULL;
2082 wqcb->arg = NULL;
2083 INIT_LIST_HEAD(&wqcb->list_entry);
2084 list_add_tail(&wqcb->list_entry, &reqtag_pool->freelist);
2085 }
2086
2087 return reqtag_pool;
2088}
2089
2090struct hw_wq_callback *
2091efct_hw_reqtag_alloc(struct efct_hw *hw,
2092 void (*callback)(void *arg, u8 *cqe, int status),
2093 void *arg)
2094{
2095 struct hw_wq_callback *wqcb = NULL;
2096 struct reqtag_pool *reqtag_pool = hw->wq_reqtag_pool;
2097 unsigned long flags = 0;
2098
2099 if (!callback)
2100 return wqcb;
2101
2102 spin_lock_irqsave(&reqtag_pool->lock, flags);
2103
2104 if (!list_empty(&reqtag_pool->freelist)) {
2105 wqcb = list_first_entry(&reqtag_pool->freelist,
2106 struct hw_wq_callback, list_entry);
2107 }
2108
2109 if (wqcb) {
2110 list_del_init(&wqcb->list_entry);
2111 spin_unlock_irqrestore(&reqtag_pool->lock, flags);
2112 wqcb->callback = callback;
2113 wqcb->arg = arg;
2114 } else {
2115 spin_unlock_irqrestore(&reqtag_pool->lock, flags);
2116 }
2117
2118 return wqcb;
2119}
2120
2121void
2122efct_hw_reqtag_free(struct efct_hw *hw, struct hw_wq_callback *wqcb)
2123{
2124 unsigned long flags = 0;
2125 struct reqtag_pool *reqtag_pool = hw->wq_reqtag_pool;
2126
2127 if (!wqcb->callback)
2128 efc_log_err(hw->os, "WQCB is already freed\n");
2129
2130 spin_lock_irqsave(&reqtag_pool->lock, flags);
2131 wqcb->callback = NULL;
2132 wqcb->arg = NULL;
2133 INIT_LIST_HEAD(&wqcb->list_entry);
2134 list_add(&wqcb->list_entry, &hw->wq_reqtag_pool->freelist);
2135 spin_unlock_irqrestore(&reqtag_pool->lock, flags);
2136}
2137
2138struct hw_wq_callback *
2139efct_hw_reqtag_get_instance(struct efct_hw *hw, u32 instance_index)
2140{
2141 struct hw_wq_callback *wqcb;
2142
2143 wqcb = hw->wq_reqtag_pool->tags[instance_index];
2144 if (!wqcb)
2145 efc_log_err(hw->os, "wqcb for instance %d is null\n",
2146 instance_index);
2147
2148 return wqcb;
2149}
2150
2151int
2152efct_hw_queue_hash_find(struct efct_queue_hash *hash, u16 id)
2153{
2154 int index = -1;
2155 int i = id & (EFCT_HW_Q_HASH_SIZE - 1);
2156
2157
2158
2159
2160
2161
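	/*
	 * Probe from the hashed slot; the table is larger than the number of
	 * queues, so an unused slot always ends the search.
	 */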
2162 do {
2163 if (hash[i].in_use && hash[i].id == id)
2164 index = hash[i].index;
2165 else
2166 i = (i + 1) & (EFCT_HW_Q_HASH_SIZE - 1);
2167 } while (index == -1 && hash[i].in_use);
2168
2169 return index;
2170}
2171
2172int
2173efct_hw_process(struct efct_hw *hw, u32 vector,
2174 u32 max_isr_time_msec)
2175{
2176 struct hw_eq *eq;
2177
2178
2179
2180
2181
2182
2183
2184
2185
2186
2187
2188
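	/*
	 * Called from the interrupt/poll path: ignore events while the HW is
	 * still uninitialized, then process the EQ mapped to this vector,
	 * bounded by max_isr_time_msec.
	 */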
2189 if (hw->state == EFCT_HW_STATE_UNINITIALIZED)
2190 return 0;
2191
2192
2193 eq = hw->hw_eq[vector];
2194 if (!eq)
2195 return 0;
2196
2197 eq->use_count++;
2198
2199 return efct_hw_eq_process(hw, eq, max_isr_time_msec);
2200}
2201
2202int
2203efct_hw_eq_process(struct efct_hw *hw, struct hw_eq *eq,
2204 u32 max_isr_time_msec)
2205{
2206 u8 eqe[sizeof(struct sli4_eqe)] = { 0 };
2207 u32 tcheck_count;
2208 u64 tstart;
2209 u64 telapsed;
2210 bool done = false;
2211
2212 tcheck_count = EFCT_HW_TIMECHECK_ITERATIONS;
2213 tstart = jiffies_to_msecs(jiffies);
2214
2215 while (!done && !sli_eq_read(&hw->sli, eq->queue, eqe)) {
2216 u16 cq_id = 0;
2217 int rc;
2218
2219 rc = sli_eq_parse(&hw->sli, eqe, &cq_id);
2220 if (unlikely(rc)) {
2221 if (rc == SLI4_EQE_STATUS_EQ_FULL) {
2222 u32 i;
2223
2224
2225
2226
2227
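				/*
				 * The EQ is full, so the CQ ID is unknown;
				 * sweep every CQ to drain completions.
				 */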
2228 for (i = 0; i < hw->cq_count; i++)
2229 efct_hw_cq_process(hw, hw->hw_cq[i]);
2230 continue;
2231 } else {
2232 return rc;
2233 }
2234 } else {
2235 int index;
2236
2237 index = efct_hw_queue_hash_find(hw->cq_hash, cq_id);
2238
2239 if (likely(index >= 0))
2240 efct_hw_cq_process(hw, hw->hw_cq[index]);
2241 else
2242 efc_log_err(hw->os, "bad CQ_ID %#06x\n", cq_id);
2243 }
2244
2245 if (eq->queue->n_posted > eq->queue->posted_limit)
2246 sli_queue_arm(&hw->sli, eq->queue, false);
2247
2248 if (tcheck_count && (--tcheck_count == 0)) {
2249 tcheck_count = EFCT_HW_TIMECHECK_ITERATIONS;
2250 telapsed = jiffies_to_msecs(jiffies) - tstart;
2251 if (telapsed >= max_isr_time_msec)
2252 done = true;
2253 }
2254 }
2255 sli_queue_eq_arm(&hw->sli, eq->queue, true);
2256
2257 return 0;
2258}
2259
2260static int
2261_efct_hw_wq_write(struct hw_wq *wq, struct efct_hw_wqe *wqe)
2262{
2263 int queue_rc;
2264
2265
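	/*
	 * Every wqec_set_count WQEs, set the WQEC bit so the hardware posts a
	 * WQ-release CQE, which credits free entries back to the WQ (see
	 * hw_wq_submit_pending()).
	 */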
2266 if (wq->wqec_count)
2267 wq->wqec_count--;
2268
2269 if (wq->wqec_count == 0) {
2270 struct sli4_generic_wqe *genwqe = (void *)wqe->wqebuf;
2271
2272 genwqe->cmdtype_wqec_byte |= SLI4_GEN_WQE_WQEC;
2273 wq->wqec_count = wq->wqec_set_count;
2274 }
2275
2276
2277 wq->free_count--;
2278
2279 queue_rc = sli_wq_write(&wq->hw->sli, wq->queue, wqe->wqebuf);
2280
2281 return (queue_rc < 0) ? -EIO : 0;
2282}
2283
2284static void
2285hw_wq_submit_pending(struct hw_wq *wq, u32 update_free_count)
2286{
2287 struct efct_hw_wqe *wqe;
2288 unsigned long flags = 0;
2289
2290 spin_lock_irqsave(&wq->queue->lock, flags);
2291
2292
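	/* Credit the released entries back, then drain the pending list. */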
2293 wq->free_count += update_free_count;
2294
2295 while ((wq->free_count > 0) && (!list_empty(&wq->pending_list))) {
2296 wqe = list_first_entry(&wq->pending_list,
2297 struct efct_hw_wqe, list_entry);
2298 list_del_init(&wqe->list_entry);
2299 _efct_hw_wq_write(wq, wqe);
2300
2301 if (wqe->abort_wqe_submit_needed) {
2302 wqe->abort_wqe_submit_needed = false;
2303 efct_hw_fill_abort_wqe(wq->hw, wqe);
2304 INIT_LIST_HEAD(&wqe->list_entry);
2305 list_add_tail(&wqe->list_entry, &wq->pending_list);
2306 wq->wq_pending_count++;
2307 }
2308 }
2309
2310 spin_unlock_irqrestore(&wq->queue->lock, flags);
2311}
2312
2313void
2314efct_hw_cq_process(struct efct_hw *hw, struct hw_cq *cq)
2315{
2316 u8 cqe[sizeof(struct sli4_mcqe)];
2317 u16 rid = U16_MAX;
2318
2319 enum sli4_qentry ctype;
2320 u32 n_processed = 0;
2321 u32 tstart, telapsed;
2322
2323 tstart = jiffies_to_msecs(jiffies);
2324
2325 while (!sli_cq_read(&hw->sli, cq->queue, cqe)) {
2326 int status;
2327
2328 status = sli_cq_parse(&hw->sli, cq->queue, cqe, &ctype, &rid);
2329
2330
2331
2332
2333
2334
2335
2336
2337
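		/*
		 * A negative status means nothing usable could be parsed from
		 * the CQE; a positive status means the entry is valid but
		 * reports an error.
		 */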
2338 if (status < 0) {
2339 if (status == SLI4_MCQE_STATUS_NOT_COMPLETED)
2340
2341
2342
2343
2344 continue;
2345
2346 break;
2347 }
2348
2349 switch (ctype) {
2350 case SLI4_QENTRY_ASYNC:
2351 sli_cqe_async(&hw->sli, cqe);
2352 break;
2353 case SLI4_QENTRY_MQ:
2354
2355
2356
2357
2358 efct_hw_mq_process(hw, status, hw->mq);
2359 break;
2360 case SLI4_QENTRY_WQ:
2361 efct_hw_wq_process(hw, cq, cqe, status, rid);
2362 break;
2363 case SLI4_QENTRY_WQ_RELEASE: {
2364 u32 wq_id = rid;
2365 int index;
2366 struct hw_wq *wq = NULL;
2367
2368 index = efct_hw_queue_hash_find(hw->wq_hash, wq_id);
2369
2370 if (likely(index >= 0)) {
2371 wq = hw->hw_wq[index];
2372 } else {
2373 efc_log_err(hw->os, "bad WQ_ID %#06x\n", wq_id);
2374 break;
2375 }
2376
2377 hw_wq_submit_pending(wq, wq->wqec_set_count);
2378
2379 break;
2380 }
2381
2382 case SLI4_QENTRY_RQ:
2383 efct_hw_rqpair_process_rq(hw, cq, cqe);
2384 break;
2385 case SLI4_QENTRY_XABT: {
2386 efct_hw_xabt_process(hw, cq, cqe, rid);
2387 break;
2388 }
2389 default:
2390 efc_log_debug(hw->os, "unhandled ctype=%#x rid=%#x\n",
2391 ctype, rid);
2392 break;
2393 }
2394
2395 n_processed++;
2396 if (n_processed == cq->queue->proc_limit)
2397 break;
2398
2399 if (cq->queue->n_posted >= cq->queue->posted_limit)
2400 sli_queue_arm(&hw->sli, cq->queue, false);
2401 }
2402
2403 sli_queue_arm(&hw->sli, cq->queue, true);
2404
2405 if (n_processed > cq->queue->max_num_processed)
2406 cq->queue->max_num_processed = n_processed;
2407 telapsed = jiffies_to_msecs(jiffies) - tstart;
2408 if (telapsed > cq->queue->max_process_time)
2409 cq->queue->max_process_time = telapsed;
2410}
2411
2412void
2413efct_hw_wq_process(struct efct_hw *hw, struct hw_cq *cq,
2414 u8 *cqe, int status, u16 rid)
2415{
2416 struct hw_wq_callback *wqcb;
2417
2418 if (rid == EFCT_HW_REQUE_XRI_REGTAG) {
2419 if (status)
2420 efc_log_err(hw->os, "reque xri failed, status = %d\n",
2421 status);
2422 return;
2423 }
2424
2425 wqcb = efct_hw_reqtag_get_instance(hw, rid);
2426 if (!wqcb) {
2427 efc_log_err(hw->os, "invalid request tag: x%x\n", rid);
2428 return;
2429 }
2430
2431 if (!wqcb->callback) {
2432 efc_log_err(hw->os, "wqcb callback is NULL\n");
2433 return;
2434 }
2435
2436 (*wqcb->callback)(wqcb->arg, cqe, status);
2437}
2438
2439void
2440efct_hw_xabt_process(struct efct_hw *hw, struct hw_cq *cq,
2441 u8 *cqe, u16 rid)
2442{
2443
2444 struct efct_hw_io *io = NULL;
2445 unsigned long flags = 0;
2446
2447 io = efct_hw_io_lookup(hw, rid);
2448 if (!io) {
2449
2450 efc_log_err(hw->os, "xabt io lookup failed rid=%#x\n", rid);
2451 return;
2452 }
2453
2454 if (!io->xbusy)
2455 efc_log_debug(hw->os, "xabt io not busy rid=%#x\n", rid);
2456 else
 /* mark the IO as no longer busy on the exchange */
2458 io->xbusy = false;
2459
 /*
 * For IOs that were aborted internally, issue any pending
 * completion callback now.
 */
2464 if (io->done) {
2465 efct_hw_done_t done = io->done;
2466 void *arg = io->arg;
2467
 /*
 * Use the status, length and extended status latched when the
 * internal abort was issued.
 */
2472 int status = io->saved_status;
2473 u32 len = io->saved_len;
2474 u32 ext = io->saved_ext;
2475
2476 io->done = NULL;
2477 io->status_saved = false;
2478
2479 done(io, len, status, ext, arg);
2480 }
2481
2482 spin_lock_irqsave(&hw->io_lock, flags);
2483 if (io->state == EFCT_HW_IO_STATE_INUSE ||
2484 io->state == EFCT_HW_IO_STATE_WAIT_FREE) {
 /*
 * If the IO is on the wait_free list its owner has already
 * released it: move it to the free list. If it is still
 * in use, leave it for the owner to free later.
 */
2490 if (io->state == EFCT_HW_IO_STATE_WAIT_FREE) {
2491 io->state = EFCT_HW_IO_STATE_FREE;
2492 list_del_init(&io->list_entry);
2493 efct_hw_io_free_move_correct_list(hw, io);
2494 }
2495 }
2496 spin_unlock_irqrestore(&hw->io_lock, flags);
2497}
2498
2499static int
2500efct_hw_flush(struct efct_hw *hw)
2501{
2502 u32 i = 0;
2503
 /* drain any remaining completions on every EQ */
2505 for (i = 0; i < hw->eq_count; i++)
2506 efct_hw_process(hw, i, ~0);
2507
2508 return 0;
2509}
2510
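/*
 * Submit a WQE to a work queue. If no WQ entries are free, or other WQEs are
 * already pending, the WQE is queued on the pending list and submitted later
 * when the hardware releases entries (see hw_wq_submit_pending()).
 */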
2511int
2512efct_hw_wq_write(struct hw_wq *wq, struct efct_hw_wqe *wqe)
2513{
2514 int rc = 0;
2515 unsigned long flags = 0;
2516
2517 spin_lock_irqsave(&wq->queue->lock, flags);
2518 if (list_empty(&wq->pending_list)) {
2519 if (wq->free_count > 0) {
2520 rc = _efct_hw_wq_write(wq, wqe);
2521 } else {
2522 INIT_LIST_HEAD(&wqe->list_entry);
2523 list_add_tail(&wqe->list_entry, &wq->pending_list);
2524 wq->wq_pending_count++;
2525 }
2526
2527 spin_unlock_irqrestore(&wq->queue->lock, flags);
2528 return rc;
2529 }
2530
2531 INIT_LIST_HEAD(&wqe->list_entry);
2532 list_add_tail(&wqe->list_entry, &wq->pending_list);
2533 wq->wq_pending_count++;
2534 while (wq->free_count > 0) {
2535 wqe = list_first_entry(&wq->pending_list, struct efct_hw_wqe,
2536 list_entry);
2537 if (!wqe)
2538 break;
2539
2540 list_del_init(&wqe->list_entry);
2541 rc = _efct_hw_wq_write(wq, wqe);
2542 if (rc)
2543 break;
2544
2545 if (wqe->abort_wqe_submit_needed) {
2546 wqe->abort_wqe_submit_needed = false;
2547 efct_hw_fill_abort_wqe(wq->hw, wqe);
2548
2549 INIT_LIST_HEAD(&wqe->list_entry);
2550 list_add_tail(&wqe->list_entry, &wq->pending_list);
2551 wq->wq_pending_count++;
2552 }
2553 }
2554
2555 spin_unlock_irqrestore(&wq->queue->lock, flags);
2556
2557 return rc;
2558}
2559
2560int
2561efct_efc_bls_send(struct efc *efc, u32 type, struct sli_bls_params *bls)
2562{
2563 struct efct *efct = efc->base;
2564
2565 return efct_hw_bls_send(efct, type, bls, NULL, NULL);
2566}
2567
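/*
 * Send a BLS accept or reject. A HW IO is allocated to carry the exchange,
 * and an XMIT_BLS_RSP64 WQE built from the caller's parameters is posted to
 * the IO's work queue.
 */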
2568int
2569efct_hw_bls_send(struct efct *efct, u32 type, struct sli_bls_params *bls_params,
2570 void *cb, void *arg)
2571{
2572 struct efct_hw *hw = &efct->hw;
2573 struct efct_hw_io *hio;
2574 struct sli_bls_payload bls;
2575 int rc;
2576
2577 if (hw->state != EFCT_HW_STATE_ACTIVE) {
2578 efc_log_err(hw->os,
2579 "cannot send BLS, HW state=%d\n", hw->state);
2580 return -EIO;
2581 }
2582
2583 hio = efct_hw_io_alloc(hw);
2584 if (!hio) {
2585 efc_log_err(hw->os, "HIO allocation failed\n");
2586 return -EIO;
2587 }
2588
2589 hio->done = cb;
2590 hio->arg = arg;
2591
2592 bls_params->xri = hio->indicator;
2593 bls_params->tag = hio->reqtag;
2594
2595 if (type == FC_RCTL_BA_ACC) {
2596 hio->type = EFCT_HW_BLS_ACC;
2597 bls.type = SLI4_SLI_BLS_ACC;
2598 memcpy(&bls.u.acc, bls_params->payload, sizeof(bls.u.acc));
2599 } else {
2600 hio->type = EFCT_HW_BLS_RJT;
2601 bls.type = SLI4_SLI_BLS_RJT;
2602 memcpy(&bls.u.rjt, bls_params->payload, sizeof(bls.u.rjt));
2603 }
2604
2605 bls.ox_id = cpu_to_le16(bls_params->ox_id);
2606 bls.rx_id = cpu_to_le16(bls_params->rx_id);
2607
2608 if (sli_xmit_bls_rsp64_wqe(&hw->sli, hio->wqe.wqebuf,
2609 &bls, bls_params)) {
2610 efc_log_err(hw->os, "XMIT_BLS_RSP64 WQE error\n");
2611 return -EIO;
2612 }
2613
2614 hio->xbusy = true;
2615
 /*
 * Account for this WQE on the WQ before posting it, in case the
 * completion is processed before this thread continues.
 */
2620 hio->wq->use_count++;
2621 rc = efct_hw_wq_write(hio->wq, &hio->wqe);
2622 if (rc >= 0) {
 /* non-negative return is success */
2624 rc = 0;
2625 } else {
 /* submission failed; clear the exchange-busy flag */
2627 efc_log_err(hw->os,
2628 "sli_queue_write failed: %d\n", rc);
2629 hio->xbusy = false;
2630 }
2631
2632 return rc;
2633}
2634
2635static int
2636efct_els_ssrs_send_cb(struct efct_hw_io *hio, u32 length, int status,
2637 u32 ext_status, void *arg)
2638{
2639 struct efc_disc_io *io = arg;
2640
2641 efc_disc_io_complete(io, length, status, ext_status);
2642 return 0;
2643}
2644
2645static inline void
2646efct_fill_els_params(struct efc_disc_io *io, struct sli_els_params *params)
2647{
2648 u8 *cmd = io->req.virt;
2649
2650 params->cmd = *cmd;
2651 params->s_id = io->s_id;
2652 params->d_id = io->d_id;
2653 params->ox_id = io->iparam.els.ox_id;
2654 params->rpi = io->rpi;
2655 params->vpi = io->vpi;
2656 params->rpi_registered = io->rpi_registered;
2657 params->xmit_len = io->xmit_len;
2658 params->rsp_len = io->rsp_len;
2659 params->timeout = io->iparam.els.timeout;
2660}
2661
2662static inline void
2663efct_fill_ct_params(struct efc_disc_io *io, struct sli_ct_params *params)
2664{
2665 params->r_ctl = io->iparam.ct.r_ctl;
2666 params->type = io->iparam.ct.type;
2667 params->df_ctl = io->iparam.ct.df_ctl;
2668 params->d_id = io->d_id;
2669 params->ox_id = io->iparam.ct.ox_id;
2670 params->rpi = io->rpi;
2671 params->vpi = io->vpi;
2672 params->rpi_registered = io->rpi_registered;
2673 params->xmit_len = io->xmit_len;
2674 params->rsp_len = io->rsp_len;
2675 params->timeout = io->iparam.ct.timeout;
2676}
2677
/**
 * efct_els_hw_srrs_send() - Send a single request/response sequence.
 * @efc: efc library handle
 * @io: discovery IO holding the ELS or CT command context
 *
 * Supports exchanges consisting of one request and one response between two
 * endpoints: an ELS request, an ELS response, a CT request, or a CT response.
 * A HW IO is allocated, the request and response buffers are mapped into its
 * SGL, and the matching WQE is built and posted.
 *
 * Return: 0 on success, a negative error code otherwise.
 */
2694int
2695efct_els_hw_srrs_send(struct efc *efc, struct efc_disc_io *io)
2696{
2697 struct efct *efct = efc->base;
2698 struct efct_hw_io *hio;
2699 struct efct_hw *hw = &efct->hw;
2700 struct efc_dma *send = &io->req;
2701 struct efc_dma *receive = &io->rsp;
2702 struct sli4_sge *sge = NULL;
2703 int rc = 0;
2704 u32 len = io->xmit_len;
2705 u32 sge0_flags;
2706 u32 sge1_flags;
2707
 /* check the HW state first so an inactive HW does not leak the IO */
 if (hw->state != EFCT_HW_STATE_ACTIVE) {
 efc_log_debug(hw->os,
 "cannot send SRRS, HW state=%d\n", hw->state);
 return -EIO;
 }

 hio = efct_hw_io_alloc(hw);
 if (!hio) {
 pr_err("HIO alloc failed\n");
 return -EIO;
 }
2719
2720 hio->done = efct_els_ssrs_send_cb;
2721 hio->arg = io;
2722
2723 sge = hio->sgl->virt;
2724
 /* clear both SGEs */
2726 memset(hio->sgl->virt, 0, 2 * sizeof(struct sli4_sge));
2727
2728 sge0_flags = le32_to_cpu(sge[0].dw2_flags);
2729 sge1_flags = le32_to_cpu(sge[1].dw2_flags);
2730 if (send->size) {
2731 sge[0].buffer_address_high =
2732 cpu_to_le32(upper_32_bits(send->phys));
2733 sge[0].buffer_address_low =
2734 cpu_to_le32(lower_32_bits(send->phys));
2735
2736 sge0_flags |= (SLI4_SGE_TYPE_DATA << SLI4_SGE_TYPE_SHIFT);
2737
2738 sge[0].buffer_length = cpu_to_le32(len);
2739 }
2740
2741 if (io->io_type == EFC_DISC_IO_ELS_REQ ||
2742 io->io_type == EFC_DISC_IO_CT_REQ) {
2743 sge[1].buffer_address_high =
2744 cpu_to_le32(upper_32_bits(receive->phys));
2745 sge[1].buffer_address_low =
2746 cpu_to_le32(lower_32_bits(receive->phys));
2747
2748 sge1_flags |= (SLI4_SGE_TYPE_DATA << SLI4_SGE_TYPE_SHIFT);
2749 sge1_flags |= SLI4_SGE_LAST;
2750
2751 sge[1].buffer_length = cpu_to_le32(receive->size);
2752 } else {
2753 sge0_flags |= SLI4_SGE_LAST;
2754 }
2755
2756 sge[0].dw2_flags = cpu_to_le32(sge0_flags);
2757 sge[1].dw2_flags = cpu_to_le32(sge1_flags);
2758
2759 switch (io->io_type) {
2760 case EFC_DISC_IO_ELS_REQ: {
2761 struct sli_els_params els_params;
2762
2763 hio->type = EFCT_HW_ELS_REQ;
2764 efct_fill_els_params(io, &els_params);
2765 els_params.xri = hio->indicator;
2766 els_params.tag = hio->reqtag;
2767
2768 if (sli_els_request64_wqe(&hw->sli, hio->wqe.wqebuf, hio->sgl,
2769 &els_params)) {
2770 efc_log_err(hw->os, "REQ WQE error\n");
2771 rc = -EIO;
2772 }
2773 break;
2774 }
2775 case EFC_DISC_IO_ELS_RESP: {
2776 struct sli_els_params els_params;
2777
2778 hio->type = EFCT_HW_ELS_RSP;
2779 efct_fill_els_params(io, &els_params);
2780 els_params.xri = hio->indicator;
2781 els_params.tag = hio->reqtag;
2782 if (sli_xmit_els_rsp64_wqe(&hw->sli, hio->wqe.wqebuf, send,
 &els_params)) {
2784 efc_log_err(hw->os, "RSP WQE error\n");
2785 rc = -EIO;
2786 }
2787 break;
2788 }
2789 case EFC_DISC_IO_CT_REQ: {
2790 struct sli_ct_params ct_params;
2791
2792 hio->type = EFCT_HW_FC_CT;
2793 efct_fill_ct_params(io, &ct_params);
2794 ct_params.xri = hio->indicator;
2795 ct_params.tag = hio->reqtag;
2796 if (sli_gen_request64_wqe(&hw->sli, hio->wqe.wqebuf, hio->sgl,
 &ct_params)) {
2798 efc_log_err(hw->os, "GEN WQE error\n");
2799 rc = -EIO;
2800 }
2801 break;
2802 }
2803 case EFC_DISC_IO_CT_RESP: {
2804 struct sli_ct_params ct_params;
2805
2806 hio->type = EFCT_HW_FC_CT_RSP;
2807 efct_fill_ct_params(io, &ct_params);
2808 ct_params.xri = hio->indicator;
2809 ct_params.tag = hio->reqtag;
2810 if (sli_xmit_sequence64_wqe(&hw->sli, hio->wqe.wqebuf, hio->sgl,
 &ct_params)) {
2812 efc_log_err(hw->os, "XMIT SEQ WQE error\n");
2813 rc = -EIO;
2814 }
2815 break;
2816 }
2817 default:
2818 efc_log_err(hw->os, "bad SRRS type %#x\n", io->io_type);
2819 rc = -EIO;
2820 }
2821
2822 if (rc == 0) {
2823 hio->xbusy = true;
2824
 /*
 * Account for this WQE on the WQ before posting it, in case
 * the completion is processed before this thread continues.
 */
2829 hio->wq->use_count++;
2830 rc = efct_hw_wq_write(hio->wq, &hio->wqe);
2831 if (rc >= 0) {
 /* non-negative return is success */
2833 rc = 0;
2834 } else {
 /* submission failed; clear the exchange-busy flag */
2836 efc_log_err(hw->os,
2837 "sli_queue_write failed: %d\n", rc);
2838 hio->xbusy = false;
2839 }
2840 }
2841
2842 return rc;
2843}
2844
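/*
 * Send a target-mode IO (TRECEIVE, TSEND or TRSP). The caller's parameters
 * are used to build the matching FCP WQE, which is then posted to the IO's
 * work queue.
 */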
2845int
2846efct_hw_io_send(struct efct_hw *hw, enum efct_hw_io_type type,
2847 struct efct_hw_io *io, union efct_hw_io_param_u *iparam,
2848 void *cb, void *arg)
2849{
2850 int rc = 0;
2851 bool send_wqe = true;
2852
2853 if (!io) {
2854 pr_err("bad parm hw=%p io=%p\n", hw, io);
2855 return -EIO;
2856 }
2857
2858 if (hw->state != EFCT_HW_STATE_ACTIVE) {
2859 efc_log_err(hw->os, "cannot send IO, HW state=%d\n", hw->state);
2860 return -EIO;
2861 }
2862
 /* save the state needed when the IO completes */
2866 io->type = type;
2867 io->done = cb;
2868 io->arg = arg;
2869
 /* format the work queue entry used to send the IO */
2873 switch (type) {
2874 case EFCT_HW_IO_TARGET_WRITE: {
2875 u16 *flags = &iparam->fcp_tgt.flags;
2876 struct fcp_txrdy *xfer = io->xfer_rdy.virt;
2877
 /* build the XFER_RDY payload sent with the target write */
2881 xfer->ft_data_ro = cpu_to_be32(iparam->fcp_tgt.offset);
2882 xfer->ft_burst_len = cpu_to_be32(iparam->fcp_tgt.xmit_len);
2883
2884 if (io->xbusy)
2885 *flags |= SLI4_IO_CONTINUATION;
2886 else
2887 *flags &= ~SLI4_IO_CONTINUATION;
2888 iparam->fcp_tgt.xri = io->indicator;
2889 iparam->fcp_tgt.tag = io->reqtag;
2890
2891 if (sli_fcp_treceive64_wqe(&hw->sli, io->wqe.wqebuf,
2892 &io->def_sgl, io->first_data_sge,
2893 SLI4_CQ_DEFAULT,
2894 0, 0, &iparam->fcp_tgt)) {
2895 efc_log_err(hw->os, "TRECEIVE WQE error\n");
2896 rc = -EIO;
2897 }
2898 break;
2899 }
2900 case EFCT_HW_IO_TARGET_READ: {
2901 u16 *flags = &iparam->fcp_tgt.flags;
2902
2903 if (io->xbusy)
2904 *flags |= SLI4_IO_CONTINUATION;
2905 else
2906 *flags &= ~SLI4_IO_CONTINUATION;
2907
2908 iparam->fcp_tgt.xri = io->indicator;
2909 iparam->fcp_tgt.tag = io->reqtag;
2910
2911 if (sli_fcp_tsend64_wqe(&hw->sli, io->wqe.wqebuf,
2912 &io->def_sgl, io->first_data_sge,
2913 SLI4_CQ_DEFAULT,
2914 0, 0, &iparam->fcp_tgt)) {
2915 efc_log_err(hw->os, "TSEND WQE error\n");
2916 rc = -EIO;
2917 }
2918 break;
2919 }
2920 case EFCT_HW_IO_TARGET_RSP: {
2921 u16 *flags = &iparam->fcp_tgt.flags;
2922
2923 if (io->xbusy)
2924 *flags |= SLI4_IO_CONTINUATION;
2925 else
2926 *flags &= ~SLI4_IO_CONTINUATION;
2927
2928 iparam->fcp_tgt.xri = io->indicator;
2929 iparam->fcp_tgt.tag = io->reqtag;
2930
2931 if (sli_fcp_trsp64_wqe(&hw->sli, io->wqe.wqebuf,
2932 &io->def_sgl, SLI4_CQ_DEFAULT,
2933 0, &iparam->fcp_tgt)) {
2934 efc_log_err(hw->os, "TRSP WQE error\n");
2935 rc = -EIO;
2936 }
2937
2938 break;
2939 }
2940 default:
2941 efc_log_err(hw->os, "unsupported IO type %#x\n", type);
2942 rc = -EIO;
2943 }
2944
2945 if (send_wqe && rc == 0) {
2946 io->xbusy = true;
2947
 /*
 * Update the target-command and WQ usage counters before posting
 * the WQE, in case the completion is processed immediately.
 */
2952 hw->tcmd_wq_submit[io->wq->instance]++;
2953 io->wq->use_count++;
2954 rc = efct_hw_wq_write(io->wq, &io->wqe);
2955 if (rc >= 0) {
 /* non-negative return is success */
2957 rc = 0;
2958 } else {
 /* submission failed; clear the exchange-busy flag */
2960 efc_log_err(hw->os,
2961 "sli_queue_write failed: %d\n", rc);
2962 io->xbusy = false;
2963 }
2964 }
2965
2966 return rc;
2967}
2968
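/*
 * Transmit a single raw FC frame using a SEND_FRAME WQE and the dedicated
 * send_frame IO of WQ 0. The supplied callback runs when the WQE completes.
 */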
2969int
2970efct_hw_send_frame(struct efct_hw *hw, struct fc_frame_header *hdr,
2971 u8 sof, u8 eof, struct efc_dma *payload,
2972 struct efct_hw_send_frame_context *ctx,
2973 void (*callback)(void *arg, u8 *cqe, int status),
2974 void *arg)
2975{
2976 int rc;
2977 struct efct_hw_wqe *wqe;
2978 u32 xri;
2979 struct hw_wq *wq;
2980
2981 wqe = &ctx->wqe;
2982
 /* populate the callback context */
2984 ctx->hw = hw;
2985
 /* allocate a request tag carrying the completion callback */
2987 ctx->wqcb = efct_hw_reqtag_alloc(hw, callback, arg);
2988 if (!ctx->wqcb) {
2989 efc_log_err(hw->os, "can't allocate request tag\n");
2990 return -ENOSPC;
2991 }
2992
2993 wq = hw->hw_wq[0];
2994
 /* use the XRI of the dedicated send_frame IO attached to this WQ */
2998 xri = wq->send_frame_io->indicator;
2999
 /* build the SEND_FRAME WQE */
3001 rc = sli_send_frame_wqe(&hw->sli, wqe->wqebuf,
3002 sof, eof, (u32 *)hdr, payload, payload->len,
3003 EFCT_HW_SEND_FRAME_TIMEOUT, xri,
3004 ctx->wqcb->instance_index);
3005 if (rc) {
3006 efc_log_err(hw->os, "sli_send_frame_wqe failed: %d\n", rc);
3007 return -EIO;
3008 }
3009
 /* post the WQE to the work queue */
3011 rc = efct_hw_wq_write(wq, wqe);
3012 if (rc) {
3013 efc_log_err(hw->os, "efct_hw_wq_write failed: %d\n", rc);
3014 return -EIO;
3015 }
3016
3017 wq->use_count++;
3018
3019 return 0;
3020}
3021
3022static int
3023efct_hw_cb_link_stat(struct efct_hw *hw, int status,
3024 u8 *mqe, void *arg)
3025{
3026 struct sli4_cmd_read_link_stats *mbox_rsp;
3027 struct efct_hw_link_stat_cb_arg *cb_arg = arg;
3028 struct efct_hw_link_stat_counts counts[EFCT_HW_LINK_STAT_MAX];
3029 u32 num_counters, i;
3030 u32 mbox_rsp_flags = 0;
3031
3032 mbox_rsp = (struct sli4_cmd_read_link_stats *)mqe;
3033 mbox_rsp_flags = le32_to_cpu(mbox_rsp->dw1_flags);
3034 num_counters = (mbox_rsp_flags & SLI4_READ_LNKSTAT_GEC) ? 20 : 13;
3035 memset(counts, 0, sizeof(struct efct_hw_link_stat_counts) *
3036 EFCT_HW_LINK_STAT_MAX);
3037
 /* per-counter overflow flags start at bit 2 of dw1_flags */
3039 for (i = 0; i < EFCT_HW_LINK_STAT_MAX; i++)
3040 counts[i].overflow = (mbox_rsp_flags & (1 << (i + 2)));
3041
3042 counts[EFCT_HW_LINK_STAT_LINK_FAILURE_COUNT].counter =
3043 le32_to_cpu(mbox_rsp->linkfail_errcnt);
3044 counts[EFCT_HW_LINK_STAT_LOSS_OF_SYNC_COUNT].counter =
3045 le32_to_cpu(mbox_rsp->losssync_errcnt);
3046 counts[EFCT_HW_LINK_STAT_LOSS_OF_SIGNAL_COUNT].counter =
3047 le32_to_cpu(mbox_rsp->losssignal_errcnt);
3048 counts[EFCT_HW_LINK_STAT_PRIMITIVE_SEQ_COUNT].counter =
3049 le32_to_cpu(mbox_rsp->primseq_errcnt);
3050 counts[EFCT_HW_LINK_STAT_INVALID_XMIT_WORD_COUNT].counter =
3051 le32_to_cpu(mbox_rsp->inval_txword_errcnt);
3052 counts[EFCT_HW_LINK_STAT_CRC_COUNT].counter =
3053 le32_to_cpu(mbox_rsp->crc_errcnt);
3054 counts[EFCT_HW_LINK_STAT_PRIMITIVE_SEQ_TIMEOUT_COUNT].counter =
3055 le32_to_cpu(mbox_rsp->primseq_eventtimeout_cnt);
3056 counts[EFCT_HW_LINK_STAT_ELASTIC_BUFFER_OVERRUN_COUNT].counter =
3057 le32_to_cpu(mbox_rsp->elastic_bufoverrun_errcnt);
3058 counts[EFCT_HW_LINK_STAT_ARB_TIMEOUT_COUNT].counter =
3059 le32_to_cpu(mbox_rsp->arbit_fc_al_timeout_cnt);
3060 counts[EFCT_HW_LINK_STAT_ADVERTISED_RCV_B2B_CREDIT].counter =
3061 le32_to_cpu(mbox_rsp->adv_rx_buftor_to_buf_credit);
3062 counts[EFCT_HW_LINK_STAT_CURR_RCV_B2B_CREDIT].counter =
3063 le32_to_cpu(mbox_rsp->curr_rx_buf_to_buf_credit);
3064 counts[EFCT_HW_LINK_STAT_ADVERTISED_XMIT_B2B_CREDIT].counter =
3065 le32_to_cpu(mbox_rsp->adv_tx_buf_to_buf_credit);
3066 counts[EFCT_HW_LINK_STAT_CURR_XMIT_B2B_CREDIT].counter =
3067 le32_to_cpu(mbox_rsp->curr_tx_buf_to_buf_credit);
3068 counts[EFCT_HW_LINK_STAT_RCV_EOFA_COUNT].counter =
3069 le32_to_cpu(mbox_rsp->rx_eofa_cnt);
3070 counts[EFCT_HW_LINK_STAT_RCV_EOFDTI_COUNT].counter =
3071 le32_to_cpu(mbox_rsp->rx_eofdti_cnt);
3072 counts[EFCT_HW_LINK_STAT_RCV_EOFNI_COUNT].counter =
3073 le32_to_cpu(mbox_rsp->rx_eofni_cnt);
3074 counts[EFCT_HW_LINK_STAT_RCV_SOFF_COUNT].counter =
3075 le32_to_cpu(mbox_rsp->rx_soff_cnt);
3076 counts[EFCT_HW_LINK_STAT_RCV_DROPPED_NO_AER_COUNT].counter =
3077 le32_to_cpu(mbox_rsp->rx_dropped_no_aer_cnt);
3078 counts[EFCT_HW_LINK_STAT_RCV_DROPPED_NO_RPI_COUNT].counter =
3079 le32_to_cpu(mbox_rsp->rx_dropped_no_avail_rpi_rescnt);
3080 counts[EFCT_HW_LINK_STAT_RCV_DROPPED_NO_XRI_COUNT].counter =
3081 le32_to_cpu(mbox_rsp->rx_dropped_no_avail_xri_rescnt);
3082
3083 if (cb_arg) {
3084 if (cb_arg->cb) {
3085 if (status == 0 && le16_to_cpu(mbox_rsp->hdr.status))
3086 status = le16_to_cpu(mbox_rsp->hdr.status);
3087 cb_arg->cb(status, num_counters, counts, cb_arg->arg);
3088 }
3089
3090 kfree(cb_arg);
3091 }
3092
3093 return 0;
3094}
3095
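/*
 * Issue a READ_LINK_STATS mailbox command. The counters are decoded in
 * efct_hw_cb_link_stat() and handed to the caller's callback.
 */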
3096int
3097efct_hw_get_link_stats(struct efct_hw *hw, u8 req_ext_counters,
3098 u8 clear_overflow_flags, u8 clear_all_counters,
3099 void (*cb)(int status, u32 num_counters,
3100 struct efct_hw_link_stat_counts *counters,
3101 void *arg),
3102 void *arg)
3103{
3104 int rc = -EIO;
3105 struct efct_hw_link_stat_cb_arg *cb_arg;
3106 u8 mbxdata[SLI4_BMBX_SIZE];
3107
3108 cb_arg = kzalloc(sizeof(*cb_arg), GFP_ATOMIC);
3109 if (!cb_arg)
3110 return -ENOMEM;
3111
3112 cb_arg->cb = cb;
3113 cb_arg->arg = arg;
3114
 /* build and submit the READ_LINK_STATS mailbox command */
3116 if (!sli_cmd_read_link_stats(&hw->sli, mbxdata, req_ext_counters,
3117 clear_overflow_flags, clear_all_counters))
3118 rc = efct_hw_command(hw, mbxdata, EFCT_CMD_NOWAIT,
3119 efct_hw_cb_link_stat, cb_arg);
3120
3121 if (rc)
3122 kfree(cb_arg);
3123
3124 return rc;
3125}
3126
3127static int
3128efct_hw_cb_host_stat(struct efct_hw *hw, int status, u8 *mqe, void *arg)
3129{
3130 struct sli4_cmd_read_status *mbox_rsp =
3131 (struct sli4_cmd_read_status *)mqe;
3132 struct efct_hw_host_stat_cb_arg *cb_arg = arg;
3133 struct efct_hw_host_stat_counts counts[EFCT_HW_HOST_STAT_MAX];
3134 u32 num_counters = EFCT_HW_HOST_STAT_MAX;
3135
3136 memset(counts, 0, sizeof(struct efct_hw_host_stat_counts) *
3137 EFCT_HW_HOST_STAT_MAX);
3138
3139 counts[EFCT_HW_HOST_STAT_TX_KBYTE_COUNT].counter =
3140 le32_to_cpu(mbox_rsp->trans_kbyte_cnt);
3141 counts[EFCT_HW_HOST_STAT_RX_KBYTE_COUNT].counter =
3142 le32_to_cpu(mbox_rsp->recv_kbyte_cnt);
3143 counts[EFCT_HW_HOST_STAT_TX_FRAME_COUNT].counter =
3144 le32_to_cpu(mbox_rsp->trans_frame_cnt);
3145 counts[EFCT_HW_HOST_STAT_RX_FRAME_COUNT].counter =
3146 le32_to_cpu(mbox_rsp->recv_frame_cnt);
3147 counts[EFCT_HW_HOST_STAT_TX_SEQ_COUNT].counter =
3148 le32_to_cpu(mbox_rsp->trans_seq_cnt);
3149 counts[EFCT_HW_HOST_STAT_RX_SEQ_COUNT].counter =
3150 le32_to_cpu(mbox_rsp->recv_seq_cnt);
3151 counts[EFCT_HW_HOST_STAT_TOTAL_EXCH_ORIG].counter =
3152 le32_to_cpu(mbox_rsp->tot_exchanges_orig);
3153 counts[EFCT_HW_HOST_STAT_TOTAL_EXCH_RESP].counter =
3154 le32_to_cpu(mbox_rsp->tot_exchanges_resp);
3155 counts[EFCT_HW_HOSY_STAT_RX_P_BSY_COUNT].counter =
3156 le32_to_cpu(mbox_rsp->recv_p_bsy_cnt);
3157 counts[EFCT_HW_HOST_STAT_RX_F_BSY_COUNT].counter =
3158 le32_to_cpu(mbox_rsp->recv_f_bsy_cnt);
3159 counts[EFCT_HW_HOST_STAT_DROP_FRM_DUE_TO_NO_RQ_BUF_COUNT].counter =
3160 le32_to_cpu(mbox_rsp->no_rq_buf_dropped_frames_cnt);
3161 counts[EFCT_HW_HOST_STAT_EMPTY_RQ_TIMEOUT_COUNT].counter =
3162 le32_to_cpu(mbox_rsp->empty_rq_timeout_cnt);
3163 counts[EFCT_HW_HOST_STAT_DROP_FRM_DUE_TO_NO_XRI_COUNT].counter =
3164 le32_to_cpu(mbox_rsp->no_xri_dropped_frames_cnt);
3165 counts[EFCT_HW_HOST_STAT_EMPTY_XRI_POOL_COUNT].counter =
3166 le32_to_cpu(mbox_rsp->empty_xri_pool_cnt);
3167
3168 if (cb_arg) {
3169 if (cb_arg->cb) {
3170 if (status == 0 && le16_to_cpu(mbox_rsp->hdr.status))
3171 status = le16_to_cpu(mbox_rsp->hdr.status);
3172 cb_arg->cb(status, num_counters, counts, cb_arg->arg);
3173 }
3174
3175 kfree(cb_arg);
3176 }
3177
3178 return 0;
3179}
3180
3181int
3182efct_hw_get_host_stats(struct efct_hw *hw, u8 cc,
3183 void (*cb)(int status, u32 num_counters,
3184 struct efct_hw_host_stat_counts *counters,
3185 void *arg),
3186 void *arg)
3187{
3188 int rc = -EIO;
3189 struct efct_hw_host_stat_cb_arg *cb_arg;
3190 u8 mbxdata[SLI4_BMBX_SIZE];
3191
3192 cb_arg = kmalloc(sizeof(*cb_arg), GFP_ATOMIC);
3193 if (!cb_arg)
3194 return -ENOMEM;
3195
3196 cb_arg->cb = cb;
3197 cb_arg->arg = arg;
3198
 /* build and submit the READ_STATUS mailbox command */
3200 if (!sli_cmd_read_status(&hw->sli, mbxdata, cc))
3201 rc = efct_hw_command(hw, mbxdata, EFCT_CMD_NOWAIT,
3202 efct_hw_cb_host_stat, cb_arg);
3203
3204 if (rc) {
3205 efc_log_debug(hw->os, "READ_HOST_STATS failed\n");
3206 kfree(cb_arg);
3207 }
3208
3209 return rc;
3210}
3211
3212struct efct_hw_async_call_ctx {
3213 efct_hw_async_cb_t callback;
3214 void *arg;
3215 u8 cmd[SLI4_BMBX_SIZE];
3216};
3217
3218static void
3219efct_hw_async_cb(struct efct_hw *hw, int status, u8 *mqe, void *arg)
3220{
3221 struct efct_hw_async_call_ctx *ctx = arg;
3222
3223 if (ctx) {
3224 if (ctx->callback)
3225 (*ctx->callback)(hw, status, mqe, ctx->arg);
3226
3227 kfree(ctx);
3228 }
3229}
3230
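/*
 * Post a COMMON_NOP mailbox command so that @callback runs from mailbox
 * completion context once all previously submitted commands have been
 * processed.
 *
 * A minimal usage sketch (my_async_done and my_ctx are hypothetical names,
 * not part of this driver):
 *
 *	rc = efct_hw_async_call(hw, my_async_done, my_ctx);
 *
 * where my_async_done() has the efct_hw_async_cb_t signature and my_ctx is
 * any caller-owned context.
 */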
3231int
3232efct_hw_async_call(struct efct_hw *hw, efct_hw_async_cb_t callback, void *arg)
3233{
3234 struct efct_hw_async_call_ctx *ctx;
3235 int rc;
3236
 /*
 * Allocate a callback context that embeds the mailbox command buffer;
 * it must persist until the asynchronous mailbox completion runs.
 */
3242 ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
3243 if (!ctx)
3244 return -ENOMEM;
3245
3246 ctx->callback = callback;
3247 ctx->arg = arg;
3248
 /* build and send a COMMON_NOP mailbox command */
3250 if (sli_cmd_common_nop(&hw->sli, ctx->cmd, 0)) {
3251 efc_log_err(hw->os, "COMMON_NOP format failure\n");
3252 kfree(ctx);
3253 return -EIO;
3254 }
3255
3256 rc = efct_hw_command(hw, ctx->cmd, EFCT_CMD_NOWAIT, efct_hw_async_cb,
3257 ctx);
3258 if (rc) {
3259 efc_log_err(hw->os, "COMMON_NOP command failure, rc=%d\n", rc);
3260 kfree(ctx);
3261 return -EIO;
3262 }
3263 return 0;
3264}
3265
3266static int
3267efct_hw_cb_fw_write(struct efct_hw *hw, int status, u8 *mqe, void *arg)
3268{
3269 struct sli4_cmd_sli_config *mbox_rsp =
3270 (struct sli4_cmd_sli_config *)mqe;
3271 struct sli4_rsp_cmn_write_object *wr_obj_rsp;
3272 struct efct_hw_fw_wr_cb_arg *cb_arg = arg;
3273 u32 bytes_written;
3274 u16 mbox_status;
3275 u32 change_status;
3276
3277 wr_obj_rsp = (struct sli4_rsp_cmn_write_object *)
3278 &mbox_rsp->payload.embed;
3279 bytes_written = le32_to_cpu(wr_obj_rsp->actual_write_length);
3280 mbox_status = le16_to_cpu(mbox_rsp->hdr.status);
3281 change_status = (le32_to_cpu(wr_obj_rsp->change_status_dword) &
3282 RSP_CHANGE_STATUS);
3283
3284 if (cb_arg) {
3285 if (cb_arg->cb) {
3286 if (!status && mbox_status)
3287 status = mbox_status;
3288 cb_arg->cb(status, bytes_written, change_status,
3289 cb_arg->arg);
3290 }
3291
3292 kfree(cb_arg);
3293 }
3294
3295 return 0;
3296}
3297
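/*
 * Write one chunk of a firmware image to the "/prg/" object via
 * COMMON_WRITE_OBJECT. @last marks the final chunk; the callback receives
 * the byte count written and the change status reported by the firmware.
 */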
3298int
3299efct_hw_firmware_write(struct efct_hw *hw, struct efc_dma *dma, u32 size,
3300 u32 offset, int last,
3301 void (*cb)(int status, u32 bytes_written,
3302 u32 change_status, void *arg),
3303 void *arg)
3304{
3305 int rc = -EIO;
3306 u8 mbxdata[SLI4_BMBX_SIZE];
3307 struct efct_hw_fw_wr_cb_arg *cb_arg;
3308 int noc = 0;
3309
3310 cb_arg = kzalloc(sizeof(*cb_arg), GFP_KERNEL);
3311 if (!cb_arg)
3312 return -ENOMEM;
3313
3314 cb_arg->cb = cb;
3315 cb_arg->arg = arg;
3316
 /* build and submit the COMMON_WRITE_OBJECT mailbox command */
3318 if (!sli_cmd_common_write_object(&hw->sli, mbxdata,
3319 noc, last, size, offset, "/prg/",
3320 dma))
3321 rc = efct_hw_command(hw, mbxdata, EFCT_CMD_NOWAIT,
3322 efct_hw_cb_fw_write, cb_arg);
3323
3324 if (rc != 0) {
3325 efc_log_debug(hw->os, "COMMON_WRITE_OBJECT failed\n");
3326 kfree(cb_arg);
3327 }
3328
3329 return rc;
3330}
3331
3332static int
3333efct_hw_cb_port_control(struct efct_hw *hw, int status, u8 *mqe,
3334 void *arg)
3335{
3336 return 0;
3337}
3338
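/*
 * Bring the link up (CONFIG_LINK followed by INIT_LINK) or down (DOWN_LINK)
 * using asynchronous mailbox commands.
 */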
3339int
3340efct_hw_port_control(struct efct_hw *hw, enum efct_hw_port ctrl,
3341 uintptr_t value,
3342 void (*cb)(int status, uintptr_t value, void *arg),
3343 void *arg)
3344{
3345 int rc = -EIO;
3346 u8 link[SLI4_BMBX_SIZE];
3347 u32 speed = 0;
3348 u8 reset_alpa = 0;
3349
3350 switch (ctrl) {
3351 case EFCT_HW_PORT_INIT:
3352 if (!sli_cmd_config_link(&hw->sli, link))
3353 rc = efct_hw_command(hw, link, EFCT_CMD_NOWAIT,
3354 efct_hw_cb_port_control, NULL);
3355
3356 if (rc != 0) {
3357 efc_log_err(hw->os, "CONFIG_LINK failed\n");
3358 break;
3359 }
3360 speed = hw->config.speed;
3361 reset_alpa = (u8)(value & 0xff);
3362
3363 rc = -EIO;
3364 if (!sli_cmd_init_link(&hw->sli, link, speed, reset_alpa))
3365 rc = efct_hw_command(hw, link, EFCT_CMD_NOWAIT,
3366 efct_hw_cb_port_control, NULL);
3367
3368 if (rc)
3369 efc_log_err(hw->os, "INIT_LINK failed\n");
3370 break;
3371
3372 case EFCT_HW_PORT_SHUTDOWN:
3373 if (!sli_cmd_down_link(&hw->sli, link))
3374 rc = efct_hw_command(hw, link, EFCT_CMD_NOWAIT,
3375 efct_hw_cb_port_control, NULL);
3376
3377 if (rc)
3378 efc_log_err(hw->os, "DOWN_LINK failed\n");
3379 break;
3380
3381 default:
3382 efc_log_debug(hw->os, "unhandled control %#x\n", ctrl);
3383 break;
3384 }
3385
3386 return rc;
3387}
3388
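/*
 * Tear down the HW layer: flush and cancel outstanding work, free IOs, DMA
 * buffers, queues and pools, and return the object to the uninitialized
 * state.
 */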
3389void
3390efct_hw_teardown(struct efct_hw *hw)
3391{
3392 u32 i = 0;
3393 u32 destroy_queues;
3394 u32 free_memory;
3395 struct efc_dma *dma;
3396 struct efct *efct = hw->os;
3397
3398 destroy_queues = (hw->state == EFCT_HW_STATE_ACTIVE);
3399 free_memory = (hw->state != EFCT_HW_STATE_UNINITIALIZED);
3400
 /* cancel the SLI port health check */
3402 if (hw->sliport_healthcheck) {
3403 hw->sliport_healthcheck = 0;
3404 efct_hw_config_sli_port_health_check(hw, 0, 0);
3405 }
3406
3407 if (hw->state != EFCT_HW_STATE_QUEUES_ALLOCATED) {
3408 hw->state = EFCT_HW_STATE_TEARDOWN_IN_PROGRESS;
3409
3410 efct_hw_flush(hw);
3411
3412 if (list_empty(&hw->cmd_head))
3413 efc_log_debug(hw->os,
3414 "All commands completed on MQ queue\n");
3415 else
3416 efc_log_debug(hw->os,
3417 "Some cmds still pending on MQ queue\n");
3418
 /* cancel any outstanding mailbox commands */
3420 efct_hw_command_cancel(hw);
3421 } else {
3422 hw->state = EFCT_HW_STATE_TEARDOWN_IN_PROGRESS;
3423 }
3424
3425 dma_free_coherent(&efct->pci->dev,
3426 hw->rnode_mem.size, hw->rnode_mem.virt,
3427 hw->rnode_mem.phys);
3428 memset(&hw->rnode_mem, 0, sizeof(struct efc_dma));
3429
3430 if (hw->io) {
3431 for (i = 0; i < hw->config.n_io; i++) {
3432 if (hw->io[i] && hw->io[i]->sgl &&
3433 hw->io[i]->sgl->virt) {
3434 dma_free_coherent(&efct->pci->dev,
3435 hw->io[i]->sgl->size,
3436 hw->io[i]->sgl->virt,
3437 hw->io[i]->sgl->phys);
3438 }
3439 kfree(hw->io[i]);
3440 hw->io[i] = NULL;
3441 }
3442 kfree(hw->io);
3443 hw->io = NULL;
3444 kfree(hw->wqe_buffs);
3445 hw->wqe_buffs = NULL;
3446 }
3447
3448 dma = &hw->xfer_rdy;
3449 dma_free_coherent(&efct->pci->dev,
3450 dma->size, dma->virt, dma->phys);
3451 memset(dma, 0, sizeof(struct efc_dma));
3452
3453 dma = &hw->loop_map;
3454 dma_free_coherent(&efct->pci->dev,
3455 dma->size, dma->virt, dma->phys);
3456 memset(dma, 0, sizeof(struct efc_dma));
3457
3458 for (i = 0; i < hw->wq_count; i++)
3459 sli_queue_free(&hw->sli, &hw->wq[i], destroy_queues,
3460 free_memory);
3461
3462 for (i = 0; i < hw->rq_count; i++)
3463 sli_queue_free(&hw->sli, &hw->rq[i], destroy_queues,
3464 free_memory);
3465
3466 for (i = 0; i < hw->mq_count; i++)
3467 sli_queue_free(&hw->sli, &hw->mq[i], destroy_queues,
3468 free_memory);
3469
3470 for (i = 0; i < hw->cq_count; i++)
3471 sli_queue_free(&hw->sli, &hw->cq[i], destroy_queues,
3472 free_memory);
3473
3474 for (i = 0; i < hw->eq_count; i++)
3475 sli_queue_free(&hw->sli, &hw->eq[i], destroy_queues,
3476 free_memory);
3477
3479 efct_hw_rx_free(hw);
3480
3481 efct_hw_queue_teardown(hw);
3482
3483 kfree(hw->wq_cpu_array);
3484
3485 sli_teardown(&hw->sli);
3486
 /* record that the queues are no longer functional */
3488 hw->state = EFCT_HW_STATE_UNINITIALIZED;
3489
3491 kfree(hw->seq_pool);
3492 hw->seq_pool = NULL;
3493
3495 efct_hw_reqtag_pool_free(hw);
3496
3497 mempool_destroy(hw->cmd_ctx_pool);
3498 mempool_destroy(hw->mbox_rqst_pool);
3499
3501 hw->hw_setup_called = false;
3502}
3503
3504static int
3505efct_hw_sli_reset(struct efct_hw *hw, enum efct_hw_reset reset,
3506 enum efct_hw_state prev_state)
3507{
3508 int rc = 0;
3509
3510 switch (reset) {
3511 case EFCT_HW_RESET_FUNCTION:
3512 efc_log_debug(hw->os, "issuing function level reset\n");
3513 if (sli_reset(&hw->sli)) {
3514 efc_log_err(hw->os, "sli_reset failed\n");
3515 rc = -EIO;
3516 }
3517 break;
3518 case EFCT_HW_RESET_FIRMWARE:
3519 efc_log_debug(hw->os, "issuing firmware reset\n");
3520 if (sli_fw_reset(&hw->sli)) {
 efc_log_err(hw->os, "sli_fw_reset failed\n");
3522 rc = -EIO;
3523 }
3524
 /*
 * A firmware reset leaves the function in a non-running state,
 * so follow it with a function-level reset.
 */
3528 efc_log_debug(hw->os, "issuing function level reset\n");
3529 if (sli_reset(&hw->sli)) {
3530 efc_log_err(hw->os, "sli_reset failed\n");
3531 rc = -EIO;
3532 }
3533 break;
3534 default:
3535 efc_log_err(hw->os, "unknown type - no reset performed\n");
3536 hw->state = prev_state;
3537 rc = -EINVAL;
3538 break;
3539 }
3540
3541 return rc;
3542}
3543
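/*
 * Reset the SLI port at the requested level (function or firmware). If the
 * HW was previously initialized, outstanding completions are flushed first.
 */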
3544int
3545efct_hw_reset(struct efct_hw *hw, enum efct_hw_reset reset)
3546{
3547 int rc = 0;
3548 enum efct_hw_state prev_state = hw->state;
3549
3550 if (hw->state != EFCT_HW_STATE_ACTIVE)
3551 efc_log_debug(hw->os,
3552 "HW state %d is not active\n", hw->state);
3553
3554 hw->state = EFCT_HW_STATE_RESET_IN_PROGRESS;
3555
 /*
 * If a reset or teardown is already in progress, just reset the SLI
 * port without flushing completions again.
 */
3560 if (prev_state == EFCT_HW_STATE_RESET_IN_PROGRESS ||
3561 prev_state == EFCT_HW_STATE_TEARDOWN_IN_PROGRESS)
3562 return efct_hw_sli_reset(hw, reset, prev_state);
3563
3564 if (prev_state != EFCT_HW_STATE_UNINITIALIZED) {
3565 efct_hw_flush(hw);
3566
3567 if (list_empty(&hw->cmd_head))
3568 efc_log_debug(hw->os,
3569 "All commands completed on MQ queue\n");
3570 else
3571 efc_log_err(hw->os,
3572 "Some commands still pending on MQ queue\n");
3573 }
3574
3576 rc = efct_hw_sli_reset(hw, reset, prev_state);
3577 if (rc == -EINVAL)
3578 return -EIO;
3579
3580 return rc;
3581}
3582