1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21#include "bfi.h"
22#include "bfa_msgq.h"
23#include "bfa_ioc.h"
24
/*
 * Detach the completion callback from a command-queue entry and invoke it.
 *
 * The cbfn/cbarg pair is captured and cleared *before* the call so the
 * callback may safely re-post or recycle the same entry.  Wrapped in
 * do { } while (0) so the macro expands to a single statement and is safe
 * in unbraced if/else bodies (the original bare { } block would break
 * "if (x) call_cmdq_ent_cbfn(e, s); else ...").
 */
#define call_cmdq_ent_cbfn(_cmdq_ent, _status)				\
do {									\
	bfa_msgq_cmdcbfn_t cbfn;					\
	void *cbarg;							\
	cbfn = (_cmdq_ent)->cbfn;					\
	cbarg = (_cmdq_ent)->cbarg;					\
	(_cmdq_ent)->cbfn = NULL;					\
	(_cmdq_ent)->cbarg = NULL;					\
	if (cbfn) {							\
		cbfn(cbarg, (_status));					\
	}								\
} while (0)
37
38static void bfa_msgq_cmdq_dbell(struct bfa_msgq_cmdq *cmdq);
39static void bfa_msgq_cmdq_copy_rsp(struct bfa_msgq_cmdq *cmdq);
40
/* Events fed to the command-queue (cmdq) state machine. */
enum cmdq_event {
	CMDQ_E_START = 1,	/* IOC enabled (see bfa_msgq_notify()) */
	CMDQ_E_STOP = 2,	/* IOC disabled */
	CMDQ_E_FAIL = 3,	/* IOC failed */
	CMDQ_E_POST = 4,	/* command(s) copied into the queue */
	CMDQ_E_INIT_RESP = 5,	/* firmware answered the INIT request */
	CMDQ_E_DB_READY = 6,	/* doorbell mailbox message consumed */
};
49
50bfa_fsm_state_decl(cmdq, stopped, struct bfa_msgq_cmdq, enum cmdq_event);
51bfa_fsm_state_decl(cmdq, init_wait, struct bfa_msgq_cmdq, enum cmdq_event);
52bfa_fsm_state_decl(cmdq, ready, struct bfa_msgq_cmdq, enum cmdq_event);
53bfa_fsm_state_decl(cmdq, dbell_wait, struct bfa_msgq_cmdq,
54 enum cmdq_event);
55
/*
 * Entry action for the stopped state: reset all queue bookkeeping and
 * fail every command still parked on the pending queue.
 */
static void
cmdq_sm_stopped_entry(struct bfa_msgq_cmdq *cmdq)
{
	struct bfa_msgq_cmd_entry *cmdq_ent;

	cmdq->producer_index = 0;
	cmdq->consumer_index = 0;
	cmdq->flags = 0;
	cmdq->token = 0;
	cmdq->offset = 0;
	cmdq->bytes_to_copy = 0;
	/* Drain pending_q, completing each entry with BFA_STATUS_FAILED. */
	while (!list_empty(&cmdq->pending_q)) {
		bfa_q_deq(&cmdq->pending_q, &cmdq_ent);
		bfa_q_qe_init(&cmdq_ent->qe);
		call_cmdq_ent_cbfn(cmdq_ent, BFA_STATUS_FAILED);
	}
}
73
/* stopped: cmdq inactive (IOC disabled/failed, or not yet started). */
static void
cmdq_sm_stopped(struct bfa_msgq_cmdq *cmdq, enum cmdq_event event)
{
	switch (event) {
	case CMDQ_E_START:
		bfa_fsm_set_state(cmdq, cmdq_sm_init_wait);
		break;

	case CMDQ_E_STOP:
	case CMDQ_E_FAIL:
		/* Already stopped; nothing to do. */
		break;

	case CMDQ_E_POST:
		/* Remember that a doorbell ring is owed once running. */
		cmdq->flags |= BFA_MSGQ_CMDQ_F_DB_UPDATE;
		break;

	default:
		bfa_sm_fault(event);
	}
}
95
/*
 * Entry action for init_wait: drop this queue's reference on the msgq
 * init wait-counter.  Once both cmdq and rspq have arrived here the
 * counter's completion callback (bfa_msgq_init) sends the firmware
 * configuration request.
 */
static void
cmdq_sm_init_wait_entry(struct bfa_msgq_cmdq *cmdq)
{
	bfa_wc_down(&cmdq->msgq->init_wc);
}
101
/* init_wait: waiting for firmware's response to the INIT request. */
static void
cmdq_sm_init_wait(struct bfa_msgq_cmdq *cmdq, enum cmdq_event event)
{
	switch (event) {
	case CMDQ_E_STOP:
	case CMDQ_E_FAIL:
		bfa_fsm_set_state(cmdq, cmdq_sm_stopped);
		break;

	case CMDQ_E_POST:
		/* Defer the doorbell until initialization completes. */
		cmdq->flags |= BFA_MSGQ_CMDQ_F_DB_UPDATE;
		break;

	case CMDQ_E_INIT_RESP:
		/* If commands were posted meanwhile, ring the doorbell now. */
		if (cmdq->flags & BFA_MSGQ_CMDQ_F_DB_UPDATE) {
			cmdq->flags &= ~BFA_MSGQ_CMDQ_F_DB_UPDATE;
			bfa_fsm_set_state(cmdq, cmdq_sm_dbell_wait);
		} else
			bfa_fsm_set_state(cmdq, cmdq_sm_ready);
		break;

	default:
		bfa_sm_fault(event);
	}
}
127
/* ready: queue initialized and idle; no entry action required. */
static void
cmdq_sm_ready_entry(struct bfa_msgq_cmdq *cmdq)
{
}
132
/* ready: initialized, doorbell up to date, waiting for work. */
static void
cmdq_sm_ready(struct bfa_msgq_cmdq *cmdq, enum cmdq_event event)
{
	switch (event) {
	case CMDQ_E_STOP:
	case CMDQ_E_FAIL:
		bfa_fsm_set_state(cmdq, cmdq_sm_stopped);
		break;

	case CMDQ_E_POST:
		/* New commands in the queue: ring the producer doorbell. */
		bfa_fsm_set_state(cmdq, cmdq_sm_dbell_wait);
		break;

	default:
		bfa_sm_fault(event);
	}
}
150
/* Entry action: send the producer-index doorbell via the IOC mailbox. */
static void
cmdq_sm_dbell_wait_entry(struct bfa_msgq_cmdq *cmdq)
{
	bfa_msgq_cmdq_dbell(cmdq);
}
156
/* dbell_wait: doorbell message in flight, waiting for DB_READY. */
static void
cmdq_sm_dbell_wait(struct bfa_msgq_cmdq *cmdq, enum cmdq_event event)
{
	switch (event) {
	case CMDQ_E_STOP:
	case CMDQ_E_FAIL:
		bfa_fsm_set_state(cmdq, cmdq_sm_stopped);
		break;

	case CMDQ_E_POST:
		/* Coalesce: flag another doorbell for when this one lands. */
		cmdq->flags |= BFA_MSGQ_CMDQ_F_DB_UPDATE;
		break;

	case CMDQ_E_DB_READY:
		/* Re-enter dbell_wait to ring again if more was posted. */
		if (cmdq->flags & BFA_MSGQ_CMDQ_F_DB_UPDATE) {
			cmdq->flags &= ~BFA_MSGQ_CMDQ_F_DB_UPDATE;
			bfa_fsm_set_state(cmdq, cmdq_sm_dbell_wait);
		} else
			bfa_fsm_set_state(cmdq, cmdq_sm_ready);
		break;

	default:
		bfa_sm_fault(event);
	}
}
182
183static void
184bfa_msgq_cmdq_dbell_ready(void *arg)
185{
186 struct bfa_msgq_cmdq *cmdq = (struct bfa_msgq_cmdq *)arg;
187 bfa_fsm_send_event(cmdq, CMDQ_E_DB_READY);
188}
189
/*
 * Build a BFI_MSGQ_H2I_DOORBELL_PI mailbox message carrying the current
 * cmdq producer index (big-endian) and hand it to the IOC mailbox.
 *
 * NOTE(review): bfa_nw_ioc_mbox_queue() appears to return nonzero when
 * the message was deferred and the supplied callback will fire later; on
 * zero the completion is invoked inline here -- confirm against the
 * bfa_nw_ioc_mbox_queue() implementation.
 */
static void
bfa_msgq_cmdq_dbell(struct bfa_msgq_cmdq *cmdq)
{
	struct bfi_msgq_h2i_db *dbell =
		(struct bfi_msgq_h2i_db *)(&cmdq->dbell_mb.msg[0]);

	memset(dbell, 0, sizeof(struct bfi_msgq_h2i_db));
	bfi_h2i_set(dbell->mh, BFI_MC_MSGQ, BFI_MSGQ_H2I_DOORBELL_PI, 0);
	dbell->mh.mtag.i2htok = 0;
	dbell->idx.cmdq_pi = htons(cmdq->producer_index);

	if (!bfa_nw_ioc_mbox_queue(cmdq->msgq->ioc, &cmdq->dbell_mb,
				bfa_msgq_cmdq_dbell_ready, cmdq)) {
		bfa_msgq_cmdq_dbell_ready(cmdq);
	}
}
206
207static void
208__cmd_copy(struct bfa_msgq_cmdq *cmdq, struct bfa_msgq_cmd_entry *cmd)
209{
210 size_t len = cmd->msg_size;
211 int num_entries = 0;
212 size_t to_copy;
213 u8 *src, *dst;
214
215 src = (u8 *)cmd->msg_hdr;
216 dst = (u8 *)cmdq->addr.kva;
217 dst += (cmdq->producer_index * BFI_MSGQ_CMD_ENTRY_SIZE);
218
219 while (len) {
220 to_copy = (len < BFI_MSGQ_CMD_ENTRY_SIZE) ?
221 len : BFI_MSGQ_CMD_ENTRY_SIZE;
222 memcpy(dst, src, to_copy);
223 len -= to_copy;
224 src += BFI_MSGQ_CMD_ENTRY_SIZE;
225 BFA_MSGQ_INDX_ADD(cmdq->producer_index, 1, cmdq->depth);
226 dst = (u8 *)cmdq->addr.kva;
227 dst += (cmdq->producer_index * BFI_MSGQ_CMD_ENTRY_SIZE);
228 num_entries++;
229 }
230
231}
232
/*
 * Firmware doorbell (BFI_MSGQ_I2H_DOORBELL_CI): the consumer index moved,
 * so queue space may have opened up.  Copy as many pending commands as
 * now fit (in FIFO order, stopping at the first that does not fit),
 * complete each with BFA_STATUS_OK, and send CMDQ_E_POST if anything was
 * written so the producer doorbell gets rung.
 */
static void
bfa_msgq_cmdq_ci_update(struct bfa_msgq_cmdq *cmdq, struct bfi_mbmsg *mb)
{
	struct bfi_msgq_i2h_db *dbell = (struct bfi_msgq_i2h_db *)mb;
	struct bfa_msgq_cmd_entry *cmd;
	int posted = 0;

	cmdq->consumer_index = ntohs(dbell->idx.cmdq_ci);

	while (!list_empty(&cmdq->pending_q)) {
		cmd =
		(struct bfa_msgq_cmd_entry *)bfa_q_first(&cmdq->pending_q);
		if (ntohs(cmd->msg_hdr->num_entries) <=
			BFA_MSGQ_FREE_CNT(cmdq)) {
			list_del(&cmd->qe);
			__cmd_copy(cmdq, cmd);
			posted = 1;
			call_cmdq_ent_cbfn(cmd, BFA_STATUS_OK);
		} else {
			/* Head does not fit yet; keep FIFO order and stop. */
			break;
		}
	}

	if (posted)
		bfa_fsm_send_event(cmdq, CMDQ_E_POST);
}
260
261static void
262bfa_msgq_cmdq_copy_next(void *arg)
263{
264 struct bfa_msgq_cmdq *cmdq = (struct bfa_msgq_cmdq *)arg;
265
266 if (cmdq->bytes_to_copy)
267 bfa_msgq_cmdq_copy_rsp(cmdq);
268}
269
/*
 * Firmware asks the host to send back a region of the command queue
 * (BFI_MSGQ_I2H_CMDQ_COPY_REQ).  The purpose of the copy is not visible
 * here -- presumably firmware-side recovery/verification; confirm against
 * the BFI protocol definition.  Record the requested offset/length and
 * start the chunked copy-response exchange.
 */
static void
bfa_msgq_cmdq_copy_req(struct bfa_msgq_cmdq *cmdq, struct bfi_mbmsg *mb)
{
	struct bfi_msgq_i2h_cmdq_copy_req *req =
		(struct bfi_msgq_i2h_cmdq_copy_req *)mb;

	cmdq->token = 0;
	cmdq->offset = ntohs(req->offset);
	cmdq->bytes_to_copy = ntohs(req->len);
	bfa_msgq_cmdq_copy_rsp(cmdq);
}
281
/*
 * Send one chunk (up to BFI_CMD_COPY_SZ bytes) of the requested cmdq
 * region back to firmware, tagged with an incrementing token.  When the
 * mailbox message is consumed, bfa_msgq_cmdq_copy_next() continues with
 * the following chunk until bytes_to_copy reaches zero.
 */
static void
bfa_msgq_cmdq_copy_rsp(struct bfa_msgq_cmdq *cmdq)
{
	struct bfi_msgq_h2i_cmdq_copy_rsp *rsp =
		(struct bfi_msgq_h2i_cmdq_copy_rsp *)&cmdq->copy_mb.msg[0];
	int copied;
	u8 *addr = (u8 *)cmdq->addr.kva;

	memset(rsp, 0, sizeof(struct bfi_msgq_h2i_cmdq_copy_rsp));
	bfi_h2i_set(rsp->mh, BFI_MC_MSGQ, BFI_MSGQ_H2I_CMDQ_COPY_RSP, 0);
	rsp->mh.mtag.i2htok = htons(cmdq->token);
	copied = (cmdq->bytes_to_copy >= BFI_CMD_COPY_SZ) ? BFI_CMD_COPY_SZ :
		cmdq->bytes_to_copy;
	addr += cmdq->offset;
	memcpy(rsp->data, addr, copied);

	/* Advance the running copy state before queuing the message. */
	cmdq->token++;
	cmdq->offset += copied;
	cmdq->bytes_to_copy -= copied;

	/* If the mailbox took the message inline, continue immediately. */
	if (!bfa_nw_ioc_mbox_queue(cmdq->msgq->ioc, &cmdq->copy_mb,
				bfa_msgq_cmdq_copy_next, cmdq)) {
		bfa_msgq_cmdq_copy_next(cmdq);
	}
}
307
308static void
309bfa_msgq_cmdq_attach(struct bfa_msgq_cmdq *cmdq, struct bfa_msgq *msgq)
310{
311 cmdq->depth = BFA_MSGQ_CMDQ_NUM_ENTRY;
312 INIT_LIST_HEAD(&cmdq->pending_q);
313 cmdq->msgq = msgq;
314 bfa_fsm_set_state(cmdq, cmdq_sm_stopped);
315}
316
317static void bfa_msgq_rspq_dbell(struct bfa_msgq_rspq *rspq);
318
/* Events fed to the response-queue (rspq) state machine. */
enum rspq_event {
	RSPQ_E_START = 1,	/* IOC enabled (see bfa_msgq_notify()) */
	RSPQ_E_STOP = 2,	/* IOC disabled */
	RSPQ_E_FAIL = 3,	/* IOC failed */
	RSPQ_E_RESP = 4,	/* responses consumed; CI doorbell owed */
	RSPQ_E_INIT_RESP = 5,	/* firmware answered the INIT request */
	RSPQ_E_DB_READY = 6,	/* doorbell mailbox message consumed */
};
327
328bfa_fsm_state_decl(rspq, stopped, struct bfa_msgq_rspq, enum rspq_event);
329bfa_fsm_state_decl(rspq, init_wait, struct bfa_msgq_rspq,
330 enum rspq_event);
331bfa_fsm_state_decl(rspq, ready, struct bfa_msgq_rspq, enum rspq_event);
332bfa_fsm_state_decl(rspq, dbell_wait, struct bfa_msgq_rspq,
333 enum rspq_event);
334
/* Entry action for the stopped state: reset queue indices and flags. */
static void
rspq_sm_stopped_entry(struct bfa_msgq_rspq *rspq)
{
	rspq->producer_index = 0;
	rspq->consumer_index = 0;
	rspq->flags = 0;
}
342
/* stopped: rspq inactive (IOC disabled/failed, or not yet started). */
static void
rspq_sm_stopped(struct bfa_msgq_rspq *rspq, enum rspq_event event)
{
	switch (event) {
	case RSPQ_E_START:
		bfa_fsm_set_state(rspq, rspq_sm_init_wait);
		break;

	case RSPQ_E_STOP:
	case RSPQ_E_FAIL:
		/* Already stopped; nothing to do. */
		break;

	default:
		bfa_sm_fault(event);
	}
}
360
/*
 * Entry action for init_wait: drop this queue's reference on the msgq
 * init wait-counter; bfa_msgq_init() runs once both queues are here.
 */
static void
rspq_sm_init_wait_entry(struct bfa_msgq_rspq *rspq)
{
	bfa_wc_down(&rspq->msgq->init_wc);
}
366
/* init_wait: waiting for firmware's response to the INIT request. */
static void
rspq_sm_init_wait(struct bfa_msgq_rspq *rspq, enum rspq_event event)
{
	switch (event) {
	case RSPQ_E_FAIL:
	case RSPQ_E_STOP:
		bfa_fsm_set_state(rspq, rspq_sm_stopped);
		break;

	case RSPQ_E_INIT_RESP:
		bfa_fsm_set_state(rspq, rspq_sm_ready);
		break;

	default:
		bfa_sm_fault(event);
	}
}
384
/* ready: queue initialized and idle; no entry action required. */
static void
rspq_sm_ready_entry(struct bfa_msgq_rspq *rspq)
{
}
389
/* ready: initialized, CI doorbell up to date, waiting for responses. */
static void
rspq_sm_ready(struct bfa_msgq_rspq *rspq, enum rspq_event event)
{
	switch (event) {
	case RSPQ_E_STOP:
	case RSPQ_E_FAIL:
		bfa_fsm_set_state(rspq, rspq_sm_stopped);
		break;

	case RSPQ_E_RESP:
		/* Responses consumed: ring the consumer-index doorbell. */
		bfa_fsm_set_state(rspq, rspq_sm_dbell_wait);
		break;

	default:
		bfa_sm_fault(event);
	}
}
407
/*
 * Entry action: ring the consumer-index doorbell -- but only while the
 * IOC is operational; when disabled the mailbox must not be touched.
 */
static void
rspq_sm_dbell_wait_entry(struct bfa_msgq_rspq *rspq)
{
	if (!bfa_nw_ioc_is_disabled(rspq->msgq->ioc))
		bfa_msgq_rspq_dbell(rspq);
}
414
/* dbell_wait: CI doorbell message in flight, waiting for DB_READY. */
static void
rspq_sm_dbell_wait(struct bfa_msgq_rspq *rspq, enum rspq_event event)
{
	switch (event) {
	case RSPQ_E_STOP:
	case RSPQ_E_FAIL:
		bfa_fsm_set_state(rspq, rspq_sm_stopped);
		break;

	case RSPQ_E_RESP:
		/* Coalesce: flag another doorbell for when this one lands. */
		rspq->flags |= BFA_MSGQ_RSPQ_F_DB_UPDATE;
		break;

	case RSPQ_E_DB_READY:
		/* Re-enter dbell_wait to ring again if more was consumed. */
		if (rspq->flags & BFA_MSGQ_RSPQ_F_DB_UPDATE) {
			rspq->flags &= ~BFA_MSGQ_RSPQ_F_DB_UPDATE;
			bfa_fsm_set_state(rspq, rspq_sm_dbell_wait);
		} else
			bfa_fsm_set_state(rspq, rspq_sm_ready);
		break;

	default:
		bfa_sm_fault(event);
	}
}
440
441static void
442bfa_msgq_rspq_dbell_ready(void *arg)
443{
444 struct bfa_msgq_rspq *rspq = (struct bfa_msgq_rspq *)arg;
445 bfa_fsm_send_event(rspq, RSPQ_E_DB_READY);
446}
447
/*
 * Build a BFI_MSGQ_H2I_DOORBELL_CI mailbox message carrying the current
 * rspq consumer index (big-endian) and hand it to the IOC mailbox.
 *
 * NOTE(review): as with the cmdq doorbell, a zero return from
 * bfa_nw_ioc_mbox_queue() appears to mean "sent inline, callback will
 * not fire", so the ready path is invoked directly -- confirm.
 */
static void
bfa_msgq_rspq_dbell(struct bfa_msgq_rspq *rspq)
{
	struct bfi_msgq_h2i_db *dbell =
		(struct bfi_msgq_h2i_db *)(&rspq->dbell_mb.msg[0]);

	memset(dbell, 0, sizeof(struct bfi_msgq_h2i_db));
	bfi_h2i_set(dbell->mh, BFI_MC_MSGQ, BFI_MSGQ_H2I_DOORBELL_CI, 0);
	dbell->mh.mtag.i2htok = 0;
	dbell->idx.rspq_ci = htons(rspq->consumer_index);

	if (!bfa_nw_ioc_mbox_queue(rspq->msgq->ioc, &rspq->dbell_mb,
				bfa_msgq_rspq_dbell_ready, rspq)) {
		bfa_msgq_rspq_dbell_ready(rspq);
	}
}
464
/*
 * Firmware doorbell (BFI_MSGQ_I2H_DOORBELL_PI): new responses are in the
 * queue.  Walk entries from consumer_index up to the new producer_index,
 * dispatching each message to the handler registered for its message
 * class and advancing the consumer index by the entry count in the
 * message header.  Dispatch stops early on an out-of-range class or a
 * class with no registered handler (those entries are retried on the
 * next doorbell, once a handler exists).  RSPQ_E_RESP then triggers the
 * consumer-index doorbell back to firmware.
 */
static void
bfa_msgq_rspq_pi_update(struct bfa_msgq_rspq *rspq, struct bfi_mbmsg *mb)
{
	struct bfi_msgq_i2h_db *dbell = (struct bfi_msgq_i2h_db *)mb;
	struct bfi_msgq_mhdr *msghdr;
	int num_entries;
	int mc;
	u8 *rspq_qe;

	rspq->producer_index = ntohs(dbell->idx.rspq_pi);

	while (rspq->consumer_index != rspq->producer_index) {
		rspq_qe = (u8 *)rspq->addr.kva;
		rspq_qe += (rspq->consumer_index * BFI_MSGQ_RSP_ENTRY_SIZE);
		msghdr = (struct bfi_msgq_mhdr *)rspq_qe;

		mc = msghdr->msg_class;
		num_entries = ntohs(msghdr->num_entries);

		if ((mc >= BFI_MC_MAX) || (rspq->rsphdlr[mc].cbfn == NULL))
			break;

		(rspq->rsphdlr[mc].cbfn)(rspq->rsphdlr[mc].cbarg, msghdr);

		BFA_MSGQ_INDX_ADD(rspq->consumer_index, num_entries,
				rspq->depth);
	}

	bfa_fsm_send_event(rspq, RSPQ_E_RESP);
}
495
496static void
497bfa_msgq_rspq_attach(struct bfa_msgq_rspq *rspq, struct bfa_msgq *msgq)
498{
499 rspq->depth = BFA_MSGQ_RSPQ_NUM_ENTRY;
500 rspq->msgq = msgq;
501 bfa_fsm_set_state(rspq, rspq_sm_stopped);
502}
503
/*
 * Firmware acknowledged the INIT request (BFI_MSGQ_I2H_INIT_RSP):
 * move both queue state machines out of init_wait.
 */
static void
bfa_msgq_init_rsp(struct bfa_msgq *msgq,
		struct bfi_mbmsg *mb)
{
	bfa_fsm_send_event(&msgq->cmdq, CMDQ_E_INIT_RESP);
	bfa_fsm_send_event(&msgq->rspq, RSPQ_E_INIT_RESP);
}
511
/*
 * init_wc completion callback: both cmdq and rspq have reached
 * init_wait, so send BFI_MSGQ_H2I_INIT_REQ to firmware with the DMA
 * addresses and depths (big-endian) of both queues.  The reply arrives
 * as BFI_MSGQ_I2H_INIT_RSP via bfa_msgq_isr().
 */
static void
bfa_msgq_init(void *arg)
{
	struct bfa_msgq *msgq = (struct bfa_msgq *)arg;
	struct bfi_msgq_cfg_req *msgq_cfg =
		(struct bfi_msgq_cfg_req *)&msgq->init_mb.msg[0];

	memset(msgq_cfg, 0, sizeof(struct bfi_msgq_cfg_req));
	bfi_h2i_set(msgq_cfg->mh, BFI_MC_MSGQ, BFI_MSGQ_H2I_INIT_REQ, 0);
	msgq_cfg->mh.mtag.i2htok = 0;

	bfa_dma_be_addr_set(msgq_cfg->cmdq.addr, msgq->cmdq.addr.pa);
	msgq_cfg->cmdq.q_depth = htons(msgq->cmdq.depth);
	bfa_dma_be_addr_set(msgq_cfg->rspq.addr, msgq->rspq.addr.pa);
	msgq_cfg->rspq.q_depth = htons(msgq->rspq.depth);

	/* No completion callback: the response itself signals progress. */
	bfa_nw_ioc_mbox_queue(msgq->ioc, &msgq->init_mb, NULL, NULL);
}
530
/*
 * Mailbox ISR for the BFI_MC_MSGQ message class: demultiplex incoming
 * firmware messages to the queue handlers.  An unknown message id means
 * a firmware/driver protocol mismatch and crashes deliberately.
 */
static void
bfa_msgq_isr(void *cbarg, struct bfi_mbmsg *msg)
{
	struct bfa_msgq *msgq = (struct bfa_msgq *)cbarg;

	switch (msg->mh.msg_id) {
	case BFI_MSGQ_I2H_INIT_RSP:
		bfa_msgq_init_rsp(msgq, msg);
		break;

	case BFI_MSGQ_I2H_DOORBELL_PI:
		bfa_msgq_rspq_pi_update(&msgq->rspq, msg);
		break;

	case BFI_MSGQ_I2H_DOORBELL_CI:
		bfa_msgq_cmdq_ci_update(&msgq->cmdq, msg);
		break;

	case BFI_MSGQ_I2H_CMDQ_COPY_REQ:
		bfa_msgq_cmdq_copy_req(&msgq->cmdq, msg);
		break;

	default:
		BUG_ON(1);
	}
}
557
/*
 * IOC event callback.  On enable, start both queue state machines under
 * the init wait-counter (one reference per queue) so that
 * bfa_msgq_init() runs once both have reached init_wait; on disable or
 * failure, propagate stop/fail events to both queues.
 */
static void
bfa_msgq_notify(void *cbarg, enum bfa_ioc_event event)
{
	struct bfa_msgq *msgq = (struct bfa_msgq *)cbarg;

	switch (event) {
	case BFA_IOC_E_ENABLED:
		bfa_wc_init(&msgq->init_wc, bfa_msgq_init, msgq);
		bfa_wc_up(&msgq->init_wc);
		bfa_fsm_send_event(&msgq->cmdq, CMDQ_E_START);
		bfa_wc_up(&msgq->init_wc);
		bfa_fsm_send_event(&msgq->rspq, RSPQ_E_START);
		/* Drops the initial reference; may fire the callback now. */
		bfa_wc_wait(&msgq->init_wc);
		break;

	case BFA_IOC_E_DISABLED:
		bfa_fsm_send_event(&msgq->cmdq, CMDQ_E_STOP);
		bfa_fsm_send_event(&msgq->rspq, RSPQ_E_STOP);
		break;

	case BFA_IOC_E_FAILED:
		bfa_fsm_send_event(&msgq->cmdq, CMDQ_E_FAIL);
		bfa_fsm_send_event(&msgq->rspq, RSPQ_E_FAIL);
		break;

	default:
		break;
	}
}
587
588u32
589bfa_msgq_meminfo(void)
590{
591 return roundup(BFA_MSGQ_CMDQ_SIZE, BFA_DMA_ALIGN_SZ) +
592 roundup(BFA_MSGQ_RSPQ_SIZE, BFA_DMA_ALIGN_SZ);
593}
594
595void
596bfa_msgq_memclaim(struct bfa_msgq *msgq, u8 *kva, u64 pa)
597{
598 msgq->cmdq.addr.kva = kva;
599 msgq->cmdq.addr.pa = pa;
600
601 kva += roundup(BFA_MSGQ_CMDQ_SIZE, BFA_DMA_ALIGN_SZ);
602 pa += roundup(BFA_MSGQ_CMDQ_SIZE, BFA_DMA_ALIGN_SZ);
603
604 msgq->rspq.addr.kva = kva;
605 msgq->rspq.addr.pa = pa;
606}
607
/*
 * One-time initialization: bind the msgq to its IOC, attach both queue
 * state machines, and register the BFI_MC_MSGQ mailbox ISR plus the IOC
 * event notifier that drives start/stop/fail.
 */
void
bfa_msgq_attach(struct bfa_msgq *msgq, struct bfa_ioc *ioc)
{
	msgq->ioc = ioc;

	bfa_msgq_cmdq_attach(&msgq->cmdq, msgq);
	bfa_msgq_rspq_attach(&msgq->rspq, msgq);

	bfa_nw_ioc_mbox_regisr(msgq->ioc, BFI_MC_MSGQ, bfa_msgq_isr, msgq);
	bfa_q_qe_init(&msgq->ioc_notify);
	bfa_ioc_notify_init(&msgq->ioc_notify, bfa_msgq_notify, msgq);
	bfa_nw_ioc_notify_register(msgq->ioc, &msgq->ioc_notify);
}
621
622void
623bfa_msgq_regisr(struct bfa_msgq *msgq, enum bfi_mclass mc,
624 bfa_msgq_mcfunc_t cbfn, void *cbarg)
625{
626 msgq->rspq.rsphdlr[mc].cbfn = cbfn;
627 msgq->rspq.rsphdlr[mc].cbarg = cbarg;
628}
629
/*
 * Post a command to the command queue.  If the command's entry count
 * fits in the current free space it is copied in immediately, its
 * callback completed with BFA_STATUS_OK, and CMDQ_E_POST rings the
 * doorbell; otherwise it is parked on pending_q until a consumer-index
 * update (bfa_msgq_cmdq_ci_update()) frees room -- or fails with
 * BFA_STATUS_FAILED if the queue stops first.
 */
void
bfa_msgq_cmd_post(struct bfa_msgq *msgq, struct bfa_msgq_cmd_entry *cmd)
{
	if (ntohs(cmd->msg_hdr->num_entries) <=
		BFA_MSGQ_FREE_CNT(&msgq->cmdq)) {
		__cmd_copy(&msgq->cmdq, cmd);
		call_cmdq_ent_cbfn(cmd, BFA_STATUS_OK);
		bfa_fsm_send_event(&msgq->cmdq, CMDQ_E_POST);
	} else {
		list_add_tail(&cmd->qe, &msgq->cmdq.pending_q);
	}
}
642
643void
644bfa_msgq_rsp_copy(struct bfa_msgq *msgq, u8 *buf, size_t buf_len)
645{
646 struct bfa_msgq_rspq *rspq = &msgq->rspq;
647 size_t len = buf_len;
648 size_t to_copy;
649 int ci;
650 u8 *src, *dst;
651
652 ci = rspq->consumer_index;
653 src = (u8 *)rspq->addr.kva;
654 src += (ci * BFI_MSGQ_RSP_ENTRY_SIZE);
655 dst = buf;
656
657 while (len) {
658 to_copy = (len < BFI_MSGQ_RSP_ENTRY_SIZE) ?
659 len : BFI_MSGQ_RSP_ENTRY_SIZE;
660 memcpy(dst, src, to_copy);
661 len -= to_copy;
662 dst += BFI_MSGQ_RSP_ENTRY_SIZE;
663 BFA_MSGQ_INDX_ADD(ci, 1, rspq->depth);
664 src = (u8 *)rspq->addr.kva;
665 src += (ci * BFI_MSGQ_RSP_ENTRY_SIZE);
666 }
667}
668