/* QLogic qed NIC Driver
 * Copyright (c) 2015 QLogic Corporation
 *
 * This software is available under the terms of the GNU General Public License
 * (GPL) Version 2, available from the file COPYING in the main directory of
 * this source tree.
 */

#include <linux/types.h>
#include <asm/byteorder.h>
#include <linux/io.h>
#include <linux/delay.h>
#include <linux/dma-mapping.h>
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/pci.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/string.h>
#include "qed.h"
#include "qed_cxt.h"
#include "qed_dev_api.h"
#include "qed_hsi.h"
#include "qed_hw.h"
#include "qed_int.h"
#include "qed_mcp.h"
#include "qed_reg_addr.h"
#include "qed_sp.h"
#include "qed_sriov.h"
#include "qed_roce.h"

/***************************************************************************
* Structures & Defines
***************************************************************************/

#define SPQ_HIGH_PRI_RESERVE_DEFAULT	(1)
#define SPQ_BLOCK_SLEEP_LENGTH		(1000)

/***************************************************************************
* Blocking Imp. (BLOCK/EBLOCK mode)
***************************************************************************/
static void qed_spq_blocking_cb(struct qed_hwfn *p_hwfn,
				void *cookie,
				union event_ring_data *data, u8 fw_return_code)
{
	struct qed_spq_comp_done *comp_done;

	comp_done = (struct qed_spq_comp_done *)cookie;

	comp_done->done = 0x1;
	comp_done->fw_return_code = fw_return_code;

	/* Make the completion visible to the waiting thread */
	smp_wmb();
}
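
/* Poll for completion of a blocking (BLOCK/EBLOCK) ramrod. Sleeps between
 * polls of the flag set by qed_spq_blocking_cb(); if the first pass times
 * out, requests an MCP doorbell-queue drain and polls one more time
 * before declaring the ramrod stuck.
 */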
static int qed_spq_block(struct qed_hwfn *p_hwfn,
			 struct qed_spq_entry *p_ent,
			 u8 *p_fw_ret)
{
	int sleep_count = SPQ_BLOCK_SLEEP_LENGTH;
	struct qed_spq_comp_done *comp_done;
	int rc;

	comp_done = (struct qed_spq_comp_done *)p_ent->comp_cb.cookie;
	while (sleep_count) {
		/* Validate we receive completion update */
		smp_rmb();
		if (comp_done->done == 1) {
			if (p_fw_ret)
				*p_fw_ret = comp_done->fw_return_code;
			return 0;
		}
		usleep_range(5000, 10000);
		sleep_count--;
	}

	DP_INFO(p_hwfn, "Ramrod is stuck, requesting MCP drain\n");
	rc = qed_mcp_drain(p_hwfn, p_hwfn->p_main_ptt);
	if (rc != 0)
		DP_NOTICE(p_hwfn, "MCP drain failed\n");

	/* Retry after drain */
	sleep_count = SPQ_BLOCK_SLEEP_LENGTH;
	while (sleep_count) {
		/* Validate we receive completion update */
		smp_rmb();
		if (comp_done->done == 1) {
			if (p_fw_ret)
				*p_fw_ret = comp_done->fw_return_code;
			return 0;
		}
		usleep_range(5000, 10000);
		sleep_count--;
	}

	if (comp_done->done == 1) {
		if (p_fw_ret)
			*p_fw_ret = comp_done->fw_return_code;
		return 0;
	}

	DP_NOTICE(p_hwfn, "Ramrod is stuck, MCP drain failed\n");

	return -EBUSY;
}

/***************************************************************************
* SPQ entries inner API
***************************************************************************/
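
/* Fill an SPQ entry's completion callback according to its completion
 * mode; BLOCK/EBLOCK use the internal blocking callback, while CB-mode
 * callers supply their own.
 */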
static int qed_spq_fill_entry(struct qed_hwfn *p_hwfn,
			      struct qed_spq_entry *p_ent)
{
	p_ent->flags = 0;

	switch (p_ent->comp_mode) {
	case QED_SPQ_MODE_EBLOCK:
	case QED_SPQ_MODE_BLOCK:
		p_ent->comp_cb.function = qed_spq_blocking_cb;
		break;
	case QED_SPQ_MODE_CB:
		break;
	default:
		DP_NOTICE(p_hwfn, "Unknown SPQE completion mode %d\n",
			  p_ent->comp_mode);
		return -EINVAL;
	}

	DP_VERBOSE(p_hwfn, QED_MSG_SPQ,
		   "Ramrod header: [CID 0x%08x CMD 0x%02x protocol 0x%02x] Data pointer: [%08x:%08x] Completion Mode: %s\n",
		   p_ent->elem.hdr.cid,
		   p_ent->elem.hdr.cmd_id,
		   p_ent->elem.hdr.protocol_id,
		   p_ent->elem.data_ptr.hi,
		   p_ent->elem.data_ptr.lo,
		   D_TRINE(p_ent->comp_mode, QED_SPQ_MODE_EBLOCK,
			   QED_SPQ_MODE_BLOCK, "MODE_EBLOCK", "MODE_BLOCK",
			   "MODE_CB"));

	return 0;
}

/***************************************************************************
* HSI access
***************************************************************************/
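
/* Initialize the XSTORM connection context used by the SPQ: enable the
 * relevant aggregation flags, select the loopback-TC physical queue and
 * program the SPQ and ConsQ ring base addresses.
 */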
static void qed_spq_hw_initialize(struct qed_hwfn *p_hwfn,
				  struct qed_spq *p_spq)
{
	u16 pq;
	struct qed_cxt_info cxt_info;
	struct core_conn_context *p_cxt;
	union qed_qm_pq_params pq_params;
	int rc;

	cxt_info.iid = p_spq->cid;

	rc = qed_cxt_get_cid_info(p_hwfn, &cxt_info);
	if (rc < 0) {
		DP_NOTICE(p_hwfn, "Cannot find context info for cid=%d\n",
			  p_spq->cid);
		return;
	}

	p_cxt = cxt_info.p_cxt;

	SET_FIELD(p_cxt->xstorm_ag_context.flags10,
		  XSTORM_CORE_CONN_AG_CTX_DQ_CF_EN, 1);
	SET_FIELD(p_cxt->xstorm_ag_context.flags1,
		  XSTORM_CORE_CONN_AG_CTX_DQ_CF_ACTIVE, 1);
	SET_FIELD(p_cxt->xstorm_ag_context.flags9,
		  XSTORM_CORE_CONN_AG_CTX_CONSOLID_PROD_CF_EN, 1);

	/* QM physical queue */
	memset(&pq_params, 0, sizeof(pq_params));
	pq_params.core.tc = LB_TC;
	pq = qed_get_qm_pq(p_hwfn, PROTOCOLID_CORE, &pq_params);
	p_cxt->xstorm_ag_context.physical_q0 = cpu_to_le16(pq);

	p_cxt->xstorm_st_context.spq_base_lo =
		DMA_LO_LE(p_spq->chain.p_phys_addr);
	p_cxt->xstorm_st_context.spq_base_hi =
		DMA_HI_LE(p_spq->chain.p_phys_addr);

	DMA_REGPAIR_LE(p_cxt->xstorm_st_context.consolid_base_addr,
		       p_hwfn->p_consq->chain.p_phys_addr);
}
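
/* Copy a single slow-path element into the SPQ chain and ring the XCM
 * doorbell so firmware will process it.
 */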
static int qed_spq_hw_post(struct qed_hwfn *p_hwfn,
			   struct qed_spq *p_spq, struct qed_spq_entry *p_ent)
{
	struct qed_chain *p_chain = &p_hwfn->p_spq->chain;
	u16 echo = qed_chain_get_prod_idx(p_chain);
	struct slow_path_element *elem;
	struct core_db_data db;

	p_ent->elem.hdr.echo = cpu_to_le16(echo);
	elem = qed_chain_produce(p_chain);
	if (!elem) {
		DP_NOTICE(p_hwfn, "Failed to produce from SPQ chain\n");
		return -EINVAL;
	}

	*elem = p_ent->elem; /* Struct assignment */

	/* Send a doorbell on the slow hwfn session */
	memset(&db, 0, sizeof(db));
	SET_FIELD(db.params, CORE_DB_DATA_DEST, DB_DEST_XCM);
	SET_FIELD(db.params, CORE_DB_DATA_AGG_CMD, DB_AGG_CMD_SET);
	SET_FIELD(db.params, CORE_DB_DATA_AGG_VAL_SEL,
		  DQ_XCM_CORE_SPQ_PROD_CMD);
	db.agg_flags = DQ_XCM_CORE_DQ_CF_CMD;
	db.spq_prod = cpu_to_le16(qed_chain_get_prod_idx(p_chain));

	/* Make sure the SPQE is updated before the doorbell */
	wmb();

	DOORBELL(p_hwfn, qed_db_addr(p_spq->cid, DQ_DEMS_LEGACY), *(u32 *)&db);

	/* Make sure the doorbell was rung */
	wmb();

	DP_VERBOSE(p_hwfn, QED_MSG_SPQ,
		   "Doorbelled [0x%08x, CID 0x%08x] with Flags: %02x agg_params: %02x, prod: %04x\n",
		   qed_db_addr(p_spq->cid, DQ_DEMS_LEGACY),
		   p_spq->cid, db.params, db.agg_flags,
		   qed_chain_get_prod_idx(p_chain));

	return 0;
}

/***************************************************************************
* Asynchronous events
***************************************************************************/
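
/* Dispatch an asynchronous EQ entry to the protocol that owns it */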
static int
qed_async_event_completion(struct qed_hwfn *p_hwfn,
			   struct event_ring_entry *p_eqe)
{
	switch (p_eqe->protocol_id) {
	case PROTOCOLID_ROCE:
		qed_async_roce_event(p_hwfn, p_eqe);
		return 0;
	case PROTOCOLID_COMMON:
		return qed_sriov_eqe_event(p_hwfn,
					   p_eqe->opcode,
					   p_eqe->echo, &p_eqe->data);
	default:
		DP_NOTICE(p_hwfn,
			  "Unknown Async completion for protocol: %d\n",
			  p_eqe->protocol_id);
		return -EINVAL;
	}
}

/***************************************************************************
* EQ API
***************************************************************************/
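
/* Let firmware know how far the driver has processed the EQ, by writing
 * the chain's producer index into USTORM RAM.
 */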
void qed_eq_prod_update(struct qed_hwfn *p_hwfn, u16 prod)
{
	u32 addr = GTT_BAR0_MAP_REG_USDM_RAM +
		   USTORM_EQE_CONS_OFFSET(p_hwfn->rel_pf_id);

	REG_WR16(p_hwfn, addr, prod);

	/* Keep prod updates ordered */
	mmiowb();
}
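
/* Slowpath-status-block callback: consume every EQ element the firmware
 * has produced, dispatching each to either the async handler or the SPQ
 * completion flow.
 */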
int qed_eq_completion(struct qed_hwfn *p_hwfn, void *cookie)
{
	struct qed_eq *p_eq = cookie;
	struct qed_chain *p_chain = &p_eq->chain;
	int rc = 0;

	/* Take a snapshot of the FW consumer */
	u16 fw_cons_idx = le16_to_cpu(*p_eq->p_fw_cons);

	DP_VERBOSE(p_hwfn, QED_MSG_SPQ, "fw_cons_idx %x\n", fw_cons_idx);

	/* Need to guarantee the fw_cons index we use points to a usable
	 * element (to comply with our chain), so if it lands on a page
	 * boundary, skip over the unusable page-link elements.
	 */
	if ((fw_cons_idx & qed_chain_get_usable_per_page(p_chain)) ==
	    qed_chain_get_usable_per_page(p_chain))
		fw_cons_idx += qed_chain_get_unusable_per_page(p_chain);

	/* Complete current segment of eq entries */
	while (fw_cons_idx != qed_chain_get_cons_idx(p_chain)) {
		struct event_ring_entry *p_eqe = qed_chain_consume(p_chain);

		if (!p_eqe) {
			rc = -EINVAL;
			break;
		}

		DP_VERBOSE(p_hwfn, QED_MSG_SPQ,
			   "op %x prot %x res0 %x echo %x fwret %x flags %x\n",
			   p_eqe->opcode,
			   p_eqe->protocol_id,
			   p_eqe->reserved0,
			   le16_to_cpu(p_eqe->echo),
			   p_eqe->fw_return_code,
			   p_eqe->flags);

		if (GET_FIELD(p_eqe->flags, EVENT_RING_ENTRY_ASYNC)) {
			if (qed_async_event_completion(p_hwfn, p_eqe))
				rc = -EINVAL;
		} else if (qed_spq_completion(p_hwfn,
					      p_eqe->echo,
					      p_eqe->fw_return_code,
					      &p_eqe->data)) {
			rc = -EINVAL;
		}

		qed_chain_recycle_consumed(p_chain);
	}

	qed_eq_prod_update(p_hwfn, qed_chain_get_prod_idx(p_chain));

	return rc;
}
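
/* Allocate the EQ struct and its chain, and register its completion
 * callback on the slowpath status block.
 */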
struct qed_eq *qed_eq_alloc(struct qed_hwfn *p_hwfn, u16 num_elem)
{
	struct qed_eq *p_eq;

	/* Allocate EQ struct */
	p_eq = kzalloc(sizeof(*p_eq), GFP_KERNEL);
	if (!p_eq)
		return NULL;

	/* Allocate and initialize EQ chain */
	if (qed_chain_alloc(p_hwfn->cdev,
			    QED_CHAIN_USE_TO_PRODUCE,
			    QED_CHAIN_MODE_PBL,
			    QED_CHAIN_CNT_TYPE_U16,
			    num_elem,
			    sizeof(union event_ring_element),
			    &p_eq->chain))
		goto eq_allocate_fail;

	/* Register EQ completion on the SP SB */
	qed_int_register_cb(p_hwfn, qed_eq_completion,
			    p_eq, &p_eq->eq_sb_index, &p_eq->p_fw_cons);

	return p_eq;

eq_allocate_fail:
	qed_eq_free(p_hwfn, p_eq);
	return NULL;
}

void qed_eq_setup(struct qed_hwfn *p_hwfn, struct qed_eq *p_eq)
{
	qed_chain_reset(&p_eq->chain);
}

void qed_eq_free(struct qed_hwfn *p_hwfn, struct qed_eq *p_eq)
{
	if (!p_eq)
		return;
	qed_chain_free(p_hwfn->cdev, &p_eq->chain);
	kfree(p_eq);
}

/***************************************************************************
* CQE API - manipulate EQ functionality
***************************************************************************/
static int qed_cqe_completion(struct qed_hwfn *p_hwfn,
			      struct eth_slow_path_rx_cqe *cqe,
			      enum protocol_type protocol)
{
	if (IS_VF(p_hwfn->cdev))
		return 0;

	/* @@@tmp - it's possible we'll eventually want to handle some
	 * actual commands that can arrive here, but for now this is only
	 * used to complete the ramrod using the echo value on the cqe.
	 */
	return qed_spq_completion(p_hwfn, cqe->echo, 0, NULL);
}

int qed_eth_cqe_completion(struct qed_hwfn *p_hwfn,
			   struct eth_slow_path_rx_cqe *cqe)
{
	int rc;

	rc = qed_cqe_completion(p_hwfn, cqe, PROTOCOLID_ETH);
	if (rc)
		DP_NOTICE(p_hwfn,
			  "Failed to handle RXQ CQE [cmd 0x%02x]\n",
			  cqe->ramrod_cmd_id);

	return rc;
}

/***************************************************************************
* Slow hwfn Queue (spq)
***************************************************************************/
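
/* Initialize the SPQ at load time: build the free pool over the
 * pre-allocated entries, reset statistics, acquire the SPQ CID and
 * program the HW context.
 */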
void qed_spq_setup(struct qed_hwfn *p_hwfn)
{
	struct qed_spq *p_spq = p_hwfn->p_spq;
	struct qed_spq_entry *p_virt = NULL;
	dma_addr_t p_phys = 0;
	u32 i, capacity;

	INIT_LIST_HEAD(&p_spq->pending);
	INIT_LIST_HEAD(&p_spq->completion_pending);
	INIT_LIST_HEAD(&p_spq->free_pool);
	INIT_LIST_HEAD(&p_spq->unlimited_pending);
	spin_lock_init(&p_spq->lock);

	/* SPQ empty pool */
	p_phys = p_spq->p_phys + offsetof(struct qed_spq_entry, ramrod);
	p_virt = p_spq->p_virt;

	capacity = qed_chain_get_capacity(&p_spq->chain);
	for (i = 0; i < capacity; i++) {
		DMA_REGPAIR_LE(p_virt->elem.data_ptr, p_phys);

		list_add_tail(&p_virt->list, &p_spq->free_pool);

		p_virt++;
		p_phys += sizeof(struct qed_spq_entry);
	}

	/* Statistics */
	p_spq->normal_count = 0;
	p_spq->comp_count = 0;
	p_spq->comp_sent_count = 0;
	p_spq->unlimited_pending_count = 0;

	bitmap_zero(p_spq->p_comp_bitmap, SPQ_RING_SIZE);
	p_spq->comp_bitmap_idx = 0;

	/* SPQ cid, cannot fail */
	qed_cxt_acquire_cid(p_hwfn, PROTOCOLID_CORE, &p_spq->cid);
	qed_spq_hw_initialize(p_hwfn, p_spq);

	/* Reset the chain itself */
	qed_chain_reset(&p_spq->chain);
}
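
/* Allocate the SPQ struct, its ring and the coherent buffer holding the
 * SPQ entries (including their ramrod data).
 */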
int qed_spq_alloc(struct qed_hwfn *p_hwfn)
{
	struct qed_spq_entry *p_virt = NULL;
	struct qed_spq *p_spq = NULL;
	dma_addr_t p_phys = 0;
	u32 capacity;

	/* SPQ struct */
	p_spq = kzalloc(sizeof(struct qed_spq), GFP_KERNEL);
	if (!p_spq)
		return -ENOMEM;

	/* SPQ ring */
	if (qed_chain_alloc(p_hwfn->cdev,
			    QED_CHAIN_USE_TO_PRODUCE,
			    QED_CHAIN_MODE_SINGLE,
			    QED_CHAIN_CNT_TYPE_U16,
			    0, /* N/A when the mode is SINGLE */
			    sizeof(struct slow_path_element),
			    &p_spq->chain))
		goto spq_allocate_fail;

	/* Allocate and fill the SPQ elements (incl. ramrod data list) */
	capacity = qed_chain_get_capacity(&p_spq->chain);
	p_virt = dma_alloc_coherent(&p_hwfn->cdev->pdev->dev,
				    capacity * sizeof(struct qed_spq_entry),
				    &p_phys, GFP_KERNEL);
	if (!p_virt)
		goto spq_allocate_fail;

	p_spq->p_virt = p_virt;
	p_spq->p_phys = p_phys;
	p_hwfn->p_spq = p_spq;

	return 0;

spq_allocate_fail:
	qed_chain_free(p_hwfn->cdev, &p_spq->chain);
	kfree(p_spq);
	return -ENOMEM;
}

void qed_spq_free(struct qed_hwfn *p_hwfn)
{
	struct qed_spq *p_spq = p_hwfn->p_spq;
	u32 capacity;

	if (!p_spq)
		return;

	if (p_spq->p_virt) {
		capacity = qed_chain_get_capacity(&p_spq->chain);
		dma_free_coherent(&p_hwfn->cdev->pdev->dev,
				  capacity *
				  sizeof(struct qed_spq_entry),
				  p_spq->p_virt, p_spq->p_phys);
	}

	qed_chain_free(p_hwfn->cdev, &p_spq->chain);
	kfree(p_spq);
}
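
/* Acquire an SPQ entry: reuse one from the free pool when possible,
 * otherwise allocate one (GFP_ATOMIC) destined for the
 * unlimited_pending list.
 */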
int qed_spq_get_entry(struct qed_hwfn *p_hwfn, struct qed_spq_entry **pp_ent)
{
	struct qed_spq *p_spq = p_hwfn->p_spq;
	struct qed_spq_entry *p_ent = NULL;
	int rc = 0;

	spin_lock_bh(&p_spq->lock);

	if (list_empty(&p_spq->free_pool)) {
		p_ent = kzalloc(sizeof(*p_ent), GFP_ATOMIC);
		if (!p_ent) {
			DP_NOTICE(p_hwfn,
				  "Failed to allocate an SPQ entry for a pending ramrod\n");
			rc = -ENOMEM;
			goto out_unlock;
		}
		p_ent->queue = &p_spq->unlimited_pending;
	} else {
		p_ent = list_first_entry(&p_spq->free_pool,
					 struct qed_spq_entry, list);
		list_del(&p_ent->list);
		p_ent->queue = &p_spq->pending;
	}

	*pp_ent = p_ent;

out_unlock:
	spin_unlock_bh(&p_spq->lock);
	return rc;
}

/* Return an entry to the SPQ free pool; the SPQ lock must be held */
static void __qed_spq_return_entry(struct qed_hwfn *p_hwfn,
				   struct qed_spq_entry *p_ent)
{
	list_add_tail(&p_ent->list, &p_hwfn->p_spq->free_pool);
}

void qed_spq_return_entry(struct qed_hwfn *p_hwfn, struct qed_spq_entry *p_ent)
{
	spin_lock_bh(&p_hwfn->p_spq->lock);
	__qed_spq_return_entry(p_hwfn, p_ent);
	spin_unlock_bh(&p_hwfn->p_spq->lock);
}

/**
 * qed_spq_add_entry - adds a new entry to the pending list
 *
 * Adds an entry to the pending list if there is room (an empty element
 * is available in the free_pool), or else places the entry in the
 * unlimited_pending pool. Should be called while holding the SPQ lock.
 */
static int qed_spq_add_entry(struct qed_hwfn *p_hwfn,
			     struct qed_spq_entry *p_ent,
			     enum spq_priority priority)
{
	struct qed_spq *p_spq = p_hwfn->p_spq;

	if (p_ent->queue == &p_spq->unlimited_pending) {
		if (list_empty(&p_spq->free_pool)) {
			list_add_tail(&p_ent->list, &p_spq->unlimited_pending);
			p_spq->unlimited_pending_count++;

			return 0;
		} else {
			struct qed_spq_entry *p_en2;

			p_en2 = list_first_entry(&p_spq->free_pool,
						 struct qed_spq_entry, list);
			list_del(&p_en2->list);

			/* Copy the ring element physical pointer to the new
			 * entry, since we are about to override the entire
			 * ring entry and don't want to lose the pointer.
			 */
			p_ent->elem.data_ptr = p_en2->elem.data_ptr;

			*p_en2 = *p_ent;

			/* EBLOCK responsible to free the allocated p_ent */
			if (p_ent->comp_mode != QED_SPQ_MODE_EBLOCK)
				kfree(p_ent);

			p_ent = p_en2;
		}
	}

	/* Entry is to be placed in 'pending' queue */
	switch (priority) {
	case QED_SPQ_PRIORITY_NORMAL:
		list_add_tail(&p_ent->list, &p_spq->pending);
		p_spq->normal_count++;
		break;
	case QED_SPQ_PRIORITY_HIGH:
		list_add(&p_ent->list, &p_spq->pending);
		p_spq->high_count++;
		break;
	default:
		return -EINVAL;
	}

	return 0;
}

/***************************************************************************
* Accessor
***************************************************************************/
u32 qed_spq_get_cid(struct qed_hwfn *p_hwfn)
{
	if (!p_hwfn->p_spq)
		return 0xffffffff;	/* illegal */
	return p_hwfn->p_spq->cid;
}

/***************************************************************************
* Posting new Ramrods
***************************************************************************/
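
/* Post pending entries to HW as long as the chain can accommodate them
 * beyond the requested reserve.
 */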
static int qed_spq_post_list(struct qed_hwfn *p_hwfn,
			     struct list_head *head, u32 keep_reserve)
{
	struct qed_spq *p_spq = p_hwfn->p_spq;
	int rc;

	while (qed_chain_get_elem_left(&p_spq->chain) > keep_reserve &&
	       !list_empty(head)) {
		struct qed_spq_entry *p_ent =
			list_first_entry(head, struct qed_spq_entry, list);
		list_del(&p_ent->list);
		list_add_tail(&p_ent->list, &p_spq->completion_pending);
		p_spq->comp_sent_count++;

		rc = qed_spq_hw_post(p_hwfn, p_spq, p_ent);
		if (rc) {
			list_del(&p_ent->list);
			__qed_spq_return_entry(p_hwfn, p_ent);
			return rc;
		}
	}

	return 0;
}
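
/* Promote entries from unlimited_pending into pending while free pool
 * entries remain, then post the pending list to HW.
 */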
static int qed_spq_pend_post(struct qed_hwfn *p_hwfn)
{
	struct qed_spq *p_spq = p_hwfn->p_spq;
	struct qed_spq_entry *p_ent = NULL;

	while (!list_empty(&p_spq->free_pool)) {
		if (list_empty(&p_spq->unlimited_pending))
			break;

		p_ent = list_first_entry(&p_spq->unlimited_pending,
					 struct qed_spq_entry, list);
		if (!p_ent)
			return -EINVAL;

		list_del(&p_ent->list);

		qed_spq_add_entry(p_hwfn, p_ent, p_ent->priority);
	}

	return qed_spq_post_list(p_hwfn, &p_spq->pending,
				 SPQ_HIGH_PRI_RESERVE_DEFAULT);
}
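
/* Post a single ramrod; for EBLOCK mode, also block until the
 * completion arrives (or the ramrod is declared stuck).
 */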
int qed_spq_post(struct qed_hwfn *p_hwfn,
		 struct qed_spq_entry *p_ent, u8 *fw_return_code)
{
	int rc = 0;
	struct qed_spq *p_spq = p_hwfn ? p_hwfn->p_spq : NULL;
	bool b_ret_ent = true;

	if (!p_hwfn)
		return -EINVAL;

	if (!p_ent) {
		DP_NOTICE(p_hwfn, "Got a NULL pointer\n");
		return -EINVAL;
	}

	/* Complete the entry */
	rc = qed_spq_fill_entry(p_hwfn, p_ent);

	spin_lock_bh(&p_spq->lock);

	/* Check return value after LOCK is taken for cleaner error flow */
	if (rc)
		goto spq_post_fail;

	/* Add the request to the pending queue */
	rc = qed_spq_add_entry(p_hwfn, p_ent, p_ent->priority);
	if (rc)
		goto spq_post_fail;

	rc = qed_spq_pend_post(p_hwfn);
	if (rc) {
		/* Since it's possible that pending failed for a different
		 * entry [although unlikely], the failed entry was already
		 * dealt with; No need to return it here.
		 */
		b_ret_ent = false;
		goto spq_post_fail;
	}

	spin_unlock_bh(&p_spq->lock);

	if (p_ent->comp_mode == QED_SPQ_MODE_EBLOCK) {
		/* For entries in QED BLOCK mode, the completion code cannot
		 * perform the necessary cleanup - if it did, we couldn't
		 * access p_ent here to see whether it's successful or not.
		 * Thus, after gaining the answer perform the cleanup here.
		 */
		rc = qed_spq_block(p_hwfn, p_ent, fw_return_code);

		if (p_ent->queue == &p_spq->unlimited_pending) {
			/* This is an allocated p_ent which does not need to
			 * return to pool.
			 */
			kfree(p_ent);
			return rc;
		}

		if (rc)
			goto spq_post_fail2;

		/* Return to pool */
		qed_spq_return_entry(p_hwfn, p_ent);
	}
	return rc;

spq_post_fail2:
	spin_lock_bh(&p_spq->lock);
	list_del(&p_ent->list);
	qed_chain_return_produced(&p_spq->chain);

spq_post_fail:
	/* Return to the free pool */
	if (b_ret_ent)
		__qed_spq_return_entry(p_hwfn, p_ent);
	spin_unlock_bh(&p_spq->lock);

	return rc;
}
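
/* EQ-driven completion of a previously-posted ramrod: match the echo
 * against the completion_pending list, return ring credit (handling
 * out-of-order completions via the bitmap), then invoke the entry's
 * completion callback.
 */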
int qed_spq_completion(struct qed_hwfn *p_hwfn,
		       __le16 echo,
		       u8 fw_return_code,
		       union event_ring_data *p_data)
{
	struct qed_spq *p_spq;
	struct qed_spq_entry *p_ent = NULL;
	struct qed_spq_entry *tmp;
	struct qed_spq_entry *found = NULL;
	int rc;

	if (!p_hwfn)
		return -EINVAL;

	p_spq = p_hwfn->p_spq;
	if (!p_spq)
		return -EINVAL;

	spin_lock_bh(&p_spq->lock);
	list_for_each_entry_safe(p_ent, tmp, &p_spq->completion_pending, list) {
		if (p_ent->elem.hdr.echo == echo) {
			u16 pos = le16_to_cpu(echo) % SPQ_RING_SIZE;

			list_del(&p_ent->list);

			/* Avoid overriding of SPQ entries when getting
			 * out-of-order completions, by marking the completions
			 * in a bitmap and increasing the chain consumer only
			 * for the first successive completed entries.
			 */
			__set_bit(pos, p_spq->p_comp_bitmap);

			while (test_bit(p_spq->comp_bitmap_idx,
					p_spq->p_comp_bitmap)) {
				__clear_bit(p_spq->comp_bitmap_idx,
					    p_spq->p_comp_bitmap);
				p_spq->comp_bitmap_idx++;
				qed_chain_return_produced(&p_spq->chain);
			}

			p_spq->comp_count++;
			found = p_ent;
			break;
		}

		/* This is relatively uncommon - depends on scenarios
		 * which have multiple per-PF sent ramrods.
		 */
		DP_VERBOSE(p_hwfn, QED_MSG_SPQ,
			   "Got completion for echo %04x - doesn't match echo %04x in completion pending list\n",
			   le16_to_cpu(echo),
			   le16_to_cpu(p_ent->elem.hdr.echo));
	}

	/* Release lock before callback, as callback may post
	 * an additional ramrod.
	 */
	spin_unlock_bh(&p_spq->lock);

	if (!found) {
		DP_NOTICE(p_hwfn,
			  "Failed to find an entry this EQE [echo %04x] completes\n",
			  le16_to_cpu(echo));
		return -EEXIST;
	}

	DP_VERBOSE(p_hwfn, QED_MSG_SPQ,
		   "Complete EQE [echo %04x]: func %p cookie %p\n",
		   le16_to_cpu(echo),
		   p_ent->comp_cb.function, p_ent->comp_cb.cookie);
	if (found->comp_cb.function)
		found->comp_cb.function(p_hwfn, found->comp_cb.cookie, p_data,
					fw_return_code);
	else
		DP_VERBOSE(p_hwfn,
			   QED_MSG_SPQ,
			   "Got a completion without a callback function\n");

	if ((found->comp_mode != QED_SPQ_MODE_EBLOCK) ||
	    (found->queue == &p_spq->unlimited_pending))
		/* EBLOCK is responsible for returning its own entry into the
		 * free list, unless it originally added the entry into the
		 * unlimited pending list.
		 */
		qed_spq_return_entry(p_hwfn, found);

	/* Attempt to post pending requests */
	spin_lock_bh(&p_spq->lock);
	rc = qed_spq_pend_post(p_hwfn);
	spin_unlock_bh(&p_spq->lock);

	return rc;
}
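
/* ConsQ (consolidation queue) allocation/setup/free */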
struct qed_consq *qed_consq_alloc(struct qed_hwfn *p_hwfn)
{
	struct qed_consq *p_consq;

	/* Allocate ConsQ struct */
	p_consq = kzalloc(sizeof(*p_consq), GFP_KERNEL);
	if (!p_consq)
		return NULL;

	/* Allocate and initialize the ConsQ chain */
	if (qed_chain_alloc(p_hwfn->cdev,
			    QED_CHAIN_USE_TO_PRODUCE,
			    QED_CHAIN_MODE_PBL,
			    QED_CHAIN_CNT_TYPE_U16,
			    QED_CHAIN_PAGE_SIZE / 0x80,
			    0x80, &p_consq->chain))
		goto consq_allocate_fail;

	return p_consq;

consq_allocate_fail:
	qed_consq_free(p_hwfn, p_consq);
	return NULL;
}

void qed_consq_setup(struct qed_hwfn *p_hwfn, struct qed_consq *p_consq)
{
	qed_chain_reset(&p_consq->chain);
}

void qed_consq_free(struct qed_hwfn *p_hwfn, struct qed_consq *p_consq)
{
	if (!p_consq)
		return;
	qed_chain_free(p_hwfn->cdev, &p_consq->chain);
	kfree(p_consq);
}