/* QLogic qed NIC Driver
 * Copyright (c) 2015-2017  QLogic Corporation
 */

#include <linux/types.h>
#include <asm/byteorder.h>
#include <linux/io.h>
#include <linux/delay.h>
#include <linux/dma-mapping.h>
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/pci.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/string.h>
#include "qed.h"
#include "qed_cxt.h"
#include "qed_dev_api.h"
#include "qed_hsi.h"
#include "qed_hw.h"
#include "qed_int.h"
#include "qed_iscsi.h"
#include "qed_mcp.h"
#include "qed_ooo.h"
#include "qed_reg_addr.h"
#include "qed_sp.h"
#include "qed_sriov.h"
#include "qed_rdma.h"

/***************************************************************************
 * Structures & Definitions
 ***************************************************************************/

#define SPQ_HIGH_PRI_RESERVE_DEFAULT    (1)

#define SPQ_BLOCK_DELAY_MAX_ITER        (10)
#define SPQ_BLOCK_DELAY_US              (10)
#define SPQ_BLOCK_SLEEP_MAX_ITER        (1000)
#define SPQ_BLOCK_SLEEP_MS              (5)

/***************************************************************************
 * Blocking Imp. (BLOCK/EBLOCK mode)
 ***************************************************************************/
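/* Completion callback installed for BLOCK/EBLOCK-mode ramrods; it records the
 * FW return code and signals the thread polling in __qed_spq_block().
 */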
static void qed_spq_blocking_cb(struct qed_hwfn *p_hwfn,
                                void *cookie,
                                union event_ring_data *data, u8 fw_return_code)
{
        struct qed_spq_comp_done *comp_done;

        comp_done = (struct qed_spq_comp_done *)cookie;

        comp_done->fw_return_code = fw_return_code;

        /* Make sure the completion is visible to the waiting thread */
        smp_store_release(&comp_done->done, 0x1);
}
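
/* Poll for the completion flag set by qed_spq_blocking_cb(), busy-waiting or
 * sleeping between iterations until the iteration budget is exhausted.
 */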
static int __qed_spq_block(struct qed_hwfn *p_hwfn,
                           struct qed_spq_entry *p_ent,
                           u8 *p_fw_ret, bool sleep_between_iter)
{
        struct qed_spq_comp_done *comp_done;
        u32 iter_cnt;

        comp_done = (struct qed_spq_comp_done *)p_ent->comp_cb.cookie;
        iter_cnt = sleep_between_iter ? SPQ_BLOCK_SLEEP_MAX_ITER
                                      : SPQ_BLOCK_DELAY_MAX_ITER;

        while (iter_cnt--) {
                /* Validate we receive the completion update */
                if (smp_load_acquire(&comp_done->done) == 1) {
                        if (p_fw_ret)
                                *p_fw_ret = comp_done->fw_return_code;
                        return 0;
                }

                if (sleep_between_iter)
                        msleep(SPQ_BLOCK_SLEEP_MS);
                else
                        udelay(SPQ_BLOCK_DELAY_US);
        }

        return -EBUSY;
}
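
/* Wait for a ramrod to complete: a quick busy-wait poll (optionally skipped),
 * then a sleeping poll, and as a last resort an MCP drain request followed by
 * one more sleeping poll.
 */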
static int qed_spq_block(struct qed_hwfn *p_hwfn,
                         struct qed_spq_entry *p_ent,
                         u8 *p_fw_ret, bool skip_quick_poll)
{
        struct qed_spq_comp_done *comp_done;
        struct qed_ptt *p_ptt;
        int rc;

        /* A relatively short polling period w/o sleeping, to allow the FW to
         * complete the ramrod and thus possibly to avoid the following sleeps.
         */
        if (!skip_quick_poll) {
                rc = __qed_spq_block(p_hwfn, p_ent, p_fw_ret, false);
                if (!rc)
                        return 0;
        }

        /* Move to polling with a sleeping period between iterations */
        rc = __qed_spq_block(p_hwfn, p_ent, p_fw_ret, true);
        if (!rc)
                return 0;

        p_ptt = qed_ptt_acquire(p_hwfn);
        if (!p_ptt) {
                DP_NOTICE(p_hwfn, "Failed to acquire PTT\n");
                return -EAGAIN;
        }

        DP_INFO(p_hwfn, "Ramrod is stuck, requesting MCP drain\n");
        rc = qed_mcp_drain(p_hwfn, p_ptt);
        if (rc) {
                DP_NOTICE(p_hwfn, "MCP drain failed\n");
                goto err;
        }

        /* Retry after drain */
        rc = __qed_spq_block(p_hwfn, p_ent, p_fw_ret, true);
        if (!rc)
                goto out;

        /* If the ramrod still hasn't completed, it is genuinely stuck */
        comp_done = (struct qed_spq_comp_done *)p_ent->comp_cb.cookie;
        if (comp_done->done != 1)
                goto err;

        if (p_fw_ret)
                *p_fw_ret = comp_done->fw_return_code;
out:
        qed_ptt_release(p_hwfn, p_ptt);
        return 0;

err:
        qed_ptt_release(p_hwfn, p_ptt);
        DP_NOTICE(p_hwfn,
                  "Ramrod is stuck [CID %08x cmd %02x protocol %02x echo %04x]\n",
                  le32_to_cpu(p_ent->elem.hdr.cid),
                  p_ent->elem.hdr.cmd_id,
                  p_ent->elem.hdr.protocol_id,
                  le16_to_cpu(p_ent->elem.hdr.echo));

        return -EBUSY;
}

/***************************************************************************
 * SPQ entries inner API
 ***************************************************************************/
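/* Attach the completion callback matching the entry's completion mode and log
 * the ramrod header before posting.
 */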
static int qed_spq_fill_entry(struct qed_hwfn *p_hwfn,
                              struct qed_spq_entry *p_ent)
{
        p_ent->flags = 0;

        switch (p_ent->comp_mode) {
        case QED_SPQ_MODE_EBLOCK:
        case QED_SPQ_MODE_BLOCK:
                p_ent->comp_cb.function = qed_spq_blocking_cb;
                break;
        case QED_SPQ_MODE_CB:
                break;
        default:
                DP_NOTICE(p_hwfn, "Unknown SPQE completion mode %d\n",
                          p_ent->comp_mode);
                return -EINVAL;
        }

        DP_VERBOSE(p_hwfn, QED_MSG_SPQ,
                   "Ramrod header: [CID 0x%08x CMD 0x%02x protocol 0x%02x] Data pointer: [%08x:%08x] Completion Mode: %s\n",
                   p_ent->elem.hdr.cid,
                   p_ent->elem.hdr.cmd_id,
                   p_ent->elem.hdr.protocol_id,
                   p_ent->elem.data_ptr.hi,
                   p_ent->elem.data_ptr.lo,
                   D_TRINE(p_ent->comp_mode, QED_SPQ_MODE_EBLOCK,
                           QED_SPQ_MODE_BLOCK, "MODE_EBLOCK", "MODE_BLOCK",
                           "MODE_CB"));

        return 0;
}

/***************************************************************************
 * HSI access
 ***************************************************************************/
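/* Program the CORE connection context of the SPQ: enable the relevant XSTORM
 * aggregation flags and set the SPQ and consolidation-queue base addresses.
 */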
static void qed_spq_hw_initialize(struct qed_hwfn *p_hwfn,
                                  struct qed_spq *p_spq)
{
        struct e4_core_conn_context *p_cxt;
        struct qed_cxt_info cxt_info;
        u16 physical_q;
        int rc;

        cxt_info.iid = p_spq->cid;

        rc = qed_cxt_get_cid_info(p_hwfn, &cxt_info);

        if (rc < 0) {
                DP_NOTICE(p_hwfn, "Cannot find context info for cid=%d\n",
                          p_spq->cid);
                return;
        }

        p_cxt = cxt_info.p_cxt;

        SET_FIELD(p_cxt->xstorm_ag_context.flags10,
                  E4_XSTORM_CORE_CONN_AG_CTX_DQ_CF_EN, 1);
        SET_FIELD(p_cxt->xstorm_ag_context.flags1,
                  E4_XSTORM_CORE_CONN_AG_CTX_DQ_CF_ACTIVE, 1);
        SET_FIELD(p_cxt->xstorm_ag_context.flags9,
                  E4_XSTORM_CORE_CONN_AG_CTX_CONSOLID_PROD_CF_EN, 1);

        /* QM physical queue */
        physical_q = qed_get_cm_pq_idx(p_hwfn, PQ_FLAGS_LB);
        p_cxt->xstorm_ag_context.physical_q0 = cpu_to_le16(physical_q);

        p_cxt->xstorm_st_context.spq_base_lo =
                DMA_LO_LE(p_spq->chain.p_phys_addr);
        p_cxt->xstorm_st_context.spq_base_hi =
                DMA_HI_LE(p_spq->chain.p_phys_addr);

        DMA_REGPAIR_LE(p_cxt->xstorm_st_context.consolid_base_addr,
                       p_hwfn->p_consq->chain.p_phys_addr);
}
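
/* Copy an SPQ element into the chain and ring the XCM doorbell; the barriers
 * order the element write against the doorbell write.
 */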
static int qed_spq_hw_post(struct qed_hwfn *p_hwfn,
                           struct qed_spq *p_spq, struct qed_spq_entry *p_ent)
{
        struct qed_chain *p_chain = &p_hwfn->p_spq->chain;
        u16 echo = qed_chain_get_prod_idx(p_chain);
        struct slow_path_element *elem;
        struct core_db_data db;

        p_ent->elem.hdr.echo = cpu_to_le16(echo);
        elem = qed_chain_produce(p_chain);
        if (!elem) {
                DP_NOTICE(p_hwfn, "Failed to produce from SPQ chain\n");
                return -EINVAL;
        }

        *elem = p_ent->elem; /* struct assignment */

        /* Send a doorbell on the slow hwfn session */
        memset(&db, 0, sizeof(db));
        SET_FIELD(db.params, CORE_DB_DATA_DEST, DB_DEST_XCM);
        SET_FIELD(db.params, CORE_DB_DATA_AGG_CMD, DB_AGG_CMD_SET);
        SET_FIELD(db.params, CORE_DB_DATA_AGG_VAL_SEL,
                  DQ_XCM_CORE_SPQ_PROD_CMD);
        db.agg_flags = DQ_XCM_CORE_DQ_CF_CMD;
        db.spq_prod = cpu_to_le16(qed_chain_get_prod_idx(p_chain));

        /* Make sure the SPQE is updated before the doorbell */
        wmb();

        DOORBELL(p_hwfn, qed_db_addr(p_spq->cid, DQ_DEMS_LEGACY), *(u32 *)&db);

        /* Make sure the doorbell was rung */
        wmb();

        DP_VERBOSE(p_hwfn, QED_MSG_SPQ,
                   "Doorbelled [0x%08x, CID 0x%08x] with Flags: %02x agg_params: %02x, prod: %04x\n",
                   qed_db_addr(p_spq->cid, DQ_DEMS_LEGACY),
                   p_spq->cid, db.params, db.agg_flags,
                   qed_chain_get_prod_idx(p_chain));

        return 0;
}

/***************************************************************************
 * Asynchronous events
 ***************************************************************************/
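/* Dispatch an asynchronous EQ completion to the callback registered for the
 * event's protocol, if one exists.
 */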
static int
qed_async_event_completion(struct qed_hwfn *p_hwfn,
                           struct event_ring_entry *p_eqe)
{
        qed_spq_async_comp_cb cb;

        if (!p_hwfn->p_spq || (p_eqe->protocol_id >= MAX_PROTOCOL_TYPE))
                return -EINVAL;

        cb = p_hwfn->p_spq->async_comp_cb[p_eqe->protocol_id];
        if (cb) {
                return cb(p_hwfn, p_eqe->opcode, p_eqe->echo,
                          &p_eqe->data, p_eqe->fw_return_code);
        } else {
                DP_NOTICE(p_hwfn,
                          "Unknown Async completion for protocol: %d\n",
                          p_eqe->protocol_id);
                return -EINVAL;
        }
}

int
qed_spq_register_async_cb(struct qed_hwfn *p_hwfn,
                          enum protocol_type protocol_id,
                          qed_spq_async_comp_cb cb)
{
        if (!p_hwfn->p_spq || (protocol_id >= MAX_PROTOCOL_TYPE))
                return -EINVAL;

        p_hwfn->p_spq->async_comp_cb[protocol_id] = cb;
        return 0;
}

void
qed_spq_unregister_async_cb(struct qed_hwfn *p_hwfn,
                            enum protocol_type protocol_id)
{
        if (!p_hwfn->p_spq || (protocol_id >= MAX_PROTOCOL_TYPE))
                return;

        p_hwfn->p_spq->async_comp_cb[protocol_id] = NULL;
}

/***************************************************************************
 * EQ API
 ***************************************************************************/
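/* Write the driver's index to the USTORM EQE consumer offset, letting the FW
 * know how far the event ring has been processed and may be reused.
 */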
void qed_eq_prod_update(struct qed_hwfn *p_hwfn, u16 prod)
{
        u32 addr = GTT_BAR0_MAP_REG_USDM_RAM +
                   USTORM_EQE_CONS_OFFSET(p_hwfn->rel_pf_id);

        REG_WR16(p_hwfn, addr, prod);

        /* Keep prod updates ordered */
        mmiowb();
}
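
/* SP status-block callback: walk the event ring from the local consumer up to
 * the FW-reported index, dispatching each EQE to either the async handler or
 * the SPQ completion path, then report the new consumer value to the FW.
 */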
int qed_eq_completion(struct qed_hwfn *p_hwfn, void *cookie)
{
        struct qed_eq *p_eq = cookie;
        struct qed_chain *p_chain = &p_eq->chain;
        int rc = 0;

        /* Take a snapshot of the FW consumer */
        u16 fw_cons_idx = le16_to_cpu(*p_eq->p_fw_cons);

        DP_VERBOSE(p_hwfn, QED_MSG_SPQ, "fw_cons_idx %x\n", fw_cons_idx);

        /* Need to guarantee the fw_cons index we use points to a usable
         * element (to comply with our chain), so our macros would comply
         */
        if ((fw_cons_idx & qed_chain_get_usable_per_page(p_chain)) ==
            qed_chain_get_usable_per_page(p_chain))
                fw_cons_idx += qed_chain_get_unusable_per_page(p_chain);

        /* Complete current segment of eq entries */
        while (fw_cons_idx != qed_chain_get_cons_idx(p_chain)) {
                struct event_ring_entry *p_eqe = qed_chain_consume(p_chain);

                if (!p_eqe) {
                        rc = -EINVAL;
                        break;
                }

                DP_VERBOSE(p_hwfn, QED_MSG_SPQ,
                           "op %x prot %x res0 %x echo %x fwret %x flags %x\n",
                           p_eqe->opcode,
                           p_eqe->protocol_id,
                           p_eqe->reserved0,
                           le16_to_cpu(p_eqe->echo),
                           p_eqe->fw_return_code,
                           p_eqe->flags);

                if (GET_FIELD(p_eqe->flags, EVENT_RING_ENTRY_ASYNC)) {
                        if (qed_async_event_completion(p_hwfn, p_eqe))
                                rc = -EINVAL;
                } else if (qed_spq_completion(p_hwfn,
                                              p_eqe->echo,
                                              p_eqe->fw_return_code,
                                              &p_eqe->data)) {
                        rc = -EINVAL;
                }

                qed_chain_recycle_consumed(p_chain);
        }

        qed_eq_prod_update(p_hwfn, qed_chain_get_prod_idx(p_chain));

        return rc;
}

int qed_eq_alloc(struct qed_hwfn *p_hwfn, u16 num_elem)
{
        struct qed_eq *p_eq;

        /* Allocate EQ struct */
        p_eq = kzalloc(sizeof(*p_eq), GFP_KERNEL);
        if (!p_eq)
                return -ENOMEM;

        /* Allocate and initialize EQ chain */
        if (qed_chain_alloc(p_hwfn->cdev,
                            QED_CHAIN_USE_TO_PRODUCE,
                            QED_CHAIN_MODE_PBL,
                            QED_CHAIN_CNT_TYPE_U16,
                            num_elem,
                            sizeof(union event_ring_element),
                            &p_eq->chain, NULL))
                goto eq_allocate_fail;

        /* Register EQ completion on the SP SB */
        qed_int_register_cb(p_hwfn, qed_eq_completion,
                            p_eq, &p_eq->eq_sb_index, &p_eq->p_fw_cons);

        p_hwfn->p_eq = p_eq;
        return 0;

eq_allocate_fail:
        kfree(p_eq);
        return -ENOMEM;
}

void qed_eq_setup(struct qed_hwfn *p_hwfn)
{
        qed_chain_reset(&p_hwfn->p_eq->chain);
}

void qed_eq_free(struct qed_hwfn *p_hwfn)
{
        if (!p_hwfn->p_eq)
                return;

        qed_chain_free(p_hwfn->cdev, &p_hwfn->p_eq->chain);

        kfree(p_hwfn->p_eq);
        p_hwfn->p_eq = NULL;
}

/***************************************************************************
 * CQE API - manipulate EQ functionality
 ***************************************************************************/
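/* Complete the ramrod referenced by a slowpath Rx CQE, using the CQE's echo
 * value; VFs have no SPQ, so the completion is skipped for them.
 */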
static int qed_cqe_completion(struct qed_hwfn *p_hwfn,
                              struct eth_slow_path_rx_cqe *cqe,
                              enum protocol_type protocol)
{
        if (IS_VF(p_hwfn->cdev))
                return 0;

        /* @@@tmp - it's possible we'll eventually want to handle some
         * actual commands that can arrive here, but for now this is only
         * used to complete the ramrod using the echo value on the cqe
         */
        return qed_spq_completion(p_hwfn, cqe->echo, 0, NULL);
}

int qed_eth_cqe_completion(struct qed_hwfn *p_hwfn,
                           struct eth_slow_path_rx_cqe *cqe)
{
        int rc;

        rc = qed_cqe_completion(p_hwfn, cqe, PROTOCOLID_ETH);
        if (rc)
                DP_NOTICE(p_hwfn,
                          "Failed to handle RXQ CQE [cmd 0x%02x]\n",
                          cqe->ramrod_cmd_id);

        return rc;
}

/***************************************************************************
 * Slow hwfn Queue (spq)
 ***************************************************************************/
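/* Bring the SPQ to a pristine state: rebuild the free pool over the
 * pre-allocated entries, clear the statistics and the completion bitmap,
 * acquire the CORE CID and re-initialize the HW context.
 */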
void qed_spq_setup(struct qed_hwfn *p_hwfn)
{
        struct qed_spq *p_spq = p_hwfn->p_spq;
        struct qed_spq_entry *p_virt = NULL;
        dma_addr_t p_phys = 0;
        u32 i, capacity;

        INIT_LIST_HEAD(&p_spq->pending);
        INIT_LIST_HEAD(&p_spq->completion_pending);
        INIT_LIST_HEAD(&p_spq->free_pool);
        INIT_LIST_HEAD(&p_spq->unlimited_pending);
        spin_lock_init(&p_spq->lock);

        /* SPQ empty pool */
        p_phys = p_spq->p_phys + offsetof(struct qed_spq_entry, ramrod);
        p_virt = p_spq->p_virt;

        capacity = qed_chain_get_capacity(&p_spq->chain);
        for (i = 0; i < capacity; i++) {
                DMA_REGPAIR_LE(p_virt->elem.data_ptr, p_phys);

                list_add_tail(&p_virt->list, &p_spq->free_pool);

                p_virt++;
                p_phys += sizeof(struct qed_spq_entry);
        }

        /* Statistics */
        p_spq->normal_count = 0;
        p_spq->comp_count = 0;
        p_spq->comp_sent_count = 0;
        p_spq->unlimited_pending_count = 0;

        bitmap_zero(p_spq->p_comp_bitmap, SPQ_RING_SIZE);
        p_spq->comp_bitmap_idx = 0;

        /* SPQ cid, cannot fail */
        qed_cxt_acquire_cid(p_hwfn, PROTOCOLID_CORE, &p_spq->cid);
        qed_spq_hw_initialize(p_hwfn, p_spq);

        /* Reset the chain itself */
        qed_chain_reset(&p_spq->chain);
}

int qed_spq_alloc(struct qed_hwfn *p_hwfn)
{
        struct qed_spq_entry *p_virt = NULL;
        struct qed_spq *p_spq = NULL;
        dma_addr_t p_phys = 0;
        u32 capacity;

        /* SPQ struct */
        p_spq = kzalloc(sizeof(struct qed_spq), GFP_KERNEL);
        if (!p_spq)
                return -ENOMEM;

        /* SPQ ring */
        if (qed_chain_alloc(p_hwfn->cdev,
                            QED_CHAIN_USE_TO_PRODUCE,
                            QED_CHAIN_MODE_SINGLE,
                            QED_CHAIN_CNT_TYPE_U16,
                            0, /* N/A when the mode is SINGLE */
                            sizeof(struct slow_path_element),
                            &p_spq->chain, NULL))
                goto spq_allocate_fail;

        /* Allocate and fill the SPQ elements (incl. ramrod data list) */
        capacity = qed_chain_get_capacity(&p_spq->chain);
        p_virt = dma_alloc_coherent(&p_hwfn->cdev->pdev->dev,
                                    capacity * sizeof(struct qed_spq_entry),
                                    &p_phys, GFP_KERNEL);
        if (!p_virt)
                goto spq_allocate_fail;

        p_spq->p_virt = p_virt;
        p_spq->p_phys = p_phys;
        p_hwfn->p_spq = p_spq;

        return 0;

spq_allocate_fail:
        qed_chain_free(p_hwfn->cdev, &p_spq->chain);
        kfree(p_spq);
        return -ENOMEM;
}

void qed_spq_free(struct qed_hwfn *p_hwfn)
{
        struct qed_spq *p_spq = p_hwfn->p_spq;
        u32 capacity;

        if (!p_spq)
                return;

        if (p_spq->p_virt) {
                capacity = qed_chain_get_capacity(&p_spq->chain);
                dma_free_coherent(&p_hwfn->cdev->pdev->dev,
                                  capacity *
                                  sizeof(struct qed_spq_entry),
                                  p_spq->p_virt, p_spq->p_phys);
        }

        qed_chain_free(p_hwfn->cdev, &p_spq->chain);
        kfree(p_spq);
        p_hwfn->p_spq = NULL;
}

int qed_spq_get_entry(struct qed_hwfn *p_hwfn, struct qed_spq_entry **pp_ent)
{
        struct qed_spq *p_spq = p_hwfn->p_spq;
        struct qed_spq_entry *p_ent = NULL;
        int rc = 0;

        spin_lock_bh(&p_spq->lock);

        if (list_empty(&p_spq->free_pool)) {
                p_ent = kzalloc(sizeof(*p_ent), GFP_ATOMIC);
                if (!p_ent) {
                        DP_NOTICE(p_hwfn,
                                  "Failed to allocate an SPQ entry for a pending ramrod\n");
                        rc = -ENOMEM;
                        goto out_unlock;
                }
                p_ent->queue = &p_spq->unlimited_pending;
        } else {
                p_ent = list_first_entry(&p_spq->free_pool,
                                         struct qed_spq_entry, list);
                list_del(&p_ent->list);
                p_ent->queue = &p_spq->pending;
        }

        *pp_ent = p_ent;

out_unlock:
        spin_unlock_bh(&p_spq->lock);
        return rc;
}

/* Locked variant; should only be called while the SPQ lock is taken */
static void __qed_spq_return_entry(struct qed_hwfn *p_hwfn,
                                   struct qed_spq_entry *p_ent)
{
        list_add_tail(&p_ent->list, &p_hwfn->p_spq->free_pool);
}

void qed_spq_return_entry(struct qed_hwfn *p_hwfn, struct qed_spq_entry *p_ent)
{
        spin_lock_bh(&p_hwfn->p_spq->lock);
        __qed_spq_return_entry(p_hwfn, p_ent);
        spin_unlock_bh(&p_hwfn->p_spq->lock);
}

/**
 * @brief qed_spq_add_entry - adds a new entry to the pending
 *        list. Should be used while lock is being held.
 *
 * Adds an entry to the pending list if there is room (an empty
 * element is available in the free_pool), or adds it to the
 * unlimited_pending list if there is no room.
 *
 * @param p_hwfn
 * @param p_ent
 * @param priority
 *
 * @return int
 */
static int qed_spq_add_entry(struct qed_hwfn *p_hwfn,
                             struct qed_spq_entry *p_ent,
                             enum spq_priority priority)
{
        struct qed_spq *p_spq = p_hwfn->p_spq;

        if (p_ent->queue == &p_spq->unlimited_pending) {
                if (list_empty(&p_spq->free_pool)) {
                        list_add_tail(&p_ent->list, &p_spq->unlimited_pending);
                        p_spq->unlimited_pending_count++;

                        return 0;
                } else {
                        struct qed_spq_entry *p_en2;

                        p_en2 = list_first_entry(&p_spq->free_pool,
                                                 struct qed_spq_entry, list);
                        list_del(&p_en2->list);

                        /* Copy the ring element physical pointer to the new
                         * entry, since we are about to override the entire
                         * ring entry and don't want to lose the pointer.
                         */
                        p_ent->elem.data_ptr = p_en2->elem.data_ptr;

                        *p_en2 = *p_ent;

                        /* EBLOCK is responsible to free the allocated p_ent */
                        if (p_ent->comp_mode != QED_SPQ_MODE_EBLOCK)
                                kfree(p_ent);

                        p_ent = p_en2;
                }
        }

        /* Entry is to be placed in 'pending' queue */
        switch (priority) {
        case QED_SPQ_PRIORITY_NORMAL:
                list_add_tail(&p_ent->list, &p_spq->pending);
                p_spq->normal_count++;
                break;
        case QED_SPQ_PRIORITY_HIGH:
                list_add(&p_ent->list, &p_spq->pending);
                p_spq->high_count++;
                break;
        default:
                return -EINVAL;
        }

        return 0;
}

/***************************************************************************
 * Accessor
 ***************************************************************************/
u32 qed_spq_get_cid(struct qed_hwfn *p_hwfn)
{
        if (!p_hwfn->p_spq)
                return 0xffffffff;      /* illegal */
        return p_hwfn->p_spq->cid;
}

/***************************************************************************
 * Posting new Ramrods
 ***************************************************************************/
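/* Post entries from 'head' into the chain until the chain runs down to the
 * reserve kept for high-priority entries or the list empties.
 */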
static int qed_spq_post_list(struct qed_hwfn *p_hwfn,
                             struct list_head *head, u32 keep_reserve)
{
        struct qed_spq *p_spq = p_hwfn->p_spq;
        int rc;

        while (qed_chain_get_elem_left(&p_spq->chain) > keep_reserve &&
               !list_empty(head)) {
                struct qed_spq_entry *p_ent =
                        list_first_entry(head, struct qed_spq_entry, list);

                list_del(&p_ent->list);
                list_add_tail(&p_ent->list, &p_spq->completion_pending);
                p_spq->comp_sent_count++;

                rc = qed_spq_hw_post(p_hwfn, p_spq, p_ent);
                if (rc) {
                        list_del(&p_ent->list);
                        __qed_spq_return_entry(p_hwfn, p_ent);
                        return rc;
                }
        }

        return 0;
}
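
/* Move entries from the unlimited pending list into the pending list while
 * free SPQ entries are available, then post the pending list to the chain.
 */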
static int qed_spq_pend_post(struct qed_hwfn *p_hwfn)
{
        struct qed_spq *p_spq = p_hwfn->p_spq;
        struct qed_spq_entry *p_ent = NULL;

        while (!list_empty(&p_spq->free_pool)) {
                if (list_empty(&p_spq->unlimited_pending))
                        break;

                p_ent = list_first_entry(&p_spq->unlimited_pending,
                                         struct qed_spq_entry, list);
                if (!p_ent)
                        return -EINVAL;

                list_del(&p_ent->list);

                qed_spq_add_entry(p_hwfn, p_ent, p_ent->priority);
        }

        return qed_spq_post_list(p_hwfn, &p_spq->pending,
                                 SPQ_HIGH_PRI_RESERVE_DEFAULT);
}
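
/* Post a single ramrod: fill in the entry, queue and post it under the SPQ
 * lock, and for EBLOCK mode also wait here for its completion.
 */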
int qed_spq_post(struct qed_hwfn *p_hwfn,
                 struct qed_spq_entry *p_ent, u8 *fw_return_code)
{
        int rc = 0;
        struct qed_spq *p_spq = p_hwfn ? p_hwfn->p_spq : NULL;
        bool b_ret_ent = true;
        bool eblock;

        if (!p_hwfn)
                return -EINVAL;

        if (!p_ent) {
                DP_NOTICE(p_hwfn, "Got a NULL pointer\n");
                return -EINVAL;
        }

        /* Complete the entry */
        rc = qed_spq_fill_entry(p_hwfn, p_ent);

        spin_lock_bh(&p_spq->lock);

        /* Check return value after LOCK is taken for cleaner error flow */
        if (rc)
                goto spq_post_fail;

        /* Check if entry is in block mode before qed_spq_add_entry,
         * which might kfree p_ent.
         */
        eblock = (p_ent->comp_mode == QED_SPQ_MODE_EBLOCK);

        /* Add the request to the pending queue */
        rc = qed_spq_add_entry(p_hwfn, p_ent, p_ent->priority);
        if (rc)
                goto spq_post_fail;

        rc = qed_spq_pend_post(p_hwfn);
        if (rc) {
                /* Since it's possible that pending failed for a different
                 * entry [although unlikely], the failed entry was already
                 * dealt with; No need to return it here.
                 */
                b_ret_ent = false;
                goto spq_post_fail;
        }

        spin_unlock_bh(&p_spq->lock);

        if (eblock) {
                /* For entries in QED BLOCK mode, the completion code cannot
                 * perform the necessary cleanup - if it did, we couldn't
                 * access p_ent here to see whether it's successful or not.
                 * Thus, after gaining the answer perform the cleanup here.
                 */
                rc = qed_spq_block(p_hwfn, p_ent, fw_return_code,
                                   p_ent->queue == &p_spq->unlimited_pending);

                if (p_ent->queue == &p_spq->unlimited_pending) {
                        /* This is an allocated p_ent which is not in the
                         * free pool.
                         */
                        kfree(p_ent);
                        return rc;
                }

                if (rc)
                        goto spq_post_fail2;

                /* Return to pool */
                qed_spq_return_entry(p_hwfn, p_ent);
        }
        return rc;

spq_post_fail2:
        spin_lock_bh(&p_spq->lock);
        list_del(&p_ent->list);
        qed_chain_return_produced(&p_spq->chain);

spq_post_fail:
        /* Return to the free pool */
        if (b_ret_ent)
                __qed_spq_return_entry(p_hwfn, p_ent);
        spin_unlock_bh(&p_spq->lock);

        return rc;
}
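
/* EQ-driven completion: find the pending entry whose echo matches the EQE,
 * return ring credit in order via the completion bitmap, invoke the entry's
 * callback, and then attempt to post more pending ramrods.
 */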
int qed_spq_completion(struct qed_hwfn *p_hwfn,
                       __le16 echo,
                       u8 fw_return_code,
                       union event_ring_data *p_data)
{
        struct qed_spq *p_spq;
        struct qed_spq_entry *p_ent = NULL;
        struct qed_spq_entry *tmp;
        struct qed_spq_entry *found = NULL;
        int rc;

        if (!p_hwfn)
                return -EINVAL;

        p_spq = p_hwfn->p_spq;
        if (!p_spq)
                return -EINVAL;

        spin_lock_bh(&p_spq->lock);
        list_for_each_entry_safe(p_ent, tmp, &p_spq->completion_pending, list) {
                if (p_ent->elem.hdr.echo == echo) {
                        u16 pos = le16_to_cpu(echo) % SPQ_RING_SIZE;

                        list_del(&p_ent->list);

                        /* Avoid overriding of SPQ entries when getting
                         * out-of-order completions, by marking the completions
                         * in a bitmap and increasing the chain consumer only
                         * for the first successive completed entries.
                         */
                        __set_bit(pos, p_spq->p_comp_bitmap);

                        while (test_bit(p_spq->comp_bitmap_idx,
                                        p_spq->p_comp_bitmap)) {
                                __clear_bit(p_spq->comp_bitmap_idx,
                                            p_spq->p_comp_bitmap);
                                p_spq->comp_bitmap_idx++;
                                qed_chain_return_produced(&p_spq->chain);
                        }

                        p_spq->comp_count++;
                        found = p_ent;
                        break;
                }

                /* This is relatively uncommon - depends on scenarios
                 * which have multiple per-PF sent ramrods.
                 */
                DP_VERBOSE(p_hwfn, QED_MSG_SPQ,
                           "Got completion for echo %04x - doesn't match echo %04x in completion pending list\n",
                           le16_to_cpu(echo),
                           le16_to_cpu(p_ent->elem.hdr.echo));
        }

        /* Release lock before callback, as callback may post
         * an additional ramrod.
         */
        spin_unlock_bh(&p_spq->lock);

        if (!found) {
                DP_NOTICE(p_hwfn,
                          "Failed to find an entry this EQE [echo %04x] completes\n",
                          le16_to_cpu(echo));
                return -EEXIST;
        }

        DP_VERBOSE(p_hwfn, QED_MSG_SPQ,
                   "Complete EQE [echo %04x]: func %p cookie %p\n",
                   le16_to_cpu(echo),
                   found->comp_cb.function, found->comp_cb.cookie);
        if (found->comp_cb.function)
                found->comp_cb.function(p_hwfn, found->comp_cb.cookie, p_data,
                                        fw_return_code);
        else
                DP_VERBOSE(p_hwfn,
                           QED_MSG_SPQ,
                           "Got a completion without a callback function\n");

        if ((found->comp_mode != QED_SPQ_MODE_EBLOCK) ||
            (found->queue == &p_spq->unlimited_pending))
                /* EBLOCK is responsible for returning its own entry into the
                 * free list, unless it originally added the entry into the
                 * unlimited pending list.
                 */
                qed_spq_return_entry(p_hwfn, found);

        /* Attempt to post pending requests */
        spin_lock_bh(&p_spq->lock);
        rc = qed_spq_pend_post(p_hwfn);
        spin_unlock_bh(&p_spq->lock);

        return rc;
}
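
/* Allocate the consolidation queue (ConsQ) struct and its PBL-based chain;
 * 0x80 is the ConsQ element size in bytes.
 */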
int qed_consq_alloc(struct qed_hwfn *p_hwfn)
{
        struct qed_consq *p_consq;

        /* Allocate ConsQ struct */
        p_consq = kzalloc(sizeof(*p_consq), GFP_KERNEL);
        if (!p_consq)
                return -ENOMEM;

        /* Allocate and initialize ConsQ chain */
        if (qed_chain_alloc(p_hwfn->cdev,
                            QED_CHAIN_USE_TO_PRODUCE,
                            QED_CHAIN_MODE_PBL,
                            QED_CHAIN_CNT_TYPE_U16,
                            QED_CHAIN_PAGE_SIZE / 0x80,
                            0x80, &p_consq->chain, NULL))
                goto consq_allocate_fail;

        p_hwfn->p_consq = p_consq;
        return 0;

consq_allocate_fail:
        kfree(p_consq);
        return -ENOMEM;
}

void qed_consq_setup(struct qed_hwfn *p_hwfn)
{
        qed_chain_reset(&p_hwfn->p_consq->chain);
}

void qed_consq_free(struct qed_hwfn *p_hwfn)
{
        if (!p_hwfn->p_consq)
                return;

        qed_chain_free(p_hwfn->cdev, &p_hwfn->p_consq->chain);

        kfree(p_hwfn->p_consq);
        p_hwfn->p_consq = NULL;
}