#include <linux/types.h>
#include <asm/byteorder.h>
#include <linux/io.h>
#include <linux/delay.h>
#include <linux/dma-mapping.h>
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/pci.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/string.h>
#include "qed.h"
#include "qed_cxt.h"
#include "qed_dev_api.h"
#include "qed_hsi.h"
#include "qed_hw.h"
#include "qed_int.h"
#include "qed_iscsi.h"
#include "qed_mcp.h"
#include "qed_ooo.h"
#include "qed_reg_addr.h"
#include "qed_sp.h"
#include "qed_sriov.h"
#include "qed_rdma.h"
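/***************************************************************************
 * Structures & Definitions
 ***************************************************************************/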
#define SPQ_HIGH_PRI_RESERVE_DEFAULT	(1)

#define SPQ_BLOCK_DELAY_MAX_ITER	(10)
#define SPQ_BLOCK_DELAY_US		(10)
#define SPQ_BLOCK_SLEEP_MAX_ITER	(1000)
#define SPQ_BLOCK_SLEEP_MS		(5)
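/***************************************************************************
 * Blocking Imp. (BLOCK/EBLOCK mode)
 ***************************************************************************/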
static void qed_spq_blocking_cb(struct qed_hwfn *p_hwfn,
				void *cookie,
				union event_ring_data *data, u8 fw_return_code)
{
	struct qed_spq_comp_done *comp_done;

	comp_done = (struct qed_spq_comp_done *)cookie;

	comp_done->fw_return_code = fw_return_code;

	/* Make sure completion done is visible on the waiting thread */
	smp_store_release(&comp_done->done, 0x1);
}

static int __qed_spq_block(struct qed_hwfn *p_hwfn,
			   struct qed_spq_entry *p_ent,
			   u8 *p_fw_ret, bool sleep_between_iter)
{
	struct qed_spq_comp_done *comp_done;
	u32 iter_cnt;

	comp_done = (struct qed_spq_comp_done *)p_ent->comp_cb.cookie;
	iter_cnt = sleep_between_iter ? SPQ_BLOCK_SLEEP_MAX_ITER
				      : SPQ_BLOCK_DELAY_MAX_ITER;

	while (iter_cnt--) {
		/* Validate we receive completion update */
		if (smp_load_acquire(&comp_done->done) == 1) {
			if (p_fw_ret)
				*p_fw_ret = comp_done->fw_return_code;
			return 0;
		}

		if (sleep_between_iter)
			msleep(SPQ_BLOCK_SLEEP_MS);
		else
			udelay(SPQ_BLOCK_DELAY_US);
	}

	return -EBUSY;
}

static int qed_spq_block(struct qed_hwfn *p_hwfn,
			 struct qed_spq_entry *p_ent,
			 u8 *p_fw_ret, bool skip_quick_poll)
{
	struct qed_spq_comp_done *comp_done;
	struct qed_ptt *p_ptt;
	int rc;

	/* A relatively short polling period w/o sleeping, to allow the FW to
	 * complete the ramrod and thus possibly to avoid the following sleeps.
	 */
	if (!skip_quick_poll) {
		rc = __qed_spq_block(p_hwfn, p_ent, p_fw_ret, false);
		if (!rc)
			return 0;
	}

	/* Move to polling with a sleeping period between iterations */
	rc = __qed_spq_block(p_hwfn, p_ent, p_fw_ret, true);
	if (!rc)
		return 0;

	p_ptt = qed_ptt_acquire(p_hwfn);
	if (!p_ptt) {
		DP_NOTICE(p_hwfn, "Failed to acquire PTT\n");
		return -EAGAIN;
	}

	DP_INFO(p_hwfn, "Ramrod is stuck, requesting MCP drain\n");
	rc = qed_mcp_drain(p_hwfn, p_ptt);
	qed_ptt_release(p_hwfn, p_ptt);
	if (rc) {
		DP_NOTICE(p_hwfn, "MCP drain failed\n");
		goto err;
	}

	/* Retry after drain */
	rc = __qed_spq_block(p_hwfn, p_ent, p_fw_ret, true);
	if (!rc)
		return 0;

	comp_done = (struct qed_spq_comp_done *)p_ent->comp_cb.cookie;
	if (comp_done->done == 1) {
		if (p_fw_ret)
			*p_fw_ret = comp_done->fw_return_code;
		return 0;
	}
err:
	DP_NOTICE(p_hwfn,
		  "Ramrod is stuck [CID %08x cmd %02x protocol %02x echo %04x]\n",
		  le32_to_cpu(p_ent->elem.hdr.cid),
		  p_ent->elem.hdr.cmd_id,
		  p_ent->elem.hdr.protocol_id,
		  le16_to_cpu(p_ent->elem.hdr.echo));

	return -EBUSY;
}
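/***************************************************************************
 * SPQ entries inner API
 ***************************************************************************/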
static int qed_spq_fill_entry(struct qed_hwfn *p_hwfn,
			      struct qed_spq_entry *p_ent)
{
	p_ent->flags = 0;

	switch (p_ent->comp_mode) {
	case QED_SPQ_MODE_EBLOCK:
	case QED_SPQ_MODE_BLOCK:
		p_ent->comp_cb.function = qed_spq_blocking_cb;
		break;
	case QED_SPQ_MODE_CB:
		break;
	default:
		DP_NOTICE(p_hwfn, "Unknown SPQE completion mode %d\n",
			  p_ent->comp_mode);
		return -EINVAL;
	}

	DP_VERBOSE(p_hwfn, QED_MSG_SPQ,
		   "Ramrod header: [CID 0x%08x CMD 0x%02x protocol 0x%02x] Data pointer: [%08x:%08x] Completion Mode: %s\n",
		   p_ent->elem.hdr.cid,
		   p_ent->elem.hdr.cmd_id,
		   p_ent->elem.hdr.protocol_id,
		   p_ent->elem.data_ptr.hi,
		   p_ent->elem.data_ptr.lo,
		   D_TRINE(p_ent->comp_mode, QED_SPQ_MODE_EBLOCK,
			   QED_SPQ_MODE_BLOCK, "MODE_EBLOCK", "MODE_BLOCK",
			   "MODE_CB"));

	return 0;
}
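/***************************************************************************
 * HSI access
 ***************************************************************************/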
static void qed_spq_hw_initialize(struct qed_hwfn *p_hwfn,
				  struct qed_spq *p_spq)
{
	struct e4_core_conn_context *p_cxt;
	struct qed_cxt_info cxt_info;
	u16 physical_q;
	int rc;

	cxt_info.iid = p_spq->cid;

	rc = qed_cxt_get_cid_info(p_hwfn, &cxt_info);

	if (rc < 0) {
		DP_NOTICE(p_hwfn, "Cannot find context info for cid=%d\n",
			  p_spq->cid);
		return;
	}

	p_cxt = cxt_info.p_cxt;

	SET_FIELD(p_cxt->xstorm_ag_context.flags10,
		  E4_XSTORM_CORE_CONN_AG_CTX_DQ_CF_EN, 1);
	SET_FIELD(p_cxt->xstorm_ag_context.flags1,
		  E4_XSTORM_CORE_CONN_AG_CTX_DQ_CF_ACTIVE, 1);
	SET_FIELD(p_cxt->xstorm_ag_context.flags9,
		  E4_XSTORM_CORE_CONN_AG_CTX_CONSOLID_PROD_CF_EN, 1);

	/* QM physical queue */
	physical_q = qed_get_cm_pq_idx(p_hwfn, PQ_FLAGS_LB);
	p_cxt->xstorm_ag_context.physical_q0 = cpu_to_le16(physical_q);

	p_cxt->xstorm_st_context.spq_base_lo =
		DMA_LO_LE(p_spq->chain.p_phys_addr);
	p_cxt->xstorm_st_context.spq_base_hi =
		DMA_HI_LE(p_spq->chain.p_phys_addr);

	DMA_REGPAIR_LE(p_cxt->xstorm_st_context.consolid_base_addr,
		       p_hwfn->p_consq->chain.p_phys_addr);
}

static int qed_spq_hw_post(struct qed_hwfn *p_hwfn,
			   struct qed_spq *p_spq, struct qed_spq_entry *p_ent)
{
	struct qed_chain *p_chain = &p_hwfn->p_spq->chain;
	struct core_db_data *p_db_data = &p_spq->db_data;
	u16 echo = qed_chain_get_prod_idx(p_chain);
	struct slow_path_element *elem;

	p_ent->elem.hdr.echo = cpu_to_le16(echo);
	elem = qed_chain_produce(p_chain);
	if (!elem) {
		DP_NOTICE(p_hwfn, "Failed to produce from SPQ chain\n");
		return -EINVAL;
	}

	*elem = p_ent->elem; /* struct assignment */

	/* Send a doorbell on the slow hwfn session */
	p_db_data->spq_prod = cpu_to_le16(qed_chain_get_prod_idx(p_chain));

	/* Make sure the SPQE is updated before the doorbell */
	wmb();

	DOORBELL(p_hwfn, p_spq->db_addr_offset, *(u32 *)p_db_data);

	/* Make sure the doorbell was rung */
	wmb();

	DP_VERBOSE(p_hwfn, QED_MSG_SPQ,
		   "Doorbelled [0x%08x, CID 0x%08x] with Flags: %02x agg_params: %02x, prod: %04x\n",
		   p_spq->db_addr_offset,
		   p_spq->cid,
		   p_db_data->params,
		   p_db_data->agg_flags, qed_chain_get_prod_idx(p_chain));

	return 0;
}
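/***************************************************************************
 * Asynchronous events
 ***************************************************************************/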
static int
qed_async_event_completion(struct qed_hwfn *p_hwfn,
			   struct event_ring_entry *p_eqe)
{
	qed_spq_async_comp_cb cb;

	if (!p_hwfn->p_spq || (p_eqe->protocol_id >= MAX_PROTOCOL_TYPE))
		return -EINVAL;

	cb = p_hwfn->p_spq->async_comp_cb[p_eqe->protocol_id];
	if (cb) {
		return cb(p_hwfn, p_eqe->opcode, p_eqe->echo,
			  &p_eqe->data, p_eqe->fw_return_code);
	} else {
		DP_NOTICE(p_hwfn,
			  "Unknown Async completion for protocol: %d\n",
			  p_eqe->protocol_id);
		return -EINVAL;
	}
}

int
qed_spq_register_async_cb(struct qed_hwfn *p_hwfn,
			  enum protocol_type protocol_id,
			  qed_spq_async_comp_cb cb)
{
	if (!p_hwfn->p_spq || (protocol_id >= MAX_PROTOCOL_TYPE))
		return -EINVAL;

	p_hwfn->p_spq->async_comp_cb[protocol_id] = cb;
	return 0;
}

void
qed_spq_unregister_async_cb(struct qed_hwfn *p_hwfn,
			    enum protocol_type protocol_id)
{
	if (!p_hwfn->p_spq || (protocol_id >= MAX_PROTOCOL_TYPE))
		return;

	p_hwfn->p_spq->async_comp_cb[protocol_id] = NULL;
}
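/***************************************************************************
 * EQ API
 ***************************************************************************/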
void qed_eq_prod_update(struct qed_hwfn *p_hwfn, u16 prod)
{
	u32 addr = GTT_BAR0_MAP_REG_USDM_RAM +
		   USTORM_EQE_CONS_OFFSET(p_hwfn->rel_pf_id);

	REG_WR16(p_hwfn, addr, prod);

	/* Keep prod updates ordered */
	mmiowb();
}

int qed_eq_completion(struct qed_hwfn *p_hwfn, void *cookie)
{
	struct qed_eq *p_eq = cookie;
	struct qed_chain *p_chain = &p_eq->chain;
	int rc = 0;

	/* Take a snapshot of the FW consumer */
	u16 fw_cons_idx = le16_to_cpu(*p_eq->p_fw_cons);

	DP_VERBOSE(p_hwfn, QED_MSG_SPQ, "fw_cons_idx %x\n", fw_cons_idx);

	/* Need to guarantee the fw_cons index we use points to a usable
	 * element (to comply with our chain), so our macros would comply
	 */
	if ((fw_cons_idx & qed_chain_get_usable_per_page(p_chain)) ==
	    qed_chain_get_usable_per_page(p_chain))
		fw_cons_idx += qed_chain_get_unusable_per_page(p_chain);

	/* Complete current segment of eq entries */
	while (fw_cons_idx != qed_chain_get_cons_idx(p_chain)) {
		struct event_ring_entry *p_eqe = qed_chain_consume(p_chain);

		if (!p_eqe) {
			rc = -EINVAL;
			break;
		}

		DP_VERBOSE(p_hwfn, QED_MSG_SPQ,
			   "op %x prot %x res0 %x echo %x fwret %x flags %x\n",
			   p_eqe->opcode,
			   p_eqe->protocol_id,
			   p_eqe->reserved0,
			   le16_to_cpu(p_eqe->echo),
			   p_eqe->fw_return_code,
			   p_eqe->flags);

		if (GET_FIELD(p_eqe->flags, EVENT_RING_ENTRY_ASYNC)) {
			if (qed_async_event_completion(p_hwfn, p_eqe))
				rc = -EINVAL;
		} else if (qed_spq_completion(p_hwfn,
					      p_eqe->echo,
					      p_eqe->fw_return_code,
					      &p_eqe->data)) {
			rc = -EINVAL;
		}

		qed_chain_recycle_consumed(p_chain);
	}

	qed_eq_prod_update(p_hwfn, qed_chain_get_prod_idx(p_chain));

	/* Attempt to post pending requests */
	spin_lock_bh(&p_hwfn->p_spq->lock);
	rc = qed_spq_pend_post(p_hwfn);
	spin_unlock_bh(&p_hwfn->p_spq->lock);

	return rc;
}

int qed_eq_alloc(struct qed_hwfn *p_hwfn, u16 num_elem)
{
	struct qed_eq *p_eq;

	/* Allocate EQ struct */
	p_eq = kzalloc(sizeof(*p_eq), GFP_KERNEL);
	if (!p_eq)
		return -ENOMEM;

	/* Allocate and initialize EQ chain */
	if (qed_chain_alloc(p_hwfn->cdev,
			    QED_CHAIN_USE_TO_PRODUCE,
			    QED_CHAIN_MODE_PBL,
			    QED_CHAIN_CNT_TYPE_U16,
			    num_elem,
			    sizeof(union event_ring_element),
			    &p_eq->chain, NULL))
		goto eq_allocate_fail;

	/* Register EQ completion on the SP SB */
	qed_int_register_cb(p_hwfn, qed_eq_completion,
			    p_eq, &p_eq->eq_sb_index, &p_eq->p_fw_cons);

	p_hwfn->p_eq = p_eq;
	return 0;

eq_allocate_fail:
	kfree(p_eq);
	return -ENOMEM;
}

void qed_eq_setup(struct qed_hwfn *p_hwfn)
{
	qed_chain_reset(&p_hwfn->p_eq->chain);
}

void qed_eq_free(struct qed_hwfn *p_hwfn)
{
	if (!p_hwfn->p_eq)
		return;

	qed_chain_free(p_hwfn->cdev, &p_hwfn->p_eq->chain);

	kfree(p_hwfn->p_eq);
	p_hwfn->p_eq = NULL;
}
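/***************************************************************************
 * CQE API - manipulate EQ functionality
 ***************************************************************************/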
static int qed_cqe_completion(struct qed_hwfn *p_hwfn,
			      struct eth_slow_path_rx_cqe *cqe,
			      enum protocol_type protocol)
{
	if (IS_VF(p_hwfn->cdev))
		return 0;

	/* @@@tmp - it's possible we'll eventually want to handle some
	 * actual commands that can arrive here, but for now this is only
	 * used to complete the ramrod using the echo value on the cqe
	 */
	return qed_spq_completion(p_hwfn, cqe->echo, 0, NULL);
}

int qed_eth_cqe_completion(struct qed_hwfn *p_hwfn,
			   struct eth_slow_path_rx_cqe *cqe)
{
	int rc;

	rc = qed_cqe_completion(p_hwfn, cqe, PROTOCOLID_ETH);
	if (rc)
		DP_NOTICE(p_hwfn,
			  "Failed to handle RXQ CQE [cmd 0x%02x]\n",
			  cqe->ramrod_cmd_id);

	return rc;
}
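/***************************************************************************
 * Slow hwfn Queue (spq)
 ***************************************************************************/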
void qed_spq_setup(struct qed_hwfn *p_hwfn)
{
	struct qed_spq *p_spq = p_hwfn->p_spq;
	struct qed_spq_entry *p_virt = NULL;
	struct core_db_data *p_db_data;
	void __iomem *db_addr;
	dma_addr_t p_phys = 0;
	u32 i, capacity;
	int rc;

	INIT_LIST_HEAD(&p_spq->pending);
	INIT_LIST_HEAD(&p_spq->completion_pending);
	INIT_LIST_HEAD(&p_spq->free_pool);
	INIT_LIST_HEAD(&p_spq->unlimited_pending);
	spin_lock_init(&p_spq->lock);

	/* SPQ empty pool */
	p_phys = p_spq->p_phys + offsetof(struct qed_spq_entry, ramrod);
	p_virt = p_spq->p_virt;

	capacity = qed_chain_get_capacity(&p_spq->chain);
	for (i = 0; i < capacity; i++) {
		DMA_REGPAIR_LE(p_virt->elem.data_ptr, p_phys);

		list_add_tail(&p_virt->list, &p_spq->free_pool);

		p_virt++;
		p_phys += sizeof(struct qed_spq_entry);
	}

	/* Statistics */
	p_spq->normal_count = 0;
	p_spq->comp_count = 0;
	p_spq->comp_sent_count = 0;
	p_spq->unlimited_pending_count = 0;

	bitmap_zero(p_spq->p_comp_bitmap, SPQ_RING_SIZE);
	p_spq->comp_bitmap_idx = 0;

	/* SPQ cid, cannot fail */
	qed_cxt_acquire_cid(p_hwfn, PROTOCOLID_CORE, &p_spq->cid);
	qed_spq_hw_initialize(p_hwfn, p_spq);

	/* Reset the chain itself */
	qed_chain_reset(&p_spq->chain);

	/* Initialize the address/data of the SPQ doorbell */
	p_spq->db_addr_offset = qed_db_addr(p_spq->cid, DQ_DEMS_LEGACY);
	p_db_data = &p_spq->db_data;
	memset(p_db_data, 0, sizeof(*p_db_data));
	SET_FIELD(p_db_data->params, CORE_DB_DATA_DEST, DB_DEST_XCM);
	SET_FIELD(p_db_data->params, CORE_DB_DATA_AGG_CMD, DB_AGG_CMD_MAX);
	SET_FIELD(p_db_data->params, CORE_DB_DATA_AGG_VAL_SEL,
		  DQ_XCM_CORE_SPQ_PROD_CMD);
	p_db_data->agg_flags = DQ_XCM_CORE_DQ_CF_CMD;

	/* Register the SPQ doorbell with the doorbell recovery mechanism */
	db_addr = (void __iomem *)((u8 __iomem *)p_hwfn->doorbells +
				   p_spq->db_addr_offset);
	rc = qed_db_recovery_add(p_hwfn->cdev, db_addr, &p_spq->db_data,
				 DB_REC_WIDTH_32B, DB_REC_KERNEL);
	if (rc)
		DP_INFO(p_hwfn,
			"Failed to register the SPQ doorbell with the doorbell recovery mechanism\n");
}

int qed_spq_alloc(struct qed_hwfn *p_hwfn)
{
	struct qed_spq_entry *p_virt = NULL;
	struct qed_spq *p_spq = NULL;
	dma_addr_t p_phys = 0;
	u32 capacity;

	/* SPQ struct */
	p_spq = kzalloc(sizeof(struct qed_spq), GFP_KERNEL);
	if (!p_spq)
		return -ENOMEM;

	/* SPQ ring */
	if (qed_chain_alloc(p_hwfn->cdev,
			    QED_CHAIN_USE_TO_PRODUCE,
			    QED_CHAIN_MODE_SINGLE,
			    QED_CHAIN_CNT_TYPE_U16,
			    0,	/* N/A when the mode is SINGLE */
			    sizeof(struct slow_path_element),
			    &p_spq->chain, NULL))
		goto spq_allocate_fail;

	/* Allocate and fill the SPQ elements (incl. ramrod data list) */
	capacity = qed_chain_get_capacity(&p_spq->chain);
	p_virt = dma_alloc_coherent(&p_hwfn->cdev->pdev->dev,
				    capacity * sizeof(struct qed_spq_entry),
				    &p_phys, GFP_KERNEL);
	if (!p_virt)
		goto spq_allocate_fail;

	p_spq->p_virt = p_virt;
	p_spq->p_phys = p_phys;
	p_hwfn->p_spq = p_spq;

	return 0;

spq_allocate_fail:
	qed_chain_free(p_hwfn->cdev, &p_spq->chain);
	kfree(p_spq);
	return -ENOMEM;
}

void qed_spq_free(struct qed_hwfn *p_hwfn)
{
	struct qed_spq *p_spq = p_hwfn->p_spq;
	void __iomem *db_addr;
	u32 capacity;

	if (!p_spq)
		return;

	/* Delete the SPQ doorbell from the doorbell recovery mechanism */
	db_addr = (void __iomem *)((u8 __iomem *)p_hwfn->doorbells +
				   p_spq->db_addr_offset);
	qed_db_recovery_del(p_hwfn->cdev, db_addr, &p_spq->db_data);

	if (p_spq->p_virt) {
		capacity = qed_chain_get_capacity(&p_spq->chain);
		dma_free_coherent(&p_hwfn->cdev->pdev->dev,
				  capacity *
				  sizeof(struct qed_spq_entry),
				  p_spq->p_virt, p_spq->p_phys);
	}

	qed_chain_free(p_hwfn->cdev, &p_spq->chain);
	kfree(p_spq);
	p_hwfn->p_spq = NULL;
}

int qed_spq_get_entry(struct qed_hwfn *p_hwfn, struct qed_spq_entry **pp_ent)
{
	struct qed_spq *p_spq = p_hwfn->p_spq;
	struct qed_spq_entry *p_ent = NULL;
	int rc = 0;

	spin_lock_bh(&p_spq->lock);

	if (list_empty(&p_spq->free_pool)) {
		p_ent = kzalloc(sizeof(*p_ent), GFP_ATOMIC);
		if (!p_ent) {
			DP_NOTICE(p_hwfn,
				  "Failed to allocate an SPQ entry for a pending ramrod\n");
			rc = -ENOMEM;
			goto out_unlock;
		}
		p_ent->queue = &p_spq->unlimited_pending;
	} else {
		p_ent = list_first_entry(&p_spq->free_pool,
					 struct qed_spq_entry, list);
		list_del(&p_ent->list);
		p_ent->queue = &p_spq->pending;
	}

	*pp_ent = p_ent;

out_unlock:
	spin_unlock_bh(&p_spq->lock);
	return rc;
}

/* Locked variant; Should be called while the SPQ lock is taken */
static void __qed_spq_return_entry(struct qed_hwfn *p_hwfn,
				   struct qed_spq_entry *p_ent)
{
	list_add_tail(&p_ent->list, &p_hwfn->p_spq->free_pool);
}

void qed_spq_return_entry(struct qed_hwfn *p_hwfn, struct qed_spq_entry *p_ent)
{
	spin_lock_bh(&p_hwfn->p_spq->lock);
	__qed_spq_return_entry(p_hwfn, p_ent);
	spin_unlock_bh(&p_hwfn->p_spq->lock);
}
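/**
 * @brief qed_spq_add_entry - adds a new entry to the pending
 *        list. Should be used while lock is being held.
 *
 * Adds an entry to the pending list given its priority. If the free
 * pool is empty, the entry is queued on the unlimited_pending list;
 * otherwise its contents are copied into an entry taken from the
 * free pool so the ramrod can be posted immediately.
 *
 * @param p_hwfn
 * @param p_ent
 * @param priority
 *
 * @return int
 */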
static int qed_spq_add_entry(struct qed_hwfn *p_hwfn,
			     struct qed_spq_entry *p_ent,
			     enum spq_priority priority)
{
	struct qed_spq *p_spq = p_hwfn->p_spq;

	if (p_ent->queue == &p_spq->unlimited_pending) {
		if (list_empty(&p_spq->free_pool)) {
			list_add_tail(&p_ent->list, &p_spq->unlimited_pending);
			p_spq->unlimited_pending_count++;

			return 0;
		} else {
			struct qed_spq_entry *p_en2;

			p_en2 = list_first_entry(&p_spq->free_pool,
						 struct qed_spq_entry, list);
			list_del(&p_en2->list);

			/* Copy the ring element physical pointer to the new
			 * entry, since we are about to override the entire
			 * ring entry and don't want to lose the pointer.
			 */
			p_ent->elem.data_ptr = p_en2->elem.data_ptr;

			*p_en2 = *p_ent;

			/* EBLOCK is responsible to free the allocated p_ent */
			if (p_ent->comp_mode != QED_SPQ_MODE_EBLOCK)
				kfree(p_ent);
			else
				p_ent->post_ent = p_en2;

			p_ent = p_en2;
		}
	}

	/* Entry is now to be placed in the 'pending' queue */
	switch (priority) {
	case QED_SPQ_PRIORITY_NORMAL:
		list_add_tail(&p_ent->list, &p_spq->pending);
		p_spq->normal_count++;
		break;
	case QED_SPQ_PRIORITY_HIGH:
		list_add(&p_ent->list, &p_spq->pending);
		p_spq->high_count++;
		break;
	default:
		return -EINVAL;
	}

	return 0;
}
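/***************************************************************************
 * Accessor
 ***************************************************************************/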
u32 qed_spq_get_cid(struct qed_hwfn *p_hwfn)
{
	if (!p_hwfn->p_spq)
		return 0xffffffff;	/* illegal */
	return p_hwfn->p_spq->cid;
}
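/***************************************************************************
 * Posting new Ramrods
 ***************************************************************************/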
static int qed_spq_post_list(struct qed_hwfn *p_hwfn,
			     struct list_head *head, u32 keep_reserve)
{
	struct qed_spq *p_spq = p_hwfn->p_spq;
	int rc;

	while (qed_chain_get_elem_left(&p_spq->chain) > keep_reserve &&
	       !list_empty(head)) {
		struct qed_spq_entry *p_ent =
			list_first_entry(head, struct qed_spq_entry, list);

		list_move_tail(&p_ent->list, &p_spq->completion_pending);
		p_spq->comp_sent_count++;

		rc = qed_spq_hw_post(p_hwfn, p_spq, p_ent);
		if (rc) {
			list_del(&p_ent->list);
			__qed_spq_return_entry(p_hwfn, p_ent);
			return rc;
		}
	}

	return 0;
}

int qed_spq_pend_post(struct qed_hwfn *p_hwfn)
{
	struct qed_spq *p_spq = p_hwfn->p_spq;
	struct qed_spq_entry *p_ent = NULL;

	while (!list_empty(&p_spq->free_pool)) {
		if (list_empty(&p_spq->unlimited_pending))
			break;

		p_ent = list_first_entry(&p_spq->unlimited_pending,
					 struct qed_spq_entry, list);
		if (!p_ent)
			return -EINVAL;

		list_del(&p_ent->list);

		qed_spq_add_entry(p_hwfn, p_ent, p_ent->priority);
	}

	return qed_spq_post_list(p_hwfn, &p_spq->pending,
				 SPQ_HIGH_PRI_RESERVE_DEFAULT);
}

static void qed_spq_recov_set_ret_code(struct qed_spq_entry *p_ent,
				       u8 *fw_return_code)
{
	if (!fw_return_code)
		return;

	if (p_ent->elem.hdr.protocol_id == PROTOCOLID_ROCE ||
	    p_ent->elem.hdr.protocol_id == PROTOCOLID_IWARP)
		*fw_return_code = RDMA_RETURN_OK;
}
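/* Avoid overriding of SPQ entries when getting out-of-order completions, by
 * marking the completions in a bitmap and increasing the chain consumer only
 * for the first successive completed entries.
 */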
static void qed_spq_comp_bmap_update(struct qed_hwfn *p_hwfn, __le16 echo)
{
	u16 pos = le16_to_cpu(echo) % SPQ_RING_SIZE;
	struct qed_spq *p_spq = p_hwfn->p_spq;

	__set_bit(pos, p_spq->p_comp_bitmap);
	while (test_bit(p_spq->comp_bitmap_idx,
			p_spq->p_comp_bitmap)) {
		__clear_bit(p_spq->comp_bitmap_idx,
			    p_spq->p_comp_bitmap);
		p_spq->comp_bitmap_idx++;
		qed_chain_return_produced(&p_spq->chain);
	}
}

int qed_spq_post(struct qed_hwfn *p_hwfn,
		 struct qed_spq_entry *p_ent, u8 *fw_return_code)
{
	int rc = 0;
	struct qed_spq *p_spq = p_hwfn ? p_hwfn->p_spq : NULL;
	bool b_ret_ent = true;
	bool eblock;

	if (!p_hwfn)
		return -EINVAL;

	if (!p_ent) {
		DP_NOTICE(p_hwfn, "Got a NULL pointer\n");
		return -EINVAL;
	}

	if (p_hwfn->cdev->recov_in_prog) {
		DP_VERBOSE(p_hwfn,
			   QED_MSG_SPQ,
			   "Recovery is in progress. Skip spq post [cmd %02x protocol %02x]\n",
			   p_ent->elem.hdr.cmd_id, p_ent->elem.hdr.protocol_id);

		/* Let the flow complete w/o any error handling */
		qed_spq_recov_set_ret_code(p_ent, fw_return_code);
		return 0;
	}

	/* Complete the entry */
	rc = qed_spq_fill_entry(p_hwfn, p_ent);

	spin_lock_bh(&p_spq->lock);

	/* Check return value after LOCK is taken for cleaner error flow */
	if (rc)
		goto spq_post_fail;

	/* Check if entry is in block mode before qed_spq_add_entry,
	 * which might kfree p_ent.
	 */
	eblock = (p_ent->comp_mode == QED_SPQ_MODE_EBLOCK);

	/* Add the request to the pending queue */
	rc = qed_spq_add_entry(p_hwfn, p_ent, p_ent->priority);
	if (rc)
		goto spq_post_fail;

	rc = qed_spq_pend_post(p_hwfn);
	if (rc) {
		/* Since it's possible that pending failed for a different
		 * entry [although unlikely], the failed entry was already
		 * dealt with; No need to return it here.
		 */
		b_ret_ent = false;
		goto spq_post_fail;
	}

	spin_unlock_bh(&p_spq->lock);

	if (eblock) {
		/* For entries in QED BLOCK mode, the completion code cannot
		 * perform the necessary cleanup - if it did, we couldn't
		 * access p_ent here to see whether it's successful or not.
		 * Thus, after gaining the answer perform the cleanup here.
		 */
		rc = qed_spq_block(p_hwfn, p_ent, fw_return_code,
				   p_ent->queue == &p_spq->unlimited_pending);

		if (p_ent->queue == &p_spq->unlimited_pending) {
			struct qed_spq_entry *p_post_ent = p_ent->post_ent;

			kfree(p_ent);

			/* Return the entry which was actually posted */
			p_ent = p_post_ent;
		}

		if (rc)
			goto spq_post_fail2;

		/* Return to pool */
		qed_spq_return_entry(p_hwfn, p_ent);
	}
	return rc;

spq_post_fail2:
	spin_lock_bh(&p_spq->lock);
	list_del(&p_ent->list);
	qed_spq_comp_bmap_update(p_hwfn, p_ent->elem.hdr.echo);

spq_post_fail:
	/* Return to the free pool */
	if (b_ret_ent)
		__qed_spq_return_entry(p_hwfn, p_ent);
	spin_unlock_bh(&p_spq->lock);

	return rc;
}

int qed_spq_completion(struct qed_hwfn *p_hwfn,
		       __le16 echo,
		       u8 fw_return_code,
		       union event_ring_data *p_data)
{
	struct qed_spq *p_spq;
	struct qed_spq_entry *p_ent = NULL;
	struct qed_spq_entry *tmp;
	struct qed_spq_entry *found = NULL;

	if (!p_hwfn)
		return -EINVAL;

	p_spq = p_hwfn->p_spq;
	if (!p_spq)
		return -EINVAL;

	spin_lock_bh(&p_spq->lock);
	list_for_each_entry_safe(p_ent, tmp, &p_spq->completion_pending, list) {
		if (p_ent->elem.hdr.echo == echo) {
			list_del(&p_ent->list);
			qed_spq_comp_bmap_update(p_hwfn, echo);
			p_spq->comp_count++;
			found = p_ent;
			break;
		}

		/* This is relatively uncommon - depends on scenarios
		 * which have multiple per-PF sent ramrods.
		 */
		DP_VERBOSE(p_hwfn, QED_MSG_SPQ,
			   "Got completion for echo %04x - doesn't match echo %04x in completion pending list\n",
			   le16_to_cpu(echo),
			   le16_to_cpu(p_ent->elem.hdr.echo));
	}

	/* Release lock before callback, as callback may post
	 * an additional ramrod.
	 */
	spin_unlock_bh(&p_spq->lock);

	if (!found) {
		DP_NOTICE(p_hwfn,
			  "Failed to find an entry this EQE [echo %04x] completes\n",
			  le16_to_cpu(echo));
		return -EEXIST;
	}

	DP_VERBOSE(p_hwfn, QED_MSG_SPQ,
		   "Complete EQE [echo %04x]: func %p cookie %p\n",
		   le16_to_cpu(echo),
		   p_ent->comp_cb.function, p_ent->comp_cb.cookie);
	if (found->comp_cb.function)
		found->comp_cb.function(p_hwfn, found->comp_cb.cookie, p_data,
					fw_return_code);
	else
		DP_VERBOSE(p_hwfn,
			   QED_MSG_SPQ,
			   "Got a completion without a callback function\n");

	if (found->comp_mode != QED_SPQ_MODE_EBLOCK)
		/* EBLOCK is responsible for returning its own entry into the
		 * free list.
		 */
		qed_spq_return_entry(p_hwfn, found);

	return 0;
}

int qed_consq_alloc(struct qed_hwfn *p_hwfn)
{
	struct qed_consq *p_consq;

	/* Allocate ConsQ struct */
	p_consq = kzalloc(sizeof(*p_consq), GFP_KERNEL);
	if (!p_consq)
		return -ENOMEM;

	/* Allocate and initialize ConsQ chain */
	if (qed_chain_alloc(p_hwfn->cdev,
			    QED_CHAIN_USE_TO_PRODUCE,
			    QED_CHAIN_MODE_PBL,
			    QED_CHAIN_CNT_TYPE_U16,
			    QED_CHAIN_PAGE_SIZE / 0x80,
			    0x80, &p_consq->chain, NULL))
		goto consq_allocate_fail;

	p_hwfn->p_consq = p_consq;
	return 0;

consq_allocate_fail:
	kfree(p_consq);
	return -ENOMEM;
}

void qed_consq_setup(struct qed_hwfn *p_hwfn)
{
	qed_chain_reset(&p_hwfn->p_consq->chain);
}

void qed_consq_free(struct qed_hwfn *p_hwfn)
{
	if (!p_hwfn->p_consq)
		return;

	qed_chain_free(p_hwfn->cdev, &p_hwfn->p_consq->chain);

	kfree(p_hwfn->p_consq);
	p_hwfn->p_consq = NULL;
}