/* Broadcom NetXtreme-E RoCE driver.
 *
 * Description: Fast Path Operators
 */
#include <linux/interrupt.h>
#include <linux/spinlock.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/pci.h>
#include <linux/prefetch.h>

#include "roce_hsi.h"

#include "qplib_res.h"
#include "qplib_rcfw.h"
#include "qplib_sp.h"
#include "qplib_fp.h"

static void bnxt_qplib_arm_cq_enable(struct bnxt_qplib_cq *cq);
static void __clean_cq(struct bnxt_qplib_cq *cq, u64 qp);
static void bnxt_qplib_arm_srq(struct bnxt_qplib_srq *srq, u32 arm_type);

static void bnxt_qplib_cancel_phantom_processing(struct bnxt_qplib_qp *qp)
{
	qp->sq.condition = false;
	qp->sq.send_phantom = false;
	qp->sq.single = false;
}

/* Flush list */
static void __bnxt_qplib_add_flush_qp(struct bnxt_qplib_qp *qp)
{
	struct bnxt_qplib_cq *scq, *rcq;

	scq = qp->scq;
	rcq = qp->rcq;

	if (!qp->sq.flushed) {
		dev_dbg(&scq->hwq.pdev->dev,
			"QPLIB: FP: Adding to SQ Flush list = %p", qp);
		bnxt_qplib_cancel_phantom_processing(qp);
		list_add_tail(&qp->sq_flush, &scq->sqf_head);
		qp->sq.flushed = true;
	}
	if (!qp->srq) {
		if (!qp->rq.flushed) {
			dev_dbg(&rcq->hwq.pdev->dev,
				"QPLIB: FP: Adding to RQ Flush list = %p", qp);
			list_add_tail(&qp->rq_flush, &rcq->rqf_head);
			qp->rq.flushed = true;
		}
	}
}

static void bnxt_qplib_acquire_cq_flush_locks(struct bnxt_qplib_qp *qp,
					      unsigned long *flags)
	__acquires(&qp->scq->flush_lock) __acquires(&qp->rcq->flush_lock)
{
	/* The send and recv CQ may alias; take the flush locks in a fixed
	 * order (SCQ first) and only once when they are the same CQ.
	 */
	spin_lock_irqsave(&qp->scq->flush_lock, *flags);
	if (qp->scq == qp->rcq)
		__acquire(&qp->rcq->flush_lock);
	else
		spin_lock(&qp->rcq->flush_lock);
}

static void bnxt_qplib_release_cq_flush_locks(struct bnxt_qplib_qp *qp,
					      unsigned long *flags)
	__releases(&qp->scq->flush_lock) __releases(&qp->rcq->flush_lock)
{
	if (qp->scq == qp->rcq)
		__release(&qp->rcq->flush_lock);
	else
		spin_unlock(&qp->rcq->flush_lock);
	spin_unlock_irqrestore(&qp->scq->flush_lock, *flags);
}

void bnxt_qplib_add_flush_qp(struct bnxt_qplib_qp *qp)
{
	unsigned long flags;

	bnxt_qplib_acquire_cq_flush_locks(qp, &flags);
	__bnxt_qplib_add_flush_qp(qp);
	bnxt_qplib_release_cq_flush_locks(qp, &flags);
}
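
/* Illustrative sketch (not part of the driver): a QP moved to the error
 * state is typically parked on its CQs' flush lists so poll_cq() can emit
 * flushed-error completions. A caller-side flow, under the assumption that
 * the QP was just modified to ERR, would look roughly like:
 *
 *	qp->state = CMDQ_MODIFY_QP_NEW_STATE_ERR;
 *	bnxt_qplib_add_flush_qp(qp);	// queue on scq/rcq flush lists
 *	// later, bnxt_qplib_clean_qp(qp) unlinks it and resets the rings
 */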

static void __bnxt_qplib_del_flush_qp(struct bnxt_qplib_qp *qp)
{
	if (qp->sq.flushed) {
		qp->sq.flushed = false;
		list_del(&qp->sq_flush);
	}
	if (!qp->srq) {
		if (qp->rq.flushed) {
			qp->rq.flushed = false;
			list_del(&qp->rq_flush);
		}
	}
}

void bnxt_qplib_clean_qp(struct bnxt_qplib_qp *qp)
{
	unsigned long flags;

	bnxt_qplib_acquire_cq_flush_locks(qp, &flags);
	__clean_cq(qp->scq, (u64)(unsigned long)qp);
	qp->sq.hwq.prod = 0;
	qp->sq.hwq.cons = 0;
	__clean_cq(qp->rcq, (u64)(unsigned long)qp);
	qp->rq.hwq.prod = 0;
	qp->rq.hwq.cons = 0;

	__bnxt_qplib_del_flush_qp(qp);
	bnxt_qplib_release_cq_flush_locks(qp, &flags);
}

static void bnxt_qpn_cqn_sched_task(struct work_struct *work)
{
	struct bnxt_qplib_nq_work *nq_work =
			container_of(work, struct bnxt_qplib_nq_work, work);

	struct bnxt_qplib_cq *cq = nq_work->cq;
	struct bnxt_qplib_nq *nq = nq_work->nq;

	if (cq && nq) {
		spin_lock_bh(&cq->compl_lock);
		if (atomic_read(&cq->arm_state) && nq->cqn_handler) {
			dev_dbg(&nq->pdev->dev,
				"%s:Trigger cq = %p event nq = %p\n",
				__func__, cq, nq);
			nq->cqn_handler(nq, cq);
		}
		spin_unlock_bh(&cq->compl_lock);
	}
	kfree(nq_work);
}

static void bnxt_qplib_free_qp_hdr_buf(struct bnxt_qplib_res *res,
				       struct bnxt_qplib_qp *qp)
{
	struct bnxt_qplib_q *rq = &qp->rq;
	struct bnxt_qplib_q *sq = &qp->sq;

	if (qp->rq_hdr_buf)
		dma_free_coherent(&res->pdev->dev,
				  rq->hwq.max_elements * qp->rq_hdr_buf_size,
				  qp->rq_hdr_buf, qp->rq_hdr_buf_map);
	if (qp->sq_hdr_buf)
		dma_free_coherent(&res->pdev->dev,
				  sq->hwq.max_elements * qp->sq_hdr_buf_size,
				  qp->sq_hdr_buf, qp->sq_hdr_buf_map);
	qp->rq_hdr_buf = NULL;
	qp->sq_hdr_buf = NULL;
	qp->rq_hdr_buf_map = 0;
	qp->sq_hdr_buf_map = 0;
	qp->sq_hdr_buf_size = 0;
	qp->rq_hdr_buf_size = 0;
}

static int bnxt_qplib_alloc_qp_hdr_buf(struct bnxt_qplib_res *res,
				       struct bnxt_qplib_qp *qp)
{
	struct bnxt_qplib_q *rq = &qp->rq;
	struct bnxt_qplib_q *sq = &qp->sq;
	int rc = 0;

	if (qp->sq_hdr_buf_size && sq->hwq.max_elements) {
		qp->sq_hdr_buf = dma_alloc_coherent(&res->pdev->dev,
					sq->hwq.max_elements *
					qp->sq_hdr_buf_size,
					&qp->sq_hdr_buf_map, GFP_KERNEL);
		if (!qp->sq_hdr_buf) {
			rc = -ENOMEM;
			dev_err(&res->pdev->dev,
				"QPLIB: Failed to create sq_hdr_buf");
			goto fail;
		}
	}

	if (qp->rq_hdr_buf_size && rq->hwq.max_elements) {
		qp->rq_hdr_buf = dma_alloc_coherent(&res->pdev->dev,
						    rq->hwq.max_elements *
						    qp->rq_hdr_buf_size,
						    &qp->rq_hdr_buf_map,
						    GFP_KERNEL);
		if (!qp->rq_hdr_buf) {
			rc = -ENOMEM;
			dev_err(&res->pdev->dev,
				"QPLIB: Failed to create rq_hdr_buf");
			goto fail;
		}
	}
	return 0;

fail:
	bnxt_qplib_free_qp_hdr_buf(res, qp);
	return rc;
}

static void bnxt_qplib_service_nq(unsigned long data)
{
	struct bnxt_qplib_nq *nq = (struct bnxt_qplib_nq *)data;
	struct bnxt_qplib_hwq *hwq = &nq->hwq;
	struct nq_base *nqe, **nq_ptr;
	struct bnxt_qplib_cq *cq;
	int num_cqne_processed = 0;
	int num_srqne_processed = 0;
	u32 sw_cons, raw_cons;
	u16 type;
	int budget = nq->budget;
	uintptr_t q_handle;

	/* Service the NQ until empty or out of budget */
	raw_cons = hwq->cons;
	while (budget--) {
		sw_cons = HWQ_CMP(raw_cons, hwq);
		nq_ptr = (struct nq_base **)hwq->pbl_ptr;
		nqe = &nq_ptr[NQE_PG(sw_cons)][NQE_IDX(sw_cons)];
		if (!NQE_CMP_VALID(nqe, raw_cons, hwq->max_elements))
			break;

		/*
		 * The valid test of the entry must be done first before
		 * reading any further.
		 */
		dma_rmb();

		type = le16_to_cpu(nqe->info10_type) & NQ_BASE_TYPE_MASK;
		switch (type) {
		case NQ_BASE_TYPE_CQ_NOTIFICATION:
		{
			struct nq_cn *nqcne = (struct nq_cn *)nqe;

			q_handle = le32_to_cpu(nqcne->cq_handle_low);
			q_handle |= (u64)le32_to_cpu(nqcne->cq_handle_high)
						     << 32;
			cq = (struct bnxt_qplib_cq *)(unsigned long)q_handle;
			bnxt_qplib_arm_cq_enable(cq);
			spin_lock_bh(&cq->compl_lock);
			atomic_set(&cq->arm_state, 0);
			if (!nq->cqn_handler(nq, (cq)))
				num_cqne_processed++;
			else
				dev_warn(&nq->pdev->dev,
					 "QPLIB: cqn - type 0x%x not handled",
					 type);
			spin_unlock_bh(&cq->compl_lock);
			break;
		}
		case NQ_BASE_TYPE_SRQ_EVENT:
		{
			struct nq_srq_event *nqsrqe =
						(struct nq_srq_event *)nqe;

			q_handle = le32_to_cpu(nqsrqe->srq_handle_low);
			q_handle |= (u64)le32_to_cpu(nqsrqe->srq_handle_high)
				     << 32;
			bnxt_qplib_arm_srq((struct bnxt_qplib_srq *)q_handle,
					   DBR_DBR_TYPE_SRQ_ARMENA);
			if (!nq->srqn_handler(nq,
					      (struct bnxt_qplib_srq *)q_handle,
					      nqsrqe->event))
				num_srqne_processed++;
			else
				dev_warn(&nq->pdev->dev,
					 "QPLIB: SRQ event 0x%x not handled",
					 nqsrqe->event);
			break;
		}
		case NQ_BASE_TYPE_DBQ_EVENT:
			break;
		default:
			dev_warn(&nq->pdev->dev,
				 "QPLIB: nqe with type = 0x%x not handled",
				 type);
			break;
		}
		raw_cons++;
	}
	if (hwq->cons != raw_cons) {
		hwq->cons = raw_cons;
		NQ_DB_REARM(nq->bar_reg_iomem, hwq->cons, hwq->max_elements);
	}
}
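
/* Illustrative note (not part of the driver): NQE_CMP_VALID compares the
 * NQE's valid bit against a phase that flips each time the raw consumer
 * index wraps the ring, so stale entries from the previous lap are never
 * consumed. A minimal sketch of the idea, with hypothetical names:
 *
 *	bool valid = !!(nqe->info63_v & NQ_BASE_V);
 *	bool phase = !(raw_cons & max_elements);	// lap parity
 *	if (valid != phase)
 *		break;	// producer has not reached this slot yet
 */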

static irqreturn_t bnxt_qplib_nq_irq(int irq, void *dev_instance)
{
	struct bnxt_qplib_nq *nq = dev_instance;
	struct bnxt_qplib_hwq *hwq = &nq->hwq;
	struct nq_base **nq_ptr;
	u32 sw_cons;

	/* Prefetch the NQ element */
	sw_cons = HWQ_CMP(hwq->cons, hwq);
	nq_ptr = (struct nq_base **)nq->hwq.pbl_ptr;
	prefetch(&nq_ptr[NQE_PG(sw_cons)][NQE_IDX(sw_cons)]);

	/* Fan out to CPU affinitized kthreads? */
	tasklet_schedule(&nq->worker);

	return IRQ_HANDLED;
}

void bnxt_qplib_nq_stop_irq(struct bnxt_qplib_nq *nq, bool kill)
{
	tasklet_disable(&nq->worker);
	/* Mask h/w interrupt */
	NQ_DB(nq->bar_reg_iomem, nq->hwq.cons, nq->hwq.max_elements);
	/* Sync with last running IRQ handler */
	synchronize_irq(nq->vector);
	if (kill)
		tasklet_kill(&nq->worker);
	if (nq->requested) {
		irq_set_affinity_hint(nq->vector, NULL);
		free_irq(nq->vector, nq);
		nq->requested = false;
	}
}

void bnxt_qplib_disable_nq(struct bnxt_qplib_nq *nq)
{
	if (nq->cqn_wq) {
		destroy_workqueue(nq->cqn_wq);
		nq->cqn_wq = NULL;
	}

	/* Make sure the HW is stopped! */
	bnxt_qplib_nq_stop_irq(nq, true);

	if (nq->bar_reg_iomem)
		iounmap(nq->bar_reg_iomem);
	nq->bar_reg_iomem = NULL;

	nq->cqn_handler = NULL;
	nq->srqn_handler = NULL;
	nq->vector = 0;
}

int bnxt_qplib_nq_start_irq(struct bnxt_qplib_nq *nq, int nq_indx,
			    int msix_vector, bool need_init)
{
	int rc;

	if (nq->requested)
		return -EFAULT;

	nq->vector = msix_vector;
	if (need_init)
		tasklet_init(&nq->worker, bnxt_qplib_service_nq,
			     (unsigned long)nq);
	else
		tasklet_enable(&nq->worker);

	snprintf(nq->name, sizeof(nq->name), "bnxt_qplib_nq-%d", nq_indx);
	rc = request_irq(nq->vector, bnxt_qplib_nq_irq, 0, nq->name, nq);
	if (rc)
		return rc;

	cpumask_clear(&nq->mask);
	cpumask_set_cpu(nq_indx, &nq->mask);
	rc = irq_set_affinity_hint(nq->vector, &nq->mask);
	if (rc) {
		dev_warn(&nq->pdev->dev,
			 "QPLIB: set affinity failed; vector: %d nq_idx: %d\n",
			 nq->vector, nq_indx);
	}
	nq->requested = true;
	NQ_DB_REARM(nq->bar_reg_iomem, nq->hwq.cons, nq->hwq.max_elements);

	return rc;
}
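
/* Illustrative usage (not part of the driver): the stop/start IRQ pair lets
 * the L2 driver re-balance MSI-X vectors without tearing the NQ down. A
 * hedged sketch of a re-init flow, assuming new_vector came from the PCI
 * core:
 *
 *	bnxt_qplib_nq_stop_irq(nq, false);	// keep the tasklet alive
 *	rc = bnxt_qplib_nq_start_irq(nq, nq_indx, new_vector, false);
 *	if (rc)
 *		dev_err(&nq->pdev->dev, "failed to re-arm NQ irq\n");
 */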

int bnxt_qplib_enable_nq(struct pci_dev *pdev, struct bnxt_qplib_nq *nq,
			 int nq_idx, int msix_vector, int bar_reg_offset,
			 int (*cqn_handler)(struct bnxt_qplib_nq *nq,
					    struct bnxt_qplib_cq *),
			 int (*srqn_handler)(struct bnxt_qplib_nq *nq,
					     struct bnxt_qplib_srq *,
					     u8 event))
{
	resource_size_t nq_base;
	int rc = -1;

	if (cqn_handler)
		nq->cqn_handler = cqn_handler;

	if (srqn_handler)
		nq->srqn_handler = srqn_handler;

	/* Have a task to schedule CQ notifiers in post send case */
	nq->cqn_wq = create_singlethread_workqueue("bnxt_qplib_nq");
	if (!nq->cqn_wq)
		return -ENOMEM;

	nq->bar_reg = NQ_CONS_PCI_BAR_REGION;
	nq->bar_reg_off = bar_reg_offset;
	nq_base = pci_resource_start(pdev, nq->bar_reg);
	if (!nq_base) {
		rc = -ENOMEM;
		goto fail;
	}
	nq->bar_reg_iomem = ioremap_nocache(nq_base + nq->bar_reg_off, 4);
	if (!nq->bar_reg_iomem) {
		rc = -ENOMEM;
		goto fail;
	}

	rc = bnxt_qplib_nq_start_irq(nq, nq_idx, msix_vector, true);
	if (rc) {
		dev_err(&nq->pdev->dev,
			"QPLIB: Failed to request irq for nq-idx %d", nq_idx);
		goto fail;
	}

	return 0;
fail:
	bnxt_qplib_disable_nq(nq);
	return rc;
}

void bnxt_qplib_free_nq(struct bnxt_qplib_nq *nq)
{
	if (nq->hwq.max_elements) {
		bnxt_qplib_free_hwq(nq->pdev, &nq->hwq);
		nq->hwq.max_elements = 0;
	}
}

int bnxt_qplib_alloc_nq(struct pci_dev *pdev, struct bnxt_qplib_nq *nq)
{
	nq->pdev = pdev;
	if (!nq->hwq.max_elements ||
	    nq->hwq.max_elements > BNXT_QPLIB_NQE_MAX_CNT)
		nq->hwq.max_elements = BNXT_QPLIB_NQE_MAX_CNT;

	if (bnxt_qplib_alloc_init_hwq(nq->pdev, &nq->hwq, NULL, 0,
				      &nq->hwq.max_elements,
				      BNXT_QPLIB_MAX_NQE_ENTRY_SIZE, 0,
				      PAGE_SIZE, HWQ_TYPE_L2_CMPL))
		return -ENOMEM;

	nq->budget = 8;
	return 0;
}

/* SRQ */
static void bnxt_qplib_arm_srq(struct bnxt_qplib_srq *srq, u32 arm_type)
{
	struct bnxt_qplib_hwq *srq_hwq = &srq->hwq;
	struct dbr_dbr db_msg = { 0 };
	void __iomem *db;
	u32 sw_prod = 0;

	/* Ring DB */
	sw_prod = (arm_type == DBR_DBR_TYPE_SRQ_ARM) ? srq->threshold :
		   HWQ_CMP(srq_hwq->prod, srq_hwq);
	db_msg.index = cpu_to_le32((sw_prod << DBR_DBR_INDEX_SFT) &
				   DBR_DBR_INDEX_MASK);
	db_msg.type_xid = cpu_to_le32(((srq->id << DBR_DBR_XID_SFT) &
				       DBR_DBR_XID_MASK) | arm_type);
	db = (arm_type == DBR_DBR_TYPE_SRQ_ARMENA) ?
		srq->dbr_base : srq->dpi->dbr;
	/* Flush the queue writes before ringing the doorbell */
	wmb();
	__iowrite64_copy(db, &db_msg, sizeof(db_msg) / sizeof(u64));
}
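
/* Worked example (illustrative only): the 64-bit doorbell record packs the
 * ring index in the low word and "type | xid" in the high word. For an SRQ
 * with id 5 armed at threshold 16, the record would be built roughly as:
 *
 *	db_msg.index    = cpu_to_le32(16);	// masked into the index bits
 *	db_msg.type_xid = cpu_to_le32((5 << DBR_DBR_XID_SFT) |
 *				      DBR_DBR_TYPE_SRQ_ARM);
 *
 * The wmb() above guarantees the WQE contents are visible to the device
 * before the doorbell write is posted.
 */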

int bnxt_qplib_destroy_srq(struct bnxt_qplib_res *res,
			   struct bnxt_qplib_srq *srq)
{
	struct bnxt_qplib_rcfw *rcfw = res->rcfw;
	struct cmdq_destroy_srq req;
	struct creq_destroy_srq_resp resp;
	u16 cmd_flags = 0;
	int rc;

	RCFW_CMD_PREP(req, DESTROY_SRQ, cmd_flags);

	/* Configure the request */
	req.srq_cid = cpu_to_le32(srq->id);

	rc = bnxt_qplib_rcfw_send_message(rcfw, (void *)&req,
					  (void *)&resp, NULL, 0);
	if (rc)
		return rc;

	bnxt_qplib_free_hwq(res->pdev, &srq->hwq);
	kfree(srq->swq);
	return 0;
}

int bnxt_qplib_create_srq(struct bnxt_qplib_res *res,
			  struct bnxt_qplib_srq *srq)
{
	struct bnxt_qplib_rcfw *rcfw = res->rcfw;
	struct cmdq_create_srq req;
	struct creq_create_srq_resp resp;
	struct bnxt_qplib_pbl *pbl;
	u16 cmd_flags = 0;
	int rc, idx;

	srq->hwq.max_elements = srq->max_wqe;
	rc = bnxt_qplib_alloc_init_hwq(res->pdev, &srq->hwq, srq->sglist,
				       srq->nmap, &srq->hwq.max_elements,
				       BNXT_QPLIB_MAX_RQE_ENTRY_SIZE, 0,
				       PAGE_SIZE, HWQ_TYPE_QUEUE);
	if (rc)
		goto exit;

	srq->swq = kcalloc(srq->hwq.max_elements, sizeof(*srq->swq),
			   GFP_KERNEL);
	if (!srq->swq) {
		rc = -ENOMEM;
		goto fail;
	}

	RCFW_CMD_PREP(req, CREATE_SRQ, cmd_flags);

	/* Configure the request */
	req.dpi = cpu_to_le32(srq->dpi->dpi);
	req.srq_handle = cpu_to_le64((uintptr_t)srq);

	req.srq_size = cpu_to_le16((u16)srq->hwq.max_elements);
	pbl = &srq->hwq.pbl[PBL_LVL_0];
	req.pg_size_lvl = cpu_to_le16((((u16)srq->hwq.level &
				      CMDQ_CREATE_SRQ_LVL_MASK) <<
				      CMDQ_CREATE_SRQ_LVL_SFT) |
				      (pbl->pg_size == ROCE_PG_SIZE_4K ?
				       CMDQ_CREATE_SRQ_PG_SIZE_PG_4K :
				       pbl->pg_size == ROCE_PG_SIZE_8K ?
				       CMDQ_CREATE_SRQ_PG_SIZE_PG_8K :
				       pbl->pg_size == ROCE_PG_SIZE_64K ?
				       CMDQ_CREATE_SRQ_PG_SIZE_PG_64K :
				       pbl->pg_size == ROCE_PG_SIZE_2M ?
				       CMDQ_CREATE_SRQ_PG_SIZE_PG_2M :
				       pbl->pg_size == ROCE_PG_SIZE_8M ?
				       CMDQ_CREATE_SRQ_PG_SIZE_PG_8M :
				       pbl->pg_size == ROCE_PG_SIZE_1G ?
				       CMDQ_CREATE_SRQ_PG_SIZE_PG_1G :
				       CMDQ_CREATE_SRQ_PG_SIZE_PG_4K));
	req.pbl = cpu_to_le64(pbl->pg_map_arr[0]);
	req.pd_id = cpu_to_le32(srq->pd->id);
	req.eventq_id = cpu_to_le16(srq->eventq_hw_ring_id);

	rc = bnxt_qplib_rcfw_send_message(rcfw, (void *)&req,
					  (void *)&resp, NULL, 0);
	if (rc)
		goto fail;

	spin_lock_init(&srq->lock);
	srq->start_idx = 0;
	srq->last_idx = srq->hwq.max_elements - 1;
	/* Chain the SWQ slots into a free list ending in -1 */
	for (idx = 0; idx < srq->hwq.max_elements; idx++)
		srq->swq[idx].next_idx = idx + 1;
	srq->swq[srq->last_idx].next_idx = -1;

	srq->id = le32_to_cpu(resp.xid);
	srq->dbr_base = res->dpi_tbl.dbr_bar_reg_iomem;
	if (srq->threshold)
		bnxt_qplib_arm_srq(srq, DBR_DBR_TYPE_SRQ_ARMENA);
	srq->arm_req = false;

	return 0;
fail:
	bnxt_qplib_free_hwq(res->pdev, &srq->hwq);
	kfree(srq->swq);
exit:
	return rc;
}

int bnxt_qplib_modify_srq(struct bnxt_qplib_res *res,
			  struct bnxt_qplib_srq *srq)
{
	struct bnxt_qplib_hwq *srq_hwq = &srq->hwq;
	u32 sw_prod, sw_cons, count = 0;

	sw_prod = HWQ_CMP(srq_hwq->prod, srq_hwq);
	sw_cons = HWQ_CMP(srq_hwq->cons, srq_hwq);

	count = sw_prod > sw_cons ? sw_prod - sw_cons :
				    srq_hwq->max_elements - sw_cons + sw_prod;
	if (count > srq->threshold) {
		/* Trigger the new SRQ_LIMIT arm cycle right away */
		srq->arm_req = false;
		bnxt_qplib_arm_srq(srq, DBR_DBR_TYPE_SRQ_ARM);
	} else {
		/* Deferred arming: arm once enough RQEs are posted */
		srq->arm_req = true;
	}

	return 0;
}
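
/* Worked example (illustrative only): the occupancy formula above is plain
 * circular-buffer arithmetic on the masked indices. With max_elements = 64,
 * sw_prod = 3 and sw_cons = 60 (producer has wrapped), the occupancy is
 *
 *	count = 64 - 60 + 3 = 7
 *
 * so with threshold = 16 the arm is deferred (arm_req = true) until
 * bnxt_qplib_post_srq_recv() refills the queue past the threshold.
 */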

int bnxt_qplib_query_srq(struct bnxt_qplib_res *res,
			 struct bnxt_qplib_srq *srq)
{
	struct bnxt_qplib_rcfw *rcfw = res->rcfw;
	struct cmdq_query_srq req;
	struct creq_query_srq_resp resp;
	struct bnxt_qplib_rcfw_sbuf *sbuf;
	struct creq_query_srq_resp_sb *sb;
	u16 cmd_flags = 0;
	int rc = 0;

	RCFW_CMD_PREP(req, QUERY_SRQ, cmd_flags);
	req.srq_cid = cpu_to_le32(srq->id);

	/* Configure the request */
	sbuf = bnxt_qplib_rcfw_alloc_sbuf(rcfw, sizeof(*sb));
	if (!sbuf)
		return -ENOMEM;
	sb = sbuf->sb;
	rc = bnxt_qplib_rcfw_send_message(rcfw, (void *)&req, (void *)&resp,
					  (void *)sbuf, 0);
	/* Consume the side buffer only if the command succeeded */
	if (!rc)
		srq->threshold = le16_to_cpu(sb->srq_limit);
	bnxt_qplib_rcfw_free_sbuf(rcfw, sbuf);

	return rc;
}

int bnxt_qplib_post_srq_recv(struct bnxt_qplib_srq *srq,
			     struct bnxt_qplib_swqe *wqe)
{
	struct bnxt_qplib_hwq *srq_hwq = &srq->hwq;
	struct rq_wqe *srqe, **srqe_ptr;
	struct sq_sge *hw_sge;
	u32 sw_prod, sw_cons, count = 0;
	int i, rc = 0, next;

	spin_lock(&srq_hwq->lock);
	if (srq->start_idx == srq->last_idx) {
		dev_err(&srq_hwq->pdev->dev, "QPLIB: FP: SRQ (0x%x) is full!",
			srq->id);
		rc = -EINVAL;
		spin_unlock(&srq_hwq->lock);
		goto done;
	}
	/* Pop the next free SWQ slot off the free list */
	next = srq->start_idx;
	srq->start_idx = srq->swq[next].next_idx;
	spin_unlock(&srq_hwq->lock);

	sw_prod = HWQ_CMP(srq_hwq->prod, srq_hwq);
	srqe_ptr = (struct rq_wqe **)srq_hwq->pbl_ptr;
	srqe = &srqe_ptr[RQE_PG(sw_prod)][RQE_IDX(sw_prod)];
	memset(srqe, 0, BNXT_QPLIB_MAX_RQE_ENTRY_SIZE);
	/* Calculate wqe_size16 and data_len */
	for (i = 0, hw_sge = (struct sq_sge *)srqe->data;
	     i < wqe->num_sge; i++, hw_sge++) {
		hw_sge->va_or_pa = cpu_to_le64(wqe->sg_list[i].addr);
		hw_sge->l_key = cpu_to_le32(wqe->sg_list[i].lkey);
		hw_sge->size = cpu_to_le32(wqe->sg_list[i].size);
	}
	srqe->wqe_type = wqe->type;
	srqe->flags = wqe->flags;
	srqe->wqe_size = wqe->num_sge +
			((offsetof(typeof(*srqe), data) + 15) >> 4);
	srqe->wr_id[0] = cpu_to_le32((u32)next);
	srq->swq[next].wr_id = wqe->wr_id;

	srq_hwq->prod++;

	spin_lock(&srq_hwq->lock);
	sw_prod = HWQ_CMP(srq_hwq->prod, srq_hwq);
	/* retaining srq_hwq->cons for this logic
	 * actually the lock is only
	 * required to read srq_hwq->cons
	 */
	sw_cons = HWQ_CMP(srq_hwq->cons, srq_hwq);
	count = sw_prod > sw_cons ? sw_prod - sw_cons :
				    srq_hwq->max_elements - sw_cons + sw_prod;
	spin_unlock(&srq_hwq->lock);
	/* Ring DB */
	bnxt_qplib_arm_srq(srq, DBR_DBR_TYPE_SRQ);
	if (srq->arm_req == true && count > srq->threshold) {
		srq->arm_req = false;
		bnxt_qplib_arm_srq(srq, DBR_DBR_TYPE_SRQ_ARM);
	}
done:
	return rc;
}
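
/* Illustrative sketch (not part of the driver): srq->swq is managed as a
 * singly linked free list over array indices, so wr_ids survive
 * out-of-order completion. Freeing slot "idx" back to the list on
 * completion is the mirror of the pop above, roughly:
 *
 *	spin_lock(&srq->hwq.lock);
 *	srq->swq[srq->last_idx].next_idx = idx;
 *	srq->last_idx = idx;
 *	srq->swq[idx].next_idx = -1;	// new tail
 *	spin_unlock(&srq->hwq.lock);
 */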

/* QP */
int bnxt_qplib_create_qp1(struct bnxt_qplib_res *res, struct bnxt_qplib_qp *qp)
{
	struct bnxt_qplib_rcfw *rcfw = res->rcfw;
	struct cmdq_create_qp1 req;
	struct creq_create_qp1_resp resp;
	struct bnxt_qplib_pbl *pbl;
	struct bnxt_qplib_q *sq = &qp->sq;
	struct bnxt_qplib_q *rq = &qp->rq;
	int rc;
	u16 cmd_flags = 0;
	u32 qp_flags = 0;

	RCFW_CMD_PREP(req, CREATE_QP1, cmd_flags);

	/* General */
	req.type = qp->type;
	req.dpi = cpu_to_le32(qp->dpi->dpi);
	req.qp_handle = cpu_to_le64(qp->qp_handle);

	/* SQ */
	sq->hwq.max_elements = sq->max_wqe;
	rc = bnxt_qplib_alloc_init_hwq(res->pdev, &sq->hwq, NULL, 0,
				       &sq->hwq.max_elements,
				       BNXT_QPLIB_MAX_SQE_ENTRY_SIZE, 0,
				       PAGE_SIZE, HWQ_TYPE_QUEUE);
	if (rc)
		goto exit;

	sq->swq = kcalloc(sq->hwq.max_elements, sizeof(*sq->swq), GFP_KERNEL);
	if (!sq->swq) {
		rc = -ENOMEM;
		goto fail_sq;
	}
	pbl = &sq->hwq.pbl[PBL_LVL_0];
	req.sq_pbl = cpu_to_le64(pbl->pg_map_arr[0]);
	req.sq_pg_size_sq_lvl =
		((sq->hwq.level & CMDQ_CREATE_QP1_SQ_LVL_MASK)
				<< CMDQ_CREATE_QP1_SQ_LVL_SFT) |
		(pbl->pg_size == ROCE_PG_SIZE_4K ?
				CMDQ_CREATE_QP1_SQ_PG_SIZE_PG_4K :
		 pbl->pg_size == ROCE_PG_SIZE_8K ?
				CMDQ_CREATE_QP1_SQ_PG_SIZE_PG_8K :
		 pbl->pg_size == ROCE_PG_SIZE_64K ?
				CMDQ_CREATE_QP1_SQ_PG_SIZE_PG_64K :
		 pbl->pg_size == ROCE_PG_SIZE_2M ?
				CMDQ_CREATE_QP1_SQ_PG_SIZE_PG_2M :
		 pbl->pg_size == ROCE_PG_SIZE_8M ?
				CMDQ_CREATE_QP1_SQ_PG_SIZE_PG_8M :
		 pbl->pg_size == ROCE_PG_SIZE_1G ?
				CMDQ_CREATE_QP1_SQ_PG_SIZE_PG_1G :
		 CMDQ_CREATE_QP1_SQ_PG_SIZE_PG_4K);

	if (qp->scq)
		req.scq_cid = cpu_to_le32(qp->scq->id);

	qp_flags |= CMDQ_CREATE_QP1_QP_FLAGS_RESERVED_LKEY_ENABLE;

	/* RQ */
	if (rq->max_wqe) {
		rq->hwq.max_elements = qp->rq.max_wqe;
		rc = bnxt_qplib_alloc_init_hwq(res->pdev, &rq->hwq, NULL, 0,
					       &rq->hwq.max_elements,
					       BNXT_QPLIB_MAX_RQE_ENTRY_SIZE, 0,
					       PAGE_SIZE, HWQ_TYPE_QUEUE);
		if (rc)
			goto fail_sq;

		rq->swq = kcalloc(rq->hwq.max_elements, sizeof(*rq->swq),
				  GFP_KERNEL);
		if (!rq->swq) {
			rc = -ENOMEM;
			goto fail_rq;
		}
		pbl = &rq->hwq.pbl[PBL_LVL_0];
		req.rq_pbl = cpu_to_le64(pbl->pg_map_arr[0]);
		req.rq_pg_size_rq_lvl =
			((rq->hwq.level & CMDQ_CREATE_QP1_RQ_LVL_MASK) <<
					CMDQ_CREATE_QP1_RQ_LVL_SFT) |
				(pbl->pg_size == ROCE_PG_SIZE_4K ?
					CMDQ_CREATE_QP1_RQ_PG_SIZE_PG_4K :
				 pbl->pg_size == ROCE_PG_SIZE_8K ?
					CMDQ_CREATE_QP1_RQ_PG_SIZE_PG_8K :
				 pbl->pg_size == ROCE_PG_SIZE_64K ?
					CMDQ_CREATE_QP1_RQ_PG_SIZE_PG_64K :
				 pbl->pg_size == ROCE_PG_SIZE_2M ?
					CMDQ_CREATE_QP1_RQ_PG_SIZE_PG_2M :
				 pbl->pg_size == ROCE_PG_SIZE_8M ?
					CMDQ_CREATE_QP1_RQ_PG_SIZE_PG_8M :
				 pbl->pg_size == ROCE_PG_SIZE_1G ?
					CMDQ_CREATE_QP1_RQ_PG_SIZE_PG_1G :
				 CMDQ_CREATE_QP1_RQ_PG_SIZE_PG_4K);
		if (qp->rcq)
			req.rcq_cid = cpu_to_le32(qp->rcq->id);
	}

	/* Header buffer - allow hdr_buf pass in */
	rc = bnxt_qplib_alloc_qp_hdr_buf(res, qp);
	if (rc) {
		rc = -ENOMEM;
		goto fail;
	}
	req.qp_flags = cpu_to_le32(qp_flags);
	req.sq_size = cpu_to_le32(sq->hwq.max_elements);
	req.rq_size = cpu_to_le32(rq->hwq.max_elements);

	req.sq_fwo_sq_sge =
		cpu_to_le16((sq->max_sge & CMDQ_CREATE_QP1_SQ_SGE_MASK) <<
			    CMDQ_CREATE_QP1_SQ_SGE_SFT);
	req.rq_fwo_rq_sge =
		cpu_to_le16((rq->max_sge & CMDQ_CREATE_QP1_RQ_SGE_MASK) <<
			    CMDQ_CREATE_QP1_RQ_SGE_SFT);

	req.pd_id = cpu_to_le32(qp->pd->id);

	rc = bnxt_qplib_rcfw_send_message(rcfw, (void *)&req,
					  (void *)&resp, NULL, 0);
	if (rc)
		goto fail;

	qp->id = le32_to_cpu(resp.xid);
	qp->cur_qp_state = CMDQ_MODIFY_QP_NEW_STATE_RESET;
	rcfw->qp_tbl[qp->id].qp_id = qp->id;
	rcfw->qp_tbl[qp->id].qp_handle = (void *)qp;

	return 0;

fail:
	bnxt_qplib_free_qp_hdr_buf(res, qp);
fail_rq:
	bnxt_qplib_free_hwq(res->pdev, &rq->hwq);
	kfree(rq->swq);
fail_sq:
	bnxt_qplib_free_hwq(res->pdev, &sq->hwq);
	kfree(sq->swq);
exit:
	return rc;
}

int bnxt_qplib_create_qp(struct bnxt_qplib_res *res, struct bnxt_qplib_qp *qp)
{
	struct bnxt_qplib_rcfw *rcfw = res->rcfw;
	struct sq_send *hw_sq_send_hdr, **hw_sq_send_ptr;
	struct cmdq_create_qp req;
	struct creq_create_qp_resp resp;
	struct bnxt_qplib_pbl *pbl;
	struct sq_psn_search **psn_search_ptr;
	unsigned long int psn_search, poff = 0;
	struct bnxt_qplib_q *sq = &qp->sq;
	struct bnxt_qplib_q *rq = &qp->rq;
	struct bnxt_qplib_hwq *xrrq;
	int i, rc, req_size, psn_sz;
	u16 cmd_flags = 0, max_ssge;
	u32 sw_prod, qp_flags = 0;

	RCFW_CMD_PREP(req, CREATE_QP, cmd_flags);

	/* General */
	req.type = qp->type;
	req.dpi = cpu_to_le32(qp->dpi->dpi);
	req.qp_handle = cpu_to_le64(qp->qp_handle);

	/* SQ */
	psn_sz = (qp->type == CMDQ_CREATE_QP_TYPE_RC) ?
		 sizeof(struct sq_psn_search) : 0;
	sq->hwq.max_elements = sq->max_wqe;
	rc = bnxt_qplib_alloc_init_hwq(res->pdev, &sq->hwq, sq->sglist,
				       sq->nmap, &sq->hwq.max_elements,
				       BNXT_QPLIB_MAX_SQE_ENTRY_SIZE,
				       psn_sz,
				       PAGE_SIZE, HWQ_TYPE_QUEUE);
	if (rc)
		goto exit;

	sq->swq = kcalloc(sq->hwq.max_elements, sizeof(*sq->swq), GFP_KERNEL);
	if (!sq->swq) {
		rc = -ENOMEM;
		goto fail_sq;
	}
	hw_sq_send_ptr = (struct sq_send **)sq->hwq.pbl_ptr;
	if (psn_sz) {
		psn_search_ptr = (struct sq_psn_search **)
				  &hw_sq_send_ptr[get_sqe_pg
					(sq->hwq.max_elements)];
		psn_search = (unsigned long int)
			      &hw_sq_send_ptr[get_sqe_pg(sq->hwq.max_elements)]
			      [get_sqe_idx(sq->hwq.max_elements)];
		if (psn_search & ~PAGE_MASK) {
			/* If the psn_search does not start on a page boundary,
			 * then calculate the offset
			 */
			poff = (psn_search & ~PAGE_MASK) /
				BNXT_QPLIB_MAX_PSNE_ENTRY_SIZE;
		}
		for (i = 0; i < sq->hwq.max_elements; i++)
			sq->swq[i].psn_search =
				&psn_search_ptr[get_psne_pg(i + poff)]
					       [get_psne_idx(i + poff)];
	}
	pbl = &sq->hwq.pbl[PBL_LVL_0];
	req.sq_pbl = cpu_to_le64(pbl->pg_map_arr[0]);
	req.sq_pg_size_sq_lvl =
		((sq->hwq.level & CMDQ_CREATE_QP_SQ_LVL_MASK)
				 << CMDQ_CREATE_QP_SQ_LVL_SFT) |
		(pbl->pg_size == ROCE_PG_SIZE_4K ?
				CMDQ_CREATE_QP_SQ_PG_SIZE_PG_4K :
		 pbl->pg_size == ROCE_PG_SIZE_8K ?
				CMDQ_CREATE_QP_SQ_PG_SIZE_PG_8K :
		 pbl->pg_size == ROCE_PG_SIZE_64K ?
				CMDQ_CREATE_QP_SQ_PG_SIZE_PG_64K :
		 pbl->pg_size == ROCE_PG_SIZE_2M ?
				CMDQ_CREATE_QP_SQ_PG_SIZE_PG_2M :
		 pbl->pg_size == ROCE_PG_SIZE_8M ?
				CMDQ_CREATE_QP_SQ_PG_SIZE_PG_8M :
		 pbl->pg_size == ROCE_PG_SIZE_1G ?
				CMDQ_CREATE_QP_SQ_PG_SIZE_PG_1G :
		 CMDQ_CREATE_QP_SQ_PG_SIZE_PG_4K);

	/* initialize all SQ WQEs to LOCAL_INVALID (sq prep for hw fetch) */
	hw_sq_send_ptr = (struct sq_send **)sq->hwq.pbl_ptr;
	for (sw_prod = 0; sw_prod < sq->hwq.max_elements; sw_prod++) {
		hw_sq_send_hdr = &hw_sq_send_ptr[get_sqe_pg(sw_prod)]
						[get_sqe_idx(sw_prod)];
		hw_sq_send_hdr->wqe_type = SQ_BASE_WQE_TYPE_LOCAL_INVALID;
	}

	if (qp->scq)
		req.scq_cid = cpu_to_le32(qp->scq->id);

	qp_flags |= CMDQ_CREATE_QP_QP_FLAGS_RESERVED_LKEY_ENABLE;
	qp_flags |= CMDQ_CREATE_QP_QP_FLAGS_FR_PMR_ENABLED;
	if (qp->sig_type)
		qp_flags |= CMDQ_CREATE_QP_QP_FLAGS_FORCE_COMPLETION;

	/* RQ */
	if (rq->max_wqe) {
		rq->hwq.max_elements = rq->max_wqe;
		rc = bnxt_qplib_alloc_init_hwq(res->pdev, &rq->hwq, rq->sglist,
					       rq->nmap, &rq->hwq.max_elements,
					       BNXT_QPLIB_MAX_RQE_ENTRY_SIZE, 0,
					       PAGE_SIZE, HWQ_TYPE_QUEUE);
		if (rc)
			goto fail_sq;

		rq->swq = kcalloc(rq->hwq.max_elements, sizeof(*rq->swq),
				  GFP_KERNEL);
		if (!rq->swq) {
			rc = -ENOMEM;
			goto fail_rq;
		}
		pbl = &rq->hwq.pbl[PBL_LVL_0];
		req.rq_pbl = cpu_to_le64(pbl->pg_map_arr[0]);
		req.rq_pg_size_rq_lvl =
			((rq->hwq.level & CMDQ_CREATE_QP_RQ_LVL_MASK) <<
				CMDQ_CREATE_QP_RQ_LVL_SFT) |
				(pbl->pg_size == ROCE_PG_SIZE_4K ?
					CMDQ_CREATE_QP_RQ_PG_SIZE_PG_4K :
				 pbl->pg_size == ROCE_PG_SIZE_8K ?
					CMDQ_CREATE_QP_RQ_PG_SIZE_PG_8K :
				 pbl->pg_size == ROCE_PG_SIZE_64K ?
					CMDQ_CREATE_QP_RQ_PG_SIZE_PG_64K :
				 pbl->pg_size == ROCE_PG_SIZE_2M ?
					CMDQ_CREATE_QP_RQ_PG_SIZE_PG_2M :
				 pbl->pg_size == ROCE_PG_SIZE_8M ?
					CMDQ_CREATE_QP_RQ_PG_SIZE_PG_8M :
				 pbl->pg_size == ROCE_PG_SIZE_1G ?
					CMDQ_CREATE_QP_RQ_PG_SIZE_PG_1G :
				 CMDQ_CREATE_QP_RQ_PG_SIZE_PG_4K);
	} else {
		/* SRQ */
		if (qp->srq) {
			qp_flags |= CMDQ_CREATE_QP_QP_FLAGS_SRQ_USED;
			req.srq_cid = cpu_to_le32(qp->srq->id);
		}
	}

	if (qp->rcq)
		req.rcq_cid = cpu_to_le32(qp->rcq->id);
	req.qp_flags = cpu_to_le32(qp_flags);
	req.sq_size = cpu_to_le32(sq->hwq.max_elements);
	req.rq_size = cpu_to_le32(rq->hwq.max_elements);
	qp->sq_hdr_buf = NULL;
	qp->rq_hdr_buf = NULL;

	rc = bnxt_qplib_alloc_qp_hdr_buf(res, qp);
	if (rc)
		goto fail_rq;

	/* CTRL-22434: Irrespective of the requested SGE count on the SQ
	 * always create the QP with max send sges possible if the requested
	 * inline size is greater than 0.
	 */
	max_ssge = qp->max_inline_data ? 6 : sq->max_sge;
	req.sq_fwo_sq_sge = cpu_to_le16(
				((max_ssge & CMDQ_CREATE_QP_SQ_SGE_MASK)
				 << CMDQ_CREATE_QP_SQ_SGE_SFT) | 0);
	req.rq_fwo_rq_sge = cpu_to_le16(
				((rq->max_sge & CMDQ_CREATE_QP_RQ_SGE_MASK)
				 << CMDQ_CREATE_QP_RQ_SGE_SFT) | 0);
	/* ORRQ and IRRQ */
	if (psn_sz) {
		xrrq = &qp->orrq;
		xrrq->max_elements =
			ORD_LIMIT_TO_ORRQ_SLOTS(qp->max_rd_atomic);
		req_size = xrrq->max_elements *
			   BNXT_QPLIB_MAX_ORRQE_ENTRY_SIZE + PAGE_SIZE - 1;
		req_size &= ~(PAGE_SIZE - 1);
		rc = bnxt_qplib_alloc_init_hwq(res->pdev, xrrq, NULL, 0,
					       &xrrq->max_elements,
					       BNXT_QPLIB_MAX_ORRQE_ENTRY_SIZE,
					       0, req_size, HWQ_TYPE_CTX);
		if (rc)
			goto fail_buf_free;
		pbl = &xrrq->pbl[PBL_LVL_0];
		req.orrq_addr = cpu_to_le64(pbl->pg_map_arr[0]);

		xrrq = &qp->irrq;
		xrrq->max_elements = IRD_LIMIT_TO_IRRQ_SLOTS(
						qp->max_dest_rd_atomic);
		req_size = xrrq->max_elements *
			   BNXT_QPLIB_MAX_IRRQE_ENTRY_SIZE + PAGE_SIZE - 1;
		req_size &= ~(PAGE_SIZE - 1);

		rc = bnxt_qplib_alloc_init_hwq(res->pdev, xrrq, NULL, 0,
					       &xrrq->max_elements,
					       BNXT_QPLIB_MAX_IRRQE_ENTRY_SIZE,
					       0, req_size, HWQ_TYPE_CTX);
		if (rc)
			goto fail_orrq;

		pbl = &xrrq->pbl[PBL_LVL_0];
		req.irrq_addr = cpu_to_le64(pbl->pg_map_arr[0]);
	}
	req.pd_id = cpu_to_le32(qp->pd->id);

	rc = bnxt_qplib_rcfw_send_message(rcfw, (void *)&req,
					  (void *)&resp, NULL, 0);
	if (rc)
		goto fail;

	qp->id = le32_to_cpu(resp.xid);
	qp->cur_qp_state = CMDQ_MODIFY_QP_NEW_STATE_RESET;
	INIT_LIST_HEAD(&qp->sq_flush);
	INIT_LIST_HEAD(&qp->rq_flush);
	rcfw->qp_tbl[qp->id].qp_id = qp->id;
	rcfw->qp_tbl[qp->id].qp_handle = (void *)qp;

	return 0;

fail:
	if (qp->irrq.max_elements)
		bnxt_qplib_free_hwq(res->pdev, &qp->irrq);
fail_orrq:
	if (qp->orrq.max_elements)
		bnxt_qplib_free_hwq(res->pdev, &qp->orrq);
fail_buf_free:
	bnxt_qplib_free_qp_hdr_buf(res, qp);
fail_rq:
	bnxt_qplib_free_hwq(res->pdev, &rq->hwq);
	kfree(rq->swq);
fail_sq:
	bnxt_qplib_free_hwq(res->pdev, &sq->hwq);
	kfree(sq->swq);
exit:
	return rc;
}

static void __modify_flags_from_init_state(struct bnxt_qplib_qp *qp)
{
	switch (qp->state) {
	case CMDQ_MODIFY_QP_NEW_STATE_RTR:
		/* INIT->RTR, configure the path_mtu to the default
		 * 2048 if not being requested
		 */
		if (!(qp->modify_flags &
		    CMDQ_MODIFY_QP_MODIFY_MASK_PATH_MTU)) {
			qp->modify_flags |=
				CMDQ_MODIFY_QP_MODIFY_MASK_PATH_MTU;
			qp->path_mtu =
				CMDQ_MODIFY_QP_PATH_MTU_MTU_2048;
		}
		qp->modify_flags &=
			~CMDQ_MODIFY_QP_MODIFY_MASK_VLAN_ID;
		/* Bono FW requires the max_dest_rd_atomic to be >= 1 */
		if (qp->max_dest_rd_atomic < 1)
			qp->max_dest_rd_atomic = 1;
		qp->modify_flags &= ~CMDQ_MODIFY_QP_MODIFY_MASK_SRC_MAC;
		/* Bono FW 20.6.5 requires SGID_INDEX configuration */
		if (!(qp->modify_flags &
		    CMDQ_MODIFY_QP_MODIFY_MASK_SGID_INDEX)) {
			qp->modify_flags |=
				CMDQ_MODIFY_QP_MODIFY_MASK_SGID_INDEX;
			qp->ah.sgid_index = 0;
		}
		break;
	default:
		break;
	}
}

static void __modify_flags_from_rtr_state(struct bnxt_qplib_qp *qp)
{
	switch (qp->state) {
	case CMDQ_MODIFY_QP_NEW_STATE_RTS:
		/* Bono FW requires the max_rd_atomic to be >= 1 */
		if (qp->max_rd_atomic < 1)
			qp->max_rd_atomic = 1;
		/* Bono FW does not allow PKEY_INDEX,
		 * DGID, FLOW_LABEL, SGID_INDEX, HOP_LIMIT,
		 * TRAFFIC_CLASS, DEST_MAC, PATH_MTU, RQ_PSN,
		 * MIN_RNR_TIMER, MAX_DEST_RD_ATOMIC, DEST_QP_ID
		 * modification on the RTR to RTS transition
		 */
		qp->modify_flags &=
			~(CMDQ_MODIFY_QP_MODIFY_MASK_PKEY |
			  CMDQ_MODIFY_QP_MODIFY_MASK_DGID |
			  CMDQ_MODIFY_QP_MODIFY_MASK_FLOW_LABEL |
			  CMDQ_MODIFY_QP_MODIFY_MASK_SGID_INDEX |
			  CMDQ_MODIFY_QP_MODIFY_MASK_HOP_LIMIT |
			  CMDQ_MODIFY_QP_MODIFY_MASK_TRAFFIC_CLASS |
			  CMDQ_MODIFY_QP_MODIFY_MASK_DEST_MAC |
			  CMDQ_MODIFY_QP_MODIFY_MASK_PATH_MTU |
			  CMDQ_MODIFY_QP_MODIFY_MASK_RQ_PSN |
			  CMDQ_MODIFY_QP_MODIFY_MASK_MIN_RNR_TIMER |
			  CMDQ_MODIFY_QP_MODIFY_MASK_MAX_DEST_RD_ATOMIC |
			  CMDQ_MODIFY_QP_MODIFY_MASK_DEST_QP_ID);
		break;
	default:
		break;
	}
}

static void __filter_modify_flags(struct bnxt_qplib_qp *qp)
{
	switch (qp->cur_qp_state) {
	case CMDQ_MODIFY_QP_NEW_STATE_RESET:
		break;
	case CMDQ_MODIFY_QP_NEW_STATE_INIT:
		__modify_flags_from_init_state(qp);
		break;
	case CMDQ_MODIFY_QP_NEW_STATE_RTR:
		__modify_flags_from_rtr_state(qp);
		break;
	case CMDQ_MODIFY_QP_NEW_STATE_RTS:
		break;
	case CMDQ_MODIFY_QP_NEW_STATE_SQD:
		break;
	case CMDQ_MODIFY_QP_NEW_STATE_SQE:
		break;
	case CMDQ_MODIFY_QP_NEW_STATE_ERR:
		break;
	default:
		break;
	}
}
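
/* Illustrative note (not part of the driver): the filter above keys off the
 * *current* state, then the per-state helper inspects the *requested* state,
 * mirroring the IB spec's state-transition attribute rules. For example, an
 * INIT->RTR transition that never set a path MTU would reach the firmware
 * as:
 *
 *	qp->cur_qp_state == CMDQ_MODIFY_QP_NEW_STATE_INIT
 *	qp->state        == CMDQ_MODIFY_QP_NEW_STATE_RTR
 *	=> modify_flags gains ..._MASK_PATH_MTU, path_mtu defaults to 2048
 */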

int bnxt_qplib_modify_qp(struct bnxt_qplib_res *res, struct bnxt_qplib_qp *qp)
{
	struct bnxt_qplib_rcfw *rcfw = res->rcfw;
	struct cmdq_modify_qp req;
	struct creq_modify_qp_resp resp;
	u16 cmd_flags = 0, pkey;
	u32 temp32[4];
	u32 bmask;
	int rc;

	RCFW_CMD_PREP(req, MODIFY_QP, cmd_flags);

	/* Filter out the qp_attr_mask based on the state->new transition */
	__filter_modify_flags(qp);
	bmask = qp->modify_flags;
	req.modify_mask = cpu_to_le32(qp->modify_flags);
	req.qp_cid = cpu_to_le32(qp->id);
	if (bmask & CMDQ_MODIFY_QP_MODIFY_MASK_STATE) {
		req.network_type_en_sqd_async_notify_new_state =
				(qp->state & CMDQ_MODIFY_QP_NEW_STATE_MASK) |
				(qp->en_sqd_async_notify ?
					CMDQ_MODIFY_QP_EN_SQD_ASYNC_NOTIFY : 0);
	}
	req.network_type_en_sqd_async_notify_new_state |= qp->nw_type;

	if (bmask & CMDQ_MODIFY_QP_MODIFY_MASK_ACCESS)
		req.access = qp->access;

	if (bmask & CMDQ_MODIFY_QP_MODIFY_MASK_PKEY) {
		if (!bnxt_qplib_get_pkey(res, &res->pkey_tbl,
					 qp->pkey_index, &pkey))
			req.pkey = cpu_to_le16(pkey);
	}
	if (bmask & CMDQ_MODIFY_QP_MODIFY_MASK_QKEY)
		req.qkey = cpu_to_le32(qp->qkey);

	if (bmask & CMDQ_MODIFY_QP_MODIFY_MASK_DGID) {
		memcpy(temp32, qp->ah.dgid.data, sizeof(struct bnxt_qplib_gid));
		req.dgid[0] = cpu_to_le32(temp32[0]);
		req.dgid[1] = cpu_to_le32(temp32[1]);
		req.dgid[2] = cpu_to_le32(temp32[2]);
		req.dgid[3] = cpu_to_le32(temp32[3]);
	}
	if (bmask & CMDQ_MODIFY_QP_MODIFY_MASK_FLOW_LABEL)
		req.flow_label = cpu_to_le32(qp->ah.flow_label);

	if (bmask & CMDQ_MODIFY_QP_MODIFY_MASK_SGID_INDEX)
		req.sgid_index = cpu_to_le16(res->sgid_tbl.hw_id
					     [qp->ah.sgid_index]);

	if (bmask & CMDQ_MODIFY_QP_MODIFY_MASK_HOP_LIMIT)
		req.hop_limit = qp->ah.hop_limit;

	if (bmask & CMDQ_MODIFY_QP_MODIFY_MASK_TRAFFIC_CLASS)
		req.traffic_class = qp->ah.traffic_class;

	if (bmask & CMDQ_MODIFY_QP_MODIFY_MASK_DEST_MAC)
		memcpy(req.dest_mac, qp->ah.dmac, 6);

	if (bmask & CMDQ_MODIFY_QP_MODIFY_MASK_PATH_MTU)
		req.path_mtu = qp->path_mtu;

	if (bmask & CMDQ_MODIFY_QP_MODIFY_MASK_TIMEOUT)
		req.timeout = qp->timeout;

	if (bmask & CMDQ_MODIFY_QP_MODIFY_MASK_RETRY_CNT)
		req.retry_cnt = qp->retry_cnt;

	if (bmask & CMDQ_MODIFY_QP_MODIFY_MASK_RNR_RETRY)
		req.rnr_retry = qp->rnr_retry;

	if (bmask & CMDQ_MODIFY_QP_MODIFY_MASK_MIN_RNR_TIMER)
		req.min_rnr_timer = qp->min_rnr_timer;

	if (bmask & CMDQ_MODIFY_QP_MODIFY_MASK_RQ_PSN)
		req.rq_psn = cpu_to_le32(qp->rq.psn);

	if (bmask & CMDQ_MODIFY_QP_MODIFY_MASK_SQ_PSN)
		req.sq_psn = cpu_to_le32(qp->sq.psn);

	if (bmask & CMDQ_MODIFY_QP_MODIFY_MASK_MAX_RD_ATOMIC)
		req.max_rd_atomic =
			ORD_LIMIT_TO_ORRQ_SLOTS(qp->max_rd_atomic);

	if (bmask & CMDQ_MODIFY_QP_MODIFY_MASK_MAX_DEST_RD_ATOMIC)
		req.max_dest_rd_atomic =
			IRD_LIMIT_TO_IRRQ_SLOTS(qp->max_dest_rd_atomic);

	req.sq_size = cpu_to_le32(qp->sq.hwq.max_elements);
	req.rq_size = cpu_to_le32(qp->rq.hwq.max_elements);
	req.sq_sge = cpu_to_le16(qp->sq.max_sge);
	req.rq_sge = cpu_to_le16(qp->rq.max_sge);
	req.max_inline_data = cpu_to_le32(qp->max_inline_data);
	if (bmask & CMDQ_MODIFY_QP_MODIFY_MASK_DEST_QP_ID)
		req.dest_qp_id = cpu_to_le32(qp->dest_qpn);

	req.vlan_pcp_vlan_dei_vlan_id = cpu_to_le16(qp->vlan_id);

	rc = bnxt_qplib_rcfw_send_message(rcfw, (void *)&req,
					  (void *)&resp, NULL, 0);
	if (rc)
		return rc;
	qp->cur_qp_state = qp->state;
	return 0;
}
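
/* Illustrative usage (not part of the driver): a verbs consumer drives a QP
 * to RTS with three modify calls; the qplib caller fills the shadow fields
 * and masks before each one. A hedged sketch:
 *
 *	qp->state = CMDQ_MODIFY_QP_NEW_STATE_INIT;
 *	qp->modify_flags = CMDQ_MODIFY_QP_MODIFY_MASK_STATE | ...;
 *	rc = bnxt_qplib_modify_qp(res, qp);	// RESET -> INIT
 *	...					// INIT -> RTR, RTR -> RTS
 *
 * On success cur_qp_state is advanced, which is what __filter_modify_flags()
 * consults on the next call.
 */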

int bnxt_qplib_query_qp(struct bnxt_qplib_res *res, struct bnxt_qplib_qp *qp)
{
	struct bnxt_qplib_rcfw *rcfw = res->rcfw;
	struct cmdq_query_qp req;
	struct creq_query_qp_resp resp;
	struct bnxt_qplib_rcfw_sbuf *sbuf;
	struct creq_query_qp_resp_sb *sb;
	u16 cmd_flags = 0;
	u32 temp32[4];
	int i, rc = 0;

	RCFW_CMD_PREP(req, QUERY_QP, cmd_flags);

	sbuf = bnxt_qplib_rcfw_alloc_sbuf(rcfw, sizeof(*sb));
	if (!sbuf)
		return -ENOMEM;
	sb = sbuf->sb;

	req.qp_cid = cpu_to_le32(qp->id);
	req.resp_size = sizeof(*sb) / BNXT_QPLIB_CMDQE_UNITS;
	rc = bnxt_qplib_rcfw_send_message(rcfw, (void *)&req, (void *)&resp,
					  (void *)sbuf, 0);
	if (rc)
		goto bail;
	/* Extract the context from the side buffer */
	qp->state = sb->en_sqd_async_notify_state &
			CREQ_QUERY_QP_RESP_SB_STATE_MASK;
	qp->en_sqd_async_notify = sb->en_sqd_async_notify_state &
				  CREQ_QUERY_QP_RESP_SB_EN_SQD_ASYNC_NOTIFY ?
				  true : false;
	qp->access = sb->access;
	qp->pkey_index = le16_to_cpu(sb->pkey);
	qp->qkey = le32_to_cpu(sb->qkey);

	temp32[0] = le32_to_cpu(sb->dgid[0]);
	temp32[1] = le32_to_cpu(sb->dgid[1]);
	temp32[2] = le32_to_cpu(sb->dgid[2]);
	temp32[3] = le32_to_cpu(sb->dgid[3]);
	memcpy(qp->ah.dgid.data, temp32, sizeof(qp->ah.dgid.data));

	qp->ah.flow_label = le32_to_cpu(sb->flow_label);

	qp->ah.sgid_index = 0;
	for (i = 0; i < res->sgid_tbl.max; i++) {
		if (res->sgid_tbl.hw_id[i] == le16_to_cpu(sb->sgid_index)) {
			qp->ah.sgid_index = i;
			break;
		}
	}
	if (i == res->sgid_tbl.max)
		dev_warn(&res->pdev->dev, "QPLIB: SGID not found??");

	qp->ah.hop_limit = sb->hop_limit;
	qp->ah.traffic_class = sb->traffic_class;
	memcpy(qp->ah.dmac, sb->dest_mac, 6);
	qp->ah.vlan_id = (le16_to_cpu(sb->path_mtu_dest_vlan_id) &
				CREQ_QUERY_QP_RESP_SB_VLAN_ID_MASK) >>
				CREQ_QUERY_QP_RESP_SB_VLAN_ID_SFT;
	qp->path_mtu = (le16_to_cpu(sb->path_mtu_dest_vlan_id) &
			    CREQ_QUERY_QP_RESP_SB_PATH_MTU_MASK) >>
			    CREQ_QUERY_QP_RESP_SB_PATH_MTU_SFT;
	qp->timeout = sb->timeout;
	qp->retry_cnt = sb->retry_cnt;
	qp->rnr_retry = sb->rnr_retry;
	qp->min_rnr_timer = sb->min_rnr_timer;
	qp->rq.psn = le32_to_cpu(sb->rq_psn);
	qp->max_rd_atomic = ORRQ_SLOTS_TO_ORD_LIMIT(sb->max_rd_atomic);
	qp->sq.psn = le32_to_cpu(sb->sq_psn);
	qp->max_dest_rd_atomic =
			IRRQ_SLOTS_TO_IRD_LIMIT(sb->max_dest_rd_atomic);
	qp->sq.max_wqe = qp->sq.hwq.max_elements;
	qp->rq.max_wqe = qp->rq.hwq.max_elements;
	qp->sq.max_sge = le16_to_cpu(sb->sq_sge);
	qp->rq.max_sge = le16_to_cpu(sb->rq_sge);
	qp->max_inline_data = le32_to_cpu(sb->max_inline_data);
	qp->dest_qpn = le32_to_cpu(sb->dest_qp_id);
	memcpy(qp->smac, sb->src_mac, 6);
	qp->vlan_id = le16_to_cpu(sb->vlan_pcp_vlan_dei_vlan_id);
bail:
	bnxt_qplib_rcfw_free_sbuf(rcfw, sbuf);
	return rc;
}

static void __clean_cq(struct bnxt_qplib_cq *cq, u64 qp)
{
	struct bnxt_qplib_hwq *cq_hwq = &cq->hwq;
	struct cq_base *hw_cqe, **hw_cqe_ptr;
	int i;

	for (i = 0; i < cq_hwq->max_elements; i++) {
		hw_cqe_ptr = (struct cq_base **)cq_hwq->pbl_ptr;
		hw_cqe = &hw_cqe_ptr[CQE_PG(i)][CQE_IDX(i)];
		if (!CQE_CMP_VALID(hw_cqe, i, cq_hwq->max_elements))
			continue;
		/*
		 * The valid test of the entry must be done first before
		 * reading any further.
		 */
		dma_rmb();
		switch (hw_cqe->cqe_type_toggle & CQ_BASE_CQE_TYPE_MASK) {
		case CQ_BASE_CQE_TYPE_REQ:
		case CQ_BASE_CQE_TYPE_TERMINAL:
		{
			struct cq_req *cqe = (struct cq_req *)hw_cqe;

			if (qp == le64_to_cpu(cqe->qp_handle))
				cqe->qp_handle = 0;
			break;
		}
		case CQ_BASE_CQE_TYPE_RES_RC:
		case CQ_BASE_CQE_TYPE_RES_UD:
		case CQ_BASE_CQE_TYPE_RES_RAWETH_QP1:
		{
			struct cq_res_rc *cqe = (struct cq_res_rc *)hw_cqe;

			if (qp == le64_to_cpu(cqe->qp_handle))
				cqe->qp_handle = 0;
			break;
		}
		default:
			break;
		}
	}
}

int bnxt_qplib_destroy_qp(struct bnxt_qplib_res *res,
			  struct bnxt_qplib_qp *qp)
{
	struct bnxt_qplib_rcfw *rcfw = res->rcfw;
	struct cmdq_destroy_qp req;
	struct creq_destroy_qp_resp resp;
	u16 cmd_flags = 0;
	int rc;

	rcfw->qp_tbl[qp->id].qp_id = BNXT_QPLIB_QP_ID_INVALID;
	rcfw->qp_tbl[qp->id].qp_handle = NULL;

	RCFW_CMD_PREP(req, DESTROY_QP, cmd_flags);

	req.qp_cid = cpu_to_le32(qp->id);
	rc = bnxt_qplib_rcfw_send_message(rcfw, (void *)&req,
					  (void *)&resp, NULL, 0);
	if (rc) {
		/* Restore the qp_tbl entry since the QP was not destroyed */
		rcfw->qp_tbl[qp->id].qp_id = qp->id;
		rcfw->qp_tbl[qp->id].qp_handle = qp;
		return rc;
	}

	return 0;
}

void bnxt_qplib_free_qp_res(struct bnxt_qplib_res *res,
			    struct bnxt_qplib_qp *qp)
{
	bnxt_qplib_free_qp_hdr_buf(res, qp);
	bnxt_qplib_free_hwq(res->pdev, &qp->sq.hwq);
	kfree(qp->sq.swq);

	bnxt_qplib_free_hwq(res->pdev, &qp->rq.hwq);
	kfree(qp->rq.swq);

	if (qp->irrq.max_elements)
		bnxt_qplib_free_hwq(res->pdev, &qp->irrq);
	if (qp->orrq.max_elements)
		bnxt_qplib_free_hwq(res->pdev, &qp->orrq);
}

void *bnxt_qplib_get_qp1_sq_buf(struct bnxt_qplib_qp *qp,
				struct bnxt_qplib_sge *sge)
{
	struct bnxt_qplib_q *sq = &qp->sq;
	u32 sw_prod;

	memset(sge, 0, sizeof(*sge));

	if (qp->sq_hdr_buf) {
		sw_prod = HWQ_CMP(sq->hwq.prod, &sq->hwq);
		sge->addr = (dma_addr_t)(qp->sq_hdr_buf_map +
					 sw_prod * qp->sq_hdr_buf_size);
		sge->lkey = 0xFFFFFFFF;
		sge->size = qp->sq_hdr_buf_size;
		return qp->sq_hdr_buf + sw_prod * sge->size;
	}
	return NULL;
}

u32 bnxt_qplib_get_rq_prod_index(struct bnxt_qplib_qp *qp)
{
	struct bnxt_qplib_q *rq = &qp->rq;

	return HWQ_CMP(rq->hwq.prod, &rq->hwq);
}

dma_addr_t bnxt_qplib_get_qp_buf_from_index(struct bnxt_qplib_qp *qp, u32 index)
{
	return (qp->rq_hdr_buf_map + index * qp->rq_hdr_buf_size);
}

void *bnxt_qplib_get_qp1_rq_buf(struct bnxt_qplib_qp *qp,
				struct bnxt_qplib_sge *sge)
{
	struct bnxt_qplib_q *rq = &qp->rq;
	u32 sw_prod;

	memset(sge, 0, sizeof(*sge));

	if (qp->rq_hdr_buf) {
		sw_prod = HWQ_CMP(rq->hwq.prod, &rq->hwq);
		sge->addr = (dma_addr_t)(qp->rq_hdr_buf_map +
					 sw_prod * qp->rq_hdr_buf_size);
		sge->lkey = 0xFFFFFFFF;
		sge->size = qp->rq_hdr_buf_size;
		return qp->rq_hdr_buf + sw_prod * sge->size;
	}
	return NULL;
}

void bnxt_qplib_post_send_db(struct bnxt_qplib_qp *qp)
{
	struct bnxt_qplib_q *sq = &qp->sq;
	struct dbr_dbr db_msg = { 0 };
	u32 sw_prod;

	sw_prod = HWQ_CMP(sq->hwq.prod, &sq->hwq);

	db_msg.index = cpu_to_le32((sw_prod << DBR_DBR_INDEX_SFT) &
				   DBR_DBR_INDEX_MASK);
	db_msg.type_xid =
		cpu_to_le32(((qp->id << DBR_DBR_XID_SFT) & DBR_DBR_XID_MASK) |
			    DBR_DBR_TYPE_SQ);
	/* Flush all the WQE writes to HW */
	wmb();
	__iowrite64_copy(qp->dpi->dbr, &db_msg, sizeof(db_msg) / sizeof(u64));
}
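
/* Illustrative note (not part of the driver): the doorbell is decoupled from
 * bnxt_qplib_post_send() so a ULP can batch WQEs and ring once, e.g.:
 *
 *	for (i = 0; i < n; i++)
 *		rc = bnxt_qplib_post_send(qp, &wqe[i]);
 *	bnxt_qplib_post_send_db(qp);	// single producer-index update
 */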

int bnxt_qplib_post_send(struct bnxt_qplib_qp *qp,
			 struct bnxt_qplib_swqe *wqe)
{
	struct bnxt_qplib_q *sq = &qp->sq;
	struct bnxt_qplib_swq *swq;
	struct sq_send *hw_sq_send_hdr, **hw_sq_send_ptr;
	struct sq_sge *hw_sge;
	struct bnxt_qplib_nq_work *nq_work = NULL;
	bool sch_handler = false;
	u32 sw_prod;
	u8 wqe_size16;
	int i, rc = 0, data_len = 0, pkt_num = 0;
	__le32 temp32;

	if (qp->state != CMDQ_MODIFY_QP_NEW_STATE_RTS) {
		if (qp->state == CMDQ_MODIFY_QP_NEW_STATE_ERR) {
			sch_handler = true;
			dev_dbg(&sq->hwq.pdev->dev,
				"%s Error QP. Scheduling for poll_cq\n",
				__func__);
			goto queue_err;
		}
	}

	if (bnxt_qplib_queue_full(sq)) {
		dev_err(&sq->hwq.pdev->dev,
			"QPLIB: prod = %#x cons = %#x qdepth = %#x delta = %#x",
			sq->hwq.prod, sq->hwq.cons, sq->hwq.max_elements,
			sq->q_full_delta);
		rc = -ENOMEM;
		goto done;
	}
	sw_prod = HWQ_CMP(sq->hwq.prod, &sq->hwq);
	swq = &sq->swq[sw_prod];
	swq->wr_id = wqe->wr_id;
	swq->type = wqe->type;
	swq->flags = wqe->flags;
	if (qp->sig_type)
		swq->flags |= SQ_SEND_FLAGS_SIGNAL_COMP;
	swq->start_psn = sq->psn & BTH_PSN_MASK;

	hw_sq_send_ptr = (struct sq_send **)sq->hwq.pbl_ptr;
	hw_sq_send_hdr = &hw_sq_send_ptr[get_sqe_pg(sw_prod)]
					[get_sqe_idx(sw_prod)];

	memset(hw_sq_send_hdr, 0, BNXT_QPLIB_MAX_SQE_ENTRY_SIZE);

	if (wqe->flags & BNXT_QPLIB_SWQE_FLAGS_INLINE) {
		/* Copy the inline data */
		if (wqe->inline_len > BNXT_QPLIB_SWQE_MAX_INLINE_LENGTH) {
			dev_warn(&sq->hwq.pdev->dev,
				 "QPLIB: Inline data length > 96 detected");
			data_len = BNXT_QPLIB_SWQE_MAX_INLINE_LENGTH;
		} else {
			data_len = wqe->inline_len;
		}
		memcpy(hw_sq_send_hdr->data, wqe->inline_data, data_len);
		wqe_size16 = (data_len + 15) >> 4;
	} else {
		for (i = 0, hw_sge = (struct sq_sge *)hw_sq_send_hdr->data;
		     i < wqe->num_sge; i++, hw_sge++) {
			hw_sge->va_or_pa = cpu_to_le64(wqe->sg_list[i].addr);
			hw_sge->l_key = cpu_to_le32(wqe->sg_list[i].lkey);
			hw_sge->size = cpu_to_le32(wqe->sg_list[i].size);
			data_len += wqe->sg_list[i].size;
		}
		/* Each SGE entry = 1 WQE size16 */
		wqe_size16 = wqe->num_sge;
		/* HW requires wqe size has room for atleast one SGE even if
		 * none was supplied by ULP
		 */
		if (!wqe->num_sge)
			wqe_size16++;
	}

	/* Specifics */
	switch (wqe->type) {
	case BNXT_QPLIB_SWQE_TYPE_SEND:
		if (qp->type == CMDQ_CREATE_QP1_TYPE_GSI) {
			/* Assemble info for Raw Ethertype QPs */
			struct sq_send_raweth_qp1 *sqe =
				(struct sq_send_raweth_qp1 *)hw_sq_send_hdr;

			sqe->wqe_type = wqe->type;
			sqe->flags = wqe->flags;
			sqe->wqe_size = wqe_size16 +
				((offsetof(typeof(*sqe), data) + 15) >> 4);
			sqe->cfa_action = cpu_to_le16(wqe->rawqp1.cfa_action);
			sqe->lflags = cpu_to_le16(wqe->rawqp1.lflags);
			sqe->length = cpu_to_le32(data_len);
			sqe->cfa_meta = cpu_to_le32((wqe->rawqp1.cfa_meta &
				SQ_SEND_RAWETH_QP1_CFA_META_VLAN_VID_MASK) <<
				SQ_SEND_RAWETH_QP1_CFA_META_VLAN_VID_SFT);

			break;
		}
		/* else, just fall thru */
	case BNXT_QPLIB_SWQE_TYPE_SEND_WITH_IMM:
	case BNXT_QPLIB_SWQE_TYPE_SEND_WITH_INV:
	{
		struct sq_send *sqe = (struct sq_send *)hw_sq_send_hdr;

		sqe->wqe_type = wqe->type;
		sqe->flags = wqe->flags;
		sqe->wqe_size = wqe_size16 +
				((offsetof(typeof(*sqe), data) + 15) >> 4);
		sqe->inv_key_or_imm_data = cpu_to_le32(
						wqe->send.inv_key);
		if (qp->type == CMDQ_CREATE_QP_TYPE_UD) {
			sqe->q_key = cpu_to_le32(wqe->send.q_key);
			sqe->dst_qp = cpu_to_le32(
					wqe->send.dst_qp & SQ_SEND_DST_QP_MASK);
			sqe->length = cpu_to_le32(data_len);
			sqe->avid = cpu_to_le32(wqe->send.avid &
						SQ_SEND_AVID_MASK);
			sq->psn = (sq->psn + 1) & BTH_PSN_MASK;
		} else {
			sqe->length = cpu_to_le32(data_len);
			sqe->dst_qp = 0;
			sqe->avid = 0;
			if (qp->mtu)
				pkt_num = (data_len + qp->mtu - 1) / qp->mtu;
			if (!pkt_num)
				pkt_num = 1;
			sq->psn = (sq->psn + pkt_num) & BTH_PSN_MASK;
		}
		break;
	}
	case BNXT_QPLIB_SWQE_TYPE_RDMA_WRITE:
	case BNXT_QPLIB_SWQE_TYPE_RDMA_WRITE_WITH_IMM:
	case BNXT_QPLIB_SWQE_TYPE_RDMA_READ:
	{
		struct sq_rdma *sqe = (struct sq_rdma *)hw_sq_send_hdr;

		sqe->wqe_type = wqe->type;
		sqe->flags = wqe->flags;
		sqe->wqe_size = wqe_size16 +
				((offsetof(typeof(*sqe), data) + 15) >> 4);
		sqe->imm_data = cpu_to_le32(wqe->rdma.inv_key);
		sqe->length = cpu_to_le32((u32)data_len);
		sqe->remote_va = cpu_to_le64(wqe->rdma.remote_va);
		sqe->remote_key = cpu_to_le32(wqe->rdma.r_key);
		if (qp->mtu)
			pkt_num = (data_len + qp->mtu - 1) / qp->mtu;
		if (!pkt_num)
			pkt_num = 1;
		sq->psn = (sq->psn + pkt_num) & BTH_PSN_MASK;
		break;
	}
	case BNXT_QPLIB_SWQE_TYPE_ATOMIC_CMP_AND_SWP:
	case BNXT_QPLIB_SWQE_TYPE_ATOMIC_FETCH_AND_ADD:
	{
		struct sq_atomic *sqe = (struct sq_atomic *)hw_sq_send_hdr;

		sqe->wqe_type = wqe->type;
		sqe->flags = wqe->flags;
		sqe->remote_key = cpu_to_le32(wqe->atomic.r_key);
		sqe->remote_va = cpu_to_le64(wqe->atomic.remote_va);
		sqe->swap_data = cpu_to_le64(wqe->atomic.swap_data);
		sqe->cmp_data = cpu_to_le64(wqe->atomic.cmp_data);
		if (qp->mtu)
			pkt_num = (data_len + qp->mtu - 1) / qp->mtu;
		if (!pkt_num)
			pkt_num = 1;
		sq->psn = (sq->psn + pkt_num) & BTH_PSN_MASK;
		break;
	}
	case BNXT_QPLIB_SWQE_TYPE_LOCAL_INV:
	{
		struct sq_localinvalidate *sqe =
				(struct sq_localinvalidate *)hw_sq_send_hdr;

		sqe->wqe_type = wqe->type;
		sqe->flags = wqe->flags;
		sqe->inv_l_key = cpu_to_le32(wqe->local_inv.inv_l_key);

		break;
	}
	case BNXT_QPLIB_SWQE_TYPE_FAST_REG_MR:
	{
		struct sq_fr_pmr *sqe = (struct sq_fr_pmr *)hw_sq_send_hdr;

		sqe->wqe_type = wqe->type;
		sqe->flags = wqe->flags;
		sqe->access_cntl = wqe->frmr.access_cntl |
				   SQ_FR_PMR_ACCESS_CNTL_LOCAL_WRITE;
		sqe->zero_based_page_size_log =
			(wqe->frmr.pg_sz_log & SQ_FR_PMR_PAGE_SIZE_LOG_MASK) <<
			SQ_FR_PMR_PAGE_SIZE_LOG_SFT |
			(wqe->frmr.zero_based ? SQ_FR_PMR_ZERO_BASED : 0);
		sqe->l_key = cpu_to_le32(wqe->frmr.l_key);
		temp32 = cpu_to_le32(wqe->frmr.length);
		memcpy(sqe->length, &temp32, sizeof(wqe->frmr.length));
		sqe->numlevels_pbl_page_size_log =
			((wqe->frmr.pbl_pg_sz_log <<
					SQ_FR_PMR_PBL_PAGE_SIZE_LOG_SFT) &
					SQ_FR_PMR_PBL_PAGE_SIZE_LOG_MASK) |
			((wqe->frmr.levels << SQ_FR_PMR_NUMLEVELS_SFT) &
					SQ_FR_PMR_NUMLEVELS_MASK);

		/* Convert the page list to HW format with valid PTE bits */
		for (i = 0; i < wqe->frmr.page_list_len; i++)
			wqe->frmr.pbl_ptr[i] = cpu_to_le64(
						wqe->frmr.page_list[i] |
						PTU_PTE_VALID);
		sqe->pblptr = cpu_to_le64(wqe->frmr.pbl_dma_ptr);
		sqe->va = cpu_to_le64(wqe->frmr.va);

		break;
	}
	case BNXT_QPLIB_SWQE_TYPE_BIND_MW:
	{
		struct sq_bind *sqe = (struct sq_bind *)hw_sq_send_hdr;

		sqe->wqe_type = wqe->type;
		sqe->flags = wqe->flags;
		sqe->access_cntl = wqe->bind.access_cntl;
		sqe->mw_type_zero_based = wqe->bind.mw_type |
			(wqe->bind.zero_based ? SQ_BIND_ZERO_BASED : 0);
		sqe->parent_l_key = cpu_to_le32(wqe->bind.parent_l_key);
		sqe->l_key = cpu_to_le32(wqe->bind.r_key);
		sqe->va = cpu_to_le64(wqe->bind.va);
		temp32 = cpu_to_le32(wqe->bind.length);
		memcpy(&sqe->length, &temp32, sizeof(wqe->bind.length));
		break;
	}
	default:
		/* Bad wqe, return error */
		rc = -EINVAL;
		goto done;
	}
	swq->next_psn = sq->psn & BTH_PSN_MASK;
	if (swq->psn_search) {
		swq->psn_search->opcode_start_psn = cpu_to_le32(
			((swq->start_psn << SQ_PSN_SEARCH_START_PSN_SFT) &
			 SQ_PSN_SEARCH_START_PSN_MASK) |
			((wqe->type << SQ_PSN_SEARCH_OPCODE_SFT) &
			 SQ_PSN_SEARCH_OPCODE_MASK));
		swq->psn_search->flags_next_psn = cpu_to_le32(
			((swq->next_psn << SQ_PSN_SEARCH_NEXT_PSN_SFT) &
			 SQ_PSN_SEARCH_NEXT_PSN_MASK));
	}
queue_err:
	if (sch_handler) {
		/* Store the ULP info in the software structures */
		sw_prod = HWQ_CMP(sq->hwq.prod, &sq->hwq);
		swq = &sq->swq[sw_prod];
		swq->wr_id = wqe->wr_id;
		swq->type = wqe->type;
		swq->flags = wqe->flags;
		if (qp->sig_type)
			swq->flags |= SQ_SEND_FLAGS_SIGNAL_COMP;
		swq->start_psn = sq->psn & BTH_PSN_MASK;
	}
	sq->hwq.prod++;
	qp->wqe_cnt++;

done:
	if (sch_handler) {
		nq_work = kzalloc(sizeof(*nq_work), GFP_ATOMIC);
		if (nq_work) {
			nq_work->cq = qp->scq;
			nq_work->nq = qp->scq->nq;
			INIT_WORK(&nq_work->work, bnxt_qpn_cqn_sched_task);
			queue_work(qp->scq->nq->cqn_wq, &nq_work->work);
		} else {
			dev_err(&sq->hwq.pdev->dev,
				"QPLIB: FP: Failed to allocate SQ nq_work!");
			rc = -ENOMEM;
		}
	}
	return rc;
}
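
/* Illustrative usage (not part of the driver): posting a two-SGE signaled
 * SEND through the API above, under the assumption that lkeys and DMA
 * addresses came from a registered MR. A hedged sketch:
 *
 *	struct bnxt_qplib_swqe wqe = {};
 *
 *	wqe.type = BNXT_QPLIB_SWQE_TYPE_SEND;
 *	wqe.flags = BNXT_QPLIB_SWQE_FLAGS_SIGNAL_COMP;
 *	wqe.wr_id = my_cookie;
 *	wqe.sg_list = sgl;		// 2 prepared bnxt_qplib_sge entries
 *	wqe.num_sge = 2;
 *	rc = bnxt_qplib_post_send(qp, &wqe);
 *	if (!rc)
 *		bnxt_qplib_post_send_db(qp);
 */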

void bnxt_qplib_post_recv_db(struct bnxt_qplib_qp *qp)
{
	struct bnxt_qplib_q *rq = &qp->rq;
	struct dbr_dbr db_msg = { 0 };
	u32 sw_prod;

	sw_prod = HWQ_CMP(rq->hwq.prod, &rq->hwq);
	db_msg.index = cpu_to_le32((sw_prod << DBR_DBR_INDEX_SFT) &
				   DBR_DBR_INDEX_MASK);
	db_msg.type_xid =
		cpu_to_le32(((qp->id << DBR_DBR_XID_SFT) & DBR_DBR_XID_MASK) |
			    DBR_DBR_TYPE_RQ);

	/* Flush the writes to HW Rq */
	wmb();
	__iowrite64_copy(qp->dpi->dbr, &db_msg, sizeof(db_msg) / sizeof(u64));
}

int bnxt_qplib_post_recv(struct bnxt_qplib_qp *qp,
			 struct bnxt_qplib_swqe *wqe)
{
	struct bnxt_qplib_q *rq = &qp->rq;
	struct rq_wqe *rqe, **rqe_ptr;
	struct sq_sge *hw_sge;
	struct bnxt_qplib_nq_work *nq_work = NULL;
	bool sch_handler = false;
	u32 sw_prod;
	int i, rc = 0;

	if (qp->state == CMDQ_MODIFY_QP_NEW_STATE_ERR) {
		sch_handler = true;
		dev_dbg(&rq->hwq.pdev->dev,
			"%s Error QP. Scheduling for poll_cq\n",
			__func__);
		goto queue_err;
	}
	if (bnxt_qplib_queue_full(rq)) {
		dev_err(&rq->hwq.pdev->dev,
			"QPLIB: FP: QP (0x%x) RQ is full!", qp->id);
		rc = -EINVAL;
		goto done;
	}
	sw_prod = HWQ_CMP(rq->hwq.prod, &rq->hwq);
	rq->swq[sw_prod].wr_id = wqe->wr_id;

	rqe_ptr = (struct rq_wqe **)rq->hwq.pbl_ptr;
	rqe = &rqe_ptr[RQE_PG(sw_prod)][RQE_IDX(sw_prod)];

	memset(rqe, 0, BNXT_QPLIB_MAX_RQE_ENTRY_SIZE);

	/* Calculate wqe_size16 and data_len */
	for (i = 0, hw_sge = (struct sq_sge *)rqe->data;
	     i < wqe->num_sge; i++, hw_sge++) {
		hw_sge->va_or_pa = cpu_to_le64(wqe->sg_list[i].addr);
		hw_sge->l_key = cpu_to_le32(wqe->sg_list[i].lkey);
		hw_sge->size = cpu_to_le32(wqe->sg_list[i].size);
	}
	rqe->wqe_type = wqe->type;
	rqe->flags = wqe->flags;
	rqe->wqe_size = wqe->num_sge +
			((offsetof(typeof(*rqe), data) + 15) >> 4);
	/* HW requires wqe size has room for atleast one SGE even if none
	 * was supplied by ULP
	 */
	if (!wqe->num_sge)
		rqe->wqe_size++;

	/* Supply the rqe->wr_id index to the wr_id_tbl for now */
	rqe->wr_id[0] = cpu_to_le32(sw_prod);

queue_err:
	if (sch_handler) {
		/* Store the ULP info in the software structures */
		sw_prod = HWQ_CMP(rq->hwq.prod, &rq->hwq);
		rq->swq[sw_prod].wr_id = wqe->wr_id;
	}

	rq->hwq.prod++;
	if (sch_handler) {
		nq_work = kzalloc(sizeof(*nq_work), GFP_ATOMIC);
		if (nq_work) {
			nq_work->cq = qp->rcq;
			nq_work->nq = qp->rcq->nq;
			INIT_WORK(&nq_work->work, bnxt_qpn_cqn_sched_task);
			queue_work(qp->rcq->nq->cqn_wq, &nq_work->work);
		} else {
			dev_err(&rq->hwq.pdev->dev,
				"QPLIB: FP: Failed to allocate RQ nq_work!");
			rc = -ENOMEM;
		}
	}
done:
	return rc;
}
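
/* Illustrative usage (not part of the driver): receives mirror the send
 * path; the wr_id is recovered at poll time via the index stashed in
 * wr_id[0] above. A hedged sketch:
 *
 *	wqe.type = BNXT_QPLIB_SWQE_TYPE_RECV;
 *	wqe.wr_id = my_cookie;
 *	wqe.sg_list = sgl;
 *	wqe.num_sge = 1;
 *	rc = bnxt_qplib_post_recv(qp, &wqe);
 *	if (!rc)
 *		bnxt_qplib_post_recv_db(qp);
 */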

/* CQ */

/* Spinlock must be held */
static void bnxt_qplib_arm_cq_enable(struct bnxt_qplib_cq *cq)
{
	struct dbr_dbr db_msg = { 0 };

	db_msg.type_xid =
		cpu_to_le32(((cq->id << DBR_DBR_XID_SFT) & DBR_DBR_XID_MASK) |
			    DBR_DBR_TYPE_CQ_ARMENA);
	/* Flush memory writes before enabling the CQ */
	wmb();
	__iowrite64_copy(cq->dbr_base, &db_msg, sizeof(db_msg) / sizeof(u64));
}

static void bnxt_qplib_arm_cq(struct bnxt_qplib_cq *cq, u32 arm_type)
{
	struct bnxt_qplib_hwq *cq_hwq = &cq->hwq;
	struct dbr_dbr db_msg = { 0 };
	u32 sw_cons;

	/* Ring DB */
	sw_cons = HWQ_CMP(cq_hwq->cons, cq_hwq);
	db_msg.index = cpu_to_le32((sw_cons << DBR_DBR_INDEX_SFT) &
				   DBR_DBR_INDEX_MASK);
	db_msg.type_xid =
		cpu_to_le32(((cq->id << DBR_DBR_XID_SFT) & DBR_DBR_XID_MASK) |
			    arm_type);
	/* Flush memory writes before arming the CQ */
	wmb();
	__iowrite64_copy(cq->dpi->dbr, &db_msg, sizeof(db_msg) / sizeof(u64));
}

int bnxt_qplib_create_cq(struct bnxt_qplib_res *res, struct bnxt_qplib_cq *cq)
{
	struct bnxt_qplib_rcfw *rcfw = res->rcfw;
	struct cmdq_create_cq req;
	struct creq_create_cq_resp resp;
	struct bnxt_qplib_pbl *pbl;
	u16 cmd_flags = 0;
	int rc;

	cq->hwq.max_elements = cq->max_wqe;
	rc = bnxt_qplib_alloc_init_hwq(res->pdev, &cq->hwq, cq->sghead,
				       cq->nmap, &cq->hwq.max_elements,
				       BNXT_QPLIB_MAX_CQE_ENTRY_SIZE, 0,
				       PAGE_SIZE, HWQ_TYPE_QUEUE);
	if (rc)
		goto exit;

	RCFW_CMD_PREP(req, CREATE_CQ, cmd_flags);

	if (!cq->dpi) {
		dev_err(&rcfw->pdev->dev,
			"QPLIB: FP: CREATE_CQ failed due to NULL DPI");
		/* Free the hwq allocated above instead of leaking it */
		rc = -EINVAL;
		goto fail;
	}
	req.dpi = cpu_to_le32(cq->dpi->dpi);
	req.cq_handle = cpu_to_le64(cq->cq_handle);

	req.cq_size = cpu_to_le32(cq->hwq.max_elements);
	pbl = &cq->hwq.pbl[PBL_LVL_0];
	req.pg_size_lvl = cpu_to_le32(
	    ((cq->hwq.level & CMDQ_CREATE_CQ_LVL_MASK) <<
						CMDQ_CREATE_CQ_LVL_SFT) |
	    (pbl->pg_size == ROCE_PG_SIZE_4K ? CMDQ_CREATE_CQ_PG_SIZE_PG_4K :
	     pbl->pg_size == ROCE_PG_SIZE_8K ? CMDQ_CREATE_CQ_PG_SIZE_PG_8K :
	     pbl->pg_size == ROCE_PG_SIZE_64K ? CMDQ_CREATE_CQ_PG_SIZE_PG_64K :
	     pbl->pg_size == ROCE_PG_SIZE_2M ? CMDQ_CREATE_CQ_PG_SIZE_PG_2M :
	     pbl->pg_size == ROCE_PG_SIZE_8M ? CMDQ_CREATE_CQ_PG_SIZE_PG_8M :
	     pbl->pg_size == ROCE_PG_SIZE_1G ? CMDQ_CREATE_CQ_PG_SIZE_PG_1G :
	     CMDQ_CREATE_CQ_PG_SIZE_PG_4K));

	req.pbl = cpu_to_le64(pbl->pg_map_arr[0]);

	req.cq_fco_cnq_id = cpu_to_le32(
			(cq->cnq_hw_ring_id & CMDQ_CREATE_CQ_CNQ_ID_MASK) <<
			 CMDQ_CREATE_CQ_CNQ_ID_SFT);

	rc = bnxt_qplib_rcfw_send_message(rcfw, (void *)&req,
					  (void *)&resp, NULL, 0);
	if (rc)
		goto fail;

	cq->id = le32_to_cpu(resp.xid);
	cq->dbr_base = res->dpi_tbl.dbr_bar_reg_iomem;
	cq->period = BNXT_QPLIB_QUEUE_START_PERIOD;
	init_waitqueue_head(&cq->waitq);
	INIT_LIST_HEAD(&cq->sqf_head);
	INIT_LIST_HEAD(&cq->rqf_head);
	spin_lock_init(&cq->compl_lock);

	bnxt_qplib_arm_cq_enable(cq);
	return 0;

fail:
	bnxt_qplib_free_hwq(res->pdev, &cq->hwq);
exit:
	return rc;
}

int bnxt_qplib_destroy_cq(struct bnxt_qplib_res *res, struct bnxt_qplib_cq *cq)
{
	struct bnxt_qplib_rcfw *rcfw = res->rcfw;
	struct cmdq_destroy_cq req;
	struct creq_destroy_cq_resp resp;
	u16 cmd_flags = 0;
	int rc;

	RCFW_CMD_PREP(req, DESTROY_CQ, cmd_flags);

	req.cq_cid = cpu_to_le32(cq->id);
	rc = bnxt_qplib_rcfw_send_message(rcfw, (void *)&req,
					  (void *)&resp, NULL, 0);
	if (rc)
		return rc;
	bnxt_qplib_free_hwq(res->pdev, &cq->hwq);
	return 0;
}
2000
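/* Flush helpers: fabricate FLUSHED_ERR completions in software for WQEs the
 * hardware will no longer complete (the QP has moved to the error state).
 * Both run under the CQ flush locks and consume from *budget like the normal
 * poll path, returning -EAGAIN when the budget runs out early.
 */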
static int __flush_sq(struct bnxt_qplib_q *sq, struct bnxt_qplib_qp *qp,
		      struct bnxt_qplib_cqe **pcqe, int *budget)
{
	u32 sw_prod, sw_cons;
	struct bnxt_qplib_cqe *cqe;
	int rc = 0;

	/* Flush the rest of the SQ */
	sw_prod = HWQ_CMP(sq->hwq.prod, &sq->hwq);
	cqe = *pcqe;
	while (*budget) {
		sw_cons = HWQ_CMP(sq->hwq.cons, &sq->hwq);
		if (sw_cons == sw_prod)
			break;

		/* Skip the FENCE WQE completions */
		if (sq->swq[sw_cons].wr_id == BNXT_QPLIB_FENCE_WRID) {
			bnxt_qplib_cancel_phantom_processing(qp);
			goto skip_compl;
		}
		memset(cqe, 0, sizeof(*cqe));
		cqe->status = CQ_REQ_STATUS_WORK_REQUEST_FLUSHED_ERR;
		cqe->opcode = CQ_BASE_CQE_TYPE_REQ;
		cqe->qp_handle = (u64)(unsigned long)qp;
		cqe->wr_id = sq->swq[sw_cons].wr_id;
		cqe->src_qp = qp->id;
		cqe->type = sq->swq[sw_cons].type;
		cqe++;
		(*budget)--;
skip_compl:
		sq->hwq.cons++;
	}
	*pcqe = cqe;
	if (!(*budget) && HWQ_CMP(sq->hwq.cons, &sq->hwq) != sw_prod)
		/* Out of budget */
		rc = -EAGAIN;

	return rc;
}

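/* The RQ variant picks the CQE opcode from the QP type (RawEth/QP1, RC or
 * UD), since responder completions are typed, and has no fence wr_id to
 * skip.
 */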
static int __flush_rq(struct bnxt_qplib_q *rq, struct bnxt_qplib_qp *qp,
		      struct bnxt_qplib_cqe **pcqe, int *budget)
{
	struct bnxt_qplib_cqe *cqe;
	u32 sw_prod, sw_cons;
	int rc = 0;
	int opcode = 0;

	switch (qp->type) {
	case CMDQ_CREATE_QP1_TYPE_GSI:
		opcode = CQ_BASE_CQE_TYPE_RES_RAWETH_QP1;
		break;
	case CMDQ_CREATE_QP_TYPE_RC:
		opcode = CQ_BASE_CQE_TYPE_RES_RC;
		break;
	case CMDQ_CREATE_QP_TYPE_UD:
		opcode = CQ_BASE_CQE_TYPE_RES_UD;
		break;
	}

	/* Flush the rest of the RQ */
	sw_prod = HWQ_CMP(rq->hwq.prod, &rq->hwq);
	cqe = *pcqe;
	while (*budget) {
		sw_cons = HWQ_CMP(rq->hwq.cons, &rq->hwq);
		if (sw_cons == sw_prod)
			break;
		memset(cqe, 0, sizeof(*cqe));
		cqe->status = CQ_RES_RC_STATUS_WORK_REQUEST_FLUSHED_ERR;
		cqe->opcode = opcode;
		cqe->qp_handle = (u64)(unsigned long)qp;
		cqe->wr_id = rq->swq[sw_cons].wr_id;
		cqe++;
		(*budget)--;
		rq->hwq.cons++;
	}
	*pcqe = cqe;
	if (!*budget && HWQ_CMP(rq->hwq.cons, &rq->hwq) != sw_prod)
		/* Out of budget */
		rc = -EAGAIN;

	return rc;
}

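/* Force the QP into the error state from CQ/NQ context, e.g. when a REQ
 * completion or a terminal CQE reports a failure, so that no new work is
 * accepted while the flush lists are built.
 */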
void bnxt_qplib_mark_qp_error(void *qp_handle)
{
	struct bnxt_qplib_qp *qp = qp_handle;

	if (!qp)
		return;

	/* Must block new posting of SQ and RQ */
	qp->state = CMDQ_MODIFY_QP_NEW_STATE_ERR;
	bnxt_qplib_cancel_phantom_processing(qp);
}

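/* WA 9060: when the SQ contains a fence pseudo-WQE (BNXT_QPLIB_FENCE_WRID),
 * its "phantom" CQE may arrive after the CQE that reported the surrounding
 * work.  The helper below marks the SQ via the psn_search flag, re-arms the
 * CQ, and on subsequent passes peeks ahead in the CQ ring until the phantom
 * REQ CQE for this SQ shows up, before normal completion processing resumes.
 */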
static int do_wa9060(struct bnxt_qplib_qp *qp, struct bnxt_qplib_cq *cq,
		     u32 cq_cons, u32 sw_sq_cons, u32 cqe_sq_cons)
{
	struct bnxt_qplib_q *sq = &qp->sq;
	struct bnxt_qplib_swq *swq;
	u32 peek_sw_cq_cons, peek_raw_cq_cons, peek_sq_cons_idx;
	struct cq_base *peek_hwcqe, **peek_hw_cqe_ptr;
	struct cq_req *peek_req_hwcqe;
	struct bnxt_qplib_qp *peek_qp;
	struct bnxt_qplib_q *peek_sq;
	int i, rc = 0;

	/* Check for the psn_search marking before completing */
	swq = &sq->swq[sw_sq_cons];
	if (swq->psn_search &&
	    le32_to_cpu(swq->psn_search->flags_next_psn) & 0x80000000) {
		/* Unmark */
		swq->psn_search->flags_next_psn = cpu_to_le32
			(le32_to_cpu(swq->psn_search->flags_next_psn)
			 & ~0x80000000);
		dev_dbg(&cq->hwq.pdev->dev,
			"FP: Process Req cq_cons=0x%x qp=0x%x sq cons sw=0x%x cqe=0x%x marked!\n",
			cq_cons, qp->id, sw_sq_cons, cqe_sq_cons);
		sq->condition = true;
		sq->send_phantom = true;

		/* TODO: Only ARM if the previous SQE is ARMALL */
		bnxt_qplib_arm_cq(cq, DBR_DBR_TYPE_CQ_ARMALL);

		rc = -EAGAIN;
		goto out;
	}
	if (sq->condition) {
		/* Peek at the completions */
		peek_raw_cq_cons = cq->hwq.cons;
		peek_sw_cq_cons = cq_cons;
		i = cq->hwq.max_elements;
		while (i--) {
			peek_sw_cq_cons = HWQ_CMP((peek_sw_cq_cons), &cq->hwq);
			peek_hw_cqe_ptr = (struct cq_base **)cq->hwq.pbl_ptr;
			peek_hwcqe = &peek_hw_cqe_ptr[CQE_PG(peek_sw_cq_cons)]
						     [CQE_IDX(peek_sw_cq_cons)];
			/* If the next hwcqe is VALID */
			if (CQE_CMP_VALID(peek_hwcqe, peek_raw_cq_cons,
					  cq->hwq.max_elements)) {
				/*
				 * The valid test of the entry must be done
				 * first before reading any further.
				 */
				dma_rmb();
				/* If the next hwcqe is a REQ */
				if ((peek_hwcqe->cqe_type_toggle &
				    CQ_BASE_CQE_TYPE_MASK) ==
				    CQ_BASE_CQE_TYPE_REQ) {
					peek_req_hwcqe = (struct cq_req *)
							 peek_hwcqe;
					peek_qp = (struct bnxt_qplib_qp *)
						((unsigned long)
						 le64_to_cpu
						 (peek_req_hwcqe->qp_handle));
					peek_sq = &peek_qp->sq;
					peek_sq_cons_idx = HWQ_CMP(le16_to_cpu(
						peek_req_hwcqe->sq_cons_idx)
						- 1, &sq->hwq);
					/* If the hwcqe's sq's wr_id matches */
					if (peek_sq == sq &&
					    sq->swq[peek_sq_cons_idx].wr_id ==
					    BNXT_QPLIB_FENCE_WRID) {
						/*
						 * Unbreak only if the phantom
						 * comes back
						 */
						dev_dbg(&cq->hwq.pdev->dev,
							"FP: Got Phantom CQE");
						sq->condition = false;
						sq->single = true;
						rc = 0;
						goto out;
					}
				}
				/* Valid but not the phantom, keep looping */
			} else {
				/* Not a valid completion */
				rc = -EINVAL;
				goto out;
			}
			peek_sw_cq_cons++;
			peek_raw_cq_cons++;
		}
		dev_err(&cq->hwq.pdev->dev,
			"Should not have come here! cq_cons=0x%x qp=0x%x sq cons sw=0x%x hw=0x%x",
			cq_cons, qp->id, sw_sq_cons, cqe_sq_cons);
		rc = -EINVAL;
	}
out:
	return rc;
}

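/* Process a REQ (send-side) completion.  One hardware CQE can aggregate
 * several SWQEs; completions are fabricated for every signaled SWQE from the
 * current sq consumer up to, but excluding, the index reported in the CQE.
 */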
static int bnxt_qplib_cq_process_req(struct bnxt_qplib_cq *cq,
				     struct cq_req *hwcqe,
				     struct bnxt_qplib_cqe **pcqe, int *budget,
				     u32 cq_cons, struct bnxt_qplib_qp **lib_qp)
{
	struct bnxt_qplib_qp *qp;
	struct bnxt_qplib_q *sq;
	struct bnxt_qplib_cqe *cqe;
	u32 sw_sq_cons, cqe_sq_cons;
	struct bnxt_qplib_swq *swq;
	int rc = 0;

	qp = (struct bnxt_qplib_qp *)((unsigned long)
				      le64_to_cpu(hwcqe->qp_handle));
	if (!qp) {
		dev_err(&cq->hwq.pdev->dev,
			"QPLIB: FP: Process Req qp is NULL");
		return -EINVAL;
	}
	sq = &qp->sq;

	cqe_sq_cons = HWQ_CMP(le16_to_cpu(hwcqe->sq_cons_idx), &sq->hwq);
	if (cqe_sq_cons > sq->hwq.max_elements) {
		dev_err(&cq->hwq.pdev->dev,
			"QPLIB: FP: CQ Process req reported ");
		dev_err(&cq->hwq.pdev->dev,
			"QPLIB: sq_cons_idx 0x%x which exceeded max 0x%x",
			cqe_sq_cons, sq->hwq.max_elements);
		return -EINVAL;
	}

	if (qp->sq.flushed) {
		dev_dbg(&cq->hwq.pdev->dev,
			"%s: QPLIB: QP in Flush QP = %p\n", __func__, qp);
		goto done;
	}

	/* Require to walk the sq's swq to fabricate CQEs for all previously
	 * signaled SWQEs due to CQE aggregation from the current sq cons
	 * to the cqe_sq_cons
	 */
	cqe = *pcqe;
	while (*budget) {
		sw_sq_cons = HWQ_CMP(sq->hwq.cons, &sq->hwq);
		if (sw_sq_cons == cqe_sq_cons)
			/* Done */
			break;

		swq = &sq->swq[sw_sq_cons];
		memset(cqe, 0, sizeof(*cqe));
		cqe->opcode = CQ_BASE_CQE_TYPE_REQ;
		cqe->qp_handle = (u64)(unsigned long)qp;
		cqe->src_qp = qp->id;
		cqe->wr_id = swq->wr_id;
		if (cqe->wr_id == BNXT_QPLIB_FENCE_WRID)
			goto skip;
		cqe->type = swq->type;

		/* For the last CQE, check for status.  For errors, regardless
		 * of the request being signaled or not, it must complete with
		 * the hwcqe error status
		 */
		if (HWQ_CMP((sw_sq_cons + 1), &sq->hwq) == cqe_sq_cons &&
		    hwcqe->status != CQ_REQ_STATUS_OK) {
			cqe->status = hwcqe->status;
			dev_err(&cq->hwq.pdev->dev,
				"QPLIB: FP: CQ Processed Req ");
			dev_err(&cq->hwq.pdev->dev,
				"QPLIB: wr_id[%d] = 0x%llx with status 0x%x",
				sw_sq_cons, cqe->wr_id, cqe->status);
			cqe++;
			(*budget)--;
			bnxt_qplib_mark_qp_error(qp);
			/* Add qp to flush list of the CQ */
			bnxt_qplib_add_flush_qp(qp);
		} else {
			if (swq->flags & SQ_SEND_FLAGS_SIGNAL_COMP) {
				/* Before we complete, do WA 9060 */
				if (do_wa9060(qp, cq, cq_cons, sw_sq_cons,
					      cqe_sq_cons)) {
					*lib_qp = qp;
					goto out;
				}
				cqe->status = CQ_REQ_STATUS_OK;
				cqe++;
				(*budget)--;
			}
		}
skip:
		sq->hwq.cons++;
		if (sq->single)
			break;
	}
out:
	*pcqe = cqe;
	if (HWQ_CMP(sq->hwq.cons, &sq->hwq) != cqe_sq_cons) {
		/* Out of budget */
		rc = -EAGAIN;
		goto done;
	}
	/*
	 * Back to normal completion mode only after it has completed all of
	 * the WC for this CQE
	 */
	sq->single = false;
done:
	return rc;
}

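/* Return an SRQ element to the free list.  SRQ WQEs can complete out of
 * order, so free slots are kept as a linked list threaded through
 * swq[].next_idx, with last_idx marking the tail.
 */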
static void bnxt_qplib_release_srqe(struct bnxt_qplib_srq *srq, u32 tag)
{
	spin_lock(&srq->hwq.lock);
	/* Append the consumed entry at the tail of the free list */
	srq->swq[srq->last_idx].next_idx = (int)tag;
	srq->last_idx = (int)tag;
	srq->swq[srq->last_idx].next_idx = -1;
	srq->hwq.cons++;
	spin_unlock(&srq->hwq.lock);
}

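/* Process an RC responder completion: fill in one bnxt_qplib_cqe from the
 * hardware CQE and map the reported wr_id index back through either the SRQ
 * or the RQ shadow queue.
 */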
static int bnxt_qplib_cq_process_res_rc(struct bnxt_qplib_cq *cq,
					struct cq_res_rc *hwcqe,
					struct bnxt_qplib_cqe **pcqe,
					int *budget)
{
	struct bnxt_qplib_qp *qp;
	struct bnxt_qplib_q *rq;
	struct bnxt_qplib_srq *srq;
	struct bnxt_qplib_cqe *cqe;
	u32 wr_id_idx;
	int rc = 0;

	qp = (struct bnxt_qplib_qp *)((unsigned long)
				      le64_to_cpu(hwcqe->qp_handle));
	if (!qp) {
		dev_err(&cq->hwq.pdev->dev, "QPLIB: process_cq RC qp is NULL");
		return -EINVAL;
	}
	if (qp->rq.flushed) {
		dev_dbg(&cq->hwq.pdev->dev,
			"%s: QPLIB: QP in Flush QP = %p\n", __func__, qp);
		goto done;
	}

	cqe = *pcqe;
	cqe->opcode = hwcqe->cqe_type_toggle & CQ_BASE_CQE_TYPE_MASK;
	cqe->length = le32_to_cpu(hwcqe->length);
	cqe->invrkey = le32_to_cpu(hwcqe->imm_data_or_inv_r_key);
	cqe->mr_handle = le64_to_cpu(hwcqe->mr_handle);
	cqe->flags = le16_to_cpu(hwcqe->flags);
	cqe->status = hwcqe->status;
	cqe->qp_handle = (u64)(unsigned long)qp;

	wr_id_idx = le32_to_cpu(hwcqe->srq_or_rq_wr_id) &
				CQ_RES_RC_SRQ_OR_RQ_WR_ID_MASK;
	if (cqe->flags & CQ_RES_RC_FLAGS_SRQ_SRQ) {
		srq = qp->srq;
		if (!srq)
			return -EINVAL;
		if (wr_id_idx >= srq->hwq.max_elements) {
			dev_err(&cq->hwq.pdev->dev,
				"QPLIB: FP: CQ Process RC ");
			dev_err(&cq->hwq.pdev->dev,
				"QPLIB: wr_id idx 0x%x exceeded SRQ max 0x%x",
				wr_id_idx, srq->hwq.max_elements);
			return -EINVAL;
		}
		cqe->wr_id = srq->swq[wr_id_idx].wr_id;
		bnxt_qplib_release_srqe(srq, wr_id_idx);
		cqe++;
		(*budget)--;
		*pcqe = cqe;
	} else {
		rq = &qp->rq;
		if (wr_id_idx >= rq->hwq.max_elements) {
			dev_err(&cq->hwq.pdev->dev,
				"QPLIB: FP: CQ Process RC ");
			dev_err(&cq->hwq.pdev->dev,
				"QPLIB: wr_id idx 0x%x exceeded RQ max 0x%x",
				wr_id_idx, rq->hwq.max_elements);
			return -EINVAL;
		}
		cqe->wr_id = rq->swq[wr_id_idx].wr_id;
		cqe++;
		(*budget)--;
		rq->hwq.cons++;
		*pcqe = cqe;

		if (hwcqe->status != CQ_RES_RC_STATUS_OK) {
			qp->state = CMDQ_MODIFY_QP_NEW_STATE_ERR;
			/* Add qp to flush list of the CQ */
			bnxt_qplib_add_flush_qp(qp);
		}
	}

done:
	return rc;
}

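/* The UD variant additionally recovers the source QP number (split across
 * src_qp_low and the high bits of the wr_id word) and the source MAC from
 * the hardware CQE.
 */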
static int bnxt_qplib_cq_process_res_ud(struct bnxt_qplib_cq *cq,
					struct cq_res_ud *hwcqe,
					struct bnxt_qplib_cqe **pcqe,
					int *budget)
{
	struct bnxt_qplib_qp *qp;
	struct bnxt_qplib_q *rq;
	struct bnxt_qplib_srq *srq;
	struct bnxt_qplib_cqe *cqe;
	u32 wr_id_idx;
	int rc = 0;

	qp = (struct bnxt_qplib_qp *)((unsigned long)
				      le64_to_cpu(hwcqe->qp_handle));
	if (!qp) {
		dev_err(&cq->hwq.pdev->dev, "QPLIB: process_cq UD qp is NULL");
		return -EINVAL;
	}
	if (qp->rq.flushed) {
		dev_dbg(&cq->hwq.pdev->dev,
			"%s: QPLIB: QP in Flush QP = %p\n", __func__, qp);
		goto done;
	}
	cqe = *pcqe;
	cqe->opcode = hwcqe->cqe_type_toggle & CQ_BASE_CQE_TYPE_MASK;
	cqe->length = le32_to_cpu(hwcqe->length);
	cqe->invrkey = le32_to_cpu(hwcqe->imm_data);
	cqe->flags = le16_to_cpu(hwcqe->flags);
	cqe->status = hwcqe->status;
	cqe->qp_handle = (u64)(unsigned long)qp;
	memcpy(cqe->smac, hwcqe->src_mac, 6);
	wr_id_idx = le32_to_cpu(hwcqe->src_qp_high_srq_or_rq_wr_id)
				& CQ_RES_UD_SRQ_OR_RQ_WR_ID_MASK;
	cqe->src_qp = le16_to_cpu(hwcqe->src_qp_low) |
		      ((le32_to_cpu(hwcqe->src_qp_high_srq_or_rq_wr_id) &
			CQ_RES_UD_SRC_QP_HIGH_MASK) >> 8);

	if (cqe->flags & CQ_RES_RC_FLAGS_SRQ_SRQ) {
		srq = qp->srq;
		if (!srq)
			return -EINVAL;

		if (wr_id_idx >= srq->hwq.max_elements) {
			dev_err(&cq->hwq.pdev->dev,
				"QPLIB: FP: CQ Process UD ");
			dev_err(&cq->hwq.pdev->dev,
				"QPLIB: wr_id idx 0x%x exceeded SRQ max 0x%x",
				wr_id_idx, srq->hwq.max_elements);
			return -EINVAL;
		}
		cqe->wr_id = srq->swq[wr_id_idx].wr_id;
		bnxt_qplib_release_srqe(srq, wr_id_idx);
		cqe++;
		(*budget)--;
		*pcqe = cqe;
	} else {
		rq = &qp->rq;
		if (wr_id_idx >= rq->hwq.max_elements) {
			dev_err(&cq->hwq.pdev->dev,
				"QPLIB: FP: CQ Process UD ");
			dev_err(&cq->hwq.pdev->dev,
				"QPLIB: wr_id idx 0x%x exceeded RQ max 0x%x",
				wr_id_idx, rq->hwq.max_elements);
			return -EINVAL;
		}

		cqe->wr_id = rq->swq[wr_id_idx].wr_id;
		cqe++;
		(*budget)--;
		rq->hwq.cons++;
		*pcqe = cqe;

		if (hwcqe->status != CQ_RES_RC_STATUS_OK) {
			qp->state = CMDQ_MODIFY_QP_NEW_STATE_ERR;
			/* Add qp to flush list of the CQ */
			bnxt_qplib_add_flush_qp(qp);
		}
	}
done:
	return rc;
}

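/* A CQ is empty when the CQE at the current consumer index does not carry
 * the expected valid/toggle bit for this pass through the ring.
 */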
bool bnxt_qplib_is_cq_empty(struct bnxt_qplib_cq *cq)
{
	struct cq_base *hw_cqe, **hw_cqe_ptr;
	u32 sw_cons, raw_cons;
	bool rc = true;

	raw_cons = cq->hwq.cons;
	sw_cons = HWQ_CMP(raw_cons, &cq->hwq);
	hw_cqe_ptr = (struct cq_base **)cq->hwq.pbl_ptr;
	hw_cqe = &hw_cqe_ptr[CQE_PG(sw_cons)][CQE_IDX(sw_cons)];

	/* Check for Valid bit. If the CQE is valid, return false */
	rc = !CQE_CMP_VALID(hw_cqe, raw_cons, cq->hwq.max_elements);
	return rc;
}

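/* Raw Ethernet / QP1 (GSI) responder completions also carry the raweth
 * flags and metadata words, which the upper layer needs to interpret the
 * received frame.
 */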
static int bnxt_qplib_cq_process_res_raweth_qp1(struct bnxt_qplib_cq *cq,
						struct cq_res_raweth_qp1 *hwcqe,
						struct bnxt_qplib_cqe **pcqe,
						int *budget)
{
	struct bnxt_qplib_qp *qp;
	struct bnxt_qplib_q *rq;
	struct bnxt_qplib_srq *srq;
	struct bnxt_qplib_cqe *cqe;
	u32 wr_id_idx;
	int rc = 0;

	qp = (struct bnxt_qplib_qp *)((unsigned long)
				      le64_to_cpu(hwcqe->qp_handle));
	if (!qp) {
		dev_err(&cq->hwq.pdev->dev,
			"QPLIB: process_cq Raw/QP1 qp is NULL");
		return -EINVAL;
	}
	if (qp->rq.flushed) {
		dev_dbg(&cq->hwq.pdev->dev,
			"%s: QPLIB: QP in Flush QP = %p\n", __func__, qp);
		goto done;
	}
	cqe = *pcqe;
	cqe->opcode = hwcqe->cqe_type_toggle & CQ_BASE_CQE_TYPE_MASK;
	cqe->flags = le16_to_cpu(hwcqe->flags);
	cqe->qp_handle = (u64)(unsigned long)qp;

	wr_id_idx =
		le32_to_cpu(hwcqe->raweth_qp1_payload_offset_srq_or_rq_wr_id)
		& CQ_RES_RAWETH_QP1_SRQ_OR_RQ_WR_ID_MASK;
	cqe->src_qp = qp->id;
	if (qp->id == 1 && !cqe->length) {
		/* Add workaround for the length misdetection */
		cqe->length = 296;
	} else {
		cqe->length = le16_to_cpu(hwcqe->length);
	}
	cqe->pkey_index = qp->pkey_index;
	memcpy(cqe->smac, qp->smac, 6);

	cqe->raweth_qp1_flags = le16_to_cpu(hwcqe->raweth_qp1_flags);
	cqe->raweth_qp1_flags2 = le32_to_cpu(hwcqe->raweth_qp1_flags2);
	cqe->raweth_qp1_metadata = le32_to_cpu(hwcqe->raweth_qp1_metadata);

	if (cqe->flags & CQ_RES_RAWETH_QP1_FLAGS_SRQ_SRQ) {
		srq = qp->srq;
		if (!srq) {
			dev_err(&cq->hwq.pdev->dev,
				"QPLIB: FP: SRQ used but not defined??");
			return -EINVAL;
		}
		if (wr_id_idx >= srq->hwq.max_elements) {
			dev_err(&cq->hwq.pdev->dev,
				"QPLIB: FP: CQ Process Raw/QP1 ");
			dev_err(&cq->hwq.pdev->dev,
				"QPLIB: wr_id idx 0x%x exceeded SRQ max 0x%x",
				wr_id_idx, srq->hwq.max_elements);
			return -EINVAL;
		}
		cqe->wr_id = srq->swq[wr_id_idx].wr_id;
		bnxt_qplib_release_srqe(srq, wr_id_idx);
		cqe++;
		(*budget)--;
		*pcqe = cqe;
	} else {
		rq = &qp->rq;
		if (wr_id_idx >= rq->hwq.max_elements) {
			dev_err(&cq->hwq.pdev->dev,
				"QPLIB: FP: CQ Process Raw/QP1 RQ wr_id ");
			dev_err(&cq->hwq.pdev->dev,
				"QPLIB: ix 0x%x exceeded RQ max 0x%x",
				wr_id_idx, rq->hwq.max_elements);
			return -EINVAL;
		}
		cqe->wr_id = rq->swq[wr_id_idx].wr_id;
		cqe++;
		(*budget)--;
		rq->hwq.cons++;
		*pcqe = cqe;

		if (hwcqe->status != CQ_RES_RC_STATUS_OK) {
			qp->state = CMDQ_MODIFY_QP_NEW_STATE_ERR;
			/* Add qp to flush list of the CQ */
			bnxt_qplib_add_flush_qp(qp);
		}
	}

done:
	return rc;
}

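/* A terminal CQE is the hardware's signal that a QP has entered the error
 * state: complete any still-good SQ work up to sq_cons_idx, then queue the
 * QP on the flush lists so the remaining WQEs complete with FLUSHED_ERR.
 */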
static int bnxt_qplib_cq_process_terminal(struct bnxt_qplib_cq *cq,
					  struct cq_terminal *hwcqe,
					  struct bnxt_qplib_cqe **pcqe,
					  int *budget)
{
	struct bnxt_qplib_qp *qp;
	struct bnxt_qplib_q *sq, *rq;
	struct bnxt_qplib_cqe *cqe;
	u32 sw_cons = 0, cqe_cons;
	int rc = 0;

	/* Check the Status */
	if (hwcqe->status != CQ_TERMINAL_STATUS_OK)
		dev_warn(&cq->hwq.pdev->dev,
			 "QPLIB: FP: CQ Process Terminal Error status = 0x%x",
			 hwcqe->status);

	qp = (struct bnxt_qplib_qp *)((unsigned long)
				      le64_to_cpu(hwcqe->qp_handle));
	if (!qp) {
		dev_err(&cq->hwq.pdev->dev,
			"QPLIB: FP: CQ Process terminal qp is NULL");
		return -EINVAL;
	}

	/* Must block new posting of SQ and RQ */
	qp->state = CMDQ_MODIFY_QP_NEW_STATE_ERR;

	sq = &qp->sq;
	rq = &qp->rq;

	cqe_cons = le16_to_cpu(hwcqe->sq_cons_idx);
	if (cqe_cons == 0xFFFF)
		goto do_rq;

	if (cqe_cons > sq->hwq.max_elements) {
		dev_err(&cq->hwq.pdev->dev,
			"QPLIB: FP: CQ Process terminal reported ");
		dev_err(&cq->hwq.pdev->dev,
			"QPLIB: sq_cons_idx 0x%x which exceeded max 0x%x",
			cqe_cons, sq->hwq.max_elements);
		goto do_rq;
	}

	if (qp->sq.flushed) {
		dev_dbg(&cq->hwq.pdev->dev,
			"%s: QPLIB: QP in Flush QP = %p\n", __func__, qp);
		goto sq_done;
	}

	/* Terminal CQE can also include aggregated successful CQEs prior.
	 * So we must complete all CQEs from the current sq's cons up to the
	 * cqe_cons with status OK
	 */
	cqe = *pcqe;
	while (*budget) {
		sw_cons = HWQ_CMP(sq->hwq.cons, &sq->hwq);
		if (sw_cons == cqe_cons)
			break;
		if (sq->swq[sw_cons].flags & SQ_SEND_FLAGS_SIGNAL_COMP) {
			memset(cqe, 0, sizeof(*cqe));
			cqe->status = CQ_REQ_STATUS_OK;
			cqe->opcode = CQ_BASE_CQE_TYPE_REQ;
			cqe->qp_handle = (u64)(unsigned long)qp;
			cqe->src_qp = qp->id;
			cqe->wr_id = sq->swq[sw_cons].wr_id;
			cqe->type = sq->swq[sw_cons].type;
			cqe++;
			(*budget)--;
		}
		sq->hwq.cons++;
	}
	*pcqe = cqe;
	if (!(*budget) && sw_cons != cqe_cons) {
		/* Out of budget */
		rc = -EAGAIN;
		goto sq_done;
	}
sq_done:
	if (rc)
		return rc;
do_rq:
	cqe_cons = le16_to_cpu(hwcqe->rq_cons_idx);
	if (cqe_cons == 0xFFFF) {
		goto done;
	} else if (cqe_cons > rq->hwq.max_elements) {
		dev_err(&cq->hwq.pdev->dev,
			"QPLIB: FP: CQ Processed terminal ");
		dev_err(&cq->hwq.pdev->dev,
			"QPLIB: reported rq_cons_idx 0x%x exceeds max 0x%x",
			cqe_cons, rq->hwq.max_elements);
		goto done;
	}

	if (qp->rq.flushed) {
		dev_dbg(&cq->hwq.pdev->dev,
			"%s: QPLIB: QP in Flush QP = %p\n", __func__, qp);
		rc = 0;
		goto done;
	}

	/* Terminal CQE requires all posted RQEs to complete with FLUSHED_ERR
	 * from the current rq->cons to the rq->prod regardless of what was
	 * posted.  Queue the QP for flushing; the RQEs complete from the
	 * flush list.
	 */
	bnxt_qplib_add_flush_qp(qp);
done:
	return rc;
}

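/* A cutoff CQE marks the end of CQE flow on the old ring during a CQ
 * resize; waiters blocked on the resize are woken here.
 */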
static int bnxt_qplib_cq_process_cutoff(struct bnxt_qplib_cq *cq,
					struct cq_cutoff *hwcqe)
{
	/* Check the Status */
	if (hwcqe->status != CQ_CUTOFF_STATUS_OK) {
		dev_err(&cq->hwq.pdev->dev,
			"QPLIB: FP: CQ Process Cutoff Error status = 0x%x",
			hwcqe->status);
		return -EINVAL;
	}
	clear_bit(CQ_FLAGS_RESIZE_IN_PROG, &cq->flags);
	wake_up_interruptible(&cq->waitq);

	return 0;
}

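/* Drain the CQ's flush lists into the caller's CQE array.  Returns the
 * number of CQEs produced; like the poll path, at most num_cqes are
 * written.
 */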
int bnxt_qplib_process_flush_list(struct bnxt_qplib_cq *cq,
				  struct bnxt_qplib_cqe *cqe,
				  int num_cqes)
{
	struct bnxt_qplib_qp *qp = NULL;
	int budget = num_cqes;
	unsigned long flags;

	spin_lock_irqsave(&cq->flush_lock, flags);
	list_for_each_entry(qp, &cq->sqf_head, sq_flush) {
		dev_dbg(&cq->hwq.pdev->dev,
			"QPLIB: FP: Flushing SQ QP= %p",
			qp);
		__flush_sq(&qp->sq, qp, &cqe, &budget);
	}

	list_for_each_entry(qp, &cq->rqf_head, rq_flush) {
		dev_dbg(&cq->hwq.pdev->dev,
			"QPLIB: FP: Flushing RQ QP= %p",
			qp);
		__flush_rq(&qp->rq, qp, &cqe, &budget);
	}
	spin_unlock_irqrestore(&cq->flush_lock, flags);

	return num_cqes - budget;
}

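/* Main poll loop: consume valid CQEs at the consumer index, dispatch by CQE
 * type, and ring the CQ doorbell once with the final consumer index.
 * Returns the number of CQEs written to 'cqe'.  A sketch of the typical
 * caller pattern (names other than bnxt_qplib_poll_cq are illustrative):
 *
 *	budget = min(wc_budget, MAX_CQES);
 *	ncqe = bnxt_qplib_poll_cq(cq, cqe_buf, budget, &lib_qp);
 *	for (i = 0; i < ncqe; i++)
 *		convert cqe_buf[i] into a work completion for the consumer;
 */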
int bnxt_qplib_poll_cq(struct bnxt_qplib_cq *cq, struct bnxt_qplib_cqe *cqe,
		       int num_cqes, struct bnxt_qplib_qp **lib_qp)
{
	struct cq_base *hw_cqe, **hw_cqe_ptr;
	u32 sw_cons, raw_cons;
	int budget, rc = 0;

	raw_cons = cq->hwq.cons;
	budget = num_cqes;

	while (budget) {
		sw_cons = HWQ_CMP(raw_cons, &cq->hwq);
		hw_cqe_ptr = (struct cq_base **)cq->hwq.pbl_ptr;
		hw_cqe = &hw_cqe_ptr[CQE_PG(sw_cons)][CQE_IDX(sw_cons)];

		/* Check for Valid bit */
		if (!CQE_CMP_VALID(hw_cqe, raw_cons, cq->hwq.max_elements))
			break;

		/*
		 * The valid test of the entry must be done first before
		 * reading any further.
		 */
		dma_rmb();

		/* Dispatch on the CQE type */
		switch (hw_cqe->cqe_type_toggle & CQ_BASE_CQE_TYPE_MASK) {
		case CQ_BASE_CQE_TYPE_REQ:
			rc = bnxt_qplib_cq_process_req(cq,
						       (struct cq_req *)hw_cqe,
						       &cqe, &budget,
						       sw_cons, lib_qp);
			break;
		case CQ_BASE_CQE_TYPE_RES_RC:
			rc = bnxt_qplib_cq_process_res_rc(cq,
							  (struct cq_res_rc *)
							  hw_cqe, &cqe,
							  &budget);
			break;
		case CQ_BASE_CQE_TYPE_RES_UD:
			rc = bnxt_qplib_cq_process_res_ud
					(cq, (struct cq_res_ud *)hw_cqe, &cqe,
					 &budget);
			break;
		case CQ_BASE_CQE_TYPE_RES_RAWETH_QP1:
			rc = bnxt_qplib_cq_process_res_raweth_qp1
					(cq, (struct cq_res_raweth_qp1 *)
					 hw_cqe, &cqe, &budget);
			break;
		case CQ_BASE_CQE_TYPE_TERMINAL:
			rc = bnxt_qplib_cq_process_terminal
					(cq, (struct cq_terminal *)hw_cqe,
					 &cqe, &budget);
			break;
		case CQ_BASE_CQE_TYPE_CUT_OFF:
			bnxt_qplib_cq_process_cutoff
					(cq, (struct cq_cutoff *)hw_cqe);
			/* Done processing this CQ */
			goto exit;
		default:
			dev_err(&cq->hwq.pdev->dev,
				"QPLIB: process_cq unknown type 0x%lx",
				hw_cqe->cqe_type_toggle &
				CQ_BASE_CQE_TYPE_MASK);
			rc = -EINVAL;
			break;
		}
		if (rc < 0) {
			if (rc == -EAGAIN)
				break;
			/* Error while processing the CQE, just skip to the
			 * next one
			 */
			dev_err(&cq->hwq.pdev->dev,
				"QPLIB: process_cqe error rc = 0x%x", rc);
		}
		raw_cons++;
	}
	if (cq->hwq.cons != raw_cons) {
		cq->hwq.cons = raw_cons;
		bnxt_qplib_arm_cq(cq, DBR_DBR_TYPE_CQ);
	}
exit:
	return num_cqes - budget;
}

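/* Re-arm the CQ on behalf of the consumer.  arm_state gates the NQ worker
 * in bnxt_qpn_cqn_sched_task() so that the CQ handler only runs when a
 * notification was actually requested.
 */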
void bnxt_qplib_req_notify_cq(struct bnxt_qplib_cq *cq, u32 arm_type)
{
	if (arm_type)
		bnxt_qplib_arm_cq(cq, arm_type);
	/* Using cq->arm_state to track whether to issue the cq handler */
	atomic_set(&cq->arm_state, 1);
}

void bnxt_qplib_flush_cqn_wq(struct bnxt_qplib_qp *qp)
{
	flush_workqueue(qp->scq->nq->cqn_wq);
	if (qp->scq != qp->rcq)
		flush_workqueue(qp->rcq->nq->cqn_wq);
}