#include <linux/interrupt.h>
#include <linux/types.h>
#include <linux/pci.h>
#include <linux/netdevice.h>
#include <linux/if_ether.h>

#include <rdma/ib_verbs.h>
#include <rdma/ib_user_verbs.h>
#include <rdma/ib_umem.h>
#include <rdma/ib_addr.h>
#include <rdma/ib_mad.h>
#include <rdma/ib_cache.h>
#include <rdma/uverbs_ioctl.h>

#include "bnxt_ulp.h"

#include "roce_hsi.h"
#include "qplib_res.h"
#include "qplib_sp.h"
#include "qplib_fp.h"
#include "qplib_rcfw.h"

#include "bnxt_re.h"
#include "ib_verbs.h"
#include <rdma/bnxt_re-abi.h>

static int __from_ib_access_flags(int iflags)
{
	int qflags = 0;

	if (iflags & IB_ACCESS_LOCAL_WRITE)
		qflags |= BNXT_QPLIB_ACCESS_LOCAL_WRITE;
	if (iflags & IB_ACCESS_REMOTE_READ)
		qflags |= BNXT_QPLIB_ACCESS_REMOTE_READ;
	if (iflags & IB_ACCESS_REMOTE_WRITE)
		qflags |= BNXT_QPLIB_ACCESS_REMOTE_WRITE;
	if (iflags & IB_ACCESS_REMOTE_ATOMIC)
		qflags |= BNXT_QPLIB_ACCESS_REMOTE_ATOMIC;
	if (iflags & IB_ACCESS_MW_BIND)
		qflags |= BNXT_QPLIB_ACCESS_MW_BIND;
	if (iflags & IB_ZERO_BASED)
		qflags |= BNXT_QPLIB_ACCESS_ZERO_BASED;
	if (iflags & IB_ACCESS_ON_DEMAND)
		qflags |= BNXT_QPLIB_ACCESS_ON_DEMAND;
	return qflags;
}

static enum ib_access_flags __to_ib_access_flags(int qflags)
{
	enum ib_access_flags iflags = 0;

	if (qflags & BNXT_QPLIB_ACCESS_LOCAL_WRITE)
		iflags |= IB_ACCESS_LOCAL_WRITE;
	if (qflags & BNXT_QPLIB_ACCESS_REMOTE_WRITE)
		iflags |= IB_ACCESS_REMOTE_WRITE;
	if (qflags & BNXT_QPLIB_ACCESS_REMOTE_READ)
		iflags |= IB_ACCESS_REMOTE_READ;
	if (qflags & BNXT_QPLIB_ACCESS_REMOTE_ATOMIC)
		iflags |= IB_ACCESS_REMOTE_ATOMIC;
	if (qflags & BNXT_QPLIB_ACCESS_MW_BIND)
		iflags |= IB_ACCESS_MW_BIND;
	if (qflags & BNXT_QPLIB_ACCESS_ZERO_BASED)
		iflags |= IB_ZERO_BASED;
	if (qflags & BNXT_QPLIB_ACCESS_ON_DEMAND)
		iflags |= IB_ACCESS_ON_DEMAND;
	return iflags;
}

static int bnxt_re_build_sgl(struct ib_sge *ib_sg_list,
			     struct bnxt_qplib_sge *sg_list, int num)
{
	int i, total = 0;

	for (i = 0; i < num; i++) {
		sg_list[i].addr = ib_sg_list[i].addr;
		sg_list[i].lkey = ib_sg_list[i].lkey;
		sg_list[i].size = ib_sg_list[i].length;
		total += sg_list[i].size;
	}
	return total;
}

int bnxt_re_query_device(struct ib_device *ibdev,
			 struct ib_device_attr *ib_attr,
			 struct ib_udata *udata)
{
	struct bnxt_re_dev *rdev = to_bnxt_re_dev(ibdev, ibdev);
	struct bnxt_qplib_dev_attr *dev_attr = &rdev->dev_attr;

	memset(ib_attr, 0, sizeof(*ib_attr));
	memcpy(&ib_attr->fw_ver, dev_attr->fw_ver,
	       min(sizeof(dev_attr->fw_ver), sizeof(ib_attr->fw_ver)));
	bnxt_qplib_get_guid(rdev->netdev->dev_addr,
			    (u8 *)&ib_attr->sys_image_guid);
	ib_attr->max_mr_size = BNXT_RE_MAX_MR_SIZE;
	ib_attr->page_size_cap = BNXT_RE_PAGE_SIZE_4K | BNXT_RE_PAGE_SIZE_2M;

	ib_attr->vendor_id = rdev->en_dev->pdev->vendor;
	ib_attr->vendor_part_id = rdev->en_dev->pdev->device;
	ib_attr->hw_ver = rdev->en_dev->pdev->subsystem_device;
	ib_attr->max_qp = dev_attr->max_qp;
	ib_attr->max_qp_wr = dev_attr->max_qp_wqes;
	ib_attr->device_cap_flags =
				    IB_DEVICE_CURR_QP_STATE_MOD
				    | IB_DEVICE_RC_RNR_NAK_GEN
				    | IB_DEVICE_SHUTDOWN_PORT
				    | IB_DEVICE_SYS_IMAGE_GUID
				    | IB_DEVICE_LOCAL_DMA_LKEY
				    | IB_DEVICE_RESIZE_MAX_WR
				    | IB_DEVICE_PORT_ACTIVE_EVENT
				    | IB_DEVICE_N_NOTIFY_CQ
				    | IB_DEVICE_MEM_WINDOW
				    | IB_DEVICE_MEM_WINDOW_TYPE_2B
				    | IB_DEVICE_MEM_MGT_EXTENSIONS;
	ib_attr->max_send_sge = dev_attr->max_qp_sges;
	ib_attr->max_recv_sge = dev_attr->max_qp_sges;
	ib_attr->max_sge_rd = dev_attr->max_qp_sges;
	ib_attr->max_cq = dev_attr->max_cq;
	ib_attr->max_cqe = dev_attr->max_cq_wqes;
	ib_attr->max_mr = dev_attr->max_mr;
	ib_attr->max_pd = dev_attr->max_pd;
	ib_attr->max_qp_rd_atom = dev_attr->max_qp_rd_atom;
	ib_attr->max_qp_init_rd_atom = dev_attr->max_qp_init_rd_atom;
	ib_attr->atomic_cap = IB_ATOMIC_NONE;
	ib_attr->masked_atomic_cap = IB_ATOMIC_NONE;
	if (dev_attr->is_atomic) {
		ib_attr->atomic_cap = IB_ATOMIC_GLOB;
		ib_attr->masked_atomic_cap = IB_ATOMIC_GLOB;
	}

	ib_attr->max_ee_rd_atom = 0;
	ib_attr->max_res_rd_atom = 0;
	ib_attr->max_ee_init_rd_atom = 0;
	ib_attr->max_ee = 0;
	ib_attr->max_rdd = 0;
	ib_attr->max_mw = dev_attr->max_mw;
	ib_attr->max_raw_ipv6_qp = 0;
	ib_attr->max_raw_ethy_qp = dev_attr->max_raw_ethy_qp;
	ib_attr->max_mcast_grp = 0;
	ib_attr->max_mcast_qp_attach = 0;
	ib_attr->max_total_mcast_qp_attach = 0;
	ib_attr->max_ah = dev_attr->max_ah;

	ib_attr->max_srq = dev_attr->max_srq;
	ib_attr->max_srq_wr = dev_attr->max_srq_wqes;
	ib_attr->max_srq_sge = dev_attr->max_srq_sges;

	ib_attr->max_fast_reg_page_list_len = MAX_PBL_LVL_1_PGS;

	ib_attr->max_pkeys = 1;
	ib_attr->local_ca_ack_delay = BNXT_RE_DEFAULT_ACK_DELAY;
	return 0;
}

int bnxt_re_query_port(struct ib_device *ibdev, u32 port_num,
		       struct ib_port_attr *port_attr)
{
	struct bnxt_re_dev *rdev = to_bnxt_re_dev(ibdev, ibdev);
	struct bnxt_qplib_dev_attr *dev_attr = &rdev->dev_attr;

	memset(port_attr, 0, sizeof(*port_attr));

	if (netif_running(rdev->netdev) && netif_carrier_ok(rdev->netdev)) {
		port_attr->state = IB_PORT_ACTIVE;
		port_attr->phys_state = IB_PORT_PHYS_STATE_LINK_UP;
	} else {
		port_attr->state = IB_PORT_DOWN;
		port_attr->phys_state = IB_PORT_PHYS_STATE_DISABLED;
	}
	port_attr->max_mtu = IB_MTU_4096;
	port_attr->active_mtu = iboe_get_mtu(rdev->netdev->mtu);
	port_attr->gid_tbl_len = dev_attr->max_sgid;
	port_attr->port_cap_flags = IB_PORT_CM_SUP | IB_PORT_REINIT_SUP |
				    IB_PORT_DEVICE_MGMT_SUP |
				    IB_PORT_VENDOR_CLASS_SUP;
	port_attr->ip_gids = true;

	port_attr->max_msg_sz = (u32)BNXT_RE_MAX_MR_SIZE_LOW;
	port_attr->bad_pkey_cntr = 0;
	port_attr->qkey_viol_cntr = 0;
	port_attr->pkey_tbl_len = dev_attr->max_pkey;
	port_attr->lid = 0;
	port_attr->sm_lid = 0;
	port_attr->lmc = 0;
	port_attr->max_vl_num = 4;
	port_attr->sm_sl = 0;
	port_attr->subnet_timeout = 0;
	port_attr->init_type_reply = 0;
	port_attr->active_speed = rdev->active_speed;
	port_attr->active_width = rdev->active_width;

	return 0;
}
235
236int bnxt_re_get_port_immutable(struct ib_device *ibdev, u32 port_num,
237 struct ib_port_immutable *immutable)
238{
239 struct ib_port_attr port_attr;
240
241 if (bnxt_re_query_port(ibdev, port_num, &port_attr))
242 return -EINVAL;
243
244 immutable->pkey_tbl_len = port_attr.pkey_tbl_len;
245 immutable->gid_tbl_len = port_attr.gid_tbl_len;
246 immutable->core_cap_flags = RDMA_CORE_PORT_IBA_ROCE;
247 immutable->core_cap_flags |= RDMA_CORE_CAP_PROT_ROCE_UDP_ENCAP;
248 immutable->max_mad_size = IB_MGMT_MAD_SIZE;
249 return 0;
250}
251
252void bnxt_re_query_fw_str(struct ib_device *ibdev, char *str)
253{
254 struct bnxt_re_dev *rdev = to_bnxt_re_dev(ibdev, ibdev);
255
256 snprintf(str, IB_FW_VERSION_NAME_MAX, "%d.%d.%d.%d",
257 rdev->dev_attr.fw_ver[0], rdev->dev_attr.fw_ver[1],
258 rdev->dev_attr.fw_ver[2], rdev->dev_attr.fw_ver[3]);
259}

int bnxt_re_query_pkey(struct ib_device *ibdev, u32 port_num,
		       u16 index, u16 *pkey)
{
	struct bnxt_re_dev *rdev = to_bnxt_re_dev(ibdev, ibdev);

	/* port_num is unused; the pkey comes straight from the device table */
	memset(pkey, 0, sizeof(*pkey));
	return bnxt_qplib_get_pkey(&rdev->qplib_res,
				   &rdev->qplib_res.pkey_tbl, index, pkey);
}

int bnxt_re_query_gid(struct ib_device *ibdev, u32 port_num,
		      int index, union ib_gid *gid)
{
	struct bnxt_re_dev *rdev = to_bnxt_re_dev(ibdev, ibdev);
	int rc = 0;

	/* port_num is unused; all GIDs live in the single SGID table */
	memset(gid, 0, sizeof(*gid));
	rc = bnxt_qplib_get_sgid(&rdev->qplib_res,
				 &rdev->qplib_res.sgid_tbl, index,
				 (struct bnxt_qplib_gid *)gid);
	return rc;
}
286
287int bnxt_re_del_gid(const struct ib_gid_attr *attr, void **context)
288{
289 int rc = 0;
290 struct bnxt_re_gid_ctx *ctx, **ctx_tbl;
291 struct bnxt_re_dev *rdev = to_bnxt_re_dev(attr->device, ibdev);
292 struct bnxt_qplib_sgid_tbl *sgid_tbl = &rdev->qplib_res.sgid_tbl;
293 struct bnxt_qplib_gid *gid_to_del;
294 u16 vlan_id = 0xFFFF;
295
296
297 ctx = *context;
298 if (!ctx)
299 return -EINVAL;
300
301 if (sgid_tbl && sgid_tbl->active) {
302 if (ctx->idx >= sgid_tbl->max)
303 return -EINVAL;
304 gid_to_del = &sgid_tbl->tbl[ctx->idx].gid;
305 vlan_id = sgid_tbl->tbl[ctx->idx].vlan_id;
306
307
308
309
310
311
312
313
314 if (ctx->idx == 0 &&
315 rdma_link_local_addr((struct in6_addr *)gid_to_del) &&
316 ctx->refcnt == 1 && rdev->gsi_ctx.gsi_sqp) {
317 ibdev_dbg(&rdev->ibdev,
318 "Trying to delete GID0 while QP1 is alive\n");
319 return -EFAULT;
320 }
321 ctx->refcnt--;
322 if (!ctx->refcnt) {
323 rc = bnxt_qplib_del_sgid(sgid_tbl, gid_to_del,
324 vlan_id, true);
325 if (rc) {
326 ibdev_err(&rdev->ibdev,
327 "Failed to remove GID: %#x", rc);
328 } else {
329 ctx_tbl = sgid_tbl->ctx;
330 ctx_tbl[ctx->idx] = NULL;
331 kfree(ctx);
332 }
333 }
334 } else {
335 return -EINVAL;
336 }
337 return rc;
338}

int bnxt_re_add_gid(const struct ib_gid_attr *attr, void **context)
{
	int rc;
	u32 tbl_idx = 0;
	u16 vlan_id = 0xFFFF;
	struct bnxt_re_gid_ctx *ctx, **ctx_tbl;
	struct bnxt_re_dev *rdev = to_bnxt_re_dev(attr->device, ibdev);
	struct bnxt_qplib_sgid_tbl *sgid_tbl = &rdev->qplib_res.sgid_tbl;

	rc = rdma_read_gid_l2_fields(attr, &vlan_id, NULL);
	if (rc)
		return rc;

	rc = bnxt_qplib_add_sgid(sgid_tbl, (struct bnxt_qplib_gid *)&attr->gid,
				 rdev->qplib_res.netdev->dev_addr,
				 vlan_id, true, &tbl_idx);
	if (rc == -EALREADY) {
		/* GID is already present; just take another reference */
		ctx_tbl = sgid_tbl->ctx;
		ctx_tbl[tbl_idx]->refcnt++;
		*context = ctx_tbl[tbl_idx];
		return 0;
	}

	if (rc < 0) {
		ibdev_err(&rdev->ibdev, "Failed to add GID: %#x", rc);
		return rc;
	}

	ctx = kmalloc(sizeof(*ctx), GFP_KERNEL);
	if (!ctx)
		return -ENOMEM;
	ctx_tbl = sgid_tbl->ctx;
	ctx->idx = tbl_idx;
	ctx->refcnt = 1;
	ctx_tbl[tbl_idx] = ctx;
	*context = ctx;

	return rc;
}

enum rdma_link_layer bnxt_re_get_link_layer(struct ib_device *ibdev,
					    u32 port_num)
{
	return IB_LINK_LAYER_ETHERNET;
}

#define BNXT_RE_FENCE_PBL_SIZE	DIV_ROUND_UP(BNXT_RE_FENCE_BYTES, PAGE_SIZE)

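/* Fence infrastructure (a sketch inferred from the code below, not from
 * vendor documentation): each kernel PD owns a small fence MR and a type-1
 * memory window bound over it. bnxt_re_create_fence_wqe() pre-builds a
 * bind-MW work request carrying the UC_FENCE flag; bnxt_re_bind_fence_mw()
 * posts a copy of it with a fresh rkey, and the driver uses that posted
 * WQE to realize fencing semantics on the send queue.
 */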
static void bnxt_re_create_fence_wqe(struct bnxt_re_pd *pd)
{
	struct bnxt_re_fence_data *fence = &pd->fence;
	struct ib_mr *ib_mr = &fence->mr->ib_mr;
	struct bnxt_qplib_swqe *wqe = &fence->bind_wqe;

	memset(wqe, 0, sizeof(*wqe));
	wqe->type = BNXT_QPLIB_SWQE_TYPE_BIND_MW;
	wqe->wr_id = BNXT_QPLIB_FENCE_WRID;
	wqe->flags |= BNXT_QPLIB_SWQE_FLAGS_SIGNAL_COMP;
	wqe->flags |= BNXT_QPLIB_SWQE_FLAGS_UC_FENCE;
	wqe->bind.zero_based = false;
	wqe->bind.parent_l_key = ib_mr->lkey;
	wqe->bind.va = (u64)(unsigned long)fence->va;
	wqe->bind.length = fence->size;
	wqe->bind.access_cntl = __from_ib_access_flags(IB_ACCESS_REMOTE_READ);
	wqe->bind.mw_type = SQ_BIND_MW_TYPE_TYPE1;

	/* Save the initial rkey; it is advanced on every bind so each
	 * fence WQE carries a fresh key.
	 */
	fence->bind_rkey = ib_inc_rkey(fence->mw->rkey);
}

static int bnxt_re_bind_fence_mw(struct bnxt_qplib_qp *qplib_qp)
{
	struct bnxt_re_qp *qp = container_of(qplib_qp, struct bnxt_re_qp,
					     qplib_qp);
	struct ib_pd *ib_pd = qp->ib_qp.pd;
	struct bnxt_re_pd *pd = container_of(ib_pd, struct bnxt_re_pd, ib_pd);
	struct bnxt_re_fence_data *fence = &pd->fence;
	struct bnxt_qplib_swqe *fence_wqe = &fence->bind_wqe;
	struct bnxt_qplib_swqe wqe;
	int rc;

	memcpy(&wqe, fence_wqe, sizeof(wqe));
	wqe.bind.r_key = fence->bind_rkey;
	fence->bind_rkey = ib_inc_rkey(fence->bind_rkey);

	ibdev_dbg(&qp->rdev->ibdev,
		  "Posting bind fence-WQE: rkey: %#x QP: %d PD: %p\n",
		  wqe.bind.r_key, qp->qplib_qp.id, pd);
	rc = bnxt_qplib_post_send(&qp->qplib_qp, &wqe);
	if (rc) {
		ibdev_err(&qp->rdev->ibdev, "Failed to bind fence-WQE\n");
		return rc;
	}
	bnxt_qplib_post_send_db(&qp->qplib_qp);

	return rc;
}

static void bnxt_re_destroy_fence_mr(struct bnxt_re_pd *pd)
{
	struct bnxt_re_fence_data *fence = &pd->fence;
	struct bnxt_re_dev *rdev = pd->rdev;
	struct device *dev = &rdev->en_dev->pdev->dev;
	struct bnxt_re_mr *mr = fence->mr;

	if (fence->mw) {
		bnxt_re_dealloc_mw(fence->mw);
		fence->mw = NULL;
	}
	if (mr) {
		if (mr->ib_mr.rkey)
			bnxt_qplib_dereg_mrw(&rdev->qplib_res, &mr->qplib_mr,
					     true);
		if (mr->ib_mr.lkey)
			bnxt_qplib_free_mrw(&rdev->qplib_res, &mr->qplib_mr);
		kfree(mr);
		fence->mr = NULL;
	}
	if (fence->dma_addr) {
		dma_unmap_single(dev, fence->dma_addr, BNXT_RE_FENCE_BYTES,
				 DMA_BIDIRECTIONAL);
		fence->dma_addr = 0;
	}
}

static int bnxt_re_create_fence_mr(struct bnxt_re_pd *pd)
{
	int mr_access_flags = IB_ACCESS_LOCAL_WRITE | IB_ACCESS_MW_BIND;
	struct bnxt_re_fence_data *fence = &pd->fence;
	struct bnxt_re_dev *rdev = pd->rdev;
	struct device *dev = &rdev->en_dev->pdev->dev;
	struct bnxt_re_mr *mr = NULL;
	dma_addr_t dma_addr = 0;
	struct ib_mw *mw;
	int rc;

	dma_addr = dma_map_single(dev, fence->va, BNXT_RE_FENCE_BYTES,
				  DMA_BIDIRECTIONAL);
	rc = dma_mapping_error(dev, dma_addr);
	if (rc) {
		ibdev_err(&rdev->ibdev, "Failed to dma-map fence-MR-mem\n");
		rc = -EIO;
		fence->dma_addr = 0;
		goto fail;
	}
	fence->dma_addr = dma_addr;

	/* Allocate a MR */
	mr = kzalloc(sizeof(*mr), GFP_KERNEL);
	if (!mr) {
		rc = -ENOMEM;
		goto fail;
	}
	fence->mr = mr;
	mr->rdev = rdev;
	mr->qplib_mr.pd = &pd->qplib_pd;
	mr->qplib_mr.type = CMDQ_ALLOCATE_MRW_MRW_FLAGS_PMR;
	mr->qplib_mr.flags = __from_ib_access_flags(mr_access_flags);
	rc = bnxt_qplib_alloc_mrw(&rdev->qplib_res, &mr->qplib_mr);
	if (rc) {
		ibdev_err(&rdev->ibdev, "Failed to alloc fence-HW-MR\n");
		goto fail;
	}

	/* Register the MR with hardware */
	mr->ib_mr.lkey = mr->qplib_mr.lkey;
	mr->qplib_mr.va = (u64)(unsigned long)fence->va;
	mr->qplib_mr.total_size = BNXT_RE_FENCE_BYTES;
	rc = bnxt_qplib_reg_mr(&rdev->qplib_res, &mr->qplib_mr, NULL,
			       BNXT_RE_FENCE_PBL_SIZE, PAGE_SIZE);
	if (rc) {
		ibdev_err(&rdev->ibdev, "Failed to register fence-MR\n");
		goto fail;
	}
	mr->ib_mr.rkey = mr->qplib_mr.rkey;

	/* Create a type-1 MW over the fence MR */
	mw = bnxt_re_alloc_mw(&pd->ib_pd, IB_MW_TYPE_1, NULL);
	if (IS_ERR(mw)) {
		ibdev_err(&rdev->ibdev,
			  "Failed to create fence-MW for PD: %p\n", pd);
		rc = PTR_ERR(mw);
		goto fail;
	}
	fence->mw = mw;

	bnxt_re_create_fence_wqe(pd);
	return 0;

fail:
	bnxt_re_destroy_fence_mr(pd);
	return rc;
}


int bnxt_re_dealloc_pd(struct ib_pd *ib_pd, struct ib_udata *udata)
{
	struct bnxt_re_pd *pd = container_of(ib_pd, struct bnxt_re_pd, ib_pd);
	struct bnxt_re_dev *rdev = pd->rdev;

	bnxt_re_destroy_fence_mr(pd);

	if (pd->qplib_pd.id)
		bnxt_qplib_dealloc_pd(&rdev->qplib_res, &rdev->qplib_res.pd_tbl,
				      &pd->qplib_pd);
	return 0;
}

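/* PD allocation: besides the hardware PD, the first PD created by a user
 * context also triggers allocation of that context's doorbell page (DPI);
 * the DPI id and doorbell base are handed back to userspace in
 * bnxt_re_pd_resp. Kernel consumers additionally get a fence MR (see
 * bnxt_re_create_fence_mr() above).
 */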
int bnxt_re_alloc_pd(struct ib_pd *ibpd, struct ib_udata *udata)
{
	struct ib_device *ibdev = ibpd->device;
	struct bnxt_re_dev *rdev = to_bnxt_re_dev(ibdev, ibdev);
	struct bnxt_re_ucontext *ucntx = rdma_udata_to_drv_context(
		udata, struct bnxt_re_ucontext, ib_uctx);
	struct bnxt_re_pd *pd = container_of(ibpd, struct bnxt_re_pd, ib_pd);
	int rc;

	pd->rdev = rdev;
	if (bnxt_qplib_alloc_pd(&rdev->qplib_res.pd_tbl, &pd->qplib_pd)) {
		ibdev_err(&rdev->ibdev, "Failed to allocate HW PD");
		rc = -ENOMEM;
		goto fail;
	}

	if (udata) {
		struct bnxt_re_pd_resp resp;

		if (!ucntx->dpi.dbr) {
			/* Allocate the DPI on the first PD of this user
			 * context; all PDs of the context share it.
			 */
			if (bnxt_qplib_alloc_dpi(&rdev->qplib_res.dpi_tbl,
						 &ucntx->dpi, ucntx)) {
				rc = -ENOMEM;
				goto dbfail;
			}
		}

		resp.pdid = pd->qplib_pd.id;

		resp.dpi = ucntx->dpi.dpi;
		resp.dbr = (u64)ucntx->dpi.umdbr;

		rc = ib_copy_to_udata(udata, &resp, sizeof(resp));
		if (rc) {
			ibdev_err(&rdev->ibdev,
				  "Failed to copy user response\n");
			goto dbfail;
		}
	}

	if (!udata)
		if (bnxt_re_create_fence_mr(pd))
			ibdev_warn(&rdev->ibdev,
				   "Failed to create Fence-MR\n");
	return 0;
dbfail:
	bnxt_qplib_dealloc_pd(&rdev->qplib_res, &rdev->qplib_res.pd_tbl,
			      &pd->qplib_pd);
fail:
	return rc;
}


int bnxt_re_destroy_ah(struct ib_ah *ib_ah, u32 flags)
{
	struct bnxt_re_ah *ah = container_of(ib_ah, struct bnxt_re_ah, ib_ah);
	struct bnxt_re_dev *rdev = ah->rdev;

	bnxt_qplib_destroy_ah(&rdev->qplib_res, &ah->qplib_ah,
			      !(flags & RDMA_DESTROY_AH_SLEEPABLE));
	return 0;
}

static u8 bnxt_re_stack_to_dev_nw_type(enum rdma_network_type ntype)
{
	u8 nw_type;

	switch (ntype) {
	case RDMA_NETWORK_IPV4:
		nw_type = CMDQ_CREATE_AH_TYPE_V2IPV4;
		break;
	case RDMA_NETWORK_IPV6:
		nw_type = CMDQ_CREATE_AH_TYPE_V2IPV6;
		break;
	default:
		nw_type = CMDQ_CREATE_AH_TYPE_V1;
		break;
	}
	return nw_type;
}

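/* AH creation: RoCE requires a GRH (the DGID/SGID pair is the route), so
 * creation fails without one. For user AHs, the new AH id (AVID) is
 * written to the context's shared page so userspace can pick it up
 * without an extra copy path.
 */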
int bnxt_re_create_ah(struct ib_ah *ib_ah, struct rdma_ah_init_attr *init_attr,
		      struct ib_udata *udata)
{
	struct ib_pd *ib_pd = ib_ah->pd;
	struct bnxt_re_pd *pd = container_of(ib_pd, struct bnxt_re_pd, ib_pd);
	struct rdma_ah_attr *ah_attr = init_attr->ah_attr;
	const struct ib_global_route *grh = rdma_ah_read_grh(ah_attr);
	struct bnxt_re_dev *rdev = pd->rdev;
	const struct ib_gid_attr *sgid_attr;
	struct bnxt_re_gid_ctx *ctx;
	struct bnxt_re_ah *ah = container_of(ib_ah, struct bnxt_re_ah, ib_ah);
	u8 nw_type;
	int rc;

	if (!(rdma_ah_get_ah_flags(ah_attr) & IB_AH_GRH)) {
		ibdev_err(&rdev->ibdev, "Failed to alloc AH: GRH not set");
		return -EINVAL;
	}

	ah->rdev = rdev;
	ah->qplib_ah.pd = &pd->qplib_pd;

	/* Supply the configured dgid */
	memcpy(ah->qplib_ah.dgid.data, grh->dgid.raw,
	       sizeof(union ib_gid));
	sgid_attr = grh->sgid_attr;
	/* Get the HW context of the GID; the SGID table entry reference
	 * is already held by the caller.
	 */
	ctx = rdma_read_gid_hw_context(sgid_attr);
	ah->qplib_ah.sgid_index = ctx->idx;
	ah->qplib_ah.host_sgid_index = grh->sgid_index;
	ah->qplib_ah.traffic_class = grh->traffic_class;
	ah->qplib_ah.flow_label = grh->flow_label;
	ah->qplib_ah.hop_limit = grh->hop_limit;
	ah->qplib_ah.sl = rdma_ah_get_sl(ah_attr);

	/* Get network header type for this GID */
	nw_type = rdma_gid_attr_network_type(sgid_attr);
	ah->qplib_ah.nw_type = bnxt_re_stack_to_dev_nw_type(nw_type);

	memcpy(ah->qplib_ah.dmac, ah_attr->roce.dmac, ETH_ALEN);
	rc = bnxt_qplib_create_ah(&rdev->qplib_res, &ah->qplib_ah,
				  !(init_attr->flags &
				    RDMA_CREATE_AH_SLEEPABLE));
	if (rc) {
		ibdev_err(&rdev->ibdev, "Failed to allocate HW AH");
		return rc;
	}

	/* Write AVID to shared page */
	if (udata) {
		struct bnxt_re_ucontext *uctx = rdma_udata_to_drv_context(
			udata, struct bnxt_re_ucontext, ib_uctx);
		unsigned long flag;
		u32 *wrptr;

		spin_lock_irqsave(&uctx->sh_lock, flag);
		wrptr = (u32 *)(uctx->shpg + BNXT_RE_AVID_OFFT);
		*wrptr = ah->qplib_ah.id;
		wmb();	/* make sure the AVID write is visible */
		spin_unlock_irqrestore(&uctx->sh_lock, flag);
	}

	return 0;
}

int bnxt_re_modify_ah(struct ib_ah *ib_ah, struct rdma_ah_attr *ah_attr)
{
	return 0;
}

int bnxt_re_query_ah(struct ib_ah *ib_ah, struct rdma_ah_attr *ah_attr)
{
	struct bnxt_re_ah *ah = container_of(ib_ah, struct bnxt_re_ah, ib_ah);

	ah_attr->type = ib_ah->type;
	rdma_ah_set_sl(ah_attr, ah->qplib_ah.sl);
	memcpy(ah_attr->roce.dmac, ah->qplib_ah.dmac, ETH_ALEN);
	rdma_ah_set_grh(ah_attr, NULL, 0,
			ah->qplib_ah.host_sgid_index,
			0, ah->qplib_ah.traffic_class);
	rdma_ah_set_dgid_raw(ah_attr, ah->qplib_ah.dgid.data);
	rdma_ah_set_port_num(ah_attr, 1);
	rdma_ah_set_static_rate(ah_attr, 0);
	return 0;
}

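/* CQ lock helpers: the send CQ lock is always taken first and the recv CQ
 * lock second (only when the two CQs differ), so every path needing both
 * locks uses one consistent ordering and cannot deadlock against another.
 * The __acquire/__release annotations keep sparse happy when scq == rcq.
 */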
unsigned long bnxt_re_lock_cqs(struct bnxt_re_qp *qp)
	__acquires(&qp->scq->cq_lock) __acquires(&qp->rcq->cq_lock)
{
	unsigned long flags;

	spin_lock_irqsave(&qp->scq->cq_lock, flags);
	if (qp->rcq != qp->scq)
		spin_lock(&qp->rcq->cq_lock);
	else
		__acquire(&qp->rcq->cq_lock);

	return flags;
}

void bnxt_re_unlock_cqs(struct bnxt_re_qp *qp,
			unsigned long flags)
	__releases(&qp->scq->cq_lock) __releases(&qp->rcq->cq_lock)
{
	if (qp->rcq != qp->scq)
		spin_unlock(&qp->rcq->cq_lock);
	else
		__release(&qp->rcq->cq_lock);
	spin_unlock_irqrestore(&qp->scq->cq_lock, flags);
}

static int bnxt_re_destroy_gsi_sqp(struct bnxt_re_qp *qp)
{
	struct bnxt_re_qp *gsi_sqp;
	struct bnxt_re_ah *gsi_sah;
	struct bnxt_re_dev *rdev;
	int rc = 0;

	rdev = qp->rdev;
	gsi_sqp = rdev->gsi_ctx.gsi_sqp;
	gsi_sah = rdev->gsi_ctx.gsi_sah;

	ibdev_dbg(&rdev->ibdev, "Destroy the shadow AH\n");
	bnxt_qplib_destroy_ah(&rdev->qplib_res,
			      &gsi_sah->qplib_ah,
			      true);
	bnxt_qplib_clean_qp(&qp->qplib_qp);

	ibdev_dbg(&rdev->ibdev, "Destroy the shadow QP\n");
	rc = bnxt_qplib_destroy_qp(&rdev->qplib_res, &gsi_sqp->qplib_qp);
	if (rc) {
		ibdev_err(&rdev->ibdev, "Destroy Shadow QP failed");
		goto fail;
	}
	bnxt_qplib_free_qp_res(&rdev->qplib_res, &gsi_sqp->qplib_qp);

	/* remove from active qp list */
	mutex_lock(&rdev->qp_lock);
	list_del(&gsi_sqp->list);
	mutex_unlock(&rdev->qp_lock);
	atomic_dec(&rdev->qp_count);

	kfree(rdev->gsi_ctx.sqp_tbl);
	kfree(gsi_sah);
	kfree(gsi_sqp);
	rdev->gsi_ctx.gsi_sqp = NULL;
	rdev->gsi_ctx.gsi_sah = NULL;
	rdev->gsi_ctx.sqp_tbl = NULL;

	return 0;
fail:
	return rc;
}


int bnxt_re_destroy_qp(struct ib_qp *ib_qp, struct ib_udata *udata)
{
	struct bnxt_re_qp *qp = container_of(ib_qp, struct bnxt_re_qp, ib_qp);
	struct bnxt_re_dev *rdev = qp->rdev;
	unsigned int flags;
	int rc;

	bnxt_qplib_flush_cqn_wq(&qp->qplib_qp);

	rc = bnxt_qplib_destroy_qp(&rdev->qplib_res, &qp->qplib_qp);
	if (rc) {
		ibdev_err(&rdev->ibdev, "Failed to destroy HW QP");
		return rc;
	}

	if (rdma_is_kernel_res(&qp->ib_qp.res)) {
		flags = bnxt_re_lock_cqs(qp);
		bnxt_qplib_clean_qp(&qp->qplib_qp);
		bnxt_re_unlock_cqs(qp, flags);
	}

	bnxt_qplib_free_qp_res(&rdev->qplib_res, &qp->qplib_qp);

	if (ib_qp->qp_type == IB_QPT_GSI && rdev->gsi_ctx.gsi_sqp) {
		rc = bnxt_re_destroy_gsi_sqp(qp);
		if (rc)
			goto sh_fail;
	}

	mutex_lock(&rdev->qp_lock);
	list_del(&qp->list);
	mutex_unlock(&rdev->qp_lock);
	atomic_dec(&rdev->qp_count);

	ib_umem_release(qp->rumem);
	ib_umem_release(qp->sumem);

	kfree(qp);
	return 0;
sh_fail:
	return rc;
}

static u8 __from_ib_qp_type(enum ib_qp_type type)
{
	switch (type) {
	case IB_QPT_GSI:
		return CMDQ_CREATE_QP1_TYPE_GSI;
	case IB_QPT_RC:
		return CMDQ_CREATE_QP_TYPE_RC;
	case IB_QPT_UD:
		return CMDQ_CREATE_QP_TYPE_UD;
	default:
		return IB_QPT_MAX;
	}
}

static u16 bnxt_re_setup_rwqe_size(struct bnxt_qplib_qp *qplqp,
				   int rsge, int max)
{
	if (qplqp->wqe_mode == BNXT_QPLIB_WQE_MODE_STATIC)
		rsge = max;
	return bnxt_re_get_rwqe_size(rsge);
}

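/* Send WQE sizing: a send WQE must hold the request header plus either the
 * SGE array or the inline payload, whichever is larger; the result is
 * rounded up to a multiple of the header size so WQEs stay slot-aligned.
 */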
static u16 bnxt_re_get_wqe_size(int ilsize, int nsge)
{
	u16 wqe_size, calc_ils;

	wqe_size = bnxt_re_get_swqe_size(nsge);
	if (ilsize) {
		calc_ils = sizeof(struct sq_send_hdr) + ilsize;
		wqe_size = max_t(u16, calc_ils, wqe_size);
		wqe_size = ALIGN(wqe_size, sizeof(struct sq_send_hdr));
	}
	return wqe_size;
}

static int bnxt_re_setup_swqe_size(struct bnxt_re_qp *qp,
				   struct ib_qp_init_attr *init_attr)
{
	struct bnxt_qplib_dev_attr *dev_attr;
	struct bnxt_qplib_qp *qplqp;
	struct bnxt_re_dev *rdev;
	struct bnxt_qplib_q *sq;
	int align, ilsize;

	rdev = qp->rdev;
	qplqp = &qp->qplib_qp;
	sq = &qplqp->sq;
	dev_attr = &rdev->dev_attr;

	align = sizeof(struct sq_send_hdr);
	ilsize = ALIGN(init_attr->cap.max_inline_data, align);

	sq->wqe_size = bnxt_re_get_wqe_size(ilsize, sq->max_sge);
	if (sq->wqe_size > bnxt_re_get_swqe_size(dev_attr->max_qp_sges))
		return -EINVAL;
	/* In static WQE mode the hardware expects a fixed WQE size, so pad
	 * smaller WQEs up to the maximum supported size.
	 */
	if (sq->wqe_size < bnxt_re_get_swqe_size(dev_attr->max_qp_sges) &&
	    qplqp->wqe_mode == BNXT_QPLIB_WQE_MODE_STATIC)
		sq->wqe_size = bnxt_re_get_swqe_size(dev_attr->max_qp_sges);

	if (init_attr->cap.max_inline_data) {
		qplqp->max_inline_data = sq->wqe_size -
					 sizeof(struct sq_send_hdr);
		init_attr->cap.max_inline_data = qplqp->max_inline_data;
		if (qplqp->wqe_mode == BNXT_QPLIB_WQE_MODE_STATIC)
			sq->max_sge = qplqp->max_inline_data /
				      sizeof(struct sq_sge);
	}

	return 0;
}

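/* User QP buffers: userspace passes the SQ and RQ virtual addresses in
 * bnxt_re_qp_req. The SQ umem is sized for max_wqe * wqe_size plus, for RC
 * QPs, a PSN-search area the hardware uses for retransmit tracking; the RQ
 * is mapped only when no SRQ is attached.
 */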
static int bnxt_re_init_user_qp(struct bnxt_re_dev *rdev, struct bnxt_re_pd *pd,
				struct bnxt_re_qp *qp, struct ib_udata *udata)
{
	struct bnxt_qplib_qp *qplib_qp;
	struct bnxt_re_ucontext *cntx;
	struct bnxt_re_qp_req ureq;
	int bytes = 0, psn_sz;
	struct ib_umem *umem;
	int psn_nume;

	qplib_qp = &qp->qplib_qp;
	cntx = rdma_udata_to_drv_context(udata, struct bnxt_re_ucontext,
					 ib_uctx);
	if (ib_copy_from_udata(&ureq, udata, sizeof(ureq)))
		return -EFAULT;

	bytes = (qplib_qp->sq.max_wqe * qplib_qp->sq.wqe_size);

	if (qplib_qp->type == CMDQ_CREATE_QP_TYPE_RC) {
		psn_sz = bnxt_qplib_is_chip_gen_p5(rdev->chip_ctx) ?
			 sizeof(struct sq_psn_search_ext) :
			 sizeof(struct sq_psn_search);
		psn_nume = (qplib_qp->wqe_mode == BNXT_QPLIB_WQE_MODE_STATIC) ?
			   qplib_qp->sq.max_wqe :
			   ((qplib_qp->sq.max_wqe * qplib_qp->sq.wqe_size) /
			     sizeof(struct bnxt_qplib_sge));
		bytes += (psn_nume * psn_sz);
	}

	bytes = PAGE_ALIGN(bytes);
	umem = ib_umem_get(&rdev->ibdev, ureq.qpsva, bytes,
			   IB_ACCESS_LOCAL_WRITE);
	if (IS_ERR(umem))
		return PTR_ERR(umem);

	qp->sumem = umem;
	qplib_qp->sq.sg_info.umem = umem;
	qplib_qp->sq.sg_info.pgsize = PAGE_SIZE;
	qplib_qp->sq.sg_info.pgshft = PAGE_SHIFT;
	qplib_qp->qp_handle = ureq.qp_handle;

	if (!qp->qplib_qp.srq) {
		bytes = (qplib_qp->rq.max_wqe * qplib_qp->rq.wqe_size);
		bytes = PAGE_ALIGN(bytes);
		umem = ib_umem_get(&rdev->ibdev, ureq.qprva, bytes,
				   IB_ACCESS_LOCAL_WRITE);
		if (IS_ERR(umem))
			goto rqfail;
		qp->rumem = umem;
		qplib_qp->rq.sg_info.umem = umem;
		qplib_qp->rq.sg_info.pgsize = PAGE_SIZE;
		qplib_qp->rq.sg_info.pgshft = PAGE_SHIFT;
	}

	qplib_qp->dpi = &cntx->dpi;
	return 0;
rqfail:
	ib_umem_release(qp->sumem);
	qp->sumem = NULL;
	memset(&qplib_qp->sq.sg_info, 0, sizeof(qplib_qp->sq.sg_info));

	return PTR_ERR(umem);
}

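/* Shadow GSI resources: on chips that are not gen P5, the driver pairs QP1
 * with an internal "shadow" UD QP and AH (see bnxt_re_create_shadow_gsi()
 * below). The shadow QP shares QP1's CQs and is used by the driver to
 * relay GSI traffic; this intent is inferred from the gsi_ctx usage in
 * this file rather than stated in vendor documentation.
 */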
static struct bnxt_re_ah *bnxt_re_create_shadow_qp_ah
				(struct bnxt_re_pd *pd,
				 struct bnxt_qplib_res *qp1_res,
				 struct bnxt_qplib_qp *qp1_qp)
{
	struct bnxt_re_dev *rdev = pd->rdev;
	struct bnxt_re_ah *ah;
	union ib_gid sgid;
	int rc;

	ah = kzalloc(sizeof(*ah), GFP_KERNEL);
	if (!ah)
		return NULL;

	ah->rdev = rdev;
	ah->qplib_ah.pd = &pd->qplib_pd;

	rc = bnxt_re_query_gid(&rdev->ibdev, 1, 0, &sgid);
	if (rc)
		goto fail;

	/* supply the dgid data same as sgid */
	memcpy(ah->qplib_ah.dgid.data, &sgid.raw,
	       sizeof(union ib_gid));
	ah->qplib_ah.sgid_index = 0;

	ah->qplib_ah.traffic_class = 0;
	ah->qplib_ah.flow_label = 0;
	ah->qplib_ah.hop_limit = 1;
	ah->qplib_ah.sl = 0;
	/* Have DMAC same as SMAC */
	ether_addr_copy(ah->qplib_ah.dmac, rdev->netdev->dev_addr);

	rc = bnxt_qplib_create_ah(&rdev->qplib_res, &ah->qplib_ah, false);
	if (rc) {
		ibdev_err(&rdev->ibdev,
			  "Failed to allocate HW AH for Shadow QP");
		goto fail;
	}

	return ah;

fail:
	kfree(ah);
	return NULL;
}

static struct bnxt_re_qp *bnxt_re_create_shadow_qp
				(struct bnxt_re_pd *pd,
				 struct bnxt_qplib_res *qp1_res,
				 struct bnxt_qplib_qp *qp1_qp)
{
	struct bnxt_re_dev *rdev = pd->rdev;
	struct bnxt_re_qp *qp;
	int rc;

	qp = kzalloc(sizeof(*qp), GFP_KERNEL);
	if (!qp)
		return NULL;

	qp->rdev = rdev;

	/* Initialize the shadow QP structure from the QP1 values */
	ether_addr_copy(qp->qplib_qp.smac, rdev->netdev->dev_addr);

	qp->qplib_qp.pd = &pd->qplib_pd;
	qp->qplib_qp.qp_handle = (u64)(unsigned long)(&qp->qplib_qp);
	qp->qplib_qp.type = IB_QPT_UD;

	qp->qplib_qp.max_inline_data = 0;
	qp->qplib_qp.sig_type = true;

	/* Shadow QP SQ depth should be same as QP1 RQ depth */
	qp->qplib_qp.sq.wqe_size = bnxt_re_get_wqe_size(0, 6);
	qp->qplib_qp.sq.max_wqe = qp1_qp->rq.max_wqe;
	qp->qplib_qp.sq.max_sge = 2;
	/* Q full delta can be 1 since it is an internal QP */
	qp->qplib_qp.sq.q_full_delta = 1;
	qp->qplib_qp.sq.sg_info.pgsize = PAGE_SIZE;
	qp->qplib_qp.sq.sg_info.pgshft = PAGE_SHIFT;

	qp->qplib_qp.scq = qp1_qp->scq;
	qp->qplib_qp.rcq = qp1_qp->rcq;

	qp->qplib_qp.rq.wqe_size = bnxt_re_get_rwqe_size(6);
	qp->qplib_qp.rq.max_wqe = qp1_qp->rq.max_wqe;
	qp->qplib_qp.rq.max_sge = qp1_qp->rq.max_sge;
	/* Q full delta can be 1 since it is an internal QP */
	qp->qplib_qp.rq.q_full_delta = 1;
	qp->qplib_qp.rq.sg_info.pgsize = PAGE_SIZE;
	qp->qplib_qp.rq.sg_info.pgshft = PAGE_SHIFT;

	qp->qplib_qp.mtu = qp1_qp->mtu;

	qp->qplib_qp.sq_hdr_buf_size = 0;
	qp->qplib_qp.rq_hdr_buf_size = BNXT_QPLIB_MAX_GRH_HDR_SIZE_IPV6;
	qp->qplib_qp.dpi = &rdev->dpi_privileged;

	rc = bnxt_qplib_create_qp(qp1_res, &qp->qplib_qp);
	if (rc)
		goto fail;

	spin_lock_init(&qp->sq_lock);
	INIT_LIST_HEAD(&qp->list);
	mutex_lock(&rdev->qp_lock);
	list_add_tail(&qp->list, &rdev->qp_list);
	atomic_inc(&rdev->qp_count);
	mutex_unlock(&rdev->qp_lock);
	return qp;
fail:
	kfree(qp);
	return NULL;
}

static int bnxt_re_init_rq_attr(struct bnxt_re_qp *qp,
				struct ib_qp_init_attr *init_attr)
{
	struct bnxt_qplib_dev_attr *dev_attr;
	struct bnxt_qplib_qp *qplqp;
	struct bnxt_re_dev *rdev;
	struct bnxt_qplib_q *rq;
	int entries;

	rdev = qp->rdev;
	qplqp = &qp->qplib_qp;
	rq = &qplqp->rq;
	dev_attr = &rdev->dev_attr;

	if (init_attr->srq) {
		struct bnxt_re_srq *srq;

		srq = container_of(init_attr->srq, struct bnxt_re_srq, ib_srq);
		qplqp->srq = &srq->qplib_srq;
		rq->max_wqe = 0;
	} else {
		rq->max_sge = init_attr->cap.max_recv_sge;
		if (rq->max_sge > dev_attr->max_qp_sges)
			rq->max_sge = dev_attr->max_qp_sges;
		init_attr->cap.max_recv_sge = rq->max_sge;
		rq->wqe_size = bnxt_re_setup_rwqe_size(qplqp, rq->max_sge,
						       dev_attr->max_qp_sges);
		/* Allocate 1 more than what's provided so posting max doesn't
		 * mean empty.
		 */
		entries = roundup_pow_of_two(init_attr->cap.max_recv_wr + 1);
		rq->max_wqe = min_t(u32, entries, dev_attr->max_qp_wqes + 1);
		rq->q_full_delta = 0;
		rq->sg_info.pgsize = PAGE_SIZE;
		rq->sg_info.pgshft = PAGE_SHIFT;
	}

	return 0;
}

static void bnxt_re_adjust_gsi_rq_attr(struct bnxt_re_qp *qp)
{
	struct bnxt_qplib_qp *qplqp;
	struct bnxt_re_dev *rdev;

	rdev = qp->rdev;
	qplqp = &qp->qplib_qp;

	if (!bnxt_qplib_is_chip_gen_p5(rdev->chip_ctx))
		/* The GSI RQ on older chips uses a fixed 6-SGE receive WQE */
		qplqp->rq.max_sge = 6;
}

static int bnxt_re_init_sq_attr(struct bnxt_re_qp *qp,
				struct ib_qp_init_attr *init_attr,
				struct ib_udata *udata)
{
	struct bnxt_qplib_dev_attr *dev_attr;
	struct bnxt_qplib_qp *qplqp;
	struct bnxt_re_dev *rdev;
	struct bnxt_qplib_q *sq;
	int entries;
	int diff;
	int rc;

	rdev = qp->rdev;
	qplqp = &qp->qplib_qp;
	sq = &qplqp->sq;
	dev_attr = &rdev->dev_attr;

	sq->max_sge = init_attr->cap.max_send_sge;
	if (sq->max_sge > dev_attr->max_qp_sges) {
		sq->max_sge = dev_attr->max_qp_sges;
		init_attr->cap.max_send_sge = sq->max_sge;
	}

	rc = bnxt_re_setup_swqe_size(qp, init_attr);
	if (rc)
		return rc;

	entries = init_attr->cap.max_send_wr;
	/* Allocate BNXT_QPLIB_RESERVED_QP_WRS extra entries in static WQE
	 * mode, plus one so posting max doesn't mean empty.
	 */
	diff = (qplqp->wqe_mode == BNXT_QPLIB_WQE_MODE_VARIABLE) ?
		0 : BNXT_QPLIB_RESERVED_QP_WRS;
	entries = roundup_pow_of_two(entries + diff + 1);
	sq->max_wqe = min_t(u32, entries, dev_attr->max_qp_wqes + diff + 1);
	sq->q_full_delta = diff + 1;
	/* Allow one extra slot (used by the driver's phantom WQE) so an
	 * application posting its full advertised depth never sees a
	 * spurious queue-full condition.
	 */
	qplqp->sq.q_full_delta -= 1;
	qplqp->sq.sg_info.pgsize = PAGE_SIZE;
	qplqp->sq.sg_info.pgshft = PAGE_SHIFT;

	return 0;
}

static void bnxt_re_adjust_gsi_sq_attr(struct bnxt_re_qp *qp,
				       struct ib_qp_init_attr *init_attr)
{
	struct bnxt_qplib_dev_attr *dev_attr;
	struct bnxt_qplib_qp *qplqp;
	struct bnxt_re_dev *rdev;
	int entries;

	rdev = qp->rdev;
	qplqp = &qp->qplib_qp;
	dev_attr = &rdev->dev_attr;

	if (!bnxt_qplib_is_chip_gen_p5(rdev->chip_ctx)) {
		entries = roundup_pow_of_two(init_attr->cap.max_send_wr + 1);
		qplqp->sq.max_wqe = min_t(u32, entries,
					  dev_attr->max_qp_wqes + 1);
		qplqp->sq.q_full_delta = qplqp->sq.max_wqe -
					 init_attr->cap.max_send_wr;
		qplqp->sq.max_sge++;
		if (qplqp->sq.max_sge > dev_attr->max_qp_sges)
			qplqp->sq.max_sge = dev_attr->max_qp_sges;
	}
}

static int bnxt_re_init_qp_type(struct bnxt_re_dev *rdev,
				struct ib_qp_init_attr *init_attr)
{
	struct bnxt_qplib_chip_ctx *chip_ctx;
	int qptype;

	chip_ctx = rdev->chip_ctx;

	qptype = __from_ib_qp_type(init_attr->qp_type);
	if (qptype == IB_QPT_MAX) {
		ibdev_err(&rdev->ibdev, "QP type 0x%x not supported", qptype);
		qptype = -EOPNOTSUPP;
		goto out;
	}

	if (bnxt_qplib_is_chip_gen_p5(chip_ctx) &&
	    init_attr->qp_type == IB_QPT_GSI)
		qptype = CMDQ_CREATE_QP_TYPE_GSI;
out:
	return qptype;
}

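/* bnxt_re_init_qp_attr() translates the ib_qp_init_attr into the qplib QP
 * template: generic fields first, then the QP type, the CQ hookup, the RQ
 * (or SRQ) geometry, the SQ geometry, and finally the user-memory mapping
 * when the QP is created from userspace.
 */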
static int bnxt_re_init_qp_attr(struct bnxt_re_qp *qp, struct bnxt_re_pd *pd,
				struct ib_qp_init_attr *init_attr,
				struct ib_udata *udata)
{
	struct bnxt_qplib_dev_attr *dev_attr;
	struct bnxt_qplib_qp *qplqp;
	struct bnxt_re_dev *rdev;
	struct bnxt_re_cq *cq;
	int rc = 0, qptype;

	rdev = qp->rdev;
	qplqp = &qp->qplib_qp;
	dev_attr = &rdev->dev_attr;

	/* Setup misc params */
	ether_addr_copy(qplqp->smac, rdev->netdev->dev_addr);
	qplqp->pd = &pd->qplib_pd;
	qplqp->qp_handle = (u64)qplqp;
	qplqp->max_inline_data = init_attr->cap.max_inline_data;
	qplqp->sig_type = ((init_attr->sq_sig_type == IB_SIGNAL_ALL_WR) ?
			    true : false);
	qptype = bnxt_re_init_qp_type(rdev, init_attr);
	if (qptype < 0) {
		rc = qptype;
		goto out;
	}
	qplqp->type = (u8)qptype;
	qplqp->wqe_mode = rdev->chip_ctx->modes.wqe_mode;

	if (init_attr->qp_type == IB_QPT_RC) {
		qplqp->max_rd_atomic = dev_attr->max_qp_rd_atom;
		qplqp->max_dest_rd_atomic = dev_attr->max_qp_init_rd_atom;
	}
	qplqp->mtu = ib_mtu_enum_to_int(iboe_get_mtu(rdev->netdev->mtu));
	qplqp->dpi = &rdev->dpi_privileged;
	if (init_attr->create_flags) {
		ibdev_dbg(&rdev->ibdev,
			  "QP create flags 0x%x not supported",
			  init_attr->create_flags);
		return -EOPNOTSUPP;
	}

	/* Setup CQs */
	if (init_attr->send_cq) {
		cq = container_of(init_attr->send_cq, struct bnxt_re_cq, ib_cq);
		qplqp->scq = &cq->qplib_cq;
		qp->scq = cq;
	}

	if (init_attr->recv_cq) {
		cq = container_of(init_attr->recv_cq, struct bnxt_re_cq, ib_cq);
		qplqp->rcq = &cq->qplib_cq;
		qp->rcq = cq;
	}

	/* Setup RQ/SRQ */
	rc = bnxt_re_init_rq_attr(qp, init_attr);
	if (rc)
		goto out;
	if (init_attr->qp_type == IB_QPT_GSI)
		bnxt_re_adjust_gsi_rq_attr(qp);

	/* Setup SQ */
	rc = bnxt_re_init_sq_attr(qp, init_attr, udata);
	if (rc)
		goto out;
	if (init_attr->qp_type == IB_QPT_GSI)
		bnxt_re_adjust_gsi_sq_attr(qp, init_attr);

	if (udata) /* This will update DPI and qp_handle */
		rc = bnxt_re_init_user_qp(rdev, pd, qp, udata);
out:
	return rc;
}

static int bnxt_re_create_shadow_gsi(struct bnxt_re_qp *qp,
				     struct bnxt_re_pd *pd)
{
	struct bnxt_re_sqp_entries *sqp_tbl = NULL;
	struct bnxt_re_dev *rdev;
	struct bnxt_re_qp *sqp;
	struct bnxt_re_ah *sah;
	int rc = 0;

	rdev = qp->rdev;
	/* Create a shadow QP to handle the QP1 traffic */
	sqp_tbl = kcalloc(BNXT_RE_MAX_GSI_SQP_ENTRIES, sizeof(*sqp_tbl),
			  GFP_KERNEL);
	if (!sqp_tbl)
		return -ENOMEM;
	rdev->gsi_ctx.sqp_tbl = sqp_tbl;

	sqp = bnxt_re_create_shadow_qp(pd, &rdev->qplib_res, &qp->qplib_qp);
	if (!sqp) {
		rc = -ENODEV;
		ibdev_err(&rdev->ibdev, "Failed to create Shadow QP for QP1");
		goto out;
	}
	rdev->gsi_ctx.gsi_sqp = sqp;

	sqp->rcq = qp->rcq;
	sqp->scq = qp->scq;
	sah = bnxt_re_create_shadow_qp_ah(pd, &rdev->qplib_res,
					  &qp->qplib_qp);
	if (!sah) {
		bnxt_qplib_destroy_qp(&rdev->qplib_res,
				      &sqp->qplib_qp);
		rc = -ENODEV;
		ibdev_err(&rdev->ibdev,
			  "Failed to create AH entry for ShadowQP");
		goto out;
	}
	rdev->gsi_ctx.gsi_sah = sah;

	return 0;
out:
	kfree(sqp_tbl);
	return rc;
}

static int bnxt_re_create_gsi_qp(struct bnxt_re_qp *qp, struct bnxt_re_pd *pd,
				 struct ib_qp_init_attr *init_attr)
{
	struct bnxt_re_dev *rdev;
	struct bnxt_qplib_qp *qplqp;
	int rc = 0;

	rdev = qp->rdev;
	qplqp = &qp->qplib_qp;

	qplqp->rq_hdr_buf_size = BNXT_QPLIB_MAX_QP1_RQ_HDR_SIZE_V2;
	qplqp->sq_hdr_buf_size = BNXT_QPLIB_MAX_QP1_SQ_HDR_SIZE_V2;

	rc = bnxt_qplib_create_qp1(&rdev->qplib_res, qplqp);
	if (rc) {
		ibdev_err(&rdev->ibdev, "create HW QP1 failed!");
		goto out;
	}

	rc = bnxt_re_create_shadow_gsi(qp, pd);
out:
	return rc;
}

static bool bnxt_re_test_qp_limits(struct bnxt_re_dev *rdev,
				   struct ib_qp_init_attr *init_attr,
				   struct bnxt_qplib_dev_attr *dev_attr)
{
	bool rc = true;

	if (init_attr->cap.max_send_wr > dev_attr->max_qp_wqes ||
	    init_attr->cap.max_recv_wr > dev_attr->max_qp_wqes ||
	    init_attr->cap.max_send_sge > dev_attr->max_qp_sges ||
	    init_attr->cap.max_recv_sge > dev_attr->max_qp_sges ||
	    init_attr->cap.max_inline_data > dev_attr->max_inline_data) {
		ibdev_err(&rdev->ibdev,
			  "Create QP failed - max exceeded! 0x%x/0x%x 0x%x/0x%x 0x%x/0x%x 0x%x/0x%x 0x%x/0x%x",
			  init_attr->cap.max_send_wr, dev_attr->max_qp_wqes,
			  init_attr->cap.max_recv_wr, dev_attr->max_qp_wqes,
			  init_attr->cap.max_send_sge, dev_attr->max_qp_sges,
			  init_attr->cap.max_recv_sge, dev_attr->max_qp_sges,
			  init_attr->cap.max_inline_data,
			  dev_attr->max_inline_data);
		rc = false;
	}
	return rc;
}

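/* QP creation entry point: validate the caps against device limits, build
 * the qplib template, then either take the GSI path (QP1 plus shadow QP on
 * older chips) or create a regular HW QP and return its id to userspace.
 */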
struct ib_qp *bnxt_re_create_qp(struct ib_pd *ib_pd,
				struct ib_qp_init_attr *qp_init_attr,
				struct ib_udata *udata)
{
	struct bnxt_re_pd *pd = container_of(ib_pd, struct bnxt_re_pd, ib_pd);
	struct bnxt_re_dev *rdev = pd->rdev;
	struct bnxt_qplib_dev_attr *dev_attr = &rdev->dev_attr;
	struct bnxt_re_qp *qp;
	int rc;

	rc = bnxt_re_test_qp_limits(rdev, qp_init_attr, dev_attr);
	if (!rc) {
		rc = -EINVAL;
		goto exit;
	}

	qp = kzalloc(sizeof(*qp), GFP_KERNEL);
	if (!qp) {
		rc = -ENOMEM;
		goto exit;
	}
	qp->rdev = rdev;
	rc = bnxt_re_init_qp_attr(qp, pd, qp_init_attr, udata);
	if (rc)
		goto fail;

	if (qp_init_attr->qp_type == IB_QPT_GSI &&
	    !(bnxt_qplib_is_chip_gen_p5(rdev->chip_ctx))) {
		rc = bnxt_re_create_gsi_qp(qp, pd, qp_init_attr);
		if (rc == -ENODEV)
			goto qp_destroy;
		if (rc)
			goto fail;
	} else {
		rc = bnxt_qplib_create_qp(&rdev->qplib_res, &qp->qplib_qp);
		if (rc) {
			ibdev_err(&rdev->ibdev, "Failed to create HW QP");
			goto free_umem;
		}
		if (udata) {
			struct bnxt_re_qp_resp resp;

			resp.qpid = qp->qplib_qp.id;
			resp.rsvd = 0;
			rc = ib_copy_to_udata(udata, &resp, sizeof(resp));
			if (rc) {
				ibdev_err(&rdev->ibdev, "Failed to copy QP udata");
				goto qp_destroy;
			}
		}
	}

	qp->ib_qp.qp_num = qp->qplib_qp.id;
	if (qp_init_attr->qp_type == IB_QPT_GSI)
		rdev->gsi_ctx.gsi_qp = qp;
	spin_lock_init(&qp->sq_lock);
	spin_lock_init(&qp->rq_lock);
	INIT_LIST_HEAD(&qp->list);
	mutex_lock(&rdev->qp_lock);
	list_add_tail(&qp->list, &rdev->qp_list);
	mutex_unlock(&rdev->qp_lock);
	atomic_inc(&rdev->qp_count);

	return &qp->ib_qp;
qp_destroy:
	bnxt_qplib_destroy_qp(&rdev->qplib_res, &qp->qplib_qp);
free_umem:
	ib_umem_release(qp->rumem);
	ib_umem_release(qp->sumem);
fail:
	kfree(qp);
exit:
	return ERR_PTR(rc);
}

static u8 __from_ib_qp_state(enum ib_qp_state state)
{
	switch (state) {
	case IB_QPS_RESET:
		return CMDQ_MODIFY_QP_NEW_STATE_RESET;
	case IB_QPS_INIT:
		return CMDQ_MODIFY_QP_NEW_STATE_INIT;
	case IB_QPS_RTR:
		return CMDQ_MODIFY_QP_NEW_STATE_RTR;
	case IB_QPS_RTS:
		return CMDQ_MODIFY_QP_NEW_STATE_RTS;
	case IB_QPS_SQD:
		return CMDQ_MODIFY_QP_NEW_STATE_SQD;
	case IB_QPS_SQE:
		return CMDQ_MODIFY_QP_NEW_STATE_SQE;
	case IB_QPS_ERR:
	default:
		return CMDQ_MODIFY_QP_NEW_STATE_ERR;
	}
}

static enum ib_qp_state __to_ib_qp_state(u8 state)
{
	switch (state) {
	case CMDQ_MODIFY_QP_NEW_STATE_RESET:
		return IB_QPS_RESET;
	case CMDQ_MODIFY_QP_NEW_STATE_INIT:
		return IB_QPS_INIT;
	case CMDQ_MODIFY_QP_NEW_STATE_RTR:
		return IB_QPS_RTR;
	case CMDQ_MODIFY_QP_NEW_STATE_RTS:
		return IB_QPS_RTS;
	case CMDQ_MODIFY_QP_NEW_STATE_SQD:
		return IB_QPS_SQD;
	case CMDQ_MODIFY_QP_NEW_STATE_SQE:
		return IB_QPS_SQE;
	case CMDQ_MODIFY_QP_NEW_STATE_ERR:
	default:
		return IB_QPS_ERR;
	}
}

static u32 __from_ib_mtu(enum ib_mtu mtu)
{
	switch (mtu) {
	case IB_MTU_256:
		return CMDQ_MODIFY_QP_PATH_MTU_MTU_256;
	case IB_MTU_512:
		return CMDQ_MODIFY_QP_PATH_MTU_MTU_512;
	case IB_MTU_1024:
		return CMDQ_MODIFY_QP_PATH_MTU_MTU_1024;
	case IB_MTU_2048:
		return CMDQ_MODIFY_QP_PATH_MTU_MTU_2048;
	case IB_MTU_4096:
		return CMDQ_MODIFY_QP_PATH_MTU_MTU_4096;
	default:
		return CMDQ_MODIFY_QP_PATH_MTU_MTU_2048;
	}
}

static enum ib_mtu __to_ib_mtu(u32 mtu)
{
	switch (mtu & CREQ_QUERY_QP_RESP_SB_PATH_MTU_MASK) {
	case CMDQ_MODIFY_QP_PATH_MTU_MTU_256:
		return IB_MTU_256;
	case CMDQ_MODIFY_QP_PATH_MTU_MTU_512:
		return IB_MTU_512;
	case CMDQ_MODIFY_QP_PATH_MTU_MTU_1024:
		return IB_MTU_1024;
	case CMDQ_MODIFY_QP_PATH_MTU_MTU_2048:
		return IB_MTU_2048;
	case CMDQ_MODIFY_QP_PATH_MTU_MTU_4096:
		return IB_MTU_4096;
	default:
		return IB_MTU_2048;
	}
}

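/* Shared Receive Queue (SRQ) verbs: an SRQ lets multiple QPs share one
 * receive queue. Its events are reported through the notification queue
 * (NQ) of the CQ it is bound to, which is why create/destroy below also
 * adjust the NQ budget.
 */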
int bnxt_re_destroy_srq(struct ib_srq *ib_srq, struct ib_udata *udata)
{
	struct bnxt_re_srq *srq = container_of(ib_srq, struct bnxt_re_srq,
					       ib_srq);
	struct bnxt_re_dev *rdev = srq->rdev;
	struct bnxt_qplib_srq *qplib_srq = &srq->qplib_srq;
	struct bnxt_qplib_nq *nq = NULL;

	if (qplib_srq->cq)
		nq = qplib_srq->cq->nq;
	bnxt_qplib_destroy_srq(&rdev->qplib_res, qplib_srq);
	ib_umem_release(srq->umem);
	atomic_dec(&rdev->srq_count);
	if (nq)
		nq->budget--;
	return 0;
}

static int bnxt_re_init_user_srq(struct bnxt_re_dev *rdev,
				 struct bnxt_re_pd *pd,
				 struct bnxt_re_srq *srq,
				 struct ib_udata *udata)
{
	struct bnxt_re_srq_req ureq;
	struct bnxt_qplib_srq *qplib_srq = &srq->qplib_srq;
	struct ib_umem *umem;
	int bytes = 0;
	struct bnxt_re_ucontext *cntx = rdma_udata_to_drv_context(
		udata, struct bnxt_re_ucontext, ib_uctx);

	if (ib_copy_from_udata(&ureq, udata, sizeof(ureq)))
		return -EFAULT;

	bytes = (qplib_srq->max_wqe * qplib_srq->wqe_size);
	bytes = PAGE_ALIGN(bytes);
	umem = ib_umem_get(&rdev->ibdev, ureq.srqva, bytes,
			   IB_ACCESS_LOCAL_WRITE);
	if (IS_ERR(umem))
		return PTR_ERR(umem);

	srq->umem = umem;
	qplib_srq->sg_info.umem = umem;
	qplib_srq->sg_info.pgsize = PAGE_SIZE;
	qplib_srq->sg_info.pgshft = PAGE_SHIFT;
	qplib_srq->srq_handle = ureq.srq_handle;
	qplib_srq->dpi = &cntx->dpi;

	return 0;
}

int bnxt_re_create_srq(struct ib_srq *ib_srq,
		       struct ib_srq_init_attr *srq_init_attr,
		       struct ib_udata *udata)
{
	struct bnxt_qplib_dev_attr *dev_attr;
	struct bnxt_qplib_nq *nq = NULL;
	struct bnxt_re_dev *rdev;
	struct bnxt_re_srq *srq;
	struct bnxt_re_pd *pd;
	struct ib_pd *ib_pd;
	int rc, entries;

	ib_pd = ib_srq->pd;
	pd = container_of(ib_pd, struct bnxt_re_pd, ib_pd);
	rdev = pd->rdev;
	dev_attr = &rdev->dev_attr;
	srq = container_of(ib_srq, struct bnxt_re_srq, ib_srq);

	if (srq_init_attr->attr.max_wr >= dev_attr->max_srq_wqes) {
		ibdev_err(&rdev->ibdev, "Create SRQ failed - max exceeded");
		rc = -EINVAL;
		goto exit;
	}

	if (srq_init_attr->srq_type != IB_SRQT_BASIC) {
		rc = -EOPNOTSUPP;
		goto exit;
	}

	srq->rdev = rdev;
	srq->qplib_srq.pd = &pd->qplib_pd;
	srq->qplib_srq.dpi = &rdev->dpi_privileged;
	/* Allocate 1 more than what's provided so posting max doesn't
	 * mean empty.
	 */
	entries = roundup_pow_of_two(srq_init_attr->attr.max_wr + 1);
	if (entries > dev_attr->max_srq_wqes + 1)
		entries = dev_attr->max_srq_wqes + 1;
	srq->qplib_srq.max_wqe = entries;

	srq->qplib_srq.max_sge = srq_init_attr->attr.max_sge;
	/* The SRQ WQE size is fixed at the maximum SGE count */
	srq->qplib_srq.wqe_size = bnxt_re_get_rwqe_size(dev_attr->max_srq_sges);
	srq->qplib_srq.threshold = srq_init_attr->attr.srq_limit;
	srq->srq_limit = srq_init_attr->attr.srq_limit;
	srq->qplib_srq.eventq_hw_ring_id = rdev->nq[0].ring_id;
	nq = &rdev->nq[0];

	if (udata) {
		rc = bnxt_re_init_user_srq(rdev, pd, srq, udata);
		if (rc)
			goto fail;
	}

	rc = bnxt_qplib_create_srq(&rdev->qplib_res, &srq->qplib_srq);
	if (rc) {
		ibdev_err(&rdev->ibdev, "Create HW SRQ failed!");
		goto fail;
	}

	if (udata) {
		struct bnxt_re_srq_resp resp;

		resp.srqid = srq->qplib_srq.id;
		rc = ib_copy_to_udata(udata, &resp, sizeof(resp));
		if (rc) {
			ibdev_err(&rdev->ibdev, "SRQ copy to udata failed!");
			bnxt_qplib_destroy_srq(&rdev->qplib_res,
					       &srq->qplib_srq);
			goto fail;
		}
	}
	if (nq)
		nq->budget++;
	atomic_inc(&rdev->srq_count);
	spin_lock_init(&srq->lock);

	return 0;

fail:
	ib_umem_release(srq->umem);
exit:
	return rc;
}

int bnxt_re_modify_srq(struct ib_srq *ib_srq, struct ib_srq_attr *srq_attr,
		       enum ib_srq_attr_mask srq_attr_mask,
		       struct ib_udata *udata)
{
	struct bnxt_re_srq *srq = container_of(ib_srq, struct bnxt_re_srq,
					       ib_srq);
	struct bnxt_re_dev *rdev = srq->rdev;
	int rc;

	switch (srq_attr_mask) {
	case IB_SRQ_MAX_WR:
		/* SRQ resize is not supported */
		break;
	case IB_SRQ_LIMIT:
		/* Change the SRQ threshold */
		if (srq_attr->srq_limit > srq->qplib_srq.max_wqe)
			return -EINVAL;

		srq->qplib_srq.threshold = srq_attr->srq_limit;
		rc = bnxt_qplib_modify_srq(&rdev->qplib_res, &srq->qplib_srq);
		if (rc) {
			ibdev_err(&rdev->ibdev, "Modify HW SRQ failed!");
			return rc;
		}
		/* On success, update the shadow copy */
		srq->srq_limit = srq_attr->srq_limit;
		break;
	default:
		ibdev_err(&rdev->ibdev,
			  "Unsupported srq_attr_mask 0x%x", srq_attr_mask);
		return -EINVAL;
	}
	return 0;
}

int bnxt_re_query_srq(struct ib_srq *ib_srq, struct ib_srq_attr *srq_attr)
{
	struct bnxt_re_srq *srq = container_of(ib_srq, struct bnxt_re_srq,
					       ib_srq);
	struct bnxt_re_srq tsrq;
	struct bnxt_re_dev *rdev = srq->rdev;
	int rc;

	/* Get live SRQ attr */
	tsrq.qplib_srq.id = srq->qplib_srq.id;
	rc = bnxt_qplib_query_srq(&rdev->qplib_res, &tsrq.qplib_srq);
	if (rc) {
		ibdev_err(&rdev->ibdev, "Query HW SRQ failed!");
		return rc;
	}
	srq_attr->max_wr = srq->qplib_srq.max_wqe;
	srq_attr->max_sge = srq->qplib_srq.max_sge;
	srq_attr->srq_limit = tsrq.qplib_srq.threshold;

	return 0;
}

int bnxt_re_post_srq_recv(struct ib_srq *ib_srq, const struct ib_recv_wr *wr,
			  const struct ib_recv_wr **bad_wr)
{
	struct bnxt_re_srq *srq = container_of(ib_srq, struct bnxt_re_srq,
					       ib_srq);
	struct bnxt_qplib_swqe wqe;
	unsigned long flags;
	int rc = 0;

	spin_lock_irqsave(&srq->lock, flags);
	while (wr) {
		/* Transcribe each ib_recv_wr to a qplib_swqe */
		wqe.num_sge = wr->num_sge;
		bnxt_re_build_sgl(wr->sg_list, wqe.sg_list, wr->num_sge);
		wqe.wr_id = wr->wr_id;
		wqe.type = BNXT_QPLIB_SWQE_TYPE_RECV;

		rc = bnxt_qplib_post_srq_recv(&srq->qplib_srq, &wqe);
		if (rc) {
			*bad_wr = wr;
			break;
		}
		wr = wr->next;
	}
	spin_unlock_irqrestore(&srq->lock, flags);

	return rc;
}
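
/* When QP1 is modified, mirror the relevant attributes (state, pkey index,
 * qkey, SQ PSN) onto the shadow GSI QP so the two stay consistent.
 */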
static int bnxt_re_modify_shadow_qp(struct bnxt_re_dev *rdev,
				    struct bnxt_re_qp *qp1_qp,
				    int qp_attr_mask)
{
	struct bnxt_re_qp *qp = rdev->gsi_ctx.gsi_sqp;
	int rc = 0;

	if (qp_attr_mask & IB_QP_STATE) {
		qp->qplib_qp.modify_flags |= CMDQ_MODIFY_QP_MODIFY_MASK_STATE;
		qp->qplib_qp.state = qp1_qp->qplib_qp.state;
	}
	if (qp_attr_mask & IB_QP_PKEY_INDEX) {
		qp->qplib_qp.modify_flags |= CMDQ_MODIFY_QP_MODIFY_MASK_PKEY;
		qp->qplib_qp.pkey_index = qp1_qp->qplib_qp.pkey_index;
	}

	if (qp_attr_mask & IB_QP_QKEY) {
		qp->qplib_qp.modify_flags |= CMDQ_MODIFY_QP_MODIFY_MASK_QKEY;
		/* Using a Random QKEY */
		qp->qplib_qp.qkey = 0x81818181;
	}
	if (qp_attr_mask & IB_QP_SQ_PSN) {
		qp->qplib_qp.modify_flags |= CMDQ_MODIFY_QP_MODIFY_MASK_SQ_PSN;
		qp->qplib_qp.sq.psn = qp1_qp->qplib_qp.sq.psn;
	}

	rc = bnxt_qplib_modify_qp(&rdev->qplib_res, &qp->qplib_qp);
	if (rc)
		ibdev_err(&rdev->ibdev, "Failed to modify Shadow QP for QP1");
	return rc;
}
1811
1812int bnxt_re_modify_qp(struct ib_qp *ib_qp, struct ib_qp_attr *qp_attr,
1813 int qp_attr_mask, struct ib_udata *udata)
1814{
1815 struct bnxt_re_qp *qp = container_of(ib_qp, struct bnxt_re_qp, ib_qp);
1816 struct bnxt_re_dev *rdev = qp->rdev;
1817 struct bnxt_qplib_dev_attr *dev_attr = &rdev->dev_attr;
1818 enum ib_qp_state curr_qp_state, new_qp_state;
1819 int rc, entries;
1820 unsigned int flags;
1821 u8 nw_type;
1822
1823 if (qp_attr_mask & ~IB_QP_ATTR_STANDARD_BITS)
1824 return -EOPNOTSUPP;
1825
1826 qp->qplib_qp.modify_flags = 0;
1827 if (qp_attr_mask & IB_QP_STATE) {
1828 curr_qp_state = __to_ib_qp_state(qp->qplib_qp.cur_qp_state);
1829 new_qp_state = qp_attr->qp_state;
1830 if (!ib_modify_qp_is_ok(curr_qp_state, new_qp_state,
1831 ib_qp->qp_type, qp_attr_mask)) {
1832 ibdev_err(&rdev->ibdev,
1833 "Invalid attribute mask: %#x specified ",
1834 qp_attr_mask);
1835 ibdev_err(&rdev->ibdev,
1836 "for qpn: %#x type: %#x",
1837 ib_qp->qp_num, ib_qp->qp_type);
1838 ibdev_err(&rdev->ibdev,
1839 "curr_qp_state=0x%x, new_qp_state=0x%x\n",
1840 curr_qp_state, new_qp_state);
1841 return -EINVAL;
1842 }
1843 qp->qplib_qp.modify_flags |= CMDQ_MODIFY_QP_MODIFY_MASK_STATE;
1844 qp->qplib_qp.state = __from_ib_qp_state(qp_attr->qp_state);
1845
1846 if (!qp->sumem &&
1847 qp->qplib_qp.state == CMDQ_MODIFY_QP_NEW_STATE_ERR) {
1848 ibdev_dbg(&rdev->ibdev,
1849 "Move QP = %p to flush list\n", qp);
1850 flags = bnxt_re_lock_cqs(qp);
1851 bnxt_qplib_add_flush_qp(&qp->qplib_qp);
1852 bnxt_re_unlock_cqs(qp, flags);
1853 }
1854 if (!qp->sumem &&
1855 qp->qplib_qp.state == CMDQ_MODIFY_QP_NEW_STATE_RESET) {
1856 ibdev_dbg(&rdev->ibdev,
1857 "Move QP = %p out of flush list\n", qp);
1858 flags = bnxt_re_lock_cqs(qp);
1859 bnxt_qplib_clean_qp(&qp->qplib_qp);
1860 bnxt_re_unlock_cqs(qp, flags);
1861 }
1862 }
1863 if (qp_attr_mask & IB_QP_EN_SQD_ASYNC_NOTIFY) {
1864 qp->qplib_qp.modify_flags |=
1865 CMDQ_MODIFY_QP_MODIFY_MASK_EN_SQD_ASYNC_NOTIFY;
1866 qp->qplib_qp.en_sqd_async_notify = true;
1867 }
1868 if (qp_attr_mask & IB_QP_ACCESS_FLAGS) {
1869 qp->qplib_qp.modify_flags |= CMDQ_MODIFY_QP_MODIFY_MASK_ACCESS;
1870 qp->qplib_qp.access =
1871 __from_ib_access_flags(qp_attr->qp_access_flags);
1872
1873 qp->qplib_qp.access |= BNXT_QPLIB_ACCESS_LOCAL_WRITE;
1874
1875 qp->qplib_qp.access |= CMDQ_MODIFY_QP_ACCESS_REMOTE_WRITE;
1876 qp->qplib_qp.access |= CMDQ_MODIFY_QP_ACCESS_REMOTE_READ;
1877 }
1878 if (qp_attr_mask & IB_QP_PKEY_INDEX) {
1879 qp->qplib_qp.modify_flags |= CMDQ_MODIFY_QP_MODIFY_MASK_PKEY;
1880 qp->qplib_qp.pkey_index = qp_attr->pkey_index;
1881 }
1882 if (qp_attr_mask & IB_QP_QKEY) {
1883 qp->qplib_qp.modify_flags |= CMDQ_MODIFY_QP_MODIFY_MASK_QKEY;
1884 qp->qplib_qp.qkey = qp_attr->qkey;
1885 }
1886 if (qp_attr_mask & IB_QP_AV) {
1887 const struct ib_global_route *grh =
1888 rdma_ah_read_grh(&qp_attr->ah_attr);
1889 const struct ib_gid_attr *sgid_attr;
1890 struct bnxt_re_gid_ctx *ctx;
1891
1892 qp->qplib_qp.modify_flags |= CMDQ_MODIFY_QP_MODIFY_MASK_DGID |
1893 CMDQ_MODIFY_QP_MODIFY_MASK_FLOW_LABEL |
1894 CMDQ_MODIFY_QP_MODIFY_MASK_SGID_INDEX |
1895 CMDQ_MODIFY_QP_MODIFY_MASK_HOP_LIMIT |
1896 CMDQ_MODIFY_QP_MODIFY_MASK_TRAFFIC_CLASS |
1897 CMDQ_MODIFY_QP_MODIFY_MASK_DEST_MAC |
1898 CMDQ_MODIFY_QP_MODIFY_MASK_VLAN_ID;
1899 memcpy(qp->qplib_qp.ah.dgid.data, grh->dgid.raw,
1900 sizeof(qp->qplib_qp.ah.dgid.data));
1901 qp->qplib_qp.ah.flow_label = grh->flow_label;
1902 sgid_attr = grh->sgid_attr;
1903
1904
1905
1906 ctx = rdma_read_gid_hw_context(sgid_attr);
1907 qp->qplib_qp.ah.sgid_index = ctx->idx;
1908 qp->qplib_qp.ah.host_sgid_index = grh->sgid_index;
1909 qp->qplib_qp.ah.hop_limit = grh->hop_limit;
1910 qp->qplib_qp.ah.traffic_class = grh->traffic_class;
1911 qp->qplib_qp.ah.sl = rdma_ah_get_sl(&qp_attr->ah_attr);
1912 ether_addr_copy(qp->qplib_qp.ah.dmac,
1913 qp_attr->ah_attr.roce.dmac);
1914
1915 rc = rdma_read_gid_l2_fields(sgid_attr, NULL,
1916 &qp->qplib_qp.smac[0]);
1917 if (rc)
1918 return rc;
1919
1920 nw_type = rdma_gid_attr_network_type(sgid_attr);
1921 switch (nw_type) {
1922 case RDMA_NETWORK_IPV4:
1923 qp->qplib_qp.nw_type =
1924 CMDQ_MODIFY_QP_NETWORK_TYPE_ROCEV2_IPV4;
1925 break;
1926 case RDMA_NETWORK_IPV6:
1927 qp->qplib_qp.nw_type =
1928 CMDQ_MODIFY_QP_NETWORK_TYPE_ROCEV2_IPV6;
1929 break;
1930 default:
1931 qp->qplib_qp.nw_type =
1932 CMDQ_MODIFY_QP_NETWORK_TYPE_ROCEV1;
1933 break;
1934 }
1935 }
1936
1937 if (qp_attr_mask & IB_QP_PATH_MTU) {
1938 qp->qplib_qp.modify_flags |=
1939 CMDQ_MODIFY_QP_MODIFY_MASK_PATH_MTU;
1940 qp->qplib_qp.path_mtu = __from_ib_mtu(qp_attr->path_mtu);
1941 qp->qplib_qp.mtu = ib_mtu_enum_to_int(qp_attr->path_mtu);
1942 } else if (qp_attr->qp_state == IB_QPS_RTR) {
1943 qp->qplib_qp.modify_flags |=
1944 CMDQ_MODIFY_QP_MODIFY_MASK_PATH_MTU;
1945 qp->qplib_qp.path_mtu =
1946 __from_ib_mtu(iboe_get_mtu(rdev->netdev->mtu));
1947 qp->qplib_qp.mtu =
1948 ib_mtu_enum_to_int(iboe_get_mtu(rdev->netdev->mtu));
1949 }
1950
1951 if (qp_attr_mask & IB_QP_TIMEOUT) {
1952 qp->qplib_qp.modify_flags |= CMDQ_MODIFY_QP_MODIFY_MASK_TIMEOUT;
1953 qp->qplib_qp.timeout = qp_attr->timeout;
1954 }
1955 if (qp_attr_mask & IB_QP_RETRY_CNT) {
1956 qp->qplib_qp.modify_flags |=
1957 CMDQ_MODIFY_QP_MODIFY_MASK_RETRY_CNT;
1958 qp->qplib_qp.retry_cnt = qp_attr->retry_cnt;
1959 }
1960 if (qp_attr_mask & IB_QP_RNR_RETRY) {
1961 qp->qplib_qp.modify_flags |=
1962 CMDQ_MODIFY_QP_MODIFY_MASK_RNR_RETRY;
1963 qp->qplib_qp.rnr_retry = qp_attr->rnr_retry;
1964 }
1965 if (qp_attr_mask & IB_QP_MIN_RNR_TIMER) {
1966 qp->qplib_qp.modify_flags |=
1967 CMDQ_MODIFY_QP_MODIFY_MASK_MIN_RNR_TIMER;
1968 qp->qplib_qp.min_rnr_timer = qp_attr->min_rnr_timer;
1969 }
1970 if (qp_attr_mask & IB_QP_RQ_PSN) {
1971 qp->qplib_qp.modify_flags |= CMDQ_MODIFY_QP_MODIFY_MASK_RQ_PSN;
1972 qp->qplib_qp.rq.psn = qp_attr->rq_psn;
1973 }
1974 if (qp_attr_mask & IB_QP_MAX_QP_RD_ATOMIC) {
1975 qp->qplib_qp.modify_flags |=
1976 CMDQ_MODIFY_QP_MODIFY_MASK_MAX_RD_ATOMIC;
1977
1978 qp->qplib_qp.max_rd_atomic = min_t(u32, qp_attr->max_rd_atomic,
1979 dev_attr->max_qp_rd_atom);
1980 }
1981 if (qp_attr_mask & IB_QP_SQ_PSN) {
1982 qp->qplib_qp.modify_flags |= CMDQ_MODIFY_QP_MODIFY_MASK_SQ_PSN;
1983 qp->qplib_qp.sq.psn = qp_attr->sq_psn;
1984 }
1985 if (qp_attr_mask & IB_QP_MAX_DEST_RD_ATOMIC) {
1986 if (qp_attr->max_dest_rd_atomic >
1987 dev_attr->max_qp_init_rd_atom) {
1988 ibdev_err(&rdev->ibdev,
1989 "max_dest_rd_atomic requested%d is > dev_max%d",
1990 qp_attr->max_dest_rd_atomic,
1991 dev_attr->max_qp_init_rd_atom);
1992 return -EINVAL;
1993 }
1994
1995 qp->qplib_qp.modify_flags |=
1996 CMDQ_MODIFY_QP_MODIFY_MASK_MAX_DEST_RD_ATOMIC;
1997 qp->qplib_qp.max_dest_rd_atomic = qp_attr->max_dest_rd_atomic;
1998 }
1999 if (qp_attr_mask & IB_QP_CAP) {
2000 qp->qplib_qp.modify_flags |=
2001 CMDQ_MODIFY_QP_MODIFY_MASK_SQ_SIZE |
2002 CMDQ_MODIFY_QP_MODIFY_MASK_RQ_SIZE |
2003 CMDQ_MODIFY_QP_MODIFY_MASK_SQ_SGE |
2004 CMDQ_MODIFY_QP_MODIFY_MASK_RQ_SGE |
2005 CMDQ_MODIFY_QP_MODIFY_MASK_MAX_INLINE_DATA;
2006 if ((qp_attr->cap.max_send_wr >= dev_attr->max_qp_wqes) ||
2007 (qp_attr->cap.max_recv_wr >= dev_attr->max_qp_wqes) ||
2008 (qp_attr->cap.max_send_sge >= dev_attr->max_qp_sges) ||
2009 (qp_attr->cap.max_recv_sge >= dev_attr->max_qp_sges) ||
2010 (qp_attr->cap.max_inline_data >=
2011 dev_attr->max_inline_data)) {
2012 ibdev_err(&rdev->ibdev,
2013 "Modify QP failed - max exceeded");
2014 return -EINVAL;
2015 }
2016 entries = roundup_pow_of_two(qp_attr->cap.max_send_wr);
2017 qp->qplib_qp.sq.max_wqe = min_t(u32, entries,
2018 dev_attr->max_qp_wqes + 1);
2019 qp->qplib_qp.sq.q_full_delta = qp->qplib_qp.sq.max_wqe -
2020 qp_attr->cap.max_send_wr;
2021
2022 /* Reserving one slot for Phantom WQE. Some applications can
2023 * post one extra entry in this case; allow it to avoid an
2024 * unexpected queue-full condition.
2025 */
2026 qp->qplib_qp.sq.q_full_delta -= 1;
2027 qp->qplib_qp.sq.max_sge = qp_attr->cap.max_send_sge;
2028 if (qp->qplib_qp.rq.max_wqe) {
2029 entries = roundup_pow_of_two(qp_attr->cap.max_recv_wr);
2030 qp->qplib_qp.rq.max_wqe =
2031 min_t(u32, entries, dev_attr->max_qp_wqes + 1);
2032 qp->qplib_qp.rq.q_full_delta = qp->qplib_qp.rq.max_wqe -
2033 qp_attr->cap.max_recv_wr;
2034 qp->qplib_qp.rq.max_sge = qp_attr->cap.max_recv_sge;
2035 } else {
2036 /* SRQ was used prior, just ignore the RQ caps */
2037 }
2038 }
2039 if (qp_attr_mask & IB_QP_DEST_QPN) {
2040 qp->qplib_qp.modify_flags |=
2041 CMDQ_MODIFY_QP_MODIFY_MASK_DEST_QP_ID;
2042 qp->qplib_qp.dest_qpn = qp_attr->dest_qp_num;
2043 }
2044 rc = bnxt_qplib_modify_qp(&rdev->qplib_res, &qp->qplib_qp);
2045 if (rc) {
2046 ibdev_err(&rdev->ibdev, "Failed to modify HW QP");
2047 return rc;
2048 }
2049 if (ib_qp->qp_type == IB_QPT_GSI && rdev->gsi_ctx.gsi_sqp)
2050 rc = bnxt_re_modify_shadow_qp(rdev, qp, qp_attr_mask);
2051 return rc;
2052}
2053
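/* bnxt_re_query_qp() - read the current QP attributes back from
 * firmware and translate them into IB verbs form. A scratch
 * bnxt_qplib_qp is heap-allocated for the firmware query so the
 * live QP state is not disturbed on failure.
 */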
2054int bnxt_re_query_qp(struct ib_qp *ib_qp, struct ib_qp_attr *qp_attr,
2055 int qp_attr_mask, struct ib_qp_init_attr *qp_init_attr)
2056{
2057 struct bnxt_re_qp *qp = container_of(ib_qp, struct bnxt_re_qp, ib_qp);
2058 struct bnxt_re_dev *rdev = qp->rdev;
2059 struct bnxt_qplib_qp *qplib_qp;
2060 int rc;
2061
2062 qplib_qp = kzalloc(sizeof(*qplib_qp), GFP_KERNEL);
2063 if (!qplib_qp)
2064 return -ENOMEM;
2065
2066 qplib_qp->id = qp->qplib_qp.id;
2067 qplib_qp->ah.host_sgid_index = qp->qplib_qp.ah.host_sgid_index;
2068
2069 rc = bnxt_qplib_query_qp(&rdev->qplib_res, qplib_qp);
2070 if (rc) {
2071 ibdev_err(&rdev->ibdev, "Failed to query HW QP");
2072 goto out;
2073 }
2074 qp_attr->qp_state = __to_ib_qp_state(qplib_qp->state);
2075 qp_attr->cur_qp_state = __to_ib_qp_state(qplib_qp->cur_qp_state);
2076 qp_attr->en_sqd_async_notify = qplib_qp->en_sqd_async_notify ? 1 : 0;
2077 qp_attr->qp_access_flags = __to_ib_access_flags(qplib_qp->access);
2078 qp_attr->pkey_index = qplib_qp->pkey_index;
2079 qp_attr->qkey = qplib_qp->qkey;
2080 qp_attr->ah_attr.type = RDMA_AH_ATTR_TYPE_ROCE;
2081 rdma_ah_set_grh(&qp_attr->ah_attr, NULL, qplib_qp->ah.flow_label,
2082 qplib_qp->ah.host_sgid_index,
2083 qplib_qp->ah.hop_limit,
2084 qplib_qp->ah.traffic_class);
2085 rdma_ah_set_dgid_raw(&qp_attr->ah_attr, qplib_qp->ah.dgid.data);
2086 rdma_ah_set_sl(&qp_attr->ah_attr, qplib_qp->ah.sl);
2087 ether_addr_copy(qp_attr->ah_attr.roce.dmac, qplib_qp->ah.dmac);
2088 qp_attr->path_mtu = __to_ib_mtu(qplib_qp->path_mtu);
2089 qp_attr->timeout = qplib_qp->timeout;
2090 qp_attr->retry_cnt = qplib_qp->retry_cnt;
2091 qp_attr->rnr_retry = qplib_qp->rnr_retry;
2092 qp_attr->min_rnr_timer = qplib_qp->min_rnr_timer;
2093 qp_attr->rq_psn = qplib_qp->rq.psn;
2094 qp_attr->max_rd_atomic = qplib_qp->max_rd_atomic;
2095 qp_attr->sq_psn = qplib_qp->sq.psn;
2096 qp_attr->max_dest_rd_atomic = qplib_qp->max_dest_rd_atomic;
2097 qp_init_attr->sq_sig_type = qplib_qp->sig_type ? IB_SIGNAL_ALL_WR :
2098 IB_SIGNAL_REQ_WR;
2099 qp_attr->dest_qp_num = qplib_qp->dest_qpn;
2100
2101 qp_attr->cap.max_send_wr = qp->qplib_qp.sq.max_wqe;
2102 qp_attr->cap.max_send_sge = qp->qplib_qp.sq.max_sge;
2103 qp_attr->cap.max_recv_wr = qp->qplib_qp.rq.max_wqe;
2104 qp_attr->cap.max_recv_sge = qp->qplib_qp.rq.max_sge;
2105 qp_attr->cap.max_inline_data = qp->qplib_qp.max_inline_data;
2106 qp_init_attr->cap = qp_attr->cap;
2107
2108out:
2109 kfree(qplib_qp);
2110 return rc;
2111}
2112
2113
2114/* Routine for sending QP1 packets for RoCE V1 and V2 */
2115static int bnxt_re_build_qp1_send_v2(struct bnxt_re_qp *qp,
2116 const struct ib_send_wr *wr,
2117 struct bnxt_qplib_swqe *wqe,
2118 int payload_size)
2119{
2120 struct bnxt_re_ah *ah = container_of(ud_wr(wr)->ah, struct bnxt_re_ah,
2121 ib_ah);
2122 struct bnxt_qplib_ah *qplib_ah = &ah->qplib_ah;
2123 const struct ib_gid_attr *sgid_attr = ah->ib_ah.sgid_attr;
2124 struct bnxt_qplib_sge sge;
2125 u8 nw_type;
2126 u16 ether_type;
2127 union ib_gid dgid;
2128 bool is_eth = false;
2129 bool is_vlan = false;
2130 bool is_grh = false;
2131 bool is_udp = false;
2132 u8 ip_version = 0;
2133 u16 vlan_id = 0xFFFF;
2134 void *buf;
2135 int i, rc = 0;
2136
2137 memset(&qp->qp1_hdr, 0, sizeof(qp->qp1_hdr));
2138
2139 rc = rdma_read_gid_l2_fields(sgid_attr, &vlan_id, NULL);
2140 if (rc)
2141 return rc;
2142
2143 /* Get network header type for this GID */
2144 nw_type = rdma_gid_attr_network_type(sgid_attr);
2145 switch (nw_type) {
2146 case RDMA_NETWORK_IPV4:
2147 nw_type = BNXT_RE_ROCEV2_IPV4_PACKET;
2148 break;
2149 case RDMA_NETWORK_IPV6:
2150 nw_type = BNXT_RE_ROCEV2_IPV6_PACKET;
2151 break;
2152 default:
2153 nw_type = BNXT_RE_ROCE_V1_PACKET;
2154 break;
2155 }
2156 memcpy(&dgid.raw, &qplib_ah->dgid, 16);
2157 is_udp = sgid_attr->gid_type == IB_GID_TYPE_ROCE_UDP_ENCAP;
2158 if (is_udp) {
2159 if (ipv6_addr_v4mapped((struct in6_addr *)&sgid_attr->gid)) {
2160 ip_version = 4;
2161 ether_type = ETH_P_IP;
2162 } else {
2163 ip_version = 6;
2164 ether_type = ETH_P_IPV6;
2165 }
2166 is_grh = false;
2167 } else {
2168 ether_type = ETH_P_IBOE;
2169 is_grh = true;
2170 }
2171
2172 is_eth = true;
2173 is_vlan = vlan_id && vlan_id < 0x1000;
2174
2175 ib_ud_header_init(payload_size, !is_eth, is_eth, is_vlan, is_grh,
2176 ip_version, is_udp, 0, &qp->qp1_hdr);
2177
2178 /* ETH */
2179 ether_addr_copy(qp->qp1_hdr.eth.dmac_h, ah->qplib_ah.dmac);
2180 ether_addr_copy(qp->qp1_hdr.eth.smac_h, qp->qplib_qp.smac);
2181
2182
2183 /* For vlan, check the sgid for vlan existence */
2184 if (!is_vlan) {
2185 qp->qp1_hdr.eth.type = cpu_to_be16(ether_type);
2186 } else {
2187 qp->qp1_hdr.vlan.type = cpu_to_be16(ether_type);
2188 qp->qp1_hdr.vlan.tag = cpu_to_be16(vlan_id);
2189 }
2190
2191 if (is_grh || (ip_version == 6)) {
2192 memcpy(qp->qp1_hdr.grh.source_gid.raw, sgid_attr->gid.raw,
2193 sizeof(sgid_attr->gid));
2194 memcpy(qp->qp1_hdr.grh.destination_gid.raw, qplib_ah->dgid.data,
2195 sizeof(sgid_attr->gid));
2196 qp->qp1_hdr.grh.hop_limit = qplib_ah->hop_limit;
2197 }
2198
2199 if (ip_version == 4) {
2200 qp->qp1_hdr.ip4.tos = 0;
2201 qp->qp1_hdr.ip4.id = 0;
2202 qp->qp1_hdr.ip4.frag_off = htons(IP_DF);
2203 qp->qp1_hdr.ip4.ttl = qplib_ah->hop_limit;
2204
2205 memcpy(&qp->qp1_hdr.ip4.saddr, sgid_attr->gid.raw + 12, 4);
2206 memcpy(&qp->qp1_hdr.ip4.daddr, qplib_ah->dgid.data + 12, 4);
2207 qp->qp1_hdr.ip4.check = ib_ud_ip4_csum(&qp->qp1_hdr);
2208 }
2209
2210 if (is_udp) {
2211 qp->qp1_hdr.udp.dport = htons(ROCE_V2_UDP_DPORT);
2212 qp->qp1_hdr.udp.sport = htons(0x8CD1);
2213 qp->qp1_hdr.udp.csum = 0;
2214 }
2215
2216 /* BTH */
2217 if (wr->opcode == IB_WR_SEND_WITH_IMM) {
2218 qp->qp1_hdr.bth.opcode = IB_OPCODE_UD_SEND_ONLY_WITH_IMMEDIATE;
2219 qp->qp1_hdr.immediate_present = 1;
2220 } else {
2221 qp->qp1_hdr.bth.opcode = IB_OPCODE_UD_SEND_ONLY;
2222 }
2223 if (wr->send_flags & IB_SEND_SOLICITED)
2224 qp->qp1_hdr.bth.solicited_event = 1;
2225
2226 qp->qp1_hdr.bth.pad_count = (4 - payload_size) & 3;
2227
2228 /* P_key for QP1 is for all members */
2229 qp->qp1_hdr.bth.pkey = cpu_to_be16(0xFFFF);
2230 qp->qp1_hdr.bth.destination_qpn = IB_QP1;
2231 qp->qp1_hdr.bth.ack_req = 0;
2232 qp->send_psn++;
2233 qp->send_psn &= BTH_PSN_MASK;
2234 qp->qp1_hdr.bth.psn = cpu_to_be32(qp->send_psn);
2235
2236 /* DETH */
2237 qp->qp1_hdr.deth.qkey = cpu_to_be32(IB_QP1_QKEY);
2238 qp->qp1_hdr.deth.source_qpn = IB_QP1;
2239
2240 /* Pack the QP1 header into the transmit buffer */
2241 buf = bnxt_qplib_get_qp1_sq_buf(&qp->qplib_qp, &sge);
2242 if (buf) {
2243 ib_ud_header_pack(&qp->qp1_hdr, buf);
2244 for (i = wqe->num_sge; i; i--) {
2245 wqe->sg_list[i].addr = wqe->sg_list[i - 1].addr;
2246 wqe->sg_list[i].lkey = wqe->sg_list[i - 1].lkey;
2247 wqe->sg_list[i].size = wqe->sg_list[i - 1].size;
2248 }
2249
2250 /*
2251 * Max header buf size for IPV6 RoCE V2 is 86,
2252 * which is the same as the QP1 SQ header buffer.
2253 * Header buf size for IPV4 RoCE V2 can be 66:
2254 * ETH(14) + VLAN(4) + IP(20) + UDP(8) + BTH(20).
2255 * Subtract 20 bytes from the QP1 SQ header buf size.
2256 */
2257 if (is_udp && ip_version == 4)
2258 sge.size -= 20;
2259
2260 /* Max header buf size for RoCE V1 is 78:
2261 * ETH(14) + VLAN(4) + GRH(40) + BTH(20).
2262 * Subtract 8 bytes from the QP1 SQ header buf size.
2263 */
2264 if (!is_udp)
2265 sge.size -= 8;
2266
2267 /* Subtract 4 bytes for non-VLAN packets */
2268 if (!is_vlan)
2269 sge.size -= 4;
2270
2271 wqe->sg_list[0].addr = sge.addr;
2272 wqe->sg_list[0].lkey = sge.lkey;
2273 wqe->sg_list[0].size = sge.size;
2274 wqe->num_sge++;
2275
2276 } else {
2277 ibdev_err(&qp->rdev->ibdev, "QP1 buffer is empty!");
2278 rc = -ENOMEM;
2279 }
2280 return rc;
2281}
2282
2283
2284/* For the MAD layer, it only provides the recv SGE the size of
2285 * ib_grh + MAD datagram. No Ethernet headers, Ethertype, BTH, DETH,
2286 * nor RoCE iCRC. The Cu+ solution must provide buffer for the entire
2287 * receive packet (334 bytes) with no VLAN and then copy the GRH
2288 * and the MAD datagram out to the provided SGE. */
2289static int bnxt_re_build_qp1_shadow_qp_recv(struct bnxt_re_qp *qp,
2290 const struct ib_recv_wr *wr,
2291 struct bnxt_qplib_swqe *wqe,
2292 int payload_size)
2293{
2294 struct bnxt_re_sqp_entries *sqp_entry;
2295 struct bnxt_qplib_sge ref, sge;
2296 struct bnxt_re_dev *rdev;
2297 u32 rq_prod_index;
2298
2299 rdev = qp->rdev;
2300
2301 rq_prod_index = bnxt_qplib_get_rq_prod_index(&qp->qplib_qp);
2302
2303 if (!bnxt_qplib_get_qp1_rq_buf(&qp->qplib_qp, &sge))
2304 return -ENOMEM;
2305
2306 /* Create 1 SGE to receive the entire
2307 * ethernet packet
2308 */
2309 /* Save the summary SGE */
2310 ref.addr = wqe->sg_list[0].addr;
2311 ref.lkey = wqe->sg_list[0].lkey;
2312 ref.size = wqe->sg_list[0].size;
2313
2314 sqp_entry = &rdev->gsi_ctx.sqp_tbl[rq_prod_index];
2315
2316 /* SGE 1 */
2317 wqe->sg_list[0].addr = sge.addr;
2318 wqe->sg_list[0].lkey = sge.lkey;
2319 wqe->sg_list[0].size = BNXT_QPLIB_MAX_QP1_RQ_HDR_SIZE_V2;
2320 sge.size -= wqe->sg_list[0].size;
2321
2322 sqp_entry->sge.addr = ref.addr;
2323 sqp_entry->sge.lkey = ref.lkey;
2324 sqp_entry->sge.size = ref.size;
2325 /* Store the wrid for reporting completion */
2326 sqp_entry->wrid = wqe->wr_id;
2327 /* Change the wr_id to the sqp table index for lookup at poll time */
2328 wqe->wr_id = rq_prod_index;
2329 return 0;
2330}
2331
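/* True for datagram-style QPs (UD and GSI), which carry the AH,
 * q_key and destination QP number in each work request.
 */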
2332static int is_ud_qp(struct bnxt_re_qp *qp)
2333{
2334 return (qp->qplib_qp.type == CMDQ_CREATE_QP_TYPE_UD ||
2335 qp->qplib_qp.type == CMDQ_CREATE_QP_TYPE_GSI);
2336}
2337
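/* Translate an IB send/send-with-imm/send-with-inv work request
 * into a qplib SWQE, including the UD address handle fields.
 */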
2338static int bnxt_re_build_send_wqe(struct bnxt_re_qp *qp,
2339 const struct ib_send_wr *wr,
2340 struct bnxt_qplib_swqe *wqe)
2341{
2342 struct bnxt_re_ah *ah = NULL;
2343
2344 if (is_ud_qp(qp)) {
2345 ah = container_of(ud_wr(wr)->ah, struct bnxt_re_ah, ib_ah);
2346 wqe->send.q_key = ud_wr(wr)->remote_qkey;
2347 wqe->send.dst_qp = ud_wr(wr)->remote_qpn;
2348 wqe->send.avid = ah->qplib_ah.id;
2349 }
2350 switch (wr->opcode) {
2351 case IB_WR_SEND:
2352 wqe->type = BNXT_QPLIB_SWQE_TYPE_SEND;
2353 break;
2354 case IB_WR_SEND_WITH_IMM:
2355 wqe->type = BNXT_QPLIB_SWQE_TYPE_SEND_WITH_IMM;
2356 wqe->send.imm_data = wr->ex.imm_data;
2357 break;
2358 case IB_WR_SEND_WITH_INV:
2359 wqe->type = BNXT_QPLIB_SWQE_TYPE_SEND_WITH_INV;
2360 wqe->send.inv_key = wr->ex.invalidate_rkey;
2361 break;
2362 default:
2363 return -EINVAL;
2364 }
2365 if (wr->send_flags & IB_SEND_SIGNALED)
2366 wqe->flags |= BNXT_QPLIB_SWQE_FLAGS_SIGNAL_COMP;
2367 if (wr->send_flags & IB_SEND_FENCE)
2368 wqe->flags |= BNXT_QPLIB_SWQE_FLAGS_UC_FENCE;
2369 if (wr->send_flags & IB_SEND_SOLICITED)
2370 wqe->flags |= BNXT_QPLIB_SWQE_FLAGS_SOLICIT_EVENT;
2371 if (wr->send_flags & IB_SEND_INLINE)
2372 wqe->flags |= BNXT_QPLIB_SWQE_FLAGS_INLINE;
2373
2374 return 0;
2375}
2376
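/* Translate RDMA WRITE/WRITE_WITH_IMM/READ work requests into a
 * qplib SWQE carrying the remote VA and R_Key.
 */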
2377static int bnxt_re_build_rdma_wqe(const struct ib_send_wr *wr,
2378 struct bnxt_qplib_swqe *wqe)
2379{
2380 switch (wr->opcode) {
2381 case IB_WR_RDMA_WRITE:
2382 wqe->type = BNXT_QPLIB_SWQE_TYPE_RDMA_WRITE;
2383 break;
2384 case IB_WR_RDMA_WRITE_WITH_IMM:
2385 wqe->type = BNXT_QPLIB_SWQE_TYPE_RDMA_WRITE_WITH_IMM;
2386 wqe->rdma.imm_data = wr->ex.imm_data;
2387 break;
2388 case IB_WR_RDMA_READ:
2389 wqe->type = BNXT_QPLIB_SWQE_TYPE_RDMA_READ;
2390 wqe->rdma.inv_key = wr->ex.invalidate_rkey;
2391 break;
2392 default:
2393 return -EINVAL;
2394 }
2395 wqe->rdma.remote_va = rdma_wr(wr)->remote_addr;
2396 wqe->rdma.r_key = rdma_wr(wr)->rkey;
2397 if (wr->send_flags & IB_SEND_SIGNALED)
2398 wqe->flags |= BNXT_QPLIB_SWQE_FLAGS_SIGNAL_COMP;
2399 if (wr->send_flags & IB_SEND_FENCE)
2400 wqe->flags |= BNXT_QPLIB_SWQE_FLAGS_UC_FENCE;
2401 if (wr->send_flags & IB_SEND_SOLICITED)
2402 wqe->flags |= BNXT_QPLIB_SWQE_FLAGS_SOLICIT_EVENT;
2403 if (wr->send_flags & IB_SEND_INLINE)
2404 wqe->flags |= BNXT_QPLIB_SWQE_FLAGS_INLINE;
2405
2406 return 0;
2407}
2408
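/* Translate atomic compare-and-swap / fetch-and-add work requests
 * into a qplib SWQE.
 */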
2409static int bnxt_re_build_atomic_wqe(const struct ib_send_wr *wr,
2410 struct bnxt_qplib_swqe *wqe)
2411{
2412 switch (wr->opcode) {
2413 case IB_WR_ATOMIC_CMP_AND_SWP:
2414 wqe->type = BNXT_QPLIB_SWQE_TYPE_ATOMIC_CMP_AND_SWP;
2415 wqe->atomic.cmp_data = atomic_wr(wr)->compare_add;
2416 wqe->atomic.swap_data = atomic_wr(wr)->swap;
2417 break;
2418 case IB_WR_ATOMIC_FETCH_AND_ADD:
2419 wqe->type = BNXT_QPLIB_SWQE_TYPE_ATOMIC_FETCH_AND_ADD;
2420 wqe->atomic.cmp_data = atomic_wr(wr)->compare_add;
2421 break;
2422 default:
2423 return -EINVAL;
2424 }
2425 wqe->atomic.remote_va = atomic_wr(wr)->remote_addr;
2426 wqe->atomic.r_key = atomic_wr(wr)->rkey;
2427 if (wr->send_flags & IB_SEND_SIGNALED)
2428 wqe->flags |= BNXT_QPLIB_SWQE_FLAGS_SIGNAL_COMP;
2429 if (wr->send_flags & IB_SEND_FENCE)
2430 wqe->flags |= BNXT_QPLIB_SWQE_FLAGS_UC_FENCE;
2431 if (wr->send_flags & IB_SEND_SOLICITED)
2432 wqe->flags |= BNXT_QPLIB_SWQE_FLAGS_SOLICIT_EVENT;
2433 return 0;
2434}
2435
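/* Build a local-invalidate SWQE for the given invalidate_rkey. */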
2436static int bnxt_re_build_inv_wqe(const struct ib_send_wr *wr,
2437 struct bnxt_qplib_swqe *wqe)
2438{
2439 wqe->type = BNXT_QPLIB_SWQE_TYPE_LOCAL_INV;
2440 wqe->local_inv.inv_l_key = wr->ex.invalidate_rkey;
2441
2442 /* Need unconditional fence for local invalidate
2443 * opcode to work as expected.
2444 */
2445 wqe->flags |= BNXT_QPLIB_SWQE_FLAGS_UC_FENCE;
2446
2447 if (wr->send_flags & IB_SEND_SIGNALED)
2448 wqe->flags |= BNXT_QPLIB_SWQE_FLAGS_SIGNAL_COMP;
2449 if (wr->send_flags & IB_SEND_SOLICITED)
2450 wqe->flags |= BNXT_QPLIB_SWQE_FLAGS_SOLICIT_EVENT;
2451
2452 return 0;
2453}
2454
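/* Build a fast-register MR SWQE from the page list captured by
 * bnxt_re_map_mr_sg().
 */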
2455static int bnxt_re_build_reg_wqe(const struct ib_reg_wr *wr,
2456 struct bnxt_qplib_swqe *wqe)
2457{
2458 struct bnxt_re_mr *mr = container_of(wr->mr, struct bnxt_re_mr, ib_mr);
2459 struct bnxt_qplib_frpl *qplib_frpl = &mr->qplib_frpl;
2460 int access = wr->access;
2461
2462 wqe->frmr.pbl_ptr = (__le64 *)qplib_frpl->hwq.pbl_ptr[0];
2463 wqe->frmr.pbl_dma_ptr = qplib_frpl->hwq.pbl_dma_ptr[0];
2464 wqe->frmr.page_list = mr->pages;
2465 wqe->frmr.page_list_len = mr->npages;
2466 wqe->frmr.levels = qplib_frpl->hwq.level;
2467 wqe->type = BNXT_QPLIB_SWQE_TYPE_REG_MR;
2468
2469 /* Need unconditional fence for reg_mr
2470 * opcode to function as expected.
2471 */
2473 wqe->flags |= BNXT_QPLIB_SWQE_FLAGS_UC_FENCE;
2474
2475 if (wr->wr.send_flags & IB_SEND_SIGNALED)
2476 wqe->flags |= BNXT_QPLIB_SWQE_FLAGS_SIGNAL_COMP;
2477
2478 if (access & IB_ACCESS_LOCAL_WRITE)
2479 wqe->frmr.access_cntl |= SQ_FR_PMR_ACCESS_CNTL_LOCAL_WRITE;
2480 if (access & IB_ACCESS_REMOTE_READ)
2481 wqe->frmr.access_cntl |= SQ_FR_PMR_ACCESS_CNTL_REMOTE_READ;
2482 if (access & IB_ACCESS_REMOTE_WRITE)
2483 wqe->frmr.access_cntl |= SQ_FR_PMR_ACCESS_CNTL_REMOTE_WRITE;
2484 if (access & IB_ACCESS_REMOTE_ATOMIC)
2485 wqe->frmr.access_cntl |= SQ_FR_PMR_ACCESS_CNTL_REMOTE_ATOMIC;
2486 if (access & IB_ACCESS_MW_BIND)
2487 wqe->frmr.access_cntl |= SQ_FR_PMR_ACCESS_CNTL_WINDOW_BIND;
2488
2489 wqe->frmr.l_key = wr->key;
2490 wqe->frmr.length = wr->mr->length;
2491 wqe->frmr.pbl_pg_sz_log = (wr->mr->page_size >> PAGE_SHIFT_4K) - 1;
2492 wqe->frmr.va = wr->mr->iova;
2493 return 0;
2494}
2495
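/* Copy the payload of an IB_SEND_INLINE work request into the
 * SWQE's inline buffer; returns the total inline length, or
 * -EINVAL if it exceeds BNXT_QPLIB_SWQE_MAX_INLINE_LENGTH.
 */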
2496static int bnxt_re_copy_inline_data(struct bnxt_re_dev *rdev,
2497 const struct ib_send_wr *wr,
2498 struct bnxt_qplib_swqe *wqe)
2499{
2500 /* Copy the inline data to the data field */
2501 u8 *in_data;
2502 u32 i, sge_len;
2503 void *sge_addr;
2504
2505 in_data = wqe->inline_data;
2506 for (i = 0; i < wr->num_sge; i++) {
2507 sge_addr = (void *)(unsigned long)
2508 wr->sg_list[i].addr;
2509 sge_len = wr->sg_list[i].length;
2510
2511 if ((sge_len + wqe->inline_len) >
2512 BNXT_QPLIB_SWQE_MAX_INLINE_LENGTH) {
2513 ibdev_err(&rdev->ibdev,
2514 "Inline data size requested > supported value");
2515 return -EINVAL;
2516 }
2517
2519 memcpy(in_data, sge_addr, sge_len);
2520 in_data += wr->sg_list[i].length;
2521 wqe->inline_len += wr->sg_list[i].length;
2522 }
2523 return wqe->inline_len;
2524}
2525
2526static int bnxt_re_copy_wr_payload(struct bnxt_re_dev *rdev,
2527 const struct ib_send_wr *wr,
2528 struct bnxt_qplib_swqe *wqe)
2529{
2530 int payload_sz = 0;
2531
2532 if (wr->send_flags & IB_SEND_INLINE)
2533 payload_sz = bnxt_re_copy_inline_data(rdev, wr, wqe);
2534 else
2535 payload_sz = bnxt_re_build_sgl(wr->sg_list, wqe->sg_list,
2536 wqe->num_sge);
2537
2538 return payload_sz;
2539}
2540
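/* HW workaround: after BNXT_RE_UD_QP_HW_STALL WQEs on a UD, GSI or
 * raw-ethertype QP, nudge the QP with a modify to RTS to avoid a
 * send queue stall, then restart the WQE count.
 */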
2541static void bnxt_ud_qp_hw_stall_workaround(struct bnxt_re_qp *qp)
2542{
2543 if ((qp->ib_qp.qp_type == IB_QPT_UD ||
2544 qp->ib_qp.qp_type == IB_QPT_GSI ||
2545 qp->ib_qp.qp_type == IB_QPT_RAW_ETHERTYPE) &&
2546 qp->qplib_qp.wqe_cnt == BNXT_RE_UD_QP_HW_STALL) {
2547 int qp_attr_mask;
2548 struct ib_qp_attr qp_attr;
2549
2550 qp_attr_mask = IB_QP_STATE;
2551 qp_attr.qp_state = IB_QPS_RTS;
2552 bnxt_re_modify_qp(&qp->ib_qp, &qp_attr, qp_attr_mask, NULL);
2553 qp->qplib_qp.wqe_cnt = 0;
2554 }
2555}
2556
2557static int bnxt_re_post_send_shadow_qp(struct bnxt_re_dev *rdev,
2558 struct bnxt_re_qp *qp,
2559 const struct ib_send_wr *wr)
2560{
2561 int rc = 0, payload_sz = 0;
2562 unsigned long flags;
2563
2564 spin_lock_irqsave(&qp->sq_lock, flags);
2565 while (wr) {
2566 struct bnxt_qplib_swqe wqe = {};
2567
2568 /* Common */
2569 wqe.num_sge = wr->num_sge;
2570 if (wr->num_sge > qp->qplib_qp.sq.max_sge) {
2571 ibdev_err(&rdev->ibdev,
2572 "Limit exceeded for Send SGEs");
2573 rc = -EINVAL;
2574 goto bad;
2575 }
2576
2577 payload_sz = bnxt_re_copy_wr_payload(qp->rdev, wr, &wqe);
2578 if (payload_sz < 0) {
2579 rc = -EINVAL;
2580 goto bad;
2581 }
2582 wqe.wr_id = wr->wr_id;
2583
2584 wqe.type = BNXT_QPLIB_SWQE_TYPE_SEND;
2585
2586 rc = bnxt_re_build_send_wqe(qp, wr, &wqe);
2587 if (!rc)
2588 rc = bnxt_qplib_post_send(&qp->qplib_qp, &wqe);
2589bad:
2590 if (rc) {
2591 ibdev_err(&rdev->ibdev,
2592 "Post send failed opcode = %#x rc = %d",
2593 wr->opcode, rc);
2594 break;
2595 }
2596 wr = wr->next;
2597 }
2598 bnxt_qplib_post_send_db(&qp->qplib_qp);
2599 bnxt_ud_qp_hw_stall_workaround(qp);
2600 spin_unlock_irqrestore(&qp->sq_lock, flags);
2601 return rc;
2602}
2603
2604int bnxt_re_post_send(struct ib_qp *ib_qp, const struct ib_send_wr *wr,
2605 const struct ib_send_wr **bad_wr)
2606{
2607 struct bnxt_re_qp *qp = container_of(ib_qp, struct bnxt_re_qp, ib_qp);
2608 struct bnxt_qplib_swqe wqe;
2609 int rc = 0, payload_sz = 0;
2610 unsigned long flags;
2611
2612 spin_lock_irqsave(&qp->sq_lock, flags);
2613 while (wr) {
2614 /* House keeping */
2615 memset(&wqe, 0, sizeof(wqe));
2616
2617 /* Common */
2618 wqe.num_sge = wr->num_sge;
2619 if (wr->num_sge > qp->qplib_qp.sq.max_sge) {
2620 ibdev_err(&qp->rdev->ibdev,
2621 "Limit exceeded for Send SGEs");
2622 rc = -EINVAL;
2623 goto bad;
2624 }
2625
2626 payload_sz = bnxt_re_copy_wr_payload(qp->rdev, wr, &wqe);
2627 if (payload_sz < 0) {
2628 rc = -EINVAL;
2629 goto bad;
2630 }
2631 wqe.wr_id = wr->wr_id;
2632
2633 switch (wr->opcode) {
2634 case IB_WR_SEND:
2635 case IB_WR_SEND_WITH_IMM:
2636 if (qp->qplib_qp.type == CMDQ_CREATE_QP1_TYPE_GSI) {
2637 rc = bnxt_re_build_qp1_send_v2(qp, wr, &wqe,
2638 payload_sz);
2639 if (rc)
2640 goto bad;
2641 wqe.rawqp1.lflags |=
2642 SQ_SEND_RAWETH_QP1_LFLAGS_ROCE_CRC;
2643 }
2644 switch (wr->send_flags) {
2645 case IB_SEND_IP_CSUM:
2646 wqe.rawqp1.lflags |=
2647 SQ_SEND_RAWETH_QP1_LFLAGS_IP_CHKSUM;
2648 break;
2649 default:
2650 break;
2651 }
2652 fallthrough;
2653 case IB_WR_SEND_WITH_INV:
2654 rc = bnxt_re_build_send_wqe(qp, wr, &wqe);
2655 break;
2656 case IB_WR_RDMA_WRITE:
2657 case IB_WR_RDMA_WRITE_WITH_IMM:
2658 case IB_WR_RDMA_READ:
2659 rc = bnxt_re_build_rdma_wqe(wr, &wqe);
2660 break;
2661 case IB_WR_ATOMIC_CMP_AND_SWP:
2662 case IB_WR_ATOMIC_FETCH_AND_ADD:
2663 rc = bnxt_re_build_atomic_wqe(wr, &wqe);
2664 break;
2665 case IB_WR_RDMA_READ_WITH_INV:
2666 ibdev_err(&qp->rdev->ibdev,
2667 "RDMA Read with Invalidate is not supported");
2668 rc = -EINVAL;
2669 goto bad;
2670 case IB_WR_LOCAL_INV:
2671 rc = bnxt_re_build_inv_wqe(wr, &wqe);
2672 break;
2673 case IB_WR_REG_MR:
2674 rc = bnxt_re_build_reg_wqe(reg_wr(wr), &wqe);
2675 break;
2676 default:
2677 /* Unsupported WRs */
2678 ibdev_err(&qp->rdev->ibdev,
2679 "WR (%#x) is not supported", wr->opcode);
2680 rc = -EINVAL;
2681 goto bad;
2682 }
2683 if (!rc)
2684 rc = bnxt_qplib_post_send(&qp->qplib_qp, &wqe);
2685bad:
2686 if (rc) {
2687 ibdev_err(&qp->rdev->ibdev,
2688 "post_send failed op:%#x qps = %#x rc = %d\n",
2689 wr->opcode, qp->qplib_qp.state, rc);
2690 *bad_wr = wr;
2691 break;
2692 }
2693 wr = wr->next;
2694 }
2695 bnxt_qplib_post_send_db(&qp->qplib_qp);
2696 bnxt_ud_qp_hw_stall_workaround(qp);
2697 spin_unlock_irqrestore(&qp->sq_lock, flags);
2698
2699 return rc;
2700}
2701
2702static int bnxt_re_post_recv_shadow_qp(struct bnxt_re_dev *rdev,
2703 struct bnxt_re_qp *qp,
2704 const struct ib_recv_wr *wr)
2705{
2706 struct bnxt_qplib_swqe wqe;
2707 int rc = 0;
2708
2709 memset(&wqe, 0, sizeof(wqe));
2710 while (wr) {
2711 /* House keeping */
2712 memset(&wqe, 0, sizeof(wqe));
2713
2714 /* Common */
2715 wqe.num_sge = wr->num_sge;
2716 if (wr->num_sge > qp->qplib_qp.rq.max_sge) {
2717 ibdev_err(&rdev->ibdev,
2718 "Limit exceeded for Receive SGEs");
2719 rc = -EINVAL;
2720 break;
2721 }
2722 bnxt_re_build_sgl(wr->sg_list, wqe.sg_list, wr->num_sge);
2723 wqe.wr_id = wr->wr_id;
2724 wqe.type = BNXT_QPLIB_SWQE_TYPE_RECV;
2725
2726 rc = bnxt_qplib_post_recv(&qp->qplib_qp, &wqe);
2727 if (rc)
2728 break;
2729
2730 wr = wr->next;
2731 }
2732 if (!rc)
2733 bnxt_qplib_post_recv_db(&qp->qplib_qp);
2734 return rc;
2735}
2736
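/* Post a chain of receive work requests. For a GSI QP backed by a
 * shadow QP, the first SGE is redirected to the QP1 header buffer
 * via bnxt_re_build_qp1_shadow_qp_recv().
 */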
2737int bnxt_re_post_recv(struct ib_qp *ib_qp, const struct ib_recv_wr *wr,
2738 const struct ib_recv_wr **bad_wr)
2739{
2740 struct bnxt_re_qp *qp = container_of(ib_qp, struct bnxt_re_qp, ib_qp);
2741 struct bnxt_qplib_swqe wqe;
2742 int rc = 0, payload_sz = 0;
2743 unsigned long flags;
2744 u32 count = 0;
2745
2746 spin_lock_irqsave(&qp->rq_lock, flags);
2747 while (wr) {
2748 /* House keeping */
2749 memset(&wqe, 0, sizeof(wqe));
2750
2751 /* Common */
2752 wqe.num_sge = wr->num_sge;
2753 if (wr->num_sge > qp->qplib_qp.rq.max_sge) {
2754 ibdev_err(&qp->rdev->ibdev,
2755 "Limit exceeded for Receive SGEs");
2756 rc = -EINVAL;
2757 *bad_wr = wr;
2758 break;
2759 }
2760
2761 payload_sz = bnxt_re_build_sgl(wr->sg_list, wqe.sg_list,
2762 wr->num_sge);
2763 wqe.wr_id = wr->wr_id;
2764 wqe.type = BNXT_QPLIB_SWQE_TYPE_RECV;
2765
2766 if (ib_qp->qp_type == IB_QPT_GSI &&
2767 qp->qplib_qp.type != CMDQ_CREATE_QP_TYPE_GSI)
2768 rc = bnxt_re_build_qp1_shadow_qp_recv(qp, wr, &wqe,
2769 payload_sz);
2770 if (!rc)
2771 rc = bnxt_qplib_post_recv(&qp->qplib_qp, &wqe);
2772 if (rc) {
2773 *bad_wr = wr;
2774 break;
2775 }
2776
2777 /* Ring the doorbell after a batch of BNXT_RE_RQ_WQE_THRESHOLD RQEs */
2778 if (++count >= BNXT_RE_RQ_WQE_THRESHOLD) {
2779 bnxt_qplib_post_recv_db(&qp->qplib_qp);
2780 count = 0;
2781 }
2782
2783 wr = wr->next;
2784 }
2785
2786 if (count)
2787 bnxt_qplib_post_recv_db(&qp->qplib_qp);
2788
2789 spin_unlock_irqrestore(&qp->rq_lock, flags);
2790
2791 return rc;
2792}
2793
2794/* Completion Queues */
2795int bnxt_re_destroy_cq(struct ib_cq *ib_cq, struct ib_udata *udata)
2796{
2797 struct bnxt_re_cq *cq;
2798 struct bnxt_qplib_nq *nq;
2799 struct bnxt_re_dev *rdev;
2800
2801 cq = container_of(ib_cq, struct bnxt_re_cq, ib_cq);
2802 rdev = cq->rdev;
2803 nq = cq->qplib_cq.nq;
2804
2805 bnxt_qplib_destroy_cq(&rdev->qplib_res, &cq->qplib_cq);
2806 ib_umem_release(cq->umem);
2807
2808 atomic_dec(&rdev->cq_count);
2809 nq->budget--;
2810 kfree(cq->cql);
2811 return 0;
2812}
2813
2814int bnxt_re_create_cq(struct ib_cq *ibcq, const struct ib_cq_init_attr *attr,
2815 struct ib_udata *udata)
2816{
2817 struct bnxt_re_dev *rdev = to_bnxt_re_dev(ibcq->device, ibdev);
2818 struct bnxt_qplib_dev_attr *dev_attr = &rdev->dev_attr;
2819 struct bnxt_re_cq *cq = container_of(ibcq, struct bnxt_re_cq, ib_cq);
2820 int rc, entries;
2821 int cqe = attr->cqe;
2822 struct bnxt_qplib_nq *nq = NULL;
2823 unsigned int nq_alloc_cnt;
2824
2825 if (attr->flags)
2826 return -EOPNOTSUPP;
2827
2828 /* Validate CQ fields */
2829 if (cqe < 1 || cqe > dev_attr->max_cq_wqes) {
2830 ibdev_err(&rdev->ibdev, "Failed to create CQ - max exceeded");
2831 return -EINVAL;
2832 }
2833
2834 cq->rdev = rdev;
2835 cq->qplib_cq.cq_handle = (u64)(unsigned long)(&cq->qplib_cq);
2836
2837 entries = roundup_pow_of_two(cqe + 1);
2838 if (entries > dev_attr->max_cq_wqes + 1)
2839 entries = dev_attr->max_cq_wqes + 1;
2840
2841 cq->qplib_cq.sg_info.pgsize = PAGE_SIZE;
2842 cq->qplib_cq.sg_info.pgshft = PAGE_SHIFT;
2843 if (udata) {
2844 struct bnxt_re_cq_req req;
2845 struct bnxt_re_ucontext *uctx = rdma_udata_to_drv_context(
2846 udata, struct bnxt_re_ucontext, ib_uctx);
2847 if (ib_copy_from_udata(&req, udata, sizeof(req))) {
2848 rc = -EFAULT;
2849 goto fail;
2850 }
2851
2852 cq->umem = ib_umem_get(&rdev->ibdev, req.cq_va,
2853 entries * sizeof(struct cq_base),
2854 IB_ACCESS_LOCAL_WRITE);
2855 if (IS_ERR(cq->umem)) {
2856 rc = PTR_ERR(cq->umem);
2857 goto fail;
2858 }
2859 cq->qplib_cq.sg_info.umem = cq->umem;
2860 cq->qplib_cq.dpi = &uctx->dpi;
2861 } else {
2862 cq->max_cql = min_t(u32, entries, MAX_CQL_PER_POLL);
2863 cq->cql = kcalloc(cq->max_cql, sizeof(struct bnxt_qplib_cqe),
2864 GFP_KERNEL);
2865 if (!cq->cql) {
2866 rc = -ENOMEM;
2867 goto fail;
2868 }
2869
2870 cq->qplib_cq.dpi = &rdev->dpi_privileged;
2871 }
2872
2873 /* Allocate the NQs in a round-robin fashion; nq_alloc_cnt
2874 * is used to derive the NQ index.
2875 */
2876 nq_alloc_cnt = atomic_inc_return(&rdev->nq_alloc_cnt);
2877 nq = &rdev->nq[nq_alloc_cnt % (rdev->num_msix - 1)];
2878 cq->qplib_cq.max_wqe = entries;
2879 cq->qplib_cq.cnq_hw_ring_id = nq->ring_id;
2880 cq->qplib_cq.nq = nq;
2881
2882 rc = bnxt_qplib_create_cq(&rdev->qplib_res, &cq->qplib_cq);
2883 if (rc) {
2884 ibdev_err(&rdev->ibdev, "Failed to create HW CQ");
2885 goto fail;
2886 }
2887
2888 cq->ib_cq.cqe = entries;
2889 cq->cq_period = cq->qplib_cq.period;
2890 nq->budget++;
2891
2892 atomic_inc(&rdev->cq_count);
2893 spin_lock_init(&cq->cq_lock);
2894
2895 if (udata) {
2896 struct bnxt_re_cq_resp resp;
2897
2898 resp.cqid = cq->qplib_cq.id;
2899 resp.tail = cq->qplib_cq.hwq.cons;
2900 resp.phase = cq->qplib_cq.period;
2901 resp.rsvd = 0;
2902 rc = ib_copy_to_udata(udata, &resp, sizeof(resp));
2903 if (rc) {
2904 ibdev_err(&rdev->ibdev, "Failed to copy CQ udata");
2905 bnxt_qplib_destroy_cq(&rdev->qplib_res, &cq->qplib_cq);
2906 goto c2fail;
2907 }
2908 }
2909
2910 return 0;
2911
2912c2fail:
2913 ib_umem_release(cq->umem);
2914fail:
2915 kfree(cq->cql);
2916 return rc;
2917}
2918
2919static u8 __req_to_ib_wc_status(u8 qstatus)
2920{
2921 switch (qstatus) {
2922 case CQ_REQ_STATUS_OK:
2923 return IB_WC_SUCCESS;
2924 case CQ_REQ_STATUS_BAD_RESPONSE_ERR:
2925 return IB_WC_BAD_RESP_ERR;
2926 case CQ_REQ_STATUS_LOCAL_LENGTH_ERR:
2927 return IB_WC_LOC_LEN_ERR;
2928 case CQ_REQ_STATUS_LOCAL_QP_OPERATION_ERR:
2929 return IB_WC_LOC_QP_OP_ERR;
2930 case CQ_REQ_STATUS_LOCAL_PROTECTION_ERR:
2931 return IB_WC_LOC_PROT_ERR;
2932 case CQ_REQ_STATUS_MEMORY_MGT_OPERATION_ERR:
2933 return IB_WC_GENERAL_ERR;
2934 case CQ_REQ_STATUS_REMOTE_INVALID_REQUEST_ERR:
2935 return IB_WC_REM_INV_REQ_ERR;
2936 case CQ_REQ_STATUS_REMOTE_ACCESS_ERR:
2937 return IB_WC_REM_ACCESS_ERR;
2938 case CQ_REQ_STATUS_REMOTE_OPERATION_ERR:
2939 return IB_WC_REM_OP_ERR;
2940 case CQ_REQ_STATUS_RNR_NAK_RETRY_CNT_ERR:
2941 return IB_WC_RNR_RETRY_EXC_ERR;
2942 case CQ_REQ_STATUS_TRANSPORT_RETRY_CNT_ERR:
2943 return IB_WC_RETRY_EXC_ERR;
2944 case CQ_REQ_STATUS_WORK_REQUEST_FLUSHED_ERR:
2945 return IB_WC_WR_FLUSH_ERR;
2946 default:
2947 return IB_WC_GENERAL_ERR;
2948 }
2950}
2951
2952static u8 __rawqp1_to_ib_wc_status(u8 qstatus)
2953{
2954 switch (qstatus) {
2955 case CQ_RES_RAWETH_QP1_STATUS_OK:
2956 return IB_WC_SUCCESS;
2957 case CQ_RES_RAWETH_QP1_STATUS_LOCAL_ACCESS_ERROR:
2958 return IB_WC_LOC_ACCESS_ERR;
2959 case CQ_RES_RAWETH_QP1_STATUS_HW_LOCAL_LENGTH_ERR:
2960 return IB_WC_LOC_LEN_ERR;
2961 case CQ_RES_RAWETH_QP1_STATUS_LOCAL_PROTECTION_ERR:
2962 return IB_WC_LOC_PROT_ERR;
2963 case CQ_RES_RAWETH_QP1_STATUS_LOCAL_QP_OPERATION_ERR:
2964 return IB_WC_LOC_QP_OP_ERR;
2965 case CQ_RES_RAWETH_QP1_STATUS_MEMORY_MGT_OPERATION_ERR:
2966 return IB_WC_GENERAL_ERR;
2967 case CQ_RES_RAWETH_QP1_STATUS_WORK_REQUEST_FLUSHED_ERR:
2968 return IB_WC_WR_FLUSH_ERR;
2969 case CQ_RES_RAWETH_QP1_STATUS_HW_FLUSH_ERR:
2970 return IB_WC_WR_FLUSH_ERR;
2971 default:
2972 return IB_WC_GENERAL_ERR;
2973 }
2974}
2975
2976static u8 __rc_to_ib_wc_status(u8 qstatus)
2977{
2978 switch (qstatus) {
2979 case CQ_RES_RC_STATUS_OK:
2980 return IB_WC_SUCCESS;
2981 case CQ_RES_RC_STATUS_LOCAL_ACCESS_ERROR:
2982 return IB_WC_LOC_ACCESS_ERR;
2983 case CQ_RES_RC_STATUS_LOCAL_LENGTH_ERR:
2984 return IB_WC_LOC_LEN_ERR;
2985 case CQ_RES_RC_STATUS_LOCAL_PROTECTION_ERR:
2986 return IB_WC_LOC_PROT_ERR;
2987 case CQ_RES_RC_STATUS_LOCAL_QP_OPERATION_ERR:
2988 return IB_WC_LOC_QP_OP_ERR;
2989 case CQ_RES_RC_STATUS_MEMORY_MGT_OPERATION_ERR:
2990 return IB_WC_GENERAL_ERR;
2991 case CQ_RES_RC_STATUS_REMOTE_INVALID_REQUEST_ERR:
2992 return IB_WC_REM_INV_REQ_ERR;
2993 case CQ_RES_RC_STATUS_WORK_REQUEST_FLUSHED_ERR:
2994 return IB_WC_WR_FLUSH_ERR;
2995 case CQ_RES_RC_STATUS_HW_FLUSH_ERR:
2996 return IB_WC_WR_FLUSH_ERR;
2997 default:
2998 return IB_WC_GENERAL_ERR;
2999 }
3000}
3001
3002static void bnxt_re_process_req_wc(struct ib_wc *wc, struct bnxt_qplib_cqe *cqe)
3003{
3004 switch (cqe->type) {
3005 case BNXT_QPLIB_SWQE_TYPE_SEND:
3006 wc->opcode = IB_WC_SEND;
3007 break;
3008 case BNXT_QPLIB_SWQE_TYPE_SEND_WITH_IMM:
3009 wc->opcode = IB_WC_SEND;
3010 wc->wc_flags |= IB_WC_WITH_IMM;
3011 break;
3012 case BNXT_QPLIB_SWQE_TYPE_SEND_WITH_INV:
3013 wc->opcode = IB_WC_SEND;
3014 wc->wc_flags |= IB_WC_WITH_INVALIDATE;
3015 break;
3016 case BNXT_QPLIB_SWQE_TYPE_RDMA_WRITE:
3017 wc->opcode = IB_WC_RDMA_WRITE;
3018 break;
3019 case BNXT_QPLIB_SWQE_TYPE_RDMA_WRITE_WITH_IMM:
3020 wc->opcode = IB_WC_RDMA_WRITE;
3021 wc->wc_flags |= IB_WC_WITH_IMM;
3022 break;
3023 case BNXT_QPLIB_SWQE_TYPE_RDMA_READ:
3024 wc->opcode = IB_WC_RDMA_READ;
3025 break;
3026 case BNXT_QPLIB_SWQE_TYPE_ATOMIC_CMP_AND_SWP:
3027 wc->opcode = IB_WC_COMP_SWAP;
3028 break;
3029 case BNXT_QPLIB_SWQE_TYPE_ATOMIC_FETCH_AND_ADD:
3030 wc->opcode = IB_WC_FETCH_ADD;
3031 break;
3032 case BNXT_QPLIB_SWQE_TYPE_LOCAL_INV:
3033 wc->opcode = IB_WC_LOCAL_INV;
3034 break;
3035 case BNXT_QPLIB_SWQE_TYPE_REG_MR:
3036 wc->opcode = IB_WC_REG_MR;
3037 break;
3038 default:
3039 wc->opcode = IB_WC_SEND;
3040 break;
3041 }
3042
3043 wc->status = __req_to_ib_wc_status(cqe->status);
3044}
3045
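/* Classify a raw-ethernet QP1 completion as RoCE v1, RoCE v2/IPv4
 * or RoCE v2/IPv6 from the CQE flag bits; returns -1 for non-RoCE
 * packets.
 */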
3046static int bnxt_re_check_packet_type(u16 raweth_qp1_flags,
3047 u16 raweth_qp1_flags2)
3048{
3049 bool is_ipv6 = false, is_ipv4 = false;
3050
3051 /* raweth_qp1_flags bits 9-6 indicate itype */
3052 if ((raweth_qp1_flags & CQ_RES_RAWETH_QP1_RAWETH_QP1_FLAGS_ITYPE_ROCE)
3053 != CQ_RES_RAWETH_QP1_RAWETH_QP1_FLAGS_ITYPE_ROCE)
3054 return -1;
3055
3056 if (raweth_qp1_flags2 &
3057 CQ_RES_RAWETH_QP1_RAWETH_QP1_FLAGS2_IP_CS_CALC &&
3058 raweth_qp1_flags2 &
3059 CQ_RES_RAWETH_QP1_RAWETH_QP1_FLAGS2_L4_CS_CALC) {
3060 /* raweth_qp1_flags2 bit 8 indicates ip_type: 0 = v4, 1 = v6 */
3061 (raweth_qp1_flags2 &
3062 CQ_RES_RAWETH_QP1_RAWETH_QP1_FLAGS2_IP_TYPE) ?
3063 (is_ipv6 = true) : (is_ipv4 = true);
3064 return ((is_ipv6) ?
3065 BNXT_RE_ROCEV2_IPV6_PACKET :
3066 BNXT_RE_ROCEV2_IPV4_PACKET);
3067 } else {
3068 return BNXT_RE_ROCE_V1_PACKET;
3069 }
3070}
3071
3072static int bnxt_re_to_ib_nw_type(int nw_type)
3073{
3074 u8 nw_hdr_type = 0xFF;
3075
3076 switch (nw_type) {
3077 case BNXT_RE_ROCE_V1_PACKET:
3078 nw_hdr_type = RDMA_NETWORK_ROCE_V1;
3079 break;
3080 case BNXT_RE_ROCEV2_IPV4_PACKET:
3081 nw_hdr_type = RDMA_NETWORK_IPV4;
3082 break;
3083 case BNXT_RE_ROCEV2_IPV6_PACKET:
3084 nw_hdr_type = RDMA_NETWORK_IPV6;
3085 break;
3086 }
3087 return nw_hdr_type;
3088}
3089
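/* Inspect a QP1 receive header buffer to decide whether the packet
 * was looped back from this interface.
 */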
3090static bool bnxt_re_is_loopback_packet(struct bnxt_re_dev *rdev,
3091 void *rq_hdr_buf)
3092{
3093 u8 *tmp_buf = NULL;
3094 struct ethhdr *eth_hdr;
3095 u16 eth_type;
3096 bool rc = false;
3097
3098 tmp_buf = (u8 *)rq_hdr_buf;
3099
3100 /* If the destination mac is not the same as the I/F mac, this
3101 * could be a loopback address or a multicast address; check
3102 * whether it is a loopback packet.
3103 */
3104 if (!ether_addr_equal(tmp_buf, rdev->netdev->dev_addr)) {
3105 tmp_buf += 4;
3106 /* Check the ether type */
3107 eth_hdr = (struct ethhdr *)tmp_buf;
3108 eth_type = ntohs(eth_hdr->h_proto);
3109 switch (eth_type) {
3110 case ETH_P_IBOE:
3111 rc = true;
3112 break;
3113 case ETH_P_IP:
3114 case ETH_P_IPV6: {
3115 u32 len;
3116 struct udphdr *udp_hdr;
3117
3118 len = (eth_type == ETH_P_IP ? sizeof(struct iphdr) :
3119 sizeof(struct ipv6hdr));
3120 tmp_buf += sizeof(struct ethhdr) + len;
3121 udp_hdr = (struct udphdr *)tmp_buf;
3122 if (ntohs(udp_hdr->dest) ==
3123 ROCE_V2_UDP_DPORT)
3124 rc = true;
3125 break;
3126 }
3127 default:
3128 break;
3129 }
3130 }
3131
3132 return rc;
3133}
3134
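/* A QP1 packet received on the raw-ethernet GSI QP is re-posted to
 * the shadow QP: the payload (GRH + MAD) is sent to the shadow QP
 * so that a normal UD receive completion, which the MAD layer
 * expects, is generated there.
 */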
3135static int bnxt_re_process_raw_qp_pkt_rx(struct bnxt_re_qp *gsi_qp,
3136 struct bnxt_qplib_cqe *cqe)
3137{
3138 struct bnxt_re_dev *rdev = gsi_qp->rdev;
3139 struct bnxt_re_sqp_entries *sqp_entry = NULL;
3140 struct bnxt_re_qp *gsi_sqp = rdev->gsi_ctx.gsi_sqp;
3141 struct bnxt_re_ah *gsi_sah;
3142 struct ib_send_wr *swr;
3143 struct ib_ud_wr udwr;
3144 struct ib_recv_wr rwr;
3145 int pkt_type = 0;
3146 u32 tbl_idx;
3147 void *rq_hdr_buf;
3148 dma_addr_t rq_hdr_buf_map;
3149 dma_addr_t shrq_hdr_buf_map;
3150 u32 offset = 0;
3151 u32 skip_bytes = 0;
3152 struct ib_sge s_sge[2];
3153 struct ib_sge r_sge[2];
3154 int rc;
3155
3156 memset(&udwr, 0, sizeof(udwr));
3157 memset(&rwr, 0, sizeof(rwr));
3158 memset(&s_sge, 0, sizeof(s_sge));
3159 memset(&r_sge, 0, sizeof(r_sge));
3160
3161 swr = &udwr.wr;
3162 tbl_idx = cqe->wr_id;
3163
3164 rq_hdr_buf = gsi_qp->qplib_qp.rq_hdr_buf +
3165 (tbl_idx * gsi_qp->qplib_qp.rq_hdr_buf_size);
3166 rq_hdr_buf_map = bnxt_qplib_get_qp_buf_from_index(&gsi_qp->qplib_qp,
3167 tbl_idx);
3168
3169 /* Shadow QP header buffer */
3170 shrq_hdr_buf_map = bnxt_qplib_get_qp_buf_from_index(&gsi_sqp->qplib_qp,
3171 tbl_idx);
3172 sqp_entry = &rdev->gsi_ctx.sqp_tbl[tbl_idx];
3173
3174 /* Store this cqe */
3175 memcpy(&sqp_entry->cqe, cqe, sizeof(struct bnxt_qplib_cqe));
3176 sqp_entry->qp1_qp = gsi_qp;
3177
3178
3179 /* Find packet type from the cqe */
3180 pkt_type = bnxt_re_check_packet_type(cqe->raweth_qp1_flags,
3181 cqe->raweth_qp1_flags2);
3182 if (pkt_type < 0) {
3183 ibdev_err(&rdev->ibdev, "Invalid packet\n");
3184 return -EINVAL;
3185 }
3186
3187 /* Adjust the offset for the user buffer and post in the rq */
3188
3189 if (pkt_type == BNXT_RE_ROCEV2_IPV4_PACKET)
3190 offset = 20;
3191
3192 /*
3193 * A QP1 loopback packet has 4 bytes of internal header before
3194 * the ether header; skip these four bytes.
3195 */
3196 if (bnxt_re_is_loopback_packet(rdev, rq_hdr_buf))
3197 skip_bytes = 4;
3198
3199 /* First send SGE; skip the ether header */
3200 s_sge[0].addr = rq_hdr_buf_map + BNXT_QPLIB_MAX_QP1_RQ_ETH_HDR_SIZE
3201 + skip_bytes;
3202 s_sge[0].lkey = 0xFFFFFFFF;
3203 s_sge[0].length = offset ? BNXT_QPLIB_MAX_GRH_HDR_SIZE_IPV4 :
3204 BNXT_QPLIB_MAX_GRH_HDR_SIZE_IPV6;
3205
3206 /* Second send SGE */
3207 s_sge[1].addr = s_sge[0].addr + s_sge[0].length +
3208 BNXT_QPLIB_MAX_QP1_RQ_BDETH_HDR_SIZE;
3209 if (pkt_type != BNXT_RE_ROCE_V1_PACKET)
3210 s_sge[1].addr += 8;
3211 s_sge[1].lkey = 0xFFFFFFFF;
3212 s_sge[1].length = 256;
3213
3214
3215 /* First recv SGE */
3216 r_sge[0].addr = shrq_hdr_buf_map;
3217 r_sge[0].lkey = 0xFFFFFFFF;
3218 r_sge[0].length = 40;
3219
3220 r_sge[1].addr = sqp_entry->sge.addr + offset;
3221 r_sge[1].lkey = sqp_entry->sge.lkey;
3222 r_sge[1].length = BNXT_QPLIB_MAX_GRH_HDR_SIZE_IPV6 + 256 - offset;
3223
3224 /* Create the receive work request */
3225 rwr.num_sge = 2;
3226 rwr.sg_list = r_sge;
3227 rwr.wr_id = tbl_idx;
3228 rwr.next = NULL;
3229
3230 rc = bnxt_re_post_recv_shadow_qp(rdev, gsi_sqp, &rwr);
3231 if (rc) {
3232 ibdev_err(&rdev->ibdev,
3233 "Failed to post Rx buffers to shadow QP");
3234 return -ENOMEM;
3235 }
3236
3237 swr->num_sge = 2;
3238 swr->sg_list = s_sge;
3239 swr->wr_id = tbl_idx;
3240 swr->opcode = IB_WR_SEND;
3241 swr->next = NULL;
3242 gsi_sah = rdev->gsi_ctx.gsi_sah;
3243 udwr.ah = &gsi_sah->ib_ah;
3244 udwr.remote_qpn = gsi_sqp->qplib_qp.id;
3245 udwr.remote_qkey = gsi_sqp->qplib_qp.qkey;
3246
3247 /* Post the data received in the send queue */
3248 rc = bnxt_re_post_send_shadow_qp(rdev, gsi_sqp, swr);
3249
3250 return 0;
3251}
3252
3253static void bnxt_re_process_res_rawqp1_wc(struct ib_wc *wc,
3254 struct bnxt_qplib_cqe *cqe)
3255{
3256 wc->opcode = IB_WC_RECV;
3257 wc->status = __rawqp1_to_ib_wc_status(cqe->status);
3258 wc->wc_flags |= IB_WC_GRH;
3259}
3260
3261static bool bnxt_re_check_if_vlan_valid(struct bnxt_re_dev *rdev,
3262 u16 vlan_id)
3263{
3264 /*
3265 * Check if the vlan is configured in the host. If not configured,
3266 * it can be a transparent VLAN, so do not report the vlan id.
3267 */
3268 if (!__vlan_find_dev_deep_rcu(rdev->netdev,
3269 htons(ETH_P_8021Q), vlan_id))
3270 return false;
3271 return true;
3272}
3273
3274static bool bnxt_re_is_vlan_pkt(struct bnxt_qplib_cqe *orig_cqe,
3275 u16 *vid, u8 *sl)
3276{
3277 bool ret = false;
3278 u32 metadata;
3279 u16 tpid;
3280
3281 metadata = orig_cqe->raweth_qp1_metadata;
3282 if (orig_cqe->raweth_qp1_flags2 &
3283 CQ_RES_RAWETH_QP1_RAWETH_QP1_FLAGS2_META_FORMAT_VLAN) {
3284 tpid = ((metadata &
3285 CQ_RES_RAWETH_QP1_RAWETH_QP1_METADATA_TPID_MASK) >>
3286 CQ_RES_RAWETH_QP1_RAWETH_QP1_METADATA_TPID_SFT);
3287 if (tpid == ETH_P_8021Q) {
3288 *vid = metadata &
3289 CQ_RES_RAWETH_QP1_RAWETH_QP1_METADATA_VID_MASK;
3290 *sl = (metadata &
3291 CQ_RES_RAWETH_QP1_RAWETH_QP1_METADATA_PRI_MASK) >>
3292 CQ_RES_RAWETH_QP1_RAWETH_QP1_METADATA_PRI_SFT;
3293 ret = true;
3294 }
3295 }
3296
3297 return ret;
3298}
3299
3300static void bnxt_re_process_res_rc_wc(struct ib_wc *wc,
3301 struct bnxt_qplib_cqe *cqe)
3302{
3303 wc->opcode = IB_WC_RECV;
3304 wc->status = __rc_to_ib_wc_status(cqe->status);
3305
3306 if (cqe->flags & CQ_RES_RC_FLAGS_IMM)
3307 wc->wc_flags |= IB_WC_WITH_IMM;
3308 if (cqe->flags & CQ_RES_RC_FLAGS_INV)
3309 wc->wc_flags |= IB_WC_WITH_INVALIDATE;
3310 if ((cqe->flags & (CQ_RES_RC_FLAGS_RDMA | CQ_RES_RC_FLAGS_IMM)) ==
3311 (CQ_RES_RC_FLAGS_RDMA | CQ_RES_RC_FLAGS_IMM))
3312 wc->opcode = IB_WC_RECV_RDMA_WITH_IMM;
3313}
3314
3315static void bnxt_re_process_res_shadow_qp_wc(struct bnxt_re_qp *gsi_sqp,
3316 struct ib_wc *wc,
3317 struct bnxt_qplib_cqe *cqe)
3318{
3319 struct bnxt_re_dev *rdev = gsi_sqp->rdev;
3320 struct bnxt_re_qp *gsi_qp = NULL;
3321 struct bnxt_qplib_cqe *orig_cqe = NULL;
3322 struct bnxt_re_sqp_entries *sqp_entry = NULL;
3323 int nw_type;
3324 u32 tbl_idx;
3325 u16 vlan_id;
3326 u8 sl;
3327
3328 tbl_idx = cqe->wr_id;
3329
3330 sqp_entry = &rdev->gsi_ctx.sqp_tbl[tbl_idx];
3331 gsi_qp = sqp_entry->qp1_qp;
3332 orig_cqe = &sqp_entry->cqe;
3333
3334 wc->wr_id = sqp_entry->wrid;
3335 wc->byte_len = orig_cqe->length;
3336 wc->qp = &gsi_qp->ib_qp;
3337
3338 wc->ex.imm_data = orig_cqe->immdata;
3339 wc->src_qp = orig_cqe->src_qp;
3340 memcpy(wc->smac, orig_cqe->smac, ETH_ALEN);
3341 if (bnxt_re_is_vlan_pkt(orig_cqe, &vlan_id, &sl)) {
3342 if (bnxt_re_check_if_vlan_valid(rdev, vlan_id)) {
3343 wc->vlan_id = vlan_id;
3344 wc->sl = sl;
3345 wc->wc_flags |= IB_WC_WITH_VLAN;
3346 }
3347 }
3348 wc->port_num = 1;
3349 wc->vendor_err = orig_cqe->status;
3350
3351 wc->opcode = IB_WC_RECV;
3352 wc->status = __rawqp1_to_ib_wc_status(orig_cqe->status);
3353 wc->wc_flags |= IB_WC_GRH;
3354
3355 nw_type = bnxt_re_check_packet_type(orig_cqe->raweth_qp1_flags,
3356 orig_cqe->raweth_qp1_flags2);
3357 if (nw_type >= 0) {
3358 wc->network_hdr_type = bnxt_re_to_ib_nw_type(nw_type);
3359 wc->wc_flags |= IB_WC_WITH_NETWORK_HDR_TYPE;
3360 }
3361}
3362
3363static void bnxt_re_process_res_ud_wc(struct bnxt_re_qp *qp,
3364 struct ib_wc *wc,
3365 struct bnxt_qplib_cqe *cqe)
3366{
3367 u8 nw_type;
3368
3369 wc->opcode = IB_WC_RECV;
3370 wc->status = __rc_to_ib_wc_status(cqe->status);
3371
3372 if (cqe->flags & CQ_RES_UD_FLAGS_IMM)
3373 wc->wc_flags |= IB_WC_WITH_IMM;
3374
3375 if (qp->qplib_qp.type == CMDQ_CREATE_QP_TYPE_GSI) {
3376 wc->wc_flags |= IB_WC_GRH;
3377 memcpy(wc->smac, cqe->smac, ETH_ALEN);
3378 wc->wc_flags |= IB_WC_WITH_SMAC;
3379 if (cqe->flags & CQ_RES_UD_FLAGS_META_FORMAT_VLAN) {
3380 wc->vlan_id = (cqe->cfa_meta & 0xFFF);
3381 if (wc->vlan_id < 0x1000)
3382 wc->wc_flags |= IB_WC_WITH_VLAN;
3383 }
3384 nw_type = (cqe->flags & CQ_RES_UD_FLAGS_ROCE_IP_VER_MASK) >>
3385 CQ_RES_UD_FLAGS_ROCE_IP_VER_SFT;
3386 wc->network_hdr_type = bnxt_re_to_ib_nw_type(nw_type);
3387 wc->wc_flags |= IB_WC_WITH_NETWORK_HDR_TYPE;
3388 }
3389
3390}
3391
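/* Post a fence (phantom) WQE under the SQ lock; used by the poll
 * logic when the queue needs an extra completion to make progress.
 */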
3392static int send_phantom_wqe(struct bnxt_re_qp *qp)
3393{
3394 struct bnxt_qplib_qp *lib_qp = &qp->qplib_qp;
3395 unsigned long flags;
3396 int rc = 0;
3397
3398 spin_lock_irqsave(&qp->sq_lock, flags);
3399
3400 rc = bnxt_re_bind_fence_mw(lib_qp);
3401 if (!rc) {
3402 lib_qp->sq.phantom_wqe_cnt++;
3403 ibdev_dbg(&qp->rdev->ibdev,
3404 "qp %#x sq->prod %#x sw_prod %#x phantom_wqe_cnt %d\n",
3405 lib_qp->id, lib_qp->sq.hwq.prod,
3406 HWQ_CMP(lib_qp->sq.hwq.prod, &lib_qp->sq.hwq),
3407 lib_qp->sq.phantom_wqe_cnt);
3408 }
3409
3410 spin_unlock_irqrestore(&qp->sq_lock, flags);
3411 return rc;
3412}
3413
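/* Poll up to num_entries completions, translating qplib CQEs into
 * ib_wc entries. GSI completions are rerouted through the shadow
 * QP handling above.
 */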
3414int bnxt_re_poll_cq(struct ib_cq *ib_cq, int num_entries, struct ib_wc *wc)
3415{
3416 struct bnxt_re_cq *cq = container_of(ib_cq, struct bnxt_re_cq, ib_cq);
3417 struct bnxt_re_qp *qp, *sh_qp;
3418 struct bnxt_qplib_cqe *cqe;
3419 int i, ncqe, budget;
3420 struct bnxt_qplib_q *sq;
3421 struct bnxt_qplib_qp *lib_qp;
3422 u32 tbl_idx;
3423 struct bnxt_re_sqp_entries *sqp_entry = NULL;
3424 unsigned long flags;
3425
3426 spin_lock_irqsave(&cq->cq_lock, flags);
3427 budget = min_t(u32, num_entries, cq->max_cql);
3428 num_entries = budget;
3429 if (!cq->cql) {
3430 ibdev_err(&cq->rdev->ibdev, "POLL CQ : no CQL to use");
3431 goto exit;
3432 }
3433 cqe = &cq->cql[0];
3434 while (budget) {
3435 lib_qp = NULL;
3436 ncqe = bnxt_qplib_poll_cq(&cq->qplib_cq, cqe, budget, &lib_qp);
3437 if (lib_qp) {
3438 sq = &lib_qp->sq;
3439 if (sq->send_phantom) {
3440 qp = container_of(lib_qp,
3441 struct bnxt_re_qp, qplib_qp);
3442 if (send_phantom_wqe(qp) == -ENOMEM)
3443 ibdev_err(&cq->rdev->ibdev,
3444 "Phantom failed! Scheduled to send again\n");
3445 else
3446 sq->send_phantom = false;
3447 }
3448 }
3449 if (ncqe < budget)
3450 ncqe += bnxt_qplib_process_flush_list(&cq->qplib_cq,
3451 cqe + ncqe,
3452 budget - ncqe);
3453
3454 if (!ncqe)
3455 break;
3456
3457 for (i = 0; i < ncqe; i++, cqe++) {
3458 /* Transcribe each qplib_wqe back to ib_wc */
3459 memset(wc, 0, sizeof(*wc));
3460
3461 wc->wr_id = cqe->wr_id;
3462 wc->byte_len = cqe->length;
3463 qp = container_of
3464 ((struct bnxt_qplib_qp *)
3465 (unsigned long)(cqe->qp_handle),
3466 struct bnxt_re_qp, qplib_qp);
3467 wc->qp = &qp->ib_qp;
3468 wc->ex.imm_data = cqe->immdata;
3469 wc->src_qp = cqe->src_qp;
3470 memcpy(wc->smac, cqe->smac, ETH_ALEN);
3471 wc->port_num = 1;
3472 wc->vendor_err = cqe->status;
3473
3474 switch (cqe->opcode) {
3475 case CQ_BASE_CQE_TYPE_REQ:
3476 sh_qp = qp->rdev->gsi_ctx.gsi_sqp;
3477 if (sh_qp &&
3478 qp->qplib_qp.id == sh_qp->qplib_qp.id) {
3479 /* Handle this completion with
3480 * the stored completion
3481 */
3482 memset(wc, 0, sizeof(*wc));
3483 continue;
3484 }
3485 bnxt_re_process_req_wc(wc, cqe);
3486 break;
3487 case CQ_BASE_CQE_TYPE_RES_RAWETH_QP1:
3488 if (!cqe->status) {
3489 int rc = 0;
3490
3491 rc = bnxt_re_process_raw_qp_pkt_rx
3492 (qp, cqe);
3493 if (!rc) {
3494 memset(wc, 0, sizeof(*wc));
3495 continue;
3496 }
3497 cqe->status = -1;
3498 }
3499
3500 /* Errors need not be looped back; just change the
3501 * wr_id to the one stored in the sqp table.
3502 */
3503 tbl_idx = cqe->wr_id;
3504 sqp_entry = &cq->rdev->gsi_ctx.sqp_tbl[tbl_idx];
3505 wc->wr_id = sqp_entry->wrid;
3506 bnxt_re_process_res_rawqp1_wc(wc, cqe);
3507 break;
3508 case CQ_BASE_CQE_TYPE_RES_RC:
3509 bnxt_re_process_res_rc_wc(wc, cqe);
3510 break;
3511 case CQ_BASE_CQE_TYPE_RES_UD:
3512 sh_qp = qp->rdev->gsi_ctx.gsi_sqp;
3513 if (sh_qp &&
3514 qp->qplib_qp.id == sh_qp->qplib_qp.id) {
3515 /* Handle this completion with
3516 * the stored completion
3517 */
3518 if (cqe->status) {
3519 continue;
3520 } else {
3521 bnxt_re_process_res_shadow_qp_wc
3522 (qp, wc, cqe);
3523 break;
3524 }
3525 }
3526 bnxt_re_process_res_ud_wc(qp, wc, cqe);
3527 break;
3528 default:
3529 ibdev_err(&cq->rdev->ibdev,
3530 "POLL CQ : type 0x%x not handled",
3531 cqe->opcode);
3532 continue;
3533 }
3534 wc++;
3535 budget--;
3536 }
3537 }
3538exit:
3539 spin_unlock_irqrestore(&cq->cq_lock, flags);
3540 return num_entries - budget;
3541}
3542
3543int bnxt_re_req_notify_cq(struct ib_cq *ib_cq,
3544 enum ib_cq_notify_flags ib_cqn_flags)
3545{
3546 struct bnxt_re_cq *cq = container_of(ib_cq, struct bnxt_re_cq, ib_cq);
3547 int type = 0, rc = 0;
3548 unsigned long flags;
3549
3550 spin_lock_irqsave(&cq->cq_lock, flags);
3551
3552 if (ib_cqn_flags & IB_CQ_NEXT_COMP)
3553 type = DBC_DBC_TYPE_CQ_ARMALL;
3554
3555 else if (ib_cqn_flags & IB_CQ_SOLICITED)
3556 type = DBC_DBC_TYPE_CQ_ARMSE;
3557
3558 /* Poll to see if there are missed events */
3559 if ((ib_cqn_flags & IB_CQ_REPORT_MISSED_EVENTS) &&
3560 !(bnxt_qplib_is_cq_empty(&cq->qplib_cq))) {
3561 rc = 1;
3562 goto exit;
3563 }
3564 bnxt_qplib_req_notify_cq(&cq->qplib_cq, type);
3565
3566exit:
3567 spin_unlock_irqrestore(&cq->cq_lock, flags);
3568 return rc;
3569}
3570
3571/* Memory Regions */
3572struct ib_mr *bnxt_re_get_dma_mr(struct ib_pd *ib_pd, int mr_access_flags)
3573{
3574 struct bnxt_re_pd *pd = container_of(ib_pd, struct bnxt_re_pd, ib_pd);
3575 struct bnxt_re_dev *rdev = pd->rdev;
3576 struct bnxt_re_mr *mr;
3577 int rc;
3578
3579 mr = kzalloc(sizeof(*mr), GFP_KERNEL);
3580 if (!mr)
3581 return ERR_PTR(-ENOMEM);
3582
3583 mr->rdev = rdev;
3584 mr->qplib_mr.pd = &pd->qplib_pd;
3585 mr->qplib_mr.flags = __from_ib_access_flags(mr_access_flags);
3586 mr->qplib_mr.type = CMDQ_ALLOCATE_MRW_MRW_FLAGS_PMR;
3587
3588 /* Allocate and register 0 as the address */
3589 rc = bnxt_qplib_alloc_mrw(&rdev->qplib_res, &mr->qplib_mr);
3590 if (rc)
3591 goto fail;
3592
3593 mr->qplib_mr.hwq.level = PBL_LVL_MAX;
3594 mr->qplib_mr.total_size = -1;
3595 rc = bnxt_qplib_reg_mr(&rdev->qplib_res, &mr->qplib_mr, NULL, 0,
3596 PAGE_SIZE);
3597 if (rc)
3598 goto fail_mr;
3599
3600 mr->ib_mr.lkey = mr->qplib_mr.lkey;
3601 if (mr_access_flags & (IB_ACCESS_REMOTE_WRITE | IB_ACCESS_REMOTE_READ |
3602 IB_ACCESS_REMOTE_ATOMIC))
3603 mr->ib_mr.rkey = mr->ib_mr.lkey;
3604 atomic_inc(&rdev->mr_count);
3605
3606 return &mr->ib_mr;
3607
3608fail_mr:
3609 bnxt_qplib_free_mrw(&rdev->qplib_res, &mr->qplib_mr);
3610fail:
3611 kfree(mr);
3612 return ERR_PTR(rc);
3613}
3614
3615int bnxt_re_dereg_mr(struct ib_mr *ib_mr, struct ib_udata *udata)
3616{
3617 struct bnxt_re_mr *mr = container_of(ib_mr, struct bnxt_re_mr, ib_mr);
3618 struct bnxt_re_dev *rdev = mr->rdev;
3619 int rc;
3620
3621 rc = bnxt_qplib_free_mrw(&rdev->qplib_res, &mr->qplib_mr);
3622 if (rc) {
3623 ibdev_err(&rdev->ibdev, "Dereg MR failed: %#x\n", rc);
3624 return rc;
3625 }
3626
3627 if (mr->pages) {
3628 rc = bnxt_qplib_free_fast_reg_page_list(&rdev->qplib_res,
3629 &mr->qplib_frpl);
3630 kfree(mr->pages);
3631 mr->npages = 0;
3632 mr->pages = NULL;
3633 }
3634 ib_umem_release(mr->ib_umem);
3635
3636 kfree(mr);
3637 atomic_dec(&rdev->mr_count);
3638 return rc;
3639}
3640
3641static int bnxt_re_set_page(struct ib_mr *ib_mr, u64 addr)
3642{
3643 struct bnxt_re_mr *mr = container_of(ib_mr, struct bnxt_re_mr, ib_mr);
3644
3645 if (unlikely(mr->npages == mr->qplib_frpl.max_pg_ptrs))
3646 return -ENOMEM;
3647
3648 mr->pages[mr->npages++] = addr;
3649 return 0;
3650}
3651
3652int bnxt_re_map_mr_sg(struct ib_mr *ib_mr, struct scatterlist *sg, int sg_nents,
3653 unsigned int *sg_offset)
3654{
3655 struct bnxt_re_mr *mr = container_of(ib_mr, struct bnxt_re_mr, ib_mr);
3656
3657 mr->npages = 0;
3658 return ib_sg_to_pages(ib_mr, sg, sg_nents, sg_offset, bnxt_re_set_page);
3659}
3660
3661struct ib_mr *bnxt_re_alloc_mr(struct ib_pd *ib_pd, enum ib_mr_type type,
3662 u32 max_num_sg)
3663{
3664 struct bnxt_re_pd *pd = container_of(ib_pd, struct bnxt_re_pd, ib_pd);
3665 struct bnxt_re_dev *rdev = pd->rdev;
3666 struct bnxt_re_mr *mr = NULL;
3667 int rc;
3668
3669 if (type != IB_MR_TYPE_MEM_REG) {
3670 ibdev_dbg(&rdev->ibdev, "MR type 0x%x not supported", type);
3671 return ERR_PTR(-EINVAL);
3672 }
3673 if (max_num_sg > MAX_PBL_LVL_1_PGS)
3674 return ERR_PTR(-EINVAL);
3675
3676 mr = kzalloc(sizeof(*mr), GFP_KERNEL);
3677 if (!mr)
3678 return ERR_PTR(-ENOMEM);
3679
3680 mr->rdev = rdev;
3681 mr->qplib_mr.pd = &pd->qplib_pd;
3682 mr->qplib_mr.flags = BNXT_QPLIB_FR_PMR;
3683 mr->qplib_mr.type = CMDQ_ALLOCATE_MRW_MRW_FLAGS_PMR;
3684
3685 rc = bnxt_qplib_alloc_mrw(&rdev->qplib_res, &mr->qplib_mr);
3686 if (rc)
3687 goto bail;
3688
3689 mr->ib_mr.lkey = mr->qplib_mr.lkey;
3690 mr->ib_mr.rkey = mr->ib_mr.lkey;
3691
3692 mr->pages = kcalloc(max_num_sg, sizeof(u64), GFP_KERNEL);
3693 if (!mr->pages) {
3694 rc = -ENOMEM;
3695 goto fail;
3696 }
3697 rc = bnxt_qplib_alloc_fast_reg_page_list(&rdev->qplib_res,
3698 &mr->qplib_frpl, max_num_sg);
3699 if (rc) {
3700 ibdev_err(&rdev->ibdev,
3701 "Failed to allocate HW FR page list");
3702 goto fail_mr;
3703 }
3704
3705 atomic_inc(&rdev->mr_count);
3706 return &mr->ib_mr;
3707
3708fail_mr:
3709 kfree(mr->pages);
3710fail:
3711 bnxt_qplib_free_mrw(&rdev->qplib_res, &mr->qplib_mr);
3712bail:
3713 kfree(mr);
3714 return ERR_PTR(rc);
3715}
3716
3717struct ib_mw *bnxt_re_alloc_mw(struct ib_pd *ib_pd, enum ib_mw_type type,
3718 struct ib_udata *udata)
3719{
3720 struct bnxt_re_pd *pd = container_of(ib_pd, struct bnxt_re_pd, ib_pd);
3721 struct bnxt_re_dev *rdev = pd->rdev;
3722 struct bnxt_re_mw *mw;
3723 int rc;
3724
3725 mw = kzalloc(sizeof(*mw), GFP_KERNEL);
3726 if (!mw)
3727 return ERR_PTR(-ENOMEM);
3728 mw->rdev = rdev;
3729 mw->qplib_mw.pd = &pd->qplib_pd;
3730
3731 mw->qplib_mw.type = (type == IB_MW_TYPE_1 ?
3732 CMDQ_ALLOCATE_MRW_MRW_FLAGS_MW_TYPE1 :
3733 CMDQ_ALLOCATE_MRW_MRW_FLAGS_MW_TYPE2B);
3734 rc = bnxt_qplib_alloc_mrw(&rdev->qplib_res, &mw->qplib_mw);
3735 if (rc) {
3736 ibdev_err(&rdev->ibdev, "Allocate MW failed!");
3737 goto fail;
3738 }
3739 mw->ib_mw.rkey = mw->qplib_mw.rkey;
3740
3741 atomic_inc(&rdev->mw_count);
3742 return &mw->ib_mw;
3743
3744fail:
3745 kfree(mw);
3746 return ERR_PTR(rc);
3747}
3748
3749int bnxt_re_dealloc_mw(struct ib_mw *ib_mw)
3750{
3751 struct bnxt_re_mw *mw = container_of(ib_mw, struct bnxt_re_mw, ib_mw);
3752 struct bnxt_re_dev *rdev = mw->rdev;
3753 int rc;
3754
3755 rc = bnxt_qplib_free_mrw(&rdev->qplib_res, &mw->qplib_mw);
3756 if (rc) {
3757 ibdev_err(&rdev->ibdev, "Free MW failed: %#x\n", rc);
3758 return rc;
3759 }
3760
3761 kfree(mw);
3762 atomic_dec(&rdev->mw_count);
3763 return rc;
3764}
3765
3766/* uverbs */
3767struct ib_mr *bnxt_re_reg_user_mr(struct ib_pd *ib_pd, u64 start, u64 length,
3768 u64 virt_addr, int mr_access_flags,
3769 struct ib_udata *udata)
3770{
3771 struct bnxt_re_pd *pd = container_of(ib_pd, struct bnxt_re_pd, ib_pd);
3772 struct bnxt_re_dev *rdev = pd->rdev;
3773 struct bnxt_re_mr *mr;
3774 struct ib_umem *umem;
3775 unsigned long page_size;
3776 int umem_pgs, rc;
3777
3778 if (length > BNXT_RE_MAX_MR_SIZE) {
3779 ibdev_err(&rdev->ibdev, "MR Size: %lld > Max supported:%lld\n",
3780 length, BNXT_RE_MAX_MR_SIZE);
3781 return ERR_PTR(-ENOMEM);
3782 }
3783
3784 mr = kzalloc(sizeof(*mr), GFP_KERNEL);
3785 if (!mr)
3786 return ERR_PTR(-ENOMEM);
3787
3788 mr->rdev = rdev;
3789 mr->qplib_mr.pd = &pd->qplib_pd;
3790 mr->qplib_mr.flags = __from_ib_access_flags(mr_access_flags);
3791 mr->qplib_mr.type = CMDQ_ALLOCATE_MRW_MRW_FLAGS_MR;
3792
3793 rc = bnxt_qplib_alloc_mrw(&rdev->qplib_res, &mr->qplib_mr);
3794 if (rc) {
3795 ibdev_err(&rdev->ibdev, "Failed to allocate MR");
3796 goto free_mr;
3797 }
3798
3799 mr->ib_mr.rkey = mr->qplib_mr.rkey;
3800
3801 umem = ib_umem_get(&rdev->ibdev, start, length, mr_access_flags);
3802 if (IS_ERR(umem)) {
3803 ibdev_err(&rdev->ibdev, "Failed to get umem");
3804 rc = -EFAULT;
3805 goto free_mrw;
3806 }
3807 mr->ib_umem = umem;
3808
3809 mr->qplib_mr.va = virt_addr;
3810 page_size = ib_umem_find_best_pgsz(
3811 umem, BNXT_RE_PAGE_SIZE_4K | BNXT_RE_PAGE_SIZE_2M, virt_addr);
3812 if (!page_size) {
3813 ibdev_err(&rdev->ibdev, "umem page size unsupported!");
3814 rc = -EFAULT;
3815 goto free_umem;
3816 }
3817 mr->qplib_mr.total_size = length;
3818
3819 umem_pgs = ib_umem_num_dma_blocks(umem, page_size);
3820 rc = bnxt_qplib_reg_mr(&rdev->qplib_res, &mr->qplib_mr, umem,
3821 umem_pgs, page_size);
3822 if (rc) {
3823 ibdev_err(&rdev->ibdev, "Failed to register user MR");
3824 goto free_umem;
3825 }
3826
3827 mr->ib_mr.lkey = mr->qplib_mr.lkey;
3828 mr->ib_mr.rkey = mr->qplib_mr.lkey;
3829 atomic_inc(&rdev->mr_count);
3830
3831 return &mr->ib_mr;
3832free_umem:
3833 ib_umem_release(umem);
3834free_mrw:
3835 bnxt_qplib_free_mrw(&rdev->qplib_res, &mr->qplib_mr);
3836free_mr:
3837 kfree(mr);
3838 return ERR_PTR(rc);
3839}
3840
3841int bnxt_re_alloc_ucontext(struct ib_ucontext *ctx, struct ib_udata *udata)
3842{
3843 struct ib_device *ibdev = ctx->device;
3844 struct bnxt_re_ucontext *uctx =
3845 container_of(ctx, struct bnxt_re_ucontext, ib_uctx);
3846 struct bnxt_re_dev *rdev = to_bnxt_re_dev(ibdev, ibdev);
3847 struct bnxt_qplib_dev_attr *dev_attr = &rdev->dev_attr;
3848 struct bnxt_re_uctx_resp resp = {};
3849 u32 chip_met_rev_num = 0;
3850 int rc;
3851
3852 ibdev_dbg(ibdev, "ABI version requested %u", ibdev->ops.uverbs_abi_ver);
3853
3854 if (ibdev->ops.uverbs_abi_ver != BNXT_RE_ABI_VERSION) {
3855 ibdev_dbg(ibdev, "ABI version %u is different from the device's %d",
3856 ibdev->ops.uverbs_abi_ver, BNXT_RE_ABI_VERSION);
3857 return -EPERM;
3858 }
3859
3860 uctx->rdev = rdev;
3861
3862 uctx->shpg = (void *)__get_free_page(GFP_KERNEL);
3863 if (!uctx->shpg) {
3864 rc = -ENOMEM;
3865 goto fail;
3866 }
3867 spin_lock_init(&uctx->sh_lock);
3868
3869 resp.comp_mask = BNXT_RE_UCNTX_CMASK_HAVE_CCTX;
3870 chip_met_rev_num = rdev->chip_ctx->chip_num;
3871 chip_met_rev_num |= ((u32)rdev->chip_ctx->chip_rev & 0xFF) <<
3872 BNXT_RE_CHIP_ID0_CHIP_REV_SFT;
3873 chip_met_rev_num |= ((u32)rdev->chip_ctx->chip_metal & 0xFF) <<
3874 BNXT_RE_CHIP_ID0_CHIP_MET_SFT;
3875 resp.chip_id0 = chip_met_rev_num;
3876
3877 resp.dev_id = rdev->en_dev->pdev->devfn;
3878 resp.max_qp = rdev->qplib_ctx.qpc_count;
3879 resp.pg_size = PAGE_SIZE;
3880 resp.cqe_sz = sizeof(struct cq_base);
3881 resp.max_cqd = dev_attr->max_cq_wqes;
3882
3883 resp.comp_mask |= BNXT_RE_UCNTX_CMASK_HAVE_MODE;
3884 resp.mode = rdev->chip_ctx->modes.wqe_mode;
3885
3886 rc = ib_copy_to_udata(udata, &resp, min(udata->outlen, sizeof(resp)));
3887 if (rc) {
3888 ibdev_err(ibdev, "Failed to copy user context");
3889 rc = -EFAULT;
3890 goto cfail;
3891 }
3892
3893 return 0;
3894cfail:
3895 free_page((unsigned long)uctx->shpg);
3896 uctx->shpg = NULL;
3897fail:
3898 return rc;
3899}
3900
3901void bnxt_re_dealloc_ucontext(struct ib_ucontext *ib_uctx)
3902{
3903 struct bnxt_re_ucontext *uctx = container_of(ib_uctx,
3904 struct bnxt_re_ucontext,
3905 ib_uctx);
3906
3907 struct bnxt_re_dev *rdev = uctx->rdev;
3908
3909 if (uctx->shpg)
3910 free_page((unsigned long)uctx->shpg);
3911
3912 if (uctx->dpi.dbr) {
3913 /* Free DPI only if this is the first PD allocated by the
3914 * application and mark the context dpi as NULL.
3915 */
3916 bnxt_qplib_dealloc_dpi(&rdev->qplib_res,
3917 &rdev->qplib_res.dpi_tbl, &uctx->dpi);
3918 uctx->dpi.dbr = NULL;
3919 }
3920}
3921
3922/* Helper function to mmap the virtual memory from user app */
3923int bnxt_re_mmap(struct ib_ucontext *ib_uctx, struct vm_area_struct *vma)
3924{
3925 struct bnxt_re_ucontext *uctx = container_of(ib_uctx,
3926 struct bnxt_re_ucontext,
3927 ib_uctx);
3928 struct bnxt_re_dev *rdev = uctx->rdev;
3929 u64 pfn;
3930
3931 if (vma->vm_end - vma->vm_start != PAGE_SIZE)
3932 return -EINVAL;
3933
3934 if (vma->vm_pgoff) {
3935 vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
3936 if (io_remap_pfn_range(vma, vma->vm_start, vma->vm_pgoff,
3937 PAGE_SIZE, vma->vm_page_prot)) {
3938 ibdev_err(&rdev->ibdev, "Failed to map DPI");
3939 return -EAGAIN;
3940 }
3941 } else {
3942 pfn = virt_to_phys(uctx->shpg) >> PAGE_SHIFT;
3943 if (remap_pfn_range(vma, vma->vm_start,
3944 pfn, PAGE_SIZE, vma->vm_page_prot)) {
3945 ibdev_err(&rdev->ibdev, "Failed to map shared page");
3946 return -EAGAIN;
3947 }
3948 }
3949
3950 return 0;
3951}
3952