/*
 * Broadcom NetXtreme-E RoCE driver.
 *
 * Description: IB Verbs interpreter
 */
#include <linux/interrupt.h>
#include <linux/types.h>
#include <linux/pci.h>
#include <linux/netdevice.h>
#include <linux/if_ether.h>

#include <rdma/ib_verbs.h>
#include <rdma/ib_user_verbs.h>
#include <rdma/ib_umem.h>
#include <rdma/ib_addr.h>
#include <rdma/ib_mad.h>
#include <rdma/ib_cache.h>
#include <rdma/uverbs_ioctl.h>

#include "bnxt_ulp.h"

#include "roce_hsi.h"
#include "qplib_res.h"
#include "qplib_sp.h"
#include "qplib_fp.h"
#include "qplib_rcfw.h"

#include "bnxt_re.h"
#include "ib_verbs.h"
#include <rdma/bnxt_re-abi.h>
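/* Helpers to translate access flags between the IB stack and the
 * bnxt_qplib layer that programs the HW.
 */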
static int __from_ib_access_flags(int iflags)
{
	int qflags = 0;

	if (iflags & IB_ACCESS_LOCAL_WRITE)
		qflags |= BNXT_QPLIB_ACCESS_LOCAL_WRITE;
	if (iflags & IB_ACCESS_REMOTE_READ)
		qflags |= BNXT_QPLIB_ACCESS_REMOTE_READ;
	if (iflags & IB_ACCESS_REMOTE_WRITE)
		qflags |= BNXT_QPLIB_ACCESS_REMOTE_WRITE;
	if (iflags & IB_ACCESS_REMOTE_ATOMIC)
		qflags |= BNXT_QPLIB_ACCESS_REMOTE_ATOMIC;
	if (iflags & IB_ACCESS_MW_BIND)
		qflags |= BNXT_QPLIB_ACCESS_MW_BIND;
	if (iflags & IB_ZERO_BASED)
		qflags |= BNXT_QPLIB_ACCESS_ZERO_BASED;
	if (iflags & IB_ACCESS_ON_DEMAND)
		qflags |= BNXT_QPLIB_ACCESS_ON_DEMAND;
	return qflags;
}

static enum ib_access_flags __to_ib_access_flags(int qflags)
{
	enum ib_access_flags iflags = 0;

	if (qflags & BNXT_QPLIB_ACCESS_LOCAL_WRITE)
		iflags |= IB_ACCESS_LOCAL_WRITE;
	if (qflags & BNXT_QPLIB_ACCESS_REMOTE_WRITE)
		iflags |= IB_ACCESS_REMOTE_WRITE;
	if (qflags & BNXT_QPLIB_ACCESS_REMOTE_READ)
		iflags |= IB_ACCESS_REMOTE_READ;
	if (qflags & BNXT_QPLIB_ACCESS_REMOTE_ATOMIC)
		iflags |= IB_ACCESS_REMOTE_ATOMIC;
	if (qflags & BNXT_QPLIB_ACCESS_MW_BIND)
		iflags |= IB_ACCESS_MW_BIND;
	if (qflags & BNXT_QPLIB_ACCESS_ZERO_BASED)
		iflags |= IB_ZERO_BASED;
	if (qflags & BNXT_QPLIB_ACCESS_ON_DEMAND)
		iflags |= IB_ACCESS_ON_DEMAND;
	return iflags;
}

static int bnxt_re_build_sgl(struct ib_sge *ib_sg_list,
			     struct bnxt_qplib_sge *sg_list, int num)
{
	int i, total = 0;

	for (i = 0; i < num; i++) {
		sg_list[i].addr = ib_sg_list[i].addr;
		sg_list[i].lkey = ib_sg_list[i].lkey;
		sg_list[i].size = ib_sg_list[i].length;
		total += sg_list[i].size;
	}
	return total;
}

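/* Device attributes: report the limits cached in rdev->dev_attr back to
 * the IB core.
 */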
int bnxt_re_query_device(struct ib_device *ibdev,
			 struct ib_device_attr *ib_attr,
			 struct ib_udata *udata)
{
	struct bnxt_re_dev *rdev = to_bnxt_re_dev(ibdev, ibdev);
	struct bnxt_qplib_dev_attr *dev_attr = &rdev->dev_attr;

	memset(ib_attr, 0, sizeof(*ib_attr));
	memcpy(&ib_attr->fw_ver, dev_attr->fw_ver,
	       min(sizeof(dev_attr->fw_ver), sizeof(ib_attr->fw_ver)));
	bnxt_qplib_get_guid(rdev->netdev->dev_addr,
			    (u8 *)&ib_attr->sys_image_guid);
	ib_attr->max_mr_size = BNXT_RE_MAX_MR_SIZE;
	ib_attr->page_size_cap = BNXT_RE_PAGE_SIZE_4K | BNXT_RE_PAGE_SIZE_2M;

	ib_attr->vendor_id = rdev->en_dev->pdev->vendor;
	ib_attr->vendor_part_id = rdev->en_dev->pdev->device;
	ib_attr->hw_ver = rdev->en_dev->pdev->subsystem_device;
	ib_attr->max_qp = dev_attr->max_qp;
	ib_attr->max_qp_wr = dev_attr->max_qp_wqes;
	ib_attr->device_cap_flags =
				    IB_DEVICE_CURR_QP_STATE_MOD
				    | IB_DEVICE_RC_RNR_NAK_GEN
				    | IB_DEVICE_SHUTDOWN_PORT
				    | IB_DEVICE_SYS_IMAGE_GUID
				    | IB_DEVICE_LOCAL_DMA_LKEY
				    | IB_DEVICE_RESIZE_MAX_WR
				    | IB_DEVICE_PORT_ACTIVE_EVENT
				    | IB_DEVICE_N_NOTIFY_CQ
				    | IB_DEVICE_MEM_WINDOW
				    | IB_DEVICE_MEM_WINDOW_TYPE_2B
				    | IB_DEVICE_MEM_MGT_EXTENSIONS;
	ib_attr->max_send_sge = dev_attr->max_qp_sges;
	ib_attr->max_recv_sge = dev_attr->max_qp_sges;
	ib_attr->max_sge_rd = dev_attr->max_qp_sges;
	ib_attr->max_cq = dev_attr->max_cq;
	ib_attr->max_cqe = dev_attr->max_cq_wqes;
	ib_attr->max_mr = dev_attr->max_mr;
	ib_attr->max_pd = dev_attr->max_pd;
	ib_attr->max_qp_rd_atom = dev_attr->max_qp_rd_atom;
	ib_attr->max_qp_init_rd_atom = dev_attr->max_qp_init_rd_atom;
	ib_attr->atomic_cap = IB_ATOMIC_NONE;
	ib_attr->masked_atomic_cap = IB_ATOMIC_NONE;

	ib_attr->max_ee_rd_atom = 0;
	ib_attr->max_res_rd_atom = 0;
	ib_attr->max_ee_init_rd_atom = 0;
	ib_attr->max_ee = 0;
	ib_attr->max_rdd = 0;
	ib_attr->max_mw = dev_attr->max_mw;
	ib_attr->max_raw_ipv6_qp = 0;
	ib_attr->max_raw_ethy_qp = dev_attr->max_raw_ethy_qp;
	ib_attr->max_mcast_grp = 0;
	ib_attr->max_mcast_qp_attach = 0;
	ib_attr->max_total_mcast_qp_attach = 0;
	ib_attr->max_ah = dev_attr->max_ah;

	ib_attr->max_fmr = 0;
	ib_attr->max_map_per_fmr = 0;

	ib_attr->max_srq = dev_attr->max_srq;
	ib_attr->max_srq_wr = dev_attr->max_srq_wqes;
	ib_attr->max_srq_sge = dev_attr->max_srq_sges;

	ib_attr->max_fast_reg_page_list_len = MAX_PBL_LVL_1_PGS;

	ib_attr->max_pkeys = 1;
	ib_attr->local_ca_ack_delay = BNXT_RE_DEFAULT_ACK_DELAY;
	return 0;
}

int bnxt_re_modify_device(struct ib_device *ibdev,
			  int device_modify_mask,
			  struct ib_device_modify *device_modify)
{
	switch (device_modify_mask) {
	case IB_DEVICE_MODIFY_SYS_IMAGE_GUID:
		/* Modify the GUID requires the modification of the GID table */
		/* GUID should be made as READ-ONLY */
		break;
	case IB_DEVICE_MODIFY_NODE_DESC:
		/* Node Desc should be made as READ-ONLY */
		break;
	default:
		break;
	}
	return 0;
}

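/* Port attributes: link state is derived from the underlying netdev. */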
int bnxt_re_query_port(struct ib_device *ibdev, u8 port_num,
		       struct ib_port_attr *port_attr)
{
	struct bnxt_re_dev *rdev = to_bnxt_re_dev(ibdev, ibdev);
	struct bnxt_qplib_dev_attr *dev_attr = &rdev->dev_attr;

	memset(port_attr, 0, sizeof(*port_attr));

	if (netif_running(rdev->netdev) && netif_carrier_ok(rdev->netdev)) {
		port_attr->state = IB_PORT_ACTIVE;
		port_attr->phys_state = IB_PORT_PHYS_STATE_LINK_UP;
	} else {
		port_attr->state = IB_PORT_DOWN;
		port_attr->phys_state = IB_PORT_PHYS_STATE_DISABLED;
	}
	port_attr->max_mtu = IB_MTU_4096;
	port_attr->active_mtu = iboe_get_mtu(rdev->netdev->mtu);
	port_attr->gid_tbl_len = dev_attr->max_sgid;
	port_attr->port_cap_flags = IB_PORT_CM_SUP | IB_PORT_REINIT_SUP |
				    IB_PORT_DEVICE_MGMT_SUP |
				    IB_PORT_VENDOR_CLASS_SUP;
	port_attr->ip_gids = true;

	/* Max MSG size set to 2G for now */
	port_attr->max_msg_sz = (u32)BNXT_RE_MAX_MR_SIZE_LOW;
	port_attr->bad_pkey_cntr = 0;
	port_attr->qkey_viol_cntr = 0;
	port_attr->pkey_tbl_len = dev_attr->max_pkey;
	port_attr->lid = 0;
	port_attr->sm_lid = 0;
	port_attr->lmc = 0;
	port_attr->max_vl_num = 4;
	port_attr->sm_sl = 0;
	port_attr->subnet_timeout = 0;
	port_attr->init_type_reply = 0;
	port_attr->active_speed = rdev->active_speed;
	port_attr->active_width = rdev->active_width;

	return 0;
}

int bnxt_re_get_port_immutable(struct ib_device *ibdev, u8 port_num,
			       struct ib_port_immutable *immutable)
{
	struct ib_port_attr port_attr;

	if (bnxt_re_query_port(ibdev, port_num, &port_attr))
		return -EINVAL;

	immutable->pkey_tbl_len = port_attr.pkey_tbl_len;
	immutable->gid_tbl_len = port_attr.gid_tbl_len;
	immutable->core_cap_flags = RDMA_CORE_PORT_IBA_ROCE;
	immutable->core_cap_flags |= RDMA_CORE_CAP_PROT_ROCE_UDP_ENCAP;
	immutable->max_mad_size = IB_MGMT_MAD_SIZE;
	return 0;
}

void bnxt_re_query_fw_str(struct ib_device *ibdev, char *str)
{
	struct bnxt_re_dev *rdev = to_bnxt_re_dev(ibdev, ibdev);

	snprintf(str, IB_FW_VERSION_NAME_MAX, "%d.%d.%d.%d",
		 rdev->dev_attr.fw_ver[0], rdev->dev_attr.fw_ver[1],
		 rdev->dev_attr.fw_ver[2], rdev->dev_attr.fw_ver[3]);
}

int bnxt_re_query_pkey(struct ib_device *ibdev, u8 port_num,
		       u16 index, u16 *pkey)
{
	struct bnxt_re_dev *rdev = to_bnxt_re_dev(ibdev, ibdev);

	/* Ignore port_num */
	memset(pkey, 0, sizeof(*pkey));
	return bnxt_qplib_get_pkey(&rdev->qplib_res,
				   &rdev->qplib_res.pkey_tbl, index, pkey);
}

int bnxt_re_query_gid(struct ib_device *ibdev, u8 port_num,
		      int index, union ib_gid *gid)
{
	struct bnxt_re_dev *rdev = to_bnxt_re_dev(ibdev, ibdev);
	int rc = 0;

	/* Ignore port_num */
	memset(gid, 0, sizeof(*gid));
	rc = bnxt_qplib_get_sgid(&rdev->qplib_res,
				 &rdev->qplib_res.sgid_tbl, index,
				 (struct bnxt_qplib_gid *)gid);
	return rc;
}

int bnxt_re_del_gid(const struct ib_gid_attr *attr, void **context)
{
	int rc = 0;
	struct bnxt_re_gid_ctx *ctx, **ctx_tbl;
	struct bnxt_re_dev *rdev = to_bnxt_re_dev(attr->device, ibdev);
	struct bnxt_qplib_sgid_tbl *sgid_tbl = &rdev->qplib_res.sgid_tbl;
	struct bnxt_qplib_gid *gid_to_del;
	u16 vlan_id = 0xFFFF;

	ctx = *context;
	if (!ctx)
		return -EINVAL;

	if (sgid_tbl && sgid_tbl->active) {
		if (ctx->idx >= sgid_tbl->max)
			return -EINVAL;
		gid_to_del = &sgid_tbl->tbl[ctx->idx].gid;
		vlan_id = sgid_tbl->tbl[ctx->idx].vlan_id;
		/* DEL_GID is called in WQ context (netdevice_event_work_handler)
		 * or via the ib_unregister_device path. In the former case QP1
		 * may not be destroyed yet, in which case just return as FW
		 * needs that entry to be present and will fail its deletion.
		 * We could get invoked again after QP1 is destroyed OR get an
		 * ADD_GID call with a different GID value for the same index
		 * where we issue MODIFY_GID cmd to update the GID entry -- TBD
		 */
		if (ctx->idx == 0 &&
		    rdma_link_local_addr((struct in6_addr *)gid_to_del) &&
		    ctx->refcnt == 1 && rdev->gsi_ctx.gsi_sqp) {
			ibdev_dbg(&rdev->ibdev,
				  "Trying to delete GID0 while QP1 is alive\n");
			return -EFAULT;
		}
		ctx->refcnt--;
		if (!ctx->refcnt) {
			rc = bnxt_qplib_del_sgid(sgid_tbl, gid_to_del,
						 vlan_id, true);
			if (rc) {
				ibdev_err(&rdev->ibdev,
					  "Failed to remove GID: %#x", rc);
			} else {
				ctx_tbl = sgid_tbl->ctx;
				ctx_tbl[ctx->idx] = NULL;
				kfree(ctx);
			}
		}
	} else {
		return -EINVAL;
	}
	return rc;
}

int bnxt_re_add_gid(const struct ib_gid_attr *attr, void **context)
{
	int rc;
	u32 tbl_idx = 0;
	u16 vlan_id = 0xFFFF;
	struct bnxt_re_gid_ctx *ctx, **ctx_tbl;
	struct bnxt_re_dev *rdev = to_bnxt_re_dev(attr->device, ibdev);
	struct bnxt_qplib_sgid_tbl *sgid_tbl = &rdev->qplib_res.sgid_tbl;

	rc = rdma_read_gid_l2_fields(attr, &vlan_id, NULL);
	if (rc)
		return rc;

	rc = bnxt_qplib_add_sgid(sgid_tbl, (struct bnxt_qplib_gid *)&attr->gid,
				 rdev->qplib_res.netdev->dev_addr,
				 vlan_id, true, &tbl_idx);
	if (rc == -EALREADY) {
		ctx_tbl = sgid_tbl->ctx;
		ctx_tbl[tbl_idx]->refcnt++;
		*context = ctx_tbl[tbl_idx];
		return 0;
	}

	if (rc < 0) {
		ibdev_err(&rdev->ibdev, "Failed to add GID: %#x", rc);
		return rc;
	}

	ctx = kmalloc(sizeof(*ctx), GFP_KERNEL);
	if (!ctx)
		return -ENOMEM;
	ctx_tbl = sgid_tbl->ctx;
	ctx->idx = tbl_idx;
	ctx->refcnt = 1;
	ctx_tbl[tbl_idx] = ctx;
	*context = ctx;

	return rc;
}

enum rdma_link_layer bnxt_re_get_link_layer(struct ib_device *ibdev,
					    u8 port_num)
{
	return IB_LINK_LAYER_ETHERNET;
}

#define BNXT_RE_FENCE_PBL_SIZE	DIV_ROUND_UP(BNXT_RE_FENCE_BYTES, PAGE_SIZE)

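/* Fence infrastructure: each kernel PD carries a small DMA-mapped buffer,
 * an MR registered over it and a type-1 MW. A BIND_MW work request built
 * over this window (posted via bnxt_re_bind_fence_mw()) is used to realize
 * fence semantics on the SQ.
 */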
static void bnxt_re_create_fence_wqe(struct bnxt_re_pd *pd)
{
	struct bnxt_re_fence_data *fence = &pd->fence;
	struct ib_mr *ib_mr = &fence->mr->ib_mr;
	struct bnxt_qplib_swqe *wqe = &fence->bind_wqe;

	memset(wqe, 0, sizeof(*wqe));
	wqe->type = BNXT_QPLIB_SWQE_TYPE_BIND_MW;
	wqe->wr_id = BNXT_QPLIB_FENCE_WRID;
	wqe->flags |= BNXT_QPLIB_SWQE_FLAGS_SIGNAL_COMP;
	wqe->flags |= BNXT_QPLIB_SWQE_FLAGS_UC_FENCE;
	wqe->bind.zero_based = false;
	wqe->bind.parent_l_key = ib_mr->lkey;
	wqe->bind.va = (u64)(unsigned long)fence->va;
	wqe->bind.length = fence->size;
	wqe->bind.access_cntl = __from_ib_access_flags(IB_ACCESS_REMOTE_READ);
	wqe->bind.mw_type = SQ_BIND_MW_TYPE_TYPE1;

	/* Save the initial rkey in the fence structure; the r_key actually
	 * posted is refreshed on every bind in bnxt_re_bind_fence_mw().
	 */
	fence->bind_rkey = ib_inc_rkey(fence->mw->rkey);
}

static int bnxt_re_bind_fence_mw(struct bnxt_qplib_qp *qplib_qp)
{
	struct bnxt_re_qp *qp = container_of(qplib_qp, struct bnxt_re_qp,
					     qplib_qp);
	struct ib_pd *ib_pd = qp->ib_qp.pd;
	struct bnxt_re_pd *pd = container_of(ib_pd, struct bnxt_re_pd, ib_pd);
	struct bnxt_re_fence_data *fence = &pd->fence;
	struct bnxt_qplib_swqe *fence_wqe = &fence->bind_wqe;
	struct bnxt_qplib_swqe wqe;
	int rc;

	memcpy(&wqe, fence_wqe, sizeof(wqe));
	wqe.bind.r_key = fence->bind_rkey;
	fence->bind_rkey = ib_inc_rkey(fence->bind_rkey);

	ibdev_dbg(&qp->rdev->ibdev,
		  "Posting bind fence-WQE: rkey: %#x QP: %d PD: %p\n",
		  wqe.bind.r_key, qp->qplib_qp.id, pd);
	rc = bnxt_qplib_post_send(&qp->qplib_qp, &wqe);
	if (rc) {
		ibdev_err(&qp->rdev->ibdev, "Failed to bind fence-WQE\n");
		return rc;
	}
	bnxt_qplib_post_send_db(&qp->qplib_qp);

	return rc;
}

static void bnxt_re_destroy_fence_mr(struct bnxt_re_pd *pd)
{
	struct bnxt_re_fence_data *fence = &pd->fence;
	struct bnxt_re_dev *rdev = pd->rdev;
	struct device *dev = &rdev->en_dev->pdev->dev;
	struct bnxt_re_mr *mr = fence->mr;

	if (fence->mw) {
		bnxt_re_dealloc_mw(fence->mw);
		fence->mw = NULL;
	}
	if (mr) {
		if (mr->ib_mr.rkey)
			bnxt_qplib_dereg_mrw(&rdev->qplib_res, &mr->qplib_mr,
					     true);
		if (mr->ib_mr.lkey)
			bnxt_qplib_free_mrw(&rdev->qplib_res, &mr->qplib_mr);
		kfree(mr);
		fence->mr = NULL;
	}
	if (fence->dma_addr) {
		dma_unmap_single(dev, fence->dma_addr, BNXT_RE_FENCE_BYTES,
				 DMA_BIDIRECTIONAL);
		fence->dma_addr = 0;
	}
}

static int bnxt_re_create_fence_mr(struct bnxt_re_pd *pd)
{
	int mr_access_flags = IB_ACCESS_LOCAL_WRITE | IB_ACCESS_MW_BIND;
	struct bnxt_re_fence_data *fence = &pd->fence;
	struct bnxt_re_dev *rdev = pd->rdev;
	struct device *dev = &rdev->en_dev->pdev->dev;
	struct bnxt_re_mr *mr = NULL;
	dma_addr_t dma_addr = 0;
	struct ib_mw *mw;
	u64 pbl_tbl;
	int rc;

	dma_addr = dma_map_single(dev, fence->va, BNXT_RE_FENCE_BYTES,
				  DMA_BIDIRECTIONAL);
	rc = dma_mapping_error(dev, dma_addr);
	if (rc) {
		ibdev_err(&rdev->ibdev, "Failed to dma-map fence-MR-mem\n");
		rc = -EIO;
		fence->dma_addr = 0;
		goto fail;
	}
	fence->dma_addr = dma_addr;

	/* Allocate a MR */
	mr = kzalloc(sizeof(*mr), GFP_KERNEL);
	if (!mr) {
		rc = -ENOMEM;
		goto fail;
	}
	fence->mr = mr;
	mr->rdev = rdev;
	mr->qplib_mr.pd = &pd->qplib_pd;
	mr->qplib_mr.type = CMDQ_ALLOCATE_MRW_MRW_FLAGS_PMR;
	mr->qplib_mr.flags = __from_ib_access_flags(mr_access_flags);
	rc = bnxt_qplib_alloc_mrw(&rdev->qplib_res, &mr->qplib_mr);
	if (rc) {
		ibdev_err(&rdev->ibdev, "Failed to alloc fence-HW-MR\n");
		goto fail;
	}

	/* Register MR */
	mr->ib_mr.lkey = mr->qplib_mr.lkey;
	mr->qplib_mr.va = (u64)(unsigned long)fence->va;
	mr->qplib_mr.total_size = BNXT_RE_FENCE_BYTES;
	pbl_tbl = dma_addr;
	rc = bnxt_qplib_reg_mr(&rdev->qplib_res, &mr->qplib_mr, &pbl_tbl,
			       BNXT_RE_FENCE_PBL_SIZE, false, PAGE_SIZE);
	if (rc) {
		ibdev_err(&rdev->ibdev, "Failed to register fence-MR\n");
		goto fail;
	}
	mr->ib_mr.rkey = mr->qplib_mr.rkey;

	/* Create a fence MW only for kernel consumers */
	mw = bnxt_re_alloc_mw(&pd->ib_pd, IB_MW_TYPE_1, NULL);
	if (IS_ERR(mw)) {
		ibdev_err(&rdev->ibdev,
			  "Failed to create fence-MW for PD: %p\n", pd);
		rc = PTR_ERR(mw);
		goto fail;
	}
	fence->mw = mw;

	bnxt_re_create_fence_wqe(pd);
	return 0;

fail:
	bnxt_re_destroy_fence_mr(pd);
	return rc;
}

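/* Protection Domains */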
void bnxt_re_dealloc_pd(struct ib_pd *ib_pd, struct ib_udata *udata)
{
	struct bnxt_re_pd *pd = container_of(ib_pd, struct bnxt_re_pd, ib_pd);
	struct bnxt_re_dev *rdev = pd->rdev;

	bnxt_re_destroy_fence_mr(pd);

	if (pd->qplib_pd.id)
		bnxt_qplib_dealloc_pd(&rdev->qplib_res, &rdev->qplib_res.pd_tbl,
				      &pd->qplib_pd);
}

int bnxt_re_alloc_pd(struct ib_pd *ibpd, struct ib_udata *udata)
{
	struct ib_device *ibdev = ibpd->device;
	struct bnxt_re_dev *rdev = to_bnxt_re_dev(ibdev, ibdev);
	struct bnxt_re_ucontext *ucntx = rdma_udata_to_drv_context(
		udata, struct bnxt_re_ucontext, ib_uctx);
	struct bnxt_re_pd *pd = container_of(ibpd, struct bnxt_re_pd, ib_pd);
	int rc;

	pd->rdev = rdev;
	if (bnxt_qplib_alloc_pd(&rdev->qplib_res.pd_tbl, &pd->qplib_pd)) {
		ibdev_err(&rdev->ibdev, "Failed to allocate HW PD");
		rc = -ENOMEM;
		goto fail;
	}

	if (udata) {
		struct bnxt_re_pd_resp resp;

		if (!ucntx->dpi.dbr) {
			/* Allocate DPI in alloc_pd to avoid failing of
			 * ibv_devinfo and family of application when DPIs
			 * are depleted.
			 */
			if (bnxt_qplib_alloc_dpi(&rdev->qplib_res.dpi_tbl,
						 &ucntx->dpi, ucntx)) {
				rc = -ENOMEM;
				goto dbfail;
			}
		}

		resp.pdid = pd->qplib_pd.id;
		/* Still allow mapping this DBR to the new user PD. */
		resp.dpi = ucntx->dpi.dpi;
		resp.dbr = (u64)ucntx->dpi.umdbr;

		rc = ib_copy_to_udata(udata, &resp, sizeof(resp));
		if (rc) {
			ibdev_err(&rdev->ibdev,
				  "Failed to copy user response\n");
			goto dbfail;
		}
	}

	if (!udata)
		if (bnxt_re_create_fence_mr(pd))
			ibdev_warn(&rdev->ibdev,
				   "Failed to create Fence-MR\n");
	return 0;
dbfail:
	bnxt_qplib_dealloc_pd(&rdev->qplib_res, &rdev->qplib_res.pd_tbl,
			      &pd->qplib_pd);
fail:
	return rc;
}

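/* Address Handles */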
void bnxt_re_destroy_ah(struct ib_ah *ib_ah, u32 flags)
{
	struct bnxt_re_ah *ah = container_of(ib_ah, struct bnxt_re_ah, ib_ah);
	struct bnxt_re_dev *rdev = ah->rdev;

	bnxt_qplib_destroy_ah(&rdev->qplib_res, &ah->qplib_ah,
			      !(flags & RDMA_DESTROY_AH_SLEEPABLE));
}

static u8 bnxt_re_stack_to_dev_nw_type(enum rdma_network_type ntype)
{
	u8 nw_type;

	switch (ntype) {
	case RDMA_NETWORK_IPV4:
		nw_type = CMDQ_CREATE_AH_TYPE_V2IPV4;
		break;
	case RDMA_NETWORK_IPV6:
		nw_type = CMDQ_CREATE_AH_TYPE_V2IPV6;
		break;
	default:
		nw_type = CMDQ_CREATE_AH_TYPE_V1;
		break;
	}
	return nw_type;
}

int bnxt_re_create_ah(struct ib_ah *ib_ah, struct rdma_ah_attr *ah_attr,
		      u32 flags, struct ib_udata *udata)
{
	struct ib_pd *ib_pd = ib_ah->pd;
	struct bnxt_re_pd *pd = container_of(ib_pd, struct bnxt_re_pd, ib_pd);
	const struct ib_global_route *grh = rdma_ah_read_grh(ah_attr);
	struct bnxt_re_dev *rdev = pd->rdev;
	const struct ib_gid_attr *sgid_attr;
	struct bnxt_re_gid_ctx *ctx;
	struct bnxt_re_ah *ah = container_of(ib_ah, struct bnxt_re_ah, ib_ah);
	u8 nw_type;
	int rc;

	if (!(rdma_ah_get_ah_flags(ah_attr) & IB_AH_GRH)) {
		ibdev_err(&rdev->ibdev, "Failed to alloc AH: GRH not set");
		return -EINVAL;
	}

	ah->rdev = rdev;
	ah->qplib_ah.pd = &pd->qplib_pd;

	/* Supply the configuration for the HW */
	memcpy(ah->qplib_ah.dgid.data, grh->dgid.raw,
	       sizeof(union ib_gid));
	sgid_attr = grh->sgid_attr;
	/* Get the HW context of the GID. The reference
	 * of GID table entry is already taken by the caller.
	 */
	ctx = rdma_read_gid_hw_context(sgid_attr);
	ah->qplib_ah.sgid_index = ctx->idx;
	ah->qplib_ah.host_sgid_index = grh->sgid_index;
	ah->qplib_ah.traffic_class = grh->traffic_class;
	ah->qplib_ah.flow_label = grh->flow_label;
	ah->qplib_ah.hop_limit = grh->hop_limit;
	ah->qplib_ah.sl = rdma_ah_get_sl(ah_attr);

	/* Get network header type for this GID */
	nw_type = rdma_gid_attr_network_type(sgid_attr);
	ah->qplib_ah.nw_type = bnxt_re_stack_to_dev_nw_type(nw_type);

	memcpy(ah->qplib_ah.dmac, ah_attr->roce.dmac, ETH_ALEN);
	rc = bnxt_qplib_create_ah(&rdev->qplib_res, &ah->qplib_ah,
				  !(flags & RDMA_CREATE_AH_SLEEPABLE));
	if (rc) {
		ibdev_err(&rdev->ibdev, "Failed to allocate HW AH");
		return rc;
	}

	/* Write AVID to shared page. */
	if (udata) {
		struct bnxt_re_ucontext *uctx = rdma_udata_to_drv_context(
			udata, struct bnxt_re_ucontext, ib_uctx);
		unsigned long flag;
		u32 *wrptr;

		spin_lock_irqsave(&uctx->sh_lock, flag);
		wrptr = (u32 *)(uctx->shpg + BNXT_RE_AVID_OFFT);
		*wrptr = ah->qplib_ah.id;
		wmb(); /* make sure cache is updated. */
		spin_unlock_irqrestore(&uctx->sh_lock, flag);
	}

	return 0;
}

int bnxt_re_modify_ah(struct ib_ah *ib_ah, struct rdma_ah_attr *ah_attr)
{
	return 0;
}

int bnxt_re_query_ah(struct ib_ah *ib_ah, struct rdma_ah_attr *ah_attr)
{
	struct bnxt_re_ah *ah = container_of(ib_ah, struct bnxt_re_ah, ib_ah);

	ah_attr->type = ib_ah->type;
	rdma_ah_set_sl(ah_attr, ah->qplib_ah.sl);
	memcpy(ah_attr->roce.dmac, ah->qplib_ah.dmac, ETH_ALEN);
	rdma_ah_set_grh(ah_attr, NULL, 0,
			ah->qplib_ah.host_sgid_index,
			0, ah->qplib_ah.traffic_class);
	rdma_ah_set_dgid_raw(ah_attr, ah->qplib_ah.dgid.data);
	rdma_ah_set_port_num(ah_attr, 1);
	rdma_ah_set_static_rate(ah_attr, 0);
	return 0;
}

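/* Both CQs of a QP are taken with the send CQ lock first, so the
 * flush/clean paths below always acquire the locks in a consistent order.
 */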
unsigned long bnxt_re_lock_cqs(struct bnxt_re_qp *qp)
	__acquires(&qp->scq->cq_lock) __acquires(&qp->rcq->cq_lock)
{
	unsigned long flags;

	spin_lock_irqsave(&qp->scq->cq_lock, flags);
	if (qp->rcq != qp->scq)
		spin_lock(&qp->rcq->cq_lock);
	else
		__acquire(&qp->rcq->cq_lock);

	return flags;
}

void bnxt_re_unlock_cqs(struct bnxt_re_qp *qp,
			unsigned long flags)
	__releases(&qp->scq->cq_lock) __releases(&qp->rcq->cq_lock)
{
	if (qp->rcq != qp->scq)
		spin_unlock(&qp->rcq->cq_lock);
	else
		__release(&qp->rcq->cq_lock);
	spin_unlock_irqrestore(&qp->scq->cq_lock, flags);
}

static int bnxt_re_destroy_gsi_sqp(struct bnxt_re_qp *qp)
{
	struct bnxt_re_qp *gsi_sqp;
	struct bnxt_re_ah *gsi_sah;
	struct bnxt_re_dev *rdev;
	int rc = 0;

	rdev = qp->rdev;
	gsi_sqp = rdev->gsi_ctx.gsi_sqp;
	gsi_sah = rdev->gsi_ctx.gsi_sah;

	/* remove from active qp list */
	mutex_lock(&rdev->qp_lock);
	list_del(&gsi_sqp->list);
	mutex_unlock(&rdev->qp_lock);
	atomic_dec(&rdev->qp_count);

	ibdev_dbg(&rdev->ibdev, "Destroy the shadow AH\n");
	bnxt_qplib_destroy_ah(&rdev->qplib_res,
			      &gsi_sah->qplib_ah,
			      true);
	bnxt_qplib_clean_qp(&qp->qplib_qp);

	ibdev_dbg(&rdev->ibdev, "Destroy the shadow QP\n");
	rc = bnxt_qplib_destroy_qp(&rdev->qplib_res, &gsi_sqp->qplib_qp);
	if (rc) {
		ibdev_err(&rdev->ibdev, "Destroy Shadow QP failed");
		goto fail;
	}
	bnxt_qplib_free_qp_res(&rdev->qplib_res, &gsi_sqp->qplib_qp);

	kfree(rdev->gsi_ctx.sqp_tbl);
	kfree(gsi_sah);
	kfree(gsi_sqp);
	rdev->gsi_ctx.gsi_sqp = NULL;
	rdev->gsi_ctx.gsi_sah = NULL;
	rdev->gsi_ctx.sqp_tbl = NULL;

	return 0;
fail:
	return rc;
}

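/* Queue Pairs */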
int bnxt_re_destroy_qp(struct ib_qp *ib_qp, struct ib_udata *udata)
{
	struct bnxt_re_qp *qp = container_of(ib_qp, struct bnxt_re_qp, ib_qp);
	struct bnxt_re_dev *rdev = qp->rdev;
	unsigned int flags;
	int rc;

	mutex_lock(&rdev->qp_lock);
	list_del(&qp->list);
	mutex_unlock(&rdev->qp_lock);
	atomic_dec(&rdev->qp_count);

	bnxt_qplib_flush_cqn_wq(&qp->qplib_qp);

	rc = bnxt_qplib_destroy_qp(&rdev->qplib_res, &qp->qplib_qp);
	if (rc) {
		ibdev_err(&rdev->ibdev, "Failed to destroy HW QP");
		return rc;
	}

	if (rdma_is_kernel_res(&qp->ib_qp.res)) {
		flags = bnxt_re_lock_cqs(qp);
		bnxt_qplib_clean_qp(&qp->qplib_qp);
		bnxt_re_unlock_cqs(qp, flags);
	}

	bnxt_qplib_free_qp_res(&rdev->qplib_res, &qp->qplib_qp);

	if (ib_qp->qp_type == IB_QPT_GSI && rdev->gsi_ctx.gsi_sqp) {
		rc = bnxt_re_destroy_gsi_sqp(qp);
		if (rc)
			goto sh_fail;
	}

	ib_umem_release(qp->rumem);
	ib_umem_release(qp->sumem);

	kfree(qp);
	return 0;
sh_fail:
	return rc;
}

static u8 __from_ib_qp_type(enum ib_qp_type type)
{
	switch (type) {
	case IB_QPT_GSI:
		return CMDQ_CREATE_QP1_TYPE_GSI;
	case IB_QPT_RC:
		return CMDQ_CREATE_QP_TYPE_RC;
	case IB_QPT_UD:
		return CMDQ_CREATE_QP_TYPE_UD;
	default:
		return IB_QPT_MAX;
	}
}

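/* Map the user-space SQ/RQ buffers supplied in the ABI request into the
 * qplib sg_info so the HW queues can be backed by user memory.
 */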
static int bnxt_re_init_user_qp(struct bnxt_re_dev *rdev, struct bnxt_re_pd *pd,
				struct bnxt_re_qp *qp, struct ib_udata *udata)
{
	struct bnxt_re_qp_req ureq;
	struct bnxt_qplib_qp *qplib_qp = &qp->qplib_qp;
	struct ib_umem *umem;
	int bytes = 0, psn_sz;
	struct bnxt_re_ucontext *cntx = rdma_udata_to_drv_context(
		udata, struct bnxt_re_ucontext, ib_uctx);

	if (ib_copy_from_udata(&ureq, udata, sizeof(ureq)))
		return -EFAULT;

	bytes = (qplib_qp->sq.max_wqe * BNXT_QPLIB_MAX_SQE_ENTRY_SIZE);
	/* Consider mapping PSN search memory only for RC QPs. */
	if (qplib_qp->type == CMDQ_CREATE_QP_TYPE_RC) {
		psn_sz = bnxt_qplib_is_chip_gen_p5(rdev->chip_ctx) ?
			 sizeof(struct sq_psn_search_ext) :
			 sizeof(struct sq_psn_search);
		bytes += (qplib_qp->sq.max_wqe * psn_sz);
	}
	bytes = PAGE_ALIGN(bytes);
	umem = ib_umem_get(udata, ureq.qpsva, bytes, IB_ACCESS_LOCAL_WRITE);
	if (IS_ERR(umem))
		return PTR_ERR(umem);

	qp->sumem = umem;
	qplib_qp->sq.sg_info.sghead = umem->sg_head.sgl;
	qplib_qp->sq.sg_info.npages = ib_umem_num_pages(umem);
	qplib_qp->sq.sg_info.nmap = umem->nmap;
	qplib_qp->sq.sg_info.pgsize = PAGE_SIZE;
	qplib_qp->sq.sg_info.pgshft = PAGE_SHIFT;
	qplib_qp->qp_handle = ureq.qp_handle;

	if (!qp->qplib_qp.srq) {
		bytes = (qplib_qp->rq.max_wqe * BNXT_QPLIB_MAX_RQE_ENTRY_SIZE);
		bytes = PAGE_ALIGN(bytes);
		umem = ib_umem_get(udata, ureq.qprva, bytes,
				   IB_ACCESS_LOCAL_WRITE);
		if (IS_ERR(umem))
			goto rqfail;
		qp->rumem = umem;
		qplib_qp->rq.sg_info.sghead = umem->sg_head.sgl;
		qplib_qp->rq.sg_info.npages = ib_umem_num_pages(umem);
		qplib_qp->rq.sg_info.nmap = umem->nmap;
		qplib_qp->rq.sg_info.pgsize = PAGE_SIZE;
		qplib_qp->rq.sg_info.pgshft = PAGE_SHIFT;
	}

	qplib_qp->dpi = &cntx->dpi;
	return 0;
rqfail:
	ib_umem_release(qp->sumem);
	qp->sumem = NULL;
	memset(&qplib_qp->sq.sg_info, 0, sizeof(qplib_qp->sq.sg_info));

	return PTR_ERR(umem);
}

static struct bnxt_re_ah *bnxt_re_create_shadow_qp_ah
				(struct bnxt_re_pd *pd,
				 struct bnxt_qplib_res *qp1_res,
				 struct bnxt_qplib_qp *qp1_qp)
{
	struct bnxt_re_dev *rdev = pd->rdev;
	struct bnxt_re_ah *ah;
	union ib_gid sgid;
	int rc;

	ah = kzalloc(sizeof(*ah), GFP_KERNEL);
	if (!ah)
		return NULL;

	ah->rdev = rdev;
	ah->qplib_ah.pd = &pd->qplib_pd;

	rc = bnxt_re_query_gid(&rdev->ibdev, 1, 0, &sgid);
	if (rc)
		goto fail;

	/* supply the dgid data same as sgid */
	memcpy(ah->qplib_ah.dgid.data, &sgid.raw,
	       sizeof(union ib_gid));
	ah->qplib_ah.sgid_index = 0;

	ah->qplib_ah.traffic_class = 0;
	ah->qplib_ah.flow_label = 0;
	ah->qplib_ah.hop_limit = 1;
	ah->qplib_ah.sl = 0;
	/* Have DMAC same as SMAC */
	ether_addr_copy(ah->qplib_ah.dmac, rdev->netdev->dev_addr);

	rc = bnxt_qplib_create_ah(&rdev->qplib_res, &ah->qplib_ah, false);
	if (rc) {
		ibdev_err(&rdev->ibdev,
			  "Failed to allocate HW AH for Shadow QP");
		goto fail;
	}

	return ah;

fail:
	kfree(ah);
	return NULL;
}

static struct bnxt_re_qp *bnxt_re_create_shadow_qp
				(struct bnxt_re_pd *pd,
				 struct bnxt_qplib_res *qp1_res,
				 struct bnxt_qplib_qp *qp1_qp)
{
	struct bnxt_re_dev *rdev = pd->rdev;
	struct bnxt_re_qp *qp;
	int rc;

	qp = kzalloc(sizeof(*qp), GFP_KERNEL);
	if (!qp)
		return NULL;

	qp->rdev = rdev;

	/* Initialize the shadow QP structure from the QP1 values */
	ether_addr_copy(qp->qplib_qp.smac, rdev->netdev->dev_addr);

	qp->qplib_qp.pd = &pd->qplib_pd;
	qp->qplib_qp.qp_handle = (u64)(unsigned long)(&qp->qplib_qp);
	qp->qplib_qp.type = IB_QPT_UD;

	qp->qplib_qp.max_inline_data = 0;
	qp->qplib_qp.sig_type = true;

	/* Shadow QP SQ depth should be same as QP1 RQ depth */
	qp->qplib_qp.sq.max_wqe = qp1_qp->rq.max_wqe;
	qp->qplib_qp.sq.max_sge = 2;
	/* Q full delta can be 1 since it is internal QP */
	qp->qplib_qp.sq.q_full_delta = 1;
	qp->qplib_qp.sq.sg_info.pgsize = PAGE_SIZE;
	qp->qplib_qp.sq.sg_info.pgshft = PAGE_SHIFT;

	qp->qplib_qp.scq = qp1_qp->scq;
	qp->qplib_qp.rcq = qp1_qp->rcq;

	qp->qplib_qp.rq.max_wqe = qp1_qp->rq.max_wqe;
	qp->qplib_qp.rq.max_sge = qp1_qp->rq.max_sge;
	/* Q full delta can be 1 since it is internal QP */
	qp->qplib_qp.rq.q_full_delta = 1;
	qp->qplib_qp.rq.sg_info.pgsize = PAGE_SIZE;
	qp->qplib_qp.rq.sg_info.pgshft = PAGE_SHIFT;

	qp->qplib_qp.mtu = qp1_qp->mtu;

	qp->qplib_qp.sq_hdr_buf_size = 0;
	qp->qplib_qp.rq_hdr_buf_size = BNXT_QPLIB_MAX_GRH_HDR_SIZE_IPV6;
	qp->qplib_qp.dpi = &rdev->dpi_privileged;

	rc = bnxt_qplib_create_qp(qp1_res, &qp->qplib_qp);
	if (rc)
		goto fail;

	spin_lock_init(&qp->sq_lock);
	INIT_LIST_HEAD(&qp->list);
	mutex_lock(&rdev->qp_lock);
	list_add_tail(&qp->list, &rdev->qp_list);
	atomic_inc(&rdev->qp_count);
	mutex_unlock(&rdev->qp_lock);
	return qp;
fail:
	kfree(qp);
	return NULL;
}

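/* The helpers below size the RQ and SQ from ib_qp_init_attr, clamping to
 * the device limits and applying GSI (QP1) specific adjustments.
 */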
static int bnxt_re_init_rq_attr(struct bnxt_re_qp *qp,
				struct ib_qp_init_attr *init_attr)
{
	struct bnxt_qplib_dev_attr *dev_attr;
	struct bnxt_qplib_qp *qplqp;
	struct bnxt_re_dev *rdev;
	int entries;

	rdev = qp->rdev;
	qplqp = &qp->qplib_qp;
	dev_attr = &rdev->dev_attr;

	if (init_attr->srq) {
		struct bnxt_re_srq *srq;

		srq = container_of(init_attr->srq, struct bnxt_re_srq, ib_srq);
		if (!srq) {
			ibdev_err(&rdev->ibdev, "SRQ not found");
			return -EINVAL;
		}
		qplqp->srq = &srq->qplib_srq;
		qplqp->rq.max_wqe = 0;
	} else {
		/* Allocate 1 more than what's provided so posting max doesn't
		 * mean empty.
		 */
		entries = roundup_pow_of_two(init_attr->cap.max_recv_wr + 1);
		qplqp->rq.max_wqe = min_t(u32, entries,
					  dev_attr->max_qp_wqes + 1);
		qplqp->rq.q_full_delta = qplqp->rq.max_wqe -
					 init_attr->cap.max_recv_wr;
		qplqp->rq.max_sge = init_attr->cap.max_recv_sge;
		if (qplqp->rq.max_sge > dev_attr->max_qp_sges)
			qplqp->rq.max_sge = dev_attr->max_qp_sges;
	}
	qplqp->rq.sg_info.pgsize = PAGE_SIZE;
	qplqp->rq.sg_info.pgshft = PAGE_SHIFT;

	return 0;
}

static void bnxt_re_adjust_gsi_rq_attr(struct bnxt_re_qp *qp)
{
	struct bnxt_qplib_dev_attr *dev_attr;
	struct bnxt_qplib_qp *qplqp;
	struct bnxt_re_dev *rdev;

	rdev = qp->rdev;
	qplqp = &qp->qplib_qp;
	dev_attr = &rdev->dev_attr;

	qplqp->rq.max_sge = dev_attr->max_qp_sges;
	if (qplqp->rq.max_sge > dev_attr->max_qp_sges)
		qplqp->rq.max_sge = dev_attr->max_qp_sges;
	/* The GSI RQ always uses 6 SGEs */
	qplqp->rq.max_sge = 6;
}

static void bnxt_re_init_sq_attr(struct bnxt_re_qp *qp,
				 struct ib_qp_init_attr *init_attr,
				 struct ib_udata *udata)
{
	struct bnxt_qplib_dev_attr *dev_attr;
	struct bnxt_qplib_qp *qplqp;
	struct bnxt_re_dev *rdev;
	int entries;

	rdev = qp->rdev;
	qplqp = &qp->qplib_qp;
	dev_attr = &rdev->dev_attr;

	qplqp->sq.max_sge = init_attr->cap.max_send_sge;
	if (qplqp->sq.max_sge > dev_attr->max_qp_sges)
		qplqp->sq.max_sge = dev_attr->max_qp_sges;

	entries = init_attr->cap.max_send_wr;
	/* Allocate 128 + 1 more than what's provided */
	entries = roundup_pow_of_two(entries + BNXT_QPLIB_RESERVED_QP_WRS + 1);
	qplqp->sq.max_wqe = min_t(u32, entries, dev_attr->max_qp_wqes +
				  BNXT_QPLIB_RESERVED_QP_WRS + 1);
	qplqp->sq.q_full_delta = BNXT_QPLIB_RESERVED_QP_WRS + 1;
	/* Reserving one slot for Phantom WQE. Application can
	 * post one extra entry in this case. But allowing this to avoid
	 * unexpected Queue full condition
	 */
	qplqp->sq.q_full_delta -= 1;
	qplqp->sq.sg_info.pgsize = PAGE_SIZE;
	qplqp->sq.sg_info.pgshft = PAGE_SHIFT;
}

static void bnxt_re_adjust_gsi_sq_attr(struct bnxt_re_qp *qp,
				       struct ib_qp_init_attr *init_attr)
{
	struct bnxt_qplib_dev_attr *dev_attr;
	struct bnxt_qplib_qp *qplqp;
	struct bnxt_re_dev *rdev;
	int entries;

	rdev = qp->rdev;
	qplqp = &qp->qplib_qp;
	dev_attr = &rdev->dev_attr;

	entries = roundup_pow_of_two(init_attr->cap.max_send_wr + 1);
	qplqp->sq.max_wqe = min_t(u32, entries, dev_attr->max_qp_wqes + 1);
	qplqp->sq.q_full_delta = qplqp->sq.max_wqe -
				 init_attr->cap.max_send_wr;
	qplqp->sq.max_sge++;
	if (qplqp->sq.max_sge > dev_attr->max_qp_sges)
		qplqp->sq.max_sge = dev_attr->max_qp_sges;
}

static int bnxt_re_init_qp_type(struct bnxt_re_dev *rdev,
				struct ib_qp_init_attr *init_attr)
{
	struct bnxt_qplib_chip_ctx *chip_ctx;
	int qptype;

	chip_ctx = rdev->chip_ctx;

	qptype = __from_ib_qp_type(init_attr->qp_type);
	if (qptype == IB_QPT_MAX) {
		ibdev_err(&rdev->ibdev, "QP type 0x%x not supported", qptype);
		qptype = -EINVAL;
		goto out;
	}

	if (bnxt_qplib_is_chip_gen_p5(chip_ctx) &&
	    init_attr->qp_type == IB_QPT_GSI)
		qptype = CMDQ_CREATE_QP_TYPE_GSI;
out:
	return qptype;
}

static int bnxt_re_init_qp_attr(struct bnxt_re_qp *qp, struct bnxt_re_pd *pd,
				struct ib_qp_init_attr *init_attr,
				struct ib_udata *udata)
{
	struct bnxt_qplib_dev_attr *dev_attr;
	struct bnxt_qplib_qp *qplqp;
	struct bnxt_re_dev *rdev;
	struct bnxt_re_cq *cq;
	int rc = 0, qptype;

	rdev = qp->rdev;
	qplqp = &qp->qplib_qp;
	dev_attr = &rdev->dev_attr;

	/* Setup misc params */
	ether_addr_copy(qplqp->smac, rdev->netdev->dev_addr);
	qplqp->pd = &pd->qplib_pd;
	qplqp->qp_handle = (u64)qplqp;
	qplqp->max_inline_data = init_attr->cap.max_inline_data;
	qplqp->sig_type = ((init_attr->sq_sig_type == IB_SIGNAL_ALL_WR) ?
			    true : false);
	qptype = bnxt_re_init_qp_type(rdev, init_attr);
	if (qptype < 0) {
		rc = qptype;
		goto out;
	}
	qplqp->type = (u8)qptype;

	if (init_attr->qp_type == IB_QPT_RC) {
		qplqp->max_rd_atomic = dev_attr->max_qp_rd_atom;
		qplqp->max_dest_rd_atomic = dev_attr->max_qp_init_rd_atom;
	}
	qplqp->mtu = ib_mtu_enum_to_int(iboe_get_mtu(rdev->netdev->mtu));
	qplqp->dpi = &rdev->dpi_privileged;
	if (init_attr->create_flags)
		ibdev_dbg(&rdev->ibdev,
			  "QP create flags 0x%x not supported",
			  init_attr->create_flags);

	/* Setup CQs */
	if (init_attr->send_cq) {
		cq = container_of(init_attr->send_cq, struct bnxt_re_cq, ib_cq);
		if (!cq) {
			ibdev_err(&rdev->ibdev, "Send CQ not found");
			rc = -EINVAL;
			goto out;
		}
		qplqp->scq = &cq->qplib_cq;
		qp->scq = cq;
	}

	if (init_attr->recv_cq) {
		cq = container_of(init_attr->recv_cq, struct bnxt_re_cq, ib_cq);
		if (!cq) {
			ibdev_err(&rdev->ibdev, "Receive CQ not found");
			rc = -EINVAL;
			goto out;
		}
		qplqp->rcq = &cq->qplib_cq;
		qp->rcq = cq;
	}

	/* Setup RQ/SRQ */
	rc = bnxt_re_init_rq_attr(qp, init_attr);
	if (rc)
		goto out;
	if (init_attr->qp_type == IB_QPT_GSI)
		bnxt_re_adjust_gsi_rq_attr(qp);

	/* Setup SQ */
	bnxt_re_init_sq_attr(qp, init_attr, udata);
	if (init_attr->qp_type == IB_QPT_GSI)
		bnxt_re_adjust_gsi_sq_attr(qp, init_attr);

	if (udata) /* This will update DPI and qp_handle */
		rc = bnxt_re_init_user_qp(rdev, pd, qp, udata);
out:
	return rc;
}

static int bnxt_re_create_shadow_gsi(struct bnxt_re_qp *qp,
				     struct bnxt_re_pd *pd)
{
	struct bnxt_re_sqp_entries *sqp_tbl = NULL;
	struct bnxt_re_dev *rdev;
	struct bnxt_re_qp *sqp;
	struct bnxt_re_ah *sah;
	int rc = 0;

	rdev = qp->rdev;
	/* Create a shadow QP to handle the QP1 traffic */
	sqp_tbl = kcalloc(BNXT_RE_MAX_GSI_SQP_ENTRIES, sizeof(*sqp_tbl),
			  GFP_KERNEL);
	if (!sqp_tbl)
		return -ENOMEM;
	rdev->gsi_ctx.sqp_tbl = sqp_tbl;

	sqp = bnxt_re_create_shadow_qp(pd, &rdev->qplib_res, &qp->qplib_qp);
	if (!sqp) {
		rc = -ENODEV;
		ibdev_err(&rdev->ibdev, "Failed to create Shadow QP for QP1");
		goto out;
	}
	rdev->gsi_ctx.gsi_sqp = sqp;

	sqp->rcq = qp->rcq;
	sqp->scq = qp->scq;
	sah = bnxt_re_create_shadow_qp_ah(pd, &rdev->qplib_res,
					  &qp->qplib_qp);
	if (!sah) {
		bnxt_qplib_destroy_qp(&rdev->qplib_res,
				      &sqp->qplib_qp);
		rc = -ENODEV;
		ibdev_err(&rdev->ibdev,
			  "Failed to create AH entry for ShadowQP");
		goto out;
	}
	rdev->gsi_ctx.gsi_sah = sah;

	return 0;
out:
	kfree(sqp_tbl);
	return rc;
}

static int bnxt_re_create_gsi_qp(struct bnxt_re_qp *qp, struct bnxt_re_pd *pd,
				 struct ib_qp_init_attr *init_attr)
{
	struct bnxt_re_dev *rdev;
	struct bnxt_qplib_qp *qplqp;
	int rc = 0;

	rdev = qp->rdev;
	qplqp = &qp->qplib_qp;

	qplqp->rq_hdr_buf_size = BNXT_QPLIB_MAX_QP1_RQ_HDR_SIZE_V2;
	qplqp->sq_hdr_buf_size = BNXT_QPLIB_MAX_QP1_SQ_HDR_SIZE_V2;

	rc = bnxt_qplib_create_qp1(&rdev->qplib_res, qplqp);
	if (rc) {
		ibdev_err(&rdev->ibdev, "create HW QP1 failed!");
		goto out;
	}

	rc = bnxt_re_create_shadow_gsi(qp, pd);
out:
	return rc;
}

static bool bnxt_re_test_qp_limits(struct bnxt_re_dev *rdev,
				   struct ib_qp_init_attr *init_attr,
				   struct bnxt_qplib_dev_attr *dev_attr)
{
	bool rc = true;

	if (init_attr->cap.max_send_wr > dev_attr->max_qp_wqes ||
	    init_attr->cap.max_recv_wr > dev_attr->max_qp_wqes ||
	    init_attr->cap.max_send_sge > dev_attr->max_qp_sges ||
	    init_attr->cap.max_recv_sge > dev_attr->max_qp_sges ||
	    init_attr->cap.max_inline_data > dev_attr->max_inline_data) {
		ibdev_err(&rdev->ibdev,
			  "Create QP failed - max exceeded! 0x%x/0x%x 0x%x/0x%x 0x%x/0x%x 0x%x/0x%x 0x%x/0x%x",
			  init_attr->cap.max_send_wr, dev_attr->max_qp_wqes,
			  init_attr->cap.max_recv_wr, dev_attr->max_qp_wqes,
			  init_attr->cap.max_send_sge, dev_attr->max_qp_sges,
			  init_attr->cap.max_recv_sge, dev_attr->max_qp_sges,
			  init_attr->cap.max_inline_data,
			  dev_attr->max_inline_data);
		rc = false;
	}
	return rc;
}

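/* Create a QP. For GSI on pre-P5 chips this also spawns a shadow UD QP
 * and AH that the driver uses internally to handle QP1 traffic.
 */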
struct ib_qp *bnxt_re_create_qp(struct ib_pd *ib_pd,
				struct ib_qp_init_attr *qp_init_attr,
				struct ib_udata *udata)
{
	struct bnxt_re_pd *pd = container_of(ib_pd, struct bnxt_re_pd, ib_pd);
	struct bnxt_re_dev *rdev = pd->rdev;
	struct bnxt_qplib_dev_attr *dev_attr = &rdev->dev_attr;
	struct bnxt_re_qp *qp;
	int rc;

	rc = bnxt_re_test_qp_limits(rdev, qp_init_attr, dev_attr);
	if (!rc) {
		rc = -EINVAL;
		goto exit;
	}

	qp = kzalloc(sizeof(*qp), GFP_KERNEL);
	if (!qp) {
		rc = -ENOMEM;
		goto exit;
	}
	qp->rdev = rdev;
	rc = bnxt_re_init_qp_attr(qp, pd, qp_init_attr, udata);
	if (rc)
		goto fail;

	if (qp_init_attr->qp_type == IB_QPT_GSI &&
	    !(bnxt_qplib_is_chip_gen_p5(rdev->chip_ctx))) {
		rc = bnxt_re_create_gsi_qp(qp, pd, qp_init_attr);
		if (rc == -ENODEV)
			goto qp_destroy;
		if (rc)
			goto fail;
	} else {
		rc = bnxt_qplib_create_qp(&rdev->qplib_res, &qp->qplib_qp);
		if (rc) {
			ibdev_err(&rdev->ibdev, "Failed to create HW QP");
			goto free_umem;
		}
		if (udata) {
			struct bnxt_re_qp_resp resp;

			resp.qpid = qp->qplib_qp.id;
			resp.rsvd = 0;
			rc = ib_copy_to_udata(udata, &resp, sizeof(resp));
			if (rc) {
				ibdev_err(&rdev->ibdev, "Failed to copy QP udata");
				goto qp_destroy;
			}
		}
	}

	qp->ib_qp.qp_num = qp->qplib_qp.id;
	if (qp_init_attr->qp_type == IB_QPT_GSI)
		rdev->gsi_ctx.gsi_qp = qp;
	spin_lock_init(&qp->sq_lock);
	spin_lock_init(&qp->rq_lock);
	INIT_LIST_HEAD(&qp->list);
	mutex_lock(&rdev->qp_lock);
	list_add_tail(&qp->list, &rdev->qp_list);
	mutex_unlock(&rdev->qp_lock);
	atomic_inc(&rdev->qp_count);

	return &qp->ib_qp;
qp_destroy:
	bnxt_qplib_destroy_qp(&rdev->qplib_res, &qp->qplib_qp);
free_umem:
	ib_umem_release(qp->rumem);
	ib_umem_release(qp->sumem);
fail:
	kfree(qp);
exit:
	return ERR_PTR(rc);
}

static u8 __from_ib_qp_state(enum ib_qp_state state)
{
	switch (state) {
	case IB_QPS_RESET:
		return CMDQ_MODIFY_QP_NEW_STATE_RESET;
	case IB_QPS_INIT:
		return CMDQ_MODIFY_QP_NEW_STATE_INIT;
	case IB_QPS_RTR:
		return CMDQ_MODIFY_QP_NEW_STATE_RTR;
	case IB_QPS_RTS:
		return CMDQ_MODIFY_QP_NEW_STATE_RTS;
	case IB_QPS_SQD:
		return CMDQ_MODIFY_QP_NEW_STATE_SQD;
	case IB_QPS_SQE:
		return CMDQ_MODIFY_QP_NEW_STATE_SQE;
	case IB_QPS_ERR:
	default:
		return CMDQ_MODIFY_QP_NEW_STATE_ERR;
	}
}

static enum ib_qp_state __to_ib_qp_state(u8 state)
{
	switch (state) {
	case CMDQ_MODIFY_QP_NEW_STATE_RESET:
		return IB_QPS_RESET;
	case CMDQ_MODIFY_QP_NEW_STATE_INIT:
		return IB_QPS_INIT;
	case CMDQ_MODIFY_QP_NEW_STATE_RTR:
		return IB_QPS_RTR;
	case CMDQ_MODIFY_QP_NEW_STATE_RTS:
		return IB_QPS_RTS;
	case CMDQ_MODIFY_QP_NEW_STATE_SQD:
		return IB_QPS_SQD;
	case CMDQ_MODIFY_QP_NEW_STATE_SQE:
		return IB_QPS_SQE;
	case CMDQ_MODIFY_QP_NEW_STATE_ERR:
	default:
		return IB_QPS_ERR;
	}
}

static u32 __from_ib_mtu(enum ib_mtu mtu)
{
	switch (mtu) {
	case IB_MTU_256:
		return CMDQ_MODIFY_QP_PATH_MTU_MTU_256;
	case IB_MTU_512:
		return CMDQ_MODIFY_QP_PATH_MTU_MTU_512;
	case IB_MTU_1024:
		return CMDQ_MODIFY_QP_PATH_MTU_MTU_1024;
	case IB_MTU_2048:
		return CMDQ_MODIFY_QP_PATH_MTU_MTU_2048;
	case IB_MTU_4096:
		return CMDQ_MODIFY_QP_PATH_MTU_MTU_4096;
	default:
		return CMDQ_MODIFY_QP_PATH_MTU_MTU_2048;
	}
}

static enum ib_mtu __to_ib_mtu(u32 mtu)
{
	switch (mtu & CREQ_QUERY_QP_RESP_SB_PATH_MTU_MASK) {
	case CMDQ_MODIFY_QP_PATH_MTU_MTU_256:
		return IB_MTU_256;
	case CMDQ_MODIFY_QP_PATH_MTU_MTU_512:
		return IB_MTU_512;
	case CMDQ_MODIFY_QP_PATH_MTU_MTU_1024:
		return IB_MTU_1024;
	case CMDQ_MODIFY_QP_PATH_MTU_MTU_2048:
		return IB_MTU_2048;
	case CMDQ_MODIFY_QP_PATH_MTU_MTU_4096:
		return IB_MTU_4096;
	default:
		return IB_MTU_2048;
	}
}

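/* Shared Receive Queues */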
void bnxt_re_destroy_srq(struct ib_srq *ib_srq, struct ib_udata *udata)
{
	struct bnxt_re_srq *srq = container_of(ib_srq, struct bnxt_re_srq,
					       ib_srq);
	struct bnxt_re_dev *rdev = srq->rdev;
	struct bnxt_qplib_srq *qplib_srq = &srq->qplib_srq;
	struct bnxt_qplib_nq *nq = NULL;

	if (qplib_srq->cq)
		nq = qplib_srq->cq->nq;
	bnxt_qplib_destroy_srq(&rdev->qplib_res, qplib_srq);
	ib_umem_release(srq->umem);
	atomic_dec(&rdev->srq_count);
	if (nq)
		nq->budget--;
}

static int bnxt_re_init_user_srq(struct bnxt_re_dev *rdev,
				 struct bnxt_re_pd *pd,
				 struct bnxt_re_srq *srq,
				 struct ib_udata *udata)
{
	struct bnxt_re_srq_req ureq;
	struct bnxt_qplib_srq *qplib_srq = &srq->qplib_srq;
	struct ib_umem *umem;
	int bytes = 0;
	struct bnxt_re_ucontext *cntx = rdma_udata_to_drv_context(
		udata, struct bnxt_re_ucontext, ib_uctx);

	if (ib_copy_from_udata(&ureq, udata, sizeof(ureq)))
		return -EFAULT;

	bytes = (qplib_srq->max_wqe * BNXT_QPLIB_MAX_RQE_ENTRY_SIZE);
	bytes = PAGE_ALIGN(bytes);
	umem = ib_umem_get(udata, ureq.srqva, bytes, IB_ACCESS_LOCAL_WRITE);
	if (IS_ERR(umem))
		return PTR_ERR(umem);

	srq->umem = umem;
	qplib_srq->sg_info.sghead = umem->sg_head.sgl;
	qplib_srq->sg_info.npages = ib_umem_num_pages(umem);
	qplib_srq->sg_info.nmap = umem->nmap;
	qplib_srq->sg_info.pgsize = PAGE_SIZE;
	qplib_srq->sg_info.pgshft = PAGE_SHIFT;
	qplib_srq->srq_handle = ureq.srq_handle;
	qplib_srq->dpi = &cntx->dpi;

	return 0;
}

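/* Create an SRQ; the depth is rounded up to a power of two and capped at
 * the device maximum, mirroring the QP sizing logic above.
 */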
int bnxt_re_create_srq(struct ib_srq *ib_srq,
		       struct ib_srq_init_attr *srq_init_attr,
		       struct ib_udata *udata)
{
	struct ib_pd *ib_pd = ib_srq->pd;
	struct bnxt_re_pd *pd = container_of(ib_pd, struct bnxt_re_pd, ib_pd);
	struct bnxt_re_dev *rdev = pd->rdev;
	struct bnxt_qplib_dev_attr *dev_attr = &rdev->dev_attr;
	struct bnxt_re_srq *srq =
		container_of(ib_srq, struct bnxt_re_srq, ib_srq);
	struct bnxt_qplib_nq *nq = NULL;
	int rc, entries;

	if (srq_init_attr->attr.max_wr >= dev_attr->max_srq_wqes) {
		ibdev_err(&rdev->ibdev, "Create SRQ failed - max exceeded");
		rc = -EINVAL;
		goto exit;
	}

	if (srq_init_attr->srq_type != IB_SRQT_BASIC) {
		rc = -EOPNOTSUPP;
		goto exit;
	}

	srq->rdev = rdev;
	srq->qplib_srq.pd = &pd->qplib_pd;
	srq->qplib_srq.dpi = &rdev->dpi_privileged;
	/* Allocate 1 more than what's provided so posting max doesn't
	 * mean empty.
	 */
	entries = roundup_pow_of_two(srq_init_attr->attr.max_wr + 1);
	if (entries > dev_attr->max_srq_wqes + 1)
		entries = dev_attr->max_srq_wqes + 1;

	srq->qplib_srq.max_wqe = entries;
	srq->qplib_srq.max_sge = srq_init_attr->attr.max_sge;
	srq->qplib_srq.threshold = srq_init_attr->attr.srq_limit;
	srq->srq_limit = srq_init_attr->attr.srq_limit;
	srq->qplib_srq.eventq_hw_ring_id = rdev->nq[0].ring_id;
	nq = &rdev->nq[0];

	if (udata) {
		rc = bnxt_re_init_user_srq(rdev, pd, srq, udata);
		if (rc)
			goto fail;
	}

	rc = bnxt_qplib_create_srq(&rdev->qplib_res, &srq->qplib_srq);
	if (rc) {
		ibdev_err(&rdev->ibdev, "Create HW SRQ failed!");
		goto fail;
	}

	if (udata) {
		struct bnxt_re_srq_resp resp;

		resp.srqid = srq->qplib_srq.id;
		rc = ib_copy_to_udata(udata, &resp, sizeof(resp));
		if (rc) {
			ibdev_err(&rdev->ibdev, "SRQ copy to udata failed!");
			bnxt_qplib_destroy_srq(&rdev->qplib_res,
					       &srq->qplib_srq);
			goto fail;
		}
	}
	if (nq)
		nq->budget++;
	atomic_inc(&rdev->srq_count);

	return 0;

fail:
	ib_umem_release(srq->umem);
exit:
	return rc;
}

int bnxt_re_modify_srq(struct ib_srq *ib_srq, struct ib_srq_attr *srq_attr,
		       enum ib_srq_attr_mask srq_attr_mask,
		       struct ib_udata *udata)
{
	struct bnxt_re_srq *srq = container_of(ib_srq, struct bnxt_re_srq,
					       ib_srq);
	struct bnxt_re_dev *rdev = srq->rdev;
	int rc;

	switch (srq_attr_mask) {
	case IB_SRQ_MAX_WR:
		/* SRQ resize is not supported */
		break;
	case IB_SRQ_LIMIT:
		/* Change the SRQ threshold */
		if (srq_attr->srq_limit > srq->qplib_srq.max_wqe)
			return -EINVAL;

		srq->qplib_srq.threshold = srq_attr->srq_limit;
		rc = bnxt_qplib_modify_srq(&rdev->qplib_res, &srq->qplib_srq);
		if (rc) {
			ibdev_err(&rdev->ibdev, "Modify HW SRQ failed!");
			return rc;
		}
		/* On success, update the shadow */
		srq->srq_limit = srq_attr->srq_limit;
		/* No need to Build and send response back to udata */
		break;
	default:
		ibdev_err(&rdev->ibdev,
			  "Unsupported srq_attr_mask 0x%x", srq_attr_mask);
		return -EINVAL;
	}
	return 0;
}

int bnxt_re_query_srq(struct ib_srq *ib_srq, struct ib_srq_attr *srq_attr)
{
	struct bnxt_re_srq *srq = container_of(ib_srq, struct bnxt_re_srq,
					       ib_srq);
	struct bnxt_re_srq tsrq;
	struct bnxt_re_dev *rdev = srq->rdev;
	int rc;

	/* Get live SRQ attr */
	tsrq.qplib_srq.id = srq->qplib_srq.id;
	rc = bnxt_qplib_query_srq(&rdev->qplib_res, &tsrq.qplib_srq);
	if (rc) {
		ibdev_err(&rdev->ibdev, "Query HW SRQ failed!");
		return rc;
	}
	srq_attr->max_wr = srq->qplib_srq.max_wqe;
	srq_attr->max_sge = srq->qplib_srq.max_sge;
	srq_attr->srq_limit = tsrq.qplib_srq.threshold;

	return 0;
}

int bnxt_re_post_srq_recv(struct ib_srq *ib_srq, const struct ib_recv_wr *wr,
			  const struct ib_recv_wr **bad_wr)
{
	struct bnxt_re_srq *srq = container_of(ib_srq, struct bnxt_re_srq,
					       ib_srq);
	struct bnxt_qplib_swqe wqe;
	unsigned long flags;
	int rc = 0;

	spin_lock_irqsave(&srq->lock, flags);
	while (wr) {
		/* Transcribe each ib_recv_wr to qplib_swqe */
		wqe.num_sge = wr->num_sge;
		bnxt_re_build_sgl(wr->sg_list, wqe.sg_list, wr->num_sge);
		wqe.wr_id = wr->wr_id;
		wqe.type = BNXT_QPLIB_SWQE_TYPE_RECV;

		rc = bnxt_qplib_post_srq_recv(&srq->qplib_srq, &wqe);
		if (rc) {
			*bad_wr = wr;
			break;
		}
		wr = wr->next;
	}
	spin_unlock_irqrestore(&srq->lock, flags);

	return rc;
}

static int bnxt_re_modify_shadow_qp(struct bnxt_re_dev *rdev,
				    struct bnxt_re_qp *qp1_qp,
				    int qp_attr_mask)
{
	struct bnxt_re_qp *qp = rdev->gsi_ctx.gsi_sqp;
	int rc = 0;

	if (qp_attr_mask & IB_QP_STATE) {
		qp->qplib_qp.modify_flags |= CMDQ_MODIFY_QP_MODIFY_MASK_STATE;
		qp->qplib_qp.state = qp1_qp->qplib_qp.state;
	}
	if (qp_attr_mask & IB_QP_PKEY_INDEX) {
		qp->qplib_qp.modify_flags |= CMDQ_MODIFY_QP_MODIFY_MASK_PKEY;
		qp->qplib_qp.pkey_index = qp1_qp->qplib_qp.pkey_index;
	}

	if (qp_attr_mask & IB_QP_QKEY) {
		qp->qplib_qp.modify_flags |= CMDQ_MODIFY_QP_MODIFY_MASK_QKEY;
		/* Using a Random QKEY */
		qp->qplib_qp.qkey = 0x81818181;
	}
	if (qp_attr_mask & IB_QP_SQ_PSN) {
		qp->qplib_qp.modify_flags |= CMDQ_MODIFY_QP_MODIFY_MASK_SQ_PSN;
		qp->qplib_qp.sq.psn = qp1_qp->qplib_qp.sq.psn;
	}

	rc = bnxt_qplib_modify_qp(&rdev->qplib_res, &qp->qplib_qp);
	if (rc)
		ibdev_err(&rdev->ibdev, "Failed to modify Shadow QP for QP1");
	return rc;
}

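/* Modify QP: translate ib_qp_attr/mask into qplib modify_flags and push
 * the change to FW; the GSI shadow QP is kept in sync afterwards.
 */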
int bnxt_re_modify_qp(struct ib_qp *ib_qp, struct ib_qp_attr *qp_attr,
		      int qp_attr_mask, struct ib_udata *udata)
{
	struct bnxt_re_qp *qp = container_of(ib_qp, struct bnxt_re_qp, ib_qp);
	struct bnxt_re_dev *rdev = qp->rdev;
	struct bnxt_qplib_dev_attr *dev_attr = &rdev->dev_attr;
	enum ib_qp_state curr_qp_state, new_qp_state;
	int rc, entries;
	unsigned int flags;
	u8 nw_type;

	qp->qplib_qp.modify_flags = 0;
	if (qp_attr_mask & IB_QP_STATE) {
		curr_qp_state = __to_ib_qp_state(qp->qplib_qp.cur_qp_state);
		new_qp_state = qp_attr->qp_state;
		if (!ib_modify_qp_is_ok(curr_qp_state, new_qp_state,
					ib_qp->qp_type, qp_attr_mask)) {
			ibdev_err(&rdev->ibdev,
				  "Invalid attribute mask: %#x specified ",
				  qp_attr_mask);
			ibdev_err(&rdev->ibdev,
				  "for qpn: %#x type: %#x",
				  ib_qp->qp_num, ib_qp->qp_type);
			ibdev_err(&rdev->ibdev,
				  "curr_qp_state=0x%x, new_qp_state=0x%x\n",
				  curr_qp_state, new_qp_state);
			return -EINVAL;
		}
		qp->qplib_qp.modify_flags |= CMDQ_MODIFY_QP_MODIFY_MASK_STATE;
		qp->qplib_qp.state = __from_ib_qp_state(qp_attr->qp_state);

		if (!qp->sumem &&
		    qp->qplib_qp.state == CMDQ_MODIFY_QP_NEW_STATE_ERR) {
			ibdev_dbg(&rdev->ibdev,
				  "Move QP = %p to flush list\n", qp);
			flags = bnxt_re_lock_cqs(qp);
			bnxt_qplib_add_flush_qp(&qp->qplib_qp);
			bnxt_re_unlock_cqs(qp, flags);
		}
		if (!qp->sumem &&
		    qp->qplib_qp.state == CMDQ_MODIFY_QP_NEW_STATE_RESET) {
			ibdev_dbg(&rdev->ibdev,
				  "Move QP = %p out of flush list\n", qp);
			flags = bnxt_re_lock_cqs(qp);
			bnxt_qplib_clean_qp(&qp->qplib_qp);
			bnxt_re_unlock_cqs(qp, flags);
		}
	}
	if (qp_attr_mask & IB_QP_EN_SQD_ASYNC_NOTIFY) {
		qp->qplib_qp.modify_flags |=
				CMDQ_MODIFY_QP_MODIFY_MASK_EN_SQD_ASYNC_NOTIFY;
		qp->qplib_qp.en_sqd_async_notify = true;
	}
	if (qp_attr_mask & IB_QP_ACCESS_FLAGS) {
		qp->qplib_qp.modify_flags |= CMDQ_MODIFY_QP_MODIFY_MASK_ACCESS;
		qp->qplib_qp.access =
			__from_ib_access_flags(qp_attr->qp_access_flags);
		/* LOCAL_WRITE access must be set to allow RC receive */
		qp->qplib_qp.access |= BNXT_QPLIB_ACCESS_LOCAL_WRITE;
		/* Temp: Set all params on QP as of now */
		qp->qplib_qp.access |= CMDQ_MODIFY_QP_ACCESS_REMOTE_WRITE;
		qp->qplib_qp.access |= CMDQ_MODIFY_QP_ACCESS_REMOTE_READ;
	}
	if (qp_attr_mask & IB_QP_PKEY_INDEX) {
		qp->qplib_qp.modify_flags |= CMDQ_MODIFY_QP_MODIFY_MASK_PKEY;
		qp->qplib_qp.pkey_index = qp_attr->pkey_index;
	}
	if (qp_attr_mask & IB_QP_QKEY) {
		qp->qplib_qp.modify_flags |= CMDQ_MODIFY_QP_MODIFY_MASK_QKEY;
		qp->qplib_qp.qkey = qp_attr->qkey;
	}
	if (qp_attr_mask & IB_QP_AV) {
		const struct ib_global_route *grh =
			rdma_ah_read_grh(&qp_attr->ah_attr);
		const struct ib_gid_attr *sgid_attr;
		struct bnxt_re_gid_ctx *ctx;

		qp->qplib_qp.modify_flags |= CMDQ_MODIFY_QP_MODIFY_MASK_DGID |
				     CMDQ_MODIFY_QP_MODIFY_MASK_FLOW_LABEL |
				     CMDQ_MODIFY_QP_MODIFY_MASK_SGID_INDEX |
				     CMDQ_MODIFY_QP_MODIFY_MASK_HOP_LIMIT |
				     CMDQ_MODIFY_QP_MODIFY_MASK_TRAFFIC_CLASS |
				     CMDQ_MODIFY_QP_MODIFY_MASK_DEST_MAC |
				     CMDQ_MODIFY_QP_MODIFY_MASK_VLAN_ID;
		memcpy(qp->qplib_qp.ah.dgid.data, grh->dgid.raw,
		       sizeof(qp->qplib_qp.ah.dgid.data));
		qp->qplib_qp.ah.flow_label = grh->flow_label;
		sgid_attr = grh->sgid_attr;
		/* Get the HW context of the GID. The reference
		 * of GID table entry is already taken by the caller.
		 */
		ctx = rdma_read_gid_hw_context(sgid_attr);
		qp->qplib_qp.ah.sgid_index = ctx->idx;
		qp->qplib_qp.ah.host_sgid_index = grh->sgid_index;
		qp->qplib_qp.ah.hop_limit = grh->hop_limit;
		qp->qplib_qp.ah.traffic_class = grh->traffic_class;
		qp->qplib_qp.ah.sl = rdma_ah_get_sl(&qp_attr->ah_attr);
		ether_addr_copy(qp->qplib_qp.ah.dmac,
				qp_attr->ah_attr.roce.dmac);

		rc = rdma_read_gid_l2_fields(sgid_attr, NULL,
					     &qp->qplib_qp.smac[0]);
		if (rc)
			return rc;

		nw_type = rdma_gid_attr_network_type(sgid_attr);
		switch (nw_type) {
		case RDMA_NETWORK_IPV4:
			qp->qplib_qp.nw_type =
				CMDQ_MODIFY_QP_NETWORK_TYPE_ROCEV2_IPV4;
			break;
		case RDMA_NETWORK_IPV6:
			qp->qplib_qp.nw_type =
				CMDQ_MODIFY_QP_NETWORK_TYPE_ROCEV2_IPV6;
			break;
		default:
			qp->qplib_qp.nw_type =
				CMDQ_MODIFY_QP_NETWORK_TYPE_ROCEV1;
			break;
		}
	}

	if (qp_attr_mask & IB_QP_PATH_MTU) {
		qp->qplib_qp.modify_flags |=
				CMDQ_MODIFY_QP_MODIFY_MASK_PATH_MTU;
		qp->qplib_qp.path_mtu = __from_ib_mtu(qp_attr->path_mtu);
		qp->qplib_qp.mtu = ib_mtu_enum_to_int(qp_attr->path_mtu);
	} else if (qp_attr->qp_state == IB_QPS_RTR) {
		qp->qplib_qp.modify_flags |=
				CMDQ_MODIFY_QP_MODIFY_MASK_PATH_MTU;
		qp->qplib_qp.path_mtu =
			__from_ib_mtu(iboe_get_mtu(rdev->netdev->mtu));
		qp->qplib_qp.mtu =
			ib_mtu_enum_to_int(iboe_get_mtu(rdev->netdev->mtu));
	}

	if (qp_attr_mask & IB_QP_TIMEOUT) {
		qp->qplib_qp.modify_flags |= CMDQ_MODIFY_QP_MODIFY_MASK_TIMEOUT;
		qp->qplib_qp.timeout = qp_attr->timeout;
	}
	if (qp_attr_mask & IB_QP_RETRY_CNT) {
		qp->qplib_qp.modify_flags |=
				CMDQ_MODIFY_QP_MODIFY_MASK_RETRY_CNT;
		qp->qplib_qp.retry_cnt = qp_attr->retry_cnt;
	}
	if (qp_attr_mask & IB_QP_RNR_RETRY) {
		qp->qplib_qp.modify_flags |=
				CMDQ_MODIFY_QP_MODIFY_MASK_RNR_RETRY;
		qp->qplib_qp.rnr_retry = qp_attr->rnr_retry;
	}
	if (qp_attr_mask & IB_QP_MIN_RNR_TIMER) {
		qp->qplib_qp.modify_flags |=
				CMDQ_MODIFY_QP_MODIFY_MASK_MIN_RNR_TIMER;
		qp->qplib_qp.min_rnr_timer = qp_attr->min_rnr_timer;
	}
	if (qp_attr_mask & IB_QP_RQ_PSN) {
		qp->qplib_qp.modify_flags |= CMDQ_MODIFY_QP_MODIFY_MASK_RQ_PSN;
		qp->qplib_qp.rq.psn = qp_attr->rq_psn;
	}
	if (qp_attr_mask & IB_QP_MAX_QP_RD_ATOMIC) {
		qp->qplib_qp.modify_flags |=
				CMDQ_MODIFY_QP_MODIFY_MASK_MAX_RD_ATOMIC;
		/* Cap the max_rd_atomic to device max */
		qp->qplib_qp.max_rd_atomic = min_t(u32, qp_attr->max_rd_atomic,
						   dev_attr->max_qp_rd_atom);
	}
	if (qp_attr_mask & IB_QP_SQ_PSN) {
		qp->qplib_qp.modify_flags |= CMDQ_MODIFY_QP_MODIFY_MASK_SQ_PSN;
		qp->qplib_qp.sq.psn = qp_attr->sq_psn;
	}
	if (qp_attr_mask & IB_QP_MAX_DEST_RD_ATOMIC) {
		if (qp_attr->max_dest_rd_atomic >
		    dev_attr->max_qp_init_rd_atom) {
			ibdev_err(&rdev->ibdev,
				  "max_dest_rd_atomic requested %d is > dev_max %d",
				  qp_attr->max_dest_rd_atomic,
				  dev_attr->max_qp_init_rd_atom);
			return -EINVAL;
		}

		qp->qplib_qp.modify_flags |=
				CMDQ_MODIFY_QP_MODIFY_MASK_MAX_DEST_RD_ATOMIC;
		qp->qplib_qp.max_dest_rd_atomic = qp_attr->max_dest_rd_atomic;
	}
	if (qp_attr_mask & IB_QP_CAP) {
		qp->qplib_qp.modify_flags |=
				CMDQ_MODIFY_QP_MODIFY_MASK_SQ_SIZE |
				CMDQ_MODIFY_QP_MODIFY_MASK_RQ_SIZE |
				CMDQ_MODIFY_QP_MODIFY_MASK_SQ_SGE |
				CMDQ_MODIFY_QP_MODIFY_MASK_RQ_SGE |
				CMDQ_MODIFY_QP_MODIFY_MASK_MAX_INLINE_DATA;
		if ((qp_attr->cap.max_send_wr >= dev_attr->max_qp_wqes) ||
		    (qp_attr->cap.max_recv_wr >= dev_attr->max_qp_wqes) ||
		    (qp_attr->cap.max_send_sge >= dev_attr->max_qp_sges) ||
		    (qp_attr->cap.max_recv_sge >= dev_attr->max_qp_sges) ||
		    (qp_attr->cap.max_inline_data >=
						dev_attr->max_inline_data)) {
			ibdev_err(&rdev->ibdev,
				  "Modify QP failed - max exceeded");
			return -EINVAL;
		}
		entries = roundup_pow_of_two(qp_attr->cap.max_send_wr);
		qp->qplib_qp.sq.max_wqe = min_t(u32, entries,
						dev_attr->max_qp_wqes + 1);
		qp->qplib_qp.sq.q_full_delta = qp->qplib_qp.sq.max_wqe -
						qp_attr->cap.max_send_wr;
		/* Reserving one slot for Phantom WQE. Application can
		 * post one extra entry in this case. But allowing this to avoid
		 * unexpected Queue full condition
		 */
		qp->qplib_qp.sq.q_full_delta -= 1;
		qp->qplib_qp.sq.max_sge = qp_attr->cap.max_send_sge;
		if (qp->qplib_qp.rq.max_wqe) {
			entries = roundup_pow_of_two(qp_attr->cap.max_recv_wr);
			qp->qplib_qp.rq.max_wqe =
				min_t(u32, entries, dev_attr->max_qp_wqes + 1);
			qp->qplib_qp.rq.q_full_delta = qp->qplib_qp.rq.max_wqe -
						       qp_attr->cap.max_recv_wr;
			qp->qplib_qp.rq.max_sge = qp_attr->cap.max_recv_sge;
		} else {
			/* SRQ was used prior, just ignore the RQ caps */
		}
	}
	if (qp_attr_mask & IB_QP_DEST_QPN) {
		qp->qplib_qp.modify_flags |=
				CMDQ_MODIFY_QP_MODIFY_MASK_DEST_QP_ID;
		qp->qplib_qp.dest_qpn = qp_attr->dest_qp_num;
	}
	rc = bnxt_qplib_modify_qp(&rdev->qplib_res, &qp->qplib_qp);
	if (rc) {
		ibdev_err(&rdev->ibdev, "Failed to modify HW QP");
		return rc;
	}
	if (ib_qp->qp_type == IB_QPT_GSI && rdev->gsi_ctx.gsi_sqp)
		rc = bnxt_re_modify_shadow_qp(rdev, qp, qp_attr_mask);
	return rc;
}

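/* Query the live QP state from HW and translate it back into IB attrs. */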
int bnxt_re_query_qp(struct ib_qp *ib_qp, struct ib_qp_attr *qp_attr,
		     int qp_attr_mask, struct ib_qp_init_attr *qp_init_attr)
{
	struct bnxt_re_qp *qp = container_of(ib_qp, struct bnxt_re_qp, ib_qp);
	struct bnxt_re_dev *rdev = qp->rdev;
	struct bnxt_qplib_qp *qplib_qp;
	int rc;

	qplib_qp = kzalloc(sizeof(*qplib_qp), GFP_KERNEL);
	if (!qplib_qp)
		return -ENOMEM;

	qplib_qp->id = qp->qplib_qp.id;
	qplib_qp->ah.host_sgid_index = qp->qplib_qp.ah.host_sgid_index;

	rc = bnxt_qplib_query_qp(&rdev->qplib_res, qplib_qp);
	if (rc) {
		ibdev_err(&rdev->ibdev, "Failed to query HW QP");
		goto out;
	}
	qp_attr->qp_state = __to_ib_qp_state(qplib_qp->state);
	qp_attr->en_sqd_async_notify = qplib_qp->en_sqd_async_notify ? 1 : 0;
	qp_attr->qp_access_flags = __to_ib_access_flags(qplib_qp->access);
	qp_attr->pkey_index = qplib_qp->pkey_index;
	qp_attr->qkey = qplib_qp->qkey;
	qp_attr->ah_attr.type = RDMA_AH_ATTR_TYPE_ROCE;
	rdma_ah_set_grh(&qp_attr->ah_attr, NULL, qplib_qp->ah.flow_label,
			qplib_qp->ah.host_sgid_index,
			qplib_qp->ah.hop_limit,
			qplib_qp->ah.traffic_class);
	rdma_ah_set_dgid_raw(&qp_attr->ah_attr, qplib_qp->ah.dgid.data);
	rdma_ah_set_sl(&qp_attr->ah_attr, qplib_qp->ah.sl);
	ether_addr_copy(qp_attr->ah_attr.roce.dmac, qplib_qp->ah.dmac);
	qp_attr->path_mtu = __to_ib_mtu(qplib_qp->path_mtu);
	qp_attr->timeout = qplib_qp->timeout;
	qp_attr->retry_cnt = qplib_qp->retry_cnt;
	qp_attr->rnr_retry = qplib_qp->rnr_retry;
	qp_attr->min_rnr_timer = qplib_qp->min_rnr_timer;
	qp_attr->rq_psn = qplib_qp->rq.psn;
	qp_attr->max_rd_atomic = qplib_qp->max_rd_atomic;
	qp_attr->sq_psn = qplib_qp->sq.psn;
	qp_attr->max_dest_rd_atomic = qplib_qp->max_dest_rd_atomic;
	qp_init_attr->sq_sig_type = qplib_qp->sig_type ? IB_SIGNAL_ALL_WR :
							 IB_SIGNAL_REQ_WR;
	qp_attr->dest_qp_num = qplib_qp->dest_qpn;

	qp_attr->cap.max_send_wr = qp->qplib_qp.sq.max_wqe;
	qp_attr->cap.max_send_sge = qp->qplib_qp.sq.max_sge;
	qp_attr->cap.max_recv_wr = qp->qplib_qp.rq.max_wqe;
	qp_attr->cap.max_recv_sge = qp->qplib_qp.rq.max_sge;
	qp_attr->cap.max_inline_data = qp->qplib_qp.max_inline_data;
	qp_init_attr->cap = qp_attr->cap;

out:
	kfree(qplib_qp);
	return rc;
}
2040
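/* Routine for building and sending QP1 (GSI) packets over RoCE v1 and v2 */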
static int bnxt_re_build_qp1_send_v2(struct bnxt_re_qp *qp,
				     const struct ib_send_wr *wr,
				     struct bnxt_qplib_swqe *wqe,
				     int payload_size)
{
	struct bnxt_re_ah *ah = container_of(ud_wr(wr)->ah, struct bnxt_re_ah,
					     ib_ah);
	struct bnxt_qplib_ah *qplib_ah = &ah->qplib_ah;
	const struct ib_gid_attr *sgid_attr = ah->ib_ah.sgid_attr;
	struct bnxt_qplib_sge sge;
	u8 nw_type;
	u16 ether_type;
	union ib_gid dgid;
	bool is_eth = false;
	bool is_vlan = false;
	bool is_grh = false;
	bool is_udp = false;
	u8 ip_version = 0;
	u16 vlan_id = 0xFFFF;
	void *buf;
	int i, rc = 0;

	memset(&qp->qp1_hdr, 0, sizeof(qp->qp1_hdr));

	rc = rdma_read_gid_l2_fields(sgid_attr, &vlan_id, NULL);
	if (rc)
		return rc;

	/* Get network header type for this GID */
	nw_type = rdma_gid_attr_network_type(sgid_attr);
	switch (nw_type) {
	case RDMA_NETWORK_IPV4:
		nw_type = BNXT_RE_ROCEV2_IPV4_PACKET;
		break;
	case RDMA_NETWORK_IPV6:
		nw_type = BNXT_RE_ROCEV2_IPV6_PACKET;
		break;
	default:
		nw_type = BNXT_RE_ROCE_V1_PACKET;
		break;
	}
	memcpy(&dgid.raw, &qplib_ah->dgid, 16);
	is_udp = sgid_attr->gid_type == IB_GID_TYPE_ROCE_UDP_ENCAP;
	if (is_udp) {
		if (ipv6_addr_v4mapped((struct in6_addr *)&sgid_attr->gid)) {
			ip_version = 4;
			ether_type = ETH_P_IP;
		} else {
			ip_version = 6;
			ether_type = ETH_P_IPV6;
		}
		is_grh = false;
	} else {
		ether_type = ETH_P_IBOE;
		is_grh = true;
	}

	is_eth = true;
	is_vlan = vlan_id && (vlan_id < 0x1000);

	ib_ud_header_init(payload_size, !is_eth, is_eth, is_vlan, is_grh,
			  ip_version, is_udp, 0, &qp->qp1_hdr);

	/* ETH */
	ether_addr_copy(qp->qp1_hdr.eth.dmac_h, ah->qplib_ah.dmac);
	ether_addr_copy(qp->qp1_hdr.eth.smac_h, qp->qplib_qp.smac);

	/* For vlan, check the sgid for vlan existence */
	if (!is_vlan) {
		qp->qp1_hdr.eth.type = cpu_to_be16(ether_type);
	} else {
		qp->qp1_hdr.vlan.type = cpu_to_be16(ether_type);
		qp->qp1_hdr.vlan.tag = cpu_to_be16(vlan_id);
	}

	if (is_grh || (ip_version == 6)) {
		memcpy(qp->qp1_hdr.grh.source_gid.raw, sgid_attr->gid.raw,
		       sizeof(sgid_attr->gid));
		memcpy(qp->qp1_hdr.grh.destination_gid.raw, qplib_ah->dgid.data,
		       sizeof(sgid_attr->gid));
		qp->qp1_hdr.grh.hop_limit = qplib_ah->hop_limit;
	}

	if (ip_version == 4) {
		qp->qp1_hdr.ip4.tos = 0;
		qp->qp1_hdr.ip4.id = 0;
		qp->qp1_hdr.ip4.frag_off = htons(IP_DF);
		qp->qp1_hdr.ip4.ttl = qplib_ah->hop_limit;

		memcpy(&qp->qp1_hdr.ip4.saddr, sgid_attr->gid.raw + 12, 4);
		memcpy(&qp->qp1_hdr.ip4.daddr, qplib_ah->dgid.data + 12, 4);
		qp->qp1_hdr.ip4.check = ib_ud_ip4_csum(&qp->qp1_hdr);
	}

	if (is_udp) {
		qp->qp1_hdr.udp.dport = htons(ROCE_V2_UDP_DPORT);
		qp->qp1_hdr.udp.sport = htons(0x8CD1);
		qp->qp1_hdr.udp.csum = 0;
	}

	/* BTH */
	if (wr->opcode == IB_WR_SEND_WITH_IMM) {
		qp->qp1_hdr.bth.opcode = IB_OPCODE_UD_SEND_ONLY_WITH_IMMEDIATE;
		qp->qp1_hdr.immediate_present = 1;
	} else {
		qp->qp1_hdr.bth.opcode = IB_OPCODE_UD_SEND_ONLY;
	}
	if (wr->send_flags & IB_SEND_SOLICITED)
		qp->qp1_hdr.bth.solicited_event = 1;
	/* Pad the payload to a 4-byte boundary */
	qp->qp1_hdr.bth.pad_count = (4 - payload_size) & 3;

	/* P_key for QP1 is for all members */
	qp->qp1_hdr.bth.pkey = cpu_to_be16(0xFFFF);
	qp->qp1_hdr.bth.destination_qpn = IB_QP1;
	qp->qp1_hdr.bth.ack_req = 0;
	qp->send_psn++;
	qp->send_psn &= BTH_PSN_MASK;
	qp->qp1_hdr.bth.psn = cpu_to_be32(qp->send_psn);
	/* DETH */
	/* Use the privileged Q_Key for QP1 */
	qp->qp1_hdr.deth.qkey = cpu_to_be32(IB_QP1_QKEY);
	qp->qp1_hdr.deth.source_qpn = IB_QP1;

	/* Pack the QP1 header into the transmit buffer */
	buf = bnxt_qplib_get_qp1_sq_buf(&qp->qplib_qp, &sge);
	if (buf) {
		ib_ud_header_pack(&qp->qp1_hdr, buf);
		/* Shift the payload SGEs right to make room for the header */
		for (i = wqe->num_sge; i; i--) {
			wqe->sg_list[i].addr = wqe->sg_list[i - 1].addr;
			wqe->sg_list[i].lkey = wqe->sg_list[i - 1].lkey;
			wqe->sg_list[i].size = wqe->sg_list[i - 1].size;
		}

		/*
		 * The QP1 SQ header buffer is sized for the largest header
		 * (RoCE v2 IPv6). Trim the first SGE for the smaller
		 * variants: an IPv4 header is 20 bytes shorter than IPv6.
		 */
		if (is_udp && ip_version == 4)
			sge.size -= 20;
		/*
		 * RoCE v1 carries a GRH(40) instead of IPv6(40) + UDP(8),
		 * i.e. 8 bytes less.
		 */
		if (!is_udp)
			sge.size -= 8;

		/* Untagged frame: no VLAN header, 4 bytes less */
		if (!is_vlan)
			sge.size -= 4;

		wqe->sg_list[0].addr = sge.addr;
		wqe->sg_list[0].lkey = sge.lkey;
		wqe->sg_list[0].size = sge.size;
		wqe->num_sge++;

	} else {
		ibdev_err(&qp->rdev->ibdev, "QP1 buffer is empty!");
		rc = -ENOMEM;
	}
	return rc;
}

/* For the MAD layer, it only provides the recv SGE the size of
 * ib_grh + MAD datagram. No Ethernet headers, Ethertype, BTH, DETH,
 * nor RoCE iCRC. The driver must provide a buffer for the entire
 * receive packet (334 bytes) with no VLAN and then copy the GRH
 * and the MAD datagram out to the provided SGE.
 */
static int bnxt_re_build_qp1_shadow_qp_recv(struct bnxt_re_qp *qp,
					    const struct ib_recv_wr *wr,
					    struct bnxt_qplib_swqe *wqe,
					    int payload_size)
{
	struct bnxt_re_sqp_entries *sqp_entry;
	struct bnxt_qplib_sge ref, sge;
	struct bnxt_re_dev *rdev;
	u32 rq_prod_index;

	rdev = qp->rdev;

	rq_prod_index = bnxt_qplib_get_rq_prod_index(&qp->qplib_qp);

	if (!bnxt_qplib_get_qp1_rq_buf(&qp->qplib_qp, &sge))
		return -ENOMEM;

	/* Create 1 SGE to receive the entire
	 * ethernet packet
	 */
	/* Save the original SGE provided by the caller */
	ref.addr = wqe->sg_list[0].addr;
	ref.lkey = wqe->sg_list[0].lkey;
	ref.size = wqe->sg_list[0].size;

	sqp_entry = &rdev->gsi_ctx.sqp_tbl[rq_prod_index];

	/* SGE 1: point the WQE at the driver's QP1 header buffer */
	wqe->sg_list[0].addr = sge.addr;
	wqe->sg_list[0].lkey = sge.lkey;
	wqe->sg_list[0].size = BNXT_QPLIB_MAX_QP1_RQ_HDR_SIZE_V2;
	sge.size -= wqe->sg_list[0].size;

	sqp_entry->sge.addr = ref.addr;
	sqp_entry->sge.lkey = ref.lkey;
	sqp_entry->sge.size = ref.size;
	/* Store the wrid for reporting completion */
	sqp_entry->wrid = wqe->wr_id;
	/* change the wqe->wrid to table index */
	wqe->wr_id = rq_prod_index;
	return 0;
}

static int is_ud_qp(struct bnxt_re_qp *qp)
{
	return (qp->qplib_qp.type == CMDQ_CREATE_QP_TYPE_UD ||
		qp->qplib_qp.type == CMDQ_CREATE_QP_TYPE_GSI);
}

static int bnxt_re_build_send_wqe(struct bnxt_re_qp *qp,
				  const struct ib_send_wr *wr,
				  struct bnxt_qplib_swqe *wqe)
{
	struct bnxt_re_ah *ah = NULL;

	if (is_ud_qp(qp)) {
		ah = container_of(ud_wr(wr)->ah, struct bnxt_re_ah, ib_ah);
		wqe->send.q_key = ud_wr(wr)->remote_qkey;
		wqe->send.dst_qp = ud_wr(wr)->remote_qpn;
		wqe->send.avid = ah->qplib_ah.id;
	}
	switch (wr->opcode) {
	case IB_WR_SEND:
		wqe->type = BNXT_QPLIB_SWQE_TYPE_SEND;
		break;
	case IB_WR_SEND_WITH_IMM:
		wqe->type = BNXT_QPLIB_SWQE_TYPE_SEND_WITH_IMM;
		wqe->send.imm_data = wr->ex.imm_data;
		break;
	case IB_WR_SEND_WITH_INV:
		wqe->type = BNXT_QPLIB_SWQE_TYPE_SEND_WITH_INV;
		wqe->send.inv_key = wr->ex.invalidate_rkey;
		break;
	default:
		return -EINVAL;
	}
	if (wr->send_flags & IB_SEND_SIGNALED)
		wqe->flags |= BNXT_QPLIB_SWQE_FLAGS_SIGNAL_COMP;
	if (wr->send_flags & IB_SEND_FENCE)
		wqe->flags |= BNXT_QPLIB_SWQE_FLAGS_UC_FENCE;
	if (wr->send_flags & IB_SEND_SOLICITED)
		wqe->flags |= BNXT_QPLIB_SWQE_FLAGS_SOLICIT_EVENT;
	if (wr->send_flags & IB_SEND_INLINE)
		wqe->flags |= BNXT_QPLIB_SWQE_FLAGS_INLINE;

	return 0;
}

static int bnxt_re_build_rdma_wqe(const struct ib_send_wr *wr,
				  struct bnxt_qplib_swqe *wqe)
{
	switch (wr->opcode) {
	case IB_WR_RDMA_WRITE:
		wqe->type = BNXT_QPLIB_SWQE_TYPE_RDMA_WRITE;
		break;
	case IB_WR_RDMA_WRITE_WITH_IMM:
		wqe->type = BNXT_QPLIB_SWQE_TYPE_RDMA_WRITE_WITH_IMM;
		wqe->rdma.imm_data = wr->ex.imm_data;
		break;
	case IB_WR_RDMA_READ:
		wqe->type = BNXT_QPLIB_SWQE_TYPE_RDMA_READ;
		wqe->rdma.inv_key = wr->ex.invalidate_rkey;
		break;
	default:
		return -EINVAL;
	}
	wqe->rdma.remote_va = rdma_wr(wr)->remote_addr;
	wqe->rdma.r_key = rdma_wr(wr)->rkey;
	if (wr->send_flags & IB_SEND_SIGNALED)
		wqe->flags |= BNXT_QPLIB_SWQE_FLAGS_SIGNAL_COMP;
	if (wr->send_flags & IB_SEND_FENCE)
		wqe->flags |= BNXT_QPLIB_SWQE_FLAGS_UC_FENCE;
	if (wr->send_flags & IB_SEND_SOLICITED)
		wqe->flags |= BNXT_QPLIB_SWQE_FLAGS_SOLICIT_EVENT;
	if (wr->send_flags & IB_SEND_INLINE)
		wqe->flags |= BNXT_QPLIB_SWQE_FLAGS_INLINE;

	return 0;
}

static int bnxt_re_build_atomic_wqe(const struct ib_send_wr *wr,
				    struct bnxt_qplib_swqe *wqe)
{
	switch (wr->opcode) {
	case IB_WR_ATOMIC_CMP_AND_SWP:
		wqe->type = BNXT_QPLIB_SWQE_TYPE_ATOMIC_CMP_AND_SWP;
		wqe->atomic.cmp_data = atomic_wr(wr)->compare_add;
		wqe->atomic.swap_data = atomic_wr(wr)->swap;
		break;
	case IB_WR_ATOMIC_FETCH_AND_ADD:
		wqe->type = BNXT_QPLIB_SWQE_TYPE_ATOMIC_FETCH_AND_ADD;
		wqe->atomic.cmp_data = atomic_wr(wr)->compare_add;
		break;
	default:
		return -EINVAL;
	}
	wqe->atomic.remote_va = atomic_wr(wr)->remote_addr;
	wqe->atomic.r_key = atomic_wr(wr)->rkey;
	if (wr->send_flags & IB_SEND_SIGNALED)
		wqe->flags |= BNXT_QPLIB_SWQE_FLAGS_SIGNAL_COMP;
	if (wr->send_flags & IB_SEND_FENCE)
		wqe->flags |= BNXT_QPLIB_SWQE_FLAGS_UC_FENCE;
	if (wr->send_flags & IB_SEND_SOLICITED)
		wqe->flags |= BNXT_QPLIB_SWQE_FLAGS_SOLICIT_EVENT;
	return 0;
}

static int bnxt_re_build_inv_wqe(const struct ib_send_wr *wr,
				 struct bnxt_qplib_swqe *wqe)
{
	wqe->type = BNXT_QPLIB_SWQE_TYPE_LOCAL_INV;
	wqe->local_inv.inv_l_key = wr->ex.invalidate_rkey;

	/* Need unconditional fence for local invalidate
	 * opcode to function as expected.
	 */
	wqe->flags |= BNXT_QPLIB_SWQE_FLAGS_UC_FENCE;

	if (wr->send_flags & IB_SEND_SIGNALED)
		wqe->flags |= BNXT_QPLIB_SWQE_FLAGS_SIGNAL_COMP;
	if (wr->send_flags & IB_SEND_SOLICITED)
		wqe->flags |= BNXT_QPLIB_SWQE_FLAGS_SOLICIT_EVENT;

	return 0;
}

static int bnxt_re_build_reg_wqe(const struct ib_reg_wr *wr,
				 struct bnxt_qplib_swqe *wqe)
{
	struct bnxt_re_mr *mr = container_of(wr->mr, struct bnxt_re_mr, ib_mr);
	struct bnxt_qplib_frpl *qplib_frpl = &mr->qplib_frpl;
	int access = wr->access;

	wqe->frmr.pbl_ptr = (__le64 *)qplib_frpl->hwq.pbl_ptr[0];
	wqe->frmr.pbl_dma_ptr = qplib_frpl->hwq.pbl_dma_ptr[0];
	wqe->frmr.page_list = mr->pages;
	wqe->frmr.page_list_len = mr->npages;
	wqe->frmr.levels = qplib_frpl->hwq.level;
	wqe->type = BNXT_QPLIB_SWQE_TYPE_REG_MR;

	/* Need unconditional fence for reg_mr
	 * opcode to function as expected.
	 */
	wqe->flags |= BNXT_QPLIB_SWQE_FLAGS_UC_FENCE;

	if (wr->wr.send_flags & IB_SEND_SIGNALED)
		wqe->flags |= BNXT_QPLIB_SWQE_FLAGS_SIGNAL_COMP;

	if (access & IB_ACCESS_LOCAL_WRITE)
		wqe->frmr.access_cntl |= SQ_FR_PMR_ACCESS_CNTL_LOCAL_WRITE;
	if (access & IB_ACCESS_REMOTE_READ)
		wqe->frmr.access_cntl |= SQ_FR_PMR_ACCESS_CNTL_REMOTE_READ;
	if (access & IB_ACCESS_REMOTE_WRITE)
		wqe->frmr.access_cntl |= SQ_FR_PMR_ACCESS_CNTL_REMOTE_WRITE;
	if (access & IB_ACCESS_REMOTE_ATOMIC)
		wqe->frmr.access_cntl |= SQ_FR_PMR_ACCESS_CNTL_REMOTE_ATOMIC;
	if (access & IB_ACCESS_MW_BIND)
		wqe->frmr.access_cntl |= SQ_FR_PMR_ACCESS_CNTL_WINDOW_BIND;

	wqe->frmr.l_key = wr->key;
	wqe->frmr.length = wr->mr->length;
	wqe->frmr.pbl_pg_sz_log = (wr->mr->page_size >> PAGE_SHIFT_4K) - 1;
	wqe->frmr.va = wr->mr->iova;
	return 0;
}

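/* Copy the inline payload from the WR's SGEs into the WQE's inline buffer */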
static int bnxt_re_copy_inline_data(struct bnxt_re_dev *rdev,
				    const struct ib_send_wr *wr,
				    struct bnxt_qplib_swqe *wqe)
{
	u8 *in_data;
	u32 i, sge_len;
	void *sge_addr;

	in_data = wqe->inline_data;
	for (i = 0; i < wr->num_sge; i++) {
		sge_addr = (void *)(unsigned long)
				wr->sg_list[i].addr;
		sge_len = wr->sg_list[i].length;

		if ((sge_len + wqe->inline_len) >
		    BNXT_QPLIB_SWQE_MAX_INLINE_LENGTH) {
			ibdev_err(&rdev->ibdev,
				  "Inline data size requested > supported value");
			return -EINVAL;
		}

		memcpy(in_data, sge_addr, sge_len);
		in_data += sge_len;
		wqe->inline_len += sge_len;
	}
	return wqe->inline_len;
}

static int bnxt_re_copy_wr_payload(struct bnxt_re_dev *rdev,
				   const struct ib_send_wr *wr,
				   struct bnxt_qplib_swqe *wqe)
{
	int payload_sz = 0;

	if (wr->send_flags & IB_SEND_INLINE)
		payload_sz = bnxt_re_copy_inline_data(rdev, wr, wqe);
	else
		payload_sz = bnxt_re_build_sgl(wr->sg_list, wqe->sg_list,
					       wqe->num_sge);

	return payload_sz;
}

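/*
 * Workaround (per the function name) for a HW stall on UD-type QPs: once
 * BNXT_RE_UD_QP_HW_STALL WQEs have been posted, nudge the QP back to RTS
 * via modify_qp and reset the WQE count.
 */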
static void bnxt_ud_qp_hw_stall_workaround(struct bnxt_re_qp *qp)
{
	if ((qp->ib_qp.qp_type == IB_QPT_UD ||
	     qp->ib_qp.qp_type == IB_QPT_GSI ||
	     qp->ib_qp.qp_type == IB_QPT_RAW_ETHERTYPE) &&
	    qp->qplib_qp.wqe_cnt == BNXT_RE_UD_QP_HW_STALL) {
		int qp_attr_mask;
		struct ib_qp_attr qp_attr;

		qp_attr_mask = IB_QP_STATE;
		qp_attr.qp_state = IB_QPS_RTS;
		bnxt_re_modify_qp(&qp->ib_qp, &qp_attr, qp_attr_mask, NULL);
		qp->qplib_qp.wqe_cnt = 0;
	}
}

static int bnxt_re_post_send_shadow_qp(struct bnxt_re_dev *rdev,
				       struct bnxt_re_qp *qp,
				       const struct ib_send_wr *wr)
{
	int rc = 0, payload_sz = 0;
	unsigned long flags;

	spin_lock_irqsave(&qp->sq_lock, flags);
	while (wr) {
		struct bnxt_qplib_swqe wqe = {};

		/* Common */
		wqe.num_sge = wr->num_sge;
		if (wr->num_sge > qp->qplib_qp.sq.max_sge) {
			ibdev_err(&rdev->ibdev,
				  "Limit exceeded for Send SGEs");
			rc = -EINVAL;
			goto bad;
		}

		payload_sz = bnxt_re_copy_wr_payload(qp->rdev, wr, &wqe);
		if (payload_sz < 0) {
			rc = -EINVAL;
			goto bad;
		}
		wqe.wr_id = wr->wr_id;

		wqe.type = BNXT_QPLIB_SWQE_TYPE_SEND;

		rc = bnxt_re_build_send_wqe(qp, wr, &wqe);
		if (!rc)
			rc = bnxt_qplib_post_send(&qp->qplib_qp, &wqe);
bad:
		if (rc) {
			ibdev_err(&rdev->ibdev,
				  "Post send failed opcode = %#x rc = %d",
				  wr->opcode, rc);
			break;
		}
		wr = wr->next;
	}
	bnxt_qplib_post_send_db(&qp->qplib_qp);
	bnxt_ud_qp_hw_stall_workaround(qp);
	spin_unlock_irqrestore(&qp->sq_lock, flags);
	return rc;
}

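/* Post a chain of send work requests; stops at the first WR that fails */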
int bnxt_re_post_send(struct ib_qp *ib_qp, const struct ib_send_wr *wr,
		      const struct ib_send_wr **bad_wr)
{
	struct bnxt_re_qp *qp = container_of(ib_qp, struct bnxt_re_qp, ib_qp);
	struct bnxt_qplib_swqe wqe;
	int rc = 0, payload_sz = 0;
	unsigned long flags;

	spin_lock_irqsave(&qp->sq_lock, flags);
	while (wr) {
		/* House keeping */
		memset(&wqe, 0, sizeof(wqe));

		/* Common */
		wqe.num_sge = wr->num_sge;
		if (wr->num_sge > qp->qplib_qp.sq.max_sge) {
			ibdev_err(&qp->rdev->ibdev,
				  "Limit exceeded for Send SGEs");
			rc = -EINVAL;
			goto bad;
		}

		payload_sz = bnxt_re_copy_wr_payload(qp->rdev, wr, &wqe);
		if (payload_sz < 0) {
			rc = -EINVAL;
			goto bad;
		}
		wqe.wr_id = wr->wr_id;

		switch (wr->opcode) {
		case IB_WR_SEND:
		case IB_WR_SEND_WITH_IMM:
			if (qp->qplib_qp.type == CMDQ_CREATE_QP1_TYPE_GSI) {
				rc = bnxt_re_build_qp1_send_v2(qp, wr, &wqe,
							       payload_sz);
				if (rc)
					goto bad;
				wqe.rawqp1.lflags |=
					SQ_SEND_RAWETH_QP1_LFLAGS_ROCE_CRC;
			}
			/*
			 * send_flags is a bitmask, so test the checksum
			 * offload bit rather than switching on the whole
			 * mask.
			 */
			if (wr->send_flags & IB_SEND_IP_CSUM)
				wqe.rawqp1.lflags |=
					SQ_SEND_RAWETH_QP1_LFLAGS_IP_CHKSUM;
			fallthrough;
		case IB_WR_SEND_WITH_INV:
			rc = bnxt_re_build_send_wqe(qp, wr, &wqe);
			break;
		case IB_WR_RDMA_WRITE:
		case IB_WR_RDMA_WRITE_WITH_IMM:
		case IB_WR_RDMA_READ:
			rc = bnxt_re_build_rdma_wqe(wr, &wqe);
			break;
		case IB_WR_ATOMIC_CMP_AND_SWP:
		case IB_WR_ATOMIC_FETCH_AND_ADD:
			rc = bnxt_re_build_atomic_wqe(wr, &wqe);
			break;
		case IB_WR_RDMA_READ_WITH_INV:
			ibdev_err(&qp->rdev->ibdev,
				  "RDMA Read with Invalidate is not supported");
			rc = -EINVAL;
			goto bad;
		case IB_WR_LOCAL_INV:
			rc = bnxt_re_build_inv_wqe(wr, &wqe);
			break;
		case IB_WR_REG_MR:
			rc = bnxt_re_build_reg_wqe(reg_wr(wr), &wqe);
			break;
		default:
			/* Unsupported WRs */
			ibdev_err(&qp->rdev->ibdev,
				  "WR (%#x) is not supported", wr->opcode);
			rc = -EINVAL;
			goto bad;
		}
		if (!rc)
			rc = bnxt_qplib_post_send(&qp->qplib_qp, &wqe);
bad:
		if (rc) {
			ibdev_err(&qp->rdev->ibdev,
				  "post_send failed op:%#x qps = %#x rc = %d\n",
				  wr->opcode, qp->qplib_qp.state, rc);
			*bad_wr = wr;
			break;
		}
		wr = wr->next;
	}
	bnxt_qplib_post_send_db(&qp->qplib_qp);
	bnxt_ud_qp_hw_stall_workaround(qp);
	spin_unlock_irqrestore(&qp->sq_lock, flags);

	return rc;
}

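/* Post receive buffers to the GSI shadow QP used for QP1 traffic */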
static int bnxt_re_post_recv_shadow_qp(struct bnxt_re_dev *rdev,
				       struct bnxt_re_qp *qp,
				       const struct ib_recv_wr *wr)
{
	struct bnxt_qplib_swqe wqe;
	int rc = 0;

	while (wr) {
		/* House keeping */
		memset(&wqe, 0, sizeof(wqe));

		/* Common */
		wqe.num_sge = wr->num_sge;
		if (wr->num_sge > qp->qplib_qp.rq.max_sge) {
			ibdev_err(&rdev->ibdev,
				  "Limit exceeded for Receive SGEs");
			rc = -EINVAL;
			break;
		}
		bnxt_re_build_sgl(wr->sg_list, wqe.sg_list, wr->num_sge);
		wqe.wr_id = wr->wr_id;
		wqe.type = BNXT_QPLIB_SWQE_TYPE_RECV;

		rc = bnxt_qplib_post_recv(&qp->qplib_qp, &wqe);
		if (rc)
			break;

		wr = wr->next;
	}
	if (!rc)
		bnxt_qplib_post_recv_db(&qp->qplib_qp);
	return rc;
}

int bnxt_re_post_recv(struct ib_qp *ib_qp, const struct ib_recv_wr *wr,
		      const struct ib_recv_wr **bad_wr)
{
	struct bnxt_re_qp *qp = container_of(ib_qp, struct bnxt_re_qp, ib_qp);
	struct bnxt_qplib_swqe wqe;
	int rc = 0, payload_sz = 0;
	unsigned long flags;
	u32 count = 0;

	spin_lock_irqsave(&qp->rq_lock, flags);
	while (wr) {
		/* House keeping */
		memset(&wqe, 0, sizeof(wqe));

		/* Common */
		wqe.num_sge = wr->num_sge;
		if (wr->num_sge > qp->qplib_qp.rq.max_sge) {
			ibdev_err(&qp->rdev->ibdev,
				  "Limit exceeded for Receive SGEs");
			rc = -EINVAL;
			*bad_wr = wr;
			break;
		}

		payload_sz = bnxt_re_build_sgl(wr->sg_list, wqe.sg_list,
					       wr->num_sge);
		wqe.wr_id = wr->wr_id;
		wqe.type = BNXT_QPLIB_SWQE_TYPE_RECV;

		if (ib_qp->qp_type == IB_QPT_GSI &&
		    qp->qplib_qp.type != CMDQ_CREATE_QP_TYPE_GSI)
			rc = bnxt_re_build_qp1_shadow_qp_recv(qp, wr, &wqe,
							      payload_sz);
		if (!rc)
			rc = bnxt_qplib_post_recv(&qp->qplib_qp, &wqe);
		if (rc) {
			*bad_wr = wr;
			break;
		}

		/* Ring doorbell if the RQEs posted reach the threshold */
		if (++count >= BNXT_RE_RQ_WQE_THRESHOLD) {
			bnxt_qplib_post_recv_db(&qp->qplib_qp);
			count = 0;
		}

		wr = wr->next;
	}

	if (count)
		bnxt_qplib_post_recv_db(&qp->qplib_qp);

	spin_unlock_irqrestore(&qp->rq_lock, flags);

	return rc;
}

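/* Completion Queues */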
void bnxt_re_destroy_cq(struct ib_cq *ib_cq, struct ib_udata *udata)
{
	struct bnxt_re_cq *cq;
	struct bnxt_qplib_nq *nq;
	struct bnxt_re_dev *rdev;

	cq = container_of(ib_cq, struct bnxt_re_cq, ib_cq);
	rdev = cq->rdev;
	nq = cq->qplib_cq.nq;

	bnxt_qplib_destroy_cq(&rdev->qplib_res, &cq->qplib_cq);
	ib_umem_release(cq->umem);

	atomic_dec(&rdev->cq_count);
	nq->budget--;
	kfree(cq->cql);
}

int bnxt_re_create_cq(struct ib_cq *ibcq, const struct ib_cq_init_attr *attr,
		      struct ib_udata *udata)
{
	struct bnxt_re_dev *rdev = to_bnxt_re_dev(ibcq->device, ibdev);
	struct bnxt_qplib_dev_attr *dev_attr = &rdev->dev_attr;
	struct bnxt_re_cq *cq = container_of(ibcq, struct bnxt_re_cq, ib_cq);
	int rc, entries;
	int cqe = attr->cqe;
	struct bnxt_qplib_nq *nq = NULL;
	unsigned int nq_alloc_cnt;

	/* Validate CQ fields */
	if (cqe < 1 || cqe > dev_attr->max_cq_wqes) {
		ibdev_err(&rdev->ibdev, "Failed to create CQ - max exceeded");
		return -EINVAL;
	}

	cq->rdev = rdev;
	cq->qplib_cq.cq_handle = (u64)(unsigned long)(&cq->qplib_cq);

	entries = roundup_pow_of_two(cqe + 1);
	if (entries > dev_attr->max_cq_wqes + 1)
		entries = dev_attr->max_cq_wqes + 1;

	cq->qplib_cq.sg_info.pgsize = PAGE_SIZE;
	cq->qplib_cq.sg_info.pgshft = PAGE_SHIFT;
	if (udata) {
		struct bnxt_re_cq_req req;
		struct bnxt_re_ucontext *uctx = rdma_udata_to_drv_context(
			udata, struct bnxt_re_ucontext, ib_uctx);
		if (ib_copy_from_udata(&req, udata, sizeof(req))) {
			rc = -EFAULT;
			goto fail;
		}

		cq->umem = ib_umem_get(udata, req.cq_va,
				       entries * sizeof(struct cq_base),
				       IB_ACCESS_LOCAL_WRITE);
		if (IS_ERR(cq->umem)) {
			rc = PTR_ERR(cq->umem);
			goto fail;
		}
		cq->qplib_cq.sg_info.sghead = cq->umem->sg_head.sgl;
		cq->qplib_cq.sg_info.npages = ib_umem_num_pages(cq->umem);
		cq->qplib_cq.sg_info.nmap = cq->umem->nmap;
		cq->qplib_cq.dpi = &uctx->dpi;
	} else {
		cq->max_cql = min_t(u32, entries, MAX_CQL_PER_POLL);
		cq->cql = kcalloc(cq->max_cql, sizeof(struct bnxt_qplib_cqe),
				  GFP_KERNEL);
		if (!cq->cql) {
			rc = -ENOMEM;
			goto fail;
		}

		cq->qplib_cq.dpi = &rdev->dpi_privileged;
	}
	/*
	 * Pick the NQ in a round-robin fashion; nq_alloc_cnt is used to
	 * derive the NQ index.
	 */
	nq_alloc_cnt = atomic_inc_return(&rdev->nq_alloc_cnt);
	nq = &rdev->nq[nq_alloc_cnt % (rdev->num_msix - 1)];
	cq->qplib_cq.max_wqe = entries;
	cq->qplib_cq.cnq_hw_ring_id = nq->ring_id;
	cq->qplib_cq.nq = nq;

	rc = bnxt_qplib_create_cq(&rdev->qplib_res, &cq->qplib_cq);
	if (rc) {
		ibdev_err(&rdev->ibdev, "Failed to create HW CQ");
		goto fail;
	}

	cq->ib_cq.cqe = entries;
	cq->cq_period = cq->qplib_cq.period;
	nq->budget++;

	atomic_inc(&rdev->cq_count);
	spin_lock_init(&cq->cq_lock);

	if (udata) {
		struct bnxt_re_cq_resp resp;

		resp.cqid = cq->qplib_cq.id;
		resp.tail = cq->qplib_cq.hwq.cons;
		resp.phase = cq->qplib_cq.period;
		resp.rsvd = 0;
		rc = ib_copy_to_udata(udata, &resp, sizeof(resp));
		if (rc) {
			ibdev_err(&rdev->ibdev, "Failed to copy CQ udata");
			bnxt_qplib_destroy_cq(&rdev->qplib_res, &cq->qplib_cq);
			goto c2fail;
		}
	}

	return 0;

c2fail:
	ib_umem_release(cq->umem);
fail:
	kfree(cq->cql);
	return rc;
}

static u8 __req_to_ib_wc_status(u8 qstatus)
{
	switch (qstatus) {
	case CQ_REQ_STATUS_OK:
		return IB_WC_SUCCESS;
	case CQ_REQ_STATUS_BAD_RESPONSE_ERR:
		return IB_WC_BAD_RESP_ERR;
	case CQ_REQ_STATUS_LOCAL_LENGTH_ERR:
		return IB_WC_LOC_LEN_ERR;
	case CQ_REQ_STATUS_LOCAL_QP_OPERATION_ERR:
		return IB_WC_LOC_QP_OP_ERR;
	case CQ_REQ_STATUS_LOCAL_PROTECTION_ERR:
		return IB_WC_LOC_PROT_ERR;
	case CQ_REQ_STATUS_MEMORY_MGT_OPERATION_ERR:
		return IB_WC_GENERAL_ERR;
	case CQ_REQ_STATUS_REMOTE_INVALID_REQUEST_ERR:
		return IB_WC_REM_INV_REQ_ERR;
	case CQ_REQ_STATUS_REMOTE_ACCESS_ERR:
		return IB_WC_REM_ACCESS_ERR;
	case CQ_REQ_STATUS_REMOTE_OPERATION_ERR:
		return IB_WC_REM_OP_ERR;
	case CQ_REQ_STATUS_RNR_NAK_RETRY_CNT_ERR:
		return IB_WC_RNR_RETRY_EXC_ERR;
	case CQ_REQ_STATUS_TRANSPORT_RETRY_CNT_ERR:
		return IB_WC_RETRY_EXC_ERR;
	case CQ_REQ_STATUS_WORK_REQUEST_FLUSHED_ERR:
		return IB_WC_WR_FLUSH_ERR;
	default:
		return IB_WC_GENERAL_ERR;
	}
}

static u8 __rawqp1_to_ib_wc_status(u8 qstatus)
{
	switch (qstatus) {
	case CQ_RES_RAWETH_QP1_STATUS_OK:
		return IB_WC_SUCCESS;
	case CQ_RES_RAWETH_QP1_STATUS_LOCAL_ACCESS_ERROR:
		return IB_WC_LOC_ACCESS_ERR;
	case CQ_RES_RAWETH_QP1_STATUS_HW_LOCAL_LENGTH_ERR:
		return IB_WC_LOC_LEN_ERR;
	case CQ_RES_RAWETH_QP1_STATUS_LOCAL_PROTECTION_ERR:
		return IB_WC_LOC_PROT_ERR;
	case CQ_RES_RAWETH_QP1_STATUS_LOCAL_QP_OPERATION_ERR:
		return IB_WC_LOC_QP_OP_ERR;
	case CQ_RES_RAWETH_QP1_STATUS_MEMORY_MGT_OPERATION_ERR:
		return IB_WC_GENERAL_ERR;
	case CQ_RES_RAWETH_QP1_STATUS_WORK_REQUEST_FLUSHED_ERR:
		return IB_WC_WR_FLUSH_ERR;
	case CQ_RES_RAWETH_QP1_STATUS_HW_FLUSH_ERR:
		return IB_WC_WR_FLUSH_ERR;
	default:
		return IB_WC_GENERAL_ERR;
	}
}

static u8 __rc_to_ib_wc_status(u8 qstatus)
{
	switch (qstatus) {
	case CQ_RES_RC_STATUS_OK:
		return IB_WC_SUCCESS;
	case CQ_RES_RC_STATUS_LOCAL_ACCESS_ERROR:
		return IB_WC_LOC_ACCESS_ERR;
	case CQ_RES_RC_STATUS_LOCAL_LENGTH_ERR:
		return IB_WC_LOC_LEN_ERR;
	case CQ_RES_RC_STATUS_LOCAL_PROTECTION_ERR:
		return IB_WC_LOC_PROT_ERR;
	case CQ_RES_RC_STATUS_LOCAL_QP_OPERATION_ERR:
		return IB_WC_LOC_QP_OP_ERR;
	case CQ_RES_RC_STATUS_MEMORY_MGT_OPERATION_ERR:
		return IB_WC_GENERAL_ERR;
	case CQ_RES_RC_STATUS_REMOTE_INVALID_REQUEST_ERR:
		return IB_WC_REM_INV_REQ_ERR;
	case CQ_RES_RC_STATUS_WORK_REQUEST_FLUSHED_ERR:
		return IB_WC_WR_FLUSH_ERR;
	case CQ_RES_RC_STATUS_HW_FLUSH_ERR:
		return IB_WC_WR_FLUSH_ERR;
	default:
		return IB_WC_GENERAL_ERR;
	}
}

static void bnxt_re_process_req_wc(struct ib_wc *wc, struct bnxt_qplib_cqe *cqe)
{
	switch (cqe->type) {
	case BNXT_QPLIB_SWQE_TYPE_SEND:
		wc->opcode = IB_WC_SEND;
		break;
	case BNXT_QPLIB_SWQE_TYPE_SEND_WITH_IMM:
		wc->opcode = IB_WC_SEND;
		wc->wc_flags |= IB_WC_WITH_IMM;
		break;
	case BNXT_QPLIB_SWQE_TYPE_SEND_WITH_INV:
		wc->opcode = IB_WC_SEND;
		wc->wc_flags |= IB_WC_WITH_INVALIDATE;
		break;
	case BNXT_QPLIB_SWQE_TYPE_RDMA_WRITE:
		wc->opcode = IB_WC_RDMA_WRITE;
		break;
	case BNXT_QPLIB_SWQE_TYPE_RDMA_WRITE_WITH_IMM:
		wc->opcode = IB_WC_RDMA_WRITE;
		wc->wc_flags |= IB_WC_WITH_IMM;
		break;
	case BNXT_QPLIB_SWQE_TYPE_RDMA_READ:
		wc->opcode = IB_WC_RDMA_READ;
		break;
	case BNXT_QPLIB_SWQE_TYPE_ATOMIC_CMP_AND_SWP:
		wc->opcode = IB_WC_COMP_SWAP;
		break;
	case BNXT_QPLIB_SWQE_TYPE_ATOMIC_FETCH_AND_ADD:
		wc->opcode = IB_WC_FETCH_ADD;
		break;
	case BNXT_QPLIB_SWQE_TYPE_LOCAL_INV:
		wc->opcode = IB_WC_LOCAL_INV;
		break;
	case BNXT_QPLIB_SWQE_TYPE_REG_MR:
		wc->opcode = IB_WC_REG_MR;
		break;
	default:
		wc->opcode = IB_WC_SEND;
		break;
	}

	wc->status = __req_to_ib_wc_status(cqe->status);
}

static int bnxt_re_check_packet_type(u16 raweth_qp1_flags,
				     u16 raweth_qp1_flags2)
{
	/* raweth_qp1_flags bits 9-6 indicate itype */
	if ((raweth_qp1_flags & CQ_RES_RAWETH_QP1_RAWETH_QP1_FLAGS_ITYPE_ROCE)
	    != CQ_RES_RAWETH_QP1_RAWETH_QP1_FLAGS_ITYPE_ROCE)
		return -1;

	if (raweth_qp1_flags2 &
	    CQ_RES_RAWETH_QP1_RAWETH_QP1_FLAGS2_IP_CS_CALC &&
	    raweth_qp1_flags2 &
	    CQ_RES_RAWETH_QP1_RAWETH_QP1_FLAGS2_L4_CS_CALC) {
		/* raweth_qp1_flags2 bit 8 indicates ip_type: 0 - v4, 1 - v6 */
		return (raweth_qp1_flags2 &
			CQ_RES_RAWETH_QP1_RAWETH_QP1_FLAGS2_IP_TYPE) ?
		       BNXT_RE_ROCEV2_IPV6_PACKET :
		       BNXT_RE_ROCEV2_IPV4_PACKET;
	}

	return BNXT_RE_ROCE_V1_PACKET;
}

static int bnxt_re_to_ib_nw_type(int nw_type)
{
	u8 nw_hdr_type = 0xFF;

	switch (nw_type) {
	case BNXT_RE_ROCE_V1_PACKET:
		nw_hdr_type = RDMA_NETWORK_ROCE_V1;
		break;
	case BNXT_RE_ROCEV2_IPV4_PACKET:
		nw_hdr_type = RDMA_NETWORK_IPV4;
		break;
	case BNXT_RE_ROCEV2_IPV6_PACKET:
		nw_hdr_type = RDMA_NETWORK_IPV6;
		break;
	}
	return nw_hdr_type;
}

static bool bnxt_re_is_loopback_packet(struct bnxt_re_dev *rdev,
				       void *rq_hdr_buf)
{
	u8 *tmp_buf = NULL;
	struct ethhdr *eth_hdr;
	u16 eth_type;
	bool rc = false;

	tmp_buf = (u8 *)rq_hdr_buf;
	/*
	 * If dest mac is not same as I/F mac, this could be a
	 * loopback address or multicast address, check whether
	 * it is a loopback packet
	 */
	if (!ether_addr_equal(tmp_buf, rdev->netdev->dev_addr)) {
		tmp_buf += 4;
		/* Check the ether type */
		eth_hdr = (struct ethhdr *)tmp_buf;
		eth_type = ntohs(eth_hdr->h_proto);
		switch (eth_type) {
		case ETH_P_IBOE:
			rc = true;
			break;
		case ETH_P_IP:
		case ETH_P_IPV6: {
			u32 len;
			struct udphdr *udp_hdr;

			len = (eth_type == ETH_P_IP ? sizeof(struct iphdr) :
						      sizeof(struct ipv6hdr));
			tmp_buf += sizeof(struct ethhdr) + len;
			udp_hdr = (struct udphdr *)tmp_buf;
			if (ntohs(udp_hdr->dest) == ROCE_V2_UDP_DPORT)
				rc = true;
			break;
		}
		default:
			break;
		}
	}

	return rc;
}

static int bnxt_re_process_raw_qp_pkt_rx(struct bnxt_re_qp *gsi_qp,
					 struct bnxt_qplib_cqe *cqe)
{
	struct bnxt_re_dev *rdev = gsi_qp->rdev;
	struct bnxt_re_sqp_entries *sqp_entry = NULL;
	struct bnxt_re_qp *gsi_sqp = rdev->gsi_ctx.gsi_sqp;
	struct bnxt_re_ah *gsi_sah;
	struct ib_send_wr *swr;
	struct ib_ud_wr udwr;
	struct ib_recv_wr rwr;
	int pkt_type = 0;
	u32 tbl_idx;
	void *rq_hdr_buf;
	dma_addr_t rq_hdr_buf_map;
	dma_addr_t shrq_hdr_buf_map;
	u32 offset = 0;
	u32 skip_bytes = 0;
	struct ib_sge s_sge[2];
	struct ib_sge r_sge[2];
	int rc;

	memset(&udwr, 0, sizeof(udwr));
	memset(&rwr, 0, sizeof(rwr));
	memset(&s_sge, 0, sizeof(s_sge));
	memset(&r_sge, 0, sizeof(r_sge));

	swr = &udwr.wr;
	tbl_idx = cqe->wr_id;

	rq_hdr_buf = gsi_qp->qplib_qp.rq_hdr_buf +
			(tbl_idx * gsi_qp->qplib_qp.rq_hdr_buf_size);
	rq_hdr_buf_map = bnxt_qplib_get_qp_buf_from_index(&gsi_qp->qplib_qp,
							  tbl_idx);

	/* Shadow QP header buffer */
	shrq_hdr_buf_map = bnxt_qplib_get_qp_buf_from_index(&gsi_qp->qplib_qp,
							    tbl_idx);
	sqp_entry = &rdev->gsi_ctx.sqp_tbl[tbl_idx];

	/* Store this cqe */
	memcpy(&sqp_entry->cqe, cqe, sizeof(struct bnxt_qplib_cqe));
	sqp_entry->qp1_qp = gsi_qp;

	/* Find packet type from the cqe */
	pkt_type = bnxt_re_check_packet_type(cqe->raweth_qp1_flags,
					     cqe->raweth_qp1_flags2);
	if (pkt_type < 0) {
		ibdev_err(&rdev->ibdev, "Invalid packet\n");
		return -EINVAL;
	}

	/* Adjust the offset for the user buffer and post in the rq */
	if (pkt_type == BNXT_RE_ROCEV2_IPV4_PACKET)
		offset = 20;

	/*
	 * QP1 loopback packet has 4 bytes of internal header before
	 * ether header. Skip these four bytes.
	 */
	if (bnxt_re_is_loopback_packet(rdev, rq_hdr_buf))
		skip_bytes = 4;

	/* First send SGE. Skip the ether header */
	s_sge[0].addr = rq_hdr_buf_map + BNXT_QPLIB_MAX_QP1_RQ_ETH_HDR_SIZE
			+ skip_bytes;
	s_sge[0].lkey = 0xFFFFFFFF;
	s_sge[0].length = offset ? BNXT_QPLIB_MAX_GRH_HDR_SIZE_IPV4 :
				BNXT_QPLIB_MAX_GRH_HDR_SIZE_IPV6;

	/* Second send SGE */
	s_sge[1].addr = s_sge[0].addr + s_sge[0].length +
			BNXT_QPLIB_MAX_QP1_RQ_BDETH_HDR_SIZE;
	if (pkt_type != BNXT_RE_ROCE_V1_PACKET)
		s_sge[1].addr += 8;
	s_sge[1].lkey = 0xFFFFFFFF;
	s_sge[1].length = 256;

	/* First recv SGE */
	r_sge[0].addr = shrq_hdr_buf_map;
	r_sge[0].lkey = 0xFFFFFFFF;
	r_sge[0].length = 40;

	r_sge[1].addr = sqp_entry->sge.addr + offset;
	r_sge[1].lkey = sqp_entry->sge.lkey;
	r_sge[1].length = BNXT_QPLIB_MAX_GRH_HDR_SIZE_IPV6 + 256 - offset;

	/* Create receive work request */
	rwr.num_sge = 2;
	rwr.sg_list = r_sge;
	rwr.wr_id = tbl_idx;
	rwr.next = NULL;

	rc = bnxt_re_post_recv_shadow_qp(rdev, gsi_sqp, &rwr);
	if (rc) {
		ibdev_err(&rdev->ibdev,
			  "Failed to post Rx buffers to shadow QP");
		return -ENOMEM;
	}

	swr->num_sge = 2;
	swr->sg_list = s_sge;
	swr->wr_id = tbl_idx;
	swr->opcode = IB_WR_SEND;
	swr->next = NULL;
	gsi_sah = rdev->gsi_ctx.gsi_sah;
	udwr.ah = &gsi_sah->ib_ah;
	udwr.remote_qpn = gsi_sqp->qplib_qp.id;
	udwr.remote_qkey = gsi_sqp->qplib_qp.qkey;

	/* Post the data received in the send queue */
	rc = bnxt_re_post_send_shadow_qp(rdev, gsi_sqp, swr);

	return 0;
}

static void bnxt_re_process_res_rawqp1_wc(struct ib_wc *wc,
					  struct bnxt_qplib_cqe *cqe)
{
	wc->opcode = IB_WC_RECV;
	wc->status = __rawqp1_to_ib_wc_status(cqe->status);
	wc->wc_flags |= IB_WC_GRH;
}

static bool bnxt_re_is_vlan_pkt(struct bnxt_qplib_cqe *orig_cqe,
				u16 *vid, u8 *sl)
{
	bool ret = false;
	u32 metadata;
	u16 tpid;

	metadata = orig_cqe->raweth_qp1_metadata;
	if (orig_cqe->raweth_qp1_flags2 &
	    CQ_RES_RAWETH_QP1_RAWETH_QP1_FLAGS2_META_FORMAT_VLAN) {
		tpid = ((metadata &
			 CQ_RES_RAWETH_QP1_RAWETH_QP1_METADATA_TPID_MASK) >>
			CQ_RES_RAWETH_QP1_RAWETH_QP1_METADATA_TPID_SFT);
		if (tpid == ETH_P_8021Q) {
			*vid = metadata &
			       CQ_RES_RAWETH_QP1_RAWETH_QP1_METADATA_VID_MASK;
			*sl = (metadata &
			       CQ_RES_RAWETH_QP1_RAWETH_QP1_METADATA_PRI_MASK) >>
			       CQ_RES_RAWETH_QP1_RAWETH_QP1_METADATA_PRI_SFT;
			ret = true;
		}
	}

	return ret;
}

static void bnxt_re_process_res_rc_wc(struct ib_wc *wc,
				      struct bnxt_qplib_cqe *cqe)
{
	wc->opcode = IB_WC_RECV;
	wc->status = __rc_to_ib_wc_status(cqe->status);

	if (cqe->flags & CQ_RES_RC_FLAGS_IMM)
		wc->wc_flags |= IB_WC_WITH_IMM;
	if (cqe->flags & CQ_RES_RC_FLAGS_INV)
		wc->wc_flags |= IB_WC_WITH_INVALIDATE;
	if ((cqe->flags & (CQ_RES_RC_FLAGS_RDMA | CQ_RES_RC_FLAGS_IMM)) ==
	    (CQ_RES_RC_FLAGS_RDMA | CQ_RES_RC_FLAGS_IMM))
		wc->opcode = IB_WC_RECV_RDMA_WITH_IMM;
}

static void bnxt_re_process_res_shadow_qp_wc(struct bnxt_re_qp *gsi_sqp,
					     struct ib_wc *wc,
					     struct bnxt_qplib_cqe *cqe)
{
	struct bnxt_re_dev *rdev = gsi_sqp->rdev;
	struct bnxt_re_qp *gsi_qp = NULL;
	struct bnxt_qplib_cqe *orig_cqe = NULL;
	struct bnxt_re_sqp_entries *sqp_entry = NULL;
	int nw_type;
	u32 tbl_idx;
	u16 vlan_id;
	u8 sl;

	tbl_idx = cqe->wr_id;

	sqp_entry = &rdev->gsi_ctx.sqp_tbl[tbl_idx];
	gsi_qp = sqp_entry->qp1_qp;
	orig_cqe = &sqp_entry->cqe;

	wc->wr_id = sqp_entry->wrid;
	wc->byte_len = orig_cqe->length;
	wc->qp = &gsi_qp->ib_qp;

	wc->ex.imm_data = orig_cqe->immdata;
	wc->src_qp = orig_cqe->src_qp;
	memcpy(wc->smac, orig_cqe->smac, ETH_ALEN);
	if (bnxt_re_is_vlan_pkt(orig_cqe, &vlan_id, &sl)) {
		wc->vlan_id = vlan_id;
		wc->sl = sl;
		wc->wc_flags |= IB_WC_WITH_VLAN;
	}
	wc->port_num = 1;
	wc->vendor_err = orig_cqe->status;

	wc->opcode = IB_WC_RECV;
	wc->status = __rawqp1_to_ib_wc_status(orig_cqe->status);
	wc->wc_flags |= IB_WC_GRH;

	nw_type = bnxt_re_check_packet_type(orig_cqe->raweth_qp1_flags,
					    orig_cqe->raweth_qp1_flags2);
	if (nw_type >= 0) {
		wc->network_hdr_type = bnxt_re_to_ib_nw_type(nw_type);
		wc->wc_flags |= IB_WC_WITH_NETWORK_HDR_TYPE;
	}
}

static void bnxt_re_process_res_ud_wc(struct bnxt_re_qp *qp,
				      struct ib_wc *wc,
				      struct bnxt_qplib_cqe *cqe)
{
	u8 nw_type;

	wc->opcode = IB_WC_RECV;
	wc->status = __rc_to_ib_wc_status(cqe->status);

	if (cqe->flags & CQ_RES_UD_FLAGS_IMM)
		wc->wc_flags |= IB_WC_WITH_IMM;

	if (qp->qplib_qp.type == CMDQ_CREATE_QP_TYPE_GSI) {
		wc->wc_flags |= IB_WC_GRH;
		memcpy(wc->smac, cqe->smac, ETH_ALEN);
		wc->wc_flags |= IB_WC_WITH_SMAC;
		if (cqe->flags & CQ_RES_UD_FLAGS_META_FORMAT_VLAN) {
			wc->vlan_id = (cqe->cfa_meta & 0xFFF);
			if (wc->vlan_id < 0x1000)
				wc->wc_flags |= IB_WC_WITH_VLAN;
		}
		nw_type = (cqe->flags & CQ_RES_UD_FLAGS_ROCE_IP_VER_MASK) >>
			   CQ_RES_UD_FLAGS_ROCE_IP_VER_SFT;
		wc->network_hdr_type = bnxt_re_to_ib_nw_type(nw_type);
		wc->wc_flags |= IB_WC_WITH_NETWORK_HDR_TYPE;
	}
}

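/*
 * Post a phantom WQE (implemented as a bind of the fence MW) on the send
 * queue; the CQ poll logic flags when one is needed via sq->send_phantom.
 */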
static int send_phantom_wqe(struct bnxt_re_qp *qp)
{
	struct bnxt_qplib_qp *lib_qp = &qp->qplib_qp;
	unsigned long flags;
	int rc = 0;

	spin_lock_irqsave(&qp->sq_lock, flags);

	rc = bnxt_re_bind_fence_mw(lib_qp);
	if (!rc) {
		lib_qp->sq.phantom_wqe_cnt++;
		ibdev_dbg(&qp->rdev->ibdev,
			  "qp %#x sq->prod %#x sw_prod %#x phantom_wqe_cnt %d\n",
			  lib_qp->id, lib_qp->sq.hwq.prod,
			  HWQ_CMP(lib_qp->sq.hwq.prod, &lib_qp->sq.hwq),
			  lib_qp->sq.phantom_wqe_cnt);
	}

	spin_unlock_irqrestore(&qp->sq_lock, flags);
	return rc;
}

int bnxt_re_poll_cq(struct ib_cq *ib_cq, int num_entries, struct ib_wc *wc)
{
	struct bnxt_re_cq *cq = container_of(ib_cq, struct bnxt_re_cq, ib_cq);
	struct bnxt_re_qp *qp, *sh_qp;
	struct bnxt_qplib_cqe *cqe;
	int i, ncqe, budget;
	struct bnxt_qplib_q *sq;
	struct bnxt_qplib_qp *lib_qp;
	u32 tbl_idx;
	struct bnxt_re_sqp_entries *sqp_entry = NULL;
	unsigned long flags;

	spin_lock_irqsave(&cq->cq_lock, flags);
	budget = min_t(u32, num_entries, cq->max_cql);
	num_entries = budget;
	if (!cq->cql) {
		ibdev_err(&cq->rdev->ibdev, "POLL CQ : no CQL to use");
		goto exit;
	}
	cqe = &cq->cql[0];
	while (budget) {
		lib_qp = NULL;
		ncqe = bnxt_qplib_poll_cq(&cq->qplib_cq, cqe, budget, &lib_qp);
		if (lib_qp) {
			sq = &lib_qp->sq;
			if (sq->send_phantom) {
				qp = container_of(lib_qp,
						  struct bnxt_re_qp, qplib_qp);
				if (send_phantom_wqe(qp) == -ENOMEM)
					ibdev_err(&cq->rdev->ibdev,
						  "Phantom failed! Scheduled to send again\n");
				else
					sq->send_phantom = false;
			}
		}
		if (ncqe < budget)
			ncqe += bnxt_qplib_process_flush_list(&cq->qplib_cq,
							      cqe + ncqe,
							      budget - ncqe);

		if (!ncqe)
			break;

		for (i = 0; i < ncqe; i++, cqe++) {
			/* Transcribe each qplib_wqe back to ib_wc */
			memset(wc, 0, sizeof(*wc));

			wc->wr_id = cqe->wr_id;
			wc->byte_len = cqe->length;
			qp = container_of
				((struct bnxt_qplib_qp *)
				 (unsigned long)(cqe->qp_handle),
				 struct bnxt_re_qp, qplib_qp);
			if (!qp) {
				ibdev_err(&cq->rdev->ibdev, "POLL CQ : bad QP handle");
				continue;
			}
			wc->qp = &qp->ib_qp;
			wc->ex.imm_data = cqe->immdata;
			wc->src_qp = cqe->src_qp;
			memcpy(wc->smac, cqe->smac, ETH_ALEN);
			wc->port_num = 1;
			wc->vendor_err = cqe->status;

			switch (cqe->opcode) {
			case CQ_BASE_CQE_TYPE_REQ:
				sh_qp = qp->rdev->gsi_ctx.gsi_sqp;
				if (sh_qp &&
				    qp->qplib_qp.id == sh_qp->qplib_qp.id) {
					/* Handle this completion with
					 * the stored completion
					 */
					memset(wc, 0, sizeof(*wc));
					continue;
				}
				bnxt_re_process_req_wc(wc, cqe);
				break;
			case CQ_BASE_CQE_TYPE_RES_RAWETH_QP1:
				if (!cqe->status) {
					int rc = 0;

					rc = bnxt_re_process_raw_qp_pkt_rx
								(qp, cqe);
					if (!rc) {
						memset(wc, 0, sizeof(*wc));
						continue;
					}
					cqe->status = -1;
				}
				/* Errors need not be looped back.
				 * But change the wr_id to the one
				 * stored in the table
				 */
				tbl_idx = cqe->wr_id;
				sqp_entry = &cq->rdev->gsi_ctx.sqp_tbl[tbl_idx];
				wc->wr_id = sqp_entry->wrid;
				bnxt_re_process_res_rawqp1_wc(wc, cqe);
				break;
			case CQ_BASE_CQE_TYPE_RES_RC:
				bnxt_re_process_res_rc_wc(wc, cqe);
				break;
			case CQ_BASE_CQE_TYPE_RES_UD:
				sh_qp = qp->rdev->gsi_ctx.gsi_sqp;
				if (sh_qp &&
				    qp->qplib_qp.id == sh_qp->qplib_qp.id) {
					/* Handle this completion with
					 * the stored completion
					 */
					if (cqe->status) {
						continue;
					} else {
						bnxt_re_process_res_shadow_qp_wc
								(qp, wc, cqe);
						break;
					}
				}
				bnxt_re_process_res_ud_wc(qp, wc, cqe);
				break;
			default:
				ibdev_err(&cq->rdev->ibdev,
					  "POLL CQ : type 0x%x not handled",
					  cqe->opcode);
				continue;
			}
			wc++;
			budget--;
		}
	}
exit:
	spin_unlock_irqrestore(&cq->cq_lock, flags);
	return num_entries - budget;
}

int bnxt_re_req_notify_cq(struct ib_cq *ib_cq,
			  enum ib_cq_notify_flags ib_cqn_flags)
{
	struct bnxt_re_cq *cq = container_of(ib_cq, struct bnxt_re_cq, ib_cq);
	int type = 0, rc = 0;
	unsigned long flags;

	spin_lock_irqsave(&cq->cq_lock, flags);
	/* Trigger on the very next completion */
	if (ib_cqn_flags & IB_CQ_NEXT_COMP)
		type = DBC_DBC_TYPE_CQ_ARMALL;
	/* Trigger on the next solicited completion */
	else if (ib_cqn_flags & IB_CQ_SOLICITED)
		type = DBC_DBC_TYPE_CQ_ARMSE;

	/* Poll to see if there are missed events */
	if ((ib_cqn_flags & IB_CQ_REPORT_MISSED_EVENTS) &&
	    !(bnxt_qplib_is_cq_empty(&cq->qplib_cq))) {
		rc = 1;
		goto exit;
	}
	bnxt_qplib_req_notify_cq(&cq->qplib_cq, type);

exit:
	spin_unlock_irqrestore(&cq->cq_lock, flags);
	return rc;
}

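/* Memory Regions */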
struct ib_mr *bnxt_re_get_dma_mr(struct ib_pd *ib_pd, int mr_access_flags)
{
	struct bnxt_re_pd *pd = container_of(ib_pd, struct bnxt_re_pd, ib_pd);
	struct bnxt_re_dev *rdev = pd->rdev;
	struct bnxt_re_mr *mr;
	u64 pbl = 0;
	int rc;

	mr = kzalloc(sizeof(*mr), GFP_KERNEL);
	if (!mr)
		return ERR_PTR(-ENOMEM);

	mr->rdev = rdev;
	mr->qplib_mr.pd = &pd->qplib_pd;
	mr->qplib_mr.flags = __from_ib_access_flags(mr_access_flags);
	mr->qplib_mr.type = CMDQ_ALLOCATE_MRW_MRW_FLAGS_PMR;

	/* Allocate and register 0 as the address */
	rc = bnxt_qplib_alloc_mrw(&rdev->qplib_res, &mr->qplib_mr);
	if (rc)
		goto fail;

	mr->qplib_mr.hwq.level = PBL_LVL_MAX;
	mr->qplib_mr.total_size = -1;
	rc = bnxt_qplib_reg_mr(&rdev->qplib_res, &mr->qplib_mr, &pbl, 0, false,
			       PAGE_SIZE);
	if (rc)
		goto fail_mr;

	mr->ib_mr.lkey = mr->qplib_mr.lkey;
	if (mr_access_flags & (IB_ACCESS_REMOTE_WRITE | IB_ACCESS_REMOTE_READ |
			       IB_ACCESS_REMOTE_ATOMIC))
		mr->ib_mr.rkey = mr->ib_mr.lkey;
	atomic_inc(&rdev->mr_count);

	return &mr->ib_mr;

fail_mr:
	bnxt_qplib_free_mrw(&rdev->qplib_res, &mr->qplib_mr);
fail:
	kfree(mr);
	return ERR_PTR(rc);
}

int bnxt_re_dereg_mr(struct ib_mr *ib_mr, struct ib_udata *udata)
{
	struct bnxt_re_mr *mr = container_of(ib_mr, struct bnxt_re_mr, ib_mr);
	struct bnxt_re_dev *rdev = mr->rdev;
	int rc;

	rc = bnxt_qplib_free_mrw(&rdev->qplib_res, &mr->qplib_mr);
	if (rc) {
		ibdev_err(&rdev->ibdev, "Dereg MR failed: %#x\n", rc);
		return rc;
	}

	if (mr->pages) {
		rc = bnxt_qplib_free_fast_reg_page_list(&rdev->qplib_res,
							&mr->qplib_frpl);
		kfree(mr->pages);
		mr->npages = 0;
		mr->pages = NULL;
	}
	ib_umem_release(mr->ib_umem);

	kfree(mr);
	atomic_dec(&rdev->mr_count);
	return rc;
}

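/* Callback for ib_sg_to_pages(): record one page DMA address of the MR */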
static int bnxt_re_set_page(struct ib_mr *ib_mr, u64 addr)
{
	struct bnxt_re_mr *mr = container_of(ib_mr, struct bnxt_re_mr, ib_mr);

	if (unlikely(mr->npages == mr->qplib_frpl.max_pg_ptrs))
		return -ENOMEM;

	mr->pages[mr->npages++] = addr;
	return 0;
}

int bnxt_re_map_mr_sg(struct ib_mr *ib_mr, struct scatterlist *sg, int sg_nents,
		      unsigned int *sg_offset)
{
	struct bnxt_re_mr *mr = container_of(ib_mr, struct bnxt_re_mr, ib_mr);

	mr->npages = 0;
	return ib_sg_to_pages(ib_mr, sg, sg_nents, sg_offset, bnxt_re_set_page);
}

struct ib_mr *bnxt_re_alloc_mr(struct ib_pd *ib_pd, enum ib_mr_type type,
			       u32 max_num_sg, struct ib_udata *udata)
{
	struct bnxt_re_pd *pd = container_of(ib_pd, struct bnxt_re_pd, ib_pd);
	struct bnxt_re_dev *rdev = pd->rdev;
	struct bnxt_re_mr *mr = NULL;
	int rc;

	if (type != IB_MR_TYPE_MEM_REG) {
		ibdev_dbg(&rdev->ibdev, "MR type 0x%x not supported", type);
		return ERR_PTR(-EINVAL);
	}
	if (max_num_sg > MAX_PBL_LVL_1_PGS)
		return ERR_PTR(-EINVAL);

	mr = kzalloc(sizeof(*mr), GFP_KERNEL);
	if (!mr)
		return ERR_PTR(-ENOMEM);

	mr->rdev = rdev;
	mr->qplib_mr.pd = &pd->qplib_pd;
	mr->qplib_mr.flags = BNXT_QPLIB_FR_PMR;
	mr->qplib_mr.type = CMDQ_ALLOCATE_MRW_MRW_FLAGS_PMR;

	rc = bnxt_qplib_alloc_mrw(&rdev->qplib_res, &mr->qplib_mr);
	if (rc)
		goto bail;

	mr->ib_mr.lkey = mr->qplib_mr.lkey;
	mr->ib_mr.rkey = mr->ib_mr.lkey;

	mr->pages = kcalloc(max_num_sg, sizeof(u64), GFP_KERNEL);
	if (!mr->pages) {
		rc = -ENOMEM;
		goto fail;
	}
	rc = bnxt_qplib_alloc_fast_reg_page_list(&rdev->qplib_res,
						 &mr->qplib_frpl, max_num_sg);
	if (rc) {
		ibdev_err(&rdev->ibdev,
			  "Failed to allocate HW FR page list");
		goto fail_mr;
	}

	atomic_inc(&rdev->mr_count);
	return &mr->ib_mr;

fail_mr:
	kfree(mr->pages);
fail:
	bnxt_qplib_free_mrw(&rdev->qplib_res, &mr->qplib_mr);
bail:
	kfree(mr);
	return ERR_PTR(rc);
}

struct ib_mw *bnxt_re_alloc_mw(struct ib_pd *ib_pd, enum ib_mw_type type,
			       struct ib_udata *udata)
{
	struct bnxt_re_pd *pd = container_of(ib_pd, struct bnxt_re_pd, ib_pd);
	struct bnxt_re_dev *rdev = pd->rdev;
	struct bnxt_re_mw *mw;
	int rc;

	mw = kzalloc(sizeof(*mw), GFP_KERNEL);
	if (!mw)
		return ERR_PTR(-ENOMEM);
	mw->rdev = rdev;
	mw->qplib_mw.pd = &pd->qplib_pd;

	mw->qplib_mw.type = (type == IB_MW_TYPE_1 ?
			     CMDQ_ALLOCATE_MRW_MRW_FLAGS_MW_TYPE1 :
			     CMDQ_ALLOCATE_MRW_MRW_FLAGS_MW_TYPE2B);
	rc = bnxt_qplib_alloc_mrw(&rdev->qplib_res, &mw->qplib_mw);
	if (rc) {
		ibdev_err(&rdev->ibdev, "Allocate MW failed!");
		goto fail;
	}
	mw->ib_mw.rkey = mw->qplib_mw.rkey;

	atomic_inc(&rdev->mw_count);
	return &mw->ib_mw;

fail:
	kfree(mw);
	return ERR_PTR(rc);
}

int bnxt_re_dealloc_mw(struct ib_mw *ib_mw)
{
	struct bnxt_re_mw *mw = container_of(ib_mw, struct bnxt_re_mw, ib_mw);
	struct bnxt_re_dev *rdev = mw->rdev;
	int rc;

	rc = bnxt_qplib_free_mrw(&rdev->qplib_res, &mw->qplib_mw);
	if (rc) {
		ibdev_err(&rdev->ibdev, "Free MW failed: %#x\n", rc);
		return rc;
	}

	kfree(mw);
	atomic_dec(&rdev->mw_count);
	return rc;
}

static int bnxt_re_page_size_ok(int page_shift)
{
	switch (page_shift) {
	case CMDQ_REGISTER_MR_LOG2_PBL_PG_SIZE_PG_4K:
	case CMDQ_REGISTER_MR_LOG2_PBL_PG_SIZE_PG_8K:
	case CMDQ_REGISTER_MR_LOG2_PBL_PG_SIZE_PG_64K:
	case CMDQ_REGISTER_MR_LOG2_PBL_PG_SIZE_PG_256K:
	case CMDQ_REGISTER_MR_LOG2_PBL_PG_SIZE_PG_1M:
	case CMDQ_REGISTER_MR_LOG2_PBL_PG_SIZE_PG_2M:
	case CMDQ_REGISTER_MR_LOG2_PBL_PG_SIZE_PG_4M:
	case CMDQ_REGISTER_MR_LOG2_PBL_PG_SIZE_PG_1G:
		return 1;
	default:
		return 0;
	}
}

static int fill_umem_pbl_tbl(struct ib_umem *umem, u64 *pbl_tbl_orig,
			     int page_shift)
{
	u64 *pbl_tbl = pbl_tbl_orig;
	u64 page_size = BIT_ULL(page_shift);
	struct ib_block_iter biter;

	rdma_for_each_block(umem->sg_head.sgl, &biter, umem->nmap, page_size)
		*pbl_tbl++ = rdma_block_iter_dma_address(&biter);

	return pbl_tbl - pbl_tbl_orig;
}

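/* Register a userspace MR: pin the pages via ib_umem and program the HW PBL */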
struct ib_mr *bnxt_re_reg_user_mr(struct ib_pd *ib_pd, u64 start, u64 length,
				  u64 virt_addr, int mr_access_flags,
				  struct ib_udata *udata)
{
	struct bnxt_re_pd *pd = container_of(ib_pd, struct bnxt_re_pd, ib_pd);
	struct bnxt_re_dev *rdev = pd->rdev;
	struct bnxt_re_mr *mr;
	struct ib_umem *umem;
	u64 *pbl_tbl = NULL;
	int umem_pgs, page_shift, rc;

	if (length > BNXT_RE_MAX_MR_SIZE) {
		ibdev_err(&rdev->ibdev, "MR Size: %lld > Max supported: %lld\n",
			  length, BNXT_RE_MAX_MR_SIZE);
		return ERR_PTR(-ENOMEM);
	}

	mr = kzalloc(sizeof(*mr), GFP_KERNEL);
	if (!mr)
		return ERR_PTR(-ENOMEM);

	mr->rdev = rdev;
	mr->qplib_mr.pd = &pd->qplib_pd;
	mr->qplib_mr.flags = __from_ib_access_flags(mr_access_flags);
	mr->qplib_mr.type = CMDQ_ALLOCATE_MRW_MRW_FLAGS_MR;

	rc = bnxt_qplib_alloc_mrw(&rdev->qplib_res, &mr->qplib_mr);
	if (rc) {
		ibdev_err(&rdev->ibdev, "Failed to allocate MR");
		goto free_mr;
	}

	mr->ib_mr.rkey = mr->qplib_mr.rkey;

	umem = ib_umem_get(udata, start, length, mr_access_flags);
	if (IS_ERR(umem)) {
		ibdev_err(&rdev->ibdev, "Failed to get umem");
		rc = -EFAULT;
		goto free_mrw;
	}
	mr->ib_umem = umem;

	mr->qplib_mr.va = virt_addr;
	umem_pgs = ib_umem_page_count(umem);
	if (!umem_pgs) {
		ibdev_err(&rdev->ibdev, "umem is invalid!");
		rc = -EINVAL;
		goto free_umem;
	}
	mr->qplib_mr.total_size = length;

	pbl_tbl = kcalloc(umem_pgs, sizeof(*pbl_tbl), GFP_KERNEL);
	if (!pbl_tbl) {
		rc = -ENOMEM;
		goto free_umem;
	}

	page_shift = __ffs(ib_umem_find_best_pgsz(umem,
				BNXT_RE_PAGE_SIZE_4K | BNXT_RE_PAGE_SIZE_2M,
				virt_addr));

	if (!bnxt_re_page_size_ok(page_shift)) {
		ibdev_err(&rdev->ibdev, "umem page size unsupported!");
		rc = -EFAULT;
		goto fail;
	}

	if (page_shift == BNXT_RE_PAGE_SHIFT_4K &&
	    length > BNXT_RE_MAX_MR_SIZE_LOW) {
		ibdev_err(&rdev->ibdev, "Requested MR Sz:%llu Max sup:%llu",
			  length, (u64)BNXT_RE_MAX_MR_SIZE_LOW);
		rc = -EINVAL;
		goto fail;
	}

	/* Map umem buf ptrs to the PBL */
	umem_pgs = fill_umem_pbl_tbl(umem, pbl_tbl, page_shift);
	rc = bnxt_qplib_reg_mr(&rdev->qplib_res, &mr->qplib_mr, pbl_tbl,
			       umem_pgs, false, 1 << page_shift);
	if (rc) {
		ibdev_err(&rdev->ibdev, "Failed to register user MR");
		goto fail;
	}

	kfree(pbl_tbl);

	mr->ib_mr.lkey = mr->qplib_mr.lkey;
	mr->ib_mr.rkey = mr->qplib_mr.lkey;
	atomic_inc(&rdev->mr_count);

	return &mr->ib_mr;
fail:
	kfree(pbl_tbl);
free_umem:
	ib_umem_release(umem);
free_mrw:
	bnxt_qplib_free_mrw(&rdev->qplib_res, &mr->qplib_mr);
free_mr:
	kfree(mr);
	return ERR_PTR(rc);
}

int bnxt_re_alloc_ucontext(struct ib_ucontext *ctx, struct ib_udata *udata)
{
	struct ib_device *ibdev = ctx->device;
	struct bnxt_re_ucontext *uctx =
		container_of(ctx, struct bnxt_re_ucontext, ib_uctx);
	struct bnxt_re_dev *rdev = to_bnxt_re_dev(ibdev, ibdev);
	struct bnxt_qplib_dev_attr *dev_attr = &rdev->dev_attr;
	struct bnxt_re_uctx_resp resp;
	u32 chip_met_rev_num = 0;
	int rc;

	ibdev_dbg(ibdev, "ABI version requested %u", ibdev->ops.uverbs_abi_ver);

	if (ibdev->ops.uverbs_abi_ver != BNXT_RE_ABI_VERSION) {
		ibdev_dbg(ibdev, "ABI version is different from the supported %d",
			  BNXT_RE_ABI_VERSION);
		return -EPERM;
	}

	uctx->rdev = rdev;

	uctx->shpg = (void *)__get_free_page(GFP_KERNEL);
	if (!uctx->shpg) {
		rc = -ENOMEM;
		goto fail;
	}
	spin_lock_init(&uctx->sh_lock);

	resp.comp_mask = BNXT_RE_UCNTX_CMASK_HAVE_CCTX;
	chip_met_rev_num = rdev->chip_ctx->chip_num;
	chip_met_rev_num |= ((u32)rdev->chip_ctx->chip_rev & 0xFF) <<
			     BNXT_RE_CHIP_ID0_CHIP_REV_SFT;
	chip_met_rev_num |= ((u32)rdev->chip_ctx->chip_metal & 0xFF) <<
			     BNXT_RE_CHIP_ID0_CHIP_MET_SFT;
	resp.chip_id0 = chip_met_rev_num;
	/* Future extension of chip info */
	resp.chip_id1 = 0;

	resp.dev_id = rdev->en_dev->pdev->devfn;
	resp.max_qp = rdev->qplib_ctx.qpc_count;
	resp.pg_size = PAGE_SIZE;
	resp.cqe_sz = sizeof(struct cq_base);
	resp.max_cqd = dev_attr->max_cq_wqes;
	resp.rsvd = 0;

	rc = ib_copy_to_udata(udata, &resp, min(udata->outlen, sizeof(resp)));
	if (rc) {
		ibdev_err(ibdev, "Failed to copy user context");
		rc = -EFAULT;
		goto cfail;
	}

	return 0;
cfail:
	free_page((unsigned long)uctx->shpg);
	uctx->shpg = NULL;
fail:
	return rc;
}

void bnxt_re_dealloc_ucontext(struct ib_ucontext *ib_uctx)
{
	struct bnxt_re_ucontext *uctx = container_of(ib_uctx,
						     struct bnxt_re_ucontext,
						     ib_uctx);

	struct bnxt_re_dev *rdev = uctx->rdev;

	if (uctx->shpg)
		free_page((unsigned long)uctx->shpg);

	if (uctx->dpi.dbr) {
		/* Free DPI only if this is the first PD allocated by the
		 * application and mark the context dpi as NULL
		 */
		bnxt_qplib_dealloc_dpi(&rdev->qplib_res,
				       &rdev->qplib_res.dpi_tbl, &uctx->dpi);
		uctx->dpi.dbr = NULL;
	}
}

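/*
 * mmap either a doorbell page (non-zero pgoff) or the shared page
 * (pgoff == 0) into the user process.
 */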
int bnxt_re_mmap(struct ib_ucontext *ib_uctx, struct vm_area_struct *vma)
{
	struct bnxt_re_ucontext *uctx = container_of(ib_uctx,
						     struct bnxt_re_ucontext,
						     ib_uctx);
	struct bnxt_re_dev *rdev = uctx->rdev;
	u64 pfn;

	if (vma->vm_end - vma->vm_start != PAGE_SIZE)
		return -EINVAL;

	if (vma->vm_pgoff) {
		vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
		if (io_remap_pfn_range(vma, vma->vm_start, vma->vm_pgoff,
				       PAGE_SIZE, vma->vm_page_prot)) {
			ibdev_err(&rdev->ibdev, "Failed to map DPI");
			return -EAGAIN;
		}
	} else {
		pfn = virt_to_phys(uctx->shpg) >> PAGE_SHIFT;
		if (remap_pfn_range(vma, vma->vm_start,
				    pfn, PAGE_SIZE, vma->vm_page_prot)) {
			ibdev_err(&rdev->ibdev, "Failed to map shared page");
			return -EAGAIN;
		}
	}

	return 0;
}