1
2
3#include "main.h"
4
5
6
7
8
9
10
11static int irdma_query_device(struct ib_device *ibdev,
12 struct ib_device_attr *props,
13 struct ib_udata *udata)
14{
15 struct irdma_device *iwdev = to_iwdev(ibdev);
16 struct irdma_pci_f *rf = iwdev->rf;
17 struct pci_dev *pcidev = iwdev->rf->pcidev;
18 struct irdma_hw_attrs *hw_attrs = &rf->sc_dev.hw_attrs;
19
20 if (udata->inlen || udata->outlen)
21 return -EINVAL;
22
23 memset(props, 0, sizeof(*props));
24 ether_addr_copy((u8 *)&props->sys_image_guid, iwdev->netdev->dev_addr);
25 props->fw_ver = (u64)irdma_fw_major_ver(&rf->sc_dev) << 32 |
26 irdma_fw_minor_ver(&rf->sc_dev);
27 props->device_cap_flags = iwdev->device_cap_flags;
28 props->vendor_id = pcidev->vendor;
29 props->vendor_part_id = pcidev->device;
30
31 props->hw_ver = rf->pcidev->revision;
32 props->page_size_cap = SZ_4K | SZ_2M | SZ_1G;
33 props->max_mr_size = hw_attrs->max_mr_size;
34 props->max_qp = rf->max_qp - rf->used_qps;
35 props->max_qp_wr = hw_attrs->max_qp_wr;
36 props->max_send_sge = hw_attrs->uk_attrs.max_hw_wq_frags;
37 props->max_recv_sge = hw_attrs->uk_attrs.max_hw_wq_frags;
38 props->max_cq = rf->max_cq - rf->used_cqs;
39 props->max_cqe = rf->max_cqe;
40 props->max_mr = rf->max_mr - rf->used_mrs;
41 props->max_mw = props->max_mr;
42 props->max_pd = rf->max_pd - rf->used_pds;
43 props->max_sge_rd = hw_attrs->uk_attrs.max_hw_read_sges;
44 props->max_qp_rd_atom = hw_attrs->max_hw_ird;
45 props->max_qp_init_rd_atom = hw_attrs->max_hw_ord;
46 if (rdma_protocol_roce(ibdev, 1))
47 props->max_pkeys = IRDMA_PKEY_TBL_SZ;
48 props->max_ah = rf->max_ah;
49 props->max_mcast_grp = rf->max_mcg;
50 props->max_mcast_qp_attach = IRDMA_MAX_MGS_PER_CTX;
51 props->max_total_mcast_qp_attach = rf->max_qp * IRDMA_MAX_MGS_PER_CTX;
52 props->max_fast_reg_page_list_len = IRDMA_MAX_PAGES_PER_FMR;
53#define HCA_CLOCK_TIMESTAMP_MASK 0x1ffff
54 if (hw_attrs->uk_attrs.hw_rev >= IRDMA_GEN_2)
55 props->timestamp_mask = HCA_CLOCK_TIMESTAMP_MASK;
56
57 return 0;
58}
59
60
61
62
63
64
65
66static void irdma_get_eth_speed_and_width(u32 link_speed, u16 *active_speed,
67 u8 *active_width)
68{
69 if (link_speed <= SPEED_1000) {
70 *active_width = IB_WIDTH_1X;
71 *active_speed = IB_SPEED_SDR;
72 } else if (link_speed <= SPEED_10000) {
73 *active_width = IB_WIDTH_1X;
74 *active_speed = IB_SPEED_FDR10;
75 } else if (link_speed <= SPEED_20000) {
76 *active_width = IB_WIDTH_4X;
77 *active_speed = IB_SPEED_DDR;
78 } else if (link_speed <= SPEED_25000) {
79 *active_width = IB_WIDTH_1X;
80 *active_speed = IB_SPEED_EDR;
81 } else if (link_speed <= SPEED_40000) {
82 *active_width = IB_WIDTH_4X;
83 *active_speed = IB_SPEED_FDR10;
84 } else {
85 *active_width = IB_WIDTH_4X;
86 *active_speed = IB_SPEED_EDR;
87 }
88}
89
90
91
92
93
94
95
96static int irdma_query_port(struct ib_device *ibdev, u32 port,
97 struct ib_port_attr *props)
98{
99 struct irdma_device *iwdev = to_iwdev(ibdev);
100 struct net_device *netdev = iwdev->netdev;
101
102
103
104 props->max_mtu = IB_MTU_4096;
105 props->active_mtu = ib_mtu_int_to_enum(netdev->mtu);
106 props->lid = 1;
107 props->lmc = 0;
108 props->sm_lid = 0;
109 props->sm_sl = 0;
110 if (netif_carrier_ok(netdev) && netif_running(netdev)) {
111 props->state = IB_PORT_ACTIVE;
112 props->phys_state = IB_PORT_PHYS_STATE_LINK_UP;
113 } else {
114 props->state = IB_PORT_DOWN;
115 props->phys_state = IB_PORT_PHYS_STATE_DISABLED;
116 }
117 irdma_get_eth_speed_and_width(SPEED_100000, &props->active_speed,
118 &props->active_width);
119
120 if (rdma_protocol_roce(ibdev, 1)) {
121 props->gid_tbl_len = 32;
122 props->ip_gids = true;
123 props->pkey_tbl_len = IRDMA_PKEY_TBL_SZ;
124 } else {
125 props->gid_tbl_len = 1;
126 }
127 props->qkey_viol_cntr = 0;
128 props->port_cap_flags |= IB_PORT_CM_SUP | IB_PORT_REINIT_SUP;
129 props->max_msg_sz = iwdev->rf->sc_dev.hw_attrs.max_hw_outbound_msg_size;
130
131 return 0;
132}
133
134
135
136
137
138static void irdma_disassociate_ucontext(struct ib_ucontext *context)
139{
140}
141
142static int irdma_mmap_legacy(struct irdma_ucontext *ucontext,
143 struct vm_area_struct *vma)
144{
145 u64 pfn;
146
147 if (vma->vm_pgoff || vma->vm_end - vma->vm_start != PAGE_SIZE)
148 return -EINVAL;
149
150 vma->vm_private_data = ucontext;
151 pfn = ((uintptr_t)ucontext->iwdev->rf->sc_dev.hw_regs[IRDMA_DB_ADDR_OFFSET] +
152 pci_resource_start(ucontext->iwdev->rf->pcidev, 0)) >> PAGE_SHIFT;
153
154 return rdma_user_mmap_io(&ucontext->ibucontext, vma, pfn, PAGE_SIZE,
155 pgprot_noncached(vma->vm_page_prot), NULL);
156}
157
158static void irdma_mmap_free(struct rdma_user_mmap_entry *rdma_entry)
159{
160 struct irdma_user_mmap_entry *entry = to_irdma_mmap_entry(rdma_entry);
161
162 kfree(entry);
163}
164
165static struct rdma_user_mmap_entry*
166irdma_user_mmap_entry_insert(struct irdma_ucontext *ucontext, u64 bar_offset,
167 enum irdma_mmap_flag mmap_flag, u64 *mmap_offset)
168{
169 struct irdma_user_mmap_entry *entry = kzalloc(sizeof(*entry), GFP_KERNEL);
170 int ret;
171
172 if (!entry)
173 return NULL;
174
175 entry->bar_offset = bar_offset;
176 entry->mmap_flag = mmap_flag;
177
178 ret = rdma_user_mmap_entry_insert(&ucontext->ibucontext,
179 &entry->rdma_entry, PAGE_SIZE);
180 if (ret) {
181 kfree(entry);
182 return NULL;
183 }
184 *mmap_offset = rdma_user_mmap_get_offset(&entry->rdma_entry);
185
186 return &entry->rdma_entry;
187}
188
189
190
191
192
193
194static int irdma_mmap(struct ib_ucontext *context, struct vm_area_struct *vma)
195{
196 struct rdma_user_mmap_entry *rdma_entry;
197 struct irdma_user_mmap_entry *entry;
198 struct irdma_ucontext *ucontext;
199 u64 pfn;
200 int ret;
201
202 ucontext = to_ucontext(context);
203
204
205 if (ucontext->legacy_mode)
206 return irdma_mmap_legacy(ucontext, vma);
207
208 rdma_entry = rdma_user_mmap_entry_get(&ucontext->ibucontext, vma);
209 if (!rdma_entry) {
210 ibdev_dbg(&ucontext->iwdev->ibdev,
211 "VERBS: pgoff[0x%lx] does not have valid entry\n",
212 vma->vm_pgoff);
213 return -EINVAL;
214 }
215
216 entry = to_irdma_mmap_entry(rdma_entry);
217 ibdev_dbg(&ucontext->iwdev->ibdev,
218 "VERBS: bar_offset [0x%llx] mmap_flag [%d]\n",
219 entry->bar_offset, entry->mmap_flag);
220
221 pfn = (entry->bar_offset +
222 pci_resource_start(ucontext->iwdev->rf->pcidev, 0)) >> PAGE_SHIFT;
223
224 switch (entry->mmap_flag) {
225 case IRDMA_MMAP_IO_NC:
226 ret = rdma_user_mmap_io(context, vma, pfn, PAGE_SIZE,
227 pgprot_noncached(vma->vm_page_prot),
228 rdma_entry);
229 break;
230 case IRDMA_MMAP_IO_WC:
231 ret = rdma_user_mmap_io(context, vma, pfn, PAGE_SIZE,
232 pgprot_writecombine(vma->vm_page_prot),
233 rdma_entry);
234 break;
235 default:
236 ret = -EINVAL;
237 }
238
239 if (ret)
240 ibdev_dbg(&ucontext->iwdev->ibdev,
241 "VERBS: bar_offset [0x%llx] mmap_flag[%d] err[%d]\n",
242 entry->bar_offset, entry->mmap_flag, ret);
243 rdma_user_mmap_entry_put(rdma_entry);
244
245 return ret;
246}
247
248
249
250
251
252static void irdma_alloc_push_page(struct irdma_qp *iwqp)
253{
254 struct irdma_cqp_request *cqp_request;
255 struct cqp_cmds_info *cqp_info;
256 struct irdma_device *iwdev = iwqp->iwdev;
257 struct irdma_sc_qp *qp = &iwqp->sc_qp;
258 enum irdma_status_code status;
259
260 cqp_request = irdma_alloc_and_get_cqp_request(&iwdev->rf->cqp, true);
261 if (!cqp_request)
262 return;
263
264 cqp_info = &cqp_request->info;
265 cqp_info->cqp_cmd = IRDMA_OP_MANAGE_PUSH_PAGE;
266 cqp_info->post_sq = 1;
267 cqp_info->in.u.manage_push_page.info.push_idx = 0;
268 cqp_info->in.u.manage_push_page.info.qs_handle =
269 qp->vsi->qos[qp->user_pri].qs_handle;
270 cqp_info->in.u.manage_push_page.info.free_page = 0;
271 cqp_info->in.u.manage_push_page.info.push_page_type = 0;
272 cqp_info->in.u.manage_push_page.cqp = &iwdev->rf->cqp.sc_cqp;
273 cqp_info->in.u.manage_push_page.scratch = (uintptr_t)cqp_request;
274
275 status = irdma_handle_cqp_op(iwdev->rf, cqp_request);
276 if (!status && cqp_request->compl_info.op_ret_val <
277 iwdev->rf->sc_dev.hw_attrs.max_hw_device_pages) {
278 qp->push_idx = cqp_request->compl_info.op_ret_val;
279 qp->push_offset = 0;
280 }
281
282 irdma_put_cqp_request(&iwdev->rf->cqp, cqp_request);
283}
284
285
286
287
288
289
290
291
292
293static int irdma_alloc_ucontext(struct ib_ucontext *uctx,
294 struct ib_udata *udata)
295{
296 struct ib_device *ibdev = uctx->device;
297 struct irdma_device *iwdev = to_iwdev(ibdev);
298 struct irdma_alloc_ucontext_req req;
299 struct irdma_alloc_ucontext_resp uresp = {};
300 struct irdma_ucontext *ucontext = to_ucontext(uctx);
301 struct irdma_uk_attrs *uk_attrs;
302
303 if (ib_copy_from_udata(&req, udata, min(sizeof(req), udata->inlen)))
304 return -EINVAL;
305
306 if (req.userspace_ver < 4 || req.userspace_ver > IRDMA_ABI_VER)
307 goto ver_error;
308
309 ucontext->iwdev = iwdev;
310 ucontext->abi_ver = req.userspace_ver;
311
312 uk_attrs = &iwdev->rf->sc_dev.hw_attrs.uk_attrs;
313
314 if (udata->outlen < sizeof(uresp)) {
315 if (uk_attrs->hw_rev != IRDMA_GEN_1)
316 return -EOPNOTSUPP;
317
318 ucontext->legacy_mode = true;
319 uresp.max_qps = iwdev->rf->max_qp;
320 uresp.max_pds = iwdev->rf->sc_dev.hw_attrs.max_hw_pds;
321 uresp.wq_size = iwdev->rf->sc_dev.hw_attrs.max_qp_wr * 2;
322 uresp.kernel_ver = req.userspace_ver;
323 if (ib_copy_to_udata(udata, &uresp,
324 min(sizeof(uresp), udata->outlen)))
325 return -EFAULT;
326 } else {
327 u64 bar_off = (uintptr_t)iwdev->rf->sc_dev.hw_regs[IRDMA_DB_ADDR_OFFSET];
328
329 ucontext->db_mmap_entry =
330 irdma_user_mmap_entry_insert(ucontext, bar_off,
331 IRDMA_MMAP_IO_NC,
332 &uresp.db_mmap_key);
333 if (!ucontext->db_mmap_entry)
334 return -ENOMEM;
335
336 uresp.kernel_ver = IRDMA_ABI_VER;
337 uresp.feature_flags = uk_attrs->feature_flags;
338 uresp.max_hw_wq_frags = uk_attrs->max_hw_wq_frags;
339 uresp.max_hw_read_sges = uk_attrs->max_hw_read_sges;
340 uresp.max_hw_inline = uk_attrs->max_hw_inline;
341 uresp.max_hw_rq_quanta = uk_attrs->max_hw_rq_quanta;
342 uresp.max_hw_wq_quanta = uk_attrs->max_hw_wq_quanta;
343 uresp.max_hw_sq_chunk = uk_attrs->max_hw_sq_chunk;
344 uresp.max_hw_cq_size = uk_attrs->max_hw_cq_size;
345 uresp.min_hw_cq_size = uk_attrs->min_hw_cq_size;
346 uresp.hw_rev = uk_attrs->hw_rev;
347 if (ib_copy_to_udata(udata, &uresp,
348 min(sizeof(uresp), udata->outlen))) {
349 rdma_user_mmap_entry_remove(ucontext->db_mmap_entry);
350 return -EFAULT;
351 }
352 }
353
354 INIT_LIST_HEAD(&ucontext->cq_reg_mem_list);
355 spin_lock_init(&ucontext->cq_reg_mem_list_lock);
356 INIT_LIST_HEAD(&ucontext->qp_reg_mem_list);
357 spin_lock_init(&ucontext->qp_reg_mem_list_lock);
358
359 return 0;
360
361ver_error:
362 ibdev_err(&iwdev->ibdev,
363 "Invalid userspace driver version detected. Detected version %d, should be %d\n",
364 req.userspace_ver, IRDMA_ABI_VER);
365 return -EINVAL;
366}
367
368
369
370
371
372static void irdma_dealloc_ucontext(struct ib_ucontext *context)
373{
374 struct irdma_ucontext *ucontext = to_ucontext(context);
375
376 rdma_user_mmap_entry_remove(ucontext->db_mmap_entry);
377}
378
379
380
381
382
383
384static int irdma_alloc_pd(struct ib_pd *pd, struct ib_udata *udata)
385{
386 struct irdma_pd *iwpd = to_iwpd(pd);
387 struct irdma_device *iwdev = to_iwdev(pd->device);
388 struct irdma_sc_dev *dev = &iwdev->rf->sc_dev;
389 struct irdma_pci_f *rf = iwdev->rf;
390 struct irdma_alloc_pd_resp uresp = {};
391 struct irdma_sc_pd *sc_pd;
392 u32 pd_id = 0;
393 int err;
394
395 err = irdma_alloc_rsrc(rf, rf->allocated_pds, rf->max_pd, &pd_id,
396 &rf->next_pd);
397 if (err)
398 return err;
399
400 sc_pd = &iwpd->sc_pd;
401 if (udata) {
402 struct irdma_ucontext *ucontext =
403 rdma_udata_to_drv_context(udata, struct irdma_ucontext,
404 ibucontext);
405 irdma_sc_pd_init(dev, sc_pd, pd_id, ucontext->abi_ver);
406 uresp.pd_id = pd_id;
407 if (ib_copy_to_udata(udata, &uresp,
408 min(sizeof(uresp), udata->outlen))) {
409 err = -EFAULT;
410 goto error;
411 }
412 } else {
413 irdma_sc_pd_init(dev, sc_pd, pd_id, IRDMA_ABI_VER);
414 }
415
416 return 0;
417error:
418 irdma_free_rsrc(rf, rf->allocated_pds, pd_id);
419
420 return err;
421}
422
423
424
425
426
427
428static int irdma_dealloc_pd(struct ib_pd *ibpd, struct ib_udata *udata)
429{
430 struct irdma_pd *iwpd = to_iwpd(ibpd);
431 struct irdma_device *iwdev = to_iwdev(ibpd->device);
432
433 irdma_free_rsrc(iwdev->rf, iwdev->rf->allocated_pds, iwpd->sc_pd.pd_id);
434
435 return 0;
436}
437
438
439
440
441
442
443
444static struct irdma_pbl *irdma_get_pbl(unsigned long va,
445 struct list_head *pbl_list)
446{
447 struct irdma_pbl *iwpbl;
448
449 list_for_each_entry (iwpbl, pbl_list, list) {
450 if (iwpbl->user_base == va) {
451 list_del(&iwpbl->list);
452 iwpbl->on_list = false;
453 return iwpbl;
454 }
455 }
456
457 return NULL;
458}
459
460
461
462
463
464
465static void irdma_clean_cqes(struct irdma_qp *iwqp, struct irdma_cq *iwcq)
466{
467 struct irdma_cq_uk *ukcq = &iwcq->sc_cq.cq_uk;
468 unsigned long flags;
469
470 spin_lock_irqsave(&iwcq->lock, flags);
471 irdma_uk_clean_cq(&iwqp->sc_qp.qp_uk, ukcq);
472 spin_unlock_irqrestore(&iwcq->lock, flags);
473}
474
475static void irdma_remove_push_mmap_entries(struct irdma_qp *iwqp)
476{
477 if (iwqp->push_db_mmap_entry) {
478 rdma_user_mmap_entry_remove(iwqp->push_db_mmap_entry);
479 iwqp->push_db_mmap_entry = NULL;
480 }
481 if (iwqp->push_wqe_mmap_entry) {
482 rdma_user_mmap_entry_remove(iwqp->push_wqe_mmap_entry);
483 iwqp->push_wqe_mmap_entry = NULL;
484 }
485}
486
487static int irdma_setup_push_mmap_entries(struct irdma_ucontext *ucontext,
488 struct irdma_qp *iwqp,
489 u64 *push_wqe_mmap_key,
490 u64 *push_db_mmap_key)
491{
492 struct irdma_device *iwdev = ucontext->iwdev;
493 u64 rsvd, bar_off;
494
495 rsvd = IRDMA_PF_BAR_RSVD;
496 bar_off = (uintptr_t)iwdev->rf->sc_dev.hw_regs[IRDMA_DB_ADDR_OFFSET];
497
498 bar_off += IRDMA_HW_PAGE_SIZE;
499
500 bar_off += rsvd + iwqp->sc_qp.push_idx * IRDMA_HW_PAGE_SIZE;
501 iwqp->push_wqe_mmap_entry = irdma_user_mmap_entry_insert(ucontext,
502 bar_off, IRDMA_MMAP_IO_WC,
503 push_wqe_mmap_key);
504 if (!iwqp->push_wqe_mmap_entry)
505 return -ENOMEM;
506
507
508 bar_off += IRDMA_HW_PAGE_SIZE;
509 iwqp->push_db_mmap_entry = irdma_user_mmap_entry_insert(ucontext,
510 bar_off, IRDMA_MMAP_IO_NC,
511 push_db_mmap_key);
512 if (!iwqp->push_db_mmap_entry) {
513 rdma_user_mmap_entry_remove(iwqp->push_wqe_mmap_entry);
514 return -ENOMEM;
515 }
516
517 return 0;
518}
519
520
521
522
523
524
525static int irdma_destroy_qp(struct ib_qp *ibqp, struct ib_udata *udata)
526{
527 struct irdma_qp *iwqp = to_iwqp(ibqp);
528 struct irdma_device *iwdev = iwqp->iwdev;
529
530 iwqp->sc_qp.qp_uk.destroy_pending = true;
531
532 if (iwqp->iwarp_state == IRDMA_QP_STATE_RTS)
533 irdma_modify_qp_to_err(&iwqp->sc_qp);
534
535 irdma_qp_rem_ref(&iwqp->ibqp);
536 wait_for_completion(&iwqp->free_qp);
537 irdma_free_lsmm_rsrc(iwqp);
538 irdma_cqp_qp_destroy_cmd(&iwdev->rf->sc_dev, &iwqp->sc_qp);
539
540 if (!iwqp->user_mode) {
541 if (iwqp->iwscq) {
542 irdma_clean_cqes(iwqp, iwqp->iwscq);
543 if (iwqp->iwrcq != iwqp->iwscq)
544 irdma_clean_cqes(iwqp, iwqp->iwrcq);
545 }
546 }
547 irdma_remove_push_mmap_entries(iwqp);
548 irdma_free_qp_rsrc(iwqp);
549
550 return 0;
551}
552
553
554
555
556
557
558
559static void irdma_setup_virt_qp(struct irdma_device *iwdev,
560 struct irdma_qp *iwqp,
561 struct irdma_qp_init_info *init_info)
562{
563 struct irdma_pbl *iwpbl = iwqp->iwpbl;
564 struct irdma_qp_mr *qpmr = &iwpbl->qp_mr;
565
566 iwqp->page = qpmr->sq_page;
567 init_info->shadow_area_pa = qpmr->shadow;
568 if (iwpbl->pbl_allocated) {
569 init_info->virtual_map = true;
570 init_info->sq_pa = qpmr->sq_pbl.idx;
571 init_info->rq_pa = qpmr->rq_pbl.idx;
572 } else {
573 init_info->sq_pa = qpmr->sq_pbl.addr;
574 init_info->rq_pa = qpmr->rq_pbl.addr;
575 }
576}
577
578
579
580
581
582
583
584
585static int irdma_setup_kmode_qp(struct irdma_device *iwdev,
586 struct irdma_qp *iwqp,
587 struct irdma_qp_init_info *info,
588 struct ib_qp_init_attr *init_attr)
589{
590 struct irdma_dma_mem *mem = &iwqp->kqp.dma_mem;
591 u32 sqdepth, rqdepth;
592 u8 sqshift, rqshift;
593 u32 size;
594 enum irdma_status_code status;
595 struct irdma_qp_uk_init_info *ukinfo = &info->qp_uk_init_info;
596 struct irdma_uk_attrs *uk_attrs = &iwdev->rf->sc_dev.hw_attrs.uk_attrs;
597
598 irdma_get_wqe_shift(uk_attrs,
599 uk_attrs->hw_rev >= IRDMA_GEN_2 ? ukinfo->max_sq_frag_cnt + 1 :
600 ukinfo->max_sq_frag_cnt,
601 ukinfo->max_inline_data, &sqshift);
602 status = irdma_get_sqdepth(uk_attrs, ukinfo->sq_size, sqshift,
603 &sqdepth);
604 if (status)
605 return -ENOMEM;
606
607 if (uk_attrs->hw_rev == IRDMA_GEN_1)
608 rqshift = IRDMA_MAX_RQ_WQE_SHIFT_GEN1;
609 else
610 irdma_get_wqe_shift(uk_attrs, ukinfo->max_rq_frag_cnt, 0,
611 &rqshift);
612
613 status = irdma_get_rqdepth(uk_attrs, ukinfo->rq_size, rqshift,
614 &rqdepth);
615 if (status)
616 return -ENOMEM;
617
618 iwqp->kqp.sq_wrid_mem =
619 kcalloc(sqdepth, sizeof(*iwqp->kqp.sq_wrid_mem), GFP_KERNEL);
620 if (!iwqp->kqp.sq_wrid_mem)
621 return -ENOMEM;
622
623 iwqp->kqp.rq_wrid_mem =
624 kcalloc(rqdepth, sizeof(*iwqp->kqp.rq_wrid_mem), GFP_KERNEL);
625 if (!iwqp->kqp.rq_wrid_mem) {
626 kfree(iwqp->kqp.sq_wrid_mem);
627 iwqp->kqp.sq_wrid_mem = NULL;
628 return -ENOMEM;
629 }
630
631 ukinfo->sq_wrtrk_array = iwqp->kqp.sq_wrid_mem;
632 ukinfo->rq_wrid_array = iwqp->kqp.rq_wrid_mem;
633
634 size = (sqdepth + rqdepth) * IRDMA_QP_WQE_MIN_SIZE;
635 size += (IRDMA_SHADOW_AREA_SIZE << 3);
636
637 mem->size = ALIGN(size, 256);
638 mem->va = dma_alloc_coherent(iwdev->rf->hw.device, mem->size,
639 &mem->pa, GFP_KERNEL);
640 if (!mem->va) {
641 kfree(iwqp->kqp.sq_wrid_mem);
642 iwqp->kqp.sq_wrid_mem = NULL;
643 kfree(iwqp->kqp.rq_wrid_mem);
644 iwqp->kqp.rq_wrid_mem = NULL;
645 return -ENOMEM;
646 }
647
648 ukinfo->sq = mem->va;
649 info->sq_pa = mem->pa;
650 ukinfo->rq = &ukinfo->sq[sqdepth];
651 info->rq_pa = info->sq_pa + (sqdepth * IRDMA_QP_WQE_MIN_SIZE);
652 ukinfo->shadow_area = ukinfo->rq[rqdepth].elem;
653 info->shadow_area_pa = info->rq_pa + (rqdepth * IRDMA_QP_WQE_MIN_SIZE);
654 ukinfo->sq_size = sqdepth >> sqshift;
655 ukinfo->rq_size = rqdepth >> rqshift;
656 ukinfo->qp_id = iwqp->ibqp.qp_num;
657
658 init_attr->cap.max_send_wr = (sqdepth - IRDMA_SQ_RSVD) >> sqshift;
659 init_attr->cap.max_recv_wr = (rqdepth - IRDMA_RQ_RSVD) >> rqshift;
660
661 return 0;
662}
663
664static int irdma_cqp_create_qp_cmd(struct irdma_qp *iwqp)
665{
666 struct irdma_pci_f *rf = iwqp->iwdev->rf;
667 struct irdma_cqp_request *cqp_request;
668 struct cqp_cmds_info *cqp_info;
669 struct irdma_create_qp_info *qp_info;
670 enum irdma_status_code status;
671
672 cqp_request = irdma_alloc_and_get_cqp_request(&rf->cqp, true);
673 if (!cqp_request)
674 return -ENOMEM;
675
676 cqp_info = &cqp_request->info;
677 qp_info = &cqp_request->info.in.u.qp_create.info;
678 memset(qp_info, 0, sizeof(*qp_info));
679 qp_info->mac_valid = true;
680 qp_info->cq_num_valid = true;
681 qp_info->next_iwarp_state = IRDMA_QP_STATE_IDLE;
682
683 cqp_info->cqp_cmd = IRDMA_OP_QP_CREATE;
684 cqp_info->post_sq = 1;
685 cqp_info->in.u.qp_create.qp = &iwqp->sc_qp;
686 cqp_info->in.u.qp_create.scratch = (uintptr_t)cqp_request;
687 status = irdma_handle_cqp_op(rf, cqp_request);
688 irdma_put_cqp_request(&rf->cqp, cqp_request);
689
690 return status ? -ENOMEM : 0;
691}
692
693static void irdma_roce_fill_and_set_qpctx_info(struct irdma_qp *iwqp,
694 struct irdma_qp_host_ctx_info *ctx_info)
695{
696 struct irdma_device *iwdev = iwqp->iwdev;
697 struct irdma_sc_dev *dev = &iwdev->rf->sc_dev;
698 struct irdma_roce_offload_info *roce_info;
699 struct irdma_udp_offload_info *udp_info;
700
701 udp_info = &iwqp->udp_info;
702 udp_info->snd_mss = ib_mtu_enum_to_int(ib_mtu_int_to_enum(iwdev->vsi.mtu));
703 udp_info->cwnd = iwdev->roce_cwnd;
704 udp_info->rexmit_thresh = 2;
705 udp_info->rnr_nak_thresh = 2;
706 udp_info->src_port = 0xc000;
707 udp_info->dst_port = ROCE_V2_UDP_DPORT;
708 roce_info = &iwqp->roce_info;
709 ether_addr_copy(roce_info->mac_addr, iwdev->netdev->dev_addr);
710
711 roce_info->rd_en = true;
712 roce_info->wr_rdresp_en = true;
713 roce_info->bind_en = true;
714 roce_info->dcqcn_en = false;
715 roce_info->rtomin = 5;
716
717 roce_info->ack_credits = iwdev->roce_ackcreds;
718 roce_info->ird_size = dev->hw_attrs.max_hw_ird;
719 roce_info->ord_size = dev->hw_attrs.max_hw_ord;
720
721 if (!iwqp->user_mode) {
722 roce_info->priv_mode_en = true;
723 roce_info->fast_reg_en = true;
724 roce_info->udprivcq_en = true;
725 }
726 roce_info->roce_tver = 0;
727
728 ctx_info->roce_info = &iwqp->roce_info;
729 ctx_info->udp_info = &iwqp->udp_info;
730 irdma_sc_qp_setctx_roce(&iwqp->sc_qp, iwqp->host_ctx.va, ctx_info);
731}
732
733static void irdma_iw_fill_and_set_qpctx_info(struct irdma_qp *iwqp,
734 struct irdma_qp_host_ctx_info *ctx_info)
735{
736 struct irdma_device *iwdev = iwqp->iwdev;
737 struct irdma_sc_dev *dev = &iwdev->rf->sc_dev;
738 struct irdma_iwarp_offload_info *iwarp_info;
739
740 iwarp_info = &iwqp->iwarp_info;
741 ether_addr_copy(iwarp_info->mac_addr, iwdev->netdev->dev_addr);
742 iwarp_info->rd_en = true;
743 iwarp_info->wr_rdresp_en = true;
744 iwarp_info->bind_en = true;
745 iwarp_info->ecn_en = true;
746 iwarp_info->rtomin = 5;
747
748 if (dev->hw_attrs.uk_attrs.hw_rev >= IRDMA_GEN_2)
749 iwarp_info->ib_rd_en = true;
750 if (!iwqp->user_mode) {
751 iwarp_info->priv_mode_en = true;
752 iwarp_info->fast_reg_en = true;
753 }
754 iwarp_info->ddp_ver = 1;
755 iwarp_info->rdmap_ver = 1;
756
757 ctx_info->iwarp_info = &iwqp->iwarp_info;
758 ctx_info->iwarp_info_valid = true;
759 irdma_sc_qp_setctx(&iwqp->sc_qp, iwqp->host_ctx.va, ctx_info);
760 ctx_info->iwarp_info_valid = false;
761}
762
763static int irdma_validate_qp_attrs(struct ib_qp_init_attr *init_attr,
764 struct irdma_device *iwdev)
765{
766 struct irdma_sc_dev *dev = &iwdev->rf->sc_dev;
767 struct irdma_uk_attrs *uk_attrs = &dev->hw_attrs.uk_attrs;
768
769 if (init_attr->create_flags)
770 return -EOPNOTSUPP;
771
772 if (init_attr->cap.max_inline_data > uk_attrs->max_hw_inline ||
773 init_attr->cap.max_send_sge > uk_attrs->max_hw_wq_frags ||
774 init_attr->cap.max_recv_sge > uk_attrs->max_hw_wq_frags)
775 return -EINVAL;
776
777 if (rdma_protocol_roce(&iwdev->ibdev, 1)) {
778 if (init_attr->qp_type != IB_QPT_RC &&
779 init_attr->qp_type != IB_QPT_UD &&
780 init_attr->qp_type != IB_QPT_GSI)
781 return -EOPNOTSUPP;
782 } else {
783 if (init_attr->qp_type != IB_QPT_RC)
784 return -EOPNOTSUPP;
785 }
786
787 return 0;
788}
789
790
791
792
793
794
795
796static struct ib_qp *irdma_create_qp(struct ib_pd *ibpd,
797 struct ib_qp_init_attr *init_attr,
798 struct ib_udata *udata)
799{
800 struct irdma_pd *iwpd = to_iwpd(ibpd);
801 struct irdma_device *iwdev = to_iwdev(ibpd->device);
802 struct irdma_pci_f *rf = iwdev->rf;
803 struct irdma_qp *iwqp;
804 struct irdma_create_qp_req req;
805 struct irdma_create_qp_resp uresp = {};
806 u32 qp_num = 0;
807 enum irdma_status_code ret;
808 int err_code;
809 int sq_size;
810 int rq_size;
811 struct irdma_sc_qp *qp;
812 struct irdma_sc_dev *dev = &rf->sc_dev;
813 struct irdma_uk_attrs *uk_attrs = &dev->hw_attrs.uk_attrs;
814 struct irdma_qp_init_info init_info = {};
815 struct irdma_qp_host_ctx_info *ctx_info;
816 unsigned long flags;
817
818 err_code = irdma_validate_qp_attrs(init_attr, iwdev);
819 if (err_code)
820 return ERR_PTR(err_code);
821
822 sq_size = init_attr->cap.max_send_wr;
823 rq_size = init_attr->cap.max_recv_wr;
824
825 init_info.vsi = &iwdev->vsi;
826 init_info.qp_uk_init_info.uk_attrs = uk_attrs;
827 init_info.qp_uk_init_info.sq_size = sq_size;
828 init_info.qp_uk_init_info.rq_size = rq_size;
829 init_info.qp_uk_init_info.max_sq_frag_cnt = init_attr->cap.max_send_sge;
830 init_info.qp_uk_init_info.max_rq_frag_cnt = init_attr->cap.max_recv_sge;
831 init_info.qp_uk_init_info.max_inline_data = init_attr->cap.max_inline_data;
832
833 iwqp = kzalloc(sizeof(*iwqp), GFP_KERNEL);
834 if (!iwqp)
835 return ERR_PTR(-ENOMEM);
836
837 qp = &iwqp->sc_qp;
838 qp->qp_uk.back_qp = iwqp;
839 qp->qp_uk.lock = &iwqp->lock;
840 qp->push_idx = IRDMA_INVALID_PUSH_PAGE_INDEX;
841
842 iwqp->iwdev = iwdev;
843 iwqp->q2_ctx_mem.size = ALIGN(IRDMA_Q2_BUF_SIZE + IRDMA_QP_CTX_SIZE,
844 256);
845 iwqp->q2_ctx_mem.va = dma_alloc_coherent(dev->hw->device,
846 iwqp->q2_ctx_mem.size,
847 &iwqp->q2_ctx_mem.pa,
848 GFP_KERNEL);
849 if (!iwqp->q2_ctx_mem.va) {
850 err_code = -ENOMEM;
851 goto error;
852 }
853
854 init_info.q2 = iwqp->q2_ctx_mem.va;
855 init_info.q2_pa = iwqp->q2_ctx_mem.pa;
856 init_info.host_ctx = (__le64 *)(init_info.q2 + IRDMA_Q2_BUF_SIZE);
857 init_info.host_ctx_pa = init_info.q2_pa + IRDMA_Q2_BUF_SIZE;
858
859 if (init_attr->qp_type == IB_QPT_GSI)
860 qp_num = 1;
861 else
862 err_code = irdma_alloc_rsrc(rf, rf->allocated_qps, rf->max_qp,
863 &qp_num, &rf->next_qp);
864 if (err_code)
865 goto error;
866
867 iwqp->iwpd = iwpd;
868 iwqp->ibqp.qp_num = qp_num;
869 qp = &iwqp->sc_qp;
870 iwqp->iwscq = to_iwcq(init_attr->send_cq);
871 iwqp->iwrcq = to_iwcq(init_attr->recv_cq);
872 iwqp->host_ctx.va = init_info.host_ctx;
873 iwqp->host_ctx.pa = init_info.host_ctx_pa;
874 iwqp->host_ctx.size = IRDMA_QP_CTX_SIZE;
875
876 init_info.pd = &iwpd->sc_pd;
877 init_info.qp_uk_init_info.qp_id = iwqp->ibqp.qp_num;
878 if (!rdma_protocol_roce(&iwdev->ibdev, 1))
879 init_info.qp_uk_init_info.first_sq_wq = 1;
880 iwqp->ctx_info.qp_compl_ctx = (uintptr_t)qp;
881 init_waitqueue_head(&iwqp->waitq);
882 init_waitqueue_head(&iwqp->mod_qp_waitq);
883
884 if (udata) {
885 err_code = ib_copy_from_udata(&req, udata,
886 min(sizeof(req), udata->inlen));
887 if (err_code) {
888 ibdev_dbg(&iwdev->ibdev,
889 "VERBS: ib_copy_from_data fail\n");
890 goto error;
891 }
892
893 iwqp->ctx_info.qp_compl_ctx = req.user_compl_ctx;
894 iwqp->user_mode = 1;
895 if (req.user_wqe_bufs) {
896 struct irdma_ucontext *ucontext =
897 rdma_udata_to_drv_context(udata,
898 struct irdma_ucontext,
899 ibucontext);
900
901 init_info.qp_uk_init_info.legacy_mode = ucontext->legacy_mode;
902 spin_lock_irqsave(&ucontext->qp_reg_mem_list_lock, flags);
903 iwqp->iwpbl = irdma_get_pbl((unsigned long)req.user_wqe_bufs,
904 &ucontext->qp_reg_mem_list);
905 spin_unlock_irqrestore(&ucontext->qp_reg_mem_list_lock, flags);
906
907 if (!iwqp->iwpbl) {
908 err_code = -ENODATA;
909 ibdev_dbg(&iwdev->ibdev, "VERBS: no pbl info\n");
910 goto error;
911 }
912 }
913 init_info.qp_uk_init_info.abi_ver = iwpd->sc_pd.abi_ver;
914 irdma_setup_virt_qp(iwdev, iwqp, &init_info);
915 } else {
916 init_info.qp_uk_init_info.abi_ver = IRDMA_ABI_VER;
917 err_code = irdma_setup_kmode_qp(iwdev, iwqp, &init_info, init_attr);
918 }
919
920 if (err_code) {
921 ibdev_dbg(&iwdev->ibdev, "VERBS: setup qp failed\n");
922 goto error;
923 }
924
925 if (rdma_protocol_roce(&iwdev->ibdev, 1)) {
926 if (init_attr->qp_type == IB_QPT_RC) {
927 init_info.qp_uk_init_info.type = IRDMA_QP_TYPE_ROCE_RC;
928 init_info.qp_uk_init_info.qp_caps = IRDMA_SEND_WITH_IMM |
929 IRDMA_WRITE_WITH_IMM |
930 IRDMA_ROCE;
931 } else {
932 init_info.qp_uk_init_info.type = IRDMA_QP_TYPE_ROCE_UD;
933 init_info.qp_uk_init_info.qp_caps = IRDMA_SEND_WITH_IMM |
934 IRDMA_ROCE;
935 }
936 } else {
937 init_info.qp_uk_init_info.type = IRDMA_QP_TYPE_IWARP;
938 init_info.qp_uk_init_info.qp_caps = IRDMA_WRITE_WITH_IMM;
939 }
940
941 if (dev->hw_attrs.uk_attrs.hw_rev > IRDMA_GEN_1)
942 init_info.qp_uk_init_info.qp_caps |= IRDMA_PUSH_MODE;
943
944 ret = irdma_sc_qp_init(qp, &init_info);
945 if (ret) {
946 err_code = -EPROTO;
947 ibdev_dbg(&iwdev->ibdev, "VERBS: qp_init fail\n");
948 goto error;
949 }
950
951 ctx_info = &iwqp->ctx_info;
952 ctx_info->send_cq_num = iwqp->iwscq->sc_cq.cq_uk.cq_id;
953 ctx_info->rcv_cq_num = iwqp->iwrcq->sc_cq.cq_uk.cq_id;
954
955 if (rdma_protocol_roce(&iwdev->ibdev, 1))
956 irdma_roce_fill_and_set_qpctx_info(iwqp, ctx_info);
957 else
958 irdma_iw_fill_and_set_qpctx_info(iwqp, ctx_info);
959
960 err_code = irdma_cqp_create_qp_cmd(iwqp);
961 if (err_code)
962 goto error;
963
964 refcount_set(&iwqp->refcnt, 1);
965 spin_lock_init(&iwqp->lock);
966 spin_lock_init(&iwqp->sc_qp.pfpdu.lock);
967 iwqp->sig_all = (init_attr->sq_sig_type == IB_SIGNAL_ALL_WR) ? 1 : 0;
968 rf->qp_table[qp_num] = iwqp;
969 iwqp->max_send_wr = sq_size;
970 iwqp->max_recv_wr = rq_size;
971
972 if (rdma_protocol_roce(&iwdev->ibdev, 1)) {
973 if (dev->ws_add(&iwdev->vsi, 0)) {
974 irdma_cqp_qp_destroy_cmd(&rf->sc_dev, &iwqp->sc_qp);
975 err_code = -EINVAL;
976 goto error;
977 }
978
979 irdma_qp_add_qos(&iwqp->sc_qp);
980 }
981
982 if (udata) {
983
984 if (udata->outlen < sizeof(uresp)) {
985 uresp.lsmm = 1;
986 uresp.push_idx = IRDMA_INVALID_PUSH_PAGE_INDEX_GEN_1;
987 } else {
988 if (rdma_protocol_iwarp(&iwdev->ibdev, 1))
989 uresp.lsmm = 1;
990 }
991 uresp.actual_sq_size = sq_size;
992 uresp.actual_rq_size = rq_size;
993 uresp.qp_id = qp_num;
994 uresp.qp_caps = qp->qp_uk.qp_caps;
995
996 err_code = ib_copy_to_udata(udata, &uresp,
997 min(sizeof(uresp), udata->outlen));
998 if (err_code) {
999 ibdev_dbg(&iwdev->ibdev, "VERBS: copy_to_udata failed\n");
1000 irdma_destroy_qp(&iwqp->ibqp, udata);
1001 return ERR_PTR(err_code);
1002 }
1003 }
1004
1005 init_completion(&iwqp->free_qp);
1006 return &iwqp->ibqp;
1007
1008error:
1009 irdma_free_qp_rsrc(iwqp);
1010
1011 return ERR_PTR(err_code);
1012}
1013
1014static int irdma_get_ib_acc_flags(struct irdma_qp *iwqp)
1015{
1016 int acc_flags = 0;
1017
1018 if (rdma_protocol_roce(iwqp->ibqp.device, 1)) {
1019 if (iwqp->roce_info.wr_rdresp_en) {
1020 acc_flags |= IB_ACCESS_LOCAL_WRITE;
1021 acc_flags |= IB_ACCESS_REMOTE_WRITE;
1022 }
1023 if (iwqp->roce_info.rd_en)
1024 acc_flags |= IB_ACCESS_REMOTE_READ;
1025 if (iwqp->roce_info.bind_en)
1026 acc_flags |= IB_ACCESS_MW_BIND;
1027 } else {
1028 if (iwqp->iwarp_info.wr_rdresp_en) {
1029 acc_flags |= IB_ACCESS_LOCAL_WRITE;
1030 acc_flags |= IB_ACCESS_REMOTE_WRITE;
1031 }
1032 if (iwqp->iwarp_info.rd_en)
1033 acc_flags |= IB_ACCESS_REMOTE_READ;
1034 if (iwqp->iwarp_info.bind_en)
1035 acc_flags |= IB_ACCESS_MW_BIND;
1036 }
1037 return acc_flags;
1038}
1039
1040
1041
1042
1043
1044
1045
1046
1047static int irdma_query_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
1048 int attr_mask, struct ib_qp_init_attr *init_attr)
1049{
1050 struct irdma_qp *iwqp = to_iwqp(ibqp);
1051 struct irdma_sc_qp *qp = &iwqp->sc_qp;
1052
1053 memset(attr, 0, sizeof(*attr));
1054 memset(init_attr, 0, sizeof(*init_attr));
1055
1056 attr->qp_state = iwqp->ibqp_state;
1057 attr->cur_qp_state = iwqp->ibqp_state;
1058 attr->cap.max_send_wr = iwqp->max_send_wr;
1059 attr->cap.max_recv_wr = iwqp->max_recv_wr;
1060 attr->cap.max_inline_data = qp->qp_uk.max_inline_data;
1061 attr->cap.max_send_sge = qp->qp_uk.max_sq_frag_cnt;
1062 attr->cap.max_recv_sge = qp->qp_uk.max_rq_frag_cnt;
1063 attr->qp_access_flags = irdma_get_ib_acc_flags(iwqp);
1064 attr->port_num = 1;
1065 if (rdma_protocol_roce(ibqp->device, 1)) {
1066 attr->path_mtu = ib_mtu_int_to_enum(iwqp->udp_info.snd_mss);
1067 attr->qkey = iwqp->roce_info.qkey;
1068 attr->rq_psn = iwqp->udp_info.epsn;
1069 attr->sq_psn = iwqp->udp_info.psn_nxt;
1070 attr->dest_qp_num = iwqp->roce_info.dest_qp;
1071 attr->pkey_index = iwqp->roce_info.p_key;
1072 attr->retry_cnt = iwqp->udp_info.rexmit_thresh;
1073 attr->rnr_retry = iwqp->udp_info.rnr_nak_thresh;
1074 attr->max_rd_atomic = iwqp->roce_info.ord_size;
1075 attr->max_dest_rd_atomic = iwqp->roce_info.ird_size;
1076 }
1077
1078 init_attr->event_handler = iwqp->ibqp.event_handler;
1079 init_attr->qp_context = iwqp->ibqp.qp_context;
1080 init_attr->send_cq = iwqp->ibqp.send_cq;
1081 init_attr->recv_cq = iwqp->ibqp.recv_cq;
1082 init_attr->cap = attr->cap;
1083
1084 return 0;
1085}
1086
1087
1088
1089
1090
1091
1092
1093
1094static int irdma_query_pkey(struct ib_device *ibdev, u32 port, u16 index,
1095 u16 *pkey)
1096{
1097 if (index >= IRDMA_PKEY_TBL_SZ)
1098 return -EINVAL;
1099
1100 *pkey = IRDMA_DEFAULT_PKEY;
1101 return 0;
1102}
1103
1104
1105
1106
1107
1108
1109
1110
1111int irdma_modify_qp_roce(struct ib_qp *ibqp, struct ib_qp_attr *attr,
1112 int attr_mask, struct ib_udata *udata)
1113{
1114 struct irdma_pd *iwpd = to_iwpd(ibqp->pd);
1115 struct irdma_qp *iwqp = to_iwqp(ibqp);
1116 struct irdma_device *iwdev = iwqp->iwdev;
1117 struct irdma_sc_dev *dev = &iwdev->rf->sc_dev;
1118 struct irdma_qp_host_ctx_info *ctx_info;
1119 struct irdma_roce_offload_info *roce_info;
1120 struct irdma_udp_offload_info *udp_info;
1121 struct irdma_modify_qp_info info = {};
1122 struct irdma_modify_qp_resp uresp = {};
1123 struct irdma_modify_qp_req ureq = {};
1124 unsigned long flags;
1125 u8 issue_modify_qp = 0;
1126 int ret = 0;
1127
1128 ctx_info = &iwqp->ctx_info;
1129 roce_info = &iwqp->roce_info;
1130 udp_info = &iwqp->udp_info;
1131
1132 if (attr_mask & ~IB_QP_ATTR_STANDARD_BITS)
1133 return -EOPNOTSUPP;
1134
1135 if (attr_mask & IB_QP_DEST_QPN)
1136 roce_info->dest_qp = attr->dest_qp_num;
1137
1138 if (attr_mask & IB_QP_PKEY_INDEX) {
1139 ret = irdma_query_pkey(ibqp->device, 0, attr->pkey_index,
1140 &roce_info->p_key);
1141 if (ret)
1142 return ret;
1143 }
1144
1145 if (attr_mask & IB_QP_QKEY)
1146 roce_info->qkey = attr->qkey;
1147
1148 if (attr_mask & IB_QP_PATH_MTU)
1149 udp_info->snd_mss = ib_mtu_enum_to_int(attr->path_mtu);
1150
1151 if (attr_mask & IB_QP_SQ_PSN) {
1152 udp_info->psn_nxt = attr->sq_psn;
1153 udp_info->lsn = 0xffff;
1154 udp_info->psn_una = attr->sq_psn;
1155 udp_info->psn_max = attr->sq_psn;
1156 }
1157
1158 if (attr_mask & IB_QP_RQ_PSN)
1159 udp_info->epsn = attr->rq_psn;
1160
1161 if (attr_mask & IB_QP_RNR_RETRY)
1162 udp_info->rnr_nak_thresh = attr->rnr_retry;
1163
1164 if (attr_mask & IB_QP_RETRY_CNT)
1165 udp_info->rexmit_thresh = attr->retry_cnt;
1166
1167 ctx_info->roce_info->pd_id = iwpd->sc_pd.pd_id;
1168
1169 if (attr_mask & IB_QP_AV) {
1170 struct irdma_av *av = &iwqp->roce_ah.av;
1171 const struct ib_gid_attr *sgid_attr;
1172 u16 vlan_id = VLAN_N_VID;
1173 u32 local_ip[4];
1174
1175 memset(&iwqp->roce_ah, 0, sizeof(iwqp->roce_ah));
1176 if (attr->ah_attr.ah_flags & IB_AH_GRH) {
1177 udp_info->ttl = attr->ah_attr.grh.hop_limit;
1178 udp_info->flow_label = attr->ah_attr.grh.flow_label;
1179 udp_info->tos = attr->ah_attr.grh.traffic_class;
1180 irdma_qp_rem_qos(&iwqp->sc_qp);
1181 dev->ws_remove(iwqp->sc_qp.vsi, ctx_info->user_pri);
1182 ctx_info->user_pri = rt_tos2priority(udp_info->tos);
1183 iwqp->sc_qp.user_pri = ctx_info->user_pri;
1184 if (dev->ws_add(iwqp->sc_qp.vsi, ctx_info->user_pri))
1185 return -ENOMEM;
1186 irdma_qp_add_qos(&iwqp->sc_qp);
1187 }
1188 sgid_attr = attr->ah_attr.grh.sgid_attr;
1189 ret = rdma_read_gid_l2_fields(sgid_attr, &vlan_id,
1190 ctx_info->roce_info->mac_addr);
1191 if (ret)
1192 return ret;
1193
1194 if (vlan_id >= VLAN_N_VID && iwdev->dcb)
1195 vlan_id = 0;
1196 if (vlan_id < VLAN_N_VID) {
1197 udp_info->insert_vlan_tag = true;
1198 udp_info->vlan_tag = vlan_id |
1199 ctx_info->user_pri << VLAN_PRIO_SHIFT;
1200 } else {
1201 udp_info->insert_vlan_tag = false;
1202 }
1203
1204 av->attrs = attr->ah_attr;
1205 rdma_gid2ip((struct sockaddr *)&av->sgid_addr, &sgid_attr->gid);
1206 rdma_gid2ip((struct sockaddr *)&av->dgid_addr, &attr->ah_attr.grh.dgid);
1207 roce_info->local_qp = ibqp->qp_num;
1208 if (av->sgid_addr.saddr.sa_family == AF_INET6) {
1209 __be32 *daddr =
1210 av->dgid_addr.saddr_in6.sin6_addr.in6_u.u6_addr32;
1211 __be32 *saddr =
1212 av->sgid_addr.saddr_in6.sin6_addr.in6_u.u6_addr32;
1213
1214 irdma_copy_ip_ntohl(&udp_info->dest_ip_addr[0], daddr);
1215 irdma_copy_ip_ntohl(&udp_info->local_ipaddr[0], saddr);
1216
1217 udp_info->ipv4 = false;
1218 irdma_copy_ip_ntohl(local_ip, daddr);
1219
1220 udp_info->arp_idx = irdma_arp_table(iwdev->rf,
1221 &local_ip[0],
1222 false, NULL,
1223 IRDMA_ARP_RESOLVE);
1224 } else {
1225 __be32 saddr = av->sgid_addr.saddr_in.sin_addr.s_addr;
1226 __be32 daddr = av->dgid_addr.saddr_in.sin_addr.s_addr;
1227
1228 local_ip[0] = ntohl(daddr);
1229
1230 udp_info->ipv4 = true;
1231 udp_info->dest_ip_addr[0] = 0;
1232 udp_info->dest_ip_addr[1] = 0;
1233 udp_info->dest_ip_addr[2] = 0;
1234 udp_info->dest_ip_addr[3] = local_ip[0];
1235
1236 udp_info->local_ipaddr[0] = 0;
1237 udp_info->local_ipaddr[1] = 0;
1238 udp_info->local_ipaddr[2] = 0;
1239 udp_info->local_ipaddr[3] = ntohl(saddr);
1240 }
1241 udp_info->arp_idx =
1242 irdma_add_arp(iwdev->rf, local_ip, udp_info->ipv4,
1243 attr->ah_attr.roce.dmac);
1244 }
1245
1246 if (attr_mask & IB_QP_MAX_QP_RD_ATOMIC) {
1247 if (attr->max_rd_atomic > dev->hw_attrs.max_hw_ord) {
1248 ibdev_err(&iwdev->ibdev,
1249 "rd_atomic = %d, above max_hw_ord=%d\n",
1250 attr->max_rd_atomic,
1251 dev->hw_attrs.max_hw_ord);
1252 return -EINVAL;
1253 }
1254 if (attr->max_rd_atomic)
1255 roce_info->ord_size = attr->max_rd_atomic;
1256 info.ord_valid = true;
1257 }
1258
1259 if (attr_mask & IB_QP_MAX_DEST_RD_ATOMIC) {
1260 if (attr->max_dest_rd_atomic > dev->hw_attrs.max_hw_ird) {
1261 ibdev_err(&iwdev->ibdev,
1262 "rd_atomic = %d, above max_hw_ird=%d\n",
1263 attr->max_rd_atomic,
1264 dev->hw_attrs.max_hw_ird);
1265 return -EINVAL;
1266 }
1267 if (attr->max_dest_rd_atomic)
1268 roce_info->ird_size = attr->max_dest_rd_atomic;
1269 }
1270
1271 if (attr_mask & IB_QP_ACCESS_FLAGS) {
1272 if (attr->qp_access_flags & IB_ACCESS_LOCAL_WRITE)
1273 roce_info->wr_rdresp_en = true;
1274 if (attr->qp_access_flags & IB_ACCESS_REMOTE_WRITE)
1275 roce_info->wr_rdresp_en = true;
1276 if (attr->qp_access_flags & IB_ACCESS_REMOTE_READ)
1277 roce_info->rd_en = true;
1278 }
1279
1280 wait_event(iwqp->mod_qp_waitq, !atomic_read(&iwqp->hw_mod_qp_pend));
1281
1282 ibdev_dbg(&iwdev->ibdev,
1283 "VERBS: caller: %pS qp_id=%d to_ibqpstate=%d ibqpstate=%d irdma_qpstate=%d attr_mask=0x%x\n",
1284 __builtin_return_address(0), ibqp->qp_num, attr->qp_state,
1285 iwqp->ibqp_state, iwqp->iwarp_state, attr_mask);
1286
1287 spin_lock_irqsave(&iwqp->lock, flags);
1288 if (attr_mask & IB_QP_STATE) {
1289 if (!ib_modify_qp_is_ok(iwqp->ibqp_state, attr->qp_state,
1290 iwqp->ibqp.qp_type, attr_mask)) {
1291 ibdev_warn(&iwdev->ibdev, "modify_qp invalid for qp_id=%d, old_state=0x%x, new_state=0x%x\n",
1292 iwqp->ibqp.qp_num, iwqp->ibqp_state,
1293 attr->qp_state);
1294 ret = -EINVAL;
1295 goto exit;
1296 }
1297 info.curr_iwarp_state = iwqp->iwarp_state;
1298
1299 switch (attr->qp_state) {
1300 case IB_QPS_INIT:
1301 if (iwqp->iwarp_state > IRDMA_QP_STATE_IDLE) {
1302 ret = -EINVAL;
1303 goto exit;
1304 }
1305
1306 if (iwqp->iwarp_state == IRDMA_QP_STATE_INVALID) {
1307 info.next_iwarp_state = IRDMA_QP_STATE_IDLE;
1308 issue_modify_qp = 1;
1309 }
1310 break;
1311 case IB_QPS_RTR:
1312 if (iwqp->iwarp_state > IRDMA_QP_STATE_IDLE) {
1313 ret = -EINVAL;
1314 goto exit;
1315 }
1316 info.arp_cache_idx_valid = true;
1317 info.cq_num_valid = true;
1318 info.next_iwarp_state = IRDMA_QP_STATE_RTR;
1319 issue_modify_qp = 1;
1320 break;
1321 case IB_QPS_RTS:
1322 if (iwqp->ibqp_state < IB_QPS_RTR ||
1323 iwqp->ibqp_state == IB_QPS_ERR) {
1324 ret = -EINVAL;
1325 goto exit;
1326 }
1327
1328 info.arp_cache_idx_valid = true;
1329 info.cq_num_valid = true;
1330 info.ord_valid = true;
1331 info.next_iwarp_state = IRDMA_QP_STATE_RTS;
1332 issue_modify_qp = 1;
1333 if (iwdev->push_mode && udata &&
1334 iwqp->sc_qp.push_idx == IRDMA_INVALID_PUSH_PAGE_INDEX &&
1335 dev->hw_attrs.uk_attrs.hw_rev >= IRDMA_GEN_2) {
1336 spin_unlock_irqrestore(&iwqp->lock, flags);
1337 irdma_alloc_push_page(iwqp);
1338 spin_lock_irqsave(&iwqp->lock, flags);
1339 }
1340 break;
1341 case IB_QPS_SQD:
1342 if (iwqp->iwarp_state == IRDMA_QP_STATE_SQD)
1343 goto exit;
1344
1345 if (iwqp->iwarp_state != IRDMA_QP_STATE_RTS) {
1346 ret = -EINVAL;
1347 goto exit;
1348 }
1349
1350 info.next_iwarp_state = IRDMA_QP_STATE_SQD;
1351 issue_modify_qp = 1;
1352 break;
1353 case IB_QPS_SQE:
1354 case IB_QPS_ERR:
1355 case IB_QPS_RESET:
1356 if (iwqp->iwarp_state == IRDMA_QP_STATE_RTS) {
1357 spin_unlock_irqrestore(&iwqp->lock, flags);
1358 info.next_iwarp_state = IRDMA_QP_STATE_SQD;
1359 irdma_hw_modify_qp(iwdev, iwqp, &info, true);
1360 spin_lock_irqsave(&iwqp->lock, flags);
1361 }
1362
1363 if (iwqp->iwarp_state == IRDMA_QP_STATE_ERROR) {
1364 spin_unlock_irqrestore(&iwqp->lock, flags);
1365 if (udata) {
1366 if (ib_copy_from_udata(&ureq, udata,
1367 min(sizeof(ureq), udata->inlen)))
1368 return -EINVAL;
1369
1370 irdma_flush_wqes(iwqp,
1371 (ureq.sq_flush ? IRDMA_FLUSH_SQ : 0) |
1372 (ureq.rq_flush ? IRDMA_FLUSH_RQ : 0) |
1373 IRDMA_REFLUSH);
1374 }
1375 return 0;
1376 }
1377
1378 info.next_iwarp_state = IRDMA_QP_STATE_ERROR;
1379 issue_modify_qp = 1;
1380 break;
1381 default:
1382 ret = -EINVAL;
1383 goto exit;
1384 }
1385
1386 iwqp->ibqp_state = attr->qp_state;
1387 }
1388
1389 ctx_info->send_cq_num = iwqp->iwscq->sc_cq.cq_uk.cq_id;
1390 ctx_info->rcv_cq_num = iwqp->iwrcq->sc_cq.cq_uk.cq_id;
1391 irdma_sc_qp_setctx_roce(&iwqp->sc_qp, iwqp->host_ctx.va, ctx_info);
1392 spin_unlock_irqrestore(&iwqp->lock, flags);
1393
1394 if (attr_mask & IB_QP_STATE) {
1395 if (issue_modify_qp) {
1396 ctx_info->rem_endpoint_idx = udp_info->arp_idx;
1397 if (irdma_hw_modify_qp(iwdev, iwqp, &info, true))
1398 return -EINVAL;
1399 spin_lock_irqsave(&iwqp->lock, flags);
1400 if (iwqp->iwarp_state == info.curr_iwarp_state) {
1401 iwqp->iwarp_state = info.next_iwarp_state;
1402 iwqp->ibqp_state = attr->qp_state;
1403 }
1404 if (iwqp->ibqp_state > IB_QPS_RTS &&
1405 !iwqp->flush_issued) {
1406 iwqp->flush_issued = 1;
1407 spin_unlock_irqrestore(&iwqp->lock, flags);
1408 irdma_flush_wqes(iwqp, IRDMA_FLUSH_SQ |
1409 IRDMA_FLUSH_RQ |
1410 IRDMA_FLUSH_WAIT);
1411 } else {
1412 spin_unlock_irqrestore(&iwqp->lock, flags);
1413 }
1414 } else {
1415 iwqp->ibqp_state = attr->qp_state;
1416 }
1417 if (udata && dev->hw_attrs.uk_attrs.hw_rev >= IRDMA_GEN_2) {
1418 struct irdma_ucontext *ucontext;
1419
1420 ucontext = rdma_udata_to_drv_context(udata,
1421 struct irdma_ucontext, ibucontext);
1422 if (iwqp->sc_qp.push_idx != IRDMA_INVALID_PUSH_PAGE_INDEX &&
1423 !iwqp->push_wqe_mmap_entry &&
1424 !irdma_setup_push_mmap_entries(ucontext, iwqp,
1425 &uresp.push_wqe_mmap_key, &uresp.push_db_mmap_key)) {
1426 uresp.push_valid = 1;
1427 uresp.push_offset = iwqp->sc_qp.push_offset;
1428 }
1429 ret = ib_copy_to_udata(udata, &uresp, min(sizeof(uresp),
1430 udata->outlen));
1431 if (ret) {
1432 irdma_remove_push_mmap_entries(iwqp);
1433 ibdev_dbg(&iwdev->ibdev,
1434 "VERBS: copy_to_udata failed\n");
1435 return ret;
1436 }
1437 }
1438 }
1439
1440 return 0;
1441exit:
1442 spin_unlock_irqrestore(&iwqp->lock, flags);
1443
1444 return ret;
1445}
1446
1447
1448
1449
1450
1451
1452
1453
1454int irdma_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr, int attr_mask,
1455 struct ib_udata *udata)
1456{
1457 struct irdma_qp *iwqp = to_iwqp(ibqp);
1458 struct irdma_device *iwdev = iwqp->iwdev;
1459 struct irdma_sc_dev *dev = &iwdev->rf->sc_dev;
1460 struct irdma_qp_host_ctx_info *ctx_info;
1461 struct irdma_tcp_offload_info *tcp_info;
1462 struct irdma_iwarp_offload_info *offload_info;
1463 struct irdma_modify_qp_info info = {};
1464 struct irdma_modify_qp_resp uresp = {};
1465 struct irdma_modify_qp_req ureq = {};
1466 u8 issue_modify_qp = 0;
1467 u8 dont_wait = 0;
1468 int err;
1469 unsigned long flags;
1470
1471 if (attr_mask & ~IB_QP_ATTR_STANDARD_BITS)
1472 return -EOPNOTSUPP;
1473
1474 ctx_info = &iwqp->ctx_info;
1475 offload_info = &iwqp->iwarp_info;
1476 tcp_info = &iwqp->tcp_info;
1477 wait_event(iwqp->mod_qp_waitq, !atomic_read(&iwqp->hw_mod_qp_pend));
1478 ibdev_dbg(&iwdev->ibdev,
1479 "VERBS: caller: %pS qp_id=%d to_ibqpstate=%d ibqpstate=%d irdma_qpstate=%d last_aeq=%d hw_tcp_state=%d hw_iwarp_state=%d attr_mask=0x%x\n",
1480 __builtin_return_address(0), ibqp->qp_num, attr->qp_state,
1481 iwqp->ibqp_state, iwqp->iwarp_state, iwqp->last_aeq,
1482 iwqp->hw_tcp_state, iwqp->hw_iwarp_state, attr_mask);
1483
1484 spin_lock_irqsave(&iwqp->lock, flags);
1485 if (attr_mask & IB_QP_STATE) {
1486 info.curr_iwarp_state = iwqp->iwarp_state;
1487 switch (attr->qp_state) {
1488 case IB_QPS_INIT:
1489 case IB_QPS_RTR:
1490 if (iwqp->iwarp_state > IRDMA_QP_STATE_IDLE) {
1491 err = -EINVAL;
1492 goto exit;
1493 }
1494
1495 if (iwqp->iwarp_state == IRDMA_QP_STATE_INVALID) {
1496 info.next_iwarp_state = IRDMA_QP_STATE_IDLE;
1497 issue_modify_qp = 1;
1498 }
1499 if (iwdev->push_mode && udata &&
1500 iwqp->sc_qp.push_idx == IRDMA_INVALID_PUSH_PAGE_INDEX &&
1501 dev->hw_attrs.uk_attrs.hw_rev >= IRDMA_GEN_2) {
1502 spin_unlock_irqrestore(&iwqp->lock, flags);
1503 irdma_alloc_push_page(iwqp);
1504 spin_lock_irqsave(&iwqp->lock, flags);
1505 }
1506 break;
1507 case IB_QPS_RTS:
1508 if (iwqp->iwarp_state > IRDMA_QP_STATE_RTS ||
1509 !iwqp->cm_id) {
1510 err = -EINVAL;
1511 goto exit;
1512 }
1513
1514 issue_modify_qp = 1;
1515 iwqp->hw_tcp_state = IRDMA_TCP_STATE_ESTABLISHED;
1516 iwqp->hte_added = 1;
1517 info.next_iwarp_state = IRDMA_QP_STATE_RTS;
1518 info.tcp_ctx_valid = true;
1519 info.ord_valid = true;
1520 info.arp_cache_idx_valid = true;
1521 info.cq_num_valid = true;
1522 break;
1523 case IB_QPS_SQD:
1524 if (iwqp->hw_iwarp_state > IRDMA_QP_STATE_RTS) {
1525 err = 0;
1526 goto exit;
1527 }
1528
1529 if (iwqp->iwarp_state == IRDMA_QP_STATE_CLOSING ||
1530 iwqp->iwarp_state < IRDMA_QP_STATE_RTS) {
1531 err = 0;
1532 goto exit;
1533 }
1534
1535 if (iwqp->iwarp_state > IRDMA_QP_STATE_CLOSING) {
1536 err = -EINVAL;
1537 goto exit;
1538 }
1539
1540 info.next_iwarp_state = IRDMA_QP_STATE_CLOSING;
1541 issue_modify_qp = 1;
1542 break;
1543 case IB_QPS_SQE:
1544 if (iwqp->iwarp_state >= IRDMA_QP_STATE_TERMINATE) {
1545 err = -EINVAL;
1546 goto exit;
1547 }
1548
1549 info.next_iwarp_state = IRDMA_QP_STATE_TERMINATE;
1550 issue_modify_qp = 1;
1551 break;
1552 case IB_QPS_ERR:
1553 case IB_QPS_RESET:
1554 if (iwqp->iwarp_state == IRDMA_QP_STATE_ERROR) {
1555 spin_unlock_irqrestore(&iwqp->lock, flags);
1556 if (udata) {
1557 if (ib_copy_from_udata(&ureq, udata,
1558 min(sizeof(ureq), udata->inlen)))
1559 return -EINVAL;
1560
1561 irdma_flush_wqes(iwqp,
1562 (ureq.sq_flush ? IRDMA_FLUSH_SQ : 0) |
1563 (ureq.rq_flush ? IRDMA_FLUSH_RQ : 0) |
1564 IRDMA_REFLUSH);
1565 }
1566 return 0;
1567 }
1568
1569 if (iwqp->sc_qp.term_flags) {
1570 spin_unlock_irqrestore(&iwqp->lock, flags);
1571 irdma_terminate_del_timer(&iwqp->sc_qp);
1572 spin_lock_irqsave(&iwqp->lock, flags);
1573 }
1574 info.next_iwarp_state = IRDMA_QP_STATE_ERROR;
1575 if (iwqp->hw_tcp_state > IRDMA_TCP_STATE_CLOSED &&
1576 iwdev->iw_status &&
1577 iwqp->hw_tcp_state != IRDMA_TCP_STATE_TIME_WAIT)
1578 info.reset_tcp_conn = true;
1579 else
1580 dont_wait = 1;
1581
1582 issue_modify_qp = 1;
1583 info.next_iwarp_state = IRDMA_QP_STATE_ERROR;
1584 break;
1585 default:
1586 err = -EINVAL;
1587 goto exit;
1588 }
1589
1590 iwqp->ibqp_state = attr->qp_state;
1591 }
1592 if (attr_mask & IB_QP_ACCESS_FLAGS) {
1593 ctx_info->iwarp_info_valid = true;
1594 if (attr->qp_access_flags & IB_ACCESS_LOCAL_WRITE)
1595 offload_info->wr_rdresp_en = true;
1596 if (attr->qp_access_flags & IB_ACCESS_REMOTE_WRITE)
1597 offload_info->wr_rdresp_en = true;
1598 if (attr->qp_access_flags & IB_ACCESS_REMOTE_READ)
1599 offload_info->rd_en = true;
1600 }
1601
1602 if (ctx_info->iwarp_info_valid) {
1603 ctx_info->send_cq_num = iwqp->iwscq->sc_cq.cq_uk.cq_id;
1604 ctx_info->rcv_cq_num = iwqp->iwrcq->sc_cq.cq_uk.cq_id;
1605 irdma_sc_qp_setctx(&iwqp->sc_qp, iwqp->host_ctx.va, ctx_info);
1606 }
1607 spin_unlock_irqrestore(&iwqp->lock, flags);
1608
1609 if (attr_mask & IB_QP_STATE) {
1610 if (issue_modify_qp) {
1611 ctx_info->rem_endpoint_idx = tcp_info->arp_idx;
1612 if (irdma_hw_modify_qp(iwdev, iwqp, &info, true))
1613 return -EINVAL;
1614 }
1615
1616 spin_lock_irqsave(&iwqp->lock, flags);
1617 if (iwqp->iwarp_state == info.curr_iwarp_state) {
1618 iwqp->iwarp_state = info.next_iwarp_state;
1619 iwqp->ibqp_state = attr->qp_state;
1620 }
1621 spin_unlock_irqrestore(&iwqp->lock, flags);
1622 }
1623
1624 if (issue_modify_qp && iwqp->ibqp_state > IB_QPS_RTS) {
1625 if (dont_wait) {
1626 if (iwqp->cm_id && iwqp->hw_tcp_state) {
1627 spin_lock_irqsave(&iwqp->lock, flags);
1628 iwqp->hw_tcp_state = IRDMA_TCP_STATE_CLOSED;
1629 iwqp->last_aeq = IRDMA_AE_RESET_SENT;
1630 spin_unlock_irqrestore(&iwqp->lock, flags);
1631 irdma_cm_disconn(iwqp);
1632 }
1633 } else {
1634 int close_timer_started;
1635
1636 spin_lock_irqsave(&iwdev->cm_core.ht_lock, flags);
1637
1638 if (iwqp->cm_node) {
1639 refcount_inc(&iwqp->cm_node->refcnt);
1640 spin_unlock_irqrestore(&iwdev->cm_core.ht_lock, flags);
1641 close_timer_started = atomic_inc_return(&iwqp->close_timer_started);
1642 if (iwqp->cm_id && close_timer_started == 1)
1643 irdma_schedule_cm_timer(iwqp->cm_node,
1644 (struct irdma_puda_buf *)iwqp,
1645 IRDMA_TIMER_TYPE_CLOSE, 1, 0);
1646
1647 irdma_rem_ref_cm_node(iwqp->cm_node);
1648 } else {
1649 spin_unlock_irqrestore(&iwdev->cm_core.ht_lock, flags);
1650 }
1651 }
1652 }
1653 if (attr_mask & IB_QP_STATE && udata &&
1654 dev->hw_attrs.uk_attrs.hw_rev >= IRDMA_GEN_2) {
1655 struct irdma_ucontext *ucontext;
1656
1657 ucontext = rdma_udata_to_drv_context(udata,
1658 struct irdma_ucontext, ibucontext);
1659 if (iwqp->sc_qp.push_idx != IRDMA_INVALID_PUSH_PAGE_INDEX &&
1660 !iwqp->push_wqe_mmap_entry &&
1661 !irdma_setup_push_mmap_entries(ucontext, iwqp,
1662 &uresp.push_wqe_mmap_key, &uresp.push_db_mmap_key)) {
1663 uresp.push_valid = 1;
1664 uresp.push_offset = iwqp->sc_qp.push_offset;
1665 }
1666
1667 err = ib_copy_to_udata(udata, &uresp, min(sizeof(uresp),
1668 udata->outlen));
1669 if (err) {
1670 irdma_remove_push_mmap_entries(iwqp);
1671 ibdev_dbg(&iwdev->ibdev,
1672 "VERBS: copy_to_udata failed\n");
1673 return err;
1674 }
1675 }
1676
1677 return 0;
1678exit:
1679 spin_unlock_irqrestore(&iwqp->lock, flags);
1680
1681 return err;
1682}
1683
1684
1685
1686
1687
1688
1689static void irdma_cq_free_rsrc(struct irdma_pci_f *rf, struct irdma_cq *iwcq)
1690{
1691 struct irdma_sc_cq *cq = &iwcq->sc_cq;
1692
1693 if (!iwcq->user_mode) {
1694 dma_free_coherent(rf->sc_dev.hw->device, iwcq->kmem.size,
1695 iwcq->kmem.va, iwcq->kmem.pa);
1696 iwcq->kmem.va = NULL;
1697 dma_free_coherent(rf->sc_dev.hw->device,
1698 iwcq->kmem_shadow.size,
1699 iwcq->kmem_shadow.va, iwcq->kmem_shadow.pa);
1700 iwcq->kmem_shadow.va = NULL;
1701 }
1702
1703 irdma_free_rsrc(rf, rf->allocated_cqs, cq->cq_uk.cq_id);
1704}
1705
1706
1707
1708
1709
1710static void irdma_free_cqbuf(struct work_struct *work)
1711{
1712 struct irdma_cq_buf *cq_buf = container_of(work, struct irdma_cq_buf, work);
1713
1714 dma_free_coherent(cq_buf->hw->device, cq_buf->kmem_buf.size,
1715 cq_buf->kmem_buf.va, cq_buf->kmem_buf.pa);
1716 cq_buf->kmem_buf.va = NULL;
1717 kfree(cq_buf);
1718}
1719
1720
1721
1722
1723
1724
1725
1726static int irdma_process_resize_list(struct irdma_cq *iwcq,
1727 struct irdma_device *iwdev,
1728 struct irdma_cq_buf *lcqe_buf)
1729{
1730 struct list_head *tmp_node, *list_node;
1731 struct irdma_cq_buf *cq_buf;
1732 int cnt = 0;
1733
1734 list_for_each_safe(list_node, tmp_node, &iwcq->resize_list) {
1735 cq_buf = list_entry(list_node, struct irdma_cq_buf, list);
1736 if (cq_buf == lcqe_buf)
1737 return cnt;
1738
1739 list_del(&cq_buf->list);
1740 queue_work(iwdev->cleanup_wq, &cq_buf->work);
1741 cnt++;
1742 }
1743
1744 return cnt;
1745}
1746
1747
1748
1749
1750
1751
1752static int irdma_destroy_cq(struct ib_cq *ib_cq, struct ib_udata *udata)
1753{
1754 struct irdma_device *iwdev = to_iwdev(ib_cq->device);
1755 struct irdma_cq *iwcq = to_iwcq(ib_cq);
1756 struct irdma_sc_cq *cq = &iwcq->sc_cq;
1757 struct irdma_sc_dev *dev = cq->dev;
1758 struct irdma_sc_ceq *ceq = dev->ceq[cq->ceq_id];
1759 struct irdma_ceq *iwceq = container_of(ceq, struct irdma_ceq, sc_ceq);
1760 unsigned long flags;
1761
1762 spin_lock_irqsave(&iwcq->lock, flags);
1763 if (!list_empty(&iwcq->resize_list))
1764 irdma_process_resize_list(iwcq, iwdev, NULL);
1765 spin_unlock_irqrestore(&iwcq->lock, flags);
1766
1767 irdma_cq_wq_destroy(iwdev->rf, cq);
1768 irdma_cq_free_rsrc(iwdev->rf, iwcq);
1769
1770 spin_lock_irqsave(&iwceq->ce_lock, flags);
1771 irdma_sc_cleanup_ceqes(cq, ceq);
1772 spin_unlock_irqrestore(&iwceq->ce_lock, flags);
1773
1774 return 0;
1775}
1776
1777
1778
1779
1780
1781
1782
1783static int irdma_resize_cq(struct ib_cq *ibcq, int entries,
1784 struct ib_udata *udata)
1785{
1786 struct irdma_cq *iwcq = to_iwcq(ibcq);
1787 struct irdma_sc_dev *dev = iwcq->sc_cq.dev;
1788 struct irdma_cqp_request *cqp_request;
1789 struct cqp_cmds_info *cqp_info;
1790 struct irdma_modify_cq_info *m_info;
1791 struct irdma_modify_cq_info info = {};
1792 struct irdma_dma_mem kmem_buf;
1793 struct irdma_cq_mr *cqmr_buf;
1794 struct irdma_pbl *iwpbl_buf;
1795 struct irdma_device *iwdev;
1796 struct irdma_pci_f *rf;
1797 struct irdma_cq_buf *cq_buf = NULL;
1798 enum irdma_status_code status = 0;
1799 unsigned long flags;
1800 int ret;
1801
1802 iwdev = to_iwdev(ibcq->device);
1803 rf = iwdev->rf;
1804
1805 if (!(rf->sc_dev.hw_attrs.uk_attrs.feature_flags &
1806 IRDMA_FEATURE_CQ_RESIZE))
1807 return -EOPNOTSUPP;
1808
1809 if (entries > rf->max_cqe)
1810 return -EINVAL;
1811
1812 if (!iwcq->user_mode) {
1813 entries++;
1814 if (rf->sc_dev.hw_attrs.uk_attrs.hw_rev >= IRDMA_GEN_2)
1815 entries *= 2;
1816 }
1817
1818 info.cq_size = max(entries, 4);
1819
1820 if (info.cq_size == iwcq->sc_cq.cq_uk.cq_size - 1)
1821 return 0;
1822
1823 if (udata) {
1824 struct irdma_resize_cq_req req = {};
1825 struct irdma_ucontext *ucontext =
1826 rdma_udata_to_drv_context(udata, struct irdma_ucontext,
1827 ibucontext);
1828
1829
1830 if (ucontext->legacy_mode)
1831 return -EOPNOTSUPP;
1832
1833 if (ib_copy_from_udata(&req, udata,
1834 min(sizeof(req), udata->inlen)))
1835 return -EINVAL;
1836
1837 spin_lock_irqsave(&ucontext->cq_reg_mem_list_lock, flags);
1838 iwpbl_buf = irdma_get_pbl((unsigned long)req.user_cq_buffer,
1839 &ucontext->cq_reg_mem_list);
1840 spin_unlock_irqrestore(&ucontext->cq_reg_mem_list_lock, flags);
1841
1842 if (!iwpbl_buf)
1843 return -ENOMEM;
1844
1845 cqmr_buf = &iwpbl_buf->cq_mr;
1846 if (iwpbl_buf->pbl_allocated) {
1847 info.virtual_map = true;
1848 info.pbl_chunk_size = 1;
1849 info.first_pm_pbl_idx = cqmr_buf->cq_pbl.idx;
1850 } else {
1851 info.cq_pa = cqmr_buf->cq_pbl.addr;
1852 }
1853 } else {
1854
1855 int rsize;
1856
1857 rsize = info.cq_size * sizeof(struct irdma_cqe);
1858 kmem_buf.size = ALIGN(round_up(rsize, 256), 256);
1859 kmem_buf.va = dma_alloc_coherent(dev->hw->device,
1860 kmem_buf.size, &kmem_buf.pa,
1861 GFP_KERNEL);
1862 if (!kmem_buf.va)
1863 return -ENOMEM;
1864
1865 info.cq_base = kmem_buf.va;
1866 info.cq_pa = kmem_buf.pa;
1867 cq_buf = kzalloc(sizeof(*cq_buf), GFP_KERNEL);
1868 if (!cq_buf) {
1869 ret = -ENOMEM;
1870 goto error;
1871 }
1872 }
1873
1874 cqp_request = irdma_alloc_and_get_cqp_request(&rf->cqp, true);
1875 if (!cqp_request) {
1876 ret = -ENOMEM;
1877 goto error;
1878 }
1879
1880 info.shadow_read_threshold = iwcq->sc_cq.shadow_read_threshold;
1881 info.cq_resize = true;
1882
1883 cqp_info = &cqp_request->info;
1884 m_info = &cqp_info->in.u.cq_modify.info;
1885 memcpy(m_info, &info, sizeof(*m_info));
1886
1887 cqp_info->cqp_cmd = IRDMA_OP_CQ_MODIFY;
1888 cqp_info->in.u.cq_modify.cq = &iwcq->sc_cq;
1889 cqp_info->in.u.cq_modify.scratch = (uintptr_t)cqp_request;
1890 cqp_info->post_sq = 1;
1891 status = irdma_handle_cqp_op(rf, cqp_request);
1892 irdma_put_cqp_request(&rf->cqp, cqp_request);
1893 if (status) {
1894 ret = -EPROTO;
1895 goto error;
1896 }
1897
1898 spin_lock_irqsave(&iwcq->lock, flags);
1899 if (cq_buf) {
1900 cq_buf->kmem_buf = iwcq->kmem;
1901 cq_buf->hw = dev->hw;
1902 memcpy(&cq_buf->cq_uk, &iwcq->sc_cq.cq_uk, sizeof(cq_buf->cq_uk));
1903 INIT_WORK(&cq_buf->work, irdma_free_cqbuf);
1904 list_add_tail(&cq_buf->list, &iwcq->resize_list);
1905 iwcq->kmem = kmem_buf;
1906 }
1907
1908 irdma_sc_cq_resize(&iwcq->sc_cq, &info);
1909 ibcq->cqe = info.cq_size - 1;
1910 spin_unlock_irqrestore(&iwcq->lock, flags);
1911
1912 return 0;
1913error:
1914 if (!udata) {
1915 dma_free_coherent(dev->hw->device, kmem_buf.size, kmem_buf.va,
1916 kmem_buf.pa);
1917 kmem_buf.va = NULL;
1918 }
1919 kfree(cq_buf);
1920
1921 return ret;
1922}
1923
1924static inline int cq_validate_flags(u32 flags, u8 hw_rev)
1925{
1926
1927 if (hw_rev == IRDMA_GEN_1)
1928 return flags ? -EOPNOTSUPP : 0;
1929
1930 return flags & ~IB_UVERBS_CQ_FLAGS_TIMESTAMP_COMPLETION ? -EOPNOTSUPP : 0;
1931}
1932
1933
1934
1935
1936
1937
1938
1939static int irdma_create_cq(struct ib_cq *ibcq,
1940 const struct ib_cq_init_attr *attr,
1941 struct ib_udata *udata)
1942{
1943 struct ib_device *ibdev = ibcq->device;
1944 struct irdma_device *iwdev = to_iwdev(ibdev);
1945 struct irdma_pci_f *rf = iwdev->rf;
1946 struct irdma_cq *iwcq = to_iwcq(ibcq);
1947 u32 cq_num = 0;
1948 struct irdma_sc_cq *cq;
1949 struct irdma_sc_dev *dev = &rf->sc_dev;
1950 struct irdma_cq_init_info info = {};
1951 enum irdma_status_code status;
1952 struct irdma_cqp_request *cqp_request;
1953 struct cqp_cmds_info *cqp_info;
1954 struct irdma_cq_uk_init_info *ukinfo = &info.cq_uk_init_info;
1955 unsigned long flags;
1956 int err_code;
1957 int entries = attr->cqe;
1958
1959 err_code = cq_validate_flags(attr->flags, dev->hw_attrs.uk_attrs.hw_rev);
1960 if (err_code)
1961 return err_code;
1962 err_code = irdma_alloc_rsrc(rf, rf->allocated_cqs, rf->max_cq, &cq_num,
1963 &rf->next_cq);
1964 if (err_code)
1965 return err_code;
1966
1967 cq = &iwcq->sc_cq;
1968 cq->back_cq = iwcq;
1969 spin_lock_init(&iwcq->lock);
1970 INIT_LIST_HEAD(&iwcq->resize_list);
1971 info.dev = dev;
1972 ukinfo->cq_size = max(entries, 4);
1973 ukinfo->cq_id = cq_num;
1974 iwcq->ibcq.cqe = info.cq_uk_init_info.cq_size;
1975 if (attr->comp_vector < rf->ceqs_count)
1976 info.ceq_id = attr->comp_vector;
1977 info.ceq_id_valid = true;
1978 info.ceqe_mask = 1;
1979 info.type = IRDMA_CQ_TYPE_IWARP;
1980 info.vsi = &iwdev->vsi;
1981
1982 if (udata) {
1983 struct irdma_ucontext *ucontext;
1984 struct irdma_create_cq_req req = {};
1985 struct irdma_cq_mr *cqmr;
1986 struct irdma_pbl *iwpbl;
1987 struct irdma_pbl *iwpbl_shadow;
1988 struct irdma_cq_mr *cqmr_shadow;
1989
1990 iwcq->user_mode = true;
1991 ucontext =
1992 rdma_udata_to_drv_context(udata, struct irdma_ucontext,
1993 ibucontext);
1994 if (ib_copy_from_udata(&req, udata,
1995 min(sizeof(req), udata->inlen))) {
1996 err_code = -EFAULT;
1997 goto cq_free_rsrc;
1998 }
1999
2000 spin_lock_irqsave(&ucontext->cq_reg_mem_list_lock, flags);
2001 iwpbl = irdma_get_pbl((unsigned long)req.user_cq_buf,
2002 &ucontext->cq_reg_mem_list);
2003 spin_unlock_irqrestore(&ucontext->cq_reg_mem_list_lock, flags);
2004 if (!iwpbl) {
2005 err_code = -EPROTO;
2006 goto cq_free_rsrc;
2007 }
2008
2009 iwcq->iwpbl = iwpbl;
2010 iwcq->cq_mem_size = 0;
2011 cqmr = &iwpbl->cq_mr;
2012
2013 if (rf->sc_dev.hw_attrs.uk_attrs.feature_flags &
2014 IRDMA_FEATURE_CQ_RESIZE && !ucontext->legacy_mode) {
2015 spin_lock_irqsave(&ucontext->cq_reg_mem_list_lock, flags);
2016 iwpbl_shadow = irdma_get_pbl(
2017 (unsigned long)req.user_shadow_area,
2018 &ucontext->cq_reg_mem_list);
2019 spin_unlock_irqrestore(&ucontext->cq_reg_mem_list_lock, flags);
2020
2021 if (!iwpbl_shadow) {
2022 err_code = -EPROTO;
2023 goto cq_free_rsrc;
2024 }
2025 iwcq->iwpbl_shadow = iwpbl_shadow;
2026 cqmr_shadow = &iwpbl_shadow->cq_mr;
2027 info.shadow_area_pa = cqmr_shadow->cq_pbl.addr;
2028 cqmr->split = true;
2029 } else {
2030 info.shadow_area_pa = cqmr->shadow;
2031 }
2032 if (iwpbl->pbl_allocated) {
2033 info.virtual_map = true;
2034 info.pbl_chunk_size = 1;
2035 info.first_pm_pbl_idx = cqmr->cq_pbl.idx;
2036 } else {
2037 info.cq_base_pa = cqmr->cq_pbl.addr;
2038 }
2039 } else {
2040
2041 int rsize;
2042
2043 if (entries < 1 || entries > rf->max_cqe) {
2044 err_code = -EINVAL;
2045 goto cq_free_rsrc;
2046 }
2047
2048 entries++;
2049 if (dev->hw_attrs.uk_attrs.hw_rev >= IRDMA_GEN_2)
2050 entries *= 2;
2051 ukinfo->cq_size = entries;
2052
2053 rsize = info.cq_uk_init_info.cq_size * sizeof(struct irdma_cqe);
2054 iwcq->kmem.size = ALIGN(round_up(rsize, 256), 256);
2055 iwcq->kmem.va = dma_alloc_coherent(dev->hw->device,
2056 iwcq->kmem.size,
2057 &iwcq->kmem.pa, GFP_KERNEL);
2058 if (!iwcq->kmem.va) {
2059 err_code = -ENOMEM;
2060 goto cq_free_rsrc;
2061 }
2062
2063 iwcq->kmem_shadow.size = ALIGN(IRDMA_SHADOW_AREA_SIZE << 3,
2064 64);
2065 iwcq->kmem_shadow.va = dma_alloc_coherent(dev->hw->device,
2066 iwcq->kmem_shadow.size,
2067 &iwcq->kmem_shadow.pa,
2068 GFP_KERNEL);
2069 if (!iwcq->kmem_shadow.va) {
2070 err_code = -ENOMEM;
2071 goto cq_free_rsrc;
2072 }
2073 info.shadow_area_pa = iwcq->kmem_shadow.pa;
2074 ukinfo->shadow_area = iwcq->kmem_shadow.va;
2075 ukinfo->cq_base = iwcq->kmem.va;
2076 info.cq_base_pa = iwcq->kmem.pa;
2077 }
2078
2079 if (dev->hw_attrs.uk_attrs.hw_rev >= IRDMA_GEN_2)
2080 info.shadow_read_threshold = min(info.cq_uk_init_info.cq_size / 2,
2081 (u32)IRDMA_MAX_CQ_READ_THRESH);
2082
2083 if (irdma_sc_cq_init(cq, &info)) {
2084 ibdev_dbg(&iwdev->ibdev, "VERBS: init cq fail\n");
2085 err_code = -EPROTO;
2086 goto cq_free_rsrc;
2087 }
2088
2089 cqp_request = irdma_alloc_and_get_cqp_request(&rf->cqp, true);
2090 if (!cqp_request) {
2091 err_code = -ENOMEM;
2092 goto cq_free_rsrc;
2093 }
2094
2095 cqp_info = &cqp_request->info;
2096 cqp_info->cqp_cmd = IRDMA_OP_CQ_CREATE;
2097 cqp_info->post_sq = 1;
2098 cqp_info->in.u.cq_create.cq = cq;
2099 cqp_info->in.u.cq_create.check_overflow = true;
2100 cqp_info->in.u.cq_create.scratch = (uintptr_t)cqp_request;
2101 status = irdma_handle_cqp_op(rf, cqp_request);
2102 irdma_put_cqp_request(&rf->cqp, cqp_request);
2103 if (status) {
2104 err_code = -ENOMEM;
2105 goto cq_free_rsrc;
2106 }
2107
2108 if (udata) {
2109 struct irdma_create_cq_resp resp = {};
2110
2111 resp.cq_id = info.cq_uk_init_info.cq_id;
2112 resp.cq_size = info.cq_uk_init_info.cq_size;
2113 if (ib_copy_to_udata(udata, &resp,
2114 min(sizeof(resp), udata->outlen))) {
2115 ibdev_dbg(&iwdev->ibdev,
2116 "VERBS: copy to user data\n");
2117 err_code = -EPROTO;
2118 goto cq_destroy;
2119 }
2120 }
2121 return 0;
2122cq_destroy:
2123 irdma_cq_wq_destroy(rf, cq);
2124cq_free_rsrc:
2125 irdma_cq_free_rsrc(rf, iwcq);
2126
2127 return err_code;
2128}
2129
2130
2131
2132
2133
2134static inline u16 irdma_get_mr_access(int access)
2135{
2136 u16 hw_access = 0;
2137
2138 hw_access |= (access & IB_ACCESS_LOCAL_WRITE) ?
2139 IRDMA_ACCESS_FLAGS_LOCALWRITE : 0;
2140 hw_access |= (access & IB_ACCESS_REMOTE_WRITE) ?
2141 IRDMA_ACCESS_FLAGS_REMOTEWRITE : 0;
2142 hw_access |= (access & IB_ACCESS_REMOTE_READ) ?
2143 IRDMA_ACCESS_FLAGS_REMOTEREAD : 0;
2144 hw_access |= (access & IB_ACCESS_MW_BIND) ?
2145 IRDMA_ACCESS_FLAGS_BIND_WINDOW : 0;
2146 hw_access |= (access & IB_ZERO_BASED) ?
2147 IRDMA_ACCESS_FLAGS_ZERO_BASED : 0;
2148 hw_access |= IRDMA_ACCESS_FLAGS_LOCALREAD;
2149
2150 return hw_access;
2151}
2152
2153
2154
2155
2156
2157
2158static void irdma_free_stag(struct irdma_device *iwdev, u32 stag)
2159{
2160 u32 stag_idx;
2161
2162 stag_idx = (stag & iwdev->rf->mr_stagmask) >> IRDMA_CQPSQ_STAG_IDX_S;
2163 irdma_free_rsrc(iwdev->rf, iwdev->rf->allocated_mrs, stag_idx);
2164}
2165
2166
2167
2168
2169
2170static u32 irdma_create_stag(struct irdma_device *iwdev)
2171{
2172 u32 stag = 0;
2173 u32 stag_index = 0;
2174 u32 next_stag_index;
2175 u32 driver_key;
2176 u32 random;
2177 u8 consumer_key;
2178 int ret;
2179
2180 get_random_bytes(&random, sizeof(random));
2181 consumer_key = (u8)random;
2182
2183 driver_key = random & ~iwdev->rf->mr_stagmask;
2184 next_stag_index = (random & iwdev->rf->mr_stagmask) >> 8;
2185 next_stag_index %= iwdev->rf->max_mr;
2186
2187 ret = irdma_alloc_rsrc(iwdev->rf, iwdev->rf->allocated_mrs,
2188 iwdev->rf->max_mr, &stag_index,
2189 &next_stag_index);
2190 if (ret)
2191 return stag;
2192 stag = stag_index << IRDMA_CQPSQ_STAG_IDX_S;
2193 stag |= driver_key;
2194 stag += (u32)consumer_key;
2195
2196 return stag;
2197}
2198
2199
2200
2201
2202
2203
2204
2205static inline u64 *irdma_next_pbl_addr(u64 *pbl, struct irdma_pble_info **pinfo,
2206 u32 *idx)
2207{
2208 *idx += 1;
2209 if (!(*pinfo) || *idx != (*pinfo)->cnt)
2210 return ++pbl;
2211 *idx = 0;
2212 (*pinfo)++;
2213
2214 return (*pinfo)->addr;
2215}
2216
2217
2218
2219
2220
2221
2222
2223static void irdma_copy_user_pgaddrs(struct irdma_mr *iwmr, u64 *pbl,
2224 enum irdma_pble_level level)
2225{
2226 struct ib_umem *region = iwmr->region;
2227 struct irdma_pbl *iwpbl = &iwmr->iwpbl;
2228 struct irdma_pble_alloc *palloc = &iwpbl->pble_alloc;
2229 struct irdma_pble_info *pinfo;
2230 struct ib_block_iter biter;
2231 u32 idx = 0;
2232 u32 pbl_cnt = 0;
2233
2234 pinfo = (level == PBLE_LEVEL_1) ? NULL : palloc->level2.leaf;
2235
2236 if (iwmr->type == IRDMA_MEMREG_TYPE_QP)
2237 iwpbl->qp_mr.sq_page = sg_page(region->sgt_append.sgt.sgl);
2238
2239 rdma_umem_for_each_dma_block(region, &biter, iwmr->page_size) {
2240 *pbl = rdma_block_iter_dma_address(&biter);
2241 if (++pbl_cnt == palloc->total_cnt)
2242 break;
2243 pbl = irdma_next_pbl_addr(pbl, &pinfo, &idx);
2244 }
2245}
2246
2247
2248
2249
2250
2251
2252
2253
2254static bool irdma_check_mem_contiguous(u64 *arr, u32 npages, u32 pg_size)
2255{
2256 u32 pg_idx;
2257
2258 for (pg_idx = 0; pg_idx < npages; pg_idx++) {
2259 if ((*arr + (pg_size * pg_idx)) != arr[pg_idx])
2260 return false;
2261 }
2262
2263 return true;
2264}
2265
2266
2267
2268
2269
2270
2271static bool irdma_check_mr_contiguous(struct irdma_pble_alloc *palloc,
2272 u32 pg_size)
2273{
2274 struct irdma_pble_level2 *lvl2 = &palloc->level2;
2275 struct irdma_pble_info *leaf = lvl2->leaf;
2276 u64 *arr = NULL;
2277 u64 *start_addr = NULL;
2278 int i;
2279 bool ret;
2280
2281 if (palloc->level == PBLE_LEVEL_1) {
2282 arr = palloc->level1.addr;
2283 ret = irdma_check_mem_contiguous(arr, palloc->total_cnt,
2284 pg_size);
2285 return ret;
2286 }
2287
2288 start_addr = leaf->addr;
2289
2290 for (i = 0; i < lvl2->leaf_cnt; i++, leaf++) {
2291 arr = leaf->addr;
2292 if ((*start_addr + (i * pg_size * PBLE_PER_PAGE)) != *arr)
2293 return false;
2294 ret = irdma_check_mem_contiguous(arr, leaf->cnt, pg_size);
2295 if (!ret)
2296 return false;
2297 }
2298
2299 return true;
2300}
2301
2302
2303
2304
2305
2306
2307
2308static int irdma_setup_pbles(struct irdma_pci_f *rf, struct irdma_mr *iwmr,
2309 bool use_pbles)
2310{
2311 struct irdma_pbl *iwpbl = &iwmr->iwpbl;
2312 struct irdma_pble_alloc *palloc = &iwpbl->pble_alloc;
2313 struct irdma_pble_info *pinfo;
2314 u64 *pbl;
2315 enum irdma_status_code status;
2316 enum irdma_pble_level level = PBLE_LEVEL_1;
2317
2318 if (use_pbles) {
2319 status = irdma_get_pble(rf->pble_rsrc, palloc, iwmr->page_cnt,
2320 false);
2321 if (status)
2322 return -ENOMEM;
2323
2324 iwpbl->pbl_allocated = true;
2325 level = palloc->level;
2326 pinfo = (level == PBLE_LEVEL_1) ? &palloc->level1 :
2327 palloc->level2.leaf;
2328 pbl = pinfo->addr;
2329 } else {
2330 pbl = iwmr->pgaddrmem;
2331 }
2332
2333 irdma_copy_user_pgaddrs(iwmr, pbl, level);
2334
2335 if (use_pbles)
2336 iwmr->pgaddrmem[0] = *pbl;
2337
2338 return 0;
2339}
2340
2341
2342
2343
2344
2345
2346
2347
2348static int irdma_handle_q_mem(struct irdma_device *iwdev,
2349 struct irdma_mem_reg_req *req,
2350 struct irdma_pbl *iwpbl, bool use_pbles)
2351{
2352 struct irdma_pble_alloc *palloc = &iwpbl->pble_alloc;
2353 struct irdma_mr *iwmr = iwpbl->iwmr;
2354 struct irdma_qp_mr *qpmr = &iwpbl->qp_mr;
2355 struct irdma_cq_mr *cqmr = &iwpbl->cq_mr;
2356 struct irdma_hmc_pble *hmc_p;
2357 u64 *arr = iwmr->pgaddrmem;
2358 u32 pg_size, total;
2359 int err = 0;
2360 bool ret = true;
2361
2362 pg_size = iwmr->page_size;
2363 err = irdma_setup_pbles(iwdev->rf, iwmr, use_pbles);
2364 if (err)
2365 return err;
2366
2367 if (use_pbles && palloc->level != PBLE_LEVEL_1) {
2368 irdma_free_pble(iwdev->rf->pble_rsrc, palloc);
2369 iwpbl->pbl_allocated = false;
2370 return -ENOMEM;
2371 }
2372
2373 if (use_pbles)
2374 arr = palloc->level1.addr;
2375
2376 switch (iwmr->type) {
2377 case IRDMA_MEMREG_TYPE_QP:
2378 total = req->sq_pages + req->rq_pages;
2379 hmc_p = &qpmr->sq_pbl;
2380 qpmr->shadow = (dma_addr_t)arr[total];
2381
2382 if (use_pbles) {
2383 ret = irdma_check_mem_contiguous(arr, req->sq_pages,
2384 pg_size);
2385 if (ret)
2386 ret = irdma_check_mem_contiguous(&arr[req->sq_pages],
2387 req->rq_pages,
2388 pg_size);
2389 }
2390
2391 if (!ret) {
2392 hmc_p->idx = palloc->level1.idx;
2393 hmc_p = &qpmr->rq_pbl;
2394 hmc_p->idx = palloc->level1.idx + req->sq_pages;
2395 } else {
2396 hmc_p->addr = arr[0];
2397 hmc_p = &qpmr->rq_pbl;
2398 hmc_p->addr = arr[req->sq_pages];
2399 }
2400 break;
2401 case IRDMA_MEMREG_TYPE_CQ:
2402 hmc_p = &cqmr->cq_pbl;
2403
2404 if (!cqmr->split)
2405 cqmr->shadow = (dma_addr_t)arr[req->cq_pages];
2406
2407 if (use_pbles)
2408 ret = irdma_check_mem_contiguous(arr, req->cq_pages,
2409 pg_size);
2410
2411 if (!ret)
2412 hmc_p->idx = palloc->level1.idx;
2413 else
2414 hmc_p->addr = arr[0];
2415 break;
2416 default:
2417 ibdev_dbg(&iwdev->ibdev, "VERBS: MR type error\n");
2418 err = -EINVAL;
2419 }
2420
2421 if (use_pbles && ret) {
2422 irdma_free_pble(iwdev->rf->pble_rsrc, palloc);
2423 iwpbl->pbl_allocated = false;
2424 }
2425
2426 return err;
2427}
2428
2429
2430
2431
2432
2433
2434static int irdma_hw_alloc_mw(struct irdma_device *iwdev, struct irdma_mr *iwmr)
2435{
2436 struct irdma_mw_alloc_info *info;
2437 struct irdma_pd *iwpd = to_iwpd(iwmr->ibmr.pd);
2438 struct irdma_cqp_request *cqp_request;
2439 struct cqp_cmds_info *cqp_info;
2440 enum irdma_status_code status;
2441
2442 cqp_request = irdma_alloc_and_get_cqp_request(&iwdev->rf->cqp, true);
2443 if (!cqp_request)
2444 return -ENOMEM;
2445
2446 cqp_info = &cqp_request->info;
2447 info = &cqp_info->in.u.mw_alloc.info;
2448 memset(info, 0, sizeof(*info));
2449 if (iwmr->ibmw.type == IB_MW_TYPE_1)
2450 info->mw_wide = true;
2451
2452 info->page_size = PAGE_SIZE;
2453 info->mw_stag_index = iwmr->stag >> IRDMA_CQPSQ_STAG_IDX_S;
2454 info->pd_id = iwpd->sc_pd.pd_id;
2455 info->remote_access = true;
2456 cqp_info->cqp_cmd = IRDMA_OP_MW_ALLOC;
2457 cqp_info->post_sq = 1;
2458 cqp_info->in.u.mw_alloc.dev = &iwdev->rf->sc_dev;
2459 cqp_info->in.u.mw_alloc.scratch = (uintptr_t)cqp_request;
2460 status = irdma_handle_cqp_op(iwdev->rf, cqp_request);
2461 irdma_put_cqp_request(&iwdev->rf->cqp, cqp_request);
2462
2463 return status ? -ENOMEM : 0;
2464}
2465
2466
2467
2468
2469
2470
2471static int irdma_alloc_mw(struct ib_mw *ibmw, struct ib_udata *udata)
2472{
2473 struct irdma_device *iwdev = to_iwdev(ibmw->device);
2474 struct irdma_mr *iwmr = to_iwmw(ibmw);
2475 int err_code;
2476 u32 stag;
2477
2478 stag = irdma_create_stag(iwdev);
2479 if (!stag)
2480 return -ENOMEM;
2481
2482 iwmr->stag = stag;
2483 ibmw->rkey = stag;
2484
2485 err_code = irdma_hw_alloc_mw(iwdev, iwmr);
2486 if (err_code) {
2487 irdma_free_stag(iwdev, stag);
2488 return err_code;
2489 }
2490
2491 return 0;
2492}
2493
2494
2495
2496
2497
2498static int irdma_dealloc_mw(struct ib_mw *ibmw)
2499{
2500 struct ib_pd *ibpd = ibmw->pd;
2501 struct irdma_pd *iwpd = to_iwpd(ibpd);
2502 struct irdma_mr *iwmr = to_iwmr((struct ib_mr *)ibmw);
2503 struct irdma_device *iwdev = to_iwdev(ibmw->device);
2504 struct irdma_cqp_request *cqp_request;
2505 struct cqp_cmds_info *cqp_info;
2506 struct irdma_dealloc_stag_info *info;
2507
2508 cqp_request = irdma_alloc_and_get_cqp_request(&iwdev->rf->cqp, true);
2509 if (!cqp_request)
2510 return -ENOMEM;
2511
2512 cqp_info = &cqp_request->info;
2513 info = &cqp_info->in.u.dealloc_stag.info;
2514 memset(info, 0, sizeof(*info));
2515 info->pd_id = iwpd->sc_pd.pd_id & 0x00007fff;
2516 info->stag_idx = ibmw->rkey >> IRDMA_CQPSQ_STAG_IDX_S;
2517 info->mr = false;
2518 cqp_info->cqp_cmd = IRDMA_OP_DEALLOC_STAG;
2519 cqp_info->post_sq = 1;
2520 cqp_info->in.u.dealloc_stag.dev = &iwdev->rf->sc_dev;
2521 cqp_info->in.u.dealloc_stag.scratch = (uintptr_t)cqp_request;
2522 irdma_handle_cqp_op(iwdev->rf, cqp_request);
2523 irdma_put_cqp_request(&iwdev->rf->cqp, cqp_request);
2524 irdma_free_stag(iwdev, iwmr->stag);
2525
2526 return 0;
2527}
2528
2529
2530
2531
2532
2533
2534static int irdma_hw_alloc_stag(struct irdma_device *iwdev,
2535 struct irdma_mr *iwmr)
2536{
2537 struct irdma_allocate_stag_info *info;
2538 struct irdma_pd *iwpd = to_iwpd(iwmr->ibmr.pd);
2539 enum irdma_status_code status;
2540 int err = 0;
2541 struct irdma_cqp_request *cqp_request;
2542 struct cqp_cmds_info *cqp_info;
2543
2544 cqp_request = irdma_alloc_and_get_cqp_request(&iwdev->rf->cqp, true);
2545 if (!cqp_request)
2546 return -ENOMEM;
2547
2548 cqp_info = &cqp_request->info;
2549 info = &cqp_info->in.u.alloc_stag.info;
2550 memset(info, 0, sizeof(*info));
2551 info->page_size = PAGE_SIZE;
2552 info->stag_idx = iwmr->stag >> IRDMA_CQPSQ_STAG_IDX_S;
2553 info->pd_id = iwpd->sc_pd.pd_id;
2554 info->total_len = iwmr->len;
2555 info->remote_access = true;
2556 cqp_info->cqp_cmd = IRDMA_OP_ALLOC_STAG;
2557 cqp_info->post_sq = 1;
2558 cqp_info->in.u.alloc_stag.dev = &iwdev->rf->sc_dev;
2559 cqp_info->in.u.alloc_stag.scratch = (uintptr_t)cqp_request;
2560 status = irdma_handle_cqp_op(iwdev->rf, cqp_request);
2561 irdma_put_cqp_request(&iwdev->rf->cqp, cqp_request);
2562 if (status)
2563 err = -ENOMEM;
2564
2565 return err;
2566}
2567
2568
2569
2570
2571
2572
2573
2574static struct ib_mr *irdma_alloc_mr(struct ib_pd *pd, enum ib_mr_type mr_type,
2575 u32 max_num_sg)
2576{
2577 struct irdma_device *iwdev = to_iwdev(pd->device);
2578 struct irdma_pble_alloc *palloc;
2579 struct irdma_pbl *iwpbl;
2580 struct irdma_mr *iwmr;
2581 enum irdma_status_code status;
2582 u32 stag;
2583 int err_code = -ENOMEM;
2584
2585 iwmr = kzalloc(sizeof(*iwmr), GFP_KERNEL);
2586 if (!iwmr)
2587 return ERR_PTR(-ENOMEM);
2588
2589 stag = irdma_create_stag(iwdev);
2590 if (!stag) {
2591 err_code = -ENOMEM;
2592 goto err;
2593 }
2594
2595 iwmr->stag = stag;
2596 iwmr->ibmr.rkey = stag;
2597 iwmr->ibmr.lkey = stag;
2598 iwmr->ibmr.pd = pd;
2599 iwmr->ibmr.device = pd->device;
2600 iwpbl = &iwmr->iwpbl;
2601 iwpbl->iwmr = iwmr;
2602 iwmr->type = IRDMA_MEMREG_TYPE_MEM;
2603 palloc = &iwpbl->pble_alloc;
2604 iwmr->page_cnt = max_num_sg;
2605 status = irdma_get_pble(iwdev->rf->pble_rsrc, palloc, iwmr->page_cnt,
2606 true);
2607 if (status)
2608 goto err_get_pble;
2609
2610 err_code = irdma_hw_alloc_stag(iwdev, iwmr);
2611 if (err_code)
2612 goto err_alloc_stag;
2613
2614 iwpbl->pbl_allocated = true;
2615
2616 return &iwmr->ibmr;
2617err_alloc_stag:
2618 irdma_free_pble(iwdev->rf->pble_rsrc, palloc);
2619err_get_pble:
2620 irdma_free_stag(iwdev, stag);
2621err:
2622 kfree(iwmr);
2623
2624 return ERR_PTR(err_code);
2625}
2626
2627
2628
2629
2630
2631
2632static int irdma_set_page(struct ib_mr *ibmr, u64 addr)
2633{
2634 struct irdma_mr *iwmr = to_iwmr(ibmr);
2635 struct irdma_pbl *iwpbl = &iwmr->iwpbl;
2636 struct irdma_pble_alloc *palloc = &iwpbl->pble_alloc;
2637 u64 *pbl;
2638
2639 if (unlikely(iwmr->npages == iwmr->page_cnt))
2640 return -ENOMEM;
2641
2642 pbl = palloc->level1.addr;
2643 pbl[iwmr->npages++] = addr;
2644
2645 return 0;
2646}
2647
2648
2649
2650
2651
2652
2653
2654
2655static int irdma_map_mr_sg(struct ib_mr *ibmr, struct scatterlist *sg,
2656 int sg_nents, unsigned int *sg_offset)
2657{
2658 struct irdma_mr *iwmr = to_iwmr(ibmr);
2659
2660 iwmr->npages = 0;
2661
2662 return ib_sg_to_pages(ibmr, sg, sg_nents, sg_offset, irdma_set_page);
2663}
2664
2665
2666
2667
2668
2669
2670
2671static int irdma_hwreg_mr(struct irdma_device *iwdev, struct irdma_mr *iwmr,
2672 u16 access)
2673{
2674 struct irdma_pbl *iwpbl = &iwmr->iwpbl;
2675 struct irdma_reg_ns_stag_info *stag_info;
2676 struct irdma_pd *iwpd = to_iwpd(iwmr->ibmr.pd);
2677 struct irdma_pble_alloc *palloc = &iwpbl->pble_alloc;
2678 enum irdma_status_code status;
2679 int err = 0;
2680 struct irdma_cqp_request *cqp_request;
2681 struct cqp_cmds_info *cqp_info;
2682
2683 cqp_request = irdma_alloc_and_get_cqp_request(&iwdev->rf->cqp, true);
2684 if (!cqp_request)
2685 return -ENOMEM;
2686
2687 cqp_info = &cqp_request->info;
2688 stag_info = &cqp_info->in.u.mr_reg_non_shared.info;
2689 memset(stag_info, 0, sizeof(*stag_info));
2690 stag_info->va = iwpbl->user_base;
2691 stag_info->stag_idx = iwmr->stag >> IRDMA_CQPSQ_STAG_IDX_S;
2692 stag_info->stag_key = (u8)iwmr->stag;
2693 stag_info->total_len = iwmr->len;
2694 stag_info->access_rights = irdma_get_mr_access(access);
2695 stag_info->pd_id = iwpd->sc_pd.pd_id;
2696 if (stag_info->access_rights & IRDMA_ACCESS_FLAGS_ZERO_BASED)
2697 stag_info->addr_type = IRDMA_ADDR_TYPE_ZERO_BASED;
2698 else
2699 stag_info->addr_type = IRDMA_ADDR_TYPE_VA_BASED;
2700 stag_info->page_size = iwmr->page_size;
2701
2702 if (iwpbl->pbl_allocated) {
2703 if (palloc->level == PBLE_LEVEL_1) {
2704 stag_info->first_pm_pbl_index = palloc->level1.idx;
2705 stag_info->chunk_size = 1;
2706 } else {
2707 stag_info->first_pm_pbl_index = palloc->level2.root.idx;
2708 stag_info->chunk_size = 3;
2709 }
2710 } else {
2711 stag_info->reg_addr_pa = iwmr->pgaddrmem[0];
2712 }
2713
2714 cqp_info->cqp_cmd = IRDMA_OP_MR_REG_NON_SHARED;
2715 cqp_info->post_sq = 1;
2716 cqp_info->in.u.mr_reg_non_shared.dev = &iwdev->rf->sc_dev;
2717 cqp_info->in.u.mr_reg_non_shared.scratch = (uintptr_t)cqp_request;
2718 status = irdma_handle_cqp_op(iwdev->rf, cqp_request);
2719 irdma_put_cqp_request(&iwdev->rf->cqp, cqp_request);
2720 if (status)
2721 err = -ENOMEM;
2722
2723 return err;
2724}
2725
2726
2727
2728
2729
2730
2731
2732
2733
2734
2735static struct ib_mr *irdma_reg_user_mr(struct ib_pd *pd, u64 start, u64 len,
2736 u64 virt, int access,
2737 struct ib_udata *udata)
2738{
2739 struct irdma_device *iwdev = to_iwdev(pd->device);
2740 struct irdma_ucontext *ucontext;
2741 struct irdma_pble_alloc *palloc;
2742 struct irdma_pbl *iwpbl;
2743 struct irdma_mr *iwmr;
2744 struct ib_umem *region;
2745 struct irdma_mem_reg_req req;
2746 u32 total, stag = 0;
2747 u8 shadow_pgcnt = 1;
2748 bool use_pbles = false;
2749 unsigned long flags;
2750 int err = -EINVAL;
2751 int ret;
2752
2753 if (len > iwdev->rf->sc_dev.hw_attrs.max_mr_size)
2754 return ERR_PTR(-EINVAL);
2755
2756 region = ib_umem_get(udata, start, len, access);
2757
2758 if (IS_ERR(region)) {
2759 ibdev_dbg(&iwdev->ibdev,
2760 "VERBS: Failed to create ib_umem region\n");
2761 return (struct ib_mr *)region;
2762 }
2763
2764 if (ib_copy_from_udata(&req, udata, min(sizeof(req), udata->inlen))) {
2765 ib_umem_release(region);
2766 return ERR_PTR(-EFAULT);
2767 }
2768
2769 iwmr = kzalloc(sizeof(*iwmr), GFP_KERNEL);
2770 if (!iwmr) {
2771 ib_umem_release(region);
2772 return ERR_PTR(-ENOMEM);
2773 }
2774
2775 iwpbl = &iwmr->iwpbl;
2776 iwpbl->iwmr = iwmr;
2777 iwmr->region = region;
2778 iwmr->ibmr.pd = pd;
2779 iwmr->ibmr.device = pd->device;
2780 iwmr->ibmr.iova = virt;
2781 iwmr->page_size = PAGE_SIZE;
2782
2783 if (req.reg_type == IRDMA_MEMREG_TYPE_MEM) {
2784 iwmr->page_size = ib_umem_find_best_pgsz(region,
2785 SZ_4K | SZ_2M | SZ_1G,
2786 virt);
2787 if (unlikely(!iwmr->page_size)) {
2788 kfree(iwmr);
2789 ib_umem_release(region);
2790 return ERR_PTR(-EOPNOTSUPP);
2791 }
2792 }
2793 iwmr->len = region->length;
2794 iwpbl->user_base = virt;
2795 palloc = &iwpbl->pble_alloc;
2796 iwmr->type = req.reg_type;
2797 iwmr->page_cnt = ib_umem_num_dma_blocks(region, iwmr->page_size);
2798
2799 switch (req.reg_type) {
2800 case IRDMA_MEMREG_TYPE_QP:
2801 total = req.sq_pages + req.rq_pages + shadow_pgcnt;
2802 if (total > iwmr->page_cnt) {
2803 err = -EINVAL;
2804 goto error;
2805 }
2806 total = req.sq_pages + req.rq_pages;
2807 use_pbles = (total > 2);
2808 err = irdma_handle_q_mem(iwdev, &req, iwpbl, use_pbles);
2809 if (err)
2810 goto error;
2811
2812 ucontext = rdma_udata_to_drv_context(udata, struct irdma_ucontext,
2813 ibucontext);
2814 spin_lock_irqsave(&ucontext->qp_reg_mem_list_lock, flags);
2815 list_add_tail(&iwpbl->list, &ucontext->qp_reg_mem_list);
2816 iwpbl->on_list = true;
2817 spin_unlock_irqrestore(&ucontext->qp_reg_mem_list_lock, flags);
2818 break;
2819 case IRDMA_MEMREG_TYPE_CQ:
2820 if (iwdev->rf->sc_dev.hw_attrs.uk_attrs.feature_flags & IRDMA_FEATURE_CQ_RESIZE)
2821 shadow_pgcnt = 0;
2822 total = req.cq_pages + shadow_pgcnt;
2823 if (total > iwmr->page_cnt) {
2824 err = -EINVAL;
2825 goto error;
2826 }
2827
2828 use_pbles = (req.cq_pages > 1);
2829 err = irdma_handle_q_mem(iwdev, &req, iwpbl, use_pbles);
2830 if (err)
2831 goto error;
2832
2833 ucontext = rdma_udata_to_drv_context(udata, struct irdma_ucontext,
2834 ibucontext);
2835 spin_lock_irqsave(&ucontext->cq_reg_mem_list_lock, flags);
2836 list_add_tail(&iwpbl->list, &ucontext->cq_reg_mem_list);
2837 iwpbl->on_list = true;
2838 spin_unlock_irqrestore(&ucontext->cq_reg_mem_list_lock, flags);
2839 break;
2840 case IRDMA_MEMREG_TYPE_MEM:
2841 use_pbles = (iwmr->page_cnt != 1);
2842
2843 err = irdma_setup_pbles(iwdev->rf, iwmr, use_pbles);
2844 if (err)
2845 goto error;
2846
2847 if (use_pbles) {
2848 ret = irdma_check_mr_contiguous(palloc,
2849 iwmr->page_size);
2850 if (ret) {
2851 irdma_free_pble(iwdev->rf->pble_rsrc, palloc);
2852 iwpbl->pbl_allocated = false;
2853 }
2854 }
2855
2856 stag = irdma_create_stag(iwdev);
2857 if (!stag) {
2858 err = -ENOMEM;
2859 goto error;
2860 }
2861
2862 iwmr->stag = stag;
2863 iwmr->ibmr.rkey = stag;
2864 iwmr->ibmr.lkey = stag;
2865 err = irdma_hwreg_mr(iwdev, iwmr, access);
2866 if (err) {
2867 irdma_free_stag(iwdev, stag);
2868 goto error;
2869 }
2870
2871 break;
2872 default:
2873 goto error;
2874 }
2875
2876 iwmr->type = req.reg_type;
2877
2878 return &iwmr->ibmr;
2879
2880error:
2881 if (palloc->level != PBLE_LEVEL_0 && iwpbl->pbl_allocated)
2882 irdma_free_pble(iwdev->rf->pble_rsrc, palloc);
2883 ib_umem_release(region);
2884 kfree(iwmr);
2885
2886 return ERR_PTR(err);
2887}
2888
2889
2890
2891
2892
2893
2894
2895
2896
2897struct ib_mr *irdma_reg_phys_mr(struct ib_pd *pd, u64 addr, u64 size, int access,
2898 u64 *iova_start)
2899{
2900 struct irdma_device *iwdev = to_iwdev(pd->device);
2901 struct irdma_pbl *iwpbl;
2902 struct irdma_mr *iwmr;
2903 enum irdma_status_code status;
2904 u32 stag;
2905 int ret;
2906
2907 iwmr = kzalloc(sizeof(*iwmr), GFP_KERNEL);
2908 if (!iwmr)
2909 return ERR_PTR(-ENOMEM);
2910
2911 iwmr->ibmr.pd = pd;
2912 iwmr->ibmr.device = pd->device;
2913 iwpbl = &iwmr->iwpbl;
2914 iwpbl->iwmr = iwmr;
2915 iwmr->type = IRDMA_MEMREG_TYPE_MEM;
2916 iwpbl->user_base = *iova_start;
2917 stag = irdma_create_stag(iwdev);
2918 if (!stag) {
2919 ret = -ENOMEM;
2920 goto err;
2921 }
2922
2923 iwmr->stag = stag;
2924 iwmr->ibmr.iova = *iova_start;
2925 iwmr->ibmr.rkey = stag;
2926 iwmr->ibmr.lkey = stag;
2927 iwmr->page_cnt = 1;
2928 iwmr->pgaddrmem[0] = addr;
2929 iwmr->len = size;
2930 iwmr->page_size = SZ_4K;
2931 status = irdma_hwreg_mr(iwdev, iwmr, access);
2932 if (status) {
2933 irdma_free_stag(iwdev, stag);
2934 ret = -ENOMEM;
2935 goto err;
2936 }
2937
2938 return &iwmr->ibmr;
2939
2940err:
2941 kfree(iwmr);
2942
2943 return ERR_PTR(ret);
2944}
2945
2946
2947
2948
2949
2950
2951static struct ib_mr *irdma_get_dma_mr(struct ib_pd *pd, int acc)
2952{
2953 u64 kva = 0;
2954
2955 return irdma_reg_phys_mr(pd, 0, 0, acc, &kva);
2956}
2957
2958
2959
2960
2961
2962
2963static void irdma_del_memlist(struct irdma_mr *iwmr,
2964 struct irdma_ucontext *ucontext)
2965{
2966 struct irdma_pbl *iwpbl = &iwmr->iwpbl;
2967 unsigned long flags;
2968
2969 switch (iwmr->type) {
2970 case IRDMA_MEMREG_TYPE_CQ:
2971 spin_lock_irqsave(&ucontext->cq_reg_mem_list_lock, flags);
2972 if (iwpbl->on_list) {
2973 iwpbl->on_list = false;
2974 list_del(&iwpbl->list);
2975 }
2976 spin_unlock_irqrestore(&ucontext->cq_reg_mem_list_lock, flags);
2977 break;
2978 case IRDMA_MEMREG_TYPE_QP:
2979 spin_lock_irqsave(&ucontext->qp_reg_mem_list_lock, flags);
2980 if (iwpbl->on_list) {
2981 iwpbl->on_list = false;
2982 list_del(&iwpbl->list);
2983 }
2984 spin_unlock_irqrestore(&ucontext->qp_reg_mem_list_lock, flags);
2985 break;
2986 default:
2987 break;
2988 }
2989}
2990
2991
2992
2993
2994
2995
2996static int irdma_dereg_mr(struct ib_mr *ib_mr, struct ib_udata *udata)
2997{
2998 struct ib_pd *ibpd = ib_mr->pd;
2999 struct irdma_pd *iwpd = to_iwpd(ibpd);
3000 struct irdma_mr *iwmr = to_iwmr(ib_mr);
3001 struct irdma_device *iwdev = to_iwdev(ib_mr->device);
3002 struct irdma_dealloc_stag_info *info;
3003 struct irdma_pbl *iwpbl = &iwmr->iwpbl;
3004 struct irdma_pble_alloc *palloc = &iwpbl->pble_alloc;
3005 struct irdma_cqp_request *cqp_request;
3006 struct cqp_cmds_info *cqp_info;
3007
3008 if (iwmr->type != IRDMA_MEMREG_TYPE_MEM) {
3009 if (iwmr->region) {
3010 struct irdma_ucontext *ucontext;
3011
3012 ucontext = rdma_udata_to_drv_context(udata,
3013 struct irdma_ucontext,
3014 ibucontext);
3015 irdma_del_memlist(iwmr, ucontext);
3016 }
3017 goto done;
3018 }
3019
3020 cqp_request = irdma_alloc_and_get_cqp_request(&iwdev->rf->cqp, true);
3021 if (!cqp_request)
3022 return -ENOMEM;
3023
3024 cqp_info = &cqp_request->info;
3025 info = &cqp_info->in.u.dealloc_stag.info;
3026 memset(info, 0, sizeof(*info));
3027 info->pd_id = iwpd->sc_pd.pd_id & 0x00007fff;
3028 info->stag_idx = ib_mr->rkey >> IRDMA_CQPSQ_STAG_IDX_S;
3029 info->mr = true;
3030 if (iwpbl->pbl_allocated)
3031 info->dealloc_pbl = true;
3032
3033 cqp_info->cqp_cmd = IRDMA_OP_DEALLOC_STAG;
3034 cqp_info->post_sq = 1;
3035 cqp_info->in.u.dealloc_stag.dev = &iwdev->rf->sc_dev;
3036 cqp_info->in.u.dealloc_stag.scratch = (uintptr_t)cqp_request;
3037 irdma_handle_cqp_op(iwdev->rf, cqp_request);
3038 irdma_put_cqp_request(&iwdev->rf->cqp, cqp_request);
3039 irdma_free_stag(iwdev, iwmr->stag);
3040done:
3041 if (iwpbl->pbl_allocated)
3042 irdma_free_pble(iwdev->rf->pble_rsrc, palloc);
3043 ib_umem_release(iwmr->region);
3044 kfree(iwmr);
3045
3046 return 0;
3047}
3048
3049
3050
3051
3052
3053
3054
3055static void irdma_copy_sg_list(struct irdma_sge *sg_list, struct ib_sge *sgl,
3056 int num_sges)
3057{
3058 unsigned int i;
3059
3060 for (i = 0; (i < num_sges) && (i < IRDMA_MAX_WQ_FRAGMENT_COUNT); i++) {
3061 sg_list[i].tag_off = sgl[i].addr;
3062 sg_list[i].len = sgl[i].length;
3063 sg_list[i].stag = sgl[i].lkey;
3064 }
3065}
3066
3067
3068
3069
3070
3071
3072
3073static int irdma_post_send(struct ib_qp *ibqp,
3074 const struct ib_send_wr *ib_wr,
3075 const struct ib_send_wr **bad_wr)
3076{
3077 struct irdma_qp *iwqp;
3078 struct irdma_qp_uk *ukqp;
3079 struct irdma_sc_dev *dev;
3080 struct irdma_post_sq_info info;
3081 enum irdma_status_code ret;
3082 int err = 0;
3083 unsigned long flags;
3084 bool inv_stag;
3085 struct irdma_ah *ah;
3086 bool reflush = false;
3087
3088 iwqp = to_iwqp(ibqp);
3089 ukqp = &iwqp->sc_qp.qp_uk;
3090 dev = &iwqp->iwdev->rf->sc_dev;
3091
3092 spin_lock_irqsave(&iwqp->lock, flags);
3093 if (iwqp->flush_issued && ukqp->sq_flush_complete)
3094 reflush = true;
3095 while (ib_wr) {
3096 memset(&info, 0, sizeof(info));
3097 inv_stag = false;
3098 info.wr_id = (ib_wr->wr_id);
3099 if ((ib_wr->send_flags & IB_SEND_SIGNALED) || iwqp->sig_all)
3100 info.signaled = true;
3101 if (ib_wr->send_flags & IB_SEND_FENCE)
3102 info.read_fence = true;
3103 switch (ib_wr->opcode) {
3104 case IB_WR_SEND_WITH_IMM:
3105 if (ukqp->qp_caps & IRDMA_SEND_WITH_IMM) {
3106 info.imm_data_valid = true;
3107 info.imm_data = ntohl(ib_wr->ex.imm_data);
3108 } else {
3109 err = -EINVAL;
3110 break;
3111 }
3112 fallthrough;
3113 case IB_WR_SEND:
3114 case IB_WR_SEND_WITH_INV:
3115 if (ib_wr->opcode == IB_WR_SEND ||
3116 ib_wr->opcode == IB_WR_SEND_WITH_IMM) {
3117 if (ib_wr->send_flags & IB_SEND_SOLICITED)
3118 info.op_type = IRDMA_OP_TYPE_SEND_SOL;
3119 else
3120 info.op_type = IRDMA_OP_TYPE_SEND;
3121 } else {
3122 if (ib_wr->send_flags & IB_SEND_SOLICITED)
3123 info.op_type = IRDMA_OP_TYPE_SEND_SOL_INV;
3124 else
3125 info.op_type = IRDMA_OP_TYPE_SEND_INV;
3126 info.stag_to_inv = ib_wr->ex.invalidate_rkey;
3127 }
3128
3129 if (ib_wr->send_flags & IB_SEND_INLINE) {
3130 info.op.inline_send.data = (void *)(unsigned long)
3131 ib_wr->sg_list[0].addr;
3132 info.op.inline_send.len = ib_wr->sg_list[0].length;
3133 if (iwqp->ibqp.qp_type == IB_QPT_UD ||
3134 iwqp->ibqp.qp_type == IB_QPT_GSI) {
3135 ah = to_iwah(ud_wr(ib_wr)->ah);
3136 info.op.inline_send.ah_id = ah->sc_ah.ah_info.ah_idx;
3137 info.op.inline_send.qkey = ud_wr(ib_wr)->remote_qkey;
3138 info.op.inline_send.dest_qp = ud_wr(ib_wr)->remote_qpn;
3139 }
3140 ret = irdma_uk_inline_send(ukqp, &info, false);
3141 } else {
3142 info.op.send.num_sges = ib_wr->num_sge;
3143 info.op.send.sg_list = (struct irdma_sge *)
3144 ib_wr->sg_list;
3145 if (iwqp->ibqp.qp_type == IB_QPT_UD ||
3146 iwqp->ibqp.qp_type == IB_QPT_GSI) {
3147 ah = to_iwah(ud_wr(ib_wr)->ah);
3148 info.op.send.ah_id = ah->sc_ah.ah_info.ah_idx;
3149 info.op.send.qkey = ud_wr(ib_wr)->remote_qkey;
3150 info.op.send.dest_qp = ud_wr(ib_wr)->remote_qpn;
3151 }
3152 ret = irdma_uk_send(ukqp, &info, false);
3153 }
3154
3155 if (ret) {
3156 if (ret == IRDMA_ERR_QP_TOOMANY_WRS_POSTED)
3157 err = -ENOMEM;
3158 else
3159 err = -EINVAL;
3160 }
3161 break;
3162 case IB_WR_RDMA_WRITE_WITH_IMM:
3163 if (ukqp->qp_caps & IRDMA_WRITE_WITH_IMM) {
3164 info.imm_data_valid = true;
3165 info.imm_data = ntohl(ib_wr->ex.imm_data);
3166 } else {
3167 err = -EINVAL;
3168 break;
3169 }
3170 fallthrough;
3171 case IB_WR_RDMA_WRITE:
3172 if (ib_wr->send_flags & IB_SEND_SOLICITED)
3173 info.op_type = IRDMA_OP_TYPE_RDMA_WRITE_SOL;
3174 else
3175 info.op_type = IRDMA_OP_TYPE_RDMA_WRITE;
3176
3177 if (ib_wr->send_flags & IB_SEND_INLINE) {
3178 info.op.inline_rdma_write.data = (void *)(uintptr_t)ib_wr->sg_list[0].addr;
3179 info.op.inline_rdma_write.len = ib_wr->sg_list[0].length;
3180 info.op.inline_rdma_write.rem_addr.tag_off = rdma_wr(ib_wr)->remote_addr;
3181 info.op.inline_rdma_write.rem_addr.stag = rdma_wr(ib_wr)->rkey;
3182 ret = irdma_uk_inline_rdma_write(ukqp, &info, false);
3183 } else {
3184 info.op.rdma_write.lo_sg_list = (void *)ib_wr->sg_list;
3185 info.op.rdma_write.num_lo_sges = ib_wr->num_sge;
3186 info.op.rdma_write.rem_addr.tag_off = rdma_wr(ib_wr)->remote_addr;
3187 info.op.rdma_write.rem_addr.stag = rdma_wr(ib_wr)->rkey;
3188 ret = irdma_uk_rdma_write(ukqp, &info, false);
3189 }
3190
3191 if (ret) {
3192 if (ret == IRDMA_ERR_QP_TOOMANY_WRS_POSTED)
3193 err = -ENOMEM;
3194 else
3195 err = -EINVAL;
3196 }
3197 break;
3198 case IB_WR_RDMA_READ_WITH_INV:
3199 inv_stag = true;
3200 fallthrough;
3201 case IB_WR_RDMA_READ:
3202 if (ib_wr->num_sge >
3203 dev->hw_attrs.uk_attrs.max_hw_read_sges) {
3204 err = -EINVAL;
3205 break;
3206 }
3207 info.op_type = IRDMA_OP_TYPE_RDMA_READ;
3208 info.op.rdma_read.rem_addr.tag_off = rdma_wr(ib_wr)->remote_addr;
3209 info.op.rdma_read.rem_addr.stag = rdma_wr(ib_wr)->rkey;
3210 info.op.rdma_read.lo_sg_list = (void *)ib_wr->sg_list;
3211 info.op.rdma_read.num_lo_sges = ib_wr->num_sge;
3212
3213 ret = irdma_uk_rdma_read(ukqp, &info, inv_stag, false);
3214 if (ret) {
3215 if (ret == IRDMA_ERR_QP_TOOMANY_WRS_POSTED)
3216 err = -ENOMEM;
3217 else
3218 err = -EINVAL;
3219 }
3220 break;
3221 case IB_WR_LOCAL_INV:
3222 info.op_type = IRDMA_OP_TYPE_INV_STAG;
3223 info.op.inv_local_stag.target_stag = ib_wr->ex.invalidate_rkey;
3224 ret = irdma_uk_stag_local_invalidate(ukqp, &info, true);
3225 if (ret)
3226 err = -ENOMEM;
3227 break;
3228 case IB_WR_REG_MR: {
3229 struct irdma_mr *iwmr = to_iwmr(reg_wr(ib_wr)->mr);
3230 struct irdma_pble_alloc *palloc = &iwmr->iwpbl.pble_alloc;
3231 struct irdma_fast_reg_stag_info stag_info = {};
3232
3233 stag_info.signaled = info.signaled;
3234 stag_info.read_fence = info.read_fence;
3235 stag_info.access_rights = irdma_get_mr_access(reg_wr(ib_wr)->access);
3236 stag_info.stag_key = reg_wr(ib_wr)->key & 0xff;
3237 stag_info.stag_idx = reg_wr(ib_wr)->key >> 8;
3238 stag_info.page_size = reg_wr(ib_wr)->mr->page_size;
3239 stag_info.wr_id = ib_wr->wr_id;
3240 stag_info.addr_type = IRDMA_ADDR_TYPE_VA_BASED;
3241 stag_info.va = (void *)(uintptr_t)iwmr->ibmr.iova;
3242 stag_info.total_len = iwmr->ibmr.length;
3243 stag_info.reg_addr_pa = *palloc->level1.addr;
3244 stag_info.first_pm_pbl_index = palloc->level1.idx;
3245 stag_info.local_fence = ib_wr->send_flags & IB_SEND_FENCE;
3246 if (iwmr->npages > IRDMA_MIN_PAGES_PER_FMR)
3247 stag_info.chunk_size = 1;
3248 ret = irdma_sc_mr_fast_register(&iwqp->sc_qp, &stag_info,
3249 true);
3250 if (ret)
3251 err = -ENOMEM;
3252 break;
3253 }
3254 default:
3255 err = -EINVAL;
3256 ibdev_dbg(&iwqp->iwdev->ibdev,
3257 "VERBS: upost_send bad opcode = 0x%x\n",
3258 ib_wr->opcode);
3259 break;
3260 }
3261
3262 if (err)
3263 break;
3264 ib_wr = ib_wr->next;
3265 }
3266
3267 if (!iwqp->flush_issued && iwqp->hw_iwarp_state <= IRDMA_QP_STATE_RTS) {
3268 irdma_uk_qp_post_wr(ukqp);
3269 spin_unlock_irqrestore(&iwqp->lock, flags);
3270 } else if (reflush) {
3271 ukqp->sq_flush_complete = false;
3272 spin_unlock_irqrestore(&iwqp->lock, flags);
3273 irdma_flush_wqes(iwqp, IRDMA_FLUSH_SQ | IRDMA_REFLUSH);
3274 } else {
3275 spin_unlock_irqrestore(&iwqp->lock, flags);
3276 }
3277 if (err)
3278 *bad_wr = ib_wr;
3279
3280 return err;
3281}
3282
3283
3284
3285
3286
3287
3288
3289static int irdma_post_recv(struct ib_qp *ibqp,
3290 const struct ib_recv_wr *ib_wr,
3291 const struct ib_recv_wr **bad_wr)
3292{
3293 struct irdma_qp *iwqp;
3294 struct irdma_qp_uk *ukqp;
3295 struct irdma_post_rq_info post_recv = {};
3296 struct irdma_sge sg_list[IRDMA_MAX_WQ_FRAGMENT_COUNT];
3297 enum irdma_status_code ret = 0;
3298 unsigned long flags;
3299 int err = 0;
3300 bool reflush = false;
3301
3302 iwqp = to_iwqp(ibqp);
3303 ukqp = &iwqp->sc_qp.qp_uk;
3304
3305 spin_lock_irqsave(&iwqp->lock, flags);
3306 if (iwqp->flush_issued && ukqp->rq_flush_complete)
3307 reflush = true;
3308 while (ib_wr) {
3309 post_recv.num_sges = ib_wr->num_sge;
3310 post_recv.wr_id = ib_wr->wr_id;
3311 irdma_copy_sg_list(sg_list, ib_wr->sg_list, ib_wr->num_sge);
3312 post_recv.sg_list = sg_list;
3313 ret = irdma_uk_post_receive(ukqp, &post_recv);
3314 if (ret) {
3315 ibdev_dbg(&iwqp->iwdev->ibdev,
3316 "VERBS: post_recv err %d\n", ret);
3317 if (ret == IRDMA_ERR_QP_TOOMANY_WRS_POSTED)
3318 err = -ENOMEM;
3319 else
3320 err = -EINVAL;
3321 goto out;
3322 }
3323
3324 ib_wr = ib_wr->next;
3325 }
3326
3327out:
3328 if (reflush) {
3329 ukqp->rq_flush_complete = false;
3330 spin_unlock_irqrestore(&iwqp->lock, flags);
3331 irdma_flush_wqes(iwqp, IRDMA_FLUSH_RQ | IRDMA_REFLUSH);
3332 } else {
3333 spin_unlock_irqrestore(&iwqp->lock, flags);
3334 }
3335
3336 if (err)
3337 *bad_wr = ib_wr;
3338
3339 return err;
3340}
3341
3342
3343
3344
3345
3346static enum ib_wc_status irdma_flush_err_to_ib_wc_status(enum irdma_flush_opcode opcode)
3347{
3348 switch (opcode) {
3349 case FLUSH_PROT_ERR:
3350 return IB_WC_LOC_PROT_ERR;
3351 case FLUSH_REM_ACCESS_ERR:
3352 return IB_WC_REM_ACCESS_ERR;
3353 case FLUSH_LOC_QP_OP_ERR:
3354 return IB_WC_LOC_QP_OP_ERR;
3355 case FLUSH_REM_OP_ERR:
3356 return IB_WC_REM_OP_ERR;
3357 case FLUSH_LOC_LEN_ERR:
3358 return IB_WC_LOC_LEN_ERR;
3359 case FLUSH_GENERAL_ERR:
3360 return IB_WC_WR_FLUSH_ERR;
3361 case FLUSH_RETRY_EXC_ERR:
3362 return IB_WC_RETRY_EXC_ERR;
3363 case FLUSH_MW_BIND_ERR:
3364 return IB_WC_MW_BIND_ERR;
3365 case FLUSH_FATAL_ERR:
3366 default:
3367 return IB_WC_FATAL_ERR;
3368 }
3369}
3370
3371
3372
3373
3374
3375
3376static void irdma_process_cqe(struct ib_wc *entry,
3377 struct irdma_cq_poll_info *cq_poll_info)
3378{
3379 struct irdma_qp *iwqp;
3380 struct irdma_sc_qp *qp;
3381
3382 entry->wc_flags = 0;
3383 entry->pkey_index = 0;
3384 entry->wr_id = cq_poll_info->wr_id;
3385
3386 qp = cq_poll_info->qp_handle;
3387 iwqp = qp->qp_uk.back_qp;
3388 entry->qp = qp->qp_uk.back_qp;
3389
3390 if (cq_poll_info->error) {
3391 entry->status = (cq_poll_info->comp_status == IRDMA_COMPL_STATUS_FLUSHED) ?
3392 irdma_flush_err_to_ib_wc_status(cq_poll_info->minor_err) : IB_WC_GENERAL_ERR;
3393
3394 entry->vendor_err = cq_poll_info->major_err << 16 |
3395 cq_poll_info->minor_err;
3396 } else {
3397 entry->status = IB_WC_SUCCESS;
3398 if (cq_poll_info->imm_valid) {
3399 entry->ex.imm_data = htonl(cq_poll_info->imm_data);
3400 entry->wc_flags |= IB_WC_WITH_IMM;
3401 }
3402 if (cq_poll_info->ud_smac_valid) {
3403 ether_addr_copy(entry->smac, cq_poll_info->ud_smac);
3404 entry->wc_flags |= IB_WC_WITH_SMAC;
3405 }
3406
3407 if (cq_poll_info->ud_vlan_valid) {
3408 u16 vlan = cq_poll_info->ud_vlan & VLAN_VID_MASK;
3409
3410 entry->sl = cq_poll_info->ud_vlan >> VLAN_PRIO_SHIFT;
3411 if (vlan) {
3412 entry->vlan_id = vlan;
3413 entry->wc_flags |= IB_WC_WITH_VLAN;
3414 }
3415 } else {
3416 entry->sl = 0;
3417 }
3418 }
3419
3420 switch (cq_poll_info->op_type) {
3421 case IRDMA_OP_TYPE_RDMA_WRITE:
3422 case IRDMA_OP_TYPE_RDMA_WRITE_SOL:
3423 entry->opcode = IB_WC_RDMA_WRITE;
3424 break;
3425 case IRDMA_OP_TYPE_RDMA_READ_INV_STAG:
3426 case IRDMA_OP_TYPE_RDMA_READ:
3427 entry->opcode = IB_WC_RDMA_READ;
3428 break;
3429 case IRDMA_OP_TYPE_SEND_INV:
3430 case IRDMA_OP_TYPE_SEND_SOL:
3431 case IRDMA_OP_TYPE_SEND_SOL_INV:
3432 case IRDMA_OP_TYPE_SEND:
3433 entry->opcode = IB_WC_SEND;
3434 break;
3435 case IRDMA_OP_TYPE_FAST_REG_NSMR:
3436 entry->opcode = IB_WC_REG_MR;
3437 break;
3438 case IRDMA_OP_TYPE_INV_STAG:
3439 entry->opcode = IB_WC_LOCAL_INV;
3440 break;
3441 case IRDMA_OP_TYPE_REC_IMM:
3442 case IRDMA_OP_TYPE_REC:
3443 entry->opcode = cq_poll_info->op_type == IRDMA_OP_TYPE_REC_IMM ?
3444 IB_WC_RECV_RDMA_WITH_IMM : IB_WC_RECV;
3445 if (qp->qp_uk.qp_type != IRDMA_QP_TYPE_ROCE_UD &&
3446 cq_poll_info->stag_invalid_set) {
3447 entry->ex.invalidate_rkey = cq_poll_info->inv_stag;
3448 entry->wc_flags |= IB_WC_WITH_INVALIDATE;
3449 }
3450 break;
3451 default:
3452 ibdev_err(&iwqp->iwdev->ibdev,
3453 "Invalid opcode = %d in CQE\n", cq_poll_info->op_type);
3454 entry->status = IB_WC_GENERAL_ERR;
3455 return;
3456 }
3457
3458 if (qp->qp_uk.qp_type == IRDMA_QP_TYPE_ROCE_UD) {
3459 entry->src_qp = cq_poll_info->ud_src_qpn;
3460 entry->slid = 0;
3461 entry->wc_flags |=
3462 (IB_WC_GRH | IB_WC_WITH_NETWORK_HDR_TYPE);
3463 entry->network_hdr_type = cq_poll_info->ipv4 ?
3464 RDMA_NETWORK_IPV4 :
3465 RDMA_NETWORK_IPV6;
3466 } else {
3467 entry->src_qp = cq_poll_info->qp_id;
3468 }
3469
3470 entry->byte_len = cq_poll_info->bytes_xfered;
3471}
3472
3473
3474
3475
3476
3477
3478
3479
3480
3481static inline int irdma_poll_one(struct irdma_cq_uk *ukcq,
3482 struct irdma_cq_poll_info *cur_cqe,
3483 struct ib_wc *entry)
3484{
3485 int ret = irdma_uk_cq_poll_cmpl(ukcq, cur_cqe);
3486
3487 if (ret)
3488 return ret;
3489
3490 irdma_process_cqe(entry, cur_cqe);
3491
3492 return 0;
3493}
3494
3495
3496
3497
3498
3499
3500
3501static int __irdma_poll_cq(struct irdma_cq *iwcq, int num_entries, struct ib_wc *entry)
3502{
3503 struct list_head *tmp_node, *list_node;
3504 struct irdma_cq_buf *last_buf = NULL;
3505 struct irdma_cq_poll_info *cur_cqe = &iwcq->cur_cqe;
3506 struct irdma_cq_buf *cq_buf;
3507 enum irdma_status_code ret;
3508 struct irdma_device *iwdev;
3509 struct irdma_cq_uk *ukcq;
3510 bool cq_new_cqe = false;
3511 int resized_bufs = 0;
3512 int npolled = 0;
3513
3514 iwdev = to_iwdev(iwcq->ibcq.device);
3515 ukcq = &iwcq->sc_cq.cq_uk;
3516
3517
3518 list_for_each_safe(list_node, tmp_node, &iwcq->resize_list) {
3519 cq_buf = container_of(list_node, struct irdma_cq_buf, list);
3520 while (npolled < num_entries) {
3521 ret = irdma_poll_one(&cq_buf->cq_uk, cur_cqe, entry + npolled);
3522 if (!ret) {
3523 ++npolled;
3524 cq_new_cqe = true;
3525 continue;
3526 }
3527 if (ret == IRDMA_ERR_Q_EMPTY)
3528 break;
3529
3530 if (ret == IRDMA_ERR_Q_DESTROYED) {
3531 cq_new_cqe = true;
3532 continue;
3533 }
3534 goto error;
3535 }
3536
3537
3538 if (cq_new_cqe)
3539 last_buf = cq_buf;
3540 cq_new_cqe = false;
3541 }
3542
3543
3544 while (npolled < num_entries) {
3545 ret = irdma_poll_one(ukcq, cur_cqe, entry + npolled);
3546 if (!ret) {
3547 ++npolled;
3548 cq_new_cqe = true;
3549 continue;
3550 }
3551
3552 if (ret == IRDMA_ERR_Q_EMPTY)
3553 break;
3554
3555 if (ret == IRDMA_ERR_Q_DESTROYED) {
3556 cq_new_cqe = true;
3557 continue;
3558 }
3559 goto error;
3560 }
3561
3562 if (cq_new_cqe)
3563
3564 resized_bufs = irdma_process_resize_list(iwcq, iwdev, NULL);
3565 else if (last_buf)
3566
3567 resized_bufs = irdma_process_resize_list(iwcq, iwdev, last_buf);
3568 if (resized_bufs)
3569
3570 irdma_uk_cq_set_resized_cnt(ukcq, resized_bufs);
3571
3572 return npolled;
3573error:
3574 ibdev_dbg(&iwdev->ibdev, "%s: Error polling CQ, irdma_err: %d\n",
3575 __func__, ret);
3576
3577 return -EINVAL;
3578}
3579
3580
3581
3582
3583
3584
3585
3586static int irdma_poll_cq(struct ib_cq *ibcq, int num_entries,
3587 struct ib_wc *entry)
3588{
3589 struct irdma_cq *iwcq;
3590 unsigned long flags;
3591 int ret;
3592
3593 iwcq = to_iwcq(ibcq);
3594
3595 spin_lock_irqsave(&iwcq->lock, flags);
3596 ret = __irdma_poll_cq(iwcq, num_entries, entry);
3597 spin_unlock_irqrestore(&iwcq->lock, flags);
3598
3599 return ret;
3600}
3601
3602
3603
3604
3605
3606
3607static int irdma_req_notify_cq(struct ib_cq *ibcq,
3608 enum ib_cq_notify_flags notify_flags)
3609{
3610 struct irdma_cq *iwcq;
3611 struct irdma_cq_uk *ukcq;
3612 unsigned long flags;
3613 enum irdma_cmpl_notify cq_notify;
3614 bool promo_event = false;
3615 int ret = 0;
3616
3617 cq_notify = notify_flags == IB_CQ_SOLICITED ?
3618 IRDMA_CQ_COMPL_SOLICITED : IRDMA_CQ_COMPL_EVENT;
3619 iwcq = to_iwcq(ibcq);
3620 ukcq = &iwcq->sc_cq.cq_uk;
3621
3622 spin_lock_irqsave(&iwcq->lock, flags);
3623
3624 if (iwcq->last_notify == IRDMA_CQ_COMPL_SOLICITED && notify_flags != IB_CQ_SOLICITED)
3625 promo_event = true;
3626
3627 if (!iwcq->armed || promo_event) {
3628 iwcq->armed = true;
3629 iwcq->last_notify = cq_notify;
3630 irdma_uk_cq_request_notification(ukcq, cq_notify);
3631 }
3632
3633 if ((notify_flags & IB_CQ_REPORT_MISSED_EVENTS) && !irdma_cq_empty(iwcq))
3634 ret = 1;
3635 spin_unlock_irqrestore(&iwcq->lock, flags);
3636
3637 return ret;
3638}
3639
3640static int irdma_roce_port_immutable(struct ib_device *ibdev, u32 port_num,
3641 struct ib_port_immutable *immutable)
3642{
3643 struct ib_port_attr attr;
3644 int err;
3645
3646 immutable->core_cap_flags = RDMA_CORE_PORT_IBA_ROCE_UDP_ENCAP;
3647 err = ib_query_port(ibdev, port_num, &attr);
3648 if (err)
3649 return err;
3650
3651 immutable->max_mad_size = IB_MGMT_MAD_SIZE;
3652 immutable->pkey_tbl_len = attr.pkey_tbl_len;
3653 immutable->gid_tbl_len = attr.gid_tbl_len;
3654
3655 return 0;
3656}
3657
3658static int irdma_iw_port_immutable(struct ib_device *ibdev, u32 port_num,
3659 struct ib_port_immutable *immutable)
3660{
3661 struct ib_port_attr attr;
3662 int err;
3663
3664 immutable->core_cap_flags = RDMA_CORE_PORT_IWARP;
3665 err = ib_query_port(ibdev, port_num, &attr);
3666 if (err)
3667 return err;
3668 immutable->gid_tbl_len = attr.gid_tbl_len;
3669
3670 return 0;
3671}
3672
3673static const char *const irdma_hw_stat_names[] = {
3674
3675 [IRDMA_HW_STAT_INDEX_RXVLANERR] = "rxVlanErrors",
3676 [IRDMA_HW_STAT_INDEX_IP4RXDISCARD] = "ip4InDiscards",
3677 [IRDMA_HW_STAT_INDEX_IP4RXTRUNC] = "ip4InTruncatedPkts",
3678 [IRDMA_HW_STAT_INDEX_IP4TXNOROUTE] = "ip4OutNoRoutes",
3679 [IRDMA_HW_STAT_INDEX_IP6RXDISCARD] = "ip6InDiscards",
3680 [IRDMA_HW_STAT_INDEX_IP6RXTRUNC] = "ip6InTruncatedPkts",
3681 [IRDMA_HW_STAT_INDEX_IP6TXNOROUTE] = "ip6OutNoRoutes",
3682 [IRDMA_HW_STAT_INDEX_TCPRTXSEG] = "tcpRetransSegs",
3683 [IRDMA_HW_STAT_INDEX_TCPRXOPTERR] = "tcpInOptErrors",
3684 [IRDMA_HW_STAT_INDEX_TCPRXPROTOERR] = "tcpInProtoErrors",
3685 [IRDMA_HW_STAT_INDEX_RXRPCNPHANDLED] = "cnpHandled",
3686 [IRDMA_HW_STAT_INDEX_RXRPCNPIGNORED] = "cnpIgnored",
3687 [IRDMA_HW_STAT_INDEX_TXNPCNPSENT] = "cnpSent",
3688
3689
3690 [IRDMA_HW_STAT_INDEX_IP4RXOCTS + IRDMA_HW_STAT_INDEX_MAX_32] =
3691 "ip4InOctets",
3692 [IRDMA_HW_STAT_INDEX_IP4RXPKTS + IRDMA_HW_STAT_INDEX_MAX_32] =
3693 "ip4InPkts",
3694 [IRDMA_HW_STAT_INDEX_IP4RXFRAGS + IRDMA_HW_STAT_INDEX_MAX_32] =
3695 "ip4InReasmRqd",
3696 [IRDMA_HW_STAT_INDEX_IP4RXMCOCTS + IRDMA_HW_STAT_INDEX_MAX_32] =
3697 "ip4InMcastOctets",
3698 [IRDMA_HW_STAT_INDEX_IP4RXMCPKTS + IRDMA_HW_STAT_INDEX_MAX_32] =
3699 "ip4InMcastPkts",
3700 [IRDMA_HW_STAT_INDEX_IP4TXOCTS + IRDMA_HW_STAT_INDEX_MAX_32] =
3701 "ip4OutOctets",
3702 [IRDMA_HW_STAT_INDEX_IP4TXPKTS + IRDMA_HW_STAT_INDEX_MAX_32] =
3703 "ip4OutPkts",
3704 [IRDMA_HW_STAT_INDEX_IP4TXFRAGS + IRDMA_HW_STAT_INDEX_MAX_32] =
3705 "ip4OutSegRqd",
3706 [IRDMA_HW_STAT_INDEX_IP4TXMCOCTS + IRDMA_HW_STAT_INDEX_MAX_32] =
3707 "ip4OutMcastOctets",
3708 [IRDMA_HW_STAT_INDEX_IP4TXMCPKTS + IRDMA_HW_STAT_INDEX_MAX_32] =
3709 "ip4OutMcastPkts",
3710 [IRDMA_HW_STAT_INDEX_IP6RXOCTS + IRDMA_HW_STAT_INDEX_MAX_32] =
3711 "ip6InOctets",
3712 [IRDMA_HW_STAT_INDEX_IP6RXPKTS + IRDMA_HW_STAT_INDEX_MAX_32] =
3713 "ip6InPkts",
3714 [IRDMA_HW_STAT_INDEX_IP6RXFRAGS + IRDMA_HW_STAT_INDEX_MAX_32] =
3715 "ip6InReasmRqd",
3716 [IRDMA_HW_STAT_INDEX_IP6RXMCOCTS + IRDMA_HW_STAT_INDEX_MAX_32] =
3717 "ip6InMcastOctets",
3718 [IRDMA_HW_STAT_INDEX_IP6RXMCPKTS + IRDMA_HW_STAT_INDEX_MAX_32] =
3719 "ip6InMcastPkts",
3720 [IRDMA_HW_STAT_INDEX_IP6TXOCTS + IRDMA_HW_STAT_INDEX_MAX_32] =
3721 "ip6OutOctets",
3722 [IRDMA_HW_STAT_INDEX_IP6TXPKTS + IRDMA_HW_STAT_INDEX_MAX_32] =
3723 "ip6OutPkts",
3724 [IRDMA_HW_STAT_INDEX_IP6TXFRAGS + IRDMA_HW_STAT_INDEX_MAX_32] =
3725 "ip6OutSegRqd",
3726 [IRDMA_HW_STAT_INDEX_IP6TXMCOCTS + IRDMA_HW_STAT_INDEX_MAX_32] =
3727 "ip6OutMcastOctets",
3728 [IRDMA_HW_STAT_INDEX_IP6TXMCPKTS + IRDMA_HW_STAT_INDEX_MAX_32] =
3729 "ip6OutMcastPkts",
3730 [IRDMA_HW_STAT_INDEX_TCPRXSEGS + IRDMA_HW_STAT_INDEX_MAX_32] =
3731 "tcpInSegs",
3732 [IRDMA_HW_STAT_INDEX_TCPTXSEG + IRDMA_HW_STAT_INDEX_MAX_32] =
3733 "tcpOutSegs",
3734 [IRDMA_HW_STAT_INDEX_RDMARXRDS + IRDMA_HW_STAT_INDEX_MAX_32] =
3735 "iwInRdmaReads",
3736 [IRDMA_HW_STAT_INDEX_RDMARXSNDS + IRDMA_HW_STAT_INDEX_MAX_32] =
3737 "iwInRdmaSends",
3738 [IRDMA_HW_STAT_INDEX_RDMARXWRS + IRDMA_HW_STAT_INDEX_MAX_32] =
3739 "iwInRdmaWrites",
3740 [IRDMA_HW_STAT_INDEX_RDMATXRDS + IRDMA_HW_STAT_INDEX_MAX_32] =
3741 "iwOutRdmaReads",
3742 [IRDMA_HW_STAT_INDEX_RDMATXSNDS + IRDMA_HW_STAT_INDEX_MAX_32] =
3743 "iwOutRdmaSends",
3744 [IRDMA_HW_STAT_INDEX_RDMATXWRS + IRDMA_HW_STAT_INDEX_MAX_32] =
3745 "iwOutRdmaWrites",
3746 [IRDMA_HW_STAT_INDEX_RDMAVBND + IRDMA_HW_STAT_INDEX_MAX_32] =
3747 "iwRdmaBnd",
3748 [IRDMA_HW_STAT_INDEX_RDMAVINV + IRDMA_HW_STAT_INDEX_MAX_32] =
3749 "iwRdmaInv",
3750 [IRDMA_HW_STAT_INDEX_UDPRXPKTS + IRDMA_HW_STAT_INDEX_MAX_32] =
3751 "RxUDP",
3752 [IRDMA_HW_STAT_INDEX_UDPTXPKTS + IRDMA_HW_STAT_INDEX_MAX_32] =
3753 "TxUDP",
3754 [IRDMA_HW_STAT_INDEX_RXNPECNMARKEDPKTS + IRDMA_HW_STAT_INDEX_MAX_32] =
3755 "RxECNMrkd",
3756};
3757
3758static void irdma_get_dev_fw_str(struct ib_device *dev, char *str)
3759{
3760 struct irdma_device *iwdev = to_iwdev(dev);
3761
3762 snprintf(str, IB_FW_VERSION_NAME_MAX, "%u.%u",
3763 irdma_fw_major_ver(&iwdev->rf->sc_dev),
3764 irdma_fw_minor_ver(&iwdev->rf->sc_dev));
3765}
3766
3767
3768
3769
3770
3771
3772static struct rdma_hw_stats *irdma_alloc_hw_port_stats(struct ib_device *ibdev,
3773 u32 port_num)
3774{
3775 int num_counters = IRDMA_HW_STAT_INDEX_MAX_32 +
3776 IRDMA_HW_STAT_INDEX_MAX_64;
3777 unsigned long lifespan = RDMA_HW_STATS_DEFAULT_LIFESPAN;
3778
3779 BUILD_BUG_ON(ARRAY_SIZE(irdma_hw_stat_names) !=
3780 (IRDMA_HW_STAT_INDEX_MAX_32 + IRDMA_HW_STAT_INDEX_MAX_64));
3781
3782 return rdma_alloc_hw_stats_struct(irdma_hw_stat_names, num_counters,
3783 lifespan);
3784}
3785
3786
3787
3788
3789
3790
3791
3792
3793static int irdma_get_hw_stats(struct ib_device *ibdev,
3794 struct rdma_hw_stats *stats, u32 port_num,
3795 int index)
3796{
3797 struct irdma_device *iwdev = to_iwdev(ibdev);
3798 struct irdma_dev_hw_stats *hw_stats = &iwdev->vsi.pestat->hw_stats;
3799
3800 if (iwdev->rf->rdma_ver >= IRDMA_GEN_2)
3801 irdma_cqp_gather_stats_cmd(&iwdev->rf->sc_dev, iwdev->vsi.pestat, true);
3802 else
3803 irdma_cqp_gather_stats_gen1(&iwdev->rf->sc_dev, iwdev->vsi.pestat);
3804
3805 memcpy(&stats->value[0], hw_stats, sizeof(*hw_stats));
3806
3807 return stats->num_counters;
3808}
3809
3810
3811
3812
3813
3814
3815
3816
3817static int irdma_query_gid(struct ib_device *ibdev, u32 port, int index,
3818 union ib_gid *gid)
3819{
3820 struct irdma_device *iwdev = to_iwdev(ibdev);
3821
3822 memset(gid->raw, 0, sizeof(gid->raw));
3823 ether_addr_copy(gid->raw, iwdev->netdev->dev_addr);
3824
3825 return 0;
3826}
3827
3828
3829
3830
3831
3832
3833static void mcast_list_add(struct irdma_pci_f *rf,
3834 struct mc_table_list *new_elem)
3835{
3836 list_add(&new_elem->list, &rf->mc_qht_list.list);
3837}
3838
3839
3840
3841
3842
3843static void mcast_list_del(struct mc_table_list *mc_qht_elem)
3844{
3845 if (mc_qht_elem)
3846 list_del(&mc_qht_elem->list);
3847}
3848
3849
3850
3851
3852
3853
3854static struct mc_table_list *mcast_list_lookup_ip(struct irdma_pci_f *rf,
3855 u32 *ip_mcast)
3856{
3857 struct mc_table_list *mc_qht_el;
3858 struct list_head *pos, *q;
3859
3860 list_for_each_safe (pos, q, &rf->mc_qht_list.list) {
3861 mc_qht_el = list_entry(pos, struct mc_table_list, list);
3862 if (!memcmp(mc_qht_el->mc_info.dest_ip, ip_mcast,
3863 sizeof(mc_qht_el->mc_info.dest_ip)))
3864 return mc_qht_el;
3865 }
3866
3867 return NULL;
3868}
3869
3870
3871
3872
3873
3874
3875
3876
3877
3878static int irdma_mcast_cqp_op(struct irdma_device *iwdev,
3879 struct irdma_mcast_grp_info *mc_grp_ctx, u8 op)
3880{
3881 struct cqp_cmds_info *cqp_info;
3882 struct irdma_cqp_request *cqp_request;
3883 enum irdma_status_code status;
3884
3885 cqp_request = irdma_alloc_and_get_cqp_request(&iwdev->rf->cqp, true);
3886 if (!cqp_request)
3887 return -ENOMEM;
3888
3889 cqp_request->info.in.u.mc_create.info = *mc_grp_ctx;
3890 cqp_info = &cqp_request->info;
3891 cqp_info->cqp_cmd = op;
3892 cqp_info->post_sq = 1;
3893 cqp_info->in.u.mc_create.scratch = (uintptr_t)cqp_request;
3894 cqp_info->in.u.mc_create.cqp = &iwdev->rf->cqp.sc_cqp;
3895 status = irdma_handle_cqp_op(iwdev->rf, cqp_request);
3896 irdma_put_cqp_request(&iwdev->rf->cqp, cqp_request);
3897 if (status)
3898 return -ENOMEM;
3899
3900 return 0;
3901}
3902
3903
3904
3905
3906
3907
3908
3909
3910void irdma_mcast_mac(u32 *ip_addr, u8 *mac, bool ipv4)
3911{
3912 u8 *ip = (u8 *)ip_addr;
3913
3914 if (ipv4) {
3915 unsigned char mac4[ETH_ALEN] = {0x01, 0x00, 0x5E, 0x00,
3916 0x00, 0x00};
3917
3918 mac4[3] = ip[2] & 0x7F;
3919 mac4[4] = ip[1];
3920 mac4[5] = ip[0];
3921 ether_addr_copy(mac, mac4);
3922 } else {
3923 unsigned char mac6[ETH_ALEN] = {0x33, 0x33, 0x00, 0x00,
3924 0x00, 0x00};
3925
3926 mac6[2] = ip[3];
3927 mac6[3] = ip[2];
3928 mac6[4] = ip[1];
3929 mac6[5] = ip[0];
3930 ether_addr_copy(mac, mac6);
3931 }
3932}
3933
3934
3935
3936
3937
3938
3939
3940
3941
3942static int irdma_attach_mcast(struct ib_qp *ibqp, union ib_gid *ibgid, u16 lid)
3943{
3944 struct irdma_qp *iwqp = to_iwqp(ibqp);
3945 struct irdma_device *iwdev = iwqp->iwdev;
3946 struct irdma_pci_f *rf = iwdev->rf;
3947 struct mc_table_list *mc_qht_elem;
3948 struct irdma_mcast_grp_ctx_entry_info mcg_info = {};
3949 unsigned long flags;
3950 u32 ip_addr[4] = {};
3951 u32 mgn;
3952 u32 no_mgs;
3953 int ret = 0;
3954 bool ipv4;
3955 u16 vlan_id;
3956 union {
3957 struct sockaddr saddr;
3958 struct sockaddr_in saddr_in;
3959 struct sockaddr_in6 saddr_in6;
3960 } sgid_addr;
3961 unsigned char dmac[ETH_ALEN];
3962
3963 rdma_gid2ip((struct sockaddr *)&sgid_addr, ibgid);
3964
3965 if (!ipv6_addr_v4mapped((struct in6_addr *)ibgid)) {
3966 irdma_copy_ip_ntohl(ip_addr,
3967 sgid_addr.saddr_in6.sin6_addr.in6_u.u6_addr32);
3968 irdma_netdev_vlan_ipv6(ip_addr, &vlan_id, NULL);
3969 ipv4 = false;
3970 ibdev_dbg(&iwdev->ibdev,
3971 "VERBS: qp_id=%d, IP6address=%pI6\n", ibqp->qp_num,
3972 ip_addr);
3973 irdma_mcast_mac(ip_addr, dmac, false);
3974 } else {
3975 ip_addr[0] = ntohl(sgid_addr.saddr_in.sin_addr.s_addr);
3976 ipv4 = true;
3977 vlan_id = irdma_get_vlan_ipv4(ip_addr);
3978 irdma_mcast_mac(ip_addr, dmac, true);
3979 ibdev_dbg(&iwdev->ibdev,
3980 "VERBS: qp_id=%d, IP4address=%pI4, MAC=%pM\n",
3981 ibqp->qp_num, ip_addr, dmac);
3982 }
3983
3984 spin_lock_irqsave(&rf->qh_list_lock, flags);
3985 mc_qht_elem = mcast_list_lookup_ip(rf, ip_addr);
3986 if (!mc_qht_elem) {
3987 struct irdma_dma_mem *dma_mem_mc;
3988
3989 spin_unlock_irqrestore(&rf->qh_list_lock, flags);
3990 mc_qht_elem = kzalloc(sizeof(*mc_qht_elem), GFP_KERNEL);
3991 if (!mc_qht_elem)
3992 return -ENOMEM;
3993
3994 mc_qht_elem->mc_info.ipv4_valid = ipv4;
3995 memcpy(mc_qht_elem->mc_info.dest_ip, ip_addr,
3996 sizeof(mc_qht_elem->mc_info.dest_ip));
3997 ret = irdma_alloc_rsrc(rf, rf->allocated_mcgs, rf->max_mcg,
3998 &mgn, &rf->next_mcg);
3999 if (ret) {
4000 kfree(mc_qht_elem);
4001 return -ENOMEM;
4002 }
4003
4004 mc_qht_elem->mc_info.mgn = mgn;
4005 dma_mem_mc = &mc_qht_elem->mc_grp_ctx.dma_mem_mc;
4006 dma_mem_mc->size = ALIGN(sizeof(u64) * IRDMA_MAX_MGS_PER_CTX,
4007 IRDMA_HW_PAGE_SIZE);
4008 dma_mem_mc->va = dma_alloc_coherent(rf->hw.device,
4009 dma_mem_mc->size,
4010 &dma_mem_mc->pa,
4011 GFP_KERNEL);
4012 if (!dma_mem_mc->va) {
4013 irdma_free_rsrc(rf, rf->allocated_mcgs, mgn);
4014 kfree(mc_qht_elem);
4015 return -ENOMEM;
4016 }
4017
4018 mc_qht_elem->mc_grp_ctx.mg_id = (u16)mgn;
4019 memcpy(mc_qht_elem->mc_grp_ctx.dest_ip_addr, ip_addr,
4020 sizeof(mc_qht_elem->mc_grp_ctx.dest_ip_addr));
4021 mc_qht_elem->mc_grp_ctx.ipv4_valid = ipv4;
4022 mc_qht_elem->mc_grp_ctx.vlan_id = vlan_id;
4023 if (vlan_id < VLAN_N_VID)
4024 mc_qht_elem->mc_grp_ctx.vlan_valid = true;
4025 mc_qht_elem->mc_grp_ctx.hmc_fcn_id = iwdev->vsi.fcn_id;
4026 mc_qht_elem->mc_grp_ctx.qs_handle =
4027 iwqp->sc_qp.vsi->qos[iwqp->sc_qp.user_pri].qs_handle;
4028 ether_addr_copy(mc_qht_elem->mc_grp_ctx.dest_mac_addr, dmac);
4029
4030 spin_lock_irqsave(&rf->qh_list_lock, flags);
4031 mcast_list_add(rf, mc_qht_elem);
4032 } else {
4033 if (mc_qht_elem->mc_grp_ctx.no_of_mgs ==
4034 IRDMA_MAX_MGS_PER_CTX) {
4035 spin_unlock_irqrestore(&rf->qh_list_lock, flags);
4036 return -ENOMEM;
4037 }
4038 }
4039
4040 mcg_info.qp_id = iwqp->ibqp.qp_num;
4041 no_mgs = mc_qht_elem->mc_grp_ctx.no_of_mgs;
4042 irdma_sc_add_mcast_grp(&mc_qht_elem->mc_grp_ctx, &mcg_info);
4043 spin_unlock_irqrestore(&rf->qh_list_lock, flags);
4044
4045
4046 if (!no_mgs) {
4047 ret = irdma_mcast_cqp_op(iwdev, &mc_qht_elem->mc_grp_ctx,
4048 IRDMA_OP_MC_CREATE);
4049 } else if (no_mgs != mc_qht_elem->mc_grp_ctx.no_of_mgs) {
4050 ret = irdma_mcast_cqp_op(iwdev, &mc_qht_elem->mc_grp_ctx,
4051 IRDMA_OP_MC_MODIFY);
4052 } else {
4053 return 0;
4054 }
4055
4056 if (ret)
4057 goto error;
4058
4059 return 0;
4060
4061error:
4062 irdma_sc_del_mcast_grp(&mc_qht_elem->mc_grp_ctx, &mcg_info);
4063 if (!mc_qht_elem->mc_grp_ctx.no_of_mgs) {
4064 mcast_list_del(mc_qht_elem);
4065 dma_free_coherent(rf->hw.device,
4066 mc_qht_elem->mc_grp_ctx.dma_mem_mc.size,
4067 mc_qht_elem->mc_grp_ctx.dma_mem_mc.va,
4068 mc_qht_elem->mc_grp_ctx.dma_mem_mc.pa);
4069 mc_qht_elem->mc_grp_ctx.dma_mem_mc.va = NULL;
4070 irdma_free_rsrc(rf, rf->allocated_mcgs,
4071 mc_qht_elem->mc_grp_ctx.mg_id);
4072 kfree(mc_qht_elem);
4073 }
4074
4075 return ret;
4076}
4077
4078
4079
4080
4081
4082
4083
4084
4085
4086static int irdma_detach_mcast(struct ib_qp *ibqp, union ib_gid *ibgid, u16 lid)
4087{
4088 struct irdma_qp *iwqp = to_iwqp(ibqp);
4089 struct irdma_device *iwdev = iwqp->iwdev;
4090 struct irdma_pci_f *rf = iwdev->rf;
4091 u32 ip_addr[4] = {};
4092 struct mc_table_list *mc_qht_elem;
4093 struct irdma_mcast_grp_ctx_entry_info mcg_info = {};
4094 int ret;
4095 unsigned long flags;
4096 union {
4097 struct sockaddr saddr;
4098 struct sockaddr_in saddr_in;
4099 struct sockaddr_in6 saddr_in6;
4100 } sgid_addr;
4101
4102 rdma_gid2ip((struct sockaddr *)&sgid_addr, ibgid);
4103 if (!ipv6_addr_v4mapped((struct in6_addr *)ibgid))
4104 irdma_copy_ip_ntohl(ip_addr,
4105 sgid_addr.saddr_in6.sin6_addr.in6_u.u6_addr32);
4106 else
4107 ip_addr[0] = ntohl(sgid_addr.saddr_in.sin_addr.s_addr);
4108
4109 spin_lock_irqsave(&rf->qh_list_lock, flags);
4110 mc_qht_elem = mcast_list_lookup_ip(rf, ip_addr);
4111 if (!mc_qht_elem) {
4112 spin_unlock_irqrestore(&rf->qh_list_lock, flags);
4113 ibdev_dbg(&iwdev->ibdev,
4114 "VERBS: address not found MCG\n");
4115 return 0;
4116 }
4117
4118 mcg_info.qp_id = iwqp->ibqp.qp_num;
4119 irdma_sc_del_mcast_grp(&mc_qht_elem->mc_grp_ctx, &mcg_info);
4120 if (!mc_qht_elem->mc_grp_ctx.no_of_mgs) {
4121 mcast_list_del(mc_qht_elem);
4122 spin_unlock_irqrestore(&rf->qh_list_lock, flags);
4123 ret = irdma_mcast_cqp_op(iwdev, &mc_qht_elem->mc_grp_ctx,
4124 IRDMA_OP_MC_DESTROY);
4125 if (ret) {
4126 ibdev_dbg(&iwdev->ibdev,
4127 "VERBS: failed MC_DESTROY MCG\n");
4128 spin_lock_irqsave(&rf->qh_list_lock, flags);
4129 mcast_list_add(rf, mc_qht_elem);
4130 spin_unlock_irqrestore(&rf->qh_list_lock, flags);
4131 return -EAGAIN;
4132 }
4133
4134 dma_free_coherent(rf->hw.device,
4135 mc_qht_elem->mc_grp_ctx.dma_mem_mc.size,
4136 mc_qht_elem->mc_grp_ctx.dma_mem_mc.va,
4137 mc_qht_elem->mc_grp_ctx.dma_mem_mc.pa);
4138 mc_qht_elem->mc_grp_ctx.dma_mem_mc.va = NULL;
4139 irdma_free_rsrc(rf, rf->allocated_mcgs,
4140 mc_qht_elem->mc_grp_ctx.mg_id);
4141 kfree(mc_qht_elem);
4142 } else {
4143 spin_unlock_irqrestore(&rf->qh_list_lock, flags);
4144 ret = irdma_mcast_cqp_op(iwdev, &mc_qht_elem->mc_grp_ctx,
4145 IRDMA_OP_MC_MODIFY);
4146 if (ret) {
4147 ibdev_dbg(&iwdev->ibdev,
4148 "VERBS: failed Modify MCG\n");
4149 return ret;
4150 }
4151 }
4152
4153 return 0;
4154}
4155
4156
4157
4158
4159
4160
4161
4162
4163
4164static int irdma_create_ah(struct ib_ah *ibah,
4165 struct rdma_ah_init_attr *attr,
4166 struct ib_udata *udata)
4167{
4168 struct irdma_pd *pd = to_iwpd(ibah->pd);
4169 struct irdma_ah *ah = container_of(ibah, struct irdma_ah, ibah);
4170 struct rdma_ah_attr *ah_attr = attr->ah_attr;
4171 const struct ib_gid_attr *sgid_attr;
4172 struct irdma_device *iwdev = to_iwdev(ibah->pd->device);
4173 struct irdma_pci_f *rf = iwdev->rf;
4174 struct irdma_sc_ah *sc_ah;
4175 u32 ah_id = 0;
4176 struct irdma_ah_info *ah_info;
4177 struct irdma_create_ah_resp uresp;
4178 union {
4179 struct sockaddr saddr;
4180 struct sockaddr_in saddr_in;
4181 struct sockaddr_in6 saddr_in6;
4182 } sgid_addr, dgid_addr;
4183 int err;
4184 u8 dmac[ETH_ALEN];
4185
4186 err = irdma_alloc_rsrc(rf, rf->allocated_ahs, rf->max_ah, &ah_id,
4187 &rf->next_ah);
4188 if (err)
4189 return err;
4190
4191 ah->pd = pd;
4192 sc_ah = &ah->sc_ah;
4193 sc_ah->ah_info.ah_idx = ah_id;
4194 sc_ah->ah_info.vsi = &iwdev->vsi;
4195 irdma_sc_init_ah(&rf->sc_dev, sc_ah);
4196 ah->sgid_index = ah_attr->grh.sgid_index;
4197 sgid_attr = ah_attr->grh.sgid_attr;
4198 memcpy(&ah->dgid, &ah_attr->grh.dgid, sizeof(ah->dgid));
4199 rdma_gid2ip((struct sockaddr *)&sgid_addr, &sgid_attr->gid);
4200 rdma_gid2ip((struct sockaddr *)&dgid_addr, &ah_attr->grh.dgid);
4201 ah->av.attrs = *ah_attr;
4202 ah->av.net_type = rdma_gid_attr_network_type(sgid_attr);
4203 ah->av.sgid_addr.saddr = sgid_addr.saddr;
4204 ah->av.dgid_addr.saddr = dgid_addr.saddr;
4205 ah_info = &sc_ah->ah_info;
4206 ah_info->ah_idx = ah_id;
4207 ah_info->pd_idx = pd->sc_pd.pd_id;
4208 if (ah_attr->ah_flags & IB_AH_GRH) {
4209 ah_info->flow_label = ah_attr->grh.flow_label;
4210 ah_info->hop_ttl = ah_attr->grh.hop_limit;
4211 ah_info->tc_tos = ah_attr->grh.traffic_class;
4212 }
4213
4214 ether_addr_copy(dmac, ah_attr->roce.dmac);
4215 if (rdma_gid_attr_network_type(sgid_attr) == RDMA_NETWORK_IPV4) {
4216 ah_info->ipv4_valid = true;
4217 ah_info->dest_ip_addr[0] =
4218 ntohl(dgid_addr.saddr_in.sin_addr.s_addr);
4219 ah_info->src_ip_addr[0] =
4220 ntohl(sgid_addr.saddr_in.sin_addr.s_addr);
4221 ah_info->do_lpbk = irdma_ipv4_is_lpb(ah_info->src_ip_addr[0],
4222 ah_info->dest_ip_addr[0]);
4223 if (ipv4_is_multicast(dgid_addr.saddr_in.sin_addr.s_addr)) {
4224 ah_info->do_lpbk = true;
4225 irdma_mcast_mac(ah_info->dest_ip_addr, dmac, true);
4226 }
4227 } else {
4228 irdma_copy_ip_ntohl(ah_info->dest_ip_addr,
4229 dgid_addr.saddr_in6.sin6_addr.in6_u.u6_addr32);
4230 irdma_copy_ip_ntohl(ah_info->src_ip_addr,
4231 sgid_addr.saddr_in6.sin6_addr.in6_u.u6_addr32);
4232 ah_info->do_lpbk = irdma_ipv6_is_lpb(ah_info->src_ip_addr,
4233 ah_info->dest_ip_addr);
4234 if (rdma_is_multicast_addr(&dgid_addr.saddr_in6.sin6_addr)) {
4235 ah_info->do_lpbk = true;
4236 irdma_mcast_mac(ah_info->dest_ip_addr, dmac, false);
4237 }
4238 }
4239
4240 err = rdma_read_gid_l2_fields(sgid_attr, &ah_info->vlan_tag,
4241 ah_info->mac_addr);
4242 if (err)
4243 goto error;
4244
4245 ah_info->dst_arpindex = irdma_add_arp(iwdev->rf, ah_info->dest_ip_addr,
4246 ah_info->ipv4_valid, dmac);
4247
4248 if (ah_info->dst_arpindex == -1) {
4249 err = -EINVAL;
4250 goto error;
4251 }
4252
4253 if (ah_info->vlan_tag >= VLAN_N_VID && iwdev->dcb)
4254 ah_info->vlan_tag = 0;
4255
4256 if (ah_info->vlan_tag < VLAN_N_VID) {
4257 ah_info->insert_vlan_tag = true;
4258 ah_info->vlan_tag |=
4259 rt_tos2priority(ah_info->tc_tos) << VLAN_PRIO_SHIFT;
4260 }
4261
4262 err = irdma_ah_cqp_op(iwdev->rf, sc_ah, IRDMA_OP_AH_CREATE,
4263 attr->flags & RDMA_CREATE_AH_SLEEPABLE,
4264 irdma_gsi_ud_qp_ah_cb, sc_ah);
4265
4266 if (err) {
4267 ibdev_dbg(&iwdev->ibdev,
4268 "VERBS: CQP-OP Create AH fail");
4269 goto error;
4270 }
4271
4272 if (!(attr->flags & RDMA_CREATE_AH_SLEEPABLE)) {
4273 int cnt = CQP_COMPL_WAIT_TIME_MS * CQP_TIMEOUT_THRESHOLD;
4274
4275 do {
4276 irdma_cqp_ce_handler(rf, &rf->ccq.sc_cq);
4277 mdelay(1);
4278 } while (!sc_ah->ah_info.ah_valid && --cnt);
4279
4280 if (!cnt) {
4281 ibdev_dbg(&iwdev->ibdev,
4282 "VERBS: CQP create AH timed out");
4283 err = -ETIMEDOUT;
4284 goto error;
4285 }
4286 }
4287
4288 if (udata) {
4289 uresp.ah_id = ah->sc_ah.ah_info.ah_idx;
4290 err = ib_copy_to_udata(udata, &uresp,
4291 min(sizeof(uresp), udata->outlen));
4292 }
4293 return 0;
4294
4295error:
4296 irdma_free_rsrc(iwdev->rf, iwdev->rf->allocated_ahs, ah_id);
4297
4298 return err;
4299}
4300
4301
4302
4303
4304
4305
4306static int irdma_destroy_ah(struct ib_ah *ibah, u32 ah_flags)
4307{
4308 struct irdma_device *iwdev = to_iwdev(ibah->device);
4309 struct irdma_ah *ah = to_iwah(ibah);
4310
4311 irdma_ah_cqp_op(iwdev->rf, &ah->sc_ah, IRDMA_OP_AH_DESTROY,
4312 false, NULL, ah);
4313
4314 irdma_free_rsrc(iwdev->rf, iwdev->rf->allocated_ahs,
4315 ah->sc_ah.ah_info.ah_idx);
4316
4317 return 0;
4318}
4319
4320
4321
4322
4323
4324
4325static int irdma_query_ah(struct ib_ah *ibah, struct rdma_ah_attr *ah_attr)
4326{
4327 struct irdma_ah *ah = to_iwah(ibah);
4328
4329 memset(ah_attr, 0, sizeof(*ah_attr));
4330 if (ah->av.attrs.ah_flags & IB_AH_GRH) {
4331 ah_attr->ah_flags = IB_AH_GRH;
4332 ah_attr->grh.flow_label = ah->sc_ah.ah_info.flow_label;
4333 ah_attr->grh.traffic_class = ah->sc_ah.ah_info.tc_tos;
4334 ah_attr->grh.hop_limit = ah->sc_ah.ah_info.hop_ttl;
4335 ah_attr->grh.sgid_index = ah->sgid_index;
4336 ah_attr->grh.sgid_index = ah->sgid_index;
4337 memcpy(&ah_attr->grh.dgid, &ah->dgid,
4338 sizeof(ah_attr->grh.dgid));
4339 }
4340
4341 return 0;
4342}
4343
4344static enum rdma_link_layer irdma_get_link_layer(struct ib_device *ibdev,
4345 u32 port_num)
4346{
4347 return IB_LINK_LAYER_ETHERNET;
4348}
4349
4350static __be64 irdma_mac_to_guid(struct net_device *ndev)
4351{
4352 unsigned char *mac = ndev->dev_addr;
4353 __be64 guid;
4354 unsigned char *dst = (unsigned char *)&guid;
4355
4356 dst[0] = mac[0] ^ 2;
4357 dst[1] = mac[1];
4358 dst[2] = mac[2];
4359 dst[3] = 0xff;
4360 dst[4] = 0xfe;
4361 dst[5] = mac[3];
4362 dst[6] = mac[4];
4363 dst[7] = mac[5];
4364
4365 return guid;
4366}
4367
4368static const struct ib_device_ops irdma_roce_dev_ops = {
4369 .attach_mcast = irdma_attach_mcast,
4370 .create_ah = irdma_create_ah,
4371 .create_user_ah = irdma_create_ah,
4372 .destroy_ah = irdma_destroy_ah,
4373 .detach_mcast = irdma_detach_mcast,
4374 .get_link_layer = irdma_get_link_layer,
4375 .get_port_immutable = irdma_roce_port_immutable,
4376 .modify_qp = irdma_modify_qp_roce,
4377 .query_ah = irdma_query_ah,
4378 .query_pkey = irdma_query_pkey,
4379};
4380
4381static const struct ib_device_ops irdma_iw_dev_ops = {
4382 .modify_qp = irdma_modify_qp,
4383 .get_port_immutable = irdma_iw_port_immutable,
4384 .query_gid = irdma_query_gid,
4385};
4386
4387static const struct ib_device_ops irdma_dev_ops = {
4388 .owner = THIS_MODULE,
4389 .driver_id = RDMA_DRIVER_IRDMA,
4390 .uverbs_abi_ver = IRDMA_ABI_VER,
4391
4392 .alloc_hw_port_stats = irdma_alloc_hw_port_stats,
4393 .alloc_mr = irdma_alloc_mr,
4394 .alloc_mw = irdma_alloc_mw,
4395 .alloc_pd = irdma_alloc_pd,
4396 .alloc_ucontext = irdma_alloc_ucontext,
4397 .create_cq = irdma_create_cq,
4398 .create_qp = irdma_create_qp,
4399 .dealloc_driver = irdma_ib_dealloc_device,
4400 .dealloc_mw = irdma_dealloc_mw,
4401 .dealloc_pd = irdma_dealloc_pd,
4402 .dealloc_ucontext = irdma_dealloc_ucontext,
4403 .dereg_mr = irdma_dereg_mr,
4404 .destroy_cq = irdma_destroy_cq,
4405 .destroy_qp = irdma_destroy_qp,
4406 .disassociate_ucontext = irdma_disassociate_ucontext,
4407 .get_dev_fw_str = irdma_get_dev_fw_str,
4408 .get_dma_mr = irdma_get_dma_mr,
4409 .get_hw_stats = irdma_get_hw_stats,
4410 .map_mr_sg = irdma_map_mr_sg,
4411 .mmap = irdma_mmap,
4412 .mmap_free = irdma_mmap_free,
4413 .poll_cq = irdma_poll_cq,
4414 .post_recv = irdma_post_recv,
4415 .post_send = irdma_post_send,
4416 .query_device = irdma_query_device,
4417 .query_port = irdma_query_port,
4418 .query_qp = irdma_query_qp,
4419 .reg_user_mr = irdma_reg_user_mr,
4420 .req_notify_cq = irdma_req_notify_cq,
4421 .resize_cq = irdma_resize_cq,
4422 INIT_RDMA_OBJ_SIZE(ib_pd, irdma_pd, ibpd),
4423 INIT_RDMA_OBJ_SIZE(ib_ucontext, irdma_ucontext, ibucontext),
4424 INIT_RDMA_OBJ_SIZE(ib_ah, irdma_ah, ibah),
4425 INIT_RDMA_OBJ_SIZE(ib_cq, irdma_cq, ibcq),
4426 INIT_RDMA_OBJ_SIZE(ib_mw, irdma_mr, ibmw),
4427};
4428
4429
4430
4431
4432
4433static void irdma_init_roce_device(struct irdma_device *iwdev)
4434{
4435 iwdev->ibdev.node_type = RDMA_NODE_IB_CA;
4436 iwdev->ibdev.node_guid = irdma_mac_to_guid(iwdev->netdev);
4437 ib_set_device_ops(&iwdev->ibdev, &irdma_roce_dev_ops);
4438}
4439
4440
4441
4442
4443
4444static int irdma_init_iw_device(struct irdma_device *iwdev)
4445{
4446 struct net_device *netdev = iwdev->netdev;
4447
4448 iwdev->ibdev.node_type = RDMA_NODE_RNIC;
4449 ether_addr_copy((u8 *)&iwdev->ibdev.node_guid, netdev->dev_addr);
4450 iwdev->ibdev.ops.iw_add_ref = irdma_qp_add_ref;
4451 iwdev->ibdev.ops.iw_rem_ref = irdma_qp_rem_ref;
4452 iwdev->ibdev.ops.iw_get_qp = irdma_get_qp;
4453 iwdev->ibdev.ops.iw_connect = irdma_connect;
4454 iwdev->ibdev.ops.iw_accept = irdma_accept;
4455 iwdev->ibdev.ops.iw_reject = irdma_reject;
4456 iwdev->ibdev.ops.iw_create_listen = irdma_create_listen;
4457 iwdev->ibdev.ops.iw_destroy_listen = irdma_destroy_listen;
4458 memcpy(iwdev->ibdev.iw_ifname, netdev->name,
4459 sizeof(iwdev->ibdev.iw_ifname));
4460 ib_set_device_ops(&iwdev->ibdev, &irdma_iw_dev_ops);
4461
4462 return 0;
4463}
4464
4465
4466
4467
4468
4469static int irdma_init_rdma_device(struct irdma_device *iwdev)
4470{
4471 struct pci_dev *pcidev = iwdev->rf->pcidev;
4472 int ret;
4473
4474 if (iwdev->roce_mode) {
4475 irdma_init_roce_device(iwdev);
4476 } else {
4477 ret = irdma_init_iw_device(iwdev);
4478 if (ret)
4479 return ret;
4480 }
4481 iwdev->ibdev.phys_port_cnt = 1;
4482 iwdev->ibdev.num_comp_vectors = iwdev->rf->ceqs_count;
4483 iwdev->ibdev.dev.parent = &pcidev->dev;
4484 ib_set_device_ops(&iwdev->ibdev, &irdma_dev_ops);
4485
4486 return 0;
4487}
4488
4489
4490
4491
4492
4493void irdma_port_ibevent(struct irdma_device *iwdev)
4494{
4495 struct ib_event event;
4496
4497 event.device = &iwdev->ibdev;
4498 event.element.port_num = 1;
4499 event.event =
4500 iwdev->iw_status ? IB_EVENT_PORT_ACTIVE : IB_EVENT_PORT_ERR;
4501 ib_dispatch_event(&event);
4502}
4503
4504
4505
4506
4507
4508
4509void irdma_ib_unregister_device(struct irdma_device *iwdev)
4510{
4511 iwdev->iw_status = 0;
4512 irdma_port_ibevent(iwdev);
4513 ib_unregister_device(&iwdev->ibdev);
4514}
4515
4516
4517
4518
4519
4520int irdma_ib_register_device(struct irdma_device *iwdev)
4521{
4522 int ret;
4523
4524 ret = irdma_init_rdma_device(iwdev);
4525 if (ret)
4526 return ret;
4527
4528 ret = ib_device_set_netdev(&iwdev->ibdev, iwdev->netdev, 1);
4529 if (ret)
4530 goto error;
4531 dma_set_max_seg_size(iwdev->rf->hw.device, UINT_MAX);
4532 ret = ib_register_device(&iwdev->ibdev, "irdma%d", iwdev->rf->hw.device);
4533 if (ret)
4534 goto error;
4535
4536 iwdev->iw_status = 1;
4537 irdma_port_ibevent(iwdev);
4538
4539 return 0;
4540
4541error:
4542 if (ret)
4543 ibdev_dbg(&iwdev->ibdev, "VERBS: Register RDMA device fail\n");
4544
4545 return ret;
4546}
4547
4548
4549
4550
4551
4552
4553
4554
4555void irdma_ib_dealloc_device(struct ib_device *ibdev)
4556{
4557 struct irdma_device *iwdev = to_iwdev(ibdev);
4558
4559 irdma_rt_deinit_hw(iwdev);
4560 irdma_ctrl_deinit_hw(iwdev->rf);
4561 kfree(iwdev->rf);
4562}
4563