1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35#include <linux/module.h>
36#include <linux/moduleparam.h>
37#include <linux/random.h>
38#include <linux/highmem.h>
39#include <linux/time.h>
40#include <linux/hugetlb.h>
41#include <linux/irq.h>
42#include <asm/byteorder.h>
43#include <net/ip.h>
44#include <rdma/ib_verbs.h>
45#include <rdma/iw_cm.h>
46#include <rdma/ib_user_verbs.h>
47#include <rdma/ib_umem.h>
48#include "i40iw.h"
49
50
51
52
53
54
55
56static int i40iw_query_device(struct ib_device *ibdev,
57 struct ib_device_attr *props,
58 struct ib_udata *udata)
59{
60 struct i40iw_device *iwdev = to_iwdev(ibdev);
61
62 if (udata->inlen || udata->outlen)
63 return -EINVAL;
64 memset(props, 0, sizeof(*props));
65 ether_addr_copy((u8 *)&props->sys_image_guid, iwdev->netdev->dev_addr);
66 props->fw_ver = I40IW_FW_VERSION;
67 props->device_cap_flags = iwdev->device_cap_flags;
68 props->vendor_id = iwdev->ldev->pcidev->vendor;
69 props->vendor_part_id = iwdev->ldev->pcidev->device;
70 props->hw_ver = (u32)iwdev->sc_dev.hw_rev;
71 props->max_mr_size = I40IW_MAX_OUTBOUND_MESSAGE_SIZE;
72 props->max_qp = iwdev->max_qp - iwdev->used_qps;
73 props->max_qp_wr = I40IW_MAX_QP_WRS;
74 props->max_sge = I40IW_MAX_WQ_FRAGMENT_COUNT;
75 props->max_cq = iwdev->max_cq - iwdev->used_cqs;
76 props->max_cqe = iwdev->max_cqe;
77 props->max_mr = iwdev->max_mr - iwdev->used_mrs;
78 props->max_pd = iwdev->max_pd - iwdev->used_pds;
79 props->max_sge_rd = I40IW_MAX_SGE_RD;
80 props->max_qp_rd_atom = I40IW_MAX_IRD_SIZE;
81 props->max_qp_init_rd_atom = props->max_qp_rd_atom;
82 props->atomic_cap = IB_ATOMIC_NONE;
83 props->max_map_per_fmr = 1;
84 props->max_fast_reg_page_list_len = I40IW_MAX_PAGES_PER_FMR;
85 return 0;
86}
87
88
89
90
91
92
93
94static int i40iw_query_port(struct ib_device *ibdev,
95 u8 port,
96 struct ib_port_attr *props)
97{
98 struct i40iw_device *iwdev = to_iwdev(ibdev);
99 struct net_device *netdev = iwdev->netdev;
100
101
102 props->max_mtu = IB_MTU_4096;
103 props->active_mtu = ib_mtu_int_to_enum(netdev->mtu);
104
105 props->lid = 1;
106 if (netif_carrier_ok(iwdev->netdev))
107 props->state = IB_PORT_ACTIVE;
108 else
109 props->state = IB_PORT_DOWN;
110 props->port_cap_flags = IB_PORT_CM_SUP | IB_PORT_REINIT_SUP |
111 IB_PORT_VENDOR_CLASS_SUP | IB_PORT_BOOT_MGMT_SUP;
112 props->gid_tbl_len = 1;
113 props->pkey_tbl_len = 1;
114 props->active_width = IB_WIDTH_4X;
115 props->active_speed = 1;
116 props->max_msg_sz = I40IW_MAX_OUTBOUND_MESSAGE_SIZE;
117 return 0;
118}
119
120
121
122
123
124
125
126
127
128static struct ib_ucontext *i40iw_alloc_ucontext(struct ib_device *ibdev,
129 struct ib_udata *udata)
130{
131 struct i40iw_device *iwdev = to_iwdev(ibdev);
132 struct i40iw_alloc_ucontext_req req;
133 struct i40iw_alloc_ucontext_resp uresp;
134 struct i40iw_ucontext *ucontext;
135
136 if (ib_copy_from_udata(&req, udata, sizeof(req)))
137 return ERR_PTR(-EINVAL);
138
139 if (req.userspace_ver < 4 || req.userspace_ver > I40IW_ABI_VER) {
140 i40iw_pr_err("Unsupported provider library version %u.\n", req.userspace_ver);
141 return ERR_PTR(-EINVAL);
142 }
143
144 memset(&uresp, 0, sizeof(uresp));
145 uresp.max_qps = iwdev->max_qp;
146 uresp.max_pds = iwdev->max_pd;
147 uresp.wq_size = iwdev->max_qp_wr * 2;
148 uresp.kernel_ver = req.userspace_ver;
149
150 ucontext = kzalloc(sizeof(*ucontext), GFP_KERNEL);
151 if (!ucontext)
152 return ERR_PTR(-ENOMEM);
153
154 ucontext->iwdev = iwdev;
155 ucontext->abi_ver = req.userspace_ver;
156
157 if (ib_copy_to_udata(udata, &uresp, sizeof(uresp))) {
158 kfree(ucontext);
159 return ERR_PTR(-EFAULT);
160 }
161
162 INIT_LIST_HEAD(&ucontext->cq_reg_mem_list);
163 spin_lock_init(&ucontext->cq_reg_mem_list_lock);
164 INIT_LIST_HEAD(&ucontext->qp_reg_mem_list);
165 spin_lock_init(&ucontext->qp_reg_mem_list_lock);
166
167 return &ucontext->ibucontext;
168}
169
170
171
172
173
174static int i40iw_dealloc_ucontext(struct ib_ucontext *context)
175{
176 struct i40iw_ucontext *ucontext = to_ucontext(context);
177 unsigned long flags;
178
179 spin_lock_irqsave(&ucontext->cq_reg_mem_list_lock, flags);
180 if (!list_empty(&ucontext->cq_reg_mem_list)) {
181 spin_unlock_irqrestore(&ucontext->cq_reg_mem_list_lock, flags);
182 return -EBUSY;
183 }
184 spin_unlock_irqrestore(&ucontext->cq_reg_mem_list_lock, flags);
185 spin_lock_irqsave(&ucontext->qp_reg_mem_list_lock, flags);
186 if (!list_empty(&ucontext->qp_reg_mem_list)) {
187 spin_unlock_irqrestore(&ucontext->qp_reg_mem_list_lock, flags);
188 return -EBUSY;
189 }
190 spin_unlock_irqrestore(&ucontext->qp_reg_mem_list_lock, flags);
191
192 kfree(ucontext);
193 return 0;
194}
195
196
197
198
199
200
201static int i40iw_mmap(struct ib_ucontext *context, struct vm_area_struct *vma)
202{
203 struct i40iw_ucontext *ucontext;
204 u64 db_addr_offset;
205 u64 push_offset;
206
207 ucontext = to_ucontext(context);
208 if (ucontext->iwdev->sc_dev.is_pf) {
209 db_addr_offset = I40IW_DB_ADDR_OFFSET;
210 push_offset = I40IW_PUSH_OFFSET;
211 if (vma->vm_pgoff)
212 vma->vm_pgoff += I40IW_PF_FIRST_PUSH_PAGE_INDEX - 1;
213 } else {
214 db_addr_offset = I40IW_VF_DB_ADDR_OFFSET;
215 push_offset = I40IW_VF_PUSH_OFFSET;
216 if (vma->vm_pgoff)
217 vma->vm_pgoff += I40IW_VF_FIRST_PUSH_PAGE_INDEX - 1;
218 }
219
220 vma->vm_pgoff += db_addr_offset >> PAGE_SHIFT;
221
222 if (vma->vm_pgoff == (db_addr_offset >> PAGE_SHIFT)) {
223 vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
224 vma->vm_private_data = ucontext;
225 } else {
226 if ((vma->vm_pgoff - (push_offset >> PAGE_SHIFT)) % 2)
227 vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
228 else
229 vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);
230 }
231
232 if (io_remap_pfn_range(vma, vma->vm_start,
233 vma->vm_pgoff + (pci_resource_start(ucontext->iwdev->ldev->pcidev, 0) >> PAGE_SHIFT),
234 PAGE_SIZE, vma->vm_page_prot))
235 return -EAGAIN;
236
237 return 0;
238}
239
240
241
242
243
244
245static void i40iw_alloc_push_page(struct i40iw_device *iwdev, struct i40iw_sc_qp *qp)
246{
247 struct i40iw_cqp_request *cqp_request;
248 struct cqp_commands_info *cqp_info;
249 enum i40iw_status_code status;
250
251 if (qp->push_idx != I40IW_INVALID_PUSH_PAGE_INDEX)
252 return;
253
254 cqp_request = i40iw_get_cqp_request(&iwdev->cqp, true);
255 if (!cqp_request)
256 return;
257
258 atomic_inc(&cqp_request->refcount);
259
260 cqp_info = &cqp_request->info;
261 cqp_info->cqp_cmd = OP_MANAGE_PUSH_PAGE;
262 cqp_info->post_sq = 1;
263
264 cqp_info->in.u.manage_push_page.info.qs_handle = qp->qs_handle;
265 cqp_info->in.u.manage_push_page.info.free_page = 0;
266 cqp_info->in.u.manage_push_page.cqp = &iwdev->cqp.sc_cqp;
267 cqp_info->in.u.manage_push_page.scratch = (uintptr_t)cqp_request;
268
269 status = i40iw_handle_cqp_op(iwdev, cqp_request);
270 if (!status)
271 qp->push_idx = cqp_request->compl_info.op_ret_val;
272 else
273 i40iw_pr_err("CQP-OP Push page fail");
274 i40iw_put_cqp_request(&iwdev->cqp, cqp_request);
275}
276
277
278
279
280
281
282static void i40iw_dealloc_push_page(struct i40iw_device *iwdev, struct i40iw_sc_qp *qp)
283{
284 struct i40iw_cqp_request *cqp_request;
285 struct cqp_commands_info *cqp_info;
286 enum i40iw_status_code status;
287
288 if (qp->push_idx == I40IW_INVALID_PUSH_PAGE_INDEX)
289 return;
290
291 cqp_request = i40iw_get_cqp_request(&iwdev->cqp, false);
292 if (!cqp_request)
293 return;
294
295 cqp_info = &cqp_request->info;
296 cqp_info->cqp_cmd = OP_MANAGE_PUSH_PAGE;
297 cqp_info->post_sq = 1;
298
299 cqp_info->in.u.manage_push_page.info.push_idx = qp->push_idx;
300 cqp_info->in.u.manage_push_page.info.qs_handle = qp->qs_handle;
301 cqp_info->in.u.manage_push_page.info.free_page = 1;
302 cqp_info->in.u.manage_push_page.cqp = &iwdev->cqp.sc_cqp;
303 cqp_info->in.u.manage_push_page.scratch = (uintptr_t)cqp_request;
304
305 status = i40iw_handle_cqp_op(iwdev, cqp_request);
306 if (!status)
307 qp->push_idx = I40IW_INVALID_PUSH_PAGE_INDEX;
308 else
309 i40iw_pr_err("CQP-OP Push page fail");
310}
311
312
313
314
315
316
317
318static struct ib_pd *i40iw_alloc_pd(struct ib_device *ibdev,
319 struct ib_ucontext *context,
320 struct ib_udata *udata)
321{
322 struct i40iw_pd *iwpd;
323 struct i40iw_device *iwdev = to_iwdev(ibdev);
324 struct i40iw_sc_dev *dev = &iwdev->sc_dev;
325 struct i40iw_alloc_pd_resp uresp;
326 struct i40iw_sc_pd *sc_pd;
327 struct i40iw_ucontext *ucontext;
328 u32 pd_id = 0;
329 int err;
330
331 if (iwdev->closing)
332 return ERR_PTR(-ENODEV);
333
334 err = i40iw_alloc_resource(iwdev, iwdev->allocated_pds,
335 iwdev->max_pd, &pd_id, &iwdev->next_pd);
336 if (err) {
337 i40iw_pr_err("alloc resource failed\n");
338 return ERR_PTR(err);
339 }
340
341 iwpd = kzalloc(sizeof(*iwpd), GFP_KERNEL);
342 if (!iwpd) {
343 err = -ENOMEM;
344 goto free_res;
345 }
346
347 sc_pd = &iwpd->sc_pd;
348
349 if (context) {
350 ucontext = to_ucontext(context);
351 dev->iw_pd_ops->pd_init(dev, sc_pd, pd_id, ucontext->abi_ver);
352 memset(&uresp, 0, sizeof(uresp));
353 uresp.pd_id = pd_id;
354 if (ib_copy_to_udata(udata, &uresp, sizeof(uresp))) {
355 err = -EFAULT;
356 goto error;
357 }
358 } else {
359 dev->iw_pd_ops->pd_init(dev, sc_pd, pd_id, -1);
360 }
361
362 i40iw_add_pdusecount(iwpd);
363 return &iwpd->ibpd;
364error:
365 kfree(iwpd);
366free_res:
367 i40iw_free_resource(iwdev, iwdev->allocated_pds, pd_id);
368 return ERR_PTR(err);
369}
370
371
372
373
374
375static int i40iw_dealloc_pd(struct ib_pd *ibpd)
376{
377 struct i40iw_pd *iwpd = to_iwpd(ibpd);
378 struct i40iw_device *iwdev = to_iwdev(ibpd->device);
379
380 i40iw_rem_pdusecount(iwpd, iwdev);
381 return 0;
382}
383
384
385
386
387
388
389
390static struct i40iw_pbl *i40iw_get_pbl(unsigned long va,
391 struct list_head *pbl_list)
392{
393 struct i40iw_pbl *iwpbl;
394
395 list_for_each_entry(iwpbl, pbl_list, list) {
396 if (iwpbl->user_base == va) {
397 iwpbl->on_list = false;
398 list_del(&iwpbl->list);
399 return iwpbl;
400 }
401 }
402 return NULL;
403}
404
405
406
407
408
409
410
411void i40iw_free_qp_resources(struct i40iw_device *iwdev,
412 struct i40iw_qp *iwqp,
413 u32 qp_num)
414{
415 struct i40iw_pbl *iwpbl = &iwqp->iwpbl;
416
417 i40iw_ieq_cleanup_qp(iwdev->vsi.ieq, &iwqp->sc_qp);
418 i40iw_dealloc_push_page(iwdev, &iwqp->sc_qp);
419 if (qp_num)
420 i40iw_free_resource(iwdev, iwdev->allocated_qps, qp_num);
421 if (iwpbl->pbl_allocated)
422 i40iw_free_pble(iwdev->pble_rsrc, &iwpbl->pble_alloc);
423 i40iw_free_dma_mem(iwdev->sc_dev.hw, &iwqp->q2_ctx_mem);
424 i40iw_free_dma_mem(iwdev->sc_dev.hw, &iwqp->kqp.dma_mem);
425 kfree(iwqp->kqp.wrid_mem);
426 iwqp->kqp.wrid_mem = NULL;
427 kfree(iwqp->allocated_buffer);
428}
429
430
431
432
433
434
435static void i40iw_clean_cqes(struct i40iw_qp *iwqp, struct i40iw_cq *iwcq)
436{
437 struct i40iw_cq_uk *ukcq = &iwcq->sc_cq.cq_uk;
438
439 ukcq->ops.iw_cq_clean(&iwqp->sc_qp.qp_uk, ukcq);
440}
441
442
443
444
445
446static int i40iw_destroy_qp(struct ib_qp *ibqp)
447{
448 struct i40iw_qp *iwqp = to_iwqp(ibqp);
449
450 iwqp->destroyed = 1;
451
452 if (iwqp->ibqp_state >= IB_QPS_INIT && iwqp->ibqp_state < IB_QPS_RTS)
453 i40iw_next_iw_state(iwqp, I40IW_QP_STATE_ERROR, 0, 0, 0);
454
455 if (!iwqp->user_mode) {
456 if (iwqp->iwscq) {
457 i40iw_clean_cqes(iwqp, iwqp->iwscq);
458 if (iwqp->iwrcq != iwqp->iwscq)
459 i40iw_clean_cqes(iwqp, iwqp->iwrcq);
460 }
461 }
462
463 i40iw_rem_ref(&iwqp->ibqp);
464 return 0;
465}
466
467
468
469
470
471
472
473static int i40iw_setup_virt_qp(struct i40iw_device *iwdev,
474 struct i40iw_qp *iwqp,
475 struct i40iw_qp_init_info *init_info)
476{
477 struct i40iw_pbl *iwpbl = &iwqp->iwpbl;
478 struct i40iw_qp_mr *qpmr = &iwpbl->qp_mr;
479
480 iwqp->page = qpmr->sq_page;
481 init_info->shadow_area_pa = cpu_to_le64(qpmr->shadow);
482 if (iwpbl->pbl_allocated) {
483 init_info->virtual_map = true;
484 init_info->sq_pa = qpmr->sq_pbl.idx;
485 init_info->rq_pa = qpmr->rq_pbl.idx;
486 } else {
487 init_info->sq_pa = qpmr->sq_pbl.addr;
488 init_info->rq_pa = qpmr->rq_pbl.addr;
489 }
490 return 0;
491}
492
493
494
495
496
497
498
499static int i40iw_setup_kmode_qp(struct i40iw_device *iwdev,
500 struct i40iw_qp *iwqp,
501 struct i40iw_qp_init_info *info)
502{
503 struct i40iw_dma_mem *mem = &iwqp->kqp.dma_mem;
504 u32 sqdepth, rqdepth;
505 u8 sqshift;
506 u32 size;
507 enum i40iw_status_code status;
508 struct i40iw_qp_uk_init_info *ukinfo = &info->qp_uk_init_info;
509
510 i40iw_get_wqe_shift(ukinfo->max_sq_frag_cnt, ukinfo->max_inline_data, &sqshift);
511 status = i40iw_get_sqdepth(ukinfo->sq_size, sqshift, &sqdepth);
512 if (status)
513 return -ENOMEM;
514
515 status = i40iw_get_rqdepth(ukinfo->rq_size, I40IW_MAX_RQ_WQE_SHIFT, &rqdepth);
516 if (status)
517 return -ENOMEM;
518
519 size = sqdepth * sizeof(struct i40iw_sq_uk_wr_trk_info) + (rqdepth << 3);
520 iwqp->kqp.wrid_mem = kzalloc(size, GFP_KERNEL);
521
522 ukinfo->sq_wrtrk_array = (struct i40iw_sq_uk_wr_trk_info *)iwqp->kqp.wrid_mem;
523 if (!ukinfo->sq_wrtrk_array)
524 return -ENOMEM;
525
526 ukinfo->rq_wrid_array = (u64 *)&ukinfo->sq_wrtrk_array[sqdepth];
527
528 size = (sqdepth + rqdepth) * I40IW_QP_WQE_MIN_SIZE;
529 size += (I40IW_SHADOW_AREA_SIZE << 3);
530
531 status = i40iw_allocate_dma_mem(iwdev->sc_dev.hw, mem, size, 256);
532 if (status) {
533 kfree(ukinfo->sq_wrtrk_array);
534 ukinfo->sq_wrtrk_array = NULL;
535 return -ENOMEM;
536 }
537
538 ukinfo->sq = mem->va;
539 info->sq_pa = mem->pa;
540
541 ukinfo->rq = &ukinfo->sq[sqdepth];
542 info->rq_pa = info->sq_pa + (sqdepth * I40IW_QP_WQE_MIN_SIZE);
543
544 ukinfo->shadow_area = ukinfo->rq[rqdepth].elem;
545 info->shadow_area_pa = info->rq_pa + (rqdepth * I40IW_QP_WQE_MIN_SIZE);
546
547 ukinfo->sq_size = sqdepth >> sqshift;
548 ukinfo->rq_size = rqdepth >> I40IW_MAX_RQ_WQE_SHIFT;
549 ukinfo->qp_id = iwqp->ibqp.qp_num;
550 return 0;
551}
552
553
554
555
556
557
558
559static struct ib_qp *i40iw_create_qp(struct ib_pd *ibpd,
560 struct ib_qp_init_attr *init_attr,
561 struct ib_udata *udata)
562{
563 struct i40iw_pd *iwpd = to_iwpd(ibpd);
564 struct i40iw_device *iwdev = to_iwdev(ibpd->device);
565 struct i40iw_cqp *iwcqp = &iwdev->cqp;
566 struct i40iw_qp *iwqp;
567 struct i40iw_ucontext *ucontext;
568 struct i40iw_create_qp_req req;
569 struct i40iw_create_qp_resp uresp;
570 u32 qp_num = 0;
571 void *mem;
572 enum i40iw_status_code ret;
573 int err_code;
574 int sq_size;
575 int rq_size;
576 struct i40iw_sc_qp *qp;
577 struct i40iw_sc_dev *dev = &iwdev->sc_dev;
578 struct i40iw_qp_init_info init_info;
579 struct i40iw_create_qp_info *qp_info;
580 struct i40iw_cqp_request *cqp_request;
581 struct cqp_commands_info *cqp_info;
582
583 struct i40iw_qp_host_ctx_info *ctx_info;
584 struct i40iwarp_offload_info *iwarp_info;
585 unsigned long flags;
586
587 if (iwdev->closing)
588 return ERR_PTR(-ENODEV);
589
590 if (init_attr->create_flags)
591 return ERR_PTR(-EINVAL);
592 if (init_attr->cap.max_inline_data > I40IW_MAX_INLINE_DATA_SIZE)
593 init_attr->cap.max_inline_data = I40IW_MAX_INLINE_DATA_SIZE;
594
595 if (init_attr->cap.max_send_sge > I40IW_MAX_WQ_FRAGMENT_COUNT)
596 init_attr->cap.max_send_sge = I40IW_MAX_WQ_FRAGMENT_COUNT;
597
598 if (init_attr->cap.max_recv_sge > I40IW_MAX_WQ_FRAGMENT_COUNT)
599 init_attr->cap.max_recv_sge = I40IW_MAX_WQ_FRAGMENT_COUNT;
600
601 memset(&init_info, 0, sizeof(init_info));
602
603 sq_size = init_attr->cap.max_send_wr;
604 rq_size = init_attr->cap.max_recv_wr;
605
606 init_info.vsi = &iwdev->vsi;
607 init_info.qp_uk_init_info.sq_size = sq_size;
608 init_info.qp_uk_init_info.rq_size = rq_size;
609 init_info.qp_uk_init_info.max_sq_frag_cnt = init_attr->cap.max_send_sge;
610 init_info.qp_uk_init_info.max_rq_frag_cnt = init_attr->cap.max_recv_sge;
611 init_info.qp_uk_init_info.max_inline_data = init_attr->cap.max_inline_data;
612
613 mem = kzalloc(sizeof(*iwqp), GFP_KERNEL);
614 if (!mem)
615 return ERR_PTR(-ENOMEM);
616
617 iwqp = (struct i40iw_qp *)mem;
618 iwqp->allocated_buffer = mem;
619 qp = &iwqp->sc_qp;
620 qp->back_qp = (void *)iwqp;
621 qp->push_idx = I40IW_INVALID_PUSH_PAGE_INDEX;
622
623 iwqp->ctx_info.iwarp_info = &iwqp->iwarp_info;
624
625 if (i40iw_allocate_dma_mem(dev->hw,
626 &iwqp->q2_ctx_mem,
627 I40IW_Q2_BUFFER_SIZE + I40IW_QP_CTX_SIZE,
628 256)) {
629 i40iw_pr_err("dma_mem failed\n");
630 err_code = -ENOMEM;
631 goto error;
632 }
633
634 init_info.q2 = iwqp->q2_ctx_mem.va;
635 init_info.q2_pa = iwqp->q2_ctx_mem.pa;
636
637 init_info.host_ctx = (void *)init_info.q2 + I40IW_Q2_BUFFER_SIZE;
638 init_info.host_ctx_pa = init_info.q2_pa + I40IW_Q2_BUFFER_SIZE;
639
640 err_code = i40iw_alloc_resource(iwdev, iwdev->allocated_qps, iwdev->max_qp,
641 &qp_num, &iwdev->next_qp);
642 if (err_code) {
643 i40iw_pr_err("qp resource\n");
644 goto error;
645 }
646
647 iwqp->iwdev = iwdev;
648 iwqp->iwpd = iwpd;
649 iwqp->ibqp.qp_num = qp_num;
650 qp = &iwqp->sc_qp;
651 iwqp->iwscq = to_iwcq(init_attr->send_cq);
652 iwqp->iwrcq = to_iwcq(init_attr->recv_cq);
653
654 iwqp->host_ctx.va = init_info.host_ctx;
655 iwqp->host_ctx.pa = init_info.host_ctx_pa;
656 iwqp->host_ctx.size = I40IW_QP_CTX_SIZE;
657
658 init_info.pd = &iwpd->sc_pd;
659 init_info.qp_uk_init_info.qp_id = iwqp->ibqp.qp_num;
660 iwqp->ctx_info.qp_compl_ctx = (uintptr_t)qp;
661
662 if (init_attr->qp_type != IB_QPT_RC) {
663 err_code = -EINVAL;
664 goto error;
665 }
666 if (iwdev->push_mode)
667 i40iw_alloc_push_page(iwdev, qp);
668 if (udata) {
669 err_code = ib_copy_from_udata(&req, udata, sizeof(req));
670 if (err_code) {
671 i40iw_pr_err("ib_copy_from_data\n");
672 goto error;
673 }
674 iwqp->ctx_info.qp_compl_ctx = req.user_compl_ctx;
675 if (ibpd->uobject && ibpd->uobject->context) {
676 iwqp->user_mode = 1;
677 ucontext = to_ucontext(ibpd->uobject->context);
678
679 if (req.user_wqe_buffers) {
680 struct i40iw_pbl *iwpbl;
681
682 spin_lock_irqsave(
683 &ucontext->qp_reg_mem_list_lock, flags);
684 iwpbl = i40iw_get_pbl(
685 (unsigned long)req.user_wqe_buffers,
686 &ucontext->qp_reg_mem_list);
687 spin_unlock_irqrestore(
688 &ucontext->qp_reg_mem_list_lock, flags);
689
690 if (!iwpbl) {
691 err_code = -ENODATA;
692 i40iw_pr_err("no pbl info\n");
693 goto error;
694 }
695 memcpy(&iwqp->iwpbl, iwpbl, sizeof(iwqp->iwpbl));
696 }
697 }
698 err_code = i40iw_setup_virt_qp(iwdev, iwqp, &init_info);
699 } else {
700 err_code = i40iw_setup_kmode_qp(iwdev, iwqp, &init_info);
701 }
702
703 if (err_code) {
704 i40iw_pr_err("setup qp failed\n");
705 goto error;
706 }
707
708 init_info.type = I40IW_QP_TYPE_IWARP;
709 ret = dev->iw_priv_qp_ops->qp_init(qp, &init_info);
710 if (ret) {
711 err_code = -EPROTO;
712 i40iw_pr_err("qp_init fail\n");
713 goto error;
714 }
715 ctx_info = &iwqp->ctx_info;
716 iwarp_info = &iwqp->iwarp_info;
717 iwarp_info->rd_enable = true;
718 iwarp_info->wr_rdresp_en = true;
719 if (!iwqp->user_mode) {
720 iwarp_info->fast_reg_en = true;
721 iwarp_info->priv_mode_en = true;
722 }
723 iwarp_info->ddp_ver = 1;
724 iwarp_info->rdmap_ver = 1;
725
726 ctx_info->iwarp_info_valid = true;
727 ctx_info->send_cq_num = iwqp->iwscq->sc_cq.cq_uk.cq_id;
728 ctx_info->rcv_cq_num = iwqp->iwrcq->sc_cq.cq_uk.cq_id;
729 if (qp->push_idx == I40IW_INVALID_PUSH_PAGE_INDEX) {
730 ctx_info->push_mode_en = false;
731 } else {
732 ctx_info->push_mode_en = true;
733 ctx_info->push_idx = qp->push_idx;
734 }
735
736 ret = dev->iw_priv_qp_ops->qp_setctx(&iwqp->sc_qp,
737 (u64 *)iwqp->host_ctx.va,
738 ctx_info);
739 ctx_info->iwarp_info_valid = false;
740 cqp_request = i40iw_get_cqp_request(iwcqp, true);
741 if (!cqp_request) {
742 err_code = -ENOMEM;
743 goto error;
744 }
745 cqp_info = &cqp_request->info;
746 qp_info = &cqp_request->info.in.u.qp_create.info;
747
748 memset(qp_info, 0, sizeof(*qp_info));
749
750 qp_info->cq_num_valid = true;
751 qp_info->next_iwarp_state = I40IW_QP_STATE_IDLE;
752
753 cqp_info->cqp_cmd = OP_QP_CREATE;
754 cqp_info->post_sq = 1;
755 cqp_info->in.u.qp_create.qp = qp;
756 cqp_info->in.u.qp_create.scratch = (uintptr_t)cqp_request;
757 ret = i40iw_handle_cqp_op(iwdev, cqp_request);
758 if (ret) {
759 i40iw_pr_err("CQP-OP QP create fail");
760 err_code = -EACCES;
761 goto error;
762 }
763
764 i40iw_add_ref(&iwqp->ibqp);
765 spin_lock_init(&iwqp->lock);
766 iwqp->sig_all = (init_attr->sq_sig_type == IB_SIGNAL_ALL_WR) ? 1 : 0;
767 iwdev->qp_table[qp_num] = iwqp;
768 i40iw_add_pdusecount(iwqp->iwpd);
769 i40iw_add_devusecount(iwdev);
770 if (ibpd->uobject && udata) {
771 memset(&uresp, 0, sizeof(uresp));
772 uresp.actual_sq_size = sq_size;
773 uresp.actual_rq_size = rq_size;
774 uresp.qp_id = qp_num;
775 uresp.push_idx = qp->push_idx;
776 err_code = ib_copy_to_udata(udata, &uresp, sizeof(uresp));
777 if (err_code) {
778 i40iw_pr_err("copy_to_udata failed\n");
779 i40iw_destroy_qp(&iwqp->ibqp);
780
781 return ERR_PTR(err_code);
782 }
783 }
784 init_completion(&iwqp->sq_drained);
785 init_completion(&iwqp->rq_drained);
786
787 return &iwqp->ibqp;
788error:
789 i40iw_free_qp_resources(iwdev, iwqp, qp_num);
790 return ERR_PTR(err_code);
791}
792
793
794
795
796
797
798
799
800static int i40iw_query_qp(struct ib_qp *ibqp,
801 struct ib_qp_attr *attr,
802 int attr_mask,
803 struct ib_qp_init_attr *init_attr)
804{
805 struct i40iw_qp *iwqp = to_iwqp(ibqp);
806 struct i40iw_sc_qp *qp = &iwqp->sc_qp;
807
808 attr->qp_access_flags = 0;
809 attr->cap.max_send_wr = qp->qp_uk.sq_size;
810 attr->cap.max_recv_wr = qp->qp_uk.rq_size;
811 attr->cap.max_inline_data = I40IW_MAX_INLINE_DATA_SIZE;
812 attr->cap.max_send_sge = I40IW_MAX_WQ_FRAGMENT_COUNT;
813 attr->cap.max_recv_sge = I40IW_MAX_WQ_FRAGMENT_COUNT;
814 attr->port_num = 1;
815 init_attr->event_handler = iwqp->ibqp.event_handler;
816 init_attr->qp_context = iwqp->ibqp.qp_context;
817 init_attr->send_cq = iwqp->ibqp.send_cq;
818 init_attr->recv_cq = iwqp->ibqp.recv_cq;
819 init_attr->srq = iwqp->ibqp.srq;
820 init_attr->cap = attr->cap;
821 init_attr->port_num = 1;
822 return 0;
823}
824
825
826
827
828
829
830
831
832void i40iw_hw_modify_qp(struct i40iw_device *iwdev, struct i40iw_qp *iwqp,
833 struct i40iw_modify_qp_info *info, bool wait)
834{
835 struct i40iw_cqp_request *cqp_request;
836 struct cqp_commands_info *cqp_info;
837 struct i40iw_modify_qp_info *m_info;
838 struct i40iw_gen_ae_info ae_info;
839
840 cqp_request = i40iw_get_cqp_request(&iwdev->cqp, wait);
841 if (!cqp_request)
842 return;
843
844 cqp_info = &cqp_request->info;
845 m_info = &cqp_info->in.u.qp_modify.info;
846 memcpy(m_info, info, sizeof(*m_info));
847 cqp_info->cqp_cmd = OP_QP_MODIFY;
848 cqp_info->post_sq = 1;
849 cqp_info->in.u.qp_modify.qp = &iwqp->sc_qp;
850 cqp_info->in.u.qp_modify.scratch = (uintptr_t)cqp_request;
851 if (!i40iw_handle_cqp_op(iwdev, cqp_request))
852 return;
853
854 switch (m_info->next_iwarp_state) {
855 case I40IW_QP_STATE_RTS:
856 if (iwqp->iwarp_state == I40IW_QP_STATE_IDLE)
857 i40iw_send_reset(iwqp->cm_node);
858
859 case I40IW_QP_STATE_IDLE:
860 case I40IW_QP_STATE_TERMINATE:
861 case I40IW_QP_STATE_CLOSING:
862 ae_info.ae_code = I40IW_AE_BAD_CLOSE;
863 ae_info.ae_source = 0;
864 i40iw_gen_ae(iwdev, &iwqp->sc_qp, &ae_info, false);
865 break;
866 case I40IW_QP_STATE_ERROR:
867 default:
868 break;
869 }
870}
871
872
873
874
875
876
877
878
879int i40iw_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
880 int attr_mask, struct ib_udata *udata)
881{
882 struct i40iw_qp *iwqp = to_iwqp(ibqp);
883 struct i40iw_device *iwdev = iwqp->iwdev;
884 struct i40iw_qp_host_ctx_info *ctx_info;
885 struct i40iwarp_offload_info *iwarp_info;
886 struct i40iw_modify_qp_info info;
887 u8 issue_modify_qp = 0;
888 u8 dont_wait = 0;
889 u32 err;
890 unsigned long flags;
891
892 memset(&info, 0, sizeof(info));
893 ctx_info = &iwqp->ctx_info;
894 iwarp_info = &iwqp->iwarp_info;
895
896 spin_lock_irqsave(&iwqp->lock, flags);
897
898 if (attr_mask & IB_QP_STATE) {
899 if (iwdev->closing && attr->qp_state != IB_QPS_ERR) {
900 err = -EINVAL;
901 goto exit;
902 }
903
904 switch (attr->qp_state) {
905 case IB_QPS_INIT:
906 case IB_QPS_RTR:
907 if (iwqp->iwarp_state > (u32)I40IW_QP_STATE_IDLE) {
908 err = -EINVAL;
909 goto exit;
910 }
911 if (iwqp->iwarp_state == I40IW_QP_STATE_INVALID) {
912 info.next_iwarp_state = I40IW_QP_STATE_IDLE;
913 issue_modify_qp = 1;
914 }
915 break;
916 case IB_QPS_RTS:
917 if ((iwqp->iwarp_state > (u32)I40IW_QP_STATE_RTS) ||
918 (!iwqp->cm_id)) {
919 err = -EINVAL;
920 goto exit;
921 }
922
923 issue_modify_qp = 1;
924 iwqp->hw_tcp_state = I40IW_TCP_STATE_ESTABLISHED;
925 iwqp->hte_added = 1;
926 info.next_iwarp_state = I40IW_QP_STATE_RTS;
927 info.tcp_ctx_valid = true;
928 info.ord_valid = true;
929 info.arp_cache_idx_valid = true;
930 info.cq_num_valid = true;
931 break;
932 case IB_QPS_SQD:
933 if (iwqp->hw_iwarp_state > (u32)I40IW_QP_STATE_RTS) {
934 err = 0;
935 goto exit;
936 }
937 if ((iwqp->iwarp_state == (u32)I40IW_QP_STATE_CLOSING) ||
938 (iwqp->iwarp_state < (u32)I40IW_QP_STATE_RTS)) {
939 err = 0;
940 goto exit;
941 }
942 if (iwqp->iwarp_state > (u32)I40IW_QP_STATE_CLOSING) {
943 err = -EINVAL;
944 goto exit;
945 }
946 info.next_iwarp_state = I40IW_QP_STATE_CLOSING;
947 issue_modify_qp = 1;
948 break;
949 case IB_QPS_SQE:
950 if (iwqp->iwarp_state >= (u32)I40IW_QP_STATE_TERMINATE) {
951 err = -EINVAL;
952 goto exit;
953 }
954 info.next_iwarp_state = I40IW_QP_STATE_TERMINATE;
955 issue_modify_qp = 1;
956 break;
957 case IB_QPS_ERR:
958 case IB_QPS_RESET:
959 if (iwqp->iwarp_state == (u32)I40IW_QP_STATE_ERROR) {
960 err = -EINVAL;
961 goto exit;
962 }
963 if (iwqp->sc_qp.term_flags)
964 i40iw_terminate_del_timer(&iwqp->sc_qp);
965 info.next_iwarp_state = I40IW_QP_STATE_ERROR;
966 if ((iwqp->hw_tcp_state > I40IW_TCP_STATE_CLOSED) &&
967 iwdev->iw_status &&
968 (iwqp->hw_tcp_state != I40IW_TCP_STATE_TIME_WAIT))
969 info.reset_tcp_conn = true;
970 else
971 dont_wait = 1;
972 issue_modify_qp = 1;
973 info.next_iwarp_state = I40IW_QP_STATE_ERROR;
974 break;
975 default:
976 err = -EINVAL;
977 goto exit;
978 }
979
980 iwqp->ibqp_state = attr->qp_state;
981
982 }
983 if (attr_mask & IB_QP_ACCESS_FLAGS) {
984 ctx_info->iwarp_info_valid = true;
985 if (attr->qp_access_flags & IB_ACCESS_LOCAL_WRITE)
986 iwarp_info->wr_rdresp_en = true;
987 if (attr->qp_access_flags & IB_ACCESS_REMOTE_WRITE)
988 iwarp_info->wr_rdresp_en = true;
989 if (attr->qp_access_flags & IB_ACCESS_REMOTE_READ)
990 iwarp_info->rd_enable = true;
991 if (attr->qp_access_flags & IB_ACCESS_MW_BIND)
992 iwarp_info->bind_en = true;
993
994 if (iwqp->user_mode) {
995 iwarp_info->rd_enable = true;
996 iwarp_info->wr_rdresp_en = true;
997 iwarp_info->priv_mode_en = false;
998 }
999 }
1000
1001 if (ctx_info->iwarp_info_valid) {
1002 struct i40iw_sc_dev *dev = &iwdev->sc_dev;
1003 int ret;
1004
1005 ctx_info->send_cq_num = iwqp->iwscq->sc_cq.cq_uk.cq_id;
1006 ctx_info->rcv_cq_num = iwqp->iwrcq->sc_cq.cq_uk.cq_id;
1007 ret = dev->iw_priv_qp_ops->qp_setctx(&iwqp->sc_qp,
1008 (u64 *)iwqp->host_ctx.va,
1009 ctx_info);
1010 if (ret) {
1011 i40iw_pr_err("setting QP context\n");
1012 err = -EINVAL;
1013 goto exit;
1014 }
1015 }
1016
1017 spin_unlock_irqrestore(&iwqp->lock, flags);
1018
1019 if (issue_modify_qp) {
1020 i40iw_hw_modify_qp(iwdev, iwqp, &info, true);
1021
1022 spin_lock_irqsave(&iwqp->lock, flags);
1023 iwqp->iwarp_state = info.next_iwarp_state;
1024 spin_unlock_irqrestore(&iwqp->lock, flags);
1025 }
1026
1027 if (issue_modify_qp && (iwqp->ibqp_state > IB_QPS_RTS)) {
1028 if (dont_wait) {
1029 if (iwqp->cm_id && iwqp->hw_tcp_state) {
1030 spin_lock_irqsave(&iwqp->lock, flags);
1031 iwqp->hw_tcp_state = I40IW_TCP_STATE_CLOSED;
1032 iwqp->last_aeq = I40IW_AE_RESET_SENT;
1033 spin_unlock_irqrestore(&iwqp->lock, flags);
1034 i40iw_cm_disconn(iwqp);
1035 }
1036 } else {
1037 spin_lock_irqsave(&iwqp->lock, flags);
1038 if (iwqp->cm_id) {
1039 if (atomic_inc_return(&iwqp->close_timer_started) == 1) {
1040 iwqp->cm_id->add_ref(iwqp->cm_id);
1041 i40iw_schedule_cm_timer(iwqp->cm_node,
1042 (struct i40iw_puda_buf *)iwqp,
1043 I40IW_TIMER_TYPE_CLOSE, 1, 0);
1044 }
1045 }
1046 spin_unlock_irqrestore(&iwqp->lock, flags);
1047 }
1048 }
1049 return 0;
1050exit:
1051 spin_unlock_irqrestore(&iwqp->lock, flags);
1052 return err;
1053}
1054
1055
1056
1057
1058
1059
1060static void cq_free_resources(struct i40iw_device *iwdev, struct i40iw_cq *iwcq)
1061{
1062 struct i40iw_sc_cq *cq = &iwcq->sc_cq;
1063
1064 if (!iwcq->user_mode)
1065 i40iw_free_dma_mem(iwdev->sc_dev.hw, &iwcq->kmem);
1066 i40iw_free_resource(iwdev, iwdev->allocated_cqs, cq->cq_uk.cq_id);
1067}
1068
1069
1070
1071
1072
1073
1074void i40iw_cq_wq_destroy(struct i40iw_device *iwdev, struct i40iw_sc_cq *cq)
1075{
1076 enum i40iw_status_code status;
1077 struct i40iw_cqp_request *cqp_request;
1078 struct cqp_commands_info *cqp_info;
1079
1080 cqp_request = i40iw_get_cqp_request(&iwdev->cqp, true);
1081 if (!cqp_request)
1082 return;
1083
1084 cqp_info = &cqp_request->info;
1085
1086 cqp_info->cqp_cmd = OP_CQ_DESTROY;
1087 cqp_info->post_sq = 1;
1088 cqp_info->in.u.cq_destroy.cq = cq;
1089 cqp_info->in.u.cq_destroy.scratch = (uintptr_t)cqp_request;
1090 status = i40iw_handle_cqp_op(iwdev, cqp_request);
1091 if (status)
1092 i40iw_pr_err("CQP-OP Destroy QP fail");
1093}
1094
1095
1096
1097
1098
1099static int i40iw_destroy_cq(struct ib_cq *ib_cq)
1100{
1101 struct i40iw_cq *iwcq;
1102 struct i40iw_device *iwdev;
1103 struct i40iw_sc_cq *cq;
1104
1105 if (!ib_cq) {
1106 i40iw_pr_err("ib_cq == NULL\n");
1107 return 0;
1108 }
1109
1110 iwcq = to_iwcq(ib_cq);
1111 iwdev = to_iwdev(ib_cq->device);
1112 cq = &iwcq->sc_cq;
1113 i40iw_cq_wq_destroy(iwdev, cq);
1114 cq_free_resources(iwdev, iwcq);
1115 kfree(iwcq);
1116 i40iw_rem_devusecount(iwdev);
1117 return 0;
1118}
1119
1120
1121
1122
1123
1124
1125
1126
1127static struct ib_cq *i40iw_create_cq(struct ib_device *ibdev,
1128 const struct ib_cq_init_attr *attr,
1129 struct ib_ucontext *context,
1130 struct ib_udata *udata)
1131{
1132 struct i40iw_device *iwdev = to_iwdev(ibdev);
1133 struct i40iw_cq *iwcq;
1134 struct i40iw_pbl *iwpbl;
1135 u32 cq_num = 0;
1136 struct i40iw_sc_cq *cq;
1137 struct i40iw_sc_dev *dev = &iwdev->sc_dev;
1138 struct i40iw_cq_init_info info;
1139 enum i40iw_status_code status;
1140 struct i40iw_cqp_request *cqp_request;
1141 struct cqp_commands_info *cqp_info;
1142 struct i40iw_cq_uk_init_info *ukinfo = &info.cq_uk_init_info;
1143 unsigned long flags;
1144 int err_code;
1145 int entries = attr->cqe;
1146
1147 if (iwdev->closing)
1148 return ERR_PTR(-ENODEV);
1149
1150 if (entries > iwdev->max_cqe)
1151 return ERR_PTR(-EINVAL);
1152
1153 iwcq = kzalloc(sizeof(*iwcq), GFP_KERNEL);
1154 if (!iwcq)
1155 return ERR_PTR(-ENOMEM);
1156
1157 memset(&info, 0, sizeof(info));
1158
1159 err_code = i40iw_alloc_resource(iwdev, iwdev->allocated_cqs,
1160 iwdev->max_cq, &cq_num,
1161 &iwdev->next_cq);
1162 if (err_code)
1163 goto error;
1164
1165 cq = &iwcq->sc_cq;
1166 cq->back_cq = (void *)iwcq;
1167 spin_lock_init(&iwcq->lock);
1168
1169 info.dev = dev;
1170 ukinfo->cq_size = max(entries, 4);
1171 ukinfo->cq_id = cq_num;
1172 iwcq->ibcq.cqe = info.cq_uk_init_info.cq_size;
1173 info.ceqe_mask = 0;
1174 if (attr->comp_vector < iwdev->ceqs_count)
1175 info.ceq_id = attr->comp_vector;
1176 info.ceq_id_valid = true;
1177 info.ceqe_mask = 1;
1178 info.type = I40IW_CQ_TYPE_IWARP;
1179 if (context) {
1180 struct i40iw_ucontext *ucontext;
1181 struct i40iw_create_cq_req req;
1182 struct i40iw_cq_mr *cqmr;
1183
1184 memset(&req, 0, sizeof(req));
1185 iwcq->user_mode = true;
1186 ucontext = to_ucontext(context);
1187 if (ib_copy_from_udata(&req, udata, sizeof(struct i40iw_create_cq_req))) {
1188 err_code = -EFAULT;
1189 goto cq_free_resources;
1190 }
1191
1192 spin_lock_irqsave(&ucontext->cq_reg_mem_list_lock, flags);
1193 iwpbl = i40iw_get_pbl((unsigned long)req.user_cq_buffer,
1194 &ucontext->cq_reg_mem_list);
1195 spin_unlock_irqrestore(&ucontext->cq_reg_mem_list_lock, flags);
1196 if (!iwpbl) {
1197 err_code = -EPROTO;
1198 goto cq_free_resources;
1199 }
1200
1201 iwcq->iwpbl = iwpbl;
1202 iwcq->cq_mem_size = 0;
1203 cqmr = &iwpbl->cq_mr;
1204 info.shadow_area_pa = cpu_to_le64(cqmr->shadow);
1205 if (iwpbl->pbl_allocated) {
1206 info.virtual_map = true;
1207 info.pbl_chunk_size = 1;
1208 info.first_pm_pbl_idx = cqmr->cq_pbl.idx;
1209 } else {
1210 info.cq_base_pa = cqmr->cq_pbl.addr;
1211 }
1212 } else {
1213
1214 int rsize;
1215 int shadow;
1216
1217 rsize = info.cq_uk_init_info.cq_size * sizeof(struct i40iw_cqe);
1218 rsize = round_up(rsize, 256);
1219 shadow = I40IW_SHADOW_AREA_SIZE << 3;
1220 status = i40iw_allocate_dma_mem(dev->hw, &iwcq->kmem,
1221 rsize + shadow, 256);
1222 if (status) {
1223 err_code = -ENOMEM;
1224 goto cq_free_resources;
1225 }
1226 ukinfo->cq_base = iwcq->kmem.va;
1227 info.cq_base_pa = iwcq->kmem.pa;
1228 info.shadow_area_pa = info.cq_base_pa + rsize;
1229 ukinfo->shadow_area = iwcq->kmem.va + rsize;
1230 }
1231
1232 if (dev->iw_priv_cq_ops->cq_init(cq, &info)) {
1233 i40iw_pr_err("init cq fail\n");
1234 err_code = -EPROTO;
1235 goto cq_free_resources;
1236 }
1237
1238 cqp_request = i40iw_get_cqp_request(&iwdev->cqp, true);
1239 if (!cqp_request) {
1240 err_code = -ENOMEM;
1241 goto cq_free_resources;
1242 }
1243
1244 cqp_info = &cqp_request->info;
1245 cqp_info->cqp_cmd = OP_CQ_CREATE;
1246 cqp_info->post_sq = 1;
1247 cqp_info->in.u.cq_create.cq = cq;
1248 cqp_info->in.u.cq_create.scratch = (uintptr_t)cqp_request;
1249 status = i40iw_handle_cqp_op(iwdev, cqp_request);
1250 if (status) {
1251 i40iw_pr_err("CQP-OP Create QP fail");
1252 err_code = -EPROTO;
1253 goto cq_free_resources;
1254 }
1255
1256 if (context) {
1257 struct i40iw_create_cq_resp resp;
1258
1259 memset(&resp, 0, sizeof(resp));
1260 resp.cq_id = info.cq_uk_init_info.cq_id;
1261 resp.cq_size = info.cq_uk_init_info.cq_size;
1262 if (ib_copy_to_udata(udata, &resp, sizeof(resp))) {
1263 i40iw_pr_err("copy to user data\n");
1264 err_code = -EPROTO;
1265 goto cq_destroy;
1266 }
1267 }
1268
1269 i40iw_add_devusecount(iwdev);
1270 return (struct ib_cq *)iwcq;
1271
1272cq_destroy:
1273 i40iw_cq_wq_destroy(iwdev, cq);
1274cq_free_resources:
1275 cq_free_resources(iwdev, iwcq);
1276error:
1277 kfree(iwcq);
1278 return ERR_PTR(err_code);
1279}
1280
1281
1282
1283
1284
1285static inline u16 i40iw_get_user_access(int acc)
1286{
1287 u16 access = 0;
1288
1289 access |= (acc & IB_ACCESS_LOCAL_WRITE) ? I40IW_ACCESS_FLAGS_LOCALWRITE : 0;
1290 access |= (acc & IB_ACCESS_REMOTE_WRITE) ? I40IW_ACCESS_FLAGS_REMOTEWRITE : 0;
1291 access |= (acc & IB_ACCESS_REMOTE_READ) ? I40IW_ACCESS_FLAGS_REMOTEREAD : 0;
1292 access |= (acc & IB_ACCESS_MW_BIND) ? I40IW_ACCESS_FLAGS_BIND_WINDOW : 0;
1293 return access;
1294}
1295
1296
1297
1298
1299
1300
1301static void i40iw_free_stag(struct i40iw_device *iwdev, u32 stag)
1302{
1303 u32 stag_idx;
1304
1305 stag_idx = (stag & iwdev->mr_stagmask) >> I40IW_CQPSQ_STAG_IDX_SHIFT;
1306 i40iw_free_resource(iwdev, iwdev->allocated_mrs, stag_idx);
1307 i40iw_rem_devusecount(iwdev);
1308}
1309
1310
1311
1312
1313
1314static u32 i40iw_create_stag(struct i40iw_device *iwdev)
1315{
1316 u32 stag = 0;
1317 u32 stag_index = 0;
1318 u32 next_stag_index;
1319 u32 driver_key;
1320 u32 random;
1321 u8 consumer_key;
1322 int ret;
1323
1324 get_random_bytes(&random, sizeof(random));
1325 consumer_key = (u8)random;
1326
1327 driver_key = random & ~iwdev->mr_stagmask;
1328 next_stag_index = (random & iwdev->mr_stagmask) >> 8;
1329 next_stag_index %= iwdev->max_mr;
1330
1331 ret = i40iw_alloc_resource(iwdev,
1332 iwdev->allocated_mrs, iwdev->max_mr,
1333 &stag_index, &next_stag_index);
1334 if (!ret) {
1335 stag = stag_index << I40IW_CQPSQ_STAG_IDX_SHIFT;
1336 stag |= driver_key;
1337 stag += (u32)consumer_key;
1338 i40iw_add_devusecount(iwdev);
1339 }
1340 return stag;
1341}
1342
1343
1344
1345
1346
1347
1348
1349static inline u64 *i40iw_next_pbl_addr(u64 *pbl,
1350 struct i40iw_pble_info **pinfo,
1351 u32 *idx)
1352{
1353 *idx += 1;
1354 if ((!(*pinfo)) || (*idx != (*pinfo)->cnt))
1355 return ++pbl;
1356 *idx = 0;
1357 (*pinfo)++;
1358 return (u64 *)(*pinfo)->addr;
1359}
1360
1361
1362
1363
1364
1365
1366
1367static void i40iw_copy_user_pgaddrs(struct i40iw_mr *iwmr,
1368 u64 *pbl,
1369 enum i40iw_pble_level level)
1370{
1371 struct ib_umem *region = iwmr->region;
1372 struct i40iw_pbl *iwpbl = &iwmr->iwpbl;
1373 int chunk_pages, entry, i;
1374 struct i40iw_pble_alloc *palloc = &iwpbl->pble_alloc;
1375 struct i40iw_pble_info *pinfo;
1376 struct scatterlist *sg;
1377 u64 pg_addr = 0;
1378 u32 idx = 0;
1379
1380 pinfo = (level == I40IW_LEVEL_1) ? NULL : palloc->level2.leaf;
1381
1382 for_each_sg(region->sg_head.sgl, sg, region->nmap, entry) {
1383 chunk_pages = sg_dma_len(sg) >> region->page_shift;
1384 if ((iwmr->type == IW_MEMREG_TYPE_QP) &&
1385 !iwpbl->qp_mr.sq_page)
1386 iwpbl->qp_mr.sq_page = sg_page(sg);
1387 for (i = 0; i < chunk_pages; i++) {
1388 pg_addr = sg_dma_address(sg) +
1389 (i << region->page_shift);
1390
1391 if ((entry + i) == 0)
1392 *pbl = cpu_to_le64(pg_addr & iwmr->page_msk);
1393 else if (!(pg_addr & ~iwmr->page_msk))
1394 *pbl = cpu_to_le64(pg_addr);
1395 else
1396 continue;
1397 pbl = i40iw_next_pbl_addr(pbl, &pinfo, &idx);
1398 }
1399 }
1400}
1401
1402
1403
1404
1405
1406
1407static void i40iw_set_hugetlb_values(u64 addr, struct i40iw_mr *iwmr)
1408{
1409 struct vm_area_struct *vma;
1410 struct hstate *h;
1411
1412 vma = find_vma(current->mm, addr);
1413 if (vma && is_vm_hugetlb_page(vma)) {
1414 h = hstate_vma(vma);
1415 if (huge_page_size(h) == 0x200000) {
1416 iwmr->page_size = huge_page_size(h);
1417 iwmr->page_msk = huge_page_mask(h);
1418 }
1419 }
1420}
1421
1422
1423
1424
1425
1426
1427
1428
1429static bool i40iw_check_mem_contiguous(u64 *arr, u32 npages, u32 pg_size)
1430{
1431 u32 pg_idx;
1432
1433 for (pg_idx = 0; pg_idx < npages; pg_idx++) {
1434 if ((*arr + (pg_size * pg_idx)) != arr[pg_idx])
1435 return false;
1436 }
1437 return true;
1438}
1439
1440
1441
1442
1443
1444
1445static bool i40iw_check_mr_contiguous(struct i40iw_pble_alloc *palloc, u32 pg_size)
1446{
1447 struct i40iw_pble_level2 *lvl2 = &palloc->level2;
1448 struct i40iw_pble_info *leaf = lvl2->leaf;
1449 u64 *arr = NULL;
1450 u64 *start_addr = NULL;
1451 int i;
1452 bool ret;
1453
1454 if (palloc->level == I40IW_LEVEL_1) {
1455 arr = (u64 *)palloc->level1.addr;
1456 ret = i40iw_check_mem_contiguous(arr, palloc->total_cnt, pg_size);
1457 return ret;
1458 }
1459
1460 start_addr = (u64 *)leaf->addr;
1461
1462 for (i = 0; i < lvl2->leaf_cnt; i++, leaf++) {
1463 arr = (u64 *)leaf->addr;
1464 if ((*start_addr + (i * pg_size * PBLE_PER_PAGE)) != *arr)
1465 return false;
1466 ret = i40iw_check_mem_contiguous(arr, leaf->cnt, pg_size);
1467 if (!ret)
1468 return false;
1469 }
1470
1471 return true;
1472}
1473
1474
1475
1476
1477
1478
1479
1480static int i40iw_setup_pbles(struct i40iw_device *iwdev,
1481 struct i40iw_mr *iwmr,
1482 bool use_pbles)
1483{
1484 struct i40iw_pbl *iwpbl = &iwmr->iwpbl;
1485 struct i40iw_pble_alloc *palloc = &iwpbl->pble_alloc;
1486 struct i40iw_pble_info *pinfo;
1487 u64 *pbl;
1488 enum i40iw_status_code status;
1489 enum i40iw_pble_level level = I40IW_LEVEL_1;
1490
1491 if (use_pbles) {
1492 mutex_lock(&iwdev->pbl_mutex);
1493 status = i40iw_get_pble(&iwdev->sc_dev, iwdev->pble_rsrc, palloc, iwmr->page_cnt);
1494 mutex_unlock(&iwdev->pbl_mutex);
1495 if (status)
1496 return -ENOMEM;
1497
1498 iwpbl->pbl_allocated = true;
1499 level = palloc->level;
1500 pinfo = (level == I40IW_LEVEL_1) ? &palloc->level1 : palloc->level2.leaf;
1501 pbl = (u64 *)pinfo->addr;
1502 } else {
1503 pbl = iwmr->pgaddrmem;
1504 }
1505
1506 i40iw_copy_user_pgaddrs(iwmr, pbl, level);
1507
1508 if (use_pbles)
1509 iwmr->pgaddrmem[0] = *pbl;
1510
1511 return 0;
1512}
1513
1514
1515
1516
1517
1518
1519
1520
1521static int i40iw_handle_q_mem(struct i40iw_device *iwdev,
1522 struct i40iw_mem_reg_req *req,
1523 struct i40iw_pbl *iwpbl,
1524 bool use_pbles)
1525{
1526 struct i40iw_pble_alloc *palloc = &iwpbl->pble_alloc;
1527 struct i40iw_mr *iwmr = iwpbl->iwmr;
1528 struct i40iw_qp_mr *qpmr = &iwpbl->qp_mr;
1529 struct i40iw_cq_mr *cqmr = &iwpbl->cq_mr;
1530 struct i40iw_hmc_pble *hmc_p;
1531 u64 *arr = iwmr->pgaddrmem;
1532 u32 pg_size;
1533 int err;
1534 int total;
1535 bool ret = true;
1536
1537 total = req->sq_pages + req->rq_pages + req->cq_pages;
1538 pg_size = iwmr->page_size;
1539
1540 err = i40iw_setup_pbles(iwdev, iwmr, use_pbles);
1541 if (err)
1542 return err;
1543
1544 if (use_pbles && (palloc->level != I40IW_LEVEL_1)) {
1545 i40iw_free_pble(iwdev->pble_rsrc, palloc);
1546 iwpbl->pbl_allocated = false;
1547 return -ENOMEM;
1548 }
1549
1550 if (use_pbles)
1551 arr = (u64 *)palloc->level1.addr;
1552
1553 if (iwmr->type == IW_MEMREG_TYPE_QP) {
1554 hmc_p = &qpmr->sq_pbl;
1555 qpmr->shadow = (dma_addr_t)arr[total];
1556
1557 if (use_pbles) {
1558 ret = i40iw_check_mem_contiguous(arr, req->sq_pages, pg_size);
1559 if (ret)
1560 ret = i40iw_check_mem_contiguous(&arr[req->sq_pages], req->rq_pages, pg_size);
1561 }
1562
1563 if (!ret) {
1564 hmc_p->idx = palloc->level1.idx;
1565 hmc_p = &qpmr->rq_pbl;
1566 hmc_p->idx = palloc->level1.idx + req->sq_pages;
1567 } else {
1568 hmc_p->addr = arr[0];
1569 hmc_p = &qpmr->rq_pbl;
1570 hmc_p->addr = arr[req->sq_pages];
1571 }
1572 } else {
1573 hmc_p = &cqmr->cq_pbl;
1574 cqmr->shadow = (dma_addr_t)arr[total];
1575
1576 if (use_pbles)
1577 ret = i40iw_check_mem_contiguous(arr, req->cq_pages, pg_size);
1578
1579 if (!ret)
1580 hmc_p->idx = palloc->level1.idx;
1581 else
1582 hmc_p->addr = arr[0];
1583 }
1584
1585 if (use_pbles && ret) {
1586 i40iw_free_pble(iwdev->pble_rsrc, palloc);
1587 iwpbl->pbl_allocated = false;
1588 }
1589
1590 return err;
1591}
1592
1593
1594
1595
1596
1597
1598static int i40iw_hw_alloc_stag(struct i40iw_device *iwdev, struct i40iw_mr *iwmr)
1599{
1600 struct i40iw_allocate_stag_info *info;
1601 struct i40iw_pd *iwpd = to_iwpd(iwmr->ibmr.pd);
1602 enum i40iw_status_code status;
1603 int err = 0;
1604 struct i40iw_cqp_request *cqp_request;
1605 struct cqp_commands_info *cqp_info;
1606
1607 cqp_request = i40iw_get_cqp_request(&iwdev->cqp, true);
1608 if (!cqp_request)
1609 return -ENOMEM;
1610
1611 cqp_info = &cqp_request->info;
1612 info = &cqp_info->in.u.alloc_stag.info;
1613 memset(info, 0, sizeof(*info));
1614 info->page_size = PAGE_SIZE;
1615 info->stag_idx = iwmr->stag >> I40IW_CQPSQ_STAG_IDX_SHIFT;
1616 info->pd_id = iwpd->sc_pd.pd_id;
1617 info->total_len = iwmr->length;
1618 info->remote_access = true;
1619 cqp_info->cqp_cmd = OP_ALLOC_STAG;
1620 cqp_info->post_sq = 1;
1621 cqp_info->in.u.alloc_stag.dev = &iwdev->sc_dev;
1622 cqp_info->in.u.alloc_stag.scratch = (uintptr_t)cqp_request;
1623
1624 status = i40iw_handle_cqp_op(iwdev, cqp_request);
1625 if (status) {
1626 err = -ENOMEM;
1627 i40iw_pr_err("CQP-OP MR Reg fail");
1628 }
1629 return err;
1630}
1631
1632
1633
1634
1635
1636
1637
1638static struct ib_mr *i40iw_alloc_mr(struct ib_pd *pd,
1639 enum ib_mr_type mr_type,
1640 u32 max_num_sg)
1641{
1642 struct i40iw_pd *iwpd = to_iwpd(pd);
1643 struct i40iw_device *iwdev = to_iwdev(pd->device);
1644 struct i40iw_pble_alloc *palloc;
1645 struct i40iw_pbl *iwpbl;
1646 struct i40iw_mr *iwmr;
1647 enum i40iw_status_code status;
1648 u32 stag;
1649 int err_code = -ENOMEM;
1650
1651 iwmr = kzalloc(sizeof(*iwmr), GFP_KERNEL);
1652 if (!iwmr)
1653 return ERR_PTR(-ENOMEM);
1654
1655 stag = i40iw_create_stag(iwdev);
1656 if (!stag) {
1657 err_code = -EOVERFLOW;
1658 goto err;
1659 }
1660 stag &= ~I40IW_CQPSQ_STAG_KEY_MASK;
1661 iwmr->stag = stag;
1662 iwmr->ibmr.rkey = stag;
1663 iwmr->ibmr.lkey = stag;
1664 iwmr->ibmr.pd = pd;
1665 iwmr->ibmr.device = pd->device;
1666 iwpbl = &iwmr->iwpbl;
1667 iwpbl->iwmr = iwmr;
1668 iwmr->type = IW_MEMREG_TYPE_MEM;
1669 palloc = &iwpbl->pble_alloc;
1670 iwmr->page_cnt = max_num_sg;
1671 mutex_lock(&iwdev->pbl_mutex);
1672 status = i40iw_get_pble(&iwdev->sc_dev, iwdev->pble_rsrc, palloc, iwmr->page_cnt);
1673 mutex_unlock(&iwdev->pbl_mutex);
1674 if (status)
1675 goto err1;
1676
1677 if (palloc->level != I40IW_LEVEL_1)
1678 goto err2;
1679 err_code = i40iw_hw_alloc_stag(iwdev, iwmr);
1680 if (err_code)
1681 goto err2;
1682 iwpbl->pbl_allocated = true;
1683 i40iw_add_pdusecount(iwpd);
1684 return &iwmr->ibmr;
1685err2:
1686 i40iw_free_pble(iwdev->pble_rsrc, palloc);
1687err1:
1688 i40iw_free_stag(iwdev, stag);
1689err:
1690 kfree(iwmr);
1691 return ERR_PTR(err_code);
1692}
1693
1694
1695
1696
1697
1698
1699static int i40iw_set_page(struct ib_mr *ibmr, u64 addr)
1700{
1701 struct i40iw_mr *iwmr = to_iwmr(ibmr);
1702 struct i40iw_pbl *iwpbl = &iwmr->iwpbl;
1703 struct i40iw_pble_alloc *palloc = &iwpbl->pble_alloc;
1704 u64 *pbl;
1705
1706 if (unlikely(iwmr->npages == iwmr->page_cnt))
1707 return -ENOMEM;
1708
1709 pbl = (u64 *)palloc->level1.addr;
1710 pbl[iwmr->npages++] = cpu_to_le64(addr);
1711 return 0;
1712}
1713
1714
1715
1716
1717
1718
1719
1720static int i40iw_map_mr_sg(struct ib_mr *ibmr, struct scatterlist *sg,
1721 int sg_nents, unsigned int *sg_offset)
1722{
1723 struct i40iw_mr *iwmr = to_iwmr(ibmr);
1724
1725 iwmr->npages = 0;
1726 return ib_sg_to_pages(ibmr, sg, sg_nents, sg_offset, i40iw_set_page);
1727}
1728
1729
1730
1731
1732
1733static void i40iw_drain_sq(struct ib_qp *ibqp)
1734{
1735 struct i40iw_qp *iwqp = to_iwqp(ibqp);
1736 struct i40iw_sc_qp *qp = &iwqp->sc_qp;
1737
1738 if (I40IW_RING_MORE_WORK(qp->qp_uk.sq_ring))
1739 wait_for_completion(&iwqp->sq_drained);
1740}
1741
1742
1743
1744
1745
1746static void i40iw_drain_rq(struct ib_qp *ibqp)
1747{
1748 struct i40iw_qp *iwqp = to_iwqp(ibqp);
1749 struct i40iw_sc_qp *qp = &iwqp->sc_qp;
1750
1751 if (I40IW_RING_MORE_WORK(qp->qp_uk.rq_ring))
1752 wait_for_completion(&iwqp->rq_drained);
1753}
1754
1755
1756
1757
1758
1759
1760
1761static int i40iw_hwreg_mr(struct i40iw_device *iwdev,
1762 struct i40iw_mr *iwmr,
1763 u16 access)
1764{
1765 struct i40iw_pbl *iwpbl = &iwmr->iwpbl;
1766 struct i40iw_reg_ns_stag_info *stag_info;
1767 struct i40iw_pd *iwpd = to_iwpd(iwmr->ibmr.pd);
1768 struct i40iw_pble_alloc *palloc = &iwpbl->pble_alloc;
1769 enum i40iw_status_code status;
1770 int err = 0;
1771 struct i40iw_cqp_request *cqp_request;
1772 struct cqp_commands_info *cqp_info;
1773
1774 cqp_request = i40iw_get_cqp_request(&iwdev->cqp, true);
1775 if (!cqp_request)
1776 return -ENOMEM;
1777
1778 cqp_info = &cqp_request->info;
1779 stag_info = &cqp_info->in.u.mr_reg_non_shared.info;
1780 memset(stag_info, 0, sizeof(*stag_info));
1781 stag_info->va = (void *)(unsigned long)iwpbl->user_base;
1782 stag_info->stag_idx = iwmr->stag >> I40IW_CQPSQ_STAG_IDX_SHIFT;
1783 stag_info->stag_key = (u8)iwmr->stag;
1784 stag_info->total_len = iwmr->length;
1785 stag_info->access_rights = access;
1786 stag_info->pd_id = iwpd->sc_pd.pd_id;
1787 stag_info->addr_type = I40IW_ADDR_TYPE_VA_BASED;
1788 stag_info->page_size = iwmr->page_size;
1789
1790 if (iwpbl->pbl_allocated) {
1791 if (palloc->level == I40IW_LEVEL_1) {
1792 stag_info->first_pm_pbl_index = palloc->level1.idx;
1793 stag_info->chunk_size = 1;
1794 } else {
1795 stag_info->first_pm_pbl_index = palloc->level2.root.idx;
1796 stag_info->chunk_size = 3;
1797 }
1798 } else {
1799 stag_info->reg_addr_pa = iwmr->pgaddrmem[0];
1800 }
1801
1802 cqp_info->cqp_cmd = OP_MR_REG_NON_SHARED;
1803 cqp_info->post_sq = 1;
1804 cqp_info->in.u.mr_reg_non_shared.dev = &iwdev->sc_dev;
1805 cqp_info->in.u.mr_reg_non_shared.scratch = (uintptr_t)cqp_request;
1806
1807 status = i40iw_handle_cqp_op(iwdev, cqp_request);
1808 if (status) {
1809 err = -ENOMEM;
1810 i40iw_pr_err("CQP-OP MR Reg fail");
1811 }
1812 return err;
1813}
1814
1815
1816
1817
1818
1819
1820
1821
1822
1823
1824static struct ib_mr *i40iw_reg_user_mr(struct ib_pd *pd,
1825 u64 start,
1826 u64 length,
1827 u64 virt,
1828 int acc,
1829 struct ib_udata *udata)
1830{
1831 struct i40iw_pd *iwpd = to_iwpd(pd);
1832 struct i40iw_device *iwdev = to_iwdev(pd->device);
1833 struct i40iw_ucontext *ucontext;
1834 struct i40iw_pble_alloc *palloc;
1835 struct i40iw_pbl *iwpbl;
1836 struct i40iw_mr *iwmr;
1837 struct ib_umem *region;
1838 struct i40iw_mem_reg_req req;
1839 u64 pbl_depth = 0;
1840 u32 stag = 0;
1841 u16 access;
1842 u64 region_length;
1843 bool use_pbles = false;
1844 unsigned long flags;
1845 int err = -ENOSYS;
1846 int ret;
1847 int pg_shift;
1848
1849 if (iwdev->closing)
1850 return ERR_PTR(-ENODEV);
1851
1852 if (length > I40IW_MAX_MR_SIZE)
1853 return ERR_PTR(-EINVAL);
1854 region = ib_umem_get(pd->uobject->context, start, length, acc, 0);
1855 if (IS_ERR(region))
1856 return (struct ib_mr *)region;
1857
1858 if (ib_copy_from_udata(&req, udata, sizeof(req))) {
1859 ib_umem_release(region);
1860 return ERR_PTR(-EFAULT);
1861 }
1862
1863 iwmr = kzalloc(sizeof(*iwmr), GFP_KERNEL);
1864 if (!iwmr) {
1865 ib_umem_release(region);
1866 return ERR_PTR(-ENOMEM);
1867 }
1868
1869 iwpbl = &iwmr->iwpbl;
1870 iwpbl->iwmr = iwmr;
1871 iwmr->region = region;
1872 iwmr->ibmr.pd = pd;
1873 iwmr->ibmr.device = pd->device;
1874 ucontext = to_ucontext(pd->uobject->context);
1875
1876 iwmr->page_size = PAGE_SIZE;
1877 iwmr->page_msk = PAGE_MASK;
1878
1879 if (region->hugetlb && (req.reg_type == IW_MEMREG_TYPE_MEM))
1880 i40iw_set_hugetlb_values(start, iwmr);
1881
1882 region_length = region->length + (start & (iwmr->page_size - 1));
1883 pg_shift = ffs(iwmr->page_size) - 1;
1884 pbl_depth = region_length >> pg_shift;
1885 pbl_depth += (region_length & (iwmr->page_size - 1)) ? 1 : 0;
1886 iwmr->length = region->length;
1887
1888 iwpbl->user_base = virt;
1889 palloc = &iwpbl->pble_alloc;
1890
1891 iwmr->type = req.reg_type;
1892 iwmr->page_cnt = (u32)pbl_depth;
1893
1894 switch (req.reg_type) {
1895 case IW_MEMREG_TYPE_QP:
1896 use_pbles = ((req.sq_pages + req.rq_pages) > 2);
1897 err = i40iw_handle_q_mem(iwdev, &req, iwpbl, use_pbles);
1898 if (err)
1899 goto error;
1900 spin_lock_irqsave(&ucontext->qp_reg_mem_list_lock, flags);
1901 list_add_tail(&iwpbl->list, &ucontext->qp_reg_mem_list);
1902 iwpbl->on_list = true;
1903 spin_unlock_irqrestore(&ucontext->qp_reg_mem_list_lock, flags);
1904 break;
1905 case IW_MEMREG_TYPE_CQ:
1906 use_pbles = (req.cq_pages > 1);
1907 err = i40iw_handle_q_mem(iwdev, &req, iwpbl, use_pbles);
1908 if (err)
1909 goto error;
1910
1911 spin_lock_irqsave(&ucontext->cq_reg_mem_list_lock, flags);
1912 list_add_tail(&iwpbl->list, &ucontext->cq_reg_mem_list);
1913 iwpbl->on_list = true;
1914 spin_unlock_irqrestore(&ucontext->cq_reg_mem_list_lock, flags);
1915 break;
1916 case IW_MEMREG_TYPE_MEM:
1917 use_pbles = (iwmr->page_cnt != 1);
1918 access = I40IW_ACCESS_FLAGS_LOCALREAD;
1919
1920 err = i40iw_setup_pbles(iwdev, iwmr, use_pbles);
1921 if (err)
1922 goto error;
1923
1924 if (use_pbles) {
1925 ret = i40iw_check_mr_contiguous(palloc, iwmr->page_size);
1926 if (ret) {
1927 i40iw_free_pble(iwdev->pble_rsrc, palloc);
1928 iwpbl->pbl_allocated = false;
1929 }
1930 }
1931
1932 access |= i40iw_get_user_access(acc);
1933 stag = i40iw_create_stag(iwdev);
1934 if (!stag) {
1935 err = -ENOMEM;
1936 goto error;
1937 }
1938
1939 iwmr->stag = stag;
1940 iwmr->ibmr.rkey = stag;
1941 iwmr->ibmr.lkey = stag;
1942
1943 err = i40iw_hwreg_mr(iwdev, iwmr, access);
1944 if (err) {
1945 i40iw_free_stag(iwdev, stag);
1946 goto error;
1947 }
1948
1949 break;
1950 default:
1951 goto error;
1952 }
1953
1954 iwmr->type = req.reg_type;
1955 if (req.reg_type == IW_MEMREG_TYPE_MEM)
1956 i40iw_add_pdusecount(iwpd);
1957 return &iwmr->ibmr;
1958
1959error:
1960 if (palloc->level != I40IW_LEVEL_0 && iwpbl->pbl_allocated)
1961 i40iw_free_pble(iwdev->pble_rsrc, palloc);
1962 ib_umem_release(region);
1963 kfree(iwmr);
1964 return ERR_PTR(err);
1965}
1966
1967
1968
1969
1970
1971
1972
1973
1974
1975struct ib_mr *i40iw_reg_phys_mr(struct ib_pd *pd,
1976 u64 addr,
1977 u64 size,
1978 int acc,
1979 u64 *iova_start)
1980{
1981 struct i40iw_pd *iwpd = to_iwpd(pd);
1982 struct i40iw_device *iwdev = to_iwdev(pd->device);
1983 struct i40iw_pbl *iwpbl;
1984 struct i40iw_mr *iwmr;
1985 enum i40iw_status_code status;
1986 u32 stag;
1987 u16 access = I40IW_ACCESS_FLAGS_LOCALREAD;
1988 int ret;
1989
1990 iwmr = kzalloc(sizeof(*iwmr), GFP_KERNEL);
1991 if (!iwmr)
1992 return ERR_PTR(-ENOMEM);
1993 iwmr->ibmr.pd = pd;
1994 iwmr->ibmr.device = pd->device;
1995 iwpbl = &iwmr->iwpbl;
1996 iwpbl->iwmr = iwmr;
1997 iwmr->type = IW_MEMREG_TYPE_MEM;
1998 iwpbl->user_base = *iova_start;
1999 stag = i40iw_create_stag(iwdev);
2000 if (!stag) {
2001 ret = -EOVERFLOW;
2002 goto err;
2003 }
2004 access |= i40iw_get_user_access(acc);
2005 iwmr->stag = stag;
2006 iwmr->ibmr.rkey = stag;
2007 iwmr->ibmr.lkey = stag;
2008 iwmr->page_cnt = 1;
2009 iwmr->pgaddrmem[0] = addr;
2010 iwmr->length = size;
2011 status = i40iw_hwreg_mr(iwdev, iwmr, access);
2012 if (status) {
2013 i40iw_free_stag(iwdev, stag);
2014 ret = -ENOMEM;
2015 goto err;
2016 }
2017
2018 i40iw_add_pdusecount(iwpd);
2019 return &iwmr->ibmr;
2020 err:
2021 kfree(iwmr);
2022 return ERR_PTR(ret);
2023}
2024
2025
2026
2027
2028
2029
2030static struct ib_mr *i40iw_get_dma_mr(struct ib_pd *pd, int acc)
2031{
2032 u64 kva = 0;
2033
2034 return i40iw_reg_phys_mr(pd, 0, 0, acc, &kva);
2035}
2036
2037
2038
2039
2040
2041
2042static void i40iw_del_memlist(struct i40iw_mr *iwmr,
2043 struct i40iw_ucontext *ucontext)
2044{
2045 struct i40iw_pbl *iwpbl = &iwmr->iwpbl;
2046 unsigned long flags;
2047
2048 switch (iwmr->type) {
2049 case IW_MEMREG_TYPE_CQ:
2050 spin_lock_irqsave(&ucontext->cq_reg_mem_list_lock, flags);
2051 if (iwpbl->on_list) {
2052 iwpbl->on_list = false;
2053 list_del(&iwpbl->list);
2054 }
2055 spin_unlock_irqrestore(&ucontext->cq_reg_mem_list_lock, flags);
2056 break;
2057 case IW_MEMREG_TYPE_QP:
2058 spin_lock_irqsave(&ucontext->qp_reg_mem_list_lock, flags);
2059 if (iwpbl->on_list) {
2060 iwpbl->on_list = false;
2061 list_del(&iwpbl->list);
2062 }
2063 spin_unlock_irqrestore(&ucontext->qp_reg_mem_list_lock, flags);
2064 break;
2065 default:
2066 break;
2067 }
2068}
2069
2070
2071
2072
2073
2074static int i40iw_dereg_mr(struct ib_mr *ib_mr)
2075{
2076 struct ib_pd *ibpd = ib_mr->pd;
2077 struct i40iw_pd *iwpd = to_iwpd(ibpd);
2078 struct i40iw_mr *iwmr = to_iwmr(ib_mr);
2079 struct i40iw_device *iwdev = to_iwdev(ib_mr->device);
2080 enum i40iw_status_code status;
2081 struct i40iw_dealloc_stag_info *info;
2082 struct i40iw_pbl *iwpbl = &iwmr->iwpbl;
2083 struct i40iw_pble_alloc *palloc = &iwpbl->pble_alloc;
2084 struct i40iw_cqp_request *cqp_request;
2085 struct cqp_commands_info *cqp_info;
2086 u32 stag_idx;
2087
2088 if (iwmr->region)
2089 ib_umem_release(iwmr->region);
2090
2091 if (iwmr->type != IW_MEMREG_TYPE_MEM) {
2092 if (ibpd->uobject) {
2093 struct i40iw_ucontext *ucontext;
2094
2095 ucontext = to_ucontext(ibpd->uobject->context);
2096 i40iw_del_memlist(iwmr, ucontext);
2097 }
2098 if (iwpbl->pbl_allocated && iwmr->type != IW_MEMREG_TYPE_QP)
2099 i40iw_free_pble(iwdev->pble_rsrc, palloc);
2100 kfree(iwmr);
2101 return 0;
2102 }
2103
2104 cqp_request = i40iw_get_cqp_request(&iwdev->cqp, true);
2105 if (!cqp_request)
2106 return -ENOMEM;
2107
2108 cqp_info = &cqp_request->info;
2109 info = &cqp_info->in.u.dealloc_stag.info;
2110 memset(info, 0, sizeof(*info));
2111
2112 info->pd_id = cpu_to_le32(iwpd->sc_pd.pd_id & 0x00007fff);
2113 info->stag_idx = RS_64_1(ib_mr->rkey, I40IW_CQPSQ_STAG_IDX_SHIFT);
2114 stag_idx = info->stag_idx;
2115 info->mr = true;
2116 if (iwpbl->pbl_allocated)
2117 info->dealloc_pbl = true;
2118
2119 cqp_info->cqp_cmd = OP_DEALLOC_STAG;
2120 cqp_info->post_sq = 1;
2121 cqp_info->in.u.dealloc_stag.dev = &iwdev->sc_dev;
2122 cqp_info->in.u.dealloc_stag.scratch = (uintptr_t)cqp_request;
2123 status = i40iw_handle_cqp_op(iwdev, cqp_request);
2124 if (status)
2125 i40iw_pr_err("CQP-OP dealloc failed for stag_idx = 0x%x\n", stag_idx);
2126 i40iw_rem_pdusecount(iwpd, iwdev);
2127 i40iw_free_stag(iwdev, iwmr->stag);
2128 if (iwpbl->pbl_allocated)
2129 i40iw_free_pble(iwdev->pble_rsrc, palloc);
2130 kfree(iwmr);
2131 return 0;
2132}
2133
2134
2135
2136
2137static ssize_t i40iw_show_rev(struct device *dev,
2138 struct device_attribute *attr, char *buf)
2139{
2140 struct i40iw_ib_device *iwibdev = container_of(dev,
2141 struct i40iw_ib_device,
2142 ibdev.dev);
2143 u32 hw_rev = iwibdev->iwdev->sc_dev.hw_rev;
2144
2145 return sprintf(buf, "%x\n", hw_rev);
2146}
2147
2148
2149
2150
2151static ssize_t i40iw_show_hca(struct device *dev,
2152 struct device_attribute *attr, char *buf)
2153{
2154 return sprintf(buf, "I40IW\n");
2155}
2156
2157
2158
2159
static ssize_t i40iw_show_board(struct device *dev,
				struct device_attribute *attr,
				char *buf)
{
	return sprintf(buf, "%.*s\n", 32, "I40IW Board ID");
}

static DEVICE_ATTR(hw_rev, S_IRUGO, i40iw_show_rev, NULL);
static DEVICE_ATTR(hca_type, S_IRUGO, i40iw_show_hca, NULL);
static DEVICE_ATTR(board_id, S_IRUGO, i40iw_show_board, NULL);

static struct device_attribute *i40iw_dev_attributes[] = {
	&dev_attr_hw_rev,
	&dev_attr_hca_type,
	&dev_attr_board_id
};
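
/**
 * i40iw_copy_sg_list - copy sg list for qp
 * @sg_list: copied into sg_list
 * @sgl: copy from sgl
 * @num_sges: count of sg entries
 */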
static void i40iw_copy_sg_list(struct i40iw_sge *sg_list, struct ib_sge *sgl, int num_sges)
{
	unsigned int i;

	for (i = 0; (i < num_sges) && (i < I40IW_MAX_WQ_FRAGMENT_COUNT); i++) {
		sg_list[i].tag_off = sgl[i].addr;
		sg_list[i].len = sgl[i].length;
		sg_list[i].stag = sgl[i].lkey;
	}
}
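
/**
 * i40iw_post_send - post work request for kernel application
 * @ibqp: qp ptr for wr
 * @ib_wr: work request ptr
 * @bad_wr: return of bad wr if err
 */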
static int i40iw_post_send(struct ib_qp *ibqp,
			   struct ib_send_wr *ib_wr,
			   struct ib_send_wr **bad_wr)
{
	struct i40iw_qp *iwqp;
	struct i40iw_qp_uk *ukqp;
	struct i40iw_post_sq_info info;
	enum i40iw_status_code ret;
	int err = 0;
	unsigned long flags;
	bool inv_stag;

	iwqp = (struct i40iw_qp *)ibqp;
	ukqp = &iwqp->sc_qp.qp_uk;

	spin_lock_irqsave(&iwqp->lock, flags);

	if (iwqp->flush_issued) {
		err = -EINVAL;
		goto out;
	}

	while (ib_wr) {
		inv_stag = false;
		memset(&info, 0, sizeof(info));
		info.wr_id = (u64)(ib_wr->wr_id);
		if ((ib_wr->send_flags & IB_SEND_SIGNALED) || iwqp->sig_all)
			info.signaled = true;
		if (ib_wr->send_flags & IB_SEND_FENCE)
			info.read_fence = true;

		switch (ib_wr->opcode) {
		case IB_WR_SEND:
			/* fall-through */
		case IB_WR_SEND_WITH_INV:
			if (ib_wr->opcode == IB_WR_SEND) {
				if (ib_wr->send_flags & IB_SEND_SOLICITED)
					info.op_type = I40IW_OP_TYPE_SEND_SOL;
				else
					info.op_type = I40IW_OP_TYPE_SEND;
			} else {
				if (ib_wr->send_flags & IB_SEND_SOLICITED)
					info.op_type = I40IW_OP_TYPE_SEND_SOL_INV;
				else
					info.op_type = I40IW_OP_TYPE_SEND_INV;
			}

			if (ib_wr->send_flags & IB_SEND_INLINE) {
				info.op.inline_send.data = (void *)(unsigned long)ib_wr->sg_list[0].addr;
				info.op.inline_send.len = ib_wr->sg_list[0].length;
				ret = ukqp->ops.iw_inline_send(ukqp, &info, ib_wr->ex.invalidate_rkey, false);
			} else {
				info.op.send.num_sges = ib_wr->num_sge;
				info.op.send.sg_list = (struct i40iw_sge *)ib_wr->sg_list;
				ret = ukqp->ops.iw_send(ukqp, &info, ib_wr->ex.invalidate_rkey, false);
			}

			if (ret) {
				if (ret == I40IW_ERR_QP_TOOMANY_WRS_POSTED)
					err = -ENOMEM;
				else
					err = -EINVAL;
			}
			break;
		case IB_WR_RDMA_WRITE:
			info.op_type = I40IW_OP_TYPE_RDMA_WRITE;

			if (ib_wr->send_flags & IB_SEND_INLINE) {
				info.op.inline_rdma_write.data = (void *)(unsigned long)ib_wr->sg_list[0].addr;
				info.op.inline_rdma_write.len = ib_wr->sg_list[0].length;
				info.op.inline_rdma_write.rem_addr.tag_off = rdma_wr(ib_wr)->remote_addr;
				info.op.inline_rdma_write.rem_addr.stag = rdma_wr(ib_wr)->rkey;
				ret = ukqp->ops.iw_inline_rdma_write(ukqp, &info, false);
			} else {
				info.op.rdma_write.lo_sg_list = (void *)ib_wr->sg_list;
				info.op.rdma_write.num_lo_sges = ib_wr->num_sge;
				info.op.rdma_write.rem_addr.tag_off = rdma_wr(ib_wr)->remote_addr;
				info.op.rdma_write.rem_addr.stag = rdma_wr(ib_wr)->rkey;
				ret = ukqp->ops.iw_rdma_write(ukqp, &info, false);
			}

			if (ret) {
				if (ret == I40IW_ERR_QP_TOOMANY_WRS_POSTED)
					err = -ENOMEM;
				else
					err = -EINVAL;
			}
			break;
		case IB_WR_RDMA_READ_WITH_INV:
			inv_stag = true;
			/* fall-through */
		case IB_WR_RDMA_READ:
			if (ib_wr->num_sge > I40IW_MAX_SGE_RD) {
				err = -EINVAL;
				break;
			}
			info.op_type = I40IW_OP_TYPE_RDMA_READ;
			info.op.rdma_read.rem_addr.tag_off = rdma_wr(ib_wr)->remote_addr;
			info.op.rdma_read.rem_addr.stag = rdma_wr(ib_wr)->rkey;
			info.op.rdma_read.lo_addr.tag_off = ib_wr->sg_list->addr;
			info.op.rdma_read.lo_addr.stag = ib_wr->sg_list->lkey;
			info.op.rdma_read.lo_addr.len = ib_wr->sg_list->length;
			ret = ukqp->ops.iw_rdma_read(ukqp, &info, inv_stag, false);
			if (ret) {
				if (ret == I40IW_ERR_QP_TOOMANY_WRS_POSTED)
					err = -ENOMEM;
				else
					err = -EINVAL;
			}
			break;
		case IB_WR_LOCAL_INV:
			info.op_type = I40IW_OP_TYPE_INV_STAG;
			info.op.inv_local_stag.target_stag = ib_wr->ex.invalidate_rkey;
			ret = ukqp->ops.iw_stag_local_invalidate(ukqp, &info, true);
			if (ret)
				err = -ENOMEM;
			break;
		case IB_WR_REG_MR:
		{
			struct i40iw_mr *iwmr = to_iwmr(reg_wr(ib_wr)->mr);
			int flags = reg_wr(ib_wr)->access;
			struct i40iw_pble_alloc *palloc = &iwmr->iwpbl.pble_alloc;
			struct i40iw_sc_dev *dev = &iwqp->iwdev->sc_dev;
			struct i40iw_fast_reg_stag_info info;

			memset(&info, 0, sizeof(info));
			info.access_rights = I40IW_ACCESS_FLAGS_LOCALREAD;
			info.access_rights |= i40iw_get_user_access(flags);
			info.stag_key = reg_wr(ib_wr)->key & 0xff;
			info.stag_idx = reg_wr(ib_wr)->key >> 8;
			info.page_size = reg_wr(ib_wr)->mr->page_size;
			info.wr_id = ib_wr->wr_id;

			info.addr_type = I40IW_ADDR_TYPE_VA_BASED;
			info.va = (void *)(uintptr_t)iwmr->ibmr.iova;
			info.total_len = iwmr->ibmr.length;
			info.reg_addr_pa = *(u64 *)palloc->level1.addr;
			info.first_pm_pbl_index = palloc->level1.idx;
			info.local_fence = ib_wr->send_flags & IB_SEND_FENCE;
			info.signaled = ib_wr->send_flags & IB_SEND_SIGNALED;

			if (iwmr->npages > I40IW_MIN_PAGES_PER_FMR)
				info.chunk_size = 1;

			ret = dev->iw_priv_qp_ops->iw_mr_fast_register(&iwqp->sc_qp, &info, true);
			if (ret)
				err = -ENOMEM;
			break;
		}
		default:
			err = -EINVAL;
			i40iw_pr_err("post_send bad opcode = 0x%x\n",
				     ib_wr->opcode);
			break;
		}

		if (err)
			break;
		ib_wr = ib_wr->next;
	}

out:
	if (err)
		*bad_wr = ib_wr;
	else
		ukqp->ops.iw_qp_post_wr(ukqp);
	spin_unlock_irqrestore(&iwqp->lock, flags);

	return err;
}
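
/**
 * i40iw_post_recv - post receive wr for kernel application
 * @ibqp: ib qp pointer
 * @ib_wr: work request for receive
 * @bad_wr: bad wr caused an error
 */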
static int i40iw_post_recv(struct ib_qp *ibqp,
			   struct ib_recv_wr *ib_wr,
			   struct ib_recv_wr **bad_wr)
{
	struct i40iw_qp *iwqp;
	struct i40iw_qp_uk *ukqp;
	struct i40iw_post_rq_info post_recv;
	struct i40iw_sge sg_list[I40IW_MAX_WQ_FRAGMENT_COUNT];
	enum i40iw_status_code ret = 0;
	unsigned long flags;
	int err = 0;

	iwqp = (struct i40iw_qp *)ibqp;
	ukqp = &iwqp->sc_qp.qp_uk;

	memset(&post_recv, 0, sizeof(post_recv));
	spin_lock_irqsave(&iwqp->lock, flags);

	if (iwqp->flush_issued) {
		err = -EINVAL;
		goto out;
	}

	while (ib_wr) {
		post_recv.num_sges = ib_wr->num_sge;
		post_recv.wr_id = ib_wr->wr_id;
		i40iw_copy_sg_list(sg_list, ib_wr->sg_list, ib_wr->num_sge);
		post_recv.sg_list = sg_list;
		ret = ukqp->ops.iw_post_receive(ukqp, &post_recv);
		if (ret) {
			i40iw_pr_err(" post_recv err %d\n", ret);
			if (ret == I40IW_ERR_QP_TOOMANY_WRS_POSTED)
				err = -ENOMEM;
			else
				err = -EINVAL;
			*bad_wr = ib_wr;
			goto out;
		}
		ib_wr = ib_wr->next;
	}
 out:
	spin_unlock_irqrestore(&iwqp->lock, flags);
	return err;
}
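
/**
 * i40iw_poll_cq - poll cq for completion (kernel apps)
 * @ibcq: cq to poll
 * @num_entries: number of entries to poll
 * @entry: wr of entry completed
 */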
static int i40iw_poll_cq(struct ib_cq *ibcq,
			 int num_entries,
			 struct ib_wc *entry)
{
	struct i40iw_cq *iwcq;
	int cqe_count = 0;
	struct i40iw_cq_poll_info cq_poll_info;
	enum i40iw_status_code ret;
	struct i40iw_cq_uk *ukcq;
	struct i40iw_sc_qp *qp;
	struct i40iw_qp *iwqp;
	unsigned long flags;

	iwcq = (struct i40iw_cq *)ibcq;
	ukcq = &iwcq->sc_cq.cq_uk;

	spin_lock_irqsave(&iwcq->lock, flags);
	while (cqe_count < num_entries) {
		ret = ukcq->ops.iw_cq_poll_completion(ukcq, &cq_poll_info);
		if (ret == I40IW_ERR_QUEUE_EMPTY) {
			break;
		} else if (ret == I40IW_ERR_QUEUE_DESTROYED) {
			continue;
		} else if (ret) {
			if (!cqe_count)
				cqe_count = -1;
			break;
		}
		entry->wc_flags = 0;
		entry->wr_id = cq_poll_info.wr_id;
		if (cq_poll_info.error) {
			entry->status = IB_WC_WR_FLUSH_ERR;
			entry->vendor_err = cq_poll_info.major_err << 16 | cq_poll_info.minor_err;
		} else {
			entry->status = IB_WC_SUCCESS;
		}

		switch (cq_poll_info.op_type) {
		case I40IW_OP_TYPE_RDMA_WRITE:
			entry->opcode = IB_WC_RDMA_WRITE;
			break;
		case I40IW_OP_TYPE_RDMA_READ_INV_STAG:
		case I40IW_OP_TYPE_RDMA_READ:
			entry->opcode = IB_WC_RDMA_READ;
			break;
		case I40IW_OP_TYPE_SEND_SOL:
		case I40IW_OP_TYPE_SEND_SOL_INV:
		case I40IW_OP_TYPE_SEND_INV:
		case I40IW_OP_TYPE_SEND:
			entry->opcode = IB_WC_SEND;
			break;
		case I40IW_OP_TYPE_REC:
			entry->opcode = IB_WC_RECV;
			break;
		default:
			entry->opcode = IB_WC_RECV;
			break;
		}

		entry->ex.imm_data = 0;
		qp = (struct i40iw_sc_qp *)cq_poll_info.qp_handle;
		entry->qp = (struct ib_qp *)qp->back_qp;
		entry->src_qp = cq_poll_info.qp_id;
		iwqp = (struct i40iw_qp *)qp->back_qp;
		if (iwqp->iwarp_state > I40IW_QP_STATE_RTS) {
			if (!I40IW_RING_MORE_WORK(qp->qp_uk.sq_ring))
				complete(&iwqp->sq_drained);
			if (!I40IW_RING_MORE_WORK(qp->qp_uk.rq_ring))
				complete(&iwqp->rq_drained);
		}
		entry->byte_len = cq_poll_info.bytes_xfered;
		entry++;
		cqe_count++;
	}
	spin_unlock_irqrestore(&iwcq->lock, flags);
	return cqe_count;
}
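
/**
 * i40iw_req_notify_cq - arm cq for notification
 * @ibcq: cq to arm
 * @notify_flags: notification flags
 */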
static int i40iw_req_notify_cq(struct ib_cq *ibcq,
			       enum ib_cq_notify_flags notify_flags)
{
	struct i40iw_cq *iwcq;
	struct i40iw_cq_uk *ukcq;
	unsigned long flags;
	enum i40iw_completion_notify cq_notify = IW_CQ_COMPL_EVENT;

	iwcq = (struct i40iw_cq *)ibcq;
	ukcq = &iwcq->sc_cq.cq_uk;
	if (notify_flags == IB_CQ_SOLICITED)
		cq_notify = IW_CQ_COMPL_SOLICITED;
	spin_lock_irqsave(&iwcq->lock, flags);
	ukcq->ops.iw_cq_request_notification(ukcq, cq_notify);
	spin_unlock_irqrestore(&iwcq->lock, flags);
	return 0;
}
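
/**
 * i40iw_port_immutable - return port's immutable data
 * @ibdev: ib dev struct
 * @port_num: port number
 * @immutable: immutable data for the port to return
 */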
static int i40iw_port_immutable(struct ib_device *ibdev, u8 port_num,
				struct ib_port_immutable *immutable)
{
	struct ib_port_attr attr;
	int err;

	immutable->core_cap_flags = RDMA_CORE_PORT_IWARP;

	err = ib_query_port(ibdev, port_num, &attr);
	if (err)
		return err;

	immutable->pkey_tbl_len = attr.pkey_tbl_len;
	immutable->gid_tbl_len = attr.gid_tbl_len;

	return 0;
}

static const char * const i40iw_hw_stat_names[] = {
	/* 32bit names */
	[I40IW_HW_STAT_INDEX_IP4RXDISCARD] = "ip4InDiscards",
	[I40IW_HW_STAT_INDEX_IP4RXTRUNC] = "ip4InTruncatedPkts",
	[I40IW_HW_STAT_INDEX_IP4TXNOROUTE] = "ip4OutNoRoutes",
	[I40IW_HW_STAT_INDEX_IP6RXDISCARD] = "ip6InDiscards",
	[I40IW_HW_STAT_INDEX_IP6RXTRUNC] = "ip6InTruncatedPkts",
	[I40IW_HW_STAT_INDEX_IP6TXNOROUTE] = "ip6OutNoRoutes",
	[I40IW_HW_STAT_INDEX_TCPRTXSEG] = "tcpRetransSegs",
	[I40IW_HW_STAT_INDEX_TCPRXOPTERR] = "tcpInOptErrors",
	[I40IW_HW_STAT_INDEX_TCPRXPROTOERR] = "tcpInProtoErrors",
	/* 64bit names */
	[I40IW_HW_STAT_INDEX_IP4RXOCTS + I40IW_HW_STAT_INDEX_MAX_32] =
		"ip4InOctets",
	[I40IW_HW_STAT_INDEX_IP4RXPKTS + I40IW_HW_STAT_INDEX_MAX_32] =
		"ip4InPkts",
	[I40IW_HW_STAT_INDEX_IP4RXFRAGS + I40IW_HW_STAT_INDEX_MAX_32] =
		"ip4InReasmRqd",
	[I40IW_HW_STAT_INDEX_IP4RXMCPKTS + I40IW_HW_STAT_INDEX_MAX_32] =
		"ip4InMcastPkts",
	[I40IW_HW_STAT_INDEX_IP4TXOCTS + I40IW_HW_STAT_INDEX_MAX_32] =
		"ip4OutOctets",
	[I40IW_HW_STAT_INDEX_IP4TXPKTS + I40IW_HW_STAT_INDEX_MAX_32] =
		"ip4OutPkts",
	[I40IW_HW_STAT_INDEX_IP4TXFRAGS + I40IW_HW_STAT_INDEX_MAX_32] =
		"ip4OutSegRqd",
	[I40IW_HW_STAT_INDEX_IP4TXMCPKTS + I40IW_HW_STAT_INDEX_MAX_32] =
		"ip4OutMcastPkts",
	[I40IW_HW_STAT_INDEX_IP6RXOCTS + I40IW_HW_STAT_INDEX_MAX_32] =
		"ip6InOctets",
	[I40IW_HW_STAT_INDEX_IP6RXPKTS + I40IW_HW_STAT_INDEX_MAX_32] =
		"ip6InPkts",
	[I40IW_HW_STAT_INDEX_IP6RXFRAGS + I40IW_HW_STAT_INDEX_MAX_32] =
		"ip6InReasmRqd",
	[I40IW_HW_STAT_INDEX_IP6RXMCPKTS + I40IW_HW_STAT_INDEX_MAX_32] =
		"ip6InMcastPkts",
	[I40IW_HW_STAT_INDEX_IP6TXOCTS + I40IW_HW_STAT_INDEX_MAX_32] =
		"ip6OutOctets",
	[I40IW_HW_STAT_INDEX_IP6TXPKTS + I40IW_HW_STAT_INDEX_MAX_32] =
		"ip6OutPkts",
	[I40IW_HW_STAT_INDEX_IP6TXFRAGS + I40IW_HW_STAT_INDEX_MAX_32] =
		"ip6OutSegRqd",
	[I40IW_HW_STAT_INDEX_IP6TXMCPKTS + I40IW_HW_STAT_INDEX_MAX_32] =
		"ip6OutMcastPkts",
	[I40IW_HW_STAT_INDEX_TCPRXSEGS + I40IW_HW_STAT_INDEX_MAX_32] =
		"tcpInSegs",
	[I40IW_HW_STAT_INDEX_TCPTXSEG + I40IW_HW_STAT_INDEX_MAX_32] =
		"tcpOutSegs",
	[I40IW_HW_STAT_INDEX_RDMARXRDS + I40IW_HW_STAT_INDEX_MAX_32] =
		"iwInRdmaReads",
	[I40IW_HW_STAT_INDEX_RDMARXSNDS + I40IW_HW_STAT_INDEX_MAX_32] =
		"iwInRdmaSends",
	[I40IW_HW_STAT_INDEX_RDMARXWRS + I40IW_HW_STAT_INDEX_MAX_32] =
		"iwInRdmaWrites",
	[I40IW_HW_STAT_INDEX_RDMATXRDS + I40IW_HW_STAT_INDEX_MAX_32] =
		"iwOutRdmaReads",
	[I40IW_HW_STAT_INDEX_RDMATXSNDS + I40IW_HW_STAT_INDEX_MAX_32] =
		"iwOutRdmaSends",
	[I40IW_HW_STAT_INDEX_RDMATXWRS + I40IW_HW_STAT_INDEX_MAX_32] =
		"iwOutRdmaWrites",
	[I40IW_HW_STAT_INDEX_RDMAVBND + I40IW_HW_STAT_INDEX_MAX_32] =
		"iwRdmaBnd",
	[I40IW_HW_STAT_INDEX_RDMAVINV + I40IW_HW_STAT_INDEX_MAX_32] =
		"iwRdmaInv"
};
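
/**
 * i40iw_get_dev_fw_str - report firmware version as a string
 * @dev: ib device
 * @str: buffer to receive the version string
 */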
static void i40iw_get_dev_fw_str(struct ib_device *dev, char *str)
{
	u32 firmware_version = I40IW_FW_VERSION;

	snprintf(str, IB_FW_VERSION_NAME_MAX, "%u.%u", firmware_version,
		 (firmware_version & 0x000000ff));
}
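
/**
 * i40iw_alloc_hw_stats - Allocate a hw stats structure
 * @ibdev: device pointer from stack
 * @port_num: port number
 */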
static struct rdma_hw_stats *i40iw_alloc_hw_stats(struct ib_device *ibdev,
						  u8 port_num)
{
	struct i40iw_device *iwdev = to_iwdev(ibdev);
	struct i40iw_sc_dev *dev = &iwdev->sc_dev;
	int num_counters = I40IW_HW_STAT_INDEX_MAX_32 +
		I40IW_HW_STAT_INDEX_MAX_64;
	unsigned long lifespan = RDMA_HW_STATS_DEFAULT_LIFESPAN;

	BUILD_BUG_ON(ARRAY_SIZE(i40iw_hw_stat_names) !=
		     (I40IW_HW_STAT_INDEX_MAX_32 +
		      I40IW_HW_STAT_INDEX_MAX_64));

	/*
	 * PFs get the default update lifespan, but VFs only update once
	 * per second
	 */
	if (!dev->is_pf)
		lifespan = 1000;
	return rdma_alloc_hw_stats_struct(i40iw_hw_stat_names, num_counters,
					  lifespan);
}
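
/**
 * i40iw_get_hw_stats - Populates the rdma_hw_stats structure
 * @ibdev: device pointer from stack
 * @stats: stats pointer from stack
 * @port_num: port number
 * @index: which hw counter the stack is requesting we update
 */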
static int i40iw_get_hw_stats(struct ib_device *ibdev,
			      struct rdma_hw_stats *stats,
			      u8 port_num, int index)
{
	struct i40iw_device *iwdev = to_iwdev(ibdev);
	struct i40iw_sc_dev *dev = &iwdev->sc_dev;
	struct i40iw_vsi_pestat *devstat = iwdev->vsi.pestat;
	struct i40iw_dev_hw_stats *hw_stats = &devstat->hw_stats;

	if (dev->is_pf) {
		i40iw_hw_stats_read_all(devstat, &devstat->hw_stats);
	} else {
		if (i40iw_vchnl_vf_get_pe_stats(dev, &devstat->hw_stats))
			return -ENOSYS;
	}

	memcpy(&stats->value[0], hw_stats, sizeof(*hw_stats));

	return stats->num_counters;
}
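
/**
 * i40iw_query_gid - Query port GID
 * @ibdev: device pointer from stack
 * @port: port number
 * @index: Entry index
 * @gid: Global ID
 */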
static int i40iw_query_gid(struct ib_device *ibdev,
			   u8 port,
			   int index,
			   union ib_gid *gid)
{
	struct i40iw_device *iwdev = to_iwdev(ibdev);

	memset(gid->raw, 0, sizeof(gid->raw));
	ether_addr_copy(gid->raw, iwdev->netdev->dev_addr);
	return 0;
}
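
/**
 * i40iw_modify_port - modify port attributes
 * @ibdev: device pointer from stack
 * @port: port number
 * @port_modify_mask: mask for port modifications
 * @props: port properties
 */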
static int i40iw_modify_port(struct ib_device *ibdev,
			     u8 port,
			     int port_modify_mask,
			     struct ib_port_modify *props)
{
	return -ENOSYS;
}
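
/**
 * i40iw_query_pkey - Query partition key
 * @ibdev: device pointer from stack
 * @port: port number
 * @index: index of pkey
 * @pkey: pointer to store the pkey
 */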
static int i40iw_query_pkey(struct ib_device *ibdev,
			    u8 port,
			    u16 index,
			    u16 *pkey)
{
	*pkey = 0;
	return 0;
}
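
/**
 * i40iw_create_ah - create address handle (not supported)
 * @ibpd: ptr of pd
 * @attr: address handle attributes
 * @udata: user data
 */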
static struct ib_ah *i40iw_create_ah(struct ib_pd *ibpd,
				     struct rdma_ah_attr *attr,
				     struct ib_udata *udata)
{
	return ERR_PTR(-ENOSYS);
}
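
/**
 * i40iw_destroy_ah - Destroy address handle (not supported)
 * @ah: pointer to address handle
 */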
static int i40iw_destroy_ah(struct ib_ah *ah)
{
	return -ENOSYS;
}
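
/**
 * i40iw_get_vector_affinity - report IRQ affinity mask for a completion vector
 * @ibdev: IB device
 * @comp_vector: completion vector index
 */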
static const struct cpumask *i40iw_get_vector_affinity(struct ib_device *ibdev,
						       int comp_vector)
{
	struct i40iw_device *iwdev = to_iwdev(ibdev);
	struct i40iw_msix_vector *msix_vec;

	if (iwdev->msix_shared)
		msix_vec = &iwdev->iw_msixtbl[comp_vector];
	else
		msix_vec = &iwdev->iw_msixtbl[comp_vector + 1];

	return irq_get_affinity_mask(msix_vec->irq);
}
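
/**
 * i40iw_init_rdma_device - initialization of iwarp device
 * @iwdev: iwarp device
 */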
static struct i40iw_ib_device *i40iw_init_rdma_device(struct i40iw_device *iwdev)
{
	struct i40iw_ib_device *iwibdev;
	struct net_device *netdev = iwdev->netdev;
	struct pci_dev *pcidev = (struct pci_dev *)iwdev->hw.dev_context;

	iwibdev = (struct i40iw_ib_device *)ib_alloc_device(sizeof(*iwibdev));
	if (!iwibdev) {
		i40iw_pr_err("ib_alloc_device failed\n");
		return NULL;
	}
	strlcpy(iwibdev->ibdev.name, "i40iw%d", IB_DEVICE_NAME_MAX);
	iwibdev->ibdev.owner = THIS_MODULE;
	iwdev->iwibdev = iwibdev;
	iwibdev->iwdev = iwdev;

	iwibdev->ibdev.node_type = RDMA_NODE_RNIC;
	ether_addr_copy((u8 *)&iwibdev->ibdev.node_guid, netdev->dev_addr);

	iwibdev->ibdev.uverbs_cmd_mask =
	    (1ull << IB_USER_VERBS_CMD_GET_CONTEXT) |
	    (1ull << IB_USER_VERBS_CMD_QUERY_DEVICE) |
	    (1ull << IB_USER_VERBS_CMD_QUERY_PORT) |
	    (1ull << IB_USER_VERBS_CMD_ALLOC_PD) |
	    (1ull << IB_USER_VERBS_CMD_DEALLOC_PD) |
	    (1ull << IB_USER_VERBS_CMD_REG_MR) |
	    (1ull << IB_USER_VERBS_CMD_DEREG_MR) |
	    (1ull << IB_USER_VERBS_CMD_CREATE_COMP_CHANNEL) |
	    (1ull << IB_USER_VERBS_CMD_CREATE_CQ) |
	    (1ull << IB_USER_VERBS_CMD_DESTROY_CQ) |
	    (1ull << IB_USER_VERBS_CMD_REQ_NOTIFY_CQ) |
	    (1ull << IB_USER_VERBS_CMD_CREATE_QP) |
	    (1ull << IB_USER_VERBS_CMD_MODIFY_QP) |
	    (1ull << IB_USER_VERBS_CMD_QUERY_QP) |
	    (1ull << IB_USER_VERBS_CMD_POLL_CQ) |
	    (1ull << IB_USER_VERBS_CMD_CREATE_AH) |
	    (1ull << IB_USER_VERBS_CMD_DESTROY_AH) |
	    (1ull << IB_USER_VERBS_CMD_DESTROY_QP) |
	    (1ull << IB_USER_VERBS_CMD_POST_RECV) |
	    (1ull << IB_USER_VERBS_CMD_POST_SEND);
	iwibdev->ibdev.phys_port_cnt = 1;
	iwibdev->ibdev.num_comp_vectors = iwdev->ceqs_count;
	iwibdev->ibdev.dev.parent = &pcidev->dev;
	iwibdev->ibdev.query_port = i40iw_query_port;
	iwibdev->ibdev.modify_port = i40iw_modify_port;
	iwibdev->ibdev.query_pkey = i40iw_query_pkey;
	iwibdev->ibdev.query_gid = i40iw_query_gid;
	iwibdev->ibdev.alloc_ucontext = i40iw_alloc_ucontext;
	iwibdev->ibdev.dealloc_ucontext = i40iw_dealloc_ucontext;
	iwibdev->ibdev.mmap = i40iw_mmap;
	iwibdev->ibdev.alloc_pd = i40iw_alloc_pd;
	iwibdev->ibdev.dealloc_pd = i40iw_dealloc_pd;
	iwibdev->ibdev.create_qp = i40iw_create_qp;
	iwibdev->ibdev.modify_qp = i40iw_modify_qp;
	iwibdev->ibdev.query_qp = i40iw_query_qp;
	iwibdev->ibdev.destroy_qp = i40iw_destroy_qp;
	iwibdev->ibdev.create_cq = i40iw_create_cq;
	iwibdev->ibdev.destroy_cq = i40iw_destroy_cq;
	iwibdev->ibdev.get_dma_mr = i40iw_get_dma_mr;
	iwibdev->ibdev.reg_user_mr = i40iw_reg_user_mr;
	iwibdev->ibdev.dereg_mr = i40iw_dereg_mr;
	iwibdev->ibdev.alloc_hw_stats = i40iw_alloc_hw_stats;
	iwibdev->ibdev.get_hw_stats = i40iw_get_hw_stats;
	iwibdev->ibdev.query_device = i40iw_query_device;
	iwibdev->ibdev.create_ah = i40iw_create_ah;
	iwibdev->ibdev.destroy_ah = i40iw_destroy_ah;
	iwibdev->ibdev.drain_sq = i40iw_drain_sq;
	iwibdev->ibdev.drain_rq = i40iw_drain_rq;
	iwibdev->ibdev.alloc_mr = i40iw_alloc_mr;
	iwibdev->ibdev.map_mr_sg = i40iw_map_mr_sg;
	iwibdev->ibdev.iwcm = kzalloc(sizeof(*iwibdev->ibdev.iwcm), GFP_KERNEL);
	if (!iwibdev->ibdev.iwcm) {
		ib_dealloc_device(&iwibdev->ibdev);
		return NULL;
	}

	iwibdev->ibdev.iwcm->add_ref = i40iw_add_ref;
	iwibdev->ibdev.iwcm->rem_ref = i40iw_rem_ref;
	iwibdev->ibdev.iwcm->get_qp = i40iw_get_qp;
	iwibdev->ibdev.iwcm->connect = i40iw_connect;
	iwibdev->ibdev.iwcm->accept = i40iw_accept;
	iwibdev->ibdev.iwcm->reject = i40iw_reject;
	iwibdev->ibdev.iwcm->create_listen = i40iw_create_listen;
	iwibdev->ibdev.iwcm->destroy_listen = i40iw_destroy_listen;
	memcpy(iwibdev->ibdev.iwcm->ifname, netdev->name,
	       sizeof(iwibdev->ibdev.iwcm->ifname));
	iwibdev->ibdev.get_port_immutable = i40iw_port_immutable;
	iwibdev->ibdev.get_dev_fw_str = i40iw_get_dev_fw_str;
	iwibdev->ibdev.poll_cq = i40iw_poll_cq;
	iwibdev->ibdev.req_notify_cq = i40iw_req_notify_cq;
	iwibdev->ibdev.post_send = i40iw_post_send;
	iwibdev->ibdev.post_recv = i40iw_post_recv;
	iwibdev->ibdev.get_vector_affinity = i40iw_get_vector_affinity;

	return iwibdev;
}
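
/**
 * i40iw_port_ibevent - indicate port event
 * @iwdev: iwarp device
 */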
void i40iw_port_ibevent(struct i40iw_device *iwdev)
{
	struct i40iw_ib_device *iwibdev = iwdev->iwibdev;
	struct ib_event event;

	event.device = &iwibdev->ibdev;
	event.element.port_num = 1;
	event.event = iwdev->iw_status ? IB_EVENT_PORT_ACTIVE : IB_EVENT_PORT_ERR;
	ib_dispatch_event(&event);
}
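
/**
 * i40iw_unregister_rdma_device - unregister of iwarp from IB
 * @iwibdev: rdma device ptr
 */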
static void i40iw_unregister_rdma_device(struct i40iw_ib_device *iwibdev)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(i40iw_dev_attributes); ++i)
		device_remove_file(&iwibdev->ibdev.dev,
				   i40iw_dev_attributes[i]);
	ib_unregister_device(&iwibdev->ibdev);
}
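
/**
 * i40iw_destroy_rdma_device - destroy rdma device and free resources
 * @iwibdev: IB device ptr
 */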
void i40iw_destroy_rdma_device(struct i40iw_ib_device *iwibdev)
{
	if (!iwibdev)
		return;

	i40iw_unregister_rdma_device(iwibdev);
	kfree(iwibdev->ibdev.iwcm);
	iwibdev->ibdev.iwcm = NULL;
	wait_event_timeout(iwibdev->iwdev->close_wq,
			   !atomic64_read(&iwibdev->iwdev->use_count),
			   I40IW_EVENT_TIMEOUT);
	ib_dealloc_device(&iwibdev->ibdev);
}
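
/**
 * i40iw_register_rdma_device - register iwarp device to IB
 * @iwdev: iwarp device
 */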
int i40iw_register_rdma_device(struct i40iw_device *iwdev)
{
	int i, ret;
	struct i40iw_ib_device *iwibdev;

	iwdev->iwibdev = i40iw_init_rdma_device(iwdev);
	if (!iwdev->iwibdev)
		return -ENOMEM;
	iwibdev = iwdev->iwibdev;

	iwibdev->ibdev.driver_id = RDMA_DRIVER_I40IW;
	ret = ib_register_device(&iwibdev->ibdev, NULL);
	if (ret)
		goto error;

	for (i = 0; i < ARRAY_SIZE(i40iw_dev_attributes); ++i) {
		ret = device_create_file(&iwibdev->ibdev.dev,
					 i40iw_dev_attributes[i]);
		if (ret) {
			while (i > 0) {
				i--;
				device_remove_file(&iwibdev->ibdev.dev, i40iw_dev_attributes[i]);
			}
			ib_unregister_device(&iwibdev->ibdev);
			goto error;
		}
	}
	return 0;
error:
	kfree(iwdev->iwibdev->ibdev.iwcm);
	iwdev->iwibdev->ibdev.iwcm = NULL;
	ib_dealloc_device(&iwdev->iwibdev->ibdev);
	return ret;
}