#include <linux/vmalloc.h>

#include <rdma/ib_addr.h>
#include <rdma/ib_umem.h>
#include <rdma/ib_user_verbs.h>
#include <rdma/ib_verbs.h>
#include <rdma/uverbs_ioctl.h>

#include "efa.h"

#define EFA_MMAP_FLAG_SHIFT 56
#define EFA_MMAP_PAGE_MASK GENMASK(EFA_MMAP_FLAG_SHIFT - 1, 0)
#define EFA_MMAP_INVALID U64_MAX

enum {
	EFA_MMAP_DMA_PAGE = 0,
	EFA_MMAP_IO_WC,
	EFA_MMAP_IO_NC,
};

#define EFA_AENQ_ENABLED_GROUPS \
	(BIT(EFA_ADMIN_FATAL_ERROR) | BIT(EFA_ADMIN_WARNING) | \
	 BIT(EFA_ADMIN_NOTIFICATION) | BIT(EFA_ADMIN_KEEP_ALIVE))

struct efa_mmap_entry {
	void *obj;
	u64 address;
	u64 length;
	u32 mmap_page;
	u8 mmap_flag;
};

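/*
 * An mmap key encodes the entry kind in its top 8 bits and the entry's first
 * page index (scaled by PAGE_SHIFT) in the low 56 bits; userspace passes the
 * key back as the mmap() offset.
 */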
static inline u64 get_mmap_key(const struct efa_mmap_entry *efa)
{
	return ((u64)efa->mmap_flag << EFA_MMAP_FLAG_SHIFT) |
	       ((u64)efa->mmap_page << PAGE_SHIFT);
}

#define EFA_CHUNK_PAYLOAD_SHIFT 12
#define EFA_CHUNK_PAYLOAD_SIZE BIT(EFA_CHUNK_PAYLOAD_SHIFT)
#define EFA_CHUNK_PAYLOAD_PTR_SIZE 8

#define EFA_CHUNK_SHIFT 12
#define EFA_CHUNK_SIZE BIT(EFA_CHUNK_SHIFT)
#define EFA_CHUNK_PTR_SIZE sizeof(struct efa_com_ctrl_buff_info)

#define EFA_PTRS_PER_CHUNK \
	((EFA_CHUNK_SIZE - EFA_CHUNK_PTR_SIZE) / EFA_CHUNK_PAYLOAD_PTR_SIZE)

#define EFA_CHUNK_USED_SIZE \
	((EFA_PTRS_PER_CHUNK * EFA_CHUNK_PAYLOAD_PTR_SIZE) + EFA_CHUNK_PTR_SIZE)

#define EFA_SUPPORTED_ACCESS_FLAGS IB_ACCESS_LOCAL_WRITE

struct pbl_chunk {
	dma_addr_t dma_addr;
	u64 *buf;
	u32 length;
};

struct pbl_chunk_list {
	struct pbl_chunk *chunks;
	unsigned int size;
};

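/*
 * A page buffer list (PBL) holds the device-visible addresses of a memory
 * region's pages. A kmalloc'ed (physically contiguous) PBL is DMA mapped
 * directly; a vmalloc'ed one is handed to the device as a chunk list of
 * DMA-mapped EFA_CHUNK_SIZE buffers instead.
 */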
struct pbl_context {
	union {
		struct {
			dma_addr_t dma_addr;
		} continuous;
		struct {
			u32 pbl_buf_size_in_pages;
			struct scatterlist *sgl;
			int sg_dma_cnt;
			struct pbl_chunk_list chunk_list;
		} indirect;
	} phys;
	u64 *pbl_buf;
	u32 pbl_buf_size_in_bytes;
	u8 physically_continuous;
};

static inline struct efa_dev *to_edev(struct ib_device *ibdev)
{
	return container_of(ibdev, struct efa_dev, ibdev);
}

static inline struct efa_ucontext *to_eucontext(struct ib_ucontext *ibucontext)
{
	return container_of(ibucontext, struct efa_ucontext, ibucontext);
}

static inline struct efa_pd *to_epd(struct ib_pd *ibpd)
{
	return container_of(ibpd, struct efa_pd, ibpd);
}

static inline struct efa_mr *to_emr(struct ib_mr *ibmr)
{
	return container_of(ibmr, struct efa_mr, ibmr);
}

static inline struct efa_qp *to_eqp(struct ib_qp *ibqp)
{
	return container_of(ibqp, struct efa_qp, ibqp);
}

static inline struct efa_cq *to_ecq(struct ib_cq *ibcq)
{
	return container_of(ibcq, struct efa_cq, ibcq);
}

static inline struct efa_ah *to_eah(struct ib_ah *ibah)
{
	return container_of(ibah, struct efa_ah, ibah);
}

#define field_avail(x, fld, sz) (offsetof(typeof(x), fld) + \
				 sizeof(((typeof(x) *)0)->fld) <= (sz))

#define is_reserved_cleared(reserved) \
	!memchr_inv(reserved, 0, sizeof(reserved))

static void *efa_zalloc_mapped(struct efa_dev *dev, dma_addr_t *dma_addr,
			       size_t size, enum dma_data_direction dir)
{
	void *addr;

	addr = alloc_pages_exact(size, GFP_KERNEL | __GFP_ZERO);
	if (!addr)
		return NULL;

	*dma_addr = dma_map_single(&dev->pdev->dev, addr, size, dir);
	if (dma_mapping_error(&dev->pdev->dev, *dma_addr)) {
		ibdev_err(&dev->ibdev, "Failed to map DMA address\n");
		free_pages_exact(addr, size);
		return NULL;
	}

	return addr;
}

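/*
 * This is only called when the ucontext is destroyed and there can be no
 * concurrent query via mmap or allocation on the xa, so no other thread can
 * be using an entry pointer. All BAR pages have been zapped or unmapped by
 * this point; normal pages are refcounted and freed at the proper time.
 */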
static void mmap_entries_remove_free(struct efa_dev *dev,
				     struct efa_ucontext *ucontext)
{
	struct efa_mmap_entry *entry;
	unsigned long mmap_page;

	xa_for_each(&ucontext->mmap_xa, mmap_page, entry) {
		xa_erase(&ucontext->mmap_xa, mmap_page);

		ibdev_dbg(
			&dev->ibdev,
			"mmap: obj[0x%p] key[%#llx] addr[%#llx] len[%#llx] removed\n",
			entry->obj, get_mmap_key(entry), entry->address,
			entry->length);
		if (entry->mmap_flag == EFA_MMAP_DMA_PAGE)
			/* DMA mapping is already gone, now free the pages */
			free_pages_exact(phys_to_virt(entry->address),
					 entry->length);
		kfree(entry);
	}
}

static struct efa_mmap_entry *mmap_entry_get(struct efa_dev *dev,
					     struct efa_ucontext *ucontext,
					     u64 key, u64 len)
{
	struct efa_mmap_entry *entry;
	u64 mmap_page;

	mmap_page = (key & EFA_MMAP_PAGE_MASK) >> PAGE_SHIFT;
	if (mmap_page > U32_MAX)
		return NULL;

	entry = xa_load(&ucontext->mmap_xa, mmap_page);
	if (!entry || get_mmap_key(entry) != key || entry->length != len)
		return NULL;

	ibdev_dbg(&dev->ibdev,
		  "mmap: obj[0x%p] key[%#llx] addr[%#llx] len[%#llx] removed\n",
		  entry->obj, key, entry->address, entry->length);

	return entry;
}

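/*
 * Note this locking scheme cannot support removal of entries, except during
 * ucontext destruction when the core code guarantees no concurrency.
 */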
static u64 mmap_entry_insert(struct efa_dev *dev, struct efa_ucontext *ucontext,
			     void *obj, u64 address, u64 length, u8 mmap_flag)
{
	struct efa_mmap_entry *entry;
	u32 next_mmap_page;
	int err;

	entry = kmalloc(sizeof(*entry), GFP_KERNEL);
	if (!entry)
		return EFA_MMAP_INVALID;

	entry->obj = obj;
	entry->address = address;
	entry->length = length;
	entry->mmap_flag = mmap_flag;

	xa_lock(&ucontext->mmap_xa);
	if (check_add_overflow(ucontext->mmap_xa_page,
			       (u32)(length >> PAGE_SHIFT),
			       &next_mmap_page))
		goto err_unlock;

	entry->mmap_page = ucontext->mmap_xa_page;
	ucontext->mmap_xa_page = next_mmap_page;
	err = __xa_insert(&ucontext->mmap_xa, entry->mmap_page, entry,
			  GFP_KERNEL);
	if (err)
		goto err_unlock;

	xa_unlock(&ucontext->mmap_xa);

	ibdev_dbg(
		&dev->ibdev,
		"mmap: obj[0x%p] addr[%#llx], len[%#llx], key[%#llx] inserted\n",
		entry->obj, entry->address, entry->length, get_mmap_key(entry));

	return get_mmap_key(entry);

err_unlock:
	xa_unlock(&ucontext->mmap_xa);
	kfree(entry);
	return EFA_MMAP_INVALID;
}

int efa_query_device(struct ib_device *ibdev,
		     struct ib_device_attr *props,
		     struct ib_udata *udata)
{
	struct efa_com_get_device_attr_result *dev_attr;
	struct efa_ibv_ex_query_device_resp resp = {};
	struct efa_dev *dev = to_edev(ibdev);
	int err;

	if (udata && udata->inlen &&
	    !ib_is_udata_cleared(udata, 0, udata->inlen)) {
		ibdev_dbg(ibdev,
			  "Incompatible ABI params, udata not cleared\n");
		return -EINVAL;
	}

	dev_attr = &dev->dev_attr;

	memset(props, 0, sizeof(*props));
	props->max_mr_size = dev_attr->max_mr_pages * PAGE_SIZE;
	props->page_size_cap = dev_attr->page_size_cap;
	props->vendor_id = dev->pdev->vendor;
	props->vendor_part_id = dev->pdev->device;
	props->hw_ver = dev->pdev->subsystem_device;
	props->max_qp = dev_attr->max_qp;
	props->max_cq = dev_attr->max_cq;
	props->max_pd = dev_attr->max_pd;
	props->max_mr = dev_attr->max_mr;
	props->max_ah = dev_attr->max_ah;
	props->max_cqe = dev_attr->max_cq_depth;
	props->max_qp_wr = min_t(u32, dev_attr->max_sq_depth,
				 dev_attr->max_rq_depth);
	props->max_send_sge = dev_attr->max_sq_sge;
	props->max_recv_sge = dev_attr->max_rq_sge;

	if (udata && udata->outlen) {
		resp.max_sq_sge = dev_attr->max_sq_sge;
		resp.max_rq_sge = dev_attr->max_rq_sge;
		resp.max_sq_wr = dev_attr->max_sq_depth;
		resp.max_rq_wr = dev_attr->max_rq_depth;

		err = ib_copy_to_udata(udata, &resp,
				       min(sizeof(resp), udata->outlen));
		if (err) {
			ibdev_dbg(ibdev,
				  "Failed to copy udata for query_device\n");
			return err;
		}
	}

	return 0;
}

int efa_query_port(struct ib_device *ibdev, u8 port,
		   struct ib_port_attr *props)
{
	struct efa_dev *dev = to_edev(ibdev);

	props->lmc = 1;

	props->state = IB_PORT_ACTIVE;
	props->phys_state = 5;
	props->gid_tbl_len = 1;
	props->pkey_tbl_len = 1;
	props->active_speed = IB_SPEED_EDR;
	props->active_width = IB_WIDTH_4X;
	props->max_mtu = ib_mtu_int_to_enum(dev->mtu);
	props->active_mtu = ib_mtu_int_to_enum(dev->mtu);
	props->max_msg_sz = dev->mtu;
	props->max_vl_num = 1;

	return 0;
}

int efa_query_qp(struct ib_qp *ibqp, struct ib_qp_attr *qp_attr,
		 int qp_attr_mask,
		 struct ib_qp_init_attr *qp_init_attr)
{
	struct efa_dev *dev = to_edev(ibqp->device);
	struct efa_com_query_qp_params params = {};
	struct efa_com_query_qp_result result;
	struct efa_qp *qp = to_eqp(ibqp);
	int err;

#define EFA_QUERY_QP_SUPP_MASK \
	(IB_QP_STATE | IB_QP_PKEY_INDEX | IB_QP_PORT | \
	 IB_QP_QKEY | IB_QP_SQ_PSN | IB_QP_CAP)

	if (qp_attr_mask & ~EFA_QUERY_QP_SUPP_MASK) {
		ibdev_dbg(&dev->ibdev,
			  "Unsupported qp_attr_mask[%#x] supported[%#x]\n",
			  qp_attr_mask, EFA_QUERY_QP_SUPP_MASK);
		return -EOPNOTSUPP;
	}

	memset(qp_attr, 0, sizeof(*qp_attr));
	memset(qp_init_attr, 0, sizeof(*qp_init_attr));

	params.qp_handle = qp->qp_handle;
	err = efa_com_query_qp(&dev->edev, &params, &result);
	if (err)
		return err;

	qp_attr->qp_state = result.qp_state;
	qp_attr->qkey = result.qkey;
	qp_attr->sq_psn = result.sq_psn;
	qp_attr->sq_draining = result.sq_draining;
	qp_attr->port_num = 1;

	qp_attr->cap.max_send_wr = qp->max_send_wr;
	qp_attr->cap.max_recv_wr = qp->max_recv_wr;
	qp_attr->cap.max_send_sge = qp->max_send_sge;
	qp_attr->cap.max_recv_sge = qp->max_recv_sge;
	qp_attr->cap.max_inline_data = qp->max_inline_data;

	qp_init_attr->qp_type = ibqp->qp_type;
	qp_init_attr->recv_cq = ibqp->recv_cq;
	qp_init_attr->send_cq = ibqp->send_cq;
	qp_init_attr->qp_context = ibqp->qp_context;
	qp_init_attr->cap = qp_attr->cap;

	return 0;
}

int efa_query_gid(struct ib_device *ibdev, u8 port, int index,
		  union ib_gid *gid)
{
	struct efa_dev *dev = to_edev(ibdev);

	memcpy(gid->raw, dev->addr, sizeof(dev->addr));

	return 0;
}

int efa_query_pkey(struct ib_device *ibdev, u8 port, u16 index,
		   u16 *pkey)
{
	if (index > 0)
		return -EINVAL;

	*pkey = 0xffff;
	return 0;
}

static int efa_pd_dealloc(struct efa_dev *dev, u16 pdn)
{
	struct efa_com_dealloc_pd_params params = {
		.pdn = pdn,
	};

	return efa_com_dealloc_pd(&dev->edev, &params);
}

int efa_alloc_pd(struct ib_pd *ibpd, struct ib_udata *udata)
{
	struct efa_dev *dev = to_edev(ibpd->device);
	struct efa_ibv_alloc_pd_resp resp = {};
	struct efa_com_alloc_pd_result result;
	struct efa_pd *pd = to_epd(ibpd);
	int err;

	if (udata->inlen &&
	    !ib_is_udata_cleared(udata, 0, udata->inlen)) {
		ibdev_dbg(&dev->ibdev,
			  "Incompatible ABI params, udata not cleared\n");
		err = -EINVAL;
		goto err_out;
	}

	err = efa_com_alloc_pd(&dev->edev, &result);
	if (err)
		goto err_out;

	pd->pdn = result.pdn;
	resp.pdn = result.pdn;

	if (udata->outlen) {
		err = ib_copy_to_udata(udata, &resp,
				       min(sizeof(resp), udata->outlen));
		if (err) {
			ibdev_dbg(&dev->ibdev,
				  "Failed to copy udata for alloc_pd\n");
			goto err_dealloc_pd;
		}
	}

	ibdev_dbg(&dev->ibdev, "Allocated pd[%d]\n", pd->pdn);

	return 0;

err_dealloc_pd:
	efa_pd_dealloc(dev, result.pdn);
err_out:
	atomic64_inc(&dev->stats.sw_stats.alloc_pd_err);
	return err;
}

void efa_dealloc_pd(struct ib_pd *ibpd, struct ib_udata *udata)
{
	struct efa_dev *dev = to_edev(ibpd->device);
	struct efa_pd *pd = to_epd(ibpd);

	ibdev_dbg(&dev->ibdev, "Dealloc pd[%d]\n", pd->pdn);
	efa_pd_dealloc(dev, pd->pdn);
}

static int efa_destroy_qp_handle(struct efa_dev *dev, u32 qp_handle)
{
	struct efa_com_destroy_qp_params params = { .qp_handle = qp_handle };

	return efa_com_destroy_qp(&dev->edev, &params);
}

int efa_destroy_qp(struct ib_qp *ibqp, struct ib_udata *udata)
{
	struct efa_dev *dev = to_edev(ibqp->pd->device);
	struct efa_qp *qp = to_eqp(ibqp);
	int err;

	ibdev_dbg(&dev->ibdev, "Destroy qp[%u]\n", ibqp->qp_num);
	err = efa_destroy_qp_handle(dev, qp->qp_handle);
	if (err)
		return err;

	if (qp->rq_cpu_addr) {
		ibdev_dbg(&dev->ibdev,
			  "qp->cpu_addr[0x%p] freed: size[%lu], dma[%pad]\n",
			  qp->rq_cpu_addr, qp->rq_size,
			  &qp->rq_dma_addr);
		dma_unmap_single(&dev->pdev->dev, qp->rq_dma_addr, qp->rq_size,
				 DMA_TO_DEVICE);
	}

	kfree(qp);
	return 0;
}

static int qp_mmap_entries_setup(struct efa_qp *qp,
				 struct efa_dev *dev,
				 struct efa_ucontext *ucontext,
				 struct efa_com_create_qp_params *params,
				 struct efa_ibv_create_qp_resp *resp)
{
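	/*
	 * Once an entry is inserted it might be mmapped, hence cannot be
	 * cleaned up until dealloc_ucontext.
	 */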
	resp->sq_db_mmap_key =
		mmap_entry_insert(dev, ucontext, qp,
				  dev->db_bar_addr + resp->sq_db_offset,
				  PAGE_SIZE, EFA_MMAP_IO_NC);
	if (resp->sq_db_mmap_key == EFA_MMAP_INVALID)
		return -ENOMEM;

	resp->sq_db_offset &= ~PAGE_MASK;

	resp->llq_desc_mmap_key =
		mmap_entry_insert(dev, ucontext, qp,
				  dev->mem_bar_addr + resp->llq_desc_offset,
				  PAGE_ALIGN(params->sq_ring_size_in_bytes +
					     (resp->llq_desc_offset & ~PAGE_MASK)),
				  EFA_MMAP_IO_WC);
	if (resp->llq_desc_mmap_key == EFA_MMAP_INVALID)
		return -ENOMEM;

	resp->llq_desc_offset &= ~PAGE_MASK;

	if (qp->rq_size) {
		resp->rq_db_mmap_key =
			mmap_entry_insert(dev, ucontext, qp,
					  dev->db_bar_addr + resp->rq_db_offset,
					  PAGE_SIZE, EFA_MMAP_IO_NC);
		if (resp->rq_db_mmap_key == EFA_MMAP_INVALID)
			return -ENOMEM;

		resp->rq_db_offset &= ~PAGE_MASK;

		resp->rq_mmap_key =
			mmap_entry_insert(dev, ucontext, qp,
					  virt_to_phys(qp->rq_cpu_addr),
					  qp->rq_size, EFA_MMAP_DMA_PAGE);
		if (resp->rq_mmap_key == EFA_MMAP_INVALID)
			return -ENOMEM;

		resp->rq_mmap_size = qp->rq_size;
	}

	return 0;
}

static int efa_qp_validate_cap(struct efa_dev *dev,
			       struct ib_qp_init_attr *init_attr)
{
	if (init_attr->cap.max_send_wr > dev->dev_attr.max_sq_depth) {
		ibdev_dbg(&dev->ibdev,
			  "qp: requested send wr[%u] exceeds the max[%u]\n",
			  init_attr->cap.max_send_wr,
			  dev->dev_attr.max_sq_depth);
		return -EINVAL;
	}
	if (init_attr->cap.max_recv_wr > dev->dev_attr.max_rq_depth) {
		ibdev_dbg(&dev->ibdev,
			  "qp: requested receive wr[%u] exceeds the max[%u]\n",
			  init_attr->cap.max_recv_wr,
			  dev->dev_attr.max_rq_depth);
		return -EINVAL;
	}
	if (init_attr->cap.max_send_sge > dev->dev_attr.max_sq_sge) {
		ibdev_dbg(&dev->ibdev,
			  "qp: requested sge send[%u] exceeds the max[%u]\n",
			  init_attr->cap.max_send_sge, dev->dev_attr.max_sq_sge);
		return -EINVAL;
	}
	if (init_attr->cap.max_recv_sge > dev->dev_attr.max_rq_sge) {
		ibdev_dbg(&dev->ibdev,
			  "qp: requested sge recv[%u] exceeds the max[%u]\n",
			  init_attr->cap.max_recv_sge, dev->dev_attr.max_rq_sge);
		return -EINVAL;
	}
	if (init_attr->cap.max_inline_data > dev->dev_attr.inline_buf_size) {
		ibdev_dbg(&dev->ibdev,
			  "qp: requested inline data[%u] exceeds the max[%u]\n",
			  init_attr->cap.max_inline_data,
			  dev->dev_attr.inline_buf_size);
		return -EINVAL;
	}

	return 0;
}

static int efa_qp_validate_attr(struct efa_dev *dev,
				struct ib_qp_init_attr *init_attr)
{
	if (init_attr->qp_type != IB_QPT_DRIVER &&
	    init_attr->qp_type != IB_QPT_UD) {
		ibdev_dbg(&dev->ibdev,
			  "Unsupported qp type %d\n", init_attr->qp_type);
		return -EOPNOTSUPP;
	}

	if (init_attr->srq) {
		ibdev_dbg(&dev->ibdev, "SRQ is not supported\n");
		return -EOPNOTSUPP;
	}

	if (init_attr->create_flags) {
		ibdev_dbg(&dev->ibdev, "Unsupported create flags\n");
		return -EOPNOTSUPP;
	}

	return 0;
}

struct ib_qp *efa_create_qp(struct ib_pd *ibpd,
			    struct ib_qp_init_attr *init_attr,
			    struct ib_udata *udata)
{
	struct efa_com_create_qp_params create_qp_params = {};
	struct efa_com_create_qp_result create_qp_resp;
	struct efa_dev *dev = to_edev(ibpd->device);
	struct efa_ibv_create_qp_resp resp = {};
	struct efa_ibv_create_qp cmd = {};
	bool rq_entry_inserted = false;
	struct efa_ucontext *ucontext;
	struct efa_qp *qp;
	int err;

	ucontext = rdma_udata_to_drv_context(udata, struct efa_ucontext,
					     ibucontext);

	err = efa_qp_validate_cap(dev, init_attr);
	if (err)
		goto err_out;

	err = efa_qp_validate_attr(dev, init_attr);
	if (err)
		goto err_out;

	if (!field_avail(cmd, driver_qp_type, udata->inlen)) {
		ibdev_dbg(&dev->ibdev,
			  "Incompatible ABI params, no input udata\n");
		err = -EINVAL;
		goto err_out;
	}

	if (udata->inlen > sizeof(cmd) &&
	    !ib_is_udata_cleared(udata, sizeof(cmd),
				 udata->inlen - sizeof(cmd))) {
		ibdev_dbg(&dev->ibdev,
			  "Incompatible ABI params, unknown fields in udata\n");
		err = -EINVAL;
		goto err_out;
	}

	err = ib_copy_from_udata(&cmd, udata,
				 min(sizeof(cmd), udata->inlen));
	if (err) {
		ibdev_dbg(&dev->ibdev,
			  "Cannot copy udata for create_qp\n");
		goto err_out;
	}

	if (cmd.comp_mask) {
		ibdev_dbg(&dev->ibdev,
			  "Incompatible ABI params, unknown fields in udata\n");
		err = -EINVAL;
		goto err_out;
	}

	qp = kzalloc(sizeof(*qp), GFP_KERNEL);
	if (!qp) {
		err = -ENOMEM;
		goto err_out;
	}

	create_qp_params.uarn = ucontext->uarn;
	create_qp_params.pd = to_epd(ibpd)->pdn;

	if (init_attr->qp_type == IB_QPT_UD) {
		create_qp_params.qp_type = EFA_ADMIN_QP_TYPE_UD;
	} else if (cmd.driver_qp_type == EFA_QP_DRIVER_TYPE_SRD) {
		create_qp_params.qp_type = EFA_ADMIN_QP_TYPE_SRD;
	} else {
		ibdev_dbg(&dev->ibdev,
			  "Unsupported qp type %d driver qp type %d\n",
			  init_attr->qp_type, cmd.driver_qp_type);
		err = -EOPNOTSUPP;
		goto err_free_qp;
	}

	ibdev_dbg(&dev->ibdev, "Create QP: qp type %d driver qp type %#x\n",
		  init_attr->qp_type, cmd.driver_qp_type);
	create_qp_params.send_cq_idx = to_ecq(init_attr->send_cq)->cq_idx;
	create_qp_params.recv_cq_idx = to_ecq(init_attr->recv_cq)->cq_idx;
	create_qp_params.sq_depth = init_attr->cap.max_send_wr;
	create_qp_params.sq_ring_size_in_bytes = cmd.sq_ring_size;

	create_qp_params.rq_depth = init_attr->cap.max_recv_wr;
	create_qp_params.rq_ring_size_in_bytes = cmd.rq_ring_size;
	qp->rq_size = PAGE_ALIGN(create_qp_params.rq_ring_size_in_bytes);
	if (qp->rq_size) {
		qp->rq_cpu_addr = efa_zalloc_mapped(dev, &qp->rq_dma_addr,
						    qp->rq_size, DMA_TO_DEVICE);
		if (!qp->rq_cpu_addr) {
			err = -ENOMEM;
			goto err_free_qp;
		}

		ibdev_dbg(&dev->ibdev,
			  "qp->cpu_addr[0x%p] allocated: size[%lu], dma[%pad]\n",
			  qp->rq_cpu_addr, qp->rq_size, &qp->rq_dma_addr);
		create_qp_params.rq_base_addr = qp->rq_dma_addr;
	}

	err = efa_com_create_qp(&dev->edev, &create_qp_params,
				&create_qp_resp);
	if (err)
		goto err_free_mapped;

	resp.sq_db_offset = create_qp_resp.sq_db_offset;
	resp.rq_db_offset = create_qp_resp.rq_db_offset;
	resp.llq_desc_offset = create_qp_resp.llq_descriptors_offset;
	resp.send_sub_cq_idx = create_qp_resp.send_sub_cq_idx;
	resp.recv_sub_cq_idx = create_qp_resp.recv_sub_cq_idx;

	err = qp_mmap_entries_setup(qp, dev, ucontext, &create_qp_params,
				    &resp);
	if (err)
		goto err_destroy_qp;

	rq_entry_inserted = true;
	qp->qp_handle = create_qp_resp.qp_handle;
	qp->ibqp.qp_num = create_qp_resp.qp_num;
	qp->ibqp.qp_type = init_attr->qp_type;
	qp->max_send_wr = init_attr->cap.max_send_wr;
	qp->max_recv_wr = init_attr->cap.max_recv_wr;
	qp->max_send_sge = init_attr->cap.max_send_sge;
	qp->max_recv_sge = init_attr->cap.max_recv_sge;
	qp->max_inline_data = init_attr->cap.max_inline_data;

	if (udata->outlen) {
		err = ib_copy_to_udata(udata, &resp,
				       min(sizeof(resp), udata->outlen));
		if (err) {
			ibdev_dbg(&dev->ibdev,
				  "Failed to copy udata for qp[%u]\n",
				  create_qp_resp.qp_num);
			goto err_destroy_qp;
		}
	}

	ibdev_dbg(&dev->ibdev, "Created qp[%d]\n", qp->ibqp.qp_num);

	return &qp->ibqp;

err_destroy_qp:
	efa_destroy_qp_handle(dev, create_qp_resp.qp_handle);
err_free_mapped:
	if (qp->rq_size) {
		dma_unmap_single(&dev->pdev->dev, qp->rq_dma_addr, qp->rq_size,
				 DMA_TO_DEVICE);
		if (!rq_entry_inserted)
			free_pages_exact(qp->rq_cpu_addr, qp->rq_size);
	}
err_free_qp:
	kfree(qp);
err_out:
	atomic64_inc(&dev->stats.sw_stats.create_qp_err);
	return ERR_PTR(err);
}

static int efa_modify_qp_validate(struct efa_dev *dev, struct efa_qp *qp,
				  struct ib_qp_attr *qp_attr, int qp_attr_mask,
				  enum ib_qp_state cur_state,
				  enum ib_qp_state new_state)
{
#define EFA_MODIFY_QP_SUPP_MASK \
	(IB_QP_STATE | IB_QP_CUR_STATE | IB_QP_EN_SQD_ASYNC_NOTIFY | \
	 IB_QP_PKEY_INDEX | IB_QP_PORT | IB_QP_QKEY | IB_QP_SQ_PSN)

	if (qp_attr_mask & ~EFA_MODIFY_QP_SUPP_MASK) {
		ibdev_dbg(&dev->ibdev,
			  "Unsupported qp_attr_mask[%#x] supported[%#x]\n",
			  qp_attr_mask, EFA_MODIFY_QP_SUPP_MASK);
		return -EOPNOTSUPP;
	}

	if (!ib_modify_qp_is_ok(cur_state, new_state, IB_QPT_UD,
				qp_attr_mask)) {
		ibdev_dbg(&dev->ibdev, "Invalid modify QP parameters\n");
		return -EINVAL;
	}

	if ((qp_attr_mask & IB_QP_PORT) && qp_attr->port_num != 1) {
		ibdev_dbg(&dev->ibdev, "Can't change port num\n");
		return -EOPNOTSUPP;
	}

	if ((qp_attr_mask & IB_QP_PKEY_INDEX) && qp_attr->pkey_index) {
		ibdev_dbg(&dev->ibdev, "Can't change pkey index\n");
		return -EOPNOTSUPP;
	}

	return 0;
}

int efa_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *qp_attr,
		  int qp_attr_mask, struct ib_udata *udata)
{
	struct efa_dev *dev = to_edev(ibqp->device);
	struct efa_com_modify_qp_params params = {};
	struct efa_qp *qp = to_eqp(ibqp);
	enum ib_qp_state cur_state;
	enum ib_qp_state new_state;
	int err;

	if (udata->inlen &&
	    !ib_is_udata_cleared(udata, 0, udata->inlen)) {
		ibdev_dbg(&dev->ibdev,
			  "Incompatible ABI params, udata not cleared\n");
		return -EINVAL;
	}

	cur_state = qp_attr_mask & IB_QP_CUR_STATE ? qp_attr->cur_qp_state :
						     qp->state;
	new_state = qp_attr_mask & IB_QP_STATE ? qp_attr->qp_state : cur_state;

	err = efa_modify_qp_validate(dev, qp, qp_attr, qp_attr_mask, cur_state,
				     new_state);
	if (err)
		return err;

	params.qp_handle = qp->qp_handle;

	if (qp_attr_mask & IB_QP_STATE) {
		params.modify_mask |= BIT(EFA_ADMIN_QP_STATE_BIT) |
				      BIT(EFA_ADMIN_CUR_QP_STATE_BIT);
		params.cur_qp_state = qp_attr->cur_qp_state;
		params.qp_state = qp_attr->qp_state;
	}

	if (qp_attr_mask & IB_QP_EN_SQD_ASYNC_NOTIFY) {
		params.modify_mask |=
			BIT(EFA_ADMIN_SQ_DRAINED_ASYNC_NOTIFY_BIT);
		params.sq_drained_async_notify = qp_attr->en_sqd_async_notify;
	}

	if (qp_attr_mask & IB_QP_QKEY) {
		params.modify_mask |= BIT(EFA_ADMIN_QKEY_BIT);
		params.qkey = qp_attr->qkey;
	}

	if (qp_attr_mask & IB_QP_SQ_PSN) {
		params.modify_mask |= BIT(EFA_ADMIN_SQ_PSN_BIT);
		params.sq_psn = qp_attr->sq_psn;
	}

	err = efa_com_modify_qp(&dev->edev, &params);
	if (err)
		return err;

	qp->state = new_state;

	return 0;
}

static int efa_destroy_cq_idx(struct efa_dev *dev, int cq_idx)
{
	struct efa_com_destroy_cq_params params = { .cq_idx = cq_idx };

	return efa_com_destroy_cq(&dev->edev, &params);
}

void efa_destroy_cq(struct ib_cq *ibcq, struct ib_udata *udata)
{
	struct efa_dev *dev = to_edev(ibcq->device);
	struct efa_cq *cq = to_ecq(ibcq);

	ibdev_dbg(&dev->ibdev,
		  "Destroy cq[%d] virt[0x%p] freed: size[%lu], dma[%pad]\n",
		  cq->cq_idx, cq->cpu_addr, cq->size, &cq->dma_addr);

	efa_destroy_cq_idx(dev, cq->cq_idx);
	dma_unmap_single(&dev->pdev->dev, cq->dma_addr, cq->size,
			 DMA_FROM_DEVICE);
}

static int cq_mmap_entries_setup(struct efa_dev *dev, struct efa_cq *cq,
				 struct efa_ibv_create_cq_resp *resp)
{
	resp->q_mmap_size = cq->size;
	resp->q_mmap_key = mmap_entry_insert(dev, cq->ucontext, cq,
					     virt_to_phys(cq->cpu_addr),
					     cq->size, EFA_MMAP_DMA_PAGE);
	if (resp->q_mmap_key == EFA_MMAP_INVALID)
		return -ENOMEM;

	return 0;
}

int efa_create_cq(struct ib_cq *ibcq, const struct ib_cq_init_attr *attr,
		  struct ib_udata *udata)
{
	struct efa_ucontext *ucontext = rdma_udata_to_drv_context(
		udata, struct efa_ucontext, ibucontext);
	struct efa_ibv_create_cq_resp resp = {};
	struct efa_com_create_cq_params params;
	struct efa_com_create_cq_result result;
	struct ib_device *ibdev = ibcq->device;
	struct efa_dev *dev = to_edev(ibdev);
	struct efa_ibv_create_cq cmd = {};
	struct efa_cq *cq = to_ecq(ibcq);
	bool cq_entry_inserted = false;
	int entries = attr->cqe;
	int err;

	ibdev_dbg(ibdev, "create_cq entries %d\n", entries);

	if (entries < 1 || entries > dev->dev_attr.max_cq_depth) {
		ibdev_dbg(ibdev,
			  "cq: requested entries[%u] non-positive or greater than max[%u]\n",
			  entries, dev->dev_attr.max_cq_depth);
		err = -EINVAL;
		goto err_out;
	}

	if (!field_avail(cmd, num_sub_cqs, udata->inlen)) {
		ibdev_dbg(ibdev,
			  "Incompatible ABI params, no input udata\n");
		err = -EINVAL;
		goto err_out;
	}

	if (udata->inlen > sizeof(cmd) &&
	    !ib_is_udata_cleared(udata, sizeof(cmd),
				 udata->inlen - sizeof(cmd))) {
		ibdev_dbg(ibdev,
			  "Incompatible ABI params, unknown fields in udata\n");
		err = -EINVAL;
		goto err_out;
	}

	err = ib_copy_from_udata(&cmd, udata,
				 min(sizeof(cmd), udata->inlen));
	if (err) {
		ibdev_dbg(ibdev, "Cannot copy udata for create_cq\n");
		goto err_out;
	}

	if (cmd.comp_mask || !is_reserved_cleared(cmd.reserved_50)) {
		ibdev_dbg(ibdev,
			  "Incompatible ABI params, unknown fields in udata\n");
		err = -EINVAL;
		goto err_out;
	}

	if (!cmd.cq_entry_size) {
		ibdev_dbg(ibdev,
			  "Invalid entry size [%u]\n", cmd.cq_entry_size);
		err = -EINVAL;
		goto err_out;
	}

	if (cmd.num_sub_cqs != dev->dev_attr.sub_cqs_per_cq) {
		ibdev_dbg(ibdev,
			  "Invalid number of sub cqs[%u] expected[%u]\n",
			  cmd.num_sub_cqs, dev->dev_attr.sub_cqs_per_cq);
		err = -EINVAL;
		goto err_out;
	}

	cq->ucontext = ucontext;
	cq->size = PAGE_ALIGN(cmd.cq_entry_size * entries * cmd.num_sub_cqs);
	cq->cpu_addr = efa_zalloc_mapped(dev, &cq->dma_addr, cq->size,
					 DMA_FROM_DEVICE);
	if (!cq->cpu_addr) {
		err = -ENOMEM;
		goto err_out;
	}

	params.uarn = cq->ucontext->uarn;
	params.cq_depth = entries;
	params.dma_addr = cq->dma_addr;
	params.entry_size_in_bytes = cmd.cq_entry_size;
	params.num_sub_cqs = cmd.num_sub_cqs;
	err = efa_com_create_cq(&dev->edev, &params, &result);
	if (err)
		goto err_free_mapped;

	resp.cq_idx = result.cq_idx;
	cq->cq_idx = result.cq_idx;
	cq->ibcq.cqe = result.actual_depth;
	WARN_ON_ONCE(entries != result.actual_depth);

	err = cq_mmap_entries_setup(dev, cq, &resp);
	if (err) {
		ibdev_dbg(ibdev, "Could not setup cq[%u] mmap entries\n",
			  cq->cq_idx);
		goto err_destroy_cq;
	}

	cq_entry_inserted = true;

	if (udata->outlen) {
		err = ib_copy_to_udata(udata, &resp,
				       min(sizeof(resp), udata->outlen));
		if (err) {
			ibdev_dbg(ibdev,
				  "Failed to copy udata for create_cq\n");
			goto err_destroy_cq;
		}
	}

	ibdev_dbg(ibdev, "Created cq[%d], cq depth[%u]. dma[%pad] virt[0x%p]\n",
		  cq->cq_idx, result.actual_depth, &cq->dma_addr, cq->cpu_addr);

	return 0;

err_destroy_cq:
	efa_destroy_cq_idx(dev, cq->cq_idx);
err_free_mapped:
	dma_unmap_single(&dev->pdev->dev, cq->dma_addr, cq->size,
			 DMA_FROM_DEVICE);
	if (!cq_entry_inserted)
		free_pages_exact(cq->cpu_addr, cq->size);
err_out:
	atomic64_inc(&dev->stats.sw_stats.create_cq_err);
	return err;
}

static int umem_to_page_list(struct efa_dev *dev,
			     struct ib_umem *umem,
			     u64 *page_list,
			     u32 hp_cnt,
			     u8 hp_shift)
{
	u32 pages_in_hp = BIT(hp_shift - PAGE_SHIFT);
	struct ib_block_iter biter;
	unsigned int hp_idx = 0;

	ibdev_dbg(&dev->ibdev, "hp_cnt[%u], pages_in_hp[%u]\n",
		  hp_cnt, pages_in_hp);

	rdma_for_each_block(umem->sg_head.sgl, &biter, umem->nmap,
			    BIT(hp_shift))
		page_list[hp_idx++] = rdma_block_iter_dma_address(&biter);

	return 0;
}

static struct scatterlist *efa_vmalloc_buf_to_sg(u64 *buf, int page_cnt)
{
	struct scatterlist *sglist;
	struct page *pg;
	int i;

	sglist = kcalloc(page_cnt, sizeof(*sglist), GFP_KERNEL);
	if (!sglist)
		return NULL;
	sg_init_table(sglist, page_cnt);
	for (i = 0; i < page_cnt; i++) {
		pg = vmalloc_to_page(buf);
		if (!pg)
			goto err;
		sg_set_page(&sglist[i], pg, PAGE_SIZE, 0);
		buf += PAGE_SIZE / sizeof(*buf);
	}
	return sglist;

err:
	kfree(sglist);
	return NULL;
}

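/*
 * create a chunk list of dma addresses from the supplied scatter gather list
 * of the pbl buffer's pages
 */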
static int pbl_chunk_list_create(struct efa_dev *dev, struct pbl_context *pbl)
{
	struct pbl_chunk_list *chunk_list = &pbl->phys.indirect.chunk_list;
	int page_cnt = pbl->phys.indirect.pbl_buf_size_in_pages;
	struct scatterlist *pages_sgl = pbl->phys.indirect.sgl;
	unsigned int chunk_list_size, chunk_idx, payload_idx;
	int sg_dma_cnt = pbl->phys.indirect.sg_dma_cnt;
	struct efa_com_ctrl_buff_info *ctrl_buf;
	u64 *cur_chunk_buf, *prev_chunk_buf;
	struct ib_block_iter biter;
	dma_addr_t dma_addr;
	int i;

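	/* allocate a chunk list that consists of 4KB chunks */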
	chunk_list_size = DIV_ROUND_UP(page_cnt, EFA_PTRS_PER_CHUNK);

	chunk_list->size = chunk_list_size;
	chunk_list->chunks = kcalloc(chunk_list_size,
				     sizeof(*chunk_list->chunks),
				     GFP_KERNEL);
	if (!chunk_list->chunks)
		return -ENOMEM;

	ibdev_dbg(&dev->ibdev,
		  "chunk_list_size[%u] - pages[%u]\n", chunk_list_size,
		  page_cnt);

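	/* allocate chunk buffers: */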
	for (i = 0; i < chunk_list_size; i++) {
		chunk_list->chunks[i].buf = kzalloc(EFA_CHUNK_SIZE, GFP_KERNEL);
		if (!chunk_list->chunks[i].buf)
			goto chunk_list_dealloc;

		chunk_list->chunks[i].length = EFA_CHUNK_USED_SIZE;
	}
	chunk_list->chunks[chunk_list_size - 1].length =
		((page_cnt % EFA_PTRS_PER_CHUNK) * EFA_CHUNK_PAYLOAD_PTR_SIZE) +
		EFA_CHUNK_PTR_SIZE;

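	/* fill the dma addresses of sg list pages to chunks: */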
	chunk_idx = 0;
	payload_idx = 0;
	cur_chunk_buf = chunk_list->chunks[0].buf;
	rdma_for_each_block(pages_sgl, &biter, sg_dma_cnt,
			    EFA_CHUNK_PAYLOAD_SIZE) {
		cur_chunk_buf[payload_idx++] =
			rdma_block_iter_dma_address(&biter);

		if (payload_idx == EFA_PTRS_PER_CHUNK) {
			chunk_idx++;
			cur_chunk_buf = chunk_list->chunks[chunk_idx].buf;
			payload_idx = 0;
		}
	}

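	/* map chunks to dma and fill dma address in chunk list */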
	for (i = chunk_list_size - 1; i >= 0; i--) {
		dma_addr = dma_map_single(&dev->pdev->dev,
					  chunk_list->chunks[i].buf,
					  chunk_list->chunks[i].length,
					  DMA_TO_DEVICE);
		if (dma_mapping_error(&dev->pdev->dev, dma_addr)) {
			ibdev_err(&dev->ibdev,
				  "chunk[%u] dma_map_failed\n", i);
			goto chunk_list_unmap;
		}

		chunk_list->chunks[i].dma_addr = dma_addr;
		ibdev_dbg(&dev->ibdev,
			  "chunk[%u] mapped at [%pad]\n", i, &dma_addr);

		if (!i)
			break;

		prev_chunk_buf = chunk_list->chunks[i - 1].buf;

		ctrl_buf = (struct efa_com_ctrl_buff_info *)
				&prev_chunk_buf[EFA_PTRS_PER_CHUNK];
		ctrl_buf->length = chunk_list->chunks[i].length;

		efa_com_set_dma_addr(dma_addr,
				     &ctrl_buf->address.mem_addr_high,
				     &ctrl_buf->address.mem_addr_low);
	}

	return 0;

chunk_list_unmap:
	for (; i < chunk_list_size; i++) {
		dma_unmap_single(&dev->pdev->dev, chunk_list->chunks[i].dma_addr,
				 chunk_list->chunks[i].length, DMA_TO_DEVICE);
	}
chunk_list_dealloc:
	for (i = 0; i < chunk_list_size; i++)
		kfree(chunk_list->chunks[i].buf);

	kfree(chunk_list->chunks);
	return -ENOMEM;
}

static void pbl_chunk_list_destroy(struct efa_dev *dev, struct pbl_context *pbl)
{
	struct pbl_chunk_list *chunk_list = &pbl->phys.indirect.chunk_list;
	int i;

	for (i = 0; i < chunk_list->size; i++) {
		dma_unmap_single(&dev->pdev->dev, chunk_list->chunks[i].dma_addr,
				 chunk_list->chunks[i].length, DMA_TO_DEVICE);
		kfree(chunk_list->chunks[i].buf);
	}

	kfree(chunk_list->chunks);
}

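/* initialize pbl continuous mode: map pbl buffer to a dma address */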
static int pbl_continuous_initialize(struct efa_dev *dev,
				     struct pbl_context *pbl)
{
	dma_addr_t dma_addr;

	dma_addr = dma_map_single(&dev->pdev->dev, pbl->pbl_buf,
				  pbl->pbl_buf_size_in_bytes, DMA_TO_DEVICE);
	if (dma_mapping_error(&dev->pdev->dev, dma_addr)) {
		ibdev_err(&dev->ibdev, "Unable to map pbl to DMA address\n");
		return -ENOMEM;
	}

	pbl->phys.continuous.dma_addr = dma_addr;
	ibdev_dbg(&dev->ibdev,
		  "pbl continuous - dma_addr = %pad, size[%u]\n",
		  &dma_addr, pbl->pbl_buf_size_in_bytes);

	return 0;
}

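/*
 * initialize pbl indirect mode:
 * create a chunk list out of the dma addresses of the physical pages of
 * pbl buffer.
 */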
static int pbl_indirect_initialize(struct efa_dev *dev, struct pbl_context *pbl)
{
	u32 size_in_pages = DIV_ROUND_UP(pbl->pbl_buf_size_in_bytes, PAGE_SIZE);
	struct scatterlist *sgl;
	int sg_dma_cnt, err;

	BUILD_BUG_ON(EFA_CHUNK_PAYLOAD_SIZE > PAGE_SIZE);
	sgl = efa_vmalloc_buf_to_sg(pbl->pbl_buf, size_in_pages);
	if (!sgl)
		return -ENOMEM;

	sg_dma_cnt = dma_map_sg(&dev->pdev->dev, sgl, size_in_pages, DMA_TO_DEVICE);
	if (!sg_dma_cnt) {
		err = -EINVAL;
		goto err_map;
	}

	pbl->phys.indirect.pbl_buf_size_in_pages = size_in_pages;
	pbl->phys.indirect.sgl = sgl;
	pbl->phys.indirect.sg_dma_cnt = sg_dma_cnt;
	err = pbl_chunk_list_create(dev, pbl);
	if (err) {
		ibdev_dbg(&dev->ibdev,
			  "chunk_list creation failed[%d]\n", err);
		goto err_chunk;
	}

	ibdev_dbg(&dev->ibdev,
		  "pbl indirect - size[%u], chunks[%u]\n",
		  pbl->pbl_buf_size_in_bytes,
		  pbl->phys.indirect.chunk_list.size);

	return 0;

err_chunk:
	dma_unmap_sg(&dev->pdev->dev, sgl, size_in_pages, DMA_TO_DEVICE);
err_map:
	kfree(sgl);
	return err;
}

static void pbl_indirect_terminate(struct efa_dev *dev, struct pbl_context *pbl)
{
	pbl_chunk_list_destroy(dev, pbl);
	dma_unmap_sg(&dev->pdev->dev, pbl->phys.indirect.sgl,
		     pbl->phys.indirect.pbl_buf_size_in_pages, DMA_TO_DEVICE);
	kfree(pbl->phys.indirect.sgl);
}

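/* create a page buffer list from a mapped user memory region */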
static int pbl_create(struct efa_dev *dev,
		      struct pbl_context *pbl,
		      struct ib_umem *umem,
		      int hp_cnt,
		      u8 hp_shift)
{
	int err;

	pbl->pbl_buf_size_in_bytes = hp_cnt * EFA_CHUNK_PAYLOAD_PTR_SIZE;
	pbl->pbl_buf = kvzalloc(pbl->pbl_buf_size_in_bytes, GFP_KERNEL);
	if (!pbl->pbl_buf)
		return -ENOMEM;

	if (is_vmalloc_addr(pbl->pbl_buf)) {
		pbl->physically_continuous = 0;
		err = umem_to_page_list(dev, umem, pbl->pbl_buf, hp_cnt,
					hp_shift);
		if (err)
			goto err_free;

		err = pbl_indirect_initialize(dev, pbl);
		if (err)
			goto err_free;
	} else {
		pbl->physically_continuous = 1;
		err = umem_to_page_list(dev, umem, pbl->pbl_buf, hp_cnt,
					hp_shift);
		if (err)
			goto err_free;

		err = pbl_continuous_initialize(dev, pbl);
		if (err)
			goto err_free;
	}

	ibdev_dbg(&dev->ibdev,
		  "user_pbl_created: user_pages[%u], continuous[%u]\n",
		  hp_cnt, pbl->physically_continuous);

	return 0;

err_free:
	kvfree(pbl->pbl_buf);
	return err;
}

static void pbl_destroy(struct efa_dev *dev, struct pbl_context *pbl)
{
	if (pbl->physically_continuous)
		dma_unmap_single(&dev->pdev->dev, pbl->phys.continuous.dma_addr,
				 pbl->pbl_buf_size_in_bytes, DMA_TO_DEVICE);
	else
		pbl_indirect_terminate(dev, pbl);

	kvfree(pbl->pbl_buf);
}

static int efa_create_inline_pbl(struct efa_dev *dev, struct efa_mr *mr,
				 struct efa_com_reg_mr_params *params)
{
	int err;

	params->inline_pbl = 1;
	err = umem_to_page_list(dev, mr->umem, params->pbl.inline_pbl_array,
				params->page_num, params->page_shift);
	if (err)
		return err;

	ibdev_dbg(&dev->ibdev,
		  "inline_pbl_array - pages[%u]\n", params->page_num);

	return 0;
}

static int efa_create_pbl(struct efa_dev *dev,
			  struct pbl_context *pbl,
			  struct efa_mr *mr,
			  struct efa_com_reg_mr_params *params)
{
	int err;

	err = pbl_create(dev, pbl, mr->umem, params->page_num,
			 params->page_shift);
	if (err) {
		ibdev_dbg(&dev->ibdev, "Failed to create pbl[%d]\n", err);
		return err;
	}

	params->inline_pbl = 0;
	params->indirect = !pbl->physically_continuous;
	if (pbl->physically_continuous) {
		params->pbl.pbl.length = pbl->pbl_buf_size_in_bytes;

		efa_com_set_dma_addr(pbl->phys.continuous.dma_addr,
				     &params->pbl.pbl.address.mem_addr_high,
				     &params->pbl.pbl.address.mem_addr_low);
	} else {
		params->pbl.pbl.length =
			pbl->phys.indirect.chunk_list.chunks[0].length;

		efa_com_set_dma_addr(pbl->phys.indirect.chunk_list.chunks[0].dma_addr,
				     &params->pbl.pbl.address.mem_addr_high,
				     &params->pbl.pbl.address.mem_addr_low);
	}

	return 0;
}

struct ib_mr *efa_reg_mr(struct ib_pd *ibpd, u64 start, u64 length,
			 u64 virt_addr, int access_flags,
			 struct ib_udata *udata)
{
	struct efa_dev *dev = to_edev(ibpd->device);
	struct efa_com_reg_mr_params params = {};
	struct efa_com_reg_mr_result result = {};
	struct pbl_context pbl;
	unsigned int pg_sz;
	struct efa_mr *mr;
	int inline_size;
	int err;

	if (udata->inlen &&
	    !ib_is_udata_cleared(udata, 0, udata->inlen)) {
		ibdev_dbg(&dev->ibdev,
			  "Incompatible ABI params, udata not cleared\n");
		err = -EINVAL;
		goto err_out;
	}

	if (access_flags & ~EFA_SUPPORTED_ACCESS_FLAGS) {
		ibdev_dbg(&dev->ibdev,
			  "Unsupported access flags[%#x], supported[%#x]\n",
			  access_flags, EFA_SUPPORTED_ACCESS_FLAGS);
		err = -EOPNOTSUPP;
		goto err_out;
	}

	mr = kzalloc(sizeof(*mr), GFP_KERNEL);
	if (!mr) {
		err = -ENOMEM;
		goto err_out;
	}

	mr->umem = ib_umem_get(udata, start, length, access_flags, 0);
	if (IS_ERR(mr->umem)) {
		err = PTR_ERR(mr->umem);
		ibdev_dbg(&dev->ibdev,
			  "Failed to pin and map user space memory[%d]\n", err);
		goto err_free;
	}

	params.pd = to_epd(ibpd)->pdn;
	params.iova = virt_addr;
	params.mr_length_in_bytes = length;
	params.permissions = access_flags & 0x1;

	pg_sz = ib_umem_find_best_pgsz(mr->umem,
				       dev->dev_attr.page_size_cap,
				       virt_addr);
	if (!pg_sz) {
		err = -EOPNOTSUPP;
		ibdev_dbg(&dev->ibdev, "Failed to find a suitable page size in page_size_cap %#llx\n",
			  dev->dev_attr.page_size_cap);
		goto err_unmap;
	}

	params.page_shift = __ffs(pg_sz);
	params.page_num = DIV_ROUND_UP(length + (start & (pg_sz - 1)),
				       pg_sz);

	ibdev_dbg(&dev->ibdev,
		  "start %#llx length %#llx params.page_shift %u params.page_num %u\n",
		  start, length, params.page_shift, params.page_num);

	inline_size = ARRAY_SIZE(params.pbl.inline_pbl_array);
	if (params.page_num <= inline_size) {
		err = efa_create_inline_pbl(dev, mr, &params);
		if (err)
			goto err_unmap;

		err = efa_com_register_mr(&dev->edev, &params, &result);
		if (err)
			goto err_unmap;
	} else {
		err = efa_create_pbl(dev, &pbl, mr, &params);
		if (err)
			goto err_unmap;

		err = efa_com_register_mr(&dev->edev, &params, &result);
		pbl_destroy(dev, &pbl);

		if (err)
			goto err_unmap;
	}

	mr->ibmr.lkey = result.l_key;
	mr->ibmr.rkey = result.r_key;
	mr->ibmr.length = length;
	ibdev_dbg(&dev->ibdev, "Registered mr[%d]\n", mr->ibmr.lkey);

	return &mr->ibmr;

err_unmap:
	ib_umem_release(mr->umem);
err_free:
	kfree(mr);
err_out:
	atomic64_inc(&dev->stats.sw_stats.reg_mr_err);
	return ERR_PTR(err);
}

int efa_dereg_mr(struct ib_mr *ibmr, struct ib_udata *udata)
{
	struct efa_dev *dev = to_edev(ibmr->device);
	struct efa_com_dereg_mr_params params;
	struct efa_mr *mr = to_emr(ibmr);
	int err;

	ibdev_dbg(&dev->ibdev, "Deregister mr[%d]\n", ibmr->lkey);

	if (mr->umem) {
		params.l_key = mr->ibmr.lkey;
		err = efa_com_dereg_mr(&dev->edev, &params);
		if (err)
			return err;
	}
	ib_umem_release(mr->umem);

	kfree(mr);

	return 0;
}

int efa_get_port_immutable(struct ib_device *ibdev, u8 port_num,
			   struct ib_port_immutable *immutable)
{
	struct ib_port_attr attr;
	int err;

	err = ib_query_port(ibdev, port_num, &attr);
	if (err) {
		ibdev_dbg(ibdev, "Couldn't query port err[%d]\n", err);
		return err;
	}

	immutable->pkey_tbl_len = attr.pkey_tbl_len;
	immutable->gid_tbl_len = attr.gid_tbl_len;

	return 0;
}

static int efa_dealloc_uar(struct efa_dev *dev, u16 uarn)
{
	struct efa_com_dealloc_uar_params params = {
		.uarn = uarn,
	};

	return efa_com_dealloc_uar(&dev->edev, &params);
}

int efa_alloc_ucontext(struct ib_ucontext *ibucontext, struct ib_udata *udata)
{
	struct efa_ucontext *ucontext = to_eucontext(ibucontext);
	struct efa_dev *dev = to_edev(ibucontext->device);
	struct efa_ibv_alloc_ucontext_resp resp = {};
	struct efa_com_alloc_uar_result result;
	int err;

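	/*
	 * it's fine if the driver does not know all request fields,
	 * we will ack input fields in our response.
	 */
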
	err = efa_com_alloc_uar(&dev->edev, &result);
	if (err)
		goto err_out;

	ucontext->uarn = result.uarn;
	xa_init(&ucontext->mmap_xa);

	resp.cmds_supp_udata_mask |= EFA_USER_CMDS_SUPP_UDATA_QUERY_DEVICE;
	resp.cmds_supp_udata_mask |= EFA_USER_CMDS_SUPP_UDATA_CREATE_AH;
	resp.sub_cqs_per_cq = dev->dev_attr.sub_cqs_per_cq;
	resp.inline_buf_size = dev->dev_attr.inline_buf_size;
	resp.max_llq_size = dev->dev_attr.max_llq_size;

	if (udata && udata->outlen) {
		err = ib_copy_to_udata(udata, &resp,
				       min(sizeof(resp), udata->outlen));
		if (err)
			goto err_dealloc_uar;
	}

	return 0;

err_dealloc_uar:
	efa_dealloc_uar(dev, result.uarn);
err_out:
	atomic64_inc(&dev->stats.sw_stats.alloc_ucontext_err);
	return err;
}

void efa_dealloc_ucontext(struct ib_ucontext *ibucontext)
{
	struct efa_ucontext *ucontext = to_eucontext(ibucontext);
	struct efa_dev *dev = to_edev(ibucontext->device);

	mmap_entries_remove_free(dev, ucontext);
	efa_dealloc_uar(dev, ucontext->uarn);
}

static int __efa_mmap(struct efa_dev *dev, struct efa_ucontext *ucontext,
		      struct vm_area_struct *vma, u64 key, u64 length)
{
	struct efa_mmap_entry *entry;
	unsigned long va;
	u64 pfn;
	int err;

	entry = mmap_entry_get(dev, ucontext, key, length);
	if (!entry) {
		ibdev_dbg(&dev->ibdev, "key[%#llx] does not have valid entry\n",
			  key);
		return -EINVAL;
	}

	ibdev_dbg(&dev->ibdev,
		  "Mapping address[%#llx], length[%#llx], mmap_flag[%d]\n",
		  entry->address, length, entry->mmap_flag);

	pfn = entry->address >> PAGE_SHIFT;
	switch (entry->mmap_flag) {
	case EFA_MMAP_IO_NC:
		err = rdma_user_mmap_io(&ucontext->ibucontext, vma, pfn, length,
					pgprot_noncached(vma->vm_page_prot));
		break;
	case EFA_MMAP_IO_WC:
		err = rdma_user_mmap_io(&ucontext->ibucontext, vma, pfn, length,
					pgprot_writecombine(vma->vm_page_prot));
		break;
	case EFA_MMAP_DMA_PAGE:
		for (va = vma->vm_start; va < vma->vm_end;
		     va += PAGE_SIZE, pfn++) {
			err = vm_insert_page(vma, va, pfn_to_page(pfn));
			if (err)
				break;
		}
		break;
	default:
		err = -EINVAL;
	}

	if (err) {
		ibdev_dbg(
			&dev->ibdev,
			"Couldn't mmap address[%#llx] length[%#llx] mmap_flag[%d] err[%d]\n",
			entry->address, length, entry->mmap_flag, err);
		return err;
	}

	return 0;
}

int efa_mmap(struct ib_ucontext *ibucontext,
	     struct vm_area_struct *vma)
{
	struct efa_ucontext *ucontext = to_eucontext(ibucontext);
	struct efa_dev *dev = to_edev(ibucontext->device);
	u64 length = vma->vm_end - vma->vm_start;
	u64 key = vma->vm_pgoff << PAGE_SHIFT;

	ibdev_dbg(&dev->ibdev,
		  "start %#lx, end %#lx, length = %#llx, key = %#llx\n",
		  vma->vm_start, vma->vm_end, length, key);

	if (length % PAGE_SIZE != 0 || !(vma->vm_flags & VM_SHARED)) {
		ibdev_dbg(&dev->ibdev,
			  "length[%#llx] is not page size aligned[%#lx] or VM_SHARED is not set [%#lx]\n",
			  length, PAGE_SIZE, vma->vm_flags);
		return -EINVAL;
	}

	if (vma->vm_flags & VM_EXEC) {
		ibdev_dbg(&dev->ibdev, "Mapping executable pages is not permitted\n");
		return -EPERM;
	}

	return __efa_mmap(dev, ucontext, vma, key, length);
}

static int efa_ah_destroy(struct efa_dev *dev, struct efa_ah *ah)
{
	struct efa_com_destroy_ah_params params = {
		.ah = ah->ah,
		.pdn = to_epd(ah->ibah.pd)->pdn,
	};

	return efa_com_destroy_ah(&dev->edev, &params);
}

int efa_create_ah(struct ib_ah *ibah,
		  struct rdma_ah_attr *ah_attr,
		  u32 flags,
		  struct ib_udata *udata)
{
	struct efa_dev *dev = to_edev(ibah->device);
	struct efa_com_create_ah_params params = {};
	struct efa_ibv_create_ah_resp resp = {};
	struct efa_com_create_ah_result result;
	struct efa_ah *ah = to_eah(ibah);
	int err;

	if (!(flags & RDMA_CREATE_AH_SLEEPABLE)) {
		ibdev_dbg(&dev->ibdev,
			  "Create address handle is not supported in atomic context\n");
		err = -EOPNOTSUPP;
		goto err_out;
	}

	if (udata->inlen &&
	    !ib_is_udata_cleared(udata, 0, udata->inlen)) {
		ibdev_dbg(&dev->ibdev, "Incompatible ABI params\n");
		err = -EINVAL;
		goto err_out;
	}

	memcpy(params.dest_addr, ah_attr->grh.dgid.raw,
	       sizeof(params.dest_addr));
	params.pdn = to_epd(ibah->pd)->pdn;
	err = efa_com_create_ah(&dev->edev, &params, &result);
	if (err)
		goto err_out;

	memcpy(ah->id, ah_attr->grh.dgid.raw, sizeof(ah->id));
	ah->ah = result.ah;

	resp.efa_address_handle = result.ah;

	if (udata->outlen) {
		err = ib_copy_to_udata(udata, &resp,
				       min(sizeof(resp), udata->outlen));
		if (err) {
			ibdev_dbg(&dev->ibdev,
				  "Failed to copy udata for create_ah response\n");
			goto err_destroy_ah;
		}
	}
	ibdev_dbg(&dev->ibdev, "Created ah[%d]\n", ah->ah);

	return 0;

err_destroy_ah:
	efa_ah_destroy(dev, ah);
err_out:
	atomic64_inc(&dev->stats.sw_stats.create_ah_err);
	return err;
}

void efa_destroy_ah(struct ib_ah *ibah, u32 flags)
{
	struct efa_dev *dev = to_edev(ibah->pd->device);
	struct efa_ah *ah = to_eah(ibah);

	ibdev_dbg(&dev->ibdev, "Destroy ah[%d]\n", ah->ah);

	if (!(flags & RDMA_DESTROY_AH_SLEEPABLE)) {
		ibdev_dbg(&dev->ibdev,
			  "Destroy address handle is not supported in atomic context\n");
		return;
	}

	efa_ah_destroy(dev, ah);
}

enum rdma_link_layer efa_port_link_layer(struct ib_device *ibdev,
					 u8 port_num)
{
	return IB_LINK_LAYER_UNSPECIFIED;
}