// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
/*
 * Copyright (c) 2013-2018, Mellanox Technologies. All rights reserved.
 */

#include <linux/module.h>
#include <linux/mlx5/qp.h>
#include <linux/mlx5/srq.h>
#include <linux/slab.h>
#include <rdma/ib_umem.h>
#include <rdma/ib_user_verbs.h>

#include "mlx5_ib.h"

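/* not supported currently */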
static int srq_signature;

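/* Return a pointer to WQE number n; WQEs are a power-of-two stride apart. */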
static void *get_wqe(struct mlx5_ib_srq *srq, int n)
{
	return mlx5_buf_offset(&srq->buf, n << srq->msrq.wqe_shift);
}

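/*
 * Forward SRQ async events from the mlx5 core to the consumer's IB event
 * handler, translating the mlx5 event type to its IB equivalent.
 */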
static void mlx5_ib_srq_event(struct mlx5_core_srq *srq, enum mlx5_event type)
{
	struct ib_event event;
	struct ib_srq *ibsrq = &to_mibsrq(srq)->ibsrq;

	if (ibsrq->event_handler) {
		event.device      = ibsrq->device;
		event.element.srq = ibsrq;
		switch (type) {
		case MLX5_EVENT_TYPE_SRQ_RQ_LIMIT:
			event.event = IB_EVENT_SRQ_LIMIT_REACHED;
			break;
		case MLX5_EVENT_TYPE_SRQ_CATAS_ERROR:
			event.event = IB_EVENT_SRQ_ERR;
			break;
		default:
			pr_warn("mlx5_ib: Unexpected event type %d on SRQ %06x\n",
				type, srq->srqn);
			return;
		}

		ibsrq->event_handler(&event, ibsrq->srq_context);
	}
}

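/*
 * Create an SRQ whose WQ buffer and doorbell record live in user memory:
 * copy and validate the user command, pin the buffer, build the page list
 * for the HCA and map the user doorbell page.
 */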
static int create_srq_user(struct ib_pd *pd, struct mlx5_ib_srq *srq,
			   struct mlx5_srq_attr *in,
			   struct ib_udata *udata, int buf_size)
{
	struct mlx5_ib_dev *dev = to_mdev(pd->device);
	struct mlx5_ib_create_srq ucmd = {};
	size_t ucmdlen;
	int err;
	int npages;
	int page_shift;
	int ncont;
	u32 offset;
	u32 uidx = MLX5_IB_DEFAULT_UIDX;

	ucmdlen = min(udata->inlen, sizeof(ucmd));

	if (ib_copy_from_udata(&ucmd, udata, ucmdlen)) {
		mlx5_ib_dbg(dev, "failed copy udata\n");
		return -EFAULT;
	}

	if (ucmd.reserved0 || ucmd.reserved1)
		return -EINVAL;

	/* anything the command carries beyond what we know about must be zero */
	if (udata->inlen > sizeof(ucmd) &&
	    !ib_is_udata_cleared(udata, sizeof(ucmd),
				 udata->inlen - sizeof(ucmd)))
		return -EINVAL;

	if (in->type != IB_SRQT_BASIC) {
		err = get_srq_user_index(to_mucontext(pd->uobject->context),
					 &ucmd, udata->inlen, &uidx);
		if (err)
			return err;
	}

	srq->wq_sig = !!(ucmd.flags & MLX5_SRQ_FLAG_SIGNATURE);

	srq->umem = ib_umem_get(pd->uobject->context, ucmd.buf_addr, buf_size,
				0, 0);
	if (IS_ERR(srq->umem)) {
		mlx5_ib_dbg(dev, "failed umem get, size %d\n", buf_size);
		err = PTR_ERR(srq->umem);
		return err;
	}

	/* find the best page size covering the pinned buffer */
	mlx5_ib_cont_pages(srq->umem, ucmd.buf_addr, 0, &npages,
			   &page_shift, &ncont, NULL);
	err = mlx5_ib_get_buf_offset(ucmd.buf_addr, page_shift,
				     &offset);
	if (err) {
		mlx5_ib_warn(dev, "bad offset\n");
		goto err_umem;
	}

	in->pas = kvcalloc(ncont, sizeof(*in->pas), GFP_KERNEL);
	if (!in->pas) {
		err = -ENOMEM;
		goto err_umem;
	}

	mlx5_ib_populate_pas(dev, srq->umem, page_shift, in->pas, 0);

	err = mlx5_ib_db_map_user(to_mucontext(pd->uobject->context),
				  ucmd.db_addr, &srq->db);
	if (err) {
		mlx5_ib_dbg(dev, "map doorbell failed\n");
		goto err_in;
	}

	in->log_page_size = page_shift - MLX5_ADAPTER_PAGE_SHIFT;
	in->page_offset = offset;
	in->uid = to_mpd(pd)->uid;
	if (MLX5_CAP_GEN(dev->mdev, cqe_version) == MLX5_CQE_VERSION_V1 &&
	    in->type != IB_SRQT_BASIC)
		in->user_index = uidx;

	return 0;

err_in:
	kvfree(in->pas);

err_umem:
	ib_umem_release(srq->umem);

	return err;
}

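/*
 * Create a kernel-owned SRQ: allocate the doorbell record and WQ buffer,
 * then chain all WQEs into the free list via their next_wqe_index fields.
 */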
static int create_srq_kernel(struct mlx5_ib_dev *dev, struct mlx5_ib_srq *srq,
			     struct mlx5_srq_attr *in, int buf_size)
{
	int err;
	int i;
	struct mlx5_wqe_srq_next_seg *next;

	err = mlx5_db_alloc(dev->mdev, &srq->db);
	if (err) {
		mlx5_ib_warn(dev, "alloc dbell rec failed\n");
		return err;
	}

	if (mlx5_buf_alloc(dev->mdev, buf_size, &srq->buf)) {
		mlx5_ib_dbg(dev, "buf alloc failed\n");
		err = -ENOMEM;
		goto err_db;
	}

	srq->head    = 0;
	srq->tail    = srq->msrq.max - 1;
	srq->wqe_ctr = 0;

	/* link every WQE into the singly-linked free list */
	for (i = 0; i < srq->msrq.max; i++) {
		next = get_wqe(srq, i);
		next->next_wqe_index =
			cpu_to_be16((i + 1) & (srq->msrq.max - 1));
	}

	mlx5_ib_dbg(dev, "srq->buf.page_shift = %d\n", srq->buf.page_shift);
	in->pas = kvcalloc(srq->buf.npages, sizeof(*in->pas), GFP_KERNEL);
	if (!in->pas) {
		err = -ENOMEM;
		goto err_buf;
	}
	mlx5_fill_page_array(&srq->buf, in->pas);

	srq->wrid = kvmalloc_array(srq->msrq.max, sizeof(u64), GFP_KERNEL);
	if (!srq->wrid) {
		err = -ENOMEM;
		goto err_in;
	}
	srq->wq_sig = !!srq_signature;

	in->log_page_size = srq->buf.page_shift - MLX5_ADAPTER_PAGE_SHIFT;
	if (MLX5_CAP_GEN(dev->mdev, cqe_version) == MLX5_CQE_VERSION_V1 &&
	    in->type != IB_SRQT_BASIC)
		in->user_index = MLX5_IB_DEFAULT_UIDX;

	return 0;

err_in:
	kvfree(in->pas);

err_buf:
	mlx5_buf_free(dev->mdev, &srq->buf);

err_db:
	mlx5_db_free(dev->mdev, &srq->db);
	return err;
}

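/* Tear down the user-memory resources taken in create_srq_user(). */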
static void destroy_srq_user(struct ib_pd *pd, struct mlx5_ib_srq *srq)
{
	mlx5_ib_db_unmap_user(to_mucontext(pd->uobject->context), &srq->db);
	ib_umem_release(srq->umem);
}

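/* Free the kernel-owned SRQ resources allocated in create_srq_kernel(). */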
static void destroy_srq_kernel(struct mlx5_ib_dev *dev, struct mlx5_ib_srq *srq)
{
	kvfree(srq->wrid);
	mlx5_buf_free(dev->mdev, &srq->buf);
	mlx5_db_free(dev->mdev, &srq->db);
}

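/*
 * Verbs entry point: size and create an SRQ, then register it with the
 * mlx5 core. WQE count and size are rounded up to powers of two, so the
 * max_wr reported back may exceed what the caller asked for.
 */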
struct ib_srq *mlx5_ib_create_srq(struct ib_pd *pd,
				  struct ib_srq_init_attr *init_attr,
				  struct ib_udata *udata)
{
	struct mlx5_ib_dev *dev = to_mdev(pd->device);
	struct mlx5_ib_srq *srq;
	size_t desc_size;
	size_t buf_size;
	int err;
	struct mlx5_srq_attr in = {0};
	__u32 max_srq_wqes = 1 << MLX5_CAP_GEN(dev->mdev, log_max_srq_sz);

	/* Sanity check SRQ size before proceeding */
	if (init_attr->attr.max_wr >= max_srq_wqes) {
		mlx5_ib_dbg(dev, "max_wr %d, cap %d\n",
			    init_attr->attr.max_wr,
			    max_srq_wqes);
		return ERR_PTR(-EINVAL);
	}

	srq = kmalloc(sizeof(*srq), GFP_KERNEL);
	if (!srq)
		return ERR_PTR(-ENOMEM);

	mutex_init(&srq->mutex);
	spin_lock_init(&srq->lock);
	srq->msrq.max    = roundup_pow_of_two(init_attr->attr.max_wr + 1);
	srq->msrq.max_gs = init_attr->attr.max_sge;

	desc_size = sizeof(struct mlx5_wqe_srq_next_seg) +
		    srq->msrq.max_gs * sizeof(struct mlx5_wqe_data_seg);
	/* a desc_size of 0 or smaller than max_gs means the sum wrapped */
	if (desc_size == 0 || srq->msrq.max_gs > desc_size) {
		err = -EINVAL;
		goto err_srq;
	}
	desc_size = roundup_pow_of_two(desc_size);
	desc_size = max_t(size_t, 32, desc_size);
	if (desc_size < sizeof(struct mlx5_wqe_srq_next_seg)) {
		err = -EINVAL;
		goto err_srq;
	}
	srq->msrq.max_avail_gather = (desc_size - sizeof(struct mlx5_wqe_srq_next_seg)) /
				     sizeof(struct mlx5_wqe_data_seg);
	srq->msrq.wqe_shift = ilog2(desc_size);
	buf_size = srq->msrq.max * desc_size;
	/* buf_size < desc_size means the multiplication overflowed */
	if (buf_size < desc_size) {
		err = -EINVAL;
		goto err_srq;
	}
	in.type = init_attr->srq_type;

	if (pd->uobject)
		err = create_srq_user(pd, srq, &in, udata, buf_size);
	else
		err = create_srq_kernel(dev, srq, &in, buf_size);

	if (err) {
		mlx5_ib_warn(dev, "create srq %s failed, err %d\n",
			     pd->uobject ? "user" : "kernel", err);
		goto err_usr_kern_srq;
	}

	in.log_size = ilog2(srq->msrq.max);
	/* the device takes the WQE stride as log2 of 16-byte units */
	in.wqe_shift = srq->msrq.wqe_shift - 4;
	if (srq->wq_sig)
		in.flags |= MLX5_SRQ_FLAG_WQ_SIG;

	if (init_attr->srq_type == IB_SRQT_XRC)
		in.xrcd = to_mxrcd(init_attr->ext.xrc.xrcd)->xrcdn;
	else
		in.xrcd = to_mxrcd(dev->devr.x0)->xrcdn;

	if (init_attr->srq_type == IB_SRQT_TM) {
		in.tm_log_list_size =
			ilog2(init_attr->ext.tag_matching.max_num_tags) + 1;
		if (in.tm_log_list_size >
		    MLX5_CAP_GEN(dev->mdev, log_tag_matching_list_sz)) {
			mlx5_ib_dbg(dev, "TM SRQ max_num_tags exceeding limit\n");
			err = -EINVAL;
			goto err_usr_kern_srq;
		}
		in.flags |= MLX5_SRQ_FLAG_RNDV;
	}

	if (ib_srq_has_cq(init_attr->srq_type))
		in.cqn = to_mcq(init_attr->ext.cq)->mcq.cqn;
	else
		in.cqn = to_mcq(dev->devr.c0)->mcq.cqn;

	in.pd        = to_mpd(pd)->pdn;
	in.db_record = srq->db.dma;
	err = mlx5_core_create_srq(dev->mdev, &srq->msrq, &in);
	kvfree(in.pas);
	if (err) {
		mlx5_ib_dbg(dev, "create SRQ failed, err %d\n", err);
		goto err_usr_kern_srq;
	}

	mlx5_ib_dbg(dev, "create SRQ with srqn 0x%x\n", srq->msrq.srqn);

	srq->msrq.event = mlx5_ib_srq_event;
	srq->ibsrq.ext.xrc.srq_num = srq->msrq.srqn;

	if (pd->uobject)
		if (ib_copy_to_udata(udata, &srq->msrq.srqn, sizeof(__u32))) {
			mlx5_ib_dbg(dev, "copy to user failed\n");
			err = -EFAULT;
			goto err_core;
		}

	init_attr->attr.max_wr = srq->msrq.max - 1;

	return &srq->ibsrq;

err_core:
	mlx5_core_destroy_srq(dev->mdev, &srq->msrq);

err_usr_kern_srq:
	if (pd->uobject)
		destroy_srq_user(pd, srq);
	else
		destroy_srq_kernel(dev, srq);

err_srq:
	kfree(srq);

	return ERR_PTR(err);
}

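/*
 * Modify verb: IB_SRQ_LIMIT arms the SRQ so a limit event is generated
 * when fewer than srq_limit WQEs remain posted.
 */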
int mlx5_ib_modify_srq(struct ib_srq *ibsrq, struct ib_srq_attr *attr,
		       enum ib_srq_attr_mask attr_mask, struct ib_udata *udata)
{
	struct mlx5_ib_dev *dev = to_mdev(ibsrq->device);
	struct mlx5_ib_srq *srq = to_msrq(ibsrq);
	int ret;

	/* We don't support resizing SRQs yet */
	if (attr_mask & IB_SRQ_MAX_WR)
		return -EINVAL;

	if (attr_mask & IB_SRQ_LIMIT) {
		if (attr->srq_limit >= srq->msrq.max)
			return -EINVAL;

		mutex_lock(&srq->mutex);
		ret = mlx5_core_arm_srq(dev->mdev, &srq->msrq, attr->srq_limit, 1);
		mutex_unlock(&srq->mutex);

		if (ret)
			return ret;
	}

	return 0;
}

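/*
 * Query verb: read the current limit watermark (LWM) from firmware and
 * report the software-known size limits.
 */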
int mlx5_ib_query_srq(struct ib_srq *ibsrq, struct ib_srq_attr *srq_attr)
{
	struct mlx5_ib_dev *dev = to_mdev(ibsrq->device);
	struct mlx5_ib_srq *srq = to_msrq(ibsrq);
	int ret;
	struct mlx5_srq_attr *out;

	out = kzalloc(sizeof(*out), GFP_KERNEL);
	if (!out)
		return -ENOMEM;

	ret = mlx5_core_query_srq(dev->mdev, &srq->msrq, out);
	if (ret)
		goto out_box;

	srq_attr->srq_limit = out->lwm;
	srq_attr->max_wr    = srq->msrq.max - 1;
	srq_attr->max_sge   = srq->msrq.max_gs;

out_box:
	kfree(out);
	return ret;
}

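/* Destroy verb: tear down the core SRQ, then release buffers and doorbell. */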
int mlx5_ib_destroy_srq(struct ib_srq *srq)
{
	struct mlx5_ib_dev *dev = to_mdev(srq->device);
	struct mlx5_ib_srq *msrq = to_msrq(srq);

	mlx5_core_destroy_srq(dev->mdev, &msrq->msrq);

	if (srq->uobject) {
		mlx5_ib_db_unmap_user(to_mucontext(srq->uobject->context), &msrq->db);
		ib_umem_release(msrq->umem);
	} else {
		destroy_srq_kernel(dev, msrq);
	}

	kfree(srq);
	return 0;
}

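/*
 * Return a WQE to the SRQ free list once the completion consuming it has
 * been polled.
 */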
void mlx5_ib_free_srq_wqe(struct mlx5_ib_srq *srq, int wqe_index)
{
	struct mlx5_wqe_srq_next_seg *next;

	/* always called with interrupts disabled. */
	spin_lock(&srq->lock);

	next = get_wqe(srq, srq->tail);
	next->next_wqe_index = cpu_to_be16(wqe_index);
	srq->tail = wqe_index;

	spin_unlock(&srq->lock);
}

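/*
 * Post a chain of receive WRs to the SRQ: take WQEs from the free list,
 * write each scatter list, and ring the doorbell record once at the end.
 */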
int mlx5_ib_post_srq_recv(struct ib_srq *ibsrq, const struct ib_recv_wr *wr,
			  const struct ib_recv_wr **bad_wr)
{
	struct mlx5_ib_srq *srq = to_msrq(ibsrq);
	struct mlx5_wqe_srq_next_seg *next;
	struct mlx5_wqe_data_seg *scat;
	struct mlx5_ib_dev *dev = to_mdev(ibsrq->device);
	struct mlx5_core_dev *mdev = dev->mdev;
	unsigned long flags;
	int err = 0;
	int nreq;
	int i;

	spin_lock_irqsave(&srq->lock, flags);

	if (mdev->state == MLX5_DEVICE_STATE_INTERNAL_ERROR) {
		err = -EIO;
		*bad_wr = wr;
		goto out;
	}

	for (nreq = 0; wr; nreq++, wr = wr->next) {
		if (unlikely(wr->num_sge > srq->msrq.max_gs)) {
			err = -EINVAL;
			*bad_wr = wr;
			break;
		}

		if (unlikely(srq->head == srq->tail)) { /* SRQ is full */
			err = -ENOMEM;
			*bad_wr = wr;
			break;
		}

		srq->wrid[srq->head] = wr->wr_id;

		next      = get_wqe(srq, srq->head);
		srq->head = be16_to_cpu(next->next_wqe_index);
		scat      = (struct mlx5_wqe_data_seg *)(next + 1);

		for (i = 0; i < wr->num_sge; i++) {
			scat[i].byte_count = cpu_to_be32(wr->sg_list[i].length);
			scat[i].lkey       = cpu_to_be32(wr->sg_list[i].lkey);
			scat[i].addr       = cpu_to_be64(wr->sg_list[i].addr);
		}

		/* terminate a short scatter list with an invalid lkey */
		if (i < srq->msrq.max_avail_gather) {
			scat[i].byte_count = 0;
			scat[i].lkey       = cpu_to_be32(MLX5_INVALID_LKEY);
			scat[i].addr       = 0;
		}
	}

	if (likely(nreq)) {
		srq->wqe_ctr += nreq;

		/* Make sure that descriptors are written before
		 * doorbell record.
		 */
		wmb();

		*srq->db.db = cpu_to_be32(srq->wqe_ctr);
	}
out:
	spin_unlock_irqrestore(&srq->lock, flags);

	return err;
}