#include <linux/mlx4/qp.h>
#include <linux/mlx4/srq.h>
#include <linux/slab.h>

#include "mlx4_ib.h"
#include <rdma/mlx4-abi.h>
#include <rdma/uverbs_ioctl.h>

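/* Return the address of WQE number @n; each WQE is 1 << wqe_shift bytes. */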
static void *get_wqe(struct mlx4_ib_srq *srq, int n)
{
	return mlx4_buf_offset(&srq->buf, n << srq->msrq.wqe_shift);
}

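/*
 * Dispatch asynchronous SRQ events (limit reached, catastrophic error)
 * to the consumer's event handler, if one is registered.
 */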
static void mlx4_ib_srq_event(struct mlx4_srq *srq, enum mlx4_event type)
{
	struct ib_event event;
	struct ib_srq *ibsrq = &to_mibsrq(srq)->ibsrq;

	if (ibsrq->event_handler) {
		event.device = ibsrq->device;
		event.element.srq = ibsrq;
		switch (type) {
		case MLX4_EVENT_TYPE_SRQ_LIMIT:
			event.event = IB_EVENT_SRQ_LIMIT_REACHED;
			break;
		case MLX4_EVENT_TYPE_SRQ_CATAS_ERROR:
			event.event = IB_EVENT_SRQ_ERR;
			break;
		default:
			pr_warn("Unexpected event type %d on SRQ %06x\n",
				type, srq->srqn);
			return;
		}

		ibsrq->event_handler(&event, ibsrq->srq_context);
	}
}

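/*
 * Create an SRQ.  The number of WQEs is rounded up to a power of two
 * (one extra entry is reserved so that a full queue can be told apart
 * from an empty one), and the WQE buffer and doorbell record either
 * come from user memory or are allocated in the kernel.
 */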
int mlx4_ib_create_srq(struct ib_srq *ib_srq,
		       struct ib_srq_init_attr *init_attr,
		       struct ib_udata *udata)
{
	struct mlx4_ib_dev *dev = to_mdev(ib_srq->device);
	struct mlx4_ib_ucontext *ucontext = rdma_udata_to_drv_context(
		udata, struct mlx4_ib_ucontext, ibucontext);
	struct mlx4_ib_srq *srq = to_msrq(ib_srq);
	struct mlx4_wqe_srq_next_seg *next;
	struct mlx4_wqe_data_seg *scatter;
	u32 cqn;
	u16 xrcdn;
	int desc_size;
	int buf_size;
	int err;
	int i;

	/* Sanity check SRQ size before proceeding */
	if (init_attr->attr.max_wr >= dev->dev->caps.max_srq_wqes ||
	    init_attr->attr.max_sge > dev->dev->caps.max_srq_sge)
		return -EINVAL;

	mutex_init(&srq->mutex);
	spin_lock_init(&srq->lock);
	srq->msrq.max = roundup_pow_of_two(init_attr->attr.max_wr + 1);
	srq->msrq.max_gs = init_attr->attr.max_sge;

	desc_size = max(32UL,
			roundup_pow_of_two(sizeof (struct mlx4_wqe_srq_next_seg) +
					   srq->msrq.max_gs *
					   sizeof (struct mlx4_wqe_data_seg)));
	srq->msrq.wqe_shift = ilog2(desc_size);

	buf_size = srq->msrq.max * desc_size;

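	/*
	 * For a userspace SRQ the WQE buffer and doorbell record live in
	 * user memory and only need to be mapped; for a kernel SRQ they
	 * are allocated here and the free list is initialized by hand.
	 */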
	if (udata) {
		struct mlx4_ib_create_srq ucmd;

		if (ib_copy_from_udata(&ucmd, udata, sizeof(ucmd)))
			return -EFAULT;

		srq->umem =
			ib_umem_get(ib_srq->device, ucmd.buf_addr, buf_size, 0);
		if (IS_ERR(srq->umem))
			return PTR_ERR(srq->umem);

		err = mlx4_mtt_init(dev->dev, ib_umem_page_count(srq->umem),
				    PAGE_SHIFT, &srq->mtt);
		if (err)
			goto err_buf;

		err = mlx4_ib_umem_write_mtt(dev, &srq->mtt, srq->umem);
		if (err)
			goto err_mtt;

		err = mlx4_ib_db_map_user(udata, ucmd.db_addr, &srq->db);
		if (err)
			goto err_mtt;
	} else {
		err = mlx4_db_alloc(dev->dev, &srq->db, 0);
		if (err)
			return err;

		*srq->db.db = 0;

		if (mlx4_buf_alloc(dev->dev, buf_size, PAGE_SIZE * 2,
				   &srq->buf)) {
			err = -ENOMEM;
			goto err_db;
		}

		srq->head = 0;
		srq->tail = srq->msrq.max - 1;
		srq->wqe_ctr = 0;

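		/*
		 * Chain each WQE to the next one to form the initial
		 * free list, and mark every scatter entry invalid.
		 */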
		for (i = 0; i < srq->msrq.max; ++i) {
			next = get_wqe(srq, i);
			next->next_wqe_index =
				cpu_to_be16((i + 1) & (srq->msrq.max - 1));

			for (scatter = (void *) (next + 1);
			     (void *) scatter < (void *) next + desc_size;
			     ++scatter)
				scatter->lkey = cpu_to_be32(MLX4_INVALID_LKEY);
		}

		err = mlx4_mtt_init(dev->dev, srq->buf.npages, srq->buf.page_shift,
				    &srq->mtt);
		if (err)
			goto err_buf;

		err = mlx4_buf_write_mtt(dev->dev, &srq->mtt, &srq->buf);
		if (err)
			goto err_mtt;

		srq->wrid = kvmalloc_array(srq->msrq.max,
					   sizeof(u64), GFP_KERNEL);
		if (!srq->wrid) {
			err = -ENOMEM;
			goto err_mtt;
		}
	}

	cqn = ib_srq_has_cq(init_attr->srq_type) ?
		to_mcq(init_attr->ext.cq)->mcq.cqn : 0;
	xrcdn = (init_attr->srq_type == IB_SRQT_XRC) ?
		to_mxrcd(init_attr->ext.xrc.xrcd)->xrcdn :
		(u16) dev->dev->caps.reserved_xrcds;
	err = mlx4_srq_alloc(dev->dev, to_mpd(ib_srq->pd)->pdn, cqn, xrcdn,
			     &srq->mtt, srq->db.dma, &srq->msrq);
	if (err)
		goto err_wrid;

	srq->msrq.event = mlx4_ib_srq_event;
	srq->ibsrq.ext.xrc.srq_num = srq->msrq.srqn;

	if (udata)
		if (ib_copy_to_udata(udata, &srq->msrq.srqn, sizeof (__u32))) {
			err = -EFAULT;
			goto err_wrid;
		}

	init_attr->attr.max_wr = srq->msrq.max - 1;

	return 0;

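/*
 * Error unwind: release resources in the reverse order they were
 * acquired, honoring the user vs. kernel allocation split.
 */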
err_wrid:
	if (udata)
		mlx4_ib_db_unmap_user(ucontext, &srq->db);
	else
		kvfree(srq->wrid);

err_mtt:
	mlx4_mtt_cleanup(dev->dev, &srq->mtt);

err_buf:
	if (!srq->umem)
		mlx4_buf_free(dev->dev, buf_size, &srq->buf);
	ib_umem_release(srq->umem);

err_db:
	if (!udata)
		mlx4_db_free(dev->dev, &srq->db);

	return err;
}

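/*
 * Only arming the SRQ limit is supported; resizing (IB_SRQ_MAX_WR) is
 * rejected, and the new limit must be below the SRQ size.
 */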
int mlx4_ib_modify_srq(struct ib_srq *ibsrq, struct ib_srq_attr *attr,
		       enum ib_srq_attr_mask attr_mask, struct ib_udata *udata)
{
	struct mlx4_ib_dev *dev = to_mdev(ibsrq->device);
	struct mlx4_ib_srq *srq = to_msrq(ibsrq);
	int ret;

	/* We don't support resizing SRQs (yet?) */
	if (attr_mask & IB_SRQ_MAX_WR)
		return -EINVAL;

	if (attr_mask & IB_SRQ_LIMIT) {
		if (attr->srq_limit >= srq->msrq.max)
			return -EINVAL;

		mutex_lock(&srq->mutex);
		ret = mlx4_srq_arm(dev->dev, &srq->msrq, attr->srq_limit);
		mutex_unlock(&srq->mutex);

		if (ret)
			return ret;
	}

	return 0;
}

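/*
 * Report the current limit watermark from firmware together with the
 * usable capacity; one WQE is reserved, hence max_wr = max - 1.
 */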
int mlx4_ib_query_srq(struct ib_srq *ibsrq, struct ib_srq_attr *srq_attr)
{
	struct mlx4_ib_dev *dev = to_mdev(ibsrq->device);
	struct mlx4_ib_srq *srq = to_msrq(ibsrq);
	int ret;
	int limit_watermark;

	ret = mlx4_srq_query(dev->dev, &srq->msrq, &limit_watermark);
	if (ret)
		return ret;

	srq_attr->srq_limit = limit_watermark;
	srq_attr->max_wr = srq->msrq.max - 1;
	srq_attr->max_sge = srq->msrq.max_gs;

	return 0;
}

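/*
 * Destroy the SRQ: free the firmware object and MTT first, then the
 * doorbell record and buffer for the user or kernel variant.
 */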
void mlx4_ib_destroy_srq(struct ib_srq *srq, struct ib_udata *udata)
{
	struct mlx4_ib_dev *dev = to_mdev(srq->device);
	struct mlx4_ib_srq *msrq = to_msrq(srq);

	mlx4_srq_free(dev->dev, &msrq->msrq);
	mlx4_mtt_cleanup(dev->dev, &msrq->mtt);

	if (udata) {
		mlx4_ib_db_unmap_user(
			rdma_udata_to_drv_context(
				udata,
				struct mlx4_ib_ucontext,
				ibucontext),
			&msrq->db);
	} else {
		kvfree(msrq->wrid);
		mlx4_buf_free(dev->dev, msrq->msrq.max << msrq->msrq.wqe_shift,
			      &msrq->buf);
		mlx4_db_free(dev->dev, &msrq->db);
	}
	ib_umem_release(msrq->umem);
}

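/*
 * Return WQE @wqe_index to the SRQ free list by linking it after the
 * current tail, once its completion has been consumed.
 */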
void mlx4_ib_free_srq_wqe(struct mlx4_ib_srq *srq, int wqe_index)
{
	struct mlx4_wqe_srq_next_seg *next;

	/* always called with interrupts disabled. */
	spin_lock(&srq->lock);

	next = get_wqe(srq, srq->tail);
	next->next_wqe_index = cpu_to_be16(wqe_index);
	srq->tail = wqe_index;

	spin_unlock(&srq->lock);
}

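/*
 * Post a chain of receive work requests to the SRQ: take WQEs off the
 * head of the free list, fill in the scatter lists, and ring the
 * doorbell once for the whole chain.
 */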
int mlx4_ib_post_srq_recv(struct ib_srq *ibsrq, const struct ib_recv_wr *wr,
			  const struct ib_recv_wr **bad_wr)
{
	struct mlx4_ib_srq *srq = to_msrq(ibsrq);
	struct mlx4_wqe_srq_next_seg *next;
	struct mlx4_wqe_data_seg *scat;
	unsigned long flags;
	int err = 0;
	int nreq;
	int i;
	struct mlx4_ib_dev *mdev = to_mdev(ibsrq->device);

	spin_lock_irqsave(&srq->lock, flags);
	if (mdev->dev->persist->state & MLX4_DEVICE_STATE_INTERNAL_ERROR) {
		err = -EIO;
		*bad_wr = wr;
		nreq = 0;
		goto out;
	}

	for (nreq = 0; wr; ++nreq, wr = wr->next) {
		if (unlikely(wr->num_sge > srq->msrq.max_gs)) {
			err = -EINVAL;
			*bad_wr = wr;
			break;
		}

		if (unlikely(srq->head == srq->tail)) {
			/* SRQ is full */
			err = -ENOMEM;
			*bad_wr = wr;
			break;
		}

		srq->wrid[srq->head] = wr->wr_id;

		next = get_wqe(srq, srq->head);
		srq->head = be16_to_cpu(next->next_wqe_index);
		scat = (struct mlx4_wqe_data_seg *) (next + 1);

		for (i = 0; i < wr->num_sge; ++i) {
			scat[i].byte_count = cpu_to_be32(wr->sg_list[i].length);
			scat[i].lkey = cpu_to_be32(wr->sg_list[i].lkey);
			scat[i].addr = cpu_to_be64(wr->sg_list[i].addr);
		}

		if (i < srq->msrq.max_gs) {
			scat[i].byte_count = 0;
			scat[i].lkey = cpu_to_be32(MLX4_INVALID_LKEY);
			scat[i].addr = 0;
		}
	}

	if (likely(nreq)) {
		srq->wqe_ctr += nreq;

		/*
		 * Make sure that descriptors are written before
		 * doorbell record.
		 */
		wmb();

		*srq->db.db = cpu_to_be32(srq->wqe_ctr);
	}
out:

	spin_unlock_irqrestore(&srq->lock, flags);

	return err;
}