#include <linux/mlx4/qp.h>
#include <linux/mlx4/srq.h>
#include <linux/slab.h>

#include "mlx4_ib.h"
#include <rdma/mlx4-abi.h>
#include <rdma/uverbs_ioctl.h>

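/* Return a pointer to WQE number @n within the SRQ's descriptor buffer. */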
static void *get_wqe(struct mlx4_ib_srq *srq, int n)
{
	return mlx4_buf_offset(&srq->buf, n << srq->msrq.wqe_shift);
}

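/*
 * Translate a low-level mlx4 SRQ event (limit reached or catastrophic
 * error) into an ib_event and hand it to the consumer's event handler,
 * if one was registered.
 */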
static void mlx4_ib_srq_event(struct mlx4_srq *srq, enum mlx4_event type)
{
	struct ib_event event;
	struct ib_srq *ibsrq = &to_mibsrq(srq)->ibsrq;

	if (ibsrq->event_handler) {
		event.device = ibsrq->device;
		event.element.srq = ibsrq;
		switch (type) {
		case MLX4_EVENT_TYPE_SRQ_LIMIT:
			event.event = IB_EVENT_SRQ_LIMIT_REACHED;
			break;
		case MLX4_EVENT_TYPE_SRQ_CATAS_ERROR:
			event.event = IB_EVENT_SRQ_ERR;
			break;
		default:
			pr_warn("Unexpected event type %d on SRQ %06x\n",
				type, srq->srqn);
			return;
		}

		ibsrq->event_handler(&event, ibsrq->srq_context);
	}
}

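/*
 * Create an SRQ: size the WQE ring, allocate it in user or kernel
 * memory, write the MTT entries used for DMA translation, and install
 * the SRQ in hardware via mlx4_srq_alloc().
 */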
int mlx4_ib_create_srq(struct ib_srq *ib_srq,
		       struct ib_srq_init_attr *init_attr,
		       struct ib_udata *udata)
{
	struct mlx4_ib_dev *dev = to_mdev(ib_srq->device);
	struct mlx4_ib_ucontext *ucontext = rdma_udata_to_drv_context(
		udata, struct mlx4_ib_ucontext, ibucontext);
	struct mlx4_ib_srq *srq = to_msrq(ib_srq);
	struct mlx4_wqe_srq_next_seg *next;
	struct mlx4_wqe_data_seg *scatter;
	u32 cqn;
	u16 xrcdn;
	int desc_size;
	int buf_size;
	int err;
	int i;

	if (init_attr->srq_type != IB_SRQT_BASIC &&
	    init_attr->srq_type != IB_SRQT_XRC)
		return -EOPNOTSUPP;

	/* Sanity check SRQ size before proceeding */
	if (init_attr->attr.max_wr >= dev->dev->caps.max_srq_wqes ||
	    init_attr->attr.max_sge > dev->dev->caps.max_srq_sge)
		return -EINVAL;

	mutex_init(&srq->mutex);
	spin_lock_init(&srq->lock);
	srq->msrq.max = roundup_pow_of_two(init_attr->attr.max_wr + 1);
	srq->msrq.max_gs = init_attr->attr.max_sge;

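	/*
	 * Each WQE holds a "next" segment followed by max_gs scatter
	 * entries; round the descriptor size up to a power of two, with
	 * a floor of 32 bytes, so WQEs can be indexed by wqe_shift.
	 */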
	desc_size = max(32UL,
			roundup_pow_of_two(sizeof(struct mlx4_wqe_srq_next_seg) +
					   srq->msrq.max_gs *
					   sizeof(struct mlx4_wqe_data_seg)));
	srq->msrq.wqe_shift = ilog2(desc_size);

	buf_size = srq->msrq.max * desc_size;

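	/*
	 * For a userspace SRQ, the WQE buffer and doorbell record are
	 * allocated by the user and pinned here; for a kernel SRQ the
	 * driver allocates both and links the free WQEs itself.
	 */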
	if (udata) {
		struct mlx4_ib_create_srq ucmd;

		if (ib_copy_from_udata(&ucmd, udata, sizeof(ucmd)))
			return -EFAULT;

		srq->umem =
			ib_umem_get(ib_srq->device, ucmd.buf_addr, buf_size, 0);
		if (IS_ERR(srq->umem))
			return PTR_ERR(srq->umem);

		err = mlx4_mtt_init(
			dev->dev, ib_umem_num_dma_blocks(srq->umem, PAGE_SIZE),
			PAGE_SHIFT, &srq->mtt);
		if (err)
			goto err_buf;

		err = mlx4_ib_umem_write_mtt(dev, &srq->mtt, srq->umem);
		if (err)
			goto err_mtt;

		err = mlx4_ib_db_map_user(udata, ucmd.db_addr, &srq->db);
		if (err)
			goto err_mtt;
	} else {
		err = mlx4_db_alloc(dev->dev, &srq->db, 0);
		if (err)
			return err;

		*srq->db.db = 0;

		if (mlx4_buf_alloc(dev->dev, buf_size, PAGE_SIZE * 2,
				   &srq->buf)) {
			err = -ENOMEM;
			goto err_db;
		}

		srq->head = 0;
		srq->tail = srq->msrq.max - 1;
		srq->wqe_ctr = 0;

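		/*
		 * Chain all WQEs into a circular free list and mark every
		 * scatter entry invalid so the HCA ignores unused entries.
		 */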
		for (i = 0; i < srq->msrq.max; ++i) {
			next = get_wqe(srq, i);
			next->next_wqe_index =
				cpu_to_be16((i + 1) & (srq->msrq.max - 1));

			for (scatter = (void *) (next + 1);
			     (void *) scatter < (void *) next + desc_size;
			     ++scatter)
				scatter->lkey = cpu_to_be32(MLX4_INVALID_LKEY);
		}

		err = mlx4_mtt_init(dev->dev, srq->buf.npages, srq->buf.page_shift,
				    &srq->mtt);
		if (err)
			goto err_buf;

		err = mlx4_buf_write_mtt(dev->dev, &srq->mtt, &srq->buf);
		if (err)
			goto err_mtt;

		srq->wrid = kvmalloc_array(srq->msrq.max,
					   sizeof(u64), GFP_KERNEL);
		if (!srq->wrid) {
			err = -ENOMEM;
			goto err_mtt;
		}
	}

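	/*
	 * XRC SRQs are bound to a CQ and an XRC domain; basic SRQs use
	 * CQ number 0 and the device's reserved XRC domain instead.
	 */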
	cqn = ib_srq_has_cq(init_attr->srq_type) ?
		to_mcq(init_attr->ext.cq)->mcq.cqn : 0;
	xrcdn = (init_attr->srq_type == IB_SRQT_XRC) ?
		to_mxrcd(init_attr->ext.xrc.xrcd)->xrcdn :
		(u16) dev->dev->caps.reserved_xrcds;

	err = mlx4_srq_alloc(dev->dev, to_mpd(ib_srq->pd)->pdn, cqn, xrcdn,
			     &srq->mtt, srq->db.dma, &srq->msrq);
	if (err)
		goto err_wrid;

	srq->msrq.event = mlx4_ib_srq_event;
	srq->ibsrq.ext.xrc.srq_num = srq->msrq.srqn;

	if (udata)
		if (ib_copy_to_udata(udata, &srq->msrq.srqn, sizeof(__u32))) {
			err = -EFAULT;
			goto err_wrid;
		}

	init_attr->attr.max_wr = srq->msrq.max - 1;

	return 0;

err_wrid:
	if (udata)
		mlx4_ib_db_unmap_user(ucontext, &srq->db);
	else
		kvfree(srq->wrid);

err_mtt:
	mlx4_mtt_cleanup(dev->dev, &srq->mtt);

err_buf:
	if (!srq->umem)
		mlx4_buf_free(dev->dev, buf_size, &srq->buf);
	ib_umem_release(srq->umem);

err_db:
	if (!udata)
		mlx4_db_free(dev->dev, &srq->db);

	return err;
}

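/*
 * Modify an SRQ. Resizing (IB_SRQ_MAX_WR) is not supported; only
 * arming the limit event via IB_SRQ_LIMIT is.
 */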
int mlx4_ib_modify_srq(struct ib_srq *ibsrq, struct ib_srq_attr *attr,
		       enum ib_srq_attr_mask attr_mask, struct ib_udata *udata)
{
	struct mlx4_ib_dev *dev = to_mdev(ibsrq->device);
	struct mlx4_ib_srq *srq = to_msrq(ibsrq);
	int ret;

	/* We don't support resizing SRQs (yet?) */
	if (attr_mask & IB_SRQ_MAX_WR)
		return -EINVAL;

	if (attr_mask & IB_SRQ_LIMIT) {
		if (attr->srq_limit >= srq->msrq.max)
			return -EINVAL;

		mutex_lock(&srq->mutex);
		ret = mlx4_srq_arm(dev->dev, &srq->msrq, attr->srq_limit);
		mutex_unlock(&srq->mutex);

		if (ret)
			return ret;
	}

	return 0;
}

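/*
 * Query the SRQ's current limit watermark from hardware and report the
 * usable capacity (one WQE is reserved, hence max - 1).
 */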
int mlx4_ib_query_srq(struct ib_srq *ibsrq, struct ib_srq_attr *srq_attr)
{
	struct mlx4_ib_dev *dev = to_mdev(ibsrq->device);
	struct mlx4_ib_srq *srq = to_msrq(ibsrq);
	int ret;
	int limit_watermark;

	ret = mlx4_srq_query(dev->dev, &srq->msrq, &limit_watermark);
	if (ret)
		return ret;

	srq_attr->srq_limit = limit_watermark;
	srq_attr->max_wr = srq->msrq.max - 1;
	srq_attr->max_sge = srq->msrq.max_gs;

	return 0;
}

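/*
 * Destroy an SRQ: free the hardware object and MTT first, then release
 * the doorbell record and WQE buffer from whichever side (user or
 * kernel) allocated them.
 */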
int mlx4_ib_destroy_srq(struct ib_srq *srq, struct ib_udata *udata)
{
	struct mlx4_ib_dev *dev = to_mdev(srq->device);
	struct mlx4_ib_srq *msrq = to_msrq(srq);

	mlx4_srq_free(dev->dev, &msrq->msrq);
	mlx4_mtt_cleanup(dev->dev, &msrq->mtt);

	if (udata) {
		mlx4_ib_db_unmap_user(
			rdma_udata_to_drv_context(
				udata,
				struct mlx4_ib_ucontext,
				ibucontext),
			&msrq->db);
	} else {
		kvfree(msrq->wrid);
		mlx4_buf_free(dev->dev, msrq->msrq.max << msrq->msrq.wqe_shift,
			      &msrq->buf);
		mlx4_db_free(dev->dev, &msrq->db);
	}
	ib_umem_release(msrq->umem);
	return 0;
}

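/*
 * Return a consumed WQE to the free list by linking it after the
 * current tail.
 */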
void mlx4_ib_free_srq_wqe(struct mlx4_ib_srq *srq, int wqe_index)
{
	struct mlx4_wqe_srq_next_seg *next;

	/* always called with interrupts disabled. */
	spin_lock(&srq->lock);

	next = get_wqe(srq, srq->tail);
	next->next_wqe_index = cpu_to_be16(wqe_index);
	srq->tail = wqe_index;

	spin_unlock(&srq->lock);
}

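/*
 * Post a chain of receive work requests to the SRQ. Each WR takes one
 * WQE from the free list; the doorbell record is updated once at the
 * end to tell the HCA about all newly posted WQEs.
 */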
int mlx4_ib_post_srq_recv(struct ib_srq *ibsrq, const struct ib_recv_wr *wr,
			  const struct ib_recv_wr **bad_wr)
{
	struct mlx4_ib_srq *srq = to_msrq(ibsrq);
	struct mlx4_wqe_srq_next_seg *next;
	struct mlx4_wqe_data_seg *scat;
	unsigned long flags;
	int err = 0;
	int nreq;
	int i;
	struct mlx4_ib_dev *mdev = to_mdev(ibsrq->device);

	spin_lock_irqsave(&srq->lock, flags);
	if (mdev->dev->persist->state & MLX4_DEVICE_STATE_INTERNAL_ERROR) {
		err = -EIO;
		*bad_wr = wr;
		nreq = 0;
		goto out;
	}

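	/* Walk the WR chain, consuming one free WQE per request. */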
	for (nreq = 0; wr; ++nreq, wr = wr->next) {
		if (unlikely(wr->num_sge > srq->msrq.max_gs)) {
			err = -EINVAL;
			*bad_wr = wr;
			break;
		}

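		/*
		 * head == tail means a single free WQE remains; it is
		 * kept as a sentinel terminating the free list, so the
		 * SRQ is full.
		 */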
		if (unlikely(srq->head == srq->tail)) {
			err = -ENOMEM;
			*bad_wr = wr;
			break;
		}

		srq->wrid[srq->head] = wr->wr_id;

		next = get_wqe(srq, srq->head);
		srq->head = be16_to_cpu(next->next_wqe_index);
		scat = (struct mlx4_wqe_data_seg *) (next + 1);

		for (i = 0; i < wr->num_sge; ++i) {
			scat[i].byte_count = cpu_to_be32(wr->sg_list[i].length);
			scat[i].lkey = cpu_to_be32(wr->sg_list[i].lkey);
			scat[i].addr = cpu_to_be64(wr->sg_list[i].addr);
		}

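		/* Terminate a short scatter list with an invalid lkey. */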
		if (i < srq->msrq.max_gs) {
			scat[i].byte_count = 0;
			scat[i].lkey = cpu_to_be32(MLX4_INVALID_LKEY);
			scat[i].addr = 0;
		}
	}

	if (likely(nreq)) {
		srq->wqe_ctr += nreq;

		/*
		 * Make sure that descriptors are written before
		 * doorbell record.
		 */
		wmb();

		*srq->db.db = cpu_to_be32(srq->wqe_ctr);
	}
out:
	spin_unlock_irqrestore(&srq->lock, flags);

	return err;
}