#include <linux/mlx4/qp.h>
#include <linux/mlx4/srq.h>
#include <linux/slab.h>

#include "mlx4_ib.h"
#include "user.h"

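/*
 * Return a pointer to WQE number n in the SRQ buffer.  WQEs are a
 * power-of-two size, so the byte offset is simply n << wqe_shift.
 */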
static void *get_wqe(struct mlx4_ib_srq *srq, int n)
{
	return mlx4_buf_offset(&srq->buf, n << srq->msrq.wqe_shift);
}

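/*
 * Dispatch an asynchronous SRQ event (limit reached or catastrophic
 * error) from the low-level mlx4 driver to the consumer's handler.
 */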
static void mlx4_ib_srq_event(struct mlx4_srq *srq, enum mlx4_event type)
{
	struct ib_event event;
	struct ib_srq *ibsrq = &to_mibsrq(srq)->ibsrq;

	if (ibsrq->event_handler) {
		event.device      = ibsrq->device;
		event.element.srq = ibsrq;
		switch (type) {
		case MLX4_EVENT_TYPE_SRQ_LIMIT:
			event.event = IB_EVENT_SRQ_LIMIT_REACHED;
			break;
		case MLX4_EVENT_TYPE_SRQ_CATAS_ERROR:
			event.event = IB_EVENT_SRQ_ERR;
			break;
		default:
			pr_warn("Unexpected event type %d on SRQ %06x\n",
				type, srq->srqn);
			return;
		}

		ibsrq->event_handler(&event, ibsrq->srq_context);
	}
}

struct ib_srq *mlx4_ib_create_srq(struct ib_pd *pd,
				  struct ib_srq_init_attr *init_attr,
				  struct ib_udata *udata)
{
	struct mlx4_ib_dev *dev = to_mdev(pd->device);
	struct mlx4_ib_srq *srq;
	struct mlx4_wqe_srq_next_seg *next;
	struct mlx4_wqe_data_seg *scatter;
	u32 cqn;
	u16 xrcdn;
	int desc_size;
	int buf_size;
	int err;
	int i;

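	/* Sanity check SRQ size before proceeding. */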
	if (init_attr->attr.max_wr >= dev->dev->caps.max_srq_wqes ||
	    init_attr->attr.max_sge > dev->dev->caps.max_srq_sge)
		return ERR_PTR(-EINVAL);

	srq = kmalloc(sizeof *srq, GFP_KERNEL);
	if (!srq)
		return ERR_PTR(-ENOMEM);

	mutex_init(&srq->mutex);
	spin_lock_init(&srq->lock);
	srq->msrq.max    = roundup_pow_of_two(init_attr->attr.max_wr + 1);
	srq->msrq.max_gs = init_attr->attr.max_sge;

	desc_size = max(32UL,
			roundup_pow_of_two(sizeof (struct mlx4_wqe_srq_next_seg) +
					   srq->msrq.max_gs *
					   sizeof (struct mlx4_wqe_data_seg)));
	srq->msrq.wqe_shift = ilog2(desc_size);

	buf_size = srq->msrq.max * desc_size;

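	/*
	 * For a userspace SRQ the buffer and doorbell live in user memory:
	 * pin them and write the MTT entries.  For a kernel SRQ, allocate
	 * the buffer and doorbell record here instead.
	 */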
	if (pd->uobject) {
		struct mlx4_ib_create_srq ucmd;

		if (ib_copy_from_udata(&ucmd, udata, sizeof ucmd)) {
			err = -EFAULT;
			goto err_srq;
		}

		srq->umem = ib_umem_get(pd->uobject->context, ucmd.buf_addr,
					buf_size, 0, 0);
		if (IS_ERR(srq->umem)) {
			err = PTR_ERR(srq->umem);
			goto err_srq;
		}

		err = mlx4_mtt_init(dev->dev, ib_umem_page_count(srq->umem),
				    ilog2(srq->umem->page_size), &srq->mtt);
		if (err)
			goto err_buf;

		err = mlx4_ib_umem_write_mtt(dev, &srq->mtt, srq->umem);
		if (err)
			goto err_mtt;

		err = mlx4_ib_db_map_user(to_mucontext(pd->uobject->context),
					  ucmd.db_addr, &srq->db);
		if (err)
			goto err_mtt;
	} else {
		err = mlx4_db_alloc(dev->dev, &srq->db, 0, GFP_KERNEL);
		if (err)
			goto err_srq;

		*srq->db.db = 0;

		if (mlx4_buf_alloc(dev->dev, buf_size, PAGE_SIZE * 2, &srq->buf,
				   GFP_KERNEL)) {
			err = -ENOMEM;
			goto err_db;
		}

		srq->head    = 0;
		srq->tail    = srq->msrq.max - 1;
		srq->wqe_ctr = 0;

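		/*
		 * Link every WQE into a circular free list and mark each
		 * scatter entry invalid so the HCA skips unused entries.
		 */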
		for (i = 0; i < srq->msrq.max; ++i) {
			next = get_wqe(srq, i);
			next->next_wqe_index =
				cpu_to_be16((i + 1) & (srq->msrq.max - 1));

			for (scatter = (void *) (next + 1);
			     (void *) scatter < (void *) next + desc_size;
			     ++scatter)
				scatter->lkey = cpu_to_be32(MLX4_INVALID_LKEY);
		}

		err = mlx4_mtt_init(dev->dev, srq->buf.npages, srq->buf.page_shift,
				    &srq->mtt);
		if (err)
			goto err_buf;

		err = mlx4_buf_write_mtt(dev->dev, &srq->mtt, &srq->buf, GFP_KERNEL);
		if (err)
			goto err_mtt;

		srq->wrid = kmalloc(srq->msrq.max * sizeof (u64), GFP_KERNEL);
		if (!srq->wrid) {
			err = -ENOMEM;
			goto err_mtt;
		}
	}

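	/*
	 * XRC SRQs are bound to a completion queue and an XRC domain;
	 * ordinary SRQs use CQN 0 and the device's reserved XRC domain.
	 */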
	cqn = (init_attr->srq_type == IB_SRQT_XRC) ?
		to_mcq(init_attr->ext.xrc.cq)->mcq.cqn : 0;
	xrcdn = (init_attr->srq_type == IB_SRQT_XRC) ?
		to_mxrcd(init_attr->ext.xrc.xrcd)->xrcdn :
		(u16) dev->dev->caps.reserved_xrcds;
	err = mlx4_srq_alloc(dev->dev, to_mpd(pd)->pdn, cqn, xrcdn, &srq->mtt,
			     srq->db.dma, &srq->msrq);
	if (err)
		goto err_wrid;

	srq->msrq.event = mlx4_ib_srq_event;
	srq->ibsrq.ext.xrc.srq_num = srq->msrq.srqn;

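	/* Return the SRQ number so userspace can reference this SRQ. */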
	if (pd->uobject)
		if (ib_copy_to_udata(udata, &srq->msrq.srqn, sizeof (__u32))) {
			err = -EFAULT;
			goto err_wrid;
		}

	init_attr->attr.max_wr = srq->msrq.max - 1;

	return &srq->ibsrq;

err_wrid:
	if (pd->uobject)
		mlx4_ib_db_unmap_user(to_mucontext(pd->uobject->context), &srq->db);
	else
		kfree(srq->wrid);

err_mtt:
	mlx4_mtt_cleanup(dev->dev, &srq->mtt);

err_buf:
	if (pd->uobject)
		ib_umem_release(srq->umem);
	else
		mlx4_buf_free(dev->dev, buf_size, &srq->buf);

err_db:
	if (!pd->uobject)
		mlx4_db_free(dev->dev, &srq->db);

err_srq:
	kfree(srq);

	return ERR_PTR(err);
}

int mlx4_ib_modify_srq(struct ib_srq *ibsrq, struct ib_srq_attr *attr,
		       enum ib_srq_attr_mask attr_mask, struct ib_udata *udata)
{
	struct mlx4_ib_dev *dev = to_mdev(ibsrq->device);
	struct mlx4_ib_srq *srq = to_msrq(ibsrq);
	int ret;

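	/* Resizing SRQs is not supported. */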
	if (attr_mask & IB_SRQ_MAX_WR)
		return -EINVAL;

	if (attr_mask & IB_SRQ_LIMIT) {
		if (attr->srq_limit >= srq->msrq.max)
			return -EINVAL;

		mutex_lock(&srq->mutex);
		ret = mlx4_srq_arm(dev->dev, &srq->msrq, attr->srq_limit);
		mutex_unlock(&srq->mutex);

		if (ret)
			return ret;
	}

	return 0;
}

int mlx4_ib_query_srq(struct ib_srq *ibsrq, struct ib_srq_attr *srq_attr)
{
	struct mlx4_ib_dev *dev = to_mdev(ibsrq->device);
	struct mlx4_ib_srq *srq = to_msrq(ibsrq);
	int ret;
	int limit_watermark;

	ret = mlx4_srq_query(dev->dev, &srq->msrq, &limit_watermark);
	if (ret)
		return ret;

	srq_attr->srq_limit = limit_watermark;
	srq_attr->max_wr    = srq->msrq.max - 1;
	srq_attr->max_sge   = srq->msrq.max_gs;

	return 0;
}

int mlx4_ib_destroy_srq(struct ib_srq *srq)
{
	struct mlx4_ib_dev *dev = to_mdev(srq->device);
	struct mlx4_ib_srq *msrq = to_msrq(srq);

	mlx4_srq_free(dev->dev, &msrq->msrq);
	mlx4_mtt_cleanup(dev->dev, &msrq->mtt);

	if (srq->uobject) {
		mlx4_ib_db_unmap_user(to_mucontext(srq->uobject->context), &msrq->db);
		ib_umem_release(msrq->umem);
	} else {
		kfree(msrq->wrid);
		mlx4_buf_free(dev->dev, msrq->msrq.max << msrq->msrq.wqe_shift,
			      &msrq->buf);
		mlx4_db_free(dev->dev, &msrq->db);
	}

	kfree(msrq);

	return 0;
}

void mlx4_ib_free_srq_wqe(struct mlx4_ib_srq *srq, int wqe_index)
{
	struct mlx4_wqe_srq_next_seg *next;

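	/* always called with interrupts disabled. */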
	spin_lock(&srq->lock);

	next = get_wqe(srq, srq->tail);
	next->next_wqe_index = cpu_to_be16(wqe_index);
	srq->tail = wqe_index;

	spin_unlock(&srq->lock);
}

int mlx4_ib_post_srq_recv(struct ib_srq *ibsrq, struct ib_recv_wr *wr,
			  struct ib_recv_wr **bad_wr)
{
	struct mlx4_ib_srq *srq = to_msrq(ibsrq);
	struct mlx4_wqe_srq_next_seg *next;
	struct mlx4_wqe_data_seg *scat;
	unsigned long flags;
	int err = 0;
	int nreq;
	int i;
	struct mlx4_ib_dev *mdev = to_mdev(ibsrq->device);

	spin_lock_irqsave(&srq->lock, flags);
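	/* Fail all work requests if the device is in internal error state. */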
	if (mdev->dev->persist->state & MLX4_DEVICE_STATE_INTERNAL_ERROR) {
		err = -EIO;
		*bad_wr = wr;
		nreq = 0;
		goto out;
	}

	for (nreq = 0; wr; ++nreq, wr = wr->next) {
		if (unlikely(wr->num_sge > srq->msrq.max_gs)) {
			err = -EINVAL;
			*bad_wr = wr;
			break;
		}

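		/* head == tail means the free list is exhausted: SRQ is full. */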
		if (unlikely(srq->head == srq->tail)) {
			err = -ENOMEM;
			*bad_wr = wr;
			break;
		}

		srq->wrid[srq->head] = wr->wr_id;

		next = get_wqe(srq, srq->head);
		srq->head = be16_to_cpu(next->next_wqe_index);
		scat = (struct mlx4_wqe_data_seg *) (next + 1);

		for (i = 0; i < wr->num_sge; ++i) {
			scat[i].byte_count = cpu_to_be32(wr->sg_list[i].length);
			scat[i].lkey       = cpu_to_be32(wr->sg_list[i].lkey);
			scat[i].addr       = cpu_to_be64(wr->sg_list[i].addr);
		}

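		/* Terminate a short scatter list with an invalid-lkey entry. */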
		if (i < srq->msrq.max_gs) {
			scat[i].byte_count = 0;
			scat[i].lkey       = cpu_to_be32(MLX4_INVALID_LKEY);
			scat[i].addr       = 0;
		}
	}

	if (likely(nreq)) {
		srq->wqe_ctr += nreq;

		/*
		 * Make sure that descriptors are written before
		 * doorbell record.
		 */
		wmb();

		*srq->db.db = cpu_to_be32(srq->wqe_ctr);
	}

out:
	spin_unlock_irqrestore(&srq->lock, flags);

	return err;
}