#include <linux/err.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>

#include "srq.h"
#include "vt.h"
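/**
 * rvt_driver_srq_init - init srq resources on a per driver basis
 * @rdi: rvt dev structure
 *
 * Do any initialization needed when a driver registers with rdmavt.
 */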
void rvt_driver_srq_init(struct rvt_dev_info *rdi)
{
	spin_lock_init(&rdi->n_srqs_lock);
	rdi->n_srqs_allocated = 0;
}
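
/**
 * rvt_create_srq - create a shared receive queue
 * @ibpd: the protection domain of the SRQ to create
 * @srq_init_attr: the attributes of the SRQ
 * @udata: data from libibverbs when creating a user SRQ
 *
 * Return: the newly allocated SRQ on success, or an ERR_PTR on failure
 */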
struct ib_srq *rvt_create_srq(struct ib_pd *ibpd,
			      struct ib_srq_init_attr *srq_init_attr,
			      struct ib_udata *udata)
{
	struct rvt_dev_info *dev = ib_to_rvt(ibpd->device);
	struct rvt_srq *srq;
	u32 sz;
	struct ib_srq *ret;

	if (srq_init_attr->srq_type != IB_SRQT_BASIC)
		return ERR_PTR(-ENOSYS);

	if (srq_init_attr->attr.max_sge == 0 ||
	    srq_init_attr->attr.max_sge > dev->dparms.props.max_srq_sge ||
	    srq_init_attr->attr.max_wr == 0 ||
	    srq_init_attr->attr.max_wr > dev->dparms.props.max_srq_wr)
		return ERR_PTR(-EINVAL);

	srq = kmalloc(sizeof(*srq), GFP_KERNEL);
	if (!srq)
		return ERR_PTR(-ENOMEM);
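
	/*
	 * Size the receive queue for max_wr + 1 entries; use vmalloc_user()
	 * since the queue can be large and may be mmapped into user space.
	 */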
	srq->rq.size = srq_init_attr->attr.max_wr + 1;
	srq->rq.max_sge = srq_init_attr->attr.max_sge;
	sz = sizeof(struct ib_sge) * srq->rq.max_sge +
		sizeof(struct rvt_rwqe);
	srq->rq.wq = vmalloc_user(sizeof(struct rvt_rwq) + srq->rq.size * sz);
	if (!srq->rq.wq) {
		ret = ERR_PTR(-ENOMEM);
		goto bail_srq;
	}
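
	/*
	 * For a user SRQ, set up the mmap info and return the mmap offset
	 * to user space so it can map the receive work queue.
	 */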
	if (udata && udata->outlen >= sizeof(__u64)) {
		int err;
		u32 s = sizeof(struct rvt_rwq) + srq->rq.size * sz;

		srq->ip =
			rvt_create_mmap_info(dev, s, ibpd->uobject->context,
					     srq->rq.wq);
		if (!srq->ip) {
			ret = ERR_PTR(-ENOMEM);
			goto bail_wq;
		}

		err = ib_copy_to_udata(udata, &srq->ip->offset,
				       sizeof(srq->ip->offset));
		if (err) {
			ret = ERR_PTR(err);
			goto bail_ip;
		}
	} else {
		srq->ip = NULL;
	}
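
	/*
	 * Initialize the receive queue state; the ib core initializes the
	 * rest of srq->ibsrq after this returns.
	 */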
	spin_lock_init(&srq->rq.lock);
	srq->rq.wq->head = 0;
	srq->rq.wq->tail = 0;
	srq->limit = srq_init_attr->attr.srq_limit;

	spin_lock(&dev->n_srqs_lock);
	if (dev->n_srqs_allocated == dev->dparms.props.max_srq) {
		spin_unlock(&dev->n_srqs_lock);
		ret = ERR_PTR(-ENOMEM);
		goto bail_ip;
	}

	dev->n_srqs_allocated++;
	spin_unlock(&dev->n_srqs_lock);

	if (srq->ip) {
		spin_lock_irq(&dev->pending_lock);
		list_add(&srq->ip->pending_mmaps, &dev->pending_mmaps);
		spin_unlock_irq(&dev->pending_lock);
	}

	return &srq->ibsrq;

bail_ip:
	kfree(srq->ip);
bail_wq:
	vfree(srq->rq.wq);
bail_srq:
	kfree(srq);
	return ret;
}
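
/**
 * rvt_modify_srq - modify a shared receive queue
 * @ibsrq: the SRQ to modify
 * @attr: the new attributes of the SRQ
 * @attr_mask: indicates which attributes to modify
 * @udata: user data for libibverbs.so
 *
 * Return: 0 on success
 */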
int rvt_modify_srq(struct ib_srq *ibsrq, struct ib_srq_attr *attr,
		   enum ib_srq_attr_mask attr_mask,
		   struct ib_udata *udata)
{
	struct rvt_srq *srq = ibsrq_to_rvtsrq(ibsrq);
	struct rvt_dev_info *dev = ib_to_rvt(ibsrq->device);
	struct rvt_rwq *wq;
	int ret = 0;

	if (attr_mask & IB_SRQ_MAX_WR) {
		struct rvt_rwq *owq;
		struct rvt_rwqe *p;
		u32 sz, size, n, head, tail;
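
		/* Check that the requested sizes are below the limits. */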
		if ((attr->max_wr > dev->dparms.props.max_srq_wr) ||
		    ((attr_mask & IB_SRQ_LIMIT) ?
		     attr->srq_limit : srq->limit) > attr->max_wr)
			return -EINVAL;

		sz = sizeof(struct rvt_rwqe) +
			srq->rq.max_sge * sizeof(struct ib_sge);
		size = attr->max_wr + 1;
		wq = vmalloc_user(sizeof(struct rvt_rwq) + size * sz);
		if (!wq)
			return -ENOMEM;
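
		/*
		 * For user SRQs, fetch the user address where the new mmap
		 * offset will be written and check that it is writable
		 * before committing to the resize.
		 */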
		if (udata && udata->inlen >= sizeof(__u64)) {
			__u64 offset_addr;
			__u64 offset = 0;

			ret = ib_copy_from_udata(&offset_addr, udata,
						 sizeof(offset_addr));
			if (ret)
				goto bail_free;
			udata->outbuf = (void __user *)
					(unsigned long)offset_addr;
			ret = ib_copy_to_udata(udata, &offset,
					       sizeof(offset));
			if (ret)
				goto bail_free;
		}

		spin_lock_irq(&srq->rq.lock);
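		/*
		 * Validate the head and tail pointers and compute how many
		 * WQEs are outstanding before copying them to the new queue.
		 */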
		owq = srq->rq.wq;
		head = owq->head;
		tail = owq->tail;
		if (head >= srq->rq.size || tail >= srq->rq.size) {
			ret = -EINVAL;
			goto bail_unlock;
		}
		n = head;
		if (n < tail)
			n += srq->rq.size - tail;
		else
			n -= tail;
		if (size <= n) {
			ret = -EINVAL;
			goto bail_unlock;
		}
		n = 0;
		p = wq->wq;
		while (tail != head) {
			struct rvt_rwqe *wqe;
			int i;

			wqe = rvt_get_rwqe_ptr(&srq->rq, tail);
			p->wr_id = wqe->wr_id;
			p->num_sge = wqe->num_sge;
			for (i = 0; i < wqe->num_sge; i++)
				p->sg_list[i] = wqe->sg_list[i];
			n++;
			p = (struct rvt_rwqe *)((char *)p + sz);
			if (++tail >= srq->rq.size)
				tail = 0;
		}
		srq->rq.wq = wq;
		srq->rq.size = size;
		wq->head = n;
		wq->tail = 0;
		if (attr_mask & IB_SRQ_LIMIT)
			srq->limit = attr->srq_limit;
		spin_unlock_irq(&srq->rq.lock);

		vfree(owq);

		if (srq->ip) {
			struct rvt_mmap_info *ip = srq->ip;
			struct rvt_dev_info *dev = ib_to_rvt(srq->ibsrq.device);
			u32 s = sizeof(struct rvt_rwq) + size * sz;

			rvt_update_mmap_info(dev, ip, s, wq);
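
			/* Return the new mmap offset to user space. */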
			if (udata && udata->inlen >= sizeof(__u64)) {
				ret = ib_copy_to_udata(udata, &ip->offset,
						       sizeof(ip->offset));
				if (ret)
					return ret;
			}
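
			/*
			 * Put the new mapping onto the pending list unless
			 * it is already there.
			 */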
			spin_lock_irq(&dev->pending_lock);
			if (list_empty(&ip->pending_mmaps))
				list_add(&ip->pending_mmaps,
					 &dev->pending_mmaps);
			spin_unlock_irq(&dev->pending_lock);
		}
	} else if (attr_mask & IB_SRQ_LIMIT) {
		spin_lock_irq(&srq->rq.lock);
		if (attr->srq_limit >= srq->rq.size)
			ret = -EINVAL;
		else
			srq->limit = attr->srq_limit;
		spin_unlock_irq(&srq->rq.lock);
	}
	return ret;

bail_unlock:
	spin_unlock_irq(&srq->rq.lock);
bail_free:
	vfree(wq);
	return ret;
}
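
/**
 * rvt_query_srq - query srq data
 * @ibsrq: srq to query
 * @attr: return info in attr
 *
 * Return: always 0
 */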
int rvt_query_srq(struct ib_srq *ibsrq, struct ib_srq_attr *attr)
{
	struct rvt_srq *srq = ibsrq_to_rvtsrq(ibsrq);

	attr->max_wr = srq->rq.size - 1;
	attr->max_sge = srq->rq.max_sge;
	attr->srq_limit = srq->limit;
	return 0;
}
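
/**
 * rvt_destroy_srq - destroy a shared receive queue
 * @ibsrq: srq object to destroy
 *
 * Return: always 0
 */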
int rvt_destroy_srq(struct ib_srq *ibsrq)
{
	struct rvt_srq *srq = ibsrq_to_rvtsrq(ibsrq);
	struct rvt_dev_info *dev = ib_to_rvt(ibsrq->device);

	spin_lock(&dev->n_srqs_lock);
	dev->n_srqs_allocated--;
	spin_unlock(&dev->n_srqs_lock);
	if (srq->ip)
		kref_put(&srq->ip->ref, rvt_release_mmap_info);
	else
		vfree(srq->rq.wq);
	kfree(srq);

	return 0;
}