#include <rdma/ib_umem.h>
#include <rdma/ib_smi.h>

#include "qib.h"

/* Fast memory region */
struct qib_fmr {
	struct ib_fmr ibfmr;
	struct qib_mregion mr;        /* must be last */
};

static inline struct qib_fmr *to_ifmr(struct ib_fmr *ibfmr)
{
	return container_of(ibfmr, struct qib_fmr, ibfmr);
}
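
/*
 * A qib_mregion stores its translation as a two-level table: mr->map[]
 * points at qib_segarray blocks, each holding QIB_SEGSZ (vaddr, length)
 * segments (see qib_verbs.h).  Every registration path below fills the
 * table the same way: advance n within one block, then bump m to the
 * next block when n reaches QIB_SEGSZ.
 */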

static int init_qib_mregion(struct qib_mregion *mr, struct ib_pd *pd,
	int count)
{
	int m, i = 0;
	int rval = 0;

	m = (count + QIB_SEGSZ - 1) / QIB_SEGSZ;
	for (; i < m; i++) {
		mr->map[i] = kzalloc(sizeof *mr->map[0], GFP_KERNEL);
		if (!mr->map[i])
			goto bail;
	}
	mr->mapsz = m;
	init_completion(&mr->comp);
	/* count returning the ptr to user */
	atomic_set(&mr->refcount, 1);
	mr->pd = pd;
	mr->max_segs = count;
out:
	return rval;
bail:
	while (i)
		kfree(mr->map[--i]);
	rval = -ENOMEM;
	goto out;
}

static void deinit_qib_mregion(struct qib_mregion *mr)
{
	int i = mr->mapsz;

	mr->mapsz = 0;
	while (i)
		kfree(mr->map[--i]);
}
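
/*
 * Region lifetime, in brief: init_qib_mregion() starts mr->refcount at 1
 * and primes mr->comp.  qib_put_mr() drops a reference and, on the last
 * one, defers mr_rcu_callback() (bottom of this file) through call_rcu()
 * so lockless lkey-table readers finish first; the dereg/dealloc paths
 * below then wake from waiting on mr->comp and free the region.
 */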

/**
 * qib_get_dma_mr - get a DMA memory region
 * @pd: protection domain for this memory region
 * @acc: access flags
 *
 * Returns the memory region on success, otherwise returns an errno.
 * Note that all DMA addresses should be created via the
 * struct ib_dma_mapping_ops functions (see qib_dma.c).
 */
struct ib_mr *qib_get_dma_mr(struct ib_pd *pd, int acc)
{
	struct qib_mr *mr = NULL;
	struct ib_mr *ret;
	int rval;

	if (to_ipd(pd)->user) {
		ret = ERR_PTR(-EPERM);
		goto bail;
	}

	mr = kzalloc(sizeof *mr, GFP_KERNEL);
	if (!mr) {
		ret = ERR_PTR(-ENOMEM);
		goto bail;
	}

	rval = init_qib_mregion(&mr->mr, pd, 0);
	if (rval) {
		ret = ERR_PTR(rval);
		goto bail;
	}

	/* dma_region = 1: this becomes the device's reserved DMA region. */
	rval = qib_alloc_lkey(&mr->mr, 1);
	if (rval) {
		ret = ERR_PTR(rval);
		goto bail_mregion;
	}

	mr->mr.access_flags = acc;
	ret = &mr->ibmr;
done:
	return ret;

bail_mregion:
	deinit_qib_mregion(&mr->mr);
bail:
	kfree(mr);
	goto done;
}

static struct qib_mr *alloc_mr(int count, struct ib_pd *pd)
{
	struct qib_mr *mr;
	int rval = -ENOMEM;
	int m;

	/* Allocate struct plus pointers to first level page tables. */
	m = (count + QIB_SEGSZ - 1) / QIB_SEGSZ;
	mr = kzalloc(sizeof *mr + m * sizeof mr->mr.map[0], GFP_KERNEL);
	if (!mr)
		goto bail;

	rval = init_qib_mregion(&mr->mr, pd, count);
	if (rval)
		goto bail;
	/*
	 * ib_reg_phys_mr() will initialize mr->ibmr except for
	 * lkey and rkey.
	 */
	rval = qib_alloc_lkey(&mr->mr, 0);
	if (rval)
		goto bail_mregion;
	mr->ibmr.lkey = mr->mr.lkey;
	mr->ibmr.rkey = mr->mr.lkey;
done:
	return mr;

bail_mregion:
	deinit_qib_mregion(&mr->mr);
bail:
	kfree(mr);
	mr = ERR_PTR(rval);
	goto done;
}
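
/*
 * alloc_mr() above is the shared back end for qib_reg_phys_mr(),
 * qib_reg_user_mr(), and qib_alloc_fast_reg_mr(); only how the segment
 * table gets filled differs per caller.
 */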

/**
 * qib_reg_phys_mr - register a physical memory region
 * @pd: protection domain for this memory region
 * @buffer_list: pointer to the list of physical buffers to register
 * @num_phys_buf: the number of physical buffers to register
 * @acc: access flags for this memory region
 * @iova_start: the starting address passed over IB which maps to this MR
 *
 * Returns the memory region on success, otherwise returns an errno.
 */
struct ib_mr *qib_reg_phys_mr(struct ib_pd *pd,
			      struct ib_phys_buf *buffer_list,
			      int num_phys_buf, int acc, u64 *iova_start)
{
	struct qib_mr *mr;
	int n, m, i;
	struct ib_mr *ret;

	mr = alloc_mr(num_phys_buf, pd);
	if (IS_ERR(mr)) {
		ret = (struct ib_mr *)mr;
		goto bail;
	}

	mr->mr.user_base = *iova_start;
	mr->mr.iova = *iova_start;
	mr->mr.access_flags = acc;

	m = 0;
	n = 0;
	for (i = 0; i < num_phys_buf; i++) {
		mr->mr.map[m]->segs[n].vaddr = (void *) buffer_list[i].addr;
		mr->mr.map[m]->segs[n].length = buffer_list[i].size;
		mr->mr.length += buffer_list[i].size;
		n++;
		if (n == QIB_SEGSZ) {
			m++;
			n = 0;
		}
	}

	ret = &mr->ibmr;

bail:
	return ret;
}

/**
 * qib_reg_user_mr - register a userspace memory region
 * @pd: protection domain for this memory region
 * @start: starting userspace address
 * @length: length of region to register
 * @virt_addr: virtual address to use (from HCA's point of view)
 * @mr_access_flags: access flags for this memory region
 * @udata: unused by the QLogic_IB driver
 *
 * Returns the memory region on success, otherwise returns an errno.
 */
struct ib_mr *qib_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
			      u64 virt_addr, int mr_access_flags,
			      struct ib_udata *udata)
{
	struct qib_mr *mr;
	struct ib_umem *umem;
	struct ib_umem_chunk *chunk;
	int n, m, i;
	struct ib_mr *ret;

	if (length == 0) {
		ret = ERR_PTR(-EINVAL);
		goto bail;
	}

	umem = ib_umem_get(pd->uobject->context, start, length,
			   mr_access_flags, 0);
	if (IS_ERR(umem))
		return (void *) umem;

	n = 0;
	list_for_each_entry(chunk, &umem->chunk_list, list)
		n += chunk->nents;

	mr = alloc_mr(n, pd);
	if (IS_ERR(mr)) {
		ret = (struct ib_mr *)mr;
		ib_umem_release(umem);
		goto bail;
	}

	mr->mr.user_base = start;
	mr->mr.iova = virt_addr;
	mr->mr.length = length;
	mr->mr.offset = umem->offset;
	mr->mr.access_flags = mr_access_flags;
	mr->umem = umem;

	if (is_power_of_2(umem->page_size))
		mr->mr.page_shift = ilog2(umem->page_size);
	m = 0;
	n = 0;
	list_for_each_entry(chunk, &umem->chunk_list, list) {
		for (i = 0; i < chunk->nents; i++) {
			void *vaddr;

			vaddr = page_address(sg_page(&chunk->page_list[i]));
			if (!vaddr) {
				/*
				 * Unwind the partially built MR (this also
				 * releases the umem pin via mr->umem) rather
				 * than leaking it.
				 */
				qib_dereg_mr(&mr->ibmr);
				ret = ERR_PTR(-EINVAL);
				goto bail;
			}
			mr->mr.map[m]->segs[n].vaddr = vaddr;
			mr->mr.map[m]->segs[n].length = umem->page_size;
			n++;
			if (n == QIB_SEGSZ) {
				m++;
				n = 0;
			}
		}
	}
	ret = &mr->ibmr;

bail:
	return ret;
}
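
/*
 * Note that ib_umem_get() above pinned the user pages; they stay pinned
 * until qib_dereg_mr() calls ib_umem_release().
 */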

/**
 * qib_dereg_mr - unregister and free a memory region
 * @ibmr: the memory region to free
 *
 * Returns 0 on success.
 *
 * Note that this is called to free MRs in the dereg path, so it waits
 * (briefly) for any in-progress users of the region to drop their
 * references before freeing the memory.
 */
int qib_dereg_mr(struct ib_mr *ibmr)
{
	struct qib_mr *mr = to_imr(ibmr);
	int ret = 0;
	unsigned long timeout;

	qib_free_lkey(&mr->mr);

	qib_put_mr(&mr->mr); /* will set completion if last */
	timeout = wait_for_completion_timeout(&mr->mr.comp,
		5 * HZ);
	if (!timeout) {
		qib_get_mr(&mr->mr);
		ret = -EBUSY;
		goto out;
	}
	deinit_qib_mregion(&mr->mr);
	if (mr->umem)
		ib_umem_release(mr->umem);
	kfree(mr);
out:
	return ret;
}

/*
 * Allocate a memory region usable with the
 * IB_WR_FAST_REG_MR send work request.
 *
 * Return the memory region on success, otherwise return an errno.
 */
struct ib_mr *qib_alloc_fast_reg_mr(struct ib_pd *pd, int max_page_list_len)
{
	struct qib_mr *mr;

	mr = alloc_mr(max_page_list_len, pd);
	if (IS_ERR(mr))
		return (struct ib_mr *)mr;

	return &mr->ibmr;
}

struct ib_fast_reg_page_list *
qib_alloc_fast_reg_page_list(struct ib_device *ibdev, int page_list_len)
{
	unsigned size = page_list_len * sizeof(u64);
	struct ib_fast_reg_page_list *pl;

	if (size > PAGE_SIZE)
		return ERR_PTR(-EINVAL);

	pl = kzalloc(sizeof *pl, GFP_KERNEL);
	if (!pl)
		return ERR_PTR(-ENOMEM);

	pl->page_list = kzalloc(size, GFP_KERNEL);
	if (!pl->page_list)
		goto err_free;

	return pl;

err_free:
	kfree(pl);
	return ERR_PTR(-ENOMEM);
}

void qib_free_fast_reg_page_list(struct ib_fast_reg_page_list *pl)
{
	kfree(pl->page_list);
	kfree(pl);
}
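
/*
 * Typical fast-registration flow (a sketch, from the caller's side): a
 * kernel ULP allocates an MR and a page list with the two functions
 * above, fills pl->page_list with DMA addresses, and posts an
 * IB_WR_FAST_REG_MR send work request that binds those pages to the
 * MR's keys; the bind itself is handled in the driver's send path.
 */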

/**
 * qib_alloc_fmr - allocate a fast memory region
 * @pd: the protection domain for this memory region
 * @mr_access_flags: access flags for this memory region
 * @fmr_attr: fast memory region attributes
 *
 * Returns the memory region on success, otherwise returns an errno.
 */
struct ib_fmr *qib_alloc_fmr(struct ib_pd *pd, int mr_access_flags,
			     struct ib_fmr_attr *fmr_attr)
{
	struct qib_fmr *fmr;
	int m;
	struct ib_fmr *ret;
	int rval = -ENOMEM;

	/* Allocate struct plus pointers to first level page tables. */
	m = (fmr_attr->max_pages + QIB_SEGSZ - 1) / QIB_SEGSZ;
	fmr = kzalloc(sizeof *fmr + m * sizeof fmr->mr.map[0], GFP_KERNEL);
	if (!fmr)
		goto bail;

	rval = init_qib_mregion(&fmr->mr, pd, fmr_attr->max_pages);
	if (rval)
		goto bail;

	/*
	 * ib_alloc_fmr() will initialize fmr->ibfmr except for lkey &
	 * rkey.
	 */
	rval = qib_alloc_lkey(&fmr->mr, 0);
	if (rval)
		goto bail_mregion;
	fmr->ibfmr.rkey = fmr->mr.lkey;
	fmr->ibfmr.lkey = fmr->mr.lkey;

	/*
	 * Resources are allocated but no valid mapping (RKEY can't be
	 * used).
	 */
	fmr->mr.access_flags = mr_access_flags;
	fmr->mr.max_segs = fmr_attr->max_pages;
	fmr->mr.page_shift = fmr_attr->page_shift;

	ret = &fmr->ibfmr;
done:
	return ret;

bail_mregion:
	deinit_qib_mregion(&fmr->mr);
bail:
	kfree(fmr);
	ret = ERR_PTR(rval);
	goto done;
}
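
/*
 * FMR lifecycle (a sketch): the core calls qib_alloc_fmr() once, then
 * cycles qib_map_phys_fmr()/qib_unmap_fmr() as mappings are reused
 * (e.g. by the ib_fmr_pool layer), and finally qib_dealloc_fmr().
 */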

/**
 * qib_map_phys_fmr - set up a fast memory region
 * @ibfmr: the fast memory region to set up
 * @page_list: the list of pages to associate with the fast memory region
 * @list_len: the number of pages to associate with the fast memory region
 * @iova: the virtual address of the start of the fast memory region
 *
 * This may be called from interrupt context.
 *
 * Returns 0 on success.
 */
int qib_map_phys_fmr(struct ib_fmr *ibfmr, u64 *page_list,
		     int list_len, u64 iova)
{
	struct qib_fmr *fmr = to_ifmr(ibfmr);
	struct qib_lkey_table *rkt;
	unsigned long flags;
	int m, n, i;
	u32 ps;
	int ret;

	/*
	 * An idle FMR holds two references: the base reference taken by
	 * init_qib_mregion() and the lkey table's.  More than that means
	 * the region is still in active use and cannot be remapped.
	 */
	i = atomic_read(&fmr->mr.refcount);
	if (i > 2)
		return -EBUSY;

	if (list_len > fmr->mr.max_segs) {
		ret = -EINVAL;
		goto bail;
	}
	rkt = &to_idev(ibfmr->device)->lk_table;
	spin_lock_irqsave(&rkt->lock, flags);
	fmr->mr.user_base = iova;
	fmr->mr.iova = iova;
	ps = 1 << fmr->mr.page_shift;
	fmr->mr.length = list_len * ps;
	m = 0;
	n = 0;
	for (i = 0; i < list_len; i++) {
		fmr->mr.map[m]->segs[n].vaddr = (void *) page_list[i];
		fmr->mr.map[m]->segs[n].length = ps;
		if (++n == QIB_SEGSZ) {
			m++;
			n = 0;
		}
	}
	spin_unlock_irqrestore(&rkt->lock, flags);
	ret = 0;

bail:
	return ret;
}

/**
 * qib_unmap_fmr - unmap fast memory regions
 * @fmr_list: the list of fast memory regions to unmap
 *
 * Returns 0 on success.
 */
int qib_unmap_fmr(struct list_head *fmr_list)
{
	struct qib_fmr *fmr;
	struct qib_lkey_table *rkt;
	unsigned long flags;

	list_for_each_entry(fmr, fmr_list, ibfmr.list) {
		rkt = &to_idev(fmr->ibfmr.device)->lk_table;
		spin_lock_irqsave(&rkt->lock, flags);
		/*
		 * Zeroing the bounds invalidates the mapping; the lkey
		 * itself stays allocated until qib_dealloc_fmr().
		 */
		fmr->mr.user_base = 0;
		fmr->mr.iova = 0;
		fmr->mr.length = 0;
		spin_unlock_irqrestore(&rkt->lock, flags);
	}
	return 0;
}

/**
 * qib_dealloc_fmr - deallocate a fast memory region
 * @ibfmr: the fast memory region to deallocate
 *
 * Returns 0 on success.
 */
int qib_dealloc_fmr(struct ib_fmr *ibfmr)
{
	struct qib_fmr *fmr = to_ifmr(ibfmr);
	int ret = 0;
	unsigned long timeout;

	qib_free_lkey(&fmr->mr);
	qib_put_mr(&fmr->mr); /* will set completion if last */
	timeout = wait_for_completion_timeout(&fmr->mr.comp,
		5 * HZ);
	if (!timeout) {
		qib_get_mr(&fmr->mr);
		ret = -EBUSY;
		goto out;
	}
	deinit_qib_mregion(&fmr->mr);
	kfree(fmr);
out:
	return ret;
}

/*
 * mr_rcu_callback - final teardown step for a memory region
 * @list: the rcu_head embedded in the qib_mregion
 *
 * Runs after an RCU grace period once qib_put_mr() drops the last
 * reference, so any lockless lkey-table readers are done with the
 * region; wakes the dereg/dealloc path waiting on mr->comp.
 */
void mr_rcu_callback(struct rcu_head *list)
{
	struct qib_mregion *mr = container_of(list, struct qib_mregion, list);

	complete(&mr->comp);
}