#include <linux/slab.h>

#include "mlx4_ib.h"

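/*
 * Translate IB access flags into mlx4 MPT permission bits.
 * Local read access is always granted.
 */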
static u32 convert_access(int acc)
{
	return (acc & IB_ACCESS_REMOTE_ATOMIC ? MLX4_PERM_ATOMIC : 0) |
	       (acc & IB_ACCESS_REMOTE_WRITE ? MLX4_PERM_REMOTE_WRITE : 0) |
	       (acc & IB_ACCESS_REMOTE_READ ? MLX4_PERM_REMOTE_READ : 0) |
	       (acc & IB_ACCESS_LOCAL_WRITE ? MLX4_PERM_LOCAL_WRITE : 0) |
	       (acc & IB_ACCESS_MW_BIND ? MLX4_PERM_BIND_MW : 0) |
	       MLX4_PERM_LOCAL_READ;
}

static enum mlx4_mw_type to_mlx4_type(enum ib_mw_type type)
{
	switch (type) {
	case IB_MW_TYPE_1:	return MLX4_MW_TYPE_1;
	case IB_MW_TYPE_2:	return MLX4_MW_TYPE_2;
	default:		return -1;
	}
}

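/*
 * Allocate an MR covering the whole address space (start 0, length ~0ull)
 * for local DMA access with the requested permissions.
 */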
struct ib_mr *mlx4_ib_get_dma_mr(struct ib_pd *pd, int acc)
{
	struct mlx4_ib_mr *mr;
	int err;

	mr = kmalloc(sizeof *mr, GFP_KERNEL);
	if (!mr)
		return ERR_PTR(-ENOMEM);

	err = mlx4_mr_alloc(to_mdev(pd->device)->dev, to_mpd(pd)->pdn, 0,
			    ~0ull, convert_access(acc), 0, 0, &mr->mmr);
	if (err)
		goto err_free;

	err = mlx4_mr_enable(to_mdev(pd->device)->dev, &mr->mmr);
	if (err)
		goto err_mr;

	mr->ibmr.rkey = mr->ibmr.lkey = mr->mmr.key;
	mr->umem = NULL;

	return &mr->ibmr;

err_mr:
	(void) mlx4_mr_free(to_mdev(pd->device)->dev, &mr->mmr);

err_free:
	kfree(mr);

	return ERR_PTR(err);
}

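/*
 * Walk the umem scatterlist and write the DMA address of every page into
 * the MTT, batching at most one kernel page worth of 64-bit entries per
 * mlx4_write_mtt() call.
 */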
int mlx4_ib_umem_write_mtt(struct mlx4_ib_dev *dev, struct mlx4_mtt *mtt,
			   struct ib_umem *umem)
{
	u64 *pages;
	int i, k, entry;
	int n;
	int len;
	int err = 0;
	struct scatterlist *sg;

	pages = (u64 *) __get_free_page(GFP_KERNEL);
	if (!pages)
		return -ENOMEM;

	i = n = 0;

	for_each_sg(umem->sg_head.sgl, sg, umem->nmap, entry) {
		len = sg_dma_len(sg) >> mtt->page_shift;
		for (k = 0; k < len; ++k) {
			pages[i++] = sg_dma_address(sg) +
				umem->page_size * k;
			/*
			 * Be friendly to mlx4_write_mtt() and
			 * pass it chunks of appropriate size.
			 */
			if (i == PAGE_SIZE / sizeof (u64)) {
				err = mlx4_write_mtt(dev->dev, mtt, n,
						     i, pages);
				if (err)
					goto out;
				n += i;
				i = 0;
			}
		}
	}

	if (i)
		err = mlx4_write_mtt(dev->dev, mtt, n, i, pages);

out:
	free_page((unsigned long) pages);
	return err;
}

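/*
 * Register a user memory region: pin the user pages, allocate an MR and
 * its MTT, write the page addresses and enable the region.
 */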
struct ib_mr *mlx4_ib_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
				  u64 virt_addr, int access_flags,
				  struct ib_udata *udata)
{
	struct mlx4_ib_dev *dev = to_mdev(pd->device);
	struct mlx4_ib_mr *mr;
	int shift;
	int err;
	int n;

	mr = kmalloc(sizeof *mr, GFP_KERNEL);
	if (!mr)
		return ERR_PTR(-ENOMEM);

	mr->umem = ib_umem_get(pd->uobject->context, start, length,
			       access_flags, 0);
	if (IS_ERR(mr->umem)) {
		err = PTR_ERR(mr->umem);
		goto err_free;
	}

	n = ib_umem_page_count(mr->umem);
	shift = ilog2(mr->umem->page_size);

	err = mlx4_mr_alloc(dev->dev, to_mpd(pd)->pdn, virt_addr, length,
			    convert_access(access_flags), n, shift, &mr->mmr);
	if (err)
		goto err_umem;

	err = mlx4_ib_umem_write_mtt(dev, &mr->mmr.mtt, mr->umem);
	if (err)
		goto err_mr;

	err = mlx4_mr_enable(dev->dev, &mr->mmr);
	if (err)
		goto err_mr;

	mr->ibmr.rkey = mr->ibmr.lkey = mr->mmr.key;

	return &mr->ibmr;

err_mr:
	(void) mlx4_mr_free(to_mdev(pd->device)->dev, &mr->mmr);

err_umem:
	ib_umem_release(mr->umem);

err_free:
	kfree(mr);

	return ERR_PTR(err);
}

int mlx4_ib_dereg_mr(struct ib_mr *ibmr)
{
	struct mlx4_ib_mr *mr = to_mmr(ibmr);
	int ret;

	ret = mlx4_mr_free(to_mdev(ibmr->device)->dev, &mr->mmr);
	if (ret)
		return ret;
	if (mr->umem)
		ib_umem_release(mr->umem);
	kfree(mr);

	return 0;
}

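/* Allocate a memory window of the requested type on the given PD. */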
struct ib_mw *mlx4_ib_alloc_mw(struct ib_pd *pd, enum ib_mw_type type)
{
	struct mlx4_ib_dev *dev = to_mdev(pd->device);
	struct mlx4_ib_mw *mw;
	int err;

	mw = kmalloc(sizeof(*mw), GFP_KERNEL);
	if (!mw)
		return ERR_PTR(-ENOMEM);

	err = mlx4_mw_alloc(dev->dev, to_mpd(pd)->pdn,
			    to_mlx4_type(type), &mw->mmw);
	if (err)
		goto err_free;

	err = mlx4_mw_enable(dev->dev, &mw->mmw);
	if (err)
		goto err_mw;

	mw->ibmw.rkey = mw->mmw.key;

	return &mw->ibmw;

err_mw:
	mlx4_mw_free(dev->dev, &mw->mmw);

err_free:
	kfree(mw);

	return ERR_PTR(err);
}

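/*
 * Bind a memory window by posting an IB_WR_BIND_MW work request on the QP;
 * on success the window's rkey is advanced to the newly generated value.
 */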
int mlx4_ib_bind_mw(struct ib_qp *qp, struct ib_mw *mw,
		    struct ib_mw_bind *mw_bind)
{
	struct ib_send_wr wr;
	struct ib_send_wr *bad_wr;
	int ret;

	memset(&wr, 0, sizeof(wr));
	wr.opcode = IB_WR_BIND_MW;
	wr.wr_id = mw_bind->wr_id;
	wr.send_flags = mw_bind->send_flags;
	wr.wr.bind_mw.mw = mw;
	wr.wr.bind_mw.bind_info = mw_bind->bind_info;
	wr.wr.bind_mw.rkey = ib_inc_rkey(mw->rkey);

	ret = mlx4_ib_post_send(qp, &wr, &bad_wr);
	if (!ret)
		mw->rkey = wr.wr.bind_mw.rkey;

	return ret;
}

int mlx4_ib_dealloc_mw(struct ib_mw *ibmw)
{
	struct mlx4_ib_mw *mw = to_mmw(ibmw);

	mlx4_mw_free(to_mdev(ibmw->device)->dev, &mw->mmw);
	kfree(mw);

	return 0;
}

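/*
 * Allocate an MR for fast register work requests, reserving MTT space
 * for up to max_page_list_len pages.
 */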
struct ib_mr *mlx4_ib_alloc_fast_reg_mr(struct ib_pd *pd,
					int max_page_list_len)
{
	struct mlx4_ib_dev *dev = to_mdev(pd->device);
	struct mlx4_ib_mr *mr;
	int err;

	mr = kmalloc(sizeof *mr, GFP_KERNEL);
	if (!mr)
		return ERR_PTR(-ENOMEM);

	err = mlx4_mr_alloc(dev->dev, to_mpd(pd)->pdn, 0, 0, 0,
			    max_page_list_len, 0, &mr->mmr);
	if (err)
		goto err_free;

	err = mlx4_mr_enable(dev->dev, &mr->mmr);
	if (err)
		goto err_mr;

	mr->ibmr.rkey = mr->ibmr.lkey = mr->mmr.key;
	mr->umem = NULL;

	return &mr->ibmr;

err_mr:
	(void) mlx4_mr_free(dev->dev, &mr->mmr);

err_free:
	kfree(mr);
	return ERR_PTR(err);
}

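/*
 * Allocate a fast-register page list: a kernel copy handed back to the
 * caller and a DMA-coherent copy for the HCA to read.
 */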
struct ib_fast_reg_page_list *mlx4_ib_alloc_fast_reg_page_list(struct ib_device *ibdev,
							       int page_list_len)
{
	struct mlx4_ib_dev *dev = to_mdev(ibdev);
	struct mlx4_ib_fast_reg_page_list *mfrpl;
	int size = page_list_len * sizeof (u64);

	if (page_list_len > MLX4_MAX_FAST_REG_PAGES)
		return ERR_PTR(-EINVAL);

	mfrpl = kmalloc(sizeof *mfrpl, GFP_KERNEL);
	if (!mfrpl)
		return ERR_PTR(-ENOMEM);

	mfrpl->ibfrpl.page_list = kmalloc(size, GFP_KERNEL);
	if (!mfrpl->ibfrpl.page_list)
		goto err_free;

	mfrpl->mapped_page_list = dma_alloc_coherent(&dev->dev->pdev->dev,
						     size, &mfrpl->map,
						     GFP_KERNEL);
	if (!mfrpl->mapped_page_list)
		goto err_free;

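	/* The DMA address of the mapped page list is expected to be 64-byte aligned. */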
	WARN_ON(mfrpl->map & 0x3f);

	return &mfrpl->ibfrpl;

err_free:
	kfree(mfrpl->ibfrpl.page_list);
	kfree(mfrpl);
	return ERR_PTR(-ENOMEM);
}

void mlx4_ib_free_fast_reg_page_list(struct ib_fast_reg_page_list *page_list)
{
	struct mlx4_ib_dev *dev = to_mdev(page_list->device);
	struct mlx4_ib_fast_reg_page_list *mfrpl = to_mfrpl(page_list);
	int size = page_list->max_page_list_len * sizeof (u64);

	dma_free_coherent(&dev->dev->pdev->dev, size, mfrpl->mapped_page_list,
			  mfrpl->map);
	kfree(mfrpl->ibfrpl.page_list);
	kfree(mfrpl);
}

struct ib_fmr *mlx4_ib_fmr_alloc(struct ib_pd *pd, int acc,
				 struct ib_fmr_attr *fmr_attr)
{
	struct mlx4_ib_dev *dev = to_mdev(pd->device);
	struct mlx4_ib_fmr *fmr;
	int err = -ENOMEM;

	fmr = kmalloc(sizeof *fmr, GFP_KERNEL);
	if (!fmr)
		return ERR_PTR(-ENOMEM);

	err = mlx4_fmr_alloc(dev->dev, to_mpd(pd)->pdn, convert_access(acc),
			     fmr_attr->max_pages, fmr_attr->max_maps,
			     fmr_attr->page_shift, &fmr->mfmr);
	if (err)
		goto err_free;

	err = mlx4_fmr_enable(to_mdev(pd->device)->dev, &fmr->mfmr);
	if (err)
		goto err_mr;

	fmr->ibfmr.rkey = fmr->ibfmr.lkey = fmr->mfmr.mr.key;

	return &fmr->ibfmr;

err_mr:
	(void) mlx4_mr_free(to_mdev(pd->device)->dev, &fmr->mfmr.mr);

err_free:
	kfree(fmr);

	return ERR_PTR(err);
}

int mlx4_ib_map_phys_fmr(struct ib_fmr *ibfmr, u64 *page_list,
			 int npages, u64 iova)
{
	struct mlx4_ib_fmr *ifmr = to_mfmr(ibfmr);
	struct mlx4_ib_dev *dev = to_mdev(ifmr->ibfmr.device);

	return mlx4_map_phys_fmr(dev->dev, &ifmr->mfmr, page_list, npages, iova,
				 &ifmr->ibfmr.lkey, &ifmr->ibfmr.rkey);
}

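/*
 * Unmap a list of FMRs.  All FMRs must belong to the same device; after
 * unmapping, a SYNC_TPT firmware command flushes the MPT changes.
 */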
int mlx4_ib_unmap_fmr(struct list_head *fmr_list)
{
	struct ib_fmr *ibfmr;
	int err;
	struct mlx4_dev *mdev = NULL;

	list_for_each_entry(ibfmr, fmr_list, list) {
		if (mdev && to_mdev(ibfmr->device)->dev != mdev)
			return -EINVAL;
		mdev = to_mdev(ibfmr->device)->dev;
	}

	if (!mdev)
		return 0;

	list_for_each_entry(ibfmr, fmr_list, list) {
		struct mlx4_ib_fmr *ifmr = to_mfmr(ibfmr);

		mlx4_fmr_unmap(mdev, &ifmr->mfmr, &ifmr->ibfmr.lkey, &ifmr->ibfmr.rkey);
	}

	/*
	 * Make sure all MPT status updates are visible before issuing
	 * SYNC_TPT firmware command.
	 */
	wmb();

	err = mlx4_SYNC_TPT(mdev);
	if (err)
		pr_warn("SYNC_TPT error %d when unmapping FMRs\n", err);

	return 0;
}

int mlx4_ib_fmr_dealloc(struct ib_fmr *ibfmr)
{
	struct mlx4_ib_fmr *ifmr = to_mfmr(ibfmr);
	struct mlx4_ib_dev *dev = to_mdev(ibfmr->device);
	int err;

	err = mlx4_fmr_free(dev->dev, &ifmr->mfmr);

	if (!err)
		kfree(ifmr);

	return err;
}