#include <linux/list.h>
#include <linux/slab.h>

#include "pvrdma.h"
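
/**
 * pvrdma_get_dma_mr - get a DMA memory region
 * @pd: protection domain
 * @acc: access flags
 *
 * @return: ib_mr pointer on success, otherwise returns an errno.
 */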
struct ib_mr *pvrdma_get_dma_mr(struct ib_pd *pd, int acc)
{
	struct pvrdma_dev *dev = to_vdev(pd->device);
	struct pvrdma_user_mr *mr;
	union pvrdma_cmd_req req;
	union pvrdma_cmd_resp rsp;
	struct pvrdma_cmd_create_mr *cmd = &req.create_mr;
	struct pvrdma_cmd_create_mr_resp *resp = &rsp.create_mr_resp;
	int ret;

	/* Support only LOCAL_WRITE flag for DMA MRs */
	if (acc & ~IB_ACCESS_LOCAL_WRITE) {
		dev_warn(&dev->pdev->dev,
			 "unsupported dma mr access flags %#x\n", acc);
		return ERR_PTR(-EOPNOTSUPP);
	}

	mr = kzalloc(sizeof(*mr), GFP_KERNEL);
	if (!mr)
		return ERR_PTR(-ENOMEM);

	memset(cmd, 0, sizeof(*cmd));
	cmd->hdr.cmd = PVRDMA_CMD_CREATE_MR;
	cmd->pd_handle = to_vpd(pd)->pd_handle;
	cmd->access_flags = acc;
	cmd->flags = PVRDMA_MR_FLAG_DMA;

	ret = pvrdma_cmd_post(dev, &req, &rsp, PVRDMA_CMD_CREATE_MR_RESP);
	if (ret < 0) {
		dev_warn(&dev->pdev->dev,
			 "could not get DMA mem region, error: %d\n", ret);
		kfree(mr);
		return ERR_PTR(ret);
	}

	mr->mmr.mr_handle = resp->mr_handle;
	mr->ibmr.lkey = resp->lkey;
	mr->ibmr.rkey = resp->rkey;

	return &mr->ibmr;
}
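
/**
 * pvrdma_reg_user_mr - register a userspace memory region
 * @pd: protection domain
 * @start: starting address
 * @length: length of region
 * @virt_addr: I/O virtual address
 * @access_flags: access flags for memory region
 * @udata: user data
 *
 * @return: ib_mr pointer on success, otherwise returns an errno.
 */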
struct ib_mr *pvrdma_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
				 u64 virt_addr, int access_flags,
				 struct ib_udata *udata)
{
	struct pvrdma_dev *dev = to_vdev(pd->device);
	struct pvrdma_user_mr *mr = NULL;
	struct ib_umem *umem;
	union pvrdma_cmd_req req;
	union pvrdma_cmd_resp rsp;
	struct pvrdma_cmd_create_mr *cmd = &req.create_mr;
	struct pvrdma_cmd_create_mr_resp *resp = &rsp.create_mr_resp;
	int ret;

	if (length == 0 || length > dev->dsr->caps.max_mr_size) {
		dev_warn(&dev->pdev->dev, "invalid mem region length\n");
		return ERR_PTR(-EINVAL);
	}

	/* Pin the userspace pages backing the region */
	umem = ib_umem_get(pd->uobject->context, start,
			   length, access_flags, 0);
	if (IS_ERR(umem)) {
		dev_warn(&dev->pdev->dev,
			 "could not get umem for mem region\n");
		return ERR_CAST(umem);
	}

	if (umem->npages < 0 || umem->npages > PVRDMA_PAGE_DIR_MAX_PAGES) {
		dev_warn(&dev->pdev->dev, "overflow %d pages in mem region\n",
			 umem->npages);
		ret = -EINVAL;
		goto err_umem;
	}

	mr = kzalloc(sizeof(*mr), GFP_KERNEL);
	if (!mr) {
		ret = -ENOMEM;
		goto err_umem;
	}

	mr->mmr.iova = virt_addr;
	mr->mmr.size = length;
	mr->umem = umem;

	/* Build the page directory the device uses to address the region */
	ret = pvrdma_page_dir_init(dev, &mr->pdir, umem->npages, false);
	if (ret) {
		dev_warn(&dev->pdev->dev,
			 "could not allocate page directory\n");
		goto err_umem;
	}

	ret = pvrdma_page_dir_insert_umem(&mr->pdir, mr->umem, 0);
	if (ret)
		goto err_pdir;

	memset(cmd, 0, sizeof(*cmd));
	cmd->hdr.cmd = PVRDMA_CMD_CREATE_MR;
	cmd->start = start;
	cmd->length = length;
	cmd->pd_handle = to_vpd(pd)->pd_handle;
	cmd->access_flags = access_flags;
	cmd->nchunks = umem->npages;
	cmd->pdir_dma = mr->pdir.dir_dma;

	ret = pvrdma_cmd_post(dev, &req, &rsp, PVRDMA_CMD_CREATE_MR_RESP);
	if (ret < 0) {
		dev_warn(&dev->pdev->dev,
			 "could not register mem region, error: %d\n", ret);
		goto err_pdir;
	}

	mr->mmr.mr_handle = resp->mr_handle;
	mr->ibmr.lkey = resp->lkey;
	mr->ibmr.rkey = resp->rkey;

	return &mr->ibmr;

err_pdir:
	pvrdma_page_dir_cleanup(dev, &mr->pdir);
err_umem:
	ib_umem_release(umem);
	kfree(mr);

	return ERR_PTR(ret);
}
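
/**
 * pvrdma_alloc_mr - allocate a memory region for fast registration
 * @pd: protection domain
 * @mr_type: type of memory region
 * @max_num_sg: maximum number of pages
 *
 * @return: ib_mr pointer on success, otherwise returns an errno.
 */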
struct ib_mr *pvrdma_alloc_mr(struct ib_pd *pd, enum ib_mr_type mr_type,
			      u32 max_num_sg)
{
	struct pvrdma_dev *dev = to_vdev(pd->device);
	struct pvrdma_user_mr *mr;
	union pvrdma_cmd_req req;
	union pvrdma_cmd_resp rsp;
	struct pvrdma_cmd_create_mr *cmd = &req.create_mr;
	struct pvrdma_cmd_create_mr_resp *resp = &rsp.create_mr_resp;
	int size = max_num_sg * sizeof(u64);
	int ret;

	if (mr_type != IB_MR_TYPE_MEM_REG ||
	    max_num_sg > PVRDMA_MAX_FAST_REG_PAGES)
		return ERR_PTR(-EINVAL);

	mr = kzalloc(sizeof(*mr), GFP_KERNEL);
	if (!mr)
		return ERR_PTR(-ENOMEM);

	/* Page list is filled in later by pvrdma_map_mr_sg() */
	mr->pages = kzalloc(size, GFP_KERNEL);
	if (!mr->pages) {
		ret = -ENOMEM;
		goto freemr;
	}

	ret = pvrdma_page_dir_init(dev, &mr->pdir, max_num_sg, false);
	if (ret) {
		dev_warn(&dev->pdev->dev,
			 "failed to allocate page dir for mr\n");
		ret = -ENOMEM;
		goto freepages;
	}

	memset(cmd, 0, sizeof(*cmd));
	cmd->hdr.cmd = PVRDMA_CMD_CREATE_MR;
	cmd->pd_handle = to_vpd(pd)->pd_handle;
	cmd->access_flags = 0;
	cmd->flags = PVRDMA_MR_FLAG_FRMR;
	cmd->nchunks = max_num_sg;

	ret = pvrdma_cmd_post(dev, &req, &rsp, PVRDMA_CMD_CREATE_MR_RESP);
	if (ret < 0) {
		dev_warn(&dev->pdev->dev,
			 "could not create FR mem region, error: %d\n", ret);
		goto freepdir;
	}

	mr->max_pages = max_num_sg;
	mr->mmr.mr_handle = resp->mr_handle;
	mr->ibmr.lkey = resp->lkey;
	mr->ibmr.rkey = resp->rkey;
	mr->page_shift = PAGE_SHIFT;
	mr->umem = NULL;

	return &mr->ibmr;

freepdir:
	pvrdma_page_dir_cleanup(dev, &mr->pdir);
freepages:
	kfree(mr->pages);
freemr:
	kfree(mr);
	return ERR_PTR(ret);
}
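
/**
 * pvrdma_dereg_mr - deregister a memory region
 * @ibmr: memory region
 *
 * @return: 0 on success.
 */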
int pvrdma_dereg_mr(struct ib_mr *ibmr)
{
	struct pvrdma_user_mr *mr = to_vmr(ibmr);
	struct pvrdma_dev *dev = to_vdev(ibmr->device);
	union pvrdma_cmd_req req;
	struct pvrdma_cmd_destroy_mr *cmd = &req.destroy_mr;
	int ret;

	memset(cmd, 0, sizeof(*cmd));
	cmd->hdr.cmd = PVRDMA_CMD_DESTROY_MR;
	cmd->mr_handle = mr->mmr.mr_handle;
	ret = pvrdma_cmd_post(dev, &req, NULL, 0);
	if (ret < 0)
		dev_warn(&dev->pdev->dev,
			 "could not deregister mem region, error: %d\n", ret);

	pvrdma_page_dir_cleanup(dev, &mr->pdir);
	if (mr->umem)
		ib_umem_release(mr->umem);

	kfree(mr->pages);
	kfree(mr);

	return 0;
}
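
/* Callback for ib_sg_to_pages(): record one page address in mr->pages[] */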
static int pvrdma_set_page(struct ib_mr *ibmr, u64 addr)
{
	struct pvrdma_user_mr *mr = to_vmr(ibmr);

	if (mr->npages == mr->max_pages)
		return -ENOMEM;

	mr->pages[mr->npages++] = addr;
	return 0;
}
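
/* Map a scatterlist into the MR's page list for fast registration */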
int pvrdma_map_mr_sg(struct ib_mr *ibmr, struct scatterlist *sg, int sg_nents,
		     unsigned int *sg_offset)
{
	struct pvrdma_user_mr *mr = to_vmr(ibmr);
	struct pvrdma_dev *dev = to_vdev(ibmr->device);
	int ret;

	mr->npages = 0;

	ret = ib_sg_to_pages(ibmr, sg, sg_nents, sg_offset, pvrdma_set_page);
	if (ret < 0)
		dev_warn(&dev->pdev->dev, "could not map sg to pages\n");

	return ret;
}