1
2
3
4
5
6
7#include <linux/xarray.h>
8#include "uverbs.h"
9#include "core_priv.h"
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
/**
 * rdma_umap_priv_init() - Initialize tracking for a driver user mmap
 * @priv:  zeroed tracking struct owned by the caller
 * @vma:   user VMA being set up; its vm_file->private_data must be the
 *         owning ib_uverbs_file
 * @entry: optional mmap entry backing this VMA, may be NULL
 *
 * Links @priv to @vma through vm_private_data and adds it to the ufile's
 * umaps list so all active mappings of the file can be found later.  When
 * @entry is given, a reference is taken on it for the lifetime of the VMA;
 * the matching put is not visible in this file (presumably in the VMA close
 * path — NOTE(review): confirm against the vm_ops implementation).
 */
void rdma_umap_priv_init(struct rdma_umap_priv *priv,
			 struct vm_area_struct *vma,
			 struct rdma_user_mmap_entry *entry)
{
	struct ib_uverbs_file *ufile = vma->vm_file->private_data;

	priv->vma = vma;
	if (entry) {
		/* Keep the entry alive while this VMA references it */
		kref_get(&entry->ref);
		priv->entry = entry;
	}
	vma->vm_private_data = priv;

	/* umap_lock serializes additions against walkers of ufile->umaps */
	mutex_lock(&ufile->umap_lock);
	list_add(&priv->list, &ufile->umaps);
	mutex_unlock(&ufile->umap_lock);
}
EXPORT_SYMBOL(rdma_umap_priv_init);
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
/**
 * rdma_user_mmap_io() - Map IO memory into a process
 * @ucontext: associated user context
 * @vma: the vma related to the current mmap call
 * @pfn: pfn to map
 * @size: size to map
 * @prot: pgprot to use in remap call
 * @entry: mmap entry associated with this mapping, or NULL if the driver
 *         does not use mmap entries
 *
 * To be called by drivers from their mmap handler to map device IO memory
 * (e.g. BAR pages) into userspace.  On success a rdma_umap_priv is attached
 * to the VMA so the core can track (and later revoke) the mapping.
 *
 * Return: 0 on success, -EINVAL on wrong flags/size or misuse,
 * -ENOMEM on allocation failure, -EAGAIN if the remap fails.
 */
int rdma_user_mmap_io(struct ib_ucontext *ucontext, struct vm_area_struct *vma,
		      unsigned long pfn, unsigned long size, pgprot_t prot,
		      struct rdma_user_mmap_entry *entry)
{
	struct ib_uverbs_file *ufile = ucontext->ufile;
	struct rdma_umap_priv *priv;

	/* Only MAP_SHARED mappings are supported */
	if (!(vma->vm_flags & VM_SHARED))
		return -EINVAL;

	/* The VMA must cover exactly the requested size */
	if (vma->vm_end - vma->vm_start != size)
		return -EINVAL;

	/* Driver is using this wrong, must be called by ib_uverbs_mmap */
	if (WARN_ON(!vma->vm_file ||
		    vma->vm_file->private_data != ufile))
		return -EINVAL;
	lockdep_assert_held(&ufile->device->disassociate_srcu);

	priv = kzalloc(sizeof(*priv), GFP_KERNEL);
	if (!priv)
		return -ENOMEM;

	vma->vm_page_prot = prot;
	if (io_remap_pfn_range(vma, vma->vm_start, pfn, size, prot)) {
		kfree(priv);
		return -EAGAIN;
	}

	/* Track the mapping; takes a ref on @entry if non-NULL */
	rdma_umap_priv_init(priv, vma, entry);
	return 0;
}
EXPORT_SYMBOL(rdma_user_mmap_io);
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
/**
 * rdma_user_mmap_entry_get_pgoff() - Get an entry from the mmap_xa
 * @ucontext: associated user context
 * @pgoff: the mmap offset (in pages) to look up
 *
 * Looks up the entry whose range starts exactly at @pgoff and takes a
 * reference on it.  The caller must release the reference with
 * rdma_user_mmap_entry_put().
 *
 * Return: the entry with elevated refcount, or NULL if no live entry
 * starts at @pgoff.
 */
struct rdma_user_mmap_entry *
rdma_user_mmap_entry_get_pgoff(struct ib_ucontext *ucontext,
			       unsigned long pgoff)
{
	struct rdma_user_mmap_entry *entry;

	/* Entries are keyed by u32 offsets, so larger values cannot match */
	if (pgoff > U32_MAX)
		return NULL;

	xa_lock(&ucontext->mmap_xa);

	entry = xa_load(&ucontext->mmap_xa, pgoff);

	/*
	 * A multi-page entry is stored at every index it spans (see
	 * rdma_user_mmap_entry_insert_range()), so reject lookups that hit
	 * the middle of an entry (start_pgoff mismatch).  Also reject
	 * entries the driver already removed, and entries whose refcount
	 * has reached zero — those are being freed and must not be revived.
	 */
	if (!entry || entry->start_pgoff != pgoff || entry->driver_removed ||
	    !kref_get_unless_zero(&entry->ref))
		goto err;

	xa_unlock(&ucontext->mmap_xa);

	ibdev_dbg(ucontext->device, "mmap: pgoff[%#lx] npages[%#zx] returned\n",
		  pgoff, entry->npages);

	return entry;

err:
	xa_unlock(&ucontext->mmap_xa);
	return NULL;
}
EXPORT_SYMBOL(rdma_user_mmap_entry_get_pgoff);
150
151
152
153
154
155
156
157
158
159
160struct rdma_user_mmap_entry *
161rdma_user_mmap_entry_get(struct ib_ucontext *ucontext,
162 struct vm_area_struct *vma)
163{
164 struct rdma_user_mmap_entry *entry;
165
166 if (!(vma->vm_flags & VM_SHARED))
167 return NULL;
168 entry = rdma_user_mmap_entry_get_pgoff(ucontext, vma->vm_pgoff);
169 if (!entry)
170 return NULL;
171 if (entry->npages * PAGE_SIZE != vma->vm_end - vma->vm_start) {
172 rdma_user_mmap_entry_put(entry);
173 return NULL;
174 }
175 return entry;
176}
177EXPORT_SYMBOL(rdma_user_mmap_entry_get);
178
/*
 * kref release callback: the last reference to the entry is gone, so remove
 * every page index it occupies from the xarray and hand the entry back to
 * the driver for final freeing (via the optional mmap_free op).
 */
static void rdma_user_mmap_entry_free(struct kref *kref)
{
	struct rdma_user_mmap_entry *entry =
		container_of(kref, struct rdma_user_mmap_entry, ref);
	struct ib_ucontext *ucontext = entry->ucontext;
	unsigned long i;

	/*
	 * Erase under xa_lock so lookups in rdma_user_mmap_entry_get_pgoff()
	 * cannot observe a half-removed range; they also cannot resurrect
	 * this entry since kref_get_unless_zero() now fails.
	 */
	xa_lock(&ucontext->mmap_xa);
	for (i = 0; i < entry->npages; i++)
		__xa_erase(&ucontext->mmap_xa, entry->start_pgoff + i);
	xa_unlock(&ucontext->mmap_xa);

	ibdev_dbg(ucontext->device, "mmap: pgoff[%#lx] npages[%#zx] removed\n",
		  entry->start_pgoff, entry->npages);

	/* Drivers that allocate per-entry state free it here */
	if (ucontext->device->ops.mmap_free)
		ucontext->device->ops.mmap_free(entry);
}
201
202
203
204
205
206
207
208
209
210
211
212
213
/**
 * rdma_user_mmap_entry_put() - Drop a reference to an mmap entry
 * @entry: an entry in the mmap_xa
 *
 * Releases a reference previously obtained (e.g. via
 * rdma_user_mmap_entry_get() or rdma_user_mmap_entry_get_pgoff()).
 * When the refcount reaches zero the entry is erased from the xarray
 * and freed by rdma_user_mmap_entry_free().
 */
void rdma_user_mmap_entry_put(struct rdma_user_mmap_entry *entry)
{
	kref_put(&entry->ref, rdma_user_mmap_entry_free);
}
EXPORT_SYMBOL(rdma_user_mmap_entry_put);
219
220
221
222
223
224
225
226
227
228
229
230void rdma_user_mmap_entry_remove(struct rdma_user_mmap_entry *entry)
231{
232 if (!entry)
233 return;
234
235 xa_lock(&entry->ucontext->mmap_xa);
236 entry->driver_removed = true;
237 xa_unlock(&entry->ucontext->mmap_xa);
238 kref_put(&entry->ref, rdma_user_mmap_entry_free);
239}
240EXPORT_SYMBOL(rdma_user_mmap_entry_remove);
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
/**
 * rdma_user_mmap_entry_insert_range() - Insert an entry into the mmap_xa
 *	within a given pgoff range
 * @ucontext: associated user context
 * @entry: the entry to insert into the mmap_xa
 * @length: length of the address that will be mmapped, in bytes
 * @min_pgoff: minimum pgoff that may be allocated
 * @max_pgoff: maximum pgoff that may be allocated
 *
 * Finds a free run of DIV_ROUND_UP(@length, PAGE_SIZE) consecutive page
 * indices in [@min_pgoff, @max_pgoff], stores @entry at every index of the
 * run, and records the run's start in entry->start_pgoff.  The entry's
 * refcount is (re)initialized here; the driver owns that initial reference.
 *
 * Return: 0 on success, -EINVAL if @entry is NULL, -ENOMEM if no free
 * range exists or an xarray insert fails.
 */
int rdma_user_mmap_entry_insert_range(struct ib_ucontext *ucontext,
				      struct rdma_user_mmap_entry *entry,
				      size_t length, u32 min_pgoff,
				      u32 max_pgoff)
{
	struct ib_uverbs_file *ufile = ucontext->ufile;
	XA_STATE(xas, &ucontext->mmap_xa, min_pgoff);
	u32 xa_first, xa_last, npages;
	int err;
	u32 i;

	if (!entry)
		return -EINVAL;

	kref_init(&entry->ref);
	entry->ucontext = ucontext;

	/*
	 * We want the whole allocation to be done without interruption from
	 * a different thread: finding a free range and then storing into it
	 * must appear atomic, so hold umap_lock across both steps to keep
	 * another thread from choosing the same range.
	 */
	mutex_lock(&ufile->umap_lock);

	xa_lock(&ucontext->mmap_xa);

	/* We want to find an empty range of npages consecutive free indices */
	npages = (u32)DIV_ROUND_UP(length, PAGE_SIZE);
	entry->npages = npages;
	while (true) {
		/* First find a free index at or after the current position */
		xas_find_marked(&xas, max_pgoff, XA_FREE_MARK);
		if (xas.xa_node == XAS_RESTART)
			goto err_unlock;

		xa_first = xas.xa_index;

		/* Is there enough room to have the range without overflow? */
		if (check_add_overflow(xa_first, npages, &xa_last))
			goto err_unlock;

		/*
		 * Now look for the next present entry. If it is past
		 * xa_last - 1 (or there is none), [xa_first, xa_last) is
		 * an empty range and we can proceed; otherwise retry from
		 * the occupied index.
		 */
		xas_next_entry(&xas, xa_last - 1);
		if (xas.xa_node == XAS_BOUNDS || xas.xa_index >= xa_last)
			break;
	}

	/* Claim every index of the range for this entry */
	for (i = xa_first; i < xa_last; i++) {
		err = __xa_insert(&ucontext->mmap_xa, i, entry, GFP_KERNEL);
		if (err)
			goto err_undo;
	}

	/* Record where the range starts; lookups key off this value */
	entry->start_pgoff = xa_first;
	xa_unlock(&ucontext->mmap_xa);
	mutex_unlock(&ufile->umap_lock);

	ibdev_dbg(ucontext->device, "mmap: pgoff[%#lx] npages[%#x] inserted\n",
		  entry->start_pgoff, npages);

	return 0;

err_undo:
	/* Roll back the partially-inserted range [xa_first, i) */
	for (; i > xa_first; i--)
		__xa_erase(&ucontext->mmap_xa, i - 1);

err_unlock:
	xa_unlock(&ucontext->mmap_xa);
	mutex_unlock(&ufile->umap_lock);
	return -ENOMEM;
}
EXPORT_SYMBOL(rdma_user_mmap_entry_insert_range);
342
343
344
345
346
347
348
349
350
351
352
353
354
355
356
357
358
359
/**
 * rdma_user_mmap_entry_insert() - Insert an entry into the mmap_xa
 * @ucontext: associated user context
 * @entry: the entry to insert into the mmap_xa
 * @length: length of the address that will be mmapped, in bytes
 *
 * Convenience wrapper around rdma_user_mmap_entry_insert_range() that
 * allows the page offset to be allocated anywhere in [0, U32_MAX].
 *
 * Return: 0 on success, -EINVAL if @entry is NULL, -ENOMEM on failure
 * to find or claim a free range.
 */
int rdma_user_mmap_entry_insert(struct ib_ucontext *ucontext,
				struct rdma_user_mmap_entry *entry,
				size_t length)
{
	return rdma_user_mmap_entry_insert_range(ucontext, entry, length, 0,
						 U32_MAX);
}
EXPORT_SYMBOL(rdma_user_mmap_entry_insert);
368