#include <linux/mm.h>
#include <linux/dma-mapping.h>
#include <linux/sched/signal.h>
#include <linux/sched/mm.h>
#include <linux/hugetlb.h>
#include <linux/iommu.h>
#include <linux/workqueue.h>
#include <linux/list.h>
#include <linux/pci.h>
#include <rdma/ib_verbs.h>

#include "usnic_log.h"
#include "usnic_uiom.h"
#include "usnic_uiom_interval_tree.h"

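/*
 * Number of scatterlist entries that fit in one usnic_uiom_chunk when the
 * chunk itself is sized to a single page: the page_list[] flexible array
 * is carved out of whatever remains of the page after the chunk header.
 */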
#define USNIC_UIOM_PAGE_CHUNK						\
	((PAGE_SIZE - offsetof(struct usnic_uiom_chunk, page_list)) /	\
	((void *) &((struct usnic_uiom_chunk *) 0)->page_list[1] -	\
	(void *) &((struct usnic_uiom_chunk *) 0)->page_list[0]))

static int usnic_uiom_dma_fault(struct iommu_domain *domain,
				struct device *dev,
				unsigned long iova, int flags,
				void *token)
{
	usnic_err("Device %s iommu fault domain 0x%pK va 0x%lx flags 0x%x\n",
		dev_name(dev),
		domain, iova, flags);
	return -ENOSYS;
}

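/*
 * Release every page referenced by a registration's chunk list, optionally
 * marking the pages dirty, and free the chunks themselves.
 */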
static void usnic_uiom_put_pages(struct list_head *chunk_list, int dirty)
{
	struct usnic_uiom_chunk *chunk, *tmp;
	struct page *page;
	struct scatterlist *sg;
	int i;
	dma_addr_t pa;

	list_for_each_entry_safe(chunk, tmp, chunk_list, list) {
		for_each_sg(chunk->page_list, sg, chunk->nents, i) {
			page = sg_page(sg);
			pa = sg_phys(sg);
			unpin_user_pages_dirty_lock(&page, 1, dirty);
			usnic_dbg("pa: %pa\n", &pa);
		}
		kfree(chunk);
	}
}

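/*
 * Pin the user pages backing [addr, addr + size), charge them against the
 * caller's RLIMIT_MEMLOCK, and record them as scatterlist chunks on
 * uiomr->chunk_list.  Pages are pinned with FOLL_LONGTERM because the
 * registration can outlive the system call that created it.
 */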
static int usnic_uiom_get_pages(unsigned long addr, size_t size, int writable,
				int dmasync, struct usnic_uiom_reg *uiomr)
{
	struct list_head *chunk_list = &uiomr->chunk_list;
	struct page **page_list;
	struct scatterlist *sg;
	struct usnic_uiom_chunk *chunk;
	unsigned long locked;
	unsigned long lock_limit;
	unsigned long cur_base;
	unsigned long npages;
	int ret;
	int off;
	int i;
	int flags;
	dma_addr_t pa;
	unsigned int gup_flags;
	struct mm_struct *mm;

	/*
	 * If the combination of the addr and size requested for this memory
	 * region causes an integer overflow, return error.
	 */
	if (((addr + size) < addr) || PAGE_ALIGN(addr + size) < (addr + size))
		return -EINVAL;

	if (!size)
		return -EINVAL;

	if (!can_do_mlock())
		return -EPERM;

	INIT_LIST_HEAD(chunk_list);

	page_list = (struct page **) __get_free_page(GFP_KERNEL);
	if (!page_list)
		return -ENOMEM;

	npages = PAGE_ALIGN(size + (addr & ~PAGE_MASK)) >> PAGE_SHIFT;

	uiomr->owning_mm = mm = current->mm;
	mmap_read_lock(mm);

	locked = atomic64_add_return(npages, &current->mm->pinned_vm);
	lock_limit = rlimit(RLIMIT_MEMLOCK) >> PAGE_SHIFT;

	if ((locked > lock_limit) && !capable(CAP_IPC_LOCK)) {
		ret = -ENOMEM;
		goto out;
	}

	flags = IOMMU_READ | IOMMU_CACHE;
	flags |= (writable) ? IOMMU_WRITE : 0;
	gup_flags = FOLL_WRITE;
	gup_flags |= (writable) ? 0 : FOLL_FORCE;
	cur_base = addr & PAGE_MASK;
	ret = 0;

	while (npages) {
		ret = pin_user_pages(cur_base,
				     min_t(unsigned long, npages,
				     PAGE_SIZE / sizeof(struct page *)),
				     gup_flags | FOLL_LONGTERM,
				     page_list, NULL);

		if (ret < 0)
			goto out;

		npages -= ret;
		off = 0;

		while (ret) {
			chunk = kmalloc(struct_size(chunk, page_list,
					min_t(int, ret, USNIC_UIOM_PAGE_CHUNK)),
					GFP_KERNEL);
			if (!chunk) {
				ret = -ENOMEM;
				goto out;
			}

			chunk->nents = min_t(int, ret, USNIC_UIOM_PAGE_CHUNK);
			sg_init_table(chunk->page_list, chunk->nents);
			for_each_sg(chunk->page_list, sg, chunk->nents, i) {
				sg_set_page(sg, page_list[i + off],
					    PAGE_SIZE, 0);
				pa = sg_phys(sg);
				usnic_dbg("va: 0x%lx pa: %pa\n",
					  cur_base + i*PAGE_SIZE, &pa);
			}
			cur_base += chunk->nents * PAGE_SIZE;
			ret -= chunk->nents;
			off += chunk->nents;
			list_add_tail(&chunk->list, chunk_list);
		}

		ret = 0;
	}

out:
	if (ret < 0) {
		usnic_uiom_put_pages(chunk_list, 0);
		atomic64_sub(npages, &current->mm->pinned_vm);
	} else
		mmgrab(uiomr->owning_mm);

	mmap_read_unlock(mm);
	free_page((unsigned long) page_list);
	return ret;
}

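/*
 * Tear down the IOMMU mappings for a sorted list of page-granular VA
 * intervals within a protection domain, one page at a time.
 */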
static void usnic_uiom_unmap_sorted_intervals(struct list_head *intervals,
						struct usnic_uiom_pd *pd)
{
	struct usnic_uiom_interval_node *interval, *tmp;
	unsigned long va, size;

	list_for_each_entry_safe(interval, tmp, intervals, link) {
		va = interval->start << PAGE_SHIFT;
		size = ((interval->last - interval->start) + 1) << PAGE_SHIFT;
		while (size > 0) {
			/* Unmap one page at a time */
			usnic_dbg("va 0x%lx size 0x%lx", va, PAGE_SIZE);
			iommu_unmap(pd->domain, va, PAGE_SIZE);
			va += PAGE_SIZE;
			size -= PAGE_SIZE;
		}
	}
}

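/*
 * Remove a registration's VA range from the protection domain: delete the
 * covering intervals from the interval tree, unmap them from the IOMMU,
 * and unpin the pages, dirtying them only if the mapping was writable.
 */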
static void __usnic_uiom_reg_release(struct usnic_uiom_pd *pd,
					struct usnic_uiom_reg *uiomr,
					int dirty)
{
	int npages;
	unsigned long vpn_start, vpn_last;
	struct usnic_uiom_interval_node *interval, *tmp;
	int writable = 0;
	LIST_HEAD(rm_intervals);

	npages = PAGE_ALIGN(uiomr->length + uiomr->offset) >> PAGE_SHIFT;
	vpn_start = (uiomr->va & PAGE_MASK) >> PAGE_SHIFT;
	vpn_last = vpn_start + npages - 1;

	spin_lock(&pd->lock);
	usnic_uiom_remove_interval(&pd->root, vpn_start,
					vpn_last, &rm_intervals);
	usnic_uiom_unmap_sorted_intervals(&rm_intervals, pd);

	list_for_each_entry_safe(interval, tmp, &rm_intervals, link) {
		if (interval->flags & IOMMU_WRITE)
			writable = 1;
		list_del(&interval->link);
		kfree(interval);
	}

	usnic_uiom_put_pages(&uiomr->chunk_list, dirty & writable);
	spin_unlock(&pd->lock);
}

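/*
 * Map a sorted list of VA intervals into the IOMMU.  Pages are walked in
 * VA order across the chunk list, and within each interval runs of
 * physically contiguous pages are coalesced so that each run becomes a
 * single iommu_map() call.
 */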
static int usnic_uiom_map_sorted_intervals(struct list_head *intervals,
						struct usnic_uiom_reg *uiomr)
{
	int i, err;
	size_t size;
	struct usnic_uiom_chunk *chunk;
	struct usnic_uiom_interval_node *interval_node;
	dma_addr_t pa;
	dma_addr_t pa_start = 0;
	dma_addr_t pa_end = 0;
	long int va_start = -EINVAL;
	struct usnic_uiom_pd *pd = uiomr->pd;
	long int va = uiomr->va & PAGE_MASK;
	int flags = IOMMU_READ | IOMMU_CACHE;

	flags |= (uiomr->writable) ? IOMMU_WRITE : 0;
	chunk = list_first_entry(&uiomr->chunk_list, struct usnic_uiom_chunk,
				 list);
	list_for_each_entry(interval_node, intervals, link) {
iter_chunk:
		for (i = 0; i < chunk->nents; i++, va += PAGE_SIZE) {
			pa = sg_phys(&chunk->page_list[i]);
			if ((va >> PAGE_SHIFT) < interval_node->start)
				continue;

			if ((va >> PAGE_SHIFT) == interval_node->start) {
				/* First page of the interval */
				va_start = va;
				pa_start = pa;
				pa_end = pa;
			}

			WARN_ON(va_start == -EINVAL);

			if ((pa_end + PAGE_SIZE != pa) &&
					(pa != pa_start)) {
				/* PAs are not contiguous */
				size = pa_end - pa_start + PAGE_SIZE;
				usnic_dbg("va 0x%lx pa %pa size 0x%zx flags 0x%x",
					va_start, &pa_start, size, flags);
				err = iommu_map(pd->domain, va_start, pa_start,
						size, flags);
				if (err) {
					usnic_err("Failed to map va 0x%lx pa %pa size 0x%zx with err %d\n",
						va_start, &pa_start, size, err);
					goto err_out;
				}
				va_start = va;
				pa_start = pa;
				pa_end = pa;
			}

			if ((va >> PAGE_SHIFT) == interval_node->last) {
				/* Last page of the interval */
				size = pa - pa_start + PAGE_SIZE;
				usnic_dbg("va 0x%lx pa %pa size 0x%zx flags 0x%x\n",
					va_start, &pa_start, size, flags);
				err = iommu_map(pd->domain, va_start, pa_start,
						size, flags);
				if (err) {
					usnic_err("Failed to map va 0x%lx pa %pa size 0x%zx with err %d\n",
						va_start, &pa_start, size, err);
					goto err_out;
				}
				break;
			}

			if (pa != pa_start)
				pa_end += PAGE_SIZE;
		}

		if (i == chunk->nents) {
			/*
			 * Hit the last entry of this chunk, so advance to
			 * the next chunk and continue with this interval.
			 */
			chunk = list_first_entry(&chunk->list,
						 struct usnic_uiom_chunk,
						 list);
			goto iter_chunk;
		}
	}

	return 0;

err_out:
	usnic_uiom_unmap_sorted_intervals(intervals, pd);
	return err;
}

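/*
 * Register the user memory [addr, addr + size) with the protection domain:
 * pin the pages, compute the set of page intervals not already mapped, map
 * those into the IOMMU, and insert the full range into the interval tree.
 * A minimal caller sketch (assuming a pd from usnic_uiom_alloc_pd() with a
 * device already attached):
 *
 *	uiomr = usnic_uiom_reg_get(pd, addr, size, 1, 0);
 *	if (IS_ERR(uiomr))
 *		return PTR_ERR(uiomr);
 *	...
 *	usnic_uiom_reg_release(uiomr);
 */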
struct usnic_uiom_reg *usnic_uiom_reg_get(struct usnic_uiom_pd *pd,
						unsigned long addr, size_t size,
						int writable, int dmasync)
{
	struct usnic_uiom_reg *uiomr;
	unsigned long va_base, vpn_start, vpn_last;
	unsigned long npages;
	int offset, err;
	LIST_HEAD(sorted_diff_intervals);

	/*
	 * The Intel IOMMU map call throws an error if a translation entry
	 * is changed from read to write.  This module must not unmap and
	 * then remap the entry after fixing the permission, because that
	 * would open a small window in which hardware DMA may page fault.
	 * Hence, make all entries writable.
	 */
	writable = 1;

	va_base = addr & PAGE_MASK;
	offset = addr & ~PAGE_MASK;
	npages = PAGE_ALIGN(size + offset) >> PAGE_SHIFT;
	vpn_start = (addr & PAGE_MASK) >> PAGE_SHIFT;
	vpn_last = vpn_start + npages - 1;

	uiomr = kmalloc(sizeof(*uiomr), GFP_KERNEL);
	if (!uiomr)
		return ERR_PTR(-ENOMEM);

	uiomr->va = va_base;
	uiomr->offset = offset;
	uiomr->length = size;
	uiomr->writable = writable;
	uiomr->pd = pd;

	err = usnic_uiom_get_pages(addr, size, writable, dmasync,
				   uiomr);
	if (err) {
		usnic_err("Failed get_pages vpn [0x%lx,0x%lx] err %d\n",
			  vpn_start, vpn_last, err);
		goto out_free_uiomr;
	}

	spin_lock(&pd->lock);
	err = usnic_uiom_get_intervals_diff(vpn_start, vpn_last,
					    (writable) ? IOMMU_WRITE : 0,
					    IOMMU_WRITE,
					    &pd->root,
					    &sorted_diff_intervals);
	if (err) {
		usnic_err("Failed disjoint interval vpn [0x%lx,0x%lx] err %d\n",
			  vpn_start, vpn_last, err);
		goto out_put_pages;
	}

	err = usnic_uiom_map_sorted_intervals(&sorted_diff_intervals, uiomr);
	if (err) {
		usnic_err("Failed map interval vpn [0x%lx,0x%lx] err %d\n",
			  vpn_start, vpn_last, err);
		goto out_put_intervals;
	}

	err = usnic_uiom_insert_interval(&pd->root, vpn_start, vpn_last,
					 (writable) ? IOMMU_WRITE : 0);
	if (err) {
		usnic_err("Failed insert interval vpn [0x%lx,0x%lx] err %d\n",
			  vpn_start, vpn_last, err);
		goto out_unmap_intervals;
	}

	usnic_uiom_put_interval_set(&sorted_diff_intervals);
	spin_unlock(&pd->lock);

	return uiomr;

out_unmap_intervals:
	usnic_uiom_unmap_sorted_intervals(&sorted_diff_intervals, pd);
out_put_intervals:
	usnic_uiom_put_interval_set(&sorted_diff_intervals);
out_put_pages:
	usnic_uiom_put_pages(&uiomr->chunk_list, 0);
	spin_unlock(&pd->lock);
	mmdrop(uiomr->owning_mm);
out_free_uiomr:
	kfree(uiomr);
	return ERR_PTR(err);
}

static void __usnic_uiom_release_tail(struct usnic_uiom_reg *uiomr)
{
	mmdrop(uiomr->owning_mm);
	kfree(uiomr);
}

static inline size_t usnic_uiom_num_pages(struct usnic_uiom_reg *uiomr)
{
	return PAGE_ALIGN(uiomr->length + uiomr->offset) >> PAGE_SHIFT;
}

void usnic_uiom_reg_release(struct usnic_uiom_reg *uiomr)
{
	__usnic_uiom_reg_release(uiomr->pd, uiomr, 1);

	atomic64_sub(usnic_uiom_num_pages(uiomr), &uiomr->owning_mm->pinned_vm);
	__usnic_uiom_release_tail(uiomr);
}

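/*
 * Allocate a protection domain backed by a fresh IOMMU domain on the PCI
 * bus and install the fault handler above.
 */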
struct usnic_uiom_pd *usnic_uiom_alloc_pd(void)
{
	struct usnic_uiom_pd *pd;
	void *domain;

	pd = kzalloc(sizeof(*pd), GFP_KERNEL);
	if (!pd)
		return ERR_PTR(-ENOMEM);

	pd->domain = domain = iommu_domain_alloc(&pci_bus_type);
	if (!domain) {
		usnic_err("Failed to allocate IOMMU domain");
		kfree(pd);
		return ERR_PTR(-ENOMEM);
	}

	iommu_set_fault_handler(pd->domain, usnic_uiom_dma_fault, NULL);

	spin_lock_init(&pd->lock);
	INIT_LIST_HEAD(&pd->devs);

	return pd;
}

void usnic_uiom_dealloc_pd(struct usnic_uiom_pd *pd)
{
	iommu_domain_free(pd->domain);
	kfree(pd);
}

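/*
 * Attach a device to the protection domain's IOMMU domain and track it on
 * pd->devs.  The attach is rejected if the IOMMU cannot provide cache
 * coherency, since all mappings here are created with IOMMU_CACHE.
 */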
int usnic_uiom_attach_dev_to_pd(struct usnic_uiom_pd *pd, struct device *dev)
{
	struct usnic_uiom_dev *uiom_dev;
	int err;

	uiom_dev = kzalloc(sizeof(*uiom_dev), GFP_ATOMIC);
	if (!uiom_dev)
		return -ENOMEM;
	uiom_dev->dev = dev;

	err = iommu_attach_device(pd->domain, dev);
	if (err)
		goto out_free_dev;

	if (!iommu_capable(dev->bus, IOMMU_CAP_CACHE_COHERENCY)) {
		usnic_err("IOMMU of %s does not support cache coherency\n",
				dev_name(dev));
		err = -EINVAL;
		goto out_detach_device;
	}

	spin_lock(&pd->lock);
	list_add_tail(&uiom_dev->link, &pd->devs);
	pd->dev_cnt++;
	spin_unlock(&pd->lock);

	return 0;

out_detach_device:
	iommu_detach_device(pd->domain, dev);
out_free_dev:
	kfree(uiom_dev);
	return err;
}

void usnic_uiom_detach_dev_from_pd(struct usnic_uiom_pd *pd, struct device *dev)
{
	struct usnic_uiom_dev *uiom_dev;
	int found = 0;

	spin_lock(&pd->lock);
	list_for_each_entry(uiom_dev, &pd->devs, link) {
		if (uiom_dev->dev == dev) {
			found = 1;
			break;
		}
	}

	if (!found) {
		usnic_err("Unable to free dev %s - not found\n",
			  dev_name(dev));
		spin_unlock(&pd->lock);
		return;
	}

	list_del(&uiom_dev->link);
	pd->dev_cnt--;
	spin_unlock(&pd->lock);

	iommu_detach_device(pd->domain, dev);
}

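/*
 * Return a NULL-terminated array of the devices currently attached to the
 * protection domain.  The caller frees it with usnic_uiom_free_dev_list().
 */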
struct device **usnic_uiom_get_dev_list(struct usnic_uiom_pd *pd)
{
	struct usnic_uiom_dev *uiom_dev;
	struct device **devs;
	int i = 0;

	spin_lock(&pd->lock);
	devs = kcalloc(pd->dev_cnt + 1, sizeof(*devs), GFP_ATOMIC);
	if (!devs) {
		devs = ERR_PTR(-ENOMEM);
		goto out;
	}

	list_for_each_entry(uiom_dev, &pd->devs, link) {
		devs[i++] = uiom_dev->dev;
	}
out:
	spin_unlock(&pd->lock);
	return devs;
}

void usnic_uiom_free_dev_list(struct device **devs)
{
	kfree(devs);
}

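/*
 * Module-init sanity check: usNIC relies on the IOMMU for userspace DMA
 * isolation, so fail initialization when no IOMMU is present on the PCI
 * bus.
 */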
int usnic_uiom_init(char *drv_name)
{
	if (!iommu_present(&pci_bus_type)) {
		usnic_err("IOMMU required but not present or enabled. USNIC QPs will not function w/o enabling IOMMU\n");
		return -EPERM;
	}

	return 0;
}