/*
 * SPDX-License-Identifier: MIT
 *
 * Copyright © 2012-2014 Intel Corporation
 */
#include <drm/drmP.h>
#include <drm/i915_drm.h>
#include "i915_drv.h"
#include "i915_trace.h"
#include "intel_drv.h"
#include <linux/mmu_context.h>
#include <linux/mmu_notifier.h>
#include <linux/mempolicy.h>
#include <linux/swap.h>

struct i915_mm_struct {
	struct mm_struct *mm;
	struct drm_device *dev;
	struct i915_mmu_notifier *mn;
	struct hlist_node node;
	struct kref kref;
	struct work_struct work;
};

#if defined(CONFIG_MMU_NOTIFIER)
#include <linux/interval_tree.h>

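/*
 * Per-mm mmu_notifier state: all userptr objects created from a given mm are
 * tracked either in an interval tree keyed by their user address range or,
 * once ranges overlap, on the linear list. The serial counter is bumped on
 * every add/remove so that walkers can detect that they must restart.
 */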
struct i915_mmu_notifier {
	spinlock_t lock;
	struct hlist_node node;
	struct mmu_notifier mn;
	struct rb_root objects;
	struct list_head linear;
	unsigned long serial;
	bool has_linear;
};

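/*
 * Per-object tracking entry linking a userptr GEM object to the notifier of
 * the mm it was created from.
 */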
struct i915_mmu_object {
	struct i915_mmu_notifier *mn;
	struct interval_tree_node it;
	struct list_head link;
	struct drm_i915_gem_object *obj;
	bool is_linear;
};

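/*
 * Invalidate a userptr object: unbind all of its VMAs and release its backing
 * pages, then drop the reference taken by the caller. Returns the end of the
 * object's range so that the invalidation walk can resume from there.
 */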
static unsigned long cancel_userptr(struct drm_i915_gem_object *obj)
{
	struct drm_device *dev = obj->base.dev;
	unsigned long end;

	mutex_lock(&dev->struct_mutex);
	/* Cancel any active worker and force us to re-evaluate gup */
	obj->userptr.work = NULL;

	if (obj->pages != NULL) {
		struct drm_i915_private *dev_priv = to_i915(dev);
		struct i915_vma *vma, *tmp;
		bool was_interruptible;

		was_interruptible = dev_priv->mm.interruptible;
		dev_priv->mm.interruptible = false;

		list_for_each_entry_safe(vma, tmp, &obj->vma_list, vma_link) {
			int ret = i915_vma_unbind(vma);
			WARN_ON(ret && ret != -EIO);
		}
		WARN_ON(i915_gem_object_put_pages(obj));

		dev_priv->mm.interruptible = was_interruptible;
	}

	end = obj->userptr.ptr + obj->base.size;

	drm_gem_object_unreference(&obj->base);
	mutex_unlock(&dev->struct_mutex);

	return end;
}

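/*
 * Fallback for overlapping ranges: walk the linear list and cancel every
 * object that intersects [start, end]. The list may change while the spinlock
 * is dropped around cancel_userptr(), so the walk restarts whenever the
 * serial changes. Always returns NULL so that the caller's interval tree
 * cursor is reset.
 */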
static void *invalidate_range__linear(struct i915_mmu_notifier *mn,
				      struct mm_struct *mm,
				      unsigned long start,
				      unsigned long end)
{
	struct i915_mmu_object *mo;
	unsigned long serial;

restart:
	serial = mn->serial;
	list_for_each_entry(mo, &mn->linear, link) {
		struct drm_i915_gem_object *obj;

		if (mo->it.last < start || mo->it.start > end)
			continue;

		obj = mo->obj;

		if (!kref_get_unless_zero(&obj->base.refcount))
			continue;

		spin_unlock(&mn->lock);

		cancel_userptr(obj);

		spin_lock(&mn->lock);
		if (serial != mn->serial)
			goto restart;
	}

	return NULL;
}

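/*
 * mmu_notifier callback: a range of the process address space is about to be
 * invalidated (munmap, mprotect, page migration, ...), so every userptr
 * object overlapping that range must have its GPU bindings and pages released
 * before the kernel proceeds.
 */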
static void i915_gem_userptr_mn_invalidate_range_start(struct mmu_notifier *_mn,
						       struct mm_struct *mm,
						       unsigned long start,
						       unsigned long end)
{
	struct i915_mmu_notifier *mn = container_of(_mn, struct i915_mmu_notifier, mn);
	struct interval_tree_node *it = NULL;
	unsigned long next = start;
	unsigned long serial = 0;

	end--; /* interval ranges are inclusive, but invalidate range is exclusive */
	while (next < end) {
		struct drm_i915_gem_object *obj = NULL;

		spin_lock(&mn->lock);
		if (mn->has_linear)
			it = invalidate_range__linear(mn, mm, start, end);
		else if (serial == mn->serial)
			it = interval_tree_iter_next(it, next, end);
		else
			it = interval_tree_iter_first(&mn->objects, start, end);
		if (it != NULL) {
			obj = container_of(it, struct i915_mmu_object, it)->obj;

			/* The mmu_object is released late when destroying the
			 * GEM object, so it is entirely possible to find an
			 * object here that is already in the process of being
			 * freed - our serialisation is via the spinlock, not
			 * the struct_mutex. Only proceed if we can still take
			 * a reference; otherwise force a rescan.
			 */
			if (!kref_get_unless_zero(&obj->base.refcount)) {
				spin_unlock(&mn->lock);
				serial = 0;
				continue;
			}

			serial = mn->serial;
		}
		spin_unlock(&mn->lock);
		if (obj == NULL)
			return;

		next = cancel_userptr(obj);
	}
}

static const struct mmu_notifier_ops i915_gem_userptr_notifier = {
	.invalidate_range_start = i915_gem_userptr_mn_invalidate_range_start,
};

static struct i915_mmu_notifier *
i915_mmu_notifier_create(struct mm_struct *mm)
{
	struct i915_mmu_notifier *mn;
	int ret;

	mn = kmalloc(sizeof(*mn), GFP_KERNEL);
	if (mn == NULL)
		return ERR_PTR(-ENOMEM);

	spin_lock_init(&mn->lock);
	mn->mn.ops = &i915_gem_userptr_notifier;
	mn->objects = RB_ROOT;
	mn->serial = 1;
	INIT_LIST_HEAD(&mn->linear);
	mn->has_linear = false;

	/* Protected by mmap_sem (write-lock) */
	ret = __mmu_notifier_register(&mn->mn, mm);
	if (ret) {
		kfree(mn);
		return ERR_PTR(ret);
	}

	return mn;
}

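/* Bump the serial; 0 is reserved to mean "force a rescan", so skip it. */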
static void __i915_mmu_notifier_update_serial(struct i915_mmu_notifier *mn)
{
	if (++mn->serial == 0)
		mn->serial = 1;
}

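/*
 * Register a new userptr object with the notifier. If its range does not
 * overlap an existing object it is inserted into the interval tree; otherwise
 * we fall back to a linear search for the whole notifier, which is only safe
 * while the existing object has no gup workers in flight.
 */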
static int
i915_mmu_notifier_add(struct drm_device *dev,
		      struct i915_mmu_notifier *mn,
		      struct i915_mmu_object *mo)
{
	struct interval_tree_node *it;
	int ret = 0;

	/* Take the struct_mutex so that we can retire outstanding requests
	 * below and serialise against the release of objects that still
	 * occupy the interval tree.
	 */
	mutex_lock(&dev->struct_mutex);

	/* Retiring requests flushes any pending frees of objects whose last
	 * reference was held by the GPU, so stale entries drop out of the
	 * interval tree before we check for an overlap.
	 */
	i915_gem_retire_requests(dev);

	spin_lock(&mn->lock);
	it = interval_tree_iter_first(&mn->objects,
				      mo->it.start, mo->it.last);
	if (it) {
		struct drm_i915_gem_object *obj;

		/* We only need to check the first object in the range as it
		 * either has cancelled gup work queued - in which case we
		 * return -EAGAIN to give the workers time to flush their
		 * object references, upon which the object will be removed
		 * from the interval tree - or the range is still in use by
		 * another client and the overlap is genuine.
		 *
		 * If we do have an overlap, we cannot use the interval tree
		 * for fast range invalidation and must fall back to the
		 * linear list.
		 */
		obj = container_of(it, struct i915_mmu_object, it)->obj;
		if (!obj->userptr.workers)
			mn->has_linear = mo->is_linear = true;
		else
			ret = -EAGAIN;
	} else
		interval_tree_insert(&mo->it, &mn->objects);

	if (ret == 0) {
		list_add(&mo->link, &mn->linear);
		__i915_mmu_notifier_update_serial(mn);
	}
	spin_unlock(&mn->lock);
	mutex_unlock(&dev->struct_mutex);

	return ret;
}

static bool i915_mmu_notifier_has_linear(struct i915_mmu_notifier *mn)
{
	struct i915_mmu_object *mo;

	list_for_each_entry(mo, &mn->linear, link)
		if (mo->is_linear)
			return true;

	return false;
}

static void
i915_mmu_notifier_del(struct i915_mmu_notifier *mn,
		      struct i915_mmu_object *mo)
{
	spin_lock(&mn->lock);
	list_del(&mo->link);
	if (mo->is_linear)
		mn->has_linear = i915_mmu_notifier_has_linear(mn);
	else
		interval_tree_remove(&mo->it, &mn->objects);
	__i915_mmu_notifier_update_serial(mn);
	spin_unlock(&mn->lock);
}

static void
i915_gem_userptr_release__mmu_notifier(struct drm_i915_gem_object *obj)
{
	struct i915_mmu_object *mo;

	mo = obj->userptr.mmu_object;
	if (mo == NULL)
		return;

	i915_mmu_notifier_del(mo->mn, mo);
	kfree(mo);

	obj->userptr.mmu_object = NULL;
}

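/*
 * Look up (or lazily create and register) the mmu_notifier for this mm.
 * Creation is serialised by holding mmap_sem for write around the
 * dev_priv->mm_lock protected check-and-assign.
 */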
static struct i915_mmu_notifier *
i915_mmu_notifier_find(struct i915_mm_struct *mm)
{
	struct i915_mmu_notifier *mn = mm->mn;

	if (mn)
		return mn;

	down_write(&mm->mm->mmap_sem);
	mutex_lock(&to_i915(mm->dev)->mm_lock);
	if ((mn = mm->mn) == NULL) {
		mn = i915_mmu_notifier_create(mm->mm);
		if (!IS_ERR(mn))
			mm->mn = mn;
	}
	mutex_unlock(&to_i915(mm->dev)->mm_lock);
	up_write(&mm->mm->mmap_sem);

	return mn;
}

static int
i915_gem_userptr_init__mmu_notifier(struct drm_i915_gem_object *obj,
				    unsigned flags)
{
	struct i915_mmu_notifier *mn;
	struct i915_mmu_object *mo;
	int ret;

	if (flags & I915_USERPTR_UNSYNCHRONIZED)
		return capable(CAP_SYS_ADMIN) ? 0 : -EPERM;

	if (WARN_ON(obj->userptr.mm == NULL))
		return -EINVAL;

	mn = i915_mmu_notifier_find(obj->userptr.mm);
	if (IS_ERR(mn))
		return PTR_ERR(mn);

	mo = kzalloc(sizeof(*mo), GFP_KERNEL);
	if (mo == NULL)
		return -ENOMEM;

	mo->mn = mn;
	mo->it.start = obj->userptr.ptr;
	mo->it.last = mo->it.start + obj->base.size - 1;
	mo->obj = obj;

	ret = i915_mmu_notifier_add(obj->base.dev, mn, mo);
	if (ret) {
		kfree(mo);
		return ret;
	}

	obj->userptr.mmu_object = mo;
	return 0;
}

static void
i915_mmu_notifier_free(struct i915_mmu_notifier *mn,
		       struct mm_struct *mm)
{
	if (mn == NULL)
		return;

	mmu_notifier_unregister(&mn->mn, mm);
	kfree(mn);
}

#else

static void
i915_gem_userptr_release__mmu_notifier(struct drm_i915_gem_object *obj)
{
}

static int
i915_gem_userptr_init__mmu_notifier(struct drm_i915_gem_object *obj,
				    unsigned flags)
{
	if ((flags & I915_USERPTR_UNSYNCHRONIZED) == 0)
		return -ENODEV;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	return 0;
}

static void
i915_mmu_notifier_free(struct i915_mmu_notifier *mn,
		       struct mm_struct *mm)
{
}

#endif

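/*
 * An i915_mm_struct is shared by every userptr object created from the same
 * process address space. It lives in the dev_priv->mm_structs hash, keyed by
 * the mm_struct pointer, and pins the mm with an mm_count reference until the
 * last userptr object for that mm is released.
 */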
static struct i915_mm_struct *
__i915_mm_struct_find(struct drm_i915_private *dev_priv, struct mm_struct *real)
{
	struct i915_mm_struct *mm;

	/* Protected by dev_priv->mm_lock */
	hash_for_each_possible(dev_priv->mm_structs, mm, node, (unsigned long)real)
		if (mm->mm == real)
			return mm;

	return NULL;
}

static int
i915_gem_userptr_init__mm_struct(struct drm_i915_gem_object *obj)
{
	struct drm_i915_private *dev_priv = to_i915(obj->base.dev);
	struct i915_mm_struct *mm;
	int ret = 0;

	/* During release of the GEM object we hold the struct_mutex. This
	 * precludes us from tearing down the i915_mm_struct at that time:
	 * unregistering the mmu_notifier and dropping the mm reference may
	 * sleep and can recurse back into mm/GEM locks we already hold.
	 * So the final clean up is deferred to a worker that runs after
	 * the struct_mutex has been released.
	 */
	mutex_lock(&dev_priv->mm_lock);
	mm = __i915_mm_struct_find(dev_priv, current->mm);
	if (mm == NULL) {
		mm = kmalloc(sizeof(*mm), GFP_KERNEL);
		if (mm == NULL) {
			ret = -ENOMEM;
			goto out;
		}

		kref_init(&mm->kref);
		mm->dev = obj->base.dev;

		mm->mm = current->mm;
		atomic_inc(&current->mm->mm_count);

		mm->mn = NULL;

		/* Protected by dev_priv->mm_lock */
		hash_add(dev_priv->mm_structs,
			 &mm->node, (unsigned long)mm->mm);
	} else
		kref_get(&mm->kref);

	obj->userptr.mm = mm;
out:
	mutex_unlock(&dev_priv->mm_lock);
	return ret;
}

static void
__i915_mm_struct_free__worker(struct work_struct *work)
{
	struct i915_mm_struct *mm = container_of(work, typeof(*mm), work);
	i915_mmu_notifier_free(mm->mn, mm->mm);
	mmdrop(mm->mm);
	kfree(mm);
}

static void
__i915_mm_struct_free(struct kref *kref)
{
	struct i915_mm_struct *mm = container_of(kref, typeof(*mm), kref);

	/* Protected by dev_priv->mm_lock */
	hash_del(&mm->node);
	mutex_unlock(&to_i915(mm->dev)->mm_lock);

	INIT_WORK(&mm->work, __i915_mm_struct_free__worker);
	schedule_work(&mm->work);
}

static void
i915_gem_userptr_release__mm_struct(struct drm_i915_gem_object *obj)
{
	if (obj->userptr.mm == NULL)
		return;

	kref_put_mutex(&obj->userptr.mm->kref,
		       __i915_mm_struct_free,
		       &to_i915(obj->base.dev)->mm_lock);
	obj->userptr.mm = NULL;
}

struct get_pages_work {
	struct work_struct work;
	struct drm_i915_gem_object *obj;
	struct task_struct *task;
};

#if IS_ENABLED(CONFIG_SWIOTLB)
#define swiotlb_active() swiotlb_nr_tbl()
#else
#define swiotlb_active() 0
#endif

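/*
 * Build an sg_table for the pinned pages. When swiotlb is active we keep one
 * page per scatterlist entry so that bounce buffering never sees a coalesced
 * chunk larger than a page; otherwise adjacent pages are merged by
 * sg_alloc_table_from_pages().
 */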
static int
st_set_pages(struct sg_table **st, struct page **pvec, int num_pages)
{
	struct scatterlist *sg;
	int ret, n;

	*st = kmalloc(sizeof(**st), GFP_KERNEL);
	if (*st == NULL)
		return -ENOMEM;

	if (swiotlb_active()) {
		ret = sg_alloc_table(*st, num_pages, GFP_KERNEL);
		if (ret)
			goto err;

		for_each_sg((*st)->sgl, sg, num_pages, n)
			sg_set_page(sg, pvec[n], PAGE_SIZE, 0);
	} else {
		ret = sg_alloc_table_from_pages(*st, pvec, num_pages,
						0, num_pages << PAGE_SHIFT,
						GFP_KERNEL);
		if (ret)
			goto err;
	}

	return 0;

err:
	kfree(*st);
	*st = NULL;
	return ret;
}

static int
__i915_gem_userptr_set_pages(struct drm_i915_gem_object *obj,
			     struct page **pvec, int num_pages)
{
	int ret;

	ret = st_set_pages(&obj->pages, pvec, num_pages);
	if (ret)
		return ret;

	ret = i915_gem_gtt_prepare_object(obj);
	if (ret) {
		sg_free_table(obj->pages);
		kfree(obj->pages);
		obj->pages = NULL;
	}

	return ret;
}

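/*
 * Slow path: runs from a workqueue and pins the user pages with
 * get_user_pages() under the creating process' mmap_sem. On success the pages
 * are installed as the object's backing store; in all cases the outcome is
 * recorded in obj->userptr.work for the next get_pages call to pick up.
 */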
static void
__i915_gem_userptr_get_pages_worker(struct work_struct *_work)
{
	struct get_pages_work *work = container_of(_work, typeof(*work), work);
	struct drm_i915_gem_object *obj = work->obj;
	struct drm_device *dev = obj->base.dev;
	const int num_pages = obj->base.size >> PAGE_SHIFT;
	struct page **pvec;
	int pinned, ret;

	ret = -ENOMEM;
	pinned = 0;

	pvec = kmalloc(num_pages*sizeof(struct page *),
		       GFP_TEMPORARY | __GFP_NOWARN | __GFP_NORETRY);
	if (pvec == NULL)
		pvec = drm_malloc_ab(num_pages, sizeof(struct page *));
	if (pvec != NULL) {
		struct mm_struct *mm = obj->userptr.mm->mm;

		down_read(&mm->mmap_sem);
		while (pinned < num_pages) {
			ret = get_user_pages(work->task, mm,
					     obj->userptr.ptr + pinned * PAGE_SIZE,
					     num_pages - pinned,
					     !obj->userptr.read_only, 0,
					     pvec + pinned, NULL);
			if (ret < 0)
				break;

			pinned += ret;
		}
		up_read(&mm->mmap_sem);
	}

	mutex_lock(&dev->struct_mutex);
	if (obj->userptr.work != &work->work) {
		ret = 0;
	} else if (pinned == num_pages) {
		ret = __i915_gem_userptr_set_pages(obj, pvec, num_pages);
		if (ret == 0) {
			list_add_tail(&obj->global_list, &to_i915(dev)->mm.unbound_list);
			obj->get_page.sg = obj->pages->sgl;
			obj->get_page.last = 0;

			pinned = 0;
		}
	}

	obj->userptr.work = ERR_PTR(ret);
	obj->userptr.workers--;
	drm_gem_object_unreference(&obj->base);
	mutex_unlock(&dev->struct_mutex);

	release_pages(pvec, pinned, 0);
	drm_free_large(pvec);

	put_task_struct(work->task);
	kfree(work);
}

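/*
 * Allocate the backing pages for a userptr object. When called from the
 * process that created the userptr we first try the lockless
 * __get_user_pages_fast(). If that cannot pin every page, or if we are
 * running on behalf of another process, the pinning is handed off to a worker
 * so that get_user_pages() and mmap_sem are never taken while we hold the
 * struct_mutex. Until the worker completes the caller sees -EAGAIN; a worker
 * failure is reported back through obj->userptr.work.
 */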
static int
i915_gem_userptr_get_pages(struct drm_i915_gem_object *obj)
{
	const int num_pages = obj->base.size >> PAGE_SHIFT;
	struct page **pvec;
	int pinned, ret;

	/* Optimistically try to pin everything without blocking. This only
	 * works if the pages are resident and we are called by the process
	 * that originally created the userptr; otherwise we must defer to
	 * the worker below.
	 */
	pvec = NULL;
	pinned = 0;
	if (obj->userptr.mm->mm == current->mm) {
		pvec = kmalloc(num_pages*sizeof(struct page *),
			       GFP_TEMPORARY | __GFP_NOWARN | __GFP_NORETRY);
		if (pvec == NULL) {
			pvec = drm_malloc_ab(num_pages, sizeof(struct page *));
			if (pvec == NULL)
				return -ENOMEM;
		}

		pinned = __get_user_pages_fast(obj->userptr.ptr, num_pages,
					       !obj->userptr.read_only, pvec);
	}
	if (pinned < num_pages) {
		if (pinned < 0) {
			ret = pinned;
			pinned = 0;
		} else {
			/* Spawn a worker to pin the pages with
			 * get_user_pages() outside of the struct_mutex. Only
			 * a limited number of workers may be in flight per
			 * object; once one is queued we report -EAGAIN so the
			 * caller retries after it completes, or pick up the
			 * error it left behind in obj->userptr.work.
			 */
			ret = -EAGAIN;
			if (obj->userptr.work == NULL &&
			    obj->userptr.workers < I915_GEM_USERPTR_MAX_WORKERS) {
				struct get_pages_work *work;

				work = kmalloc(sizeof(*work), GFP_KERNEL);
				if (work != NULL) {
					obj->userptr.work = &work->work;
					obj->userptr.workers++;

					work->obj = obj;
					drm_gem_object_reference(&obj->base);

					work->task = current;
					get_task_struct(work->task);

					INIT_WORK(&work->work, __i915_gem_userptr_get_pages_worker);
					schedule_work(&work->work);
				} else
					ret = -ENOMEM;
			} else {
				if (IS_ERR(obj->userptr.work)) {
					ret = PTR_ERR(obj->userptr.work);
					obj->userptr.work = NULL;
				}
			}
		}
	} else {
		ret = __i915_gem_userptr_set_pages(obj, pvec, num_pages);
		if (ret == 0) {
			obj->userptr.work = NULL;
			pinned = 0;
		}
	}

	release_pages(pvec, pinned, 0);
	drm_free_large(pvec);
	return ret;
}

static void
i915_gem_userptr_put_pages(struct drm_i915_gem_object *obj)
{
	struct sg_page_iter sg_iter;

	BUG_ON(obj->userptr.work != NULL);

	if (obj->madv != I915_MADV_WILLNEED)
		obj->dirty = 0;

	i915_gem_gtt_finish_object(obj);

	for_each_sg_page(obj->pages->sgl, &sg_iter, obj->pages->nents, 0) {
		struct page *page = sg_page_iter_page(&sg_iter);

		if (obj->dirty)
			set_page_dirty(page);

		mark_page_accessed(page);
		page_cache_release(page);
	}
	obj->dirty = 0;

	sg_free_table(obj->pages);
	kfree(obj->pages);
}

static void
i915_gem_userptr_release(struct drm_i915_gem_object *obj)
{
	i915_gem_userptr_release__mmu_notifier(obj);
	i915_gem_userptr_release__mm_struct(obj);
}

static int
i915_gem_userptr_dmabuf_export(struct drm_i915_gem_object *obj)
{
	if (obj->userptr.mmu_object)
		return 0;

	return i915_gem_userptr_init__mmu_notifier(obj, 0);
}

static const struct drm_i915_gem_object_ops i915_gem_userptr_ops = {
	.dmabuf_export = i915_gem_userptr_dmabuf_export,
	.get_pages = i915_gem_userptr_get_pages,
	.put_pages = i915_gem_userptr_put_pages,
	.release = i915_gem_userptr_release,
};

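/**
 * i915_gem_userptr_ioctl - create a GEM object backed by user memory
 *
 * Creates a new GEM object that wraps ordinary process memory (a userptr).
 * Several restrictions are imposed on the memory being mapped to the GPU:
 *
 * 1. The address and size must be page aligned and the whole range must be
 *    accessible (writable, unless I915_USERPTR_READ_ONLY is requested).
 * 2. The object may be no larger than the total size of the GTT.
 * 3. The object is created with CPU-snoopable (LLC) caching and with CPU
 *    read/write domains.
 *
 * Unless I915_USERPTR_UNSYNCHRONIZED is requested (CAP_SYS_ADMIN only), an
 * mmu_notifier is installed so that the object is invalidated whenever the
 * process unmaps or otherwise changes the underlying pages.
 */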
int
i915_gem_userptr_ioctl(struct drm_device *dev, void *data, struct drm_file *file)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_i915_gem_userptr *args = data;
	struct drm_i915_gem_object *obj;
	int ret;
	u32 handle;

	if (args->flags & ~(I915_USERPTR_READ_ONLY |
			    I915_USERPTR_UNSYNCHRONIZED))
		return -EINVAL;

	if (offset_in_page(args->user_ptr | args->user_size))
		return -EINVAL;

	if (args->user_size > dev_priv->gtt.base.total)
		return -E2BIG;

	if (!access_ok(args->flags & I915_USERPTR_READ_ONLY ? VERIFY_READ : VERIFY_WRITE,
		       (char __user *)(unsigned long)args->user_ptr, args->user_size))
		return -EFAULT;

	if (args->flags & I915_USERPTR_READ_ONLY) {
		/* On almost all of the current hardware, we cannot tell the
		 * GPU that a page is read-only, so this flag is just a
		 * placeholder in the uAPI for now.
		 */
		return -ENODEV;
	}

	obj = i915_gem_object_alloc(dev);
	if (obj == NULL)
		return -ENOMEM;

	drm_gem_private_object_init(dev, &obj->base, args->user_size);
	i915_gem_object_init(obj, &i915_gem_userptr_ops);
	obj->cache_level = I915_CACHE_LLC;
	obj->base.write_domain = I915_GEM_DOMAIN_CPU;
	obj->base.read_domains = I915_GEM_DOMAIN_CPU;

	obj->userptr.ptr = args->user_ptr;
	obj->userptr.read_only = !!(args->flags & I915_USERPTR_READ_ONLY);

	/* And keep a pointer to the current->mm for resolving the user pages
	 * at binding. This means that we need to hook into the mmu_notifier
	 * in order to detect if the mmu is destroyed.
	 */
	ret = i915_gem_userptr_init__mm_struct(obj);
	if (ret == 0)
		ret = i915_gem_userptr_init__mmu_notifier(obj, args->flags);
	if (ret == 0)
		ret = drm_gem_handle_create(file, &obj->base, &handle);

	/* drop reference from allocate - handle holds it now */
	drm_gem_object_unreference_unlocked(&obj->base);
	if (ret)
		return ret;

	args->handle = handle;
	return 0;
}

int
i915_gem_init_userptr(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	mutex_init(&dev_priv->mm_lock);
	hash_init(dev_priv->mm_structs);
	return 0;
}