/*
 * SPDX-License-Identifier: MIT
 *
 * Copyright © 2012-2014 Intel Corporation
 */

#include <linux/mmu_context.h>
#include <linux/mmu_notifier.h>
#include <linux/mempolicy.h>
#include <linux/swap.h>
#include <linux/sched/mm.h>

#include "i915_drv.h"
#include "i915_gem_ioctls.h"
#include "i915_gem_object.h"
#include "i915_scatterlist.h"

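/*
 * Per-client-mm bookkeeping, shared by every userptr object created against
 * the same mm_struct. Entries live in i915->mm_structs, hashed by the mm
 * pointer, and are reference counted; the final release is deferred to an
 * RCU worker (see __i915_mm_struct_free()).
 */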
struct i915_mm_struct {
	struct mm_struct *mm;
	struct drm_i915_private *i915;
	struct i915_mmu_notifier *mn;
	struct hlist_node node;
	struct kref kref;
	struct rcu_work work;
};

#if defined(CONFIG_MMU_NOTIFIER)
#include <linux/interval_tree.h>

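/*
 * One notifier is registered per client mm. It tracks the userptr objects of
 * that mm in an interval tree (keyed by user virtual address) so that an
 * invalidation of an address range can find every affected object.
 */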
struct i915_mmu_notifier {
	spinlock_t lock;
	struct hlist_node node;
	struct mmu_notifier mn;
	struct rb_root_cached objects;
	struct i915_mm_struct *mm;
};

struct i915_mmu_object {
	struct i915_mmu_notifier *mn;
	struct drm_i915_gem_object *obj;
	struct interval_tree_node it;
};

static void add_object(struct i915_mmu_object *mo)
{
	GEM_BUG_ON(!RB_EMPTY_NODE(&mo->it.rb));
	interval_tree_insert(&mo->it, &mo->mn->objects);
}

static void del_object(struct i915_mmu_object *mo)
{
	if (RB_EMPTY_NODE(&mo->it.rb))
		return;

	interval_tree_remove(&mo->it, &mo->mn->objects);
	RB_CLEAR_NODE(&mo->it.rb);
}

static void
__i915_gem_userptr_set_active(struct drm_i915_gem_object *obj, bool value)
{
	struct i915_mmu_object *mo = obj->userptr.mmu_object;

	/*
	 * An object is only tracked in the notifier's interval tree while it
	 * is "active", i.e. while it has backing pages pinned. That lets the
	 * invalidation callback skip objects whose pages are not currently
	 * in use. mmu_object is NULL for objects created with
	 * I915_USERPTR_UNSYNCHRONIZED, which have no notifier at all.
	 */
	if (!mo)
		return;

	spin_lock(&mo->mn->lock);
	if (value)
		add_object(mo);
	else
		del_object(mo);
	spin_unlock(&mo->mn->lock);
}

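/*
 * Called by the mmu-notifier core when a range of the client's address space
 * is about to be invalidated (munmap, page migration, etc.). Every tracked
 * object overlapping the range is unbound from the GPU and has its pages
 * released; for non-blockable invalidations we can only report -EAGAIN.
 */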
static int
userptr_mn_invalidate_range_start(struct mmu_notifier *_mn,
				  const struct mmu_notifier_range *range)
{
	struct i915_mmu_notifier *mn =
		container_of(_mn, struct i915_mmu_notifier, mn);
	struct interval_tree_node *it;
	unsigned long end;
	int ret = 0;

	if (RB_EMPTY_ROOT(&mn->objects.rb_root))
		return 0;

	/* interval ranges are inclusive, but invalidate range is exclusive */
	end = range->end - 1;

	spin_lock(&mn->lock);
	it = interval_tree_iter_first(&mn->objects, range->start, end);
	while (it) {
		struct drm_i915_gem_object *obj;

		if (!mmu_notifier_range_blockable(range)) {
			ret = -EAGAIN;
			break;
		}

		/*
		 * The mmu_object is released late when destroying the GEM
		 * object, so it is entirely possible to find an object here
		 * that is already in the process of being freed. Our only
		 * serialisation is via the spinlock, so only take a reference
		 * if the object's refcount has not yet dropped to zero;
		 * otherwise skip it and let the free path remove it.
		 */
		obj = container_of(it, struct i915_mmu_object, it)->obj;
		if (!kref_get_unless_zero(&obj->base.refcount)) {
			it = interval_tree_iter_next(it, range->start, end);
			continue;
		}
		spin_unlock(&mn->lock);

		ret = i915_gem_object_unbind(obj,
					     I915_GEM_OBJECT_UNBIND_ACTIVE |
					     I915_GEM_OBJECT_UNBIND_BARRIER);
		if (ret == 0)
			ret = __i915_gem_object_put_pages(obj);
		i915_gem_object_put(obj);
		if (ret)
			return ret;

		spin_lock(&mn->lock);

		/*
		 * As we do not (yet) protect the tree from concurrent
		 * insertion over this range, there is no guarantee that this
		 * search will terminate given a pathological workload.
		 */
		it = interval_tree_iter_first(&mn->objects, range->start, end);
	}
	spin_unlock(&mn->lock);

	return ret;
}

static const struct mmu_notifier_ops i915_gem_userptr_notifier = {
	.invalidate_range_start = userptr_mn_invalidate_range_start,
};

static struct i915_mmu_notifier *
i915_mmu_notifier_create(struct i915_mm_struct *mm)
{
	struct i915_mmu_notifier *mn;

	mn = kmalloc(sizeof(*mn), GFP_KERNEL);
	if (mn == NULL)
		return ERR_PTR(-ENOMEM);

	spin_lock_init(&mn->lock);
	mn->mn.ops = &i915_gem_userptr_notifier;
	mn->objects = RB_ROOT_CACHED;
	mn->mm = mm;

	return mn;
}

static void
i915_gem_userptr_release__mmu_notifier(struct drm_i915_gem_object *obj)
{
	struct i915_mmu_object *mo;

	mo = fetch_and_zero(&obj->userptr.mmu_object);
	if (!mo)
		return;

	spin_lock(&mo->mn->lock);
	del_object(mo);
	spin_unlock(&mo->mn->lock);
	kfree(mo);
}

static struct i915_mmu_notifier *
i915_mmu_notifier_find(struct i915_mm_struct *mm)
{
	struct i915_mmu_notifier *mn, *old;
	int err;

	mn = READ_ONCE(mm->mn);
	if (likely(mn))
		return mn;

	mn = i915_mmu_notifier_create(mm);
	if (IS_ERR(mn))
		return mn;

	err = mmu_notifier_register(&mn->mn, mm->mm);
	if (err) {
		kfree(mn);
		return ERR_PTR(err);
	}

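	/*
	 * Publish the freshly registered notifier. If we lose the race with
	 * another thread that installed its own notifier first, unregister
	 * and free ours and use the winner's instead.
	 */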
	old = cmpxchg(&mm->mn, NULL, mn);
	if (old) {
		mmu_notifier_unregister(&mn->mn, mm->mm);
		kfree(mn);
		mn = old;
	}

	return mn;
}

static int
i915_gem_userptr_init__mmu_notifier(struct drm_i915_gem_object *obj,
				    unsigned flags)
{
	struct i915_mmu_notifier *mn;
	struct i915_mmu_object *mo;

	if (flags & I915_USERPTR_UNSYNCHRONIZED)
		return capable(CAP_SYS_ADMIN) ? 0 : -EPERM;

	if (GEM_WARN_ON(!obj->userptr.mm))
		return -EINVAL;

	mn = i915_mmu_notifier_find(obj->userptr.mm);
	if (IS_ERR(mn))
		return PTR_ERR(mn);

	mo = kzalloc(sizeof(*mo), GFP_KERNEL);
	if (!mo)
		return -ENOMEM;

	mo->mn = mn;
	mo->obj = obj;
	mo->it.start = obj->userptr.ptr;
	mo->it.last = obj->userptr.ptr + obj->base.size - 1;
	RB_CLEAR_NODE(&mo->it.rb);

	obj->userptr.mmu_object = mo;
	return 0;
}

static void
i915_mmu_notifier_free(struct i915_mmu_notifier *mn,
		       struct mm_struct *mm)
{
	if (mn == NULL)
		return;

	mmu_notifier_unregister(&mn->mn, mm);
	kfree(mn);
}

#else

static void
__i915_gem_userptr_set_active(struct drm_i915_gem_object *obj, bool value)
{
}

static void
i915_gem_userptr_release__mmu_notifier(struct drm_i915_gem_object *obj)
{
}

static int
i915_gem_userptr_init__mmu_notifier(struct drm_i915_gem_object *obj,
				    unsigned flags)
{
	if ((flags & I915_USERPTR_UNSYNCHRONIZED) == 0)
		return -ENODEV;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	return 0;
}

static void
i915_mmu_notifier_free(struct i915_mmu_notifier *mn,
		       struct mm_struct *mm)
{
}

#endif

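/*
 * Look up the i915_mm_struct for a client mm under RCU, returning it only if
 * a reference could still be taken (i.e. it is not already being freed).
 */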
static struct i915_mm_struct *
__i915_mm_struct_find(struct drm_i915_private *i915, struct mm_struct *real)
{
	struct i915_mm_struct *it, *mm = NULL;

	rcu_read_lock();
	hash_for_each_possible_rcu(i915->mm_structs,
				   it, node,
				   (unsigned long)real)
		if (it->mm == real && kref_get_unless_zero(&it->kref)) {
			mm = it;
			break;
		}
	rcu_read_unlock();

	return mm;
}

static int
i915_gem_userptr_init__mm_struct(struct drm_i915_gem_object *obj)
{
	struct drm_i915_private *i915 = to_i915(obj->base.dev);
	struct i915_mm_struct *mm, *new;
	int ret = 0;

	/*
	 * Userptr objects resolve their backing pages against the mm that
	 * created them, which need not be the mm that later binds the
	 * object. We therefore keep a single, reference-counted
	 * i915_mm_struct per client mm (created lazily here on first use),
	 * holding an mmgrab() reference on the mm_struct and, optionally,
	 * the mmu-notifier used to track invalidations of that address
	 * space.
	 */
	mm = __i915_mm_struct_find(i915, current->mm);
	if (mm)
		goto out;

	new = kmalloc(sizeof(*mm), GFP_KERNEL);
	if (!new)
		return -ENOMEM;

	kref_init(&new->kref);
	new->i915 = to_i915(obj->base.dev);
	new->mm = current->mm;
	new->mn = NULL;

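	/*
	 * Recheck under the lock: another thread may have installed an
	 * i915_mm_struct for this mm while we were allocating ours.
	 */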
	spin_lock(&i915->mm_lock);
	mm = __i915_mm_struct_find(i915, current->mm);
	if (!mm) {
		hash_add_rcu(i915->mm_structs,
			     &new->node,
			     (unsigned long)new->mm);
		mmgrab(current->mm);
		mm = new;
	}
	spin_unlock(&i915->mm_lock);
	if (mm != new)
		kfree(new);

out:
	obj->userptr.mm = mm;
	return ret;
}

static void
__i915_mm_struct_free__worker(struct work_struct *work)
{
	struct i915_mm_struct *mm = container_of(work, typeof(*mm), work.work);

	i915_mmu_notifier_free(mm->mn, mm->mm);
	mmdrop(mm->mm);
	kfree(mm);
}

static void
__i915_mm_struct_free(struct kref *kref)
{
	struct i915_mm_struct *mm = container_of(kref, typeof(*mm), kref);

	spin_lock(&mm->i915->mm_lock);
	hash_del_rcu(&mm->node);
	spin_unlock(&mm->i915->mm_lock);

	INIT_RCU_WORK(&mm->work, __i915_mm_struct_free__worker);
	queue_rcu_work(system_wq, &mm->work);
}

static void
i915_gem_userptr_release__mm_struct(struct drm_i915_gem_object *obj)
{
	if (obj->userptr.mm == NULL)
		return;

	kref_put(&obj->userptr.mm->kref, __i915_mm_struct_free);
	obj->userptr.mm = NULL;
}

struct get_pages_work {
	struct work_struct work;
	struct drm_i915_gem_object *obj;
	struct task_struct *task;
};

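/*
 * Coalesce the freshly pinned user pages into an sg_table and map it for the
 * GPU. If the DMA remapper cannot cope with large coalesced segments, retry
 * once with single-page segments before giving up.
 */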
static struct sg_table *
__i915_gem_userptr_alloc_pages(struct drm_i915_gem_object *obj,
			       struct page **pvec, unsigned long num_pages)
{
	unsigned int max_segment = i915_sg_segment_size();
	struct sg_table *st;
	unsigned int sg_page_sizes;
	struct scatterlist *sg;
	int ret;

	st = kmalloc(sizeof(*st), GFP_KERNEL);
	if (!st)
		return ERR_PTR(-ENOMEM);

alloc_table:
	sg = __sg_alloc_table_from_pages(st, pvec, num_pages, 0,
					 num_pages << PAGE_SHIFT, max_segment,
					 NULL, 0, GFP_KERNEL);
	if (IS_ERR(sg)) {
		kfree(st);
		return ERR_CAST(sg);
	}

	ret = i915_gem_gtt_prepare_pages(obj, st);
	if (ret) {
		sg_free_table(st);

		if (max_segment > PAGE_SIZE) {
			max_segment = PAGE_SIZE;
			goto alloc_table;
		}

		kfree(st);
		return ERR_PTR(ret);
	}

	sg_page_sizes = i915_sg_page_sizes(st->sgl);

	__i915_gem_object_set_pages(obj, st, sg_page_sizes);

	return st;
}

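/*
 * Slow path: runs on the userptr workqueue. Pin the user pages from the
 * client mm (which may not be the current mm) under mmap_read_lock, then
 * hand them over to the object if this work item is still the one the
 * object is waiting on.
 */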
static void
__i915_gem_userptr_get_pages_worker(struct work_struct *_work)
{
	struct get_pages_work *work = container_of(_work, typeof(*work), work);
	struct drm_i915_gem_object *obj = work->obj;
	const unsigned long npages = obj->base.size >> PAGE_SHIFT;
	unsigned long pinned;
	struct page **pvec;
	int ret;

	ret = -ENOMEM;
	pinned = 0;

	pvec = kvmalloc_array(npages, sizeof(struct page *), GFP_KERNEL);
	if (pvec != NULL) {
		struct mm_struct *mm = obj->userptr.mm->mm;
		unsigned int flags = 0;
		int locked = 0;

		if (!i915_gem_object_is_readonly(obj))
			flags |= FOLL_WRITE;

		ret = -EFAULT;
		if (mmget_not_zero(mm)) {
			while (pinned < npages) {
				if (!locked) {
					mmap_read_lock(mm);
					locked = 1;
				}
				ret = pin_user_pages_remote
					(mm,
					 obj->userptr.ptr + pinned * PAGE_SIZE,
					 npages - pinned,
					 flags,
					 pvec + pinned, NULL, &locked);
				if (ret < 0)
					break;

				pinned += ret;
			}
			if (locked)
				mmap_read_unlock(mm);
			mmput(mm);
		}
	}

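	/*
	 * Install the pages only if this work item is still the one recorded
	 * on the object; put_pages() cancels an in-flight worker by clearing
	 * obj->userptr.work, in which case the result is simply discarded.
	 */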
	mutex_lock_nested(&obj->mm.lock, I915_MM_GET_PAGES);
	if (obj->userptr.work == &work->work) {
		struct sg_table *pages = ERR_PTR(ret);

		if (pinned == npages) {
			pages = __i915_gem_userptr_alloc_pages(obj, pvec,
							       npages);
			if (!IS_ERR(pages)) {
				pinned = 0;
				pages = NULL;
			}
		}

		obj->userptr.work = ERR_CAST(pages);
		if (IS_ERR(pages))
			__i915_gem_userptr_set_active(obj, false);
	}
	mutex_unlock(&obj->mm.lock);

	unpin_user_pages(pvec, pinned);
	kvfree(pvec);

	i915_gem_object_put(obj);
	put_task_struct(work->task);
	kfree(work);
}

static struct sg_table *
__i915_gem_userptr_get_pages_schedule(struct drm_i915_gem_object *obj)
{
	struct get_pages_work *work;

	/*
	 * Spawn a worker so that we can acquire the user pages without
	 * holding our mutex: taking mmap_lock here would invert our lock
	 * ordering. Userspace keeps repeating the operation (thanks to the
	 * -EAGAIN below) until either the fast path succeeds or the worker
	 * completes. If the worker is cancelled or superseded, it still runs
	 * to completion but its result is discarded. Any error the worker
	 * encounters is reported back through obj->userptr.work = ERR_PTR().
	 */
	work = kmalloc(sizeof(*work), GFP_KERNEL);
	if (work == NULL)
		return ERR_PTR(-ENOMEM);

	obj->userptr.work = &work->work;

	work->obj = i915_gem_object_get(obj);

	work->task = current;
	get_task_struct(work->task);

	INIT_WORK(&work->work, __i915_gem_userptr_get_pages_worker);
	queue_work(to_i915(obj->base.dev)->mm.userptr_wq, &work->work);

	return ERR_PTR(-EAGAIN);
}

static int i915_gem_userptr_get_pages(struct drm_i915_gem_object *obj)
{
	const unsigned long num_pages = obj->base.size >> PAGE_SHIFT;
	struct mm_struct *mm = obj->userptr.mm->mm;
	struct page **pvec;
	struct sg_table *pages;
	bool active;
	int pinned;
	unsigned int gup_flags = 0;

	/*
	 * If userspace should engineer that these pages are replaced in the
	 * vma between us pinning them and the GPU finishing with them...
	 * their loss, not ours. Likewise, if the object is shared with other
	 * users (other processes or devices), synchronising any such
	 * modifications is left entirely to userspace.
	 *
	 * Try the quick and easy pin directly from the current mm first; if
	 * that is not possible (different mm, allocation failure, or a fault
	 * would be required) defer to the worker.
	 */
	if (obj->userptr.work) {
		/* active flag should still be held for the pending work */
		if (IS_ERR(obj->userptr.work))
			return PTR_ERR(obj->userptr.work);
		else
			return -EAGAIN;
	}

	pvec = NULL;
	pinned = 0;

	if (mm == current->mm) {
		pvec = kvmalloc_array(num_pages, sizeof(struct page *),
				      GFP_KERNEL |
				      __GFP_NORETRY |
				      __GFP_NOWARN);
		/* on allocation failure, simply fall back to the worker */
		if (pvec) {
			if (!i915_gem_object_is_readonly(obj))
				gup_flags |= FOLL_WRITE;
			pinned = pin_user_pages_fast_only(obj->userptr.ptr,
							  num_pages, gup_flags,
							  pvec);
		}
	}

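	/*
	 * Three outcomes: a pinning error is returned as-is; a partial (or
	 * skipped) pin defers to the worker and reports -EAGAIN; a complete
	 * pin lets us build the sg_table immediately. Only the latter two
	 * mark the object active in the mmu-notifier.
	 */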
	active = false;
	if (pinned < 0) {
		pages = ERR_PTR(pinned);
		pinned = 0;
	} else if (pinned < num_pages) {
		pages = __i915_gem_userptr_get_pages_schedule(obj);
		active = pages == ERR_PTR(-EAGAIN);
	} else {
		pages = __i915_gem_userptr_alloc_pages(obj, pvec, num_pages);
		active = !IS_ERR(pages);
	}
	if (active)
		__i915_gem_userptr_set_active(obj, true);

	if (IS_ERR(pages))
		unpin_user_pages(pvec, pinned);
	kvfree(pvec);

	return PTR_ERR_OR_ZERO(pages);
}

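/*
 * Release the object's backing store: cancel any in-flight worker, write any
 * GPU dirt back to the user pages and unpin them.
 */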
static void
i915_gem_userptr_put_pages(struct drm_i915_gem_object *obj,
			   struct sg_table *pages)
{
	struct sgt_iter sgt_iter;
	struct page *page;

	/* Cancel any inflight work and force them to restart their gup */
	obj->userptr.work = NULL;
	__i915_gem_userptr_set_active(obj, false);
	if (!pages)
		return;

	__i915_gem_object_release_shmem(obj, pages, true);
	i915_gem_gtt_finish_pages(obj, pages);

	/*
	 * We always mark objects as dirty when they are used by the GPU,
	 * just in case. However, if we set the vma as being read-only we
	 * know that the object will never have been written to.
	 */
	if (i915_gem_object_is_readonly(obj))
		obj->mm.dirty = false;

	for_each_sgt_page(page, sgt_iter, pages) {
		if (obj->mm.dirty && trylock_page(page)) {
			/*
			 * As this may not be anonymous memory (e.g. shmem)
			 * but exist on a real mapping, we have to lock
			 * the page in order to dirty it -- holding
			 * the page reference is not sufficient to
			 * prevent the inode from being truncated.
			 * Play safe and take the lock.
			 *
			 * However...!
			 *
			 * The mmu-notifier can be invalidated for a
			 * migrate_page, that is already holding the lock
			 * on the page. Such a try_to_unmap() will result
			 * in us calling put_pages() and so recursively try
			 * to lock the page. We avoid that deadlock with
			 * a trylock_page() and in exchange we risk missing
			 * some page dirtying.
			 */
			set_page_dirty(page);
			unlock_page(page);
		}

		mark_page_accessed(page);
		unpin_user_page(page);
	}
	obj->mm.dirty = false;

	sg_free_table(pages);
	kfree(pages);
}

static void
i915_gem_userptr_release(struct drm_i915_gem_object *obj)
{
	i915_gem_userptr_release__mmu_notifier(obj);
	i915_gem_userptr_release__mm_struct(obj);
}

static int
i915_gem_userptr_dmabuf_export(struct drm_i915_gem_object *obj)
{
	if (obj->userptr.mmu_object)
		return 0;

	return i915_gem_userptr_init__mmu_notifier(obj, 0);
}

static const struct drm_i915_gem_object_ops i915_gem_userptr_ops = {
	.name = "i915_gem_object_userptr",
	.flags = I915_GEM_OBJECT_HAS_STRUCT_PAGE |
		 I915_GEM_OBJECT_IS_SHRINKABLE |
		 I915_GEM_OBJECT_NO_MMAP |
		 I915_GEM_OBJECT_ASYNC_CANCEL,
	.get_pages = i915_gem_userptr_get_pages,
	.put_pages = i915_gem_userptr_put_pages,
	.dmabuf_export = i915_gem_userptr_dmabuf_export,
	.release = i915_gem_userptr_release,
};

/*
 * Creates a new GEM object that wraps ordinary memory from the calling
 * process - user memory.
 *
 * We impose several restrictions upon the memory being mapped into the GPU:
 * 1. It must be page aligned (both the start address and the size).
 * 2. It must be normal system memory that the GPU can snoop, hence this
 *    ioctl is only available on LLC or snooping hardware.
 * 3. CPU access goes through the user's own mapping of the memory; mmap of
 *    the GEM object itself is not permitted.
 * 4. Read-only userptr objects are only allowed when the hardware can
 *    enforce read-only GPU mappings.
 *
 * Synchronisation between the GPU and any other users of the pages is left
 * to userspace. The kernel only keeps the pages pinned while the object has
 * backing store, and revokes GPU access when the underlying mapping is
 * invalidated (munmap, process exit, etc.) via the mmu-notifier, unless
 * I915_USERPTR_UNSYNCHRONIZED was requested. Nothing prevents multiple,
 * possibly overlapping, userptr objects from wrapping the same pages.
 *
 * If you are thinking of using this interface to share memory between
 * drivers or devices, please use dma-buf instead; in fact, wherever
 * possible, use dma-buf instead.
 */
int
i915_gem_userptr_ioctl(struct drm_device *dev,
		       void *data,
		       struct drm_file *file)
{
	static struct lock_class_key lock_class;
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct drm_i915_gem_userptr *args = data;
	struct drm_i915_gem_object *obj;
	int ret;
	u32 handle;

	if (!HAS_LLC(dev_priv) && !HAS_SNOOP(dev_priv)) {
		/*
		 * We cannot support coherent userptr objects on hw without
		 * LLC and broken snooping.
		 */
		return -ENODEV;
	}

	if (args->flags & ~(I915_USERPTR_READ_ONLY |
			    I915_USERPTR_UNSYNCHRONIZED))
		return -EINVAL;

	/*
	 * XXX: There is a prevalence of the assumption that we fit the
	 * object's page count inside a 32bit _signed_ variable. Document
	 * this here and catch it if we ever need to fix it. In the meantime,
	 * if you do spot such a local variable, please consider fixing!
	 *
	 * Aside from our own locals (for which we have no excuse!):
	 * - sg_table embeds unsigned int for num_pages
	 * - get_user_pages*() mixes ints and longs
	 */
	if (args->user_size >> PAGE_SHIFT > INT_MAX)
		return -E2BIG;

	if (overflows_type(args->user_size, obj->base.size))
		return -E2BIG;

	if (!args->user_size)
		return -EINVAL;

	if (offset_in_page(args->user_ptr | args->user_size))
		return -EINVAL;

	if (!access_ok((char __user *)(unsigned long)args->user_ptr, args->user_size))
		return -EFAULT;

	if (args->flags & I915_USERPTR_READ_ONLY) {
		/*
		 * On almost all of the older hw, we cannot tell the GPU that
		 * a page is readonly.
		 */
		if (!dev_priv->gt.vm->has_read_only)
			return -ENODEV;
	}

	obj = i915_gem_object_alloc();
	if (obj == NULL)
		return -ENOMEM;

	drm_gem_private_object_init(dev, &obj->base, args->user_size);
	i915_gem_object_init(obj, &i915_gem_userptr_ops, &lock_class);
	obj->read_domains = I915_GEM_DOMAIN_CPU;
	obj->write_domain = I915_GEM_DOMAIN_CPU;
	i915_gem_object_set_cache_coherency(obj, I915_CACHE_LLC);

	obj->userptr.ptr = args->user_ptr;
	if (args->flags & I915_USERPTR_READ_ONLY)
		i915_gem_object_set_readonly(obj);

	/*
	 * Keep a reference to the current->mm for resolving the user pages
	 * at binding, and hook into its mmu_notifier so we can detect when
	 * the address space is torn down or invalidated.
	 */
	ret = i915_gem_userptr_init__mm_struct(obj);
	if (ret == 0)
		ret = i915_gem_userptr_init__mmu_notifier(obj, args->flags);
	if (ret == 0)
		ret = drm_gem_handle_create(file, &obj->base, &handle);

	/* drop reference from allocate - handle holds it now */
	i915_gem_object_put(obj);
	if (ret)
		return ret;

	args->handle = handle;
	return 0;
}

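/*
 * Per-device setup for userptr: the mm_struct hash and its lock, plus the
 * dedicated high-priority workqueue used by the slow-path pinning worker.
 */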
int i915_gem_init_userptr(struct drm_i915_private *dev_priv)
{
	spin_lock_init(&dev_priv->mm_lock);
	hash_init(dev_priv->mm_structs);

	dev_priv->mm.userptr_wq =
		alloc_workqueue("i915-userptr-acquire",
				WQ_HIGHPRI | WQ_UNBOUND,
				0);
	if (!dev_priv->mm.userptr_wq)
		return -ENOMEM;

	return 0;
}

void i915_gem_cleanup_userptr(struct drm_i915_private *dev_priv)
{
	destroy_workqueue(dev_priv->mm.userptr_wq);
}