#include <drm/drmP.h>
#include <drm/i915_drm.h>
#include "i915_drv.h"
#include "i915_trace.h"
#include "intel_drv.h"
#include <linux/mmu_context.h>
#include <linux/mmu_notifier.h>
#include <linux/mempolicy.h>
#include <linux/swap.h>
#include <linux/sched/mm.h>

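/*
 * Per-mm bookkeeping shared by every userptr object created for a given
 * process address space: it pins the mm_struct itself (via mmgrab()),
 * holds the lazily created i915_mmu_notifier, and is reference counted so
 * that the last userptr object tears it down from a worker.
 */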
struct i915_mm_struct {
	struct mm_struct *mm;
	struct drm_i915_private *i915;
	struct i915_mmu_notifier *mn;
	struct hlist_node node;
	struct kref kref;
	struct work_struct work;
};

#if defined(CONFIG_MMU_NOTIFIER)
#include <linux/interval_tree.h>

struct i915_mmu_notifier {
	spinlock_t lock;
	struct hlist_node node;
	struct mmu_notifier mn;
	struct rb_root_cached objects;
	struct workqueue_struct *wq;
};

struct i915_mmu_object {
	struct i915_mmu_notifier *mn;
	struct drm_i915_gem_object *obj;
	struct interval_tree_node it;
	struct list_head link;
	struct work_struct work;
	bool attached;
};

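/*
 * Worker queued from the mmu notifier when part of an object's backing
 * range is invalidated: it cancels any outstanding get-pages worker and,
 * if the pages had already been acquired, waits for the GPU, unbinds the
 * object and drops its pages so that the invalidated user memory is no
 * longer referenced.
 */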
static void cancel_userptr(struct work_struct *work)
{
	struct i915_mmu_object *mo = container_of(work, typeof(*mo), work);
	struct drm_i915_gem_object *obj = mo->obj;
	struct work_struct *active;

	/* Cancel any active worker and force us to re-evaluate gup */
	mutex_lock(&obj->mm.lock);
	active = fetch_and_zero(&obj->userptr.work);
	mutex_unlock(&obj->mm.lock);
	if (active)
		goto out;

	i915_gem_object_wait(obj, I915_WAIT_ALL, MAX_SCHEDULE_TIMEOUT, NULL);

	mutex_lock(&obj->base.dev->struct_mutex);

	/* We are inside a kthread context and can't be interrupted */
	if (i915_gem_object_unbind(obj) == 0)
		__i915_gem_object_put_pages(obj, I915_MM_NORMAL);
	WARN_ONCE(obj->mm.pages,
		  "Failed to release pages: bind_count=%d, pages_pin_count=%d, pin_display=%d\n",
		  obj->bind_count,
		  atomic_read(&obj->mm.pages_pin_count),
		  obj->pin_display);

	mutex_unlock(&obj->base.dev->struct_mutex);

out:
	i915_gem_object_put(obj);
}

static void add_object(struct i915_mmu_object *mo)
{
	if (mo->attached)
		return;

	interval_tree_insert(&mo->it, &mo->mn->objects);
	mo->attached = true;
}

static void del_object(struct i915_mmu_object *mo)
{
	if (!mo->attached)
		return;

	interval_tree_remove(&mo->it, &mo->mn->objects);
	mo->attached = false;
}

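/*
 * mmu notifier callback, invoked when a range of the process address space
 * is about to be invalidated (munmap, mremap, CoW and so on). Every userptr
 * object overlapping the range is removed from the interval tree and a
 * cancel_userptr() worker is queued for it; the workqueue is then flushed
 * so that the pages are released before the invalidation proceeds.
 */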
static void i915_gem_userptr_mn_invalidate_range_start(struct mmu_notifier *_mn,
						       struct mm_struct *mm,
						       unsigned long start,
						       unsigned long end)
{
	struct i915_mmu_notifier *mn =
		container_of(_mn, struct i915_mmu_notifier, mn);
	struct i915_mmu_object *mo;
	struct interval_tree_node *it;
	LIST_HEAD(cancelled);

	if (RB_EMPTY_ROOT(&mn->objects.rb_root))
		return;

	/* interval ranges are inclusive, but invalidate range is exclusive */
	end--;

	spin_lock(&mn->lock);
	it = interval_tree_iter_first(&mn->objects, start, end);
	while (it) {
		/* The mmu_object is released late when destroying the GEM
		 * object, so it is entirely possible to find an object here
		 * that is in the process of being freed: our serialisation
		 * is via this spinlock and not the struct_mutex. To prevent
		 * a use-after-free we only acquire a reference on the object
		 * (and queue its cancellation worker) if we manage to take
		 * that reference before it drops to zero.
		 */
		mo = container_of(it, struct i915_mmu_object, it);
		if (kref_get_unless_zero(&mo->obj->base.refcount))
			queue_work(mn->wq, &mo->work);

		list_add(&mo->link, &cancelled);
		it = interval_tree_iter_next(it, start, end);
	}
	list_for_each_entry(mo, &cancelled, link)
		del_object(mo);
	spin_unlock(&mn->lock);

	if (!list_empty(&cancelled))
		flush_workqueue(mn->wq);
}

static const struct mmu_notifier_ops i915_gem_userptr_notifier = {
	.invalidate_range_start = i915_gem_userptr_mn_invalidate_range_start,
};

static struct i915_mmu_notifier *
i915_mmu_notifier_create(struct mm_struct *mm)
{
	struct i915_mmu_notifier *mn;
	int ret;

	mn = kmalloc(sizeof(*mn), GFP_KERNEL);
	if (mn == NULL)
		return ERR_PTR(-ENOMEM);

	spin_lock_init(&mn->lock);
	mn->mn.ops = &i915_gem_userptr_notifier;
	mn->objects = RB_ROOT_CACHED;
	mn->wq = alloc_workqueue("i915-userptr-release", WQ_UNBOUND, 0);
	if (mn->wq == NULL) {
		kfree(mn);
		return ERR_PTR(-ENOMEM);
	}

	/* Protected by mmap_sem (write-lock) */
	ret = __mmu_notifier_register(&mn->mn, mm);
	if (ret) {
		destroy_workqueue(mn->wq);
		kfree(mn);
		return ERR_PTR(ret);
	}

	return mn;
}

static void
i915_gem_userptr_release__mmu_notifier(struct drm_i915_gem_object *obj)
{
	struct i915_mmu_object *mo;

	mo = obj->userptr.mmu_object;
	if (mo == NULL)
		return;

	spin_lock(&mo->mn->lock);
	del_object(mo);
	spin_unlock(&mo->mn->lock);
	kfree(mo);

	obj->userptr.mmu_object = NULL;
}

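/*
 * Return the mmu notifier for this mm, creating and registering it on first
 * use. Creation is done under mmap_sem (write) and the device's mm_lock so
 * that concurrent callers end up sharing a single notifier.
 */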
static struct i915_mmu_notifier *
i915_mmu_notifier_find(struct i915_mm_struct *mm)
{
	struct i915_mmu_notifier *mn = mm->mn;

	if (mn)
		return mn;

	down_write(&mm->mm->mmap_sem);
	mutex_lock(&mm->i915->mm_lock);
	if ((mn = mm->mn) == NULL) {
		mn = i915_mmu_notifier_create(mm->mm);
		if (!IS_ERR(mn))
			mm->mn = mn;
	}
	mutex_unlock(&mm->i915->mm_lock);
	up_write(&mm->mm->mmap_sem);

	return mn;
}

static int
i915_gem_userptr_init__mmu_notifier(struct drm_i915_gem_object *obj,
				    unsigned flags)
{
	struct i915_mmu_notifier *mn;
	struct i915_mmu_object *mo;

	if (flags & I915_USERPTR_UNSYNCHRONIZED)
		return capable(CAP_SYS_ADMIN) ? 0 : -EPERM;

	if (WARN_ON(obj->userptr.mm == NULL))
		return -EINVAL;

	mn = i915_mmu_notifier_find(obj->userptr.mm);
	if (IS_ERR(mn))
		return PTR_ERR(mn);

	mo = kzalloc(sizeof(*mo), GFP_KERNEL);
	if (mo == NULL)
		return -ENOMEM;

	mo->mn = mn;
	mo->obj = obj;
	mo->it.start = obj->userptr.ptr;
	mo->it.last = obj->userptr.ptr + obj->base.size - 1;
	INIT_WORK(&mo->work, cancel_userptr);

	obj->userptr.mmu_object = mo;
	return 0;
}

static void
i915_mmu_notifier_free(struct i915_mmu_notifier *mn,
		       struct mm_struct *mm)
{
	if (mn == NULL)
		return;

	mmu_notifier_unregister(&mn->mn, mm);
	destroy_workqueue(mn->wq);
	kfree(mn);
}

#else

static void
i915_gem_userptr_release__mmu_notifier(struct drm_i915_gem_object *obj)
{
}

static int
i915_gem_userptr_init__mmu_notifier(struct drm_i915_gem_object *obj,
				    unsigned flags)
{
	if ((flags & I915_USERPTR_UNSYNCHRONIZED) == 0)
		return -ENODEV;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	return 0;
}

static void
i915_mmu_notifier_free(struct i915_mmu_notifier *mn,
		       struct mm_struct *mm)
{
}

#endif

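/*
 * Find the existing i915_mm_struct for a given mm_struct, if any. Callers
 * must hold dev_priv->mm_lock.
 */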
static struct i915_mm_struct *
__i915_mm_struct_find(struct drm_i915_private *dev_priv, struct mm_struct *real)
{
	struct i915_mm_struct *mm;

	/* Protected by dev_priv->mm_lock */
	hash_for_each_possible(dev_priv->mm_structs, mm, node, (unsigned long)real)
		if (mm->mm == real)
			return mm;

	return NULL;
}

static int
i915_gem_userptr_init__mm_struct(struct drm_i915_gem_object *obj)
{
	struct drm_i915_private *dev_priv = to_i915(obj->base.dev);
	struct i915_mm_struct *mm;
	int ret = 0;

	/* Look up, or lazily create, the i915_mm_struct for current->mm.
	 * It is shared (and reference counted) between all userptr objects
	 * created by this client for this mm. Its teardown, unregistering
	 * the mmu notifier and dropping the mm reference, is deferred to a
	 * worker (see __i915_mm_struct_free()) rather than being performed
	 * directly in the object release path.
	 */
	mutex_lock(&dev_priv->mm_lock);
	mm = __i915_mm_struct_find(dev_priv, current->mm);
	if (mm == NULL) {
		mm = kmalloc(sizeof(*mm), GFP_KERNEL);
		if (mm == NULL) {
			ret = -ENOMEM;
			goto out;
		}

		kref_init(&mm->kref);
		mm->i915 = to_i915(obj->base.dev);

		mm->mm = current->mm;
		mmgrab(current->mm);

		mm->mn = NULL;

		/* Protected by dev_priv->mm_lock */
		hash_add(dev_priv->mm_structs,
			 &mm->node, (unsigned long)mm->mm);
	} else
		kref_get(&mm->kref);

	obj->userptr.mm = mm;
out:
	mutex_unlock(&dev_priv->mm_lock);
	return ret;
}

static void
__i915_mm_struct_free__worker(struct work_struct *work)
{
	struct i915_mm_struct *mm = container_of(work, typeof(*mm), work);
	i915_mmu_notifier_free(mm->mn, mm->mm);
	mmdrop(mm->mm);
	kfree(mm);
}

static void
__i915_mm_struct_free(struct kref *kref)
{
	struct i915_mm_struct *mm = container_of(kref, typeof(*mm), kref);

	/* Protected by dev_priv->mm_lock */
	hash_del(&mm->node);
	mutex_unlock(&mm->i915->mm_lock);

	INIT_WORK(&mm->work, __i915_mm_struct_free__worker);
	queue_work(mm->i915->mm.userptr_wq, &mm->work);
}

static void
i915_gem_userptr_release__mm_struct(struct drm_i915_gem_object *obj)
{
	if (obj->userptr.mm == NULL)
		return;

	kref_put_mutex(&obj->userptr.mm->kref,
		       __i915_mm_struct_free,
		       &to_i915(obj->base.dev)->mm_lock);
	obj->userptr.mm = NULL;
}

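/*
 * Deferred get-pages request: the user pages are pinned from a workqueue on
 * behalf of @task when they cannot be pinned directly in the caller's
 * context (see __i915_gem_userptr_get_pages_schedule()).
 */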
struct get_pages_work {
	struct work_struct work;
	struct drm_i915_gem_object *obj;
	struct task_struct *task;
};

#if IS_ENABLED(CONFIG_SWIOTLB)
#define swiotlb_active() swiotlb_nr_tbl()
#else
#define swiotlb_active() 0
#endif

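/*
 * Build an sg_table for the pinned user pages. When swiotlb is active we
 * keep one page per scatterlist entry instead of letting
 * sg_alloc_table_from_pages() coalesce contiguous pages, so that each
 * segment stays small enough to be bounced.
 */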
static int
st_set_pages(struct sg_table **st, struct page **pvec, int num_pages)
{
	struct scatterlist *sg;
	int ret, n;

	*st = kmalloc(sizeof(**st), GFP_KERNEL);
	if (*st == NULL)
		return -ENOMEM;

	if (swiotlb_active()) {
		ret = sg_alloc_table(*st, num_pages, GFP_KERNEL);
		if (ret)
			goto err;

		for_each_sg((*st)->sgl, sg, num_pages, n)
			sg_set_page(sg, pvec[n], PAGE_SIZE, 0);
	} else {
		ret = sg_alloc_table_from_pages(*st, pvec, num_pages,
						0, num_pages << PAGE_SHIFT,
						GFP_KERNEL);
		if (ret)
			goto err;
	}

	return 0;

err:
	kfree(*st);
	*st = NULL;
	return ret;
}

static struct sg_table *
__i915_gem_userptr_set_pages(struct drm_i915_gem_object *obj,
			     struct page **pvec, int num_pages)
{
	struct sg_table *pages;
	int ret;

	ret = st_set_pages(&pages, pvec, num_pages);
	if (ret)
		return ERR_PTR(ret);

	ret = i915_gem_gtt_prepare_pages(obj, pages);
	if (ret) {
		sg_free_table(pages);
		kfree(pages);
		return ERR_PTR(ret);
	}

	return pages;
}

static int
__i915_gem_userptr_set_active(struct drm_i915_gem_object *obj,
			      bool value)
{
	int ret = 0;

	/* An object is "active" while it is registered in its mmu
	 * notifier's interval tree: an invalidation of the user range will
	 * then find it and schedule cancel_userptr(). The flag is updated
	 * under the notifier spinlock so that it is serialised against
	 * i915_gem_userptr_mn_invalidate_range_start().
	 */
#if defined(CONFIG_MMU_NOTIFIER)
	if (obj->userptr.mmu_object == NULL)
		return 0;

	spin_lock(&obj->userptr.mmu_object->mn->lock);
	/* If a cancel_userptr() worker is still pending for this object we
	 * cannot mark it active again yet; report -EAGAIN so the caller
	 * retries once the worker has completed.
	 */
	if (!value)
		del_object(obj->userptr.mmu_object);
	else if (!work_pending(&obj->userptr.mmu_object->work))
		add_object(obj->userptr.mmu_object);
	else
		ret = -EAGAIN;
	spin_unlock(&obj->userptr.mmu_object->mn->lock);
#endif

	return ret;
}

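/*
 * Workqueue path for acquiring the user pages: pin them with
 * get_user_pages_remote() under the target mm's mmap_sem and, if this
 * request is still the current one (obj->userptr.work still points at us),
 * install the resulting sg_table or record the error for
 * i915_gem_userptr_get_pages() to report.
 */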
static void
__i915_gem_userptr_get_pages_worker(struct work_struct *_work)
{
	struct get_pages_work *work = container_of(_work, typeof(*work), work);
	struct drm_i915_gem_object *obj = work->obj;
	const int npages = obj->base.size >> PAGE_SHIFT;
	struct page **pvec;
	int pinned, ret;

	ret = -ENOMEM;
	pinned = 0;

	pvec = kvmalloc_array(npages, sizeof(struct page *), GFP_KERNEL);
	if (pvec != NULL) {
		struct mm_struct *mm = obj->userptr.mm->mm;
		unsigned int flags = 0;

		if (!obj->userptr.read_only)
			flags |= FOLL_WRITE;

		ret = -EFAULT;
		if (mmget_not_zero(mm)) {
			down_read(&mm->mmap_sem);
			while (pinned < npages) {
				ret = get_user_pages_remote
					(work->task, mm,
					 obj->userptr.ptr + pinned * PAGE_SIZE,
					 npages - pinned,
					 flags,
					 pvec + pinned, NULL, NULL);
				if (ret < 0)
					break;

				pinned += ret;
			}
			up_read(&mm->mmap_sem);
			mmput(mm);
		}
	}

	mutex_lock(&obj->mm.lock);
	if (obj->userptr.work == &work->work) {
		struct sg_table *pages = ERR_PTR(ret);

		if (pinned == npages) {
			pages = __i915_gem_userptr_set_pages(obj, pvec, npages);
			if (!IS_ERR(pages)) {
				__i915_gem_object_set_pages(obj, pages);
				pinned = 0;
				pages = NULL;
			}
		}

		obj->userptr.work = ERR_CAST(pages);
		if (IS_ERR(pages))
			__i915_gem_userptr_set_active(obj, false);
	}
	mutex_unlock(&obj->mm.lock);

	release_pages(pvec, pinned, 0);
	kvfree(pvec);

	i915_gem_object_put(obj);
	put_task_struct(work->task);
	kfree(work);
}

static struct sg_table *
__i915_gem_userptr_get_pages_schedule(struct drm_i915_gem_object *obj)
{
	struct get_pages_work *work;

	/* Spawn a worker so that we can acquire the user pages without
	 * holding our mutex. Access to the user pages requires mmap_sem,
	 * which we cannot take here without risking a lock inversion
	 * against the locks already held by our caller.
	 *
	 * Userspace will keep on repeating the operation (thanks to the
	 * -EAGAIN we return) until either it hits the fast path or the
	 * worker completes. If the worker is cancelled or superseded, the
	 * task still runs but its result is ignored. If the worker
	 * encounters an error, it reports that error back to this
	 * function through obj->userptr.work = ERR_PTR().
	 */
	work = kmalloc(sizeof(*work), GFP_KERNEL);
	if (work == NULL)
		return ERR_PTR(-ENOMEM);

	obj->userptr.work = &work->work;

	work->obj = i915_gem_object_get(obj);

	work->task = current;
	get_task_struct(work->task);

	INIT_WORK(&work->work, __i915_gem_userptr_get_pages_worker);
	queue_work(to_i915(obj->base.dev)->mm.userptr_wq, &work->work);

	return ERR_PTR(-EAGAIN);
}

static struct sg_table *
i915_gem_userptr_get_pages(struct drm_i915_gem_object *obj)
{
	const int num_pages = obj->base.size >> PAGE_SHIFT;
	struct mm_struct *mm = obj->userptr.mm->mm;
	struct page **pvec;
	struct sg_table *pages;
	bool active;
	int pinned;

	/* If a worker has already been scheduled for this object, either
	 * report the error it recorded or ask the caller to try again once
	 * it has completed. Otherwise try the fast path first: when called
	 * from the process that owns the pages we attempt to pin them
	 * directly with __get_user_pages_fast(); if that cannot pin every
	 * page (or the mm is not our own), fall back to scheduling
	 * __i915_gem_userptr_get_pages_worker() and return -EAGAIN so that
	 * the caller retries later.
	 */
	if (obj->userptr.work) {
		/* active flag should still be held for the pending work */
		if (IS_ERR(obj->userptr.work))
			return ERR_CAST(obj->userptr.work);
		else
			return ERR_PTR(-EAGAIN);
	}

	pvec = NULL;
	pinned = 0;

	if (mm == current->mm) {
		pvec = kvmalloc_array(num_pages, sizeof(struct page *),
				      GFP_KERNEL |
				      __GFP_NORETRY |
				      __GFP_NOWARN);
		if (pvec)
			pinned = __get_user_pages_fast(obj->userptr.ptr,
						       num_pages,
						       !obj->userptr.read_only,
						       pvec);
	}

	active = false;
	if (pinned < 0) {
		pages = ERR_PTR(pinned);
		pinned = 0;
	} else if (pinned < num_pages) {
		pages = __i915_gem_userptr_get_pages_schedule(obj);
		active = pages == ERR_PTR(-EAGAIN);
	} else {
		pages = __i915_gem_userptr_set_pages(obj, pvec, num_pages);
		active = !IS_ERR(pages);
	}
	if (active)
		__i915_gem_userptr_set_active(obj, true);

	if (IS_ERR(pages))
		release_pages(pvec, pinned, 0);
	kvfree(pvec);

	return pages;
}

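/*
 * Release the pinned user pages: mark them dirty (when the GPU may have
 * written to them) and accessed before unpinning, then free the sg_table.
 */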
static void
i915_gem_userptr_put_pages(struct drm_i915_gem_object *obj,
			   struct sg_table *pages)
{
	struct sgt_iter sgt_iter;
	struct page *page;

	BUG_ON(obj->userptr.work != NULL);
	__i915_gem_userptr_set_active(obj, false);

	if (obj->mm.madv != I915_MADV_WILLNEED)
		obj->mm.dirty = false;

	i915_gem_gtt_finish_pages(obj, pages);

	for_each_sgt_page(page, sgt_iter, pages) {
		if (obj->mm.dirty)
			set_page_dirty(page);

		mark_page_accessed(page);
		put_page(page);
	}
	obj->mm.dirty = false;

	sg_free_table(pages);
	kfree(pages);
}

static void
i915_gem_userptr_release(struct drm_i915_gem_object *obj)
{
	i915_gem_userptr_release__mmu_notifier(obj);
	i915_gem_userptr_release__mm_struct(obj);
}

static int
i915_gem_userptr_dmabuf_export(struct drm_i915_gem_object *obj)
{
	if (obj->userptr.mmu_object)
		return 0;

	return i915_gem_userptr_init__mmu_notifier(obj, 0);
}

static const struct drm_i915_gem_object_ops i915_gem_userptr_ops = {
	.flags = I915_GEM_OBJECT_HAS_STRUCT_PAGE |
		 I915_GEM_OBJECT_IS_SHRINKABLE,
	.get_pages = i915_gem_userptr_get_pages,
	.put_pages = i915_gem_userptr_put_pages,
	.dmabuf_export = i915_gem_userptr_dmabuf_export,
	.release = i915_gem_userptr_release,
};

/*
 * i915_gem_userptr_ioctl - create a GEM object backed by user memory
 *
 * Creates a new GEM object wrapping ordinary process memory supplied by
 * userspace through DRM_IOCTL_I915_GEM_USERPTR. The checks below enforce:
 *
 * 1. The user pointer and size must both be page aligned.
 * 2. Only the I915_USERPTR_READ_ONLY and I915_USERPTR_UNSYNCHRONIZED flags
 *    are accepted. READ_ONLY is currently rejected with -ENODEV as the GPU
 *    cannot yet be told to treat the pages as read-only, and
 *    UNSYNCHRONIZED (forgoing the mmu notifier) requires CAP_SYS_ADMIN.
 * 3. The hardware must be cache coherent with the CPU (LLC or snooping),
 *    since the object is created with I915_CACHE_LLC coherency.
 *
 * The object stays tied to the creating process: we take a reference on
 * current->mm and, unless I915_USERPTR_UNSYNCHRONIZED is set, register an
 * mmu notifier so that unmapping or otherwise invalidating the range
 * cancels GPU activity and releases the pinned pages.
 */
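/*
 * Illustrative userspace sketch (not part of this file; assumes a DRM fd,
 * a page-aligned allocation whose size is a multiple of the page size, and
 * libdrm's drmIoctl() wrapper; use_gem_handle() is a hypothetical consumer):
 *
 *	struct drm_i915_gem_userptr arg = {
 *		.user_ptr = (__u64)(uintptr_t)ptr,
 *		.user_size = size,
 *		.flags = 0,
 *	};
 *	if (drmIoctl(fd, DRM_IOCTL_I915_GEM_USERPTR, &arg) == 0)
 *		use_gem_handle(arg.handle);
 */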
int
i915_gem_userptr_ioctl(struct drm_device *dev, void *data, struct drm_file *file)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct drm_i915_gem_userptr *args = data;
	struct drm_i915_gem_object *obj;
	int ret;
	u32 handle;

	if (!HAS_LLC(dev_priv) && !HAS_SNOOP(dev_priv)) {
		/* We cannot support coherent userptr objects on hw without
		 * LLC and broken snooping.
		 */
		return -ENODEV;
	}

	if (args->flags & ~(I915_USERPTR_READ_ONLY |
			    I915_USERPTR_UNSYNCHRONIZED))
		return -EINVAL;

	if (offset_in_page(args->user_ptr | args->user_size))
		return -EINVAL;

	if (!access_ok(args->flags & I915_USERPTR_READ_ONLY ? VERIFY_READ : VERIFY_WRITE,
		       (char __user *)(unsigned long)args->user_ptr, args->user_size))
		return -EFAULT;

	if (args->flags & I915_USERPTR_READ_ONLY) {
		/* On almost all of the current hw, we cannot tell the GPU
		 * that a page is readonly, so this is just a placeholder
		 * in the uAPI.
		 */
		return -ENODEV;
	}

	obj = i915_gem_object_alloc(dev_priv);
	if (obj == NULL)
		return -ENOMEM;

	drm_gem_private_object_init(dev, &obj->base, args->user_size);
	i915_gem_object_init(obj, &i915_gem_userptr_ops);
	obj->base.read_domains = I915_GEM_DOMAIN_CPU;
	obj->base.write_domain = I915_GEM_DOMAIN_CPU;
	i915_gem_object_set_cache_coherency(obj, I915_CACHE_LLC);

	obj->userptr.ptr = args->user_ptr;
	obj->userptr.read_only = !!(args->flags & I915_USERPTR_READ_ONLY);

	/* And keep a pointer to the current->mm for resolving the user pages
	 * at binding. This means that we need to hook into the mmu_notifier
	 * in order to detect if the mmu is destroyed.
	 */
	ret = i915_gem_userptr_init__mm_struct(obj);
	if (ret == 0)
		ret = i915_gem_userptr_init__mmu_notifier(obj, args->flags);
	if (ret == 0)
		ret = drm_gem_handle_create(file, &obj->base, &handle);

	/* drop reference from allocate - handle holds it now */
	i915_gem_object_put(obj);
	if (ret)
		return ret;

	args->handle = handle;
	return 0;
}

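/*
 * One-time driver setup for userptr: the i915_mm_struct hash and its lock,
 * plus the high-priority workqueue used to acquire user pages out of line.
 * i915_gem_cleanup_userptr() tears the workqueue down again on unload.
 */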
int i915_gem_init_userptr(struct drm_i915_private *dev_priv)
{
	mutex_init(&dev_priv->mm_lock);
	hash_init(dev_priv->mm_structs);

	dev_priv->mm.userptr_wq =
		alloc_workqueue("i915-userptr-acquire", WQ_HIGHPRI, 0);
	if (!dev_priv->mm.userptr_wq)
		return -ENOMEM;

	return 0;
}

void i915_gem_cleanup_userptr(struct drm_i915_private *dev_priv)
{
	destroy_workqueue(dev_priv->mm.userptr_wq);
}