/*
 * i915 userptr support: GEM objects backed by arbitrary page-aligned user
 * memory. Pages are pinned on demand via get_user_pages and, unless the
 * object was created unsynchronized, an MMU notifier revokes the GPU's
 * access whenever the backing mappings are invalidated.
 */

#include <drm/drmP.h>
#include <drm/i915_drm.h>
#include "i915_drv.h"
#include "i915_trace.h"
#include "intel_drv.h"
#include <linux/mmu_context.h>
#include <linux/mmu_notifier.h>
#include <linux/mempolicy.h>
#include <linux/swap.h>
#include <linux/sched/mm.h>

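/*
 * Per-mm bookkeeping shared by every userptr object created against the
 * same process address space. It holds a reference on the mm_struct
 * (mmgrab) and owns the optional MMU notifier; the final kref_put defers
 * teardown to a worker so it runs outside of the GEM locks.
 */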
struct i915_mm_struct {
	struct mm_struct *mm;
	struct drm_i915_private *i915;
	struct i915_mmu_notifier *mn;
	struct hlist_node node;
	struct kref kref;
	struct work_struct work;
};

#if defined(CONFIG_MMU_NOTIFIER)
#include <linux/interval_tree.h>

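/*
 * MMU notifier state for one mm_struct: an interval tree of the userptr
 * objects that currently have pages, protected by @lock, plus a dedicated
 * workqueue on which page revocation (cancel_userptr) is run.
 */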
struct i915_mmu_notifier {
	spinlock_t lock;
	struct hlist_node node;
	struct mmu_notifier mn;
	struct rb_root_cached objects;
	struct workqueue_struct *wq;
};

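/*
 * Per-object node in the notifier's interval tree. @it spans the object's
 * user address range, @work runs cancel_userptr() when that range is
 * invalidated, and @attached records whether the node is currently in the
 * tree.
 */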
struct i915_mmu_object {
	struct i915_mmu_notifier *mn;
	struct drm_i915_gem_object *obj;
	struct interval_tree_node it;
	struct list_head link;
	struct work_struct work;
	bool attached;
};

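/*
 * Worker run when the user range backing @obj is invalidated. If a
 * get_pages worker is still in flight it is simply cancelled (its results
 * will be discarded); otherwise we wait for outstanding GPU activity, then
 * unbind the object and release its pages. The reference taken by the
 * invalidate callback is dropped on exit.
 */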
static void cancel_userptr(struct work_struct *work)
{
	struct i915_mmu_object *mo = container_of(work, typeof(*mo), work);
	struct drm_i915_gem_object *obj = mo->obj;
	struct work_struct *active;

	/* Cancel any active worker and force us to re-evaluate gup */
	mutex_lock(&obj->mm.lock);
	active = fetch_and_zero(&obj->userptr.work);
	mutex_unlock(&obj->mm.lock);
	if (active)
		goto out;

	i915_gem_object_wait(obj, I915_WAIT_ALL, MAX_SCHEDULE_TIMEOUT, NULL);

	mutex_lock(&obj->base.dev->struct_mutex);

	/* We are inside a kthread context and can't be interrupted */
	if (i915_gem_object_unbind(obj) == 0)
		__i915_gem_object_put_pages(obj, I915_MM_NORMAL);
	WARN_ONCE(i915_gem_object_has_pages(obj),
		  "Failed to release pages: bind_count=%d, pages_pin_count=%d, pin_global=%d\n",
		  obj->bind_count,
		  atomic_read(&obj->mm.pages_pin_count),
		  obj->pin_global);

	mutex_unlock(&obj->base.dev->struct_mutex);

out:
	i915_gem_object_put(obj);
}

static void add_object(struct i915_mmu_object *mo)
{
	if (mo->attached)
		return;

	interval_tree_insert(&mo->it, &mo->mn->objects);
	mo->attached = true;
}

static void del_object(struct i915_mmu_object *mo)
{
	if (!mo->attached)
		return;

	interval_tree_remove(&mo->it, &mo->mn->objects);
	mo->attached = false;
}

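/*
 * mmu_notifier callback: a range of the process address space is about to
 * be invalidated (munmap, mprotect, page migration, ...). Every userptr
 * object overlapping the range is detached from the interval tree and its
 * cancel_userptr() worker is queued; the workqueue is then flushed so the
 * GPU has given up the pages before the core VM proceeds.
 */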
static int i915_gem_userptr_mn_invalidate_range_start(struct mmu_notifier *_mn,
						      struct mm_struct *mm,
						      unsigned long start,
						      unsigned long end,
						      bool blockable)
{
	struct i915_mmu_notifier *mn =
		container_of(_mn, struct i915_mmu_notifier, mn);
	struct i915_mmu_object *mo;
	struct interval_tree_node *it;
	LIST_HEAD(cancelled);

	if (RB_EMPTY_ROOT(&mn->objects.rb_root))
		return 0;

	/* interval ranges are inclusive, but invalidate range is exclusive */
	end--;

	spin_lock(&mn->lock);
	it = interval_tree_iter_first(&mn->objects, start, end);
	while (it) {
		if (!blockable) {
			spin_unlock(&mn->lock);
			return -EAGAIN;
		}

		/*
		 * We cannot revoke the pages while holding mn->lock, as that
		 * would invert the locking against the mutexes taken during
		 * teardown. Instead, take a reference on each overlapping
		 * object (unless it is already being freed) and hand it to
		 * the cancel_userptr() worker, which drops the reference
		 * again. The objects are collected on a local list so that
		 * they can be detached from the interval tree before we wait
		 * for the workers to complete.
		 */
		mo = container_of(it, struct i915_mmu_object, it);
		if (kref_get_unless_zero(&mo->obj->base.refcount))
			queue_work(mn->wq, &mo->work);

		list_add(&mo->link, &cancelled);
		it = interval_tree_iter_next(it, start, end);
	}
	list_for_each_entry(mo, &cancelled, link)
		del_object(mo);
	spin_unlock(&mn->lock);

	if (!list_empty(&cancelled))
		flush_workqueue(mn->wq);

	return 0;
}

static const struct mmu_notifier_ops i915_gem_userptr_notifier = {
	.invalidate_range_start = i915_gem_userptr_mn_invalidate_range_start,
};

static struct i915_mmu_notifier *
i915_mmu_notifier_create(struct mm_struct *mm)
{
	struct i915_mmu_notifier *mn;

	mn = kmalloc(sizeof(*mn), GFP_KERNEL);
	if (mn == NULL)
		return ERR_PTR(-ENOMEM);

	spin_lock_init(&mn->lock);
	mn->mn.ops = &i915_gem_userptr_notifier;
	mn->objects = RB_ROOT_CACHED;
	mn->wq = alloc_workqueue("i915-userptr-release",
				 WQ_UNBOUND | WQ_MEM_RECLAIM,
				 0);
	if (mn->wq == NULL) {
		kfree(mn);
		return ERR_PTR(-ENOMEM);
	}

	return mn;
}

static void
i915_gem_userptr_release__mmu_notifier(struct drm_i915_gem_object *obj)
{
	struct i915_mmu_object *mo;

	mo = obj->userptr.mmu_object;
	if (mo == NULL)
		return;

	spin_lock(&mo->mn->lock);
	del_object(mo);
	spin_unlock(&mo->mn->lock);
	kfree(mo);

	obj->userptr.mmu_object = NULL;
}

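/*
 * Look up the MMU notifier for this mm, creating and registering it on
 * first use. Allocation happens outside of the locks, so two threads may
 * race to install a notifier; the loser frees its copy and the installed
 * pointer (mm->mn) is returned.
 */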
static struct i915_mmu_notifier *
i915_mmu_notifier_find(struct i915_mm_struct *mm)
{
	struct i915_mmu_notifier *mn;
	int err = 0;

	mn = mm->mn;
	if (mn)
		return mn;

	mn = i915_mmu_notifier_create(mm->mm);
	if (IS_ERR(mn))
		err = PTR_ERR(mn);

	down_write(&mm->mm->mmap_sem);
	mutex_lock(&mm->i915->mm_lock);
	if (mm->mn == NULL && !err) {
		/* Protected by mmap_sem (write-lock) */
		err = __mmu_notifier_register(&mn->mn, mm->mm);
		if (!err) {
			/* Protected by mm_lock */
			mm->mn = fetch_and_zero(&mn);
		}
	} else if (mm->mn) {
		/*
		 * Someone else raced and successfully installed the mmu
		 * notifier, we can cancel our own errors.
		 */
		err = 0;
	}
	mutex_unlock(&mm->i915->mm_lock);
	up_write(&mm->mm->mmap_sem);

	if (mn && !IS_ERR(mn)) {
		destroy_workqueue(mn->wq);
		kfree(mn);
	}

	return err ? ERR_PTR(err) : mm->mn;
}

static int
i915_gem_userptr_init__mmu_notifier(struct drm_i915_gem_object *obj,
				    unsigned flags)
{
	struct i915_mmu_notifier *mn;
	struct i915_mmu_object *mo;

	if (flags & I915_USERPTR_UNSYNCHRONIZED)
		return capable(CAP_SYS_ADMIN) ? 0 : -EPERM;

	if (WARN_ON(obj->userptr.mm == NULL))
		return -EINVAL;

	mn = i915_mmu_notifier_find(obj->userptr.mm);
	if (IS_ERR(mn))
		return PTR_ERR(mn);

	mo = kzalloc(sizeof(*mo), GFP_KERNEL);
	if (mo == NULL)
		return -ENOMEM;

	mo->mn = mn;
	mo->obj = obj;
	mo->it.start = obj->userptr.ptr;
	mo->it.last = obj->userptr.ptr + obj->base.size - 1;
	INIT_WORK(&mo->work, cancel_userptr);

	obj->userptr.mmu_object = mo;
	return 0;
}

static void
i915_mmu_notifier_free(struct i915_mmu_notifier *mn,
		       struct mm_struct *mm)
{
	if (mn == NULL)
		return;

	mmu_notifier_unregister(&mn->mn, mm);
	destroy_workqueue(mn->wq);
	kfree(mn);
}

#else

static void
i915_gem_userptr_release__mmu_notifier(struct drm_i915_gem_object *obj)
{
}

static int
i915_gem_userptr_init__mmu_notifier(struct drm_i915_gem_object *obj,
				    unsigned flags)
{
	if ((flags & I915_USERPTR_UNSYNCHRONIZED) == 0)
		return -ENODEV;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	return 0;
}

static void
i915_mmu_notifier_free(struct i915_mmu_notifier *mn,
		       struct mm_struct *mm)
{
}

#endif

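/*
 * Find the i915_mm_struct already tracking @real, i.e. whether some userptr
 * object has previously been created against this process address space.
 */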
static struct i915_mm_struct *
__i915_mm_struct_find(struct drm_i915_private *dev_priv, struct mm_struct *real)
{
	struct i915_mm_struct *mm;

	/* Protected by dev_priv->mm_lock */
	hash_for_each_possible(dev_priv->mm_structs, mm, node, (unsigned long)real)
		if (mm->mm == real)
			return mm;

	return NULL;
}

static int
i915_gem_userptr_init__mm_struct(struct drm_i915_gem_object *obj)
{
	struct drm_i915_private *dev_priv = to_i915(obj->base.dev);
	struct i915_mm_struct *mm;
	int ret = 0;

	/*
	 * Only the mm_struct itself needs to stay around for the lifetime
	 * of the object, so take a reference with mmgrab() rather than
	 * pinning the whole address space with mmget(). Unregistering the
	 * mmu notifier and dropping that reference again can sleep, so the
	 * final kref_put defers the teardown to a worker that runs outside
	 * of the locks held during object release
	 * (see __i915_mm_struct_free__worker).
	 */
	mutex_lock(&dev_priv->mm_lock);
	mm = __i915_mm_struct_find(dev_priv, current->mm);
	if (mm == NULL) {
		mm = kmalloc(sizeof(*mm), GFP_KERNEL);
		if (mm == NULL) {
			ret = -ENOMEM;
			goto out;
		}

		kref_init(&mm->kref);
		mm->i915 = to_i915(obj->base.dev);

		mm->mm = current->mm;
		mmgrab(current->mm);

		mm->mn = NULL;

		/* Protected by dev_priv->mm_lock */
		hash_add(dev_priv->mm_structs,
			 &mm->node, (unsigned long)mm->mm);
	} else
		kref_get(&mm->kref);

	obj->userptr.mm = mm;
out:
	mutex_unlock(&dev_priv->mm_lock);
	return ret;
}

static void
__i915_mm_struct_free__worker(struct work_struct *work)
{
	struct i915_mm_struct *mm = container_of(work, typeof(*mm), work);
	i915_mmu_notifier_free(mm->mn, mm->mm);
	mmdrop(mm->mm);
	kfree(mm);
}

static void
__i915_mm_struct_free(struct kref *kref)
{
	struct i915_mm_struct *mm = container_of(kref, typeof(*mm), kref);

	/* Protected by dev_priv->mm_lock */
	hash_del(&mm->node);
	mutex_unlock(&mm->i915->mm_lock);

	INIT_WORK(&mm->work, __i915_mm_struct_free__worker);
	queue_work(mm->i915->mm.userptr_wq, &mm->work);
}

static void
i915_gem_userptr_release__mm_struct(struct drm_i915_gem_object *obj)
{
	if (obj->userptr.mm == NULL)
		return;

	kref_put_mutex(&obj->userptr.mm->kref,
		       __i915_mm_struct_free,
		       &to_i915(obj->base.dev)->mm_lock);
	obj->userptr.mm = NULL;
}

struct get_pages_work {
	struct work_struct work;
	struct drm_i915_gem_object *obj;
	struct task_struct *task;
};

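/*
 * Build an sg_table for the pinned user pages, map it for DMA and attach it
 * to the object. If mapping fails with large sg segments, retry once with
 * PAGE_SIZE segments before giving up.
 */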
static struct sg_table *
__i915_gem_userptr_alloc_pages(struct drm_i915_gem_object *obj,
			       struct page **pvec, int num_pages)
{
	unsigned int max_segment = i915_sg_segment_size();
	struct sg_table *st;
	unsigned int sg_page_sizes;
	int ret;

	st = kmalloc(sizeof(*st), GFP_KERNEL);
	if (!st)
		return ERR_PTR(-ENOMEM);

alloc_table:
	ret = __sg_alloc_table_from_pages(st, pvec, num_pages,
					  0, num_pages << PAGE_SHIFT,
					  max_segment,
					  GFP_KERNEL);
	if (ret) {
		kfree(st);
		return ERR_PTR(ret);
	}

	ret = i915_gem_gtt_prepare_pages(obj, st);
	if (ret) {
		sg_free_table(st);

		if (max_segment > PAGE_SIZE) {
			max_segment = PAGE_SIZE;
			goto alloc_table;
		}

		kfree(st);
		return ERR_PTR(ret);
	}

	sg_page_sizes = i915_sg_page_sizes(st->sgl);

	__i915_gem_object_set_pages(obj, st, sg_page_sizes);

	return st;
}

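/*
 * Mark the object as having (or no longer having) pages that the mmu
 * notifier must track. Returns -EAGAIN if a cancel_userptr() worker is
 * still pending for this object, in which case the caller must back off
 * and retry the page acquisition.
 */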
static int
__i915_gem_userptr_set_active(struct drm_i915_gem_object *obj,
			      bool value)
{
	int ret = 0;

	/*
	 * During mm_invalidate_range we need to cancel any userptr that
	 * overlaps the range being invalidated. Doing so requires the
	 * struct_mutex, and that risks recursion. In order to cause
	 * recursion, the user must alias the userptr address space with
	 * a GTT mmapping (possible with a MAP_FIXED) - then when we have
	 * to invalidate that mmapping, mm_invalidate_range is called with
	 * the userptr address *and* the struct_mutex held. To prevent that
	 * we set a flag under the i915_mmu_notifier spinlock to indicate
	 * whether this object is valid.
	 */
#if defined(CONFIG_MMU_NOTIFIER)
	if (obj->userptr.mmu_object == NULL)
		return 0;

	spin_lock(&obj->userptr.mmu_object->mn->lock);
	/*
	 * In order to serialise get_pages with an outstanding
	 * cancel_userptr, we must drop the struct_mutex and try again.
	 */
	if (!value)
		del_object(obj->userptr.mmu_object);
	else if (!work_pending(&obj->userptr.mmu_object->work))
		add_object(obj->userptr.mmu_object);
	else
		ret = -EAGAIN;
	spin_unlock(&obj->userptr.mmu_object->mn->lock);
#endif

	return ret;
}

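/*
 * Deferred get_user_pages: runs from the userptr workqueue when the fast
 * path could not pin all pages. It pins the pages under the target mm's
 * mmap_sem and, if this object still expects the result (obj->userptr.work
 * has not been cancelled in the meantime), attaches them to the object.
 */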
static void
__i915_gem_userptr_get_pages_worker(struct work_struct *_work)
{
	struct get_pages_work *work = container_of(_work, typeof(*work), work);
	struct drm_i915_gem_object *obj = work->obj;
	const int npages = obj->base.size >> PAGE_SHIFT;
	struct page **pvec;
	int pinned, ret;

	ret = -ENOMEM;
	pinned = 0;

	pvec = kvmalloc_array(npages, sizeof(struct page *), GFP_KERNEL);
	if (pvec != NULL) {
		struct mm_struct *mm = obj->userptr.mm->mm;
		unsigned int flags = 0;

		if (!i915_gem_object_is_readonly(obj))
			flags |= FOLL_WRITE;

		ret = -EFAULT;
		if (mmget_not_zero(mm)) {
			down_read(&mm->mmap_sem);
			while (pinned < npages) {
				ret = get_user_pages_remote
					(work->task, mm,
					 obj->userptr.ptr + pinned * PAGE_SIZE,
					 npages - pinned,
					 flags,
					 pvec + pinned, NULL, NULL);
				if (ret < 0)
					break;

				pinned += ret;
			}
			up_read(&mm->mmap_sem);
			mmput(mm);
		}
	}

	mutex_lock(&obj->mm.lock);
	if (obj->userptr.work == &work->work) {
		struct sg_table *pages = ERR_PTR(ret);

		if (pinned == npages) {
			pages = __i915_gem_userptr_alloc_pages(obj, pvec,
							       npages);
			if (!IS_ERR(pages)) {
				pinned = 0;
				pages = NULL;
			}
		}

		obj->userptr.work = ERR_CAST(pages);
		if (IS_ERR(pages))
			__i915_gem_userptr_set_active(obj, false);
	}
	mutex_unlock(&obj->mm.lock);

	release_pages(pvec, pinned);
	kvfree(pvec);

	i915_gem_object_put(obj);
	put_task_struct(work->task);
	kfree(work);
}

static struct sg_table *
__i915_gem_userptr_get_pages_schedule(struct drm_i915_gem_object *obj)
{
	struct get_pages_work *work;

	/*
	 * Spawn a worker so that we can acquire the
	 * user pages without holding our mutex. Access
	 * to the user pages requires mmap_sem, and we have
	 * a strict lock ordering of mmap_sem, struct_mutex -
	 * we already hold struct_mutex here and so cannot
	 * call gup without encountering a lock inversion.
	 *
	 * Userspace will keep on repeating the operation
	 * (thanks to EAGAIN) until either we hit the fast
	 * path or the worker completes. If the worker is
	 * cancelled or superseded, the task is still run
	 * but the results ignored. (This leads to
	 * complications that we may have a stray object
	 * refcount that we need to be wary of when
	 * checking for existing objects during creation.)
	 * If the worker encounters an error, it reports
	 * that error back to this function through
	 * obj->userptr.work = ERR_PTR.
	 */
	work = kmalloc(sizeof(*work), GFP_KERNEL);
	if (work == NULL)
		return ERR_PTR(-ENOMEM);

	obj->userptr.work = &work->work;

	work->obj = i915_gem_object_get(obj);

	work->task = current;
	get_task_struct(work->task);

	INIT_WORK(&work->work, __i915_gem_userptr_get_pages_worker);
	queue_work(to_i915(obj->base.dev)->mm.userptr_wq, &work->work);

	return ERR_PTR(-EAGAIN);
}

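/*
 * ->get_pages() hook for userptr objects: try the lockless fast path when
 * called from the owning process, otherwise defer the pinning to a worker
 * and report -EAGAIN so that the caller retries once the worker has
 * finished.
 */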
static int i915_gem_userptr_get_pages(struct drm_i915_gem_object *obj)
{
	const int num_pages = obj->base.size >> PAGE_SHIFT;
	struct mm_struct *mm = obj->userptr.mm->mm;
	struct page **pvec;
	struct sg_table *pages;
	bool active;
	int pinned;

	/*
	 * Acquiring the backing store is split into a fast and a slow path.
	 *
	 * If a previous attempt already scheduled a worker, report its
	 * status (an error, or -EAGAIN while it is still running) instead
	 * of scheduling another one.
	 *
	 * The fast path only applies when we are called from the process
	 * that owns the pages: __get_user_pages_fast walks only the current
	 * task's page tables and does so without sleeping or taking
	 * mmap_sem. If it manages to pin every page, the sg_table can be
	 * built immediately.
	 *
	 * Otherwise fall back to a worker, which calls
	 * get_user_pages_remote on behalf of the original task and reports
	 * completion through obj->userptr.work.
	 */
	if (obj->userptr.work) {
		/* A worker is already in flight or has reported an error. */
		if (IS_ERR(obj->userptr.work))
			return PTR_ERR(obj->userptr.work);
		else
			return -EAGAIN;
	}

	pvec = NULL;
	pinned = 0;

	if (mm == current->mm) {
		pvec = kvmalloc_array(num_pages, sizeof(struct page *),
				      GFP_KERNEL |
				      __GFP_NORETRY |
				      __GFP_NOWARN);
		if (pvec)
			pinned = __get_user_pages_fast(obj->userptr.ptr,
						       num_pages,
						       !i915_gem_object_is_readonly(obj),
						       pvec);
	}

	active = false;
	if (pinned < 0) {
		pages = ERR_PTR(pinned);
		pinned = 0;
	} else if (pinned < num_pages) {
		pages = __i915_gem_userptr_get_pages_schedule(obj);
		active = pages == ERR_PTR(-EAGAIN);
	} else {
		pages = __i915_gem_userptr_alloc_pages(obj, pvec, num_pages);
		active = !IS_ERR(pages);
	}
	if (active)
		__i915_gem_userptr_set_active(obj, true);

	if (IS_ERR(pages))
		release_pages(pvec, pinned);
	kvfree(pvec);

	return PTR_ERR_OR_ZERO(pages);
}

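/*
 * ->put_pages() hook: undo the DMA mapping and drop the pin on every user
 * page, writing back dirty pages unless userspace has marked the object as
 * unneeded (madv != I915_MADV_WILLNEED).
 */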
static void
i915_gem_userptr_put_pages(struct drm_i915_gem_object *obj,
			   struct sg_table *pages)
{
	struct sgt_iter sgt_iter;
	struct page *page;

	BUG_ON(obj->userptr.work != NULL);
	__i915_gem_userptr_set_active(obj, false);

	if (obj->mm.madv != I915_MADV_WILLNEED)
		obj->mm.dirty = false;

	i915_gem_gtt_finish_pages(obj, pages);

	for_each_sgt_page(page, sgt_iter, pages) {
		if (obj->mm.dirty)
			set_page_dirty(page);

		mark_page_accessed(page);
		put_page(page);
	}
	obj->mm.dirty = false;

	sg_free_table(pages);
	kfree(pages);
}

static void
i915_gem_userptr_release(struct drm_i915_gem_object *obj)
{
	i915_gem_userptr_release__mmu_notifier(obj);
	i915_gem_userptr_release__mm_struct(obj);
}

static int
i915_gem_userptr_dmabuf_export(struct drm_i915_gem_object *obj)
{
	if (obj->userptr.mmu_object)
		return 0;

	return i915_gem_userptr_init__mmu_notifier(obj, 0);
}

static const struct drm_i915_gem_object_ops i915_gem_userptr_ops = {
	.flags = I915_GEM_OBJECT_HAS_STRUCT_PAGE |
		 I915_GEM_OBJECT_IS_SHRINKABLE,
	.get_pages = i915_gem_userptr_get_pages,
	.put_pages = i915_gem_userptr_put_pages,
	.dmabuf_export = i915_gem_userptr_dmabuf_export,
	.release = i915_gem_userptr_release,
};

/*
 * i915_gem_userptr_ioctl - create a GEM object backed by user memory
 *
 * Creates a new GEM object that wraps an existing, page-aligned range of
 * the calling process's memory. Several restrictions apply:
 *
 * 1. Both the pointer and the size must be page aligned, and the size must
 *    be non-zero.
 * 2. The range must be readable (and, unless I915_USERPTR_READ_ONLY is
 *    given, writable) by the calling process.
 * 3. The hardware must be cache coherent with the CPU (LLC or snooping);
 *    the object is created in the CPU domain with LLC cache coherency.
 * 4. I915_USERPTR_READ_ONLY is only available when the GPU can actually
 *    enforce read-only GTT mappings.
 *
 * Coherency with the process address space is normally maintained through
 * an MMU notifier that revokes the GPU's pages whenever the backing
 * mappings change. I915_USERPTR_UNSYNCHRONIZED disables that tracking and
 * is therefore restricted to CAP_SYS_ADMIN.
 *
 * On success the new object's handle is returned in args->handle; the
 * pages themselves are only pinned when the object is first used by the
 * GPU.
 */
int
i915_gem_userptr_ioctl(struct drm_device *dev,
		       void *data,
		       struct drm_file *file)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct drm_i915_gem_userptr *args = data;
	struct drm_i915_gem_object *obj;
	int ret;
	u32 handle;

	if (!HAS_LLC(dev_priv) && !HAS_SNOOP(dev_priv)) {
		/* We cannot support coherent userptr objects on hw without
		 * LLC and broken snooping.
		 */
		return -ENODEV;
	}

	if (args->flags & ~(I915_USERPTR_READ_ONLY |
			    I915_USERPTR_UNSYNCHRONIZED))
		return -EINVAL;

	if (!args->user_size)
		return -EINVAL;

	if (offset_in_page(args->user_ptr | args->user_size))
		return -EINVAL;

	if (!access_ok(args->flags & I915_USERPTR_READ_ONLY ? VERIFY_READ : VERIFY_WRITE,
		       (char __user *)(unsigned long)args->user_ptr, args->user_size))
		return -EFAULT;

	if (args->flags & I915_USERPTR_READ_ONLY) {
		struct i915_hw_ppgtt *ppgtt;

		/*
		 * On almost all of the older hw, we cannot tell the GPU that
		 * a page is readonly.
		 */
		ppgtt = dev_priv->kernel_context->ppgtt;
		if (!ppgtt || !ppgtt->vm.has_read_only)
			return -ENODEV;
	}

	obj = i915_gem_object_alloc(dev_priv);
	if (obj == NULL)
		return -ENOMEM;

	drm_gem_private_object_init(dev, &obj->base, args->user_size);
	i915_gem_object_init(obj, &i915_gem_userptr_ops);
	obj->read_domains = I915_GEM_DOMAIN_CPU;
	obj->write_domain = I915_GEM_DOMAIN_CPU;
	i915_gem_object_set_cache_coherency(obj, I915_CACHE_LLC);

	obj->userptr.ptr = args->user_ptr;
	if (args->flags & I915_USERPTR_READ_ONLY)
		i915_gem_object_set_readonly(obj);

	/*
	 * And keep a pointer to the current->mm for resolving the user pages
	 * at binding. This means that we need to hook into the mmu_notifier
	 * in order to detect if the mmu is destroyed.
	 */
	ret = i915_gem_userptr_init__mm_struct(obj);
	if (ret == 0)
		ret = i915_gem_userptr_init__mmu_notifier(obj, args->flags);
	if (ret == 0)
		ret = drm_gem_handle_create(file, &obj->base, &handle);

	/* drop reference from allocate - handle holds it now */
	i915_gem_object_put(obj);
	if (ret)
		return ret;

	args->handle = handle;
	return 0;
}

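/*
 * One-time driver init for userptr support: the per-device mm hash and its
 * lock, plus the high-priority workqueue on which the deferred
 * get_user_pages workers run.
 */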
int i915_gem_init_userptr(struct drm_i915_private *dev_priv)
{
	mutex_init(&dev_priv->mm_lock);
	hash_init(dev_priv->mm_structs);

	dev_priv->mm.userptr_wq =
		alloc_workqueue("i915-userptr-acquire",
				WQ_HIGHPRI | WQ_UNBOUND,
				0);
	if (!dev_priv->mm.userptr_wq)
		return -ENOMEM;

	return 0;
}

void i915_gem_cleanup_userptr(struct drm_i915_private *dev_priv)
{
	destroy_workqueue(dev_priv->mm.userptr_wq);
}