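/*
 * i915 "userptr" support: wraps page-aligned memory from a process address
 * space into a GEM object (I915_GEM_USERPTR) so the GPU can access ordinary
 * user memory. An mmu_notifier is registered per mm_struct to revoke the
 * GPU's pages whenever the CPU mappings backing the range are invalidated.
 */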
#include <drm/drmP.h>
#include <drm/i915_drm.h>
#include "i915_drv.h"
#include "i915_trace.h"
#include "intel_drv.h"
#include <linux/mmu_context.h>
#include <linux/mmu_notifier.h>
#include <linux/mempolicy.h>
#include <linux/swap.h>
#include <linux/sched/mm.h>

/*
 * One i915_mm_struct per process mm_struct: refcounted so that all userptr
 * objects created against the same mm share a single entry (and a single
 * mmu_notifier registration).
 */
struct i915_mm_struct {
	struct mm_struct *mm;
	struct drm_i915_private *i915;
	struct i915_mmu_notifier *mn;
	struct hlist_node node;
	struct kref kref;
	struct work_struct work;
};

#if defined(CONFIG_MMU_NOTIFIER)
#include <linux/interval_tree.h>

/*
 * Per-mm notifier state: an interval tree of attached userptr objects,
 * keyed by their user address range, plus a workqueue used to run the
 * cancellation work when a range is invalidated.
 */
struct i915_mmu_notifier {
	spinlock_t lock;
	struct hlist_node node;
	struct mmu_notifier mn;
	struct rb_root_cached objects;
	struct workqueue_struct *wq;
};

/* Per-object node in the notifier's interval tree. */
struct i915_mmu_object {
	struct i915_mmu_notifier *mn;
	struct drm_i915_gem_object *obj;
	struct interval_tree_node it;
	struct list_head link;
	struct work_struct work;
	bool attached;
};

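/*
 * Worker run when an mmu_notifier invalidation hits a userptr object:
 * cancel any pending get-pages worker, wait for outstanding GPU activity,
 * then unbind the object and drop its backing pages.
 */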
static void cancel_userptr(struct work_struct *work)
{
	struct i915_mmu_object *mo = container_of(work, typeof(*mo), work);
	struct drm_i915_gem_object *obj = mo->obj;
	struct work_struct *active;

	/* Cancel any active worker and force us to re-evaluate gup */
	mutex_lock(&obj->mm.lock);
	active = fetch_and_zero(&obj->userptr.work);
	mutex_unlock(&obj->mm.lock);
	if (active)
		goto out;

	i915_gem_object_wait(obj, I915_WAIT_ALL, MAX_SCHEDULE_TIMEOUT, NULL);

	mutex_lock(&obj->base.dev->struct_mutex);

	/* We are inside a kthread context and can't be interrupted */
	if (i915_gem_object_unbind(obj) == 0)
		__i915_gem_object_put_pages(obj, I915_MM_NORMAL);
	WARN_ONCE(i915_gem_object_has_pages(obj),
		  "Failed to release pages: bind_count=%d, pages_pin_count=%d, pin_global=%d\n",
		  obj->bind_count,
		  atomic_read(&obj->mm.pages_pin_count),
		  obj->pin_global);

	mutex_unlock(&obj->base.dev->struct_mutex);

out:
	i915_gem_object_put(obj);
}

/* Caller must hold mo->mn->lock. */
static void add_object(struct i915_mmu_object *mo)
{
	if (mo->attached)
		return;

	interval_tree_insert(&mo->it, &mo->mn->objects);
	mo->attached = true;
}

/* Caller must hold mo->mn->lock. */
static void del_object(struct i915_mmu_object *mo)
{
	if (!mo->attached)
		return;

	interval_tree_remove(&mo->it, &mo->mn->objects);
	mo->attached = false;
}

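/*
 * Called by the mmu_notifier core when a range of the CPU page tables is
 * about to be invalidated. Every attached object overlapping the range has
 * its cancel_userptr worker queued and is removed from the interval tree;
 * the workqueue is then flushed so the pages are released before the
 * invalidation proceeds.
 */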
static void i915_gem_userptr_mn_invalidate_range_start(struct mmu_notifier *_mn,
						       struct mm_struct *mm,
						       unsigned long start,
						       unsigned long end)
{
	struct i915_mmu_notifier *mn =
		container_of(_mn, struct i915_mmu_notifier, mn);
	struct i915_mmu_object *mo;
	struct interval_tree_node *it;
	LIST_HEAD(cancelled);

	if (RB_EMPTY_ROOT(&mn->objects.rb_root))
		return;

	/* interval ranges are inclusive, but invalidate range is exclusive */
	end--;

	spin_lock(&mn->lock);
	it = interval_tree_iter_first(&mn->objects, start, end);
	while (it) {
		/* The mmu_object is released late when destroying the
		 * GEM object so it is entirely possible to gain a
		 * reference on an object in the process of being freed
		 * since our serialisation is via the spinlock and not
		 * the struct_mutex - and consequently use it after it
		 * is freed and then double free it. To prevent that
		 * use-after-free we only acquire a reference on the
		 * object if it is not in the process of being destroyed.
		 */
		mo = container_of(it, struct i915_mmu_object, it);
		if (kref_get_unless_zero(&mo->obj->base.refcount))
			queue_work(mn->wq, &mo->work);

		list_add(&mo->link, &cancelled);
		it = interval_tree_iter_next(it, start, end);
	}
	list_for_each_entry(mo, &cancelled, link)
		del_object(mo);
	spin_unlock(&mn->lock);

	if (!list_empty(&cancelled))
		flush_workqueue(mn->wq);
}

static const struct mmu_notifier_ops i915_gem_userptr_notifier = {
	.invalidate_range_start = i915_gem_userptr_mn_invalidate_range_start,
};

static struct i915_mmu_notifier *
i915_mmu_notifier_create(struct mm_struct *mm)
{
	struct i915_mmu_notifier *mn;

	mn = kmalloc(sizeof(*mn), GFP_KERNEL);
	if (mn == NULL)
		return ERR_PTR(-ENOMEM);

	spin_lock_init(&mn->lock);
	mn->mn.ops = &i915_gem_userptr_notifier;
	mn->objects = RB_ROOT_CACHED;
	mn->wq = alloc_workqueue("i915-userptr-release",
				 WQ_UNBOUND | WQ_MEM_RECLAIM,
				 0);
	if (mn->wq == NULL) {
		kfree(mn);
		return ERR_PTR(-ENOMEM);
	}

	return mn;
}

static void
i915_gem_userptr_release__mmu_notifier(struct drm_i915_gem_object *obj)
{
	struct i915_mmu_object *mo;

	mo = obj->userptr.mmu_object;
	if (mo == NULL)
		return;

	spin_lock(&mo->mn->lock);
	del_object(mo);
	spin_unlock(&mo->mn->lock);
	kfree(mo);

	obj->userptr.mmu_object = NULL;
}

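/*
 * Return the mmu_notifier for this mm, creating and registering it on
 * first use. Creation is done outside the locks and the result installed
 * under mm_lock, so a racing thread may win; the loser frees its copy.
 */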
static struct i915_mmu_notifier *
i915_mmu_notifier_find(struct i915_mm_struct *mm)
{
	struct i915_mmu_notifier *mn;
	int err = 0;

	mn = mm->mn;
	if (mn)
		return mn;

	mn = i915_mmu_notifier_create(mm->mm);
	if (IS_ERR(mn))
		err = PTR_ERR(mn);

	down_write(&mm->mm->mmap_sem);
	mutex_lock(&mm->i915->mm_lock);
	if (mm->mn == NULL && !err) {
		/* Protected by mmap_sem (write-lock) */
		err = __mmu_notifier_register(&mn->mn, mm->mm);
		if (!err) {
			/* Protected by mm_lock */
			mm->mn = fetch_and_zero(&mn);
		}
	} else if (mm->mn) {
		/*
		 * Someone else raced and successfully installed the mmu
		 * notifier, we can cancel our own errors.
		 */
		err = 0;
	}
	mutex_unlock(&mm->i915->mm_lock);
	up_write(&mm->mm->mmap_sem);

	if (mn && !IS_ERR(mn)) {
		destroy_workqueue(mn->wq);
		kfree(mn);
	}

	return err ? ERR_PTR(err) : mm->mn;
}

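/*
 * Attach a freshly created userptr object to the mmu_notifier for its mm,
 * unless the caller asked for (and is allowed) unsynchronized operation.
 */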
static int
i915_gem_userptr_init__mmu_notifier(struct drm_i915_gem_object *obj,
				    unsigned flags)
{
	struct i915_mmu_notifier *mn;
	struct i915_mmu_object *mo;

	if (flags & I915_USERPTR_UNSYNCHRONIZED)
		return capable(CAP_SYS_ADMIN) ? 0 : -EPERM;

	if (WARN_ON(obj->userptr.mm == NULL))
		return -EINVAL;

	mn = i915_mmu_notifier_find(obj->userptr.mm);
	if (IS_ERR(mn))
		return PTR_ERR(mn);

	mo = kzalloc(sizeof(*mo), GFP_KERNEL);
	if (mo == NULL)
		return -ENOMEM;

	mo->mn = mn;
	mo->obj = obj;
	mo->it.start = obj->userptr.ptr;
	mo->it.last = obj->userptr.ptr + obj->base.size - 1;
	INIT_WORK(&mo->work, cancel_userptr);

	obj->userptr.mmu_object = mo;
	return 0;
}

static void
i915_mmu_notifier_free(struct i915_mmu_notifier *mn,
		       struct mm_struct *mm)
{
	if (mn == NULL)
		return;

	mmu_notifier_unregister(&mn->mn, mm);
	destroy_workqueue(mn->wq);
	kfree(mn);
}

#else

static void
i915_gem_userptr_release__mmu_notifier(struct drm_i915_gem_object *obj)
{
}

static int
i915_gem_userptr_init__mmu_notifier(struct drm_i915_gem_object *obj,
				    unsigned flags)
{
	if ((flags & I915_USERPTR_UNSYNCHRONIZED) == 0)
		return -ENODEV;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	return 0;
}

static void
i915_mmu_notifier_free(struct i915_mmu_notifier *mn,
		       struct mm_struct *mm)
{
}

#endif

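/*
 * Look up the i915_mm_struct tracking a given mm_struct, if any.
 * Caller must hold dev_priv->mm_lock.
 */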
static struct i915_mm_struct *
__i915_mm_struct_find(struct drm_i915_private *dev_priv, struct mm_struct *real)
{
	struct i915_mm_struct *mm;

	/* Protected by dev_priv->mm_lock */
	hash_for_each_possible(dev_priv->mm_structs, mm, node, (unsigned long)real)
		if (mm->mm == real)
			return mm;

	return NULL;
}

static int
i915_gem_userptr_init__mm_struct(struct drm_i915_gem_object *obj)
{
	struct drm_i915_private *dev_priv = to_i915(obj->base.dev);
	struct i915_mm_struct *mm;
	int ret = 0;

	/* We pin the mm_struct for the lifetime of the object (mmgrab, i.e.
	 * a reference on the mm itself rather than its address space) so
	 * that the user pages can be resolved when the object is bound.
	 * The final teardown (unregistering the mmu_notifier and dropping
	 * the mm) is deferred to a worker so that it runs outside the locks
	 * held at the point of the final kref_put; see
	 * __i915_mm_struct_free().
	 */
	mutex_lock(&dev_priv->mm_lock);
	mm = __i915_mm_struct_find(dev_priv, current->mm);
	if (mm == NULL) {
		mm = kmalloc(sizeof(*mm), GFP_KERNEL);
		if (mm == NULL) {
			ret = -ENOMEM;
			goto out;
		}

		kref_init(&mm->kref);
		mm->i915 = to_i915(obj->base.dev);

		mm->mm = current->mm;
		mmgrab(current->mm);

		mm->mn = NULL;

		/* Protected by dev_priv->mm_lock */
		hash_add(dev_priv->mm_structs,
			 &mm->node, (unsigned long)mm->mm);
	} else
		kref_get(&mm->kref);

	obj->userptr.mm = mm;
out:
	mutex_unlock(&dev_priv->mm_lock);
	return ret;
}

static void
__i915_mm_struct_free__worker(struct work_struct *work)
{
	struct i915_mm_struct *mm = container_of(work, typeof(*mm), work);
	i915_mmu_notifier_free(mm->mn, mm->mm);
	mmdrop(mm->mm);
	kfree(mm);
}

static void
__i915_mm_struct_free(struct kref *kref)
{
	struct i915_mm_struct *mm = container_of(kref, typeof(*mm), kref);

	/* Protected by dev_priv->mm_lock */
	hash_del(&mm->node);
	mutex_unlock(&mm->i915->mm_lock);

	INIT_WORK(&mm->work, __i915_mm_struct_free__worker);
	queue_work(mm->i915->mm.userptr_wq, &mm->work);
}

static void
i915_gem_userptr_release__mm_struct(struct drm_i915_gem_object *obj)
{
	if (obj->userptr.mm == NULL)
		return;

	kref_put_mutex(&obj->userptr.mm->kref,
		       __i915_mm_struct_free,
		       &to_i915(obj->base.dev)->mm_lock);
	obj->userptr.mm = NULL;
}

/* Context for the slow-path worker that pins the user pages. */
struct get_pages_work {
	struct work_struct work;
	struct drm_i915_gem_object *obj;
	struct task_struct *task;
};

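/*
 * Build an sg_table for the pinned user pages and map it for the GPU.
 * If DMA mapping fails with coalesced segments (e.g. when the DMA layer
 * cannot handle large segments), retry with single-page segments before
 * giving up.
 */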
static struct sg_table *
__i915_gem_userptr_alloc_pages(struct drm_i915_gem_object *obj,
			       struct page **pvec, int num_pages)
{
	unsigned int max_segment = i915_sg_segment_size();
	struct sg_table *st;
	unsigned int sg_page_sizes;
	int ret;

	st = kmalloc(sizeof(*st), GFP_KERNEL);
	if (!st)
		return ERR_PTR(-ENOMEM);

alloc_table:
	ret = __sg_alloc_table_from_pages(st, pvec, num_pages,
					  0, num_pages << PAGE_SHIFT,
					  max_segment,
					  GFP_KERNEL);
	if (ret) {
		kfree(st);
		return ERR_PTR(ret);
	}

	ret = i915_gem_gtt_prepare_pages(obj, st);
	if (ret) {
		sg_free_table(st);

		if (max_segment > PAGE_SIZE) {
			max_segment = PAGE_SIZE;
			goto alloc_table;
		}

		kfree(st);
		return ERR_PTR(ret);
	}

	sg_page_sizes = i915_sg_page_sizes(st->sgl);

	__i915_gem_object_set_pages(obj, st, sg_page_sizes);

	return st;
}

static int
__i915_gem_userptr_set_active(struct drm_i915_gem_object *obj,
			      bool value)
{
	int ret = 0;

	/* During mm_invalidate_range we need to cancel any userptr that
	 * overlaps the range being invalidated. Doing so requires the
	 * struct_mutex, and that risks recursion. In order to cause
	 * recursion, the user must alias the userptr address space with
	 * a GTT mmapping (possible with a MAP_FIXED) - then when we have
	 * to invalidate that mmapping, mm_invalidate_range is called with
	 * the userptr address *and* the struct_mutex held. To prevent that
	 * we set a flag under the i915_mmu_notifier spinlock to indicate
	 * whether this object is valid.
	 */
#if defined(CONFIG_MMU_NOTIFIER)
	if (obj->userptr.mmu_object == NULL)
		return 0;

	spin_lock(&obj->userptr.mmu_object->mn->lock);
	/* In order to serialise get_pages with an outstanding
	 * cancel_userptr, we must drop the struct_mutex and try again.
	 */
	if (!value)
		del_object(obj->userptr.mmu_object);
	else if (!work_pending(&obj->userptr.mmu_object->work))
		add_object(obj->userptr.mmu_object);
	else
		ret = -EAGAIN;
	spin_unlock(&obj->userptr.mmu_object->mn->lock);
#endif

	return ret;
}

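/*
 * Slow path: pin the user pages from a workqueue, where we may safely
 * take mmap_sem. The result (or error) is handed back to get_pages via
 * obj->userptr.work.
 */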
static void
__i915_gem_userptr_get_pages_worker(struct work_struct *_work)
{
	struct get_pages_work *work = container_of(_work, typeof(*work), work);
	struct drm_i915_gem_object *obj = work->obj;
	const int npages = obj->base.size >> PAGE_SHIFT;
	struct page **pvec;
	int pinned, ret;

	ret = -ENOMEM;
	pinned = 0;

	pvec = kvmalloc_array(npages, sizeof(struct page *), GFP_KERNEL);
	if (pvec != NULL) {
		struct mm_struct *mm = obj->userptr.mm->mm;
		unsigned int flags = 0;

		if (!obj->userptr.read_only)
			flags |= FOLL_WRITE;

		ret = -EFAULT;
		if (mmget_not_zero(mm)) {
			down_read(&mm->mmap_sem);
			while (pinned < npages) {
				ret = get_user_pages_remote
					(work->task, mm,
					 obj->userptr.ptr + pinned * PAGE_SIZE,
					 npages - pinned,
					 flags,
					 pvec + pinned, NULL, NULL);
				if (ret < 0)
					break;

				pinned += ret;
			}
			up_read(&mm->mmap_sem);
			mmput(mm);
		}
	}

	mutex_lock(&obj->mm.lock);
	if (obj->userptr.work == &work->work) {
		struct sg_table *pages = ERR_PTR(ret);

		if (pinned == npages) {
			pages = __i915_gem_userptr_alloc_pages(obj, pvec,
							       npages);
			if (!IS_ERR(pages)) {
				pinned = 0;
				pages = NULL;
			}
		}

		obj->userptr.work = ERR_CAST(pages);
		if (IS_ERR(pages))
			__i915_gem_userptr_set_active(obj, false);
	}
	mutex_unlock(&obj->mm.lock);

	release_pages(pvec, pinned);
	kvfree(pvec);

	i915_gem_object_put(obj);
	put_task_struct(work->task);
	kfree(work);
}

static struct sg_table *
__i915_gem_userptr_get_pages_schedule(struct drm_i915_gem_object *obj)
{
	struct get_pages_work *work;

	/* Spawn a worker so that we can acquire the
	 * user pages without holding our mutex. Access
	 * to the user pages requires mmap_sem, and we have
	 * a strict lock ordering of mmap_sem, struct_mutex -
	 * we already hold struct_mutex here and so cannot
	 * call gup without encountering a lock inversion.
	 *
	 * Userspace will keep on repeating the operation
	 * (thanks to EAGAIN) until either we hit the fast
	 * path or the worker completes. If the worker is
	 * cancelled or superseded, the task is still run
	 * but the results ignored. (This leads to
	 * complications that we may have a stray object
	 * refcount that we need to be wary of when
	 * checking for existing objects during creation.)
	 * If the worker encounters an error, it reports
	 * that error back to this function through
	 * obj->userptr.work = ERR_PTR.
	 */
	work = kmalloc(sizeof(*work), GFP_KERNEL);
	if (work == NULL)
		return ERR_PTR(-ENOMEM);

	obj->userptr.work = &work->work;

	work->obj = i915_gem_object_get(obj);

	work->task = current;
	get_task_struct(work->task);

	INIT_WORK(&work->work, __i915_gem_userptr_get_pages_worker);
	queue_work(to_i915(obj->base.dev)->mm.userptr_wq, &work->work);

	return ERR_PTR(-EAGAIN);
}

static int i915_gem_userptr_get_pages(struct drm_i915_gem_object *obj)
{
	const int num_pages = obj->base.size >> PAGE_SHIFT;
	struct mm_struct *mm = obj->userptr.mm->mm;
	struct page **pvec;
	struct sg_table *pages;
	bool active;
	int pinned;

	/* The backing pages come straight from the user's address space.
	 * If those pages are replaced between us pinning them here and the
	 * GPU finishing its rendering, that is userspace's problem - the
	 * mmu_notifier still revokes our mapping on invalidation.
	 *
	 * If a previous attempt already scheduled the slow-path worker,
	 * either report its error or ask the caller to try again once it
	 * completes. Otherwise try the lockless fast path
	 * (__get_user_pages_fast), which only works from the owning
	 * process; if that cannot pin everything, fall back to the worker,
	 * since we may not take mmap_sem underneath our own locks.
	 */
	if (obj->userptr.work) {
		/* active flag should still be held for the pending work */
		if (IS_ERR(obj->userptr.work))
			return PTR_ERR(obj->userptr.work);
		else
			return -EAGAIN;
	}

	pvec = NULL;
	pinned = 0;

	if (mm == current->mm) {
		pvec = kvmalloc_array(num_pages, sizeof(struct page *),
				      GFP_KERNEL |
				      __GFP_NORETRY |
				      __GFP_NOWARN);
		if (pvec)
			pinned = __get_user_pages_fast(obj->userptr.ptr,
						       num_pages,
						       !obj->userptr.read_only,
						       pvec);
	}

	active = false;
	if (pinned < 0) {
		pages = ERR_PTR(pinned);
		pinned = 0;
	} else if (pinned < num_pages) {
		pages = __i915_gem_userptr_get_pages_schedule(obj);
		active = pages == ERR_PTR(-EAGAIN);
	} else {
		pages = __i915_gem_userptr_alloc_pages(obj, pvec, num_pages);
		active = !IS_ERR(pages);
	}
	if (active)
		__i915_gem_userptr_set_active(obj, true);

	if (IS_ERR(pages))
		release_pages(pvec, pinned);
	kvfree(pvec);

	return PTR_ERR_OR_ZERO(pages);
}

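/*
 * Release the backing store: unmap from the GTT, mark pages dirty (if the
 * object was written and still wanted) and drop the page references taken
 * by get_user_pages.
 */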
static void
i915_gem_userptr_put_pages(struct drm_i915_gem_object *obj,
			   struct sg_table *pages)
{
	struct sgt_iter sgt_iter;
	struct page *page;

	BUG_ON(obj->userptr.work != NULL);
	__i915_gem_userptr_set_active(obj, false);

	if (obj->mm.madv != I915_MADV_WILLNEED)
		obj->mm.dirty = false;

	i915_gem_gtt_finish_pages(obj, pages);

	for_each_sgt_page(page, sgt_iter, pages) {
		if (obj->mm.dirty)
			set_page_dirty(page);

		mark_page_accessed(page);
		put_page(page);
	}
	obj->mm.dirty = false;

	sg_free_table(pages);
	kfree(pages);
}

static void
i915_gem_userptr_release(struct drm_i915_gem_object *obj)
{
	i915_gem_userptr_release__mmu_notifier(obj);
	i915_gem_userptr_release__mm_struct(obj);
}

static int
i915_gem_userptr_dmabuf_export(struct drm_i915_gem_object *obj)
{
	if (obj->userptr.mmu_object)
		return 0;

	return i915_gem_userptr_init__mmu_notifier(obj, 0);
}

static const struct drm_i915_gem_object_ops i915_gem_userptr_ops = {
	.flags = I915_GEM_OBJECT_HAS_STRUCT_PAGE |
		 I915_GEM_OBJECT_IS_SHRINKABLE,
	.get_pages = i915_gem_userptr_get_pages,
	.put_pages = i915_gem_userptr_put_pages,
	.dmabuf_export = i915_gem_userptr_dmabuf_export,
	.release = i915_gem_userptr_release,
};

/*
 * Creates a new GEM object wrapping ordinary memory from the calling
 * process' address space (user memory), returning a handle to it in
 * args->handle.
 *
 * Restrictions enforced here:
 * 1. The hardware must be able to access the pages coherently; without
 *    LLC or snooping the ioctl is refused (-ENODEV).
 * 2. The range must be page aligned (both user_ptr and user_size) and
 *    accessible to the caller.
 * 3. Only the READ_ONLY and UNSYNCHRONIZED flags are recognised, and
 *    READ_ONLY is currently rejected since the GPU cannot be told to
 *    treat the pages as read-only.
 * 4. Unless I915_USERPTR_UNSYNCHRONIZED is requested (which requires
 *    CAP_SYS_ADMIN), an mmu_notifier is attached so that GPU access is
 *    revoked whenever the CPU page tables covering the range change.
 *
 * The actual pages are not pinned here; they are acquired on first use
 * via the get_pages hook above.
 */
int
i915_gem_userptr_ioctl(struct drm_device *dev, void *data, struct drm_file *file)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct drm_i915_gem_userptr *args = data;
	struct drm_i915_gem_object *obj;
	int ret;
	u32 handle;

	if (!HAS_LLC(dev_priv) && !HAS_SNOOP(dev_priv)) {
		/* We cannot support coherent userptr objects on hw without
		 * LLC and broken snooping.
		 */
		return -ENODEV;
	}

	if (args->flags & ~(I915_USERPTR_READ_ONLY |
			    I915_USERPTR_UNSYNCHRONIZED))
		return -EINVAL;

	if (offset_in_page(args->user_ptr | args->user_size))
		return -EINVAL;

	if (!access_ok(args->flags & I915_USERPTR_READ_ONLY ? VERIFY_READ : VERIFY_WRITE,
		       (char __user *)(unsigned long)args->user_ptr, args->user_size))
		return -EFAULT;

	if (args->flags & I915_USERPTR_READ_ONLY) {
		/* On almost all of the current hw we cannot tell the GPU
		 * that a page is read-only, so this is just a placeholder
		 * in the uAPI for now.
		 */
		return -ENODEV;
	}

	obj = i915_gem_object_alloc(dev_priv);
	if (obj == NULL)
		return -ENOMEM;

	drm_gem_private_object_init(dev, &obj->base, args->user_size);
	i915_gem_object_init(obj, &i915_gem_userptr_ops);
	obj->base.read_domains = I915_GEM_DOMAIN_CPU;
	obj->base.write_domain = I915_GEM_DOMAIN_CPU;
	i915_gem_object_set_cache_coherency(obj, I915_CACHE_LLC);

	obj->userptr.ptr = args->user_ptr;
	obj->userptr.read_only = !!(args->flags & I915_USERPTR_READ_ONLY);

	/* And keep a pointer to the current->mm for resolving the user pages
	 * at binding. This means that we need to hook into the mmu_notifier
	 * in order to detect if the mmu is destroyed.
	 */
	ret = i915_gem_userptr_init__mm_struct(obj);
	if (ret == 0)
		ret = i915_gem_userptr_init__mmu_notifier(obj, args->flags);
	if (ret == 0)
		ret = drm_gem_handle_create(file, &obj->base, &handle);

	/* drop reference from allocate - handle holds it now */
	i915_gem_object_put(obj);
	if (ret)
		return ret;

	args->handle = handle;
	return 0;
}

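/* One-time setup and teardown of the userptr acquire workqueue and the
 * per-device mm_struct tracking.
 */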
int i915_gem_init_userptr(struct drm_i915_private *dev_priv)
{
	mutex_init(&dev_priv->mm_lock);
	hash_init(dev_priv->mm_structs);

	dev_priv->mm.userptr_wq =
		alloc_workqueue("i915-userptr-acquire",
				WQ_HIGHPRI | WQ_UNBOUND,
				0);
	if (!dev_priv->mm.userptr_wq)
		return -ENOMEM;

	return 0;
}

void i915_gem_cleanup_userptr(struct drm_i915_private *dev_priv)
{
	destroy_workqueue(dev_priv->mm.userptr_wq);
}