/*
 * Copyright © 2012-2014 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OF THE SOFTWARE.
 *
 */
#include <drm/drmP.h>
#include <drm/i915_drm.h>
#include "i915_drv.h"
#include "i915_trace.h"
#include "intel_drv.h"
#include <linux/mmu_context.h>
#include <linux/mmu_notifier.h>
#include <linux/mempolicy.h>
#include <linux/swap.h>

/*
 * One i915_mm_struct exists per client mm; it is shared by all userptr
 * objects created by that process, hashed in dev_priv->mm_structs and
 * kept alive by a kref. The work item is used to defer the final
 * mmdrop() out from under the struct_mutex.
 */
struct i915_mm_struct {
	struct mm_struct *mm;
	struct drm_device *dev;
	struct i915_mmu_notifier *mn;
	struct hlist_node node;
	struct kref kref;
	struct work_struct work;
};

#if defined(CONFIG_MMU_NOTIFIER)
#include <linux/interval_tree.h>

struct i915_mmu_notifier {
	spinlock_t lock;
	struct hlist_node node;
	struct mmu_notifier mn;
	struct rb_root objects;
};

struct i915_mmu_object {
	struct i915_mmu_notifier *mn;
	struct drm_i915_gem_object *obj;
	struct interval_tree_node it;
	struct list_head link;
	struct work_struct work;
	bool attached;
};

static void cancel_userptr(struct work_struct *work)
{
	struct i915_mmu_object *mo = container_of(work, typeof(*mo), work);
	struct drm_i915_gem_object *obj = mo->obj;
	struct drm_device *dev = obj->base.dev;

	mutex_lock(&dev->struct_mutex);
	/* Cancel any active worker and force us to re-evaluate gup */
	obj->userptr.work = NULL;

	if (obj->pages != NULL) {
		struct drm_i915_private *dev_priv = to_i915(dev);
		struct i915_vma *vma, *tmp;
		bool was_interruptible;

		was_interruptible = dev_priv->mm.interruptible;
		dev_priv->mm.interruptible = false;

		list_for_each_entry_safe(vma, tmp, &obj->vma_list, obj_link) {
			int ret = i915_vma_unbind(vma);
			WARN_ON(ret && ret != -EIO);
		}
		WARN_ON(i915_gem_object_put_pages(obj));

		dev_priv->mm.interruptible = was_interruptible;
	}

	drm_gem_object_unreference(&obj->base);
	mutex_unlock(&dev->struct_mutex);
}

static void add_object(struct i915_mmu_object *mo)
{
	if (mo->attached)
		return;

	interval_tree_insert(&mo->it, &mo->mn->objects);
	mo->attached = true;
}

static void del_object(struct i915_mmu_object *mo)
{
	if (!mo->attached)
		return;

	interval_tree_remove(&mo->it, &mo->mn->objects);
	mo->attached = false;
}
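
/*
 * Illustrative sketch (not driver code): lookups pair with add_object()
 * and del_object() via the standard interval-tree walk, always under
 * mn->lock since that is the only lock serialising tree mutation. Note
 * that interval_tree_iter_first() takes an *inclusive* last address:
 *
 *	spin_lock(&mn->lock);
 *	for (it = interval_tree_iter_first(&mn->objects, start, end - 1);
 *	     it;
 *	     it = interval_tree_iter_next(it, start, end - 1)) {
 *		mo = container_of(it, struct i915_mmu_object, it);
 *		...
 *	}
 *	spin_unlock(&mn->lock);
 *
 * which is exactly the pattern used by the invalidation callback below.
 */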
111
112static void i915_gem_userptr_mn_invalidate_range_start(struct mmu_notifier *_mn,
113 struct mm_struct *mm,
114 unsigned long start,
115 unsigned long end)
116{
117 struct i915_mmu_notifier *mn =
118 container_of(_mn, struct i915_mmu_notifier, mn);
119 struct i915_mmu_object *mo;
120 struct interval_tree_node *it;
121 LIST_HEAD(cancelled);
122
123 if (RB_EMPTY_ROOT(&mn->objects))
124 return;
125
126
127 end--;
128
129 spin_lock(&mn->lock);
130 it = interval_tree_iter_first(&mn->objects, start, end);
131 while (it) {
132
133
134
135
136
137
138
139
140
141 mo = container_of(it, struct i915_mmu_object, it);
142 if (kref_get_unless_zero(&mo->obj->base.refcount))
143 schedule_work(&mo->work);
144
145 list_add(&mo->link, &cancelled);
146 it = interval_tree_iter_next(it, start, end);
147 }
148 list_for_each_entry(mo, &cancelled, link)
149 del_object(mo);
150 spin_unlock(&mn->lock);
151}
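
/*
 * For illustration only: any userspace operation that tears down or
 * remaps the PTEs backing a userptr range funnels through the callback
 * above. For example,
 *
 *	munmap(ptr, size);
 *
 * arrives as invalidate_range_start(mm, ptr, ptr + size), and the
 * scheduled cancel_userptr() work unbinds the object before the pages
 * are handed back to the kernel.
 */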

static const struct mmu_notifier_ops i915_gem_userptr_notifier = {
	.invalidate_range_start = i915_gem_userptr_mn_invalidate_range_start,
};

static struct i915_mmu_notifier *
i915_mmu_notifier_create(struct mm_struct *mm)
{
	struct i915_mmu_notifier *mn;
	int ret;

	mn = kmalloc(sizeof(*mn), GFP_KERNEL);
	if (mn == NULL)
		return ERR_PTR(-ENOMEM);

	spin_lock_init(&mn->lock);
	mn->mn.ops = &i915_gem_userptr_notifier;
	mn->objects = RB_ROOT;

	 /* Protected by mmap_sem (write-lock) */
	ret = __mmu_notifier_register(&mn->mn, mm);
	if (ret) {
		kfree(mn);
		return ERR_PTR(ret);
	}

	return mn;
}

static void
i915_gem_userptr_release__mmu_notifier(struct drm_i915_gem_object *obj)
{
	struct i915_mmu_object *mo;

	mo = obj->userptr.mmu_object;
	if (mo == NULL)
		return;

	spin_lock(&mo->mn->lock);
	del_object(mo);
	spin_unlock(&mo->mn->lock);
	kfree(mo);

	obj->userptr.mmu_object = NULL;
}

static struct i915_mmu_notifier *
i915_mmu_notifier_find(struct i915_mm_struct *mm)
{
	struct i915_mmu_notifier *mn = mm->mn;

	if (mn)
		return mn;

	down_write(&mm->mm->mmap_sem);
	mutex_lock(&to_i915(mm->dev)->mm_lock);
	if ((mn = mm->mn) == NULL) {
		mn = i915_mmu_notifier_create(mm->mm);
		if (!IS_ERR(mn))
			mm->mn = mn;
	}
	mutex_unlock(&to_i915(mm->dev)->mm_lock);
	up_write(&mm->mm->mmap_sem);

	return mn;
}

static int
i915_gem_userptr_init__mmu_notifier(struct drm_i915_gem_object *obj,
				    unsigned flags)
{
	struct i915_mmu_notifier *mn;
	struct i915_mmu_object *mo;

	if (flags & I915_USERPTR_UNSYNCHRONIZED)
		return capable(CAP_SYS_ADMIN) ? 0 : -EPERM;

	if (WARN_ON(obj->userptr.mm == NULL))
		return -EINVAL;

	mn = i915_mmu_notifier_find(obj->userptr.mm);
	if (IS_ERR(mn))
		return PTR_ERR(mn);

	mo = kzalloc(sizeof(*mo), GFP_KERNEL);
	if (mo == NULL)
		return -ENOMEM;

	mo->mn = mn;
	mo->obj = obj;
	mo->it.start = obj->userptr.ptr;
	mo->it.last = obj->userptr.ptr + obj->base.size - 1;
	INIT_WORK(&mo->work, cancel_userptr);

	obj->userptr.mmu_object = mo;
	return 0;
}

static void
i915_mmu_notifier_free(struct i915_mmu_notifier *mn,
		       struct mm_struct *mm)
{
	if (mn == NULL)
		return;

	mmu_notifier_unregister(&mn->mn, mm);
	kfree(mn);
}

#else

static void
i915_gem_userptr_release__mmu_notifier(struct drm_i915_gem_object *obj)
{
}

static int
i915_gem_userptr_init__mmu_notifier(struct drm_i915_gem_object *obj,
				    unsigned flags)
{
	if ((flags & I915_USERPTR_UNSYNCHRONIZED) == 0)
		return -ENODEV;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	return 0;
}

static void
i915_mmu_notifier_free(struct i915_mmu_notifier *mn,
		       struct mm_struct *mm)
{
}

#endif

static struct i915_mm_struct *
__i915_mm_struct_find(struct drm_i915_private *dev_priv, struct mm_struct *real)
{
	struct i915_mm_struct *mm;

	/* Protected by dev_priv->mm_lock */
	hash_for_each_possible(dev_priv->mm_structs, mm, node, (unsigned long)real)
		if (mm->mm == real)
			return mm;

	return NULL;
}

static int
i915_gem_userptr_init__mm_struct(struct drm_i915_gem_object *obj)
{
	struct drm_i915_private *dev_priv = to_i915(obj->base.dev);
	struct i915_mm_struct *mm;
	int ret = 0;

	/* During release of the GEM object we hold the struct_mutex. This
	 * precludes us from calling mmput() at that time as that may be
	 * the last reference and so call exit_mmap(). exit_mmap() will
	 * attempt to reap the vma, and if we were holding a GTT mmap
	 * would then call drm_gem_vm_close() and attempt to reacquire
	 * the struct mutex. So in order to avoid that recursion, we have
	 * to defer releasing the mm reference until after we drop the
	 * struct_mutex, i.e. we need to schedule a worker to do the clean
	 * up.
	 */
	mutex_lock(&dev_priv->mm_lock);
	mm = __i915_mm_struct_find(dev_priv, current->mm);
	if (mm == NULL) {
		mm = kmalloc(sizeof(*mm), GFP_KERNEL);
		if (mm == NULL) {
			ret = -ENOMEM;
			goto out;
		}

		kref_init(&mm->kref);
		mm->dev = obj->base.dev;

		mm->mm = current->mm;
		atomic_inc(&current->mm->mm_count);

		mm->mn = NULL;

		/* Protected by dev_priv->mm_lock */
		hash_add(dev_priv->mm_structs,
			 &mm->node, (unsigned long)mm->mm);
	} else
		kref_get(&mm->kref);

	obj->userptr.mm = mm;
out:
	mutex_unlock(&dev_priv->mm_lock);
	return ret;
}

static void
__i915_mm_struct_free__worker(struct work_struct *work)
{
	struct i915_mm_struct *mm = container_of(work, typeof(*mm), work);
	i915_mmu_notifier_free(mm->mn, mm->mm);
	mmdrop(mm->mm);
	kfree(mm);
}

static void
__i915_mm_struct_free(struct kref *kref)
{
	struct i915_mm_struct *mm = container_of(kref, typeof(*mm), kref);

	/* Protected by dev_priv->mm_lock */
	hash_del(&mm->node);
	mutex_unlock(&to_i915(mm->dev)->mm_lock);

	INIT_WORK(&mm->work, __i915_mm_struct_free__worker);
	schedule_work(&mm->work);
}

static void
i915_gem_userptr_release__mm_struct(struct drm_i915_gem_object *obj)
{
	if (obj->userptr.mm == NULL)
		return;

	kref_put_mutex(&obj->userptr.mm->kref,
		       __i915_mm_struct_free,
		       &to_i915(obj->base.dev)->mm_lock);
	obj->userptr.mm = NULL;
}

struct get_pages_work {
	struct work_struct work;
	struct drm_i915_gem_object *obj;
	struct task_struct *task;
};

#if IS_ENABLED(CONFIG_SWIOTLB)
#define swiotlb_active() swiotlb_nr_tbl()
#else
#define swiotlb_active() 0
#endif

static int
st_set_pages(struct sg_table **st, struct page **pvec, int num_pages)
{
	struct scatterlist *sg;
	int ret, n;

	*st = kmalloc(sizeof(**st), GFP_KERNEL);
	if (*st == NULL)
		return -ENOMEM;

	if (swiotlb_active()) {
		ret = sg_alloc_table(*st, num_pages, GFP_KERNEL);
		if (ret)
			goto err;

		for_each_sg((*st)->sgl, sg, num_pages, n)
			sg_set_page(sg, pvec[n], PAGE_SIZE, 0);
	} else {
		ret = sg_alloc_table_from_pages(*st, pvec, num_pages,
						0, num_pages << PAGE_SHIFT,
						GFP_KERNEL);
		if (ret)
			goto err;
	}

	return 0;

err:
	kfree(*st);
	*st = NULL;
	return ret;
}
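
/*
 * Aside (illustrative, not driver code): under swiotlb the table above
 * is built with exactly one page per segment, since bounce buffering
 * cannot cope with segments spanning page boundaries; otherwise
 * sg_alloc_table_from_pages() is free to coalesce physically contiguous
 * pages into longer segments. Either way, consumers can walk the result
 * page by page:
 *
 *	struct sg_page_iter iter;
 *
 *	for_each_sg_page(st->sgl, &iter, st->nents, 0)
 *		do_something(sg_page_iter_page(&iter));
 *
 * where do_something() is a hypothetical stand-in for the caller's
 * per-page operation.
 */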

static int
__i915_gem_userptr_set_pages(struct drm_i915_gem_object *obj,
			     struct page **pvec, int num_pages)
{
	int ret;

	ret = st_set_pages(&obj->pages, pvec, num_pages);
	if (ret)
		return ret;

	ret = i915_gem_gtt_prepare_object(obj);
	if (ret) {
		sg_free_table(obj->pages);
		kfree(obj->pages);
		obj->pages = NULL;
	}

	return ret;
}

static int
__i915_gem_userptr_set_active(struct drm_i915_gem_object *obj,
			      bool value)
{
	int ret = 0;

	/* During mm_invalidate_range we need to cancel any userptr that
	 * overlaps the range being invalidated. Doing so requires the
	 * struct_mutex, and that risks recursion. In order to cause
	 * recursion, the user must alias the userptr address space with
	 * a GTT mmapping (possible with a MAP_FIXED) - then when we have
	 * to invalidate that mmaping, mm_invalidate_range is called with
	 * the userptr address *and* the struct_mutex held. To prevent
	 * that we set a flag under the i915_mmu_notifier spinlock to
	 * indicate whether a mapping is being invalidated and only bind
	 * the userptr while it is not.
	 */
#if defined(CONFIG_MMU_NOTIFIER)
	if (obj->userptr.mmu_object == NULL)
		return 0;

	spin_lock(&obj->userptr.mmu_object->mn->lock);
	/* In order to serialise get_pages with an outstanding
	 * cancel_userptr, we must drop the struct_mutex and try again.
	 */
	if (!value)
		del_object(obj->userptr.mmu_object);
	else if (!work_pending(&obj->userptr.mmu_object->work))
		add_object(obj->userptr.mmu_object);
	else
		ret = -EAGAIN;
	spin_unlock(&obj->userptr.mmu_object->mn->lock);
#endif

	return ret;
}

static void
__i915_gem_userptr_get_pages_worker(struct work_struct *_work)
{
	struct get_pages_work *work = container_of(_work, typeof(*work), work);
	struct drm_i915_gem_object *obj = work->obj;
	struct drm_device *dev = obj->base.dev;
	const int npages = obj->base.size >> PAGE_SHIFT;
	struct page **pvec;
	int pinned, ret;

	ret = -ENOMEM;
	pinned = 0;

	pvec = kmalloc(npages*sizeof(struct page *),
		       GFP_TEMPORARY | __GFP_NOWARN | __GFP_NORETRY);
	if (pvec == NULL)
		pvec = drm_malloc_ab(npages, sizeof(struct page *));
	if (pvec != NULL) {
		struct mm_struct *mm = obj->userptr.mm->mm;

		ret = -EFAULT;
		if (atomic_inc_not_zero(&mm->mm_users)) {
			down_read(&mm->mmap_sem);
			while (pinned < npages) {
				ret = get_user_pages_remote
					(work->task, mm,
					 obj->userptr.ptr + pinned * PAGE_SIZE,
					 npages - pinned,
					 !obj->userptr.read_only, 0,
					 pvec + pinned, NULL);
				if (ret < 0)
					break;

				pinned += ret;
			}
			up_read(&mm->mmap_sem);
			mmput(mm);
		}
	}

	mutex_lock(&dev->struct_mutex);
	if (obj->userptr.work == &work->work) {
		if (pinned == npages) {
			ret = __i915_gem_userptr_set_pages(obj, pvec, npages);
			if (ret == 0) {
				list_add_tail(&obj->global_list,
					      &to_i915(dev)->mm.unbound_list);
				obj->get_page.sg = obj->pages->sgl;
				obj->get_page.last = 0;
				pinned = 0;
			}
		}
		obj->userptr.work = ERR_PTR(ret);
		if (ret)
			__i915_gem_userptr_set_active(obj, false);
	}

	obj->userptr.workers--;
	drm_gem_object_unreference(&obj->base);
	mutex_unlock(&dev->struct_mutex);

	release_pages(pvec, pinned, 0);
	drm_free_large(pvec);

	put_task_struct(work->task);
	kfree(work);
}

static int
__i915_gem_userptr_get_pages_schedule(struct drm_i915_gem_object *obj,
				      bool *active)
{
	struct get_pages_work *work;

	/* Spawn a worker so that we can acquire the
	 * user pages without holding our mutex. Access
	 * to the user pages requires mmap_sem, and we have
	 * a strict lock ordering of mmap_sem, struct_mutex -
	 * we already hold struct_mutex here and so cannot
	 * call gup without encountering a lock inversion.
	 *
	 * Userspace will keep on repeating the operation
	 * (thanks to EAGAIN) until either we hit the fast
	 * path or the worker completes. If the worker is
	 * cancelled or superseded, the task is still run
	 * but the results ignored. (This leads to
	 * complications that we may have a stray object
	 * refcount that we need to be wary of when
	 * checking for existing objects during creation.)
	 * If the worker encounters an error, it reports
	 * that error back to this function through
	 * obj->userptr.work = ERR_PTR.
	 */
	if (obj->userptr.workers >= I915_GEM_USERPTR_MAX_WORKERS)
		return -EAGAIN;

	work = kmalloc(sizeof(*work), GFP_KERNEL);
	if (work == NULL)
		return -ENOMEM;

	obj->userptr.work = &work->work;
	obj->userptr.workers++;

	work->obj = obj;
	drm_gem_object_reference(&obj->base);

	work->task = current;
	get_task_struct(work->task);

	INIT_WORK(&work->work, __i915_gem_userptr_get_pages_worker);
	schedule_work(&work->work);

	*active = true;
	return -EAGAIN;
}
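
/*
 * Sketch of the caller contract (illustration only; the real caller is
 * i915_gem_userptr_get_pages() below): scheduling always returns -EAGAIN
 * with *active set, and that -EAGAIN propagates back to userspace, which
 * retries the operation until the worker has either pinned the pages or
 * parked an error in obj->userptr.work:
 *
 *	active = false;
 *	ret = __i915_gem_userptr_get_pages_schedule(obj, &active);
 *	if (ret)
 *		__i915_gem_userptr_set_active(obj, active);
 *	return ret;
 */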

static int
i915_gem_userptr_get_pages(struct drm_i915_gem_object *obj)
{
	const int num_pages = obj->base.size >> PAGE_SHIFT;
	struct page **pvec;
	int pinned, ret;
	bool active;

	/* If userspace should engineer that these pages are replaced in
	 * the vma between us binding this page into the GTT and completion
	 * of rendering... Their loss. If they change the mapping of their
	 * pages they need to create a new bo to point to the new vma.
	 *
	 * However, that still leaves open the possibility of the vma
	 * being copied upon fork. Which falls under the same userspace
	 * synchronisation issue as a regular bo, except that this time
	 * the process may not be expecting that a particular piece of
	 * memory is tied to the GPU.
	 *
	 * Fortunately, we can hook into the mmu_notifier in order to
	 * discard the page references prior to anything nasty happening
	 * to the vma (discard or cloning) which should prevent the more
	 * egregious cases from causing harm.
	 */
	if (IS_ERR(obj->userptr.work)) {
		/* active flag was dropped already by the worker */
		ret = PTR_ERR(obj->userptr.work);
		obj->userptr.work = NULL;
		return ret;
	}
	if (obj->userptr.work)
		/* active flag is still set while the worker is in flight */
		return -EAGAIN;

	/* Let the mmu-notifier know that we have begun and need cancellation */
	ret = __i915_gem_userptr_set_active(obj, true);
	if (ret)
		return ret;

	pvec = NULL;
	pinned = 0;
	if (obj->userptr.mm->mm == current->mm) {
		pvec = kmalloc(num_pages*sizeof(struct page *),
			       GFP_TEMPORARY | __GFP_NOWARN | __GFP_NORETRY);
		if (pvec == NULL) {
			pvec = drm_malloc_ab(num_pages, sizeof(struct page *));
			if (pvec == NULL) {
				__i915_gem_userptr_set_active(obj, false);
				return -ENOMEM;
			}
		}

		pinned = __get_user_pages_fast(obj->userptr.ptr, num_pages,
					       !obj->userptr.read_only, pvec);
	}

	active = false;
	if (pinned < 0)
		ret = pinned, pinned = 0;
	else if (pinned < num_pages)
		ret = __i915_gem_userptr_get_pages_schedule(obj, &active);
	else
		ret = __i915_gem_userptr_set_pages(obj, pvec, num_pages);
	if (ret) {
		__i915_gem_userptr_set_active(obj, active);
		release_pages(pvec, pinned, 0);
	}
	drm_free_large(pvec);
	return ret;
}

static void
i915_gem_userptr_put_pages(struct drm_i915_gem_object *obj)
{
	struct sg_page_iter sg_iter;

	BUG_ON(obj->userptr.work != NULL);
	__i915_gem_userptr_set_active(obj, false);

	if (obj->madv != I915_MADV_WILLNEED)
		obj->dirty = 0;

	i915_gem_gtt_finish_object(obj);

	for_each_sg_page(obj->pages->sgl, &sg_iter, obj->pages->nents, 0) {
		struct page *page = sg_page_iter_page(&sg_iter);

		if (obj->dirty)
			set_page_dirty(page);

		mark_page_accessed(page);
		put_page(page);
	}
	obj->dirty = 0;

	sg_free_table(obj->pages);
	kfree(obj->pages);
}

static void
i915_gem_userptr_release(struct drm_i915_gem_object *obj)
{
	i915_gem_userptr_release__mmu_notifier(obj);
	i915_gem_userptr_release__mm_struct(obj);
}

static int
i915_gem_userptr_dmabuf_export(struct drm_i915_gem_object *obj)
{
	if (obj->userptr.mmu_object)
		return 0;

	return i915_gem_userptr_init__mmu_notifier(obj, 0);
}

static const struct drm_i915_gem_object_ops i915_gem_userptr_ops = {
	.flags = I915_GEM_OBJECT_HAS_STRUCT_PAGE,
	.get_pages = i915_gem_userptr_get_pages,
	.put_pages = i915_gem_userptr_put_pages,
	.dmabuf_export = i915_gem_userptr_dmabuf_export,
	.release = i915_gem_userptr_release,
};

/**
 * Creates a new mm object that wraps some normal memory from the process
 * context - user memory.
 *
 * We impose several restrictions upon the memory being mapped
 * into the GPU.
 * 1. It must be page aligned (both start/end addresses, i.e the whole object).
 * 2. It must be normal system memory, not a pointer into another map of IO
 *    space (e.g. it must not be a GTT mmapping of another object).
 * 3. We only allow a bo as large as we could in theory map into the GTT,
 *    that is we limit the size to the total size of the GTT.
 * 4. The bo is marked as being snoopable. The CPU cache prevails, as
 *    accesses through the GTT snoop the CPU cache.
 *
 * Synchronisation between multiple users and the GPU is left to userspace
 * through the normal set-domain-ioctl. The kernel will enforce that the
 * GPU relinquishes the VMA before it is returned back to the system
 * i.e. upon free(), munmap() or process termination. However, the userspace
 * object must also be invalidated whenever the backing vma is modified,
 * which we track via the mmu_notifier: if the vma is unmapped or remapped
 * from under the object, the pinned pages are released and the object is
 * decoupled from its backing storage.
 *
 * If you think this is a good interface to use to pass GPU memory between
 * drivers, please use dma-buf instead. In fact, wherever possible use
 * dma-buf instead.
 */
int
i915_gem_userptr_ioctl(struct drm_device *dev, void *data, struct drm_file *file)
{
	struct drm_i915_gem_userptr *args = data;
	struct drm_i915_gem_object *obj;
	int ret;
	u32 handle;

	if (args->flags & ~(I915_USERPTR_READ_ONLY |
			    I915_USERPTR_UNSYNCHRONIZED))
		return -EINVAL;

	if (offset_in_page(args->user_ptr | args->user_size))
		return -EINVAL;

	if (!access_ok(args->flags & I915_USERPTR_READ_ONLY ? VERIFY_READ : VERIFY_WRITE,
		       (char __user *)(unsigned long)args->user_ptr, args->user_size))
		return -EFAULT;

	if (args->flags & I915_USERPTR_READ_ONLY) {
		/* On almost all of the current hw, we cannot tell the GPU that
		 * a page is readonly, so this is just a placeholder in the uAPI.
		 */
		return -ENODEV;
	}

	obj = i915_gem_object_alloc(dev);
	if (obj == NULL)
		return -ENOMEM;

	drm_gem_private_object_init(dev, &obj->base, args->user_size);
	i915_gem_object_init(obj, &i915_gem_userptr_ops);
	obj->cache_level = I915_CACHE_LLC;
	obj->base.write_domain = I915_GEM_DOMAIN_CPU;
	obj->base.read_domains = I915_GEM_DOMAIN_CPU;

	obj->userptr.ptr = args->user_ptr;
	obj->userptr.read_only = !!(args->flags & I915_USERPTR_READ_ONLY);

	/* And keep a pointer to the current->mm for resolving the user pages
	 * at binding. This means that we need to hook into the mmu_notifier
	 * in order to detect if the mmu is destroyed.
	 */
	ret = i915_gem_userptr_init__mm_struct(obj);
	if (ret == 0)
		ret = i915_gem_userptr_init__mmu_notifier(obj, args->flags);
	if (ret == 0)
		ret = drm_gem_handle_create(file, &obj->base, &handle);

	/* drop reference from allocate - handle holds it now */
	drm_gem_object_unreference_unlocked(&obj->base);
	if (ret)
		return ret;

	args->handle = handle;
	return 0;
}
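
/*
 * Example userspace usage (an illustrative sketch, not part of this
 * file; assumes libdrm's drmIoctl() wrapper, an open i915 fd, and a
 * page-aligned allocation whose size is a multiple of the page size):
 *
 *	struct drm_i915_gem_userptr arg = {
 *		.user_ptr = (__u64)(uintptr_t)ptr,
 *		.user_size = size,
 *		.flags = 0,
 *	};
 *
 *	if (drmIoctl(fd, DRM_IOCTL_I915_GEM_USERPTR, &arg))
 *		return -errno;
 *
 * On success, arg.handle names a GEM object backed by ptr[0..size).
 */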

int
i915_gem_init_userptr(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	mutex_init(&dev_priv->mm_lock);
	hash_init(dev_priv->mm_structs);
	return 0;
}