/*
 * SPDX-License-Identifier: MIT
 *
 * Copyright © 2012-2014 Intel Corporation
 */
#include <linux/mmu_context.h>
#include <linux/mempolicy.h>
#include <linux/swap.h>
#include <linux/sched/mm.h>

#include "i915_drv.h"
#include "i915_gem_ioctls.h"
#include "i915_gem_object.h"
#include "i915_scatterlist.h"

#ifdef CONFIG_MMU_NOTIFIER

/**
 * i915_gem_userptr_invalidate - callback to notify about mm change
 *
 * @mni: the range (mm) is about to update
 * @range: details on the invalidation
 * @cur_seq: Value to pass to mmu_interval_set_seq()
 *
 * Block for operations on BOs to finish and mark pages as accessed and
 * possibly dirty.
 */
static bool i915_gem_userptr_invalidate(struct mmu_interval_notifier *mni,
					const struct mmu_notifier_range *range,
					unsigned long cur_seq)
{
	struct drm_i915_gem_object *obj = container_of(mni, struct drm_i915_gem_object, userptr.notifier);
	struct drm_i915_private *i915 = to_i915(obj->base.dev);
	long r;

	if (!mmu_notifier_range_blockable(range))
		return false;

	write_lock(&i915->mm.notifier_lock);

	mmu_interval_set_seq(mni, cur_seq);

	write_unlock(&i915->mm.notifier_lock);

	/*
	 * We don't wait when the process is exiting. This is valid
	 * because the object will be cleaned up anyway.
	 *
	 * This is also temporarily required as a hack, because we
	 * cannot currently force non-consistent batch buffers to preempt
	 * and reschedule by waiting on it, hanging processes on exit.
	 */
	if (current->flags & PF_EXITING)
		return true;

	/* we will unbind on next submission, still have userptr pins */
	r = dma_resv_wait_timeout(obj->base.resv, true, false,
				  MAX_SCHEDULE_TIMEOUT);
	if (r <= 0)
		drm_err(&i915->drm, "(%ld) failed to wait for idle\n", r);

	return true;
}

static const struct mmu_interval_notifier_ops i915_gem_userptr_notifier_ops = {
	.invalidate = i915_gem_userptr_invalidate,
};

static int
i915_gem_userptr_init__mmu_notifier(struct drm_i915_gem_object *obj)
{
	return mmu_interval_notifier_insert(&obj->userptr.notifier, current->mm,
					    obj->userptr.ptr, obj->base.size,
					    &i915_gem_userptr_notifier_ops);
}

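/*
 * Paired with the references taken in submit_init and get_pages: when the
 * last reference goes away, unpin the user pages and free the page vector.
 */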
static void i915_gem_object_userptr_drop_ref(struct drm_i915_gem_object *obj)
{
	struct page **pvec = NULL;

	assert_object_held_shared(obj);

	if (!--obj->userptr.page_ref) {
		pvec = obj->userptr.pvec;
		obj->userptr.pvec = NULL;
	}
	GEM_BUG_ON(obj->userptr.page_ref < 0);

	if (pvec) {
		const unsigned long num_pages = obj->base.size >> PAGE_SHIFT;

		unpin_user_pages(pvec, num_pages);
		kvfree(pvec);
	}
}

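/*
 * Called with the object lock held, via ____i915_gem_object_get_pages().
 * The user pages must already have been pinned by submit_init; if that
 * reference has meanwhile gone away (page_ref == 0), return -EAGAIN so
 * the caller restarts the pinning dance.
 */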
static int i915_gem_userptr_get_pages(struct drm_i915_gem_object *obj)
{
	const unsigned long num_pages = obj->base.size >> PAGE_SHIFT;
	unsigned int max_segment = i915_sg_segment_size();
	struct sg_table *st;
	unsigned int sg_page_sizes;
	struct page **pvec;
	int ret;

	st = kmalloc(sizeof(*st), GFP_KERNEL);
	if (!st)
		return -ENOMEM;

	if (!obj->userptr.page_ref) {
		ret = -EAGAIN;
		goto err_free;
	}

	obj->userptr.page_ref++;
	pvec = obj->userptr.pvec;

alloc_table:
	ret = sg_alloc_table_from_pages_segment(st, pvec, num_pages, 0,
						num_pages << PAGE_SHIFT,
						max_segment, GFP_KERNEL);
	if (ret)
		goto err;

	ret = i915_gem_gtt_prepare_pages(obj, st);
	if (ret) {
		sg_free_table(st);

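		/*
		 * The DMA mapping may have failed because the coalesced
		 * segments were too large for the underlying hardware or
		 * iommu; retry with the table split into single-page
		 * segments.
		 */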
		if (max_segment > PAGE_SIZE) {
			max_segment = PAGE_SIZE;
			goto alloc_table;
		}

		goto err;
	}

	sg_page_sizes = i915_sg_dma_sizes(st->sgl);

	__i915_gem_object_set_pages(obj, st, sg_page_sizes);

	return 0;

err:
	i915_gem_object_userptr_drop_ref(obj);
err_free:
	kfree(st);
	return ret;
}

static void
i915_gem_userptr_put_pages(struct drm_i915_gem_object *obj,
			   struct sg_table *pages)
{
	struct sgt_iter sgt_iter;
	struct page *page;

	if (!pages)
		return;

	__i915_gem_object_release_shmem(obj, pages, true);
	i915_gem_gtt_finish_pages(obj, pages);

	/*
	 * We always mark objects as dirty when they are used by the GPU,
	 * just in case. However, if we set the vma as being read-only we
	 * know that the object will never have been written to.
	 */
	if (i915_gem_object_is_readonly(obj))
		obj->mm.dirty = false;

	for_each_sgt_page(page, sgt_iter, pages) {
		if (obj->mm.dirty && trylock_page(page)) {
			/*
			 * As this may not be anonymous memory (e.g. shmem)
			 * but exist on a real mapping, we have to lock
			 * the page in order to dirty it -- holding
			 * the page reference is not sufficient to
			 * prevent the inode from being truncated.
			 * Play safe and take the lock.
			 *
			 * However...!
			 *
			 * The mmu-notifier can be invalidated for a
			 * migrate_page, that is already holding the lock
			 * on the page. Such a try_to_unmap() will result
			 * in us calling put_pages() and so recursively try
			 * to lock the page. We avoid that deadlock with
			 * a trylock_page() and in exchange we risk missing
			 * some page dirtying.
			 */
			set_page_dirty(page);
			unlock_page(page);
		}

		mark_page_accessed(page);
	}
	obj->mm.dirty = false;

	sg_free_table(pages);
	kfree(pages);

	i915_gem_object_userptr_drop_ref(obj);
}

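/*
 * Drop all GPU bindings and release the current page set, so that a
 * subsequent submit_init can repopulate the object with fresh pages.
 */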
static int i915_gem_object_userptr_unbind(struct drm_i915_gem_object *obj)
{
	struct sg_table *pages;
	int err;

	err = i915_gem_object_unbind(obj, I915_GEM_OBJECT_UNBIND_ACTIVE);
	if (err)
		return err;

	if (GEM_WARN_ON(i915_gem_object_has_pinned_pages(obj)))
		return -EBUSY;

	assert_object_held(obj);

	pages = __i915_gem_object_unset_pages(obj);
	if (!IS_ERR_OR_NULL(pages))
		i915_gem_userptr_put_pages(obj, pages);

	return err;
}

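/*
 * Pin the backing store ahead of execbuf: sample the notifier seqno, pin
 * the user pages with pin_user_pages_fast() outside the object lock (GUP
 * may fault and take mm locks), then install them under the lock provided
 * no invalidation ran in between. Must be paired with
 * i915_gem_object_userptr_submit_done() once the request is queued.
 */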
int i915_gem_object_userptr_submit_init(struct drm_i915_gem_object *obj)
{
	const unsigned long num_pages = obj->base.size >> PAGE_SHIFT;
	struct page **pvec;
	unsigned int gup_flags = 0;
	unsigned long notifier_seq;
	int pinned, ret;

	if (obj->userptr.notifier.mm != current->mm)
		return -EFAULT;

	notifier_seq = mmu_interval_read_begin(&obj->userptr.notifier);

	ret = i915_gem_object_lock_interruptible(obj, NULL);
	if (ret)
		return ret;

	if (notifier_seq == obj->userptr.notifier_seq && obj->userptr.pvec) {
		i915_gem_object_unlock(obj);
		return 0;
	}

	ret = i915_gem_object_userptr_unbind(obj);
	i915_gem_object_unlock(obj);
	if (ret)
		return ret;

	pvec = kvmalloc_array(num_pages, sizeof(struct page *), GFP_KERNEL);
	if (!pvec)
		return -ENOMEM;

	if (!i915_gem_object_is_readonly(obj))
		gup_flags |= FOLL_WRITE;

	pinned = ret = 0;
	while (pinned < num_pages) {
		ret = pin_user_pages_fast(obj->userptr.ptr + pinned * PAGE_SIZE,
					  num_pages - pinned, gup_flags,
					  &pvec[pinned]);
		if (ret < 0)
			goto out;

		pinned += ret;
	}
	ret = 0;

	ret = i915_gem_object_lock_interruptible(obj, NULL);
	if (ret)
		goto out;

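	/*
	 * The pages were pinned without holding the object lock, so recheck
	 * the notifier seqno now that we do: if an invalidation ran in
	 * between, the pvec is already stale and we must retry. If pages
	 * are currently installed, compare against their seqno instead.
	 */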
	if (mmu_interval_read_retry(&obj->userptr.notifier,
				    !obj->userptr.page_ref ? notifier_seq :
				    obj->userptr.notifier_seq)) {
		ret = -EAGAIN;
		goto out_unlock;
	}

	if (!obj->userptr.page_ref++) {
		obj->userptr.pvec = pvec;
		obj->userptr.notifier_seq = notifier_seq;
		pvec = NULL;
		ret = ____i915_gem_object_get_pages(obj);
	}

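	/* Drop the transient pin; on success get_pages holds its own reference. */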
	obj->userptr.page_ref--;

out_unlock:
	i915_gem_object_unlock(obj);

out:
	if (pvec) {
		unpin_user_pages(pvec, pinned);
		kvfree(pvec);
	}

	return ret;
}

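/*
 * Final check before the pinned pages are used by the GPU: if the interval
 * notifier fired after submit_init sampled the seqno, the pages are stale
 * and the whole submission must be restarted.
 */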
int i915_gem_object_userptr_submit_done(struct drm_i915_gem_object *obj)
{
	if (mmu_interval_read_retry(&obj->userptr.notifier,
				    obj->userptr.notifier_seq)) {
		/* We collided with the mmu notifier, need to retry */

		return -EAGAIN;
	}

	return 0;
}

int i915_gem_object_userptr_validate(struct drm_i915_gem_object *obj)
{
	int err;

	err = i915_gem_object_userptr_submit_init(obj);
	if (err)
		return err;

	err = i915_gem_object_lock_interruptible(obj, NULL);
	if (!err) {
		/*
		 * Since we only check validity, not use the pages,
		 * it doesn't matter if we collide with the mmu notifier,
		 * and -EAGAIN handling is not required.
		 */
		err = i915_gem_object_pin_pages(obj);
		if (!err)
			i915_gem_object_unpin_pages(obj);

		i915_gem_object_unlock(obj);
	}

	return err;
}

static void
i915_gem_userptr_release(struct drm_i915_gem_object *obj)
{
	GEM_WARN_ON(obj->userptr.page_ref);

	mmu_interval_notifier_remove(&obj->userptr.notifier);
	obj->userptr.notifier.mm = NULL;
}

static int
i915_gem_userptr_dmabuf_export(struct drm_i915_gem_object *obj)
{
	drm_dbg(obj->base.dev, "Exporting userptr no longer allowed\n");

	return -EINVAL;
}

static int
i915_gem_userptr_pwrite(struct drm_i915_gem_object *obj,
			const struct drm_i915_gem_pwrite *args)
{
	drm_dbg(obj->base.dev, "pwrite to userptr no longer allowed\n");

	return -EINVAL;
}

static int
i915_gem_userptr_pread(struct drm_i915_gem_object *obj,
		       const struct drm_i915_gem_pread *args)
{
	drm_dbg(obj->base.dev, "pread from userptr no longer allowed\n");

	return -EINVAL;
}

static const struct drm_i915_gem_object_ops i915_gem_userptr_ops = {
	.name = "i915_gem_object_userptr",
	.flags = I915_GEM_OBJECT_IS_SHRINKABLE |
		 I915_GEM_OBJECT_NO_MMAP |
		 I915_GEM_OBJECT_IS_PROXY,
	.get_pages = i915_gem_userptr_get_pages,
	.put_pages = i915_gem_userptr_put_pages,
	.dmabuf_export = i915_gem_userptr_dmabuf_export,
	.pwrite = i915_gem_userptr_pwrite,
	.pread = i915_gem_userptr_pread,
	.release = i915_gem_userptr_release,
};

#endif

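/*
 * Walk the VMAs covering [addr, addr + len) and check that the whole range
 * is backed by ordinary mappings: no holes, and no PFN or mixed maps that
 * cannot be pinned with pin_user_pages().
 */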
static int
probe_range(struct mm_struct *mm, unsigned long addr, unsigned long len)
{
	const unsigned long end = addr + len;
	struct vm_area_struct *vma;
	int ret = -EFAULT;

	mmap_read_lock(mm);
	for (vma = find_vma(mm, addr); vma; vma = vma->vm_next) {
		/* Check for holes, note that we also update the addr below */
		if (vma->vm_start > addr)
			break;

		if (vma->vm_flags & (VM_PFNMAP | VM_MIXEDMAP))
			break;

		if (vma->vm_end >= end) {
			ret = 0;
			break;
		}

		addr = vma->vm_end;
	}
	mmap_read_unlock(mm);

	return ret;
}

/*
 * Creates a new mm object that wraps some normal memory from the process
 * context - user memory.
 *
 * We impose several restrictions upon the memory being mapped
 * into the GPU.
 * 1. It must be page aligned (both start/end addresses, i.e ptr and size).
 * 2. It must be normal system memory, not a pointer into another map of IO
 *    space (e.g. it must not be a GTT mmapping of another object).
 * 3. We only allow a bo as large as we could in theory map into the GTT,
 *    that is we limit the size to the total size of the GTT.
 * 4. The bo is marked as being snoopable. The CPU cache is very very slow.
 *
 * Synchronisation between multiple users and the GPU is left to userspace
 * through the normal set-domain-ioctl. The kernel will enforce that the
 * GPU relinquishes the VMA before it is returned back to the system
 * i.e. upon free(), munmap() or process termination. However, the userspace
 * malloc typically caches memory until it is free, and so will need to be
 * notified when the GPU has used those bytes (read/write in shaders, vertex
 * data, etc).
 */
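/*
 * A rough userspace usage sketch (illustrative only: error handling is
 * omitted, ptr and size must both be page-aligned, and use_gem_handle()
 * merely stands in for whatever the caller does with the new handle):
 *
 *	struct drm_i915_gem_userptr arg = {
 *		.user_ptr = (uintptr_t)ptr,
 *		.user_size = size,
 *	};
 *	if (ioctl(fd, DRM_IOCTL_I915_GEM_USERPTR, &arg) == 0)
 *		use_gem_handle(arg.handle);
 */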
int
i915_gem_userptr_ioctl(struct drm_device *dev,
		       void *data,
		       struct drm_file *file)
{
	static struct lock_class_key __maybe_unused lock_class;
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct drm_i915_gem_userptr *args = data;
	struct drm_i915_gem_object __maybe_unused *obj;
	int __maybe_unused ret;
	u32 __maybe_unused handle;

	if (!HAS_LLC(dev_priv) && !HAS_SNOOP(dev_priv)) {
		/*
		 * We cannot support coherent userptr objects on hw without
		 * LLC and broken snooping.
		 */
		return -ENODEV;
	}

	if (args->flags & ~(I915_USERPTR_READ_ONLY |
			    I915_USERPTR_UNSYNCHRONIZED |
			    I915_USERPTR_PROBE))
		return -EINVAL;

	if (i915_gem_object_size_2big(args->user_size))
		return -E2BIG;

	if (!args->user_size)
		return -EINVAL;

	if (offset_in_page(args->user_ptr | args->user_size))
		return -EINVAL;

	if (!access_ok((char __user *)(unsigned long)args->user_ptr, args->user_size))
		return -EFAULT;

	if (args->flags & I915_USERPTR_UNSYNCHRONIZED)
		return -ENODEV;

	if (args->flags & I915_USERPTR_READ_ONLY) {
		/*
		 * On almost all of the older hw, we cannot tell the GPU that
		 * a page is readonly.
		 */
		if (!dev_priv->gt.vm->has_read_only)
			return -ENODEV;
	}

	if (args->flags & I915_USERPTR_PROBE) {
		/*
		 * Check that the range pointed to represents valid memory
		 * and that we can indeed pin it.
		 */
		ret = probe_range(current->mm, args->user_ptr, args->user_size);
		if (ret)
			return ret;
	}

#ifdef CONFIG_MMU_NOTIFIER
	obj = i915_gem_object_alloc();
	if (obj == NULL)
		return -ENOMEM;

	drm_gem_private_object_init(dev, &obj->base, args->user_size);
	i915_gem_object_init(obj, &i915_gem_userptr_ops, &lock_class, 0);
	obj->mem_flags = I915_BO_FLAG_STRUCT_PAGE;
	obj->read_domains = I915_GEM_DOMAIN_CPU;
	obj->write_domain = I915_GEM_DOMAIN_CPU;
	i915_gem_object_set_cache_coherency(obj, I915_CACHE_LLC);

	obj->userptr.ptr = args->user_ptr;
	obj->userptr.notifier_seq = ULONG_MAX;
	if (args->flags & I915_USERPTR_READ_ONLY)
		i915_gem_object_set_readonly(obj);

	/*
	 * And keep a pointer to the current->mm for resolving the user pages
	 * at binding. This means that we need to hook into the mmu_notifier
	 * in order to detect if the mmu is destroyed.
	 */
	ret = i915_gem_userptr_init__mmu_notifier(obj);
	if (ret == 0)
		ret = drm_gem_handle_create(file, &obj->base, &handle);

	/* drop reference from allocate - handle holds it now */
	i915_gem_object_put(obj);
	if (ret)
		return ret;

	args->handle = handle;
	return 0;
#else
	return -ENODEV;
#endif
}

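/*
 * One-time setup for userptr support: initialise the lock taken by the
 * mmu-notifier invalidate callback around mmu_interval_set_seq().
 */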
int i915_gem_init_userptr(struct drm_i915_private *dev_priv)
{
#ifdef CONFIG_MMU_NOTIFIER
	rwlock_init(&dev_priv->mm.notifier_lock);
#endif

	return 0;
}

void i915_gem_cleanup_userptr(struct drm_i915_private *dev_priv)
{
}