1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37#include <linux/mmu_context.h>
38#include <linux/mempolicy.h>
39#include <linux/swap.h>
40#include <linux/sched/mm.h>
41
42#include "i915_drv.h"
43#include "i915_gem_ioctls.h"
44#include "i915_gem_object.h"
45#include "i915_scatterlist.h"
46
47#ifdef CONFIG_MMU_NOTIFIER
48
49
50
51
52
53
54
55
56
57
58
/*
 * MMU interval notifier callback: the CPU page tables backing this userptr
 * range are about to change, so invalidate our cached pinned pages and wait
 * for any GPU activity on the object to finish.
 *
 * Returns false only when the caller cannot block (we need to sleep on the
 * dma_resv fences below); returning true means the invalidation proceeded.
 */
static bool i915_gem_userptr_invalidate(struct mmu_interval_notifier *mni,
					const struct mmu_notifier_range *range,
					unsigned long cur_seq)
{
	struct drm_i915_gem_object *obj = container_of(mni, struct drm_i915_gem_object, userptr.notifier);
	struct drm_i915_private *i915 = to_i915(obj->base.dev);
	long r;

	/* We must be able to sleep (dma_resv wait) to honour this request. */
	if (!mmu_notifier_range_blockable(range))
		return false;

	write_lock(&i915->mm.notifier_lock);

	/*
	 * Bump the interval sequence under the writer side of notifier_lock so
	 * that concurrent mmu_interval_read_retry() callers observe the
	 * invalidation and re-pin their pages (see submit_init/submit_done).
	 */
	mmu_interval_set_seq(mni, cur_seq);

	write_unlock(&i915->mm.notifier_lock);

	/*
	 * If the process is exiting there is no point in waiting for the GPU;
	 * the mm is going away regardless, so skip straight to success.
	 */
	if (current->flags & PF_EXITING)
		return true;

	/* Wait for all fences (read and write) on the object to signal. */
	r = dma_resv_wait_timeout(obj->base.resv, true, false,
				  MAX_SCHEDULE_TIMEOUT);
	if (r <= 0)
		drm_err(&i915->drm, "(%ld) failed to wait for idle\n", r);

	return true;
}
95
/* Interval-notifier vtable: only invalidation is needed for userptr. */
static const struct mmu_interval_notifier_ops i915_gem_userptr_notifier_ops = {
	.invalidate = i915_gem_userptr_invalidate,
};
99
100static int
101i915_gem_userptr_init__mmu_notifier(struct drm_i915_gem_object *obj)
102{
103 return mmu_interval_notifier_insert(&obj->userptr.notifier, current->mm,
104 obj->userptr.ptr, obj->base.size,
105 &i915_gem_userptr_notifier_ops);
106}
107
108static void i915_gem_object_userptr_drop_ref(struct drm_i915_gem_object *obj)
109{
110 struct page **pvec = NULL;
111
112 assert_object_held_shared(obj);
113
114 if (!--obj->userptr.page_ref) {
115 pvec = obj->userptr.pvec;
116 obj->userptr.pvec = NULL;
117 }
118 GEM_BUG_ON(obj->userptr.page_ref < 0);
119
120 if (pvec) {
121 const unsigned long num_pages = obj->base.size >> PAGE_SHIFT;
122
123 unpin_user_pages(pvec, num_pages);
124 kvfree(pvec);
125 }
126}
127
/*
 * get_pages backend for userptr objects: build an sg_table over the
 * already-pinned user pages (pinned by submit_init) and map it for the GTT.
 *
 * Returns 0 on success; -EAGAIN if no pages are currently pinned (the caller
 * must go through i915_gem_object_userptr_submit_init() first); or a negative
 * errno from table allocation / DMA mapping.
 */
static int i915_gem_userptr_get_pages(struct drm_i915_gem_object *obj)
{
	const unsigned long num_pages = obj->base.size >> PAGE_SHIFT;
	unsigned int max_segment = i915_sg_segment_size();
	struct sg_table *st;
	unsigned int sg_page_sizes;
	struct page **pvec;
	int ret;

	st = kmalloc(sizeof(*st), GFP_KERNEL);
	if (!st)
		return -ENOMEM;

	/* No pinned pages yet: tell the caller to retry via submit_init. */
	if (!obj->userptr.page_ref) {
		ret = -EAGAIN;
		goto err_free;
	}

	/* Take our own reference on the pinned page vector. */
	obj->userptr.page_ref++;
	pvec = obj->userptr.pvec;

alloc_table:
	ret = sg_alloc_table_from_pages_segment(st, pvec, num_pages, 0,
						num_pages << PAGE_SHIFT,
						max_segment, GFP_KERNEL);
	if (ret)
		goto err;

	ret = i915_gem_gtt_prepare_pages(obj, st);
	if (ret) {
		sg_free_table(st);

		/*
		 * DMA mapping can fail with large segments (e.g. swiotlb
		 * limits); retry once with single-page segments before
		 * giving up.
		 */
		if (max_segment > PAGE_SIZE) {
			max_segment = PAGE_SIZE;
			goto alloc_table;
		}

		goto err;
	}

	WARN_ON_ONCE(!(obj->cache_coherent & I915_BO_CACHE_COHERENT_FOR_WRITE));
	if (i915_gem_object_can_bypass_llc(obj))
		obj->cache_dirty = true;

	sg_page_sizes = i915_sg_dma_sizes(st->sgl);
	__i915_gem_object_set_pages(obj, st, sg_page_sizes);

	return 0;

err:
	/* Undo the page_ref taken above (may unpin if we were the last). */
	i915_gem_object_userptr_drop_ref(obj);
err_free:
	kfree(st);
	return ret;
}
183
/*
 * put_pages backend for userptr objects: unmap the sg_table from the GTT,
 * propagate GPU writes back to the user pages (mark them dirty so the core
 * mm writes them out), and drop our reference on the pinned page vector.
 */
static void
i915_gem_userptr_put_pages(struct drm_i915_gem_object *obj,
			   struct sg_table *pages)
{
	struct sgt_iter sgt_iter;
	struct page *page;

	if (!pages)
		return;

	__i915_gem_object_release_shmem(obj, pages, true);
	i915_gem_gtt_finish_pages(obj, pages);

	/*
	 * A read-only mapping cannot have been written by the GPU, so there
	 * is nothing to flush back to the user pages.
	 */
	if (i915_gem_object_is_readonly(obj))
		obj->mm.dirty = false;

	for_each_sgt_page(page, sgt_iter, pages) {
		if (obj->mm.dirty && trylock_page(page)) {
			/*
			 * Mark the page dirty under the page lock so the
			 * dirty state is not lost; if trylock fails we skip
			 * the page rather than risk deadlock here.
			 * NOTE(review): best-effort — a contended page is
			 * left unmarked; presumably acceptable for this path.
			 */
			set_page_dirty(page);
			unlock_page(page);
		}

		mark_page_accessed(page);
	}
	obj->mm.dirty = false;

	sg_free_table(pages);
	kfree(pages);

	i915_gem_object_userptr_drop_ref(obj);
}
238
239static int i915_gem_object_userptr_unbind(struct drm_i915_gem_object *obj)
240{
241 struct sg_table *pages;
242 int err;
243
244 err = i915_gem_object_unbind(obj, I915_GEM_OBJECT_UNBIND_ACTIVE);
245 if (err)
246 return err;
247
248 if (GEM_WARN_ON(i915_gem_object_has_pinned_pages(obj)))
249 return -EBUSY;
250
251 assert_object_held(obj);
252
253 pages = __i915_gem_object_unset_pages(obj);
254 if (!IS_ERR_OR_NULL(pages))
255 i915_gem_userptr_put_pages(obj, pages);
256
257 return err;
258}
259
/*
 * Prepare a userptr object for execbuf submission: (re)pin the user pages
 * with GUP and install them on the object, synchronised against concurrent
 * MMU notifier invalidations via the interval-notifier sequence protocol.
 *
 * Must be called from the process that created the userptr (same mm).
 * Returns 0 on success, -EAGAIN if an invalidation raced with us (caller
 * should retry), or another negative errno.
 */
int i915_gem_object_userptr_submit_init(struct drm_i915_gem_object *obj)
{
	const unsigned long num_pages = obj->base.size >> PAGE_SHIFT;
	struct page **pvec;
	unsigned int gup_flags = 0;
	unsigned long notifier_seq;
	int pinned, ret;

	/* userptr is only valid within the mm it was registered against. */
	if (obj->userptr.notifier.mm != current->mm)
		return -EFAULT;

	/* Begin the seqlock-style read section; validated again below. */
	notifier_seq = mmu_interval_read_begin(&obj->userptr.notifier);

	ret = i915_gem_object_lock_interruptible(obj, NULL);
	if (ret)
		return ret;

	/* Fast path: pages already pinned and no invalidation since. */
	if (notifier_seq == obj->userptr.notifier_seq && obj->userptr.pvec) {
		i915_gem_object_unlock(obj);
		return 0;
	}

	ret = i915_gem_object_userptr_unbind(obj);
	i915_gem_object_unlock(obj);
	if (ret)
		return ret;

	pvec = kvmalloc_array(num_pages, sizeof(struct page *), GFP_KERNEL);
	if (!pvec)
		return -ENOMEM;

	if (!i915_gem_object_is_readonly(obj))
		gup_flags |= FOLL_WRITE;

	/* Pin the whole range; GUP may make partial progress per call. */
	pinned = ret = 0;
	while (pinned < num_pages) {
		ret = pin_user_pages_fast(obj->userptr.ptr + pinned * PAGE_SIZE,
					  num_pages - pinned, gup_flags,
					  &pvec[pinned]);
		if (ret < 0)
			goto out;

		pinned += ret;
	}
	ret = 0;

	ret = i915_gem_object_lock_interruptible(obj, NULL);
	if (ret)
		goto out;

	/*
	 * Re-check the notifier sequence under the object lock. If another
	 * thread already installed pages (page_ref != 0) validate against
	 * the seq it recorded, otherwise against the one we sampled above.
	 */
	if (mmu_interval_read_retry(&obj->userptr.notifier,
				    !obj->userptr.page_ref ? notifier_seq :
							     obj->userptr.notifier_seq)) {
		ret = -EAGAIN;
		goto out_unlock;
	}

	/* First user installs the freshly pinned vector on the object. */
	if (!obj->userptr.page_ref++) {
		obj->userptr.pvec = pvec;
		obj->userptr.notifier_seq = notifier_seq;
		pvec = NULL;
		ret = ____i915_gem_object_get_pages(obj);
	}

	/* Drop the temporary ref taken just above (get_pages holds its own). */
	obj->userptr.page_ref--;

out_unlock:
	i915_gem_object_unlock(obj);

out:
	/* If ownership of pvec was not transferred, unpin and free it here. */
	if (pvec) {
		unpin_user_pages(pvec, pinned);
		kvfree(pvec);
	}

	return ret;
}
337
338int i915_gem_object_userptr_submit_done(struct drm_i915_gem_object *obj)
339{
340 if (mmu_interval_read_retry(&obj->userptr.notifier,
341 obj->userptr.notifier_seq)) {
342
343
344 return -EAGAIN;
345 }
346
347 return 0;
348}
349
350int i915_gem_object_userptr_validate(struct drm_i915_gem_object *obj)
351{
352 int err;
353
354 err = i915_gem_object_userptr_submit_init(obj);
355 if (err)
356 return err;
357
358 err = i915_gem_object_lock_interruptible(obj, NULL);
359 if (!err) {
360
361
362
363
364
365 err = i915_gem_object_pin_pages(obj);
366 if (!err)
367 i915_gem_object_unpin_pages(obj);
368
369 i915_gem_object_unlock(obj);
370 }
371
372 return err;
373}
374
/*
 * Final release hook: tear down the MMU interval notifier. By this point all
 * page references must already have been dropped.
 */
static void
i915_gem_userptr_release(struct drm_i915_gem_object *obj)
{
	GEM_WARN_ON(obj->userptr.page_ref);

	mmu_interval_notifier_remove(&obj->userptr.notifier);
	/* Clearing ->mm marks the notifier as no longer registered. */
	obj->userptr.notifier.mm = NULL;
}
383
/* dma-buf export of userptr objects is intentionally disallowed. */
static int
i915_gem_userptr_dmabuf_export(struct drm_i915_gem_object *obj)
{
	drm_dbg(obj->base.dev, "Exporting userptr no longer allowed\n");

	return -EINVAL;
}
391
/* pwrite into userptr objects is intentionally disallowed. */
static int
i915_gem_userptr_pwrite(struct drm_i915_gem_object *obj,
			const struct drm_i915_gem_pwrite *args)
{
	drm_dbg(obj->base.dev, "pwrite to userptr no longer allowed\n");

	return -EINVAL;
}
400
/* pread from userptr objects is intentionally disallowed. */
static int
i915_gem_userptr_pread(struct drm_i915_gem_object *obj,
		       const struct drm_i915_gem_pread *args)
{
	drm_dbg(obj->base.dev, "pread from userptr no longer allowed\n");

	return -EINVAL;
}
409
/* GEM object backend vtable for userptr objects. */
static const struct drm_i915_gem_object_ops i915_gem_userptr_ops = {
	.name = "i915_gem_object_userptr",
	.flags = I915_GEM_OBJECT_IS_SHRINKABLE |
		 I915_GEM_OBJECT_NO_MMAP |
		 I915_GEM_OBJECT_IS_PROXY,
	.get_pages = i915_gem_userptr_get_pages,
	.put_pages = i915_gem_userptr_put_pages,
	.dmabuf_export = i915_gem_userptr_dmabuf_export,
	.pwrite = i915_gem_userptr_pwrite,
	.pread = i915_gem_userptr_pread,
	.release = i915_gem_userptr_release,
};
422
423#endif
424
425static int
426probe_range(struct mm_struct *mm, unsigned long addr, unsigned long len)
427{
428 const unsigned long end = addr + len;
429 struct vm_area_struct *vma;
430 int ret = -EFAULT;
431
432 mmap_read_lock(mm);
433 for (vma = find_vma(mm, addr); vma; vma = vma->vm_next) {
434
435 if (vma->vm_start > addr)
436 break;
437
438 if (vma->vm_flags & (VM_PFNMAP | VM_MIXEDMAP))
439 break;
440
441 if (vma->vm_end >= end) {
442 ret = 0;
443 break;
444 }
445
446 addr = vma->vm_end;
447 }
448 mmap_read_unlock(mm);
449
450 return ret;
451}
452
453
454
455
456
457
458
459
460
461
462
463
464
465
466
467
468
469
470
471
472
473
474
475
476
477
478
479
480
481
482
483
484
485
486
487
/*
 * DRM_IOCTL_I915_GEM_USERPTR: create a GEM object backed by an existing
 * page-aligned range of user memory in the calling process' address space.
 *
 * Validates flags, size, alignment and accessibility of the range, optionally
 * probes the VMAs (I915_USERPTR_PROBE), then allocates the object, registers
 * the MMU notifier and returns a handle in args->handle.
 *
 * Returns 0 on success or a negative errno.
 */
int
i915_gem_userptr_ioctl(struct drm_device *dev,
		       void *data,
		       struct drm_file *file)
{
	static struct lock_class_key __maybe_unused lock_class;
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct drm_i915_gem_userptr *args = data;
	struct drm_i915_gem_object __maybe_unused *obj;
	int __maybe_unused ret;
	u32 __maybe_unused handle;

	if (!HAS_LLC(dev_priv) && !HAS_SNOOP(dev_priv)) {
		/*
		 * Without LLC or snooping the hardware cannot keep coherent
		 * with CPU-written user pages, so userptr is unsupported.
		 */
		return -ENODEV;
	}

	if (args->flags & ~(I915_USERPTR_READ_ONLY |
			    I915_USERPTR_UNSYNCHRONIZED |
			    I915_USERPTR_PROBE))
		return -EINVAL;

	if (i915_gem_object_size_2big(args->user_size))
		return -E2BIG;

	if (!args->user_size)
		return -EINVAL;

	/* Both the pointer and the size must be page-aligned. */
	if (offset_in_page(args->user_ptr | args->user_size))
		return -EINVAL;

	if (!access_ok((char __user *)(unsigned long)args->user_ptr, args->user_size))
		return -EFAULT;

	/* The unsynchronized mode is not supported by this implementation. */
	if (args->flags & I915_USERPTR_UNSYNCHRONIZED)
		return -ENODEV;

	if (args->flags & I915_USERPTR_READ_ONLY) {
		/*
		 * Read-only userptr needs GTT-level read-only protection;
		 * reject it on hardware/VM setups lacking that support.
		 */
		if (!dev_priv->gt.vm->has_read_only)
			return -ENODEV;
	}

	if (args->flags & I915_USERPTR_PROBE) {
		/*
		 * Eagerly verify that the whole range is backed by
		 * acceptable VMAs before creating the object.
		 */
		ret = probe_range(current->mm, args->user_ptr, args->user_size);
		if (ret)
			return ret;
	}

#ifdef CONFIG_MMU_NOTIFIER
	obj = i915_gem_object_alloc();
	if (obj == NULL)
		return -ENOMEM;

	drm_gem_private_object_init(dev, &obj->base, args->user_size);
	i915_gem_object_init(obj, &i915_gem_userptr_ops, &lock_class,
			     I915_BO_ALLOC_USER);
	obj->mem_flags = I915_BO_FLAG_STRUCT_PAGE;
	obj->read_domains = I915_GEM_DOMAIN_CPU;
	obj->write_domain = I915_GEM_DOMAIN_CPU;
	i915_gem_object_set_cache_coherency(obj, I915_CACHE_LLC);

	obj->userptr.ptr = args->user_ptr;
	/* ULONG_MAX can never match a live notifier seq: forces first re-pin. */
	obj->userptr.notifier_seq = ULONG_MAX;
	if (args->flags & I915_USERPTR_READ_ONLY)
		i915_gem_object_set_readonly(obj);

	/*
	 * Register the interval notifier before publishing a handle, so the
	 * object is never visible to userspace without invalidation hooked up.
	 */
	ret = i915_gem_userptr_init__mmu_notifier(obj);
	if (ret == 0)
		ret = drm_gem_handle_create(file, &obj->base, &handle);

	/* Drop our creation reference; the handle (if any) keeps the object. */
	i915_gem_object_put(obj);
	if (ret)
		return ret;

	args->handle = handle;
	return 0;
#else
	return -ENODEV;
#endif
}
583
/*
 * One-time driver init for userptr support: set up the lock that serialises
 * notifier sequence updates against readers. Always returns 0.
 */
int i915_gem_init_userptr(struct drm_i915_private *dev_priv)
{
#ifdef CONFIG_MMU_NOTIFIER
	rwlock_init(&dev_priv->mm.notifier_lock);
#endif

	return 0;
}
592
/* Counterpart to i915_gem_init_userptr(); nothing to tear down currently. */
void i915_gem_cleanup_userptr(struct drm_i915_private *dev_priv)
{
}
596