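/*
 * SPDX-License-Identifier: MIT
 *
 * Copyright © 2014-2016 Intel Corporation
 */
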
#include <linux/anon_inodes.h>
#include <linux/mman.h>
#include <linux/pfn_t.h>
#include <linux/sizes.h>

#include "gt/intel_gt.h"
#include "gt/intel_gt_requests.h"

#include "i915_drv.h"
#include "i915_gem_gtt.h"
#include "i915_gem_ioctls.h"
#include "i915_gem_object.h"
#include "i915_gem_mman.h"
#include "i915_trace.h"
#include "i915_user_extensions.h"
#include "i915_gem_ttm.h"
#include "i915_vma.h"

static inline bool
__vma_matches(struct vm_area_struct *vma, struct file *filp,
	      unsigned long addr, unsigned long size)
{
	if (vma->vm_file != filp)
		return false;

	return vma->vm_start == addr &&
	       (vma->vm_end - vma->vm_start) == PAGE_ALIGN(size);
}
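
/**
 * i915_gem_mmap_ioctl - Maps the contents of an object, returning the address
 *			 it is mapped to.
 * @dev: drm device
 * @data: ioctl data blob
 * @file: drm file
 *
 * While the mapping holds a reference on the contents of the object, it
 * doesn't imply a ref on the object itself.
 *
 * Note that this legacy ioctl is rejected on discrete platforms and on
 * graphics version > 12; newer userspace is expected to use
 * DRM_IOCTL_I915_GEM_MMAP_OFFSET instead.
 */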
int
i915_gem_mmap_ioctl(struct drm_device *dev, void *data,
		    struct drm_file *file)
{
	struct drm_i915_private *i915 = to_i915(dev);
	struct drm_i915_gem_mmap *args = data;
	struct drm_i915_gem_object *obj;
	unsigned long addr;
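
	/*
	 * The mmap ioctl is disallowed on discrete platforms and on all
	 * platforms with graphics version > 12; those must use the
	 * mmap_offset ioctl instead.
	 */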
	if (IS_DGFX(i915) || GRAPHICS_VER(i915) > 12)
		return -EOPNOTSUPP;

	if (args->flags & ~(I915_MMAP_WC))
		return -EINVAL;

	if (args->flags & I915_MMAP_WC && !boot_cpu_has(X86_FEATURE_PAT))
		return -ENODEV;

	obj = i915_gem_object_lookup(file, args->handle);
	if (!obj)
		return -ENOENT;
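
	/* prime objects have no backing filp to GEM mmap pages from */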
	if (!obj->base.filp) {
		addr = -ENXIO;
		goto err;
	}

	if (range_overflows(args->offset, args->size, (u64)obj->base.size)) {
		addr = -EINVAL;
		goto err;
	}

	addr = vm_mmap(obj->base.filp, 0, args->size,
		       PROT_READ | PROT_WRITE, MAP_SHARED,
		       args->offset);
	if (IS_ERR_VALUE(addr))
		goto err;

	if (args->flags & I915_MMAP_WC) {
		struct mm_struct *mm = current->mm;
		struct vm_area_struct *vma;

		if (mmap_write_lock_killable(mm)) {
			addr = -EINTR;
			goto err;
		}
		vma = find_vma(mm, addr);
		if (vma && __vma_matches(vma, obj->base.filp, addr, args->size))
			vma->vm_page_prot =
				pgprot_writecombine(vm_get_page_prot(vma->vm_flags));
		else
			addr = -ENOMEM;
		mmap_write_unlock(mm);
		if (IS_ERR_VALUE(addr))
			goto err;
	}
	i915_gem_object_put(obj);

	args->addr_ptr = (u64)addr;
	return 0;

err:
	i915_gem_object_put(obj);
	return addr;
}

static unsigned int tile_row_pages(const struct drm_i915_gem_object *obj)
{
	return i915_gem_object_get_tile_row_size(obj) >> PAGE_SHIFT;
}
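
/**
 * i915_gem_mmap_gtt_version - report the current feature set for GTT mmaps
 *
 * A history of the GTT mmap interface:
 *
 * 0 - Everything had to fit into the GTT. Both parties of a memcpy had to
 *     be aligned and suitable for fencing, and still fit into the available
 *     mappable space left by the pinned display objects.
 *
 * 1 - Objects can be any size, and have any compatible fencing (X Y, or none
 *     as set via i915_gem_set_tiling() [DRM_I915_GEM_SET_TILING]).
 *
 * 2 - Recognise WC as a separate cache domain so that we can flush the
 *     delayed writes via GTT before performing direct access via WC.
 *
 * 3 - Remove implicit set-domain(GTT) and synchronisation on initial
 *     pagefault; swapin remains transparent.
 *
 * 4 - Support multiple fault handlers per object depending on object's
 *     backing storage (a.k.a. MMAP_OFFSET).
 */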
int i915_gem_mmap_gtt_version(void)
{
	return 4;
}

static inline struct i915_ggtt_view
compute_partial_view(const struct drm_i915_gem_object *obj,
		     pgoff_t page_offset,
		     unsigned int chunk)
{
	struct i915_ggtt_view view;

	if (i915_gem_object_is_tiled(obj))
		chunk = roundup(chunk, tile_row_pages(obj) ?: 1);

	view.type = I915_GGTT_VIEW_PARTIAL;
	view.partial.offset = rounddown(page_offset, chunk);
	view.partial.size =
		min_t(unsigned int, chunk,
		      (obj->base.size >> PAGE_SHIFT) - view.partial.offset);
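
	/* If the partial covers the entire object, just create a normal VMA. */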
	if (chunk >= obj->base.size >> PAGE_SHIFT)
		view.type = I915_GGTT_VIEW_NORMAL;

	return view;
}

static vm_fault_t i915_error_to_vmf_fault(int err)
{
	switch (err) {
	default:
		WARN_ONCE(err, "unhandled error in %s: %i\n", __func__, err);
		fallthrough;
	case -EIO:
	case -EFAULT:
	case -ENODEV:
	case -ENXIO:
		return VM_FAULT_SIGBUS;

	case -ENOMEM:
		return VM_FAULT_OOM;

	case 0:
	case -EAGAIN:
	case -ENOSPC:
	case -ERESTARTSYS:
	case -EINTR:
	case -EBUSY:
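		/*
		 * EBUSY is ok: this just means that another thread
		 * already did the job.
		 */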
		return VM_FAULT_NOPAGE;
	}
}

static vm_fault_t vm_fault_cpu(struct vm_fault *vmf)
{
	struct vm_area_struct *area = vmf->vma;
	struct i915_mmap_offset *mmo = area->vm_private_data;
	struct drm_i915_gem_object *obj = mmo->obj;
	resource_size_t iomap;
	int err;
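
	/* Sanity check that we allow writing into this object */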
	if (unlikely(i915_gem_object_is_readonly(obj) &&
		     area->vm_flags & VM_WRITE))
		return VM_FAULT_SIGBUS;

	if (i915_gem_object_lock_interruptible(obj, NULL))
		return VM_FAULT_NOPAGE;

	err = i915_gem_object_pin_pages(obj);
	if (err)
		goto out;

	iomap = -1;
	if (!i915_gem_object_has_struct_page(obj)) {
		iomap = obj->mm.region->iomap.base;
		iomap -= obj->mm.region->region.start;
	}
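
	/* PTEs are revoked in obj->ops->put_pages() */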
	err = remap_io_sg(area,
			  area->vm_start, area->vm_end - area->vm_start,
			  obj->mm.pages->sgl, iomap);

	if (area->vm_flags & VM_WRITE) {
		GEM_BUG_ON(!i915_gem_object_has_pinned_pages(obj));
		obj->mm.dirty = true;
	}

	i915_gem_object_unpin_pages(obj);

out:
	i915_gem_object_unlock(obj);
	return i915_error_to_vmf_fault(err);
}

static vm_fault_t vm_fault_gtt(struct vm_fault *vmf)
{
#define MIN_CHUNK_PAGES (SZ_1M >> PAGE_SHIFT)
	struct vm_area_struct *area = vmf->vma;
	struct i915_mmap_offset *mmo = area->vm_private_data;
	struct drm_i915_gem_object *obj = mmo->obj;
	struct drm_device *dev = obj->base.dev;
	struct drm_i915_private *i915 = to_i915(dev);
	struct intel_runtime_pm *rpm = &i915->runtime_pm;
	struct i915_ggtt *ggtt = &i915->ggtt;
	bool write = area->vm_flags & VM_WRITE;
	struct i915_gem_ww_ctx ww;
	intel_wakeref_t wakeref;
	struct i915_vma *vma;
	pgoff_t page_offset;
	int srcu;
	int ret;
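
	/* We don't use vmf->pgoff since that has the fake offset */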
	page_offset = (vmf->address - area->vm_start) >> PAGE_SHIFT;

	trace_i915_gem_object_fault(obj, page_offset, true, write);

	wakeref = intel_runtime_pm_get(rpm);

	i915_gem_ww_ctx_init(&ww, true);
retry:
	ret = i915_gem_object_lock(obj, &ww);
	if (ret)
		goto err_rpm;
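
	/* Sanity check that we allow writing into this object */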
	if (i915_gem_object_is_readonly(obj) && write) {
		ret = -EFAULT;
		goto err_rpm;
	}

	ret = i915_gem_object_pin_pages(obj);
	if (ret)
		goto err_rpm;

	ret = intel_gt_reset_trylock(ggtt->vm.gt, &srcu);
	if (ret)
		goto err_pages;
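
	/* Now pin it into the GTT as needed */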
	vma = i915_gem_object_ggtt_pin_ww(obj, &ww, NULL, 0, 0,
					  PIN_MAPPABLE |
					  PIN_NONBLOCK |
					  PIN_NOEVICT);
	if (IS_ERR(vma) && vma != ERR_PTR(-EDEADLK)) {
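		/* Use a partial view if it is bigger than available space */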
		struct i915_ggtt_view view =
			compute_partial_view(obj, page_offset, MIN_CHUNK_PAGES);
		unsigned int flags;

		flags = PIN_MAPPABLE | PIN_NOSEARCH;
		if (view.type == I915_GGTT_VIEW_NORMAL)
			flags |= PIN_NONBLOCK;
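
		/*
		 * Userspace is now writing through an untracked VMA, abandon
		 * all hope that the hardware is able to track future writes.
		 */
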
		vma = i915_gem_object_ggtt_pin_ww(obj, &ww, &view, 0, 0, flags);
		if (IS_ERR(vma) && vma != ERR_PTR(-EDEADLK)) {
			flags = PIN_MAPPABLE;
			view.type = I915_GGTT_VIEW_PARTIAL;
			vma = i915_gem_object_ggtt_pin_ww(obj, &ww, &view, 0, 0, flags);
		}
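
		/* The entire mappable GGTT is pinned? Unexpected! */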
		GEM_BUG_ON(vma == ERR_PTR(-ENOSPC));
	}
	if (IS_ERR(vma)) {
		ret = PTR_ERR(vma);
		goto err_reset;
	}
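
	/* Access to snoopable pages through the GTT is incoherent. */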
	if (obj->cache_level != I915_CACHE_NONE && !HAS_LLC(i915)) {
		ret = -EFAULT;
		goto err_unpin;
	}

	ret = i915_vma_pin_fence(vma);
	if (ret)
		goto err_unpin;
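
	/* Finally, remap it using the new GTT offset */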
	ret = remap_io_mapping(area,
			       area->vm_start + (vma->ggtt_view.partial.offset << PAGE_SHIFT),
			       (ggtt->gmadr.start + vma->node.start) >> PAGE_SHIFT,
			       min_t(u64, vma->size, area->vm_end - area->vm_start),
			       &ggtt->iomap);
	if (ret)
		goto err_fence;

	assert_rpm_wakelock_held(rpm);
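
	/* Mark as being mmapped into userspace for later revocation */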
	mutex_lock(&i915->ggtt.vm.mutex);
	if (!i915_vma_set_userfault(vma) && !obj->userfault_count++)
		list_add(&obj->userfault_link, &i915->ggtt.userfault_list);
	mutex_unlock(&i915->ggtt.vm.mutex);
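
	/* Track the mmo associated with the fenced vma */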
	vma->mmo = mmo;

	if (IS_ACTIVE(CONFIG_DRM_I915_USERFAULT_AUTOSUSPEND))
		intel_wakeref_auto(&i915->ggtt.userfault_wakeref,
				   msecs_to_jiffies_timeout(CONFIG_DRM_I915_USERFAULT_AUTOSUSPEND));

	if (write) {
		GEM_BUG_ON(!i915_gem_object_has_pinned_pages(obj));
		i915_vma_set_ggtt_write(vma);
		obj->mm.dirty = true;
	}

err_fence:
	i915_vma_unpin_fence(vma);
err_unpin:
	__i915_vma_unpin(vma);
err_reset:
	intel_gt_reset_unlock(ggtt->vm.gt, srcu);
err_pages:
	i915_gem_object_unpin_pages(obj);
err_rpm:
	if (ret == -EDEADLK) {
		ret = i915_gem_ww_ctx_backoff(&ww);
		if (!ret)
			goto retry;
	}
	i915_gem_ww_ctx_fini(&ww);
	intel_runtime_pm_put(rpm, wakeref);
	return i915_error_to_vmf_fault(ret);
}

static int
vm_access(struct vm_area_struct *area, unsigned long addr,
	  void *buf, int len, int write)
{
	struct i915_mmap_offset *mmo = area->vm_private_data;
	struct drm_i915_gem_object *obj = mmo->obj;
	struct i915_gem_ww_ctx ww;
	void *vaddr;
	int err = 0;

	if (i915_gem_object_is_readonly(obj) && write)
		return -EACCES;

	addr -= area->vm_start;
	if (addr >= obj->base.size)
		return -EINVAL;

	i915_gem_ww_ctx_init(&ww, true);
retry:
	err = i915_gem_object_lock(obj, &ww);
	if (err)
		goto out;
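
	/* As this is primarily for debugging, let's focus on simplicity */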
	vaddr = i915_gem_object_pin_map(obj, I915_MAP_FORCE_WC);
	if (IS_ERR(vaddr)) {
		err = PTR_ERR(vaddr);
		goto out;
	}

	if (write) {
		memcpy(vaddr + addr, buf, len);
		__i915_gem_object_flush_map(obj, addr, len);
	} else {
		memcpy(buf, vaddr + addr, len);
	}

	i915_gem_object_unpin_map(obj);
out:
	if (err == -EDEADLK) {
		err = i915_gem_ww_ctx_backoff(&ww);
		if (!err)
			goto retry;
	}
	i915_gem_ww_ctx_fini(&ww);

	if (err)
		return err;

	return len;
}

void __i915_gem_object_release_mmap_gtt(struct drm_i915_gem_object *obj)
{
	struct i915_vma *vma;

	GEM_BUG_ON(!obj->userfault_count);

	for_each_ggtt_vma(vma, obj)
		i915_vma_revoke_mmap(vma);

	GEM_BUG_ON(obj->userfault_count);
}
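
/**
 * i915_gem_object_release_mmap_gtt - release all GTT mmappings by clearing PTEs
 * @obj: obj in question
 *
 * Revokes any userspace GTT mmaps of the object so that the next access
 * faults and must wait, serialised against us by the GGTT vm mutex.
 */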
void i915_gem_object_release_mmap_gtt(struct drm_i915_gem_object *obj)
{
	struct drm_i915_private *i915 = to_i915(obj->base.dev);
	intel_wakeref_t wakeref;
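
	/*
	 * Serialisation between user GTT access and our code depends upon
	 * revoking the CPU's PTE whilst the mutex is held. The next user
	 * pagefault then has to wait until we release the mutex.
	 *
	 * Note that RPM complicates somewhat by adding an additional
	 * requirement that operations to the GGTT be made holding the RPM
	 * wakeref.
	 */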
	wakeref = intel_runtime_pm_get(&i915->runtime_pm);
	mutex_lock(&i915->ggtt.vm.mutex);

	if (!obj->userfault_count)
		goto out;

	__i915_gem_object_release_mmap_gtt(obj);
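
	/*
	 * Ensure that the CPU's PTE are revoked and there are not outstanding
	 * memory transactions from userspace before we return. The TLB
	 * flushing implied above by changing the PTE above *should* be
	 * sufficient, an extra barrier here just provides us with a bit
	 * of paranoid documentation about our requirement to serialise
	 * memory writes before touching registers / GSM.
	 */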
	wmb();

out:
	mutex_unlock(&i915->ggtt.vm.mutex);
	intel_runtime_pm_put(&i915->runtime_pm, wakeref);
}

void i915_gem_object_release_mmap_offset(struct drm_i915_gem_object *obj)
{
	struct i915_mmap_offset *mmo, *mn;

	spin_lock(&obj->mmo.lock);
	rbtree_postorder_for_each_entry_safe(mmo, mn,
					     &obj->mmo.offsets, offset) {
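		/*
		 * vma_node_unmap for GTT mmaps handled already in
		 * __i915_gem_object_release_mmap_gtt
		 */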
		if (mmo->mmap_type == I915_MMAP_TYPE_GTT)
			continue;

		spin_unlock(&obj->mmo.lock);
		drm_vma_node_unmap(&mmo->vma_node,
				   obj->base.dev->anon_inode->i_mapping);
		spin_lock(&obj->mmo.lock);
	}
	spin_unlock(&obj->mmo.lock);
}

static struct i915_mmap_offset *
lookup_mmo(struct drm_i915_gem_object *obj,
	   enum i915_mmap_type mmap_type)
{
	struct rb_node *rb;

	spin_lock(&obj->mmo.lock);
	rb = obj->mmo.offsets.rb_node;
	while (rb) {
		struct i915_mmap_offset *mmo =
			rb_entry(rb, typeof(*mmo), offset);

		if (mmo->mmap_type == mmap_type) {
			spin_unlock(&obj->mmo.lock);
			return mmo;
		}

		if (mmo->mmap_type < mmap_type)
			rb = rb->rb_right;
		else
			rb = rb->rb_left;
	}
	spin_unlock(&obj->mmo.lock);

	return NULL;
}

static struct i915_mmap_offset *
insert_mmo(struct drm_i915_gem_object *obj, struct i915_mmap_offset *mmo)
{
	struct rb_node *rb, **p;

	spin_lock(&obj->mmo.lock);
	rb = NULL;
	p = &obj->mmo.offsets.rb_node;
	while (*p) {
		struct i915_mmap_offset *pos;

		rb = *p;
		pos = rb_entry(rb, typeof(*pos), offset);

		if (pos->mmap_type == mmo->mmap_type) {
			spin_unlock(&obj->mmo.lock);
			drm_vma_offset_remove(obj->base.dev->vma_offset_manager,
					      &mmo->vma_node);
			kfree(mmo);
			return pos;
		}

		if (pos->mmap_type < mmo->mmap_type)
			p = &rb->rb_right;
		else
			p = &rb->rb_left;
	}
	rb_link_node(&mmo->offset, rb, p);
	rb_insert_color(&mmo->offset, &obj->mmo.offsets);
	spin_unlock(&obj->mmo.lock);

	return mmo;
}

static struct i915_mmap_offset *
mmap_offset_attach(struct drm_i915_gem_object *obj,
		   enum i915_mmap_type mmap_type,
		   struct drm_file *file)
{
	struct drm_i915_private *i915 = to_i915(obj->base.dev);
	struct i915_mmap_offset *mmo;
	int err;

	GEM_BUG_ON(obj->ops->mmap_offset || obj->ops->mmap_ops);

	mmo = lookup_mmo(obj, mmap_type);
	if (mmo)
		goto out;

	mmo = kmalloc(sizeof(*mmo), GFP_KERNEL);
	if (!mmo)
		return ERR_PTR(-ENOMEM);

	mmo->obj = obj;
	mmo->mmap_type = mmap_type;
	drm_vma_node_reset(&mmo->vma_node);

	err = drm_vma_offset_add(obj->base.dev->vma_offset_manager,
				 &mmo->vma_node, obj->base.size / PAGE_SIZE);
	if (likely(!err))
		goto insert;
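
	/* Attempt to reap some mmap space from dead objects */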
	err = intel_gt_retire_requests_timeout(&i915->gt, MAX_SCHEDULE_TIMEOUT,
					       NULL);
	if (err)
		goto err;

	i915_gem_drain_freed_objects(i915);
	err = drm_vma_offset_add(obj->base.dev->vma_offset_manager,
				 &mmo->vma_node, obj->base.size / PAGE_SIZE);
	if (err)
		goto err;

insert:
	mmo = insert_mmo(obj, mmo);
	GEM_BUG_ON(lookup_mmo(obj, mmap_type) != mmo);
out:
	if (file)
		drm_vma_node_allow(&mmo->vma_node, file);
	return mmo;

err:
	kfree(mmo);
	return ERR_PTR(err);
}

static int
__assign_mmap_offset(struct drm_i915_gem_object *obj,
		     enum i915_mmap_type mmap_type,
		     u64 *offset, struct drm_file *file)
{
	struct i915_mmap_offset *mmo;

	if (i915_gem_object_never_mmap(obj))
		return -ENODEV;

	if (obj->ops->mmap_offset) {
		if (mmap_type != I915_MMAP_TYPE_FIXED)
			return -ENODEV;

		*offset = obj->ops->mmap_offset(obj);
		return 0;
	}

	if (mmap_type == I915_MMAP_TYPE_FIXED)
		return -ENODEV;

	if (mmap_type != I915_MMAP_TYPE_GTT &&
	    !i915_gem_object_has_struct_page(obj) &&
	    !i915_gem_object_has_iomem(obj))
		return -ENODEV;

	mmo = mmap_offset_attach(obj, mmap_type, file);
	if (IS_ERR(mmo))
		return PTR_ERR(mmo);

	*offset = drm_vma_node_offset_addr(&mmo->vma_node);
	return 0;
}

static int
__assign_mmap_offset_handle(struct drm_file *file,
			    u32 handle,
			    enum i915_mmap_type mmap_type,
			    u64 *offset)
{
	struct drm_i915_gem_object *obj;
	int err;

	obj = i915_gem_object_lookup(file, handle);
	if (!obj)
		return -ENOENT;

	err = i915_gem_object_lock_interruptible(obj, NULL);
	if (err)
		goto out_put;
	err = __assign_mmap_offset(obj, mmap_type, offset, file);
	i915_gem_object_unlock(obj);
out_put:
	i915_gem_object_put(obj);
	return err;
}

int
i915_gem_dumb_mmap_offset(struct drm_file *file,
			  struct drm_device *dev,
			  u32 handle,
			  u64 *offset)
{
	enum i915_mmap_type mmap_type;

	if (HAS_LMEM(to_i915(dev)))
		mmap_type = I915_MMAP_TYPE_FIXED;
	else if (boot_cpu_has(X86_FEATURE_PAT))
		mmap_type = I915_MMAP_TYPE_WC;
	else if (!i915_ggtt_has_aperture(&to_i915(dev)->ggtt))
		return -ENODEV;
	else
		mmap_type = I915_MMAP_TYPE_GTT;

	return __assign_mmap_offset_handle(file, handle, mmap_type, offset);
}
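
/**
 * i915_gem_mmap_offset_ioctl - prepare an object for GTT mmap'ing
 * @dev: DRM device
 * @data: GTT mapping ioctl data
 * @file: GEM object info
 *
 * Simply returns the fake offset to userspace so it can mmap it.
 * The mmap call will end up in drm_gem_mmap(), which will set things
 * up so we can get faults in the handler above.
 *
 * The fault handler will take care of binding the object into the GTT
 * (since it may have been evicted to make room for something), allocating
 * a fence register, and mapping the appropriate aperture address into
 * userspace.
 *
 * A rough sketch of the expected userspace flow (illustrative only, with
 * error handling omitted):
 *
 *	struct drm_i915_gem_mmap_offset arg = {};
 *
 *	arg.handle = handle;
 *	arg.flags = I915_MMAP_OFFSET_WC;
 *	ioctl(fd, DRM_IOCTL_I915_GEM_MMAP_OFFSET, &arg);
 *	ptr = mmap(NULL, size, PROT_READ | PROT_WRITE, MAP_SHARED,
 *		   fd, arg.offset);
 */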
int
i915_gem_mmap_offset_ioctl(struct drm_device *dev, void *data,
			   struct drm_file *file)
{
	struct drm_i915_private *i915 = to_i915(dev);
	struct drm_i915_gem_mmap_offset *args = data;
	enum i915_mmap_type type;
	int err;
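
	/*
	 * Historically we failed to check args.pad and args.offset
	 * and so we cannot use those fields for user input and we cannot
	 * add -EINVAL for them as the ABI is fixed, i.e. old userspace
	 * may be feeding in garbage in those fields.
	 *
	 * if (args->pad) return -EINVAL; is verbotten!
	 */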
	err = i915_user_extensions(u64_to_user_ptr(args->extensions),
				   NULL, 0, NULL);
	if (err)
		return err;

	switch (args->flags) {
	case I915_MMAP_OFFSET_GTT:
		if (!i915_ggtt_has_aperture(&i915->ggtt))
			return -ENODEV;
		type = I915_MMAP_TYPE_GTT;
		break;

	case I915_MMAP_OFFSET_WC:
		if (!boot_cpu_has(X86_FEATURE_PAT))
			return -ENODEV;
		type = I915_MMAP_TYPE_WC;
		break;

	case I915_MMAP_OFFSET_WB:
		type = I915_MMAP_TYPE_WB;
		break;

	case I915_MMAP_OFFSET_UC:
		if (!boot_cpu_has(X86_FEATURE_PAT))
			return -ENODEV;
		type = I915_MMAP_TYPE_UC;
		break;

	case I915_MMAP_OFFSET_FIXED:
		type = I915_MMAP_TYPE_FIXED;
		break;

	default:
		return -EINVAL;
	}

	return __assign_mmap_offset_handle(file, args->handle, type, &args->offset);
}

static void vm_open(struct vm_area_struct *vma)
{
	struct i915_mmap_offset *mmo = vma->vm_private_data;
	struct drm_i915_gem_object *obj = mmo->obj;

	GEM_BUG_ON(!obj);
	i915_gem_object_get(obj);
}

static void vm_close(struct vm_area_struct *vma)
{
	struct i915_mmap_offset *mmo = vma->vm_private_data;
	struct drm_i915_gem_object *obj = mmo->obj;

	GEM_BUG_ON(!obj);
	i915_gem_object_put(obj);
}

static const struct vm_operations_struct vm_ops_gtt = {
	.fault = vm_fault_gtt,
	.access = vm_access,
	.open = vm_open,
	.close = vm_close,
};

static const struct vm_operations_struct vm_ops_cpu = {
	.fault = vm_fault_cpu,
	.access = vm_access,
	.open = vm_open,
	.close = vm_close,
};

static int singleton_release(struct inode *inode, struct file *file)
{
	struct drm_i915_private *i915 = file->private_data;

	cmpxchg(&i915->gem.mmap_singleton, file, NULL);
	drm_dev_put(&i915->drm);

	return 0;
}

static const struct file_operations singleton_fops = {
	.owner = THIS_MODULE,
	.release = singleton_release,
};

static struct file *mmap_singleton(struct drm_i915_private *i915)
{
	struct file *file;

	rcu_read_lock();
	file = READ_ONCE(i915->gem.mmap_singleton);
	if (file && !get_file_rcu(file))
		file = NULL;
	rcu_read_unlock();
	if (file)
		return file;

	file = anon_inode_getfile("i915.gem", &singleton_fops, i915, O_RDWR);
	if (IS_ERR(file))
		return file;
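
	/* Everyone shares a single global address space */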
	file->f_mapping = i915->drm.anon_inode->i_mapping;

	smp_store_mb(i915->gem.mmap_singleton, file);
	drm_dev_get(&i915->drm);

	return file;
}
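
/*
 * This overcomes the limitation in drm_gem_mmap's assignment of a
 * drm_gem_object as the vma->vm_private_data. Since we need to
 * be able to resolve multiple mmap offsets which could be tied
 * to a single gem object.
 */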
int i915_gem_mmap(struct file *filp, struct vm_area_struct *vma)
{
	struct drm_vma_offset_node *node;
	struct drm_file *priv = filp->private_data;
	struct drm_device *dev = priv->minor->dev;
	struct drm_i915_gem_object *obj = NULL;
	struct i915_mmap_offset *mmo = NULL;
	struct file *anon;

	if (drm_dev_is_unplugged(dev))
		return -ENODEV;

	rcu_read_lock();
	drm_vma_offset_lock_lookup(dev->vma_offset_manager);
	node = drm_vma_offset_exact_lookup_locked(dev->vma_offset_manager,
						  vma->vm_pgoff,
						  vma_pages(vma));
	if (node && drm_vma_node_is_allowed(node, priv)) {
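		/*
		 * Skip 0-refcnted objects as it is in the process of being
		 * destroyed and will be invalid when the vma manager lock
		 * is released.
		 */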
		if (!node->driver_private) {
			mmo = container_of(node, struct i915_mmap_offset, vma_node);
			obj = i915_gem_object_get_rcu(mmo->obj);

			GEM_BUG_ON(obj && obj->ops->mmap_ops);
		} else {
			obj = i915_gem_object_get_rcu
				(container_of(node, struct drm_i915_gem_object,
					      base.vma_node));

			GEM_BUG_ON(obj && !obj->ops->mmap_ops);
		}
	}
	drm_vma_offset_unlock_lookup(dev->vma_offset_manager);
	rcu_read_unlock();
	if (!obj)
		return node ? -EACCES : -EINVAL;

	if (i915_gem_object_is_readonly(obj)) {
		if (vma->vm_flags & VM_WRITE) {
			i915_gem_object_put(obj);
			return -EINVAL;
		}
		vma->vm_flags &= ~VM_MAYWRITE;
	}

	anon = mmap_singleton(to_i915(dev));
	if (IS_ERR(anon)) {
		i915_gem_object_put(obj);
		return PTR_ERR(anon);
	}

	vma->vm_flags |= VM_PFNMAP | VM_DONTEXPAND | VM_DONTDUMP | VM_IO;
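
	/*
	 * We keep the ref on mmo->obj, not vm_file, but we require
	 * vma->vm_file->f_mapping, see vma_link(), for later revocation.
	 * Our userspace is accustomed to having per-file resource cleanup
	 * (i.e. contexts, objects and requests) on their close(fd), which
	 * requires avoiding extraneous references to their filp, hence why
	 * we prefer to use an anonymous file for their mmaps.
	 */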
	vma_set_file(vma, anon);
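	/* Drop the initial creation reference, the vma is now holding one. */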
	fput(anon);

	if (obj->ops->mmap_ops) {
		vma->vm_page_prot = pgprot_decrypted(vm_get_page_prot(vma->vm_flags));
		vma->vm_ops = obj->ops->mmap_ops;
		vma->vm_private_data = node->driver_private;
		return 0;
	}

	vma->vm_private_data = mmo;

	switch (mmo->mmap_type) {
	case I915_MMAP_TYPE_WC:
		vma->vm_page_prot =
			pgprot_writecombine(vm_get_page_prot(vma->vm_flags));
		vma->vm_ops = &vm_ops_cpu;
		break;

	case I915_MMAP_TYPE_FIXED:
		GEM_WARN_ON(1);
		fallthrough;
	case I915_MMAP_TYPE_WB:
		vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
		vma->vm_ops = &vm_ops_cpu;
		break;

	case I915_MMAP_TYPE_UC:
		vma->vm_page_prot =
			pgprot_noncached(vm_get_page_prot(vma->vm_flags));
		vma->vm_ops = &vm_ops_cpu;
		break;

	case I915_MMAP_TYPE_GTT:
		vma->vm_page_prot =
			pgprot_writecombine(vm_get_page_prot(vma->vm_flags));
		vma->vm_ops = &vm_ops_gtt;
		break;
	}
	vma->vm_page_prot = pgprot_decrypted(vma->vm_page_prot);

	return 0;
}

#if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
#include "selftests/i915_gem_mman.c"
#endif