/*
 * SPDX-License-Identifier: MIT
 *
 * Copyright © 2014-2016 Intel Corporation
 */

#include <linux/anon_inodes.h>
#include <linux/mman.h>
#include <linux/pfn_t.h>
#include <linux/sizes.h>

#include "gt/intel_gt.h"
#include "gt/intel_gt_requests.h"

#include "i915_drv.h"
#include "i915_gem_gtt.h"
#include "i915_gem_ioctls.h"
#include "i915_gem_object.h"
#include "i915_gem_mman.h"
#include "i915_trace.h"
#include "i915_user_extensions.h"
#include "i915_vma.h"

static inline bool
__vma_matches(struct vm_area_struct *vma, struct file *filp,
	      unsigned long addr, unsigned long size)
{
	if (vma->vm_file != filp)
		return false;

	return vma->vm_start == addr &&
	       (vma->vm_end - vma->vm_start) == PAGE_ALIGN(size);
}

/**
 * i915_gem_mmap_ioctl - Maps the contents of an object, returning the address
 *			 it is mapped to.
 * @dev: drm device
 * @data: ioctl data blob
 * @file: drm file
 *
 * While the mapping holds a reference on the contents of the object, it doesn't
 * imply a ref on the object itself.
 *
 * IMPORTANT:
 *
 * DRM driver writers who look this implementation up and are shocked:
 * do not attempt to use it. Any private data structures passed to mmap
 * must be invalidated once the drm fd is closed. This works here only
 * because the mapping is made against the object's shmem backing file
 * (obj->base.filp) rather than against the drm fd itself.
 */
int
i915_gem_mmap_ioctl(struct drm_device *dev, void *data,
		    struct drm_file *file)
{
	struct drm_i915_gem_mmap *args = data;
	struct drm_i915_gem_object *obj;
	unsigned long addr;

	if (args->flags & ~(I915_MMAP_WC))
		return -EINVAL;

	if (args->flags & I915_MMAP_WC && !boot_cpu_has(X86_FEATURE_PAT))
		return -ENODEV;

	obj = i915_gem_object_lookup(file, args->handle);
	if (!obj)
		return -ENOENT;

	/* prime objects have no backing filp to GEM mmap
	 * pages from.
	 */
	if (!obj->base.filp) {
		addr = -ENXIO;
		goto err;
	}

	if (range_overflows(args->offset, args->size, (u64)obj->base.size)) {
		addr = -EINVAL;
		goto err;
	}

	addr = vm_mmap(obj->base.filp, 0, args->size,
		       PROT_READ | PROT_WRITE, MAP_SHARED,
		       args->offset);
	if (IS_ERR_VALUE(addr))
		goto err;

	if (args->flags & I915_MMAP_WC) {
		struct mm_struct *mm = current->mm;
		struct vm_area_struct *vma;

		if (mmap_write_lock_killable(mm)) {
			addr = -EINTR;
			goto err;
		}
		vma = find_vma(mm, addr);
		if (vma && __vma_matches(vma, obj->base.filp, addr, args->size))
			vma->vm_page_prot =
				pgprot_writecombine(vm_get_page_prot(vma->vm_flags));
		else
			addr = -ENOMEM;
		mmap_write_unlock(mm);
		if (IS_ERR_VALUE(addr))
			goto err;
	}
	i915_gem_object_put(obj);

	args->addr_ptr = (u64)addr;
	return 0;

err:
	i915_gem_object_put(obj);
	return addr;
}
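
/*
 * Example (userspace sketch, not part of the kernel): a minimal caller of
 * the ioctl above via libdrm's drmIoctl(); "fd", "handle" and "size" are
 * assumed to be a valid DRM fd, GEM handle and object size respectively.
 *
 *	struct drm_i915_gem_mmap arg = {
 *		.handle = handle,
 *		.size = size,
 *		.flags = I915_MMAP_WC,
 *	};
 *	void *ptr = NULL;
 *
 *	if (drmIoctl(fd, DRM_IOCTL_I915_GEM_MMAP, &arg) == 0)
 *		ptr = (void *)(uintptr_t)arg.addr_ptr;
 *
 * The WC flag follows the same PAT check as above: without PAT support the
 * ioctl fails with -ENODEV before any mapping is attempted.
 */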

static unsigned int tile_row_pages(const struct drm_i915_gem_object *obj)
{
	return i915_gem_object_get_tile_row_size(obj) >> PAGE_SHIFT;
}

/**
 * i915_gem_mmap_gtt_version - report the current feature set for GTT mmaps
 *
 * A history of the GTT mmap interface:
 *
 * 0 - Everything had to fit into the GTT. Both parties of a memcpy had to
 *     be aligned and suitable for fencing, and still fit into the available
 *     mappable space left by the pinned display objects. A classic problem
 *     we called the page-fault-of-doom where we would ping-pong between
 *     two objects that could not fit inside the GTT and so the memcpy
 *     would page one object in at the expense of the other between every
 *     single byte.
 *
 * 1 - Objects can be any size, and have any compatible fencing (X Y, or none
 *     as set via i915_gem_set_tiling() [DRM_I915_GEM_SET_TILING]). If the
 *     object is too large for the available space (or simply too large
 *     for the mappable aperture!), a view is created instead and faulted
 *     into userspace. (This view is aligned and sized appropriately for
 *     fenced access.)
 *
 * 2 - Recognise WC as a separate cache domain so that we can flush the
 *     delayed writes via GTT before performing direct access via WC.
 *
 * 3 - Remove implicit set-domain(GTT) and synchronisation on initial
 *     pagefault; swapin remains transparent.
 *
 * 4 - Support multiple fault handlers per object depending on how the
 *     object is used.
 *
 * Restrictions:
 *
 *  * snoopable objects cannot be accessed via the GTT. It can cause machine
 *    hangs on some architectures, corruption on others. An attempt to service
 *    a fault from a snoopable object is ignored with a -EFAULT.
 *
 *  * the object must be able to fit into RAM (physical memory, though not
 *    limited to the mappable aperture).
 *
 *  * all mappings are revoked on runtime device suspend.
 *
 *  * there are only 8, 16 or 32 fence registers to share between all users
 *    (older machines require fence register for display and blitter access
 *    as well). Contention of the fence registers will cause the previous
 *    users to be unmapped and any new access will generate new page faults.
 *
 *  * running out of memory while servicing a fault may generate a SIGBUS,
 *    rather than the expected SIGSEGV.
 */
int i915_gem_mmap_gtt_version(void)
{
	return 4;
}
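
/*
 * A sketch of how userspace typically consumes the version above (an
 * illustration, not part of this file); "fd" is assumed to be an open
 * DRM fd:
 *
 *	int gtt_version = 0;
 *	struct drm_i915_getparam gp = {
 *		.param = I915_PARAM_MMAP_GTT_VERSION,
 *		.value = &gtt_version,
 *	};
 *
 *	drmIoctl(fd, DRM_IOCTL_I915_GETPARAM, &gp);
 *
 * A value of 4 or more indicates that per-object fault handlers, and hence
 * the mmap_offset interface below, are available.
 */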

static inline struct i915_ggtt_view
compute_partial_view(const struct drm_i915_gem_object *obj,
		     pgoff_t page_offset,
		     unsigned int chunk)
{
	struct i915_ggtt_view view;

	if (i915_gem_object_is_tiled(obj))
		chunk = roundup(chunk, tile_row_pages(obj));

	view.type = I915_GGTT_VIEW_PARTIAL;
	view.partial.offset = rounddown(page_offset, chunk);
	view.partial.size =
		min_t(unsigned int, chunk,
		      (obj->base.size >> PAGE_SHIFT) - view.partial.offset);

	/* If the partial covers the entire object, just create a normal VMA. */
	if (chunk >= obj->base.size >> PAGE_SHIFT)
		view.type = I915_GGTT_VIEW_NORMAL;

	return view;
}
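
/*
 * Worked example for compute_partial_view(), assuming 4 KiB pages: for an
 * untiled 16 MiB object (4096 pages), with chunk = MIN_CHUNK_PAGES = 256
 * (as used by vm_fault_gtt() below) and a fault at page 1000, the view
 * becomes partial.offset = rounddown(1000, 256) = 768 and partial.size =
 * min(256, 4096 - 768) = 256 pages, i.e. a 1 MiB window around the faulting
 * page. Only if the chunk covered the whole object (chunk >= 4096 here)
 * would the view degenerate to a normal one.
 */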

static vm_fault_t i915_error_to_vmf_fault(int err)
{
	switch (err) {
	default:
		WARN_ONCE(err, "unhandled error in %s: %i\n", __func__, err);
		fallthrough;
	case -EIO: /* shmemfs failure from swap device */
	case -EFAULT: /* purged object */
	case -ENODEV: /* bad object, how did you get here! */
	case -ENXIO: /* unable to access backing store (on device) */
		return VM_FAULT_SIGBUS;

	case -ENOMEM:
		return VM_FAULT_OOM;

	case 0:
	case -EAGAIN:
	case -ENOSPC: /* transient failure to evict? */
	case -ERESTARTSYS:
	case -EINTR:
	case -EBUSY:
		/*
		 * EBUSY is ok: this just means that another thread
		 * already did the job.
		 */
		return VM_FAULT_NOPAGE;
	}
}
235
236static vm_fault_t vm_fault_cpu(struct vm_fault *vmf)
237{
238 struct vm_area_struct *area = vmf->vma;
239 struct i915_mmap_offset *mmo = area->vm_private_data;
240 struct drm_i915_gem_object *obj = mmo->obj;
241 resource_size_t iomap;
242 int err;
243
244
245 if (unlikely(i915_gem_object_is_readonly(obj) &&
246 area->vm_flags & VM_WRITE))
247 return VM_FAULT_SIGBUS;
248
249 err = i915_gem_object_pin_pages(obj);
250 if (err)
251 goto out;
252
253 iomap = -1;
254 if (!i915_gem_object_type_has(obj, I915_GEM_OBJECT_HAS_STRUCT_PAGE)) {
255 iomap = obj->mm.region->iomap.base;
256 iomap -= obj->mm.region->region.start;
257 }
258
259
260 err = remap_io_sg(area,
261 area->vm_start, area->vm_end - area->vm_start,
262 obj->mm.pages->sgl, iomap);
263
264 if (area->vm_flags & VM_WRITE) {
265 GEM_BUG_ON(!i915_gem_object_has_pinned_pages(obj));
266 obj->mm.dirty = true;
267 }
268
269 i915_gem_object_unpin_pages(obj);
270
271out:
272 return i915_error_to_vmf_fault(err);
273}

static vm_fault_t vm_fault_gtt(struct vm_fault *vmf)
{
#define MIN_CHUNK_PAGES (SZ_1M >> PAGE_SHIFT)
	struct vm_area_struct *area = vmf->vma;
	struct i915_mmap_offset *mmo = area->vm_private_data;
	struct drm_i915_gem_object *obj = mmo->obj;
	struct drm_device *dev = obj->base.dev;
	struct drm_i915_private *i915 = to_i915(dev);
	struct intel_runtime_pm *rpm = &i915->runtime_pm;
	struct i915_ggtt *ggtt = &i915->ggtt;
	bool write = area->vm_flags & VM_WRITE;
	intel_wakeref_t wakeref;
	struct i915_vma *vma;
	pgoff_t page_offset;
	int srcu;
	int ret;

	/* Sanity check that we allow writing into this object */
	if (i915_gem_object_is_readonly(obj) && write)
		return VM_FAULT_SIGBUS;

	/* We don't use vmf->pgoff since that has the fake offset */
	page_offset = (vmf->address - area->vm_start) >> PAGE_SHIFT;

	trace_i915_gem_object_fault(obj, page_offset, true, write);

	ret = i915_gem_object_pin_pages(obj);
	if (ret)
		goto err;

	wakeref = intel_runtime_pm_get(rpm);

	ret = intel_gt_reset_trylock(ggtt->vm.gt, &srcu);
	if (ret)
		goto err_rpm;

	/* Now pin it into the GTT as needed */
	vma = i915_gem_object_ggtt_pin(obj, NULL, 0, 0,
				       PIN_MAPPABLE |
				       PIN_NONBLOCK |
				       PIN_NOEVICT);
	if (IS_ERR(vma)) {
		/* Use a partial view if it is bigger than available space */
		struct i915_ggtt_view view =
			compute_partial_view(obj, page_offset, MIN_CHUNK_PAGES);
		unsigned int flags;

		flags = PIN_MAPPABLE | PIN_NOSEARCH;
		if (view.type == I915_GGTT_VIEW_NORMAL)
			flags |= PIN_NONBLOCK; /* avoid warnings for pinned */

		/*
		 * Userspace is now writing through an untracked VMA, abandon
		 * all hope that the hardware is able to track future writes.
		 */

		vma = i915_gem_object_ggtt_pin(obj, &view, 0, 0, flags);
		if (IS_ERR(vma)) {
			flags = PIN_MAPPABLE;
			view.type = I915_GGTT_VIEW_PARTIAL;
			vma = i915_gem_object_ggtt_pin(obj, &view, 0, 0, flags);
		}

		/* The entire mappable GGTT is pinned? Unexpected! */
		GEM_BUG_ON(vma == ERR_PTR(-ENOSPC));
	}
	if (IS_ERR(vma)) {
		ret = PTR_ERR(vma);
		goto err_reset;
	}

	/* Access to snoopable pages through the GTT is incoherent. */
	if (obj->cache_level != I915_CACHE_NONE && !HAS_LLC(i915)) {
		ret = -EFAULT;
		goto err_unpin;
	}

	ret = i915_vma_pin_fence(vma);
	if (ret)
		goto err_unpin;

	/* Finally, remap it using the new GTT offset */
	ret = remap_io_mapping(area,
			       area->vm_start + (vma->ggtt_view.partial.offset << PAGE_SHIFT),
			       (ggtt->gmadr.start + vma->node.start) >> PAGE_SHIFT,
			       min_t(u64, vma->size, area->vm_end - area->vm_start),
			       &ggtt->iomap);
	if (ret)
		goto err_fence;

	assert_rpm_wakelock_held(rpm);

	/* Mark as being mmapped into userspace for later revocation */
	mutex_lock(&i915->ggtt.vm.mutex);
	if (!i915_vma_set_userfault(vma) && !obj->userfault_count++)
		list_add(&obj->userfault_link, &i915->ggtt.userfault_list);
	mutex_unlock(&i915->ggtt.vm.mutex);

	/* Track the mmo associated with the fenced vma */
	vma->mmo = mmo;

	if (IS_ACTIVE(CONFIG_DRM_I915_USERFAULT_AUTOSUSPEND))
		intel_wakeref_auto(&i915->ggtt.userfault_wakeref,
				   msecs_to_jiffies_timeout(CONFIG_DRM_I915_USERFAULT_AUTOSUSPEND));

	if (write) {
		GEM_BUG_ON(!i915_gem_object_has_pinned_pages(obj));
		i915_vma_set_ggtt_write(vma);
		obj->mm.dirty = true;
	}

err_fence:
	i915_vma_unpin_fence(vma);
err_unpin:
	__i915_vma_unpin(vma);
err_reset:
	intel_gt_reset_unlock(ggtt->vm.gt, srcu);
err_rpm:
	intel_runtime_pm_put(rpm, wakeref);
	i915_gem_object_unpin_pages(obj);
err:
	return i915_error_to_vmf_fault(ret);
}

static int
vm_access(struct vm_area_struct *area, unsigned long addr,
	  void *buf, int len, int write)
{
	struct i915_mmap_offset *mmo = area->vm_private_data;
	struct drm_i915_gem_object *obj = mmo->obj;
	void *vaddr;

	if (i915_gem_object_is_readonly(obj) && write)
		return -EACCES;

	addr -= area->vm_start;
	if (addr >= obj->base.size)
		return -EINVAL;

	/* As this is primarily for debugging, let's focus on simplicity */
	vaddr = i915_gem_object_pin_map(obj, I915_MAP_FORCE_WC);
	if (IS_ERR(vaddr))
		return PTR_ERR(vaddr);

	if (write) {
		memcpy(vaddr + addr, buf, len);
		__i915_gem_object_flush_map(obj, addr, len);
	} else {
		memcpy(buf, vaddr + addr, len);
	}

	i915_gem_object_unpin_map(obj);

	return len;
}

void __i915_gem_object_release_mmap_gtt(struct drm_i915_gem_object *obj)
{
	struct i915_vma *vma;

	GEM_BUG_ON(!obj->userfault_count);

	for_each_ggtt_vma(vma, obj)
		i915_vma_revoke_mmap(vma);

	GEM_BUG_ON(obj->userfault_count);
}

/**
 * i915_gem_object_release_mmap_gtt - remove physical page mappings
 * @obj: obj in question
 *
 * Preserve the reservation of the mmapping with the DRM core code, but
 * relinquish ownership of the pages back to the system.
 */
void i915_gem_object_release_mmap_gtt(struct drm_i915_gem_object *obj)
{
	struct drm_i915_private *i915 = to_i915(obj->base.dev);
	intel_wakeref_t wakeref;

	/*
	 * Serialisation between user GTT access and our code depends upon
	 * revoking the CPU's PTE whilst the mutex is held. The next user
	 * pagefault then has to wait until we release the mutex.
	 *
	 * Note that RPM complicates somewhat by adding an additional
	 * requirement that operations to the GGTT be made holding the RPM
	 * wakeref.
	 */
	wakeref = intel_runtime_pm_get(&i915->runtime_pm);
	mutex_lock(&i915->ggtt.vm.mutex);

	if (!obj->userfault_count)
		goto out;

	__i915_gem_object_release_mmap_gtt(obj);

	/*
	 * Ensure that the CPU's PTE are revoked and there are not outstanding
	 * memory transactions from userspace before we return. The TLB
	 * flushing implied above by changing the PTE above *should* be
	 * sufficient, an extra barrier here just provides us with a bit
	 * of paranoid documentation about our requirement to serialise
	 * memory writes before touching registers / GSM.
	 */
	wmb();

out:
	mutex_unlock(&i915->ggtt.vm.mutex);
	intel_runtime_pm_put(&i915->runtime_pm, wakeref);
}

void i915_gem_object_release_mmap_offset(struct drm_i915_gem_object *obj)
{
	struct i915_mmap_offset *mmo, *mn;

	spin_lock(&obj->mmo.lock);
	rbtree_postorder_for_each_entry_safe(mmo, mn,
					     &obj->mmo.offsets, offset) {
		/*
		 * vma_node_unmap for GTT mmaps handled already in
		 * __i915_gem_object_release_mmap_gtt
		 */
		if (mmo->mmap_type == I915_MMAP_TYPE_GTT)
			continue;

		spin_unlock(&obj->mmo.lock);
		drm_vma_node_unmap(&mmo->vma_node,
				   obj->base.dev->anon_inode->i_mapping);
		spin_lock(&obj->mmo.lock);
	}
	spin_unlock(&obj->mmo.lock);
}

static struct i915_mmap_offset *
lookup_mmo(struct drm_i915_gem_object *obj,
	   enum i915_mmap_type mmap_type)
{
	struct rb_node *rb;

	spin_lock(&obj->mmo.lock);
	rb = obj->mmo.offsets.rb_node;
	while (rb) {
		struct i915_mmap_offset *mmo =
			rb_entry(rb, typeof(*mmo), offset);

		if (mmo->mmap_type == mmap_type) {
			spin_unlock(&obj->mmo.lock);
			return mmo;
		}

		if (mmo->mmap_type < mmap_type)
			rb = rb->rb_right;
		else
			rb = rb->rb_left;
	}
	spin_unlock(&obj->mmo.lock);

	return NULL;
}

static struct i915_mmap_offset *
insert_mmo(struct drm_i915_gem_object *obj, struct i915_mmap_offset *mmo)
{
	struct rb_node *rb, **p;

	spin_lock(&obj->mmo.lock);
	rb = NULL;
	p = &obj->mmo.offsets.rb_node;
	while (*p) {
		struct i915_mmap_offset *pos;

		rb = *p;
		pos = rb_entry(rb, typeof(*pos), offset);

		if (pos->mmap_type == mmo->mmap_type) {
			spin_unlock(&obj->mmo.lock);
			drm_vma_offset_remove(obj->base.dev->vma_offset_manager,
					      &mmo->vma_node);
			kfree(mmo);
			return pos;
		}

		if (pos->mmap_type < mmo->mmap_type)
			p = &rb->rb_right;
		else
			p = &rb->rb_left;
	}
	rb_link_node(&mmo->offset, rb, p);
	rb_insert_color(&mmo->offset, &obj->mmo.offsets);
	spin_unlock(&obj->mmo.lock);

	return mmo;
}

static struct i915_mmap_offset *
mmap_offset_attach(struct drm_i915_gem_object *obj,
		   enum i915_mmap_type mmap_type,
		   struct drm_file *file)
{
	struct drm_i915_private *i915 = to_i915(obj->base.dev);
	struct i915_mmap_offset *mmo;
	int err;

	mmo = lookup_mmo(obj, mmap_type);
	if (mmo)
		goto out;

	mmo = kmalloc(sizeof(*mmo), GFP_KERNEL);
	if (!mmo)
		return ERR_PTR(-ENOMEM);

	mmo->obj = obj;
	mmo->mmap_type = mmap_type;
	drm_vma_node_reset(&mmo->vma_node);

	err = drm_vma_offset_add(obj->base.dev->vma_offset_manager,
				 &mmo->vma_node, obj->base.size / PAGE_SIZE);
	if (likely(!err))
		goto insert;

	/* Attempt to reap some mmap space from dead objects */
	err = intel_gt_retire_requests_timeout(&i915->gt, MAX_SCHEDULE_TIMEOUT);
	if (err)
		goto err;

	i915_gem_drain_freed_objects(i915);
	err = drm_vma_offset_add(obj->base.dev->vma_offset_manager,
				 &mmo->vma_node, obj->base.size / PAGE_SIZE);
	if (err)
		goto err;

insert:
	mmo = insert_mmo(obj, mmo);
	GEM_BUG_ON(lookup_mmo(obj, mmap_type) != mmo);
out:
	if (file)
		drm_vma_node_allow(&mmo->vma_node, file);
	return mmo;

err:
	kfree(mmo);
	return ERR_PTR(err);
}

static int
__assign_mmap_offset(struct drm_file *file,
		     u32 handle,
		     enum i915_mmap_type mmap_type,
		     u64 *offset)
{
	struct drm_i915_gem_object *obj;
	struct i915_mmap_offset *mmo;
	int err;

	obj = i915_gem_object_lookup(file, handle);
	if (!obj)
		return -ENOENT;

	if (i915_gem_object_never_mmap(obj)) {
		err = -ENODEV;
		goto out;
	}

	if (mmap_type != I915_MMAP_TYPE_GTT &&
	    !i915_gem_object_type_has(obj,
				      I915_GEM_OBJECT_HAS_STRUCT_PAGE |
				      I915_GEM_OBJECT_HAS_IOMEM)) {
		err = -ENODEV;
		goto out;
	}

	mmo = mmap_offset_attach(obj, mmap_type, file);
	if (IS_ERR(mmo)) {
		err = PTR_ERR(mmo);
		goto out;
	}

	*offset = drm_vma_node_offset_addr(&mmo->vma_node);
	err = 0;
out:
	i915_gem_object_put(obj);
	return err;
}

int
i915_gem_dumb_mmap_offset(struct drm_file *file,
			  struct drm_device *dev,
			  u32 handle,
			  u64 *offset)
{
	enum i915_mmap_type mmap_type;

	if (boot_cpu_has(X86_FEATURE_PAT))
		mmap_type = I915_MMAP_TYPE_WC;
	else if (!i915_ggtt_has_aperture(&to_i915(dev)->ggtt))
		return -ENODEV;
	else
		mmap_type = I915_MMAP_TYPE_GTT;

	return __assign_mmap_offset(file, handle, mmap_type, offset);
}

/**
 * i915_gem_mmap_offset_ioctl - prepare an object for GTT mmap'ing
 * @dev: DRM device
 * @data: GTT mapping ioctl data
 * @file: GEM object info
 *
 * Simply returns the fake offset to userspace so it can mmap it.
 * The mmap call will end up in i915_gem_mmap(), which will set things
 * up so we can get faults in the handlers above.
 *
 * The fault handler will take care of binding the object into the GTT
 * (since it may have been evicted to make room for something), allocating
 * a fence register, and mapping the appropriate aperture address into
 * userspace.
 */
int
i915_gem_mmap_offset_ioctl(struct drm_device *dev, void *data,
			   struct drm_file *file)
{
	struct drm_i915_private *i915 = to_i915(dev);
	struct drm_i915_gem_mmap_offset *args = data;
	enum i915_mmap_type type;
	int err;

	/*
	 * No extensions are currently defined for this ioctl, so reject
	 * anything userspace passes in: i915_user_extensions() with an
	 * empty handler table fails on the first extension it finds.
	 */
	err = i915_user_extensions(u64_to_user_ptr(args->extensions),
				   NULL, 0, NULL);
	if (err)
		return err;

	switch (args->flags) {
	case I915_MMAP_OFFSET_GTT:
		if (!i915_ggtt_has_aperture(&i915->ggtt))
			return -ENODEV;
		type = I915_MMAP_TYPE_GTT;
		break;

	case I915_MMAP_OFFSET_WC:
		if (!boot_cpu_has(X86_FEATURE_PAT))
			return -ENODEV;
		type = I915_MMAP_TYPE_WC;
		break;

	case I915_MMAP_OFFSET_WB:
		type = I915_MMAP_TYPE_WB;
		break;

	case I915_MMAP_OFFSET_UC:
		if (!boot_cpu_has(X86_FEATURE_PAT))
			return -ENODEV;
		type = I915_MMAP_TYPE_UC;
		break;

	default:
		return -EINVAL;
	}

	return __assign_mmap_offset(file, args->handle, type, &args->offset);
}
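
/*
 * Example (userspace sketch, not part of the kernel): the offset-based flow
 * pairs the ioctl above with a regular mmap() on the DRM fd; "fd", "handle"
 * and "size" are assumptions as before.
 *
 *	struct drm_i915_gem_mmap_offset arg = {
 *		.handle = handle,
 *		.flags = I915_MMAP_OFFSET_WC,
 *	};
 *	void *ptr = MAP_FAILED;
 *
 *	if (drmIoctl(fd, DRM_IOCTL_I915_GEM_MMAP_OFFSET, &arg) == 0)
 *		ptr = mmap(NULL, size, PROT_READ | PROT_WRITE,
 *			   MAP_SHARED, fd, arg.offset);
 *
 * The resulting mmap() lands in i915_gem_mmap() below, which resolves the
 * fake offset back to its i915_mmap_offset and installs the matching vm_ops.
 */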

static void vm_open(struct vm_area_struct *vma)
{
	struct i915_mmap_offset *mmo = vma->vm_private_data;
	struct drm_i915_gem_object *obj = mmo->obj;

	GEM_BUG_ON(!obj);
	i915_gem_object_get(obj);
}

static void vm_close(struct vm_area_struct *vma)
{
	struct i915_mmap_offset *mmo = vma->vm_private_data;
	struct drm_i915_gem_object *obj = mmo->obj;

	GEM_BUG_ON(!obj);
	i915_gem_object_put(obj);
}

static const struct vm_operations_struct vm_ops_gtt = {
	.fault = vm_fault_gtt,
	.access = vm_access,
	.open = vm_open,
	.close = vm_close,
};

static const struct vm_operations_struct vm_ops_cpu = {
	.fault = vm_fault_cpu,
	.access = vm_access,
	.open = vm_open,
	.close = vm_close,
};

static int singleton_release(struct inode *inode, struct file *file)
{
	struct drm_i915_private *i915 = file->private_data;

	cmpxchg(&i915->gem.mmap_singleton, file, NULL);
	drm_dev_put(&i915->drm);

	return 0;
}

static const struct file_operations singleton_fops = {
	.owner = THIS_MODULE,
	.release = singleton_release,
};

static struct file *mmap_singleton(struct drm_i915_private *i915)
{
	struct file *file;

	rcu_read_lock();
	file = READ_ONCE(i915->gem.mmap_singleton);
	if (file && !get_file_rcu(file))
		file = NULL;
	rcu_read_unlock();
	if (file)
		return file;

	file = anon_inode_getfile("i915.gem", &singleton_fops, i915, O_RDWR);
	if (IS_ERR(file))
		return file;

	/* Everyone shares a single global address space */
	file->f_mapping = i915->drm.anon_inode->i_mapping;

	smp_store_mb(i915->gem.mmap_singleton, file);
	drm_dev_get(&i915->drm);

	return file;
}

/*
 * This overcomes the limitation in drm_gem_mmap's assignment of a
 * drm_gem_object as the vma->vm_private_data. Since we need to
 * be able to resolve multiple mmap offsets which could be tied
 * to a single gem object.
 */
int i915_gem_mmap(struct file *filp, struct vm_area_struct *vma)
{
	struct drm_vma_offset_node *node;
	struct drm_file *priv = filp->private_data;
	struct drm_device *dev = priv->minor->dev;
	struct drm_i915_gem_object *obj = NULL;
	struct i915_mmap_offset *mmo = NULL;
	struct file *anon;

	if (drm_dev_is_unplugged(dev))
		return -ENODEV;

	rcu_read_lock();
	drm_vma_offset_lock_lookup(dev->vma_offset_manager);
	node = drm_vma_offset_exact_lookup_locked(dev->vma_offset_manager,
						  vma->vm_pgoff,
						  vma_pages(vma));
	if (node && drm_vma_node_is_allowed(node, priv)) {
		/*
		 * Skip 0-refcnted objects as it is in the process of being
		 * destroyed and will be invalid when the vma manager lock
		 * is released.
		 */
		mmo = container_of(node, struct i915_mmap_offset, vma_node);
		obj = i915_gem_object_get_rcu(mmo->obj);
	}
	drm_vma_offset_unlock_lookup(dev->vma_offset_manager);
	rcu_read_unlock();
	if (!obj)
		return node ? -EACCES : -EINVAL;

	if (i915_gem_object_is_readonly(obj)) {
		if (vma->vm_flags & VM_WRITE) {
			i915_gem_object_put(obj);
			return -EINVAL;
		}
		vma->vm_flags &= ~VM_MAYWRITE;
	}

	anon = mmap_singleton(to_i915(dev));
	if (IS_ERR(anon)) {
		i915_gem_object_put(obj);
		return PTR_ERR(anon);
	}

	vma->vm_flags |= VM_PFNMAP | VM_DONTEXPAND | VM_DONTDUMP;
	vma->vm_private_data = mmo;

	/*
	 * We keep the ref on mmo->obj, not vm_file, but we require
	 * vma->vm_file->f_mapping, see vma_link(), for later revocation.
	 * Our userspace is accustomed to having per-file resource cleanup
	 * (i.e. contexts, objects and requests) on their close(fd), which
	 * requires avoiding extraneous references to their filp, hence why
	 * we prefer to use an anonymous file for their mmaps.
	 */
	fput(vma->vm_file);
	vma->vm_file = anon;

	switch (mmo->mmap_type) {
	case I915_MMAP_TYPE_WC:
		vma->vm_page_prot =
			pgprot_writecombine(vm_get_page_prot(vma->vm_flags));
		vma->vm_ops = &vm_ops_cpu;
		break;

	case I915_MMAP_TYPE_WB:
		vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
		vma->vm_ops = &vm_ops_cpu;
		break;

	case I915_MMAP_TYPE_UC:
		vma->vm_page_prot =
			pgprot_noncached(vm_get_page_prot(vma->vm_flags));
		vma->vm_ops = &vm_ops_cpu;
		break;

	case I915_MMAP_TYPE_GTT:
		vma->vm_page_prot =
			pgprot_writecombine(vm_get_page_prot(vma->vm_flags));
		vma->vm_ops = &vm_ops_gtt;
		break;
	}
	vma->vm_page_prot = pgprot_decrypted(vma->vm_page_prot);

	return 0;
}

#if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
#include "selftests/i915_gem_mman.c"
#endif