/*
 * SPDX-License-Identifier: MIT
 *
 * Copyright © 2014-2016 Intel Corporation
 */

#include <linux/anon_inodes.h>
#include <linux/mman.h>
#include <linux/pfn_t.h>
#include <linux/sizes.h>

#include "gt/intel_gt.h"
#include "gt/intel_gt_requests.h"

#include "i915_drv.h"
#include "i915_gem_gtt.h"
#include "i915_gem_ioctls.h"
#include "i915_gem_object.h"
#include "i915_gem_mman.h"
#include "i915_trace.h"
#include "i915_user_extensions.h"
#include "i915_vma.h"

static inline bool
__vma_matches(struct vm_area_struct *vma, struct file *filp,
	      unsigned long addr, unsigned long size)
{
	if (vma->vm_file != filp)
		return false;

	return vma->vm_start == addr &&
	       (vma->vm_end - vma->vm_start) == PAGE_ALIGN(size);
}

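/**
 * i915_gem_mmap_ioctl - Maps the contents of an object, returning the address
 *			 it is mapped to.
 * @dev: drm device
 * @data: ioctl data blob
 * @file: drm file
 *
 * While the mapping holds a reference on the contents of the object, it
 * doesn't imply a ref on the object itself.
 */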
int
i915_gem_mmap_ioctl(struct drm_device *dev, void *data,
		    struct drm_file *file)
{
	struct drm_i915_gem_mmap *args = data;
	struct drm_i915_gem_object *obj;
	unsigned long addr;

	if (args->flags & ~(I915_MMAP_WC))
		return -EINVAL;

	if (args->flags & I915_MMAP_WC && !boot_cpu_has(X86_FEATURE_PAT))
		return -ENODEV;

	obj = i915_gem_object_lookup(file, args->handle);
	if (!obj)
		return -ENOENT;

	/* prime objects have no backing filp to GEM mmap
	 * pages from.
	 */
	if (!obj->base.filp) {
		addr = -ENXIO;
		goto err;
	}

	if (range_overflows(args->offset, args->size, (u64)obj->base.size)) {
		addr = -EINVAL;
		goto err;
	}

	addr = vm_mmap(obj->base.filp, 0, args->size,
		       PROT_READ | PROT_WRITE, MAP_SHARED,
		       args->offset);
	if (IS_ERR_VALUE(addr))
		goto err;

	if (args->flags & I915_MMAP_WC) {
		struct mm_struct *mm = current->mm;
		struct vm_area_struct *vma;

		if (mmap_write_lock_killable(mm)) {
			addr = -EINTR;
			goto err;
		}
		vma = find_vma(mm, addr);
		if (vma && __vma_matches(vma, obj->base.filp, addr, args->size))
			vma->vm_page_prot =
				pgprot_writecombine(vm_get_page_prot(vma->vm_flags));
		else
			addr = -ENOMEM;
		mmap_write_unlock(mm);
		if (IS_ERR_VALUE(addr))
			goto err;
	}
	i915_gem_object_put(obj);

	args->addr_ptr = (u64)addr;
	return 0;

err:
	i915_gem_object_put(obj);
	return addr;
}

static unsigned int tile_row_pages(const struct drm_i915_gem_object *obj)
{
	return i915_gem_object_get_tile_row_size(obj) >> PAGE_SHIFT;
}

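/**
 * i915_gem_mmap_gtt_version - report the current feature set for GTT mmaps
 *
 * A history of the GTT mmap interface:
 *
 * 0 - Everything had to fit into the GTT. Both parties of a memcpy had to
 *     be aligned and suitable for fencing, and still fit into the available
 *     mappable space left by the pinned display objects.
 *
 * 1 - Objects can be any size, and have any compatible fencing (X Y, or none
 *     as set via i915_gem_set_tiling() [DRM_I915_GEM_SET_TILING]). If the
 *     object is too large for the available space (or simply too large for
 *     the mappable aperture!), a view is created instead and faulted into
 *     userspace.
 *
 * 2 - Recognise WC as a separate cache domain so that we can flush the
 *     delayed writes via GTT before performing direct access via WC.
 *
 * 3 - Remove implicit set-domain(GTT) and synchronisation on initial
 *     pagefault; swapin remains transparent.
 *
 * 4 - Support multiple fault handlers per object depending on the backing
 *     storage (a.k.a. MMAP_OFFSET).
 */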
int i915_gem_mmap_gtt_version(void)
{
	return 4;
}

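/*
 * Pick a partial GGTT view covering the faulting chunk: rounded up to whole
 * tile rows for fenced access, and clamped to the end of the object.
 */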
static inline struct i915_ggtt_view
compute_partial_view(const struct drm_i915_gem_object *obj,
		     pgoff_t page_offset,
		     unsigned int chunk)
{
	struct i915_ggtt_view view;

	if (i915_gem_object_is_tiled(obj))
		chunk = roundup(chunk, tile_row_pages(obj));

	view.type = I915_GGTT_VIEW_PARTIAL;
	view.partial.offset = rounddown(page_offset, chunk);
	view.partial.size =
		min_t(unsigned int, chunk,
		      (obj->base.size >> PAGE_SHIFT) - view.partial.offset);

	/* If the partial covers the entire object, just create a normal VMA. */
	if (chunk >= obj->base.size >> PAGE_SHIFT)
		view.type = I915_GGTT_VIEW_NORMAL;

	return view;
}

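/* Translate a negative errno from the fault path into a vm_fault_t code. */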
static vm_fault_t i915_error_to_vmf_fault(int err)
{
	switch (err) {
	default:
		WARN_ONCE(err, "unhandled error in %s: %i\n", __func__, err);
		fallthrough;
	case -EIO: /* shmemfs failure from swap device */
	case -EFAULT: /* purged object */
	case -ENODEV: /* bad object, how did you get here! */
	case -ENXIO: /* unable to access backing store (on device) */
		return VM_FAULT_SIGBUS;

	case -ENOMEM:
		return VM_FAULT_OOM;

	case 0:
	case -EAGAIN:
	case -ENOSPC: /* transient failure to evict? */
	case -ERESTARTSYS:
	case -EINTR:
	case -EBUSY:
		/*
		 * EBUSY is ok: this just means that another thread
		 * already did the job.
		 */
		return VM_FAULT_NOPAGE;
	}
}

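/*
 * Fault handler for the direct (WB/WC/UC) CPU mmaps: pin the backing store
 * and map all of its pages into the faulting vma in one go.
 */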
static vm_fault_t vm_fault_cpu(struct vm_fault *vmf)
{
	struct vm_area_struct *area = vmf->vma;
	struct i915_mmap_offset *mmo = area->vm_private_data;
	struct drm_i915_gem_object *obj = mmo->obj;
	resource_size_t iomap;
	int err;

	/* Sanity check that we allow writing into this object */
	if (unlikely(i915_gem_object_is_readonly(obj) &&
		     area->vm_flags & VM_WRITE))
		return VM_FAULT_SIGBUS;

	err = i915_gem_object_pin_pages(obj);
	if (err)
		goto out;

	iomap = -1;
	if (!i915_gem_object_type_has(obj, I915_GEM_OBJECT_HAS_STRUCT_PAGE)) {
		iomap = obj->mm.region->iomap.base;
		iomap -= obj->mm.region->region.start;
	}

	/* PTEs are revoked in obj->ops->put_pages() */
	err = remap_io_sg(area,
			  area->vm_start, area->vm_end - area->vm_start,
			  obj->mm.pages->sgl, iomap);

	if (area->vm_flags & VM_WRITE) {
		GEM_BUG_ON(!i915_gem_object_has_pinned_pages(obj));
		obj->mm.dirty = true;
	}

	i915_gem_object_unpin_pages(obj);

out:
	return i915_error_to_vmf_fault(err);
}

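/*
 * Fault handler for GTT mmaps: bind the object into the mappable aperture
 * (falling back to a partial view if it does not fit), take a fence if
 * needed and remap the aperture range into the faulting vma.
 */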
static vm_fault_t vm_fault_gtt(struct vm_fault *vmf)
{
#define MIN_CHUNK_PAGES (SZ_1M >> PAGE_SHIFT)
	struct vm_area_struct *area = vmf->vma;
	struct i915_mmap_offset *mmo = area->vm_private_data;
	struct drm_i915_gem_object *obj = mmo->obj;
	struct drm_device *dev = obj->base.dev;
	struct drm_i915_private *i915 = to_i915(dev);
	struct intel_runtime_pm *rpm = &i915->runtime_pm;
	struct i915_ggtt *ggtt = &i915->ggtt;
	bool write = area->vm_flags & VM_WRITE;
	struct i915_gem_ww_ctx ww;
	intel_wakeref_t wakeref;
	struct i915_vma *vma;
	pgoff_t page_offset;
	int srcu;
	int ret;

	/* We don't use vmf->pgoff since that has the fake offset */
	page_offset = (vmf->address - area->vm_start) >> PAGE_SHIFT;

	trace_i915_gem_object_fault(obj, page_offset, true, write);

	wakeref = intel_runtime_pm_get(rpm);

	i915_gem_ww_ctx_init(&ww, true);
retry:
	ret = i915_gem_object_lock(obj, &ww);
	if (ret)
		goto err_rpm;

	/* Sanity check that we allow writing into this object */
	if (i915_gem_object_is_readonly(obj) && write) {
		ret = -EFAULT;
		goto err_rpm;
	}

	ret = i915_gem_object_pin_pages(obj);
	if (ret)
		goto err_rpm;

	ret = intel_gt_reset_trylock(ggtt->vm.gt, &srcu);
	if (ret)
		goto err_pages;

	/* Now pin it into the GTT as needed */
	vma = i915_gem_object_ggtt_pin_ww(obj, &ww, NULL, 0, 0,
					  PIN_MAPPABLE |
					  PIN_NONBLOCK /* NOWARN */ |
					  PIN_NOEVICT);
	if (IS_ERR(vma) && vma != ERR_PTR(-EDEADLK)) {
		/* Use a partial view if it is bigger than available space */
		struct i915_ggtt_view view =
			compute_partial_view(obj, page_offset, MIN_CHUNK_PAGES);
		unsigned int flags;

		flags = PIN_MAPPABLE | PIN_NOSEARCH;
		if (view.type == I915_GGTT_VIEW_NORMAL)
			flags |= PIN_NONBLOCK; /* avoid warnings for pinned */

		/*
		 * Userspace is now writing through an untracked VMA, abandon
		 * all hope that the hardware is able to track future writes.
		 */

		vma = i915_gem_object_ggtt_pin_ww(obj, &ww, &view, 0, 0, flags);
		if (IS_ERR(vma) && vma != ERR_PTR(-EDEADLK)) {
			flags = PIN_MAPPABLE;
			view.type = I915_GGTT_VIEW_PARTIAL;
			vma = i915_gem_object_ggtt_pin_ww(obj, &ww, &view, 0, 0, flags);
		}

		/* The entire mappable GGTT is pinned? Unexpected! */
		GEM_BUG_ON(vma == ERR_PTR(-ENOSPC));
	}
	if (IS_ERR(vma)) {
		ret = PTR_ERR(vma);
		goto err_reset;
	}

	/* Access to snoopable pages through the GTT is incoherent. */
	if (obj->cache_level != I915_CACHE_NONE && !HAS_LLC(i915)) {
		ret = -EFAULT;
		goto err_unpin;
	}

	ret = i915_vma_pin_fence(vma);
	if (ret)
		goto err_unpin;

	/* Finally, remap it using the new GTT offset */
	ret = remap_io_mapping(area,
			       area->vm_start + (vma->ggtt_view.partial.offset << PAGE_SHIFT),
			       (ggtt->gmadr.start + vma->node.start) >> PAGE_SHIFT,
			       min_t(u64, vma->size, area->vm_end - area->vm_start),
			       &ggtt->iomap);
	if (ret)
		goto err_fence;

	assert_rpm_wakelock_held(rpm);

	/* Mark as being mmapped into userspace for later revocation */
	mutex_lock(&i915->ggtt.vm.mutex);
	if (!i915_vma_set_userfault(vma) && !obj->userfault_count++)
		list_add(&obj->userfault_link, &i915->ggtt.userfault_list);
	mutex_unlock(&i915->ggtt.vm.mutex);

	/* Track the mmo associated with the fenced vma */
	vma->mmo = mmo;

	if (IS_ACTIVE(CONFIG_DRM_I915_USERFAULT_AUTOSUSPEND))
		intel_wakeref_auto(&i915->ggtt.userfault_wakeref,
				   msecs_to_jiffies_timeout(CONFIG_DRM_I915_USERFAULT_AUTOSUSPEND));

	if (write) {
		GEM_BUG_ON(!i915_gem_object_has_pinned_pages(obj));
		i915_vma_set_ggtt_write(vma);
		obj->mm.dirty = true;
	}

err_fence:
	i915_vma_unpin_fence(vma);
err_unpin:
	__i915_vma_unpin(vma);
err_reset:
	intel_gt_reset_unlock(ggtt->vm.gt, srcu);
err_pages:
	i915_gem_object_unpin_pages(obj);
err_rpm:
	if (ret == -EDEADLK) {
		ret = i915_gem_ww_ctx_backoff(&ww);
		if (!ret)
			goto retry;
	}
	i915_gem_ww_ctx_fini(&ww);
	intel_runtime_pm_put(rpm, wakeref);
	return i915_error_to_vmf_fault(ret);
}

static int
vm_access(struct vm_area_struct *area, unsigned long addr,
	  void *buf, int len, int write)
{
	struct i915_mmap_offset *mmo = area->vm_private_data;
	struct drm_i915_gem_object *obj = mmo->obj;
	void *vaddr;

	if (i915_gem_object_is_readonly(obj) && write)
		return -EACCES;

	addr -= area->vm_start;
	if (addr >= obj->base.size)
		return -EINVAL;

	/* As this is primarily for debugging, let's focus on simplicity */
	vaddr = i915_gem_object_pin_map(obj, I915_MAP_FORCE_WC);
	if (IS_ERR(vaddr))
		return PTR_ERR(vaddr);

	if (write) {
		memcpy(vaddr + addr, buf, len);
		__i915_gem_object_flush_map(obj, addr, len);
	} else {
		memcpy(buf, vaddr + addr, len);
	}

	i915_gem_object_unpin_map(obj);

	return len;
}

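/*
 * Zap the CPU PTEs of every GGTT vma belonging to this object, forcing the
 * next user access to fault back into vm_fault_gtt().
 */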
void __i915_gem_object_release_mmap_gtt(struct drm_i915_gem_object *obj)
{
	struct i915_vma *vma;

	GEM_BUG_ON(!obj->userfault_count);

	for_each_ggtt_vma(vma, obj)
		i915_vma_revoke_mmap(vma);

	GEM_BUG_ON(obj->userfault_count);
}

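/*
 * It is vital that we remove the page mapping if we have mapped a tiled
 * object through the GTT and then lose the fence register due to resource
 * pressure. Similarly if the object has been moved out of the aperture,
 * then pages mapped into userspace must be revoked. Removing the mapping
 * will then trigger a page fault on the next user access, allowing fixup
 * by vm_fault_gtt().
 */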
void i915_gem_object_release_mmap_gtt(struct drm_i915_gem_object *obj)
{
	struct drm_i915_private *i915 = to_i915(obj->base.dev);
	intel_wakeref_t wakeref;

	/*
	 * Serialisation between user GTT access and our code depends upon
	 * revoking the CPU's PTE whilst the mutex is held. The next user
	 * pagefault then has to wait until we release the mutex.
	 *
	 * Note that RPM complicates somewhat by adding an additional
	 * requirement that operations to the GGTT be made holding the RPM
	 * wakeref.
	 */
	wakeref = intel_runtime_pm_get(&i915->runtime_pm);
	mutex_lock(&i915->ggtt.vm.mutex);

	if (!obj->userfault_count)
		goto out;

	__i915_gem_object_release_mmap_gtt(obj);

	/*
	 * Ensure that the CPU's PTE are revoked and there are not outstanding
	 * memory transactions from userspace before we return. The TLB
	 * flushing implied above by changing the PTE *should* be sufficient,
	 * an extra barrier here just provides us with a bit of paranoid
	 * documentation about our requirement to serialise memory writes
	 * before touching registers / GSM.
	 */
	wmb();

out:
	mutex_unlock(&i915->ggtt.vm.mutex);
	intel_runtime_pm_put(&i915->runtime_pm, wakeref);
}

void i915_gem_object_release_mmap_offset(struct drm_i915_gem_object *obj)
{
	struct i915_mmap_offset *mmo, *mn;

	spin_lock(&obj->mmo.lock);
	rbtree_postorder_for_each_entry_safe(mmo, mn,
					     &obj->mmo.offsets, offset) {
		/*
		 * vma_node_unmap for GTT mmaps handled already in
		 * __i915_gem_object_release_mmap_gtt
		 */
		if (mmo->mmap_type == I915_MMAP_TYPE_GTT)
			continue;

		spin_unlock(&obj->mmo.lock);
		drm_vma_node_unmap(&mmo->vma_node,
				   obj->base.dev->anon_inode->i_mapping);
		spin_lock(&obj->mmo.lock);
	}
	spin_unlock(&obj->mmo.lock);
}

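/*
 * Each object keeps its fake mmap offsets in an rbtree (obj->mmo.offsets),
 * one node per mmap_type, so lookup is a simple search keyed by type.
 */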
static struct i915_mmap_offset *
lookup_mmo(struct drm_i915_gem_object *obj,
	   enum i915_mmap_type mmap_type)
{
	struct rb_node *rb;

	spin_lock(&obj->mmo.lock);
	rb = obj->mmo.offsets.rb_node;
	while (rb) {
		struct i915_mmap_offset *mmo =
			rb_entry(rb, typeof(*mmo), offset);

		if (mmo->mmap_type == mmap_type) {
			spin_unlock(&obj->mmo.lock);
			return mmo;
		}

		if (mmo->mmap_type < mmap_type)
			rb = rb->rb_right;
		else
			rb = rb->rb_left;
	}
	spin_unlock(&obj->mmo.lock);

	return NULL;
}

static struct i915_mmap_offset *
insert_mmo(struct drm_i915_gem_object *obj, struct i915_mmap_offset *mmo)
{
	struct rb_node *rb, **p;

	spin_lock(&obj->mmo.lock);
	rb = NULL;
	p = &obj->mmo.offsets.rb_node;
	while (*p) {
		struct i915_mmap_offset *pos;

		rb = *p;
		pos = rb_entry(rb, typeof(*pos), offset);

		if (pos->mmap_type == mmo->mmap_type) {
			/* We raced; keep the existing node and free ours. */
			spin_unlock(&obj->mmo.lock);
			drm_vma_offset_remove(obj->base.dev->vma_offset_manager,
					      &mmo->vma_node);
			kfree(mmo);
			return pos;
		}

		if (pos->mmap_type < mmo->mmap_type)
			p = &rb->rb_right;
		else
			p = &rb->rb_left;
	}
	rb_link_node(&mmo->offset, rb, p);
	rb_insert_color(&mmo->offset, &obj->mmo.offsets);
	spin_unlock(&obj->mmo.lock);

	return mmo;
}

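/*
 * Reserve a fake mmap offset for the object (once per mmap_type). If the
 * vma manager is exhausted, retire outstanding requests and reap freed
 * objects to reclaim address space, then retry.
 */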
static struct i915_mmap_offset *
mmap_offset_attach(struct drm_i915_gem_object *obj,
		   enum i915_mmap_type mmap_type,
		   struct drm_file *file)
{
	struct drm_i915_private *i915 = to_i915(obj->base.dev);
	struct i915_mmap_offset *mmo;
	int err;

	mmo = lookup_mmo(obj, mmap_type);
	if (mmo)
		goto out;

	mmo = kmalloc(sizeof(*mmo), GFP_KERNEL);
	if (!mmo)
		return ERR_PTR(-ENOMEM);

	mmo->obj = obj;
	mmo->mmap_type = mmap_type;
	drm_vma_node_reset(&mmo->vma_node);

	err = drm_vma_offset_add(obj->base.dev->vma_offset_manager,
				 &mmo->vma_node, obj->base.size / PAGE_SIZE);
	if (likely(!err))
		goto insert;

	/* Attempt to reap some mmap space from dead objects */
	err = intel_gt_retire_requests_timeout(&i915->gt, MAX_SCHEDULE_TIMEOUT);
	if (err)
		goto err;

	i915_gem_drain_freed_objects(i915);
	err = drm_vma_offset_add(obj->base.dev->vma_offset_manager,
				 &mmo->vma_node, obj->base.size / PAGE_SIZE);
	if (err)
		goto err;

insert:
	mmo = insert_mmo(obj, mmo);
	GEM_BUG_ON(lookup_mmo(obj, mmap_type) != mmo);
out:
	if (file)
		drm_vma_node_allow(&mmo->vma_node, file);
	return mmo;

err:
	kfree(mmo);
	return ERR_PTR(err);
}

static int
__assign_mmap_offset(struct drm_file *file,
		     u32 handle,
		     enum i915_mmap_type mmap_type,
		     u64 *offset)
{
	struct drm_i915_gem_object *obj;
	struct i915_mmap_offset *mmo;
	int err;

	obj = i915_gem_object_lookup(file, handle);
	if (!obj)
		return -ENOENT;

	if (i915_gem_object_never_mmap(obj)) {
		err = -ENODEV;
		goto out;
	}

	if (mmap_type != I915_MMAP_TYPE_GTT &&
	    !i915_gem_object_type_has(obj,
				      I915_GEM_OBJECT_HAS_STRUCT_PAGE |
				      I915_GEM_OBJECT_HAS_IOMEM)) {
		err = -ENODEV;
		goto out;
	}

	mmo = mmap_offset_attach(obj, mmap_type, file);
	if (IS_ERR(mmo)) {
		err = PTR_ERR(mmo);
		goto out;
	}

	*offset = drm_vma_node_offset_addr(&mmo->vma_node);
	err = 0;
out:
	i915_gem_object_put(obj);
	return err;
}

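/*
 * Pick a default mmap type for dumb buffers: WC when the CPU supports PAT,
 * otherwise fall back to a GTT mapping through the mappable aperture.
 */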
int
i915_gem_dumb_mmap_offset(struct drm_file *file,
			  struct drm_device *dev,
			  u32 handle,
			  u64 *offset)
{
	enum i915_mmap_type mmap_type;

	if (boot_cpu_has(X86_FEATURE_PAT))
		mmap_type = I915_MMAP_TYPE_WC;
	else if (!i915_ggtt_has_aperture(&to_i915(dev)->ggtt))
		return -ENODEV;
	else
		mmap_type = I915_MMAP_TYPE_GTT;

	return __assign_mmap_offset(file, handle, mmap_type, offset);
}

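/**
 * i915_gem_mmap_offset_ioctl - prepare an object for GTT mmap'ing
 * @dev: DRM device
 * @data: GTT mapping ioctl data
 * @file: GEM object info
 *
 * Simply returns the fake offset to userspace so it can mmap it.
 * The mmap call will end up in drm_gem_mmap(), which will set things
 * up so we can get faults in the handler above.
 *
 * The fault handler will take care of binding the object into the GTT
 * (since it may have been evicted to make room for something), allocating
 * a fence register, and mapping the appropriate aperture address into
 * userspace.
 */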
int
i915_gem_mmap_offset_ioctl(struct drm_device *dev, void *data,
			   struct drm_file *file)
{
	struct drm_i915_private *i915 = to_i915(dev);
	struct drm_i915_gem_mmap_offset *args = data;
	enum i915_mmap_type type;
	int err;

	/*
	 * No extensions are currently defined for this ioctl, so simply
	 * validate that the (empty) extension chain is well formed and
	 * reject anything userspace tries to pass in.
	 */
	err = i915_user_extensions(u64_to_user_ptr(args->extensions),
				   NULL, 0, NULL);
	if (err)
		return err;

	switch (args->flags) {
	case I915_MMAP_OFFSET_GTT:
		if (!i915_ggtt_has_aperture(&i915->ggtt))
			return -ENODEV;
		type = I915_MMAP_TYPE_GTT;
		break;

	case I915_MMAP_OFFSET_WC:
		if (!boot_cpu_has(X86_FEATURE_PAT))
			return -ENODEV;
		type = I915_MMAP_TYPE_WC;
		break;

	case I915_MMAP_OFFSET_WB:
		type = I915_MMAP_TYPE_WB;
		break;

	case I915_MMAP_OFFSET_UC:
		if (!boot_cpu_has(X86_FEATURE_PAT))
			return -ENODEV;
		type = I915_MMAP_TYPE_UC;
		break;

	default:
		return -EINVAL;
	}

	return __assign_mmap_offset(file, args->handle, type, &args->offset);
}

static void vm_open(struct vm_area_struct *vma)
{
	struct i915_mmap_offset *mmo = vma->vm_private_data;
	struct drm_i915_gem_object *obj = mmo->obj;

	GEM_BUG_ON(!obj);
	i915_gem_object_get(obj);
}

static void vm_close(struct vm_area_struct *vma)
{
	struct i915_mmap_offset *mmo = vma->vm_private_data;
	struct drm_i915_gem_object *obj = mmo->obj;

	GEM_BUG_ON(!obj);
	i915_gem_object_put(obj);
}

static const struct vm_operations_struct vm_ops_gtt = {
	.fault = vm_fault_gtt,
	.access = vm_access,
	.open = vm_open,
	.close = vm_close,
};

static const struct vm_operations_struct vm_ops_cpu = {
	.fault = vm_fault_cpu,
	.access = vm_access,
	.open = vm_open,
	.close = vm_close,
};

static int singleton_release(struct inode *inode, struct file *file)
{
	struct drm_i915_private *i915 = file->private_data;

	cmpxchg(&i915->gem.mmap_singleton, file, NULL);
	drm_dev_put(&i915->drm);

	return 0;
}

static const struct file_operations singleton_fops = {
	.owner = THIS_MODULE,
	.release = singleton_release,
};

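/*
 * All GEM mmaps share a single anonymous "i915.gem" file, so every vma ends
 * up with the same vm_file->f_mapping; this is what allows us to revoke the
 * user mappings of all clients with a single unmap_mapping_range() call.
 */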
static struct file *mmap_singleton(struct drm_i915_private *i915)
{
	struct file *file;

	rcu_read_lock();
	file = READ_ONCE(i915->gem.mmap_singleton);
	if (file && !get_file_rcu(file))
		file = NULL;
	rcu_read_unlock();
	if (file)
		return file;

	file = anon_inode_getfile("i915.gem", &singleton_fops, i915, O_RDWR);
	if (IS_ERR(file))
		return file;

	/* Everyone shares a single global address space */
	file->f_mapping = i915->drm.anon_inode->i_mapping;

	smp_store_mb(i915->gem.mmap_singleton, file);
	drm_dev_get(&i915->drm);

	return file;
}

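/*
 * This overcomes the limitation in drm_gem_mmap's assignment of a
 * drm_gem_object as the vma->vm_private_data, since we need to be able to
 * resolve multiple mmap offsets which could be tied to a single gem object.
 */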
int i915_gem_mmap(struct file *filp, struct vm_area_struct *vma)
{
	struct drm_vma_offset_node *node;
	struct drm_file *priv = filp->private_data;
	struct drm_device *dev = priv->minor->dev;
	struct drm_i915_gem_object *obj = NULL;
	struct i915_mmap_offset *mmo = NULL;
	struct file *anon;

	if (drm_dev_is_unplugged(dev))
		return -ENODEV;

	rcu_read_lock();
	drm_vma_offset_lock_lookup(dev->vma_offset_manager);
	node = drm_vma_offset_exact_lookup_locked(dev->vma_offset_manager,
						  vma->vm_pgoff,
						  vma_pages(vma));
	if (node && drm_vma_node_is_allowed(node, priv)) {
		/*
		 * Skip 0-refcnted objects as it is in the process of being
		 * destroyed and will be invalid when the vma manager lock
		 * is released.
		 */
		mmo = container_of(node, struct i915_mmap_offset, vma_node);
		obj = i915_gem_object_get_rcu(mmo->obj);
	}
	drm_vma_offset_unlock_lookup(dev->vma_offset_manager);
	rcu_read_unlock();
	if (!obj)
		return node ? -EACCES : -EINVAL;

	if (i915_gem_object_is_readonly(obj)) {
		if (vma->vm_flags & VM_WRITE) {
			i915_gem_object_put(obj);
			return -EINVAL;
		}
		vma->vm_flags &= ~VM_MAYWRITE;
	}

	anon = mmap_singleton(to_i915(dev));
	if (IS_ERR(anon)) {
		i915_gem_object_put(obj);
		return PTR_ERR(anon);
	}

	vma->vm_flags |= VM_PFNMAP | VM_DONTEXPAND | VM_DONTDUMP;
	vma->vm_private_data = mmo;

	/*
	 * We keep the ref on mmo->obj, not vm_file. Swapping the installed
	 * vm_file for our shared anon file gives every GEM mmap the same
	 * vm_file->f_mapping, which the eviction mechanisms rely upon to
	 * revoke user PTEs via unmap_mapping_range().
	 */
	fput(vma->vm_file);
	vma->vm_file = anon;

	switch (mmo->mmap_type) {
	case I915_MMAP_TYPE_WC:
		vma->vm_page_prot =
			pgprot_writecombine(vm_get_page_prot(vma->vm_flags));
		vma->vm_ops = &vm_ops_cpu;
		break;

	case I915_MMAP_TYPE_WB:
		vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
		vma->vm_ops = &vm_ops_cpu;
		break;

	case I915_MMAP_TYPE_UC:
		vma->vm_page_prot =
			pgprot_noncached(vm_get_page_prot(vma->vm_flags));
		vma->vm_ops = &vm_ops_cpu;
		break;

	case I915_MMAP_TYPE_GTT:
		vma->vm_page_prot =
			pgprot_writecombine(vm_get_page_prot(vma->vm_flags));
		vma->vm_ops = &vm_ops_gtt;
		break;
	}
	vma->vm_page_prot = pgprot_decrypted(vma->vm_page_prot);

	return 0;
}

#if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
#include "selftests/i915_gem_mman.c"
#endif