/*
 * Copyright © 2008 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OF THE
 * SOFTWARE.
 *
 * Authors:
 *    Eric Anholt <eric@anholt.net>
 *
 */

#include <drm/drm_vma_manager.h>
#include <linux/dma-fence-array.h>
#include <linux/kthread.h>
#include <linux/dma-resv.h>
#include <linux/shmem_fs.h>
#include <linux/slab.h>
#include <linux/stop_machine.h>
#include <linux/swap.h>
#include <linux/pci.h>
#include <linux/dma-buf.h>
#include <linux/mman.h>

#include "display/intel_display.h"
#include "display/intel_frontbuffer.h"

#include "gem/i915_gem_clflush.h"
#include "gem/i915_gem_context.h"
#include "gem/i915_gem_ioctls.h"
#include "gem/i915_gem_mman.h"
#include "gem/i915_gem_region.h"
#include "gt/intel_engine_user.h"
#include "gt/intel_gt.h"
#include "gt/intel_gt_pm.h"
#include "gt/intel_workarounds.h"

#include "i915_drv.h"
#include "i915_trace.h"
#include "i915_vgpu.h"

#include "intel_pm.h"

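/*
 * insert_mappable_node() and remove_mappable_node() below manage a small
 * scratch slot in the CPU-visible (mappable) half of the GGTT. The
 * pread/pwrite slow paths use it to map one page of an object at a time
 * when the whole object cannot be pinned into the aperture.
 */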
static int
insert_mappable_node(struct i915_ggtt *ggtt, struct drm_mm_node *node, u32 size)
{
        int err;

        err = mutex_lock_interruptible(&ggtt->vm.mutex);
        if (err)
                return err;

        memset(node, 0, sizeof(*node));
        err = drm_mm_insert_node_in_range(&ggtt->vm.mm, node,
                                          size, 0, I915_COLOR_UNEVICTABLE,
                                          0, ggtt->mappable_end,
                                          DRM_MM_INSERT_LOW);

        mutex_unlock(&ggtt->vm.mutex);

        return err;
}

static void
remove_mappable_node(struct i915_ggtt *ggtt, struct drm_mm_node *node)
{
        mutex_lock(&ggtt->vm.mutex);
        drm_mm_remove_node(node);
        mutex_unlock(&ggtt->vm.mutex);
}

int
i915_gem_get_aperture_ioctl(struct drm_device *dev, void *data,
                            struct drm_file *file)
{
        struct i915_ggtt *ggtt = &to_i915(dev)->ggtt;
        struct drm_i915_gem_get_aperture *args = data;
        struct i915_vma *vma;
        u64 pinned;

        if (mutex_lock_interruptible(&ggtt->vm.mutex))
                return -EINTR;

        pinned = ggtt->vm.reserved;
        list_for_each_entry(vma, &ggtt->vm.bound_list, vm_link)
                if (i915_vma_is_pinned(vma))
                        pinned += vma->node.size;

        mutex_unlock(&ggtt->vm.mutex);

        args->aper_size = ggtt->vm.total;
        args->aper_available_size = args->aper_size - pinned;

        return 0;
}
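
/*
 * Example (userspace sketch, not part of this file): querying the aperture
 * via libdrm. Assumes <stdio.h>, <xf86drm.h> and <drm/i915_drm.h>, and that
 * "fd" is an open DRM device file descriptor.
 *
 *      struct drm_i915_gem_get_aperture aperture = {};
 *
 *      if (drmIoctl(fd, DRM_IOCTL_I915_GEM_GET_APERTURE, &aperture) == 0)
 *              printf("GGTT: %llu bytes, %llu available\n",
 *                     (unsigned long long)aperture.aper_size,
 *                     (unsigned long long)aperture.aper_available_size);
 */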

int i915_gem_object_unbind(struct drm_i915_gem_object *obj,
                           unsigned long flags)
{
        struct intel_runtime_pm *rpm = &to_i915(obj->base.dev)->runtime_pm;
        LIST_HEAD(still_in_list);
        intel_wakeref_t wakeref;
        struct i915_vma *vma;
        int ret;

        if (list_empty(&obj->vma.list))
                return 0;

        /*
         * As some machines use ACPI to handle runtime-resume callbacks, and
         * ACPI is quite kmalloc happy, we cannot resume beneath the vm->mutex
         * as they are required by the shrinker. Ergo, we wake the device up
         * first just in case.
         */
        wakeref = intel_runtime_pm_get(rpm);

try_again:
        ret = 0;
        spin_lock(&obj->vma.lock);
        while (!ret && (vma = list_first_entry_or_null(&obj->vma.list,
                                                       struct i915_vma,
                                                       obj_link))) {
                struct i915_address_space *vm = vma->vm;

                list_move_tail(&vma->obj_link, &still_in_list);
                if (!i915_vma_is_bound(vma, I915_VMA_BIND_MASK))
                        continue;

                if (flags & I915_GEM_OBJECT_UNBIND_TEST) {
                        ret = -EBUSY;
                        break;
                }

                ret = -EAGAIN;
                if (!i915_vm_tryopen(vm))
                        break;

                /* Prevent vma being freed by i915_vma_parked as we unbind */
                vma = __i915_vma_get(vma);
                spin_unlock(&obj->vma.lock);

                if (vma) {
                        ret = -EBUSY;
                        if (flags & I915_GEM_OBJECT_UNBIND_ACTIVE ||
                            !i915_vma_is_active(vma))
                                ret = i915_vma_unbind(vma);

                        __i915_vma_put(vma);
                }

                i915_vm_close(vm);
                spin_lock(&obj->vma.lock);
        }
        list_splice_init(&still_in_list, &obj->vma.list);
        spin_unlock(&obj->vma.lock);

        if (ret == -EAGAIN && flags & I915_GEM_OBJECT_UNBIND_BARRIER) {
                rcu_barrier(); /* flush the i915_vm_release() */
                goto try_again;
        }

        intel_runtime_pm_put(rpm, wakeref);

        return ret;
}

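/*
 * Per-object pwrite implementation for "phys" objects, i.e. objects backed
 * by a single contiguous allocation rather than shmem pages. The CPU copy
 * is followed by a clflush and a chipset flush so the GPU observes the new
 * data immediately.
 */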
static int
i915_gem_phys_pwrite(struct drm_i915_gem_object *obj,
                     struct drm_i915_gem_pwrite *args,
                     struct drm_file *file)
{
        void *vaddr = sg_page(obj->mm.pages->sgl) + args->offset;
        char __user *user_data = u64_to_user_ptr(args->data_ptr);

        /*
         * We manually control the domain here and pretend that it
         * remains coherent i.e. in the GTT domain, like shmem_pwrite.
         */
        i915_gem_object_invalidate_frontbuffer(obj, ORIGIN_CPU);

        if (copy_from_user(vaddr, user_data, args->size))
                return -EFAULT;

        drm_clflush_virt_range(vaddr, args->size);
        intel_gt_chipset_flush(&to_i915(obj->base.dev)->gt);

        i915_gem_object_flush_frontbuffer(obj, ORIGIN_CPU);
        return 0;
}

static int
i915_gem_create(struct drm_file *file,
                struct intel_memory_region *mr,
                u64 *size_p,
                u32 *handle_p)
{
        struct drm_i915_gem_object *obj;
        u32 handle;
        u64 size;
        int ret;

        GEM_BUG_ON(!is_power_of_2(mr->min_page_size));
        size = round_up(*size_p, mr->min_page_size);
        if (size == 0)
                return -EINVAL;

        /* For most of the ABI (e.g. mmap) we think in system pages */
        GEM_BUG_ON(!IS_ALIGNED(size, PAGE_SIZE));

        /* Allocate the new object */
        obj = i915_gem_object_create_region(mr, size, 0);
        if (IS_ERR(obj))
                return PTR_ERR(obj);

        ret = drm_gem_handle_create(file, &obj->base, &handle);
        /* drop reference from allocate - handle holds it now */
        i915_gem_object_put(obj);
        if (ret)
                return ret;

        *handle_p = handle;
        *size_p = size;
        return 0;
}

int
i915_gem_dumb_create(struct drm_file *file,
                     struct drm_device *dev,
                     struct drm_mode_create_dumb *args)
{
        enum intel_memory_type mem_type;
        int cpp = DIV_ROUND_UP(args->bpp, 8);
        u32 format;

        switch (cpp) {
        case 1:
                format = DRM_FORMAT_C8;
                break;
        case 2:
                format = DRM_FORMAT_RGB565;
                break;
        case 4:
                format = DRM_FORMAT_XRGB8888;
                break;
        default:
                return -EINVAL;
        }

        /* have to work out size/pitch and return them */
        args->pitch = ALIGN(args->width * cpp, 64);

        /* align stride to page size so that we can remap */
        if (args->pitch > intel_plane_fb_max_stride(to_i915(dev), format,
                                                    DRM_FORMAT_MOD_LINEAR))
                args->pitch = ALIGN(args->pitch, 4096);

        if (args->pitch < args->width)
                return -EINVAL;

        args->size = mul_u32_u32(args->pitch, args->height);

        mem_type = INTEL_MEMORY_SYSTEM;
        if (HAS_LMEM(to_i915(dev)))
                mem_type = INTEL_MEMORY_LOCAL;

        return i915_gem_create(file,
                               intel_memory_region_by_type(to_i915(dev),
                                                           mem_type),
                               &args->size, &args->handle);
}
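
/*
 * Example (userspace sketch, not part of this file): allocating a dumb
 * scanout buffer through the generic DRM interface, which lands in
 * i915_gem_dumb_create() above. Assumes <xf86drm.h> and <drm/drm.h>.
 *
 *      struct drm_mode_create_dumb create = {
 *              .width = 1920,
 *              .height = 1080,
 *              .bpp = 32,      -- XRGB8888, i.e. 4 bytes per pixel
 *      };
 *
 *      if (drmIoctl(fd, DRM_IOCTL_MODE_CREATE_DUMB, &create) == 0) {
 *              -- create.handle, create.pitch and create.size are now valid
 *      }
 */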

/**
 * Creates a new mm object and returns a handle to it.
 * @dev: drm device pointer
 * @data: ioctl data blob
 * @file: drm file pointer
 */
int
i915_gem_create_ioctl(struct drm_device *dev, void *data,
                      struct drm_file *file)
{
        struct drm_i915_private *i915 = to_i915(dev);
        struct drm_i915_gem_create *args = data;

        i915_gem_flush_free_objects(i915);

        return i915_gem_create(file,
                               intel_memory_region_by_type(i915,
                                                           INTEL_MEMORY_SYSTEM),
                               &args->size, &args->handle);
}
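
/*
 * Example (userspace sketch, not part of this file): creating a GEM object
 * with DRM_IOCTL_I915_GEM_CREATE. Assumes <xf86drm.h> and <drm/i915_drm.h>.
 *
 *      struct drm_i915_gem_create create = { .size = 4096 };
 *
 *      if (drmIoctl(fd, DRM_IOCTL_I915_GEM_CREATE, &create) == 0) {
 *              __u32 handle = create.handle;
 *              ...
 *      }
 */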

static int
shmem_pread(struct page *page, int offset, int len, char __user *user_data,
            bool needs_clflush)
{
        char *vaddr;
        int ret;

        vaddr = kmap(page);

        if (needs_clflush)
                drm_clflush_virt_range(vaddr + offset, len);

        ret = __copy_to_user(user_data, vaddr + offset, len);

        kunmap(page);

        return ret ? -EFAULT : 0;
}

static int
i915_gem_shmem_pread(struct drm_i915_gem_object *obj,
                     struct drm_i915_gem_pread *args)
{
        unsigned int needs_clflush;
        unsigned int idx, offset;
        struct dma_fence *fence;
        char __user *user_data;
        u64 remain;
        int ret;

        ret = i915_gem_object_prepare_read(obj, &needs_clflush);
        if (ret)
                return ret;

        fence = i915_gem_object_lock_fence(obj);
        i915_gem_object_finish_access(obj);
        if (!fence)
                return -ENOMEM;

        remain = args->size;
        user_data = u64_to_user_ptr(args->data_ptr);
        offset = offset_in_page(args->offset);
        for (idx = args->offset >> PAGE_SHIFT; remain; idx++) {
                struct page *page = i915_gem_object_get_page(obj, idx);
                unsigned int length = min_t(u64, remain, PAGE_SIZE - offset);

                ret = shmem_pread(page, offset, length, user_data,
                                  needs_clflush);
                if (ret)
                        break;

                remain -= length;
                user_data += length;
                offset = 0;
        }

        i915_gem_object_unlock_fence(obj, fence);
        return ret;
}

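/*
 * Read via the GGTT aperture: copy from an io mapping to user space. The
 * atomic WC mapping is tried first; if the copy faults on the user buffer
 * (not allowed in atomic context), retry through a regular mapping where
 * the fault can be serviced and copy_to_user() may sleep.
 */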
static inline bool
gtt_user_read(struct io_mapping *mapping,
              loff_t base, int offset,
              char __user *user_data, int length)
{
        void __iomem *vaddr;
        unsigned long unwritten;

        /* We can use the cpu mem copy function because this is X86. */
        vaddr = io_mapping_map_atomic_wc(mapping, base);
        unwritten = __copy_to_user_inatomic(user_data,
                                            (void __force *)vaddr + offset,
                                            length);
        io_mapping_unmap_atomic(vaddr);
        if (unwritten) {
                vaddr = io_mapping_map_wc(mapping, base, PAGE_SIZE);
                unwritten = copy_to_user(user_data,
                                         (void __force *)vaddr + offset,
                                         length);
                io_mapping_unmap(vaddr);
        }
        return unwritten;
}

static int
i915_gem_gtt_pread(struct drm_i915_gem_object *obj,
                   const struct drm_i915_gem_pread *args)
{
        struct drm_i915_private *i915 = to_i915(obj->base.dev);
        struct i915_ggtt *ggtt = &i915->ggtt;
        intel_wakeref_t wakeref;
        struct drm_mm_node node;
        struct dma_fence *fence;
        void __user *user_data;
        struct i915_vma *vma;
        u64 remain, offset;
        int ret;

        wakeref = intel_runtime_pm_get(&i915->runtime_pm);
        vma = ERR_PTR(-ENODEV);
        if (!i915_gem_object_is_tiled(obj))
                vma = i915_gem_object_ggtt_pin(obj, NULL, 0, 0,
                                               PIN_MAPPABLE |
                                               PIN_NONBLOCK |
                                               PIN_NOEVICT);
        if (!IS_ERR(vma)) {
                node.start = i915_ggtt_offset(vma);
                node.flags = 0;
        } else {
                ret = insert_mappable_node(ggtt, &node, PAGE_SIZE);
                if (ret)
                        goto out_rpm;
                GEM_BUG_ON(!drm_mm_node_allocated(&node));
        }

        ret = i915_gem_object_lock_interruptible(obj);
        if (ret)
                goto out_unpin;

        ret = i915_gem_object_set_to_gtt_domain(obj, false);
        if (ret) {
                i915_gem_object_unlock(obj);
                goto out_unpin;
        }

        fence = i915_gem_object_lock_fence(obj);
        i915_gem_object_unlock(obj);
        if (!fence) {
                ret = -ENOMEM;
                goto out_unpin;
        }

        user_data = u64_to_user_ptr(args->data_ptr);
        remain = args->size;
        offset = args->offset;

        while (remain > 0) {
                /* Operation in this page
                 *
                 * page_base = page offset within aperture
                 * page_offset = offset within page
                 * page_length = bytes to copy for this page
                 */
                u32 page_base = node.start;
                unsigned int page_offset = offset_in_page(offset);
                unsigned int page_length = PAGE_SIZE - page_offset;
                page_length = remain < page_length ? remain : page_length;
                if (drm_mm_node_allocated(&node)) {
                        ggtt->vm.insert_page(&ggtt->vm,
                                             i915_gem_object_get_dma_address(obj, offset >> PAGE_SHIFT),
                                             node.start, I915_CACHE_NONE, 0);
                } else {
                        page_base += offset & PAGE_MASK;
                }

                if (gtt_user_read(&ggtt->iomap, page_base, page_offset,
                                  user_data, page_length)) {
                        ret = -EFAULT;
                        break;
                }

                remain -= page_length;
                user_data += page_length;
                offset += page_length;
        }

        i915_gem_object_unlock_fence(obj, fence);
out_unpin:
        if (drm_mm_node_allocated(&node)) {
                ggtt->vm.clear_range(&ggtt->vm, node.start, node.size);
                remove_mappable_node(ggtt, &node);
        } else {
                i915_vma_unpin(vma);
        }
out_rpm:
        intel_runtime_pm_put(&i915->runtime_pm, wakeref);
        return ret;
}

/**
 * Reads data from the object referenced by handle.
 * @dev: drm device pointer
 * @data: ioctl data blob
 * @file: drm file pointer
 *
 * On error, the contents of *data are undefined.
 */
int
i915_gem_pread_ioctl(struct drm_device *dev, void *data,
                     struct drm_file *file)
{
        struct drm_i915_gem_pread *args = data;
        struct drm_i915_gem_object *obj;
        int ret;

        if (args->size == 0)
                return 0;

        if (!access_ok(u64_to_user_ptr(args->data_ptr),
                       args->size))
                return -EFAULT;

        obj = i915_gem_object_lookup(file, args->handle);
        if (!obj)
                return -ENOENT;

        /* Bounds check source.  */
        if (range_overflows_t(u64, args->offset, args->size, obj->base.size)) {
                ret = -EINVAL;
                goto out;
        }

        trace_i915_gem_object_pread(obj, args->offset, args->size);

        ret = i915_gem_object_wait(obj,
                                   I915_WAIT_INTERRUPTIBLE,
                                   MAX_SCHEDULE_TIMEOUT);
        if (ret)
                goto out;

        ret = i915_gem_object_pin_pages(obj);
        if (ret)
                goto out;

        ret = i915_gem_shmem_pread(obj, args);
        if (ret == -EFAULT || ret == -ENODEV)
                ret = i915_gem_gtt_pread(obj, args);

        i915_gem_object_unpin_pages(obj);
out:
        i915_gem_object_put(obj);
        return ret;
}
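
/*
 * Example (userspace sketch, not part of this file): reading an object's
 * contents back with DRM_IOCTL_I915_GEM_PREAD. Assumes <xf86drm.h> and
 * <drm/i915_drm.h>; "handle" names an existing GEM object.
 *
 *      char buf[4096];
 *      struct drm_i915_gem_pread pread = {
 *              .handle = handle,
 *              .offset = 0,
 *              .size = sizeof(buf),
 *              .data_ptr = (__u64)(uintptr_t)buf,
 *      };
 *
 *      if (drmIoctl(fd, DRM_IOCTL_I915_GEM_PREAD, &pread))
 *              perror("pread");
 */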

/* This is the fast write path which cannot handle
 * page faults in the source data
 */

static inline bool
ggtt_write(struct io_mapping *mapping,
           loff_t base, int offset,
           char __user *user_data, int length)
{
        void __iomem *vaddr;
        unsigned long unwritten;

        /* We can use the cpu mem copy function because this is X86. */
        vaddr = io_mapping_map_atomic_wc(mapping, base);
        unwritten = __copy_from_user_inatomic_nocache((void __force *)vaddr + offset,
                                                      user_data, length);
        io_mapping_unmap_atomic(vaddr);
        if (unwritten) {
                vaddr = io_mapping_map_wc(mapping, base, PAGE_SIZE);
                unwritten = copy_from_user((void __force *)vaddr + offset,
                                           user_data, length);
                io_mapping_unmap(vaddr);
        }

        return unwritten;
}

/**
 * This is the fast pwrite path, where we copy the data directly from the
 * user into the GTT, uncached.
 * @obj: i915 GEM object
 * @args: pwrite arguments structure
 */
static int
i915_gem_gtt_pwrite_fast(struct drm_i915_gem_object *obj,
                         const struct drm_i915_gem_pwrite *args)
{
        struct drm_i915_private *i915 = to_i915(obj->base.dev);
        struct i915_ggtt *ggtt = &i915->ggtt;
        struct intel_runtime_pm *rpm = &i915->runtime_pm;
        intel_wakeref_t wakeref;
        struct drm_mm_node node;
        struct dma_fence *fence;
        struct i915_vma *vma;
        u64 remain, offset;
        void __user *user_data;
        int ret;

        if (i915_gem_object_has_struct_page(obj)) {
                /*
                 * Avoid waking the device up if we can fallback, as
                 * waking/resuming is very slow (worst-case 10-100 ms
                 * depending on PCI sleeps and our own resume time).
                 * This easily atones for the cost of the fallback
                 * pagefault handling path (shmem pwrite).
                 */
                wakeref = intel_runtime_pm_get_if_in_use(rpm);
                if (!wakeref)
                        return -EFAULT;
        } else {
                /* No backing pages, no fallback, we must force GGTT access */
                wakeref = intel_runtime_pm_get(rpm);
        }

        vma = ERR_PTR(-ENODEV);
        if (!i915_gem_object_is_tiled(obj))
                vma = i915_gem_object_ggtt_pin(obj, NULL, 0, 0,
                                               PIN_MAPPABLE |
                                               PIN_NONBLOCK |
                                               PIN_NOEVICT);
        if (!IS_ERR(vma)) {
                node.start = i915_ggtt_offset(vma);
                node.flags = 0;
        } else {
                ret = insert_mappable_node(ggtt, &node, PAGE_SIZE);
                if (ret)
                        goto out_rpm;
                GEM_BUG_ON(!drm_mm_node_allocated(&node));
        }

        ret = i915_gem_object_lock_interruptible(obj);
        if (ret)
                goto out_unpin;

        ret = i915_gem_object_set_to_gtt_domain(obj, true);
        if (ret) {
                i915_gem_object_unlock(obj);
                goto out_unpin;
        }

        fence = i915_gem_object_lock_fence(obj);
        i915_gem_object_unlock(obj);
        if (!fence) {
                ret = -ENOMEM;
                goto out_unpin;
        }

        i915_gem_object_invalidate_frontbuffer(obj, ORIGIN_CPU);

        user_data = u64_to_user_ptr(args->data_ptr);
        offset = args->offset;
        remain = args->size;
        while (remain) {
                /* Operation in this page
                 *
                 * page_base = page offset within aperture
                 * page_offset = offset within page
                 * page_length = bytes to copy for this page
                 */
                u32 page_base = node.start;
                unsigned int page_offset = offset_in_page(offset);
                unsigned int page_length = PAGE_SIZE - page_offset;
                page_length = remain < page_length ? remain : page_length;
                if (drm_mm_node_allocated(&node)) {
                        /* flush the write before we modify the GGTT */
                        intel_gt_flush_ggtt_writes(ggtt->vm.gt);
                        ggtt->vm.insert_page(&ggtt->vm,
                                             i915_gem_object_get_dma_address(obj, offset >> PAGE_SHIFT),
                                             node.start, I915_CACHE_NONE, 0);
                        wmb(); /* flush modifications to the GGTT (insert_page) */
                } else {
                        page_base += offset & PAGE_MASK;
                }

                /*
                 * If we get a fault while copying data, then (presumably) our
                 * source page isn't available.  In this case, use the
                 * non-atomic function
                 */
                if (ggtt_write(&ggtt->iomap, page_base, page_offset,
                               user_data, page_length)) {
                        ret = -EFAULT;
                        break;
                }

                remain -= page_length;
                user_data += page_length;
                offset += page_length;
        }

        intel_gt_flush_ggtt_writes(ggtt->vm.gt);
        i915_gem_object_flush_frontbuffer(obj, ORIGIN_CPU);

        i915_gem_object_unlock_fence(obj, fence);
out_unpin:
        if (drm_mm_node_allocated(&node)) {
                ggtt->vm.clear_range(&ggtt->vm, node.start, node.size);
                remove_mappable_node(ggtt, &node);
        } else {
                i915_vma_unpin(vma);
        }
out_rpm:
        intel_runtime_pm_put(rpm, wakeref);
        return ret;
}

/* Per-page copy function for the shmem pwrite fastpath.
 * Flushes invalid cachelines before writing to the target if
 * needs_clflush_before is set and flushes out any written cachelines after
 * writing if needs_clflush_after is set.
 */
static int
shmem_pwrite(struct page *page, int offset, int len, char __user *user_data,
             bool needs_clflush_before,
             bool needs_clflush_after)
{
        char *vaddr;
        int ret;

        vaddr = kmap(page);

        if (needs_clflush_before)
                drm_clflush_virt_range(vaddr + offset, len);

        ret = __copy_from_user(vaddr + offset, user_data, len);
        if (!ret && needs_clflush_after)
                drm_clflush_virt_range(vaddr + offset, len);

        kunmap(page);

        return ret ? -EFAULT : 0;
}

static int
i915_gem_shmem_pwrite(struct drm_i915_gem_object *obj,
                      const struct drm_i915_gem_pwrite *args)
{
        unsigned int partial_cacheline_write;
        unsigned int needs_clflush;
        unsigned int offset, idx;
        struct dma_fence *fence;
        void __user *user_data;
        u64 remain;
        int ret;

        ret = i915_gem_object_prepare_write(obj, &needs_clflush);
        if (ret)
                return ret;

        fence = i915_gem_object_lock_fence(obj);
        i915_gem_object_finish_access(obj);
        if (!fence)
                return -ENOMEM;

        /* If we don't overwrite a cacheline completely we need to be
         * careful to have up-to-date data by first clflushing. Don't
         * overcomplicate things and flush the entire page.
         */
        partial_cacheline_write = 0;
        if (needs_clflush & CLFLUSH_BEFORE)
                partial_cacheline_write = boot_cpu_data.x86_clflush_size - 1;

        user_data = u64_to_user_ptr(args->data_ptr);
        remain = args->size;
        offset = offset_in_page(args->offset);
        for (idx = args->offset >> PAGE_SHIFT; remain; idx++) {
                struct page *page = i915_gem_object_get_page(obj, idx);
                unsigned int length = min_t(u64, remain, PAGE_SIZE - offset);

                ret = shmem_pwrite(page, offset, length, user_data,
                                   (offset | length) & partial_cacheline_write,
                                   needs_clflush & CLFLUSH_AFTER);
                if (ret)
                        break;

                remain -= length;
                user_data += length;
                offset = 0;
        }

        i915_gem_object_flush_frontbuffer(obj, ORIGIN_CPU);
        i915_gem_object_unlock_fence(obj, fence);

        return ret;
}

/**
 * Writes data to the object referenced by handle.
 * @dev: drm device
 * @data: ioctl data blob
 * @file: drm file
 *
 * On error, the contents of the buffer that were to be modified are undefined.
 */
int
i915_gem_pwrite_ioctl(struct drm_device *dev, void *data,
                      struct drm_file *file)
{
        struct drm_i915_gem_pwrite *args = data;
        struct drm_i915_gem_object *obj;
        int ret;

        if (args->size == 0)
                return 0;

        if (!access_ok(u64_to_user_ptr(args->data_ptr), args->size))
                return -EFAULT;

        obj = i915_gem_object_lookup(file, args->handle);
        if (!obj)
                return -ENOENT;

        /* Bounds check destination. */
        if (range_overflows_t(u64, args->offset, args->size, obj->base.size)) {
                ret = -EINVAL;
                goto err;
        }

        /* Writes not allowed into this read-only object */
        if (i915_gem_object_is_readonly(obj)) {
                ret = -EINVAL;
                goto err;
        }

        trace_i915_gem_object_pwrite(obj, args->offset, args->size);

        ret = -ENODEV;
        if (obj->ops->pwrite)
                ret = obj->ops->pwrite(obj, args);
        if (ret != -ENODEV)
                goto err;

        ret = i915_gem_object_wait(obj,
                                   I915_WAIT_INTERRUPTIBLE |
                                   I915_WAIT_ALL,
                                   MAX_SCHEDULE_TIMEOUT);
        if (ret)
                goto err;

        ret = i915_gem_object_pin_pages(obj);
        if (ret)
                goto err;

        ret = -EFAULT;
        /*
         * We can only do the GTT pwrite on untiled buffers, as otherwise
         * it would end up going through the fenced access, and we'll get
         * different detiling behavior between reading and writing.
         * pread/pwrite currently are reading and writing from the CPU
         * perspective, requiring manual detiling by the client.
         */
        if (!i915_gem_object_has_struct_page(obj) ||
            cpu_write_needs_clflush(obj))
                /* Note that the gtt paths might fail with non-page-backed user
                 * pointers (e.g. gtt mappings when moving data between
                 * textures). Fallback to the shmem path in that case.
                 */
                ret = i915_gem_gtt_pwrite_fast(obj, args);

        if (ret == -EFAULT || ret == -ENOSPC) {
                if (i915_gem_object_has_struct_page(obj))
                        ret = i915_gem_shmem_pwrite(obj, args);
                else
                        ret = i915_gem_phys_pwrite(obj, args, file);
        }

        i915_gem_object_unpin_pages(obj);
err:
        i915_gem_object_put(obj);
        return ret;
}
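
/*
 * Example (userspace sketch, not part of this file): uploading data with
 * DRM_IOCTL_I915_GEM_PWRITE. Assumes <xf86drm.h> and <drm/i915_drm.h>;
 * "handle" names an existing GEM object and "data"/"len" the source buffer.
 *
 *      struct drm_i915_gem_pwrite pwrite = {
 *              .handle = handle,
 *              .offset = 0,
 *              .size = len,
 *              .data_ptr = (__u64)(uintptr_t)data,
 *      };
 *
 *      if (drmIoctl(fd, DRM_IOCTL_I915_GEM_PWRITE, &pwrite))
 *              perror("pwrite");
 */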

/**
 * Called when user space has done writes to this buffer
 * @dev: drm device
 * @data: ioctl data blob
 * @file: drm file
 */
int
i915_gem_sw_finish_ioctl(struct drm_device *dev, void *data,
                         struct drm_file *file)
{
        struct drm_i915_gem_sw_finish *args = data;
        struct drm_i915_gem_object *obj;

        obj = i915_gem_object_lookup(file, args->handle);
        if (!obj)
                return -ENOENT;

        /*
         * Proxy objects are barred from CPU access, so there is no
         * need to ban sw_finish as it is a nop.
         */

        /* Pinned buffers may be scanout, so flush the cache */
        i915_gem_object_flush_if_display(obj);
        i915_gem_object_put(obj);

        return 0;
}
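
/*
 * Example (userspace sketch, not part of this file): flushing CPU writes to
 * a potential scanout buffer with DRM_IOCTL_I915_GEM_SW_FINISH.
 *
 *      struct drm_i915_gem_sw_finish finish = { .handle = handle };
 *
 *      drmIoctl(fd, DRM_IOCTL_I915_GEM_SW_FINISH, &finish);
 */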

void i915_gem_runtime_suspend(struct drm_i915_private *i915)
{
        struct drm_i915_gem_object *obj, *on;
        int i;

        /*
         * Only called during RPM suspend. All users of the userfault_list
         * must be holding an RPM wakeref to ensure that this can not
         * run concurrently with themselves (and use the struct_mutex for
         * protection between themselves).
         */

        list_for_each_entry_safe(obj, on,
                                 &i915->ggtt.userfault_list, userfault_link)
                __i915_gem_object_release_mmap_gtt(obj);

        /*
         * The fence will be lost when the device powers down. If any were
         * in use by hardware (i.e. they are pinned), we should not be powering
         * down! All other fences will be reset by the next using their slot.
         */
        for (i = 0; i < i915->ggtt.num_fences; i++) {
                struct i915_fence_reg *reg = &i915->ggtt.fence_regs[i];

                /*
                 * Ideally we want to assert that the fence register is not
                 * live at this point (i.e. that no piece of code will be
                 * using it as part of an async process), but as the fence is
                 * being released mark it as dirty and poison the slot so
                 * that on recovery from runtime-suspend we do not overwrite
                 * the fence register with a stale value.
                 *
                 * Marking the fence dirty forces it to be restored from our
                 * bookkeeping before its next use, rather than trusting
                 * whatever value survives in the register across the power
                 * transition.
                 */
                if (!reg->vma)
                        continue;

                GEM_BUG_ON(i915_vma_has_userfault(reg->vma));
                reg->dirty = true;
        }
}

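/*
 * Drop a vma from its object's vma tree so that the next lookup allocates a
 * fresh vma instance, leaving the old (still pinned or active) one to expire
 * on its own. Used when a misplaced GGTT vma cannot be unbound immediately.
 */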
static void discard_ggtt_vma(struct i915_vma *vma)
{
        struct drm_i915_gem_object *obj = vma->obj;

        spin_lock(&obj->vma.lock);
        if (!RB_EMPTY_NODE(&vma->obj_node)) {
                rb_erase(&vma->obj_node, &obj->vma.tree);
                RB_CLEAR_NODE(&vma->obj_node);
        }
        spin_unlock(&obj->vma.lock);
}

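/*
 * Pin an object into the global GTT, returning the pinned vma. The caller
 * must eventually release the pin with i915_vma_unpin(). A misplaced vma is
 * unbound (or discarded, if still busy) and rebound to match the requested
 * size, alignment and flags.
 */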
struct i915_vma *
i915_gem_object_ggtt_pin(struct drm_i915_gem_object *obj,
                         const struct i915_ggtt_view *view,
                         u64 size,
                         u64 alignment,
                         u64 flags)
{
        struct drm_i915_private *i915 = to_i915(obj->base.dev);
        struct i915_ggtt *ggtt = &i915->ggtt;
        struct i915_vma *vma;
        int ret;

        if (flags & PIN_MAPPABLE &&
            (!view || view->type == I915_GGTT_VIEW_NORMAL)) {
                /*
                 * If the required space is larger than the available
                 * aperture, we will not be able to find a slot for the
                 * object and unbinding the object now will be in
                 * vain. Worse, doing so may cause us to ping-pong
                 * the object in and out of the Global GTT and
                 * waste a lot of cycles under the mutex.
                 */
                if (obj->base.size > ggtt->mappable_end)
                        return ERR_PTR(-E2BIG);

                /*
                 * If NONBLOCK is set the caller is optimistically
                 * trying to cache the full object within the mappable
                 * aperture, and *must* have a fallback in place for
                 * situations where we cannot bind the object. We
                 * can be a little more lax here and use the fallback
                 * more often to avoid costly migrations of ourselves
                 * and other objects within the aperture.
                 *
                 * Half-the-aperture is used as a simple heuristic.
                 * More interesting would be to do a search for a free
                 * block prior to making the commitment to unbind.
                 * That caters for the self-harm case, and with a
                 * little more heuristics (e.g. NOFAULT, NOEVICT)
                 * we could try to minimise harm to others.
                 */
                if (flags & PIN_NONBLOCK &&
                    obj->base.size > ggtt->mappable_end / 2)
                        return ERR_PTR(-ENOSPC);
        }

new_vma:
        vma = i915_vma_instance(obj, &ggtt->vm, view);
        if (IS_ERR(vma))
                return vma;

        if (i915_vma_misplaced(vma, size, alignment, flags)) {
                if (flags & PIN_NONBLOCK) {
                        if (i915_vma_is_pinned(vma) || i915_vma_is_active(vma))
                                return ERR_PTR(-ENOSPC);

                        if (flags & PIN_MAPPABLE &&
                            vma->fence_size > ggtt->mappable_end / 2)
                                return ERR_PTR(-ENOSPC);
                }

                if (i915_vma_is_pinned(vma) || i915_vma_is_active(vma)) {
                        discard_ggtt_vma(vma);
                        goto new_vma;
                }

                ret = i915_vma_unbind(vma);
                if (ret)
                        return ERR_PTR(ret);
        }

        ret = i915_vma_pin(vma, size, alignment, flags | PIN_GLOBAL);
        if (ret)
                return ERR_PTR(ret);

        if (vma->fence && !i915_gem_object_is_tiled(obj)) {
                mutex_lock(&ggtt->vm.mutex);
                i915_vma_revoke_fence(vma);
                mutex_unlock(&ggtt->vm.mutex);
        }

        ret = i915_vma_wait_for_bind(vma);
        if (ret) {
                i915_vma_unpin(vma);
                return ERR_PTR(ret);
        }

        return vma;
}

int
i915_gem_madvise_ioctl(struct drm_device *dev, void *data,
                       struct drm_file *file_priv)
{
        struct drm_i915_private *i915 = to_i915(dev);
        struct drm_i915_gem_madvise *args = data;
        struct drm_i915_gem_object *obj;
        int err;

        switch (args->madv) {
        case I915_MADV_DONTNEED:
        case I915_MADV_WILLNEED:
                break;
        default:
                return -EINVAL;
        }

        obj = i915_gem_object_lookup(file_priv, args->handle);
        if (!obj)
                return -ENOENT;

        err = mutex_lock_interruptible(&obj->mm.lock);
        if (err)
                goto out;

        if (i915_gem_object_has_pages(obj) &&
            i915_gem_object_is_tiled(obj) &&
            i915->quirks & QUIRK_PIN_SWIZZLED_PAGES) {
                if (obj->mm.madv == I915_MADV_WILLNEED) {
                        GEM_BUG_ON(!obj->mm.quirked);
                        __i915_gem_object_unpin_pages(obj);
                        obj->mm.quirked = false;
                }
                if (args->madv == I915_MADV_WILLNEED) {
                        GEM_BUG_ON(obj->mm.quirked);
                        __i915_gem_object_pin_pages(obj);
                        obj->mm.quirked = true;
                }
        }

        if (obj->mm.madv != __I915_MADV_PURGED)
                obj->mm.madv = args->madv;

        if (i915_gem_object_has_pages(obj)) {
                struct list_head *list;

                if (i915_gem_object_is_shrinkable(obj)) {
                        unsigned long flags;

                        spin_lock_irqsave(&i915->mm.obj_lock, flags);

                        if (obj->mm.madv != I915_MADV_WILLNEED)
                                list = &i915->mm.purge_list;
                        else
                                list = &i915->mm.shrink_list;
                        list_move_tail(&obj->mm.link, list);

                        spin_unlock_irqrestore(&i915->mm.obj_lock, flags);
                }
        }

        /* if the object is no longer attached, discard its backing storage */
        if (obj->mm.madv == I915_MADV_DONTNEED &&
            !i915_gem_object_has_pages(obj))
                i915_gem_object_truncate(obj);

        args->retained = obj->mm.madv != __I915_MADV_PURGED;
        mutex_unlock(&obj->mm.lock);

out:
        i915_gem_object_put(obj);
        return err;
}
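
/*
 * Example (userspace sketch, not part of this file): marking a buffer as
 * purgeable with DRM_IOCTL_I915_GEM_MADVISE and checking whether it was
 * retained when marking it needed again.
 *
 *      struct drm_i915_gem_madvise madv = {
 *              .handle = handle,
 *              .madv = I915_MADV_DONTNEED,
 *      };
 *
 *      drmIoctl(fd, DRM_IOCTL_I915_GEM_MADVISE, &madv);
 *      ...
 *      madv.madv = I915_MADV_WILLNEED;
 *      drmIoctl(fd, DRM_IOCTL_I915_GEM_MADVISE, &madv);
 *      if (!madv.retained)
 *              ... contents were discarded; the buffer must be repopulated ...
 */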

int i915_gem_init(struct drm_i915_private *dev_priv)
{
        int ret;

        /* We need to fallback to 4K pages if host doesn't support huge gtt. */
        if (intel_vgpu_active(dev_priv) && !intel_vgpu_has_huge_gtt(dev_priv))
                mkwrite_device_info(dev_priv)->page_sizes =
                        I915_GTT_PAGE_SIZE_4K;

        ret = i915_gem_init_userptr(dev_priv);
        if (ret)
                return ret;

        intel_uc_fetch_firmwares(&dev_priv->gt.uc);
        intel_wopcm_init(&dev_priv->wopcm);

        ret = i915_init_ggtt(dev_priv);
        if (ret) {
                GEM_BUG_ON(ret == -EIO);
                goto err_unlock;
        }

        /*
         * Despite its name intel_init_clock_gating applies both display
         * clock gating workarounds; GT mmio workarounds and the occasional
         * GT power context workaround. Worse, sometimes it includes a context
         * register workaround which we need to apply before we record the
         * default HW state for all contexts.
         *
         * FIXME: break up the workarounds and apply them at the right time!
         */
        intel_init_clock_gating(dev_priv);

        ret = intel_gt_init(&dev_priv->gt);
        if (ret)
                goto err_unlock;

        return 0;

        /*
         * Unwinding is complicated by that we want to handle -EIO to mean
         * disable GPU submission but keep KMS alive. We want to mark the
         * HW as irrevocably wedged, but keep enough state around that the
         * driver doesn't explode during runtime.
         */
err_unlock:
        i915_gem_drain_workqueue(dev_priv);

        if (ret != -EIO) {
                intel_uc_cleanup_firmwares(&dev_priv->gt.uc);
                i915_gem_cleanup_userptr(dev_priv);
        }

        if (ret == -EIO) {
                /*
                 * Allow engines or uC initialisation to fail by marking the GPU
                 * as wedged. But we only want to do this when the GPU is angry,
                 * for all other failure, such as an allocation, we should abort.
                 */
                if (!intel_gt_is_wedged(&dev_priv->gt)) {
                        i915_probe_error(dev_priv,
                                         "Failed to initialize GPU, declaring it wedged!\n");
                        intel_gt_set_wedged(&dev_priv->gt);
                }

                /* Minimal basic recovery for KMS */
                ret = i915_ggtt_enable_hw(dev_priv);
                i915_ggtt_resume(&dev_priv->ggtt);
                intel_init_clock_gating(dev_priv);
        }

        i915_gem_drain_freed_objects(dev_priv);
        return ret;
}

void i915_gem_driver_register(struct drm_i915_private *i915)
{
        i915_gem_driver_register__shrinker(i915);

        intel_engines_driver_register(i915);
}

void i915_gem_driver_unregister(struct drm_i915_private *i915)
{
        i915_gem_driver_unregister__shrinker(i915);
}

void i915_gem_driver_remove(struct drm_i915_private *dev_priv)
{
        intel_wakeref_auto_fini(&dev_priv->ggtt.userfault_wakeref);

        i915_gem_suspend_late(dev_priv);
        intel_gt_driver_remove(&dev_priv->gt);
        dev_priv->uabi_engines = RB_ROOT;

        /* Flush any outstanding unpin_work. */
        i915_gem_drain_workqueue(dev_priv);

        i915_gem_drain_freed_objects(dev_priv);
}

void i915_gem_driver_release(struct drm_i915_private *dev_priv)
{
        i915_gem_driver_release__contexts(dev_priv);

        intel_gt_driver_release(&dev_priv->gt);

        intel_wa_list_free(&dev_priv->gt_wa_list);

        intel_uc_cleanup_firmwares(&dev_priv->gt.uc);
        i915_gem_cleanup_userptr(dev_priv);

        i915_gem_drain_freed_objects(dev_priv);

        drm_WARN_ON(&dev_priv->drm, !list_empty(&dev_priv->gem.contexts.list));
}

static void i915_gem_init__mm(struct drm_i915_private *i915)
{
        spin_lock_init(&i915->mm.obj_lock);

        init_llist_head(&i915->mm.free_list);

        INIT_LIST_HEAD(&i915->mm.purge_list);
        INIT_LIST_HEAD(&i915->mm.shrink_list);

        i915_gem_init__objects(i915);
}

void i915_gem_init_early(struct drm_i915_private *dev_priv)
{
        i915_gem_init__mm(dev_priv);
        i915_gem_init__contexts(dev_priv);

        spin_lock_init(&dev_priv->fb_tracking.lock);
}

void i915_gem_cleanup_early(struct drm_i915_private *dev_priv)
{
        i915_gem_drain_freed_objects(dev_priv);
        GEM_BUG_ON(!llist_empty(&dev_priv->mm.free_list));
        GEM_BUG_ON(atomic_read(&dev_priv->mm.free_count));
        drm_WARN_ON(&dev_priv->drm, dev_priv->mm.shrink_count);
}

int i915_gem_freeze(struct drm_i915_private *dev_priv)
{
        /* Discard all purgeable objects, let userspace recover those as
         * required after resuming.
         */
        i915_gem_shrink_all(dev_priv);

        return 0;
}

int i915_gem_freeze_late(struct drm_i915_private *i915)
{
        struct drm_i915_gem_object *obj;
        intel_wakeref_t wakeref;

        /*
         * Called just before we write the hibernation image.
         *
         * We need to update the domain tracking to reflect that the CPU
         * will be accessing all the pages to create and restore from the
         * hibernation image, and so upon restoration those pages will be
         * in the CPU domain.
         *
         * To make sure the hibernation image contains the latest state,
         * we update that state just before writing out the image.
         *
         * To try and reduce the hibernation image, we manually shrink
         * the objects as well, see i915_gem_freeze().
         */
        wakeref = intel_runtime_pm_get(&i915->runtime_pm);

        i915_gem_shrink(i915, -1UL, NULL, ~0);
        i915_gem_drain_freed_objects(i915);

        list_for_each_entry(obj, &i915->mm.shrink_list, mm.link) {
                i915_gem_object_lock(obj);
                drm_WARN_ON(&i915->drm,
                            i915_gem_object_set_to_cpu_domain(obj, true));
                i915_gem_object_unlock(obj);
        }

        intel_runtime_pm_put(&i915->runtime_pm, wakeref);

        return 0;
}

void i915_gem_release(struct drm_device *dev, struct drm_file *file)
{
        struct drm_i915_file_private *file_priv = file->driver_priv;
        struct i915_request *request;

        /* Clean up our request list when the client is going away, so that
         * later retire_requests won't dereference our soon-to-be-gone
         * file_priv.
         */
        spin_lock(&file_priv->mm.lock);
        list_for_each_entry(request, &file_priv->mm.request_list, client_link)
                request->file_priv = NULL;
        spin_unlock(&file_priv->mm.lock);
}

int i915_gem_open(struct drm_i915_private *i915, struct drm_file *file)
{
        struct drm_i915_file_private *file_priv;
        int ret;

        DRM_DEBUG("\n");

        file_priv = kzalloc(sizeof(*file_priv), GFP_KERNEL);
        if (!file_priv)
                return -ENOMEM;

        file->driver_priv = file_priv;
        file_priv->dev_priv = i915;
        file_priv->file = file;

        spin_lock_init(&file_priv->mm.lock);
        INIT_LIST_HEAD(&file_priv->mm.request_list);

        file_priv->bsd_engine = -1;
        file_priv->hang_timestamp = jiffies;

        ret = i915_gem_context_open(i915, file);
        if (ret)
                kfree(file_priv);

        return ret;
}

#if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
#include "selftests/mock_gem_device.c"
#include "selftests/i915_gem.c"
#endif