/*
 * Copyright © 2016 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OF THE
 * SOFTWARE.
 *
 */

#include <linux/sched/mm.h>
#include <drm/drm_gem.h>

#include "display/intel_frontbuffer.h"
#include "gem/i915_gem_lmem.h"
#include "gem/i915_gem_tiling.h"
#include "gt/intel_engine.h"
#include "gt/intel_engine_heartbeat.h"
#include "gt/intel_gt.h"
#include "gt/intel_gt_requests.h"

#include "i915_drv.h"
#include "i915_gem_evict.h"
#include "i915_sw_fence_work.h"
#include "i915_trace.h"
#include "i915_vma.h"
#include "i915_vma_resource.h"

static inline void assert_vma_held_evict(const struct i915_vma *vma)
{
	/*
	 * We may be forced to unbind when the vm is dead, to clean it up.
	 * This is the only exception to the requirement of the object lock
	 * being held.
	 */
	if (atomic_read(&vma->vm->open))
		assert_object_held_shared(vma->obj);
}

static struct kmem_cache *slab_vmas;

static struct i915_vma *i915_vma_alloc(void)
{
	return kmem_cache_zalloc(slab_vmas, GFP_KERNEL);
}

static void i915_vma_free(struct i915_vma *vma)
{
	return kmem_cache_free(slab_vmas, vma);
}

#if IS_ENABLED(CONFIG_DRM_I915_ERRLOG_GEM) && IS_ENABLED(CONFIG_DRM_DEBUG_MM)

#include <linux/stackdepot.h>

static void vma_print_allocator(struct i915_vma *vma, const char *reason)
{
	char buf[512];

	if (!vma->node.stack) {
		DRM_DEBUG_DRIVER("vma.node [%08llx + %08llx] %s: unknown owner\n",
				 vma->node.start, vma->node.size, reason);
		return;
	}

	stack_depot_snprint(vma->node.stack, buf, sizeof(buf), 0);
	DRM_DEBUG_DRIVER("vma.node [%08llx + %08llx] %s: inserted at %s\n",
			 vma->node.start, vma->node.size, reason, buf);
}

#else

static void vma_print_allocator(struct i915_vma *vma, const char *reason)
{
}

#endif

static inline struct i915_vma *active_to_vma(struct i915_active *ref)
{
	return container_of(ref, typeof(struct i915_vma), active);
}

static int __i915_vma_active(struct i915_active *ref)
{
	return i915_vma_tryget(active_to_vma(ref)) ? 0 : -ENOENT;
}

static void __i915_vma_retire(struct i915_active *ref)
{
	i915_vma_put(active_to_vma(ref));
}

static struct i915_vma *
vma_create(struct drm_i915_gem_object *obj,
	   struct i915_address_space *vm,
	   const struct i915_ggtt_view *view)
{
	struct i915_vma *pos = ERR_PTR(-E2BIG);
	struct i915_vma *vma;
	struct rb_node *rb, **p;

	/* The aliasing_ppgtt should never be used directly! */
	GEM_BUG_ON(vm == &vm->gt->ggtt->alias->vm);

	vma = i915_vma_alloc();
	if (vma == NULL)
		return ERR_PTR(-ENOMEM);

	kref_init(&vma->ref);
	vma->vm = i915_vm_get(vm);
	vma->ops = &vm->vma_ops;
	vma->obj = obj;
	vma->size = obj->base.size;
	vma->display_alignment = I915_GTT_MIN_ALIGNMENT;

	i915_active_init(&vma->active, __i915_vma_active, __i915_vma_retire, 0);

	/* Declare ourselves safe for use inside shrinkers */
	if (IS_ENABLED(CONFIG_LOCKDEP)) {
		fs_reclaim_acquire(GFP_KERNEL);
		might_lock(&vma->active.mutex);
		fs_reclaim_release(GFP_KERNEL);
	}

	INIT_LIST_HEAD(&vma->closed_link);

	if (view && view->type != I915_GGTT_VIEW_NORMAL) {
		vma->ggtt_view = *view;
		if (view->type == I915_GGTT_VIEW_PARTIAL) {
			GEM_BUG_ON(range_overflows_t(u64,
						     view->partial.offset,
						     view->partial.size,
						     obj->base.size >> PAGE_SHIFT));
			vma->size = view->partial.size;
			vma->size <<= PAGE_SHIFT;
			GEM_BUG_ON(vma->size > obj->base.size);
		} else if (view->type == I915_GGTT_VIEW_ROTATED) {
			vma->size = intel_rotation_info_size(&view->rotated);
			vma->size <<= PAGE_SHIFT;
		} else if (view->type == I915_GGTT_VIEW_REMAPPED) {
			vma->size = intel_remapped_info_size(&view->remapped);
			vma->size <<= PAGE_SHIFT;
		}
	}

	if (unlikely(vma->size > vm->total))
		goto err_vma;

	GEM_BUG_ON(!IS_ALIGNED(vma->size, I915_GTT_PAGE_SIZE));

	spin_lock(&obj->vma.lock);

	if (i915_is_ggtt(vm)) {
		if (unlikely(overflows_type(vma->size, u32)))
			goto err_unlock;

		vma->fence_size = i915_gem_fence_size(vm->i915, vma->size,
						      i915_gem_object_get_tiling(obj),
						      i915_gem_object_get_stride(obj));
		if (unlikely(vma->fence_size < vma->size ||
			     vma->fence_size > vm->total))
			goto err_unlock;

		GEM_BUG_ON(!IS_ALIGNED(vma->fence_size, I915_GTT_MIN_ALIGNMENT));

		vma->fence_alignment = i915_gem_fence_alignment(vm->i915, vma->size,
								i915_gem_object_get_tiling(obj),
								i915_gem_object_get_stride(obj));
		GEM_BUG_ON(!is_power_of_2(vma->fence_alignment));

		__set_bit(I915_VMA_GGTT_BIT, __i915_vma_flags(vma));
	}

	rb = NULL;
	p = &obj->vma.tree.rb_node;
	while (*p) {
		long cmp;

		rb = *p;
		pos = rb_entry(rb, struct i915_vma, obj_node);

		/*
		 * If the view already exists in the tree, another thread
		 * already created a matching vma, so return the older instance
		 * and dispose of ours.
		 */
		cmp = i915_vma_compare(pos, vm, view);
		if (cmp < 0)
			p = &rb->rb_right;
		else if (cmp > 0)
			p = &rb->rb_left;
		else
			goto err_unlock;
	}
	rb_link_node(&vma->obj_node, rb, p);
	rb_insert_color(&vma->obj_node, &obj->vma.tree);

	if (i915_vma_is_ggtt(vma))
		/*
		 * We put the GGTT vma at the start of the vma-list, followed
		 * by the ppGGTT vma. This allows us to break early when
		 * iterating over only the GGTT vma for an object, see
		 * for_each_ggtt_vma()
		 */
		list_add(&vma->obj_link, &obj->vma.list);
	else
		list_add_tail(&vma->obj_link, &obj->vma.list);

	spin_unlock(&obj->vma.lock);

	return vma;

err_unlock:
	spin_unlock(&obj->vma.lock);
err_vma:
	i915_vm_put(vm);
	i915_vma_free(vma);
	return pos;
}

static struct i915_vma *
i915_vma_lookup(struct drm_i915_gem_object *obj,
		struct i915_address_space *vm,
		const struct i915_ggtt_view *view)
{
	struct rb_node *rb;

	rb = obj->vma.tree.rb_node;
	while (rb) {
		struct i915_vma *vma = rb_entry(rb, struct i915_vma, obj_node);
		long cmp;

		cmp = i915_vma_compare(vma, vm, view);
		if (cmp == 0)
			return vma;

		if (cmp < 0)
			rb = rb->rb_right;
		else
			rb = rb->rb_left;
	}

	return NULL;
}

/**
 * i915_vma_instance - return the singleton instance of the VMA
 * @obj: parent &struct drm_i915_gem_object to be mapped
 * @vm: address space in which the mapping is located
 * @view: additional mapping requirements
 *
 * i915_vma_instance() looks up an existing VMA of the @obj in the @vm with
 * the same @view characteristics. If a match is not found, one is created.
 * Once created, the VMA is kept until either the object is freed, or the
 * address space is closed.
 *
 * Returns the vma, or an error pointer.
 */
struct i915_vma *
i915_vma_instance(struct drm_i915_gem_object *obj,
		  struct i915_address_space *vm,
		  const struct i915_ggtt_view *view)
{
	struct i915_vma *vma;

	GEM_BUG_ON(view && !i915_is_ggtt_or_dpt(vm));
	GEM_BUG_ON(!atomic_read(&vm->open));

	spin_lock(&obj->vma.lock);
	vma = i915_vma_lookup(obj, vm, view);
	spin_unlock(&obj->vma.lock);

	/* vma_create() will resolve the race if another creates the vma */
	if (unlikely(!vma))
		vma = vma_create(obj, vm, view);

	GEM_BUG_ON(!IS_ERR(vma) && i915_vma_compare(vma, vm, view));
	return vma;
}

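/*
 * Deferred binding state: an i915_vma_work packages everything __vma_bind()
 * needs (address space, page-table stash, vma resource, cache level and bind
 * flags) so that the PTE setup can run asynchronously from a dma_fence_work
 * once all of its dependencies have signaled.
 */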
struct i915_vma_work {
	struct dma_fence_work base;
	struct i915_address_space *vm;
	struct i915_vm_pt_stash stash;
	struct i915_vma_resource *vma_res;
	struct drm_i915_gem_object *pinned;
	struct i915_sw_dma_fence_cb cb;
	enum i915_cache_level cache_level;
	unsigned int flags;
};

static void __vma_bind(struct dma_fence_work *work)
{
	struct i915_vma_work *vw = container_of(work, typeof(*vw), base);
	struct i915_vma_resource *vma_res = vw->vma_res;

	vma_res->ops->bind_vma(vma_res->vm, &vw->stash,
			       vma_res, vw->cache_level, vw->flags);
}

static void __vma_release(struct dma_fence_work *work)
{
	struct i915_vma_work *vw = container_of(work, typeof(*vw), base);

	if (vw->pinned)
		i915_gem_object_put(vw->pinned);

	i915_vm_free_pt_stash(vw->vm, &vw->stash);
	i915_vm_put(vw->vm);
	if (vw->vma_res)
		i915_vma_resource_put(vw->vma_res);
}

static const struct dma_fence_work_ops bind_ops = {
	.name = "bind",
	.work = __vma_bind,
	.release = __vma_release,
};

struct i915_vma_work *i915_vma_work(void)
{
	struct i915_vma_work *vw;

	vw = kzalloc(sizeof(*vw), GFP_KERNEL);
	if (!vw)
		return NULL;

	dma_fence_work_init(&vw->base, &bind_ops);
	vw->base.dma.error = -EAGAIN; /* disable the worker by default */

	return vw;
}

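/*
 * Wait for any pending (possibly asynchronous) bind to complete by waiting
 * on the vma's exclusive fence, if one is still installed.
 */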
int i915_vma_wait_for_bind(struct i915_vma *vma)
{
	int err = 0;

	if (rcu_access_pointer(vma->active.excl.fence)) {
		struct dma_fence *fence;

		rcu_read_lock();
		fence = dma_fence_get_rcu_safe(&vma->active.excl.fence);
		rcu_read_unlock();
		if (fence) {
			err = dma_fence_wait(fence, true);
			dma_fence_put(fence);
		}
	}

	return err;
}

#if IS_ENABLED(CONFIG_DRM_I915_DEBUG_GEM)
static int i915_vma_verify_bind_complete(struct i915_vma *vma)
{
	struct dma_fence *fence = i915_active_fence_get(&vma->active.excl);
	int err;

	if (!fence)
		return 0;

	if (dma_fence_is_signaled(fence))
		err = fence->error;
	else
		err = -EBUSY;

	dma_fence_put(fence);

	return err;
}
#else
#define i915_vma_verify_bind_complete(_vma) 0
#endif

I915_SELFTEST_EXPORT void
i915_vma_resource_init_from_vma(struct i915_vma_resource *vma_res,
				struct i915_vma *vma)
{
	struct drm_i915_gem_object *obj = vma->obj;

	i915_vma_resource_init(vma_res, vma->vm, vma->pages, &vma->page_sizes,
			       obj->mm.rsgt, i915_gem_object_is_readonly(obj),
			       i915_gem_object_is_lmem(obj), obj->mm.region,
			       vma->ops, vma->private, vma->node.start,
			       vma->node.size, vma->size);
}

/**
 * i915_vma_bind - Sets up PTEs for an VMA in its corresponding address space.
 * @vma: VMA to map
 * @cache_level: mapping cache level
 * @flags: flags like global or local mapping
 * @work: preallocated worker for allocating and binding the PTE
 * @vma_res: pointer to a preallocated vma resource. The resource is either
 * consumed or freed.
 *
 * DMA addresses are taken from the scatter-gather table of this object (or of
 * this VMA in case of non-default GGTT views) and PTE entries set up.
 * Note that DMA addresses are also the only part of the SG table we care about.
 */
int i915_vma_bind(struct i915_vma *vma,
		  enum i915_cache_level cache_level,
		  u32 flags,
		  struct i915_vma_work *work,
		  struct i915_vma_resource *vma_res)
{
	u32 bind_flags;
	u32 vma_flags;
	int ret;

	lockdep_assert_held(&vma->vm->mutex);
	GEM_BUG_ON(!drm_mm_node_allocated(&vma->node));
	GEM_BUG_ON(vma->size > vma->node.size);

	if (GEM_DEBUG_WARN_ON(range_overflows(vma->node.start,
					      vma->node.size,
					      vma->vm->total))) {
		i915_vma_resource_free(vma_res);
		return -ENODEV;
	}

	if (GEM_DEBUG_WARN_ON(!flags)) {
		i915_vma_resource_free(vma_res);
		return -EINVAL;
	}

	bind_flags = flags;
	bind_flags &= I915_VMA_GLOBAL_BIND | I915_VMA_LOCAL_BIND;

	vma_flags = atomic_read(&vma->flags);
	vma_flags &= I915_VMA_GLOBAL_BIND | I915_VMA_LOCAL_BIND;

	bind_flags &= ~vma_flags;
	if (bind_flags == 0) {
		i915_vma_resource_free(vma_res);
		return 0;
	}

	GEM_BUG_ON(!atomic_read(&vma->pages_count));

	/* Wait for or await outstanding unbinds touching our range */
	if (work && bind_flags & vma->vm->bind_async_flags)
		ret = i915_vma_resource_bind_dep_await(vma->vm,
						       &work->base.chain,
						       vma->node.start,
						       vma->node.size,
						       true,
						       GFP_NOWAIT |
						       __GFP_RETRY_MAYFAIL |
						       __GFP_NOWARN);
	else
		ret = i915_vma_resource_bind_dep_sync(vma->vm, vma->node.start,
						      vma->node.size, true);
	if (ret) {
		i915_vma_resource_free(vma_res);
		return ret;
	}

	if (vma->resource || !vma_res) {
		/* Rebinding with an additional I915_VMA_*_BIND */
		GEM_WARN_ON(!vma_flags);
		i915_vma_resource_free(vma_res);
	} else {
		i915_vma_resource_init_from_vma(vma_res, vma);
		vma->resource = vma_res;
	}
	trace_i915_vma_bind(vma, bind_flags);
	if (work && bind_flags & vma->vm->bind_async_flags) {
		struct dma_fence *prev;

		work->vma_res = i915_vma_resource_get(vma->resource);
		work->cache_level = cache_level;
		work->flags = bind_flags;

		/*
		 * Note we only want to chain up to the migration fence on
		 * the pages (not the object itself). As we don't track that,
		 * yet, we have to use the exclusive fence instead.
		 *
		 * Also note that we do not want to track the async unbind
		 * fence (ptes) from a separate context, as we don't need
		 * to track its execution; the unbind fence is either taken
		 * directly by the vma_res or chained up via this bind fence.
		 */
		prev = i915_active_set_exclusive(&vma->active, &work->base.dma);
		if (prev) {
			__i915_sw_fence_await_dma_fence(&work->base.chain,
							prev,
							&work->cb);
			dma_fence_put(prev);
		}

		work->base.dma.error = 0; /* enable the queue_work() */

		/*
		 * If we don't have the refcounted pages list, keep a
		 * reference on the object to avoid waiting for the async
		 * bind to complete in the object destruction path.
		 */
		if (!work->vma_res->bi.pages_rsgt)
			work->pinned = i915_gem_object_get(vma->obj);
	} else {
		if (vma->obj) {
			ret = i915_gem_object_wait_moving_fence(vma->obj, true);
			if (ret) {
				i915_vma_resource_free(vma->resource);
				vma->resource = NULL;

				return ret;
			}
		}
		vma->ops->bind_vma(vma->vm, NULL, vma->resource, cache_level,
				   bind_flags);
	}

	if (vma->obj)
		set_bit(I915_BO_WAS_BOUND_BIT, &vma->obj->flags);

	atomic_or(bind_flags, &vma->flags);
	return 0;
}

void __iomem *i915_vma_pin_iomap(struct i915_vma *vma)
{
	void __iomem *ptr;
	int err;

	if (WARN_ON_ONCE(vma->obj->flags & I915_BO_ALLOC_GPU_ONLY))
		return IO_ERR_PTR(-EINVAL);

	if (!i915_gem_object_is_lmem(vma->obj)) {
		if (GEM_WARN_ON(!i915_vma_is_map_and_fenceable(vma))) {
			err = -ENODEV;
			goto err;
		}
	}

	GEM_BUG_ON(!i915_vma_is_ggtt(vma));
	GEM_BUG_ON(!i915_vma_is_bound(vma, I915_VMA_GLOBAL_BIND));
	GEM_BUG_ON(i915_vma_verify_bind_complete(vma));

	ptr = READ_ONCE(vma->iomap);
	if (ptr == NULL) {
		/*
		 * TODO: consider just using i915_gem_object_pin_map() for lmem
		 * instead, which already supports mapping non-contiguous chunks
		 * of pages, that way we can also drop the
		 * I915_BO_ALLOC_CONTIGUOUS when allocating the object.
		 */
		if (i915_gem_object_is_lmem(vma->obj))
			ptr = i915_gem_object_lmem_io_map(vma->obj, 0,
							  vma->obj->base.size);
		else
			ptr = io_mapping_map_wc(&i915_vm_to_ggtt(vma->vm)->iomap,
						vma->node.start,
						vma->node.size);
		if (ptr == NULL) {
			err = -ENOMEM;
			goto err;
		}

		if (unlikely(cmpxchg(&vma->iomap, NULL, ptr))) {
			io_mapping_unmap(ptr);
			ptr = vma->iomap;
		}
	}

	__i915_vma_pin(vma);

	err = i915_vma_pin_fence(vma);
	if (err)
		goto err_unpin;

	i915_vma_set_ggtt_write(vma);

	/* NB Access through the GTT requires the device to be awake. */
	return ptr;

err_unpin:
	__i915_vma_unpin(vma);
err:
	return IO_ERR_PTR(err);
}

void i915_vma_flush_writes(struct i915_vma *vma)
{
	if (i915_vma_unset_ggtt_write(vma))
		intel_gt_flush_ggtt_writes(vma->vm->gt);
}

void i915_vma_unpin_iomap(struct i915_vma *vma)
{
	GEM_BUG_ON(vma->iomap == NULL);

	i915_vma_flush_writes(vma);

	i915_vma_unpin_fence(vma);
	i915_vma_unpin(vma);
}

void i915_vma_unpin_and_release(struct i915_vma **p_vma, unsigned int flags)
{
	struct i915_vma *vma;
	struct drm_i915_gem_object *obj;

	vma = fetch_and_zero(p_vma);
	if (!vma)
		return;

	obj = vma->obj;
	GEM_BUG_ON(!obj);

	i915_vma_unpin(vma);

	if (flags & I915_VMA_RELEASE_MAP)
		i915_gem_object_unpin_map(obj);

	i915_gem_object_put(obj);
}

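/*
 * i915_vma_misplaced - check whether the vma's current placement satisfies
 * the given size, alignment and PIN_* placement constraints.
 */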
bool i915_vma_misplaced(const struct i915_vma *vma,
			u64 size, u64 alignment, u64 flags)
{
	if (!drm_mm_node_allocated(&vma->node))
		return false;

	if (test_bit(I915_VMA_ERROR_BIT, __i915_vma_flags(vma)))
		return true;

	if (vma->node.size < size)
		return true;

	GEM_BUG_ON(alignment && !is_power_of_2(alignment));
	if (alignment && !IS_ALIGNED(vma->node.start, alignment))
		return true;

	if (flags & PIN_MAPPABLE && !i915_vma_is_map_and_fenceable(vma))
		return true;

	if (flags & PIN_OFFSET_BIAS &&
	    vma->node.start < (flags & PIN_OFFSET_MASK))
		return true;

	if (flags & PIN_OFFSET_FIXED &&
	    vma->node.start != (flags & PIN_OFFSET_MASK))
		return true;

	return false;
}

void __i915_vma_set_map_and_fenceable(struct i915_vma *vma)
{
	bool mappable, fenceable;

	GEM_BUG_ON(!i915_vma_is_ggtt(vma));
	GEM_BUG_ON(!vma->fence_size);

	fenceable = (vma->node.size >= vma->fence_size &&
		     IS_ALIGNED(vma->node.start, vma->fence_alignment));

	mappable = vma->node.start + vma->fence_size <= i915_vm_to_ggtt(vma->vm)->mappable_end;

	if (mappable && fenceable)
		set_bit(I915_VMA_CAN_FENCE_BIT, __i915_vma_flags(vma));
	else
		clear_bit(I915_VMA_CAN_FENCE_BIT, __i915_vma_flags(vma));
}

bool i915_gem_valid_gtt_space(struct i915_vma *vma, unsigned long color)
{
	struct drm_mm_node *node = &vma->node;
	struct drm_mm_node *other;

	/*
	 * On some machines we have to be careful when putting differing types
	 * of snoopable memory together to avoid the prefetcher crossing memory
	 * domains and dying. During vm initialisation, we decide whether or not
	 * these constraints apply and set the drm_mm.color_adjust
	 * appropriately.
	 */
	if (!i915_vm_has_cache_coloring(vma->vm))
		return true;

	/* Only valid to be called on an already inserted vma */
	GEM_BUG_ON(!drm_mm_node_allocated(node));
	GEM_BUG_ON(list_empty(&node->node_list));

	other = list_prev_entry(node, node_list);
	if (i915_node_color_differs(other, color) &&
	    !drm_mm_hole_follows(other))
		return false;

	other = list_next_entry(node, node_list);
	if (i915_node_color_differs(other, color) &&
	    !drm_mm_hole_follows(node))
		return false;

	return true;
}

/**
 * i915_vma_insert - finds a slot for the vma in its address space
 * @vma: the vma
 * @ww: An optional struct i915_gem_ww_ctx
 * @size: requested size in bytes (can be larger than the VMA's size)
 * @alignment: required alignment
 * @flags: mask of PIN_* flags to use
 *
 * First we try to allocate some free space that meets the requirements for
 * the VMA. Failing that, if the flags permit, it will evict an old VMA,
 * and if necessary trying again.
 *
 * Returns 0 on success, negative error code otherwise.
 */
static int
i915_vma_insert(struct i915_vma *vma, struct i915_gem_ww_ctx *ww,
		u64 size, u64 alignment, u64 flags)
{
	unsigned long color;
	u64 start, end;
	int ret;

	GEM_BUG_ON(i915_vma_is_bound(vma, I915_VMA_GLOBAL_BIND | I915_VMA_LOCAL_BIND));
	GEM_BUG_ON(drm_mm_node_allocated(&vma->node));

	size = max(size, vma->size);
	alignment = max(alignment, vma->display_alignment);
	if (flags & PIN_MAPPABLE) {
		size = max_t(typeof(size), size, vma->fence_size);
		alignment = max_t(typeof(alignment),
				  alignment, vma->fence_alignment);
	}

	GEM_BUG_ON(!IS_ALIGNED(size, I915_GTT_PAGE_SIZE));
	GEM_BUG_ON(!IS_ALIGNED(alignment, I915_GTT_MIN_ALIGNMENT));
	GEM_BUG_ON(!is_power_of_2(alignment));

	start = flags & PIN_OFFSET_BIAS ? flags & PIN_OFFSET_MASK : 0;
	GEM_BUG_ON(!IS_ALIGNED(start, I915_GTT_PAGE_SIZE));

	end = vma->vm->total;
	if (flags & PIN_MAPPABLE)
		end = min_t(u64, end, i915_vm_to_ggtt(vma->vm)->mappable_end);
	if (flags & PIN_ZONE_4G)
		end = min_t(u64, end, (1ULL << 32) - I915_GTT_PAGE_SIZE);
	GEM_BUG_ON(!IS_ALIGNED(end, I915_GTT_PAGE_SIZE));

	alignment = max(alignment, i915_vm_obj_min_alignment(vma->vm, vma->obj));

	/*
	 * For compact-pt we round up the reservation to prevent
	 * any smaller pages being used within the same PDE.
	 */
	if (NEEDS_COMPACT_PT(vma->vm->i915))
		size = round_up(size, alignment);

	/* If binding the object/GGTT view requires more space than the entire
	 * aperture has, reject it early before evicting everything in a vain
	 * attempt to find space.
	 */
	if (size > end) {
		DRM_DEBUG("Attempting to bind an object larger than the aperture: request=%llu > %s aperture=%llu\n",
			  size, flags & PIN_MAPPABLE ? "mappable" : "total",
			  end);
		return -ENOSPC;
	}

	color = 0;

	if (i915_vm_has_cache_coloring(vma->vm))
		color = vma->obj->cache_level;

	if (flags & PIN_OFFSET_FIXED) {
		u64 offset = flags & PIN_OFFSET_MASK;

		if (!IS_ALIGNED(offset, alignment) ||
		    range_overflows(offset, size, end))
			return -EINVAL;

		ret = i915_gem_gtt_reserve(vma->vm, ww, &vma->node,
					   size, offset, color,
					   flags);
		if (ret)
			return ret;
	} else {
		/*
		 * We only support huge gtt pages through the 48b PPGTT,
		 * however we also don't want to force any alignment for
		 * objects which need to be tightly packed into the low 32bits.
		 *
		 * Note that we assume that GGTT are limited to 4GiB for the
		 * forseeable future. See also i915_ggtt_offset().
		 */
		if (upper_32_bits(end - 1) &&
		    vma->page_sizes.sg > I915_GTT_PAGE_SIZE) {
			/*
			 * We can't mix 64K and 4K PTEs in the same page-table
			 * (2M block), and so to avoid the ugliness and
			 * complexity of coloring we opt for just aligning 64K
			 * objects to 2M.
			 */
			u64 page_alignment =
				rounddown_pow_of_two(vma->page_sizes.sg |
						     I915_GTT_PAGE_SIZE_2M);

			/*
			 * Check we don't expand for the limited Global GTT
			 * (mappable aperture is even more precious!). This
			 * also checks that we exclude the aliasing-ppgtt.
			 */
			GEM_BUG_ON(i915_vma_is_ggtt(vma));

			alignment = max(alignment, page_alignment);

			if (vma->page_sizes.sg & I915_GTT_PAGE_SIZE_64K)
				size = round_up(size, I915_GTT_PAGE_SIZE_2M);
		}

		ret = i915_gem_gtt_insert(vma->vm, ww, &vma->node,
					  size, alignment, color,
					  start, end, flags);
		if (ret)
			return ret;

		GEM_BUG_ON(vma->node.start < start);
		GEM_BUG_ON(vma->node.start + vma->node.size > end);
	}
	GEM_BUG_ON(!drm_mm_node_allocated(&vma->node));
	GEM_BUG_ON(!i915_gem_valid_gtt_space(vma, color));

	list_add_tail(&vma->vm_link, &vma->vm->bound_list);

	return 0;
}

static void
i915_vma_detach(struct i915_vma *vma)
{
	GEM_BUG_ON(!drm_mm_node_allocated(&vma->node));
	GEM_BUG_ON(i915_vma_is_bound(vma, I915_VMA_GLOBAL_BIND | I915_VMA_LOCAL_BIND));

	/*
	 * Drop the vma from the vm's bound list now that it is fully
	 * unbound; the vma itself stays in the object's vma tree until
	 * it is finally destroyed.
	 */
	list_del(&vma->vm_link);
}

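/*
 * Try a "quick and dirty" pin without taking vm->mutex: this succeeds only
 * if the vma is already bound with the requested flags, in which case the
 * pin count is bumped atomically (or, with PIN_VALIDATE, merely checked).
 */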
static bool try_qad_pin(struct i915_vma *vma, unsigned int flags)
{
	unsigned int bound;

	bound = atomic_read(&vma->flags);

	if (flags & PIN_VALIDATE) {
		flags &= I915_VMA_BIND_MASK;

		return (flags & bound) == flags;
	}

	/* with the lock mandatory for unbind, we don't race here */
	flags &= I915_VMA_BIND_MASK;
	do {
		if (unlikely(flags & ~bound))
			return false;

		if (unlikely(bound & (I915_VMA_OVERFLOW | I915_VMA_ERROR)))
			return false;

		GEM_BUG_ON(((bound + 1) & I915_VMA_PIN_MASK) == 0);
	} while (!atomic_try_cmpxchg(&vma->flags, &bound, bound + 1));

	return true;
}

static struct scatterlist *
rotate_pages(struct drm_i915_gem_object *obj, unsigned int offset,
	     unsigned int width, unsigned int height,
	     unsigned int src_stride, unsigned int dst_stride,
	     struct sg_table *st, struct scatterlist *sg)
{
	unsigned int column, row;
	unsigned int src_idx;

	for (column = 0; column < width; column++) {
		unsigned int left;

		src_idx = src_stride * (height - 1) + column + offset;
		for (row = 0; row < height; row++) {
			st->nents++;
			/*
			 * We don't need the pages, but need to initialize
			 * the entries so the sg list can be happily traversed.
			 * The only thing we need are DMA addresses.
			 */
			sg_set_page(sg, NULL, I915_GTT_PAGE_SIZE, 0);
			sg_dma_address(sg) =
				i915_gem_object_get_dma_address(obj, src_idx);
			sg_dma_len(sg) = I915_GTT_PAGE_SIZE;
			sg = sg_next(sg);
			src_idx -= src_stride;
		}

		left = (dst_stride - height) * I915_GTT_PAGE_SIZE;

		if (!left)
			continue;

		st->nents++;

		/*
		 * The DE ignores the PTEs for the padding tiles, the sg entry
		 * here is just a convenience to indicate how many padding PTEs
		 * to insert at this spot.
		 */
		sg_set_page(sg, NULL, left, 0);
		sg_dma_address(sg) = 0;
		sg_dma_len(sg) = left;
		sg = sg_next(sg);
	}

	return sg;
}

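/*
 * Build the scatterlist for a rotated GGTT view: a new sg_table whose DMA
 * addresses walk the object's pages in column-major order, one 4K page per
 * entry, with padding entries inserted wherever dst_stride exceeds the
 * plane height.
 */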
static noinline struct sg_table *
intel_rotate_pages(struct intel_rotation_info *rot_info,
		   struct drm_i915_gem_object *obj)
{
	unsigned int size = intel_rotation_info_size(rot_info);
	struct drm_i915_private *i915 = to_i915(obj->base.dev);
	struct sg_table *st;
	struct scatterlist *sg;
	int ret = -ENOMEM;
	int i;

	/* Allocate target SG list. */
	st = kmalloc(sizeof(*st), GFP_KERNEL);
	if (!st)
		goto err_st_alloc;

	ret = sg_alloc_table(st, size, GFP_KERNEL);
	if (ret)
		goto err_sg_alloc;

	st->nents = 0;
	sg = st->sgl;

	for (i = 0 ; i < ARRAY_SIZE(rot_info->plane); i++)
		sg = rotate_pages(obj, rot_info->plane[i].offset,
				  rot_info->plane[i].width, rot_info->plane[i].height,
				  rot_info->plane[i].src_stride,
				  rot_info->plane[i].dst_stride,
				  st, sg);

	return st;

err_sg_alloc:
	kfree(st);
err_st_alloc:

	drm_dbg(&i915->drm, "Failed to create rotated mapping for object size %zu! (%ux%u tiles, %u pages)\n",
		obj->base.size, rot_info->plane[0].width,
		rot_info->plane[0].height, size);

	return ERR_PTR(ret);
}

static struct scatterlist *
add_padding_pages(unsigned int count,
		  struct sg_table *st, struct scatterlist *sg)
{
	st->nents++;

	/*
	 * The DE ignores the PTEs for the padding tiles, the sg entry
	 * here is just a convenience to indicate how many padding PTEs
	 * to insert at this spot.
	 */
	sg_set_page(sg, NULL, count * I915_GTT_PAGE_SIZE, 0);
	sg_dma_address(sg) = 0;
	sg_dma_len(sg) = count * I915_GTT_PAGE_SIZE;
	sg = sg_next(sg);

	return sg;
}

static struct scatterlist *
remap_tiled_color_plane_pages(struct drm_i915_gem_object *obj,
			      unsigned int offset, unsigned int alignment_pad,
			      unsigned int width, unsigned int height,
			      unsigned int src_stride, unsigned int dst_stride,
			      struct sg_table *st, struct scatterlist *sg,
			      unsigned int *gtt_offset)
{
	unsigned int row;

	if (!width || !height)
		return sg;

	if (alignment_pad)
		sg = add_padding_pages(alignment_pad, st, sg);

	for (row = 0; row < height; row++) {
		unsigned int left = width * I915_GTT_PAGE_SIZE;

		while (left) {
			dma_addr_t addr;
			unsigned int length;

			/*
			 * We don't need the pages, but need to initialize
			 * the entries so the sg list can be happily traversed.
			 * The only thing we need are DMA addresses.
			 */
			addr = i915_gem_object_get_dma_address_len(obj, offset, &length);

			length = min(left, length);

			st->nents++;

			sg_set_page(sg, NULL, length, 0);
			sg_dma_address(sg) = addr;
			sg_dma_len(sg) = length;
			sg = sg_next(sg);

			offset += length / I915_GTT_PAGE_SIZE;
			left -= length;
		}

		offset += src_stride - width;

		left = (dst_stride - width) * I915_GTT_PAGE_SIZE;

		if (!left)
			continue;

		sg = add_padding_pages(left >> PAGE_SHIFT, st, sg);
	}

	*gtt_offset += alignment_pad + dst_stride * height;

	return sg;
}

static struct scatterlist *
remap_contiguous_pages(struct drm_i915_gem_object *obj,
		       unsigned int obj_offset,
		       unsigned int count,
		       struct sg_table *st, struct scatterlist *sg)
{
	struct scatterlist *iter;
	unsigned int offset;

	iter = i915_gem_object_get_sg_dma(obj, obj_offset, &offset);
	GEM_BUG_ON(!iter);

	do {
		unsigned int len;

		len = min(sg_dma_len(iter) - (offset << PAGE_SHIFT),
			  count << PAGE_SHIFT);
		sg_set_page(sg, NULL, len, 0);
		sg_dma_address(sg) =
			sg_dma_address(iter) + (offset << PAGE_SHIFT);
		sg_dma_len(sg) = len;

		st->nents++;
		count -= len >> PAGE_SHIFT;
		if (count == 0)
			return sg;

		sg = __sg_next(sg);
		iter = __sg_next(iter);
		offset = 0;
	} while (1);
}

static struct scatterlist *
remap_linear_color_plane_pages(struct drm_i915_gem_object *obj,
			       unsigned int obj_offset, unsigned int alignment_pad,
			       unsigned int size,
			       struct sg_table *st, struct scatterlist *sg,
			       unsigned int *gtt_offset)
{
	if (!size)
		return sg;

	if (alignment_pad)
		sg = add_padding_pages(alignment_pad, st, sg);

	sg = remap_contiguous_pages(obj, obj_offset, size, st, sg);
	sg = sg_next(sg);

	*gtt_offset += alignment_pad + size;

	return sg;
}

static struct scatterlist *
remap_color_plane_pages(const struct intel_remapped_info *rem_info,
			struct drm_i915_gem_object *obj,
			int color_plane,
			struct sg_table *st, struct scatterlist *sg,
			unsigned int *gtt_offset)
{
	unsigned int alignment_pad = 0;

	if (rem_info->plane_alignment)
		alignment_pad = ALIGN(*gtt_offset, rem_info->plane_alignment) - *gtt_offset;

	if (rem_info->plane[color_plane].linear)
		sg = remap_linear_color_plane_pages(obj,
						    rem_info->plane[color_plane].offset,
						    alignment_pad,
						    rem_info->plane[color_plane].size,
						    st, sg,
						    gtt_offset);

	else
		sg = remap_tiled_color_plane_pages(obj,
						   rem_info->plane[color_plane].offset,
						   alignment_pad,
						   rem_info->plane[color_plane].width,
						   rem_info->plane[color_plane].height,
						   rem_info->plane[color_plane].src_stride,
						   rem_info->plane[color_plane].dst_stride,
						   st, sg,
						   gtt_offset);

	return sg;
}

static noinline struct sg_table *
intel_remap_pages(struct intel_remapped_info *rem_info,
		  struct drm_i915_gem_object *obj)
{
	unsigned int size = intel_remapped_info_size(rem_info);
	struct drm_i915_private *i915 = to_i915(obj->base.dev);
	struct sg_table *st;
	struct scatterlist *sg;
	unsigned int gtt_offset = 0;
	int ret = -ENOMEM;
	int i;

	/* Allocate target SG list. */
	st = kmalloc(sizeof(*st), GFP_KERNEL);
	if (!st)
		goto err_st_alloc;

	ret = sg_alloc_table(st, size, GFP_KERNEL);
	if (ret)
		goto err_sg_alloc;

	st->nents = 0;
	sg = st->sgl;

	for (i = 0 ; i < ARRAY_SIZE(rem_info->plane); i++)
		sg = remap_color_plane_pages(rem_info, obj, i, st, sg, &gtt_offset);

	i915_sg_trim(st);

	return st;

err_sg_alloc:
	kfree(st);
err_st_alloc:

	drm_dbg(&i915->drm, "Failed to create remapped mapping for object size %zu! (%ux%u tiles, %u pages)\n",
		obj->base.size, rem_info->plane[0].width,
		rem_info->plane[0].height, size);

	return ERR_PTR(ret);
}

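/*
 * Build the scatterlist for a partial view: a contiguous run of pages
 * starting at view->partial.offset within the object.
 */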
static noinline struct sg_table *
intel_partial_pages(const struct i915_ggtt_view *view,
		    struct drm_i915_gem_object *obj)
{
	struct sg_table *st;
	struct scatterlist *sg;
	unsigned int count = view->partial.size;
	int ret = -ENOMEM;

	st = kmalloc(sizeof(*st), GFP_KERNEL);
	if (!st)
		goto err_st_alloc;

	ret = sg_alloc_table(st, count, GFP_KERNEL);
	if (ret)
		goto err_sg_alloc;

	st->nents = 0;

	sg = remap_contiguous_pages(obj, view->partial.offset, count, st, st->sgl);

	sg_mark_end(sg);
	i915_sg_trim(st);

	return st;

err_sg_alloc:
	kfree(st);
err_st_alloc:
	return ERR_PTR(ret);
}

static int
__i915_vma_get_pages(struct i915_vma *vma)
{
	struct sg_table *pages;

	/*
	 * The vma->pages are only valid within the lifespan of the borrowed
	 * obj->mm.pages. When the obj->mm.pages sg_table is regenerated, so
	 * must be the vma->pages. A simple rule is that vma->pages must only
	 * be accessed when the obj->mm.pages are pinned.
	 */
	GEM_BUG_ON(!i915_gem_object_has_pinned_pages(vma->obj));

	switch (vma->ggtt_view.type) {
	default:
		GEM_BUG_ON(vma->ggtt_view.type);
		fallthrough;
	case I915_GGTT_VIEW_NORMAL:
		pages = vma->obj->mm.pages;
		break;

	case I915_GGTT_VIEW_ROTATED:
		pages =
			intel_rotate_pages(&vma->ggtt_view.rotated, vma->obj);
		break;

	case I915_GGTT_VIEW_REMAPPED:
		pages =
			intel_remap_pages(&vma->ggtt_view.remapped, vma->obj);
		break;

	case I915_GGTT_VIEW_PARTIAL:
		pages = intel_partial_pages(&vma->ggtt_view, vma->obj);
		break;
	}

	if (IS_ERR(pages)) {
		drm_err(&vma->vm->i915->drm,
			"Failed to get pages for VMA view type %u (%ld)!\n",
			vma->ggtt_view.type, PTR_ERR(pages));
		return PTR_ERR(pages);
	}

	vma->pages = pages;

	return 0;
}

I915_SELFTEST_EXPORT int i915_vma_get_pages(struct i915_vma *vma)
{
	int err;

	if (atomic_add_unless(&vma->pages_count, 1, 0))
		return 0;

	err = i915_gem_object_pin_pages(vma->obj);
	if (err)
		return err;

	err = __i915_vma_get_pages(vma);
	if (err)
		goto err_unpin;

	vma->page_sizes = vma->obj->mm.page_sizes;
	atomic_inc(&vma->pages_count);

	return 0;

err_unpin:
	__i915_gem_object_unpin_pages(vma->obj);

	return err;
}

static void __vma_put_pages(struct i915_vma *vma, unsigned int count)
{
	/* We allocate under vma_get_pages, so beware the shrinker */
	GEM_BUG_ON(atomic_read(&vma->pages_count) < count);

	if (atomic_sub_return(count, &vma->pages_count) == 0) {
		if (vma->pages != vma->obj->mm.pages) {
			sg_free_table(vma->pages);
			kfree(vma->pages);
		}
		vma->pages = NULL;

		i915_gem_object_unpin_pages(vma->obj);
	}
}

I915_SELFTEST_EXPORT void i915_vma_put_pages(struct i915_vma *vma)
{
	if (atomic_add_unless(&vma->pages_count, -1, 1))
		return;

	__vma_put_pages(vma, 1);
}

static void vma_unbind_pages(struct i915_vma *vma)
{
	unsigned int count;

	lockdep_assert_held(&vma->vm->mutex);

	/* The upper portion of pages_count is the number of bindings */
	count = atomic_read(&vma->pages_count);
	count >>= I915_VMA_PAGES_BIAS;
	GEM_BUG_ON(!count);

	__vma_put_pages(vma, count | count << I915_VMA_PAGES_BIAS);
}

int i915_vma_pin_ww(struct i915_vma *vma, struct i915_gem_ww_ctx *ww,
		    u64 size, u64 alignment, u64 flags)
{
	struct i915_vma_work *work = NULL;
	struct dma_fence *moving = NULL;
	struct i915_vma_resource *vma_res = NULL;
	intel_wakeref_t wakeref = 0;
	unsigned int bound;
	int err;

	assert_vma_held(vma);
	GEM_BUG_ON(!ww);

	BUILD_BUG_ON(PIN_GLOBAL != I915_VMA_GLOBAL_BIND);
	BUILD_BUG_ON(PIN_USER != I915_VMA_LOCAL_BIND);

	GEM_BUG_ON(!(flags & (PIN_USER | PIN_GLOBAL)));

	/* First try and grab the pin without rebinding the vma */
	if (try_qad_pin(vma, flags))
		return 0;

	err = i915_vma_get_pages(vma);
	if (err)
		return err;

	if (flags & PIN_GLOBAL)
		wakeref = intel_runtime_pm_get(&vma->vm->i915->runtime_pm);

	moving = vma->obj ? i915_gem_object_get_moving_fence(vma->obj) : NULL;
	if (flags & vma->vm->bind_async_flags || moving) {
		/* lock VM */
		err = i915_vm_lock_objects(vma->vm, ww);
		if (err)
			goto err_rpm;

		work = i915_vma_work();
		if (!work) {
			err = -ENOMEM;
			goto err_rpm;
		}

		work->vm = i915_vm_get(vma->vm);

		dma_fence_work_chain(&work->base, moving);

		/* Allocate enough page directories to used PTE */
		if (vma->vm->allocate_va_range) {
			err = i915_vm_alloc_pt_stash(vma->vm,
						     &work->stash,
						     vma->size);
			if (err)
				goto err_fence;

			err = i915_vm_map_pt_stash(vma->vm, &work->stash);
			if (err)
				goto err_fence;
		}
	}

	vma_res = i915_vma_resource_alloc();
	if (IS_ERR(vma_res)) {
		err = PTR_ERR(vma_res);
		goto err_fence;
	}

	/*
	 * Differentiate between user/kernel vma inside the aliasing-ppgtt.
	 *
	 * We conflate the Global GTT with the user's vma when using the
	 * aliasing-ppgtt, but it is still vitally important to try and
	 * keep the use cases distinct. For example, userptr objects are
	 * not allowed inside the Global GTT as that will cause lock
	 * inversions when we have to evict them the mmu_notifier callbacks -
	 * but they are allowed to be part of the user ppGTT which can never
	 * be mapped. As such we try to give the distinct users of the same
	 * mutex, distinct lockclasses [equivalent to how we keep two distinct
	 * vm->mutex for two distinct address spaces].
	 *
	 * NB this may cause us to mask real lock inversions -- while the
	 * code is safe today, lockdep may not be able to spot future
	 * transgressions.
	 */
	err = mutex_lock_interruptible_nested(&vma->vm->mutex,
					      !(flags & PIN_GLOBAL));
	if (err)
		goto err_vma_res;

	/* No more allocations allowed now we hold vm->mutex */

	if (unlikely(i915_vma_is_closed(vma))) {
		err = -ENOENT;
		goto err_unlock;
	}

	bound = atomic_read(&vma->flags);
	if (unlikely(bound & I915_VMA_ERROR)) {
		err = -ENOMEM;
		goto err_unlock;
	}

	if (unlikely(!((bound + 1) & I915_VMA_PIN_MASK))) {
		err = -EAGAIN; /* pins are meant to be fairly temporary */
		goto err_unlock;
	}

	if (unlikely(!(flags & ~bound & I915_VMA_BIND_MASK))) {
		if (!(flags & PIN_VALIDATE))
			__i915_vma_pin(vma);
		goto err_unlock;
	}

	err = i915_active_acquire(&vma->active);
	if (err)
		goto err_unlock;

	if (!(bound & I915_VMA_BIND_MASK)) {
		err = i915_vma_insert(vma, ww, size, alignment, flags);
		if (err)
			goto err_active;

		if (i915_is_ggtt(vma->vm))
			__i915_vma_set_map_and_fenceable(vma);
	}

	GEM_BUG_ON(!vma->pages);
	err = i915_vma_bind(vma,
			    vma->obj->cache_level,
			    flags, work, vma_res);
	vma_res = NULL;
	if (err)
		goto err_remove;

	/* There should only be at most 2 active bindings (user, global) */
	GEM_BUG_ON(bound + I915_VMA_PAGES_ACTIVE < bound);
	atomic_add(I915_VMA_PAGES_ACTIVE, &vma->pages_count);
	list_move_tail(&vma->vm_link, &vma->vm->bound_list);

	if (!(flags & PIN_VALIDATE)) {
		__i915_vma_pin(vma);
		GEM_BUG_ON(!i915_vma_is_pinned(vma));
	}
	GEM_BUG_ON(!i915_vma_is_bound(vma, flags));
	GEM_BUG_ON(i915_vma_misplaced(vma, size, alignment, flags));

err_remove:
	if (!i915_vma_is_bound(vma, I915_VMA_BIND_MASK)) {
		i915_vma_detach(vma);
		drm_mm_remove_node(&vma->node);
	}
err_active:
	i915_active_release(&vma->active);
err_unlock:
	mutex_unlock(&vma->vm->mutex);
err_vma_res:
	i915_vma_resource_free(vma_res);
err_fence:
	if (work)
		dma_fence_work_commit_imm(&work->base);
err_rpm:
	if (wakeref)
		intel_runtime_pm_put(&vma->vm->i915->runtime_pm, wakeref);

	if (moving)
		dma_fence_put(moving);

	i915_vma_put_pages(vma);
	return err;
}

static void flush_idle_contexts(struct intel_gt *gt)
{
	struct intel_engine_cs *engine;
	enum intel_engine_id id;

	for_each_engine(engine, gt, id)
		intel_engine_flush_barriers(engine);

	intel_gt_wait_for_idle(gt, MAX_SCHEDULE_TIMEOUT);
}

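/*
 * Pin into the GGTT with PIN_GLOBAL, flushing idle contexts and evicting
 * the address space on -ENOSPC until the pin finally succeeds.
 */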
static int __i915_ggtt_pin(struct i915_vma *vma, struct i915_gem_ww_ctx *ww,
			   u32 align, unsigned int flags)
{
	struct i915_address_space *vm = vma->vm;
	int err;

	do {
		err = i915_vma_pin_ww(vma, ww, 0, align, flags | PIN_GLOBAL);

		if (err != -ENOSPC) {
			if (!err) {
				err = i915_vma_wait_for_bind(vma);
				if (err)
					i915_vma_unpin(vma);
			}
			return err;
		}

		/* Unlike i915_vma_pin, we don't take no for an answer! */
		flush_idle_contexts(vm->gt);
		if (mutex_lock_interruptible(&vm->mutex) == 0) {
			/*
			 * We pass NULL ww here, as the default value of
			 * NULL is without a ww acquire context, so contended
			 * objects are simply skipped during eviction rather
			 * than waited upon.
			 */
			i915_gem_evict_vm(vm, NULL);
			mutex_unlock(&vm->mutex);
		}
	} while (1);
}

int i915_ggtt_pin(struct i915_vma *vma, struct i915_gem_ww_ctx *ww,
		  u32 align, unsigned int flags)
{
	struct i915_gem_ww_ctx _ww;
	int err;

	GEM_BUG_ON(!i915_vma_is_ggtt(vma));

	if (ww)
		return __i915_ggtt_pin(vma, ww, align, flags);

#ifdef CONFIG_LOCKDEP
	WARN_ON(dma_resv_held(vma->obj->base.resv));
#endif

	for_i915_gem_ww(&_ww, err, true) {
		err = i915_gem_object_lock(vma->obj, &_ww);
		if (!err)
			err = __i915_ggtt_pin(vma, &_ww, align, flags);
	}

	return err;
}

static void __vma_close(struct i915_vma *vma, struct intel_gt *gt)
{
	/*
	 * We defer actually closing, unbinding and destroying the VMA until
	 * the next idle point, or if the object is freed in the meantime. By
	 * postponing the unbind, we allow for it to be resurrected by the
	 * client, avoiding the work required to rebind the VMA. This is
	 * advantageous for DRI, where the client/server pass objects
	 * between themselves, temporarily opening a local VMA to the
	 * object, and then closing it again. The same object is then reused
	 * on the next frame (or two, depending on the depth of the swap
	 * queue), so keeping the VMA alive avoids that rebinding cost.
	 */
	GEM_BUG_ON(i915_vma_is_closed(vma));
	list_add(&vma->closed_link, &gt->closed_vma);
}

void i915_vma_close(struct i915_vma *vma)
{
	struct intel_gt *gt = vma->vm->gt;
	unsigned long flags;

	if (i915_vma_is_ggtt(vma))
		return;

	GEM_BUG_ON(!atomic_read(&vma->open_count));
	if (atomic_dec_and_lock_irqsave(&vma->open_count,
					&gt->closed_lock,
					flags)) {
		__vma_close(vma, gt);
		spin_unlock_irqrestore(&gt->closed_lock, flags);
	}
}

static void __i915_vma_remove_closed(struct i915_vma *vma)
{
	list_del_init(&vma->closed_link);
}

void i915_vma_reopen(struct i915_vma *vma)
{
	struct intel_gt *gt = vma->vm->gt;

	spin_lock_irq(&gt->closed_lock);
	if (i915_vma_is_closed(vma))
		__i915_vma_remove_closed(vma);
	spin_unlock_irq(&gt->closed_lock);
}

void i915_vma_release(struct kref *ref)
{
	struct i915_vma *vma = container_of(ref, typeof(*vma), ref);

	i915_vm_put(vma->vm);
	i915_active_fini(&vma->active);
	GEM_WARN_ON(vma->resource);
	i915_vma_free(vma);
}

static void force_unbind(struct i915_vma *vma)
{
	if (!drm_mm_node_allocated(&vma->node))
		return;

	atomic_and(~I915_VMA_PIN_MASK, &vma->flags);
	WARN_ON(__i915_vma_unbind(vma));
	GEM_BUG_ON(drm_mm_node_allocated(&vma->node));
}

static void release_references(struct i915_vma *vma)
{
	struct drm_i915_gem_object *obj = vma->obj;
	struct intel_gt *gt = vma->vm->gt;

	GEM_BUG_ON(i915_vma_is_active(vma));

	spin_lock(&obj->vma.lock);
	list_del(&vma->obj_link);
	if (!RB_EMPTY_NODE(&vma->obj_node))
		rb_erase(&vma->obj_node, &obj->vma.tree);
	spin_unlock(&obj->vma.lock);

	spin_lock_irq(&gt->closed_lock);
	__i915_vma_remove_closed(vma);
	spin_unlock_irq(&gt->closed_lock);

	__i915_vma_put(vma);
}

/**
 * i915_vma_destroy_locked - Remove all weak reference to the vma and put
 * the initial reference.
 *
 * This function should be called when it's decided the vma isn't needed
 * anymore. The caller must assure that it doesn't race with another lookup
 * plus destroy, typically by taking an appropriate reference.
 *
 * Current callsites are
 * - __i915_gem_object_pages_fini()
 * - __i915_vm_close() - Blocks the above function by taking a reference on
 * the object.
 * - __i915_vma_parked() - Blocks the above functions by taking a reference
 * on the vm and a reference on the object. Also takes the object lock so
 * destruction is thus protected by the object lock.
 *
 * Because of locks taken during destruction, a vma is also guaranteed to
 * stay alive while the following locks are held if it was looked up while
 * holding one of the locks:
 * - vm->mutex
 * - obj->vma.lock
 * - gt->closed_lock
 *
 * A vma user can also temporarily keep the vma alive while holding a vma
 * reference.
 */
void i915_vma_destroy_locked(struct i915_vma *vma)
{
	lockdep_assert_held(&vma->vm->mutex);

	force_unbind(vma);
	release_references(vma);
}

void i915_vma_destroy(struct i915_vma *vma)
{
	mutex_lock(&vma->vm->mutex);
	force_unbind(vma);
	mutex_unlock(&vma->vm->mutex);
	release_references(vma);
}

void i915_vma_parked(struct intel_gt *gt)
{
	struct i915_vma *vma, *next;
	LIST_HEAD(closed);

	spin_lock_irq(&gt->closed_lock);
	list_for_each_entry_safe(vma, next, &gt->closed_vma, closed_link) {
		struct drm_i915_gem_object *obj = vma->obj;
		struct i915_address_space *vm = vma->vm;

		/* XXX All to avoid keeping a reference on i915_vma itself */

		if (!kref_get_unless_zero(&obj->base.refcount))
			continue;

		if (!i915_vm_tryopen(vm)) {
			i915_gem_object_put(obj);
			continue;
		}

		list_move(&vma->closed_link, &closed);
	}
	spin_unlock_irq(&gt->closed_lock);

	/* As the GT is held idle, no vma can be reopened as we destroy them */
	list_for_each_entry_safe(vma, next, &closed, closed_link) {
		struct drm_i915_gem_object *obj = vma->obj;
		struct i915_address_space *vm = vma->vm;

		if (i915_gem_object_trylock(obj, NULL)) {
			INIT_LIST_HEAD(&vma->closed_link);
			i915_vma_destroy(vma);
			i915_gem_object_unlock(obj);
		} else {
			/* back you go.. */
			spin_lock_irq(&gt->closed_lock);
			list_add(&vma->closed_link, &gt->closed_vma);
			spin_unlock_irq(&gt->closed_lock);
		}

		i915_gem_object_put(obj);
		i915_vm_close(vm);
	}
}

static void __i915_vma_iounmap(struct i915_vma *vma)
{
	GEM_BUG_ON(i915_vma_is_pinned(vma));

	if (vma->iomap == NULL)
		return;

	io_mapping_unmap(vma->iomap);
	vma->iomap = NULL;
}

void i915_vma_revoke_mmap(struct i915_vma *vma)
{
	struct drm_vma_offset_node *node;
	u64 vma_offset;

	if (!i915_vma_has_userfault(vma))
		return;

	GEM_BUG_ON(!i915_vma_is_map_and_fenceable(vma));
	GEM_BUG_ON(!vma->obj->userfault_count);

	node = &vma->mmo->vma_node;
	vma_offset = vma->ggtt_view.partial.offset << PAGE_SHIFT;
	unmap_mapping_range(vma->vm->i915->drm.anon_inode->i_mapping,
			    drm_vma_node_offset_addr(node) + vma_offset,
			    vma->size,
			    1);

	i915_vma_unset_userfault(vma);
	if (!--vma->obj->userfault_count)
		list_del(&vma->obj->userfault_link);
}

static int
__i915_request_await_bind(struct i915_request *rq, struct i915_vma *vma)
{
	return __i915_request_await_exclusive(rq, &vma->active);
}

static int __i915_vma_move_to_active(struct i915_vma *vma, struct i915_request *rq)
{
	int err;

	/* Wait for the vma to be bound before we start! */
	err = __i915_request_await_bind(rq, vma);
	if (err)
		return err;

	return i915_active_add_request(&vma->active, rq);
}

int _i915_vma_move_to_active(struct i915_vma *vma,
			     struct i915_request *rq,
			     struct dma_fence *fence,
			     unsigned int flags)
{
	struct drm_i915_gem_object *obj = vma->obj;
	int err;

	assert_object_held(obj);

	GEM_BUG_ON(!vma->pages);

	err = __i915_vma_move_to_active(vma, rq);
	if (unlikely(err))
		return err;

	if (flags & EXEC_OBJECT_WRITE) {
		struct intel_frontbuffer *front;

		front = __intel_frontbuffer_get(obj);
		if (unlikely(front)) {
			if (intel_frontbuffer_invalidate(front, ORIGIN_CS))
				i915_active_add_request(&front->write, rq);
			intel_frontbuffer_put(front);
		}

		if (fence) {
			dma_resv_add_excl_fence(vma->obj->base.resv, fence);
			obj->write_domain = I915_GEM_DOMAIN_RENDER;
			obj->read_domains = 0;
		}
	} else {
		if (!(flags & __EXEC_OBJECT_NO_RESERVE)) {
			err = dma_resv_reserve_shared(vma->obj->base.resv, 1);
			if (unlikely(err))
				return err;
		}

		if (fence) {
			dma_resv_add_shared_fence(vma->obj->base.resv, fence);
			obj->write_domain = 0;
		}
	}

	if (flags & EXEC_OBJECT_NEEDS_FENCE && vma->fence)
		i915_active_add_request(&vma->fence->active, rq);

	obj->read_domains |= I915_GEM_GPU_DOMAINS;
	obj->mm.dirty = true;

	GEM_BUG_ON(!i915_vma_is_active(vma));
	return 0;
}

struct dma_fence *__i915_vma_evict(struct i915_vma *vma, bool async)
{
	struct i915_vma_resource *vma_res = vma->resource;
	struct dma_fence *unbind_fence;

	GEM_BUG_ON(i915_vma_is_pinned(vma));
	assert_vma_held_evict(vma);

	if (i915_vma_is_map_and_fenceable(vma)) {
		/* Force a pagefault for domain tracking on next user access */
		i915_vma_revoke_mmap(vma);

		/*
		 * Check that we have flushed all writes through the GGTT
		 * before the unbind, other due to non-strict nature of those
		 * indirect writes they may end up referencing the GGTT PTE
		 * after the unbind.
		 *
		 * Note that we may be concurrently poking at the GGTT_WRITE
		 * bit from set-domain, as we mark all GGTT vma associated
		 * with an object. We know this is for another vma, as we
		 * are currently unbinding this one -- so if this vma will be
		 * reused, it will be refaulted and have its dirty bit set
		 * before the next write.
		 */
		i915_vma_flush_writes(vma);

		/* release the fence reg, if any */
		i915_vma_revoke_fence(vma);

		__i915_vma_iounmap(vma);
		clear_bit(I915_VMA_CAN_FENCE_BIT, __i915_vma_flags(vma));
	}
	GEM_BUG_ON(vma->fence);
	GEM_BUG_ON(i915_vma_has_userfault(vma));

	/* Object backend must be async capable. */
	GEM_WARN_ON(async && !vma->resource->bi.pages_rsgt);

	/* If vm is not open, unbind is a nop. */
	vma_res->needs_wakeref = i915_vma_is_bound(vma, I915_VMA_GLOBAL_BIND) &&
		atomic_read(&vma->vm->open);
	trace_i915_vma_unbind(vma);

	unbind_fence = i915_vma_resource_unbind(vma_res);
	vma->resource = NULL;

	atomic_and(~(I915_VMA_BIND_MASK | I915_VMA_ERROR | I915_VMA_GGTT_WRITE),
		   &vma->flags);

	i915_vma_detach(vma);

	if (!async && unbind_fence) {
		dma_fence_wait(unbind_fence, false);
		dma_fence_put(unbind_fence);
		unbind_fence = NULL;
	}

	/*
	 * Binding itself may not have completed until the unbind fence
	 * signals, so don't drop the pages until that happens, unless the
	 * resource is async capable.
	 */
	vma_unbind_pages(vma);
	return unbind_fence;
}

int __i915_vma_unbind(struct i915_vma *vma)
{
	int ret;

	lockdep_assert_held(&vma->vm->mutex);
	assert_vma_held_evict(vma);

	if (!drm_mm_node_allocated(&vma->node))
		return 0;

	if (i915_vma_is_pinned(vma)) {
		vma_print_allocator(vma, "is pinned");
		return -EAGAIN;
	}

	/*
	 * After confirming that no one else is pinning this vma, wait for
	 * any laggards who may have crept in during the wait (through
	 * a residual pin skipping the vm->mutex) to complete.
	 */
	ret = i915_vma_sync(vma);
	if (ret)
		return ret;

	GEM_BUG_ON(i915_vma_is_active(vma));
	__i915_vma_evict(vma, false);

	drm_mm_remove_node(&vma->node); /* pairs with i915_vma_release() */
	return 0;
}

static struct dma_fence *__i915_vma_unbind_async(struct i915_vma *vma)
{
	struct dma_fence *fence;

	lockdep_assert_held(&vma->vm->mutex);

	if (!drm_mm_node_allocated(&vma->node))
		return NULL;

	if (i915_vma_is_pinned(vma) ||
	    &vma->obj->mm.rsgt->table != vma->resource->bi.pages)
		return ERR_PTR(-EAGAIN);

	/*
	 * We probably need to replace this with awaiting the fences of the
	 * object's dma_resv when the vma active goes away. When doing that
	 * we need to be careful to not add the vma_resource unbind fence
	 * immediately to the object's dma_resv, because then unbinding
	 * the next vma from the object, in case there are many, will
	 * actually await the unbinding of the previous vmas, which is
	 * undesirable.
	 */
	if (i915_sw_fence_await_active(&vma->resource->chain, &vma->active,
				       I915_ACTIVE_AWAIT_EXCL |
				       I915_ACTIVE_AWAIT_ACTIVE) < 0) {
		return ERR_PTR(-EBUSY);
	}

	fence = __i915_vma_evict(vma, true);

	drm_mm_remove_node(&vma->node); /* pairs with i915_vma_release() */

	return fence;
}

int i915_vma_unbind(struct i915_vma *vma)
{
	struct i915_address_space *vm = vma->vm;
	intel_wakeref_t wakeref = 0;
	int err;

	assert_object_held_shared(vma->obj);

	/* Optimistic wait before taking the mutex */
	err = i915_vma_sync(vma);
	if (err)
		return err;

	if (!drm_mm_node_allocated(&vma->node))
		return 0;

	if (i915_vma_is_pinned(vma)) {
		vma_print_allocator(vma, "is pinned");
		return -EAGAIN;
	}

	if (i915_vma_is_bound(vma, I915_VMA_GLOBAL_BIND))
		/* XXX not always required: nop_clear_range */
		wakeref = intel_runtime_pm_get(&vm->i915->runtime_pm);

	err = mutex_lock_interruptible_nested(&vma->vm->mutex, !wakeref);
	if (err)
		goto out_rpm;

	err = __i915_vma_unbind(vma);
	mutex_unlock(&vm->mutex);

out_rpm:
	if (wakeref)
		intel_runtime_pm_put(&vm->i915->runtime_pm, wakeref);
	return err;
}

int i915_vma_unbind_async(struct i915_vma *vma, bool trylock_vm)
{
	struct drm_i915_gem_object *obj = vma->obj;
	struct i915_address_space *vm = vma->vm;
	intel_wakeref_t wakeref = 0;
	struct dma_fence *fence;
	int err;

	/*
	 * We need the dma-resv lock since we add the
	 * unbind fence to the dma-resv object.
	 */
	assert_object_held(obj);

	if (!drm_mm_node_allocated(&vma->node))
		return 0;

	if (i915_vma_is_pinned(vma)) {
		vma_print_allocator(vma, "is pinned");
		return -EAGAIN;
	}

	if (!obj->mm.rsgt)
		return -EBUSY;

	err = dma_resv_reserve_shared(obj->base.resv, 1);
	if (err)
		return -EBUSY;

	/*
	 * It would be great if we could grab this wakeref from the
	 * async unbind work if needed, but we can't because it uses
	 * kmalloc and it's in the dma-fence signalling critical path.
	 */
	if (i915_vma_is_bound(vma, I915_VMA_GLOBAL_BIND))
		wakeref = intel_runtime_pm_get(&vm->i915->runtime_pm);

	if (trylock_vm && !mutex_trylock(&vm->mutex)) {
		err = -EBUSY;
		goto out_rpm;
	} else if (!trylock_vm) {
		err = mutex_lock_interruptible_nested(&vm->mutex, !wakeref);
		if (err)
			goto out_rpm;
	}

	fence = __i915_vma_unbind_async(vma);
	mutex_unlock(&vm->mutex);
	if (IS_ERR_OR_NULL(fence)) {
		err = PTR_ERR_OR_ZERO(fence);
		goto out_rpm;
	}

	dma_resv_add_shared_fence(obj->base.resv, fence);
	dma_fence_put(fence);

out_rpm:
	if (wakeref)
		intel_runtime_pm_put(&vm->i915->runtime_pm, wakeref);
	return err;
}

int i915_vma_unbind_unlocked(struct i915_vma *vma)
{
	int err;

	i915_gem_object_lock(vma->obj, NULL);
	err = i915_vma_unbind(vma);
	i915_gem_object_unlock(vma->obj);

	return err;
}

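/*
 * Shrinker helpers: these simply proxy the vma's backing object into or
 * out of the shrinker lists.
 */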
struct i915_vma *i915_vma_make_unshrinkable(struct i915_vma *vma)
{
	i915_gem_object_make_unshrinkable(vma->obj);
	return vma;
}

void i915_vma_make_shrinkable(struct i915_vma *vma)
{
	i915_gem_object_make_shrinkable(vma->obj);
}

void i915_vma_make_purgeable(struct i915_vma *vma)
{
	i915_gem_object_make_purgeable(vma->obj);
}

#if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
#include "selftests/i915_vma.c"
#endif

void i915_vma_module_exit(void)
{
	kmem_cache_destroy(slab_vmas);
}

int __init i915_vma_module_init(void)
{
	slab_vmas = KMEM_CACHE(i915_vma, SLAB_HWCACHE_ALIGN);
	if (!slab_vmas)
		return -ENOMEM;

	return 0;
}