/*
 * Copyright © 2016 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 *
 */

#include "i915_vma.h"

#include "i915_drv.h"
#include "intel_ringbuffer.h"
#include "intel_frontbuffer.h"

#include <drm/drm_gem.h>

#if IS_ENABLED(CONFIG_DRM_I915_ERRLOG_GEM) && IS_ENABLED(CONFIG_DRM_DEBUG_MM)

#include <linux/stackdepot.h>

static void vma_print_allocator(struct i915_vma *vma, const char *reason)
{
	unsigned long entries[12];
	struct stack_trace trace = {
		.entries = entries,
		.max_entries = ARRAY_SIZE(entries),
	};
	char buf[512];

	if (!vma->node.stack) {
		DRM_DEBUG_DRIVER("vma.node [%08llx + %08llx] %s: unknown owner\n",
				 vma->node.start, vma->node.size, reason);
		return;
	}

	depot_fetch_stack(vma->node.stack, &trace);
	snprint_stack_trace(buf, sizeof(buf), &trace, 0);
	DRM_DEBUG_DRIVER("vma.node [%08llx + %08llx] %s: inserted at %s\n",
			 vma->node.start, vma->node.size, reason, buf);
}

#else

static void vma_print_allocator(struct i915_vma *vma, const char *reason)
{
}

#endif

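/*
 * Per-timeline activity tracking: each i915_vma_active node records the last
 * request using this vma on one timeline (fence context). The nodes live in
 * vma->active, an rbtree keyed by the timeline id, while vma->last_active
 * caches the most recently used slot (see active_instance() below).
 */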
struct i915_vma_active {
	struct i915_gem_active base;
	struct i915_vma *vma;
	struct rb_node node;
	u64 timeline;
};

static void
__i915_vma_retire(struct i915_vma *vma, struct i915_request *rq)
{
	struct drm_i915_gem_object *obj = vma->obj;

	GEM_BUG_ON(!i915_vma_is_active(vma));
	if (--vma->active_count)
		return;

	GEM_BUG_ON(!drm_mm_node_allocated(&vma->node));
	list_move_tail(&vma->vm_link, &vma->vm->inactive_list);

	GEM_BUG_ON(!i915_gem_object_is_active(obj));
	if (--obj->active_count)
		return;

	/* Prune the shared fence arrays iff completely idle (inc. external) */
	if (reservation_object_trylock(obj->resv)) {
		if (reservation_object_test_signaled_rcu(obj->resv, true))
			reservation_object_add_excl_fence(obj->resv, NULL);
		reservation_object_unlock(obj->resv);
	}

	/*
	 * Bump our place on the bound list to keep it roughly in LRU order
	 * so that we don't steal from recently used but inactive objects
	 * (unless we are forced to ofc!).
	 */
	spin_lock(&rq->i915->mm.obj_lock);
	if (obj->bind_count)
		list_move_tail(&obj->mm.link, &rq->i915->mm.bound_list);
	spin_unlock(&rq->i915->mm.obj_lock);

	obj->mm.dirty = true; /* the GPU may have written to the pages */

	if (i915_gem_object_has_active_reference(obj)) {
		i915_gem_object_clear_active_reference(obj);
		i915_gem_object_put(obj);
	}
}

static void
i915_vma_retire(struct i915_gem_active *base, struct i915_request *rq)
{
	struct i915_vma_active *active =
		container_of(base, typeof(*active), base);

	__i915_vma_retire(active->vma, rq);
}

static void
i915_vma_last_retire(struct i915_gem_active *base, struct i915_request *rq)
{
	__i915_vma_retire(container_of(base, struct i915_vma, last_active), rq);
}

static struct i915_vma *
vma_create(struct drm_i915_gem_object *obj,
	   struct i915_address_space *vm,
	   const struct i915_ggtt_view *view)
{
	struct i915_vma *vma;
	struct rb_node *rb, **p;

	/* The aliasing_ppgtt should never be used directly! */
	GEM_BUG_ON(vm == &vm->i915->mm.aliasing_ppgtt->vm);

	vma = kmem_cache_zalloc(vm->i915->vmas, GFP_KERNEL);
	if (vma == NULL)
		return ERR_PTR(-ENOMEM);

	vma->active = RB_ROOT;

	init_request_active(&vma->last_active, i915_vma_last_retire);
	init_request_active(&vma->last_fence, NULL);
	vma->vm = vm;
	vma->ops = &vm->vma_ops;
	vma->obj = obj;
	vma->resv = obj->resv;
	vma->size = obj->base.size;
	vma->display_alignment = I915_GTT_MIN_ALIGNMENT;

	if (view && view->type != I915_GGTT_VIEW_NORMAL) {
		vma->ggtt_view = *view;
		if (view->type == I915_GGTT_VIEW_PARTIAL) {
			GEM_BUG_ON(range_overflows_t(u64,
						     view->partial.offset,
						     view->partial.size,
						     obj->base.size >> PAGE_SHIFT));
			vma->size = view->partial.size;
			vma->size <<= PAGE_SHIFT;
			GEM_BUG_ON(vma->size > obj->base.size);
		} else if (view->type == I915_GGTT_VIEW_ROTATED) {
			vma->size = intel_rotation_info_size(&view->rotated);
			vma->size <<= PAGE_SHIFT;
		}
	}

	if (unlikely(vma->size > vm->total))
		goto err_vma;

	GEM_BUG_ON(!IS_ALIGNED(vma->size, I915_GTT_PAGE_SIZE));

	if (i915_is_ggtt(vm)) {
		if (unlikely(overflows_type(vma->size, u32)))
			goto err_vma;

		vma->fence_size = i915_gem_fence_size(vm->i915, vma->size,
						      i915_gem_object_get_tiling(obj),
						      i915_gem_object_get_stride(obj));
		if (unlikely(vma->fence_size < vma->size ||
			     vma->fence_size > vm->total))
			goto err_vma;

		GEM_BUG_ON(!IS_ALIGNED(vma->fence_size, I915_GTT_MIN_ALIGNMENT));

		vma->fence_alignment = i915_gem_fence_alignment(vm->i915, vma->size,
								i915_gem_object_get_tiling(obj),
								i915_gem_object_get_stride(obj));
		GEM_BUG_ON(!is_power_of_2(vma->fence_alignment));

		/*
		 * GGTT vmas go to the head of obj->vma_list, ppGTT vmas to
		 * the tail, so that the (at most one) GGTT mapping is found
		 * first when the list is walked.
		 */
		vma->flags |= I915_VMA_GGTT;
		list_add(&vma->obj_link, &obj->vma_list);
	} else {
		list_add_tail(&vma->obj_link, &obj->vma_list);
	}

	rb = NULL;
	p = &obj->vma_tree.rb_node;
	while (*p) {
		struct i915_vma *pos;

		rb = *p;
		pos = rb_entry(rb, struct i915_vma, obj_node);
		if (i915_vma_compare(pos, vm, view) < 0)
			p = &rb->rb_right;
		else
			p = &rb->rb_left;
	}
	rb_link_node(&vma->obj_node, rb, p);
	rb_insert_color(&vma->obj_node, &obj->vma_tree);
	list_add(&vma->vm_link, &vm->unbound_list);

	return vma;

err_vma:
	kmem_cache_free(vm->i915->vmas, vma);
	return ERR_PTR(-E2BIG);
}

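/*
 * vma_lookup() walks obj->vma_tree (ordered by i915_vma_compare()) looking
 * for a vma that matches the (vm, view) pair exactly; it returns NULL if no
 * such vma has been created yet.
 */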
static struct i915_vma *
vma_lookup(struct drm_i915_gem_object *obj,
	   struct i915_address_space *vm,
	   const struct i915_ggtt_view *view)
{
	struct rb_node *rb;

	rb = obj->vma_tree.rb_node;
	while (rb) {
		struct i915_vma *vma = rb_entry(rb, struct i915_vma, obj_node);
		long cmp;

		cmp = i915_vma_compare(vma, vm, view);
		if (cmp == 0)
			return vma;

		if (cmp < 0)
			rb = rb->rb_right;
		else
			rb = rb->rb_left;
	}

	return NULL;
}

/**
 * i915_vma_instance - return the singleton instance of the VMA
 * @obj: parent &struct drm_i915_gem_object to be mapped
 * @vm: address space in which the mapping is located
 * @view: additional mapping requirements
 *
 * i915_vma_instance() looks up an existing VMA of the @obj in the @vm with
 * the same @view characteristics. If a match is not found, one is created.
 * Once created, the VMA is kept until either the object is freed, or the
 * address space is closed.
 *
 * Must be called with struct_mutex held.
 *
 * Returns the vma, or an error pointer.
 */
struct i915_vma *
i915_vma_instance(struct drm_i915_gem_object *obj,
		  struct i915_address_space *vm,
		  const struct i915_ggtt_view *view)
{
	struct i915_vma *vma;

	lockdep_assert_held(&obj->base.dev->struct_mutex);
	GEM_BUG_ON(view && !i915_is_ggtt(vm));
	GEM_BUG_ON(vm->closed);

	vma = vma_lookup(obj, vm, view);
	if (!vma)
		vma = vma_create(obj, vm, view);

	GEM_BUG_ON(!IS_ERR(vma) && i915_vma_compare(vma, vm, view));
	GEM_BUG_ON(!IS_ERR(vma) && vma_lookup(obj, vm, view) != vma);
	return vma;
}

/**
 * i915_vma_bind - Sets up PTEs for a VMA in its corresponding address space.
 * @vma: VMA to map
 * @cache_level: mapping cache level
 * @flags: flags like global or local mapping
 *
 * DMA addresses are taken from the scatter-gather table of this object (or of
 * this VMA in case of non-default GGTT views) and PTE entries set up.
 * Note that DMA addresses are also the only part of the SG table we care about.
 */
int i915_vma_bind(struct i915_vma *vma, enum i915_cache_level cache_level,
		  u32 flags)
{
	u32 bind_flags;
	u32 vma_flags;
	int ret;

	GEM_BUG_ON(!drm_mm_node_allocated(&vma->node));
	GEM_BUG_ON(vma->size > vma->node.size);

	if (GEM_DEBUG_WARN_ON(range_overflows(vma->node.start,
					      vma->node.size,
					      vma->vm->total)))
		return -ENODEV;

	if (GEM_DEBUG_WARN_ON(!flags))
		return -EINVAL;

	bind_flags = 0;
	if (flags & PIN_GLOBAL)
		bind_flags |= I915_VMA_GLOBAL_BIND;
	if (flags & PIN_USER)
		bind_flags |= I915_VMA_LOCAL_BIND;

	vma_flags = vma->flags & (I915_VMA_GLOBAL_BIND | I915_VMA_LOCAL_BIND);
	if (flags & PIN_UPDATE)
		bind_flags |= vma_flags;
	else
		bind_flags &= ~vma_flags;
	if (bind_flags == 0)
		return 0;

	GEM_BUG_ON(!vma->pages);

	trace_i915_vma_bind(vma, bind_flags);
	ret = vma->ops->bind_vma(vma, cache_level, bind_flags);
	if (ret)
		return ret;

	vma->flags |= bind_flags;
	return 0;
}

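/*
 * i915_vma_pin_iomap() maps the GGTT aperture backing this vma with
 * write-combining and pins the vma (and its fence) so that the mapping stays
 * valid until a matching i915_vma_unpin_iomap(). The vma must already hold a
 * global (GGTT) binding and the device must be awake.
 */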
void __iomem *i915_vma_pin_iomap(struct i915_vma *vma)
{
	void __iomem *ptr;
	int err;

	/* Access through the GTT requires the device to be awake. */
	assert_rpm_wakelock_held(vma->vm->i915);

	lockdep_assert_held(&vma->vm->i915->drm.struct_mutex);
	if (WARN_ON(!i915_vma_is_map_and_fenceable(vma))) {
		err = -ENODEV;
		goto err;
	}

	GEM_BUG_ON(!i915_vma_is_ggtt(vma));
	GEM_BUG_ON((vma->flags & I915_VMA_GLOBAL_BIND) == 0);

	ptr = vma->iomap;
	if (ptr == NULL) {
		ptr = io_mapping_map_wc(&i915_vm_to_ggtt(vma->vm)->iomap,
					vma->node.start,
					vma->node.size);
		if (ptr == NULL) {
			err = -ENOMEM;
			goto err;
		}

		vma->iomap = ptr;
	}

	__i915_vma_pin(vma);

	err = i915_vma_pin_fence(vma);
	if (err)
		goto err_unpin;

	i915_vma_set_ggtt_write(vma);
	return ptr;

err_unpin:
	__i915_vma_unpin(vma);
err:
	return IO_ERR_PTR(err);
}

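/*
 * Writes through a WC iomap of the GGTT aperture may still be buffered;
 * before the pages can be handed to anyone else those writes must be flushed
 * out to memory. i915_vma_flush_writes() does so iff the vma has been marked
 * as carrying a pending GGTT write.
 */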
void i915_vma_flush_writes(struct i915_vma *vma)
{
	if (!i915_vma_has_ggtt_write(vma))
		return;

	i915_gem_flush_ggtt_writes(vma->vm->i915);

	i915_vma_unset_ggtt_write(vma);
}

void i915_vma_unpin_iomap(struct i915_vma *vma)
{
	lockdep_assert_held(&vma->vm->i915->drm.struct_mutex);

	GEM_BUG_ON(vma->iomap == NULL);

	i915_vma_flush_writes(vma);

	i915_vma_unpin_fence(vma);
	i915_vma_unpin(vma);
}

void i915_vma_unpin_and_release(struct i915_vma **p_vma, unsigned int flags)
{
	struct i915_vma *vma;
	struct drm_i915_gem_object *obj;

	vma = fetch_and_zero(p_vma);
	if (!vma)
		return;

	obj = vma->obj;
	GEM_BUG_ON(!obj);

	i915_vma_unpin(vma);
	i915_vma_close(vma);

	if (flags & I915_VMA_RELEASE_MAP)
		i915_gem_object_unpin_map(obj);

	__i915_gem_object_release_unless_active(obj);
}

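/*
 * i915_vma_misplaced() reports whether the vma's current drm_mm node fails to
 * satisfy the size, alignment or placement constraints encoded in @flags, in
 * which case the caller is expected to unbind and rebind it elsewhere.
 */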
bool i915_vma_misplaced(const struct i915_vma *vma,
			u64 size, u64 alignment, u64 flags)
{
	if (!drm_mm_node_allocated(&vma->node))
		return false;

	if (vma->node.size < size)
		return true;

	GEM_BUG_ON(alignment && !is_power_of_2(alignment));
	if (alignment && !IS_ALIGNED(vma->node.start, alignment))
		return true;

	if (flags & PIN_MAPPABLE && !i915_vma_is_map_and_fenceable(vma))
		return true;

	if (flags & PIN_OFFSET_BIAS &&
	    vma->node.start < (flags & PIN_OFFSET_MASK))
		return true;

	if (flags & PIN_OFFSET_FIXED &&
	    vma->node.start != (flags & PIN_OFFSET_MASK))
		return true;

	return false;
}

void __i915_vma_set_map_and_fenceable(struct i915_vma *vma)
{
	bool mappable, fenceable;

	GEM_BUG_ON(!i915_vma_is_ggtt(vma));
	GEM_BUG_ON(!vma->fence_size);

	/*
	 * Fence registers are not used with rotated views (rotation is
	 * instead performed via GGTT page remapping), so skip the
	 * mappable/fenceable bookkeeping for them.
	 */
	if (vma->ggtt_view.type == I915_GGTT_VIEW_ROTATED)
		return;

	fenceable = (vma->node.size >= vma->fence_size &&
		     IS_ALIGNED(vma->node.start, vma->fence_alignment));

	mappable = vma->node.start + vma->fence_size <= i915_vm_to_ggtt(vma->vm)->mappable_end;

	if (mappable && fenceable)
		vma->flags |= I915_VMA_CAN_FENCE;
	else
		vma->flags &= ~I915_VMA_CAN_FENCE;
}

static bool color_differs(struct drm_mm_node *node, unsigned long color)
{
	return node->allocated && node->color != color;
}

bool i915_gem_valid_gtt_space(struct i915_vma *vma, unsigned long cache_level)
{
	struct drm_mm_node *node = &vma->node;
	struct drm_mm_node *other;

	/*
	 * On some machines we have to be careful when putting differing types
	 * of snoopable memory together to avoid the prefetcher crossing memory
	 * domains and dying. During vm initialisation, we decide whether or
	 * not these constraints apply and set the drm_mm.color_adjust
	 * appropriately.
	 */
	if (vma->vm->mm.color_adjust == NULL)
		return true;

	/* Only valid to be called on an already inserted vma */
	GEM_BUG_ON(!drm_mm_node_allocated(node));
	GEM_BUG_ON(list_empty(&node->node_list));

	other = list_prev_entry(node, node_list);
	if (color_differs(other, cache_level) && !drm_mm_hole_follows(other))
		return false;

	other = list_next_entry(node, node_list);
	if (color_differs(other, cache_level) && !drm_mm_hole_follows(node))
		return false;

	return true;
}

static void assert_bind_count(const struct drm_i915_gem_object *obj)
{
	/*
	 * Combine the assertion that the object is bound with the assertion
	 * that we have pinned its pages: we should never have bound the
	 * object more times than we have pinned its pages. (For complete
	 * accuracy, we assume that no one else is pinning the pages, but as
	 * a rough assertion that we will not run into problems later, this
	 * will do!)
	 */
	GEM_BUG_ON(atomic_read(&obj->mm.pages_pin_count) < obj->bind_count);
}

/**
 * i915_vma_insert - finds a slot for the vma in its address space
 * @vma: the vma
 * @size: requested size in bytes (can be larger than the VMA)
 * @alignment: required alignment
 * @flags: mask of PIN_* flags to use
 *
 * First we try to allocate some free space that meets the requirements for
 * the VMA. Failing that, if the flags permit, it will evict an old VMA,
 * preferably the oldest idle entry, to make room for the new VMA.
 *
 * Returns 0 on success, negative error code otherwise.
 */
static int
i915_vma_insert(struct i915_vma *vma, u64 size, u64 alignment, u64 flags)
{
	struct drm_i915_private *dev_priv = vma->vm->i915;
	unsigned int cache_level;
	u64 start, end;
	int ret;

	GEM_BUG_ON(i915_vma_is_closed(vma));
	GEM_BUG_ON(vma->flags & (I915_VMA_GLOBAL_BIND | I915_VMA_LOCAL_BIND));
	GEM_BUG_ON(drm_mm_node_allocated(&vma->node));

	size = max(size, vma->size);
	alignment = max(alignment, vma->display_alignment);
	if (flags & PIN_MAPPABLE) {
		size = max_t(typeof(size), size, vma->fence_size);
		alignment = max_t(typeof(alignment),
				  alignment, vma->fence_alignment);
	}

	GEM_BUG_ON(!IS_ALIGNED(size, I915_GTT_PAGE_SIZE));
	GEM_BUG_ON(!IS_ALIGNED(alignment, I915_GTT_MIN_ALIGNMENT));
	GEM_BUG_ON(!is_power_of_2(alignment));

	start = flags & PIN_OFFSET_BIAS ? flags & PIN_OFFSET_MASK : 0;
	GEM_BUG_ON(!IS_ALIGNED(start, I915_GTT_PAGE_SIZE));

	end = vma->vm->total;
	if (flags & PIN_MAPPABLE)
		end = min_t(u64, end, dev_priv->ggtt.mappable_end);
	if (flags & PIN_ZONE_4G)
		end = min_t(u64, end, (1ULL << 32) - I915_GTT_PAGE_SIZE);
	GEM_BUG_ON(!IS_ALIGNED(end, I915_GTT_PAGE_SIZE));

	/*
	 * If binding the object/GGTT view requires more space than the entire
	 * aperture has, reject it early before evicting everything in a vain
	 * attempt to find space.
	 */
	if (size > end) {
		DRM_DEBUG("Attempting to bind an object larger than the aperture: request=%llu > %s aperture=%llu\n",
			  size, flags & PIN_MAPPABLE ? "mappable" : "total",
			  end);
		return -ENOSPC;
	}

	if (vma->obj) {
		ret = i915_gem_object_pin_pages(vma->obj);
		if (ret)
			return ret;

		cache_level = vma->obj->cache_level;
	} else {
		cache_level = 0;
	}

	GEM_BUG_ON(vma->pages);

	ret = vma->ops->set_pages(vma);
	if (ret)
		goto err_unpin;

	if (flags & PIN_OFFSET_FIXED) {
		u64 offset = flags & PIN_OFFSET_MASK;

		if (!IS_ALIGNED(offset, alignment) ||
		    range_overflows(offset, size, end)) {
			ret = -EINVAL;
			goto err_clear;
		}

		ret = i915_gem_gtt_reserve(vma->vm, &vma->node,
					   size, offset, cache_level,
					   flags);
		if (ret)
			goto err_clear;
	} else {
		/*
		 * We only support huge gtt pages through the 48b PPGTT,
		 * however we also don't want to force any alignment for
		 * objects which need to be tightly packed into the low 32bits.
		 *
		 * Note that we assume that GGTT are limited to 4GiB for the
		 * foreseeable future. See also i915_ggtt_offset().
		 */
		if (upper_32_bits(end - 1) &&
		    vma->page_sizes.sg > I915_GTT_PAGE_SIZE) {
			/*
			 * We can't mix 64K and 4K PTEs in the same page-table
			 * (2M block), and so to avoid the ugliness and
			 * complexity of coloring we opt for just aligning 64K
			 * objects to 2M.
			 */
			u64 page_alignment =
				rounddown_pow_of_two(vma->page_sizes.sg |
						     I915_GTT_PAGE_SIZE_2M);

			/*
			 * Check we don't expand for the limited Global GTT
			 * (mappable aperture is even more precious!). This
			 * also checks that we exclude the aliasing-ppgtt.
			 */
			GEM_BUG_ON(i915_vma_is_ggtt(vma));

			alignment = max(alignment, page_alignment);

			if (vma->page_sizes.sg & I915_GTT_PAGE_SIZE_64K)
				size = round_up(size, I915_GTT_PAGE_SIZE_2M);
		}

		ret = i915_gem_gtt_insert(vma->vm, &vma->node,
					  size, alignment, cache_level,
					  start, end, flags);
		if (ret)
			goto err_clear;

		GEM_BUG_ON(vma->node.start < start);
		GEM_BUG_ON(vma->node.start + vma->node.size > end);
	}
	GEM_BUG_ON(!drm_mm_node_allocated(&vma->node));
	GEM_BUG_ON(!i915_gem_valid_gtt_space(vma, cache_level));

	list_move_tail(&vma->vm_link, &vma->vm->inactive_list);

	if (vma->obj) {
		struct drm_i915_gem_object *obj = vma->obj;

		spin_lock(&dev_priv->mm.obj_lock);
		list_move_tail(&obj->mm.link, &dev_priv->mm.bound_list);
		obj->bind_count++;
		spin_unlock(&dev_priv->mm.obj_lock);

		assert_bind_count(obj);
	}

	return 0;

err_clear:
	vma->ops->clear_pages(vma);
err_unpin:
	if (vma->obj)
		i915_gem_object_unpin_pages(vma->obj);
	return ret;
}

static void
i915_vma_remove(struct i915_vma *vma)
{
	struct drm_i915_private *i915 = vma->vm->i915;

	GEM_BUG_ON(!drm_mm_node_allocated(&vma->node));
	GEM_BUG_ON(vma->flags & (I915_VMA_GLOBAL_BIND | I915_VMA_LOCAL_BIND));

	vma->ops->clear_pages(vma);

	drm_mm_remove_node(&vma->node);
	list_move_tail(&vma->vm_link, &vma->vm->unbound_list);

	/*
	 * Since the unbound list is global, only move the object to that
	 * list once no more VMAs of it remain bound.
	 */
	if (vma->obj) {
		struct drm_i915_gem_object *obj = vma->obj;

		spin_lock(&i915->mm.obj_lock);
		if (--obj->bind_count == 0)
			list_move_tail(&obj->mm.link, &i915->mm.unbound_list);
		spin_unlock(&i915->mm.obj_lock);

		/*
		 * And finally, now that the object is completely decoupled
		 * from this vma, we can drop its hold on the backing storage
		 * and allow it to be reaped by the shrinker.
		 */
		i915_gem_object_unpin_pages(obj);
		assert_bind_count(obj);
	}
}

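/*
 * __i915_vma_do_pin() is the slow path behind i915_vma_pin(): the caller has
 * already raised the pin count (hence the __i915_vma_unpin() on the error
 * paths below). If the vma is not yet bound, it is inserted into its address
 * space, then the requested GLOBAL/USER bindings are applied and the
 * map-and-fenceable state refreshed.
 */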
int __i915_vma_do_pin(struct i915_vma *vma,
		      u64 size, u64 alignment, u64 flags)
{
	const unsigned int bound = vma->flags;
	int ret;

	lockdep_assert_held(&vma->vm->i915->drm.struct_mutex);
	GEM_BUG_ON((flags & (PIN_GLOBAL | PIN_USER)) == 0);
	GEM_BUG_ON((flags & PIN_GLOBAL) && !i915_vma_is_ggtt(vma));

	if (WARN_ON(bound & I915_VMA_PIN_OVERFLOW)) {
		ret = -EBUSY;
		goto err_unpin;
	}

	if ((bound & I915_VMA_BIND_MASK) == 0) {
		ret = i915_vma_insert(vma, size, alignment, flags);
		if (ret)
			goto err_unpin;
	}
	GEM_BUG_ON(!drm_mm_node_allocated(&vma->node));

	ret = i915_vma_bind(vma, vma->obj ? vma->obj->cache_level : 0, flags);
	if (ret)
		goto err_remove;

	GEM_BUG_ON((vma->flags & I915_VMA_BIND_MASK) == 0);

	if ((bound ^ vma->flags) & I915_VMA_GLOBAL_BIND)
		__i915_vma_set_map_and_fenceable(vma);

	GEM_BUG_ON(i915_vma_misplaced(vma, size, alignment, flags));
	return 0;

err_remove:
	if ((bound & I915_VMA_BIND_MASK) == 0) {
		i915_vma_remove(vma);
		GEM_BUG_ON(vma->pages);
		GEM_BUG_ON(vma->flags & I915_VMA_BIND_MASK);
	}
err_unpin:
	__i915_vma_unpin(vma);
	return ret;
}

void i915_vma_close(struct i915_vma *vma)
{
	lockdep_assert_held(&vma->vm->i915->drm.struct_mutex);

	GEM_BUG_ON(i915_vma_is_closed(vma));
	vma->flags |= I915_VMA_CLOSED;

	/*
	 * We defer actually closing, unbinding and destroying the VMA until
	 * the next idle point, or until the object is freed in the meantime.
	 * By postponing the unbind, we allow for it to be resurrected by the
	 * client (see i915_vma_reopen()), avoiding the work required to
	 * rebind the VMA. This is advantageous for clients that repeatedly
	 * pass the same object back and forth, temporarily opening a VMA on
	 * it and then closing it again: the VMA stays ready for reuse until
	 * the GPU next idles (see i915_vma_parked()).
	 */
	list_add_tail(&vma->closed_link, &vma->vm->i915->gt.closed_vma);
}

void i915_vma_reopen(struct i915_vma *vma)
{
	lockdep_assert_held(&vma->vm->i915->drm.struct_mutex);

	if (vma->flags & I915_VMA_CLOSED) {
		vma->flags &= ~I915_VMA_CLOSED;
		list_del(&vma->closed_link);
	}
}

static void __i915_vma_destroy(struct i915_vma *vma)
{
	struct drm_i915_private *i915 = vma->vm->i915;
	struct i915_vma_active *iter, *n;

	GEM_BUG_ON(vma->node.allocated);
	GEM_BUG_ON(vma->fence);

	GEM_BUG_ON(i915_gem_active_isset(&vma->last_fence));

	list_del(&vma->obj_link);
	list_del(&vma->vm_link);
	if (vma->obj)
		rb_erase(&vma->obj_node, &vma->obj->vma_tree);

	rbtree_postorder_for_each_entry_safe(iter, n, &vma->active, node) {
		GEM_BUG_ON(i915_gem_active_isset(&iter->base));
		kfree(iter);
	}

	kmem_cache_free(i915->vmas, vma);
}

void i915_vma_destroy(struct i915_vma *vma)
{
	lockdep_assert_held(&vma->vm->i915->drm.struct_mutex);

	GEM_BUG_ON(i915_vma_is_active(vma));
	GEM_BUG_ON(i915_vma_is_pinned(vma));

	if (i915_vma_is_closed(vma))
		list_del(&vma->closed_link);

	WARN_ON(i915_vma_unbind(vma));
	__i915_vma_destroy(vma);
}

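/*
 * i915_vma_parked() runs when the GPU goes idle ("parks"): every vma that was
 * closed while still in use is finally destroyed here, leaving the
 * closed_vma list empty.
 */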
void i915_vma_parked(struct drm_i915_private *i915)
{
	struct i915_vma *vma, *next;

	list_for_each_entry_safe(vma, next, &i915->gt.closed_vma, closed_link) {
		GEM_BUG_ON(!i915_vma_is_closed(vma));
		i915_vma_destroy(vma);
	}

	GEM_BUG_ON(!list_empty(&i915->gt.closed_vma));
}

static void __i915_vma_iounmap(struct i915_vma *vma)
{
	GEM_BUG_ON(i915_vma_is_pinned(vma));

	if (vma->iomap == NULL)
		return;

	io_mapping_unmap(vma->iomap);
	vma->iomap = NULL;
}

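/*
 * i915_vma_revoke_mmap() zaps any userspace PTEs pointing at this vma's GGTT
 * range, forcing a fresh pagefault (and hence revalidation) on the next CPU
 * access through the object's mmap offset.
 */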
void i915_vma_revoke_mmap(struct i915_vma *vma)
{
	struct drm_vma_offset_node *node = &vma->obj->base.vma_node;
	u64 vma_offset;

	lockdep_assert_held(&vma->vm->i915->drm.struct_mutex);

	if (!i915_vma_has_userfault(vma))
		return;

	GEM_BUG_ON(!i915_vma_is_map_and_fenceable(vma));
	GEM_BUG_ON(!vma->obj->userfault_count);

	vma_offset = vma->ggtt_view.partial.offset << PAGE_SHIFT;
	unmap_mapping_range(vma->vm->i915->drm.anon_inode->i_mapping,
			    drm_vma_node_offset_addr(node) + vma_offset,
			    vma->size,
			    1);

	i915_vma_unset_userfault(vma);
	if (!--vma->obj->userfault_count)
		list_del(&vma->obj->userfault_link);
}

static void export_fence(struct i915_vma *vma,
			 struct i915_request *rq,
			 unsigned int flags)
{
	struct reservation_object *resv = vma->resv;

	/*
	 * Ignore errors from failing to allocate the new fence, we can't
	 * handle an error right now. Worst case should be missed
	 * synchronisation leading to rendering corruption.
	 */
	reservation_object_lock(resv, NULL);
	if (flags & EXEC_OBJECT_WRITE)
		reservation_object_add_excl_fence(resv, &rq->fence);
	else if (reservation_object_reserve_shared(resv, 1) == 0)
		reservation_object_add_shared_fence(resv, &rq->fence);
	reservation_object_unlock(resv);
}

static struct i915_gem_active *active_instance(struct i915_vma *vma, u64 idx)
{
	struct i915_vma_active *active;
	struct rb_node **p, *parent;
	struct i915_request *old;

	/*
	 * We track the most recently used timeline in vma->last_active to
	 * skip the rbtree search in the common case; under typical loads we
	 * never need the rbtree at all. The last_active slot can be reused
	 * directly if it is already retired or if it tracks the same
	 * timeline (fence context) as the new request; only when a second
	 * timeline shows up is the previous last_active demoted into the
	 * rbtree of per-timeline slots.
	 */
	old = i915_gem_active_raw(&vma->last_active,
				  &vma->vm->i915->drm.struct_mutex);
	if (!old || old->fence.context == idx)
		goto out;

	/* Move the currently active fence into the rbtree */
	idx = old->fence.context;

	parent = NULL;
	p = &vma->active.rb_node;
	while (*p) {
		parent = *p;

		active = rb_entry(parent, struct i915_vma_active, node);
		if (active->timeline == idx)
			goto replace;

		if (active->timeline < idx)
			p = &parent->rb_right;
		else
			p = &parent->rb_left;
	}

	active = kmalloc(sizeof(*active), GFP_KERNEL);

	/* kmalloc may have retired vma->last_active (thanks, shrinker)! */
	if (unlikely(!i915_gem_active_raw(&vma->last_active,
					  &vma->vm->i915->drm.struct_mutex))) {
		kfree(active);
		goto out;
	}

	if (unlikely(!active))
		return ERR_PTR(-ENOMEM);

	init_request_active(&active->base, i915_vma_retire);
	active->vma = vma;
	active->timeline = idx;

	rb_link_node(&active->node, parent, p);
	rb_insert_color(&active->node, &vma->active);

replace:
	/*
	 * Overwrite the previous active slot in the rbtree with last_active,
	 * leaving last_active zeroed. If the previous slot is still active,
	 * we must be careful as we now only expect to receive one retire
	 * callback, not two, and so must undo the active counting for the
	 * overwritten slot.
	 */
	if (i915_gem_active_isset(&active->base)) {
		/* Retire ourselves from the old rq->active_list */
		__list_del_entry(&active->base.link);
		vma->active_count--;
		GEM_BUG_ON(!vma->active_count);
	}
	GEM_BUG_ON(list_empty(&vma->last_active.link));
	list_replace_init(&vma->last_active.link, &active->base.link);
	active->base.request = fetch_and_zero(&vma->last_active.request);

out:
	return &vma->last_active;
}

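/*
 * i915_vma_move_to_active() marks the vma (and its object) as in use by @rq:
 * the per-timeline active tracker is updated, the object is moved onto the
 * vm's active list, the read/write domains are adjusted, and the request's
 * fence is exported to the object's reservation object for external
 * synchronisation.
 */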
int i915_vma_move_to_active(struct i915_vma *vma,
			    struct i915_request *rq,
			    unsigned int flags)
{
	struct drm_i915_gem_object *obj = vma->obj;
	struct i915_gem_active *active;

	lockdep_assert_held(&rq->i915->drm.struct_mutex);
	GEM_BUG_ON(!drm_mm_node_allocated(&vma->node));

	active = active_instance(vma, rq->fence.context);
	if (IS_ERR(active))
		return PTR_ERR(active);

	/*
	 * Add a reference if we're newly entering the active list.
	 * The order in which we add operations to the retirement queue is
	 * vital here: mark_active adds to the start of the callback list,
	 * such that subsequent callbacks are called first. Therefore we add
	 * the active reference first and queue for it to be dropped
	 * *last*.
	 */
	if (!i915_gem_active_isset(active) && !vma->active_count++) {
		list_move_tail(&vma->vm_link, &vma->vm->active_list);
		obj->active_count++;
	}
	i915_gem_active_set(active, rq);
	GEM_BUG_ON(!i915_vma_is_active(vma));
	GEM_BUG_ON(!obj->active_count);

	obj->write_domain = 0;
	if (flags & EXEC_OBJECT_WRITE) {
		obj->write_domain = I915_GEM_DOMAIN_RENDER;

		if (intel_fb_obj_invalidate(obj, ORIGIN_CS))
			i915_gem_active_set(&obj->frontbuffer_write, rq);

		obj->read_domains = 0;
	}
	obj->read_domains |= I915_GEM_GPU_DOMAINS;

	if (flags & EXEC_OBJECT_NEEDS_FENCE)
		i915_gem_active_set(&vma->last_fence, rq);

	export_fence(vma, rq, flags);
	return 0;
}

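/*
 * i915_vma_unbind() waits for any outstanding GPU activity on the vma, then
 * tears down its binding: GGTT writes are flushed, the fence register and any
 * CPU mmaps are revoked, the PTEs are cleared (unless the address space is
 * already closed) and the drm_mm node is released. Returns -EBUSY if the vma
 * is still pinned.
 */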
int i915_vma_unbind(struct i915_vma *vma)
{
	int ret;

	lockdep_assert_held(&vma->vm->i915->drm.struct_mutex);

	/*
	 * First wait upon any activity as retiring the request may
	 * have side-effects such as unpinning or even unbinding this vma.
	 */
	might_sleep();
	if (i915_vma_is_active(vma)) {
		struct i915_vma_active *active, *n;

		/*
		 * When a closed VMA is retired, it is unbound - eek.
		 * In order to prevent it from being recursively closed,
		 * take a pin on the vma so that the second unbind is
		 * aborted.
		 *
		 * Even more scary is that the retire callback may free
		 * the object (last active vma). To prevent the explosion
		 * we defer the actual object free to a worker that can
		 * only proceed once it acquires the struct_mutex (which we
		 * currently hold, therefore it cannot free this object
		 * before we are finished).
		 */
		__i915_vma_pin(vma);

		ret = i915_gem_active_retire(&vma->last_active,
					     &vma->vm->i915->drm.struct_mutex);
		if (ret)
			goto unpin;

		rbtree_postorder_for_each_entry_safe(active, n,
						     &vma->active, node) {
			ret = i915_gem_active_retire(&active->base,
						     &vma->vm->i915->drm.struct_mutex);
			if (ret)
				goto unpin;
		}

		ret = i915_gem_active_retire(&vma->last_fence,
					     &vma->vm->i915->drm.struct_mutex);
unpin:
		__i915_vma_unpin(vma);
		if (ret)
			return ret;
	}
	GEM_BUG_ON(i915_vma_is_active(vma));

	if (i915_vma_is_pinned(vma)) {
		vma_print_allocator(vma, "is pinned");
		return -EBUSY;
	}

	if (!drm_mm_node_allocated(&vma->node))
		return 0;

	if (i915_vma_is_map_and_fenceable(vma)) {
		/*
		 * Check that we have flushed all writes through the GGTT
		 * before the unbind; otherwise, due to the non-strict nature
		 * of those indirect writes, they may end up referencing the
		 * GGTT PTE after the unbind.
		 */
		i915_vma_flush_writes(vma);
		GEM_BUG_ON(i915_vma_has_ggtt_write(vma));

		/* Release the fence register _after_ flushing */
		ret = i915_vma_put_fence(vma);
		if (ret)
			return ret;

		/* Force a pagefault for domain tracking on next user access */
		i915_vma_revoke_mmap(vma);

		__i915_vma_iounmap(vma);
		vma->flags &= ~I915_VMA_CAN_FENCE;
	}
	GEM_BUG_ON(vma->fence);
	GEM_BUG_ON(i915_vma_has_userfault(vma));

	if (likely(!vma->vm->closed)) {
		trace_i915_vma_unbind(vma);
		vma->ops->unbind_vma(vma);
	}
	vma->flags &= ~(I915_VMA_GLOBAL_BIND | I915_VMA_LOCAL_BIND);

	i915_vma_remove(vma);

	return 0;
}

#if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
#include "selftests/i915_vma.c"
#endif