/*
 * Copyright © 2016 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OF THE
 * SOFTWARE.
 *
 */

#include <drm/drm_gem.h>

#include "display/intel_frontbuffer.h"

#include "gt/intel_engine.h"

#include "i915_drv.h"
#include "i915_globals.h"
#include "i915_vma.h"

static struct i915_global_vma {
	struct i915_global base;
	struct kmem_cache *slab_vmas;
} global;

struct i915_vma *i915_vma_alloc(void)
{
	return kmem_cache_zalloc(global.slab_vmas, GFP_KERNEL);
}

void i915_vma_free(struct i915_vma *vma)
{
	kmem_cache_free(global.slab_vmas, vma);
}

#if IS_ENABLED(CONFIG_DRM_I915_ERRLOG_GEM) && IS_ENABLED(CONFIG_DRM_DEBUG_MM)

#include <linux/stackdepot.h>

static void vma_print_allocator(struct i915_vma *vma, const char *reason)
{
	unsigned long *entries;
	unsigned int nr_entries;
	char buf[512];

	if (!vma->node.stack) {
		DRM_DEBUG_DRIVER("vma.node [%08llx + %08llx] %s: unknown owner\n",
				 vma->node.start, vma->node.size, reason);
		return;
	}

	nr_entries = stack_depot_fetch(vma->node.stack, &entries);
	stack_trace_snprint(buf, sizeof(buf), entries, nr_entries, 0);
	DRM_DEBUG_DRIVER("vma.node [%08llx + %08llx] %s: inserted at %s\n",
			 vma->node.start, vma->node.size, reason, buf);
}

#else

static void vma_print_allocator(struct i915_vma *vma, const char *reason)
{
}

#endif

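/*
 * Keep the shrinker's LRU roughly in retirement order: move the object to
 * the tail of the shrink list so that recently used objects are the last to
 * be reclaimed, and mark it dirty so its contents are written back if it is
 * ever evicted.
 */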
static void obj_bump_mru(struct drm_i915_gem_object *obj)
{
	struct drm_i915_private *i915 = to_i915(obj->base.dev);
	unsigned long flags;

	spin_lock_irqsave(&i915->mm.obj_lock, flags);
	list_move_tail(&obj->mm.link, &i915->mm.shrink_list);
	spin_unlock_irqrestore(&i915->mm.obj_lock, flags);

	obj->mm.dirty = true;
}

static void __i915_vma_retire(struct i915_active *ref)
{
	struct i915_vma *vma = container_of(ref, typeof(*vma), active);
	struct drm_i915_gem_object *obj = vma->obj;

	GEM_BUG_ON(!i915_gem_object_is_active(obj));
	if (--obj->active_count)
		return;

	/* Prune the shared fence arrays iff completely idle (inc. external) */
	if (reservation_object_trylock(obj->base.resv)) {
		if (reservation_object_test_signaled_rcu(obj->base.resv, true))
			reservation_object_add_excl_fence(obj->base.resv, NULL);
		reservation_object_unlock(obj->base.resv);
	}

	/*
	 * Bump our place on the bound list to keep it roughly in LRU order
	 * so that we don't steal from recently used but inactive objects
	 * (unless we are forced to, of course!).
	 */
	if (i915_gem_object_is_shrinkable(obj))
		obj_bump_mru(obj);

	i915_gem_object_put(obj); /* and drop the active reference */
}

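/*
 * vma_create() allocates and initialises a new VMA for @obj in @vm,
 * computing its size and fence constraints from the GGTT view, then inserts
 * it into the object's VMA rbtree/list and the vm's unbound list. If a
 * concurrent thread created a matching VMA first, that older instance is
 * returned and ours is freed.
 */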
static struct i915_vma *
vma_create(struct drm_i915_gem_object *obj,
	   struct i915_address_space *vm,
	   const struct i915_ggtt_view *view)
{
	struct i915_vma *vma;
	struct rb_node *rb, **p;

	/* The aliasing_ppgtt should never be used directly! */
	GEM_BUG_ON(vm == &vm->i915->mm.aliasing_ppgtt->vm);

	vma = i915_vma_alloc();
	if (vma == NULL)
		return ERR_PTR(-ENOMEM);

	vma->vm = vm;
	vma->ops = &vm->vma_ops;
	vma->obj = obj;
	vma->resv = obj->base.resv;
	vma->size = obj->base.size;
	vma->display_alignment = I915_GTT_MIN_ALIGNMENT;

	i915_active_init(vm->i915, &vma->active, __i915_vma_retire);
	INIT_ACTIVE_REQUEST(&vma->last_fence);

	INIT_LIST_HEAD(&vma->closed_link);

	if (view && view->type != I915_GGTT_VIEW_NORMAL) {
		vma->ggtt_view = *view;
		if (view->type == I915_GGTT_VIEW_PARTIAL) {
			GEM_BUG_ON(range_overflows_t(u64,
						     view->partial.offset,
						     view->partial.size,
						     obj->base.size >> PAGE_SHIFT));
			vma->size = view->partial.size;
			vma->size <<= PAGE_SHIFT;
			GEM_BUG_ON(vma->size > obj->base.size);
		} else if (view->type == I915_GGTT_VIEW_ROTATED) {
			vma->size = intel_rotation_info_size(&view->rotated);
			vma->size <<= PAGE_SHIFT;
		} else if (view->type == I915_GGTT_VIEW_REMAPPED) {
			vma->size = intel_remapped_info_size(&view->remapped);
			vma->size <<= PAGE_SHIFT;
		}
	}

	if (unlikely(vma->size > vm->total))
		goto err_vma;

	GEM_BUG_ON(!IS_ALIGNED(vma->size, I915_GTT_PAGE_SIZE));

	if (i915_is_ggtt(vm)) {
		if (unlikely(overflows_type(vma->size, u32)))
			goto err_vma;

		vma->fence_size = i915_gem_fence_size(vm->i915, vma->size,
						      i915_gem_object_get_tiling(obj),
						      i915_gem_object_get_stride(obj));
		if (unlikely(vma->fence_size < vma->size ||
			     vma->fence_size > vm->total))
			goto err_vma;

		GEM_BUG_ON(!IS_ALIGNED(vma->fence_size, I915_GTT_MIN_ALIGNMENT));

		vma->fence_alignment = i915_gem_fence_alignment(vm->i915, vma->size,
								i915_gem_object_get_tiling(obj),
								i915_gem_object_get_stride(obj));
		GEM_BUG_ON(!is_power_of_2(vma->fence_alignment));

		vma->flags |= I915_VMA_GGTT;
	}

	spin_lock(&obj->vma.lock);

	rb = NULL;
	p = &obj->vma.tree.rb_node;
	while (*p) {
		struct i915_vma *pos;
		long cmp;

		rb = *p;
		pos = rb_entry(rb, struct i915_vma, obj_node);

		/*
		 * If the view already exists in the tree, another thread
		 * already created a matching vma, so return the older
		 * instance and dispose of ours.
		 */
		cmp = i915_vma_compare(pos, vm, view);
		if (cmp == 0) {
			spin_unlock(&obj->vma.lock);
			i915_vma_free(vma);
			return pos;
		}

		if (cmp < 0)
			p = &rb->rb_right;
		else
			p = &rb->rb_left;
	}
	rb_link_node(&vma->obj_node, rb, p);
	rb_insert_color(&vma->obj_node, &obj->vma.tree);

	if (i915_vma_is_ggtt(vma))
		/*
		 * We put the GGTT vma at the start of the vma-list, followed
		 * by the ppGTT vma. This allows us to break early when
		 * iterating over only the GGTT vma for an object, see
		 * for_each_ggtt_vma().
		 */
		list_add(&vma->obj_link, &obj->vma.list);
	else
		list_add_tail(&vma->obj_link, &obj->vma.list);

	spin_unlock(&obj->vma.lock);

	mutex_lock(&vm->mutex);
	list_add(&vma->vm_link, &vm->unbound_list);
	mutex_unlock(&vm->mutex);

	return vma;

err_vma:
	i915_vma_free(vma);
	return ERR_PTR(-E2BIG);
}

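/*
 * vma_lookup() walks the object's VMA rbtree (callers hold obj->vma.lock)
 * looking for an existing VMA with the same address space and GGTT view.
 * Returns the matching VMA, or NULL if none exists yet.
 */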
static struct i915_vma *
vma_lookup(struct drm_i915_gem_object *obj,
	   struct i915_address_space *vm,
	   const struct i915_ggtt_view *view)
{
	struct rb_node *rb;

	rb = obj->vma.tree.rb_node;
	while (rb) {
		struct i915_vma *vma = rb_entry(rb, struct i915_vma, obj_node);
		long cmp;

		cmp = i915_vma_compare(vma, vm, view);
		if (cmp == 0)
			return vma;

		if (cmp < 0)
			rb = rb->rb_right;
		else
			rb = rb->rb_left;
	}

	return NULL;
}

/**
 * i915_vma_instance - return the singleton instance of the VMA
 * @obj: parent &struct drm_i915_gem_object to be mapped
 * @vm: address space in which the mapping is located
 * @view: additional mapping requirements
 *
 * i915_vma_instance() looks up an existing VMA of the @obj in the @vm with
 * the same @view characteristics. If a match is not found, one is created.
 * Once created, the VMA is kept until either the object is freed, or the
 * address space is closed.
 *
 * Must be called with struct_mutex held.
 *
 * Returns the vma, or an error pointer.
 */
struct i915_vma *
i915_vma_instance(struct drm_i915_gem_object *obj,
		  struct i915_address_space *vm,
		  const struct i915_ggtt_view *view)
{
	struct i915_vma *vma;

	GEM_BUG_ON(view && !i915_is_ggtt(vm));
	GEM_BUG_ON(vm->closed);

	spin_lock(&obj->vma.lock);
	vma = vma_lookup(obj, vm, view);
	spin_unlock(&obj->vma.lock);

	/* vma_create() will resolve the race if another creates the vma */
	if (unlikely(!vma))
		vma = vma_create(obj, vm, view);

	GEM_BUG_ON(!IS_ERR(vma) && i915_vma_compare(vma, vm, view));
	return vma;
}

/**
 * i915_vma_bind - Sets up PTEs for a VMA in its corresponding address space.
 * @vma: VMA to map
 * @cache_level: mapping cache level
 * @flags: flags like global or local mapping
 *
 * DMA addresses are taken from the scatter-gather table of this object (or of
 * this VMA in case of non-default GGTT views) and PTE entries set up.
 * Note that DMA addresses are also the only part of the SG table we care about.
 */
int i915_vma_bind(struct i915_vma *vma, enum i915_cache_level cache_level,
		  u32 flags)
{
	u32 bind_flags;
	u32 vma_flags;
	int ret;

	GEM_BUG_ON(!drm_mm_node_allocated(&vma->node));
	GEM_BUG_ON(vma->size > vma->node.size);

	if (GEM_DEBUG_WARN_ON(range_overflows(vma->node.start,
					      vma->node.size,
					      vma->vm->total)))
		return -ENODEV;

	if (GEM_DEBUG_WARN_ON(!flags))
		return -EINVAL;

	bind_flags = 0;
	if (flags & PIN_GLOBAL)
		bind_flags |= I915_VMA_GLOBAL_BIND;
	if (flags & PIN_USER)
		bind_flags |= I915_VMA_LOCAL_BIND;

	vma_flags = vma->flags & (I915_VMA_GLOBAL_BIND | I915_VMA_LOCAL_BIND);
	if (flags & PIN_UPDATE)
		bind_flags |= vma_flags;
	else
		bind_flags &= ~vma_flags;
	if (bind_flags == 0)
		return 0;

	GEM_BUG_ON(!vma->pages);

	trace_i915_vma_bind(vma, bind_flags);
	ret = vma->ops->bind_vma(vma, cache_level, bind_flags);
	if (ret)
		return ret;

	vma->flags |= bind_flags;
	return 0;
}

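/*
 * i915_vma_pin_iomap() maps the GGTT VMA through the mappable aperture: it
 * creates (or reuses) a write-combined iomapping of the VMA's range, takes
 * an extra pin on the VMA and acquires a fence for it. Release with
 * i915_vma_unpin_iomap().
 *
 * Illustrative use (a sketch only, error handling elided; "value" and
 * "offset" are placeholders):
 *
 *	void __iomem *map = i915_vma_pin_iomap(vma);
 *	if (!IS_ERR(map)) {
 *		writel(value, map + offset);
 *		i915_vma_unpin_iomap(vma);
 *	}
 *
 * Returns a valid iomapped pointer or IO_ERR_PTR(-errno) on failure.
 */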
void __iomem *i915_vma_pin_iomap(struct i915_vma *vma)
{
	void __iomem *ptr;
	int err;

	/* Access through the GTT requires the device to be awake. */
	assert_rpm_wakelock_held(&vma->vm->i915->runtime_pm);

	lockdep_assert_held(&vma->vm->i915->drm.struct_mutex);
	if (WARN_ON(!i915_vma_is_map_and_fenceable(vma))) {
		err = -ENODEV;
		goto err;
	}

	GEM_BUG_ON(!i915_vma_is_ggtt(vma));
	GEM_BUG_ON((vma->flags & I915_VMA_GLOBAL_BIND) == 0);

	ptr = vma->iomap;
	if (ptr == NULL) {
		ptr = io_mapping_map_wc(&i915_vm_to_ggtt(vma->vm)->iomap,
					vma->node.start,
					vma->node.size);
		if (ptr == NULL) {
			err = -ENOMEM;
			goto err;
		}

		vma->iomap = ptr;
	}

	__i915_vma_pin(vma);

	err = i915_vma_pin_fence(vma);
	if (err)
		goto err_unpin;

	i915_vma_set_ggtt_write(vma);
	return ptr;

err_unpin:
	__i915_vma_unpin(vma);
err:
	return IO_ERR_PTR(err);
}

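/*
 * Flush any writes that went through the GGTT write-combining mapping before
 * the pages may be reused or unbound; a no-op if no GGTT write is pending.
 */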
void i915_vma_flush_writes(struct i915_vma *vma)
{
	if (!i915_vma_has_ggtt_write(vma))
		return;

	i915_gem_flush_ggtt_writes(vma->vm->i915);

	i915_vma_unset_ggtt_write(vma);
}

void i915_vma_unpin_iomap(struct i915_vma *vma)
{
	lockdep_assert_held(&vma->vm->i915->drm.struct_mutex);

	GEM_BUG_ON(vma->iomap == NULL);

	i915_vma_flush_writes(vma);

	i915_vma_unpin_fence(vma);
	i915_vma_unpin(vma);
}

void i915_vma_unpin_and_release(struct i915_vma **p_vma, unsigned int flags)
{
	struct i915_vma *vma;
	struct drm_i915_gem_object *obj;

	vma = fetch_and_zero(p_vma);
	if (!vma)
		return;

	obj = vma->obj;
	GEM_BUG_ON(!obj);

	i915_vma_unpin(vma);
	i915_vma_close(vma);

	if (flags & I915_VMA_RELEASE_MAP)
		i915_gem_object_unpin_map(obj);

	i915_gem_object_put(obj);
}

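/*
 * i915_vma_misplaced() reports whether the VMA's current drm_mm node fails
 * to satisfy the requested size, alignment or PIN_* placement constraints,
 * i.e. whether it would have to be unbound and rebound to honour them.
 */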
bool i915_vma_misplaced(const struct i915_vma *vma,
			u64 size, u64 alignment, u64 flags)
{
	if (!drm_mm_node_allocated(&vma->node))
		return false;

	if (vma->node.size < size)
		return true;

	GEM_BUG_ON(alignment && !is_power_of_2(alignment));
	if (alignment && !IS_ALIGNED(vma->node.start, alignment))
		return true;

	if (flags & PIN_MAPPABLE && !i915_vma_is_map_and_fenceable(vma))
		return true;

	if (flags & PIN_OFFSET_BIAS &&
	    vma->node.start < (flags & PIN_OFFSET_MASK))
		return true;

	if (flags & PIN_OFFSET_FIXED &&
	    vma->node.start != (flags & PIN_OFFSET_MASK))
		return true;

	return false;
}

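/*
 * Recompute whether the bound GGTT VMA is usable through the mappable
 * aperture and can be covered by a fence register: the node must be large
 * enough and suitably aligned for a fence, and its fenceable range must lie
 * entirely below mappable_end.
 */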
void __i915_vma_set_map_and_fenceable(struct i915_vma *vma)
{
	bool mappable, fenceable;

	GEM_BUG_ON(!i915_vma_is_ggtt(vma));
	GEM_BUG_ON(!vma->fence_size);

	fenceable = (vma->node.size >= vma->fence_size &&
		     IS_ALIGNED(vma->node.start, vma->fence_alignment));

	mappable = vma->node.start + vma->fence_size <= i915_vm_to_ggtt(vma->vm)->mappable_end;

	if (mappable && fenceable)
		vma->flags |= I915_VMA_CAN_FENCE;
	else
		vma->flags &= ~I915_VMA_CAN_FENCE;
}

static bool color_differs(struct drm_mm_node *node, unsigned long color)
{
	return node->allocated && node->color != color;
}

bool i915_gem_valid_gtt_space(struct i915_vma *vma, unsigned long cache_level)
{
	struct drm_mm_node *node = &vma->node;
	struct drm_mm_node *other;

	/*
	 * On some machines we have to be careful when putting differing types
	 * of snoopable memory together to avoid the prefetcher crossing
	 * memory domains and dying. During vm initialisation, we decide
	 * whether or not these constraints apply and set the
	 * drm_mm.color_adjust appropriately.
	 */
	if (vma->vm->mm.color_adjust == NULL)
		return true;

	/* Only valid to be called on an already inserted vma */
	GEM_BUG_ON(!drm_mm_node_allocated(node));
	GEM_BUG_ON(list_empty(&node->node_list));

	other = list_prev_entry(node, node_list);
	if (color_differs(other, cache_level) && !drm_mm_hole_follows(other))
		return false;

	other = list_next_entry(node, node_list);
	if (color_differs(other, cache_level) && !drm_mm_hole_follows(node))
		return false;

	return true;
}

static void assert_bind_count(const struct drm_i915_gem_object *obj)
{
	/*
	 * Combine the assertion that the object is bound and that we have
	 * pinned its pages. But we should never have bound the object
	 * more than we have pinned its pages. (For complete accuracy, we
	 * assume that no one else is pinning the pages, but as a rough
	 * assertion that we will not run into problems later, this will do!)
	 */
	GEM_BUG_ON(atomic_read(&obj->mm.pages_pin_count) < atomic_read(&obj->bind_count));
}

/**
 * i915_vma_insert - finds a slot for the vma in its address space
 * @vma: the vma
 * @size: requested size in bytes (can be larger than the VMA)
 * @alignment: required alignment
 * @flags: mask of PIN_* flags to use
 *
 * First we try to allocate some free space that meets the requirements for
 * the VMA. Failing that, if the flags permit, it will evict an old VMA,
 * attempting to find space for it. The vma slot will be available for
 * pinning until the vma is unbound (i915_vma_unbind()).
 *
 * Returns 0 on success, negative error code otherwise.
 */
static int
i915_vma_insert(struct i915_vma *vma, u64 size, u64 alignment, u64 flags)
{
	struct drm_i915_private *dev_priv = vma->vm->i915;
	unsigned int cache_level;
	u64 start, end;
	int ret;

	GEM_BUG_ON(i915_vma_is_closed(vma));
	GEM_BUG_ON(vma->flags & (I915_VMA_GLOBAL_BIND | I915_VMA_LOCAL_BIND));
	GEM_BUG_ON(drm_mm_node_allocated(&vma->node));

	size = max(size, vma->size);
	alignment = max(alignment, vma->display_alignment);
	if (flags & PIN_MAPPABLE) {
		size = max_t(typeof(size), size, vma->fence_size);
		alignment = max_t(typeof(alignment),
				  alignment, vma->fence_alignment);
	}

	GEM_BUG_ON(!IS_ALIGNED(size, I915_GTT_PAGE_SIZE));
	GEM_BUG_ON(!IS_ALIGNED(alignment, I915_GTT_MIN_ALIGNMENT));
	GEM_BUG_ON(!is_power_of_2(alignment));

	start = flags & PIN_OFFSET_BIAS ? flags & PIN_OFFSET_MASK : 0;
	GEM_BUG_ON(!IS_ALIGNED(start, I915_GTT_PAGE_SIZE));

	end = vma->vm->total;
	if (flags & PIN_MAPPABLE)
		end = min_t(u64, end, dev_priv->ggtt.mappable_end);
	if (flags & PIN_ZONE_4G)
		end = min_t(u64, end, (1ULL << 32) - I915_GTT_PAGE_SIZE);
	GEM_BUG_ON(!IS_ALIGNED(end, I915_GTT_PAGE_SIZE));

	/*
	 * If binding the object/GGTT view requires more space than the
	 * entire aperture has, reject it early before evicting everything
	 * in a vain attempt to find space.
	 */
	if (size > end) {
		DRM_DEBUG("Attempting to bind an object larger than the aperture: request=%llu > %s aperture=%llu\n",
			  size, flags & PIN_MAPPABLE ? "mappable" : "total",
			  end);
		return -ENOSPC;
	}

	if (vma->obj) {
		ret = i915_gem_object_pin_pages(vma->obj);
		if (ret)
			return ret;

		cache_level = vma->obj->cache_level;
	} else {
		cache_level = 0;
	}

	GEM_BUG_ON(vma->pages);

	ret = vma->ops->set_pages(vma);
	if (ret)
		goto err_unpin;

	if (flags & PIN_OFFSET_FIXED) {
		u64 offset = flags & PIN_OFFSET_MASK;
		if (!IS_ALIGNED(offset, alignment) ||
		    range_overflows(offset, size, end)) {
			ret = -EINVAL;
			goto err_clear;
		}

		ret = i915_gem_gtt_reserve(vma->vm, &vma->node,
					   size, offset, cache_level,
					   flags);
		if (ret)
			goto err_clear;
	} else {
		/*
		 * Huge gtt pages are only used through the 48b PPGTT, and
		 * we do not want to force any extra alignment onto objects
		 * that must be packed into the low 32 bits. Only apply the
		 * huge-page alignment below when the range extends beyond
		 * 4GiB and the object actually has larger page sizes
		 * available.
		 */
		if (upper_32_bits(end - 1) &&
		    vma->page_sizes.sg > I915_GTT_PAGE_SIZE) {
			/*
			 * We can't mix 64K and 4K PTEs in the same page-table
			 * (2M block), and so to avoid the ugliness and
			 * complexity of coloring we opt for just aligning 64K
			 * objects to 2M.
			 */
			u64 page_alignment =
				rounddown_pow_of_two(vma->page_sizes.sg |
						     I915_GTT_PAGE_SIZE_2M);

			/*
			 * Check we don't expand for the limited Global GTT
			 * (mappable aperture is even more precious!). This
			 * also checks that we exclude the aliasing-ppgtt.
			 */
			GEM_BUG_ON(i915_vma_is_ggtt(vma));

			alignment = max(alignment, page_alignment);

			if (vma->page_sizes.sg & I915_GTT_PAGE_SIZE_64K)
				size = round_up(size, I915_GTT_PAGE_SIZE_2M);
		}

		ret = i915_gem_gtt_insert(vma->vm, &vma->node,
					  size, alignment, cache_level,
					  start, end, flags);
		if (ret)
			goto err_clear;

		GEM_BUG_ON(vma->node.start < start);
		GEM_BUG_ON(vma->node.start + vma->node.size > end);
	}
	GEM_BUG_ON(!drm_mm_node_allocated(&vma->node));
	GEM_BUG_ON(!i915_gem_valid_gtt_space(vma, cache_level));

	mutex_lock(&vma->vm->mutex);
	list_move_tail(&vma->vm_link, &vma->vm->bound_list);
	mutex_unlock(&vma->vm->mutex);

	if (vma->obj) {
		atomic_inc(&vma->obj->bind_count);
		assert_bind_count(vma->obj);
	}

	return 0;

err_clear:
	vma->ops->clear_pages(vma);
err_unpin:
	if (vma->obj)
		i915_gem_object_unpin_pages(vma->obj);
	return ret;
}

static void
i915_vma_remove(struct i915_vma *vma)
{
	GEM_BUG_ON(!drm_mm_node_allocated(&vma->node));
	GEM_BUG_ON(vma->flags & (I915_VMA_GLOBAL_BIND | I915_VMA_LOCAL_BIND));

	vma->ops->clear_pages(vma);

	mutex_lock(&vma->vm->mutex);
	drm_mm_remove_node(&vma->node);
	list_move_tail(&vma->vm_link, &vma->vm->unbound_list);
	mutex_unlock(&vma->vm->mutex);

	/*
	 * Now that the node has been removed from the address space, drop
	 * this vma's claim on the object: its bind count and the pages we
	 * pinned for the binding.
	 */
	if (vma->obj) {
		struct drm_i915_gem_object *obj = vma->obj;

		atomic_dec(&obj->bind_count);

		/*
		 * And finally now the object is completely decoupled from
		 * this vma, we can drop its hold on the backing storage and
		 * allow it to be reaped by the shrinker.
		 */
		i915_gem_object_unpin_pages(obj);
		assert_bind_count(obj);
	}
}

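/*
 * __i915_vma_do_pin() is the slow path behind i915_vma_pin(): with the pin
 * count already raised by the caller, it inserts the node into the address
 * space if this is the first binding and then binds the requested PTEs.
 *
 * Callers normally go through the i915_vma_pin() wrapper, e.g. (a sketch
 * only, error handling elided):
 *
 *	err = i915_vma_pin(vma, 0, 0, PIN_GLOBAL | PIN_MAPPABLE);
 *	if (err == 0) {
 *		... use the binding ...
 *		i915_vma_unpin(vma);
 *	}
 */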
int __i915_vma_do_pin(struct i915_vma *vma,
		      u64 size, u64 alignment, u64 flags)
{
	const unsigned int bound = vma->flags;
	int ret;

	lockdep_assert_held(&vma->vm->i915->drm.struct_mutex);
	GEM_BUG_ON((flags & (PIN_GLOBAL | PIN_USER)) == 0);
	GEM_BUG_ON((flags & PIN_GLOBAL) && !i915_vma_is_ggtt(vma));

	if (WARN_ON(bound & I915_VMA_PIN_OVERFLOW)) {
		ret = -EBUSY;
		goto err_unpin;
	}

	if ((bound & I915_VMA_BIND_MASK) == 0) {
		ret = i915_vma_insert(vma, size, alignment, flags);
		if (ret)
			goto err_unpin;
	}
	GEM_BUG_ON(!drm_mm_node_allocated(&vma->node));

	ret = i915_vma_bind(vma, vma->obj ? vma->obj->cache_level : 0, flags);
	if (ret)
		goto err_remove;

	GEM_BUG_ON((vma->flags & I915_VMA_BIND_MASK) == 0);

	if ((bound ^ vma->flags) & I915_VMA_GLOBAL_BIND)
		__i915_vma_set_map_and_fenceable(vma);

	GEM_BUG_ON(i915_vma_misplaced(vma, size, alignment, flags));
	return 0;

err_remove:
	if ((bound & I915_VMA_BIND_MASK) == 0) {
		i915_vma_remove(vma);
		GEM_BUG_ON(vma->pages);
		GEM_BUG_ON(vma->flags & I915_VMA_BIND_MASK);
	}
err_unpin:
	__i915_vma_unpin(vma);
	return ret;
}

void i915_vma_close(struct i915_vma *vma)
{
	struct drm_i915_private *i915 = vma->vm->i915;
	unsigned long flags;

	GEM_BUG_ON(i915_vma_is_closed(vma));

	/*
	 * The VMA is not destroyed immediately. Instead it is placed on the
	 * i915->gt.closed_vma list and only unbound and freed once the GT
	 * idles (see i915_vma_parked()), or when the object itself is freed.
	 * Deferring the teardown allows a closed VMA to be resurrected by
	 * i915_vma_reopen() before that point, avoiding the cost of
	 * rebinding it if the client reuses the same object/view.
	 */
	spin_lock_irqsave(&i915->gt.closed_lock, flags);
	list_add(&vma->closed_link, &i915->gt.closed_vma);
	spin_unlock_irqrestore(&i915->gt.closed_lock, flags);
}

static void __i915_vma_remove_closed(struct i915_vma *vma)
{
	struct drm_i915_private *i915 = vma->vm->i915;

	if (!i915_vma_is_closed(vma))
		return;

	spin_lock_irq(&i915->gt.closed_lock);
	list_del_init(&vma->closed_link);
	spin_unlock_irq(&i915->gt.closed_lock);
}

void i915_vma_reopen(struct i915_vma *vma)
{
	__i915_vma_remove_closed(vma);
}

static void __i915_vma_destroy(struct i915_vma *vma)
{
	GEM_BUG_ON(vma->node.allocated);
	GEM_BUG_ON(vma->fence);

	GEM_BUG_ON(i915_active_request_isset(&vma->last_fence));

	mutex_lock(&vma->vm->mutex);
	list_del(&vma->vm_link);
	mutex_unlock(&vma->vm->mutex);

	if (vma->obj) {
		struct drm_i915_gem_object *obj = vma->obj;

		spin_lock(&obj->vma.lock);
		list_del(&vma->obj_link);
		rb_erase(&vma->obj_node, &vma->obj->vma.tree);
		spin_unlock(&obj->vma.lock);
	}

	i915_active_fini(&vma->active);

	i915_vma_free(vma);
}

void i915_vma_destroy(struct i915_vma *vma)
{
	lockdep_assert_held(&vma->vm->i915->drm.struct_mutex);

	GEM_BUG_ON(i915_vma_is_pinned(vma));

	__i915_vma_remove_closed(vma);

	WARN_ON(i915_vma_unbind(vma));
	GEM_BUG_ON(i915_vma_is_active(vma));

	__i915_vma_destroy(vma);
}

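/*
 * i915_vma_parked() is called once the GT has idled: it reaps all VMAs that
 * were closed while the GPU was busy, unbinding and destroying each in turn.
 * The closed_lock is dropped around each destroy since unbinding can sleep.
 */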
void i915_vma_parked(struct drm_i915_private *i915)
{
	struct i915_vma *vma, *next;

	spin_lock_irq(&i915->gt.closed_lock);
	list_for_each_entry_safe(vma, next, &i915->gt.closed_vma, closed_link) {
		list_del_init(&vma->closed_link);
		spin_unlock_irq(&i915->gt.closed_lock);

		i915_vma_destroy(vma);

		spin_lock_irq(&i915->gt.closed_lock);
	}
	spin_unlock_irq(&i915->gt.closed_lock);
}

static void __i915_vma_iounmap(struct i915_vma *vma)
{
	GEM_BUG_ON(i915_vma_is_pinned(vma));

	if (vma->iomap == NULL)
		return;

	io_mapping_unmap(vma->iomap);
	vma->iomap = NULL;
}

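/*
 * Zap the CPU PTEs of any userspace GGTT mmap of this VMA so that the next
 * access faults back in through the driver, keeping the userfault tracking
 * accurate and allowing the VMA to be safely unbound underneath the user.
 */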
void i915_vma_revoke_mmap(struct i915_vma *vma)
{
	struct drm_vma_offset_node *node = &vma->obj->base.vma_node;
	u64 vma_offset;

	lockdep_assert_held(&vma->vm->i915->drm.struct_mutex);

	if (!i915_vma_has_userfault(vma))
		return;

	GEM_BUG_ON(!i915_vma_is_map_and_fenceable(vma));
	GEM_BUG_ON(!vma->obj->userfault_count);

	vma_offset = vma->ggtt_view.partial.offset << PAGE_SHIFT;
	unmap_mapping_range(vma->vm->i915->drm.anon_inode->i_mapping,
			    drm_vma_node_offset_addr(node) + vma_offset,
			    vma->size,
			    1);

	i915_vma_unset_userfault(vma);
	if (!--vma->obj->userfault_count)
		list_del(&vma->obj->userfault_link);
}

static void export_fence(struct i915_vma *vma,
			 struct i915_request *rq,
			 unsigned int flags)
{
	struct reservation_object *resv = vma->resv;

	/*
	 * Ignore errors from failing to allocate the new fence, we can't
	 * handle an error right now. Worst case should be missed
	 * synchronisation leading to rendering corruption.
	 */
	if (flags & EXEC_OBJECT_WRITE)
		reservation_object_add_excl_fence(resv, &rq->fence);
	else if (reservation_object_reserve_shared(resv, 1) == 0)
		reservation_object_add_shared_fence(resv, &rq->fence);
}

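/*
 * i915_vma_move_to_active() marks the VMA (and its object) as in use by
 * @rq: it tracks the request on the VMA's i915_active, updates the object's
 * read/write domains and frontbuffer tracking, records the fence request if
 * needed, and exports the request fence to the object's reservation object
 * so that other users wait on it.
 */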
int i915_vma_move_to_active(struct i915_vma *vma,
			    struct i915_request *rq,
			    unsigned int flags)
{
	struct drm_i915_gem_object *obj = vma->obj;

	assert_vma_held(vma);
	assert_object_held(obj);
	GEM_BUG_ON(!drm_mm_node_allocated(&vma->node));

	/*
	 * Take a reference on the object the first time it enters the
	 * active state (i.e. when the first of its VMAs becomes active);
	 * the reference is dropped again from __i915_vma_retire() once all
	 * of its VMAs have retired. This keeps the object alive while the
	 * GPU may still be accessing it, even if userspace has already
	 * closed its handles.
	 */
	if (!vma->active.count && !obj->active_count++)
		i915_gem_object_get(obj);

	if (unlikely(i915_active_ref(&vma->active, rq->fence.context, rq))) {
		if (!vma->active.count && !--obj->active_count)
			i915_gem_object_put(obj);
		return -ENOMEM;
	}

	GEM_BUG_ON(!i915_vma_is_active(vma));
	GEM_BUG_ON(!obj->active_count);

	obj->write_domain = 0;
	if (flags & EXEC_OBJECT_WRITE) {
		obj->write_domain = I915_GEM_DOMAIN_RENDER;

		if (intel_fb_obj_invalidate(obj, ORIGIN_CS))
			__i915_active_request_set(&obj->frontbuffer_write, rq);

		obj->read_domains = 0;
	}
	obj->read_domains |= I915_GEM_GPU_DOMAINS;

	if (flags & EXEC_OBJECT_NEEDS_FENCE)
		__i915_active_request_set(&vma->last_fence, rq);

	export_fence(vma, rq, flags);
	return 0;
}

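/*
 * i915_vma_unbind() releases the VMA's binding in its address space: it
 * waits for any outstanding GPU activity, flushes GGTT writes, drops the
 * fence register and userspace mmaps for map-and-fenceable VMAs, clears the
 * PTEs and finally removes the node. Returns -EBUSY if the VMA is pinned.
 */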
int i915_vma_unbind(struct i915_vma *vma)
{
	int ret;

	lockdep_assert_held(&vma->vm->i915->drm.struct_mutex);

	/*
	 * First wait upon any activity as retiring the request may
	 * have side-effects such as unpinning or even unbinding this vma.
	 */
	might_sleep();
	if (i915_vma_is_active(vma)) {
		/*
		 * When a closed VMA is retired, it is unbound - eek.
		 * In order to prevent it from being recursively closed,
		 * take a pin on the vma so that the second unbind is
		 * aborted.
		 *
		 * Even more scary is that the retire callback may free
		 * the object (last active vma). To prevent the explosion
		 * we defer the actual object free to a worker that can
		 * only proceed once it acquires the struct_mutex (which
		 * we currently hold, therefore it cannot free this object
		 * before we are finished).
		 */
		__i915_vma_pin(vma);

		ret = i915_active_wait(&vma->active);
		if (ret)
			goto unpin;

		ret = i915_active_request_retire(&vma->last_fence,
						 &vma->vm->i915->drm.struct_mutex);
unpin:
		__i915_vma_unpin(vma);
		if (ret)
			return ret;
	}
	GEM_BUG_ON(i915_vma_is_active(vma));

	if (i915_vma_is_pinned(vma)) {
		vma_print_allocator(vma, "is pinned");
		return -EBUSY;
	}

	if (!drm_mm_node_allocated(&vma->node))
		return 0;

	if (i915_vma_is_map_and_fenceable(vma)) {
		/*
		 * Check that we have flushed all writes through the GGTT
		 * before the unbind; due to the non-strict nature of those
		 * indirect writes they may otherwise end up referencing the
		 * GGTT PTE after the unbind.
		 */
		i915_vma_flush_writes(vma);
		GEM_BUG_ON(i915_vma_has_ggtt_write(vma));

		/* release the fence reg _after_ flushing */
		ret = i915_vma_put_fence(vma);
		if (ret)
			return ret;

		/* Force a pagefault for domain tracking on next user access */
		i915_vma_revoke_mmap(vma);

		__i915_vma_iounmap(vma);
		vma->flags &= ~I915_VMA_CAN_FENCE;
	}
	GEM_BUG_ON(vma->fence);
	GEM_BUG_ON(i915_vma_has_userfault(vma));

	if (likely(!vma->vm->closed)) {
		trace_i915_vma_unbind(vma);
		vma->ops->unbind_vma(vma);
	}
	vma->flags &= ~(I915_VMA_GLOBAL_BIND | I915_VMA_LOCAL_BIND);

	i915_vma_remove(vma);

	return 0;
}

#if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
#include "selftests/i915_vma.c"
#endif

static void i915_global_vma_shrink(void)
{
	kmem_cache_shrink(global.slab_vmas);
}

static void i915_global_vma_exit(void)
{
	kmem_cache_destroy(global.slab_vmas);
}

static struct i915_global_vma global = { {
	.shrink = i915_global_vma_shrink,
	.exit = i915_global_vma_exit,
} };

int __init i915_global_vma_init(void)
{
	global.slab_vmas = KMEM_CACHE(i915_vma, SLAB_HWCACHE_ALIGN);
	if (!global.slab_vmas)
		return -ENOMEM;

	i915_global_register(&global.base);
	return 0;
}