#include <linux/sched/mm.h>
#include <drm/drm_gem.h>

#include "display/intel_frontbuffer.h"

#include "gem/i915_gem_lmem.h"
#include "gt/intel_engine.h"
#include "gt/intel_engine_heartbeat.h"
#include "gt/intel_gt.h"
#include "gt/intel_gt_requests.h"

#include "i915_drv.h"
#include "i915_sw_fence_work.h"
#include "i915_trace.h"
#include "i915_vma.h"

static struct kmem_cache *slab_vmas;

struct i915_vma *i915_vma_alloc(void)
{
	return kmem_cache_zalloc(slab_vmas, GFP_KERNEL);
}

void i915_vma_free(struct i915_vma *vma)
{
	return kmem_cache_free(slab_vmas, vma);
}

#if IS_ENABLED(CONFIG_DRM_I915_ERRLOG_GEM) && IS_ENABLED(CONFIG_DRM_DEBUG_MM)

#include <linux/stackdepot.h>

static void vma_print_allocator(struct i915_vma *vma, const char *reason)
{
	unsigned long *entries;
	unsigned int nr_entries;
	char buf[512];

	if (!vma->node.stack) {
		DRM_DEBUG_DRIVER("vma.node [%08llx + %08llx] %s: unknown owner\n",
				 vma->node.start, vma->node.size, reason);
		return;
	}

	nr_entries = stack_depot_fetch(vma->node.stack, &entries);
	stack_trace_snprint(buf, sizeof(buf), entries, nr_entries, 0);
	DRM_DEBUG_DRIVER("vma.node [%08llx + %08llx] %s: inserted at %s\n",
			 vma->node.start, vma->node.size, reason, buf);
}

#else

static void vma_print_allocator(struct i915_vma *vma, const char *reason)
{
}

#endif

static inline struct i915_vma *active_to_vma(struct i915_active *ref)
{
	return container_of(ref, typeof(struct i915_vma), active);
}

static int __i915_vma_active(struct i915_active *ref)
{
	return i915_vma_tryget(active_to_vma(ref)) ? 0 : -ENOENT;
}

static void __i915_vma_retire(struct i915_active *ref)
{
	i915_vma_put(active_to_vma(ref));
}

static struct i915_vma *
vma_create(struct drm_i915_gem_object *obj,
	   struct i915_address_space *vm,
	   const struct i915_ggtt_view *view)
{
	struct i915_vma *pos = ERR_PTR(-E2BIG);
	struct i915_vma *vma;
	struct rb_node *rb, **p;
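
	/* The aliasing-ppgtt address space must never be used directly. */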
	GEM_BUG_ON(vm == &vm->gt->ggtt->alias->vm);

	vma = i915_vma_alloc();
	if (vma == NULL)
		return ERR_PTR(-ENOMEM);

	kref_init(&vma->ref);
	mutex_init(&vma->pages_mutex);
	vma->vm = i915_vm_get(vm);
	vma->ops = &vm->vma_ops;
	vma->obj = obj;
	vma->resv = obj->base.resv;
	vma->size = obj->base.size;
	vma->display_alignment = I915_GTT_MIN_ALIGNMENT;

	i915_active_init(&vma->active, __i915_vma_active, __i915_vma_retire, 0);

	if (IS_ENABLED(CONFIG_LOCKDEP)) {
		fs_reclaim_acquire(GFP_KERNEL);
		might_lock(&vma->active.mutex);
		fs_reclaim_release(GFP_KERNEL);
	}

	INIT_LIST_HEAD(&vma->closed_link);

	if (view && view->type != I915_GGTT_VIEW_NORMAL) {
		vma->ggtt_view = *view;
		if (view->type == I915_GGTT_VIEW_PARTIAL) {
			GEM_BUG_ON(range_overflows_t(u64,
						     view->partial.offset,
						     view->partial.size,
						     obj->base.size >> PAGE_SHIFT));
			vma->size = view->partial.size;
			vma->size <<= PAGE_SHIFT;
			GEM_BUG_ON(vma->size > obj->base.size);
		} else if (view->type == I915_GGTT_VIEW_ROTATED) {
			vma->size = intel_rotation_info_size(&view->rotated);
			vma->size <<= PAGE_SHIFT;
		} else if (view->type == I915_GGTT_VIEW_REMAPPED) {
			vma->size = intel_remapped_info_size(&view->remapped);
			vma->size <<= PAGE_SHIFT;
		}
	}

	if (unlikely(vma->size > vm->total))
		goto err_vma;

	GEM_BUG_ON(!IS_ALIGNED(vma->size, I915_GTT_PAGE_SIZE));

	spin_lock(&obj->vma.lock);

	if (i915_is_ggtt(vm)) {
		if (unlikely(overflows_type(vma->size, u32)))
			goto err_unlock;

		vma->fence_size = i915_gem_fence_size(vm->i915, vma->size,
						      i915_gem_object_get_tiling(obj),
						      i915_gem_object_get_stride(obj));
		if (unlikely(vma->fence_size < vma->size ||
			     vma->fence_size > vm->total))
			goto err_unlock;

		GEM_BUG_ON(!IS_ALIGNED(vma->fence_size, I915_GTT_MIN_ALIGNMENT));

		vma->fence_alignment = i915_gem_fence_alignment(vm->i915, vma->size,
								i915_gem_object_get_tiling(obj),
								i915_gem_object_get_stride(obj));
		GEM_BUG_ON(!is_power_of_2(vma->fence_alignment));

		__set_bit(I915_VMA_GGTT_BIT, __i915_vma_flags(vma));
	}

	rb = NULL;
	p = &obj->vma.tree.rb_node;
	while (*p) {
		long cmp;

		rb = *p;
		pos = rb_entry(rb, struct i915_vma, obj_node);
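
		/*
		 * If a matching vma already exists in the tree, another
		 * thread raced us and created it first: return the existing
		 * instance and dispose of the one we just allocated.
		 */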
		cmp = i915_vma_compare(pos, vm, view);
		if (cmp < 0)
			p = &rb->rb_right;
		else if (cmp > 0)
			p = &rb->rb_left;
		else
			goto err_unlock;
	}
	rb_link_node(&vma->obj_node, rb, p);
	rb_insert_color(&vma->obj_node, &obj->vma.tree);

	if (i915_vma_is_ggtt(vma))
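		/*
		 * Keep GGTT vmas at the start of the object's vma list,
		 * followed by the ppGTT vmas, so that for_each_ggtt_vma()
		 * can stop iterating as soon as it sees a non-GGTT vma.
		 */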
		list_add(&vma->obj_link, &obj->vma.list);
	else
		list_add_tail(&vma->obj_link, &obj->vma.list);

	spin_unlock(&obj->vma.lock);

	return vma;

err_unlock:
	spin_unlock(&obj->vma.lock);
err_vma:
	i915_vm_put(vm);
	i915_vma_free(vma);
	return pos;
}

static struct i915_vma *
i915_vma_lookup(struct drm_i915_gem_object *obj,
		struct i915_address_space *vm,
		const struct i915_ggtt_view *view)
{
	struct rb_node *rb;

	rb = obj->vma.tree.rb_node;
	while (rb) {
		struct i915_vma *vma = rb_entry(rb, struct i915_vma, obj_node);
		long cmp;

		cmp = i915_vma_compare(vma, vm, view);
		if (cmp == 0)
			return vma;

		if (cmp < 0)
			rb = rb->rb_right;
		else
			rb = rb->rb_left;
	}

	return NULL;
}
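
/**
 * i915_vma_instance - return the singleton instance of the VMA
 * @obj: parent &struct drm_i915_gem_object to be mapped
 * @vm: address space in which the mapping is located
 * @view: additional mapping requirements
 *
 * i915_vma_instance() looks up an existing VMA of the @obj in the @vm with
 * the same @view characteristics. If a match is not found, one is created.
 * Once created, the VMA is kept until either the object is freed, or the
 * address space is closed.
 *
 * Returns the vma, or an error pointer.
 */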
struct i915_vma *
i915_vma_instance(struct drm_i915_gem_object *obj,
		  struct i915_address_space *vm,
		  const struct i915_ggtt_view *view)
{
	struct i915_vma *vma;

	GEM_BUG_ON(view && !i915_is_ggtt_or_dpt(vm));
	GEM_BUG_ON(!atomic_read(&vm->open));

	spin_lock(&obj->vma.lock);
	vma = i915_vma_lookup(obj, vm, view);
	spin_unlock(&obj->vma.lock);
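
	/* vma_create() re-checks under obj->vma.lock and resolves any race. */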
	if (unlikely(!vma))
		vma = vma_create(obj, vm, view);

	GEM_BUG_ON(!IS_ERR(vma) && i915_vma_compare(vma, vm, view));
	return vma;
}

struct i915_vma_work {
	struct dma_fence_work base;
	struct i915_address_space *vm;
	struct i915_vm_pt_stash stash;
	struct i915_vma *vma;
	struct drm_i915_gem_object *pinned;
	struct i915_sw_dma_fence_cb cb;
	enum i915_cache_level cache_level;
	unsigned int flags;
};

static void __vma_bind(struct dma_fence_work *work)
{
	struct i915_vma_work *vw = container_of(work, typeof(*vw), base);
	struct i915_vma *vma = vw->vma;

	vma->ops->bind_vma(vw->vm, &vw->stash,
			   vma, vw->cache_level, vw->flags);
}

static void __vma_release(struct dma_fence_work *work)
{
	struct i915_vma_work *vw = container_of(work, typeof(*vw), base);

	if (vw->pinned) {
		__i915_gem_object_unpin_pages(vw->pinned);
		i915_gem_object_put(vw->pinned);
	}

	i915_vm_free_pt_stash(vw->vm, &vw->stash);
	i915_vm_put(vw->vm);
}

static const struct dma_fence_work_ops bind_ops = {
	.name = "bind",
	.work = __vma_bind,
	.release = __vma_release,
};

struct i915_vma_work *i915_vma_work(void)
{
	struct i915_vma_work *vw;

	vw = kzalloc(sizeof(*vw), GFP_KERNEL);
	if (!vw)
		return NULL;

	dma_fence_work_init(&vw->base, &bind_ops);
	vw->base.dma.error = -EAGAIN;

	return vw;
}

int i915_vma_wait_for_bind(struct i915_vma *vma)
{
	int err = 0;

	if (rcu_access_pointer(vma->active.excl.fence)) {
		struct dma_fence *fence;

		rcu_read_lock();
		fence = dma_fence_get_rcu_safe(&vma->active.excl.fence);
		rcu_read_unlock();
		if (fence) {
			err = dma_fence_wait(fence, MAX_SCHEDULE_TIMEOUT);
			dma_fence_put(fence);
		}
	}

	return err;
}
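
/**
 * i915_vma_bind - Sets up PTEs for a VMA in its corresponding address space.
 * @vma: VMA to map
 * @cache_level: mapping cache level
 * @flags: flags like global or local mapping
 * @work: preallocated worker for allocating and binding the PTE
 *
 * DMA addresses are taken from the scatter-gather table of this object (or of
 * this VMA in case of non-default GGTT views) and PTE entries set up.
 * Note that DMA addresses are also the only part of the SG table we care about.
 */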
int i915_vma_bind(struct i915_vma *vma,
		  enum i915_cache_level cache_level,
		  u32 flags,
		  struct i915_vma_work *work)
{
	u32 bind_flags;
	u32 vma_flags;

	GEM_BUG_ON(!drm_mm_node_allocated(&vma->node));
	GEM_BUG_ON(vma->size > vma->node.size);

	if (GEM_DEBUG_WARN_ON(range_overflows(vma->node.start,
					      vma->node.size,
					      vma->vm->total)))
		return -ENODEV;

	if (GEM_DEBUG_WARN_ON(!flags))
		return -EINVAL;

	bind_flags = flags;
	bind_flags &= I915_VMA_GLOBAL_BIND | I915_VMA_LOCAL_BIND;

	vma_flags = atomic_read(&vma->flags);
	vma_flags &= I915_VMA_GLOBAL_BIND | I915_VMA_LOCAL_BIND;

	bind_flags &= ~vma_flags;
	if (bind_flags == 0)
		return 0;

	GEM_BUG_ON(!vma->pages);

	trace_i915_vma_bind(vma, bind_flags);
	if (work && bind_flags & vma->vm->bind_async_flags) {
		struct dma_fence *prev;

		work->vma = vma;
		work->cache_level = cache_level;
		work->flags = bind_flags;
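
		/*
		 * Chain the async bind behind any previous exclusive fence
		 * on the vma so that bindings execute in order; the work's
		 * own fence then becomes the new exclusive fence.
		 */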
		prev = i915_active_set_exclusive(&vma->active, &work->base.dma);
		if (prev) {
			__i915_sw_fence_await_dma_fence(&work->base.chain,
							prev,
							&work->cb);
			dma_fence_put(prev);
		}

		work->base.dma.error = 0;

		if (vma->obj) {
			__i915_gem_object_pin_pages(vma->obj);
			work->pinned = i915_gem_object_get(vma->obj);
		}
	} else {
		vma->ops->bind_vma(vma->vm, NULL, vma, cache_level, bind_flags);
	}

	atomic_or(bind_flags, &vma->flags);
	return 0;
}

void __iomem *i915_vma_pin_iomap(struct i915_vma *vma)
{
	void __iomem *ptr;
	int err;

	if (!i915_gem_object_is_lmem(vma->obj)) {
		if (GEM_WARN_ON(!i915_vma_is_map_and_fenceable(vma))) {
			err = -ENODEV;
			goto err;
		}
	}

	GEM_BUG_ON(!i915_vma_is_ggtt(vma));
	GEM_BUG_ON(!i915_vma_is_bound(vma, I915_VMA_GLOBAL_BIND));

	ptr = READ_ONCE(vma->iomap);
	if (ptr == NULL) {
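		/*
		 * For objects in local memory, map the object's backing
		 * store directly; otherwise map this vma's node through
		 * the mappable GGTT aperture.
		 */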
		if (i915_gem_object_is_lmem(vma->obj))
			ptr = i915_gem_object_lmem_io_map(vma->obj, 0,
							  vma->obj->base.size);
		else
			ptr = io_mapping_map_wc(&i915_vm_to_ggtt(vma->vm)->iomap,
						vma->node.start,
						vma->node.size);
		if (ptr == NULL) {
			err = -ENOMEM;
			goto err;
		}

		if (unlikely(cmpxchg(&vma->iomap, NULL, ptr))) {
			io_mapping_unmap(ptr);
			ptr = vma->iomap;
		}
	}

	__i915_vma_pin(vma);

	err = i915_vma_pin_fence(vma);
	if (err)
		goto err_unpin;

	i915_vma_set_ggtt_write(vma);

	return ptr;

err_unpin:
	__i915_vma_unpin(vma);
err:
	return IO_ERR_PTR(err);
}

void i915_vma_flush_writes(struct i915_vma *vma)
{
	if (i915_vma_unset_ggtt_write(vma))
		intel_gt_flush_ggtt_writes(vma->vm->gt);
}

void i915_vma_unpin_iomap(struct i915_vma *vma)
{
	GEM_BUG_ON(vma->iomap == NULL);

	i915_vma_flush_writes(vma);

	i915_vma_unpin_fence(vma);
	i915_vma_unpin(vma);
}

void i915_vma_unpin_and_release(struct i915_vma **p_vma, unsigned int flags)
{
	struct i915_vma *vma;
	struct drm_i915_gem_object *obj;

	vma = fetch_and_zero(p_vma);
	if (!vma)
		return;

	obj = vma->obj;
	GEM_BUG_ON(!obj);

	i915_vma_unpin(vma);

	if (flags & I915_VMA_RELEASE_MAP)
		i915_gem_object_unpin_map(obj);

	i915_gem_object_put(obj);
}

bool i915_vma_misplaced(const struct i915_vma *vma,
			u64 size, u64 alignment, u64 flags)
{
	if (!drm_mm_node_allocated(&vma->node))
		return false;

	if (test_bit(I915_VMA_ERROR_BIT, __i915_vma_flags(vma)))
		return true;

	if (vma->node.size < size)
		return true;

	GEM_BUG_ON(alignment && !is_power_of_2(alignment));
	if (alignment && !IS_ALIGNED(vma->node.start, alignment))
		return true;

	if (flags & PIN_MAPPABLE && !i915_vma_is_map_and_fenceable(vma))
		return true;

	if (flags & PIN_OFFSET_BIAS &&
	    vma->node.start < (flags & PIN_OFFSET_MASK))
		return true;

	if (flags & PIN_OFFSET_FIXED &&
	    vma->node.start != (flags & PIN_OFFSET_MASK))
		return true;

	return false;
}

void __i915_vma_set_map_and_fenceable(struct i915_vma *vma)
{
	bool mappable, fenceable;

	GEM_BUG_ON(!i915_vma_is_ggtt(vma));
	GEM_BUG_ON(!vma->fence_size);

	fenceable = (vma->node.size >= vma->fence_size &&
		     IS_ALIGNED(vma->node.start, vma->fence_alignment));

	mappable = vma->node.start + vma->fence_size <= i915_vm_to_ggtt(vma->vm)->mappable_end;

	if (mappable && fenceable)
		set_bit(I915_VMA_CAN_FENCE_BIT, __i915_vma_flags(vma));
	else
		clear_bit(I915_VMA_CAN_FENCE_BIT, __i915_vma_flags(vma));
}

bool i915_gem_valid_gtt_space(struct i915_vma *vma, unsigned long color)
{
	struct drm_mm_node *node = &vma->node;
	struct drm_mm_node *other;
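
	/*
	 * On some machines we have to be careful when putting differing types
	 * of snoopable memory together to avoid the prefetcher crossing memory
	 * domains and dying. During vm initialisation, we decide whether or
	 * not these constraints apply and set the drm_mm.color_adjust
	 * appropriately.
	 */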
	if (!i915_vm_has_cache_coloring(vma->vm))
		return true;

	GEM_BUG_ON(!drm_mm_node_allocated(node));
	GEM_BUG_ON(list_empty(&node->node_list));

	other = list_prev_entry(node, node_list);
	if (i915_node_color_differs(other, color) &&
	    !drm_mm_hole_follows(other))
		return false;

	other = list_next_entry(node, node_list);
	if (i915_node_color_differs(other, color) &&
	    !drm_mm_hole_follows(node))
		return false;

	return true;
}
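
/**
 * i915_vma_insert - finds a slot for the vma in its address space
 * @vma: the vma
 * @size: requested size in bytes (can be larger than the VMA)
 * @alignment: required alignment
 * @flags: mask of PIN_* flags to use
 *
 * First we try to allocate some free space that meets the requirements for
 * the VMA. Failing that, if the flags permit, it will evict an old VMA,
 * attempting to find space for it *anywhere*.
 *
 * Returns 0 on success, negative error code otherwise.
 */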
static int
i915_vma_insert(struct i915_vma *vma, u64 size, u64 alignment, u64 flags)
{
	unsigned long color;
	u64 start, end;
	int ret;

	GEM_BUG_ON(i915_vma_is_bound(vma, I915_VMA_GLOBAL_BIND | I915_VMA_LOCAL_BIND));
	GEM_BUG_ON(drm_mm_node_allocated(&vma->node));

	size = max(size, vma->size);
	alignment = max(alignment, vma->display_alignment);
	if (flags & PIN_MAPPABLE) {
		size = max_t(typeof(size), size, vma->fence_size);
		alignment = max_t(typeof(alignment),
				  alignment, vma->fence_alignment);
	}

	GEM_BUG_ON(!IS_ALIGNED(size, I915_GTT_PAGE_SIZE));
	GEM_BUG_ON(!IS_ALIGNED(alignment, I915_GTT_MIN_ALIGNMENT));
	GEM_BUG_ON(!is_power_of_2(alignment));

	start = flags & PIN_OFFSET_BIAS ? flags & PIN_OFFSET_MASK : 0;
	GEM_BUG_ON(!IS_ALIGNED(start, I915_GTT_PAGE_SIZE));

	end = vma->vm->total;
	if (flags & PIN_MAPPABLE)
		end = min_t(u64, end, i915_vm_to_ggtt(vma->vm)->mappable_end);
	if (flags & PIN_ZONE_4G)
		end = min_t(u64, end, (1ULL << 32) - I915_GTT_PAGE_SIZE);
	GEM_BUG_ON(!IS_ALIGNED(end, I915_GTT_PAGE_SIZE));
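
	/*
	 * If binding the object/GGTT view requires more space than the entire
	 * aperture has, reject it early before evicting everything in a vain
	 * attempt to find space.
	 */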
	if (size > end) {
		DRM_DEBUG("Attempting to bind an object larger than the aperture: request=%llu > %s aperture=%llu\n",
			  size, flags & PIN_MAPPABLE ? "mappable" : "total",
			  end);
		return -ENOSPC;
	}

	color = 0;
	if (vma->obj && i915_vm_has_cache_coloring(vma->vm))
		color = vma->obj->cache_level;

	if (flags & PIN_OFFSET_FIXED) {
		u64 offset = flags & PIN_OFFSET_MASK;

		if (!IS_ALIGNED(offset, alignment) ||
		    range_overflows(offset, size, end))
			return -EINVAL;

		ret = i915_gem_gtt_reserve(vma->vm, &vma->node,
					   size, offset, color,
					   flags);
		if (ret)
			return ret;
	} else {
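		/*
		 * Huge GTT pages are only supported by the 48b PPGTT, but at
		 * the same time we do not want to force extra alignment on
		 * objects that must be packed tightly into the low 32 bits.
		 */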
		if (upper_32_bits(end - 1) &&
		    vma->page_sizes.sg > I915_GTT_PAGE_SIZE) {
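			/*
			 * We can't mix 64K and 4K PTEs in the same page-table
			 * (2M block), so to avoid the complexity of colouring
			 * we simply align 64K objects to 2M.
			 */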
			u64 page_alignment =
				rounddown_pow_of_two(vma->page_sizes.sg |
						     I915_GTT_PAGE_SIZE_2M);

			GEM_BUG_ON(i915_vma_is_ggtt(vma));

			alignment = max(alignment, page_alignment);

			if (vma->page_sizes.sg & I915_GTT_PAGE_SIZE_64K)
				size = round_up(size, I915_GTT_PAGE_SIZE_2M);
		}

		ret = i915_gem_gtt_insert(vma->vm, &vma->node,
					  size, alignment, color,
					  start, end, flags);
		if (ret)
			return ret;

		GEM_BUG_ON(vma->node.start < start);
		GEM_BUG_ON(vma->node.start + vma->node.size > end);
	}
	GEM_BUG_ON(!drm_mm_node_allocated(&vma->node));
	GEM_BUG_ON(!i915_gem_valid_gtt_space(vma, color));

	list_add_tail(&vma->vm_link, &vma->vm->bound_list);

	return 0;
}

static void
i915_vma_detach(struct i915_vma *vma)
{
	GEM_BUG_ON(!drm_mm_node_allocated(&vma->node));
	GEM_BUG_ON(i915_vma_is_bound(vma, I915_VMA_GLOBAL_BIND | I915_VMA_LOCAL_BIND));
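
	/*
	 * Now that the vma is unbound, drop its link from the vm's list of
	 * bound vmas; only then may vma->pages be released.
	 */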
	list_del(&vma->vm_link);
}

static bool try_qad_pin(struct i915_vma *vma, unsigned int flags)
{
	unsigned int bound;
	bool pinned = true;

	bound = atomic_read(&vma->flags);
	do {
		if (unlikely(flags & ~bound))
			return false;

		if (unlikely(bound & (I915_VMA_OVERFLOW | I915_VMA_ERROR)))
			return false;

		if (!(bound & I915_VMA_PIN_MASK))
			goto unpinned;

		GEM_BUG_ON(((bound + 1) & I915_VMA_PIN_MASK) == 0);
	} while (!atomic_try_cmpxchg(&vma->flags, &bound, bound + 1));

	return true;

unpinned:
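	/*
	 * The pin count was zero while the vma is still bound: retry the
	 * increment under vm->mutex to serialise against a concurrent
	 * i915_vma_unbind().
	 */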
	mutex_lock(&vma->vm->mutex);
	do {
		if (unlikely(bound & (I915_VMA_OVERFLOW | I915_VMA_ERROR))) {
			pinned = false;
			break;
		}

		if (unlikely(flags & ~bound)) {
			pinned = false;
			break;
		}
	} while (!atomic_try_cmpxchg(&vma->flags, &bound, bound + 1));
	mutex_unlock(&vma->vm->mutex);

	return pinned;
}

static int vma_get_pages(struct i915_vma *vma)
{
	int err = 0;
	bool pinned_pages = false;

	if (atomic_add_unless(&vma->pages_count, 1, 0))
		return 0;

	if (vma->obj) {
		err = i915_gem_object_pin_pages(vma->obj);
		if (err)
			return err;
		pinned_pages = true;
	}

	if (mutex_lock_interruptible(&vma->pages_mutex)) {
		err = -EINTR;
		goto unpin;
	}

	if (!atomic_read(&vma->pages_count)) {
		err = vma->ops->set_pages(vma);
		if (err)
			goto unlock;
		pinned_pages = false;
	}
	atomic_inc(&vma->pages_count);

unlock:
	mutex_unlock(&vma->pages_mutex);
unpin:
	if (pinned_pages)
		__i915_gem_object_unpin_pages(vma->obj);

	return err;
}

static void __vma_put_pages(struct i915_vma *vma, unsigned int count)
{
	mutex_lock_nested(&vma->pages_mutex, SINGLE_DEPTH_NESTING);
	GEM_BUG_ON(atomic_read(&vma->pages_count) < count);
	if (atomic_sub_return(count, &vma->pages_count) == 0) {
		vma->ops->clear_pages(vma);
		GEM_BUG_ON(vma->pages);
		if (vma->obj)
			i915_gem_object_unpin_pages(vma->obj);
	}
	mutex_unlock(&vma->pages_mutex);
}

static void vma_put_pages(struct i915_vma *vma)
{
	if (atomic_add_unless(&vma->pages_count, -1, 1))
		return;

	__vma_put_pages(vma, 1);
}

static void vma_unbind_pages(struct i915_vma *vma)
{
	unsigned int count;

	lockdep_assert_held(&vma->vm->mutex);

	count = atomic_read(&vma->pages_count);
	count >>= I915_VMA_PAGES_BIAS;
	GEM_BUG_ON(!count);

	__vma_put_pages(vma, count | count << I915_VMA_PAGES_BIAS);
}

int i915_vma_pin_ww(struct i915_vma *vma, struct i915_gem_ww_ctx *ww,
		    u64 size, u64 alignment, u64 flags)
{
	struct i915_vma_work *work = NULL;
	intel_wakeref_t wakeref = 0;
	unsigned int bound;
	int err;

#ifdef CONFIG_PROVE_LOCKING
	if (debug_locks && !WARN_ON(!ww) && vma->resv)
		assert_vma_held(vma);
#endif

	BUILD_BUG_ON(PIN_GLOBAL != I915_VMA_GLOBAL_BIND);
	BUILD_BUG_ON(PIN_USER != I915_VMA_LOCAL_BIND);

	GEM_BUG_ON(!(flags & (PIN_USER | PIN_GLOBAL)));

	if (try_qad_pin(vma, flags & I915_VMA_BIND_MASK))
		return 0;

	err = vma_get_pages(vma);
	if (err)
		return err;

	if (flags & PIN_GLOBAL)
		wakeref = intel_runtime_pm_get(&vma->vm->i915->runtime_pm);

	if (flags & vma->vm->bind_async_flags) {
		err = i915_vm_lock_objects(vma->vm, ww);
		if (err)
			goto err_rpm;

		work = i915_vma_work();
		if (!work) {
			err = -ENOMEM;
			goto err_rpm;
		}

		work->vm = i915_vm_get(vma->vm);

		if (vma->vm->allocate_va_range) {
			err = i915_vm_alloc_pt_stash(vma->vm,
						     &work->stash,
						     vma->size);
			if (err)
				goto err_fence;

			err = i915_vm_map_pt_stash(vma->vm, &work->stash);
			if (err)
				goto err_fence;
		}
	}
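
	/*
	 * Use a distinct lockdep subclass for user (PIN_USER) versus global
	 * (PIN_GLOBAL) pins: with the aliasing-ppgtt both share the GGTT's
	 * vm->mutex, and keeping the two users in distinct lock classes lets
	 * lockdep tell them apart (at the risk of masking a real inversion).
	 */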
	err = mutex_lock_interruptible_nested(&vma->vm->mutex,
					      !(flags & PIN_GLOBAL));
	if (err)
		goto err_fence;

	if (unlikely(i915_vma_is_closed(vma))) {
		err = -ENOENT;
		goto err_unlock;
	}

	bound = atomic_read(&vma->flags);
	if (unlikely(bound & I915_VMA_ERROR)) {
		err = -ENOMEM;
		goto err_unlock;
	}

	if (unlikely(!((bound + 1) & I915_VMA_PIN_MASK))) {
		err = -EAGAIN;
		goto err_unlock;
	}

	if (unlikely(!(flags & ~bound & I915_VMA_BIND_MASK))) {
		__i915_vma_pin(vma);
		goto err_unlock;
	}

	err = i915_active_acquire(&vma->active);
	if (err)
		goto err_unlock;

	if (!(bound & I915_VMA_BIND_MASK)) {
		err = i915_vma_insert(vma, size, alignment, flags);
		if (err)
			goto err_active;

		if (i915_is_ggtt(vma->vm))
			__i915_vma_set_map_and_fenceable(vma);
	}

	GEM_BUG_ON(!vma->pages);
	err = i915_vma_bind(vma,
			    vma->obj ? vma->obj->cache_level : 0,
			    flags, work);
	if (err)
		goto err_remove;

	GEM_BUG_ON(bound + I915_VMA_PAGES_ACTIVE < bound);
	atomic_add(I915_VMA_PAGES_ACTIVE, &vma->pages_count);
	list_move_tail(&vma->vm_link, &vma->vm->bound_list);

	__i915_vma_pin(vma);
	GEM_BUG_ON(!i915_vma_is_pinned(vma));
	GEM_BUG_ON(!i915_vma_is_bound(vma, flags));
	GEM_BUG_ON(i915_vma_misplaced(vma, size, alignment, flags));

err_remove:
	if (!i915_vma_is_bound(vma, I915_VMA_BIND_MASK)) {
		i915_vma_detach(vma);
		drm_mm_remove_node(&vma->node);
	}
err_active:
	i915_active_release(&vma->active);
err_unlock:
	mutex_unlock(&vma->vm->mutex);
err_fence:
	if (work)
		dma_fence_work_commit_imm(&work->base);
err_rpm:
	if (wakeref)
		intel_runtime_pm_put(&vma->vm->i915->runtime_pm, wakeref);
	vma_put_pages(vma);
	return err;
}

static void flush_idle_contexts(struct intel_gt *gt)
{
	struct intel_engine_cs *engine;
	enum intel_engine_id id;

	for_each_engine(engine, gt, id)
		intel_engine_flush_barriers(engine);

	intel_gt_wait_for_idle(gt, MAX_SCHEDULE_TIMEOUT);
}

int i915_ggtt_pin(struct i915_vma *vma, struct i915_gem_ww_ctx *ww,
		  u32 align, unsigned int flags)
{
	struct i915_address_space *vm = vma->vm;
	int err;

	GEM_BUG_ON(!i915_vma_is_ggtt(vma));

#ifdef CONFIG_LOCKDEP
	WARN_ON(!ww && vma->resv && dma_resv_held(vma->resv));
#endif

	do {
		if (ww)
			err = i915_vma_pin_ww(vma, ww, 0, align, flags | PIN_GLOBAL);
		else
			err = i915_vma_pin(vma, 0, align, flags | PIN_GLOBAL);
		if (err != -ENOSPC) {
			if (!err) {
				err = i915_vma_wait_for_bind(vma);
				if (err)
					i915_vma_unpin(vma);
			}
			return err;
		}

		flush_idle_contexts(vm->gt);
		if (mutex_lock_interruptible(&vm->mutex) == 0) {
			i915_gem_evict_vm(vm);
			mutex_unlock(&vm->mutex);
		}
	} while (1);
}

static void __vma_close(struct i915_vma *vma, struct intel_gt *gt)
{
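	/*
	 * We defer actually closing, unbinding and destroying the VMA until
	 * the next idle point, or if the object is freed in the meantime. By
	 * postponing the unbind, we allow for it to be resurrected by the
	 * client, avoiding the work required to rebind the VMA. This is
	 * advantageous for DRI, where the client/server pass objects
	 * between themselves, temporarily opening a local VMA to the
	 * object, and then closing it again. The same object is then reused
	 * on the next frame (or two, depending on the depth of the swap
	 * queue) and will be reported as being in use by the client,
	 * disallowing the object from being evicted whilst under the
	 * client's control.
	 */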
	GEM_BUG_ON(i915_vma_is_closed(vma));
	list_add(&vma->closed_link, &gt->closed_vma);
}

void i915_vma_close(struct i915_vma *vma)
{
	struct intel_gt *gt = vma->vm->gt;
	unsigned long flags;

	if (i915_vma_is_ggtt(vma))
		return;

	GEM_BUG_ON(!atomic_read(&vma->open_count));
	if (atomic_dec_and_lock_irqsave(&vma->open_count,
					&gt->closed_lock,
					flags)) {
		__vma_close(vma, gt);
		spin_unlock_irqrestore(&gt->closed_lock, flags);
	}
}

static void __i915_vma_remove_closed(struct i915_vma *vma)
{
	struct intel_gt *gt = vma->vm->gt;

	spin_lock_irq(&gt->closed_lock);
	list_del_init(&vma->closed_link);
	spin_unlock_irq(&gt->closed_lock);
}

void i915_vma_reopen(struct i915_vma *vma)
{
	if (i915_vma_is_closed(vma))
		__i915_vma_remove_closed(vma);
}

void i915_vma_release(struct kref *ref)
{
	struct i915_vma *vma = container_of(ref, typeof(*vma), ref);

	if (drm_mm_node_allocated(&vma->node)) {
		mutex_lock(&vma->vm->mutex);
		atomic_and(~I915_VMA_PIN_MASK, &vma->flags);
		WARN_ON(__i915_vma_unbind(vma));
		mutex_unlock(&vma->vm->mutex);
		GEM_BUG_ON(drm_mm_node_allocated(&vma->node));
	}
	GEM_BUG_ON(i915_vma_is_active(vma));

	if (vma->obj) {
		struct drm_i915_gem_object *obj = vma->obj;

		spin_lock(&obj->vma.lock);
		list_del(&vma->obj_link);
		if (!RB_EMPTY_NODE(&vma->obj_node))
			rb_erase(&vma->obj_node, &obj->vma.tree);
		spin_unlock(&obj->vma.lock);
	}

	__i915_vma_remove_closed(vma);
	i915_vm_put(vma->vm);

	i915_active_fini(&vma->active);
	i915_vma_free(vma);
}

void i915_vma_parked(struct intel_gt *gt)
{
	struct i915_vma *vma, *next;
	LIST_HEAD(closed);

	spin_lock_irq(&gt->closed_lock);
	list_for_each_entry_safe(vma, next, &gt->closed_vma, closed_link) {
		struct drm_i915_gem_object *obj = vma->obj;
		struct i915_address_space *vm = vma->vm;

		if (!kref_get_unless_zero(&obj->base.refcount))
			continue;

		if (!i915_vm_tryopen(vm)) {
			i915_gem_object_put(obj);
			continue;
		}

		list_move(&vma->closed_link, &closed);
	}
	spin_unlock_irq(&gt->closed_lock);

	list_for_each_entry_safe(vma, next, &closed, closed_link) {
		struct drm_i915_gem_object *obj = vma->obj;
		struct i915_address_space *vm = vma->vm;

		INIT_LIST_HEAD(&vma->closed_link);
		__i915_vma_put(vma);

		i915_gem_object_put(obj);
		i915_vm_close(vm);
	}
}

static void __i915_vma_iounmap(struct i915_vma *vma)
{
	GEM_BUG_ON(i915_vma_is_pinned(vma));

	if (vma->iomap == NULL)
		return;

	io_mapping_unmap(vma->iomap);
	vma->iomap = NULL;
}

void i915_vma_revoke_mmap(struct i915_vma *vma)
{
	struct drm_vma_offset_node *node;
	u64 vma_offset;

	if (!i915_vma_has_userfault(vma))
		return;

	GEM_BUG_ON(!i915_vma_is_map_and_fenceable(vma));
	GEM_BUG_ON(!vma->obj->userfault_count);

	node = &vma->mmo->vma_node;
	vma_offset = vma->ggtt_view.partial.offset << PAGE_SHIFT;
	unmap_mapping_range(vma->vm->i915->drm.anon_inode->i_mapping,
			    drm_vma_node_offset_addr(node) + vma_offset,
			    vma->size,
			    1);

	i915_vma_unset_userfault(vma);
	if (!--vma->obj->userfault_count)
		list_del(&vma->obj->userfault_link);
}

static int
__i915_request_await_bind(struct i915_request *rq, struct i915_vma *vma)
{
	return __i915_request_await_exclusive(rq, &vma->active);
}

int __i915_vma_move_to_active(struct i915_vma *vma, struct i915_request *rq)
{
	int err;

	GEM_BUG_ON(!i915_vma_is_pinned(vma));

	err = __i915_request_await_bind(rq, vma);
	if (err)
		return err;

	return i915_active_add_request(&vma->active, rq);
}

int i915_vma_move_to_active(struct i915_vma *vma,
			    struct i915_request *rq,
			    unsigned int flags)
{
	struct drm_i915_gem_object *obj = vma->obj;
	int err;

	assert_object_held(obj);

	err = __i915_vma_move_to_active(vma, rq);
	if (unlikely(err))
		return err;

	if (flags & EXEC_OBJECT_WRITE) {
		struct intel_frontbuffer *front;

		front = __intel_frontbuffer_get(obj);
		if (unlikely(front)) {
			if (intel_frontbuffer_invalidate(front, ORIGIN_CS))
				i915_active_add_request(&front->write, rq);
			intel_frontbuffer_put(front);
		}

		dma_resv_add_excl_fence(vma->resv, &rq->fence);
		obj->write_domain = I915_GEM_DOMAIN_RENDER;
		obj->read_domains = 0;
	} else {
		if (!(flags & __EXEC_OBJECT_NO_RESERVE)) {
			err = dma_resv_reserve_shared(vma->resv, 1);
			if (unlikely(err))
				return err;
		}

		dma_resv_add_shared_fence(vma->resv, &rq->fence);
		obj->write_domain = 0;
	}

	if (flags & EXEC_OBJECT_NEEDS_FENCE && vma->fence)
		i915_active_add_request(&vma->fence->active, rq);

	obj->read_domains |= I915_GEM_GPU_DOMAINS;
	obj->mm.dirty = true;

	GEM_BUG_ON(!i915_vma_is_active(vma));
	return 0;
}

void __i915_vma_evict(struct i915_vma *vma)
{
	GEM_BUG_ON(i915_vma_is_pinned(vma));

	if (i915_vma_is_map_and_fenceable(vma)) {
		i915_vma_revoke_mmap(vma);
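
		/*
		 * Flush any GGTT writes before we unbind: due to the
		 * non-strict nature of those indirect writes they may
		 * otherwise end up referencing the GGTT PTE after the
		 * unbind.
		 */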
		i915_vma_flush_writes(vma);

		i915_vma_revoke_fence(vma);

		__i915_vma_iounmap(vma);
		clear_bit(I915_VMA_CAN_FENCE_BIT, __i915_vma_flags(vma));
	}
	GEM_BUG_ON(vma->fence);
	GEM_BUG_ON(i915_vma_has_userfault(vma));

	if (likely(atomic_read(&vma->vm->open))) {
		trace_i915_vma_unbind(vma);
		vma->ops->unbind_vma(vma->vm, vma);
	}
	atomic_and(~(I915_VMA_BIND_MASK | I915_VMA_ERROR | I915_VMA_GGTT_WRITE),
		   &vma->flags);

	i915_vma_detach(vma);
	vma_unbind_pages(vma);
}

int __i915_vma_unbind(struct i915_vma *vma)
{
	int ret;

	lockdep_assert_held(&vma->vm->mutex);

	if (!drm_mm_node_allocated(&vma->node))
		return 0;

	if (i915_vma_is_pinned(vma)) {
		vma_print_allocator(vma, "is pinned");
		return -EAGAIN;
	}
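
	/*
	 * After confirming that no one else is pinning this vma, wait for
	 * any laggards who may have crept in during the wait (through
	 * a residual pin skipping the vm->mutex) to complete.
	 */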
	ret = i915_vma_sync(vma);
	if (ret)
		return ret;

	GEM_BUG_ON(i915_vma_is_active(vma));
	__i915_vma_evict(vma);

	drm_mm_remove_node(&vma->node);
	return 0;
}

int i915_vma_unbind(struct i915_vma *vma)
{
	struct i915_address_space *vm = vma->vm;
	intel_wakeref_t wakeref = 0;
	int err;

	err = i915_vma_sync(vma);
	if (err)
		return err;

	if (!drm_mm_node_allocated(&vma->node))
		return 0;

	if (i915_vma_is_pinned(vma)) {
		vma_print_allocator(vma, "is pinned");
		return -EAGAIN;
	}

	if (i915_vma_is_bound(vma, I915_VMA_GLOBAL_BIND))
		wakeref = intel_runtime_pm_get(&vm->i915->runtime_pm);

	err = mutex_lock_interruptible_nested(&vma->vm->mutex, !wakeref);
	if (err)
		goto out_rpm;

	err = __i915_vma_unbind(vma);
	mutex_unlock(&vm->mutex);

out_rpm:
	if (wakeref)
		intel_runtime_pm_put(&vm->i915->runtime_pm, wakeref);
	return err;
}

struct i915_vma *i915_vma_make_unshrinkable(struct i915_vma *vma)
{
	i915_gem_object_make_unshrinkable(vma->obj);
	return vma;
}

void i915_vma_make_shrinkable(struct i915_vma *vma)
{
	i915_gem_object_make_shrinkable(vma->obj);
}

void i915_vma_make_purgeable(struct i915_vma *vma)
{
	i915_gem_object_make_purgeable(vma->obj);
}

#if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
#include "selftests/i915_vma.c"
#endif

void i915_vma_module_exit(void)
{
	kmem_cache_destroy(slab_vmas);
}

int __init i915_vma_module_init(void)
{
	slab_vmas = KMEM_CACHE(i915_vma, SLAB_HWCACHE_ALIGN);
	if (!slab_vmas)
		return -ENOMEM;

	return 0;
}