#include <linux/sched/mm.h>

#include "display/intel_frontbuffer.h"
#include "i915_drv.h"
#include "i915_gem_clflush.h"
#include "i915_gem_context.h"
#include "i915_gem_mman.h"
#include "i915_gem_object.h"
#include "i915_memcpy.h"
#include "i915_trace.h"

static struct kmem_cache *slab_objects;

static const struct drm_gem_object_funcs i915_gem_object_funcs;

struct drm_i915_gem_object *i915_gem_object_alloc(void)
{
	struct drm_i915_gem_object *obj;

	obj = kmem_cache_zalloc(slab_objects, GFP_KERNEL);
	if (!obj)
		return NULL;
	obj->base.funcs = &i915_gem_object_funcs;

	return obj;
}

void i915_gem_object_free(struct drm_i915_gem_object *obj)
{
	return kmem_cache_free(slab_objects, obj);
}

void i915_gem_object_init(struct drm_i915_gem_object *obj,
			  const struct drm_i915_gem_object_ops *ops,
			  struct lock_class_key *key, unsigned flags)
{
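	/*
	 * The drm_gem_object base aliases the base of the TTM buffer object
	 * sharing the same union (__do_not_access); both must sit at the
	 * same offset for the aliasing to be valid.
	 */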
	BUILD_BUG_ON(offsetof(typeof(*obj), base) !=
		     offsetof(typeof(*obj), __do_not_access.base));

	spin_lock_init(&obj->vma.lock);
	INIT_LIST_HEAD(&obj->vma.list);

	INIT_LIST_HEAD(&obj->mm.link);

	INIT_LIST_HEAD(&obj->lut_list);
	spin_lock_init(&obj->lut_lock);

	spin_lock_init(&obj->mmo.lock);
	obj->mmo.offsets = RB_ROOT;

	init_rcu_head(&obj->rcu);

	obj->ops = ops;
	GEM_BUG_ON(flags & ~I915_BO_ALLOC_FLAGS);
	obj->flags = flags;

	obj->mm.madv = I915_MADV_WILLNEED;
	INIT_RADIX_TREE(&obj->mm.get_page.radix, GFP_KERNEL | __GFP_NOWARN);
	mutex_init(&obj->mm.get_page.lock);
	INIT_RADIX_TREE(&obj->mm.get_dma_page.radix, GFP_KERNEL | __GFP_NOWARN);
	mutex_init(&obj->mm.get_dma_page.lock);
}

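/**
 * i915_gem_object_set_cache_coherency - mark up the object's coherency
 * levels for a given cache_level
 * @obj: #drm_i915_gem_object
 * @cache_level: cache level
 */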
void i915_gem_object_set_cache_coherency(struct drm_i915_gem_object *obj,
					 unsigned int cache_level)
{
	obj->cache_level = cache_level;

	if (cache_level != I915_CACHE_NONE)
		obj->cache_coherent = (I915_BO_CACHE_COHERENT_FOR_READ |
				       I915_BO_CACHE_COHERENT_FOR_WRITE);
	else if (HAS_LLC(to_i915(obj->base.dev)))
		obj->cache_coherent = I915_BO_CACHE_COHERENT_FOR_READ;
	else
		obj->cache_coherent = 0;

	obj->cache_dirty =
		!(obj->cache_coherent & I915_BO_CACHE_COHERENT_FOR_WRITE);
}

static void i915_gem_close_object(struct drm_gem_object *gem, struct drm_file *file)
{
	struct drm_i915_gem_object *obj = to_intel_bo(gem);
	struct drm_i915_file_private *fpriv = file->driver_priv;
	struct i915_lut_handle bookmark = {};
	struct i915_mmap_offset *mmo, *mn;
	struct i915_lut_handle *lut, *ln;
	LIST_HEAD(close);

	spin_lock(&obj->lut_lock);
	list_for_each_entry_safe(lut, ln, &obj->lut_list, obj_link) {
		struct i915_gem_context *ctx = lut->ctx;

		if (ctx && ctx->file_priv == fpriv) {
			i915_gem_context_get(ctx);
			list_move(&lut->obj_link, &close);
		}

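		/*
		 * Break up long walks under lut_lock: park a bookmark in the
		 * list so we can drop the lock, reschedule, and resume.
		 */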
		if (&ln->obj_link != &obj->lut_list) {
			list_add_tail(&bookmark.obj_link, &ln->obj_link);
			if (cond_resched_lock(&obj->lut_lock))
				list_safe_reset_next(&bookmark, ln, obj_link);
			__list_del_entry(&bookmark.obj_link);
		}
	}
	spin_unlock(&obj->lut_lock);

	spin_lock(&obj->mmo.lock);
	rbtree_postorder_for_each_entry_safe(mmo, mn, &obj->mmo.offsets, offset)
		drm_vma_node_revoke(&mmo->vma_node, file);
	spin_unlock(&obj->mmo.lock);

	list_for_each_entry_safe(lut, ln, &close, obj_link) {
		struct i915_gem_context *ctx = lut->ctx;
		struct i915_vma *vma;

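		/*
		 * A process may hold multiple handles to the same vma (e.g.
		 * via flink/open), so each lut entry removes its own
		 * handle -> vma translation under the context's lut_mutex.
		 */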
		mutex_lock(&ctx->lut_mutex);
		vma = radix_tree_delete(&ctx->handles_vma, lut->handle);
		if (vma) {
			GEM_BUG_ON(vma->obj != obj);
			GEM_BUG_ON(!atomic_read(&vma->open_count));
			i915_vma_close(vma);
		}
		mutex_unlock(&ctx->lut_mutex);

		i915_gem_context_put(lut->ctx);
		i915_lut_handle_free(lut);
		i915_gem_object_put(obj);
	}
}

void __i915_gem_free_object_rcu(struct rcu_head *head)
{
	struct drm_i915_gem_object *obj =
		container_of(head, typeof(*obj), rcu);
	struct drm_i915_private *i915 = to_i915(obj->base.dev);

	dma_resv_fini(&obj->base._resv);
	i915_gem_object_free(obj);

	GEM_BUG_ON(!atomic_read(&i915->mm.free_count));
	atomic_dec(&i915->mm.free_count);
}

static void __i915_gem_object_free_mmaps(struct drm_i915_gem_object *obj)
{
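	/* Skip revoking mmaps (and waking the device) if nothing is mapped. */
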
	if (obj->userfault_count)
		i915_gem_object_release_mmap_gtt(obj);

	if (!RB_EMPTY_ROOT(&obj->mmo.offsets)) {
		struct i915_mmap_offset *mmo, *mn;

		i915_gem_object_release_mmap_offset(obj);

		rbtree_postorder_for_each_entry_safe(mmo, mn,
						     &obj->mmo.offsets,
						     offset) {
			drm_vma_offset_remove(obj->base.dev->vma_offset_manager,
					      &mmo->vma_node);
			kfree(mmo);
		}
		obj->mmo.offsets = RB_ROOT;
	}
}

void __i915_gem_free_object(struct drm_i915_gem_object *obj)
{
	trace_i915_gem_object_destroy(obj);

	if (!list_empty(&obj->vma.list)) {
		struct i915_vma *vma;

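		/*
		 * An active vma holds an object reference, so by the time we
		 * get here the vmas should be idle. The final __i915_vma_put()
		 * may still sleep, so drop vma.lock around it and restart the
		 * walk from the head of the list each iteration.
		 */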
		spin_lock(&obj->vma.lock);
		while ((vma = list_first_entry_or_null(&obj->vma.list,
						       struct i915_vma,
						       obj_link))) {
			GEM_BUG_ON(vma->obj != obj);
			spin_unlock(&obj->vma.lock);

			__i915_vma_put(vma);

			spin_lock(&obj->vma.lock);
		}
		spin_unlock(&obj->vma.lock);
	}

	__i915_gem_object_free_mmaps(obj);

	GEM_BUG_ON(!list_empty(&obj->lut_list));

	atomic_set(&obj->mm.pages_pin_count, 0);
	__i915_gem_object_put_pages(obj);
	GEM_BUG_ON(i915_gem_object_has_pages(obj));
	bitmap_free(obj->bit_17);

	if (obj->base.import_attach)
		drm_prime_gem_destroy(&obj->base, NULL);

	drm_gem_free_mmap_offset(&obj->base);

	if (obj->ops->release)
		obj->ops->release(obj);

	if (obj->mm.n_placements > 1)
		kfree(obj->mm.placements);

	if (obj->shares_resv_from)
		i915_vm_resv_put(obj->shares_resv_from);
}

static void __i915_gem_free_objects(struct drm_i915_private *i915,
				    struct llist_node *freed)
{
	struct drm_i915_gem_object *obj, *on;

	llist_for_each_entry_safe(obj, on, freed, freed) {
		might_sleep();
		if (obj->ops->delayed_free) {
			obj->ops->delayed_free(obj);
			continue;
		}
		__i915_gem_free_object(obj);

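		/*
		 * Keep the allocation alive for RCU-protected lookups; the
		 * final kfree happens only after a grace period.
		 */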
		call_rcu(&obj->rcu, __i915_gem_free_object_rcu);
		cond_resched();
	}
}

void i915_gem_flush_free_objects(struct drm_i915_private *i915)
{
	struct llist_node *freed = llist_del_all(&i915->mm.free_list);

	if (unlikely(freed))
		__i915_gem_free_objects(i915, freed);
}

static void __i915_gem_free_work(struct work_struct *work)
{
	struct drm_i915_private *i915 =
		container_of(work, struct drm_i915_private, mm.free_work);

	i915_gem_flush_free_objects(i915);
}

static void i915_gem_free_object(struct drm_gem_object *gem_obj)
{
	struct drm_i915_gem_object *obj = to_intel_bo(gem_obj);
	struct drm_i915_private *i915 = to_i915(obj->base.dev);

	GEM_BUG_ON(i915_gem_object_is_framebuffer(obj));

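	/*
	 * Account for the object while it sits on the deferred free path;
	 * the matching decrement happens in __i915_gem_free_object_rcu()
	 * once any RCU-only read-side lookups have completed.
	 */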
	atomic_inc(&i915->mm.free_count);

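	/*
	 * Remove the object from the shrinker's lists before the free is
	 * deferred (first to RCU, then to a worker), so the shrinker does
	 * not walk an object that is already being torn down.
	 */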
	i915_gem_object_make_unshrinkable(obj);

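	/*
	 * Releasing the object may take locks and sleep, which is not
	 * allowed in this context, so defer the actual teardown to a
	 * worker. The thread that adds the first entry to the empty llist
	 * is responsible for kicking the worker.
	 */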
	if (llist_add(&obj->freed, &i915->mm.free_list))
		queue_work(i915->wq, &i915->mm.free_work);
}

void __i915_gem_object_flush_frontbuffer(struct drm_i915_gem_object *obj,
					 enum fb_op_origin origin)
{
	struct intel_frontbuffer *front;

	front = __intel_frontbuffer_get(obj);
	if (front) {
		intel_frontbuffer_flush(front, origin);
		intel_frontbuffer_put(front);
	}
}

void __i915_gem_object_invalidate_frontbuffer(struct drm_i915_gem_object *obj,
					      enum fb_op_origin origin)
{
	struct intel_frontbuffer *front;

	front = __intel_frontbuffer_get(obj);
	if (front) {
		intel_frontbuffer_invalidate(front, origin);
		intel_frontbuffer_put(front);
	}
}

static void
i915_gem_object_read_from_page_kmap(struct drm_i915_gem_object *obj, u64 offset, void *dst, int size)
{
	void *src_map;
	void *src_ptr;

	src_map = kmap_atomic(i915_gem_object_get_page(obj, offset >> PAGE_SHIFT));

	src_ptr = src_map + offset_in_page(offset);
	if (!(obj->cache_coherent & I915_BO_CACHE_COHERENT_FOR_READ))
		drm_clflush_virt_range(src_ptr, size);
	memcpy(dst, src_ptr, size);

	kunmap_atomic(src_map);
}

static void
i915_gem_object_read_from_page_iomap(struct drm_i915_gem_object *obj, u64 offset, void *dst, int size)
{
	void __iomem *src_map;
	void __iomem *src_ptr;
	dma_addr_t dma = i915_gem_object_get_dma_address(obj, offset >> PAGE_SHIFT);

	src_map = io_mapping_map_wc(&obj->mm.region->iomap,
				    dma - obj->mm.region->region.start,
				    PAGE_SIZE);

	src_ptr = src_map + offset_in_page(offset);
	if (!i915_memcpy_from_wc(dst, (void __force *)src_ptr, size))
		memcpy_fromio(dst, src_ptr, size);

	io_mapping_unmap(src_map);
}

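/**
 * i915_gem_object_read_from_page - read data from the page of a GEM object
 * @obj: GEM object to read from
 * @offset: offset within the object
 * @dst: buffer to store the read data
 * @size: size to read
 *
 * Reads @size bytes from @obj at @offset. The requested region must not
 * cross a page boundary, and the caller must have pinned the object's pages.
 *
 * Return: %0 on success, or -ENODEV if the object's backing store type is
 * not supported.
 */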
int i915_gem_object_read_from_page(struct drm_i915_gem_object *obj, u64 offset, void *dst, int size)
{
	GEM_BUG_ON(offset >= obj->base.size);
	GEM_BUG_ON(offset_in_page(offset) > PAGE_SIZE - size);
	GEM_BUG_ON(!i915_gem_object_has_pinned_pages(obj));

	if (i915_gem_object_has_struct_page(obj))
		i915_gem_object_read_from_page_kmap(obj, offset, dst, size);
	else if (i915_gem_object_has_iomem(obj))
		i915_gem_object_read_from_page_iomap(obj, offset, dst, size);
	else
		return -ENODEV;

	return 0;
}

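/**
 * i915_gem_object_evictable - whether the object is likely evictable
 * @obj: the object to check
 *
 * Checks whether every pin on the object's pages is accounted for by a
 * bound but unpinned vma, in which case unbinding those vmas should make
 * the object evictable. Without the object lock held the answer is only
 * advisory, as vmas may be pinned or unbound concurrently.
 *
 * Return: true if the object may be evictable, false otherwise.
 */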
bool i915_gem_object_evictable(struct drm_i915_gem_object *obj)
{
	struct i915_vma *vma;
	int pin_count = atomic_read(&obj->mm.pages_pin_count);

	if (!pin_count)
		return true;

	spin_lock(&obj->vma.lock);
	list_for_each_entry(vma, &obj->vma.list, obj_link) {
		if (i915_vma_is_pinned(vma)) {
			spin_unlock(&obj->vma.lock);
			return false;
		}
		if (atomic_read(&vma->pages_count))
			pin_count--;
	}
	spin_unlock(&obj->vma.lock);
	GEM_WARN_ON(pin_count < 0);

	return pin_count == 0;
}

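/**
 * i915_gem_object_migratable - whether the object can be migrated between
 * memory regions
 * @obj: the object to check
 *
 * Return: true if the object is backed by a memory region and was created
 * with more than one possible placement, false otherwise.
 */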
bool i915_gem_object_migratable(struct drm_i915_gem_object *obj)
{
	struct intel_memory_region *mr = READ_ONCE(obj->mm.region);

	if (!mr)
		return false;

	return obj->mm.n_placements > 1;
}

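/**
 * i915_gem_object_has_struct_page - whether the object is backed by
 * struct pages
 * @obj: the object to check
 *
 * On discrete GPUs the backing store type is only stable while the object
 * cannot be migrated, so lockdep asserts the object is held (shared) when
 * it is evictable.
 *
 * Return: true if the object is backed by struct pages, false otherwise.
 */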
bool i915_gem_object_has_struct_page(const struct drm_i915_gem_object *obj)
{
#ifdef CONFIG_LOCKDEP
	if (IS_DGFX(to_i915(obj->base.dev)) &&
	    i915_gem_object_evictable((void __force *)obj))
		assert_object_held_shared(obj);
#endif
	return obj->mem_flags & I915_BO_FLAG_STRUCT_PAGE;
}

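/**
 * i915_gem_object_has_iomem - whether the object is backed by I/O memory
 * @obj: the object to check
 *
 * As with i915_gem_object_has_struct_page(), the answer is only stable
 * while the object is locked or otherwise prevented from being migrated.
 *
 * Return: true if the object is backed by I/O memory, false otherwise.
 */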
bool i915_gem_object_has_iomem(const struct drm_i915_gem_object *obj)
{
#ifdef CONFIG_LOCKDEP
	if (IS_DGFX(to_i915(obj->base.dev)) &&
	    i915_gem_object_evictable((void __force *)obj))
		assert_object_held_shared(obj);
#endif
	return obj->mem_flags & I915_BO_FLAG_IOMEM;
}

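/**
 * i915_gem_object_can_migrate - whether the object may be migrated to the
 * given memory region
 * @obj: the object to migrate
 * @id: the id of the destination memory region
 *
 * Migration is possible if the destination region exists, the object is
 * evictable, the backend implements a migrate hook and, for objects created
 * by userspace, the destination is one of the object's allowed placements.
 * An object already resident in the destination region is trivially
 * migratable. The result is only stable while the object lock is held.
 *
 * Return: true if the migration is possible, false otherwise.
 */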
bool i915_gem_object_can_migrate(struct drm_i915_gem_object *obj,
				 enum intel_region_id id)
{
	struct drm_i915_private *i915 = to_i915(obj->base.dev);
	unsigned int num_allowed = obj->mm.n_placements;
	struct intel_memory_region *mr;
	unsigned int i;

	GEM_BUG_ON(id >= INTEL_REGION_UNKNOWN);
	GEM_BUG_ON(obj->mm.madv != I915_MADV_WILLNEED);

	mr = i915->mm.regions[id];
	if (!mr)
		return false;

	if (obj->mm.region == mr)
		return true;

	if (!i915_gem_object_evictable(obj))
		return false;

	if (!obj->ops->migrate)
		return false;

	if (!(obj->flags & I915_BO_ALLOC_USER))
		return true;

	if (num_allowed == 0)
		return false;

	for (i = 0; i < num_allowed; ++i) {
		if (mr == obj->mm.placements[i])
			return true;
	}

	return false;
}

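/**
 * i915_gem_object_migrate - migrate the object to the given memory region
 * @obj: the object to migrate
 * @ww: i915 gem ww acquire context, or NULL
 * @id: the id of the destination memory region
 *
 * The caller must hold the object lock. The migration itself is performed
 * by the object's backend; if the backend provides no migrate hook the
 * object must already reside in the destination region.
 *
 * Return: 0 on success, or a negative error code such as -EINVAL if the
 * migration is not allowed (see i915_gem_object_can_migrate()).
 */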
int i915_gem_object_migrate(struct drm_i915_gem_object *obj,
			    struct i915_gem_ww_ctx *ww,
			    enum intel_region_id id)
{
	struct drm_i915_private *i915 = to_i915(obj->base.dev);
	struct intel_memory_region *mr;

	GEM_BUG_ON(id >= INTEL_REGION_UNKNOWN);
	GEM_BUG_ON(obj->mm.madv != I915_MADV_WILLNEED);
	assert_object_held(obj);

	mr = i915->mm.regions[id];
	GEM_BUG_ON(!mr);

	if (!i915_gem_object_can_migrate(obj, id))
		return -EINVAL;

	if (!obj->ops->migrate) {
		if (GEM_WARN_ON(obj->mm.region != mr))
			return -EINVAL;
		return 0;
	}

	return obj->ops->migrate(obj, mr);
}

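/**
 * i915_gem_object_placement_possible - whether the object can be placed in
 * memory of the given type
 * @obj: the object to check
 * @type: the memory type to check against
 *
 * For objects created without an explicit placement list the answer is
 * derived from the current backing store; otherwise the allowed placements
 * are searched for a region of @type.
 *
 * Return: true if the placement is possible, false otherwise.
 */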
bool i915_gem_object_placement_possible(struct drm_i915_gem_object *obj,
					enum intel_memory_type type)
{
	unsigned int i;

	if (!obj->mm.n_placements) {
		switch (type) {
		case INTEL_MEMORY_LOCAL:
			return i915_gem_object_has_iomem(obj);
		case INTEL_MEMORY_SYSTEM:
			return i915_gem_object_has_pages(obj);
		default:
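			/* Only lmem- or smem-backed objects are expected here. */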
			GEM_BUG_ON(1);
			return false;
		}
	}

	for (i = 0; i < obj->mm.n_placements; i++) {
		if (obj->mm.placements[i]->type == type)
			return true;
	}

	return false;
}

void i915_gem_init__objects(struct drm_i915_private *i915)
{
	INIT_WORK(&i915->mm.free_work, __i915_gem_free_work);
}

void i915_objects_module_exit(void)
{
	kmem_cache_destroy(slab_objects);
}

int __init i915_objects_module_init(void)
{
	slab_objects = KMEM_CACHE(drm_i915_gem_object, SLAB_HWCACHE_ALIGN);
	if (!slab_objects)
		return -ENOMEM;

	return 0;
}

static const struct drm_gem_object_funcs i915_gem_object_funcs = {
	.free = i915_gem_free_object,
	.close = i915_gem_close_object,
	.export = i915_gem_prime_export,
};

#if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
#include "selftests/huge_gem_object.c"
#include "selftests/huge_pages.c"
#include "selftests/i915_gem_migrate.c"
#include "selftests/i915_gem_object.c"
#include "selftests/i915_gem_coherency.c"
#endif