1
2
3
4
5
6
7#ifndef __I915_GEM_OBJECT_H__
8#define __I915_GEM_OBJECT_H__
9
10#include <drm/drm_gem.h>
11#include <drm/drm_file.h>
12#include <drm/drm_device.h>
13
14#include "display/intel_frontbuffer.h"
15#include "intel_memory_region.h"
16#include "i915_gem_object_types.h"
17#include "i915_gem_gtt.h"
18#include "i915_gem_ww.h"
19#include "i915_vma_types.h"
20
21enum intel_region_id;
22
23
24
25
26
27
28
29
30
31
32
/*
 * Evaluates to true (emitting a one-time warning) when the byte size @sz
 * spans more than INT_MAX pages, i.e. a page count that no longer fits in
 * a signed int.
 */
#define GEM_CHECK_SIZE_OVERFLOW(sz) \
	GEM_WARN_ON((sz) >> PAGE_SHIFT > INT_MAX)
35
/*
 * i915_gem_object_size_2big - check whether an object size is representable
 * @size: requested object size in bytes
 *
 * Returns true when @size must be rejected: either it covers more than
 * INT_MAX pages, or it would overflow the obj->base.size field.  Note that
 * @obj is never dereferenced -- it exists only so overflows_type() can
 * inspect the type of base.size.
 */
static inline bool i915_gem_object_size_2big(u64 size)
{
	struct drm_i915_gem_object *obj;

	if (GEM_CHECK_SIZE_OVERFLOW(size))
		return true;

	if (overflows_type(size, obj->base.size))
		return true;

	return false;
}
48
49void i915_gem_init__objects(struct drm_i915_private *i915);
50
51void i915_objects_module_exit(void);
52int i915_objects_module_init(void);
53
54struct drm_i915_gem_object *i915_gem_object_alloc(void);
55void i915_gem_object_free(struct drm_i915_gem_object *obj);
56
57void i915_gem_object_init(struct drm_i915_gem_object *obj,
58 const struct drm_i915_gem_object_ops *ops,
59 struct lock_class_key *key,
60 unsigned alloc_flags);
61struct drm_i915_gem_object *
62i915_gem_object_create_shmem(struct drm_i915_private *i915,
63 resource_size_t size);
64struct drm_i915_gem_object *
65i915_gem_object_create_shmem_from_data(struct drm_i915_private *i915,
66 const void *data, resource_size_t size);
67struct drm_i915_gem_object *
68__i915_gem_object_create_user(struct drm_i915_private *i915, u64 size,
69 struct intel_memory_region **placements,
70 unsigned int n_placements);
71
72extern const struct drm_i915_gem_object_ops i915_gem_shmem_ops;
73
74void __i915_gem_object_release_shmem(struct drm_i915_gem_object *obj,
75 struct sg_table *pages,
76 bool needs_clflush);
77
78int i915_gem_object_pwrite_phys(struct drm_i915_gem_object *obj,
79 const struct drm_i915_gem_pwrite *args);
80int i915_gem_object_pread_phys(struct drm_i915_gem_object *obj,
81 const struct drm_i915_gem_pread *args);
82
83int i915_gem_object_attach_phys(struct drm_i915_gem_object *obj, int align);
84void i915_gem_object_put_pages_shmem(struct drm_i915_gem_object *obj,
85 struct sg_table *pages);
86void i915_gem_object_put_pages_phys(struct drm_i915_gem_object *obj,
87 struct sg_table *pages);
88
89void i915_gem_flush_free_objects(struct drm_i915_private *i915);
90
91struct sg_table *
92__i915_gem_object_unset_pages(struct drm_i915_gem_object *obj);
93void i915_gem_object_truncate(struct drm_i915_gem_object *obj);
94
95
96
97
98
99
100
101
102
103
104
105
/*
 * i915_gem_object_lookup_rcu - look up an object by handle, no reference
 * @file: DRM file private providing the handle namespace (object_idr)
 * @handle: userspace object handle
 *
 * Must be called inside an RCU read-side critical section (asserted via
 * lockdep when enabled).  No reference is taken: acquire one with
 * i915_gem_object_get_rcu() before leaving the RCU section, otherwise the
 * object may be freed from under the caller.
 */
static inline struct drm_i915_gem_object *
i915_gem_object_lookup_rcu(struct drm_file *file, u32 handle)
{
#ifdef CONFIG_LOCKDEP
	WARN_ON(debug_locks && !lock_is_held(&rcu_lock_map));
#endif
	return idr_find(&file->object_idr, handle);
}
114
115static inline struct drm_i915_gem_object *
116i915_gem_object_get_rcu(struct drm_i915_gem_object *obj)
117{
118 if (obj && !kref_get_unless_zero(&obj->base.refcount))
119 obj = NULL;
120
121 return obj;
122}
123
/*
 * i915_gem_object_lookup - translate a userspace handle into a reference
 * @file: DRM file private providing the handle namespace
 * @handle: userspace object handle
 *
 * Returns a referenced object, or NULL if the handle is unknown or the
 * object is already being destroyed.  The RCU read lock bridges the idr
 * lookup and the refcount acquisition so the object cannot be freed in
 * between.
 */
static inline struct drm_i915_gem_object *
i915_gem_object_lookup(struct drm_file *file, u32 handle)
{
	struct drm_i915_gem_object *obj;

	rcu_read_lock();
	obj = i915_gem_object_lookup_rcu(file, handle);
	obj = i915_gem_object_get_rcu(obj);
	rcu_read_unlock();

	return obj;
}
136
137__deprecated
138struct drm_gem_object *
139drm_gem_object_lookup(struct drm_file *file, u32 handle);
140
/* Acquire a reference on the underlying GEM object; returns @obj to allow
 * call chaining.  @obj must not be NULL (enforced by the attribute).
 */
__attribute__((nonnull))
static inline struct drm_i915_gem_object *
i915_gem_object_get(struct drm_i915_gem_object *obj)
{
	drm_gem_object_get(&obj->base);
	return obj;
}
148
/* Release a reference taken by i915_gem_object_get()/lookup(); dropping the
 * last reference frees the object, so @obj must not be used afterwards.
 */
__attribute__((nonnull))
static inline void
i915_gem_object_put(struct drm_i915_gem_object *obj)
{
	__drm_gem_object_put(&obj->base);
}
155
/* Lockdep-assert that the object's dma-resv lock is held by the caller. */
#define assert_object_held(obj) dma_resv_assert_held((obj)->base.resv)
157
158
159
160
/*
 * Lockdep-only assertion that the object lock is held, skipped once the
 * refcount has dropped to zero (presumably the final-free path, where no
 * other user can race with us -- confirm against the free paths).
 */
static inline void assert_object_held_shared(const struct drm_i915_gem_object *obj)
{
	if (IS_ENABLED(CONFIG_LOCKDEP) &&
	    kref_read(&obj->base.refcount) > 0)
		assert_object_held(obj);
}
171
/*
 * __i915_gem_object_lock - lock an object's dma-resv, optionally inside a
 * ww transaction
 * @obj: object to lock
 * @ww: optional ww acquire context
 * @intr: whether the wait for the lock is interruptible
 *
 * On success with @ww, a reference is taken and the object is queued on
 * ww->obj_list so the ww machinery can unlock it later.  -EALREADY (object
 * already locked in this transaction) is reported as success but the
 * object is deliberately NOT queued a second time.  On -EDEADLK the object
 * is referenced and stashed in ww->contended for the backoff path.
 *
 * Returns 0 on success or a negative error code (-EDEADLK, -EINTR, ...).
 */
static inline int __i915_gem_object_lock(struct drm_i915_gem_object *obj,
					 struct i915_gem_ww_ctx *ww,
					 bool intr)
{
	int ret;

	if (intr)
		ret = dma_resv_lock_interruptible(obj->base.resv, ww ? &ww->ctx : NULL);
	else
		ret = dma_resv_lock(obj->base.resv, ww ? &ww->ctx : NULL);

	/* Track the locked object; the reference pins it until ww teardown. */
	if (!ret && ww) {
		i915_gem_object_get(obj);
		list_add_tail(&obj->obj_link, &ww->obj_list);
	}
	if (ret == -EALREADY)
		ret = 0;

	if (ret == -EDEADLK) {
		i915_gem_object_get(obj);
		ww->contended = obj;
	}

	return ret;
}
197
198static inline int i915_gem_object_lock(struct drm_i915_gem_object *obj,
199 struct i915_gem_ww_ctx *ww)
200{
201 return __i915_gem_object_lock(obj, ww, ww && ww->intr);
202}
203
204static inline int i915_gem_object_lock_interruptible(struct drm_i915_gem_object *obj,
205 struct i915_gem_ww_ctx *ww)
206{
207 WARN_ON(ww && !ww->intr);
208 return __i915_gem_object_lock(obj, ww, true);
209}
210
/* Opportunistically take the object's dma-resv lock; true on success. */
static inline bool i915_gem_object_trylock(struct drm_i915_gem_object *obj)
{
	return dma_resv_trylock(obj->base.resv);
}
215
/*
 * Drop the object's dma-resv lock, first giving the backend a chance to
 * update its LRU placement -- adjust_lru must run while the lock is still
 * held, hence the ordering here.
 */
static inline void i915_gem_object_unlock(struct drm_i915_gem_object *obj)
{
	if (obj->ops->adjust_lru)
		obj->ops->adjust_lru(obj);

	dma_resv_unlock(obj->base.resv);
}
223
/* Mark the object read-only (no clearing helper is provided here). */
static inline void
i915_gem_object_set_readonly(struct drm_i915_gem_object *obj)
{
	obj->flags |= I915_BO_READONLY;
}
229
230static inline bool
231i915_gem_object_is_readonly(const struct drm_i915_gem_object *obj)
232{
233 return obj->flags & I915_BO_READONLY;
234}
235
236static inline bool
237i915_gem_object_is_contiguous(const struct drm_i915_gem_object *obj)
238{
239 return obj->flags & I915_BO_ALLOC_CONTIGUOUS;
240}
241
242static inline bool
243i915_gem_object_is_volatile(const struct drm_i915_gem_object *obj)
244{
245 return obj->flags & I915_BO_ALLOC_VOLATILE;
246}
247
/* Set the I915_BO_ALLOC_VOLATILE flag on an existing object. */
static inline void
i915_gem_object_set_volatile(struct drm_i915_gem_object *obj)
{
	obj->flags |= I915_BO_ALLOC_VOLATILE;
}
253
254static inline bool
255i915_gem_object_has_tiling_quirk(struct drm_i915_gem_object *obj)
256{
257 return test_bit(I915_TILING_QUIRK_BIT, &obj->flags);
258}
259
/* Atomically set the tiling-quirk bit in obj->flags. */
static inline void
i915_gem_object_set_tiling_quirk(struct drm_i915_gem_object *obj)
{
	set_bit(I915_TILING_QUIRK_BIT, &obj->flags);
}
265
/* Atomically clear the tiling-quirk bit in obj->flags. */
static inline void
i915_gem_object_clear_tiling_quirk(struct drm_i915_gem_object *obj)
{
	clear_bit(I915_TILING_QUIRK_BIT, &obj->flags);
}
271
272static inline bool
273i915_gem_object_type_has(const struct drm_i915_gem_object *obj,
274 unsigned long flags)
275{
276 return obj->ops->flags & flags;
277}
278
279bool i915_gem_object_has_struct_page(const struct drm_i915_gem_object *obj);
280
281bool i915_gem_object_has_iomem(const struct drm_i915_gem_object *obj);
282
/* Whether the backend declares this object's pages reclaimable by the
 * shrinker.
 */
static inline bool
i915_gem_object_is_shrinkable(const struct drm_i915_gem_object *obj)
{
	return i915_gem_object_type_has(obj, I915_GEM_OBJECT_IS_SHRINKABLE);
}
288
/* Whether the backend declares this object a proxy (IS_PROXY ops flag). */
static inline bool
i915_gem_object_is_proxy(const struct drm_i915_gem_object *obj)
{
	return i915_gem_object_type_has(obj, I915_GEM_OBJECT_IS_PROXY);
}
294
/* Whether the backend forbids mmap of this object (NO_MMAP ops flag). */
static inline bool
i915_gem_object_never_mmap(const struct drm_i915_gem_object *obj)
{
	return i915_gem_object_type_has(obj, I915_GEM_OBJECT_NO_MMAP);
}
300
301static inline bool
302i915_gem_object_is_framebuffer(const struct drm_i915_gem_object *obj)
303{
304 return READ_ONCE(obj->frontbuffer);
305}
306
307static inline unsigned int
308i915_gem_object_get_tiling(const struct drm_i915_gem_object *obj)
309{
310 return obj->tiling_and_stride & TILING_MASK;
311}
312
313static inline bool
314i915_gem_object_is_tiled(const struct drm_i915_gem_object *obj)
315{
316 return i915_gem_object_get_tiling(obj) != I915_TILING_NONE;
317}
318
319static inline unsigned int
320i915_gem_object_get_stride(const struct drm_i915_gem_object *obj)
321{
322 return obj->tiling_and_stride & STRIDE_MASK;
323}
324
325static inline unsigned int
326i915_gem_tile_height(unsigned int tiling)
327{
328 GEM_BUG_ON(!tiling);
329 return tiling == I915_TILING_Y ? 32 : 8;
330}
331
/* Tile height in rows for the object's current tiling mode. */
static inline unsigned int
i915_gem_object_get_tile_height(const struct drm_i915_gem_object *obj)
{
	return i915_gem_tile_height(i915_gem_object_get_tiling(obj));
}
337
/* Bytes per row of tiles: stride multiplied by the tile height. */
static inline unsigned int
i915_gem_object_get_tile_row_size(const struct drm_i915_gem_object *obj)
{
	unsigned int stride = i915_gem_object_get_stride(obj);
	unsigned int height = i915_gem_object_get_tile_height(obj);

	return stride * height;
}
344
345int i915_gem_object_set_tiling(struct drm_i915_gem_object *obj,
346 unsigned int tiling, unsigned int stride);
347
348struct scatterlist *
349__i915_gem_object_get_sg(struct drm_i915_gem_object *obj,
350 struct i915_gem_object_page_iter *iter,
351 unsigned int n,
352 unsigned int *offset, bool dma);
353
/*
 * Find the scatterlist entry covering CPU page @n, storing the page offset
 * within that entry in @offset.  Uses the cached CPU-page iterator
 * (obj->mm.get_page); see i915_gem_object_get_sg_dma() for DMA addresses.
 */
static inline struct scatterlist *
i915_gem_object_get_sg(struct drm_i915_gem_object *obj,
		       unsigned int n,
		       unsigned int *offset)
{
	return __i915_gem_object_get_sg(obj, &obj->mm.get_page, n, offset, false);
}
361
/*
 * DMA-address counterpart of i915_gem_object_get_sg(): walk the cached
 * DMA iterator (obj->mm.get_dma_page) to the entry covering page @n.
 */
static inline struct scatterlist *
i915_gem_object_get_sg_dma(struct drm_i915_gem_object *obj,
			   unsigned int n,
			   unsigned int *offset)
{
	return __i915_gem_object_get_sg(obj, &obj->mm.get_dma_page, n, offset, true);
}
369
370struct page *
371i915_gem_object_get_page(struct drm_i915_gem_object *obj,
372 unsigned int n);
373
374struct page *
375i915_gem_object_get_dirty_page(struct drm_i915_gem_object *obj,
376 unsigned int n);
377
378dma_addr_t
379i915_gem_object_get_dma_address_len(struct drm_i915_gem_object *obj,
380 unsigned long n,
381 unsigned int *len);
382
383dma_addr_t
384i915_gem_object_get_dma_address(struct drm_i915_gem_object *obj,
385 unsigned long n);
386
387void __i915_gem_object_set_pages(struct drm_i915_gem_object *obj,
388 struct sg_table *pages,
389 unsigned int sg_page_sizes);
390
391int ____i915_gem_object_get_pages(struct drm_i915_gem_object *obj);
392int __i915_gem_object_get_pages(struct drm_i915_gem_object *obj);
393
/*
 * Pin the object's backing pages, acquiring them first if necessary.
 * Fast path: bump mm.pages_pin_count when it is already non-zero.
 * Slow path: __i915_gem_object_get_pages(), which is expected to acquire
 * the pages and take the first pin.  The object lock must be held
 * (asserted).  Returns 0 on success or a negative error code.
 */
static inline int __must_check
i915_gem_object_pin_pages(struct drm_i915_gem_object *obj)
{
	assert_object_held(obj);

	if (atomic_inc_not_zero(&obj->mm.pages_pin_count))
		return 0;

	return __i915_gem_object_get_pages(obj);
}
404
405int i915_gem_object_pin_pages_unlocked(struct drm_i915_gem_object *obj);
406
407static inline bool
408i915_gem_object_has_pages(struct drm_i915_gem_object *obj)
409{
410 return !IS_ERR_OR_NULL(READ_ONCE(obj->mm.pages));
411}
412
/*
 * Take an additional pin on pages that must already be present
 * (asserted); cannot fail, unlike i915_gem_object_pin_pages().
 */
static inline void
__i915_gem_object_pin_pages(struct drm_i915_gem_object *obj)
{
	GEM_BUG_ON(!i915_gem_object_has_pages(obj));

	atomic_inc(&obj->mm.pages_pin_count);
}
420
421static inline bool
422i915_gem_object_has_pinned_pages(struct drm_i915_gem_object *obj)
423{
424 return atomic_read(&obj->mm.pages_pin_count);
425}
426
/*
 * Drop one pin on the object's pages.  The pages must be present and
 * pinned (both asserted); unbalanced unpins are a bug.
 */
static inline void
__i915_gem_object_unpin_pages(struct drm_i915_gem_object *obj)
{
	GEM_BUG_ON(!i915_gem_object_has_pages(obj));
	GEM_BUG_ON(!i915_gem_object_has_pinned_pages(obj));

	atomic_dec(&obj->mm.pages_pin_count);
}
435
/* Public wrapper: drop one pin taken by i915_gem_object_pin_pages(). */
static inline void
i915_gem_object_unpin_pages(struct drm_i915_gem_object *obj)
{
	__i915_gem_object_unpin_pages(obj);
}
441
442int __i915_gem_object_put_pages(struct drm_i915_gem_object *obj);
443void i915_gem_object_truncate(struct drm_i915_gem_object *obj);
444void i915_gem_object_writeback(struct drm_i915_gem_object *obj);
445
446
447
448
449
450
451
452
453
454
455
456
457
458
459
460
461
462void *__must_check i915_gem_object_pin_map(struct drm_i915_gem_object *obj,
463 enum i915_map_type type);
464
465void *__must_check i915_gem_object_pin_map_unlocked(struct drm_i915_gem_object *obj,
466 enum i915_map_type type);
467
468void __i915_gem_object_flush_map(struct drm_i915_gem_object *obj,
469 unsigned long offset,
470 unsigned long size);
/* Flush the entire object (offset 0 .. obj->base.size) through
 * __i915_gem_object_flush_map().
 */
static inline void i915_gem_object_flush_map(struct drm_i915_gem_object *obj)
{
	__i915_gem_object_flush_map(obj, 0, obj->base.size);
}
475
476
477
478
479
480
481
482
483
484
/*
 * Release the page pin taken by i915_gem_object_pin_map().
 * NOTE(review): only the pin is dropped here; the vmap itself presumably
 * stays cached until __i915_gem_object_release_map() -- confirm.
 */
static inline void i915_gem_object_unpin_map(struct drm_i915_gem_object *obj)
{
	i915_gem_object_unpin_pages(obj);
}
489
490void __i915_gem_object_release_map(struct drm_i915_gem_object *obj);
491
492int i915_gem_object_prepare_read(struct drm_i915_gem_object *obj,
493 unsigned int *needs_clflush);
494int i915_gem_object_prepare_write(struct drm_i915_gem_object *obj,
495 unsigned int *needs_clflush);
496#define CLFLUSH_BEFORE BIT(0)
497#define CLFLUSH_AFTER BIT(1)
498#define CLFLUSH_FLAGS (CLFLUSH_BEFORE | CLFLUSH_AFTER)
499
/*
 * End a CPU access window: drops the page pin presumably taken by
 * i915_gem_object_prepare_read()/prepare_write() -- always pair with one
 * of those.
 */
static inline void
i915_gem_object_finish_access(struct drm_i915_gem_object *obj)
{
	i915_gem_object_unpin_pages(obj);
}
505
/*
 * Peek at the engine behind the most recent exclusive (write) fence on
 * the object.  The fence is sampled under RCU and dropped before return,
 * so the result is only a racy hint: the request may retire at any time.
 * Returns NULL when there is no unsignaled i915 fence.
 */
static inline struct intel_engine_cs *
i915_gem_object_last_write_engine(struct drm_i915_gem_object *obj)
{
	struct intel_engine_cs *engine = NULL;
	struct dma_fence *fence;

	rcu_read_lock();
	fence = dma_resv_get_excl_unlocked(obj->base.resv);
	rcu_read_unlock();

	/* Only i915-owned, still-pending fences map back to an engine. */
	if (fence && dma_fence_is_i915(fence) && !dma_fence_is_signaled(fence))
		engine = to_request(fence)->engine;
	dma_fence_put(fence);

	return engine;
}
522
523void i915_gem_object_set_cache_coherency(struct drm_i915_gem_object *obj,
524 unsigned int cache_level);
525void i915_gem_object_flush_if_display(struct drm_i915_gem_object *obj);
526void i915_gem_object_flush_if_display_locked(struct drm_i915_gem_object *obj);
527
528int __must_check
529i915_gem_object_set_to_wc_domain(struct drm_i915_gem_object *obj, bool write);
530int __must_check
531i915_gem_object_set_to_gtt_domain(struct drm_i915_gem_object *obj, bool write);
532int __must_check
533i915_gem_object_set_to_cpu_domain(struct drm_i915_gem_object *obj, bool write);
534struct i915_vma * __must_check
535i915_gem_object_pin_to_display_plane(struct drm_i915_gem_object *obj,
536 struct i915_gem_ww_ctx *ww,
537 u32 alignment,
538 const struct i915_ggtt_view *view,
539 unsigned int flags);
540
541void i915_gem_object_make_unshrinkable(struct drm_i915_gem_object *obj);
542void i915_gem_object_make_shrinkable(struct drm_i915_gem_object *obj);
543void i915_gem_object_make_purgeable(struct drm_i915_gem_object *obj);
544
545static inline bool cpu_write_needs_clflush(struct drm_i915_gem_object *obj)
546{
547 if (obj->cache_dirty)
548 return false;
549
550 if (!(obj->cache_coherent & I915_BO_CACHE_COHERENT_FOR_WRITE))
551 return true;
552
553
554 return i915_gem_object_is_framebuffer(obj);
555}
556
/*
 * Begin CPU-domain bookkeeping for a write: the CPU becomes the sole read
 * and write domain, and cache_dirty is set when cpu_write_needs_clflush()
 * reports the write will leave stale data in the CPU cache.
 */
static inline void __start_cpu_write(struct drm_i915_gem_object *obj)
{
	obj->read_domains = I915_GEM_DOMAIN_CPU;
	obj->write_domain = I915_GEM_DOMAIN_CPU;
	if (cpu_write_needs_clflush(obj))
		obj->cache_dirty = true;
}
564
565void i915_gem_fence_wait_priority(struct dma_fence *fence,
566 const struct i915_sched_attr *attr);
567
568int i915_gem_object_wait(struct drm_i915_gem_object *obj,
569 unsigned int flags,
570 long timeout);
571int i915_gem_object_wait_priority(struct drm_i915_gem_object *obj,
572 unsigned int flags,
573 const struct i915_sched_attr *attr);
574
575void __i915_gem_object_flush_frontbuffer(struct drm_i915_gem_object *obj,
576 enum fb_op_origin origin);
577void __i915_gem_object_invalidate_frontbuffer(struct drm_i915_gem_object *obj,
578 enum fb_op_origin origin);
579
/*
 * Notify frontbuffer tracking that the object's contents were flushed.
 * The inline rcu_access_pointer() test skips the out-of-line call for the
 * common case of no attached frontbuffer.
 */
static inline void
i915_gem_object_flush_frontbuffer(struct drm_i915_gem_object *obj,
				  enum fb_op_origin origin)
{
	if (unlikely(rcu_access_pointer(obj->frontbuffer)))
		__i915_gem_object_flush_frontbuffer(obj, origin);
}
587
/*
 * Notify frontbuffer tracking of an impending write to the object; a
 * no-op (without the function call) when no frontbuffer is attached.
 */
static inline void
i915_gem_object_invalidate_frontbuffer(struct drm_i915_gem_object *obj,
				       enum fb_op_origin origin)
{
	if (unlikely(rcu_access_pointer(obj->frontbuffer)))
		__i915_gem_object_invalidate_frontbuffer(obj, origin);
}
595
596int i915_gem_object_read_from_page(struct drm_i915_gem_object *obj, u64 offset, void *dst, int size);
597
598bool i915_gem_object_is_shmem(const struct drm_i915_gem_object *obj);
599
600void __i915_gem_free_object_rcu(struct rcu_head *head);
601
602void __i915_gem_free_object(struct drm_i915_gem_object *obj);
603
604bool i915_gem_object_evictable(struct drm_i915_gem_object *obj);
605
606bool i915_gem_object_migratable(struct drm_i915_gem_object *obj);
607
608int i915_gem_object_migrate(struct drm_i915_gem_object *obj,
609 struct i915_gem_ww_ctx *ww,
610 enum intel_region_id id);
611
612bool i915_gem_object_can_migrate(struct drm_i915_gem_object *obj,
613 enum intel_region_id id);
614
615int i915_gem_object_wait_migration(struct drm_i915_gem_object *obj,
616 unsigned int flags);
617
618bool i915_gem_object_placement_possible(struct drm_i915_gem_object *obj,
619 enum intel_memory_type type);
620
621#ifdef CONFIG_MMU_NOTIFIER
622static inline bool
623i915_gem_object_is_userptr(struct drm_i915_gem_object *obj)
624{
625 return obj->userptr.notifier.mm;
626}
627
628int i915_gem_object_userptr_submit_init(struct drm_i915_gem_object *obj);
629int i915_gem_object_userptr_submit_done(struct drm_i915_gem_object *obj);
630int i915_gem_object_userptr_validate(struct drm_i915_gem_object *obj);
631#else
/* Without CONFIG_MMU_NOTIFIER userptr objects cannot exist: the predicate
 * is constant-false, and the submit/validate entry points must never be
 * reached (GEM_BUG_ON), returning -ENODEV as a fallback.
 */
static inline bool i915_gem_object_is_userptr(struct drm_i915_gem_object *obj) { return false; }

static inline int i915_gem_object_userptr_submit_init(struct drm_i915_gem_object *obj) { GEM_BUG_ON(1); return -ENODEV; }
static inline int i915_gem_object_userptr_submit_done(struct drm_i915_gem_object *obj) { GEM_BUG_ON(1); return -ENODEV; }
static inline int i915_gem_object_userptr_validate(struct drm_i915_gem_object *obj) { GEM_BUG_ON(1); return -ENODEV; }
637
638#endif
639
640#endif
641