1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25#include "i915_drv.h"
26#include "intel_frontbuffer.h"
27#include "i915_gem_clflush.h"
28
/* Single global lock backing every clflush dma_fence (passed to dma_fence_init). */
static DEFINE_SPINLOCK(clflush_lock);

/*
 * State for one asynchronous CPU-cache flush of an object's backing pages.
 *
 * @dma:  fence exported to the object's reservation object; signalled when
 *        the flush work completes. Must be the FIRST member — see the
 *        BUILD_BUG_ON in i915_clflush_release(), which frees the whole
 *        struct through &clflush->dma.
 * @wait: software fence that gates the flush until all fences already in
 *        the object's reservation object have signalled.
 * @work: deferred work item that performs the actual clflush.
 * @obj:  object being flushed; a reference is held for the lifetime of the
 *        work (taken in i915_gem_clflush_object, dropped in i915_clflush_work).
 */
struct clflush {
	struct dma_fence dma;
	struct i915_sw_fence wait;
	struct work_struct work;
	struct drm_i915_gem_object *obj;
};
37
/* dma_fence_ops hook: identify the driver that owns this fence. */
static const char *i915_clflush_get_driver_name(struct dma_fence *fence)
{
	return DRIVER_NAME;
}
42
/* dma_fence_ops hook: all clflush fences share one named timeline. */
static const char *i915_clflush_get_timeline_name(struct dma_fence *fence)
{
	return "clflush";
}
47
/*
 * dma_fence_ops hook: signalling needs no extra enabling — the work item
 * always calls dma_fence_signal() when the flush finishes, so just report
 * that the fence will signal.
 */
static bool i915_clflush_enable_signaling(struct dma_fence *fence)
{
	return true;
}
52
/*
 * dma_fence_ops release hook: runs when the last reference to the fence is
 * dropped. Tears down the embedded sw_fence, then frees the containing
 * struct clflush through the fence pointer.
 */
static void i915_clflush_release(struct dma_fence *fence)
{
	struct clflush *clflush = container_of(fence, typeof(*clflush), dma);

	i915_sw_fence_fini(&clflush->wait);

	/*
	 * dma must sit at offset 0 so that freeing &clflush->dma frees the
	 * whole allocation made in i915_gem_clflush_object().
	 */
	BUILD_BUG_ON(offsetof(typeof(*clflush), dma));
	dma_fence_free(&clflush->dma);
}
62
/* dma_fence vtable for the clflush fence; uses the default wait path. */
static const struct dma_fence_ops i915_clflush_ops = {
	.get_driver_name = i915_clflush_get_driver_name,
	.get_timeline_name = i915_clflush_get_timeline_name,
	.enable_signaling = i915_clflush_enable_signaling,
	.wait = dma_fence_default_wait,
	.release = i915_clflush_release,
};
70
/*
 * Perform the actual cache flush: clflush every backing page of @obj, then
 * tell the frontbuffer tracking that a CPU-originated flush happened.
 * Caller must guarantee the pages are pinned (asserted below).
 */
static void __i915_do_clflush(struct drm_i915_gem_object *obj)
{
	GEM_BUG_ON(!i915_gem_object_has_pages(obj));
	drm_clflush_sg(obj->mm.pages);
	intel_fb_obj_flush(obj, ORIGIN_CPU);
}
77
/*
 * Deferred flush: runs once the sw_fence has fired (all prior fences on the
 * object's reservation signalled). Pins the pages, flushes them, then drops
 * the object reference taken in i915_gem_clflush_object() and signals the
 * exported dma_fence.
 */
static void i915_clflush_work(struct work_struct *work)
{
	struct clflush *clflush = container_of(work, typeof(*clflush), work);
	struct drm_i915_gem_object *obj = clflush->obj;

	if (i915_gem_object_pin_pages(obj)) {
		DRM_ERROR("Failed to acquire obj->pages for clflushing\n");
		goto out;
	}

	__i915_do_clflush(obj);

	i915_gem_object_unpin_pages(obj);

out:
	i915_gem_object_put(obj);

	/*
	 * NOTE(review): the fence is signalled even on the pin-failure path
	 * above, i.e. waiters cannot tell the flush was skipped — presumably
	 * intentional (error already logged), but worth confirming.
	 */
	dma_fence_signal(&clflush->dma);
	dma_fence_put(&clflush->dma); /* drops the ref taken for the work */
}
98
/*
 * sw_fence notify callback.
 *
 * FENCE_COMPLETE: all fences the sw_fence was waiting on have signalled;
 * kick the flush work onto the system workqueue.
 *
 * FENCE_FREE: the sw_fence is being torn down; drop the dma_fence reference
 * taken on behalf of the sw_fence in i915_gem_clflush_object()
 * (dma_fence_get() before i915_sw_fence_await_reservation()).
 */
static int __i915_sw_fence_call
i915_clflush_notify(struct i915_sw_fence *fence,
		    enum i915_sw_fence_notify state)
{
	struct clflush *clflush = container_of(fence, typeof(*clflush), wait);

	switch (state) {
	case FENCE_COMPLETE:
		schedule_work(&clflush->work);
		break;

	case FENCE_FREE:
		dma_fence_put(&clflush->dma);
		break;
	}

	return NOTIFY_DONE;
}
117
/**
 * i915_gem_clflush_object - flush an object's backing pages out of the CPU
 * caches, either asynchronously or inline
 * @obj: the GEM object to flush
 * @flags: I915_CLFLUSH_FORCE to flush even a read-coherent object;
 *	   I915_CLFLUSH_SYNC to forbid the asynchronous (fenced) path
 *
 * Returns true if a flush was performed or queued, false if it was skipped
 * (object has no struct pages, or is already coherent for reads and FORCE
 * was not set). In both return-false cases obj->cache_dirty is left false.
 */
bool i915_gem_clflush_object(struct drm_i915_gem_object *obj,
			     unsigned int flags)
{
	struct clflush *clflush;

	/*
	 * Objects without struct pages cannot be clflushed here — there is
	 * nothing for drm_clflush_sg() to walk. Presumably such objects are
	 * coherent by construction (the code simply clears cache_dirty);
	 * TODO(review): confirm against the allocation paths.
	 */
	if (!i915_gem_object_has_struct_page(obj)) {
		obj->cache_dirty = false;
		return false;
	}

	/*
	 * Skip the flush when the object is already coherent for CPU reads,
	 * unless the caller explicitly forces it (e.g. to push writes out
	 * regardless of the coherency tracking).
	 */
	if (!(flags & I915_CLFLUSH_FORCE) &&
	    obj->cache_coherent & I915_BO_CACHE_COHERENT_FOR_READ)
		return false;

	trace_i915_gem_object_clflush(obj);

	/* Async path only when the caller did not demand a synchronous flush. */
	clflush = NULL;
	if (!(flags & I915_CLFLUSH_SYNC))
		clflush = kmalloc(sizeof(*clflush), GFP_KERNEL);
	if (clflush) {
		GEM_BUG_ON(!obj->cache_dirty);

		/*
		 * Export a fence on the unordered timeline so other users of
		 * the object can wait for the flush to complete.
		 */
		dma_fence_init(&clflush->dma,
			       &i915_clflush_ops,
			       &clflush_lock,
			       to_i915(obj->base.dev)->mm.unordered_timeline,
			       0);
		i915_sw_fence_init(&clflush->wait, i915_clflush_notify);

		/* Reference dropped by i915_clflush_work() after the flush. */
		clflush->obj = i915_gem_object_get(obj);
		INIT_WORK(&clflush->work, i915_clflush_work);

		/* Reference dropped by the FENCE_FREE notify callback. */
		dma_fence_get(&clflush->dma);

		/*
		 * Defer the flush until everything currently in the
		 * reservation object has signalled.
		 * NOTE(review): the return value (can report allocation
		 * failure) is ignored here — verify the sw_fence still
		 * completes sanely on that path.
		 */
		i915_sw_fence_await_reservation(&clflush->wait,
						obj->resv, NULL,
						true, I915_FENCE_TIMEOUT,
						I915_FENCE_GFP);

		reservation_object_lock(obj->resv, NULL);
		reservation_object_add_excl_fence(obj->resv, &clflush->dma);
		reservation_object_unlock(obj->resv);

		i915_sw_fence_commit(&clflush->wait);
	} else if (obj->mm.pages) {
		/* Synchronous path (requested, or kmalloc failed): flush now. */
		__i915_do_clflush(obj);
	} else {
		/* No pages yet: nothing to flush, but only legal for CPU-domain objects. */
		GEM_BUG_ON(obj->write_domain != I915_GEM_DOMAIN_CPU);
	}

	obj->cache_dirty = false;
	return true;
}
186