// SPDX-License-Identifier: MIT
/*
 * Copyright © 2019 Intel Corporation
 */

#include "i915_gem_client_blt.h"

#include "i915_gem_object_blt.h"
#include "intel_drv.h"

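/*
 * A sleeve wraps an object with a set of backing pages managed by the
 * caller: binding the sleeve's vma maps the caller's sg_table rather
 * than the object's own backing store.
 */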
struct i915_sleeve {
	struct i915_vma *vma;
	struct drm_i915_gem_object *obj;
	struct sg_table *pages;
	struct i915_page_sizes page_sizes;
};

static int vma_set_pages(struct i915_vma *vma)
{
	struct i915_sleeve *sleeve = vma->private;

	vma->pages = sleeve->pages;
	vma->page_sizes = sleeve->page_sizes;

	return 0;
}

static void vma_clear_pages(struct i915_vma *vma)
{
	GEM_BUG_ON(!vma->pages);
	vma->pages = NULL;
}

static int vma_bind(struct i915_vma *vma,
		    enum i915_cache_level cache_level,
		    u32 flags)
{
	return vma->vm->vma_ops.bind_vma(vma, cache_level, flags);
}

static void vma_unbind(struct i915_vma *vma)
{
	vma->vm->vma_ops.unbind_vma(vma);
}

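/* vma ops that source the backing pages from the sleeve, not the object */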
static const struct i915_vma_ops proxy_vma_ops = {
	.set_pages = vma_set_pages,
	.clear_pages = vma_clear_pages,
	.bind_vma = vma_bind,
	.unbind_vma = vma_unbind,
};

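/*
 * Wrap @obj in a sleeve: take a vma for it in @vm, redirect the vma ops
 * to the proxy ops above and hold a reference on the object for the
 * lifetime of the sleeve.
 */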
static struct i915_sleeve *create_sleeve(struct i915_address_space *vm,
					 struct drm_i915_gem_object *obj,
					 struct sg_table *pages,
					 struct i915_page_sizes *page_sizes)
{
	struct i915_sleeve *sleeve;
	struct i915_vma *vma;
	int err;

	sleeve = kzalloc(sizeof(*sleeve), GFP_KERNEL);
	if (!sleeve)
		return ERR_PTR(-ENOMEM);

	vma = i915_vma_instance(obj, vm, NULL);
	if (IS_ERR(vma)) {
		err = PTR_ERR(vma);
		goto err_free;
	}

	vma->private = sleeve;
	vma->ops = &proxy_vma_ops;

	sleeve->vma = vma;
	sleeve->obj = i915_gem_object_get(obj);
	sleeve->pages = pages;
	sleeve->page_sizes = *page_sizes;

	return sleeve;

err_free:
	kfree(sleeve);
	return ERR_PTR(err);
}

static void destroy_sleeve(struct i915_sleeve *sleeve)
{
	i915_gem_object_put(sleeve->obj);
	kfree(sleeve);
}

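/*
 * One asynchronous fill in flight: @wait collects the dependencies,
 * @work builds and submits the blitter request, and @dma is the fence
 * exported to the object's reservation, signalled via @irq_work once
 * the request completes.
 */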
struct clear_pages_work {
	struct dma_fence dma;
	struct dma_fence_cb cb;
	struct i915_sw_fence wait;
	struct work_struct work;
	struct irq_work irq_work;
	struct i915_sleeve *sleeve;
	struct intel_context *ce;
	u32 value;
};

static const char *clear_pages_work_driver_name(struct dma_fence *fence)
{
	return DRIVER_NAME;
}

static const char *clear_pages_work_timeline_name(struct dma_fence *fence)
{
	return "clear";
}

static void clear_pages_work_release(struct dma_fence *fence)
{
	struct clear_pages_work *w = container_of(fence, typeof(*w), dma);

	destroy_sleeve(w->sleeve);

	i915_sw_fence_fini(&w->wait);

	/* @dma must be the first member for dma_fence_free() to free us */
	BUILD_BUG_ON(offsetof(typeof(*w), dma));
	dma_fence_free(&w->dma);
}

static const struct dma_fence_ops clear_pages_work_ops = {
	.get_driver_name = clear_pages_work_driver_name,
	.get_timeline_name = clear_pages_work_timeline_name,
	.release = clear_pages_work_release,
};

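/* Runs in irq context, outside the request's fence spinlock */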
static void clear_pages_signal_irq_worker(struct irq_work *work)
{
	struct clear_pages_work *w = container_of(work, typeof(*w), irq_work);

	dma_fence_signal(&w->dma);
	dma_fence_put(&w->dma);
}

static void clear_pages_dma_fence_cb(struct dma_fence *fence,
				     struct dma_fence_cb *cb)
{
	struct clear_pages_work *w = container_of(cb, typeof(*w), cb);

	if (fence->error)
		dma_fence_set_error(&w->dma, fence->error);

	/*
	 * Push the signalling of the fence into yet another worker to avoid
	 * the nightmare locking around the fence spinlock.
	 */
	irq_work_queue(&w->irq_work);
}

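/*
 * Runs from the workqueue once every dependency in w->wait has signalled:
 * flush the CPU caches if needed, then build and submit the blitter fill.
 */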
static void clear_pages_worker(struct work_struct *work)
{
	struct clear_pages_work *w = container_of(work, typeof(*w), work);
	struct drm_i915_private *i915 = w->ce->gem_context->i915;
	struct drm_i915_gem_object *obj = w->sleeve->obj;
	struct i915_vma *vma = w->sleeve->vma;
	struct i915_request *rq;
	int err = w->dma.error;

	if (unlikely(err))
		goto out_signal;

	if (obj->cache_dirty) {
		obj->write_domain = 0;
		if (i915_gem_object_has_struct_page(obj))
			drm_clflush_sg(w->sleeve->pages);
		obj->cache_dirty = false;
	}

	/* XXX: we need to kill this */
	mutex_lock(&i915->drm.struct_mutex);
	err = i915_vma_pin(vma, 0, 0, PIN_USER);
	if (unlikely(err))
		goto out_unlock;

	rq = i915_request_create(w->ce);
	if (IS_ERR(rq)) {
		err = PTR_ERR(rq);
		goto out_unpin;
	}

	/* There's no way the fence has signalled */
	if (dma_fence_add_callback(&rq->fence, &w->cb,
				   clear_pages_dma_fence_cb))
		GEM_BUG_ON(1);

	if (w->ce->engine->emit_init_breadcrumb) {
		err = w->ce->engine->emit_init_breadcrumb(rq);
		if (unlikely(err))
			goto out_request;
	}

	/*
	 * w->dma is already exported via the object's reservation; we need
	 * only keep track of the GPU activity within this vma/request, and
	 * propagate the signal from the request to w->dma.
	 */
	i915_vma_lock(vma);
	err = i915_vma_move_to_active(vma, rq, EXEC_OBJECT_WRITE);
	i915_vma_unlock(vma);
	if (err)
		goto out_request;

	err = intel_emit_vma_fill_blt(rq, vma, w->value);
out_request:
	if (unlikely(err)) {
		i915_request_skip(rq, err);
		err = 0;
	}

	i915_request_add(rq);
out_unpin:
	i915_vma_unpin(vma);
out_unlock:
	mutex_unlock(&i915->drm.struct_mutex);
out_signal:
	if (unlikely(err)) {
		dma_fence_set_error(&w->dma, err);
		dma_fence_signal(&w->dma);
		dma_fence_put(&w->dma);
	}
}

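/*
 * Notification for w->wait: once all dependencies have completed, kick
 * the worker; when the sw fence itself is freed, drop its reference on
 * the dma fence.
 */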
static int __i915_sw_fence_call
clear_pages_work_notify(struct i915_sw_fence *fence,
			enum i915_sw_fence_notify state)
{
	struct clear_pages_work *w = container_of(fence, typeof(*w), wait);

	switch (state) {
	case FENCE_COMPLETE:
		schedule_work(&w->work);
		break;

	case FENCE_FREE:
		dma_fence_put(&w->dma);
		break;
	}

	return NOTIFY_DONE;
}

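/* All clear-pages fences share a single spinlock */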
static DEFINE_SPINLOCK(fence_lock);

/* Schedule an asynchronous fill of @pages with @value using @ce's engine */
int i915_gem_schedule_fill_pages_blt(struct drm_i915_gem_object *obj,
				     struct intel_context *ce,
				     struct sg_table *pages,
				     struct i915_page_sizes *page_sizes,
				     u32 value)
{
	struct drm_i915_private *i915 = to_i915(obj->base.dev);
	struct i915_gem_context *ctx = ce->gem_context;
	struct i915_address_space *vm = ctx->vm ?: &i915->ggtt.vm;
	struct clear_pages_work *work;
	struct i915_sleeve *sleeve;
	int err;

	sleeve = create_sleeve(vm, obj, pages, page_sizes);
	if (IS_ERR(sleeve))
		return PTR_ERR(sleeve);

	work = kmalloc(sizeof(*work), GFP_KERNEL);
	if (!work) {
		destroy_sleeve(sleeve);
		return -ENOMEM;
	}

	work->value = value;
	work->sleeve = sleeve;
	work->ce = ce;

	INIT_WORK(&work->work, clear_pages_worker);

	init_irq_work(&work->irq_work, clear_pages_signal_irq_worker);

	dma_fence_init(&work->dma,
		       &clear_pages_work_ops,
		       &fence_lock,
		       i915->mm.unordered_timeline,
		       0);
	i915_sw_fence_init(&work->wait, clear_pages_work_notify);

	/* Defer the fill until everything already on the object has run */
	i915_gem_object_lock(obj);
	err = i915_sw_fence_await_reservation(&work->wait,
					      obj->base.resv, NULL,
					      true, I915_FENCE_TIMEOUT,
					      I915_FENCE_GFP);
	if (err < 0) {
		dma_fence_set_error(&work->dma, err);
	} else {
		reservation_object_add_excl_fence(obj->base.resv, &work->dma);
		err = 0;
	}
	i915_gem_object_unlock(obj);

	/* Keep a reference for signalling; dropped once the fence signals */
	dma_fence_get(&work->dma);
	i915_sw_fence_commit(&work->wait);

	return err;
}

#if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
#include "selftests/i915_gem_client_blt.c"
#endif