1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29#include <drm/ttm/ttm_placement.h>
30
31#include "vmwgfx_drv.h"
32#include "vmwgfx_resource_priv.h"
33#include "vmwgfx_binding.h"
34
35
36
37
38
39
40
41
42
43
44
/**
 * struct vmw_dx_streamoutput - Streamoutput resource metadata.
 * @res: Base resource.
 * @ctx: Non-refcounted context to which @res belongs (assigned without
 *       taking a reference in vmw_dx_streamoutput_add()).
 * @cotable: Refcounted streamoutput cotable of the context.
 * @cotable_head: List head linking @res into the cotable's resource list;
 *                empty while the resource is scrubbed.
 * @id: User-space provided identifier, also used as the device soid.
 * @size: Size in bytes of the bound mob range, set via
 *        vmw_dx_streamoutput_set_size().
 * @committed: Whether the streamoutput has been committed to the device.
 */
struct vmw_dx_streamoutput {
	struct vmw_resource res;
	struct vmw_resource *ctx;
	struct vmw_resource *cotable;
	struct list_head cotable_head;
	u32 id;
	u32 size;
	bool committed;
};
54
55static int vmw_dx_streamoutput_create(struct vmw_resource *res);
56static int vmw_dx_streamoutput_bind(struct vmw_resource *res,
57 struct ttm_validate_buffer *val_buf);
58static int vmw_dx_streamoutput_unbind(struct vmw_resource *res, bool readback,
59 struct ttm_validate_buffer *val_buf);
60static void vmw_dx_streamoutput_commit_notify(struct vmw_resource *res,
61 enum vmw_cmdbuf_res_state state);
62
63static size_t vmw_streamoutput_size;
64
/*
 * Resource function table for DX streamoutput resources. Streamoutputs are
 * mob-backed and are not evictable; device-side destruction is handled
 * elsewhere, hence .destroy is NULL.
 */
static const struct vmw_res_func vmw_dx_streamoutput_func = {
	.res_type = vmw_res_streamoutput,
	.needs_backup = true,
	.may_evict = false,
	.type_name = "DX streamoutput",
	.backup_placement = &vmw_mob_placement,
	.create = vmw_dx_streamoutput_create,
	.destroy = NULL, /* Command buffer managed resource. */
	.bind = vmw_dx_streamoutput_bind,
	.unbind = vmw_dx_streamoutput_unbind,
	.commit_notify = vmw_dx_streamoutput_commit_notify,
};
77
/* Convert an embedded struct vmw_resource to its containing streamoutput. */
static inline struct vmw_dx_streamoutput *
vmw_res_to_dx_streamoutput(struct vmw_resource *res)
{
	return container_of(res, struct vmw_dx_streamoutput, res);
}
83
84
85
86
87
88
89
90static int vmw_dx_streamoutput_unscrub(struct vmw_resource *res)
91{
92 struct vmw_dx_streamoutput *so = vmw_res_to_dx_streamoutput(res);
93 struct vmw_private *dev_priv = res->dev_priv;
94 struct {
95 SVGA3dCmdHeader header;
96 SVGA3dCmdDXBindStreamOutput body;
97 } *cmd;
98
99 if (!list_empty(&so->cotable_head) || !so->committed )
100 return 0;
101
102 cmd = VMW_CMD_CTX_RESERVE(dev_priv, sizeof(*cmd), so->ctx->id);
103 if (!cmd)
104 return -ENOMEM;
105
106 cmd->header.id = SVGA_3D_CMD_DX_BIND_STREAMOUTPUT;
107 cmd->header.size = sizeof(cmd->body);
108 cmd->body.soid = so->id;
109 cmd->body.mobid = res->backup->base.resource->start;
110 cmd->body.offsetInBytes = res->backup_offset;
111 cmd->body.sizeInBytes = so->size;
112 vmw_cmd_commit(dev_priv, sizeof(*cmd));
113
114 vmw_cotable_add_resource(so->cotable, &so->cotable_head);
115
116 return 0;
117}
118
119static int vmw_dx_streamoutput_create(struct vmw_resource *res)
120{
121 struct vmw_private *dev_priv = res->dev_priv;
122 struct vmw_dx_streamoutput *so = vmw_res_to_dx_streamoutput(res);
123 int ret = 0;
124
125 WARN_ON_ONCE(!so->committed);
126
127 if (vmw_resource_mob_attached(res)) {
128 mutex_lock(&dev_priv->binding_mutex);
129 ret = vmw_dx_streamoutput_unscrub(res);
130 mutex_unlock(&dev_priv->binding_mutex);
131 }
132
133 res->id = so->id;
134
135 return ret;
136}
137
138static int vmw_dx_streamoutput_bind(struct vmw_resource *res,
139 struct ttm_validate_buffer *val_buf)
140{
141 struct vmw_private *dev_priv = res->dev_priv;
142 struct ttm_buffer_object *bo = val_buf->bo;
143 int ret;
144
145 if (WARN_ON(bo->resource->mem_type != VMW_PL_MOB))
146 return -EINVAL;
147
148 mutex_lock(&dev_priv->binding_mutex);
149 ret = vmw_dx_streamoutput_unscrub(res);
150 mutex_unlock(&dev_priv->binding_mutex);
151
152 return ret;
153}
154
155
156
157
158
159
160
/**
 * vmw_dx_streamoutput_scrub - Unbind a streamoutput's mob on the device.
 * @res: The streamoutput resource.
 *
 * Emits a SVGA_3D_CMD_DX_BIND_STREAMOUTPUT with SVGA3D_INVALID_ID as the
 * mob id, detaching the backup mob on the device side, then invalidates
 * the resource id and removes the resource from its cotable's list.
 *
 * Caller is expected to hold the binding mutex (all callers in this file
 * do, or assert it via lockdep).
 *
 * Return: 0 on success or if already scrubbed, -ENOMEM if command space
 * could not be reserved.
 */
static int vmw_dx_streamoutput_scrub(struct vmw_resource *res)
{
	struct vmw_private *dev_priv = res->dev_priv;
	struct vmw_dx_streamoutput *so = vmw_res_to_dx_streamoutput(res);
	struct {
		SVGA3dCmdHeader header;
		SVGA3dCmdDXBindStreamOutput body;
	} *cmd;

	/* Not on the cotable list means already scrubbed - nothing to do. */
	if (list_empty(&so->cotable_head))
		return 0;

	WARN_ON_ONCE(!so->committed);

	cmd = VMW_CMD_CTX_RESERVE(dev_priv, sizeof(*cmd), so->ctx->id);
	if (!cmd)
		return -ENOMEM;

	cmd->header.id = SVGA_3D_CMD_DX_BIND_STREAMOUTPUT;
	cmd->header.size = sizeof(cmd->body);
	cmd->body.soid = res->id;
	cmd->body.mobid = SVGA3D_INVALID_ID; /* Unbind the mob. */
	cmd->body.offsetInBytes = 0;
	cmd->body.sizeInBytes = so->size;
	vmw_cmd_commit(dev_priv, sizeof(*cmd));

	/* Resource has no device id until unscrubbed again. */
	res->id = -1;
	list_del_init(&so->cotable_head);

	return 0;
}
192
/**
 * vmw_dx_streamoutput_unbind - Resource unbind callback for streamoutputs.
 * @res: The streamoutput resource.
 * @readback: Unused here; part of the resource func interface.
 * @val_buf: Validate buffer holding the backup buffer object.
 *
 * Scrubs the streamoutput under the binding mutex, then fences the backup
 * buffer so it is not reused while the device may still reference it.
 *
 * Return: 0 on success, -EINVAL for a non-mob backup, or the scrub error.
 */
static int vmw_dx_streamoutput_unbind(struct vmw_resource *res, bool readback,
				      struct ttm_validate_buffer *val_buf)
{
	struct vmw_private *dev_priv = res->dev_priv;
	struct vmw_fence_obj *fence;
	int ret;

	if (WARN_ON(res->backup->base.resource->mem_type != VMW_PL_MOB))
		return -EINVAL;

	mutex_lock(&dev_priv->binding_mutex);
	ret = vmw_dx_streamoutput_scrub(res);
	mutex_unlock(&dev_priv->binding_mutex);

	if (ret)
		return ret;

	/* Fence the mob so it outlives any pending device access. */
	(void) vmw_execbuf_fence_commands(NULL, dev_priv, &fence, NULL);
	vmw_bo_fence_single(val_buf->bo, fence);

	if (fence != NULL)
		vmw_fence_obj_unreference(&fence);

	return 0;
}
218
219static void vmw_dx_streamoutput_commit_notify(struct vmw_resource *res,
220 enum vmw_cmdbuf_res_state state)
221{
222 struct vmw_private *dev_priv = res->dev_priv;
223 struct vmw_dx_streamoutput *so = vmw_res_to_dx_streamoutput(res);
224
225 if (state == VMW_CMDBUF_RES_ADD) {
226 mutex_lock(&dev_priv->binding_mutex);
227 vmw_cotable_add_resource(so->cotable, &so->cotable_head);
228 so->committed = true;
229 res->id = so->id;
230 mutex_unlock(&dev_priv->binding_mutex);
231 } else {
232 mutex_lock(&dev_priv->binding_mutex);
233 list_del_init(&so->cotable_head);
234 so->committed = false;
235 res->id = -1;
236 mutex_unlock(&dev_priv->binding_mutex);
237 }
238}
239
240
241
242
243
244
245
246
247struct vmw_resource *
248vmw_dx_streamoutput_lookup(struct vmw_cmdbuf_res_manager *man,
249 u32 user_key)
250{
251 return vmw_cmdbuf_res_lookup(man, vmw_cmdbuf_res_streamoutput,
252 user_key);
253}
254
255static void vmw_dx_streamoutput_res_free(struct vmw_resource *res)
256{
257 struct vmw_private *dev_priv = res->dev_priv;
258 struct vmw_dx_streamoutput *so = vmw_res_to_dx_streamoutput(res);
259
260 vmw_resource_unreference(&so->cotable);
261 kfree(so);
262 ttm_mem_global_free(vmw_mem_glob(dev_priv), vmw_streamoutput_size);
263}
264
/**
 * vmw_dx_streamoutput_hw_destroy - Hardware-destroy callback.
 * @res: The streamoutput resource.
 *
 * No device command is emitted here; only the resource id is invalidated.
 * NOTE(review): presumably the device-side object is destroyed via the
 * cotable or a user-space command buffer - confirm against the callers of
 * res->hw_destroy.
 */
static void vmw_dx_streamoutput_hw_destroy(struct vmw_resource *res)
{

	res->id = -1;
}
270
271
272
273
274
275
276
277
278
279
/**
 * vmw_dx_streamoutput_add - Stage a streamoutput for addition.
 * @man: Command buffer managed resource manager for the context owning the
 *       streamoutput.
 * @ctx: The context owning the streamoutput. Stored without taking a
 *       reference; the caller must keep it alive.
 * @user_key: User-space identifier of the streamoutput.
 * @list: Caller's staged-command list.
 *
 * Accounts for, allocates and initializes a streamoutput resource, and
 * stages it on @list through the command buffer resource manager.
 *
 * Return: 0 on success, negative error code on failure.
 */
int vmw_dx_streamoutput_add(struct vmw_cmdbuf_res_manager *man,
			    struct vmw_resource *ctx, u32 user_key,
			    struct list_head *list)
{
	struct vmw_dx_streamoutput *so;
	struct vmw_resource *res;
	struct vmw_private *dev_priv = ctx->dev_priv;
	struct ttm_operation_ctx ttm_opt_ctx = {
		.interruptible = true,
		.no_wait_gpu = false
	};
	int ret;

	/* Compute the accounting size once, on first use. */
	if (!vmw_streamoutput_size)
		vmw_streamoutput_size = ttm_round_pot(sizeof(*so));

	ret = ttm_mem_global_alloc(vmw_mem_glob(dev_priv),
				   vmw_streamoutput_size, &ttm_opt_ctx);
	if (ret) {
		if (ret != -ERESTARTSYS)
			DRM_ERROR("Out of graphics memory for streamout.\n");
		return ret;
	}

	so = kmalloc(sizeof(*so), GFP_KERNEL);
	if (!so) {
		/* Undo the accounting done above. */
		ttm_mem_global_free(vmw_mem_glob(dev_priv),
				    vmw_streamoutput_size);
		return -ENOMEM;
	}

	res = &so->res;
	so->ctx = ctx;
	so->cotable = vmw_resource_reference
		(vmw_context_cotable(ctx, SVGA_COTABLE_STREAMOUTPUT));
	so->id = user_key;
	so->committed = false;
	INIT_LIST_HEAD(&so->cotable_head);
	/*
	 * vmw_resource_init registers vmw_dx_streamoutput_res_free as the
	 * destructor, so from here on *so is freed via unreference.
	 */
	ret = vmw_resource_init(dev_priv, res, true,
				vmw_dx_streamoutput_res_free,
				&vmw_dx_streamoutput_func);
	if (ret)
		goto out_resource_init;

	ret = vmw_cmdbuf_res_add(man, vmw_cmdbuf_res_streamoutput, user_key,
				 res, list);
	if (ret)
		goto out_resource_init;

	res->id = so->id;
	res->hw_destroy = vmw_dx_streamoutput_hw_destroy;

out_resource_init:
	/*
	 * Drop our local reference. On the error paths this destroys the
	 * resource; on success the staged entry presumably holds its own
	 * reference - verify in vmw_cmdbuf_res_add.
	 */
	vmw_resource_unreference(&res);

	return ret;
}
337
338
339
340
341
342
/**
 * vmw_dx_streamoutput_set_size - Set the streamoutput mob size.
 * @res: The streamoutput resource.
 * @size: The size in bytes, later used for bind/unbind commands.
 */
void vmw_dx_streamoutput_set_size(struct vmw_resource *res, u32 size)
{
	struct vmw_dx_streamoutput *so = vmw_res_to_dx_streamoutput(res);

	so->size = size;
}
349
350
351
352
353
354
355
356
357
358int vmw_dx_streamoutput_remove(struct vmw_cmdbuf_res_manager *man,
359 u32 user_key,
360 struct list_head *list)
361{
362 struct vmw_resource *r;
363
364 return vmw_cmdbuf_res_remove(man, vmw_cmdbuf_res_streamoutput,
365 (u32)user_key, list, &r);
366}
367
368
369
370
371
372
373
374void vmw_dx_streamoutput_cotable_list_scrub(struct vmw_private *dev_priv,
375 struct list_head *list,
376 bool readback)
377{
378 struct vmw_dx_streamoutput *entry, *next;
379
380 lockdep_assert_held_once(&dev_priv->binding_mutex);
381
382 list_for_each_entry_safe(entry, next, list, cotable_head) {
383 WARN_ON(vmw_dx_streamoutput_scrub(&entry->res));
384 if (!readback)
385 entry->committed =false;
386 }
387}
388