#include <linux/dma-buf.h>
#include <linux/vfio.h>

#include <drm/drm_fourcc.h>
#include <drm/drm_plane.h>

#include "gem/i915_gem_dmabuf.h"

#include "i915_drv.h"
#include "i915_reg.h"
#include "gvt.h"

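/*
 * A gen8+ GGTT entry stores the page address in bits 63:12; masking off
 * the low-order flag bits recovers the DMA address of the backing page.
 */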
#define GEN8_DECODE_PTE(pte) ((pte) & GENMASK_ULL(63, 12))

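/*
 * Pin the guest page backing @dma_addr so it stays mapped for as long as
 * the exported framebuffer object references it (@size is currently
 * unused; every pin covers a single page).
 */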
static int vgpu_pin_dma_address(struct intel_vgpu *vgpu,
                                unsigned long size,
                                dma_addr_t dma_addr)
{
        int ret = 0;

        if (intel_gvt_hypervisor_dma_pin_guest_page(vgpu, dma_addr))
                ret = -EINVAL;

        return ret;
}

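/* Drop the pin taken by vgpu_pin_dma_address() on @dma_addr. */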
static void vgpu_unpin_dma_address(struct intel_vgpu *vgpu,
                                   dma_addr_t dma_addr)
{
        intel_gvt_hypervisor_dma_unmap_guest_page(vgpu, dma_addr);
}

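/*
 * Build the sg_table for a vGPU framebuffer object by reading the guest
 * framebuffer's GGTT entries and pinning the guest pages they point to.
 * On failure, any pages pinned so far are unpinned again.
 */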
static int vgpu_gem_get_pages(struct drm_i915_gem_object *obj)
{
        struct drm_i915_private *dev_priv = to_i915(obj->base.dev);
        struct intel_vgpu *vgpu;
        struct sg_table *st;
        struct scatterlist *sg;
        int i, j, ret;
        gen8_pte_t __iomem *gtt_entries;
        struct intel_vgpu_fb_info *fb_info;
        u32 page_num;

        fb_info = (struct intel_vgpu_fb_info *)obj->gvt_info;
        if (drm_WARN_ON(&dev_priv->drm, !fb_info))
                return -ENODEV;

        vgpu = fb_info->obj->vgpu;
        if (drm_WARN_ON(&dev_priv->drm, !vgpu))
                return -ENODEV;

        st = kmalloc(sizeof(*st), GFP_KERNEL);
        if (unlikely(!st))
                return -ENOMEM;

        page_num = obj->base.size >> PAGE_SHIFT;
        ret = sg_alloc_table(st, page_num, GFP_KERNEL);
        if (ret) {
                kfree(st);
                return ret;
        }
        gtt_entries = (gen8_pte_t __iomem *)to_gt(dev_priv)->ggtt->gsm +
                (fb_info->start >> PAGE_SHIFT);
        for_each_sg(st->sgl, sg, page_num, i) {
                dma_addr_t dma_addr =
                        GEN8_DECODE_PTE(readq(&gtt_entries[i]));
                if (vgpu_pin_dma_address(vgpu, PAGE_SIZE, dma_addr)) {
                        ret = -EINVAL;
                        goto out;
                }

                sg->offset = 0;
                sg->length = PAGE_SIZE;
                sg_dma_len(sg) = PAGE_SIZE;
                sg_dma_address(sg) = dma_addr;
        }

        __i915_gem_object_set_pages(obj, st, PAGE_SIZE);
out:
        if (ret) {
                dma_addr_t dma_addr;

                for_each_sg(st->sgl, sg, i, j) {
                        dma_addr = sg_dma_address(sg);
                        if (dma_addr)
                                vgpu_unpin_dma_address(vgpu, dma_addr);
                }
                sg_free_table(st);
                kfree(st);
        }

        return ret;
}

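/*
 * Unpin the guest pages referenced by @pages and free the table. The
 * unpin only happens while the exporting dma-buf is still attached, and
 * covers exactly the pages that vgpu_gem_get_pages() pinned.
 */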
static void vgpu_gem_put_pages(struct drm_i915_gem_object *obj,
                               struct sg_table *pages)
{
        struct scatterlist *sg;

        if (obj->base.dma_buf) {
                struct intel_vgpu_fb_info *fb_info = obj->gvt_info;
                struct intel_vgpu_dmabuf_obj *dmabuf_obj = fb_info->obj;
                struct intel_vgpu *vgpu = dmabuf_obj->vgpu;
                int i;

                /* fb_info->size is in bytes; unpin one entry per page. */
                for_each_sg(pages->sgl, sg,
                            DIV_ROUND_UP(fb_info->size, PAGE_SIZE), i)
                        vgpu_unpin_dma_address(vgpu, sg_dma_address(sg));
        }

        sg_free_table(pages);
        kfree(pages);
}

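/*
 * Final kref release callback for a dmabuf_obj: unlink it from its
 * vGPU's list and drop the bookkeeping, or free it directly if it was
 * orphaned by intel_vgpu_dmabuf_cleanup().
 */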
static void dmabuf_gem_object_free(struct kref *kref)
{
        struct intel_vgpu_dmabuf_obj *obj =
                container_of(kref, struct intel_vgpu_dmabuf_obj, kref);
        struct intel_vgpu *vgpu = obj->vgpu;
        struct list_head *pos;
        struct intel_vgpu_dmabuf_obj *dmabuf_obj;

        if (vgpu && vgpu->active && !list_empty(&vgpu->dmabuf_obj_list_head)) {
                list_for_each(pos, &vgpu->dmabuf_obj_list_head) {
                        dmabuf_obj = list_entry(pos,
                                        struct intel_vgpu_dmabuf_obj, list);
                        if (dmabuf_obj == obj) {
                                list_del(pos);
                                intel_gvt_hypervisor_put_vfio_device(vgpu);
                                idr_remove(&vgpu->object_idr,
                                           dmabuf_obj->dmabuf_id);
                                kfree(dmabuf_obj->info);
                                kfree(dmabuf_obj);
                                break;
                        }
                }
        } else {
                /* Free the orphan dmabuf_objs here */
                kfree(obj->info);
                kfree(obj);
        }
}

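/* Reference helpers for dmabuf_obj; the final put frees it. */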
static inline void dmabuf_obj_get(struct intel_vgpu_dmabuf_obj *obj)
{
        kref_get(&obj->kref);
}

static inline void dmabuf_obj_put(struct intel_vgpu_dmabuf_obj *obj)
{
        kref_put(&obj->kref, dmabuf_gem_object_free);
}

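/*
 * GEM release callback: drop the reference the exported GEM object holds
 * on its dmabuf_obj, under the vGPU's dmabuf_lock when the vGPU still
 * exists.
 */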
static void vgpu_gem_release(struct drm_i915_gem_object *gem_obj)
{
        struct intel_vgpu_fb_info *fb_info = gem_obj->gvt_info;
        struct intel_vgpu_dmabuf_obj *obj = fb_info->obj;
        struct intel_vgpu *vgpu = obj->vgpu;

        if (vgpu) {
                mutex_lock(&vgpu->dmabuf_lock);
                gem_obj->base.dma_buf = NULL;
                dmabuf_obj_put(obj);
                mutex_unlock(&vgpu->dmabuf_lock);
        } else {
                /* vgpu is NULL, as it has been removed already */
                gem_obj->base.dma_buf = NULL;
                dmabuf_obj_put(obj);
        }
}

static const struct drm_i915_gem_object_ops intel_vgpu_gem_ops = {
        .name = "i915_gem_object_vgpu",
        .flags = I915_GEM_OBJECT_IS_PROXY,
        .get_pages = vgpu_gem_get_pages,
        .put_pages = vgpu_gem_put_pages,
        .release = vgpu_gem_release,
};

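/*
 * Wrap the decoded framebuffer described by @info in a read-only proxy
 * GEM object, translating the DRM format modifier into i915 tiling
 * information on gen9+ platforms.
 */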
static struct drm_i915_gem_object *vgpu_create_gem(struct drm_device *dev,
                struct intel_vgpu_fb_info *info)
{
        static struct lock_class_key lock_class;
        struct drm_i915_private *dev_priv = to_i915(dev);
        struct drm_i915_gem_object *obj;

        obj = i915_gem_object_alloc();
        if (obj == NULL)
                return NULL;

        drm_gem_private_object_init(dev, &obj->base,
                                    roundup(info->size, PAGE_SIZE));
        i915_gem_object_init(obj, &intel_vgpu_gem_ops, &lock_class, 0);
        i915_gem_object_set_readonly(obj);

        obj->read_domains = I915_GEM_DOMAIN_GTT;
        obj->write_domain = 0;
        if (GRAPHICS_VER(dev_priv) >= 9) {
                unsigned int tiling_mode = 0;
                unsigned int stride = 0;

                switch (info->drm_format_mod) {
                case DRM_FORMAT_MOD_LINEAR:
                        tiling_mode = I915_TILING_NONE;
                        break;
                case I915_FORMAT_MOD_X_TILED:
                        tiling_mode = I915_TILING_X;
                        stride = info->stride;
                        break;
                case I915_FORMAT_MOD_Y_TILED:
                case I915_FORMAT_MOD_Yf_TILED:
                        tiling_mode = I915_TILING_Y;
                        stride = info->stride;
                        break;
                default:
                        gvt_dbg_core("invalid drm_format_mod %llx for tiling\n",
                                     info->drm_format_mod);
                }
                obj->tiling_and_stride = tiling_mode | stride;
        } else {
                obj->tiling_and_stride = info->drm_format_mod ?
                                         I915_TILING_X : 0;
        }

        return obj;
}

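/* A cursor hotspot is only valid if it lies within the cursor plane. */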
static bool validate_hotspot(struct intel_vgpu_cursor_plane_format *c)
{
        return c && c->x_hot <= c->width && c->y_hot <= c->height;
}

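/*
 * Decode the vGPU's current primary or cursor plane into @info and
 * sanity-check the result: non-zero size, page-aligned start address,
 * and a range that lies inside the vGPU's GGTT.
 */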
static int vgpu_get_plane_info(struct drm_device *dev,
                               struct intel_vgpu *vgpu,
                               struct intel_vgpu_fb_info *info,
                               int plane_id)
{
        struct intel_vgpu_primary_plane_format p;
        struct intel_vgpu_cursor_plane_format c;
        int ret, tile_height = 1;

        memset(info, 0, sizeof(*info));

        if (plane_id == DRM_PLANE_TYPE_PRIMARY) {
                ret = intel_vgpu_decode_primary_plane(vgpu, &p);
                if (ret)
                        return ret;
                info->start = p.base;
                info->start_gpa = p.base_gpa;
                info->width = p.width;
                info->height = p.height;
                info->stride = p.stride;
                info->drm_format = p.drm_format;

                switch (p.tiled) {
                case PLANE_CTL_TILED_LINEAR:
                        info->drm_format_mod = DRM_FORMAT_MOD_LINEAR;
                        break;
                case PLANE_CTL_TILED_X:
                        info->drm_format_mod = I915_FORMAT_MOD_X_TILED;
                        tile_height = 8;
                        break;
                case PLANE_CTL_TILED_Y:
                        info->drm_format_mod = I915_FORMAT_MOD_Y_TILED;
                        tile_height = 32;
                        break;
                case PLANE_CTL_TILED_YF:
                        info->drm_format_mod = I915_FORMAT_MOD_Yf_TILED;
                        tile_height = 32;
                        break;
                default:
                        gvt_vgpu_err("invalid tiling mode: %x\n", p.tiled);
                }
        } else if (plane_id == DRM_PLANE_TYPE_CURSOR) {
                ret = intel_vgpu_decode_cursor_plane(vgpu, &c);
                if (ret)
                        return ret;
                info->start = c.base;
                info->start_gpa = c.base_gpa;
                info->width = c.width;
                info->height = c.height;
                info->stride = c.width * (c.bpp / 8);
                info->drm_format = c.drm_format;
                info->drm_format_mod = 0;
                info->x_pos = c.x_pos;
                info->y_pos = c.y_pos;

                if (validate_hotspot(&c)) {
                        info->x_hot = c.x_hot;
                        info->y_hot = c.y_hot;
                } else {
                        info->x_hot = UINT_MAX;
                        info->y_hot = UINT_MAX;
                }
        } else {
                gvt_vgpu_err("invalid plane id:%d\n", plane_id);
                return -EINVAL;
        }

        info->size = info->stride * roundup(info->height, tile_height);
        if (info->size == 0) {
                gvt_vgpu_err("fb size is zero\n");
                return -EINVAL;
        }

        if (info->start & (PAGE_SIZE - 1)) {
                gvt_vgpu_err("Not aligned fb address:0x%llx\n", info->start);
                return -EFAULT;
        }

        if (!intel_gvt_ggtt_validate_range(vgpu, info->start, info->size)) {
                gvt_vgpu_err("invalid gma addr\n");
                return -EFAULT;
        }

        return 0;
}

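/*
 * Find an existing dmabuf_obj whose decoded framebuffer parameters match
 * @latest_info, so that an unchanged plane can reuse its exported
 * buffer. Called with vgpu->dmabuf_lock held.
 */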
static struct intel_vgpu_dmabuf_obj *
pick_dmabuf_by_info(struct intel_vgpu *vgpu,
                    struct intel_vgpu_fb_info *latest_info)
{
        struct list_head *pos;
        struct intel_vgpu_fb_info *fb_info;
        struct intel_vgpu_dmabuf_obj *dmabuf_obj = NULL;
        struct intel_vgpu_dmabuf_obj *ret = NULL;

        list_for_each(pos, &vgpu->dmabuf_obj_list_head) {
                dmabuf_obj = list_entry(pos,
                                struct intel_vgpu_dmabuf_obj, list);
                if (!dmabuf_obj->info)
                        continue;

                fb_info = (struct intel_vgpu_fb_info *)dmabuf_obj->info;
                if ((fb_info->start == latest_info->start) &&
                    (fb_info->start_gpa == latest_info->start_gpa) &&
                    (fb_info->size == latest_info->size) &&
                    (fb_info->drm_format_mod == latest_info->drm_format_mod) &&
                    (fb_info->drm_format == latest_info->drm_format) &&
                    (fb_info->width == latest_info->width) &&
                    (fb_info->height == latest_info->height)) {
                        ret = dmabuf_obj;
                        break;
                }
        }

        return ret;
}

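/*
 * Find a dmabuf_obj by the dmabuf_id handed out by query_plane.
 * Called with vgpu->dmabuf_lock held.
 */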
static struct intel_vgpu_dmabuf_obj *
pick_dmabuf_by_num(struct intel_vgpu *vgpu, u32 id)
{
        struct list_head *pos;
        struct intel_vgpu_dmabuf_obj *dmabuf_obj = NULL;
        struct intel_vgpu_dmabuf_obj *ret = NULL;

        list_for_each(pos, &vgpu->dmabuf_obj_list_head) {
                dmabuf_obj = list_entry(pos,
                                struct intel_vgpu_dmabuf_obj, list);
                if (dmabuf_obj->dmabuf_id == id) {
                        ret = dmabuf_obj;
                        break;
                }
        }

        return ret;
}

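/* Copy the decoded plane parameters into the VFIO query result. */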
static void update_fb_info(struct vfio_device_gfx_plane_info *gvt_dmabuf,
                           struct intel_vgpu_fb_info *fb_info)
{
        gvt_dmabuf->drm_format = fb_info->drm_format;
        gvt_dmabuf->drm_format_mod = fb_info->drm_format_mod;
        gvt_dmabuf->width = fb_info->width;
        gvt_dmabuf->height = fb_info->height;
        gvt_dmabuf->stride = fb_info->stride;
        gvt_dmabuf->size = fb_info->size;
        gvt_dmabuf->x_pos = fb_info->x_pos;
        gvt_dmabuf->y_pos = fb_info->y_pos;
        gvt_dmabuf->x_hot = fb_info->x_hot;
        gvt_dmabuf->y_hot = fb_info->y_hot;
}

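/*
 * Handle a VFIO_GFX_PLANE_TYPE_DMABUF plane query: decode the requested
 * plane, reuse a matching dmabuf_obj if one already exists, otherwise
 * allocate a new one, assign it an id and publish it on the vGPU's
 * dmabuf_obj list.
 */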
int intel_vgpu_query_plane(struct intel_vgpu *vgpu, void *args)
{
        struct drm_device *dev = &vgpu->gvt->gt->i915->drm;
        struct vfio_device_gfx_plane_info *gfx_plane_info = args;
        struct intel_vgpu_dmabuf_obj *dmabuf_obj;
        struct intel_vgpu_fb_info fb_info;
        int ret = 0;

        if (gfx_plane_info->flags == (VFIO_GFX_PLANE_TYPE_DMABUF |
                                      VFIO_GFX_PLANE_TYPE_PROBE))
                return ret;
        else if ((gfx_plane_info->flags & ~VFIO_GFX_PLANE_TYPE_DMABUF) ||
                 (!gfx_plane_info->flags))
                return -EINVAL;

        ret = vgpu_get_plane_info(dev, vgpu, &fb_info,
                                  gfx_plane_info->drm_plane_type);
        if (ret != 0)
                goto out;

        mutex_lock(&vgpu->dmabuf_lock);

        dmabuf_obj = pick_dmabuf_by_info(vgpu, &fb_info);
        if (dmabuf_obj) {
                update_fb_info(gfx_plane_info, &fb_info);
                gfx_plane_info->dmabuf_id = dmabuf_obj->dmabuf_id;

                /* This buffer may be released between query_plane ioctl and
                 * get_dmabuf ioctl. Add the refcount to make sure it won't
                 * be released between the two ioctls.
                 */
                if (!dmabuf_obj->initref) {
                        dmabuf_obj->initref = true;
                        dmabuf_obj_get(dmabuf_obj);
                }
                ret = 0;
                gvt_dbg_dpy("vgpu%d: re-use dmabuf_obj ref %d, id %d\n",
                            vgpu->id, kref_read(&dmabuf_obj->kref),
                            gfx_plane_info->dmabuf_id);
                mutex_unlock(&vgpu->dmabuf_lock);
                goto out;
        }

        mutex_unlock(&vgpu->dmabuf_lock);

        /* Need to allocate a new one */
        dmabuf_obj = kmalloc(sizeof(struct intel_vgpu_dmabuf_obj), GFP_KERNEL);
        if (unlikely(!dmabuf_obj)) {
                gvt_vgpu_err("alloc dmabuf_obj failed\n");
                ret = -ENOMEM;
                goto out;
        }

        dmabuf_obj->info = kmalloc(sizeof(struct intel_vgpu_fb_info),
                                   GFP_KERNEL);
        if (unlikely(!dmabuf_obj->info)) {
                gvt_vgpu_err("allocate intel vgpu fb info failed\n");
                ret = -ENOMEM;
                goto out_free_dmabuf;
        }
        memcpy(dmabuf_obj->info, &fb_info, sizeof(struct intel_vgpu_fb_info));

        ((struct intel_vgpu_fb_info *)dmabuf_obj->info)->obj = dmabuf_obj;

        dmabuf_obj->vgpu = vgpu;

        ret = idr_alloc(&vgpu->object_idr, dmabuf_obj, 1, 0, GFP_NOWAIT);
        if (ret < 0)
                goto out_free_info;
        gfx_plane_info->dmabuf_id = ret;
        dmabuf_obj->dmabuf_id = ret;

        dmabuf_obj->initref = true;

        kref_init(&dmabuf_obj->kref);

        mutex_lock(&vgpu->dmabuf_lock);
        if (intel_gvt_hypervisor_get_vfio_device(vgpu)) {
                gvt_vgpu_err("get vfio device failed\n");
                mutex_unlock(&vgpu->dmabuf_lock);
                goto out_free_info;
        }
        mutex_unlock(&vgpu->dmabuf_lock);

        update_fb_info(gfx_plane_info, &fb_info);

        INIT_LIST_HEAD(&dmabuf_obj->list);
        mutex_lock(&vgpu->dmabuf_lock);
        list_add_tail(&dmabuf_obj->list, &vgpu->dmabuf_obj_list_head);
        mutex_unlock(&vgpu->dmabuf_lock);

        gvt_dbg_dpy("vgpu%d: %s new dmabuf_obj ref %d, id %d\n", vgpu->id,
                    __func__, kref_read(&dmabuf_obj->kref), ret);

        return 0;

out_free_info:
        kfree(dmabuf_obj->info);
out_free_dmabuf:
        kfree(dmabuf_obj);
out:
        /* ENODEV means plane isn't ready, which might be a normal case. */
        return (ret == -ENODEV) ? 0 : ret;
}

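/*
 * Associate an exposed dma-buf with the dmabuf_obj picked by @dmabuf_id:
 * create a proxy GEM object for it, export that object as a dma-buf and
 * return an fd to the caller. The initial query_plane reference is
 * dropped once a real dma-buf reference exists.
 */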
int intel_vgpu_get_dmabuf(struct intel_vgpu *vgpu, unsigned int dmabuf_id)
{
        struct drm_device *dev = &vgpu->gvt->gt->i915->drm;
        struct intel_vgpu_dmabuf_obj *dmabuf_obj;
        struct drm_i915_gem_object *obj;
        struct dma_buf *dmabuf;
        int dmabuf_fd;
        int ret = 0;

        mutex_lock(&vgpu->dmabuf_lock);

        dmabuf_obj = pick_dmabuf_by_num(vgpu, dmabuf_id);
        if (dmabuf_obj == NULL) {
                gvt_vgpu_err("invalid dmabuf id:%d\n", dmabuf_id);
                ret = -EINVAL;
                goto out;
        }

        obj = vgpu_create_gem(dev, dmabuf_obj->info);
        if (obj == NULL) {
                gvt_vgpu_err("create gvt gem obj failed\n");
                ret = -ENOMEM;
                goto out;
        }

        obj->gvt_info = dmabuf_obj->info;

        dmabuf = i915_gem_prime_export(&obj->base, DRM_CLOEXEC | DRM_RDWR);
        if (IS_ERR(dmabuf)) {
                gvt_vgpu_err("export dma-buf failed\n");
                ret = PTR_ERR(dmabuf);
                goto out_free_gem;
        }

        ret = dma_buf_fd(dmabuf, DRM_CLOEXEC | DRM_RDWR);
        if (ret < 0) {
                gvt_vgpu_err("create dma-buf fd failed ret:%d\n", ret);
                goto out_free_dmabuf;
        }
        dmabuf_fd = ret;

        dmabuf_obj_get(dmabuf_obj);

        if (dmabuf_obj->initref) {
                dmabuf_obj->initref = false;
                dmabuf_obj_put(dmabuf_obj);
        }

        mutex_unlock(&vgpu->dmabuf_lock);

        gvt_dbg_dpy("vgpu%d: dmabuf:%d, dmabuf ref %d, fd:%d\n"
                    "        file count: %ld, GEM ref: %d\n",
                    vgpu->id, dmabuf_obj->dmabuf_id,
                    kref_read(&dmabuf_obj->kref),
                    dmabuf_fd,
                    file_count(dmabuf->file),
                    kref_read(&obj->base.refcount));

        i915_gem_object_put(obj);

        return dmabuf_fd;

out_free_dmabuf:
        dma_buf_put(dmabuf);
out_free_gem:
        i915_gem_object_put(obj);
out:
        mutex_unlock(&vgpu->dmabuf_lock);
        return ret;
}

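/*
 * Tear down all dmabuf_objs of a vGPU that is being destroyed. Objects
 * still referenced by an exported buffer are only orphaned here (their
 * vgpu pointer is cleared) and freed when the last reference is dropped.
 */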
void intel_vgpu_dmabuf_cleanup(struct intel_vgpu *vgpu)
{
        struct list_head *pos, *n;
        struct intel_vgpu_dmabuf_obj *dmabuf_obj;

        mutex_lock(&vgpu->dmabuf_lock);
        list_for_each_safe(pos, n, &vgpu->dmabuf_obj_list_head) {
                dmabuf_obj = list_entry(pos,
                                struct intel_vgpu_dmabuf_obj, list);
                dmabuf_obj->vgpu = NULL;

                idr_remove(&vgpu->object_idr, dmabuf_obj->dmabuf_id);
                intel_gvt_hypervisor_put_vfio_device(vgpu);
                list_del(pos);

                /* dmabuf_obj might be freed in dmabuf_obj_put */
                if (dmabuf_obj->initref) {
                        dmabuf_obj->initref = false;
                        dmabuf_obj_put(dmabuf_obj);
                }
        }
        mutex_unlock(&vgpu->dmabuf_lock);
}