#include "virtgpu_drv.h"
#include <drm/drm_plane_helper.h>
#include <drm/drm_atomic_helper.h>

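/*
 * The virtio-gpu device scans out a single 32 bpp format per plane
 * type.  The DRM_FORMAT_HOST_* aliases resolve to the XRGB8888 or
 * ARGB8888 variant matching the host byte order, so the same tables
 * work on little and big endian machines.
 */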
static const uint32_t virtio_gpu_formats[] = {
	DRM_FORMAT_HOST_XRGB8888,
};

static const uint32_t virtio_gpu_cursor_formats[] = {
	DRM_FORMAT_HOST_ARGB8888,
};

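/*
 * Translate a DRM fourcc into the matching VIRTIO_GPU_FORMAT_* value.
 * DRM fourccs name the pixel as a little endian packed word, while the
 * virtio-gpu formats are named after the byte order in memory, hence
 * XRGB8888 (le) maps to B8G8R8X8 and so on.  Returns 0 (and warns) for
 * unsupported fourccs.
 */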
uint32_t virtio_gpu_translate_format(uint32_t drm_fourcc)
{
	uint32_t format;

	switch (drm_fourcc) {
	case DRM_FORMAT_XRGB8888:
		format = VIRTIO_GPU_FORMAT_B8G8R8X8_UNORM;
		break;
	case DRM_FORMAT_ARGB8888:
		format = VIRTIO_GPU_FORMAT_B8G8R8A8_UNORM;
		break;
	case DRM_FORMAT_BGRX8888:
		format = VIRTIO_GPU_FORMAT_X8R8G8B8_UNORM;
		break;
	case DRM_FORMAT_BGRA8888:
		format = VIRTIO_GPU_FORMAT_A8R8G8B8_UNORM;
		break;
	default:
		/*
		 * This should not happen: the cases above cover
		 * everything listed in virtio_gpu_formats[] and
		 * virtio_gpu_cursor_formats[].
		 */
		format = 0;
		break;
	}
	WARN_ON(format == 0);
	return format;
}

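/* The plane is allocated in virtio_gpu_plane_init(), so free it here. */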
static void virtio_gpu_plane_destroy(struct drm_plane *plane)
{
	drm_plane_cleanup(plane);
	kfree(plane);
}

static const struct drm_plane_funcs virtio_gpu_plane_funcs = {
	.update_plane = drm_atomic_helper_update_plane,
	.disable_plane = drm_atomic_helper_disable_plane,
	.destroy = virtio_gpu_plane_destroy,
	.reset = drm_atomic_helper_plane_reset,
	.atomic_duplicate_state = drm_atomic_helper_plane_duplicate_state,
	.atomic_destroy_state = drm_atomic_helper_plane_destroy_state,
};

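/*
 * Nothing to validate (yet): the host imposes no plane constraints
 * this driver needs to check, so every atomic state is accepted.
 */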
static int virtio_gpu_plane_atomic_check(struct drm_plane *plane,
					 struct drm_plane_state *state)
{
	return 0;
}

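/*
 * Push the primary plane to the host.  For dumb (guest-allocated)
 * buffers the pixel data is first copied into the host resource with
 * TRANSFER_TO_HOST_2D; the resource is then bound to the scanout with
 * SET_SCANOUT and the visible region flushed so the host redraws it.
 * A zero handle disables the scanout.
 */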
static void virtio_gpu_primary_plane_update(struct drm_plane *plane,
					    struct drm_plane_state *old_state)
{
	struct drm_device *dev = plane->dev;
	struct virtio_gpu_device *vgdev = dev->dev_private;
	struct virtio_gpu_output *output = NULL;
	struct virtio_gpu_framebuffer *vgfb;
	struct virtio_gpu_object *bo;
	uint32_t handle;

	if (plane->state->crtc)
		output = drm_crtc_to_virtio_gpu_output(plane->state->crtc);
	if (old_state->crtc)
		output = drm_crtc_to_virtio_gpu_output(old_state->crtc);
	if (WARN_ON(!output))
		return;

	if (plane->state->fb && output->enabled) {
		vgfb = to_virtio_gpu_framebuffer(plane->state->fb);
		bo = gem_to_virtio_gpu_obj(vgfb->base.obj[0]);
		handle = bo->hw_res_handle;
		if (bo->dumb) {
			/* src_* are 16.16 fixed point, hence the >> 16 */
			virtio_gpu_cmd_transfer_to_host_2d
				(vgdev, bo, 0,
				 cpu_to_le32(plane->state->src_w >> 16),
				 cpu_to_le32(plane->state->src_h >> 16),
				 cpu_to_le32(plane->state->src_x >> 16),
				 cpu_to_le32(plane->state->src_y >> 16), NULL);
		}
	} else {
		handle = 0;
	}

	DRM_DEBUG("handle 0x%x, crtc %dx%d+%d+%d, src %dx%d+%d+%d\n", handle,
		  plane->state->crtc_w, plane->state->crtc_h,
		  plane->state->crtc_x, plane->state->crtc_y,
		  plane->state->src_w >> 16,
		  plane->state->src_h >> 16,
		  plane->state->src_x >> 16,
		  plane->state->src_y >> 16);
	virtio_gpu_cmd_set_scanout(vgdev, output->index, handle,
				   plane->state->src_w >> 16,
				   plane->state->src_h >> 16,
				   plane->state->src_x >> 16,
				   plane->state->src_y >> 16);
	if (handle)
		virtio_gpu_cmd_resource_flush(vgdev, handle,
					      plane->state->src_x >> 16,
					      plane->state->src_y >> 16,
					      plane->state->src_w >> 16,
					      plane->state->src_h >> 16);
}

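/*
 * Allocate a fence for the upcoming cursor image upload, so the
 * atomic_update callback can wait for the host to finish reading the
 * new image before pointing the cursor at it.
 */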
static int virtio_gpu_cursor_prepare_fb(struct drm_plane *plane,
					struct drm_plane_state *new_state)
{
	struct drm_device *dev = plane->dev;
	struct virtio_gpu_device *vgdev = dev->dev_private;
	struct virtio_gpu_framebuffer *vgfb;
	struct virtio_gpu_object *bo;

	if (!new_state->fb)
		return 0;

	vgfb = to_virtio_gpu_framebuffer(new_state->fb);
	bo = gem_to_virtio_gpu_obj(vgfb->base.obj[0]);
	if (bo && bo->dumb && (plane->state->fb != new_state->fb)) {
		vgfb->fence = virtio_gpu_fence_alloc(vgdev);
		if (!vgfb->fence)
			return -ENOMEM;
	}

	return 0;
}

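/* Drop the fence if atomic_update did not consume it. */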
static void virtio_gpu_cursor_cleanup_fb(struct drm_plane *plane,
					 struct drm_plane_state *old_state)
{
	struct virtio_gpu_framebuffer *vgfb;

	if (!plane->state->fb)
		return;

	vgfb = to_virtio_gpu_framebuffer(plane->state->fb);
	if (vgfb->fence) {
		dma_fence_put(&vgfb->fence->f);
		vgfb->fence = NULL;
	}
}

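/*
 * Cursor updates bypass the control queue: the state is written into
 * output->cursor and sent to the host via virtio_gpu_cursor_ping().
 * A changed framebuffer means a new cursor image and requires an
 * UPDATE_CURSOR command (after uploading the image for dumb buffers);
 * a mere position change only needs MOVE_CURSOR.
 */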
static void virtio_gpu_cursor_plane_update(struct drm_plane *plane,
					   struct drm_plane_state *old_state)
{
	struct drm_device *dev = plane->dev;
	struct virtio_gpu_device *vgdev = dev->dev_private;
	struct virtio_gpu_output *output = NULL;
	struct virtio_gpu_framebuffer *vgfb;
	struct virtio_gpu_object *bo = NULL;
	uint32_t handle;
	int ret = 0;

	if (plane->state->crtc)
		output = drm_crtc_to_virtio_gpu_output(plane->state->crtc);
	if (old_state->crtc)
		output = drm_crtc_to_virtio_gpu_output(old_state->crtc);
	if (WARN_ON(!output))
		return;

	if (plane->state->fb) {
		vgfb = to_virtio_gpu_framebuffer(plane->state->fb);
		bo = gem_to_virtio_gpu_obj(vgfb->base.obj[0]);
		handle = bo->hw_res_handle;
	} else {
		handle = 0;
	}

	if (bo && bo->dumb && (plane->state->fb != old_state->fb)) {
		/* new cursor -- upload the image and wait for completion */
		virtio_gpu_cmd_transfer_to_host_2d
			(vgdev, bo, 0,
			 cpu_to_le32(plane->state->crtc_w),
			 cpu_to_le32(plane->state->crtc_h),
			 0, 0, vgfb->fence);
		ret = virtio_gpu_object_reserve(bo, false);
		if (!ret) {
			reservation_object_add_excl_fence(bo->tbo.resv,
							  &vgfb->fence->f);
			dma_fence_put(&vgfb->fence->f);
			vgfb->fence = NULL;
			virtio_gpu_object_unreserve(bo);
			virtio_gpu_object_wait(bo, false);
		}
	}

	if (plane->state->fb != old_state->fb) {
		DRM_DEBUG("update, handle %d, pos +%d+%d, hot %d,%d\n", handle,
			  plane->state->crtc_x,
			  plane->state->crtc_y,
			  plane->state->fb ? plane->state->fb->hot_x : 0,
			  plane->state->fb ? plane->state->fb->hot_y : 0);
		output->cursor.hdr.type =
			cpu_to_le32(VIRTIO_GPU_CMD_UPDATE_CURSOR);
		output->cursor.resource_id = cpu_to_le32(handle);
		if (plane->state->fb) {
			output->cursor.hot_x =
				cpu_to_le32(plane->state->fb->hot_x);
			output->cursor.hot_y =
				cpu_to_le32(plane->state->fb->hot_y);
		} else {
			output->cursor.hot_x = cpu_to_le32(0);
			output->cursor.hot_y = cpu_to_le32(0);
		}
	} else {
		DRM_DEBUG("move +%d+%d\n",
			  plane->state->crtc_x,
			  plane->state->crtc_y);
		output->cursor.hdr.type =
			cpu_to_le32(VIRTIO_GPU_CMD_MOVE_CURSOR);
	}
	output->cursor.pos.x = cpu_to_le32(plane->state->crtc_x);
	output->cursor.pos.y = cpu_to_le32(plane->state->crtc_y);
	virtio_gpu_cursor_ping(vgdev, output);
}

static const struct drm_plane_helper_funcs virtio_gpu_primary_helper_funcs = {
	.atomic_check = virtio_gpu_plane_atomic_check,
	.atomic_update = virtio_gpu_primary_plane_update,
};

static const struct drm_plane_helper_funcs virtio_gpu_cursor_helper_funcs = {
	.prepare_fb = virtio_gpu_cursor_prepare_fb,
	.cleanup_fb = virtio_gpu_cursor_cleanup_fb,
	.atomic_check = virtio_gpu_plane_atomic_check,
	.atomic_update = virtio_gpu_cursor_plane_update,
};

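/*
 * Create a primary or cursor plane for the given scanout.  The plane
 * is restricted to the matching crtc via the 1 << index possible-crtcs
 * mask, and picks its format list and helpers by plane type.
 */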
struct drm_plane *virtio_gpu_plane_init(struct virtio_gpu_device *vgdev,
					enum drm_plane_type type,
					int index)
{
	struct drm_device *dev = vgdev->ddev;
	const struct drm_plane_helper_funcs *funcs;
	struct drm_plane *plane;
	const uint32_t *formats;
	int ret, nformats;

	plane = kzalloc(sizeof(*plane), GFP_KERNEL);
	if (!plane)
		return ERR_PTR(-ENOMEM);

	if (type == DRM_PLANE_TYPE_CURSOR) {
		formats = virtio_gpu_cursor_formats;
		nformats = ARRAY_SIZE(virtio_gpu_cursor_formats);
		funcs = &virtio_gpu_cursor_helper_funcs;
	} else {
		formats = virtio_gpu_formats;
		nformats = ARRAY_SIZE(virtio_gpu_formats);
		funcs = &virtio_gpu_primary_helper_funcs;
	}
	ret = drm_universal_plane_init(dev, plane, 1 << index,
				       &virtio_gpu_plane_funcs,
				       formats, nformats,
				       NULL, type, NULL);
	if (ret)
		goto err_plane_init;

	drm_plane_helper_add(plane, funcs);
	return plane;

err_plane_init:
	kfree(plane);
	return ERR_PTR(ret);
}

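/*
 * Usage sketch (an assumption about the caller, not code in this
 * file): the modeset init path creates one primary and one cursor
 * plane per scanout and ties them to its crtc, roughly:
 *
 *	struct drm_plane *primary, *cursor;
 *
 *	primary = virtio_gpu_plane_init(vgdev, DRM_PLANE_TYPE_PRIMARY, index);
 *	if (IS_ERR(primary))
 *		return PTR_ERR(primary);
 *	cursor = virtio_gpu_plane_init(vgdev, DRM_PLANE_TYPE_CURSOR, index);
 *	if (IS_ERR(cursor))
 *		return PTR_ERR(cursor);
 *	drm_crtc_init_with_planes(dev, crtc, primary, cursor,
 *				  &virtio_gpu_crtc_funcs, NULL);
 */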