/* linux/drivers/gpu/drm/virtio/virtgpu_plane.c */
   1/*
   2 * Copyright (C) 2015 Red Hat, Inc.
   3 * All Rights Reserved.
   4 *
   5 * Permission is hereby granted, free of charge, to any person obtaining
   6 * a copy of this software and associated documentation files (the
   7 * "Software"), to deal in the Software without restriction, including
   8 * without limitation the rights to use, copy, modify, merge, publish,
   9 * distribute, sublicense, and/or sell copies of the Software, and to
  10 * permit persons to whom the Software is furnished to do so, subject to
  11 * the following conditions:
  12 *
  13 * The above copyright notice and this permission notice (including the
  14 * next paragraph) shall be included in all copies or substantial
  15 * portions of the Software.
  16 *
  17 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
  18 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
  19 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
  20 * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
  21 * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
  22 * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
  23 * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
  24 */
  25
  26#include <drm/drm_atomic_helper.h>
  27#include <drm/drm_damage_helper.h>
  28#include <drm/drm_fourcc.h>
  29#include <drm/drm_plane_helper.h>
  30
  31#include "virtgpu_drv.h"
  32
/*
 * Pixel formats advertised for primary planes. DRM_FORMAT_HOST_XRGB8888
 * resolves to the 32bpp XRGB layout matching the host's byte order.
 */
static const uint32_t virtio_gpu_formats[] = {
	DRM_FORMAT_HOST_XRGB8888,
};

/* Cursor planes additionally need per-pixel alpha, hence ARGB. */
static const uint32_t virtio_gpu_cursor_formats[] = {
	DRM_FORMAT_HOST_ARGB8888,
};
  40
  41uint32_t virtio_gpu_translate_format(uint32_t drm_fourcc)
  42{
  43        uint32_t format;
  44
  45        switch (drm_fourcc) {
  46        case DRM_FORMAT_XRGB8888:
  47                format = VIRTIO_GPU_FORMAT_B8G8R8X8_UNORM;
  48                break;
  49        case DRM_FORMAT_ARGB8888:
  50                format = VIRTIO_GPU_FORMAT_B8G8R8A8_UNORM;
  51                break;
  52        case DRM_FORMAT_BGRX8888:
  53                format = VIRTIO_GPU_FORMAT_X8R8G8B8_UNORM;
  54                break;
  55        case DRM_FORMAT_BGRA8888:
  56                format = VIRTIO_GPU_FORMAT_A8R8G8B8_UNORM;
  57                break;
  58        default:
  59                /*
  60                 * This should not happen, we handle everything listed
  61                 * in virtio_gpu_formats[].
  62                 */
  63                format = 0;
  64                break;
  65        }
  66        WARN_ON(format == 0);
  67        return format;
  68}
  69
/*
 * Plane .destroy hook: tear down the DRM core state first, then release
 * the allocation made in virtio_gpu_plane_init() (order matters —
 * drm_plane_cleanup() still dereferences the plane).
 */
static void virtio_gpu_plane_destroy(struct drm_plane *plane)
{
	drm_plane_cleanup(plane);
	kfree(plane);
}
  75
/* Core plane ops: stock atomic helpers plus our kfree-ing destroy. */
static const struct drm_plane_funcs virtio_gpu_plane_funcs = {
	.update_plane		= drm_atomic_helper_update_plane,
	.disable_plane		= drm_atomic_helper_disable_plane,
	.destroy		= virtio_gpu_plane_destroy,
	.reset			= drm_atomic_helper_plane_reset,
	.atomic_duplicate_state = drm_atomic_helper_plane_duplicate_state,
	.atomic_destroy_state	= drm_atomic_helper_plane_destroy_state,
};
  84
  85static int virtio_gpu_plane_atomic_check(struct drm_plane *plane,
  86                                         struct drm_atomic_state *state)
  87{
  88        struct drm_plane_state *new_plane_state = drm_atomic_get_new_plane_state(state,
  89                                                                                 plane);
  90        bool is_cursor = plane->type == DRM_PLANE_TYPE_CURSOR;
  91        struct drm_crtc_state *crtc_state;
  92        int ret;
  93
  94        if (!new_plane_state->fb || WARN_ON(!new_plane_state->crtc))
  95                return 0;
  96
  97        crtc_state = drm_atomic_get_crtc_state(state,
  98                                               new_plane_state->crtc);
  99        if (IS_ERR(crtc_state))
 100                return PTR_ERR(crtc_state);
 101
 102        ret = drm_atomic_helper_check_plane_state(new_plane_state, crtc_state,
 103                                                  DRM_PLANE_HELPER_NO_SCALING,
 104                                                  DRM_PLANE_HELPER_NO_SCALING,
 105                                                  is_cursor, true);
 106        return ret;
 107}
 108
 109static void virtio_gpu_update_dumb_bo(struct virtio_gpu_device *vgdev,
 110                                      struct drm_plane_state *state,
 111                                      struct drm_rect *rect)
 112{
 113        struct virtio_gpu_object *bo =
 114                gem_to_virtio_gpu_obj(state->fb->obj[0]);
 115        struct virtio_gpu_object_array *objs;
 116        uint32_t w = rect->x2 - rect->x1;
 117        uint32_t h = rect->y2 - rect->y1;
 118        uint32_t x = rect->x1;
 119        uint32_t y = rect->y1;
 120        uint32_t off = x * state->fb->format->cpp[0] +
 121                y * state->fb->pitches[0];
 122
 123        objs = virtio_gpu_array_alloc(1);
 124        if (!objs)
 125                return;
 126        virtio_gpu_array_add_obj(objs, &bo->base.base);
 127
 128        virtio_gpu_cmd_transfer_to_host_2d(vgdev, off, w, h, x, y,
 129                                           objs, NULL);
 130}
 131
/*
 * Primary plane .atomic_update: push the new framebuffer contents and,
 * when the scanout configuration changed, (re)program the host scanout.
 * Ends with a resource flush over the merged damage rectangle so the
 * host actually displays the update.
 */
static void virtio_gpu_primary_plane_update(struct drm_plane *plane,
					    struct drm_atomic_state *state)
{
	struct drm_plane_state *old_state = drm_atomic_get_old_plane_state(state,
									   plane);
	struct drm_device *dev = plane->dev;
	struct virtio_gpu_device *vgdev = dev->dev_private;
	struct virtio_gpu_output *output = NULL;
	struct virtio_gpu_object *bo;
	struct drm_rect rect;

	/* Resolve the output from whichever state still has a crtc. */
	if (plane->state->crtc)
		output = drm_crtc_to_virtio_gpu_output(plane->state->crtc);
	if (old_state->crtc)
		output = drm_crtc_to_virtio_gpu_output(old_state->crtc);
	if (WARN_ON(!output))
		return;

	/* Plane disabled or crtc inactive: detach the scanout (resource 0). */
	if (!plane->state->fb || !output->crtc.state->active) {
		DRM_DEBUG("nofb\n");
		virtio_gpu_cmd_set_scanout(vgdev, output->index, 0,
					   plane->state->src_w >> 16,
					   plane->state->src_h >> 16,
					   0, 0);
		virtio_gpu_notify(vgdev);
		return;
	}

	/* No damage accumulated since the last update: nothing to do. */
	if (!drm_atomic_helper_damage_merged(old_state, plane->state, &rect))
		return;

	bo = gem_to_virtio_gpu_obj(plane->state->fb->obj[0]);
	/* Dumb BOs live in guest memory; transfer the dirty rect first. */
	if (bo->dumb)
		virtio_gpu_update_dumb_bo(vgdev, plane->state, &rect);

	/* Reprogram the scanout only when fb or source geometry changed. */
	if (plane->state->fb != old_state->fb ||
	    plane->state->src_w != old_state->src_w ||
	    plane->state->src_h != old_state->src_h ||
	    plane->state->src_x != old_state->src_x ||
	    plane->state->src_y != old_state->src_y ||
	    output->needs_modeset) {
		output->needs_modeset = false;
		DRM_DEBUG("handle 0x%x, crtc %dx%d+%d+%d, src %dx%d+%d+%d\n",
			  bo->hw_res_handle,
			  plane->state->crtc_w, plane->state->crtc_h,
			  plane->state->crtc_x, plane->state->crtc_y,
			  plane->state->src_w >> 16,
			  plane->state->src_h >> 16,
			  plane->state->src_x >> 16,
			  plane->state->src_y >> 16);

		/* Blob resources need the dedicated scanout-blob command. */
		if (bo->host3d_blob || bo->guest_blob) {
			virtio_gpu_cmd_set_scanout_blob
						(vgdev, output->index, bo,
						 plane->state->fb,
						 plane->state->src_w >> 16,
						 plane->state->src_h >> 16,
						 plane->state->src_x >> 16,
						 plane->state->src_y >> 16);
		} else {
			virtio_gpu_cmd_set_scanout(vgdev, output->index,
						   bo->hw_res_handle,
						   plane->state->src_w >> 16,
						   plane->state->src_h >> 16,
						   plane->state->src_x >> 16,
						   plane->state->src_y >> 16);
		}
	}

	/* Make the damaged region visible on the host display. */
	virtio_gpu_cmd_resource_flush(vgdev, bo->hw_res_handle,
				      rect.x1,
				      rect.y1,
				      rect.x2 - rect.x1,
				      rect.y2 - rect.y1);
	virtio_gpu_notify(vgdev);
}
 208
 209static int virtio_gpu_cursor_prepare_fb(struct drm_plane *plane,
 210                                        struct drm_plane_state *new_state)
 211{
 212        struct drm_device *dev = plane->dev;
 213        struct virtio_gpu_device *vgdev = dev->dev_private;
 214        struct virtio_gpu_framebuffer *vgfb;
 215        struct virtio_gpu_object *bo;
 216
 217        if (!new_state->fb)
 218                return 0;
 219
 220        vgfb = to_virtio_gpu_framebuffer(new_state->fb);
 221        bo = gem_to_virtio_gpu_obj(vgfb->base.obj[0]);
 222        if (bo && bo->dumb && (plane->state->fb != new_state->fb)) {
 223                vgfb->fence = virtio_gpu_fence_alloc(vgdev);
 224                if (!vgfb->fence)
 225                        return -ENOMEM;
 226        }
 227
 228        return 0;
 229}
 230
 231static void virtio_gpu_cursor_cleanup_fb(struct drm_plane *plane,
 232                                         struct drm_plane_state *old_state)
 233{
 234        struct virtio_gpu_framebuffer *vgfb;
 235
 236        if (!plane->state->fb)
 237                return;
 238
 239        vgfb = to_virtio_gpu_framebuffer(plane->state->fb);
 240        if (vgfb->fence) {
 241                dma_fence_put(&vgfb->fence->f);
 242                vgfb->fence = NULL;
 243        }
 244}
 245
/*
 * Cursor plane .atomic_update: upload a new cursor image when the fb
 * changed (synchronously, waiting on the fence from prepare_fb), then
 * send either an update-cursor or a move-cursor command to the host.
 */
static void virtio_gpu_cursor_plane_update(struct drm_plane *plane,
					   struct drm_atomic_state *state)
{
	struct drm_plane_state *old_state = drm_atomic_get_old_plane_state(state,
									   plane);
	struct drm_device *dev = plane->dev;
	struct virtio_gpu_device *vgdev = dev->dev_private;
	struct virtio_gpu_output *output = NULL;
	struct virtio_gpu_framebuffer *vgfb;
	struct virtio_gpu_object *bo = NULL;
	uint32_t handle;

	/* Resolve the output from whichever state still has a crtc. */
	if (plane->state->crtc)
		output = drm_crtc_to_virtio_gpu_output(plane->state->crtc);
	if (old_state->crtc)
		output = drm_crtc_to_virtio_gpu_output(old_state->crtc);
	if (WARN_ON(!output))
		return;

	/* handle == 0 tells the host to hide the cursor. */
	if (plane->state->fb) {
		vgfb = to_virtio_gpu_framebuffer(plane->state->fb);
		bo = gem_to_virtio_gpu_obj(vgfb->base.obj[0]);
		handle = bo->hw_res_handle;
	} else {
		handle = 0;
	}

	if (bo && bo->dumb && (plane->state->fb != old_state->fb)) {
		/* new cursor -- update & wait */
		struct virtio_gpu_object_array *objs;

		objs = virtio_gpu_array_alloc(1);
		if (!objs)
			return;
		virtio_gpu_array_add_obj(objs, vgfb->base.obj[0]);
		virtio_gpu_array_lock_resv(objs);
		virtio_gpu_cmd_transfer_to_host_2d
			(vgdev, 0,
			 plane->state->crtc_w,
			 plane->state->crtc_h,
			 0, 0, objs, vgfb->fence);
		virtio_gpu_notify(vgdev);
		/* Cursor updates are synchronous: wait for the transfer. */
		dma_fence_wait(&vgfb->fence->f, true);
		dma_fence_put(&vgfb->fence->f);
		vgfb->fence = NULL;
	}

	if (plane->state->fb != old_state->fb) {
		/* New image: full UPDATE_CURSOR including the hotspot. */
		DRM_DEBUG("update, handle %d, pos +%d+%d, hot %d,%d\n", handle,
			  plane->state->crtc_x,
			  plane->state->crtc_y,
			  plane->state->fb ? plane->state->fb->hot_x : 0,
			  plane->state->fb ? plane->state->fb->hot_y : 0);
		output->cursor.hdr.type =
			cpu_to_le32(VIRTIO_GPU_CMD_UPDATE_CURSOR);
		output->cursor.resource_id = cpu_to_le32(handle);
		if (plane->state->fb) {
			output->cursor.hot_x =
				cpu_to_le32(plane->state->fb->hot_x);
			output->cursor.hot_y =
				cpu_to_le32(plane->state->fb->hot_y);
		} else {
			output->cursor.hot_x = cpu_to_le32(0);
			output->cursor.hot_y = cpu_to_le32(0);
		}
	} else {
		/* Same image: cheap MOVE_CURSOR (position only). */
		DRM_DEBUG("move +%d+%d\n",
			  plane->state->crtc_x,
			  plane->state->crtc_y);
		output->cursor.hdr.type =
			cpu_to_le32(VIRTIO_GPU_CMD_MOVE_CURSOR);
	}
	output->cursor.pos.x = cpu_to_le32(plane->state->crtc_x);
	output->cursor.pos.y = cpu_to_le32(plane->state->crtc_y);
	virtio_gpu_cursor_ping(vgdev, output);
}
 322
/* Primary planes need no fb prepare/cleanup (no cursor-style fence). */
static const struct drm_plane_helper_funcs virtio_gpu_primary_helper_funcs = {
	.atomic_check		= virtio_gpu_plane_atomic_check,
	.atomic_update		= virtio_gpu_primary_plane_update,
};
 327
/* Cursor planes add prepare/cleanup hooks to manage the transfer fence. */
static const struct drm_plane_helper_funcs virtio_gpu_cursor_helper_funcs = {
	.prepare_fb		= virtio_gpu_cursor_prepare_fb,
	.cleanup_fb		= virtio_gpu_cursor_cleanup_fb,
	.atomic_check		= virtio_gpu_plane_atomic_check,
	.atomic_update		= virtio_gpu_cursor_plane_update,
};
 334
 335struct drm_plane *virtio_gpu_plane_init(struct virtio_gpu_device *vgdev,
 336                                        enum drm_plane_type type,
 337                                        int index)
 338{
 339        struct drm_device *dev = vgdev->ddev;
 340        const struct drm_plane_helper_funcs *funcs;
 341        struct drm_plane *plane;
 342        const uint32_t *formats;
 343        int ret, nformats;
 344
 345        plane = kzalloc(sizeof(*plane), GFP_KERNEL);
 346        if (!plane)
 347                return ERR_PTR(-ENOMEM);
 348
 349        if (type == DRM_PLANE_TYPE_CURSOR) {
 350                formats = virtio_gpu_cursor_formats;
 351                nformats = ARRAY_SIZE(virtio_gpu_cursor_formats);
 352                funcs = &virtio_gpu_cursor_helper_funcs;
 353        } else {
 354                formats = virtio_gpu_formats;
 355                nformats = ARRAY_SIZE(virtio_gpu_formats);
 356                funcs = &virtio_gpu_primary_helper_funcs;
 357        }
 358        ret = drm_universal_plane_init(dev, plane, 1 << index,
 359                                       &virtio_gpu_plane_funcs,
 360                                       formats, nformats,
 361                                       NULL, type, NULL);
 362        if (ret)
 363                goto err_plane_init;
 364
 365        drm_plane_helper_add(plane, funcs);
 366        return plane;
 367
 368err_plane_init:
 369        kfree(plane);
 370        return ERR_PTR(ret);
 371}
 372