linux/drivers/staging/vboxvideo/vbva_base.c
/*
 * Copyright (C) 2006-2017 Oracle Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */

#include "vbox_drv.h"
#include "vbox_err.h"
#include "vboxvideo_guest.h"
#include "hgsmi_channels.h"

/*
 * There is a hardware ring buffer in the graphics device video RAM, formerly
 * in the VBox VMMDev PCI memory space.
 * All graphics commands go there, serialized by vbva_buffer_begin_update
 * and vbva_buffer_end_update.
 *
 * free_offset is the writing position, data_offset is the reading position.
 * free_offset == data_offset means the buffer is empty.
 * There must always be a gap between data_offset and free_offset while there
 * is data in the buffer.
 * Guest only changes free_offset, host changes data_offset.
 */

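/*
 * Rough caller-side sketch (guest_pool, cmd and cmd_len are illustrative
 * names, not taken from this file): a graphics command is pushed into the
 * ring buffer between a begin/end update pair:
 *
 *	if (vbva_buffer_begin_update(vbva_ctx, guest_pool)) {
 *		vbva_write(vbva_ctx, guest_pool, cmd, cmd_len);
 *		vbva_buffer_end_update(vbva_ctx);
 *	}
 */

/* Return the number of bytes available for writing in the ring buffer. */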
static u32 vbva_buffer_available(const struct vbva_buffer *vbva)
{
        s32 diff = vbva->data_offset - vbva->free_offset;

        return diff > 0 ? diff : vbva->data_len + diff;
}

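/*
 * Copy @len bytes from @p into the ring buffer at @offset, wrapping around
 * to the start of the buffer when the chunk crosses the end.
 */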
static void vbva_buffer_place_data_at(struct vbva_buf_ctx *vbva_ctx,
                                      const void *p, u32 len, u32 offset)
{
        struct vbva_buffer *vbva = vbva_ctx->vbva;
        u32 bytes_till_boundary = vbva->data_len - offset;
        u8 *dst = &vbva->data[offset];
        s32 diff = len - bytes_till_boundary;

        if (diff <= 0) {
                /* Chunk will not cross buffer boundary. */
                memcpy(dst, p, len);
        } else {
                /* Chunk crosses buffer boundary. */
                memcpy(dst, p, bytes_till_boundary);
                memcpy(&vbva->data[0], (u8 *)p + bytes_till_boundary, diff);
        }
}

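/* Send a VBVA_FLUSH command to the host over HGSMI. */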
static void vbva_buffer_flush(struct gen_pool *ctx)
{
        struct vbva_flush *p;

        p = hgsmi_buffer_alloc(ctx, sizeof(*p), HGSMI_CH_VBVA, VBVA_FLUSH);
        if (!p)
                return;

        p->reserved = 0;

        hgsmi_buffer_submit(ctx, p);
        hgsmi_buffer_free(ctx, p);
}

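/*
 * Copy @len bytes at @p into the ring buffer, flushing to the host and
 * splitting the data into chunks as needed.  May only be called between
 * vbva_buffer_begin_update() and vbva_buffer_end_update().
 */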
bool vbva_write(struct vbva_buf_ctx *vbva_ctx, struct gen_pool *ctx,
                const void *p, u32 len)
{
        struct vbva_record *record;
        struct vbva_buffer *vbva;
        u32 available;

        vbva = vbva_ctx->vbva;
        record = vbva_ctx->record;

        if (!vbva || vbva_ctx->buffer_overflow ||
            !record || !(record->len_and_flags & VBVA_F_RECORD_PARTIAL))
                return false;

        available = vbva_buffer_available(vbva);

        while (len > 0) {
                u32 chunk = len;

                if (chunk >= available) {
                        vbva_buffer_flush(ctx);
                        available = vbva_buffer_available(vbva);
                }

                if (chunk >= available) {
                        if (WARN_ON(available <= vbva->partial_write_tresh)) {
                                vbva_ctx->buffer_overflow = true;
                                return false;
                        }
                        chunk = available - vbva->partial_write_tresh;
                }

                vbva_buffer_place_data_at(vbva_ctx, p, chunk,
                                          vbva->free_offset);

                vbva->free_offset = (vbva->free_offset + chunk) %
                                    vbva->data_len;
                record->len_and_flags += chunk;
                available -= chunk;
                len -= chunk;
                p += chunk;
        }

        return true;
}

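/* Ask the host to enable or disable VBVA for the given screen. */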
static bool vbva_inform_host(struct vbva_buf_ctx *vbva_ctx,
                             struct gen_pool *ctx, s32 screen, bool enable)
{
        struct vbva_enable_ex *p;
        bool ret;

        p = hgsmi_buffer_alloc(ctx, sizeof(*p), HGSMI_CH_VBVA, VBVA_ENABLE);
        if (!p)
                return false;

        p->base.flags = enable ? VBVA_F_ENABLE : VBVA_F_DISABLE;
        p->base.offset = vbva_ctx->buffer_offset;
        p->base.result = VERR_NOT_SUPPORTED;
        if (screen >= 0) {
                p->base.flags |= VBVA_F_EXTENDED | VBVA_F_ABSOFFSET;
                p->screen_id = screen;
        }

        hgsmi_buffer_submit(ctx, p);

        if (enable)
                ret = RT_SUCCESS(p->base.result);
        else
                ret = true;

        hgsmi_buffer_free(ctx, p);

        return ret;
}

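/*
 * Initialize the vbva buffer and ask the host to enable VBVA for @screen.
 * On failure the buffer is disabled again.
 */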
bool vbva_enable(struct vbva_buf_ctx *vbva_ctx, struct gen_pool *ctx,
                 struct vbva_buffer *vbva, s32 screen)
{
        bool ret = false;

        memset(vbva, 0, sizeof(*vbva));
        vbva->partial_write_tresh = 256;
        vbva->data_len = vbva_ctx->buffer_length - sizeof(struct vbva_buffer);
        vbva_ctx->vbva = vbva;

        ret = vbva_inform_host(vbva_ctx, ctx, screen, true);
        if (!ret)
                vbva_disable(vbva_ctx, ctx, screen);

        return ret;
}

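/* Stop using the vbva buffer and ask the host to disable VBVA for @screen. */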
void vbva_disable(struct vbva_buf_ctx *vbva_ctx, struct gen_pool *ctx,
                  s32 screen)
{
        vbva_ctx->buffer_overflow = false;
        vbva_ctx->record = NULL;
        vbva_ctx->vbva = NULL;

        vbva_inform_host(vbva_ctx, ctx, screen, false);
}

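/*
 * Reserve a record for a new command, flushing the records queue first if
 * all slots are in use.  Returns false if VBVA is not enabled or no record
 * could be reserved.
 */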
bool vbva_buffer_begin_update(struct vbva_buf_ctx *vbva_ctx,
                              struct gen_pool *ctx)
{
        struct vbva_record *record;
        u32 next;

        if (!vbva_ctx->vbva ||
            !(vbva_ctx->vbva->host_flags.host_events & VBVA_F_MODE_ENABLED))
                return false;

        WARN_ON(vbva_ctx->buffer_overflow || vbva_ctx->record);

        next = (vbva_ctx->vbva->record_free_index + 1) % VBVA_MAX_RECORDS;

        /* Flush if all slots in the records queue are used */
        if (next == vbva_ctx->vbva->record_first_index)
                vbva_buffer_flush(ctx);

        /* If even after flush there is no place then fail the request */
        if (next == vbva_ctx->vbva->record_first_index)
                return false;

        record = &vbva_ctx->vbva->records[vbva_ctx->vbva->record_free_index];
        record->len_and_flags = VBVA_F_RECORD_PARTIAL;
        vbva_ctx->vbva->record_free_index = next;
        /* Remember which record we are using. */
        vbva_ctx->record = record;

        return true;
}

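/* Complete the current record and stop using it. */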
void vbva_buffer_end_update(struct vbva_buf_ctx *vbva_ctx)
{
        struct vbva_record *record = vbva_ctx->record;

        WARN_ON(!vbva_ctx->vbva || !record ||
                !(record->len_and_flags & VBVA_F_RECORD_PARTIAL));

        /* Mark the record completed. */
        record->len_and_flags &= ~VBVA_F_RECORD_PARTIAL;

        vbva_ctx->buffer_overflow = false;
        vbva_ctx->record = NULL;
}

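/*
 * Record where the vbva buffer lives and how large it is; used later by
 * vbva_enable() and vbva_inform_host().
 */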
void vbva_setup_buffer_context(struct vbva_buf_ctx *vbva_ctx,
                               u32 buffer_offset, u32 buffer_length)
{
        vbva_ctx->buffer_offset = buffer_offset;
        vbva_ctx->buffer_length = buffer_length;
}