linux/drivers/gpu/drm/vmwgfx/vmwgfx_buffer.c
/**************************************************************************
 *
 * Copyright © 2009 VMware, Inc., Palo Alto, CA., USA
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/

#include "vmwgfx_drv.h"
#include "ttm/ttm_bo_driver.h"
#include "ttm/ttm_placement.h"

static uint32_t vram_placement_flags = TTM_PL_FLAG_VRAM |
        TTM_PL_FLAG_CACHED;

static uint32_t vram_ne_placement_flags = TTM_PL_FLAG_VRAM |
        TTM_PL_FLAG_CACHED |
        TTM_PL_FLAG_NO_EVICT;

static uint32_t sys_placement_flags = TTM_PL_FLAG_SYSTEM |
        TTM_PL_FLAG_CACHED;

static uint32_t gmr_placement_flags = VMW_PL_FLAG_GMR |
        TTM_PL_FLAG_CACHED;

struct ttm_placement vmw_vram_placement = {
        .fpfn = 0,
        .lpfn = 0,
        .num_placement = 1,
        .placement = &vram_placement_flags,
        .num_busy_placement = 1,
        .busy_placement = &vram_placement_flags
};

static uint32_t vram_gmr_placement_flags[] = {
        TTM_PL_FLAG_VRAM | TTM_PL_FLAG_CACHED,
        VMW_PL_FLAG_GMR | TTM_PL_FLAG_CACHED
};

struct ttm_placement vmw_vram_gmr_placement = {
        .fpfn = 0,
        .lpfn = 0,
        .num_placement = 2,
        .placement = vram_gmr_placement_flags,
        .num_busy_placement = 1,
        .busy_placement = &gmr_placement_flags
};

struct ttm_placement vmw_vram_sys_placement = {
        .fpfn = 0,
        .lpfn = 0,
        .num_placement = 1,
        .placement = &vram_placement_flags,
        .num_busy_placement = 1,
        .busy_placement = &sys_placement_flags
};

struct ttm_placement vmw_vram_ne_placement = {
        .fpfn = 0,
        .lpfn = 0,
        .num_placement = 1,
        .placement = &vram_ne_placement_flags,
        .num_busy_placement = 1,
        .busy_placement = &vram_ne_placement_flags
};

struct ttm_placement vmw_sys_placement = {
        .fpfn = 0,
        .lpfn = 0,
        .num_placement = 1,
        .placement = &sys_placement_flags,
        .num_busy_placement = 1,
        .busy_placement = &sys_placement_flags
};
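
/*
 * A ttm_placement lists the memory types a buffer may occupy, in order
 * of preference; busy_placement is the fallback list used when the
 * preferred placements are contended. Validating against
 * vmw_vram_gmr_placement, for instance, asks for VRAM or a GMR slot but
 * accepts only a GMR under memory pressure. An illustrative caller (not
 * from this file), assuming the five-argument ttm_bo_validate() of this
 * TTM version:
 *
 *      ret = ttm_bo_validate(bo, &vmw_vram_gmr_placement,
 *                            true, false, false);
 */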

struct vmw_ttm_backend {
        struct ttm_backend backend;
        struct page **pages;
        unsigned long num_pages;
        struct vmw_private *dev_priv;
        int gmr_id;
};
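
/*
 * Backend lifecycle: TTM calls populate() first to hand the backend its
 * page array (vmw_ttm_populate merely caches it; dummy_read_page and
 * dma_addrs go unused), then bind()/unbind() as the buffer moves into
 * and out of a GMR, clear() when the pages are released, and destroy()
 * when the backend itself is freed.
 */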

static int vmw_ttm_populate(struct ttm_backend *backend,
                            unsigned long num_pages, struct page **pages,
                            struct page *dummy_read_page,
                            dma_addr_t *dma_addrs)
{
        struct vmw_ttm_backend *vmw_be =
            container_of(backend, struct vmw_ttm_backend, backend);

        vmw_be->pages = pages;
        vmw_be->num_pages = num_pages;

        return 0;
}

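/*
 * The gmrid manager allocated a GMR id into bo_mem->start; vmw_gmr_bind()
 * builds a descriptor list for the cached pages and hands it to the
 * device so that the guest memory region is backed by them.
 */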
static int vmw_ttm_bind(struct ttm_backend *backend, struct ttm_mem_reg *bo_mem)
{
        struct vmw_ttm_backend *vmw_be =
            container_of(backend, struct vmw_ttm_backend, backend);

        vmw_be->gmr_id = bo_mem->start;

        return vmw_gmr_bind(vmw_be->dev_priv, vmw_be->pages,
                            vmw_be->num_pages, vmw_be->gmr_id);
}

static int vmw_ttm_unbind(struct ttm_backend *backend)
{
        struct vmw_ttm_backend *vmw_be =
            container_of(backend, struct vmw_ttm_backend, backend);

        vmw_gmr_unbind(vmw_be->dev_priv, vmw_be->gmr_id);
        return 0;
}

static void vmw_ttm_clear(struct ttm_backend *backend)
{
        struct vmw_ttm_backend *vmw_be =
                container_of(backend, struct vmw_ttm_backend, backend);

        vmw_be->pages = NULL;
        vmw_be->num_pages = 0;
}

static void vmw_ttm_destroy(struct ttm_backend *backend)
{
        struct vmw_ttm_backend *vmw_be =
            container_of(backend, struct vmw_ttm_backend, backend);

        kfree(vmw_be);
}

static struct ttm_backend_func vmw_ttm_func = {
        .populate = vmw_ttm_populate,
        .clear = vmw_ttm_clear,
        .bind = vmw_ttm_bind,
        .unbind = vmw_ttm_unbind,
        .destroy = vmw_ttm_destroy,
};

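/*
 * Allocate a backend for TTM's create_ttm_backend_entry hook and recover
 * the driver-private structure from the embedding ttm_bo_device. Note
 * that pages, num_pages and gmr_id are left uninitialized here;
 * populate() and bind() fill them in before they are read.
 */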
struct ttm_backend *vmw_ttm_backend_init(struct ttm_bo_device *bdev)
{
        struct vmw_ttm_backend *vmw_be;

        vmw_be = kmalloc(sizeof(*vmw_be), GFP_KERNEL);
        if (!vmw_be)
                return NULL;

        vmw_be->backend.func = &vmw_ttm_func;
        vmw_be->dev_priv = container_of(bdev, struct vmw_private, bdev);

        return &vmw_be->backend;
}

int vmw_invalidate_caches(struct ttm_bo_device *bdev, uint32_t flags)
{
        return 0;
}

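/*
 * Describe the memory types this driver exposes to TTM: plain system
 * pages, the VRAM aperture (handled by TTM's generic range manager),
 * and GMR ids (handled by the driver's own vmw_gmrid_manager_func).
 */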
int vmw_init_mem_type(struct ttm_bo_device *bdev, uint32_t type,
                      struct ttm_mem_type_manager *man)
{
        switch (type) {
        case TTM_PL_SYSTEM:
                /* System memory */

                man->flags = TTM_MEMTYPE_FLAG_MAPPABLE;
                man->available_caching = TTM_PL_FLAG_CACHED;
                man->default_caching = TTM_PL_FLAG_CACHED;
                break;
        case TTM_PL_VRAM:
                /* "On-card" video ram */
                man->func = &ttm_bo_manager_func;
                man->gpu_offset = 0;
                man->flags = TTM_MEMTYPE_FLAG_FIXED | TTM_MEMTYPE_FLAG_MAPPABLE;
                man->available_caching = TTM_PL_FLAG_CACHED;
                man->default_caching = TTM_PL_FLAG_CACHED;
                break;
        case VMW_PL_GMR:
                /*
                 * "Guest Memory Regions" is an aperture-like feature with
                 * one slot per bo. There are upper limits on both the
                 * number of slots and the size of each bo.
                 */
                man->func = &vmw_gmrid_manager_func;
                man->gpu_offset = 0;
                man->flags = TTM_MEMTYPE_FLAG_CMA | TTM_MEMTYPE_FLAG_MAPPABLE;
                man->available_caching = TTM_PL_FLAG_CACHED;
                man->default_caching = TTM_PL_FLAG_CACHED;
                break;
        default:
                DRM_ERROR("Unsupported memory type %u\n", (unsigned)type);
                return -EINVAL;
        }
        return 0;
}

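/*
 * Evicted buffers always land in cached system memory; see
 * vmw_sys_placement above.
 */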
void vmw_evict_flags(struct ttm_buffer_object *bo,
                     struct ttm_placement *placement)
{
        *placement = vmw_sys_placement;
}

/**
 * FIXME: Proper access checks on buffers.
 */

static int vmw_verify_access(struct ttm_buffer_object *bo, struct file *filp)
{
        return 0;
}

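/*
 * Fill in the bus placement for a memory region. System and GMR memory
 * need no I/O mapping, so they return with is_iomem false. VRAM is a
 * real aperture: the CPU-visible address of a buffer is
 *
 *      bus.base + bus.offset = vram_start + (mem->start << PAGE_SHIFT)
 *
 * e.g. (hypothetical numbers) with vram_start = 0xe8000000,
 * mem->start = 0x10 and 4K pages, the buffer maps at 0xe8010000.
 */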
static int vmw_ttm_io_mem_reserve(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem)
{
        struct ttm_mem_type_manager *man = &bdev->man[mem->mem_type];
        struct vmw_private *dev_priv = container_of(bdev, struct vmw_private, bdev);

        mem->bus.addr = NULL;
        mem->bus.is_iomem = false;
        mem->bus.offset = 0;
        mem->bus.size = mem->num_pages << PAGE_SHIFT;
        mem->bus.base = 0;
        if (!(man->flags & TTM_MEMTYPE_FLAG_MAPPABLE))
                return -EINVAL;
        switch (mem->mem_type) {
        case TTM_PL_SYSTEM:
        case VMW_PL_GMR:
                return 0;
        case TTM_PL_VRAM:
                mem->bus.offset = mem->start << PAGE_SHIFT;
                mem->bus.base = dev_priv->vram_start;
                mem->bus.is_iomem = true;
                break;
        default:
                return -EINVAL;
        }
        return 0;
}

static void vmw_ttm_io_mem_free(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem)
{
}

static int vmw_ttm_fault_reserve_notify(struct ttm_buffer_object *bo)
{
        return 0;
}

/**
 * FIXME: We're using the old vmware polling method to sync.
 * Do this with fences instead.
 */

static void *vmw_sync_obj_ref(void *sync_obj)
{
        return sync_obj;
}

static void vmw_sync_obj_unref(void **sync_obj)
{
        *sync_obj = NULL;
}

static int vmw_sync_obj_flush(void *sync_obj, void *sync_arg)
{
        struct vmw_private *dev_priv = (struct vmw_private *)sync_arg;

        mutex_lock(&dev_priv->hw_mutex);
        vmw_write(dev_priv, SVGA_REG_SYNC, SVGA_SYNC_GENERIC);
        mutex_unlock(&dev_priv->hw_mutex);
        return 0;
}

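/*
 * The "sync object" is not an object at all: the 32-bit fence sequence
 * number is carried directly in the void pointer, which is why ref() and
 * unref() need no reference counting and signaled()/wait() simply cast
 * the pointer back to a sequence. Note that vmw_sync_obj_wait() ignores
 * its lazy and interruptible arguments and always does a
 * non-interruptible wait with a 3-second timeout.
 */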
static bool vmw_sync_obj_signaled(void *sync_obj, void *sync_arg)
{
        struct vmw_private *dev_priv = (struct vmw_private *)sync_arg;
        uint32_t sequence = (unsigned long) sync_obj;

        return vmw_fence_signaled(dev_priv, sequence);
}

static int vmw_sync_obj_wait(void *sync_obj, void *sync_arg,
                             bool lazy, bool interruptible)
{
        struct vmw_private *dev_priv = (struct vmw_private *)sync_arg;
        uint32_t sequence = (unsigned long) sync_obj;

        return vmw_wait_fence(dev_priv, false, sequence, false, 3*HZ);
}

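/*
 * With .move left NULL, TTM falls back to its generic move paths
 * (ttm_bo_move_ttm / ttm_bo_move_memcpy) when migrating buffers.
 */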
struct ttm_bo_driver vmw_bo_driver = {
        .create_ttm_backend_entry = vmw_ttm_backend_init,
        .invalidate_caches = vmw_invalidate_caches,
        .init_mem_type = vmw_init_mem_type,
        .evict_flags = vmw_evict_flags,
        .move = NULL,
        .verify_access = vmw_verify_access,
        .sync_obj_signaled = vmw_sync_obj_signaled,
        .sync_obj_wait = vmw_sync_obj_wait,
        .sync_obj_flush = vmw_sync_obj_flush,
        .sync_obj_unref = vmw_sync_obj_unref,
        .sync_obj_ref = vmw_sync_obj_ref,
        .move_notify = NULL,
        .swap_notify = NULL,
        .fault_reserve_notify = &vmw_ttm_fault_reserve_notify,
        .io_mem_reserve = &vmw_ttm_io_mem_reserve,
        .io_mem_free = &vmw_ttm_io_mem_free,
};