linux/drivers/gpu/drm/qxl/qxl_object.c
/*
 * Copyright 2013 Red Hat Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Dave Airlie
 *          Alon Levy
 */

#include <linux/dma-buf-map.h>
#include <linux/io-mapping.h>

#include "qxl_drv.h"
#include "qxl_object.h"

static int __qxl_bo_pin(struct qxl_bo *bo);
static void __qxl_bo_unpin(struct qxl_bo *bo);

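/*
 * TTM destroy callback, run when the last reference to the BO is dropped:
 * evict any backing surface, warn if a kernel mapping is still live, take
 * the BO off the device's gem.objects list and free the embedded GEM
 * object.
 */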
static void qxl_ttm_bo_destroy(struct ttm_buffer_object *tbo)
{
        struct qxl_bo *bo;
        struct qxl_device *qdev;

        bo = to_qxl_bo(tbo);
        qdev = to_qxl(bo->tbo.base.dev);

        qxl_surface_evict(qdev, bo, false);
        WARN_ON_ONCE(bo->map_count > 0);
        mutex_lock(&qdev->gem.mutex);
        list_del_init(&bo->list);
        mutex_unlock(&qdev->gem.mutex);
        drm_gem_object_release(&bo->tbo.base);
        kfree(bo);
}

bool qxl_ttm_bo_is_qxl_bo(struct ttm_buffer_object *bo)
{
        if (bo->destroy == &qxl_ttm_bo_destroy)
                return true;
        return false;
}

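/*
 * Build the TTM placement list on @qbo for the given QXL_GEM_DOMAIN_*
 * value: VRAM maps to TTM_PL_VRAM, SURFACE to TTM_PL_PRIV with VRAM as a
 * second choice, CPU to TTM_PL_SYSTEM, and anything unknown falls back to
 * system memory.  Objects no larger than a page are flagged
 * TTM_PL_FLAG_TOPDOWN.
 */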
void qxl_ttm_placement_from_domain(struct qxl_bo *qbo, u32 domain)
{
        u32 c = 0;
        u32 pflag = 0;
        unsigned int i;

        if (qbo->tbo.base.size <= PAGE_SIZE)
                pflag |= TTM_PL_FLAG_TOPDOWN;

        qbo->placement.placement = qbo->placements;
        qbo->placement.busy_placement = qbo->placements;
        if (domain == QXL_GEM_DOMAIN_VRAM) {
                qbo->placements[c].mem_type = TTM_PL_VRAM;
                qbo->placements[c++].flags = pflag;
        }
        if (domain == QXL_GEM_DOMAIN_SURFACE) {
                qbo->placements[c].mem_type = TTM_PL_PRIV;
                qbo->placements[c++].flags = pflag;
                qbo->placements[c].mem_type = TTM_PL_VRAM;
                qbo->placements[c++].flags = pflag;
        }
        if (domain == QXL_GEM_DOMAIN_CPU) {
                qbo->placements[c].mem_type = TTM_PL_SYSTEM;
                qbo->placements[c++].flags = pflag;
        }
        if (!c) {
                qbo->placements[c].mem_type = TTM_PL_SYSTEM;
                qbo->placements[c++].flags = 0;
        }
        qbo->placement.num_placement = c;
        qbo->placement.num_busy_placement = c;
        for (i = 0; i < c; ++i) {
                qbo->placements[i].fpfn = 0;
                qbo->placements[i].lpfn = 0;
        }
}

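/* GEM object callbacks shared by every qxl BO (hooked up in qxl_bo_create()). */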
static const struct drm_gem_object_funcs qxl_object_funcs = {
        .free = qxl_gem_object_free,
        .open = qxl_gem_object_open,
        .close = qxl_gem_object_close,
        .pin = qxl_gem_prime_pin,
        .unpin = qxl_gem_prime_unpin,
        .get_sg_table = qxl_gem_prime_get_sg_table,
        .vmap = qxl_gem_prime_vmap,
        .vunmap = qxl_gem_prime_vunmap,
        .mmap = drm_gem_ttm_mmap,
        .print_info = drm_gem_ttm_print_info,
};

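/*
 * qxl_bo_create() - allocate and initialize a qxl buffer object
 *
 * @size is rounded up to whole pages, @kernel selects ttm_bo_type_kernel
 * vs. ttm_bo_type_device (and makes the TTM validation non-interruptible),
 * @pinned pins the BO before it is unreserved, @domain and @priority seed
 * the initial placement and TTM eviction priority, and @surf, if non-NULL,
 * is copied into the BO.  On success *@bo_ptr holds the new, unreserved BO
 * with a single GEM reference owned by the caller; a negative errno is
 * returned on failure.
 *
 * Illustrative call (parameters are made up, not taken from a real caller):
 *
 *      r = qxl_bo_create(qdev, size, true, false, QXL_GEM_DOMAIN_VRAM,
 *                        0, NULL, &bo);
 */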
int qxl_bo_create(struct qxl_device *qdev, unsigned long size,
                  bool kernel, bool pinned, u32 domain, u32 priority,
                  struct qxl_surface *surf,
                  struct qxl_bo **bo_ptr)
{
        struct ttm_operation_ctx ctx = { !kernel, false };
        struct qxl_bo *bo;
        enum ttm_bo_type type;
        int r;

        if (kernel)
                type = ttm_bo_type_kernel;
        else
                type = ttm_bo_type_device;
        *bo_ptr = NULL;
        bo = kzalloc(sizeof(struct qxl_bo), GFP_KERNEL);
        if (bo == NULL)
                return -ENOMEM;
        size = roundup(size, PAGE_SIZE);
        r = drm_gem_object_init(&qdev->ddev, &bo->tbo.base, size);
        if (unlikely(r)) {
                kfree(bo);
                return r;
        }
        bo->tbo.base.funcs = &qxl_object_funcs;
        bo->type = domain;
        bo->surface_id = 0;
        INIT_LIST_HEAD(&bo->list);

        if (surf)
                bo->surf = *surf;

        qxl_ttm_placement_from_domain(bo, domain);

        bo->tbo.priority = priority;
        r = ttm_bo_init_reserved(&qdev->mman.bdev, &bo->tbo, size, type,
                                 &bo->placement, 0, &ctx, NULL, NULL,
                                 &qxl_ttm_bo_destroy);
        if (unlikely(r != 0)) {
                if (r != -ERESTARTSYS)
                        dev_err(qdev->ddev.dev,
                                "object_init failed for (%lu, 0x%08X)\n",
                                size, domain);
                return r;
        }
        if (pinned)
                ttm_bo_pin(&bo->tbo);
        ttm_bo_unreserve(&bo->tbo);
        *bo_ptr = bo;
        return 0;
}

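/*
 * Map the whole BO into the kernel address space.  The caller must hold
 * the BO's reservation lock (see the dma_resv_assert_held()); mappings are
 * refcounted via bo->map_count, so nested calls simply reuse the existing
 * mapping.
 */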
int qxl_bo_vmap_locked(struct qxl_bo *bo, struct dma_buf_map *map)
{
        int r;

        dma_resv_assert_held(bo->tbo.base.resv);

        if (bo->kptr) {
                bo->map_count++;
                goto out;
        }
        r = ttm_bo_vmap(&bo->tbo, &bo->map);
        if (r)
                return r;
        bo->map_count = 1;

        /* TODO: Remove kptr in favor of map everywhere. */
        if (bo->map.is_iomem)
                bo->kptr = (void *)bo->map.vaddr_iomem;
        else
                bo->kptr = bo->map.vaddr;

out:
        *map = bo->map;
        return 0;
}

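/*
 * Unlocked wrapper around qxl_bo_vmap_locked(): reserves the BO and pins
 * it so the backing storage cannot move underneath the caller, then
 * creates the vmap.  Undone with qxl_bo_vunmap().
 */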
int qxl_bo_vmap(struct qxl_bo *bo, struct dma_buf_map *map)
{
        int r;

        r = qxl_bo_reserve(bo);
        if (r)
                return r;

        r = __qxl_bo_pin(bo);
        if (r) {
                qxl_bo_unreserve(bo);
                return r;
        }

        r = qxl_bo_vmap_locked(bo, map);
        qxl_bo_unreserve(bo);
        return r;
}

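/*
 * Map a single page of the BO for a short atomic access.  BOs in VRAM or
 * surface memory (TTM_PL_PRIV) go through the device's io_mapping;
 * anything else falls back to the (refcounted) full vmap.  Must be paired
 * with qxl_bo_kunmap_atomic_page().
 */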
void *qxl_bo_kmap_atomic_page(struct qxl_device *qdev,
                              struct qxl_bo *bo, int page_offset)
{
        unsigned long offset;
        void *rptr;
        int ret;
        struct io_mapping *map;
        struct dma_buf_map bo_map;

        if (bo->tbo.resource->mem_type == TTM_PL_VRAM)
                map = qdev->vram_mapping;
        else if (bo->tbo.resource->mem_type == TTM_PL_PRIV)
                map = qdev->surface_mapping;
        else
                goto fallback;

        offset = bo->tbo.resource->start << PAGE_SHIFT;
        return io_mapping_map_atomic_wc(map, offset + page_offset);
fallback:
        if (bo->kptr) {
                rptr = bo->kptr + (page_offset * PAGE_SIZE);
                return rptr;
        }

        ret = qxl_bo_vmap_locked(bo, &bo_map);
        if (ret)
                return NULL;
        rptr = bo_map.vaddr; /* TODO: Use mapping abstraction properly */

        rptr += page_offset * PAGE_SIZE;
        return rptr;
}

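/*
 * Drop one reference to the kernel mapping; the actual ttm_bo_vunmap()
 * only happens once the last user is gone.  Caller must hold the
 * reservation lock.
 */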
void qxl_bo_vunmap_locked(struct qxl_bo *bo)
{
        dma_resv_assert_held(bo->tbo.base.resv);

        if (bo->kptr == NULL)
                return;
        bo->map_count--;
        if (bo->map_count > 0)
                return;
        bo->kptr = NULL;
        ttm_bo_vunmap(&bo->tbo, &bo->map);
}

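/* Unlocked counterpart of qxl_bo_vmap(): unmaps and then unpins the BO. */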
int qxl_bo_vunmap(struct qxl_bo *bo)
{
        int r;

        r = qxl_bo_reserve(bo);
        if (r)
                return r;

        qxl_bo_vunmap_locked(bo);
        __qxl_bo_unpin(bo);
        qxl_bo_unreserve(bo);
        return 0;
}

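/*
 * Undo qxl_bo_kmap_atomic_page(): tear down the atomic io_mapping, or drop
 * the fallback vmap reference for BOs that live outside VRAM/PRIV.
 */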
void qxl_bo_kunmap_atomic_page(struct qxl_device *qdev,
                               struct qxl_bo *bo, void *pmap)
{
        if ((bo->tbo.resource->mem_type != TTM_PL_VRAM) &&
            (bo->tbo.resource->mem_type != TTM_PL_PRIV))
                goto fallback;

        io_mapping_unmap_atomic(pmap);
        return;
 fallback:
        qxl_bo_vunmap_locked(bo);
}

void qxl_bo_unref(struct qxl_bo **bo)
{
        if ((*bo) == NULL)
                return;

        drm_gem_object_put(&(*bo)->tbo.base);
        *bo = NULL;
}

struct qxl_bo *qxl_bo_ref(struct qxl_bo *bo)
{
        drm_gem_object_get(&bo->tbo.base);
        return bo;
}

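/*
 * Pin helper for callers that already hold the reservation lock.  An
 * already-pinned BO only gets its pin count bumped; otherwise the BO is
 * first validated into its preferred placement and then pinned.
 */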
static int __qxl_bo_pin(struct qxl_bo *bo)
{
        struct ttm_operation_ctx ctx = { false, false };
        struct drm_device *ddev = bo->tbo.base.dev;
        int r;

        if (bo->tbo.pin_count) {
                ttm_bo_pin(&bo->tbo);
                return 0;
        }
        qxl_ttm_placement_from_domain(bo, bo->type);
        r = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
        if (likely(r == 0))
                ttm_bo_pin(&bo->tbo);
        if (unlikely(r != 0))
                dev_err(ddev->dev, "%p pin failed\n", bo);
        return r;
}

static void __qxl_bo_unpin(struct qxl_bo *bo)
{
        ttm_bo_unpin(&bo->tbo);
}

/*
 * Reserve the BO before pinning the object.  If the BO was already
 * reserved, use the internal version, __qxl_bo_pin(), directly.
 */
int qxl_bo_pin(struct qxl_bo *bo)
{
        int r;

        r = qxl_bo_reserve(bo);
        if (r)
                return r;

        r = __qxl_bo_pin(bo);
        qxl_bo_unreserve(bo);
        return r;
}

/*
 * Reserve the BO before unpinning the object.  If the BO was already
 * reserved, use the internal version, __qxl_bo_unpin(), directly.
 */
int qxl_bo_unpin(struct qxl_bo *bo)
{
        int r;

        r = qxl_bo_reserve(bo);
        if (r)
                return r;

        __qxl_bo_unpin(bo);
        qxl_bo_unreserve(bo);
        return 0;
}

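/*
 * Warn about and forcibly release any objects still on the device's
 * gem.objects list (i.e. objects userspace leaked); dropping the GEM
 * reference ultimately frees the BO through qxl_ttm_bo_destroy().
 */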
void qxl_bo_force_delete(struct qxl_device *qdev)
{
        struct qxl_bo *bo, *n;

        if (list_empty(&qdev->gem.objects))
                return;
        dev_err(qdev->ddev.dev, "Userspace still has active objects!\n");
        list_for_each_entry_safe(bo, n, &qdev->gem.objects, list) {
                dev_err(qdev->ddev.dev, "%p %p %lu %lu force free\n",
                        &bo->tbo.base, bo, (unsigned long)bo->tbo.base.size,
                        *((unsigned long *)&bo->tbo.base.refcount));
                mutex_lock(&qdev->gem.mutex);
                list_del_init(&bo->list);
                mutex_unlock(&qdev->gem.mutex);
                /* this should unref the ttm bo */
                drm_gem_object_put(&bo->tbo.base);
        }
}

int qxl_bo_init(struct qxl_device *qdev)
{
        return qxl_ttm_init(qdev);
}

void qxl_bo_fini(struct qxl_device *qdev)
{
        qxl_ttm_fini(qdev);
}

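/*
 * Lazily assign a hardware surface to a surface BO: on first use allocate
 * a surface id and create the surface on the device via
 * qxl_hw_surface_alloc().  BOs from other domains are left untouched.
 */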
int qxl_bo_check_id(struct qxl_device *qdev, struct qxl_bo *bo)
{
        int ret;

        if (bo->type == QXL_GEM_DOMAIN_SURFACE && bo->surface_id == 0) {
                /* allocate a surface id for this surface now */
                ret = qxl_surface_id_alloc(qdev, bo);
                if (ret)
                        return ret;

                ret = qxl_hw_surface_alloc(qdev, bo);
                if (ret)
                        return ret;
        }
        return 0;
}

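/*
 * Evict every BO from the surface (TTM_PL_PRIV) and VRAM resource managers
 * respectively; both helpers simply forward to
 * ttm_resource_manager_evict_all().
 */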
int qxl_surf_evict(struct qxl_device *qdev)
{
        struct ttm_resource_manager *man;

        man = ttm_manager_type(&qdev->mman.bdev, TTM_PL_PRIV);
        return ttm_resource_manager_evict_all(&qdev->mman.bdev, man);
}

int qxl_vram_evict(struct qxl_device *qdev)
{
        struct ttm_resource_manager *man;

        man = ttm_manager_type(&qdev->mman.bdev, TTM_PL_VRAM);
        return ttm_resource_manager_evict_all(&qdev->mman.bdev, man);
}