linux/drivers/gpu/drm/qxl/qxl_release.c
/*
 * Copyright 2011 Red Hat, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * on the rights to use, copy, modify, merge, publish, distribute, sub
 * license, and/or sell copies of the Software, and to permit persons to whom
 * the Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
 * IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 */
#include "qxl_drv.h"
#include "qxl_object.h"
#include <trace/events/dma_fence.h>

/*
 * drawable cmd cache - allocate a bunch of VRAM pages, suballocate
 * into 256 byte chunks for now - gives 16 cmds per page.
 *
 * use an ida to index into the chunks?
 */
/* manage releasables */
/* stack them 16 high for now - a drawable object is 191 bytes */
#define RELEASE_SIZE 256
#define RELEASES_PER_BO (4096 / RELEASE_SIZE)
/* put an alloc/dealloc surface cmd into one bo and round up to 128 */
#define SURFACE_RELEASE_SIZE 128
#define SURFACE_RELEASES_PER_BO (4096 / SURFACE_RELEASE_SIZE)

static const int release_size_per_bo[] = { RELEASE_SIZE, SURFACE_RELEASE_SIZE, RELEASE_SIZE };
static const int releases_per_bo[] = { RELEASES_PER_BO, SURFACE_RELEASES_PER_BO, RELEASES_PER_BO };

static const char *qxl_get_driver_name(struct dma_fence *fence)
{
        return "qxl";
}

static const char *qxl_get_timeline_name(struct dma_fence *fence)
{
        return "release";
}

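/*
 * Wait for a release fence to signal.  The host processes releases
 * asynchronously, so poke it with an out-of-memory notification and run
 * the release-ring garbage collector in a retry loop until the fence
 * signals or the timeout expires.
 */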
static long qxl_fence_wait(struct dma_fence *fence, bool intr,
                           signed long timeout)
{
        struct qxl_device *qdev;
        struct qxl_release *release;
        int count = 0, sc = 0;
        bool have_drawable_releases;
        unsigned long cur, end = jiffies + timeout;

        qdev = container_of(fence->lock, struct qxl_device, release_lock);
        release = container_of(fence, struct qxl_release, base);
        have_drawable_releases = release->type == QXL_RELEASE_DRAWABLE;

retry:
        sc++;

        if (dma_fence_is_signaled(fence))
                goto signaled;

        qxl_io_notify_oom(qdev);

        for (count = 0; count < 11; count++) {
                if (!qxl_queue_garbage_collect(qdev, true))
                        break;

                if (dma_fence_is_signaled(fence))
                        goto signaled;
        }

        if (dma_fence_is_signaled(fence))
                goto signaled;

        if (have_drawable_releases || sc < 4) {
                if (sc > 2)
                        /* back off */
                        usleep_range(500, 1000);

                if (time_after(jiffies, end))
                        return 0;

                if (have_drawable_releases && sc > 300) {
                        DMA_FENCE_WARN(fence, "failed to wait on release %llu "
                                       "after spincount %d\n",
                                       fence->context & ~0xf0000000, sc);
                        goto signaled;
                }
                goto retry;
        }
        /*
         * The original sync_obj_wait implementation gave up after three
         * spins when have_drawable_releases was not set, so do the same here.
         */

signaled:
        cur = jiffies;
        if (time_after(cur, end))
                return 0;
        return end - cur;
}

static const struct dma_fence_ops qxl_fence_ops = {
        .get_driver_name = qxl_get_driver_name,
        .get_timeline_name = qxl_get_timeline_name,
        .wait = qxl_fence_wait,
};

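/*
 * Allocate a release of the given type and insert it into the device's
 * release idr.  Returns the new idr handle (>= 1) on success, a negative
 * errno on failure.
 */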
static int
qxl_release_alloc(struct qxl_device *qdev, int type,
                  struct qxl_release **ret)
{
        struct qxl_release *release;
        int handle;
        size_t size = sizeof(*release);

        release = kmalloc(size, GFP_KERNEL);
        if (!release) {
                DRM_ERROR("Out of memory\n");
                return -ENOMEM;
        }
        release->base.ops = NULL;
        release->type = type;
        release->release_offset = 0;
        release->surface_release_id = 0;
        INIT_LIST_HEAD(&release->bos);

        idr_preload(GFP_KERNEL);
        spin_lock(&qdev->release_idr_lock);
        handle = idr_alloc(&qdev->release_idr, release, 1, 0, GFP_NOWAIT);
        release->base.seqno = ++qdev->release_seqno;
        spin_unlock(&qdev->release_idr_lock);
        idr_preload_end();
        if (handle < 0) {
                kfree(release);
                *ret = NULL;
                return handle;
        }
        *ret = release;
        DRM_DEBUG_DRIVER("allocated release %d\n", handle);
        release->id = handle;
        return handle;
}

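/*
 * Unreference and unlink every buffer object tracked by the release, then
 * clear the pointer to its backing bo.
 */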
static void
qxl_release_free_list(struct qxl_release *release)
{
        while (!list_empty(&release->bos)) {
                struct qxl_bo_list *entry;
                struct qxl_bo *bo;

                entry = container_of(release->bos.next,
                                     struct qxl_bo_list, tv.head);
                bo = to_qxl_bo(entry->tv.bo);
                qxl_bo_unref(&bo);
                list_del(&entry->tv.head);
                kfree(entry);
        }
        release->release_bo = NULL;
}

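/*
 * Tear down a release: return its surface id, drop it from the release idr
 * and free its buffer list.  If the fence was initialized, signal it and
 * drop the final reference (freeing the fence frees the embedded release);
 * otherwise free the release directly.
 */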
void
qxl_release_free(struct qxl_device *qdev,
                 struct qxl_release *release)
{
        DRM_DEBUG_DRIVER("release %d, type %d\n", release->id, release->type);

        if (release->surface_release_id)
                qxl_surface_id_dealloc(qdev, release->surface_release_id);

        spin_lock(&qdev->release_idr_lock);
        idr_remove(&qdev->release_idr, release->id);
        spin_unlock(&qdev->release_idr_lock);

        if (release->base.ops) {
                WARN_ON(list_empty(&release->bos));
                qxl_release_free_list(release);

                dma_fence_signal(&release->base);
                dma_fence_put(&release->base);
        } else {
                qxl_release_free_list(release);
                kfree(release);
        }
}

static int qxl_release_bo_alloc(struct qxl_device *qdev,
                                struct qxl_bo **bo)
{
        /* pin release BOs; they are too messy to evict */
        return qxl_bo_create(qdev, PAGE_SIZE, false, true,
                             QXL_GEM_DOMAIN_VRAM, NULL, bo);
}

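/*
 * Add a buffer object to the release's validation list, holding a
 * reference to it; adding a bo that is already listed is a no-op.
 */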
int qxl_release_list_add(struct qxl_release *release, struct qxl_bo *bo)
{
        struct qxl_bo_list *entry;

        list_for_each_entry(entry, &release->bos, tv.head) {
                if (entry->tv.bo == &bo->tbo)
                        return 0;
        }

        entry = kmalloc(sizeof(struct qxl_bo_list), GFP_KERNEL);
        if (!entry)
                return -ENOMEM;

        qxl_bo_ref(bo);
        entry->tv.bo = &bo->tbo;
        entry->tv.num_shared = 0;
        list_add_tail(&entry->tv.head, &release->bos);
        return 0;
}

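/*
 * Place an unpinned bo through TTM validation, reserve a shared fence
 * slot on its reservation object and make sure it has a surface id.
 */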
static int qxl_release_validate_bo(struct qxl_bo *bo)
{
        struct ttm_operation_ctx ctx = { true, false };
        int ret;

        if (!bo->pin_count) {
                qxl_ttm_placement_from_domain(bo, bo->type, false);
                ret = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
                if (ret)
                        return ret;
        }

        ret = reservation_object_reserve_shared(bo->tbo.resv, 1);
        if (ret)
                return ret;

        /* allocate a surface for reserved + validated buffers */
        ret = qxl_bo_check_id(bo->gem_base.dev->dev_private, bo);
        if (ret)
                return ret;
        return 0;
}

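/*
 * Reserve and validate all buffer objects on the release's list.  On
 * validation failure, back off the reservation before returning.
 */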
int qxl_release_reserve_list(struct qxl_release *release, bool no_intr)
{
        int ret;
        struct qxl_bo_list *entry;

        /* if only one object is on the release list, it is the release
           itself; since these objects are pinned, no reserve is needed */
        if (list_is_singular(&release->bos))
                return 0;

        ret = ttm_eu_reserve_buffers(&release->ticket, &release->bos,
                                     !no_intr, NULL, true);
        if (ret)
                return ret;

        list_for_each_entry(entry, &release->bos, tv.head) {
                struct qxl_bo *bo = to_qxl_bo(entry->tv.bo);

                ret = qxl_release_validate_bo(bo);
                if (ret) {
                        ttm_eu_backoff_reservation(&release->ticket, &release->bos);
                        return ret;
                }
        }
        return 0;
}

void qxl_release_backoff_reserve_list(struct qxl_release *release)
{
        /* if only one object is on the release list, it is the release
           itself; since these objects are pinned, nothing was reserved */
        if (list_is_singular(&release->bos))
                return;

        ttm_eu_backoff_reservation(&release->ticket, &release->bos);
}

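/*
 * Allocate a release for a surface command.  A destroy command reuses the
 * bo of its create command, stashed 64 bytes after it; any other surface
 * command gets a normal reserved release.
 */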
int qxl_alloc_surface_release_reserved(struct qxl_device *qdev,
                                       enum qxl_surface_cmd_type surface_cmd_type,
                                       struct qxl_release *create_rel,
                                       struct qxl_release **release)
{
        if (surface_cmd_type == QXL_SURFACE_CMD_DESTROY && create_rel) {
                int idr_ret;
                struct qxl_bo *bo;
                union qxl_release_info *info;

                /* stash the release after the create command */
                idr_ret = qxl_release_alloc(qdev, QXL_RELEASE_SURFACE_CMD, release);
                if (idr_ret < 0)
                        return idr_ret;
                bo = create_rel->release_bo;

                (*release)->release_bo = bo;
                (*release)->release_offset = create_rel->release_offset + 64;

                qxl_release_list_add(*release, bo);

                info = qxl_release_map(qdev, *release);
                info->id = idr_ret;
                qxl_release_unmap(qdev, *release, info);
                return 0;
        }

        return qxl_alloc_release_reserved(qdev, sizeof(struct qxl_surface_cmd),
                                          QXL_RELEASE_SURFACE_CMD, release, NULL);
}

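/*
 * Allocate a release and suballocate its command storage out of the
 * current per-type release bo, starting a fresh bo once the current one
 * is full.  The idr handle is written into the mapped release info.
 */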
int qxl_alloc_release_reserved(struct qxl_device *qdev, unsigned long size,
                               int type, struct qxl_release **release,
                               struct qxl_bo **rbo)
{
        struct qxl_bo *bo;
        int idr_ret;
        int ret = 0;
        union qxl_release_info *info;
        int cur_idx;

        if (type == QXL_RELEASE_DRAWABLE)
                cur_idx = 0;
        else if (type == QXL_RELEASE_SURFACE_CMD)
                cur_idx = 1;
        else if (type == QXL_RELEASE_CURSOR_CMD)
                cur_idx = 2;
        else {
                DRM_ERROR("got illegal type: %d\n", type);
                return -EINVAL;
        }

        idr_ret = qxl_release_alloc(qdev, type, release);
        if (idr_ret < 0) {
                if (rbo)
                        *rbo = NULL;
                return idr_ret;
        }

        mutex_lock(&qdev->release_mutex);
        if (qdev->current_release_bo_offset[cur_idx] + 1 >= releases_per_bo[cur_idx]) {
                qxl_bo_unref(&qdev->current_release_bo[cur_idx]);
                qdev->current_release_bo_offset[cur_idx] = 0;
                qdev->current_release_bo[cur_idx] = NULL;
        }
        if (!qdev->current_release_bo[cur_idx]) {
                ret = qxl_release_bo_alloc(qdev, &qdev->current_release_bo[cur_idx]);
                if (ret) {
                        mutex_unlock(&qdev->release_mutex);
                        qxl_release_free(qdev, *release);
                        return ret;
                }
        }

        bo = qxl_bo_ref(qdev->current_release_bo[cur_idx]);

        (*release)->release_bo = bo;
        (*release)->release_offset = qdev->current_release_bo_offset[cur_idx] * release_size_per_bo[cur_idx];
        qdev->current_release_bo_offset[cur_idx]++;

        if (rbo)
                *rbo = bo;

        mutex_unlock(&qdev->release_mutex);

        ret = qxl_release_list_add(*release, bo);
        qxl_bo_unref(&bo);
        if (ret) {
                qxl_release_free(qdev, *release);
                return ret;
        }

        info = qxl_release_map(qdev, *release);
        info->id = idr_ret;
        qxl_release_unmap(qdev, *release, info);

        return ret;
}

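/*
 * Look up a release by idr handle.  The idr lock is taken only for the
 * lookup; the caller is responsible for the release's lifetime.
 */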
struct qxl_release *qxl_release_from_id_locked(struct qxl_device *qdev,
                                               uint64_t id)
{
        struct qxl_release *release;

        spin_lock(&qdev->release_idr_lock);
        release = idr_find(&qdev->release_idr, id);
        spin_unlock(&qdev->release_idr_lock);
        if (!release) {
                DRM_ERROR("failed to find id in release_idr\n");
                return NULL;
        }

        return release;
}

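/*
 * Map the page of the release bo containing this release's info record
 * and return a pointer to the record.  Pair with qxl_release_unmap().
 */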
union qxl_release_info *qxl_release_map(struct qxl_device *qdev,
                                        struct qxl_release *release)
{
        void *ptr;
        union qxl_release_info *info;
        struct qxl_bo *bo = release->release_bo;

        ptr = qxl_bo_kmap_atomic_page(qdev, bo, release->release_offset & PAGE_MASK);
        if (!ptr)
                return NULL;
        info = ptr + (release->release_offset & ~PAGE_MASK);
        return info;
}

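/* Undo qxl_release_map(): unmap the page holding the info record. */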
void qxl_release_unmap(struct qxl_device *qdev,
                       struct qxl_release *release,
                       union qxl_release_info *info)
{
        struct qxl_bo *bo = release->release_bo;
        void *ptr;

        ptr = ((void *)info) - (release->release_offset & ~PAGE_MASK);
        qxl_bo_kunmap_atomic_page(qdev, bo, ptr);
}

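/*
 * Initialize the release's fence, add it as a shared fence to every
 * reserved bo, return the bos to the LRU and unlock their reservations,
 * finishing the cycle started by qxl_release_reserve_list().
 */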
void qxl_release_fence_buffer_objects(struct qxl_release *release)
{
        struct ttm_buffer_object *bo;
        struct ttm_bo_global *glob;
        struct ttm_bo_device *bdev;
        struct ttm_validate_buffer *entry;
        struct qxl_device *qdev;

        /* if only one object is on the release list, it is the release
           itself; since these objects are pinned, nothing was reserved */
        if (list_is_singular(&release->bos) || list_empty(&release->bos))
                return;

        bo = list_first_entry(&release->bos, struct ttm_validate_buffer, head)->bo;
        bdev = bo->bdev;
        qdev = container_of(bdev, struct qxl_device, mman.bdev);

        /*
         * Since we never really allocated a context and we don't want to conflict,
         * set the highest bits. This will break if we really allow exporting of dma-bufs.
         */
        dma_fence_init(&release->base, &qxl_fence_ops, &qdev->release_lock,
                       release->id | 0xf0000000, release->base.seqno);
        trace_dma_fence_emit(&release->base);

        glob = bdev->glob;

        spin_lock(&glob->lru_lock);

        list_for_each_entry(entry, &release->bos, head) {
                bo = entry->bo;

                reservation_object_add_shared_fence(bo->resv, &release->base);
                ttm_bo_add_to_lru(bo);
                reservation_object_unlock(bo->resv);
        }
        spin_unlock(&glob->lru_lock);
        ww_acquire_fini(&release->ticket);
}