linux/drivers/gpu/drm/qxl/qxl_release.c
/*
 * Copyright 2011 Red Hat, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * on the rights to use, copy, modify, merge, publish, distribute, sub
 * license, and/or sell copies of the Software, and to permit persons to whom
 * the Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
 * IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 */
#include "qxl_drv.h"
#include "qxl_object.h"
#include <trace/events/fence.h>

/*
 * drawable cmd cache - allocate a bunch of VRAM pages, suballocate
 * into 256 byte chunks for now - gives 16 cmds per page.
 *
 * use an ida to index into the chunks?
 */
/* manage releasables */
/* stack them 16 high for now - drawable object is 191 */
#define RELEASE_SIZE 256
#define RELEASES_PER_BO (4096 / RELEASE_SIZE)
/* put an alloc/dealloc surface cmd into one bo and round up to 128 */
#define SURFACE_RELEASE_SIZE 128
#define SURFACE_RELEASES_PER_BO (4096 / SURFACE_RELEASE_SIZE)

static const int release_size_per_bo[] = { RELEASE_SIZE, SURFACE_RELEASE_SIZE, RELEASE_SIZE };
static const int releases_per_bo[] = { RELEASES_PER_BO, SURFACE_RELEASES_PER_BO, RELEASES_PER_BO };

static const char *qxl_get_driver_name(struct fence *fence)
{
	return "qxl";
}

static const char *qxl_get_timeline_name(struct fence *fence)
{
	return "release";
}

static bool qxl_nop_signaling(struct fence *fence)
{
	/* fences are always automatically signaled, so just pretend we did this.. */
	return true;
}

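/*
 * QXL has no interrupt-driven fence signaling: a release's fence signals
 * only once the host is done with the command. So this wait handler nudges
 * the device (notify_oom plus up to 11 garbage-collect passes) and polls,
 * backing off with usleep_range() after a couple of spins. Returns the
 * remaining timeout on success or 0 on timeout, per the fence_ops .wait
 * contract.
 */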
static long qxl_fence_wait(struct fence *fence, bool intr, signed long timeout)
{
	struct qxl_device *qdev;
	struct qxl_release *release;
	int count = 0, sc = 0;
	bool have_drawable_releases;
	unsigned long cur, end = jiffies + timeout;

	qdev = container_of(fence->lock, struct qxl_device, release_lock);
	release = container_of(fence, struct qxl_release, base);
	have_drawable_releases = release->type == QXL_RELEASE_DRAWABLE;

retry:
	sc++;

	if (fence_is_signaled(fence))
		goto signaled;

	qxl_io_notify_oom(qdev);

	for (count = 0; count < 11; count++) {
		if (!qxl_queue_garbage_collect(qdev, true))
			break;

		if (fence_is_signaled(fence))
			goto signaled;
	}

	if (fence_is_signaled(fence))
		goto signaled;

	if (have_drawable_releases || sc < 4) {
		if (sc > 2)
			/* back off */
			usleep_range(500, 1000);

		if (time_after(jiffies, end))
			return 0;

		if (have_drawable_releases && sc > 300) {
			FENCE_WARN(fence, "failed to wait on release %d "
					  "after spincount %d\n",
					  fence->context & ~0xf0000000, sc);
			goto signaled;
		}
		goto retry;
	}
	/*
	 * yeah, original sync_obj_wait gave up after 3 spins when
	 * have_drawable_releases is not set.
	 */

signaled:
	cur = jiffies;
	if (time_after(cur, end))
		return 0;
	return end - cur;
}

static const struct fence_ops qxl_fence_ops = {
	.get_driver_name = qxl_get_driver_name,
	.get_timeline_name = qxl_get_timeline_name,
	.enable_signaling = qxl_nop_signaling,
	.wait = qxl_fence_wait,
};

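/*
 * Allocate a release, assign it the next seqno and publish it in the
 * release idr; the idr handle doubles as the release id the host echoes
 * back to us. Returns the handle (>= 1) or a negative errno, with *ret
 * set to the new release (NULL on failure).
 */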
static int
qxl_release_alloc(struct qxl_device *qdev, int type,
		  struct qxl_release **ret)
{
	struct qxl_release *release;
	int handle;
	size_t size = sizeof(*release);

	release = kmalloc(size, GFP_KERNEL);
	if (!release) {
		DRM_ERROR("Out of memory\n");
		*ret = NULL;
		return -ENOMEM;
	}
	release->base.ops = NULL;
	release->type = type;
	release->release_offset = 0;
	release->surface_release_id = 0;
	INIT_LIST_HEAD(&release->bos);

	idr_preload(GFP_KERNEL);
	spin_lock(&qdev->release_idr_lock);
	handle = idr_alloc(&qdev->release_idr, release, 1, 0, GFP_NOWAIT);
	release->base.seqno = ++qdev->release_seqno;
	spin_unlock(&qdev->release_idr_lock);
	idr_preload_end();
	if (handle < 0) {
		kfree(release);
		*ret = NULL;
		return handle;
	}
	*ret = release;
	QXL_INFO(qdev, "allocated release %d\n", handle);
	release->id = handle;
	return handle;
}

static void
qxl_release_free_list(struct qxl_release *release)
{
	while (!list_empty(&release->bos)) {
		struct qxl_bo_list *entry;
		struct qxl_bo *bo;

		entry = container_of(release->bos.next,
				     struct qxl_bo_list, tv.head);
		bo = to_qxl_bo(entry->tv.bo);
		qxl_bo_unref(&bo);
		list_del(&entry->tv.head);
		kfree(entry);
	}
}

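/*
 * Release everything a qxl_release holds: its surface id, its idr handle
 * and its bo references. If the fence has been initialized (base.ops is
 * set), the final kfree happens through fence_put() once the last fence
 * reference is gone; otherwise the release is freed directly.
 */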
void
qxl_release_free(struct qxl_device *qdev,
		 struct qxl_release *release)
{
	QXL_INFO(qdev, "release %d, type %d\n", release->id,
		 release->type);

	if (release->surface_release_id)
		qxl_surface_id_dealloc(qdev, release->surface_release_id);

	spin_lock(&qdev->release_idr_lock);
	idr_remove(&qdev->release_idr, release->id);
	spin_unlock(&qdev->release_idr_lock);

	if (release->base.ops) {
		WARN_ON(list_empty(&release->bos));
		qxl_release_free_list(release);

		fence_signal(&release->base);
		fence_put(&release->base);
	} else {
		qxl_release_free_list(release);
		kfree(release);
	}
}

static int qxl_release_bo_alloc(struct qxl_device *qdev,
				struct qxl_bo **bo)
{
	int ret;
	/* pin release bo's; they are too messy to evict */
	ret = qxl_bo_create(qdev, PAGE_SIZE, false, true,
			    QXL_GEM_DOMAIN_VRAM, NULL,
			    bo);
	return ret;
}

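/*
 * Track a bo on the release. Takes a reference; adding the same bo twice
 * is a no-op. Entries are ttm_validate_buffer nodes, so the list can be
 * handed directly to ttm_eu_reserve_buffers().
 */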
int qxl_release_list_add(struct qxl_release *release, struct qxl_bo *bo)
{
	struct qxl_bo_list *entry;

	list_for_each_entry(entry, &release->bos, tv.head) {
		if (entry->tv.bo == &bo->tbo)
			return 0;
	}

	entry = kmalloc(sizeof(struct qxl_bo_list), GFP_KERNEL);
	if (!entry)
		return -ENOMEM;

	qxl_bo_ref(bo);
	entry->tv.bo = &bo->tbo;
	entry->tv.shared = false;
	list_add_tail(&entry->tv.head, &release->bos);
	return 0;
}

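/*
 * Get a bo ready for use by the device: place and validate it (unless it
 * is pinned), reserve a shared-fence slot on its reservation object, and
 * make sure a surface bo has a surface id allocated.
 */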
static int qxl_release_validate_bo(struct qxl_bo *bo)
{
	int ret;

	if (!bo->pin_count) {
		qxl_ttm_placement_from_domain(bo, bo->type, false);
		ret = ttm_bo_validate(&bo->tbo, &bo->placement,
				      true, false);
		if (ret)
			return ret;
	}

	ret = reservation_object_reserve_shared(bo->tbo.resv);
	if (ret)
		return ret;

	/* allocate a surface for reserved + validated buffers */
	ret = qxl_bo_check_id(bo->gem_base.dev->dev_private, bo);
	if (ret)
		return ret;
	return 0;
}

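/*
 * Reserve and validate every bo on the release. On any failure the whole
 * reservation is backed off, so the caller never sees a partially
 * reserved list.
 */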
int qxl_release_reserve_list(struct qxl_release *release, bool no_intr)
{
	int ret;
	struct qxl_bo_list *entry;

	/* if there is only one object on the release it is the release bo
	   itself; release bos are pinned, so there is no need to reserve */
	if (list_is_singular(&release->bos))
		return 0;

	ret = ttm_eu_reserve_buffers(&release->ticket, &release->bos,
				     !no_intr, NULL);
	if (ret)
		return ret;

	list_for_each_entry(entry, &release->bos, tv.head) {
		struct qxl_bo *bo = to_qxl_bo(entry->tv.bo);

		ret = qxl_release_validate_bo(bo);
		if (ret) {
			ttm_eu_backoff_reservation(&release->ticket, &release->bos);
			return ret;
		}
	}
	return 0;
}

void qxl_release_backoff_reserve_list(struct qxl_release *release)
{
	/* if there is only one object on the release it is the release bo
	   itself; release bos are pinned, so there is no need to reserve */
	if (list_is_singular(&release->bos))
		return;

	ttm_eu_backoff_reservation(&release->ticket, &release->bos);
}

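/*
 * Surface commands are special-cased: a destroy command is stashed in the
 * same bo as its create command, 64 bytes further in, so issuing the
 * destroy never requires a fresh allocation. Everything else falls
 * through to the generic qxl_alloc_release_reserved() path.
 */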
int qxl_alloc_surface_release_reserved(struct qxl_device *qdev,
				       enum qxl_surface_cmd_type surface_cmd_type,
				       struct qxl_release *create_rel,
				       struct qxl_release **release)
{
	if (surface_cmd_type == QXL_SURFACE_CMD_DESTROY && create_rel) {
		int idr_ret;
		struct qxl_bo_list *entry = list_first_entry(&create_rel->bos, struct qxl_bo_list, tv.head);
		struct qxl_bo *bo;
		union qxl_release_info *info;

		/* stash the release after the create command */
		idr_ret = qxl_release_alloc(qdev, QXL_RELEASE_SURFACE_CMD, release);
		if (idr_ret < 0)
			return idr_ret;
		bo = qxl_bo_ref(to_qxl_bo(entry->tv.bo));

		(*release)->release_offset = create_rel->release_offset + 64;

		qxl_release_list_add(*release, bo);

		info = qxl_release_map(qdev, *release);
		info->id = idr_ret;
		qxl_release_unmap(qdev, *release, info);

		qxl_bo_unref(&bo);
		return 0;
	}

	return qxl_alloc_release_reserved(qdev, sizeof(struct qxl_surface_cmd),
					  QXL_RELEASE_SURFACE_CMD, release, NULL);
}

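/*
 * Suballocate a release of the given type out of the current release bo
 * for that type, switching to a freshly allocated bo once the current one
 * is full.
 *
 * Rough caller sketch (the real call sites live elsewhere in the driver,
 * e.g. qxl_draw.c):
 *
 *	qxl_alloc_release_reserved(qdev, size, QXL_RELEASE_DRAWABLE,
 *				   &release, &rbo);
 *	qxl_release_reserve_list(release, false);
 *	... fill the command via qxl_release_map()/qxl_release_unmap(),
 *	push it to the device ring, then ...
 *	qxl_release_fence_buffer_objects(release);
 */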
int qxl_alloc_release_reserved(struct qxl_device *qdev, unsigned long size,
			       int type, struct qxl_release **release,
			       struct qxl_bo **rbo)
{
	struct qxl_bo *bo;
	int idr_ret;
	int ret = 0;
	union qxl_release_info *info;
	int cur_idx;

	if (type == QXL_RELEASE_DRAWABLE)
		cur_idx = 0;
	else if (type == QXL_RELEASE_SURFACE_CMD)
		cur_idx = 1;
	else if (type == QXL_RELEASE_CURSOR_CMD)
		cur_idx = 2;
	else {
		DRM_ERROR("got illegal type: %d\n", type);
		return -EINVAL;
	}

	idr_ret = qxl_release_alloc(qdev, type, release);
	if (idr_ret < 0) {
		if (rbo)
			*rbo = NULL;
		return idr_ret;
	}

	mutex_lock(&qdev->release_mutex);
	if (qdev->current_release_bo_offset[cur_idx] + 1 >= releases_per_bo[cur_idx]) {
		qxl_bo_unref(&qdev->current_release_bo[cur_idx]);
		qdev->current_release_bo_offset[cur_idx] = 0;
		qdev->current_release_bo[cur_idx] = NULL;
	}
	if (!qdev->current_release_bo[cur_idx]) {
		ret = qxl_release_bo_alloc(qdev, &qdev->current_release_bo[cur_idx]);
		if (ret) {
			mutex_unlock(&qdev->release_mutex);
			return ret;
		}
	}

	bo = qxl_bo_ref(qdev->current_release_bo[cur_idx]);

	(*release)->release_offset = qdev->current_release_bo_offset[cur_idx] * release_size_per_bo[cur_idx];
	qdev->current_release_bo_offset[cur_idx]++;

	if (rbo)
		*rbo = bo;

	mutex_unlock(&qdev->release_mutex);

	qxl_release_list_add(*release, bo);

	info = qxl_release_map(qdev, *release);
	info->id = idr_ret;
	qxl_release_unmap(qdev, *release, info);

	qxl_bo_unref(&bo);
	return ret;
}

struct qxl_release *qxl_release_from_id_locked(struct qxl_device *qdev,
					       uint64_t id)
{
	struct qxl_release *release;

	spin_lock(&qdev->release_idr_lock);
	release = idr_find(&qdev->release_idr, id);
	spin_unlock(&qdev->release_idr_lock);
	if (!release) {
		DRM_ERROR("failed to find id in release_idr\n");
		return NULL;
	}

	return release;
}

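/*
 * Map/unmap the release's qxl_release_info. Releases are suballocated out
 * of a page-sized bo, so map the page the release lives in and return a
 * pointer at release_offset within it. Every qxl_release_map() must be
 * paired with a qxl_release_unmap(); the mapping is atomic.
 */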
union qxl_release_info *qxl_release_map(struct qxl_device *qdev,
					struct qxl_release *release)
{
	void *ptr;
	union qxl_release_info *info;
	struct qxl_bo_list *entry = list_first_entry(&release->bos, struct qxl_bo_list, tv.head);
	struct qxl_bo *bo = to_qxl_bo(entry->tv.bo);

	ptr = qxl_bo_kmap_atomic_page(qdev, bo, release->release_offset & PAGE_MASK);
	if (!ptr)
		return NULL;
	info = ptr + (release->release_offset & ~PAGE_MASK);
	return info;
}

void qxl_release_unmap(struct qxl_device *qdev,
		       struct qxl_release *release,
		       union qxl_release_info *info)
{
	struct qxl_bo_list *entry = list_first_entry(&release->bos, struct qxl_bo_list, tv.head);
	struct qxl_bo *bo = to_qxl_bo(entry->tv.bo);
	void *ptr;

	ptr = ((void *)info) - (release->release_offset & ~PAGE_MASK);
	qxl_bo_kunmap_atomic_page(qdev, bo, ptr);
}

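/*
 * Called once the command referencing this release has been pushed to the
 * device ring: initialize the fence, attach it to every bo on the release
 * as a shared fence, move the bos back onto the LRU and drop their
 * reservations.
 */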
void qxl_release_fence_buffer_objects(struct qxl_release *release)
{
	struct ttm_buffer_object *bo;
	struct ttm_bo_global *glob;
	struct ttm_bo_device *bdev;
	struct ttm_bo_driver *driver;
	struct qxl_bo *qbo;
	struct ttm_validate_buffer *entry;
	struct qxl_device *qdev;

	/* if there is only one object on the release it is the release bo
	   itself; release bos are pinned, so there is no need to reserve */
	if (list_is_singular(&release->bos) || list_empty(&release->bos))
		return;

	bo = list_first_entry(&release->bos, struct ttm_validate_buffer, head)->bo;
	bdev = bo->bdev;
	qdev = container_of(bdev, struct qxl_device, mman.bdev);

	/*
	 * Since we never really allocated a context and we don't want to conflict,
	 * set the highest bits. This will break if we really allow exporting of dma-bufs.
	 */
	fence_init(&release->base, &qxl_fence_ops, &qdev->release_lock,
		   release->id | 0xf0000000, release->base.seqno);
	trace_fence_emit(&release->base);

	driver = bdev->driver;
	glob = bo->glob;

	spin_lock(&glob->lru_lock);

	list_for_each_entry(entry, &release->bos, head) {
		bo = entry->bo;
		qbo = to_qxl_bo(bo);

		reservation_object_add_shared_fence(bo->resv, &release->base);
		ttm_bo_add_to_lru(bo);
		__ttm_bo_unreserve(bo);
	}
	spin_unlock(&glob->lru_lock);
	ww_acquire_fini(&release->ticket);
}