linux/drivers/gpu/drm/nouveau/nv17_fence.c
/*
 * Copyright 2012 Red Hat Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Ben Skeggs <bskeggs@redhat.com>
 */

#include <nvif/os.h>
#include <nvif/class.h>

#include "nouveau_drm.h"
#include "nouveau_dma.h"
#include "nv10_fence.h"

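/* Order execution of "chan" after "prev" using a semaphore in the shared
 * fence buffer: "prev" acquires the current sequence value and releases
 * value + 1, then "chan" acquires value + 1 and releases value + 2, so
 * "chan" stalls until "prev" has passed this point.
 */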
int
nv17_fence_sync(struct nouveau_fence *fence,
                struct nouveau_channel *prev, struct nouveau_channel *chan)
{
        struct nouveau_cli *cli = (void *)prev->user.client;
        struct nv10_fence_priv *priv = chan->drm->fence;
        struct nv10_fence_chan *fctx = chan->fence;
        u32 value;
        int ret;

        if (!mutex_trylock(&cli->mutex))
                return -EBUSY;

        spin_lock(&priv->lock);
        value = priv->sequence;
        priv->sequence += 2;
        spin_unlock(&priv->lock);

        ret = RING_SPACE(prev, 5);
        if (!ret) {
                BEGIN_NV04(prev, 0, NV11_SUBCHAN_DMA_SEMAPHORE, 4);
                OUT_RING  (prev, fctx->sema.handle);
                OUT_RING  (prev, 0);
                OUT_RING  (prev, value + 0);
                OUT_RING  (prev, value + 1);
                FIRE_RING (prev);
        }

        if (!ret && !(ret = RING_SPACE(chan, 5))) {
                BEGIN_NV04(chan, 0, NV11_SUBCHAN_DMA_SEMAPHORE, 4);
                OUT_RING  (chan, fctx->sema.handle);
                OUT_RING  (chan, 0);
                OUT_RING  (chan, value + 1);
                OUT_RING  (chan, value + 2);
                FIRE_RING (chan);
        }

        mutex_unlock(&cli->mutex);
        return 0;
}

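/* Set up per-channel fence state: reuse the nv10 emit/read hooks, use the
 * semaphore-based sync above, and create a DMA object (NvSema) spanning the
 * driver's fence buffer in VRAM for the semaphore methods to target.
 */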
static int
nv17_fence_context_new(struct nouveau_channel *chan)
{
        struct nv10_fence_priv *priv = chan->drm->fence;
        struct nv10_fence_chan *fctx;
        struct ttm_mem_reg *mem = &priv->bo->bo.mem;
        u32 start = mem->start * PAGE_SIZE;
        u32 limit = start + mem->size - 1;
        int ret = 0;

        fctx = chan->fence = kzalloc(sizeof(*fctx), GFP_KERNEL);
        if (!fctx)
                return -ENOMEM;

        nouveau_fence_context_new(chan, &fctx->base);
        fctx->base.emit = nv10_fence_emit;
        fctx->base.read = nv10_fence_read;
        fctx->base.sync = nv17_fence_sync;

        ret = nvif_object_init(&chan->user, NvSema, NV_DMA_FROM_MEMORY,
                               &(struct nv_dma_v0) {
                                        .target = NV_DMA_V0_TARGET_VRAM,
                                        .access = NV_DMA_V0_ACCESS_RDWR,
                                        .start = start,
                                        .limit = limit,
                               }, sizeof(struct nv_dma_v0),
                               &fctx->sema);
        if (ret)
                nv10_fence_context_del(chan);
        return ret;
}

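/* Restore the semaphore word in the fence buffer after resume so it matches
 * the sequence value tracked in software.
 */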
void
nv17_fence_resume(struct nouveau_drm *drm)
{
        struct nv10_fence_priv *priv = drm->fence;

        nouveau_bo_wr32(priv->bo, 0, priv->sequence);
}

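/* Set up driver-wide fence state: install the nv17 hooks and allocate, pin
 * and map a 4 KiB VRAM buffer that backs the semaphore; its first word is
 * cleared below so the sequence starts at zero.
 */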
int
nv17_fence_create(struct nouveau_drm *drm)
{
        struct nv10_fence_priv *priv;
        int ret = 0;

        priv = drm->fence = kzalloc(sizeof(*priv), GFP_KERNEL);
        if (!priv)
                return -ENOMEM;

        priv->base.dtor = nv10_fence_destroy;
        priv->base.resume = nv17_fence_resume;
        priv->base.context_new = nv17_fence_context_new;
        priv->base.context_del = nv10_fence_context_del;
        priv->base.contexts = 31;
        priv->base.context_base = fence_context_alloc(priv->base.contexts);
        spin_lock_init(&priv->lock);

        ret = nouveau_bo_new(drm->dev, 4096, 0x1000, TTM_PL_FLAG_VRAM,
                             0, 0x0000, NULL, NULL, &priv->bo);
        if (!ret) {
                ret = nouveau_bo_pin(priv->bo, TTM_PL_FLAG_VRAM, false);
                if (!ret) {
                        ret = nouveau_bo_map(priv->bo);
                        if (ret)
                                nouveau_bo_unpin(priv->bo);
                }
                if (ret)
                        nouveau_bo_ref(NULL, &priv->bo);
        }

        if (ret) {
                nv10_fence_destroy(drm);
                return ret;
        }

        nouveau_bo_wr32(priv->bo, 0x000, 0x00000000);
        return ret;
}