/* linux/drivers/gpu/drm/nouveau/nv17_fence.c */
/*
 * Copyright 2012 Red Hat Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Ben Skeggs <bskeggs@redhat.com>
 */
  24
  25#include <core/object.h>
  26#include <core/class.h>
  27
  28#include "nouveau_drm.h"
  29#include "nouveau_dma.h"
  30#include "nv10_fence.h"
  31
  32int
  33nv17_fence_sync(struct nouveau_fence *fence,
  34                struct nouveau_channel *prev, struct nouveau_channel *chan)
  35{
  36        struct nv10_fence_priv *priv = chan->drm->fence;
  37        u32 value;
  38        int ret;
  39
  40        if (!mutex_trylock(&prev->cli->mutex))
  41                return -EBUSY;
  42
  43        spin_lock(&priv->lock);
  44        value = priv->sequence;
  45        priv->sequence += 2;
  46        spin_unlock(&priv->lock);
  47
  48        ret = RING_SPACE(prev, 5);
  49        if (!ret) {
  50                BEGIN_NV04(prev, 0, NV11_SUBCHAN_DMA_SEMAPHORE, 4);
  51                OUT_RING  (prev, NvSema);
  52                OUT_RING  (prev, 0);
  53                OUT_RING  (prev, value + 0);
  54                OUT_RING  (prev, value + 1);
  55                FIRE_RING (prev);
  56        }
  57
  58        if (!ret && !(ret = RING_SPACE(chan, 5))) {
  59                BEGIN_NV04(chan, 0, NV11_SUBCHAN_DMA_SEMAPHORE, 4);
  60                OUT_RING  (chan, NvSema);
  61                OUT_RING  (chan, 0);
  62                OUT_RING  (chan, value + 1);
  63                OUT_RING  (chan, value + 2);
  64                FIRE_RING (chan);
  65        }
  66
  67        mutex_unlock(&prev->cli->mutex);
  68        return 0;
  69}
  70
  71static int
  72nv17_fence_context_new(struct nouveau_channel *chan)
  73{
  74        struct nv10_fence_priv *priv = chan->drm->fence;
  75        struct nv10_fence_chan *fctx;
  76        struct ttm_mem_reg *mem = &priv->bo->bo.mem;
  77        struct nouveau_object *object;
  78        u32 start = mem->start * PAGE_SIZE;
  79        u32 limit = start + mem->size - 1;
  80        int ret = 0;
  81
  82        fctx = chan->fence = kzalloc(sizeof(*fctx), GFP_KERNEL);
  83        if (!fctx)
  84                return -ENOMEM;
  85
  86        nouveau_fence_context_new(&fctx->base);
  87        fctx->base.emit = nv10_fence_emit;
  88        fctx->base.read = nv10_fence_read;
  89        fctx->base.sync = nv17_fence_sync;
  90
  91        ret = nouveau_object_new(nv_object(chan->cli), chan->handle,
  92                                 NvSema, 0x0002,
  93                                 &(struct nv_dma_class) {
  94                                        .flags = NV_DMA_TARGET_VRAM |
  95                                                 NV_DMA_ACCESS_RDWR,
  96                                        .start = start,
  97                                        .limit = limit,
  98                                 }, sizeof(struct nv_dma_class),
  99                                 &object);
 100        if (ret)
 101                nv10_fence_context_del(chan);
 102        return ret;
 103}
 104
 105void
 106nv17_fence_resume(struct nouveau_drm *drm)
 107{
 108        struct nv10_fence_priv *priv = drm->fence;
 109
 110        nouveau_bo_wr32(priv->bo, 0, priv->sequence);
 111}
 112
 113int
 114nv17_fence_create(struct nouveau_drm *drm)
 115{
 116        struct nv10_fence_priv *priv;
 117        int ret = 0;
 118
 119        priv = drm->fence = kzalloc(sizeof(*priv), GFP_KERNEL);
 120        if (!priv)
 121                return -ENOMEM;
 122
 123        priv->base.dtor = nv10_fence_destroy;
 124        priv->base.resume = nv17_fence_resume;
 125        priv->base.context_new = nv17_fence_context_new;
 126        priv->base.context_del = nv10_fence_context_del;
 127        spin_lock_init(&priv->lock);
 128
 129        ret = nouveau_bo_new(drm->dev, 4096, 0x1000, TTM_PL_FLAG_VRAM,
 130                             0, 0x0000, NULL, &priv->bo);
 131        if (!ret) {
 132                ret = nouveau_bo_pin(priv->bo, TTM_PL_FLAG_VRAM);
 133                if (!ret) {
 134                        ret = nouveau_bo_map(priv->bo);
 135                        if (ret)
 136                                nouveau_bo_unpin(priv->bo);
 137                }
 138                if (ret)
 139                        nouveau_bo_ref(NULL, &priv->bo);
 140        }
 141
 142        if (ret) {
 143                nv10_fence_destroy(drm);
 144                return ret;
 145        }
 146
 147        nouveau_bo_wr32(priv->bo, 0x000, 0x00000000);
 148        return ret;
 149}
 150