linux/drivers/gpu/drm/nouveau/nv84_fence.c
/*
 * Copyright 2012 Red Hat Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Ben Skeggs
 */

#include "nouveau_drv.h"
#include "nouveau_dma.h"
#include "nouveau_fence.h"

#include "nv50_display.h"

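/* Emit a semaphore WRITE_LONG on the channel's ring: once all preceding
 * commands have executed, the GPU writes the 32-bit fence sequence to the
 * given virtual address.
 */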
static int
nv84_fence_emit32(struct nouveau_channel *chan, u64 virtual, u32 sequence)
{
	int ret = RING_SPACE(chan, 8);
	if (ret == 0) {
		BEGIN_NV04(chan, 0, NV11_SUBCHAN_DMA_SEMAPHORE, 1);
		OUT_RING  (chan, chan->vram.handle);
		BEGIN_NV04(chan, 0, NV84_SUBCHAN_SEMAPHORE_ADDRESS_HIGH, 5);
		OUT_RING  (chan, upper_32_bits(virtual));
		OUT_RING  (chan, lower_32_bits(virtual));
		OUT_RING  (chan, sequence);
		OUT_RING  (chan, NV84_SUBCHAN_SEMAPHORE_TRIGGER_WRITE_LONG);
		OUT_RING  (chan, 0x00000000);
		FIRE_RING (chan);
	}
	return ret;
}

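/* Stall the channel with a semaphore ACQUIRE_GEQUAL: command processing
 * resumes only once the 32-bit value at the given virtual address has
 * reached the expected sequence.
 */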
static int
nv84_fence_sync32(struct nouveau_channel *chan, u64 virtual, u32 sequence)
{
	int ret = RING_SPACE(chan, 7);
	if (ret == 0) {
		BEGIN_NV04(chan, 0, NV11_SUBCHAN_DMA_SEMAPHORE, 1);
		OUT_RING  (chan, chan->vram.handle);
		BEGIN_NV04(chan, 0, NV84_SUBCHAN_SEMAPHORE_ADDRESS_HIGH, 4);
		OUT_RING  (chan, upper_32_bits(virtual));
		OUT_RING  (chan, lower_32_bits(virtual));
		OUT_RING  (chan, sequence);
		OUT_RING  (chan, NV84_SUBCHAN_SEMAPHORE_TRIGGER_ACQUIRE_GEQUAL);
		FIRE_RING (chan);
	}
	return ret;
}

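/* Each channel owns a 16-byte slot in the shared fence buffer, indexed by
 * channel id; emit the new sequence into this channel's slot, using the
 * GART mapping for sysmem fences and the VRAM mapping otherwise.
 */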
static int
nv84_fence_emit(struct nouveau_fence *fence)
{
	struct nouveau_channel *chan = fence->channel;
	struct nv84_fence_chan *fctx = chan->fence;
	u64 addr = chan->chid * 16;

	if (fence->sysmem)
		addr += fctx->vma_gart.offset;
	else
		addr += fctx->vma.offset;

	return fctx->base.emit32(chan, addr, fence->base.seqno);
}

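/* Make @chan wait for a fence emitted on @prev by acquiring on the
 * emitting channel's slot in the shared fence buffer.
 */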
static int
nv84_fence_sync(struct nouveau_fence *fence,
		struct nouveau_channel *prev, struct nouveau_channel *chan)
{
	struct nv84_fence_chan *fctx = chan->fence;
	u64 addr = prev->chid * 16;

	if (fence->sysmem)
		addr += fctx->vma_gart.offset;
	else
		addr += fctx->vma.offset;

	return fctx->base.sync32(chan, addr, fence->base.seqno);
}

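/* Read back the last fence sequence the GPU has written for this channel. */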
static u32
nv84_fence_read(struct nouveau_channel *chan)
{
	struct nv84_fence_priv *priv = chan->drm->fence;
	return nouveau_bo_rd32(priv->bo, chan->chid * 16 / 4);
}

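/* Tear down a channel's fence context: preserve the latest software
 * sequence in the fence buffer, drop the per-client VMA mappings, then
 * tear down and free the base context.
 */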
static void
nv84_fence_context_del(struct nouveau_channel *chan)
{
	struct nv84_fence_priv *priv = chan->drm->fence;
	struct nv84_fence_chan *fctx = chan->fence;

	nouveau_bo_wr32(priv->bo, chan->chid * 16 / 4, fctx->base.sequence);
	mutex_lock(&priv->mutex);
	nouveau_bo_vma_del(priv->bo, &fctx->vma_gart);
	nouveau_bo_vma_del(priv->bo, &fctx->vma);
	mutex_unlock(&priv->mutex);
	nouveau_fence_context_del(&fctx->base);
	chan->fence = NULL;
	nouveau_fence_context_free(&fctx->base);
}

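/* Set up per-channel fence state: install the emit/sync/read hooks and map
 * the shared fence buffers into the owning client's virtual address space.
 */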
int
nv84_fence_context_new(struct nouveau_channel *chan)
{
	struct nouveau_cli *cli = (void *)chan->user.client;
	struct nv84_fence_priv *priv = chan->drm->fence;
	struct nv84_fence_chan *fctx;
	int ret;

	fctx = chan->fence = kzalloc(sizeof(*fctx), GFP_KERNEL);
	if (!fctx)
		return -ENOMEM;

	nouveau_fence_context_new(chan, &fctx->base);
	fctx->base.emit = nv84_fence_emit;
	fctx->base.sync = nv84_fence_sync;
	fctx->base.read = nv84_fence_read;
	fctx->base.emit32 = nv84_fence_emit32;
	fctx->base.sync32 = nv84_fence_sync32;
	fctx->base.sequence = nv84_fence_read(chan);

	mutex_lock(&priv->mutex);
	ret = nouveau_bo_vma_add(priv->bo, cli->vm, &fctx->vma);
	if (ret == 0) {
		ret = nouveau_bo_vma_add(priv->bo_gart, cli->vm,
					 &fctx->vma_gart);
	}
	mutex_unlock(&priv->mutex);

	if (ret)
		nv84_fence_context_del(chan);
	return ret;
}

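/* Save every channel's current fence sequence to a system-memory buffer so
 * the state survives suspend, when the fence buffer contents may be lost.
 */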
static bool
nv84_fence_suspend(struct nouveau_drm *drm)
{
	struct nv84_fence_priv *priv = drm->fence;
	int i;

	priv->suspend = vmalloc(priv->base.contexts * sizeof(u32));
	if (priv->suspend) {
		for (i = 0; i < priv->base.contexts; i++)
			priv->suspend[i] = nouveau_bo_rd32(priv->bo, i * 4);
	}

	return priv->suspend != NULL;
}

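/* Restore the fence sequences saved by nv84_fence_suspend() and release
 * the temporary buffer.
 */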
static void
nv84_fence_resume(struct nouveau_drm *drm)
{
	struct nv84_fence_priv *priv = drm->fence;
	int i;

	if (priv->suspend) {
		for (i = 0; i < priv->base.contexts; i++)
			nouveau_bo_wr32(priv->bo, i * 4, priv->suspend[i]);
		vfree(priv->suspend);
		priv->suspend = NULL;
	}
}

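/* Unmap, unpin and release both fence buffers, then free the driver's
 * fence state.  nouveau_bo_unmap() and nouveau_bo_ref() tolerate a NULL
 * bo, so only the unpins need to be guarded.
 */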
static void
nv84_fence_destroy(struct nouveau_drm *drm)
{
	struct nv84_fence_priv *priv = drm->fence;
	nouveau_bo_unmap(priv->bo_gart);
	if (priv->bo_gart)
		nouveau_bo_unpin(priv->bo_gart);
	nouveau_bo_ref(NULL, &priv->bo_gart);
	nouveau_bo_unmap(priv->bo);
	if (priv->bo)
		nouveau_bo_unpin(priv->bo);
	nouveau_bo_ref(NULL, &priv->bo);
	drm->fence = NULL;
	kfree(priv);
}

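/* Allocate the global fence state: a buffer with one 16-byte slot per FIFO
 * channel, placed in VRAM when available, plus a GART-mapped mirror used
 * for sysmem fences.
 */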
int
nv84_fence_create(struct nouveau_drm *drm)
{
	struct nvkm_fifo *fifo = nvxx_fifo(&drm->client.device);
	struct nv84_fence_priv *priv;
	u32 domain;
	int ret;

	priv = drm->fence = kzalloc(sizeof(*priv), GFP_KERNEL);
	if (!priv)
		return -ENOMEM;

	priv->base.dtor = nv84_fence_destroy;
	priv->base.suspend = nv84_fence_suspend;
	priv->base.resume = nv84_fence_resume;
	priv->base.context_new = nv84_fence_context_new;
	priv->base.context_del = nv84_fence_context_del;

	priv->base.contexts = fifo->nr;
	priv->base.context_base = dma_fence_context_alloc(priv->base.contexts);
	priv->base.uevent = true;

	mutex_init(&priv->mutex);

	/* Use VRAM if there is any; otherwise fall back to system memory. */
	domain = drm->client.device.info.ram_size != 0 ? TTM_PL_FLAG_VRAM :
			 /*
			  * fences created in sysmem must be non-cached or we
			  * will lose CPU/GPU coherency!
			  */
			 TTM_PL_FLAG_TT | TTM_PL_FLAG_UNCACHED;
	ret = nouveau_bo_new(&drm->client, 16 * priv->base.contexts, 0,
			     domain, 0, 0, NULL, NULL, &priv->bo);
	if (ret == 0) {
		ret = nouveau_bo_pin(priv->bo, domain, false);
		if (ret == 0) {
			ret = nouveau_bo_map(priv->bo);
			if (ret)
				nouveau_bo_unpin(priv->bo);
		}
		if (ret)
			nouveau_bo_ref(NULL, &priv->bo);
	}

	if (ret == 0)
		ret = nouveau_bo_new(&drm->client, 16 * priv->base.contexts, 0,
				     TTM_PL_FLAG_TT | TTM_PL_FLAG_UNCACHED, 0,
				     0, NULL, NULL, &priv->bo_gart);
	if (ret == 0) {
		ret = nouveau_bo_pin(priv->bo_gart, TTM_PL_FLAG_TT, false);
		if (ret == 0) {
			ret = nouveau_bo_map(priv->bo_gart);
			if (ret)
				nouveau_bo_unpin(priv->bo_gart);
		}
		if (ret)
			nouveau_bo_ref(NULL, &priv->bo_gart);
	}

	if (ret)
		nv84_fence_destroy(drm);
	return ret;
}