linux/drivers/gpu/drm/nouveau/nouveau_chan.c
/*
 * Copyright 2012 Red Hat Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Ben Skeggs
 */

#include <nvif/os.h>
#include <nvif/class.h>

/*XXX*/
#include <core/client.h>

#include "nouveau_drm.h"
#include "nouveau_dma.h"
#include "nouveau_bo.h"
#include "nouveau_chan.h"
#include "nouveau_fence.h"
#include "nouveau_abi16.h"

MODULE_PARM_DESC(vram_pushbuf, "Create DMA push buffers in VRAM");
int nouveau_vram_pushbuf;
module_param_named(vram_pushbuf, nouveau_vram_pushbuf, int, 0400);

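/* Synchronise with the channel: emit a fence and wait (uninterruptibly)
 * for it to signal, so that all previously submitted work has completed.
 */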
int
nouveau_channel_idle(struct nouveau_channel *chan)
{
	struct nouveau_cli *cli = (void *)nvif_client(chan->object);
	struct nouveau_fence *fence = NULL;
	int ret;

	ret = nouveau_fence_new(chan, false, &fence);
	if (!ret) {
		ret = nouveau_fence_wait(fence, false, false);
		nouveau_fence_unref(&fence);
	}

	if (ret)
		NV_PRINTK(error, cli, "failed to idle channel 0x%08x [%s]\n",
			  chan->object->handle, nvxx_client(&cli->base)->name);
	return ret;
}

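/* Tear down a channel: idle it so outstanding work completes, destroy the
 * fence context, then release every object and buffer created for it.
 * *pchan is cleared on return.
 */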
void
nouveau_channel_del(struct nouveau_channel **pchan)
{
	struct nouveau_channel *chan = *pchan;
	if (chan) {
		if (chan->fence) {
			nouveau_channel_idle(chan);
			nouveau_fence(chan->drm)->context_del(chan);
		}
		nvif_object_fini(&chan->nvsw);
		nvif_object_fini(&chan->gart);
		nvif_object_fini(&chan->vram);
		nvif_object_ref(NULL, &chan->object);
		nvif_object_fini(&chan->push.ctxdma);
		nouveau_bo_vma_del(chan->push.buffer, &chan->push.vma);
		nouveau_bo_unmap(chan->push.buffer);
		if (chan->push.buffer && chan->push.buffer->pin_refcnt)
			nouveau_bo_unpin(chan->push.buffer);
		nouveau_bo_ref(NULL, &chan->push.buffer);
		nvif_device_ref(NULL, &chan->device);
		kfree(chan);
	}
	*pchan = NULL;
}

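/* Setup common to GPFIFO and DMA channels: allocate the channel struct
 * and its push buffer, then create the ctxdma object spanning the memory
 * space the push buffer lives in (client VM, VRAM, AGP or the whole MMU
 * range, depending on chipset and buffer placement).
 */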
static int
nouveau_channel_prep(struct nouveau_drm *drm, struct nvif_device *device,
		     u32 handle, u32 size, struct nouveau_channel **pchan)
{
	struct nouveau_cli *cli = (void *)nvif_client(&device->base);
	struct nvkm_mmu *mmu = nvxx_mmu(device);
	struct nv_dma_v0 args = {};
	struct nouveau_channel *chan;
	u32 target;
	int ret;

	chan = *pchan = kzalloc(sizeof(*chan), GFP_KERNEL);
	if (!chan)
		return -ENOMEM;

	nvif_device_ref(device, &chan->device);
	chan->drm = drm;

	/* allocate memory for dma push buffer */
	target = TTM_PL_FLAG_TT | TTM_PL_FLAG_UNCACHED;
	if (nouveau_vram_pushbuf)
		target = TTM_PL_FLAG_VRAM;

	ret = nouveau_bo_new(drm->dev, size, 0, target, 0, 0, NULL, NULL,
			     &chan->push.buffer);
	if (ret == 0) {
		ret = nouveau_bo_pin(chan->push.buffer, target, false);
		if (ret == 0)
			ret = nouveau_bo_map(chan->push.buffer);
	}

	if (ret) {
		nouveau_channel_del(pchan);
		return ret;
	}

	/* Create a dma object covering the *entire* memory space that the
	 * pushbuf lives in; the GEM code requires that we be able to call
	 * out to other (indirect) push buffers.
	 */
	chan->push.vma.offset = chan->push.buffer->bo.offset;

	if (device->info.family >= NV_DEVICE_INFO_V0_TESLA) {
		ret = nouveau_bo_vma_add(chan->push.buffer, cli->vm,
					 &chan->push.vma);
		if (ret) {
			nouveau_channel_del(pchan);
			return ret;
		}

		args.target = NV_DMA_V0_TARGET_VM;
		args.access = NV_DMA_V0_ACCESS_VM;
		args.start = 0;
		args.limit = cli->vm->mmu->limit - 1;
	} else
	if (chan->push.buffer->bo.mem.mem_type == TTM_PL_VRAM) {
		if (device->info.family == NV_DEVICE_INFO_V0_TNT) {
			/* nv04 vram pushbuf hack: retarget to its location
			 * in the framebuffer bar rather than direct vram
			 * access; it's not known why this is necessary, the
			 * workaround was inherited from the -nv ddx.
			 */
			args.target = NV_DMA_V0_TARGET_PCI;
			args.access = NV_DMA_V0_ACCESS_RDWR;
			args.start = nv_device_resource_start(nvxx_device(device), 1);
			args.limit = args.start + device->info.ram_user - 1;
		} else {
			args.target = NV_DMA_V0_TARGET_VRAM;
			args.access = NV_DMA_V0_ACCESS_RDWR;
			args.start = 0;
			args.limit = device->info.ram_user - 1;
		}
	} else {
		if (chan->drm->agp.stat == ENABLED) {
			args.target = NV_DMA_V0_TARGET_AGP;
			args.access = NV_DMA_V0_ACCESS_RDWR;
			args.start = chan->drm->agp.base;
			args.limit = chan->drm->agp.base +
				     chan->drm->agp.size - 1;
		} else {
			args.target = NV_DMA_V0_TARGET_VM;
			args.access = NV_DMA_V0_ACCESS_RDWR;
			args.start = 0;
			args.limit = mmu->limit - 1;
		}
	}

	ret = nvif_object_init(nvif_object(device), NULL, NVDRM_PUSH |
			       (handle & 0xffff), NV_DMA_FROM_MEMORY,
			       &args, sizeof(args), &chan->push.ctxdma);
	if (ret) {
		nouveau_channel_del(pchan);
		return ret;
	}

	return 0;
}

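/* Create an indirect-buffer (GPFIFO) channel, trying hardware classes
 * from newest (Kepler) to oldest (NV50) until one succeeds.  The 0x12000
 * byte push buffer provides 0x10000 bytes of command space plus a 0x2000
 * byte indirect buffer at offset 0x10000.
 */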
static int
nouveau_channel_ind(struct nouveau_drm *drm, struct nvif_device *device,
		    u32 handle, u32 engine, struct nouveau_channel **pchan)
{
	static const u16 oclasses[] = { KEPLER_CHANNEL_GPFIFO_A,
					FERMI_CHANNEL_GPFIFO,
					G82_CHANNEL_GPFIFO,
					NV50_CHANNEL_GPFIFO,
					0 };
	const u16 *oclass = oclasses;
	union {
		struct nv50_channel_gpfifo_v0 nv50;
		struct kepler_channel_gpfifo_a_v0 kepler;
	} args, *retn;
	struct nouveau_channel *chan;
	u32 size;
	int ret;

	/* allocate dma push buffer */
	ret = nouveau_channel_prep(drm, device, handle, 0x12000, &chan);
	*pchan = chan;
	if (ret)
		return ret;

	/* create channel object */
	do {
		if (oclass[0] >= KEPLER_CHANNEL_GPFIFO_A) {
			args.kepler.version = 0;
			args.kepler.engine  = engine;
			args.kepler.pushbuf = chan->push.ctxdma.handle;
			args.kepler.ilength = 0x02000;
			args.kepler.ioffset = 0x10000 + chan->push.vma.offset;
			size = sizeof(args.kepler);
		} else {
			args.nv50.version = 0;
			args.nv50.pushbuf = chan->push.ctxdma.handle;
			args.nv50.ilength = 0x02000;
			args.nv50.ioffset = 0x10000 + chan->push.vma.offset;
			size = sizeof(args.nv50);
		}

		ret = nvif_object_new(nvif_object(device), handle, *oclass++,
				      &args, size, &chan->object);
		if (ret == 0) {
			retn = chan->object->data;
			if (chan->object->oclass >= KEPLER_CHANNEL_GPFIFO_A)
				chan->chid = retn->kepler.chid;
			else
				chan->chid = retn->nv50.chid;
			return ret;
		}
	} while (*oclass);

	nouveau_channel_del(pchan);
	return ret;
}

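/* Create a DMA-mode channel for pre-NV50 hardware, trying classes from
 * NV40 down to NV03 until one succeeds.
 */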
static int
nouveau_channel_dma(struct nouveau_drm *drm, struct nvif_device *device,
		    u32 handle, struct nouveau_channel **pchan)
{
	static const u16 oclasses[] = { NV40_CHANNEL_DMA,
					NV17_CHANNEL_DMA,
					NV10_CHANNEL_DMA,
					NV03_CHANNEL_DMA,
					0 };
	const u16 *oclass = oclasses;
	struct nv03_channel_dma_v0 args, *retn;
	struct nouveau_channel *chan;
	int ret;

	/* allocate dma push buffer */
	ret = nouveau_channel_prep(drm, device, handle, 0x10000, &chan);
	*pchan = chan;
	if (ret)
		return ret;

	/* create channel object */
	args.version = 0;
	args.pushbuf = chan->push.ctxdma.handle;
	args.offset = chan->push.vma.offset;

	do {
		ret = nvif_object_new(nvif_object(device), handle, *oclass++,
				      &args, sizeof(args), &chan->object);
		if (ret == 0) {
			retn = chan->object->data;
			chan->chid = retn->chid;
			return ret;
		}
	} while (ret && *oclass);

	nouveau_channel_del(pchan);
	return ret;
}

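/* Post-creation setup: map the channel, create the VRAM and GART DMA
 * objects needed on pre-Fermi chips, initialise the software put/get
 * tracking state, and create the per-channel fence context.
 */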
static int
nouveau_channel_init(struct nouveau_channel *chan, u32 vram, u32 gart)
{
	struct nvif_device *device = chan->device;
	struct nouveau_cli *cli = (void *)nvif_client(&device->base);
	struct nvkm_mmu *mmu = nvxx_mmu(device);
	struct nvkm_sw_chan *swch;
	struct nv_dma_v0 args = {};
	int ret, i;

	nvif_object_map(chan->object);

	/* allocate dma objects to cover all allowed vram, and gart */
	if (device->info.family < NV_DEVICE_INFO_V0_FERMI) {
		if (device->info.family >= NV_DEVICE_INFO_V0_TESLA) {
			args.target = NV_DMA_V0_TARGET_VM;
			args.access = NV_DMA_V0_ACCESS_VM;
			args.start = 0;
			args.limit = cli->vm->mmu->limit - 1;
		} else {
			args.target = NV_DMA_V0_TARGET_VRAM;
			args.access = NV_DMA_V0_ACCESS_RDWR;
			args.start = 0;
			args.limit = device->info.ram_user - 1;
		}

		ret = nvif_object_init(chan->object, NULL, vram,
				       NV_DMA_IN_MEMORY, &args,
				       sizeof(args), &chan->vram);
		if (ret)
			return ret;

		if (device->info.family >= NV_DEVICE_INFO_V0_TESLA) {
			args.target = NV_DMA_V0_TARGET_VM;
			args.access = NV_DMA_V0_ACCESS_VM;
			args.start = 0;
			args.limit = cli->vm->mmu->limit - 1;
		} else
		if (chan->drm->agp.stat == ENABLED) {
			args.target = NV_DMA_V0_TARGET_AGP;
			args.access = NV_DMA_V0_ACCESS_RDWR;
			args.start = chan->drm->agp.base;
			args.limit = chan->drm->agp.base +
				     chan->drm->agp.size - 1;
		} else {
			args.target = NV_DMA_V0_TARGET_VM;
			args.access = NV_DMA_V0_ACCESS_RDWR;
			args.start = 0;
			args.limit = mmu->limit - 1;
		}

		ret = nvif_object_init(chan->object, NULL, gart,
				       NV_DMA_IN_MEMORY, &args,
				       sizeof(args), &chan->gart);
		if (ret)
			return ret;
	}

	/* initialise dma tracking parameters */
	switch (chan->object->oclass & 0x00ff) {
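	/* DMA-mode channel classes (NVxx_CHANNEL_DMA): no indirect buffer,
	 * methods are fetched directly from the push buffer. */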
	case 0x006b:
	case 0x006e:
		chan->user_put = 0x40;
		chan->user_get = 0x44;
		chan->dma.max = (0x10000 / 4) - 2;
		break;
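	/* GPFIFO classes: commands are fetched via the indirect buffer set
	 * up at offset 0x10000 of the push buffer. */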
	default:
		chan->user_put = 0x40;
		chan->user_get = 0x44;
		chan->user_get_hi = 0x60;
		chan->dma.ib_base =  0x10000 / 4;
		chan->dma.ib_max  = (0x02000 / 8) - 1;
		chan->dma.ib_put  = 0;
		chan->dma.ib_free = chan->dma.ib_max - chan->dma.ib_put;
		chan->dma.max = chan->dma.ib_base;
		break;
	}

	chan->dma.put = 0;
	chan->dma.cur = chan->dma.put;
	chan->dma.free = chan->dma.max - chan->dma.cur;

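	/* pad the start of the push buffer with NOUVEAU_DMA_SKIPS words of
	 * zeroes before any real commands are submitted */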
	ret = RING_SPACE(chan, NOUVEAU_DMA_SKIPS);
	if (ret)
		return ret;

	for (i = 0; i < NOUVEAU_DMA_SKIPS; i++)
		OUT_RING(chan, 0x00000000);

	/* allocate software object class (used for fences on <= nv05) */
	if (device->info.family < NV_DEVICE_INFO_V0_CELSIUS) {
		ret = nvif_object_init(chan->object, NULL, 0x006e, 0x006e,
				       NULL, 0, &chan->nvsw);
		if (ret)
			return ret;

		swch = (void *)nvxx_object(&chan->nvsw)->parent;
		swch->flip = nouveau_flip_complete;
		swch->flip_data = chan;

		ret = RING_SPACE(chan, 2);
		if (ret)
			return ret;

		BEGIN_NV04(chan, NvSubSw, 0x0000, 1);
		OUT_RING  (chan, chan->nvsw.handle);
		FIRE_RING (chan);
	}

	/* initialise synchronisation */
	return nouveau_fence(chan->drm)->context_new(chan);
}

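/* Create and initialise a channel.  An indirect (GPFIFO) channel is
 * preferred, with a DMA channel as the fallback on hardware that lacks
 * GPFIFO support.  arg0 doubles as the engine selector for GPFIFO
 * channels and the handle for the VRAM ctxdma created on pre-Fermi
 * chips; arg1 is the handle for the GART ctxdma.
 *
 * Minimal usage sketch (the handle and ctxdma names below are
 * illustrative, not defined in this file):
 *
 *	struct nouveau_channel *chan;
 *	int ret = nouveau_channel_new(drm, &drm->device, NVDRM_CHAN,
 *				      NvDmaFB, NvDmaTT, &chan);
 *	if (ret == 0) {
 *		// ... submit work on chan ...
 *		nouveau_channel_del(&chan);
 *	}
 */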
int
nouveau_channel_new(struct nouveau_drm *drm, struct nvif_device *device,
		    u32 handle, u32 arg0, u32 arg1,
		    struct nouveau_channel **pchan)
{
	struct nouveau_cli *cli = (void *)nvif_client(&device->base);
	bool super;
	int ret;

	/* hack until fencenv50 is fixed, and agp access relaxed */
	super = cli->base.super;
	cli->base.super = true;

	ret = nouveau_channel_ind(drm, device, handle, arg0, pchan);
	if (ret) {
		NV_PRINTK(debug, cli, "ib channel create, %d\n", ret);
		ret = nouveau_channel_dma(drm, device, handle, pchan);
		if (ret) {
			NV_PRINTK(debug, cli, "dma channel create, %d\n", ret);
			goto done;
		}
	}

	ret = nouveau_channel_init(*pchan, arg0, arg1);
	if (ret) {
		NV_PRINTK(error, cli, "channel failed to initialise, %d\n", ret);
		nouveau_channel_del(pchan);
	}

done:
	cli->base.super = super;
	return ret;
}