linux/drivers/gpu/drm/nouveau/nvkm/engine/dma/usernv04.c
/*
 * Copyright 2012 Red Hat Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Ben Skeggs
 */
#define nv04_dmaobj(p) container_of((p), struct nv04_dmaobj, base)
#include "user.h"

#include <core/gpuobj.h>
#include <subdev/fb.h>
#include <subdev/mmu/vmm.h>

#include <nvif/class.h>

struct nv04_dmaobj {
        struct nvkm_dmaobj base;
        bool clone;     /* alias the NV04 MMU page table instead of the raw range */
        u32 flags0;     /* word 0 of the HW object: class, target and access bits */
        u32 flags2;     /* ORed into words 2/3 along with the page-aligned offset */
};

/* Construct the NV04-style hardware DMA object for this software object. */
static int
nv04_dmaobj_bind(struct nvkm_dmaobj *base, struct nvkm_gpuobj *parent,
                 int align, struct nvkm_gpuobj **pgpuobj)
{
        struct nv04_dmaobj *dmaobj = nv04_dmaobj(base);
        struct nvkm_device *device = dmaobj->base.dma->engine.subdev.device;
        u64 offset = dmaobj->base.start & 0xfffff000;
        u64 adjust = dmaobj->base.start & 0x00000fff;
        u32 length = dmaobj->base.limit - dmaobj->base.start;
        int ret;

        if (dmaobj->clone) {
                struct nvkm_memory *pgt =
                        device->mmu->vmm->pd->pt[0]->memory;
                /* A zero-based clone simply aliases the MMU's page table. */
                if (!dmaobj->base.start)
                        return nvkm_gpuobj_wrap(pgt, pgpuobj);
                /* Otherwise, translate the start address through the page
                 * table to the physical page backing it.
                 */
                nvkm_kmap(pgt);
                offset  = nvkm_ro32(pgt, 8 + (offset >> 10));
                offset &= 0xfffff000;
                nvkm_done(pgt);
        }

        /* Write the four-word hardware DMA object into instance memory. */
        ret = nvkm_gpuobj_new(device, 16, align, false, parent, pgpuobj);
        if (ret == 0) {
                nvkm_kmap(*pgpuobj);
                nvkm_wo32(*pgpuobj, 0x00, dmaobj->flags0 | (adjust << 20));
                nvkm_wo32(*pgpuobj, 0x04, length);
                nvkm_wo32(*pgpuobj, 0x08, dmaobj->flags2 | offset);
                nvkm_wo32(*pgpuobj, 0x0c, dmaobj->flags2 | offset);
                nvkm_done(*pgpuobj);
        }

        return ret;
}

static const struct nvkm_dmaobj_func
nv04_dmaobj_func = {
        .bind = nv04_dmaobj_bind,
};

int
nv04_dmaobj_new(struct nvkm_dma *dma, const struct nvkm_oclass *oclass,
                void *data, u32 size, struct nvkm_dmaobj **pdmaobj)
{
        struct nvkm_device *device = dma->engine.subdev.device;
        struct nv04_dmaobj *dmaobj;
        int ret;

        if (!(dmaobj = kzalloc(sizeof(*dmaobj), GFP_KERNEL)))
                return -ENOMEM;
        *pdmaobj = &dmaobj->base;

        ret = nvkm_dmaobj_ctor(&nv04_dmaobj_func, dma, oclass,
                               &data, &size, &dmaobj->base);
        if (ret)
                return ret;

        /* VM targets are remapped to PCI; on the NV04 MMU, bind() will
         * instead translate addresses through its page table (clone).
         */
        if (dmaobj->base.target == NV_MEM_TARGET_VM) {
                if (device->mmu->func == &nv04_mmu)
                        dmaobj->clone = true;
                dmaobj->base.target = NV_MEM_TARGET_PCI;
                dmaobj->base.access = NV_MEM_ACCESS_RW;
        }

        /* Word 0 holds the object class plus target/access flags. */
        dmaobj->flags0 = oclass->base.oclass;
        switch (dmaobj->base.target) {
        case NV_MEM_TARGET_VRAM:
                dmaobj->flags0 |= 0x00003000;
                break;
        case NV_MEM_TARGET_PCI:
                dmaobj->flags0 |= 0x00023000;
                break;
        case NV_MEM_TARGET_PCI_NOSNOOP:
                dmaobj->flags0 |= 0x00033000;
                break;
        default:
                return -EINVAL;
        }

        switch (dmaobj->base.access) {
        case NV_MEM_ACCESS_RO:
                dmaobj->flags0 |= 0x00004000;
                break;
        case NV_MEM_ACCESS_WO:
                dmaobj->flags0 |= 0x00008000;
                /* fall through */
        case NV_MEM_ACCESS_RW:
                dmaobj->flags2 |= 0x00000002;
                break;
        default:
                return -EINVAL;
        }

        return 0;
}
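
/*
 * Not part of the file above: a minimal sketch of how nv04_dmaobj_new() is
 * typically wired into the DMA engine, assuming the .class_new hook of
 * struct nvkm_dma_func used by this driver's per-chipset engine code
 * (e.g. engine/dma/nv04.c).  The registration boilerplate differs between
 * kernel versions, so treat this as illustrative rather than definitive:
 *
 *	static const struct nvkm_dma_func
 *	nv04_dma = {
 *		.class_new = nv04_dmaobj_new,
 *	};
 *
 * The engine exposes the NV_DMA_* classes from <nvif/class.h> to clients
 * and forwards their allocation requests to .class_new, which is how the
 * constructor above ends up being invoked with the client's allocation
 * arguments (data/size).
 */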