linux/drivers/gpu/drm/nouveau/core/subdev/vm/nv04.c
/*
 * Copyright 2012 Red Hat Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Ben Skeggs
 */

#include <core/gpuobj.h>

#include "nv04.h"

#define NV04_PDMA_SIZE (128 * 1024 * 1024)
#define NV04_PDMA_PAGE (  4 * 1024)

/*******************************************************************************
 * VM map/unmap callbacks
 ******************************************************************************/

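/* Write 'cnt' page table entries for a scatter-gather list.  PTEs start at
 * byte offset 8 (past the two-word DMA object header written by the
 * constructor below) and are four bytes each.  Every PAGE_SIZE system page
 * is split into PAGE_SIZE / NV04_PDMA_PAGE consecutive 4KiB GART pages, and
 * each entry is the page's DMA address with the low two flag bits (0x3) set
 * to mark it as a valid mapping.
 */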
static void
nv04_vm_map_sg(struct nouveau_vma *vma, struct nouveau_gpuobj *pgt,
               struct nouveau_mem *mem, u32 pte, u32 cnt, dma_addr_t *list)
{
        pte = 0x00008 + (pte * 4);
        while (cnt) {
                u32 page = PAGE_SIZE / NV04_PDMA_PAGE;
                u32 phys = (u32)*list++;
                while (cnt && page--) {
                        nv_wo32(pgt, pte, phys | 3);
                        phys += NV04_PDMA_PAGE;
                        pte += 4;
                        cnt -= 1;
                }
        }
}

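/* Clear 'cnt' page table entries starting at 'pte'; the offset math mirrors
 * nv04_vm_map_sg() above.
 */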
static void
nv04_vm_unmap(struct nouveau_gpuobj *pgt, u32 pte, u32 cnt)
{
        pte = 0x00008 + (pte * 4);
        while (cnt--) {
                nv_wo32(pgt, pte, 0x00000000);
                pte += 4;
        }
}

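/* No explicit flush is performed for NV04-style PCIGART; this empty stub
 * only exists to satisfy the vmmgr interface.
 */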
static void
nv04_vm_flush(struct nouveau_vm *vm)
{
}

/*******************************************************************************
 * VM object
 ******************************************************************************/

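/* Additional address spaces cannot be created on NV04; the only VM is the
 * single PCIGART one built directly by nv04_vmmgr_ctor() below.
 */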
int
nv04_vm_create(struct nouveau_vmmgr *vmm, u64 offset, u64 length, u64 mmstart,
               struct nouveau_vm **pvm)
{
        return -EINVAL;
}

/*******************************************************************************
 * VMMGR subdev
 ******************************************************************************/

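/* Construct the PCIGART vmmgr: a single 128MiB (NV04_PDMA_SIZE) address
 * space with 4KiB pages, backed by one page table that also serves as the
 * DMA object (two header words followed by one 32-bit PTE per page).
 */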
static int
nv04_vmmgr_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
                struct nouveau_oclass *oclass, void *data, u32 size,
                struct nouveau_object **pobject)
{
        struct nv04_vmmgr_priv *priv;
        struct nouveau_gpuobj *dma;
        int ret;

        ret = nouveau_vmmgr_create(parent, engine, oclass, "PCIGART",
                                   "pcigart", &priv);
        *pobject = nv_object(priv);
        if (ret)
                return ret;

        priv->base.create = nv04_vm_create;
        priv->base.limit = NV04_PDMA_SIZE;
        priv->base.dma_bits = 32;
        priv->base.pgt_bits = 32 - 12;
        priv->base.spg_shift = 12;
        priv->base.lpg_shift = 12;
        priv->base.map_sg = nv04_vm_map_sg;
        priv->base.unmap = nv04_vm_unmap;
        priv->base.flush = nv04_vm_flush;

        ret = nouveau_vm_create(&priv->base, 0, NV04_PDMA_SIZE, 0, 4096,
                                &priv->vm);
        if (ret)
                return ret;

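        /* Allocate the combined DMA object / page table: 8 header bytes
         * plus one 32-bit PTE per NV04_PDMA_PAGE page.
         */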
        ret = nouveau_gpuobj_new(nv_object(priv), NULL,
                                 (NV04_PDMA_SIZE / NV04_PDMA_PAGE) * 4 +
                                 8, 16, NVOBJ_FLAG_ZERO_ALLOC,
                                 &priv->vm->pgt[0].obj[0]);
        dma = priv->vm->pgt[0].obj[0];
        priv->vm->pgt[0].refcount[0] = 1;
        if (ret)
                return ret;

        nv_wo32(dma, 0x00000, 0x0002103d); /* PCI, RW, PT, !LN */
        nv_wo32(dma, 0x00004, NV04_PDMA_SIZE - 1); /* limit: last valid byte */
        return 0;
}

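/* Tear down: drop the page table object and the VM, free the dummy page if
 * one was allocated (the nv04 constructor itself never sets nullp), then
 * destroy the base vmmgr.
 */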
void
nv04_vmmgr_dtor(struct nouveau_object *object)
{
        struct nv04_vmmgr_priv *priv = (void *)object;
        if (priv->vm) {
                nouveau_gpuobj_ref(NULL, &priv->vm->pgt[0].obj[0]);
                nouveau_vm_ref(NULL, &priv->vm, NULL);
        }
        if (priv->nullp) {
                pci_free_consistent(nv_device(priv)->pdev, 16 * 1024,
                                    priv->nullp, priv->null);
        }
        nouveau_vmmgr_destroy(&priv->base);
}

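/* Only ctor/dtor are specific to NV04; init and fini fall through to the
 * generic vmmgr helpers.
 */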
struct nouveau_oclass
nv04_vmmgr_oclass = {
        .handle = NV_SUBDEV(VM, 0x04),
        .ofuncs = &(struct nouveau_ofuncs) {
                .ctor = nv04_vmmgr_ctor,
                .dtor = nv04_vmmgr_dtor,
                .init = _nouveau_vmmgr_init,
                .fini = _nouveau_vmmgr_fini,
        },
};