/* linux/drivers/gpu/drm/nouveau/core/subdev/instmem/nv40.c */
/*
 * Copyright 2012 Red Hat Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Ben Skeggs
 */

#include <engine/graph/nv40.h>

#include "nv04.h"

/******************************************************************************
 * instmem subdev implementation
 *****************************************************************************/

  33static u32
  34nv40_instmem_rd32(struct nouveau_object *object, u64 addr)
  35{
  36        struct nv04_instmem_priv *priv = (void *)object;
  37        return ioread32_native(priv->iomem + addr);
  38}
  39
  40static void
  41nv40_instmem_wr32(struct nouveau_object *object, u64 addr, u32 data)
  42{
  43        struct nv04_instmem_priv *priv = (void *)object;
  44        iowrite32_native(data, priv->iomem + addr);
  45}
  46
/* Constructor for the NV40-family instmem subdev.
 *
 * Maps the PRAMIN aperture BAR and reserves a region at the end of VRAM
 * large enough for per-channel graphics contexts, the PCI(E)GART table,
 * the VBIOS image, RAMHT, RAMRO, RAMFC and general object storage.
 *
 * Returns 0 on success or a negative errno.  Later error paths do not
 * iounmap() priv->iomem here; NOTE(review): presumably nv04_instmem_dtor
 * (used by this subdev's oclass) unwinds that — confirm in nv04.c.
 */
static int
nv40_instmem_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
		  struct nouveau_oclass *oclass, void *data, u32 size,
		  struct nouveau_object **pobject)
{
	struct nouveau_device *device = nv_device(parent);
	struct nv04_instmem_priv *priv;
	int ret, bar, vs;

	ret = nouveau_instmem_create(parent, engine, oclass, &priv);
	*pobject = nv_object(priv);
	if (ret)
		return ret;

	/* map bar: use BAR2 when it exists, otherwise fall back to BAR3 */
	if (nv_device_resource_len(device, 2))
		bar = 2;
	else
		bar = 3;

	priv->iomem = ioremap(nv_device_resource_start(device, bar),
			      nv_device_resource_len(device, bar));
	if (!priv->iomem) {
		nv_error(priv, "unable to map PRAMIN BAR\n");
		return -EFAULT;
	}

	/* PRAMIN aperture maps over the end of vram, reserve enough space
	 * to fit graphics contexts for every channel, the magics come
	 * from engine/graph/nv40.c
	 */
	/* NOTE(review): bits 8..15 of 0x001540 look like a unit-enable
	 * mask; vs counts the enabled units to scale the context size.
	 */
	vs = hweight8((nv_rd32(priv, 0x001540) & 0x0000ff00) >> 8);
	if      (device->chipset == 0x40) priv->base.reserved = 0x6aa0 * vs;
	else if (device->chipset  < 0x43) priv->base.reserved = 0x4f00 * vs;
	else if (nv44_graph_class(priv))  priv->base.reserved = 0x4980 * vs;
	else                              priv->base.reserved = 0x4a40 * vs;
	priv->base.reserved += 16 * 1024;
	priv->base.reserved *= 32;		/* per-channel */
	priv->base.reserved += 512 * 1024;	/* pci(e)gart table */
	priv->base.reserved += 512 * 1024;	/* object storage */

	priv->base.reserved = round_up(priv->base.reserved, 4096);

	ret = nouveau_mm_init(&priv->heap, 0, priv->base.reserved, 1);
	if (ret)
		return ret;

	/* 0x00000-0x10000: reserve for probable vbios image */
	ret = nouveau_gpuobj_new(nv_object(priv), NULL, 0x10000, 0, 0,
				&priv->vbios);
	if (ret)
		return ret;

	/* 0x10000-0x18000: reserve for RAMHT */
	ret = nouveau_ramht_new(nv_object(priv), NULL, 0x08000, 0,
			       &priv->ramht);
	if (ret)
		return ret;

	/* 0x18000-0x18200: reserve for RAMRO
	 * 0x18200-0x20000: padding
	 */
	ret = nouveau_gpuobj_new(nv_object(priv), NULL, 0x08000, 0, 0,
				&priv->ramro);
	if (ret)
		return ret;

	/* 0x20000-0x21000: reserve for RAMFC
	 * 0x21000-0x40000: padding and some unknown crap
	 */
	ret = nouveau_gpuobj_new(nv_object(priv), NULL, 0x20000, 0,
				 NVOBJ_FLAG_ZERO_ALLOC, &priv->ramfc);
	if (ret)
		return ret;

	return 0;
}

/* oclass table for the NV40-family instmem subdev: NV40-specific ctor and
 * BAR-backed rd32/wr32 accessors, combined with the NV04 dtor, generic
 * init/fini, and the NV04 instobj implementation.
 */
struct nouveau_oclass *
nv40_instmem_oclass = &(struct nouveau_instmem_impl) {
	.base.handle = NV_SUBDEV(INSTMEM, 0x40),
	.base.ofuncs = &(struct nouveau_ofuncs) {
		.ctor = nv40_instmem_ctor,
		.dtor = nv04_instmem_dtor,
		.init = _nouveau_instmem_init,
		.fini = _nouveau_instmem_fini,
		.rd32 = nv40_instmem_rd32,
		.wr32 = nv40_instmem_wr32,
	},
	.instobj = &nv04_instobj_oclass.base,
}.base;
