linux/drivers/gpu/drm/nouveau/nvkm/subdev/bar/nv50.c
/*
 * Copyright 2012 Red Hat Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Ben Skeggs
 */
#include "nv50.h"

#include <core/gpuobj.h>
#include <subdev/fb.h>
#include <subdev/mmu.h>
#include <subdev/timer.h>

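/*
 * Flush pending BAR writes.  As used here, writing bit 0 of 0x00330c
 * kicks the flush and the hardware holds bit 1 high until it completes;
 * we poll for up to 2ms under bar->base.lock.
 */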
static void
nv50_bar_flush(struct nvkm_bar *base)
{
        struct nv50_bar *bar = nv50_bar(base);
        struct nvkm_device *device = bar->base.subdev.device;
        unsigned long flags;
        spin_lock_irqsave(&bar->base.lock, flags);
        nvkm_wr32(device, 0x00330c, 0x00000001);
        nvkm_msec(device, 2000,
                if (!(nvkm_rd32(device, 0x00330c) & 0x00000002))
                        break;
        );
        spin_unlock_irqrestore(&bar->base.lock, flags);
}

struct nvkm_vmm *
nv50_bar_bar1_vmm(struct nvkm_bar *base)
{
        return nv50_bar(base)->bar1_vmm;
}

void
nv50_bar_bar1_wait(struct nvkm_bar *base)
{
        nvkm_bar_flush(base);
}

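/*
 * BAR1 binding: 0x001708 holds the instance-memory offset of the DMA
 * object describing BAR1 (in 16-byte units, hence the >> 4), with bit 31
 * presumably acting as the enable flag.  fini() clears the register to
 * unbind the BAR, init() re-points it at bar->bar1.
 */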
void
nv50_bar_bar1_fini(struct nvkm_bar *bar)
{
        nvkm_wr32(bar->subdev.device, 0x001708, 0x00000000);
}

void
nv50_bar_bar1_init(struct nvkm_bar *base)
{
        struct nvkm_device *device = base->subdev.device;
        struct nv50_bar *bar = nv50_bar(base);
        nvkm_wr32(device, 0x001708, 0x80000000 | bar->bar1->node->offset >> 4);
}

struct nvkm_vmm *
nv50_bar_bar2_vmm(struct nvkm_bar *base)
{
        return nv50_bar(base)->bar2_vmm;
}

void
nv50_bar_bar2_fini(struct nvkm_bar *bar)
{
        nvkm_wr32(bar->subdev.device, 0x00170c, 0x00000000);
}

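/*
 * BAR2 binding: 0x001704 is loaded with the address of the instance
 * block holding the BAR page directory (written twice, the second time
 * with bit 30 set, presumably to latch/enable it), and 0x00170c then
 * points at the BAR2 DMA object, mirroring what 0x001708 does for BAR1.
 */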
void
nv50_bar_bar2_init(struct nvkm_bar *base)
{
        struct nvkm_device *device = base->subdev.device;
        struct nv50_bar *bar = nv50_bar(base);
        nvkm_wr32(device, 0x001704, 0x00000000 | bar->mem->addr >> 12);
        nvkm_wr32(device, 0x001704, 0x40000000 | bar->mem->addr >> 12);
        nvkm_wr32(device, 0x00170c, 0x80000000 | bar->bar2->node->offset >> 4);
}

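/*
 * Device (re)init: clear the eight registers at 0x001900..0x00191c to a
 * known state; the BAR bindings themselves are reprogrammed by the
 * bar1/bar2 init hooks above.
 */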
void
nv50_bar_init(struct nvkm_bar *base)
{
        struct nv50_bar *bar = nv50_bar(base);
        struct nvkm_device *device = bar->base.subdev.device;
        int i;

        for (i = 0; i < 8; i++)
                nvkm_wr32(device, 0x001900 + (i * 4), 0x00000000);
}

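/*
 * One-time construction of the BAR state.  The 128KiB instance block
 * (bar->mem) holds a pad up to pgd_addr, the 16KiB page directory used
 * by the BAR VMMs, and a 24-byte DMA object for each BAR.  BAR2 is
 * built and enabled first, so that instance memory is reachable through
 * it while BAR1 is being set up.
 */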
int
nv50_bar_oneinit(struct nvkm_bar *base)
{
        struct nv50_bar *bar = nv50_bar(base);
        struct nvkm_device *device = bar->base.subdev.device;
        static struct lock_class_key bar1_lock;
        static struct lock_class_key bar2_lock;
        u64 start, limit, size;
        int ret;

        ret = nvkm_gpuobj_new(device, 0x20000, 0, false, NULL, &bar->mem);
        if (ret)
                return ret;

        ret = nvkm_gpuobj_new(device, bar->pgd_addr, 0, false, bar->mem,
                              &bar->pad);
        if (ret)
                return ret;

        ret = nvkm_gpuobj_new(device, 0x4000, 0, false, bar->mem, &bar->pgd);
        if (ret)
                return ret;

        /* BAR2 */
        start = 0x0100000000ULL;
        size = device->func->resource_size(device, 3);
        if (!size)
                return -ENOMEM;
        limit = start + size;

        ret = nvkm_vmm_new(device, start, limit-- - start, NULL, 0,
                           &bar2_lock, "bar2", &bar->bar2_vmm);
        if (ret)
                return ret;

        atomic_inc(&bar->bar2_vmm->engref[NVKM_SUBDEV_BAR]);
        bar->bar2_vmm->debug = bar->base.subdev.debug;

        ret = nvkm_vmm_boot(bar->bar2_vmm);
        if (ret)
                return ret;

        ret = nvkm_vmm_join(bar->bar2_vmm, bar->mem->memory);
        if (ret)
                return ret;

        ret = nvkm_gpuobj_new(device, 24, 16, false, bar->mem, &bar->bar2);
        if (ret)
                return ret;

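        /*
         * Fill in the 24-byte DMA object describing BAR2: 0x04/0x08
         * take the low 32 bits of limit and base, 0x0c packs their
         * upper bits together, and the remaining words are fixed.
         */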
        nvkm_kmap(bar->bar2);
        nvkm_wo32(bar->bar2, 0x00, 0x7fc00000);
        nvkm_wo32(bar->bar2, 0x04, lower_32_bits(limit));
        nvkm_wo32(bar->bar2, 0x08, lower_32_bits(start));
        nvkm_wo32(bar->bar2, 0x0c, upper_32_bits(limit) << 24 |
                                   upper_32_bits(start));
        nvkm_wo32(bar->bar2, 0x10, 0x00000000);
        nvkm_wo32(bar->bar2, 0x14, 0x00000000);
        nvkm_done(bar->bar2);

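        /*
         * Flag oneinit completion early and bring BAR2 up now,
         * presumably so the BAR1 setup below can already go through it.
         */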
        bar->base.subdev.oneinit = true;
        nvkm_bar_bar2_init(device);

        /* BAR1 */
        start = 0x0000000000ULL;
        size = device->func->resource_size(device, 1);
        if (!size)
                return -ENOMEM;
        limit = start + size;

        ret = nvkm_vmm_new(device, start, limit-- - start, NULL, 0,
                           &bar1_lock, "bar1", &bar->bar1_vmm);
        if (ret)
                return ret;

        atomic_inc(&bar->bar1_vmm->engref[NVKM_SUBDEV_BAR]);
        bar->bar1_vmm->debug = bar->base.subdev.debug;

        ret = nvkm_vmm_join(bar->bar1_vmm, bar->mem->memory);
        if (ret)
                return ret;

        ret = nvkm_gpuobj_new(device, 24, 16, false, bar->mem, &bar->bar1);
        if (ret)
                return ret;

        nvkm_kmap(bar->bar1);
        nvkm_wo32(bar->bar1, 0x00, 0x7fc00000);
        nvkm_wo32(bar->bar1, 0x04, lower_32_bits(limit));
        nvkm_wo32(bar->bar1, 0x08, lower_32_bits(start));
        nvkm_wo32(bar->bar1, 0x0c, upper_32_bits(limit) << 24 |
                                   upper_32_bits(start));
        nvkm_wo32(bar->bar1, 0x10, 0x00000000);
        nvkm_wo32(bar->bar1, 0x14, 0x00000000);
        nvkm_done(bar->bar1);
        return 0;
}

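/*
 * Teardown mirrors oneinit in reverse: each BAR's DMA object is freed,
 * its VMM detached from the instance block and released, then the page
 * directory, pad and instance block themselves go.
 */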
void *
nv50_bar_dtor(struct nvkm_bar *base)
{
        struct nv50_bar *bar = nv50_bar(base);
        if (bar->mem) {
                nvkm_gpuobj_del(&bar->bar1);
                nvkm_vmm_part(bar->bar1_vmm, bar->mem->memory);
                nvkm_vmm_unref(&bar->bar1_vmm);
                nvkm_gpuobj_del(&bar->bar2);
                nvkm_vmm_part(bar->bar2_vmm, bar->mem->memory);
                nvkm_vmm_unref(&bar->bar2_vmm);
                nvkm_gpuobj_del(&bar->pgd);
                nvkm_gpuobj_del(&bar->pad);
                nvkm_gpuobj_del(&bar->mem);
        }
        return bar;
}

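/*
 * Common constructor for the NV50-family implementations; pgd_addr is
 * effectively the offset at which the page directory must sit inside
 * the instance block (0x1400 for NV50, see nv50_bar_new() below).
 */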
int
nv50_bar_new_(const struct nvkm_bar_func *func, struct nvkm_device *device,
              int index, u32 pgd_addr, struct nvkm_bar **pbar)
{
        struct nv50_bar *bar;
        if (!(bar = kzalloc(sizeof(*bar), GFP_KERNEL)))
                return -ENOMEM;
        nvkm_bar_ctor(func, device, index, &bar->base);
        bar->pgd_addr = pgd_addr;
        *pbar = &bar->base;
        return 0;
}

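/* Both BARs are drained by the same flush, hence bar2.wait reusing the BAR1 path. */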
static const struct nvkm_bar_func
nv50_bar_func = {
        .dtor = nv50_bar_dtor,
        .oneinit = nv50_bar_oneinit,
        .init = nv50_bar_init,
        .bar1.init = nv50_bar_bar1_init,
        .bar1.fini = nv50_bar_bar1_fini,
        .bar1.wait = nv50_bar_bar1_wait,
        .bar1.vmm = nv50_bar_bar1_vmm,
        .bar2.init = nv50_bar_bar2_init,
        .bar2.fini = nv50_bar_bar2_fini,
        .bar2.wait = nv50_bar_bar1_wait,
        .bar2.vmm = nv50_bar_bar2_vmm,
        .flush = nv50_bar_flush,
};

int
nv50_bar_new(struct nvkm_device *device, int index, struct nvkm_bar **pbar)
{
        return nv50_bar_new_(&nv50_bar_func, device, index, 0x1400, pbar);
}