linux/drivers/gpu/drm/nouveau/nouveau_ttm.c
/*
 * Copyright (c) 2007-2008 Tungsten Graphics, Inc., Cedar Park, TX., USA,
 * All Rights Reserved.
 * Copyright (c) 2009 VMware, Inc., Palo Alto, CA., USA,
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sub license,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 */

#include <subdev/fb.h>
#include <subdev/vm.h>
#include <subdev/instmem.h>

#include "nouveau_drm.h"
#include "nouveau_ttm.h"
#include "nouveau_gem.h"

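/*
 * TTM memory-type manager backing TTM_PL_VRAM.  Allocation is delegated
 * to the fb subdev's ram backend; init only caches the nouveau_fb
 * pointer in man->priv so the debug callback can reach the VRAM mm.
 */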
static int
nouveau_vram_manager_init(struct ttm_mem_type_manager *man, unsigned long psize)
{
        struct nouveau_drm *drm = nouveau_bdev(man->bdev);
        struct nouveau_fb *pfb = nouveau_fb(drm->device);
        man->priv = pfb;
        return 0;
}

static int
nouveau_vram_manager_fini(struct ttm_mem_type_manager *man)
{
        man->priv = NULL;
        return 0;
}

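/*
 * A nouveau_mem node can carry up to two GPU virtual-address mappings;
 * unmap and release whichever are populated before the node's backing
 * storage is freed.
 */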
static inline void
nouveau_mem_node_cleanup(struct nouveau_mem *node)
{
        if (node->vma[0].node) {
                nouveau_vm_unmap(&node->vma[0]);
                nouveau_vm_put(&node->vma[0]);
        }

        if (node->vma[1].node) {
                nouveau_vm_unmap(&node->vma[1]);
                nouveau_vm_put(&node->vma[1]);
        }
}

static void
nouveau_vram_manager_del(struct ttm_mem_type_manager *man,
                         struct ttm_mem_reg *mem)
{
        struct nouveau_drm *drm = nouveau_bdev(man->bdev);
        struct nouveau_fb *pfb = nouveau_fb(drm->device);
        nouveau_mem_node_cleanup(mem->mm_node);
        pfb->ram->put(pfb, (struct nouveau_mem **)&mem->mm_node);
}

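/*
 * Allocate VRAM for a buffer object.  NONCONTIG placements may be
 * satisfied with scattered blocks as small as one of the BO's large
 * pages (size_nc), and bits 8..17 of tile_flags select the storage
 * type handed down to the ram backend.  -ENOSPC is translated into a
 * NULL mm_node with a zero return, which TTM treats as "no space,
 * try another placement" rather than a hard failure.
 */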
static int
nouveau_vram_manager_new(struct ttm_mem_type_manager *man,
                         struct ttm_buffer_object *bo,
                         struct ttm_placement *placement,
                         struct ttm_mem_reg *mem)
{
        struct nouveau_drm *drm = nouveau_bdev(man->bdev);
        struct nouveau_fb *pfb = nouveau_fb(drm->device);
        struct nouveau_bo *nvbo = nouveau_bo(bo);
        struct nouveau_mem *node;
        u32 size_nc = 0;
        int ret;

        if (nvbo->tile_flags & NOUVEAU_GEM_TILE_NONCONTIG)
                size_nc = 1 << nvbo->page_shift;

        ret = pfb->ram->get(pfb, mem->num_pages << PAGE_SHIFT,
                           mem->page_alignment << PAGE_SHIFT, size_nc,
                           (nvbo->tile_flags >> 8) & 0x3ff, &node);
        if (ret) {
                mem->mm_node = NULL;
                return (ret == -ENOSPC) ? 0 : ret;
        }

        node->page_shift = nvbo->page_shift;

        mem->mm_node = node;
        mem->start   = node->offset >> PAGE_SHIFT;
        return 0;
}

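/*
 * Dump every node of the VRAM mm to the kernel log.  The << 12 shifts
 * convert the mm's node offsets/lengths to bytes, assuming the VRAM mm
 * was created with 4KiB-unit granularity.
 */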
static void
nouveau_vram_manager_debug(struct ttm_mem_type_manager *man, const char *prefix)
{
        struct nouveau_fb *pfb = man->priv;
        struct nouveau_mm *mm = &pfb->vram;
        struct nouveau_mm_node *r;
        u32 total = 0, free = 0;

        mutex_lock(&nv_subdev(pfb)->mutex);
        list_for_each_entry(r, &mm->nodes, nl_entry) {
                printk(KERN_DEBUG "%s %d: 0x%010llx 0x%010llx\n",
                       prefix, r->type, ((u64)r->offset << 12),
                       (((u64)r->offset + r->length) << 12));

                total += r->length;
                if (!r->type)
                        free += r->length;
        }
        mutex_unlock(&nv_subdev(pfb)->mutex);

        printk(KERN_DEBUG "%s  total: 0x%010llx free: 0x%010llx\n",
               prefix, (u64)total << 12, (u64)free << 12);
        printk(KERN_DEBUG "%s  block: 0x%08x\n",
               prefix, mm->block_size << 12);
}

const struct ttm_mem_type_manager_func nouveau_vram_manager = {
        nouveau_vram_manager_init,
        nouveau_vram_manager_fini,
        nouveau_vram_manager_new,
        nouveau_vram_manager_del,
        nouveau_vram_manager_debug
};

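/*
 * TTM_PL_TT manager for chipsets with a paged GPU VM (nv50 and newer).
 * GPU address-space allocation happens later, when the BO is mapped
 * into a VM, so new() only records the storage type derived from the
 * object's tile_flags and never assigns a GART offset (mem->start is
 * always 0).
 */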
static int
nouveau_gart_manager_init(struct ttm_mem_type_manager *man, unsigned long psize)
{
        return 0;
}

static int
nouveau_gart_manager_fini(struct ttm_mem_type_manager *man)
{
        return 0;
}

static void
nouveau_gart_manager_del(struct ttm_mem_type_manager *man,
                         struct ttm_mem_reg *mem)
{
        nouveau_mem_node_cleanup(mem->mm_node);
        kfree(mem->mm_node);
        mem->mm_node = NULL;
}

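/*
 * The memtype field layout differs by generation: the nv50 family uses
 * a 7-bit type (except the original 0x50 chipset, which is left at
 * type 0), while NV_C0 and newer use the full 8 bits.
 */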
static int
nouveau_gart_manager_new(struct ttm_mem_type_manager *man,
                         struct ttm_buffer_object *bo,
                         struct ttm_placement *placement,
                         struct ttm_mem_reg *mem)
{
        struct nouveau_drm *drm = nouveau_bdev(bo->bdev);
        struct nouveau_bo *nvbo = nouveau_bo(bo);
        struct nouveau_mem *node;

        node = kzalloc(sizeof(*node), GFP_KERNEL);
        if (!node)
                return -ENOMEM;

        node->page_shift = 12;

        switch (nv_device(drm->device)->card_type) {
        case NV_50:
                if (nv_device(drm->device)->chipset != 0x50)
                        node->memtype = (nvbo->tile_flags & 0x7f00) >> 8;
                break;
        case NV_C0:
        case NV_D0:
        case NV_E0:
                node->memtype = (nvbo->tile_flags & 0xff00) >> 8;
                break;
        default:
                break;
        }

        mem->mm_node = node;
        mem->start   = 0;
        return 0;
}

static void
nouveau_gart_manager_debug(struct ttm_mem_type_manager *man, const char *prefix)
{
}

const struct ttm_mem_type_manager_func nouveau_gart_manager = {
        nouveau_gart_manager_init,
        nouveau_gart_manager_fini,
        nouveau_gart_manager_new,
        nouveau_gart_manager_del,
        nouveau_gart_manager_debug
};

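/*
 * TTM_PL_TT manager for pre-nv50 chipsets, where the GART is the single
 * global page table owned by the nv04 vmmgr.  init takes a reference on
 * that VM, and each allocation carves a VMA out of it up front, since
 * there is no per-object VM to defer the address assignment to.
 */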
#include <core/subdev/vm/nv04.h>
static int
nv04_gart_manager_init(struct ttm_mem_type_manager *man, unsigned long psize)
{
        struct nouveau_drm *drm = nouveau_bdev(man->bdev);
        struct nouveau_vmmgr *vmm = nouveau_vmmgr(drm->device);
        struct nv04_vmmgr_priv *priv = (void *)vmm;
        struct nouveau_vm *vm = NULL;
        nouveau_vm_ref(priv->vm, &vm, NULL);
        man->priv = vm;
        return 0;
}

static int
nv04_gart_manager_fini(struct ttm_mem_type_manager *man)
{
        struct nouveau_vm *vm = man->priv;
        nouveau_vm_ref(NULL, &vm, NULL);
        man->priv = NULL;
        return 0;
}

static void
nv04_gart_manager_del(struct ttm_mem_type_manager *man, struct ttm_mem_reg *mem)
{
        struct nouveau_mem *node = mem->mm_node;
        if (node->vma[0].node)
                nouveau_vm_put(&node->vma[0]);
        kfree(mem->mm_node);
        mem->mm_node = NULL;
}

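/*
 * Reserve GART address space for the object now; the backing pages are
 * expected to be bound into the page table later, at bind time.  The
 * VMA offset doubles as the TTM placement offset.
 */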
static int
nv04_gart_manager_new(struct ttm_mem_type_manager *man,
                      struct ttm_buffer_object *bo,
                      struct ttm_placement *placement,
                      struct ttm_mem_reg *mem)
{
        struct nouveau_mem *node;
        int ret;

        node = kzalloc(sizeof(*node), GFP_KERNEL);
        if (!node)
                return -ENOMEM;

        node->page_shift = 12;

        ret = nouveau_vm_get(man->priv, mem->num_pages << 12, node->page_shift,
                             NV_MEM_ACCESS_RW, &node->vma[0]);
        if (ret) {
                kfree(node);
                return ret;
        }

        mem->mm_node = node;
        mem->start   = node->vma[0].offset >> PAGE_SHIFT;
        return 0;
}

static void
nv04_gart_manager_debug(struct ttm_mem_type_manager *man, const char *prefix)
{
}

const struct ttm_mem_type_manager_func nv04_gart_manager = {
        nv04_gart_manager_init,
        nv04_gart_manager_fini,
        nv04_gart_manager_new,
        nv04_gart_manager_del,
        nv04_gart_manager_debug
};

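/*
 * fops->mmap: offsets below DRM_FILE_PAGE_OFFSET belong to legacy DRM
 * maps and are handed to drm_mmap(); everything above it is a TTM
 * buffer-object mapping.
 */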
int
nouveau_ttm_mmap(struct file *filp, struct vm_area_struct *vma)
{
        struct drm_file *file_priv = filp->private_data;
        struct nouveau_drm *drm = nouveau_drm(file_priv->minor->dev);

        if (unlikely(vma->vm_pgoff < DRM_FILE_PAGE_OFFSET))
                return drm_mmap(filp, vma);

        return ttm_bo_mmap(filp, vma, &drm->ttm.bdev);
}

static int
nouveau_ttm_mem_global_init(struct drm_global_reference *ref)
{
        return ttm_mem_global_init(ref->object);
}

static void
nouveau_ttm_mem_global_release(struct drm_global_reference *ref)
{
        ttm_mem_global_release(ref->object);
}

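/*
 * Take references on the TTM memory-accounting and BO global state,
 * which are shared between all TTM-based drivers in the system.  The
 * mem_global_ref.release pointer doubles as an "initialised" flag for
 * nouveau_ttm_global_release().
 */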
int
nouveau_ttm_global_init(struct nouveau_drm *drm)
{
        struct drm_global_reference *global_ref;
        int ret;

        global_ref = &drm->ttm.mem_global_ref;
        global_ref->global_type = DRM_GLOBAL_TTM_MEM;
        global_ref->size = sizeof(struct ttm_mem_global);
        global_ref->init = &nouveau_ttm_mem_global_init;
        global_ref->release = &nouveau_ttm_mem_global_release;

        ret = drm_global_item_ref(global_ref);
        if (unlikely(ret != 0)) {
                DRM_ERROR("Failed setting up TTM memory accounting\n");
                drm->ttm.mem_global_ref.release = NULL;
                return ret;
        }

        drm->ttm.bo_global_ref.mem_glob = global_ref->object;
        global_ref = &drm->ttm.bo_global_ref.ref;
        global_ref->global_type = DRM_GLOBAL_TTM_BO;
        global_ref->size = sizeof(struct ttm_bo_global);
        global_ref->init = &ttm_bo_global_init;
        global_ref->release = &ttm_bo_global_release;

        ret = drm_global_item_ref(global_ref);
        if (unlikely(ret != 0)) {
                DRM_ERROR("Failed setting up TTM BO subsystem\n");
                drm_global_item_unref(&drm->ttm.mem_global_ref);
                drm->ttm.mem_global_ref.release = NULL;
                return ret;
        }

        return 0;
}

void
nouveau_ttm_global_release(struct nouveau_drm *drm)
{
        if (drm->ttm.mem_global_ref.release == NULL)
                return;

        drm_global_item_unref(&drm->ttm.bo_global_ref.ref);
        drm_global_item_unref(&drm->ttm.mem_global_ref);
        drm->ttm.mem_global_ref.release = NULL;
}

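/*
 * Bring up TTM for the device: pick a DMA mask from the vmmgr's
 * addressable range (clamped to 32 bits for AGP, or when the platform
 * can't address more), register the BO driver, size the VRAM and GART
 * ranges, and enable write-combining on the VRAM BAR.
 */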
int
nouveau_ttm_init(struct nouveau_drm *drm)
{
        struct drm_device *dev = drm->dev;
        struct nouveau_device *device = nv_device(drm->device);
        u32 bits;
        int ret;

        bits = nouveau_vmmgr(drm->device)->dma_bits;
        if (nv_device_is_pci(device)) {
                if (drm->agp.stat == ENABLED ||
                    !pci_dma_supported(dev->pdev, DMA_BIT_MASK(bits)))
                        bits = 32;

                ret = pci_set_dma_mask(dev->pdev, DMA_BIT_MASK(bits));
                if (ret)
                        return ret;

                ret = pci_set_consistent_dma_mask(dev->pdev,
                                                  DMA_BIT_MASK(bits));
                if (ret)
                        pci_set_consistent_dma_mask(dev->pdev,
                                                    DMA_BIT_MASK(32));
        }

        ret = nouveau_ttm_global_init(drm);
        if (ret)
                return ret;

        ret = ttm_bo_device_init(&drm->ttm.bdev,
                                  drm->ttm.bo_global_ref.ref.object,
                                  &nouveau_bo_driver,
                                  dev->anon_inode->i_mapping,
                                  DRM_FILE_PAGE_OFFSET,
                                  bits <= 32);
        if (ret) {
                NV_ERROR(drm, "error initialising bo driver, %d\n", ret);
                return ret;
        }

        /* VRAM init */
        drm->gem.vram_available  = nouveau_fb(drm->device)->ram->size;
        drm->gem.vram_available -= nouveau_instmem(drm->device)->reserved;

        ret = ttm_bo_init_mm(&drm->ttm.bdev, TTM_PL_VRAM,
                              drm->gem.vram_available >> PAGE_SHIFT);
        if (ret) {
                NV_ERROR(drm, "VRAM mm init failed, %d\n", ret);
                return ret;
        }

        drm->ttm.mtrr = arch_phys_wc_add(nv_device_resource_start(device, 1),
                                         nv_device_resource_len(device, 1));

        /* GART init */
        if (drm->agp.stat != ENABLED) {
                drm->gem.gart_available = nouveau_vmmgr(drm->device)->limit;
        } else {
                drm->gem.gart_available = drm->agp.size;
        }

        ret = ttm_bo_init_mm(&drm->ttm.bdev, TTM_PL_TT,
                              drm->gem.gart_available >> PAGE_SHIFT);
        if (ret) {
                NV_ERROR(drm, "GART mm init failed, %d\n", ret);
                return ret;
        }

        NV_INFO(drm, "VRAM: %d MiB\n", (u32)(drm->gem.vram_available >> 20));
        NV_INFO(drm, "GART: %d MiB\n", (u32)(drm->gem.gart_available >> 20));
        return 0;
}

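/*
 * Inverse of nouveau_ttm_init(): drain and destroy the VRAM/GART
 * ranges, release the BO device and the global references, and drop
 * the write-combine mapping of the VRAM BAR.
 */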
void
nouveau_ttm_fini(struct nouveau_drm *drm)
{
        mutex_lock(&drm->dev->struct_mutex);
        ttm_bo_clean_mm(&drm->ttm.bdev, TTM_PL_VRAM);
        ttm_bo_clean_mm(&drm->ttm.bdev, TTM_PL_TT);
        mutex_unlock(&drm->dev->struct_mutex);

        ttm_bo_device_release(&drm->ttm.bdev);

        nouveau_ttm_global_release(drm);

        arch_phys_wc_del(drm->ttm.mtrr);
        drm->ttm.mtrr = 0;
}