linux/drivers/gpu/drm/nouveau/nouveau_ttm.c
// SPDX-License-Identifier: GPL-2.0 OR MIT
/*
 * Copyright (c) 2007-2008 Tungsten Graphics, Inc., Cedar Park, TX., USA,
 * Copyright (c) 2009 VMware, Inc., Palo Alto, CA., USA,
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sub license,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 */

#include <linux/limits.h>
#include <linux/swiotlb.h>

#include <drm/ttm/ttm_range_manager.h>

#include "nouveau_drv.h"
#include "nouveau_gem.h"
#include "nouveau_mem.h"
#include "nouveau_ttm.h"

#include <core/tegra.h>

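/*
 * Common ->free() callback for the resource managers below: release the
 * nouveau_mem backing the resource.
 */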
static void
nouveau_manager_del(struct ttm_resource_manager *man, struct ttm_resource *reg)
{
        nouveau_mem_del(reg);
}

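/*
 * ->alloc() for the VRAM manager: create a nouveau_mem, initialise the
 * TTM resource and back it with VRAM, honouring the BO's contiguity and
 * page size requirements.
 */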
static int
nouveau_vram_manager_new(struct ttm_resource_manager *man,
                         struct ttm_buffer_object *bo,
                         const struct ttm_place *place,
                         struct ttm_resource **res)
{
        struct nouveau_bo *nvbo = nouveau_bo(bo);
        struct nouveau_drm *drm = nouveau_bdev(bo->bdev);
        int ret;

        if (drm->client.device.info.ram_size == 0)
                return -ENOMEM;

        ret = nouveau_mem_new(&drm->master, nvbo->kind, nvbo->comp, res);
        if (ret)
                return ret;

        ttm_resource_init(bo, place, *res);

        ret = nouveau_mem_vram(*res, nvbo->contig, nvbo->page);
        if (ret) {
                nouveau_mem_del(*res);
                return ret;
        }

        return 0;
}

const struct ttm_resource_manager_func nouveau_vram_manager = {
        .alloc = nouveau_vram_manager_new,
        .free = nouveau_manager_del,
};

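/*
 * ->alloc() for the NV50+ GART manager: only the nouveau_mem is created
 * here; no GPU virtual address is assigned yet, so the resource start is
 * left at 0.
 */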
static int
nouveau_gart_manager_new(struct ttm_resource_manager *man,
                         struct ttm_buffer_object *bo,
                         const struct ttm_place *place,
                         struct ttm_resource **res)
{
        struct nouveau_bo *nvbo = nouveau_bo(bo);
        struct nouveau_drm *drm = nouveau_bdev(bo->bdev);
        int ret;

        ret = nouveau_mem_new(&drm->master, nvbo->kind, nvbo->comp, res);
        if (ret)
                return ret;

        ttm_resource_init(bo, place, *res);
        (*res)->start = 0;
        return 0;
}

const struct ttm_resource_manager_func nouveau_gart_manager = {
        .alloc = nouveau_gart_manager_new,
        .free = nouveau_manager_del,
};

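/*
 * ->alloc() for the pre-NV50, non-AGP GART manager: reserve a VMM range
 * (page shift 12, i.e. 4 KiB pages) up front and report its address as
 * the resource start.
 */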
static int
nv04_gart_manager_new(struct ttm_resource_manager *man,
                      struct ttm_buffer_object *bo,
                      const struct ttm_place *place,
                      struct ttm_resource **res)
{
        struct nouveau_bo *nvbo = nouveau_bo(bo);
        struct nouveau_drm *drm = nouveau_bdev(bo->bdev);
        struct nouveau_mem *mem;
        int ret;

        ret = nouveau_mem_new(&drm->master, nvbo->kind, nvbo->comp, res);
        if (ret)
                return ret;

        mem = nouveau_mem(*res);
        ttm_resource_init(bo, place, *res);
        ret = nvif_vmm_get(&mem->cli->vmm.vmm, PTES, false, 12, 0,
                           (long)(*res)->num_pages << PAGE_SHIFT, &mem->vma[0]);
        if (ret) {
                nouveau_mem_del(*res);
                return ret;
        }

        (*res)->start = mem->vma[0].addr >> PAGE_SHIFT;
        return 0;
}

const struct ttm_resource_manager_func nv04_gart_manager = {
        .alloc = nv04_gart_manager_new,
        .free = nouveau_manager_del,
};

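/*
 * Resolve the NVIF memory types used for host (system) memory of the
 * given kind, both with and without the COHERENT flag.
 */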
static int
nouveau_ttm_init_host(struct nouveau_drm *drm, u8 kind)
{
        struct nvif_mmu *mmu = &drm->client.mmu;
        int typei;

        typei = nvif_mmu_type(mmu, NVIF_MEM_HOST | NVIF_MEM_MAPPABLE |
                                            kind | NVIF_MEM_COHERENT);
        if (typei < 0)
                return -ENOSYS;

        drm->ttm.type_host[!!kind] = typei;

        typei = nvif_mmu_type(mmu, NVIF_MEM_HOST | NVIF_MEM_MAPPABLE | kind);
        if (typei < 0)
                return -ENOSYS;

        drm->ttm.type_ncoh[!!kind] = typei;
        return 0;
}

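/*
 * Register the VRAM resource manager: the nouveau-specific manager on
 * Tesla (NV50) and newer, a plain TTM range manager otherwise.
 */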
static int
nouveau_ttm_init_vram(struct nouveau_drm *drm)
{
        if (drm->client.device.info.family >= NV_DEVICE_INFO_V0_TESLA) {
                struct ttm_resource_manager *man = kzalloc(sizeof(*man), GFP_KERNEL);

                if (!man)
                        return -ENOMEM;

                man->func = &nouveau_vram_manager;

                ttm_resource_manager_init(man,
                                          drm->gem.vram_available >> PAGE_SHIFT);
                ttm_set_driver_manager(&drm->ttm.bdev, TTM_PL_VRAM, man);
                ttm_resource_manager_set_used(man, true);
                return 0;
        } else {
                return ttm_range_man_init(&drm->ttm.bdev, TTM_PL_VRAM, false,
                                          drm->gem.vram_available >> PAGE_SHIFT);
        }
}

static void
nouveau_ttm_fini_vram(struct nouveau_drm *drm)
{
        struct ttm_resource_manager *man = ttm_manager_type(&drm->ttm.bdev, TTM_PL_VRAM);

        if (drm->client.device.info.family >= NV_DEVICE_INFO_V0_TESLA) {
                ttm_resource_manager_set_used(man, false);
                ttm_resource_manager_evict_all(&drm->ttm.bdev, man);
                ttm_resource_manager_cleanup(man);
                ttm_set_driver_manager(&drm->ttm.bdev, TTM_PL_VRAM, NULL);
                kfree(man);
        } else
                ttm_range_man_fini(&drm->ttm.bdev, TTM_PL_VRAM);
}

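/*
 * Register the TT (GART) resource manager: the NV50+ or NV04 nouveau
 * manager depending on the chipset, or a TTM range manager when AGP is
 * in use.
 */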
static int
nouveau_ttm_init_gtt(struct nouveau_drm *drm)
{
        struct ttm_resource_manager *man;
        unsigned long size_pages = drm->gem.gart_available >> PAGE_SHIFT;
        const struct ttm_resource_manager_func *func = NULL;

        if (drm->client.device.info.family >= NV_DEVICE_INFO_V0_TESLA)
                func = &nouveau_gart_manager;
        else if (!drm->agp.bridge)
                func = &nv04_gart_manager;
        else
                return ttm_range_man_init(&drm->ttm.bdev, TTM_PL_TT, true,
                                          size_pages);

        man = kzalloc(sizeof(*man), GFP_KERNEL);
        if (!man)
                return -ENOMEM;

        man->func = func;
        man->use_tt = true;
        ttm_resource_manager_init(man, size_pages);
        ttm_set_driver_manager(&drm->ttm.bdev, TTM_PL_TT, man);
        ttm_resource_manager_set_used(man, true);
        return 0;
}

static void
nouveau_ttm_fini_gtt(struct nouveau_drm *drm)
{
        struct ttm_resource_manager *man = ttm_manager_type(&drm->ttm.bdev, TTM_PL_TT);

        if (drm->client.device.info.family < NV_DEVICE_INFO_V0_TESLA &&
            drm->agp.bridge)
                ttm_range_man_fini(&drm->ttm.bdev, TTM_PL_TT);
        else {
                ttm_resource_manager_set_used(man, false);
                ttm_resource_manager_evict_all(&drm->ttm.bdev, man);
                ttm_resource_manager_cleanup(man);
                ttm_set_driver_manager(&drm->ttm.bdev, TTM_PL_TT, NULL);
                kfree(man);
        }
}

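/*
 * Set up TTM for the device: resolve the NVIF memory types, take over any
 * AGP aperture, initialise the TTM device and create the VRAM and GART
 * resource managers.
 */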
int
nouveau_ttm_init(struct nouveau_drm *drm)
{
        struct nvkm_device *device = nvxx_device(&drm->client.device);
        struct nvkm_pci *pci = device->pci;
        struct nvif_mmu *mmu = &drm->client.mmu;
        struct drm_device *dev = drm->dev;
        bool need_swiotlb = false;
        int typei, ret;

        ret = nouveau_ttm_init_host(drm, 0);
        if (ret)
                return ret;

        if (drm->client.device.info.family >= NV_DEVICE_INFO_V0_TESLA &&
            drm->client.device.info.chipset != 0x50) {
                ret = nouveau_ttm_init_host(drm, NVIF_MEM_KIND);
                if (ret)
                        return ret;
        }

        if (drm->client.device.info.platform != NV_DEVICE_INFO_V0_SOC &&
            drm->client.device.info.family >= NV_DEVICE_INFO_V0_TESLA) {
                typei = nvif_mmu_type(mmu, NVIF_MEM_VRAM | NVIF_MEM_MAPPABLE |
                                           NVIF_MEM_KIND |
                                           NVIF_MEM_COMP |
                                           NVIF_MEM_DISP);
                if (typei < 0)
                        return -ENOSYS;

                drm->ttm.type_vram = typei;
        } else {
                drm->ttm.type_vram = -1;
        }

        if (pci && pci->agp.bridge) {
                drm->agp.bridge = pci->agp.bridge;
                drm->agp.base = pci->agp.base;
                drm->agp.size = pci->agp.size;
                drm->agp.cma = pci->agp.cma;
        }

#if IS_ENABLED(CONFIG_SWIOTLB) && IS_ENABLED(CONFIG_X86)
        need_swiotlb = is_swiotlb_active(dev->dev);
#endif

        ret = ttm_device_init(&drm->ttm.bdev, &nouveau_bo_driver, drm->dev->dev,
                                  dev->anon_inode->i_mapping,
                                  dev->vma_offset_manager, need_swiotlb,
                                  drm->client.mmu.dmabits <= 32);
        if (ret) {
                NV_ERROR(drm, "error initialising bo driver, %d\n", ret);
                return ret;
        }

        /* VRAM init */
        drm->gem.vram_available = drm->client.device.info.ram_user;

        arch_io_reserve_memtype_wc(device->func->resource_addr(device, 1),
                                   device->func->resource_size(device, 1));

        ret = nouveau_ttm_init_vram(drm);
        if (ret) {
                NV_ERROR(drm, "VRAM mm init failed, %d\n", ret);
                return ret;
        }

        drm->ttm.mtrr = arch_phys_wc_add(device->func->resource_addr(device, 1),
                                         device->func->resource_size(device, 1));

        /* GART init */
        if (!drm->agp.bridge) {
                drm->gem.gart_available = drm->client.vmm.vmm.limit;
        } else {
                drm->gem.gart_available = drm->agp.size;
        }

        ret = nouveau_ttm_init_gtt(drm);
        if (ret) {
                NV_ERROR(drm, "GART mm init failed, %d\n", ret);
                return ret;
        }

        mutex_init(&drm->ttm.io_reserve_mutex);
        INIT_LIST_HEAD(&drm->ttm.io_reserve_lru);

        NV_INFO(drm, "VRAM: %d MiB\n", (u32)(drm->gem.vram_available >> 20));
        NV_INFO(drm, "GART: %d MiB\n", (u32)(drm->gem.gart_available >> 20));
        return 0;
}

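/*
 * Tear down TTM: destroy the resource managers and the TTM device, then
 * release the write-combining MTRR and I/O memtype reservation taken in
 * nouveau_ttm_init().
 */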
void
nouveau_ttm_fini(struct nouveau_drm *drm)
{
        struct nvkm_device *device = nvxx_device(&drm->client.device);

        nouveau_ttm_fini_vram(drm);
        nouveau_ttm_fini_gtt(drm);

        ttm_device_fini(&drm->ttm.bdev);

        arch_phys_wc_del(drm->ttm.mtrr);
        drm->ttm.mtrr = 0;
        arch_io_free_memtype_wc(device->func->resource_addr(device, 1),
                                device->func->resource_size(device, 1));
}