linux/drivers/gpu/drm/nouveau/nvkm/subdev/instmem/gk20a.c
/*
 * Copyright (c) 2015, NVIDIA CORPORATION. All rights reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 */

/*
 * GK20A does not have dedicated video memory, and to accurately represent this
 * fact Nouveau will not create a RAM device for it. Therefore its instmem
 * implementation must be done directly on top of system memory, while
 * providing coherent read and write operations.
 *
 * Instmem can be allocated through two means:
 * 1) If an IOMMU domain has been probed, the IOMMU API is used to make memory
 *    pages appear contiguous to the GPU. This is the preferred way.
 * 2) If no IOMMU domain is probed, the DMA API is used to allocate physically
 *    contiguous memory.
 *
 * In both cases CPU reads and writes are performed using PRAMIN (i.e. using
 * the GPU path) to ensure these operations are coherent for the GPU. This
 * allows us to use more "relaxed" allocation parameters when using the DMA
 * API, since we never need a kernel mapping.
 */
#define gk20a_instmem(p) container_of((p), struct gk20a_instmem, base)
#include "priv.h"

#include <core/memory.h>
#include <core/mm.h>
#include <core/tegra.h>
#include <subdev/fb.h>

#define gk20a_instobj(p) container_of((p), struct gk20a_instobj, memory)

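/*
 * Common instance object state shared by the DMA and IOMMU variants below.
 * The embedded nvkm_mem describes the allocation to the rest of nvkm and is
 * what gk20a_instobj_map() hands to nvkm_vm_map_at().
 */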
struct gk20a_instobj {
        struct nvkm_memory memory;
        struct gk20a_instmem *imem;
        struct nvkm_mem mem;
};

/*
 * Used for objects allocated using the DMA API
 */
struct gk20a_instobj_dma {
        struct gk20a_instobj base;

        void *cpuaddr;
        dma_addr_t handle;
        struct nvkm_mm_node r;
};

/*
 * Used for objects flattened using the IOMMU API
 */
struct gk20a_instobj_iommu {
        struct gk20a_instobj base;

        /* array of base.mem.size pages */
        struct page *pages[];
};

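/*
 * Per-device instmem state. lock/lock_flags serialize use of the single
 * PRAMIN window between acquire() and release(), and addr caches the window
 * base currently programmed into register 0x001700 (~0ULL when unknown).
 */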
struct gk20a_instmem {
        struct nvkm_instmem base;
        unsigned long lock_flags;
        spinlock_t lock;
        u64 addr;

        /* Only used if IOMMU is present */
        struct mutex *mm_mutex;
        struct nvkm_mm *mm;
        struct iommu_domain *domain;
        unsigned long iommu_pgshift;

        /* Only used by DMA API */
        struct dma_attrs attrs;
};

static enum nvkm_memory_target
gk20a_instobj_target(struct nvkm_memory *memory)
{
        return NVKM_MEM_TARGET_HOST;
}

static u64
gk20a_instobj_addr(struct nvkm_memory *memory)
{
        return gk20a_instobj(memory)->mem.offset;
}

static u64
gk20a_instobj_size(struct nvkm_memory *memory)
{
        return (u64)gk20a_instobj(memory)->mem.size << 12;
}

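/*
 * acquire() deliberately returns NULL instead of a CPU mapping: this forces
 * every access to go through the rd32/wr32 accessors below. The spinlock is
 * held for the whole acquire/release window so the PRAMIN base cannot be
 * changed underneath an ongoing access.
 */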
static void __iomem *
gk20a_instobj_acquire(struct nvkm_memory *memory)
{
        struct gk20a_instmem *imem = gk20a_instobj(memory)->imem;
        unsigned long flags;
        spin_lock_irqsave(&imem->lock, flags);
        imem->lock_flags = flags;
        return NULL;
}

static void
gk20a_instobj_release(struct nvkm_memory *memory)
{
        struct gk20a_instmem *imem = gk20a_instobj(memory)->imem;
        spin_unlock_irqrestore(&imem->lock, imem->lock_flags);
}

/*
 * Use PRAMIN to read/write data and avoid coherency issues.
 * PRAMIN uses the GPU path and ensures data will always be coherent.
 *
 * A dynamic mapping based solution would be desirable in the future, but
 * the issue remains of how to maintain coherency efficiently. On ARM it is
 * not easy (if possible at all?) to create uncached temporary mappings.
 */

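/*
 * rd32/wr32 slide the 1MiB PRAMIN window (BAR0 offset 0x700000) over the
 * 1MiB-aligned region containing the requested offset by programming the
 * window base (in 64KiB units) into register 0x001700, then access the word
 * through the window. imem->addr caches the currently selected base so the
 * window is only reprogrammed when a different region is touched.
 */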
static u32
gk20a_instobj_rd32(struct nvkm_memory *memory, u64 offset)
{
        struct gk20a_instobj *node = gk20a_instobj(memory);
        struct gk20a_instmem *imem = node->imem;
        struct nvkm_device *device = imem->base.subdev.device;
        u64 base = (node->mem.offset + offset) & 0xffffff00000ULL;
        u64 addr = (node->mem.offset + offset) & 0x000000fffffULL;
        u32 data;

        if (unlikely(imem->addr != base)) {
                nvkm_wr32(device, 0x001700, base >> 16);
                imem->addr = base;
        }
        data = nvkm_rd32(device, 0x700000 + addr);
        return data;
}

static void
gk20a_instobj_wr32(struct nvkm_memory *memory, u64 offset, u32 data)
{
        struct gk20a_instobj *node = gk20a_instobj(memory);
        struct gk20a_instmem *imem = node->imem;
        struct nvkm_device *device = imem->base.subdev.device;
        u64 base = (node->mem.offset + offset) & 0xffffff00000ULL;
        u64 addr = (node->mem.offset + offset) & 0x000000fffffULL;

        if (unlikely(imem->addr != base)) {
                nvkm_wr32(device, 0x001700, base >> 16);
                imem->addr = base;
        }
        nvkm_wr32(device, 0x700000 + addr, data);
}

static void
gk20a_instobj_map(struct nvkm_memory *memory, struct nvkm_vma *vma, u64 offset)
{
        struct gk20a_instobj *node = gk20a_instobj(memory);
        nvkm_vm_map_at(vma, offset, &node->mem);
}

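/*
 * The !cpuaddr check guards against objects whose DMA allocation failed
 * during construction, in which case there is nothing to free.
 */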
static void
gk20a_instobj_dtor_dma(struct gk20a_instobj *_node)
{
        struct gk20a_instobj_dma *node = (void *)_node;
        struct gk20a_instmem *imem = _node->imem;
        struct device *dev = imem->base.subdev.device->dev;

        if (unlikely(!node->cpuaddr))
                return;

        dma_free_attrs(dev, _node->mem.size << PAGE_SHIFT, node->cpuaddr,
                       node->handle, &imem->attrs);
}

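/*
 * Tear down an IOMMU-backed object by undoing the constructor in reverse
 * order: unmap and free each page, then return the reserved region to the
 * GPU address space allocator. The list_empty() check guards against objects
 * whose construction never got as far as reserving a region.
 */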
static void
gk20a_instobj_dtor_iommu(struct gk20a_instobj *_node)
{
        struct gk20a_instobj_iommu *node = (void *)_node;
        struct gk20a_instmem *imem = _node->imem;
        struct nvkm_mm_node *r;
        int i;

        if (unlikely(list_empty(&_node->mem.regions)))
                return;

        r = list_first_entry(&_node->mem.regions, struct nvkm_mm_node,
                             rl_entry);

        /* clear bit 34 to unmap pages */
        r->offset &= ~BIT(34 - imem->iommu_pgshift);

        /* Unmap pages from GPU address space and free them */
        for (i = 0; i < _node->mem.size; i++) {
                iommu_unmap(imem->domain,
                            (r->offset + i) << imem->iommu_pgshift, PAGE_SIZE);
                __free_page(node->pages[i]);
        }

        /* Release area from GPU address space */
        mutex_lock(imem->mm_mutex);
        nvkm_mm_free(imem->mm, &r);
        mutex_unlock(imem->mm_mutex);
}

static void *
gk20a_instobj_dtor(struct nvkm_memory *memory)
{
        struct gk20a_instobj *node = gk20a_instobj(memory);
        struct gk20a_instmem *imem = node->imem;

        if (imem->domain)
                gk20a_instobj_dtor_iommu(node);
        else
                gk20a_instobj_dtor_dma(node);

        return node;
}

static const struct nvkm_memory_func
gk20a_instobj_func = {
        .dtor = gk20a_instobj_dtor,
        .target = gk20a_instobj_target,
        .addr = gk20a_instobj_addr,
        .size = gk20a_instobj_size,
        .acquire = gk20a_instobj_acquire,
        .release = gk20a_instobj_release,
        .rd32 = gk20a_instobj_rd32,
        .wr32 = gk20a_instobj_wr32,
        .map = gk20a_instobj_map,
};

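/*
 * DMA path: allocate a single physically contiguous buffer and hand-build an
 * nvkm_mm_node describing it (in 4KiB units) so the common mapping code can
 * treat it like any other memory region.
 */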
static int
gk20a_instobj_ctor_dma(struct gk20a_instmem *imem, u32 npages, u32 align,
                       struct gk20a_instobj **_node)
{
        struct gk20a_instobj_dma *node;
        struct nvkm_subdev *subdev = &imem->base.subdev;
        struct device *dev = subdev->device->dev;

        if (!(node = kzalloc(sizeof(*node), GFP_KERNEL)))
                return -ENOMEM;
        *_node = &node->base;

        node->cpuaddr = dma_alloc_attrs(dev, npages << PAGE_SHIFT,
                                        &node->handle, GFP_KERNEL,
                                        &imem->attrs);
        if (!node->cpuaddr) {
                nvkm_error(subdev, "cannot allocate DMA memory\n");
                return -ENOMEM;
        }

        /* alignment check */
        if (unlikely(node->handle & (align - 1)))
                nvkm_warn(subdev,
                          "memory not aligned as requested: %pad (0x%x)\n",
                          &node->handle, align);

        /* present memory so it can be mapped using small pages */
        node->r.type = 12;
        node->r.offset = node->handle >> 12;
        node->r.length = (npages << PAGE_SHIFT) >> 12;

        node->base.mem.offset = node->handle;

        INIT_LIST_HEAD(&node->base.mem.regions);
        list_add_tail(&node->r.rl_entry, &node->base.mem.regions);

        return 0;
}

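/*
 * IOMMU path: allocate npages individual pages, reserve a contiguous range
 * of GPU addresses from the shared allocator, map each page into that range
 * with iommu_map(), and finally tag the resulting offset with bit 34 so the
 * GPU resolves it through the IOMMU.
 */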
static int
gk20a_instobj_ctor_iommu(struct gk20a_instmem *imem, u32 npages, u32 align,
                         struct gk20a_instobj **_node)
{
        struct gk20a_instobj_iommu *node;
        struct nvkm_subdev *subdev = &imem->base.subdev;
        struct nvkm_mm_node *r;
        int ret;
        int i;

        if (!(node = kzalloc(sizeof(*node) +
                             sizeof(node->pages[0]) * npages, GFP_KERNEL)))
                return -ENOMEM;
        *_node = &node->base;

        /* Allocate backing memory */
        for (i = 0; i < npages; i++) {
                struct page *p = alloc_page(GFP_KERNEL);

                if (p == NULL) {
                        ret = -ENOMEM;
                        goto free_pages;
                }
                node->pages[i] = p;
        }

        mutex_lock(imem->mm_mutex);
        /* Reserve area from GPU address space */
        ret = nvkm_mm_head(imem->mm, 0, 1, npages, npages,
                           align >> imem->iommu_pgshift, &r);
        mutex_unlock(imem->mm_mutex);
        if (ret) {
                nvkm_error(subdev, "virtual space is full!\n");
                goto free_pages;
        }

        /* Map into GPU address space */
        for (i = 0; i < npages; i++) {
                struct page *p = node->pages[i];
                u32 offset = (r->offset + i) << imem->iommu_pgshift;

                ret = iommu_map(imem->domain, offset, page_to_phys(p),
                                PAGE_SIZE, IOMMU_READ | IOMMU_WRITE);
                if (ret < 0) {
                        nvkm_error(subdev, "IOMMU mapping failure: %d\n", ret);

                        while (i-- > 0) {
                                offset -= PAGE_SIZE;
                                iommu_unmap(imem->domain, offset, PAGE_SIZE);
                        }
                        goto release_area;
                }
        }

        /* Bit 34 indicates an address must be resolved through the IOMMU */
        r->offset |= BIT(34 - imem->iommu_pgshift);

        node->base.mem.offset = ((u64)r->offset) << imem->iommu_pgshift;

        INIT_LIST_HEAD(&node->base.mem.regions);
        list_add_tail(&r->rl_entry, &node->base.mem.regions);

        return 0;

release_area:
        mutex_lock(imem->mm_mutex);
        nvkm_mm_free(imem->mm, &r);
        mutex_unlock(imem->mm_mutex);

free_pages:
        for (i = 0; i < npages && node->pages[i] != NULL; i++)
                __free_page(node->pages[i]);

        return ret;
}

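/*
 * Common constructor: round size and alignment up to the page size, pick the
 * IOMMU or DMA backend depending on whether an IOMMU domain was probed, then
 * describe the result as 4KiB ("small") pages for the rest of nvkm.
 */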
static int
gk20a_instobj_new(struct nvkm_instmem *base, u32 size, u32 align, bool zero,
                  struct nvkm_memory **pmemory)
{
        struct gk20a_instmem *imem = gk20a_instmem(base);
        struct gk20a_instobj *node = NULL;
        struct nvkm_subdev *subdev = &imem->base.subdev;
        int ret;

        nvkm_debug(subdev, "%s (%s): size: %x align: %x\n", __func__,
                   imem->domain ? "IOMMU" : "DMA", size, align);

        /* Round size and align to page bounds */
        size = max(roundup(size, PAGE_SIZE), PAGE_SIZE);
        align = max(roundup(align, PAGE_SIZE), PAGE_SIZE);

        if (imem->domain)
                ret = gk20a_instobj_ctor_iommu(imem, size >> PAGE_SHIFT,
                                               align, &node);
        else
                ret = gk20a_instobj_ctor_dma(imem, size >> PAGE_SHIFT,
                                             align, &node);
        *pmemory = node ? &node->memory : NULL;
        if (ret)
                return ret;

        nvkm_memory_ctor(&gk20a_instobj_func, &node->memory);
        node->imem = imem;

        /* present memory so it can be mapped using small pages */
        node->mem.size = size >> 12;
        node->mem.memtype = 0;
        node->mem.page_shift = 12;

        nvkm_debug(subdev, "alloc size: 0x%x, align: 0x%x, gaddr: 0x%llx\n",
                   size, align, node->mem.offset);

        return 0;
}

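/*
 * Invalidating the cached window base forces the next rd32/wr32 to reprogram
 * register 0x001700 instead of relying on a value that may no longer be
 * current once the subdev has been brought down and back up.
 */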
static void
gk20a_instmem_fini(struct nvkm_instmem *base)
{
        gk20a_instmem(base)->addr = ~0ULL;
}

static const struct nvkm_instmem_func
gk20a_instmem = {
        .fini = gk20a_instmem_fini,
        .memory_new = gk20a_instobj_new,
        .persistent = true,
        .zero = false,
};

int
gk20a_instmem_new(struct nvkm_device *device, int index,
                  struct nvkm_instmem **pimem)
{
        struct nvkm_device_tegra *tdev = device->func->tegra(device);
        struct gk20a_instmem *imem;

        if (!(imem = kzalloc(sizeof(*imem), GFP_KERNEL)))
                return -ENOMEM;
        nvkm_instmem_ctor(&gk20a_instmem, device, index, &imem->base);
        spin_lock_init(&imem->lock);
        *pimem = &imem->base;

        if (tdev->iommu.domain) {
                imem->domain = tdev->iommu.domain;
                imem->mm = &tdev->iommu.mm;
                imem->iommu_pgshift = tdev->iommu.pgshift;
                imem->mm_mutex = &tdev->iommu.mutex;

                nvkm_info(&imem->base.subdev, "using IOMMU\n");
        } else {
                init_dma_attrs(&imem->attrs);
                /*
                 * We will access instmem through PRAMIN and thus do not need a
                 * consistent CPU pointer or kernel mapping
                 */
                dma_set_attr(DMA_ATTR_NON_CONSISTENT, &imem->attrs);
                dma_set_attr(DMA_ATTR_WEAK_ORDERING, &imem->attrs);
                dma_set_attr(DMA_ATTR_WRITE_COMBINE, &imem->attrs);
                dma_set_attr(DMA_ATTR_NO_KERNEL_MAPPING, &imem->attrs);

                nvkm_info(&imem->base.subdev, "using DMA API\n");
        }

        return 0;
}