linux/drivers/gpu/drm/nouveau/nvkm/subdev/instmem/gk20a.c
/*
 * Copyright (c) 2015, NVIDIA CORPORATION. All rights reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 */

/*
 * GK20A does not have dedicated video memory, and to accurately represent this
 * fact Nouveau will not create a RAM device for it. Therefore its instmem
 * implementation must be done directly on top of system memory, while providing
 * coherent read and write operations.
 *
 * Instmem can be allocated through two means:
 * 1) If an IOMMU mapping has been probed, the IOMMU API is used to make memory
 *    pages appear contiguous to the GPU. This is the preferred way.
 * 2) If no IOMMU mapping is probed, the DMA API is used to allocate physically
 *    contiguous memory.
 *
 * In both cases CPU reads and writes are performed using PRAMIN (i.e. using the
 * GPU path) to ensure these operations are coherent for the GPU. This allows us
 * to use more "relaxed" allocation parameters when using the DMA API, since we
 * never need a kernel mapping.
 */

#include <subdev/fb.h>
#include <core/mm.h>
#include <core/device.h>

#ifdef __KERNEL__
#include <linux/dma-attrs.h>
#include <linux/iommu.h>
#include <nouveau_platform.h>
#endif

#include "priv.h"

struct gk20a_instobj_priv {
        struct nvkm_instobj base;
        /* Must be second member here - see nouveau_gpuobj_map_vm() */
        struct nvkm_mem *mem;
        /* Pointed to by mem */
        struct nvkm_mem _mem;
};

/*
 * Used for objects allocated using the DMA API
 */
struct gk20a_instobj_dma {
        struct gk20a_instobj_priv base;

        void *cpuaddr;
        dma_addr_t handle;
        struct nvkm_mm_node r;
};

/*
 * Used for objects flattened using the IOMMU API
 */
struct gk20a_instobj_iommu {
        struct gk20a_instobj_priv base;

        /* array of base.mem->size pages */
        struct page *pages[];
};

struct gk20a_instmem_priv {
        struct nvkm_instmem base;
        spinlock_t lock;
        u64 addr;

        /* Only used if an IOMMU is present */
        struct mutex *mm_mutex;
        struct nvkm_mm *mm;
        struct iommu_domain *domain;
        unsigned long iommu_pgshift;

        /* Only used by the DMA API */
        struct dma_attrs attrs;
};

/*
 * Use PRAMIN to read/write data and avoid coherency issues.
 * PRAMIN uses the GPU path and ensures data will always be coherent.
 *
 * A dynamic mapping based solution would be desirable in the future, but
 * the issue remains of how to maintain coherency efficiently. On ARM it is
 * not easy (if possible at all?) to create uncached temporary mappings.
 */

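/*
 * Read a 32-bit word through the PRAMIN aperture. The target address is split
 * into a 1MiB-aligned window base (programmed into register 0x001700 in 64KiB
 * units, as done below) and a 20-bit offset into the aperture at 0x700000.
 * The current window base is cached in priv->addr so the register is only
 * reprogrammed when an access falls outside the current window.
 */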
static u32
gk20a_instobj_rd32(struct nvkm_object *object, u64 offset)
{
        struct gk20a_instmem_priv *priv = (void *)nvkm_instmem(object);
        struct gk20a_instobj_priv *node = (void *)object;
        unsigned long flags;
        u64 base = (node->mem->offset + offset) & 0xffffff00000ULL;
        u64 addr = (node->mem->offset + offset) & 0x000000fffffULL;
        u32 data;

        spin_lock_irqsave(&priv->lock, flags);
        if (unlikely(priv->addr != base)) {
                nv_wr32(priv, 0x001700, base >> 16);
                priv->addr = base;
        }
        data = nv_rd32(priv, 0x700000 + addr);
        spin_unlock_irqrestore(&priv->lock, flags);
        return data;
}

static void
gk20a_instobj_wr32(struct nvkm_object *object, u64 offset, u32 data)
{
        struct gk20a_instmem_priv *priv = (void *)nvkm_instmem(object);
        struct gk20a_instobj_priv *node = (void *)object;
        unsigned long flags;
        u64 base = (node->mem->offset + offset) & 0xffffff00000ULL;
        u64 addr = (node->mem->offset + offset) & 0x000000fffffULL;

        spin_lock_irqsave(&priv->lock, flags);
        if (unlikely(priv->addr != base)) {
                nv_wr32(priv, 0x001700, base >> 16);
                priv->addr = base;
        }
        nv_wr32(priv, 0x700000 + addr, data);
        spin_unlock_irqrestore(&priv->lock, flags);
}

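/*
 * Destructor for objects allocated through the DMA API: hand the physically
 * contiguous buffer back to dma_free_attrs(), using the same attributes it
 * was allocated with.
 */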
static void
gk20a_instobj_dtor_dma(struct gk20a_instobj_priv *_node)
{
        struct gk20a_instobj_dma *node = (void *)_node;
        struct gk20a_instmem_priv *priv = (void *)nvkm_instmem(node);
        struct device *dev = nv_device_base(nv_device(priv));

        if (unlikely(!node->cpuaddr))
                return;

        dma_free_attrs(dev, _node->mem->size << PAGE_SHIFT, node->cpuaddr,
                       node->handle, &priv->attrs);
}

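/*
 * Destructor for objects allocated through the IOMMU API: unmap each page
 * from the GPU address space, free the backing pages, then return the
 * reserved range to the IOMMU space allocator.
 */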
static void
gk20a_instobj_dtor_iommu(struct gk20a_instobj_priv *_node)
{
        struct gk20a_instobj_iommu *node = (void *)_node;
        struct gk20a_instmem_priv *priv = (void *)nvkm_instmem(node);
        struct nvkm_mm_node *r;
        int i;

        if (unlikely(list_empty(&_node->mem->regions)))
                return;

        r = list_first_entry(&_node->mem->regions, struct nvkm_mm_node,
                             rl_entry);

        /* clear bit 34 to unmap pages */
        r->offset &= ~BIT(34 - priv->iommu_pgshift);

        /* Unmap pages from GPU address space and free them */
        for (i = 0; i < _node->mem->size; i++) {
                iommu_unmap(priv->domain,
                            (r->offset + i) << priv->iommu_pgshift, PAGE_SIZE);
                __free_page(node->pages[i]);
        }

        /* Release area from GPU address space */
        mutex_lock(priv->mm_mutex);
        nvkm_mm_free(priv->mm, &r);
        mutex_unlock(priv->mm_mutex);
}

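/*
 * Common destructor: pick the IOMMU or DMA teardown path depending on how the
 * object was allocated, then destroy the base instobj.
 */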
static void
gk20a_instobj_dtor(struct nvkm_object *object)
{
        struct gk20a_instobj_priv *node = (void *)object;
        struct gk20a_instmem_priv *priv = (void *)nvkm_instmem(node);

        if (priv->domain)
                gk20a_instobj_dtor_iommu(node);
        else
                gk20a_instobj_dtor_dma(node);

        nvkm_instobj_destroy(&node->base);
}

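/*
 * Allocate instmem through the DMA API. The returned buffer is physically
 * contiguous, so it can be described by a single nvkm_mm_node spanning the
 * whole allocation.
 */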
static int
gk20a_instobj_ctor_dma(struct nvkm_object *parent, struct nvkm_object *engine,
                       struct nvkm_oclass *oclass, u32 npages, u32 align,
                       struct gk20a_instobj_priv **_node)
{
        struct gk20a_instobj_dma *node;
        struct gk20a_instmem_priv *priv = (void *)nvkm_instmem(parent);
        struct device *dev = nv_device_base(nv_device(parent));
        int ret;

        ret = nvkm_instobj_create_(parent, engine, oclass, sizeof(*node),
                                   (void **)&node);
        *_node = &node->base;
        if (ret)
                return ret;

        node->cpuaddr = dma_alloc_attrs(dev, npages << PAGE_SHIFT,
                                        &node->handle, GFP_KERNEL,
                                        &priv->attrs);
        if (!node->cpuaddr) {
                nv_error(priv, "cannot allocate DMA memory\n");
                return -ENOMEM;
        }

        /* alignment check */
        if (unlikely(node->handle & (align - 1)))
                nv_warn(priv, "memory not aligned as requested: %pad (0x%x)\n",
                        &node->handle, align);

        /* present the memory as mappable using small (4K) pages */
        node->r.type = 12;
        node->r.offset = node->handle >> 12;
        node->r.length = (npages << PAGE_SHIFT) >> 12;

        node->base._mem.offset = node->handle;

        INIT_LIST_HEAD(&node->base._mem.regions);
        list_add_tail(&node->r.rl_entry, &node->base._mem.regions);

        return 0;
}

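/*
 * Allocate instmem through the IOMMU API: allocate individual pages, reserve
 * a contiguous range of GPU addresses from the IOMMU space allocator, and map
 * each page into that range. On failure, pages mapped so far are unmapped and
 * everything is released in reverse order.
 */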
static int
gk20a_instobj_ctor_iommu(struct nvkm_object *parent, struct nvkm_object *engine,
                         struct nvkm_oclass *oclass, u32 npages, u32 align,
                         struct gk20a_instobj_priv **_node)
{
        struct gk20a_instobj_iommu *node;
        struct gk20a_instmem_priv *priv = (void *)nvkm_instmem(parent);
        struct nvkm_mm_node *r;
        int ret;
        int i;

        ret = nvkm_instobj_create_(parent, engine, oclass,
                                sizeof(*node) + sizeof(node->pages[0]) * npages,
                                (void **)&node);
        *_node = &node->base;
        if (ret)
                return ret;

        /* Allocate backing memory */
        for (i = 0; i < npages; i++) {
                struct page *p = alloc_page(GFP_KERNEL);

                if (p == NULL) {
                        ret = -ENOMEM;
                        goto free_pages;
                }
                node->pages[i] = p;
        }

        mutex_lock(priv->mm_mutex);
        /* Reserve area from GPU address space */
        ret = nvkm_mm_head(priv->mm, 0, 1, npages, npages,
                           align >> priv->iommu_pgshift, &r);
        mutex_unlock(priv->mm_mutex);
        if (ret) {
                nv_error(priv, "virtual space is full!\n");
                goto free_pages;
        }

        /* Map into GPU address space */
        for (i = 0; i < npages; i++) {
                struct page *p = node->pages[i];
                u32 offset = (r->offset + i) << priv->iommu_pgshift;

                ret = iommu_map(priv->domain, offset, page_to_phys(p),
                                PAGE_SIZE, IOMMU_READ | IOMMU_WRITE);
                if (ret < 0) {
                        nv_error(priv, "IOMMU mapping failure: %d\n", ret);

                        while (i-- > 0) {
                                offset -= PAGE_SIZE;
                                iommu_unmap(priv->domain, offset, PAGE_SIZE);
                        }
                        goto release_area;
                }
        }

        /* Bit 34 indicates that the address must be resolved through the IOMMU */
        r->offset |= BIT(34 - priv->iommu_pgshift);

        node->base._mem.offset = ((u64)r->offset) << priv->iommu_pgshift;

        INIT_LIST_HEAD(&node->base._mem.regions);
        list_add_tail(&r->rl_entry, &node->base._mem.regions);

        return 0;

release_area:
        mutex_lock(priv->mm_mutex);
        nvkm_mm_free(priv->mm, &r);
        mutex_unlock(priv->mm_mutex);

free_pages:
        for (i = 0; i < npages && node->pages[i] != NULL; i++)
                __free_page(node->pages[i]);

        return ret;
}

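/*
 * Common constructor: round the requested size and alignment up to page
 * granularity, then allocate through the IOMMU path if a domain has been
 * probed, or fall back to the DMA API otherwise.
 */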
static int
gk20a_instobj_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
                   struct nvkm_oclass *oclass, void *data, u32 _size,
                   struct nvkm_object **pobject)
{
        struct nvkm_instobj_args *args = data;
        struct gk20a_instmem_priv *priv = (void *)nvkm_instmem(parent);
        struct gk20a_instobj_priv *node;
        u32 size, align;
        int ret;

        nv_debug(parent, "%s (%s): size: %x align: %x\n", __func__,
                 priv->domain ? "IOMMU" : "DMA", args->size, args->align);

        /* Round size and align to page bounds */
        size = max(roundup(args->size, PAGE_SIZE), PAGE_SIZE);
        align = max(roundup(args->align, PAGE_SIZE), PAGE_SIZE);

        if (priv->domain)
                ret = gk20a_instobj_ctor_iommu(parent, engine, oclass,
                                              size >> PAGE_SHIFT, align, &node);
        else
                ret = gk20a_instobj_ctor_dma(parent, engine, oclass,
                                             size >> PAGE_SHIFT, align, &node);
        *pobject = nv_object(node);
        if (ret)
                return ret;

        node->mem = &node->_mem;

        /* present the memory as mappable using small (4K) pages */
        node->mem->size = size >> 12;
        node->mem->memtype = 0;
        node->mem->page_shift = 12;

        node->base.addr = node->mem->offset;
        node->base.size = size;

        nv_debug(parent, "alloc size: 0x%x, align: 0x%x, gaddr: 0x%llx\n",
                 size, align, node->mem->offset);

        return 0;
}

static struct nvkm_instobj_impl
gk20a_instobj_oclass = {
        .base.ofuncs = &(struct nvkm_ofuncs) {
                .ctor = gk20a_instobj_ctor,
                .dtor = gk20a_instobj_dtor,
                .init = _nvkm_instobj_init,
                .fini = _nvkm_instobj_fini,
                .rd32 = gk20a_instobj_rd32,
                .wr32 = gk20a_instobj_wr32,
        },
};

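/*
 * Invalidate the cached PRAMIN window base so that the first access after a
 * fini/init cycle reprograms the window register.
 */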
static int
gk20a_instmem_fini(struct nvkm_object *object, bool suspend)
{
        struct gk20a_instmem_priv *priv = (void *)object;
        priv->addr = ~0ULL;
        return nvkm_instmem_fini(&priv->base, suspend);
}

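/*
 * Instmem constructor: if the platform device exposes an IOMMU domain, use it
 * (the preferred path); otherwise fall back to the DMA API with "relaxed"
 * attributes, since instmem is only ever accessed through PRAMIN and never
 * needs a kernel mapping.
 */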
static int
gk20a_instmem_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
                   struct nvkm_oclass *oclass, void *data, u32 size,
                   struct nvkm_object **pobject)
{
        struct gk20a_instmem_priv *priv;
        struct nouveau_platform_device *plat;
        int ret;

        ret = nvkm_instmem_create(parent, engine, oclass, &priv);
        *pobject = nv_object(priv);
        if (ret)
                return ret;

        spin_lock_init(&priv->lock);

        plat = nv_device_to_platform(nv_device(parent));
        if (plat->gpu->iommu.domain) {
                priv->domain = plat->gpu->iommu.domain;
                priv->mm = plat->gpu->iommu.mm;
                priv->iommu_pgshift = plat->gpu->iommu.pgshift;
                priv->mm_mutex = &plat->gpu->iommu.mutex;

                nv_info(priv, "using IOMMU\n");
        } else {
                init_dma_attrs(&priv->attrs);
                /*
                 * We will access instmem through PRAMIN and thus do not need a
                 * consistent CPU pointer or kernel mapping
                 */
                dma_set_attr(DMA_ATTR_NON_CONSISTENT, &priv->attrs);
                dma_set_attr(DMA_ATTR_WEAK_ORDERING, &priv->attrs);
                dma_set_attr(DMA_ATTR_WRITE_COMBINE, &priv->attrs);
                dma_set_attr(DMA_ATTR_NO_KERNEL_MAPPING, &priv->attrs);

                nv_info(priv, "using DMA API\n");
        }

        return 0;
}

struct nvkm_oclass *
gk20a_instmem_oclass = &(struct nvkm_instmem_impl) {
        .base.handle = NV_SUBDEV(INSTMEM, 0xea),
        .base.ofuncs = &(struct nvkm_ofuncs) {
                .ctor = gk20a_instmem_ctor,
                .dtor = _nvkm_instmem_dtor,
                .init = _nvkm_instmem_init,
                .fini = gk20a_instmem_fini,
        },
        .instobj = &gk20a_instobj_oclass.base,
}.base;