linux/drivers/gpu/drm/nouveau/nvkm/core/memory.c
/*
 * Copyright 2015 Red Hat Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Ben Skeggs <bskeggs@redhat.com>
 */
#include <core/memory.h>
#include <core/mm.h>
#include <subdev/fb.h>
#include <subdev/instmem.h>

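/* Comptag handles are shared by all users of a given nvkm_memory and are
 * reference counted; the underlying allocation in the FB's comptag pool
 * is only released, under fb->tags.mutex, when the final reference goes
 * away.
 */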
void
nvkm_memory_tags_put(struct nvkm_memory *memory, struct nvkm_device *device,
                     struct nvkm_tags **ptags)
{
        struct nvkm_fb *fb = device->fb;
        struct nvkm_tags *tags = *ptags;
        if (tags) {
                mutex_lock(&fb->tags.mutex);
                if (refcount_dec_and_test(&tags->refcount)) {
                        nvkm_mm_free(&fb->tags.mm, &tags->mn);
                        kfree(memory->tags);
                        memory->tags = NULL;
                }
                mutex_unlock(&fb->tags.mutex);
                *ptags = NULL;
        }
}

int
nvkm_memory_tags_get(struct nvkm_memory *memory, struct nvkm_device *device,
                     u32 nr, void (*clr)(struct nvkm_device *, u32, u32),
                     struct nvkm_tags **ptags)
{
        struct nvkm_fb *fb = device->fb;
        struct nvkm_tags *tags;

        mutex_lock(&fb->tags.mutex);
        if ((tags = memory->tags)) {
                /* If comptags exist for the memory, but a different amount
                 * than requested, the buffer is being mapped with settings
                 * that are incompatible with existing mappings.
                 */
                if (tags->mn && tags->mn->length != nr) {
                        mutex_unlock(&fb->tags.mutex);
                        return -EINVAL;
                }

                refcount_inc(&tags->refcount);
                mutex_unlock(&fb->tags.mutex);
                *ptags = tags;
                return 0;
        }

        if (!(tags = kmalloc(sizeof(*tags), GFP_KERNEL))) {
                mutex_unlock(&fb->tags.mutex);
                return -ENOMEM;
        }

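        /* Attempt to allocate a contiguous run of nr comptags from the
         * FB's comptag pool, clearing them via the caller's callback on
         * success.
         */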
        if (!nvkm_mm_head(&fb->tags.mm, 0, 1, nr, nr, 1, &tags->mn)) {
                if (clr)
                        clr(device, tags->mn->offset, tags->mn->length);
        } else {
                /* Failure to allocate HW comptags is not an error, the
                 * caller should fall back to an uncompressed map.
                 *
                 * As memory can be mapped in multiple places, we still
                 * need to track the allocation failure and ensure that
                 * any additional mappings remain uncompressed.
                 *
                 * This is handled by returning an empty nvkm_tags.
                 */
                tags->mn = NULL;
        }

        refcount_set(&tags->refcount, 1);
        *ptags = memory->tags = tags;
        mutex_unlock(&fb->tags.mutex);
        return 0;
}
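
/* Example (illustrative sketch only, not part of this file): a caller
 * requests comptags and falls back to an uncompressed mapping when the
 * returned nvkm_tags is empty.  map_compressed() and map_uncompressed()
 * are hypothetical helpers; everything else is the API above.
 *
 *        struct nvkm_tags *tags;
 *        int ret = nvkm_memory_tags_get(memory, device, nr, clr, &tags);
 *        if (ret)
 *                return ret;
 *        if (tags->mn)
 *                map_compressed(memory, tags->mn->offset);
 *        else
 *                map_uncompressed(memory);
 *        ...
 *        nvkm_memory_tags_put(memory, device, &tags);
 */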

void
nvkm_memory_ctor(const struct nvkm_memory_func *func,
                 struct nvkm_memory *memory)
{
        memory->func = func;
        kref_init(&memory->kref);
}

static void
nvkm_memory_del(struct kref *kref)
{
        struct nvkm_memory *memory = container_of(kref, typeof(*memory), kref);
        if (!WARN_ON(!memory->func)) {
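                /* The dtor hook returns the pointer to pass to kfree(),
                 * which may differ from the nvkm_memory itself when it
                 * is embedded in a larger backing-store object.
                 */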
                if (memory->func->dtor)
                        memory = memory->func->dtor(memory);
                kfree(memory);
        }
}

void
nvkm_memory_unref(struct nvkm_memory **pmemory)
{
        struct nvkm_memory *memory = *pmemory;
        if (memory) {
                kref_put(&memory->kref, nvkm_memory_del);
                *pmemory = NULL;
        }
}

struct nvkm_memory *
nvkm_memory_ref(struct nvkm_memory *memory)
{
        if (memory)
                kref_get(&memory->kref);
        return memory;
}
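
/* Example (illustrative sketch only): the usual lifetime pattern.  A new
 * holder takes its own reference with nvkm_memory_ref() and drops it via
 * nvkm_memory_unref(), which also NULLs the holder's pointer so a stale
 * reference cannot be reused.
 *
 *        struct nvkm_memory *ref = nvkm_memory_ref(memory);
 *        ...
 *        nvkm_memory_unref(&ref);
 */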

int
nvkm_memory_new(struct nvkm_device *device, enum nvkm_memory_target target,
                u64 size, u32 align, bool zero,
                struct nvkm_memory **pmemory)
{
        struct nvkm_instmem *imem = device->imem;
        struct nvkm_memory *memory;
        int ret;

        if (unlikely(target != NVKM_MEM_TARGET_INST || !imem))
                return -ENOSYS;

        ret = nvkm_instobj_new(imem, size, align, zero, &memory);
        if (ret)
                return ret;

        *pmemory = memory;
        return 0;
}
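
/* Example (illustrative sketch only): allocating a page of zeroed
 * instance memory.  Only NVKM_MEM_TARGET_INST is accepted here; any
 * other target, or a device without instmem, fails with -ENOSYS.  The
 * size and alignment values are arbitrary for illustration.
 *
 *        struct nvkm_memory *memory = NULL;
 *        int ret = nvkm_memory_new(device, NVKM_MEM_TARGET_INST,
 *                                  0x1000, 0x100, true, &memory);
 *        if (ret == 0)
 *                nvkm_memory_unref(&memory);
 */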