linux/drivers/gpu/drm/nouveau/nvkm/core/mm.c
/*
 * Copyright 2012 Red Hat Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Ben Skeggs
 */
#include <core/mm.h>

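/* Walk to the previous/next node in the mm's node list, evaluating to
 * NULL when the walk reaches the list head.  Note this relies on a
 * variable named "mm" being in scope at the expansion site.
 */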
#define node(root, dir) ((root)->nl_entry.dir == &mm->nodes) ? NULL :          \
        list_entry((root)->nl_entry.dir, struct nvkm_mm_node, nl_entry)

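/* Debug helper: dump the entire node list and free list to the kernel
 * log, one line per node showing its offset, length and type.
 */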
void
nvkm_mm_dump(struct nvkm_mm *mm, const char *header)
{
        struct nvkm_mm_node *node;

        pr_err("nvkm: %s\n", header);
        pr_err("nvkm: node list:\n");
        list_for_each_entry(node, &mm->nodes, nl_entry) {
                pr_err("nvkm: \t%08x %08x %d\n",
                       node->offset, node->length, node->type);
        }
        pr_err("nvkm: free list:\n");
        list_for_each_entry(node, &mm->free, fl_entry) {
                pr_err("nvkm: \t%08x %08x %d\n",
                       node->offset, node->length, node->type);
        }
}

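/* Release an allocated node back to the allocator and clear the
 * caller's pointer.  Where possible the node is coalesced with a free
 * (NVKM_MM_TYPE_NONE) neighbour; otherwise it is marked free itself and
 * inserted into the free list, which is kept sorted by offset.
 */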
void
nvkm_mm_free(struct nvkm_mm *mm, struct nvkm_mm_node **pthis)
{
        struct nvkm_mm_node *this = *pthis;

        if (this) {
                struct nvkm_mm_node *prev = node(this, prev);
                struct nvkm_mm_node *next = node(this, next);

                /* merge into a free node immediately before this one */
                if (prev && prev->type == NVKM_MM_TYPE_NONE) {
                        prev->length += this->length;
                        list_del(&this->nl_entry);
                        kfree(this); this = prev;
                }

                /* likewise, merge into a free node immediately after */
                if (next && next->type == NVKM_MM_TYPE_NONE) {
                        next->offset  = this->offset;
                        next->length += this->length;
                        if (this->type == NVKM_MM_TYPE_NONE)
                                list_del(&this->fl_entry);
                        list_del(&this->nl_entry);
                        kfree(this); this = NULL;
                }

                /* no free neighbour: insert into the offset-sorted free list */
                if (this && this->type != NVKM_MM_TYPE_NONE) {
                        list_for_each_entry(prev, &mm->free, fl_entry) {
                                if (this->offset < prev->offset)
                                        break;
                        }

                        list_add_tail(&this->fl_entry, &prev->fl_entry);
                        this->type = NVKM_MM_TYPE_NONE;
                }
        }

        *pthis = NULL;
}

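/* Split "size" units off the front of region "a", returning "a" itself
 * on an exact fit, a new node (inheriting a's heap and type) covering
 * the head portion otherwise, or NULL if the split allocation fails.
 */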
static struct nvkm_mm_node *
region_head(struct nvkm_mm *mm, struct nvkm_mm_node *a, u32 size)
{
        struct nvkm_mm_node *b;

        if (a->length == size)
                return a;

        b = kmalloc(sizeof(*b), GFP_KERNEL);
        if (unlikely(b == NULL))
                return NULL;

        b->offset = a->offset;
        b->length = size;
        b->heap   = a->heap;
        b->type   = a->type;
        a->offset += size;
        a->length -= size;
        list_add_tail(&b->nl_entry, &a->nl_entry);
        if (b->type == NVKM_MM_TYPE_NONE)
                list_add_tail(&b->fl_entry, &a->fl_entry);

        return b;
}

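/* Allocate between size_min and size_max units from the bottom of the
 * lowest suitable free region.  "align" must be a power of two, and
 * edges bordering a neighbour of a different type are rounded inwards
 * to mm->block_size.  Returns 0 with *pnode set on success, -ENOMEM if
 * splitting a region fails, or -ENOSPC when nothing suitable is free.
 */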
int
nvkm_mm_head(struct nvkm_mm *mm, u8 heap, u8 type, u32 size_max, u32 size_min,
             u32 align, struct nvkm_mm_node **pnode)
{
        struct nvkm_mm_node *prev, *this, *next;
        u32 mask = align - 1;
        u32 splitoff;
        u32 s, e;

        BUG_ON(type == NVKM_MM_TYPE_NONE || type == NVKM_MM_TYPE_HOLE);

        list_for_each_entry(this, &mm->free, fl_entry) {
                if (unlikely(heap != NVKM_MM_HEAP_ANY)) {
                        if (this->heap != heap)
                                continue;
                }
                e = this->offset + this->length;
                s = this->offset;

                /* round edges bordering a neighbour of a different type
                 * inwards to block_size granularity
                 */
                prev = node(this, prev);
                if (prev && prev->type != type)
                        s = roundup(s, mm->block_size);

                next = node(this, next);
                if (next && next->type != type)
                        e = rounddown(e, mm->block_size);

                /* apply the requested alignment, then check what's left */
                s  = (s + mask) & ~mask;
                e &= ~mask;
                if (s > e || e - s < size_min)
                        continue;

                /* split off anything free below the allocation ... */
                splitoff = s - this->offset;
                if (splitoff && !region_head(mm, this, splitoff))
                        return -ENOMEM;

                /* ... then carve the allocation itself off the front */
                this = region_head(mm, this, min(size_max, e - s));
                if (!this)
                        return -ENOMEM;

                this->next = NULL;
                this->type = type;
                list_del(&this->fl_entry);
                *pnode = this;
                return 0;
        }

        return -ENOSPC;
}

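/* As region_head(), but splits "size" units off the back of region "a".
 */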
static struct nvkm_mm_node *
region_tail(struct nvkm_mm *mm, struct nvkm_mm_node *a, u32 size)
{
        struct nvkm_mm_node *b;

        if (a->length == size)
                return a;

        b = kmalloc(sizeof(*b), GFP_KERNEL);
        if (unlikely(b == NULL))
                return NULL;

        a->length -= size;
        b->offset  = a->offset + a->length;
        b->length  = size;
        b->heap    = a->heap;
        b->type    = a->type;

        list_add(&b->nl_entry, &a->nl_entry);
        if (b->type == NVKM_MM_TYPE_NONE)
                list_add(&b->fl_entry, &a->fl_entry);

        return b;
}

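/* As nvkm_mm_head(), but scans the free list in reverse and carves the
 * allocation out of the top of the highest suitable free region.
 */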
int
nvkm_mm_tail(struct nvkm_mm *mm, u8 heap, u8 type, u32 size_max, u32 size_min,
             u32 align, struct nvkm_mm_node **pnode)
{
        struct nvkm_mm_node *prev, *this, *next;
        u32 mask = align - 1;

        BUG_ON(type == NVKM_MM_TYPE_NONE || type == NVKM_MM_TYPE_HOLE);

        list_for_each_entry_reverse(this, &mm->free, fl_entry) {
                u32 e = this->offset + this->length;
                u32 s = this->offset;
                u32 c = 0, a;
                if (unlikely(heap != NVKM_MM_HEAP_ANY)) {
                        if (this->heap != heap)
                                continue;
                }

                /* round edges bordering a neighbour of a different type
                 * inwards to block_size granularity
                 */
                prev = node(this, prev);
                if (prev && prev->type != type)
                        s = roundup(s, mm->block_size);

                next = node(this, next);
                if (next && next->type != type) {
                        e = rounddown(e, mm->block_size);
                        c = next->offset - e;
                }

                s = (s + mask) & ~mask;
                a = e - s;
                if (s > e || a < size_min)
                        continue;

                /* c = slack to leave free at the top of the region, from
                 * block rounding plus any alignment shortfall
                 */
                a  = min(a, size_max);
                s  = (e - a) & ~mask;
                c += (e - s) - a;

                if (c && !region_tail(mm, this, c))
                        return -ENOMEM;

                /* carve the allocation off the back of what remains */
                this = region_tail(mm, this, a);
                if (!this)
                        return -ENOMEM;

                this->next = NULL;
                this->type = type;
                list_del(&this->fl_entry);
                *pnode = this;
                return 0;
        }

        return -ENOSPC;
}

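/* Initialise an allocator covering [offset, offset + length), trimmed
 * inwards to "block"-sized granularity, or graft a further heap onto an
 * already-initialised allocator; any gap between the end of the
 * previous range and "offset" is plugged with a hole node.
 *
 * A minimal usage sketch (the heap/type/size values are illustrative,
 * not taken from a real caller):
 *
 *      struct nvkm_mm mm = {};
 *      struct nvkm_mm_node *node = NULL;
 *      int ret;
 *
 *      ret = nvkm_mm_init(&mm, 0, 0, 0x4000, 1);
 *      if (ret == 0)
 *              ret = nvkm_mm_head(&mm, NVKM_MM_HEAP_ANY, 1, 0x100, 0x100,
 *                                 1, &node);
 *      ...
 *      nvkm_mm_free(&mm, &node);
 *      nvkm_mm_fini(&mm);
 */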
int
nvkm_mm_init(struct nvkm_mm *mm, u8 heap, u32 offset, u32 length, u32 block)
{
        struct nvkm_mm_node *node, *prev;
        u32 next;

        if (nvkm_mm_initialised(mm)) {
                /* extending an existing allocator: plug any gap between
                 * the end of the last range and the new one with a hole
                 */
                prev = list_last_entry(&mm->nodes, typeof(*node), nl_entry);
                next = prev->offset + prev->length;
                if (next != offset) {
                        BUG_ON(next > offset);
                        if (!(node = kzalloc(sizeof(*node), GFP_KERNEL)))
                                return -ENOMEM;
                        node->type   = NVKM_MM_TYPE_HOLE;
                        node->offset = next;
                        node->length = offset - next;
                        list_add_tail(&node->nl_entry, &mm->nodes);
                }
                BUG_ON(block != mm->block_size);
        } else {
                INIT_LIST_HEAD(&mm->nodes);
                INIT_LIST_HEAD(&mm->free);
                mm->block_size = block;
                mm->heap_nodes = 0;
        }

        node = kzalloc(sizeof(*node), GFP_KERNEL);
        if (!node)
                return -ENOMEM;

        /* trim the new heap's range inwards to block_size granularity */
        if (length) {
                node->offset  = roundup(offset, mm->block_size);
                node->length  = rounddown(offset + length, mm->block_size);
                node->length -= node->offset;
        }

        list_add_tail(&node->nl_entry, &mm->nodes);
        list_add_tail(&node->fl_entry, &mm->free);
        node->heap = heap;
        mm->heap_nodes++;
        return 0;
}

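/* Tear down an allocator, freeing every node.  Fails with -EBUSY (after
 * dumping state) if outstanding allocations remain, i.e. if there are
 * more non-hole nodes than the per-heap spans created at init time.
 */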
int
nvkm_mm_fini(struct nvkm_mm *mm)
{
        struct nvkm_mm_node *node, *temp;
        int nodes = 0;

        if (!nvkm_mm_initialised(mm))
                return 0;

        list_for_each_entry(node, &mm->nodes, nl_entry) {
                if (node->type != NVKM_MM_TYPE_HOLE) {
                        if (++nodes > mm->heap_nodes) {
                                nvkm_mm_dump(mm, "mm not clean!");
                                return -EBUSY;
                        }
                }
        }

        list_for_each_entry_safe(node, temp, &mm->nodes, nl_entry) {
                list_del(&node->nl_entry);
                kfree(node);
        }

        mm->heap_nodes = 0;
        return 0;
}