linux/drivers/gpu/drm/nouveau/nouveau_mm.c
/*
 * Copyright 2010 Red Hat Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Ben Skeggs
 */

#include "drmP.h"
#include "nouveau_drv.h"
#include "nouveau_mm.h"

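/* Unlink a node from both the per-mm node list and the free list, then
 * free it. */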
static inline void
region_put(struct nouveau_mm *rmm, struct nouveau_mm_node *a)
{
        list_del(&a->nl_entry);
        list_del(&a->fl_entry);
        kfree(a);
}

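/*
 * Split "size" units off the front of node "a" into a new node, inserted
 * immediately before "a" on the node list (and on the free list when "a"
 * is free, i.e. type == 0).  Returns the front node, "a" itself when no
 * split is needed, or NULL if the new node could not be allocated.
 */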
static struct nouveau_mm_node *
region_split(struct nouveau_mm *rmm, struct nouveau_mm_node *a, u32 size)
{
        struct nouveau_mm_node *b;

        if (a->length == size)
                return a;

        b = kmalloc(sizeof(*b), GFP_KERNEL);
        if (unlikely(b == NULL))
                return NULL;

        b->offset = a->offset;
        b->length = size;
        b->type   = a->type;
        a->offset += size;
        a->length -= size;
        list_add_tail(&b->nl_entry, &a->nl_entry);
        if (b->type == 0)
                list_add_tail(&b->fl_entry, &a->fl_entry);
        return b;
}

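/* Neighbour of "root" in direction "dir" (prev/next) on the node list, or
 * NULL when "root" sits at that end of the list. */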
#define node(root, dir) ((root)->nl_entry.dir == &rmm->nodes) ? NULL : \
        list_entry((root)->nl_entry.dir, struct nouveau_mm_node, nl_entry)

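/*
 * Return a node to the allocator.  The node is marked free and merged with
 * its previous and/or next neighbour whenever those are free as well.
 */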
void
nouveau_mm_put(struct nouveau_mm *rmm, struct nouveau_mm_node *this)
{
        struct nouveau_mm_node *prev = node(this, prev);
        struct nouveau_mm_node *next = node(this, next);

        list_add(&this->fl_entry, &rmm->free);
        this->type = 0;

        if (prev && prev->type == 0) {
                prev->length += this->length;
                region_put(rmm, this);
                this = prev;
        }

        if (next && next->type == 0) {
                next->offset  = this->offset;
                next->length += this->length;
                region_put(rmm, this);
        }
}

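/*
 * Allocate a region of "size" units with the given power-of-two alignment
 * and type.  If "size_nc" is non-zero, an allocation as small as "size_nc"
 * units is acceptable when a full "size" region cannot be found.  Free
 * space bordering nodes of a different type is trimmed to block_size
 * boundaries.  Returns 0 and stores the node in *pnode, -ENOMEM if a split
 * fails, or -ENOSPC if no suitable free region exists.
 */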
int
nouveau_mm_get(struct nouveau_mm *rmm, int type, u32 size, u32 size_nc,
               u32 align, struct nouveau_mm_node **pnode)
{
        struct nouveau_mm_node *prev, *this, *next;
        u32 min = size_nc ? size_nc : size;
        u32 align_mask = align - 1;
        u32 splitoff;
        u32 s, e;

        list_for_each_entry(this, &rmm->free, fl_entry) {
                e = this->offset + this->length;
                s = this->offset;

                prev = node(this, prev);
                if (prev && prev->type != type)
                        s = roundup(s, rmm->block_size);

                next = node(this, next);
                if (next && next->type != type)
                        e = rounddown(e, rmm->block_size);

                s  = (s + align_mask) & ~align_mask;
                e &= ~align_mask;
                if (s > e || e - s < min)
                        continue;

                splitoff = s - this->offset;
                if (splitoff && !region_split(rmm, this, splitoff))
                        return -ENOMEM;

                this = region_split(rmm, this, min(size, e - s));
                if (!this)
                        return -ENOMEM;

                this->type = type;
                list_del(&this->fl_entry);
                *pnode = this;
                return 0;
        }

        return -ENOSPC;
}

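/*
 * Create an allocator covering "length" units starting at "offset", with
 * the managed range trimmed inward to "block"-sized boundaries.  The whole
 * range starts out as a single free heap node.
 */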
int
nouveau_mm_init(struct nouveau_mm **prmm, u32 offset, u32 length, u32 block)
{
        struct nouveau_mm *rmm;
        struct nouveau_mm_node *heap;

        heap = kzalloc(sizeof(*heap), GFP_KERNEL);
        if (!heap)
                return -ENOMEM;
        heap->offset = roundup(offset, block);
        heap->length = rounddown(offset + length, block) - heap->offset;

        rmm = kzalloc(sizeof(*rmm), GFP_KERNEL);
        if (!rmm) {
                kfree(heap);
                return -ENOMEM;
        }
        rmm->block_size = block;
        mutex_init(&rmm->mutex);
        INIT_LIST_HEAD(&rmm->nodes);
        INIT_LIST_HEAD(&rmm->free);
        list_add(&heap->nl_entry, &rmm->nodes);
        list_add(&heap->fl_entry, &rmm->free);

        *prmm = rmm;
        return 0;
}

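/*
 * Destroy an allocator.  Fails with -EBUSY while the node list has not
 * collapsed back to the single free heap node, i.e. while allocations are
 * still outstanding.
 */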
int
nouveau_mm_fini(struct nouveau_mm **prmm)
{
        struct nouveau_mm *rmm = *prmm;
        struct nouveau_mm_node *heap =
                list_first_entry(&rmm->nodes, struct nouveau_mm_node, nl_entry);

        if (!list_is_singular(&rmm->nodes))
                return -EBUSY;

        kfree(heap);
        kfree(rmm);
        *prmm = NULL;
        return 0;
}