/* linux/include/drm/drm_mm.h */
/**************************************************************************
 *
 * Copyright 2006-2008 Tungsten Graphics, Inc., Cedar Park, TX. USA.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 *
 **************************************************************************/
/*
 * Authors:
 * Thomas Hellstrom <thomas-at-tungstengraphics-dot-com>
 */

#ifndef _DRM_MM_H_
#define _DRM_MM_H_

/*
 * Generic range manager structs
 */
#include <linux/bug.h>
#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/spinlock.h>
#ifdef CONFIG_DEBUG_FS
#include <linux/seq_file.h>
#endif

  47enum drm_mm_search_flags {
  48        DRM_MM_SEARCH_DEFAULT =         0,
  49        DRM_MM_SEARCH_BEST =            1 << 0,
  50        DRM_MM_SEARCH_BELOW =           1 << 1,
  51};
  52
  53enum drm_mm_allocator_flags {
  54        DRM_MM_CREATE_DEFAULT =         0,
  55        DRM_MM_CREATE_TOP =             1 << 0,
  56};
  57
  58#define DRM_MM_BOTTOMUP DRM_MM_SEARCH_DEFAULT, DRM_MM_CREATE_DEFAULT
  59#define DRM_MM_TOPDOWN DRM_MM_SEARCH_BELOW, DRM_MM_CREATE_TOP
  60
  61struct drm_mm_node {
  62        struct list_head node_list;
  63        struct list_head hole_stack;
  64        unsigned hole_follows : 1;
  65        unsigned scanned_block : 1;
  66        unsigned scanned_prev_free : 1;
  67        unsigned scanned_next_free : 1;
  68        unsigned scanned_preceeds_hole : 1;
  69        unsigned allocated : 1;
  70        unsigned long color;
  71        u64 start;
  72        u64 size;
  73        struct drm_mm *mm;
  74};
  75
  76struct drm_mm {
  77        /* List of all memory nodes that immediately precede a free hole. */
  78        struct list_head hole_stack;
  79        /* head_node.node_list is the list of all memory nodes, ordered
  80         * according to the (increasing) start address of the memory node. */
  81        struct drm_mm_node head_node;
  82        unsigned int scan_check_range : 1;
  83        unsigned scan_alignment;
  84        unsigned long scan_color;
  85        u64 scan_size;
  86        u64 scan_hit_start;
  87        u64 scan_hit_end;
  88        unsigned scanned_blocks;
  89        u64 scan_start;
  90        u64 scan_end;
  91        struct drm_mm_node *prev_scanned_node;
  92
  93        void (*color_adjust)(struct drm_mm_node *node, unsigned long color,
  94                             u64 *start, u64 *end);
  95};
  96
  97/**
  98 * drm_mm_node_allocated - checks whether a node is allocated
  99 * @node: drm_mm_node to check
 100 *
 101 * Drivers should use this helpers for proper encapusulation of drm_mm
 102 * internals.
 103 *
 104 * Returns:
 105 * True if the @node is allocated.
 106 */
 107static inline bool drm_mm_node_allocated(struct drm_mm_node *node)
 108{
 109        return node->allocated;
 110}
 111
 112/**
 113 * drm_mm_initialized - checks whether an allocator is initialized
 114 * @mm: drm_mm to check
 115 *
 116 * Drivers should use this helpers for proper encapusulation of drm_mm
 117 * internals.
 118 *
 119 * Returns:
 120 * True if the @mm is initialized.
 121 */
 122static inline bool drm_mm_initialized(struct drm_mm *mm)
 123{
 124        return mm->hole_stack.next;
 125}
 126
 127static inline u64 __drm_mm_hole_node_start(struct drm_mm_node *hole_node)
 128{
 129        return hole_node->start + hole_node->size;
 130}
 131
 132/**
 133 * drm_mm_hole_node_start - computes the start of the hole following @node
 134 * @hole_node: drm_mm_node which implicitly tracks the following hole
 135 *
 136 * This is useful for driver-sepific debug dumpers. Otherwise drivers should not
 137 * inspect holes themselves. Drivers must check first whether a hole indeed
 138 * follows by looking at node->hole_follows.
 139 *
 140 * Returns:
 141 * Start of the subsequent hole.
 142 */
 143static inline u64 drm_mm_hole_node_start(struct drm_mm_node *hole_node)
 144{
 145        BUG_ON(!hole_node->hole_follows);
 146        return __drm_mm_hole_node_start(hole_node);
 147}
 148
 149static inline u64 __drm_mm_hole_node_end(struct drm_mm_node *hole_node)
 150{
 151        return list_next_entry(hole_node, node_list)->start;
 152}
 153
 154/**
 155 * drm_mm_hole_node_end - computes the end of the hole following @node
 156 * @hole_node: drm_mm_node which implicitly tracks the following hole
 157 *
 158 * This is useful for driver-sepific debug dumpers. Otherwise drivers should not
 159 * inspect holes themselves. Drivers must check first whether a hole indeed
 160 * follows by looking at node->hole_follows.
 161 *
 162 * Returns:
 163 * End of the subsequent hole.
 164 */
 165static inline u64 drm_mm_hole_node_end(struct drm_mm_node *hole_node)
 166{
 167        return __drm_mm_hole_node_end(hole_node);
 168}
 169
 170/**
 171 * drm_mm_for_each_node - iterator to walk over all allocated nodes
 172 * @entry: drm_mm_node structure to assign to in each iteration step
 173 * @mm: drm_mm allocator to walk
 174 *
 175 * This iterator walks over all nodes in the range allocator. It is implemented
 176 * with list_for_each, so not save against removal of elements.
 177 */
 178#define drm_mm_for_each_node(entry, mm) list_for_each_entry(entry, \
 179                                                &(mm)->head_node.node_list, \
 180                                                node_list)
 181
 182#define __drm_mm_for_each_hole(entry, mm, hole_start, hole_end, backwards) \
 183        for (entry = list_entry((backwards) ? (mm)->hole_stack.prev : (mm)->hole_stack.next, struct drm_mm_node, hole_stack); \
 184             &entry->hole_stack != &(mm)->hole_stack ? \
 185             hole_start = drm_mm_hole_node_start(entry), \
 186             hole_end = drm_mm_hole_node_end(entry), \
 187             1 : 0; \
 188             entry = list_entry((backwards) ? entry->hole_stack.prev : entry->hole_stack.next, struct drm_mm_node, hole_stack))
 189
 190/**
 191 * drm_mm_for_each_hole - iterator to walk over all holes
 192 * @entry: drm_mm_node used internally to track progress
 193 * @mm: drm_mm allocator to walk
 194 * @hole_start: ulong variable to assign the hole start to on each iteration
 195 * @hole_end: ulong variable to assign the hole end to on each iteration
 196 *
 197 * This iterator walks over all holes in the range allocator. It is implemented
 198 * with list_for_each, so not save against removal of elements. @entry is used
 199 * internally and will not reflect a real drm_mm_node for the very first hole.
 200 * Hence users of this iterator may not access it.
 201 *
 202 * Implementation Note:
 203 * We need to inline list_for_each_entry in order to be able to set hole_start
 204 * and hole_end on each iteration while keeping the macro sane.
 205 *
 206 * The __drm_mm_for_each_hole version is similar, but with added support for
 207 * going backwards.
 208 */
 209#define drm_mm_for_each_hole(entry, mm, hole_start, hole_end) \
 210        __drm_mm_for_each_hole(entry, mm, hole_start, hole_end, 0)
 211
 212/*
 213 * Basic range manager support (drm_mm.c)
 214 */
 215int drm_mm_reserve_node(struct drm_mm *mm, struct drm_mm_node *node);
 216
 217int drm_mm_insert_node_generic(struct drm_mm *mm,
 218                               struct drm_mm_node *node,
 219                               u64 size,
 220                               unsigned alignment,
 221                               unsigned long color,
 222                               enum drm_mm_search_flags sflags,
 223                               enum drm_mm_allocator_flags aflags);
 224/**
 225 * drm_mm_insert_node - search for space and insert @node
 226 * @mm: drm_mm to allocate from
 227 * @node: preallocate node to insert
 228 * @size: size of the allocation
 229 * @alignment: alignment of the allocation
 230 * @flags: flags to fine-tune the allocation
 231 *
 232 * This is a simplified version of drm_mm_insert_node_generic() with @color set
 233 * to 0.
 234 *
 235 * The preallocated node must be cleared to 0.
 236 *
 237 * Returns:
 238 * 0 on success, -ENOSPC if there's no suitable hole.
 239 */
 240static inline int drm_mm_insert_node(struct drm_mm *mm,
 241                                     struct drm_mm_node *node,
 242                                     u64 size,
 243                                     unsigned alignment,
 244                                     enum drm_mm_search_flags flags)
 245{
 246        return drm_mm_insert_node_generic(mm, node, size, alignment, 0, flags,
 247                                          DRM_MM_CREATE_DEFAULT);
 248}
 249
 250int drm_mm_insert_node_in_range_generic(struct drm_mm *mm,
 251                                        struct drm_mm_node *node,
 252                                        u64 size,
 253                                        unsigned alignment,
 254                                        unsigned long color,
 255                                        u64 start,
 256                                        u64 end,
 257                                        enum drm_mm_search_flags sflags,
 258                                        enum drm_mm_allocator_flags aflags);
 259/**
 260 * drm_mm_insert_node_in_range - ranged search for space and insert @node
 261 * @mm: drm_mm to allocate from
 262 * @node: preallocate node to insert
 263 * @size: size of the allocation
 264 * @alignment: alignment of the allocation
 265 * @start: start of the allowed range for this node
 266 * @end: end of the allowed range for this node
 267 * @flags: flags to fine-tune the allocation
 268 *
 269 * This is a simplified version of drm_mm_insert_node_in_range_generic() with
 270 * @color set to 0.
 271 *
 272 * The preallocated node must be cleared to 0.
 273 *
 274 * Returns:
 275 * 0 on success, -ENOSPC if there's no suitable hole.
 276 */
 277static inline int drm_mm_insert_node_in_range(struct drm_mm *mm,
 278                                              struct drm_mm_node *node,
 279                                              u64 size,
 280                                              unsigned alignment,
 281                                              u64 start,
 282                                              u64 end,
 283                                              enum drm_mm_search_flags flags)
 284{
 285        return drm_mm_insert_node_in_range_generic(mm, node, size, alignment,
 286                                                   0, start, end, flags,
 287                                                   DRM_MM_CREATE_DEFAULT);
 288}
 289
 290void drm_mm_remove_node(struct drm_mm_node *node);
 291void drm_mm_replace_node(struct drm_mm_node *old, struct drm_mm_node *new);
 292void drm_mm_init(struct drm_mm *mm,
 293                 u64 start,
 294                 u64 size);
 295void drm_mm_takedown(struct drm_mm *mm);
 296bool drm_mm_clean(struct drm_mm *mm);
 297
 298void drm_mm_init_scan(struct drm_mm *mm,
 299                      u64 size,
 300                      unsigned alignment,
 301                      unsigned long color);
 302void drm_mm_init_scan_with_range(struct drm_mm *mm,
 303                                 u64 size,
 304                                 unsigned alignment,
 305                                 unsigned long color,
 306                                 u64 start,
 307                                 u64 end);
 308bool drm_mm_scan_add_block(struct drm_mm_node *node);
 309bool drm_mm_scan_remove_block(struct drm_mm_node *node);
 310
 311void drm_mm_debug_table(struct drm_mm *mm, const char *prefix);
 312#ifdef CONFIG_DEBUG_FS
 313int drm_mm_dump_table(struct seq_file *m, struct drm_mm *mm);
 314#endif
 315
#endif