linux/drivers/iommu/iova.c
/*
 * Copyright © 2006-2009, Intel Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
 * Place - Suite 330, Boston, MA 02111-1307 USA.
 *
 * Author: Anil S Keshavamurthy <anil.s.keshavamurthy@intel.com>
 */

#include <linux/iova.h>

void
init_iova_domain(struct iova_domain *iovad, unsigned long pfn_32bit)
{
        spin_lock_init(&iovad->iova_rbtree_lock);
        iovad->rbroot = RB_ROOT;
        iovad->cached32_node = NULL;
        iovad->dma_32bit_pfn = pfn_32bit;
}

static struct rb_node *
__get_cached_rbnode(struct iova_domain *iovad, unsigned long *limit_pfn)
{
        if ((*limit_pfn != iovad->dma_32bit_pfn) ||
                (iovad->cached32_node == NULL))
                return rb_last(&iovad->rbroot);
        else {
                struct rb_node *prev_node = rb_prev(iovad->cached32_node);
                struct iova *curr_iova =
                        container_of(iovad->cached32_node, struct iova, node);
                *limit_pfn = curr_iova->pfn_lo - 1;
                return prev_node;
        }
}

static void
__cached_rbnode_insert_update(struct iova_domain *iovad,
        unsigned long limit_pfn, struct iova *new)
{
        if (limit_pfn != iovad->dma_32bit_pfn)
                return;
        iovad->cached32_node = &new->node;
}

static void
__cached_rbnode_delete_update(struct iova_domain *iovad, struct iova *free)
{
        struct iova *cached_iova;
        struct rb_node *curr;

        if (!iovad->cached32_node)
                return;
        curr = iovad->cached32_node;
        cached_iova = container_of(curr, struct iova, node);

        if (free->pfn_lo >= cached_iova->pfn_lo) {
                struct rb_node *node = rb_next(&free->node);
                struct iova *iova = container_of(node, struct iova, node);

                /* only cache if it's below 32bit pfn */
                if (node && iova->pfn_lo < iovad->dma_32bit_pfn)
                        iovad->cached32_node = node;
                else
                        iovad->cached32_node = NULL;
        }
}

/* Computes the padding size required to make the
 * start address naturally aligned on its size
 */
static int
iova_get_pad_size(int size, unsigned int limit_pfn)
{
        unsigned int pad_size = 0;
        unsigned int order = ilog2(size);

        if (order)
                pad_size = (limit_pfn + 1) % (1 << order);

        return pad_size;
}
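
/*
 * Worked example of the padding computation above (the numbers are
 * hypothetical, chosen only for illustration): for size = 8 pfns and
 * limit_pfn = 0x3d, order = ilog2(8) = 3, so
 * pad_size = (0x3d + 1) % (1 << 3) = 0x3e % 8 = 6.  With the formula used
 * in __alloc_and_insert_iova_range() below, pfn_lo = 0x3d - (8 + 6) + 1 =
 * 0x30, which is a multiple of 8, i.e. naturally aligned on the
 * (power-of-two) size, while the 6 padding pfns 0x38-0x3d stay free.
 */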

static int __alloc_and_insert_iova_range(struct iova_domain *iovad,
                unsigned long size, unsigned long limit_pfn,
                        struct iova *new, bool size_aligned)
{
        struct rb_node *prev, *curr = NULL;
        unsigned long flags;
        unsigned long saved_pfn;
        unsigned int pad_size = 0;

        /* Walk the tree backwards */
        spin_lock_irqsave(&iovad->iova_rbtree_lock, flags);
        saved_pfn = limit_pfn;
        curr = __get_cached_rbnode(iovad, &limit_pfn);
        prev = curr;
        while (curr) {
                struct iova *curr_iova = container_of(curr, struct iova, node);

                if (limit_pfn < curr_iova->pfn_lo)
                        goto move_left;
                else if (limit_pfn < curr_iova->pfn_hi)
                        goto adjust_limit_pfn;
                else {
                        if (size_aligned)
                                pad_size = iova_get_pad_size(size, limit_pfn);
                        if ((curr_iova->pfn_hi + size + pad_size) <= limit_pfn)
                                break;  /* found a free slot */
                }
adjust_limit_pfn:
                limit_pfn = curr_iova->pfn_lo - 1;
move_left:
                prev = curr;
                curr = rb_prev(curr);
        }

        if (!curr) {
                if (size_aligned)
                        pad_size = iova_get_pad_size(size, limit_pfn);
                if ((IOVA_START_PFN + size + pad_size) > limit_pfn) {
                        spin_unlock_irqrestore(&iovad->iova_rbtree_lock, flags);
                        return -ENOMEM;
                }
        }

        /* pfn_lo will point to size aligned address if size_aligned is set */
        new->pfn_lo = limit_pfn - (size + pad_size) + 1;
        new->pfn_hi = new->pfn_lo + size - 1;

        /* Insert the new_iova into domain rbtree by holding writer lock */
        /* Add new node and rebalance tree. */
        {
                struct rb_node **entry, *parent = NULL;

                /* If we have 'prev', it's a valid place to start the
                   insertion. Otherwise, start from the root. */
                if (prev)
                        entry = &prev;
                else
                        entry = &iovad->rbroot.rb_node;

                /* Figure out where to put new node */
                while (*entry) {
                        struct iova *this = container_of(*entry,
                                                        struct iova, node);
                        parent = *entry;

                        if (new->pfn_lo < this->pfn_lo)
                                entry = &((*entry)->rb_left);
                        else if (new->pfn_lo > this->pfn_lo)
                                entry = &((*entry)->rb_right);
                        else
                                BUG(); /* this should not happen */
                }

                /* Add new node and rebalance tree. */
                rb_link_node(&new->node, parent, entry);
                rb_insert_color(&new->node, &iovad->rbroot);
        }
        __cached_rbnode_insert_update(iovad, saved_pfn, new);

        spin_unlock_irqrestore(&iovad->iova_rbtree_lock, flags);

        return 0;
}

static void
iova_insert_rbtree(struct rb_root *root, struct iova *iova)
{
        struct rb_node **new = &(root->rb_node), *parent = NULL;
        /* Figure out where to put new node */
        while (*new) {
                struct iova *this = container_of(*new, struct iova, node);
                parent = *new;

                if (iova->pfn_lo < this->pfn_lo)
                        new = &((*new)->rb_left);
                else if (iova->pfn_lo > this->pfn_lo)
                        new = &((*new)->rb_right);
                else
                        BUG(); /* this should not happen */
        }
        /* Add new node and rebalance tree. */
        rb_link_node(&iova->node, parent, new);
        rb_insert_color(&iova->node, root);
}

/**
 * alloc_iova - allocates an iova
 * @iovad: - iova domain in question
 * @size: - number of page frames to allocate
 * @limit_pfn: - max limit address
 * @size_aligned: - set if size_aligned address range is required
 * This function allocates an iova in the range IOVA_START_PFN to limit_pfn,
 * searching downwards from limit_pfn rather than upwards from IOVA_START_PFN.
 * If the size_aligned flag is set then the allocated address iova->pfn_lo
 * will be naturally aligned on roundup_power_of_two(size).
 */
struct iova *
alloc_iova(struct iova_domain *iovad, unsigned long size,
        unsigned long limit_pfn,
        bool size_aligned)
{
        struct iova *new_iova;
        int ret;

        new_iova = alloc_iova_mem();
        if (!new_iova)
                return NULL;

        /* If size aligned is set then round the size up
         * to the next power of two.
         */
        if (size_aligned)
                size = __roundup_pow_of_two(size);

        ret = __alloc_and_insert_iova_range(iovad, size, limit_pfn,
                        new_iova, size_aligned);

        if (ret) {
                free_iova_mem(new_iova);
                return NULL;
        }

        return new_iova;
}
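
/*
 * Minimal usage sketch (the function name and the request size below are
 * hypothetical, not part of this API): a caller typically asks for a
 * size-aligned run of pfns below the domain's 32-bit boundary and returns
 * it with __free_iova() once the mapping is torn down.
 */
static void __maybe_unused iova_alloc_example(struct iova_domain *iovad)
{
        /* 16 pfns, size-aligned, searched downwards from dma_32bit_pfn. */
        struct iova *iova = alloc_iova(iovad, 16, iovad->dma_32bit_pfn, true);

        if (!iova)
                return;         /* no free range large enough */

        /* The caller now owns pfns iova->pfn_lo .. iova->pfn_hi. */

        __free_iova(iovad, iova);
}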

/**
 * find_iova - finds an iova for a given pfn
 * @iovad: - iova domain in question.
 * @pfn: - page frame number
 * This function finds and returns an iova belonging to the
 * given domain which matches the given pfn.
 */
struct iova *find_iova(struct iova_domain *iovad, unsigned long pfn)
{
        unsigned long flags;
        struct rb_node *node;

        /* Take the lock so that no other thread is manipulating the rbtree */
        spin_lock_irqsave(&iovad->iova_rbtree_lock, flags);
        node = iovad->rbroot.rb_node;
        while (node) {
                struct iova *iova = container_of(node, struct iova, node);

                /* If pfn falls within iova's range, return iova */
                if ((pfn >= iova->pfn_lo) && (pfn <= iova->pfn_hi)) {
                        spin_unlock_irqrestore(&iovad->iova_rbtree_lock, flags);
                        /* We are not holding the lock while this iova
                         * is referenced by the caller as the same thread
                         * which called this function also calls __free_iova()
                         * and it is by design that only one thread can possibly
                         * reference a particular iova and hence no conflict.
                         */
                        return iova;
                }

                if (pfn < iova->pfn_lo)
                        node = node->rb_left;
                else if (pfn > iova->pfn_lo)
                        node = node->rb_right;
        }

        spin_unlock_irqrestore(&iovad->iova_rbtree_lock, flags);
        return NULL;
}

/**
 * __free_iova - frees the given iova
 * @iovad: iova domain in question.
 * @iova: iova in question.
 * Frees the given iova belonging to the given domain
 */
void
__free_iova(struct iova_domain *iovad, struct iova *iova)
{
        unsigned long flags;

        spin_lock_irqsave(&iovad->iova_rbtree_lock, flags);
        __cached_rbnode_delete_update(iovad, iova);
        rb_erase(&iova->node, &iovad->rbroot);
        spin_unlock_irqrestore(&iovad->iova_rbtree_lock, flags);
        free_iova_mem(iova);
}

/**
 * free_iova - finds and frees the iova for a given pfn
 * @iovad: - iova domain in question.
 * @pfn: - pfn that is allocated previously
 * This function finds an iova for a given pfn and then
 * frees the iova from that domain.
 */
void
free_iova(struct iova_domain *iovad, unsigned long pfn)
{
        struct iova *iova = find_iova(iovad, pfn);

        if (iova)
                __free_iova(iovad, iova);
}
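
/*
 * Minimal lookup sketch (the function name and the pfn argument are
 * hypothetical): a caller that only remembers one pfn of a mapping can
 * recover the full reserved range with find_iova() before releasing it.
 */
static void __maybe_unused iova_lookup_example(struct iova_domain *iovad,
        unsigned long pfn)
{
        struct iova *iova = find_iova(iovad, pfn);

        if (!iova)
                return;         /* pfn was never allocated from this domain */

        pr_debug("pfn %lx lies in iova range [%lx, %lx]\n",
                 pfn, iova->pfn_lo, iova->pfn_hi);

        __free_iova(iovad, iova);       /* equivalent to free_iova(iovad, pfn) */
}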

/**
 * put_iova_domain - destroys the iova domain
 * @iovad: - iova domain in question.
 * All the iovas in that domain are destroyed.
 */
void put_iova_domain(struct iova_domain *iovad)
{
        struct rb_node *node;
        unsigned long flags;

        spin_lock_irqsave(&iovad->iova_rbtree_lock, flags);
        node = rb_first(&iovad->rbroot);
        while (node) {
                struct iova *iova = container_of(node, struct iova, node);

                rb_erase(node, &iovad->rbroot);
                free_iova_mem(iova);
                node = rb_first(&iovad->rbroot);
        }
        spin_unlock_irqrestore(&iovad->iova_rbtree_lock, flags);
}

static int
__is_range_overlap(struct rb_node *node,
        unsigned long pfn_lo, unsigned long pfn_hi)
{
        struct iova *iova = container_of(node, struct iova, node);

        if ((pfn_lo <= iova->pfn_hi) && (pfn_hi >= iova->pfn_lo))
                return 1;
        return 0;
}

static inline struct iova *
alloc_and_init_iova(unsigned long pfn_lo, unsigned long pfn_hi)
{
        struct iova *iova;

        iova = alloc_iova_mem();
        if (iova) {
                iova->pfn_lo = pfn_lo;
                iova->pfn_hi = pfn_hi;
        }

        return iova;
}

static struct iova *
__insert_new_range(struct iova_domain *iovad,
        unsigned long pfn_lo, unsigned long pfn_hi)
{
        struct iova *iova;

        iova = alloc_and_init_iova(pfn_lo, pfn_hi);
        if (iova)
                iova_insert_rbtree(&iovad->rbroot, iova);

        return iova;
}

static void
__adjust_overlap_range(struct iova *iova,
        unsigned long *pfn_lo, unsigned long *pfn_hi)
{
        if (*pfn_lo < iova->pfn_lo)
                iova->pfn_lo = *pfn_lo;
        if (*pfn_hi > iova->pfn_hi)
                *pfn_lo = iova->pfn_hi + 1;
}

/**
 * reserve_iova - reserves an iova in the given range
 * @iovad: - iova domain pointer
 * @pfn_lo: - lower page frame address
 * @pfn_hi: - higher page frame address
 * This function reserves the address range from pfn_lo to pfn_hi so
 * that it is not handed out as part of alloc_iova.
 */
struct iova *
reserve_iova(struct iova_domain *iovad,
        unsigned long pfn_lo, unsigned long pfn_hi)
{
        struct rb_node *node;
        unsigned long flags;
        struct iova *iova;
        unsigned int overlap = 0;

        spin_lock_irqsave(&iovad->iova_rbtree_lock, flags);
        for (node = rb_first(&iovad->rbroot); node; node = rb_next(node)) {
                if (__is_range_overlap(node, pfn_lo, pfn_hi)) {
                        iova = container_of(node, struct iova, node);
                        __adjust_overlap_range(iova, &pfn_lo, &pfn_hi);
                        if ((pfn_lo >= iova->pfn_lo) &&
                                (pfn_hi <= iova->pfn_hi))
                                goto finish;
                        overlap = 1;
                } else if (overlap)
                        break;
        }

        /* We get here either because this is the first range to be
         * reserved or because the remaining, non-overlapping part of
         * the range still needs to be inserted.
         */
        iova = __insert_new_range(iovad, pfn_lo, pfn_hi);
finish:
        spin_unlock_irqrestore(&iovad->iova_rbtree_lock, flags);
        return iova;
}
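
/*
 * Minimal reservation sketch (the function name and the pfn window are
 * hypothetical, chosen for illustration): carving a fixed pfn window out
 * of the domain up front guarantees that alloc_iova() will never hand
 * those pfns out, e.g. for an address range the platform claims for
 * interrupt delivery.
 */
static void __maybe_unused iova_reserve_example(struct iova_domain *iovad)
{
        if (!reserve_iova(iovad, 0xfee00, 0xfeeff))
                printk(KERN_ERR "Failed to reserve example pfn range\n");
}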

/**
 * copy_reserved_iova - copies the reserved iovas between domains
 * @from: - source domain from which to copy
 * @to: - destination domain to which to copy
 * This function copies the reserved iovas from one domain to
 * the other.
 */
void
copy_reserved_iova(struct iova_domain *from, struct iova_domain *to)
{
        unsigned long flags;
        struct rb_node *node;

        spin_lock_irqsave(&from->iova_rbtree_lock, flags);
        for (node = rb_first(&from->rbroot); node; node = rb_next(node)) {
                struct iova *iova = container_of(node, struct iova, node);
                struct iova *new_iova;

                new_iova = reserve_iova(to, iova->pfn_lo, iova->pfn_hi);
                if (!new_iova)
                        printk(KERN_ERR "Reserve iova range %lx-%lx failed\n",
                                iova->pfn_lo, iova->pfn_hi);
        }
        spin_unlock_irqrestore(&from->iova_rbtree_lock, flags);
}

struct iova *
split_and_remove_iova(struct iova_domain *iovad, struct iova *iova,
                      unsigned long pfn_lo, unsigned long pfn_hi)
{
        unsigned long flags;
        struct iova *prev = NULL, *next = NULL;

        spin_lock_irqsave(&iovad->iova_rbtree_lock, flags);
        if (iova->pfn_lo < pfn_lo) {
                prev = alloc_and_init_iova(iova->pfn_lo, pfn_lo - 1);
                if (prev == NULL)
                        goto error;
        }
        if (iova->pfn_hi > pfn_hi) {
                next = alloc_and_init_iova(pfn_hi + 1, iova->pfn_hi);
                if (next == NULL)
                        goto error;
        }

        __cached_rbnode_delete_update(iovad, iova);
        rb_erase(&iova->node, &iovad->rbroot);

        if (prev) {
                iova_insert_rbtree(&iovad->rbroot, prev);
                iova->pfn_lo = pfn_lo;
        }
        if (next) {
                iova_insert_rbtree(&iovad->rbroot, next);
                iova->pfn_hi = pfn_hi;
        }
        spin_unlock_irqrestore(&iovad->iova_rbtree_lock, flags);

        return iova;

error:
        spin_unlock_irqrestore(&iovad->iova_rbtree_lock, flags);
        if (prev)
                free_iova_mem(prev);
        return NULL;
}