/*
 *	linux/kernel/resource.c
 *
 * Copyright (C) 1999	Linus Torvalds
 * Copyright (C) 1999	Martin Mares <mj@ucw.cz>
 *
 * Arbitrary resource management.
 */
   9
  10#include <linux/module.h>
  11#include <linux/errno.h>
  12#include <linux/ioport.h>
  13#include <linux/init.h>
  14#include <linux/slab.h>
  15#include <linux/spinlock.h>
  16#include <linux/fs.h>
  17#include <linux/proc_fs.h>
  18#include <linux/seq_file.h>
  19#include <linux/device.h>
  20#include <asm/io.h>
  21
  22
/* Root of the I/O port resource tree; spans the whole I/O port space. */
struct resource ioport_resource = {
	.name	= "PCI IO",
	.start	= 0,
	.end	= IO_SPACE_LIMIT,
	.flags	= IORESOURCE_IO,
};
EXPORT_SYMBOL(ioport_resource);

/* Root of the memory-mapped I/O resource tree; .end = -1 wraps to the
 * maximum resource_size_t, i.e. it covers the entire address space. */
struct resource iomem_resource = {
	.name	= "PCI mem",
	.start	= 0,
	.end	= -1,
	.flags	= IORESOURCE_MEM,
};
EXPORT_SYMBOL(iomem_resource);

/* Protects both resource trees above: readers walk them, writers modify. */
static DEFINE_RWLOCK(resource_lock);
  40
#ifdef CONFIG_PROC_FS

/* Maximum nesting depth rendered in /proc/ioports and /proc/iomem. */
enum { MAX_IORES_LEVEL = 5 };
  44
  45static void *r_next(struct seq_file *m, void *v, loff_t *pos)
  46{
  47        struct resource *p = v;
  48        (*pos)++;
  49        if (p->child)
  50                return p->child;
  51        while (!p->sibling && p->parent)
  52                p = p->parent;
  53        return p->sibling;
  54}
  55
  56static void *r_start(struct seq_file *m, loff_t *pos)
  57        __acquires(resource_lock)
  58{
  59        struct resource *p = m->private;
  60        loff_t l = 0;
  61        read_lock(&resource_lock);
  62        for (p = p->child; p && l < *pos; p = r_next(m, p, &l))
  63                ;
  64        return p;
  65}
  66
/* seq_file iterator stop: drop the read lock taken in r_start(). */
static void r_stop(struct seq_file *m, void *v)
	__releases(resource_lock)
{
	read_unlock(&resource_lock);
}
  72
/*
 * Emit one line of /proc/ioports or /proc/iomem:
 * "<indent><start>-<end> : <name>", indented two spaces per tree level.
 */
static int r_show(struct seq_file *m, void *v)
{
	struct resource *root = m->private;
	struct resource *r = v, *p;
	/* 4 hex digits suffice for a <64K I/O space, otherwise print 8. */
	int width = root->end < 0x10000 ? 4 : 8;
	int depth;

	/* Depth = number of ancestors below root, capped at MAX_IORES_LEVEL. */
	for (depth = 0, p = r; depth < MAX_IORES_LEVEL; depth++, p = p->parent)
		if (p->parent == root)
			break;
	seq_printf(m, "%*s%0*llx-%0*llx : %s\n",
			depth * 2, "",
			width, (unsigned long long) r->start,
			width, (unsigned long long) r->end,
			r->name ? r->name : "<BAD>");
	return 0;
}
  90
/* seq_file hooks shared by /proc/ioports and /proc/iomem. */
static const struct seq_operations resource_op = {
	.start	= r_start,
	.next	= r_next,
	.stop	= r_stop,
	.show	= r_show,
};
  97
  98static int ioports_open(struct inode *inode, struct file *file)
  99{
 100        int res = seq_open(file, &resource_op);
 101        if (!res) {
 102                struct seq_file *m = file->private_data;
 103                m->private = &ioport_resource;
 104        }
 105        return res;
 106}
 107
 108static int iomem_open(struct inode *inode, struct file *file)
 109{
 110        int res = seq_open(file, &resource_op);
 111        if (!res) {
 112                struct seq_file *m = file->private_data;
 113                m->private = &iomem_resource;
 114        }
 115        return res;
 116}
 117
/* file_operations for /proc/ioports. */
static const struct file_operations proc_ioports_operations = {
	.open		= ioports_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= seq_release,
};

/* file_operations for /proc/iomem. */
static const struct file_operations proc_iomem_operations = {
	.open		= iomem_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= seq_release,
};
 131
 132static int __init ioresources_init(void)
 133{
 134        struct proc_dir_entry *entry;
 135
 136        entry = create_proc_entry("ioports", 0, NULL);
 137        if (entry)
 138                entry->proc_fops = &proc_ioports_operations;
 139        entry = create_proc_entry("iomem", 0, NULL);
 140        if (entry)
 141                entry->proc_fops = &proc_iomem_operations;
 142        return 0;
 143}
 144__initcall(ioresources_init);
 145
 146#endif /* CONFIG_PROC_FS */
 147
/* Return the conflict entry if you can't request it.
 * Caller must hold resource_lock for writing. */
static struct resource * __request_resource(struct resource *root, struct resource *new)
{
	resource_size_t start = new->start;
	resource_size_t end = new->end;
	struct resource *tmp, **p;

	/* Reject inverted ranges and ranges not contained in root. */
	if (end < start)
		return root;
	if (start < root->start)
		return root;
	if (end > root->end)
		return root;
	/*
	 * Children are kept sorted by start address; scan the sibling
	 * list for the insertion point.
	 */
	p = &root->child;
	for (;;) {
		tmp = *p;
		/* Past every entry that could overlap: link 'new' in here. */
		if (!tmp || tmp->start > end) {
			new->sibling = tmp;
			*p = new;
			new->parent = root;
			return NULL;	/* success */
		}
		p = &tmp->sibling;
		/* 'tmp' lies entirely before 'new'; keep scanning. */
		if (tmp->end < start)
			continue;
		/* Overlap: report the conflicting entry. */
		return tmp;
	}
}
 176
 177static int __release_resource(struct resource *old)
 178{
 179        struct resource *tmp, **p;
 180
 181        p = &old->parent->child;
 182        for (;;) {
 183                tmp = *p;
 184                if (!tmp)
 185                        break;
 186                if (tmp == old) {
 187                        *p = tmp->sibling;
 188                        old->parent = NULL;
 189                        return 0;
 190                }
 191                p = &tmp->sibling;
 192        }
 193        return -EINVAL;
 194}
 195
 196/**
 197 * request_resource - request and reserve an I/O or memory resource
 198 * @root: root resource descriptor
 199 * @new: resource descriptor desired by caller
 200 *
 201 * Returns 0 for success, negative error code on error.
 202 */
 203int request_resource(struct resource *root, struct resource *new)
 204{
 205        struct resource *conflict;
 206
 207        write_lock(&resource_lock);
 208        conflict = __request_resource(root, new);
 209        write_unlock(&resource_lock);
 210        return conflict ? -EBUSY : 0;
 211}
 212
 213EXPORT_SYMBOL(request_resource);
 214
 215/**
 216 * release_resource - release a previously reserved resource
 217 * @old: resource pointer
 218 */
 219int release_resource(struct resource *old)
 220{
 221        int retval;
 222
 223        write_lock(&resource_lock);
 224        retval = __release_resource(old);
 225        write_unlock(&resource_lock);
 226        return retval;
 227}
 228
 229EXPORT_SYMBOL(release_resource);
 230
#ifdef CONFIG_MEMORY_HOTPLUG
/*
 * Finds the lowest memory resource that exists within [res->start, res->end);
 * the caller must specify res->start, res->end, res->flags.
 * If found, returns 0 and res is overwritten; if not found, returns -1.
 */
static int find_next_system_ram(struct resource *res)
{
	resource_size_t start, end;
	struct resource *p;

	BUG_ON(!res);

	start = res->start;
	end = res->end;
	BUG_ON(start >= end);

	read_lock(&resource_lock);
	for (p = iomem_resource.child; p ; p = p->sibling) {
		/* system ram is just marked as IORESOURCE_MEM */
		if (p->flags != res->flags)
			continue;
		/* Children are sorted by start: nothing further can match. */
		if (p->start > end) {
			p = NULL;
			break;
		}
		/* Does this entry overlap the requested window? */
		if ((p->end >= start) && (p->start < end))
			break;
	}
	read_unlock(&resource_lock);
	if (!p)
		return -1;
	/* copy data: clip the found range to the requested window */
	if (res->start < p->start)
		res->start = p->start;
	if (res->end > p->end)
		res->end = p->end;
	return 0;
}
 270int
 271walk_memory_resource(unsigned long start_pfn, unsigned long nr_pages, void *arg,
 272                        int (*func)(unsigned long, unsigned long, void *))
 273{
 274        struct resource res;
 275        unsigned long pfn, len;
 276        u64 orig_end;
 277        int ret = -1;
 278        res.start = (u64) start_pfn << PAGE_SHIFT;
 279        res.end = ((u64)(start_pfn + nr_pages) << PAGE_SHIFT) - 1;
 280        res.flags = IORESOURCE_MEM | IORESOURCE_BUSY;
 281        orig_end = res.end;
 282        while ((res.start < res.end) && (find_next_system_ram(&res) >= 0)) {
 283                pfn = (unsigned long)(res.start >> PAGE_SHIFT);
 284                len = (unsigned long)((res.end + 1 - res.start) >> PAGE_SHIFT);
 285                ret = (*func)(pfn, len, arg);
 286                if (ret)
 287                        break;
 288                res.start = res.end + 1;
 289                res.end = orig_end;
 290        }
 291        return ret;
 292}
 293
 294#endif
 295
/*
 * Find empty slot in the resource tree given range and alignment.
 * Tries every gap between root's (sorted) children, clamped to
 * [min, max] and rounded up to 'align'.  Caller holds resource_lock.
 */
static int find_resource(struct resource *root, struct resource *new,
			 resource_size_t size, resource_size_t min,
			 resource_size_t max, resource_size_t align,
			 void (*alignf)(void *, struct resource *,
					resource_size_t, resource_size_t),
			 void *alignf_data)
{
	struct resource *this = root->child;

	new->start = root->start;
	/*
	 * Skip past an allocated resource that starts at 0, since the assignment
	 * of this->start - 1 to new->end below would cause an underflow.
	 */
	if (this && this->start == 0) {
		new->start = this->end + 1;
		this = this->sibling;
	}
	for(;;) {
		/* Candidate gap ends just before the next child (or at root->end). */
		if (this)
			new->end = this->start - 1;
		else
			new->end = root->end;
		if (new->start < min)
			new->start = min;
		if (new->end > max)
			new->end = max;
		new->start = ALIGN(new->start, align);
		/* Let the caller veto/adjust the candidate placement. */
		if (alignf)
			alignf(alignf_data, new, size, align);
		/* NOTE(review): size == 0 always fails here because size - 1
		 * wraps to the unsigned maximum - presumably callers never
		 * request a zero-sized region; confirm. */
		if (new->start < new->end && new->end - new->start >= size - 1) {
			new->end = new->start + size - 1;
			return 0;
		}
		if (!this)
			break;
		/* Gap too small: advance past this child to the next gap. */
		new->start = this->end + 1;
		this = this->sibling;
	}
	return -EBUSY;
}
 340
 341/**
 342 * allocate_resource - allocate empty slot in the resource tree given range & alignment
 343 * @root: root resource descriptor
 344 * @new: resource descriptor desired by caller
 345 * @size: requested resource region size
 346 * @min: minimum size to allocate
 347 * @max: maximum size to allocate
 348 * @align: alignment requested, in bytes
 349 * @alignf: alignment function, optional, called if not NULL
 350 * @alignf_data: arbitrary data to pass to the @alignf function
 351 */
 352int allocate_resource(struct resource *root, struct resource *new,
 353                      resource_size_t size, resource_size_t min,
 354                      resource_size_t max, resource_size_t align,
 355                      void (*alignf)(void *, struct resource *,
 356                                     resource_size_t, resource_size_t),
 357                      void *alignf_data)
 358{
 359        int err;
 360
 361        write_lock(&resource_lock);
 362        err = find_resource(root, new, size, min, max, align, alignf, alignf_data);
 363        if (err >= 0 && __request_resource(root, new))
 364                err = -EBUSY;
 365        write_unlock(&resource_lock);
 366        return err;
 367}
 368
 369EXPORT_SYMBOL(allocate_resource);
 370
/**
 * insert_resource - Inserts a resource in the resource tree
 * @parent: parent of the new resource
 * @new: new resource to insert
 *
 * Returns 0 on success, -EBUSY if the resource can't be inserted.
 *
 * This function is equivalent to request_resource when no conflict
 * happens. If a conflict happens, and the conflicting resources
 * entirely fit within the range of the new resource, then the new
 * resource is inserted and the conflicting resources become children of
 * the new resource.
 */
int insert_resource(struct resource *parent, struct resource *new)
{
	int result;
	struct resource *first, *next;

	write_lock(&resource_lock);

	/* Descend through conflicts that strictly contain 'new' until we
	 * find the level where 'new' belongs. */
	for (;; parent = first) {
		result = 0;
		first = __request_resource(parent, new);
		if (!first)
			goto out;	/* inserted with no conflict */

		result = -EBUSY;
		if (first == parent)
			goto out;	/* 'new' does not fit inside 'parent' */

		/* Conflict does not fully contain 'new': try adoption below. */
		if ((first->start > new->start) || (first->end < new->end))
			break;
		/* Conflict has the exact same range: it will become a child. */
		if ((first->start == new->start) && (first->end == new->end))
			break;
	}

	/* Find the run of siblings first..next that 'new' will adopt;
	 * any partial overlap makes the insertion impossible. */
	for (next = first; ; next = next->sibling) {
		/* Partial overlap? Bad, and unfixable */
		if (next->start < new->start || next->end > new->end)
			goto out;
		if (!next->sibling)
			break;
		if (next->sibling->start > new->end)
			break;
	}

	result = 0;

	/* Splice 'new' into the sibling list in place of first..next and
	 * re-parent the adopted resources underneath it. */
	new->parent = parent;
	new->sibling = next->sibling;
	new->child = first;

	next->sibling = NULL;
	for (next = first; next; next = next->sibling)
		next->parent = new;

	if (parent->child == first) {
		parent->child = new;
	} else {
		/* 'first' was not the head: relink its predecessor to 'new'. */
		next = parent->child;
		while (next->sibling != first)
			next = next->sibling;
		next->sibling = new;
	}

 out:
	write_unlock(&resource_lock);
	return result;
}
 440
/**
 * adjust_resource - modify a resource's start and size
 * @res: resource to modify
 * @start: new start value
 * @size: new size
 *
 * Given an existing resource, change its start and size to match the
 * arguments.  Returns 0 on success, -EBUSY if it can't fit.
 * Existing children of the resource are assumed to be immutable.
 */
int adjust_resource(struct resource *res, resource_size_t start, resource_size_t size)
{
	struct resource *tmp, *parent = res->parent;
	resource_size_t end = start + size - 1;
	int result = -EBUSY;

	write_lock(&resource_lock);

	/* The new range must still fit within the parent. */
	if ((start < parent->start) || (end > parent->end))
		goto out;

	/* All existing children must remain inside the new range. */
	for (tmp = res->child; tmp; tmp = tmp->sibling) {
		if ((tmp->start < start) || (tmp->end > end))
			goto out;
	}

	/* Must not grow forward into the following sibling. */
	if (res->sibling && (res->sibling->start <= end))
		goto out;

	/* Must not grow backward over the preceding sibling. */
	tmp = parent->child;
	if (tmp != res) {
		while (tmp->sibling != res)
			tmp = tmp->sibling;
		if (start <= tmp->end)
			goto out;
	}

	res->start = start;
	res->end = end;
	result = 0;

 out:
	write_unlock(&resource_lock);
	return result;
}

EXPORT_SYMBOL(adjust_resource);
 488
/*
 * This is compatibility stuff for IO resources.
 *
 * Note how this, unlike the above, knows about
 * the IO flag meanings (busy etc).
 *
 * request_region creates a new busy region.
 *
 * check_region returns non-zero if the area is already busy.
 *
 * release_region releases a matching busy region.
 */
 501
/**
 * __request_region - create a new busy resource region
 * @parent: parent resource descriptor
 * @start: resource start address
 * @n: resource region size
 * @name: reserving caller's ID string
 *
 * Returns the new resource on success, or NULL on allocation failure or
 * when the region conflicts with an already-busy resource.
 */
struct resource * __request_region(struct resource *parent,
				   resource_size_t start, resource_size_t n,
				   const char *name)
{
	struct resource *res = kzalloc(sizeof(*res), GFP_KERNEL);

	if (res) {
		res->name = name;
		res->start = start;
		res->end = start + n - 1;
		res->flags = IORESOURCE_BUSY;

		write_lock(&resource_lock);

		for (;;) {
			struct resource *conflict;

			conflict = __request_resource(parent, res);
			if (!conflict)
				break;	/* inserted successfully */
			if (conflict != parent) {
				/*
				 * Conflict is inside 'parent'.  If it is not
				 * busy, descend into it and retry there.
				 */
				parent = conflict;
				if (!(conflict->flags & IORESOURCE_BUSY))
					continue;
			}

			/* Uhhuh, that didn't work out.. */
			kfree(res);
			res = NULL;
			break;
		}
		write_unlock(&resource_lock);
	}
	return res;
}
EXPORT_SYMBOL(__request_region);
 545
 546/**
 547 * __check_region - check if a resource region is busy or free
 548 * @parent: parent resource descriptor
 549 * @start: resource start address
 550 * @n: resource region size
 551 *
 552 * Returns 0 if the region is free at the moment it is checked,
 553 * returns %-EBUSY if the region is busy.
 554 *
 555 * NOTE:
 556 * This function is deprecated because its use is racy.
 557 * Even if it returns 0, a subsequent call to request_region()
 558 * may fail because another driver etc. just allocated the region.
 559 * Do NOT use it.  It will be removed from the kernel.
 560 */
 561int __check_region(struct resource *parent, resource_size_t start,
 562                        resource_size_t n)
 563{
 564        struct resource * res;
 565
 566        res = __request_region(parent, start, n, "check-region");
 567        if (!res)
 568                return -EBUSY;
 569
 570        release_resource(res);
 571        kfree(res);
 572        return 0;
 573}
 574EXPORT_SYMBOL(__check_region);
 575
/**
 * __release_region - release a previously reserved resource region
 * @parent: parent resource descriptor
 * @start: resource start address
 * @n: resource region size
 *
 * The described resource region must match a currently busy region.
 */
void __release_region(struct resource *parent, resource_size_t start,
			resource_size_t n)
{
	struct resource **p;
	resource_size_t end;

	p = &parent->child;
	end = start + n - 1;

	write_lock(&resource_lock);

	for (;;) {
		struct resource *res = *p;

		if (!res)
			break;
		/* Does this resource fully contain the requested range? */
		if (res->start <= start && res->end >= end) {
			if (!(res->flags & IORESOURCE_BUSY)) {
				/* Not busy: the match must be one of its children. */
				p = &res->child;
				continue;
			}
			/* A busy region must match exactly to be released. */
			if (res->start != start || res->end != end)
				break;
			/* Unlink it, drop the lock, then free it. */
			*p = res->sibling;
			write_unlock(&resource_lock);
			kfree(res);
			return;
		}
		p = &res->sibling;
	}

	write_unlock(&resource_lock);

	printk(KERN_WARNING "Trying to free nonexistent resource "
		"<%016llx-%016llx>\n", (unsigned long long)start,
		(unsigned long long)end);
}
EXPORT_SYMBOL(__release_region);
 622
/*
 * Managed region resource
 */
struct region_devres {
	struct resource *parent;	/* tree the region was requested from */
	resource_size_t start;		/* first address of the region */
	resource_size_t n;		/* region length */
};
 631
 632static void devm_region_release(struct device *dev, void *res)
 633{
 634        struct region_devres *this = res;
 635
 636        __release_region(this->parent, this->start, this->n);
 637}
 638
 639static int devm_region_match(struct device *dev, void *res, void *match_data)
 640{
 641        struct region_devres *this = res, *match = match_data;
 642
 643        return this->parent == match->parent &&
 644                this->start == match->start && this->n == match->n;
 645}
 646
 647struct resource * __devm_request_region(struct device *dev,
 648                                struct resource *parent, resource_size_t start,
 649                                resource_size_t n, const char *name)
 650{
 651        struct region_devres *dr = NULL;
 652        struct resource *res;
 653
 654        dr = devres_alloc(devm_region_release, sizeof(struct region_devres),
 655                          GFP_KERNEL);
 656        if (!dr)
 657                return NULL;
 658
 659        dr->parent = parent;
 660        dr->start = start;
 661        dr->n = n;
 662
 663        res = __request_region(parent, start, n, name);
 664        if (res)
 665                devres_add(dev, dr);
 666        else
 667                devres_free(dr);
 668
 669        return res;
 670}
 671EXPORT_SYMBOL(__devm_request_region);
 672
 673void __devm_release_region(struct device *dev, struct resource *parent,
 674                           resource_size_t start, resource_size_t n)
 675{
 676        struct region_devres match_data = { parent, start, n };
 677
 678        __release_region(parent, start, n);
 679        WARN_ON(devres_destroy(dev, devm_region_release, devm_region_match,
 680                               &match_data));
 681}
 682EXPORT_SYMBOL(__devm_release_region);
 683
/*
 * Called from init/main.c to reserve IO ports.
 * Parses "reserve=<start>,<size>[,<start>,<size>...]" from the command line.
 */
#define MAXRESERVE 4
static int __init reserve_setup(char *str)
{
	static int reserved;
	static struct resource reserve[MAXRESERVE];

	for (;;) {
		int io_start, io_num;
		int x = reserved;

		/* NOTE(review): assumes get_option() returns 2 when an int
		 * was parsed and a comma follows - confirm vs lib/cmdline.c */
		if (get_option (&str, &io_start) != 2)
			break;
		if (get_option (&str, &io_num)   == 0)
			break;
		if (x < MAXRESERVE) {
			struct resource *res = reserve + x;
			res->name = "reserved";
			res->start = io_start;
			res->end = io_start + io_num - 1;
			res->flags = IORESOURCE_BUSY;
			res->child = NULL;
			/* Heuristic: addresses at or above 64K go into the
			 * iomem tree, below that into the ioport tree. */
			if (request_resource(res->start >= 0x10000 ? &iomem_resource : &ioport_resource, res) == 0)
				reserved = x+1;
		}
	}
	return 1;
}

__setup("reserve=", reserve_setup);
 716