linux/drivers/pci/setup-bus.c
   1// SPDX-License-Identifier: GPL-2.0
   2/*
   3 * Support routines for initializing a PCI subsystem
   4 *
   5 * Extruded from code written by
   6 *      Dave Rusling (david.rusling@reo.mts.dec.com)
   7 *      David Mosberger (davidm@cs.arizona.edu)
   8 *      David Miller (davem@redhat.com)
   9 *
  10 * Nov 2000, Ivan Kokshaysky <ink@jurassic.park.msu.ru>
  11 *           PCI-PCI bridges cleanup, sorted resource allocation.
  12 * Feb 2002, Ivan Kokshaysky <ink@jurassic.park.msu.ru>
  13 *           Converted to allocation in 3 passes, which gives
  14 *           tighter packing. Prefetchable range support.
  15 */
  16
  17#include <linux/init.h>
  18#include <linux/kernel.h>
  19#include <linux/module.h>
  20#include <linux/pci.h>
  21#include <linux/errno.h>
  22#include <linux/ioport.h>
  23#include <linux/cache.h>
  24#include <linux/slab.h>
  25#include <linux/acpi.h>
  26#include "pci.h"
  27
  28unsigned int pci_flags;
  29EXPORT_SYMBOL_GPL(pci_flags);
  30
  31struct pci_dev_resource {
  32        struct list_head list;
  33        struct resource *res;
  34        struct pci_dev *dev;
  35        resource_size_t start;
  36        resource_size_t end;
  37        resource_size_t add_size;
  38        resource_size_t min_align;
  39        unsigned long flags;
  40};
  41
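/* Free every resource tracker on @head, unlinking each entry first */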
  42static void free_list(struct list_head *head)
  43{
  44        struct pci_dev_resource *dev_res, *tmp;
  45
  46        list_for_each_entry_safe(dev_res, tmp, head, list) {
  47                list_del(&dev_res->list);
  48                kfree(dev_res);
  49        }
  50}
  51
  52/**
  53 * add_to_list() - Add a new resource tracker to the list
  54 * @head:       Head of the list
  55 * @dev:        Device to which the resource belongs
  56 * @res:        Resource to be tracked
  57 * @add_size:   Additional size to be optionally added to the resource
  58 * @min_align:  Minimum memory window alignment
  59 */
  60static int add_to_list(struct list_head *head, struct pci_dev *dev,
  61                       struct resource *res, resource_size_t add_size,
  62                       resource_size_t min_align)
  63{
  64        struct pci_dev_resource *tmp;
  65
  66        tmp = kzalloc(sizeof(*tmp), GFP_KERNEL);
  67        if (!tmp)
  68                return -ENOMEM;
  69
  70        tmp->res = res;
  71        tmp->dev = dev;
  72        tmp->start = res->start;
  73        tmp->end = res->end;
  74        tmp->flags = res->flags;
  75        tmp->add_size = add_size;
  76        tmp->min_align = min_align;
  77
  78        list_add(&tmp->list, head);
  79
  80        return 0;
  81}
  82
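/* Remove and free the tracker for @res from @head, if one is present */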
  83static void remove_from_list(struct list_head *head, struct resource *res)
  84{
  85        struct pci_dev_resource *dev_res, *tmp;
  86
  87        list_for_each_entry_safe(dev_res, tmp, head, list) {
  88                if (dev_res->res == res) {
  89                        list_del(&dev_res->list);
  90                        kfree(dev_res);
  91                        break;
  92                }
  93        }
  94}
  95
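/* Find the tracker for @res on @head; returns NULL if @res is not tracked */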
  96static struct pci_dev_resource *res_to_dev_res(struct list_head *head,
  97                                               struct resource *res)
  98{
  99        struct pci_dev_resource *dev_res;
 100
 101        list_for_each_entry(dev_res, head, list) {
 102                if (dev_res->res == res)
 103                        return dev_res;
 104        }
 105
 106        return NULL;
 107}
 108
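/* Optional additional size recorded for @res on @head, or 0 if none */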
 109static resource_size_t get_res_add_size(struct list_head *head,
 110                                        struct resource *res)
 111{
 112        struct pci_dev_resource *dev_res;
 113
 114        dev_res = res_to_dev_res(head, res);
 115        return dev_res ? dev_res->add_size : 0;
 116}
 117
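/* Minimum alignment recorded for @res on @head, or 0 if none */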
 118static resource_size_t get_res_add_align(struct list_head *head,
 119                                         struct resource *res)
 120{
 121        struct pci_dev_resource *dev_res;
 122
 123        dev_res = res_to_dev_res(head, res);
 124        return dev_res ? dev_res->min_align : 0;
 125}
 126
 127
 128/* Sort resources by alignment */
 129static void pdev_sort_resources(struct pci_dev *dev, struct list_head *head)
 130{
 131        int i;
 132
 133        for (i = 0; i < PCI_NUM_RESOURCES; i++) {
 134                struct resource *r;
 135                struct pci_dev_resource *dev_res, *tmp;
 136                resource_size_t r_align;
 137                struct list_head *n;
 138
 139                r = &dev->resource[i];
 140
 141                if (r->flags & IORESOURCE_PCI_FIXED)
 142                        continue;
 143
 144                if (!(r->flags) || r->parent)
 145                        continue;
 146
 147                r_align = pci_resource_alignment(dev, r);
 148                if (!r_align) {
 149                        pci_warn(dev, "BAR %d: %pR has bogus alignment\n",
 150                                 i, r);
 151                        continue;
 152                }
 153
 154                tmp = kzalloc(sizeof(*tmp), GFP_KERNEL);
 155                if (!tmp)
 156                        panic("%s: kzalloc() failed!\n", __func__);
 157                tmp->res = r;
 158                tmp->dev = dev;
 159
  160                /* Fallback: insert at the tail (smallest alignment, or empty list) */
 161                n = head;
 162                list_for_each_entry(dev_res, head, list) {
 163                        resource_size_t align;
 164
 165                        align = pci_resource_alignment(dev_res->dev,
 166                                                         dev_res->res);
 167
 168                        if (r_align > align) {
 169                                n = &dev_res->list;
 170                                break;
 171                        }
 172                }
 173                /* Insert it just before n */
 174                list_add_tail(&tmp->list, n);
 175        }
 176}
 177
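/*
 * Queue @dev's resources on @head in alignment order, skipping classless
 * devices, host bridges and IOAPICs already enabled by firmware.
 */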
 178static void __dev_sort_resources(struct pci_dev *dev, struct list_head *head)
 179{
 180        u16 class = dev->class >> 8;
 181
 182        /* Don't touch classless devices or host bridges or IOAPICs */
 183        if (class == PCI_CLASS_NOT_DEFINED || class == PCI_CLASS_BRIDGE_HOST)
 184                return;
 185
 186        /* Don't touch IOAPIC devices already enabled by firmware */
 187        if (class == PCI_CLASS_SYSTEM_PIC) {
 188                u16 command;
 189                pci_read_config_word(dev, PCI_COMMAND, &command);
 190                if (command & (PCI_COMMAND_IO | PCI_COMMAND_MEMORY))
 191                        return;
 192        }
 193
 194        pdev_sort_resources(dev, head);
 195}
 196
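/* Clear start, end and flags so the resource reads as unassigned */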
 197static inline void reset_resource(struct resource *res)
 198{
 199        res->start = 0;
 200        res->end = 0;
 201        res->flags = 0;
 202}
 203
 204/**
 205 * reassign_resources_sorted() - Satisfy any additional resource requests
 206 *
 207 * @realloc_head:       Head of the list tracking requests requiring
 208 *                      additional resources
 209 * @head:               Head of the list tracking requests with allocated
 210 *                      resources
 211 *
 212 * Walk through each element of the realloc_head and try to procure additional
 213 * resources for the element, provided the element is in the head list.
 214 */
 215static void reassign_resources_sorted(struct list_head *realloc_head,
 216                                      struct list_head *head)
 217{
 218        struct resource *res;
 219        struct pci_dev_resource *add_res, *tmp;
 220        struct pci_dev_resource *dev_res;
 221        resource_size_t add_size, align;
 222        int idx;
 223
 224        list_for_each_entry_safe(add_res, tmp, realloc_head, list) {
 225                bool found_match = false;
 226
 227                res = add_res->res;
 228                /* Skip resource that has been reset */
 229                if (!res->flags)
 230                        goto out;
 231
 232                /* Skip this resource if not found in head list */
 233                list_for_each_entry(dev_res, head, list) {
 234                        if (dev_res->res == res) {
 235                                found_match = true;
 236                                break;
 237                        }
 238                }
 239                if (!found_match) /* Just skip */
 240                        continue;
 241
 242                idx = res - &add_res->dev->resource[0];
 243                add_size = add_res->add_size;
 244                align = add_res->min_align;
 245                if (!resource_size(res)) {
 246                        res->start = align;
 247                        res->end = res->start + add_size - 1;
 248                        if (pci_assign_resource(add_res->dev, idx))
 249                                reset_resource(res);
 250                } else {
 251                        res->flags |= add_res->flags &
 252                                 (IORESOURCE_STARTALIGN|IORESOURCE_SIZEALIGN);
 253                        if (pci_reassign_resource(add_res->dev, idx,
 254                                                  add_size, align))
 255                                pci_info(add_res->dev, "failed to add %llx res[%d]=%pR\n",
 256                                         (unsigned long long) add_size, idx,
 257                                         res);
 258                }
 259out:
 260                list_del(&add_res->list);
 261                kfree(add_res);
 262        }
 263}
 264
 265/**
 266 * assign_requested_resources_sorted() - Satisfy resource requests
 267 *
 268 * @head:       Head of the list tracking requests for resources
 269 * @fail_head:  Head of the list tracking requests that could not be
 270 *              allocated
 271 *
  272 * Satisfy resource requests of each element in the list.  Add requests that
  273 * could not be satisfied to the fail_head list.
 274 */
 275static void assign_requested_resources_sorted(struct list_head *head,
 276                                 struct list_head *fail_head)
 277{
 278        struct resource *res;
 279        struct pci_dev_resource *dev_res;
 280        int idx;
 281
 282        list_for_each_entry(dev_res, head, list) {
 283                res = dev_res->res;
 284                idx = res - &dev_res->dev->resource[0];
 285                if (resource_size(res) &&
 286                    pci_assign_resource(dev_res->dev, idx)) {
 287                        if (fail_head) {
 288                                /*
 289                                 * If the failed resource is a ROM BAR and
 290                                 * it will be enabled later, don't add it
 291                                 * to the list.
 292                                 */
 293                                if (!((idx == PCI_ROM_RESOURCE) &&
 294                                      (!(res->flags & IORESOURCE_ROM_ENABLE))))
 295                                        add_to_list(fail_head,
 296                                                    dev_res->dev, res,
 297                                                    0 /* don't care */,
 298                                                    0 /* don't care */);
 299                        }
 300                        reset_resource(res);
 301                }
 302        }
 303}
 304
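/* OR together the IO/MEM/PREFETCH flags of all failed resources on @fail_head */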
 305static unsigned long pci_fail_res_type_mask(struct list_head *fail_head)
 306{
 307        struct pci_dev_resource *fail_res;
 308        unsigned long mask = 0;
 309
 310        /* Check failed type */
 311        list_for_each_entry(fail_res, fail_head, list)
 312                mask |= fail_res->flags;
 313
  314        /*
  315         * A failed pref resource also sets IORESOURCE_MEM, since pref may be
  316         * allocated in a non-pref range.  All assigned non-pref sibling
  317         * resources will then be released according to that bit.
  318         */
 319        return mask & (IORESOURCE_IO | IORESOURCE_MEM | IORESOURCE_PREFETCH);
 320}
 321
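/*
 * Decide whether an assigned resource must be released, given the mask of
 * resource types that failed assignment.
 */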
 322static bool pci_need_to_release(unsigned long mask, struct resource *res)
 323{
 324        if (res->flags & IORESOURCE_IO)
 325                return !!(mask & IORESOURCE_IO);
 326
 327        /* Check pref at first */
 328        if (res->flags & IORESOURCE_PREFETCH) {
 329                if (mask & IORESOURCE_PREFETCH)
 330                        return true;
  331                /* Also release pref if it sits in a non-pref parent window */
 332                else if ((mask & IORESOURCE_MEM) &&
 333                         !(res->parent->flags & IORESOURCE_PREFETCH))
 334                        return true;
 335                else
 336                        return false;
 337        }
 338
 339        if (res->flags & IORESOURCE_MEM)
 340                return !!(mask & IORESOURCE_MEM);
 341
 342        return false;   /* Should not get here */
 343}
 344
 345static void __assign_resources_sorted(struct list_head *head,
 346                                      struct list_head *realloc_head,
 347                                      struct list_head *fail_head)
 348{
  349        /*
  350         * We should not assign the requested resources by themselves first.
  351         * They could be adjacent, in which case a later reassignment could
  352         * not grow them one by one inside the parent resource window.
  353         *
  354         * Instead, try to assign requested + add_size up front.  If that
  355         * succeeds, we are done.  If it fails, assign just the requested
  356         * sizes first, then try to reassign add_size for some resources.
  357         *
  358         * Three per-type checks decide whether an already assigned
  359         * resource must be released after the requested + add_size try:
  360         *
  361         *      1. If I/O port assignment fails, release assigned I/O
  362         *         ports.
  363         *      2. If pref MMIO assignment fails, release assigned pref
  364         *         MMIO.  If an assigned pref MMIO's parent is non-pref
  365         *         MMIO and non-pref MMIO assignment fails, also release
  366         *         that assigned pref MMIO.
  367         *      3. If non-pref MMIO assignment fails or pref MMIO
  368         *         assignment fails, release assigned non-pref MMIO.
  369         */
 370        LIST_HEAD(save_head);
 371        LIST_HEAD(local_fail_head);
 372        struct pci_dev_resource *save_res;
 373        struct pci_dev_resource *dev_res, *tmp_res, *dev_res2;
 374        unsigned long fail_type;
 375        resource_size_t add_align, align;
 376
 377        /* Check if optional add_size is there */
 378        if (!realloc_head || list_empty(realloc_head))
 379                goto requested_and_reassign;
 380
 381        /* Save original start, end, flags etc at first */
 382        list_for_each_entry(dev_res, head, list) {
 383                if (add_to_list(&save_head, dev_res->dev, dev_res->res, 0, 0)) {
 384                        free_list(&save_head);
 385                        goto requested_and_reassign;
 386                }
 387        }
 388
 389        /* Update res in head list with add_size in realloc_head list */
 390        list_for_each_entry_safe(dev_res, tmp_res, head, list) {
 391                dev_res->res->end += get_res_add_size(realloc_head,
 392                                                        dev_res->res);
 393
 394                /*
 395                 * There are two kinds of additional resources in the list:
 396                 * 1. bridge resource  -- IORESOURCE_STARTALIGN
 397                 * 2. SR-IOV resource  -- IORESOURCE_SIZEALIGN
 398                 * Here just fix the additional alignment for bridge
 399                 */
 400                if (!(dev_res->res->flags & IORESOURCE_STARTALIGN))
 401                        continue;
 402
 403                add_align = get_res_add_align(realloc_head, dev_res->res);
 404
 405                /*
 406                 * The "head" list is sorted by alignment so resources with
 407                 * bigger alignment will be assigned first.  After we
 408                 * change the alignment of a dev_res in "head" list, we
 409                 * need to reorder the list by alignment to make it
 410                 * consistent.
 411                 */
 412                if (add_align > dev_res->res->start) {
 413                        resource_size_t r_size = resource_size(dev_res->res);
 414
 415                        dev_res->res->start = add_align;
 416                        dev_res->res->end = add_align + r_size - 1;
 417
 418                        list_for_each_entry(dev_res2, head, list) {
 419                                align = pci_resource_alignment(dev_res2->dev,
 420                                                               dev_res2->res);
 421                                if (add_align > align) {
 422                                        list_move_tail(&dev_res->list,
 423                                                       &dev_res2->list);
 424                                        break;
 425                                }
 426                        }
 427                }
 428
 429        }
 430
 431        /* Try updated head list with add_size added */
 432        assign_requested_resources_sorted(head, &local_fail_head);
 433
 434        /* All assigned with add_size? */
 435        if (list_empty(&local_fail_head)) {
 436                /* Remove head list from realloc_head list */
 437                list_for_each_entry(dev_res, head, list)
 438                        remove_from_list(realloc_head, dev_res->res);
 439                free_list(&save_head);
 440                free_list(head);
 441                return;
 442        }
 443
 444        /* Check failed type */
 445        fail_type = pci_fail_res_type_mask(&local_fail_head);
  446        /* Remove assigned resources that don't need to be released from the lists */
 447        list_for_each_entry_safe(dev_res, tmp_res, head, list)
 448                if (dev_res->res->parent &&
 449                    !pci_need_to_release(fail_type, dev_res->res)) {
 450                        /* Remove it from realloc_head list */
 451                        remove_from_list(realloc_head, dev_res->res);
 452                        remove_from_list(&save_head, dev_res->res);
 453                        list_del(&dev_res->list);
 454                        kfree(dev_res);
 455                }
 456
 457        free_list(&local_fail_head);
 458        /* Release assigned resource */
 459        list_for_each_entry(dev_res, head, list)
 460                if (dev_res->res->parent)
 461                        release_resource(dev_res->res);
 462        /* Restore start/end/flags from saved list */
 463        list_for_each_entry(save_res, &save_head, list) {
 464                struct resource *res = save_res->res;
 465
 466                res->start = save_res->start;
 467                res->end = save_res->end;
 468                res->flags = save_res->flags;
 469        }
 470        free_list(&save_head);
 471
 472requested_and_reassign:
 473        /* Satisfy the must-have resource requests */
 474        assign_requested_resources_sorted(head, fail_head);
 475
 476        /* Try to satisfy any additional optional resource requests */
 477        if (realloc_head)
 478                reassign_resources_sorted(realloc_head, head);
 479        free_list(head);
 480}
 481
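/* Sort one device's resources by alignment, then assign them */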
 482static void pdev_assign_resources_sorted(struct pci_dev *dev,
 483                                         struct list_head *add_head,
 484                                         struct list_head *fail_head)
 485{
 486        LIST_HEAD(head);
 487
 488        __dev_sort_resources(dev, &head);
 489        __assign_resources_sorted(&head, add_head, fail_head);
 490
 491}
 492
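/* Sort the resources of every device on @bus by alignment, then assign them */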
 493static void pbus_assign_resources_sorted(const struct pci_bus *bus,
 494                                         struct list_head *realloc_head,
 495                                         struct list_head *fail_head)
 496{
 497        struct pci_dev *dev;
 498        LIST_HEAD(head);
 499
 500        list_for_each_entry(dev, &bus->devices, bus_list)
 501                __dev_sort_resources(dev, &head);
 502
 503        __assign_resources_sorted(&head, realloc_head, fail_head);
 504}
 505
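/* Program a CardBus bridge's two I/O and two memory windows from bus->resource[0..3] */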
 506void pci_setup_cardbus(struct pci_bus *bus)
 507{
 508        struct pci_dev *bridge = bus->self;
 509        struct resource *res;
 510        struct pci_bus_region region;
 511
 512        pci_info(bridge, "CardBus bridge to %pR\n",
 513                 &bus->busn_res);
 514
 515        res = bus->resource[0];
 516        pcibios_resource_to_bus(bridge->bus, &region, res);
 517        if (res->flags & IORESOURCE_IO) {
 518                /*
 519                 * The IO resource is allocated a range twice as large as it
 520                 * would normally need.  This allows us to set both IO regs.
 521                 */
 522                pci_info(bridge, "  bridge window %pR\n", res);
 523                pci_write_config_dword(bridge, PCI_CB_IO_BASE_0,
 524                                        region.start);
 525                pci_write_config_dword(bridge, PCI_CB_IO_LIMIT_0,
 526                                        region.end);
 527        }
 528
 529        res = bus->resource[1];
 530        pcibios_resource_to_bus(bridge->bus, &region, res);
 531        if (res->flags & IORESOURCE_IO) {
 532                pci_info(bridge, "  bridge window %pR\n", res);
 533                pci_write_config_dword(bridge, PCI_CB_IO_BASE_1,
 534                                        region.start);
 535                pci_write_config_dword(bridge, PCI_CB_IO_LIMIT_1,
 536                                        region.end);
 537        }
 538
 539        res = bus->resource[2];
 540        pcibios_resource_to_bus(bridge->bus, &region, res);
 541        if (res->flags & IORESOURCE_MEM) {
 542                pci_info(bridge, "  bridge window %pR\n", res);
 543                pci_write_config_dword(bridge, PCI_CB_MEMORY_BASE_0,
 544                                        region.start);
 545                pci_write_config_dword(bridge, PCI_CB_MEMORY_LIMIT_0,
 546                                        region.end);
 547        }
 548
 549        res = bus->resource[3];
 550        pcibios_resource_to_bus(bridge->bus, &region, res);
 551        if (res->flags & IORESOURCE_MEM) {
 552                pci_info(bridge, "  bridge window %pR\n", res);
 553                pci_write_config_dword(bridge, PCI_CB_MEMORY_BASE_1,
 554                                        region.start);
 555                pci_write_config_dword(bridge, PCI_CB_MEMORY_LIMIT_1,
 556                                        region.end);
 557        }
 558}
 559EXPORT_SYMBOL(pci_setup_cardbus);
 560
 561/*
 562 * Initialize bridges with base/limit values we have collected.  PCI-to-PCI
 563 * Bridge Architecture Specification rev. 1.1 (1998) requires that if there
 564 * are no I/O ports or memory behind the bridge, the corresponding range
 565 * must be turned off by writing base value greater than limit to the
 566 * bridge's base/limit registers.
 567 *
 568 * Note: care must be taken when updating I/O base/limit registers of
 569 * bridges which support 32-bit I/O.  This update requires two config space
 570 * writes, so it's quite possible that an I/O window of the bridge will
 571 * have some undesirable address (e.g. 0) after the first write.  Ditto
 572 * 64-bit prefetchable MMIO.
 573 */
 574static void pci_setup_bridge_io(struct pci_dev *bridge)
 575{
 576        struct resource *res;
 577        struct pci_bus_region region;
 578        unsigned long io_mask;
 579        u8 io_base_lo, io_limit_lo;
 580        u16 l;
 581        u32 io_upper16;
 582
 583        io_mask = PCI_IO_RANGE_MASK;
 584        if (bridge->io_window_1k)
 585                io_mask = PCI_IO_1K_RANGE_MASK;
 586
 587        /* Set up the top and bottom of the PCI I/O segment for this bus */
 588        res = &bridge->resource[PCI_BRIDGE_IO_WINDOW];
 589        pcibios_resource_to_bus(bridge->bus, &region, res);
 590        if (res->flags & IORESOURCE_IO) {
 591                pci_read_config_word(bridge, PCI_IO_BASE, &l);
 592                io_base_lo = (region.start >> 8) & io_mask;
 593                io_limit_lo = (region.end >> 8) & io_mask;
 594                l = ((u16) io_limit_lo << 8) | io_base_lo;
 595                /* Set up upper 16 bits of I/O base/limit */
 596                io_upper16 = (region.end & 0xffff0000) | (region.start >> 16);
 597                pci_info(bridge, "  bridge window %pR\n", res);
 598        } else {
 599                /* Clear upper 16 bits of I/O base/limit */
 600                io_upper16 = 0;
 601                l = 0x00f0;
 602        }
 603        /* Temporarily disable the I/O range before updating PCI_IO_BASE */
 604        pci_write_config_dword(bridge, PCI_IO_BASE_UPPER16, 0x0000ffff);
 605        /* Update lower 16 bits of I/O base/limit */
 606        pci_write_config_word(bridge, PCI_IO_BASE, l);
 607        /* Update upper 16 bits of I/O base/limit */
 608        pci_write_config_dword(bridge, PCI_IO_BASE_UPPER16, io_upper16);
 609}
 610
 611static void pci_setup_bridge_mmio(struct pci_dev *bridge)
 612{
 613        struct resource *res;
 614        struct pci_bus_region region;
 615        u32 l;
 616
 617        /* Set up the top and bottom of the PCI Memory segment for this bus */
 618        res = &bridge->resource[PCI_BRIDGE_MEM_WINDOW];
 619        pcibios_resource_to_bus(bridge->bus, &region, res);
 620        if (res->flags & IORESOURCE_MEM) {
 621                l = (region.start >> 16) & 0xfff0;
 622                l |= region.end & 0xfff00000;
 623                pci_info(bridge, "  bridge window %pR\n", res);
 624        } else {
 625                l = 0x0000fff0;
 626        }
 627        pci_write_config_dword(bridge, PCI_MEMORY_BASE, l);
 628}
 629
 630static void pci_setup_bridge_mmio_pref(struct pci_dev *bridge)
 631{
 632        struct resource *res;
 633        struct pci_bus_region region;
 634        u32 l, bu, lu;
 635
 636        /*
 637         * Clear out the upper 32 bits of PREF limit.  If
 638         * PCI_PREF_BASE_UPPER32 was non-zero, this temporarily disables
 639         * PREF range, which is ok.
 640         */
 641        pci_write_config_dword(bridge, PCI_PREF_LIMIT_UPPER32, 0);
 642
 643        /* Set up PREF base/limit */
 644        bu = lu = 0;
 645        res = &bridge->resource[PCI_BRIDGE_PREF_MEM_WINDOW];
 646        pcibios_resource_to_bus(bridge->bus, &region, res);
 647        if (res->flags & IORESOURCE_PREFETCH) {
 648                l = (region.start >> 16) & 0xfff0;
 649                l |= region.end & 0xfff00000;
 650                if (res->flags & IORESOURCE_MEM_64) {
 651                        bu = upper_32_bits(region.start);
 652                        lu = upper_32_bits(region.end);
 653                }
 654                pci_info(bridge, "  bridge window %pR\n", res);
 655        } else {
 656                l = 0x0000fff0;
 657        }
 658        pci_write_config_dword(bridge, PCI_PREF_MEMORY_BASE, l);
 659
 660        /* Set the upper 32 bits of PREF base & limit */
 661        pci_write_config_dword(bridge, PCI_PREF_BASE_UPPER32, bu);
 662        pci_write_config_dword(bridge, PCI_PREF_LIMIT_UPPER32, lu);
 663}
 664
 665static void __pci_setup_bridge(struct pci_bus *bus, unsigned long type)
 666{
 667        struct pci_dev *bridge = bus->self;
 668
 669        pci_info(bridge, "PCI bridge to %pR\n",
 670                 &bus->busn_res);
 671
 672        if (type & IORESOURCE_IO)
 673                pci_setup_bridge_io(bridge);
 674
 675        if (type & IORESOURCE_MEM)
 676                pci_setup_bridge_mmio(bridge);
 677
 678        if (type & IORESOURCE_PREFETCH)
 679                pci_setup_bridge_mmio_pref(bridge);
 680
 681        pci_write_config_word(bridge, PCI_BRIDGE_CONTROL, bus->bridge_ctl);
 682}
 683
 684void __weak pcibios_setup_bridge(struct pci_bus *bus, unsigned long type)
 685{
 686}
 687
 688void pci_setup_bridge(struct pci_bus *bus)
 689{
 690        unsigned long type = IORESOURCE_IO | IORESOURCE_MEM |
 691                                  IORESOURCE_PREFETCH;
 692
 693        pcibios_setup_bridge(bus, type);
 694        __pci_setup_bridge(bus, type);
 695}
 696
 697
 698int pci_claim_bridge_resource(struct pci_dev *bridge, int i)
 699{
 700        if (i < PCI_BRIDGE_RESOURCES || i > PCI_BRIDGE_RESOURCE_END)
 701                return 0;
 702
 703        if (pci_claim_resource(bridge, i) == 0)
 704                return 0;       /* Claimed the window */
 705
 706        if ((bridge->class >> 8) != PCI_CLASS_BRIDGE_PCI)
 707                return 0;
 708
 709        if (!pci_bus_clip_resource(bridge, i))
 710                return -EINVAL; /* Clipping didn't change anything */
 711
 712        switch (i) {
 713        case PCI_BRIDGE_IO_WINDOW:
 714                pci_setup_bridge_io(bridge);
 715                break;
 716        case PCI_BRIDGE_MEM_WINDOW:
 717                pci_setup_bridge_mmio(bridge);
 718                break;
 719        case PCI_BRIDGE_PREF_MEM_WINDOW:
 720                pci_setup_bridge_mmio_pref(bridge);
 721                break;
 722        default:
 723                return -EINVAL;
 724        }
 725
 726        if (pci_claim_resource(bridge, i) == 0)
 727                return 0;       /* Claimed a smaller window */
 728
 729        return -EINVAL;
 730}
 731
 732/*
 733 * Check whether the bridge supports optional I/O and prefetchable memory
 734 * ranges.  If not, the respective base/limit registers must be read-only
 735 * and read as 0.
 736 */
 737static void pci_bridge_check_ranges(struct pci_bus *bus)
 738{
 739        struct pci_dev *bridge = bus->self;
 740        struct resource *b_res;
 741
 742        b_res = &bridge->resource[PCI_BRIDGE_MEM_WINDOW];
 743        b_res->flags |= IORESOURCE_MEM;
 744
 745        if (bridge->io_window) {
 746                b_res = &bridge->resource[PCI_BRIDGE_IO_WINDOW];
 747                b_res->flags |= IORESOURCE_IO;
 748        }
 749
 750        if (bridge->pref_window) {
 751                b_res = &bridge->resource[PCI_BRIDGE_PREF_MEM_WINDOW];
 752                b_res->flags |= IORESOURCE_MEM | IORESOURCE_PREFETCH;
 753                if (bridge->pref_64_window) {
 754                        b_res->flags |= IORESOURCE_MEM_64 |
 755                                        PCI_PREF_RANGE_TYPE_64;
 756                }
 757        }
 758}
 759
 760/*
 761 * Helper function for sizing routines.  Assigned resources have non-NULL
 762 * parent resource.
 763 *
 764 * Return first unassigned resource of the correct type.  If there is none,
 765 * return first assigned resource of the correct type.  If none of the
 766 * above, return NULL.
 767 *
 768 * Returning an assigned resource of the correct type allows the caller to
 769 * distinguish between already assigned and no resource of the correct type.
 770 */
 771static struct resource *find_bus_resource_of_type(struct pci_bus *bus,
 772                                                  unsigned long type_mask,
 773                                                  unsigned long type)
 774{
 775        struct resource *r, *r_assigned = NULL;
 776        int i;
 777
 778        pci_bus_for_each_resource(bus, r, i) {
 779                if (r == &ioport_resource || r == &iomem_resource)
 780                        continue;
 781                if (r && (r->flags & type_mask) == type && !r->parent)
 782                        return r;
 783                if (r && (r->flags & type_mask) == type && !r_assigned)
 784                        r_assigned = r;
 785        }
 786        return r_assigned;
 787}
 788
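/*
 * Size an I/O bridge window: enforce @min_size, allow for ISA aliasing,
 * keep at least @old_size, then add the optional sizes and align the result.
 */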
 789static resource_size_t calculate_iosize(resource_size_t size,
 790                                        resource_size_t min_size,
 791                                        resource_size_t size1,
 792                                        resource_size_t add_size,
 793                                        resource_size_t children_add_size,
 794                                        resource_size_t old_size,
 795                                        resource_size_t align)
 796{
 797        if (size < min_size)
 798                size = min_size;
 799        if (old_size == 1)
 800                old_size = 0;
 801        /*
  802         * To be fixed in 2.5: we should have some sort of HAVE_ISA flag
  803         * in struct pci_bus.
 804         */
 805#if defined(CONFIG_ISA) || defined(CONFIG_EISA)
 806        size = (size & 0xff) + ((size & ~0xffUL) << 2);
 807#endif
 808        size = size + size1;
 809        if (size < old_size)
 810                size = old_size;
 811
 812        size = ALIGN(max(size, add_size) + children_add_size, align);
 813        return size;
 814}
 815
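/*
 * Size a memory bridge window: enforce @min_size, keep at least @old_size,
 * then add the optional sizes and align the result.
 */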
 816static resource_size_t calculate_memsize(resource_size_t size,
 817                                         resource_size_t min_size,
 818                                         resource_size_t add_size,
 819                                         resource_size_t children_add_size,
 820                                         resource_size_t old_size,
 821                                         resource_size_t align)
 822{
 823        if (size < min_size)
 824                size = min_size;
 825        if (old_size == 1)
 826                old_size = 0;
 827        if (size < old_size)
 828                size = old_size;
 829
 830        size = ALIGN(max(size, add_size) + children_add_size, align);
 831        return size;
 832}
 833
 834resource_size_t __weak pcibios_window_alignment(struct pci_bus *bus,
 835                                                unsigned long type)
 836{
 837        return 1;
 838}
 839
 840#define PCI_P2P_DEFAULT_MEM_ALIGN       0x100000        /* 1MiB */
 841#define PCI_P2P_DEFAULT_IO_ALIGN        0x1000          /* 4KiB */
 842#define PCI_P2P_DEFAULT_IO_ALIGN_1K     0x400           /* 1KiB */
 843
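/*
 * Minimum window alignment for @type on @bus: 1MiB for memory, 4KiB (or
 * 1KiB if supported) for I/O, unless the arch requires more.
 */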
 844static resource_size_t window_alignment(struct pci_bus *bus, unsigned long type)
 845{
 846        resource_size_t align = 1, arch_align;
 847
 848        if (type & IORESOURCE_MEM)
 849                align = PCI_P2P_DEFAULT_MEM_ALIGN;
 850        else if (type & IORESOURCE_IO) {
 851                /*
 852                 * Per spec, I/O windows are 4K-aligned, but some bridges have
 853                 * an extension to support 1K alignment.
 854                 */
 855                if (bus->self && bus->self->io_window_1k)
 856                        align = PCI_P2P_DEFAULT_IO_ALIGN_1K;
 857                else
 858                        align = PCI_P2P_DEFAULT_IO_ALIGN;
 859        }
 860
 861        arch_align = pcibios_window_alignment(bus, type);
 862        return max(align, arch_align);
 863}
 864
 865/**
 866 * pbus_size_io() - Size the I/O window of a given bus
 867 *
 868 * @bus:                The bus
 869 * @min_size:           The minimum I/O window that must be allocated
 870 * @add_size:           Additional optional I/O window
 871 * @realloc_head:       Track the additional I/O window on this list
 872 *
 873 * Sizing the I/O windows of the PCI-PCI bridge is trivial, since these
 874 * windows have 1K or 4K granularity and the I/O ranges of non-bridge PCI
 875 * devices are limited to 256 bytes.  We must be careful with the ISA
 876 * aliasing though.
 877 */
 878static void pbus_size_io(struct pci_bus *bus, resource_size_t min_size,
 879                         resource_size_t add_size,
 880                         struct list_head *realloc_head)
 881{
 882        struct pci_dev *dev;
 883        struct resource *b_res = find_bus_resource_of_type(bus, IORESOURCE_IO,
 884                                                           IORESOURCE_IO);
 885        resource_size_t size = 0, size0 = 0, size1 = 0;
 886        resource_size_t children_add_size = 0;
 887        resource_size_t min_align, align;
 888
 889        if (!b_res)
 890                return;
 891
 892        /* If resource is already assigned, nothing more to do */
 893        if (b_res->parent)
 894                return;
 895
 896        min_align = window_alignment(bus, IORESOURCE_IO);
 897        list_for_each_entry(dev, &bus->devices, bus_list) {
 898                int i;
 899
 900                for (i = 0; i < PCI_NUM_RESOURCES; i++) {
 901                        struct resource *r = &dev->resource[i];
 902                        unsigned long r_size;
 903
 904                        if (r->parent || !(r->flags & IORESOURCE_IO))
 905                                continue;
 906                        r_size = resource_size(r);
 907
 908                        if (r_size < 0x400)
 909                                /* Might be re-aligned for ISA */
 910                                size += r_size;
 911                        else
 912                                size1 += r_size;
 913
 914                        align = pci_resource_alignment(dev, r);
 915                        if (align > min_align)
 916                                min_align = align;
 917
 918                        if (realloc_head)
 919                                children_add_size += get_res_add_size(realloc_head, r);
 920                }
 921        }
 922
 923        size0 = calculate_iosize(size, min_size, size1, 0, 0,
 924                        resource_size(b_res), min_align);
 925        size1 = (!realloc_head || (realloc_head && !add_size && !children_add_size)) ? size0 :
 926                calculate_iosize(size, min_size, size1, add_size, children_add_size,
 927                        resource_size(b_res), min_align);
 928        if (!size0 && !size1) {
 929                if (bus->self && (b_res->start || b_res->end))
 930                        pci_info(bus->self, "disabling bridge window %pR to %pR (unused)\n",
 931                                 b_res, &bus->busn_res);
 932                b_res->flags = 0;
 933                return;
 934        }
 935
 936        b_res->start = min_align;
 937        b_res->end = b_res->start + size0 - 1;
 938        b_res->flags |= IORESOURCE_STARTALIGN;
 939        if (bus->self && size1 > size0 && realloc_head) {
 940                add_to_list(realloc_head, bus->self, b_res, size1-size0,
 941                            min_align);
 942                pci_info(bus->self, "bridge window %pR to %pR add_size %llx\n",
 943                         b_res, &bus->busn_res,
  944                         (unsigned long long) (size1 - size0));
 945        }
 946}
 947
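/*
 * Estimate the window alignment from the space consumed by resources of
 * each alignment order (aligns[0] = 1MB, aligns[1] = 2MB, ...).
 */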
 948static inline resource_size_t calculate_mem_align(resource_size_t *aligns,
 949                                                  int max_order)
 950{
 951        resource_size_t align = 0;
 952        resource_size_t min_align = 0;
 953        int order;
 954
 955        for (order = 0; order <= max_order; order++) {
 956                resource_size_t align1 = 1;
 957
 958                align1 <<= (order + 20);
 959
 960                if (!align)
 961                        min_align = align1;
 962                else if (ALIGN(align + min_align, min_align) < align1)
 963                        min_align = align1 >> 1;
 964                align += aligns[order];
 965        }
 966
 967        return min_align;
 968}
 969
 970/**
 971 * pbus_size_mem() - Size the memory window of a given bus
 972 *
 973 * @bus:                The bus
  974 * @mask:               Mask applied to resource flags before comparing with @type
  975 * @type:               Type of free bridge window resource to size
 976 * @type2:              Second match type
 977 * @type3:              Third match type
 978 * @min_size:           The minimum memory window that must be allocated
 979 * @add_size:           Additional optional memory window
 980 * @realloc_head:       Track the additional memory window on this list
 981 *
 982 * Calculate the size of the bus and minimal alignment which guarantees
 983 * that all child resources fit in this size.
 984 *
 985 * Return -ENOSPC if there's no available bus resource of the desired
 986 * type.  Otherwise, set the bus resource start/end to indicate the
 987 * required size, add things to realloc_head (if supplied), and return 0.
 988 */
 989static int pbus_size_mem(struct pci_bus *bus, unsigned long mask,
 990                         unsigned long type, unsigned long type2,
 991                         unsigned long type3, resource_size_t min_size,
 992                         resource_size_t add_size,
 993                         struct list_head *realloc_head)
 994{
 995        struct pci_dev *dev;
 996        resource_size_t min_align, align, size, size0, size1;
 997        resource_size_t aligns[18]; /* Alignments from 1MB to 128GB */
 998        int order, max_order;
 999        struct resource *b_res = find_bus_resource_of_type(bus,
1000                                        mask | IORESOURCE_PREFETCH, type);
1001        resource_size_t children_add_size = 0;
1002        resource_size_t children_add_align = 0;
1003        resource_size_t add_align = 0;
1004
1005        if (!b_res)
1006                return -ENOSPC;
1007
1008        /* If resource is already assigned, nothing more to do */
1009        if (b_res->parent)
1010                return 0;
1011
1012        memset(aligns, 0, sizeof(aligns));
1013        max_order = 0;
1014        size = 0;
1015
1016        list_for_each_entry(dev, &bus->devices, bus_list) {
1017                int i;
1018
1019                for (i = 0; i < PCI_NUM_RESOURCES; i++) {
1020                        struct resource *r = &dev->resource[i];
1021                        resource_size_t r_size;
1022
1023                        if (r->parent || (r->flags & IORESOURCE_PCI_FIXED) ||
1024                            ((r->flags & mask) != type &&
1025                             (r->flags & mask) != type2 &&
1026                             (r->flags & mask) != type3))
1027                                continue;
1028                        r_size = resource_size(r);
1029#ifdef CONFIG_PCI_IOV
 1030                        /* Put SR-IOV requested resources on the optional list */
1031                        if (realloc_head && i >= PCI_IOV_RESOURCES &&
1032                                        i <= PCI_IOV_RESOURCE_END) {
1033                                add_align = max(pci_resource_alignment(dev, r), add_align);
1034                                r->end = r->start - 1;
1035                                add_to_list(realloc_head, dev, r, r_size, 0 /* Don't care */);
1036                                children_add_size += r_size;
1037                                continue;
1038                        }
1039#endif
1040                        /*
1041                         * aligns[0] is for 1MB (since bridge memory
1042                         * windows are always at least 1MB aligned), so
1043                         * keep "order" from being negative for smaller
1044                         * resources.
1045                         */
1046                        align = pci_resource_alignment(dev, r);
1047                        order = __ffs(align) - 20;
1048                        if (order < 0)
1049                                order = 0;
1050                        if (order >= ARRAY_SIZE(aligns)) {
1051                                pci_warn(dev, "disabling BAR %d: %pR (bad alignment %#llx)\n",
1052                                         i, r, (unsigned long long) align);
1053                                r->flags = 0;
1054                                continue;
1055                        }
1056                        size += max(r_size, align);
1057                        /*
1058                         * Exclude ranges with size > align from calculation of
1059                         * the alignment.
1060                         */
1061                        if (r_size <= align)
1062                                aligns[order] += align;
1063                        if (order > max_order)
1064                                max_order = order;
1065
1066                        if (realloc_head) {
1067                                children_add_size += get_res_add_size(realloc_head, r);
1068                                children_add_align = get_res_add_align(realloc_head, r);
1069                                add_align = max(add_align, children_add_align);
1070                        }
1071                }
1072        }
1073
1074        min_align = calculate_mem_align(aligns, max_order);
1075        min_align = max(min_align, window_alignment(bus, b_res->flags));
1076        size0 = calculate_memsize(size, min_size, 0, 0, resource_size(b_res), min_align);
1077        add_align = max(min_align, add_align);
1078        size1 = (!realloc_head || (realloc_head && !add_size && !children_add_size)) ? size0 :
1079                calculate_memsize(size, min_size, add_size, children_add_size,
1080                                resource_size(b_res), add_align);
1081        if (!size0 && !size1) {
1082                if (bus->self && (b_res->start || b_res->end))
1083                        pci_info(bus->self, "disabling bridge window %pR to %pR (unused)\n",
1084                                 b_res, &bus->busn_res);
1085                b_res->flags = 0;
1086                return 0;
1087        }
1088        b_res->start = min_align;
1089        b_res->end = size0 + min_align - 1;
1090        b_res->flags |= IORESOURCE_STARTALIGN;
1091        if (bus->self && size1 > size0 && realloc_head) {
1092                add_to_list(realloc_head, bus->self, b_res, size1-size0, add_align);
1093                pci_info(bus->self, "bridge window %pR to %pR add_size %llx add_align %llx\n",
1094                           b_res, &bus->busn_res,
1095                           (unsigned long long) (size1 - size0),
1096                           (unsigned long long) add_align);
1097        }
1098        return 0;
1099}
1100
1101unsigned long pci_cardbus_resource_alignment(struct resource *res)
1102{
1103        if (res->flags & IORESOURCE_IO)
1104                return pci_cardbus_io_size;
1105        if (res->flags & IORESOURCE_MEM)
1106                return pci_cardbus_mem_size;
1107        return 0;
1108}
1109
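/*
 * Reserve fixed-size I/O and memory windows for a CardBus bridge; the
 * optional second half of each window goes on @realloc_head when present.
 */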
1110static void pci_bus_size_cardbus(struct pci_bus *bus,
1111                                 struct list_head *realloc_head)
1112{
1113        struct pci_dev *bridge = bus->self;
1114        struct resource *b_res;
1115        resource_size_t b_res_3_size = pci_cardbus_mem_size * 2;
1116        u16 ctrl;
1117
1118        b_res = &bridge->resource[PCI_CB_BRIDGE_IO_0_WINDOW];
1119        if (b_res->parent)
1120                goto handle_b_res_1;
1121        /*
1122         * Reserve some resources for CardBus.  We reserve a fixed amount
1123         * of bus space for CardBus bridges.
1124         */
1125        b_res->start = pci_cardbus_io_size;
1126        b_res->end = b_res->start + pci_cardbus_io_size - 1;
1127        b_res->flags |= IORESOURCE_IO | IORESOURCE_STARTALIGN;
1128        if (realloc_head) {
1129                b_res->end -= pci_cardbus_io_size;
1130                add_to_list(realloc_head, bridge, b_res, pci_cardbus_io_size,
1131                            pci_cardbus_io_size);
1132        }
1133
1134handle_b_res_1:
1135        b_res = &bridge->resource[PCI_CB_BRIDGE_IO_1_WINDOW];
1136        if (b_res->parent)
1137                goto handle_b_res_2;
1138        b_res->start = pci_cardbus_io_size;
1139        b_res->end = b_res->start + pci_cardbus_io_size - 1;
1140        b_res->flags |= IORESOURCE_IO | IORESOURCE_STARTALIGN;
1141        if (realloc_head) {
1142                b_res->end -= pci_cardbus_io_size;
1143                add_to_list(realloc_head, bridge, b_res, pci_cardbus_io_size,
1144                            pci_cardbus_io_size);
1145        }
1146
1147handle_b_res_2:
1148        /* MEM1 must not be pref MMIO */
1149        pci_read_config_word(bridge, PCI_CB_BRIDGE_CONTROL, &ctrl);
1150        if (ctrl & PCI_CB_BRIDGE_CTL_PREFETCH_MEM1) {
1151                ctrl &= ~PCI_CB_BRIDGE_CTL_PREFETCH_MEM1;
1152                pci_write_config_word(bridge, PCI_CB_BRIDGE_CONTROL, ctrl);
1153                pci_read_config_word(bridge, PCI_CB_BRIDGE_CONTROL, &ctrl);
1154        }
1155
1156        /* Check whether prefetchable memory is supported by this bridge. */
1157        pci_read_config_word(bridge, PCI_CB_BRIDGE_CONTROL, &ctrl);
1158        if (!(ctrl & PCI_CB_BRIDGE_CTL_PREFETCH_MEM0)) {
1159                ctrl |= PCI_CB_BRIDGE_CTL_PREFETCH_MEM0;
1160                pci_write_config_word(bridge, PCI_CB_BRIDGE_CONTROL, ctrl);
1161                pci_read_config_word(bridge, PCI_CB_BRIDGE_CONTROL, &ctrl);
1162        }
1163
1164        b_res = &bridge->resource[PCI_CB_BRIDGE_MEM_0_WINDOW];
1165        if (b_res->parent)
1166                goto handle_b_res_3;
1167        /*
1168         * If we have prefetchable memory support, allocate two regions.
1169         * Otherwise, allocate one region of twice the size.
1170         */
1171        if (ctrl & PCI_CB_BRIDGE_CTL_PREFETCH_MEM0) {
1172                b_res->start = pci_cardbus_mem_size;
1173                b_res->end = b_res->start + pci_cardbus_mem_size - 1;
1174                b_res->flags |= IORESOURCE_MEM | IORESOURCE_PREFETCH |
1175                                    IORESOURCE_STARTALIGN;
1176                if (realloc_head) {
1177                        b_res->end -= pci_cardbus_mem_size;
1178                        add_to_list(realloc_head, bridge, b_res,
1179                                    pci_cardbus_mem_size, pci_cardbus_mem_size);
1180                }
1181
1182                /* Reduce that to half */
1183                b_res_3_size = pci_cardbus_mem_size;
1184        }
1185
1186handle_b_res_3:
1187        b_res = &bridge->resource[PCI_CB_BRIDGE_MEM_1_WINDOW];
1188        if (b_res->parent)
1189                goto handle_done;
1190        b_res->start = pci_cardbus_mem_size;
1191        b_res->end = b_res->start + b_res_3_size - 1;
1192        b_res->flags |= IORESOURCE_MEM | IORESOURCE_STARTALIGN;
1193        if (realloc_head) {
1194                b_res->end -= b_res_3_size;
1195                add_to_list(realloc_head, bridge, b_res, b_res_3_size,
1196                            pci_cardbus_mem_size);
1197        }
1198
1199handle_done:
1200        ;
1201}
1202
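/* Size the bridge windows of @bus and, recursively, of every bus below it */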
1203void __pci_bus_size_bridges(struct pci_bus *bus, struct list_head *realloc_head)
1204{
1205        struct pci_dev *dev;
1206        unsigned long mask, prefmask, type2 = 0, type3 = 0;
1207        resource_size_t additional_io_size = 0, additional_mmio_size = 0,
1208                        additional_mmio_pref_size = 0;
1209        struct resource *pref;
1210        struct pci_host_bridge *host;
1211        int hdr_type, i, ret;
1212
1213        list_for_each_entry(dev, &bus->devices, bus_list) {
1214                struct pci_bus *b = dev->subordinate;
1215                if (!b)
1216                        continue;
1217
1218                switch (dev->hdr_type) {
1219                case PCI_HEADER_TYPE_CARDBUS:
1220                        pci_bus_size_cardbus(b, realloc_head);
1221                        break;
1222
1223                case PCI_HEADER_TYPE_BRIDGE:
1224                default:
1225                        __pci_bus_size_bridges(b, realloc_head);
1226                        break;
1227                }
1228        }
1229
1230        /* The root bus? */
1231        if (pci_is_root_bus(bus)) {
1232                host = to_pci_host_bridge(bus->bridge);
1233                if (!host->size_windows)
1234                        return;
1235                pci_bus_for_each_resource(bus, pref, i)
1236                        if (pref && (pref->flags & IORESOURCE_PREFETCH))
1237                                break;
1238                hdr_type = -1;  /* Intentionally invalid - not a PCI device. */
1239        } else {
1240                pref = &bus->self->resource[PCI_BRIDGE_PREF_MEM_WINDOW];
1241                hdr_type = bus->self->hdr_type;
1242        }
1243
1244        switch (hdr_type) {
1245        case PCI_HEADER_TYPE_CARDBUS:
1246                /* Don't size CardBuses yet */
1247                break;
1248
1249        case PCI_HEADER_TYPE_BRIDGE:
1250                pci_bridge_check_ranges(bus);
1251                if (bus->self->is_hotplug_bridge) {
1252                        additional_io_size  = pci_hotplug_io_size;
1253                        additional_mmio_size = pci_hotplug_mmio_size;
1254                        additional_mmio_pref_size = pci_hotplug_mmio_pref_size;
1255                }
1256                fallthrough;
1257        default:
1258                pbus_size_io(bus, realloc_head ? 0 : additional_io_size,
1259                             additional_io_size, realloc_head);
1260
1261                /*
1262                 * If there's a 64-bit prefetchable MMIO window, compute
1263                 * the size required to put all 64-bit prefetchable
1264                 * resources in it.
1265                 */
1266                mask = IORESOURCE_MEM;
1267                prefmask = IORESOURCE_MEM | IORESOURCE_PREFETCH;
1268                if (pref && (pref->flags & IORESOURCE_MEM_64)) {
1269                        prefmask |= IORESOURCE_MEM_64;
1270                        ret = pbus_size_mem(bus, prefmask, prefmask,
1271                                prefmask, prefmask,
1272                                realloc_head ? 0 : additional_mmio_pref_size,
1273                                additional_mmio_pref_size, realloc_head);
1274
1275                        /*
1276                         * If successful, all non-prefetchable resources
1277                         * and any 32-bit prefetchable resources will go in
1278                         * the non-prefetchable window.
1279                         */
1280                        if (ret == 0) {
1281                                mask = prefmask;
1282                                type2 = prefmask & ~IORESOURCE_MEM_64;
1283                                type3 = prefmask & ~IORESOURCE_PREFETCH;
1284                        }
1285                }
1286
1287                /*
1288                 * If there is no 64-bit prefetchable window, compute the
1289                 * size required to put all prefetchable resources in the
1290                 * 32-bit prefetchable window (if there is one).
1291                 */
1292                if (!type2) {
1293                        prefmask &= ~IORESOURCE_MEM_64;
1294                        ret = pbus_size_mem(bus, prefmask, prefmask,
1295                                prefmask, prefmask,
1296                                realloc_head ? 0 : additional_mmio_pref_size,
1297                                additional_mmio_pref_size, realloc_head);
1298
1299                        /*
1300                         * If successful, only non-prefetchable resources
1301                         * will go in the non-prefetchable window.
1302                         */
1303                        if (ret == 0)
1304                                mask = prefmask;
1305                        else
1306                                additional_mmio_size += additional_mmio_pref_size;
1307
1308                        type2 = type3 = IORESOURCE_MEM;
1309                }
1310
1311                /*
1312                 * Compute the size required to put everything else in the
1313                 * non-prefetchable window. This includes:
1314                 *
1315                 *   - all non-prefetchable resources
1316                 *   - 32-bit prefetchable resources if there's a 64-bit
1317                 *     prefetchable window or no prefetchable window at all
1318                 *   - 64-bit prefetchable resources if there's no prefetchable
1319                 *     window at all
1320                 *
1321                 * Note that the strategy in __pci_assign_resource() must match
1322                 * that used here. Specifically, we cannot put a 32-bit
1323                 * prefetchable resource in a 64-bit prefetchable window.
1324                 */
1325                pbus_size_mem(bus, mask, IORESOURCE_MEM, type2, type3,
1326                              realloc_head ? 0 : additional_mmio_size,
1327                              additional_mmio_size, realloc_head);
1328                break;
1329        }
1330}
1331
1332void pci_bus_size_bridges(struct pci_bus *bus)
1333{
1334        __pci_bus_size_bridges(bus, NULL);
1335}
1336EXPORT_SYMBOL(pci_bus_size_bridges);
1337
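/* Claim a fixed resource from a matching, containing window of bus @b */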
1338static void assign_fixed_resource_on_bus(struct pci_bus *b, struct resource *r)
1339{
1340        int i;
1341        struct resource *parent_r;
1342        unsigned long mask = IORESOURCE_IO | IORESOURCE_MEM |
1343                             IORESOURCE_PREFETCH;
1344
1345        pci_bus_for_each_resource(b, parent_r, i) {
1346                if (!parent_r)
1347                        continue;
1348
1349                if ((r->flags & mask) == (parent_r->flags & mask) &&
1350                    resource_contains(parent_r, r))
1351                        request_resource(parent_r, r);
1352        }
1353}
1354
1355/*
1356 * Try to assign any resources marked as IORESOURCE_PCI_FIXED, as they are
1357 * skipped by pbus_assign_resources_sorted().
1358 */
1359static void pdev_assign_fixed_resources(struct pci_dev *dev)
1360{
1361        int i;
1362
1363        for (i = 0; i < PCI_NUM_RESOURCES; i++) {
1364                struct pci_bus *b;
1365                struct resource *r = &dev->resource[i];
1366
1367                if (r->parent || !(r->flags & IORESOURCE_PCI_FIXED) ||
1368                    !(r->flags & (IORESOURCE_IO | IORESOURCE_MEM)))
1369                        continue;
1370
1371                b = dev->bus;
1372                while (b && !r->parent) {
1373                        assign_fixed_resource_on_bus(b, r);
1374                        b = b->parent;
1375                }
1376        }
1377}
1378
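/*
 * Assign resources to the devices on @bus and, recursively, on all
 * subordinate buses, programming bridge and CardBus windows on the way back
 * up.  Failed assignments are recorded on @fail_head and optional resources
 * are taken from @realloc_head when those lists are supplied.
 */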
1379void __pci_bus_assign_resources(const struct pci_bus *bus,
1380                                struct list_head *realloc_head,
1381                                struct list_head *fail_head)
1382{
1383        struct pci_bus *b;
1384        struct pci_dev *dev;
1385
1386        pbus_assign_resources_sorted(bus, realloc_head, fail_head);
1387
1388        list_for_each_entry(dev, &bus->devices, bus_list) {
1389                pdev_assign_fixed_resources(dev);
1390
1391                b = dev->subordinate;
1392                if (!b)
1393                        continue;
1394
1395                __pci_bus_assign_resources(b, realloc_head, fail_head);
1396
1397                switch (dev->hdr_type) {
1398                case PCI_HEADER_TYPE_BRIDGE:
1399                        if (!pci_is_enabled(dev))
1400                                pci_setup_bridge(b);
1401                        break;
1402
1403                case PCI_HEADER_TYPE_CARDBUS:
1404                        pci_setup_cardbus(b);
1405                        break;
1406
1407                default:
1408                        pci_info(dev, "not setting up bridge for bus %04x:%02x\n",
1409                                 pci_domain_nr(b), b->number);
1410                        break;
1411                }
1412        }
1413}
1414
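/**
 * pci_bus_assign_resources() - Assign resources below a bus
 * @bus:        Bus to assign resources under
 *
 * Assign resources to all devices and bridges below @bus, without tracking
 * optional resources or recording failures.
 */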
1415void pci_bus_assign_resources(const struct pci_bus *bus)
1416{
1417        __pci_bus_assign_resources(bus, NULL, NULL);
1418}
1419EXPORT_SYMBOL(pci_bus_assign_resources);
1420
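/* Claim the non-bridge-window resources of a device that are already set up */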
1421static void pci_claim_device_resources(struct pci_dev *dev)
1422{
1423        int i;
1424
1425        for (i = 0; i < PCI_BRIDGE_RESOURCES; i++) {
1426                struct resource *r = &dev->resource[i];
1427
1428                if (!r->flags || r->parent)
1429                        continue;
1430
1431                pci_claim_resource(dev, i);
1432        }
1433}
1434
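/* Claim the bridge window resources of a bridge that are already set up */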
1435static void pci_claim_bridge_resources(struct pci_dev *dev)
1436{
1437        int i;
1438
1439        for (i = PCI_BRIDGE_RESOURCES; i < PCI_NUM_RESOURCES; i++) {
1440                struct resource *r = &dev->resource[i];
1441
1442                if (!r->flags || r->parent)
1443                        continue;
1444
1445                pci_claim_bridge_resource(dev, i);
1446        }
1447}
1448
1449static void pci_bus_allocate_dev_resources(struct pci_bus *b)
1450{
1451        struct pci_dev *dev;
1452        struct pci_bus *child;
1453
1454        list_for_each_entry(dev, &b->devices, bus_list) {
1455                pci_claim_device_resources(dev);
1456
1457                child = dev->subordinate;
1458                if (child)
1459                        pci_bus_allocate_dev_resources(child);
1460        }
1461}
1462
1463static void pci_bus_allocate_resources(struct pci_bus *b)
1464{
1465        struct pci_bus *child;
1466
1467        /*
1468         * Carry out a depth-first search on the PCI bus tree to allocate
1469         * bridge apertures.  Read the programmed bridge bases and
1470         * recursively claim the respective bridge resources.
1471         */
1472        if (b->self) {
1473                pci_read_bridge_bases(b);
1474                pci_claim_bridge_resources(b->self);
1475        }
1476
1477        list_for_each_entry(child, &b->children, node)
1478                pci_bus_allocate_resources(child);
1479}
1480
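/**
 * pci_bus_claim_resources() - Claim existing resource assignments below a bus
 * @b:          Bus to start from
 *
 * Walk the bus tree depth-first, claiming the programmed bridge windows,
 * then claim the resources of every device below @b.  Typically used when
 * the resource assignments made by the firmware should be preserved.
 */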
1481void pci_bus_claim_resources(struct pci_bus *b)
1482{
1483        pci_bus_allocate_resources(b);
1484        pci_bus_allocate_dev_resources(b);
1485}
1486EXPORT_SYMBOL(pci_bus_claim_resources);
1487
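/*
 * Assign the windows of @bridge itself, then assign resources on its
 * secondary bus and program the bridge (or CardBus) window registers.
 */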
1488static void __pci_bridge_assign_resources(const struct pci_dev *bridge,
1489                                          struct list_head *add_head,
1490                                          struct list_head *fail_head)
1491{
1492        struct pci_bus *b;
1493
1494        pdev_assign_resources_sorted((struct pci_dev *)bridge,
1495                                         add_head, fail_head);
1496
1497        b = bridge->subordinate;
1498        if (!b)
1499                return;
1500
1501        __pci_bus_assign_resources(b, add_head, fail_head);
1502
1503        switch (bridge->class >> 8) {
1504        case PCI_CLASS_BRIDGE_PCI:
1505                pci_setup_bridge(b);
1506                break;
1507
1508        case PCI_CLASS_BRIDGE_CARDBUS:
1509                pci_setup_cardbus(b);
1510                break;
1511
1512        default:
1513                pci_info(bridge, "not setting up bridge for bus %04x:%02x\n",
1514                         pci_domain_nr(b), b->number);
1515                break;
1516        }
1517}
1518
1519#define PCI_RES_TYPE_MASK \
1520        (IORESOURCE_IO | IORESOURCE_MEM | IORESOURCE_PREFETCH |\
1521         IORESOURCE_MEM_64)
1522
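/*
 * Release the bridge window of @bus->self that matches the failing resource
 * @type, so that a larger window can be assigned on a later pass.
 */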
1523static void pci_bridge_release_resources(struct pci_bus *bus,
1524                                         unsigned long type)
1525{
1526        struct pci_dev *dev = bus->self;
1527        struct resource *r;
1528        unsigned int old_flags = 0;
1529        struct resource *b_res;
1530        int idx = 1;
1531
1532        b_res = &dev->resource[PCI_BRIDGE_RESOURCES];
1533
1534        /*
1535         * 1. If IO port assignment fails, release bridge IO port.
1536         * 2. If non-pref MMIO assignment fails, release bridge non-pref MMIO.
1537         * 3. If 64-bit pref MMIO assignment fails, and bridge pref is 64-bit,
1538         *    release bridge pref MMIO.
1539         * 4. If pref MMIO assignment fails, and bridge pref is 32-bit,
1540         *    release bridge pref MMIO.
1541         * 5. If pref MMIO assignment fails, and bridge pref is not
1542         *    assigned, release bridge nonpref MMIO.
1543         */
1544        if (type & IORESOURCE_IO)
1545                idx = 0;
1546        else if (!(type & IORESOURCE_PREFETCH))
1547                idx = 1;
1548        else if ((type & IORESOURCE_MEM_64) &&
1549                 (b_res[2].flags & IORESOURCE_MEM_64))
1550                idx = 2;
1551        else if (!(b_res[2].flags & IORESOURCE_MEM_64) &&
1552                 (b_res[2].flags & IORESOURCE_PREFETCH))
1553                idx = 2;
1554        else
1555                idx = 1;
1556
1557        r = &b_res[idx];
1558
1559        if (!r->parent)
1560                return;
1561
1562        /* If there are children, release them all */
1563        release_child_resources(r);
1564        if (!release_resource(r)) {
1565                type = old_flags = r->flags & PCI_RES_TYPE_MASK;
1566                pci_info(dev, "resource %d %pR released\n",
1567                         PCI_BRIDGE_RESOURCES + idx, r);
1568                /* Keep the old size */
1569                r->end = resource_size(r) - 1;
1570                r->start = 0;
1571                r->flags = 0;
1572
1573                /* Avoid touching the one without PREF */
1574                if (type & IORESOURCE_PREFETCH)
1575                        type = IORESOURCE_PREFETCH;
1576                __pci_setup_bridge(bus, type);
1577                /* For next child res under same bridge */
1578                r->flags = old_flags;
1579        }
1580}
1581
1582enum release_type {
1583        leaf_only,
1584        whole_subtree,
1585};
1586
1587/*
1588 * Try to release PCI bridge resources from leaf bridges, so we can allocate
1589 * a larger window later.
1590 */
1591static void pci_bus_release_bridge_resources(struct pci_bus *bus,
1592                                             unsigned long type,
1593                                             enum release_type rel_type)
1594{
1595        struct pci_dev *dev;
1596        bool is_leaf_bridge = true;
1597
1598        list_for_each_entry(dev, &bus->devices, bus_list) {
1599                struct pci_bus *b = dev->subordinate;
1600                if (!b)
1601                        continue;
1602
1603                is_leaf_bridge = false;
1604
1605                if ((dev->class >> 8) != PCI_CLASS_BRIDGE_PCI)
1606                        continue;
1607
1608                if (rel_type == whole_subtree)
1609                        pci_bus_release_bridge_resources(b, type,
1610                                                 whole_subtree);
1611        }
1612
1613        if (pci_is_root_bus(bus))
1614                return;
1615
1616        if ((bus->self->class >> 8) != PCI_CLASS_BRIDGE_PCI)
1617                return;
1618
1619        if ((rel_type == whole_subtree) || is_leaf_bridge)
1620                pci_bridge_release_resources(bus, type);
1621}
1622
1623static void pci_bus_dump_res(struct pci_bus *bus)
1624{
1625        struct resource *res;
1626        int i;
1627
1628        pci_bus_for_each_resource(bus, res, i) {
1629                if (!res || !res->end || !res->flags)
1630                        continue;
1631
1632                dev_info(&bus->dev, "resource %d %pR\n", i, res);
1633        }
1634}
1635
1636static void pci_bus_dump_resources(struct pci_bus *bus)
1637{
1638        struct pci_bus *b;
1639        struct pci_dev *dev;
1640
1642        pci_bus_dump_res(bus);
1643
1644        list_for_each_entry(dev, &bus->devices, bus_list) {
1645                b = dev->subordinate;
1646                if (!b)
1647                        continue;
1648
1649                pci_bus_dump_resources(b);
1650        }
1651}
1652
1653static int pci_bus_get_depth(struct pci_bus *bus)
1654{
1655        int depth = 0;
1656        struct pci_bus *child_bus;
1657
1658        list_for_each_entry(child_bus, &bus->children, node) {
1659                int ret;
1660
1661                ret = pci_bus_get_depth(child_bus);
1662                if (ret + 1 > depth)
1663                        depth = ret + 1;
1664        }
1665
1666        return depth;
1667}
1668
1669/*
1670 * -1: undefined, will auto detect later
1671 *  0: disabled by user
1672 *  1: disabled by auto detect
1673 *  2: enabled by user
1674 *  3: enabled by auto detect
1675 */
1676enum enable_type {
1677        undefined = -1,
1678        user_disabled,
1679        auto_disabled,
1680        user_enabled,
1681        auto_enabled,
1682};
1683
1684static enum enable_type pci_realloc_enable = undefined;
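
/* Record the "on"/"off" value given with the "pci=realloc" kernel parameter */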
1685void __init pci_realloc_get_opt(char *str)
1686{
1687        if (!strncmp(str, "off", 3))
1688                pci_realloc_enable = user_disabled;
1689        else if (!strncmp(str, "on", 2))
1690                pci_realloc_enable = user_enabled;
1691}
1692static bool pci_realloc_enabled(enum enable_type enable)
1693{
1694        return enable >= user_enabled;
1695}
1696
1697#if defined(CONFIG_PCI_IOV) && defined(CONFIG_PCI_REALLOC_ENABLE_AUTO)
1698static int iov_resources_unassigned(struct pci_dev *dev, void *data)
1699{
1700        int i;
1701        bool *unassigned = data;
1702
1703        for (i = 0; i < PCI_SRIOV_NUM_BARS; i++) {
1704                struct resource *r = &dev->resource[i + PCI_IOV_RESOURCES];
1705                struct pci_bus_region region;
1706
1707                /* Not assigned or rejected by kernel? */
1708                if (!r->flags)
1709                        continue;
1710
1711                pcibios_resource_to_bus(dev->bus, &region, r);
1712                if (!region.start) {
1713                        *unassigned = true;
1714                        return 1; /* Return early from pci_walk_bus() */
1715                }
1716        }
1717
1718        return 0;
1719}
1720
1721static enum enable_type pci_realloc_detect(struct pci_bus *bus,
1722                                           enum enable_type enable_local)
1723{
1724        bool unassigned = false;
1725        struct pci_host_bridge *host;
1726
1727        if (enable_local != undefined)
1728                return enable_local;
1729
1730        host = pci_find_host_bridge(bus);
1731        if (host->preserve_config)
1732                return auto_disabled;
1733
1734        pci_walk_bus(bus, iov_resources_unassigned, &unassigned);
1735        if (unassigned)
1736                return auto_enabled;
1737
1738        return enable_local;
1739}
1740#else
1741static enum enable_type pci_realloc_detect(struct pci_bus *bus,
1742                                           enum enable_type enable_local)
1743{
1744        return enable_local;
1745}
1746#endif
1747
1748/*
1749 * The first try will not touch PCI bridge resources.
1750 * The second and later tries will release undersized leaf bridge resources.
1751 * We retry up to the maximum bus depth until a good fit is found.
1752 */
1753void pci_assign_unassigned_root_bus_resources(struct pci_bus *bus)
1754{
1755        LIST_HEAD(realloc_head);
1756        /* List of resources that want additional resources */
1757        struct list_head *add_list = NULL;
1758        int tried_times = 0;
1759        enum release_type rel_type = leaf_only;
1760        LIST_HEAD(fail_head);
1761        struct pci_dev_resource *fail_res;
1762        int pci_try_num = 1;
1763        enum enable_type enable_local;
1764
1765        /* Check whether realloc is enabled (by user or auto-detection) */
1766        enable_local = pci_realloc_detect(bus, pci_realloc_enable);
1767        if (pci_realloc_enabled(enable_local)) {
1768                int max_depth = pci_bus_get_depth(bus);
1769
1770                pci_try_num = max_depth + 1;
1771                dev_info(&bus->dev, "max bus depth: %d pci_try_num: %d\n",
1772                         max_depth, pci_try_num);
1773        }
1774
1775again:
1776        /*
1777         * Only the last try uses add_list; earlier tries treat optional
1778         * resources as required so parent bridge resources can be reallocated.
1779         */
1780        if (tried_times + 1 == pci_try_num)
1781                add_list = &realloc_head;
1782        /*
1783         * Depth first, calculate sizes and alignments of all subordinate buses.
1784         */
1785        __pci_bus_size_bridges(bus, add_list);
1786
1787        /* Depth last, allocate resources and update the hardware. */
1788        __pci_bus_assign_resources(bus, add_list, &fail_head);
1789        if (add_list)
1790                BUG_ON(!list_empty(add_list));
1791        tried_times++;
1792
1793        /* Did any device fail to get its resources? */
1794        if (list_empty(&fail_head))
1795                goto dump;
1796
1797        if (tried_times >= pci_try_num) {
1798                if (enable_local == undefined)
1799                        dev_info(&bus->dev, "Some PCI device resources are unassigned, try booting with pci=realloc\n");
1800                else if (enable_local == auto_enabled)
1801                        dev_info(&bus->dev, "Automatically enabled pci realloc, if you have problem, try booting with pci=realloc=off\n");
1802
1803                free_list(&fail_head);
1804                goto dump;
1805        }
1806
1807        dev_info(&bus->dev, "No. %d try to assign unassigned res\n",
1808                 tried_times + 1);
1809
1810        /* From the third try on, release whole subtrees, not just leaf bridges */
1811        if ((tried_times + 1) > 2)
1812                rel_type = whole_subtree;
1813
1814        /*
1815         * Try to release leaf bridge resources that are too small to fit the
1816         * resources of the child devices under that bridge.
1817         */
1818        list_for_each_entry(fail_res, &fail_head, list)
1819                pci_bus_release_bridge_resources(fail_res->dev->bus,
1820                                                 fail_res->flags & PCI_RES_TYPE_MASK,
1821                                                 rel_type);
1822
1823        /* Restore size and flags */
1824        list_for_each_entry(fail_res, &fail_head, list) {
1825                struct resource *res = fail_res->res;
1826                int idx;
1827
1828                res->start = fail_res->start;
1829                res->end = fail_res->end;
1830                res->flags = fail_res->flags;
1831
1832                if (pci_is_bridge(fail_res->dev)) {
1833                        idx = res - &fail_res->dev->resource[0];
1834                        if (idx >= PCI_BRIDGE_RESOURCES &&
1835                            idx <= PCI_BRIDGE_RESOURCE_END)
1836                                res->flags = 0;
1837                }
1838        }
1839        free_list(&fail_head);
1840
1841        goto again;
1842
1843dump:
1844        /* Dump the resources on all buses */
1845        pci_bus_dump_resources(bus);
1846}
1847
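/*
 * Assign any still-unassigned resources on every root bus, and set up any
 * ACPI I/O APICs found below root bridges that have an ACPI companion.
 */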
1848void __init pci_assign_unassigned_resources(void)
1849{
1850        struct pci_bus *root_bus;
1851
1852        list_for_each_entry(root_bus, &pci_root_buses, node) {
1853                pci_assign_unassigned_root_bus_resources(root_bus);
1854
1855                /* Add any I/O APICs below the root bridge's ACPI companion */
1856                if (ACPI_HANDLE(root_bus->bridge))
1857                        acpi_ioapic_add(ACPI_HANDLE(root_bus->bridge));
1858        }
1859}
1860
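/*
 * Resize an unassigned bridge window @res to @new_size and drop it from
 * @add_list so that no optional size is added on top of it later.
 */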
1861static void adjust_bridge_window(struct pci_dev *bridge, struct resource *res,
1862                                 struct list_head *add_list,
1863                                 resource_size_t new_size)
1864{
1865        resource_size_t add_size, size = resource_size(res);
1866
1867        if (res->parent)
1868                return;
1869
1870        if (!new_size)
1871                return;
1872
1873        if (new_size > size) {
1874                add_size = new_size - size;
1875                pci_dbg(bridge, "bridge window %pR extended by %pa\n", res,
1876                        &add_size);
1877        } else if (new_size < size) {
1878                add_size = size - new_size;
1879                pci_dbg(bridge, "bridge window %pR shrunken by %pa\n", res,
1880                        &add_size);
1881        }
1882
1883        res->end = res->start + new_size - 1;
1884        remove_from_list(add_list, res);
1885}
1886
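/*
 * Distribute the spare @io, @mmio and @mmio_pref space on @bus between the
 * hotplug bridges found there, recursing into their subordinate buses.
 */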
1887static void pci_bus_distribute_available_resources(struct pci_bus *bus,
1888                                            struct list_head *add_list,
1889                                            struct resource io,
1890                                            struct resource mmio,
1891                                            struct resource mmio_pref)
1892{
1893        unsigned int normal_bridges = 0, hotplug_bridges = 0;
1894        struct resource *io_res, *mmio_res, *mmio_pref_res;
1895        struct pci_dev *dev, *bridge = bus->self;
1896        resource_size_t io_per_hp, mmio_per_hp, mmio_pref_per_hp, align;
1897
1898        io_res = &bridge->resource[PCI_BRIDGE_IO_WINDOW];
1899        mmio_res = &bridge->resource[PCI_BRIDGE_MEM_WINDOW];
1900        mmio_pref_res = &bridge->resource[PCI_BRIDGE_PREF_MEM_WINDOW];
1901
1902        /*
1903         * The alignment of this bridge's windows has not been considered yet,
1904         * so align the available space before extending the windows.
1905         */
1906        align = pci_resource_alignment(bridge, io_res);
1907        if (!io_res->parent && align)
1908                io.start = min(ALIGN(io.start, align), io.end + 1);
1909
1910        align = pci_resource_alignment(bridge, mmio_res);
1911        if (!mmio_res->parent && align)
1912                mmio.start = min(ALIGN(mmio.start, align), mmio.end + 1);
1913
1914        align = pci_resource_alignment(bridge, mmio_pref_res);
1915        if (!mmio_pref_res->parent && align)
1916                mmio_pref.start = min(ALIGN(mmio_pref.start, align),
1917                        mmio_pref.end + 1);
1918
1919        /*
1920         * Now that we have adjusted for alignment, update the bridge window
1921         * resources to fill as much remaining resource space as possible.
1922         */
1923        adjust_bridge_window(bridge, io_res, add_list, resource_size(&io));
1924        adjust_bridge_window(bridge, mmio_res, add_list, resource_size(&mmio));
1925        adjust_bridge_window(bridge, mmio_pref_res, add_list,
1926                             resource_size(&mmio_pref));
1927
1928        /*
1929         * Calculate how many hotplug bridges and normal bridges there
1930         * are on this bus.  We will distribute the additional available
1931         * resources between hotplug bridges.
1932         */
1933        for_each_pci_bridge(dev, bus) {
1934                if (dev->is_hotplug_bridge)
1935                        hotplug_bridges++;
1936                else
1937                        normal_bridges++;
1938        }
1939
1940        /*
1941         * If there is only one bridge on the bus, it gets all the available
1942         * resources, which it can then distribute to the possible hotplug
1943         * bridges below.
1944         */
1945        if (hotplug_bridges + normal_bridges == 1) {
1946                dev = list_first_entry(&bus->devices, struct pci_dev, bus_list);
1947                if (dev->subordinate)
1948                        pci_bus_distribute_available_resources(dev->subordinate,
1949                                add_list, io, mmio, mmio_pref);
1950                return;
1951        }
1952
1953        if (hotplug_bridges == 0)
1954                return;
1955
1956        /*
1957         * Calculate the total amount of extra resource space we can
1958         * pass to bridges below this one.  This is basically the
1959         * extra space reduced by the minimal required space for the
1960         * non-hotplug bridges.
1961         */
1962        for_each_pci_bridge(dev, bus) {
1963                resource_size_t used_size;
1964                struct resource *res;
1965
1966                if (dev->is_hotplug_bridge)
1967                        continue;
1968
1969                /*
1970                 * Reduce the available resource space by what the
1971                 * bridge and devices below it occupy.
1972                 */
1973                res = &dev->resource[PCI_BRIDGE_IO_WINDOW];
1974                align = pci_resource_alignment(dev, res);
1975                align = align ? ALIGN(io.start, align) - io.start : 0;
1976                used_size = align + resource_size(res);
1977                if (!res->parent)
1978                        io.start = min(io.start + used_size, io.end + 1);
1979
1980                res = &dev->resource[PCI_BRIDGE_MEM_WINDOW];
1981                align = pci_resource_alignment(dev, res);
1982                align = align ? ALIGN(mmio.start, align) - mmio.start : 0;
1983                used_size = align + resource_size(res);
1984                if (!res->parent)
1985                        mmio.start = min(mmio.start + used_size, mmio.end + 1);
1986
1987                res = &dev->resource[PCI_BRIDGE_PREF_MEM_WINDOW];
1988                align = pci_resource_alignment(dev, res);
1989                align = align ? ALIGN(mmio_pref.start, align) -
1990                        mmio_pref.start : 0;
1991                used_size = align + resource_size(res);
1992                if (!res->parent)
1993                        mmio_pref.start = min(mmio_pref.start + used_size,
1994                                mmio_pref.end + 1);
1995        }
1996
1997        io_per_hp = div64_ul(resource_size(&io), hotplug_bridges);
1998        mmio_per_hp = div64_ul(resource_size(&mmio), hotplug_bridges);
1999        mmio_pref_per_hp = div64_ul(resource_size(&mmio_pref),
2000                hotplug_bridges);
2001
2002        /*
2003         * Go over devices on this bus and distribute the remaining
2004         * resource space between hotplug bridges.
2005         */
2006        for_each_pci_bridge(dev, bus) {
2007                struct pci_bus *b;
2008
2009                b = dev->subordinate;
2010                if (!b || !dev->is_hotplug_bridge)
2011                        continue;
2012
2013                /*
2014                 * Distribute available extra resources equally between
2015                 * hotplug-capable downstream ports taking alignment into
2016                 * account.
2017                 */
2018                io.end = io.start + io_per_hp - 1;
2019                mmio.end = mmio.start + mmio_per_hp - 1;
2020                mmio_pref.end = mmio_pref.start + mmio_pref_per_hp - 1;
2021
2022                pci_bus_distribute_available_resources(b, add_list, io, mmio,
2023                                                       mmio_pref);
2024
2025                io.start += io_per_hp;
2026                mmio.start += mmio_per_hp;
2027                mmio_pref.start += mmio_pref_per_hp;
2028        }
2029}
2030
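/*
 * Pass the extra space in a hotplug bridge's windows down to the hotplug
 * bridges on its secondary bus.
 */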
2031static void pci_bridge_distribute_available_resources(struct pci_dev *bridge,
2032                                                     struct list_head *add_list)
2033{
2034        struct resource available_io, available_mmio, available_mmio_pref;
2035
2036        if (!bridge->is_hotplug_bridge)
2037                return;
2038
2039        /* Take the initial extra resources from the hotplug port */
2040        available_io = bridge->resource[PCI_BRIDGE_IO_WINDOW];
2041        available_mmio = bridge->resource[PCI_BRIDGE_MEM_WINDOW];
2042        available_mmio_pref = bridge->resource[PCI_BRIDGE_PREF_MEM_WINDOW];
2043
2044        pci_bus_distribute_available_resources(bridge->subordinate,
2045                                               add_list, available_io,
2046                                               available_mmio,
2047                                               available_mmio_pref);
2048}
2049
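/**
 * pci_assign_unassigned_bridge_resources() - Assign resources below a bridge
 * @bridge:     Bridge whose secondary bus needs resources assigned
 *
 * Size, distribute and assign resources for the devices below @bridge,
 * releasing undersized bridge windows and retrying once if the first pass
 * fails.  Typically called by PCI hotplug drivers after new devices have
 * been enumerated below @bridge.
 */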
2050void pci_assign_unassigned_bridge_resources(struct pci_dev *bridge)
2051{
2052        struct pci_bus *parent = bridge->subordinate;
2053        /* List of resources that want additional resources */
2054        LIST_HEAD(add_list);
2055
2056        int tried_times = 0;
2057        LIST_HEAD(fail_head);
2058        struct pci_dev_resource *fail_res;
2059        int retval;
2060
2061again:
2062        __pci_bus_size_bridges(parent, &add_list);
2063
2064        /*
2065         * Distribute remaining resources (if any) equally between hotplug
2066         * bridges below.  This makes it possible to extend the hierarchy
2067         * later without running out of resources.
2068         */
2069        pci_bridge_distribute_available_resources(bridge, &add_list);
2070
2071        __pci_bridge_assign_resources(bridge, &add_list, &fail_head);
2072        BUG_ON(!list_empty(&add_list));
2073        tried_times++;
2074
2075        if (list_empty(&fail_head))
2076                goto enable_all;
2077
2078        if (tried_times >= 2) {
2079                /* Still failing; no need to try again */
2080                free_list(&fail_head);
2081                goto enable_all;
2082        }
2083
2084        printk(KERN_DEBUG "PCI: No. %d try to assign unassigned res\n",
2085                         tried_times + 1);
2086
2087        /*
2088         * Try to release leaf bridge resources that aren't big enough
2089         * to contain child device resources.
2090         */
2091        list_for_each_entry(fail_res, &fail_head, list)
2092                pci_bus_release_bridge_resources(fail_res->dev->bus,
2093                                                 fail_res->flags & PCI_RES_TYPE_MASK,
2094                                                 whole_subtree);
2095
2096        /* Restore size and flags */
2097        list_for_each_entry(fail_res, &fail_head, list) {
2098                struct resource *res = fail_res->res;
2099                int idx;
2100
2101                res->start = fail_res->start;
2102                res->end = fail_res->end;
2103                res->flags = fail_res->flags;
2104
2105                if (pci_is_bridge(fail_res->dev)) {
2106                        idx = res - &fail_res->dev->resource[0];
2107                        if (idx >= PCI_BRIDGE_RESOURCES &&
2108                            idx <= PCI_BRIDGE_RESOURCE_END)
2109                                res->flags = 0;
2110                }
2111        }
2112        free_list(&fail_head);
2113
2114        goto again;
2115
2116enable_all:
2117        retval = pci_reenable_device(bridge);
2118        if (retval)
2119                pci_err(bridge, "Error reenabling bridge (%d)\n", retval);
2120        pci_set_master(bridge);
2121}
2122EXPORT_SYMBOL_GPL(pci_assign_unassigned_bridge_resources);
2123
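/**
 * pci_reassign_bridge_resources() - Try to reassign bridge windows of a type
 * @bridge:     Bridge whose windows should be grown
 * @type:       IORESOURCE_* mask selecting the window type
 *
 * Walk up from @bridge towards the root, releasing matching windows that
 * have no child resources, then size and assign resources below @bridge
 * again.  On failure the previous configuration is restored.
 *
 * Return: 0 on success, -ENOENT if no window could be released, or another
 * negative errno on failure.
 */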
2124int pci_reassign_bridge_resources(struct pci_dev *bridge, unsigned long type)
2125{
2126        struct pci_dev_resource *dev_res;
2127        struct pci_dev *next;
2128        LIST_HEAD(saved);
2129        LIST_HEAD(added);
2130        LIST_HEAD(failed);
2131        unsigned int i;
2132        int ret;
2133
2134        down_read(&pci_bus_sem);
2135
2136        /* Walk up to the root bus, releasing bridge BARs when possible */
2137        next = bridge;
2138        do {
2139                bridge = next;
2140                for (i = PCI_BRIDGE_RESOURCES; i < PCI_BRIDGE_RESOURCE_END;
2141                     i++) {
2142                        struct resource *res = &bridge->resource[i];
2143
2144                        if ((res->flags ^ type) & PCI_RES_TYPE_MASK)
2145                                continue;
2146
2147                        /* Ignore BARs which are still in use */
2148                        if (res->child)
2149                                continue;
2150
2151                        ret = add_to_list(&saved, bridge, res, 0, 0);
2152                        if (ret)
2153                                goto cleanup;
2154
2155                        pci_info(bridge, "BAR %d: releasing %pR\n",
2156                                 i, res);
2157
2158                        if (res->parent)
2159                                release_resource(res);
2160                        res->start = 0;
2161                        res->end = 0;
2162                        break;
2163                }
2164                if (i == PCI_BRIDGE_RESOURCE_END)
2165                        break;
2166
2167                next = bridge->bus ? bridge->bus->self : NULL;
2168        } while (next);
2169
2170        if (list_empty(&saved)) {
2171                up_read(&pci_bus_sem);
2172                return -ENOENT;
2173        }
2174
2175        __pci_bus_size_bridges(bridge->subordinate, &added);
2176        __pci_bridge_assign_resources(bridge, &added, &failed);
2177        BUG_ON(!list_empty(&added));
2178
2179        if (!list_empty(&failed)) {
2180                ret = -ENOSPC;
2181                goto cleanup;
2182        }
2183
2184        list_for_each_entry(dev_res, &saved, list) {
2185                /* Skip the bridge we just assigned resources for */
2186                if (bridge == dev_res->dev)
2187                        continue;
2188
2189                bridge = dev_res->dev;
2190                pci_setup_bridge(bridge->subordinate);
2191        }
2192
2193        free_list(&saved);
2194        up_read(&pci_bus_sem);
2195        return 0;
2196
2197cleanup:
2198        /* Restore size and flags */
2199        list_for_each_entry(dev_res, &failed, list) {
2200                struct resource *res = dev_res->res;
2201
2202                res->start = dev_res->start;
2203                res->end = dev_res->end;
2204                res->flags = dev_res->flags;
2205        }
2206        free_list(&failed);
2207
2208        /* Revert to the old configuration */
2209        list_for_each_entry(dev_res, &saved, list) {
2210                struct resource *res = dev_res->res;
2211
2212                bridge = dev_res->dev;
2213                i = res - bridge->resource;
2214
2215                res->start = dev_res->start;
2216                res->end = dev_res->end;
2217                res->flags = dev_res->flags;
2218
2219                pci_claim_resource(bridge, i);
2220                pci_setup_bridge(bridge->subordinate);
2221        }
2222        free_list(&saved);
2223        up_read(&pci_bus_sem);
2224
2225        return ret;
2226}
2227
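/**
 * pci_assign_unassigned_bus_resources() - Assign leftover resources on a bus
 * @bus:        Bus whose devices need resources assigned
 *
 * Size the windows of the bridges on @bus and assign any resources that are
 * still unassigned, e.g. after a rescan has discovered new devices.
 */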
2228void pci_assign_unassigned_bus_resources(struct pci_bus *bus)
2229{
2230        struct pci_dev *dev;
2231        /* List of resources that want additional resources */
2232        LIST_HEAD(add_list);
2233
2234        down_read(&pci_bus_sem);
2235        for_each_pci_bridge(dev, bus)
2236                if (pci_has_subordinate(dev))
2237                        __pci_bus_size_bridges(dev->subordinate, &add_list);
2238        up_read(&pci_bus_sem);
2239        __pci_bus_assign_resources(bus, &add_list, NULL);
2240        BUG_ON(!list_empty(&add_list));
2241}
2242EXPORT_SYMBOL_GPL(pci_assign_unassigned_bus_resources);
2243