linux/arch/sh/drivers/pci/pci.c
/*
 * New-style PCI core.
 *
 * Copyright (c) 2004 - 2009  Paul Mundt
 * Copyright (c) 2002  M. R. Brown
 *
 * Modelled after arch/mips/pci/pci.c:
 *  Copyright (C) 2003, 04 Ralf Baechle (ralf@linux-mips.org)
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 */
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/pci.h>
#include <linux/init.h>
#include <linux/types.h>
#include <linux/dma-debug.h>
#include <linux/io.h>
#include <linux/mutex.h>

unsigned long PCIBIOS_MIN_IO = 0x0000;
unsigned long PCIBIOS_MIN_MEM = 0;

/*
 * The PCI controller list.
 */
static struct pci_channel *hose_head, **hose_tail = &hose_head;

static int pci_initialized;

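/*
 * Scan a single hose: probe its root bus, then size, assign and enable
 * the bridge resources behind it.  Bus numbers are handed out from a
 * single, monotonically increasing counter shared by all hoses.
 */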
static void __devinit pcibios_scanbus(struct pci_channel *hose)
{
	static int next_busno;
	struct pci_bus *bus;

	bus = pci_scan_bus(next_busno, hose->pci_ops, hose);
	if (bus) {
		next_busno = bus->subordinate + 1;
		/* Don't allow 8-bit bus number overflow inside the hose -
		   reserve some space for bridges. */
		if (next_busno > 224)
			next_busno = 0;

		pci_bus_size_bridges(bus);
		pci_bus_assign_resources(bus);
		pci_enable_bridges(bus);
	}
}

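/* Serializes bus scans for controllers registered after pcibios_init(). */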
static DEFINE_MUTEX(pci_scan_mutex);

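/*
 * Add a controller (hose) to the global list and claim its I/O and
 * memory windows from the root resources.  If the PCI subsystem has
 * already been initialized, the new hose is scanned immediately.
 */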
void __devinit register_pci_controller(struct pci_channel *hose)
{
	request_resource(&iomem_resource, hose->mem_resource);
	request_resource(&ioport_resource, hose->io_resource);

	*hose_tail = hose;
	hose_tail = &hose->next;

	/*
	 * Do not panic here but later - this might happen before console init.
	 */
	if (!hose->io_map_base) {
		printk(KERN_WARNING
		       "registering PCI controller with io_map_base unset\n");
	}

	/*
	 * Scan the bus if it is registered after the PCI subsystem
	 * initialization.
	 */
	if (pci_initialized) {
		mutex_lock(&pci_scan_mutex);
		pcibios_scanbus(hose);
		mutex_unlock(&pci_scan_mutex);
	}
}

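/*
 * Scan all controllers registered at boot and fix up IRQ routing via
 * the platform's swizzle/map hooks.
 */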
static int __init pcibios_init(void)
{
	struct pci_channel *hose;

	/* Scan all of the recorded PCI controllers.  */
	for (hose = hose_head; hose; hose = hose->next)
		pcibios_scanbus(hose);

	pci_fixup_irqs(pci_common_swizzle, pcibios_map_platform_irq);

	dma_debug_add_bus(&pci_bus_type);

	pci_initialized = 1;

	return 0;
}
subsys_initcall(pcibios_init);

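/*
 * Translate a device's BAR values from bus-local addresses into CPU
 * (resource) addresses by adding the hose's I/O or memory offset.
 */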
static void pcibios_fixup_device_resources(struct pci_dev *dev,
	struct pci_bus *bus)
{
	/* Update device resources.  */
	struct pci_channel *hose = bus->sysdata;
	unsigned long offset = 0;
	int i;

	for (i = 0; i < PCI_NUM_RESOURCES; i++) {
		if (!dev->resource[i].start)
			continue;
		if (dev->resource[i].flags & IORESOURCE_PCI_FIXED)
			continue;
		if (dev->resource[i].flags & IORESOURCE_IO)
			offset = hose->io_offset;
		else if (dev->resource[i].flags & IORESOURCE_MEM)
			offset = hose->mem_offset;

		dev->resource[i].start += offset;
		dev->resource[i].end += offset;
	}
}

/*
 *  Called after each bus is probed, but before its children
 *  are examined.
 */
void __devinit pcibios_fixup_bus(struct pci_bus *bus)
{
	struct pci_dev *dev = bus->self;
	struct list_head *ln;
	struct pci_channel *chan = bus->sysdata;

	if (!dev) {
		bus->resource[0] = chan->io_resource;
		bus->resource[1] = chan->mem_resource;
	}

	for (ln = bus->devices.next; ln != &bus->devices; ln = ln->next) {
		dev = pci_dev_b(ln);

		if ((dev->class >> 8) != PCI_CLASS_BRIDGE_PCI)
			pcibios_fixup_device_resources(dev, bus);
	}
}

/*
 * We need to avoid collisions with `mirrored' VGA ports
 * and other strange ISA hardware, so we always want the
 * addresses to be allocated in the 0x000-0x0ff region
 * modulo 0x400.
 */
void pcibios_align_resource(void *data, struct resource *res,
			    resource_size_t size, resource_size_t align)
{
	struct pci_dev *dev = data;
	struct pci_channel *chan = dev->sysdata;
	resource_size_t start = res->start;

	if (res->flags & IORESOURCE_IO) {
		if (start < PCIBIOS_MIN_IO + chan->io_resource->start)
			start = PCIBIOS_MIN_IO + chan->io_resource->start;

		/*
		 * Put everything into 0x00-0xff region modulo 0x400.
		 */
		if (start & 0x300) {
			start = (start + 0x3ff) & ~0x3ff;
			res->start = start;
		}
	} else if (res->flags & IORESOURCE_MEM) {
		if (start < PCIBIOS_MIN_MEM + chan->mem_resource->start)
			start = PCIBIOS_MIN_MEM + chan->mem_resource->start;
	}

	res->start = start;
}

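/* Convert a CPU resource range back into a bus-local PCI region. */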
void pcibios_resource_to_bus(struct pci_dev *dev, struct pci_bus_region *region,
			     struct resource *res)
{
	struct pci_channel *hose = dev->sysdata;
	unsigned long offset = 0;

	if (res->flags & IORESOURCE_IO)
		offset = hose->io_offset;
	else if (res->flags & IORESOURCE_MEM)
		offset = hose->mem_offset;

	region->start = res->start - offset;
	region->end = res->end - offset;
}

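/* Convert a bus-local PCI region into a CPU resource range. */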
void __devinit
pcibios_bus_to_resource(struct pci_dev *dev, struct resource *res,
			struct pci_bus_region *region)
{
	struct pci_channel *hose = dev->sysdata;
	unsigned long offset = 0;

	if (res->flags & IORESOURCE_IO)
		offset = hose->io_offset;
	else if (res->flags & IORESOURCE_MEM)
		offset = hose->mem_offset;

	res->start = region->start + offset;
	res->end = region->end + offset;
}

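/*
 * Enable the I/O and/or memory decoding bits in PCI_COMMAND for the
 * resources selected by @mask, failing if any of them is unassigned.
 */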
int pcibios_enable_device(struct pci_dev *dev, int mask)
{
	u16 cmd, old_cmd;
	int idx;
	struct resource *r;

	pci_read_config_word(dev, PCI_COMMAND, &cmd);
	old_cmd = cmd;
	for (idx = 0; idx < PCI_NUM_RESOURCES; idx++) {
		/* Only set up the requested stuff */
		if (!(mask & (1 << idx)))
			continue;

		r = &dev->resource[idx];
		if (!(r->flags & (IORESOURCE_IO | IORESOURCE_MEM)))
			continue;
		if ((idx == PCI_ROM_RESOURCE) &&
				(!(r->flags & IORESOURCE_ROM_ENABLE)))
			continue;
		if (!r->start && r->end) {
			printk(KERN_ERR "PCI: Device %s not available "
			       "because of resource collisions\n",
			       pci_name(dev));
			return -EINVAL;
		}
		if (r->flags & IORESOURCE_IO)
			cmd |= PCI_COMMAND_IO;
		if (r->flags & IORESOURCE_MEM)
			cmd |= PCI_COMMAND_MEMORY;
	}
	if (cmd != old_cmd) {
		printk("PCI: Enabling device %s (%04x -> %04x)\n",
		       pci_name(dev), old_cmd, cmd);
		pci_write_config_word(dev, PCI_COMMAND, cmd);
	}
	return 0;
}

/*
 *  If we set up a device for bus mastering, we need to check and set
 *  the latency timer as it may not be properly set.
 */
static unsigned int pcibios_max_latency = 255;

void pcibios_set_master(struct pci_dev *dev)
{
	u8 lat;
	pci_read_config_byte(dev, PCI_LATENCY_TIMER, &lat);
	if (lat < 16)
		lat = (64 <= pcibios_max_latency) ? 64 : pcibios_max_latency;
	else if (lat > pcibios_max_latency)
		lat = pcibios_max_latency;
	else
		return;
	printk(KERN_INFO "PCI: Setting latency timer of device %s to %d\n",
	       pci_name(dev), lat);
	pci_write_config_byte(dev, PCI_LATENCY_TIMER, lat);
}

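/* Record the assigned IRQ in the device's PCI_INTERRUPT_LINE register. */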
void __init pcibios_update_irq(struct pci_dev *dev, int irq)
{
	pci_write_config_byte(dev, PCI_INTERRUPT_LINE, irq);
}

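/* No SH-specific "pci=" options are handled; return the string unconsumed. */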
char * __devinit pcibios_setup(char *str)
{
	return str;
}

int pci_mmap_page_range(struct pci_dev *dev, struct vm_area_struct *vma,
			enum pci_mmap_state mmap_state, int write_combine)
{
	/*
	 * I/O space can be accessed via normal processor loads and stores on
	 * this platform, but for now we elect not to do this; portable
	 * drivers should not do this anyway.
	 */
	if (mmap_state == pci_mmap_io)
		return -EINVAL;

	/*
	 * Ignore write-combine; for now only return uncached mappings.
	 */
	vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);

	return remap_pfn_range(vma, vma->vm_start, vma->vm_pgoff,
			       vma->vm_end - vma->vm_start,
			       vma->vm_page_prot);
}

#ifndef CONFIG_GENERIC_IOMAP

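/*
 * Map a range of I/O ports by offsetting into the hose's I/O window,
 * falling back to generic_io_base if the hose did not set io_map_base.
 */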
static void __iomem *ioport_map_pci(struct pci_dev *dev,
				    unsigned long port, unsigned int nr)
{
	struct pci_channel *chan = dev->sysdata;

	if (!chan->io_map_base)
		chan->io_map_base = generic_io_base;

	return (void __iomem *)(chan->io_map_base + port);
}

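/*
 * Map a BAR: I/O BARs go through ioport_map_pci(), memory BARs are
 * handed to ioremap()/ioremap_nocache() depending on cacheability.
 */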
void __iomem *pci_iomap(struct pci_dev *dev, int bar, unsigned long maxlen)
{
	resource_size_t start = pci_resource_start(dev, bar);
	resource_size_t len = pci_resource_len(dev, bar);
	unsigned long flags = pci_resource_flags(dev, bar);

	if (unlikely(!len || !start))
		return NULL;
	if (maxlen && len > maxlen)
		len = maxlen;

	if (flags & IORESOURCE_IO)
		return ioport_map_pci(dev, start, len);

	/*
	 * Presently the IORESOURCE_MEM case is a bit special, most
	 * SH7751 style PCI controllers have PCI memory at a fixed
	 * location in the address space where no remapping is desired.
	 * With the IORESOURCE_MEM case more care has to be taken
	 * to inhibit page table mapping for legacy cores, but this is
	 * punted off to __ioremap().
	 *					-- PFM.
	 */
	if (flags & IORESOURCE_MEM) {
		if (flags & IORESOURCE_CACHEABLE)
			return ioremap(start, len);

		return ioremap_nocache(start, len);
	}

	return NULL;
}
EXPORT_SYMBOL(pci_iomap);

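/* Tear down a mapping created by pci_iomap(). */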
void pci_iounmap(struct pci_dev *dev, void __iomem *addr)
{
	iounmap(addr);
}
EXPORT_SYMBOL(pci_iounmap);

#endif /* CONFIG_GENERIC_IOMAP */

#ifdef CONFIG_HOTPLUG
EXPORT_SYMBOL(pcibios_resource_to_bus);
EXPORT_SYMBOL(pcibios_bus_to_resource);
EXPORT_SYMBOL(PCIBIOS_MIN_IO);
EXPORT_SYMBOL(PCIBIOS_MIN_MEM);
#endif