linux/drivers/bcma/host_pci.c
/*
 * Broadcom specific AMBA
 * PCI Host
 *
 * Licensed under the GNU/GPL. See COPYING for details.
 */

#include "bcma_private.h"
#include <linux/slab.h>
#include <linux/bcma/bcma.h>
#include <linux/pci.h>
#include <linux/module.h>

static void bcma_host_pci_switch_core(struct bcma_device *core)
{
        int win2 = core->bus->host_is_pcie2 ?
                BCMA_PCIE2_BAR0_WIN2 : BCMA_PCI_BAR0_WIN2;

        pci_write_config_dword(core->bus->host_pci, BCMA_PCI_BAR0_WIN,
                               core->addr);
        pci_write_config_dword(core->bus->host_pci, win2, core->wrap);
        core->bus->mapped_core = core;
        bcma_debug(core->bus, "Switched to core: 0x%X\n", core->id.id);
}
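
/*
 * BAR0 of the PCI bridge is a sliding window into the AMBA backplane:
 * writing a core's base address into the BCMA_PCI_BAR0_WIN config register
 * maps that core's register space at the start of BAR0, and the second
 * window register maps its agent/wrapper space. Only one movable mapping
 * exists at a time, which is why mapped_core is cached and checked before
 * every switch.
 */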

/* Provides access to the requested core. Returns base offset that has to be
 * used. It makes use of fixed windows when possible. */
static u16 bcma_host_pci_provide_access_to_core(struct bcma_device *core)
{
        switch (core->id.id) {
        case BCMA_CORE_CHIPCOMMON:
                return 3 * BCMA_CORE_SIZE;
        case BCMA_CORE_PCIE:
                return 2 * BCMA_CORE_SIZE;
        }

        if (core->bus->mapped_core != core)
                bcma_host_pci_switch_core(core);
        return 0;
}
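
/*
 * Resulting BAR0 layout, as used by the accessors below:
 *
 *	0 * BCMA_CORE_SIZE - currently switched (movable) core window
 *	1 * BCMA_CORE_SIZE - agent/wrapper space of the switched core
 *	2 * BCMA_CORE_SIZE - PCIe host core (fixed window)
 *	3 * BCMA_CORE_SIZE - ChipCommon core (fixed window)
 */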

static u8 bcma_host_pci_read8(struct bcma_device *core, u16 offset)
{
        offset += bcma_host_pci_provide_access_to_core(core);
        return ioread8(core->bus->mmio + offset);
}

static u16 bcma_host_pci_read16(struct bcma_device *core, u16 offset)
{
        offset += bcma_host_pci_provide_access_to_core(core);
        return ioread16(core->bus->mmio + offset);
}

static u32 bcma_host_pci_read32(struct bcma_device *core, u16 offset)
{
        offset += bcma_host_pci_provide_access_to_core(core);
        return ioread32(core->bus->mmio + offset);
}

static void bcma_host_pci_write8(struct bcma_device *core, u16 offset,
                                 u8 value)
{
        offset += bcma_host_pci_provide_access_to_core(core);
        iowrite8(value, core->bus->mmio + offset);
}

static void bcma_host_pci_write16(struct bcma_device *core, u16 offset,
                                  u16 value)
{
        offset += bcma_host_pci_provide_access_to_core(core);
        iowrite16(value, core->bus->mmio + offset);
}

static void bcma_host_pci_write32(struct bcma_device *core, u16 offset,
                                  u32 value)
{
        offset += bcma_host_pci_provide_access_to_core(core);
        iowrite32(value, core->bus->mmio + offset);
}
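
/*
 * All single-register accessors funnel through
 * bcma_host_pci_provide_access_to_core(): ChipCommon and the PCIe core are
 * reached through their fixed windows with no extra config-space write,
 * while any other core costs a window switch the first time it is touched
 * after a different core was mapped.
 */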

#ifdef CONFIG_BCMA_BLOCKIO
static void bcma_host_pci_block_read(struct bcma_device *core, void *buffer,
                                     size_t count, u16 offset, u8 reg_width)
{
        void __iomem *addr = core->bus->mmio + offset;

        if (core->bus->mapped_core != core)
                bcma_host_pci_switch_core(core);
        switch (reg_width) {
        case sizeof(u8):
                ioread8_rep(addr, buffer, count);
                break;
        case sizeof(u16):
                WARN_ON(count & 1);
                ioread16_rep(addr, buffer, count >> 1);
                break;
        case sizeof(u32):
                WARN_ON(count & 3);
                ioread32_rep(addr, buffer, count >> 2);
                break;
        default:
                WARN_ON(1);
        }
}

static void bcma_host_pci_block_write(struct bcma_device *core,
                                      const void *buffer, size_t count,
                                      u16 offset, u8 reg_width)
{
        void __iomem *addr = core->bus->mmio + offset;

        if (core->bus->mapped_core != core)
                bcma_host_pci_switch_core(core);
        switch (reg_width) {
        case sizeof(u8):
                iowrite8_rep(addr, buffer, count);
                break;
        case sizeof(u16):
                WARN_ON(count & 1);
                iowrite16_rep(addr, buffer, count >> 1);
                break;
        case sizeof(u32):
                WARN_ON(count & 3);
                iowrite32_rep(addr, buffer, count >> 2);
                break;
        default:
                WARN_ON(1);
        }
}
#endif
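
/*
 * For the block transfer helpers above, count is in bytes regardless of
 * reg_width; the WARN_ON()s catch lengths that are not a multiple of the
 * access width. Core drivers reach these through the bus ops, typically
 * via the bcma_block_read()/bcma_block_write() wrappers declared in
 * <linux/bcma/bcma.h>.
 */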

static u32 bcma_host_pci_aread32(struct bcma_device *core, u16 offset)
{
        if (core->bus->mapped_core != core)
                bcma_host_pci_switch_core(core);
        return ioread32(core->bus->mmio + (1 * BCMA_CORE_SIZE) + offset);
}

static void bcma_host_pci_awrite32(struct bcma_device *core, u16 offset,
                                   u32 value)
{
        if (core->bus->mapped_core != core)
                bcma_host_pci_switch_core(core);
        iowrite32(value, core->bus->mmio + (1 * BCMA_CORE_SIZE) + offset);
}
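
/*
 * aread32()/awrite32() access the agent/wrapper registers of a core: the
 * switch above maps the wrapper at the second window (offset
 * 1 * BCMA_CORE_SIZE), which is where these two helpers read and write.
 */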

static const struct bcma_host_ops bcma_host_pci_ops = {
        .read8          = bcma_host_pci_read8,
        .read16         = bcma_host_pci_read16,
        .read32         = bcma_host_pci_read32,
        .write8         = bcma_host_pci_write8,
        .write16        = bcma_host_pci_write16,
        .write32        = bcma_host_pci_write32,
#ifdef CONFIG_BCMA_BLOCKIO
        .block_read     = bcma_host_pci_block_read,
        .block_write    = bcma_host_pci_block_write,
#endif
        .aread32        = bcma_host_pci_aread32,
        .awrite32       = bcma_host_pci_awrite32,
};
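
/*
 * Core drivers do not call the functions above directly; they use the
 * inline wrappers from <linux/bcma/bcma.h>, which dispatch through
 * bus->ops. Roughly (sketch of such a wrapper, see the header for the
 * exact definitions):
 *
 *	static inline u32 bcma_read32(struct bcma_device *core, u16 offset)
 *	{
 *		return core->bus->ops->read32(core, offset);
 *	}
 */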

static int bcma_host_pci_probe(struct pci_dev *dev,
                               const struct pci_device_id *id)
{
        struct bcma_bus *bus;
        int err = -ENOMEM;
        const char *name;
        u32 val;

        /* Alloc */
        bus = kzalloc(sizeof(*bus), GFP_KERNEL);
        if (!bus)
                goto out;

        /* Basic PCI configuration */
        err = pci_enable_device(dev);
        if (err)
                goto err_kfree_bus;

        name = dev_name(&dev->dev);
        if (dev->driver && dev->driver->name)
                name = dev->driver->name;
        err = pci_request_regions(dev, name);
        if (err)
                goto err_pci_disable;
        pci_set_master(dev);

        /* Disable the RETRY_TIMEOUT register (0x41) to keep
         * PCI Tx retries from interfering with C3 CPU state */
        pci_read_config_dword(dev, 0x40, &val);
        if ((val & 0x0000ff00) != 0)
                pci_write_config_dword(dev, 0x40, val & 0xffff00ff);

        /* SSB needed additional powering up. Do any AMBA-based cards ship as
         * plain PCI? */
        if (!pci_is_pcie(dev)) {
                bcma_err(bus, "PCI card detected; PCI cards are not supported.\n");
                err = -ENXIO;
                goto err_pci_release_regions;
        }

        bus->dev = &dev->dev;

        /* Map MMIO */
        err = -ENOMEM;
        bus->mmio = pci_iomap(dev, 0, ~0UL);
        if (!bus->mmio)
                goto err_pci_release_regions;

        /* Host specific */
        bus->host_pci = dev;
        bus->hosttype = BCMA_HOSTTYPE_PCI;
        bus->ops = &bcma_host_pci_ops;

        bus->boardinfo.vendor = bus->host_pci->subsystem_vendor;
        bus->boardinfo.type = bus->host_pci->subsystem_device;

        /* Initialize struct, detect chip */
        bcma_init_bus(bus);

        /* Scan bus to find out generation of PCIe core */
        err = bcma_bus_scan(bus);
        if (err)
                goto err_pci_unmap_mmio;

        if (bcma_find_core(bus, BCMA_CORE_PCIE2))
                bus->host_is_pcie2 = true;

        /* Register */
        err = bcma_bus_register(bus);
        if (err)
                goto err_unregister_cores;

        pci_set_drvdata(dev, bus);

out:
        return err;

err_unregister_cores:
        bcma_unregister_cores(bus);
err_pci_unmap_mmio:
        pci_iounmap(dev, bus->mmio);
err_pci_release_regions:
        pci_release_regions(dev);
err_pci_disable:
        pci_disable_device(dev);
err_kfree_bus:
        kfree(bus);
        return err;
}
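
/*
 * The error labels above unwind in reverse order of the setup steps:
 * registered cores first, then the MMIO mapping, the PCI regions, the PCI
 * device, and finally the bus allocation itself.
 */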

static void bcma_host_pci_remove(struct pci_dev *dev)
{
        struct bcma_bus *bus = pci_get_drvdata(dev);

        bcma_bus_unregister(bus);
        pci_iounmap(dev, bus->mmio);
        pci_release_regions(dev);
        pci_disable_device(dev);
        kfree(bus);
}

#ifdef CONFIG_PM_SLEEP
static int bcma_host_pci_suspend(struct device *dev)
{
        struct pci_dev *pdev = to_pci_dev(dev);
        struct bcma_bus *bus = pci_get_drvdata(pdev);

        bus->mapped_core = NULL;

        return bcma_bus_suspend(bus);
}

static int bcma_host_pci_resume(struct device *dev)
{
        struct pci_dev *pdev = to_pci_dev(dev);
        struct bcma_bus *bus = pci_get_drvdata(pdev);

        return bcma_bus_resume(bus);
}

static SIMPLE_DEV_PM_OPS(bcma_pm_ops, bcma_host_pci_suspend,
                         bcma_host_pci_resume);
#define BCMA_PM_OPS     (&bcma_pm_ops)

#else /* CONFIG_PM_SLEEP */

#define BCMA_PM_OPS     NULL

#endif /* CONFIG_PM_SLEEP */

static const struct pci_device_id bcma_pci_bridge_tbl[] = {
        { PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, 0x0576) },
        { PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, 0x4313) },
        { PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, 43224) },  /* 0xa8d8 */
        { PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, 0x4331) },
        { PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, 0x4353) },
        { PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, 0x4357) },
        { PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, 0x4358) },
        { PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, 0x4359) },
        { PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, 0x4360) },
        { PCI_DEVICE_SUB(PCI_VENDOR_ID_BROADCOM, 0x4365, PCI_VENDOR_ID_DELL, 0x0016) },
        { PCI_DEVICE_SUB(PCI_VENDOR_ID_BROADCOM, 0x4365, PCI_VENDOR_ID_DELL, 0x0018) },
        { PCI_DEVICE_SUB(PCI_VENDOR_ID_BROADCOM, 0x4365, PCI_VENDOR_ID_FOXCONN, 0xe092) },
        { PCI_DEVICE_SUB(PCI_VENDOR_ID_BROADCOM, 0x4365, PCI_VENDOR_ID_HP, 0x804a) },
        { PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, 0x43a0) },
        { PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, 0x43a9) },
        { PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, 0x43aa) },
        { PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, 0x43b1) },
        { PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, 0x4727) },
        { PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, 43227) },  /* 0xa8db, BCM43217 (sic!) */
        { PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, 43228) },  /* 0xa8dc */
        { 0, },
};
MODULE_DEVICE_TABLE(pci, bcma_pci_bridge_tbl);

static struct pci_driver bcma_pci_bridge_driver = {
        .name = "bcma-pci-bridge",
        .id_table = bcma_pci_bridge_tbl,
        .probe = bcma_host_pci_probe,
        .remove = bcma_host_pci_remove,
        .driver.pm = BCMA_PM_OPS,
};

int __init bcma_host_pci_init(void)
{
        return pci_register_driver(&bcma_pci_bridge_driver);
}

void __exit bcma_host_pci_exit(void)
{
        pci_unregister_driver(&bcma_pci_bridge_driver);
}
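
/*
 * bcma_host_pci_init()/bcma_host_pci_exit() are not module init/exit
 * functions of their own; they are called from the generic bus code in
 * main.c when PCI host support (CONFIG_BCMA_HOST_PCI) is enabled.
 */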

/**************************************************
 * Runtime ops for drivers.
 **************************************************/

/* See also pcicore_up */
void bcma_host_pci_up(struct bcma_bus *bus)
{
        if (bus->hosttype != BCMA_HOSTTYPE_PCI)
                return;

        if (bus->host_is_pcie2)
                bcma_core_pcie2_up(&bus->drv_pcie2);
        else
                bcma_core_pci_up(&bus->drv_pci[0]);
}
EXPORT_SYMBOL_GPL(bcma_host_pci_up);

/* See also pcicore_down */
void bcma_host_pci_down(struct bcma_bus *bus)
{
        if (bus->hosttype != BCMA_HOSTTYPE_PCI)
                return;

        if (!bus->host_is_pcie2)
                bcma_core_pci_down(&bus->drv_pci[0]);
}
EXPORT_SYMBOL_GPL(bcma_host_pci_down);
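
/*
 * Illustrative usage from a core driver (hypothetical caller, shown only
 * as a sketch):
 *
 *	bcma_host_pci_up(bus);
 *	... MMIO access to the device's cores ...
 *	bcma_host_pci_down(bus);
 *
 * bcma_host_pci_up() prepares the PCIe core (generation 1 or 2) before the
 * device is used; bcma_host_pci_down() undoes that setup when it is idle.
 */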

/* See also si_pci_setup */
int bcma_host_pci_irq_ctl(struct bcma_bus *bus, struct bcma_device *core,
                          bool enable)
{
        struct pci_dev *pdev;
        u32 coremask, tmp;
        int err = 0;

        if (bus->hosttype != BCMA_HOSTTYPE_PCI) {
                /* This bcma device is not on a PCI host bus, so its IRQs are
                 * not routed through the PCI core and there is nothing to
                 * configure here. */
                goto out;
        }

        pdev = bus->host_pci;

        err = pci_read_config_dword(pdev, BCMA_PCI_IRQMASK, &tmp);
        if (err)
                goto out;

        coremask = BIT(core->core_index) << 8;
        if (enable)
                tmp |= coremask;
        else
                tmp &= ~coremask;

        err = pci_write_config_dword(pdev, BCMA_PCI_IRQMASK, tmp);

out:
        return err;
}
EXPORT_SYMBOL_GPL(bcma_host_pci_irq_ctl);
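
/*
 * Illustrative usage (sketch, not taken from a specific driver): a core
 * driver that wants its interrupt routed through the PCI core would call
 *
 *	err = bcma_host_pci_irq_ctl(bus, core, true);
 *
 * and call it again with enable == false when shutting the core down.
 */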