linux/arch/mips/pci/pci-alchemy.c
/*
 * Alchemy PCI host mode support.
 *
 * Copyright 2001-2003, 2007-2008 MontaVista Software Inc.
 * Author: MontaVista Software, Inc. <source@mvista.com>
 *
 * Support for all devices (greater than 16) added by David Gathright.
 */

#include <linux/clk.h>
#include <linux/export.h>
#include <linux/types.h>
#include <linux/pci.h>
#include <linux/platform_device.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/syscore_ops.h>
#include <linux/vmalloc.h>

#include <asm/dma-coherence.h>
#include <asm/mach-au1x00/au1000.h>
#include <asm/tlbmisc.h>

#ifdef CONFIG_PCI_DEBUG
#define DBG(x...) printk(KERN_DEBUG x)
#else
#define DBG(x...) do {} while (0)
#endif

#define PCI_ACCESS_READ         0
#define PCI_ACCESS_WRITE        1

struct alchemy_pci_context {
        struct pci_controller alchemy_pci_ctrl; /* leave as first member! */
        void __iomem *regs;                     /* ctrl base */
        /* tools for wired entry for config space access */
        unsigned long last_elo0;
        unsigned long last_elo1;
        int wired_entry;
        struct vm_struct *pci_cfg_vm;

        unsigned long pm[12];

        int (*board_map_irq)(const struct pci_dev *d, u8 slot, u8 pin);
        int (*board_pci_idsel)(unsigned int devsel, int assert);
};

/* for syscore_ops. There's only one PCI controller on Alchemy chips, so this
 * should suffice for now.
 */
static struct alchemy_pci_context *__alchemy_pci_ctx;


/* IO/MEM resources for PCI. Keep the memres in sync with __fixup_bigphys_addr
 * in arch/mips/alchemy/common/setup.c
 */
static struct resource alchemy_pci_def_memres = {
        .start  = ALCHEMY_PCI_MEMWIN_START,
        .end    = ALCHEMY_PCI_MEMWIN_END,
        .name   = "PCI memory space",
        .flags  = IORESOURCE_MEM
};

static struct resource alchemy_pci_def_iores = {
        .start  = ALCHEMY_PCI_IOWIN_START,
        .end    = ALCHEMY_PCI_IOWIN_END,
        .name   = "PCI IO space",
        .flags  = IORESOURCE_IO
};

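/* Rewrite one wired TLB entry in place: save the current EntryHi/PageMask,
 * select the entry by index, program the new page pair and restore the saved
 * state afterwards.  Interrupts are expected to be off; the only caller,
 * config_access(), runs under local_irq_save().
 */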
static void mod_wired_entry(int entry, unsigned long entrylo0,
                unsigned long entrylo1, unsigned long entryhi,
                unsigned long pagemask)
{
        unsigned long old_pagemask;
        unsigned long old_ctx;

        /* Save the current ASID and page mask; both are restored below */
        old_ctx = read_c0_entryhi() & 0xff;
        old_pagemask = read_c0_pagemask();
        write_c0_index(entry);
        write_c0_pagemask(pagemask);
        write_c0_entryhi(entryhi);
        write_c0_entrylo0(entrylo0);
        write_c0_entrylo1(entrylo1);
        tlb_write_indexed();
        write_c0_entryhi(old_ctx);
        write_c0_pagemask(old_pagemask);
}

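/* Reserve the next wired TLB slot for the config space window.  The entry is
 * installed with a dummy (zero) physical mapping; config_access() points it
 * at the correct physical page pair on first use, since last_elo0/1 are
 * reset to an impossible value here.
 */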
static void alchemy_pci_wired_entry(struct alchemy_pci_context *ctx)
{
        ctx->wired_entry = read_c0_wired();
        add_wired_entry(0, 0, (unsigned long)ctx->pci_cfg_vm->addr, PM_4K);
        ctx->last_elo0 = ctx->last_elo1 = ~0;
}

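/* Generate a PCI configuration cycle.  Config cycles go through a dedicated
 * window in the 36-bit physical address space, so the wired TLB entry set up
 * above is retargeted at the page pair belonging to the requested bus/device.
 * The EntryLo values of the last access are cached in last_elo0/1 so that
 * back-to-back accesses to the same device skip the TLB rewrite.
 */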
static int config_access(unsigned char access_type, struct pci_bus *bus,
                         unsigned int dev_fn, unsigned char where, u32 *data)
{
        struct alchemy_pci_context *ctx = bus->sysdata;
        unsigned int device = PCI_SLOT(dev_fn);
        unsigned int function = PCI_FUNC(dev_fn);
        unsigned long offset, status, cfg_base, flags, entryLo0, entryLo1, r;
        int error = PCIBIOS_SUCCESSFUL;

        if (device > 19) {
                *data = 0xffffffff;
                return -1;
        }

        local_irq_save(flags);
        r = __raw_readl(ctx->regs + PCI_REG_STATCMD) & 0x0000ffff;
        r |= PCI_STATCMD_STATUS(0x2000);
        __raw_writel(r, ctx->regs + PCI_REG_STATCMD);
        wmb();

        /* Allow board vendors to implement their own off-chip IDSEL.
         * If it doesn't succeed, may as well bail out at this point.
         */
        if (ctx->board_pci_idsel(device, 1) == 0) {
                *data = 0xffffffff;
                local_irq_restore(flags);
                return -1;
        }

        /* Setup the config window */
        if (bus->number == 0)
                cfg_base = (1 << device) << 11;
        else
                cfg_base = 0x80000000 | (bus->number << 16) | (device << 11);

        /* Setup the lower bits of the 36-bit address */
        offset = (function << 8) | (where & ~0x3);
        /* Pick up any address that falls below the page mask */
        offset |= cfg_base & ~PAGE_MASK;

        /* Page boundary */
        cfg_base = cfg_base & PAGE_MASK;

        /* To improve performance, if the current device is the same as
         * the last device accessed, we don't touch the TLB.
         */
        entryLo0 = (6 << 26) | (cfg_base >> 6) | (2 << 3) | 7;
        entryLo1 = (6 << 26) | (cfg_base >> 6) | (0x1000 >> 6) | (2 << 3) | 7;
        if ((entryLo0 != ctx->last_elo0) || (entryLo1 != ctx->last_elo1)) {
                mod_wired_entry(ctx->wired_entry, entryLo0, entryLo1,
                                (unsigned long)ctx->pci_cfg_vm->addr, PM_4K);
                ctx->last_elo0 = entryLo0;
                ctx->last_elo1 = entryLo1;
        }

        if (access_type == PCI_ACCESS_WRITE)
                __raw_writel(*data, ctx->pci_cfg_vm->addr + offset);
        else
                *data = __raw_readl(ctx->pci_cfg_vm->addr + offset);
        wmb();

        DBG("alchemy-pci: cfg access %d bus %u dev %u at %x dat %x conf %lx\n",
            access_type, bus->number, device, where, *data, offset);

        /* check for errors, master abort */
        status = __raw_readl(ctx->regs + PCI_REG_STATCMD);
        if (status & (1 << 29)) {
                *data = 0xffffffff;
                error = -1;
                DBG("alchemy-pci: master abort on cfg access %d bus %d dev %d\n",
                    access_type, bus->number, device);
        } else if ((status >> 28) & 0xf) {
                DBG("alchemy-pci: PCI ERR detected: dev %d, status %lx\n",
                    device, (status >> 28) & 0xf);

                /* clear errors */
                __raw_writel(status & 0xf000ffff, ctx->regs + PCI_REG_STATCMD);

                *data = 0xffffffff;
                error = -1;
        }

        /* Take away the IDSEL. */
        (void)ctx->board_pci_idsel(device, 0);

        local_irq_restore(flags);
        return error;
}

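/* Config space is always accessed 32 bits at a time by config_access(): the
 * byte/word readers below fetch the containing dword and shift out the
 * requested part, while the byte/word writers do a read-modify-write of
 * that dword.
 */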
static int read_config_byte(struct pci_bus *bus, unsigned int devfn,
                            int where, u8 *val)
{
        u32 data;
        int ret = config_access(PCI_ACCESS_READ, bus, devfn, where, &data);

        if (where & 1)
                data >>= 8;
        if (where & 2)
                data >>= 16;
        *val = data & 0xff;
        return ret;
}

static int read_config_word(struct pci_bus *bus, unsigned int devfn,
                            int where, u16 *val)
{
        u32 data;
        int ret = config_access(PCI_ACCESS_READ, bus, devfn, where, &data);

        if (where & 2)
                data >>= 16;
        *val = data & 0xffff;
        return ret;
}

static int read_config_dword(struct pci_bus *bus, unsigned int devfn,
                             int where, u32 *val)
{
        return config_access(PCI_ACCESS_READ, bus, devfn, where, val);
}

static int write_config_byte(struct pci_bus *bus, unsigned int devfn,
                             int where, u8 val)
{
        u32 data = 0;

        if (config_access(PCI_ACCESS_READ, bus, devfn, where, &data))
                return -1;

        data = (data & ~(0xff << ((where & 3) << 3))) |
               (val << ((where & 3) << 3));

        if (config_access(PCI_ACCESS_WRITE, bus, devfn, where, &data))
                return -1;

        return PCIBIOS_SUCCESSFUL;
}

static int write_config_word(struct pci_bus *bus, unsigned int devfn,
                             int where, u16 val)
{
        u32 data = 0;

        if (config_access(PCI_ACCESS_READ, bus, devfn, where, &data))
                return -1;

        data = (data & ~(0xffff << ((where & 3) << 3))) |
               (val << ((where & 3) << 3));

        if (config_access(PCI_ACCESS_WRITE, bus, devfn, where, &data))
                return -1;

        return PCIBIOS_SUCCESSFUL;
}

static int write_config_dword(struct pci_bus *bus, unsigned int devfn,
                              int where, u32 val)
{
        return config_access(PCI_ACCESS_WRITE, bus, devfn, where, &val);
}

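/* pci_ops entry points: dispatch on the access size and funnel everything
 * through the fixed-width helpers above.
 */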
static int alchemy_pci_read(struct pci_bus *bus, unsigned int devfn,
                       int where, int size, u32 *val)
{
        switch (size) {
        case 1: {
                        u8 _val;
                        int rc = read_config_byte(bus, devfn, where, &_val);

                        *val = _val;
                        return rc;
                }
        case 2: {
                        u16 _val;
                        int rc = read_config_word(bus, devfn, where, &_val);

                        *val = _val;
                        return rc;
                }
        default:
                return read_config_dword(bus, devfn, where, val);
        }
}

static int alchemy_pci_write(struct pci_bus *bus, unsigned int devfn,
                             int where, int size, u32 val)
{
        switch (size) {
        case 1:
                return write_config_byte(bus, devfn, where, (u8) val);
        case 2:
                return write_config_word(bus, devfn, where, (u16) val);
        default:
                return write_config_dword(bus, devfn, where, val);
        }
}

static struct pci_ops alchemy_pci_ops = {
        .read   = alchemy_pci_read,
        .write  = alchemy_pci_write,
};

static int alchemy_pci_def_idsel(unsigned int devsel, int assert)
{
        return 1;       /* success */
}

/* save PCI controller register contents. */
static int alchemy_pci_suspend(void)
{
        struct alchemy_pci_context *ctx = __alchemy_pci_ctx;
        if (!ctx)
                return 0;

        ctx->pm[0]  = __raw_readl(ctx->regs + PCI_REG_CMEM);
        ctx->pm[1]  = __raw_readl(ctx->regs + PCI_REG_CONFIG) & 0x0009ffff;
        ctx->pm[2]  = __raw_readl(ctx->regs + PCI_REG_B2BMASK_CCH);
        ctx->pm[3]  = __raw_readl(ctx->regs + PCI_REG_B2BBASE0_VID);
        ctx->pm[4]  = __raw_readl(ctx->regs + PCI_REG_B2BBASE1_SID);
        ctx->pm[5]  = __raw_readl(ctx->regs + PCI_REG_MWMASK_DEV);
        ctx->pm[6]  = __raw_readl(ctx->regs + PCI_REG_MWBASE_REV_CCL);
        ctx->pm[7]  = __raw_readl(ctx->regs + PCI_REG_ID);
        ctx->pm[8]  = __raw_readl(ctx->regs + PCI_REG_CLASSREV);
        ctx->pm[9]  = __raw_readl(ctx->regs + PCI_REG_PARAM);
        ctx->pm[10] = __raw_readl(ctx->regs + PCI_REG_MBAR);
        ctx->pm[11] = __raw_readl(ctx->regs + PCI_REG_TIMEOUT);

        return 0;
}

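/* Restore the saved registers; PCI_REG_CONFIG (pm[1]) is written last,
 * behind a write barrier, once the window and ID registers are back in
 * place.
 */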
static void alchemy_pci_resume(void)
{
        struct alchemy_pci_context *ctx = __alchemy_pci_ctx;
        if (!ctx)
                return;

        __raw_writel(ctx->pm[0],  ctx->regs + PCI_REG_CMEM);
        __raw_writel(ctx->pm[2],  ctx->regs + PCI_REG_B2BMASK_CCH);
        __raw_writel(ctx->pm[3],  ctx->regs + PCI_REG_B2BBASE0_VID);
        __raw_writel(ctx->pm[4],  ctx->regs + PCI_REG_B2BBASE1_SID);
        __raw_writel(ctx->pm[5],  ctx->regs + PCI_REG_MWMASK_DEV);
        __raw_writel(ctx->pm[6],  ctx->regs + PCI_REG_MWBASE_REV_CCL);
        __raw_writel(ctx->pm[7],  ctx->regs + PCI_REG_ID);
        __raw_writel(ctx->pm[8],  ctx->regs + PCI_REG_CLASSREV);
        __raw_writel(ctx->pm[9],  ctx->regs + PCI_REG_PARAM);
        __raw_writel(ctx->pm[10], ctx->regs + PCI_REG_MBAR);
        __raw_writel(ctx->pm[11], ctx->regs + PCI_REG_TIMEOUT);
        wmb();
        __raw_writel(ctx->pm[1],  ctx->regs + PCI_REG_CONFIG);
        wmb();

        /* YAMON on all db1xxx boards wipes the TLB and writes zero to C0_wired
         * on resume, making it necessary to recreate it as soon as possible.
         */
        ctx->wired_entry = 8191;        /* impossibly high value */
        alchemy_pci_wired_entry(ctx);   /* install it */
}

static struct syscore_ops alchemy_pci_pmops = {
        .suspend        = alchemy_pci_suspend,
        .resume         = alchemy_pci_resume,
};

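/* Probe: claim the control register resource and the PCI clock, map the
 * control registers and a 1MB chunk of the PCI I/O window, set up the wired
 * TLB entry used for config space, apply board-specific config register
 * bits and finally register the controller with the MIPS PCI core.
 */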
static int alchemy_pci_probe(struct platform_device *pdev)
{
        struct alchemy_pci_platdata *pd = pdev->dev.platform_data;
        struct alchemy_pci_context *ctx;
        void __iomem *virt_io;
        unsigned long val;
        struct resource *r;
        struct clk *c;
        int ret;

        /* need at least PCI IRQ mapping table */
        if (!pd) {
                dev_err(&pdev->dev, "need platform data for PCI setup\n");
                ret = -ENODEV;
                goto out;
        }

        ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
        if (!ctx) {
                dev_err(&pdev->dev, "no memory for pcictl context\n");
                ret = -ENOMEM;
                goto out;
        }

        r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
        if (!r) {
                dev_err(&pdev->dev, "no pcictl ctrl regs resource\n");
                ret = -ENODEV;
                goto out1;
        }

        if (!request_mem_region(r->start, resource_size(r), pdev->name)) {
                dev_err(&pdev->dev, "cannot claim pci regs\n");
                ret = -ENODEV;
                goto out1;
        }

        c = clk_get(&pdev->dev, "pci_clko");
        if (IS_ERR(c)) {
                dev_err(&pdev->dev, "unable to find PCI clock\n");
                ret = PTR_ERR(c);
                goto out2;
        }

        ret = clk_prepare_enable(c);
        if (ret) {
                dev_err(&pdev->dev, "cannot enable PCI clock\n");
                goto out6;
        }

        ctx->regs = ioremap_nocache(r->start, resource_size(r));
        if (!ctx->regs) {
                dev_err(&pdev->dev, "cannot map pci regs\n");
                ret = -ENODEV;
                goto out5;
        }

        /* map parts of the PCI IO area */
        /* REVISIT: if this changes with a newer variant (doubt it) make this
         * a platform resource.
         */
        virt_io = ioremap(AU1500_PCI_IO_PHYS_ADDR, 0x00100000);
        if (!virt_io) {
                dev_err(&pdev->dev, "cannot remap pci io space\n");
                ret = -ENODEV;
                goto out3;
        }
        ctx->alchemy_pci_ctrl.io_map_base = (unsigned long)virt_io;

        /* Au1500 revisions older than AD have borked coherent PCI */
        if ((alchemy_get_cputype() == ALCHEMY_CPU_AU1500) &&
            (read_c0_prid() < 0x01030202) && !coherentio) {
                val = __raw_readl(ctx->regs + PCI_REG_CONFIG);
                val |= PCI_CONFIG_NC;
                __raw_writel(val, ctx->regs + PCI_REG_CONFIG);
                wmb();
                dev_info(&pdev->dev, "non-coherent PCI on Au1500 AA/AB/AC\n");
        }

        if (pd->board_map_irq)
                ctx->board_map_irq = pd->board_map_irq;

        if (pd->board_pci_idsel)
                ctx->board_pci_idsel = pd->board_pci_idsel;
        else
                ctx->board_pci_idsel = alchemy_pci_def_idsel;

        /* fill in relevant pci_controller members */
        ctx->alchemy_pci_ctrl.pci_ops = &alchemy_pci_ops;
        ctx->alchemy_pci_ctrl.mem_resource = &alchemy_pci_def_memres;
        ctx->alchemy_pci_ctrl.io_resource = &alchemy_pci_def_iores;

        /* we can't ioremap the entire pci config space because it's too large,
         * nor can we dynamically ioremap it because some drivers use the
         * PCI config routines from within atomic context and that becomes a
         * problem in get_vm_area().  Instead we use one wired TLB entry to
         * handle all config accesses for all buses.
         */
        ctx->pci_cfg_vm = get_vm_area(0x2000, VM_IOREMAP);
        if (!ctx->pci_cfg_vm) {
                dev_err(&pdev->dev, "unable to get vm area\n");
                ret = -ENOMEM;
                goto out4;
        }
        ctx->wired_entry = 8191;        /* impossibly high value */
        alchemy_pci_wired_entry(ctx);   /* install it */

        set_io_port_base((unsigned long)ctx->alchemy_pci_ctrl.io_map_base);

        /* board may want to modify bits in the config register, do it now */
        val = __raw_readl(ctx->regs + PCI_REG_CONFIG);
        val &= ~pd->pci_cfg_clr;
        val |= pd->pci_cfg_set;
        val &= ~PCI_CONFIG_PD;          /* clear disable bit */
        __raw_writel(val, ctx->regs + PCI_REG_CONFIG);
        wmb();

        __alchemy_pci_ctx = ctx;
        platform_set_drvdata(pdev, ctx);
        register_syscore_ops(&alchemy_pci_pmops);
        register_pci_controller(&ctx->alchemy_pci_ctrl);

        dev_info(&pdev->dev, "PCI controller at %ld MHz\n",
                 clk_get_rate(c) / 1000000);

        return 0;

out4:
        iounmap(virt_io);
out3:
        iounmap(ctx->regs);
out5:
        clk_disable_unprepare(c);
out6:
        clk_put(c);
out2:
        release_mem_region(r->start, resource_size(r));
out1:
        kfree(ctx);
out:
        return ret;
}

static struct platform_driver alchemy_pcictl_driver = {
        .probe          = alchemy_pci_probe,
        .driver = {
                .name   = "alchemy-pci",
        },
};

static int __init alchemy_pci_init(void)
{
        /* Au1500/Au1550 have PCI */
        switch (alchemy_get_cputype()) {
        case ALCHEMY_CPU_AU1500:
        case ALCHEMY_CPU_AU1550:
                return platform_driver_register(&alchemy_pcictl_driver);
        }
        return 0;
}
arch_initcall(alchemy_pci_init);


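/* IRQ mapping is entirely board-specific and comes from the platform data
 * handed to the probe routine above.
 */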
int __init pcibios_map_irq(const struct pci_dev *dev, u8 slot, u8 pin)
{
        struct alchemy_pci_context *ctx = dev->sysdata;
        if (ctx && ctx->board_map_irq)
                return ctx->board_map_irq(dev, slot, pin);
        return -1;
}

int pcibios_plat_dev_init(struct pci_dev *dev)
{
        return 0;
}