/* linux/drivers/vme/bridges/vme_ca91cx42.c */
   1/*
   2 * Support for the Tundra Universe I/II VME-PCI Bridge Chips
   3 *
   4 * Author: Martyn Welch <martyn.welch@ge.com>
   5 * Copyright 2008 GE Intelligent Platforms Embedded Systems, Inc.
   6 *
   7 * Based on work by Tom Armistead and Ajit Prem
   8 * Copyright 2004 Motorola Inc.
   9 *
  10 * Derived from ca91c042.c by Michael Wyrick
  11 *
  12 * This program is free software; you can redistribute  it and/or modify it
  13 * under  the terms of  the GNU General  Public License as published by the
  14 * Free Software Foundation;  either version 2 of the  License, or (at your
  15 * option) any later version.
  16 */
  17
  18#include <linux/module.h>
  19#include <linux/mm.h>
  20#include <linux/types.h>
  21#include <linux/errno.h>
  22#include <linux/pci.h>
  23#include <linux/dma-mapping.h>
  24#include <linux/poll.h>
  25#include <linux/interrupt.h>
  26#include <linux/spinlock.h>
  27#include <linux/sched.h>
  28#include <linux/slab.h>
  29#include <linux/time.h>
  30#include <linux/io.h>
  31#include <linux/uaccess.h>
  32#include <linux/vme.h>
  33
  34#include "../vme_bridge.h"
  35#include "vme_ca91cx42.h"
  36
/* Forward declarations for the PCI driver hooks registered below */
static int ca91cx42_probe(struct pci_dev *, const struct pci_device_id *);
static void ca91cx42_remove(struct pci_dev *);

/* Module parameters */
/* presumably a geographical-address override; the module_param()
 * registration is not visible in this chunk -- confirm before relying
 * on it */
static int geoid;

static const char driver_name[] = "vme_ca91cx42";

/* PCI IDs this driver binds to: the Tundra CA91C142 (Universe) bridge */
static const struct pci_device_id ca91cx42_ids[] = {
	{ PCI_DEVICE(PCI_VENDOR_ID_TUNDRA, PCI_DEVICE_ID_TUNDRA_CA91C142) },
	{ },
};

MODULE_DEVICE_TABLE(pci, ca91cx42_ids);

static struct pci_driver ca91cx42_driver = {
	.name = driver_name,
	.id_table = ca91cx42_ids,
	.probe = ca91cx42_probe,
	.remove = ca91cx42_remove,
};
  58
/*
 * DMA interrupt: wake any thread sleeping on the bridge's DMA queue and
 * report the DMA source bit as serviced.
 */
static u32 ca91cx42_DMA_irqhandler(struct ca91cx42_driver *bridge)
{
	wake_up(&bridge->dma_queue);

	return CA91CX42_LINT_DMA;
}
  65
  66static u32 ca91cx42_LM_irqhandler(struct ca91cx42_driver *bridge, u32 stat)
  67{
  68        int i;
  69        u32 serviced = 0;
  70
  71        for (i = 0; i < 4; i++) {
  72                if (stat & CA91CX42_LINT_LM[i]) {
  73                        /* We only enable interrupts if the callback is set */
  74                        bridge->lm_callback[i](bridge->lm_data[i]);
  75                        serviced |= CA91CX42_LINT_LM[i];
  76                }
  77        }
  78
  79        return serviced;
  80}
  81
/* XXX This needs to be split into 4 queues */
/*
 * Mailbox interrupt: wake all waiters on the single shared mailbox
 * queue.  mbox_mask is currently unused because the four mailboxes are
 * not yet split into separate queues (see XXX above).
 */
static u32 ca91cx42_MB_irqhandler(struct ca91cx42_driver *bridge, int mbox_mask)
{
	wake_up(&bridge->mbox_queue);

	return CA91CX42_LINT_MBOX;
}
  89
/*
 * Software IACK interrupt: wake threads blocked in
 * ca91cx42_irq_generate() waiting for their interrupt to be
 * acknowledged on the bus.
 */
static u32 ca91cx42_IACK_irqhandler(struct ca91cx42_driver *bridge)
{
	wake_up(&bridge->iack_queue);

	return CA91CX42_LINT_SW_IACK;
}
  96
  97static u32 ca91cx42_VERR_irqhandler(struct vme_bridge *ca91cx42_bridge)
  98{
  99        int val;
 100        struct ca91cx42_driver *bridge;
 101
 102        bridge = ca91cx42_bridge->driver_priv;
 103
 104        val = ioread32(bridge->base + DGCS);
 105
 106        if (!(val & 0x00000800)) {
 107                dev_err(ca91cx42_bridge->parent, "ca91cx42_VERR_irqhandler DMA "
 108                        "Read Error DGCS=%08X\n", val);
 109        }
 110
 111        return CA91CX42_LINT_VERR;
 112}
 113
 114static u32 ca91cx42_LERR_irqhandler(struct vme_bridge *ca91cx42_bridge)
 115{
 116        int val;
 117        struct ca91cx42_driver *bridge;
 118
 119        bridge = ca91cx42_bridge->driver_priv;
 120
 121        val = ioread32(bridge->base + DGCS);
 122
 123        if (!(val & 0x00000800))
 124                dev_err(ca91cx42_bridge->parent, "ca91cx42_LERR_irqhandler DMA "
 125                        "Read Error DGCS=%08X\n", val);
 126
 127        return CA91CX42_LINT_LERR;
 128}
 129
 130
 131static u32 ca91cx42_VIRQ_irqhandler(struct vme_bridge *ca91cx42_bridge,
 132        int stat)
 133{
 134        int vec, i, serviced = 0;
 135        struct ca91cx42_driver *bridge;
 136
 137        bridge = ca91cx42_bridge->driver_priv;
 138
 139
 140        for (i = 7; i > 0; i--) {
 141                if (stat & (1 << i)) {
 142                        vec = ioread32(bridge->base +
 143                                CA91CX42_V_STATID[i]) & 0xff;
 144
 145                        vme_irq_handler(ca91cx42_bridge, i, vec);
 146
 147                        serviced |= (1 << i);
 148                }
 149        }
 150
 151        return serviced;
 152}
 153
 154static irqreturn_t ca91cx42_irqhandler(int irq, void *ptr)
 155{
 156        u32 stat, enable, serviced = 0;
 157        struct vme_bridge *ca91cx42_bridge;
 158        struct ca91cx42_driver *bridge;
 159
 160        ca91cx42_bridge = ptr;
 161
 162        bridge = ca91cx42_bridge->driver_priv;
 163
 164        enable = ioread32(bridge->base + LINT_EN);
 165        stat = ioread32(bridge->base + LINT_STAT);
 166
 167        /* Only look at unmasked interrupts */
 168        stat &= enable;
 169
 170        if (unlikely(!stat))
 171                return IRQ_NONE;
 172
 173        if (stat & CA91CX42_LINT_DMA)
 174                serviced |= ca91cx42_DMA_irqhandler(bridge);
 175        if (stat & (CA91CX42_LINT_LM0 | CA91CX42_LINT_LM1 | CA91CX42_LINT_LM2 |
 176                        CA91CX42_LINT_LM3))
 177                serviced |= ca91cx42_LM_irqhandler(bridge, stat);
 178        if (stat & CA91CX42_LINT_MBOX)
 179                serviced |= ca91cx42_MB_irqhandler(bridge, stat);
 180        if (stat & CA91CX42_LINT_SW_IACK)
 181                serviced |= ca91cx42_IACK_irqhandler(bridge);
 182        if (stat & CA91CX42_LINT_VERR)
 183                serviced |= ca91cx42_VERR_irqhandler(ca91cx42_bridge);
 184        if (stat & CA91CX42_LINT_LERR)
 185                serviced |= ca91cx42_LERR_irqhandler(ca91cx42_bridge);
 186        if (stat & (CA91CX42_LINT_VIRQ1 | CA91CX42_LINT_VIRQ2 |
 187                        CA91CX42_LINT_VIRQ3 | CA91CX42_LINT_VIRQ4 |
 188                        CA91CX42_LINT_VIRQ5 | CA91CX42_LINT_VIRQ6 |
 189                        CA91CX42_LINT_VIRQ7))
 190                serviced |= ca91cx42_VIRQ_irqhandler(ca91cx42_bridge, stat);
 191
 192        /* Clear serviced interrupts */
 193        iowrite32(serviced, bridge->base + LINT_STAT);
 194
 195        return IRQ_HANDLED;
 196}
 197
 198static int ca91cx42_irq_init(struct vme_bridge *ca91cx42_bridge)
 199{
 200        int result, tmp;
 201        struct pci_dev *pdev;
 202        struct ca91cx42_driver *bridge;
 203
 204        bridge = ca91cx42_bridge->driver_priv;
 205
 206        /* Need pdev */
 207        pdev = to_pci_dev(ca91cx42_bridge->parent);
 208
 209        /* Disable interrupts from PCI to VME */
 210        iowrite32(0, bridge->base + VINT_EN);
 211
 212        /* Disable PCI interrupts */
 213        iowrite32(0, bridge->base + LINT_EN);
 214        /* Clear Any Pending PCI Interrupts */
 215        iowrite32(0x00FFFFFF, bridge->base + LINT_STAT);
 216
 217        result = request_irq(pdev->irq, ca91cx42_irqhandler, IRQF_SHARED,
 218                        driver_name, ca91cx42_bridge);
 219        if (result) {
 220                dev_err(&pdev->dev, "Can't get assigned pci irq vector %02X\n",
 221                       pdev->irq);
 222                return result;
 223        }
 224
 225        /* Ensure all interrupts are mapped to PCI Interrupt 0 */
 226        iowrite32(0, bridge->base + LINT_MAP0);
 227        iowrite32(0, bridge->base + LINT_MAP1);
 228        iowrite32(0, bridge->base + LINT_MAP2);
 229
 230        /* Enable DMA, mailbox & LM Interrupts */
 231        tmp = CA91CX42_LINT_MBOX3 | CA91CX42_LINT_MBOX2 | CA91CX42_LINT_MBOX1 |
 232                CA91CX42_LINT_MBOX0 | CA91CX42_LINT_SW_IACK |
 233                CA91CX42_LINT_VERR | CA91CX42_LINT_LERR | CA91CX42_LINT_DMA;
 234
 235        iowrite32(tmp, bridge->base + LINT_EN);
 236
 237        return 0;
 238}
 239
/*
 * Tear down bridge interrupt handling: mask and clear all interrupt
 * sources, then release the PCI interrupt line.
 */
static void ca91cx42_irq_exit(struct ca91cx42_driver *bridge,
	struct pci_dev *pdev)
{
	struct vme_bridge *ca91cx42_bridge;

	/* Disable interrupts from PCI to VME */
	iowrite32(0, bridge->base + VINT_EN);

	/* Disable PCI interrupts */
	iowrite32(0, bridge->base + LINT_EN);
	/* Clear Any Pending PCI Interrupts */
	iowrite32(0x00FFFFFF, bridge->base + LINT_STAT);

	/*
	 * NOTE(review): container_of() expects the ADDRESS of the
	 * driver_priv member, but 'bridge' here is the VALUE stored in
	 * driver_priv (everywhere else in this file driver_priv is
	 * dereferenced to obtain it).  The pointer computed below
	 * therefore does not appear to be the vme_bridge that
	 * ca91cx42_irq_init() registered as the request_irq() dev_id, so
	 * free_irq() may not match the handler.  TODO: confirm against
	 * probe()/remove() and consider passing the vme_bridge in
	 * directly.
	 */
	ca91cx42_bridge = container_of((void *)bridge, struct vme_bridge,
				       driver_priv);
	free_irq(pdev->irq, ca91cx42_bridge);
}
 257
 258static int ca91cx42_iack_received(struct ca91cx42_driver *bridge, int level)
 259{
 260        u32 tmp;
 261
 262        tmp = ioread32(bridge->base + LINT_STAT);
 263
 264        if (tmp & (1 << level))
 265                return 0;
 266        else
 267                return 1;
 268}
 269
 270/*
 271 * Set up an VME interrupt
 272 */
 273static void ca91cx42_irq_set(struct vme_bridge *ca91cx42_bridge, int level,
 274        int state, int sync)
 275
 276{
 277        struct pci_dev *pdev;
 278        u32 tmp;
 279        struct ca91cx42_driver *bridge;
 280
 281        bridge = ca91cx42_bridge->driver_priv;
 282
 283        /* Enable IRQ level */
 284        tmp = ioread32(bridge->base + LINT_EN);
 285
 286        if (state == 0)
 287                tmp &= ~CA91CX42_LINT_VIRQ[level];
 288        else
 289                tmp |= CA91CX42_LINT_VIRQ[level];
 290
 291        iowrite32(tmp, bridge->base + LINT_EN);
 292
 293        if ((state == 0) && (sync != 0)) {
 294                pdev = to_pci_dev(ca91cx42_bridge->parent);
 295
 296                synchronize_irq(pdev->irq);
 297        }
 298}
 299
 300static int ca91cx42_irq_generate(struct vme_bridge *ca91cx42_bridge, int level,
 301        int statid)
 302{
 303        u32 tmp;
 304        struct ca91cx42_driver *bridge;
 305
 306        bridge = ca91cx42_bridge->driver_priv;
 307
 308        /* Universe can only generate even vectors */
 309        if (statid & 1)
 310                return -EINVAL;
 311
 312        mutex_lock(&bridge->vme_int);
 313
 314        tmp = ioread32(bridge->base + VINT_EN);
 315
 316        /* Set Status/ID */
 317        iowrite32(statid << 24, bridge->base + STATID);
 318
 319        /* Assert VMEbus IRQ */
 320        tmp = tmp | (1 << (level + 24));
 321        iowrite32(tmp, bridge->base + VINT_EN);
 322
 323        /* Wait for IACK */
 324        wait_event_interruptible(bridge->iack_queue,
 325                                 ca91cx42_iack_received(bridge, level));
 326
 327        /* Return interrupt to low state */
 328        tmp = ioread32(bridge->base + VINT_EN);
 329        tmp = tmp & ~(1 << (level + 24));
 330        iowrite32(tmp, bridge->base + VINT_EN);
 331
 332        mutex_unlock(&bridge->vme_int);
 333
 334        return 0;
 335}
 336
 337static int ca91cx42_slave_set(struct vme_slave_resource *image, int enabled,
 338        unsigned long long vme_base, unsigned long long size,
 339        dma_addr_t pci_base, u32 aspace, u32 cycle)
 340{
 341        unsigned int i, addr = 0, granularity;
 342        unsigned int temp_ctl = 0;
 343        unsigned int vme_bound, pci_offset;
 344        struct vme_bridge *ca91cx42_bridge;
 345        struct ca91cx42_driver *bridge;
 346
 347        ca91cx42_bridge = image->parent;
 348
 349        bridge = ca91cx42_bridge->driver_priv;
 350
 351        i = image->number;
 352
 353        switch (aspace) {
 354        case VME_A16:
 355                addr |= CA91CX42_VSI_CTL_VAS_A16;
 356                break;
 357        case VME_A24:
 358                addr |= CA91CX42_VSI_CTL_VAS_A24;
 359                break;
 360        case VME_A32:
 361                addr |= CA91CX42_VSI_CTL_VAS_A32;
 362                break;
 363        case VME_USER1:
 364                addr |= CA91CX42_VSI_CTL_VAS_USER1;
 365                break;
 366        case VME_USER2:
 367                addr |= CA91CX42_VSI_CTL_VAS_USER2;
 368                break;
 369        case VME_A64:
 370        case VME_CRCSR:
 371        case VME_USER3:
 372        case VME_USER4:
 373        default:
 374                dev_err(ca91cx42_bridge->parent, "Invalid address space\n");
 375                return -EINVAL;
 376                break;
 377        }
 378
 379        /*
 380         * Bound address is a valid address for the window, adjust
 381         * accordingly
 382         */
 383        vme_bound = vme_base + size;
 384        pci_offset = pci_base - vme_base;
 385
 386        if ((i == 0) || (i == 4))
 387                granularity = 0x1000;
 388        else
 389                granularity = 0x10000;
 390
 391        if (vme_base & (granularity - 1)) {
 392                dev_err(ca91cx42_bridge->parent, "Invalid VME base "
 393                        "alignment\n");
 394                return -EINVAL;
 395        }
 396        if (vme_bound & (granularity - 1)) {
 397                dev_err(ca91cx42_bridge->parent, "Invalid VME bound "
 398                        "alignment\n");
 399                return -EINVAL;
 400        }
 401        if (pci_offset & (granularity - 1)) {
 402                dev_err(ca91cx42_bridge->parent, "Invalid PCI Offset "
 403                        "alignment\n");
 404                return -EINVAL;
 405        }
 406
 407        /* Disable while we are mucking around */
 408        temp_ctl = ioread32(bridge->base + CA91CX42_VSI_CTL[i]);
 409        temp_ctl &= ~CA91CX42_VSI_CTL_EN;
 410        iowrite32(temp_ctl, bridge->base + CA91CX42_VSI_CTL[i]);
 411
 412        /* Setup mapping */
 413        iowrite32(vme_base, bridge->base + CA91CX42_VSI_BS[i]);
 414        iowrite32(vme_bound, bridge->base + CA91CX42_VSI_BD[i]);
 415        iowrite32(pci_offset, bridge->base + CA91CX42_VSI_TO[i]);
 416
 417        /* Setup address space */
 418        temp_ctl &= ~CA91CX42_VSI_CTL_VAS_M;
 419        temp_ctl |= addr;
 420
 421        /* Setup cycle types */
 422        temp_ctl &= ~(CA91CX42_VSI_CTL_PGM_M | CA91CX42_VSI_CTL_SUPER_M);
 423        if (cycle & VME_SUPER)
 424                temp_ctl |= CA91CX42_VSI_CTL_SUPER_SUPR;
 425        if (cycle & VME_USER)
 426                temp_ctl |= CA91CX42_VSI_CTL_SUPER_NPRIV;
 427        if (cycle & VME_PROG)
 428                temp_ctl |= CA91CX42_VSI_CTL_PGM_PGM;
 429        if (cycle & VME_DATA)
 430                temp_ctl |= CA91CX42_VSI_CTL_PGM_DATA;
 431
 432        /* Write ctl reg without enable */
 433        iowrite32(temp_ctl, bridge->base + CA91CX42_VSI_CTL[i]);
 434
 435        if (enabled)
 436                temp_ctl |= CA91CX42_VSI_CTL_EN;
 437
 438        iowrite32(temp_ctl, bridge->base + CA91CX42_VSI_CTL[i]);
 439
 440        return 0;
 441}
 442
 443static int ca91cx42_slave_get(struct vme_slave_resource *image, int *enabled,
 444        unsigned long long *vme_base, unsigned long long *size,
 445        dma_addr_t *pci_base, u32 *aspace, u32 *cycle)
 446{
 447        unsigned int i, granularity = 0, ctl = 0;
 448        unsigned long long vme_bound, pci_offset;
 449        struct ca91cx42_driver *bridge;
 450
 451        bridge = image->parent->driver_priv;
 452
 453        i = image->number;
 454
 455        if ((i == 0) || (i == 4))
 456                granularity = 0x1000;
 457        else
 458                granularity = 0x10000;
 459
 460        /* Read Registers */
 461        ctl = ioread32(bridge->base + CA91CX42_VSI_CTL[i]);
 462
 463        *vme_base = ioread32(bridge->base + CA91CX42_VSI_BS[i]);
 464        vme_bound = ioread32(bridge->base + CA91CX42_VSI_BD[i]);
 465        pci_offset = ioread32(bridge->base + CA91CX42_VSI_TO[i]);
 466
 467        *pci_base = (dma_addr_t)*vme_base + pci_offset;
 468        *size = (unsigned long long)((vme_bound - *vme_base) + granularity);
 469
 470        *enabled = 0;
 471        *aspace = 0;
 472        *cycle = 0;
 473
 474        if (ctl & CA91CX42_VSI_CTL_EN)
 475                *enabled = 1;
 476
 477        if ((ctl & CA91CX42_VSI_CTL_VAS_M) == CA91CX42_VSI_CTL_VAS_A16)
 478                *aspace = VME_A16;
 479        if ((ctl & CA91CX42_VSI_CTL_VAS_M) == CA91CX42_VSI_CTL_VAS_A24)
 480                *aspace = VME_A24;
 481        if ((ctl & CA91CX42_VSI_CTL_VAS_M) == CA91CX42_VSI_CTL_VAS_A32)
 482                *aspace = VME_A32;
 483        if ((ctl & CA91CX42_VSI_CTL_VAS_M) == CA91CX42_VSI_CTL_VAS_USER1)
 484                *aspace = VME_USER1;
 485        if ((ctl & CA91CX42_VSI_CTL_VAS_M) == CA91CX42_VSI_CTL_VAS_USER2)
 486                *aspace = VME_USER2;
 487
 488        if (ctl & CA91CX42_VSI_CTL_SUPER_SUPR)
 489                *cycle |= VME_SUPER;
 490        if (ctl & CA91CX42_VSI_CTL_SUPER_NPRIV)
 491                *cycle |= VME_USER;
 492        if (ctl & CA91CX42_VSI_CTL_PGM_PGM)
 493                *cycle |= VME_PROG;
 494        if (ctl & CA91CX42_VSI_CTL_PGM_DATA)
 495                *cycle |= VME_DATA;
 496
 497        return 0;
 498}
 499
/*
 * Allocate and map PCI Resource
 *
 * Reserve a 'size'-byte window of PCI memory space for a master image
 * and ioremap it.  An existing mapping of the same size is reused; a
 * differently-sized one is torn down first.
 *
 * Returns 0 on success, -EINVAL if the bridge has no parent device,
 * -ENOMEM on name-allocation or remap failure, or the
 * pci_bus_alloc_resource() error code.
 */
static int ca91cx42_alloc_resource(struct vme_master_resource *image,
	unsigned long long size)
{
	unsigned long long existing_size;
	int retval = 0;
	struct pci_dev *pdev;
	struct vme_bridge *ca91cx42_bridge;

	ca91cx42_bridge = image->parent;

	/* Find pci_dev container of dev */
	if (!ca91cx42_bridge->parent) {
		/* NOTE(review): parent is NULL here, so dev_err() gets a
		 * NULL device and falls back to a generic printk. */
		dev_err(ca91cx42_bridge->parent, "Dev entry NULL\n");
		return -EINVAL;
	}
	pdev = to_pci_dev(ca91cx42_bridge->parent);

	existing_size = (unsigned long long)(image->bus_resource.end -
		image->bus_resource.start);

	/* If the existing size is OK, return */
	if (existing_size == (size - 1))
		return 0;

	/* Tear down the old, differently-sized mapping */
	if (existing_size != 0) {
		iounmap(image->kern_base);
		image->kern_base = NULL;
		kfree(image->bus_resource.name);
		release_resource(&image->bus_resource);
		memset(&image->bus_resource, 0, sizeof(image->bus_resource));
	}

	/* GFP_ATOMIC: callers such as ca91cx42_master_set() hold the
	 * image->lock spinlock around this call */
	if (!image->bus_resource.name) {
		image->bus_resource.name = kmalloc(VMENAMSIZ+3, GFP_ATOMIC);
		if (!image->bus_resource.name) {
			retval = -ENOMEM;
			goto err_name;
		}
	}

	sprintf((char *)image->bus_resource.name, "%s.%d",
		ca91cx42_bridge->name, image->number);

	image->bus_resource.start = 0;
	image->bus_resource.end = (unsigned long)size;
	image->bus_resource.flags = IORESOURCE_MEM;

	/* Ask the PCI core for a 64 KiB-aligned window on the parent bus */
	retval = pci_bus_alloc_resource(pdev->bus,
		&image->bus_resource, size, 0x10000, PCIBIOS_MIN_MEM,
		0, NULL, NULL);
	if (retval) {
		dev_err(ca91cx42_bridge->parent, "Failed to allocate mem "
			"resource for window %d size 0x%lx start 0x%lx\n",
			image->number, (unsigned long)size,
			(unsigned long)image->bus_resource.start);
		goto err_resource;
	}

	image->kern_base = ioremap_nocache(
		image->bus_resource.start, size);
	if (!image->kern_base) {
		dev_err(ca91cx42_bridge->parent, "Failed to remap resource\n");
		retval = -ENOMEM;
		goto err_remap;
	}

	return 0;

err_remap:
	release_resource(&image->bus_resource);
err_resource:
	kfree(image->bus_resource.name);
	memset(&image->bus_resource, 0, sizeof(image->bus_resource));
err_name:
	return retval;
}
 579
/*
 * Free and unmap PCI Resource
 *
 * Reverses ca91cx42_alloc_resource(): unmaps the kernel mapping,
 * releases the bus window and frees the resource name, leaving
 * image->bus_resource zeroed so a later allocation starts clean.
 */
static void ca91cx42_free_resource(struct vme_master_resource *image)
{
	iounmap(image->kern_base);
	image->kern_base = NULL;
	release_resource(&image->bus_resource);
	kfree(image->bus_resource.name);
	memset(&image->bus_resource, 0, sizeof(image->bus_resource));
}
 591
 592
 593static int ca91cx42_master_set(struct vme_master_resource *image, int enabled,
 594        unsigned long long vme_base, unsigned long long size, u32 aspace,
 595        u32 cycle, u32 dwidth)
 596{
 597        int retval = 0;
 598        unsigned int i, granularity = 0;
 599        unsigned int temp_ctl = 0;
 600        unsigned long long pci_bound, vme_offset, pci_base;
 601        struct vme_bridge *ca91cx42_bridge;
 602        struct ca91cx42_driver *bridge;
 603
 604        ca91cx42_bridge = image->parent;
 605
 606        bridge = ca91cx42_bridge->driver_priv;
 607
 608        i = image->number;
 609
 610        if ((i == 0) || (i == 4))
 611                granularity = 0x1000;
 612        else
 613                granularity = 0x10000;
 614
 615        /* Verify input data */
 616        if (vme_base & (granularity - 1)) {
 617                dev_err(ca91cx42_bridge->parent, "Invalid VME Window "
 618                        "alignment\n");
 619                retval = -EINVAL;
 620                goto err_window;
 621        }
 622        if (size & (granularity - 1)) {
 623                dev_err(ca91cx42_bridge->parent, "Invalid VME Window "
 624                        "alignment\n");
 625                retval = -EINVAL;
 626                goto err_window;
 627        }
 628
 629        spin_lock(&image->lock);
 630
 631        /*
 632         * Let's allocate the resource here rather than further up the stack as
 633         * it avoids pushing loads of bus dependent stuff up the stack
 634         */
 635        retval = ca91cx42_alloc_resource(image, size);
 636        if (retval) {
 637                spin_unlock(&image->lock);
 638                dev_err(ca91cx42_bridge->parent, "Unable to allocate memory "
 639                        "for resource name\n");
 640                retval = -ENOMEM;
 641                goto err_res;
 642        }
 643
 644        pci_base = (unsigned long long)image->bus_resource.start;
 645
 646        /*
 647         * Bound address is a valid address for the window, adjust
 648         * according to window granularity.
 649         */
 650        pci_bound = pci_base + size;
 651        vme_offset = vme_base - pci_base;
 652
 653        /* Disable while we are mucking around */
 654        temp_ctl = ioread32(bridge->base + CA91CX42_LSI_CTL[i]);
 655        temp_ctl &= ~CA91CX42_LSI_CTL_EN;
 656        iowrite32(temp_ctl, bridge->base + CA91CX42_LSI_CTL[i]);
 657
 658        /* Setup cycle types */
 659        temp_ctl &= ~CA91CX42_LSI_CTL_VCT_M;
 660        if (cycle & VME_BLT)
 661                temp_ctl |= CA91CX42_LSI_CTL_VCT_BLT;
 662        if (cycle & VME_MBLT)
 663                temp_ctl |= CA91CX42_LSI_CTL_VCT_MBLT;
 664
 665        /* Setup data width */
 666        temp_ctl &= ~CA91CX42_LSI_CTL_VDW_M;
 667        switch (dwidth) {
 668        case VME_D8:
 669                temp_ctl |= CA91CX42_LSI_CTL_VDW_D8;
 670                break;
 671        case VME_D16:
 672                temp_ctl |= CA91CX42_LSI_CTL_VDW_D16;
 673                break;
 674        case VME_D32:
 675                temp_ctl |= CA91CX42_LSI_CTL_VDW_D32;
 676                break;
 677        case VME_D64:
 678                temp_ctl |= CA91CX42_LSI_CTL_VDW_D64;
 679                break;
 680        default:
 681                spin_unlock(&image->lock);
 682                dev_err(ca91cx42_bridge->parent, "Invalid data width\n");
 683                retval = -EINVAL;
 684                goto err_dwidth;
 685                break;
 686        }
 687
 688        /* Setup address space */
 689        temp_ctl &= ~CA91CX42_LSI_CTL_VAS_M;
 690        switch (aspace) {
 691        case VME_A16:
 692                temp_ctl |= CA91CX42_LSI_CTL_VAS_A16;
 693                break;
 694        case VME_A24:
 695                temp_ctl |= CA91CX42_LSI_CTL_VAS_A24;
 696                break;
 697        case VME_A32:
 698                temp_ctl |= CA91CX42_LSI_CTL_VAS_A32;
 699                break;
 700        case VME_CRCSR:
 701                temp_ctl |= CA91CX42_LSI_CTL_VAS_CRCSR;
 702                break;
 703        case VME_USER1:
 704                temp_ctl |= CA91CX42_LSI_CTL_VAS_USER1;
 705                break;
 706        case VME_USER2:
 707                temp_ctl |= CA91CX42_LSI_CTL_VAS_USER2;
 708                break;
 709        case VME_A64:
 710        case VME_USER3:
 711        case VME_USER4:
 712        default:
 713                spin_unlock(&image->lock);
 714                dev_err(ca91cx42_bridge->parent, "Invalid address space\n");
 715                retval = -EINVAL;
 716                goto err_aspace;
 717                break;
 718        }
 719
 720        temp_ctl &= ~(CA91CX42_LSI_CTL_PGM_M | CA91CX42_LSI_CTL_SUPER_M);
 721        if (cycle & VME_SUPER)
 722                temp_ctl |= CA91CX42_LSI_CTL_SUPER_SUPR;
 723        if (cycle & VME_PROG)
 724                temp_ctl |= CA91CX42_LSI_CTL_PGM_PGM;
 725
 726        /* Setup mapping */
 727        iowrite32(pci_base, bridge->base + CA91CX42_LSI_BS[i]);
 728        iowrite32(pci_bound, bridge->base + CA91CX42_LSI_BD[i]);
 729        iowrite32(vme_offset, bridge->base + CA91CX42_LSI_TO[i]);
 730
 731        /* Write ctl reg without enable */
 732        iowrite32(temp_ctl, bridge->base + CA91CX42_LSI_CTL[i]);
 733
 734        if (enabled)
 735                temp_ctl |= CA91CX42_LSI_CTL_EN;
 736
 737        iowrite32(temp_ctl, bridge->base + CA91CX42_LSI_CTL[i]);
 738
 739        spin_unlock(&image->lock);
 740        return 0;
 741
 742err_aspace:
 743err_dwidth:
 744        ca91cx42_free_resource(image);
 745err_res:
 746err_window:
 747        return retval;
 748}
 749
 750static int __ca91cx42_master_get(struct vme_master_resource *image,
 751        int *enabled, unsigned long long *vme_base, unsigned long long *size,
 752        u32 *aspace, u32 *cycle, u32 *dwidth)
 753{
 754        unsigned int i, ctl;
 755        unsigned long long pci_base, pci_bound, vme_offset;
 756        struct ca91cx42_driver *bridge;
 757
 758        bridge = image->parent->driver_priv;
 759
 760        i = image->number;
 761
 762        ctl = ioread32(bridge->base + CA91CX42_LSI_CTL[i]);
 763
 764        pci_base = ioread32(bridge->base + CA91CX42_LSI_BS[i]);
 765        vme_offset = ioread32(bridge->base + CA91CX42_LSI_TO[i]);
 766        pci_bound = ioread32(bridge->base + CA91CX42_LSI_BD[i]);
 767
 768        *vme_base = pci_base + vme_offset;
 769        *size = (unsigned long long)(pci_bound - pci_base);
 770
 771        *enabled = 0;
 772        *aspace = 0;
 773        *cycle = 0;
 774        *dwidth = 0;
 775
 776        if (ctl & CA91CX42_LSI_CTL_EN)
 777                *enabled = 1;
 778
 779        /* Setup address space */
 780        switch (ctl & CA91CX42_LSI_CTL_VAS_M) {
 781        case CA91CX42_LSI_CTL_VAS_A16:
 782                *aspace = VME_A16;
 783                break;
 784        case CA91CX42_LSI_CTL_VAS_A24:
 785                *aspace = VME_A24;
 786                break;
 787        case CA91CX42_LSI_CTL_VAS_A32:
 788                *aspace = VME_A32;
 789                break;
 790        case CA91CX42_LSI_CTL_VAS_CRCSR:
 791                *aspace = VME_CRCSR;
 792                break;
 793        case CA91CX42_LSI_CTL_VAS_USER1:
 794                *aspace = VME_USER1;
 795                break;
 796        case CA91CX42_LSI_CTL_VAS_USER2:
 797                *aspace = VME_USER2;
 798                break;
 799        }
 800
 801        /* XXX Not sure howto check for MBLT */
 802        /* Setup cycle types */
 803        if (ctl & CA91CX42_LSI_CTL_VCT_BLT)
 804                *cycle |= VME_BLT;
 805        else
 806                *cycle |= VME_SCT;
 807
 808        if (ctl & CA91CX42_LSI_CTL_SUPER_SUPR)
 809                *cycle |= VME_SUPER;
 810        else
 811                *cycle |= VME_USER;
 812
 813        if (ctl & CA91CX42_LSI_CTL_PGM_PGM)
 814                *cycle = VME_PROG;
 815        else
 816                *cycle = VME_DATA;
 817
 818        /* Setup data width */
 819        switch (ctl & CA91CX42_LSI_CTL_VDW_M) {
 820        case CA91CX42_LSI_CTL_VDW_D8:
 821                *dwidth = VME_D8;
 822                break;
 823        case CA91CX42_LSI_CTL_VDW_D16:
 824                *dwidth = VME_D16;
 825                break;
 826        case CA91CX42_LSI_CTL_VDW_D32:
 827                *dwidth = VME_D32;
 828                break;
 829        case CA91CX42_LSI_CTL_VDW_D64:
 830                *dwidth = VME_D64;
 831                break;
 832        }
 833
 834        return 0;
 835}
 836
 837static int ca91cx42_master_get(struct vme_master_resource *image, int *enabled,
 838        unsigned long long *vme_base, unsigned long long *size, u32 *aspace,
 839        u32 *cycle, u32 *dwidth)
 840{
 841        int retval;
 842
 843        spin_lock(&image->lock);
 844
 845        retval = __ca91cx42_master_get(image, enabled, vme_base, size, aspace,
 846                cycle, dwidth);
 847
 848        spin_unlock(&image->lock);
 849
 850        return retval;
 851}
 852
/*
 * Read 'count' bytes from a master window at 'offset' into 'buf',
 * holding the image lock for the duration.  Returns the number of
 * bytes read (always 'count').
 */
static ssize_t ca91cx42_master_read(struct vme_master_resource *image,
	void *buf, size_t count, loff_t offset)
{
	ssize_t retval;
	void __iomem *addr = image->kern_base + offset;
	unsigned int done = 0;
	unsigned int count32;

	if (count == 0)
		return 0;

	spin_lock(&image->lock);

	/* The following code handles VME address alignment. We cannot use
	 * memcpy_xxx here because it may cut data transfers in to 8-bit
	 * cycles when D16 or D32 cycles are required on the VME bus.
	 * On the other hand, the bridge itself assures that the maximum data
	 * cycle configured for the transfer is used and splits it
	 * automatically for non-aligned addresses, so we don't want the
	 * overhead of needlessly forcing small transfers for the entire cycle.
	 */
	/* Leading byte if the start address is odd */
	if ((uintptr_t)addr & 0x1) {
		*(u8 *)buf = ioread8(addr);
		done += 1;
		if (done == count)
			goto out;
	}
	/* Leading 16-bit word to reach 32-bit alignment */
	if ((uintptr_t)(addr + done) & 0x2) {
		if ((count - done) < 2) {
			*(u8 *)(buf + done) = ioread8(addr + done);
			done += 1;
			goto out;
		} else {
			*(u16 *)(buf + done) = ioread16(addr + done);
			done += 2;
		}
	}

	/* Bulk transfer as aligned 32-bit reads.  count32 is the
	 * remaining length rounded down to a multiple of 4; since 'done'
	 * is at most 3 here, comparing the offset against it still
	 * leaves at most 3 trailing bytes for the code below. */
	count32 = (count - done) & ~0x3;
	while (done < count32) {
		*(u32 *)(buf + done) = ioread32(addr + done);
		done += 4;
	}

	/* Trailing word and/or byte */
	if ((count - done) & 0x2) {
		*(u16 *)(buf + done) = ioread16(addr + done);
		done += 2;
	}
	if ((count - done) & 0x1) {
		*(u8 *)(buf + done) = ioread8(addr + done);
		done += 1;
	}
out:
	retval = count;
	spin_unlock(&image->lock);

	return retval;
}
 911
/*
 * Write 'count' bytes from 'buf' to a master window at 'offset',
 * holding the image lock for the duration.  Returns the number of
 * bytes written (always 'count').
 */
static ssize_t ca91cx42_master_write(struct vme_master_resource *image,
	void *buf, size_t count, loff_t offset)
{
	ssize_t retval;
	void __iomem *addr = image->kern_base + offset;
	unsigned int done = 0;
	unsigned int count32;

	if (count == 0)
		return 0;

	spin_lock(&image->lock);

	/* Here we apply for the same strategy we do in master_read
	 * function in order to assure the correct cycles.
	 */
	/* Leading byte if the start address is odd */
	if ((uintptr_t)addr & 0x1) {
		iowrite8(*(u8 *)buf, addr);
		done += 1;
		if (done == count)
			goto out;
	}
	/* Leading 16-bit word to reach 32-bit alignment */
	if ((uintptr_t)(addr + done) & 0x2) {
		if ((count - done) < 2) {
			iowrite8(*(u8 *)(buf + done), addr + done);
			done += 1;
			goto out;
		} else {
			iowrite16(*(u16 *)(buf + done), addr + done);
			done += 2;
		}
	}

	/* Bulk transfer as aligned 32-bit writes; trailing bytes are
	 * handled below (same offset/length reasoning as master_read) */
	count32 = (count - done) & ~0x3;
	while (done < count32) {
		iowrite32(*(u32 *)(buf + done), addr + done);
		done += 4;
	}

	/* Trailing word and/or byte */
	if ((count - done) & 0x2) {
		iowrite16(*(u16 *)(buf + done), addr + done);
		done += 2;
	}
	if ((count - done) & 0x1) {
		iowrite8(*(u8 *)(buf + done), addr + done);
		done += 1;
	}
out:
	retval = count;

	spin_unlock(&image->lock);

	return retval;
}
 966
/* Perform an atomic read-modify-write cycle on the VME bus.
 *
 * Programs the bridge's special cycle generator (SCYC_*) with @mask,
 * @compare and @swap, then triggers the RMW by reading the target
 * location through the master window at @offset.  Returns the value
 * read from the bus.
 *
 * NOTE(review): result is u32, so the -EINVAL error is returned as a
 * large unsigned value - confirm callers expect this encoding.
 */
static unsigned int ca91cx42_master_rmw(struct vme_master_resource *image,
        unsigned int mask, unsigned int compare, unsigned int swap,
        loff_t offset)
{
        u32 result;
        uintptr_t pci_addr;
        int i;
        struct ca91cx42_driver *bridge;
        struct device *dev;

        bridge = image->parent->driver_priv;
        dev = image->parent->parent;

        /* Find the PCI address that maps to the desired VME address */
        i = image->number;

        /* Locking as we can only do one of these at a time */
        mutex_lock(&bridge->vme_rmw);

        /* Lock image */
        spin_lock(&image->lock);

        pci_addr = (uintptr_t)image->kern_base + offset;

        /* Address must be 4-byte aligned */
        if (pci_addr & 0x3) {
                dev_err(dev, "RMW Address not 4-byte aligned\n");
                result = -EINVAL;
                goto out;
        }

        /* Ensure RMW Disabled whilst configuring */
        iowrite32(0, bridge->base + SCYC_CTL);

        /* Configure registers: enabled bits, compare value, swap value and
         * the PCI address that will trigger the special cycle.
         */
        iowrite32(mask, bridge->base + SCYC_EN);
        iowrite32(compare, bridge->base + SCYC_CMP);
        iowrite32(swap, bridge->base + SCYC_SWP);
        iowrite32(pci_addr, bridge->base + SCYC_ADDR);

        /* Enable RMW */
        iowrite32(CA91CX42_SCYC_CTL_CYC_RMW, bridge->base + SCYC_CTL);

        /* Kick process off with a read to the required address. */
        result = ioread32(image->kern_base + offset);

        /* Disable RMW */
        iowrite32(0, bridge->base + SCYC_CTL);

out:
        spin_unlock(&image->lock);

        mutex_unlock(&bridge->vme_rmw);

        return result;
}
1023
1024static int ca91cx42_dma_list_add(struct vme_dma_list *list,
1025        struct vme_dma_attr *src, struct vme_dma_attr *dest, size_t count)
1026{
1027        struct ca91cx42_dma_entry *entry, *prev;
1028        struct vme_dma_pci *pci_attr;
1029        struct vme_dma_vme *vme_attr;
1030        dma_addr_t desc_ptr;
1031        int retval = 0;
1032        struct device *dev;
1033
1034        dev = list->parent->parent->parent;
1035
1036        /* XXX descriptor must be aligned on 64-bit boundaries */
1037        entry = kmalloc(sizeof(*entry), GFP_KERNEL);
1038        if (!entry) {
1039                retval = -ENOMEM;
1040                goto err_mem;
1041        }
1042
1043        /* Test descriptor alignment */
1044        if ((unsigned long)&entry->descriptor & CA91CX42_DCPP_M) {
1045                dev_err(dev, "Descriptor not aligned to 16 byte boundary as "
1046                        "required: %p\n", &entry->descriptor);
1047                retval = -EINVAL;
1048                goto err_align;
1049        }
1050
1051        memset(&entry->descriptor, 0, sizeof(entry->descriptor));
1052
1053        if (dest->type == VME_DMA_VME) {
1054                entry->descriptor.dctl |= CA91CX42_DCTL_L2V;
1055                vme_attr = dest->private;
1056                pci_attr = src->private;
1057        } else {
1058                vme_attr = src->private;
1059                pci_attr = dest->private;
1060        }
1061
1062        /* Check we can do fulfill required attributes */
1063        if ((vme_attr->aspace & ~(VME_A16 | VME_A24 | VME_A32 | VME_USER1 |
1064                VME_USER2)) != 0) {
1065
1066                dev_err(dev, "Unsupported cycle type\n");
1067                retval = -EINVAL;
1068                goto err_aspace;
1069        }
1070
1071        if ((vme_attr->cycle & ~(VME_SCT | VME_BLT | VME_SUPER | VME_USER |
1072                VME_PROG | VME_DATA)) != 0) {
1073
1074                dev_err(dev, "Unsupported cycle type\n");
1075                retval = -EINVAL;
1076                goto err_cycle;
1077        }
1078
1079        /* Check to see if we can fulfill source and destination */
1080        if (!(((src->type == VME_DMA_PCI) && (dest->type == VME_DMA_VME)) ||
1081                ((src->type == VME_DMA_VME) && (dest->type == VME_DMA_PCI)))) {
1082
1083                dev_err(dev, "Cannot perform transfer with this "
1084                        "source-destination combination\n");
1085                retval = -EINVAL;
1086                goto err_direct;
1087        }
1088
1089        /* Setup cycle types */
1090        if (vme_attr->cycle & VME_BLT)
1091                entry->descriptor.dctl |= CA91CX42_DCTL_VCT_BLT;
1092
1093        /* Setup data width */
1094        switch (vme_attr->dwidth) {
1095        case VME_D8:
1096                entry->descriptor.dctl |= CA91CX42_DCTL_VDW_D8;
1097                break;
1098        case VME_D16:
1099                entry->descriptor.dctl |= CA91CX42_DCTL_VDW_D16;
1100                break;
1101        case VME_D32:
1102                entry->descriptor.dctl |= CA91CX42_DCTL_VDW_D32;
1103                break;
1104        case VME_D64:
1105                entry->descriptor.dctl |= CA91CX42_DCTL_VDW_D64;
1106                break;
1107        default:
1108                dev_err(dev, "Invalid data width\n");
1109                return -EINVAL;
1110        }
1111
1112        /* Setup address space */
1113        switch (vme_attr->aspace) {
1114        case VME_A16:
1115                entry->descriptor.dctl |= CA91CX42_DCTL_VAS_A16;
1116                break;
1117        case VME_A24:
1118                entry->descriptor.dctl |= CA91CX42_DCTL_VAS_A24;
1119                break;
1120        case VME_A32:
1121                entry->descriptor.dctl |= CA91CX42_DCTL_VAS_A32;
1122                break;
1123        case VME_USER1:
1124                entry->descriptor.dctl |= CA91CX42_DCTL_VAS_USER1;
1125                break;
1126        case VME_USER2:
1127                entry->descriptor.dctl |= CA91CX42_DCTL_VAS_USER2;
1128                break;
1129        default:
1130                dev_err(dev, "Invalid address space\n");
1131                return -EINVAL;
1132                break;
1133        }
1134
1135        if (vme_attr->cycle & VME_SUPER)
1136                entry->descriptor.dctl |= CA91CX42_DCTL_SUPER_SUPR;
1137        if (vme_attr->cycle & VME_PROG)
1138                entry->descriptor.dctl |= CA91CX42_DCTL_PGM_PGM;
1139
1140        entry->descriptor.dtbc = count;
1141        entry->descriptor.dla = pci_attr->address;
1142        entry->descriptor.dva = vme_attr->address;
1143        entry->descriptor.dcpp = CA91CX42_DCPP_NULL;
1144
1145        /* Add to list */
1146        list_add_tail(&entry->list, &list->entries);
1147
1148        /* Fill out previous descriptors "Next Address" */
1149        if (entry->list.prev != &list->entries) {
1150                prev = list_entry(entry->list.prev, struct ca91cx42_dma_entry,
1151                        list);
1152                /* We need the bus address for the pointer */
1153                desc_ptr = virt_to_bus(&entry->descriptor);
1154                prev->descriptor.dcpp = desc_ptr & ~CA91CX42_DCPP_M;
1155        }
1156
1157        return 0;
1158
1159err_cycle:
1160err_aspace:
1161err_direct:
1162err_align:
1163        kfree(entry);
1164err_mem:
1165        return retval;
1166}
1167
1168static int ca91cx42_dma_busy(struct vme_bridge *ca91cx42_bridge)
1169{
1170        u32 tmp;
1171        struct ca91cx42_driver *bridge;
1172
1173        bridge = ca91cx42_bridge->driver_priv;
1174
1175        tmp = ioread32(bridge->base + DGCS);
1176
1177        if (tmp & CA91CX42_DGCS_ACT)
1178                return 0;
1179        else
1180                return 1;
1181}
1182
/* Execute a previously built DMA list on the bridge's DMA engine.
 *
 * Marks the list as running, points the hardware at the first chained
 * descriptor and starts the engine, then sleeps until the engine goes
 * idle.  Returns 0 on success, -EBUSY if a transfer is already running,
 * -EINTR if interrupted (after requesting and waiting for an abort), or
 * -EIO on a reported bus error.
 */
static int ca91cx42_dma_list_exec(struct vme_dma_list *list)
{
        struct vme_dma_resource *ctrlr;
        struct ca91cx42_dma_entry *entry;
        int retval;
        dma_addr_t bus_addr;
        u32 val;
        struct device *dev;
        struct ca91cx42_driver *bridge;

        ctrlr = list->parent;

        bridge = ctrlr->parent->driver_priv;
        dev = ctrlr->parent->parent;

        mutex_lock(&ctrlr->mtx);

        if (!(list_empty(&ctrlr->running))) {
                /*
                 * XXX We have an active DMA transfer and currently haven't
                 *     sorted out the mechanism for "pending" DMA transfers.
                 *     Return busy.
                 */
                /* Need to add to pending here */
                mutex_unlock(&ctrlr->mtx);
                return -EBUSY;
        } else {
                list_add(&list->list, &ctrlr->running);
        }

        /* Get first bus address and write into registers */
        entry = list_first_entry(&list->entries, struct ca91cx42_dma_entry,
                list);

        bus_addr = virt_to_bus(&entry->descriptor);

        mutex_unlock(&ctrlr->mtx);

        /* Zero the byte counter and point the engine at the first
         * descriptor in the chain.
         */
        iowrite32(0, bridge->base + DTBC);
        iowrite32(bus_addr & ~CA91CX42_DCPP_M, bridge->base + DCPP);

        /* Start the operation */
        val = ioread32(bridge->base + DGCS);

        /* XXX Could set VMEbus On and Off Counters here */
        val &= (CA91CX42_DGCS_VON_M | CA91CX42_DGCS_VOFF_M);

        /* Chained mode, stop/halt on error, and write-1-to-clear any stale
         * DONE/error status bits.
         */
        val |= (CA91CX42_DGCS_CHAIN | CA91CX42_DGCS_STOP | CA91CX42_DGCS_HALT |
                CA91CX42_DGCS_DONE | CA91CX42_DGCS_LERR | CA91CX42_DGCS_VERR |
                CA91CX42_DGCS_PERR);

        iowrite32(val, bridge->base + DGCS);

        /* Separate write for GO so configuration is in place first */
        val |= CA91CX42_DGCS_GO;

        iowrite32(val, bridge->base + DGCS);

        /* Sleep until the engine reports idle (woken by the DMA irq) */
        retval = wait_event_interruptible(bridge->dma_queue,
                                          ca91cx42_dma_busy(ctrlr->parent));

        if (retval) {
                /* Interrupted: request a stop and wait (uninterruptibly)
                 * for the hardware to actually abort before returning.
                 */
                val = ioread32(bridge->base + DGCS);
                iowrite32(val | CA91CX42_DGCS_STOP_REQ, bridge->base + DGCS);
                /* Wait for the operation to abort */
                wait_event(bridge->dma_queue,
                           ca91cx42_dma_busy(ctrlr->parent));
                retval = -EINTR;
                goto exit;
        }

        /*
         * Read status register, this register is valid until we kick off a
         * new transfer.
         */
        val = ioread32(bridge->base + DGCS);

        if (val & (CA91CX42_DGCS_LERR | CA91CX42_DGCS_VERR |
                CA91CX42_DGCS_PERR)) {

                dev_err(dev, "ca91c042: DMA Error. DGCS=%08X\n", val);
                /* NOTE(review): this DCTL read's value is discarded -
                 * presumably a debug leftover; confirm whether the read
                 * itself has a required side effect.
                 */
                val = ioread32(bridge->base + DCTL);
                retval = -EIO;
        }

exit:
        /* Remove list from running list */
        mutex_lock(&ctrlr->mtx);
        list_del(&list->list);
        mutex_unlock(&ctrlr->mtx);

        return retval;

}
1276
1277static int ca91cx42_dma_list_empty(struct vme_dma_list *list)
1278{
1279        struct list_head *pos, *temp;
1280        struct ca91cx42_dma_entry *entry;
1281
1282        /* detach and free each entry */
1283        list_for_each_safe(pos, temp, &list->entries) {
1284                list_del(pos);
1285                entry = list_entry(pos, struct ca91cx42_dma_entry, list);
1286                kfree(entry);
1287        }
1288
1289        return 0;
1290}
1291
1292/*
1293 * All 4 location monitors reside at the same base - this is therefore a
1294 * system wide configuration.
1295 *
1296 * This does not enable the LM monitor - that should be done when the first
1297 * callback is attached and disabled when the last callback is removed.
1298 */
1299static int ca91cx42_lm_set(struct vme_lm_resource *lm,
1300        unsigned long long lm_base, u32 aspace, u32 cycle)
1301{
1302        u32 temp_base, lm_ctl = 0;
1303        int i;
1304        struct ca91cx42_driver *bridge;
1305        struct device *dev;
1306
1307        bridge = lm->parent->driver_priv;
1308        dev = lm->parent->parent;
1309
1310        /* Check the alignment of the location monitor */
1311        temp_base = (u32)lm_base;
1312        if (temp_base & 0xffff) {
1313                dev_err(dev, "Location monitor must be aligned to 64KB "
1314                        "boundary");
1315                return -EINVAL;
1316        }
1317
1318        mutex_lock(&lm->mtx);
1319
1320        /* If we already have a callback attached, we can't move it! */
1321        for (i = 0; i < lm->monitors; i++) {
1322                if (bridge->lm_callback[i]) {
1323                        mutex_unlock(&lm->mtx);
1324                        dev_err(dev, "Location monitor callback attached, "
1325                                "can't reset\n");
1326                        return -EBUSY;
1327                }
1328        }
1329
1330        switch (aspace) {
1331        case VME_A16:
1332                lm_ctl |= CA91CX42_LM_CTL_AS_A16;
1333                break;
1334        case VME_A24:
1335                lm_ctl |= CA91CX42_LM_CTL_AS_A24;
1336                break;
1337        case VME_A32:
1338                lm_ctl |= CA91CX42_LM_CTL_AS_A32;
1339                break;
1340        default:
1341                mutex_unlock(&lm->mtx);
1342                dev_err(dev, "Invalid address space\n");
1343                return -EINVAL;
1344                break;
1345        }
1346
1347        if (cycle & VME_SUPER)
1348                lm_ctl |= CA91CX42_LM_CTL_SUPR;
1349        if (cycle & VME_USER)
1350                lm_ctl |= CA91CX42_LM_CTL_NPRIV;
1351        if (cycle & VME_PROG)
1352                lm_ctl |= CA91CX42_LM_CTL_PGM;
1353        if (cycle & VME_DATA)
1354                lm_ctl |= CA91CX42_LM_CTL_DATA;
1355
1356        iowrite32(lm_base, bridge->base + LM_BS);
1357        iowrite32(lm_ctl, bridge->base + LM_CTL);
1358
1359        mutex_unlock(&lm->mtx);
1360
1361        return 0;
1362}
1363
1364/* Get configuration of the callback monitor and return whether it is enabled
1365 * or disabled.
1366 */
1367static int ca91cx42_lm_get(struct vme_lm_resource *lm,
1368        unsigned long long *lm_base, u32 *aspace, u32 *cycle)
1369{
1370        u32 lm_ctl, enabled = 0;
1371        struct ca91cx42_driver *bridge;
1372
1373        bridge = lm->parent->driver_priv;
1374
1375        mutex_lock(&lm->mtx);
1376
1377        *lm_base = (unsigned long long)ioread32(bridge->base + LM_BS);
1378        lm_ctl = ioread32(bridge->base + LM_CTL);
1379
1380        if (lm_ctl & CA91CX42_LM_CTL_EN)
1381                enabled = 1;
1382
1383        if ((lm_ctl & CA91CX42_LM_CTL_AS_M) == CA91CX42_LM_CTL_AS_A16)
1384                *aspace = VME_A16;
1385        if ((lm_ctl & CA91CX42_LM_CTL_AS_M) == CA91CX42_LM_CTL_AS_A24)
1386                *aspace = VME_A24;
1387        if ((lm_ctl & CA91CX42_LM_CTL_AS_M) == CA91CX42_LM_CTL_AS_A32)
1388                *aspace = VME_A32;
1389
1390        *cycle = 0;
1391        if (lm_ctl & CA91CX42_LM_CTL_SUPR)
1392                *cycle |= VME_SUPER;
1393        if (lm_ctl & CA91CX42_LM_CTL_NPRIV)
1394                *cycle |= VME_USER;
1395        if (lm_ctl & CA91CX42_LM_CTL_PGM)
1396                *cycle |= VME_PROG;
1397        if (lm_ctl & CA91CX42_LM_CTL_DATA)
1398                *cycle |= VME_DATA;
1399
1400        mutex_unlock(&lm->mtx);
1401
1402        return enabled;
1403}
1404
1405/*
1406 * Attach a callback to a specific location monitor.
1407 *
1408 * Callback will be passed the monitor triggered.
1409 */
1410static int ca91cx42_lm_attach(struct vme_lm_resource *lm, int monitor,
1411        void (*callback)(void *), void *data)
1412{
1413        u32 lm_ctl, tmp;
1414        struct ca91cx42_driver *bridge;
1415        struct device *dev;
1416
1417        bridge = lm->parent->driver_priv;
1418        dev = lm->parent->parent;
1419
1420        mutex_lock(&lm->mtx);
1421
1422        /* Ensure that the location monitor is configured - need PGM or DATA */
1423        lm_ctl = ioread32(bridge->base + LM_CTL);
1424        if ((lm_ctl & (CA91CX42_LM_CTL_PGM | CA91CX42_LM_CTL_DATA)) == 0) {
1425                mutex_unlock(&lm->mtx);
1426                dev_err(dev, "Location monitor not properly configured\n");
1427                return -EINVAL;
1428        }
1429
1430        /* Check that a callback isn't already attached */
1431        if (bridge->lm_callback[monitor]) {
1432                mutex_unlock(&lm->mtx);
1433                dev_err(dev, "Existing callback attached\n");
1434                return -EBUSY;
1435        }
1436
1437        /* Attach callback */
1438        bridge->lm_callback[monitor] = callback;
1439        bridge->lm_data[monitor] = data;
1440
1441        /* Enable Location Monitor interrupt */
1442        tmp = ioread32(bridge->base + LINT_EN);
1443        tmp |= CA91CX42_LINT_LM[monitor];
1444        iowrite32(tmp, bridge->base + LINT_EN);
1445
1446        /* Ensure that global Location Monitor Enable set */
1447        if ((lm_ctl & CA91CX42_LM_CTL_EN) == 0) {
1448                lm_ctl |= CA91CX42_LM_CTL_EN;
1449                iowrite32(lm_ctl, bridge->base + LM_CTL);
1450        }
1451
1452        mutex_unlock(&lm->mtx);
1453
1454        return 0;
1455}
1456
1457/*
1458 * Detach a callback function forn a specific location monitor.
1459 */
1460static int ca91cx42_lm_detach(struct vme_lm_resource *lm, int monitor)
1461{
1462        u32 tmp;
1463        struct ca91cx42_driver *bridge;
1464
1465        bridge = lm->parent->driver_priv;
1466
1467        mutex_lock(&lm->mtx);
1468
1469        /* Disable Location Monitor and ensure previous interrupts are clear */
1470        tmp = ioread32(bridge->base + LINT_EN);
1471        tmp &= ~CA91CX42_LINT_LM[monitor];
1472        iowrite32(tmp, bridge->base + LINT_EN);
1473
1474        iowrite32(CA91CX42_LINT_LM[monitor],
1475                 bridge->base + LINT_STAT);
1476
1477        /* Detach callback */
1478        bridge->lm_callback[monitor] = NULL;
1479        bridge->lm_data[monitor] = NULL;
1480
1481        /* If all location monitors disabled, disable global Location Monitor */
1482        if ((tmp & (CA91CX42_LINT_LM0 | CA91CX42_LINT_LM1 | CA91CX42_LINT_LM2 |
1483                        CA91CX42_LINT_LM3)) == 0) {
1484                tmp = ioread32(bridge->base + LM_CTL);
1485                tmp &= ~CA91CX42_LM_CTL_EN;
1486                iowrite32(tmp, bridge->base + LM_CTL);
1487        }
1488
1489        mutex_unlock(&lm->mtx);
1490
1491        return 0;
1492}
1493
1494static int ca91cx42_slot_get(struct vme_bridge *ca91cx42_bridge)
1495{
1496        u32 slot = 0;
1497        struct ca91cx42_driver *bridge;
1498
1499        bridge = ca91cx42_bridge->driver_priv;
1500
1501        if (!geoid) {
1502                slot = ioread32(bridge->base + VCSR_BS);
1503                slot = ((slot & CA91CX42_VCSR_BS_SLOT_M) >> 27);
1504        } else
1505                slot = geoid;
1506
1507        return (int)slot;
1508
1509}
1510
1511static void *ca91cx42_alloc_consistent(struct device *parent, size_t size,
1512        dma_addr_t *dma)
1513{
1514        struct pci_dev *pdev;
1515
1516        /* Find pci_dev container of dev */
1517        pdev = to_pci_dev(parent);
1518
1519        return pci_alloc_consistent(pdev, size, dma);
1520}
1521
1522static void ca91cx42_free_consistent(struct device *parent, size_t size,
1523        void *vaddr, dma_addr_t dma)
1524{
1525        struct pci_dev *pdev;
1526
1527        /* Find pci_dev container of dev */
1528        pdev = to_pci_dev(parent);
1529
1530        pci_free_consistent(pdev, size, vaddr, dma);
1531}
1532
1533/*
1534 * Configure CR/CSR space
1535 *
1536 * Access to the CR/CSR can be configured at power-up. The location of the
1537 * CR/CSR registers in the CR/CSR address space is determined by the boards
1538 * Auto-ID or Geographic address. This function ensures that the window is
1539 * enabled at an offset consistent with the boards geopgraphic address.
1540 */
1541static int ca91cx42_crcsr_init(struct vme_bridge *ca91cx42_bridge,
1542        struct pci_dev *pdev)
1543{
1544        unsigned int crcsr_addr;
1545        int tmp, slot;
1546        struct ca91cx42_driver *bridge;
1547
1548        bridge = ca91cx42_bridge->driver_priv;
1549
1550        slot = ca91cx42_slot_get(ca91cx42_bridge);
1551
1552        /* Write CSR Base Address if slot ID is supplied as a module param */
1553        if (geoid)
1554                iowrite32(geoid << 27, bridge->base + VCSR_BS);
1555
1556        dev_info(&pdev->dev, "CR/CSR Offset: %d\n", slot);
1557        if (slot == 0) {
1558                dev_err(&pdev->dev, "Slot number is unset, not configuring "
1559                        "CR/CSR space\n");
1560                return -EINVAL;
1561        }
1562
1563        /* Allocate mem for CR/CSR image */
1564        bridge->crcsr_kernel = pci_zalloc_consistent(pdev, VME_CRCSR_BUF_SIZE,
1565                                                     &bridge->crcsr_bus);
1566        if (!bridge->crcsr_kernel) {
1567                dev_err(&pdev->dev, "Failed to allocate memory for CR/CSR "
1568                        "image\n");
1569                return -ENOMEM;
1570        }
1571
1572        crcsr_addr = slot * (512 * 1024);
1573        iowrite32(bridge->crcsr_bus - crcsr_addr, bridge->base + VCSR_TO);
1574
1575        tmp = ioread32(bridge->base + VCSR_CTL);
1576        tmp |= CA91CX42_VCSR_CTL_EN;
1577        iowrite32(tmp, bridge->base + VCSR_CTL);
1578
1579        return 0;
1580}
1581
1582static void ca91cx42_crcsr_exit(struct vme_bridge *ca91cx42_bridge,
1583        struct pci_dev *pdev)
1584{
1585        u32 tmp;
1586        struct ca91cx42_driver *bridge;
1587
1588        bridge = ca91cx42_bridge->driver_priv;
1589
1590        /* Turn off CR/CSR space */
1591        tmp = ioread32(bridge->base + VCSR_CTL);
1592        tmp &= ~CA91CX42_VCSR_CTL_EN;
1593        iowrite32(tmp, bridge->base + VCSR_CTL);
1594
1595        /* Free image */
1596        iowrite32(0, bridge->base + VCSR_TO);
1597
1598        pci_free_consistent(pdev, VME_CRCSR_BUF_SIZE, bridge->crcsr_kernel,
1599                bridge->crcsr_bus);
1600}
1601
1602static int ca91cx42_probe(struct pci_dev *pdev, const struct pci_device_id *id)
1603{
1604        int retval, i;
1605        u32 data;
1606        struct list_head *pos = NULL, *n;
1607        struct vme_bridge *ca91cx42_bridge;
1608        struct ca91cx42_driver *ca91cx42_device;
1609        struct vme_master_resource *master_image;
1610        struct vme_slave_resource *slave_image;
1611        struct vme_dma_resource *dma_ctrlr;
1612        struct vme_lm_resource *lm;
1613
1614        /* We want to support more than one of each bridge so we need to
1615         * dynamically allocate the bridge structure
1616         */
1617        ca91cx42_bridge = kzalloc(sizeof(*ca91cx42_bridge), GFP_KERNEL);
1618        if (!ca91cx42_bridge) {
1619                retval = -ENOMEM;
1620                goto err_struct;
1621        }
1622        vme_init_bridge(ca91cx42_bridge);
1623
1624        ca91cx42_device = kzalloc(sizeof(*ca91cx42_device), GFP_KERNEL);
1625        if (!ca91cx42_device) {
1626                retval = -ENOMEM;
1627                goto err_driver;
1628        }
1629
1630        ca91cx42_bridge->driver_priv = ca91cx42_device;
1631
1632        /* Enable the device */
1633        retval = pci_enable_device(pdev);
1634        if (retval) {
1635                dev_err(&pdev->dev, "Unable to enable device\n");
1636                goto err_enable;
1637        }
1638
1639        /* Map Registers */
1640        retval = pci_request_regions(pdev, driver_name);
1641        if (retval) {
1642                dev_err(&pdev->dev, "Unable to reserve resources\n");
1643                goto err_resource;
1644        }
1645
1646        /* map registers in BAR 0 */
1647        ca91cx42_device->base = ioremap_nocache(pci_resource_start(pdev, 0),
1648                4096);
1649        if (!ca91cx42_device->base) {
1650                dev_err(&pdev->dev, "Unable to remap CRG region\n");
1651                retval = -EIO;
1652                goto err_remap;
1653        }
1654
1655        /* Check to see if the mapping worked out */
1656        data = ioread32(ca91cx42_device->base + CA91CX42_PCI_ID) & 0x0000FFFF;
1657        if (data != PCI_VENDOR_ID_TUNDRA) {
1658                dev_err(&pdev->dev, "PCI_ID check failed\n");
1659                retval = -EIO;
1660                goto err_test;
1661        }
1662
1663        /* Initialize wait queues & mutual exclusion flags */
1664        init_waitqueue_head(&ca91cx42_device->dma_queue);
1665        init_waitqueue_head(&ca91cx42_device->iack_queue);
1666        mutex_init(&ca91cx42_device->vme_int);
1667        mutex_init(&ca91cx42_device->vme_rmw);
1668
1669        ca91cx42_bridge->parent = &pdev->dev;
1670        strcpy(ca91cx42_bridge->name, driver_name);
1671
1672        /* Setup IRQ */
1673        retval = ca91cx42_irq_init(ca91cx42_bridge);
1674        if (retval != 0) {
1675                dev_err(&pdev->dev, "Chip Initialization failed.\n");
1676                goto err_irq;
1677        }
1678
1679        /* Add master windows to list */
1680        for (i = 0; i < CA91C142_MAX_MASTER; i++) {
1681                master_image = kmalloc(sizeof(*master_image), GFP_KERNEL);
1682                if (!master_image) {
1683                        retval = -ENOMEM;
1684                        goto err_master;
1685                }
1686                master_image->parent = ca91cx42_bridge;
1687                spin_lock_init(&master_image->lock);
1688                master_image->locked = 0;
1689                master_image->number = i;
1690                master_image->address_attr = VME_A16 | VME_A24 | VME_A32 |
1691                        VME_CRCSR | VME_USER1 | VME_USER2;
1692                master_image->cycle_attr = VME_SCT | VME_BLT | VME_MBLT |
1693                        VME_SUPER | VME_USER | VME_PROG | VME_DATA;
1694                master_image->width_attr = VME_D8 | VME_D16 | VME_D32 | VME_D64;
1695                memset(&master_image->bus_resource, 0,
1696                       sizeof(master_image->bus_resource));
1697                master_image->kern_base  = NULL;
1698                list_add_tail(&master_image->list,
1699                        &ca91cx42_bridge->master_resources);
1700        }
1701
1702        /* Add slave windows to list */
1703        for (i = 0; i < CA91C142_MAX_SLAVE; i++) {
1704                slave_image = kmalloc(sizeof(*slave_image), GFP_KERNEL);
1705                if (!slave_image) {
1706                        retval = -ENOMEM;
1707                        goto err_slave;
1708                }
1709                slave_image->parent = ca91cx42_bridge;
1710                mutex_init(&slave_image->mtx);
1711                slave_image->locked = 0;
1712                slave_image->number = i;
1713                slave_image->address_attr = VME_A24 | VME_A32 | VME_USER1 |
1714                        VME_USER2;
1715
1716                /* Only windows 0 and 4 support A16 */
1717                if (i == 0 || i == 4)
1718                        slave_image->address_attr |= VME_A16;
1719
1720                slave_image->cycle_attr = VME_SCT | VME_BLT | VME_MBLT |
1721                        VME_SUPER | VME_USER | VME_PROG | VME_DATA;
1722                list_add_tail(&slave_image->list,
1723                        &ca91cx42_bridge->slave_resources);
1724        }
1725
1726        /* Add dma engines to list */
1727        for (i = 0; i < CA91C142_MAX_DMA; i++) {
1728                dma_ctrlr = kmalloc(sizeof(*dma_ctrlr), GFP_KERNEL);
1729                if (!dma_ctrlr) {
1730                        retval = -ENOMEM;
1731                        goto err_dma;
1732                }
1733                dma_ctrlr->parent = ca91cx42_bridge;
1734                mutex_init(&dma_ctrlr->mtx);
1735                dma_ctrlr->locked = 0;
1736                dma_ctrlr->number = i;
1737                dma_ctrlr->route_attr = VME_DMA_VME_TO_MEM |
1738                        VME_DMA_MEM_TO_VME;
1739                INIT_LIST_HEAD(&dma_ctrlr->pending);
1740                INIT_LIST_HEAD(&dma_ctrlr->running);
1741                list_add_tail(&dma_ctrlr->list,
1742                        &ca91cx42_bridge->dma_resources);
1743        }
1744
1745        /* Add location monitor to list */
1746        lm = kmalloc(sizeof(*lm), GFP_KERNEL);
1747        if (!lm) {
1748                retval = -ENOMEM;
1749                goto err_lm;
1750        }
1751        lm->parent = ca91cx42_bridge;
1752        mutex_init(&lm->mtx);
1753        lm->locked = 0;
1754        lm->number = 1;
1755        lm->monitors = 4;
1756        list_add_tail(&lm->list, &ca91cx42_bridge->lm_resources);
1757
1758        ca91cx42_bridge->slave_get = ca91cx42_slave_get;
1759        ca91cx42_bridge->slave_set = ca91cx42_slave_set;
1760        ca91cx42_bridge->master_get = ca91cx42_master_get;
1761        ca91cx42_bridge->master_set = ca91cx42_master_set;
1762        ca91cx42_bridge->master_read = ca91cx42_master_read;
1763        ca91cx42_bridge->master_write = ca91cx42_master_write;
1764        ca91cx42_bridge->master_rmw = ca91cx42_master_rmw;
1765        ca91cx42_bridge->dma_list_add = ca91cx42_dma_list_add;
1766        ca91cx42_bridge->dma_list_exec = ca91cx42_dma_list_exec;
1767        ca91cx42_bridge->dma_list_empty = ca91cx42_dma_list_empty;
1768        ca91cx42_bridge->irq_set = ca91cx42_irq_set;
1769        ca91cx42_bridge->irq_generate = ca91cx42_irq_generate;
1770        ca91cx42_bridge->lm_set = ca91cx42_lm_set;
1771        ca91cx42_bridge->lm_get = ca91cx42_lm_get;
1772        ca91cx42_bridge->lm_attach = ca91cx42_lm_attach;
1773        ca91cx42_bridge->lm_detach = ca91cx42_lm_detach;
1774        ca91cx42_bridge->slot_get = ca91cx42_slot_get;
1775        ca91cx42_bridge->alloc_consistent = ca91cx42_alloc_consistent;
1776        ca91cx42_bridge->free_consistent = ca91cx42_free_consistent;
1777
1778        data = ioread32(ca91cx42_device->base + MISC_CTL);
1779        dev_info(&pdev->dev, "Board is%s the VME system controller\n",
1780                (data & CA91CX42_MISC_CTL_SYSCON) ? "" : " not");
1781        dev_info(&pdev->dev, "Slot ID is %d\n",
1782                ca91cx42_slot_get(ca91cx42_bridge));
1783
1784        if (ca91cx42_crcsr_init(ca91cx42_bridge, pdev))
1785                dev_err(&pdev->dev, "CR/CSR configuration failed.\n");
1786
1787        /* Need to save ca91cx42_bridge pointer locally in link list for use in
1788         * ca91cx42_remove()
1789         */
1790        retval = vme_register_bridge(ca91cx42_bridge);
1791        if (retval != 0) {
1792                dev_err(&pdev->dev, "Chip Registration failed.\n");
1793                goto err_reg;
1794        }
1795
1796        pci_set_drvdata(pdev, ca91cx42_bridge);
1797
1798        return 0;
1799
1800err_reg:
1801        ca91cx42_crcsr_exit(ca91cx42_bridge, pdev);
1802err_lm:
1803        /* resources are stored in link list */
1804        list_for_each_safe(pos, n, &ca91cx42_bridge->lm_resources) {
1805                lm = list_entry(pos, struct vme_lm_resource, list);
1806                list_del(pos);
1807                kfree(lm);
1808        }
1809err_dma:
1810        /* resources are stored in link list */
1811        list_for_each_safe(pos, n, &ca91cx42_bridge->dma_resources) {
1812                dma_ctrlr = list_entry(pos, struct vme_dma_resource, list);
1813                list_del(pos);
1814                kfree(dma_ctrlr);
1815        }
1816err_slave:
1817        /* resources are stored in link list */
1818        list_for_each_safe(pos, n, &ca91cx42_bridge->slave_resources) {
1819                slave_image = list_entry(pos, struct vme_slave_resource, list);
1820                list_del(pos);
1821                kfree(slave_image);
1822        }
1823err_master:
1824        /* resources are stored in link list */
1825        list_for_each_safe(pos, n, &ca91cx42_bridge->master_resources) {
1826                master_image = list_entry(pos, struct vme_master_resource,
1827                        list);
1828                list_del(pos);
1829                kfree(master_image);
1830        }
1831
1832        ca91cx42_irq_exit(ca91cx42_device, pdev);
1833err_irq:
1834err_test:
1835        iounmap(ca91cx42_device->base);
1836err_remap:
1837        pci_release_regions(pdev);
1838err_resource:
1839        pci_disable_device(pdev);
1840err_enable:
1841        kfree(ca91cx42_device);
1842err_driver:
1843        kfree(ca91cx42_bridge);
1844err_struct:
1845        return retval;
1846
1847}
1848
1849static void ca91cx42_remove(struct pci_dev *pdev)
1850{
1851        struct list_head *pos = NULL, *n;
1852        struct vme_master_resource *master_image;
1853        struct vme_slave_resource *slave_image;
1854        struct vme_dma_resource *dma_ctrlr;
1855        struct vme_lm_resource *lm;
1856        struct ca91cx42_driver *bridge;
1857        struct vme_bridge *ca91cx42_bridge = pci_get_drvdata(pdev);
1858
1859        bridge = ca91cx42_bridge->driver_priv;
1860
1861
1862        /* Turn off Ints */
1863        iowrite32(0, bridge->base + LINT_EN);
1864
1865        /* Turn off the windows */
1866        iowrite32(0x00800000, bridge->base + LSI0_CTL);
1867        iowrite32(0x00800000, bridge->base + LSI1_CTL);
1868        iowrite32(0x00800000, bridge->base + LSI2_CTL);
1869        iowrite32(0x00800000, bridge->base + LSI3_CTL);
1870        iowrite32(0x00800000, bridge->base + LSI4_CTL);
1871        iowrite32(0x00800000, bridge->base + LSI5_CTL);
1872        iowrite32(0x00800000, bridge->base + LSI6_CTL);
1873        iowrite32(0x00800000, bridge->base + LSI7_CTL);
1874        iowrite32(0x00F00000, bridge->base + VSI0_CTL);
1875        iowrite32(0x00F00000, bridge->base + VSI1_CTL);
1876        iowrite32(0x00F00000, bridge->base + VSI2_CTL);
1877        iowrite32(0x00F00000, bridge->base + VSI3_CTL);
1878        iowrite32(0x00F00000, bridge->base + VSI4_CTL);
1879        iowrite32(0x00F00000, bridge->base + VSI5_CTL);
1880        iowrite32(0x00F00000, bridge->base + VSI6_CTL);
1881        iowrite32(0x00F00000, bridge->base + VSI7_CTL);
1882
1883        vme_unregister_bridge(ca91cx42_bridge);
1884
1885        ca91cx42_crcsr_exit(ca91cx42_bridge, pdev);
1886
1887        /* resources are stored in link list */
1888        list_for_each_safe(pos, n, &ca91cx42_bridge->lm_resources) {
1889                lm = list_entry(pos, struct vme_lm_resource, list);
1890                list_del(pos);
1891                kfree(lm);
1892        }
1893
1894        /* resources are stored in link list */
1895        list_for_each_safe(pos, n, &ca91cx42_bridge->dma_resources) {
1896                dma_ctrlr = list_entry(pos, struct vme_dma_resource, list);
1897                list_del(pos);
1898                kfree(dma_ctrlr);
1899        }
1900
1901        /* resources are stored in link list */
1902        list_for_each_safe(pos, n, &ca91cx42_bridge->slave_resources) {
1903                slave_image = list_entry(pos, struct vme_slave_resource, list);
1904                list_del(pos);
1905                kfree(slave_image);
1906        }
1907
1908        /* resources are stored in link list */
1909        list_for_each_safe(pos, n, &ca91cx42_bridge->master_resources) {
1910                master_image = list_entry(pos, struct vme_master_resource,
1911                        list);
1912                list_del(pos);
1913                kfree(master_image);
1914        }
1915
1916        ca91cx42_irq_exit(bridge, pdev);
1917
1918        iounmap(bridge->base);
1919
1920        pci_release_regions(pdev);
1921
1922        pci_disable_device(pdev);
1923
1924        kfree(ca91cx42_bridge);
1925}
1926
/* Generates the module init/exit boilerplate for this PCI driver */
module_pci_driver(ca91cx42_driver);

/*
 * "geoid": user-supplied override for geographical addressing
 * (presumably the VME slot ID reported by ca91cx42_slot_get() — the use
 * site is elsewhere in this file; TODO confirm).  Perms 0: the parameter
 * is set only at load time and is not visible in sysfs.
 */
MODULE_PARM_DESC(geoid, "Override geographical addressing");
module_param(geoid, int, 0);

MODULE_DESCRIPTION("VME driver for the Tundra Universe II VME bridge");
MODULE_LICENSE("GPL");
1934