/* linux/drivers/staging/vme/bridges/vme_ca91cx42.c */
   1/*
   2 * Support for the Tundra Universe I/II VME-PCI Bridge Chips
   3 *
   4 * Author: Martyn Welch <martyn.welch@ge.com>
   5 * Copyright 2008 GE Intelligent Platforms Embedded Systems, Inc.
   6 *
   7 * Based on work by Tom Armistead and Ajit Prem
   8 * Copyright 2004 Motorola Inc.
   9 *
  10 * Derived from ca91c042.c by Michael Wyrick
  11 *
  12 * This program is free software; you can redistribute  it and/or modify it
  13 * under  the terms of  the GNU General  Public License as published by the
  14 * Free Software Foundation;  either version 2 of the  License, or (at your
  15 * option) any later version.
  16 */
  17
  18#include <linux/module.h>
  19#include <linux/mm.h>
  20#include <linux/types.h>
  21#include <linux/errno.h>
  22#include <linux/pci.h>
  23#include <linux/dma-mapping.h>
  24#include <linux/poll.h>
  25#include <linux/interrupt.h>
  26#include <linux/spinlock.h>
  27#include <linux/sched.h>
  28#include <linux/slab.h>
  29#include <linux/time.h>
  30#include <linux/io.h>
  31#include <linux/uaccess.h>
  32
  33#include "../vme.h"
  34#include "../vme_bridge.h"
  35#include "vme_ca91cx42.h"
  36
  37static int __init ca91cx42_init(void);
  38static int ca91cx42_probe(struct pci_dev *, const struct pci_device_id *);
  39static void ca91cx42_remove(struct pci_dev *);
  40static void __exit ca91cx42_exit(void);
  41
/* Module parameters */
/*
 * NOTE(review): geoid is not referenced in this chunk — presumably a
 * geographical-address (slot) module parameter registered further down
 * the file; confirm against the module_param() declaration.
 */
static int geoid;

/* Name used for the PCI driver and for request_irq() bookkeeping */
static char driver_name[] = "vme_ca91cx42";
  46
  47static DEFINE_PCI_DEVICE_TABLE(ca91cx42_ids) = {
  48        { PCI_DEVICE(PCI_VENDOR_ID_TUNDRA, PCI_DEVICE_ID_TUNDRA_CA91C142) },
  49        { },
  50};
  51
/* PCI driver glue: probe/remove are defined later in this file */
static struct pci_driver ca91cx42_driver = {
	.name = driver_name,
	.id_table = ca91cx42_ids,
	.probe = ca91cx42_probe,
	.remove = ca91cx42_remove,
};
  58
  59static u32 ca91cx42_DMA_irqhandler(struct ca91cx42_driver *bridge)
  60{
  61        wake_up(&bridge->dma_queue);
  62
  63        return CA91CX42_LINT_DMA;
  64}
  65
  66static u32 ca91cx42_LM_irqhandler(struct ca91cx42_driver *bridge, u32 stat)
  67{
  68        int i;
  69        u32 serviced = 0;
  70
  71        for (i = 0; i < 4; i++) {
  72                if (stat & CA91CX42_LINT_LM[i]) {
  73                        /* We only enable interrupts if the callback is set */
  74                        bridge->lm_callback[i](i);
  75                        serviced |= CA91CX42_LINT_LM[i];
  76                }
  77        }
  78
  79        return serviced;
  80}
  81
  82/* XXX This needs to be split into 4 queues */
  83static u32 ca91cx42_MB_irqhandler(struct ca91cx42_driver *bridge, int mbox_mask)
  84{
  85        wake_up(&bridge->mbox_queue);
  86
  87        return CA91CX42_LINT_MBOX;
  88}
  89
  90static u32 ca91cx42_IACK_irqhandler(struct ca91cx42_driver *bridge)
  91{
  92        wake_up(&bridge->iack_queue);
  93
  94        return CA91CX42_LINT_SW_IACK;
  95}
  96
  97static u32 ca91cx42_VERR_irqhandler(struct vme_bridge *ca91cx42_bridge)
  98{
  99        int val;
 100        struct ca91cx42_driver *bridge;
 101
 102        bridge = ca91cx42_bridge->driver_priv;
 103
 104        val = ioread32(bridge->base + DGCS);
 105
 106        if (!(val & 0x00000800)) {
 107                dev_err(ca91cx42_bridge->parent, "ca91cx42_VERR_irqhandler DMA "
 108                        "Read Error DGCS=%08X\n", val);
 109        }
 110
 111        return CA91CX42_LINT_VERR;
 112}
 113
 114static u32 ca91cx42_LERR_irqhandler(struct vme_bridge *ca91cx42_bridge)
 115{
 116        int val;
 117        struct ca91cx42_driver *bridge;
 118
 119        bridge = ca91cx42_bridge->driver_priv;
 120
 121        val = ioread32(bridge->base + DGCS);
 122
 123        if (!(val & 0x00000800))
 124                dev_err(ca91cx42_bridge->parent, "ca91cx42_LERR_irqhandler DMA "
 125                        "Read Error DGCS=%08X\n", val);
 126
 127        return CA91CX42_LINT_LERR;
 128}
 129
 130
 131static u32 ca91cx42_VIRQ_irqhandler(struct vme_bridge *ca91cx42_bridge,
 132        int stat)
 133{
 134        int vec, i, serviced = 0;
 135        struct ca91cx42_driver *bridge;
 136
 137        bridge = ca91cx42_bridge->driver_priv;
 138
 139
 140        for (i = 7; i > 0; i--) {
 141                if (stat & (1 << i)) {
 142                        vec = ioread32(bridge->base +
 143                                CA91CX42_V_STATID[i]) & 0xff;
 144
 145                        vme_irq_handler(ca91cx42_bridge, i, vec);
 146
 147                        serviced |= (1 << i);
 148                }
 149        }
 150
 151        return serviced;
 152}
 153
/*
 * Top-level PCI interrupt handler for the bridge.
 *
 * Reads the local interrupt status (LINT_STAT), masks it against the
 * enabled sources (LINT_EN), dispatches each pending source to its
 * dedicated handler, then writes the pending mask back to LINT_STAT to
 * acknowledge.  The IRQ line may be shared, so an empty status returns
 * IRQ_NONE.
 */
static irqreturn_t ca91cx42_irqhandler(int irq, void *ptr)
{
	u32 stat, enable, serviced = 0;
	struct vme_bridge *ca91cx42_bridge;
	struct ca91cx42_driver *bridge;

	/* dev_id registered in ca91cx42_irq_init() is the vme_bridge */
	ca91cx42_bridge = ptr;

	bridge = ca91cx42_bridge->driver_priv;

	enable = ioread32(bridge->base + LINT_EN);
	stat = ioread32(bridge->base + LINT_STAT);

	/* Only look at unmasked interrupts */
	stat &= enable;

	if (unlikely(!stat))
		return IRQ_NONE;

	if (stat & CA91CX42_LINT_DMA)
		serviced |= ca91cx42_DMA_irqhandler(bridge);
	if (stat & (CA91CX42_LINT_LM0 | CA91CX42_LINT_LM1 | CA91CX42_LINT_LM2 |
			CA91CX42_LINT_LM3))
		serviced |= ca91cx42_LM_irqhandler(bridge, stat);
	if (stat & CA91CX42_LINT_MBOX)
		serviced |= ca91cx42_MB_irqhandler(bridge, stat);
	if (stat & CA91CX42_LINT_SW_IACK)
		serviced |= ca91cx42_IACK_irqhandler(bridge);
	if (stat & CA91CX42_LINT_VERR)
		serviced |= ca91cx42_VERR_irqhandler(ca91cx42_bridge);
	if (stat & CA91CX42_LINT_LERR)
		serviced |= ca91cx42_LERR_irqhandler(ca91cx42_bridge);
	if (stat & (CA91CX42_LINT_VIRQ1 | CA91CX42_LINT_VIRQ2 |
			CA91CX42_LINT_VIRQ3 | CA91CX42_LINT_VIRQ4 |
			CA91CX42_LINT_VIRQ5 | CA91CX42_LINT_VIRQ6 |
			CA91CX42_LINT_VIRQ7))
		serviced |= ca91cx42_VIRQ_irqhandler(ca91cx42_bridge, stat);

	/*
	 * Clear serviced interrupts.
	 * NOTE(review): the whole pending mask "stat" is acknowledged, not
	 * the accumulated "serviced" mask — confirm this is intentional.
	 */
	iowrite32(stat, bridge->base + LINT_STAT);

	return IRQ_HANDLED;
}
 197
/*
 * One-time interrupt setup for the bridge.
 *
 * Quiesces all interrupt sources (both PCI->VME and VME->PCI directions),
 * clears anything pending, installs the shared IRQ handler with the
 * vme_bridge as dev_id, maps all sources to PCI interrupt pin 0 and
 * finally enables the DMA/mailbox/IACK/error sources.
 *
 * Returns 0 on success or the error from request_irq().
 */
static int ca91cx42_irq_init(struct vme_bridge *ca91cx42_bridge)
{
	int result, tmp;
	struct pci_dev *pdev;
	struct ca91cx42_driver *bridge;

	bridge = ca91cx42_bridge->driver_priv;

	/* Need pdev */
	pdev = container_of(ca91cx42_bridge->parent, struct pci_dev, dev);

	/* Initialise list for VME bus errors */
	INIT_LIST_HEAD(&ca91cx42_bridge->vme_errors);

	mutex_init(&ca91cx42_bridge->irq_mtx);

	/* Disable interrupts from PCI to VME */
	iowrite32(0, bridge->base + VINT_EN);

	/* Disable PCI interrupts */
	iowrite32(0, bridge->base + LINT_EN);
	/* Clear Any Pending PCI Interrupts */
	iowrite32(0x00FFFFFF, bridge->base + LINT_STAT);

	/* dev_id is the vme_bridge; ca91cx42_irqhandler() relies on this */
	result = request_irq(pdev->irq, ca91cx42_irqhandler, IRQF_SHARED,
			driver_name, ca91cx42_bridge);
	if (result) {
		dev_err(&pdev->dev, "Can't get assigned pci irq vector %02X\n",
		       pdev->irq);
		return result;
	}

	/* Ensure all interrupts are mapped to PCI Interrupt 0 */
	iowrite32(0, bridge->base + LINT_MAP0);
	iowrite32(0, bridge->base + LINT_MAP1);
	iowrite32(0, bridge->base + LINT_MAP2);

	/* Enable DMA, mailbox & LM Interrupts */
	tmp = CA91CX42_LINT_MBOX3 | CA91CX42_LINT_MBOX2 | CA91CX42_LINT_MBOX1 |
		CA91CX42_LINT_MBOX0 | CA91CX42_LINT_SW_IACK |
		CA91CX42_LINT_VERR | CA91CX42_LINT_LERR | CA91CX42_LINT_DMA;

	iowrite32(tmp, bridge->base + LINT_EN);

	return 0;
}
 244
/*
 * Tear down bridge interrupts: mask both interrupt directions, clear
 * anything pending and release the PCI IRQ.
 *
 * NOTE(review): free_irq() is passed pdev as dev_id, but the matching
 * request_irq() in ca91cx42_irq_init() registered with the vme_bridge
 * pointer as dev_id.  For a shared IRQ free_irq() matches on dev_id, so
 * this call will not find the handler and the IRQ is never released.
 * Fixing it properly needs the vme_bridge pointer here (i.e. a signature
 * change and a caller update) — TODO confirm and fix.
 */
static void ca91cx42_irq_exit(struct ca91cx42_driver *bridge,
	struct pci_dev *pdev)
{
	/* Disable interrupts from PCI to VME */
	iowrite32(0, bridge->base + VINT_EN);

	/* Disable PCI interrupts */
	iowrite32(0, bridge->base + LINT_EN);
	/* Clear Any Pending PCI Interrupts */
	iowrite32(0x00FFFFFF, bridge->base + LINT_STAT);

	free_irq(pdev->irq, pdev);
}
 258
 259/*
 260 * Set up an VME interrupt
 261 */
 262static void ca91cx42_irq_set(struct vme_bridge *ca91cx42_bridge, int level,
 263        int state, int sync)
 264
 265{
 266        struct pci_dev *pdev;
 267        u32 tmp;
 268        struct ca91cx42_driver *bridge;
 269
 270        bridge = ca91cx42_bridge->driver_priv;
 271
 272        /* Enable IRQ level */
 273        tmp = ioread32(bridge->base + LINT_EN);
 274
 275        if (state == 0)
 276                tmp &= ~CA91CX42_LINT_VIRQ[level];
 277        else
 278                tmp |= CA91CX42_LINT_VIRQ[level];
 279
 280        iowrite32(tmp, bridge->base + LINT_EN);
 281
 282        if ((state == 0) && (sync != 0)) {
 283                pdev = container_of(ca91cx42_bridge->parent, struct pci_dev,
 284                        dev);
 285
 286                synchronize_irq(pdev->irq);
 287        }
 288}
 289
/*
 * Generate an interrupt on the VME bus at the given level with the given
 * status/ID (vector).  Serialised by bridge->vme_int.
 *
 * Returns 0 on success, -EINVAL if the vector is odd (the Universe can
 * only present even vectors).
 */
static int ca91cx42_irq_generate(struct vme_bridge *ca91cx42_bridge, int level,
	int statid)
{
	u32 tmp;
	struct ca91cx42_driver *bridge;

	bridge = ca91cx42_bridge->driver_priv;

	/* Universe can only generate even vectors */
	if (statid & 1)
		return -EINVAL;

	mutex_lock(&bridge->vme_int);

	tmp = ioread32(bridge->base + VINT_EN);

	/* Set Status/ID */
	iowrite32(statid << 24, bridge->base + STATID);

	/* Assert VMEbus IRQ */
	tmp = tmp | (1 << (level + 24));
	iowrite32(tmp, bridge->base + VINT_EN);

	/*
	 * Wait for IACK.
	 * NOTE(review): the condition is the constant 0, so the wake_up()
	 * issued by ca91cx42_IACK_irqhandler() re-checks "0" and goes back
	 * to sleep — only a signal terminates this wait.  A real completion
	 * condition (e.g. checking an IACK-received flag or register) is
	 * needed; TODO confirm and fix.
	 */
	wait_event_interruptible(bridge->iack_queue, 0);

	/* Return interrupt to low state */
	tmp = ioread32(bridge->base + VINT_EN);
	tmp = tmp & ~(1 << (level + 24));
	iowrite32(tmp, bridge->base + VINT_EN);

	mutex_unlock(&bridge->vme_int);

	return 0;
}
 325
/*
 * Configure a slave (VME -> PCI) window.
 *
 * @image:    the slave image resource (image->number selects VSI0..7)
 * @enabled:  non-zero to enable the window after programming
 * @vme_base: VME base address of the window
 * @size:     window size in bytes
 * @pci_base: PCI bus address the window translates to
 * @aspace:   VME address space (A16/A24/A32/USER1/USER2 supported)
 * @cycle:    cycle types to respond to (SUPER/USER, PROG/DATA)
 *
 * Base, bound and offset must be aligned to the image granularity
 * (4 KiB for images 0 and 4, 64 KiB otherwise).  The window is disabled
 * while being reprogrammed and only re-enabled at the end if requested.
 *
 * Returns 0 on success, -EINVAL on an unsupported address space or
 * misaligned parameters.
 */
static int ca91cx42_slave_set(struct vme_slave_resource *image, int enabled,
	unsigned long long vme_base, unsigned long long size,
	dma_addr_t pci_base, vme_address_t aspace, vme_cycle_t cycle)
{
	unsigned int i, addr = 0, granularity;
	unsigned int temp_ctl = 0;
	/*
	 * NOTE(review): vme_bound/pci_offset are 32-bit while the inputs
	 * are 64-bit; values beyond 4 GiB silently truncate.  Harmless for
	 * the supported A16/A24/A32 spaces — confirm for USER1/USER2.
	 */
	unsigned int vme_bound, pci_offset;
	struct vme_bridge *ca91cx42_bridge;
	struct ca91cx42_driver *bridge;

	ca91cx42_bridge = image->parent;

	bridge = ca91cx42_bridge->driver_priv;

	i = image->number;

	/* Translate the requested address space to VSI_CTL VAS bits */
	switch (aspace) {
	case VME_A16:
		addr |= CA91CX42_VSI_CTL_VAS_A16;
		break;
	case VME_A24:
		addr |= CA91CX42_VSI_CTL_VAS_A24;
		break;
	case VME_A32:
		addr |= CA91CX42_VSI_CTL_VAS_A32;
		break;
	case VME_USER1:
		addr |= CA91CX42_VSI_CTL_VAS_USER1;
		break;
	case VME_USER2:
		addr |= CA91CX42_VSI_CTL_VAS_USER2;
		break;
	case VME_A64:
	case VME_CRCSR:
	case VME_USER3:
	case VME_USER4:
	default:
		dev_err(ca91cx42_bridge->parent, "Invalid address space\n");
		return -EINVAL;
		break;
	}

	/*
	 * Bound address is a valid address for the window, adjust
	 * accordingly
	 */
	vme_bound = vme_base + size;
	pci_offset = pci_base - vme_base;

	/* Images 0 and 4 have 4 KiB granularity, the rest 64 KiB */
	if ((i == 0) || (i == 4))
		granularity = 0x1000;
	else
		granularity = 0x10000;

	if (vme_base & (granularity - 1)) {
		dev_err(ca91cx42_bridge->parent, "Invalid VME base "
			"alignment\n");
		return -EINVAL;
	}
	if (vme_bound & (granularity - 1)) {
		dev_err(ca91cx42_bridge->parent, "Invalid VME bound "
			"alignment\n");
		return -EINVAL;
	}
	if (pci_offset & (granularity - 1)) {
		dev_err(ca91cx42_bridge->parent, "Invalid PCI Offset "
			"alignment\n");
		return -EINVAL;
	}

	/* Disable while we are mucking around */
	temp_ctl = ioread32(bridge->base + CA91CX42_VSI_CTL[i]);
	temp_ctl &= ~CA91CX42_VSI_CTL_EN;
	iowrite32(temp_ctl, bridge->base + CA91CX42_VSI_CTL[i]);

	/* Setup mapping */
	iowrite32(vme_base, bridge->base + CA91CX42_VSI_BS[i]);
	iowrite32(vme_bound, bridge->base + CA91CX42_VSI_BD[i]);
	iowrite32(pci_offset, bridge->base + CA91CX42_VSI_TO[i]);

	/* Setup address space */
	temp_ctl &= ~CA91CX42_VSI_CTL_VAS_M;
	temp_ctl |= addr;

	/* Setup cycle types */
	temp_ctl &= ~(CA91CX42_VSI_CTL_PGM_M | CA91CX42_VSI_CTL_SUPER_M);
	if (cycle & VME_SUPER)
		temp_ctl |= CA91CX42_VSI_CTL_SUPER_SUPR;
	if (cycle & VME_USER)
		temp_ctl |= CA91CX42_VSI_CTL_SUPER_NPRIV;
	if (cycle & VME_PROG)
		temp_ctl |= CA91CX42_VSI_CTL_PGM_PGM;
	if (cycle & VME_DATA)
		temp_ctl |= CA91CX42_VSI_CTL_PGM_DATA;

	/* Write ctl reg without enable */
	iowrite32(temp_ctl, bridge->base + CA91CX42_VSI_CTL[i]);

	if (enabled)
		temp_ctl |= CA91CX42_VSI_CTL_EN;

	iowrite32(temp_ctl, bridge->base + CA91CX42_VSI_CTL[i]);

	return 0;
}
 431
 432static int ca91cx42_slave_get(struct vme_slave_resource *image, int *enabled,
 433        unsigned long long *vme_base, unsigned long long *size,
 434        dma_addr_t *pci_base, vme_address_t *aspace, vme_cycle_t *cycle)
 435{
 436        unsigned int i, granularity = 0, ctl = 0;
 437        unsigned long long vme_bound, pci_offset;
 438        struct ca91cx42_driver *bridge;
 439
 440        bridge = image->parent->driver_priv;
 441
 442        i = image->number;
 443
 444        if ((i == 0) || (i == 4))
 445                granularity = 0x1000;
 446        else
 447                granularity = 0x10000;
 448
 449        /* Read Registers */
 450        ctl = ioread32(bridge->base + CA91CX42_VSI_CTL[i]);
 451
 452        *vme_base = ioread32(bridge->base + CA91CX42_VSI_BS[i]);
 453        vme_bound = ioread32(bridge->base + CA91CX42_VSI_BD[i]);
 454        pci_offset = ioread32(bridge->base + CA91CX42_VSI_TO[i]);
 455
 456        *pci_base = (dma_addr_t)vme_base + pci_offset;
 457        *size = (unsigned long long)((vme_bound - *vme_base) + granularity);
 458
 459        *enabled = 0;
 460        *aspace = 0;
 461        *cycle = 0;
 462
 463        if (ctl & CA91CX42_VSI_CTL_EN)
 464                *enabled = 1;
 465
 466        if ((ctl & CA91CX42_VSI_CTL_VAS_M) == CA91CX42_VSI_CTL_VAS_A16)
 467                *aspace = VME_A16;
 468        if ((ctl & CA91CX42_VSI_CTL_VAS_M) == CA91CX42_VSI_CTL_VAS_A24)
 469                *aspace = VME_A24;
 470        if ((ctl & CA91CX42_VSI_CTL_VAS_M) == CA91CX42_VSI_CTL_VAS_A32)
 471                *aspace = VME_A32;
 472        if ((ctl & CA91CX42_VSI_CTL_VAS_M) == CA91CX42_VSI_CTL_VAS_USER1)
 473                *aspace = VME_USER1;
 474        if ((ctl & CA91CX42_VSI_CTL_VAS_M) == CA91CX42_VSI_CTL_VAS_USER2)
 475                *aspace = VME_USER2;
 476
 477        if (ctl & CA91CX42_VSI_CTL_SUPER_SUPR)
 478                *cycle |= VME_SUPER;
 479        if (ctl & CA91CX42_VSI_CTL_SUPER_NPRIV)
 480                *cycle |= VME_USER;
 481        if (ctl & CA91CX42_VSI_CTL_PGM_PGM)
 482                *cycle |= VME_PROG;
 483        if (ctl & CA91CX42_VSI_CTL_PGM_DATA)
 484                *cycle |= VME_DATA;
 485
 486        return 0;
 487}
 488
 489/*
 490 * Allocate and map PCI Resource
 491 */
 492static int ca91cx42_alloc_resource(struct vme_master_resource *image,
 493        unsigned long long size)
 494{
 495        unsigned long long existing_size;
 496        int retval = 0;
 497        struct pci_dev *pdev;
 498        struct vme_bridge *ca91cx42_bridge;
 499
 500        ca91cx42_bridge = image->parent;
 501
 502        /* Find pci_dev container of dev */
 503        if (ca91cx42_bridge->parent == NULL) {
 504                dev_err(ca91cx42_bridge->parent, "Dev entry NULL\n");
 505                return -EINVAL;
 506        }
 507        pdev = container_of(ca91cx42_bridge->parent, struct pci_dev, dev);
 508
 509        existing_size = (unsigned long long)(image->bus_resource.end -
 510                image->bus_resource.start);
 511
 512        /* If the existing size is OK, return */
 513        if (existing_size == (size - 1))
 514                return 0;
 515
 516        if (existing_size != 0) {
 517                iounmap(image->kern_base);
 518                image->kern_base = NULL;
 519                if (image->bus_resource.name != NULL)
 520                        kfree(image->bus_resource.name);
 521                release_resource(&image->bus_resource);
 522                memset(&image->bus_resource, 0, sizeof(struct resource));
 523        }
 524
 525        if (image->bus_resource.name == NULL) {
 526                image->bus_resource.name = kmalloc(VMENAMSIZ+3, GFP_ATOMIC);
 527                if (image->bus_resource.name == NULL) {
 528                        dev_err(ca91cx42_bridge->parent, "Unable to allocate "
 529                                "memory for resource name\n");
 530                        retval = -ENOMEM;
 531                        goto err_name;
 532                }
 533        }
 534
 535        sprintf((char *)image->bus_resource.name, "%s.%d",
 536                ca91cx42_bridge->name, image->number);
 537
 538        image->bus_resource.start = 0;
 539        image->bus_resource.end = (unsigned long)size;
 540        image->bus_resource.flags = IORESOURCE_MEM;
 541
 542        retval = pci_bus_alloc_resource(pdev->bus,
 543                &image->bus_resource, size, size, PCIBIOS_MIN_MEM,
 544                0, NULL, NULL);
 545        if (retval) {
 546                dev_err(ca91cx42_bridge->parent, "Failed to allocate mem "
 547                        "resource for window %d size 0x%lx start 0x%lx\n",
 548                        image->number, (unsigned long)size,
 549                        (unsigned long)image->bus_resource.start);
 550                goto err_resource;
 551        }
 552
 553        image->kern_base = ioremap_nocache(
 554                image->bus_resource.start, size);
 555        if (image->kern_base == NULL) {
 556                dev_err(ca91cx42_bridge->parent, "Failed to remap resource\n");
 557                retval = -ENOMEM;
 558                goto err_remap;
 559        }
 560
 561        return 0;
 562
 563        iounmap(image->kern_base);
 564        image->kern_base = NULL;
 565err_remap:
 566        release_resource(&image->bus_resource);
 567err_resource:
 568        kfree(image->bus_resource.name);
 569        memset(&image->bus_resource, 0, sizeof(struct resource));
 570err_name:
 571        return retval;
 572}
 573
/*
 * Free and unmap PCI Resource
 *
 * Inverse of ca91cx42_alloc_resource(): unmap the kernel mapping first,
 * then release the bus resource and free its name, and finally clear
 * the resource so a later alloc sees a clean slate.
 */
static void ca91cx42_free_resource(struct vme_master_resource *image)
{
	iounmap(image->kern_base);
	image->kern_base = NULL;
	release_resource(&image->bus_resource);
	kfree(image->bus_resource.name);
	memset(&image->bus_resource, 0, sizeof(struct resource));
}
 585
 586
/*
 * Configure a master (PCI -> VME) window.
 *
 * @image:    the master image resource (image->number selects LSI0..7)
 * @enabled:  non-zero to enable the window after programming
 * @vme_base: VME base address the window should target
 * @size:     window size in bytes
 * @aspace:   VME address space (A16/A24/A32/CRCSR/USER1/USER2 supported)
 * @cycle:    cycle types (BLT/MBLT, SUPER, PROG)
 * @dwidth:   data width (D8/D16/D32/D64)
 *
 * Allocates/maps the backing PCI resource, then programs the LSI
 * registers with the window disabled and re-enables it at the end if
 * requested.  Protected by image->lock.
 *
 * Returns 0 on success, -EINVAL for bad alignment/aspace/dwidth,
 * -ENOMEM if the PCI resource cannot be allocated.
 */
static int ca91cx42_master_set(struct vme_master_resource *image, int enabled,
	unsigned long long vme_base, unsigned long long size,
	vme_address_t aspace, vme_cycle_t cycle, vme_width_t dwidth)
{
	int retval = 0;
	unsigned int i, granularity = 0;
	unsigned int temp_ctl = 0;
	unsigned long long pci_bound, vme_offset, pci_base;
	struct vme_bridge *ca91cx42_bridge;
	struct ca91cx42_driver *bridge;

	ca91cx42_bridge = image->parent;

	bridge = ca91cx42_bridge->driver_priv;

	i = image->number;

	/* Images 0 and 4 have 4 KiB granularity, the rest 64 KiB */
	if ((i == 0) || (i == 4))
		granularity = 0x1000;
	else
		granularity = 0x10000;

	/* Verify input data */
	if (vme_base & (granularity - 1)) {
		dev_err(ca91cx42_bridge->parent, "Invalid VME Window "
			"alignment\n");
		retval = -EINVAL;
		goto err_window;
	}
	if (size & (granularity - 1)) {
		dev_err(ca91cx42_bridge->parent, "Invalid VME Window "
			"alignment\n");
		retval = -EINVAL;
		goto err_window;
	}

	spin_lock(&image->lock);

	/*
	 * Let's allocate the resource here rather than further up the stack as
	 * it avoids pushing loads of bus dependant stuff up the stack
	 */
	retval = ca91cx42_alloc_resource(image, size);
	if (retval) {
		spin_unlock(&image->lock);
		/*
		 * NOTE(review): the message and the forced -ENOMEM both
		 * misrepresent the failure — ca91cx42_alloc_resource() can
		 * fail for several reasons and already returns a specific
		 * code that is overwritten here.  TODO confirm and fix.
		 */
		dev_err(ca91cx42_bridge->parent, "Unable to allocate memory "
			"for resource name\n");
		retval = -ENOMEM;
		goto err_res;
	}

	pci_base = (unsigned long long)image->bus_resource.start;

	/*
	 * Bound address is a valid address for the window, adjust
	 * according to window granularity.
	 */
	pci_bound = pci_base + size;
	vme_offset = vme_base - pci_base;

	/* Disable while we are mucking around */
	temp_ctl = ioread32(bridge->base + CA91CX42_LSI_CTL[i]);
	temp_ctl &= ~CA91CX42_LSI_CTL_EN;
	iowrite32(temp_ctl, bridge->base + CA91CX42_LSI_CTL[i]);

	/* Setup cycle types */
	temp_ctl &= ~CA91CX42_LSI_CTL_VCT_M;
	if (cycle & VME_BLT)
		temp_ctl |= CA91CX42_LSI_CTL_VCT_BLT;
	if (cycle & VME_MBLT)
		temp_ctl |= CA91CX42_LSI_CTL_VCT_MBLT;

	/* Setup data width */
	temp_ctl &= ~CA91CX42_LSI_CTL_VDW_M;
	switch (dwidth) {
	case VME_D8:
		temp_ctl |= CA91CX42_LSI_CTL_VDW_D8;
		break;
	case VME_D16:
		temp_ctl |= CA91CX42_LSI_CTL_VDW_D16;
		break;
	case VME_D32:
		temp_ctl |= CA91CX42_LSI_CTL_VDW_D32;
		break;
	case VME_D64:
		temp_ctl |= CA91CX42_LSI_CTL_VDW_D64;
		break;
	default:
		spin_unlock(&image->lock);
		dev_err(ca91cx42_bridge->parent, "Invalid data width\n");
		retval = -EINVAL;
		goto err_dwidth;
		break;
	}

	/* Setup address space */
	temp_ctl &= ~CA91CX42_LSI_CTL_VAS_M;
	switch (aspace) {
	case VME_A16:
		temp_ctl |= CA91CX42_LSI_CTL_VAS_A16;
		break;
	case VME_A24:
		temp_ctl |= CA91CX42_LSI_CTL_VAS_A24;
		break;
	case VME_A32:
		temp_ctl |= CA91CX42_LSI_CTL_VAS_A32;
		break;
	case VME_CRCSR:
		temp_ctl |= CA91CX42_LSI_CTL_VAS_CRCSR;
		break;
	case VME_USER1:
		temp_ctl |= CA91CX42_LSI_CTL_VAS_USER1;
		break;
	case VME_USER2:
		temp_ctl |= CA91CX42_LSI_CTL_VAS_USER2;
		break;
	case VME_A64:
	case VME_USER3:
	case VME_USER4:
	default:
		spin_unlock(&image->lock);
		dev_err(ca91cx42_bridge->parent, "Invalid address space\n");
		retval = -EINVAL;
		goto err_aspace;
		break;
	}

	temp_ctl &= ~(CA91CX42_LSI_CTL_PGM_M | CA91CX42_LSI_CTL_SUPER_M);
	if (cycle & VME_SUPER)
		temp_ctl |= CA91CX42_LSI_CTL_SUPER_SUPR;
	if (cycle & VME_PROG)
		temp_ctl |= CA91CX42_LSI_CTL_PGM_PGM;

	/* Setup mapping */
	iowrite32(pci_base, bridge->base + CA91CX42_LSI_BS[i]);
	iowrite32(pci_bound, bridge->base + CA91CX42_LSI_BD[i]);
	iowrite32(vme_offset, bridge->base + CA91CX42_LSI_TO[i]);

	/* Write ctl reg without enable */
	iowrite32(temp_ctl, bridge->base + CA91CX42_LSI_CTL[i]);

	if (enabled)
		temp_ctl |= CA91CX42_LSI_CTL_EN;

	iowrite32(temp_ctl, bridge->base + CA91CX42_LSI_CTL[i]);

	spin_unlock(&image->lock);
	return 0;

	/* On dwidth/aspace failure the freshly allocated resource is freed */
err_aspace:
err_dwidth:
	ca91cx42_free_resource(image);
err_res:
err_window:
	return retval;
}
 743
 744static int __ca91cx42_master_get(struct vme_master_resource *image,
 745        int *enabled, unsigned long long *vme_base, unsigned long long *size,
 746        vme_address_t *aspace, vme_cycle_t *cycle, vme_width_t *dwidth)
 747{
 748        unsigned int i, ctl;
 749        unsigned long long pci_base, pci_bound, vme_offset;
 750        struct ca91cx42_driver *bridge;
 751
 752        bridge = image->parent->driver_priv;
 753
 754        i = image->number;
 755
 756        ctl = ioread32(bridge->base + CA91CX42_LSI_CTL[i]);
 757
 758        pci_base = ioread32(bridge->base + CA91CX42_LSI_BS[i]);
 759        vme_offset = ioread32(bridge->base + CA91CX42_LSI_TO[i]);
 760        pci_bound = ioread32(bridge->base + CA91CX42_LSI_BD[i]);
 761
 762        *vme_base = pci_base + vme_offset;
 763        *size = (unsigned long long)(pci_bound - pci_base);
 764
 765        *enabled = 0;
 766        *aspace = 0;
 767        *cycle = 0;
 768        *dwidth = 0;
 769
 770        if (ctl & CA91CX42_LSI_CTL_EN)
 771                *enabled = 1;
 772
 773        /* Setup address space */
 774        switch (ctl & CA91CX42_LSI_CTL_VAS_M) {
 775        case CA91CX42_LSI_CTL_VAS_A16:
 776                *aspace = VME_A16;
 777                break;
 778        case CA91CX42_LSI_CTL_VAS_A24:
 779                *aspace = VME_A24;
 780                break;
 781        case CA91CX42_LSI_CTL_VAS_A32:
 782                *aspace = VME_A32;
 783                break;
 784        case CA91CX42_LSI_CTL_VAS_CRCSR:
 785                *aspace = VME_CRCSR;
 786                break;
 787        case CA91CX42_LSI_CTL_VAS_USER1:
 788                *aspace = VME_USER1;
 789                break;
 790        case CA91CX42_LSI_CTL_VAS_USER2:
 791                *aspace = VME_USER2;
 792                break;
 793        }
 794
 795        /* XXX Not sure howto check for MBLT */
 796        /* Setup cycle types */
 797        if (ctl & CA91CX42_LSI_CTL_VCT_BLT)
 798                *cycle |= VME_BLT;
 799        else
 800                *cycle |= VME_SCT;
 801
 802        if (ctl & CA91CX42_LSI_CTL_SUPER_SUPR)
 803                *cycle |= VME_SUPER;
 804        else
 805                *cycle |= VME_USER;
 806
 807        if (ctl & CA91CX42_LSI_CTL_PGM_PGM)
 808                *cycle = VME_PROG;
 809        else
 810                *cycle = VME_DATA;
 811
 812        /* Setup data width */
 813        switch (ctl & CA91CX42_LSI_CTL_VDW_M) {
 814        case CA91CX42_LSI_CTL_VDW_D8:
 815                *dwidth = VME_D8;
 816                break;
 817        case CA91CX42_LSI_CTL_VDW_D16:
 818                *dwidth = VME_D16;
 819                break;
 820        case CA91CX42_LSI_CTL_VDW_D32:
 821                *dwidth = VME_D32;
 822                break;
 823        case CA91CX42_LSI_CTL_VDW_D64:
 824                *dwidth = VME_D64;
 825                break;
 826        }
 827
 828        return 0;
 829}
 830
 831static int ca91cx42_master_get(struct vme_master_resource *image, int *enabled,
 832        unsigned long long *vme_base, unsigned long long *size,
 833        vme_address_t *aspace, vme_cycle_t *cycle, vme_width_t *dwidth)
 834{
 835        int retval;
 836
 837        spin_lock(&image->lock);
 838
 839        retval = __ca91cx42_master_get(image, enabled, vme_base, size, aspace,
 840                cycle, dwidth);
 841
 842        spin_unlock(&image->lock);
 843
 844        return retval;
 845}
 846
 847static ssize_t ca91cx42_master_read(struct vme_master_resource *image,
 848        void *buf, size_t count, loff_t offset)
 849{
 850        ssize_t retval;
 851        void *addr = image->kern_base + offset;
 852        unsigned int done = 0;
 853        unsigned int count32;
 854
 855        if (count == 0)
 856                return 0;
 857
 858        spin_lock(&image->lock);
 859
 860        /* The following code handles VME address alignment problem
 861         * in order to assure the maximal data width cycle.
 862         * We cannot use memcpy_xxx directly here because it
 863         * may cut data transfer in 8-bits cycles, thus making
 864         * D16 cycle impossible.
 865         * From the other hand, the bridge itself assures that
 866         * maximal configured data cycle is used and splits it
 867         * automatically for non-aligned addresses.
 868         */
 869        if ((int)addr & 0x1) {
 870                *(u8 *)buf = ioread8(addr);
 871                done += 1;
 872                if (done == count)
 873                        goto out;
 874        }
 875        if ((int)addr & 0x2) {
 876                if ((count - done) < 2) {
 877                        *(u8 *)(buf + done) = ioread8(addr + done);
 878                        done += 1;
 879                        goto out;
 880                } else {
 881                        *(u16 *)(buf + done) = ioread16(addr + done);
 882                        done += 2;
 883                }
 884        }
 885
 886        count32 = (count - done) & ~0x3;
 887        if (count32 > 0) {
 888                memcpy_fromio(buf + done, addr + done, (unsigned int)count);
 889                done += count32;
 890        }
 891
 892        if ((count - done) & 0x2) {
 893                *(u16 *)(buf + done) = ioread16(addr + done);
 894                done += 2;
 895        }
 896        if ((count - done) & 0x1) {
 897                *(u8 *)(buf + done) = ioread8(addr + done);
 898                done += 1;
 899        }
 900out:
 901        retval = count;
 902        spin_unlock(&image->lock);
 903
 904        return retval;
 905}
 906
 907static ssize_t ca91cx42_master_write(struct vme_master_resource *image,
 908        void *buf, size_t count, loff_t offset)
 909{
 910        ssize_t retval;
 911        void *addr = image->kern_base + offset;
 912        unsigned int done = 0;
 913        unsigned int count32;
 914
 915        if (count == 0)
 916                return 0;
 917
 918        spin_lock(&image->lock);
 919
 920        /* Here we apply for the same strategy we do in master_read
 921         * function in order to assure D16 cycle when required.
 922         */
 923        if ((int)addr & 0x1) {
 924                iowrite8(*(u8 *)buf, addr);
 925                done += 1;
 926                if (done == count)
 927                        goto out;
 928        }
 929        if ((int)addr & 0x2) {
 930                if ((count - done) < 2) {
 931                        iowrite8(*(u8 *)(buf + done), addr + done);
 932                        done += 1;
 933                        goto out;
 934                } else {
 935                        iowrite16(*(u16 *)(buf + done), addr + done);
 936                        done += 2;
 937                }
 938        }
 939
 940        count32 = (count - done) & ~0x3;
 941        if (count32 > 0) {
 942                memcpy_toio(addr + done, buf + done, count32);
 943                done += count32;
 944        }
 945
 946        if ((count - done) & 0x2) {
 947                iowrite16(*(u16 *)(buf + done), addr + done);
 948                done += 2;
 949        }
 950        if ((count - done) & 0x1) {
 951                iowrite8(*(u8 *)(buf + done), addr + done);
 952                done += 1;
 953        }
 954out:
 955        retval = count;
 956
 957        spin_unlock(&image->lock);
 958
 959        return retval;
 960}
 961
/*
 * Perform a hardware read-modify-write "special cycle" through a master
 * window using the bridge's SCYC registers.  The cycle is armed via
 * SCYC_CTL and kicked off by a plain 32-bit read of the target address;
 * the value read back is returned.
 *
 * NOTE(review): the return type is unsigned int, so the -EINVAL error is
 * returned as a large unsigned value - callers must treat it accordingly.
 * NOTE(review): the (u32) cast of kern_base truncates on 64-bit platforms;
 * presumably the mapped window lies below 4GiB - confirm.
 */
static unsigned int ca91cx42_master_rmw(struct vme_master_resource *image,
	unsigned int mask, unsigned int compare, unsigned int swap,
	loff_t offset)
{
	u32 pci_addr, result;
	int i;
	struct ca91cx42_driver *bridge;
	struct device *dev;

	bridge = image->parent->driver_priv;
	dev = image->parent->parent;

	/* Find the PCI address that maps to the desired VME address */
	i = image->number;	/* window number; not used further below */

	/* Locking as we can only do one of these at a time */
	mutex_lock(&bridge->vme_rmw);

	/* Lock image */
	spin_lock(&image->lock);

	pci_addr = (u32)image->kern_base + offset;

	/* Address must be 4-byte aligned */
	if (pci_addr & 0x3) {
		dev_err(dev, "RMW Address not 4-byte aligned\n");
		result = -EINVAL;
		goto out;
	}

	/* Ensure RMW Disabled whilst configuring */
	iowrite32(0, bridge->base + SCYC_CTL);

	/* Configure registers: enable mask, compare value, swap data, and
	 * the PCI address the special cycle should trigger on. */
	iowrite32(mask, bridge->base + SCYC_EN);
	iowrite32(compare, bridge->base + SCYC_CMP);
	iowrite32(swap, bridge->base + SCYC_SWP);
	iowrite32(pci_addr, bridge->base + SCYC_ADDR);

	/* Enable RMW */
	iowrite32(CA91CX42_SCYC_CTL_CYC_RMW, bridge->base + SCYC_CTL);

	/* Kick process off with a read to the required address. */
	result = ioread32(image->kern_base + offset);

	/* Disable RMW */
	iowrite32(0, bridge->base + SCYC_CTL);

out:
	spin_unlock(&image->lock);

	mutex_unlock(&bridge->vme_rmw);

	return result;
}
1017
1018static int ca91cx42_dma_list_add(struct vme_dma_list *list,
1019        struct vme_dma_attr *src, struct vme_dma_attr *dest, size_t count)
1020{
1021        struct ca91cx42_dma_entry *entry, *prev;
1022        struct vme_dma_pci *pci_attr;
1023        struct vme_dma_vme *vme_attr;
1024        dma_addr_t desc_ptr;
1025        int retval = 0;
1026        struct device *dev;
1027
1028        dev = list->parent->parent->parent;
1029
1030        /* XXX descriptor must be aligned on 64-bit boundaries */
1031        entry = kmalloc(sizeof(struct ca91cx42_dma_entry), GFP_KERNEL);
1032        if (entry == NULL) {
1033                dev_err(dev, "Failed to allocate memory for dma resource "
1034                        "structure\n");
1035                retval = -ENOMEM;
1036                goto err_mem;
1037        }
1038
1039        /* Test descriptor alignment */
1040        if ((unsigned long)&entry->descriptor & CA91CX42_DCPP_M) {
1041                dev_err(dev, "Descriptor not aligned to 16 byte boundary as "
1042                        "required: %p\n", &entry->descriptor);
1043                retval = -EINVAL;
1044                goto err_align;
1045        }
1046
1047        memset(&entry->descriptor, 0, sizeof(struct ca91cx42_dma_descriptor));
1048
1049        if (dest->type == VME_DMA_VME) {
1050                entry->descriptor.dctl |= CA91CX42_DCTL_L2V;
1051                vme_attr = dest->private;
1052                pci_attr = src->private;
1053        } else {
1054                vme_attr = src->private;
1055                pci_attr = dest->private;
1056        }
1057
1058        /* Check we can do fullfill required attributes */
1059        if ((vme_attr->aspace & ~(VME_A16 | VME_A24 | VME_A32 | VME_USER1 |
1060                VME_USER2)) != 0) {
1061
1062                dev_err(dev, "Unsupported cycle type\n");
1063                retval = -EINVAL;
1064                goto err_aspace;
1065        }
1066
1067        if ((vme_attr->cycle & ~(VME_SCT | VME_BLT | VME_SUPER | VME_USER |
1068                VME_PROG | VME_DATA)) != 0) {
1069
1070                dev_err(dev, "Unsupported cycle type\n");
1071                retval = -EINVAL;
1072                goto err_cycle;
1073        }
1074
1075        /* Check to see if we can fullfill source and destination */
1076        if (!(((src->type == VME_DMA_PCI) && (dest->type == VME_DMA_VME)) ||
1077                ((src->type == VME_DMA_VME) && (dest->type == VME_DMA_PCI)))) {
1078
1079                dev_err(dev, "Cannot perform transfer with this "
1080                        "source-destination combination\n");
1081                retval = -EINVAL;
1082                goto err_direct;
1083        }
1084
1085        /* Setup cycle types */
1086        if (vme_attr->cycle & VME_BLT)
1087                entry->descriptor.dctl |= CA91CX42_DCTL_VCT_BLT;
1088
1089        /* Setup data width */
1090        switch (vme_attr->dwidth) {
1091        case VME_D8:
1092                entry->descriptor.dctl |= CA91CX42_DCTL_VDW_D8;
1093                break;
1094        case VME_D16:
1095                entry->descriptor.dctl |= CA91CX42_DCTL_VDW_D16;
1096                break;
1097        case VME_D32:
1098                entry->descriptor.dctl |= CA91CX42_DCTL_VDW_D32;
1099                break;
1100        case VME_D64:
1101                entry->descriptor.dctl |= CA91CX42_DCTL_VDW_D64;
1102                break;
1103        default:
1104                dev_err(dev, "Invalid data width\n");
1105                return -EINVAL;
1106        }
1107
1108        /* Setup address space */
1109        switch (vme_attr->aspace) {
1110        case VME_A16:
1111                entry->descriptor.dctl |= CA91CX42_DCTL_VAS_A16;
1112                break;
1113        case VME_A24:
1114                entry->descriptor.dctl |= CA91CX42_DCTL_VAS_A24;
1115                break;
1116        case VME_A32:
1117                entry->descriptor.dctl |= CA91CX42_DCTL_VAS_A32;
1118                break;
1119        case VME_USER1:
1120                entry->descriptor.dctl |= CA91CX42_DCTL_VAS_USER1;
1121                break;
1122        case VME_USER2:
1123                entry->descriptor.dctl |= CA91CX42_DCTL_VAS_USER2;
1124                break;
1125        default:
1126                dev_err(dev, "Invalid address space\n");
1127                return -EINVAL;
1128                break;
1129        }
1130
1131        if (vme_attr->cycle & VME_SUPER)
1132                entry->descriptor.dctl |= CA91CX42_DCTL_SUPER_SUPR;
1133        if (vme_attr->cycle & VME_PROG)
1134                entry->descriptor.dctl |= CA91CX42_DCTL_PGM_PGM;
1135
1136        entry->descriptor.dtbc = count;
1137        entry->descriptor.dla = pci_attr->address;
1138        entry->descriptor.dva = vme_attr->address;
1139        entry->descriptor.dcpp = CA91CX42_DCPP_NULL;
1140
1141        /* Add to list */
1142        list_add_tail(&entry->list, &list->entries);
1143
1144        /* Fill out previous descriptors "Next Address" */
1145        if (entry->list.prev != &list->entries) {
1146                prev = list_entry(entry->list.prev, struct ca91cx42_dma_entry,
1147                        list);
1148                /* We need the bus address for the pointer */
1149                desc_ptr = virt_to_bus(&entry->descriptor);
1150                prev->descriptor.dcpp = desc_ptr & ~CA91CX42_DCPP_M;
1151        }
1152
1153        return 0;
1154
1155err_cycle:
1156err_aspace:
1157err_direct:
1158err_align:
1159        kfree(entry);
1160err_mem:
1161        return retval;
1162}
1163
1164static int ca91cx42_dma_busy(struct vme_bridge *ca91cx42_bridge)
1165{
1166        u32 tmp;
1167        struct ca91cx42_driver *bridge;
1168
1169        bridge = ca91cx42_bridge->driver_priv;
1170
1171        tmp = ioread32(bridge->base + DGCS);
1172
1173        if (tmp & CA91CX42_DGCS_ACT)
1174                return 0;
1175        else
1176                return 1;
1177}
1178
1179static int ca91cx42_dma_list_exec(struct vme_dma_list *list)
1180{
1181        struct vme_dma_resource *ctrlr;
1182        struct ca91cx42_dma_entry *entry;
1183        int retval = 0;
1184        dma_addr_t bus_addr;
1185        u32 val;
1186        struct device *dev;
1187        struct ca91cx42_driver *bridge;
1188
1189        ctrlr = list->parent;
1190
1191        bridge = ctrlr->parent->driver_priv;
1192        dev = ctrlr->parent->parent;
1193
1194        mutex_lock(&ctrlr->mtx);
1195
1196        if (!(list_empty(&ctrlr->running))) {
1197                /*
1198                 * XXX We have an active DMA transfer and currently haven't
1199                 *     sorted out the mechanism for "pending" DMA transfers.
1200                 *     Return busy.
1201                 */
1202                /* Need to add to pending here */
1203                mutex_unlock(&ctrlr->mtx);
1204                return -EBUSY;
1205        } else {
1206                list_add(&list->list, &ctrlr->running);
1207        }
1208
1209        /* Get first bus address and write into registers */
1210        entry = list_first_entry(&list->entries, struct ca91cx42_dma_entry,
1211                list);
1212
1213        bus_addr = virt_to_bus(&entry->descriptor);
1214
1215        mutex_unlock(&ctrlr->mtx);
1216
1217        iowrite32(0, bridge->base + DTBC);
1218        iowrite32(bus_addr & ~CA91CX42_DCPP_M, bridge->base + DCPP);
1219
1220        /* Start the operation */
1221        val = ioread32(bridge->base + DGCS);
1222
1223        /* XXX Could set VMEbus On and Off Counters here */
1224        val &= (CA91CX42_DGCS_VON_M | CA91CX42_DGCS_VOFF_M);
1225
1226        val |= (CA91CX42_DGCS_CHAIN | CA91CX42_DGCS_STOP | CA91CX42_DGCS_HALT |
1227                CA91CX42_DGCS_DONE | CA91CX42_DGCS_LERR | CA91CX42_DGCS_VERR |
1228                CA91CX42_DGCS_PERR);
1229
1230        iowrite32(val, bridge->base + DGCS);
1231
1232        val |= CA91CX42_DGCS_GO;
1233
1234        iowrite32(val, bridge->base + DGCS);
1235
1236        wait_event_interruptible(bridge->dma_queue,
1237                ca91cx42_dma_busy(ctrlr->parent));
1238
1239        /*
1240         * Read status register, this register is valid until we kick off a
1241         * new transfer.
1242         */
1243        val = ioread32(bridge->base + DGCS);
1244
1245        if (val & (CA91CX42_DGCS_LERR | CA91CX42_DGCS_VERR |
1246                CA91CX42_DGCS_PERR)) {
1247
1248                dev_err(dev, "ca91c042: DMA Error. DGCS=%08X\n", val);
1249                val = ioread32(bridge->base + DCTL);
1250        }
1251
1252        /* Remove list from running list */
1253        mutex_lock(&ctrlr->mtx);
1254        list_del(&list->list);
1255        mutex_unlock(&ctrlr->mtx);
1256
1257        return retval;
1258
1259}
1260
1261static int ca91cx42_dma_list_empty(struct vme_dma_list *list)
1262{
1263        struct list_head *pos, *temp;
1264        struct ca91cx42_dma_entry *entry;
1265
1266        /* detach and free each entry */
1267        list_for_each_safe(pos, temp, &list->entries) {
1268                list_del(pos);
1269                entry = list_entry(pos, struct ca91cx42_dma_entry, list);
1270                kfree(entry);
1271        }
1272
1273        return 0;
1274}
1275
1276/*
1277 * All 4 location monitors reside at the same base - this is therefore a
1278 * system wide configuration.
1279 *
1280 * This does not enable the LM monitor - that should be done when the first
1281 * callback is attached and disabled when the last callback is removed.
1282 */
1283static int ca91cx42_lm_set(struct vme_lm_resource *lm,
1284        unsigned long long lm_base, vme_address_t aspace, vme_cycle_t cycle)
1285{
1286        u32 temp_base, lm_ctl = 0;
1287        int i;
1288        struct ca91cx42_driver *bridge;
1289        struct device *dev;
1290
1291        bridge = lm->parent->driver_priv;
1292        dev = lm->parent->parent;
1293
1294        /* Check the alignment of the location monitor */
1295        temp_base = (u32)lm_base;
1296        if (temp_base & 0xffff) {
1297                dev_err(dev, "Location monitor must be aligned to 64KB "
1298                        "boundary");
1299                return -EINVAL;
1300        }
1301
1302        mutex_lock(&lm->mtx);
1303
1304        /* If we already have a callback attached, we can't move it! */
1305        for (i = 0; i < lm->monitors; i++) {
1306                if (bridge->lm_callback[i] != NULL) {
1307                        mutex_unlock(&lm->mtx);
1308                        dev_err(dev, "Location monitor callback attached, "
1309                                "can't reset\n");
1310                        return -EBUSY;
1311                }
1312        }
1313
1314        switch (aspace) {
1315        case VME_A16:
1316                lm_ctl |= CA91CX42_LM_CTL_AS_A16;
1317                break;
1318        case VME_A24:
1319                lm_ctl |= CA91CX42_LM_CTL_AS_A24;
1320                break;
1321        case VME_A32:
1322                lm_ctl |= CA91CX42_LM_CTL_AS_A32;
1323                break;
1324        default:
1325                mutex_unlock(&lm->mtx);
1326                dev_err(dev, "Invalid address space\n");
1327                return -EINVAL;
1328                break;
1329        }
1330
1331        if (cycle & VME_SUPER)
1332                lm_ctl |= CA91CX42_LM_CTL_SUPR;
1333        if (cycle & VME_USER)
1334                lm_ctl |= CA91CX42_LM_CTL_NPRIV;
1335        if (cycle & VME_PROG)
1336                lm_ctl |= CA91CX42_LM_CTL_PGM;
1337        if (cycle & VME_DATA)
1338                lm_ctl |= CA91CX42_LM_CTL_DATA;
1339
1340        iowrite32(lm_base, bridge->base + LM_BS);
1341        iowrite32(lm_ctl, bridge->base + LM_CTL);
1342
1343        mutex_unlock(&lm->mtx);
1344
1345        return 0;
1346}
1347
1348/* Get configuration of the callback monitor and return whether it is enabled
1349 * or disabled.
1350 */
1351static int ca91cx42_lm_get(struct vme_lm_resource *lm,
1352        unsigned long long *lm_base, vme_address_t *aspace, vme_cycle_t *cycle)
1353{
1354        u32 lm_ctl, enabled = 0;
1355        struct ca91cx42_driver *bridge;
1356
1357        bridge = lm->parent->driver_priv;
1358
1359        mutex_lock(&lm->mtx);
1360
1361        *lm_base = (unsigned long long)ioread32(bridge->base + LM_BS);
1362        lm_ctl = ioread32(bridge->base + LM_CTL);
1363
1364        if (lm_ctl & CA91CX42_LM_CTL_EN)
1365                enabled = 1;
1366
1367        if ((lm_ctl & CA91CX42_LM_CTL_AS_M) == CA91CX42_LM_CTL_AS_A16)
1368                *aspace = VME_A16;
1369        if ((lm_ctl & CA91CX42_LM_CTL_AS_M) == CA91CX42_LM_CTL_AS_A24)
1370                *aspace = VME_A24;
1371        if ((lm_ctl & CA91CX42_LM_CTL_AS_M) == CA91CX42_LM_CTL_AS_A32)
1372                *aspace = VME_A32;
1373
1374        *cycle = 0;
1375        if (lm_ctl & CA91CX42_LM_CTL_SUPR)
1376                *cycle |= VME_SUPER;
1377        if (lm_ctl & CA91CX42_LM_CTL_NPRIV)
1378                *cycle |= VME_USER;
1379        if (lm_ctl & CA91CX42_LM_CTL_PGM)
1380                *cycle |= VME_PROG;
1381        if (lm_ctl & CA91CX42_LM_CTL_DATA)
1382                *cycle |= VME_DATA;
1383
1384        mutex_unlock(&lm->mtx);
1385
1386        return enabled;
1387}
1388
1389/*
1390 * Attach a callback to a specific location monitor.
1391 *
1392 * Callback will be passed the monitor triggered.
1393 */
1394static int ca91cx42_lm_attach(struct vme_lm_resource *lm, int monitor,
1395        void (*callback)(int))
1396{
1397        u32 lm_ctl, tmp;
1398        struct ca91cx42_driver *bridge;
1399        struct device *dev;
1400
1401        bridge = lm->parent->driver_priv;
1402        dev = lm->parent->parent;
1403
1404        mutex_lock(&lm->mtx);
1405
1406        /* Ensure that the location monitor is configured - need PGM or DATA */
1407        lm_ctl = ioread32(bridge->base + LM_CTL);
1408        if ((lm_ctl & (CA91CX42_LM_CTL_PGM | CA91CX42_LM_CTL_DATA)) == 0) {
1409                mutex_unlock(&lm->mtx);
1410                dev_err(dev, "Location monitor not properly configured\n");
1411                return -EINVAL;
1412        }
1413
1414        /* Check that a callback isn't already attached */
1415        if (bridge->lm_callback[monitor] != NULL) {
1416                mutex_unlock(&lm->mtx);
1417                dev_err(dev, "Existing callback attached\n");
1418                return -EBUSY;
1419        }
1420
1421        /* Attach callback */
1422        bridge->lm_callback[monitor] = callback;
1423
1424        /* Enable Location Monitor interrupt */
1425        tmp = ioread32(bridge->base + LINT_EN);
1426        tmp |= CA91CX42_LINT_LM[monitor];
1427        iowrite32(tmp, bridge->base + LINT_EN);
1428
1429        /* Ensure that global Location Monitor Enable set */
1430        if ((lm_ctl & CA91CX42_LM_CTL_EN) == 0) {
1431                lm_ctl |= CA91CX42_LM_CTL_EN;
1432                iowrite32(lm_ctl, bridge->base + LM_CTL);
1433        }
1434
1435        mutex_unlock(&lm->mtx);
1436
1437        return 0;
1438}
1439
/*
 * Detach a callback function from a specific location monitor.
 */
static int ca91cx42_lm_detach(struct vme_lm_resource *lm, int monitor)
{
	u32 tmp;
	struct ca91cx42_driver *bridge;

	bridge = lm->parent->driver_priv;

	mutex_lock(&lm->mtx);

	/* Disable Location Monitor and ensure previous interrupts are clear */
	tmp = ioread32(bridge->base + LINT_EN);
	tmp &= ~CA91CX42_LINT_LM[monitor];
	iowrite32(tmp, bridge->base + LINT_EN);

	/* Acknowledge any pending interrupt for this monitor by writing its
	 * bit to the interrupt status register. */
	iowrite32(CA91CX42_LINT_LM[monitor],
		 bridge->base + LINT_STAT);

	/* Detach callback */
	bridge->lm_callback[monitor] = NULL;

	/* If all location monitors disabled, disable global Location Monitor */
	/* "tmp" still holds the LINT_EN value written above, so this tests
	 * whether any monitor interrupt remains enabled. */
	if ((tmp & (CA91CX42_LINT_LM0 | CA91CX42_LINT_LM1 | CA91CX42_LINT_LM2 |
			CA91CX42_LINT_LM3)) == 0) {
		tmp = ioread32(bridge->base + LM_CTL);
		tmp &= ~CA91CX42_LM_CTL_EN;
		iowrite32(tmp, bridge->base + LM_CTL);
	}

	mutex_unlock(&lm->mtx);

	return 0;
}
1475
1476static int ca91cx42_slot_get(struct vme_bridge *ca91cx42_bridge)
1477{
1478        u32 slot = 0;
1479        struct ca91cx42_driver *bridge;
1480
1481        bridge = ca91cx42_bridge->driver_priv;
1482
1483        if (!geoid) {
1484                slot = ioread32(bridge->base + VCSR_BS);
1485                slot = ((slot & CA91CX42_VCSR_BS_SLOT_M) >> 27);
1486        } else
1487                slot = geoid;
1488
1489        return (int)slot;
1490
1491}
1492
/* Module init: register with the PCI core; ca91cx42_probe() is then
 * called for each matching Tundra CA91C142 device. */
static int __init ca91cx42_init(void)
{
	return pci_register_driver(&ca91cx42_driver);
}
1497
1498/*
1499 * Configure CR/CSR space
1500 *
1501 * Access to the CR/CSR can be configured at power-up. The location of the
 * CR/CSR registers in the CR/CSR address space is determined by the board's
 * Auto-ID or Geographic address. This function ensures that the window is
 * enabled at an offset consistent with the board's geographic address.
1505 */
static int ca91cx42_crcsr_init(struct vme_bridge *ca91cx42_bridge,
	struct pci_dev *pdev)
{
	unsigned int crcsr_addr;
	int tmp, slot;
	struct ca91cx42_driver *bridge;

	bridge = ca91cx42_bridge->driver_priv;

	slot = ca91cx42_slot_get(ca91cx42_bridge);

	/* Write CSR Base Address if slot ID is supplied as a module param */
	if (geoid)
		iowrite32(geoid << 27, bridge->base + VCSR_BS);

	dev_info(&pdev->dev, "CR/CSR Offset: %d\n", slot);
	if (slot == 0) {
		dev_err(&pdev->dev, "Slot number is unset, not configuring "
			"CR/CSR space\n");
		return -EINVAL;
	}

	/* Allocate mem for CR/CSR image (coherent, so the bus address can be
	 * programmed into VCSR_TO below) */
	bridge->crcsr_kernel = pci_alloc_consistent(pdev, VME_CRCSR_BUF_SIZE,
		&bridge->crcsr_bus);
	if (bridge->crcsr_kernel == NULL) {
		dev_err(&pdev->dev, "Failed to allocate memory for CR/CSR "
			"image\n");
		return -ENOMEM;
	}

	memset(bridge->crcsr_kernel, 0, VME_CRCSR_BUF_SIZE);

	/* Each slot's CR/CSR window sits at slot * 512KB; VCSR_TO holds the
	 * offset translating that VME address to our buffer's bus address. */
	crcsr_addr = slot * (512 * 1024);
	iowrite32(bridge->crcsr_bus - crcsr_addr, bridge->base + VCSR_TO);

	/* Enable the CR/CSR window only after the translation is in place */
	tmp = ioread32(bridge->base + VCSR_CTL);
	tmp |= CA91CX42_VCSR_CTL_EN;
	iowrite32(tmp, bridge->base + VCSR_CTL);

	return 0;
}
1548
1549static void ca91cx42_crcsr_exit(struct vme_bridge *ca91cx42_bridge,
1550        struct pci_dev *pdev)
1551{
1552        u32 tmp;
1553        struct ca91cx42_driver *bridge;
1554
1555        bridge = ca91cx42_bridge->driver_priv;
1556
1557        /* Turn off CR/CSR space */
1558        tmp = ioread32(bridge->base + VCSR_CTL);
1559        tmp &= ~CA91CX42_VCSR_CTL_EN;
1560        iowrite32(tmp, bridge->base + VCSR_CTL);
1561
1562        /* Free image */
1563        iowrite32(0, bridge->base + VCSR_TO);
1564
1565        pci_free_consistent(pdev, VME_CRCSR_BUF_SIZE, bridge->crcsr_kernel,
1566                bridge->crcsr_bus);
1567}
1568
1569static int ca91cx42_probe(struct pci_dev *pdev, const struct pci_device_id *id)
1570{
1571        int retval, i;
1572        u32 data;
1573        struct list_head *pos = NULL;
1574        struct vme_bridge *ca91cx42_bridge;
1575        struct ca91cx42_driver *ca91cx42_device;
1576        struct vme_master_resource *master_image;
1577        struct vme_slave_resource *slave_image;
1578        struct vme_dma_resource *dma_ctrlr;
1579        struct vme_lm_resource *lm;
1580
1581        /* We want to support more than one of each bridge so we need to
1582         * dynamically allocate the bridge structure
1583         */
1584        ca91cx42_bridge = kzalloc(sizeof(struct vme_bridge), GFP_KERNEL);
1585
1586        if (ca91cx42_bridge == NULL) {
1587                dev_err(&pdev->dev, "Failed to allocate memory for device "
1588                        "structure\n");
1589                retval = -ENOMEM;
1590                goto err_struct;
1591        }
1592
1593        ca91cx42_device = kzalloc(sizeof(struct ca91cx42_driver), GFP_KERNEL);
1594
1595        if (ca91cx42_device == NULL) {
1596                dev_err(&pdev->dev, "Failed to allocate memory for device "
1597                        "structure\n");
1598                retval = -ENOMEM;
1599                goto err_driver;
1600        }
1601
1602        ca91cx42_bridge->driver_priv = ca91cx42_device;
1603
1604        /* Enable the device */
1605        retval = pci_enable_device(pdev);
1606        if (retval) {
1607                dev_err(&pdev->dev, "Unable to enable device\n");
1608                goto err_enable;
1609        }
1610
1611        /* Map Registers */
1612        retval = pci_request_regions(pdev, driver_name);
1613        if (retval) {
1614                dev_err(&pdev->dev, "Unable to reserve resources\n");
1615                goto err_resource;
1616        }
1617
1618        /* map registers in BAR 0 */
1619        ca91cx42_device->base = ioremap_nocache(pci_resource_start(pdev, 0),
1620                4096);
1621        if (!ca91cx42_device->base) {
1622                dev_err(&pdev->dev, "Unable to remap CRG region\n");
1623                retval = -EIO;
1624                goto err_remap;
1625        }
1626
1627        /* Check to see if the mapping worked out */
1628        data = ioread32(ca91cx42_device->base + CA91CX42_PCI_ID) & 0x0000FFFF;
1629        if (data != PCI_VENDOR_ID_TUNDRA) {
1630                dev_err(&pdev->dev, "PCI_ID check failed\n");
1631                retval = -EIO;
1632                goto err_test;
1633        }
1634
1635        /* Initialize wait queues & mutual exclusion flags */
1636        init_waitqueue_head(&ca91cx42_device->dma_queue);
1637        init_waitqueue_head(&ca91cx42_device->iack_queue);
1638        mutex_init(&ca91cx42_device->vme_int);
1639        mutex_init(&ca91cx42_device->vme_rmw);
1640
1641        ca91cx42_bridge->parent = &pdev->dev;
1642        strcpy(ca91cx42_bridge->name, driver_name);
1643
1644        /* Setup IRQ */
1645        retval = ca91cx42_irq_init(ca91cx42_bridge);
1646        if (retval != 0) {
1647                dev_err(&pdev->dev, "Chip Initialization failed.\n");
1648                goto err_irq;
1649        }
1650
1651        /* Add master windows to list */
1652        INIT_LIST_HEAD(&ca91cx42_bridge->master_resources);
1653        for (i = 0; i < CA91C142_MAX_MASTER; i++) {
1654                master_image = kmalloc(sizeof(struct vme_master_resource),
1655                        GFP_KERNEL);
1656                if (master_image == NULL) {
1657                        dev_err(&pdev->dev, "Failed to allocate memory for "
1658                        "master resource structure\n");
1659                        retval = -ENOMEM;
1660                        goto err_master;
1661                }
1662                master_image->parent = ca91cx42_bridge;
1663                spin_lock_init(&master_image->lock);
1664                master_image->locked = 0;
1665                master_image->number = i;
1666                master_image->address_attr = VME_A16 | VME_A24 | VME_A32 |
1667                        VME_CRCSR | VME_USER1 | VME_USER2;
1668                master_image->cycle_attr = VME_SCT | VME_BLT | VME_MBLT |
1669                        VME_SUPER | VME_USER | VME_PROG | VME_DATA;
1670                master_image->width_attr = VME_D8 | VME_D16 | VME_D32 | VME_D64;
1671                memset(&master_image->bus_resource, 0,
1672                        sizeof(struct resource));
1673                master_image->kern_base  = NULL;
1674                list_add_tail(&master_image->list,
1675                        &ca91cx42_bridge->master_resources);
1676        }
1677
1678        /* Add slave windows to list */
1679        INIT_LIST_HEAD(&ca91cx42_bridge->slave_resources);
1680        for (i = 0; i < CA91C142_MAX_SLAVE; i++) {
1681                slave_image = kmalloc(sizeof(struct vme_slave_resource),
1682                        GFP_KERNEL);
1683                if (slave_image == NULL) {
1684                        dev_err(&pdev->dev, "Failed to allocate memory for "
1685                        "slave resource structure\n");
1686                        retval = -ENOMEM;
1687                        goto err_slave;
1688                }
1689                slave_image->parent = ca91cx42_bridge;
1690                mutex_init(&slave_image->mtx);
1691                slave_image->locked = 0;
1692                slave_image->number = i;
1693                slave_image->address_attr = VME_A24 | VME_A32 | VME_USER1 |
1694                        VME_USER2;
1695
1696                /* Only windows 0 and 4 support A16 */
1697                if (i == 0 || i == 4)
1698                        slave_image->address_attr |= VME_A16;
1699
1700                slave_image->cycle_attr = VME_SCT | VME_BLT | VME_MBLT |
1701                        VME_SUPER | VME_USER | VME_PROG | VME_DATA;
1702                list_add_tail(&slave_image->list,
1703                        &ca91cx42_bridge->slave_resources);
1704        }
1705
1706        /* Add dma engines to list */
1707        INIT_LIST_HEAD(&ca91cx42_bridge->dma_resources);
1708        for (i = 0; i < CA91C142_MAX_DMA; i++) {
1709                dma_ctrlr = kmalloc(sizeof(struct vme_dma_resource),
1710                        GFP_KERNEL);
1711                if (dma_ctrlr == NULL) {
1712                        dev_err(&pdev->dev, "Failed to allocate memory for "
1713                        "dma resource structure\n");
1714                        retval = -ENOMEM;
1715                        goto err_dma;
1716                }
1717                dma_ctrlr->parent = ca91cx42_bridge;
1718                mutex_init(&dma_ctrlr->mtx);
1719                dma_ctrlr->locked = 0;
1720                dma_ctrlr->number = i;
1721                dma_ctrlr->route_attr = VME_DMA_VME_TO_MEM |
1722                        VME_DMA_MEM_TO_VME;
1723                INIT_LIST_HEAD(&dma_ctrlr->pending);
1724                INIT_LIST_HEAD(&dma_ctrlr->running);
1725                list_add_tail(&dma_ctrlr->list,
1726                        &ca91cx42_bridge->dma_resources);
1727        }
1728
1729        /* Add location monitor to list */
1730        INIT_LIST_HEAD(&ca91cx42_bridge->lm_resources);
1731        lm = kmalloc(sizeof(struct vme_lm_resource), GFP_KERNEL);
1732        if (lm == NULL) {
1733                dev_err(&pdev->dev, "Failed to allocate memory for "
1734                "location monitor resource structure\n");
1735                retval = -ENOMEM;
1736                goto err_lm;
1737        }
1738        lm->parent = ca91cx42_bridge;
1739        mutex_init(&lm->mtx);
1740        lm->locked = 0;
1741        lm->number = 1;
1742        lm->monitors = 4;
1743        list_add_tail(&lm->list, &ca91cx42_bridge->lm_resources);
1744
1745        ca91cx42_bridge->slave_get = ca91cx42_slave_get;
1746        ca91cx42_bridge->slave_set = ca91cx42_slave_set;
1747        ca91cx42_bridge->master_get = ca91cx42_master_get;
1748        ca91cx42_bridge->master_set = ca91cx42_master_set;
1749        ca91cx42_bridge->master_read = ca91cx42_master_read;
1750        ca91cx42_bridge->master_write = ca91cx42_master_write;
1751        ca91cx42_bridge->master_rmw = ca91cx42_master_rmw;
1752        ca91cx42_bridge->dma_list_add = ca91cx42_dma_list_add;
1753        ca91cx42_bridge->dma_list_exec = ca91cx42_dma_list_exec;
1754        ca91cx42_bridge->dma_list_empty = ca91cx42_dma_list_empty;
1755        ca91cx42_bridge->irq_set = ca91cx42_irq_set;
1756        ca91cx42_bridge->irq_generate = ca91cx42_irq_generate;
1757        ca91cx42_bridge->lm_set = ca91cx42_lm_set;
1758        ca91cx42_bridge->lm_get = ca91cx42_lm_get;
1759        ca91cx42_bridge->lm_attach = ca91cx42_lm_attach;
1760        ca91cx42_bridge->lm_detach = ca91cx42_lm_detach;
1761        ca91cx42_bridge->slot_get = ca91cx42_slot_get;
1762
1763        data = ioread32(ca91cx42_device->base + MISC_CTL);
1764        dev_info(&pdev->dev, "Board is%s the VME system controller\n",
1765                (data & CA91CX42_MISC_CTL_SYSCON) ? "" : " not");
1766        dev_info(&pdev->dev, "Slot ID is %d\n",
1767                ca91cx42_slot_get(ca91cx42_bridge));
1768
1769        if (ca91cx42_crcsr_init(ca91cx42_bridge, pdev))
1770                dev_err(&pdev->dev, "CR/CSR configuration failed.\n");
1771
1772        /* Need to save ca91cx42_bridge pointer locally in link list for use in
1773         * ca91cx42_remove()
1774         */
1775        retval = vme_register_bridge(ca91cx42_bridge);
1776        if (retval != 0) {
1777                dev_err(&pdev->dev, "Chip Registration failed.\n");
1778                goto err_reg;
1779        }
1780
1781        pci_set_drvdata(pdev, ca91cx42_bridge);
1782
1783        return 0;
1784
1785        vme_unregister_bridge(ca91cx42_bridge);
1786err_reg:
1787        ca91cx42_crcsr_exit(ca91cx42_bridge, pdev);
1788err_lm:
1789        /* resources are stored in link list */
1790        list_for_each(pos, &ca91cx42_bridge->lm_resources) {
1791                lm = list_entry(pos, struct vme_lm_resource, list);
1792                list_del(pos);
1793                kfree(lm);
1794        }
1795err_dma:
1796        /* resources are stored in link list */
1797        list_for_each(pos, &ca91cx42_bridge->dma_resources) {
1798                dma_ctrlr = list_entry(pos, struct vme_dma_resource, list);
1799                list_del(pos);
1800                kfree(dma_ctrlr);
1801        }
1802err_slave:
1803        /* resources are stored in link list */
1804        list_for_each(pos, &ca91cx42_bridge->slave_resources) {
1805                slave_image = list_entry(pos, struct vme_slave_resource, list);
1806                list_del(pos);
1807                kfree(slave_image);
1808        }
1809err_master:
1810        /* resources are stored in link list */
1811        list_for_each(pos, &ca91cx42_bridge->master_resources) {
1812                master_image = list_entry(pos, struct vme_master_resource,
1813                        list);
1814                list_del(pos);
1815                kfree(master_image);
1816        }
1817
1818        ca91cx42_irq_exit(ca91cx42_device, pdev);
1819err_irq:
1820err_test:
1821        iounmap(ca91cx42_device->base);
1822err_remap:
1823        pci_release_regions(pdev);
1824err_resource:
1825        pci_disable_device(pdev);
1826err_enable:
1827        kfree(ca91cx42_device);
1828err_driver:
1829        kfree(ca91cx42_bridge);
1830err_struct:
1831        return retval;
1832
1833}
1834
1835static void ca91cx42_remove(struct pci_dev *pdev)
1836{
1837        struct list_head *pos = NULL;
1838        struct vme_master_resource *master_image;
1839        struct vme_slave_resource *slave_image;
1840        struct vme_dma_resource *dma_ctrlr;
1841        struct vme_lm_resource *lm;
1842        struct ca91cx42_driver *bridge;
1843        struct vme_bridge *ca91cx42_bridge = pci_get_drvdata(pdev);
1844
1845        bridge = ca91cx42_bridge->driver_priv;
1846
1847
1848        /* Turn off Ints */
1849        iowrite32(0, bridge->base + LINT_EN);
1850
1851        /* Turn off the windows */
1852        iowrite32(0x00800000, bridge->base + LSI0_CTL);
1853        iowrite32(0x00800000, bridge->base + LSI1_CTL);
1854        iowrite32(0x00800000, bridge->base + LSI2_CTL);
1855        iowrite32(0x00800000, bridge->base + LSI3_CTL);
1856        iowrite32(0x00800000, bridge->base + LSI4_CTL);
1857        iowrite32(0x00800000, bridge->base + LSI5_CTL);
1858        iowrite32(0x00800000, bridge->base + LSI6_CTL);
1859        iowrite32(0x00800000, bridge->base + LSI7_CTL);
1860        iowrite32(0x00F00000, bridge->base + VSI0_CTL);
1861        iowrite32(0x00F00000, bridge->base + VSI1_CTL);
1862        iowrite32(0x00F00000, bridge->base + VSI2_CTL);
1863        iowrite32(0x00F00000, bridge->base + VSI3_CTL);
1864        iowrite32(0x00F00000, bridge->base + VSI4_CTL);
1865        iowrite32(0x00F00000, bridge->base + VSI5_CTL);
1866        iowrite32(0x00F00000, bridge->base + VSI6_CTL);
1867        iowrite32(0x00F00000, bridge->base + VSI7_CTL);
1868
1869        vme_unregister_bridge(ca91cx42_bridge);
1870
1871        ca91cx42_crcsr_exit(ca91cx42_bridge, pdev);
1872
1873        /* resources are stored in link list */
1874        list_for_each(pos, &ca91cx42_bridge->lm_resources) {
1875                lm = list_entry(pos, struct vme_lm_resource, list);
1876                list_del(pos);
1877                kfree(lm);
1878        }
1879
1880        /* resources are stored in link list */
1881        list_for_each(pos, &ca91cx42_bridge->dma_resources) {
1882                dma_ctrlr = list_entry(pos, struct vme_dma_resource, list);
1883                list_del(pos);
1884                kfree(dma_ctrlr);
1885        }
1886
1887        /* resources are stored in link list */
1888        list_for_each(pos, &ca91cx42_bridge->slave_resources) {
1889                slave_image = list_entry(pos, struct vme_slave_resource, list);
1890                list_del(pos);
1891                kfree(slave_image);
1892        }
1893
1894        /* resources are stored in link list */
1895        list_for_each(pos, &ca91cx42_bridge->master_resources) {
1896                master_image = list_entry(pos, struct vme_master_resource,
1897                        list);
1898                list_del(pos);
1899                kfree(master_image);
1900        }
1901
1902        ca91cx42_irq_exit(bridge, pdev);
1903
1904        iounmap(bridge->base);
1905
1906        pci_release_regions(pdev);
1907
1908        pci_disable_device(pdev);
1909
1910        kfree(ca91cx42_bridge);
1911}
1912
/*
 * Module exit: unregister the PCI driver.  The PCI core then invokes
 * ca91cx42_remove() for each bound device, which performs the actual
 * per-device teardown.
 */
static void __exit ca91cx42_exit(void)
{
	pci_unregister_driver(&ca91cx42_driver);
}
1917
/* "geoid" overrides the geographical (slot-based) VME address; 0 = use
 * the hardware-reported slot (see ca91cx42_slot_get / geoid uses above).
 * Perm 0: not visible/writable via sysfs, set at load time only.
 */
MODULE_PARM_DESC(geoid, "Override geographical addressing");
module_param(geoid, int, 0);

MODULE_DESCRIPTION("VME driver for the Tundra Universe II VME bridge");
MODULE_LICENSE("GPL");

module_init(ca91cx42_init);
module_exit(ca91cx42_exit);
1926